Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-upstream
diff --git a/CREDITS b/CREDITS
index abe05a0..2b39168 100644
--- a/CREDITS
+++ b/CREDITS
@@ -464,6 +464,11 @@
 S: Nampa, Idaho 83686
 S: USA
 
+N: Dirk J. Brandewie
+E: dirk.j.brandewie@intel.com
+E: linux-wimax@intel.com
+D: Intel Wireless WiMAX Connection 2400 SDIO driver
+
 N: Derrick J. Brashear
 E: shadow@dementia.org
 W: http://www.dementia.org/~shadow
@@ -1681,7 +1686,7 @@
 D: fbdev hacking
 
 N: Jesper Juhl
-E: jesper.juhl@gmail.com
+E: jj@chaosbits.net
 D: Various fixes, cleanups and minor features all over the tree.
 D: Wrote initial version of the hdaps driver (since passed on to others).
 S: Lemnosvej 1, 3.tv
@@ -2119,6 +2124,11 @@
 E: hjl@gnu.ai.mit.edu
 D: GCC + libraries hacker
 
+N: Yanir Lubetkin
+E: yanirx.lubatkin@intel.com
+E: linux-wimax@intel.com
+D: Intel Wireless WiMAX Connection 2400 driver
+
 N: Michal Ludvig
 E: michal@logix.cz
 E: michal.ludvig@asterisk.co.nz
@@ -2693,6 +2703,13 @@
 S: Thunder Bay, Ontario
 S: CANADA P7C 5M9
 
+N: Inaky Perez-Gonzalez
+E: inaky.perez-gonzalez@intel.com
+E: linux-wimax@intel.com
+E: inakypg@yahoo.com
+D: WiMAX stack
+D: Intel Wireless WiMAX Connection 2400 driver
+
 N: Yuri Per
 E: yuri@pts.mipt.ru
 D: Some smbfs fixes
@@ -3769,14 +3786,11 @@
 
 N: David Woodhouse
 E: dwmw2@infradead.org
-D: ARCnet stuff, Applicom board driver, SO_BINDTODEVICE,
-D: some Alpha platform porting from 2.0, Memory Technology Devices,
-D: Acquire watchdog timer, PC speaker driver maintenance,
+D: JFFS2 file system, Memory Technology Device subsystem,
 D: various other stuff that annoyed me by not working.
-S: c/o Red Hat Engineering
-S: Rustat House
-S: 60 Clifton Road
-S: Cambridge. CB1 7EG
+S: c/o Intel Corporation
+S: Pipers Way
+S: Swindon. SN3 1RJ
 S: England
 
 N: Chris Wright
diff --git a/Documentation/ABI/testing/sysfs-class-regulator b/Documentation/ABI/testing/sysfs-class-regulator
index 3731f6f..873ef1f 100644
--- a/Documentation/ABI/testing/sysfs-class-regulator
+++ b/Documentation/ABI/testing/sysfs-class-regulator
@@ -3,8 +3,9 @@
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
-		state. This holds the regulator output state.
+		Some regulator directories will contain a field called
+		state. This reports the regulator enable status, for
+		regulators which can report that value.
 
 		This will be one of the following strings:
 
@@ -18,7 +19,8 @@
 		'disabled' means the regulator output is OFF and is not
 		supplying power to the system..
 
-		'unknown' means software cannot determine the state.
+		'unknown' means software cannot determine the state, or
+		the reported state is invalid.
 
 		NOTE: this field can be used in conjunction with microvolts
 		and microamps to determine regulator output levels.
@@ -53,9 +55,10 @@
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		microvolts. This holds the regulator output voltage setting
-		measured in microvolts (i.e. E-6 Volts).
+		measured in microvolts (i.e. E-6 Volts), for regulators
+		which can report that voltage.
 
 		NOTE: This value should not be used to determine the regulator
 		output voltage level as this value is the same regardless of
@@ -67,9 +70,10 @@
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		microamps. This holds the regulator output current limit
-		setting measured in microamps (i.e. E-6 Amps).
+		setting measured in microamps (i.e. E-6 Amps), for regulators
+		which can report that current.
 
 		NOTE: This value should not be used to determine the regulator
 		output current level as this value is the same regardless of
@@ -81,8 +85,9 @@
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
-		opmode. This holds the regulator operating mode setting.
+		Some regulator directories will contain a field called
+		opmode. This holds the current regulator operating mode,
+		for regulators which can report it.
 
 		The opmode value can be one of the following strings:
 
@@ -92,7 +97,7 @@
 		'standby'
 		'unknown'
 
-		The modes are described in include/linux/regulator/regulator.h
+		The modes are described in include/linux/regulator/consumer.h
 
 		NOTE: This value should not be used to determine the regulator
 		output operating mode as this value is the same regardless of
@@ -104,9 +109,10 @@
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		min_microvolts. This holds the minimum safe working regulator
-		output voltage setting for this domain measured in microvolts.
+		output voltage setting for this domain measured in microvolts,
+		for regulators which support voltage constraints.
 
 		NOTE: this will return the string 'constraint not defined' if
 		the power domain has no min microvolts constraint defined by
@@ -118,9 +124,10 @@
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		max_microvolts. This holds the maximum safe working regulator
-		output voltage setting for this domain measured in microvolts.
+		output voltage setting for this domain measured in microvolts,
+		for regulators which support voltage constraints.
 
 		NOTE: this will return the string 'constraint not defined' if
 		the power domain has no max microvolts constraint defined by
@@ -132,10 +139,10 @@
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		min_microamps. This holds the minimum safe working regulator
 		output current limit setting for this domain measured in
-		microamps.
+		microamps, for regulators which support current constraints.
 
 		NOTE: this will return the string 'constraint not defined' if
 		the power domain has no min microamps constraint defined by
@@ -147,10 +154,10 @@
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		max_microamps. This holds the maximum safe working regulator
 		output current limit setting for this domain measured in
-		microamps.
+		microamps, for regulators which support current constraints.
 
 		NOTE: this will return the string 'constraint not defined' if
 		the power domain has no max microamps constraint defined by
@@ -185,7 +192,7 @@
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		requested_microamps. This holds the total requested load
 		current in microamps for this regulator from all its consumer
 		devices.
@@ -204,125 +211,102 @@
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		suspend_mem_microvolts. This holds the regulator output
 		voltage setting for this domain measured in microvolts when
-		the system is suspended to memory.
-
-		NOTE: this will return the string 'not defined' if
-		the power domain has no suspend to memory voltage defined by
-		platform code.
+		the system is suspended to memory, for voltage regulators
+		implementing suspend voltage configuration constraints.
 
 What:		/sys/class/regulator/.../suspend_disk_microvolts
 Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		suspend_disk_microvolts. This holds the regulator output
 		voltage setting for this domain measured in microvolts when
-		the system is suspended to disk.
-
-		NOTE: this will return the string 'not defined' if
-		the power domain has no suspend to disk voltage defined by
-		platform code.
+		the system is suspended to disk, for voltage regulators
+		implementing suspend voltage configuration constraints.
 
 What:		/sys/class/regulator/.../suspend_standby_microvolts
 Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		suspend_standby_microvolts. This holds the regulator output
 		voltage setting for this domain measured in microvolts when
-		the system is suspended to standby.
-
-		NOTE: this will return the string 'not defined' if
-		the power domain has no suspend to standby voltage defined by
-		platform code.
+		the system is suspended to standby, for voltage regulators
+		implementing suspend voltage configuration constraints.
 
 What:		/sys/class/regulator/.../suspend_mem_mode
 Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		suspend_mem_mode. This holds the regulator operating mode
 		setting for this domain when the system is suspended to
-		memory.
-
-		NOTE: this will return the string 'not defined' if
-		the power domain has no suspend to memory mode defined by
-		platform code.
+		memory, for regulators implementing suspend mode
+		configuration constraints.
 
 What:		/sys/class/regulator/.../suspend_disk_mode
 Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		suspend_disk_mode. This holds the regulator operating mode
-		setting for this domain when the system is suspended to disk.
-
-		NOTE: this will return the string 'not defined' if
-		the power domain has no suspend to disk mode defined by
-		platform code.
+		setting for this domain when the system is suspended to disk,
+		for regulators implementing suspend mode configuration
+		constraints.
 
 What:		/sys/class/regulator/.../suspend_standby_mode
 Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		suspend_standby_mode. This holds the regulator operating mode
 		setting for this domain when the system is suspended to
-		standby.
-
-		NOTE: this will return the string 'not defined' if
-		the power domain has no suspend to standby mode defined by
-		platform code.
+		standby, for regulators implementing suspend mode
+		configuration constraints.
 
 What:		/sys/class/regulator/.../suspend_mem_state
 Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		suspend_mem_state. This holds the regulator operating state
-		when suspended to memory.
+		when suspended to memory, for regulators implementing suspend
+		configuration constraints.
 
-		This will be one of the following strings:
-
-		'enabled'
-		'disabled'
-		'not defined'
+		This will be one of the same strings reported by
+		the "state" attribute.
 
 What:		/sys/class/regulator/.../suspend_disk_state
 Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		suspend_disk_state. This holds the regulator operating state
-		when suspended to disk.
+		when suspended to disk, for regulators implementing
+		suspend configuration constraints.
 
-		This will be one of the following strings:
-
-		'enabled'
-		'disabled'
-		'not defined'
+		This will be one of the same strings reported by
+		the "state" attribute.
 
 What:		/sys/class/regulator/.../suspend_standby_state
 Date:		May 2008
 KernelVersion:	2.6.26
 Contact:	Liam Girdwood <lrg@slimlogic.co.uk>
 Description:
-		Each regulator directory will contain a field called
+		Some regulator directories will contain a field called
 		suspend_standby_state. This holds the regulator operating
-		state when suspended to standby.
+		state when suspended to standby, for regulators implementing
+		suspend configuration constraints.
 
-		This will be one of the following strings:
-
-		'enabled'
-		'disabled'
-		'not defined'
+		This will be one of the same strings reported by
+		the "state" attribute.
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index b462bb1..5244169 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -170,16 +170,15 @@
 u64
 dma_get_required_mask(struct device *dev)
 
-After setting the mask with dma_set_mask(), this API returns the
-actual mask (within that already set) that the platform actually
-requires to operate efficiently.  Usually this means the returned mask
+This API returns the mask that the platform requires to
+operate efficiently.  Usually this means the returned mask
 is the minimum required to cover all of memory.  Examining the
 required mask gives drivers with variable descriptor sizes the
 opportunity to use smaller descriptors as necessary.
 
 Requesting the required mask does not alter the current mask.  If you
-wish to take advantage of it, you should issue another dma_set_mask()
-call to lower the mask again.
+wish to take advantage of it, you should issue a dma_set_mask()
+call to set the mask to the value returned.
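+
+A minimal sketch of that pattern (driver_supports_mask() is a hypothetical
+stand-in for a driver-specific capability check):
+
+	u64 required = dma_get_required_mask(dev);
+
+	/* switch to smaller descriptors if the device can cover "required" */
+	if (driver_supports_mask(required))
+		dma_set_mask(dev, required);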
 
 
 Part Id - Streaming DMA mappings
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 0a08126..dc3154e 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -12,7 +12,7 @@
 	    kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
 	    gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
 	    genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
-	    mac80211.xml debugobjects.xml sh.xml
+	    mac80211.xml debugobjects.xml sh.xml regulator.xml
 
 ###
 # The build process is as follows (targets):
diff --git a/Documentation/DocBook/networking.tmpl b/Documentation/DocBook/networking.tmpl
index 627707a..59ad69a 100644
--- a/Documentation/DocBook/networking.tmpl
+++ b/Documentation/DocBook/networking.tmpl
@@ -74,6 +74,14 @@
 !Enet/sunrpc/rpcb_clnt.c
 !Enet/sunrpc/clnt.c
      </sect1>
+     <sect1><title>WiMAX</title>
+!Enet/wimax/op-msg.c
+!Enet/wimax/op-reset.c
+!Enet/wimax/op-rfkill.c
+!Enet/wimax/stack.c
+!Iinclude/net/wimax.h
+!Iinclude/linux/wimax.h
+     </sect1>
   </chapter>
 
   <chapter id="netdev">
diff --git a/Documentation/DocBook/regulator.tmpl b/Documentation/DocBook/regulator.tmpl
new file mode 100644
index 0000000..53f4f8d
--- /dev/null
+++ b/Documentation/DocBook/regulator.tmpl
@@ -0,0 +1,304 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="regulator-api">
+ <bookinfo>
+  <title>Voltage and current regulator API</title>
+
+  <authorgroup>
+   <author>
+    <firstname>Liam</firstname>
+    <surname>Girdwood</surname>
+    <affiliation>
+     <address>
+      <email>lrg@slimlogic.co.uk</email>
+     </address>
+    </affiliation>
+   </author>
+   <author>
+    <firstname>Mark</firstname>
+    <surname>Brown</surname>
+    <affiliation>
+     <orgname>Wolfson Microelectronics</orgname>
+     <address>
+      <email>broonie@opensource.wolfsonmicro.com</email>
+     </address>
+    </affiliation>
+   </author>
+  </authorgroup>
+
+  <copyright>
+   <year>2007-2008</year>
+   <holder>Wolfson Microelectronics</holder>
+  </copyright>
+  <copyright>
+   <year>2008</year>
+   <holder>Liam Girdwood</holder>
+  </copyright>
+
+  <legalnotice>
+   <para>
+     This documentation is free software; you can redistribute
+     it and/or modify it under the terms of the GNU General Public
+     License version 2 as published by the Free Software Foundation.
+   </para>
+
+   <para>
+     This program is distributed in the hope that it will be
+     useful, but WITHOUT ANY WARRANTY; without even the implied
+     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+     See the GNU General Public License for more details.
+   </para>
+
+   <para>
+     You should have received a copy of the GNU General Public
+     License along with this program; if not, write to the Free
+     Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+     MA 02111-1307 USA
+   </para>
+
+   <para>
+     For more details see the file COPYING in the source
+     distribution of Linux.
+   </para>
+  </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+  <chapter id="intro">
+    <title>Introduction</title>
+    <para>
+	This framework is designed to provide a standard kernel
+	interface to control voltage and current regulators.
+    </para>
+    <para>
+	The intention is to allow systems to dynamically control
+	regulator power output in order to save power and prolong
+	battery life.  This applies to both voltage regulators (where
+	voltage output is controllable) and current sinks (where current
+	limit is controllable).
+    </para>
+    <para>
+	Note that additional (and currently more complete) documentation
+	is available in the Linux kernel source under
+	<filename>Documentation/power/regulator</filename>.
+    </para>
+
+    <sect1 id="glossary">
+       <title>Glossary</title>
+       <para>
+	The regulator API uses a number of terms which may not be
+	familiar:
+       </para>
+       <glossary>
+
+         <glossentry>
+	   <glossterm>Regulator</glossterm>
+	   <glossdef>
+	     <para>
+	Electronic device that supplies power to other devices.  Most
+	regulators can enable and disable their output and some can also
+	control their output voltage or current.
+	     </para>
+	   </glossdef>
+         </glossentry>
+
+	 <glossentry>
+	   <glossterm>Consumer</glossterm>
+	   <glossdef>
+	     <para>
+	Electronic device which consumes power provided by a regulator.
+	These may either be static, requiring only a fixed supply, or
+	dynamic, requiring active management of the regulator at
+	runtime.
+	     </para>
+	   </glossdef>
+	 </glossentry>
+
+	 <glossentry>
+	   <glossterm>Power Domain</glossterm>
+	   <glossdef>
+	     <para>
+	The electronic circuit supplied by a given regulator, including
+	the regulator and all consumer devices.  The configuration of
+	the regulator is shared between all the components in the
+	circuit.
+	     </para>
+	   </glossdef>
+	 </glossentry>
+
+	 <glossentry>
+	   <glossterm>Power Management Integrated Circuit</glossterm>
+	   <acronym>PMIC</acronym>
+	   <glossdef>
+	     <para>
+	An IC which contains numerous regulators and often also other
+	subsystems.  In an embedded system the primary PMIC is often
+	equivalent to a combination of the PSU and southbridge in a
+	desktop system.
+	     </para>
+	   </glossdef>
+	 </glossentry>
+	</glossary>
+     </sect1>
+  </chapter>
+
+  <chapter id="consumer">
+     <title>Consumer driver interface</title>
+     <para>
+       This offers a similar API to the kernel clock framework.
+       Consumer drivers use <link
+       linkend='API-regulator-get'>get</link> and <link
+       linkend='API-regulator-put'>put</link> operations to acquire and
+       release regulators.  Functions are
+       provided to <link linkend='API-regulator-enable'>enable</link>
+       and <link linkend='API-regulator-disable'>disable</link> the
+       regulator and to get and set the runtime parameters of the
+       regulator.
+     </para>
+     <para>
+       When requesting regulators consumers use symbolic names for their
+       supplies, such as "Vcc", which are mapped into actual regulator
+       devices by the machine interface.
+     </para>
+     <para>
+	A stub version of this API is provided when the regulator
+	framework is not in use in order to minimise the need to use
+	ifdefs.
+     </para>
+
+     <sect1 id="consumer-enable">
+       <title>Enabling and disabling</title>
+       <para>
+         The regulator API provides reference counted enabling and
+	 disabling of regulators. Consumer devices use the <function><link
+	 linkend='API-regulator-enable'>regulator_enable</link></function>
+	 and <function><link
+	 linkend='API-regulator-disable'>regulator_disable</link>
+	 </function> functions to enable and disable regulators.  Calls
+	 to the two functions must be balanced.
+       </para>
+       <para>
+         Note that since multiple consumers may be using a regulator and
+	 machine constraints may not allow the regulator to be disabled
+	 there is no guarantee that calling
+	 <function>regulator_disable</function> will actually cause the
+	 supply provided by the regulator to be disabled. Consumer
+	 drivers should assume that the regulator may be enabled at all
+	 times.
+       </para>
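+       <para>
+	 A minimal consumer sketch (dev is assumed to be the consumer's
+	 struct device, and "Vcc" a supply name mapped to it by the
+	 machine interface):
+       </para>
+       <programlisting>
+struct regulator *regulator;
+int ret;
+
+regulator = regulator_get(dev, "Vcc");
+if (IS_ERR(regulator))
+	return PTR_ERR(regulator);
+
+ret = regulator_enable(regulator);
+
+/* ... consumer is now powered; when finished: */
+
+regulator_disable(regulator);
+regulator_put(regulator);
+       </programlisting>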
+     </sect1>
+
+     <sect1 id="consumer-config">
+       <title>Configuration</title>
+       <para>
+         Some consumer devices may need to be able to dynamically
+	 configure their supplies.  For example, MMC drivers may need to
+	 select the correct operating voltage for their cards.  This may
+	 be done while the regulator is enabled or disabled.
+       </para>
+       <para>
+	 The <function><link
+	 linkend='API-regulator-set-voltage'>regulator_set_voltage</link>
+	 </function> and <function><link
+	 linkend='API-regulator-set-current-limit'
+	 >regulator_set_current_limit</link>
+	 </function> functions provide the primary interface for this.
+	 Both take ranges of voltages and currents, supporting drivers
+	 that do not require a specific value (eg, CPU frequency scaling
+	 normally permits the CPU to use a wider range of supply
+	 voltages at lower frequencies but does not require that the
+	 supply voltage be lowered).  Where an exact value is required
+	 both minimum and maximum values should be identical.
+       </para>
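+       <para>
+	 Continuing the sketch above, a hypothetical MMC driver selecting a
+	 3.3V supply could request an exact value by passing identical bounds:
+       </para>
+       <programlisting>
+ret = regulator_set_voltage(regulator, 3300000, 3300000);
+if (ret != 0)
+	dev_err(dev, "failed to set card supply voltage\n");
+       </programlisting>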
+     </sect1>
+
+     <sect1 id="consumer-callback">
+       <title>Callbacks</title>
+       <para>
+	  Callbacks may also be <link
+	  linkend='API-regulator-register-notifier'>registered</link>
+	  for events such as regulation failures.
+       </para>
+     </sect1>
+   </chapter>
+
+   <chapter id="driver">
+     <title>Regulator driver interface</title>
+     <para>
+       Drivers for regulator chips <link
+       linkend='API-regulator-register'>register</link> the regulators
+       with the regulator core, providing operations structures to the
+       core.  A <link
+       linkend='API-regulator-notifier-call-chain'>notifier</link> interface
+       allows error conditions to be reported to the core.
+     </para>
+     <para>
+       Registration should be triggered by explicit setup done by the
+       platform, supplying a <link
+       linkend='API-struct-regulator-init-data'>struct
+       regulator_init_data</link> for the regulator containing
+       <link linkend='machine-constraint'>constraint</link> and
+       <link linkend='machine-supply'>supply</link> information.
+     </para>
+   </chapter>
+
+   <chapter id="machine">
+     <title>Machine interface</title>
+     <para>
+       This interface provides a way to define how regulators are
+       connected to consumers on a given system and what the valid
+       operating parameters are for the system.
+     </para>
+
+     <sect1 id="machine-supply">
+       <title>Supplies</title>
+       <para>
+         Regulator supplies are specified using <link
+	 linkend='API-struct-regulator-consumer-supply'>struct
+	 regulator_consumer_supply</link>.  This is done at
+	 <link linkend='driver'>driver registration
+	 time</link> as part of the machine constraints.
+       </para>
+     </sect1>
+
+     <sect1 id="machine-constraint">
+       <title>Constraints</title>
+       <para>
+	 As well as defining the connections, the machine interface
+	 also provides constraints defining the operations that
+	 clients are allowed to perform and the parameters that may be
+	 set.  This is required since generally regulator devices will
+	 offer more flexibility than it is safe to use on a given
+	 system, for example supporting higher supply voltages than the
+	 consumers are rated for.
+       </para>
+       <para>
+	 This is done at <link linkend='driver'>driver
+	 registration time</link> by providing a <link
+	 linkend='API-struct-regulation-constraints'>struct
+	 regulation_constraints</link>.
+       </para>
+       <para>
+         The constraints may also specify an initial configuration for the
+         regulator, which is particularly useful for use with static
+         consumers.
+       </para>
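+       <para>
+	 A hypothetical sketch of such constraints (the voltage range and
+	 permitted operations are illustrative only):
+       </para>
+       <programlisting>
+static struct regulator_init_data foo_regulator_data = {
+	.constraints = {
+		.min_uV         = 1700000,
+		.max_uV         = 3600000,
+		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+	},
+};
+       </programlisting>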
+     </sect1>
+  </chapter>
+
+  <chapter id="api">
+    <title>API reference</title>
+    <para>
+      Due to limitations of the kernel documentation framework and the
+      existing layout of the source code the entire regulator API is
+      documented here.
+    </para>
+!Iinclude/linux/regulator/consumer.h
+!Iinclude/linux/regulator/machine.h
+!Iinclude/linux/regulator/driver.h
+!Edrivers/regulator/core.c
+  </chapter>
+</book>
diff --git a/Documentation/PCI/pci.txt b/Documentation/PCI/pci.txt
index fd4907a..7f6de6e 100644
--- a/Documentation/PCI/pci.txt
+++ b/Documentation/PCI/pci.txt
@@ -294,7 +294,8 @@
 
 pci_set_master() will enable DMA by setting the bus master bit
 in the PCI_COMMAND register. It also fixes the latency timer value if
-it's set to something bogus by the BIOS.
+it's set to something bogus by the BIOS.  pci_clear_master() will
+disable DMA by clearing the bus master bit.
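+
+A minimal sketch of the pairing (pdev is the driver's struct pci_dev):
+
+	pci_set_master(pdev);	/* e.g. in the probe routine, before DMA */
+	...
+	pci_clear_master(pdev);	/* e.g. in the remove routine */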
 
 If the PCI device can use the PCI Memory-Write-Invalidate transaction,
 call pci_set_mwi().  This enables the PCI_COMMAND bit for Mem-Wr-Inval
diff --git a/Documentation/RCU/00-INDEX b/Documentation/RCU/00-INDEX
index 7dc0695..9bb62f7 100644
--- a/Documentation/RCU/00-INDEX
+++ b/Documentation/RCU/00-INDEX
@@ -12,6 +12,8 @@
 	- Reference-count design for elements of lists/arrays protected by RCU
 rcu.txt
 	- RCU Concepts
+rcubarrier.txt
+	- Unloading modules that use RCU callbacks
 RTFP.txt
 	- List of RCU papers (bibliography) going back to 1980.
 torture.txt
diff --git a/Documentation/RCU/rcubarrier.txt b/Documentation/RCU/rcubarrier.txt
new file mode 100644
index 0000000..909602d
--- /dev/null
+++ b/Documentation/RCU/rcubarrier.txt
@@ -0,0 +1,304 @@
+RCU and Unloadable Modules
+
+[Originally published in LWN Jan. 14, 2007: http://lwn.net/Articles/217484/]
+
+RCU (read-copy update) is a synchronization mechanism that can be thought
+of as a replacement for read-writer locking (among other things), but with
+very low-overhead readers that are immune to deadlock, priority inversion,
+and unbounded latency. RCU read-side critical sections are delimited
+by rcu_read_lock() and rcu_read_unlock(), which, in non-CONFIG_PREEMPT
+kernels, generate no code whatsoever.
+
+This means that RCU writers are unaware of the presence of concurrent
+readers, so that RCU updates to shared data must be undertaken quite
+carefully, leaving an old version of the data structure in place until all
+pre-existing readers have finished. These old versions are needed because
+such readers might hold a reference to them. RCU updates can therefore be
+rather expensive, and RCU is thus best suited for read-mostly situations.
+
+How can an RCU writer possibly determine when all readers are finished,
+given that readers might well leave absolutely no trace of their
+presence? There is a synchronize_rcu() primitive that blocks until all
+pre-existing readers have completed. An updater wishing to delete an
+element p from a linked list might do the following, while holding an
+appropriate lock, of course:
+
+	list_del_rcu(p);
+	synchronize_rcu();
+	kfree(p);
+
+But the above code cannot be used in IRQ context -- the call_rcu()
+primitive must be used instead. This primitive takes a pointer to an
+rcu_head struct placed within the RCU-protected data structure and
+another pointer to a function that may be invoked later to free that
+structure. Code to delete an element p from the linked list from IRQ
+context might then be as follows:
+
+	list_del_rcu(p);
+	call_rcu(&p->rcu, p_callback);
+
+Since call_rcu() never blocks, this code can safely be used from within
+IRQ context. The function p_callback() might be defined as follows:
+
+	static void p_callback(struct rcu_head *rp)
+	{
+		struct pstruct *p = container_of(rp, struct pstruct, rcu);
+
+		kfree(p);
+	}
+
+
+Unloading Modules That Use call_rcu()
+
+But what if p_callback is defined in an unloadable module?
+
+If we unload the module while some RCU callbacks are pending,
+the CPUs executing these callbacks are going to be severely
+disappointed when they are later invoked, as fancifully depicted at
+http://lwn.net/images/ns/kernel/rcu-drop.jpg.
+
+We could try placing a synchronize_rcu() in the module-exit code path,
+but this is not sufficient. Although synchronize_rcu() does wait for a
+grace period to elapse, it does not wait for the callbacks to complete.
+
+One might be tempted to try several back-to-back synchronize_rcu()
+calls, but this is still not guaranteed to work. If there is a very
+heavy RCU-callback load, then some of the callbacks might be deferred
+in order to allow other processing to proceed. Such deferral is required
+in realtime kernels in order to avoid excessive scheduling latencies.
+
+
+rcu_barrier()
+
+We instead need the rcu_barrier() primitive. This primitive is similar
+to synchronize_rcu(), but instead of waiting solely for a grace
+period to elapse, it also waits for all outstanding RCU callbacks to
+complete. Pseudo-code using rcu_barrier() is as follows:
+
+   1. Prevent any new RCU callbacks from being posted.
+   2. Execute rcu_barrier().
+   3. Allow the module to be unloaded.
+
+Quick Quiz #1: Why is there no srcu_barrier()?
+
+The rcutorture module makes use of rcu_barrier in its exit function
+as follows:
+
+ 1 static void
+ 2 rcu_torture_cleanup(void)
+ 3 {
+ 4   int i;
+ 5
+ 6   fullstop = 1;
+ 7   if (shuffler_task != NULL) {
+ 8     VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
+ 9     kthread_stop(shuffler_task);
+10   }
+11   shuffler_task = NULL;
+12
+13   if (writer_task != NULL) {
+14     VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
+15     kthread_stop(writer_task);
+16   }
+17   writer_task = NULL;
+18
+19   if (reader_tasks != NULL) {
+20     for (i = 0; i < nrealreaders; i++) {
+21       if (reader_tasks[i] != NULL) {
+22         VERBOSE_PRINTK_STRING(
+23           "Stopping rcu_torture_reader task");
+24         kthread_stop(reader_tasks[i]);
+25       }
+26       reader_tasks[i] = NULL;
+27     }
+28     kfree(reader_tasks);
+29     reader_tasks = NULL;
+30   }
+31   rcu_torture_current = NULL;
+32
+33   if (fakewriter_tasks != NULL) {
+34     for (i = 0; i < nfakewriters; i++) {
+35       if (fakewriter_tasks[i] != NULL) {
+36         VERBOSE_PRINTK_STRING(
+37           "Stopping rcu_torture_fakewriter task");
+38         kthread_stop(fakewriter_tasks[i]);
+39       }
+40       fakewriter_tasks[i] = NULL;
+41     }
+42     kfree(fakewriter_tasks);
+43     fakewriter_tasks = NULL;
+44   }
+45
+46   if (stats_task != NULL) {
+47     VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
+48     kthread_stop(stats_task);
+49   }
+50   stats_task = NULL;
+51
+52   /* Wait for all RCU callbacks to fire. */
+53   rcu_barrier();
+54
+55   rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
+56
+57   if (cur_ops->cleanup != NULL)
+58     cur_ops->cleanup();
+59   if (atomic_read(&n_rcu_torture_error))
+60     rcu_torture_print_module_parms("End of test: FAILURE");
+61   else
+62     rcu_torture_print_module_parms("End of test: SUCCESS");
+63 }
+
+Line 6 sets a global variable that prevents any RCU callbacks from
+re-posting themselves. This will not be necessary in most cases, since
+RCU callbacks rarely include calls to call_rcu(). However, the rcutorture
+module is an exception to this rule, and therefore needs to set this
+global variable.
+
+Lines 7-50 stop all the kernel tasks associated with the rcutorture
+module. Therefore, once execution reaches line 53, no more rcutorture
+RCU callbacks will be posted. The rcu_barrier() call on line 53 waits
+for any pre-existing callbacks to complete.
+
+Then lines 55-62 print status and do operation-specific cleanup, and
+then return, permitting the module-unload operation to be completed.
+
+Quick Quiz #2: Is there any other situation where rcu_barrier() might
+	be required?
+
+Your module might have additional complications. For example, if your
+module invokes call_rcu() from timers, you will need to first cancel all
+the timers, and only then invoke rcu_barrier() to wait for any remaining
+RCU callbacks to complete.
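+
+A minimal module-exit sketch for that case (the timer and flag names are
+purely illustrative):
+
+	static void __exit foo_exit(void)
+	{
+		foo_exiting = 1;            /* stop posting new callbacks */
+		del_timer_sync(&foo_timer); /* no more call_rcu() from the timer */
+		rcu_barrier();              /* wait for already-posted callbacks */
+	}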
+
+
+Implementing rcu_barrier()
+
+Dipankar Sarma's implementation of rcu_barrier() makes use of the fact
+that RCU callbacks are never reordered once queued on one of the per-CPU
+queues. His implementation queues an RCU callback on each of the per-CPU
+callback queues, and then waits until they have all started executing, at
+which point, all earlier RCU callbacks are guaranteed to have completed.
+
+The original code for rcu_barrier() was as follows:
+
+ 1 void rcu_barrier(void)
+ 2 {
+ 3   BUG_ON(in_interrupt());
+ 4   /* Take cpucontrol mutex to protect against CPU hotplug */
+ 5   mutex_lock(&rcu_barrier_mutex);
+ 6   init_completion(&rcu_barrier_completion);
+ 7   atomic_set(&rcu_barrier_cpu_count, 0);
+ 8   on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+ 9   wait_for_completion(&rcu_barrier_completion);
+10   mutex_unlock(&rcu_barrier_mutex);
+11 }
+
+Line 3 verifies that the caller is in process context, and lines 5 and 10
+use rcu_barrier_mutex to ensure that only one rcu_barrier() is using the
+global completion and counters at a time, which are initialized on lines
+6 and 7. Line 8 causes each CPU to invoke rcu_barrier_func(), which is
+shown below. Note that the final "1" in on_each_cpu()'s argument list
+ensures that all the calls to rcu_barrier_func() will have completed
+before on_each_cpu() returns. Line 9 then waits for the completion.
+
+This code was rewritten in 2008 to support rcu_barrier_bh() and
+rcu_barrier_sched() in addition to the original rcu_barrier().
+
+The rcu_barrier_func() runs on each CPU, where it invokes call_rcu()
+to post an RCU callback, as follows:
+
+ 1 static void rcu_barrier_func(void *notused)
+ 2 {
+ 3 int cpu = smp_processor_id();
+ 4 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ 5 struct rcu_head *head;
+ 6
+ 7 head = &rdp->barrier;
+ 8 atomic_inc(&rcu_barrier_cpu_count);
+ 9 call_rcu(head, rcu_barrier_callback);
+10 }
+
+Lines 3 and 4 locate RCU's internal per-CPU rcu_data structure,
+which contains the struct rcu_head that is needed for the later call to
+call_rcu(). Line 7 picks up a pointer to this struct rcu_head, and line
+8 increments a global counter. This counter will later be decremented
+by the callback. Line 9 then registers the rcu_barrier_callback() on
+the current CPU's queue.
+
+The rcu_barrier_callback() function simply atomically decrements the
+rcu_barrier_cpu_count variable and finalizes the completion when it
+reaches zero, as follows:
+
+ 1 static void rcu_barrier_callback(struct rcu_head *notused)
+ 2 {
+ 3 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+ 4 complete(&rcu_barrier_completion);
+ 5 }
+
+Quick Quiz #3: What happens if CPU 0's rcu_barrier_func() executes
+	immediately (thus incrementing rcu_barrier_cpu_count to the
+	value one), but the other CPUs' rcu_barrier_func() invocations
+	are delayed for a full grace period? Couldn't this result in
+	rcu_barrier() returning prematurely?
+
+
+rcu_barrier() Summary
+
+The rcu_barrier() primitive has seen relatively little use, since most
+code using RCU is in the core kernel rather than in modules. However, if
+you are using RCU from an unloadable module, you need to use rcu_barrier()
+so that your module may be safely unloaded.
+
+
+Answers to Quick Quizzes
+
+Quick Quiz #1: Why is there no srcu_barrier()?
+
+Answer: Since there is no call_srcu(), there can be no outstanding SRCU
+	callbacks. Therefore, there is no need to wait for them.
+
+Quick Quiz #2: Is there any other situation where rcu_barrier() might
+	be required?
+
+Answer: Interestingly enough, rcu_barrier() was not originally
+	implemented for module unloading. Nikita Danilov was using
+	RCU in a filesystem, which resulted in a similar situation at
+	filesystem-unmount time. Dipankar Sarma coded up rcu_barrier()
+	in response, so that Nikita could invoke it during the
+	filesystem-unmount process.
+
+	Much later, yours truly hit the RCU module-unload problem when
+	implementing rcutorture, and found that rcu_barrier() solves
+	this problem as well.
+
+Quick Quiz #3: What happens if CPU 0's rcu_barrier_func() executes
+	immediately (thus incrementing rcu_barrier_cpu_count to the
+	value one), but the other CPUs' rcu_barrier_func() invocations
+	are delayed for a full grace period? Couldn't this result in
+	rcu_barrier() returning prematurely?
+
+Answer: This cannot happen. The reason is that on_each_cpu() has its last
+	argument, the wait flag, set to "1". This flag is passed through
+	to smp_call_function() and further to smp_call_function_on_cpu(),
+	causing the latter to spin until the cross-CPU invocation of
+	rcu_barrier_func() has completed. This by itself would prevent
+	a grace period from completing on non-CONFIG_PREEMPT kernels,
+	since each CPU must undergo a context switch (or other quiescent
+	state) before the grace period can complete. However, this is
+	of no use in CONFIG_PREEMPT kernels.
+
+	Therefore, on_each_cpu() disables preemption across its call
+	to smp_call_function() and also across the local call to
+	rcu_barrier_func(). This prevents the local CPU from context
+	switching, again preventing grace periods from completing. This
+	means that all CPUs have executed rcu_barrier_func() before
+	the first rcu_barrier_callback() can possibly execute, in turn
+	preventing rcu_barrier_cpu_count from prematurely reaching zero.
+
+	Currently, -rt implementations of RCU keep but a single global
+	queue for RCU callbacks, and thus do not suffer from this
+	problem. However, when the -rt RCU eventually does have per-CPU
+	callback queues, things will have to change. One simple change
+	is to add an rcu_read_lock() before line 8 of rcu_barrier()
+	and an rcu_read_unlock() after line 8 of this same function. If
+	you can think of a better change, please let me know!
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index cc49400..7ea2311 100644
--- a/Documentation/accounting/getdelays.c
+++ b/Documentation/accounting/getdelays.c
@@ -392,6 +392,10 @@
 			goto err;
 		}
 	}
+	if (!maskset && !tid && !containerset) {
+		usage();
+		goto err;
+	}
 
 	do {
 		int i;
diff --git a/Documentation/bad_memory.txt b/Documentation/bad_memory.txt
new file mode 100644
index 0000000..df84162
--- /dev/null
+++ b/Documentation/bad_memory.txt
@@ -0,0 +1,45 @@
+March 2008
+Jan-Simon Moeller, dl9pf@gmx.de
+
+
+How to deal with bad memory, e.g. reported by memtest86+?
+#########################################################
+
+There are three possibilities I know of:
+
+1) Reinsert/swap the memory modules
+
+2) Buy new modules (best!) or try to exchange the memory
+   if you have spare-parts
+
+3) Use BadRAM or memmap
+
+This Howto is about number 3).
+
+
+BadRAM
+######
+BadRAM is actively developed and available as a kernel patch
+here:  http://rick.vanrein.org/linux/badram/
+
+For more details see the BadRAM documentation.
+
+memmap
+######
+
+memmap is already in the kernel and usable as a kernel parameter at
+boot time.  Its syntax is slightly strange and you may need to
+calculate the values yourself!
+
+Syntax to exclude a memory area (see kernel-parameters.txt for details):
+memmap=<size>$<address>
+
+Example: memtest86+ reported errors at addresses 0x18691458, 0x18698424 and
+         some others. All had 0x1869xxxx in common, so I chose a pattern of
+         0x18690000,0xffff0000.
+
+With the numbers from the example above:
+memmap=64K$0x18690000
+ or
+memmap=0x10000$0x18690000
+
diff --git a/Documentation/blackfin/00-INDEX b/Documentation/blackfin/00-INDEX
index 7cb3b35..d6840a9 100644
--- a/Documentation/blackfin/00-INDEX
+++ b/Documentation/blackfin/00-INDEX
@@ -9,3 +9,6 @@
 
 Filesystems
 	- Requirements for mounting the root file system.
+
+bfin-gpio-notes.txt
+	- Notes on developing/using the bfin-gpio driver.
diff --git a/Documentation/blackfin/bfin-gpio-notes.txt b/Documentation/blackfin/bfin-gpio-notes.txt
new file mode 100644
index 0000000..9898c7de
--- /dev/null
+++ b/Documentation/blackfin/bfin-gpio-notes.txt
@@ -0,0 +1,71 @@
+/*
+ * File:         Documentation/blackfin/bfin-gpio-notes.txt
+ * Based on:
+ * Author:
+ *
+ * Created:      $Id: bfin-gpio-note.txt 2008-11-24 16:42 grafyang $
+ * Description:  This file contains the notes in developing/using bfin-gpio.
+ *
+ *
+ * Rev:
+ *
+ * Modified:
+ *               Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ */
+
+
+1. Blackfin GPIO introduction
+
+    There are many GPIO pins on Blackfin. Most of these pins are muxed between
+    multiple functions. They can be configured as a peripheral, or just as a
+    GPIO, set up as an input (with interrupts enabled) or as an output.
+
+    For detailed information, please see "arch/blackfin/kernel/bfin_gpio.c",
+    or the relevant HRM.
+
+
+2. Avoiding resource conflict
+
+    The following function groups are used to avoid resource conflicts:
+    - Use the pin as peripheral,
+	int peripheral_request(unsigned short per, const char *label);
+	int peripheral_request_list(const unsigned short per[], const char *label);
+	void peripheral_free(unsigned short per);
+	void peripheral_free_list(const unsigned short per[]);
+    - Use the pin as GPIO,
+	int bfin_gpio_request(unsigned gpio, const char *label);
+	void bfin_gpio_free(unsigned gpio);
+    - Use the pin as GPIO interrupt,
+	int bfin_gpio_irq_request(unsigned gpio, const char *label);
+	void bfin_gpio_irq_free(unsigned gpio);
+
+    The request functions record the function state for a certain pin, and
+    the free functions clear its function state.
+    Once a pin is requested, it can't be requested again before it is freed by
+    the previous caller; otherwise the kernel will dump stacks and the request
+    function will fail.
+    These functions are wrapped by other functions, so most users need not
+    care.
+
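+    A minimal usage sketch (the GPIO number and label are illustrative):
+
+	ret = bfin_gpio_request(GPIO_PF10, "foo-driver");
+	if (ret)
+		return ret;
+	...
+	bfin_gpio_free(GPIO_PF10);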
+
+3. But there are some exceptions
+    - The kernel permits the same GPIO to be requested both as a GPIO and as a
+    GPIO interrupt.
+    Some drivers, like gpio-keys, need this behavior. The kernel only prints
+    warning messages like,
+	bfin-gpio: GPIO 24 is already reserved by gpio-keys: BTN0, and you are
+configuring it as IRQ!
+
+        Note: Consider the case where two drivers need the same GPIO,
+	one of them using it as a GPIO and the other using it as a
+	GPIO interrupt. This really will cause a resource conflict. So if
+	there is any abnormal driver behavior, please check the bfin-gpio
+	warning messages.
+
+    - The kernel permits the same GPIO to be requested from the same driver twice.
+
+
+
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index d9014aa..d9e5d6f 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -1,7 +1,8 @@
 				CGROUPS
 				-------
 
-Written by Paul Menage <menage@google.com> based on Documentation/cpusets.txt
+Written by Paul Menage <menage@google.com> based on
+Documentation/cgroups/cpusets.txt
 
 Original copyright statements from cpusets.txt:
 Portions Copyright (C) 2004 BULL SA.
@@ -68,7 +69,7 @@
 tracking. The intention is that other subsystems hook into the generic
 cgroup support to provide new attributes for cgroups, such as
 accounting/limiting the resources which processes in a cgroup can
-access. For example, cpusets (see Documentation/cpusets.txt) allows
+access. For example, cpusets (see Documentation/cgroups/cpusets.txt) allows
 you to associate a set of CPUs and a set of memory nodes with the
 tasks in each cgroup.
 
@@ -227,7 +228,6 @@
 containing the following files describing that cgroup:
 
  - tasks: list of tasks (by pid) attached to that cgroup
- - releasable flag: cgroup currently removeable?
  - notify_on_release flag: run the release agent on exit?
  - release_agent: the path to use for release notifications (this file
    exists in the top cgroup only)
@@ -360,7 +360,7 @@
 
 In this directory you can find several files:
 # ls
-notify_on_release releasable tasks
+notify_on_release tasks
 (plus whatever files added by the attached subsystems)
 
 Now attach your shell to this cgroup:
@@ -479,7 +479,6 @@
 create() method has been called for the new cgroup).
 
 void pre_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
-(cgroup_mutex held by caller)
 
 Called before checking the reference count on each subsystem. This may
 be useful for subsystems which have some extra references even if
@@ -498,6 +497,7 @@
 
 void attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	    struct cgroup *old_cgrp, struct task_struct *task)
+(cgroup_mutex held by caller)
 
 Called after the task has been attached to the cgroup, to allow any
 post-attachment activity that requires memory allocations or blocking.
@@ -511,6 +511,7 @@
 Called during task exit.
 
 int populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
+(cgroup_mutex held by caller)
 
 Called after creation of a cgroup to allow a subsystem to populate
 the cgroup directory with file entries.  The subsystem should make
@@ -520,6 +521,7 @@
 always handled well.
 
 void post_clone(struct cgroup_subsys *ss, struct cgroup *cgrp)
+(cgroup_mutex held by caller)
 
 Called at the end of cgroup_clone() to do any paramater
 initialization which might be required before a task could attach.  For
@@ -527,7 +529,7 @@
 up.
 
 void bind(struct cgroup_subsys *ss, struct cgroup *root)
-(cgroup_mutex held by caller)
+(cgroup_mutex and ss->hierarchy_mutex held by caller)
 
 Called when a cgroup subsystem is rebound to a different hierarchy
 and root cgroup. Currently this will only involve movement between
diff --git a/Documentation/controllers/cpuacct.txt b/Documentation/cgroups/cpuacct.txt
similarity index 100%
rename from Documentation/controllers/cpuacct.txt
rename to Documentation/cgroups/cpuacct.txt
diff --git a/Documentation/cpusets.txt b/Documentation/cgroups/cpusets.txt
similarity index 100%
rename from Documentation/cpusets.txt
rename to Documentation/cgroups/cpusets.txt
diff --git a/Documentation/controllers/devices.txt b/Documentation/cgroups/devices.txt
similarity index 100%
rename from Documentation/controllers/devices.txt
rename to Documentation/cgroups/devices.txt
diff --git a/Documentation/cgroups/memcg_test.txt b/Documentation/cgroups/memcg_test.txt
new file mode 100644
index 0000000..19533f9
--- /dev/null
+++ b/Documentation/cgroups/memcg_test.txt
@@ -0,0 +1,342 @@
+Memory Resource Controller (Memcg) Implementation Memo.
+Last Updated: 2008/12/15
+Base Kernel Version: based on 2.6.28-rc8-mm.
+
+Because the VM is getting complex (one of the reasons is memcg...), memcg's
+behavior is complex. This is a document for memcg's internal behavior.
+Please note that implementation details can change.
+
+(*) Topics on the API should be in Documentation/cgroups/memory.txt
+
+0. How to record usage?
+   Two objects are used.
+
+   page_cgroup ....an object per page.
+	Allocated at boot or memory hotplug. Freed at memory hot removal.
+
+   swap_cgroup ... an entry per swp_entry.
+	Allocated at swapon(). Freed at swapoff().
+
+   The page_cgroup has a USED bit, and double counting against a page_cgroup
+   never occurs. swap_cgroup is used only when a charged page is swapped out.
+
+1. Charge
+
+   a page/swp_entry may be charged (usage += PAGE_SIZE) at
+
+	mem_cgroup_newpage_charge()
+	  Called at new page fault and Copy-On-Write.
+
+	mem_cgroup_try_charge_swapin()
+	  Called at do_swap_page() (page fault on swap entry) and swapoff.
+	  Followed by charge-commit-cancel protocol. (With swap accounting)
+	  At commit, a charge recorded in swap_cgroup is removed.
+
+	mem_cgroup_cache_charge()
+	  Called at add_to_page_cache()
+
+	mem_cgroup_cache_charge_swapin()
+	  Called at shmem's swapin.
+
+	mem_cgroup_prepare_migration()
+	  Called before migration. "extra" charge is done and followed by
+	  charge-commit-cancel protocol.
+	  At commit, charge against oldpage or newpage will be committed.
+
+2. Uncharge
+  a page/swp_entry may be uncharged (usage -= PAGE_SIZE) by
+
+	mem_cgroup_uncharge_page()
+	  Called when an anonymous page is fully unmapped. I.e., mapcount goes
+	  to 0. If the page is SwapCache, uncharge is delayed until
+	  mem_cgroup_uncharge_swapcache().
+
+	mem_cgroup_uncharge_cache_page()
+	  Called when a page-cache is deleted from radix-tree. If the page is
+	  SwapCache, uncharge is delayed until mem_cgroup_uncharge_swapcache().
+
+	mem_cgroup_uncharge_swapcache()
+	  Called when SwapCache is removed from radix-tree. The charge itself
+	  is moved to swap_cgroup. (If mem+swap controller is disabled, no
+	  charge to swap occurs.)
+
+	mem_cgroup_uncharge_swap()
+	  Called when swp_entry's refcnt goes down to 0. A charge against swap
+	  disappears.
+
+	mem_cgroup_end_migration(old, new)
+	At success of migration old is uncharged (if necessary), a charge
+	to new page is committed. At failure, charge to old page is committed.
+
+3. charge-commit-cancel
+	In some case, we can't know this "charge" is valid or not at charging
+	(because of races).
+	To handle such case, there are charge-commit-cancel functions.
+		mem_cgroup_try_charge_XXX
+		mem_cgroup_commit_charge_XXX
+		mem_cgroup_cancel_charge_XXX
+	these are used in swap-in and migration.
+
+	At try_charge(), there are no flags to say "this page is charged".
+	at this point, usage += PAGE_SIZE.
+
+	At commit(), the function checks the page should be charged or not
+	and set flags or avoid charging.(usage -= PAGE_SIZE)
+
+	At cancel(), simply usage -= PAGE_SIZE.
+
+Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y.
+
+4. Anonymous
+	Anonymous page is newly allocated at
+		  - page fault into MAP_ANONYMOUS mapping.
+		  - Copy-On-Write.
+ 	It is charged right after it's allocated before doing any page table
+	related operations. Of course, it's uncharged when another page is used
+	for the fault address.
+
+	When an anonymous page is freed (by exit() or munmap()), zap_pte() is
+	called and the pages for the ptes are freed one by one (see mm/memory.c).
+	Uncharges are done at page_remove_rmap() when page_mapcount() drops to 0.
+
+	Pages are also freed by page reclaim (vmscan.c), where anonymous
+	pages are swapped out. In this case, the page is marked as
+	PageSwapCache(). The uncharge() routine doesn't uncharge a page marked
+	as SwapCache(); that is delayed until __delete_from_swap_cache().
+
+	4.1 Swap-in.
+	At swap-in, the page is taken from swap-cache. There are 2 cases.
+
+	(a) If the SwapCache is newly allocated and read, it has no charges.
+	(b) If the SwapCache has been mapped by processes, it has been
+	    charged already.
+
+	This swap-in is one of the most complicated operations. In do_swap_page(),
+	the following events occur when the pte is unchanged.
+
+	(1) the page (SwapCache) is looked up.
+	(2) lock_page()
+	(3) try_charge_swapin()
+	(4) reuse_swap_page() (may call delete_swap_cache())
+	(5) commit_charge_swapin()
+	(6) swap_free().
+
+	Consider the following situations, for example.
+
+	(A) The page has not been charged before (2) and reuse_swap_page()
+	    doesn't call delete_from_swap_cache().
+	(B) The page has not been charged before (2) and reuse_swap_page()
+	    calls delete_from_swap_cache().
+	(C) The page has been charged before (2) and reuse_swap_page() doesn't
+	    call delete_from_swap_cache().
+	(D) The page has been charged before (2) and reuse_swap_page() calls
+	    delete_from_swap_cache().
+
+	    memory.usage/memsw.usage changes to this page/swp_entry will be
+	 Case          (A)      (B)       (C)     (D)
+         Event
+       Before (2)     0/ 1     0/ 1      1/ 1    1/ 1
+          ===========================================
+          (3)        +1/+1    +1/+1     +1/+1   +1/+1
+          (4)          -       0/ 0       -     -1/ 0
+          (5)         0/-1     0/ 0     -1/-1    0/ 0
+          (6)          -       0/-1       -      0/-1
+          ===========================================
+       Result         1/ 1     1/ 1      1/ 1    1/ 1
+
+       In all cases, charges to this page should be 1/ 1.
+
+	4.2 Swap-out.
+	At swap-out, typical state transition is below.
+
+	(a) add to swap cache. (marked as SwapCache)
+	    swp_entry's refcnt += 1.
+	(b) fully unmapped.
+	    swp_entry's refcnt += # of ptes.
+	(c) write back to swap.
+	(d) delete from swap cache. (remove from SwapCache)
+	    swp_entry's refcnt -= 1.
+
+
+	At (b), the page is marked as SwapCache and not uncharged.
+	At (d), the page is removed from SwapCache and a charge in page_cgroup
+	is moved to swap_cgroup.
+
+	Finally, at task exit,
+	(e) zap_pte() is called and swp_entry's refcnt -=1 -> 0.
+	Here, a charge in swap_cgroup disappears.
+
+5. Page Cache
+   	Page Cache is charged at
+	- add_to_page_cache_locked().
+
+	uncharged at
+	- __remove_from_page_cache().
+
+	The logic is very clear. (About migration, see below)
+	Note: __remove_from_page_cache() is called by remove_from_page_cache()
+	and __remove_mapping().
+
+6. Shmem(tmpfs) Page Cache
+	Memcg's charge/uncharge have special handlers for shmem. The best way
+	to understand shmem's page state transitions is to read mm/shmem.c,
+	but a brief explanation of the behavior of memcg around shmem will be
+	helpful to understand the logic.
+
+	Shmem's page (just leaf page, not direct/indirect block) can be on
+		- radix-tree of shmem's inode.
+		- SwapCache.
+		- Both on radix-tree and SwapCache. This happens at swap-in
+		  and swap-out,
+
+	It's charged when...
+	- A new page is added to shmem's radix-tree.
+	- A swp page is read. (move a charge from swap_cgroup to page_cgroup)
+	It's uncharged when
+	- A page is removed from radix-tree and not SwapCache.
+	- When SwapCache is removed, a charge is moved to swap_cgroup.
+	- When swp_entry's refcnt goes down to 0, a charge in swap_cgroup
+	  disappears.
+
+7. Page Migration
+   	One of the most complicated functions is page-migration-handler.
+	Memcg has 2 routines. Assume that we are migrating a page's contents
+	from OLDPAGE to NEWPAGE.
+
+	Usual migration logic is..
+	(a) remove the page from LRU.
+	(b) allocate NEWPAGE (migration target)
+	(c) lock by lock_page().
+	(d) unmap all mappings.
+	(e-1) If necessary, replace entry in radix-tree.
+	(e-2) move contents of a page.
+	(f) map all mappings again.
+	(g) pushback the page to LRU.
+	(-) OLDPAGE will be freed.
+
+	Before (g), memcg should complete all necessary charge/uncharge to
+	NEWPAGE/OLDPAGE.
+
+	The point is....
+	- If OLDPAGE is anonymous, all charges will be dropped at (d) because
+          try_to_unmap() drops all mapcount and the page will not be
+	  SwapCache.
+
+	- If OLDPAGE is SwapCache, charges will be kept at (g) because
+	  __delete_from_swap_cache() isn't called at (e-1)
+
+	- If OLDPAGE is page-cache, charges will be kept at (g) because
+	  remove_from_swap_cache() isn't called at (e-1)
+
+	memcg provides the following hooks.
+
+	- mem_cgroup_prepare_migration(OLDPAGE)
+	  Called after (b) to account a charge (usage += PAGE_SIZE) against
+	  the memcg which OLDPAGE belongs to.
+
+	- mem_cgroup_end_migration(OLDPAGE, NEWPAGE)
+	  Called after (f), before (g).
+	  If OLDPAGE is used, commit OLDPAGE again. If OLDPAGE is already
+	  charged, a charge by prepare_migration() is automatically canceled.
+	  If NEWPAGE is used, commit NEWPAGE and uncharge OLDPAGE.
+
+	  But zap_pte() (by exit or munmap) can be called during migration, so
+	  we have to check whether OLDPAGE/NEWPAGE is a valid page after
+	  commit().
+
+8. LRU
+        Each memcg has its own private LRU. For now, its handling is under the
+	global VM's control (meaning that it's handled under the global
+	zone->lru_lock). Almost all routines around memcg's LRU are called by
+	the global LRU's list management functions under zone->lru_lock.
+
+	A special function is mem_cgroup_isolate_pages(). This scans
+	memcg's private LRU and calls __isolate_lru_page() to extract a page
+	from the LRU.
+	(By __isolate_lru_page(), the page is removed from both the global and
+	 the private LRU.)
+
+
+9. Typical Tests.
+
+ Tests for racy cases.
+
+ 9.1 Small limit to memcg.
+	When you test racy cases, it's a good idea to set memcg's limit
+	very small rather than in GB. Many races were found in tests under
+	limits of a few KB or a few MB.
+	(Memory behavior under GB limits and under MB limits shows very
+	 different situations.)
+
+ 9.2 Shmem
+	Historically, memcg's shmem handling was poor and we saw a fair amount
+	of trouble here. This is because shmem is page-cache but can be
+	SwapCache. Testing with shmem/tmpfs is always a good test.
+
+ 9.3 Migration
+	For NUMA, migration is another special case. cpuset is useful for
+	doing easy tests. The following is a sample script to do migration.
+
+	mount -t cgroup -o cpuset none /opt/cpuset
+
+	mkdir /opt/cpuset/01
+	echo 1 > /opt/cpuset/01/cpuset.cpus
+	echo 0 > /opt/cpuset/01/cpuset.mems
+	echo 1 > /opt/cpuset/01/cpuset.memory_migrate
+	mkdir /opt/cpuset/02
+	echo 1 > /opt/cpuset/02/cpuset.cpus
+	echo 1 > /opt/cpuset/02/cpuset.mems
+	echo 1 > /opt/cpuset/02/cpuset.memory_migrate
+
+	In the above setup, when you move a task from 01 to 02, page migration
+	from node 0 to node 1 will occur. The following is a script to migrate
+	all tasks under a cpuset.
+	--
+	move_task()
+	{
+	for pid in $1
+	do
+		/bin/echo $pid >$2/tasks 2>/dev/null
+		echo -n $pid
+		echo -n " "
+	done
+	echo END
+	}
+
+	G1_TASK=`cat ${G1}/tasks`
+	G2_TASK=`cat ${G2}/tasks`
+	move_task "${G1_TASK}" ${G2} &
+	--
+ 9.4 Memory hotplug.
+	The memory hotplug test is another good test.
+	To offline memory, do the following:
+	# echo offline > /sys/devices/system/memory/memoryXXX/state
+	(XXX is the number of the memory section)
+	This is an easy way to test page migration, too.
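+	A minimal offline/online cycle looks like the following (the section
+	number 32 is just an example; pick a section which exists and is
+	removable on your system).
+	# cat /sys/devices/system/memory/memory32/state
+	# echo offline > /sys/devices/system/memory/memory32/state
+	# echo online > /sys/devices/system/memory/memory32/state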
+
+ 9.5 mkdir/rmdir
+	When using hierarchy, the mkdir/rmdir test should be done.
+	Use tests like the following.
+
+	echo 1 >/opt/cgroup/01/memory.use_hierarchy
+	mkdir /opt/cgroup/01/child_a
+	mkdir /opt/cgroup/01/child_b
+
+	Set a limit on 01.
+	Add a limit to 01/child_b.
+	Run jobs under child_a and child_b.
+
+	Create/delete the following groups at random while the jobs are
+	running (a sketch of such a loop follows below).
+	/opt/cgroup/01/child_a/child_aa
+	/opt/cgroup/01/child_b/child_bb
+	/opt/cgroup/01/child_c
+
+	Running new jobs in a new group is also good.
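+
+	A minimal sketch of such a random create/delete loop (paths and the
+	sleep interval are just examples).
+	--
+	while true; do
+		for g in 01/child_a/child_aa 01/child_b/child_bb 01/child_c; do
+			mkdir /opt/cgroup/$g 2>/dev/null
+		done
+		sleep 1
+		for g in 01/child_a/child_aa 01/child_b/child_bb 01/child_c; do
+			rmdir /opt/cgroup/$g 2>/dev/null
+		done
+	done
+	--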
+
+ 9.6 Mount with other subsystems.
+	Mounting with other subsystems is a good test because there are
+	races and lock dependencies with other cgroup subsystems.
+
+	example)
+	# mount -t cgroup none /cgroup -o cpuset,memory,cpu,devices
+
+	and do task move, mkdir, rmdir, etc. under this.
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
new file mode 100644
index 0000000..e150196
--- /dev/null
+++ b/Documentation/cgroups/memory.txt
@@ -0,0 +1,399 @@
+Memory Resource Controller
+
+NOTE: The Memory Resource Controller has generically been referred
+to as the memory controller in this document. Do not confuse memory controller
+used here with the memory controller that is used in hardware.
+
+Salient features
+
+a. Enable control of both RSS (mapped) and Page Cache (unmapped) pages
+b. The infrastructure allows easy addition of other types of memory to control
+c. Provides *zero overhead* for non memory controller users
+d. Provides a double LRU: global memory pressure causes reclaim from the
+   global LRU; a cgroup, on hitting a limit, reclaims from the per
+   cgroup LRU
+
+NOTE: Swap Cache (unmapped) is not accounted now.
+
+Benefits and Purpose of the memory controller
+
+The memory controller isolates the memory behaviour of a group of tasks
+from the rest of the system. The article on LWN [12] mentions some probable
+uses of the memory controller. The memory controller can be used to
+
+a. Isolate an application or a group of applications
+   Memory hungry applications can be isolated and limited to a smaller
+   amount of memory.
+b. Create a cgroup with a limited amount of memory; this can be used
+   as a good alternative to booting with mem=XXXX.
+c. Virtualization solutions can control the amount of memory they want
+   to assign to a virtual machine instance.
+d. A CD/DVD burner could control the amount of memory used by the
+   rest of the system to ensure that burning does not fail due to lack
+   of available memory.
+e. There are several other use cases, find one or use the controller just
+   for fun (to learn and hack on the VM subsystem).
+
+1. History
+
+The memory controller has a long history. A request for comments for the memory
+controller was posted by Balbir Singh [1]. At the time the RFC was posted
+there were several implementations for memory control. The goal of the
+RFC was to build consensus and agreement for the minimal features required
+for memory control. The first RSS controller was posted by Balbir Singh[2]
+in Feb 2007. Pavel Emelianov [3][4][5] has since posted three versions of the
+RSS controller. At OLS, at the resource management BoF, everyone suggested
+that we handle both page cache and RSS together. Another request was raised
+to allow user space handling of OOM. The current memory controller is
+at version 6; it combines both mapped (RSS) and unmapped Page
+Cache Control [11].
+
+2. Memory Control
+
+Memory is a unique resource in the sense that it is present in a limited
+amount. If a task requires a lot of CPU processing, the task can spread
+its processing over a period of hours, days, months or years, but with
+memory, the same physical memory needs to be reused to accomplish the task.
+
+The memory controller implementation has been divided into phases. These
+are:
+
+1. Memory controller
+2. mlock(2) controller
+3. Kernel user memory accounting and slab control
+4. user mappings length controller
+
+The memory controller is the first controller developed.
+
+2.1. Design
+
+The core of the design is a counter called the res_counter. The res_counter
+tracks the current memory usage and limit of the group of processes associated
+with the controller. Each cgroup has a memory controller specific data
+structure (mem_cgroup) associated with it.
+
+2.2. Accounting
+
+		+--------------------+
+		|  mem_cgroup        |
+		|  (res_counter)     |
+		+--------------------+
+		 /            ^      \
+		/             |       \
+           +---------------+  |        +---------------+
+           | mm_struct     |  |....    | mm_struct     |
+           |               |  |        |               |
+           +---------------+  |        +---------------+
+                              |
+                              + --------------+
+                                              |
+           +---------------+           +------+--------+
+           | page          +---------->| page_cgroup   |
+           |               |           |               |
+           +---------------+           +---------------+
+
+             (Figure 1: Hierarchy of Accounting)
+
+
+Figure 1 shows the important aspects of the controller
+
+1. Accounting happens per cgroup
+2. Each mm_struct knows about which cgroup it belongs to
+3. Each page has a pointer to the page_cgroup, which in turn knows the
+   cgroup it belongs to
+
+The accounting is done as follows: mem_cgroup_charge() is invoked to setup
+the necessary data structures and check if the cgroup that is being charged
+is over its limit. If it is then reclaim is invoked on the cgroup.
+More details can be found in the reclaim section of this document.
+If everything goes well, a page meta-data-structure called page_cgroup is
+allocated and associated with the page.  This routine also adds the page to
+the per cgroup LRU.
+
+2.2.1 Accounting details
+
+All mapped anon pages (RSS) and cache pages (Page Cache) are accounted.
+(some pages which are never reclaimable and will not be on the global LRU
+ are not accounted. We just account pages under usual VM management.)
+
+RSS pages are accounted at page_fault unless they've already been accounted
+for earlier. A file page will be accounted for as Page Cache when it's
+inserted into inode (radix-tree). While it's mapped into the page tables of
+processes, duplicate accounting is carefully avoided.
+
+An RSS page is unaccounted when it's fully unmapped. A PageCache page is
+unaccounted when it's removed from the radix-tree.
+
+At page migration, accounting information is kept.
+
+Note: we just account pages-on-LRU because our purpose is to control the
+amount of used pages; not-on-LRU pages tend to be out of control from the
+VM's point of view.
+
+2.3 Shared Page Accounting
+
+Shared pages are accounted on the basis of the first touch approach. The
+cgroup that first touches a page is accounted for the page. The principle
+behind this approach is that a cgroup that aggressively uses a shared
+page will eventually get charged for it (once it is uncharged from
+the cgroup that brought it in -- this will happen on memory pressure).
+
+Exception: If CONFIG_CGROUP_MEM_RES_CTLR_SWAP is not used..
+When you do swapoff and force swapped-out pages of shmem (tmpfs) back
+into memory, charges for those pages are accounted against the caller
+of swapoff rather than the users of shmem.
+
+
+2.4 Swap Extension (CONFIG_CGROUP_MEM_RES_CTLR_SWAP)
+Swap Extension allows you to record charge for swap. A swapped-in page is
+charged back to original page allocator if possible.
+
+When swap is accounted, the following files are added.
+ - memory.memsw.usage_in_bytes.
+ - memory.memsw.limit_in_bytes.
+
+usage of mem+swap is limited by memsw.limit_in_bytes.
+
+Note: why 'mem+swap' rather than just swap?
+The global LRU (kswapd) can swap out arbitrary pages. Swapping out means
+moving the account from memory to swap...there is no change in the usage of
+mem+swap.
+
+In other words, when we want to limit the usage of swap without affecting
+the global LRU, the mem+swap limit is better than just limiting swap from
+the OS's point of view.
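+
+For example, a minimal sketch of limiting mem+swap (this assumes the swap
+extension is enabled; the /cgroups/0 path and the values just follow the
+example setup shown in section 3):
+
+# echo 4M > /cgroups/0/memory.limit_in_bytes
+# echo 8M > /cgroups/0/memory.memsw.limit_in_bytes
+# cat /cgroups/0/memory.memsw.usage_in_bytes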
+
+2.5 Reclaim
+
+Each cgroup maintains a per cgroup LRU that consists of an active
+and inactive list. When a cgroup goes over its limit, we first try
+to reclaim memory from the cgroup so as to make space for the new
+pages that the cgroup has touched. If the reclaim is unsuccessful,
+an OOM routine is invoked to select and kill the bulkiest task in the
+cgroup.
+
+The reclaim algorithm has not been modified for cgroups, except that
+pages that are selected for reclaiming come from the per cgroup LRU
+list.
+
+2.6 Locking
+
+The memory controller uses the following hierarchy
+
+1. zone->lru_lock is used for selecting pages to be isolated
+2. mem->per_zone->lru_lock protects the per cgroup LRU (per zone)
+3. lock_page_cgroup() is used to protect page->page_cgroup
+
+3. User Interface
+
+0. Configuration
+
+a. Enable CONFIG_CGROUPS
+b. Enable CONFIG_RESOURCE_COUNTERS
+c. Enable CONFIG_CGROUP_MEM_RES_CTLR
+
+1. Prepare the cgroups
+# mkdir -p /cgroups
+# mount -t cgroup none /cgroups -o memory
+
+2. Make the new group and move bash into it
+# mkdir /cgroups/0
+# echo $$ >  /cgroups/0/tasks
+
+Since we're now in the 0 cgroup,
+we can alter the memory limit:
+# echo 4M > /cgroups/0/memory.limit_in_bytes
+
+NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
+mega or gigabytes.
+
+# cat /cgroups/0/memory.limit_in_bytes
+4194304
+
+NOTE: The interface has now changed to display the usage in bytes
+instead of pages
+
+We can check the usage:
+# cat /cgroups/0/memory.usage_in_bytes
+1216512
+
+A successful write to this file does not guarantee that the limit was set
+to the value written into the file.  This can be due to a number of
+factors, such as rounding up to page boundaries or the total availability
+of memory on the system.  The user is required to re-read this file after
+a write to see the value actually committed by the kernel.
+
+# echo 1 > memory.limit_in_bytes
+# cat memory.limit_in_bytes
+4096
+
+The memory.failcnt field gives the number of times that the cgroup limit was
+exceeded.
+
+The memory.stat file gives accounting information. Currently, the number of
+cached pages, RSS and active/inactive pages are shown.
+
+4. Testing
+
+Balbir posted lmbench, AIM9, LTP and vmmstress results [10] and [11].
+Apart from that v6 has been tested with several applications and regular
+daily use. The controller has also been tested on the PPC64, x86_64 and
+UML platforms.
+
+4.1 Troubleshooting
+
+Sometimes a user might find that the application under a cgroup is
+terminated. There are several causes for this:
+
+1. The cgroup limit is too low (just too low to do anything useful)
+2. The user is using anonymous memory and swap is turned off or too low
+
+A sync followed by echo 1 > /proc/sys/vm/drop_caches will help get rid of
+some of the pages cached in the cgroup (page cache pages).
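+
+For example (the /cgroups/0 path follows the setup shown in section 3):
+
+# sync
+# echo 1 > /proc/sys/vm/drop_caches
+# cat /cgroups/0/memory.usage_in_bytes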
+
+4.2 Task migration
+
+When a task migrates from one cgroup to another, its charge is not
+carried forward. The pages allocated from the original cgroup still
+remain charged to it; the charge is dropped when the page is freed or
+reclaimed.
+
+4.3 Removing a cgroup
+
+A cgroup can be removed by rmdir, but as discussed in sections 4.1 and 4.2, a
+cgroup might have some charge associated with it, even though all
+tasks have migrated away from it.
+Such charges are freed (by default) or moved to its parent. When moved,
+both RSS and CACHES are moved to the parent.
+If both are busy, rmdir() returns -EBUSY. See 5.1 also.
+
+Charges recorded in swap information are not updated at removal of a cgroup.
+The recorded information is discarded and a cgroup which uses swap (swapcache)
+will be charged as the new owner of it.
+
+
+5. Misc. interfaces.
+
+5.1 force_empty
+  The memory.force_empty interface is provided to make a cgroup's memory
+  usage empty.  You can use this interface only when the cgroup has no
+  tasks.  When anything is written to this file, e.g.
+
+  # echo 0 > memory.force_empty
+
+  almost all pages tracked by this memcg will be unmapped and freed.  Some
+  pages cannot be freed because they are locked or in use.  Such pages are
+  moved to the parent and this cgroup will become empty.  This may return
+  -EBUSY if the cgroup is too busy.
+
+  The typical use case for this interface is calling it before rmdir().
+  Because rmdir() moves all pages to the parent, some out-of-use page caches
+  can be moved to the parent.  If you want to avoid that, force_empty will be
+  useful.
+
+5.2 stat file
+  The memory.stat file includes the following statistics (currently):
+	cache			- # of pages from page-cache and shmem.
+	rss			- # of pages from anonymous memory.
+	pgpgin			- # of charging events
+	pgpgout			- # of uncharging events
+	active_anon		- # of pages on active lru of anon, shmem.
+	inactive_anon		- # of pages on inactive lru of anon, shmem.
+	active_file		- # of pages on active lru of file-cache.
+	inactive_file		- # of pages on inactive lru of file cache.
+	unevictable		- # of pages which cannot be reclaimed (mlocked etc).
+
+	The entries below depend on CONFIG_DEBUG_VM.
+	inactive_ratio		- VM internal parameter. (see mm/page_alloc.c)
+	recent_rotated_anon	- VM internal parameter. (see mm/vmscan.c)
+	recent_rotated_file	- VM internal parameter. (see mm/vmscan.c)
+	recent_scanned_anon 	- VM internal parameter. (see mm/vmscan.c)
+	recent_scanned_file 	- VM internal parameter. (see mm/vmscan.c)
+
+  Memo:
+	recent_rotated means the recent frequency of LRU rotation.
+	recent_scanned means the recent # of scans of the LRU.
+	These are shown for easier debugging; please see the code for meanings.
+
+
+5.3 swappiness
+  Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
+
+  The following cgroups' swappiness can't be changed.
+  - root cgroup (uses /proc/sys/vm/swappiness).
+  - a cgroup which uses hierarchy and has a child cgroup.
+  - a cgroup which uses hierarchy and is not the root of the hierarchy.
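+
+  For any other cgroup, the value can be read and changed like this (a
+  sketch; the /cgroups/0 path follows the setup shown in section 3):
+
+  # cat /cgroups/0/memory.swappiness
+  # echo 10 > /cgroups/0/memory.swappiness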
+
+
+6. Hierarchy support
+
+The memory controller supports a deep hierarchy and hierarchical accounting.
+The hierarchy is created by creating the appropriate cgroups in the
+cgroup filesystem. Consider for example, the following cgroup filesystem
+hierarchy
+
+		root
+	     /  |   \
+           /	|    \
+	  a	b	c
+			| \
+			|  \
+			d   e
+
+In the diagram above, with hierarchical accounting enabled, all memory
+usage of e is accounted to its ancestors up until the root (i.e., c and root)
+that have memory.use_hierarchy enabled.  If one of the ancestors goes over its
+limit, the reclaim algorithm reclaims from the tasks in the ancestor and the
+children of the ancestor.
+
+6.1 Enabling hierarchical accounting and reclaim
+
+The memory controller by default disables the hierarchy feature. Support
+can be enabled by writing 1 to the memory.use_hierarchy file of the root cgroup:
+
+# echo 1 > memory.use_hierarchy
+
+The feature can be disabled by
+
+# echo 0 > memory.use_hierarchy
+
+NOTE1: Enabling/disabling will fail if the cgroup already has other
+cgroups created below it.
+
+NOTE2: This feature can be enabled/disabled per subtree.
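+
+For example, a minimal sketch matching the diagram above (the mount point,
+group names and the limit value are just examples):
+
+# mount -t cgroup none /cgroups -o memory
+# mkdir /cgroups/c
+# echo 1 > /cgroups/c/memory.use_hierarchy
+# mkdir /cgroups/c/d /cgroups/c/e
+# echo 64M > /cgroups/c/memory.limit_in_bytes
+
+With this setup, memory used by tasks in c, d and e is accounted against
+c's limit, and going over that limit triggers reclaim from c and its
+children.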
+
+7. TODO
+
+1. Add support for accounting huge pages (as a separate controller)
+2. Make per-cgroup scanner reclaim not-shared pages first
+3. Teach controller to account for shared-pages
+4. Start reclamation in the background when the limit is
+   not yet hit but the usage is getting closer
+
+Summary
+
+Overall, the memory controller has been a stable controller and has been
+commented and discussed quite extensively in the community.
+
+References
+
+1. Singh, Balbir. RFC: Memory Controller, http://lwn.net/Articles/206697/
+2. Singh, Balbir. Memory Controller (RSS Control),
+   http://lwn.net/Articles/222762/
+3. Emelianov, Pavel. Resource controllers based on process cgroups
+   http://lkml.org/lkml/2007/3/6/198
+4. Emelianov, Pavel. RSS controller based on process cgroups (v2)
+   http://lkml.org/lkml/2007/4/9/78
+5. Emelianov, Pavel. RSS controller based on process cgroups (v3)
+   http://lkml.org/lkml/2007/5/30/244
+6. Menage, Paul. Control Groups v10, http://lwn.net/Articles/236032/
+7. Vaidyanathan, Srinivasan, Control Groups: Pagecache accounting and control
+   subsystem (v3), http://lwn.net/Articles/235534/
+8. Singh, Balbir. RSS controller v2 test results (lmbench),
+   http://lkml.org/lkml/2007/5/17/232
+9. Singh, Balbir. RSS controller v2 AIM9 results
+   http://lkml.org/lkml/2007/5/18/1
+10. Singh, Balbir. Memory controller v6 test results,
+    http://lkml.org/lkml/2007/8/19/36
+11. Singh, Balbir. Memory controller introduction (v6),
+    http://lkml.org/lkml/2007/8/17/69
+12. Corbet, Jonathan, Controlling memory use in cgroups,
+    http://lwn.net/Articles/243795/
diff --git a/Documentation/controllers/resource_counter.txt b/Documentation/cgroups/resource_counter.txt
similarity index 100%
rename from Documentation/controllers/resource_counter.txt
rename to Documentation/cgroups/resource_counter.txt
diff --git a/Documentation/controllers/memory.txt b/Documentation/controllers/memory.txt
deleted file mode 100644
index 1c07547..0000000
--- a/Documentation/controllers/memory.txt
+++ /dev/null
@@ -1,284 +0,0 @@
-Memory Resource Controller
-
-NOTE: The Memory Resource Controller has been generically been referred
-to as the memory controller in this document. Do not confuse memory controller
-used here with the memory controller that is used in hardware.
-
-Salient features
-
-a. Enable control of both RSS (mapped) and Page Cache (unmapped) pages
-b. The infrastructure allows easy addition of other types of memory to control
-c. Provides *zero overhead* for non memory controller users
-d. Provides a double LRU: global memory pressure causes reclaim from the
-   global LRU; a cgroup on hitting a limit, reclaims from the per
-   cgroup LRU
-
-NOTE: Swap Cache (unmapped) is not accounted now.
-
-Benefits and Purpose of the memory controller
-
-The memory controller isolates the memory behaviour of a group of tasks
-from the rest of the system. The article on LWN [12] mentions some probable
-uses of the memory controller. The memory controller can be used to
-
-a. Isolate an application or a group of applications
-   Memory hungry applications can be isolated and limited to a smaller
-   amount of memory.
-b. Create a cgroup with limited amount of memory, this can be used
-   as a good alternative to booting with mem=XXXX.
-c. Virtualization solutions can control the amount of memory they want
-   to assign to a virtual machine instance.
-d. A CD/DVD burner could control the amount of memory used by the
-   rest of the system to ensure that burning does not fail due to lack
-   of available memory.
-e. There are several other use cases, find one or use the controller just
-   for fun (to learn and hack on the VM subsystem).
-
-1. History
-
-The memory controller has a long history. A request for comments for the memory
-controller was posted by Balbir Singh [1]. At the time the RFC was posted
-there were several implementations for memory control. The goal of the
-RFC was to build consensus and agreement for the minimal features required
-for memory control. The first RSS controller was posted by Balbir Singh[2]
-in Feb 2007. Pavel Emelianov [3][4][5] has since posted three versions of the
-RSS controller. At OLS, at the resource management BoF, everyone suggested
-that we handle both page cache and RSS together. Another request was raised
-to allow user space handling of OOM. The current memory controller is
-at version 6; it combines both mapped (RSS) and unmapped Page
-Cache Control [11].
-
-2. Memory Control
-
-Memory is a unique resource in the sense that it is present in a limited
-amount. If a task requires a lot of CPU processing, the task can spread
-its processing over a period of hours, days, months or years, but with
-memory, the same physical memory needs to be reused to accomplish the task.
-
-The memory controller implementation has been divided into phases. These
-are:
-
-1. Memory controller
-2. mlock(2) controller
-3. Kernel user memory accounting and slab control
-4. user mappings length controller
-
-The memory controller is the first controller developed.
-
-2.1. Design
-
-The core of the design is a counter called the res_counter. The res_counter
-tracks the current memory usage and limit of the group of processes associated
-with the controller. Each cgroup has a memory controller specific data
-structure (mem_cgroup) associated with it.
-
-2.2. Accounting
-
-		+--------------------+
-		|  mem_cgroup     |
-		|  (res_counter)     |
-		+--------------------+
-		 /            ^      \
-		/             |       \
-           +---------------+  |        +---------------+
-           | mm_struct     |  |....    | mm_struct     |
-           |               |  |        |               |
-           +---------------+  |        +---------------+
-                              |
-                              + --------------+
-                                              |
-           +---------------+           +------+--------+
-           | page          +---------->  page_cgroup|
-           |               |           |               |
-           +---------------+           +---------------+
-
-             (Figure 1: Hierarchy of Accounting)
-
-
-Figure 1 shows the important aspects of the controller
-
-1. Accounting happens per cgroup
-2. Each mm_struct knows about which cgroup it belongs to
-3. Each page has a pointer to the page_cgroup, which in turn knows the
-   cgroup it belongs to
-
-The accounting is done as follows: mem_cgroup_charge() is invoked to setup
-the necessary data structures and check if the cgroup that is being charged
-is over its limit. If it is then reclaim is invoked on the cgroup.
-More details can be found in the reclaim section of this document.
-If everything goes well, a page meta-data-structure called page_cgroup is
-allocated and associated with the page.  This routine also adds the page to
-the per cgroup LRU.
-
-2.2.1 Accounting details
-
-All mapped anon pages (RSS) and cache pages (Page Cache) are accounted.
-(some pages which never be reclaimable and will not be on global LRU
- are not accounted. we just accounts pages under usual vm management.)
-
-RSS pages are accounted at page_fault unless they've already been accounted
-for earlier. A file page will be accounted for as Page Cache when it's
-inserted into inode (radix-tree). While it's mapped into the page tables of
-processes, duplicate accounting is carefully avoided.
-
-A RSS page is unaccounted when it's fully unmapped. A PageCache page is
-unaccounted when it's removed from radix-tree.
-
-At page migration, accounting information is kept.
-
-Note: we just account pages-on-lru because our purpose is to control amount
-of used pages. not-on-lru pages are tend to be out-of-control from vm view.
-
-2.3 Shared Page Accounting
-
-Shared pages are accounted on the basis of the first touch approach. The
-cgroup that first touches a page is accounted for the page. The principle
-behind this approach is that a cgroup that aggressively uses a shared
-page will eventually get charged for it (once it is uncharged from
-the cgroup that brought it in -- this will happen on memory pressure).
-
-2.4 Reclaim
-
-Each cgroup maintains a per cgroup LRU that consists of an active
-and inactive list. When a cgroup goes over its limit, we first try
-to reclaim memory from the cgroup so as to make space for the new
-pages that the cgroup has touched. If the reclaim is unsuccessful,
-an OOM routine is invoked to select and kill the bulkiest task in the
-cgroup.
-
-The reclaim algorithm has not been modified for cgroups, except that
-pages that are selected for reclaiming come from the per cgroup LRU
-list.
-
-2. Locking
-
-The memory controller uses the following hierarchy
-
-1. zone->lru_lock is used for selecting pages to be isolated
-2. mem->per_zone->lru_lock protects the per cgroup LRU (per zone)
-3. lock_page_cgroup() is used to protect page->page_cgroup
-
-3. User Interface
-
-0. Configuration
-
-a. Enable CONFIG_CGROUPS
-b. Enable CONFIG_RESOURCE_COUNTERS
-c. Enable CONFIG_CGROUP_MEM_RES_CTLR
-
-1. Prepare the cgroups
-# mkdir -p /cgroups
-# mount -t cgroup none /cgroups -o memory
-
-2. Make the new group and move bash into it
-# mkdir /cgroups/0
-# echo $$ >  /cgroups/0/tasks
-
-Since now we're in the 0 cgroup,
-We can alter the memory limit:
-# echo 4M > /cgroups/0/memory.limit_in_bytes
-
-NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
-mega or gigabytes.
-
-# cat /cgroups/0/memory.limit_in_bytes
-4194304
-
-NOTE: The interface has now changed to display the usage in bytes
-instead of pages
-
-We can check the usage:
-# cat /cgroups/0/memory.usage_in_bytes
-1216512
-
-A successful write to this file does not guarantee a successful set of
-this limit to the value written into the file.  This can be due to a
-number of factors, such as rounding up to page boundaries or the total
-availability of memory on the system.  The user is required to re-read
-this file after a write to guarantee the value committed by the kernel.
-
-# echo 1 > memory.limit_in_bytes
-# cat memory.limit_in_bytes
-4096
-
-The memory.failcnt field gives the number of times that the cgroup limit was
-exceeded.
-
-The memory.stat file gives accounting information. Now, the number of
-caches, RSS and Active pages/Inactive pages are shown.
-
-The memory.force_empty gives an interface to drop *all* charges by force.
-
-# echo 1 > memory.force_empty
-
-will drop all charges in cgroup. Currently, this is maintained for test.
-
-4. Testing
-
-Balbir posted lmbench, AIM9, LTP and vmmstress results [10] and [11].
-Apart from that v6 has been tested with several applications and regular
-daily use. The controller has also been tested on the PPC64, x86_64 and
-UML platforms.
-
-4.1 Troubleshooting
-
-Sometimes a user might find that the application under a cgroup is
-terminated. There are several causes for this:
-
-1. The cgroup limit is too low (just too low to do anything useful)
-2. The user is using anonymous memory and swap is turned off or too low
-
-A sync followed by echo 1 > /proc/sys/vm/drop_caches will help get rid of
-some of the pages cached in the cgroup (page cache pages).
-
-4.2 Task migration
-
-When a task migrates from one cgroup to another, it's charge is not
-carried forward. The pages allocated from the original cgroup still
-remain charged to it, the charge is dropped when the page is freed or
-reclaimed.
-
-4.3 Removing a cgroup
-
-A cgroup can be removed by rmdir, but as discussed in sections 4.1 and 4.2, a
-cgroup might have some charge associated with it, even though all
-tasks have migrated away from it. Such charges are automatically dropped at
-rmdir() if there are no tasks.
-
-5. TODO
-
-1. Add support for accounting huge pages (as a separate controller)
-2. Make per-cgroup scanner reclaim not-shared pages first
-3. Teach controller to account for shared-pages
-4. Start reclamation in the background when the limit is
-   not yet hit but the usage is getting closer
-
-Summary
-
-Overall, the memory controller has been a stable controller and has been
-commented and discussed quite extensively in the community.
-
-References
-
-1. Singh, Balbir. RFC: Memory Controller, http://lwn.net/Articles/206697/
-2. Singh, Balbir. Memory Controller (RSS Control),
-   http://lwn.net/Articles/222762/
-3. Emelianov, Pavel. Resource controllers based on process cgroups
-   http://lkml.org/lkml/2007/3/6/198
-4. Emelianov, Pavel. RSS controller based on process cgroups (v2)
-   http://lkml.org/lkml/2007/4/9/78
-5. Emelianov, Pavel. RSS controller based on process cgroups (v3)
-   http://lkml.org/lkml/2007/5/30/244
-6. Menage, Paul. Control Groups v10, http://lwn.net/Articles/236032/
-7. Vaidyanathan, Srinivasan, Control Groups: Pagecache accounting and control
-   subsystem (v3), http://lwn.net/Articles/235534/
-8. Singh, Balbir. RSS controller v2 test results (lmbench),
-   http://lkml.org/lkml/2007/5/17/232
-9. Singh, Balbir. RSS controller v2 AIM9 results
-   http://lkml.org/lkml/2007/5/18/1
-10. Singh, Balbir. Memory controller v6 test results,
-    http://lkml.org/lkml/2007/8/19/36
-11. Singh, Balbir. Memory controller introduction (v6),
-    http://lkml.org/lkml/2007/8/17/69
-12. Corbet, Jonathan, Controlling memory use in cgroups,
-    http://lwn.net/Articles/243795/
diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
index c1e9545..9f59fcb 100644
--- a/Documentation/crypto/async-tx-api.txt
+++ b/Documentation/crypto/async-tx-api.txt
@@ -13,9 +13,9 @@
 3.6 Constraints
 3.7 Example
 
-4 DRIVER DEVELOPER NOTES
+4 DMAENGINE DRIVER DEVELOPER NOTES
 4.1 Conformance points
-4.2 "My application needs finer control of hardware channels"
+4.2 "My application needs exclusive control of hardware channels"
 
 5 SOURCE
 
@@ -150,6 +150,7 @@
 implementation examples.
 
 4 DRIVER DEVELOPMENT NOTES
+
 4.1 Conformance points:
 There are a few conformance points required in dmaengine drivers to
 accommodate assumptions made by applications using the async_tx API:
@@ -158,58 +159,49 @@
 3/ Use async_tx_run_dependencies() in the descriptor clean up path to
    handle submission of dependent operations
 
-4.2 "My application needs finer control of hardware channels"
-This requirement seems to arise from cases where a DMA engine driver is
-trying to support device-to-memory DMA.  The dmaengine and async_tx
-implementations were designed for offloading memory-to-memory
-operations; however, there are some capabilities of the dmaengine layer
-that can be used for platform-specific channel management.
-Platform-specific constraints can be handled by registering the
-application as a 'dma_client' and implementing a 'dma_event_callback' to
-apply a filter to the available channels in the system.  Before showing
-how to implement a custom dma_event callback some background of
-dmaengine's client support is required.
+4.2 "My application needs exclusive control of hardware channels"
+Primarily this requirement arises from cases where a DMA engine driver
+is being used to support device-to-memory operations.  A channel that is
+performing these operations cannot, for many platform specific reasons,
+be shared.  For these cases the dma_request_channel() interface is
+provided.
 
-The following routines in dmaengine support multiple clients requesting
-use of a channel:
-- dma_async_client_register(struct dma_client *client)
-- dma_async_client_chan_request(struct dma_client *client)
+The interface is:
+struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
+				     dma_filter_fn filter_fn,
+				     void *filter_param);
 
-dma_async_client_register takes a pointer to an initialized dma_client
-structure.  It expects that the 'event_callback' and 'cap_mask' fields
-are already initialized.
+Where dma_filter_fn is defined as:
+typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
-dma_async_client_chan_request triggers dmaengine to notify the client of
-all channels that satisfy the capability mask.  It is up to the client's
-event_callback routine to track how many channels the client needs and
-how many it is currently using.  The dma_event_callback routine returns a
-dma_state_client code to let dmaengine know the status of the
-allocation.
+When the optional 'filter_fn' parameter is set to NULL,
+dma_request_channel simply returns the first channel that satisfies the
+capability mask.  Otherwise, when the mask parameter is insufficient for
+specifying the necessary channel, the filter_fn routine can be used to
+disposition the available channels in the system. The filter_fn routine
+is called once for each free channel in the system.  Upon seeing a
+suitable channel, filter_fn returns true, which flags that channel to
+be the return value from dma_request_channel.  A channel allocated via
+this interface is exclusive to the caller, until dma_release_channel()
+is called.
 
-Below is the example of how to extend this functionality for
-platform-specific filtering of the available channels beyond the
-standard capability mask:
+The DMA_PRIVATE capability flag is used to tag dma devices that should
+not be used by the general-purpose allocator.  It can be set at
+initialization time if it is known that a channel will always be
+private.  Alternatively, it is set when dma_request_channel() finds an
+unused "public" channel.
 
-static enum dma_state_client
-my_dma_client_callback(struct dma_client *client,
-			struct dma_chan *chan, enum dma_state state)
-{
-	struct dma_device *dma_dev;
-	struct my_platform_specific_dma *plat_dma_dev;
-	
-	dma_dev = chan->device;
-	plat_dma_dev = container_of(dma_dev,
-				    struct my_platform_specific_dma,
-				    dma_dev);
-
-	if (!plat_dma_dev->platform_specific_capability)
-		return DMA_DUP;
-
-	. . .
-}
+A couple of caveats to note when implementing a driver and consumer:
+1/ Once a channel has been privately allocated it will no longer be
+   considered by the general-purpose allocator even after a call to
+   dma_release_channel().
+2/ Since capabilities are specified at the device level a dma_device
+   with multiple channels will either have all channels public, or all
+   channels private.
 
 5 SOURCE
-include/linux/dmaengine.h: core header file for DMA drivers and clients
+
+include/linux/dmaengine.h: core header file for DMA drivers and api users
 drivers/dma/dmaengine.c: offload engine channel management routines
 drivers/dma/: location for offload engine drivers
 include/linux/async_tx.h: core header file for the async_tx api
diff --git a/Documentation/dell_rbu.txt b/Documentation/dell_rbu.txt
index 2c0d631..c11b931 100644
--- a/Documentation/dell_rbu.txt
+++ b/Documentation/dell_rbu.txt
@@ -81,8 +81,8 @@
 Also echoing either mono ,packet or init in to image_type will free up the
 memory allocated by the driver.
 
-If an user by accident executes steps 1 and 3 above without executing step 2;
-it will make the /sys/class/firmware/dell_rbu/ entries to disappear.
+If a user by accident executes steps 1 and 3 above without executing step 2;
+it will make the /sys/class/firmware/dell_rbu/ entries disappear.
 The entries can be recreated by doing the following
 echo init > /sys/devices/platform/dell_rbu/image_type
 NOTE: echoing init in image_type does not change it original value.
diff --git a/Documentation/development-process/4.Coding b/Documentation/development-process/4.Coding
index 014aca8..a5a3450 100644
--- a/Documentation/development-process/4.Coding
+++ b/Documentation/development-process/4.Coding
@@ -375,10 +375,10 @@
 justification is solid.
 
 When making an incompatible API change, one should, whenever possible,
-ensure that code which has not been updated is caught by the compiler.  
+ensure that code which has not been updated is caught by the compiler.
 This will help you to be sure that you have found all in-tree uses of that
 interface.  It will also alert developers of out-of-tree code that there is
 a change that they need to respond to.  Supporting out-of-tree code is not
 something that kernel developers need to be worried about, but we also do
-not have to make life harder for out-of-tree developers than it it needs to
-be. 
+not have to make life harder for out-of-tree developers than it needs to
+be.
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine.txt
new file mode 100644
index 0000000..0c1c2f6
--- /dev/null
+++ b/Documentation/dmaengine.txt
@@ -0,0 +1 @@
+See Documentation/crypto/async-tx-api.txt
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 2193be5..5ddbe35 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -318,6 +318,14 @@
 
 ---------------------------
 
+What:	fscher and fscpos drivers
+When:	June 2009
+Why:	Deprecated by the new fschmd driver.
+Who:	Hans de Goede <hdegoede@redhat.com>
+	Jean Delvare <khali@linux-fr.org>
+
+---------------------------
+
 What:	SELinux "compat_net" functionality
 When:	2.6.30 at the earliest
 Why:	In 2.6.18 the Secmark concept was introduced to replace the "compat_net"
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index cfbfa15..ec6a939 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -97,8 +97,8 @@
 	void (*put_super) (struct super_block *);
 	void (*write_super) (struct super_block *);
 	int (*sync_fs)(struct super_block *sb, int wait);
-	void (*write_super_lockfs) (struct super_block *);
-	void (*unlockfs) (struct super_block *);
+	int (*freeze_fs) (struct super_block *);
+	int (*unfreeze_fs) (struct super_block *);
 	int (*statfs) (struct dentry *, struct kstatfs *);
 	int (*remount_fs) (struct super_block *, int *, char *);
 	void (*clear_inode) (struct inode *);
@@ -119,8 +119,8 @@
 put_super:		yes	yes	no
 write_super:		no	yes	read
 sync_fs:		no	no	read
-write_super_lockfs:	?
-unlockfs:		?
+freeze_fs:		?
+unfreeze_fs:		?
 statfs:			no	no	no
 remount_fs:		yes	yes	maybe		(see below)
 clear_inode:		no
diff --git a/Documentation/filesystems/btrfs.txt b/Documentation/filesystems/btrfs.txt
new file mode 100644
index 0000000..64087c3
--- /dev/null
+++ b/Documentation/filesystems/btrfs.txt
@@ -0,0 +1,91 @@
+
+	BTRFS
+	=====
+
+Btrfs is a new copy on write filesystem for Linux aimed at
+implementing advanced features while focusing on fault tolerance,
+repair and easy administration. Initially developed by Oracle, Btrfs
+is licensed under the GPL and open for contribution from anyone.
+
+Linux has a wealth of filesystems to choose from, but we are facing a
+number of challenges with scaling to the large storage subsystems that
+are becoming common in today's data centers. Filesystems need to scale
+in their ability to address and manage large storage, and also in
+their ability to detect, repair and tolerate errors in the data stored
+on disk.  Btrfs is under heavy development, and is not suitable for
+any uses other than benchmarking and review. The Btrfs disk format is
+not yet finalized.
+
+The main Btrfs features include:
+
+    * Extent based file storage (2^64 max file size)
+    * Space efficient packing of small files
+    * Space efficient indexed directories
+    * Dynamic inode allocation
+    * Writable snapshots
+    * Subvolumes (separate internal filesystem roots)
+    * Object level mirroring and striping
+    * Checksums on data and metadata (multiple algorithms available)
+    * Compression
+    * Integrated multiple device support, with several raid algorithms
+    * Online filesystem check (not yet implemented)
+    * Very fast offline filesystem check
+    * Efficient incremental backup and FS mirroring (not yet implemented)
+    * Online filesystem defragmentation
+
+
+
+	MAILING LIST
+	============
+
+There is a Btrfs mailing list hosted on vger.kernel.org. You can
+find details on how to subscribe here:
+
+http://vger.kernel.org/vger-lists.html#linux-btrfs
+
+Mailing list archives are available from gmane:
+
+http://dir.gmane.org/gmane.comp.file-systems.btrfs
+
+
+
+	IRC
+	===
+
+Discussion of Btrfs also occurs on the #btrfs channel of the Freenode
+IRC network.
+
+
+
+	UTILITIES
+	=========
+
+Userspace tools for creating and manipulating Btrfs file systems are
+available from the git repository at the following location:
+
+ http://git.kernel.org/?p=linux/kernel/git/mason/btrfs-progs-unstable.git
+ git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-progs-unstable.git
+
+These include the following tools:
+
+mkfs.btrfs: create a filesystem
+
+btrfsctl: control program to create snapshots and subvolumes:
+
+	mount /dev/sda2 /mnt
+	btrfsctl -s new_subvol_name /mnt
+	btrfsctl -s snapshot_of_default /mnt/default
+	btrfsctl -s snapshot_of_new_subvol /mnt/new_subvol_name
+	btrfsctl -s snapshot_of_a_snapshot /mnt/snapshot_of_new_subvol
+	ls /mnt
+	default snapshot_of_a_snapshot snapshot_of_new_subvol
+	new_subvol_name snapshot_of_default
+
+	Snapshots and subvolumes cannot be deleted right now, but you can
+	rm -rf all the files and directories inside them.
+
+btrfsck: do a limited check of the FS extent trees.
+
+btrfs-debug-tree: print all of the FS metadata in text form.  Example:
+
+	btrfs-debug-tree /dev/sda2 >& big_output_file
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 174eaff..cec829b 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -58,13 +58,22 @@
 
 	# mount -t ext4 /dev/hda1 /wherever
 
-  - When comparing performance with other filesystems, remember that
-    ext3/4 by default offers higher data integrity guarantees than most.
-    So when comparing with a metadata-only journalling filesystem, such
-    as ext3, use `mount -o data=writeback'.  And you might as well use
-    `mount -o nobh' too along with it.  Making the journal larger than
-    the mke2fs default often helps performance with metadata-intensive
-    workloads.
+  - When comparing performance with other filesystems, it's always
+    important to try multiple workloads; very often a subtle change in a
+    workload parameter can completely change the ranking of which
+    filesystems do well compared to others.  When comparing versus ext3,
+    note that ext4 enables write barriers by default, while ext3 does
+    not enable write barriers by default.  So it is useful to explicitly
+    specify whether barriers are enabled via the '-o barrier=[0|1]' mount
+    option for both ext3 and ext4 filesystems for a fair comparison (see
+    the example below).  When tuning ext3 for best benchmark numbers,
+    it is often worthwhile to try changing the data journaling mode; '-o
+    data=writeback,nobh' can be faster for some workloads.  (Note
+    however that running mounted with data=writeback can potentially
+    leave stale data exposed in recently written files in case of an
+    unclean shutdown, which could be a security exposure in some
+    situations.)  Configuring the filesystem with a large journal can
+    also be helpful for metadata-intensive workloads.
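+
+    For example, to compare the two filesystems with barriers explicitly
+    enabled on both (device and mount point are just placeholders):
+
+	# mount -t ext3 -o barrier=1 /dev/hda1 /wherever
+	# mount -t ext4 -o barrier=1 /dev/hda1 /wherever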
 
 2. Features
 ===========
@@ -74,7 +83,7 @@
 * ability to use filesystems > 16TB (e2fsprogs support not available yet)
 * extent format reduces metadata overhead (RAM, IO for access, transactions)
 * extent format more robust in face of on-disk corruption due to magics,
-* internal redunancy in tree
+* internal redundancy in tree
 * improved file allocation (multi-block alloc)
 * fix 32000 subdirectory limit
 * nsec timestamps for mtime, atime, ctime, create time
@@ -116,10 +125,11 @@
 When mounting an ext4 filesystem, the following option are accepted:
 (*) == default
 
-extents		(*)	ext4 will use extents to address file data.  The
-			file system will no longer be mountable by ext3.
-
-noextents		ext4 will not use extents for newly created files
+ro                   	Mount filesystem read only. Note that ext4 will
+                     	replay the journal (and thus write to the
+                     	partition) even when mounted "read only". The
+                     	mount options "ro,noload" can be used to prevent
+		     	writes to the filesystem.
 
 journal_checksum	Enable checksumming of the journal transactions.
 			This will allow the recovery code in e2fsck and the
@@ -134,17 +144,17 @@
 journal=update		Update the ext4 file system's journal to the current
 			format.
 
-journal=inum		When a journal already exists, this option is ignored.
-			Otherwise, it specifies the number of the inode which
-			will represent the ext4 file system's journal file.
-
 journal_dev=devnum	When the external journal device's major/minor numbers
 			have changed, this option allows the user to specify
 			the new journal location.  The journal device is
 			identified through its new major/minor numbers encoded
 			in devnum.
 
-noload			Don't load the journal on mounting.
+noload			Don't load the journal on mounting.  Note that
+                     	if the filesystem was not unmounted cleanly,
+                     	skipping the journal replay will lead to the
+                     	filesystem containing inconsistencies that can
+                     	lead to any number of problems.
 
 data=journal		All data are committed into the journal prior to being
 			written into the main file system.
@@ -219,9 +229,12 @@
 
 debug			Extra debugging information is sent to syslog.
 
-errors=remount-ro(*)	Remount the filesystem read-only on an error.
+errors=remount-ro	Remount the filesystem read-only on an error.
 errors=continue		Keep going on a filesystem error.
 errors=panic		Panic and halt the machine if an error occurs.
+                        (These mount options override the errors behavior
+                        specified in the superblock, which can be configured
+                        using tune2fs)
 
 data_err=ignore(*)	Just print an error message if an error occurs
 			in a file data buffer in ordered mode.
@@ -261,6 +274,42 @@
 nodelalloc		Disable delayed allocation. Blocks are allocation
 			when data is copied from user to page cache.
 
+max_batch_time=usec	Maximum amount of time ext4 should wait for
+			additional filesystem operations to be batched
+			together with a synchronous write operation.
+			Since a synchronous write operation is going to
+			force a commit and then wait for the I/O to
+			complete, it doesn't cost much, and can be a
+			huge throughput win, to wait for a small amount
+			of time to see if any other transactions can
+			piggyback on the synchronous write.   The
+			algorithm used is designed to automatically tune
+			for the speed of the disk, by measuring the
+			amount of time (on average) that it takes to
+			finish committing a transaction.  Call this time
+			the "commit time".  If the time that the
+			transaction has been running is less than the
+			commit time, ext4 will try sleeping for the
+			commit time to see if other operations will join
+			the transaction.   The commit time is capped by
+			the max_batch_time, which defaults to 15000us
+			(15ms).   This optimization can be turned off
+			entirely by setting max_batch_time to 0.  (An
+			example of setting these options at mount time
+			follows this option list.)
+
+min_batch_time=usec	This parameter sets the commit time (as
+			described above) to be at least min_batch_time.
+			It defaults to zero microseconds.  Increasing
+			this parameter may improve the throughput of
+			multi-threaded, synchronous workloads on very
+			fast disks, at the cost of increasing latency.
+
+journal_ioprio=prio	The I/O priority (from 0 to 7, where 0 is the
+			highest priority) which should be used for I/O
+			operations submitted by kjournald2 during a
+			commit operation.  This defaults to 3, which is
+			a slightly higher priority than the default I/O
+			priority.
+
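+For example, a sketch of setting these options at mount time (the values
+are illustrative only):
+
+	# mount -t ext4 -o max_batch_time=30000,min_batch_time=1000,journal_ioprio=1 /dev/hda1 /wherever
+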
 Data Mode
 =========
 There are 3 different data modes:
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 32e9463..bbebc3a 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -140,6 +140,7 @@
  statm		Process memory status information
  status		Process status in human readable form
  wchan		If CONFIG_KALLSYMS is set, a pre-decoded wchan
+ stack		Report full stack trace, enable via CONFIG_STACKTRACE
  smaps		Extension based on maps, the rss size for each mapped file
 ..............................................................................
 
@@ -1370,292 +1371,8 @@
 2.4 /proc/sys/vm - The virtual memory subsystem
 -----------------------------------------------
 
-The files  in  this directory can be used to tune the operation of the virtual
-memory (VM)  subsystem  of  the  Linux  kernel.
-
-vfs_cache_pressure
-------------------
-
-Controls the tendency of the kernel to reclaim the memory which is used for
-caching of directory and inode objects.
-
-At the default value of vfs_cache_pressure=100 the kernel will attempt to
-reclaim dentries and inodes at a "fair" rate with respect to pagecache and
-swapcache reclaim.  Decreasing vfs_cache_pressure causes the kernel to prefer
-to retain dentry and inode caches.  Increasing vfs_cache_pressure beyond 100
-causes the kernel to prefer to reclaim dentries and inodes.
-
-dirty_background_bytes
-----------------------
-
-Contains the amount of dirty memory at which the pdflush background writeback
-daemon will start writeback.
-
-If dirty_background_bytes is written, dirty_background_ratio becomes a function
-of its value (dirty_background_bytes / the amount of dirtyable system memory).
-
-dirty_background_ratio
-----------------------
-
-Contains, as a percentage of the dirtyable system memory (free pages + mapped
-pages + file cache, not including locked pages and HugePages), the number of
-pages at which the pdflush background writeback daemon will start writing out
-dirty data.
-
-If dirty_background_ratio is written, dirty_background_bytes becomes a function
-of its value (dirty_background_ratio * the amount of dirtyable system memory).
-
-dirty_bytes
------------
-
-Contains the amount of dirty memory at which a process generating disk writes
-will itself start writeback.
-
-If dirty_bytes is written, dirty_ratio becomes a function of its value
-(dirty_bytes / the amount of dirtyable system memory).
-
-dirty_ratio
------------
-
-Contains, as a percentage of the dirtyable system memory (free pages + mapped
-pages + file cache, not including locked pages and HugePages), the number of
-pages at which a process which is generating disk writes will itself start
-writing out dirty data.
-
-If dirty_ratio is written, dirty_bytes becomes a function of its value
-(dirty_ratio * the amount of dirtyable system memory).
-
-dirty_writeback_centisecs
--------------------------
-
-The pdflush writeback daemons will periodically wake up and write `old' data
-out to disk.  This tunable expresses the interval between those wakeups, in
-100'ths of a second.
-
-Setting this to zero disables periodic writeback altogether.
-
-dirty_expire_centisecs
-----------------------
-
-This tunable is used to define when dirty data is old enough to be eligible
-for writeout by the pdflush daemons.  It is expressed in 100'ths of a second. 
-Data which has been dirty in-memory for longer than this interval will be
-written out next time a pdflush daemon wakes up.
-
-highmem_is_dirtyable
---------------------
-
-Only present if CONFIG_HIGHMEM is set.
-
-This defaults to 0 (false), meaning that the ratios set above are calculated
-as a percentage of lowmem only.  This protects against excessive scanning
-in page reclaim, swapping and general VM distress.
-
-Setting this to 1 can be useful on 32 bit machines where you want to make
-random changes within an MMAPed file that is larger than your available
-lowmem without causing large quantities of random IO.  Is is safe if the
-behavior of all programs running on the machine is known and memory will
-not be otherwise stressed.
-
-legacy_va_layout
-----------------
-
-If non-zero, this sysctl disables the new 32-bit mmap mmap layout - the kernel
-will use the legacy (2.4) layout for all processes.
-
-lowmem_reserve_ratio
----------------------
-
-For some specialised workloads on highmem machines it is dangerous for
-the kernel to allow process memory to be allocated from the "lowmem"
-zone.  This is because that memory could then be pinned via the mlock()
-system call, or by unavailability of swapspace.
-
-And on large highmem machines this lack of reclaimable lowmem memory
-can be fatal.
-
-So the Linux page allocator has a mechanism which prevents allocations
-which _could_ use highmem from using too much lowmem.  This means that
-a certain amount of lowmem is defended from the possibility of being
-captured into pinned user memory.
-
-(The same argument applies to the old 16 megabyte ISA DMA region.  This
-mechanism will also defend that region from allocations which could use
-highmem or lowmem).
-
-The `lowmem_reserve_ratio' tunable determines how aggressive the kernel is
-in defending these lower zones.
-
-If you have a machine which uses highmem or ISA DMA and your
-applications are using mlock(), or if you are running with no swap then
-you probably should change the lowmem_reserve_ratio setting.
-
-The lowmem_reserve_ratio is an array. You can see them by reading this file.
--
-% cat /proc/sys/vm/lowmem_reserve_ratio
-256     256     32
--
-Note: # of this elements is one fewer than number of zones. Because the highest
-      zone's value is not necessary for following calculation.
-
-But, these values are not used directly. The kernel calculates # of protection
-pages for each zones from them. These are shown as array of protection pages
-in /proc/zoneinfo like followings. (This is an example of x86-64 box).
-Each zone has an array of protection pages like this.
-
--
-Node 0, zone      DMA
-  pages free     1355
-        min      3
-        low      3
-        high     4
-	:
-	:
-    numa_other   0
-        protection: (0, 2004, 2004, 2004)
-	^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  pagesets
-    cpu: 0 pcp: 0
-        :
--
-These protections are added to score to judge whether this zone should be used
-for page allocation or should be reclaimed.
-
-In this example, if normal pages (index=2) are required to this DMA zone and
-pages_high is used for watermark, the kernel judges this zone should not be
-used because pages_free(1355) is smaller than watermark + protection[2]
-(4 + 2004 = 2008). If this protection value is 0, this zone would be used for
-normal page requirement. If requirement is DMA zone(index=0), protection[0]
-(=0) is used.
-
-zone[i]'s protection[j] is calculated by following expression.
-
-(i < j):
-  zone[i]->protection[j]
-  = (total sums of present_pages from zone[i+1] to zone[j] on the node)
-    / lowmem_reserve_ratio[i];
-(i = j):
-   (should not be protected. = 0;
-(i > j):
-   (not necessary, but looks 0)
-
-The default values of lowmem_reserve_ratio[i] are
-    256 (if zone[i] means DMA or DMA32 zone)
-    32  (others).
-As above expression, they are reciprocal number of ratio.
-256 means 1/256. # of protection pages becomes about "0.39%" of total present
-pages of higher zones on the node.
-
-If you would like to protect more pages, smaller values are effective.
-The minimum value is 1 (1/1 -> 100%).
-
-page-cluster
-------------
-
-page-cluster controls the number of pages which are written to swap in
-a single attempt.  The swap I/O size.
-
-It is a logarithmic value - setting it to zero means "1 page", setting
-it to 1 means "2 pages", setting it to 2 means "4 pages", etc.
-
-The default value is three (eight pages at a time).  There may be some
-small benefits in tuning this to a different value if your workload is
-swap-intensive.
-
-overcommit_memory
------------------
-
-Controls overcommit of system memory, possibly allowing processes
-to allocate (but not use) more memory than is actually available.
-
-
-0	-	Heuristic overcommit handling. Obvious overcommits of
-		address space are refused. Used for a typical system. It
-		ensures a seriously wild allocation fails while allowing
-		overcommit to reduce swap usage.  root is allowed to
-		allocate slightly more memory in this mode. This is the
-		default.
-
-1	-	Always overcommit. Appropriate for some scientific
-		applications.
-
-2	-	Don't overcommit. The total address space commit
-		for the system is not permitted to exceed swap plus a
-		configurable percentage (default is 50) of physical RAM.
-		Depending on the percentage you use, in most situations
-		this means a process will not be killed while attempting
-		to use already-allocated memory but will receive errors
-		on memory allocation as	appropriate.
-
-overcommit_ratio
-----------------
-
-Percentage of physical memory size to include in overcommit calculations
-(see above.)
-
-Memory allocation limit = swapspace + physmem * (overcommit_ratio / 100)
-
-	swapspace = total size of all swap areas
-	physmem = size of physical memory in system
-
-nr_hugepages and hugetlb_shm_group
-----------------------------------
-
-nr_hugepages configures number of hugetlb page reserved for the system.
-
-hugetlb_shm_group contains group id that is allowed to create SysV shared
-memory segment using hugetlb page.
-
-hugepages_treat_as_movable
---------------------------
-
-This parameter is only useful when kernelcore= is specified at boot time to
-create ZONE_MOVABLE for pages that may be reclaimed or migrated. Huge pages
-are not movable so are not normally allocated from ZONE_MOVABLE. A non-zero
-value written to hugepages_treat_as_movable allows huge pages to be allocated
-from ZONE_MOVABLE.
-
-Once enabled, the ZONE_MOVABLE is treated as an area of memory the huge
-pages pool can easily grow or shrink within. Assuming that applications are
-not running that mlock() a lot of memory, it is likely the huge pages pool
-can grow to the size of ZONE_MOVABLE by repeatedly entering the desired value
-into nr_hugepages and triggering page reclaim.
-
-laptop_mode
------------
-
-laptop_mode is a knob that controls "laptop mode". All the things that are
-controlled by this knob are discussed in Documentation/laptops/laptop-mode.txt.
-
-block_dump
-----------
-
-block_dump enables block I/O debugging when set to a nonzero value. More
-information on block I/O debugging is in Documentation/laptops/laptop-mode.txt.
-
-swap_token_timeout
-------------------
-
-This file contains valid hold time of swap out protection token. The Linux
-VM has token based thrashing control mechanism and uses the token to prevent
-unnecessary page faults in thrashing situation. The unit of the value is
-second. The value would be useful to tune thrashing behavior.
-
-drop_caches
------------
-
-Writing to this will cause the kernel to drop clean caches, dentries and
-inodes from memory, causing that memory to become free.
-
-To free pagecache:
-	echo 1 > /proc/sys/vm/drop_caches
-To free dentries and inodes:
-	echo 2 > /proc/sys/vm/drop_caches
-To free pagecache, dentries and inodes:
-	echo 3 > /proc/sys/vm/drop_caches
-
-As this is a non-destructive operation and dirty objects are not freeable, the
-user should run `sync' first.
+Please see: Documentation/sysctls/vm.txt for a description of these
+entries.
 
 
 2.5 /proc/sys/dev - Device specific parameters
diff --git a/Documentation/filesystems/squashfs.txt b/Documentation/filesystems/squashfs.txt
new file mode 100644
index 0000000..3e79e4a
--- /dev/null
+++ b/Documentation/filesystems/squashfs.txt
@@ -0,0 +1,225 @@
+SQUASHFS 4.0 FILESYSTEM
+=======================
+
+Squashfs is a compressed read-only filesystem for Linux.
+It uses zlib compression to compress files, inodes and directories.
+Inodes in the system are very small and all blocks are packed to minimise
+data overhead. Block sizes greater than 4K are supported up to a maximum
+of 1 Mbyte (default block size 128K).
+
+Squashfs is intended for general read-only filesystem use, for archival
+use (i.e. in cases where a .tar.gz file may be used), and in constrained
+block device/memory systems (e.g. embedded systems) where low overhead is
+needed.
+
+Mailing list: squashfs-devel@lists.sourceforge.net
+Web site: www.squashfs.org
+
+1. FILESYSTEM FEATURES
+----------------------
+
+Squashfs filesystem features versus Cramfs:
+
+				Squashfs		Cramfs
+
+Max filesystem size:		2^64			16 MiB
+Max file size:			~ 2 TiB			16 MiB
+Max files:			unlimited		unlimited
+Max directories:		unlimited		unlimited
+Max entries per directory:	unlimited		unlimited
+Max block size:			1 MiB			4 KiB
+Metadata compression:		yes			no
+Directory indexes:		yes			no
+Sparse file support:		yes			no
+Tail-end packing (fragments):	yes			no
+Exportable (NFS etc.):		yes			no
+Hard link support:		yes			no
+"." and ".." in readdir:	yes			no
+Real inode numbers:		yes			no
+32-bit uids/gids:		yes			no
+File creation time:		yes			no
+Xattr and ACL support:		no			no
+
+Squashfs compresses data, inodes and directories.  In addition, inode and
+directory data are highly compacted, and packed on byte boundaries.  Each
+compressed inode is on average 8 bytes in length (the exact length varies with
+file type, i.e. regular file, directory, symbolic link, and block/char device
+inodes have different sizes).
+
+2. USING SQUASHFS
+-----------------
+
+As squashfs is a read-only filesystem, the mksquashfs program must be used to
+create populated squashfs filesystems.  This and other squashfs utilities
+can be obtained from http://www.squashfs.org.  Usage instructions can be
+obtained from this site also.
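+
+As an illustration only (check the mksquashfs documentation for the real
+option names and defaults), a filesystem is typically created and then
+mounted with something like:
+
+	mksquashfs /path/to/source-dir image.sqsh
+	mount -t squashfs image.sqsh /mnt -o loop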
+
+
+3. SQUASHFS FILESYSTEM DESIGN
+-----------------------------
+
+A squashfs filesystem consists of seven parts, packed together on a byte
+alignment:
+
+	 ---------------
+	|  superblock 	|
+	|---------------|
+	|  datablocks   |
+	|  & fragments  |
+	|---------------|
+	|  inode table	|
+	|---------------|
+	|   directory	|
+	|     table     |
+	|---------------|
+	|   fragment	|
+	|    table      |
+	|---------------|
+	|    export     |
+	|    table      |
+	|---------------|
+	|    uid/gid	|
+	|  lookup table	|
+	 ---------------
+
+Compressed data blocks are written to the filesystem as files are read from
+the source directory, and checked for duplicates.  Once all file data has been
+written the completed inode, directory, fragment, export and uid/gid lookup
+tables are written.
+
+3.1 Inodes
+----------
+
+Metadata (inodes and directories) are compressed in 8Kbyte blocks.  Each
+compressed block is prefixed by a two byte length; the top bit is set if the
+block is uncompressed.  A block will be uncompressed if the -noI option is set,
+or if the compressed block was larger than the uncompressed block.
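+
+A rough sketch of decoding this header (illustrative C only; the constant
+and function names are not the in-kernel ones):
+
+	#define METADATA_UNCOMPRESSED	0x8000	/* top bit of the header */
+
+	/* Split the 2-byte header into a length and a compressed flag. */
+	static void decode_metadata_header(unsigned short header,
+					   unsigned int *length,
+					   int *compressed)
+	{
+		*compressed = !(header & METADATA_UNCOMPRESSED);
+		*length = header & ~METADATA_UNCOMPRESSED;
+	}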
+
+Inodes are packed into the metadata blocks, and are not aligned to block
+boundaries; therefore inodes overlap compressed blocks.  Inodes are identified
+by a 48-bit number which encodes the location of the compressed metadata block
+containing the inode, and the byte offset into that block where the inode is
+placed (<block, offset>).
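+
+A minimal sketch of packing and unpacking such a reference (illustrative C;
+the 16-bit offset split and the helper names are assumptions, not the
+on-disk definitions):
+
+	/* Pack a metadata block location and byte offset into one value. */
+	static unsigned long long inode_ref(unsigned long long block,
+					    unsigned int offset)
+	{
+		return (block << 16) | offset;
+	}
+
+	static unsigned long long inode_ref_block(unsigned long long ref)
+	{
+		return ref >> 16;	/* start of the compressed block */
+	}
+
+	static unsigned int inode_ref_offset(unsigned long long ref)
+	{
+		return ref & 0xffff;	/* offset into the decompressed block */
+	}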
+
+To maximise compression there are different inodes for each file type
+(regular file, directory, device, etc.), the inode contents and length
+varying with the type.
+
+To further maximise compression, two types of regular file inode and
+directory inode are defined: inodes optimised for frequently occurring
+regular files and directories, and extended types where extra
+information has to be stored.
+
+3.2 Directories
+---------------
+
+Like inodes, directories are packed into compressed metadata blocks, stored
+in a directory table.  Directories are accessed using the start address of
+the metablock containing the directory and the offset into the
+decompressed block (<block, offset>).
+
+Directories are organised in a slightly complex way, and are not simply
+a list of file names.  The organisation takes advantage of the
+fact that (in most cases) the inodes of the files will be in the same
+compressed metadata block, and therefore, can share the start block.
+Directories are therefore organised in a two level list, a directory
+header containing the shared start block value, and a sequence of directory
+entries, each of which shares that start block.  A new directory header
+is written whenever the inode start block changes.  The directory
+header/directory entry list is repeated as many times as necessary.
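+
+Roughly, the two level layout looks like this (illustrative C; the structure
+names and field widths are assumptions, not the on-disk format):
+
+	/* Written whenever the inode start block changes. */
+	struct dir_header {
+		unsigned int	count;		/* entries that follow */
+		unsigned int	start_block;	/* shared inode start block */
+	};
+
+	/* Repeated 'count' times after each header. */
+	struct dir_entry {
+		unsigned short	offset;		/* inode offset in that block */
+		unsigned short	name_size;
+		char		name[];		/* filename */
+	};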
+
+Directories are sorted, and can contain a directory index to speed up
+file lookup.  Directory indexes store one entry per metablock, each entry
+storing the index/filename mapping to the first directory header
+in each metadata block.  Directories are sorted in alphabetical order,
+and at lookup the index is scanned linearly looking for the first filename
+alphabetically larger than the filename being looked up.  At this point the
+location of the metadata block the filename is in has been found.
+The general idea of the index is to ensure only one metadata block needs to be
+decompressed to do a lookup irrespective of the length of the directory.
+This scheme has the advantage that it doesn't require extra memory overhead
+and doesn't require much extra storage on disk.
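+
+The scan itself is simple; roughly (illustrative C, not the in-tree
+implementation):
+
+	#include <string.h>
+
+	struct dir_index_entry {
+		const char	*first_name;	/* first filename in the block */
+		unsigned int	block_start;	/* location of that block */
+	};
+
+	/* Return the metadata block that must hold 'name'. */
+	static unsigned int index_lookup(const struct dir_index_entry *index,
+					 int count, const char *name,
+					 unsigned int first_block)
+	{
+		unsigned int block = first_block;
+		int i;
+
+		for (i = 0; i < count; i++) {
+			if (strcmp(index[i].first_name, name) > 0)
+				break;
+			block = index[i].block_start;
+		}
+		return block;	/* only this block needs decompressing */
+	}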
+
+3.3 File data
+-------------
+
+Regular files consist of a sequence of contiguous compressed blocks, and/or a
+compressed fragment block (tail-end packed block).   The compressed size
+of each datablock is stored in a block list contained within the
+file inode.
+
+To speed up access to datablocks when reading 'large' files (256 Mbytes or
+larger), the code implements an index cache that caches the mapping from
+block index to datablock location on disk.
+
+The index cache allows Squashfs to handle large files (up to 1.75 TiB) while
+retaining a simple and space-efficient block list on disk.  The cache
+is split into slots, caching up to eight 224 GiB files (128 KiB blocks).
+Larger files use multiple slots, with 1.75 TiB files using all 8 slots.
+The index cache is designed to be memory efficient, and by default uses
+16 KiB.
+
+3.4 Fragment lookup table
+-------------------------
+
+Regular files can contain a fragment index which is mapped to a fragment
+location on disk and compressed size using a fragment lookup table.  This
+fragment lookup table is itself stored compressed into metadata blocks.
+A second index table is used to locate these.  For speed of access (and
+because it is small) this second index table is read at mount time and
+cached in memory.
+
+3.5 Uid/gid lookup table
+------------------------
+
+For space efficiency regular files store uid and gid indexes, which are
+converted to 32-bit uids/gids using an id look up table.  This table is
+stored compressed into metadata blocks.  A second index table is used to
+locate these.  For speed of access (and because it is small) this second
+index table is read at mount time and cached in memory.
+
+3.6 Export table
+----------------
+
+To enable Squashfs filesystems to be exportable (via NFS etc.) filesystems
+can optionally (disabled with the -no-exports Mksquashfs option) contain
+an inode number to inode disk location lookup table.  This is required to
+enable Squashfs to map inode numbers passed in filehandles to the inode
+location on disk, which is necessary when the export code reinstantiates
+expired/flushed inodes.
+
+This table is stored compressed into metadata blocks.  A second index table is
+used to locate these.  For speed of access (and because it is small) this
+second index table is read at mount time and cached in memory.
+
+
+4. TODOS AND OUTSTANDING ISSUES
+-------------------------------
+
+4.1 Todo list
+-------------
+
+Implement Xattr and ACL support.  The Squashfs 4.0 filesystem layout has hooks
+for these but the code has not been written.  Once the code has been written
+the existing layout should not require modification.
+
+4.2 Squashfs internal cache
+---------------------------
+
+Blocks in Squashfs are compressed.  To avoid repeatedly decompressing
+recently accessed data Squashfs uses two small metadata and fragment caches.
+
+The cache is not used for file datablocks; these are decompressed and cached in
+the page-cache in the normal way.  The cache is used to temporarily cache
+fragment and metadata blocks which have been read as a result of a metadata
+(i.e. inode or directory) or fragment access.  Because metadata and fragments
+are packed together into blocks (to gain greater compression), the read of a
+particular piece of metadata or fragment will retrieve other metadata/fragments
+which have been packed with it and which, because of locality of reference, may
+be read in the near future. Temporarily caching them ensures they are available
+for near-future access without requiring an additional read and decompress.
+
+In the future this internal cache may be replaced with an implementation which
+uses the kernel page cache.  Because the page cache operates on page sized
+units this may introduce additional complexity in terms of locking and
+associated race conditions.
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index ef19afa..deeeed0 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -210,8 +210,8 @@
         void (*put_super) (struct super_block *);
         void (*write_super) (struct super_block *);
         int (*sync_fs)(struct super_block *sb, int wait);
-        void (*write_super_lockfs) (struct super_block *);
-        void (*unlockfs) (struct super_block *);
+        int (*freeze_fs) (struct super_block *);
+        int (*unfreeze_fs) (struct super_block *);
         int (*statfs) (struct dentry *, struct kstatfs *);
         int (*remount_fs) (struct super_block *, int *, char *);
         void (*clear_inode) (struct inode *);
@@ -270,11 +270,11 @@
   	a superblock. The second parameter indicates whether the method
 	should wait until the write out has been completed. Optional.
 
-  write_super_lockfs: called when VFS is locking a filesystem and
+  freeze_fs: called when VFS is locking a filesystem and
   	forcing it into a consistent state.  This method is currently
   	used by the Logical Volume Manager (LVM).
 
-  unlockfs: called when VFS is unlocking a filesystem and making it writable
+  unfreeze_fs: called when VFS is unlocking a filesystem and making it writable
   	again.
 
   statfs: called when the VFS needs to get filesystem statistics. This
diff --git a/Documentation/hwmon/abituguru-datasheet b/Documentation/hwmon/abituguru-datasheet
index aef5a9b..d9251ef 100644
--- a/Documentation/hwmon/abituguru-datasheet
+++ b/Documentation/hwmon/abituguru-datasheet
@@ -74,7 +74,7 @@
 Notice that some banks have both a read and a write address this is how the
 uGuru determines if a read from or a write to the bank is taking place, thus
 when reading you should always use the read address and when writing the
-write address. The write address is always one (1) more then the read address.
+write address. The write address is always one (1) more than the read address.
 
 
 uGuru ready
@@ -121,7 +121,7 @@
 test for this. Notice that the number of bytes is bank address dependent see
 above and below.
 
-After completing a successfull read it is advised to put the uGuru back in
+After completing a successful read it is advised to put the uGuru back in
 ready mode, so that it is ready for the next read / write cycle. This way
 if your program / driver is unloaded and later loaded again the detection
 algorithm described above will still work.
@@ -141,7 +141,7 @@
 
 Once DATA holds 0x01 read CMD it should hold 0xAC now.
 
-After completing a successfull write it is advised to put the uGuru back in
+After completing a successful write it is advised to put the uGuru back in
 ready mode, so that it is ready for the next read / write cycle. This way
 if your program / driver is unloaded and later loaded again the detection
 algorithm described above will still work.
@@ -224,7 +224,7 @@
 Bit 4: 1 if alarm cause measured temp is over the warning threshold	(R)
 Bit 5: 1 if alarm cause measured volt is over the max threshold		(R)
 Bit 6: 1 if alarm cause measured volt is under the min threshold	(R)
-Bit 7: Volt sensor: Shutdown if alarm persist for more then 4 seconds	(RW)
+Bit 7: Volt sensor: Shutdown if alarm persists for more than 4 seconds	(RW)
        Temp sensor: Shutdown if temp is over the shutdown threshold	(RW)
 
 *  This bit is only honored/used by the uGuru if a temp sensor is connected
@@ -293,7 +293,7 @@
 Alarm behaviour for the selected sensor. A 1 enables the described behaviour.
 Bit 0: Give an alarm if measured rpm is under the min threshold	(RW)
 Bit 3: Beep if alarm						(RW)
-Bit 7: Shutdown if alarm persist for more then 4 seconds	(RW)
+Bit 7: Shutdown if alarm persists for more than 4 seconds	(RW)
 
 Byte 1:
 min threshold (scale as bank 0x26)
diff --git a/Documentation/hwmon/adt7475 b/Documentation/hwmon/adt7475
new file mode 100644
index 0000000..a2b1abe
--- /dev/null
+++ b/Documentation/hwmon/adt7475
@@ -0,0 +1,87 @@
+This describes the interface for the ADT7475 driver:
+
+(there are 4 fans, numbered fan1 to fan4):
+
+fanX_input		Read the current speed of the fan (in RPMs)
+fanX_min		Read/write the minimum speed of the fan.  Dropping
+			below this sets an alarm.
+
+(there are three PWMs, numbered pwm1 to pwm3):
+
+pwmX			Read/write the current duty cycle of the PWM.  Writes
+			only have effect when auto mode is turned off (see
+			below).  Range is 0 - 255.
+
+pwmX_enable		Fan speed control method:
+
+			0 - No control (fan at full speed)
+			1 - Manual fan speed control (using pwm[1-*])
+			2 - Automatic fan speed control
+
+pwmX_auto_channels_temp	Select which channels affect this PWM
+
+			1 - TEMP1 controls PWM
+			2 - TEMP2 controls PWM
+			4 - TEMP3 controls PWM
+			6 - TEMP2 and TEMP3 control PWM
+			7 - All three inputs control PWM
+
+pwmX_freq		Read/write the PWM frequency in Hz. The number
+			should be one of the following:
+
+			11 Hz
+			14 Hz
+			22 Hz
+			29 Hz
+			35 Hz
+			44 Hz
+			58 Hz
+			88 Hz
+
+pwmX_auto_point1_pwm	Read/write the minimum PWM duty cycle in automatic mode
+
+pwmX_auto_point2_pwm	Read/write the maximum PWM duty cycle in automatic mode
+
+(there are three temperature settings numbered temp1 to temp3):
+
+tempX_input		Read the current temperature.  The value is in
+			millidegrees Celsius.
+
+tempX_max		Read/write the upper temperature limit - exceeding this
+			will cause an alarm.
+
+tempX_min		Read/write the lower temperature limit - dropping below
+			this will cause an alarm.
+
+tempX_offset		Read/write the temperature adjustment offset
+
+tempX_crit		Read/write the THERM limit for remote1.
+
+tempX_crit_hyst		Set the temperature value below crit where the
+			fans will stay on - this helps drive the temperature
+			low enough so it doesn't stay near the edge and
+			cause THERM to keep tripping.
+
+tempX_auto_point1_temp	Read/write the minimum temperature where the fans will
+			turn on in automatic mode.
+
+tempX_auto_point2_temp	Read/write the maximum temperature over which the fans
+			will run in automatic mode.  tempX_auto_point1_temp
+			and tempX_auto_point2_temp together define the
+			range of automatic control.
+
+tempX_alarm		Read a 1 if the max/min alarm is set
+tempX_fault		Read a 1 if either temp1 or temp3 diode has a fault
+
+(There are two voltage settings, in1 and in2):
+
+inX_input		Read the current voltage on VCC.  Value is in
+			millivolts.
+
+inX_min			Read/write the minimum voltage limit.
+			Dropping below this causes an alarm.
+
+inX_max			Read/write the maximum voltage limit.
+			Exceeding this causes an alarm.
+
+inX_alarm		Read a 1 if the max/min alarm is set.
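+
+Example (illustrative only - run from the device's sysfs directory, whose
+path varies from system to system): put the first PWM output under manual
+control at roughly 50% duty cycle, using the pwm1_enable and pwm1
+attributes described above:
+
+	# echo 1 > pwm1_enable
+	# echo 128 > pwm1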
diff --git a/Documentation/hwmon/f71882fg b/Documentation/hwmon/f71882fg
new file mode 100644
index 0000000..a832126
--- /dev/null
+++ b/Documentation/hwmon/f71882fg
@@ -0,0 +1,89 @@
+Kernel driver f71882fg
+======================
+
+Supported chips:
+  * Fintek F71882FG and F71883FG
+    Prefix: 'f71882fg'
+    Addresses scanned: none, address read from Super I/O config space
+    Datasheet: Available from the Fintek website
+  * Fintek F71862FG and F71863FG
+    Prefix: 'f71862fg'
+    Addresses scanned: none, address read from Super I/O config space
+    Datasheet: Available from the Fintek website
+  * Fintek F8000
+    Prefix: 'f8000'
+    Addresses scanned: none, address read from Super I/O config space
+    Datasheet: Not public
+
+Author: Hans de Goede <hdegoede@redhat.com>
+
+
+Description
+-----------
+
+Fintek F718xxFG/F8000 Super I/O chips include complete hardware monitoring
+capabilities. They can monitor up to 9 voltages (3 for the F8000), 4 fans and
+3 temperature sensors.
+
+These chips also have fan controlling features, using either DC or PWM, in
+three different modes (one manual, two automatic).
+
+The driver assumes that no more than one chip is present, which seems
+reasonable.
+
+
+Monitoring
+----------
+
+The Voltage, Fan and Temperature Monitoring uses the standard sysfs
+interface as documented in sysfs-interface, without any exceptions.
+
+
+Fan Control
+-----------
+
+Both PWM (pulse-width modulation) and DC fan speed control methods are
+supported. The right one to use depends on external circuitry on the
+motherboard, so the driver assumes that the BIOS set the method
+properly.
+
+There are 2 modes for specifying the speed of the fan: PWM duty cycle (or DC
+voltage) mode, where a 0-100% duty cycle (0-100% of 12V) is specified, and RPM
+mode, where the actual measured RPM of the fan is controlled and the speed is
+specified as 0-100% of the fan#_full_speed file.
+
+Since both modes work on a 0-100% (mapped to 0-255) scale, there isn't much
+difference when modifying fan control settings. The only important difference
+is that in RPM mode the 0-100% controls the fan speed between 0-100% of
+fan#_full_speed. It is assumed that if the BIOS programs RPM mode, it will
+also set fan#_full_speed properly; if it does not, fan control will not work
+properly unless you set a sane fan#_full_speed value yourself.
+
+Switching between these modes requires re-initializing a whole bunch of
+registers, so the mode which the BIOS has set is kept. The mode is
+printed when loading the driver.
+
+Three different fan control modes are supported; the mode number is written
+to the pwm#_enable file. Note that not all modes are supported on all
+chips, and some modes may only be available in RPM / PWM mode on the F8000.
+Writing an unsupported mode will result in an invalid parameter error.
+
+* 1: Manual mode
+  You ask for a specific PWM duty cycle / DC voltage or a specific % of
+  fan#_full_speed by writing to the pwm# file. This mode is only
+  available on the F8000 if the fan channel is in RPM mode.
+
+* 2: Normal auto mode
+  You can define a number of temperature/fan speed trip points, i.e. which %
+  the fan should run at for a given temperature and which temperature a fan
+  should follow, using the standard sysfs interface. The number and type of
+  trip points is chip dependent; see which files are available in sysfs.
+  Fan/PWM channel 3 of the F8000 is always in this mode!
+
+* 3: Thermostat mode (Only available on the F8000 when in duty cycle mode)
+  The fan speed is regulated to keep the temp the fan is mapped to between
+  temp#_auto_point2_temp and temp#_auto_point3_temp.
+
+Both of the automatic modes require that pwm1 corresponds to fan1, pwm2 to
+fan2 and pwm3 to fan3.
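+
+Example (illustrative only - run from the device's sysfs directory): check
+which mode the BIOS left the first fan channel in, then switch it to normal
+auto mode:
+
+	# cat pwm1_enable
+	# echo 2 > pwm1_enable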
diff --git a/Documentation/hwmon/it87 b/Documentation/hwmon/it87
index 042c041..659315d 100644
--- a/Documentation/hwmon/it87
+++ b/Documentation/hwmon/it87
@@ -26,6 +26,10 @@
     Datasheet: Publicly available at the ITE website
                http://www.ite.com.tw/product_info/file/pc/IT8718F_V0.2.zip
                http://www.ite.com.tw/product_info/file/pc/IT8718F_V0%203_(for%20C%20version).zip
+  * IT8720F
+    Prefix: 'it8720'
+    Addresses scanned: from Super I/O config space (8 I/O ports)
+    Datasheet: Not yet publicly available.
   * SiS950   [clone of IT8705F]
     Prefix: 'it87'
     Addresses scanned: from Super I/O config space (8 I/O ports)
@@ -71,7 +75,7 @@
 -----------
 
 This driver implements support for the IT8705F, IT8712F, IT8716F,
-IT8718F, IT8726F and SiS950 chips.
+IT8718F, IT8720F, IT8726F and SiS950 chips.
 
 These chips are 'Super I/O chips', supporting floppy disks, infrared ports,
 joysticks and other miscellaneous stuff. For hardware monitoring, they
@@ -84,19 +88,19 @@
 though, so the functionality may not be available on a given system.
 The driver dumbly assume it is there.
 
-The IT8718F also features VID inputs (up to 8 pins) but the value is
-stored in the Super-I/O configuration space. Due to technical limitations,
+The IT8718F and IT8720F also features VID inputs (up to 8 pins) but the value
+is stored in the Super-I/O configuration space. Due to technical limitations,
 this value can currently only be read once at initialization time, so
 the driver won't notice and report changes in the VID value. The two
 upper VID bits share their pins with voltage inputs (in5 and in6) so you
 can't have both on a given board.
 
-The IT8716F, IT8718F and later IT8712F revisions have support for
+The IT8716F, IT8718F, IT8720F and later IT8712F revisions have support for
 2 additional fans. The additional fans are supported by the driver.
 
-The IT8716F and IT8718F, and late IT8712F and IT8705F also have optional
-16-bit tachometer counters for fans 1 to 3. This is better (no more fan
-clock divider mess) but not compatible with the older chips and
+The IT8716F, IT8718F and IT8720F, and late IT8712F and IT8705F also have
+optional 16-bit tachometer counters for fans 1 to 3. This is better (no more
+fan clock divider mess) but not compatible with the older chips and
 revisions. The 16-bit tachometer mode is enabled by the driver when one
 of the above chips is detected.
 
@@ -122,7 +126,7 @@
 inputs can measure voltages between 0 and 4.08 volts, with a resolution of
 0.016 volt. The battery voltage in8 does not have limit registers.
 
-The VID lines (IT8712F/IT8716F/IT8718F) encode the core voltage value:
+The VID lines (IT8712F/IT8716F/IT8718F/IT8720F) encode the core voltage value:
 the voltage level your processor should work with. This is hardcoded by
 the mainboard and/or processor itself. It is a value in volts.
 
diff --git a/Documentation/hwmon/lis3lv02d b/Documentation/hwmon/lis3lv02d
index 65dfb0c..0fcfc4a 100644
--- a/Documentation/hwmon/lis3lv02d
+++ b/Documentation/hwmon/lis3lv02d
@@ -13,18 +13,21 @@
 Description
 -----------
 
-This driver provides support for the accelerometer found in various HP laptops
-sporting the feature officially called "HP Mobile Data Protection System 3D" or
-"HP 3D DriveGuard". It detect automatically laptops with this sensor. Known models
-(for now the HP 2133, nc6420, nc2510, nc8510, nc84x0, nw9440 and nx9420) will
-have their axis automatically oriented on standard way (eg: you can directly
-play neverball).  The accelerometer data is readable via
+This driver provides support for the accelerometer found in various HP
+laptops sporting the feature officially called "HP Mobile Data
+Protection System 3D" or "HP 3D DriveGuard". It automatically detects
+laptops with this sensor. Known models (for now the HP 2133, nc6420,
+nc2510, nc8510, nc84x0, nw9440 and nx9420) will have their axes
+automatically oriented the standard way (e.g. you can directly play
+neverball).  The accelerometer data is readable via
 /sys/devices/platform/lis3lv02d.
 
 Sysfs attributes under /sys/devices/platform/lis3lv02d/:
 position - 3D position that the accelerometer reports. Format: "(x,y,z)"
-calibrate - read: values (x, y, z) that are used as the base for input class device operation.
-            write: forces the base to be recalibrated with the current position.
+calibrate - read: values (x, y, z) that are used as the base for input
+		  class device operation.
+            write: forces the base to be recalibrated with the current
+		  position.
 rate - reports the sampling rate of the accelerometer device in HZ
 
 This driver also provides an absolute input class device, allowing
@@ -39,11 +42,12 @@
  * When the laptop is horizontal the position reported is about 0 for X and Y
 and a positive value for Z
  * If the left side is elevated, X increases (becomes positive)
- * If the front side (where the touchpad is) is elevated, Y decreases (becomes negative)
+ * If the front side (where the touchpad is) is elevated, Y decreases
+	(becomes negative)
  * If the laptop is put upside-down, Z becomes negative
 
-If your laptop model is not recognized (cf "dmesg"), you can send an email to the
-authors to add it to the database.  When reporting a new laptop, please include
-the output of "dmidecode" plus the value of /sys/devices/platform/lis3lv02d/position
-in these four cases.
+If your laptop model is not recognized (cf "dmesg"), you can send an
+email to the authors to add it to the database.  When reporting a new
+laptop, please include the output of "dmidecode" plus the value of
+/sys/devices/platform/lis3lv02d/position in these four cases.
 
diff --git a/Documentation/hwmon/lm70 b/Documentation/hwmon/lm70
index 2bdd3fe..0d24029 100644
--- a/Documentation/hwmon/lm70
+++ b/Documentation/hwmon/lm70
@@ -1,9 +1,11 @@
 Kernel driver lm70
 ==================
 
-Supported chip:
+Supported chips:
   * National Semiconductor LM70
     Datasheet: http://www.national.com/pf/LM/LM70.html
+  * Texas Instruments TMP121/TMP123
+    Information: http://focus.ti.com/docs/prod/folders/print/tmp121.html
 
 Author:
         Kaiwan N Billimoria <kaiwan@designergraphix.com>
@@ -25,6 +27,14 @@
 driver for interpretation. This driver makes use of the kernel's in-core
 SPI support.
 
+As a real (in-tree) example of this "SPI protocol driver" interfacing
+with a "SPI master controller driver", see drivers/spi/spi_lm70llp.c
+and its associated documentation.
+
+The TMP121/TMP123 are very similar; the main differences are a 4-wire SPI
+interface (read only) and 13-bit temperature data (0.0625 degrees Celsius
+resolution).
+
 Thanks to
 ---------
 Jean Delvare <khali@linux-fr.org> for mentoring the hwmon-side driver
diff --git a/Documentation/hwmon/lm85 b/Documentation/hwmon/lm85
index 4006207..a136808 100644
--- a/Documentation/hwmon/lm85
+++ b/Documentation/hwmon/lm85
@@ -164,7 +164,7 @@
                       temperature. (PWM value from 0 to 255)
 
 * pwm#_auto_pwm_minctl - this flags selects for temp#_auto_temp_off temperature
-                         the bahaviour of fans. Write 1 to let fans spinning at
+                         the behaviour of fans. Write 1 to let fans spinning at
 			 pwm#_auto_pwm_min or write 0 to let them off.
 
 NOTE: It has been reported that there is a bug in the LM85 that causes the flag
diff --git a/Documentation/hwmon/ltc4245 b/Documentation/hwmon/ltc4245
new file mode 100644
index 0000000..bae7a3a
--- /dev/null
+++ b/Documentation/hwmon/ltc4245
@@ -0,0 +1,81 @@
+Kernel driver ltc4245
+=====================
+
+Supported chips:
+  * Linear Technology LTC4245
+    Prefix: 'ltc4245'
+    Addresses scanned: 0x20-0x3f
+    Datasheet:
+        http://www.linear.com/pc/downloadDocument.do?navId=H0,C1,C1003,C1006,C1140,P19392,D13517
+
+Author: Ira W. Snyder <iws@ovro.caltech.edu>
+
+
+Description
+-----------
+
+The LTC4245 controller allows a board to be safely inserted and removed
+from a live backplane in multiple supply systems such as CompactPCI and
+PCI Express.
+
+
+Usage Notes
+-----------
+
+This driver does not probe for LTC4245 devices, because some of the possible
+addresses are unfriendly to probing. You will need to use
+the "force" parameter to tell the driver where to find the device.
+
+Example: the following will load the driver for an LTC4245 at address 0x23
+on I2C bus #1:
+$ modprobe ltc4245 force=1,0x23
+
+
+Sysfs entries
+-------------
+
+The LTC4245 has built-in limits for over and under current warnings. This
+makes it very likely that the reference circuit will be used.
+
+This driver uses the values in the datasheet to change the register values
+into the values specified in the sysfs-interface document. The current readings
+rely on the sense resistors listed in Table 2: "Sense Resistor Values".
+
+in1_input		12v input voltage (mV)
+in2_input		5v  input voltage (mV)
+in3_input		3v  input voltage (mV)
+in4_input		Vee (-12v) input voltage (mV)
+
+in1_min_alarm		12v input undervoltage alarm
+in2_min_alarm		5v  input undervoltage alarm
+in3_min_alarm		3v  input undervoltage alarm
+in4_min_alarm		Vee (-12v) input undervoltage alarm
+
+curr1_input		12v current (mA)
+curr2_input		5v  current (mA)
+curr3_input		3v  current (mA)
+curr4_input		Vee (-12v) current (mA)
+
+curr1_max_alarm		12v overcurrent alarm
+curr2_max_alarm		5v  overcurrent alarm
+curr3_max_alarm		3v  overcurrent alarm
+curr4_max_alarm		Vee (-12v) overcurrent alarm
+
+in5_input		12v output voltage (mV)
+in6_input		5v  output voltage (mV)
+in7_input		3v  output voltage (mV)
+in8_input		Vee (-12v) output voltage (mV)
+
+in5_min_alarm		12v output undervoltage alarm
+in6_min_alarm		5v  output undervoltage alarm
+in7_min_alarm		3v  output undervoltage alarm
+in8_min_alarm		Vee (-12v) output undervoltage alarm
+
+in9_input		GPIO #1 voltage data
+in10_input		GPIO #2 voltage data
+in11_input		GPIO #3 voltage data
+
+power1_input		12v power usage (mW)
+power2_input		5v  power usage (mW)
+power3_input		3v  power usage (mW)
+power4_input		Vee (-12v) power usage (mW)
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index 5177184..923f9dd 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -124,3 +124,10 @@
 --------------------------------------------------
 For modules use symbols from another modules.
 See more details in modules.txt.
+
+ALLSOURCE_ARCHS
+--------------------------------------------------
+For tags/TAGS/cscope targets, you can specify more than one arch to be
+included in the databases, separated by blank space, e.g.:
+
+    $ make ALLSOURCE_ARCHS="x86 mips arm" tags
diff --git a/Documentation/kbuild/modules.txt b/Documentation/kbuild/modules.txt
index 1821c07..b1096da 100644
--- a/Documentation/kbuild/modules.txt
+++ b/Documentation/kbuild/modules.txt
@@ -253,7 +253,7 @@
 
 		# Module specific targets
 		genbin:
-			echo "X" > 8123_bin_shipped
+			echo "X" > 8123_bin.o_shipped
 
 
 	In example 2, we are down to two fairly simple files and for simple
@@ -279,7 +279,7 @@
 
 		# Module specific targets
 		genbin:
-			echo "X" > 8123_bin_shipped
+			echo "X" > 8123_bin.o_shipped
 
 		endif
 
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 0b3f671..8511d35 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -91,6 +91,7 @@
 	SUSPEND	System suspend states are enabled.
 	FTRACE	Function tracing enabled.
 	TS	Appropriate touchscreen support is enabled.
+	UMS	USB Mass Storage support is enabled.
 	USB	USB support is enabled.
 	USBHID	USB Human Interface Device support is enabled.
 	V4L	Video For Linux support is enabled.
@@ -140,6 +141,7 @@
 			ht -- run only enough ACPI to enable Hyper Threading
 			strict -- Be less tolerant of platforms that are not
 				strictly ACPI specification compliant.
+			rsdt -- prefer RSDT over (default) XSDT
 
 			See also Documentation/power/pm.txt, pci=noacpi
 
@@ -150,16 +152,20 @@
 			default: 0
 
 	acpi_sleep=	[HW,ACPI] Sleep options
-			Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig, old_ordering }
-			See Documentation/power/video.txt for s3_bios and s3_mode.
+			Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig,
+				  old_ordering, s4_nonvs }
+			See Documentation/power/video.txt for information on
+			s3_bios and s3_mode.
 			s3_beep is for debugging; it makes the PC's speaker beep
 			as soon as the kernel's real-mode entry point is called.
 			s4_nohwsig prevents ACPI hardware signature from being
 			used during resume from hibernation.
 			old_ordering causes the ACPI 1.0 ordering of the _PTS
-			control method, wrt putting devices into low power
-			states, to be enforced (the ACPI 2.0 ordering of _PTS is
-			used by default).
+			control method, with respect to putting devices into
+			low power states, to be enforced (the ACPI 2.0 ordering
+			of _PTS is used by default).
+			s4_nonvs prevents the kernel from saving/restoring the
+			ACPI NVS memory during hibernation.
 
 	acpi_sci=	[HW,ACPI] ACPI System Control Interrupt trigger mode
 			Format: { level | edge | high | low }
@@ -194,7 +200,7 @@
 	acpi_skip_timer_override [HW,ACPI]
 			Recognize and ignore IRQ0/pin2 Interrupt Override.
 			For broken nForce2 BIOS resulting in XT-PIC timer.
-	acpi_use_timer_override [HW,ACPI}
+	acpi_use_timer_override [HW,ACPI]
 			Use timer override. For some broken Nvidia NF5 boards
 			that require a timer override, but don't have
 			HPET
@@ -828,8 +834,8 @@
 
 	hlt		[BUGS=ARM,SH]
 
-	hvc_iucv=	[S390] Number of z/VM IUCV Hypervisor console (HVC)
-			       back-ends. Valid parameters: 0..8
+	hvc_iucv=	[S390] Number of z/VM IUCV hypervisor console (HVC)
+			       terminal devices. Valid values: 0..8
 
 	i8042.debug	[HW] Toggle i8042 debug mode
 	i8042.direct	[HW] Put keyboard port into non-translated mode
@@ -877,17 +883,19 @@
 			See Documentation/ide/ide.txt.
 
 	idle=		[X86]
-			Format: idle=poll or idle=mwait, idle=halt, idle=nomwait
-			Poll forces a polling idle loop that can slightly improves the performance
-			of waking up a idle CPU, but will use a lot of power and make the system
-			run hot. Not recommended.
-			idle=mwait. On systems which support MONITOR/MWAIT but the kernel chose
-			to not use it because it doesn't save as much power as a normal idle
-			loop use the MONITOR/MWAIT idle loop anyways. Performance should be the same
-			as idle=poll.
-			idle=halt. Halt is forced to be used for CPU idle.
+			Format: idle=poll, idle=mwait, idle=halt, idle=nomwait
+			Poll forces a polling idle loop that can slightly
+			improve the performance of waking up an idle CPU, but
+			will use a lot of power and make the system run hot.
+			Not recommended.
+			idle=mwait: On systems which support MONITOR/MWAIT but
+			the kernel chose to not use it because it doesn't save
+			as much power as a normal idle loop, use the
+			MONITOR/MWAIT idle loop anyways. Performance should be
+			the same as idle=poll.
+			idle=halt: Halt is forced to be used for CPU idle.
 			In such case C2/C3 won't be used again.
-			idle=nomwait. Disable mwait for CPU C-states
+			idle=nomwait: Disable mwait for CPU C-states
 
 	ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem
 			Claim all unknown PCI IDE storage controllers.
@@ -918,6 +926,10 @@
 
 	inttest=	[IA64]
 
+	iomem=		Disable strict checking of access to MMIO memory
+		strict	regions from userspace.
+		relaxed
+
 	iommu=		[x86]
 		off
 		force
@@ -1069,8 +1081,8 @@
 	lapic		[X86-32,APIC] Enable the local APIC even if BIOS
 			disabled it.
 
-	lapic_timer_c2_ok	[X86-32,x86-64,APIC] trust the local apic timer in
-			C2 power state.
+	lapic_timer_c2_ok	[X86-32,x86-64,APIC] trust the local apic timer
+			in C2 power state.
 
 	libata.dma=	[LIBATA] DMA control
 			libata.dma=0	  Disable all PATA and SATA DMA
@@ -1557,6 +1569,9 @@
 
 	nosoftlockup	[KNL] Disable the soft-lockup detector.
 
+	noswapaccount	[KNL] Disable accounting of swap in memory resource
+			controller. (See Documentation/controllers/memory.txt)
+
 	nosync		[HW,M68K] Disables sync negotiation for all devices.
 
 	notsc		[BUGS=X86-32] Disable Time Stamp Counter
@@ -2295,7 +2310,8 @@
 
 	thermal.psv=	[HW,ACPI]
 			-1: disable all passive trip points
-			<degrees C>: override all passive trip points to this value
+			<degrees C>: override all passive trip points to this
+			value
 
 	thermal.tzp=	[HW,ACPI]
 			Specify global default ACPI thermal zone polling rate
@@ -2383,6 +2399,41 @@
 	usbhid.mousepoll=
 			[USBHID] The interval which mice are to be polled at.
 
+	usb-storage.delay_use=
+			[UMS] The delay in seconds before a new device is
+			scanned for Logical Units (default 5).
+
+	usb-storage.quirks=
+			[UMS] A list of quirks entries to supplement or
+			override the built-in unusual_devs list.  List
+			entries are separated by commas.  Each entry has
+			the form VID:PID:Flags where VID and PID are Vendor
+			and Product ID values (4-digit hex numbers) and
+			Flags is a set of characters, each corresponding
+			to a common usb-storage quirk flag as follows:
+				a = SANE_SENSE (collect more than 18 bytes
+					of sense data);
+				c = FIX_CAPACITY (decrease the reported
+					device capacity by one sector);
+				h = CAPACITY_HEURISTICS (decrease the
+					reported device capacity by one
+					sector if the number is odd);
+				i = IGNORE_DEVICE (don't bind to this
+					device);
+				l = NOT_LOCKABLE (don't try to lock and
+					unlock ejectable media);
+				m = MAX_SECTORS_64 (don't transfer more
+					than 64 sectors = 32 KB at a time);
+				o = CAPACITY_OK (accept the capacity
+					reported by the device);
+				r = IGNORE_RESIDUE (the device reports
+					bogus residue values);
+				s = SINGLE_LUN (the device has only one
+					Logical Unit);
+				w = NO_WP_DETECT (don't test whether the
+					medium is write-protected).
+			Example: quirks=0419:aaf5:rl,0421:0433:rc
+
 	add_efi_memmap	[EFI; x86-32,X86-64] Include EFI memory map in
 			kernel's map of available physical RAM.
 
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index 71f0fe1..41bc99f 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -1,7 +1,7 @@
 		     ThinkPad ACPI Extras Driver
 
-                            Version 0.21
-                           May 29th, 2008
+                            Version 0.22
+                        November 23rd,  2008
 
                Borislav Deianov <borislav@users.sf.net>
              Henrique de Moraes Holschuh <hmh@hmh.eng.br>
@@ -16,7 +16,8 @@
 This driver used to be named ibm-acpi until kernel 2.6.21 and release
 0.13-20070314.  It used to be in the drivers/acpi tree, but it was
 moved to the drivers/misc tree and renamed to thinkpad-acpi for kernel
-2.6.22, and release 0.14.
+2.6.22, and release 0.14.  It was moved to drivers/platform/x86 for
+kernel 2.6.29 and release 0.22.
 
 The driver is named "thinkpad-acpi".  In some places, like module
 names, "thinkpad_acpi" is used because of userspace issues.
@@ -1412,6 +1413,24 @@
 	rfkill controller switch "tpacpi_wwan_sw": refer to
 	Documentation/rfkill.txt for details.
 
+EXPERIMENTAL: UWB
+-----------------
+
+This feature is marked EXPERIMENTAL because it has not been extensively
+tested and validated in various ThinkPad models yet.  The feature may not
+work as expected. USE WITH CAUTION! To use this feature, you need to supply
+the experimental=1 parameter when loading the module.
+
+sysfs rfkill class: switch "tpacpi_uwb_sw"
+
+This feature exports an rfkill controller for the UWB device, if one is
+present and enabled in the BIOS.
+
+Sysfs notes:
+
+	rfkill controller switch "tpacpi_uwb_sw": refer to
+	Documentation/rfkill.txt for details.
+
 Multiple Commands, Module Parameters
 ------------------------------------
 
@@ -1475,7 +1494,7 @@
 
 0x020100:	Marker for thinkpad-acpi with hot key NVRAM polling
 		support.  If you must, use it to know you should not
-		start an userspace NVRAM poller (allows to detect when
+		start a userspace NVRAM poller (allows to detect when
 		NVRAM is compiled out by the user because it is
 		unneeded/undesired in the first place).
 0x020101:	Marker for thinkpad-acpi with hot key NVRAM polling
diff --git a/Documentation/mips/AU1xxx_IDE.README b/Documentation/mips/AU1xxx_IDE.README
index f54962a..8ace35e 100644
--- a/Documentation/mips/AU1xxx_IDE.README
+++ b/Documentation/mips/AU1xxx_IDE.README
@@ -52,14 +52,12 @@
   b) 'drivers/ide/mips/au1xxx-ide.c'
      contains the functionality of the AU1XXX IDE driver
 
-Four configs variables are introduced:
+The following extra config variables are introduced:
 
   CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA    - enable the PIO+DBDMA mode
   CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA  - enable the MWDMA mode
   CONFIG_BLK_DEV_IDE_AU1XXX_BURSTABLE_ON - set Burstable FIFO in DBDMA
                                            controller
-  CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ - maximum transfer size
-                                           per descriptor
 
 
 SUPPORTED IDE MODES
@@ -87,7 +85,6 @@
 CONFIG_IDEDMA_PCI_AUTO=y
 CONFIG_BLK_DEV_IDE_AU1XXX=y
 CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA=y
-CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ=128
 CONFIG_BLK_DEV_IDEDMA=y
 CONFIG_IDEDMA_AUTO=y
 
@@ -105,7 +102,6 @@
 CONFIG_IDEDMA_PCI_AUTO=y
 CONFIG_BLK_DEV_IDE_AU1XXX=y
 CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA=y
-CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ=128
 CONFIG_BLK_DEV_IDEDMA=y
 CONFIG_IDEDMA_AUTO=y
 
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index c3669a3..60d05eb 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -540,7 +540,7 @@
      MSG_MORE should be set in msghdr::msg_flags on all but the last part of
      the request.  Multiple requests may be made simultaneously.
 
-     If a call is intended to go to a destination other then the default
+     If a call is intended to go to a destination other than the default
      specified through connect(), then msghdr::msg_name should be set on the
      first request message of that call.
 
diff --git a/Documentation/networking/tuntap.txt b/Documentation/networking/tuntap.txt
index 839cbb7..c0aab98 100644
--- a/Documentation/networking/tuntap.txt
+++ b/Documentation/networking/tuntap.txt
@@ -118,7 +118,7 @@
 It is used by VTun (http://vtun.sourceforge.net).
 
 Another interesting application using TUN/TAP is pipsecd
-(http://perso.enst.fr/~beyssac/pipsec/), an userspace IPSec
+(http://perso.enst.fr/~beyssac/pipsec/), a userspace IPSec
 implementation that can use complete kernel routing (unlike FreeS/WAN).
 
 3. How does Virtual network device actually work ? 
diff --git a/Documentation/nommu-mmap.txt b/Documentation/nommu-mmap.txt
index 7714f57..b565e82 100644
--- a/Documentation/nommu-mmap.txt
+++ b/Documentation/nommu-mmap.txt
@@ -109,12 +109,18 @@
 FURTHER NOTES ON NO-MMU MMAP
 ============================
 
- (*) A request for a private mapping of less than a page in size may not return
-     a page-aligned buffer. This is because the kernel calls kmalloc() to
-     allocate the buffer, not get_free_page().
+ (*) A request for a private mapping of a file may return a buffer that is not
+     page-aligned.  This is because XIP may take place, and the data may not be
+     page-aligned in the backing store.
 
- (*) A list of all the mappings on the system is visible through /proc/maps in
-     no-MMU mode.
+ (*) A request for an anonymous mapping will always be page aligned.  If
+     possible, the size of the request should be a power of two; otherwise
+     some of the space may be wasted, as the kernel must allocate a
+     power-of-2 granule but will only discard the excess if appropriately
+     configured (this has an effect on fragmentation).
+
+ (*) A list of all the private copy and anonymous mappings on the system is
+     visible through /proc/maps in no-MMU mode.
 
  (*) A list of all the mappings in use by a process is visible through
      /proc/<pid>/maps in no-MMU mode.
@@ -242,3 +248,18 @@
 Provision of shared mappings on block device files is exactly the same as for
 character devices. If there isn't a real device underneath, then the driver
 should allocate sufficient contiguous memory to honour any supported mapping.
+
+
+=================================
+ADJUSTING PAGE TRIMMING BEHAVIOUR
+=================================
+
+NOMMU mmap automatically rounds up to the nearest power-of-2 number of pages
+when performing an allocation.  This can have adverse effects on memory
+fragmentation, and as such, is left configurable.  The default behaviour is to
+aggressively trim allocations and discard any excess pages back into the page
+allocator.  In order to retain finer-grained control over fragmentation, this
+behaviour can either be disabled completely, or bumped up to a higher page
+watermark where trimming begins.
+
+Page trimming behaviour is configurable via the sysctl `vm.nr_trim_pages'.
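+
+For example (the values shown are only illustrative), trimming can be
+disabled entirely, or the watermark raised so that trimming only starts once
+the excess exceeds a larger number of pages:
+
+	echo 0 > /proc/sys/vm/nr_trim_pages	(never trim)
+	echo 64 > /proc/sys/vm/nr_trim_pages	(trim only larger excesses)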
diff --git a/Documentation/powerpc/dts-bindings/4xx/ndfc.txt b/Documentation/powerpc/dts-bindings/4xx/ndfc.txt
new file mode 100644
index 0000000..869f0b5
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/4xx/ndfc.txt
@@ -0,0 +1,39 @@
+AMCC NDFC (NAND Flash Controller)
+
+Required properties:
+- compatible : "ibm,ndfc".
+- reg : should specify chip select and size used for the chip (0x2000).
+
+Optional properties:
+- ccr : NDFC config and control register value (default 0).
+- bank-settings : NDFC bank configuration register value (default 0).
+
+Notes:
+- partition(s) - follows the OF MTD standard for partitions
+
+Example:
+
+ndfc@1,0 {
+	compatible = "ibm,ndfc";
+	reg = <0x00000001 0x00000000 0x00002000>;
+	ccr = <0x00001000>;
+	bank-settings = <0x80002222>;
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	nand {
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partition@0 {
+			label = "kernel";
+			reg = <0x00000000 0x00200000>;
+		};
+		partition@200000 {
+			label = "root";
+			reg = <0x00200000 0x03E00000>;
+		};
+	};
+};
+
+
diff --git a/Documentation/powerpc/dts-bindings/fsl/board.txt b/Documentation/powerpc/dts-bindings/fsl/board.txt
index 81a917e..6c974d2 100644
--- a/Documentation/powerpc/dts-bindings/fsl/board.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/board.txt
@@ -18,7 +18,7 @@
 
 Required properities:
 - compatible : should be "fsl,fpga-pixis".
-- reg : should contain the address and the lenght of the FPPGA register
+- reg : should contain the address and the length of the FPGA register
   set.
 
 Example (MPC8610HPCD):
@@ -27,3 +27,33 @@
 		compatible = "fsl,fpga-pixis";
 		reg = <0xe8000000 32>;
 	};
+
+* Freescale BCSR GPIO banks
+
+Some BCSR registers act as simple GPIO controllers, each such
+register can be represented by the gpio-controller node.
+
+Required properties:
+- compatible : Should be "fsl,<board>-bcsr-gpio".
+- reg : Should contain the address and the length of the GPIO bank
+  register.
+- #gpio-cells : Should be two. The first cell is the pin number and the
+  second cell is used to specify optional parameters (currently unused).
+- gpio-controller : Marks the port as GPIO controller.
+
+Example:
+
+	bcsr@1,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "fsl,mpc8360mds-bcsr";
+		reg = <1 0 0x8000>;
+		ranges = <0 1 0 0x8000>;
+
+		bcsr13: gpio-controller@d {
+			#gpio-cells = <2>;
+			compatible = "fsl,mpc8360mds-bcsr-gpio";
+			reg = <0xd 1>;
+			gpio-controller;
+		};
+	};
diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index 8398ca4..6f33593 100644
--- a/Documentation/scheduler/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
@@ -231,7 +231,7 @@
 
    This options needs CONFIG_CGROUPS to be defined, and lets the administrator
    create arbitrary groups of tasks, using the "cgroup" pseudo filesystem.  See
-   Documentation/cgroups.txt for more information about this filesystem.
+   Documentation/cgroups/cgroups.txt for more information about this filesystem.
 
 Only one of these options to group tasks can be chosen and not both.
 
diff --git a/Documentation/scsi/ChangeLog.lpfc b/Documentation/scsi/ChangeLog.lpfc
index ae3f962a..ff19a52 100644
--- a/Documentation/scsi/ChangeLog.lpfc
+++ b/Documentation/scsi/ChangeLog.lpfc
@@ -733,7 +733,7 @@
 	  I/O completion path a little more, especially taking care of
 	  fast-pathing the non-error case.  Also removes tons of dead
 	  members and defines from lpfc_scsi.h - e.g. lpfc_target is down
-	  to nothing more then the lpfc_nodelist pointer.
+	  to nothing more than the lpfc_nodelist pointer.
 	* Added binary sysfs file to issue mbox commands
 	* Replaced #if __BIG_ENDIAN with #if __BIG_ENDIAN_BITFIELD for
 	  compatibility with the user space applications.
diff --git a/Documentation/scsi/ChangeLog.ncr53c8xx b/Documentation/scsi/ChangeLog.ncr53c8xx
index a9f721a..8b278c1 100644
--- a/Documentation/scsi/ChangeLog.ncr53c8xx
+++ b/Documentation/scsi/ChangeLog.ncr53c8xx
@@ -19,7 +19,7 @@
 
 Wed Jul 26 23:30 2000 Gerard Roudier (groudier@club-internet.fr)
 	* version ncr53c8xx-3.4.1
-	- Provide OpenFirmare path through the proc FS on PPC.
+	- Provide OpenFirmware path through the proc FS on PPC.
 	- Remove trailing argument #2 from a couple of #undefs.
 
 Sun Jul 09 16:30 2000 Gerard Roudier (groudier@club-internet.fr)
diff --git a/Documentation/scsi/ChangeLog.sym53c8xx b/Documentation/scsi/ChangeLog.sym53c8xx
index ef985ec..02ffbc1 100644
--- a/Documentation/scsi/ChangeLog.sym53c8xx
+++ b/Documentation/scsi/ChangeLog.sym53c8xx
@@ -81,7 +81,7 @@
 
 Wed Jul 26 23:30 2000 Gerard Roudier (groudier@club-internet.fr)
 	* version sym53c8xx-1.7.1
-	- Provide OpenFirmare path through the proc FS on PPC.
+	- Provide OpenFirmware path through the proc FS on PPC.
 	- Download of on-chip SRAM using memcpy_toio() doesn't work 
 	  on PPC. Restore previous method (MEMORY MOVE from SCRIPTS).
 	- Remove trailing argument #2 from a couple of #undefs.
diff --git a/Documentation/scsi/scsi_fc_transport.txt b/Documentation/scsi/scsi_fc_transport.txt
index 38d324d6..e5b071d 100644
--- a/Documentation/scsi/scsi_fc_transport.txt
+++ b/Documentation/scsi/scsi_fc_transport.txt
@@ -191,7 +191,7 @@
       This is equivalent to a driver "attach" on an adapter, which is
       independent of the adapter's link state.
     - Instantiation of the vport on the FC link via ELS traffic, etc.
-      This is equivalent to a "link up" and successfull link initialization.
+      This is equivalent to a "link up" and successful link initialization.
   Further information can be found in the interfaces section below for
   Vport Creation.
 
@@ -320,7 +320,7 @@
       This is equivalent to a driver "attach" on an adapter, which is
       independent of the adapter's link state.
     - Instantiation of the vport on the FC link via ELS traffic, etc.
-      This is equivalent to a "link up" and successfull link initialization.
+      This is equivalent to a "link up" and successful link initialization.
 
   The LLDD's vport_create() function will not synchronously wait for both
   parts to be fully completed before returning. It must validate that the
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 4b7ac21..64eb110 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -275,7 +275,8 @@
   dell-m25	Dell Inspiron E1505n
   dell-m26	Dell Inspiron 1501
   dell-m27	Dell Inspiron E1705/9400
-  gateway	Gateway laptops with EAPD control
+  gateway-m4	Gateway laptops with EAPD control
+  gateway-m4-2	Gateway laptops with EAPD control
   panasonic	Panasonic CF-74
 
 STAC9205/9254
@@ -302,6 +303,7 @@
   macbook-pro	Intel Mac Book Pro 2nd generation (eq. type 3)
   imac-intel	Intel iMac (eq. type 2)
   imac-intel-20	Intel iMac (newer version) (eq. type 3)
+  ecs202	ECS/PC chips
   dell-d81	Dell (unknown)
   dell-d82	Dell (unknown)
   dell-m81	Dell (unknown)
@@ -310,9 +312,13 @@
 STAC9202/9250/9251
 ==================
   ref		Reference board, base config
+  m1		Some Gateway MX series laptops (NX560XL)
+  m1-2		Some Gateway MX series laptops (MX6453)
+  m2		Some Gateway MX series laptops (M255)
   m2-2		Some Gateway MX series laptops
+  m3		Some Gateway MX series laptops
+  m5		Some Gateway MX series laptops (MP6954)
   m6		Some Gateway NX series laptops
-  pa6		Gateway NX860 series
 
 STAC9227/9228/9229/927x
 =======================
@@ -329,6 +335,7 @@
   dell-m4-1	Dell desktops
   dell-m4-2	Dell desktops
   dell-m4-3	Dell desktops
+  hp-m4		HP dv laptops
 
 STAC92HD73*
 ===========
@@ -337,6 +344,7 @@
   dell-m6-amic	Dell desktops/laptops with analog mics
   dell-m6-dmic	Dell desktops/laptops with digital mics
   dell-m6	Dell desktops/laptops with both type of mics
+  dell-eq	Dell desktops/laptops
 
 STAC92HD83*
 ===========
diff --git a/Documentation/spi/spi-lm70llp b/Documentation/spi/spi-lm70llp
index 154bd02..34a9cfd 100644
--- a/Documentation/spi/spi-lm70llp
+++ b/Documentation/spi/spi-lm70llp
@@ -13,10 +13,20 @@
 This driver provides glue code connecting a National Semiconductor LM70 LLP
 temperature sensor evaluation board to the kernel's SPI core subsystem.
 
+This is a SPI master controller driver. It can be used in conjunction with
+(layered under) the LM70 logical driver (a "SPI protocol driver").
 In effect, this driver turns the parallel port interface on the eval board
 into a SPI bus with a single device, which will be driven by the generic
 LM70 driver (drivers/hwmon/lm70.c).
 
+
+Hardware Interfacing
+--------------------
+The schematic for this particular board (the LM70EVAL-LLP) is
+available (on page 4) here:
+
+  http://www.national.com/appinfo/tempsensors/files/LM70LLPEVALmanual.pdf
+
 The hardware interfacing on the LM70 LLP eval board is as follows:
 
    Parallel                 LM70 LLP
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index cd05994..3197fc8 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -1,12 +1,13 @@
-Documentation for /proc/sys/vm/*	kernel version 2.2.10
+Documentation for /proc/sys/vm/*	kernel version 2.6.29
 	(c) 1998, 1999,  Rik van Riel <riel@nl.linux.org>
+	(c) 2008         Peter W. Morreale <pmorreale@novell.com>
 
 For general info and legal blurb, please look in README.
 
 ==============================================================
 
 This file contains the documentation for the sysctl files in
-/proc/sys/vm and is valid for Linux kernel version 2.2.
+/proc/sys/vm and is valid for Linux kernel version 2.6.29.
 
 The files in this directory can be used to tune the operation
 of the virtual memory (VM) subsystem of the Linux kernel and
@@ -16,82 +17,244 @@
 files can be found in mm/swap.c.
 
 Currently, these files are in /proc/sys/vm:
-- overcommit_memory
-- page-cluster
-- dirty_ratio
+
+- block_dump
+- dirty_background_bytes
 - dirty_background_ratio
+- dirty_bytes
 - dirty_expire_centisecs
+- dirty_ratio
 - dirty_writeback_centisecs
-- highmem_is_dirtyable   (only if CONFIG_HIGHMEM set)
+- drop_caches
+- hugepages_treat_as_movable
+- hugetlb_shm_group
+- laptop_mode
+- legacy_va_layout
+- lowmem_reserve_ratio
 - max_map_count
 - min_free_kbytes
-- laptop_mode
-- block_dump
-- drop-caches
-- zone_reclaim_mode
-- min_unmapped_ratio
 - min_slab_ratio
-- panic_on_oom
-- oom_dump_tasks
-- oom_kill_allocating_task
-- mmap_min_address
-- numa_zonelist_order
+- min_unmapped_ratio
+- mmap_min_addr
 - nr_hugepages
 - nr_overcommit_hugepages
+- nr_pdflush_threads
+- nr_trim_pages         (only if CONFIG_MMU=n)
+- numa_zonelist_order
+- oom_dump_tasks
+- oom_kill_allocating_task
+- overcommit_memory
+- overcommit_ratio
+- page-cluster
+- panic_on_oom
+- percpu_pagelist_fraction
+- stat_interval
+- swappiness
+- vfs_cache_pressure
+- zone_reclaim_mode
+
 
 ==============================================================
 
-dirty_bytes, dirty_ratio, dirty_background_bytes,
-dirty_background_ratio, dirty_expire_centisecs,
-dirty_writeback_centisecs, highmem_is_dirtyable,
-vfs_cache_pressure, laptop_mode, block_dump, swap_token_timeout,
-drop-caches, hugepages_treat_as_movable:
+block_dump
 
-See Documentation/filesystems/proc.txt
+block_dump enables block I/O debugging when set to a nonzero value. More
+information on block I/O debugging is in Documentation/laptops/laptop-mode.txt.
 
 ==============================================================
 
-overcommit_memory:
+dirty_background_bytes
 
-This value contains a flag that enables memory overcommitment.
+Contains the amount of dirty memory at which the pdflush background writeback
+daemon will start writeback.
 
-When this flag is 0, the kernel attempts to estimate the amount
-of free memory left when userspace requests more memory.
-
-When this flag is 1, the kernel pretends there is always enough
-memory until it actually runs out.
-
-When this flag is 2, the kernel uses a "never overcommit"
-policy that attempts to prevent any overcommit of memory.  
-
-This feature can be very useful because there are a lot of
-programs that malloc() huge amounts of memory "just-in-case"
-and don't use much of it.
-
-The default value is 0.
-
-See Documentation/vm/overcommit-accounting and
-security/commoncap.c::cap_vm_enough_memory() for more information.
+If dirty_background_bytes is written, dirty_background_ratio becomes a function
+of its value (dirty_background_bytes / the amount of dirtyable system memory).
 
 ==============================================================
 
-overcommit_ratio:
+dirty_background_ratio
 
-When overcommit_memory is set to 2, the committed address
-space is not permitted to exceed swap plus this percentage
-of physical RAM.  See above.
+Contains, as a percentage of total system memory, the number of pages at which
+the pdflush background writeback daemon will start writing out dirty data.
 
 ==============================================================
 
-page-cluster:
+dirty_bytes
 
-The Linux VM subsystem avoids excessive disk seeks by reading
-multiple pages on a page fault. The number of pages it reads
-is dependent on the amount of memory in your machine.
+Contains the amount of dirty memory at which a process generating disk writes
+will itself start writeback.
 
-The number of pages the kernel reads in at once is equal to
-2 ^ page-cluster. Values above 2 ^ 5 don't make much sense
-for swap because we only cluster swap data in 32-page groups.
+If dirty_bytes is written, dirty_ratio becomes a function of its value
+(dirty_bytes / the amount of dirtyable system memory).
+
+==============================================================
+
+dirty_expire_centisecs
+
+This tunable is used to define when dirty data is old enough to be eligible
+for writeout by the pdflush daemons.  It is expressed in 100'ths of a second.
+Data which has been dirty in-memory for longer than this interval will be
+written out next time a pdflush daemon wakes up.
+
+==============================================================
+
+dirty_ratio
+
+Contains, as a percentage of total system memory, the number of pages at which
+a process which is generating disk writes will itself start writing out dirty
+data.
+
+==============================================================
+
+dirty_writeback_centisecs
+
+The pdflush writeback daemons will periodically wake up and write `old' data
+out to disk.  This tunable expresses the interval between those wakeups, in
+100'ths of a second.
+
+Setting this to zero disables periodic writeback altogether.
+
+==============================================================
+
+drop_caches
+
+Writing to this will cause the kernel to drop clean caches, dentries and
+inodes from memory, causing that memory to become free.
+
+To free pagecache:
+	echo 1 > /proc/sys/vm/drop_caches
+To free dentries and inodes:
+	echo 2 > /proc/sys/vm/drop_caches
+To free pagecache, dentries and inodes:
+	echo 3 > /proc/sys/vm/drop_caches
+
+As this is a non-destructive operation and dirty objects are not freeable, the
+user should run `sync' first.
+
+==============================================================
+
+hugepages_treat_as_movable
+
+This parameter is only useful when kernelcore= is specified at boot time to
+create ZONE_MOVABLE for pages that may be reclaimed or migrated. Huge pages
+are not movable so are not normally allocated from ZONE_MOVABLE. A non-zero
+value written to hugepages_treat_as_movable allows huge pages to be allocated
+from ZONE_MOVABLE.
+
+Once enabled, the ZONE_MOVABLE is treated as an area of memory the huge
+pages pool can easily grow or shrink within. Assuming that applications are
+not running that mlock() a lot of memory, it is likely the huge pages pool
+can grow to the size of ZONE_MOVABLE by repeatedly entering the desired value
+into nr_hugepages and triggering page reclaim.
+
+==============================================================
+
+hugetlb_shm_group
+
+hugetlb_shm_group contains the group id that is allowed to create SysV
+shared memory segments using hugetlb pages.
+
+==============================================================
+
+laptop_mode
+
+laptop_mode is a knob that controls "laptop mode". All the things that are
+controlled by this knob are discussed in Documentation/laptops/laptop-mode.txt.
+
+==============================================================
+
+legacy_va_layout
+
+If non-zero, this sysctl disables the new 32-bit mmap layout - the kernel
+will use the legacy (2.4) layout for all processes.
+
+==============================================================
+
+lowmem_reserve_ratio
+
+For some specialised workloads on highmem machines it is dangerous for
+the kernel to allow process memory to be allocated from the "lowmem"
+zone.  This is because that memory could then be pinned via the mlock()
+system call, or by unavailability of swapspace.
+
+And on large highmem machines this lack of reclaimable lowmem memory
+can be fatal.
+
+So the Linux page allocator has a mechanism which prevents allocations
+which _could_ use highmem from using too much lowmem.  This means that
+a certain amount of lowmem is defended from the possibility of being
+captured into pinned user memory.
+
+(The same argument applies to the old 16 megabyte ISA DMA region.  This
+mechanism will also defend that region from allocations which could use
+highmem or lowmem).
+
+The `lowmem_reserve_ratio' tunable determines how aggressive the kernel is
+in defending these lower zones.
+
+If you have a machine which uses highmem or ISA DMA and your
+applications are using mlock(), or if you are running with no swap then
+you probably should change the lowmem_reserve_ratio setting.
+
+The lowmem_reserve_ratio is an array. You can see them by reading this file.
+-
+% cat /proc/sys/vm/lowmem_reserve_ratio
+256     256     32
+-
+Note: the number of elements is one fewer than the number of zones, because
+      the highest zone's value is not needed for the following calculation.
+
+These values are not used directly. The kernel calculates the number of
+protection pages for each zone from them. These are shown as the array of
+protection pages in /proc/zoneinfo, as in the following example from an
+x86-64 box. Each zone has an array of protection pages like this:
+
+-
+Node 0, zone      DMA
+  pages free     1355
+        min      3
+        low      3
+        high     4
+	:
+	:
+    numa_other   0
+        protection: (0, 2004, 2004, 2004)
+	^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  pagesets
+    cpu: 0 pcp: 0
+        :
+-
+These protection values are added to the watermark when judging whether this
+zone should be used for page allocation or should be reclaimed.
+
+In this example, if normal pages (index=2) are requested from this DMA zone
+and pages_high is used as the watermark, the kernel judges that this zone
+should not be used because pages_free (1355) is smaller than watermark +
+protection[2] (4 + 2004 = 2008). If this protection value were 0, this zone
+would be used to satisfy a normal-page request. If the request is for the
+DMA zone itself (index=0), protection[0] (=0) is used.
+
+zone[i]'s protection[j] is calculated by the following expression:
+
+(i < j):
+  zone[i]->protection[j]
+  = (total sum of present_pages from zone[i+1] to zone[j] on the node)
+    / lowmem_reserve_ratio[i];
+(i = j):
+  (should not be protected, so protection[j] = 0)
+(i > j):
+  (not necessary, but reported as 0)
+
+The default values of lowmem_reserve_ratio[i] are
+    256 (if zone[i] means DMA or DMA32 zone)
+    32  (others).
+As the expression above shows, they are the reciprocal of the ratio:
+256 means 1/256, so the number of protection pages becomes about 0.39% of
+the total present pages of the higher zones on the node.
+
+If you would like to protect more pages, smaller values are effective.
+The minimum value is 1 (1/1 -> 100%).
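+
+As a rough illustration of the expression above, the following user-space
+sketch (not kernel code) recomputes a protection array from assumed zone
+sizes; the present[] and ratio[] numbers are made-up placeholders, and on a
+real system the inputs come from /proc/zoneinfo and
+/proc/sys/vm/lowmem_reserve_ratio:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		/* Assumed zone sizes in pages (DMA, DMA32, Normal, Movable). */
+		unsigned long present[] = { 3997, 500000, 13000, 0 };
+		/* Default ratios: 256 for DMA/DMA32, 32 otherwise (last entry unused). */
+		unsigned long ratio[] = { 256, 256, 32, 32 };
+		int nzones = 4, i, j, k;
+
+		for (i = 0; i < nzones; i++) {
+			printf("zone[%d] protection:", i);
+			for (j = 0; j < nzones; j++) {
+				unsigned long sum = 0;
+
+				if (j <= i) {		/* i >= j: not protected */
+					printf(" 0");
+					continue;
+				}
+				for (k = i + 1; k <= j; k++)	/* zones i+1 .. j */
+					sum += present[k];
+				printf(" %lu", sum / ratio[i]);
+			}
+			printf("\n");
+		}
+		return 0;
+	}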
 
 ==============================================================
 
@@ -112,9 +275,9 @@
 
 min_free_kbytes:
 
-This is used to force the Linux VM to keep a minimum number 
+This is used to force the Linux VM to keep a minimum number
 of kilobytes free.  The VM uses this number to compute a pages_min
-value for each lowmem zone in the system.  Each lowmem zone gets 
+value for each lowmem zone in the system.  Each lowmem zone gets
 a number of reserved free pages based proportionally on its size.
 
 Some minimal amount of memory is needed to satisfy PF_MEMALLOC
@@ -123,73 +286,6 @@
 
 Setting this too high will OOM your machine instantly.
 
-==============================================================
-
-percpu_pagelist_fraction
-
-This is the fraction of pages at most (high mark pcp->high) in each zone that
-are allocated for each per cpu page list.  The min value for this is 8.  It
-means that we don't allow more than 1/8th of pages in each zone to be
-allocated in any single per_cpu_pagelist.  This entry only changes the value
-of hot per cpu pagelists.  User can specify a number like 100 to allocate
-1/100th of each zone to each per cpu page list.
-
-The batch value of each per cpu pagelist is also updated as a result.  It is
-set to pcp->high/4.  The upper limit of batch is (PAGE_SHIFT * 8)
-
-The initial value is zero.  Kernel does not use this value at boot time to set
-the high water marks for each per cpu page list.
-
-===============================================================
-
-zone_reclaim_mode:
-
-Zone_reclaim_mode allows someone to set more or less aggressive approaches to
-reclaim memory when a zone runs out of memory. If it is set to zero then no
-zone reclaim occurs. Allocations will be satisfied from other zones / nodes
-in the system.
-
-This is value ORed together of
-
-1	= Zone reclaim on
-2	= Zone reclaim writes dirty pages out
-4	= Zone reclaim swaps pages
-
-zone_reclaim_mode is set during bootup to 1 if it is determined that pages
-from remote zones will cause a measurable performance reduction. The
-page allocator will then reclaim easily reusable pages (those page
-cache pages that are currently not used) before allocating off node pages.
-
-It may be beneficial to switch off zone reclaim if the system is
-used for a file server and all of memory should be used for caching files
-from disk. In that case the caching effect is more important than
-data locality.
-
-Allowing zone reclaim to write out pages stops processes that are
-writing large amounts of data from dirtying pages on other nodes. Zone
-reclaim will write out dirty pages if a zone fills up and so effectively
-throttle the process. This may decrease the performance of a single process
-since it cannot use all of system memory to buffer the outgoing writes
-anymore but it preserve the memory on other nodes so that the performance
-of other processes running on other nodes will not be affected.
-
-Allowing regular swap effectively restricts allocations to the local
-node unless explicitly overridden by memory policies or cpuset
-configurations.
-
-=============================================================
-
-min_unmapped_ratio:
-
-This is available only on NUMA kernels.
-
-A percentage of the total pages in each zone.  Zone reclaim will only
-occur if more than this percentage of pages are file backed and unmapped.
-This is to insure that a minimal amount of local pages is still available for
-file I/O even if the node is overallocated.
-
-The default is 1 percent.
-
 =============================================================
 
 min_slab_ratio:
@@ -210,69 +306,16 @@
 
 =============================================================
 
-panic_on_oom
+min_unmapped_ratio:
 
-This enables or disables panic on out-of-memory feature.
+This is available only on NUMA kernels.
 
-If this is set to 0, the kernel will kill some rogue process,
-called oom_killer.  Usually, oom_killer can kill rogue processes and
-system will survive.
+A percentage of the total pages in each zone.  Zone reclaim will only
+occur if more than this percentage of pages are file backed and unmapped.
+This is to ensure that a minimal amount of local pages is still available for
+file I/O even if the node is overallocated.
 
-If this is set to 1, the kernel panics when out-of-memory happens.
-However, if a process limits using nodes by mempolicy/cpusets,
-and those nodes become memory exhaustion status, one process
-may be killed by oom-killer. No panic occurs in this case.
-Because other nodes' memory may be free. This means system total status
-may be not fatal yet.
-
-If this is set to 2, the kernel panics compulsorily even on the
-above-mentioned.
-
-The default value is 0.
-1 and 2 are for failover of clustering. Please select either
-according to your policy of failover.
-
-=============================================================
-
-oom_dump_tasks
-
-Enables a system-wide task dump (excluding kernel threads) to be
-produced when the kernel performs an OOM-killing and includes such
-information as pid, uid, tgid, vm size, rss, cpu, oom_adj score, and
-name.  This is helpful to determine why the OOM killer was invoked
-and to identify the rogue task that caused it.
-
-If this is set to zero, this information is suppressed.  On very
-large systems with thousands of tasks it may not be feasible to dump
-the memory state information for each one.  Such systems should not
-be forced to incur a performance penalty in OOM conditions when the
-information may not be desired.
-
-If this is set to non-zero, this information is shown whenever the
-OOM killer actually kills a memory-hogging task.
-
-The default value is 0.
-
-=============================================================
-
-oom_kill_allocating_task
-
-This enables or disables killing the OOM-triggering task in
-out-of-memory situations.
-
-If this is set to zero, the OOM killer will scan through the entire
-tasklist and select a task based on heuristics to kill.  This normally
-selects a rogue memory-hogging task that frees up a large amount of
-memory when killed.
-
-If this is set to non-zero, the OOM killer simply kills the task that
-triggered the out-of-memory condition.  This avoids the expensive
-tasklist scan.
-
-If panic_on_oom is selected, it takes precedence over whatever value
-is used in oom_kill_allocating_task.
-
-The default value is 0.
+The default is 1 percent.
 
 ==============================================================
 
@@ -289,6 +332,50 @@
 
 ==============================================================
 
+nr_hugepages
+
+Change the minimum size of the hugepage pool.
+
+See Documentation/vm/hugetlbpage.txt
+
+==============================================================
+
+nr_overcommit_hugepages
+
+Change the maximum size of the hugepage pool. The maximum is
+nr_hugepages + nr_overcommit_hugepages.
+
+See Documentation/vm/hugetlbpage.txt
+
+==============================================================
+
+nr_pdflush_threads
+
+The current number of pdflush threads.  This value is read-only.
+The value changes according to the number of dirty pages in the system.
+
+When necessary, additional pdflush threads are created, one per second, up to
+nr_pdflush_threads_max.
+
+==============================================================
+
+nr_trim_pages
+
+This is available only on NOMMU kernels.
+
+This value adjusts the excess page trimming behaviour of power-of-2 aligned
+NOMMU mmap allocations.
+
+A value of 0 disables trimming of allocations entirely, while a value of 1
+trims excess pages aggressively. Any value >= 1 acts as the watermark where
+trimming of allocations is initiated.
+
+The default value is 1.
+
+See Documentation/nommu-mmap.txt for more information.
+
+==============================================================
+
 numa_zonelist_order
 
 This sysctl is only for NUMA.
@@ -334,17 +421,199 @@
 
 ==============================================================
 
-nr_hugepages
+oom_dump_tasks
 
-Change the minimum size of the hugepage pool.
+Enables a system-wide task dump (excluding kernel threads) to be
+produced when the kernel performs an OOM-killing and includes such
+information as pid, uid, tgid, vm size, rss, cpu, oom_adj score, and
+name.  This is helpful to determine why the OOM killer was invoked
+and to identify the rogue task that caused it.
 
-See Documentation/vm/hugetlbpage.txt
+If this is set to zero, this information is suppressed.  On very
+large systems with thousands of tasks it may not be feasible to dump
+the memory state information for each one.  Such systems should not
+be forced to incur a performance penalty in OOM conditions when the
+information may not be desired.
+
+If this is set to non-zero, this information is shown whenever the
+OOM killer actually kills a memory-hogging task.
+
+The default value is 0.
 
 ==============================================================
 
-nr_overcommit_hugepages
+oom_kill_allocating_task
 
-Change the maximum size of the hugepage pool. The maximum is
-nr_hugepages + nr_overcommit_hugepages.
+This enables or disables killing the OOM-triggering task in
+out-of-memory situations.
 
-See Documentation/vm/hugetlbpage.txt
+If this is set to zero, the OOM killer will scan through the entire
+tasklist and select a task based on heuristics to kill.  This normally
+selects a rogue memory-hogging task that frees up a large amount of
+memory when killed.
+
+If this is set to non-zero, the OOM killer simply kills the task that
+triggered the out-of-memory condition.  This avoids the expensive
+tasklist scan.
+
+If panic_on_oom is selected, it takes precedence over whatever value
+is used in oom_kill_allocating_task.
+
+The default value is 0.
+
+==============================================================
+
+overcommit_memory:
+
+This value contains a flag that enables memory overcommitment.
+
+When this flag is 0, the kernel attempts to estimate the amount
+of free memory left when userspace requests more memory.
+
+When this flag is 1, the kernel pretends there is always enough
+memory until it actually runs out.
+
+When this flag is 2, the kernel uses a "never overcommit"
+policy that attempts to prevent any overcommit of memory.
+
+This feature can be very useful because there are a lot of
+programs that malloc() huge amounts of memory "just-in-case"
+and don't use much of it.
+
+The default value is 0.
+
+See Documentation/vm/overcommit-accounting and
+security/commoncap.c::cap_vm_enough_memory() for more information.
+
+==============================================================
+
+overcommit_ratio:
+
+When overcommit_memory is set to 2, the committed address
+space is not permitted to exceed swap plus this percentage
+of physical RAM.  See above.
+
+==============================================================
+
+page-cluster
+
+page-cluster controls the number of pages which are written to swap in
+a single attempt, i.e. the swap I/O size.
+
+It is a logarithmic value - setting it to zero means "1 page", setting
+it to 1 means "2 pages", setting it to 2 means "4 pages", etc.
+
+The default value is three (eight pages at a time).  There may be some
+small benefits in tuning this to a different value if your workload is
+swap-intensive.
+
+=============================================================
+
+panic_on_oom
+
+This enables or disables panic on out-of-memory feature.
+
+If this is set to 0, the kernel will kill some rogue process via a
+routine called oom_killer.  Usually, oom_killer can kill a rogue process
+and the system will survive.
+
+If this is set to 1, the kernel panics when out-of-memory happens.
+However, if a process limits its allocations to certain nodes by using
+mempolicy/cpusets, and those nodes run out of memory, one process may be
+killed by the oom-killer.  No panic occurs in this case, because other
+nodes' memory may still be free and the system as a whole may not yet be
+in a fatal state.
+
+If this is set to 2, the kernel always panics when an out-of-memory
+condition occurs, even in the situation described above.
+
+The default value is 0.
+Values 1 and 2 are intended for cluster failover.  Please choose whichever
+matches your failover policy.
+
+=============================================================
+
+percpu_pagelist_fraction
+
+This is the fraction of pages at most (high mark pcp->high) in each zone that
+are allocated for each per cpu page list.  The min value for this is 8.  It
+means that we don't allow more than 1/8th of pages in each zone to be
+allocated in any single per_cpu_pagelist.  This entry only changes the value
+of hot per cpu pagelists.  User can specify a number like 100 to allocate
+1/100th of each zone to each per cpu page list.
+
+The batch value of each per cpu pagelist is also updated as a result.  It is
+set to pcp->high/4.  The upper limit of batch is (PAGE_SHIFT * 8)
+
+The initial value is zero.  Kernel does not use this value at boot time to set
+the high water marks for each per cpu page list.
+
+==============================================================
+
+stat_interval
+
+The time interval at which vm statistics are updated.  The default
+is 1 second.
+
+==============================================================
+
+swappiness
+
+This control is used to define how aggressively the kernel will swap
+memory pages.  Higher values will increase aggressiveness, lower values
+decrease the amount of swap.
+
+The default value is 60.
+
+==============================================================
+
+vfs_cache_pressure
+------------------
+
+Controls the tendency of the kernel to reclaim the memory which is used for
+caching of directory and inode objects.
+
+At the default value of vfs_cache_pressure=100 the kernel will attempt to
+reclaim dentries and inodes at a "fair" rate with respect to pagecache and
+swapcache reclaim.  Decreasing vfs_cache_pressure causes the kernel to prefer
+to retain dentry and inode caches.  Increasing vfs_cache_pressure beyond 100
+causes the kernel to prefer to reclaim dentries and inodes.
+
+==============================================================
+
+zone_reclaim_mode:
+
+Zone_reclaim_mode allows someone to set more or less aggressive approaches to
+reclaim memory when a zone runs out of memory. If it is set to zero then no
+zone reclaim occurs. Allocations will be satisfied from other zones / nodes
+in the system.
+
+This is a bitmask built by ORing together the following values:
+
+1	= Zone reclaim on
+2	= Zone reclaim writes dirty pages out
+4	= Zone reclaim swaps pages
+
+zone_reclaim_mode is set during bootup to 1 if it is determined that pages
+from remote zones will cause a measurable performance reduction. The
+page allocator will then reclaim easily reusable pages (those page
+cache pages that are currently not used) before allocating off node pages.
+
+It may be beneficial to switch off zone reclaim if the system is
+used for a file server and all of memory should be used for caching files
+from disk. In that case the caching effect is more important than
+data locality.
+
+Allowing zone reclaim to write out pages stops processes that are
+writing large amounts of data from dirtying pages on other nodes. Zone
+reclaim will write out dirty pages if a zone fills up and so effectively
+throttles the process. This may decrease the performance of a single process
+since it can no longer use all of system memory to buffer the outgoing writes,
+but it preserves the memory on other nodes so that the performance
+of other processes running on other nodes will not be affected.
+
+Allowing regular swap effectively restricts allocations to the local
+node unless explicitly overridden by memory policies or cpuset
+configurations.
+
+============ End of Document =================================
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 10a0263..9e592c7 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -1,6 +1,5 @@
 Linux Magic System Request Key Hacks
 Documentation for sysrq.c
-Last update: 2007-AUG-04
 
 *  What is the magic SysRq key?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -211,6 +210,24 @@
 a lock (you are also in an interrupt handler, which means don't sleep!), so
 you must call __handle_sysrq_nolock instead.
 
+*  When I hit a SysRq key combination only the header appears on the console?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Sysrq output is subject to the same console loglevel control as all
+other console output.  This means that if the kernel was booted 'quiet',
+as is common on distro kernels, the output may not appear on the actual
+console, even though it will appear in the dmesg buffer, and be accessible
+via the dmesg command and to the consumers of /proc/kmsg.  As a specific
+exception the header line from the sysrq command is passed to all console
+consumers as if the current loglevel was maximum.  If only the header
+is emitted it is almost certain that the kernel loglevel is too low.
+Should you require the output on the console channel then you will need
+to temporarily up the console loglevel using alt-sysrq-8 or:
+
+    echo 8 > /proc/sysrq-trigger
+
+Remember to return the loglevel to normal after triggering the sysrq
+command you are interested in.
+
 *  I have more questions, who can I ask?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 And I'll answer any questions about the registration system you got, also
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index e48ea1d..ad64261 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -313,11 +313,13 @@
 that it supports autosuspend by setting the .supports_autosuspend flag
 in its usb_driver structure.  It is then responsible for informing the
 USB core whenever one of its interfaces becomes busy or idle.  The
-driver does so by calling these three functions:
+driver does so by calling these five functions:
 
 	int  usb_autopm_get_interface(struct usb_interface *intf);
 	void usb_autopm_put_interface(struct usb_interface *intf);
 	int  usb_autopm_set_interface(struct usb_interface *intf);
+	int  usb_autopm_get_interface_async(struct usb_interface *intf);
+	void usb_autopm_put_interface_async(struct usb_interface *intf);
 
 The functions work by maintaining a counter in the usb_interface
 structure.  When intf->pm_usage_count is > 0 then the interface is
@@ -330,10 +332,12 @@
 This field is used only by the USB core.)
 
 The driver owns intf->pm_usage_count; it can modify the value however
-and whenever it likes.  A nice aspect of the usb_autopm_* routines is
-that the changes they make are protected by the usb_device structure's
-PM mutex (udev->pm_mutex); however drivers may change pm_usage_count
-without holding the mutex.
+and whenever it likes.  A nice aspect of the non-async usb_autopm_*
+routines is that the changes they make are protected by the usb_device
+structure's PM mutex (udev->pm_mutex); however drivers may change
+pm_usage_count without holding the mutex.  Drivers using the async
+routines are responsible for their own synchronization and mutual
+exclusion.
 
 	usb_autopm_get_interface() increments pm_usage_count and
 	attempts an autoresume if the new value is > 0 and the
@@ -348,6 +352,14 @@
 	is suspended, and it attempts an autosuspend if the value is
 	<= 0 and the device isn't suspended.
 
+	usb_autopm_get_interface_async() and
+	usb_autopm_put_interface_async() do almost the same things as
+	their non-async counterparts.  The differences are: they do
+	not acquire the PM mutex, and they use a workqueue to do their
+	jobs.  As a result they can be called in an atomic context,
+	such as an URB's completion handler, but when they return the
+	device will generally not yet be in the desired state.
+
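+	As a purely illustrative sketch (the driver structure and function
+	names below are made up; only the usb_autopm_*_async calls and
+	usb_submit_urb come from the USB API), a driver might pair the
+	async calls around asynchronous I/O like this:
+
+		#include <linux/usb.h>
+
+		struct mydrv {				/* hypothetical private data */
+			struct usb_interface *intf;
+			struct urb *urb;
+		};
+
+		static void mydrv_urb_complete(struct urb *urb)
+		{
+			struct mydrv *drv = urb->context;
+
+			/* ... handle urb->status / urb->actual_length ... */
+
+			/* Atomic context: use the async variant to drop the
+			 * usage count; autosuspend may happen later. */
+			usb_autopm_put_interface_async(drv->intf);
+		}
+
+		static int mydrv_send(struct mydrv *drv)
+		{
+			int rc = usb_autopm_get_interface_async(drv->intf);
+
+			if (rc < 0)
+				return rc;	/* resume not possible or not allowed */
+			rc = usb_submit_urb(drv->urb, GFP_ATOMIC);
+			if (rc)
+				usb_autopm_put_interface_async(drv->intf);
+			return rc;
+		}
+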
 There also are a couple of utility routines drivers can use:
 
 	usb_autopm_enable() sets pm_usage_cnt to 0 and then calls
diff --git a/Documentation/w1/masters/00-INDEX b/Documentation/w1/masters/00-INDEX
index 7b0ceaa..d63fa02 100644
--- a/Documentation/w1/masters/00-INDEX
+++ b/Documentation/w1/masters/00-INDEX
@@ -4,5 +4,7 @@
 	- The Maxim/Dallas Semiconductor DS2482 provides 1-wire busses.
 ds2490
 	- The Maxim/Dallas Semiconductor DS2490 builds USB <-> W1 bridges.
+mxc_w1
+	- W1 master controller driver found on Freescale MX2/MX3 SoCs
 w1-gpio
 	- GPIO 1-wire bus master driver.
diff --git a/Documentation/w1/masters/mxc-w1 b/Documentation/w1/masters/mxc-w1
new file mode 100644
index 0000000..97f6199
--- /dev/null
+++ b/Documentation/w1/masters/mxc-w1
@@ -0,0 +1,11 @@
+Kernel driver mxc_w1
+====================
+
+Supported chips:
+  * Freescale MX27, MX31 and probably other i.MX SoCs
+    Datasheets:
+        http://www.freescale.com/files/32bit/doc/data_sheet/MCIMX31.pdf?fpsp=1
+	http://www.freescale.com/files/dsp/MCIMX27.pdf?fpsp=1
+
+Author: Originally based on Freescale code, prepared for mainline by
+	Sascha Hauer <s.hauer@pengutronix.de>
diff --git a/Documentation/w1/w1.netlink b/Documentation/w1/w1.netlink
index 3640c7c8..804445f 100644
--- a/Documentation/w1/w1.netlink
+++ b/Documentation/w1/w1.netlink
@@ -5,69 +5,157 @@
 =============
 
 There are three types of messages between w1 core and userspace:
-1. Events. They are generated each time new master or slave device found
-	either due to automatic or requested search.
-2. Userspace commands. Includes read/write and search/alarm search comamnds.
+1. Events. They are generated each time new master or slave device
+	found either due to automatic or requested search.
+2. Userspace commands.
 3. Replies to userspace commands.
 
 
 Protocol.
 ========
 
-[struct cn_msg] - connector header. It's length field is equal to size of the attached data.
+[struct cn_msg] - connector header.
+	Its length field is equal to the size of the attached data
 [struct w1_netlink_msg] - w1 netlink header.
 	__u8 type 	- message type.
-			W1_SLAVE_ADD/W1_SLAVE_REMOVE - slave add/remove events.
-			W1_MASTER_ADD/W1_MASTER_REMOVE - master add/remove events.
-			W1_MASTER_CMD - userspace command for bus master device (search/alarm search).
-			W1_SLAVE_CMD - userspace command for slave device (read/write/ search/alarm search
-					for bus master device where given slave device found).
+			W1_LIST_MASTERS
+				list current bus masters
+			W1_SLAVE_ADD/W1_SLAVE_REMOVE
+				slave add/remove events
+			W1_MASTER_ADD/W1_MASTER_REMOVE
+				master add/remove events
+			W1_MASTER_CMD
+				userspace command for bus master
+				device (search/alarm search)
+			W1_SLAVE_CMD
+				userspace command for slave device
+				(read/write/touch)
 	__u8 res	- reserved
-	__u16 len	- size of attached to this header data.
+	__u16 len	- size of data attached to this header data
 	union {
-		__u8 id;			 - slave unique device id
+		__u8 id[8];			 - slave unique device id
 		struct w1_mst {
-			__u32		id;	 - master's id.
+			__u32		id;	 - master's id
 			__u32		res;	 - reserved
 		} mst;
 	} id;
 
-[strucrt w1_netlink_cmd] - command for gived master or slave device.
+[struct w1_netlink_cmd] - command for given master or slave device.
 	__u8 cmd	- command opcode.
-			W1_CMD_READ 	- read command.
-			W1_CMD_WRITE	- write command.
-			W1_CMD_SEARCH	- search command.
-			W1_CMD_ALARM_SEARCH - alarm search command.
+			W1_CMD_READ 	- read command
+			W1_CMD_WRITE	- write command
+			W1_CMD_TOUCH	- touch command
+				(write and sample data back to userspace)
+			W1_CMD_SEARCH	- search command
+			W1_CMD_ALARM_SEARCH - alarm search command
 	__u8 res	- reserved
-	__u16 len	- length of data for this command.
-			For read command data must be allocated like for write command.
-	__u8 data[0]	- data for this command.
+	__u16 len	- length of data for this command
+		For read command data must be allocated like for write command
+	__u8 data[0]	- data for this command
 
 
-Each connector message can include one or more w1_netlink_msg with zero of more attached w1_netlink_cmd messages.
+Each connector message can include one or more w1_netlink_msg with
+zero or more attached w1_netlink_cmd messages.
 
-For event messages there are no w1_netlink_cmd embedded structures, only connector header
-and w1_netlink_msg strucutre with "len" field being zero and filled type (one of event types)
-and id - either 8 bytes of slave unique id in host order, or master's id, which is assigned
-to bus master device when it is added to w1 core.
+For event messages there are no embedded w1_netlink_cmd structures,
+only the connector header and a w1_netlink_msg structure with the "len"
+field set to zero, the type filled in (one of the event types) and the id:
+either 8 bytes of slave unique id in host order,
+or the master's id, which is assigned to the bus master device
+when it is added to the w1 core.
 
-Currently replies to userspace commands are only generated for read command request.
-One reply is generated exactly for one w1_netlink_cmd read request.
-Replies are not combined when sent - i.e. typical reply messages looks like the following:
+Currently replies to userspace commands are only generated for read
+command requests. Exactly one reply is generated for each w1_netlink_cmd
+read request. Replies are not combined when sent - i.e. a typical reply
+message looks like the following:
+
 [cn_msg][w1_netlink_msg][w1_netlink_cmd]
-cn_msg.len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd) + cmd->len;
+cn_msg.len = sizeof(struct w1_netlink_msg) +
+	     sizeof(struct w1_netlink_cmd) +
+	     cmd->len;
 w1_netlink_msg.len = sizeof(struct w1_netlink_cmd) + cmd->len;
 w1_netlink_cmd.len = cmd->len;
 
+Replies to W1_LIST_MASTERS should send a message back to userspace
+which will contain the list of all registered master ids in the
+following format:
+
+	cn_msg (CN_W1_IDX.CN_W1_VAL as id, len is equal to sizeof(struct
+	w1_netlink_msg) plus number of masters multiplied by 4)
+	w1_netlink_msg (type: W1_LIST_MASTERS, len is equal to
+		number of masters multiplied by 4 (u32 size))
+	id0 ... idN
+
+	Each message is at most 4k in size, so if number of master devices
+	exceeds this, it will be split into several messages,
+	cn.seq will be increased for each one.
+
+W1 search and alarm search commands.
+request:
+[cn_msg]
+  [w1_netlink_msg type = W1_MASTER_CMD
+  	id is equal to the bus master id to use for searching]
+  [w1_netlink_cmd cmd = W1_CMD_SEARCH or W1_CMD_ALARM_SEARCH]
+
+reply:
+  [cn_msg, ack = 1 and increasing, 0 means the last message,
+  	seq is equal to the request seq]
+  [w1_netlink_msg type = W1_MASTER_CMD]
+  [w1_netlink_cmd cmd = W1_CMD_SEARCH or W1_CMD_ALARM_SEARCH
+	len is equal to number of IDs multiplied by 8]
+  [64bit-id0 ... 64bit-idN]
+Length in each header corresponds to the size of the data behind it, so
+w1_netlink_cmd->len = N * 8; where N is number of IDs in this message.
+	Can be zero.
+w1_netlink_msg->len = sizeof(struct w1_netlink_cmd) + N * 8;
+cn_msg->len = sizeof(struct w1_netlink_msg) +
+	      sizeof(struct w1_netlink_cmd) +
+	      N*8;
+
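+As an illustrative user-space sketch only (the struct layouts follow the
+description above and normally come from drivers/w1/w1_netlink.h and
+linux/connector.h; the helper name is made up), a search request could be
+assembled roughly like this:
+
+	#include <string.h>
+	#include <stdint.h>
+	#include <linux/connector.h>	/* struct cn_msg, CN_W1_IDX, CN_W1_VAL */
+	#include "w1_netlink.h"		/* struct w1_netlink_msg/_cmd, W1_MASTER_CMD, W1_CMD_SEARCH */
+
+	/* Build [cn_msg][w1_netlink_msg][w1_netlink_cmd] asking bus master
+	 * 'master_id' to perform a search; returns the total length. */
+	static size_t build_search_request(void *buf, uint32_t master_id,
+					   uint32_t seq)
+	{
+		struct cn_msg *cn = buf;
+		struct w1_netlink_msg *msg = (struct w1_netlink_msg *)(cn + 1);
+		struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(msg + 1);
+
+		memset(buf, 0, sizeof(*cn) + sizeof(*msg) + sizeof(*cmd));
+		cn->id.idx = CN_W1_IDX;
+		cn->id.val = CN_W1_VAL;
+		cn->seq = seq;			/* echoed back in the reply */
+		cn->len = sizeof(*msg) + sizeof(*cmd);
+
+		msg->type = W1_MASTER_CMD;
+		msg->id.mst.id = master_id;	/* which bus master to search */
+		msg->len = sizeof(*cmd);
+
+		cmd->cmd = W1_CMD_SEARCH;	/* or W1_CMD_ALARM_SEARCH */
+		cmd->len = 0;			/* search carries no payload */
+
+		return sizeof(*cn) + cn->len;
+	}
+
+The resulting buffer would then be sent to the kernel over a
+NETLINK_CONNECTOR netlink socket.
+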
+W1 reset command.
+[cn_msg]
+  [w1_netlink_msg type = W1_MASTER_CMD
+  	id is equal to the bus master id to use for searching]
+  [w1_netlink_cmd cmd = W1_CMD_RESET]
+
+
+Command status replies.
+======================
+
+Each command (either root, master or slave, with or without a w1_netlink_cmd
+structure) will be 'acked' by the w1 core. The format of the reply is the same
+as the request message, except that length parameters do not account for data
+requested by the user, i.e. read/write/touch IO requests will not contain
+data, so w1_netlink_cmd.len will be 0, w1_netlink_msg.len will be the size
+of the w1_netlink_cmd structure and cn_msg.len will be equal to the sum
+of sizeof(struct w1_netlink_msg) and sizeof(struct w1_netlink_cmd).
+If the reply is generated for a master or root command (which do not have
+a w1_netlink_cmd attached), the reply will contain only the cn_msg and
+w1_netlink_msg structures.
+
+The w1_netlink_msg.status field will carry a positive error value
+(EINVAL, for example) or zero in case of success.
+
+All other fields in every structure will mirror the same parameters in the
+request message (except lengths as described above).
+
+Status reply is generated for every w1_netlink_cmd embedded in the
+w1_netlink_msg, if there are no w1_netlink_cmd structures,
+reply will be generated for the w1_netlink_msg.
+
+All w1_netlink_cmd command structures in a w1_netlink_msg are handled even
+if there were errors; only a length mismatch interrupts message processing.
+
 
 Operation steps in w1 core when new command is received.
 =======================================================
 
-When new message (w1_netlink_msg) is received w1 core detects if it is master of slave request,
-according to w1_netlink_msg.type field.
+When a new message (w1_netlink_msg) is received, the w1 core detects whether
+it is a master or slave request according to the w1_netlink_msg.type field.
 Then master or slave device is searched for.
-When found, master device (requested or those one on where slave device is found) is locked.
-If slave command is requested, then reset/select procedure is started to select given device.
+When found, the master device (either the requested one or the one where the
+slave device was found) is locked. If a slave command is requested, then a
+reset/select procedure is started to select the given device.
 
 Then all requested in w1_netlink_msg operations are performed one by one.
 If command requires reply (like read command) it is sent on command completion.
@@ -82,8 +170,8 @@
 Each connector message includes two u32 fields as "address".
 w1 uses CN_W1_IDX and CN_W1_VAL defined in include/linux/connector.h header.
 Each message also includes sequence and acknowledge numbers.
-Sequence number for event messages is appropriate bus master sequence number increased with
-each event message sent "through" this master.
+Sequence number for event messages is appropriate bus master sequence number
+increased with each event message sent "through" this master.
 Sequence number for userspace requests is set by userspace application.
 Sequence number for reply is the same as was in request, and
 acknowledge number is set to seq+1.
@@ -93,6 +181,6 @@
 ============================================
 
 1. Documentation/connector
-2. http://tservice.net.ru/~s0mbre/archive/w1
-This archive includes userspace application w1d.c which
-uses read/write/search commands for all master/slave devices found on the bus.
+2. http://www.ioremap.net/archive/w1
+This archive includes userspace application w1d.c which uses
+read/write/search commands for all master/slave devices found on the bus.
diff --git a/Documentation/wimax/README.i2400m b/Documentation/wimax/README.i2400m
new file mode 100644
index 0000000..7dffd89
--- /dev/null
+++ b/Documentation/wimax/README.i2400m
@@ -0,0 +1,260 @@
+
+   Driver for the Intel Wireless Wimax Connection 2400m
+
+   (C) 2008 Intel Corporation < linux-wimax@intel.com >
+
+   This provides a driver for the Intel Wireless WiMAX Connection 2400m
+   and a basic Linux kernel WiMAX stack.
+
+1. Requirements
+
+     * Linux installation with Linux kernel 2.6.22 or newer (if building
+       from a separate tree)
+     * Intel i2400m Echo Peak or Baxter Peak; this includes the Intel
+       Wireless WiMAX/WiFi Link 5x50 series.
+     * build tools:
+          + Linux kernel development package for the target kernel; to
+            build against your currently running kernel, you need to have
+            the kernel development package corresponding to the running
+            image installed (usually if your kernel is named
+            linux-VERSION, the development package is called
+            linux-dev-VERSION or linux-headers-VERSION).
+          + GNU C Compiler, make
+
+2. Compilation and installation
+
+2.1. Compilation of the drivers included in the kernel
+
+   Configure the kernel; to enable the WiMAX drivers select Drivers >
+   Networking Drivers > WiMAX device support. Enable all of them as
+   modules (easier).
+
+   If USB or SDIO are not enabled in the kernel configuration, the options
+   to build the i2400m USB or SDIO drivers will not show. Enable said
+   subsystems and go back to the WiMAX menu to enable the drivers.
+
+   Compile and install your kernel as usual.
+
+2.2. Compilation of the drivers distributed as a standalone module
+
+   To compile
+
+$ cd source/directory
+$ make
+
+   Once built you can load and unload using the provided load.sh script;
+   load.sh will load the modules, load.sh u will unload them.
+
+   To install in the default kernel directories (and enable auto loading
+   when the device is plugged):
+
+$ make install
+$ depmod -a
+
+   If your kernel development files are located in a non standard
+   directory or if you want to build for a kernel that is not the
+   currently running one, set KDIR to the right location:
+
+$ make KDIR=/path/to/kernel/dev/tree
+
+   For more information, please contact linux-wimax@intel.com.
+
+3. Installing the firmware
+
+   The firmware can be obtained from http://linuxwimax.org or might have
+   been supplied with your hardware.
+
+   It has to be installed in the target system:
+     *
+$ cp FIRMWAREFILE.sbcf /lib/firmware/i2400m-fw-BUSTYPE-1.3.sbcf
+
+     * NOTE: if your firmware came in an .rpm or .deb file, just install
+       it as normal, with the rpm (rpm -i FIRMWARE.rpm) or dpkg
+       (dpkg -i FIRMWARE.deb) commands. No further action is needed.
+     * BUSTYPE will be usb or sdio, depending on the hardware you have.
+       Each hardware type comes with its own firmware and will not work
+       with other types.
+
+4. Design
+
+   This package contains two major parts: a WiMAX kernel stack and a
+   driver for the Intel i2400m.
+
+   The WiMAX stack is designed to provide for common WiMAX control
+   services to current and future WiMAX devices from any vendor; please
+   see README.wimax for details.
+
+   The i2400m kernel driver is broken up into two main parts: the bus
+   generic driver and the bus-specific drivers. The bus generic driver
+   forms the driver core and contains no knowledge of the actual method we
+   use to connect to the device. The bus specific drivers are just the
+   glue to connect the bus-generic driver and the device. Currently only
+   USB and SDIO are supported. See drivers/net/wimax/i2400m/i2400m.h for
+   more information.
+
+   The bus generic driver is logically broken up into two parts: OS-glue and
+   hardware-glue. The OS-glue interfaces with Linux. The hardware-glue
+   interfaces with the device using an interface provided by the
+   bus-specific driver. The reason for this breakup is to be able to
+   easily reuse the hardware-glue to write drivers for other OSes; note
+   the hardware glue part is written as a native Linux driver; no
+   abstraction layers are used, so to port to another OS, the Linux kernel
+   API calls should be replaced with the target OS's.
+
+5. Usage
+
+   To load the driver, follow the instructions in the install section;
+   once the driver is loaded, plug in the device (unless it is permanently
+   plugged in). The driver will enumerate the device, upload the firmware
+   and output messages in the kernel log (dmesg, /var/log/messages or
+   /var/log/kern.log) such as:
+
+...
+i2400m_usb 5-4:1.0: firmware interface version 8.0.0
+i2400m_usb 5-4:1.0: WiMAX interface wmx0 (00:1d:e1:01:94:2c) ready
+
+   At this point the device is ready to work.
+
+   Current versions require the Intel WiMAX Network Service in userspace
+   to make things work. See the network service's README for instructions
+   on how to scan, connect and disconnect.
+
+5.1. Module parameters
+
+   Module parameters can be set at kernel or module load time or by
+   echoing values:
+
+$ echo VALUE > /sys/module/MODULENAME/parameters/PARAMETERNAME
+
+   To make changes permanent, for example, for the i2400m module, you can
+   also create a file named /etc/modprobe.d/i2400m containing:
+
+options i2400m idle_mode_disabled=1
+
+   To find which parameters are supported by a module, run:
+
+$ modinfo path/to/module.ko
+
+   During kernel bootup (if the driver is linked in the kernel), specify
+   the following to the kernel command line:
+
+i2400m.PARAMETER=VALUE
+
+5.1.1. i2400m: idle_mode_disabled
+
+   The i2400m module supports a parameter to disable idle mode. This
+   parameter, once set, will take effect only when the device is
+   reinitialized by the driver (eg: following a reset or a reconnect).
+
+5.2. Debug operations: debugfs entries
+
+   The driver will register debugfs entries that allow the user to tweak
+   debug settings. There are three main container directories where
+   entries are placed, which correspond to the three blocks an i2400m WiMAX
+   driver has:
+     * /sys/kernel/debug/wimax:DEVNAME/ for the generic WiMAX stack
+       controls
+     * /sys/kernel/debug/wimax:DEVNAME/i2400m for the i2400m generic
+       driver controls
+     * /sys/kernel/debug/wimax:DEVNAME/i2400m-usb (or -sdio) for the
+       bus-specific i2400m-usb or i2400m-sdio controls.
+
+   Of course, if debugfs is mounted in a directory other than
+   /sys/kernel/debug, those paths will change.
+
+5.2.1. Increasing debug output
+
+   The files named *dl_* indicate knobs for controlling the debug output
+   of different submodules:
+     *
+# find /sys/kernel/debug/wimax\:wmx0 -name \*dl_\*
+/sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_tx
+/sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_rx
+/sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_notif
+/sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_fw
+/sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_usb
+/sys/kernel/debug/wimax:wmx0/i2400m/dl_tx
+/sys/kernel/debug/wimax:wmx0/i2400m/dl_rx
+/sys/kernel/debug/wimax:wmx0/i2400m/dl_rfkill
+/sys/kernel/debug/wimax:wmx0/i2400m/dl_netdev
+/sys/kernel/debug/wimax:wmx0/i2400m/dl_fw
+/sys/kernel/debug/wimax:wmx0/i2400m/dl_debugfs
+/sys/kernel/debug/wimax:wmx0/i2400m/dl_driver
+/sys/kernel/debug/wimax:wmx0/i2400m/dl_control
+/sys/kernel/debug/wimax:wmx0/wimax_dl_stack
+/sys/kernel/debug/wimax:wmx0/wimax_dl_op_rfkill
+/sys/kernel/debug/wimax:wmx0/wimax_dl_op_reset
+/sys/kernel/debug/wimax:wmx0/wimax_dl_op_msg
+/sys/kernel/debug/wimax:wmx0/wimax_dl_id_table
+/sys/kernel/debug/wimax:wmx0/wimax_dl_debugfs
+
+   By reading the file you can obtain the current value of said debug
+   level; by writing to it, you can set it.
+
+   To increase the debug level of, for example, the i2400m's generic TX
+   engine, just write:
+
+$ echo 3 > /sys/kernel/debug/wimax:wmx0/i2400m/dl_tx
+
+   Increasing numbers yield increasing debug information; for details of
+   what is printed and the available levels, check the source. The code
+   uses 0 for disabled and increasing values until 8.
+
+5.2.2. RX and TX statistics
+
+   The i2400m/rx_stats and i2400m/tx_stats provide statistics about the
+   data reception/delivery from the device:
+
+$ cat /sys/kernel/debug/wimax:wmx0/i2400m/rx_stats
+45 1 3 34 3104 48 480
+
+   The numbers reported are
+     * packets/RX-buffer: total, min, max
+     * RX-buffers: total RX buffers received, accumulated RX buffer size
+       in bytes, min size received, max size received
+
+   Thus, to find the average buffer size received, divide the accumulated
+   RX-buffer size by the total number of RX-buffers.
+
+   To clear the statistics back to 0, write anything to the rx_stats file:
+
+$ echo 1 > /sys/kernel/debug/wimax:wmx0/i2400m/rx_stats
+
+   Likewise for TX.
+
+   Note the packets this debug file refers to are not network packets, but
+   packets in the sense of the device-specific protocol for communication
+   to the host. See drivers/net/wimax/i2400m/tx.c.
+
+5.2.3. Tracing messages received from user space
+
+   To echo messages received from user space into the trace pipe that the
+   i2400m driver creates, set the debug file i2400m/trace_msg_from_user to
+   1:
+     *
+$ echo 1 > /sys/kernel/debug/wimax:wmx0/i2400m/trace_msg_from_user
+
+5.2.4. Performing a device reset
+
+   By writing a 0, a 1 or a 2 to the file
+   /sys/kernel/debug/wimax:wmx0/reset, the driver performs a warm (without
+   disconnecting from the bus), cold (disconnecting from the bus) or bus
+   (bus specific) reset on the device.
+
+5.2.5. Asking the device to enter power saving mode
+
+   By writing any value to the /sys/kernel/debug/wimax:wmx0 file, the
+   device will attempt to enter power saving mode.
+
+6. Troubleshooting
+
+6.1. Driver complains about 'i2400m-fw-usb-1.2.sbcf: request failed'
+
+   If upon connecting the device, the following is output in the kernel
+   log:
+
+i2400m_usb 5-4:1.0: fw i2400m-fw-usb-1.3.sbcf: request failed: -2
+
+   This means that the driver cannot locate the firmware file named
+   /lib/firmware/i2400m-fw-usb-1.2.sbcf. Check that the file is present in
+   the right location.
diff --git a/Documentation/wimax/README.wimax b/Documentation/wimax/README.wimax
new file mode 100644
index 0000000..b78c437
--- /dev/null
+++ b/Documentation/wimax/README.wimax
@@ -0,0 +1,81 @@
+
+   Linux kernel WiMAX stack
+
+   (C) 2008 Intel Corporation < linux-wimax@intel.com >
+
+   This provides a basic Linux kernel WiMAX stack to provide a common
+   control API for WiMAX devices, usable from kernel and user space.
+
+1. Design
+
+   The WiMAX stack is designed to provide for common WiMAX control
+   services to current and future WiMAX devices from any vendor.
+
+   Because there is currently only one driver and it is not yet clear what
+   the common services will be, the APIs the stack currently provides are
+   very minimal. However, they are designed in such a way that they are
+   easily extensible to accommodate future requirements.
+
+   The stack works by embedding a struct wimax_dev in your device's
+   control structures. This provides a set of callbacks that the WiMAX
+   stack will call in order to implement control operations requested by
+   the user. As well, the stack provides API functions that the driver
+   calls to notify about changes of state in the device.
+
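+   As a minimal sketch of that embedding (the driver structure and the
+   callback shown are made up; the real operation names and signatures
+   should be taken from include/linux/wimax.h and include/net/wimax.h):
+
+	/* Hypothetical driver private data embedding the WiMAX control state. */
+	struct mydrv {
+		/* ... bus handles, locks, queues ... */
+		struct wimax_dev wimax_dev;	/* embedded, not a pointer */
+	};
+
+	/* Recover the driver data from the wimax_dev the stack passes to a
+	 * callback. */
+	static inline struct mydrv *wimax_dev_to_mydrv(struct wimax_dev *wimax_dev)
+	{
+		return container_of(wimax_dev, struct mydrv, wimax_dev);
+	}
+
+	/* Example callback shape (assumed; see the wimax headers for the
+	 * actual operations a driver fills in before registering). */
+	static int mydrv_op_reset(struct wimax_dev *wimax_dev)
+	{
+		struct mydrv *drv = wimax_dev_to_mydrv(wimax_dev);
+
+		/* ... ask the hardware to reset ... */
+		return 0;
+	}
+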
+   The stack exports the API calls needed to control the device to user
+   space using generic netlink as a marshalling mechanism. You can access
+   them using your own code or use the wrappers provided for your
+   convenience in libwimax (in the wimax-tools package).
+
+   For detailed information on the stack, please see
+   include/linux/wimax.h.
+
+2. Usage
+
+   For usage in a driver (registration, API, etc) please refer to the
+   instructions in the header file include/linux/wimax.h.
+
+   When a device is registered with the WiMAX stack, a set of debugfs
+   files will appear in /sys/kernel/debug/wimax:wmxX which can be tweaked
+   for control.
+
+2.1. Obtaining debug information: debugfs entries
+
+   The WiMAX stack is compiled, by default, with debug messages that can
+   be used to diagnose issues. By default, said messages are disabled.
+
+   The drivers will register debugfs entries that allow the user to tweak
+   debug settings.
+
+   Each driver, when registering with the stack, will cause a debugfs
+   directory named wimax:DEVICENAME to be created; optionally, it might
+   create more subentries below it.
+
+2.1.1. Increasing debug output
+
+   The files named *dl_* indicate knobs for controlling the debug output
+   of different submodules of the WiMAX stack:
+     *
+# find /sys/kernel/debug/wimax\:wmx0 -name \*dl_\*
+/sys/kernel/debug/wimax:wmx0/wimax_dl_stack
+/sys/kernel/debug/wimax:wmx0/wimax_dl_op_rfkill
+/sys/kernel/debug/wimax:wmx0/wimax_dl_op_reset
+/sys/kernel/debug/wimax:wmx0/wimax_dl_op_msg
+/sys/kernel/debug/wimax:wmx0/wimax_dl_id_table
+/sys/kernel/debug/wimax:wmx0/wimax_dl_debugfs
+/sys/kernel/debug/wimax:wmx0/.... # other driver specific files
+
+       NOTE: Of course, if debugfs is mounted in a directory other than
+       /sys/kernel/debug, those paths will change.
+
+   By reading the file you can obtain the current value of said debug
+   level; by writing to it, you can set it.
+
+   To increase the debug level of, for example, the id-table submodule,
+   just write:
+
+$ echo 3 > /sys/kernel/debug/wimax:wmx0/wimax_dl_id_table
+
+   Increasing numbers yield increasing debug information; for details of
+   what is printed and the available levels, check the source. The code
+   uses 0 for disabled and increasing values until 8.
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index fcdc62b..7b4596a 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -44,7 +44,7 @@
 		and KEEP_SEGMENTS flag in load_flags.
 
 Protocol 2.08:	(Kernel 2.6.26) Added crc32 checksum and ELF format
-		payload. Introduced payload_offset and payload length
+		payload. Introduced payload_offset and payload_length
 		fields to aid in locating the payload.
 
 Protocol 2.09:	(Kernel 2.6.26) Added a field of 64-bit physical
diff --git a/MAINTAINERS b/MAINTAINERS
index 094dd52..3fe4dc2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1024,16 +1024,17 @@
 BTTV VIDEO4LINUX DRIVER
 P:	Mauro Carvalho Chehab
 M:	mchehab@infradead.org
-M:	v4l-dvb-maintainer@linuxtv.org
+L:	linux-media@vger.kernel.org
 L:	video4linux-list@redhat.com
 W:	http://linuxtv.org
-T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/v4l-dvb.git
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:	Maintained
 
 CAFE CMOS INTEGRATED CAMERA CONTROLLER DRIVER
 P:	Jonathan Corbet
 M:	corbet@lwn.net
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:	Maintained
 
 CALGARY x86-64 IOMMU
@@ -1261,7 +1262,8 @@
 M:	hverkuil@xs4all.nl, awalls@radix.net
 L:	ivtv-devel@ivtvdriver.org
 L:	ivtv-users@ivtvdriver.org
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 W:	http://linuxtv.org
 S:	Maintained
 
@@ -1358,6 +1360,11 @@
 M:	macro@linux-mips.org
 S:	Maintained
 
+DELL LAPTOP DRIVER
+P:	Matthew Garrett
+M:	mjg59@srcf.ucam.org
+S:	Maintained
+
 DELL LAPTOP SMM DRIVER
 P:	Massimo Dal Zotto
 M:	dz@debian.org
@@ -1487,10 +1494,10 @@
 
 DVB SUBSYSTEM AND DRIVERS
 P:	LinuxTV.org Project
-M:	v4l-dvb-maintainer@linuxtv.org
+M:	linux-media@vger.kernel.org
 L:	linux-dvb@linuxtv.org (subscription required)
 W:	http://linuxtv.org/
-T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/v4l-dvb.git
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:	Maintained
 
 DZ DECSTATION DZ11 SERIAL DRIVER
@@ -1574,6 +1581,13 @@
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 
+EDAC-I5400
+P:	Mauro Carvalho Chehab
+M:	mchehab@redhat.com
+L:	bluesmoke-devel@lists.sourceforge.net
+W:	bluesmoke.sourceforge.net
+S:	Maintained
+
 EDAC-I82975X
 P:	Ranganathan Desikan
 P:	Arvind R.
@@ -1807,6 +1821,14 @@
 W:	ftp://ftp.openlinux.org/pub/people/hch/vxfs
 S:	Maintained
 
+FREEZER
+P:	Pavel Machek
+M:	pavel@suse.cz
+P:	Rafael J. Wysocki
+M:	rjw@sisk.pl
+L:	linux-pm@lists.linux-foundation.org
+S:	Supported
+
 FTRACE
 P:	Steven Rostedt
 M:	rostedt@goodmis.org
@@ -1882,32 +1904,37 @@
 GSPCA FINEPIX SUBDRIVER
 P:	Frank Zago
 M:	frank@zago.net
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:	Maintained
 
 GSPCA M5602 SUBDRIVER
 P:	Erik Andren
 M:	erik.andren@gmail.com
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:	Maintained
 
 GSPCA PAC207 SONIXB SUBDRIVER
 P:	Hans de Goede
 M:	hdegoede@redhat.com
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:	Maintained
 
 GSPCA T613 SUBDRIVER
 P:	Leandro Costantino
 M:	lcostantino@gmail.com
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:	Maintained
 
 GSPCA USB WEBCAM DRIVER
 P:	Jean-Francois Moine
 M:	moinejf@free.fr
 W:	http://moinejf.free.fr
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:	Maintained
 
 HARDWARE MONITORING
@@ -2305,6 +2332,14 @@
 W:	http://ipw2200.sourceforge.net
 S:	Supported
 
+INTEL WIRELESS WIMAX CONNECTION 2400
+P:	Inaky Perez-Gonzalez
+M:	inaky.perez-gonzalez@intel.com
+M:	linux-wimax@intel.com
+L:	wimax@linuxwimax.org
+S:	Supported
+W:	http://linuxwimax.org
+
 INTEL WIRELESS WIFI LINK (iwlwifi)
 P:	Zhu Yi
 M:	yi.zhu@intel.com
@@ -2429,7 +2464,8 @@
 M:	hverkuil@xs4all.nl
 L:	ivtv-devel@ivtvdriver.org
 L:	ivtv-users@ivtvdriver.org
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 W:	http://www.ivtvdriver.org
 S:	Maintained
 
@@ -2982,6 +3018,7 @@
 P:	Felipe Balbi
 M:	felipe.balbi@nokia.com
 L:	linux-usb@vger.kernel.org
+T:	git gitorious.org:/musb/mainline.git
 S:	Maintained
 
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
@@ -3188,7 +3225,8 @@
 OMNIVISION OV7670 SENSOR DRIVER
 P:	Jonathan Corbet
 M:	corbet@lwn.net
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:	Maintained
 
 ONENAND FLASH DRIVER
@@ -3466,12 +3504,19 @@
 L:	cbe-oss-dev@ozlabs.org
 S:	Supported
 
+PS3VRAM DRIVER
+P:	Jim Paris
+M:	jim@jtan.com
+L:	cbe-oss-dev@ozlabs.org
+S:	Maintained
+
 PVRUSB2 VIDEO4LINUX DRIVER
 P:	Mike Isely
 M:	isely@pobox.com
 L:	pvrusb2@isely.net	(subscribers-only)
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
 W:	http://www.isely.net/pvrusb2/
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:	Maintained
 
 PXA2xx/PXA3xx SUPPORT
@@ -3691,6 +3736,8 @@
 SAA7146 VIDEO4LINUX-2 DRIVER
 P:	Michael Hunold
 M:	michael@mihu.de
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 W:	http://www.mihu.de/linux/saa7146
 S:	Maintained
 
@@ -3954,7 +4001,8 @@
 SOC-CAMERA V4L2 SUBSYSTEM
 P:	Guennadi Liakhovetski
 M:	g.liakhovetski@gmx.de
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:	Maintained
 
 SOEKRIS NET48XX LED SUPPORT
@@ -4048,6 +4096,13 @@
 W:	http://www.ibm.com/developerworks/power/cell/
 S:	Supported
 
+SQUASHFS FILE SYSTEM
+P:	Phillip Lougher
+M:	phillip@lougher.demon.co.uk
+L:	squashfs-devel@lists.sourceforge.net (subscribers-only)
+W:	http://squashfs.org.uk
+S:	Maintained
+
 SRM (Alpha) environment access
 P:	Jan-Benedict Glaw
 M:	jbglaw@lug-owl.de
@@ -4229,9 +4284,10 @@
 S:	Maintained
 
 TRIVIAL PATCHES
-P:	Jesper Juhl
+P:	Jiri Kosina
 M:	trivial@kernel.org
 L:	linux-kernel@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/jikos/trivial.git
 S:	Maintained
 
 TTY LAYER
@@ -4372,7 +4428,8 @@
 P:	Luca Risolia
 M:	luca.risolia@studio.unibo.it
 L:	linux-usb@vger.kernel.org
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 W:	http://www.linux-projects.org
 S:	Maintained
 
@@ -4521,7 +4578,8 @@
 P:	Luca Risolia
 M:	luca.risolia@studio.unibo.it
 L:	linux-usb@vger.kernel.org
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 W:	http://www.linux-projects.org
 S:	Maintained
 
@@ -4550,7 +4608,8 @@
 P:	Laurent Pinchart
 M:	laurent.pinchart@skynet.be
 L:	linux-uvc-devel@lists.berlios.de (subscribers-only)
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 W:	http://linux-uvc.berlios.de
 S:	Maintained
 
@@ -4558,7 +4617,8 @@
 P:	Luca Risolia
 M:	luca.risolia@studio.unibo.it
 L:	linux-usb@vger.kernel.org
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 W:	http://www.linux-projects.org
 S:	Maintained
 
@@ -4572,7 +4632,8 @@
 P:	Luca Risolia
 M:	luca.risolia@studio.unibo.it
 L:	linux-usb@vger.kernel.org
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 W:	http://www.linux-projects.org
 S:	Maintained
 
@@ -4587,7 +4648,8 @@
 P:	Antoine Jacquet
 M:	royale@zerezo.com
 L:	linux-usb@vger.kernel.org
-L:	video4linux-list@redhat.com
+L:	linux-media@vger.kernel.org
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 W:	http://royale.zerezo.com/zr364xx/
 S:	Maintained
 
@@ -4656,10 +4718,10 @@
 VIDEO FOR LINUX (V4L)
 P:	Mauro Carvalho Chehab
 M:	mchehab@infradead.org
-M:	v4l-dvb-maintainer@linuxtv.org
+L:	linux-media@vger.kernel.org
 L:	video4linux-list@redhat.com
 W:	http://linuxtv.org
-T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/v4l-dvb.git
+T:	git kernel.org:/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:	Maintained
 
 VLAN (802.1Q)
@@ -4732,6 +4794,14 @@
 L:	linux-scsi@vger.kernel.org
 S:	Maintained
 
+WIMAX STACK
+P:	Inaky Perez-Gonzalez
+M:	inaky.perez-gonzalez@intel.com
+M:	linux-wimax@intel.com
+L:	wimax@linuxwimax.org
+S:	Supported
+W:	http://linuxwimax.org
+
 WIMEDIA LLC PROTOCOL (WLP) SUBSYSTEM
 P:	David Vrabel
 M:	david.vrabel@csr.com
@@ -4787,11 +4857,11 @@
 
 XFS FILESYSTEM
 P:	Silicon Graphics Inc
-P:	Tim Shimmin
+P:	Bill O'Donnell
 M:	xfs-masters@oss.sgi.com
 L:	xfs@oss.sgi.com
 W:	http://oss.sgi.com/projects/xfs
-T:	git git://oss.sgi.com:8090/xfs/xfs-2.6.git
+T:	git://oss.sgi.com/xfs/xfs.git
 S:	Supported
 
 XILINX SYSTEMACE DRIVER
diff --git a/Makefile b/Makefile
index f900666..207303d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
-SUBLEVEL = 28
-EXTRAVERSION =
+SUBLEVEL = 29
+EXTRAVERSION = -rc2
 NAME = Erotic Pickled Herring
 
 # *DOCUMENTATION*
@@ -965,6 +965,7 @@
 	    mkdir -p include2;                                          \
 	    ln -fsn $(srctree)/include/asm-$(SRCARCH) include2/asm;     \
 	fi
+	ln -fsn $(srctree) source
 endif
 
 # prepare2 creates a makefile if using a separate output directory
@@ -1008,7 +1009,7 @@
 endef
 
 # We create the target directory of the symlink if it does
-# not exist so the test in chack-symlink works and we have a
+# not exist so the test in check-symlink works and we have a
 # directory for generated filesas used by some architectures.
 define create-symlink
 	if [ ! -L include/asm ]; then                                      \
diff --git a/arch/Kconfig b/arch/Kconfig
index 2e13aa2..550dab2 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -62,6 +62,9 @@
 	  See Documentation/unaligned-memory-access.txt for more
 	  information on the topic of unaligned memory accesses.
 
+config HAVE_SYSCALL_WRAPPERS
+	bool
+
 config KRETPROBES
 	def_bool y
 	depends on KPROBES && HAVE_KRETPROBES
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index 4dad273..b7c8f18 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -9,4 +9,3 @@
 unifdef-y += fpu.h
 unifdef-y += sysinfo.h
 unifdef-y += compiler.h
-unifdef-y += swab.h
diff --git a/arch/alpha/include/asm/byteorder.h b/arch/alpha/include/asm/byteorder.h
index 6772f31..7368309 100644
--- a/arch/alpha/include/asm/byteorder.h
+++ b/arch/alpha/include/asm/byteorder.h
@@ -1,7 +1,6 @@
 #ifndef _ALPHA_BYTEORDER_H
 #define _ALPHA_BYTEORDER_H
 
-#include <asm/swab.h>
 #include <linux/byteorder/little_endian.h>
 
 #endif /* _ALPHA_BYTEORDER_H */
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h
index a86c083..fea4ea7 100644
--- a/arch/alpha/include/asm/machvec.h
+++ b/arch/alpha/include/asm/machvec.h
@@ -21,6 +21,7 @@
 struct pci_ops;
 struct pci_controller;
 struct _alpha_agp_info;
+struct rtc_time;
 
 struct alpha_machine_vector
 {
@@ -94,6 +95,9 @@
 
 	struct _alpha_agp_info *(*agp_info)(void);
 
+	unsigned int (*rtc_get_time)(struct rtc_time *);
+	int (*rtc_set_time)(struct rtc_time *);
+
 	const char *vector_name;
 
 	/* NUMA information */
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index fd09015..bc2a0da 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -50,7 +50,12 @@
 	free_page((unsigned long)pmd);
 }
 
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
+static inline pte_t *
+pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	return pte;
+}
 
 static inline void
 pte_free_kernel(struct mm_struct *mm, pte_t *pte)
diff --git a/arch/alpha/include/asm/rtc.h b/arch/alpha/include/asm/rtc.h
index 4e854b1..1f7fba6 100644
--- a/arch/alpha/include/asm/rtc.h
+++ b/arch/alpha/include/asm/rtc.h
@@ -1,9 +1,15 @@
 #ifndef _ALPHA_RTC_H
 #define _ALPHA_RTC_H
 
-/*
- * Alpha uses the default access methods for the RTC.
- */
+#if defined(CONFIG_ALPHA_GENERIC)
+# define get_rtc_time		alpha_mv.rtc_get_time
+# define set_rtc_time		alpha_mv.rtc_set_time
+#else
+# if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP)
+#  define get_rtc_time		marvel_get_rtc_time
+#  define set_rtc_time		marvel_set_rtc_time
+# endif
+#endif
 
 #include <asm-generic/rtc.h>
 
diff --git a/arch/alpha/kernel/.gitignore b/arch/alpha/kernel/.gitignore
new file mode 100644
index 0000000..c5f676c
--- /dev/null
+++ b/arch/alpha/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index 9cd8dca..e302dae 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -658,16 +658,8 @@
 		rtc_access.data = bcd2bin(b);
 		rtc_access.function = 0x48 + !write;	/* GET/PUT_TOY */
 
-#ifdef CONFIG_SMP
-		if (smp_processor_id() != boot_cpuid)
-			smp_call_function_single(boot_cpuid,
-						 __marvel_access_rtc,
-						 &rtc_access, 1);
-		else
-			__marvel_access_rtc(&rtc_access);
-#else
 		__marvel_access_rtc(&rtc_access);
-#endif
+
 		ret = bin2bcd(rtc_access.data);
 		break;
 
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index f77345b..aa2e50c 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -896,9 +896,9 @@
 .end sys_getxpid
 
 	.align	4
-	.globl	sys_pipe
-	.ent	sys_pipe
-sys_pipe:
+	.globl	sys_alpha_pipe
+	.ent	sys_alpha_pipe
+sys_alpha_pipe:
 	lda	$sp, -16($sp)
 	stq	$26, 0($sp)
 	.prologue 0
@@ -916,7 +916,7 @@
 	stq	$1, 80+16($sp)
 1:	lda	$sp, 16($sp)
 	ret
-.end sys_pipe
+.end sys_alpha_pipe
 
 	.align	4
 	.globl	sys_execve
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
index 3221201..a03fbca 100644
--- a/arch/alpha/kernel/irq_srm.c
+++ b/arch/alpha/kernel/irq_srm.c
@@ -63,6 +63,8 @@
 {
 	long i;
 
+	if (NR_IRQS <= 16)
+		return;
 	for (i = 16; i < max; ++i) {
 		if (i < 64 && ((ignore_mask >> i) & 1))
 			continue;
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h
index 466c9df..512685f 100644
--- a/arch/alpha/kernel/machvec_impl.h
+++ b/arch/alpha/kernel/machvec_impl.h
@@ -40,7 +40,10 @@
 #define CAT1(x,y)  x##y
 #define CAT(x,y)   CAT1(x,y)
 
-#define DO_DEFAULT_RTC .rtc_port = 0x70
+#define DO_DEFAULT_RTC \
+	.rtc_port = 0x70, \
+	.rtc_get_time = common_get_rtc_time, \
+	.rtc_set_time = common_set_rtc_time
 
 #define DO_EV4_MMU							\
 	.max_asn =			EV4_MAX_ASN,			\
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index ff8cb63..a3b9388 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -320,24 +320,6 @@
 	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
 }
 
-/* Most Alphas have straight-forward swizzling needs.  */
-
-u8 __init
-common_swizzle(struct pci_dev *dev, u8 *pinp)
-{
-	u8 pin = *pinp;
-
-	while (dev->bus->parent) {
-		pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
-		/* Move up the chain of bridges. */
-		dev = dev->bus->self;
-        }
-	*pinp = pin;
-
-	/* The slot is the slot of the last bridge. */
-	return PCI_SLOT(dev->devfn);
-}
-
 void
 pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
 			 struct resource *res)
diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h
index f8b7499..00edd04 100644
--- a/arch/alpha/kernel/pci_impl.h
+++ b/arch/alpha/kernel/pci_impl.h
@@ -106,16 +106,11 @@
  *   Where A = pin 1, B = pin 2 and so on and pin=0 = default = A.
  *   Thus, each swizzle is ((pin-1) + (device#-4)) % 4
  *
- *   The following code swizzles for exactly one bridge.  The routine
- *   common_swizzle below handles multiple bridges.  But there are a
- *   couple boards that do strange things, so we define this here.
+ *   pci_swizzle_interrupt_pin() swizzles for exactly one bridge.  The routine
+ *   pci_common_swizzle() handles multiple bridges.  But there are a
+ *   couple boards that do strange things.
  */
 
-static inline u8 bridge_swizzle(u8 pin, u8 slot) 
-{
-	return (((pin-1) + slot) % 4) + 1;
-}
-
 
 /* The following macro is used to implement the table-based irq mapping
    function for all single-bus Alphas.  */
@@ -184,7 +179,7 @@
 extern unsigned long alpha_agpgart_size;
 
 extern void common_init_pci(void);
-extern u8 common_swizzle(struct pci_dev *, u8 *);
+#define common_swizzle pci_common_swizzle
 extern struct pci_controller *alloc_pci_controller(void);
 extern struct resource *alloc_resource(void);
 
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index 708d5ca..fe14c67 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -145,6 +145,8 @@
 extern irqreturn_t timer_interrupt(int irq, void *dev);
 extern void common_init_rtc(void);
 extern unsigned long est_cycle_freq;
+extern unsigned int common_get_rtc_time(struct rtc_time *time);
+extern int common_set_rtc_time(struct rtc_time *time);
 
 /* smc37c93x.c */
 extern void SMC93x_Init(void);
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index ab44c16..9c9d1fd 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -481,7 +481,7 @@
 				slot = PCI_SLOT(dev->devfn);
 				break;
 			}
-			pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ;
+			pin = pci_swizzle_interrupt_pin(dev, pin);
 
 			/* Move up the chain of bridges.  */
 			dev = dev->bus->self;
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 7ef3b6f..baf60f3 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -204,7 +204,7 @@
 			break;
 		}
 		/* Must be a card-based bridge.  */
-		pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
+		pin = pci_swizzle_interrupt_pin(dev, pin);
 
 		/* Move up the chain of bridges.  */
 		dev = dev->bus->self;
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index 2c3de97..e2516f9 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -261,6 +261,8 @@
 	.machine_check		= jensen_machine_check,
 	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
 	.rtc_port		= 0x170,
+	.rtc_get_time		= common_get_rtc_time,
+	.rtc_set_time		= common_set_rtc_time,
 
 	.nr_irqs		= 16,
 	.device_interrupt	= jensen_device_interrupt,
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 828449c..c5a1a24 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -23,6 +23,7 @@
 #include <asm/hwrpb.h>
 #include <asm/tlbflush.h>
 #include <asm/vga.h>
+#include <asm/rtc.h>
 
 #include "proto.h"
 #include "err_impl.h"
@@ -426,6 +427,57 @@
 	init_rtc_irq();
 }
 
+struct marvel_rtc_time {
+	struct rtc_time *time;
+	int retval;
+};
+
+#ifdef CONFIG_SMP
+static void
+smp_get_rtc_time(void *data)
+{
+	struct marvel_rtc_time *mrt = data;
+	mrt->retval = __get_rtc_time(mrt->time);
+}
+
+static void
+smp_set_rtc_time(void *data)
+{
+	struct marvel_rtc_time *mrt = data;
+	mrt->retval = __set_rtc_time(mrt->time);
+}
+#endif
+
+static unsigned int
+marvel_get_rtc_time(struct rtc_time *time)
+{
+#ifdef CONFIG_SMP
+	struct marvel_rtc_time mrt;
+
+	if (smp_processor_id() != boot_cpuid) {
+		mrt.time = time;
+		smp_call_function_single(boot_cpuid, smp_get_rtc_time, &mrt, 1);
+		return mrt.retval;
+	}
+#endif
+	return __get_rtc_time(time);
+}
+
+static int
+marvel_set_rtc_time(struct rtc_time *time)
+{
+#ifdef CONFIG_SMP
+	struct marvel_rtc_time mrt;
+
+	if (smp_processor_id() != boot_cpuid) {
+		mrt.time = time;
+		smp_call_function_single(boot_cpuid, smp_set_rtc_time, &mrt, 1);
+		return mrt.retval;
+	}
+#endif
+	return __set_rtc_time(time);
+}
+
 static void
 marvel_smp_callin(void)
 {
@@ -466,7 +518,9 @@
 struct alpha_machine_vector marvel_ev7_mv __initmv = {
 	.vector_name		= "MARVEL/EV7",
 	DO_EV7_MMU,
-	DO_DEFAULT_RTC,
+	.rtc_port		= 0x70,
+	.rtc_get_time		= marvel_get_rtc_time,
+	.rtc_set_time		= marvel_set_rtc_time,
 	DO_MARVEL_IO,
 	.machine_check		= marvel_machine_check,
 	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c
index 910b43c..61ccd95 100644
--- a/arch/alpha/kernel/sys_miata.c
+++ b/arch/alpha/kernel/sys_miata.c
@@ -219,7 +219,7 @@
 				slot = PCI_SLOT(dev->devfn) + 9;
 				break;
 			}
-			pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
+			pin = pci_swizzle_interrupt_pin(dev, pin);
 
 			/* Move up the chain of bridges.  */
 			dev = dev->bus->self;
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index a7f23b5..99c0f46 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -245,6 +245,10 @@
 		IRONGATE0->pci_mem = pci_mem;
 
 	pci_bus_assign_resources(bus);
+
+	/* pci_common_swizzle() relies on bus->self being NULL
+	   for the root bus, so just clear it. */
+	bus->self = NULL;
 	pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq);
 }
 
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index eb2a1d6..538876b 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -257,7 +257,7 @@
 				slot = PCI_SLOT(dev->devfn) + 15;
 				break;
 			}
-			pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ;
+			pin = pci_swizzle_interrupt_pin(dev, pin);
 
 			/* Move up the chain of bridges.  */
 			dev = dev->bus->self;
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c
index 5b99cf3..f15a329 100644
--- a/arch/alpha/kernel/sys_ruffian.c
+++ b/arch/alpha/kernel/sys_ruffian.c
@@ -160,7 +160,7 @@
 				slot = PCI_SLOT(dev->devfn) + 10;
 				break;
 			}
-			pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
+			pin = pci_swizzle_interrupt_pin(dev, pin);
 
 			/* Move up the chain of bridges.  */
 			dev = dev->bus->self;
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index a4555f4..d232e42 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -425,7 +425,7 @@
 				slot = PCI_SLOT(dev->devfn) + 11;
 				break;
 			}
-			pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)) ;
+			pin = pci_swizzle_interrupt_pin(dev, pin);
 
 			/* Move up the chain of bridges.  */
 			dev = dev->bus->self;
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index ba914af..9d9e3a9 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -52,7 +52,7 @@
 	.quad sys_setpgid
 	.quad alpha_ni_syscall			/* 40 */
 	.quad sys_dup
-	.quad sys_pipe
+	.quad sys_alpha_pipe
 	.quad osf_set_program_attributes
 	.quad alpha_ni_syscall
 	.quad sys_open				/* 45 */
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index e6a2314..b04e2cb 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -46,6 +46,7 @@
 #include <asm/io.h>
 #include <asm/hwrpb.h>
 #include <asm/8253pit.h>
+#include <asm/rtc.h>
 
 #include <linux/mc146818rtc.h>
 #include <linux/time.h>
@@ -180,6 +181,15 @@
 	init_rtc_irq();
 }
 
+unsigned int common_get_rtc_time(struct rtc_time *time)
+{
+	return __get_rtc_time(time);
+}
+
+int common_set_rtc_time(struct rtc_time *time)
+{
+	return __set_rtc_time(time);
+}
 
 /* Validate a computed cycle counter result against the known bounds for
    the given processor core.  There's too much brokenness in the way of
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 234e42b..5d7a16e 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -59,13 +59,6 @@
 	return ret;
 }
 
-pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-	return pte;
-}
-
 
 /*
  * BAD_PAGE is the page that is used for page faults when linux
diff --git a/arch/arm/configs/clps7500_defconfig b/arch/arm/configs/clps7500_defconfig
deleted file mode 100644
index 49e9f9d..0000000
--- a/arch/arm/configs/clps7500_defconfig
+++ /dev/null
@@ -1,801 +0,0 @@
-#
-# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.12-rc1-bk2
-# Sun Mar 27 17:20:48 2005
-#
-CONFIG_ARM=y
-CONFIG_MMU=y
-CONFIG_UID16=y
-CONFIG_RWSEM_GENERIC_SPINLOCK=y
-CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_GENERIC_IOMAP=y
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
-CONFIG_BROKEN_ON_SMP=y
-
-#
-# General setup
-#
-CONFIG_LOCALVERSION=""
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-# CONFIG_POSIX_MQUEUE is not set
-# CONFIG_BSD_PROCESS_ACCT is not set
-# CONFIG_SYSCTL is not set
-# CONFIG_AUDIT is not set
-# CONFIG_HOTPLUG is not set
-CONFIG_KOBJECT_UEVENT=y
-# CONFIG_IKCONFIG is not set
-CONFIG_EMBEDDED=y
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-CONFIG_BASE_FULL=y
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_SHMEM=y
-CONFIG_CC_ALIGN_FUNCTIONS=0
-CONFIG_CC_ALIGN_LABELS=0
-CONFIG_CC_ALIGN_LOOPS=0
-CONFIG_CC_ALIGN_JUMPS=0
-# CONFIG_TINY_SHMEM is not set
-CONFIG_BASE_SMALL=0
-
-#
-# Loadable module support
-#
-# CONFIG_MODULES is not set
-
-#
-# System Type
-#
-CONFIG_ARCH_CLPS7500=y
-# CONFIG_ARCH_CLPS711X is not set
-# CONFIG_ARCH_CO285 is not set
-# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_FOOTBRIDGE is not set
-# CONFIG_ARCH_INTEGRATOR is not set
-# CONFIG_ARCH_IOP3XX is not set
-# CONFIG_ARCH_IXP4XX is not set
-# CONFIG_ARCH_IXP2000 is not set
-# CONFIG_ARCH_L7200 is not set
-# CONFIG_ARCH_PXA is not set
-# CONFIG_ARCH_RPC is not set
-# CONFIG_ARCH_SA1100 is not set
-# CONFIG_ARCH_S3C2410 is not set
-# CONFIG_ARCH_SHARK is not set
-# CONFIG_ARCH_LH7A40X is not set
-# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_VERSATILE is not set
-# CONFIG_ARCH_IMX is not set
-# CONFIG_ARCH_H720X is not set
-
-#
-# Processor Type
-#
-CONFIG_CPU_32=y
-CONFIG_CPU_ARM710=y
-CONFIG_CPU_32v3=y
-CONFIG_CPU_CACHE_V3=y
-CONFIG_CPU_CACHE_VIVT=y
-CONFIG_CPU_COPY_V3=y
-CONFIG_CPU_TLB_V3=y
-
-#
-# Processor Features
-#
-CONFIG_TIMER_ACORN=y
-
-#
-# Bus support
-#
-CONFIG_ISA=y
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
-# CONFIG_PCCARD is not set
-
-#
-# Kernel Features
-#
-# CONFIG_PREEMPT is not set
-CONFIG_ALIGNMENT_TRAP=y
-
-#
-# Boot options
-#
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=16M root=nfs"
-# CONFIG_XIP_KERNEL is not set
-
-#
-# Floating point emulation
-#
-
-#
-# At least one emulation must be selected
-#
-# CONFIG_FPE_NWFPE is not set
-
-#
-# Userspace binary formats
-#
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_AOUT is not set
-# CONFIG_BINFMT_MISC is not set
-# CONFIG_ARTHUR is not set
-
-#
-# Power management options
-#
-# CONFIG_PM is not set
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
-
-#
-# Memory Technology Devices (MTD)
-#
-CONFIG_MTD=y
-# CONFIG_MTD_DEBUG is not set
-# CONFIG_MTD_CONCAT is not set
-# CONFIG_MTD_PARTITIONS is not set
-
-#
-# User Modules And Translation Layers
-#
-# CONFIG_MTD_CHAR is not set
-# CONFIG_MTD_BLOCK is not set
-# CONFIG_MTD_BLOCK_RO is not set
-# CONFIG_FTL is not set
-# CONFIG_NFTL is not set
-# CONFIG_INFTL is not set
-
-#
-# RAM/ROM/Flash chip drivers
-#
-# CONFIG_MTD_CFI is not set
-# CONFIG_MTD_JEDECPROBE is not set
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-# CONFIG_MTD_CFI_I4 is not set
-# CONFIG_MTD_CFI_I8 is not set
-# CONFIG_MTD_RAM is not set
-# CONFIG_MTD_ROM is not set
-# CONFIG_MTD_ABSENT is not set
-
-#
-# Mapping drivers for chip access
-#
-# CONFIG_MTD_COMPLEX_MAPPINGS is not set
-
-#
-# Self-contained MTD device drivers
-#
-# CONFIG_MTD_SLRAM is not set
-# CONFIG_MTD_PHRAM is not set
-# CONFIG_MTD_MTDRAM is not set
-# CONFIG_MTD_BLKMTD is not set
-# CONFIG_MTD_BLOCK2MTD is not set
-
-#
-# Disk-On-Chip Device Drivers
-#
-# CONFIG_MTD_DOC2000 is not set
-# CONFIG_MTD_DOC2001 is not set
-# CONFIG_MTD_DOC2001PLUS is not set
-
-#
-# NAND Flash Device Drivers
-#
-# CONFIG_MTD_NAND is not set
-
-#
-# Parallel port support
-#
-CONFIG_PARPORT=y
-CONFIG_PARPORT_PC=y
-CONFIG_PARPORT_PC_FIFO=y
-# CONFIG_PARPORT_PC_SUPERIO is not set
-# CONFIG_PARPORT_ARC is not set
-# CONFIG_PARPORT_GSC is not set
-CONFIG_PARPORT_1284=y
-
-#
-# Plug and Play support
-#
-# CONFIG_PNP is not set
-
-#
-# Block devices
-#
-# CONFIG_BLK_DEV_FD is not set
-# CONFIG_BLK_DEV_XD is not set
-# CONFIG_PARIDE is not set
-# CONFIG_BLK_DEV_COW_COMMON is not set
-# CONFIG_BLK_DEV_LOOP is not set
-CONFIG_BLK_DEV_NBD=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=4096
-# CONFIG_BLK_DEV_INITRD is not set
-CONFIG_INITRAMFS_SOURCE=""
-# CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_ATA_OVER_ETH is not set
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-# CONFIG_SCSI is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-# CONFIG_MD is not set
-
-#
-# Fusion MPT device support
-#
-
-#
-# IEEE 1394 (FireWire) support
-#
-
-#
-# I2O device support
-#
-
-#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-# CONFIG_PACKET is not set
-# CONFIG_NETLINK_DEV is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_PNP=y
-# CONFIG_IP_PNP_DHCP is not set
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_TUNNEL is not set
-CONFIG_IP_TCPDIAG=y
-# CONFIG_IP_TCPDIAG_IPV6 is not set
-# CONFIG_IPV6 is not set
-# CONFIG_NETFILTER is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-
-#
-# ARCnet devices
-#
-# CONFIG_ARCNET is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
-CONFIG_NET_ETHERNET=y
-# CONFIG_MII is not set
-# CONFIG_NET_VENDOR_3COM is not set
-# CONFIG_LANCE is not set
-# CONFIG_NET_VENDOR_SMC is not set
-# CONFIG_SMC91X is not set
-# CONFIG_NET_VENDOR_RACAL is not set
-# CONFIG_AT1700 is not set
-# CONFIG_DEPCA is not set
-# CONFIG_HP100 is not set
-# CONFIG_NET_ISA is not set
-CONFIG_NET_PCI=y
-# CONFIG_AC3200 is not set
-# CONFIG_APRICOT is not set
-CONFIG_CS89x0=y
-# CONFIG_NET_POCKET is not set
-
-#
-# Ethernet (1000 Mbit)
-#
-
-#
-# Ethernet (10000 Mbit)
-#
-
-#
-# Token Ring devices
-#
-# CONFIG_TR is not set
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-# CONFIG_PLIP is not set
-CONFIG_PPP=y
-# CONFIG_PPP_MULTILINK is not set
-# CONFIG_PPP_FILTER is not set
-# CONFIG_PPP_ASYNC is not set
-# CONFIG_PPP_SYNC_TTY is not set
-# CONFIG_PPP_DEFLATE is not set
-# CONFIG_PPP_BSDCOMP is not set
-# CONFIG_PPPOE is not set
-CONFIG_SLIP=y
-CONFIG_SLIP_COMPRESSED=y
-# CONFIG_SLIP_SMART is not set
-# CONFIG_SLIP_MODE_SLIP6 is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-
-#
-# ISDN subsystem
-#
-# CONFIG_ISDN is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input Device Drivers
-#
-CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_SUNKBD is not set
-# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
-# CONFIG_KEYBOARD_NEWTON is not set
-CONFIG_INPUT_MOUSE=y
-CONFIG_MOUSE_PS2=y
-# CONFIG_MOUSE_SERIAL is not set
-# CONFIG_MOUSE_INPORT is not set
-# CONFIG_MOUSE_LOGIBM is not set
-# CONFIG_MOUSE_PC110PAD is not set
-# CONFIG_MOUSE_VSXXXAA is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
-
-#
-# Hardware I/O ports
-#
-CONFIG_SERIO=y
-# CONFIG_SERIO_SERPORT is not set
-# CONFIG_SERIO_PARKBD is not set
-CONFIG_SERIO_RPCKBD=y
-CONFIG_SERIO_LIBPS2=y
-# CONFIG_SERIO_RAW is not set
-# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
-
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=4
-# CONFIG_SERIAL_8250_EXTENDED is not set
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-CONFIG_PRINTER=y
-# CONFIG_LP_CONSOLE is not set
-# CONFIG_PPDEV is not set
-# CONFIG_TIPAR is not set
-
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
-# CONFIG_NVRAM is not set
-# CONFIG_RTC is not set
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-
-#
-# Ftape, the floppy tape device driver
-#
-# CONFIG_DRM is not set
-# CONFIG_RAW_DRIVER is not set
-
-#
-# TPM devices
-#
-# CONFIG_TCG_TPM is not set
-
-#
-# I2C support
-#
-CONFIG_I2C=y
-# CONFIG_I2C_CHARDEV is not set
-
-#
-# I2C Algorithms
-#
-CONFIG_I2C_ALGOBIT=y
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
-
-#
-# I2C Hardware Bus support
-#
-# CONFIG_I2C_ELEKTOR is not set
-# CONFIG_I2C_PARPORT is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
-# CONFIG_I2C_PCA_ISA is not set
-
-#
-# Hardware Sensors Chip support
-#
-# CONFIG_I2C_SENSOR is not set
-# CONFIG_SENSORS_ADM1021 is not set
-# CONFIG_SENSORS_ADM1025 is not set
-# CONFIG_SENSORS_ADM1026 is not set
-# CONFIG_SENSORS_ADM1031 is not set
-# CONFIG_SENSORS_ASB100 is not set
-# CONFIG_SENSORS_DS1621 is not set
-# CONFIG_SENSORS_FSCHER is not set
-# CONFIG_SENSORS_FSCPOS is not set
-# CONFIG_SENSORS_GL518SM is not set
-# CONFIG_SENSORS_GL520SM is not set
-# CONFIG_SENSORS_IT87 is not set
-# CONFIG_SENSORS_LM63 is not set
-# CONFIG_SENSORS_LM75 is not set
-# CONFIG_SENSORS_LM77 is not set
-# CONFIG_SENSORS_LM78 is not set
-# CONFIG_SENSORS_LM80 is not set
-# CONFIG_SENSORS_LM83 is not set
-# CONFIG_SENSORS_LM85 is not set
-# CONFIG_SENSORS_LM87 is not set
-# CONFIG_SENSORS_LM90 is not set
-# CONFIG_SENSORS_MAX1619 is not set
-# CONFIG_SENSORS_PC87360 is not set
-# CONFIG_SENSORS_SMSC47B397 is not set
-# CONFIG_SENSORS_SMSC47M1 is not set
-# CONFIG_SENSORS_W83781D is not set
-# CONFIG_SENSORS_W83L785TS is not set
-# CONFIG_SENSORS_W83627HF is not set
-
-#
-# Other I2C Chip support
-#
-# CONFIG_SENSORS_EEPROM is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_SENSORS_RTC8564 is not set
-# CONFIG_I2C_DEBUG_CORE is not set
-# CONFIG_I2C_DEBUG_ALGO is not set
-# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
-
-#
-# Misc devices
-#
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-
-#
-# Digital Video Broadcasting Devices
-#
-# CONFIG_DVB is not set
-
-#
-# Graphics support
-#
-CONFIG_FB=y
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
-CONFIG_FB_SOFT_CURSOR=y
-# CONFIG_FB_MODE_HELPERS is not set
-# CONFIG_FB_TILEBLITTING is not set
-CONFIG_FB_ACORN=y
-# CONFIG_FB_VIRTUAL is not set
-
-#
-# Console display driver support
-#
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_MDA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-# CONFIG_FONT_6x11 is not set
-# CONFIG_FONT_PEARL_8x8 is not set
-# CONFIG_FONT_ACORN_8x8 is not set
-# CONFIG_FONT_MINI_4x6 is not set
-# CONFIG_FONT_SUN8x16 is not set
-# CONFIG_FONT_SUN12x22 is not set
-
-#
-# Logo configuration
-#
-CONFIG_LOGO=y
-CONFIG_LOGO_LINUX_MONO=y
-CONFIG_LOGO_LINUX_VGA16=y
-CONFIG_LOGO_LINUX_CLUT224=y
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-
-#
-# USB support
-#
-CONFIG_USB_ARCH_HAS_HCD=y
-# CONFIG_USB_ARCH_HAS_OHCI is not set
-# CONFIG_USB is not set
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-
-#
-# MMC/SD Card support
-#
-# CONFIG_MMC is not set
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-# CONFIG_EXT3_FS is not set
-# CONFIG_JBD is not set
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-
-#
-# XFS support
-#
-# CONFIG_XFS_FS is not set
-CONFIG_MINIX_FS=y
-# CONFIG_ROMFS_FS is not set
-# CONFIG_QUOTA is not set
-CONFIG_DNOTIFY=y
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-# CONFIG_DEVPTS_FS_XATTR is not set
-# CONFIG_TMPFS is not set
-# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-# CONFIG_JFFS_FS is not set
-# CONFIG_JFFS2_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-CONFIG_NFS_FS=y
-# CONFIG_NFS_V3 is not set
-# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
-# CONFIG_NFSD is not set
-CONFIG_ROOT_NFS=y
-CONFIG_LOCKD=y
-CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
-# CONFIG_RPCSEC_GSS_SPKM3 is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
-# CONFIG_MSDOS_PARTITION is not set
-# CONFIG_LDM_PARTITION is not set
-# CONFIG_SGI_PARTITION is not set
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-# CONFIG_EFI_PARTITION is not set
-
-#
-# Native Language Support
-#
-# CONFIG_NLS is not set
-
-#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-
-#
-# Kernel hacking
-#
-# CONFIG_PRINTK_TIME is not set
-# CONFIG_DEBUG_KERNEL is not set
-CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_FRAME_POINTER=y
-# CONFIG_DEBUG_USER is not set
-
-#
-# Security options
-#
-# CONFIG_KEYS is not set
-# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-# CONFIG_CRYPTO is not set
-
-#
-# Hardware crypto devices
-#
-
-#
-# Library routines
-#
-# CONFIG_CRC_CCITT is not set
-CONFIG_CRC32=y
-# CONFIG_LIBCRC32C is not set
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 43b0b2b..73237bd 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -1,4 +1,3 @@
 include include/asm-generic/Kbuild.asm
 
 unifdef-y += hwcap.h
-unifdef-y += swab.h
diff --git a/arch/arm/include/asm/byteorder.h b/arch/arm/include/asm/byteorder.h
index c02b6fc..7737974 100644
--- a/arch/arm/include/asm/byteorder.h
+++ b/arch/arm/include/asm/byteorder.h
@@ -15,8 +15,6 @@
 #ifndef __ASM_ARM_BYTEORDER_H
 #define __ASM_ARM_BYTEORDER_H
 
-#include <asm/swab.h>
-
 #ifdef __ARMEB__
 #include <linux/byteorder/big_endian.h>
 #else
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 32da1ae..a38bdc7 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -42,7 +42,7 @@
 /*
  * This is the standard PCI-PCI bridge swizzling algorithm.
  */
-u8 pci_std_swizzle(struct pci_dev *dev, u8 *pinp);
+#define pci_std_swizzle pci_common_swizzle
 
 /*
  * Call this with your hw_pci struct to initialise the PCI system.
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 53099d4..b561584 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -24,7 +24,6 @@
  *  modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
  */
 typedef struct {
-	struct vm_list_struct	*vmlist;
 	unsigned long		end_brk;
 } mm_context_t;
 
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 17a59b6..8096819 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -480,33 +480,6 @@
 #endif
 
 /*
- * This is the standard PCI-PCI bridge swizzling algorithm:
- *
- *   Dev: 0  1  2  3
- *    A   A  B  C  D
- *    B   B  C  D  A
- *    C   C  D  A  B
- *    D   D  A  B  C
- *        ^^^^^^^^^^ irq pin on bridge
- */
-u8 __devinit pci_std_swizzle(struct pci_dev *dev, u8 *pinp)
-{
-	int pin = *pinp - 1;
-
-	while (dev->bus->self) {
-		pin = (pin + PCI_SLOT(dev->devfn)) & 3;
-		/*
-		 * move up the chain of bridges,
-		 * swizzling as we go.
-		 */
-		dev = dev->bus->self;
-	}
-	*pinp = pin + 1;
-
-	return PCI_SLOT(dev->devfn);
-}
-
-/*
  * Swizzle the device pin each time we cross a bridge.
  * This might update pin and returns the slot number.
  */
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 09a061c..9ca8d13 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -98,7 +98,7 @@
 		CALL(sys_uselib)
 		CALL(sys_swapon)
 		CALL(sys_reboot)
-		CALL(OBSOLETE(old_readdir))	/* used by libc4 */
+		CALL(OBSOLETE(sys_old_readdir))	/* used by libc4 */
 /* 90 */	CALL(OBSOLETE(old_mmap))	/* used by libc4 */
 		CALL(sys_munmap)
 		CALL(sys_truncate)
diff --git a/arch/arm/kernel/isa.c b/arch/arm/kernel/isa.c
index 50a30bc..8ac9b84 100644
--- a/arch/arm/kernel/isa.c
+++ b/arch/arm/kernel/isa.c
@@ -16,6 +16,7 @@
 #include <linux/fs.h>
 #include <linux/sysctl.h>
 #include <linux/init.h>
+#include <linux/io.h>
 
 static unsigned int isa_membase, isa_portbase, isa_portshift;
 
diff --git a/arch/arm/mach-at91/at91cap9.c b/arch/arm/mach-at91/at91cap9.c
index 0a38c69..7337617 100644
--- a/arch/arm/mach-at91/at91cap9.c
+++ b/arch/arm/mach-at91/at91cap9.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/pm.h>
 
+#include <asm/irq.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 28594fc..2e9ecad 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -12,6 +12,7 @@
 
 #include <linux/module.h>
 
+#include <asm/irq.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <mach/at91rm9200.h>
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index accb69e..0894f10 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/pm.h>
 
+#include <asm/irq.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <mach/cpu.h>
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index 7b51a59..3acd7d7 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/pm.h>
 
+#include <asm/irq.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <mach/at91sam9261.h>
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index ada4b67..942792d 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/pm.h>
 
+#include <asm/irq.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <mach/at91sam9263.h>
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index 252e954..211c5c1 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/pm.h>
 
+#include <asm/irq.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <mach/cpu.h>
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index 9b937ee..35e12a4 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -29,6 +29,7 @@
 #include <mach/hardware.h>
 #include <mach/board.h>
 #include <mach/gpio.h>
+#include <mach/at91sam9_smc.h>
 #include <mach/at91_shdwc.h>
 
 #include "sam9_smc.h"
diff --git a/arch/arm/mach-clps711x/edb7211-mm.c b/arch/arm/mach-clps711x/edb7211-mm.c
index c58e32e..0bea145 100644
--- a/arch/arm/mach-clps711x/edb7211-mm.c
+++ b/arch/arm/mach-clps711x/edb7211-mm.c
@@ -24,7 +24,6 @@
 
 #include <mach/hardware.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/sizes.h>
  
 #include <asm/mach/map.h>
diff --git a/arch/arm/mach-clps711x/fortunet.c b/arch/arm/mach-clps711x/fortunet.c
index 7122b3d..7430e40 100644
--- a/arch/arm/mach-clps711x/fortunet.c
+++ b/arch/arm/mach-clps711x/fortunet.c
@@ -24,7 +24,6 @@
 #include <linux/initrd.h>
 
 #include <mach/hardware.h>
-#include <asm/irq.h>
 #include <asm/setup.h>
 #include <asm/mach-types.h>
 
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 3d4b1de..808633f 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -20,6 +20,7 @@
 
 #include <mach/hardware.h>
 #include <mach/i2c.h>
+#include <mach/irqs.h>
 
 static struct resource i2c_resources[] = {
 	{
diff --git a/arch/arm/mach-davinci/include/mach/gpio.h b/arch/arm/mach-davinci/include/mach/gpio.h
index b3a2961..b456f07 100644
--- a/arch/arm/mach-davinci/include/mach/gpio.h
+++ b/arch/arm/mach-davinci/include/mach/gpio.h
@@ -16,6 +16,7 @@
 #include <linux/io.h>
 #include <asm-generic/gpio.h>
 #include <mach/hardware.h>
+#include <mach/irqs.h>
 
 /*
  * basic gpio routines
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 36ff06d..b97f529 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -28,12 +28,17 @@
 
 #include "common.h"
 
-extern void __init isa_init_irq(unsigned int irq);
-
 unsigned int mem_fclk_21285 = 50000000;
 
 EXPORT_SYMBOL(mem_fclk_21285);
 
+static void __init early_fclk(char **arg)
+{
+	mem_fclk_21285 = simple_strtoul(*arg, arg, 0);
+}
+
+__early_param("mem_fclk_21285=", early_fclk);
+
 static int __init parse_tag_memclk(const struct tag *tag)
 {
 	mem_fclk_21285 = tag->u.memclk.fmemclk;
diff --git a/arch/arm/mach-footbridge/common.h b/arch/arm/mach-footbridge/common.h
index 580e31b..b05e662 100644
--- a/arch/arm/mach-footbridge/common.h
+++ b/arch/arm/mach-footbridge/common.h
@@ -7,3 +7,4 @@
 extern void footbridge_map_io(void);
 extern void footbridge_init_irq(void);
 
+extern void isa_init_irq(unsigned int irq);
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index 1330860..3ffa548 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -287,6 +287,9 @@
 	return pci_scan_bus(0, &dc21285_ops, sys);
 }
 
+#define dc21285_request_irq(_a, _b, _c, _d, _e) \
+	WARN_ON(request_irq(_a, _b, _c, _d, _e) < 0)
+
 void __init dc21285_preinit(void)
 {
 	unsigned int mem_size, mem_mask;
@@ -335,16 +338,16 @@
 	/*
 	 * We don't care if these fail.
 	 */
-	request_irq(IRQ_PCI_SERR, dc21285_serr_irq, IRQF_DISABLED,
-		    "PCI system error", &serr_timer);
-	request_irq(IRQ_PCI_PERR, dc21285_parity_irq, IRQF_DISABLED,
-		    "PCI parity error", &perr_timer);
-	request_irq(IRQ_PCI_ABORT, dc21285_abort_irq, IRQF_DISABLED,
-		    "PCI abort", NULL);
-	request_irq(IRQ_DISCARD_TIMER, dc21285_discard_irq, IRQF_DISABLED,
-		    "Discard timer", NULL);
-	request_irq(IRQ_PCI_DPERR, dc21285_dparity_irq, IRQF_DISABLED,
-		    "PCI data parity", NULL);
+	dc21285_request_irq(IRQ_PCI_SERR, dc21285_serr_irq, IRQF_DISABLED,
+			    "PCI system error", &serr_timer);
+	dc21285_request_irq(IRQ_PCI_PERR, dc21285_parity_irq, IRQF_DISABLED,
+			    "PCI parity error", &perr_timer);
+	dc21285_request_irq(IRQ_PCI_ABORT, dc21285_abort_irq, IRQF_DISABLED,
+			    "PCI abort", NULL);
+	dc21285_request_irq(IRQ_DISCARD_TIMER, dc21285_discard_irq, IRQF_DISABLED,
+			    "Discard timer", NULL);
+	dc21285_request_irq(IRQ_PCI_DPERR, dc21285_dparity_irq, IRQF_DISABLED,
+			    "PCI data parity", NULL);
 
 	if (cfn_mode) {
 		static struct resource csrio;
diff --git a/arch/arm/mach-footbridge/isa-irq.c b/arch/arm/mach-footbridge/isa-irq.c
index 9ee80a2..8bfd06a 100644
--- a/arch/arm/mach-footbridge/isa-irq.c
+++ b/arch/arm/mach-footbridge/isa-irq.c
@@ -28,6 +28,8 @@
 #include <asm/irq.h>
 #include <asm/mach-types.h>
 
+#include "common.h"
+
 static void isa_mask_pic_lo_irq(unsigned int irq)
 {
 	unsigned int mask = 1 << (irq & 7);
diff --git a/arch/arm/mach-h720x/h7202-eval.c b/arch/arm/mach-h720x/h7202-eval.c
index 56161d5..8c0ba99 100644
--- a/arch/arm/mach-h720x/h7202-eval.c
+++ b/arch/arm/mach-h720x/h7202-eval.c
@@ -25,6 +25,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/mach/arch.h>
+#include <mach/irqs.h>
 #include <mach/hardware.h>
 #include "common.h"
 
diff --git a/arch/arm/mach-imx/clock.c b/arch/arm/mach-imx/clock.c
index 7ec60fc..cf332ae 100644
--- a/arch/arm/mach-imx/clock.c
+++ b/arch/arm/mach-imx/clock.c
@@ -23,7 +23,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 
-#include <mach/imx-regs.h>
+#include <mach/hardware.h>
 
 /*
  * Very simple approach: We can't disable clocks, so we do
diff --git a/arch/arm/mach-imx/generic.c b/arch/arm/mach-imx/generic.c
index fa72174..887cb21 100644
--- a/arch/arm/mach-imx/generic.c
+++ b/arch/arm/mach-imx/generic.c
@@ -245,11 +245,11 @@
 	imx_mmc_device.dev.platform_data = info;
 }
 
-static struct imxfb_mach_info imx_fb_info;
+static struct imx_fb_platform_data imx_fb_info;
 
-void __init set_imx_fb_info(struct imxfb_mach_info *hard_imx_fb_info)
+void __init set_imx_fb_info(struct imx_fb_platform_data *hard_imx_fb_info)
 {
-	memcpy(&imx_fb_info,hard_imx_fb_info,sizeof(struct imxfb_mach_info));
+	memcpy(&imx_fb_info,hard_imx_fb_info,sizeof(struct imx_fb_platform_data));
 }
 
 static struct resource imxfb_resources[] = {
diff --git a/arch/arm/mach-imx/include/mach/imx-regs.h b/arch/arm/mach-imx/include/mach/imx-regs.h
index fb9de27..490297f 100644
--- a/arch/arm/mach-imx/include/mach/imx-regs.h
+++ b/arch/arm/mach-imx/include/mach/imx-regs.h
@@ -373,110 +373,4 @@
 #define TSTAT_CAPT         (1<<1)
 #define TSTAT_COMP         (1<<0)
 
-/*
- * LCD Controller
- */
-
-#define LCDC_SSA	__REG(IMX_LCDC_BASE+0x00)
-
-#define LCDC_SIZE	__REG(IMX_LCDC_BASE+0x04)
-#define SIZE_XMAX(x)	((((x) >> 4) & 0x3f) << 20)
-#define SIZE_YMAX(y)    ( (y) & 0x1ff )
-
-#define LCDC_VPW	__REG(IMX_LCDC_BASE+0x08)
-#define VPW_VPW(x)	( (x) & 0x3ff )
-
-#define LCDC_CPOS	__REG(IMX_LCDC_BASE+0x0C)
-#define CPOS_CC1        (1<<31)
-#define CPOS_CC0        (1<<30)
-#define CPOS_OP         (1<<28)
-#define CPOS_CXP(x)     (((x) & 3ff) << 16)
-#define CPOS_CYP(y)     ((y) & 0x1ff)
-
-#define LCDC_LCWHB	__REG(IMX_LCDC_BASE+0x10)
-#define LCWHB_BK_EN     (1<<31)
-#define LCWHB_CW(w)     (((w) & 0x1f) << 24)
-#define LCWHB_CH(h)     (((h) & 0x1f) << 16)
-#define LCWHB_BD(x)     ((x) & 0xff)
-
-#define LCDC_LCHCC	__REG(IMX_LCDC_BASE+0x14)
-#define LCHCC_CUR_COL_R(r) (((r) & 0x1f) << 11)
-#define LCHCC_CUR_COL_G(g) (((g) & 0x3f) << 5)
-#define LCHCC_CUR_COL_B(b) ((b) & 0x1f)
-
-#define LCDC_PCR	__REG(IMX_LCDC_BASE+0x18)
-#define PCR_TFT         (1<<31)
-#define PCR_COLOR       (1<<30)
-#define PCR_PBSIZ_1     (0<<28)
-#define PCR_PBSIZ_2     (1<<28)
-#define PCR_PBSIZ_4     (2<<28)
-#define PCR_PBSIZ_8     (3<<28)
-#define PCR_BPIX_1      (0<<25)
-#define PCR_BPIX_2      (1<<25)
-#define PCR_BPIX_4      (2<<25)
-#define PCR_BPIX_8      (3<<25)
-#define PCR_BPIX_12     (4<<25)
-#define PCR_BPIX_16     (4<<25)
-#define PCR_PIXPOL      (1<<24)
-#define PCR_FLMPOL      (1<<23)
-#define PCR_LPPOL       (1<<22)
-#define PCR_CLKPOL      (1<<21)
-#define PCR_OEPOL       (1<<20)
-#define PCR_SCLKIDLE    (1<<19)
-#define PCR_END_SEL     (1<<18)
-#define PCR_END_BYTE_SWAP (1<<17)
-#define PCR_REV_VS      (1<<16)
-#define PCR_ACD_SEL     (1<<15)
-#define PCR_ACD(x)      (((x) & 0x7f) << 8)
-#define PCR_SCLK_SEL    (1<<7)
-#define PCR_SHARP       (1<<6)
-#define PCR_PCD(x)      ((x) & 0x3f)
-
-#define LCDC_HCR	__REG(IMX_LCDC_BASE+0x1C)
-#define HCR_H_WIDTH(x)  (((x) & 0x3f) << 26)
-#define HCR_H_WAIT_1(x) (((x) & 0xff) << 8)
-#define HCR_H_WAIT_2(x) ((x) & 0xff)
-
-#define LCDC_VCR	__REG(IMX_LCDC_BASE+0x20)
-#define VCR_V_WIDTH(x)  (((x) & 0x3f) << 26)
-#define VCR_V_WAIT_1(x) (((x) & 0xff) << 8)
-#define VCR_V_WAIT_2(x) ((x) & 0xff)
-
-#define LCDC_POS	__REG(IMX_LCDC_BASE+0x24)
-#define POS_POS(x)      ((x) & 1f)
-
-#define LCDC_LSCR1	__REG(IMX_LCDC_BASE+0x28)
-#define LSCR1_PS_RISE_DELAY(x)    (((x) & 0x7f) << 26)
-#define LSCR1_CLS_RISE_DELAY(x)   (((x) & 0x3f) << 16)
-#define LSCR1_REV_TOGGLE_DELAY(x) (((x) & 0xf) << 8)
-#define LSCR1_GRAY2(x)            (((x) & 0xf) << 4)
-#define LSCR1_GRAY1(x)            (((x) & 0xf))
-
-#define LCDC_PWMR	__REG(IMX_LCDC_BASE+0x2C)
-#define PWMR_CLS(x)     (((x) & 0x1ff) << 16)
-#define PWMR_LDMSK      (1<<15)
-#define PWMR_SCR1       (1<<10)
-#define PWMR_SCR0       (1<<9)
-#define PWMR_CC_EN      (1<<8)
-#define PWMR_PW(x)      ((x) & 0xff)
-
-#define LCDC_DMACR	__REG(IMX_LCDC_BASE+0x30)
-#define DMACR_BURST     (1<<31)
-#define DMACR_HM(x)     (((x) & 0xf) << 16)
-#define DMACR_TM(x)     ((x) &0xf)
-
-#define LCDC_RMCR	__REG(IMX_LCDC_BASE+0x34)
-#define RMCR_LCDC_EN		(1<<1)
-#define RMCR_SELF_REF		(1<<0)
-
-#define LCDC_LCDICR	__REG(IMX_LCDC_BASE+0x38)
-#define LCDICR_INT_SYN  (1<<2)
-#define LCDICR_INT_CON  (1)
-
-#define LCDC_LCDISR	__REG(IMX_LCDC_BASE+0x40)
-#define LCDISR_UDR_ERR (1<<3)
-#define LCDISR_ERR_RES (1<<2)
-#define LCDISR_EOF     (1<<1)
-#define LCDISR_BOF     (1<<0)
-
 #endif				// _IMX_REGS_H
diff --git a/arch/arm/mach-integrator/pci.c b/arch/arm/mach-integrator/pci.c
index af7d3ff..2fdb954 100644
--- a/arch/arm/mach-integrator/pci.c
+++ b/arch/arm/mach-integrator/pci.c
@@ -63,13 +63,7 @@
  *
  * Where A = pin 1, B = pin 2 and so on and pin=0 = default = A.
  * Thus, each swizzle is ((pin-1) + (device#-4)) % 4
- *
- * The following code swizzles for exactly one bridge.  
  */
-static inline int bridge_swizzle(int pin, unsigned int slot) 
-{
-	return (pin + slot) & 3;
-}
 
 /*
  * This routine handles multiple bridges.
@@ -81,15 +75,14 @@
 	if (pin == 0)
 		pin = 1;
 
-	pin -= 1;
 	while (dev->bus->self) {
-		pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
+		pin = pci_swizzle_interrupt_pin(dev, pin);
 		/*
 		 * move up the chain of bridges, swizzling as we go.
 		 */
 		dev = dev->bus->self;
 	}
-	*pinp = pin + 1;
+	*pinp = pin;
 
 	return PCI_SLOT(dev->devfn);
 }
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 7b8ef97..b3404b7 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -698,6 +698,7 @@
 	printk(KERN_INFO "Kirkwood: %s, TCLK=%d.\n",
 		kirkwood_id(), kirkwood_tclk);
 	kirkwood_ge00_shared_data.t_clk = kirkwood_tclk;
+	kirkwood_ge01_shared_data.t_clk = kirkwood_tclk;
 	kirkwood_spi_plat_data.tclk = kirkwood_tclk;
 	kirkwood_uart0_data[0].uartclk = kirkwood_tclk;
 	kirkwood_uart1_data[0].uartclk = kirkwood_tclk;
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index f6b08f2..73fccac 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/mbus.h>
+#include <asm/irq.h>
 #include <asm/mach/pci.h>
 #include <plat/pcie.h>
 #include "common.h"
diff --git a/arch/arm/mach-ks8695/devices.c b/arch/arm/mach-ks8695/devices.c
index 36ab0fd..b89fb6d 100644
--- a/arch/arm/mach-ks8695/devices.c
+++ b/arch/arm/mach-ks8695/devices.c
@@ -22,6 +22,7 @@
 
 #include <linux/platform_device.h>
 
+#include <mach/irqs.h>
 #include <mach/regs-wan.h>
 #include <mach/regs-lan.h>
 #include <mach/regs-hpna.h>
diff --git a/arch/arm/mach-msm/devices.c b/arch/arm/mach-msm/devices.c
index f2a74b9..31b6b30 100644
--- a/arch/arm/mach-msm/devices.c
+++ b/arch/arm/mach-msm/devices.c
@@ -16,6 +16,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 
+#include <mach/irqs.h>
 #include <mach/msm_iomap.h>
 #include "devices.h"
 
diff --git a/arch/arm/mach-mv78xx0/pcie.c b/arch/arm/mach-mv78xx0/pcie.c
index 430ea84..aad3a7a 100644
--- a/arch/arm/mach-mv78xx0/pcie.c
+++ b/arch/arm/mach-mv78xx0/pcie.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/mbus.h>
+#include <asm/irq.h>
 #include <asm/mach/pci.h>
 #include <plat/pcie.h>
 #include "common.h"
diff --git a/arch/arm/mach-mx2/devices.c b/arch/arm/mach-mx2/devices.c
index af121f5..2f9240b 100644
--- a/arch/arm/mach-mx2/devices.c
+++ b/arch/arm/mach-mx2/devices.c
@@ -32,6 +32,7 @@
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
 
+#include <mach/irqs.h>
 #include <mach/hardware.h>
 
 /*
diff --git a/arch/arm/mach-mx3/devices.c b/arch/arm/mach-mx3/devices.c
index 1d46cb4..f842880 100644
--- a/arch/arm/mach-mx3/devices.c
+++ b/arch/arm/mach-mx3/devices.c
@@ -22,6 +22,7 @@
 #include <linux/serial.h>
 #include <linux/gpio.h>
 #include <mach/hardware.h>
+#include <mach/irqs.h>
 #include <mach/imx-uart.h>
 
 static struct resource uart0[] = {
diff --git a/arch/arm/mach-netx/fb.c b/arch/arm/mach-netx/fb.c
index ea8fa88..1d844e2 100644
--- a/arch/arm/mach-netx/fb.c
+++ b/arch/arm/mach-netx/fb.c
@@ -24,6 +24,8 @@
 #include <linux/amba/clcd.h>
 #include <linux/err.h>
 
+#include <asm/irq.h>
+
 #include <mach/netx-regs.h>
 #include <mach/hardware.h>
 
diff --git a/arch/arm/mach-netx/time.c b/arch/arm/mach-netx/time.c
index d51d627..f201fdd 100644
--- a/arch/arm/mach-netx/time.c
+++ b/arch/arm/mach-netx/time.c
@@ -163,7 +163,7 @@
 	 * Adding some safety ... */
 	netx_clockevent.min_delta_ns =
 		clockevent_delta2ns(0xa00, &netx_clockevent);
-	netx_clockevent.cpumask = cpumask_of_cpu(0);
+	netx_clockevent.cpumask = cpumask_of(0);
 	clockevents_register_device(&netx_clockevent);
 }
 
diff --git a/arch/arm/mach-netx/xc.c b/arch/arm/mach-netx/xc.c
index 8fc6205..181a78b 100644
--- a/arch/arm/mach-netx/xc.c
+++ b/arch/arm/mach-netx/xc.c
@@ -24,6 +24,7 @@
 #include <linux/io.h>
 
 #include <mach/hardware.h>
+#include <mach/irqs.h>
 #include <mach/netx-regs.h>
 
 #include <mach/xc.h>
diff --git a/arch/arm/mach-omap1/mcbsp.c b/arch/arm/mach-omap1/mcbsp.c
index 7de7c69..4474da7 100644
--- a/arch/arm/mach-omap1/mcbsp.c
+++ b/arch/arm/mach-omap1/mcbsp.c
@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 
 #include <mach/dma.h>
+#include <mach/irqs.h>
 #include <mach/mux.h>
 #include <mach/cpu.h>
 #include <mach/mcbsp.h>
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index cae3ebe..acdc709 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 
 #include <mach/dma.h>
+#include <mach/irqs.h>
 #include <mach/mux.h>
 #include <mach/cpu.h>
 #include <mach/mcbsp.h>
diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c
index a7b7d77..d0a785a 100644
--- a/arch/arm/mach-orion5x/pci.c
+++ b/arch/arm/mach-orion5x/pci.c
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/mbus.h>
+#include <asm/irq.h>
 #include <asm/mach/pci.h>
 #include <plat/pcie.h>
 #include "common.h"
diff --git a/arch/arm/mach-pnx4008/gpio.c b/arch/arm/mach-pnx4008/gpio.c
index 015cc21..f219914 100644
--- a/arch/arm/mach-pnx4008/gpio.c
+++ b/arch/arm/mach-pnx4008/gpio.c
@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/io.h>
+#include <mach/hardware.h>
 #include <mach/platform.h>
 #include <mach/gpio.h>
 
diff --git a/arch/arm/mach-pnx4008/i2c.c b/arch/arm/mach-pnx4008/i2c.c
index 87c0932..f3fea29 100644
--- a/arch/arm/mach-pnx4008/i2c.c
+++ b/arch/arm/mach-pnx4008/i2c.c
@@ -15,6 +15,7 @@
 #include <linux/platform_device.h>
 #include <linux/err.h>
 #include <mach/platform.h>
+#include <mach/irqs.h>
 #include <mach/i2c.h>
 
 static int set_clock_run(struct platform_device *pdev)
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index c5e28a4..a8d91b6 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -27,6 +27,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
 #include <linux/spi/corgi_lcd.h>
+#include <linux/mtd/sharpsl.h>
 #include <video/w100fb.h>
 
 #include <asm/setup.h>
@@ -542,6 +543,55 @@
 static inline void corgi_init_spi(void) {}
 #endif
 
+static struct mtd_partition sharpsl_nand_partitions[] = {
+	{
+		.name = "System Area",
+		.offset = 0,
+		.size = 7 * 1024 * 1024,
+	},
+	{
+		.name = "Root Filesystem",
+		.offset = 7 * 1024 * 1024,
+		.size = 25 * 1024 * 1024,
+	},
+	{
+		.name = "Home Filesystem",
+		.offset = MTDPART_OFS_APPEND,
+		.size = MTDPART_SIZ_FULL,
+	},
+};
+
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr sharpsl_bbt = {
+	.options = 0,
+	.offs = 4,
+	.len = 2,
+	.pattern = scan_ff_pattern
+};
+
+static struct sharpsl_nand_platform_data sharpsl_nand_platform_data = {
+	.badblock_pattern	= &sharpsl_bbt,
+	.partitions		= sharpsl_nand_partitions,
+	.nr_partitions		= ARRAY_SIZE(sharpsl_nand_partitions),
+};
+
+static struct resource sharpsl_nand_resources[] = {
+	{
+		.start	= 0x0C000000,
+		.end	= 0x0C000FFF,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device sharpsl_nand_device = {
+	.name		= "sharpsl-nand",
+	.id		= -1,
+	.resource	= sharpsl_nand_resources,
+	.num_resources	= ARRAY_SIZE(sharpsl_nand_resources),
+	.dev.platform_data	= &sharpsl_nand_platform_data,
+};
+
 static struct mtd_partition sharpsl_rom_parts[] = {
 	{
 		.name	="Boot PROM Filesystem",
@@ -577,6 +627,7 @@
 	&corgifb_device,
 	&corgikbd_device,
 	&corgiled_device,
+	&sharpsl_nand_device,
 	&sharpsl_rom_device,
 };
 
@@ -617,6 +668,9 @@
 
 	platform_scoop_config = &corgi_pcmcia_config;
 
+	if (machine_is_husky())
+		sharpsl_nand_partitions[1].size = 53 * 1024 * 1024;
+
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 }
 
diff --git a/arch/arm/mach-pxa/e350.c b/arch/arm/mach-pxa/e350.c
index 2511293..edcd9d5 100644
--- a/arch/arm/mach-pxa/e350.c
+++ b/arch/arm/mach-pxa/e350.c
@@ -20,6 +20,7 @@
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 
+#include <mach/irqs.h>
 #include <mach/mfp-pxa25x.h>
 #include <mach/pxa-regs.h>
 #include <mach/hardware.h>
diff --git a/arch/arm/mach-pxa/e400.c b/arch/arm/mach-pxa/e400.c
index bed0336..77bb8e2 100644
--- a/arch/arm/mach-pxa/e400.c
+++ b/arch/arm/mach-pxa/e400.c
@@ -28,6 +28,7 @@
 #include <mach/eseries-gpio.h>
 #include <mach/pxafb.h>
 #include <mach/udc.h>
+#include <mach/irqs.h>
 
 #include "generic.h"
 #include "eseries.h"
diff --git a/arch/arm/mach-pxa/e740.c b/arch/arm/mach-pxa/e740.c
index b00d670..6d48e00 100644
--- a/arch/arm/mach-pxa/e740.c
+++ b/arch/arm/mach-pxa/e740.c
@@ -30,6 +30,7 @@
 #include <mach/eseries-gpio.h>
 #include <mach/udc.h>
 #include <mach/irda.h>
+#include <mach/irqs.h>
 
 #include "generic.h"
 #include "eseries.h"
diff --git a/arch/arm/mach-pxa/e750.c b/arch/arm/mach-pxa/e750.c
index 84d7c1a..be1ab8e 100644
--- a/arch/arm/mach-pxa/e750.c
+++ b/arch/arm/mach-pxa/e750.c
@@ -29,6 +29,7 @@
 #include <mach/eseries-gpio.h>
 #include <mach/udc.h>
 #include <mach/irda.h>
+#include <mach/irqs.h>
 
 #include "generic.h"
 #include "eseries.h"
@@ -105,6 +106,57 @@
 	.resource       = e750_fb_resources,
 };
 
+/* -------------------- e750 MFP parameters -------------------- */
+
+static unsigned long e750_pin_config[] __initdata = {
+	/* Chip selects */
+	GPIO15_nCS_1,   /* CS1 - Flash */
+	GPIO79_nCS_3,   /* CS3 - IMAGEON */
+	GPIO80_nCS_4,   /* CS4 - TMIO */
+
+	/* Clocks */
+	GPIO11_3_6MHz,
+
+	/* BTUART */
+	GPIO42_BTUART_RXD,
+	GPIO43_BTUART_TXD,
+	GPIO44_BTUART_CTS,
+
+	/* TMIO controller */
+	GPIO19_GPIO, /* t7l66xb #PCLR */
+	GPIO45_GPIO, /* t7l66xb #SUSPEND (NOT BTUART!) */
+
+	/* UDC */
+	GPIO13_GPIO,
+	GPIO3_GPIO,
+
+	/* IrDA */
+	GPIO38_GPIO | MFP_LPM_DRIVE_HIGH,
+
+	/* PC Card */
+	GPIO8_GPIO,   /* CD0 */
+	GPIO44_GPIO,  /* CD1 */
+	GPIO11_GPIO,  /* IRQ0 */
+	GPIO6_GPIO,   /* IRQ1 */
+	GPIO27_GPIO,  /* RST0 */
+	GPIO24_GPIO,  /* RST1 */
+	GPIO20_GPIO,  /* PWR0 */
+	GPIO23_GPIO,  /* PWR1 */
+	GPIO48_nPOE,
+	GPIO49_nPWE,
+	GPIO50_nPIOR,
+	GPIO51_nPIOW,
+	GPIO52_nPCE_1,
+	GPIO53_nPCE_2,
+	GPIO54_nPSKTSEL,
+	GPIO55_nPREG,
+	GPIO56_nPWAIT,
+	GPIO57_nIOIS16,
+
+	/* wakeup */
+	GPIO0_GPIO | WAKEUP_ON_EDGE_RISE,
+};
+
 /* ----------------- e750 tc6393xb parameters ------------------ */
 
 static struct tc6393xb_platform_data e750_tc6393xb_info = {
@@ -137,6 +189,7 @@
 
 static void __init e750_init(void)
 {
+	pxa2xx_mfp_config(ARRAY_AND_SIZE(e750_pin_config));
 	clk_add_alias("CLK_CK3P6MI", &e750_tc6393xb_device.dev,
 			"GPIO11_CLK", NULL),
 	eseries_get_tmio_gpios();
diff --git a/arch/arm/mach-pxa/e800.c b/arch/arm/mach-pxa/e800.c
index 9a86a42..cc9b129 100644
--- a/arch/arm/mach-pxa/e800.c
+++ b/arch/arm/mach-pxa/e800.c
@@ -28,6 +28,7 @@
 #include <mach/hardware.h>
 #include <mach/eseries-gpio.h>
 #include <mach/udc.h>
+#include <mach/irqs.h>
 
 #include "generic.h"
 #include "eseries.h"
diff --git a/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h b/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h
index b1fcd10..bcf3fb2 100644
--- a/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h
+++ b/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h
@@ -193,10 +193,8 @@
 #define CKEN_MINI_IM	48	/* < Mini-IM */
 #define CKEN_MINI_LCD	49	/* < Mini LCD */
 
-#if defined(CONFIG_CPU_PXA310)
 #define CKEN_MMC3	5	/* < MMC3 Clock Enable */
 #define CKEN_MVED	43	/* < MVED clock enable */
-#endif
 
 /* Note: GCU clock enable bit differs on PXA300/PXA310 and PXA320 */
 #define PXA300_CKEN_GRAPHICS	42	/* Graphics controller clock enable */
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index ae88855..f9093be 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -24,6 +24,7 @@
 #include <linux/gpio.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
+#include <linux/mtd/sharpsl.h>
 
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
@@ -414,6 +415,55 @@
 	.lcd_conn	= LCD_COLOR_TFT_16BPP,
 };
 
+static struct mtd_partition sharpsl_nand_partitions[] = {
+	{
+		.name = "System Area",
+		.offset = 0,
+		.size = 7 * 1024 * 1024,
+	},
+	{
+		.name = "Root Filesystem",
+		.offset = 7 * 1024 * 1024,
+		.size = 22 * 1024 * 1024,
+	},
+	{
+		.name = "Home Filesystem",
+		.offset = MTDPART_OFS_APPEND,
+		.size = MTDPART_SIZ_FULL,
+	},
+};
+
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr sharpsl_bbt = {
+	.options = 0,
+	.offs = 4,
+	.len = 2,
+	.pattern = scan_ff_pattern
+};
+
+static struct sharpsl_nand_platform_data sharpsl_nand_platform_data = {
+	.badblock_pattern	= &sharpsl_bbt,
+	.partitions		= sharpsl_nand_partitions,
+	.nr_partitions		= ARRAY_SIZE(sharpsl_nand_partitions),
+};
+
+static struct resource sharpsl_nand_resources[] = {
+	{
+		.start	= 0x0C000000,
+		.end	= 0x0C000FFF,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device sharpsl_nand_device = {
+	.name		= "sharpsl-nand",
+	.id		= -1,
+	.resource	= sharpsl_nand_resources,
+	.num_resources	= ARRAY_SIZE(sharpsl_nand_resources),
+	.dev.platform_data	= &sharpsl_nand_platform_data,
+};
+
 static struct mtd_partition sharpsl_rom_parts[] = {
 	{
 		.name	="Boot PROM Filesystem",
@@ -447,6 +497,7 @@
 static struct platform_device *devices[] __initdata = {
 	&poodle_locomo_device,
 	&poodle_scoop_device,
+	&sharpsl_nand_device,
 	&sharpsl_rom_device,
 };
 
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 7299d87..6d447c9 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -31,6 +31,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
 #include <linux/spi/corgi_lcd.h>
+#include <linux/mtd/sharpsl.h>
 
 #include <asm/setup.h>
 #include <asm/memory.h>
@@ -613,6 +614,54 @@
 	.lcd_conn	= LCD_COLOR_TFT_16BPP | LCD_ALTERNATE_MAPPING,
 };
 
+static struct mtd_partition sharpsl_nand_partitions[] = {
+	{
+		.name = "System Area",
+		.offset = 0,
+		.size = 7 * 1024 * 1024,
+	},
+	{
+		.name = "Root Filesystem",
+		.offset = 7 * 1024 * 1024,
+	},
+	{
+		.name = "Home Filesystem",
+		.offset = MTDPART_OFS_APPEND,
+		.size = MTDPART_SIZ_FULL,
+	},
+};
+
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr sharpsl_bbt = {
+	.options = 0,
+	.offs = 4,
+	.len = 2,
+	.pattern = scan_ff_pattern
+};
+
+static struct sharpsl_nand_platform_data sharpsl_nand_platform_data = {
+	.badblock_pattern	= &sharpsl_bbt,
+	.partitions		= sharpsl_nand_partitions,
+	.nr_partitions		= ARRAY_SIZE(sharpsl_nand_partitions),
+};
+
+static struct resource sharpsl_nand_resources[] = {
+	{
+		.start	= 0x0C000000,
+		.end	= 0x0C000FFF,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device sharpsl_nand_device = {
+	.name		= "sharpsl-nand",
+	.id		= -1,
+	.resource	= sharpsl_nand_resources,
+	.num_resources	= ARRAY_SIZE(sharpsl_nand_resources),
+	.dev.platform_data	= &sharpsl_nand_platform_data,
+};
+
 
 static struct mtd_partition sharpsl_rom_parts[] = {
 	{
@@ -648,6 +697,7 @@
 	&spitzscoop_device,
 	&spitzkbd_device,
 	&spitzled_device,
+	&sharpsl_nand_device,
 	&sharpsl_rom_device,
 };
 
@@ -671,6 +721,14 @@
 	pm_power_off = spitz_poweroff;
 	arm_pm_restart = spitz_restart;
 
+	if (machine_is_spitz()) {
+		sharpsl_nand_partitions[1].size = 5 * 1024 * 1024;
+	} else if (machine_is_akita()) {
+		sharpsl_nand_partitions[1].size = 58 * 1024 * 1024;
+	} else if (machine_is_borzoi()) {
+		sharpsl_nand_partitions[1].size = 32 * 1024 * 1024;
+	}
+
 	PMCR = 0x00;
 
 	/* Stop 3.6MHz and drive HIGH to PCMCIA and CS */
@@ -715,10 +773,29 @@
 	},
 };
 
+static struct nand_bbt_descr sharpsl_akita_bbt = {
+	.options = 0,
+	.offs = 4,
+	.len = 1,
+	.pattern = scan_ff_pattern
+};
+
+static struct nand_ecclayout akita_oobinfo = {
+	.eccbytes = 24,
+	.eccpos = {
+		   0x5, 0x1, 0x2, 0x3, 0x6, 0x7, 0x15, 0x11,
+		   0x12, 0x13, 0x16, 0x17, 0x25, 0x21, 0x22, 0x23,
+		   0x26, 0x27, 0x35, 0x31, 0x32, 0x33, 0x36, 0x37},
+	.oobfree = {{0x08, 0x09}}
+};
+
 static void __init akita_init(void)
 {
 	spitz_ficp_platform_data.transceiver_mode = akita_irda_transceiver_mode;
 
+	sharpsl_nand_platform_data.badblock_pattern = &sharpsl_akita_bbt;
+	sharpsl_nand_platform_data.ecc_layout = &akita_oobinfo;
+
 	/* We just pretend the second element of the array doesn't exist */
 	spitz_pcmcia_config.num_devs = 1;
 	platform_scoop_config = &spitz_pcmcia_config;
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
index 8fce85f..ea3c755 100644
--- a/arch/arm/mach-realview/platsmp.c
+++ b/arch/arm/mach-realview/platsmp.c
@@ -12,6 +12,7 @@
 #include <linux/errno.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/jiffies.h>
 #include <linux/smp.h>
 #include <linux/io.h>
 
diff --git a/arch/arm/mach-s3c2410/include/mach/gpio.h b/arch/arm/mach-s3c2410/include/mach/gpio.h
index e0349af..00476a5 100644
--- a/arch/arm/mach-s3c2410/include/mach/gpio.h
+++ b/arch/arm/mach-s3c2410/include/mach/gpio.h
@@ -14,6 +14,7 @@
 #define gpio_get_value	__gpio_get_value
 #define gpio_set_value	__gpio_set_value
 #define gpio_cansleep	__gpio_cansleep
+#define gpio_to_irq	__gpio_to_irq
 
 /* some boards require extra gpio capacity to support external
  * devices that need GPIO.
diff --git a/arch/arm/mach-s3c2410/include/mach/irqs.h b/arch/arm/mach-s3c2410/include/mach/irqs.h
index 9565903..49efce8 100644
--- a/arch/arm/mach-s3c2410/include/mach/irqs.h
+++ b/arch/arm/mach-s3c2410/include/mach/irqs.h
@@ -12,10 +12,6 @@
 #ifndef __ASM_ARCH_IRQS_H
 #define __ASM_ARCH_IRQS_H __FILE__
 
-#ifndef __ASM_ARM_IRQ_H
-#error "Do not include this directly, instead #include <asm/irq.h>"
-#endif
-
 /* we keep the first set of CPU IRQs out of the range of
  * the ISA space, so that the PC104 has them to itself
  * and we don't end up having to do horrible things to the
diff --git a/arch/arm/mach-s3c2440/mach-at2440evb.c b/arch/arm/mach-s3c2440/mach-at2440evb.c
index 0a6d0a5..315c42e 100644
--- a/arch/arm/mach-s3c2440/mach-at2440evb.c
+++ b/arch/arm/mach-s3c2440/mach-at2440evb.c
@@ -47,7 +47,7 @@
 #include <plat/clock.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
-#include <asm/plat-s3c24xx/mci.h>
+#include <plat/mci.h>
 
 static struct map_desc at2440evb_iodesc[] __initdata = {
 	/* Nothing here */
diff --git a/arch/arm/mach-s3c6400/include/mach/irqs.h b/arch/arm/mach-s3c6400/include/mach/irqs.h
index b38c47c..4c97f9a 100644
--- a/arch/arm/mach-s3c6400/include/mach/irqs.h
+++ b/arch/arm/mach-s3c6400/include/mach/irqs.h
@@ -11,10 +11,6 @@
 #ifndef __ASM_ARCH_IRQS_H
 #define __ASM_ARCH_IRQS_H __FILE__
 
-#ifndef __ASM_ARM_IRQ_H
-#error "Do not include this directly, instead #include <asm/irq.h>"
-#endif
-
 #include <plat/irqs.h>
 
 #endif /* __ASM_ARCH_IRQ_H */
diff --git a/arch/arm/mach-w90x900/mach-w90p910evb.c b/arch/arm/mach-w90x900/mach-w90p910evb.c
index 9307a24..9ebc93f 100644
--- a/arch/arm/mach-w90x900/mach-w90p910evb.c
+++ b/arch/arm/mach-w90x900/mach-w90p910evb.c
@@ -29,6 +29,7 @@
 #include <asm/mach-types.h>
 
 #include <mach/regs-serial.h>
+#include <mach/map.h>
 
 #include "cpu.h"
 
diff --git a/arch/arm/mach-w90x900/time.c b/arch/arm/mach-w90x900/time.c
index 3a69e38..bcc838f 100644
--- a/arch/arm/mach-w90x900/time.c
+++ b/arch/arm/mach-w90x900/time.c
@@ -28,7 +28,6 @@
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 
-#include <mach/system.h>
 #include <mach/map.h>
 #include <mach/regs-timer.h>
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6796001..310e479 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -71,7 +71,7 @@
  * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
  * would have to initialise this each time prior to calling vm_region_alloc().
  */
-struct vm_region {
+struct arm_vm_region {
 	struct list_head	vm_list;
 	unsigned long		vm_start;
 	unsigned long		vm_end;
@@ -79,20 +79,20 @@
 	int			vm_active;
 };
 
-static struct vm_region consistent_head = {
+static struct arm_vm_region consistent_head = {
 	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
 	.vm_start	= CONSISTENT_BASE,
 	.vm_end		= CONSISTENT_END,
 };
 
-static struct vm_region *
-vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
+static struct arm_vm_region *
+arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp)
 {
 	unsigned long addr = head->vm_start, end = head->vm_end - size;
 	unsigned long flags;
-	struct vm_region *c, *new;
+	struct arm_vm_region *c, *new;
 
-	new = kmalloc(sizeof(struct vm_region), gfp);
+	new = kmalloc(sizeof(struct arm_vm_region), gfp);
 	if (!new)
 		goto out;
 
@@ -127,9 +127,9 @@
 	return NULL;
 }
 
-static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
+static struct arm_vm_region *arm_vm_region_find(struct arm_vm_region *head, unsigned long addr)
 {
-	struct vm_region *c;
+	struct arm_vm_region *c;
 	
 	list_for_each_entry(c, &head->vm_list, vm_list) {
 		if (c->vm_active && c->vm_start == addr)
@@ -149,7 +149,7 @@
 	    pgprot_t prot)
 {
 	struct page *page;
-	struct vm_region *c;
+	struct arm_vm_region *c;
 	unsigned long order;
 	u64 mask = ISA_DMA_THRESHOLD, limit;
 
@@ -214,7 +214,7 @@
 	/*
 	 * Allocate a virtual address in the consistent mapping region.
 	 */
-	c = vm_region_alloc(&consistent_head, size,
+	c = arm_vm_region_alloc(&consistent_head, size,
 			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
 	if (c) {
 		pte_t *pte;
@@ -311,13 +311,13 @@
 		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
 {
 	unsigned long flags, user_size, kern_size;
-	struct vm_region *c;
+	struct arm_vm_region *c;
 	int ret = -ENXIO;
 
 	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&consistent_lock, flags);
-	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+	c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
 	spin_unlock_irqrestore(&consistent_lock, flags);
 
 	if (c) {
@@ -359,7 +359,7 @@
  */
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
-	struct vm_region *c;
+	struct arm_vm_region *c;
 	unsigned long flags, addr;
 	pte_t *ptep;
 	int idx;
@@ -378,7 +378,7 @@
 	size = PAGE_ALIGN(size);
 
 	spin_lock_irqsave(&consistent_lock, flags);
-	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+	c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
 	if (!c)
 		goto no_area;
 
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 4ad3bf2..195e48e 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -27,6 +27,7 @@
 EXPORT_SYMBOL(__cpuc_flush_user_all);
 EXPORT_SYMBOL(__cpuc_flush_user_range);
 EXPORT_SYMBOL(__cpuc_coherent_kern_range);
+EXPORT_SYMBOL(dmac_inv_range);  /* because of flush_ioremap_region() */
 #else
 EXPORT_SYMBOL(cpu_cache);
 #endif
diff --git a/arch/arm/plat-mxc/include/mach/usb.h b/arch/arm/plat-mxc/include/mach/usb.h
new file mode 100644
index 0000000..2dacb308
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/usb.h
@@ -0,0 +1,23 @@
+/*
+ *	Copyright (C) 2008 Darius Augulis <augulis.darius@gmail.com>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ *
+ *	This program is distributed in the hope that it will be useful,
+ *	but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *	GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MXC_USB
+#define __ASM_ARCH_MXC_USB
+
+struct imxusb_platform_data {
+	int (*init)(struct device *);
+	int (*exit)(struct device *);
+};
+
+#endif /* __ASM_ARCH_MXC_USB */
diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c
index 89a6ab0..467531e 100644
--- a/arch/arm/plat-omap/i2c.c
+++ b/arch/arm/plat-omap/i2c.c
@@ -26,6 +26,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
+#include <mach/irqs.h>
 #include <mach/mux.h>
 
 #define OMAP_I2C_SIZE		0x3f
diff --git a/arch/arm/plat-omap/usb.c b/arch/arm/plat-omap/usb.c
index 67ca1e2..add0485 100644
--- a/arch/arm/plat-omap/usb.c
+++ b/arch/arm/plat-omap/usb.c
@@ -77,38 +77,6 @@
 
 /*-------------------------------------------------------------------------*/
 
-#if	defined(CONFIG_ARCH_OMAP_OTG) || defined(CONFIG_USB_MUSB_OTG)
-
-static struct otg_transceiver *xceiv;
-
-/**
- * otg_get_transceiver - find the (single) OTG transceiver driver
- *
- * Returns the transceiver driver, after getting a refcount to it; or
- * null if there is no such transceiver.  The caller is responsible for
- * releasing that count.
- */
-struct otg_transceiver *otg_get_transceiver(void)
-{
-	if (xceiv)
-		get_device(xceiv->dev);
-	return xceiv;
-}
-EXPORT_SYMBOL(otg_get_transceiver);
-
-int otg_set_transceiver(struct otg_transceiver *x)
-{
-	if (xceiv && x)
-		return -EBUSY;
-	xceiv = x;
-	return 0;
-}
-EXPORT_SYMBOL(otg_set_transceiver);
-
-#endif
-
-/*-------------------------------------------------------------------------*/
-
 #if defined(CONFIG_ARCH_OMAP_OTG) || defined(CONFIG_ARCH_OMAP15XX)
 
 static void omap2_usb_devconf_clear(u8 port, u32 mask)
diff --git a/arch/arm/plat-s3c/dev-fb.c b/arch/arm/plat-s3c/dev-fb.c
index 0454b8e..a90198f 100644
--- a/arch/arm/plat-s3c/dev-fb.c
+++ b/arch/arm/plat-s3c/dev-fb.c
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/fb.h>
 
+#include <mach/irqs.h>
 #include <mach/map.h>
 #include <mach/regs-fb.h>
 
diff --git a/arch/arm/plat-s3c/dev-i2c0.c b/arch/arm/plat-s3c/dev-i2c0.c
index 2c0128c..fe32707 100644
--- a/arch/arm/plat-s3c/dev-i2c0.c
+++ b/arch/arm/plat-s3c/dev-i2c0.c
@@ -15,6 +15,7 @@
 #include <linux/string.h>
 #include <linux/platform_device.h>
 
+#include <mach/irqs.h>
 #include <mach/map.h>
 
 #include <plat/regs-iic.h>
diff --git a/arch/arm/plat-s3c/dev-i2c1.c b/arch/arm/plat-s3c/dev-i2c1.c
index 9658fb0..2387fbf 100644
--- a/arch/arm/plat-s3c/dev-i2c1.c
+++ b/arch/arm/plat-s3c/dev-i2c1.c
@@ -15,6 +15,7 @@
 #include <linux/string.h>
 #include <linux/platform_device.h>
 
+#include <mach/irqs.h>
 #include <mach/map.h>
 
 #include <plat/regs-iic.h>
diff --git a/arch/arm/plat-s3c24xx/gpiolib.c b/arch/arm/plat-s3c24xx/gpiolib.c
index f95c6c9..94a341a 100644
--- a/arch/arm/plat-s3c24xx/gpiolib.c
+++ b/arch/arm/plat-s3c24xx/gpiolib.c
@@ -59,6 +59,22 @@
 	return 0;
 }
 
+static int s3c24xx_gpiolib_bankf_toirq(struct gpio_chip *chip, unsigned offset)
+{
+	if (offset < 4)
+		return IRQ_EINT0 + offset;
+	
+	if (offset < 8)
+		return IRQ_EINT4 + offset - 4;
+	
+	return -EINVAL;
+}
+
+static int s3c24xx_gpiolib_bankg_toirq(struct gpio_chip *chip, unsigned offset)
+{
+	return IRQ_EINT8 + offset;
+}
+
 struct s3c_gpio_chip s3c24xx_gpios[] = {
 	[0] = {
 		.base	= S3C24XX_GPIO_BASE(S3C2410_GPA0),
@@ -114,6 +130,7 @@
 			.owner			= THIS_MODULE,
 			.label			= "GPIOF",
 			.ngpio			= 8,
+			.to_irq			= s3c24xx_gpiolib_bankf_toirq,
 		},
 	},
 	[6] = {
@@ -123,6 +140,7 @@
 			.owner			= THIS_MODULE,
 			.label			= "GPIOG",
 			.ngpio			= 10,
+			.to_irq			= s3c24xx_gpiolib_bankg_toirq,
 		},
 	},
 };
diff --git a/arch/arm/plat-s3c24xx/pwm.c b/arch/arm/plat-s3c24xx/pwm.c
index ec56b88..0120b76 100644
--- a/arch/arm/plat-s3c24xx/pwm.c
+++ b/arch/arm/plat-s3c24xx/pwm.c
@@ -19,6 +19,8 @@
 #include <linux/io.h>
 #include <linux/pwm.h>
 
+#include <mach/irqs.h>
+
 #include <plat/devs.h>
 #include <plat/regs-timer.h>
 
diff --git a/arch/arm/plat-s3c64xx/include/plat/irqs.h b/arch/arm/plat-s3c64xx/include/plat/irqs.h
index 02e8dd4..2846f55 100644
--- a/arch/arm/plat-s3c64xx/include/plat/irqs.h
+++ b/arch/arm/plat-s3c64xx/include/plat/irqs.h
@@ -191,7 +191,7 @@
 #define IRQ_EINT_GROUP8_BASE	(IRQ_EINT_GROUP7_BASE + IRQ_EINT_GROUP7_NR)
 #define IRQ_EINT_GROUP9_BASE	(IRQ_EINT_GROUP8_BASE + IRQ_EINT_GROUP8_NR)
 
-#define IRQ_EINT_GROUP(group, no)	(IRQ_EINT_GROUP##group##__BASE + (x))
+#define IRQ_EINT_GROUP(group, no)	(IRQ_EINT_GROUP##group##_BASE + (no))
 
 /* Set the default NR_IRQS */
 
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 26eca87f..b189680 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -122,6 +122,24 @@
 	bool "ATNGW100 Network Gateway"
 	select CPU_AT32AP7000
 
+config BOARD_HAMMERHEAD
+	bool "Hammerhead board"
+	select CPU_AT32AP7000
+	select USB_ARCH_HAS_HCD
+	help
+	  The Hammerhead platform is built around an AVR32 32-bit microcontroller from Atmel.
+	  It offers versatile peripherals, such as Ethernet, USB device, USB host, etc.
+
+	  The board also incorporates a power supply and is a Power over Ethernet (PoE) Powered
+	  Device (PD).
+
+	  Additionally, a Cyclone III FPGA from Altera is integrated on the board. The FPGA is
+	  mapped into the 32-bit AVR memory bus. The FPGA offers two DDR2 SDRAM interfaces, which
+	  will cover even the most demanding memory bandwidth needs. Together with the onboard
+	  video decoder, the board is ready for video processing.
+
+	  For more information see: http://www.miromico.com/hammerhead
+
 config BOARD_FAVR_32
 	bool "Favr-32 LCD-board"
 	select CPU_AT32AP7000
@@ -133,6 +151,7 @@
 
 source "arch/avr32/boards/atstk1000/Kconfig"
 source "arch/avr32/boards/atngw100/Kconfig"
+source "arch/avr32/boards/hammerhead/Kconfig"
 source "arch/avr32/boards/favr-32/Kconfig"
 
 choice
diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
index b088e10..f3ef3bb 100644
--- a/arch/avr32/Makefile
+++ b/arch/avr32/Makefile
@@ -33,6 +33,7 @@
 core-y					+= $(machdirs)
 core-$(CONFIG_BOARD_ATSTK1000)		+= arch/avr32/boards/atstk1000/
 core-$(CONFIG_BOARD_ATNGW100)		+= arch/avr32/boards/atngw100/
+core-$(CONFIG_BOARD_HAMMERHEAD)		+= arch/avr32/boards/hammerhead/
 core-$(CONFIG_BOARD_FAVR_32)		+= arch/avr32/boards/favr-32/
 core-$(CONFIG_BOARD_MIMC200)		+= arch/avr32/boards/mimc200/
 core-$(CONFIG_LOADER_U_BOOT)		+= arch/avr32/boot/u-boot/
diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c
index 32fb9ba..05d3722 100644
--- a/arch/avr32/boards/atngw100/setup.c
+++ b/arch/avr32/boards/atngw100/setup.c
@@ -19,8 +19,8 @@
 #include <linux/types.h>
 #include <linux/leds.h>
 #include <linux/spi/spi.h>
+#include <linux/atmel-mci.h>
 
-#include <asm/atmel-mci.h>
 #include <asm/io.h>
 #include <asm/setup.h>
 
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index 5c5cdf3..1f33a10 100644
--- a/arch/avr32/boards/atstk1000/atstk1002.c
+++ b/arch/avr32/boards/atstk1000/atstk1002.c
@@ -16,12 +16,12 @@
 #include <linux/types.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/at73c213.h>
+#include <linux/atmel-mci.h>
 
 #include <video/atmel_lcdc.h>
 
 #include <asm/io.h>
 #include <asm/setup.h>
-#include <asm/atmel-mci.h>
 
 #include <mach/at32ap700x.h>
 #include <mach/board.h>
@@ -287,23 +287,7 @@
 	 * ATSTK1000 uses 32-bit SDRAM interface. Reserve the
 	 * SDRAM-specific pins so that nobody messes with them.
 	 */
-	at32_reserve_pin(GPIO_PIN_PE(0));	/* DATA[16]	*/
-	at32_reserve_pin(GPIO_PIN_PE(1));	/* DATA[17]	*/
-	at32_reserve_pin(GPIO_PIN_PE(2));	/* DATA[18]	*/
-	at32_reserve_pin(GPIO_PIN_PE(3));	/* DATA[19]	*/
-	at32_reserve_pin(GPIO_PIN_PE(4));	/* DATA[20]	*/
-	at32_reserve_pin(GPIO_PIN_PE(5));	/* DATA[21]	*/
-	at32_reserve_pin(GPIO_PIN_PE(6));	/* DATA[22]	*/
-	at32_reserve_pin(GPIO_PIN_PE(7));	/* DATA[23]	*/
-	at32_reserve_pin(GPIO_PIN_PE(8));	/* DATA[24]	*/
-	at32_reserve_pin(GPIO_PIN_PE(9));	/* DATA[25]	*/
-	at32_reserve_pin(GPIO_PIN_PE(10));	/* DATA[26]	*/
-	at32_reserve_pin(GPIO_PIN_PE(11));	/* DATA[27]	*/
-	at32_reserve_pin(GPIO_PIN_PE(12));	/* DATA[28]	*/
-	at32_reserve_pin(GPIO_PIN_PE(13));	/* DATA[29]	*/
-	at32_reserve_pin(GPIO_PIN_PE(14));	/* DATA[30]	*/
-	at32_reserve_pin(GPIO_PIN_PE(15));	/* DATA[31]	*/
-	at32_reserve_pin(GPIO_PIN_PE(26));	/* SDCS		*/
+	at32_reserve_pin(GPIO_PIOE_BASE, ATMEL_EBI_PE_DATA_ALL);
 
 #ifdef CONFIG_BOARD_ATSTK1006
 	smc_set_timing(&nand_config, &nand_timing);
diff --git a/arch/avr32/boards/atstk1000/atstk1003.c b/arch/avr32/boards/atstk1000/atstk1003.c
index 134b566..b3a23c8 100644
--- a/arch/avr32/boards/atstk1000/atstk1003.c
+++ b/arch/avr32/boards/atstk1000/atstk1003.c
@@ -17,9 +17,9 @@
 
 #include <linux/spi/at73c213.h>
 #include <linux/spi/spi.h>
+#include <linux/atmel-mci.h>
 
 #include <asm/setup.h>
-#include <asm/atmel-mci.h>
 
 #include <mach/at32ap700x.h>
 #include <mach/board.h>
@@ -131,23 +131,7 @@
 	 * ATSTK1000 uses 32-bit SDRAM interface. Reserve the
 	 * SDRAM-specific pins so that nobody messes with them.
 	 */
-	at32_reserve_pin(GPIO_PIN_PE(0));	/* DATA[16]	*/
-	at32_reserve_pin(GPIO_PIN_PE(1));	/* DATA[17]	*/
-	at32_reserve_pin(GPIO_PIN_PE(2));	/* DATA[18]	*/
-	at32_reserve_pin(GPIO_PIN_PE(3));	/* DATA[19]	*/
-	at32_reserve_pin(GPIO_PIN_PE(4));	/* DATA[20]	*/
-	at32_reserve_pin(GPIO_PIN_PE(5));	/* DATA[21]	*/
-	at32_reserve_pin(GPIO_PIN_PE(6));	/* DATA[22]	*/
-	at32_reserve_pin(GPIO_PIN_PE(7));	/* DATA[23]	*/
-	at32_reserve_pin(GPIO_PIN_PE(8));	/* DATA[24]	*/
-	at32_reserve_pin(GPIO_PIN_PE(9));	/* DATA[25]	*/
-	at32_reserve_pin(GPIO_PIN_PE(10));	/* DATA[26]	*/
-	at32_reserve_pin(GPIO_PIN_PE(11));	/* DATA[27]	*/
-	at32_reserve_pin(GPIO_PIN_PE(12));	/* DATA[28]	*/
-	at32_reserve_pin(GPIO_PIN_PE(13));	/* DATA[29]	*/
-	at32_reserve_pin(GPIO_PIN_PE(14));	/* DATA[30]	*/
-	at32_reserve_pin(GPIO_PIN_PE(15));	/* DATA[31]	*/
-	at32_reserve_pin(GPIO_PIN_PE(26));	/* SDCS		*/
+	at32_reserve_pin(GPIO_PIOE_BASE, ATMEL_EBI_PE_DATA_ALL);
 
 #ifdef	CONFIG_BOARD_ATSTK100X_SW2_CUSTOM
 	at32_add_device_usart(1);
diff --git a/arch/avr32/boards/atstk1000/atstk1004.c b/arch/avr32/boards/atstk1000/atstk1004.c
index cb32eb8..29b35ac 100644
--- a/arch/avr32/boards/atstk1000/atstk1004.c
+++ b/arch/avr32/boards/atstk1000/atstk1004.c
@@ -17,11 +17,11 @@
 
 #include <linux/spi/at73c213.h>
 #include <linux/spi/spi.h>
+#include <linux/atmel-mci.h>
 
 #include <video/atmel_lcdc.h>
 
 #include <asm/setup.h>
-#include <asm/atmel-mci.h>
 
 #include <mach/at32ap700x.h>
 #include <mach/board.h>
diff --git a/arch/avr32/boards/favr-32/setup.c b/arch/avr32/boards/favr-32/setup.c
index 1ee4faf..745c408 100644
--- a/arch/avr32/boards/favr-32/setup.c
+++ b/arch/avr32/boards/favr-32/setup.c
@@ -17,6 +17,7 @@
 #include <linux/linkage.h>
 #include <linux/gpio.h>
 #include <linux/leds.h>
+#include <linux/atmel-mci.h>
 #include <linux/atmel-pwm-bl.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
@@ -79,6 +80,14 @@
 	},
 };
 
+static struct mci_platform_data __initdata mci0_data = {
+	.slot[0] = {
+		.bus_width	= 4,
+		.detect_pin	= -ENODEV,
+		.wp_pin		= -ENODEV,
+	},
+};
+
 static struct fb_videomode __initdata lb104v03_modes[] = {
 	{
 		.name		= "640x480 @ 50",
@@ -307,28 +316,10 @@
 	 * Favr-32 uses 32-bit SDRAM interface. Reserve the SDRAM-specific
 	 * pins so that nobody messes with them.
 	 */
-	at32_reserve_pin(GPIO_PIN_PE(0));	/* DATA[16]	*/
-	at32_reserve_pin(GPIO_PIN_PE(1));	/* DATA[17]	*/
-	at32_reserve_pin(GPIO_PIN_PE(2));	/* DATA[18]	*/
-	at32_reserve_pin(GPIO_PIN_PE(3));	/* DATA[19]	*/
-	at32_reserve_pin(GPIO_PIN_PE(4));	/* DATA[20]	*/
-	at32_reserve_pin(GPIO_PIN_PE(5));	/* DATA[21]	*/
-	at32_reserve_pin(GPIO_PIN_PE(6));	/* DATA[22]	*/
-	at32_reserve_pin(GPIO_PIN_PE(7));	/* DATA[23]	*/
-	at32_reserve_pin(GPIO_PIN_PE(8));	/* DATA[24]	*/
-	at32_reserve_pin(GPIO_PIN_PE(9));	/* DATA[25]	*/
-	at32_reserve_pin(GPIO_PIN_PE(10));	/* DATA[26]	*/
-	at32_reserve_pin(GPIO_PIN_PE(11));	/* DATA[27]	*/
-	at32_reserve_pin(GPIO_PIN_PE(12));	/* DATA[28]	*/
-	at32_reserve_pin(GPIO_PIN_PE(13));	/* DATA[29]	*/
-	at32_reserve_pin(GPIO_PIN_PE(14));	/* DATA[30]	*/
-	at32_reserve_pin(GPIO_PIN_PE(15));	/* DATA[31]	*/
-	at32_reserve_pin(GPIO_PIN_PE(26));	/* SDCS		*/
+	at32_reserve_pin(GPIO_PIOE_BASE, ATMEL_EBI_PE_DATA_ALL);
 
 	at32_select_gpio(GPIO_PIN_PB(3), 0);	/* IRQ from ADS7843 */
 
-	at32_add_system_devices();
-
 	at32_add_device_usart(0);
 
 	set_hw_addr(at32_add_device_eth(0, &eth_data[0]));
@@ -339,7 +330,7 @@
 
 	at32_add_device_pwm(1 << atmel_pwm_bl_pdata.pwm_channel);
 	at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info));
-	at32_add_device_mci(0, NULL);
+	at32_add_device_mci(0, &mci0_data);
 	at32_add_device_usba(0, NULL);
 	at32_add_device_lcdc(0, &favr32_lcdc_data, fbmem_start, fbmem_size, 0);
 
diff --git a/arch/avr32/boards/hammerhead/Kconfig b/arch/avr32/boards/hammerhead/Kconfig
new file mode 100644
index 0000000..fda2331
--- /dev/null
+++ b/arch/avr32/boards/hammerhead/Kconfig
@@ -0,0 +1,43 @@
+# Hammerhead customization
+
+if BOARD_HAMMERHEAD
+
+config BOARD_HAMMERHEAD_USB
+	bool "Philips ISP116x-hcd USB support"
+	help
+	  This enables USB support for the Hammerhead's internal ISP116x
+	  controller from Philips.
+
+	  Choose 'Y' here if you want USB host support on your board.
+
+config BOARD_HAMMERHEAD_LCD
+	bool "Atmel AT91/AT32 LCD support"
+	help
+	  This enables LCD support for the Hammerhead board. You may
+	  also add support for framebuffer devices (AT91/AT32 LCD Controller)
+	  and framebuffer console support to get the most out of your LCD.
+
+	  Choose 'Y' here if you have ordered a Corona daughter board and
+	  want to have support for your Hantronix HDA-351T-LV LCD.
+
+config BOARD_HAMMERHEAD_SND
+	bool "Atmel AC97 Sound support"
+	help
+	  This enables Sound support for the Hammerhead board. You may
+	  also go through the ALSA settings to get it working.
+
+	  Choose 'Y' here if you have ordered a Corona daughter board and
+	  want to make your board funky.
+
+config BOARD_HAMMERHEAD_FPGA
+	bool "Hammerhead FPGA Support"
+	default y
+	help
+	  This adds support for the Cyclone III FPGA from Altera
+	  found on Miromico's Hammerhead board.
+
+	  Choose 'Y' here if you want to have FPGA support enabled.
+	  You will have to choose the "Hammerhead FPGA Device Support" in
+	  Device Drivers->Misc to be able to use FPGA functionality.
+
+endif	# BOARD_HAMMERHEAD
diff --git a/arch/avr32/boards/hammerhead/Makefile b/arch/avr32/boards/hammerhead/Makefile
new file mode 100644
index 0000000..c740aa1
--- /dev/null
+++ b/arch/avr32/boards/hammerhead/Makefile
@@ -0,0 +1 @@
+obj-y				+= setup.o flash.o
diff --git a/arch/avr32/boards/hammerhead/flash.c b/arch/avr32/boards/hammerhead/flash.c
new file mode 100644
index 0000000..a98c6dd
--- /dev/null
+++ b/arch/avr32/boards/hammerhead/flash.c
@@ -0,0 +1,377 @@
+/*
+ * Hammerhead board-specific flash initialization
+ *
+ * Copyright (C) 2008 Miromico AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/usb/isp116x.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#include <mach/portmux.h>
+#include <mach/at32ap700x.h>
+#include <mach/smc.h>
+
+#include "../../mach-at32ap/clock.h"
+#include "flash.h"
+
+
+#define HAMMERHEAD_USB_PERIPH_GCLK0	0x40000000
+#define HAMMERHEAD_USB_PERIPH_CS2	0x02000000
+#define HAMMERHEAD_USB_PERIPH_EXTINT0	0x02000000
+
+#define HAMMERHEAD_FPGA_PERIPH_MOSI	0x00000002
+#define HAMMERHEAD_FPGA_PERIPH_SCK	0x00000020
+#define HAMMERHEAD_FPGA_PERIPH_EXTINT3	0x10000000
+
+static struct smc_timing flash_timing __initdata = {
+	.ncs_read_setup		= 0,
+	.nrd_setup		= 40,
+	.ncs_write_setup	= 0,
+	.nwe_setup		= 10,
+
+	.ncs_read_pulse		= 80,
+	.nrd_pulse		= 40,
+	.ncs_write_pulse	= 65,
+	.nwe_pulse		= 55,
+
+	.read_cycle		= 120,
+	.write_cycle		= 120,
+};
+
+static struct smc_config flash_config __initdata = {
+	.bus_width		= 2,
+	.nrd_controlled		= 1,
+	.nwe_controlled		= 1,
+	.byte_write		= 1,
+};
+
+static struct mtd_partition flash_parts[] = {
+	{
+		.name		= "u-boot",
+		.offset		= 0x00000000,
+		.size		= 0x00020000,           /* 128 KiB */
+		.mask_flags	= MTD_WRITEABLE,
+	},
+	{
+		.name		= "root",
+		.offset		= 0x00020000,
+		.size		= 0x007d0000,
+	},
+	{
+		.name		= "env",
+		.offset		= 0x007f0000,
+		.size		= 0x00010000,
+		.mask_flags	= MTD_WRITEABLE,
+	},
+};
+
+static struct physmap_flash_data flash_data = {
+	.width		= 2,
+	.nr_parts	= ARRAY_SIZE(flash_parts),
+	.parts		= flash_parts,
+};
+
+static struct resource flash_resource = {
+	.start		= 0x00000000,
+	.end		= 0x007fffff,
+	.flags		= IORESOURCE_MEM,
+};
+
+static struct platform_device flash_device = {
+	.name		= "physmap-flash",
+	.id		= 0,
+	.resource	= &flash_resource,
+	.num_resources	= 1,
+	.dev		= { .platform_data = &flash_data, },
+};
+
+#ifdef CONFIG_BOARD_HAMMERHEAD_USB
+
+static struct smc_timing isp1160_timing __initdata = {
+	.ncs_read_setup		= 75,
+	.nrd_setup		= 75,
+	.ncs_write_setup	= 75,
+	.nwe_setup		= 75,
+
+
+	/* We use conservative timing settings, as the minimal settings aren't
+	   stable. There may be room for tweaking. */
+	.ncs_read_pulse		= 75,  /* min. 33ns */
+	.nrd_pulse		= 75,  /* min. 33ns */
+	.ncs_write_pulse	= 75,  /* min. 26ns */
+	.nwe_pulse		= 75,  /* min. 26ns */
+
+	.read_cycle		= 225, /* min. 143ns */
+	.write_cycle		= 225, /* min. 136ns */
+};
+
+static struct smc_config isp1160_config __initdata = {
+	.bus_width		= 2,
+	.nrd_controlled		= 1,
+	.nwe_controlled		= 1,
+	.byte_write		= 0,
+};
+
+/*
+ * The platform delay function is only used to enforce the strange
+ * read-to-write delay. This cannot be configured in the SMC. All other
+ * timings are controlled by the SMC (see timings above).
+ * So in isp116x-hcd.c we should comment out USE_PLATFORM_DELAY.
+ */
+void isp116x_delay(struct device *dev, int delay)
+{
+	if (delay > 150)
+		ndelay(delay - 150);
+}
+
+static struct  isp116x_platform_data isp1160_data = {
+	.sel15Kres		= 1,	/* use internal downstream resistors */
+	.oc_enable		= 0,	/* external overcurrent detection */
+	.int_edge_triggered	= 0,	/* interrupt is level triggered */
+	.int_act_high		= 0,	/* interrupt is active low */
+	.delay = isp116x_delay,		/* platform delay function */
+};
+
+static struct resource isp1160_resource[] = {
+	{
+		.start		= 0x08000000,
+		.end		= 0x08000001,
+		.flags		= IORESOURCE_MEM,
+	},
+	{
+		.start		= 0x08000002,
+		.end		= 0x08000003,
+		.flags		= IORESOURCE_MEM,
+	},
+	{
+		.start		= 64,
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device isp1160_device = {
+	.name		= "isp116x-hcd",
+	.id		= 0,
+	.resource	= isp1160_resource,
+	.num_resources	= 3,
+	.dev		= {
+		.platform_data = &isp1160_data,
+	},
+};
+#endif
+
+#ifdef CONFIG_BOARD_HAMMERHEAD_USB
+static int __init hammerhead_usbh_init(void)
+{
+	struct clk *gclk;
+	struct clk *osc;
+
+	int ret;
+
+	/* setup smc for usbh */
+	smc_set_timing(&isp1160_config, &isp1160_timing);
+	ret = smc_set_configuration(2, &isp1160_config);
+
+	if (ret < 0) {
+		printk(KERN_ERR
+		       "hammerhead: failed to set ISP1160 USBH timing\n");
+		return ret;
+	}
+
+	/* setup gclk0 to run from osc1 */
+	gclk = clk_get(NULL, "gclk0");
+	if (IS_ERR(gclk))
+		goto err_gclk;
+
+	osc = clk_get(NULL, "osc1");
+	if (IS_ERR(osc))
+		goto err_osc;
+
+	if (clk_set_parent(gclk, osc)) {
+		pr_debug("hammerhead: failed to set osc1 for USBH clock\n");
+		goto err_set_clk;
+	}
+
+	/* set clock to 6MHz */
+	clk_set_rate(gclk, 6000000);
+
+	/* and enable */
+	clk_enable(gclk);
+
+	/* select GCLK0 peripheral function */
+	at32_select_periph(GPIO_PIOA_BASE, HAMMERHEAD_USB_PERIPH_GCLK0,
+			   GPIO_PERIPH_A, 0);
+
+	/* enable CS2 peripheral function */
+	at32_select_periph(GPIO_PIOE_BASE, HAMMERHEAD_USB_PERIPH_CS2,
+			   GPIO_PERIPH_A, 0);
+
+	/* H_WAKEUP must be driven low */
+	at32_select_gpio(GPIO_PIN_PA(8), AT32_GPIOF_OUTPUT);
+
+	/* Select EXTINT0 for PB25 */
+	at32_select_periph(GPIO_PIOB_BASE, HAMMERHEAD_USB_PERIPH_EXTINT0,
+			   GPIO_PERIPH_A, 0);
+
+	/* register usbh device driver */
+	platform_device_register(&isp1160_device);
+
+ err_set_clk:
+	clk_put(osc);
+ err_osc:
+	clk_put(gclk);
+ err_gclk:
+	return ret;
+}
+#endif
+
+#ifdef CONFIG_BOARD_HAMMERHEAD_FPGA
+static struct smc_timing fpga_timing __initdata = {
+	.ncs_read_setup		= 16,
+	.nrd_setup		= 32,
+	.ncs_read_pulse		= 48,
+	.nrd_pulse		= 32,
+	.read_cycle		= 64,
+
+	.ncs_write_setup	= 16,
+	.nwe_setup		= 16,
+	.ncs_write_pulse	= 32,
+	.nwe_pulse		= 32,
+	.write_cycle		= 64,
+};
+
+static struct smc_config fpga_config __initdata = {
+	.bus_width		= 4,
+	.nrd_controlled		= 1,
+	.nwe_controlled		= 1,
+	.byte_write		= 0,
+};
+
+static struct resource hh_fpga0_resource[] = {
+	{
+		.start		= 0xffe00400,
+		.end		= 0xffe00400 + 0x3ff,
+		.flags		= IORESOURCE_MEM,
+	},
+	{
+		.start		= 4,
+		.end		= 4,
+		.flags		= IORESOURCE_IRQ,
+	},
+	{
+		.start		= 0x0c000000,
+		.end		= 0x0c000100,
+		.flags		= IORESOURCE_MEM,
+	},
+	{
+		.start		= 67,
+		.end		= 67,
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static u64 hh_fpga0_dma_mask = DMA_32BIT_MASK;
+static struct platform_device hh_fpga0_device = {
+	.name		= "hh_fpga",
+	.id		= 0,
+	.dev		= {
+		.dma_mask = &hh_fpga0_dma_mask,
+		.coherent_dma_mask = DMA_32BIT_MASK,
+	},
+	.resource	= hh_fpga0_resource,
+	.num_resources	= ARRAY_SIZE(hh_fpga0_resource),
+};
+
+static struct clk hh_fpga0_spi_clk = {
+	.name		= "spi_clk",
+	.dev		= &hh_fpga0_device.dev,
+	.mode		= pba_clk_mode,
+	.get_rate	= pba_clk_get_rate,
+	.index		= 1,
+};
+
+struct platform_device *__init at32_add_device_hh_fpga(void)
+{
+	/* Select peripheral functionality for SPI SCK and MOSI */
+	at32_select_periph(GPIO_PIOB_BASE, HAMMERHEAD_FPGA_PERIPH_SCK,
+			   GPIO_PERIPH_B, 0);
+	at32_select_periph(GPIO_PIOB_BASE, HAMMERHEAD_FPGA_PERIPH_MOSI,
+			   GPIO_PERIPH_B, 0);
+
+	/* Reserve all other needed GPIOs.
+	 * We have on-board pull-ups, so there is no need
+	 * to enable GPIO pull-ups. */
+	/* INIT_DONE (input) */
+	at32_select_gpio(GPIO_PIN_PB(0), 0);
+
+	/* nSTATUS (input) */
+	at32_select_gpio(GPIO_PIN_PB(2), 0);
+
+	/* nCONFIG (output, low) */
+	at32_select_gpio(GPIO_PIN_PB(3), AT32_GPIOF_OUTPUT);
+
+	/* CONF_DONE (input) */
+	at32_select_gpio(GPIO_PIN_PB(4), 0);
+
+	/* Select EXTINT3 for PB28 (Interrupt from FPGA) */
+	at32_select_periph(GPIO_PIOB_BASE, HAMMERHEAD_FPGA_PERIPH_EXTINT3,
+			   GPIO_PERIPH_A, 0);
+
+	/* Get our parent clock */
+	hh_fpga0_spi_clk.parent = clk_get(NULL, "pba");
+	clk_put(hh_fpga0_spi_clk.parent);
+
+	/* Register clock in at32 clock tree */
+	at32_clk_register(&hh_fpga0_spi_clk);
+
+	platform_device_register(&hh_fpga0_device);
+	return &hh_fpga0_device;
+}
+#endif
+
+/* This needs to be called after the SMC has been initialized */
+static int __init hammerhead_flash_init(void)
+{
+	int ret;
+
+	smc_set_timing(&flash_config, &flash_timing);
+	ret = smc_set_configuration(0, &flash_config);
+
+	if (ret < 0) {
+		printk(KERN_ERR "hammerhead: failed to set NOR flash timing\n");
+		return ret;
+	}
+
+	platform_device_register(&flash_device);
+
+#ifdef CONFIG_BOARD_HAMMERHEAD_USB
+	hammerhead_usbh_init();
+#endif
+
+#ifdef CONFIG_BOARD_HAMMERHEAD_FPGA
+	/* Setup SMC for FPGA interface */
+	smc_set_timing(&fpga_config, &fpga_timing);
+	ret = smc_set_configuration(3, &fpga_config);
+#endif
+
+
+	if (ret < 0) {
+		printk(KERN_ERR "hammerhead: failed to set FPGA timing\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+device_initcall(hammerhead_flash_init);
diff --git a/arch/avr32/boards/hammerhead/flash.h b/arch/avr32/boards/hammerhead/flash.h
new file mode 100644
index 0000000..ea70c62
--- /dev/null
+++ b/arch/avr32/boards/hammerhead/flash.h
@@ -0,0 +1,6 @@
+#ifndef __BOARDS_HAMMERHEAD_FLASH_H
+#define __BOARDS_HAMMERHEAD_FLASH_H
+
+struct platform_device *at32_add_device_hh_fpga(void);
+
+#endif /* __BOARDS_HAMMERHEAD_FLASH_H */
diff --git a/arch/avr32/boards/hammerhead/setup.c b/arch/avr32/boards/hammerhead/setup.c
new file mode 100644
index 0000000..4d2fe82
--- /dev/null
+++ b/arch/avr32/boards/hammerhead/setup.c
@@ -0,0 +1,245 @@
+/*
+ * Board-specific setup code for the Miromico Hammerhead board
+ *
+ * Copyright (C) 2008 Miromico AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/atmel-mci.h>
+#include <linux/clk.h>
+#include <linux/fb.h>
+#include <linux/etherdevice.h>
+#include <linux/i2c.h>
+#include <linux/i2c-gpio.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/spi/spi.h>
+
+#include <video/atmel_lcdc.h>
+
+#include <linux/io.h>
+#include <asm/setup.h>
+
+#include <mach/at32ap700x.h>
+#include <mach/board.h>
+#include <mach/init.h>
+#include <mach/portmux.h>
+
+#include "../../mach-at32ap/clock.h"
+#include "flash.h"
+
+/* Oscillator frequencies. These are board-specific */
+unsigned long at32_board_osc_rates[3] = {
+	[0] = 32768,	/* 32.768 kHz on RTC osc */
+	[1] = 25000000, /* 25MHz on osc0 */
+	[2] = 12000000,	/* 12 MHz on osc1 */
+};
+
+/* Initialized by bootloader-specific startup code. */
+struct tag *bootloader_tags __initdata;
+
+#ifdef CONFIG_BOARD_HAMMERHEAD_LCD
+static struct fb_videomode __initdata hda350tlv_modes[] = {
+	{
+		.name		= "320x240 @ 75",
+		.refresh	= 75,
+		.xres		= 320,
+		.yres		= 240,
+		.pixclock	= KHZ2PICOS(6891),
+
+		.left_margin	= 48,
+		.right_margin	= 18,
+		.upper_margin	= 18,
+		.lower_margin	= 4,
+		.hsync_len	= 20,
+		.vsync_len	= 2,
+
+		.sync		= 0,
+		.vmode		= FB_VMODE_NONINTERLACED,
+	},
+};
+
+static struct fb_monspecs __initdata hammerhead_hda350t_monspecs = {
+	.manufacturer		= "HAN",
+	.monitor		= "HDA350T-LV",
+	.modedb			= hda350tlv_modes,
+	.modedb_len		= ARRAY_SIZE(hda350tlv_modes),
+	.hfmin			= 14900,
+	.hfmax			= 22350,
+	.vfmin			= 60,
+	.vfmax			= 90,
+	.dclkmax		= 10000000,
+};
+
+struct atmel_lcdfb_info __initdata hammerhead_lcdc_data = {
+	.default_bpp		= 24,
+	.default_dmacon		= ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
+	.default_lcdcon2	= (ATMEL_LCDC_DISTYPE_TFT
+				   | ATMEL_LCDC_INVCLK
+				   | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE
+				   | ATMEL_LCDC_MEMOR_BIG),
+	.default_monspecs	= &hammerhead_hda350t_monspecs,
+	.guard_time		= 2,
+};
+#endif
+
+static struct mci_platform_data __initdata mci0_data = {
+	.slot[0] = {
+		.bus_width	= 4,
+		.detect_pin	= -ENODEV,
+		.wp_pin		= -ENODEV,
+	},
+};
+
+struct eth_addr {
+	u8 addr[6];
+};
+
+static struct eth_addr __initdata hw_addr[1];
+static struct eth_platform_data __initdata eth_data[1];
+
+/*
+ * The next two functions should go away as the boot loader is
+ * supposed to initialize the macb address registers with a valid
+ * ethernet address. But we need to keep it around for a while until
+ * we can be reasonably sure the boot loader does this.
+ *
+ * The phy_id is ignored as the driver will probe for it.
+ */
+static int __init parse_tag_ethernet(struct tag *tag)
+{
+	int i = tag->u.ethernet.mac_index;
+
+	if (i < ARRAY_SIZE(hw_addr))
+		memcpy(hw_addr[i].addr, tag->u.ethernet.hw_address,
+		       sizeof(hw_addr[i].addr));
+
+	return 0;
+}
+__tagtable(ATAG_ETHERNET, parse_tag_ethernet);
+
+static void __init set_hw_addr(struct platform_device *pdev)
+{
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	const u8 *addr;
+	void __iomem *regs;
+	struct clk *pclk;
+
+	if (!res)
+		return;
+
+	if (pdev->id >= ARRAY_SIZE(hw_addr))
+		return;
+
+	addr = hw_addr[pdev->id].addr;
+
+	if (!is_valid_ether_addr(addr))
+		return;
+
+	/*
+	 * Since this is board-specific code, we'll cheat and use the
+	 * physical address directly as we happen to know that it's
+	 * the same as the virtual address.
+	 */
+	regs = (void __iomem __force *)res->start;
+	pclk = clk_get(&pdev->dev, "pclk");
+
+	if (!pclk)
+		return;
+
+	clk_enable(pclk);
+
+	__raw_writel((addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) |
+		     addr[0], regs + 0x98);
+	__raw_writel((addr[5] << 8) | addr[4], regs + 0x9c);
+
+	clk_disable(pclk);
+	clk_put(pclk);
+}
+
+void __init setup_board(void)
+{
+	at32_map_usart(1, 0);	/* USART 1: /dev/ttyS0, DB9 */
+	at32_setup_serial_console(0);
+}
+
+static struct i2c_gpio_platform_data i2c_gpio_data = {
+	.sda_pin		= GPIO_PIN_PA(6),
+	.scl_pin		= GPIO_PIN_PA(7),
+	.sda_is_open_drain	= 1,
+	.scl_is_open_drain	= 1,
+	.udelay			= 2,	/* close to 100 kHz */
+};
+
+static struct platform_device i2c_gpio_device = {
+	.name		= "i2c-gpio",
+	.id		= 0,
+	.dev		= { .platform_data = &i2c_gpio_data, },
+};
+
+static struct i2c_board_info __initdata i2c_info[] = {};
+
+#ifdef CONFIG_BOARD_HAMMERHEAD_SND
+static struct ac97c_platform_data ac97c_data = {
+	.reset_pin = GPIO_PIN_PA(16),
+};
+#endif
+
+static int __init hammerhead_init(void)
+{
+	/*
+	 * Hammerhead uses 32-bit SDRAM interface. Reserve the
+	 * SDRAM-specific pins so that nobody messes with them.
+	 */
+	at32_reserve_pin(GPIO_PIOE_BASE, ATMEL_EBI_PE_DATA_ALL);
+
+	at32_add_device_usart(0);
+
+	/* Reserve PB29 (GCLK3). This pin is used as clock source
+	 * for ETH PHY (25MHz). GCLK3 setup is done by U-Boot.
+	 */
+	at32_reserve_pin(GPIO_PIOB_BASE, (1<<29));
+
+	/*
+	 * Hammerhead uses only one Ethernet port, so we don't set
+	 * the address of the second port.
+	 */
+	set_hw_addr(at32_add_device_eth(0, &eth_data[0]));
+
+#ifdef CONFIG_BOARD_HAMMERHEAD_FPGA
+	at32_add_device_hh_fpga();
+#endif
+	at32_add_device_mci(0, &mci0_data);
+
+#ifdef CONFIG_BOARD_HAMMERHEAD_USB
+	at32_add_device_usba(0, NULL);
+#endif
+#ifdef CONFIG_BOARD_HAMMERHEAD_LCD
+	at32_add_device_lcdc(0, &hammerhead_lcdc_data, fbmem_start,
+			     fbmem_size, ATMEL_LCDC_PRI_24BIT);
+#endif
+
+	at32_select_gpio(i2c_gpio_data.sda_pin,
+			 AT32_GPIOF_MULTIDRV | AT32_GPIOF_OUTPUT |
+			 AT32_GPIOF_HIGH);
+	at32_select_gpio(i2c_gpio_data.scl_pin,
+			 AT32_GPIOF_MULTIDRV | AT32_GPIOF_OUTPUT |
+			 AT32_GPIOF_HIGH);
+	platform_device_register(&i2c_gpio_device);
+	i2c_register_board_info(0, i2c_info, ARRAY_SIZE(i2c_info));
+
+#ifdef CONFIG_BOARD_HAMMERHEAD_SND
+	at32_add_device_ac97c(0, &ac97c_data);
+#endif
+
+	/* Select the Touchscreen interrupt pin mode */
+	at32_select_periph(GPIO_PIOB_BASE, 0x08000000, GPIO_PERIPH_A, 0);
+
+	return 0;
+}
+
+postcore_initcall(hammerhead_init);
diff --git a/arch/avr32/boards/mimc200/setup.c b/arch/avr32/boards/mimc200/setup.c
index 397cbb8..2b58d61 100644
--- a/arch/avr32/boards/mimc200/setup.c
+++ b/arch/avr32/boards/mimc200/setup.c
@@ -24,7 +24,7 @@
 #include <video/atmel_lcdc.h>
 #include <linux/fb.h>
 
-#include <asm/atmel-mci.h>
+#include <linux/atmel-mci.h>
 #include <linux/io.h>
 #include <asm/setup.h>
 
@@ -207,8 +207,6 @@
 	 * reserve any pins for it.
 	 */
 
-	at32_add_system_devices();
-
 	at32_add_device_usart(0);
 	at32_add_device_usart(1);
 	at32_add_device_usart(2);
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index 54152091..164e281 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -892,7 +892,7 @@
 # DMA Clients
 #
 # CONFIG_NET_DMA is not set
-CONFIG_DMATEST=m
+# CONFIG_DMATEST is not set
 # CONFIG_UIO is not set
 
 #
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig
index 69fce6b..c9dc648 100644
--- a/arch/avr32/configs/atstk1002_defconfig
+++ b/arch/avr32/configs/atstk1002_defconfig
@@ -964,7 +964,7 @@
 # DMA Clients
 #
 # CONFIG_NET_DMA is not set
-CONFIG_DMATEST=m
+# CONFIG_DMATEST is not set
 # CONFIG_UIO is not set
 
 #
diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig
index 5477ed3..29ea132 100644
--- a/arch/avr32/configs/atstk1003_defconfig
+++ b/arch/avr32/configs/atstk1003_defconfig
@@ -882,7 +882,7 @@
 # DMA Clients
 #
 # CONFIG_NET_DMA is not set
-CONFIG_DMATEST=m
+# CONFIG_DMATEST is not set
 # CONFIG_UIO is not set
 
 #
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig
index 6c45a3b..361c31c 100644
--- a/arch/avr32/configs/atstk1006_defconfig
+++ b/arch/avr32/configs/atstk1006_defconfig
@@ -1014,7 +1014,7 @@
 # DMA Clients
 #
 # CONFIG_NET_DMA is not set
-CONFIG_DMATEST=m
+# CONFIG_DMATEST is not set
 # CONFIG_UIO is not set
 # CONFIG_STAGING is not set
 CONFIG_STAGING_EXCLUDE_BUILD=y
diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig
new file mode 100644
index 0000000..0d3d298
--- /dev/null
+++ b/arch/avr32/configs/hammerhead_defconfig
@@ -0,0 +1,1467 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.27
+# Tue Dec  9 15:37:30 2008
+#
+CONFIG_AVR32=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+# CONFIG_GROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_BASE_FULL is not set
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+# CONFIG_MARKERS is not set
+CONFIG_OPROFILE=m
+CONFIG_HAVE_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_CLK=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=1
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_CLASSIC_RCU=y
+# CONFIG_FREEZER is not set
+
+#
+# System Type and features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_SUBARCH_AVR32B=y
+CONFIG_MMU=y
+CONFIG_PERFORMANCE_COUNTERS=y
+CONFIG_PLATFORM_AT32AP=y
+CONFIG_CPU_AT32AP700X=y
+CONFIG_CPU_AT32AP7000=y
+# CONFIG_BOARD_ATSTK1000 is not set
+# CONFIG_BOARD_ATNGW100 is not set
+CONFIG_BOARD_HAMMERHEAD=y
+# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MIMC200 is not set
+CONFIG_BOARD_HAMMERHEAD_USB=y
+CONFIG_BOARD_HAMMERHEAD_LCD=y
+CONFIG_BOARD_HAMMERHEAD_SND=y
+# CONFIG_BOARD_HAMMERHEAD_FPGA is not set
+CONFIG_LOADER_U_BOOT=y
+
+#
+# Atmel AVR32 AP options
+#
+# CONFIG_AP700X_32_BIT_SMC is not set
+CONFIG_AP700X_16_BIT_SMC=y
+# CONFIG_AP700X_8_BIT_SMC is not set
+CONFIG_LOAD_ADDRESS=0x10000000
+CONFIG_ENTRY_ADDRESS=0x90000000
+CONFIG_PHYS_OFFSET=0x10000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_QUICKLIST=y
+# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set
+# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
+# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+# CONFIG_ARCH_SPARSEMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_NR_QUICK=2
+CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+# CONFIG_OWNERSHIP_TRACE is not set
+# CONFIG_NMI_DEBUGGING is not set
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_SCHED_HRTICK=y
+CONFIG_CMDLINE=""
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+# CONFIG_CPU_FREQ_STAT is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_AT32AP=y
+
+#
+# Bus options
+#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_MULTIPLE_TABLES is not set
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+# CONFIG_IP_PIMSM_V2 is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_XFRM_TUNNEL=y
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+# CONFIG_NETFILTER_ADVANCED is not set
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+# CONFIG_NF_NAT_TFTP is not set
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+CONFIG_NF_NAT_SIP=m
+CONFIG_IP_NF_MANGLE=m
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_PHONET is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0x80000000
+CONFIG_MTD_PHYSMAP_LEN=0x0
+CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=m
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ATMEL_PWM is not set
+CONFIG_ATMEL_TCLIB=y
+CONFIG_ATMEL_TCB_CLKSRC=y
+CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ATMEL_SSC is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=m
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+CONFIG_MACB=y
+# CONFIG_ENC28J60 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_GPIO is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_AT32PSIF is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_SERIAL_ATMEL_PDC=y
+# CONFIG_SERIAL_ATMEL_TTYAT is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+CONFIG_I2C_GPIO=m
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_AT24 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_ATMEL=y
+# CONFIG_SPI_BITBANG is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_AT25 is not set
+CONFIG_SPI_SPIDEV=m
+# CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT32AP700X_WDT=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_ATMEL=y
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_LOGO is not set
+CONFIG_SOUND=m
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_SEQUENCER=m
+# CONFIG_SND_SEQ_DUMMY is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_SEQUENCER_OSS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+CONFIG_SND_SPI=y
+CONFIG_SND_USB=y
+# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_SOC is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HID_DEBUG is not set
+# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=m
+# CONFIG_HID_PID is not set
+# CONFIG_USB_HIDDEV is not set
+
+#
+# USB HID Boot Protocol drivers
+#
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+
+#
+# Special HID drivers
+#
+CONFIG_HID_COMPAT=y
+CONFIG_HID_A4TECH=m
+CONFIG_HID_APPLE=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_BRIGHT=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DELL=m
+CONFIG_HID_EZKEY=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_LOGITECH=m
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_PANTHERLORD=m
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SONY=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_THRUSTMASTER_FF=m
+CONFIG_ZEROPLUS_FF=m
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=m
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_ISP116X_HCD=m
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+CONFIG_USB_GADGET_ATMEL_USBA=y
+CONFIG_USB_ATMEL_USBA=y
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+CONFIG_MMC=m
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=m
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+CONFIG_MMC_ATMELMCI=m
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_AT32AP700X=y
+# CONFIG_DMADEVICES is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=m
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_SUNRPC_REGISTER_V4 is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=m
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_SAMPLES is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_MANAGER=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_CRC_CCITT=m
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32=y
+CONFIG_CRC7=m
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index 219822c..3136628 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -1,4 +1,3 @@
 include include/asm-generic/Kbuild.asm
 
-header-y	+= swab.h
 header-y	+= cachectl.h
diff --git a/arch/avr32/include/asm/atmel-mci.h b/arch/avr32/include/asm/atmel-mci.h
deleted file mode 100644
index 59f3fad..0000000
--- a/arch/avr32/include/asm/atmel-mci.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef __ASM_AVR32_ATMEL_MCI_H
-#define __ASM_AVR32_ATMEL_MCI_H
-
-#define ATMEL_MCI_MAX_NR_SLOTS	2
-
-struct dma_slave;
-
-/**
- * struct mci_slot_pdata - board-specific per-slot configuration
- * @bus_width: Number of data lines wired up the slot
- * @detect_pin: GPIO pin wired to the card detect switch
- * @wp_pin: GPIO pin wired to the write protect sensor
- *
- * If a given slot is not present on the board, @bus_width should be
- * set to 0. The other fields are ignored in this case.
- *
- * Any pins that aren't available should be set to a negative value.
- *
- * Note that support for multiple slots is experimental -- some cards
- * might get upset if we don't get the clock management exactly right.
- * But in most cases, it should work just fine.
- */
-struct mci_slot_pdata {
-	unsigned int		bus_width;
-	int			detect_pin;
-	int			wp_pin;
-};
-
-/**
- * struct mci_platform_data - board-specific MMC/SDcard configuration
- * @dma_slave: DMA slave interface to use in data transfers, or NULL.
- * @slot: Per-slot configuration data.
- */
-struct mci_platform_data {
-	struct dma_slave	*dma_slave;
-	struct mci_slot_pdata	slot[ATMEL_MCI_MAX_NR_SLOTS];
-};
-
-#endif /* __ASM_AVR32_ATMEL_MCI_H */
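The kernel-doc removed above still describes the per-slot fields the MMC platform data carries after the header's move to <linux/atmel-mci.h> (bus_width, detect_pin, wp_pin), and the at32ap700x.c hunk further down shows the DMA slave becoming an embedded dw_dma_slave rather than a pointer. As a minimal sketch only (the slot index, GPIO number, and the exact post-move layout of struct mci_platform_data are assumptions, not taken from this diff), a board file might describe a single 4-bit slot like this:

	/* Illustrative board data; field values are hypothetical. */
	static struct mci_platform_data board_mci_data = {
		.slot[0] = {
			.bus_width  = 4,	/* four data lines wired to slot 0 */
			.detect_pin = 42,	/* hypothetical card-detect GPIO */
			.wp_pin     = -1,	/* no write-protect sensor: negative value */
		},
		/* slot 1 not populated on this board, so bus_width stays 0 */
	};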
diff --git a/arch/avr32/include/asm/byteorder.h b/arch/avr32/include/asm/byteorder.h
index 2aba64b..50abc21 100644
--- a/arch/avr32/include/asm/byteorder.h
+++ b/arch/avr32/include/asm/byteorder.h
@@ -4,7 +4,6 @@
 #ifndef __ASM_AVR32_BYTEORDER_H
 #define __ASM_AVR32_BYTEORDER_H
 
-#include <asm/swab.h>
 #include <linux/byteorder/big_endian.h>
 
 #endif /* __ASM_AVR32_BYTEORDER_H */
diff --git a/arch/avr32/include/asm/kdebug.h b/arch/avr32/include/asm/kdebug.h
index ca4f954..f930ce2 100644
--- a/arch/avr32/include/asm/kdebug.h
+++ b/arch/avr32/include/asm/kdebug.h
@@ -6,6 +6,7 @@
 	DIE_BREAKPOINT,
 	DIE_SSTEP,
 	DIE_NMI,
+	DIE_OOPS,
 };
 
 #endif /* __ASM_AVR32_KDEBUG_H */
diff --git a/arch/avr32/include/asm/syscalls.h b/arch/avr32/include/asm/syscalls.h
new file mode 100644
index 0000000..483d666
--- /dev/null
+++ b/arch/avr32/include/asm/syscalls.h
@@ -0,0 +1,39 @@
+/*
+ * syscalls.h - Linux syscall interfaces (arch-specific)
+ *
+ * Copyright (c) 2008 Jaswinder Singh
+ *
+ * This file is released under the GPLv2.
+ * See the file COPYING for more details.
+ */
+
+#ifndef _ASM_AVR32_SYSCALLS_H
+#define _ASM_AVR32_SYSCALLS_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <linux/signal.h>
+
+/* kernel/process.c */
+asmlinkage int sys_fork(struct pt_regs *);
+asmlinkage int sys_clone(unsigned long, unsigned long,
+			 unsigned long, unsigned long,
+			 struct pt_regs *);
+asmlinkage int sys_vfork(struct pt_regs *);
+asmlinkage int sys_execve(char __user *, char __user *__user *,
+			  char __user *__user *, struct pt_regs *);
+
+/* kernel/signal.c */
+asmlinkage int sys_sigaltstack(const stack_t __user *, stack_t __user *,
+			       struct pt_regs *);
+asmlinkage int sys_rt_sigreturn(struct pt_regs *);
+
+/* kernel/sys_avr32.c */
+asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
+			  unsigned long, unsigned long, off_t);
+
+/* mm/cache.c */
+asmlinkage int sys_cacheflush(int, void __user *, size_t);
+
+#endif /* _ASM_AVR32_SYSCALLS_H */
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index 134d530..43ae555 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -18,6 +18,7 @@
 
 #include <asm/sysreg.h>
 #include <asm/ocd.h>
+#include <asm/syscalls.h>
 
 #include <mach/pm.h>
 
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c
index c5b11f9..803d7be 100644
--- a/arch/avr32/kernel/signal.c
+++ b/arch/avr32/kernel/signal.c
@@ -19,6 +19,7 @@
 
 #include <asm/uaccess.h>
 #include <asm/ucontext.h>
+#include <asm/syscalls.h>
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
diff --git a/arch/avr32/kernel/sys_avr32.c b/arch/avr32/kernel/sys_avr32.c
index 8e8911e..5d2daea 100644
--- a/arch/avr32/kernel/sys_avr32.c
+++ b/arch/avr32/kernel/sys_avr32.c
@@ -13,6 +13,7 @@
 
 #include <asm/mman.h>
 #include <asm/uaccess.h>
+#include <asm/syscalls.h>
 
 asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
 			  unsigned long prot, unsigned long flags,
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 066252e..3fbfd1e 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -15,8 +15,8 @@
 #include <linux/gpio.h>
 #include <linux/spi/spi.h>
 #include <linux/usb/atmel_usba_udc.h>
+#include <linux/atmel-mci.h>
 
-#include <asm/atmel-mci.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 
@@ -421,7 +421,7 @@
 	return bus_clk_get_rate(clk, shift);
 }
 
-static void pba_clk_mode(struct clk *clk, int enabled)
+void pba_clk_mode(struct clk *clk, int enabled)
 {
 	unsigned long flags;
 	u32 mask;
@@ -436,7 +436,7 @@
 	spin_unlock_irqrestore(&pm_lock, flags);
 }
 
-static unsigned long pba_clk_get_rate(struct clk *clk)
+unsigned long pba_clk_get_rate(struct clk *clk)
 {
 	unsigned long cksel, shift = 0;
 
@@ -1305,7 +1305,7 @@
 at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 {
 	struct platform_device		*pdev;
-	struct dw_dma_slave		*dws;
+	struct dw_dma_slave		*dws = &data->dma_slave;
 	u32				pioa_mask;
 	u32				piob_mask;
 
@@ -1324,22 +1324,13 @@
 				ARRAY_SIZE(atmel_mci0_resource)))
 		goto fail;
 
-	if (data->dma_slave)
-		dws = kmemdup(to_dw_dma_slave(data->dma_slave),
-				sizeof(struct dw_dma_slave), GFP_KERNEL);
-	else
-		dws = kzalloc(sizeof(struct dw_dma_slave), GFP_KERNEL);
-
-	dws->slave.dev = &pdev->dev;
-	dws->slave.dma_dev = &dw_dmac0_device.dev;
-	dws->slave.reg_width = DMA_SLAVE_WIDTH_32BIT;
+	dws->dma_dev = &dw_dmac0_device.dev;
+	dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
 	dws->cfg_hi = (DWC_CFGH_SRC_PER(0)
 				| DWC_CFGH_DST_PER(1));
 	dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL
 				| DWC_CFGL_HS_SRC_POL);
 
-	data->dma_slave = &dws->slave;
-
 	if (platform_device_add_data(pdev, data,
 				sizeof(struct mci_platform_data)))
 		goto fail;
diff --git a/arch/avr32/mach-at32ap/clock.h b/arch/avr32/mach-at32ap/clock.h
index 623bf0e..4c7ebbd 100644
--- a/arch/avr32/mach-at32ap/clock.h
+++ b/arch/avr32/mach-at32ap/clock.h
@@ -30,3 +30,6 @@
 	u16		users;		/* Enabled if non-zero */
 	u16		index;		/* Sibling index */
 };
+
+unsigned long pba_clk_get_rate(struct clk *clk);
+void pba_clk_mode(struct clk *clk, int enabled);
diff --git a/arch/avr32/mach-at32ap/include/mach/at32ap700x.h b/arch/avr32/mach-at32ap/include/mach/at32ap700x.h
index a77d372..5c4c971 100644
--- a/arch/avr32/mach-at32ap/include/mach/at32ap700x.h
+++ b/arch/avr32/mach-at32ap/include/mach/at32ap700x.h
@@ -211,4 +211,7 @@
 
 #define ATMEL_LCDC_ALT_15BIT	(ATMEL_LCDC_CONTROL | ATMEL_LCDC_ALT_15B_DATA)
 
+/* Bitmask for all EBI data (D16..D31) pins on port E */
+#define ATMEL_EBI_PE_DATA_ALL  (0x0000FFFF)
+
 #endif /* __ASM_ARCH_AT32AP700X_H__ */
diff --git a/arch/avr32/mach-at32ap/include/mach/portmux.h b/arch/avr32/mach-at32ap/include/mach/portmux.h
index 21c7937..4873024 100644
--- a/arch/avr32/mach-at32ap/include/mach/portmux.h
+++ b/arch/avr32/mach-at32ap/include/mach/portmux.h
@@ -25,6 +25,6 @@
 			unsigned int periph, unsigned long flags);
 void at32_select_gpio(unsigned int pin, unsigned long flags);
 void at32_deselect_pin(unsigned int pin);
-void at32_reserve_pin(unsigned int pin);
+void at32_reserve_pin(unsigned int port, u32 pin_mask);
 
 #endif /* __ASM_ARCH_PORTMUX_H__ */
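The new at32_reserve_pin() signature pairs with the ATMEL_EBI_PE_DATA_ALL mask added to at32ap700x.h above: a board's setup code can now reserve a whole bank's worth of pins in one call instead of looping over individual GPIO numbers. A minimal sketch, assuming GPIO_PIOE_BASE is the identifier for port E expected by gpio_to_pio() (that identifier is an assumption, not shown in this diff):

	/* Reserve all EBI data pins D16..D31 on port E for the external bus interface. */
	at32_reserve_pin(GPIO_PIOE_BASE, ATMEL_EBI_PE_DATA_ALL);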
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
index ed81a8b..09a274c 100644
--- a/arch/avr32/mach-at32ap/pio.c
+++ b/arch/avr32/mach-at32ap/pio.c
@@ -167,22 +167,29 @@
 }
 
 /* Reserve a pin, preventing anyone else from changing its configuration. */
-void __init at32_reserve_pin(unsigned int pin)
+void __init at32_reserve_pin(unsigned int port, u32 pin_mask)
 {
 	struct pio_device *pio;
-	unsigned int pin_index = pin & 0x1f;
 
-	pio = gpio_to_pio(pin);
+	/* assign and verify pio */
+	pio = gpio_to_pio(port);
 	if (unlikely(!pio)) {
-		printk("pio: invalid pin %u\n", pin);
+		printk(KERN_WARNING "pio: invalid port %u\n", port);
 		goto fail;
 	}
 
-	if (unlikely(test_and_set_bit(pin_index, &pio->pinmux_mask))) {
-		printk("%s: pin %u is busy\n", pio->name, pin_index);
+	/* Test if any of the requested pins is already muxed */
+	spin_lock(&pio_lock);
+	if (unlikely(pio->pinmux_mask & pin_mask)) {
+		printk(KERN_WARNING "%s: pin(s) busy (req. 0x%x, busy 0x%x)\n",
+		       pio->name, pin_mask, pio->pinmux_mask & pin_mask);
+		spin_unlock(&pio_lock);
 		goto fail;
 	}
 
+	/* Reserve pins */
+	pio->pinmux_mask |= pin_mask;
+	spin_unlock(&pio_lock);
 	return;
 
 fail:
diff --git a/arch/avr32/mm/cache.c b/arch/avr32/mm/cache.c
index 15a4e5e..24a74d1 100644
--- a/arch/avr32/mm/cache.c
+++ b/arch/avr32/mm/cache.c
@@ -13,6 +13,7 @@
 #include <asm/cachectl.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
+#include <asm/syscalls.h>
 
 /*
  * If you attempt to flush anything more than this, you need superuser
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index fa92ff6..e819fa6 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -97,7 +97,6 @@
 
 	mem_map = NODE_DATA(0)->node_mem_map;
 
-	memset(zero_page, 0, PAGE_SIZE);
 	empty_zero_page = virt_to_page(zero_page);
 	flush_dcache_page(empty_zero_page);
 }
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 29e71ed..a949c4f 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -26,6 +26,7 @@
 	default y
 	select HAVE_IDE
 	select HAVE_OPROFILE
+	select ARCH_WANT_OPTIONAL_GPIOLIB
 
 config ZONE_DMA
 	bool
@@ -59,10 +60,6 @@
 	bool
 	default y
 
-config HARDWARE_PM
-	def_bool y
-	depends on OPROFILE
-
 source "init/Kconfig"
 
 source "kernel/Kconfig.preempt"
@@ -77,6 +74,26 @@
 	prompt "CPU"
 	default BF533
 
+config BF512
+	bool "BF512"
+	help
+	  BF512 Processor Support.
+
+config BF514
+	bool "BF514"
+	help
+	  BF514 Processor Support.
+
+config BF516
+	bool "BF516"
+	help
+	  BF516 Processor Support.
+
+config BF518
+	bool "BF518"
+	help
+	  BF518 Processor Support.
+
 config BF522
 	bool "BF522"
 	help
@@ -137,6 +154,16 @@
 	help
 	  BF537 Processor Support.
 
+config BF538
+	bool "BF538"
+	help
+	  BF538 Processor Support.
+
+config BF539
+	bool "BF539"
+	help
+	  BF539 Processor Support.
+
 config BF542
 	bool "BF542"
 	help
@@ -169,28 +196,55 @@
 
 endchoice
 
+config SMP
+	depends on BF561
+	bool "Symmetric multi-processing support"
+	---help---
+	  This enables support for systems with more than one CPU,
+	  like the dual core BF561. If you have a system with only one
+	  CPU, say N. If you have a system with more than one CPU, say Y.
+
+	  If you don't know what to do here, say N.
+
+config NR_CPUS
+	int
+	depends on SMP
+	default 2 if BF561
+
+config IRQ_PER_CPU
+	bool
+	depends on SMP
+	default y
+
+config TICK_SOURCE_SYSTMR0
+	bool
+	select BFIN_GPTIMERS
+	depends on SMP
+	default y
+
 config BF_REV_MIN
 	int
-	default 0 if (BF52x || BF54x)
+	default 0 if (BF51x || BF52x || BF54x)
 	default 2 if (BF537 || BF536 || BF534)
 	default 3 if (BF561 ||BF533 || BF532 || BF531)
+	default 4 if (BF538 || BF539)
 
 config BF_REV_MAX
 	int
-	default 2 if (BF52x || BF54x)
+	default 2 if (BF51x || BF52x || BF54x)
 	default 3 if (BF537 || BF536 || BF534)
-	default 5 if (BF561)
+	default 5 if (BF561 || BF538 || BF539)
 	default 6 if (BF533 || BF532 || BF531)
 
 choice
 	prompt "Silicon Rev"
-	default BF_REV_0_1 if (BF52x || BF54x)
+	default BF_REV_0_1 if (BF51x || BF52x || BF54x)
 	default BF_REV_0_2 if (BF534 || BF536 || BF537)
 	default BF_REV_0_3 if (BF531 || BF532 || BF533 || BF561)
 
 config BF_REV_0_0
 	bool "0.0"
-	depends on (BF52x || BF54x)
+	depends on (BF51x || BF52x || BF54x)
 
 config BF_REV_0_1
 	bool "0.1"
@@ -206,11 +260,11 @@
 
 config BF_REV_0_4
 	bool "0.4"
-	depends on (BF561 || BF533 || BF532 || BF531)
+	depends on (BF561 || BF533 || BF532 || BF531 || BF538 || BF539)
 
 config BF_REV_0_5
 	bool "0.5"
-	depends on (BF561 || BF533 || BF532 || BF531)
+	depends on (BF561 || BF533 || BF532 || BF531 || BF538 || BF539)
 
 config BF_REV_0_6
 	bool "0.6"
@@ -224,6 +278,11 @@
 
 endchoice
 
+config BF51x
+	bool
+	depends on (BF512 || BF514 || BF516 || BF518)
+	default y
+
 config BF52x
 	bool
 	depends on (BF522 || BF523 || BF524 || BF525 || BF526 || BF527)
@@ -258,7 +317,7 @@
 
 config MEM_MT48LC32M8A2_75
 	bool
-	depends on (BFIN537_STAMP || PNAV10)
+	depends on (BFIN537_STAMP || PNAV10 || BFIN538_EZKIT)
 	default y
 
 config MEM_MT48LC8M32B2B5_7
@@ -271,10 +330,17 @@
 	depends on (BFIN527_EZKIT || BFIN532_IP0X || BLACKSTAMP || BFIN526_EZBRD)
 	default y
 
+config MEM_MT48LC32M8A2_75
+	bool
+	depends on (BFIN518F_EZBRD)
+	default y
+
+source "arch/blackfin/mach-bf518/Kconfig"
 source "arch/blackfin/mach-bf527/Kconfig"
 source "arch/blackfin/mach-bf533/Kconfig"
 source "arch/blackfin/mach-bf561/Kconfig"
 source "arch/blackfin/mach-bf537/Kconfig"
+source "arch/blackfin/mach-bf538/Kconfig"
 source "arch/blackfin/mach-bf548/Kconfig"
 
 menu "Board customizations"
@@ -307,6 +373,7 @@
 
 config ROM_BASE
 	hex "Kernel ROM Base"
+	depends on ROMKERNEL
 	default "0x20040000"
 	range 0x20000000 0x20400000 if !(BF54x || BF561)
 	range 0x20000000 0x30000000 if (BF54x || BF561)
@@ -318,7 +385,7 @@
 	int "Frequency of the crystal on the board in Hz"
 	default "11059200" if BFIN533_STAMP
 	default "27000000" if BFIN533_EZKIT
-	default "25000000" if (BFIN537_STAMP || BFIN527_EZKIT || H8606_HVSISTEMAS || BLACKSTAMP || BFIN526_EZBRD)
+	default "25000000" if (BFIN537_STAMP || BFIN527_EZKIT || H8606_HVSISTEMAS || BLACKSTAMP || BFIN526_EZBRD || BFIN538_EZKIT || BFIN518F-EZBRD)
 	default "30000000" if BFIN561_EZKIT
 	default "24576000" if PNAV10
 	default "10000000" if BFIN532_IP0X
@@ -354,11 +421,11 @@
 	range 1 64
 	default "22" if BFIN533_EZKIT
 	default "45" if BFIN533_STAMP
-	default "20" if (BFIN537_STAMP || BFIN527_EZKIT || BFIN548_EZKIT || BFIN548_BLUETECHNIX_CM)
+	default "20" if (BFIN537_STAMP || BFIN527_EZKIT || BFIN548_EZKIT || BFIN548_BLUETECHNIX_CM || BFIN538_EZKIT)
 	default "22" if BFIN533_BLUETECHNIX_CM
 	default "20" if (BFIN537_BLUETECHNIX_CM || BFIN527_BLUETECHNIX_CM || BFIN561_BLUETECHNIX_CM)
 	default "20" if BFIN561_EZKIT
-	default "16" if (H8606_HVSISTEMAS || BLACKSTAMP || BFIN526_EZBRD)
+	default "16" if (H8606_HVSISTEMAS || BLACKSTAMP || BFIN526_EZBRD || BFIN518F_EZBRD)
 	help
 	  This controls the frequency of the on-chip PLL. This can be between 1 and 64.
 	  PLL Frequency = (Crystal Frequency) * (this setting)
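As a worked example of the formula above, using the defaults visible in this hunk: a BFIN537_STAMP board with its 25000000 Hz crystal and the default multiplier of 20 ends up with a PLL frequency of 25 MHz * 20 = 500 MHz.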
@@ -407,19 +474,70 @@
 	bool "MT46V32M16_5B"
 endchoice
 
-config MAX_MEM_SIZE
-	int "Max SDRAM Memory Size in MBytes"
-	depends on !MPU
-	default 512
+choice
+	prompt "DDR/SDRAM Timing"
+	depends on BFIN_KERNEL_CLOCK
+	default BFIN_KERNEL_CLOCK_MEMINIT_CALC
 	help
-	  This is the max memory size that the kernel will create CPLB
-	  tables for.  Your system will not be able to handle any more.
+	  This option allows you to specify the Blackfin SDRAM/DDR timing
+	  parameters. The calculated SDRAM timing parameters may not be 100%
+	  accurate; the calculation option is therefore marked experimental.
+
+config BFIN_KERNEL_CLOCK_MEMINIT_CALC
+	bool "Calculate Timings (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+
+config BFIN_KERNEL_CLOCK_MEMINIT_SPEC
+	bool "Provide accurate Timings based on target SCLK"
+	help
+	  Please consult the Blackfin Hardware Reference Manuals as well
+	  as the memory device datasheet.
+	  http://docs.blackfin.uclinux.org/doku.php?id=bfin:sdram
+endchoice
+
+menu "Memory Init Control"
+	depends on BFIN_KERNEL_CLOCK_MEMINIT_SPEC
+
+config MEM_DDRCTL0
+	depends on BF54x
+	hex "DDRCTL0"
+	default 0x0
+
+config MEM_DDRCTL1
+	depends on BF54x
+	hex "DDRCTL1"
+	default 0x0
+
+config MEM_DDRCTL2
+	depends on BF54x
+	hex "DDRCTL2"
+	default 0x0
+
+config MEM_EBIU_DDRQUE
+	depends on BF54x
+	hex "DDRQUE"
+	default 0x0
+
+config MEM_SDRRC
+	depends on !BF54x
+	hex "SDRRC"
+	default 0x0
+
+config MEM_SDGCTL
+	depends on !BF54x
+	hex "SDGCTL"
+	default 0x0
+endmenu
 
 #
 # Max & Min Speeds for various Chips
 #
 config MAX_VCO_HZ
 	int
+	default 400000000 if BF512
+	default 400000000 if BF514
+	default 400000000 if BF516
+	default 400000000 if BF518
 	default 600000000 if BF522
 	default 400000000 if BF523
 	default 400000000 if BF524
@@ -459,6 +577,7 @@
 
 config GENERIC_TIME
 	bool "Generic time"
+	depends on !SMP
 	default y
 
 config GENERIC_CLOCKEVENTS
@@ -533,6 +652,7 @@
 
 
 menu "Blackfin Kernel Optimizations"
+	depends on !SMP
 
 comment "Memory Optimizations"
 
@@ -655,6 +775,17 @@
 
 	  Currently only works with FLAT binaries.
 
+config EXCEPTION_L1_SCRATCH
+	bool "Locate exception stack in L1 Scratch Memory"
+	default n
+	depends on !APP_STACK_L1 && !SYSCALL_TAB_L1
+	help
+	  Whenever an exception occurs, use the L1 Scratch memory for
+	  stack storage.  You cannot place the stacks of FLAT binaries
+	  in L1 when using this option.
+
+	  If you don't otherwise use the L1 Scratch memory, you should say Y here.
+
 comment "Speed Optimizations"
 config BFIN_INS_LOWOVERHEAD
 	bool "ins[bwl] low overhead, higher interrupt latency"
@@ -684,7 +815,6 @@
 
 endmenu
 
-
 choice
 	prompt "Kernel executes from"
 	help
@@ -714,17 +844,9 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called gptimers.ko.
 
-config BFIN_DMA_5XX
-	bool "Enable DMA Support"
-	depends on (BF52x || BF53x || BF561 || BF54x)
-	default y
-	help
-	  DMA driver for BF5xx.
-
 choice
-	prompt "Uncached SDRAM region"
+	prompt "Uncached DMA region"
 	default DMA_UNCACHED_1M
-	depends on BFIN_DMA_5XX
 config DMA_UNCACHED_4M
 	bool "Enable 4M DMA region"
 config DMA_UNCACHED_2M
@@ -751,9 +873,11 @@
 choice
 	prompt "Policy"
 	depends on BFIN_DCACHE
-	default BFIN_WB
+	default BFIN_WB if !SMP
+	default BFIN_WT if SMP
 config BFIN_WB
 	bool "Write back"
+	depends on !SMP
 	help
 	  Write Back Policy:
 	    Cached data will be written back to SDRAM only when needed.
@@ -790,7 +914,7 @@
 
 config BFIN_L2_CACHEABLE
 	bool "Cache L2 SRAM"
-	depends on (BFIN_DCACHE || BFIN_ICACHE) && (BF54x || BF561)
+	depends on (BFIN_DCACHE || BFIN_ICACHE) && (BF54x || (BF561 && !SMP))
 	default n
 	help
 	  Select to make L2 SRAM cacheable in L1 data and instruction cache.
@@ -980,7 +1104,7 @@
 	int "GPIO number"
 	range 0 47
 	depends on PM_WAKEUP_BY_GPIO
-	default 2 if BFIN537_STAMP
+	default 2
 
 choice
 	prompt "GPIO Polarity"
@@ -1003,7 +1127,7 @@
 
 config PM_BFIN_WAKE_PH6
 	bool "Allow Wake-Up from on-chip PHY or PH6 GP"
-	depends on PM && (BF52x || BF534 || BF536 || BF537)
+	depends on PM && (BF51x || BF52x || BF534 || BF536 || BF537)
 	default n
 	help
 	  Enable PHY and PH6 GP Wake-Up (Voltage Regulator Power-Up)
@@ -1020,15 +1144,21 @@
 
 source "drivers/cpufreq/Kconfig"
 
+config BFIN_CPU_FREQ
+	bool
+	depends on CPU_FREQ
+	select CPU_FREQ_TABLE
+	default y
+
 config CPU_VOLTAGE
 	bool "CPU Voltage scaling"
-	depends on EXPERIMENTAL	
+	depends on EXPERIMENTAL
 	depends on CPU_FREQ
 	default n
 	help
 	  Say Y here if you want CPU voltage scaling according to the CPU frequency.
 	  This option violates the PLL BYPASS recommendation in the Blackfin Processor
-	  manuals. There is a theoretical risk that during VDDINT transitions 
+	  manuals. There is a theoretical risk that during VDDINT transitions
 	  the PLL may unlock.
 
 endmenu
diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
index 3ad2598..5f981d9 100644
--- a/arch/blackfin/Kconfig.debug
+++ b/arch/blackfin/Kconfig.debug
@@ -2,8 +2,30 @@
 
 source "lib/Kconfig.debug"
 
+config DEBUG_STACKOVERFLOW
+	bool "Check for stack overflows"
+	depends on DEBUG_KERNEL
+	help
+	  This option will cause messages to be printed if free stack space
+	  drops below a certain limit.
+
+config DEBUG_STACK_USAGE
+	bool "Enable stack utilization instrumentation"
+	depends on DEBUG_KERNEL
+	help
+	  Enables the display of the minimum amount of free stack which each
+	  task has ever had available in the sysrq-T output.
+
+	  This option will slow down process creation somewhat.
+
 config HAVE_ARCH_KGDB
-       def_bool y
+	def_bool y
+
+config KGDB_TESTCASE
+	tristate "KGDB: for test case in expect"
+	default n
+	help
+	  This is a kgdb test case for automated testing.
 
 config DEBUG_VERBOSE
 	bool "Verbose fault messages"
@@ -182,11 +204,11 @@
 	  4 for (2^4) 16k, or 4096 entries
 
 config DEBUG_BFIN_NO_KERN_HWTRACE
-	bool "Trace user apps (turn off hwtrace in kernel)"
+	bool "Turn off hwtrace in CPLB handlers"
 	depends on DEBUG_BFIN_HWTRACE_ON
-	default n
+	default y
 	help
-	  Some pieces of the kernel contain a lot of flow changes which can
+	  The CPLB error handler contains a lot of flow changes which can
 	  quickly fill up the hardware trace buffer.  When debugging crashes,
 	  the hardware trace may indicate that the problem lies in kernel
 	  space when in reality an application is buggy.
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index 6bf5097..e550c8d 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -21,6 +21,10 @@
 KBUILD_DEFCONFIG := BF537-STAMP_defconfig
 
 # setup the machine name and the machine dependent settings
+machine-$(CONFIG_BF512) := bf518
+machine-$(CONFIG_BF514) := bf518
+machine-$(CONFIG_BF516) := bf518
+machine-$(CONFIG_BF518) := bf518
 machine-$(CONFIG_BF522) := bf527
 machine-$(CONFIG_BF523) := bf527
 machine-$(CONFIG_BF524) := bf527
@@ -33,6 +37,8 @@
 machine-$(CONFIG_BF534) := bf537
 machine-$(CONFIG_BF536) := bf537
 machine-$(CONFIG_BF537) := bf537
+machine-$(CONFIG_BF538) := bf538
+machine-$(CONFIG_BF539) := bf538
 machine-$(CONFIG_BF542) := bf548
 machine-$(CONFIG_BF544) := bf548
 machine-$(CONFIG_BF547) := bf548
@@ -42,6 +48,10 @@
 MACHINE := $(machine-y)
 export MACHINE
 
+cpu-$(CONFIG_BF512) := bf512
+cpu-$(CONFIG_BF514) := bf514
+cpu-$(CONFIG_BF516) := bf516
+cpu-$(CONFIG_BF518) := bf518
 cpu-$(CONFIG_BF522) := bf522
 cpu-$(CONFIG_BF523) := bf523
 cpu-$(CONFIG_BF524) := bf524
@@ -54,6 +64,8 @@
 cpu-$(CONFIG_BF534) := bf534
 cpu-$(CONFIG_BF536) := bf536
 cpu-$(CONFIG_BF537) := bf537
+cpu-$(CONFIG_BF538) := bf538
+cpu-$(CONFIG_BF539) := bf539
 cpu-$(CONFIG_BF542) := bf542
 cpu-$(CONFIG_BF544) := bf544
 cpu-$(CONFIG_BF547) := bf547
@@ -79,7 +91,7 @@
 CHECKFLAGS_SILICON = $(shell echo "" | $(CPP) $(KBUILD_CFLAGS) -dD - 2>/dev/null | awk '$$2 == "__SILICON_REVISION__" { print $$3 }')
 CHECKFLAGS += -D__SILICON_REVISION__=$(CHECKFLAGS_SILICON) -Dl1_text=__used__
 
-head-y   := arch/$(ARCH)/mach-$(MACHINE)/head.o arch/$(ARCH)/kernel/init_task.o
+head-y   := arch/$(ARCH)/kernel/init_task.o
 
 core-y   += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/ arch/$(ARCH)/mach-common/
 
@@ -95,10 +107,10 @@
 core-y	+= arch/$(ARCH)/kernel/cplb-nompu/
 endif
 
-libs-y   += arch/$(ARCH)/lib/
-
 drivers-$(CONFIG_OPROFILE) += arch/$(ARCH)/oprofile/
 
+libs-y   += arch/$(ARCH)/lib/
+
 machdirs	:= $(patsubst %,arch/blackfin/mach-%/, $(machine-y))
 
 KBUILD_CFLAGS += -Iarch/$(ARCH)/include/
diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
new file mode 100644
index 0000000..e0b3f24
--- /dev/null
+++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
@@ -0,0 +1,1191 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.28-rc2
+#
+# CONFIG_MMU is not set
+# CONFIG_FPU is not set
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
+CONFIG_BLACKFIN=y
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_FORCE_MAX_ZONEORDER=14
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+# CONFIG_ELF_CORE is not set
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_AIO=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_TINY_SHMEM=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+CONFIG_CLASSIC_RCU=y
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
+# CONFIG_FREEZER is not set
+
+#
+# Blackfin Processor Options
+#
+
+#
+# Processor and Board Settings
+#
+# CONFIG_BF512 is not set
+# CONFIG_BF514 is not set
+# CONFIG_BF516 is not set
+CONFIG_BF518=y
+# CONFIG_BF522 is not set
+# CONFIG_BF523 is not set
+# CONFIG_BF524 is not set
+# CONFIG_BF525 is not set
+# CONFIG_BF526 is not set
+# CONFIG_BF527 is not set
+# CONFIG_BF531 is not set
+# CONFIG_BF532 is not set
+# CONFIG_BF533 is not set
+# CONFIG_BF534 is not set
+# CONFIG_BF536 is not set
+# CONFIG_BF537 is not set
+# CONFIG_BF538 is not set
+# CONFIG_BF539 is not set
+# CONFIG_BF542 is not set
+# CONFIG_BF544 is not set
+# CONFIG_BF547 is not set
+# CONFIG_BF548 is not set
+# CONFIG_BF549 is not set
+# CONFIG_BF561 is not set
+CONFIG_BF_REV_MIN=0
+CONFIG_BF_REV_MAX=2
+CONFIG_BF_REV_0_0=y
+# CONFIG_BF_REV_0_1 is not set
+# CONFIG_BF_REV_0_2 is not set
+# CONFIG_BF_REV_0_3 is not set
+# CONFIG_BF_REV_0_4 is not set
+# CONFIG_BF_REV_0_5 is not set
+# CONFIG_BF_REV_0_6 is not set
+# CONFIG_BF_REV_ANY is not set
+# CONFIG_BF_REV_NONE is not set
+CONFIG_BF51x=y
+CONFIG_BFIN518F_EZBRD=y
+
+#
+# BF518 Specific Configuration
+#
+
+#
+# Alternative Multiplexing Scheme
+#
+# CONFIG_BF518_SPORT0_PORTF is not set
+CONFIG_BF518_SPORT0_PORTG=y
+CONFIG_BF518_SPORT0_TSCLK_PG10=y
+# CONFIG_BF518_SPORT0_TSCLK_PG14 is not set
+CONFIG_BF518_UART1_PORTF=y
+# CONFIG_BF518_UART1_PORTG is not set
+
+#
+# Interrupt Priority Assignment
+#
+
+#
+# Priority
+#
+CONFIG_IRQ_PLL_WAKEUP=7
+CONFIG_IRQ_DMA0_ERROR=7
+CONFIG_IRQ_DMAR0_BLK=7
+CONFIG_IRQ_DMAR1_BLK=7
+CONFIG_IRQ_DMAR0_OVR=7
+CONFIG_IRQ_DMAR1_OVR=7
+CONFIG_IRQ_PPI_ERROR=7
+CONFIG_IRQ_MAC_ERROR=7
+CONFIG_IRQ_SPORT0_ERROR=7
+CONFIG_IRQ_SPORT1_ERROR=7
+CONFIG_IRQ_PTP_ERROR=7
+CONFIG_IRQ_UART0_ERROR=7
+CONFIG_IRQ_UART1_ERROR=7
+CONFIG_IRQ_RTC=8
+CONFIG_IRQ_PPI=8
+CONFIG_IRQ_SPORT0_RX=9
+CONFIG_IRQ_SPORT0_TX=9
+CONFIG_IRQ_SPORT1_RX=9
+CONFIG_IRQ_SPORT1_TX=9
+CONFIG_IRQ_TWI=10
+CONFIG_IRQ_SPI0=10
+CONFIG_IRQ_UART0_RX=10
+CONFIG_IRQ_UART0_TX=10
+CONFIG_IRQ_UART1_RX=10
+CONFIG_IRQ_UART1_TX=10
+CONFIG_IRQ_OPTSEC=11
+CONFIG_IRQ_CNT=11
+CONFIG_IRQ_MAC_RX=11
+CONFIG_IRQ_PORTH_INTA=11
+CONFIG_IRQ_MAC_TX=11
+CONFIG_IRQ_PORTH_INTB=11
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
+CONFIG_IRQ_PORTG_INTA=12
+CONFIG_IRQ_PORTG_INTB=12
+CONFIG_IRQ_MEM_DMA0=13
+CONFIG_IRQ_MEM_DMA1=13
+CONFIG_IRQ_WATCH=13
+CONFIG_IRQ_PORTF_INTA=13
+CONFIG_IRQ_PORTF_INTB=13
+CONFIG_IRQ_SPI0_ERROR=7
+CONFIG_IRQ_SPI1_ERROR=7
+CONFIG_IRQ_RSI_INT0=7
+CONFIG_IRQ_RSI_INT1=7
+CONFIG_IRQ_PWM_TRIP=10
+CONFIG_IRQ_PWM_SYNC=10
+CONFIG_IRQ_PTP_STAT=10
+
+#
+# Board customizations
+#
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_BOOT_LOAD=0x1000
+
+#
+# Clock/PLL Setup
+#
+CONFIG_CLKIN_HZ=25000000
+# CONFIG_BFIN_KERNEL_CLOCK is not set
+CONFIG_MAX_VCO_HZ=400000000
+CONFIG_MIN_VCO_HZ=50000000
+CONFIG_MAX_SCLK_HZ=133333333
+CONFIG_MIN_SCLK_HZ=27000000
+
+#
+# Kernel Timer/Scheduler
+#
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_CYCLES_CLOCKSOURCE is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+
+#
+# Misc
+#
+CONFIG_BFIN_SCRATCH_REG_RETN=y
+# CONFIG_BFIN_SCRATCH_REG_RETE is not set
+# CONFIG_BFIN_SCRATCH_REG_CYCLES is not set
+
+#
+# Blackfin Kernel Optimizations
+#
+
+#
+# Memory Optimizations
+#
+CONFIG_I_ENTRY_L1=y
+CONFIG_EXCPT_IRQ_SYSC_L1=y
+CONFIG_DO_IRQ_L1=y
+CONFIG_CORE_TIMER_IRQ_L1=y
+CONFIG_IDLE_L1=y
+# CONFIG_SCHEDULE_L1 is not set
+CONFIG_ARITHMETIC_OPS_L1=y
+CONFIG_ACCESS_OK_L1=y
+# CONFIG_MEMSET_L1 is not set
+# CONFIG_MEMCPY_L1 is not set
+# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set
+# CONFIG_IP_CHECKSUM_L1 is not set
+CONFIG_CACHELINE_ALIGNED_L1=y
+# CONFIG_SYSCALL_TAB_L1 is not set
+# CONFIG_CPLB_SWITCH_TAB_L1 is not set
+CONFIG_APP_STACK_L1=y
+
+#
+# Speed Optimizations
+#
+CONFIG_BFIN_INS_LOWOVERHEAD=y
+CONFIG_RAMKERNEL=y
+# CONFIG_ROMKERNEL is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_VIRT_TO_BUS=y
+CONFIG_BFIN_GPTIMERS=y
+# CONFIG_DMA_UNCACHED_4M is not set
+# CONFIG_DMA_UNCACHED_2M is not set
+CONFIG_DMA_UNCACHED_1M=y
+# CONFIG_DMA_UNCACHED_NONE is not set
+
+#
+# Cache Support
+#
+CONFIG_BFIN_ICACHE=y
+CONFIG_BFIN_DCACHE=y
+# CONFIG_BFIN_DCACHE_BANKA is not set
+# CONFIG_BFIN_ICACHE_LOCK is not set
+CONFIG_BFIN_WB=y
+# CONFIG_BFIN_WT is not set
+# CONFIG_MPU is not set
+
+#
+# Asynchronous Memory Configuration
+#
+
+#
+# EBIU_AMGCTL Global Control
+#
+CONFIG_C_AMCKEN=y
+CONFIG_C_CDPRIO=y
+# CONFIG_C_AMBEN is not set
+# CONFIG_C_AMBEN_B0 is not set
+# CONFIG_C_AMBEN_B0_B1 is not set
+# CONFIG_C_AMBEN_B0_B1_B2 is not set
+CONFIG_C_AMBEN_ALL=y
+
+#
+# EBIU_AMBCTL Control
+#
+CONFIG_BANK_0=0x7BB0
+CONFIG_BANK_1=0x5554
+CONFIG_BANK_2=0x7BB0
+CONFIG_BANK_3=0xFFC0
+
+#
+# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
+#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF_FDPIC=y
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_ZFLAT=y
+# CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_PM_WAKEUP_BY_GPIO is not set
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETLABEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_PHONET is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_OLD_REGULATORY=y
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_GEN_PROBE=m
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_RAM=y
+CONFIG_MTD_ROM=m
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_GPIO_ADDR is not set
+# CONFIG_MTD_UCLINUX is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_BFIN_MAC is not set
+# CONFIG_SMC91X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_UINPUT is not set
+# CONFIG_CONFIG_INPUT_PCF8574 is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_AD9960 is not set
+# CONFIG_SPI_ADC_BF533 is not set
+# CONFIG_BF5xx_PPIFCD is not set
+# CONFIG_BFIN_SIMPLE_TIMER is not set
+# CONFIG_BF5xx_PPI is not set
+# CONFIG_BFIN_SPORT is not set
+# CONFIG_BFIN_TIMER_LATENCY is not set
+# CONFIG_TWI_LCD is not set
+CONFIG_BFIN_DMA_INTERFACE=m
+CONFIG_SIMPLE_GPIO=m
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_BFIN_JTAG_COMM is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_BFIN=y
+CONFIG_SERIAL_BFIN_CONSOLE=y
+CONFIG_SERIAL_BFIN_DMA=y
+# CONFIG_SERIAL_BFIN_PIO is not set
+CONFIG_SERIAL_BFIN_UART0=y
+# CONFIG_BFIN_UART0_CTSRTS is not set
+# CONFIG_SERIAL_BFIN_UART1 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_BFIN_SPORT is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+
+#
+# CAN, the car bus and industrial fieldbus
+#
+# CONFIG_CAN4LINUX is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+CONFIG_I2C_BLACKFIN_TWI=y
+CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_AT24 is not set
+# CONFIG_SENSORS_AD5252 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_SPI is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_BFIN_WDT=y
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_BFIN=y
+# CONFIG_DMADEVICES is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+# CONFIG_TMPFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_YAFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=m
+# CONFIG_SUNRPC_REGISTER_V4 is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_VERBOSE=y
+CONFIG_DEBUG_MMRS=y
+# CONFIG_DEBUG_HWERR is not set
+# CONFIG_DEBUG_DOUBLEFAULT is not set
+CONFIG_DEBUG_HUNT_FOR_ZERO=y
+CONFIG_DEBUG_BFIN_HWTRACE_ON=y
+CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
+# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE is not set
+# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set
+CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0
+# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
+# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
+CONFIG_EARLY_PRINTK=y
+CONFIG_CPLB_INFO=y
+CONFIG_ACCESS_CHECK=y
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_NETWORK is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_CRC_CCITT=m
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
index 4443a47..69f66c3 100644
--- a/arch/blackfin/configs/BF526-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
@@ -1,7 +1,6 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.26.3
-# Thu Aug 28 16:49:53 2008
+# Linux kernel version: 2.6.28-rc2
 #
 # CONFIG_MMU is not set
 # CONFIG_FPU is not set
@@ -37,8 +36,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CGROUPS is not set
 # CONFIG_GROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
 # CONFIG_RELAY is not set
 # CONFIG_NAMESPACES is not set
 CONFIG_BLK_DEV_INITRD=y
@@ -48,13 +46,13 @@
 CONFIG_EMBEDDED=y
 CONFIG_UID16=y
 CONFIG_SYSCTL_SYSCALL=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
 CONFIG_COMPAT_BRK=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
@@ -63,6 +61,7 @@
 CONFIG_SIGNALFD=y
 CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
+CONFIG_AIO=y
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
@@ -70,9 +69,7 @@
 # CONFIG_PROFILING is not set
 # CONFIG_MARKERS is not set
 CONFIG_HAVE_OPROFILE=y
-# CONFIG_HAVE_KPROBES is not set
-# CONFIG_HAVE_KRETPROBES is not set
-# CONFIG_HAVE_DMA_ATTRS is not set
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 CONFIG_TINY_SHMEM=y
@@ -89,6 +86,7 @@
 # CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_LSF is not set
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -106,6 +104,7 @@
 # CONFIG_PREEMPT_NONE is not set
 CONFIG_PREEMPT_VOLUNTARY=y
 # CONFIG_PREEMPT is not set
+# CONFIG_FREEZER is not set
 
 #
 # Blackfin Processor Options
@@ -114,6 +113,10 @@
 #
 # Processor and Board Settings
 #
+# CONFIG_BF512 is not set
+# CONFIG_BF514 is not set
+# CONFIG_BF516 is not set
+# CONFIG_BF518 is not set
 # CONFIG_BF522 is not set
 # CONFIG_BF523 is not set
 # CONFIG_BF524 is not set
@@ -126,22 +129,71 @@
 # CONFIG_BF534 is not set
 # CONFIG_BF536 is not set
 # CONFIG_BF537 is not set
+# CONFIG_BF538 is not set
+# CONFIG_BF539 is not set
 # CONFIG_BF542 is not set
 # CONFIG_BF544 is not set
 # CONFIG_BF547 is not set
 # CONFIG_BF548 is not set
 # CONFIG_BF549 is not set
 # CONFIG_BF561 is not set
+CONFIG_BF_REV_MIN=0
+CONFIG_BF_REV_MAX=2
 CONFIG_BF_REV_0_0=y
 # CONFIG_BF_REV_0_1 is not set
 # CONFIG_BF_REV_0_2 is not set
 # CONFIG_BF_REV_0_3 is not set
 # CONFIG_BF_REV_0_4 is not set
 # CONFIG_BF_REV_0_5 is not set
+# CONFIG_BF_REV_0_6 is not set
 # CONFIG_BF_REV_ANY is not set
 # CONFIG_BF_REV_NONE is not set
 CONFIG_BF52x=y
 CONFIG_MEM_MT48LC32M16A2TG_75=y
+CONFIG_IRQ_PLL_WAKEUP=7
+CONFIG_IRQ_DMA0_ERROR=7
+CONFIG_IRQ_DMAR0_BLK=7
+CONFIG_IRQ_DMAR1_BLK=7
+CONFIG_IRQ_DMAR0_OVR=7
+CONFIG_IRQ_DMAR1_OVR=7
+CONFIG_IRQ_PPI_ERROR=7
+CONFIG_IRQ_MAC_ERROR=7
+CONFIG_IRQ_SPORT0_ERROR=7
+CONFIG_IRQ_SPORT1_ERROR=7
+CONFIG_IRQ_UART0_ERROR=7
+CONFIG_IRQ_UART1_ERROR=7
+CONFIG_IRQ_RTC=8
+CONFIG_IRQ_PPI=8
+CONFIG_IRQ_SPORT0_RX=9
+CONFIG_IRQ_SPORT0_TX=9
+CONFIG_IRQ_SPORT1_RX=9
+CONFIG_IRQ_SPORT1_TX=9
+CONFIG_IRQ_TWI=10
+CONFIG_IRQ_UART0_RX=10
+CONFIG_IRQ_UART0_TX=10
+CONFIG_IRQ_UART1_RX=10
+CONFIG_IRQ_UART1_TX=10
+CONFIG_IRQ_OPTSEC=11
+CONFIG_IRQ_CNT=11
+CONFIG_IRQ_MAC_RX=11
+CONFIG_IRQ_PORTH_INTA=11
+CONFIG_IRQ_MAC_TX=11
+CONFIG_IRQ_PORTH_INTB=11
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
+CONFIG_IRQ_PORTG_INTA=12
+CONFIG_IRQ_PORTG_INTB=12
+CONFIG_IRQ_MEM_DMA0=13
+CONFIG_IRQ_MEM_DMA1=13
+CONFIG_IRQ_WATCH=13
+CONFIG_IRQ_PORTF_INTA=13
+CONFIG_IRQ_PORTF_INTB=13
 # CONFIG_BFIN527_EZKIT is not set
 # CONFIG_BFIN527_BLUETECHNIX_CM is not set
 CONFIG_BFIN526_EZBRD=y
@@ -169,51 +221,7 @@
 #
 # Priority
 #
-CONFIG_IRQ_PLL_WAKEUP=7
-CONFIG_IRQ_DMA0_ERROR=7
-CONFIG_IRQ_DMAR0_BLK=7
-CONFIG_IRQ_DMAR1_BLK=7
-CONFIG_IRQ_DMAR0_OVR=7
-CONFIG_IRQ_DMAR1_OVR=7
-CONFIG_IRQ_PPI_ERROR=7
-CONFIG_IRQ_MAC_ERROR=7
-CONFIG_IRQ_SPORT0_ERROR=7
-CONFIG_IRQ_SPORT1_ERROR=7
-CONFIG_IRQ_UART0_ERROR=7
-CONFIG_IRQ_UART1_ERROR=7
-CONFIG_IRQ_RTC=8
-CONFIG_IRQ_PPI=8
-CONFIG_IRQ_SPORT0_RX=9
-CONFIG_IRQ_SPORT0_TX=9
-CONFIG_IRQ_SPORT1_RX=9
-CONFIG_IRQ_SPORT1_TX=9
-CONFIG_IRQ_TWI=10
 CONFIG_IRQ_SPI=10
-CONFIG_IRQ_UART0_RX=10
-CONFIG_IRQ_UART0_TX=10
-CONFIG_IRQ_UART1_RX=10
-CONFIG_IRQ_UART1_TX=10
-CONFIG_IRQ_OPTSEC=11
-CONFIG_IRQ_CNT=11
-CONFIG_IRQ_MAC_RX=11
-CONFIG_IRQ_PORTH_INTA=11
-CONFIG_IRQ_MAC_TX=11
-CONFIG_IRQ_PORTH_INTB=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
-CONFIG_IRQ_PORTG_INTA=12
-CONFIG_IRQ_PORTG_INTB=12
-CONFIG_IRQ_MEM_DMA0=13
-CONFIG_IRQ_MEM_DMA1=13
-CONFIG_IRQ_WATCH=13
-CONFIG_IRQ_PORTF_INTA=13
-CONFIG_IRQ_PORTF_INTB=13
 CONFIG_IRQ_SPI_ERROR=7
 CONFIG_IRQ_NFC_ERROR=7
 CONFIG_IRQ_HDMA_ERROR=7
@@ -235,7 +243,6 @@
 #
 CONFIG_CLKIN_HZ=25000000
 # CONFIG_BFIN_KERNEL_CLOCK is not set
-CONFIG_MAX_MEM_SIZE=512
 CONFIG_MAX_VCO_HZ=400000000
 CONFIG_MIN_VCO_HZ=50000000
 CONFIG_MAX_SCLK_HZ=133333333
@@ -253,16 +260,11 @@
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 # CONFIG_CYCLES_CLOCKSOURCE is not set
-# CONFIG_TICK_ONESHOT is not set
 # CONFIG_NO_HZ is not set
 # CONFIG_HIGH_RES_TIMERS is not set
 CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
 
 #
-# Memory Setup
-#
-
-#
 # Misc
 #
 CONFIG_BFIN_SCRATCH_REG_RETN=y
@@ -291,6 +293,7 @@
 CONFIG_CACHELINE_ALIGNED_L1=y
 # CONFIG_SYSCALL_TAB_L1 is not set
 # CONFIG_CPLB_SWITCH_TAB_L1 is not set
+CONFIG_APP_STACK_L1=y
 
 #
 # Speed Optimizations
@@ -304,15 +307,13 @@
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
 CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_VIRT_TO_BUS=y
 CONFIG_BFIN_GPTIMERS=y
-CONFIG_BFIN_DMA_5XX=y
 # CONFIG_DMA_UNCACHED_4M is not set
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
@@ -365,6 +366,7 @@
 CONFIG_BINFMT_FLAT=y
 CONFIG_BINFMT_ZFLAT=y
 # CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_HAVE_AOUT is not set
 # CONFIG_BINFMT_MISC is not set
 
 #
@@ -378,10 +380,6 @@
 # CPU Frequency scaling
 #
 # CONFIG_CPU_FREQ is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
@@ -432,6 +430,7 @@
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_DECNET is not set
 # CONFIG_LLC2 is not set
@@ -452,11 +451,10 @@
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
+# CONFIG_PHONET is not set
+CONFIG_WIRELESS=y
 # CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_OLD_REGULATORY=y
 # CONFIG_WIRELESS_EXT is not set
 # CONFIG_MAC80211 is not set
 # CONFIG_IEEE80211 is not set
@@ -474,6 +472,8 @@
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 # CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
 # CONFIG_SYS_HYPERVISOR is not set
 # CONFIG_CONNECTOR is not set
 CONFIG_MTD=y
@@ -534,7 +534,8 @@
 # Self-contained MTD device drivers
 #
 # CONFIG_MTD_DATAFLASH is not set
-# CONFIG_MTD_M25P80 is not set
+CONFIG_MTD_M25P80=y
+CONFIG_M25PXX_USE_FAST_READ=y
 # CONFIG_MTD_SLRAM is not set
 # CONFIG_MTD_PHRAM is not set
 # CONFIG_MTD_MTDRAM is not set
@@ -579,6 +580,7 @@
 # CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
@@ -595,7 +597,6 @@
 # CONFIG_ATA is not set
 # CONFIG_MD is not set
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
 # CONFIG_MACVLAN is not set
@@ -633,9 +634,10 @@
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set
-# CONFIG_B44 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NETDEV_1000=y
-# CONFIG_E1000E_ENABLED is not set
 # CONFIG_AX88180 is not set
 CONFIG_NETDEV_10000=y
 
@@ -667,7 +669,7 @@
 # Input device support
 #
 CONFIG_INPUT=y
-# CONFIG_INPUT_FF_MEMLESS is not set
+CONFIG_INPUT_FF_MEMLESS=m
 # CONFIG_INPUT_POLLDEV is not set
 
 #
@@ -692,8 +694,9 @@
 # CONFIG_INPUT_KEYSPAN_REMOTE is not set
 # CONFIG_INPUT_POWERMATE is not set
 # CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
 # CONFIG_INPUT_UINPUT is not set
-# CONFIG_TWI_KEYPAD is not set
+# CONFIG_CONFIG_INPUT_PCF8574 is not set
 
 #
 # Hardware I/O ports
@@ -712,12 +715,15 @@
 # CONFIG_BFIN_SPORT is not set
 # CONFIG_BFIN_TIMER_LATENCY is not set
 # CONFIG_TWI_LCD is not set
+CONFIG_BFIN_DMA_INTERFACE=m
 CONFIG_SIMPLE_GPIO=m
 CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
 CONFIG_VT_CONSOLE=y
 CONFIG_HW_CONSOLE=y
 # CONFIG_VT_HW_CONSOLE_BINDING is not set
-CONFIG_DEVKMEM=y
+# CONFIG_DEVKMEM is not set
+# CONFIG_BFIN_JTAG_COMM is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
@@ -760,25 +766,39 @@
 #
 # I2C Hardware Bus support
 #
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
 CONFIG_I2C_BLACKFIN_TWI=y
-CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=50
+CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 # CONFIG_I2C_GPIO is not set
 # CONFIG_I2C_OCORES is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_TAOS_EVM is not set
-# CONFIG_I2C_STUB is not set
 # CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
 # CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
 
 #
 # Miscellaneous I2C Chip support
 #
 # CONFIG_DS1682 is not set
+# CONFIG_AT24 is not set
 # CONFIG_SENSORS_AD5252 is not set
 # CONFIG_SENSORS_EEPROM is not set
 # CONFIG_SENSORS_PCF8574 is not set
 # CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
 # CONFIG_SENSORS_PCF8591 is not set
 # CONFIG_SENSORS_MAX6875 is not set
 # CONFIG_SENSORS_TSL2550 is not set
@@ -787,12 +807,14 @@
 # CONFIG_I2C_DEBUG_BUS is not set
 # CONFIG_I2C_DEBUG_CHIP is not set
 CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
 CONFIG_SPI_MASTER=y
 
 #
 # SPI Master Controller Drivers
 #
 CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
 # CONFIG_SPI_BITBANG is not set
 
 #
@@ -801,11 +823,15 @@
 # CONFIG_SPI_AT25 is not set
 # CONFIG_SPI_SPIDEV is not set
 # CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
 # CONFIG_W1 is not set
 # CONFIG_POWER_SUPPLY is not set
 CONFIG_HWMON=y
 # CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_AD7414 is not set
 # CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
 # CONFIG_SENSORS_ADM1021 is not set
 # CONFIG_SENSORS_ADM1025 is not set
 # CONFIG_SENSORS_ADM1026 is not set
@@ -834,6 +860,7 @@
 # CONFIG_SENSORS_LM90 is not set
 # CONFIG_SENSORS_LM92 is not set
 # CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_MAX1111 is not set
 # CONFIG_SENSORS_MAX1619 is not set
 # CONFIG_SENSORS_MAX6650 is not set
 # CONFIG_SENSORS_PC87360 is not set
@@ -871,16 +898,14 @@
 # CONFIG_USBPCWATCHDOG is not set
 
 #
-# Sonics Silicon Backplane
-#
-CONFIG_SSB_POSSIBLE=y
-# CONFIG_SSB is not set
-
-#
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
 
 #
 # Multimedia devices
@@ -915,15 +940,8 @@
 # Console display driver support
 #
 CONFIG_DUMMY_CONSOLE=y
-
-#
-# Sound
-#
 CONFIG_SOUND=m
-
-#
-# Advanced Linux Sound Architecture
-#
+CONFIG_SOUND_OSS_CORE=y
 CONFIG_SND=m
 CONFIG_SND_TIMER=m
 CONFIG_SND_PCM=m
@@ -937,56 +955,40 @@
 CONFIG_SND_VERBOSE_PROCFS=y
 # CONFIG_SND_VERBOSE_PRINTK is not set
 # CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
+CONFIG_SND_DRIVERS=y
 # CONFIG_SND_DUMMY is not set
 # CONFIG_SND_MTPAV is not set
 # CONFIG_SND_SERIAL_U16550 is not set
 # CONFIG_SND_MPU401 is not set
-
-#
-# SPI devices
-#
+CONFIG_SND_SPI=y
 
 #
 # ALSA Blackfin devices
 #
 # CONFIG_SND_BLACKFIN_AD1836 is not set
-# CONFIG_SND_BFIN_AD73311 is not set
 # CONFIG_SND_BFIN_AD73322 is not set
-
-#
-# USB devices
-#
+CONFIG_SND_USB=y
 # CONFIG_SND_USB_AUDIO is not set
 # CONFIG_SND_USB_CAIAQ is not set
-
-#
-# System on Chip audio support
-#
 CONFIG_SND_SOC=m
+CONFIG_SND_SOC_AC97_BUS=y
 CONFIG_SND_BF5XX_I2S=m
 CONFIG_SND_BF5XX_SOC_SSM2602=m
-# CONFIG_SND_BF5XX_AC97 is not set
+# CONFIG_SND_BF5XX_SOC_AD73311 is not set
+CONFIG_SND_BF5XX_AC97=m
+CONFIG_SND_BF5XX_MMAP_SUPPORT=y
+# CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set
 CONFIG_SND_BF5XX_SOC_SPORT=m
 CONFIG_SND_BF5XX_SOC_I2S=m
+CONFIG_SND_BF5XX_SOC_AC97=m
+CONFIG_SND_BF5XX_SOC_AD1980=m
 CONFIG_SND_BF5XX_SPORT_NUM=0
-
-#
-# ALSA SoC audio for Freescale SOCs
-#
-
-#
-# SoC Audio for the Texas Instruments OMAP
-#
+# CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set
+# CONFIG_SND_SOC_ALL_CODECS is not set
+CONFIG_SND_SOC_AD1980=m
 CONFIG_SND_SOC_SSM2602=m
-
-#
-# Open Sound System
-#
 # CONFIG_SOUND_PRIME is not set
+CONFIG_AC97_BUS=m
 CONFIG_HID_SUPPORT=y
 CONFIG_HID=y
 # CONFIG_HID_DEBUG is not set
@@ -996,9 +998,36 @@
 # USB Input Devices
 #
 CONFIG_USB_HID=y
-# CONFIG_USB_HIDINPUT_POWERBOOK is not set
-# CONFIG_HID_FF is not set
+# CONFIG_HID_PID is not set
 # CONFIG_USB_HIDDEV is not set
+
+#
+# Special HID drivers
+#
+CONFIG_HID_COMPAT=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_BRIGHT=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DELL=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_LOGITECH=y
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_PANTHERLORD=y
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_THRUSTMASTER_FF=m
+CONFIG_ZEROPLUS_FF=m
 CONFIG_USB_SUPPORT=y
 CONFIG_USB_ARCH_HAS_HCD=y
 # CONFIG_USB_ARCH_HAS_OHCI is not set
@@ -1016,6 +1045,9 @@
 # CONFIG_USB_OTG is not set
 # CONFIG_USB_OTG_WHITELIST is not set
 CONFIG_USB_OTG_BLACKLIST_HUB=y
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
 
 #
 # USB Host Controller Drivers
@@ -1026,6 +1058,7 @@
 # CONFIG_USB_ISP1362_HCD is not set
 # CONFIG_USB_SL811_HCD is not set
 # CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
 CONFIG_USB_MUSB_HDRC=y
 CONFIG_USB_MUSB_SOC=y
 
@@ -1037,7 +1070,7 @@
 # CONFIG_USB_MUSB_OTG is not set
 CONFIG_USB_MUSB_HDRC_HCD=y
 CONFIG_MUSB_PIO_ONLY=y
-CONFIG_USB_MUSB_LOGLEVEL=0
+# CONFIG_USB_MUSB_DEBUG is not set
 
 #
 # USB Device Class drivers
@@ -1045,6 +1078,7 @@
 # CONFIG_USB_ACM is not set
 # CONFIG_USB_PRINTER is not set
 # CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
 
 #
 # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
@@ -1059,7 +1093,6 @@
 # USB Imaging devices
 #
 # CONFIG_USB_MDC800 is not set
-CONFIG_USB_MON=y
 
 #
 # USB port drivers
@@ -1072,7 +1105,7 @@
 # CONFIG_USB_EMI62 is not set
 # CONFIG_USB_EMI26 is not set
 # CONFIG_USB_ADUTUX is not set
-# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_SEVSEG is not set
 # CONFIG_USB_RIO500 is not set
 # CONFIG_USB_LEGOTOWER is not set
 # CONFIG_USB_LCD is not set
@@ -1089,6 +1122,7 @@
 # CONFIG_USB_TRANCEVIBRATOR is not set
 # CONFIG_USB_IOWARRIOR is not set
 # CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
 # CONFIG_USB_GADGET is not set
 # CONFIG_MMC is not set
 # CONFIG_MEMSTICK is not set
@@ -1128,36 +1162,45 @@
 #
 # SPI RTC drivers
 #
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
 # CONFIG_RTC_DRV_MAX6902 is not set
 # CONFIG_RTC_DRV_R9701 is not set
 # CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
 
 #
 # Platform RTC drivers
 #
+# CONFIG_RTC_DRV_DS1286 is not set
 # CONFIG_RTC_DRV_DS1511 is not set
 # CONFIG_RTC_DRV_DS1553 is not set
 # CONFIG_RTC_DRV_DS1742 is not set
 # CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
 # CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
 # CONFIG_RTC_DRV_V3020 is not set
 
 #
 # on-CPU RTC drivers
 #
 CONFIG_RTC_DRV_BFIN=y
+# CONFIG_DMADEVICES is not set
 # CONFIG_UIO is not set
+# CONFIG_STAGING is not set
 
 #
 # File systems
 #
 # CONFIG_EXT2_FS is not set
 # CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 # CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
 # CONFIG_XFS_FS is not set
 # CONFIG_OCFS2_FS is not set
 # CONFIG_DNOTIFY is not set
@@ -1225,6 +1268,7 @@
 # CONFIG_CRAMFS is not set
 # CONFIG_VXFS_FS is not set
 # CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
 # CONFIG_ROMFS_FS is not set
@@ -1240,7 +1284,7 @@
 CONFIG_LOCKD_V4=y
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=m
-# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_SUNRPC_REGISTER_V4 is not set
 # CONFIG_RPCSEC_GSS_KRB5 is not set
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 CONFIG_SMB_FS=m
@@ -1308,10 +1352,48 @@
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_HEADERS_CHECK is not set
-# CONFIG_DEBUG_KERNEL is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
 # CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_VERBOSE=y
 CONFIG_DEBUG_MMRS=y
+# CONFIG_DEBUG_HWERR is not set
+# CONFIG_DEBUG_DOUBLEFAULT is not set
 CONFIG_DEBUG_HUNT_FOR_ZERO=y
 CONFIG_DEBUG_BFIN_HWTRACE_ON=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
@@ -1329,8 +1411,9 @@
 #
 # CONFIG_KEYS is not set
 CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
 # CONFIG_SECURITY_NETWORK is not set
-# CONFIG_SECURITY_CAPABILITIES is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
 # CONFIG_SECURITY_ROOTPLUG is not set
 CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
 CONFIG_CRYPTO=y
@@ -1338,6 +1421,7 @@
 #
 # Crypto core or helper
 #
+# CONFIG_CRYPTO_FIPS is not set
 # CONFIG_CRYPTO_MANAGER is not set
 # CONFIG_CRYPTO_GF128MUL is not set
 # CONFIG_CRYPTO_NULL is not set
@@ -1376,6 +1460,10 @@
 # CONFIG_CRYPTO_MD4 is not set
 # CONFIG_CRYPTO_MD5 is not set
 # CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
 # CONFIG_CRYPTO_SHA1 is not set
 # CONFIG_CRYPTO_SHA256 is not set
 # CONFIG_CRYPTO_SHA512 is not set
@@ -1406,15 +1494,20 @@
 #
 # CONFIG_CRYPTO_DEFLATE is not set
 # CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRYPTO_HW=y
 
 #
 # Library routines
 #
 CONFIG_BITREVERSE=y
-# CONFIG_GENERIC_FIND_FIRST_BIT is not set
 CONFIG_CRC_CCITT=m
 # CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
 # CONFIG_CRC_ITU_T is not set
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index 4a2a660..f92668a 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -1,6 +1,6 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.24.7
+# Linux kernel version: 2.6.28-rc2
 #
 # CONFIG_MMU is not set
 # CONFIG_FPU is not set
@@ -8,7 +8,6 @@
 # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
 CONFIG_BLACKFIN=y
 CONFIG_ZONE_DMA=y
-CONFIG_SEMAPHORE_SLEEPERS=y
 CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_HARDIRQS=y
@@ -31,18 +30,16 @@
 # CONFIG_POSIX_MQUEUE is not set
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
 # CONFIG_AUDIT is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CGROUPS is not set
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_FAIR_USER_SCHED=y
-# CONFIG_FAIR_CGROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
 # CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -51,26 +48,35 @@
 CONFIG_UID16=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
+CONFIG_COMPAT_BRK=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
 CONFIG_EPOLL=y
 CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
+CONFIG_AIO=y
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 CONFIG_TINY_SHMEM=y
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 # CONFIG_MODVERSIONS is not set
@@ -81,6 +87,7 @@
 # CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_LSF is not set
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -94,9 +101,11 @@
 # CONFIG_DEFAULT_CFQ is not set
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
 # CONFIG_PREEMPT_NONE is not set
 CONFIG_PREEMPT_VOLUNTARY=y
 # CONFIG_PREEMPT is not set
+# CONFIG_FREEZER is not set
 
 #
 # Blackfin Processor Options
@@ -105,6 +114,10 @@
 #
 # Processor and Board Settings
 #
+# CONFIG_BF512 is not set
+# CONFIG_BF514 is not set
+# CONFIG_BF516 is not set
+# CONFIG_BF518 is not set
 # CONFIG_BF522 is not set
 # CONFIG_BF523 is not set
 # CONFIG_BF524 is not set
@@ -117,23 +130,74 @@
 # CONFIG_BF534 is not set
 # CONFIG_BF536 is not set
 # CONFIG_BF537 is not set
+# CONFIG_BF538 is not set
+# CONFIG_BF539 is not set
 # CONFIG_BF542 is not set
 # CONFIG_BF544 is not set
 # CONFIG_BF547 is not set
 # CONFIG_BF548 is not set
 # CONFIG_BF549 is not set
 # CONFIG_BF561 is not set
+CONFIG_BF_REV_MIN=0
+CONFIG_BF_REV_MAX=2
 CONFIG_BF_REV_0_0=y
 # CONFIG_BF_REV_0_1 is not set
 # CONFIG_BF_REV_0_2 is not set
 # CONFIG_BF_REV_0_3 is not set
 # CONFIG_BF_REV_0_4 is not set
 # CONFIG_BF_REV_0_5 is not set
+# CONFIG_BF_REV_0_6 is not set
 # CONFIG_BF_REV_ANY is not set
 # CONFIG_BF_REV_NONE is not set
 CONFIG_BF52x=y
 CONFIG_MEM_MT48LC32M16A2TG_75=y
+CONFIG_IRQ_PLL_WAKEUP=7
+CONFIG_IRQ_DMA0_ERROR=7
+CONFIG_IRQ_DMAR0_BLK=7
+CONFIG_IRQ_DMAR1_BLK=7
+CONFIG_IRQ_DMAR0_OVR=7
+CONFIG_IRQ_DMAR1_OVR=7
+CONFIG_IRQ_PPI_ERROR=7
+CONFIG_IRQ_MAC_ERROR=7
+CONFIG_IRQ_SPORT0_ERROR=7
+CONFIG_IRQ_SPORT1_ERROR=7
+CONFIG_IRQ_UART0_ERROR=7
+CONFIG_IRQ_UART1_ERROR=7
+CONFIG_IRQ_RTC=8
+CONFIG_IRQ_PPI=8
+CONFIG_IRQ_SPORT0_RX=9
+CONFIG_IRQ_SPORT0_TX=9
+CONFIG_IRQ_SPORT1_RX=9
+CONFIG_IRQ_SPORT1_TX=9
+CONFIG_IRQ_TWI=10
+CONFIG_IRQ_UART0_RX=10
+CONFIG_IRQ_UART0_TX=10
+CONFIG_IRQ_UART1_RX=10
+CONFIG_IRQ_UART1_TX=10
+CONFIG_IRQ_OPTSEC=11
+CONFIG_IRQ_CNT=11
+CONFIG_IRQ_MAC_RX=11
+CONFIG_IRQ_PORTH_INTA=11
+CONFIG_IRQ_MAC_TX=11
+CONFIG_IRQ_PORTH_INTB=11
+CONFIG_IRQ_TIMER0=8
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
+CONFIG_IRQ_PORTG_INTA=12
+CONFIG_IRQ_PORTG_INTB=12
+CONFIG_IRQ_MEM_DMA0=13
+CONFIG_IRQ_MEM_DMA1=13
+CONFIG_IRQ_WATCH=13
+CONFIG_IRQ_PORTF_INTA=13
+CONFIG_IRQ_PORTF_INTB=13
 CONFIG_BFIN527_EZKIT=y
+# CONFIG_BFIN527_BLUETECHNIX_CM is not set
+# CONFIG_BFIN526_EZBRD is not set
 
 #
 # BF527 Specific Configuration
@@ -158,51 +222,7 @@
 #
 # Priority
 #
-CONFIG_IRQ_PLL_WAKEUP=7
-CONFIG_IRQ_DMA0_ERROR=7
-CONFIG_IRQ_DMAR0_BLK=7
-CONFIG_IRQ_DMAR1_BLK=7
-CONFIG_IRQ_DMAR0_OVR=7
-CONFIG_IRQ_DMAR1_OVR=7
-CONFIG_IRQ_PPI_ERROR=7
-CONFIG_IRQ_MAC_ERROR=7
-CONFIG_IRQ_SPORT0_ERROR=7
-CONFIG_IRQ_SPORT1_ERROR=7
-CONFIG_IRQ_UART0_ERROR=7
-CONFIG_IRQ_UART1_ERROR=7
-CONFIG_IRQ_RTC=8
-CONFIG_IRQ_PPI=8
-CONFIG_IRQ_SPORT0_RX=9
-CONFIG_IRQ_SPORT0_TX=9
-CONFIG_IRQ_SPORT1_RX=9
-CONFIG_IRQ_SPORT1_TX=9
-CONFIG_IRQ_TWI=10
 CONFIG_IRQ_SPI=10
-CONFIG_IRQ_UART0_RX=10
-CONFIG_IRQ_UART0_TX=10
-CONFIG_IRQ_UART1_RX=10
-CONFIG_IRQ_UART1_TX=10
-CONFIG_IRQ_OPTSEC=11
-CONFIG_IRQ_CNT=11
-CONFIG_IRQ_MAC_RX=11
-CONFIG_IRQ_PORTH_INTA=11
-CONFIG_IRQ_MAC_TX=11
-CONFIG_IRQ_PORTH_INTB=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
-CONFIG_IRQ_PORTG_INTA=12
-CONFIG_IRQ_PORTG_INTB=12
-CONFIG_IRQ_MEM_DMA0=13
-CONFIG_IRQ_MEM_DMA1=13
-CONFIG_IRQ_WATCH=13
-CONFIG_IRQ_PORTF_INTA=13
-CONFIG_IRQ_PORTF_INTB=13
 CONFIG_IRQ_SPI_ERROR=7
 CONFIG_IRQ_NFC_ERROR=7
 CONFIG_IRQ_HDMA_ERROR=7
@@ -224,7 +244,6 @@
 #
 CONFIG_CLKIN_HZ=25000000
 # CONFIG_BFIN_KERNEL_CLOCK is not set
-CONFIG_MAX_MEM_SIZE=512
 CONFIG_MAX_VCO_HZ=600000000
 CONFIG_MIN_VCO_HZ=50000000
 CONFIG_MAX_SCLK_HZ=133333333
@@ -238,10 +257,10 @@
 # CONFIG_HZ_300 is not set
 # CONFIG_HZ_1000 is not set
 CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 # CONFIG_CYCLES_CLOCKSOURCE is not set
-# CONFIG_TICK_ONESHOT is not set
 # CONFIG_NO_HZ is not set
 # CONFIG_HIGH_RES_TIMERS is not set
 CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
@@ -275,6 +294,12 @@
 CONFIG_CACHELINE_ALIGNED_L1=y
 # CONFIG_SYSCALL_TAB_L1 is not set
 # CONFIG_CPLB_SWITCH_TAB_L1 is not set
+CONFIG_APP_STACK_L1=y
+
+#
+# Speed Optimizations
+#
+CONFIG_BFIN_INS_LOWOVERHEAD=y
 CONFIG_RAMKERNEL=y
 # CONFIG_ROMKERNEL is not set
 CONFIG_SELECT_MEMORY_MODEL=y
@@ -283,14 +308,13 @@
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_VIRT_TO_BUS=y
 CONFIG_BFIN_GPTIMERS=y
-CONFIG_BFIN_DMA_5XX=y
 # CONFIG_DMA_UNCACHED_4M is not set
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
@@ -305,7 +329,6 @@
 # CONFIG_BFIN_ICACHE_LOCK is not set
 # CONFIG_BFIN_WB is not set
 CONFIG_BFIN_WT=y
-CONFIG_L1_MAX_PIECE=16
 # CONFIG_MPU is not set
 
 #
@@ -334,7 +357,6 @@
 #
 # Bus options (PCI, PCMCIA, EISA, MCA, ISA)
 #
-# CONFIG_PCI is not set
 # CONFIG_ARCH_SUPPORTS_MSI is not set
 # CONFIG_PCCARD is not set
 
@@ -345,23 +367,20 @@
 CONFIG_BINFMT_FLAT=y
 CONFIG_BINFMT_ZFLAT=y
 # CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_HAVE_AOUT is not set
 # CONFIG_BINFMT_MISC is not set
 
 #
 # Power management options
 #
 # CONFIG_PM is not set
-CONFIG_SUSPEND_UP_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
 # CONFIG_PM_WAKEUP_BY_GPIO is not set
 
 #
 # CPU Frequency scaling
 #
 # CONFIG_CPU_FREQ is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
@@ -374,6 +393,7 @@
 # CONFIG_XFRM_USER is not set
 # CONFIG_XFRM_SUB_POLICY is not set
 # CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
 # CONFIG_NET_KEY is not set
 CONFIG_INET=y
 # CONFIG_IP_MULTICAST is not set
@@ -403,8 +423,6 @@
 CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_TCP_MD5SIG is not set
 # CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
 # CONFIG_NETLABEL is not set
 # CONFIG_NETWORK_SECMARK is not set
 # CONFIG_NETFILTER is not set
@@ -413,6 +431,7 @@
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_DECNET is not set
 # CONFIG_LLC2 is not set
@@ -429,6 +448,7 @@
 #
 # CONFIG_NET_PKTGEN is not set
 # CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
 CONFIG_IRDA=m
 
 #
@@ -467,15 +487,6 @@
 # CONFIG_KS959_DONGLE is not set
 
 #
-# Old SIR device drivers
-#
-# CONFIG_IRPORT_SIR is not set
-
-#
-# Old Serial dongle support
-#
-
-#
 # FIR device drivers
 #
 # CONFIG_USB_IRDA is not set
@@ -483,11 +494,10 @@
 # CONFIG_MCS_FIR is not set
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
+# CONFIG_PHONET is not set
+CONFIG_WIRELESS=y
 # CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_OLD_REGULATORY=y
 # CONFIG_WIRELESS_EXT is not set
 # CONFIG_MAC80211 is not set
 # CONFIG_IEEE80211 is not set
@@ -505,6 +515,8 @@
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 # CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
 # CONFIG_SYS_HYPERVISOR is not set
 # CONFIG_CONNECTOR is not set
 CONFIG_MTD=y
@@ -513,6 +525,7 @@
 CONFIG_MTD_PARTITIONS=y
 # CONFIG_MTD_REDBOOT_PARTS is not set
 # CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
 
 #
 # User Modules And Translation Layers
@@ -556,6 +569,7 @@
 #
 CONFIG_MTD_COMPLEX_MAPPINGS=y
 # CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_GPIO_ADDR is not set
 # CONFIG_MTD_UCLINUX is not set
 # CONFIG_MTD_PLATRAM is not set
 
@@ -563,7 +577,8 @@
 # Self-contained MTD device drivers
 #
 # CONFIG_MTD_DATAFLASH is not set
-# CONFIG_MTD_M25P80 is not set
+CONFIG_MTD_M25P80=y
+CONFIG_M25PXX_USE_FAST_READ=y
 # CONFIG_MTD_SLRAM is not set
 # CONFIG_MTD_PHRAM is not set
 # CONFIG_MTD_MTDRAM is not set
@@ -605,11 +620,14 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
 #
@@ -622,7 +640,6 @@
 # CONFIG_ATA is not set
 # CONFIG_MD is not set
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
 # CONFIG_MACVLAN is not set
@@ -643,6 +660,7 @@
 # CONFIG_SMSC_PHY is not set
 # CONFIG_BROADCOM_PHY is not set
 # CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
 # CONFIG_FIXED_PHY is not set
 # CONFIG_MDIO_BITBANG is not set
 CONFIG_NET_ETHERNET=y
@@ -655,11 +673,14 @@
 # CONFIG_SMC91X is not set
 # CONFIG_SMSC911X is not set
 # CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
 # CONFIG_IBM_NEW_EMAC_ZMII is not set
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set
-# CONFIG_B44 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NETDEV_1000=y
 # CONFIG_AX88180 is not set
 CONFIG_NETDEV_10000=y
@@ -669,6 +690,7 @@
 #
 # CONFIG_WLAN_PRE80211 is not set
 # CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
 
 #
 # USB Network Adapters
@@ -681,7 +703,6 @@
 # CONFIG_WAN is not set
 # CONFIG_PPP is not set
 # CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
 # CONFIG_NETCONSOLE is not set
 # CONFIG_NETPOLL is not set
 # CONFIG_NET_POLL_CONTROLLER is not set
@@ -692,7 +713,7 @@
 # Input device support
 #
 CONFIG_INPUT=y
-# CONFIG_INPUT_FF_MEMLESS is not set
+CONFIG_INPUT_FF_MEMLESS=m
 # CONFIG_INPUT_POLLDEV is not set
 
 #
@@ -717,8 +738,9 @@
 # CONFIG_INPUT_KEYSPAN_REMOTE is not set
 # CONFIG_INPUT_POWERMATE is not set
 # CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
 # CONFIG_INPUT_UINPUT is not set
-# CONFIG_TWI_KEYPAD is not set
+# CONFIG_CONFIG_INPUT_PCF8574 is not set
 
 #
 # Hardware I/O ports
@@ -734,16 +756,18 @@
 # CONFIG_BF5xx_PPIFCD is not set
 # CONFIG_BFIN_SIMPLE_TIMER is not set
 # CONFIG_BF5xx_PPI is not set
-CONFIG_BFIN_OTP=y
-# CONFIG_BFIN_OTP_WRITE_ENABLE is not set
 # CONFIG_BFIN_SPORT is not set
 # CONFIG_BFIN_TIMER_LATENCY is not set
 # CONFIG_TWI_LCD is not set
+CONFIG_BFIN_DMA_INTERFACE=m
 CONFIG_SIMPLE_GPIO=m
 CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
 CONFIG_VT_CONSOLE=y
 CONFIG_HW_CONSOLE=y
 # CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_BFIN_JTAG_COMM is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
@@ -766,6 +790,8 @@
 # CONFIG_SERIAL_BFIN_SPORT is not set
 CONFIG_UNIX98_PTYS=y
 # CONFIG_LEGACY_PTYS is not set
+CONFIG_BFIN_OTP=y
+# CONFIG_BFIN_OTP_WRITE_ENABLE is not set
 
 #
 # CAN, the car bus and industrial fieldbus
@@ -773,44 +799,49 @@
 # CONFIG_CAN4LINUX is not set
 # CONFIG_IPMI_HANDLER is not set
 # CONFIG_HW_RANDOM is not set
-# CONFIG_GEN_RTC is not set
 # CONFIG_R3964 is not set
 # CONFIG_RAW_DRIVER is not set
 # CONFIG_TCG_TPM is not set
 CONFIG_I2C=y
 CONFIG_I2C_BOARDINFO=y
 CONFIG_I2C_CHARDEV=m
-
-#
-# I2C Algorithms
-#
-# CONFIG_I2C_ALGOBIT is not set
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
+CONFIG_I2C_HELPER_AUTO=y
 
 #
 # I2C Hardware Bus support
 #
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
 CONFIG_I2C_BLACKFIN_TWI=m
-CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=50
+CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 # CONFIG_I2C_GPIO is not set
 # CONFIG_I2C_OCORES is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_TAOS_EVM is not set
-# CONFIG_I2C_STUB is not set
 # CONFIG_I2C_TINY_USB is not set
 
 #
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
 # Miscellaneous I2C Chip support
 #
-# CONFIG_SENSORS_DS1337 is not set
-# CONFIG_SENSORS_DS1374 is not set
 # CONFIG_DS1682 is not set
+# CONFIG_AT24 is not set
 # CONFIG_SENSORS_AD5252 is not set
 # CONFIG_SENSORS_EEPROM is not set
 # CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_SENSORS_PCF8575 is not set
+# CONFIG_PCF8575 is not set
 # CONFIG_SENSORS_PCA9539 is not set
 # CONFIG_SENSORS_PCF8591 is not set
 # CONFIG_SENSORS_MAX6875 is not set
@@ -819,17 +850,15 @@
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
 # CONFIG_I2C_DEBUG_CHIP is not set
-
-#
-# SPI support
-#
 CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
 CONFIG_SPI_MASTER=y
 
 #
 # SPI Master Controller Drivers
 #
 CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
 # CONFIG_SPI_BITBANG is not set
 
 #
@@ -838,11 +867,15 @@
 # CONFIG_SPI_AT25 is not set
 # CONFIG_SPI_SPIDEV is not set
 # CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
 # CONFIG_W1 is not set
 # CONFIG_POWER_SUPPLY is not set
 CONFIG_HWMON=y
 # CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_AD7414 is not set
 # CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
 # CONFIG_SENSORS_ADM1021 is not set
 # CONFIG_SENSORS_ADM1025 is not set
 # CONFIG_SENSORS_ADM1026 is not set
@@ -850,6 +883,7 @@
 # CONFIG_SENSORS_ADM1031 is not set
 # CONFIG_SENSORS_ADM9240 is not set
 # CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
 # CONFIG_SENSORS_ATXP1 is not set
 # CONFIG_SENSORS_DS1621 is not set
 # CONFIG_SENSORS_F71805F is not set
@@ -870,6 +904,7 @@
 # CONFIG_SENSORS_LM90 is not set
 # CONFIG_SENSORS_LM92 is not set
 # CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_MAX1111 is not set
 # CONFIG_SENSORS_MAX1619 is not set
 # CONFIG_SENSORS_MAX6650 is not set
 # CONFIG_SENSORS_PC87360 is not set
@@ -878,6 +913,7 @@
 # CONFIG_SENSORS_SMSC47M1 is not set
 # CONFIG_SENSORS_SMSC47M192 is not set
 # CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
 # CONFIG_SENSORS_THMC50 is not set
 # CONFIG_SENSORS_VT1211 is not set
 # CONFIG_SENSORS_W83781D is not set
@@ -885,9 +921,12 @@
 # CONFIG_SENSORS_W83792D is not set
 # CONFIG_SENSORS_W83793 is not set
 # CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
 # CONFIG_SENSORS_W83627HF is not set
 # CONFIG_SENSORS_W83627EHF is not set
 # CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
 CONFIG_WATCHDOG=y
 # CONFIG_WATCHDOG_NOWAYOUT is not set
 
@@ -903,21 +942,29 @@
 # CONFIG_USBPCWATCHDOG is not set
 
 #
-# Sonics Silicon Backplane
-#
-CONFIG_SSB_POSSIBLE=y
-# CONFIG_SSB is not set
-
-#
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
 
 #
 # Multimedia devices
 #
+
+#
+# Multimedia core support
+#
 # CONFIG_VIDEO_DEV is not set
 # CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
 # CONFIG_DAB is not set
 
 #
@@ -928,6 +975,7 @@
 CONFIG_FB=y
 # CONFIG_FIRMWARE_EDID is not set
 # CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
 CONFIG_FB_CFB_FILLRECT=y
 CONFIG_FB_CFB_COPYAREA=y
 CONFIG_FB_CFB_IMAGEBLIT=y
@@ -935,8 +983,8 @@
 # CONFIG_FB_SYS_FILLRECT is not set
 # CONFIG_FB_SYS_COPYAREA is not set
 # CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
 # CONFIG_FB_SYS_FOPS is not set
-CONFIG_FB_DEFERRED_IO=y
 # CONFIG_FB_SVGALIB is not set
 # CONFIG_FB_MACMODES is not set
 # CONFIG_FB_BACKLIGHT is not set
@@ -947,12 +995,18 @@
 # Frame buffer hardware drivers
 #
 CONFIG_FB_BFIN_T350MCQB=y
+# CONFIG_FB_BFIN_LQ035Q1 is not set
 # CONFIG_FB_BFIN_7393 is not set
 # CONFIG_FB_S1D13XXX is not set
 # CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_LCD_CLASS_DEVICE=m
 CONFIG_LCD_LTV350QV=m
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+# CONFIG_LCD_PLATFORM is not set
 CONFIG_BACKLIGHT_CLASS_DEVICE=m
 # CONFIG_BACKLIGHT_CORGI is not set
 
@@ -977,15 +1031,8 @@
 # CONFIG_LOGO_LINUX_CLUT224 is not set
 # CONFIG_LOGO_BLACKFIN_VGA16 is not set
 CONFIG_LOGO_BLACKFIN_CLUT224=y
-
-#
-# Sound
-#
 CONFIG_SOUND=m
-
-#
-# Advanced Linux Sound Architecture
-#
+# CONFIG_SOUND_OSS_CORE is not set
 CONFIG_SND=m
 CONFIG_SND_TIMER=m
 CONFIG_SND_PCM=m
@@ -997,62 +1044,38 @@
 CONFIG_SND_VERBOSE_PROCFS=y
 # CONFIG_SND_VERBOSE_PRINTK is not set
 # CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
+CONFIG_SND_DRIVERS=y
 # CONFIG_SND_DUMMY is not set
 # CONFIG_SND_MTPAV is not set
 # CONFIG_SND_SERIAL_U16550 is not set
 # CONFIG_SND_MPU401 is not set
-
-#
-# SPI devices
-#
+CONFIG_SND_SPI=y
 
 #
 # ALSA Blackfin devices
 #
 # CONFIG_SND_BLACKFIN_AD1836 is not set
-# CONFIG_SND_BLACKFIN_AD1836_TDM is not set
-# CONFIG_SND_BLACKFIN_AD1836_I2S is not set
-# CONFIG_SND_BLACKFIN_AD1836_MULSUB is not set
-# CONFIG_SND_BLACKFIN_AD1836_5P1 is not set
-# CONFIG_SND_BFIN_AD73311 is not set
 # CONFIG_SND_BFIN_AD73322 is not set
-
-#
-# USB devices
-#
+CONFIG_SND_USB=y
 # CONFIG_SND_USB_AUDIO is not set
 # CONFIG_SND_USB_CAIAQ is not set
-
-#
-# System on Chip audio support
-#
-CONFIG_SND_SOC_AC97_BUS=y
 CONFIG_SND_SOC=m
-CONFIG_SND_BF5XX_SOC=m
-CONFIG_SND_MMAP_SUPPORT=y
+CONFIG_SND_SOC_AC97_BUS=y
+CONFIG_SND_BF5XX_I2S=m
+CONFIG_SND_BF5XX_SOC_SSM2602=m
+# CONFIG_SND_BF5XX_SOC_AD73311 is not set
+CONFIG_SND_BF5XX_AC97=m
+CONFIG_SND_BF5XX_MMAP_SUPPORT=y
+# CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set
+CONFIG_SND_BF5XX_SOC_SPORT=m
 CONFIG_SND_BF5XX_SOC_I2S=m
 CONFIG_SND_BF5XX_SOC_AC97=m
-# CONFIG_SND_BF5XX_SOC_WM8750 is not set
-# CONFIG_SND_BF5XX_SOC_WM8731 is not set
-CONFIG_SND_BF5XX_SOC_SSM2602=m
-CONFIG_SND_BF5XX_SOC_BF5xx=m
+CONFIG_SND_BF5XX_SOC_AD1980=m
 CONFIG_SND_BF5XX_SPORT_NUM=0
 # CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set
-
-#
-# SoC Audio support for SuperH
-#
-CONFIG_SND_SOC_SSM2602=m
-# CONFIG_SND_SOC_SSM2602_SPI is not set
+# CONFIG_SND_SOC_ALL_CODECS is not set
 CONFIG_SND_SOC_AD1980=m
-
-#
-# Open Sound System
-#
+CONFIG_SND_SOC_SSM2602=m
 # CONFIG_SOUND_PRIME is not set
 CONFIG_AC97_BUS=m
 CONFIG_HID_SUPPORT=y
@@ -1064,15 +1087,43 @@
 # USB Input Devices
 #
 CONFIG_USB_HID=y
-# CONFIG_USB_HIDINPUT_POWERBOOK is not set
-# CONFIG_HID_FF is not set
+# CONFIG_HID_PID is not set
 # CONFIG_USB_HIDDEV is not set
+
+#
+# Special HID drivers
+#
+CONFIG_HID_COMPAT=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_BRIGHT=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DELL=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_LOGITECH=y
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_PANTHERLORD=y
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_THRUSTMASTER_FF=m
+CONFIG_ZEROPLUS_FF=m
 CONFIG_USB_SUPPORT=y
 CONFIG_USB_ARCH_HAS_HCD=y
 # CONFIG_USB_ARCH_HAS_OHCI is not set
 # CONFIG_USB_ARCH_HAS_EHCI is not set
 CONFIG_USB=y
 # CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
 
 #
 # Miscellaneous USB options
@@ -1083,15 +1134,20 @@
 # CONFIG_USB_OTG is not set
 # CONFIG_USB_OTG_WHITELIST is not set
 CONFIG_USB_OTG_BLACKLIST_HUB=y
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
 
 #
 # USB Host Controller Drivers
 #
+# CONFIG_USB_C67X00_HCD is not set
 # CONFIG_USB_ISP116X_HCD is not set
-# CONFIG_USB_ISP1362_HCD is not set
 # CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
 # CONFIG_USB_SL811_HCD is not set
 # CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
 CONFIG_USB_MUSB_HDRC=y
 CONFIG_USB_MUSB_SOC=y
 
@@ -1103,13 +1159,15 @@
 # CONFIG_USB_MUSB_OTG is not set
 CONFIG_USB_MUSB_HDRC_HCD=y
 CONFIG_MUSB_PIO_ONLY=y
-CONFIG_USB_MUSB_LOGLEVEL=0
+# CONFIG_USB_MUSB_DEBUG is not set
 
 #
 # USB Device Class drivers
 #
 # CONFIG_USB_ACM is not set
 # CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
 
 #
 # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
@@ -1124,15 +1182,10 @@
 # USB Imaging devices
 #
 # CONFIG_USB_MDC800 is not set
-CONFIG_USB_MON=y
 
 #
 # USB port drivers
 #
-
-#
-# USB Serial Converter support
-#
 # CONFIG_USB_SERIAL is not set
 
 #
@@ -1141,7 +1194,7 @@
 # CONFIG_USB_EMI62 is not set
 # CONFIG_USB_EMI26 is not set
 # CONFIG_USB_ADUTUX is not set
-# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_SEVSEG is not set
 # CONFIG_USB_RIO500 is not set
 # CONFIG_USB_LEGOTOWER is not set
 # CONFIG_USB_LCD is not set
@@ -1157,17 +1210,13 @@
 # CONFIG_USB_LD is not set
 # CONFIG_USB_TRANCEVIBRATOR is not set
 # CONFIG_USB_IOWARRIOR is not set
-
-#
-# USB DSL modem support
-#
-
-#
-# USB Gadget Support
-#
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
 # CONFIG_USB_GADGET is not set
 # CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
 CONFIG_RTC_LIB=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_HCTOSYS=y
@@ -1196,51 +1245,57 @@
 # CONFIG_RTC_DRV_PCF8563 is not set
 # CONFIG_RTC_DRV_PCF8583 is not set
 # CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
 
 #
 # SPI RTC drivers
 #
-# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
 # CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
 
 #
 # Platform RTC drivers
 #
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
 # CONFIG_RTC_DRV_DS1553 is not set
-# CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
 # CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
 # CONFIG_RTC_DRV_V3020 is not set
 
 #
 # on-CPU RTC drivers
 #
 CONFIG_RTC_DRV_BFIN=y
-
-#
-# Userspace I/O
-#
+# CONFIG_DMADEVICES is not set
 # CONFIG_UIO is not set
+# CONFIG_STAGING is not set
 
 #
 # File systems
 #
 # CONFIG_EXT2_FS is not set
 # CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 # CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
 # CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
+# CONFIG_DNOTIFY is not set
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
-# CONFIG_DNOTIFY is not set
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
 # CONFIG_FUSE_FS is not set
@@ -1280,11 +1335,11 @@
 # CONFIG_EFS_FS is not set
 CONFIG_YAFFS_FS=m
 CONFIG_YAFFS_YAFFS1=y
+# CONFIG_YAFFS_9BYTE_TAGS is not set
 # CONFIG_YAFFS_DOES_ECC is not set
 CONFIG_YAFFS_YAFFS2=y
 CONFIG_YAFFS_AUTO_YAFFS2=y
 # CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
-CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS=10
 # CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
 # CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
 CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
@@ -1301,8 +1356,11 @@
 # CONFIG_JFFS2_RUBIN is not set
 # CONFIG_CRAMFS is not set
 # CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
 # CONFIG_SYSV_FS is not set
 # CONFIG_UFS_FS is not set
 CONFIG_NETWORK_FILESYSTEMS=y
@@ -1310,13 +1368,12 @@
 CONFIG_NFS_V3=y
 # CONFIG_NFS_V3_ACL is not set
 # CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
 # CONFIG_NFSD is not set
 CONFIG_LOCKD=m
 CONFIG_LOCKD_V4=y
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=m
-# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_SUNRPC_REGISTER_V4 is not set
 # CONFIG_RPCSEC_GSS_KRB5 is not set
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 CONFIG_SMB_FS=m
@@ -1372,9 +1429,6 @@
 # CONFIG_NLS_KOI8_U is not set
 # CONFIG_NLS_UTF8 is not set
 # CONFIG_DLM is not set
-CONFIG_INSTRUMENTATION=y
-# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
 
 #
 # Kernel hacking
@@ -1382,14 +1436,53 @@
 # CONFIG_PRINTK_TIME is not set
 CONFIG_ENABLE_WARN_DEPRECATED=y
 CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
 # CONFIG_MAGIC_SYSRQ is not set
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_HEADERS_CHECK is not set
-# CONFIG_DEBUG_KERNEL is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
 # CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_VERBOSE=y
 CONFIG_DEBUG_MMRS=y
+# CONFIG_DEBUG_HWERR is not set
+# CONFIG_DEBUG_DOUBLEFAULT is not set
 CONFIG_DEBUG_HUNT_FOR_ZERO=y
 CONFIG_DEBUG_BFIN_HWTRACE_ON=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
@@ -1407,10 +1500,95 @@
 #
 # CONFIG_KEYS is not set
 CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
 # CONFIG_SECURITY_NETWORK is not set
-# CONFIG_SECURITY_CAPABILITIES is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
 # CONFIG_SECURITY_ROOTPLUG is not set
-# CONFIG_CRYPTO is not set
+CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
 
 #
 # Library routines
@@ -1418,6 +1596,7 @@
 CONFIG_BITREVERSE=y
 CONFIG_CRC_CCITT=m
 # CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
 # CONFIG_CRC_ITU_T is not set
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
index deeb5e4..92afd98 100644
--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
@@ -1,6 +1,6 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.24.7
+# Linux kernel version: 2.6.28-rc2
 #
 # CONFIG_MMU is not set
 # CONFIG_FPU is not set
@@ -8,7 +8,6 @@
 # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
 CONFIG_BLACKFIN=y
 CONFIG_ZONE_DMA=y
-CONFIG_SEMAPHORE_SLEEPERS=y
 CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_HARDIRQS=y
@@ -31,18 +30,16 @@
 # CONFIG_POSIX_MQUEUE is not set
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
 # CONFIG_AUDIT is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CGROUPS is not set
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_FAIR_USER_SCHED=y
-# CONFIG_FAIR_CGROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
 # CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -51,26 +48,35 @@
 CONFIG_UID16=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
+CONFIG_COMPAT_BRK=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
 CONFIG_EPOLL=y
 CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
+CONFIG_AIO=y
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 CONFIG_TINY_SHMEM=y
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 # CONFIG_MODVERSIONS is not set
@@ -81,6 +87,7 @@
 # CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_LSF is not set
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -94,9 +101,11 @@
 # CONFIG_DEFAULT_CFQ is not set
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
 # CONFIG_PREEMPT_NONE is not set
 CONFIG_PREEMPT_VOLUNTARY=y
 # CONFIG_PREEMPT is not set
+CONFIG_FREEZER=y
 
 #
 # Blackfin Processor Options
@@ -105,6 +114,10 @@
 #
 # Processor and Board Settings
 #
+# CONFIG_BF512 is not set
+# CONFIG_BF514 is not set
+# CONFIG_BF516 is not set
+# CONFIG_BF518 is not set
 # CONFIG_BF522 is not set
 # CONFIG_BF523 is not set
 # CONFIG_BF524 is not set
@@ -117,24 +130,30 @@
 # CONFIG_BF534 is not set
 # CONFIG_BF536 is not set
 # CONFIG_BF537 is not set
+# CONFIG_BF538 is not set
+# CONFIG_BF539 is not set
 # CONFIG_BF542 is not set
 # CONFIG_BF544 is not set
 # CONFIG_BF547 is not set
 # CONFIG_BF548 is not set
 # CONFIG_BF549 is not set
 # CONFIG_BF561 is not set
+CONFIG_BF_REV_MIN=3
+CONFIG_BF_REV_MAX=6
 # CONFIG_BF_REV_0_0 is not set
 # CONFIG_BF_REV_0_1 is not set
 # CONFIG_BF_REV_0_2 is not set
 CONFIG_BF_REV_0_3=y
 # CONFIG_BF_REV_0_4 is not set
 # CONFIG_BF_REV_0_5 is not set
+# CONFIG_BF_REV_0_6 is not set
 # CONFIG_BF_REV_ANY is not set
 # CONFIG_BF_REV_NONE is not set
 CONFIG_BF53x=y
 CONFIG_MEM_MT48LC16M16A2TG_75=y
 CONFIG_BFIN533_EZKIT=y
 # CONFIG_BFIN533_STAMP is not set
+# CONFIG_BLACKSTAMP is not set
 # CONFIG_BFIN533_BLUETECHNIX_CM is not set
 # CONFIG_H8606_HVSISTEMAS is not set
 # CONFIG_BFIN532_IP0X is not set
@@ -187,7 +206,6 @@
 #
 CONFIG_CLKIN_HZ=27000000
 # CONFIG_BFIN_KERNEL_CLOCK is not set
-CONFIG_MAX_MEM_SIZE=512
 CONFIG_MAX_VCO_HZ=750000000
 CONFIG_MIN_VCO_HZ=50000000
 CONFIG_MAX_SCLK_HZ=133333333
@@ -201,6 +219,7 @@
 # CONFIG_HZ_300 is not set
 # CONFIG_HZ_1000 is not set
 CONFIG_HZ=250
+CONFIG_SCHED_HRTICK=y
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 # CONFIG_CYCLES_CLOCKSOURCE is not set
@@ -238,6 +257,12 @@
 CONFIG_CACHELINE_ALIGNED_L1=y
 # CONFIG_SYSCALL_TAB_L1 is not set
 # CONFIG_CPLB_SWITCH_TAB_L1 is not set
+CONFIG_APP_STACK_L1=y
+
+#
+# Speed Optimizations
+#
+CONFIG_BFIN_INS_LOWOVERHEAD=y
 CONFIG_RAMKERNEL=y
 # CONFIG_ROMKERNEL is not set
 CONFIG_SELECT_MEMORY_MODEL=y
@@ -246,14 +271,13 @@
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_VIRT_TO_BUS=y
 # CONFIG_BFIN_GPTIMERS is not set
-CONFIG_BFIN_DMA_5XX=y
 # CONFIG_DMA_UNCACHED_4M is not set
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
@@ -268,7 +292,6 @@
 # CONFIG_BFIN_ICACHE_LOCK is not set
 # CONFIG_BFIN_WB is not set
 CONFIG_BFIN_WT=y
-CONFIG_L1_MAX_PIECE=16
 # CONFIG_MPU is not set
 
 #
@@ -297,7 +320,6 @@
 #
 # Bus options (PCI, PCMCIA, EISA, MCA, ISA)
 #
-# CONFIG_PCI is not set
 # CONFIG_ARCH_SUPPORTS_MSI is not set
 # CONFIG_PCCARD is not set
 
@@ -308,29 +330,30 @@
 CONFIG_BINFMT_FLAT=y
 CONFIG_BINFMT_ZFLAT=y
 # CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_HAVE_AOUT is not set
 # CONFIG_BINFMT_MISC is not set
 
 #
 # Power management options
 #
 CONFIG_PM=y
-# CONFIG_PM_LEGACY is not set
 # CONFIG_PM_DEBUG is not set
 CONFIG_PM_SLEEP=y
-CONFIG_SUSPEND_UP_POSSIBLE=y
 CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
 CONFIG_PM_BFIN_SLEEP_DEEPER=y
 # CONFIG_PM_BFIN_SLEEP is not set
 # CONFIG_PM_WAKEUP_BY_GPIO is not set
 
 #
+# Possible Suspend Mem / Hibernate Wake-Up Sources
+#
+
+#
 # CPU Frequency scaling
 #
 # CONFIG_CPU_FREQ is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
@@ -343,6 +366,7 @@
 # CONFIG_XFRM_USER is not set
 # CONFIG_XFRM_SUB_POLICY is not set
 # CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
 # CONFIG_NET_KEY is not set
 CONFIG_INET=y
 # CONFIG_IP_MULTICAST is not set
@@ -372,8 +396,6 @@
 CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_TCP_MD5SIG is not set
 # CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
 # CONFIG_NETLABEL is not set
 # CONFIG_NETWORK_SECMARK is not set
 # CONFIG_NETFILTER is not set
@@ -382,6 +404,7 @@
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_DECNET is not set
 # CONFIG_LLC2 is not set
@@ -398,6 +421,7 @@
 #
 # CONFIG_NET_PKTGEN is not set
 # CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
 CONFIG_IRDA=m
 
 #
@@ -430,24 +454,14 @@
 # CONFIG_DONGLE is not set
 
 #
-# Old SIR device drivers
-#
-# CONFIG_IRPORT_SIR is not set
-
-#
-# Old Serial dongle support
-#
-
-#
 # FIR device drivers
 #
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
+# CONFIG_PHONET is not set
+CONFIG_WIRELESS=y
 # CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_OLD_REGULATORY=y
 # CONFIG_WIRELESS_EXT is not set
 # CONFIG_MAC80211 is not set
 # CONFIG_IEEE80211 is not set
@@ -465,6 +479,8 @@
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 # CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
 # CONFIG_SYS_HYPERVISOR is not set
 # CONFIG_CONNECTOR is not set
 CONFIG_MTD=y
@@ -473,6 +489,7 @@
 CONFIG_MTD_PARTITIONS=y
 # CONFIG_MTD_REDBOOT_PARTS is not set
 # CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
 
 #
 # User Modules And Translation Layers
@@ -516,6 +533,7 @@
 #
 CONFIG_MTD_COMPLEX_MAPPINGS=y
 # CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_GPIO_ADDR is not set
 # CONFIG_MTD_UCLINUX is not set
 # CONFIG_MTD_PLATRAM is not set
 
@@ -550,11 +568,14 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
 #
@@ -567,7 +588,6 @@
 # CONFIG_ATA is not set
 # CONFIG_MD is not set
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
 # CONFIG_MACVLAN is not set
@@ -580,11 +600,14 @@
 CONFIG_SMC91X=y
 # CONFIG_SMSC911X is not set
 # CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
 # CONFIG_IBM_NEW_EMAC_ZMII is not set
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set
-# CONFIG_B44 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NETDEV_1000=y
 # CONFIG_AX88180 is not set
 CONFIG_NETDEV_10000=y
@@ -594,10 +617,10 @@
 #
 # CONFIG_WLAN_PRE80211 is not set
 # CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
 # CONFIG_WAN is not set
 # CONFIG_PPP is not set
 # CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
 # CONFIG_NETCONSOLE is not set
 # CONFIG_NETPOLL is not set
 # CONFIG_NET_POLL_CONTROLLER is not set
@@ -645,8 +668,11 @@
 # CONFIG_BF5xx_PPI is not set
 CONFIG_BFIN_SPORT=y
 # CONFIG_BFIN_TIMER_LATENCY is not set
+CONFIG_BFIN_DMA_INTERFACE=m
 CONFIG_SIMPLE_GPIO=m
 # CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_BFIN_JTAG_COMM is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
@@ -675,22 +701,19 @@
 # CONFIG_CAN4LINUX is not set
 # CONFIG_IPMI_HANDLER is not set
 # CONFIG_HW_RANDOM is not set
-# CONFIG_GEN_RTC is not set
 # CONFIG_R3964 is not set
 # CONFIG_RAW_DRIVER is not set
 # CONFIG_TCG_TPM is not set
 # CONFIG_I2C is not set
-
-#
-# SPI support
-#
 CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
 CONFIG_SPI_MASTER=y
 
 #
 # SPI Master Controller Drivers
 #
 CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
 # CONFIG_SPI_BITBANG is not set
 
 #
@@ -699,14 +722,18 @@
 # CONFIG_SPI_AT25 is not set
 # CONFIG_SPI_SPIDEV is not set
 # CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
 # CONFIG_W1 is not set
 # CONFIG_POWER_SUPPLY is not set
 CONFIG_HWMON=y
 # CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADCXX is not set
 # CONFIG_SENSORS_F71805F is not set
 # CONFIG_SENSORS_F71882FG is not set
 # CONFIG_SENSORS_IT87 is not set
 # CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_MAX1111 is not set
 # CONFIG_SENSORS_PC87360 is not set
 # CONFIG_SENSORS_PC87427 is not set
 # CONFIG_SENSORS_SMSC47M1 is not set
@@ -715,6 +742,8 @@
 # CONFIG_SENSORS_W83627HF is not set
 # CONFIG_SENSORS_W83627EHF is not set
 # CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
 CONFIG_WATCHDOG=y
 # CONFIG_WATCHDOG_NOWAYOUT is not set
 
@@ -725,21 +754,28 @@
 CONFIG_BFIN_WDT=y
 
 #
-# Sonics Silicon Backplane
-#
-CONFIG_SSB_POSSIBLE=y
-# CONFIG_SSB is not set
-
-#
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
 
 #
 # Multimedia devices
 #
+
+#
+# Multimedia core support
+#
 # CONFIG_VIDEO_DEV is not set
 # CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
 # CONFIG_DAB is not set
 
 #
@@ -754,18 +790,22 @@
 # Display device support
 #
 # CONFIG_DISPLAY_SUPPORT is not set
-
-#
-# Sound
-#
 # CONFIG_SOUND is not set
 CONFIG_HID_SUPPORT=y
 CONFIG_HID=m
 # CONFIG_HID_DEBUG is not set
 # CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
+CONFIG_HID_COMPAT=y
 # CONFIG_USB_SUPPORT is not set
 # CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
 CONFIG_RTC_LIB=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_HCTOSYS=y
@@ -784,47 +824,51 @@
 #
 # SPI RTC drivers
 #
-# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
 # CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
 
 #
 # Platform RTC drivers
 #
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
 # CONFIG_RTC_DRV_DS1553 is not set
-# CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
 # CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
 # CONFIG_RTC_DRV_V3020 is not set
 
 #
 # on-CPU RTC drivers
 #
 CONFIG_RTC_DRV_BFIN=y
-
-#
-# Userspace I/O
-#
+# CONFIG_DMADEVICES is not set
 # CONFIG_UIO is not set
+# CONFIG_STAGING is not set
 
 #
 # File systems
 #
 # CONFIG_EXT2_FS is not set
 # CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 # CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
 # CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
+# CONFIG_DNOTIFY is not set
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
-# CONFIG_DNOTIFY is not set
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
 # CONFIG_FUSE_FS is not set
@@ -864,11 +908,11 @@
 # CONFIG_EFS_FS is not set
 CONFIG_YAFFS_FS=m
 CONFIG_YAFFS_YAFFS1=y
+# CONFIG_YAFFS_9BYTE_TAGS is not set
 # CONFIG_YAFFS_DOES_ECC is not set
 CONFIG_YAFFS_YAFFS2=y
 CONFIG_YAFFS_AUTO_YAFFS2=y
 # CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
-CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS=10
 # CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
 # CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
 CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
@@ -885,8 +929,11 @@
 # CONFIG_JFFS2_RUBIN is not set
 # CONFIG_CRAMFS is not set
 # CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
 # CONFIG_SYSV_FS is not set
 # CONFIG_UFS_FS is not set
 CONFIG_NETWORK_FILESYSTEMS=y
@@ -894,13 +941,12 @@
 CONFIG_NFS_V3=y
 # CONFIG_NFS_V3_ACL is not set
 # CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
 # CONFIG_NFSD is not set
 CONFIG_LOCKD=m
 CONFIG_LOCKD_V4=y
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=m
-# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_SUNRPC_REGISTER_V4 is not set
 # CONFIG_RPCSEC_GSS_KRB5 is not set
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 CONFIG_SMB_FS=m
@@ -956,9 +1002,6 @@
 # CONFIG_NLS_KOI8_U is not set
 # CONFIG_NLS_UTF8 is not set
 # CONFIG_DLM is not set
-CONFIG_INSTRUMENTATION=y
-# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
 
 #
 # Kernel hacking
@@ -966,14 +1009,53 @@
 # CONFIG_PRINTK_TIME is not set
 CONFIG_ENABLE_WARN_DEPRECATED=y
 CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
 # CONFIG_MAGIC_SYSRQ is not set
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_HEADERS_CHECK is not set
-# CONFIG_DEBUG_KERNEL is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
 # CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_VERBOSE=y
 CONFIG_DEBUG_MMRS=y
+# CONFIG_DEBUG_HWERR is not set
+# CONFIG_DEBUG_DOUBLEFAULT is not set
 CONFIG_DEBUG_HUNT_FOR_ZERO=y
 CONFIG_DEBUG_BFIN_HWTRACE_ON=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
@@ -991,9 +1073,94 @@
 #
 # CONFIG_KEYS is not set
 CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
 # CONFIG_SECURITY_NETWORK is not set
-# CONFIG_SECURITY_CAPABILITIES is not set
-# CONFIG_CRYPTO is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
 
 #
 # Library routines
@@ -1001,6 +1168,7 @@
 CONFIG_BITREVERSE=y
 CONFIG_CRC_CCITT=m
 # CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
 # CONFIG_CRC_ITU_T is not set
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index c23267e..49eabb4 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -1,6 +1,6 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.24.7
+# Linux kernel version: 2.6.28-rc2
 #
 # CONFIG_MMU is not set
 # CONFIG_FPU is not set
@@ -8,7 +8,6 @@
 # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
 CONFIG_BLACKFIN=y
 CONFIG_ZONE_DMA=y
-CONFIG_SEMAPHORE_SLEEPERS=y
 CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_HARDIRQS=y
@@ -31,18 +30,16 @@
 # CONFIG_POSIX_MQUEUE is not set
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
 # CONFIG_AUDIT is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CGROUPS is not set
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_FAIR_USER_SCHED=y
-# CONFIG_FAIR_CGROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
 # CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -51,26 +48,35 @@
 CONFIG_UID16=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
+CONFIG_COMPAT_BRK=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
 CONFIG_EPOLL=y
 CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
+CONFIG_AIO=y
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 CONFIG_TINY_SHMEM=y
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 # CONFIG_MODVERSIONS is not set
@@ -81,6 +87,7 @@
 # CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_LSF is not set
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -94,9 +101,11 @@
 # CONFIG_DEFAULT_CFQ is not set
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
 # CONFIG_PREEMPT_NONE is not set
 CONFIG_PREEMPT_VOLUNTARY=y
 # CONFIG_PREEMPT is not set
+CONFIG_FREEZER=y
 
 #
 # Blackfin Processor Options
@@ -105,6 +114,10 @@
 #
 # Processor and Board Settings
 #
+# CONFIG_BF512 is not set
+# CONFIG_BF514 is not set
+# CONFIG_BF516 is not set
+# CONFIG_BF518 is not set
 # CONFIG_BF522 is not set
 # CONFIG_BF523 is not set
 # CONFIG_BF524 is not set
@@ -117,24 +130,30 @@
 # CONFIG_BF534 is not set
 # CONFIG_BF536 is not set
 # CONFIG_BF537 is not set
+# CONFIG_BF538 is not set
+# CONFIG_BF539 is not set
 # CONFIG_BF542 is not set
 # CONFIG_BF544 is not set
 # CONFIG_BF547 is not set
 # CONFIG_BF548 is not set
 # CONFIG_BF549 is not set
 # CONFIG_BF561 is not set
+CONFIG_BF_REV_MIN=3
+CONFIG_BF_REV_MAX=6
 # CONFIG_BF_REV_0_0 is not set
 # CONFIG_BF_REV_0_1 is not set
 # CONFIG_BF_REV_0_2 is not set
 CONFIG_BF_REV_0_3=y
 # CONFIG_BF_REV_0_4 is not set
 # CONFIG_BF_REV_0_5 is not set
+# CONFIG_BF_REV_0_6 is not set
 # CONFIG_BF_REV_ANY is not set
 # CONFIG_BF_REV_NONE is not set
 CONFIG_BF53x=y
 CONFIG_MEM_MT48LC64M4A2FB_7E=y
 # CONFIG_BFIN533_EZKIT is not set
 CONFIG_BFIN533_STAMP=y
+# CONFIG_BLACKSTAMP is not set
 # CONFIG_BFIN533_BLUETECHNIX_CM is not set
 # CONFIG_H8606_HVSISTEMAS is not set
 # CONFIG_BFIN532_IP0X is not set
@@ -187,7 +206,6 @@
 #
 CONFIG_CLKIN_HZ=11059200
 # CONFIG_BFIN_KERNEL_CLOCK is not set
-CONFIG_MAX_MEM_SIZE=512
 CONFIG_MAX_VCO_HZ=750000000
 CONFIG_MIN_VCO_HZ=50000000
 CONFIG_MAX_SCLK_HZ=133333333
@@ -201,6 +219,7 @@
 # CONFIG_HZ_300 is not set
 # CONFIG_HZ_1000 is not set
 CONFIG_HZ=250
+CONFIG_SCHED_HRTICK=y
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 # CONFIG_CYCLES_CLOCKSOURCE is not set
@@ -238,6 +257,12 @@
 CONFIG_CACHELINE_ALIGNED_L1=y
 # CONFIG_SYSCALL_TAB_L1 is not set
 # CONFIG_CPLB_SWITCH_TAB_L1 is not set
+CONFIG_APP_STACK_L1=y
+
+#
+# Speed Optimizations
+#
+CONFIG_BFIN_INS_LOWOVERHEAD=y
 CONFIG_RAMKERNEL=y
 # CONFIG_ROMKERNEL is not set
 CONFIG_SELECT_MEMORY_MODEL=y
@@ -246,14 +271,13 @@
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_VIRT_TO_BUS=y
 # CONFIG_BFIN_GPTIMERS is not set
-CONFIG_BFIN_DMA_5XX=y
 # CONFIG_DMA_UNCACHED_4M is not set
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
@@ -268,7 +292,6 @@
 # CONFIG_BFIN_ICACHE_LOCK is not set
 # CONFIG_BFIN_WB is not set
 CONFIG_BFIN_WT=y
-CONFIG_L1_MAX_PIECE=16
 # CONFIG_MPU is not set
 
 #
@@ -297,7 +320,6 @@
 #
 # Bus options (PCI, PCMCIA, EISA, MCA, ISA)
 #
-# CONFIG_PCI is not set
 # CONFIG_ARCH_SUPPORTS_MSI is not set
 # CONFIG_PCCARD is not set
 
@@ -308,29 +330,30 @@
 CONFIG_BINFMT_FLAT=y
 CONFIG_BINFMT_ZFLAT=y
 # CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_HAVE_AOUT is not set
 # CONFIG_BINFMT_MISC is not set
 
 #
 # Power management options
 #
 CONFIG_PM=y
-# CONFIG_PM_LEGACY is not set
 # CONFIG_PM_DEBUG is not set
 CONFIG_PM_SLEEP=y
-CONFIG_SUSPEND_UP_POSSIBLE=y
 CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
 CONFIG_PM_BFIN_SLEEP_DEEPER=y
 # CONFIG_PM_BFIN_SLEEP is not set
 # CONFIG_PM_WAKEUP_BY_GPIO is not set
 
 #
+# Possible Suspend Mem / Hibernate Wake-Up Sources
+#
+
+#
 # CPU Frequency scaling
 #
 # CONFIG_CPU_FREQ is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
@@ -343,6 +366,7 @@
 # CONFIG_XFRM_USER is not set
 # CONFIG_XFRM_SUB_POLICY is not set
 # CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
 # CONFIG_NET_KEY is not set
 CONFIG_INET=y
 # CONFIG_IP_MULTICAST is not set
@@ -372,8 +396,6 @@
 CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_TCP_MD5SIG is not set
 # CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
 # CONFIG_NETLABEL is not set
 # CONFIG_NETWORK_SECMARK is not set
 # CONFIG_NETFILTER is not set
@@ -382,6 +404,7 @@
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_DECNET is not set
 # CONFIG_LLC2 is not set
@@ -398,6 +421,7 @@
 #
 # CONFIG_NET_PKTGEN is not set
 # CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
 CONFIG_IRDA=m
 
 #
@@ -432,24 +456,14 @@
 # CONFIG_DONGLE is not set
 
 #
-# Old SIR device drivers
-#
-# CONFIG_IRPORT_SIR is not set
-
-#
-# Old Serial dongle support
-#
-
-#
 # FIR device drivers
 #
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
+# CONFIG_PHONET is not set
+CONFIG_WIRELESS=y
 # CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_OLD_REGULATORY=y
 # CONFIG_WIRELESS_EXT is not set
 # CONFIG_MAC80211 is not set
 # CONFIG_IEEE80211 is not set
@@ -467,6 +481,8 @@
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 # CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
 # CONFIG_SYS_HYPERVISOR is not set
 # CONFIG_CONNECTOR is not set
 CONFIG_MTD=y
@@ -475,6 +491,7 @@
 CONFIG_MTD_PARTITIONS=y
 # CONFIG_MTD_REDBOOT_PARTS is not set
 CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
 
 #
 # User Modules And Translation Layers
@@ -520,6 +537,7 @@
 CONFIG_MTD_COMPLEX_MAPPINGS=y
 # CONFIG_MTD_PHYSMAP is not set
 CONFIG_MTD_BFIN_ASYNC=m
+# CONFIG_MTD_GPIO_ADDR is not set
 # CONFIG_MTD_UCLINUX is not set
 # CONFIG_MTD_PLATRAM is not set
 
@@ -554,11 +572,14 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
 #
@@ -571,7 +592,6 @@
 # CONFIG_ATA is not set
 # CONFIG_MD is not set
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
 # CONFIG_MACVLAN is not set
@@ -584,11 +604,14 @@
 CONFIG_SMC91X=y
 # CONFIG_SMSC911X is not set
 # CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
 # CONFIG_IBM_NEW_EMAC_ZMII is not set
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set
-# CONFIG_B44 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NETDEV_1000=y
 # CONFIG_AX88180 is not set
 CONFIG_NETDEV_10000=y
@@ -598,10 +621,10 @@
 #
 # CONFIG_WLAN_PRE80211 is not set
 # CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
 # CONFIG_WAN is not set
 # CONFIG_PPP is not set
 # CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
 # CONFIG_NETCONSOLE is not set
 # CONFIG_NETPOLL is not set
 # CONFIG_NET_POLL_CONTROLLER is not set
@@ -633,7 +656,7 @@
 # CONFIG_INPUT_TOUCHSCREEN is not set
 CONFIG_INPUT_MISC=y
 # CONFIG_INPUT_UINPUT is not set
-CONFIG_TWI_KEYPAD=m
+CONFIG_CONFIG_INPUT_PCF8574=m
 
 #
 # Hardware I/O ports
@@ -652,8 +675,11 @@
 CONFIG_BFIN_SPORT=y
 # CONFIG_BFIN_TIMER_LATENCY is not set
 CONFIG_TWI_LCD=m
+CONFIG_BFIN_DMA_INTERFACE=m
 CONFIG_SIMPLE_GPIO=m
 # CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_BFIN_JTAG_COMM is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
@@ -682,41 +708,46 @@
 # CONFIG_CAN4LINUX is not set
 # CONFIG_IPMI_HANDLER is not set
 # CONFIG_HW_RANDOM is not set
-# CONFIG_GEN_RTC is not set
 # CONFIG_R3964 is not set
 # CONFIG_RAW_DRIVER is not set
 # CONFIG_TCG_TPM is not set
 CONFIG_I2C=m
 CONFIG_I2C_BOARDINFO=y
 CONFIG_I2C_CHARDEV=m
-
-#
-# I2C Algorithms
-#
-CONFIG_I2C_ALGOBIT=m
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
+CONFIG_I2C_HELPER_AUTO=y
 
 #
 # I2C Hardware Bus support
 #
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
 # CONFIG_I2C_GPIO is not set
 # CONFIG_I2C_OCORES is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
 # CONFIG_I2C_STUB is not set
 
 #
 # Miscellaneous I2C Chip support
 #
-# CONFIG_SENSORS_DS1337 is not set
-# CONFIG_SENSORS_DS1374 is not set
 # CONFIG_DS1682 is not set
+# CONFIG_AT24 is not set
 # CONFIG_SENSORS_AD5252 is not set
 # CONFIG_SENSORS_EEPROM is not set
 # CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_SENSORS_PCF8575 is not set
+# CONFIG_PCF8575 is not set
 # CONFIG_SENSORS_PCA9539 is not set
 # CONFIG_SENSORS_PCF8591 is not set
 # CONFIG_SENSORS_MAX6875 is not set
@@ -725,17 +756,15 @@
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
 # CONFIG_I2C_DEBUG_CHIP is not set
-
-#
-# SPI support
-#
 CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
 CONFIG_SPI_MASTER=y
 
 #
 # SPI Master Controller Drivers
 #
 CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
 # CONFIG_SPI_BITBANG is not set
 
 #
@@ -744,11 +773,15 @@
 # CONFIG_SPI_AT25 is not set
 # CONFIG_SPI_SPIDEV is not set
 # CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
 # CONFIG_W1 is not set
 # CONFIG_POWER_SUPPLY is not set
 CONFIG_HWMON=y
 # CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_AD7414 is not set
 # CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
 # CONFIG_SENSORS_ADM1021 is not set
 # CONFIG_SENSORS_ADM1025 is not set
 # CONFIG_SENSORS_ADM1026 is not set
@@ -756,6 +789,7 @@
 # CONFIG_SENSORS_ADM1031 is not set
 # CONFIG_SENSORS_ADM9240 is not set
 # CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
 # CONFIG_SENSORS_ATXP1 is not set
 # CONFIG_SENSORS_DS1621 is not set
 # CONFIG_SENSORS_F71805F is not set
@@ -776,6 +810,7 @@
 # CONFIG_SENSORS_LM90 is not set
 # CONFIG_SENSORS_LM92 is not set
 # CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_MAX1111 is not set
 # CONFIG_SENSORS_MAX1619 is not set
 # CONFIG_SENSORS_MAX6650 is not set
 # CONFIG_SENSORS_PC87360 is not set
@@ -784,6 +819,7 @@
 # CONFIG_SENSORS_SMSC47M1 is not set
 # CONFIG_SENSORS_SMSC47M192 is not set
 # CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
 # CONFIG_SENSORS_THMC50 is not set
 # CONFIG_SENSORS_VT1211 is not set
 # CONFIG_SENSORS_W83781D is not set
@@ -791,9 +827,12 @@
 # CONFIG_SENSORS_W83792D is not set
 # CONFIG_SENSORS_W83793 is not set
 # CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
 # CONFIG_SENSORS_W83627HF is not set
 # CONFIG_SENSORS_W83627EHF is not set
 # CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
 CONFIG_WATCHDOG=y
 # CONFIG_WATCHDOG_NOWAYOUT is not set
 
@@ -804,21 +843,29 @@
 CONFIG_BFIN_WDT=y
 
 #
-# Sonics Silicon Backplane
-#
-CONFIG_SSB_POSSIBLE=y
-# CONFIG_SSB is not set
-
-#
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
 
 #
 # Multimedia devices
 #
+
+#
+# Multimedia core support
+#
 # CONFIG_VIDEO_DEV is not set
 # CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
 # CONFIG_DAB is not set
 
 #
@@ -829,6 +876,7 @@
 CONFIG_FB=m
 CONFIG_FIRMWARE_EDID=y
 # CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
 CONFIG_FB_CFB_FILLRECT=m
 CONFIG_FB_CFB_COPYAREA=m
 CONFIG_FB_CFB_IMAGEBLIT=m
@@ -836,8 +884,8 @@
 # CONFIG_FB_SYS_FILLRECT is not set
 # CONFIG_FB_SYS_COPYAREA is not set
 # CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
 # CONFIG_FB_SYS_FOPS is not set
-CONFIG_FB_DEFERRED_IO=y
 # CONFIG_FB_SVGALIB is not set
 # CONFIG_FB_MACMODES is not set
 # CONFIG_FB_BACKLIGHT is not set
@@ -848,6 +896,7 @@
 # Frame buffer hardware drivers
 #
 # CONFIG_FB_BFIN_T350MCQB is not set
+# CONFIG_FB_BFIN_LQ035Q1 is not set
 CONFIG_FB_BFIN_7393=m
 CONFIG_NTSC=y
 # CONFIG_PAL is not set
@@ -859,6 +908,7 @@
 # CONFIG_ADV7393_2XMEM is not set
 # CONFIG_FB_S1D13XXX is not set
 # CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
@@ -866,15 +916,8 @@
 #
 # CONFIG_DISPLAY_SUPPORT is not set
 # CONFIG_LOGO is not set
-
-#
-# Sound
-#
 CONFIG_SOUND=m
-
-#
-# Advanced Linux Sound Architecture
-#
+CONFIG_SOUND_OSS_CORE=y
 CONFIG_SND=m
 CONFIG_SND_TIMER=m
 CONFIG_SND_PCM=m
@@ -888,18 +931,12 @@
 CONFIG_SND_VERBOSE_PROCFS=y
 # CONFIG_SND_VERBOSE_PRINTK is not set
 # CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
+CONFIG_SND_DRIVERS=y
 # CONFIG_SND_DUMMY is not set
 # CONFIG_SND_MTPAV is not set
 # CONFIG_SND_SERIAL_U16550 is not set
 # CONFIG_SND_MPU401 is not set
-
-#
-# SPI devices
-#
+CONFIG_SND_SPI=y
 
 #
 # ALSA Blackfin devices
@@ -911,46 +948,46 @@
 # CONFIG_SND_BLACKFIN_AD1836_5P1 is not set
 CONFIG_SND_BLACKFIN_SPORT=0
 CONFIG_SND_BLACKFIN_SPI_PFBIT=4
-CONFIG_SND_BFIN_AD73311=m
 CONFIG_SND_BFIN_SPORT=0
-CONFIG_SND_BFIN_AD73311_SE=4
 CONFIG_SND_BFIN_AD73322=m
 CONFIG_SND_BFIN_AD73322_SPORT0_SE=10
 CONFIG_SND_BFIN_AD73322_SPORT1_SE=14
 CONFIG_SND_BFIN_AD73322_RESET=12
-
-#
-# System on Chip audio support
-#
-CONFIG_SND_SOC_AC97_BUS=y
 CONFIG_SND_SOC=m
-CONFIG_SND_BF5XX_SOC=m
-CONFIG_SND_MMAP_SUPPORT=y
-CONFIG_SND_BF5XX_SOC_AC97=m
-# CONFIG_SND_BF5XX_SOC_WM8750 is not set
-# CONFIG_SND_BF5XX_SOC_WM8731 is not set
+CONFIG_SND_SOC_AC97_BUS=y
+CONFIG_SND_BF5XX_I2S=m
 # CONFIG_SND_BF5XX_SOC_SSM2602 is not set
-CONFIG_SND_BF5XX_SOC_BF5xx=m
+CONFIG_SND_BF5XX_SOC_AD73311=m
+CONFIG_SND_BFIN_AD73311_SE=4
+CONFIG_SND_BF5XX_AC97=m
+CONFIG_SND_BF5XX_MMAP_SUPPORT=y
+# CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set
+CONFIG_SND_BF5XX_SOC_SPORT=m
+CONFIG_SND_BF5XX_SOC_I2S=m
+CONFIG_SND_BF5XX_SOC_AC97=m
+CONFIG_SND_BF5XX_SOC_AD1980=m
 CONFIG_SND_BF5XX_SPORT_NUM=0
 # CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set
-
-#
-# SoC Audio support for SuperH
-#
+# CONFIG_SND_SOC_ALL_CODECS is not set
 CONFIG_SND_SOC_AD1980=m
-
-#
-# Open Sound System
-#
+CONFIG_SND_SOC_AD73311=m
 # CONFIG_SOUND_PRIME is not set
 CONFIG_AC97_BUS=m
 CONFIG_HID_SUPPORT=y
 CONFIG_HID=y
 # CONFIG_HID_DEBUG is not set
 # CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
+CONFIG_HID_COMPAT=y
 # CONFIG_USB_SUPPORT is not set
 # CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
 CONFIG_RTC_LIB=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_HCTOSYS=y
@@ -979,51 +1016,57 @@
 # CONFIG_RTC_DRV_PCF8563 is not set
 # CONFIG_RTC_DRV_PCF8583 is not set
 # CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
 
 #
 # SPI RTC drivers
 #
-# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
 # CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
 
 #
 # Platform RTC drivers
 #
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
 # CONFIG_RTC_DRV_DS1553 is not set
-# CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
 # CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
 # CONFIG_RTC_DRV_V3020 is not set
 
 #
 # on-CPU RTC drivers
 #
 CONFIG_RTC_DRV_BFIN=y
-
-#
-# Userspace I/O
-#
+# CONFIG_DMADEVICES is not set
 # CONFIG_UIO is not set
+# CONFIG_STAGING is not set
 
 #
 # File systems
 #
 # CONFIG_EXT2_FS is not set
 # CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 # CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
 # CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
+# CONFIG_DNOTIFY is not set
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
-# CONFIG_DNOTIFY is not set
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
 # CONFIG_FUSE_FS is not set
@@ -1063,11 +1106,11 @@
 # CONFIG_EFS_FS is not set
 CONFIG_YAFFS_FS=m
 CONFIG_YAFFS_YAFFS1=y
+# CONFIG_YAFFS_9BYTE_TAGS is not set
 # CONFIG_YAFFS_DOES_ECC is not set
 CONFIG_YAFFS_YAFFS2=y
 CONFIG_YAFFS_AUTO_YAFFS2=y
 # CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
-CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS=10
 # CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
 # CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
 CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
@@ -1084,8 +1127,11 @@
 # CONFIG_JFFS2_RUBIN is not set
 # CONFIG_CRAMFS is not set
 # CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
 # CONFIG_SYSV_FS is not set
 # CONFIG_UFS_FS is not set
 CONFIG_NETWORK_FILESYSTEMS=y
@@ -1093,13 +1139,12 @@
 CONFIG_NFS_V3=y
 # CONFIG_NFS_V3_ACL is not set
 # CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
 # CONFIG_NFSD is not set
 CONFIG_LOCKD=m
 CONFIG_LOCKD_V4=y
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=m
-# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_SUNRPC_REGISTER_V4 is not set
 # CONFIG_RPCSEC_GSS_KRB5 is not set
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 CONFIG_SMB_FS=m
@@ -1155,9 +1200,6 @@
 # CONFIG_NLS_KOI8_U is not set
 # CONFIG_NLS_UTF8 is not set
 # CONFIG_DLM is not set
-CONFIG_INSTRUMENTATION=y
-# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
 
 #
 # Kernel hacking
@@ -1165,14 +1207,53 @@
 # CONFIG_PRINTK_TIME is not set
 CONFIG_ENABLE_WARN_DEPRECATED=y
 CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
 # CONFIG_MAGIC_SYSRQ is not set
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_HEADERS_CHECK is not set
-# CONFIG_DEBUG_KERNEL is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
 # CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_VERBOSE=y
 CONFIG_DEBUG_MMRS=y
+# CONFIG_DEBUG_HWERR is not set
+# CONFIG_DEBUG_DOUBLEFAULT is not set
 CONFIG_DEBUG_HUNT_FOR_ZERO=y
 CONFIG_DEBUG_BFIN_HWTRACE_ON=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
@@ -1190,9 +1271,94 @@
 #
 # CONFIG_KEYS is not set
 CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
 # CONFIG_SECURITY_NETWORK is not set
-# CONFIG_SECURITY_CAPABILITIES is not set
-# CONFIG_CRYPTO is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
 
 #
 # Library routines
@@ -1200,6 +1366,7 @@
 CONFIG_BITREVERSE=y
 CONFIG_CRC_CCITT=m
 # CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
 # CONFIG_CRC_ITU_T is not set
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index 63a0f85..332142f 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -1,6 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.24.7
+# Linux kernel version: 2.6.28-rc2
+# Tue Dec 30 17:24:37 2008
 #
 # CONFIG_MMU is not set
 # CONFIG_FPU is not set
@@ -8,7 +9,6 @@
 # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
 CONFIG_BLACKFIN=y
 CONFIG_ZONE_DMA=y
-CONFIG_SEMAPHORE_SLEEPERS=y
 CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_HARDIRQS=y
@@ -31,18 +31,16 @@
 # CONFIG_POSIX_MQUEUE is not set
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
 # CONFIG_AUDIT is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CGROUPS is not set
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_FAIR_USER_SCHED=y
-# CONFIG_FAIR_CGROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
 # CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -51,26 +49,35 @@
 CONFIG_UID16=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
+CONFIG_COMPAT_BRK=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
 CONFIG_EPOLL=y
 CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
+CONFIG_AIO=y
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 CONFIG_TINY_SHMEM=y
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 # CONFIG_MODVERSIONS is not set
@@ -81,6 +88,7 @@
 # CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_LSF is not set
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -94,9 +102,11 @@
 # CONFIG_DEFAULT_CFQ is not set
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
 # CONFIG_PREEMPT_NONE is not set
 CONFIG_PREEMPT_VOLUNTARY=y
 # CONFIG_PREEMPT is not set
+CONFIG_FREEZER=y
 
 #
 # Blackfin Processor Options
@@ -105,6 +115,10 @@
 #
 # Processor and Board Settings
 #
+# CONFIG_BF512 is not set
+# CONFIG_BF514 is not set
+# CONFIG_BF516 is not set
+# CONFIG_BF518 is not set
 # CONFIG_BF522 is not set
 # CONFIG_BF523 is not set
 # CONFIG_BF524 is not set
@@ -117,18 +131,23 @@
 # CONFIG_BF534 is not set
 # CONFIG_BF536 is not set
 CONFIG_BF537=y
+# CONFIG_BF538 is not set
+# CONFIG_BF539 is not set
 # CONFIG_BF542 is not set
 # CONFIG_BF544 is not set
 # CONFIG_BF547 is not set
 # CONFIG_BF548 is not set
 # CONFIG_BF549 is not set
 # CONFIG_BF561 is not set
+CONFIG_BF_REV_MIN=2
+CONFIG_BF_REV_MAX=3
 # CONFIG_BF_REV_0_0 is not set
 # CONFIG_BF_REV_0_1 is not set
 CONFIG_BF_REV_0_2=y
 # CONFIG_BF_REV_0_3 is not set
 # CONFIG_BF_REV_0_4 is not set
 # CONFIG_BF_REV_0_5 is not set
+# CONFIG_BF_REV_0_6 is not set
 # CONFIG_BF_REV_ANY is not set
 # CONFIG_BF_REV_NONE is not set
 CONFIG_BF53x=y
@@ -141,27 +160,28 @@
 CONFIG_IRQ_SPORT1_RX=9
 CONFIG_IRQ_SPORT1_TX=9
 CONFIG_IRQ_TWI=10
-CONFIG_IRQ_SPI=10
 CONFIG_IRQ_UART0_RX=10
 CONFIG_IRQ_UART0_TX=10
 CONFIG_IRQ_UART1_RX=10
 CONFIG_IRQ_UART1_TX=10
 CONFIG_IRQ_MAC_RX=11
 CONFIG_IRQ_MAC_TX=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=8
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
 CONFIG_IRQ_PORTG_INTB=12
 CONFIG_IRQ_MEM_DMA0=13
 CONFIG_IRQ_MEM_DMA1=13
 CONFIG_IRQ_WATCH=13
+CONFIG_IRQ_SPI=10
 CONFIG_BFIN537_STAMP=y
 # CONFIG_BFIN537_BLUETECHNIX_CM is not set
+# CONFIG_BFIN537_BLUETECHNIX_TCM is not set
 # CONFIG_PNAV10 is not set
 # CONFIG_CAMSIG_MINOTAUR is not set
 # CONFIG_GENERIC_BF537_BOARD is not set
@@ -194,7 +214,6 @@
 #
 CONFIG_CLKIN_HZ=25000000
 # CONFIG_BFIN_KERNEL_CLOCK is not set
-CONFIG_MAX_MEM_SIZE=512
 CONFIG_MAX_VCO_HZ=600000000
 CONFIG_MIN_VCO_HZ=50000000
 CONFIG_MAX_SCLK_HZ=133333333
@@ -208,6 +227,7 @@
 # CONFIG_HZ_300 is not set
 # CONFIG_HZ_1000 is not set
 CONFIG_HZ=250
+CONFIG_SCHED_HRTICK=y
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 # CONFIG_CYCLES_CLOCKSOURCE is not set
@@ -245,6 +265,12 @@
 CONFIG_CACHELINE_ALIGNED_L1=y
 # CONFIG_SYSCALL_TAB_L1 is not set
 # CONFIG_CPLB_SWITCH_TAB_L1 is not set
+CONFIG_APP_STACK_L1=y
+
+#
+# Speed Optimizations
+#
+CONFIG_BFIN_INS_LOWOVERHEAD=y
 CONFIG_RAMKERNEL=y
 # CONFIG_ROMKERNEL is not set
 CONFIG_SELECT_MEMORY_MODEL=y
@@ -253,14 +279,13 @@
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_VIRT_TO_BUS=y
-# CONFIG_BFIN_GPTIMERS is not set
-CONFIG_BFIN_DMA_5XX=y
+CONFIG_BFIN_GPTIMERS=m
 # CONFIG_DMA_UNCACHED_4M is not set
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
@@ -275,7 +300,6 @@
 # CONFIG_BFIN_ICACHE_LOCK is not set
 # CONFIG_BFIN_WB is not set
 CONFIG_BFIN_WT=y
-CONFIG_L1_MAX_PIECE=16
 # CONFIG_MPU is not set
 
 #
@@ -304,7 +328,6 @@
 #
 # Bus options (PCI, PCMCIA, EISA, MCA, ISA)
 #
-# CONFIG_PCI is not set
 # CONFIG_ARCH_SUPPORTS_MSI is not set
 # CONFIG_PCCARD is not set
 
@@ -315,29 +338,31 @@
 CONFIG_BINFMT_FLAT=y
 CONFIG_BINFMT_ZFLAT=y
 # CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_HAVE_AOUT is not set
 # CONFIG_BINFMT_MISC is not set
 
 #
 # Power management options
 #
 CONFIG_PM=y
-# CONFIG_PM_LEGACY is not set
 # CONFIG_PM_DEBUG is not set
 CONFIG_PM_SLEEP=y
-CONFIG_SUSPEND_UP_POSSIBLE=y
 CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
 CONFIG_PM_BFIN_SLEEP_DEEPER=y
 # CONFIG_PM_BFIN_SLEEP is not set
 # CONFIG_PM_WAKEUP_BY_GPIO is not set
 
 #
+# Possible Suspend Mem / Hibernate Wake-Up Sources
+#
+# CONFIG_PM_BFIN_WAKE_PH6 is not set
+
+#
 # CPU Frequency scaling
 #
 # CONFIG_CPU_FREQ is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
@@ -350,6 +375,7 @@
 # CONFIG_XFRM_USER is not set
 # CONFIG_XFRM_SUB_POLICY is not set
 # CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
 # CONFIG_NET_KEY is not set
 CONFIG_INET=y
 # CONFIG_IP_MULTICAST is not set
@@ -379,8 +405,6 @@
 CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_TCP_MD5SIG is not set
 # CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
 # CONFIG_NETLABEL is not set
 # CONFIG_NETWORK_SECMARK is not set
 # CONFIG_NETFILTER is not set
@@ -389,6 +413,7 @@
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_DECNET is not set
 # CONFIG_LLC2 is not set
@@ -405,6 +430,7 @@
 #
 # CONFIG_NET_PKTGEN is not set
 # CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
 CONFIG_IRDA=m
 
 #
@@ -440,24 +466,14 @@
 # CONFIG_DONGLE is not set
 
 #
-# Old SIR device drivers
-#
-# CONFIG_IRPORT_SIR is not set
-
-#
-# Old Serial dongle support
-#
-
-#
 # FIR device drivers
 #
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
+# CONFIG_PHONET is not set
+CONFIG_WIRELESS=y
 # CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_OLD_REGULATORY=y
 # CONFIG_WIRELESS_EXT is not set
 # CONFIG_MAC80211 is not set
 # CONFIG_IEEE80211 is not set
@@ -475,6 +491,8 @@
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 # CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
 # CONFIG_SYS_HYPERVISOR is not set
 # CONFIG_CONNECTOR is not set
 CONFIG_MTD=y
@@ -483,6 +501,7 @@
 CONFIG_MTD_PARTITIONS=y
 # CONFIG_MTD_REDBOOT_PARTS is not set
 CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
 
 #
 # User Modules And Translation Layers
@@ -553,15 +572,11 @@
 # CONFIG_MTD_NAND_VERIFY_WRITE is not set
 # CONFIG_MTD_NAND_ECC_SMC is not set
 # CONFIG_MTD_NAND_MUSEUM_IDS is not set
-CONFIG_MTD_NAND_BFIN=m
-CONFIG_BFIN_NAND_BASE=0x20212000
-CONFIG_BFIN_NAND_CLE=2
-CONFIG_BFIN_NAND_ALE=1
-CONFIG_BFIN_NAND_READY=3
+# CONFIG_MTD_NAND_BFIN is not set
 CONFIG_MTD_NAND_IDS=m
 # CONFIG_MTD_NAND_DISKONCHIP is not set
 # CONFIG_MTD_NAND_NANDSIM is not set
-# CONFIG_MTD_NAND_PLATFORM is not set
+CONFIG_MTD_NAND_PLATFORM=m
 # CONFIG_MTD_ONENAND is not set
 
 #
@@ -576,11 +591,14 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
 #
@@ -593,7 +611,6 @@
 # CONFIG_ATA is not set
 # CONFIG_MD is not set
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
 # CONFIG_MACVLAN is not set
@@ -614,6 +631,7 @@
 CONFIG_SMSC_PHY=y
 # CONFIG_BROADCOM_PHY is not set
 # CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
 # CONFIG_FIXED_PHY is not set
 # CONFIG_MDIO_BITBANG is not set
 CONFIG_NET_ETHERNET=y
@@ -626,11 +644,14 @@
 # CONFIG_SMC91X is not set
 # CONFIG_SMSC911X is not set
 # CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
 # CONFIG_IBM_NEW_EMAC_ZMII is not set
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set
-# CONFIG_B44 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NETDEV_1000=y
 # CONFIG_AX88180 is not set
 CONFIG_NETDEV_10000=y
@@ -640,10 +661,10 @@
 #
 # CONFIG_WLAN_PRE80211 is not set
 # CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
 # CONFIG_WAN is not set
 # CONFIG_PPP is not set
 # CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
 # CONFIG_NETCONSOLE is not set
 # CONFIG_NETPOLL is not set
 # CONFIG_NET_POLL_CONTROLLER is not set
@@ -675,12 +696,15 @@
 # CONFIG_INPUT_TOUCHSCREEN is not set
 CONFIG_INPUT_MISC=y
 # CONFIG_INPUT_UINPUT is not set
-CONFIG_TWI_KEYPAD=m
+CONFIG_CONFIG_INPUT_PCF8574=m
 
 #
 # Hardware I/O ports
 #
-# CONFIG_SERIO is not set
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
 # CONFIG_GAMEPORT is not set
 
 #
@@ -691,11 +715,14 @@
 # CONFIG_BF5xx_PPIFCD is not set
 # CONFIG_BFIN_SIMPLE_TIMER is not set
 # CONFIG_BF5xx_PPI is not set
-CONFIG_BFIN_SPORT=y
+CONFIG_BFIN_SPORT=m
 # CONFIG_BFIN_TIMER_LATENCY is not set
 CONFIG_TWI_LCD=m
+CONFIG_BFIN_DMA_INTERFACE=m
 CONFIG_SIMPLE_GPIO=m
 # CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_BFIN_JTAG_COMM is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
@@ -727,48 +754,51 @@
 #
 # linux embedded drivers
 #
-# CONFIG_CAN_MCF5282 is not set
-# CONFIG_CAN_UNCTWINCAN is not set
 CONFIG_CAN_BLACKFIN=m
 # CONFIG_IPMI_HANDLER is not set
 # CONFIG_HW_RANDOM is not set
-# CONFIG_GEN_RTC is not set
 # CONFIG_R3964 is not set
 # CONFIG_RAW_DRIVER is not set
 # CONFIG_TCG_TPM is not set
 CONFIG_I2C=m
 CONFIG_I2C_BOARDINFO=y
 CONFIG_I2C_CHARDEV=m
-
-#
-# I2C Algorithms
-#
-# CONFIG_I2C_ALGOBIT is not set
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
+CONFIG_I2C_HELPER_AUTO=y
 
 #
 # I2C Hardware Bus support
 #
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
 CONFIG_I2C_BLACKFIN_TWI=m
-CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=50
+CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 # CONFIG_I2C_GPIO is not set
 # CONFIG_I2C_OCORES is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
 # CONFIG_I2C_STUB is not set
 
 #
 # Miscellaneous I2C Chip support
 #
-# CONFIG_SENSORS_DS1337 is not set
-# CONFIG_SENSORS_DS1374 is not set
 # CONFIG_DS1682 is not set
+# CONFIG_AT24 is not set
 CONFIG_SENSORS_AD5252=m
 # CONFIG_SENSORS_EEPROM is not set
 # CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_SENSORS_PCF8575 is not set
+# CONFIG_PCF8575 is not set
 # CONFIG_SENSORS_PCA9539 is not set
 # CONFIG_SENSORS_PCF8591 is not set
 # CONFIG_SENSORS_MAX6875 is not set
@@ -777,17 +807,15 @@
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
 # CONFIG_I2C_DEBUG_CHIP is not set
-
-#
-# SPI support
-#
 CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
 CONFIG_SPI_MASTER=y
 
 #
 # SPI Master Controller Drivers
 #
 CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
 # CONFIG_SPI_BITBANG is not set
 
 #
@@ -796,11 +824,15 @@
 # CONFIG_SPI_AT25 is not set
 # CONFIG_SPI_SPIDEV is not set
 # CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
 # CONFIG_W1 is not set
 # CONFIG_POWER_SUPPLY is not set
 CONFIG_HWMON=y
 # CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_AD7414 is not set
 # CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
 # CONFIG_SENSORS_ADM1021 is not set
 # CONFIG_SENSORS_ADM1025 is not set
 # CONFIG_SENSORS_ADM1026 is not set
@@ -808,6 +840,7 @@
 # CONFIG_SENSORS_ADM1031 is not set
 # CONFIG_SENSORS_ADM9240 is not set
 # CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
 # CONFIG_SENSORS_ATXP1 is not set
 # CONFIG_SENSORS_DS1621 is not set
 # CONFIG_SENSORS_F71805F is not set
@@ -828,6 +861,7 @@
 # CONFIG_SENSORS_LM90 is not set
 # CONFIG_SENSORS_LM92 is not set
 # CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_MAX1111 is not set
 # CONFIG_SENSORS_MAX1619 is not set
 # CONFIG_SENSORS_MAX6650 is not set
 # CONFIG_SENSORS_PC87360 is not set
@@ -836,6 +870,7 @@
 # CONFIG_SENSORS_SMSC47M1 is not set
 # CONFIG_SENSORS_SMSC47M192 is not set
 # CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
 # CONFIG_SENSORS_THMC50 is not set
 # CONFIG_SENSORS_VT1211 is not set
 # CONFIG_SENSORS_W83781D is not set
@@ -843,9 +878,12 @@
 # CONFIG_SENSORS_W83792D is not set
 # CONFIG_SENSORS_W83793 is not set
 # CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
 # CONFIG_SENSORS_W83627HF is not set
 # CONFIG_SENSORS_W83627EHF is not set
 # CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
 CONFIG_WATCHDOG=y
 # CONFIG_WATCHDOG_NOWAYOUT is not set
 
@@ -856,21 +894,29 @@
 CONFIG_BFIN_WDT=y
 
 #
-# Sonics Silicon Backplane
-#
-CONFIG_SSB_POSSIBLE=y
-# CONFIG_SSB is not set
-
-#
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
 
 #
 # Multimedia devices
 #
+
+#
+# Multimedia core support
+#
 # CONFIG_VIDEO_DEV is not set
 # CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
 # CONFIG_DAB is not set
 
 #
@@ -881,6 +927,7 @@
 CONFIG_FB=m
 CONFIG_FIRMWARE_EDID=y
 # CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
 CONFIG_FB_CFB_FILLRECT=m
 CONFIG_FB_CFB_COPYAREA=m
 CONFIG_FB_CFB_IMAGEBLIT=m
@@ -888,8 +935,8 @@
 # CONFIG_FB_SYS_FILLRECT is not set
 # CONFIG_FB_SYS_COPYAREA is not set
 # CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
 # CONFIG_FB_SYS_FOPS is not set
-CONFIG_FB_DEFERRED_IO=y
 # CONFIG_FB_SVGALIB is not set
 # CONFIG_FB_MACMODES is not set
 # CONFIG_FB_BACKLIGHT is not set
@@ -899,8 +946,12 @@
 #
 # Frame buffer hardware drivers
 #
-# CONFIG_FB_HITACHI_TX09 is not set
 # CONFIG_FB_BFIN_T350MCQB is not set
+# CONFIG_FB_BFIN_LQ035Q1 is not set
+CONFIG_FB_BF537_LQ035=m
+CONFIG_LQ035_SLAVE_ADDR=0x58
+# CONFIG_FB_BFIN_LANDSCAPE is not set
+# CONFIG_FB_BFIN_BGR is not set
 CONFIG_FB_BFIN_7393=m
 CONFIG_NTSC=y
 # CONFIG_PAL is not set
@@ -910,15 +961,17 @@
 # CONFIG_PAL_YCBCR is not set
 CONFIG_ADV7393_1XMEM=y
 # CONFIG_ADV7393_2XMEM is not set
-CONFIG_FB_BF537_LQ035=m
-CONFIG_LQ035_SLAVE_ADDR=0x58
-# CONFIG_FB_BFIN_LANDSCAPE is not set
-# CONFIG_FB_BFIN_BGR is not set
+# CONFIG_FB_HITACHI_TX09 is not set
 # CONFIG_FB_S1D13XXX is not set
 # CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_LCD_CLASS_DEVICE=m
 # CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+# CONFIG_LCD_PLATFORM is not set
 CONFIG_BACKLIGHT_CLASS_DEVICE=m
 CONFIG_BACKLIGHT_CORGI=m
 
@@ -927,15 +980,8 @@
 #
 # CONFIG_DISPLAY_SUPPORT is not set
 # CONFIG_LOGO is not set
-
-#
-# Sound
-#
 CONFIG_SOUND=m
-
-#
-# Advanced Linux Sound Architecture
-#
+CONFIG_SOUND_OSS_CORE=y
 CONFIG_SND=m
 CONFIG_SND_TIMER=m
 CONFIG_SND_PCM=m
@@ -949,18 +995,12 @@
 CONFIG_SND_VERBOSE_PROCFS=y
 # CONFIG_SND_VERBOSE_PRINTK is not set
 # CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
+CONFIG_SND_DRIVERS=y
 # CONFIG_SND_DUMMY is not set
 # CONFIG_SND_MTPAV is not set
 # CONFIG_SND_SERIAL_U16550 is not set
 # CONFIG_SND_MPU401 is not set
-
-#
-# SPI devices
-#
+CONFIG_SND_SPI=y
 
 #
 # ALSA Blackfin devices
@@ -972,51 +1012,46 @@
 # CONFIG_SND_BLACKFIN_AD1836_5P1 is not set
 CONFIG_SND_BLACKFIN_SPORT=0
 CONFIG_SND_BLACKFIN_SPI_PFBIT=4
-CONFIG_SND_BFIN_AD73311=m
 CONFIG_SND_BFIN_SPORT=0
-CONFIG_SND_BFIN_AD73311_SE=4
 CONFIG_SND_BFIN_AD73322=m
 CONFIG_SND_BFIN_AD73322_SPORT0_SE=10
 CONFIG_SND_BFIN_AD73322_SPORT1_SE=14
 CONFIG_SND_BFIN_AD73322_RESET=12
-
-#
-# System on Chip audio support
-#
-CONFIG_SND_SOC_AC97_BUS=y
 CONFIG_SND_SOC=m
-CONFIG_SND_BF5XX_SOC=m
-CONFIG_SND_MMAP_SUPPORT=y
-CONFIG_SND_BF5XX_SOC_AC97=m
-# CONFIG_SND_BF5XX_SOC_WM8750 is not set
-# CONFIG_SND_BF5XX_SOC_WM8731 is not set
+CONFIG_SND_SOC_AC97_BUS=y
+CONFIG_SND_BF5XX_I2S=m
 # CONFIG_SND_BF5XX_SOC_SSM2602 is not set
-CONFIG_SND_BF5XX_SOC_BF5xx=m
+CONFIG_SND_BF5XX_SOC_AD73311=m
+CONFIG_SND_BFIN_AD73311_SE=4
+CONFIG_SND_BF5XX_AC97=m
+CONFIG_SND_BF5XX_MMAP_SUPPORT=y
+# CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set
+CONFIG_SND_BF5XX_SOC_SPORT=m
+CONFIG_SND_BF5XX_SOC_I2S=m
+CONFIG_SND_BF5XX_SOC_AC97=m
+CONFIG_SND_BF5XX_SOC_AD1980=m
 CONFIG_SND_BF5XX_SPORT_NUM=0
 # CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set
-
-#
-# SoC Audio support for SuperH
-#
+# CONFIG_SND_SOC_ALL_CODECS is not set
 CONFIG_SND_SOC_AD1980=m
-
-#
-# Open Sound System
-#
+CONFIG_SND_SOC_AD73311=m
 # CONFIG_SOUND_PRIME is not set
 CONFIG_AC97_BUS=m
 CONFIG_HID_SUPPORT=y
 CONFIG_HID=y
 # CONFIG_HID_DEBUG is not set
 # CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
+CONFIG_HID_COMPAT=y
 # CONFIG_USB_SUPPORT is not set
-# CONFIG_NO_DUMMY_DELAY is not set
-# CONFIG_DUMMY_DELAY_BANK0 is not set
-# CONFIG_DUMMY_DELAY_BANK1 is not set
-# CONFIG_DUMMY_DELAY_BANK2 is not set
-# CONFIG_DUMMY_DELAY_BANK3 is not set
 # CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
 CONFIG_RTC_LIB=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_HCTOSYS=y
@@ -1045,51 +1080,57 @@
 # CONFIG_RTC_DRV_PCF8563 is not set
 # CONFIG_RTC_DRV_PCF8583 is not set
 # CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
 
 #
 # SPI RTC drivers
 #
-# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
 # CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
 
 #
 # Platform RTC drivers
 #
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
 # CONFIG_RTC_DRV_DS1553 is not set
-# CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
 # CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
 # CONFIG_RTC_DRV_V3020 is not set
 
 #
 # on-CPU RTC drivers
 #
 CONFIG_RTC_DRV_BFIN=y
-
-#
-# Userspace I/O
-#
+# CONFIG_DMADEVICES is not set
 # CONFIG_UIO is not set
+# CONFIG_STAGING is not set
 
 #
 # File systems
 #
 # CONFIG_EXT2_FS is not set
 # CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 # CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
 # CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
+# CONFIG_DNOTIFY is not set
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
-# CONFIG_DNOTIFY is not set
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
 # CONFIG_FUSE_FS is not set
@@ -1129,11 +1170,11 @@
 # CONFIG_EFS_FS is not set
 CONFIG_YAFFS_FS=m
 CONFIG_YAFFS_YAFFS1=y
+# CONFIG_YAFFS_9BYTE_TAGS is not set
 # CONFIG_YAFFS_DOES_ECC is not set
 CONFIG_YAFFS_YAFFS2=y
 CONFIG_YAFFS_AUTO_YAFFS2=y
 # CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
-CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS=10
 # CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
 # CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
 CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
@@ -1150,8 +1191,11 @@
 # CONFIG_JFFS2_RUBIN is not set
 # CONFIG_CRAMFS is not set
 # CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
 # CONFIG_SYSV_FS is not set
 # CONFIG_UFS_FS is not set
 CONFIG_NETWORK_FILESYSTEMS=y
@@ -1159,13 +1203,12 @@
 CONFIG_NFS_V3=y
 # CONFIG_NFS_V3_ACL is not set
 # CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
 # CONFIG_NFSD is not set
 CONFIG_LOCKD=m
 CONFIG_LOCKD_V4=y
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=m
-# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_SUNRPC_REGISTER_V4 is not set
 # CONFIG_RPCSEC_GSS_KRB5 is not set
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 CONFIG_SMB_FS=m
@@ -1221,9 +1264,6 @@
 # CONFIG_NLS_KOI8_U is not set
 # CONFIG_NLS_UTF8 is not set
 # CONFIG_DLM is not set
-CONFIG_INSTRUMENTATION=y
-# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
 
 #
 # Kernel hacking
@@ -1231,14 +1271,53 @@
 # CONFIG_PRINTK_TIME is not set
 CONFIG_ENABLE_WARN_DEPRECATED=y
 CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
 # CONFIG_MAGIC_SYSRQ is not set
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_HEADERS_CHECK is not set
-# CONFIG_DEBUG_KERNEL is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
 # CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_VERBOSE=y
 CONFIG_DEBUG_MMRS=y
+# CONFIG_DEBUG_HWERR is not set
+# CONFIG_DEBUG_DOUBLEFAULT is not set
 CONFIG_DEBUG_HUNT_FOR_ZERO=y
 CONFIG_DEBUG_BFIN_HWTRACE_ON=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
@@ -1256,9 +1335,94 @@
 #
 # CONFIG_KEYS is not set
 CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
 # CONFIG_SECURITY_NETWORK is not set
-# CONFIG_SECURITY_CAPABILITIES is not set
-# CONFIG_CRYPTO is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
 
 #
 # Library routines
@@ -1266,6 +1430,7 @@
 CONFIG_BITREVERSE=y
 CONFIG_CRC_CCITT=m
 # CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
 # CONFIG_CRC_ITU_T is not set
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
new file mode 100644
index 0000000..ed15934
--- /dev/null
+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
@@ -0,0 +1,1368 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.28-rc2
+#
+# CONFIG_MMU is not set
+# CONFIG_FPU is not set
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
+CONFIG_BLACKFIN=y
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_FORCE_MAX_ZONEORDER=14
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+# CONFIG_ELF_CORE is not set
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_AIO=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_TINY_SHMEM=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
+# CONFIG_FREEZER is not set
+
+#
+# Blackfin Processor Options
+#
+
+#
+# Processor and Board Settings
+#
+# CONFIG_BF512 is not set
+# CONFIG_BF514 is not set
+# CONFIG_BF516 is not set
+# CONFIG_BF518 is not set
+# CONFIG_BF522 is not set
+# CONFIG_BF523 is not set
+# CONFIG_BF524 is not set
+# CONFIG_BF525 is not set
+# CONFIG_BF526 is not set
+# CONFIG_BF527 is not set
+# CONFIG_BF531 is not set
+# CONFIG_BF532 is not set
+# CONFIG_BF533 is not set
+# CONFIG_BF534 is not set
+# CONFIG_BF536 is not set
+# CONFIG_BF537 is not set
+CONFIG_BF538=y
+# CONFIG_BF539 is not set
+# CONFIG_BF542 is not set
+# CONFIG_BF544 is not set
+# CONFIG_BF547 is not set
+# CONFIG_BF548 is not set
+# CONFIG_BF549 is not set
+# CONFIG_BF561 is not set
+CONFIG_BF_REV_MIN=4
+CONFIG_BF_REV_MAX=5
+# CONFIG_BF_REV_0_0 is not set
+# CONFIG_BF_REV_0_1 is not set
+# CONFIG_BF_REV_0_2 is not set
+# CONFIG_BF_REV_0_3 is not set
+CONFIG_BF_REV_0_4=y
+# CONFIG_BF_REV_0_5 is not set
+# CONFIG_BF_REV_0_6 is not set
+# CONFIG_BF_REV_ANY is not set
+# CONFIG_BF_REV_NONE is not set
+CONFIG_MEM_MT48LC32M8A2_75=y
+CONFIG_IRQ_PLL_WAKEUP=7
+CONFIG_IRQ_DMA0_ERROR=7
+CONFIG_IRQ_PPI_ERROR=7
+CONFIG_IRQ_SPORT0_ERROR=7
+CONFIG_IRQ_SPORT1_ERROR=7
+CONFIG_IRQ_UART0_ERROR=7
+CONFIG_IRQ_UART1_ERROR=7
+CONFIG_IRQ_RTC=8
+CONFIG_IRQ_PPI=8
+CONFIG_IRQ_SPORT0_RX=9
+CONFIG_IRQ_SPORT0_TX=9
+CONFIG_IRQ_SPORT1_RX=9
+CONFIG_IRQ_SPORT1_TX=9
+CONFIG_IRQ_SPI0=10
+CONFIG_IRQ_UART0_RX=10
+CONFIG_IRQ_UART0_TX=10
+CONFIG_IRQ_UART1_RX=10
+CONFIG_IRQ_UART1_TX=10
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_WATCH=13
+CONFIG_IRQ_PORTF_INTA=12
+CONFIG_IRQ_PORTF_INTB=12
+CONFIG_IRQ_SPI0_ERROR=7
+CONFIG_IRQ_SPI1_ERROR=7
+CONFIG_IRQ_DMA1_ERROR=7
+CONFIG_IRQ_CAN_RX=11
+CONFIG_IRQ_CAN_TX=11
+CONFIG_BFIN538_EZKIT=y
+
+#
+# BF538 Specific Configuration
+#
+
+#
+# Interrupt Priority Assignment
+#
+
+#
+# Priority
+#
+CONFIG_IRQ_MEM0_DMA0=13
+CONFIG_IRQ_MEM0_DMA1=13
+CONFIG_IRQ_SPORT2_ERROR=7
+CONFIG_IRQ_SPORT3_ERROR=7
+CONFIG_IRQ_SPI2_ERROR=7
+CONFIG_IRQ_UART2_ERROR=7
+CONFIG_IRQ_CAN_ERROR=7
+CONFIG_IRQ_SPORT2_RX=9
+CONFIG_IRQ_SPORT2_TX=9
+CONFIG_IRQ_SPORT3_RX=9
+CONFIG_IRQ_SPORT3_TX=9
+CONFIG_IRQ_SPI1=10
+CONFIG_IRQ_SPI2=10
+CONFIG_IRQ_UART2_RX=10
+CONFIG_IRQ_UART2_TX=10
+CONFIG_IRQ_TWI0=11
+CONFIG_IRQ_TWI1=11
+CONFIG_IRQ_MEM1_DMA0=13
+CONFIG_IRQ_MEM1_DMA1=13
+
+#
+# Board customizations
+#
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_BOOT_LOAD=0x1000
+
+#
+# Clock/PLL Setup
+#
+CONFIG_CLKIN_HZ=25000000
+# CONFIG_BFIN_KERNEL_CLOCK is not set
+CONFIG_MAX_VCO_HZ=533333333
+CONFIG_MIN_VCO_HZ=50000000
+CONFIG_MAX_SCLK_HZ=133333333
+CONFIG_MIN_SCLK_HZ=27000000
+
+#
+# Kernel Timer/Scheduler
+#
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_SCHED_HRTICK=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_CYCLES_CLOCKSOURCE is not set
+CONFIG_TICK_ONESHOT=y
+# CONFIG_NO_HZ is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+
+#
+# Misc
+#
+CONFIG_BFIN_SCRATCH_REG_RETN=y
+# CONFIG_BFIN_SCRATCH_REG_RETE is not set
+# CONFIG_BFIN_SCRATCH_REG_CYCLES is not set
+
+#
+# Blackfin Kernel Optimizations
+#
+
+#
+# Memory Optimizations
+#
+CONFIG_I_ENTRY_L1=y
+CONFIG_EXCPT_IRQ_SYSC_L1=y
+CONFIG_DO_IRQ_L1=y
+CONFIG_CORE_TIMER_IRQ_L1=y
+CONFIG_IDLE_L1=y
+CONFIG_SCHEDULE_L1=y
+CONFIG_ARITHMETIC_OPS_L1=y
+CONFIG_ACCESS_OK_L1=y
+CONFIG_MEMSET_L1=y
+CONFIG_MEMCPY_L1=y
+CONFIG_SYS_BFIN_SPINLOCK_L1=y
+# CONFIG_IP_CHECKSUM_L1 is not set
+CONFIG_CACHELINE_ALIGNED_L1=y
+# CONFIG_SYSCALL_TAB_L1 is not set
+# CONFIG_CPLB_SWITCH_TAB_L1 is not set
+CONFIG_APP_STACK_L1=y
+
+#
+# Speed Optimizations
+#
+CONFIG_BFIN_INS_LOWOVERHEAD=y
+CONFIG_RAMKERNEL=y
+# CONFIG_ROMKERNEL is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_VIRT_TO_BUS=y
+CONFIG_BFIN_GPTIMERS=y
+# CONFIG_DMA_UNCACHED_4M is not set
+# CONFIG_DMA_UNCACHED_2M is not set
+CONFIG_DMA_UNCACHED_1M=y
+# CONFIG_DMA_UNCACHED_NONE is not set
+
+#
+# Cache Support
+#
+CONFIG_BFIN_ICACHE=y
+CONFIG_BFIN_DCACHE=y
+# CONFIG_BFIN_DCACHE_BANKA is not set
+# CONFIG_BFIN_ICACHE_LOCK is not set
+# CONFIG_BFIN_WB is not set
+CONFIG_BFIN_WT=y
+# CONFIG_MPU is not set
+
+#
+# Asynchonous Memory Configuration
+#
+
+#
+# EBIU_AMGCTL Global Control
+#
+CONFIG_C_AMCKEN=y
+CONFIG_C_CDPRIO=y
+# CONFIG_C_AMBEN is not set
+# CONFIG_C_AMBEN_B0 is not set
+# CONFIG_C_AMBEN_B0_B1 is not set
+# CONFIG_C_AMBEN_B0_B1_B2 is not set
+CONFIG_C_AMBEN_ALL=y
+
+#
+# EBIU_AMBCTL Control
+#
+CONFIG_BANK_0=0x7BB0
+CONFIG_BANK_1=0x7BB0
+CONFIG_BANK_2=0x7BB0
+CONFIG_BANK_3=0x99B2
+
+#
+# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
+#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF_FDPIC=y
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_ZFLAT=y
+# CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_PM_WAKEUP_BY_GPIO is not set
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETLABEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+CONFIG_IRDA=m
+
+#
+# IrDA protocols
+#
+CONFIG_IRLAN=m
+CONFIG_IRCOMM=m
+# CONFIG_IRDA_ULTRA is not set
+
+#
+# IrDA options
+#
+CONFIG_IRDA_CACHE_LAST_LSAP=y
+# CONFIG_IRDA_FAST_RR is not set
+# CONFIG_IRDA_DEBUG is not set
+
+#
+# Infrared-port device drivers
+#
+
+#
+# SIR device drivers
+#
+CONFIG_IRTTY_SIR=m
+CONFIG_BFIN_SIR=m
+CONFIG_SIR_BFIN_DMA=y
+# CONFIG_SIR_BFIN_PIO is not set
+
+#
+# Dongle support
+#
+# CONFIG_DONGLE is not set
+
+#
+# FIR device drivers
+#
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_PHONET is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_OLD_REGULATORY=y
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=m
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=m
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=m
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=m
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=m
+CONFIG_MTD_RAM=y
+CONFIG_MTD_ROM=m
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=m
+CONFIG_MTD_PHYSMAP_START=0x20000000
+CONFIG_MTD_PHYSMAP_LEN=0x0
+CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_UCLINUX is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=m
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_BFIN=m
+CONFIG_BFIN_NAND_BASE=0x20212000
+CONFIG_BFIN_NAND_CLE=2
+CONFIG_BFIN_NAND_ALE=1
+CONFIG_BFIN_NAND_READY=3
+CONFIG_MTD_NAND_IDS=m
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+CONFIG_SMSC_PHY=y
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_SMC91X=y
+# CONFIG_SMSC911X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+CONFIG_TOUCHSCREEN_AD7879_SPI=y
+CONFIG_TOUCHSCREEN_AD7879=m
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_UINPUT is not set
+# CONFIG_CONFIG_INPUT_PCF8574 is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_AD9960 is not set
+# CONFIG_SPI_ADC_BF533 is not set
+# CONFIG_BF5xx_PPIFCD is not set
+# CONFIG_BFIN_SIMPLE_TIMER is not set
+# CONFIG_BF5xx_PPI is not set
+CONFIG_BFIN_SPORT=y
+# CONFIG_BFIN_TIMER_LATENCY is not set
+# CONFIG_TWI_LCD is not set
+CONFIG_BFIN_DMA_INTERFACE=m
+CONFIG_SIMPLE_GPIO=m
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_BFIN_JTAG_COMM is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_BFIN=y
+CONFIG_SERIAL_BFIN_CONSOLE=y
+CONFIG_SERIAL_BFIN_DMA=y
+# CONFIG_SERIAL_BFIN_PIO is not set
+CONFIG_SERIAL_BFIN_UART0=y
+# CONFIG_BFIN_UART0_CTSRTS is not set
+CONFIG_SERIAL_BFIN_UART1=y
+# CONFIG_BFIN_UART1_CTSRTS is not set
+CONFIG_SERIAL_BFIN_UART2=y
+# CONFIG_BFIN_UART2_CTSRTS is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_BFIN_SPORT is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+
+#
+# CAN, the car bus and industrial fieldbus
+#
+# CONFIG_CAN4LINUX is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+# CONFIG_I2C_CHARDEV is not set
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+CONFIG_I2C_BLACKFIN_TWI=y
+CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_AT24 is not set
+# CONFIG_SENSORS_AD5252 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
+# CONFIG_SPI_BITBANG is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_AT25 is not set
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_BFIN_WDT=y
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=m
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=m
+CONFIG_FB_CFB_COPYAREA=m
+CONFIG_FB_CFB_IMAGEBLIT=m
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_BFIN_T350MCQB is not set
+CONFIG_FB_BFIN_LQ035Q1=m
+# CONFIG_FB_BFIN_7393 is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_LOGO is not set
+# CONFIG_SOUND is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HID_DEBUG is not set
+# CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
+CONFIG_HID_COMPAT=y
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_BFIN=y
+# CONFIG_DMADEVICES is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+# CONFIG_TMPFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_YAFFS_FS=m
+CONFIG_YAFFS_YAFFS1=y
+# CONFIG_YAFFS_9BYTE_TAGS is not set
+# CONFIG_YAFFS_DOES_ECC is not set
+CONFIG_YAFFS_YAFFS2=y
+CONFIG_YAFFS_AUTO_YAFFS2=y
+# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
+# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
+# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
+CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=m
+# CONFIG_SUNRPC_REGISTER_V4 is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=m
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_VERBOSE=y
+CONFIG_DEBUG_MMRS=y
+# CONFIG_DEBUG_HWERR is not set
+# CONFIG_DEBUG_DOUBLEFAULT is not set
+CONFIG_DEBUG_HUNT_FOR_ZERO=y
+CONFIG_DEBUG_BFIN_HWTRACE_ON=y
+CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
+# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE is not set
+# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set
+CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0
+# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
+# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
+CONFIG_EARLY_PRINTK=y
+CONFIG_CPLB_INFO=y
+CONFIG_ACCESS_CHECK=y
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_NETWORK is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_CRC_CCITT=m
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
index bf63660..d4ed9ce 100644
--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
@@ -1,6 +1,6 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.24.7
+# Linux kernel version: 2.6.28-rc2
 #
 # CONFIG_MMU is not set
 # CONFIG_FPU is not set
@@ -8,7 +8,6 @@
 # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
 CONFIG_BLACKFIN=y
 CONFIG_ZONE_DMA=y
-CONFIG_SEMAPHORE_SLEEPERS=y
 CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_HARDIRQS=y
@@ -31,18 +30,16 @@
 # CONFIG_POSIX_MQUEUE is not set
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
 # CONFIG_AUDIT is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CGROUPS is not set
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_FAIR_USER_SCHED=y
-# CONFIG_FAIR_CGROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
 # CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -51,26 +48,35 @@
 CONFIG_UID16=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
+CONFIG_COMPAT_BRK=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
 CONFIG_EPOLL=y
 CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
+CONFIG_AIO=y
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 CONFIG_TINY_SHMEM=y
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 # CONFIG_MODVERSIONS is not set
@@ -81,6 +87,7 @@
 # CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_LSF is not set
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -94,9 +101,11 @@
 # CONFIG_DEFAULT_CFQ is not set
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
 # CONFIG_PREEMPT_NONE is not set
 CONFIG_PREEMPT_VOLUNTARY=y
 # CONFIG_PREEMPT is not set
+# CONFIG_FREEZER is not set
 
 #
 # Blackfin Processor Options
@@ -105,6 +114,10 @@
 #
 # Processor and Board Settings
 #
+# CONFIG_BF512 is not set
+# CONFIG_BF514 is not set
+# CONFIG_BF516 is not set
+# CONFIG_BF518 is not set
 # CONFIG_BF522 is not set
 # CONFIG_BF523 is not set
 # CONFIG_BF524 is not set
@@ -117,18 +130,23 @@
 # CONFIG_BF534 is not set
 # CONFIG_BF536 is not set
 # CONFIG_BF537 is not set
+# CONFIG_BF538 is not set
+# CONFIG_BF539 is not set
 # CONFIG_BF542 is not set
 # CONFIG_BF544 is not set
 # CONFIG_BF547 is not set
 CONFIG_BF548=y
 # CONFIG_BF549 is not set
 # CONFIG_BF561 is not set
+CONFIG_BF_REV_MIN=0
+CONFIG_BF_REV_MAX=2
 CONFIG_BF_REV_0_0=y
 # CONFIG_BF_REV_0_1 is not set
 # CONFIG_BF_REV_0_2 is not set
 # CONFIG_BF_REV_0_3 is not set
 # CONFIG_BF_REV_0_4 is not set
 # CONFIG_BF_REV_0_5 is not set
+# CONFIG_BF_REV_0_6 is not set
 # CONFIG_BF_REV_ANY is not set
 # CONFIG_BF_REV_NONE is not set
 CONFIG_BF54x=y
@@ -138,15 +156,12 @@
 CONFIG_IRQ_SPORT0_TX=9
 CONFIG_IRQ_SPORT1_RX=9
 CONFIG_IRQ_SPORT1_TX=9
+CONFIG_IRQ_SPI0=10
 CONFIG_IRQ_UART0_RX=10
 CONFIG_IRQ_UART0_TX=10
 CONFIG_IRQ_UART1_RX=10
 CONFIG_IRQ_UART1_TX=10
 CONFIG_IRQ_CNT=8
-CONFIG_IRQ_USB_INT0=11
-CONFIG_IRQ_USB_INT1=11
-CONFIG_IRQ_USB_INT2=11
-CONFIG_IRQ_USB_DMA=11
 CONFIG_IRQ_TIMER0=11
 CONFIG_IRQ_TIMER1=11
 CONFIG_IRQ_TIMER2=11
@@ -155,9 +170,21 @@
 CONFIG_IRQ_TIMER5=11
 CONFIG_IRQ_TIMER6=11
 CONFIG_IRQ_TIMER7=11
+CONFIG_IRQ_USB_INT0=11
+CONFIG_IRQ_USB_INT1=11
+CONFIG_IRQ_USB_INT2=11
+CONFIG_IRQ_USB_DMA=11
 CONFIG_IRQ_TIMER8=11
 CONFIG_IRQ_TIMER9=11
 CONFIG_IRQ_TIMER10=11
+CONFIG_IRQ_SPORT2_RX=9
+CONFIG_IRQ_SPORT2_TX=9
+CONFIG_IRQ_SPORT3_RX=9
+CONFIG_IRQ_SPORT3_TX=9
+CONFIG_IRQ_SPI1=10
+CONFIG_IRQ_SPI2=10
+CONFIG_IRQ_TWI0=11
+CONFIG_IRQ_TWI1=11
 CONFIG_BFIN548_EZKIT=y
 # CONFIG_BFIN548_BLUETECHNIX_CM is not set
 
@@ -180,7 +207,6 @@
 CONFIG_IRQ_SPI0_ERR=7
 CONFIG_IRQ_UART0_ERR=7
 CONFIG_IRQ_EPPI0=8
-CONFIG_IRQ_SPI0=10
 CONFIG_IRQ_PINT0=12
 CONFIG_IRQ_PINT1=12
 CONFIG_IRQ_MDMAS0=13
@@ -195,18 +221,10 @@
 CONFIG_IRQ_UART1_ERR=7
 CONFIG_IRQ_UART2_ERR=7
 CONFIG_IRQ_CAN0_ERR=7
-CONFIG_IRQ_SPORT2_RX=9
-CONFIG_IRQ_SPORT2_TX=9
-CONFIG_IRQ_SPORT3_RX=9
-CONFIG_IRQ_SPORT3_TX=9
 CONFIG_IRQ_EPPI1=9
 CONFIG_IRQ_EPPI2=9
-CONFIG_IRQ_SPI1=10
-CONFIG_IRQ_SPI2=10
 CONFIG_IRQ_ATAPI_RX=10
 CONFIG_IRQ_ATAPI_TX=10
-CONFIG_IRQ_TWI0=11
-CONFIG_IRQ_TWI1=11
 CONFIG_IRQ_CAN0_RX=11
 CONFIG_IRQ_CAN0_TX=11
 CONFIG_IRQ_MDMAS2=13
@@ -260,7 +278,6 @@
 #
 CONFIG_CLKIN_HZ=25000000
 # CONFIG_BFIN_KERNEL_CLOCK is not set
-CONFIG_MAX_MEM_SIZE=512
 CONFIG_MAX_VCO_HZ=600000000
 CONFIG_MIN_VCO_HZ=50000000
 CONFIG_MAX_SCLK_HZ=133333333
@@ -274,10 +291,10 @@
 # CONFIG_HZ_300 is not set
 # CONFIG_HZ_1000 is not set
 CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 # CONFIG_CYCLES_CLOCKSOURCE is not set
-# CONFIG_TICK_ONESHOT is not set
 # CONFIG_NO_HZ is not set
 # CONFIG_HIGH_RES_TIMERS is not set
 CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
@@ -311,6 +328,12 @@
 CONFIG_CACHELINE_ALIGNED_L1=y
 # CONFIG_SYSCALL_TAB_L1 is not set
 # CONFIG_CPLB_SWITCH_TAB_L1 is not set
+CONFIG_APP_STACK_L1=y
+
+#
+# Speed Optimizations
+#
+CONFIG_BFIN_INS_LOWOVERHEAD=y
 CONFIG_RAMKERNEL=y
 # CONFIG_ROMKERNEL is not set
 CONFIG_SELECT_MEMORY_MODEL=y
@@ -319,14 +342,13 @@
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_VIRT_TO_BUS=y
 # CONFIG_BFIN_GPTIMERS is not set
-CONFIG_BFIN_DMA_5XX=y
 # CONFIG_DMA_UNCACHED_4M is not set
 CONFIG_DMA_UNCACHED_2M=y
 # CONFIG_DMA_UNCACHED_1M is not set
@@ -341,7 +363,7 @@
 # CONFIG_BFIN_ICACHE_LOCK is not set
 # CONFIG_BFIN_WB is not set
 CONFIG_BFIN_WT=y
-CONFIG_L1_MAX_PIECE=16
+# CONFIG_BFIN_L2_CACHEABLE is not set
 # CONFIG_MPU is not set
 
 #
@@ -373,7 +395,6 @@
 #
 # Bus options (PCI, PCMCIA, EISA, MCA, ISA)
 #
-# CONFIG_PCI is not set
 # CONFIG_ARCH_SUPPORTS_MSI is not set
 # CONFIG_PCCARD is not set
 
@@ -384,23 +405,20 @@
 CONFIG_BINFMT_FLAT=y
 CONFIG_BINFMT_ZFLAT=y
 # CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_HAVE_AOUT is not set
 # CONFIG_BINFMT_MISC is not set
 
 #
 # Power management options
 #
 # CONFIG_PM is not set
-CONFIG_SUSPEND_UP_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
 # CONFIG_PM_WAKEUP_BY_GPIO is not set
 
 #
 # CPU Frequency scaling
 #
 # CONFIG_CPU_FREQ is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
@@ -413,6 +431,7 @@
 # CONFIG_XFRM_USER is not set
 # CONFIG_XFRM_SUB_POLICY is not set
 # CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
 # CONFIG_NET_KEY is not set
 CONFIG_INET=y
 # CONFIG_IP_MULTICAST is not set
@@ -442,8 +461,6 @@
 CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_TCP_MD5SIG is not set
 # CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
 # CONFIG_NETLABEL is not set
 # CONFIG_NETWORK_SECMARK is not set
 # CONFIG_NETFILTER is not set
@@ -452,6 +469,7 @@
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_DECNET is not set
 # CONFIG_LLC2 is not set
@@ -468,6 +486,7 @@
 #
 # CONFIG_NET_PKTGEN is not set
 # CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
 CONFIG_IRDA=m
 
 #
@@ -493,9 +512,9 @@
 #
 CONFIG_IRTTY_SIR=m
 CONFIG_BFIN_SIR=m
+CONFIG_BFIN_SIR3=y
 # CONFIG_BFIN_SIR0 is not set
 # CONFIG_BFIN_SIR2 is not set
-CONFIG_BFIN_SIR3=y
 CONFIG_SIR_BFIN_DMA=y
 # CONFIG_SIR_BFIN_PIO is not set
 
@@ -508,15 +527,6 @@
 # CONFIG_KS959_DONGLE is not set
 
 #
-# Old SIR device drivers
-#
-# CONFIG_IRPORT_SIR is not set
-
-#
-# Old Serial dongle support
-#
-
-#
 # FIR device drivers
 #
 # CONFIG_USB_IRDA is not set
@@ -524,11 +534,10 @@
 # CONFIG_MCS_FIR is not set
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
+# CONFIG_PHONET is not set
+CONFIG_WIRELESS=y
 # CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_OLD_REGULATORY=y
 # CONFIG_WIRELESS_EXT is not set
 # CONFIG_MAC80211 is not set
 # CONFIG_IEEE80211 is not set
@@ -546,6 +555,8 @@
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 # CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
 # CONFIG_SYS_HYPERVISOR is not set
 # CONFIG_CONNECTOR is not set
 CONFIG_MTD=y
@@ -554,6 +565,7 @@
 CONFIG_MTD_PARTITIONS=y
 # CONFIG_MTD_REDBOOT_PARTS is not set
 CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
 
 #
 # User Modules And Translation Layers
@@ -601,6 +613,7 @@
 CONFIG_MTD_PHYSMAP_START=0x20000000
 CONFIG_MTD_PHYSMAP_LEN=0
 CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_GPIO_ADDR is not set
 # CONFIG_MTD_UCLINUX is not set
 # CONFIG_MTD_PLATRAM is not set
 
@@ -608,7 +621,8 @@
 # Self-contained MTD device drivers
 #
 # CONFIG_MTD_DATAFLASH is not set
-# CONFIG_MTD_M25P80 is not set
+CONFIG_MTD_M25P80=y
+CONFIG_M25PXX_USE_FAST_READ=y
 # CONFIG_MTD_SLRAM is not set
 # CONFIG_MTD_PHRAM is not set
 # CONFIG_MTD_MTDRAM is not set
@@ -648,11 +662,14 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
 #
@@ -696,13 +713,16 @@
 CONFIG_SCSI_LOWLEVEL=y
 # CONFIG_ISCSI_TCP is not set
 # CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
 CONFIG_ATA=y
 # CONFIG_ATA_NONSTANDARD is not set
+CONFIG_SATA_PMP=y
+CONFIG_ATA_SFF=y
+# CONFIG_SATA_MV is not set
 # CONFIG_PATA_PLATFORM is not set
 CONFIG_PATA_BF54X=y
 # CONFIG_MD is not set
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
 # CONFIG_MACVLAN is not set
@@ -715,11 +735,14 @@
 # CONFIG_SMC91X is not set
 CONFIG_SMSC911X=y
 # CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
 # CONFIG_IBM_NEW_EMAC_ZMII is not set
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set
-# CONFIG_B44 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NETDEV_1000=y
 # CONFIG_AX88180 is not set
 CONFIG_NETDEV_10000=y
@@ -729,6 +752,7 @@
 #
 # CONFIG_WLAN_PRE80211 is not set
 # CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
 
 #
 # USB Network Adapters
@@ -741,7 +765,6 @@
 # CONFIG_WAN is not set
 # CONFIG_PPP is not set
 # CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
 # CONFIG_NETCONSOLE is not set
 # CONFIG_NETPOLL is not set
 # CONFIG_NET_POLL_CONTROLLER is not set
@@ -752,7 +775,7 @@
 # Input device support
 #
 CONFIG_INPUT=y
-# CONFIG_INPUT_FF_MEMLESS is not set
+CONFIG_INPUT_FF_MEMLESS=m
 # CONFIG_INPUT_POLLDEV is not set
 
 #
@@ -776,30 +799,37 @@
 # CONFIG_KEYBOARD_GPIO is not set
 CONFIG_KEYBOARD_BFIN=y
 # CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_ADP5588 is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_INPUT_JOYSTICK is not set
 # CONFIG_INPUT_TABLET is not set
 CONFIG_INPUT_TOUCHSCREEN=y
 # CONFIG_TOUCHSCREEN_ADS7846 is not set
 CONFIG_TOUCHSCREEN_AD7877=m
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
 # CONFIG_TOUCHSCREEN_FUJITSU is not set
 # CONFIG_TOUCHSCREEN_GUNZE is not set
 # CONFIG_TOUCHSCREEN_ELO is not set
 # CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
 # CONFIG_TOUCHSCREEN_MK712 is not set
 # CONFIG_TOUCHSCREEN_PENMOUNT is not set
 # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
 # CONFIG_TOUCHSCREEN_TOUCHWIN is not set
-# CONFIG_TOUCHSCREEN_UCB1400 is not set
+# CONFIG_TOUCHSCREEN_WM97XX is not set
 # CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
 CONFIG_INPUT_MISC=y
 # CONFIG_INPUT_ATI_REMOTE is not set
 # CONFIG_INPUT_ATI_REMOTE2 is not set
 # CONFIG_INPUT_KEYSPAN_REMOTE is not set
 # CONFIG_INPUT_POWERMATE is not set
 # CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
 # CONFIG_INPUT_UINPUT is not set
-# CONFIG_TWI_KEYPAD is not set
+# CONFIG_CONFIG_INPUT_PCF8574 is not set
 
 #
 # Hardware I/O ports
@@ -815,16 +845,18 @@
 # CONFIG_BF5xx_PPIFCD is not set
 # CONFIG_BFIN_SIMPLE_TIMER is not set
 # CONFIG_BF5xx_PPI is not set
-CONFIG_BFIN_OTP=y
-# CONFIG_BFIN_OTP_WRITE_ENABLE is not set
 # CONFIG_BFIN_SPORT is not set
 # CONFIG_BFIN_TIMER_LATENCY is not set
 # CONFIG_TWI_LCD is not set
+CONFIG_BFIN_DMA_INTERFACE=m
 CONFIG_SIMPLE_GPIO=m
 CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
 CONFIG_VT_CONSOLE=y
 CONFIG_HW_CONSOLE=y
 # CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_BFIN_JTAG_COMM is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
@@ -849,6 +881,8 @@
 # CONFIG_SERIAL_BFIN_SPORT is not set
 CONFIG_UNIX98_PTYS=y
 # CONFIG_LEGACY_PTYS is not set
+CONFIG_BFIN_OTP=y
+# CONFIG_BFIN_OTP_WRITE_ENABLE is not set
 
 #
 # CAN, the car bus and industrial fieldbus
@@ -856,44 +890,49 @@
 # CONFIG_CAN4LINUX is not set
 # CONFIG_IPMI_HANDLER is not set
 # CONFIG_HW_RANDOM is not set
-# CONFIG_GEN_RTC is not set
 # CONFIG_R3964 is not set
 # CONFIG_RAW_DRIVER is not set
 # CONFIG_TCG_TPM is not set
 CONFIG_I2C=y
 CONFIG_I2C_BOARDINFO=y
 CONFIG_I2C_CHARDEV=y
-
-#
-# I2C Algorithms
-#
-# CONFIG_I2C_ALGOBIT is not set
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
+CONFIG_I2C_HELPER_AUTO=y
 
 #
 # I2C Hardware Bus support
 #
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
 CONFIG_I2C_BLACKFIN_TWI=y
-CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=50
+CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 # CONFIG_I2C_GPIO is not set
 # CONFIG_I2C_OCORES is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_TAOS_EVM is not set
-# CONFIG_I2C_STUB is not set
 # CONFIG_I2C_TINY_USB is not set
 
 #
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
 # Miscellaneous I2C Chip support
 #
-# CONFIG_SENSORS_DS1337 is not set
-# CONFIG_SENSORS_DS1374 is not set
 # CONFIG_DS1682 is not set
+# CONFIG_AT24 is not set
 # CONFIG_SENSORS_AD5252 is not set
 # CONFIG_SENSORS_EEPROM is not set
 # CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_SENSORS_PCF8575 is not set
+# CONFIG_PCF8575 is not set
 # CONFIG_SENSORS_PCA9539 is not set
 # CONFIG_SENSORS_PCF8591 is not set
 # CONFIG_SENSORS_MAX6875 is not set
@@ -902,17 +941,15 @@
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
 # CONFIG_I2C_DEBUG_CHIP is not set
-
-#
-# SPI support
-#
 CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
 CONFIG_SPI_MASTER=y
 
 #
 # SPI Master Controller Drivers
 #
 CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
 # CONFIG_SPI_BITBANG is not set
 
 #
@@ -921,11 +958,15 @@
 # CONFIG_SPI_AT25 is not set
 # CONFIG_SPI_SPIDEV is not set
 # CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
 # CONFIG_W1 is not set
 # CONFIG_POWER_SUPPLY is not set
 CONFIG_HWMON=y
 # CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_AD7414 is not set
 # CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
 # CONFIG_SENSORS_ADM1021 is not set
 # CONFIG_SENSORS_ADM1025 is not set
 # CONFIG_SENSORS_ADM1026 is not set
@@ -933,6 +974,7 @@
 # CONFIG_SENSORS_ADM1031 is not set
 # CONFIG_SENSORS_ADM9240 is not set
 # CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
 # CONFIG_SENSORS_ATXP1 is not set
 # CONFIG_SENSORS_DS1621 is not set
 # CONFIG_SENSORS_F71805F is not set
@@ -953,6 +995,7 @@
 # CONFIG_SENSORS_LM90 is not set
 # CONFIG_SENSORS_LM92 is not set
 # CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_MAX1111 is not set
 # CONFIG_SENSORS_MAX1619 is not set
 # CONFIG_SENSORS_MAX6650 is not set
 # CONFIG_SENSORS_PC87360 is not set
@@ -961,6 +1004,7 @@
 # CONFIG_SENSORS_SMSC47M1 is not set
 # CONFIG_SENSORS_SMSC47M192 is not set
 # CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
 # CONFIG_SENSORS_THMC50 is not set
 # CONFIG_SENSORS_VT1211 is not set
 # CONFIG_SENSORS_W83781D is not set
@@ -968,9 +1012,12 @@
 # CONFIG_SENSORS_W83792D is not set
 # CONFIG_SENSORS_W83793 is not set
 # CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
 # CONFIG_SENSORS_W83627HF is not set
 # CONFIG_SENSORS_W83627EHF is not set
 # CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
 CONFIG_WATCHDOG=y
 # CONFIG_WATCHDOG_NOWAYOUT is not set
 
@@ -986,23 +1033,30 @@
 # CONFIG_USBPCWATCHDOG is not set
 
 #
-# Sonics Silicon Backplane
-#
-CONFIG_SSB_POSSIBLE=y
-# CONFIG_SSB is not set
-
-#
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
 
 #
 # Multimedia devices
 #
+
+#
+# Multimedia core support
+#
 # CONFIG_VIDEO_DEV is not set
 # CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
 # CONFIG_DAB is not set
-# CONFIG_USB_DABUSB is not set
 
 #
 # Graphics support
@@ -1012,6 +1066,7 @@
 CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 # CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
 CONFIG_FB_CFB_FILLRECT=y
 CONFIG_FB_CFB_COPYAREA=y
 CONFIG_FB_CFB_IMAGEBLIT=y
@@ -1019,8 +1074,8 @@
 # CONFIG_FB_SYS_FILLRECT is not set
 # CONFIG_FB_SYS_COPYAREA is not set
 # CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
 # CONFIG_FB_SYS_FOPS is not set
-CONFIG_FB_DEFERRED_IO=y
 # CONFIG_FB_SVGALIB is not set
 # CONFIG_FB_MACMODES is not set
 # CONFIG_FB_BACKLIGHT is not set
@@ -1032,9 +1087,11 @@
 #
 CONFIG_FB_BF54X_LQ043=y
 # CONFIG_FB_BFIN_T350MCQB is not set
+# CONFIG_FB_BFIN_LQ035Q1 is not set
 # CONFIG_FB_BFIN_7393 is not set
 # CONFIG_FB_S1D13XXX is not set
 # CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
@@ -1066,15 +1123,8 @@
 # CONFIG_LOGO_LINUX_CLUT224 is not set
 # CONFIG_LOGO_BLACKFIN_VGA16 is not set
 CONFIG_LOGO_BLACKFIN_CLUT224=y
-
-#
-# Sound
-#
 CONFIG_SOUND=y
-
-#
-# Advanced Linux Sound Architecture
-#
+CONFIG_SOUND_OSS_CORE=y
 CONFIG_SND=y
 CONFIG_SND_TIMER=y
 CONFIG_SND_PCM=y
@@ -1088,56 +1138,35 @@
 CONFIG_SND_VERBOSE_PROCFS=y
 # CONFIG_SND_VERBOSE_PRINTK is not set
 # CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
+CONFIG_SND_DRIVERS=y
 # CONFIG_SND_DUMMY is not set
 # CONFIG_SND_MTPAV is not set
 # CONFIG_SND_SERIAL_U16550 is not set
 # CONFIG_SND_MPU401 is not set
-
-#
-# SPI devices
-#
+CONFIG_SND_SPI=y
 
 #
 # ALSA Blackfin devices
 #
 # CONFIG_SND_BLACKFIN_AD1836 is not set
-# CONFIG_SND_BFIN_AD73311 is not set
 # CONFIG_SND_BFIN_AD73322 is not set
-
-#
-# USB devices
-#
+CONFIG_SND_USB=y
 # CONFIG_SND_USB_AUDIO is not set
 # CONFIG_SND_USB_CAIAQ is not set
-
-#
-# System on Chip audio support
-#
-CONFIG_SND_SOC_AC97_BUS=y
 CONFIG_SND_SOC=y
-CONFIG_SND_BF5XX_SOC=y
-CONFIG_SND_MMAP_SUPPORT=y
+CONFIG_SND_SOC_AC97_BUS=y
+# CONFIG_SND_BF5XX_I2S is not set
+CONFIG_SND_BF5XX_AC97=y
+CONFIG_SND_BF5XX_MMAP_SUPPORT=y
+# CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set
+CONFIG_SND_BF5XX_SOC_SPORT=y
 CONFIG_SND_BF5XX_SOC_AC97=y
-CONFIG_SND_BF5XX_SOC_BF548_EZKIT=y
-# CONFIG_SND_BF5XX_SOC_WM8750 is not set
-# CONFIG_SND_BF5XX_SOC_WM8731 is not set
-# CONFIG_SND_BF5XX_SOC_SSM2602 is not set
+CONFIG_SND_BF5XX_SOC_AD1980=y
 CONFIG_SND_BF5XX_SPORT_NUM=0
 CONFIG_SND_BF5XX_HAVE_COLD_RESET=y
 CONFIG_SND_BF5XX_RESET_GPIO_NUM=19
-
-#
-# SoC Audio support for SuperH
-#
+# CONFIG_SND_SOC_ALL_CODECS is not set
 CONFIG_SND_SOC_AD1980=y
-
-#
-# Open Sound System
-#
 # CONFIG_SOUND_PRIME is not set
 CONFIG_AC97_BUS=y
 CONFIG_HID_SUPPORT=y
@@ -1149,15 +1178,43 @@
 # USB Input Devices
 #
 CONFIG_USB_HID=y
-# CONFIG_USB_HIDINPUT_POWERBOOK is not set
-# CONFIG_HID_FF is not set
+# CONFIG_HID_PID is not set
 # CONFIG_USB_HIDDEV is not set
+
+#
+# Special HID drivers
+#
+CONFIG_HID_COMPAT=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_BRIGHT=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DELL=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_LOGITECH=y
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_PANTHERLORD=y
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_THRUSTMASTER_FF=m
+CONFIG_ZEROPLUS_FF=m
 CONFIG_USB_SUPPORT=y
 CONFIG_USB_ARCH_HAS_HCD=y
 # CONFIG_USB_ARCH_HAS_OHCI is not set
 # CONFIG_USB_ARCH_HAS_EHCI is not set
 CONFIG_USB=y
 # CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
 
 #
 # Miscellaneous USB options
@@ -1168,15 +1225,20 @@
 # CONFIG_USB_OTG is not set
 # CONFIG_USB_OTG_WHITELIST is not set
 CONFIG_USB_OTG_BLACKLIST_HUB=y
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
 
 #
 # USB Host Controller Drivers
 #
+# CONFIG_USB_C67X00_HCD is not set
 # CONFIG_USB_ISP116X_HCD is not set
-# CONFIG_USB_ISP1362_HCD is not set
 # CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
 # CONFIG_USB_SL811_HCD is not set
 # CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
 CONFIG_USB_MUSB_HDRC=y
 CONFIG_USB_MUSB_SOC=y
 
@@ -1190,13 +1252,15 @@
 # CONFIG_MUSB_PIO_ONLY is not set
 CONFIG_USB_INVENTRA_DMA=y
 # CONFIG_USB_TI_CPPI_DMA is not set
-CONFIG_USB_MUSB_LOGLEVEL=0
+# CONFIG_USB_MUSB_DEBUG is not set
 
 #
 # USB Device Class drivers
 #
 # CONFIG_USB_ACM is not set
 # CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
 
 #
 # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
@@ -1218,6 +1282,7 @@
 # CONFIG_USB_STORAGE_ALAUDA is not set
 # CONFIG_USB_STORAGE_ONETOUCH is not set
 # CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
 # CONFIG_USB_LIBUSUAL is not set
 
 #
@@ -1225,15 +1290,10 @@
 #
 # CONFIG_USB_MDC800 is not set
 # CONFIG_USB_MICROTEK is not set
-CONFIG_USB_MON=y
 
 #
 # USB port drivers
 #
-
-#
-# USB Serial Converter support
-#
 # CONFIG_USB_SERIAL is not set
 
 #
@@ -1242,7 +1302,7 @@
 # CONFIG_USB_EMI62 is not set
 # CONFIG_USB_EMI26 is not set
 # CONFIG_USB_ADUTUX is not set
-# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_SEVSEG is not set
 # CONFIG_USB_RIO500 is not set
 # CONFIG_USB_LEGOTOWER is not set
 # CONFIG_USB_LCD is not set
@@ -1258,34 +1318,31 @@
 # CONFIG_USB_LD is not set
 # CONFIG_USB_TRANCEVIBRATOR is not set
 # CONFIG_USB_IOWARRIOR is not set
-
-#
-# USB DSL modem support
-#
-
-#
-# USB Gadget Support
-#
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
 # CONFIG_USB_GADGET is not set
 CONFIG_MMC=m
 # CONFIG_MMC_DEBUG is not set
 # CONFIG_MMC_UNSAFE_RESUME is not set
 
 #
-# MMC/SD Card Drivers
+# MMC/SD/SDIO Card Drivers
 #
 CONFIG_MMC_BLOCK=m
 CONFIG_MMC_BLOCK_BOUNCE=y
 # CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
 
 #
-# MMC/SD Host Controller Drivers
+# MMC/SD/SDIO Host Controller Drivers
 #
+# CONFIG_MMC_SDHCI is not set
 CONFIG_SDH_BFIN=m
 # CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND is not set
 # CONFIG_MMC_SPI is not set
-# CONFIG_SPI_MMC is not set
+# CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
 CONFIG_RTC_LIB=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_HCTOSYS=y
@@ -1314,32 +1371,40 @@
 # CONFIG_RTC_DRV_PCF8563 is not set
 # CONFIG_RTC_DRV_PCF8583 is not set
 # CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
 
 #
 # SPI RTC drivers
 #
-# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
 # CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
 
 #
 # Platform RTC drivers
 #
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
 # CONFIG_RTC_DRV_DS1553 is not set
-# CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
 # CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
 # CONFIG_RTC_DRV_V3020 is not set
 
 #
 # on-CPU RTC drivers
 #
 CONFIG_RTC_DRV_BFIN=y
-
-#
-# Userspace I/O
-#
+# CONFIG_DMADEVICES is not set
 # CONFIG_UIO is not set
+# CONFIG_STAGING is not set
 
 #
 # File systems
@@ -1352,22 +1417,20 @@
 CONFIG_EXT3_FS_XATTR=y
 # CONFIG_EXT3_FS_POSIX_ACL is not set
 # CONFIG_EXT3_FS_SECURITY is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
 CONFIG_JBD=y
 # CONFIG_JBD_DEBUG is not set
 CONFIG_FS_MBCACHE=y
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 # CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
 # CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
+# CONFIG_DNOTIFY is not set
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
-# CONFIG_DNOTIFY is not set
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
 # CONFIG_FUSE_FS is not set
@@ -1414,11 +1477,11 @@
 # CONFIG_EFS_FS is not set
 CONFIG_YAFFS_FS=m
 CONFIG_YAFFS_YAFFS1=y
+# CONFIG_YAFFS_9BYTE_TAGS is not set
 # CONFIG_YAFFS_DOES_ECC is not set
 CONFIG_YAFFS_YAFFS2=y
 CONFIG_YAFFS_AUTO_YAFFS2=y
 # CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
-CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS=10
 # CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
 # CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
 CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
@@ -1435,8 +1498,11 @@
 # CONFIG_JFFS2_RUBIN is not set
 # CONFIG_CRAMFS is not set
 # CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
 # CONFIG_SYSV_FS is not set
 # CONFIG_UFS_FS is not set
 CONFIG_NETWORK_FILESYSTEMS=y
@@ -1444,18 +1510,16 @@
 CONFIG_NFS_V3=y
 # CONFIG_NFS_V3_ACL is not set
 # CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
 # CONFIG_NFSD_V3_ACL is not set
 # CONFIG_NFSD_V4 is not set
-CONFIG_NFSD_TCP=y
 CONFIG_LOCKD=m
 CONFIG_LOCKD_V4=y
 CONFIG_EXPORTFS=m
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=m
-# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_SUNRPC_REGISTER_V4 is not set
 # CONFIG_RPCSEC_GSS_KRB5 is not set
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 CONFIG_SMB_FS=m
@@ -1533,9 +1597,6 @@
 CONFIG_NLS_KOI8_U=m
 CONFIG_NLS_UTF8=m
 # CONFIG_DLM is not set
-CONFIG_INSTRUMENTATION=y
-# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
 
 #
 # Kernel hacking
@@ -1543,14 +1604,53 @@
 # CONFIG_PRINTK_TIME is not set
 CONFIG_ENABLE_WARN_DEPRECATED=y
 CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
 # CONFIG_MAGIC_SYSRQ is not set
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_HEADERS_CHECK is not set
-# CONFIG_DEBUG_KERNEL is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
 # CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_VERBOSE=y
 CONFIG_DEBUG_MMRS=y
+# CONFIG_DEBUG_HWERR is not set
+# CONFIG_DEBUG_DOUBLEFAULT is not set
 CONFIG_DEBUG_HUNT_FOR_ZERO=y
 CONFIG_DEBUG_BFIN_HWTRACE_ON=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
@@ -1568,10 +1668,95 @@
 #
 # CONFIG_KEYS is not set
 CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
 # CONFIG_SECURITY_NETWORK is not set
-# CONFIG_SECURITY_CAPABILITIES is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
 # CONFIG_SECURITY_ROOTPLUG is not set
-# CONFIG_CRYPTO is not set
+CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
 
 #
 # Library routines
@@ -1579,6 +1764,7 @@
 CONFIG_BITREVERSE=y
 CONFIG_CRC_CCITT=m
 # CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
 # CONFIG_CRC_ITU_T is not set
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index 3c70d62..1ecb7a3 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -1,6 +1,6 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.24.7
+# Linux kernel version: 2.6.28-rc2
 #
 # CONFIG_MMU is not set
 # CONFIG_FPU is not set
@@ -8,7 +8,6 @@
 # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
 CONFIG_BLACKFIN=y
 CONFIG_ZONE_DMA=y
-CONFIG_SEMAPHORE_SLEEPERS=y
 CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_HARDIRQS=y
@@ -31,18 +30,16 @@
 # CONFIG_POSIX_MQUEUE is not set
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
 # CONFIG_AUDIT is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CGROUPS is not set
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_FAIR_USER_SCHED=y
-# CONFIG_FAIR_CGROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
 # CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -51,26 +48,35 @@
 CONFIG_UID16=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
+CONFIG_COMPAT_BRK=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
 CONFIG_EPOLL=y
 CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
+CONFIG_AIO=y
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 CONFIG_TINY_SHMEM=y
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 # CONFIG_MODVERSIONS is not set
@@ -81,6 +87,7 @@
 # CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_LSF is not set
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -94,9 +101,11 @@
 # CONFIG_DEFAULT_CFQ is not set
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
 # CONFIG_PREEMPT_NONE is not set
 CONFIG_PREEMPT_VOLUNTARY=y
 # CONFIG_PREEMPT is not set
+# CONFIG_FREEZER is not set
 
 #
 # Blackfin Processor Options
@@ -105,6 +114,10 @@
 #
 # Processor and Board Settings
 #
+# CONFIG_BF512 is not set
+# CONFIG_BF514 is not set
+# CONFIG_BF516 is not set
+# CONFIG_BF518 is not set
 # CONFIG_BF522 is not set
 # CONFIG_BF523 is not set
 # CONFIG_BF524 is not set
@@ -117,24 +130,38 @@
 # CONFIG_BF534 is not set
 # CONFIG_BF536 is not set
 # CONFIG_BF537 is not set
+# CONFIG_BF538 is not set
+# CONFIG_BF539 is not set
 # CONFIG_BF542 is not set
 # CONFIG_BF544 is not set
 # CONFIG_BF547 is not set
 # CONFIG_BF548 is not set
 # CONFIG_BF549 is not set
 CONFIG_BF561=y
+# CONFIG_SMP is not set
+CONFIG_BF_REV_MIN=3
+CONFIG_BF_REV_MAX=5
 # CONFIG_BF_REV_0_0 is not set
 # CONFIG_BF_REV_0_1 is not set
 # CONFIG_BF_REV_0_2 is not set
 CONFIG_BF_REV_0_3=y
 # CONFIG_BF_REV_0_4 is not set
 # CONFIG_BF_REV_0_5 is not set
+# CONFIG_BF_REV_0_6 is not set
 # CONFIG_BF_REV_ANY is not set
 # CONFIG_BF_REV_NONE is not set
 CONFIG_MEM_MT48LC16M16A2TG_75=y
 CONFIG_IRQ_PLL_WAKEUP=7
 CONFIG_IRQ_SPORT0_ERROR=7
 CONFIG_IRQ_SPORT1_ERROR=7
+CONFIG_IRQ_TIMER0=10
+CONFIG_IRQ_TIMER1=10
+CONFIG_IRQ_TIMER2=10
+CONFIG_IRQ_TIMER3=10
+CONFIG_IRQ_TIMER4=10
+CONFIG_IRQ_TIMER5=10
+CONFIG_IRQ_TIMER6=10
+CONFIG_IRQ_TIMER7=10
 CONFIG_IRQ_SPI_ERROR=7
 CONFIG_BFIN561_EZKIT=y
 # CONFIG_BFIN561_TEPLA is not set
@@ -148,10 +175,6 @@
 #
 # Core B Support
 #
-
-#
-# Core B Support
-#
 CONFIG_BF561_COREB=y
 CONFIG_BF561_COREB_RESET=y
 
@@ -193,14 +216,6 @@
 CONFIG_IRQ_DMA2_9=9
 CONFIG_IRQ_DMA2_10=9
 CONFIG_IRQ_DMA2_11=9
-CONFIG_IRQ_TIMER0=10
-CONFIG_IRQ_TIMER1=10
-CONFIG_IRQ_TIMER2=10
-CONFIG_IRQ_TIMER3=10
-CONFIG_IRQ_TIMER4=10
-CONFIG_IRQ_TIMER5=10
-CONFIG_IRQ_TIMER6=10
-CONFIG_IRQ_TIMER7=10
 CONFIG_IRQ_TIMER8=10
 CONFIG_IRQ_TIMER9=10
 CONFIG_IRQ_TIMER10=10
@@ -230,7 +245,6 @@
 #
 CONFIG_CLKIN_HZ=30000000
 # CONFIG_BFIN_KERNEL_CLOCK is not set
-CONFIG_MAX_MEM_SIZE=512
 CONFIG_MAX_VCO_HZ=600000000
 CONFIG_MIN_VCO_HZ=50000000
 CONFIG_MAX_SCLK_HZ=133333333
@@ -244,6 +258,7 @@
 # CONFIG_HZ_300 is not set
 # CONFIG_HZ_1000 is not set
 CONFIG_HZ=250
+CONFIG_SCHED_HRTICK=y
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 # CONFIG_CYCLES_CLOCKSOURCE is not set
@@ -281,6 +296,12 @@
 CONFIG_CACHELINE_ALIGNED_L1=y
 # CONFIG_SYSCALL_TAB_L1 is not set
 # CONFIG_CPLB_SWITCH_TAB_L1 is not set
+CONFIG_APP_STACK_L1=y
+
+#
+# Speed Optimizations
+#
+CONFIG_BFIN_INS_LOWOVERHEAD=y
 CONFIG_RAMKERNEL=y
 # CONFIG_ROMKERNEL is not set
 CONFIG_SELECT_MEMORY_MODEL=y
@@ -289,14 +310,13 @@
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_VIRT_TO_BUS=y
 # CONFIG_BFIN_GPTIMERS is not set
-CONFIG_BFIN_DMA_5XX=y
 # CONFIG_DMA_UNCACHED_4M is not set
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
@@ -311,7 +331,7 @@
 # CONFIG_BFIN_ICACHE_LOCK is not set
 # CONFIG_BFIN_WB is not set
 CONFIG_BFIN_WT=y
-CONFIG_L1_MAX_PIECE=16
+# CONFIG_BFIN_L2_CACHEABLE is not set
 # CONFIG_MPU is not set
 
 #
@@ -344,7 +364,6 @@
 #
 # Bus options (PCI, PCMCIA, EISA, MCA, ISA)
 #
-# CONFIG_PCI is not set
 # CONFIG_ARCH_SUPPORTS_MSI is not set
 # CONFIG_PCCARD is not set
 
@@ -355,23 +374,20 @@
 CONFIG_BINFMT_FLAT=y
 CONFIG_BINFMT_ZFLAT=y
 # CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_HAVE_AOUT is not set
 # CONFIG_BINFMT_MISC is not set
 
 #
 # Power management options
 #
 # CONFIG_PM is not set
-CONFIG_SUSPEND_UP_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
 # CONFIG_PM_WAKEUP_BY_GPIO is not set
 
 #
 # CPU Frequency scaling
 #
 # CONFIG_CPU_FREQ is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
@@ -384,6 +400,7 @@
 # CONFIG_XFRM_USER is not set
 # CONFIG_XFRM_SUB_POLICY is not set
 # CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
 # CONFIG_NET_KEY is not set
 CONFIG_INET=y
 # CONFIG_IP_MULTICAST is not set
@@ -413,8 +430,6 @@
 CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_TCP_MD5SIG is not set
 # CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
 # CONFIG_NETLABEL is not set
 # CONFIG_NETWORK_SECMARK is not set
 # CONFIG_NETFILTER is not set
@@ -423,6 +438,7 @@
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_DECNET is not set
 # CONFIG_LLC2 is not set
@@ -439,6 +455,7 @@
 #
 # CONFIG_NET_PKTGEN is not set
 # CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
 CONFIG_IRDA=m
 
 #
@@ -471,24 +488,14 @@
 # CONFIG_DONGLE is not set
 
 #
-# Old SIR device drivers
-#
-# CONFIG_IRPORT_SIR is not set
-
-#
-# Old Serial dongle support
-#
-
-#
 # FIR device drivers
 #
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
+# CONFIG_PHONET is not set
+CONFIG_WIRELESS=y
 # CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_OLD_REGULATORY=y
 # CONFIG_WIRELESS_EXT is not set
 # CONFIG_MAC80211 is not set
 # CONFIG_IEEE80211 is not set
@@ -506,6 +513,8 @@
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 # CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
 # CONFIG_SYS_HYPERVISOR is not set
 # CONFIG_CONNECTOR is not set
 CONFIG_MTD=y
@@ -514,6 +523,7 @@
 CONFIG_MTD_PARTITIONS=y
 # CONFIG_MTD_REDBOOT_PARTS is not set
 CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
 
 #
 # User Modules And Translation Layers
@@ -595,11 +605,14 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
 #
@@ -612,7 +625,6 @@
 # CONFIG_ATA is not set
 # CONFIG_MD is not set
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
 # CONFIG_MACVLAN is not set
@@ -625,11 +637,14 @@
 CONFIG_SMC91X=y
 # CONFIG_SMSC911X is not set
 # CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
 # CONFIG_IBM_NEW_EMAC_ZMII is not set
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set
-# CONFIG_B44 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NETDEV_1000=y
 # CONFIG_AX88180 is not set
 CONFIG_NETDEV_10000=y
@@ -639,10 +654,10 @@
 #
 # CONFIG_WLAN_PRE80211 is not set
 # CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
 # CONFIG_WAN is not set
 # CONFIG_PPP is not set
 # CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
 # CONFIG_NETCONSOLE is not set
 # CONFIG_NETPOLL is not set
 # CONFIG_NET_POLL_CONTROLLER is not set
@@ -690,8 +705,11 @@
 # CONFIG_BF5xx_PPI is not set
 # CONFIG_BFIN_SPORT is not set
 # CONFIG_BFIN_TIMER_LATENCY is not set
+CONFIG_BFIN_DMA_INTERFACE=m
 CONFIG_SIMPLE_GPIO=m
 # CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_BFIN_JTAG_COMM is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
@@ -720,22 +738,19 @@
 # CONFIG_CAN4LINUX is not set
 # CONFIG_IPMI_HANDLER is not set
 # CONFIG_HW_RANDOM is not set
-# CONFIG_GEN_RTC is not set
 # CONFIG_R3964 is not set
 # CONFIG_RAW_DRIVER is not set
 # CONFIG_TCG_TPM is not set
 # CONFIG_I2C is not set
-
-#
-# SPI support
-#
 CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
 CONFIG_SPI_MASTER=y
 
 #
 # SPI Master Controller Drivers
 #
 CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
 # CONFIG_SPI_BITBANG is not set
 
 #
@@ -744,14 +759,18 @@
 # CONFIG_SPI_AT25 is not set
 # CONFIG_SPI_SPIDEV is not set
 # CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
 # CONFIG_W1 is not set
 # CONFIG_POWER_SUPPLY is not set
 CONFIG_HWMON=y
 # CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADCXX is not set
 # CONFIG_SENSORS_F71805F is not set
 # CONFIG_SENSORS_F71882FG is not set
 # CONFIG_SENSORS_IT87 is not set
 # CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_MAX1111 is not set
 # CONFIG_SENSORS_PC87360 is not set
 # CONFIG_SENSORS_PC87427 is not set
 # CONFIG_SENSORS_SMSC47M1 is not set
@@ -760,6 +779,8 @@
 # CONFIG_SENSORS_W83627HF is not set
 # CONFIG_SENSORS_W83627EHF is not set
 # CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
 CONFIG_WATCHDOG=y
 # CONFIG_WATCHDOG_NOWAYOUT is not set
 
@@ -770,21 +791,28 @@
 CONFIG_BFIN_WDT=y
 
 #
-# Sonics Silicon Backplane
-#
-CONFIG_SSB_POSSIBLE=y
-# CONFIG_SSB is not set
-
-#
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
 
 #
 # Multimedia devices
 #
+
+#
+# Multimedia core support
+#
 # CONFIG_VIDEO_DEV is not set
 # CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
 # CONFIG_DAB is not set
 
 #
@@ -799,43 +827,43 @@
 # Display device support
 #
 # CONFIG_DISPLAY_SUPPORT is not set
-
-#
-# Sound
-#
 # CONFIG_SOUND is not set
 CONFIG_HID_SUPPORT=y
 CONFIG_HID=m
 # CONFIG_HID_DEBUG is not set
 # CONFIG_HIDRAW is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_MMC is not set
-# CONFIG_NEW_LEDS is not set
-# CONFIG_RTC_CLASS is not set
+# CONFIG_HID_PID is not set
 
 #
-# Userspace I/O
+# Special HID drivers
 #
+CONFIG_HID_COMPAT=y
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
 # CONFIG_UIO is not set
+# CONFIG_STAGING is not set
 
 #
 # File systems
 #
 # CONFIG_EXT2_FS is not set
 # CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 # CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
 # CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
+# CONFIG_DNOTIFY is not set
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
-# CONFIG_DNOTIFY is not set
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
 # CONFIG_FUSE_FS is not set
@@ -875,11 +903,11 @@
 # CONFIG_EFS_FS is not set
 CONFIG_YAFFS_FS=m
 CONFIG_YAFFS_YAFFS1=y
+# CONFIG_YAFFS_9BYTE_TAGS is not set
 # CONFIG_YAFFS_DOES_ECC is not set
 CONFIG_YAFFS_YAFFS2=y
 CONFIG_YAFFS_AUTO_YAFFS2=y
 # CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
-CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS=10
 # CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
 # CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
 CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
@@ -896,8 +924,11 @@
 # CONFIG_JFFS2_RUBIN is not set
 # CONFIG_CRAMFS is not set
 # CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
 # CONFIG_SYSV_FS is not set
 # CONFIG_UFS_FS is not set
 CONFIG_NETWORK_FILESYSTEMS=y
@@ -905,13 +936,12 @@
 CONFIG_NFS_V3=y
 # CONFIG_NFS_V3_ACL is not set
 # CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
 # CONFIG_NFSD is not set
 CONFIG_LOCKD=m
 CONFIG_LOCKD_V4=y
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=m
-# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_SUNRPC_REGISTER_V4 is not set
 # CONFIG_RPCSEC_GSS_KRB5 is not set
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 CONFIG_SMB_FS=m
@@ -967,9 +997,6 @@
 # CONFIG_NLS_KOI8_U is not set
 # CONFIG_NLS_UTF8 is not set
 # CONFIG_DLM is not set
-CONFIG_INSTRUMENTATION=y
-# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
 
 #
 # Kernel hacking
@@ -977,14 +1004,53 @@
 # CONFIG_PRINTK_TIME is not set
 CONFIG_ENABLE_WARN_DEPRECATED=y
 CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
 # CONFIG_MAGIC_SYSRQ is not set
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_HEADERS_CHECK is not set
-# CONFIG_DEBUG_KERNEL is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
 # CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_VERBOSE=y
 CONFIG_DEBUG_MMRS=y
+# CONFIG_DEBUG_HWERR is not set
+# CONFIG_DEBUG_DOUBLEFAULT is not set
 CONFIG_DEBUG_HUNT_FOR_ZERO=y
 CONFIG_DEBUG_BFIN_HWTRACE_ON=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
@@ -994,7 +1060,6 @@
 # CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
 # CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
 CONFIG_EARLY_PRINTK=y
-# CONFIG_DUAL_CORE_TEST_MODULE is not set
 CONFIG_CPLB_INFO=y
 CONFIG_ACCESS_CHECK=y
 
@@ -1003,9 +1068,94 @@
 #
 # CONFIG_KEYS is not set
 CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
 # CONFIG_SECURITY_NETWORK is not set
-# CONFIG_SECURITY_CAPABILITIES is not set
-# CONFIG_CRYPTO is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
 
 #
 # Library routines
@@ -1013,6 +1163,7 @@
 CONFIG_BITREVERSE=y
 CONFIG_CRC_CCITT=m
 # CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
 # CONFIG_CRC_ITU_T is not set
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
index 2921f99..9683b2e 100644
--- a/arch/blackfin/configs/BlackStamp_defconfig
+++ b/arch/blackfin/configs/BlackStamp_defconfig
@@ -53,7 +53,7 @@
 CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
 CONFIG_COMPAT_BRK=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
@@ -276,7 +276,6 @@
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_VIRT_TO_BUS=y
 CONFIG_BFIN_GPTIMERS=y
-CONFIG_BFIN_DMA_5XX=y
 # CONFIG_DMA_UNCACHED_4M is not set
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
index b6a1463..a041e7e 100644
--- a/arch/blackfin/configs/CM-BF527_defconfig
+++ b/arch/blackfin/configs/CM-BF527_defconfig
@@ -42,7 +42,7 @@
 CONFIG_FAIR_GROUP_SCHED=y
 CONFIG_FAIR_USER_SCHED=y
 # CONFIG_FAIR_CGROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_SYSFS_DEPRECATED is not set
 # CONFIG_RELAY is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
@@ -56,7 +56,7 @@
 CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
@@ -190,14 +190,14 @@
 CONFIG_IRQ_PORTH_INTA=11
 CONFIG_IRQ_MAC_TX=11
 CONFIG_IRQ_PORTH_INTB=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
 CONFIG_IRQ_PORTG_INTA=12
 CONFIG_IRQ_PORTG_INTB=12
 CONFIG_IRQ_MEM_DMA0=13
@@ -292,7 +292,6 @@
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_VIRT_TO_BUS=y
 CONFIG_BFIN_GPTIMERS=y
-CONFIG_BFIN_DMA_5XX=y
 # CONFIG_DMA_UNCACHED_4M is not set
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
@@ -650,6 +649,7 @@
 # CONFIG_TWI_LCD is not set
 CONFIG_SIMPLE_GPIO=m
 # CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
@@ -699,7 +699,7 @@
 # I2C Hardware Bus support
 #
 CONFIG_I2C_BLACKFIN_TWI=m
-CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=50
+CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 # CONFIG_I2C_GPIO is not set
 # CONFIG_I2C_OCORES is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
diff --git a/arch/blackfin/configs/CM-BF533_defconfig b/arch/blackfin/configs/CM-BF533_defconfig
index c3ba906..085211b 100644
--- a/arch/blackfin/configs/CM-BF533_defconfig
+++ b/arch/blackfin/configs/CM-BF533_defconfig
@@ -42,7 +42,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_SYSFS_DEPRECATED is not set
 # CONFIG_RELAY is not set
 # CONFIG_BLK_DEV_INITRD is not set
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -55,7 +55,7 @@
 # CONFIG_HOTPLUG is not set
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
@@ -254,7 +254,6 @@
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_LARGE_ALLOCS=y
 # CONFIG_BFIN_GPTIMERS is not set
-CONFIG_BFIN_DMA_5XX=y
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
 # CONFIG_DMA_UNCACHED_NONE is not set
@@ -598,6 +597,7 @@
 CONFIG_BFIN_SPORT=y
 # CONFIG_BFIN_TIMER_LATENCY is not set
 # CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
diff --git a/arch/blackfin/configs/CM-BF537E_defconfig b/arch/blackfin/configs/CM-BF537E_defconfig
index cdc6b7f..750203e 100644
--- a/arch/blackfin/configs/CM-BF537E_defconfig
+++ b/arch/blackfin/configs/CM-BF537E_defconfig
@@ -42,7 +42,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_SYSFS_DEPRECATED is not set
 # CONFIG_RELAY is not set
 # CONFIG_BLK_DEV_INITRD is not set
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -55,7 +55,7 @@
 # CONFIG_HOTPLUG is not set
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
@@ -157,14 +157,14 @@
 CONFIG_IRQ_UART1_TX=10
 CONFIG_IRQ_MAC_RX=11
 CONFIG_IRQ_MAC_TX=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
 CONFIG_IRQ_PORTG_INTB=12
 CONFIG_IRQ_MEM_DMA0=13
 CONFIG_IRQ_MEM_DMA1=13
@@ -262,7 +262,6 @@
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_LARGE_ALLOCS=y
 # CONFIG_BFIN_GPTIMERS is not set
-CONFIG_BFIN_DMA_5XX=y
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
 # CONFIG_DMA_UNCACHED_NONE is not set
@@ -627,6 +626,7 @@
 CONFIG_BFIN_SPORT=y
 # CONFIG_BFIN_TIMER_LATENCY is not set
 # CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
diff --git a/arch/blackfin/configs/CM-BF537U_defconfig b/arch/blackfin/configs/CM-BF537U_defconfig
index f074bdc..dec8a7d 100644
--- a/arch/blackfin/configs/CM-BF537U_defconfig
+++ b/arch/blackfin/configs/CM-BF537U_defconfig
@@ -42,7 +42,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_SYSFS_DEPRECATED is not set
 # CONFIG_RELAY is not set
 # CONFIG_BLK_DEV_INITRD is not set
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -55,7 +55,7 @@
 # CONFIG_HOTPLUG is not set
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
@@ -157,14 +157,14 @@
 CONFIG_IRQ_UART1_TX=10
 CONFIG_IRQ_MAC_RX=11
 CONFIG_IRQ_MAC_TX=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
 CONFIG_IRQ_PORTG_INTB=12
 CONFIG_IRQ_MEM_DMA0=13
 CONFIG_IRQ_MEM_DMA1=13
@@ -262,7 +262,6 @@
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_LARGE_ALLOCS=y
 # CONFIG_BFIN_GPTIMERS is not set
-CONFIG_BFIN_DMA_5XX=y
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
 # CONFIG_DMA_UNCACHED_NONE is not set
@@ -607,6 +606,7 @@
 CONFIG_BFIN_SPORT=y
 # CONFIG_BFIN_TIMER_LATENCY is not set
 # CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index 5c44fdb..efd68bc 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -41,7 +41,7 @@
 CONFIG_FAIR_GROUP_SCHED=y
 CONFIG_FAIR_USER_SCHED=y
 # CONFIG_FAIR_CGROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_SYSFS_DEPRECATED is not set
 # CONFIG_RELAY is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
@@ -55,7 +55,7 @@
 CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
@@ -325,7 +325,6 @@
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_VIRT_TO_BUS=y
 # CONFIG_BFIN_GPTIMERS is not set
-CONFIG_BFIN_DMA_5XX=y
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
 # CONFIG_DMA_UNCACHED_NONE is not set
@@ -544,7 +543,7 @@
 CONFIG_MTD_COMPLEX_MAPPINGS=y
 CONFIG_MTD_PHYSMAP=y
 CONFIG_MTD_PHYSMAP_START=0x20000000
-CONFIG_MTD_PHYSMAP_LEN=0x800000
+CONFIG_MTD_PHYSMAP_LEN=0
 CONFIG_MTD_PHYSMAP_BANKWIDTH=2
 # CONFIG_MTD_UCLINUX is not set
 # CONFIG_MTD_PLATRAM is not set
@@ -732,6 +731,7 @@
 # CONFIG_TWI_LCD is not set
 # CONFIG_SIMPLE_GPIO is not set
 # CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
@@ -782,7 +782,7 @@
 # I2C Hardware Bus support
 #
 CONFIG_I2C_BLACKFIN_TWI=y
-CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=50
+CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 # CONFIG_I2C_GPIO is not set
 # CONFIG_I2C_OCORES is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
diff --git a/arch/blackfin/configs/CM-BF561_defconfig b/arch/blackfin/configs/CM-BF561_defconfig
index 086fe5d..346bc7a 100644
--- a/arch/blackfin/configs/CM-BF561_defconfig
+++ b/arch/blackfin/configs/CM-BF561_defconfig
@@ -42,7 +42,7 @@
 CONFIG_FAIR_GROUP_SCHED=y
 CONFIG_FAIR_USER_SCHED=y
 # CONFIG_FAIR_CGROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_SYSFS_DEPRECATED is not set
 # CONFIG_RELAY is not set
 # CONFIG_BLK_DEV_INITRD is not set
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -55,7 +55,7 @@
 # CONFIG_HOTPLUG is not set
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
@@ -300,7 +300,6 @@
 CONFIG_VIRT_TO_BUS=y
 CONFIG_LARGE_ALLOCS=y
 # CONFIG_BFIN_GPTIMERS is not set
-CONFIG_BFIN_DMA_5XX=y
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
 # CONFIG_DMA_UNCACHED_NONE is not set
@@ -612,6 +611,7 @@
 # CONFIG_BFIN_TIMER_LATENCY is not set
 # CONFIG_SIMPLE_GPIO is not set
 # CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
diff --git a/arch/blackfin/configs/H8606_defconfig b/arch/blackfin/configs/H8606_defconfig
index 1fc31f1..5d3901d 100644
--- a/arch/blackfin/configs/H8606_defconfig
+++ b/arch/blackfin/configs/H8606_defconfig
@@ -54,7 +54,7 @@
 CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
@@ -250,7 +250,6 @@
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_LARGE_ALLOCS=y
 CONFIG_BFIN_GPTIMERS=y
-CONFIG_BFIN_DMA_5XX=y
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
 # CONFIG_DMA_UNCACHED_NONE is not set
diff --git a/arch/blackfin/configs/IP0X_defconfig b/arch/blackfin/configs/IP0X_defconfig
index 285d224..e66f5da 100644
--- a/arch/blackfin/configs/IP0X_defconfig
+++ b/arch/blackfin/configs/IP0X_defconfig
@@ -55,7 +55,7 @@
 # CONFIG_HOTPLUG is not set
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
@@ -262,7 +262,6 @@
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_LARGE_ALLOCS=y
 # CONFIG_BFIN_GPTIMERS is not set
-CONFIG_BFIN_DMA_5XX=y
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
 # CONFIG_DMA_UNCACHED_NONE is not set
diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
index bffca7d..ce5dde9 100644
--- a/arch/blackfin/configs/PNAV-10_defconfig
+++ b/arch/blackfin/configs/PNAV-10_defconfig
@@ -1,6 +1,6 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.22.12
+# Linux kernel version: 2.6.28-rc2
 #
 # CONFIG_MMU is not set
 # CONFIG_FPU is not set
@@ -8,41 +8,37 @@
 # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
 CONFIG_BLACKFIN=y
 CONFIG_ZONE_DMA=y
-CONFIG_SEMAPHORE_SLEEPERS=y
 CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_GPIO=y
 CONFIG_FORCE_MAX_ZONEORDER=14
 CONFIG_GENERIC_CALIBRATE_DELAY=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 
 #
-# Code maturity level options
+# General setup
 #
 CONFIG_EXPERIMENTAL=y
 CONFIG_BROKEN_ON_SMP=y
 CONFIG_INIT_ENV_ARG_LIMIT=32
-
-#
-# General setup
-#
 CONFIG_LOCALVERSION=""
 CONFIG_LOCALVERSION_AUTO=y
 CONFIG_SYSVIPC=y
-# CONFIG_IPC_NS is not set
 CONFIG_SYSVIPC_SYSCTL=y
 # CONFIG_POSIX_MQUEUE is not set
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
-# CONFIG_UTS_NS is not set
 # CONFIG_AUDIT is not set
 # CONFIG_IKCONFIG is not set
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_CGROUPS is not set
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
 # CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
 # CONFIG_BLK_DEV_INITRD is not set
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SYSCTL=y
@@ -54,40 +50,41 @@
 CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
+CONFIG_COMPAT_BRK=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
 CONFIG_EPOLL=y
 CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
+CONFIG_AIO=y
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_BIG_ORDER_ALLOC_NOFAIL_MAGIC=9
-# CONFIG_NP2 is not set
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 CONFIG_TINY_SHMEM=y
 CONFIG_BASE_SMALL=0
-
-#
-# Loadable module support
-#
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 # CONFIG_MODVERSIONS is not set
 # CONFIG_MODULE_SRCVERSION_ALL is not set
 CONFIG_KMOD=y
-
-#
-# Block layer
-#
 CONFIG_BLOCK=y
 # CONFIG_LBD is not set
 # CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -101,9 +98,11 @@
 # CONFIG_DEFAULT_CFQ is not set
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
 # CONFIG_PREEMPT_NONE is not set
 CONFIG_PREEMPT_VOLUNTARY=y
 # CONFIG_PREEMPT is not set
+# CONFIG_FREEZER is not set
 
 #
 # Blackfin Processor Options
@@ -112,8 +111,15 @@
 #
 # Processor and Board Settings
 #
+# CONFIG_BF512 is not set
+# CONFIG_BF514 is not set
+# CONFIG_BF516 is not set
+# CONFIG_BF518 is not set
 # CONFIG_BF522 is not set
+# CONFIG_BF523 is not set
+# CONFIG_BF524 is not set
 # CONFIG_BF525 is not set
+# CONFIG_BF526 is not set
 # CONFIG_BF527 is not set
 # CONFIG_BF531 is not set
 # CONFIG_BF532 is not set
@@ -121,22 +127,26 @@
 # CONFIG_BF534 is not set
 # CONFIG_BF536 is not set
 CONFIG_BF537=y
+# CONFIG_BF538 is not set
+# CONFIG_BF539 is not set
 # CONFIG_BF542 is not set
 # CONFIG_BF544 is not set
 # CONFIG_BF547 is not set
 # CONFIG_BF548 is not set
 # CONFIG_BF549 is not set
 # CONFIG_BF561 is not set
+CONFIG_BF_REV_MIN=2
+CONFIG_BF_REV_MAX=3
 # CONFIG_BF_REV_0_0 is not set
 # CONFIG_BF_REV_0_1 is not set
 CONFIG_BF_REV_0_2=y
 # CONFIG_BF_REV_0_3 is not set
 # CONFIG_BF_REV_0_4 is not set
 # CONFIG_BF_REV_0_5 is not set
+# CONFIG_BF_REV_0_6 is not set
 # CONFIG_BF_REV_ANY is not set
 # CONFIG_BF_REV_NONE is not set
 CONFIG_BF53x=y
-CONFIG_BFIN_SINGLE_CORE=y
 CONFIG_MEM_MT48LC32M8A2_75=y
 CONFIG_IRQ_PLL_WAKEUP=7
 CONFIG_IRQ_RTC=8
@@ -146,28 +156,30 @@
 CONFIG_IRQ_SPORT1_RX=9
 CONFIG_IRQ_SPORT1_TX=9
 CONFIG_IRQ_TWI=10
-CONFIG_IRQ_SPI=10
 CONFIG_IRQ_UART0_RX=10
 CONFIG_IRQ_UART0_TX=10
 CONFIG_IRQ_UART1_RX=10
 CONFIG_IRQ_UART1_TX=10
 CONFIG_IRQ_MAC_RX=11
 CONFIG_IRQ_MAC_TX=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
 CONFIG_IRQ_PORTG_INTB=12
 CONFIG_IRQ_MEM_DMA0=13
 CONFIG_IRQ_MEM_DMA1=13
 CONFIG_IRQ_WATCH=13
+CONFIG_IRQ_SPI=10
 # CONFIG_BFIN537_STAMP is not set
 # CONFIG_BFIN537_BLUETECHNIX_CM is not set
+# CONFIG_BFIN537_BLUETECHNIX_TCM is not set
 CONFIG_PNAV10=y
+# CONFIG_CAMSIG_MINOTAUR is not set
 # CONFIG_GENERIC_BF537_BOARD is not set
 
 #
@@ -191,6 +203,7 @@
 # Board customizations
 #
 # CONFIG_CMDLINE_BOOL is not set
+CONFIG_BOOT_LOAD=0x1000
 
 #
 # Clock/PLL Setup
@@ -199,7 +212,7 @@
 # CONFIG_BFIN_KERNEL_CLOCK is not set
 CONFIG_MAX_VCO_HZ=600000000
 CONFIG_MIN_VCO_HZ=50000000
-CONFIG_MAX_SCLK_HZ=133000000
+CONFIG_MAX_SCLK_HZ=133333333
 CONFIG_MIN_SCLK_HZ=27000000
 
 #
@@ -210,13 +223,17 @@
 # CONFIG_HZ_300 is not set
 # CONFIG_HZ_1000 is not set
 CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_CYCLES_CLOCKSOURCE is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
 
 #
-# Memory Setup
+# Misc
 #
-CONFIG_MAX_MEM_SIZE=64
-CONFIG_MEM_ADD_WIDTH=10
-CONFIG_BOOT_LOAD=0x1000
 CONFIG_BFIN_SCRATCH_REG_RETN=y
 # CONFIG_BFIN_SCRATCH_REG_RETE is not set
 # CONFIG_BFIN_SCRATCH_REG_CYCLES is not set
@@ -243,6 +260,12 @@
 CONFIG_CACHELINE_ALIGNED_L1=y
 CONFIG_SYSCALL_TAB_L1=y
 CONFIG_CPLB_SWITCH_TAB_L1=y
+CONFIG_APP_STACK_L1=y
+
+#
+# Speed Optimizations
+#
+CONFIG_BFIN_INS_LOWOVERHEAD=y
 CONFIG_RAMKERNEL=y
 # CONFIG_ROMKERNEL is not set
 CONFIG_SELECT_MEMORY_MODEL=y
@@ -251,13 +274,14 @@
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
-CONFIG_LARGE_ALLOCS=y
-# CONFIG_BFIN_GPTIMERS is not set
-CONFIG_BFIN_DMA_5XX=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_BFIN_GPTIMERS=y
+# CONFIG_DMA_UNCACHED_4M is not set
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
 # CONFIG_DMA_UNCACHED_NONE is not set
@@ -271,7 +295,7 @@
 # CONFIG_BFIN_ICACHE_LOCK is not set
 CONFIG_BFIN_WB=y
 # CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+# CONFIG_MPU is not set
 
 #
 # Asynchonous Memory Configuration
@@ -299,12 +323,7 @@
 #
 # Bus options (PCI, PCMCIA, EISA, MCA, ISA)
 #
-# CONFIG_PCI is not set
 # CONFIG_ARCH_SUPPORTS_MSI is not set
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
 # CONFIG_PCCARD is not set
 
 #
@@ -314,21 +333,20 @@
 CONFIG_BINFMT_FLAT=y
 CONFIG_BINFMT_ZFLAT=y
 # CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_HAVE_AOUT is not set
 # CONFIG_BINFMT_MISC is not set
 
 #
 # Power management options
 #
 # CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_PM_WAKEUP_BY_GPIO is not set
 
 #
 # CPU Frequency scaling
 #
 # CONFIG_CPU_FREQ is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
@@ -341,6 +359,7 @@
 # CONFIG_XFRM_USER is not set
 # CONFIG_XFRM_SUB_POLICY is not set
 # CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
 # CONFIG_NET_KEY is not set
 CONFIG_INET=y
 # CONFIG_IP_MULTICAST is not set
@@ -362,6 +381,7 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=y
 CONFIG_INET_XFRM_MODE_TUNNEL=y
 CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=y
 CONFIG_INET_TCP_DIAG=y
 # CONFIG_TCP_CONG_ADVANCED is not set
@@ -369,8 +389,6 @@
 CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_TCP_MD5SIG is not set
 # CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
 # CONFIG_NETLABEL is not set
 # CONFIG_NETWORK_SECMARK is not set
 # CONFIG_NETFILTER is not set
@@ -379,6 +397,7 @@
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_DECNET is not set
 # CONFIG_LLC2 is not set
@@ -388,10 +407,6 @@
 # CONFIG_LAPB is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
 # CONFIG_NET_SCHED is not set
 
 #
@@ -399,18 +414,19 @@
 #
 # CONFIG_NET_PKTGEN is not set
 # CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
+# CONFIG_PHONET is not set
+CONFIG_WIRELESS=y
 # CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_OLD_REGULATORY=y
 # CONFIG_WIRELESS_EXT is not set
 # CONFIG_MAC80211 is not set
 # CONFIG_IEEE80211 is not set
 # CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
 
 #
 # Device Drivers
@@ -419,14 +435,11 @@
 #
 # Generic Driver Options
 #
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 # CONFIG_FW_LOADER is not set
 # CONFIG_SYS_HYPERVISOR is not set
-
-#
-# Connector - unified userspace <-> kernelspace linker
-#
 # CONFIG_CONNECTOR is not set
 CONFIG_MTD=y
 # CONFIG_MTD_DEBUG is not set
@@ -434,6 +447,7 @@
 CONFIG_MTD_PARTITIONS=y
 # CONFIG_MTD_REDBOOT_PARTS is not set
 # CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
 
 #
 # User Modules And Translation Layers
@@ -446,6 +460,7 @@
 # CONFIG_INFTL is not set
 # CONFIG_RFD_FTL is not set
 # CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
 
 #
 # RAM/ROM/Flash chip drivers
@@ -470,7 +485,7 @@
 # Mapping drivers for chip access
 #
 CONFIG_MTD_COMPLEX_MAPPINGS=y
-# CONFIG_MTD_BF5xx is not set
+# CONFIG_MTD_GPIO_ADDR is not set
 CONFIG_MTD_UCLINUX=y
 # CONFIG_MTD_PLATRAM is not set
 
@@ -509,33 +524,22 @@
 # UBI - Unsorted block images
 #
 # CONFIG_MTD_UBI is not set
-
-#
-# Parallel port support
-#
 # CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-# CONFIG_PNPACPI is not set
-
-#
-# Block devices
-#
+CONFIG_BLK_DEV=y
 # CONFIG_BLK_DEV_COW_COMMON is not set
 # CONFIG_BLK_DEV_LOOP is not set
 # CONFIG_BLK_DEV_NBD is not set
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
-
-#
-# Misc devices
-#
+# CONFIG_BLK_DEV_HD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
 #
@@ -543,22 +547,17 @@
 #
 # CONFIG_RAID_ATTRS is not set
 # CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
 # CONFIG_SCSI_NETLINK is not set
 # CONFIG_ATA is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
 # CONFIG_MD is not set
-
-#
-# Network device support
-#
 CONFIG_NETDEVICES=y
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
 # CONFIG_EQUALIZER is not set
 # CONFIG_TUN is not set
+# CONFIG_VETH is not set
 CONFIG_PHYLIB=y
 
 #
@@ -572,46 +571,45 @@
 # CONFIG_VITESSE_PHY is not set
 # CONFIG_SMSC_PHY is not set
 # CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
 # CONFIG_FIXED_PHY is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
+# CONFIG_MDIO_BITBANG is not set
 CONFIG_NET_ETHERNET=y
 CONFIG_MII=y
-# CONFIG_SMC91X is not set
 CONFIG_BFIN_MAC=y
 # CONFIG_BFIN_MAC_USE_L1 is not set
 CONFIG_BFIN_TX_DESC_NUM=100
 CONFIG_BFIN_RX_DESC_NUM=100
 CONFIG_BFIN_MAC_RMII=y
+# CONFIG_SMC91X is not set
 # CONFIG_SMSC911X is not set
 # CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NETDEV_1000=y
-CONFIG_NETDEV_10000=y
 # CONFIG_AX88180 is not set
+CONFIG_NETDEV_10000=y
 
 #
 # Wireless LAN
 #
 # CONFIG_WLAN_PRE80211 is not set
 # CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
 # CONFIG_WAN is not set
 # CONFIG_PPP is not set
 # CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
 # CONFIG_NETCONSOLE is not set
 # CONFIG_NETPOLL is not set
 # CONFIG_NET_POLL_CONTROLLER is not set
-
-#
-# ISDN subsystem
-#
 # CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
 # CONFIG_PHONE is not set
 
 #
@@ -626,9 +624,6 @@
 #
 # CONFIG_INPUT_MOUSEDEV is not set
 # CONFIG_INPUT_JOYDEV is not set
-CONFIG_INPUT_TSDEV=y
-CONFIG_INPUT_TSDEV_SCREEN_X=240
-CONFIG_INPUT_TSDEV_SCREEN_Y=320
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_EVBUG is not set
 
@@ -642,24 +637,29 @@
 CONFIG_INPUT_TOUCHSCREEN=y
 # CONFIG_TOUCHSCREEN_ADS7846 is not set
 CONFIG_TOUCHSCREEN_AD7877=y
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
 # CONFIG_TOUCHSCREEN_GUNZE is not set
 # CONFIG_TOUCHSCREEN_ELO is not set
 # CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
 # CONFIG_TOUCHSCREEN_MK712 is not set
 # CONFIG_TOUCHSCREEN_PENMOUNT is not set
 # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
 # CONFIG_TOUCHSCREEN_TOUCHWIN is not set
-# CONFIG_TOUCHSCREEN_UCB1400 is not set
 # CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
 CONFIG_INPUT_MISC=y
 # CONFIG_INPUT_ATI_REMOTE is not set
 # CONFIG_INPUT_ATI_REMOTE2 is not set
 # CONFIG_INPUT_KEYSPAN_REMOTE is not set
 # CONFIG_INPUT_POWERMATE is not set
 # CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
 CONFIG_INPUT_UINPUT=y
-# CONFIG_BF53X_PFBUTTONS is not set
-# CONFIG_TWI_KEYPAD is not set
+# CONFIG_CONFIG_INPUT_PCF8574 is not set
 
 #
 # Hardware I/O ports
@@ -672,18 +672,17 @@
 #
 # CONFIG_AD9960 is not set
 # CONFIG_SPI_ADC_BF533 is not set
-# CONFIG_BF5xx_PFLAGS is not set
 # CONFIG_BF5xx_PPIFCD is not set
 # CONFIG_BFIN_SIMPLE_TIMER is not set
 # CONFIG_BF5xx_PPI is not set
 CONFIG_BFIN_SPORT=y
 # CONFIG_BFIN_TIMER_LATENCY is not set
 CONFIG_TWI_LCD=m
-CONFIG_TWI_LCD_SLAVE_ADDR=34
-# CONFIG_AD5304 is not set
-# CONFIG_BF5xx_TEA5764 is not set
-# CONFIG_BF5xx_FBDMA is not set
+CONFIG_BFIN_DMA_INTERFACE=m
+# CONFIG_SIMPLE_GPIO is not set
 # CONFIG_VT is not set
+CONFIG_DEVKMEM=y
+# CONFIG_BFIN_JTAG_COMM is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
@@ -716,68 +715,59 @@
 #
 # linux embedded drivers
 #
-# CONFIG_CAN_MCF5282 is not set
-# CONFIG_CAN_UNCTWINCAN is not set
 CONFIG_CAN_BLACKFIN=m
-
-#
-# IPMI
-#
 # CONFIG_IPMI_HANDLER is not set
-# CONFIG_WATCHDOG is not set
 CONFIG_HW_RANDOM=y
-# CONFIG_GEN_RTC is not set
 # CONFIG_R3964 is not set
 # CONFIG_RAW_DRIVER is not set
-
-#
-# TPM devices
-#
 # CONFIG_TCG_TPM is not set
 CONFIG_I2C=y
 CONFIG_I2C_BOARDINFO=y
 CONFIG_I2C_CHARDEV=y
-
-#
-# I2C Algorithms
-#
-# CONFIG_I2C_ALGOBIT is not set
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
+CONFIG_I2C_HELPER_AUTO=y
 
 #
 # I2C Hardware Bus support
 #
-# CONFIG_I2C_BLACKFIN_GPIO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
 CONFIG_I2C_BLACKFIN_TWI=y
-CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=50
+CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 # CONFIG_I2C_GPIO is not set
 # CONFIG_I2C_OCORES is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
 # CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
 # CONFIG_I2C_STUB is not set
 
 #
 # Miscellaneous I2C Chip support
 #
-# CONFIG_SENSORS_DS1337 is not set
-# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_AT24 is not set
 # CONFIG_SENSORS_AD5252 is not set
 # CONFIG_SENSORS_EEPROM is not set
 CONFIG_SENSORS_PCF8574=m
-CONFIG_SENSORS_PCF8575=y
-# CONFIG_SENSORS_PCA9543 is not set
+# CONFIG_PCF8575 is not set
 # CONFIG_SENSORS_PCA9539 is not set
 # CONFIG_SENSORS_PCF8591 is not set
 # CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
 # CONFIG_I2C_DEBUG_CORE is not set
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
 # CONFIG_I2C_DEBUG_CHIP is not set
-
-#
-# SPI support
-#
 CONFIG_SPI=y
 CONFIG_SPI_MASTER=y
 
@@ -785,6 +775,7 @@
 # SPI Master Controller Drivers
 #
 CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
 # CONFIG_SPI_BITBANG is not set
 
 #
@@ -792,27 +783,29 @@
 #
 # CONFIG_SPI_AT25 is not set
 # CONFIG_SPI_SPIDEV is not set
-
-#
-# Dallas's 1-wire bus
-#
+# CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
 # CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
 CONFIG_HWMON=y
 # CONFIG_HWMON_VID is not set
-# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_AD7414 is not set
 # CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
 # CONFIG_SENSORS_ADM1021 is not set
 # CONFIG_SENSORS_ADM1025 is not set
 # CONFIG_SENSORS_ADM1026 is not set
 # CONFIG_SENSORS_ADM1029 is not set
 # CONFIG_SENSORS_ADM1031 is not set
 # CONFIG_SENSORS_ADM9240 is not set
-# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
 # CONFIG_SENSORS_ATXP1 is not set
 # CONFIG_SENSORS_DS1621 is not set
 # CONFIG_SENSORS_F71805F is not set
-# CONFIG_SENSORS_FSCHER is not set
-# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
 # CONFIG_SENSORS_GL518SM is not set
 # CONFIG_SENSORS_GL520SM is not set
 # CONFIG_SENSORS_IT87 is not set
@@ -827,58 +820,76 @@
 # CONFIG_SENSORS_LM87 is not set
 # CONFIG_SENSORS_LM90 is not set
 # CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_MAX1111 is not set
 # CONFIG_SENSORS_MAX1619 is not set
 # CONFIG_SENSORS_MAX6650 is not set
 # CONFIG_SENSORS_PC87360 is not set
 # CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_DME1737 is not set
 # CONFIG_SENSORS_SMSC47M1 is not set
 # CONFIG_SENSORS_SMSC47M192 is not set
 # CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
 # CONFIG_SENSORS_VT1211 is not set
 # CONFIG_SENSORS_W83781D is not set
 # CONFIG_SENSORS_W83791D is not set
 # CONFIG_SENSORS_W83792D is not set
 # CONFIG_SENSORS_W83793 is not set
 # CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
 # CONFIG_SENSORS_W83627HF is not set
 # CONFIG_SENSORS_W83627EHF is not set
 # CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_WATCHDOG is not set
 
 #
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
 
 #
 # Multimedia devices
 #
+
+#
+# Multimedia core support
+#
 # CONFIG_VIDEO_DEV is not set
 # CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
 CONFIG_DAB=y
 
 #
 # Graphics support
 #
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_LCD_CLASS_DEVICE=y
-
-#
-# Display device support
-#
-# CONFIG_DISPLAY_SUPPORT is not set
 # CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
 CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 # CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
 CONFIG_FB_CFB_FILLRECT=y
 CONFIG_FB_CFB_COPYAREA=y
 CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
 # CONFIG_FB_SYS_FILLRECT is not set
 # CONFIG_FB_SYS_COPYAREA is not set
 # CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
 # CONFIG_FB_SYS_FOPS is not set
-CONFIG_FB_DEFERRED_IO=y
 # CONFIG_FB_SVGALIB is not set
 # CONFIG_FB_MACMODES is not set
 # CONFIG_FB_BACKLIGHT is not set
@@ -888,25 +899,34 @@
 #
 # Frame buffer hardware drivers
 #
-# CONFIG_FB_BFIN_7171 is not set
-# CONFIG_FB_BFIN_7393 is not set
+# CONFIG_FB_BFIN_T350MCQB is not set
+# CONFIG_FB_BFIN_LQ035Q1 is not set
 CONFIG_FB_BF537_LQ035=y
 CONFIG_LQ035_SLAVE_ADDR=0x58
 CONFIG_FB_BFIN_LANDSCAPE=y
 # CONFIG_FB_BFIN_BGR is not set
-# CONFIG_FB_BFIN_T350MCQB is not set
+# CONFIG_FB_BFIN_7393 is not set
+# CONFIG_FB_HITACHI_TX09 is not set
 # CONFIG_FB_S1D13XXX is not set
 # CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+# CONFIG_LCD_PLATFORM is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_CORGI is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
 # CONFIG_LOGO is not set
-
-#
-# Sound
-#
 CONFIG_SOUND=y
-
-#
-# Advanced Linux Sound Architecture
-#
+CONFIG_SOUND_OSS_CORE=y
 CONFIG_SND=m
 # CONFIG_SND_SEQUENCER is not set
 # CONFIG_SND_MIXER_OSS is not set
@@ -916,46 +936,30 @@
 # CONFIG_SND_VERBOSE_PROCFS is not set
 # CONFIG_SND_VERBOSE_PRINTK is not set
 # CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
+CONFIG_SND_DRIVERS=y
 # CONFIG_SND_DUMMY is not set
 # CONFIG_SND_MTPAV is not set
 # CONFIG_SND_SERIAL_U16550 is not set
 # CONFIG_SND_MPU401 is not set
+CONFIG_SND_SPI=y
 
 #
 # ALSA Blackfin devices
 #
 # CONFIG_SND_BLACKFIN_AD1836 is not set
-# CONFIG_SND_BFIN_AD73311 is not set
-
-#
-# System on Chip audio support
-#
+# CONFIG_SND_BFIN_AD73322 is not set
 # CONFIG_SND_SOC is not set
-
-#
-# Open Sound System
-#
 CONFIG_SOUND_PRIME=y
-# CONFIG_OSS_OBSOLETE is not set
-# CONFIG_SOUND_MSNDCLAS is not set
-# CONFIG_SOUND_MSNDPIN is not set
-
-#
-# HID Devices
-#
+CONFIG_HID_SUPPORT=y
 # CONFIG_HID is not set
-
-#
-# USB support
-#
+# CONFIG_HID_PID is not set
+CONFIG_USB_SUPPORT=y
 CONFIG_USB_ARCH_HAS_HCD=y
 # CONFIG_USB_ARCH_HAS_OHCI is not set
 # CONFIG_USB_ARCH_HAS_EHCI is not set
 # CONFIG_USB is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
 
 #
 # Enable Host or Gadget support to see Inventra options
@@ -964,37 +968,11 @@
 #
 # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
 #
-
-#
-# USB Gadget Support
-#
 # CONFIG_USB_GADGET is not set
 # CONFIG_MMC is not set
-
-#
-# LED devices
-#
+# CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
-
-#
-# LED drivers
-#
-
-#
-# LED Triggers
-#
-
-#
-# InfiniBand support
-#
-
-#
-# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
-#
-
-#
-# Real Time Clock
-#
+# CONFIG_ACCESSIBILITY is not set
 CONFIG_RTC_LIB=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_HCTOSYS=y
@@ -1014,6 +992,7 @@
 # I2C RTC drivers
 #
 # CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
 # CONFIG_RTC_DRV_DS1672 is not set
 # CONFIG_RTC_DRV_MAX6900 is not set
 # CONFIG_RTC_DRV_RS5C372 is not set
@@ -1021,43 +1000,41 @@
 # CONFIG_RTC_DRV_X1205 is not set
 # CONFIG_RTC_DRV_PCF8563 is not set
 # CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
 
 #
 # SPI RTC drivers
 #
-# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
 # CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
 
 #
 # Platform RTC drivers
 #
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
 # CONFIG_RTC_DRV_DS1553 is not set
 # CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
 # CONFIG_RTC_DRV_V3020 is not set
 
 #
 # on-CPU RTC drivers
 #
 CONFIG_RTC_DRV_BFIN=y
-
-#
-# DMA Engine support
-#
-# CONFIG_DMA_ENGINE is not set
-
-#
-# DMA Clients
-#
-
-#
-# DMA Devices
-#
-
-#
-# PBX support
-#
-# CONFIG_PBX is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
 
 #
 # File systems
@@ -1067,20 +1044,18 @@
 # CONFIG_EXT2_FS_POSIX_ACL is not set
 # CONFIG_EXT2_FS_SECURITY is not set
 # CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
 CONFIG_FS_MBCACHE=y
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 # CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
 # CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
+# CONFIG_DNOTIFY is not set
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
-# CONFIG_DNOTIFY is not set
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
 # CONFIG_FUSE_FS is not set
@@ -1106,7 +1081,6 @@
 CONFIG_SYSFS=y
 # CONFIG_TMPFS is not set
 # CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
 # CONFIG_CONFIGFS_FS is not set
 
 #
@@ -1121,36 +1095,35 @@
 # CONFIG_EFS_FS is not set
 CONFIG_YAFFS_FS=y
 CONFIG_YAFFS_YAFFS1=y
+# CONFIG_YAFFS_9BYTE_TAGS is not set
 # CONFIG_YAFFS_DOES_ECC is not set
 CONFIG_YAFFS_YAFFS2=y
 CONFIG_YAFFS_AUTO_YAFFS2=y
 # CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
-CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS=10
 # CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
 # CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
 CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
 # CONFIG_JFFS2_FS is not set
 # CONFIG_CRAMFS is not set
 # CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
 # CONFIG_SYSV_FS is not set
 # CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
+CONFIG_NETWORK_FILESYSTEMS=y
 CONFIG_NFS_FS=m
 CONFIG_NFS_V3=y
 # CONFIG_NFS_V3_ACL is not set
 # CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
 # CONFIG_NFSD is not set
 CONFIG_LOCKD=m
 CONFIG_LOCKD_V4=y
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=m
-# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_SUNRPC_REGISTER_V4 is not set
 # CONFIG_RPCSEC_GSS_KRB5 is not set
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 CONFIG_SMB_FS=m
@@ -1159,17 +1132,12 @@
 # CONFIG_NCP_FS is not set
 # CONFIG_CODA_FS is not set
 # CONFIG_AFS_FS is not set
-# CONFIG_9P_FS is not set
 
 #
 # Partition Types
 #
 # CONFIG_PARTITION_ADVANCED is not set
 CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
 CONFIG_NLS=m
 CONFIG_NLS_DEFAULT="iso8859-1"
 # CONFIG_NLS_CODEPAGE_437 is not set
@@ -1210,29 +1178,30 @@
 # CONFIG_NLS_KOI8_R is not set
 # CONFIG_NLS_KOI8_U is not set
 # CONFIG_NLS_UTF8 is not set
-
-#
-# Distributed Lock Manager
-#
 # CONFIG_DLM is not set
 
 #
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-
-#
 # Kernel hacking
 #
 # CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
 CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
 # CONFIG_MAGIC_SYSRQ is not set
 # CONFIG_UNUSED_SYMBOLS is not set
 # CONFIG_DEBUG_FS is not set
 # CONFIG_HEADERS_CHECK is not set
 # CONFIG_DEBUG_KERNEL is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_DEBUG_VERBOSE=y
 # CONFIG_DEBUG_MMRS is not set
+# CONFIG_DEBUG_DOUBLEFAULT is not set
 # CONFIG_DEBUG_HUNT_FOR_ZERO is not set
 CONFIG_DEBUG_BFIN_HWTRACE_ON=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
@@ -1250,13 +1219,94 @@
 #
 # CONFIG_KEYS is not set
 CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
 # CONFIG_SECURITY_NETWORK is not set
-CONFIG_SECURITY_CAPABILITIES=y
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
+CONFIG_CRYPTO=y
 
 #
-# Cryptographic options
+# Crypto core or helper
 #
-# CONFIG_CRYPTO is not set
+# CONFIG_CRYPTO_FIPS is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
 
 #
 # Library routines
@@ -1264,8 +1314,10 @@
 CONFIG_BITREVERSE=y
 CONFIG_CRC_CCITT=m
 # CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
 # CONFIG_CRC_ITU_T is not set
 CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
 # CONFIG_LIBCRC32C is not set
 CONFIG_ZLIB_INFLATE=y
 CONFIG_PLIST=y
diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
index b1309f8..7c8250d 100644
--- a/arch/blackfin/configs/SRV1_defconfig
+++ b/arch/blackfin/configs/SRV1_defconfig
@@ -59,7 +59,7 @@
 CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
@@ -172,14 +172,14 @@
 CONFIG_IRQ_UART1_TX=10
 CONFIG_IRQ_MAC_RX=11
 CONFIG_IRQ_MAC_TX=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
 CONFIG_IRQ_PORTG_INTB=12
 CONFIG_IRQ_MEM_DMA0=13
 CONFIG_IRQ_MEM_DMA1=13
@@ -271,7 +271,6 @@
 # CONFIG_RESOURCES_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_LARGE_ALLOCS=y
-CONFIG_BFIN_DMA_5XX=y
 CONFIG_DMA_UNCACHED_2M=y
 # CONFIG_DMA_UNCACHED_1M is not set
 # CONFIG_DMA_UNCACHED_NONE is not set
@@ -786,7 +785,7 @@
 #
 # CONFIG_I2C_BLACKFIN_GPIO is not set
 CONFIG_I2C_BLACKFIN_TWI=y
-CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=50
+CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 # CONFIG_I2C_GPIO is not set
 # CONFIG_I2C_OCORES is not set
 # CONFIG_I2C_PARPORT_LIGHT is not set
diff --git a/arch/blackfin/configs/TCM-BF537_defconfig b/arch/blackfin/configs/TCM-BF537_defconfig
index c482ee1..9af522c 100644
--- a/arch/blackfin/configs/TCM-BF537_defconfig
+++ b/arch/blackfin/configs/TCM-BF537_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.24.7
-# Thu Jul 31 00:53:15 2008
+# Linux kernel version: 2.6.28-rc2
+# Tue Jan  6 09:22:17 2009
 #
 # CONFIG_MMU is not set
 # CONFIG_FPU is not set
@@ -9,7 +9,6 @@
 # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
 CONFIG_BLACKFIN=y
 CONFIG_ZONE_DMA=y
-CONFIG_SEMAPHORE_SLEEPERS=y
 CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_HARDIRQS=y
@@ -30,17 +29,14 @@
 CONFIG_SYSVIPC=y
 CONFIG_SYSVIPC_SYSCTL=y
 # CONFIG_BSD_PROCESS_ACCT is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CGROUPS is not set
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_FAIR_USER_SCHED=y
-# CONFIG_FAIR_CGROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
 # CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
 # CONFIG_BLK_DEV_INITRD is not set
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SYSCTL=y
@@ -52,22 +48,30 @@
 # CONFIG_HOTPLUG is not set
 CONFIG_PRINTK=y
 CONFIG_BUG=y
-CONFIG_ELF_CORE=y
+# CONFIG_ELF_CORE is not set
+CONFIG_COMPAT_BRK=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_ANON_INODES=y
 CONFIG_EPOLL=y
 CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
+CONFIG_AIO=y
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 CONFIG_TINY_SHMEM=y
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 # CONFIG_MODVERSIONS is not set
@@ -78,6 +82,7 @@
 # CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_LSF is not set
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -91,9 +96,11 @@
 # CONFIG_DEFAULT_CFQ is not set
 CONFIG_DEFAULT_NOOP=y
 CONFIG_DEFAULT_IOSCHED="noop"
+CONFIG_CLASSIC_RCU=y
 CONFIG_PREEMPT_NONE=y
 # CONFIG_PREEMPT_VOLUNTARY is not set
 # CONFIG_PREEMPT is not set
+# CONFIG_FREEZER is not set
 
 #
 # Blackfin Processor Options
@@ -102,6 +109,10 @@
 #
 # Processor and Board Settings
 #
+# CONFIG_BF512 is not set
+# CONFIG_BF514 is not set
+# CONFIG_BF516 is not set
+# CONFIG_BF518 is not set
 # CONFIG_BF522 is not set
 # CONFIG_BF523 is not set
 # CONFIG_BF524 is not set
@@ -114,18 +125,23 @@
 # CONFIG_BF534 is not set
 # CONFIG_BF536 is not set
 CONFIG_BF537=y
+# CONFIG_BF538 is not set
+# CONFIG_BF539 is not set
 # CONFIG_BF542 is not set
 # CONFIG_BF544 is not set
 # CONFIG_BF547 is not set
 # CONFIG_BF548 is not set
 # CONFIG_BF549 is not set
 # CONFIG_BF561 is not set
+CONFIG_BF_REV_MIN=2
+CONFIG_BF_REV_MAX=3
 # CONFIG_BF_REV_0_0 is not set
 # CONFIG_BF_REV_0_1 is not set
 CONFIG_BF_REV_0_2=y
 # CONFIG_BF_REV_0_3 is not set
 # CONFIG_BF_REV_0_4 is not set
 # CONFIG_BF_REV_0_5 is not set
+# CONFIG_BF_REV_0_6 is not set
 # CONFIG_BF_REV_ANY is not set
 # CONFIG_BF_REV_NONE is not set
 CONFIG_BF53x=y
@@ -137,25 +153,25 @@
 CONFIG_IRQ_SPORT1_RX=9
 CONFIG_IRQ_SPORT1_TX=9
 CONFIG_IRQ_TWI=10
-CONFIG_IRQ_SPI=10
 CONFIG_IRQ_UART0_RX=10
 CONFIG_IRQ_UART0_TX=10
 CONFIG_IRQ_UART1_RX=10
 CONFIG_IRQ_UART1_TX=10
 CONFIG_IRQ_MAC_RX=11
 CONFIG_IRQ_MAC_TX=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
 CONFIG_IRQ_PORTG_INTB=12
 CONFIG_IRQ_MEM_DMA0=13
 CONFIG_IRQ_MEM_DMA1=13
 CONFIG_IRQ_WATCH=13
+CONFIG_IRQ_SPI=10
 # CONFIG_BFIN537_STAMP is not set
 # CONFIG_BFIN537_BLUETECHNIX_CM is not set
 CONFIG_BFIN537_BLUETECHNIX_TCM=y
@@ -191,7 +207,6 @@
 #
 CONFIG_CLKIN_HZ=25000000
 # CONFIG_BFIN_KERNEL_CLOCK is not set
-CONFIG_MAX_MEM_SIZE=32
 CONFIG_MAX_VCO_HZ=600000000
 CONFIG_MIN_VCO_HZ=50000000
 CONFIG_MAX_SCLK_HZ=133333333
@@ -205,10 +220,10 @@
 # CONFIG_HZ_300 is not set
 # CONFIG_HZ_1000 is not set
 CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 # CONFIG_CYCLES_CLOCKSOURCE is not set
-# CONFIG_TICK_ONESHOT is not set
 # CONFIG_NO_HZ is not set
 # CONFIG_HIGH_RES_TIMERS is not set
 CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
@@ -242,6 +257,12 @@
 CONFIG_CACHELINE_ALIGNED_L1=y
 CONFIG_SYSCALL_TAB_L1=y
 CONFIG_CPLB_SWITCH_TAB_L1=y
+CONFIG_APP_STACK_L1=y
+
+#
+# Speed Optimizations
+#
+CONFIG_BFIN_INS_LOWOVERHEAD=y
 CONFIG_RAMKERNEL=y
 # CONFIG_ROMKERNEL is not set
 CONFIG_SELECT_MEMORY_MODEL=y
@@ -250,14 +271,13 @@
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_VIRT_TO_BUS=y
 # CONFIG_BFIN_GPTIMERS is not set
-CONFIG_BFIN_DMA_5XX=y
 # CONFIG_DMA_UNCACHED_4M is not set
 # CONFIG_DMA_UNCACHED_2M is not set
 CONFIG_DMA_UNCACHED_1M=y
@@ -300,7 +320,6 @@
 #
 # Bus options (PCI, PCMCIA, EISA, MCA, ISA)
 #
-# CONFIG_PCI is not set
 # CONFIG_ARCH_SUPPORTS_MSI is not set
 
 #
@@ -310,23 +329,20 @@
 CONFIG_BINFMT_FLAT=y
 CONFIG_BINFMT_ZFLAT=y
 CONFIG_BINFMT_SHARED_FLAT=y
+# CONFIG_HAVE_AOUT is not set
 # CONFIG_BINFMT_MISC is not set
 
 #
 # Power management options
 #
 # CONFIG_PM is not set
-CONFIG_SUSPEND_UP_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
 # CONFIG_PM_WAKEUP_BY_GPIO is not set
 
 #
 # CPU Frequency scaling
 #
 # CONFIG_CPU_FREQ is not set
-
-#
-# Networking
-#
 # CONFIG_NET is not set
 
 #
@@ -345,6 +361,7 @@
 CONFIG_MTD_PARTITIONS=y
 # CONFIG_MTD_REDBOOT_PARTS is not set
 # CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
 
 #
 # User Modules And Translation Layers
@@ -362,8 +379,10 @@
 #
 # RAM/ROM/Flash chip drivers
 #
-# CONFIG_MTD_CFI is not set
+CONFIG_MTD_CFI=y
 # CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
 CONFIG_MTD_MAP_BANK_WIDTH_1=y
 CONFIG_MTD_MAP_BANK_WIDTH_2=y
 CONFIG_MTD_MAP_BANK_WIDTH_4=y
@@ -374,6 +393,10 @@
 CONFIG_MTD_CFI_I2=y
 # CONFIG_MTD_CFI_I4 is not set
 # CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
 CONFIG_MTD_RAM=y
 # CONFIG_MTD_ROM is not set
 # CONFIG_MTD_ABSENT is not set
@@ -381,8 +404,9 @@
 #
 # Mapping drivers for chip access
 #
-# CONFIG_MTD_COMPLEX_MAPPINGS is not set
-# CONFIG_MTD_GPIO_ADDR is not set
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+# CONFIG_MTD_PHYSMAP is not set
+CONFIG_MTD_GPIO_ADDR=y
 CONFIG_MTD_UCLINUX=y
 # CONFIG_MTD_PLATRAM is not set
 
@@ -416,10 +440,13 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
 #
@@ -454,8 +481,11 @@
 # CONFIG_BF5xx_PPI is not set
 CONFIG_BFIN_SPORT=y
 # CONFIG_BFIN_TIMER_LATENCY is not set
+CONFIG_BFIN_DMA_INTERFACE=m
 # CONFIG_SIMPLE_GPIO is not set
 # CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_BFIN_JTAG_COMM is not set
 # CONFIG_SERIAL_NONSTANDARD is not set
 
 #
@@ -486,15 +516,10 @@
 # CONFIG_CAN4LINUX is not set
 # CONFIG_IPMI_HANDLER is not set
 # CONFIG_HW_RANDOM is not set
-# CONFIG_GEN_RTC is not set
 # CONFIG_R3964 is not set
 # CONFIG_RAW_DRIVER is not set
 # CONFIG_TCG_TPM is not set
 # CONFIG_I2C is not set
-
-#
-# SPI support
-#
 CONFIG_SPI=y
 CONFIG_SPI_MASTER=y
 
@@ -502,6 +527,7 @@
 # SPI Master Controller Drivers
 #
 CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
 # CONFIG_SPI_BITBANG is not set
 
 #
@@ -510,9 +536,13 @@
 # CONFIG_SPI_AT25 is not set
 # CONFIG_SPI_SPIDEV is not set
 # CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
 # CONFIG_W1 is not set
 # CONFIG_POWER_SUPPLY is not set
 # CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
 CONFIG_WATCHDOG=y
 # CONFIG_WATCHDOG_NOWAYOUT is not set
 
@@ -523,20 +553,27 @@
 CONFIG_BFIN_WDT=y
 
 #
-# Sonics Silicon Backplane
-#
-CONFIG_SSB_POSSIBLE=y
-# CONFIG_SSB is not set
-
-#
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
 
 #
 # Multimedia devices
 #
+
+#
+# Multimedia core support
+#
 # CONFIG_VIDEO_DEV is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
 # CONFIG_DAB is not set
 
 #
@@ -551,20 +588,16 @@
 # Display device support
 #
 # CONFIG_DISPLAY_SUPPORT is not set
-
-#
-# Sound
-#
 # CONFIG_SOUND is not set
 # CONFIG_USB_SUPPORT is not set
 # CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
 # CONFIG_RTC_CLASS is not set
-
-#
-# Userspace I/O
-#
+# CONFIG_DMADEVICES is not set
 # CONFIG_UIO is not set
+# CONFIG_STAGING is not set
 
 #
 # File systems
@@ -574,19 +607,17 @@
 # CONFIG_EXT2_FS_POSIX_ACL is not set
 # CONFIG_EXT2_FS_SECURITY is not set
 # CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
 CONFIG_FS_MBCACHE=y
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 # CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
 # CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
+# CONFIG_DNOTIFY is not set
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
-# CONFIG_DNOTIFY is not set
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
 # CONFIG_FUSE_FS is not set
@@ -628,8 +659,11 @@
 # CONFIG_JFFS2_FS is not set
 # CONFIG_CRAMFS is not set
 # CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
 # CONFIG_SYSV_FS is not set
 # CONFIG_UFS_FS is not set
 
@@ -639,7 +673,6 @@
 # CONFIG_PARTITION_ADVANCED is not set
 CONFIG_MSDOS_PARTITION=y
 # CONFIG_NLS is not set
-# CONFIG_INSTRUMENTATION is not set
 
 #
 # Kernel hacking
@@ -647,14 +680,22 @@
 # CONFIG_PRINTK_TIME is not set
 CONFIG_ENABLE_WARN_DEPRECATED=y
 CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
 # CONFIG_MAGIC_SYSRQ is not set
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_HEADERS_CHECK is not set
 # CONFIG_DEBUG_KERNEL is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
 # CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_DEBUG_VERBOSE=y
 CONFIG_DEBUG_MMRS=y
+# CONFIG_DEBUG_DOUBLEFAULT is not set
 CONFIG_DEBUG_HUNT_FOR_ZERO=y
 CONFIG_DEBUG_BFIN_HWTRACE_ON=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
@@ -671,9 +712,8 @@
 # Security options
 #
 # CONFIG_KEYS is not set
-CONFIG_SECURITY=y
-# CONFIG_SECURITY_NETWORK is not set
-CONFIG_SECURITY_CAPABILITIES=y
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
 # CONFIG_SECURITY_FILE_CAPABILITIES is not set
 # CONFIG_CRYPTO is not set
 
@@ -682,6 +722,7 @@
 #
 # CONFIG_CRC_CCITT is not set
 # CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
 # CONFIG_CRC_ITU_T is not set
 # CONFIG_CRC32 is not set
 # CONFIG_CRC7 is not set
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index d0d1ac4..606ecfd 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -1,4 +1,3 @@
 include include/asm-generic/Kbuild.asm
 
 unifdef-y += fixed_code.h
-unifdef-y += swab.h
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index 25776c1..94b2a9b 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -15,65 +15,172 @@
  */
 
 #define ATOMIC_INIT(i)	{ (i) }
-
-#define atomic_read(v)		((v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = i)
 
-static __inline__ void atomic_add(int i, atomic_t * v)
+#ifdef CONFIG_SMP
+
+#define atomic_read(v)	__raw_uncached_fetch_asm(&(v)->counter)
+
+asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
+
+asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	__raw_atomic_update_asm(&v->counter, i);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	__raw_atomic_update_asm(&v->counter, -i);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	return __raw_atomic_update_asm(&v->counter, i);
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	return __raw_atomic_update_asm(&v->counter, -i);
+}
+
+static inline void atomic_inc(volatile atomic_t *v)
+{
+	__raw_atomic_update_asm(&v->counter, 1);
+}
+
+static inline void atomic_dec(volatile atomic_t *v)
+{
+	__raw_atomic_update_asm(&v->counter, -1);
+}
+
+static inline void atomic_clear_mask(int mask, atomic_t *v)
+{
+	__raw_atomic_clear_asm(&v->counter, mask);
+}
+
+static inline void atomic_set_mask(int mask, atomic_t *v)
+{
+	__raw_atomic_set_asm(&v->counter, mask);
+}
+
+static inline int atomic_test_mask(int mask, atomic_t *v)
+{
+	return __raw_atomic_test_asm(&v->counter, mask);
+}
+
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec()    barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc()    barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#else /* !CONFIG_SMP */
+
+#define atomic_read(v)	((v)->counter)
+
+static inline void atomic_add(int i, atomic_t *v)
 {
 	long flags;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	v->counter += i;
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
-static __inline__ void atomic_sub(int i, atomic_t * v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
 	long flags;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	v->counter -= i;
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 }
 
-static inline int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t *v)
 {
 	int __temp = 0;
 	long flags;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	v->counter += i;
 	__temp = v->counter;
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 
 	return __temp;
 }
 
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	int __temp = 0;
+	long flags;
+
+	local_irq_save_hw(flags);
+	v->counter -= i;
+	__temp = v->counter;
+	local_irq_restore_hw(flags);
+
+	return __temp;
+}
+
+static inline void atomic_inc(volatile atomic_t *v)
+{
+	long flags;
+
+	local_irq_save_hw(flags);
+	v->counter++;
+	local_irq_restore_hw(flags);
+}
+
+static inline void atomic_dec(volatile atomic_t *v)
+{
+	long flags;
+
+	local_irq_save_hw(flags);
+	v->counter--;
+	local_irq_restore_hw(flags);
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	long flags;
+
+	local_irq_save_hw(flags);
+	v->counter &= ~mask;
+	local_irq_restore_hw(flags);
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	long flags;
+
+	local_irq_save_hw(flags);
+	v->counter |= mask;
+	local_irq_restore_hw(flags);
+}
+
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec()    barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc()    barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#endif /* !CONFIG_SMP */
+
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
-static inline int atomic_sub_return(int i, atomic_t * v)
-{
-	int __temp = 0;
-	long flags;
-
-	local_irq_save(flags);
-	v->counter -= i;
-	__temp = v->counter;
-	local_irq_restore(flags);
-
-	return __temp;
-}
-
-static __inline__ void atomic_inc(volatile atomic_t * v)
-{
-	long flags;
-
-	local_irq_save(flags);
-	v->counter++;
-	local_irq_restore(flags);
-}
+#define atomic_dec_return(v) atomic_sub_return(1,(v))
+#define atomic_inc_return(v) atomic_add_return(1,(v))
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -88,42 +195,6 @@
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-static __inline__ void atomic_dec(volatile atomic_t * v)
-{
-	long flags;
-
-	local_irq_save(flags);
-	v->counter--;
-	local_irq_restore(flags);
-}
-
-static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t * v)
-{
-	long flags;
-
-	local_irq_save(flags);
-	v->counter &= ~mask;
-	local_irq_restore(flags);
-}
-
-static __inline__ void atomic_set_mask(unsigned int mask, atomic_t * v)
-{
-	long flags;
-
-	local_irq_save(flags);
-	v->counter |= mask;
-	local_irq_restore(flags);
-}
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()    barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc()    barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-#define atomic_inc_return(v) atomic_add_return(1,(v))
-
 /*
  * atomic_inc_and_test - increment and test
  * @v: pointer of type atomic_t
diff --git a/arch/blackfin/include/asm/bfin-global.h b/arch/blackfin/include/asm/bfin-global.h
index 7729566..daffc06 100644
--- a/arch/blackfin/include/asm/bfin-global.h
+++ b/arch/blackfin/include/asm/bfin-global.h
@@ -47,6 +47,9 @@
 # define DMA_UNCACHED_REGION (0)
 #endif
 
+extern void bfin_setup_caches(unsigned int cpu);
+extern void bfin_setup_cpudata(unsigned int cpu);
+
 extern unsigned long get_cclk(void);
 extern unsigned long get_sclk(void);
 extern unsigned long sclk_to_usecs(unsigned long sclk);
@@ -58,8 +61,6 @@
 
 /* init functions only */
 extern int init_arch_irq(void);
-extern void bfin_icache_init(void);
-extern void bfin_dcache_init(void);
 extern void init_exception_vectors(void);
 extern void program_IAR(void);
 
@@ -110,7 +111,7 @@
 
 #ifdef CONFIG_BFIN_ICACHE_LOCK
 extern void cache_grab_lock(int way);
-extern void cache_lock(int way);
+extern void bfin_cache_lock(int way);
 #endif
 
 #endif
diff --git a/arch/blackfin/include/asm/bfin5xx_spi.h b/arch/blackfin/include/asm/bfin5xx_spi.h
index 9fa1915..1306e6b 100644
--- a/arch/blackfin/include/asm/bfin5xx_spi.h
+++ b/arch/blackfin/include/asm/bfin5xx_spi.h
@@ -1,22 +1,12 @@
-/************************************************************
-
-* Copyright (C) 2006-2008, Analog Devices. All Rights Reserved
-*
-* FILE bfin5xx_spi.h
-* PROGRAMMER(S): Luke Yang (Analog Devices Inc.)
-*
-*
-* DATE OF CREATION: March. 10th 2006
-*
-* SYNOPSIS:
-*
-* DESCRIPTION: header file for SPI controller driver for Blackfin5xx.
-**************************************************************
-
-* MODIFICATION HISTORY:
-* March 10, 2006  bfin5xx_spi.h Created. (Luke Yang)
-
-************************************************************/
+/*
+ * Blackfin On-Chip SPI Driver
+ *
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
 
 #ifndef _SPI_CHANNEL_H_
 #define _SPI_CHANNEL_H_
diff --git a/arch/blackfin/include/asm/bfin_sdh.h b/arch/blackfin/include/asm/bfin_sdh.h
new file mode 100644
index 0000000..d61d549
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_sdh.h
@@ -0,0 +1,19 @@
+/*
+ * bfin_sdh.h - Blackfin SDH definitions
+ *
+ * Copyright 2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_SDH_H__
+#define __BFIN_SDH_H__
+
+struct bfin_sd_host {
+	int dma_chan;
+	int irq_int0;
+	int irq_int1;
+	u16 pin_req[7];
+};
+
+#endif
diff --git a/arch/blackfin/include/asm/bfin_sport.h b/arch/blackfin/include/asm/bfin_sport.h
index c76ed8d..fe88a2c 100644
--- a/arch/blackfin/include/asm/bfin_sport.h
+++ b/arch/blackfin/include/asm/bfin_sport.h
@@ -120,9 +120,6 @@
 #define SPORT_IOC_MAGIC		'P'
 #define SPORT_IOC_CONFIG	_IOWR('P', 0x01, struct sport_config)
 
-/* Test purpose */
-#define ENABLE_AD73311		_IOWR('P', 0x02, int)
-
 struct sport_dev {
 	struct cdev cdev;	/* Char device structure */
 
diff --git a/arch/blackfin/include/asm/bfrom.h b/arch/blackfin/include/asm/bfrom.h
index cfe8024..9e4be5e5 100644
--- a/arch/blackfin/include/asm/bfrom.h
+++ b/arch/blackfin/include/asm/bfrom.h
@@ -43,6 +43,11 @@
 static inline void bfrom_SoftReset(void *new_stack)
 {
 	while (1)
+		/*
+		 * We don't declare the SP as clobbered on purpose, since
+		 * it confuses the heck out of the compiler, and this function
+		 * never returns
+		 */
 		__asm__ __volatile__(
 			"sp = %[stack];"
 			"jump (%[bfrom_syscontrol]);"
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index c428e41..21b036e 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -7,7 +7,6 @@
 
 #include <linux/compiler.h>
 #include <asm/byteorder.h>	/* swab32 */
-#include <asm/system.h>		/* save_flags */
 
 #ifdef __KERNEL__
 
@@ -20,96 +19,192 @@
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/ffz.h>
 
-static __inline__ void set_bit(int nr, volatile unsigned long *addr)
+#ifdef CONFIG_SMP
+
+#include <linux/linkage.h>
+
+asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_clear_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_toggle_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_test_set_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_test_clear_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_test_toggle_asm(volatile unsigned long *addr, int nr);
+
+asmlinkage int __raw_bit_test_asm(const volatile unsigned long *addr, int nr);
+
+static inline void set_bit(int nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr + (nr >> 5);
+	__raw_bit_set_asm(a, nr & 0x1f);
+}
+
+static inline void clear_bit(int nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr + (nr >> 5);
+	__raw_bit_clear_asm(a, nr & 0x1f);
+}
+
+static inline void change_bit(int nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr + (nr >> 5);
+	__raw_bit_toggle_asm(a, nr & 0x1f);
+}
+
+static inline int test_bit(int nr, const volatile unsigned long *addr)
+{
+	volatile const unsigned long *a = addr + (nr >> 5);
+	return __raw_bit_test_asm(a, nr & 0x1f) != 0;
+}
+
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr + (nr >> 5);
+	return __raw_bit_test_set_asm(a, nr & 0x1f);
+}
+
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr + (nr >> 5);
+	return __raw_bit_test_clear_asm(a, nr & 0x1f);
+}
+
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+	volatile unsigned long *a = addr + (nr >> 5);
+	return __raw_bit_test_toggle_asm(a, nr & 0x1f);
+}
+
+#else /* !CONFIG_SMP */
+
+#include <asm/system.h>		/* save_flags */
+
+static inline void set_bit(int nr, volatile unsigned long *addr)
 {
 	int *a = (int *)addr;
 	int mask;
 	unsigned long flags;
-
 	a += nr >> 5;
 	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	*a |= mask;
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
-static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
+static inline void clear_bit(int nr, volatile unsigned long *addr)
 {
 	int *a = (int *)addr;
 	int mask;
+	unsigned long flags;
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save_hw(flags);
+	*a &= ~mask;
+	local_irq_restore_hw(flags);
+}
+
+static inline void change_bit(int nr, volatile unsigned long *addr)
+{
+	int mask, flags;
+	unsigned long *ADDR = (unsigned long *)addr;
+
+	ADDR += nr >> 5;
+	mask = 1 << (nr & 31);
+	local_irq_save_hw(flags);
+	*ADDR ^= mask;
+	local_irq_restore_hw(flags);
+}
+
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	int mask, retval;
+	volatile unsigned int *a = (volatile unsigned int *)addr;
+	unsigned long flags;
 
 	a += nr >> 5;
 	mask = 1 << (nr & 0x1f);
+	local_irq_save_hw(flags);
+	retval = (mask & *a) != 0;
 	*a |= mask;
+	local_irq_restore_hw(flags);
+
+	return retval;
 }
 
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	int mask, retval;
+	volatile unsigned int *a = (volatile unsigned int *)addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save_hw(flags);
+	retval = (mask & *a) != 0;
+	*a &= ~mask;
+	local_irq_restore_hw(flags);
+
+	return retval;
+}
+
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+	int mask, retval;
+	volatile unsigned int *a = (volatile unsigned int *)addr;
+	unsigned long flags;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	local_irq_save_hw(flags);
+	retval = (mask & *a) != 0;
+	*a ^= mask;
+	local_irq_restore_hw(flags);
+	return retval;
+}
+
+#endif /* CONFIG_SMP */
+
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
 
-static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
-{
-	int *a = (int *)addr;
-	int mask;
-	unsigned long flags;
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	*a &= ~mask;
-	local_irq_restore(flags);
-}
-
-static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
+static inline void __set_bit(int nr, volatile unsigned long *addr)
 {
 	int *a = (int *)addr;
 	int mask;
 
 	a += nr >> 5;
 	mask = 1 << (nr & 0x1f);
-	*a &= ~mask;
-}
-
-static __inline__ void change_bit(int nr, volatile unsigned long *addr)
-{
-	int mask, flags;
-	unsigned long *ADDR = (unsigned long *)addr;
-
-	ADDR += nr >> 5;
-	mask = 1 << (nr & 31);
-	local_irq_save(flags);
-	*ADDR ^= mask;
-	local_irq_restore(flags);
-}
-
-static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
-{
-	int mask;
-	unsigned long *ADDR = (unsigned long *)addr;
-
-	ADDR += nr >> 5;
-	mask = 1 << (nr & 31);
-	*ADDR ^= mask;
-}
-
-static __inline__ int test_and_set_bit(int nr, void *addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *)addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
 	*a |= mask;
-	local_irq_restore(flags);
-
-	return retval;
 }
 
-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
+{
+	int *a = (int *)addr;
+	int mask;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+	*a &= ~mask;
+}
+
+static inline void __change_bit(int nr, volatile unsigned long *addr)
+{
+	int mask;
+	unsigned long *ADDR = (unsigned long *)addr;
+
+	ADDR += nr >> 5;
+	mask = 1 << (nr & 31);
+	*ADDR ^= mask;
+}
+
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int mask, retval;
 	volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -121,23 +216,7 @@
 	return retval;
 }
 
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *)addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a &= ~mask;
-	local_irq_restore(flags);
-
-	return retval;
-}
-
-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	int mask, retval;
 	volatile unsigned int *a = (volatile unsigned int *)addr;
@@ -149,22 +228,7 @@
 	return retval;
 }
 
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *)addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save(flags);
-	retval = (mask & *a) != 0;
-	*a ^= mask;
-	local_irq_restore(flags);
-	return retval;
-}
-
-static __inline__ int __test_and_change_bit(int nr,
+static inline int __test_and_change_bit(int nr,
 					    volatile unsigned long *addr)
 {
 	int mask, retval;
@@ -177,16 +241,7 @@
 	return retval;
 }
 
-/*
- * This routine doesn't need to be atomic.
- */
-static __inline__ int __constant_test_bit(int nr, const void *addr)
-{
-	return ((1UL << (nr & 31)) &
-		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int __test_bit(int nr, const void *addr)
+static inline int __test_bit(int nr, const void *addr)
 {
 	int *a = (int *)addr;
 	int mask;
@@ -196,10 +251,16 @@
 	return ((mask & *a) != 0);
 }
 
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- __constant_test_bit((nr),(addr)) : \
- __test_bit((nr),(addr)))
+#ifndef CONFIG_SMP
+/*
+ * This routine doesn't need irq save and restore ops in UP
+ * context.
+ */
+static inline int test_bit(int nr, const void *addr)
+{
+	return __test_bit(nr, addr);
+}
+#endif
 
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/hweight.h>
diff --git a/arch/blackfin/include/asm/blackfin.h b/arch/blackfin/include/asm/blackfin.h
index 8749b0e..8bb2cb1 100644
--- a/arch/blackfin/include/asm/blackfin.h
+++ b/arch/blackfin/include/asm/blackfin.h
@@ -6,11 +6,6 @@
 #ifndef _BLACKFIN_H_
 #define _BLACKFIN_H_
 
-#define LO(con32) ((con32) & 0xFFFF)
-#define lo(con32) ((con32) & 0xFFFF)
-#define HI(con32) (((con32) >> 16) & 0xFFFF)
-#define hi(con32) (((con32) >> 16) & 0xFFFF)
-
 #include <mach/anomaly.h>
 
 #ifndef __ASSEMBLY__
@@ -65,6 +60,11 @@
 
 #else  /* __ASSEMBLY__ */
 
+#define LO(con32) ((con32) & 0xFFFF)
+#define lo(con32) ((con32) & 0xFFFF)
+#define HI(con32) (((con32) >> 16) & 0xFFFF)
+#define hi(con32) (((con32) >> 16) & 0xFFFF)
+
 /* SSYNC & CSYNC implementations for assembly files */
 
 #define ssync(x) SSYNC(x)
diff --git a/arch/blackfin/include/asm/byteorder.h b/arch/blackfin/include/asm/byteorder.h
index b9e797a..3e69106 100644
--- a/arch/blackfin/include/asm/byteorder.h
+++ b/arch/blackfin/include/asm/byteorder.h
@@ -1,7 +1,6 @@
 #ifndef _BLACKFIN_BYTEORDER_H
 #define _BLACKFIN_BYTEORDER_H
 
-#include <asm/swab.h>
 #include <linux/byteorder/little_endian.h>
 
 #endif				/* _BLACKFIN_BYTEORDER_H */
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index 023d721..8663781 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -12,6 +12,11 @@
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES	L1_CACHE_BYTES
 
+#ifdef CONFIG_SMP
+#define __cacheline_aligned
+#else
+#define ____cacheline_aligned
+
 /*
  * Put cacheline_aliged data to L1 data memory
  */
@@ -21,9 +26,33 @@
 		__section__(".data_l1.cacheline_aligned")))
 #endif
 
+#endif
+
 /*
  * largest L1 which this arch supports
  */
 #define L1_CACHE_SHIFT_MAX	5
 
+#if defined(CONFIG_SMP) && \
+    !defined(CONFIG_BFIN_CACHE_COHERENT) && \
+    defined(CONFIG_BFIN_DCACHE)
+#define __ARCH_SYNC_CORE_DCACHE
+#ifndef __ASSEMBLY__
+asmlinkage void __raw_smp_mark_barrier_asm(void);
+asmlinkage void __raw_smp_check_barrier_asm(void);
+
+static inline void smp_mark_barrier(void)
+{
+	__raw_smp_mark_barrier_asm();
+}
+static inline void smp_check_barrier(void)
+{
+	__raw_smp_check_barrier_asm();
+}
+
+void resync_core_dcache(void);
+#endif
+#endif
+
+
 #endif
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h
index 4403415..1b040f5 100644
--- a/arch/blackfin/include/asm/cacheflush.h
+++ b/arch/blackfin/include/asm/cacheflush.h
@@ -35,6 +35,7 @@
 extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address);
 extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address);
 extern void blackfin_dflush_page(void *page);
+extern void blackfin_invalidate_entire_dcache(void);
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
@@ -44,12 +45,20 @@
 #define flush_cache_vmap(start, end)		do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)
 
+#ifdef CONFIG_SMP
+#define flush_icache_range_others(start, end)	\
+	smp_icache_flush_range_others((start), (end))
+#else
+#define flush_icache_range_others(start, end)	do { } while (0)
+#endif
+
 static inline void flush_icache_range(unsigned start, unsigned end)
 {
 #if defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_ICACHE)
 
 # if defined(CONFIG_BFIN_WT)
 	blackfin_icache_flush_range((start), (end));
+	flush_icache_range_others(start, end);
 # else
 	blackfin_icache_dcache_flush_range((start), (end));
 # endif
@@ -58,6 +67,7 @@
 
 # if defined(CONFIG_BFIN_ICACHE)
 	blackfin_icache_flush_range((start), (end));
+	flush_icache_range_others(start, end);
 # endif
 # if defined(CONFIG_BFIN_DCACHE)
 	blackfin_dcache_flush_range((start), (end));
@@ -66,10 +76,12 @@
 #endif
 }
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
-     flush_icache_range ((unsigned) (dst), (unsigned) (dst) + (len)); \
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
+do { memcpy(dst, src, len);						\
+     flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len));	\
+     flush_icache_range_others((unsigned long) (dst), (unsigned long) (dst) + (len));\
 } while (0)
+
 #define copy_from_user_page(vma, page, vaddr, dst, src, len)	memcpy(dst, src, len)
 
 #if defined(CONFIG_BFIN_DCACHE)
@@ -82,7 +94,7 @@
 # define flush_dcache_page(page)			blackfin_dflush_page(page_address(page))
 #else
 # define flush_dcache_range(start,end)		do { } while (0)
-# define flush_dcache_page(page)			do { } while (0)
+# define flush_dcache_page(page)		do { } while (0)
 #endif
 
 extern unsigned long reserved_mem_dcache_on;
diff --git a/arch/blackfin/include/asm/checksum.h b/arch/blackfin/include/asm/checksum.h
index 6f6af2b..f67289a 100644
--- a/arch/blackfin/include/asm/checksum.h
+++ b/arch/blackfin/include/asm/checksum.h
@@ -78,7 +78,8 @@
                  "%0 = %0 + %4;\n\t"
                  "NOP;\n\t"
  		 : "=d" (sum)
-		 : "d" (daddr), "d" (saddr), "d" ((ntohs(len)<<16)+proto*256), "d" (1), "0"(sum));
+		 : "d" (daddr), "d" (saddr), "d" ((ntohs(len)<<16)+proto*256), "d" (1), "0"(sum)
+		 : "CC");
 
 	return (sum);
 }
diff --git a/arch/blackfin/include/asm/context.S b/arch/blackfin/include/asm/context.S
index c0e630e..16561ab 100644
--- a/arch/blackfin/include/asm/context.S
+++ b/arch/blackfin/include/asm/context.S
@@ -303,9 +303,14 @@
 	RETI = [sp++];
 	RETS = [sp++];
 
-	p0.h = _irq_flags;
-	p0.l = _irq_flags;
+#ifdef CONFIG_SMP
+	GET_PDA(p0, r0);
+	r0 = [p0 + PDA_IRQFLAGS];
+#else
+	p0.h = _bfin_irq_flags;
+	p0.l = _bfin_irq_flags;
 	r0 = [p0];
+#endif
 	sti r0;
 
 	sp += 4;	/* Skip Reserved */
@@ -353,3 +358,41 @@
 	csync;
 .endm
 
+.macro save_context_cplb
+	[--sp] = (R7:0, P5:0);
+	[--sp] = fp;
+
+	[--sp] = a0.x;
+	[--sp] = a0.w;
+	[--sp] = a1.x;
+	[--sp] = a1.w;
+
+	[--sp] = LC0;
+	[--sp] = LC1;
+	[--sp] = LT0;
+	[--sp] = LT1;
+	[--sp] = LB0;
+	[--sp] = LB1;
+
+	[--sp] = RETS;
+.endm
+
+.macro restore_context_cplb
+	RETS = [sp++];
+
+	LB1 = [sp++];
+	LB0 = [sp++];
+	LT1 = [sp++];
+	LT0 = [sp++];
+	LC1 = [sp++];
+	LC0 = [sp++];
+
+	a1.w = [sp++];
+	a1.x = [sp++];
+	a0.w = [sp++];
+	a0.x = [sp++];
+
+	fp = [sp++];
+
+	(R7:0, P5:0) = [SP++];
+.endm
diff --git a/arch/blackfin/include/asm/cplb-mpu.h b/arch/blackfin/include/asm/cplb-mpu.h
deleted file mode 100644
index 75c67b9..0000000
--- a/arch/blackfin/include/asm/cplb-mpu.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * File:         include/asm-blackfin/cplbinit.h
- * Based on:
- * Author:
- *
- * Created:
- * Description:
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#ifndef __ASM_BFIN_CPLB_MPU_H
-#define __ASM_BFIN_CPLB_MPU_H
-
-struct cplb_entry {
-	unsigned long data, addr;
-};
-
-struct mem_region {
-	unsigned long start, end;
-	unsigned long dcplb_data;
-	unsigned long icplb_data;
-};
-
-extern struct cplb_entry dcplb_tbl[MAX_CPLBS];
-extern struct cplb_entry icplb_tbl[MAX_CPLBS];
-extern int first_switched_icplb;
-extern int first_mask_dcplb;
-extern int first_switched_dcplb;
-
-extern int nr_dcplb_miss, nr_icplb_miss, nr_icplb_supv_miss, nr_dcplb_prot;
-extern int nr_cplb_flush;
-
-extern int page_mask_order;
-extern int page_mask_nelts;
-
-extern unsigned long *current_rwx_mask;
-
-extern void flush_switched_cplbs(void);
-extern void set_mask_dcplbs(unsigned long *);
-
-extern void __noreturn panic_cplb_error(int seqstat, struct pt_regs *);
-
-#endif /* __ASM_BFIN_CPLB_MPU_H */
diff --git a/arch/blackfin/include/asm/cplb.h b/arch/blackfin/include/asm/cplb.h
index 9e8b403..ad566ff 100644
--- a/arch/blackfin/include/asm/cplb.h
+++ b/arch/blackfin/include/asm/cplb.h
@@ -30,7 +30,6 @@
 #ifndef _CPLB_H
 #define _CPLB_H
 
-#include <asm/blackfin.h>
 #include <mach/anomaly.h>
 
 #define SDRAM_IGENERIC    (CPLB_L1_CHBL | CPLB_USER_RD | CPLB_VALID | CPLB_PORTPRIO)
@@ -55,13 +54,24 @@
 #endif
 
 #define L1_DMEMORY       (CPLB_LOCK | CPLB_COMMON)
+
+#ifdef CONFIG_SMP
+#define L2_ATTR           (INITIAL_T | I_CPLB | D_CPLB)
+#define L2_IMEMORY         (CPLB_COMMON | CPLB_LOCK)
+#define L2_DMEMORY         (CPLB_COMMON | CPLB_LOCK)
+
+#else
 #ifdef CONFIG_BFIN_L2_CACHEABLE
 #define L2_IMEMORY        (SDRAM_IGENERIC)
 #define L2_DMEMORY        (SDRAM_DGENERIC)
 #else
 #define L2_IMEMORY        (CPLB_COMMON)
 #define L2_DMEMORY        (CPLB_COMMON)
-#endif
+#endif /* CONFIG_BFIN_L2_CACHEABLE */
+
+#define L2_ATTR           (INITIAL_T | SWITCH_T | I_CPLB | D_CPLB)
+#endif /* CONFIG_SMP */
+
 #define SDRAM_DNON_CHBL  (CPLB_COMMON)
 #define SDRAM_EBIU       (CPLB_COMMON)
 #define SDRAM_OOPS       (CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_LOCK | CPLB_DIRTY)
@@ -71,14 +81,7 @@
 #define SIZE_1M 0x00100000      /* 1M */
 #define SIZE_4M 0x00400000      /* 4M */
 
-#ifdef CONFIG_MPU
 #define MAX_CPLBS 16
-#else
-#define MAX_CPLBS (16 * 2)
-#endif
-
-#define ASYNC_MEMORY_CPLB_COVERAGE	((ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
-				 ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE) / SIZE_4M)
 
 #define CPLB_ENABLE_ICACHE_P	0
 #define CPLB_ENABLE_DCACHE_P	1
@@ -113,4 +116,8 @@
 #define CPLB_INOCACHE   	CPLB_USER_RD | CPLB_VALID
 #define CPLB_IDOCACHE   	CPLB_INOCACHE | CPLB_L1_CHBL
 
+#define FAULT_RW        (1 << 16)
+#define FAULT_USERSUPV  (1 << 17)
+#define FAULT_CPLBBITS  0x0000ffff
+
 #endif				/* _CPLB_H */
diff --git a/arch/blackfin/include/asm/cplbinit.h b/arch/blackfin/include/asm/cplbinit.h
index f845b41..05b14a6 100644
--- a/arch/blackfin/include/asm/cplbinit.h
+++ b/arch/blackfin/include/asm/cplbinit.h
@@ -32,61 +32,56 @@
 
 #include <asm/blackfin.h>
 #include <asm/cplb.h>
+#include <linux/threads.h>
+
+#ifdef CONFIG_CPLB_SWITCH_TAB_L1
+# define PDT_ATTR __attribute__((l1_data))
+#else
+# define PDT_ATTR
+#endif
+
+struct cplb_entry {
+	unsigned long data, addr;
+};
+
+struct cplb_boundary {
+	unsigned long eaddr; /* End of this region.  */
+	unsigned long data; /* CPLB data value.  */
+};
+
+extern struct cplb_boundary dcplb_bounds[];
+extern struct cplb_boundary icplb_bounds[];
+extern int dcplb_nr_bounds, icplb_nr_bounds;
+
+extern struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS];
+extern struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS];
+extern int first_switched_icplb;
+extern int first_switched_dcplb;
+
+extern int nr_dcplb_miss[], nr_icplb_miss[], nr_icplb_supv_miss[];
+extern int nr_dcplb_prot[], nr_cplb_flush[];
 
 #ifdef CONFIG_MPU
 
-#include <asm/cplb-mpu.h>
+extern int first_mask_dcplb;
 
-#else
+extern int page_mask_order;
+extern int page_mask_nelts;
 
-#define INITIAL_T 0x1
-#define SWITCH_T  0x2
-#define I_CPLB    0x4
-#define D_CPLB    0x8
+extern unsigned long *current_rwx_mask[NR_CPUS];
 
-#define IN_KERNEL 1
+extern void flush_switched_cplbs(unsigned int);
+extern void set_mask_dcplbs(unsigned long *, unsigned int);
 
-enum
-{ZERO_P, L1I_MEM, L1D_MEM, SDRAM_KERN , SDRAM_RAM_MTD, SDRAM_DMAZ, RES_MEM, ASYNC_MEM, L2_MEM};
-
-struct cplb_desc {
-	u32 start; /* start address */
-	u32 end; /* end address */
-	u32 psize; /* prefered size if any otherwise 1MB or 4MB*/
-	u16 attr;/* attributes */
-	u16 i_conf;/* I-CPLB DATA */
-	u16 d_conf;/* D-CPLB DATA */
-	u16 valid;/* valid */
-	const s8 name[30];/* name */
-};
-
-struct cplb_tab {
-  u_long *tab;
-	u16 pos;
-	u16 size;
-};
-
-extern u_long icplb_table[];
-extern u_long dcplb_table[];
-
-/* Till here we are discussing about the static memory management model.
- * However, the operating envoronments commonly define more CPLB
- * descriptors to cover the entire addressable memory than will fit into
- * the available on-chip 16 CPLB MMRs. When this happens, the below table
- * will be used which will hold all the potentially required CPLB descriptors
- *
- * This is how Page descriptor Table is implemented in uClinux/Blackfin.
- */
-
-extern u_long ipdt_table[];
-extern u_long dpdt_table[];
-#ifdef CONFIG_CPLB_INFO
-extern u_long ipdt_swapcount_table[];
-extern u_long dpdt_swapcount_table[];
-#endif
+extern void __noreturn panic_cplb_error(int seqstat, struct pt_regs *);
 
 #endif /* CONFIG_MPU */
 
-extern void generate_cplb_tables(void);
+extern void bfin_icache_init(struct cplb_entry *icplb_tbl);
+extern void bfin_dcache_init(struct cplb_entry *icplb_tbl);
 
+#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
+extern void generate_cplb_tables_all(void);
+extern void generate_cplb_tables_cpu(unsigned int cpu);
+#endif
 #endif
diff --git a/arch/blackfin/include/asm/cpu.h b/arch/blackfin/include/asm/cpu.h
new file mode 100644
index 0000000..c2594ef
--- /dev/null
+++ b/arch/blackfin/include/asm/cpu.h
@@ -0,0 +1,41 @@
+/*
+ * File:         arch/blackfin/include/asm/cpu.h.
+ * Author:       Philippe Gerum <rpm@xenomai.org>
+ *
+ *               Copyright 2007 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef __ASM_BLACKFIN_CPU_H
+#define __ASM_BLACKFIN_CPU_H
+
+#include <linux/percpu.h>
+
+struct task_struct;
+
+struct blackfin_cpudata {
+	struct cpu cpu;
+	struct task_struct *idle;
+	unsigned int imemctl;
+	unsigned int dmemctl;
+	unsigned long loops_per_jiffy;
+	unsigned long dcache_invld_count;
+};
+
+DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data);
+
+#endif
diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h
index 6509733..e4f7b80 100644
--- a/arch/blackfin/include/asm/dma.h
+++ b/arch/blackfin/include/asm/dma.h
@@ -1,44 +1,17 @@
 /*
- * File:         include/asm-blackfin/simple_bf533_dma.h
- * Based on:     none - original work
- * Author:       LG Soft India
- *               Copyright (C) 2004-2005 Analog Devices Inc.
- * Created:      Tue Sep 21 2004
- * Description:  This file contains the major Data structures and constants
- * 		 used for DMA Implementation in BF533
- * Modified:
+ * dma.h - Blackfin DMA defines/structures/etc...
  *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Copyright 2004-2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _BLACKFIN_DMA_H_
 #define _BLACKFIN_DMA_H_
 
-#include <asm/io.h>
-#include <linux/slab.h>
-#include <asm/irq.h>
-#include <asm/signal.h>
-
-#include <linux/kernel.h>
-#include <mach/dma.h>
-#include <linux/mm.h>
 #include <linux/interrupt.h>
+#include <mach/dma.h>
 #include <asm/blackfin.h>
+#include <asm/page.h>
 
 #define MAX_DMA_ADDRESS PAGE_OFFSET
 
@@ -79,7 +52,7 @@
 #define DMA_SYNC_RESTART	1
 
 struct dmasg {
-	unsigned long next_desc_addr;
+	void *next_desc_addr;
 	unsigned long start_addr;
 	unsigned short cfg;
 	unsigned short x_count;
@@ -89,7 +62,7 @@
 } __attribute__((packed));
 
 struct dma_register {
-	unsigned long next_desc_ptr;	/* DMA Next Descriptor Pointer register */
+	void *next_desc_ptr;	/* DMA Next Descriptor Pointer register */
 	unsigned long start_addr;	/* DMA Start address  register */
 
 	unsigned short cfg;	/* DMA Configuration register */
@@ -109,7 +82,7 @@
 	short y_modify;	/* DMA y_modify register */
 	unsigned short dummy5;
 
-	unsigned long curr_desc_ptr;	/* DMA Current Descriptor Pointer
+	void *curr_desc_ptr;	/* DMA Current Descriptor Pointer
 					   register */
 	unsigned long curr_addr_ptr;	/* DMA Current Address Pointer
 						   register */
@@ -131,19 +104,15 @@
 
 };
 
-typedef irqreturn_t(*dma_interrupt_t) (int irq, void *dev_id);
-
+struct mutex;
 struct dma_channel {
 	struct mutex dmalock;
-	char *device_id;
+	const char *device_id;
 	enum dma_chan_status chan_status;
-	struct dma_register *regs;
+	volatile struct dma_register *regs;
 	struct dmasg *sg;		/* large mode descriptor */
-	unsigned int ctrl_num;	/* controller number */
-	dma_interrupt_t irq_callback;
+	unsigned int irq;
 	void *data;
-	unsigned int dma_enable_flag;
-	unsigned int loopback_flag;
 #ifdef CONFIG_PM
 	unsigned short saved_peripheral_map;
 #endif
@@ -157,49 +126,132 @@
 /*******************************************************************************
 *	DMA API's
 *******************************************************************************/
-/* functions to set register mode */
-void set_dma_start_addr(unsigned int channel, unsigned long addr);
-void set_dma_next_desc_addr(unsigned int channel, unsigned long addr);
-void set_dma_curr_desc_addr(unsigned int channel, unsigned long addr);
-void set_dma_x_count(unsigned int channel, unsigned short x_count);
-void set_dma_x_modify(unsigned int channel, short x_modify);
-void set_dma_y_count(unsigned int channel, unsigned short y_count);
-void set_dma_y_modify(unsigned int channel, short y_modify);
-void set_dma_config(unsigned int channel, unsigned short config);
-unsigned short set_bfin_dma_config(char direction, char flow_mode,
-				   char intr_mode, char dma_mode, char width,
-				   char syncmode);
-void set_dma_curr_addr(unsigned int channel, unsigned long addr);
+extern struct dma_channel dma_ch[MAX_DMA_CHANNELS];
+extern struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS];
+extern int channel2irq(unsigned int channel);
 
-/* get curr status for polling */
-unsigned short get_dma_curr_irqstat(unsigned int channel);
-unsigned short get_dma_curr_xcount(unsigned int channel);
-unsigned short get_dma_curr_ycount(unsigned int channel);
-unsigned long get_dma_next_desc_ptr(unsigned int channel);
-unsigned long get_dma_curr_desc_ptr(unsigned int channel);
-unsigned long get_dma_curr_addr(unsigned int channel);
+static inline void set_dma_start_addr(unsigned int channel, unsigned long addr)
+{
+	dma_ch[channel].regs->start_addr = addr;
+}
+static inline void set_dma_next_desc_addr(unsigned int channel, void *addr)
+{
+	dma_ch[channel].regs->next_desc_ptr = addr;
+}
+static inline void set_dma_curr_desc_addr(unsigned int channel, void *addr)
+{
+	dma_ch[channel].regs->curr_desc_ptr = addr;
+}
+static inline void set_dma_x_count(unsigned int channel, unsigned short x_count)
+{
+	dma_ch[channel].regs->x_count = x_count;
+}
+static inline void set_dma_y_count(unsigned int channel, unsigned short y_count)
+{
+	dma_ch[channel].regs->y_count = y_count;
+}
+static inline void set_dma_x_modify(unsigned int channel, short x_modify)
+{
+	dma_ch[channel].regs->x_modify = x_modify;
+}
+static inline void set_dma_y_modify(unsigned int channel, short y_modify)
+{
+	dma_ch[channel].regs->y_modify = y_modify;
+}
+static inline void set_dma_config(unsigned int channel, unsigned short config)
+{
+	dma_ch[channel].regs->cfg = config;
+}
+static inline void set_dma_curr_addr(unsigned int channel, unsigned long addr)
+{
+	dma_ch[channel].regs->curr_addr_ptr = addr;
+}
 
-/* set large DMA mode descriptor */
-void set_dma_sg(unsigned int channel, struct dmasg *sg, int nr_sg);
+static inline unsigned short
+set_bfin_dma_config(char direction, char flow_mode,
+		    char intr_mode, char dma_mode, char width, char syncmode)
+{
+	return (direction << 1) | (width << 2) | (dma_mode << 4) |
+		(intr_mode << 6) | (flow_mode << 12) | (syncmode << 5);
+}
 
-/* check if current channel is in use */
-int dma_channel_active(unsigned int channel);
+static inline unsigned short get_dma_curr_irqstat(unsigned int channel)
+{
+	return dma_ch[channel].regs->irq_status;
+}
+static inline unsigned short get_dma_curr_xcount(unsigned int channel)
+{
+	return dma_ch[channel].regs->curr_x_count;
+}
+static inline unsigned short get_dma_curr_ycount(unsigned int channel)
+{
+	return dma_ch[channel].regs->curr_y_count;
+}
+static inline void *get_dma_next_desc_ptr(unsigned int channel)
+{
+	return dma_ch[channel].regs->next_desc_ptr;
+}
+static inline void *get_dma_curr_desc_ptr(unsigned int channel)
+{
+	return dma_ch[channel].regs->curr_desc_ptr;
+}
+static inline unsigned short get_dma_config(unsigned int channel)
+{
+	return dma_ch[channel].regs->cfg;
+}
+static inline unsigned long get_dma_curr_addr(unsigned int channel)
+{
+	return dma_ch[channel].regs->curr_addr_ptr;
+}
 
-/* common functions must be called in any mode */
+static inline void set_dma_sg(unsigned int channel, struct dmasg *sg, int ndsize)
+{
+	dma_ch[channel].regs->cfg =
+		(dma_ch[channel].regs->cfg & ~(0xf << 8)) |
+		((ndsize & 0xf) << 8);
+	dma_ch[channel].regs->next_desc_ptr = sg;
+}
+
+static inline int dma_channel_active(unsigned int channel)
+{
+	if (dma_ch[channel].chan_status == DMA_CHANNEL_FREE)
+		return 0;
+	else
+		return 1;
+}
+
+static inline void disable_dma(unsigned int channel)
+{
+	dma_ch[channel].regs->cfg &= ~DMAEN;
+	SSYNC();
+	dma_ch[channel].chan_status = DMA_CHANNEL_REQUESTED;
+}
+static inline void enable_dma(unsigned int channel)
+{
+	dma_ch[channel].regs->curr_x_count = 0;
+	dma_ch[channel].regs->curr_y_count = 0;
+	dma_ch[channel].regs->cfg |= DMAEN;
+	dma_ch[channel].chan_status = DMA_CHANNEL_ENABLED;
+}
 void free_dma(unsigned int channel);
-int dma_channel_active(unsigned int channel); /* check if a channel is in use */
-void disable_dma(unsigned int channel);
-void enable_dma(unsigned int channel);
-int request_dma(unsigned int channel, char *device_id);
-int set_dma_callback(unsigned int channel, dma_interrupt_t callback,
-		     void *data);
-void dma_disable_irq(unsigned int channel);
-void dma_enable_irq(unsigned int channel);
-void clear_dma_irqstat(unsigned int channel);
+int request_dma(unsigned int channel, const char *device_id);
+int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data);
+
+static inline void dma_disable_irq(unsigned int channel)
+{
+	disable_irq(dma_ch[channel].irq);
+}
+static inline void dma_enable_irq(unsigned int channel)
+{
+	enable_irq(dma_ch[channel].irq);
+}
+static inline void clear_dma_irqstat(unsigned int channel)
+{
+	dma_ch[channel].regs->irq_status = DMA_DONE | DMA_ERR;
+}
+
 void *dma_memcpy(void *dest, const void *src, size_t count);
 void *safe_dma_memcpy(void *dest, const void *src, size_t count);
-
-extern int channel2irq(unsigned int channel);
-extern struct dma_register *dma_io_base_addr[MAX_BLACKFIN_DMA_CHANNEL];
+void blackfin_dma_early_init(void);
 
 #endif
diff --git a/arch/blackfin/include/asm/entry.h b/arch/blackfin/include/asm/entry.h
index c4f721e..b30a296 100644
--- a/arch/blackfin/include/asm/entry.h
+++ b/arch/blackfin/include/asm/entry.h
@@ -27,6 +27,14 @@
 #define SAVE_ALL_SYS		save_context_no_interrupts
 /* This is used for all normal interrupts.  It saves a minimum of registers
    to the stack, loads the IRQ number, and jumps to common code.  */
+#ifdef CONFIG_IPIPE
+# define LOAD_IPIPE_IPEND \
+	P0.l = lo(IPEND); \
+	P0.h = hi(IPEND); \
+	R1 = [P0];
+#else
+# define LOAD_IPIPE_IPEND
+#endif
 #define INTERRUPT_ENTRY(N)						\
     [--sp] = SYSCFG;							\
 									\
@@ -34,6 +42,7 @@
     [--sp] = R0;	/*orig_r0*/					\
     [--sp] = (R7:0,P5:0);						\
     R0 = (N);								\
+    LOAD_IPIPE_IPEND							\
     jump __common_int_entry;
 
 /* For timer interrupts, we need to save IPEND, since the user_mode
@@ -53,9 +62,11 @@
 /* This one pushes RETI without using CLI.  Interrupts are enabled.  */
 #define SAVE_CONTEXT_SYSCALL	save_context_syscall
 #define SAVE_CONTEXT		save_context_with_interrupts
+#define SAVE_CONTEXT_CPLB	save_context_cplb
 
 #define RESTORE_ALL_SYS		restore_context_no_interrupts
 #define RESTORE_CONTEXT		restore_context_with_interrupts
+#define RESTORE_CONTEXT_CPLB	restore_context_cplb
 
 #endif				/* __ASSEMBLY__ */
 #endif				/* __BFIN_ENTRY_H */
diff --git a/arch/blackfin/include/asm/gpio.h b/arch/blackfin/include/asm/gpio.h
index ad33ac2..9477d82 100644
--- a/arch/blackfin/include/asm/gpio.h
+++ b/arch/blackfin/include/asm/gpio.h
@@ -84,11 +84,14 @@
 #ifndef __ARCH_BLACKFIN_GPIO_H__
 #define __ARCH_BLACKFIN_GPIO_H__
 
-#define gpio_bank(x) ((x) >> 4)
-#define gpio_bit(x)  (1<<((x) & 0xF))
-#define gpio_sub_n(x) ((x) & 0xF)
+#define gpio_bank(x) 	((x) >> 4)
+#define gpio_bit(x)  	(1<<((x) & 0xF))
+#define gpio_sub_n(x) 	((x) & 0xF)
 
-#define GPIO_BANKSIZE 16
+#define GPIO_BANKSIZE 	16
+#define GPIO_BANK_NUM 	DIV_ROUND_UP(MAX_BLACKFIN_GPIOS, GPIO_BANKSIZE)
+
+#include <mach/gpio.h>
 
 #define	GPIO_0	0
 #define	GPIO_1	1
@@ -139,151 +142,9 @@
 #define	GPIO_46	46
 #define	GPIO_47	47
 
-
 #define PERIPHERAL_USAGE 1
 #define GPIO_USAGE 0
 
-#ifdef BF533_FAMILY
-#define MAX_BLACKFIN_GPIOS 16
-
-#define	GPIO_PF0	0
-#define	GPIO_PF1	1
-#define	GPIO_PF2	2
-#define	GPIO_PF3	3
-#define	GPIO_PF4	4
-#define	GPIO_PF5	5
-#define	GPIO_PF6	6
-#define	GPIO_PF7	7
-#define	GPIO_PF8	8
-#define	GPIO_PF9	9
-#define	GPIO_PF10	10
-#define	GPIO_PF11	11
-#define	GPIO_PF12	12
-#define	GPIO_PF13	13
-#define	GPIO_PF14	14
-#define	GPIO_PF15	15
-
-#endif
-
-#if defined(BF527_FAMILY) || defined(BF537_FAMILY)
-#define MAX_BLACKFIN_GPIOS 48
-
-#define	GPIO_PF0	0
-#define	GPIO_PF1	1
-#define	GPIO_PF2	2
-#define	GPIO_PF3	3
-#define	GPIO_PF4	4
-#define	GPIO_PF5	5
-#define	GPIO_PF6	6
-#define	GPIO_PF7	7
-#define	GPIO_PF8	8
-#define	GPIO_PF9	9
-#define	GPIO_PF10	10
-#define	GPIO_PF11	11
-#define	GPIO_PF12	12
-#define	GPIO_PF13	13
-#define	GPIO_PF14	14
-#define	GPIO_PF15	15
-#define	GPIO_PG0	16
-#define	GPIO_PG1	17
-#define	GPIO_PG2	18
-#define	GPIO_PG3	19
-#define	GPIO_PG4	20
-#define	GPIO_PG5	21
-#define	GPIO_PG6	22
-#define	GPIO_PG7	23
-#define	GPIO_PG8	24
-#define	GPIO_PG9	25
-#define	GPIO_PG10      	26
-#define	GPIO_PG11      	27
-#define	GPIO_PG12      	28
-#define	GPIO_PG13      	29
-#define	GPIO_PG14      	30
-#define	GPIO_PG15      	31
-#define	GPIO_PH0	32
-#define	GPIO_PH1	33
-#define	GPIO_PH2	34
-#define	GPIO_PH3	35
-#define	GPIO_PH4	36
-#define	GPIO_PH5	37
-#define	GPIO_PH6	38
-#define	GPIO_PH7	39
-#define	GPIO_PH8	40
-#define	GPIO_PH9	41
-#define	GPIO_PH10      	42
-#define	GPIO_PH11      	43
-#define	GPIO_PH12      	44
-#define	GPIO_PH13      	45
-#define	GPIO_PH14      	46
-#define	GPIO_PH15      	47
-
-#define PORT_F GPIO_PF0
-#define PORT_G GPIO_PG0
-#define PORT_H GPIO_PH0
-
-#endif
-
-#ifdef BF548_FAMILY
-#include <mach/gpio.h>
-#endif
-
-#ifdef BF561_FAMILY
-#define MAX_BLACKFIN_GPIOS 48
-
-#define	GPIO_PF0	0
-#define	GPIO_PF1	1
-#define	GPIO_PF2	2
-#define	GPIO_PF3	3
-#define	GPIO_PF4	4
-#define	GPIO_PF5	5
-#define	GPIO_PF6	6
-#define	GPIO_PF7	7
-#define	GPIO_PF8	8
-#define	GPIO_PF9	9
-#define	GPIO_PF10	10
-#define	GPIO_PF11	11
-#define	GPIO_PF12	12
-#define	GPIO_PF13	13
-#define	GPIO_PF14	14
-#define	GPIO_PF15	15
-#define	GPIO_PF16	16
-#define	GPIO_PF17	17
-#define	GPIO_PF18	18
-#define	GPIO_PF19	19
-#define	GPIO_PF20	20
-#define	GPIO_PF21	21
-#define	GPIO_PF22	22
-#define	GPIO_PF23	23
-#define	GPIO_PF24	24
-#define	GPIO_PF25	25
-#define	GPIO_PF26	26
-#define	GPIO_PF27	27
-#define	GPIO_PF28	28
-#define	GPIO_PF29	29
-#define	GPIO_PF30	30
-#define	GPIO_PF31	31
-#define	GPIO_PF32	32
-#define	GPIO_PF33	33
-#define	GPIO_PF34	34
-#define	GPIO_PF35	35
-#define	GPIO_PF36	36
-#define	GPIO_PF37	37
-#define	GPIO_PF38	38
-#define	GPIO_PF39	39
-#define	GPIO_PF40	40
-#define	GPIO_PF41	41
-#define	GPIO_PF42	42
-#define	GPIO_PF43	43
-#define	GPIO_PF44	44
-#define	GPIO_PF45	45
-#define	GPIO_PF46	46
-#define	GPIO_PF47	47
-
-#define PORT_FIO0 GPIO_0
-#define PORT_FIO1 GPIO_16
-#define PORT_FIO2 GPIO_32
-#endif
-
 #ifndef __ASSEMBLY__
 
 /***********************************************************
@@ -425,20 +286,77 @@
 * MODIFICATION HISTORY :
 **************************************************************/
 
-int gpio_request(unsigned, const char *);
-void gpio_free(unsigned);
-
-void gpio_set_value(unsigned gpio, int arg);
-int gpio_get_value(unsigned gpio);
+int bfin_gpio_request(unsigned gpio, const char *label);
+void bfin_gpio_free(unsigned gpio);
+int bfin_gpio_irq_request(unsigned gpio, const char *label);
+void bfin_gpio_irq_free(unsigned gpio);
+int bfin_gpio_direction_input(unsigned gpio);
+int bfin_gpio_direction_output(unsigned gpio, int value);
+int bfin_gpio_get_value(unsigned gpio);
+void bfin_gpio_set_value(unsigned gpio, int value);
 
 #ifndef BF548_FAMILY
-#define gpio_set_value(gpio, value)	set_gpio_data(gpio, value)
+#define bfin_gpio_set_value(gpio, value)    set_gpio_data(gpio, value)
 #endif
 
-int gpio_direction_input(unsigned gpio);
-int gpio_direction_output(unsigned gpio, int value);
+#ifdef CONFIG_GPIOLIB
+#include <asm-generic/gpio.h>		/* cansleep wrappers */
+
+static inline int gpio_get_value(unsigned int gpio)
+{
+	if (gpio < MAX_BLACKFIN_GPIOS)
+		return bfin_gpio_get_value(gpio);
+	else
+		return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned int gpio, int value)
+{
+	if (gpio < MAX_BLACKFIN_GPIOS)
+		bfin_gpio_set_value(gpio, value);
+	else
+		__gpio_set_value(gpio, value);
+}
+
+static inline int gpio_cansleep(unsigned int gpio)
+{
+	return __gpio_cansleep(gpio);
+}
+
+#else /* !CONFIG_GPIOLIB */
+
+static inline int gpio_request(unsigned gpio, const char *label)
+{
+	return bfin_gpio_request(gpio, label);
+}
+
+static inline void gpio_free(unsigned gpio)
+{
+	return bfin_gpio_free(gpio);
+}
+
+static inline int gpio_direction_input(unsigned gpio)
+{
+	return bfin_gpio_direction_input(gpio);
+}
+
+static inline int gpio_direction_output(unsigned gpio, int value)
+{
+	return bfin_gpio_direction_output(gpio, value);
+}
+
+static inline int gpio_get_value(unsigned gpio)
+{
+	return bfin_gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+	return bfin_gpio_set_value(gpio, value);
+}
 
 #include <asm-generic/gpio.h>		/* cansleep wrappers */
+#endif	/* !CONFIG_GPIOLIB */
 #include <asm/irq.h>
 
 static inline int gpio_to_irq(unsigned gpio)
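
A minimal usage sketch (hypothetical driver code, not part of the patch) shows what the split buys: pins below MAX_BLACKFIN_GPIOS go through the bfin_gpio_* backend, anything above falls through to gpiolib, and callers keep using the generic API either way.

/* Hypothetical driver snippet, assuming 'led_gpio' is a valid pin number. */
static int example_led_init(unsigned led_gpio)
{
	int ret;

	ret = gpio_request(led_gpio, "example-led");
	if (ret)
		return ret;

	ret = gpio_direction_output(led_gpio, 0);	/* drive low initially */
	if (ret)
		gpio_free(led_gpio);

	return ret;
}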
diff --git a/arch/blackfin/include/asm/hardirq.h b/arch/blackfin/include/asm/hardirq.h
index b6b19f1..717181a 100644
--- a/arch/blackfin/include/asm/hardirq.h
+++ b/arch/blackfin/include/asm/hardirq.h
@@ -42,4 +42,6 @@
 
 #define __ARCH_IRQ_EXIT_IRQS_DISABLED	1
 
+extern void ack_bad_irq(unsigned int irq);
+
 #endif
diff --git a/arch/blackfin/include/asm/io.h b/arch/blackfin/include/asm/io.h
index 7dc77a2..63b2d8c 100644
--- a/arch/blackfin/include/asm/io.h
+++ b/arch/blackfin/include/asm/io.h
@@ -94,12 +94,12 @@
 #define outw_p(x,addr) outw(x,addr)
 #define outl_p(x,addr) outl(x,addr)
 
-#define ioread8_rep(a,d,c)	insb(a,d,c)
-#define ioread16_rep(a,d,c)	insw(a,d,c)
-#define ioread32_rep(a,d,c)	insl(a,d,c)
-#define iowrite8_rep(a,s,c)	outsb(a,s,c)
-#define iowrite16_rep(a,s,c)	outsw(a,s,c)
-#define iowrite32_rep(a,s,c)	outsl(a,s,c)
+#define ioread8_rep(a,d,c)	readsb(a,d,c)
+#define ioread16_rep(a,d,c)	readsw(a,d,c)
+#define ioread32_rep(a,d,c)	readsl(a,d,c)
+#define iowrite8_rep(a,s,c)	writesb(a,s,c)
+#define iowrite16_rep(a,s,c)	writesw(a,s,c)
+#define iowrite32_rep(a,s,c)	writesl(a,s,c)
 
 #define ioread8(X)			readb(X)
 #define ioread16(X)			readw(X)
@@ -108,6 +108,8 @@
 #define iowrite16(val,X)		writew(val,X)
 #define iowrite32(val,X)		writel(val,X)
 
+#define mmiowb() wmb()
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 /* Values for nocacheflag and cmode */
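
For context, the string accessors remapped above are normally driven like this (the device pointer and word count are assumed for illustration only):

/* Hypothetical sketch: drain 'words' 16-bit entries from a device FIFO
 * through ioread16_rep(), which now expands to readsw() on this port. */
static void example_read_fifo(void __iomem *fifo, u16 *buf, unsigned int words)
{
	ioread16_rep(fifo, buf, words);
}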
diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h
new file mode 100644
index 0000000..76f53d8
--- /dev/null
+++ b/arch/blackfin/include/asm/ipipe.h
@@ -0,0 +1,278 @@
+/*   -*- linux-c -*-
+ *   arch/blackfin/include/asm/ipipe.h
+ *
+ *   Copyright (C) 2002-2007 Philippe Gerum.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ASM_BLACKFIN_IPIPE_H
+#define __ASM_BLACKFIN_IPIPE_H
+
+#ifdef CONFIG_IPIPE
+
+#include <linux/cpumask.h>
+#include <linux/list.h>
+#include <linux/threads.h>
+#include <linux/irq.h>
+#include <linux/ipipe_percpu.h>
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/atomic.h>
+#include <asm/traps.h>
+
+#define IPIPE_ARCH_STRING     "1.8-00"
+#define IPIPE_MAJOR_NUMBER    1
+#define IPIPE_MINOR_NUMBER    8
+#define IPIPE_PATCH_NUMBER    0
+
+#ifdef CONFIG_SMP
+#error "I-pipe/blackfin: SMP not implemented"
+#else /* !CONFIG_SMP */
+#define ipipe_processor_id()	0
+#endif	/* CONFIG_SMP */
+
+#define prepare_arch_switch(next)		\
+do {						\
+	ipipe_schedule_notify(current, next);	\
+	local_irq_disable_hw();			\
+} while (0)
+
+#define task_hijacked(p)						\
+	({								\
+		int __x__ = ipipe_current_domain != ipipe_root_domain;	\
+		/* We would need to clear the SYNC flag for the root domain */ \
+		/* over the current processor in SMP mode. */		\
+		local_irq_enable_hw(); __x__;				\
+	})
+
+struct ipipe_domain;
+
+struct ipipe_sysinfo {
+
+	int ncpus;		/* Number of CPUs on board */
+	u64 cpufreq;		/* CPU frequency (in Hz) */
+
+	/* Arch-dependent block */
+
+	struct {
+		unsigned tmirq;	/* Timer tick IRQ */
+		u64 tmfreq;	/* Timer frequency */
+	} archdep;
+};
+
+#define ipipe_read_tsc(t)					\
+	({							\
+	unsigned long __cy2;					\
+	__asm__ __volatile__ ("1: %0 = CYCLES2\n"		\
+				"%1 = CYCLES\n"			\
+				"%2 = CYCLES2\n"		\
+				"CC = %2 == %0\n"		\
+				"if ! CC jump 1b\n"		\
+				: "=r" (((unsigned long *)&t)[1]),	\
+				  "=r" (((unsigned long *)&t)[0]),	\
+				  "=r" (__cy2)				\
+				: /*no input*/ : "CC");			\
+	t;								\
+	})
+
+#define ipipe_cpu_freq()	__ipipe_core_clock
+#define ipipe_tsc2ns(_t)	(((unsigned long)(_t)) * __ipipe_freq_scale)
+#define ipipe_tsc2us(_t)	(ipipe_tsc2ns(_t) / 1000 + 1)
+
+/* Private interface -- Internal use only */
+
+#define __ipipe_check_platform()	do { } while (0)
+
+#define __ipipe_init_platform()		do { } while (0)
+
+extern atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
+
+extern unsigned long __ipipe_irq_lvmask;
+
+extern struct ipipe_domain ipipe_root;
+
+/* enable/disable_irqdesc _must_ be used in pairs. */
+
+void __ipipe_enable_irqdesc(struct ipipe_domain *ipd,
+			    unsigned irq);
+
+void __ipipe_disable_irqdesc(struct ipipe_domain *ipd,
+			     unsigned irq);
+
+#define __ipipe_enable_irq(irq)		(irq_desc[irq].chip->unmask(irq))
+
+#define __ipipe_disable_irq(irq)	(irq_desc[irq].chip->mask(irq))
+
+#define __ipipe_lock_root()					\
+	set_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags)
+
+#define __ipipe_unlock_root()					\
+	clear_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags)
+
+void __ipipe_enable_pipeline(void);
+
+#define __ipipe_hook_critical_ipi(ipd) do { } while (0)
+
+#define __ipipe_sync_pipeline(syncmask)					\
+	do {								\
+		struct ipipe_domain *ipd = ipipe_current_domain;	\
+		if (likely(ipd != ipipe_root_domain || !test_bit(IPIPE_ROOTLOCK_FLAG, &ipd->flags))) \
+			__ipipe_sync_stage(syncmask);			\
+	} while (0)
+
+void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs);
+
+int __ipipe_get_irq_priority(unsigned irq);
+
+int __ipipe_get_irqthread_priority(unsigned irq);
+
+void __ipipe_stall_root_raw(void);
+
+void __ipipe_unstall_root_raw(void);
+
+void __ipipe_serial_debug(const char *fmt, ...);
+
+DECLARE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
+
+extern unsigned long __ipipe_core_clock;
+
+extern unsigned long __ipipe_freq_scale;
+
+extern unsigned long __ipipe_irq_tail_hook;
+
+static inline unsigned long __ipipe_ffnz(unsigned long ul)
+{
+	return ffs(ul) - 1;
+}
+
+#define __ipipe_run_irqtail()  /* Must be a macro */			\
+	do {								\
+		asmlinkage void __ipipe_call_irqtail(void);		\
+		unsigned long __pending;				\
+		CSYNC();					\
+		__pending = bfin_read_IPEND();				\
+		if (__pending & 0x8000) {				\
+			__pending &= ~0x8010;				\
+			if (__pending && (__pending & (__pending - 1)) == 0) \
+				__ipipe_call_irqtail();			\
+		}							\
+	} while (0)
+
+#define __ipipe_run_isr(ipd, irq)					\
+	do {								\
+		if (ipd == ipipe_root_domain) {				\
+			/*						\
+			 * Note: the I-pipe implements a threaded interrupt model on \
+			 * this arch for Linux external IRQs. The interrupt handler we \
+			 * call here only wakes up the associated IRQ thread. \
+			 */						\
+			if (ipipe_virtual_irq_p(irq)) {			\
+				/* No irqtail here; virtual interrupts have no effect \
+				   on IPEND so there is no need for processing \
+				   deferral. */				\
+				local_irq_enable_nohead(ipd);		\
+				ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
+				local_irq_disable_nohead(ipd);		\
+			} else						\
+				/*					\
+				 * No need to run the irqtail here either; \
+				 * we can't be preempted by hw IRQs, so	\
+				 * non-Linux IRQs cannot stack over the short \
+				 * thread wakeup code. Which in turn means \
+				 * that no irqtail condition could be pending \
+				 * for domains above Linux in the pipeline. \
+				 */					\
+				ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \
+		} else {						\
+			__clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
+			local_irq_enable_nohead(ipd);			\
+			ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
+			/* Attempt to exit the outer interrupt level before \
+			 * starting the deferred IRQ processing. */	\
+			local_irq_disable_nohead(ipd);			\
+			__ipipe_run_irqtail();				\
+			__set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
+		}							\
+	} while (0)
+
+#define __ipipe_syscall_watched_p(p, sc)	\
+	(((p)->flags & PF_EVNOTIFY) || (unsigned long)sc >= NR_syscalls)
+
+void ipipe_init_irq_threads(void);
+
+int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
+
+#define IS_SYSIRQ(irq)		((irq) > IRQ_CORETMR && (irq) <= SYS_IRQS)
+#define IS_GPIOIRQ(irq)		((irq) >= GPIO_IRQ_BASE && (irq) < NR_IRQS)
+
+#define IRQ_SYSTMR		IRQ_TIMER0
+#define IRQ_PRIOTMR		CONFIG_IRQ_TIMER0
+
+#if defined(CONFIG_BF531) || defined(CONFIG_BF532) || defined(CONFIG_BF533)
+#define PRIO_GPIODEMUX(irq)	CONFIG_PFA
+#elif defined(CONFIG_BF534) || defined(CONFIG_BF536) || defined(CONFIG_BF537)
+#define PRIO_GPIODEMUX(irq)	CONFIG_IRQ_PROG_INTA
+#elif defined(CONFIG_BF52x)
+#define PRIO_GPIODEMUX(irq)	((irq) == IRQ_PORTF_INTA ? CONFIG_IRQ_PORTF_INTA : \
+				 (irq) == IRQ_PORTG_INTA ? CONFIG_IRQ_PORTG_INTA : \
+				 (irq) == IRQ_PORTH_INTA ? CONFIG_IRQ_PORTH_INTA : \
+				 -1)
+#elif defined(CONFIG_BF561)
+#define PRIO_GPIODEMUX(irq)	((irq) == IRQ_PROG0_INTA ? CONFIG_IRQ_PROG0_INTA : \
+				 (irq) == IRQ_PROG1_INTA ? CONFIG_IRQ_PROG1_INTA : \
+				 (irq) == IRQ_PROG2_INTA ? CONFIG_IRQ_PROG2_INTA : \
+				 -1)
+#define bfin_write_TIMER_DISABLE(val)	bfin_write_TMRS8_DISABLE(val)
+#define bfin_write_TIMER_ENABLE(val)	bfin_write_TMRS8_ENABLE(val)
+#define bfin_write_TIMER_STATUS(val)	bfin_write_TMRS8_STATUS(val)
+#define bfin_read_TIMER_STATUS()	bfin_read_TMRS8_STATUS()
+#elif defined(CONFIG_BF54x)
+#define PRIO_GPIODEMUX(irq)	((irq) == IRQ_PINT0 ? CONFIG_IRQ_PINT0 : \
+				 (irq) == IRQ_PINT1 ? CONFIG_IRQ_PINT1 : \
+				 (irq) == IRQ_PINT2 ? CONFIG_IRQ_PINT2 : \
+				 (irq) == IRQ_PINT3 ? CONFIG_IRQ_PINT3 : \
+				 -1)
+#define bfin_write_TIMER_DISABLE(val)	bfin_write_TIMER_DISABLE0(val)
+#define bfin_write_TIMER_ENABLE(val)	bfin_write_TIMER_ENABLE0(val)
+#define bfin_write_TIMER_STATUS(val)	bfin_write_TIMER_STATUS0(val)
+#define bfin_read_TIMER_STATUS(val)	bfin_read_TIMER_STATUS0(val)
+#else
+# error "no PRIO_GPIODEMUX() for this part"
+#endif
+
+#define __ipipe_root_tick_p(regs)	((regs->ipend & 0x10) != 0)
+
+#else /* !CONFIG_IPIPE */
+
+#define task_hijacked(p)		0
+#define ipipe_trap_notify(t, r)  	0
+
+#define __ipipe_stall_root_raw()	do { } while (0)
+#define __ipipe_unstall_root_raw()	do { } while (0)
+
+#define ipipe_init_irq_threads()		do { } while (0)
+#define ipipe_start_irq_thread(irq, desc)	0
+
+#define IRQ_SYSTMR		IRQ_CORETMR
+#define IRQ_PRIOTMR		IRQ_CORETMR
+
+#define __ipipe_root_tick_p(regs)	1
+
+#endif /* !CONFIG_IPIPE */
+
+#endif	/* !__ASM_BLACKFIN_IPIPE_H */
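
As an illustration of the TSC helpers introduced here (purely a sketch; the caller is hypothetical), the 64-bit cycle count can be sampled and scaled to nanoseconds like so:

/* Hypothetical helper, not part of the patch. */
static inline unsigned long long example_cycles_ns(void)
{
	unsigned long long t;

	ipipe_read_tsc(t);	/* CYCLES/CYCLES2 read with rollover re-check */
	return ipipe_tsc2ns(t);
}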
diff --git a/arch/blackfin/include/asm/ipipe_base.h b/arch/blackfin/include/asm/ipipe_base.h
new file mode 100644
index 0000000..cb1025a
--- /dev/null
+++ b/arch/blackfin/include/asm/ipipe_base.h
@@ -0,0 +1,80 @@
+/*   -*- linux-c -*-
+ *   arch/blackfin/include/asm/ipipe_base.h
+ *
+ *   Copyright (C) 2007 Philippe Gerum.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ASM_BLACKFIN_IPIPE_BASE_H
+#define __ASM_BLACKFIN_IPIPE_BASE_H
+
+#ifdef CONFIG_IPIPE
+
+#define IPIPE_NR_XIRQS		NR_IRQS
+#define IPIPE_IRQ_ISHIFT	5	/* 2^5 for a 32-bit arch. */
+
+/* Blackfin-specific, global domain flags */
+#define IPIPE_ROOTLOCK_FLAG	1	/* Lock pipeline for root */
+
+ /* Blackfin traps -- i.e. exception vector numbers */
+#define IPIPE_NR_FAULTS		52 /* We leave a gap after VEC_ILL_RES. */
+/* Pseudo-vectors used for kernel events */
+#define IPIPE_FIRST_EVENT	IPIPE_NR_FAULTS
+#define IPIPE_EVENT_SYSCALL	(IPIPE_FIRST_EVENT)
+#define IPIPE_EVENT_SCHEDULE	(IPIPE_FIRST_EVENT + 1)
+#define IPIPE_EVENT_SIGWAKE	(IPIPE_FIRST_EVENT + 2)
+#define IPIPE_EVENT_SETSCHED	(IPIPE_FIRST_EVENT + 3)
+#define IPIPE_EVENT_INIT	(IPIPE_FIRST_EVENT + 4)
+#define IPIPE_EVENT_EXIT	(IPIPE_FIRST_EVENT + 5)
+#define IPIPE_EVENT_CLEANUP	(IPIPE_FIRST_EVENT + 6)
+#define IPIPE_LAST_EVENT	IPIPE_EVENT_CLEANUP
+#define IPIPE_NR_EVENTS		(IPIPE_LAST_EVENT + 1)
+
+#define IPIPE_TIMER_IRQ		IRQ_CORETMR
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bitops.h>
+
+extern int test_bit(int nr, const void *addr);
+
+
+extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */
+
+static inline void __ipipe_stall_root(void)
+{
+	volatile unsigned long *p = &__ipipe_root_status;
+	set_bit(0, p);
+}
+
+static inline unsigned long __ipipe_test_and_stall_root(void)
+{
+	volatile unsigned long *p = &__ipipe_root_status;
+	return test_and_set_bit(0, p);
+}
+
+static inline unsigned long __ipipe_test_root(void)
+{
+	const unsigned long *p = &__ipipe_root_status;
+	return test_bit(0, p);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_IPIPE */
+
+#endif /* !__ASM_BLACKFIN_IPIPE_BASE_H */
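
The bit manipulated by these helpers is the root domain's virtual interrupt mask; a typical save/restore pairing (hypothetical caller; __ipipe_unstall_root() is declared elsewhere in this patch) looks like:

/* Illustrative sketch only. */
static inline void example_virtually_atomic(void)
{
	unsigned long was_stalled = __ipipe_test_and_stall_root();

	/* ... section that must not be preempted by root-domain handlers ... */

	if (!was_stalled)
		__ipipe_unstall_root();
}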
diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h
index 89f59e1..3d97790 100644
--- a/arch/blackfin/include/asm/irq.h
+++ b/arch/blackfin/include/asm/irq.h
@@ -17,56 +17,272 @@
 #ifndef _BFIN_IRQ_H_
 #define _BFIN_IRQ_H_
 
-#include <mach/irq.h>
-#include <asm/ptrace.h>
-
-/*******************************************************************************
- *****   INTRODUCTION ***********
- *   On the Blackfin, the interrupt structure allows remmapping of the hardware
- *   levels.
- * - I'm going to assume that the H/W level is going to stay at the default
- *   settings. If someone wants to go through and abstart this out, feel free
- *   to mod the interrupt numbering scheme.
- * - I'm abstracting the interrupts so that uClinux does not know anything
- *   about the H/W levels. If you want to change the H/W AND keep the abstracted
- *   levels that uClinux sees, you should be able to do most of it here.
- * - I've left the "abstract" numbering sparce in case someone wants to pull the
- *   interrupts apart (just the TX/RX for the various devices)
- *******************************************************************************/
-
 /* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h>*/
+#include <mach/irq.h>
+#include <asm/pda.h>
+#include <asm/processor.h>
+
+#ifdef CONFIG_SMP
+/* Forward declaration needed due to cdef interdependencies */
+static inline uint32_t __pure bfin_dspid(void);
+# define blackfin_core_id() (bfin_dspid() & 0xff)
+# define bfin_irq_flags cpu_pda[blackfin_core_id()].imask
+#else
+extern unsigned long bfin_irq_flags;
+#endif
+
+#ifdef CONFIG_IPIPE
+
+#include <linux/ipipe_trace.h>
+
+void __ipipe_unstall_root(void);
+
+void __ipipe_restore_root(unsigned long flags);
+
+#ifdef CONFIG_DEBUG_HWERR
+# define __all_masked_irq_flags 0x3f
+# define __save_and_cli_hw(x) \
+	__asm__ __volatile__( \
+		"cli %0;" \
+		"sti %1;" \
+		: "=&d"(x) \
+		: "d" (0x3F) \
+	)
+#else
+# define __all_masked_irq_flags 0x1f
+# define __save_and_cli_hw(x) \
+	__asm__ __volatile__( \
+		"cli %0;" \
+		: "=&d"(x) \
+	)
+#endif
+
+#define irqs_enabled_from_flags_hw(x)	((x) != __all_masked_irq_flags)
+#define raw_irqs_disabled_flags(flags)	(!irqs_enabled_from_flags_hw(flags))
+#define local_test_iflag_hw(x)		irqs_enabled_from_flags_hw(x)
+
+#define local_save_flags(x)						\
+	do {								\
+		(x) = __ipipe_test_root() ? \
+			__all_masked_irq_flags : bfin_irq_flags; \
+	} while (0)
+
+#define local_irq_save(x)				\
+	do {						\
+		(x) = __ipipe_test_and_stall_root();	\
+	} while (0)
+
+#define local_irq_restore(x)	__ipipe_restore_root(x)
+#define local_irq_disable()	__ipipe_stall_root()
+#define local_irq_enable()	__ipipe_unstall_root()
+#define irqs_disabled()		__ipipe_test_root()
+
+#define local_save_flags_hw(x) \
+	__asm__ __volatile__( \
+		"cli %0;" \
+		"sti %0;" \
+		: "=d"(x) \
+	)
+
+#define	irqs_disabled_hw()				\
+	({						\
+		unsigned long flags;			\
+		local_save_flags_hw(flags);		\
+		!irqs_enabled_from_flags_hw(flags);	\
+	})
+
+static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real)
+{
+	/* Merge virtual and real interrupt mask bits into a single
+	   32-bit word. */
+	return (real & ~(1 << 31)) | ((virt != 0) << 31);
+}
+
+static inline int raw_demangle_irq_bits(unsigned long *x)
+{
+	int virt = (*x & (1 << 31)) != 0;
+	*x &= ~(1L << 31);
+	return virt;
+}
+
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
+
+#define local_irq_disable_hw()						\
+	do {								\
+		int _tmp_dummy;						\
+		if (!irqs_disabled_hw())				\
+			ipipe_trace_begin(0x80000000);			\
+		__asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : );	\
+	} while (0)
+
+#define local_irq_enable_hw()						\
+	do {								\
+		if (irqs_disabled_hw())					\
+			ipipe_trace_end(0x80000000);			\
+		__asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags));	\
+	} while (0)
+
+#define local_irq_save_hw(x)				\
+	do {						\
+		__save_and_cli_hw(x);			\
+		if (local_test_iflag_hw(x))		\
+			ipipe_trace_begin(0x80000001);	\
+	} while (0)
+
+#define local_irq_restore_hw(x)				\
+	do {						\
+		if (local_test_iflag_hw(x)) {		\
+			ipipe_trace_end(0x80000001);	\
+			local_irq_enable_hw_notrace();	\
+		}					\
+	} while (0)
+
+#define local_irq_disable_hw_notrace()					\
+	do {								\
+		int _tmp_dummy;						\
+		__asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : );	\
+	} while (0)
+
+#define local_irq_enable_hw_notrace() \
+	__asm__ __volatile__( \
+		"sti %0;" \
+		: \
+		: "d"(bfin_irq_flags) \
+	)
+
+#define local_irq_save_hw_notrace(x) __save_and_cli_hw(x)
+
+#define local_irq_restore_hw_notrace(x)			\
+	do {						\
+		if (local_test_iflag_hw(x))		\
+			local_irq_enable_hw_notrace();	\
+	} while (0)
+
+#else /* CONFIG_IPIPE_TRACE_IRQSOFF */
+
+#define local_irq_enable_hw() \
+	__asm__ __volatile__( \
+		"sti %0;" \
+		: \
+		: "d"(bfin_irq_flags) \
+	)
+
+#define local_irq_disable_hw()			\
+	do {					\
+		int _tmp_dummy;			\
+		__asm__ __volatile__ (		\
+			"cli %0;"		\
+			: "=d" (_tmp_dummy));	\
+	} while (0)
+
+#define local_irq_restore_hw(x) \
+	do { \
+		if (irqs_enabled_from_flags_hw(x)) \
+			local_irq_enable_hw(); \
+	} while (0)
+
+#define local_irq_save_hw(x)		__save_and_cli_hw(x)
+
+#define local_irq_disable_hw_notrace()	local_irq_disable_hw()
+#define local_irq_enable_hw_notrace()	local_irq_enable_hw()
+#define local_irq_save_hw_notrace(x)	local_irq_save_hw(x)
+#define local_irq_restore_hw_notrace(x)	local_irq_restore_hw(x)
+
+#endif  /* CONFIG_IPIPE_TRACE_IRQSOFF */
+
+#else /* !CONFIG_IPIPE */
 
 /*
- * Machine specific interrupt sources.
- *
- * Adding an interrupt service routine for a source with this bit
- * set indicates a special machine specific interrupt source.
- * The machine specific files define these sources.
- *
- * The IRQ_MACHSPEC bit is now gone - the only thing it did was to
- * introduce unnecessary overhead.
- *
- * All interrupt handling is actually machine specific so it is better
- * to use function pointers, as used by the Sparc port, and select the
- * interrupt handling functions when initializing the kernel. This way
- * we save some unnecessary overhead at run-time.
- *                                                      01/11/97 - Jes
+ * Interrupt configuring macros.
  */
+#define local_irq_disable() \
+	do { \
+		int __tmp_dummy; \
+		__asm__ __volatile__( \
+			"cli %0;" \
+			: "=d" (__tmp_dummy) \
+		); \
+	} while (0)
 
-extern void ack_bad_irq(unsigned int irq);
+#define local_irq_enable() \
+	__asm__ __volatile__( \
+		"sti %0;" \
+		: \
+		: "d" (bfin_irq_flags) \
+	)
 
-static __inline__ int irq_canonicalize(int irq)
+#ifdef CONFIG_DEBUG_HWERR
+# define __save_and_cli(x) \
+	__asm__ __volatile__( \
+		"cli %0;" \
+		"sti %1;" \
+		: "=&d" (x) \
+		: "d" (0x3F) \
+	)
+#else
+# define __save_and_cli(x) \
+	__asm__ __volatile__( \
+		"cli %0;" \
+		: "=&d" (x) \
+	)
+#endif
+
+#define local_save_flags(x) \
+	__asm__ __volatile__( \
+		"cli %0;" \
+		"sti %0;" \
+		: "=d" (x) \
+	)
+
+#ifdef CONFIG_DEBUG_HWERR
+#define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0)
+#else
+#define irqs_enabled_from_flags(x) ((x) != 0x1f)
+#endif
+
+#define local_irq_restore(x) \
+	do { \
+		if (irqs_enabled_from_flags(x)) \
+			local_irq_enable(); \
+	} while (0)
+
+/* For spinlocks etc */
+#define local_irq_save(x) __save_and_cli(x)
+
+#define irqs_disabled()				\
+({						\
+	unsigned long flags;			\
+	local_save_flags(flags);		\
+	!irqs_enabled_from_flags(flags);	\
+})
+
+#define local_irq_save_hw(x)		local_irq_save(x)
+#define local_irq_restore_hw(x)		local_irq_restore(x)
+#define local_irq_enable_hw()		local_irq_enable()
+#define local_irq_disable_hw()		local_irq_disable()
+#define irqs_disabled_hw()		irqs_disabled()
+
+#endif /* !CONFIG_IPIPE */
+
+#if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
+# define NOP_PAD_ANOMALY_05000244 "nop; nop;"
+#else
+# define NOP_PAD_ANOMALY_05000244
+#endif
+
+#define idle_with_irq_disabled() \
+	__asm__ __volatile__( \
+		NOP_PAD_ANOMALY_05000244 \
+		".align 8;" \
+		"sti %0;" \
+		"idle;" \
+		: \
+		: "d" (bfin_irq_flags) \
+	)
+
+static inline int irq_canonicalize(int irq)
 {
 	return irq;
 }
 
-/* count of spurious interrupts */
-/* extern volatile unsigned int num_spurious; */
-
-#ifndef NO_IRQ
-#define NO_IRQ ((unsigned int)(-1))
-#endif
-
-#define SIC_SYSIRQ(irq)	(irq - (IRQ_CORETMR + 1))
-
 #endif				/* _BFIN_IRQ_H_ */
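
Ordinary kernel code keeps using the generic pattern and is unaffected by whether the saved flags hold the virtual I-pipe mask or the real IMASK; a minimal sketch (hypothetical caller, not part of the patch):

/* Illustrative only. */
static void example_short_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* virtual mask under CONFIG_IPIPE, cli otherwise */
	/* ... touch state shared with interrupt context ... */
	local_irq_restore(flags);
}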
diff --git a/arch/blackfin/include/asm/l1layout.h b/arch/blackfin/include/asm/l1layout.h
index c13ded7..79dbefaa 100644
--- a/arch/blackfin/include/asm/l1layout.h
+++ b/arch/blackfin/include/asm/l1layout.h
@@ -8,6 +8,7 @@
 
 #include <asm/blackfin.h>
 
+#ifndef CONFIG_SMP
 #ifndef __ASSEMBLY__
 
 /* Data that is "mapped" into the process VM at the start of the L1 scratch
@@ -24,8 +25,10 @@
 };
 
 /* A pointer to the structure in memory.  */
-#define L1_SCRATCH_TASK_INFO ((struct l1_scratch_task_info *)L1_SCRATCH_START)
+#define L1_SCRATCH_TASK_INFO ((struct l1_scratch_task_info *)\
+						get_l1_scratch_start())
 
 #endif
+#endif
 
 #endif
diff --git a/arch/blackfin/include/asm/mem_init.h b/arch/blackfin/include/asm/mem_init.h
new file mode 100644
index 0000000..255a931
--- /dev/null
+++ b/arch/blackfin/include/asm/mem_init.h
@@ -0,0 +1,364 @@
+/*
+ * arch/blackfin/include/asm/mem_init.h - reprogram clocks / memory
+ *
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#if defined(EBIU_SDGCTL)
+#if defined(CONFIG_MEM_MT48LC16M16A2TG_75) || \
+    defined(CONFIG_MEM_MT48LC64M4A2FB_7E) || \
+    defined(CONFIG_MEM_MT48LC16M8A2TG_75) || \
+    defined(CONFIG_MEM_GENERIC_BOARD) || \
+    defined(CONFIG_MEM_MT48LC32M8A2_75) || \
+    defined(CONFIG_MEM_MT48LC8M32B2B5_7) || \
+    defined(CONFIG_MEM_MT48LC32M16A2TG_75) || \
+    defined(CONFIG_MEM_MT48LC32M8A2_75)
+#if (CONFIG_SCLK_HZ > 119402985)
+#define SDRAM_tRP       TRP_2
+#define SDRAM_tRP_num   2
+#define SDRAM_tRAS      TRAS_7
+#define SDRAM_tRAS_num  7
+#define SDRAM_tRCD      TRCD_2
+#define SDRAM_tWR       TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 104477612) && (CONFIG_SCLK_HZ <= 119402985)
+#define SDRAM_tRP       TRP_2
+#define SDRAM_tRP_num   2
+#define SDRAM_tRAS      TRAS_6
+#define SDRAM_tRAS_num  6
+#define SDRAM_tRCD      TRCD_2
+#define SDRAM_tWR       TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 89552239) && (CONFIG_SCLK_HZ <= 104477612)
+#define SDRAM_tRP       TRP_2
+#define SDRAM_tRP_num   2
+#define SDRAM_tRAS      TRAS_5
+#define SDRAM_tRAS_num  5
+#define SDRAM_tRCD      TRCD_2
+#define SDRAM_tWR       TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 74626866) && (CONFIG_SCLK_HZ <= 89552239)
+#define SDRAM_tRP       TRP_2
+#define SDRAM_tRP_num   2
+#define SDRAM_tRAS      TRAS_4
+#define SDRAM_tRAS_num  4
+#define SDRAM_tRCD      TRCD_2
+#define SDRAM_tWR       TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 66666667) && (CONFIG_SCLK_HZ <= 74626866)
+#define SDRAM_tRP       TRP_2
+#define SDRAM_tRP_num   2
+#define SDRAM_tRAS      TRAS_3
+#define SDRAM_tRAS_num  3
+#define SDRAM_tRCD      TRCD_2
+#define SDRAM_tWR       TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 59701493) && (CONFIG_SCLK_HZ <= 66666667)
+#define SDRAM_tRP       TRP_1
+#define SDRAM_tRP_num   1
+#define SDRAM_tRAS      TRAS_4
+#define SDRAM_tRAS_num  3
+#define SDRAM_tRCD      TRCD_1
+#define SDRAM_tWR       TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 44776119) && (CONFIG_SCLK_HZ <= 59701493)
+#define SDRAM_tRP       TRP_1
+#define SDRAM_tRP_num   1
+#define SDRAM_tRAS      TRAS_3
+#define SDRAM_tRAS_num  3
+#define SDRAM_tRCD      TRCD_1
+#define SDRAM_tWR       TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 29850746) && (CONFIG_SCLK_HZ <= 44776119)
+#define SDRAM_tRP       TRP_1
+#define SDRAM_tRP_num   1
+#define SDRAM_tRAS      TRAS_2
+#define SDRAM_tRAS_num  2
+#define SDRAM_tRCD      TRCD_1
+#define SDRAM_tWR       TWR_2
+#endif
+#if (CONFIG_SCLK_HZ <= 29850746)
+#define SDRAM_tRP       TRP_1
+#define SDRAM_tRP_num   1
+#define SDRAM_tRAS      TRAS_1
+#define SDRAM_tRAS_num  1
+#define SDRAM_tRCD      TRCD_1
+#define SDRAM_tWR       TWR_2
+#endif
+#endif
+
+#if defined(CONFIG_MEM_MT48LC16M8A2TG_75) || \
+    defined(CONFIG_MEM_MT48LC8M32B2B5_7)
+  /* SDRAM INFORMATION: */
+#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
+#define SDRAM_NRA   4096	/* Number of row addresses in SDRAM */
+#define SDRAM_CL    CL_3
+#endif
+
+#if defined(CONFIG_MEM_MT48LC32M8A2_75) || \
+    defined(CONFIG_MEM_MT48LC64M4A2FB_7E) || \
+    defined(CONFIG_MEM_GENERIC_BOARD) || \
+    defined(CONFIG_MEM_MT48LC32M16A2TG_75) || \
+    defined(CONFIG_MEM_MT48LC16M16A2TG_75) || \
+    defined(CONFIG_MEM_MT48LC32M8A2_75)
+  /* SDRAM INFORMATION: */
+#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
+#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
+#define SDRAM_CL    CL_3
+#endif
+
+
+#ifdef CONFIG_BFIN_KERNEL_CLOCK_MEMINIT_CALC
+/* Equation from section 17 (p17-46) of BF533 HRM */
+#define mem_SDRRC       (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num)
+
+/* Enable SCLK Out */
+#define mem_SDGCTL        (0x80000000 | SCTLE | SDRAM_CL | SDRAM_tRAS | SDRAM_tRP | SDRAM_tRCD | SDRAM_tWR | PSS)
+#else
+#define mem_SDRRC 	CONFIG_MEM_SDRRC
+#define mem_SDGCTL	CONFIG_MEM_SDGCTL
+#endif
+#endif
+
+
+#if defined(EBIU_DDRCTL0)
+#define MIN_DDR_SCLK(x)	(x*(CONFIG_SCLK_HZ/1000/1000)/1000 + 1)
+#define MAX_DDR_SCLK(x)	(x*(CONFIG_SCLK_HZ/1000/1000)/1000)
+#define DDR_CLK_HZ(x)	(1000*1000*1000/x)
+
+#if defined(CONFIG_MEM_MT46V32M16_6T)
+#define DDR_SIZE	DEVSZ_512
+#define DDR_WIDTH	DEVWD_16
+#define DDR_MAX_tCK	13
+
+#define DDR_tRC		DDR_TRC(MIN_DDR_SCLK(60))
+#define DDR_tRAS	DDR_TRAS(MIN_DDR_SCLK(42))
+#define DDR_tRP		DDR_TRP(MIN_DDR_SCLK(15))
+#define DDR_tRFC	DDR_TRFC(MIN_DDR_SCLK(72))
+#define DDR_tREFI	DDR_TREFI(MAX_DDR_SCLK(7800))
+
+#define DDR_tRCD	DDR_TRCD(MIN_DDR_SCLK(15))
+#define DDR_tWTR	DDR_TWTR(1)
+#define DDR_tMRD	DDR_TMRD(MIN_DDR_SCLK(12))
+#define DDR_tWR		DDR_TWR(MIN_DDR_SCLK(15))
+#endif
+
+#if defined(CONFIG_MEM_MT46V32M16_5B)
+#define DDR_SIZE	DEVSZ_512
+#define DDR_WIDTH	DEVWD_16
+#define DDR_MAX_tCK	13
+
+#define DDR_tRC		DDR_TRC(MIN_DDR_SCLK(55))
+#define DDR_tRAS	DDR_TRAS(MIN_DDR_SCLK(40))
+#define DDR_tRP		DDR_TRP(MIN_DDR_SCLK(15))
+#define DDR_tRFC	DDR_TRFC(MIN_DDR_SCLK(70))
+#define DDR_tREFI	DDR_TREFI(MAX_DDR_SCLK(7800))
+
+#define DDR_tRCD	DDR_TRCD(MIN_DDR_SCLK(15))
+#define DDR_tWTR	DDR_TWTR(2)
+#define DDR_tMRD	DDR_TMRD(MIN_DDR_SCLK(10))
+#define DDR_tWR		DDR_TWR(MIN_DDR_SCLK(15))
+#endif
+
+#if defined(CONFIG_MEM_GENERIC_BOARD)
+#define DDR_SIZE	DEVSZ_512
+#define DDR_WIDTH	DEVWD_16
+#define DDR_MAX_tCK	13
+
+#define DDR_tRCD	DDR_TRCD(3)
+#define DDR_tWTR	DDR_TWTR(2)
+#define DDR_tWR		DDR_TWR(2)
+#define DDR_tMRD	DDR_TMRD(2)
+#define DDR_tRP		DDR_TRP(3)
+#define DDR_tRAS	DDR_TRAS(7)
+#define DDR_tRC		DDR_TRC(10)
+#define DDR_tRFC	DDR_TRFC(12)
+#define DDR_tREFI	DDR_TREFI(1288)
+#endif
+
+#if (CONFIG_SCLK_HZ < DDR_CLK_HZ(DDR_MAX_tCK))
+# error "CONFIG_SCLK_HZ is too small (<DDR_CLK_HZ(DDR_MAX_tCK) Hz)."
+#elif (CONFIG_SCLK_HZ <= 133333333)
+# define	DDR_CL		CL_2
+#else
+# error "CONFIG_SCLK_HZ is too large (>133333333 Hz)."
+#endif
+
+#ifdef CONFIG_BFIN_KERNEL_CLOCK_MEMINIT_CALC
+#define mem_DDRCTL0	(DDR_tRP | DDR_tRAS | DDR_tRC | DDR_tRFC | DDR_tREFI)
+#define mem_DDRCTL1	(DDR_DATWIDTH | EXTBANK_1 | DDR_SIZE | DDR_WIDTH | DDR_tWTR \
+			| DDR_tMRD | DDR_tWR | DDR_tRCD)
+#define mem_DDRCTL2	DDR_CL
+#else
+#define mem_DDRCTL0	CONFIG_MEM_DDRCTL0
+#define mem_DDRCTL1	CONFIG_MEM_DDRCTL1
+#define mem_DDRCTL2	CONFIG_MEM_DDRCTL2
+#endif
+#endif
+
+#if defined CONFIG_CLKIN_HALF
+#define CLKIN_HALF       1
+#else
+#define CLKIN_HALF       0
+#endif
+
+#if defined CONFIG_PLL_BYPASS
+#define PLL_BYPASS      1
+#else
+#define PLL_BYPASS       0
+#endif
+
+/***************************************Currently Not Being Used *********************************/
+
+#if defined(CONFIG_FLASH_SPEED_BWAT) && \
+defined(CONFIG_FLASH_SPEED_BRAT) && \
+defined(CONFIG_FLASH_SPEED_BHT) && \
+defined(CONFIG_FLASH_SPEED_BST) && \
+defined(CONFIG_FLASH_SPEED_BTT)
+
+#define flash_EBIU_AMBCTL_WAT  ((CONFIG_FLASH_SPEED_BWAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
+#define flash_EBIU_AMBCTL_RAT  ((CONFIG_FLASH_SPEED_BRAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
+#define flash_EBIU_AMBCTL_HT   ((CONFIG_FLASH_SPEED_BHT  * 4) / (4000000000 / CONFIG_SCLK_HZ))
+#define flash_EBIU_AMBCTL_ST   ((CONFIG_FLASH_SPEED_BST  * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
+#define flash_EBIU_AMBCTL_TT   ((CONFIG_FLASH_SPEED_BTT  * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
+
+#if (flash_EBIU_AMBCTL_TT > 3)
+#define flash_EBIU_AMBCTL0_TT   B0TT_4
+#endif
+#if (flash_EBIU_AMBCTL_TT == 3)
+#define flash_EBIU_AMBCTL0_TT   B0TT_3
+#endif
+#if (flash_EBIU_AMBCTL_TT == 2)
+#define flash_EBIU_AMBCTL0_TT   B0TT_2
+#endif
+#if (flash_EBIU_AMBCTL_TT < 2)
+#define flash_EBIU_AMBCTL0_TT   B0TT_1
+#endif
+
+#if (flash_EBIU_AMBCTL_ST > 3)
+#define flash_EBIU_AMBCTL0_ST   B0ST_4
+#endif
+#if (flash_EBIU_AMBCTL_ST == 3)
+#define flash_EBIU_AMBCTL0_ST   B0ST_3
+#endif
+#if (flash_EBIU_AMBCTL_ST == 2)
+#define flash_EBIU_AMBCTL0_ST   B0ST_2
+#endif
+#if (flash_EBIU_AMBCTL_ST < 2)
+#define flash_EBIU_AMBCTL0_ST   B0ST_1
+#endif
+
+#if (flash_EBIU_AMBCTL_HT > 2)
+#define flash_EBIU_AMBCTL0_HT   B0HT_3
+#endif
+#if (flash_EBIU_AMBCTL_HT == 2)
+#define flash_EBIU_AMBCTL0_HT   B0HT_2
+#endif
+#if (flash_EBIU_AMBCTL_HT == 1)
+#define flash_EBIU_AMBCTL0_HT   B0HT_1
+#endif
+#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT == 0)
+#define flash_EBIU_AMBCTL0_HT   B0HT_0
+#endif
+#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT != 0)
+#define flash_EBIU_AMBCTL0_HT   B0HT_1
+#endif
+
+#if (flash_EBIU_AMBCTL_WAT > 14)
+#define flash_EBIU_AMBCTL0_WAT  B0WAT_15
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 14)
+#define flash_EBIU_AMBCTL0_WAT  B0WAT_14
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 13)
+#define flash_EBIU_AMBCTL0_WAT  B0WAT_13
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 12)
+#define flash_EBIU_AMBCTL0_WAT  B0WAT_12
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 11)
+#define flash_EBIU_AMBCTL0_WAT  B0WAT_11
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 10)
+#define flash_EBIU_AMBCTL0_WAT  B0WAT_10
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 9)
+#define flash_EBIU_AMBCTL0_WAT  B0WAT_9
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 8)
+#define flash_EBIU_AMBCTL0_WAT  B0WAT_8
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 7)
+#define flash_EBIU_AMBCTL0_WAT  B0WAT_7
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 6)
+#define flash_EBIU_AMBCTL0_WAT  B0WAT_6
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 5)
+#define flash_EBIU_AMBCTL0_WAT  B0WAT_5
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 4)
+#define flash_EBIU_AMBCTL0_WAT  B0WAT_4
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 3)
+#define flash_EBIU_AMBCTL0_WAT  B0WAT_3
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 2)
+#define flash_EBIU_AMBCTL0_WAT  B0WAT_2
+#endif
+#if (flash_EBIU_AMBCTL_WAT == 1)
+#define flash_EBIU_AMBCTL0_WAT  B0WAT_1
+#endif
+
+#if (flash_EBIU_AMBCTL_RAT > 14)
+#define flash_EBIU_AMBCTL0_RAT  B0RAT_15
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 14)
+#define flash_EBIU_AMBCTL0_RAT  B0RAT_14
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 13)
+#define flash_EBIU_AMBCTL0_RAT  B0RAT_13
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 12)
+#define flash_EBIU_AMBCTL0_RAT  B0RAT_12
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 11)
+#define flash_EBIU_AMBCTL0_RAT  B0RAT_11
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 10)
+#define flash_EBIU_AMBCTL0_RAT  B0RAT_10
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 9)
+#define flash_EBIU_AMBCTL0_RAT  B0RAT_9
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 8)
+#define flash_EBIU_AMBCTL0_RAT  B0RAT_8
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 7)
+#define flash_EBIU_AMBCTL0_RAT  B0RAT_7
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 6)
+#define flash_EBIU_AMBCTL0_RAT  B0RAT_6
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 5)
+#define flash_EBIU_AMBCTL0_RAT  B0RAT_5
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 4)
+#define flash_EBIU_AMBCTL0_RAT  B0RAT_4
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 3)
+#define flash_EBIU_AMBCTL0_RAT  B0RAT_3
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 2)
+#define flash_EBIU_AMBCTL0_RAT  B0RAT_2
+#endif
+#if (flash_EBIU_AMBCTL_RAT == 1)
+#define flash_EBIU_AMBCTL0_RAT  B0RAT_1
+#endif
+
+#define flash_EBIU_AMBCTL0  \
+	(flash_EBIU_AMBCTL0_WAT | flash_EBIU_AMBCTL0_RAT | flash_EBIU_AMBCTL0_HT | \
+	 flash_EBIU_AMBCTL0_ST | flash_EBIU_AMBCTL0_TT | CONFIG_FLASH_SPEED_RDYEN)
+#endif
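
A quick sanity check of the CONFIG_BFIN_KERNEL_CLOCK_MEMINIT_CALC path, using assumed example values (SCLK = 133 MHz with one of the 8192-row parts above, which selects SDRAM_tRAS_num = 7 and SDRAM_tRP_num = 2):

/* Illustrative arithmetic only; the 133 MHz SCLK is an assumed example.
 * mem_SDRRC = ((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA
 *             - (SDRAM_tRAS_num + SDRAM_tRP_num)
 *           = ((133000000 / 1000) * 64) / 8192 - (7 + 2)
 *           = 8512000 / 8192 - 9
 *           = 1039 - 9
 *           = 1030		(0x406 programmed into EBIU_SDRRC)
 */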
diff --git a/arch/blackfin/include/asm/mem_map.h b/arch/blackfin/include/asm/mem_map.h
index 88d04a7..e92b310 100644
--- a/arch/blackfin/include/asm/mem_map.h
+++ b/arch/blackfin/include/asm/mem_map.h
@@ -9,4 +9,79 @@
 
 #include <mach/mem_map.h>
 
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+static inline ulong get_l1_scratch_start_cpu(int cpu)
+{
+	return (cpu) ? COREB_L1_SCRATCH_START : COREA_L1_SCRATCH_START;
+}
+static inline ulong get_l1_code_start_cpu(int cpu)
+{
+	return (cpu) ? COREB_L1_CODE_START : COREA_L1_CODE_START;
+}
+static inline ulong get_l1_data_a_start_cpu(int cpu)
+{
+	return (cpu) ? COREB_L1_DATA_A_START : COREA_L1_DATA_A_START;
+}
+static inline ulong get_l1_data_b_start_cpu(int cpu)
+{
+	return (cpu) ? COREB_L1_DATA_B_START : COREA_L1_DATA_B_START;
+}
+
+static inline ulong get_l1_scratch_start(void)
+{
+	return get_l1_scratch_start_cpu(blackfin_core_id());
+}
+static inline ulong get_l1_code_start(void)
+{
+	return get_l1_code_start_cpu(blackfin_core_id());
+}
+static inline ulong get_l1_data_a_start(void)
+{
+	return get_l1_data_a_start_cpu(blackfin_core_id());
+}
+static inline ulong get_l1_data_b_start(void)
+{
+	return get_l1_data_b_start_cpu(blackfin_core_id());
+}
+
+#else /* !CONFIG_SMP */
+
+static inline ulong get_l1_scratch_start_cpu(int cpu)
+{
+	return L1_SCRATCH_START;
+}
+static inline ulong get_l1_code_start_cpu(int cpu)
+{
+	return L1_CODE_START;
+}
+static inline ulong get_l1_data_a_start_cpu(int cpu)
+{
+	return L1_DATA_A_START;
+}
+static inline ulong get_l1_data_b_start_cpu(int cpu)
+{
+	return L1_DATA_B_START;
+}
+static inline ulong get_l1_scratch_start(void)
+{
+	return get_l1_scratch_start_cpu(0);
+}
+static inline ulong get_l1_code_start(void)
+{
+	return  get_l1_code_start_cpu(0);
+}
+static inline ulong get_l1_data_a_start(void)
+{
+	return get_l1_data_a_start_cpu(0);
+}
+static inline ulong get_l1_data_b_start(void)
+{
+	return get_l1_data_b_start_cpu(0);
+}
+
+#endif /* CONFIG_SMP */
+#endif /* __ASSEMBLY__ */
+
 #endif				/* _MEM_MAP_H_ */
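
A short sketch of what the accessors buy (hypothetical helper; L1_SCRATCH_LENGTH is assumed to come from the mach headers): callers no longer hard-code COREA/COREB base addresses and the same code works on both the UP and SMP memory layouts.

/* Illustrative only: does 'addr' fall inside the current core's
 * L1 scratchpad SRAM? */
static inline int example_in_l1_scratch(unsigned long addr)
{
	unsigned long base = get_l1_scratch_start();

	return addr >= base && addr < base + L1_SCRATCH_LENGTH;
}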
diff --git a/arch/blackfin/include/asm/mmu.h b/arch/blackfin/include/asm/mmu.h
index 757e439..dbfd686 100644
--- a/arch/blackfin/include/asm/mmu.h
+++ b/arch/blackfin/include/asm/mmu.h
@@ -10,7 +10,6 @@
 };
 
 typedef struct {
-	struct vm_list_struct *vmlist;
 	unsigned long end_brk;
 	unsigned long stack_start;
 
diff --git a/arch/blackfin/include/asm/mmu_context.h b/arch/blackfin/include/asm/mmu_context.h
index 35593dd..944e29f 100644
--- a/arch/blackfin/include/asm/mmu_context.h
+++ b/arch/blackfin/include/asm/mmu_context.h
@@ -37,6 +37,10 @@
 #include <asm/pgalloc.h>
 #include <asm/cplbinit.h>
 
+/* Note: L1 stacks are CPU-private things, so we bluntly disable this
+   feature in SMP mode, and use the per-CPU scratch SRAM bank only to
+   store the PDA instead. */
+
 extern void *current_l1_stack_save;
 extern int nr_l1stack_tasks;
 extern void *l1_stack_base;
@@ -88,12 +92,15 @@
 static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
 			     struct task_struct *tsk)
 {
+#ifdef CONFIG_MPU
+	unsigned int cpu = smp_processor_id();
+#endif
 	if (prev_mm == next_mm)
 		return;
 #ifdef CONFIG_MPU
-	if (prev_mm->context.page_rwx_mask == current_rwx_mask) {
-		flush_switched_cplbs();
-		set_mask_dcplbs(next_mm->context.page_rwx_mask);
+	if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
+		flush_switched_cplbs(cpu);
+		set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu);
 	}
 #endif
 
@@ -138,9 +145,10 @@
 
 static inline void update_protections(struct mm_struct *mm)
 {
-	if (mm->context.page_rwx_mask == current_rwx_mask) {
-		flush_switched_cplbs();
-		set_mask_dcplbs(mm->context.page_rwx_mask);
+	unsigned int cpu = smp_processor_id();
+	if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
+		flush_switched_cplbs(cpu);
+		set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
 	}
 }
 #endif
@@ -165,6 +173,9 @@
 static inline void destroy_context(struct mm_struct *mm)
 {
 	struct sram_list_struct *tmp;
+#ifdef CONFIG_MPU
+	unsigned int cpu = smp_processor_id();
+#endif
 
 #ifdef CONFIG_APP_STACK_L1
 	if (current_l1_stack_save == mm->context.l1_stack_save)
@@ -179,8 +190,8 @@
 		kfree(tmp);
 	}
 #ifdef CONFIG_MPU
-	if (current_rwx_mask == mm->context.page_rwx_mask)
-		current_rwx_mask = NULL;
+	if (current_rwx_mask[cpu] == mm->context.page_rwx_mask)
+		current_rwx_mask[cpu] = NULL;
 	free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
 #endif
 }
diff --git a/arch/blackfin/include/asm/mutex-dec.h b/arch/blackfin/include/asm/mutex-dec.h
new file mode 100644
index 0000000..0134151
--- /dev/null
+++ b/arch/blackfin/include/asm/mutex-dec.h
@@ -0,0 +1,112 @@
+/*
+ * arch/blackfin/include/asm/mutex-dec.h (copied from asm-generic/mutex-dec.h)
+ *
+ * Generic implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ */
+#ifndef _ASM_GENERIC_MUTEX_DEC_H
+#define _ASM_GENERIC_MUTEX_DEC_H
+
+/**
+ *  __mutex_fastpath_lock - try to take the lock by moving the count
+ *                          from 1 to a 0 value
+ *  @count: pointer of type atomic_t
+ *  @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+static inline void
+__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+	if (unlikely(atomic_dec_return(count) < 0))
+		fail_fn(count);
+	else
+		smp_mb();
+}
+
+/**
+ *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ *                                 from 1 to a 0 value
+ *  @count: pointer of type atomic_t
+ *  @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
+{
+	if (unlikely(atomic_dec_return(count) < 0))
+		return fail_fn(count);
+	else {
+		smp_mb();
+		return 0;
+	}
+}
+
+/**
+ *  __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ *  @count: pointer of type atomic_t
+ *  @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value lower than 1, then the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1;
+ * otherwise it needs to return 0.
+ */
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+	smp_mb();
+	if (unlikely(atomic_inc_return(count) <= 0))
+		fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock()		1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ *  @count: pointer of type atomic_t
+ *  @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it to 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	/*
+	 * We have two variants here. The cmpxchg based one is the best one
+	 * because it never induces a false contention state.  It is included
+	 * here because architectures using the inc/dec algorithms over the
+	 * xchg ones are much more likely to support cmpxchg natively.
+	 *
+	 * If not, we fall back to the spinlock-based variant - that is
+	 * just as efficient (and simpler) as a 'destructive' probing of
+	 * the mutex state would be.
+	 */
+#ifdef __HAVE_ARCH_CMPXCHG
+	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
+		smp_mb();
+		return 1;
+	}
+	return 0;
+#else
+	return fail_fn(count);
+#endif
+}
+
+#endif
diff --git a/arch/blackfin/include/asm/mutex.h b/arch/blackfin/include/asm/mutex.h
index 458c1f7..5d39925 100644
--- a/arch/blackfin/include/asm/mutex.h
+++ b/arch/blackfin/include/asm/mutex.h
@@ -6,4 +6,67 @@
  * implementation. (see asm-generic/mutex-xchg.h for details)
  */
 
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+#ifndef CONFIG_SMP
 #include <asm-generic/mutex-dec.h>
+#else
+
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	if (unlikely(atomic_dec_return(count) < 0))
+		fail_fn(count);
+	else
+		smp_mb();
+}
+
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	if (unlikely(atomic_dec_return(count) < 0))
+		return fail_fn(count);
+	else {
+		smp_mb();
+		return 0;
+	}
+}
+
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	smp_mb();
+	if (unlikely(atomic_inc_return(count) <= 0))
+		fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock()		1
+
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	/*
+	 * We have two variants here. The cmpxchg based one is the best one
+	 * because it never induce a false contention state.  It is included
+	 * here because architectures using the inc/dec algorithms over the
+	 * xchg ones are much more likely to support cmpxchg natively.
+	 *
+	 * If not, we fall back to the spinlock-based variant - that is
+	 * just as efficient (and simpler) as a 'destructive' probing of
+	 * the mutex state would be.
+	 */
+#ifdef __HAVE_ARCH_CMPXCHG
+	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
+		smp_mb();
+		return 1;
+	}
+	return 0;
+#else
+	return fail_fn(count);
+#endif
+}
+
+#endif
+
+#endif
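
For orientation, the counter protocol both variants implement is: 1 = unlocked, 0 = locked with no waiters, negative = contended. A sketch of how the generic mutex core is expected to drive these helpers (the slowpath names below are hypothetical stand-ins, not part of the patch):

/* Illustrative only. */
static void example_lock_slowpath(atomic_t *count)   { /* block until acquired */ }
static void example_unlock_slowpath(atomic_t *count) { /* wake up one waiter */ }

static inline void example_mutex_lock(atomic_t *count)
{
	__mutex_fastpath_lock(count, example_lock_slowpath);
}

static inline void example_mutex_unlock(atomic_t *count)
{
	__mutex_fastpath_unlock(count, example_unlock_slowpath);
}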
diff --git a/arch/blackfin/include/asm/pda.h b/arch/blackfin/include/asm/pda.h
new file mode 100644
index 0000000..bd8d4a7
--- /dev/null
+++ b/arch/blackfin/include/asm/pda.h
@@ -0,0 +1,70 @@
+/*
+ * File:         arch/blackfin/include/asm/pda.h
+ * Author:       Philippe Gerum <rpm@xenomai.org>
+ *
+ *               Copyright 2007 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef _ASM_BLACKFIN_PDA_H
+#define _ASM_BLACKFIN_PDA_H
+
+#include <mach/anomaly.h>
+
+#ifndef __ASSEMBLY__
+
+struct blackfin_pda {			/* Per-processor Data Area */
+	struct blackfin_pda *next;
+
+	unsigned long syscfg;
+#ifdef CONFIG_SMP
+	unsigned long imask;		/* Current IMASK value */
+#endif
+
+	unsigned long *ipdt;		/* Start of switchable I-CPLB table */
+	unsigned long *ipdt_swapcount;	/* Number of swaps in ipdt */
+	unsigned long *dpdt;		/* Start of switchable D-CPLB table */
+	unsigned long *dpdt_swapcount;	/* Number of swaps in dpdt */
+
+	/*
+	 * Single instructions can have multiple faults, which
+	 * need to be handled by traps.c, in irq5. We store
+	 * the exception cause to ensure we don't miss a
+	 * double fault condition
+	 */
+	unsigned long ex_iptr;
+	unsigned long ex_optr;
+	unsigned long ex_buf[4];
+	unsigned long ex_imask;		/* Saved imask from exception */
+	unsigned long *ex_stack;	/* Exception stack space */
+
+#ifdef ANOMALY_05000261
+	unsigned long last_cplb_fault_retx;
+#endif
+	unsigned long dcplb_fault_addr;
+	unsigned long icplb_fault_addr;
+	unsigned long retx;
+	unsigned long seqstat;
+};
+
+extern struct blackfin_pda cpu_pda[];
+
+void reserve_pda(void);
+
+#endif	/* __ASSEMBLY__ */
+
+#endif /* _ASM_BLACKFIN_PDA_H */
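
A hypothetical accessor makes the intent concrete: each core indexes its own slot in cpu_pda[], e.g. to retrieve the last data-CPLB fault address recorded by the exception path (indexing by smp_processor_id() is an assumption for illustration; UP builds simply use slot 0).

/* Illustrative only, not part of the patch. */
static inline unsigned long example_last_dcplb_fault(void)
{
	return cpu_pda[smp_processor_id()].dcplb_fault_addr;
}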
diff --git a/arch/blackfin/include/asm/percpu.h b/arch/blackfin/include/asm/percpu.h
index 78dd61f..797c0c1 100644
--- a/arch/blackfin/include/asm/percpu.h
+++ b/arch/blackfin/include/asm/percpu.h
@@ -3,4 +3,14 @@
 
 #include <asm-generic/percpu.h>
 
-#endif				/* __ARCH_BLACKFIN_PERCPU__ */
+#ifdef CONFIG_MODULES
+#define PERCPU_MODULE_RESERVE 8192
+#else
+#define PERCPU_MODULE_RESERVE 0
+#endif
+
+#define PERCPU_ENOUGH_ROOM \
+	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
+	 PERCPU_MODULE_RESERVE)
+
+#endif	/* __ARCH_BLACKFIN_PERCPU__ */
diff --git a/arch/blackfin/include/asm/pgtable.h b/arch/blackfin/include/asm/pgtable.h
index f11684e..783c8f7 100644
--- a/arch/blackfin/include/asm/pgtable.h
+++ b/arch/blackfin/include/asm/pgtable.h
@@ -29,6 +29,7 @@
 #define PAGE_COPY		__pgprot(0)	/* these mean nothing to NO_MM */
 #define PAGE_READONLY		__pgprot(0)	/* these mean nothing to NO_MM */
 #define PAGE_KERNEL		__pgprot(0)	/* these mean nothing to NO_MM */
+#define pgprot_noncached(prot)	(prot)
 
 extern void paging_init(void);
 
diff --git a/arch/blackfin/include/asm/processor.h b/arch/blackfin/include/asm/processor.h
index e3e9b41..0eece23 100644
--- a/arch/blackfin/include/asm/processor.h
+++ b/arch/blackfin/include/asm/processor.h
@@ -24,6 +24,14 @@
 	__asm__ __volatile__("usp = %0;\n\t"::"da"(usp));
 }
 
+static inline unsigned long __get_SP(void)
+{
+	unsigned long sp;
+
+	__asm__ __volatile__("%0 = sp;\n\t" : "=da"(sp));
+	return sp;
+}
+
 /*
  * User space process size: 1st byte beyond user address space.
  * Fairly meaningless on nommu.  Parts of user programs can be scattered
@@ -57,6 +65,7 @@
  * pass the data segment into user programs if it exists,
  * it can't hurt anything as far as I can tell
  */
+#ifndef CONFIG_SMP
 #define start_thread(_regs, _pc, _usp)					\
 do {									\
 	set_fs(USER_DS);						\
@@ -70,6 +79,16 @@
 		sizeof(*L1_SCRATCH_TASK_INFO));				\
 	wrusp(_usp);							\
 } while(0)
+#else
+#define start_thread(_regs, _pc, _usp)					\
+do {									\
+	set_fs(USER_DS);						\
+	(_regs)->pc = (_pc);						\
+	if (current->mm)						\
+		(_regs)->p5 = current->mm->start_data;			\
+	wrusp(_usp);							\
+} while (0)
+#endif
 
 /* Forward declaration, a strange C thing */
 struct task_struct;
@@ -106,7 +125,8 @@
 	eip; })
 #define	KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)
 
-#define cpu_relax()    	barrier()
+#define cpu_relax()    	smp_mb()
+
 
 /* Get the Silicon Revision of the chip */
 static inline uint32_t __pure bfin_revid(void)
@@ -137,7 +157,11 @@
 static inline uint16_t __pure bfin_cpuid(void)
 {
 	return (bfin_read_CHIPID() & CHIPID_FAMILY) >> 12;
+}
 
+static inline uint32_t __pure bfin_dspid(void)
+{
+	return bfin_read_DSPID();
 }
 
 static inline uint32_t __pure bfin_compiled_revid(void)
@@ -154,6 +178,8 @@
 	return 4;
 #elif defined(CONFIG_BF_REV_0_5)
 	return 5;
+#elif defined(CONFIG_BF_REV_0_6)
+	return 6;
 #elif defined(CONFIG_BF_REV_ANY)
 	return 0xffff;
 #else
diff --git a/arch/blackfin/include/asm/reboot.h b/arch/blackfin/include/asm/reboot.h
index 6d448b5..4856d62 100644
--- a/arch/blackfin/include/asm/reboot.h
+++ b/arch/blackfin/include/asm/reboot.h
@@ -1,7 +1,7 @@
 /*
- * include/asm-blackfin/reboot.h - shutdown/reboot header
+ * reboot.h - shutdown/reboot header
  *
- * Copyright 2004-2007 Analog Devices Inc.
+ * Copyright 2004-2008 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
diff --git a/arch/blackfin/include/asm/rwlock.h b/arch/blackfin/include/asm/rwlock.h
new file mode 100644
index 0000000..4a724b3
--- /dev/null
+++ b/arch/blackfin/include/asm/rwlock.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_BLACKFIN_RWLOCK_H
+#define _ASM_BLACKFIN_RWLOCK_H
+
+#define RW_LOCK_BIAS	0x01000000
+
+#endif
diff --git a/arch/blackfin/include/asm/serial.h b/arch/blackfin/include/asm/serial.h
index 994dd86..3a47606 100644
--- a/arch/blackfin/include/asm/serial.h
+++ b/arch/blackfin/include/asm/serial.h
@@ -3,3 +3,4 @@
  */
 
 #define SERIAL_EXTRA_IRQ_FLAGS IRQF_TRIGGER_HIGH
+#define BASE_BAUD (1843200 / 16)
diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h
new file mode 100644
index 0000000..118deee
--- /dev/null
+++ b/arch/blackfin/include/asm/smp.h
@@ -0,0 +1,44 @@
+/*
+ * File:         arch/blackfin/include/asm/smp.h
+ * Author:       Philippe Gerum <rpm@xenomai.org>
+ *
+ *               Copyright 2007 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef __ASM_BLACKFIN_SMP_H
+#define __ASM_BLACKFIN_SMP_H
+
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/cache.h>
+#include <asm/blackfin.h>
+#include <mach/smp.h>
+
+#define raw_smp_processor_id()  blackfin_core_id()
+
+extern char coreb_trampoline_start, coreb_trampoline_end;
+
+struct corelock_slot {
+	int lock;
+};
+
+void smp_icache_flush_range_others(unsigned long start,
+				   unsigned long end);
+
+#endif /* !__ASM_BLACKFIN_SMP_H */
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index 64e908a..0249ac3 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -1,6 +1,89 @@
 #ifndef __BFIN_SPINLOCK_H
 #define __BFIN_SPINLOCK_H
 
-#error blackfin architecture does not support SMP spin lock yet
+#include <asm/atomic.h>
 
-#endif
+asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
+asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
+asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
+asmlinkage void __raw_read_lock_asm(volatile int *ptr);
+asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
+asmlinkage void __raw_write_lock_asm(volatile int *ptr);
+asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
+
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+{
+	return __raw_spin_is_locked_asm(&lock->lock);
+}
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+	__raw_spin_lock_asm(&lock->lock);
+}
+
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+	return __raw_spin_trylock_asm(&lock->lock);
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	__raw_spin_unlock_asm(&lock->lock);
+}
+
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	while (__raw_spin_is_locked(lock))
+		cpu_relax();
+}
+
+static inline int __raw_read_can_lock(raw_rwlock_t *rw)
+{
+	return __raw_uncached_fetch_asm(&rw->lock) > 0;
+}
+
+static inline int __raw_write_can_lock(raw_rwlock_t *rw)
+{
+	return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
+}
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
+{
+	__raw_read_lock_asm(&rw->lock);
+}
+
+static inline int __raw_read_trylock(raw_rwlock_t *rw)
+{
+	return __raw_read_trylock_asm(&rw->lock);
+}
+
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+	__raw_read_unlock_asm(&rw->lock);
+}
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)
+{
+	__raw_write_lock_asm(&rw->lock);
+}
+
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
+{
+	return __raw_write_trylock_asm(&rw->lock);
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+	__raw_write_unlock_asm(&rw->lock);
+}
+
+#define _raw_spin_relax(lock)  	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
+#endif /*  !__BFIN_SPINLOCK_H */
diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h
new file mode 100644
index 0000000..b1e3c4c
--- /dev/null
+++ b/arch/blackfin/include/asm/spinlock_types.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+#include <asm/rwlock.h>
+
+typedef struct {
+	volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+
+typedef struct {
+	volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+
+#endif
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h
index 8f1627d..a4c8254 100644
--- a/arch/blackfin/include/asm/system.h
+++ b/arch/blackfin/include/asm/system.h
@@ -37,114 +37,98 @@
 #include <linux/linkage.h>
 #include <linux/compiler.h>
 #include <mach/anomaly.h>
-
-/*
- * Interrupt configuring macros.
- */
-
-extern unsigned long irq_flags;
-
-#define local_irq_enable() \
-	__asm__ __volatile__( \
-		"sti %0;" \
-		: \
-		: "d" (irq_flags) \
-	)
-
-#define local_irq_disable() \
-	do { \
-		int __tmp_dummy; \
-		__asm__ __volatile__( \
-			"cli %0;" \
-			: "=d" (__tmp_dummy) \
-		); \
-	} while (0)
-
-#if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
-# define NOP_PAD_ANOMALY_05000244 "nop; nop;"
-#else
-# define NOP_PAD_ANOMALY_05000244
-#endif
-
-#define idle_with_irq_disabled() \
-	__asm__ __volatile__( \
-		NOP_PAD_ANOMALY_05000244 \
-		".align 8;" \
-		"sti %0;" \
-		"idle;" \
-		: \
-		: "d" (irq_flags) \
-	)
-
-#ifdef CONFIG_DEBUG_HWERR
-# define __save_and_cli(x) \
-	__asm__ __volatile__( \
-		"cli %0;" \
-		"sti %1;" \
-		: "=&d" (x) \
-		: "d" (0x3F) \
-	)
-#else
-# define __save_and_cli(x) \
-	__asm__ __volatile__( \
-		"cli %0;" \
-		: "=&d" (x) \
-	)
-#endif
-
-#define local_save_flags(x) \
-	__asm__ __volatile__( \
-		"cli %0;" \
-		"sti %0;" \
-		: "=d" (x) \
-	)
-
-#ifdef CONFIG_DEBUG_HWERR
-#define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0)
-#else
-#define irqs_enabled_from_flags(x) ((x) != 0x1f)
-#endif
-
-#define local_irq_restore(x) \
-	do { \
-		if (irqs_enabled_from_flags(x)) \
-			local_irq_enable(); \
-	} while (0)
-
-/* For spinlocks etc */
-#define local_irq_save(x) __save_and_cli(x)
-
-#define	irqs_disabled()				\
-({						\
-	unsigned long flags;			\
-	local_save_flags(flags);		\
-	!irqs_enabled_from_flags(flags);	\
-})
+#include <asm/pda.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
 
 /*
  * Force strict CPU ordering.
  */
-#define nop()  asm volatile ("nop;\n\t"::)
-#define mb()   asm volatile (""   : : :"memory")
-#define rmb()  asm volatile (""   : : :"memory")
-#define wmb()  asm volatile (""   : : :"memory")
+#define nop()  __asm__ __volatile__ ("nop;\n\t" : : )
+#define mb()   __asm__ __volatile__ (""   : : : "memory")
+#define rmb()  __asm__ __volatile__ (""   : : : "memory")
+#define wmb()  __asm__ __volatile__ (""   : : : "memory")
 #define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-
 #define read_barrier_depends() 		do { } while(0)
 
 #ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
+asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
+asmlinkage unsigned long __raw_xchg_2_asm(volatile void *ptr, unsigned long value);
+asmlinkage unsigned long __raw_xchg_4_asm(volatile void *ptr, unsigned long value);
+asmlinkage unsigned long __raw_cmpxchg_1_asm(volatile void *ptr,
+					unsigned long new, unsigned long old);
+asmlinkage unsigned long __raw_cmpxchg_2_asm(volatile void *ptr,
+					unsigned long new, unsigned long old);
+asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
+					unsigned long new, unsigned long old);
+
+#ifdef __ARCH_SYNC_CORE_DCACHE
+# define smp_mb()	do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
+# define smp_rmb()	do { barrier(); smp_check_barrier(); } while (0)
+# define smp_wmb()	do { barrier(); smp_mark_barrier(); } while (0)
+# define smp_read_barrier_depends()	do { barrier(); smp_check_barrier(); } while (0)
+
 #else
+# define smp_mb()	barrier()
+# define smp_rmb()	barrier()
+# define smp_wmb()	barrier()
+# define smp_read_barrier_depends()	barrier()
+#endif
+
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+				   int size)
+{
+	unsigned long tmp;
+
+	switch (size) {
+	case 1:
+		tmp = __raw_xchg_1_asm(ptr, x);
+		break;
+	case 2:
+		tmp = __raw_xchg_2_asm(ptr, x);
+		break;
+	case 4:
+		tmp = __raw_xchg_4_asm(ptr, x);
+		break;
+	}
+
+	return tmp;
+}
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	unsigned long tmp;
+
+	switch (size) {
+	case 1:
+		tmp = __raw_cmpxchg_1_asm(ptr, new, old);
+		break;
+	case 2:
+		tmp = __raw_cmpxchg_2_asm(ptr, new, old);
+		break;
+	case 4:
+		tmp = __raw_cmpxchg_4_asm(ptr, new, old);
+		break;
+	}
+
+	return tmp;
+}
+#define cmpxchg(ptr, o, n) \
+	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+		(unsigned long)(n), sizeof(*(ptr))))
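The cmpxchg() wrapper above follows the usual kernel contract: the macro returns the prior contents of the word, and the new value was stored only if that prior value equals the expected old value. A minimal sketch of a caller (the claim_once() helper and its flag argument are invented for illustration, not part of this patch):

static int claim_once(unsigned int *flag)
{
	/* cmpxchg() hands back what was in *flag before the attempted
	 * store; the store of 1 only happened for the caller that saw
	 * the expected old value 0, so exactly one caller wins.
	 */
	return cmpxchg(flag, 0, 1) == 0;
}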
+
+#else /* !CONFIG_SMP */
+
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #define smp_read_barrier_depends()	do { } while(0)
-#endif
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
 struct __xchg_dummy {
 	unsigned long a[100];
@@ -157,7 +141,7 @@
 	unsigned long tmp = 0;
 	unsigned long flags = 0;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 
 	switch (size) {
 	case 1:
@@ -179,7 +163,7 @@
 			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
 		break;
 	}
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 	return tmp;
 }
 
@@ -194,9 +178,12 @@
 			(unsigned long)(n), sizeof(*(ptr))))
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 
-#ifndef CONFIG_SMP
 #include <asm-generic/cmpxchg.h>
-#endif
+
+#endif /* !CONFIG_SMP */
+
+#define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+#define tas(ptr) ((void)xchg((ptr), 1))
 
 #define prepare_to_switch()     do { } while(0)
 
@@ -205,10 +192,12 @@
  * ptr isn't the current task, in which case it does nothing.
  */
 
-#include <asm/blackfin.h>
+#include <asm/l1layout.h>
+#include <asm/mem_map.h>
 
 asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_struct *next);
 
+#ifndef CONFIG_SMP
 #define switch_to(prev,next,last) \
 do {    \
 	memcpy (&task_thread_info(prev)->l1_task_info, L1_SCRATCH_TASK_INFO, \
@@ -217,5 +206,11 @@
 		sizeof *L1_SCRATCH_TASK_INFO); \
 	(last) = resume (prev, next);   \
 } while (0)
+#else
+#define switch_to(prev, next, last) \
+do {    \
+	(last) = resume(prev, next);   \
+} while (0)
+#endif
 
-#endif				/* _BLACKFIN_SYSTEM_H */
+#endif	/* _BLACKFIN_SYSTEM_H */
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index 6427693..e721ce5 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -44,6 +44,7 @@
  */
 #define THREAD_SIZE_ORDER	1
 #define THREAD_SIZE		8192	/* 2 pages */
+#define STACK_WARN		(THREAD_SIZE/8)
 
 #ifndef __ASSEMBLY__
 
@@ -62,7 +63,9 @@
 	int preempt_count;	/* 0 => preemptable, <0 => BUG */
 	mm_segment_t addr_limit;	/* address limit */
 	struct restart_block restart_block;
+#ifndef CONFIG_SMP
 	struct l1_scratch_task_info l1_task_info;
+#endif
 };
 
 /*
@@ -90,7 +93,7 @@
 static inline struct thread_info *current_thread_info(void)
 {
 	struct thread_info *ti;
-      __asm__("%0 = sp;": "=&d"(ti):
+      __asm__("%0 = sp;" : "=da"(ti) :
 	);
 	return (struct thread_info *)((long)ti & ~((long)THREAD_SIZE-1));
 }
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index d928b80..3248033 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -149,54 +149,42 @@
 		 : /* no outputs */			\
 		 :"d" (x),"a" (__ptr(p)) : "memory")
 
-#define get_user(x,p)							\
-	({								\
-		int _err = 0;						\
-		typeof(*(p)) *_p = (p);					\
-		if (!access_ok(VERIFY_READ, _p, sizeof(*(_p)))) {	\
-			_err = -EFAULT;					\
-		}							\
-		else {							\
-		switch (sizeof(*(_p))) {				\
-		case 1:							\
-			__get_user_asm(x, _p, B,(Z));			\
-			break;						\
-		case 2:							\
-			__get_user_asm(x, _p, W,(Z));			\
-			break;						\
-		case 4:							\
-			__get_user_asm(x, _p,  , );			\
-			break;						\
-		case 8: {						\
-			unsigned long _xl, _xh;				\
-			__get_user_asm(_xl, ((unsigned long *)_p)+0,  , ); \
-			__get_user_asm(_xh, ((unsigned long *)_p)+1,  , ); \
-			((unsigned long *)&x)[0] = _xl;			\
-			((unsigned long *)&x)[1] = _xh;			\
-		} break;						\
-		default:						\
-			x = 0;						\
-			printk(KERN_INFO "get_user_bad: %s:%d %s\n",    \
-			       __FILE__, __LINE__, __func__);	\
-			_err = __get_user_bad();			\
-			break;						\
-		}							\
-		}							\
-		_err;							\
-	})
+#define get_user(x, ptr)					\
+({								\
+	int _err = 0;						\
+	unsigned long _val = 0;					\
+	const typeof(*(ptr)) __user *_p = (ptr);		\
+	const size_t ptr_size = sizeof(*(_p));			\
+	if (likely(access_ok(VERIFY_READ, _p, ptr_size))) {	\
+		BUILD_BUG_ON(ptr_size >= 8);			\
+		switch (ptr_size) {				\
+		case 1:						\
+			__get_user_asm(_val, _p, B,(Z));	\
+			break;					\
+		case 2:						\
+			__get_user_asm(_val, _p, W,(Z));	\
+			break;					\
+		case 4:						\
+			__get_user_asm(_val, _p,  , );		\
+			break;					\
+		}						\
+	} else							\
+		_err = -EFAULT;					\
+	x = (typeof(*(ptr)))_val;				\
+	_err;							\
+})
 
 #define __get_user(x,p) get_user(x,p)
 
 #define __get_user_bad() (bad_user_access_length(), (-EFAULT))
 
-#define __get_user_asm(x,p,bhw,option)				\
-	{							\
-		unsigned long _tmp;				\
-		__asm__ ("%0 =" #bhw "[%1]"#option";\n\t"	\
-			 : "=d" (_tmp)				\
-			 : "a" (__ptr(p)));			\
-		(x) = (__typeof__(*(p))) _tmp;			\
-	}
+#define __get_user_asm(x, ptr, bhw, option)	\
+({						\
+	__asm__ __volatile__ (			\
+		"%0 =" #bhw "[%1]" #option ";"	\
+		: "=d" (x)			\
+		: "a" (__ptr(ptr)));		\
+})
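The reworked get_user() above performs the access_ok() check itself and yields -EFAULT on failure, so callers only have to test the return value. A brief sketch of typical use; fetch_user_word() and its uptr argument are illustrative names, not part of this patch:

static int fetch_user_word(u32 __user *uptr, u32 *out)
{
	u32 val;

	/* Returns 0 on success, -EFAULT if uptr fails the access_ok()
	 * test done inside the macro; val is left zeroed on failure. */
	if (get_user(val, uptr))
		return -EFAULT;

	*out = val;
	return 0;
}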
 
 #define __copy_from_user(to, from, n) copy_from_user(to, from, n)
 #define __copy_to_user(to, from, n) copy_to_user(to, from, n)
@@ -209,8 +197,8 @@
 #define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n))\
                                                    return retval; })
 
-static inline long copy_from_user(void *to,
-				  const void __user * from, unsigned long n)
+static inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	if (access_ok(VERIFY_READ, from, n))
 		memcpy(to, from, n);
@@ -219,8 +207,8 @@
 	return 0;
 }
 
-static inline long copy_to_user(void *to,
-				const void __user * from, unsigned long n)
+static inline unsigned long __must_check
+copy_to_user(void *to, const void __user *from, unsigned long n)
 {
 	if (access_ok(VERIFY_WRITE, to, n))
 		memcpy(to, from, n);
@@ -233,8 +221,8 @@
  * Copy a null terminated string from userspace.
  */
 
-static inline long strncpy_from_user(char *dst,
-                                     const char *src, long count)
+static inline long __must_check
+strncpy_from_user(char *dst, const char *src, long count)
 {
 	char *tmp;
 	if (!access_ok(VERIFY_READ, src, 1))
@@ -260,7 +248,8 @@
  * Zero Userspace
  */
 
-static inline unsigned long __clear_user(void *to, unsigned long n)
+static inline unsigned long __must_check
+__clear_user(void *to, unsigned long n)
 {
 	memset(to, 0, n);
 	return 0;
diff --git a/arch/blackfin/include/asm/xor.h b/arch/blackfin/include/asm/xor.h
new file mode 100644
index 0000000..c82eb12
--- /dev/null
+++ b/arch/blackfin/include/asm/xor.h
@@ -0,0 +1 @@
+#include <asm-generic/xor.h>
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 606adc7..38a2333 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -7,7 +7,7 @@
 obj-y := \
 	entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \
 	sys_bfin.o traps.o irqchip.o dma-mapping.o flat.o \
-	fixed_code.o reboot.o bfin_gpio.o
+	fixed_code.o reboot.o bfin_gpio.o bfin_dma_5xx.o
 
 ifeq ($(CONFIG_GENERIC_CLOCKEVENTS),y)
     obj-y += time-ts.o
@@ -15,8 +15,11 @@
     obj-y += time.o
 endif
 
+obj-$(CONFIG_IPIPE)                  += ipipe.o
+obj-$(CONFIG_IPIPE_TRACE_MCOUNT)     += mcount.o
 obj-$(CONFIG_BFIN_GPTIMERS)          += gptimers.o
+obj-$(CONFIG_CPLB_INFO)              += cplbinfo.o
 obj-$(CONFIG_MODULES)                += module.o
-obj-$(CONFIG_BFIN_DMA_5XX)           += bfin_dma_5xx.o
 obj-$(CONFIG_KGDB)                   += kgdb.o
+obj-$(CONFIG_KGDB_TESTCASE)          += kgdb_test.o
 obj-$(CONFIG_EARLY_PRINTK)           += early_printk.o
diff --git a/arch/blackfin/kernel/asm-offsets.c b/arch/blackfin/kernel/asm-offsets.c
index 9bb85dd..b5df945 100644
--- a/arch/blackfin/kernel/asm-offsets.c
+++ b/arch/blackfin/kernel/asm-offsets.c
@@ -56,6 +56,9 @@
 	/* offsets into the thread struct */
 	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
+	DEFINE(THREAD_SR, offsetof(struct thread_struct, seqstat));
+	DEFINE(PT_SR, offsetof(struct thread_struct, seqstat));
+	DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
 	DEFINE(THREAD_PC, offsetof(struct thread_struct, pc));
 	DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE);
 
@@ -128,5 +131,31 @@
 	DEFINE(SIGSEGV, SIGSEGV);
 	DEFINE(SIGTRAP, SIGTRAP);
 
+	/* PDA management (in L1 scratchpad) */
+	DEFINE(PDA_SYSCFG, offsetof(struct blackfin_pda, syscfg));
+#ifdef CONFIG_SMP
+	DEFINE(PDA_IRQFLAGS, offsetof(struct blackfin_pda, imask));
+#endif
+	DEFINE(PDA_IPDT, offsetof(struct blackfin_pda, ipdt));
+	DEFINE(PDA_IPDT_SWAPCOUNT, offsetof(struct blackfin_pda, ipdt_swapcount));
+	DEFINE(PDA_DPDT, offsetof(struct blackfin_pda, dpdt));
+	DEFINE(PDA_DPDT_SWAPCOUNT, offsetof(struct blackfin_pda, dpdt_swapcount));
+	DEFINE(PDA_EXIPTR, offsetof(struct blackfin_pda, ex_iptr));
+	DEFINE(PDA_EXOPTR, offsetof(struct blackfin_pda, ex_optr));
+	DEFINE(PDA_EXBUF, offsetof(struct blackfin_pda, ex_buf));
+	DEFINE(PDA_EXIMASK, offsetof(struct blackfin_pda, ex_imask));
+	DEFINE(PDA_EXSTACK, offsetof(struct blackfin_pda, ex_stack));
+#ifdef ANOMALY_05000261
+	DEFINE(PDA_LFRETX, offsetof(struct blackfin_pda, last_cplb_fault_retx));
+#endif
+	DEFINE(PDA_DCPLB, offsetof(struct blackfin_pda, dcplb_fault_addr));
+	DEFINE(PDA_ICPLB, offsetof(struct blackfin_pda, icplb_fault_addr));
+	DEFINE(PDA_RETX, offsetof(struct blackfin_pda, retx));
+	DEFINE(PDA_SEQSTAT, offsetof(struct blackfin_pda, seqstat));
+#ifdef CONFIG_SMP
+	/* Inter-core lock (in L2 SRAM) */
+	DEFINE(SIZEOF_CORELOCK, sizeof(struct corelock_slot));
+#endif
+
 	return 0;
 }
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 339293d..07e02c0 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -1,63 +1,27 @@
 /*
- * File:         arch/blackfin/kernel/bfin_dma_5xx.c
- * Based on:
- * Author:
+ * bfin_dma_5xx.c - Blackfin DMA implementation
  *
- * Created:
- * Description:  This file contains the simple DMA Implementation for Blackfin
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ * Copyright 2004-2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
  */
 
 #include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/param.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
 
 #include <asm/blackfin.h>
-#include <asm/dma.h>
 #include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
 
-/* Remove unused code not exported by symbol or internally called */
-#define REMOVE_DEAD_CODE
-
-/**************************************************************************
- * Global Variables
-***************************************************************************/
-
-static struct dma_channel dma_ch[MAX_BLACKFIN_DMA_CHANNEL];
-
-/*------------------------------------------------------------------------------
- *       Set the Buffer Clear bit in the Configuration register of specific DMA
- *       channel. This will stop the descriptor based DMA operation.
- *-----------------------------------------------------------------------------*/
-static void clear_dma_buffer(unsigned int channel)
-{
-	dma_ch[channel].regs->cfg |= RESTART;
-	SSYNC();
-	dma_ch[channel].regs->cfg &= ~RESTART;
-	SSYNC();
-}
+struct dma_channel dma_ch[MAX_DMA_CHANNELS];
+EXPORT_SYMBOL(dma_ch);
 
 static int __init blackfin_dma_init(void)
 {
@@ -65,32 +29,67 @@
 
 	printk(KERN_INFO "Blackfin DMA Controller\n");
 
-	for (i = 0; i < MAX_BLACKFIN_DMA_CHANNEL; i++) {
+	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
 		dma_ch[i].chan_status = DMA_CHANNEL_FREE;
 		dma_ch[i].regs = dma_io_base_addr[i];
 		mutex_init(&(dma_ch[i].dmalock));
 	}
 	/* Mark MEMDMA Channel 0 as requested since we're using it internally */
-	dma_ch[CH_MEM_STREAM0_DEST].chan_status = DMA_CHANNEL_REQUESTED;
-	dma_ch[CH_MEM_STREAM0_SRC].chan_status = DMA_CHANNEL_REQUESTED;
+	request_dma(CH_MEM_STREAM0_DEST, "Blackfin dma_memcpy");
+	request_dma(CH_MEM_STREAM0_SRC, "Blackfin dma_memcpy");
 
 #if defined(CONFIG_DEB_DMA_URGENT)
 	bfin_write_EBIU_DDRQUE(bfin_read_EBIU_DDRQUE()
 			 | DEB1_URGENT | DEB2_URGENT | DEB3_URGENT);
 #endif
+
+	return 0;
+}
+arch_initcall(blackfin_dma_init);
+
+#ifdef CONFIG_PROC_FS
+static int proc_dma_show(struct seq_file *m, void *v)
+{
+	int i;
+
+	for (i = 0; i < MAX_DMA_CHANNELS; ++i)
+		if (dma_ch[i].chan_status != DMA_CHANNEL_FREE)
+			seq_printf(m, "%2d: %s\n", i, dma_ch[i].device_id);
+
 	return 0;
 }
 
-arch_initcall(blackfin_dma_init);
-
-/*------------------------------------------------------------------------------
- *	Request the specific DMA channel from the system.
- *-----------------------------------------------------------------------------*/
-int request_dma(unsigned int channel, char *device_id)
+static int proc_dma_open(struct inode *inode, struct file *file)
 {
+	return single_open(file, proc_dma_show, NULL);
+}
 
+static const struct file_operations proc_dma_operations = {
+	.open		= proc_dma_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init proc_dma_init(void)
+{
+	return proc_create("dma", 0, NULL, &proc_dma_operations) != NULL;
+}
+late_initcall(proc_dma_init);
+#endif
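The seq_printf() format above emits one "%2d: %s" line per busy channel, so a read of /proc/dma lists each reserved channel with the device_id recorded at request_dma() time. Purely as an illustration (the channel numbers depend on the Blackfin derivative), the two MDMA channels claimed during init might show up as:

	$ cat /proc/dma
	12: Blackfin dma_memcpy
	13: Blackfin dma_memcpy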
+
+/**
+ *	request_dma - request a DMA channel
+ *
+ * Request the specific DMA channel from the system if it's available.
+ */
+int request_dma(unsigned int channel, const char *device_id)
+{
 	pr_debug("request_dma() : BEGIN \n");
 
+	if (device_id == NULL)
+		printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel);
+
 #if defined(CONFIG_BF561) && ANOMALY_05000182
 	if (channel >= CH_IMEM_STREAM0_DEST && channel <= CH_IMEM_STREAM1_DEST) {
 		if (get_cclk() > 500000000) {
@@ -129,60 +128,63 @@
 #endif
 
 	dma_ch[channel].device_id = device_id;
-	dma_ch[channel].irq_callback = NULL;
+	dma_ch[channel].irq = 0;
 
 	/* This is to be enabled by putting a restriction -
 	 * you have to request DMA, before doing any operations on
 	 * descriptor/channel
 	 */
 	pr_debug("request_dma() : END  \n");
-	return channel;
+	return 0;
 }
 EXPORT_SYMBOL(request_dma);
 
-int set_dma_callback(unsigned int channel, dma_interrupt_t callback, void *data)
+int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data)
 {
-	int ret_irq = 0;
-
 	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
+	       && channel < MAX_DMA_CHANNELS));
 
 	if (callback != NULL) {
-		int ret_val;
-		ret_irq = channel2irq(channel);
+		int ret;
+		unsigned int irq = channel2irq(channel);
 
+		ret = request_irq(irq, callback, IRQF_DISABLED,
+			dma_ch[channel].device_id, data);
+		if (ret)
+			return ret;
+
+		dma_ch[channel].irq = irq;
 		dma_ch[channel].data = data;
-
-		ret_val =
-		    request_irq(ret_irq, (void *)callback, IRQF_DISABLED,
-				dma_ch[channel].device_id, data);
-		if (ret_val) {
-			printk(KERN_NOTICE
-			       "Request irq in DMA engine failed.\n");
-			return -EPERM;
-		}
-		dma_ch[channel].irq_callback = callback;
 	}
 	return 0;
 }
 EXPORT_SYMBOL(set_dma_callback);
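With request_dma() now returning 0 on success and set_dma_callback() taking an irq_handler_t directly, a peripheral driver follows the usual claim/register/free sequence. A rough sketch with invented names (my_dma_done(), my_driver_dma_init(), and the "my peripheral" label are illustrative, not from this patch):

static irqreturn_t my_dma_done(int irq, void *dev_id)
{
	/* Acknowledge and handle the finished transfer here. */
	return IRQ_HANDLED;
}

static int my_driver_dma_init(unsigned int channel, void *dev)
{
	int ret;

	/* request_dma() now returns 0 on success instead of the
	 * channel number. */
	ret = request_dma(channel, "my peripheral");
	if (ret)
		return ret;

	/* set_dma_callback() requests the channel's IRQ and records it
	 * in dma_ch[channel].irq so free_dma() can release it later. */
	ret = set_dma_callback(channel, my_dma_done, dev);
	if (ret) {
		free_dma(channel);
		return ret;
	}

	return 0;
}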
 
+/**
+ *	clear_dma_buffer - clear DMA FIFOs for the specified channel
+ *
+ * Set the Buffer Clear bit in the Configuration register of the specified
+ * DMA channel. This will stop the descriptor-based DMA operation.
+ */
+static void clear_dma_buffer(unsigned int channel)
+{
+	dma_ch[channel].regs->cfg |= RESTART;
+	SSYNC();
+	dma_ch[channel].regs->cfg &= ~RESTART;
+}
+
 void free_dma(unsigned int channel)
 {
-	int ret_irq;
-
 	pr_debug("freedma() : BEGIN \n");
 	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
+	       && channel < MAX_DMA_CHANNELS));
 
 	/* Halt the DMA */
 	disable_dma(channel);
 	clear_dma_buffer(channel);
 
-	if (dma_ch[channel].irq_callback != NULL) {
-		ret_irq = channel2irq(channel);
-		free_irq(ret_irq, dma_ch[channel].data);
-	}
+	if (dma_ch[channel].irq)
+		free_irq(dma_ch[channel].irq, dma_ch[channel].data);
 
 	/* Clear the DMA Variable in the Channel */
 	mutex_lock(&(dma_ch[channel].dmalock));
@@ -193,294 +195,15 @@
 }
 EXPORT_SYMBOL(free_dma);
 
-void dma_enable_irq(unsigned int channel)
-{
-	int ret_irq;
-
-	pr_debug("dma_enable_irq() : BEGIN \n");
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	ret_irq = channel2irq(channel);
-	enable_irq(ret_irq);
-}
-EXPORT_SYMBOL(dma_enable_irq);
-
-void dma_disable_irq(unsigned int channel)
-{
-	int ret_irq;
-
-	pr_debug("dma_disable_irq() : BEGIN \n");
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	ret_irq = channel2irq(channel);
-	disable_irq(ret_irq);
-}
-EXPORT_SYMBOL(dma_disable_irq);
-
-int dma_channel_active(unsigned int channel)
-{
-	if (dma_ch[channel].chan_status == DMA_CHANNEL_FREE) {
-		return 0;
-	} else {
-		return 1;
-	}
-}
-EXPORT_SYMBOL(dma_channel_active);
-
-/*------------------------------------------------------------------------------
-*	stop the specific DMA channel.
-*-----------------------------------------------------------------------------*/
-void disable_dma(unsigned int channel)
-{
-	pr_debug("stop_dma() : BEGIN \n");
-
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->cfg &= ~DMAEN;	/* Clean the enable bit */
-	SSYNC();
-	dma_ch[channel].chan_status = DMA_CHANNEL_REQUESTED;
-	/* Needs to be enabled Later */
-	pr_debug("stop_dma() : END \n");
-	return;
-}
-EXPORT_SYMBOL(disable_dma);
-
-void enable_dma(unsigned int channel)
-{
-	pr_debug("enable_dma() : BEGIN \n");
-
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].chan_status = DMA_CHANNEL_ENABLED;
-	dma_ch[channel].regs->curr_x_count = 0;
-	dma_ch[channel].regs->curr_y_count = 0;
-
-	dma_ch[channel].regs->cfg |= DMAEN;	/* Set the enable bit */
-	SSYNC();
-	pr_debug("enable_dma() : END \n");
-	return;
-}
-EXPORT_SYMBOL(enable_dma);
-
-/*------------------------------------------------------------------------------
-*		Set the Start Address register for the specific DMA channel
-* 		This function can be used for register based DMA,
-*		to setup the start address
-*		addr:		Starting address of the DMA Data to be transferred.
-*-----------------------------------------------------------------------------*/
-void set_dma_start_addr(unsigned int channel, unsigned long addr)
-{
-	pr_debug("set_dma_start_addr() : BEGIN \n");
-
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->start_addr = addr;
-	SSYNC();
-	pr_debug("set_dma_start_addr() : END\n");
-}
-EXPORT_SYMBOL(set_dma_start_addr);
-
-void set_dma_next_desc_addr(unsigned int channel, unsigned long addr)
-{
-	pr_debug("set_dma_next_desc_addr() : BEGIN \n");
-
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->next_desc_ptr = addr;
-	SSYNC();
-	pr_debug("set_dma_next_desc_addr() : END\n");
-}
-EXPORT_SYMBOL(set_dma_next_desc_addr);
-
-void set_dma_curr_desc_addr(unsigned int channel, unsigned long addr)
-{
-	pr_debug("set_dma_curr_desc_addr() : BEGIN \n");
-
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->curr_desc_ptr = addr;
-	SSYNC();
-	pr_debug("set_dma_curr_desc_addr() : END\n");
-}
-EXPORT_SYMBOL(set_dma_curr_desc_addr);
-
-void set_dma_x_count(unsigned int channel, unsigned short x_count)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->x_count = x_count;
-	SSYNC();
-}
-EXPORT_SYMBOL(set_dma_x_count);
-
-void set_dma_y_count(unsigned int channel, unsigned short y_count)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->y_count = y_count;
-	SSYNC();
-}
-EXPORT_SYMBOL(set_dma_y_count);
-
-void set_dma_x_modify(unsigned int channel, short x_modify)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->x_modify = x_modify;
-	SSYNC();
-}
-EXPORT_SYMBOL(set_dma_x_modify);
-
-void set_dma_y_modify(unsigned int channel, short y_modify)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->y_modify = y_modify;
-	SSYNC();
-}
-EXPORT_SYMBOL(set_dma_y_modify);
-
-void set_dma_config(unsigned int channel, unsigned short config)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->cfg = config;
-	SSYNC();
-}
-EXPORT_SYMBOL(set_dma_config);
-
-unsigned short
-set_bfin_dma_config(char direction, char flow_mode,
-		    char intr_mode, char dma_mode, char width, char syncmode)
-{
-	unsigned short config;
-
-	config =
-	    ((direction << 1) | (width << 2) | (dma_mode << 4) |
-	     (intr_mode << 6) | (flow_mode << 12) | (syncmode << 5));
-	return config;
-}
-EXPORT_SYMBOL(set_bfin_dma_config);
-
-void set_dma_sg(unsigned int channel, struct dmasg *sg, int nr_sg)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->cfg |= ((nr_sg & 0x0F) << 8);
-
-	dma_ch[channel].regs->next_desc_ptr = (unsigned int)sg;
-
-	SSYNC();
-}
-EXPORT_SYMBOL(set_dma_sg);
-
-void set_dma_curr_addr(unsigned int channel, unsigned long addr)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->curr_addr_ptr = addr;
-	SSYNC();
-}
-EXPORT_SYMBOL(set_dma_curr_addr);
-
-/*------------------------------------------------------------------------------
- *	Get the DMA status of a specific DMA channel from the system.
- *-----------------------------------------------------------------------------*/
-unsigned short get_dma_curr_irqstat(unsigned int channel)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	return dma_ch[channel].regs->irq_status;
-}
-EXPORT_SYMBOL(get_dma_curr_irqstat);
-
-/*------------------------------------------------------------------------------
- *	Clear the DMA_DONE bit in DMA status. Stop the DMA completion interrupt.
- *-----------------------------------------------------------------------------*/
-void clear_dma_irqstat(unsigned int channel)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-	dma_ch[channel].regs->irq_status |= 3;
-}
-EXPORT_SYMBOL(clear_dma_irqstat);
-
-/*------------------------------------------------------------------------------
- *	Get current DMA xcount of a specific DMA channel from the system.
- *-----------------------------------------------------------------------------*/
-unsigned short get_dma_curr_xcount(unsigned int channel)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	return dma_ch[channel].regs->curr_x_count;
-}
-EXPORT_SYMBOL(get_dma_curr_xcount);
-
-/*------------------------------------------------------------------------------
- *	Get current DMA ycount of a specific DMA channel from the system.
- *-----------------------------------------------------------------------------*/
-unsigned short get_dma_curr_ycount(unsigned int channel)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	return dma_ch[channel].regs->curr_y_count;
-}
-EXPORT_SYMBOL(get_dma_curr_ycount);
-
-unsigned long get_dma_next_desc_ptr(unsigned int channel)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	      && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	return dma_ch[channel].regs->next_desc_ptr;
-}
-EXPORT_SYMBOL(get_dma_next_desc_ptr);
-
-unsigned long get_dma_curr_desc_ptr(unsigned int channel)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	      && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	return dma_ch[channel].regs->curr_desc_ptr;
-}
-EXPORT_SYMBOL(get_dma_curr_desc_ptr);
-
-unsigned long get_dma_curr_addr(unsigned int channel)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	      && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	return dma_ch[channel].regs->curr_addr_ptr;
-}
-EXPORT_SYMBOL(get_dma_curr_addr);
-
 #ifdef CONFIG_PM
+# ifndef MAX_DMA_SUSPEND_CHANNELS
+#  define MAX_DMA_SUSPEND_CHANNELS MAX_DMA_CHANNELS
+# endif
 int blackfin_dma_suspend(void)
 {
 	int i;
 
-#ifdef CONFIG_BF561	/* IMDMA channels doesn't have a PERIPHERAL_MAP */
-	for (i = 0; i <= CH_MEM_STREAM3_SRC; i++) {
-#else
-	for (i = 0; i < MAX_BLACKFIN_DMA_CHANNEL; i++) {
-#endif
+	for (i = 0; i < MAX_DMA_SUSPEND_CHANNELS; ++i) {
 		if (dma_ch[i].chan_status == DMA_CHANNEL_ENABLED) {
 			printk(KERN_ERR "DMA Channel %d failed to suspend\n", i);
 			return -EBUSY;
@@ -495,388 +218,201 @@
 void blackfin_dma_resume(void)
 {
 	int i;
-
-#ifdef CONFIG_BF561	/* IMDMA channels doesn't have a PERIPHERAL_MAP */
-	for (i = 0; i <= CH_MEM_STREAM3_SRC; i++)
-#else
-	for (i = 0; i < MAX_BLACKFIN_DMA_CHANNEL; i++)
-#endif
+	for (i = 0; i < MAX_DMA_SUSPEND_CHANNELS; ++i)
 		dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map;
 }
 #endif
 
-static void *__dma_memcpy(void *dest, const void *src, size_t size)
+/**
+ *	blackfin_dma_early_init - minimal DMA init
+ *
+ * Set up a few DMA registers so we can safely do DMA transfers early on in
+ * the kernel booting process.  Really this just means using dma_memcpy().
+ */
+void __init blackfin_dma_early_init(void)
 {
-	int direction;	/* 1 - address decrease, 0 - address increase */
-	int flag_align;	/* 1 - address aligned,  0 - address unaligned */
-	int flag_2D;	/* 1 - 2D DMA needed,	 0 - 1D DMA needed */
+	bfin_write_MDMA_S0_CONFIG(0);
+}
+
+/**
+ *	__dma_memcpy - program the MDMA registers
+ *
+ * Actually program MDMA0 and wait for the transfer to finish.  Disable IRQs
+ * while programming registers so that everything is fully configured.  Wait
+ * for DMA to finish with IRQs enabled.  If interrupted, the initial DMA_DONE
+ * check will make sure we don't clobber any existing transfer.
+ */
+static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf)
+{
+	static DEFINE_SPINLOCK(mdma_lock);
 	unsigned long flags;
 
-	if (size <= 0)
-		return NULL;
+	spin_lock_irqsave(&mdma_lock, flags);
 
-	local_irq_save(flags);
+	if (bfin_read_MDMA_S0_CONFIG())
+		while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
+			continue;
 
-	if ((unsigned long)src < memory_end)
-		blackfin_dcache_flush_range((unsigned int)src,
-					    (unsigned int)(src + size));
+	if (conf & DMA2D) {
+		/* For larger bit sizes, we've already divided down cnt so it
+		 * is no longer a multiple of 64k.  So we have to break down
+		 * the limit here so it is a multiple of the incoming size.
+		 * There is no limitation here in terms of total size other
+		 * than the hardware though as the bits lost in the shift are
+		 * made up by MODIFY (== we can hit the whole address space).
+		 * X: (2^(16 - 0)) * 1 == (2^(16 - 1)) * 2 == (2^(16 - 2)) * 4
+		 */
+		u32 shift = abs(dmod) >> 1;
+		size_t ycnt = cnt >> (16 - shift);
+		cnt = 1 << (16 - shift);
+		bfin_write_MDMA_D0_Y_COUNT(ycnt);
+		bfin_write_MDMA_S0_Y_COUNT(ycnt);
+		bfin_write_MDMA_D0_Y_MODIFY(dmod);
+		bfin_write_MDMA_S0_Y_MODIFY(smod);
+	}
 
-	if ((unsigned long)dest < memory_end)
-		blackfin_dcache_invalidate_range((unsigned int)dest,
-						 (unsigned int)(dest + size));
-
+	bfin_write_MDMA_D0_START_ADDR(daddr);
+	bfin_write_MDMA_D0_X_COUNT(cnt);
+	bfin_write_MDMA_D0_X_MODIFY(dmod);
 	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
 
-	if ((unsigned long)src < (unsigned long)dest)
-		direction = 1;
-	else
-		direction = 0;
+	bfin_write_MDMA_S0_START_ADDR(saddr);
+	bfin_write_MDMA_S0_X_COUNT(cnt);
+	bfin_write_MDMA_S0_X_MODIFY(smod);
+	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
 
-	if ((((unsigned long)dest % 2) == 0) && (((unsigned long)src % 2) == 0)
-	    && ((size % 2) == 0))
-		flag_align = 1;
-	else
-		flag_align = 0;
+	bfin_write_MDMA_S0_CONFIG(DMAEN | conf);
+	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | conf);
 
-	if (size > 0x10000)	/* size > 64K */
-		flag_2D = 1;
-	else
-		flag_2D = 0;
-
-	/* Setup destination and source start address */
-	if (direction) {
-		if (flag_align) {
-			bfin_write_MDMA_D0_START_ADDR(dest + size - 2);
-			bfin_write_MDMA_S0_START_ADDR(src + size - 2);
-		} else {
-			bfin_write_MDMA_D0_START_ADDR(dest + size - 1);
-			bfin_write_MDMA_S0_START_ADDR(src + size - 1);
-		}
-	} else {
-		bfin_write_MDMA_D0_START_ADDR(dest);
-		bfin_write_MDMA_S0_START_ADDR(src);
-	}
-
-	/* Setup destination and source xcount */
-	if (flag_2D) {
-		if (flag_align) {
-			bfin_write_MDMA_D0_X_COUNT(1024 / 2);
-			bfin_write_MDMA_S0_X_COUNT(1024 / 2);
-		} else {
-			bfin_write_MDMA_D0_X_COUNT(1024);
-			bfin_write_MDMA_S0_X_COUNT(1024);
-		}
-		bfin_write_MDMA_D0_Y_COUNT(size >> 10);
-		bfin_write_MDMA_S0_Y_COUNT(size >> 10);
-	} else {
-		if (flag_align) {
-			bfin_write_MDMA_D0_X_COUNT(size / 2);
-			bfin_write_MDMA_S0_X_COUNT(size / 2);
-		} else {
-			bfin_write_MDMA_D0_X_COUNT(size);
-			bfin_write_MDMA_S0_X_COUNT(size);
-		}
-	}
-
-	/* Setup destination and source xmodify and ymodify */
-	if (direction) {
-		if (flag_align) {
-			bfin_write_MDMA_D0_X_MODIFY(-2);
-			bfin_write_MDMA_S0_X_MODIFY(-2);
-			if (flag_2D) {
-				bfin_write_MDMA_D0_Y_MODIFY(-2);
-				bfin_write_MDMA_S0_Y_MODIFY(-2);
-			}
-		} else {
-			bfin_write_MDMA_D0_X_MODIFY(-1);
-			bfin_write_MDMA_S0_X_MODIFY(-1);
-			if (flag_2D) {
-				bfin_write_MDMA_D0_Y_MODIFY(-1);
-				bfin_write_MDMA_S0_Y_MODIFY(-1);
-			}
-		}
-	} else {
-		if (flag_align) {
-			bfin_write_MDMA_D0_X_MODIFY(2);
-			bfin_write_MDMA_S0_X_MODIFY(2);
-			if (flag_2D) {
-				bfin_write_MDMA_D0_Y_MODIFY(2);
-				bfin_write_MDMA_S0_Y_MODIFY(2);
-			}
-		} else {
-			bfin_write_MDMA_D0_X_MODIFY(1);
-			bfin_write_MDMA_S0_X_MODIFY(1);
-			if (flag_2D) {
-				bfin_write_MDMA_D0_Y_MODIFY(1);
-				bfin_write_MDMA_S0_Y_MODIFY(1);
-			}
-		}
-	}
-
-	/* Enable source DMA */
-	if (flag_2D) {
-		if (flag_align) {
-			bfin_write_MDMA_S0_CONFIG(DMAEN | DMA2D | WDSIZE_16);
-			bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | DMA2D | WDSIZE_16);
-		} else {
-			bfin_write_MDMA_S0_CONFIG(DMAEN | DMA2D);
-			bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | DMA2D);
-		}
-	} else {
-		if (flag_align) {
-			bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16);
-			bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_16);
-		} else {
-			bfin_write_MDMA_S0_CONFIG(DMAEN);
-			bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN);
-		}
-	}
+	spin_unlock_irqrestore(&mdma_lock, flags);
 
 	SSYNC();
 
 	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
-		;
+		if (bfin_read_MDMA_S0_CONFIG())
+			continue;
+		else
+			return;
 
-	bfin_write_MDMA_D0_IRQ_STATUS(bfin_read_MDMA_D0_IRQ_STATUS() |
-				      (DMA_DONE | DMA_ERR));
+	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
 
 	bfin_write_MDMA_S0_CONFIG(0);
 	bfin_write_MDMA_D0_CONFIG(0);
-
-	local_irq_restore(flags);
-
-	return dest;
 }
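To make the 2D arithmetic above concrete: a word-aligned bulk copy of 512 KiB arrives here (via _dma_memcpy() below) as cnt = 131072 32-bit elements with dmod = 4, so shift = 2. The X count becomes 1 << 14 = 16384 elements (64 KiB per row) and the Y count 131072 >> 14 = 8 rows, which together cover the full 512 KiB while keeping both counts within their 16-bit registers.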
 
-void *dma_memcpy(void *dest, const void *src, size_t size)
+/**
+ *	_dma_memcpy - translate C memcpy settings into MDMA settings
+ *
+ * Handle all the high-level steps before we touch the MDMA registers:
+ * direction, tweaking of sizes, and formatting of addresses.
+ */
+static void *_dma_memcpy(void *pdst, const void *psrc, size_t size)
 {
-	size_t bulk;
-	size_t rest;
-	void * addr;
+	u32 conf, shift;
+	s16 mod;
+	unsigned long dst = (unsigned long)pdst;
+	unsigned long src = (unsigned long)psrc;
 
-	bulk = (size >> 16) << 16;
+	if (size == 0)
+		return NULL;
+
+	if (dst % 4 == 0 && src % 4 == 0 && size % 4 == 0) {
+		conf = WDSIZE_32;
+		shift = 2;
+	} else if (dst % 2 == 0 && src % 2 == 0 && size % 2 == 0) {
+		conf = WDSIZE_16;
+		shift = 1;
+	} else {
+		conf = WDSIZE_8;
+		shift = 0;
+	}
+
+	/* If the two memory regions have a chance of overlapping, make
+	 * sure the memcpy still works as expected.  Do this by having the
+	 * copy run backwards instead.
+	 */
+	mod = 1 << shift;
+	if (src < dst) {
+		mod *= -1;
+		dst += size + mod;
+		src += size + mod;
+	}
+	size >>= shift;
+
+	if (size > 0x10000)
+		conf |= DMA2D;
+
+	__dma_memcpy(dst, mod, src, mod, size, conf);
+
+	return pdst;
+}
+
+/**
+ *	dma_memcpy - DMA memcpy under mutex lock
+ *
+ * Do not check arguments before starting the DMA memcpy.  Break the transfer
+ * up into two pieces.  The first transfer is in multiples of 64k and the
+ * second transfer is the piece smaller than 64k.
+ */
+void *dma_memcpy(void *pdst, const void *psrc, size_t size)
+{
+	unsigned long dst = (unsigned long)pdst;
+	unsigned long src = (unsigned long)psrc;
+	size_t bulk, rest;
+
+	if (bfin_addr_dcachable(src))
+		blackfin_dcache_flush_range(src, src + size);
+
+	if (bfin_addr_dcachable(dst))
+		blackfin_dcache_invalidate_range(dst, dst + size);
+
+	bulk = size & ~0xffff;
 	rest = size - bulk;
 	if (bulk)
-		__dma_memcpy(dest, src, bulk);
-	addr = __dma_memcpy(dest+bulk, src+bulk, rest);
-	return addr;
+		_dma_memcpy(pdst, psrc, bulk);
+	_dma_memcpy(pdst + bulk, psrc + bulk, rest);
+	return pdst;
 }
 EXPORT_SYMBOL(dma_memcpy);
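Kernel code treats dma_memcpy() like memcpy() for kernel pointers; the cache maintenance above means callers need no extra flushing of their own. A minimal sketch (load_overlay() and its buffers are illustrative only):

static void load_overlay(void *l1_dst, const void *sdram_src, size_t len)
{
	/* dma_memcpy() flushes the source and invalidates the
	 * destination when they are cacheable, then blocks until MDMA0
	 * reports DMA_DONE for both the 64k-multiple bulk piece and
	 * the remainder. */
	dma_memcpy(l1_dst, sdram_src, len);
}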
 
-void *safe_dma_memcpy(void *dest, const void *src, size_t size)
+/**
+ *	safe_dma_memcpy - DMA memcpy w/argument checking
+ *
+ * Verify arguments are safe before heading to dma_memcpy().
+ */
+void *safe_dma_memcpy(void *dst, const void *src, size_t size)
 {
-	void *addr;
-	addr = dma_memcpy(dest, src, size);
-	return addr;
+	if (!access_ok(VERIFY_WRITE, dst, size))
+		return NULL;
+	if (!access_ok(VERIFY_READ, src, size))
+		return NULL;
+	return dma_memcpy(dst, src, size);
 }
 EXPORT_SYMBOL(safe_dma_memcpy);
 
-void dma_outsb(unsigned long addr, const void *buf, unsigned short len)
+static void _dma_out(unsigned long addr, unsigned long buf, unsigned short len,
+                     u16 size, u16 dma_size)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	blackfin_dcache_flush_range((unsigned int)buf,
-			 (unsigned int)(buf) + len);
-
-	bfin_write_MDMA_D0_START_ADDR(addr);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(0);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_START_ADDR(buf);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(1);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_8);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_8);
-
-	SSYNC();
-
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
-
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
-
+	blackfin_dcache_flush_range(buf, buf + len * size);
+	__dma_memcpy(addr, 0, buf, size, len, dma_size);
 }
-EXPORT_SYMBOL(dma_outsb);
 
-
-void dma_insb(unsigned long addr, void *buf, unsigned short len)
+static void _dma_in(unsigned long addr, unsigned long buf, unsigned short len,
+                    u16 size, u16 dma_size)
 {
-	unsigned long flags;
-
-	blackfin_dcache_invalidate_range((unsigned int)buf,
-			 (unsigned int)(buf) + len);
-
-	local_irq_save(flags);
-	bfin_write_MDMA_D0_START_ADDR(buf);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(1);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_START_ADDR(addr);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(0);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_8);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_8);
-
-	SSYNC();
-
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
-
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
-
+	blackfin_dcache_invalidate_range(buf, buf + len * size);
+	__dma_memcpy(buf, size, addr, 0, len, dma_size);
 }
-EXPORT_SYMBOL(dma_insb);
 
-void dma_outsw(unsigned long addr, const void  *buf, unsigned short len)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	blackfin_dcache_flush_range((unsigned int)buf,
-			 (unsigned int)(buf) + len * sizeof(short));
-
-	bfin_write_MDMA_D0_START_ADDR(addr);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(0);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_START_ADDR(buf);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(2);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_16);
-
-	SSYNC();
-
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
-
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
-
-}
-EXPORT_SYMBOL(dma_outsw);
-
-void dma_insw(unsigned long addr, void *buf, unsigned short len)
-{
-	unsigned long flags;
-
-	blackfin_dcache_invalidate_range((unsigned int)buf,
-			 (unsigned int)(buf) + len * sizeof(short));
-
-	local_irq_save(flags);
-
-	bfin_write_MDMA_D0_START_ADDR(buf);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(2);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_START_ADDR(addr);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(0);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_16);
-
-	SSYNC();
-
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
-
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
-
-}
-EXPORT_SYMBOL(dma_insw);
-
-void dma_outsl(unsigned long addr, const void *buf, unsigned short len)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	blackfin_dcache_flush_range((unsigned int)buf,
-			 (unsigned int)(buf) + len * sizeof(long));
-
-	bfin_write_MDMA_D0_START_ADDR(addr);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(0);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_START_ADDR(buf);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(4);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_32);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_32);
-
-	SSYNC();
-
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
-
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
-
-}
-EXPORT_SYMBOL(dma_outsl);
-
-void dma_insl(unsigned long addr, void *buf, unsigned short len)
-{
-	unsigned long flags;
-
-	blackfin_dcache_invalidate_range((unsigned int)buf,
-			 (unsigned int)(buf) + len * sizeof(long));
-
-	local_irq_save(flags);
-
-	bfin_write_MDMA_D0_START_ADDR(buf);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(4);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_START_ADDR(addr);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(0);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_32);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_32);
-
-	SSYNC();
-
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
-
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
-
-}
-EXPORT_SYMBOL(dma_insl);
+#define MAKE_DMA_IO(io, bwl, isize, dmasize, cnst) \
+void dma_##io##s##bwl(unsigned long addr, cnst void *buf, unsigned short len) \
+{ \
+	_dma_##io(addr, (unsigned long)buf, len, isize, WDSIZE_##dmasize); \
+} \
+EXPORT_SYMBOL(dma_##io##s##bwl)
+MAKE_DMA_IO(out, b, 1,  8, const);
+MAKE_DMA_IO(in,  b, 1,  8, );
+MAKE_DMA_IO(out, w, 2, 16, const);
+MAKE_DMA_IO(in,  w, 2, 16, );
+MAKE_DMA_IO(out, l, 4, 32, const);
+MAKE_DMA_IO(in,  l, 4, 32, );
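For reference, each MAKE_DMA_IO() invocation above expands to one exported wrapper; modulo whitespace, MAKE_DMA_IO(out, w, 2, 16, const) yields:

void dma_outsw(unsigned long addr, const void *buf, unsigned short len)
{
	_dma_out(addr, (unsigned long)buf, len, 2, WDSIZE_16);
}
EXPORT_SYMBOL(dma_outsw);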
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index 5c0800a..4c14331 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -119,28 +119,28 @@
 #define AWA_DUMMY_READ(...)  do { } while (0)
 #endif
 
-#ifdef BF533_FAMILY
-static struct gpio_port_t *gpio_bankb[gpio_bank(MAX_BLACKFIN_GPIOS)] = {
+#if defined(BF533_FAMILY) || defined(BF538_FAMILY)
+static struct gpio_port_t *gpio_bankb[] = {
 	(struct gpio_port_t *) FIO_FLAG_D,
 };
 #endif
 
-#if defined(BF527_FAMILY) || defined(BF537_FAMILY)
-static struct gpio_port_t *gpio_bankb[gpio_bank(MAX_BLACKFIN_GPIOS)] = {
+#if defined(BF527_FAMILY) || defined(BF537_FAMILY) || defined(BF518_FAMILY)
+static struct gpio_port_t *gpio_bankb[] = {
 	(struct gpio_port_t *) PORTFIO,
 	(struct gpio_port_t *) PORTGIO,
 	(struct gpio_port_t *) PORTHIO,
 };
 
-static unsigned short *port_fer[gpio_bank(MAX_BLACKFIN_GPIOS)] = {
+static unsigned short *port_fer[] = {
 	(unsigned short *) PORTF_FER,
 	(unsigned short *) PORTG_FER,
 	(unsigned short *) PORTH_FER,
 };
 #endif
 
-#ifdef BF527_FAMILY
-static unsigned short *port_mux[gpio_bank(MAX_BLACKFIN_GPIOS)] = {
+#if defined(BF527_FAMILY) || defined(BF518_FAMILY)
+static unsigned short *port_mux[] = {
 	(unsigned short *) PORTF_MUX,
 	(unsigned short *) PORTG_MUX,
 	(unsigned short *) PORTH_MUX,
@@ -155,7 +155,7 @@
 #endif
 
 #ifdef BF561_FAMILY
-static struct gpio_port_t *gpio_bankb[gpio_bank(MAX_BLACKFIN_GPIOS)] = {
+static struct gpio_port_t *gpio_bankb[] = {
 	(struct gpio_port_t *) FIO0_FLAG_D,
 	(struct gpio_port_t *) FIO1_FLAG_D,
 	(struct gpio_port_t *) FIO2_FLAG_D,
@@ -163,7 +163,7 @@
 #endif
 
 #ifdef BF548_FAMILY
-static struct gpio_port_t *gpio_array[gpio_bank(MAX_BLACKFIN_GPIOS)] = {
+static struct gpio_port_t *gpio_array[] = {
 	(struct gpio_port_t *)PORTA_FER,
 	(struct gpio_port_t *)PORTB_FER,
 	(struct gpio_port_t *)PORTC_FER,
@@ -177,8 +177,9 @@
 };
 #endif
 
-static unsigned short reserved_gpio_map[gpio_bank(MAX_BLACKFIN_GPIOS)];
+static unsigned short reserved_gpio_map[GPIO_BANK_NUM];
 static unsigned short reserved_peri_map[gpio_bank(MAX_RESOURCES)];
+static unsigned short reserved_gpio_irq_map[GPIO_BANK_NUM];
 
 #define RESOURCE_LABEL_SIZE 	16
 
@@ -188,48 +189,46 @@
 
 #if defined(CONFIG_PM)
 #if defined(CONFIG_BF54x)
-static struct gpio_port_s gpio_bank_saved[gpio_bank(MAX_BLACKFIN_GPIOS)];
+static struct gpio_port_s gpio_bank_saved[GPIO_BANK_NUM];
 #else
-static unsigned short wakeup_map[gpio_bank(MAX_BLACKFIN_GPIOS)];
+static unsigned short wakeup_map[GPIO_BANK_NUM];
 static unsigned char wakeup_flags_map[MAX_BLACKFIN_GPIOS];
-static struct gpio_port_s gpio_bank_saved[gpio_bank(MAX_BLACKFIN_GPIOS)];
+static struct gpio_port_s gpio_bank_saved[GPIO_BANK_NUM];
 
 #ifdef BF533_FAMILY
-static unsigned int sic_iwr_irqs[gpio_bank(MAX_BLACKFIN_GPIOS)] = {IRQ_PROG_INTB};
+static unsigned int sic_iwr_irqs[] = {IRQ_PROG_INTB};
 #endif
 
 #ifdef BF537_FAMILY
-static unsigned int sic_iwr_irqs[gpio_bank(MAX_BLACKFIN_GPIOS)] = {IRQ_PROG_INTB, IRQ_PORTG_INTB, IRQ_MAC_TX};
+static unsigned int sic_iwr_irqs[] = {IRQ_PROG_INTB, IRQ_PORTG_INTB, IRQ_MAC_TX};
 #endif
 
-#ifdef BF527_FAMILY
-static unsigned int sic_iwr_irqs[gpio_bank(MAX_BLACKFIN_GPIOS)] = {IRQ_PORTF_INTB, IRQ_PORTG_INTB, IRQ_PORTH_INTB};
+#ifdef BF538_FAMILY
+static unsigned int sic_iwr_irqs[] = {IRQ_PORTF_INTB};
+#endif
+
+#if defined(BF527_FAMILY) || defined(BF518_FAMILY)
+static unsigned int sic_iwr_irqs[] = {IRQ_PORTF_INTB, IRQ_PORTG_INTB, IRQ_PORTH_INTB};
 #endif
 
 #ifdef BF561_FAMILY
-static unsigned int sic_iwr_irqs[gpio_bank(MAX_BLACKFIN_GPIOS)] = {IRQ_PROG0_INTB, IRQ_PROG1_INTB, IRQ_PROG2_INTB};
+static unsigned int sic_iwr_irqs[] = {IRQ_PROG0_INTB, IRQ_PROG1_INTB, IRQ_PROG2_INTB};
 #endif
 #endif
 #endif /* CONFIG_PM */
 
-#if defined(BF548_FAMILY)
 inline int check_gpio(unsigned gpio)
 {
+#if defined(BF548_FAMILY)
 	if (gpio == GPIO_PB15 || gpio == GPIO_PC14 || gpio == GPIO_PC15
 	    || gpio == GPIO_PH14 || gpio == GPIO_PH15
-	    || gpio == GPIO_PJ14 || gpio == GPIO_PJ15
-	    || gpio >= MAX_BLACKFIN_GPIOS)
+	    || gpio == GPIO_PJ14 || gpio == GPIO_PJ15)
 		return -EINVAL;
-	return 0;
-}
-#else
-inline int check_gpio(unsigned gpio)
-{
+#endif
 	if (gpio >= MAX_BLACKFIN_GPIOS)
 		return -EINVAL;
 	return 0;
 }
-#endif
 
 static void gpio_error(unsigned gpio)
 {
@@ -258,35 +257,30 @@
 	}
 
 	if (label)
-		return strncmp(str_ident[ident].name,
-				 label, strlen(label));
+		return strcmp(str_ident[ident].name, label);
 	else
 		return -EINVAL;
 }
 
-#if defined(BF527_FAMILY) || defined(BF537_FAMILY)
 static void port_setup(unsigned gpio, unsigned short usage)
 {
-	if (!check_gpio(gpio)) {
-		if (usage == GPIO_USAGE)
-			*port_fer[gpio_bank(gpio)] &= ~gpio_bit(gpio);
-		else
-			*port_fer[gpio_bank(gpio)] |= gpio_bit(gpio);
-		SSYNC();
-	}
-}
+	if (check_gpio(gpio))
+		return;
+
+#if defined(BF527_FAMILY) || defined(BF537_FAMILY) || defined(BF518_FAMILY)
+	if (usage == GPIO_USAGE)
+		*port_fer[gpio_bank(gpio)] &= ~gpio_bit(gpio);
+	else
+		*port_fer[gpio_bank(gpio)] |= gpio_bit(gpio);
+	SSYNC();
 #elif defined(BF548_FAMILY)
-static void port_setup(unsigned gpio, unsigned short usage)
-{
 	if (usage == GPIO_USAGE)
 		gpio_array[gpio_bank(gpio)]->port_fer &= ~gpio_bit(gpio);
 	else
 		gpio_array[gpio_bank(gpio)]->port_fer |= gpio_bit(gpio);
 	SSYNC();
-}
-#else
-# define port_setup(...)  do { } while (0)
 #endif
+}
 
 #ifdef BF537_FAMILY
 static struct {
@@ -379,7 +373,7 @@
 
 	return (pmux >> (2 * gpio_sub_n(portno)) & 0x3);
 }
-#elif defined(BF527_FAMILY)
+#elif defined(BF527_FAMILY) || defined(BF518_FAMILY)
 inline void portmux_setup(unsigned short portno, unsigned short function)
 {
 	u16 pmux, ident = P_IDENT(portno);
@@ -428,13 +422,13 @@
 void set_gpio_ ## name(unsigned gpio, unsigned short arg) \
 { \
 	unsigned long flags; \
-	local_irq_save(flags); \
+	local_irq_save_hw(flags); \
 	if (arg) \
 		gpio_bankb[gpio_bank(gpio)]->name |= gpio_bit(gpio); \
 	else \
 		gpio_bankb[gpio_bank(gpio)]->name &= ~gpio_bit(gpio); \
 	AWA_DUMMY_READ(name); \
-	local_irq_restore(flags); \
+	local_irq_restore_hw(flags); \
 } \
 EXPORT_SYMBOL(set_gpio_ ## name);
 
@@ -450,13 +444,13 @@
 void set_gpio_ ## name(unsigned gpio, unsigned short arg) \
 { \
 	unsigned long flags; \
-	local_irq_save(flags); \
+	local_irq_save_hw(flags); \
 	if (arg) \
 		gpio_bankb[gpio_bank(gpio)]->name ## _set = gpio_bit(gpio); \
 	else \
 		gpio_bankb[gpio_bank(gpio)]->name ## _clear = gpio_bit(gpio); \
 	AWA_DUMMY_READ(name); \
-	local_irq_restore(flags); \
+	local_irq_restore_hw(flags); \
 } \
 EXPORT_SYMBOL(set_gpio_ ## name);
 #else
@@ -479,10 +473,10 @@
 void set_gpio_toggle(unsigned gpio)
 {
 	unsigned long flags;
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	gpio_bankb[gpio_bank(gpio)]->toggle = gpio_bit(gpio);
 	AWA_DUMMY_READ(toggle);
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 #else
 void set_gpio_toggle(unsigned gpio)
@@ -500,10 +494,10 @@
 void set_gpiop_ ## name(unsigned gpio, unsigned short arg) \
 { \
 	unsigned long flags; \
-	local_irq_save(flags); \
+	local_irq_save_hw(flags); \
 	gpio_bankb[gpio_bank(gpio)]->name = arg; \
 	AWA_DUMMY_READ(name); \
-	local_irq_restore(flags); \
+	local_irq_restore_hw(flags); \
 } \
 EXPORT_SYMBOL(set_gpiop_ ## name);
 #else
@@ -531,10 +525,10 @@
 { \
 	unsigned long flags; \
 	unsigned short ret; \
-	local_irq_save(flags); \
+	local_irq_save_hw(flags); \
 	ret = 0x01 & (gpio_bankb[gpio_bank(gpio)]->name >> gpio_sub_n(gpio)); \
 	AWA_DUMMY_READ(name); \
-	local_irq_restore(flags); \
+	local_irq_restore_hw(flags); \
 	return ret; \
 } \
 EXPORT_SYMBOL(get_gpio_ ## name);
@@ -564,10 +558,10 @@
 { \
 	unsigned long flags; \
 	unsigned short ret; \
-	local_irq_save(flags); \
+	local_irq_save_hw(flags); \
 	ret = (gpio_bankb[gpio_bank(gpio)]->name); \
 	AWA_DUMMY_READ(name); \
-	local_irq_restore(flags); \
+	local_irq_restore_hw(flags); \
 	return ret; \
 } \
 EXPORT_SYMBOL(get_gpiop_ ## name);
@@ -617,10 +611,10 @@
 	if ((check_gpio(gpio) < 0) || !type)
 		return -EINVAL;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	wakeup_map[gpio_bank(gpio)] |= gpio_bit(gpio);
 	wakeup_flags_map[gpio] = type;
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 	return 0;
 }
@@ -633,11 +627,11 @@
 	if (check_gpio(gpio) < 0)
 		return;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 
 	wakeup_map[gpio_bank(gpio)] &= ~gpio_bit(gpio);
 
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 EXPORT_SYMBOL(gpio_pm_wakeup_free);
 
@@ -679,7 +673,7 @@
 		gpio_bankb[bank]->maskb = 0;
 
 		if (mask) {
-#if defined(BF527_FAMILY) || defined(BF537_FAMILY)
+#if defined(BF527_FAMILY) || defined(BF537_FAMILY) || defined(BF518_FAMILY)
 			gpio_bank_saved[bank].fer   = *port_fer[bank];
 #endif
 			gpio_bank_saved[bank].inen  = gpio_bankb[bank]->inen;
@@ -724,7 +718,7 @@
 		bank = gpio_bank(i);
 
 		if (mask) {
-#if defined(BF527_FAMILY) || defined(BF537_FAMILY)
+#if defined(BF527_FAMILY) || defined(BF537_FAMILY) || defined(BF518_FAMILY)
 			*port_fer[bank]   	= gpio_bank_saved[bank].fer;
 #endif
 			gpio_bankb[bank]->inen  = gpio_bank_saved[bank].inen;
@@ -750,9 +744,9 @@
 	for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
 		bank = gpio_bank(i);
 
-#if defined(BF527_FAMILY) || defined(BF537_FAMILY)
+#if defined(BF527_FAMILY) || defined(BF537_FAMILY) || defined(BF518_FAMILY)
 			gpio_bank_saved[bank].fer   = *port_fer[bank];
-#ifdef BF527_FAMILY
+#if defined(BF527_FAMILY) || defined(BF518_FAMILY)
 			gpio_bank_saved[bank].mux   = *port_mux[bank];
 #else
 			if (bank == 0)
@@ -778,8 +772,8 @@
 	for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
 			bank = gpio_bank(i);
 
-#if defined(BF527_FAMILY) || defined(BF537_FAMILY)
-#ifdef BF527_FAMILY
+#if defined(BF527_FAMILY) || defined(BF537_FAMILY) || defined(BF518_FAMILY)
+#if defined(BF527_FAMILY) || defined(BF518_FAMILY)
 			*port_mux[bank] = gpio_bank_saved[bank].mux;
 #else
 			if (bank == 0)
@@ -873,7 +867,6 @@
 * MODIFICATION HISTORY :
 **************************************************************/
 
-#ifdef BF548_FAMILY
 int peripheral_request(unsigned short per, const char *label)
 {
 	unsigned long flags;
@@ -889,31 +882,35 @@
 	if (!(per & P_DEFINED))
 		return -ENODEV;
 
-	if (check_gpio(ident) < 0)
-		return -EINVAL;
+	local_irq_save_hw(flags);
 
-	local_irq_save(flags);
-
-	if (unlikely(reserved_gpio_map[gpio_bank(ident)] & gpio_bit(ident))) {
+	/* If a pin can be muxed as either GPIO or peripheral, make
+	 * sure it is not already a GPIO pin when we request it.
+	 */
+	if (unlikely(!check_gpio(ident) &&
+	    reserved_gpio_map[gpio_bank(ident)] & gpio_bit(ident))) {
 		dump_stack();
 		printk(KERN_ERR
-		    "%s: Peripheral %d is already reserved as GPIO by %s !\n",
+		       "%s: Peripheral %d is already reserved as GPIO by %s !\n",
 		       __func__, ident, get_label(ident));
-		local_irq_restore(flags);
+		local_irq_restore_hw(flags);
 		return -EBUSY;
 	}
 
 	if (unlikely(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident))) {
 
-		u16 funct = get_portmux(ident);
-
 		/*
 		 * Pin functions like AMC address strobes may
 		 * be requested and used by several drivers
 		 */
 
-		if (!((per & P_MAYSHARE) && (funct == P_FUNCT2MUX(per)))) {
+#ifdef BF548_FAMILY
+		u16 funct = get_portmux(ident);
 
+		if (!((per & P_MAYSHARE) && (funct == P_FUNCT2MUX(per)))) {
+#else
+		if (!(per & P_MAYSHARE)) {
+#endif
 			/*
 			 * Allow that the identical pin function can
 			 * be requested from the same driver twice
@@ -926,7 +923,7 @@
 			printk(KERN_ERR
 			       "%s: Peripheral %d function %d is already reserved by %s !\n",
 			       __func__, ident, P_FUNCT2MUX(per), get_label(ident));
-			local_irq_restore(flags);
+			local_irq_restore_hw(flags);
 			return -EBUSY;
 		}
 	}
@@ -934,89 +931,19 @@
  anyway:
 	reserved_peri_map[gpio_bank(ident)] |= gpio_bit(ident);
 
+#ifdef BF548_FAMILY
 	portmux_setup(ident, P_FUNCT2MUX(per));
-	port_setup(ident, PERIPHERAL_USAGE);
-
-	local_irq_restore(flags);
-	set_label(ident, label);
-
-	return 0;
-}
-EXPORT_SYMBOL(peripheral_request);
 #else
-
-int peripheral_request(unsigned short per, const char *label)
-{
-	unsigned long flags;
-	unsigned short ident = P_IDENT(per);
-
-	/*
-	 * Don't cares are pins with only one dedicated function
-	 */
-
-	if (per & P_DONTCARE)
-		return 0;
-
-	if (!(per & P_DEFINED))
-		return -ENODEV;
-
-	local_irq_save(flags);
-
-	if (!check_gpio(ident)) {
-
-		if (unlikely(reserved_gpio_map[gpio_bank(ident)] & gpio_bit(ident))) {
-			dump_stack();
-			printk(KERN_ERR
-			       "%s: Peripheral %d is already reserved as GPIO by %s !\n",
-			       __func__, ident, get_label(ident));
-			local_irq_restore(flags);
-			return -EBUSY;
-		}
-
-	}
-
-	if (unlikely(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident))) {
-
-		/*
-		 * Pin functions like AMC address strobes my
-		 * be requested and used by several drivers
-		 */
-
-		if (!(per & P_MAYSHARE)) {
-
-			/*
-			 * Allow that the identical pin function can
-			 * be requested from the same driver twice
-			 */
-
-			if (cmp_label(ident, label) == 0)
-				goto anyway;
-
-			dump_stack();
-			printk(KERN_ERR
-			       "%s: Peripheral %d function %d is already"
-			       " reserved by %s !\n",
-			       __func__, ident, P_FUNCT2MUX(per),
-				get_label(ident));
-			local_irq_restore(flags);
-			return -EBUSY;
-		}
-
-	}
-
- anyway:
 	portmux_setup(per, P_FUNCT2MUX(per));
-
+#endif
 	port_setup(ident, PERIPHERAL_USAGE);
 
-	reserved_peri_map[gpio_bank(ident)] |= gpio_bit(ident);
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 	set_label(ident, label);
 
 	return 0;
 }
 EXPORT_SYMBOL(peripheral_request);
-#endif
 
 int peripheral_request_list(const unsigned short per[], const char *label)
 {
@@ -1053,10 +980,10 @@
 	if (check_gpio(ident) < 0)
 		return;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 
 	if (unlikely(!(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident)))) {
-		local_irq_restore(flags);
+		local_irq_restore_hw(flags);
 		return;
 	}
 
@@ -1067,7 +994,7 @@
 
 	set_label(ident, "free");
 
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 EXPORT_SYMBOL(peripheral_free);
 
@@ -1094,14 +1021,14 @@
 * MODIFICATION HISTORY :
 **************************************************************/
 
-int gpio_request(unsigned gpio, const char *label)
+int bfin_gpio_request(unsigned gpio, const char *label)
 {
 	unsigned long flags;
 
 	if (check_gpio(gpio) < 0)
 		return -EINVAL;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 
 	/*
 	 * Allow that the identical GPIO can
@@ -1110,15 +1037,15 @@
 	 */
 
 	if (cmp_label(gpio, label) == 0) {
-		local_irq_restore(flags);
+		local_irq_restore_hw(flags);
 		return 0;
 	}
 
 	if (unlikely(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
 		dump_stack();
 		printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n",
-			 gpio, get_label(gpio));
-		local_irq_restore(flags);
+		       gpio, get_label(gpio));
+		local_irq_restore_hw(flags);
 		return -EBUSY;
 	}
 	if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
@@ -1126,34 +1053,37 @@
 		printk(KERN_ERR
 		       "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
 		       gpio, get_label(gpio));
-		local_irq_restore(flags);
+		local_irq_restore_hw(flags);
 		return -EBUSY;
 	}
+	if (unlikely(reserved_gpio_irq_map[gpio_bank(gpio)] & gpio_bit(gpio)))
+		printk(KERN_NOTICE "bfin-gpio: GPIO %d is already reserved as gpio-irq!"
+		       " (Documentation/blackfin/bfin-gpio-notes.txt)\n", gpio);
 
 	reserved_gpio_map[gpio_bank(gpio)] |= gpio_bit(gpio);
+	set_label(gpio, label);
 
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 	port_setup(gpio, GPIO_USAGE);
-	set_label(gpio, label);
 
 	return 0;
 }
-EXPORT_SYMBOL(gpio_request);
+EXPORT_SYMBOL(bfin_gpio_request);
 
-void gpio_free(unsigned gpio)
+void bfin_gpio_free(unsigned gpio)
 {
 	unsigned long flags;
 
 	if (check_gpio(gpio) < 0)
 		return;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 
 	if (unlikely(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)))) {
 		dump_stack();
 		gpio_error(gpio);
-		local_irq_restore(flags);
+		local_irq_restore_hw(flags);
 		return;
 	}
 
@@ -1161,13 +1091,76 @@
 
 	set_label(gpio, "free");
 
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
-EXPORT_SYMBOL(gpio_free);
+EXPORT_SYMBOL(bfin_gpio_free);
+
+int bfin_gpio_irq_request(unsigned gpio, const char *label)
+{
+	unsigned long flags;
+
+	if (check_gpio(gpio) < 0)
+		return -EINVAL;
+
+	local_irq_save_hw(flags);
+
+	if (unlikely(reserved_gpio_irq_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
+		dump_stack();
+		printk(KERN_ERR
+		       "bfin-gpio: GPIO %d is already reserved as gpio-irq !\n",
+		       gpio);
+		local_irq_restore_hw(flags);
+		return -EBUSY;
+	}
+	if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
+		dump_stack();
+		printk(KERN_ERR
+		       "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
+		       gpio, get_label(gpio));
+		local_irq_restore_hw(flags);
+		return -EBUSY;
+	}
+	if (unlikely(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)))
+		printk(KERN_NOTICE "bfin-gpio: GPIO %d is already reserved by %s! "
+		       "(Documentation/blackfin/bfin-gpio-notes.txt)\n",
+		       gpio, get_label(gpio));
+
+	reserved_gpio_irq_map[gpio_bank(gpio)] |= gpio_bit(gpio);
+	set_label(gpio, label);
+
+	local_irq_restore_hw(flags);
+
+	port_setup(gpio, GPIO_USAGE);
+
+	return 0;
+}
+
+void bfin_gpio_irq_free(unsigned gpio)
+{
+	unsigned long flags;
+
+	if (check_gpio(gpio) < 0)
+		return;
+
+	local_irq_save_hw(flags);
+
+	if (unlikely(!(reserved_gpio_irq_map[gpio_bank(gpio)] & gpio_bit(gpio)))) {
+		dump_stack();
+		gpio_error(gpio);
+		local_irq_restore_hw(flags);
+		return;
+	}
+
+	reserved_gpio_irq_map[gpio_bank(gpio)] &= ~gpio_bit(gpio);
+
+	set_label(gpio, "free");
+
+	local_irq_restore_hw(flags);
+}
 
 
 #ifdef BF548_FAMILY
-int gpio_direction_input(unsigned gpio)
+int bfin_gpio_direction_input(unsigned gpio)
 {
 	unsigned long flags;
 
@@ -1176,16 +1169,16 @@
 		return -EINVAL;
 	}
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	gpio_array[gpio_bank(gpio)]->port_dir_clear = gpio_bit(gpio);
 	gpio_array[gpio_bank(gpio)]->port_inen |= gpio_bit(gpio);
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 	return 0;
 }
-EXPORT_SYMBOL(gpio_direction_input);
+EXPORT_SYMBOL(bfin_gpio_direction_input);
 
-int gpio_direction_output(unsigned gpio, int value)
+int bfin_gpio_direction_output(unsigned gpio, int value)
 {
 	unsigned long flags;
 
@@ -1194,30 +1187,30 @@
 		return -EINVAL;
 	}
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	gpio_array[gpio_bank(gpio)]->port_inen &= ~gpio_bit(gpio);
 	gpio_set_value(gpio, value);
 	gpio_array[gpio_bank(gpio)]->port_dir_set = gpio_bit(gpio);
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 	return 0;
 }
-EXPORT_SYMBOL(gpio_direction_output);
+EXPORT_SYMBOL(bfin_gpio_direction_output);
 
-void gpio_set_value(unsigned gpio, int arg)
+void bfin_gpio_set_value(unsigned gpio, int arg)
 {
 	if (arg)
 		gpio_array[gpio_bank(gpio)]->port_set = gpio_bit(gpio);
 	else
 		gpio_array[gpio_bank(gpio)]->port_clear = gpio_bit(gpio);
 }
-EXPORT_SYMBOL(gpio_set_value);
+EXPORT_SYMBOL(bfin_gpio_set_value);
 
-int gpio_get_value(unsigned gpio)
+int bfin_gpio_get_value(unsigned gpio)
 {
 	return (1 & (gpio_array[gpio_bank(gpio)]->port_data >> gpio_sub_n(gpio)));
 }
-EXPORT_SYMBOL(gpio_get_value);
+EXPORT_SYMBOL(bfin_gpio_get_value);
 
 void bfin_gpio_irq_prepare(unsigned gpio)
 {
@@ -1225,34 +1218,34 @@
 
 	port_setup(gpio, GPIO_USAGE);
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	gpio_array[gpio_bank(gpio)]->port_dir_clear = gpio_bit(gpio);
 	gpio_array[gpio_bank(gpio)]->port_inen |= gpio_bit(gpio);
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 #else
 
-int gpio_get_value(unsigned gpio)
+int bfin_gpio_get_value(unsigned gpio)
 {
 	unsigned long flags;
 	int ret;
 
 	if (unlikely(get_gpio_edge(gpio))) {
-		local_irq_save(flags);
+		local_irq_save_hw(flags);
 		set_gpio_edge(gpio, 0);
 		ret = get_gpio_data(gpio);
 		set_gpio_edge(gpio, 1);
-		local_irq_restore(flags);
+		local_irq_restore_hw(flags);
 
 		return ret;
 	} else
 		return get_gpio_data(gpio);
 }
-EXPORT_SYMBOL(gpio_get_value);
+EXPORT_SYMBOL(bfin_gpio_get_value);
 
 
-int gpio_direction_input(unsigned gpio)
+int bfin_gpio_direction_input(unsigned gpio)
 {
 	unsigned long flags;
 
@@ -1261,17 +1254,17 @@
 		return -EINVAL;
 	}
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	gpio_bankb[gpio_bank(gpio)]->dir &= ~gpio_bit(gpio);
 	gpio_bankb[gpio_bank(gpio)]->inen |= gpio_bit(gpio);
 	AWA_DUMMY_READ(inen);
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 	return 0;
 }
-EXPORT_SYMBOL(gpio_direction_input);
+EXPORT_SYMBOL(bfin_gpio_direction_input);
 
-int gpio_direction_output(unsigned gpio, int value)
+int bfin_gpio_direction_output(unsigned gpio, int value)
 {
 	unsigned long flags;
 
@@ -1280,7 +1273,7 @@
 		return -EINVAL;
 	}
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	gpio_bankb[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio);
 
 	if (value)
@@ -1290,11 +1283,11 @@
 
 	gpio_bankb[gpio_bank(gpio)]->dir |= gpio_bit(gpio);
 	AWA_DUMMY_READ(dir);
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 	return 0;
 }
-EXPORT_SYMBOL(gpio_direction_output);
+EXPORT_SYMBOL(bfin_gpio_direction_output);
 
 /* If we are booting from SPI and our board lacks a strong enough pull up,
  * the core can reset and execute the bootrom faster than the resistor can
@@ -1327,14 +1320,17 @@
 static int gpio_proc_read(char *buf, char **start, off_t offset,
 			  int len, int *unused_i, void *unused_v)
 {
-	int c, outlen = 0;
+	int c, irq, gpio, outlen = 0;
 
 	for (c = 0; c < MAX_RESOURCES; c++) {
-		if (!check_gpio(c) && (reserved_gpio_map[gpio_bank(c)] & gpio_bit(c)))
-			len = sprintf(buf, "GPIO_%d: %s \t\tGPIO %s\n", c,
-				 get_label(c), get_gpio_dir(c) ? "OUTPUT" : "INPUT");
+		irq = reserved_gpio_irq_map[gpio_bank(c)] & gpio_bit(c);
+		gpio = reserved_gpio_map[gpio_bank(c)] & gpio_bit(c);
+		if (!check_gpio(c) && (gpio || irq))
+			len = sprintf(buf, "GPIO_%d: \t%s%s \t\tGPIO %s\n", c,
+				 get_label(c), (gpio && irq) ? " *" : "",
+				 get_gpio_dir(c) ? "OUTPUT" : "INPUT");
 		else if (reserved_peri_map[gpio_bank(c)] & gpio_bit(c))
-			len = sprintf(buf, "GPIO_%d: %s \t\tPeripheral\n", c, get_label(c));
+			len = sprintf(buf, "GPIO_%d: \t%s \t\tPeripheral\n", c, get_label(c));
 		else
 			continue;
 		buf += len;
@@ -1354,3 +1350,57 @@
 }
 __initcall(gpio_register_proc);
 #endif
+
+#ifdef CONFIG_GPIOLIB
+int bfin_gpiolib_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+	return bfin_gpio_direction_input(gpio);
+}
+
+int bfin_gpiolib_direction_output(struct gpio_chip *chip, unsigned gpio, int level)
+{
+	return bfin_gpio_direction_output(gpio, level);
+}
+
+int bfin_gpiolib_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+	return bfin_gpio_get_value(gpio);
+}
+
+void bfin_gpiolib_set_value(struct gpio_chip *chip, unsigned gpio, int value)
+{
+#ifdef BF548_FAMILY
+	return bfin_gpio_set_value(gpio, value);
+#else
+	return set_gpio_data(gpio, value);
+#endif
+}
+
+int bfin_gpiolib_gpio_request(struct gpio_chip *chip, unsigned gpio)
+{
+	return bfin_gpio_request(gpio, chip->label);
+}
+
+void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio)
+{
+	return bfin_gpio_free(gpio);
+}
+
+static struct gpio_chip bfin_chip = {
+	.label			= "Blackfin-GPIOlib",
+	.direction_input	= bfin_gpiolib_direction_input,
+	.get			= bfin_gpiolib_get_value,
+	.direction_output	= bfin_gpiolib_direction_output,
+	.set			= bfin_gpiolib_set_value,
+	.request		= bfin_gpiolib_gpio_request,
+	.free			= bfin_gpiolib_gpio_free,
+	.base			= 0,
+	.ngpio			= MAX_BLACKFIN_GPIOS,
+};
+
+static int __init bfin_gpiolib_setup(void)
+{
+	return gpiochip_add(&bfin_chip);
+}
+arch_initcall(bfin_gpiolib_setup);
+#endif
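
With CONFIG_GPIOLIB enabled, the bfin_chip registered above via gpiochip_add() lets drivers use the generic gpiolib calls, which land in the bfin_gpiolib_* callbacks. A minimal sketch (not part of the patch; the GPIO number and label are placeholders):

/* Hypothetical consumer: the generic calls route into the bfin_gpiolib_*
 * callbacks registered by bfin_gpiolib_setup() above.
 */
#include <linux/gpio.h>
#include <linux/errno.h>

static int example_claim_led(void)
{
	int err;

	err = gpio_request(42, "example-led");	/* -> bfin_gpiolib_gpio_request() */
	if (err)
		return err;

	err = gpio_direction_output(42, 0);	/* -> bfin_gpiolib_direction_output() */
	if (err) {
		gpio_free(42);			/* -> bfin_gpiolib_gpio_free() */
		return err;
	}

	gpio_set_value(42, 1);			/* -> bfin_gpiolib_set_value() */
	return 0;
}
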
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index 4367330..01f917d 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -1,52 +1,25 @@
 /*
- * File:         arch/blackfin/kernel/bfin_ksyms.c
- * Based on:     none - original work
- * Author:
+ * arch/blackfin/kernel/bfin_ksyms.c - exports for random symbols
  *
- * Created:
- * Description:
+ * Copyright 2004-2008 Analog Devices Inc.
  *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ * Licensed under the GPL-2 or later.
  */
 
 #include <linux/module.h>
-#include <linux/irq.h>
 #include <linux/uaccess.h>
 
-#include <asm/checksum.h>
 #include <asm/cacheflush.h>
 
-/* platform dependent support */
-
-EXPORT_SYMBOL(__ioremap);
-
-EXPORT_SYMBOL(ip_fast_csum);
-
-EXPORT_SYMBOL(kernel_thread);
-
-EXPORT_SYMBOL(is_in_rom);
+/* Allow people to have their own Blackfin exception handler in a module */
 EXPORT_SYMBOL(bfin_return_from_exception);
 
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy);
+/* All the Blackfin cache functions: mach-common/cache.S */
+EXPORT_SYMBOL(blackfin_dcache_invalidate_range);
+EXPORT_SYMBOL(blackfin_icache_dcache_flush_range);
+EXPORT_SYMBOL(blackfin_icache_flush_range);
+EXPORT_SYMBOL(blackfin_dcache_flush_range);
+EXPORT_SYMBOL(blackfin_dflush_page);
 
 /* The following are special because they're not called
  * explicitly (the C compiler generates them).  Fortunately,
@@ -74,8 +47,6 @@
 extern void __muldi3(void);
 extern void __udivsi3(void);
 extern void __umodsi3(void);
-
-/* gcc lib functions */
 EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__ashrdi3);
 EXPORT_SYMBOL(__umulsi3_highpart);
@@ -87,6 +58,7 @@
 EXPORT_SYMBOL(__udivsi3);
 EXPORT_SYMBOL(__umodsi3);
 
+/* Input/output symbols: lib/{in,out}s.S */
 EXPORT_SYMBOL(outsb);
 EXPORT_SYMBOL(insb);
 EXPORT_SYMBOL(outsw);
@@ -96,20 +68,39 @@
 EXPORT_SYMBOL(outsl);
 EXPORT_SYMBOL(insl);
 EXPORT_SYMBOL(insl_16);
-EXPORT_SYMBOL(irq_flags);
-EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(blackfin_dcache_invalidate_range);
-EXPORT_SYMBOL(blackfin_icache_dcache_flush_range);
-EXPORT_SYMBOL(blackfin_icache_flush_range);
-EXPORT_SYMBOL(blackfin_dcache_flush_range);
-EXPORT_SYMBOL(blackfin_dflush_page);
 
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(__init_begin);
-EXPORT_SYMBOL(__init_end);
-EXPORT_SYMBOL(_ebss_l1);
-EXPORT_SYMBOL(_stext_l1);
-EXPORT_SYMBOL(_etext_l1);
-EXPORT_SYMBOL(_sdata_l1);
-EXPORT_SYMBOL(_ebss_b_l1);
-EXPORT_SYMBOL(_sdata_b_l1);
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(__raw_atomic_update_asm);
+EXPORT_SYMBOL(__raw_atomic_clear_asm);
+EXPORT_SYMBOL(__raw_atomic_set_asm);
+EXPORT_SYMBOL(__raw_atomic_xor_asm);
+EXPORT_SYMBOL(__raw_atomic_test_asm);
+EXPORT_SYMBOL(__raw_xchg_1_asm);
+EXPORT_SYMBOL(__raw_xchg_2_asm);
+EXPORT_SYMBOL(__raw_xchg_4_asm);
+EXPORT_SYMBOL(__raw_cmpxchg_1_asm);
+EXPORT_SYMBOL(__raw_cmpxchg_2_asm);
+EXPORT_SYMBOL(__raw_cmpxchg_4_asm);
+EXPORT_SYMBOL(__raw_spin_is_locked_asm);
+EXPORT_SYMBOL(__raw_spin_lock_asm);
+EXPORT_SYMBOL(__raw_spin_trylock_asm);
+EXPORT_SYMBOL(__raw_spin_unlock_asm);
+EXPORT_SYMBOL(__raw_read_lock_asm);
+EXPORT_SYMBOL(__raw_read_trylock_asm);
+EXPORT_SYMBOL(__raw_read_unlock_asm);
+EXPORT_SYMBOL(__raw_write_lock_asm);
+EXPORT_SYMBOL(__raw_write_trylock_asm);
+EXPORT_SYMBOL(__raw_write_unlock_asm);
+EXPORT_SYMBOL(__raw_bit_set_asm);
+EXPORT_SYMBOL(__raw_bit_clear_asm);
+EXPORT_SYMBOL(__raw_bit_toggle_asm);
+EXPORT_SYMBOL(__raw_bit_test_asm);
+EXPORT_SYMBOL(__raw_bit_test_set_asm);
+EXPORT_SYMBOL(__raw_bit_test_clear_asm);
+EXPORT_SYMBOL(__raw_bit_test_toggle_asm);
+EXPORT_SYMBOL(__raw_uncached_fetch_asm);
+#ifdef __ARCH_SYNC_CORE_DCACHE
+EXPORT_SYMBOL(__raw_smp_mark_barrier_asm);
+EXPORT_SYMBOL(__raw_smp_check_barrier_asm);
+#endif
+#endif
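
The trimmed ksyms file now exports the cache helpers from mach-common/cache.S (and, under CONFIG_SMP, the raw atomic/locking assembly), so modules can call them directly. A hedged sketch of a module writing back a buffer before handing it to a DMA engine; the (start, end) range signature of blackfin_dcache_flush_range() is assumed from its other in-tree users, and buf/len are placeholders:

/* Sketch only: using one of the exported cache helpers from a module. */
#include <linux/module.h>
#include <linux/types.h>
#include <asm/cacheflush.h>

static void example_flush_for_dma(void *buf, size_t len)
{
	unsigned long start = (unsigned long)buf;

	/* Write back dirty dcache lines so the DMA engine sees current data. */
	blackfin_dcache_flush_range(start, start + len);
}
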
diff --git a/arch/blackfin/kernel/cplb-mpu/Makefile b/arch/blackfin/kernel/cplb-mpu/Makefile
index 286b693..7d70d3b 100644
--- a/arch/blackfin/kernel/cplb-mpu/Makefile
+++ b/arch/blackfin/kernel/cplb-mpu/Makefile
@@ -4,5 +4,7 @@
 
 obj-y := cplbinit.o cacheinit.o cplbmgr.o
 
-obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
-
+CFLAGS_cplbmgr.o := -ffixed-I0 -ffixed-I1 -ffixed-I2 -ffixed-I3 \
+		    -ffixed-L0 -ffixed-L1 -ffixed-L2 -ffixed-L3 \
+		    -ffixed-M0 -ffixed-M1 -ffixed-M2 -ffixed-M3 \
+		    -ffixed-B0 -ffixed-B1 -ffixed-B2 -ffixed-B3
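
These per-file kbuild CFLAGS reserve the DAG index/length/modify/base registers so cplbmgr.o can run in the CPLB exception path without saving them; the warning comment added to cplbmgr.c further down spells out the consequence that nothing built outside this unit may be called from it. A sketch of how a helper stays safe under that constraint (the helper name is hypothetical):

/* Hypothetical helper for cplbmgr.c: keeping it static inline means it is
 * compiled in the same -ffixed-* translation unit and cannot clobber the
 * reserved DAG registers the way an out-of-line library call could.
 */
#include <asm/page.h>

static inline unsigned long cplb_page(unsigned long addr)
{
	return addr & PAGE_MASK;
}
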
diff --git a/arch/blackfin/kernel/cplb-mpu/cacheinit.c b/arch/blackfin/kernel/cplb-mpu/cacheinit.c
index a8b712a..c6ff947 100644
--- a/arch/blackfin/kernel/cplb-mpu/cacheinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cacheinit.c
@@ -25,7 +25,7 @@
 #include <asm/cplbinit.h>
 
 #if defined(CONFIG_BFIN_ICACHE)
-void __init bfin_icache_init(void)
+void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
 {
 	unsigned long ctrl;
 	int i;
@@ -43,7 +43,7 @@
 #endif
 
 #if defined(CONFIG_BFIN_DCACHE)
-void __init bfin_dcache_init(void)
+void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
 {
 	unsigned long ctrl;
 	int i;
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinfo.c b/arch/blackfin/kernel/cplb-mpu/cplbinfo.c
deleted file mode 100644
index 822beef..0000000
--- a/arch/blackfin/kernel/cplb-mpu/cplbinfo.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * File:         arch/blackfin/mach-common/cplbinfo.c
- * Based on:
- * Author:       Sonic Zhang <sonic.zhang@analog.com>
- *
- * Created:      Jan. 2005
- * Description:  Display CPLB status
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/uaccess.h>
-
-#include <asm/current.h>
-#include <asm/system.h>
-#include <asm/cplb.h>
-#include <asm/cplbinit.h>
-#include <asm/blackfin.h>
-
-static char page_size_string_table[][4] = { "1K", "4K", "1M", "4M" };
-
-static char *cplb_print_entry(char *buf, struct cplb_entry *tbl, int switched)
-{
-	int i;
-	buf += sprintf(buf, "Index\tAddress\t\tData\tSize\tU/RD\tU/WR\tS/WR\tSwitch\n");
-	for (i = 0; i < MAX_CPLBS; i++) {
-		unsigned long data = tbl[i].data;
-		unsigned long addr = tbl[i].addr;
-		if (!(data & CPLB_VALID))
-			continue;
-
-		buf +=
-		    sprintf(buf,
-			    "%d\t0x%08lx\t%06lx\t%s\t%c\t%c\t%c\t%c\n",
-			    i, addr, data,
-			    page_size_string_table[(data & 0x30000) >> 16],
-			    (data & CPLB_USER_RD) ? 'Y' : 'N',
-			    (data & CPLB_USER_WR) ? 'Y' : 'N',
-			    (data & CPLB_SUPV_WR) ? 'Y' : 'N',
-			    i < switched ? 'N' : 'Y');
-	}
-	buf += sprintf(buf, "\n");
-
-	return buf;
-}
-
-int cplbinfo_proc_output(char *buf)
-{
-	char *p;
-
-	p = buf;
-
-	p += sprintf(p, "------------------ CPLB Information ------------------\n\n");
-
-	if (bfin_read_IMEM_CONTROL() & ENICPLB) {
-		p += sprintf(p, "Instruction CPLB entry:\n");
-		p = cplb_print_entry(p, icplb_tbl, first_switched_icplb);
-	} else
-		p += sprintf(p, "Instruction CPLB is disabled.\n\n");
-
-	if (1 || bfin_read_DMEM_CONTROL() & ENDCPLB) {
-		p += sprintf(p, "Data CPLB entry:\n");
-		p = cplb_print_entry(p, dcplb_tbl, first_switched_dcplb);
-	} else
-		p += sprintf(p, "Data CPLB is disabled.\n");
-
-	p += sprintf(p, "ICPLB miss: %d\nICPLB supervisor miss: %d\n",
-		     nr_icplb_miss, nr_icplb_supv_miss);
-	p += sprintf(p, "DCPLB miss: %d\nDCPLB protection fault:%d\n",
-		     nr_dcplb_miss, nr_dcplb_prot);
-	p += sprintf(p, "CPLB flushes: %d\n",
-		     nr_cplb_flush);
-
-	return p - buf;
-}
-
-static int cplbinfo_read_proc(char *page, char **start, off_t off,
-			      int count, int *eof, void *data)
-{
-	int len;
-
-	len = cplbinfo_proc_output(page);
-	if (len <= off + count)
-		*eof = 1;
-	*start = page + off;
-	len -= off;
-	if (len > count)
-		len = count;
-	if (len < 0)
-		len = 0;
-	return len;
-}
-
-static int __init cplbinfo_init(void)
-{
-	struct proc_dir_entry *entry;
-
-	entry = create_proc_entry("cplbinfo", 0, NULL);
-	if (!entry)
-		return -ENOMEM;
-
-	entry->read_proc = cplbinfo_read_proc;
-	entry->data = NULL;
-
-	return 0;
-}
-
-static void __exit cplbinfo_exit(void)
-{
-	remove_proc_entry("cplbinfo", NULL);
-}
-
-module_init(cplbinfo_init);
-module_exit(cplbinfo_exit);
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index 55af729..bdb9584 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -25,18 +25,19 @@
 #include <asm/blackfin.h>
 #include <asm/cplb.h>
 #include <asm/cplbinit.h>
+#include <asm/mem_map.h>
 
 #if ANOMALY_05000263
 # error the MPU will not function safely while Anomaly 05000263 applies
 #endif
 
-struct cplb_entry icplb_tbl[MAX_CPLBS];
-struct cplb_entry dcplb_tbl[MAX_CPLBS];
+struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS];
+struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS];
 
 int first_switched_icplb, first_switched_dcplb;
 int first_mask_dcplb;
 
-void __init generate_cplb_tables(void)
+void __init generate_cplb_tables_cpu(unsigned int cpu)
 {
 	int i_d, i_i;
 	unsigned long addr;
@@ -55,15 +56,16 @@
 	d_cache |= CPLB_L1_AOW | CPLB_WT;
 #endif
 #endif
+
 	i_d = i_i = 0;
 
 	/* Set up the zero page.  */
-	dcplb_tbl[i_d].addr = 0;
-	dcplb_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
+	dcplb_tbl[cpu][i_d].addr = 0;
+	dcplb_tbl[cpu][i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
 
 #if 0
-	icplb_tbl[i_i].addr = 0;
-	icplb_tbl[i_i++].data = i_cache | CPLB_USER_RD | PAGE_SIZE_4KB;
+	icplb_tbl[cpu][i_i].addr = 0;
+	icplb_tbl[cpu][i_i++].data = i_cache | CPLB_USER_RD | PAGE_SIZE_4KB;
 #endif
 
 	/* Cover kernel memory with 4M pages.  */
@@ -72,28 +74,28 @@
 	i_data = i_cache | CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4MB;
 
 	for (; addr < memory_start; addr += 4 * 1024 * 1024) {
-		dcplb_tbl[i_d].addr = addr;
-		dcplb_tbl[i_d++].data = d_data;
-		icplb_tbl[i_i].addr = addr;
-		icplb_tbl[i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0);
+		dcplb_tbl[cpu][i_d].addr = addr;
+		dcplb_tbl[cpu][i_d++].data = d_data;
+		icplb_tbl[cpu][i_i].addr = addr;
+		icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0);
 	}
 
 	/* Cover L1 memory.  One 4M area for code and data each is enough.  */
 #if L1_DATA_A_LENGTH > 0 || L1_DATA_B_LENGTH > 0
-	dcplb_tbl[i_d].addr = L1_DATA_A_START;
-	dcplb_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
+	dcplb_tbl[cpu][i_d].addr = get_l1_data_a_start_cpu(cpu);
+	dcplb_tbl[cpu][i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
 #endif
 #if L1_CODE_LENGTH > 0
-	icplb_tbl[i_i].addr = L1_CODE_START;
-	icplb_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
+	icplb_tbl[cpu][i_i].addr = get_l1_code_start_cpu(cpu);
+	icplb_tbl[cpu][i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
 #endif
 
 	/* Cover L2 memory */
 #if L2_LENGTH > 0
-	dcplb_tbl[i_d].addr = L2_START;
-	dcplb_tbl[i_d++].data = L2_DMEMORY | PAGE_SIZE_1MB;
-	icplb_tbl[i_i].addr = L2_START;
-	icplb_tbl[i_i++].data = L2_IMEMORY | PAGE_SIZE_1MB;
+	dcplb_tbl[cpu][i_d].addr = L2_START;
+	dcplb_tbl[cpu][i_d++].data = L2_DMEMORY | PAGE_SIZE_1MB;
+	icplb_tbl[cpu][i_i].addr = L2_START;
+	icplb_tbl[cpu][i_i++].data = L2_IMEMORY | PAGE_SIZE_1MB;
 #endif
 
 	first_mask_dcplb = i_d;
@@ -101,7 +103,11 @@
 	first_switched_icplb = i_i;
 
 	while (i_d < MAX_CPLBS)
-		dcplb_tbl[i_d++].data = 0;
+		dcplb_tbl[cpu][i_d++].data = 0;
 	while (i_i < MAX_CPLBS)
-		icplb_tbl[i_i++].data = 0;
+		icplb_tbl[cpu][i_i++].data = 0;
+}
+
+void generate_cplb_tables_all(void)
+{
 }
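
generate_cplb_tables_cpu() now fills one core's tables, with anything shared left to generate_cplb_tables_all(). A sketch of how the boot path might drive both (an assumption — the real call site is not in this hunk, and the wrapper name is illustrative):

#include <linux/cpumask.h>
#include <linux/init.h>
#include <asm/cplbinit.h>

/* Sketch: build the global data once, then each possible core's tables. */
static void __init example_generate_all_cplb_tables(void)
{
	unsigned int cpu;

	generate_cplb_tables_all();
	for_each_possible_cpu(cpu)
		generate_cplb_tables_cpu(cpu);
}
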
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index baa52e2..87463ce 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -25,15 +25,21 @@
 #include <asm/cplbinit.h>
 #include <asm/mmu_context.h>
 
-#define FAULT_RW	(1 << 16)
-#define FAULT_USERSUPV	(1 << 17)
+/*
+ * WARNING
+ *
+ * This file is compiled with certain -ffixed-reg options.  We have to
+ * make sure not to call any functions here that could clobber these
+ * registers.
+ */
 
 int page_mask_nelts;
 int page_mask_order;
-unsigned long *current_rwx_mask;
+unsigned long *current_rwx_mask[NR_CPUS];
 
-int nr_dcplb_miss, nr_icplb_miss, nr_icplb_supv_miss, nr_dcplb_prot;
-int nr_cplb_flush;
+int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
+int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
+int nr_cplb_flush[NR_CPUS];
 
 static inline void disable_dcplb(void)
 {
@@ -98,42 +104,42 @@
 }
 
 /* Counters to implement round-robin replacement.  */
-static int icplb_rr_index, dcplb_rr_index;
+static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
 
 /*
  * Find an ICPLB entry to be evicted and return its index.
  */
-static int evict_one_icplb(void)
+static int evict_one_icplb(unsigned int cpu)
 {
 	int i;
 	for (i = first_switched_icplb; i < MAX_CPLBS; i++)
-		if ((icplb_tbl[i].data & CPLB_VALID) == 0)
+		if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
 			return i;
-	i = first_switched_icplb + icplb_rr_index;
+	i = first_switched_icplb + icplb_rr_index[cpu];
 	if (i >= MAX_CPLBS) {
 		i -= MAX_CPLBS - first_switched_icplb;
-		icplb_rr_index -= MAX_CPLBS - first_switched_icplb;
+		icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
 	}
-	icplb_rr_index++;
+	icplb_rr_index[cpu]++;
 	return i;
 }
 
-static int evict_one_dcplb(void)
+static int evict_one_dcplb(unsigned int cpu)
 {
 	int i;
 	for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
-		if ((dcplb_tbl[i].data & CPLB_VALID) == 0)
+		if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
 			return i;
-	i = first_switched_dcplb + dcplb_rr_index;
+	i = first_switched_dcplb + dcplb_rr_index[cpu];
 	if (i >= MAX_CPLBS) {
 		i -= MAX_CPLBS - first_switched_dcplb;
-		dcplb_rr_index -= MAX_CPLBS - first_switched_dcplb;
+		dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
 	}
-	dcplb_rr_index++;
+	dcplb_rr_index[cpu]++;
 	return i;
 }
 
-static noinline int dcplb_miss(void)
+static noinline int dcplb_miss(unsigned int cpu)
 {
 	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
 	int status = bfin_read_DCPLB_STATUS();
@@ -141,7 +147,7 @@
 	int idx;
 	unsigned long d_data;
 
-	nr_dcplb_miss++;
+	nr_dcplb_miss[cpu]++;
 
 	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
 #ifdef CONFIG_BFIN_DCACHE
@@ -168,25 +174,25 @@
 	} else if (addr >= _ramend) {
 	    d_data |= CPLB_USER_RD | CPLB_USER_WR;
 	} else {
-		mask = current_rwx_mask;
+		mask = current_rwx_mask[cpu];
 		if (mask) {
 			int page = addr >> PAGE_SHIFT;
-			int offs = page >> 5;
+			int idx = page >> 5;
 			int bit = 1 << (page & 31);
 
-			if (mask[offs] & bit)
+			if (mask[idx] & bit)
 				d_data |= CPLB_USER_RD;
 
 			mask += page_mask_nelts;
-			if (mask[offs] & bit)
+			if (mask[idx] & bit)
 				d_data |= CPLB_USER_WR;
 		}
 	}
-	idx = evict_one_dcplb();
+	idx = evict_one_dcplb(cpu);
 
 	addr &= PAGE_MASK;
-	dcplb_tbl[idx].addr = addr;
-	dcplb_tbl[idx].data = d_data;
+	dcplb_tbl[cpu][idx].addr = addr;
+	dcplb_tbl[cpu][idx].data = d_data;
 
 	disable_dcplb();
 	bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
@@ -196,21 +202,21 @@
 	return 0;
 }
 
-static noinline int icplb_miss(void)
+static noinline int icplb_miss(unsigned int cpu)
 {
 	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
 	int status = bfin_read_ICPLB_STATUS();
 	int idx;
 	unsigned long i_data;
 
-	nr_icplb_miss++;
+	nr_icplb_miss[cpu]++;
 
 	/* If inside the uncached DMA region, fault.  */
 	if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
 		return CPLB_PROT_VIOL;
 
 	if (status & FAULT_USERSUPV)
-		nr_icplb_supv_miss++;
+		nr_icplb_supv_miss[cpu]++;
 
 	/*
 	 * First, try to find a CPLB that matches this address.  If we
@@ -218,8 +224,8 @@
 	 * that the instruction crosses a page boundary.
 	 */
 	for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
-		if (icplb_tbl[idx].data & CPLB_VALID) {
-			unsigned long this_addr = icplb_tbl[idx].addr;
+		if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
+			unsigned long this_addr = icplb_tbl[cpu][idx].addr;
 			if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
 				addr += PAGE_SIZE;
 				break;
@@ -257,23 +263,23 @@
 		 * Otherwise, check the x bitmap of the current process.
 		 */
 		if (!(status & FAULT_USERSUPV)) {
-			unsigned long *mask = current_rwx_mask;
+			unsigned long *mask = current_rwx_mask[cpu];
 
 			if (mask) {
 				int page = addr >> PAGE_SHIFT;
-				int offs = page >> 5;
+				int idx = page >> 5;
 				int bit = 1 << (page & 31);
 
 				mask += 2 * page_mask_nelts;
-				if (mask[offs] & bit)
+				if (mask[idx] & bit)
 					i_data |= CPLB_USER_RD;
 			}
 		}
 	}
-	idx = evict_one_icplb();
+	idx = evict_one_icplb(cpu);
 	addr &= PAGE_MASK;
-	icplb_tbl[idx].addr = addr;
-	icplb_tbl[idx].data = i_data;
+	icplb_tbl[cpu][idx].addr = addr;
+	icplb_tbl[cpu][idx].data = i_data;
 
 	disable_icplb();
 	bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
@@ -283,19 +289,19 @@
 	return 0;
 }
 
-static noinline int dcplb_protection_fault(void)
+static noinline int dcplb_protection_fault(unsigned int cpu)
 {
 	int status = bfin_read_DCPLB_STATUS();
 
-	nr_dcplb_prot++;
+	nr_dcplb_prot[cpu]++;
 
 	if (status & FAULT_RW) {
 		int idx = faulting_cplb_index(status);
-		unsigned long data = dcplb_tbl[idx].data;
+		unsigned long data = dcplb_tbl[cpu][idx].data;
 		if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
 		    write_permitted(status, data)) {
 			data |= CPLB_DIRTY;
-			dcplb_tbl[idx].data = data;
+			dcplb_tbl[cpu][idx].data = data;
 			bfin_write32(DCPLB_DATA0 + idx * 4, data);
 			return 0;
 		}
@@ -306,44 +312,45 @@
 int cplb_hdr(int seqstat, struct pt_regs *regs)
 {
 	int cause = seqstat & 0x3f;
+	unsigned int cpu = smp_processor_id();
 	switch (cause) {
 	case 0x23:
-		return dcplb_protection_fault();
+		return dcplb_protection_fault(cpu);
 	case 0x2C:
-		return icplb_miss();
+		return icplb_miss(cpu);
 	case 0x26:
-		return dcplb_miss();
+		return dcplb_miss(cpu);
 	default:
 		return 1;
 	}
 }
 
-void flush_switched_cplbs(void)
+void flush_switched_cplbs(unsigned int cpu)
 {
 	int i;
 	unsigned long flags;
 
-	nr_cplb_flush++;
+	nr_cplb_flush[cpu]++;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	disable_icplb();
 	for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
-		icplb_tbl[i].data = 0;
+		icplb_tbl[cpu][i].data = 0;
 		bfin_write32(ICPLB_DATA0 + i * 4, 0);
 	}
 	enable_icplb();
 
 	disable_dcplb();
 	for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
-		dcplb_tbl[i].data = 0;
+		dcplb_tbl[cpu][i].data = 0;
 		bfin_write32(DCPLB_DATA0 + i * 4, 0);
 	}
 	enable_dcplb();
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 }
 
-void set_mask_dcplbs(unsigned long *masks)
+void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
 {
 	int i;
 	unsigned long addr = (unsigned long)masks;
@@ -351,12 +358,12 @@
 	unsigned long flags;
 
 	if (!masks) {
-		current_rwx_mask = masks;
+		current_rwx_mask[cpu] = masks;
 		return;
 	}
 
-	local_irq_save(flags);
-	current_rwx_mask = masks;
+	local_irq_save_hw(flags);
+	current_rwx_mask[cpu] = masks;
 
 	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
 #ifdef CONFIG_BFIN_DCACHE
@@ -368,12 +375,12 @@
 
 	disable_dcplb();
 	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
-		dcplb_tbl[i].addr = addr;
-		dcplb_tbl[i].data = d_data;
+		dcplb_tbl[cpu][i].addr = addr;
+		dcplb_tbl[cpu][i].data = d_data;
 		bfin_write32(DCPLB_DATA0 + i * 4, d_data);
 		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
 		addr += PAGE_SIZE;
 	}
 	enable_dcplb();
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
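
The eviction policy itself is unchanged here, only made per-CPU: prefer any invalid switched entry, otherwise take the next victim after first_switched_* in round-robin order, wrapping the counter back into range. A standalone sketch of the same arithmetic with illustrative names (not the kernel code itself):

#define MAX_CPLBS	16

static int rr_index;			/* per-CPU array in the real code */

static int pick_victim(const unsigned long *data, int first_switched)
{
	int i;

	/* Prefer an entry that is not valid at all. */
	for (i = first_switched; i < MAX_CPLBS; i++)
		if (!(data[i] & 0x1 /* CPLB_VALID */))
			return i;

	/* Otherwise evict in round-robin order among the switched entries,
	 * wrapping both the index and the counter back into range. */
	i = first_switched + rr_index;
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched;
		rr_index -= MAX_CPLBS - first_switched;
	}
	rr_index++;
	return i;
}
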
diff --git a/arch/blackfin/kernel/cplb-nompu/Makefile b/arch/blackfin/kernel/cplb-nompu/Makefile
index d36ea9b..7d70d3b 100644
--- a/arch/blackfin/kernel/cplb-nompu/Makefile
+++ b/arch/blackfin/kernel/cplb-nompu/Makefile
@@ -2,7 +2,9 @@
 # arch/blackfin/kernel/cplb-nompu/Makefile
 #
 
-obj-y := cplbinit.o cacheinit.o cplbhdlr.o cplbmgr.o
+obj-y := cplbinit.o cacheinit.o cplbmgr.o
 
-obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
-
+CFLAGS_cplbmgr.o := -ffixed-I0 -ffixed-I1 -ffixed-I2 -ffixed-I3 \
+		    -ffixed-L0 -ffixed-L1 -ffixed-L2 -ffixed-L3 \
+		    -ffixed-M0 -ffixed-M1 -ffixed-M2 -ffixed-M3 \
+		    -ffixed-B0 -ffixed-B1 -ffixed-B2 -ffixed-B3
diff --git a/arch/blackfin/kernel/cplb-nompu/cacheinit.c b/arch/blackfin/kernel/cplb-nompu/cacheinit.c
index bd08315..c6ff947 100644
--- a/arch/blackfin/kernel/cplb-nompu/cacheinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cacheinit.c
@@ -25,19 +25,15 @@
 #include <asm/cplbinit.h>
 
 #if defined(CONFIG_BFIN_ICACHE)
-void __init bfin_icache_init(void)
+void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
 {
-	unsigned long *table = icplb_table;
 	unsigned long ctrl;
 	int i;
 
+	SSYNC();
 	for (i = 0; i < MAX_CPLBS; i++) {
-		unsigned long addr = *table++;
-		unsigned long data = *table++;
-		if (addr == (unsigned long)-1)
-			break;
-		bfin_write32(ICPLB_ADDR0 + i * 4, addr);
-		bfin_write32(ICPLB_DATA0 + i * 4, data);
+		bfin_write32(ICPLB_ADDR0 + i * 4, icplb_tbl[i].addr);
+		bfin_write32(ICPLB_DATA0 + i * 4, icplb_tbl[i].data);
 	}
 	ctrl = bfin_read_IMEM_CONTROL();
 	ctrl |= IMC | ENICPLB;
@@ -47,20 +43,17 @@
 #endif
 
 #if defined(CONFIG_BFIN_DCACHE)
-void __init bfin_dcache_init(void)
+void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
 {
-	unsigned long *table = dcplb_table;
 	unsigned long ctrl;
 	int i;
 
+	SSYNC();
 	for (i = 0; i < MAX_CPLBS; i++) {
-		unsigned long addr = *table++;
-		unsigned long data = *table++;
-		if (addr == (unsigned long)-1)
-			break;
-		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
-		bfin_write32(DCPLB_DATA0 + i * 4, data);
+		bfin_write32(DCPLB_ADDR0 + i * 4, dcplb_tbl[i].addr);
+		bfin_write32(DCPLB_DATA0 + i * 4, dcplb_tbl[i].data);
 	}
+
 	ctrl = bfin_read_DMEM_CONTROL();
 	ctrl |= DMEM_CNTR;
 	bfin_write_DMEM_CONTROL(ctrl);
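
bfin_icache_init()/bfin_dcache_init() now take the table to load, so each core passes its own icplb_tbl[cpu]/dcplb_tbl[cpu]. A sketch of an assumed call site in the CPU bring-up path (the wrapper name is illustrative, not taken from this patch):

#include <linux/init.h>
#include <asm/cplbinit.h>

/* Sketch: load this core's CPLB tables and turn the caches on. */
static void __cpuinit example_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
	bfin_icache_init(icplb_tbl[cpu]);
#endif
#ifdef CONFIG_BFIN_DCACHE
	bfin_dcache_init(dcplb_tbl[cpu]);
#endif
}
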
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbhdlr.S b/arch/blackfin/kernel/cplb-nompu/cplbhdlr.S
deleted file mode 100644
index ecbabc0..0000000
--- a/arch/blackfin/kernel/cplb-nompu/cplbhdlr.S
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * File:         arch/blackfin/mach-common/cplbhdlr.S
- * Based on:
- * Author:       LG Soft India
- *
- * Created:      ?
- * Description:  CPLB exception handler
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/linkage.h>
-#include <asm/cplb.h>
-#include <asm/entry.h>
-
-#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
-.section .l1.text
-#else
-.text
-#endif
-
-.type _cplb_mgr, STT_FUNC;
-.type _panic_cplb_error, STT_FUNC;
-
-.align 2
-
-ENTRY(__cplb_hdr)
-	R2 = SEQSTAT;
-
-	/* Mask the contents of SEQSTAT and leave only EXCAUSE in R2 */
-	R2 <<= 26;
-	R2 >>= 26;
-
-	R1 = 0x23; /* Data access CPLB protection violation */
-	CC = R2 == R1;
-	IF !CC JUMP .Lnot_data_write;
-	R0 = 2;		/* is a write to data space*/
-	JUMP .Lis_icplb_miss;
-
-.Lnot_data_write:
-	R1 = 0x2C; /* CPLB miss on an instruction fetch */
-	CC = R2 == R1;
-	R0 = 0;		/* is_data_miss == False*/
-	IF CC JUMP .Lis_icplb_miss;
-
-	R1 = 0x26;
-	CC = R2 == R1;
-	IF !CC JUMP .Lunknown;
-
-	R0 = 1;		/* is_data_miss == True*/
-
-.Lis_icplb_miss:
-
-#if defined(CONFIG_BFIN_ICACHE) || defined(CONFIG_BFIN_DCACHE)
-# if defined(CONFIG_BFIN_ICACHE) && !defined(CONFIG_BFIN_DCACHE)
-	R1 = CPLB_ENABLE_ICACHE;
-# endif
-# if !defined(CONFIG_BFIN_ICACHE) && defined(CONFIG_BFIN_DCACHE)
-	R1 = CPLB_ENABLE_DCACHE;
-# endif
-# if defined(CONFIG_BFIN_ICACHE) && defined(CONFIG_BFIN_DCACHE)
-	R1 = CPLB_ENABLE_DCACHE | CPLB_ENABLE_ICACHE;
-# endif
-#else
-	R1 = 0;
-#endif
-
-	[--SP] = RETS;
-	CALL _cplb_mgr;
-	RETS = [SP++];
-	CC = R0 == 0;
-	IF !CC JUMP .Lnot_replaced;
-	RTS;
-
-/*
- * Diagnostic exception handlers
- */
-.Lunknown:
-	R0 = CPLB_UNKNOWN_ERR;
-	JUMP .Lcplb_error;
-
-.Lnot_replaced:
-	CC = R0 == CPLB_NO_UNLOCKED;
-	IF !CC JUMP .Lnext_check;
-	R0 = CPLB_NO_UNLOCKED;
-	JUMP .Lcplb_error;
-
-.Lnext_check:
-	CC = R0 == CPLB_NO_ADDR_MATCH;
-	IF !CC JUMP .Lnext_check2;
-	R0 = CPLB_NO_ADDR_MATCH;
-	JUMP .Lcplb_error;
-
-.Lnext_check2:
-	CC = R0 == CPLB_PROT_VIOL;
-	IF !CC JUMP .Lstrange_return_from_cplb_mgr;
-	R0 = CPLB_PROT_VIOL;
-	JUMP .Lcplb_error;
-
-.Lstrange_return_from_cplb_mgr:
-	IDLE;
-	CSYNC;
-	JUMP .Lstrange_return_from_cplb_mgr;
-
-.Lcplb_error:
-	R1 = sp;
-	SP += -12;
-	call _panic_cplb_error;
-	SP += 12;
-	JUMP.L _handle_bad_cplb;
-
-ENDPROC(__cplb_hdr)
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinfo.c b/arch/blackfin/kernel/cplb-nompu/cplbinfo.c
deleted file mode 100644
index 1e74f0b..0000000
--- a/arch/blackfin/kernel/cplb-nompu/cplbinfo.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * File:         arch/blackfin/mach-common/cplbinfo.c
- * Based on:
- * Author:       Sonic Zhang <sonic.zhang@analog.com>
- *
- * Created:      Jan. 2005
- * Description:  Display CPLB status
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/uaccess.h>
-
-#include <asm/cplbinit.h>
-#include <asm/blackfin.h>
-
-#define CPLB_I 1
-#define CPLB_D 2
-
-#define SYNC_SYS    SSYNC()
-#define SYNC_CORE   CSYNC()
-
-#define CPLB_BIT_PAGESIZE 0x30000
-
-static int page_size_table[4] = {
-	0x00000400,		/* 1K */
-	0x00001000,		/* 4K */
-	0x00100000,		/* 1M */
-	0x00400000		/* 4M */
-};
-
-static char page_size_string_table[][4] = { "1K", "4K", "1M", "4M" };
-
-static int cplb_find_entry(unsigned long *cplb_addr,
-			   unsigned long *cplb_data, unsigned long addr,
-			   unsigned long data)
-{
-	int ii;
-
-	for (ii = 0; ii < 16; ii++)
-		if (addr >= cplb_addr[ii] && addr < cplb_addr[ii] +
-		    page_size_table[(cplb_data[ii] & CPLB_BIT_PAGESIZE) >> 16]
-			&& (cplb_data[ii] == data))
-			return ii;
-
-	return -1;
-}
-
-static char *cplb_print_entry(char *buf, int type)
-{
-	unsigned long *p_addr = dpdt_table;
-	unsigned long *p_data = dpdt_table + 1;
-	unsigned long *p_icount = dpdt_swapcount_table;
-	unsigned long *p_ocount = dpdt_swapcount_table + 1;
-	unsigned long *cplb_addr = (unsigned long *)DCPLB_ADDR0;
-	unsigned long *cplb_data = (unsigned long *)DCPLB_DATA0;
-	int entry = 0, used_cplb = 0;
-
-	if (type == CPLB_I) {
-		buf += sprintf(buf, "Instruction CPLB entry:\n");
-		p_addr = ipdt_table;
-		p_data = ipdt_table + 1;
-		p_icount = ipdt_swapcount_table;
-		p_ocount = ipdt_swapcount_table + 1;
-		cplb_addr = (unsigned long *)ICPLB_ADDR0;
-		cplb_data = (unsigned long *)ICPLB_DATA0;
-	} else
-		buf += sprintf(buf, "Data CPLB entry:\n");
-
-	buf += sprintf(buf, "Address\t\tData\tSize\tValid\tLocked\tSwapin\tiCount\toCount\n");
-
-	while (*p_addr != 0xffffffff) {
-		entry = cplb_find_entry(cplb_addr, cplb_data, *p_addr, *p_data);
-		if (entry >= 0)
-			used_cplb |= 1 << entry;
-
-		buf +=
-		    sprintf(buf,
-			    "0x%08lx\t0x%05lx\t%s\t%c\t%c\t%2d\t%ld\t%ld\n",
-			    *p_addr, *p_data,
-			    page_size_string_table[(*p_data & 0x30000) >> 16],
-			    (*p_data & CPLB_VALID) ? 'Y' : 'N',
-			    (*p_data & CPLB_LOCK) ? 'Y' : 'N', entry, *p_icount,
-			    *p_ocount);
-
-		p_addr += 2;
-		p_data += 2;
-		p_icount += 2;
-		p_ocount += 2;
-	}
-
-	if (used_cplb != 0xffff) {
-		buf += sprintf(buf, "Unused/mismatched CPLBs:\n");
-
-		for (entry = 0; entry < 16; entry++)
-			if (0 == ((1 << entry) & used_cplb)) {
-				int flags = cplb_data[entry];
-				buf +=
-				    sprintf(buf,
-					    "%2d: 0x%08lx\t0x%05x\t%s\t%c\t%c\n",
-					    entry, cplb_addr[entry], flags,
-					    page_size_string_table[(flags &
-								    0x30000) >>
-								   16],
-					    (flags & CPLB_VALID) ? 'Y' : 'N',
-					    (flags & CPLB_LOCK) ? 'Y' : 'N');
-			}
-	}
-
-	buf += sprintf(buf, "\n");
-
-	return buf;
-}
-
-static int cplbinfo_proc_output(char *buf)
-{
-	char *p;
-
-	p = buf;
-
-	p += sprintf(p, "------------------ CPLB Information ------------------\n\n");
-
-	if (bfin_read_IMEM_CONTROL() & ENICPLB)
-		p = cplb_print_entry(p, CPLB_I);
-	else
-		p += sprintf(p, "Instruction CPLB is disabled.\n\n");
-
-	if (bfin_read_DMEM_CONTROL() & ENDCPLB)
-		p = cplb_print_entry(p, CPLB_D);
-	else
-		p += sprintf(p, "Data CPLB is disabled.\n");
-
-	return p - buf;
-}
-
-static int cplbinfo_read_proc(char *page, char **start, off_t off,
-			      int count, int *eof, void *data)
-{
-	int len;
-
-	len = cplbinfo_proc_output(page);
-	if (len <= off + count)
-		*eof = 1;
-	*start = page + off;
-	len -= off;
-	if (len > count)
-		len = count;
-	if (len < 0)
-		len = 0;
-	return len;
-}
-
-static int __init cplbinfo_init(void)
-{
-	struct proc_dir_entry *entry;
-
-	entry = create_proc_entry("cplbinfo", 0, NULL);
-	if (!entry)
-		return -ENOMEM;
-
-	entry->read_proc = cplbinfo_read_proc;
-	entry->data = NULL;
-
-	return 0;
-}
-
-static void __exit cplbinfo_exit(void)
-{
-	remove_proc_entry("cplbinfo", NULL);
-}
-
-module_init(cplbinfo_init);
-module_exit(cplbinfo_exit);
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index 2debc90..0e28f75 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -20,445 +20,152 @@
  * to the Free Software Foundation, Inc.,
  * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
+
 #include <linux/module.h>
 
 #include <asm/blackfin.h>
 #include <asm/cacheflush.h>
 #include <asm/cplb.h>
 #include <asm/cplbinit.h>
+#include <asm/mem_map.h>
 
-#define CPLB_MEM CONFIG_MAX_MEM_SIZE
+struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
+struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
 
-/*
-* Number of required data CPLB switchtable entries
-* MEMSIZE / 4 (we mostly install 4M page size CPLBs
-* approx 16 for smaller 1MB page size CPLBs for allignment purposes
-* 1 for L1 Data Memory
-* possibly 1 for L2 Data Memory
-* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO
-* 1 for ASYNC Memory
-*/
-#define MAX_SWITCH_D_CPLBS (((CPLB_MEM / 4) + 16 + 1 + 1 + 1 \
-				 + ASYNC_MEMORY_CPLB_COVERAGE) * 2)
+int first_switched_icplb PDT_ATTR;
+int first_switched_dcplb PDT_ATTR;
 
-/*
-* Number of required instruction CPLB switchtable entries
-* MEMSIZE / 4 (we mostly install 4M page size CPLBs
-* approx 12 for smaller 1MB page size CPLBs for allignment purposes
-* 1 for L1 Instruction Memory
-* possibly 1 for L2 Instruction Memory
-* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO
-*/
-#define MAX_SWITCH_I_CPLBS (((CPLB_MEM / 4) + 12 + 1 + 1 + 1) * 2)
+struct cplb_boundary dcplb_bounds[9] PDT_ATTR;
+struct cplb_boundary icplb_bounds[7] PDT_ATTR;
 
+int icplb_nr_bounds PDT_ATTR;
+int dcplb_nr_bounds PDT_ATTR;
 
-u_long icplb_table[MAX_CPLBS + 1];
-u_long dcplb_table[MAX_CPLBS + 1];
-
-#ifdef CONFIG_CPLB_SWITCH_TAB_L1
-# define PDT_ATTR __attribute__((l1_data))
-#else
-# define PDT_ATTR
-#endif
-
-u_long ipdt_table[MAX_SWITCH_I_CPLBS + 1] PDT_ATTR;
-u_long dpdt_table[MAX_SWITCH_D_CPLBS + 1] PDT_ATTR;
-
-#ifdef CONFIG_CPLB_INFO
-u_long ipdt_swapcount_table[MAX_SWITCH_I_CPLBS] PDT_ATTR;
-u_long dpdt_swapcount_table[MAX_SWITCH_D_CPLBS] PDT_ATTR;
-#endif
-
-struct s_cplb {
-	struct cplb_tab init_i;
-	struct cplb_tab init_d;
-	struct cplb_tab switch_i;
-	struct cplb_tab switch_d;
-};
-
-#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
-static struct cplb_desc cplb_data[] = {
-	{
-		.start = 0,
-		.end = SIZE_1K,
-		.psize = SIZE_1K,
-		.attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
-		.i_conf = SDRAM_OOPS,
-		.d_conf = SDRAM_OOPS,
-#if defined(CONFIG_DEBUG_HUNT_FOR_ZERO)
-		.valid = 1,
-#else
-		.valid = 0,
-#endif
-		.name = "Zero Pointer Guard Page",
-	},
-	{
-		.start = L1_CODE_START,
-		.end = L1_CODE_START + L1_CODE_LENGTH,
-		.psize = SIZE_4M,
-		.attr = INITIAL_T | SWITCH_T | I_CPLB,
-		.i_conf = L1_IMEMORY,
-		.d_conf = 0,
-		.valid = 1,
-		.name = "L1 I-Memory",
-	},
-	{
-		.start = L1_DATA_A_START,
-		.end = L1_DATA_B_START + L1_DATA_B_LENGTH,
-		.psize = SIZE_4M,
-		.attr = INITIAL_T | SWITCH_T | D_CPLB,
-		.i_conf = 0,
-		.d_conf = L1_DMEMORY,
-#if ((L1_DATA_A_LENGTH > 0) || (L1_DATA_B_LENGTH > 0))
-		.valid = 1,
-#else
-		.valid = 0,
-#endif
-		.name = "L1 D-Memory",
-	},
-	{
-		.start = 0,
-		.end = 0,  /* dynamic */
-		.psize = 0,
-		.attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
-		.i_conf = SDRAM_IGENERIC,
-		.d_conf = SDRAM_DGENERIC,
-		.valid = 1,
-		.name = "Kernel Memory",
-	},
-	{
-		.start = 0, /* dynamic */
-		.end = 0, /* dynamic */
-		.psize = 0,
-		.attr = INITIAL_T | SWITCH_T | D_CPLB,
-		.i_conf = SDRAM_IGENERIC,
-		.d_conf = SDRAM_DNON_CHBL,
-		.valid = 1,
-		.name = "uClinux MTD Memory",
-	},
-	{
-		.start = 0, /* dynamic */
-		.end = 0,   /* dynamic */
-		.psize = SIZE_1M,
-		.attr = INITIAL_T | SWITCH_T | D_CPLB,
-		.d_conf = SDRAM_DNON_CHBL,
-		.valid = 1,
-		.name = "Uncached DMA Zone",
-	},
-	{
-		.start = 0, /* dynamic */
-		.end = 0, /* dynamic */
-		.psize = 0,
-		.attr = SWITCH_T | D_CPLB,
-		.i_conf = 0, /* dynamic */
-		.d_conf = 0, /* dynamic */
-		.valid = 1,
-		.name = "Reserved Memory",
-	},
-	{
-		.start = ASYNC_BANK0_BASE,
-		.end = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE,
-		.psize = 0,
-		.attr = SWITCH_T | D_CPLB,
-		.d_conf = SDRAM_EBIU,
-		.valid = 1,
-		.name = "Asynchronous Memory Banks",
-	},
-	{
-		.start = L2_START,
-		.end = L2_START + L2_LENGTH,
-		.psize = SIZE_1M,
-		.attr = SWITCH_T | I_CPLB | D_CPLB,
-		.i_conf = L2_IMEMORY,
-		.d_conf = L2_DMEMORY,
-		.valid = (L2_LENGTH > 0),
-		.name = "L2 Memory",
-	},
-	{
-		.start = BOOT_ROM_START,
-		.end = BOOT_ROM_START + BOOT_ROM_LENGTH,
-		.psize = SIZE_1M,
-		.attr = SWITCH_T | I_CPLB | D_CPLB,
-		.i_conf = SDRAM_IGENERIC,
-		.d_conf = SDRAM_DGENERIC,
-		.valid = 1,
-		.name = "On-Chip BootROM",
-	},
-};
-
-static u16 __init lock_kernel_check(u32 start, u32 end)
+void __init generate_cplb_tables_cpu(unsigned int cpu)
 {
-	if (start >= (u32)_end || end <= (u32)_stext)
-		return 0;
+	int i_d, i_i;
+	unsigned long addr;
 
-	/* This cplb block overlapped with kernel area. */
-	return IN_KERNEL;
-}
+	struct cplb_entry *d_tbl = dcplb_tbl[cpu];
+	struct cplb_entry *i_tbl = icplb_tbl[cpu];
 
-static unsigned short __init
-fill_cplbtab(struct cplb_tab *table,
-	     unsigned long start, unsigned long end,
-	     unsigned long block_size, unsigned long cplb_data)
-{
-	int i;
+	printk(KERN_INFO "NOMPU: setting up cplb tables\n");
 
-	switch (block_size) {
-	case SIZE_4M:
-		i = 3;
-		break;
-	case SIZE_1M:
-		i = 2;
-		break;
-	case SIZE_4K:
-		i = 1;
-		break;
-	case SIZE_1K:
-	default:
-		i = 0;
-		break;
+	i_d = i_i = 0;
+
+	/* Set up the zero page.  */
+	d_tbl[i_d].addr = 0;
+	d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
+
+	/* Cover kernel memory with 4M pages.  */
+	addr = 0;
+
+	for (; addr < memory_start; addr += 4 * 1024 * 1024) {
+		d_tbl[i_d].addr = addr;
+		d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
+		i_tbl[i_i].addr = addr;
+		i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
 	}
 
-	cplb_data = (cplb_data & ~(3 << 16)) | (i << 16);
-
-	while ((start < end) && (table->pos < table->size)) {
-
-		table->tab[table->pos++] = start;
-
-		if (lock_kernel_check(start, start + block_size) == IN_KERNEL)
-			table->tab[table->pos++] =
-			    cplb_data | CPLB_LOCK | CPLB_DIRTY;
-		else
-			table->tab[table->pos++] = cplb_data;
-
-		start += block_size;
+	/* Cover L1 memory.  One 4M area for code and data each is enough.  */
+	if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
+		d_tbl[i_d].addr = L1_DATA_A_START;
+		d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
 	}
-	return 0;
+	i_tbl[i_i].addr = L1_CODE_START;
+	i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
+
+	first_switched_dcplb = i_d;
+	first_switched_icplb = i_i;
+
+	BUG_ON(first_switched_dcplb > MAX_CPLBS);
+	BUG_ON(first_switched_icplb > MAX_CPLBS);
+
+	while (i_d < MAX_CPLBS)
+		d_tbl[i_d++].data = 0;
+	while (i_i < MAX_CPLBS)
+		i_tbl[i_i++].data = 0;
 }
 
-static unsigned short __init
-close_cplbtab(struct cplb_tab *table)
+void __init generate_cplb_tables_all(void)
 {
+	int i_d, i_i;
 
-	while (table->pos < table->size) {
-
-		table->tab[table->pos++] = 0;
-		table->tab[table->pos++] = 0; /* !CPLB_VALID */
-	}
-	return 0;
-}
-
-/* helper function */
-static void __init
-__fill_code_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end)
-{
-	if (cplb_data[i].psize) {
-		fill_cplbtab(t,
-				cplb_data[i].start,
-				cplb_data[i].end,
-				cplb_data[i].psize,
-				cplb_data[i].i_conf);
-	} else {
-#if defined(CONFIG_BFIN_ICACHE)
-		if (ANOMALY_05000263 && i == SDRAM_KERN) {
-			fill_cplbtab(t,
-					cplb_data[i].start,
-					cplb_data[i].end,
-					SIZE_4M,
-					cplb_data[i].i_conf);
-		} else
-#endif
-		{
-			fill_cplbtab(t,
-					cplb_data[i].start,
-					a_start,
-					SIZE_1M,
-					cplb_data[i].i_conf);
-			fill_cplbtab(t,
-					a_start,
-					a_end,
-					SIZE_4M,
-					cplb_data[i].i_conf);
-			fill_cplbtab(t, a_end,
-					cplb_data[i].end,
-					SIZE_1M,
-					cplb_data[i].i_conf);
-		}
-	}
-}
-
-static void __init
-__fill_data_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end)
-{
-	if (cplb_data[i].psize) {
-		fill_cplbtab(t,
-				cplb_data[i].start,
-				cplb_data[i].end,
-				cplb_data[i].psize,
-				cplb_data[i].d_conf);
-	} else {
-		fill_cplbtab(t,
-				cplb_data[i].start,
-				a_start, SIZE_1M,
-				cplb_data[i].d_conf);
-		fill_cplbtab(t, a_start,
-				a_end, SIZE_4M,
-				cplb_data[i].d_conf);
-		fill_cplbtab(t, a_end,
-				cplb_data[i].end,
-				SIZE_1M,
-				cplb_data[i].d_conf);
-	}
-}
-
-void __init generate_cplb_tables(void)
-{
-
-	u16 i, j, process;
-	u32 a_start, a_end, as, ae, as_1m;
-
-	struct cplb_tab *t_i = NULL;
-	struct cplb_tab *t_d = NULL;
-	struct s_cplb cplb;
-
-	printk(KERN_INFO "NOMPU: setting up cplb tables for global access\n");
-
-	cplb.init_i.size = MAX_CPLBS;
-	cplb.init_d.size = MAX_CPLBS;
-	cplb.switch_i.size = MAX_SWITCH_I_CPLBS;
-	cplb.switch_d.size = MAX_SWITCH_D_CPLBS;
-
-	cplb.init_i.pos = 0;
-	cplb.init_d.pos = 0;
-	cplb.switch_i.pos = 0;
-	cplb.switch_d.pos = 0;
-
-	cplb.init_i.tab = icplb_table;
-	cplb.init_d.tab = dcplb_table;
-	cplb.switch_i.tab = ipdt_table;
-	cplb.switch_d.tab = dpdt_table;
-
-	cplb_data[SDRAM_KERN].end = memory_end;
-
+	i_d = 0;
+	/* Normal RAM, including MTD FS.  */
 #ifdef CONFIG_MTD_UCLINUX
-	cplb_data[SDRAM_RAM_MTD].start = memory_mtd_start;
-	cplb_data[SDRAM_RAM_MTD].end = memory_mtd_start + mtd_size;
-	cplb_data[SDRAM_RAM_MTD].valid = mtd_size > 0;
-# if defined(CONFIG_ROMFS_FS)
-	cplb_data[SDRAM_RAM_MTD].attr |= I_CPLB;
-
-	/*
-	 * The ROMFS_FS size is often not multiple of 1MB.
-	 * This can cause multiple CPLB sets covering the same memory area.
-	 * This will then cause multiple CPLB hit exceptions.
-	 * Workaround: We ensure a contiguous memory area by extending the kernel
-	 * memory section over the mtd section.
-	 * For ROMFS_FS memory must be covered with ICPLBs anyways.
-	 * So there is no difference between kernel and mtd memory setup.
-	 */
-
-	cplb_data[SDRAM_KERN].end = memory_mtd_start + mtd_size;;
-	cplb_data[SDRAM_RAM_MTD].valid = 0;
-
-# endif
+	dcplb_bounds[i_d].eaddr = memory_mtd_start + mtd_size;
 #else
-	cplb_data[SDRAM_RAM_MTD].valid = 0;
+	dcplb_bounds[i_d].eaddr = memory_end;
 #endif
-
-	cplb_data[SDRAM_DMAZ].start = _ramend - DMA_UNCACHED_REGION;
-	cplb_data[SDRAM_DMAZ].end = _ramend;
-
-	cplb_data[RES_MEM].start = _ramend;
-	cplb_data[RES_MEM].end = physical_mem_end;
-
-	if (reserved_mem_dcache_on)
-		cplb_data[RES_MEM].d_conf = SDRAM_DGENERIC;
-	else
-		cplb_data[RES_MEM].d_conf = SDRAM_DNON_CHBL;
-
-	if (reserved_mem_icache_on)
-		cplb_data[RES_MEM].i_conf = SDRAM_IGENERIC;
-	else
-		cplb_data[RES_MEM].i_conf = SDRAM_INON_CHBL;
-
-	for (i = ZERO_P; i < ARRAY_SIZE(cplb_data); ++i) {
-		if (!cplb_data[i].valid)
-			continue;
-
-		as_1m = cplb_data[i].start % SIZE_1M;
-
-		/* We need to make sure all sections are properly 1M aligned
-		 * However between Kernel Memory and the Kernel mtd section, depending on the
-		 * rootfs size, there can be overlapping memory areas.
-		 */
-
-		if (as_1m && i != L1I_MEM && i != L1D_MEM) {
-#ifdef CONFIG_MTD_UCLINUX
-			if (i == SDRAM_RAM_MTD) {
-				if ((cplb_data[SDRAM_KERN].end + 1) > cplb_data[SDRAM_RAM_MTD].start)
-					cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M)) + SIZE_1M;
-				else
-					cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M));
-			} else
-#endif
-				printk(KERN_WARNING "Unaligned Start of %s at 0x%X\n",
-				       cplb_data[i].name, cplb_data[i].start);
-		}
-
-		as = cplb_data[i].start % SIZE_4M;
-		ae = cplb_data[i].end % SIZE_4M;
-
-		if (as)
-			a_start = cplb_data[i].start + (SIZE_4M - (as));
-		else
-			a_start = cplb_data[i].start;
-
-		a_end = cplb_data[i].end - ae;
-
-		for (j = INITIAL_T; j <= SWITCH_T; j++) {
-
-			switch (j) {
-			case INITIAL_T:
-				if (cplb_data[i].attr & INITIAL_T) {
-					t_i = &cplb.init_i;
-					t_d = &cplb.init_d;
-					process = 1;
-				} else
-					process = 0;
-				break;
-			case SWITCH_T:
-				if (cplb_data[i].attr & SWITCH_T) {
-					t_i = &cplb.switch_i;
-					t_d = &cplb.switch_d;
-					process = 1;
-				} else
-					process = 0;
-				break;
-			default:
-					process = 0;
-				break;
-			}
-
-			if (!process)
-				continue;
-			if (cplb_data[i].attr & I_CPLB)
-				__fill_code_cplbtab(t_i, i, a_start, a_end);
-
-			if (cplb_data[i].attr & D_CPLB)
-				__fill_data_cplbtab(t_d, i, a_start, a_end);
-		}
+	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
+	/* DMA uncached region.  */
+	if (DMA_UNCACHED_REGION) {
+		dcplb_bounds[i_d].eaddr = _ramend;
+		dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL;
 	}
+	if (_ramend != physical_mem_end) {
+		/* Reserved memory.  */
+		dcplb_bounds[i_d].eaddr = physical_mem_end;
+		dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ?
+					    SDRAM_DGENERIC : SDRAM_DNON_CHBL);
+	}
+	/* Addressing hole up to the async bank.  */
+	dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;
+	dcplb_bounds[i_d++].data = 0;
+	/* ASYNC banks.  */
+	dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
+	dcplb_bounds[i_d++].data = SDRAM_EBIU;
+	/* Addressing hole up to BootROM.  */
+	dcplb_bounds[i_d].eaddr = BOOT_ROM_START;
+	dcplb_bounds[i_d++].data = 0;
+	/* BootROM -- largest one should be less than 1 meg.  */
+	dcplb_bounds[i_d].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
+	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
+	if (L2_LENGTH) {
+		/* Addressing hole up to L2 SRAM.  */
+		dcplb_bounds[i_d].eaddr = L2_START;
+		dcplb_bounds[i_d++].data = 0;
+		/* L2 SRAM.  */
+		dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;
+		dcplb_bounds[i_d++].data = L2_DMEMORY;
+	}
+	dcplb_nr_bounds = i_d;
+	BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds));
 
-/* close tables */
-
-	close_cplbtab(&cplb.init_i);
-	close_cplbtab(&cplb.init_d);
-
-	cplb.init_i.tab[cplb.init_i.pos] = -1;
-	cplb.init_d.tab[cplb.init_d.pos] = -1;
-	cplb.switch_i.tab[cplb.switch_i.pos] = -1;
-	cplb.switch_d.tab[cplb.switch_d.pos] = -1;
-
-}
-
+	i_i = 0;
+	/* Normal RAM, including MTD FS.  */
+#ifdef CONFIG_MTD_UCLINUX
+	icplb_bounds[i_i].eaddr = memory_mtd_start + mtd_size;
+#else
+	icplb_bounds[i_i].eaddr = memory_end;
 #endif
-
+	icplb_bounds[i_i++].data = SDRAM_IGENERIC;
+	/* DMA uncached region.  */
+	if (DMA_UNCACHED_REGION) {
+		icplb_bounds[i_i].eaddr = _ramend;
+		icplb_bounds[i_i++].data = 0;
+	}
+	if (_ramend != physical_mem_end) {
+		/* Reserved memory.  */
+		icplb_bounds[i_i].eaddr = physical_mem_end;
+		icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
+					    SDRAM_IGENERIC : SDRAM_INON_CHBL);
+	}
+	/* Addressing hole up to BootROM.  */
+	icplb_bounds[i_i].eaddr = BOOT_ROM_START;
+	icplb_bounds[i_i++].data = 0;
+	/* BootROM -- largest one should be less than 1 meg.  */
+	icplb_bounds[i_i].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
+	icplb_bounds[i_i++].data = SDRAM_IGENERIC;
+	if (L2_LENGTH) {
+		/* Addressing hole up to L2 SRAM, including the async bank.  */
+		icplb_bounds[i_i].eaddr = L2_START;
+		icplb_bounds[i_i++].data = 0;
+		/* L2 SRAM.  */
+		icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH;
+		icplb_bounds[i_i++].data = L2_IMEMORY;
+	}
+	icplb_nr_bounds = i_i;
+	BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds));
+}
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.S b/arch/blackfin/kernel/cplb-nompu/cplbmgr.S
deleted file mode 100644
index f5cf3ac..0000000
--- a/arch/blackfin/kernel/cplb-nompu/cplbmgr.S
+++ /dev/null
@@ -1,646 +0,0 @@
-/*
- * File:         arch/blackfin/mach-common/cplbmgtr.S
- * Based on:
- * Author:       LG Soft India
- *
- * Created:      ?
- * Description:  CPLB replacement routine for CPLB mismatch
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-/* Usage: int _cplb_mgr(is_data_miss,int enable_cache)
- * is_data_miss==2 => Mark as Dirty, write to the clean data page
- * is_data_miss==1 => Replace a data CPLB.
- * is_data_miss==0 => Replace an instruction CPLB.
- *
- * Returns:
- * CPLB_RELOADED	=> Successfully updated CPLB table.
- * CPLB_NO_UNLOCKED	=> All CPLBs are locked, so cannot be evicted.
- *			   This indicates that the CPLBs in the configuration
- *			   tablei are badly configured, as this should never
- *			   occur.
- * CPLB_NO_ADDR_MATCH	=> The address being accessed, that triggered the
- *			   exception, is not covered by any of the CPLBs in
- *			   the configuration table. The application is
- *			   presumably misbehaving.
- * CPLB_PROT_VIOL	=> The address being accessed, that triggered the
- *			   exception, was not a first-write to a clean Write
- *			   Back Data page, and so presumably is a genuine
- *			   violation of the page's protection attributes.
- *			   The application is misbehaving.
- */
-
-#include <linux/linkage.h>
-#include <asm/blackfin.h>
-#include <asm/cplb.h>
-
-#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
-.section .l1.text
-#else
-.text
-#endif
-
-.align 2;
-ENTRY(_cplb_mgr)
-
-	[--SP]=( R7:4,P5:3 );
-
-	CC = R0 == 2;
-	IF CC JUMP .Ldcplb_write;
-
-	CC = R0 == 0;
-	IF !CC JUMP .Ldcplb_miss_compare;
-
-	/* ICPLB Miss Exception. We need to choose one of the
-	* currently-installed CPLBs, and replace it with one
-	* from the configuration table.
-	*/
-
-	/* A multi-word instruction can cross a page boundary. This means the
-	 * first part of the instruction can be in a valid page, but the
-	 * second part is not, and hence generates the instruction miss.
-	 * However, the fault address is for the start of the instruction,
-	 * not the part that's in the bad page. Therefore, we have to check
-	 * whether the fault address applies to a page that is already present
-	 * in the table.
-	 */
-
-	P4.L = LO(ICPLB_FAULT_ADDR);
-	P4.H = HI(ICPLB_FAULT_ADDR);
-
-	P1 = 16;
-	P5.L = _page_size_table;
-	P5.H = _page_size_table;
-
-	P0.L = LO(ICPLB_DATA0);
-	P0.H = HI(ICPLB_DATA0);
-	R4 = [P4];		/* Get faulting address*/
-	R6 = 64;		/* Advance past the fault address, which*/
-	R6 = R6 + R4;		/* we'll use if we find a match*/
-	R3 = ((16 << 8) | 2);	/* Extract mask, two bits at posn 16 */
-
-	R5 = 0;
-.Lisearch:
-
-	R1 = [P0-0x100];	/* Address for this CPLB */
-
-	R0 = [P0++];		/* Info for this CPLB*/
-	CC = BITTST(R0,0);	/* Is the CPLB valid?*/
-	IF !CC JUMP .Lnomatch;	/* Skip it, if not.*/
-	CC = R4 < R1(IU);	/* If fault address less than page start*/
-	IF CC JUMP .Lnomatch;	/* then skip this one.*/
-	R2 = EXTRACT(R0,R3.L) (Z);	/* Get page size*/
-	P1 = R2;
-	P1 = P5 + (P1<<2);	/* index into page-size table*/
-	R2 = [P1];		/* Get the page size*/
-	R1 = R1 + R2;		/* and add to page start, to get page end*/
-	CC = R4 < R1(IU);	/* and see whether fault addr is in page.*/
-	IF !CC R4 = R6;		/* If so, advance the address and finish loop.*/
-	IF !CC JUMP .Lisearch_done;
-.Lnomatch:
-	/* Go around again*/
-	R5 += 1;
-	CC = BITTST(R5, 4);	/* i.e CC = R5 >= 16*/
-	IF !CC JUMP .Lisearch;
-
-.Lisearch_done:
-	I0 = R4;		/* Fault address we'll search for*/
-
-	/* set up pointers */
-	P0.L = LO(ICPLB_DATA0);
-	P0.H = HI(ICPLB_DATA0);
-
-	/* The replacement procedure for ICPLBs */
-
-	P4.L = LO(IMEM_CONTROL);
-	P4.H = HI(IMEM_CONTROL);
-
-	/* Turn off CPLBs while we work, necessary according to HRM before
-	 * modifying CPLB descriptors
-	 */
-	R5 = [P4];		/* Control Register*/
-	BITCLR(R5,ENICPLB_P);
-	CLI R1;
-	SSYNC;		/* SSYNC required before writing to IMEM_CONTROL. */
-	.align 8;
-	[P4] = R5;
-	SSYNC;
-	STI R1;
-
-	R1 = -1;		/* end point comparison */
-	R3 = 16;		/* counter */
-
-	/* Search through CPLBs for first non-locked entry */
-	/* Overwrite it by moving everyone else up by 1 */
-.Licheck_lock:
-	R0 = [P0++];
-	R3 = R3 + R1;
-	CC = R3 == R1;
-	IF CC JUMP .Lall_locked;
-	CC = BITTST(R0, 0);		/* an invalid entry is good */
-	IF !CC JUMP .Lifound_victim;
-	CC = BITTST(R0,1);		/* but a locked entry isn't */
-	IF CC JUMP .Licheck_lock;
-
-.Lifound_victim:
-#ifdef CONFIG_CPLB_INFO
-	R7 = [P0 - 0x104];
-	P2.L = _ipdt_table;
-	P2.H = _ipdt_table;
-	P3.L = _ipdt_swapcount_table;
-	P3.H = _ipdt_swapcount_table;
-	P3 += -4;
-.Licount:
-	R2 = [P2];	/* address from config table */
-	P2 += 8;
-	P3 += 8;
-	CC = R2==-1;
-	IF CC JUMP .Licount_done;
-	CC = R7==R2;
-	IF !CC JUMP .Licount;
-	R7 = [P3];
-	R7 += 1;
-	[P3] = R7;
-	CSYNC;
-.Licount_done:
-#endif
-	LC0=R3;
-	LSETUP(.Lis_move,.Lie_move) LC0;
-.Lis_move:
-	R0 = [P0];
-	[P0 - 4] = R0;
-	R0 = [P0 - 0x100];
-	[P0-0x104] = R0;
-.Lie_move:
-	P0+=4;
-
-	/* Clear ICPLB_DATA15, in case we don't find a replacement
-	 * otherwise, we would have a duplicate entry, and will crash
-	 */
-	R0 = 0;
-	[P0 - 4] = R0;
-
-	/* We've made space in the ICPLB table, so that ICPLB15
-	 * is now free to be overwritten. Next, we have to determine
-	 * which CPLB we need to install, from the configuration
-	 * table. This is a matter of getting the start-of-page
-	 * addresses and page-lengths from the config table, and
-	 * determining whether the fault address falls within that
-	 * range.
- 	 */
-
-	P2.L = _ipdt_table;
-	P2.H = _ipdt_table;
-#ifdef	CONFIG_CPLB_INFO
-	P3.L = _ipdt_swapcount_table;
-	P3.H = _ipdt_swapcount_table;
-	P3 += -8;
-#endif
-	P0.L = _page_size_table;
-	P0.H = _page_size_table;
-
-	/* Retrieve our fault address (which may have been advanced
-	 * because the faulting instruction crossed a page boundary).
-	 */
-
-	R0 = I0;
-
-	/* An extraction pattern, to get the page-size bits from
-	 * the CPLB data entry. Bits 16-17, so two bits at posn 16.
-	 */
-
-	R1 = ((16<<8)|2);
-.Linext:	R4 = [P2++];	/* address from config table */
-	R2 = [P2++];	/* data from config table */
-#ifdef	CONFIG_CPLB_INFO
-	P3 += 8;
-#endif
-
-	CC = R4 == -1;	/* End of config table*/
-	IF CC JUMP .Lno_page_in_table;
-
-	/* See if failed address > start address */
-	CC = R4 <= R0(IU);
-	IF !CC JUMP .Linext;
-
-	/* extract page size (17:16)*/
-	R3 = EXTRACT(R2, R1.L) (Z);
-
-	/* add page size to addr to get range */
-
-	P5 = R3;
-	P5 = P0 + (P5 << 2);	/* scaled, for int access*/
-	R3 = [P5];
-	R3 = R3 + R4;
-
-	/* See if failed address < (start address + page size) */
-	CC = R0 < R3(IU);
-	IF !CC JUMP .Linext;
-
-	/* We've found a CPLB in the config table that covers
-	 * the faulting address, so install this CPLB into the
-	 * last entry of the table.
-	 */
-
-	P1.L = LO(ICPLB_DATA15);		/* ICPLB_DATA15 */
-	P1.H = HI(ICPLB_DATA15);
-	[P1] = R2;
-	[P1-0x100] = R4;
-#ifdef	CONFIG_CPLB_INFO
-	R3 = [P3];
-	R3 += 1;
-	[P3] = R3;
-#endif
-
-	/* P4 points to IMEM_CONTROL, and R5 contains its old
-	 * value, after we disabled ICPLBS. Re-enable them.
-	 */
-
-	BITSET(R5,ENICPLB_P);
-	CLI R2;
-	SSYNC;		/* SSYNC required before writing to IMEM_CONTROL. */
-	.align 8;
-	[P4] = R5;
-	SSYNC;
-	STI R2;
-
-	( R7:4,P5:3 ) = [SP++];
-	R0 = CPLB_RELOADED;
-	RTS;
-
-/* FAILED CASES*/
-.Lno_page_in_table:
-	R0 = CPLB_NO_ADDR_MATCH;
-	JUMP .Lfail_ret;
-
-.Lall_locked:
-	R0 = CPLB_NO_UNLOCKED;
-	JUMP .Lfail_ret;
-
-.Lprot_violation:
-	R0 = CPLB_PROT_VIOL;
-
-.Lfail_ret:
-	/* Make sure we turn protection/cache back on, even in the failing case */
-	BITSET(R5,ENICPLB_P);
-	CLI R2;
-	SSYNC;          /* SSYNC required before writing to IMEM_CONTROL. */
-	.align 8;
-	[P4] = R5;
-	SSYNC;
-	STI R2;
-
-	( R7:4,P5:3 ) = [SP++];
-	RTS;
-
-.Ldcplb_write:
-
-	/* if a DCPLB is marked as write-back (CPLB_WT==0), and
-	 * it is clean (CPLB_DIRTY==0), then a write to the
-	 * CPLB's page triggers a protection violation. We have to
-	 * mark the CPLB as dirty, to indicate that there are
-	 * pending writes associated with the CPLB.
-	 */
-
-	P4.L = LO(DCPLB_STATUS);
-	P4.H = HI(DCPLB_STATUS);
-	P3.L = LO(DCPLB_DATA0);
-	P3.H = HI(DCPLB_DATA0);
-	R5 = [P4];
-
-	/* A protection violation can be caused by more than just writes
-	 * to a clean WB page, so we have to ensure that:
-	 * - It's a write
-	 * - to a clean WB page
-	 * - and is allowed in the mode the access occurred.
-	 */
-
-	CC = BITTST(R5, 16);	/* ensure it was a write*/
-	IF !CC JUMP .Lprot_violation;
-
-	/* to check the rest, we have to retrieve the DCPLB.*/
-
-	/* The low half of DCPLB_STATUS is a bit mask*/
-
-	R2 = R5.L (Z);	/* indicating which CPLB triggered the event.*/
-	R3 = 30;	/* so we can use this to determine the offset*/
-	R2.L = SIGNBITS R2;
-	R2 = R2.L (Z);	/* into the DCPLB table.*/
-	R3 = R3 - R2;
-	P4 = R3;
-	P3 = P3 + (P4<<2);
-	R3 = [P3];	/* Retrieve the CPLB*/
-
-	/* Now we can check whether it's a clean WB page*/
-
-	CC = BITTST(R3, 14);	/* 0==WB, 1==WT*/
-	IF CC JUMP .Lprot_violation;
-	CC = BITTST(R3, 7);	/* 0 == clean, 1 == dirty*/
-	IF CC JUMP .Lprot_violation;
-
-	/* Check whether the write is allowed in the mode that was active.*/
-
-	R2 = 1<<3;		/* checking write in user mode*/
-	CC = BITTST(R5, 17);	/* 0==was user, 1==was super*/
-	R5 = CC;
-	R2 <<= R5;		/* if was super, check write in super mode*/
-	R2 = R3 & R2;
-	CC = R2 == 0;
-	IF CC JUMP .Lprot_violation;
-
-	/* It's a genuine write-to-clean-page.*/
-
-	BITSET(R3, 7);		/* mark as dirty*/
-	[P3] = R3;		/* and write back.*/
-	NOP;
-	CSYNC;
-	( R7:4,P5:3 ) = [SP++];
-	R0 = CPLB_RELOADED;
-	RTS;
-
-.Ldcplb_miss_compare:
-
-	/* Data CPLB Miss event. We need to choose a CPLB to
-	 * evict, and then locate a new CPLB to install from the
-	 * config table, that covers the faulting address.
-	 */
-
-	P1.L = LO(DCPLB_DATA15);
-	P1.H = HI(DCPLB_DATA15);
-
-	P4.L = LO(DCPLB_FAULT_ADDR);
-	P4.H = HI(DCPLB_FAULT_ADDR);
-	R4 = [P4];
-	I0 = R4;
-
-	/* The replacement procedure for DCPLBs*/
-
-	R6 = R1;	/* Save for later*/
-
-	/* Turn off CPLBs while we work.*/
-	P4.L = LO(DMEM_CONTROL);
-	P4.H = HI(DMEM_CONTROL);
-	R5 = [P4];
-	BITCLR(R5,ENDCPLB_P);
-	CLI R0;
-	SSYNC;		/* SSYNC required before writing to DMEM_CONTROL. */
-	.align 8;
-	[P4] = R5;
-	SSYNC;
-	STI R0;
-
-	/* Start looking for a CPLB to evict. Our order of preference
-	 * is: invalid CPLBs, clean CPLBs, dirty CPLBs. Locked CPLBs
-	 * are no good.
-	 */
-
-	I1.L = LO(DCPLB_DATA0);
-	I1.H = HI(DCPLB_DATA0);
-	P1 = 2;
-	P2 = 16;
-	I2.L = _dcplb_preference;
-	I2.H = _dcplb_preference;
-	LSETUP(.Lsdsearch1, .Ledsearch1) LC0 = P1;
-.Lsdsearch1:
-	R0 = [I2++];		/* Get the bits we're interested in*/
-	P0 = I1;		/* Go back to start of table*/
-	LSETUP (.Lsdsearch2, .Ledsearch2) LC1 = P2;
-.Lsdsearch2:
-	R1 = [P0++];		/* Fetch each installed CPLB in turn*/
-	R2 = R1 & R0;		/* and test for interesting bits.*/
-	CC = R2 == 0;		/* If none are set, it'll do.*/
-	IF !CC JUMP .Lskip_stack_check;
-
-	R2 = [P0 - 0x104]; 	/* R2 - PageStart */
-	P3.L = _page_size_table; /* retrieve end address */
-	P3.H = _page_size_table; /* retrieve end address */
-	R3 = 0x1002;		/* 16th - position, 2 bits -length */
-#if ANOMALY_05000209
-	nop;			/* Anomaly 05000209 */
-#endif
-	R7 = EXTRACT(R1,R3.l);
-	R7 = R7 << 2;		/* Page size index offset */
-	P5 = R7;
-	P3 = P3 + P5;
-	R7 = [P3];		/* page size in bytes */
-
-	R7 = R2 + R7;		/* R7 - PageEnd */
-	R4 = SP; 		/* Test SP is in range */
-
-	CC = R7 < R4;		/* if PageEnd < SP */
-	IF CC JUMP .Ldfound_victim;
-	R3 = 0x284;		/* stack length from start of trap till
-				 * the point.
-				 * 20 stack locations for future modifications
-				 */
-	R4 = R4 + R3;
-	CC = R4 < R2;		/* if SP + stacklen < PageStart */
-	IF CC JUMP .Ldfound_victim;
-.Lskip_stack_check:
-
-.Ledsearch2: NOP;
-.Ledsearch1: NOP;
-
-	/* If we got here, we didn't find a DCPLB we considered
-	 * replacable, which means all of them were locked.
-	 */
-
-	JUMP .Lall_locked;
-.Ldfound_victim:
-
-#ifdef CONFIG_CPLB_INFO
-	R7 = [P0 - 0x104];
-	P2.L = _dpdt_table;
-	P2.H = _dpdt_table;
-	P3.L = _dpdt_swapcount_table;
-	P3.H = _dpdt_swapcount_table;
-	P3 += -4;
-.Ldicount:
-	R2 = [P2];
-	P2 += 8;
-	P3 += 8;
-	CC = R2==-1;
-	IF CC JUMP .Ldicount_done;
-	CC = R7==R2;
-	IF !CC JUMP .Ldicount;
-	R7 = [P3];
-	R7 += 1;
-	[P3] = R7;
-.Ldicount_done:
-#endif
-
-	/* Clean down the hardware loops*/
-	R2 = 0;
-	LC1 = R2;
-	LC0 = R2;
-
-	/* There's a suitable victim in [P0-4] (because we've
-	 * advanced already).
-	 */
-
-.LDdoverwrite:
-
-	/* [P0-4] is a suitable victim CPLB, so we want to
-	 * overwrite it by moving all the following CPLBs
-	 * one space closer to the start.
-	 */
-
-	R1.L = LO(DCPLB_DATA16);		/* DCPLB_DATA15 + 4 */
-	R1.H = HI(DCPLB_DATA16);
-	R0 = P0;
-
-	/* If the victim happens to be in DCPLB15,
-	 * we don't need to move anything.
-	 */
-
-	CC = R1 == R0;
-	IF CC JUMP .Lde_moved;
-	R1 = R1 - R0;
-	R1 >>= 2;
-	P1 = R1;
-	LSETUP(.Lds_move, .Lde_move) LC0=P1;
-.Lds_move:
-	R0 = [P0++];	/* move data */
-	[P0 - 8] = R0;
-	R0 = [P0-0x104]	/* move address */
-.Lde_move:
-	 [P0-0x108] = R0;
-
-.Lde_moved:
-	NOP;
-
-	/* Clear DCPLB_DATA15, in case we don't find a replacement
-	 * otherwise, we would have a duplicate entry, and will crash
-	 */
-	R0 = 0;
-	[P0 - 0x4] = R0;
-
-	/* We've now made space in DCPLB15 for the new CPLB to be
-	 * installed. The next stage is to locate a CPLB in the
-	 * config table that covers the faulting address.
-	 */
-
-	R0 = I0;		/* Our faulting address */
-
-	P2.L = _dpdt_table;
-	P2.H = _dpdt_table;
-#ifdef	CONFIG_CPLB_INFO
-	P3.L = _dpdt_swapcount_table;
-	P3.H = _dpdt_swapcount_table;
-	P3 += -8;
-#endif
-
-	P1.L = _page_size_table;
-	P1.H = _page_size_table;
-
-	/* An extraction pattern, to retrieve bits 17:16.*/
-
-	R1 = (16<<8)|2;
-.Ldnext:	R4 = [P2++];	/* address */
-	R2 = [P2++];	/* data */
-#ifdef	CONFIG_CPLB_INFO
-	P3 += 8;
-#endif
-
-	CC = R4 == -1;
-	IF CC JUMP .Lno_page_in_table;
-
-	/* See if failed address > start address */
-	CC = R4 <= R0(IU);
-	IF !CC JUMP .Ldnext;
-
-	/* extract page size (17:16)*/
-	R3 = EXTRACT(R2, R1.L) (Z);
-
-	/* add page size to addr to get range */
-
-	P5 = R3;
-	P5 = P1 + (P5 << 2);
-	R3 = [P5];
-	R3 = R3 + R4;
-
-	/* See if failed address < (start address + page size) */
-	CC = R0 < R3(IU);
-	IF !CC JUMP .Ldnext;
-
-	/* We've found the CPLB that should be installed, so
-	 * write it into CPLB15, masking off any caching bits
-	 * if necessary.
-	 */
-
-	P1.L = LO(DCPLB_DATA15);
-	P1.H = HI(DCPLB_DATA15);
-
-	/* If the DCPLB has cache bits set, but caching hasn't
-	 * been enabled, then we want to mask off the cache-in-L1
-	 * bit before installing. Moreover, if caching is off, we
-	 * also want to ensure that the DCPLB has WT mode set, rather
-	 * than WB, since WB pages still trigger first-write exceptions
-	 * even when not caching is off, and the page isn't marked as
-	 * cachable. Finally, we could mark the page as clean, not dirty,
-	 * but we choose to leave that decision to the user; if the user
-	 * chooses to have a CPLB pre-defined as dirty, then they always
-	 * pay the cost of flushing during eviction, but don't pay the
-	 * cost of first-write exceptions to mark the page as dirty.
-	 */
-
-#ifdef CONFIG_BFIN_WT
-	BITSET(R6, 14);		/* Set WT*/
-#endif
-
-	[P1] = R2;
-	[P1-0x100] = R4;
-#ifdef	CONFIG_CPLB_INFO
-	R3 = [P3];
-	R3 += 1;
-	[P3] = R3;
-#endif
-
-	/* We've installed the CPLB, so re-enable CPLBs. P4
-	 * points to DMEM_CONTROL, and R5 is the value we
-	 * last wrote to it, when we were disabling CPLBs.
-	 */
-
-	BITSET(R5,ENDCPLB_P);
-	CLI R2;
-	.align 8;
-	[P4] = R5;
-	SSYNC;
-	STI R2;
-
-	( R7:4,P5:3 ) = [SP++];
-	R0 = CPLB_RELOADED;
-	RTS;
-ENDPROC(_cplb_mgr)
-
-.data
-.align 4;
-_page_size_table:
-.byte4	0x00000400;	/* 1K */
-.byte4	0x00001000;	/* 4K */
-.byte4	0x00100000;	/* 1M */
-.byte4	0x00400000;	/* 4M */
-
-.align 4;
-_dcplb_preference:
-.byte4	0x00000001;	/* valid bit */
-.byte4	0x00000002;	/* lock bit */
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
new file mode 100644
index 0000000..376249a
--- /dev/null
+++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
@@ -0,0 +1,283 @@
+/*
+ * File:         arch/blackfin/kernel/cplb-nompu/cplbmgr.c
+ * Based on:     arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+ * Author:       Michael McTernan <mmcternan@airvana.com>
+ *
+ * Created:      01Nov2008
+ * Description:  CPLB miss handler.
+ *
+ * Modified:
+ *               Copyright 2008 Airvana Inc.
+ *               Copyright 2004-2007 Analog Devices Inc.
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <asm/blackfin.h>
+#include <asm/cplbinit.h>
+#include <asm/cplb.h>
+#include <asm/mmu_context.h>
+
+/*
+ * WARNING
+ *
+ * This file is compiled with certain -ffixed-reg options.  We have to
+ * make sure not to call any functions here that could clobber these
+ * registers.
+ */
+
+int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
+int nr_dcplb_supv_miss[NR_CPUS], nr_icplb_supv_miss[NR_CPUS];
+int nr_cplb_flush[NR_CPUS], nr_dcplb_prot[NR_CPUS];
+
+#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
+#define MGR_ATTR __attribute__((l1_text))
+#else
+#define MGR_ATTR
+#endif
+
+/*
+ * We're in an exception handler.  The normal cli nop nop workaround
+ * isn't going to do very much, as the only thing that can interrupt
+ * us is an NMI, and the cli isn't going to stop that.
+ */
+#define NOWA_SSYNC __asm__ __volatile__ ("ssync;")
+
+/* Anomaly workaround handlers already provide SSYNCs, so avoid extra ones when the anomaly is present */
+#if ANOMALY_05000125
+
+#define bfin_write_DMEM_CONTROL_SSYNC(v)    bfin_write_DMEM_CONTROL(v)
+#define bfin_write_IMEM_CONTROL_SSYNC(v)    bfin_write_IMEM_CONTROL(v)
+
+#else
+
+#define bfin_write_DMEM_CONTROL_SSYNC(v) \
+    do { NOWA_SSYNC; bfin_write_DMEM_CONTROL(v); NOWA_SSYNC; } while (0)
+#define bfin_write_IMEM_CONTROL_SSYNC(v) \
+    do { NOWA_SSYNC; bfin_write_IMEM_CONTROL(v); NOWA_SSYNC; } while (0)
+
+#endif
+
+static inline void write_dcplb_data(int cpu, int idx, unsigned long data,
+				    unsigned long addr)
+{
+	unsigned long ctrl = bfin_read_DMEM_CONTROL();
+	bfin_write_DMEM_CONTROL_SSYNC(ctrl & ~ENDCPLB);
+	bfin_write32(DCPLB_DATA0 + idx * 4, data);
+	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
+	bfin_write_DMEM_CONTROL_SSYNC(ctrl);
+
+#ifdef CONFIG_CPLB_INFO
+	dcplb_tbl[cpu][idx].addr = addr;
+	dcplb_tbl[cpu][idx].data = data;
+#endif
+}
+
+static inline void write_icplb_data(int cpu, int idx, unsigned long data,
+				    unsigned long addr)
+{
+	unsigned long ctrl = bfin_read_IMEM_CONTROL();
+
+	bfin_write_IMEM_CONTROL_SSYNC(ctrl & ~ENICPLB);
+	bfin_write32(ICPLB_DATA0 + idx * 4, data);
+	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
+	bfin_write_IMEM_CONTROL_SSYNC(ctrl);
+
+#ifdef CONFIG_CPLB_INFO
+	icplb_tbl[cpu][idx].addr = addr;
+	icplb_tbl[cpu][idx].data = data;
+#endif
+}
+
+/*
+ * Given the contents of the status register, return the index of the
+ * CPLB that caused the fault.
+ */
+static inline int faulting_cplb_index(int status)
+{
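+	/* The low 16 bits of status form a one-hot mask of the faulting
+	 * entry: entry 0 -> 0x0001 -> norm 30 -> index 0, entry 15 ->
+	 * 0x8000 -> norm 15 -> index 15.  */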
+	int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
+	return 30 - signbits;
+}
+
+/*
+ * Given the contents of the status register and the DCPLB_DATA contents,
+ * return true if a write access should be permitted.
+ */
+static inline int write_permitted(int status, unsigned long data)
+{
+	if (status & FAULT_USERSUPV)
+		return !!(data & CPLB_SUPV_WR);
+	else
+		return !!(data & CPLB_USER_WR);
+}
+
+/* Counters to implement round-robin replacement.  */
+static int icplb_rr_index[NR_CPUS] PDT_ATTR;
+static int dcplb_rr_index[NR_CPUS] PDT_ATTR;
+
+/*
+ * Find an ICPLB entry to be evicted and return its index.
+ */
+static int evict_one_icplb(int cpu)
+{
+	int i = first_switched_icplb + icplb_rr_index[cpu];
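+	/* Round-robin over the switched range [first_switched_icplb,
+	 * MAX_CPLBS); lower entries are never considered for eviction.  */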
+	if (i >= MAX_CPLBS) {
+		i -= MAX_CPLBS - first_switched_icplb;
+		icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
+	}
+	icplb_rr_index[cpu]++;
+	return i;
+}
+
+static int evict_one_dcplb(int cpu)
+{
+	int i = first_switched_dcplb + dcplb_rr_index[cpu];
+	if (i >= MAX_CPLBS) {
+		i -= MAX_CPLBS - first_switched_dcplb;
+		dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
+	}
+	dcplb_rr_index[cpu]++;
+	return i;
+}
+
+MGR_ATTR static int icplb_miss(int cpu)
+{
+	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
+	int status = bfin_read_ICPLB_STATUS();
+	int idx;
+	unsigned long i_data, base, addr1, eaddr;
+
+	nr_icplb_miss[cpu]++;
+	if (unlikely(status & FAULT_USERSUPV))
+		nr_icplb_supv_miss[cpu]++;
+
+	base = 0;
+	for (idx = 0; idx < icplb_nr_bounds; idx++) {
+		eaddr = icplb_bounds[idx].eaddr;
+		if (addr < eaddr)
+			break;
+		base = eaddr;
+	}
+	if (unlikely(idx == icplb_nr_bounds))
+		return CPLB_NO_ADDR_MATCH;
+
+	i_data = icplb_bounds[idx].data;
+	if (unlikely(i_data == 0))
+		return CPLB_NO_ADDR_MATCH;
+
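+	/* Install a 1MB page around the fault by default; widen it to 4MB
+	 * when the naturally aligned 4MB region fits within this bound.  */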
+	addr1 = addr & ~(SIZE_4M - 1);
+	addr &= ~(SIZE_1M - 1);
+	i_data |= PAGE_SIZE_1MB;
+	if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {
+		/*
+		 * This works because
+		 * (PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB.
+		 */
+		i_data |= PAGE_SIZE_4MB;
+		addr = addr1;
+	}
+
+	/* Pick entry to evict */
+	idx = evict_one_icplb(cpu);
+
+	write_icplb_data(cpu, idx, i_data, addr);
+
+	return CPLB_RELOADED;
+}
+
+MGR_ATTR static int dcplb_miss(int cpu)
+{
+	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
+	int status = bfin_read_DCPLB_STATUS();
+	int idx;
+	unsigned long d_data, base, addr1, eaddr;
+
+	nr_dcplb_miss[cpu]++;
+	if (unlikely(status & FAULT_USERSUPV))
+		nr_dcplb_supv_miss[cpu]++;
+
+	base = 0;
+	for (idx = 0; idx < dcplb_nr_bounds; idx++) {
+		eaddr = dcplb_bounds[idx].eaddr;
+		if (addr < eaddr)
+			break;
+		base = eaddr;
+	}
+	if (unlikely(idx == dcplb_nr_bounds))
+		return CPLB_NO_ADDR_MATCH;
+
+	d_data = dcplb_bounds[idx].data;
+	if (unlikely(d_data == 0))
+		return CPLB_NO_ADDR_MATCH;
+
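+	/* Same 1MB/4MB page sizing as in icplb_miss() above.  */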
+	addr1 = addr & ~(SIZE_4M - 1);
+	addr &= ~(SIZE_1M - 1);
+	d_data |= PAGE_SIZE_1MB;
+	if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {
+		/*
+		 * This works because
+		 * (PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB.
+		 */
+		d_data |= PAGE_SIZE_4MB;
+		addr = addr1;
+	}
+
+	/* Pick entry to evict */
+	idx = evict_one_dcplb(cpu);
+
+	write_dcplb_data(cpu, idx, d_data, addr);
+
+	return CPLB_RELOADED;
+}
+
+MGR_ATTR static noinline int dcplb_protection_fault(int cpu)
+{
+	int status = bfin_read_DCPLB_STATUS();
+
+	nr_dcplb_prot[cpu]++;
+
+	if (likely(status & FAULT_RW)) {
+		int idx = faulting_cplb_index(status);
+		unsigned long regaddr = DCPLB_DATA0 + idx * 4;
+		unsigned long data = bfin_read32(regaddr);
+
+		/* A permitted first write to a clean write-back page:
+		 * mark the page dirty and let the access retry.  */
+		if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
+		    write_permitted(status, data)) {
+			data |= CPLB_DIRTY;
+			dcplb_tbl[cpu][idx].data = data;
+			bfin_write32(regaddr, data);
+			return CPLB_RELOADED;
+		}
+	}
+
+	return CPLB_PROT_VIOL;
+}
+
+MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
+{
+	int cause = seqstat & 0x3f;
+	unsigned int cpu = smp_processor_id();
+	switch (cause) {
+	case 0x2C:	/* VEC_CPLB_I_M: instruction CPLB miss */
+		return icplb_miss(cpu);
+	case 0x26:	/* VEC_CPLB_M: data CPLB miss */
+		return dcplb_miss(cpu);
+	default:
+		/* 0x23 is VEC_CPLB_VL: data CPLB protection violation */
+		if (unlikely(cause == 0x23))
+			return dcplb_protection_fault(cpu);
+
+		return CPLB_UNKNOWN_ERR;
+	}
+}
diff --git a/arch/blackfin/kernel/cplbinfo.c b/arch/blackfin/kernel/cplbinfo.c
new file mode 100644
index 0000000..64d7830
--- /dev/null
+++ b/arch/blackfin/kernel/cplbinfo.c
@@ -0,0 +1,177 @@
+/*
+ * arch/blackfin/kernel/cplbinfo.c - display CPLB status
+ *
+ * Copyright 2004-2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include <asm/cplbinit.h>
+#include <asm/blackfin.h>
+
+static char const page_strtbl[][3] = { "1K", "4K", "1M", "4M" };
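+/* Bits 17:16 of the CPLB data word select the page size.  */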
+#define page(flags)    (((flags) & 0x30000) >> 16)
+#define strpage(flags) page_strtbl[page(flags)]
+
+struct cplbinfo_data {
+	loff_t pos;
+	char cplb_type;
+	u32 mem_control;
+	struct cplb_entry *tbl;
+	int switched;
+};
+
+static void cplbinfo_print_header(struct seq_file *m)
+{
+	seq_printf(m, "Index\tAddress\t\tData\tSize\tU/RD\tU/WR\tS/WR\tSwitch\n");
+}
+
+static int cplbinfo_nomore(struct cplbinfo_data *cdata)
+{
+	return cdata->pos >= MAX_CPLBS;
+}
+
+static int cplbinfo_show(struct seq_file *m, void *p)
+{
+	struct cplbinfo_data *cdata;
+	unsigned long data, addr;
+	loff_t pos;
+
+	cdata = p;
+	pos = cdata->pos;
+	addr = cdata->tbl[pos].addr;
+	data = cdata->tbl[pos].data;
+
+	seq_printf(m,
+		"%d\t0x%08lx\t%05lx\t%s\t%c\t%c\t%c\t%c\n",
+		(int)pos, addr, data, strpage(data),
+		(data & CPLB_USER_RD) ? 'Y' : 'N',
+		(data & CPLB_USER_WR) ? 'Y' : 'N',
+		(data & CPLB_SUPV_WR) ? 'Y' : 'N',
+		pos < cdata->switched ? 'N' : 'Y');
+
+	return 0;
+}
+
+static void cplbinfo_seq_init(struct cplbinfo_data *cdata, unsigned int cpu)
+{
+	if (cdata->cplb_type == 'I') {
+		cdata->mem_control = bfin_read_IMEM_CONTROL();
+		cdata->tbl = icplb_tbl[cpu];
+		cdata->switched = first_switched_icplb;
+	} else {
+		cdata->mem_control = bfin_read_DMEM_CONTROL();
+		cdata->tbl = dcplb_tbl[cpu];
+		cdata->switched = first_switched_dcplb;
+	}
+}
+
+static void *cplbinfo_start(struct seq_file *m, loff_t *pos)
+{
+	struct cplbinfo_data *cdata = m->private;
+
+	if (!*pos) {
+		seq_printf(m, "%cCPLBs are %sabled: 0x%x\n", cdata->cplb_type,
+			(cdata->mem_control & ENDCPLB ? "en" : "dis"),
+			cdata->mem_control);
+		cplbinfo_print_header(m);
+	} else if (cplbinfo_nomore(cdata))
+		return NULL;
+
+	get_cpu();
+	return cdata;
+}
+
+static void *cplbinfo_next(struct seq_file *m, void *p, loff_t *pos)
+{
+	struct cplbinfo_data *cdata = p;
+	cdata->pos = ++(*pos);
+	if (cplbinfo_nomore(cdata))
+		return NULL;
+	else
+		return cdata;
+}
+
+static void cplbinfo_stop(struct seq_file *m, void *p)
+{
+	put_cpu();
+}
+
+static const struct seq_operations cplbinfo_sops = {
+	.start = cplbinfo_start,
+	.next  = cplbinfo_next,
+	.stop  = cplbinfo_stop,
+	.show  = cplbinfo_show,
+};
+
+static int cplbinfo_open(struct inode *inode, struct file *file)
+{
+	char buf[256], *path, *p;
+	unsigned int cpu;
+	char *s_cpu, *s_cplb;
+	int ret;
+	struct seq_file *m;
+	struct cplbinfo_data *cdata;
+
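+	/* Recover the CPU number and CPLB type from the procfs path,
+	 * e.g. "/proc/cplbinfo/cpu0/icplb".  */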
+	path = d_path(&file->f_path, buf, sizeof(buf));
+	if (IS_ERR(path))
+		return PTR_ERR(path);
+	s_cpu = strstr(path, "/cpu");
+	s_cplb = strrchr(path, '/');
+	if (!s_cpu || !s_cplb)
+		return -EINVAL;
+
+	cpu = simple_strtoul(s_cpu + 4, &p, 10);
+	if (!cpu_online(cpu))
+		return -ENODEV;
+
+	ret = seq_open_private(file, &cplbinfo_sops, sizeof(*cdata));
+	if (ret)
+		return ret;
+	m = file->private_data;
+	cdata = m->private;
+
+	cdata->pos = 0;
+	cdata->cplb_type = toupper(s_cplb[1]);
+	cplbinfo_seq_init(cdata, cpu);
+
+	return 0;
+}
+
+static const struct file_operations cplbinfo_fops = {
+	.open    = cplbinfo_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release_private,
+};
+
+static int __init cplbinfo_init(void)
+{
+	struct proc_dir_entry *cplb_dir, *cpu_dir;
+	char buf[10];
+	unsigned int cpu;
+
+	cplb_dir = proc_mkdir("cplbinfo", NULL);
+	if (!cplb_dir)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu) {
+		sprintf(buf, "cpu%i", cpu);
+		cpu_dir = proc_mkdir(buf, cplb_dir);
+		if (!cpu_dir)
+			return -ENOMEM;
+
+		proc_create("icplb", S_IRUGO, cpu_dir, &cplbinfo_fops);
+		proc_create("dcplb", S_IRUGO, cpu_dir, &cplbinfo_fops);
+	}
+
+	return 0;
+}
+late_initcall(cplbinfo_init);
diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c
index 1f4e3d2..c8ad051 100644
--- a/arch/blackfin/kernel/early_printk.c
+++ b/arch/blackfin/kernel/early_printk.c
@@ -105,10 +105,10 @@
 		cflag |= CS5;
 		break;
 	case 6:
-		cflag |= CS5;
+		cflag |= CS6;
 		break;
 	case 7:
-		cflag |= CS5;
+		cflag |= CS7;
 		break;
 	default:
 		cflag |= CS8;
diff --git a/arch/blackfin/kernel/entry.S b/arch/blackfin/kernel/entry.S
index faea88e..a9cfba9 100644
--- a/arch/blackfin/kernel/entry.S
+++ b/arch/blackfin/kernel/entry.S
@@ -30,6 +30,7 @@
 #include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/errno.h>
+#include <asm/blackfin.h>
 #include <asm/asm-offsets.h>
 
 #include <asm/context.S>
@@ -41,6 +42,10 @@
 #endif
 
 ENTRY(_ret_from_fork)
+#ifdef CONFIG_IPIPE
+	[--sp] = reti; 		/* IRQs on. */
+	SP += 4;
+#endif /* CONFIG_IPIPE */
 	SP += -12;
 	call _schedule_tail;
 	SP += 12;
diff --git a/arch/blackfin/kernel/fixed_code.S b/arch/blackfin/kernel/fixed_code.S
index 4b03ba0..0d2d9e0 100644
--- a/arch/blackfin/kernel/fixed_code.S
+++ b/arch/blackfin/kernel/fixed_code.S
@@ -8,10 +8,12 @@
  * BF561 SMP).
  */
 #include <linux/linkage.h>
+#include <linux/init.h>
 #include <linux/unistd.h>
 #include <asm/entry.h>
 
-.text
+__INIT
+
 ENTRY(_fixed_code_start)
 
 .align 16
@@ -144,3 +146,5 @@
 ENDPROC(_safe_user_instruction)
 
 ENTRY(_fixed_code_end)
+
+__FINIT
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
new file mode 100644
index 0000000..339be5a
--- /dev/null
+++ b/arch/blackfin/kernel/ipipe.c
@@ -0,0 +1,428 @@
+/* -*- linux-c -*-
+ * linux/arch/blackfin/kernel/ipipe.c
+ *
+ * Copyright (C) 2005-2007 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-dependent I-pipe support for the Blackfin.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/kthread.h>
+#include <asm/unistd.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+
+static int create_irq_threads;
+
+DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
+
+static DEFINE_PER_CPU(unsigned long, pending_irqthread_mask);
+
+static DEFINE_PER_CPU(int [IVG13 + 1], pending_irq_count);
+
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
+
+static void __ipipe_no_irqtail(void);
+
+unsigned long __ipipe_irq_tail_hook = (unsigned long)&__ipipe_no_irqtail;
+EXPORT_SYMBOL(__ipipe_irq_tail_hook);
+
+unsigned long __ipipe_core_clock;
+EXPORT_SYMBOL(__ipipe_core_clock);
+
+unsigned long __ipipe_freq_scale;
+EXPORT_SYMBOL(__ipipe_freq_scale);
+
+atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
+
+unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
+EXPORT_SYMBOL(__ipipe_irq_lvmask);
+
+static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
+{
+	desc->ipipe_ack(irq, desc);
+}
+
+/*
+ * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
+ * interrupts are off, and secondary CPUs are still lost in space.
+ */
+void __ipipe_enable_pipeline(void)
+{
+	unsigned irq;
+
+	__ipipe_core_clock = get_cclk(); /* Fetch this once. */
+	__ipipe_freq_scale = 1000000000UL / __ipipe_core_clock;
+
+	for (irq = 0; irq < NR_IRQS; ++irq)
+		ipipe_virtualize_irq(ipipe_root_domain,
+				     irq,
+				     (ipipe_irq_handler_t)&asm_do_IRQ,
+				     NULL,
+				     &__ipipe_ack_irq,
+				     IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
+}
+
+/*
+ * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
+ * interrupt protection log is maintained here for each domain. Hw
+ * interrupts are masked on entry.
+ */
+void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
+{
+	struct ipipe_domain *this_domain, *next_domain;
+	struct list_head *head, *pos;
+	int m_ack, s = -1;
+
+	/*
+	 * Software-triggered IRQs do not need any ack.  The contents
+	 * of the register frame should only be used when processing
+	 * the timer interrupt, but not for handling any other
+	 * interrupt.
+	 */
+	m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
+
+	this_domain = ipipe_current_domain;
+
+	if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
+		head = &this_domain->p_link;
+	else {
+		head = __ipipe_pipeline.next;
+		next_domain = list_entry(head, struct ipipe_domain, p_link);
+		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
+			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
+				next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
+			if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
+				s = __test_and_set_bit(IPIPE_STALL_FLAG,
+						       &ipipe_root_cpudom_var(status));
+			__ipipe_dispatch_wired(next_domain, irq);
+			goto finalize;
+		}
+	}
+
+	/* Ack the interrupt. */
+
+	pos = head;
+
+	while (pos != &__ipipe_pipeline) {
+		next_domain = list_entry(pos, struct ipipe_domain, p_link);
+		/*
+		 * For each domain handling the incoming IRQ, mark it
+		 * as pending in its log.
+		 */
+		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
+			/*
+			 * Domains that handle this IRQ are polled for
+			 * acknowledging it by decreasing priority
+			 * order. The interrupt must be made pending
+			 * _first_ in the domain's status flags before
+			 * the PIC is unlocked.
+			 */
+			__ipipe_set_irq_pending(next_domain, irq);
+
+			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
+				next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
+				m_ack = 1;
+			}
+		}
+
+		/*
+		 * If the domain does not want the IRQ to be passed
+		 * down the interrupt pipe, exit the loop now.
+		 */
+		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
+			break;
+
+		pos = next_domain->p_link.next;
+	}
+
+	/*
+	 * Now walk the pipeline, yielding control to the highest
+	 * priority domain that has pending interrupt(s) or
+	 * immediately to the current domain if the interrupt has been
+	 * marked as 'sticky'. This search does not go beyond the
+	 * current domain in the pipeline. We also enforce the
+	 * additional root stage lock (blackfin-specific). */
+
+	if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
+		s = __test_and_set_bit(IPIPE_STALL_FLAG,
+				       &ipipe_root_cpudom_var(status));
+finalize:
+
+	__ipipe_walk_pipeline(head);
+
+	if (!s)
+		__clear_bit(IPIPE_STALL_FLAG,
+			    &ipipe_root_cpudom_var(status));
+}
+
+int __ipipe_check_root(void)
+{
+	return ipipe_root_domain_p;
+}
+
+void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
+{
+	struct irq_desc *desc = irq_desc + irq;
+	int prio = desc->ic_prio;
+
+	desc->depth = 0;
+	if (ipd != &ipipe_root &&
+	    atomic_inc_return(&__ipipe_irq_lvdepth[prio]) == 1)
+		__set_bit(prio, &__ipipe_irq_lvmask);
+}
+EXPORT_SYMBOL(__ipipe_enable_irqdesc);
+
+void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
+{
+	struct irq_desc *desc = irq_desc + irq;
+	int prio = desc->ic_prio;
+
+	if (ipd != &ipipe_root &&
+	    atomic_dec_and_test(&__ipipe_irq_lvdepth[prio]))
+		__clear_bit(prio, &__ipipe_irq_lvmask);
+}
+EXPORT_SYMBOL(__ipipe_disable_irqdesc);
+
+void __ipipe_stall_root_raw(void)
+{
+	/*
+	 * This code is called by the ins{bwl} routines (see
+	 * arch/blackfin/lib/ins.S), which are heavily used by the
+	 * network stack. It masks all interrupts but those handled by
+	 * non-root domains, so that we keep decent network transfer
+	 * rates for Linux without inducing pathological jitter for
+	 * the real-time domain.
+	 */
+	__asm__ __volatile__ ("sti %0;" : : "d"(__ipipe_irq_lvmask));
+
+	__set_bit(IPIPE_STALL_FLAG,
+		  &ipipe_root_cpudom_var(status));
+}
+
+void __ipipe_unstall_root_raw(void)
+{
+	__clear_bit(IPIPE_STALL_FLAG,
+		    &ipipe_root_cpudom_var(status));
+
+	__asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags));
+}
+
+int __ipipe_syscall_root(struct pt_regs *regs)
+{
+	unsigned long flags;
+
+	/* We need to run the IRQ tail hook whenever we don't
+	 * propagate a syscall to higher domains, because we know that
+	 * important operations might be pending there (e.g. Xenomai
+	 * deferred rescheduling). */
+
+	if (!__ipipe_syscall_watched_p(current, regs->orig_p0)) {
+		void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
+		hook();
+		return 0;
+	}
+
+	/*
+	 * This routine either returns:
+	 * 0 -- if the syscall is to be passed to Linux;
+	 * 1 -- if the syscall should not be passed to Linux, and no
+	 * tail work should be performed;
+	 * -1 -- if the syscall should not be passed to Linux but the
+	 * tail work has to be performed (for handling signals etc).
+	 */
+
+	if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) &&
+	    __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) {
+		if (ipipe_root_domain_p && !in_atomic()) {
+			/*
+			 * Sync pending VIRQs before _TIF_NEED_RESCHED
+			 * is tested.
+			 */
+			local_irq_save_hw(flags);
+			if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0)
+				__ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+			local_irq_restore_hw(flags);
+			return -1;
+		}
+		return 1;
+	}
+
+	return 0;
+}
+
+unsigned long ipipe_critical_enter(void (*syncfn) (void))
+{
+	unsigned long flags;
+
+	local_irq_save_hw(flags);
+
+	return flags;
+}
+
+void ipipe_critical_exit(unsigned long flags)
+{
+	local_irq_restore_hw(flags);
+}
+
+static void __ipipe_no_irqtail(void)
+{
+}
+
+int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
+{
+	info->ncpus = num_online_cpus();
+	info->cpufreq = ipipe_cpu_freq();
+	info->archdep.tmirq = IPIPE_TIMER_IRQ;
+	info->archdep.tmfreq = info->cpufreq;
+
+	return 0;
+}
+
+/*
+ * ipipe_trigger_irq() -- Push the interrupt at front of the pipeline
+ * just like if it has been actually received from a hw source. Also
+ * works for virtual interrupts.
+ */
+int ipipe_trigger_irq(unsigned irq)
+{
+	unsigned long flags;
+
+	if (irq >= IPIPE_NR_IRQS ||
+	    (ipipe_virtual_irq_p(irq)
+	     && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
+		return -EINVAL;
+
+	local_irq_save_hw(flags);
+
+	__ipipe_handle_irq(irq, NULL);
+
+	local_irq_restore_hw(flags);
+
+	return 1;
+}
+
+/* Move Linux IRQ to threads. */
+
+static int do_irqd(void *__desc)
+{
+	struct irq_desc *desc = __desc;
+	unsigned irq = desc - irq_desc;
+	int thrprio = desc->thr_prio;
+	int thrmask = 1 << thrprio;
+	int cpu = smp_processor_id();
+	cpumask_t cpumask;
+
+	sigfillset(&current->blocked);
+	current->flags |= PF_NOFREEZE;
+	cpumask = cpumask_of_cpu(cpu);
+	set_cpus_allowed(current, cpumask);
+	ipipe_setscheduler_root(current, SCHED_FIFO, 50 + thrprio);
+
+	while (!kthread_should_stop()) {
+		local_irq_disable();
+		if (!(desc->status & IRQ_SCHEDULED)) {
+			set_current_state(TASK_INTERRUPTIBLE);
+resched:
+			local_irq_enable();
+			schedule();
+			local_irq_disable();
+		}
+		__set_current_state(TASK_RUNNING);
+		/*
+		 * If higher priority interrupt servers are ready to
+		 * run, reschedule immediately. We need this for the
+		 * GPIO demux IRQ handler to unmask the interrupt line
+		 * _last_, after all GPIO IRQs have run.
+		 */
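+		/* ~(thrmask | (thrmask - 1)) keeps only the bits above
+		 * thrprio, i.e. higher-priority servers still pending.  */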
+		if (per_cpu(pending_irqthread_mask, cpu) & ~(thrmask|(thrmask-1)))
+			goto resched;
+		if (--per_cpu(pending_irq_count[thrprio], cpu) == 0)
+			per_cpu(pending_irqthread_mask, cpu) &= ~thrmask;
+		desc->status &= ~IRQ_SCHEDULED;
+		desc->thr_handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs));
+		local_irq_enable();
+	}
+	__set_current_state(TASK_RUNNING);
+	return 0;
+}
+
+static void kick_irqd(unsigned irq, void *cookie)
+{
+	struct irq_desc *desc = irq_desc + irq;
+	int thrprio = desc->thr_prio;
+	int thrmask = 1 << thrprio;
+	int cpu = smp_processor_id();
+
+	if (!(desc->status & IRQ_SCHEDULED)) {
+		desc->status |= IRQ_SCHEDULED;
+		per_cpu(pending_irqthread_mask, cpu) |= thrmask;
+		++per_cpu(pending_irq_count[thrprio], cpu);
+		wake_up_process(desc->thread);
+	}
+}
+
+int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc)
+{
+	if (desc->thread || !create_irq_threads)
+		return 0;
+
+	desc->thread = kthread_create(do_irqd, desc, "IRQ %d", irq);
+	if (desc->thread == NULL) {
+		printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq);
+		return -ENOMEM;
+	}
+
+	wake_up_process(desc->thread);
+
+	desc->thr_handler = ipipe_root_domain->irqs[irq].handler;
+	ipipe_root_domain->irqs[irq].handler = &kick_irqd;
+
+	return 0;
+}
+
+void __init ipipe_init_irq_threads(void)
+{
+	unsigned irq;
+	struct irq_desc *desc;
+
+	create_irq_threads = 1;
+
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		desc = irq_desc + irq;
+		if (desc->action != NULL ||
+			(desc->status & IRQ_NOREQUEST) != 0)
+			ipipe_start_irq_thread(irq, desc);
+	}
+}
+
+EXPORT_SYMBOL(show_stack);
+
+#ifdef CONFIG_IPIPE_TRACE_MCOUNT
+void notrace _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 07402f5..ab8209c 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -36,7 +36,7 @@
 #include <linux/irq.h>
 #include <asm/trace.h>
 
-static unsigned long irq_err_count;
+static atomic_t irq_err_count;
 static spinlock_t irq_controller_lock;
 
 /*
@@ -48,10 +48,9 @@
 
 void ack_bad_irq(unsigned int irq)
 {
-	irq_err_count += 1;
+	atomic_inc(&irq_err_count);
 	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
 }
-EXPORT_SYMBOL(ack_bad_irq);
 
 static struct irq_chip bad_chip = {
 	.ack = dummy_mask_unmask_irq,
@@ -72,7 +71,7 @@
 
 int show_interrupts(struct seq_file *p, void *v)
 {
-	int i = *(loff_t *) v;
+	int i = *(loff_t *) v, j;
 	struct irqaction *action;
 	unsigned long flags;
 
@@ -80,19 +79,20 @@
 		spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
-			goto unlock;
-
-		seq_printf(p, "%3d: %10u ", i, kstat_irqs(i));
+			goto skip;
+		seq_printf(p, "%3d: ", i);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+		seq_printf(p, " %8s", irq_desc[i].chip->name);
 		seq_printf(p, "  %s", action->name);
 		for (action = action->next; action; action = action->next)
-			seq_printf(p, ", %s", action->name);
+			seq_printf(p, "  %s", action->name);
 
 		seq_putc(p, '\n');
- unlock:
+ skip:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-	} else if (i == NR_IRQS) {
-		seq_printf(p, "Err: %10lu\n", irq_err_count);
-	}
+	} else if (i == NR_IRQS)
+		seq_printf(p, "Err: %10u\n",  atomic_read(&irq_err_count));
 	return 0;
 }
 
@@ -101,7 +101,6 @@
  * come via this function.  Instead, they should provide their
  * own 'handler'
  */
-
 #ifdef CONFIG_DO_IRQ_L1
 __attribute__((l1_text))
 #endif
@@ -109,8 +108,9 @@
 {
 	struct pt_regs *old_regs;
 	struct irq_desc *desc = irq_desc + irq;
+#ifndef CONFIG_IPIPE
 	unsigned short pending, other_ints;
-
+#endif
 	old_regs = set_irq_regs(regs);
 
 	/*
@@ -121,9 +121,24 @@
 		desc = &bad_irq_desc;
 
 	irq_enter();
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	/* Debugging check for stack overflow: is there less than STACK_WARN free? */
+	{
+		long sp;
 
+		sp = __get_SP() & (THREAD_SIZE-1);
+
+		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+			dump_stack();
+			printk(KERN_EMERG "%s: possible stack overflow while handling irq %i, "
+					"only %ld bytes free\n",
+				__func__, irq, sp - sizeof(struct thread_info));
+		}
+	}
+#endif
 	generic_handle_irq(irq);
 
+#ifndef CONFIG_IPIPE	/* Useless and buggy over the I-pipe: IRQs are threaded. */
 	/* If we're the only interrupt running (ignoring IRQ15 which is for
 	   syscalls), lower our priority to IRQ14 so that softirqs run at
 	   that level.  If there's another, lower-level interrupt, irq_exit
@@ -133,6 +148,7 @@
 	other_ints = pending & (pending - 1);
 	if (other_ints == 0)
 		lower_to_irq14();
+#endif /* !CONFIG_IPIPE */
 	irq_exit();
 
 	set_irq_regs(old_regs);
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index b795a20..b163f6d 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -34,9 +34,14 @@
 #error change the definition of slavecpulocks
 #endif
 
-#ifdef CONFIG_BFIN_WDT
-# error "Please unselect blackfin watchdog driver before build KGDB."
-#endif
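+/* True when [addr, addr + size) lies entirely inside the given on-chip
+ * region; a zero length makes the test fail, so absent regions need no
+ * #ifdef guards.  */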
+#define IN_MEM(addr, size, l1_addr, l1_size) \
+({ \
+	unsigned long __addr = (unsigned long)(addr); \
+	(l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
+})
+#define ASYNC_BANK_SIZE \
+	(ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
+	 ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
 
 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {
@@ -105,7 +110,7 @@
  * Extracts ebp, esp and eip values understandable by gdb from the values
  * saved by switch_to.
  * thread.esp points to ebp. flags and ebp are pushed in switch_to hence esp
- * prior to entering switch_to is 8 greater then the value that is saved.
+ * prior to entering switch_to is 8 greater than the value that is saved.
  * If switch_to changes, change following code appropriately.
  */
 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
@@ -219,6 +224,7 @@
 		if (bfin_type == breakinfo[breakno].type
 			&& !breakinfo[breakno].occupied) {
 			breakinfo[breakno].occupied = 1;
+			breakinfo[breakno].skip = 0;
 			breakinfo[breakno].enabled = 1;
 			breakinfo[breakno].addr = addr;
 			breakinfo[breakno].dataacc = dataacc;
@@ -363,12 +369,12 @@
 
 void kgdb_roundup_cpus(unsigned long flags)
 {
-	smp_call_function(kgdb_passive_cpu_callback, NULL, 0, 0);
+	smp_call_function(kgdb_passive_cpu_callback, NULL, 0);
 }
 
 void kgdb_roundup_cpu(int cpu, unsigned long flags)
 {
-	smp_call_function_single(cpu, kgdb_passive_cpu_callback, NULL, 0, 0);
+	smp_call_function_single(cpu, kgdb_passive_cpu_callback, NULL, 0);
 }
 #endif
 
@@ -385,10 +391,8 @@
 			       struct pt_regs *regs)
 {
 	long addr;
-	long breakno;
 	char *ptr;
 	int newPC;
-	int wp_status;
 	int i;
 
 	switch (remcom_in_buffer[0]) {
@@ -426,17 +430,6 @@
 			kgdb_single_step = i + 1;
 		}
 
-		if (vector == VEC_WATCH) {
-			wp_status = bfin_read_WPSTAT();
-			for (breakno = 0; breakno < HW_WATCHPOINT_NUM; breakno++) {
-				if (wp_status & (1 << breakno)) {
-					breakinfo->skip = 1;
-					break;
-				}
-			}
-			bfin_write_WPSTAT(0);
-		}
-
 		bfin_correct_hw_break();
 
 		return 0;
@@ -478,57 +471,32 @@
 		return 0;
 	if (addr >= SYSMMR_BASE)
 		return 0;
-	if (addr >= ASYNC_BANK0_BASE
-	   && addr + size <= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)
+	if (IN_MEM(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK_SIZE))
 		return 0;
 	if (cpu == 0) {
-		if (addr >= L1_SCRATCH_START
-		   && (addr + size <= L1_SCRATCH_START + L1_SCRATCH_LENGTH))
+		if (IN_MEM(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
 			return 0;
-#if L1_CODE_LENGTH != 0
-		if (addr >= L1_CODE_START
-		   && (addr + size <= L1_CODE_START + L1_CODE_LENGTH))
+		if (IN_MEM(addr, size, L1_CODE_START, L1_CODE_LENGTH))
 			return 0;
-#endif
-#if L1_DATA_A_LENGTH != 0
-		if (addr >= L1_DATA_A_START
-		   && (addr + size <= L1_DATA_A_START + L1_DATA_A_LENGTH))
+		if (IN_MEM(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
 			return 0;
-#endif
-#if L1_DATA_B_LENGTH != 0
-		if (addr >= L1_DATA_B_START
-		   && (addr + size <= L1_DATA_B_START + L1_DATA_B_LENGTH))
+		if (IN_MEM(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
 			return 0;
-#endif
 #ifdef CONFIG_SMP
 	} else if (cpu == 1) {
-		if (addr >= COREB_L1_SCRATCH_START
-		   && (addr + size <= COREB_L1_SCRATCH_START
-		   + L1_SCRATCH_LENGTH))
+		if (IN_MEM(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
 			return 0;
-# if L1_CODE_LENGTH != 0
-		if (addr >= COREB_L1_CODE_START
-		   && (addr + size <= COREB_L1_CODE_START + L1_CODE_LENGTH))
+		if (IN_MEM(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
 			return 0;
-# endif
-# if L1_DATA_A_LENGTH != 0
-		if (addr >= COREB_L1_DATA_A_START
-		   && (addr + size <= COREB_L1_DATA_A_START + L1_DATA_A_LENGTH))
+		if (IN_MEM(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
 			return 0;
-# endif
-# if L1_DATA_B_LENGTH != 0
-		if (addr >= COREB_L1_DATA_B_START
-		   && (addr + size <= COREB_L1_DATA_B_START + L1_DATA_B_LENGTH))
+		if (IN_MEM(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
 			return 0;
-# endif
 #endif
 	}
 
-#if L2_LENGTH != 0
-	if (addr >= L2_START
-	   && addr + size <= L2_START + L2_LENGTH)
+	if (IN_MEM(addr, size, L2_START, L2_LENGTH))
 		return 0;
-#endif
 
 	return EFAULT;
 }
@@ -582,12 +550,9 @@
 		default:
 			err = EFAULT;
 		}
-	} else if (cpu == 0 && (unsigned int)mem >= L1_CODE_START &&
-		(unsigned int)(mem + count) <= L1_CODE_START + L1_CODE_LENGTH
+	} else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
 #ifdef CONFIG_SMP
-		|| cpu == 1 && (unsigned int)mem >= COREB_L1_CODE_START &&
-		(unsigned int)(mem + count) <=
-		COREB_L1_CODE_START + L1_CODE_LENGTH
+		|| (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
 #endif
 		) {
 		/* access L1 instruction SRAM*/
@@ -658,12 +623,9 @@
 		default:
 			return EFAULT;
 		}
-	} else if (cpu == 0 && (unsigned int)mem >= L1_CODE_START &&
-		(unsigned int)(mem + count) < L1_CODE_START + L1_CODE_LENGTH
+	} else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
 #ifdef CONFIG_SMP
-		|| cpu == 1 && (unsigned int)mem >= COREB_L1_CODE_START &&
-		(unsigned int)(mem + count) <=
-		COREB_L1_CODE_START + L1_CODE_LENGTH
+		|| (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
 #endif
 		) {
 		/* access L1 instruction SRAM */
@@ -723,12 +685,9 @@
 		default:
 			return EFAULT;
 		}
-	} else if (cpu == 0 && (unsigned int)mem >= L1_CODE_START &&
-		(unsigned int)(mem + count) <= L1_CODE_START + L1_CODE_LENGTH
+	} else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
 #ifdef CONFIG_SMP
-		|| cpu == 1 && (unsigned int)mem >= COREB_L1_CODE_START &&
-		(unsigned int)(mem + count) <=
-		COREB_L1_CODE_START + L1_CODE_LENGTH
+		|| (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
 #endif
 		) {
 		/* access L1 instruction SRAM */
@@ -745,24 +704,16 @@
 
 	if (addr >= 0x1000 && (addr + BREAK_INSTR_SIZE) <= physical_mem_end)
 		return 0;
-	if (addr >= ASYNC_BANK0_BASE
-	   && addr + BREAK_INSTR_SIZE <= ASYNC_BANK3_BASE + ASYNC_BANK3_BASE)
+	if (IN_MEM(addr, BREAK_INSTR_SIZE, ASYNC_BANK0_BASE, ASYNC_BANK_SIZE))
 		return 0;
-#if L1_CODE_LENGTH != 0
-	if (cpu == 0 && addr >= L1_CODE_START
-	   && addr + BREAK_INSTR_SIZE <= L1_CODE_START + L1_CODE_LENGTH)
+	if (cpu == 0 && IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH))
 		return 0;
-# ifdef CONFIG_SMP
-	else if (cpu == 1 && addr >= COREB_L1_CODE_START
-	   && addr + BREAK_INSTR_SIZE <= COREB_L1_CODE_START + L1_CODE_LENGTH)
-		return 0;
-# endif
-#endif
-#if L2_LENGTH != 0
-	if (addr >= L2_START
-	   && addr + BREAK_INSTR_SIZE <= L2_START + L2_LENGTH)
+#ifdef CONFIG_SMP
+	else if (cpu == 1 && IN_MEM(addr, BREAK_INSTR_SIZE, COREB_L1_CODE_START, L1_CODE_LENGTH))
 		return 0;
 #endif
+	if (IN_MEM(addr, BREAK_INSTR_SIZE, L2_START, L2_LENGTH))
+		return 0;
 
 	return EFAULT;
 }
@@ -772,13 +723,9 @@
 	int err;
 	int cpu = raw_smp_processor_id();
 
-	if ((cpu == 0 && (unsigned int)addr >= L1_CODE_START
-		&& (unsigned int)(addr + BREAK_INSTR_SIZE)
-		< L1_CODE_START + L1_CODE_LENGTH)
+	if ((cpu == 0 && IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH))
 #ifdef CONFIG_SMP
-		|| (cpu == 1 && (unsigned int)addr >= COREB_L1_CODE_START
-		&& (unsigned int)(addr + BREAK_INSTR_SIZE)
-		< COREB_L1_CODE_START + L1_CODE_LENGTH)
+		|| (cpu == 1 && IN_MEM(addr, BREAK_INSTR_SIZE, COREB_L1_CODE_START, L1_CODE_LENGTH))
 #endif
 		) {
 		/* access L1 instruction SRAM */
@@ -804,9 +751,7 @@
 
 int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
 {
-	if ((unsigned int)addr >= L1_CODE_START &&
-		(unsigned int)(addr + BREAK_INSTR_SIZE) <
-			L1_CODE_START + L1_CODE_LENGTH) {
+	if (IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH)) {
 		/* access L1 instruction SRAM */
 		if (dma_memcpy((void *)addr, bundle, BREAK_INSTR_SIZE) == NULL)
 			return -EFAULT;
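
The open-coded range checks above are folded into an IN_MEM() helper whose definition is not part of this hunk. A plausible reconstruction, inferred from the checks it replaces (the exact form in the Blackfin headers may differ), is:

	/* True when [addr, addr+size) lies entirely inside [start, start+len);
	 * trivially false for zero-length banks, which is why the old
	 * "#if L1_CODE_LENGTH != 0" style guards can simply be dropped.
	 */
	#define IN_MEM(addr, size, start, len) \
		((len) != 0 && \
		 (unsigned long)(addr) >= (unsigned long)(start) && \
		 (unsigned long)(addr) + (size) <= (unsigned long)(start) + (len))
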
diff --git a/arch/blackfin/kernel/kgdb_test.c b/arch/blackfin/kernel/kgdb_test.c
new file mode 100644
index 0000000..3dba9c1
--- /dev/null
+++ b/arch/blackfin/kernel/kgdb_test.c
@@ -0,0 +1,123 @@
+/*
+ * arch/blackfin/kernel/kgdb_test.c - Blackfin kgdb tests
+ *
+ * Copyright 2005-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+
+#include <asm/current.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <asm/blackfin.h>
+
+static char cmdline[256];
+static unsigned long len;
+
+static int num1 __attribute__((l1_data));
+
+void kgdb_l1_test(void) __attribute__((l1_text));
+
+void kgdb_l1_test(void)
+{
+	printk(KERN_ALERT "L1(before change) : data variable addr = 0x%p, data value is %d\n", &num1, num1);
+	printk(KERN_ALERT "L1 : code function addr = 0x%p\n", kgdb_l1_test);
+	num1 = num1 + 10;
+	printk(KERN_ALERT "L1(after change) : data variable addr = 0x%p, data value is %d\n", &num1, num1);
+	return;
+}
+#if L2_LENGTH
+
+static int num2 __attribute__((l2));
+void kgdb_l2_test(void) __attribute__((l2));
+
+void kgdb_l2_test(void)
+{
+	printk(KERN_ALERT "L2(before change) : data variable addr = 0x%p, data value is %d\n", &num2, num2);
+	printk(KERN_ALERT "L2 : code function addr = 0x%p\n", kgdb_l2_test);
+	num2 = num2 + 20;
+	printk(KERN_ALERT "L2(after change) : data variable addr = 0x%p, data value is %d\n", &num2, num2);
+	return;
+}
+
+#endif
+
+
+int kgdb_test(char *name, int len, int count, int z)
+{
+	printk(KERN_DEBUG "kgdb name(%d): %s, %d, %d\n", len, name, count, z);
+	count = z;
+	return count;
+}
+
+static int test_proc_output(char *buf)
+{
+	kgdb_test("hello world!", 12, 0x55, 0x10);
+	kgdb_l1_test();
+	#if L2_LENGTH
+	kgdb_l2_test();
+	#endif
+
+	return 0;
+}
+
+static int test_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int len;
+
+	len = test_proc_output(page);
+	if (len <= off+count)
+		*eof = 1;
+	*start = page + off;
+	len -= off;
+	if (len > count)
+		len = count;
+	if (len < 0)
+		len = 0;
+	return len;
+}
+
+static int test_write_proc(struct file *file, const char *buffer,
+				 unsigned long count, void *data)
+{
+	if (count >= 256)
+		len = 255;
+	else
+		len = count;
+
+	memcpy(cmdline, buffer, len);
+	cmdline[len] = 0;
+
+	return len;
+}
+
+static int __init kgdbtest_init(void)
+{
+	struct proc_dir_entry *entry;
+
+	entry = create_proc_entry("kgdbtest", 0, NULL);
+	if (entry == NULL)
+		return -ENOMEM;
+
+	entry->read_proc = test_read_proc;
+	entry->write_proc = test_write_proc;
+	entry->data = NULL;
+
+	return 0;
+}
+
+static void __exit kgdbtest_exit(void)
+{
+	remove_proc_entry("kgdbtest", NULL);
+}
+
+module_init(kgdbtest_init);
+module_exit(kgdbtest_exit);
+MODULE_LICENSE("GPL");
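
For reference, the proc entry registered above is exercised simply by reading it: test_read_proc() runs kgdb_test(), kgdb_l1_test() and, when L2 SRAM exists, kgdb_l2_test(), while writes only stash the string into the module's cmdline buffer. A minimal, hypothetical userspace driver for the test (not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		char buf[64];
		FILE *f = fopen("/proc/kgdbtest", "r");	/* created by kgdbtest_init() */

		if (!f)
			return 1;
		fread(buf, 1, sizeof(buf), f);	/* read path triggers the L1/L2 test functions */
		fclose(f);
		return 0;
	}
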
diff --git a/arch/blackfin/kernel/mcount.S b/arch/blackfin/kernel/mcount.S
new file mode 100644
index 0000000..edcfb38
--- /dev/null
+++ b/arch/blackfin/kernel/mcount.S
@@ -0,0 +1,70 @@
+/*
+ * arch/blackfin/kernel/mcount.S
+ *
+ * Copyright (C) 2006 Analog Devices Inc.
+ *
+ * 2007/04/12 Save index, length, modify and base registers. --rpm
+ */
+
+#include <linux/linkage.h>
+#include <asm/blackfin.h>
+
+.text
+
+.align 4 	/* just in case */
+
+ENTRY(__mcount)
+	[--sp] = i0;
+	[--sp] = i1;
+	[--sp] = i2;
+	[--sp] = i3;
+	[--sp] = l0;
+	[--sp] = l1;
+	[--sp] = l2;
+	[--sp] = l3;
+	[--sp] = m0;
+	[--sp] = m1;
+	[--sp] = m2;
+	[--sp] = m3;
+	[--sp] = b0;
+	[--sp] = b1;
+	[--sp] = b2;
+	[--sp] = b3;
+	[--sp] = ( r7:0, p5:0 );
+	[--sp] = ASTAT;
+
+	p1.L = _ipipe_trace_enable;
+	p1.H = _ipipe_trace_enable;
+	r7 = [p1];
+	CC = r7 == 0;
+	if CC jump out;
+	link 0x10;
+	r0 = 0x0;
+	[sp + 0xc] = r0; /* v */
+	r0 = 0x0;	/* type: IPIPE_TRACE_FN */
+	r1 = rets;
+	p0 = [fp];	/* p0: Prior FP */
+	r2 = [p0 + 4];	/* r2: Prior RETS */
+	call ___ipipe_trace;
+	unlink;
+out:
+	ASTAT = [sp++];
+	( r7:0, p5:0 ) = [sp++];
+	b3 = [sp++];
+	b2 = [sp++];
+	b1 = [sp++];
+	b0 = [sp++];
+	m3 = [sp++];
+	m2 = [sp++];
+	m1 = [sp++];
+	m0 = [sp++];
+	l3 = [sp++];
+	l2 = [sp++];
+	l1 = [sp++];
+	l0 = [sp++];
+	i3 = [sp++];
+	i2 = [sp++];
+	i1 = [sp++];
+	i0 = [sp++];
+	rts;
+ENDPROC(__mcount)
diff --git a/arch/blackfin/kernel/module.c b/arch/blackfin/kernel/module.c
index e1bebc8..1bd7f2d 100644
--- a/arch/blackfin/kernel/module.c
+++ b/arch/blackfin/kernel/module.c
@@ -37,111 +37,6 @@
 #include <asm/dma.h>
 #include <asm/cacheflush.h>
 
-/*
- * handle arithmetic relocations.
- * See binutils/bfd/elf32-bfin.c for more details
- */
-#define RELOC_STACK_SIZE 100
-static uint32_t reloc_stack[RELOC_STACK_SIZE];
-static unsigned int reloc_stack_tos;
-
-#define is_reloc_stack_empty() ((reloc_stack_tos > 0)?0:1)
-
-static void reloc_stack_push(uint32_t value)
-{
-	reloc_stack[reloc_stack_tos++] = value;
-}
-
-static uint32_t reloc_stack_pop(void)
-{
-	return reloc_stack[--reloc_stack_tos];
-}
-
-static uint32_t reloc_stack_operate(unsigned int oper, struct module *mod)
-{
-	uint32_t value;
-
-	switch (oper) {
-	case R_add:
-		value = reloc_stack[reloc_stack_tos - 2] +
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_sub:
-		value = reloc_stack[reloc_stack_tos - 2] -
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_mult:
-		value = reloc_stack[reloc_stack_tos - 2] *
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_div:
-		value = reloc_stack[reloc_stack_tos - 2] /
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_mod:
-		value = reloc_stack[reloc_stack_tos - 2] %
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_lshift:
-		value = reloc_stack[reloc_stack_tos - 2] <<
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_rshift:
-		value = reloc_stack[reloc_stack_tos - 2] >>
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_and:
-		value = reloc_stack[reloc_stack_tos - 2] &
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_or:
-		value = reloc_stack[reloc_stack_tos - 2] |
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_xor:
-		value = reloc_stack[reloc_stack_tos - 2] ^
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_land:
-		value = reloc_stack[reloc_stack_tos - 2] &&
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_lor:
-		value = reloc_stack[reloc_stack_tos - 2] ||
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_neg:
-		value = -reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos--;
-		break;
-	case R_comp:
-		value = ~reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 1;
-		break;
-	default:
-		printk(KERN_WARNING "module %s: unhandled reloction\n",
-				mod->name);
-		return 0;
-	}
-
-	/* now push the new value back on stack */
-	reloc_stack_push(value);
-
-	return value;
-}
-
 void *module_alloc(unsigned long size)
 {
 	if (size == 0)
@@ -334,16 +229,18 @@
 		   undefined symbols have been resolved. */
 		sym = (Elf32_Sym *) sechdrs[symindex].sh_addr
 		    + ELF32_R_SYM(rel[i].r_info);
-		if (is_reloc_stack_empty()) {
-			value = sym->st_value;
-		} else {
-			value = reloc_stack_pop();
-		}
+		value = sym->st_value;
 		value += rel[i].r_addend;
 		pr_debug("location is %x, value is %x type is %d \n",
 			 (unsigned int) location32, value,
 			 ELF32_R_TYPE(rel[i].r_info));
-
+#ifdef CONFIG_SMP
+		if ((unsigned long)location16 >= COREB_L1_DATA_A_START) {
+			printk(KERN_ERR "module %s: cannot relocate in L1: %u (SMP kernel)\n",
+				       mod->name, ELF32_R_TYPE(rel[i].r_info));
+			return -ENOEXEC;
+		}
+#endif
 		switch (ELF32_R_TYPE(rel[i].r_info)) {
 
 		case R_pcrel24:
@@ -355,6 +252,12 @@
 			location32 = (uint32_t *) location16;
 			value -= (uint32_t) location32;
 			value >>= 1;
+			if ((value & 0xFF000000) != 0 &&
+			    (value & 0xFF000000) != 0xFF000000) {
+				printk(KERN_ERR "module %s: relocation overflow\n",
+				       mod->name);
+				return -ENOEXEC;
+			}
 			pr_debug("value is %x, before %x-%x after %x-%x\n", value,
 			       *location16, *(location16 + 1),
 			       (*location16 & 0xff00) | (value >> 16 & 0x00ff),
@@ -399,28 +302,6 @@
 			pr_debug("before %x after %x\n", *location32, value);
 			*location32 = value;
 			break;
-		case R_push:
-			reloc_stack_push(value);
-			break;
-		case R_const:
-			reloc_stack_push(rel[i].r_addend);
-			break;
-		case R_add:
-		case R_sub:
-		case R_mult:
-		case R_div:
-		case R_mod:
-		case R_lshift:
-		case R_rshift:
-		case R_and:
-		case R_or:
-		case R_xor:
-		case R_land:
-		case R_lor:
-		case R_neg:
-		case R_comp:
-			reloc_stack_operate(ELF32_R_TYPE(rel[i].r_info), mod);
-			break;
 		default:
 			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
 			       mod->name, ELF32_R_TYPE(rel[i].r_info));
@@ -436,6 +317,7 @@
 {
 	unsigned int i, strindex = 0, symindex = 0;
 	char *secstrings;
+	long err = 0;
 
 	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
@@ -460,8 +342,10 @@
 		    (strcmp(".rela.l1.text", secstrings + sechdrs[i].sh_name) == 0) ||
 		    ((strcmp(".rela.text", secstrings + sechdrs[i].sh_name) == 0) &&
 			(hdr->e_flags & (EF_BFIN_CODE_IN_L1|EF_BFIN_CODE_IN_L2))))) {
-			apply_relocate_add((Elf_Shdr *) sechdrs, strtab,
+			err = apply_relocate_add((Elf_Shdr *) sechdrs, strtab,
 					   symindex, i, mod);
+			if (err < 0)
+				return -ENOEXEC;
 		}
 	}
 	return 0;
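
The overflow test added to the R_pcrel24 case above is the usual signed 24-bit range check on the halved PC-relative offset. Restated as a standalone helper (a sketch for clarity only, not part of the patch):

	#include <stdint.h>

	/* A 24-bit PC-relative field can encode the value only if, after the
	 * ">>= 1" above, its top byte is all zeros (positive) or all ones
	 * (negative) -- exactly the (value & 0xFF000000) test in the hunk.
	 */
	static inline int fits_pcrel24(uint32_t value)
	{
		uint32_t top = value & 0xFF000000;

		return top == 0 || top == 0xFF000000;
	}
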
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 0c3ea11..33e2e89 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -39,6 +39,7 @@
 
 #include <asm/blackfin.h>
 #include <asm/fixed_code.h>
+#include <asm/mem_map.h>
 
 asmlinkage void ret_from_fork(void);
 
@@ -81,11 +82,14 @@
  */
 static void default_idle(void)
 {
-	local_irq_disable();
+#ifdef CONFIG_IPIPE
+	ipipe_suspend_domain();
+#endif
+	local_irq_disable_hw();
 	if (!need_resched())
 		idle_with_irq_disabled();
 
-	local_irq_enable();
+	local_irq_enable_hw();
 }
 
 /*
@@ -154,6 +158,7 @@
 	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
 		       NULL);
 }
+EXPORT_SYMBOL(kernel_thread);
 
 void flush_thread(void)
 {
@@ -170,6 +175,13 @@
 	unsigned long clone_flags;
 	unsigned long newsp;
 
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	if (current->rt.nr_cpus_allowed == num_possible_cpus()) {
+		current->cpus_allowed = cpumask_of_cpu(smp_processor_id());
+		current->rt.nr_cpus_allowed = 1;
+	}
+#endif
+
 	/* syscall2 puts clone_flags in r0 and usp in r1 */
 	clone_flags = regs->r0;
 	newsp = regs->r1;
@@ -337,22 +349,22 @@
 	if (addr >= (unsigned long)__init_begin &&
 	    addr + size <= (unsigned long)__init_end)
 		return 1;
-	if (addr >= L1_SCRATCH_START
-	    && addr + size <= L1_SCRATCH_START + L1_SCRATCH_LENGTH)
+	if (addr >= get_l1_scratch_start()
+	    && addr + size <= get_l1_scratch_start() + L1_SCRATCH_LENGTH)
 		return 1;
 #if L1_CODE_LENGTH != 0
-	if (addr >= L1_CODE_START + (_etext_l1 - _stext_l1)
-	    && addr + size <= L1_CODE_START + L1_CODE_LENGTH)
+	if (addr >= get_l1_code_start() + (_etext_l1 - _stext_l1)
+	    && addr + size <= get_l1_code_start() + L1_CODE_LENGTH)
 		return 1;
 #endif
 #if L1_DATA_A_LENGTH != 0
-	if (addr >= L1_DATA_A_START + (_ebss_l1 - _sdata_l1)
-	    && addr + size <= L1_DATA_A_START + L1_DATA_A_LENGTH)
+	if (addr >= get_l1_data_a_start() + (_ebss_l1 - _sdata_l1)
+	    && addr + size <= get_l1_data_a_start() + L1_DATA_A_LENGTH)
 		return 1;
 #endif
 #if L1_DATA_B_LENGTH != 0
-	if (addr >= L1_DATA_B_START + (_ebss_b_l1 - _sdata_b_l1)
-	    && addr + size <= L1_DATA_B_START + L1_DATA_B_LENGTH)
+	if (addr >= get_l1_data_b_start() + (_ebss_b_l1 - _sdata_b_l1)
+	    && addr + size <= get_l1_data_b_start() + L1_DATA_B_LENGTH)
 		return 1;
 #endif
 #if L2_LENGTH != 0
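
The get_l1_*_start() accessors used above are defined elsewhere; the idea, sketched here under the assumption that they mirror how COREB_L1_CODE_START is used in the kgdb changes earlier in this patch, is that each core sees its own L1 window, so one check covers both cores on SMP parts:

	/* Hypothetical shape of such an accessor (the model_ prefix marks this
	 * as an illustration, not the in-tree definition).
	 */
	static inline unsigned long model_l1_code_start(void)
	{
	#ifdef CONFIG_SMP
		if (raw_smp_processor_id() == 1)
			return COREB_L1_CODE_START;
	#endif
		return L1_CODE_START;
	}
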
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 140bf00..594e325 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -45,6 +45,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/dma.h>
 #include <asm/fixed_code.h>
+#include <asm/mem_map.h>
 
 #define TEXT_OFFSET 0
 /*
@@ -80,10 +81,12 @@
 /*
  * Get all user integer registers.
  */
-static inline int ptrace_getregs(struct task_struct *tsk, void __user * uregs)
+static inline int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
 {
-	struct pt_regs *regs = get_user_regs(tsk);
-	return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
+	struct pt_regs regs;
+	memcpy(&regs, get_user_regs(tsk), sizeof(regs));
+	regs.usp = tsk->thread.usp;
+	return copy_to_user(uregs, &regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
 }
 
 /* Mapping from PT_xxx to the stack offset at which the register is
@@ -157,15 +160,15 @@
 static inline int is_user_addr_valid(struct task_struct *child,
 				     unsigned long start, unsigned long len)
 {
-	struct vm_list_struct *vml;
+	struct vm_area_struct *vma;
 	struct sram_list_struct *sraml;
 
 	/* overflow */
 	if (start + len < start)
 		return -EIO;
 
-	for (vml = child->mm->context.vmlist; vml; vml = vml->next)
-		if (start >= vml->vma->vm_start && start + len < vml->vma->vm_end)
+	vma = find_vma(child->mm, start);
+	if (vma && start >= vma->vm_start && start + len <= vma->vm_end)
 			return 0;
 
 	for (sraml = child->mm->context.sram_list; sraml; sraml = sraml->next)
@@ -220,8 +223,8 @@
 				break;
 			pr_debug("ptrace: user address is valid\n");
 
-			if (L1_CODE_LENGTH != 0 && addr >= L1_CODE_START
-			    && addr + sizeof(tmp) <= L1_CODE_START + L1_CODE_LENGTH) {
+			if (L1_CODE_LENGTH != 0 && addr >= get_l1_code_start()
+			    && addr + sizeof(tmp) <= get_l1_code_start() + L1_CODE_LENGTH) {
 				safe_dma_memcpy (&tmp, (const void *)(addr), sizeof(tmp));
 				copied = sizeof(tmp);
 
@@ -300,8 +303,8 @@
 				break;
 			pr_debug("ptrace: user address is valid\n");
 
-			if (L1_CODE_LENGTH != 0 && addr >= L1_CODE_START
-			    && addr + sizeof(data) <= L1_CODE_START + L1_CODE_LENGTH) {
+			if (L1_CODE_LENGTH != 0 && addr >= get_l1_code_start()
+			    && addr + sizeof(data) <= get_l1_code_start() + L1_CODE_LENGTH) {
 				safe_dma_memcpy ((void *)(addr), &data, sizeof(data));
 				copied = sizeof(data);
 
diff --git a/arch/blackfin/kernel/reboot.c b/arch/blackfin/kernel/reboot.c
index ae97ca4..eeee8cb 100644
--- a/arch/blackfin/kernel/reboot.c
+++ b/arch/blackfin/kernel/reboot.c
@@ -21,7 +21,7 @@
  * the core reset.
  */
 __attribute__((l1_text))
-static void bfin_reset(void)
+static void _bfin_reset(void)
 {
 	/* Wait for completion of "system" events such as cache line
 	 * line fills so that we avoid infinite stalls later on as
@@ -66,6 +66,18 @@
 	}
 }
 
+static void bfin_reset(void)
+{
+	if (ANOMALY_05000353 || ANOMALY_05000386)
+		_bfin_reset();
+	else
+		/* the bootrom checks to see how it was reset and will
+		 * automatically perform a software reset for us when
+		 * it starts executing boot
+		 */
+		asm("raise 1;");
+}
+
 __attribute__((weak))
 void native_machine_restart(char *cmd)
 {
@@ -75,14 +87,10 @@
 {
 	native_machine_restart(cmd);
 	local_irq_disable();
-	if (ANOMALY_05000353 || ANOMALY_05000386)
-		bfin_reset();
+	if (smp_processor_id())
+		smp_call_function((void *)bfin_reset, 0, 1);
 	else
-		/* the bootrom checks to see how it was reset and will
-		 * automatically perform a software reset for us when
-		 * it starts executing boot
-		 */
-		asm("raise 1;");
+		bfin_reset();
 }
 
 __attribute__((weak))
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 71a9a8c..b2a8113 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -13,6 +13,7 @@
 #include <linux/bootmem.h>
 #include <linux/seq_file.h>
 #include <linux/cpu.h>
+#include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/tty.h>
 #include <linux/pfn.h>
@@ -26,11 +27,10 @@
 #include <asm/blackfin.h>
 #include <asm/cplbinit.h>
 #include <asm/div64.h>
+#include <asm/cpu.h>
 #include <asm/fixed_code.h>
 #include <asm/early_printk.h>
 
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
-
 u16 _bfin_swrst;
 EXPORT_SYMBOL(_bfin_swrst);
 
@@ -79,29 +79,70 @@
 static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
 static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;
 
-void __init bfin_cache_init(void)
-{
+DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);
+
+static int early_init_clkin_hz(char *buf);
+
 #if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
-	generate_cplb_tables();
+void __init generate_cplb_tables(void)
+{
+	unsigned int cpu;
+
+	generate_cplb_tables_all();
+	/* Generate per-CPU I&D CPLB tables */
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
+		generate_cplb_tables_cpu(cpu);
+}
 #endif
 
+void __cpuinit bfin_setup_caches(unsigned int cpu)
+{
 #ifdef CONFIG_BFIN_ICACHE
-	bfin_icache_init();
-	printk(KERN_INFO "Instruction Cache Enabled\n");
+	bfin_icache_init(icplb_tbl[cpu]);
 #endif
 
 #ifdef CONFIG_BFIN_DCACHE
-	bfin_dcache_init();
-	printk(KERN_INFO "Data Cache Enabled"
+	bfin_dcache_init(dcplb_tbl[cpu]);
+#endif
+
+	/*
+	 * In cache coherence emulation mode, we need to have the
+	 * D-cache enabled before running any atomic operation which
+	 * might involve cache invalidation (e.g. spinlock, rwlock).
+	 * So printk's are deferred until then.
+	 */
+#ifdef CONFIG_BFIN_ICACHE
+	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
+#endif
+#ifdef CONFIG_BFIN_DCACHE
+	printk(KERN_INFO "Data Cache Enabled for CPU%u"
 # if defined CONFIG_BFIN_WB
 		" (write-back)"
 # elif defined CONFIG_BFIN_WT
 		" (write-through)"
 # endif
-		"\n");
+		"\n", cpu);
 #endif
 }
 
+void __cpuinit bfin_setup_cpudata(unsigned int cpu)
+{
+	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
+
+	cpudata->idle = current;
+	cpudata->loops_per_jiffy = loops_per_jiffy;
+	cpudata->imemctl = bfin_read_IMEM_CONTROL();
+	cpudata->dmemctl = bfin_read_DMEM_CONTROL();
+}
+
+void __init bfin_cache_init(void)
+{
+#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
+	generate_cplb_tables();
+#endif
+	bfin_setup_caches(0);
+}
+
 void __init bfin_relocate_l1_mem(void)
 {
 	unsigned long l1_code_length;
@@ -109,6 +150,8 @@
 	unsigned long l1_data_b_length;
 	unsigned long l2_length;
 
+	blackfin_dma_early_init();
+
 	l1_code_length = _etext_l1 - _stext_l1;
 	if (l1_code_length > L1_CODE_LENGTH)
 		panic("L1 Instruction SRAM Overflow\n");
@@ -230,7 +273,7 @@
 	/* record all known change-points (starting and ending addresses),
 	   omitting those that are for empty memory regions */
 	chgidx = 0;
-	for (i = 0; i < old_nr; i++)	{
+	for (i = 0; i < old_nr; i++) {
 		if (map[i].size != 0) {
 			change_point[chgidx]->addr = map[i].addr;
 			change_point[chgidx++]->pentry = &map[i];
@@ -238,13 +281,13 @@
 			change_point[chgidx++]->pentry = &map[i];
 		}
 	}
-	chg_nr = chgidx;    	/* true number of change-points */
+	chg_nr = chgidx;	/* true number of change-points */
 
 	/* sort change-point list by memory addresses (low -> high) */
 	still_changing = 1;
-	while (still_changing)	{
+	while (still_changing) {
 		still_changing = 0;
-		for (i = 1; i < chg_nr; i++)  {
+		for (i = 1; i < chg_nr; i++) {
 			/* if <current_addr> > <last_addr>, swap */
 			/* or, if current=<start_addr> & last=<end_addr>, swap */
 			if ((change_point[i]->addr < change_point[i-1]->addr) ||
@@ -261,10 +304,10 @@
 	}
 
 	/* create a new memmap, removing overlaps */
-	overlap_entries = 0;	 /* number of entries in the overlap table */
-	new_entry = 0;	 /* index for creating new memmap entries */
-	last_type = 0;		 /* start with undefined memory type */
-	last_addr = 0;		 /* start with 0 as last starting address */
+	overlap_entries = 0;	/* number of entries in the overlap table */
+	new_entry = 0;		/* index for creating new memmap entries */
+	last_type = 0;		/* start with undefined memory type */
+	last_addr = 0;		/* start with 0 as last starting address */
 	/* loop through change-points, determining affect on the new memmap */
 	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
 		/* keep track of all overlapping memmap entries */
@@ -286,14 +329,14 @@
 			if (overlap_list[i]->type > current_type)
 				current_type = overlap_list[i]->type;
 		/* continue building up new memmap based on this information */
-		if (current_type != last_type)	{
+		if (current_type != last_type) {
 			if (last_type != 0) {
 				new_map[new_entry].size =
 					change_point[chgidx]->addr - last_addr;
 				/* move forward only if the new size was non-zero */
 				if (new_map[new_entry].size != 0)
 					if (++new_entry >= BFIN_MEMMAP_MAX)
-						break; 	/* no more space left for new entries */
+						break;	/* no more space left for new entries */
 			}
 			if (current_type != 0) {
 				new_map[new_entry].addr = change_point[chgidx]->addr;
@@ -303,9 +346,9 @@
 			last_type = current_type;
 		}
 	}
-	new_nr = new_entry;   /* retain count for new entries */
+	new_nr = new_entry;	/* retain count for new entries */
 
-	/* copy new  mapping into original location */
+	/* copy new mapping into original location */
 	memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
 	*pnr_map = new_nr;
 
@@ -361,7 +404,6 @@
  *  - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
  *       @ from <start> to <start>+<mem>, type RAM
  *       $ from <start> to <start>+<mem>, type RESERVED
- *
  */
 static __init void parse_cmdline_early(char *cmdline_p)
 {
@@ -383,14 +425,15 @@
 					if (*to != ' ') {
 						if (*to == '$'
 						    || *(to + 1) == '$')
-							reserved_mem_dcache_on =
-							    1;
+							reserved_mem_dcache_on = 1;
 						if (*to == '#'
 						    || *(to + 1) == '#')
-							reserved_mem_icache_on =
-							    1;
+							reserved_mem_icache_on = 1;
 					}
 				}
+			} else if (!memcmp(to, "clkin_hz=", 9)) {
+				to += 9;
+				early_init_clkin_hz(to);
 			} else if (!memcmp(to, "earlyprintk=", 12)) {
 				to += 12;
 				setup_early_printk(to);
@@ -417,9 +460,8 @@
  *	[_ramend - DMA_UNCACHED_REGION,
  *		_ramend]:			uncached DMA region
  *  [_ramend, physical_mem_end]:	memory not managed by kernel
- *
  */
-static __init void  memory_setup(void)
+static __init void memory_setup(void)
 {
 #ifdef CONFIG_MTD_UCLINUX
 	unsigned long mtd_phys = 0;
@@ -436,7 +478,7 @@
 	memory_end = _ramend - DMA_UNCACHED_REGION;
 
 #ifdef CONFIG_MPU
-	/* Round up to multiple of 4MB.  */
+	/* Round up to multiple of 4MB */
 	memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
 #else
 	memory_start = PAGE_ALIGN(_ramstart);
@@ -616,7 +658,7 @@
 	end_pfn = memory_end >> PAGE_SHIFT;
 
 	/*
-	 * give all the memory to the bootmap allocator,  tell it to put the
+	 * give all the memory to the bootmap allocator, tell it to put the
 	 * boot mem_map at the start of memory.
 	 */
 	bootmap_size = init_bootmem_node(NODE_DATA(0),
@@ -791,7 +833,11 @@
 	bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
 #endif
 
+#ifdef CONFIG_SMP
+	if (_bfin_swrst & SWRST_DBL_FAULT_A) {
+#else
 	if (_bfin_swrst & RESET_DOUBLE) {
+#endif
 		printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
 #ifdef CONFIG_DEBUG_DOUBLEFAULT
 		/* We assume the crashing kernel, and the current symbol table match */
@@ -823,9 +869,12 @@
 			if (bfin_compiled_revid() == -1)
 				printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
 				       bfin_revid());
-			else if (bfin_compiled_revid() != 0xffff)
+			else if (bfin_compiled_revid() != 0xffff) {
 				printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
 				       bfin_compiled_revid(), bfin_revid());
+				if (bfin_compiled_revid() > bfin_revid())
+					panic("Error: you are missing anomaly workarounds for this rev\n");
+			}
 		}
 		if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
 			printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
@@ -835,7 +884,7 @@
 	printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");
 
 	printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
-	       cclk / 1000000,  sclk / 1000000);
+	       cclk / 1000000, sclk / 1000000);
 
 	if (ANOMALY_05000273 && (cclk >> 1) <= sclk)
 		printk("\n\n\nANOMALY_05000273: CCLK must be >= 2*SCLK !!!\n\n\n");
@@ -867,18 +916,21 @@
 	BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
 		!= SAFE_USER_INSTRUCTION - FIXED_CODE_START);
 
+#ifdef CONFIG_SMP
+	platform_init_cpus();
+#endif
 	init_exception_vectors();
-	bfin_cache_init();
+	bfin_cache_init();	/* Initialize caches for the boot CPU */
 }
 
 static int __init topology_init(void)
 {
-	int cpu;
+	unsigned int cpu;
+	/* Record CPU-private information for the boot processor. */
+	bfin_setup_cpudata(0);
 
 	for_each_possible_cpu(cpu) {
-		struct cpu *c = &per_cpu(cpu_devices, cpu);
-
-		register_cpu(c, cpu);
+		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
 	}
 
 	return 0;
@@ -886,36 +938,54 @@
 
 subsys_initcall(topology_init);
 
+/* Get the input clock frequency */
+static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
+static u_long get_clkin_hz(void)
+{
+	return cached_clkin_hz;
+}
+static int __init early_init_clkin_hz(char *buf)
+{
+	cached_clkin_hz = simple_strtoul(buf, NULL, 0);
+#ifdef BFIN_KERNEL_CLOCK
+	if (cached_clkin_hz != CONFIG_CLKIN_HZ)
+		panic("cannot change clkin_hz when reprogramming clocks");
+#endif
+	return 1;
+}
+early_param("clkin_hz=", early_init_clkin_hz);
+
 /* Get the voltage input multiplier */
-static u_long cached_vco_pll_ctl, cached_vco;
 static u_long get_vco(void)
 {
-	u_long msel;
+	static u_long cached_vco;
+	u_long msel, pll_ctl;
 
-	u_long pll_ctl = bfin_read_PLL_CTL();
-	if (pll_ctl == cached_vco_pll_ctl)
+	/* The assumption here is that VCO never changes at runtime.
+	 * If, someday, we support that, then we'll have to change this.
+	 */
+	if (cached_vco)
 		return cached_vco;
-	else
-		cached_vco_pll_ctl = pll_ctl;
 
+	pll_ctl = bfin_read_PLL_CTL();
 	msel = (pll_ctl >> 9) & 0x3F;
 	if (0 == msel)
 		msel = 64;
 
-	cached_vco = CONFIG_CLKIN_HZ;
+	cached_vco = get_clkin_hz();
 	cached_vco >>= (1 & pll_ctl);	/* DF bit */
 	cached_vco *= msel;
 	return cached_vco;
 }
 
 /* Get the Core clock */
-static u_long cached_cclk_pll_div, cached_cclk;
 u_long get_cclk(void)
 {
+	static u_long cached_cclk_pll_div, cached_cclk;
 	u_long csel, ssel;
 
 	if (bfin_read_PLL_STAT() & 0x1)
-		return CONFIG_CLKIN_HZ;
+		return get_clkin_hz();
 
 	ssel = bfin_read_PLL_DIV();
 	if (ssel == cached_cclk_pll_div)
@@ -934,21 +1004,21 @@
 EXPORT_SYMBOL(get_cclk);
 
 /* Get the System clock */
-static u_long cached_sclk_pll_div, cached_sclk;
 u_long get_sclk(void)
 {
+	static u_long cached_sclk;
 	u_long ssel;
 
-	if (bfin_read_PLL_STAT() & 0x1)
-		return CONFIG_CLKIN_HZ;
-
-	ssel = bfin_read_PLL_DIV();
-	if (ssel == cached_sclk_pll_div)
+	/* The assumption here is that SCLK never changes at runtime.
+	 * If, someday, we support that, then we'll have to change this.
+	 */
+	if (cached_sclk)
 		return cached_sclk;
-	else
-		cached_sclk_pll_div = ssel;
 
-	ssel &= 0xf;
+	if (bfin_read_PLL_STAT() & 0x1)
+		return get_clkin_hz();
+
+	ssel = bfin_read_PLL_DIV() & 0xf;
 	if (0 == ssel) {
 		printk(KERN_WARNING "Invalid System Clock\n");
 		ssel = 1;
@@ -982,17 +1052,18 @@
 {
 	char *cpu, *mmu, *fpu, *vendor, *cache;
 	uint32_t revid;
-
-	u_long cclk = 0, sclk = 0;
+	int cpu_num = *(unsigned int *)v;
+	u_long sclk, cclk;
 	u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
+	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);
 
 	cpu = CPU;
 	mmu = "none";
 	fpu = "none";
 	revid = bfin_revid();
 
-	cclk = get_cclk();
 	sclk = get_sclk();
+	cclk = get_cclk();
 
 	switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
 	case 0xca:
@@ -1003,10 +1074,7 @@
 		break;
 	}
 
-	seq_printf(m, "processor\t: %d\n"
-		"vendor_id\t: %s\n",
-		*(unsigned int *)v,
-		vendor);
+	seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);
 
 	if (CPUID == bfin_cpuid())
 		seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
@@ -1029,12 +1097,12 @@
 		sclk/1000000, sclk%1000000);
 	seq_printf(m, "bogomips\t: %lu.%02lu\n"
 		"Calibration\t: %lu loops\n",
-		(loops_per_jiffy * HZ) / 500000,
-		((loops_per_jiffy * HZ) / 5000) % 100,
-		(loops_per_jiffy * HZ));
+		(cpudata->loops_per_jiffy * HZ) / 500000,
+		((cpudata->loops_per_jiffy * HZ) / 5000) % 100,
+		(cpudata->loops_per_jiffy * HZ));
 
 	/* Check Cache configutation */
-	switch (bfin_read_DMEM_CONTROL() & (1 << DMC0_P | 1 << DMC1_P)) {
+	switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
 	case ACACHE_BSRAM:
 		cache = "dbank-A/B\t: cache/sram";
 		dcache_size = 16;
@@ -1058,10 +1126,10 @@
 	}
 
 	/* Is it turned on? */
-	if ((bfin_read_DMEM_CONTROL() & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
+	if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
 		dcache_size = 0;
 
-	if ((bfin_read_IMEM_CONTROL() & (IMC | ENICPLB)) != (IMC | ENICPLB))
+	if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
 		icache_size = 0;
 
 	seq_printf(m, "cache size\t: %d KB(L1 icache) "
@@ -1086,8 +1154,11 @@
 		   "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
 		   dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
 		   BFIN_DLINES);
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count);
+#endif
 #ifdef CONFIG_BFIN_ICACHE_LOCK
-	switch ((bfin_read_IMEM_CONTROL() >> 3) & WAYALL_L) {
+	switch ((cpudata->imemctl >> 3) & WAYALL_L) {
 	case WAY0_L:
 		seq_printf(m, "Way0 Locked-Down\n");
 		break;
@@ -1137,6 +1208,12 @@
 		seq_printf(m, "No Ways are locked\n");
 	}
 #endif
+
+	if (cpu_num != num_possible_cpus() - 1)
+		return 0;
+
+	if (L2_LENGTH)
+		seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
 	seq_printf(m, "board name\t: %s\n", bfin_board_name);
 	seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
 		 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
@@ -1144,6 +1221,7 @@
 		((int)memory_end - (int)_stext) >> 10,
 		_stext,
 		(void *)memory_end);
+	seq_printf(m, "\n");
 
 	return 0;
 }
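
The clock bookkeeping reworked above reduces to a small formula: VCO is CLKIN, optionally halved by the DF bit, times MSEL, and get_cclk()/get_sclk() then divide that down or fall back to CLKIN when the PLL is bypassed. A plain C restatement of the VCO part (model_vco() is an illustrative name, not kernel code):

	static unsigned long model_vco(unsigned long clkin_hz, unsigned long pll_ctl)
	{
		unsigned long msel = (pll_ctl >> 9) & 0x3F;	/* MSEL field, bits 9..14 */

		if (msel == 0)
			msel = 64;	/* a zero MSEL field means multiply by 64 */
		return (clkin_hz >> (pll_ctl & 1)) * msel;	/* DF bit halves CLKIN */
	}
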
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index eb23523..172b4c58 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -1,32 +1,11 @@
 /*
- * File:         arch/blackfin/kernel/time.c
- * Based on:     none - original work
- * Author:
+ * arch/blackfin/kernel/time.c
  *
- * Created:
- * Description:  This file contains the bfin-specific time handling details.
- *               Most of the stuff is located in the machine specific files.
- *		 FIXME: (This file is subject for removal)
+ * This file contains the Blackfin-specific time handling details.
+ * Most of the stuff is located in the machine specific files.
  *
- * Modified:
- *               Copyright 2004-2008 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ * Copyright 2004-2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
  */
 
 #include <linux/module.h>
@@ -34,23 +13,43 @@
 #include <linux/interrupt.h>
 #include <linux/time.h>
 #include <linux/irq.h>
+#include <linux/delay.h>
 
 #include <asm/blackfin.h>
 #include <asm/time.h>
+#include <asm/gptimers.h>
 
 /* This is an NTP setting */
 #define	TICK_SIZE (tick_nsec / 1000)
 
-static void time_sched_init(irq_handler_t timer_routine);
-static unsigned long gettimeoffset(void);
-
 static struct irqaction bfin_timer_irq = {
-	.name = "BFIN Timer Tick",
+	.name = "Blackfin Timer Tick",
+#ifdef CONFIG_IRQ_PER_CPU
+	.flags = IRQF_DISABLED | IRQF_PERCPU,
+#else
 	.flags = IRQF_DISABLED
+#endif
 };
 
-static void
-time_sched_init(irq_handler_t timer_routine)
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
+void __init setup_system_timer0(void)
+{
+	/* Power down the core timer, just to be safe. */
+	bfin_write_TCNTL(0);
+
+	disable_gptimers(TIMER0bit);
+	set_gptimer_status(0, TIMER_STATUS_TRUN0);
+	while (get_gptimer_status(0) & TIMER_STATUS_TRUN0)
+		udelay(10);
+
+	set_gptimer_config(0, 0x59); /* IRQ enable, periodic, PWM_OUT, SCLKed, OUT PAD disabled */
+	set_gptimer_period(TIMER0_id, get_sclk() / HZ);
+	set_gptimer_pwidth(TIMER0_id, 1);
+	SSYNC();
+	enable_gptimers(TIMER0bit);
+}
+#else
+void __init setup_core_timer(void)
 {
 	u32 tcount;
 
@@ -58,10 +57,8 @@
 	bfin_write_TCNTL(1);
 	CSYNC();
 
-	/*
-	 * the TSCALE prescaler counter.
-	 */
-	bfin_write_TSCALE((TIME_SCALE - 1));
+	/* the TSCALE prescaler counter */
+	bfin_write_TSCALE(TIME_SCALE - 1);
 
 	tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
 	bfin_write_TPERIOD(tcount);
@@ -71,35 +68,52 @@
 	CSYNC();
 
 	bfin_write_TCNTL(7);
+}
+#endif
 
-	bfin_timer_irq.handler = (irq_handler_t)timer_routine;
-	/* call setup_irq instead of request_irq because request_irq calls
-	 * kmalloc which has not been initialized yet
-	 */
+static void __init
+time_sched_init(irqreturn_t(*timer_routine) (int, void *))
+{
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
+	setup_system_timer0();
+	bfin_timer_irq.handler = timer_routine;
+	setup_irq(IRQ_TIMER0, &bfin_timer_irq);
+#else
+	setup_core_timer();
+	bfin_timer_irq.handler = timer_routine;
 	setup_irq(IRQ_CORETMR, &bfin_timer_irq);
+#endif
 }
 
 /*
  * Should return useconds since last timer tick
  */
+#ifndef CONFIG_GENERIC_TIME
 static unsigned long gettimeoffset(void)
 {
 	unsigned long offset;
 	unsigned long clocks_per_jiffy;
 
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
+	clocks_per_jiffy = bfin_read_TIMER0_PERIOD();
+	offset = bfin_read_TIMER0_COUNTER() / \
+		(((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);
+
+	if ((get_gptimer_status(0) & TIMER_STATUS_TIMIL0) && offset < (100000 / HZ / 2))
+		offset += (USEC_PER_SEC / HZ);
+#else
 	clocks_per_jiffy = bfin_read_TPERIOD();
-	offset =
-	    (clocks_per_jiffy -
-	     bfin_read_TCOUNT()) / (((clocks_per_jiffy + 1) * HZ) /
-				    USEC_PER_SEC);
+	offset = (clocks_per_jiffy - bfin_read_TCOUNT()) / \
+		(((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);
 
 	/* Check if we just wrapped the counters and maybe missed a tick */
 	if ((bfin_read_ILAT() & (1 << IRQ_CORETMR))
-	    && (offset < (100000 / HZ / 2)))
+		&& (offset < (100000 / HZ / 2)))
 		offset += (USEC_PER_SEC / HZ);
-
+#endif
 	return offset;
 }
+#endif
 
 static inline int set_rtc_mmss(unsigned long nowtime)
 {
@@ -111,43 +125,49 @@
  * as well as call the "do_timer()" routine every clocktick
  */
 #ifdef CONFIG_CORE_TIMER_IRQ_L1
-irqreturn_t timer_interrupt(int irq, void *dummy)__attribute__((l1_text));
+__attribute__((l1_text))
 #endif
-
 irqreturn_t timer_interrupt(int irq, void *dummy)
 {
 	/* last time the cmos clock got updated */
 	static long last_rtc_update;
 
 	write_seqlock(&xtime_lock);
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
+/* FIXME: TIMIL0 is not set here when IPIPE is enabled -- why? */
+	if (get_gptimer_status(0) & TIMER_STATUS_TIMIL0) {
+#endif
+		do_timer(1);
 
-	do_timer(1);
-
-	profile_tick(CPU_PROFILING);
-
-	/*
-	 * If we have an externally synchronized Linux clock, then update
-	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
-	 * called as close as possible to 500 ms before the new second starts.
-	 */
-
-	if (ntp_synced() &&
-	    xtime.tv_sec > last_rtc_update + 660 &&
-	    (xtime.tv_nsec / NSEC_PER_USEC) >=
-	    500000 - ((unsigned)TICK_SIZE) / 2
-	    && (xtime.tv_nsec / NSEC_PER_USEC) <=
-	    500000 + ((unsigned)TICK_SIZE) / 2) {
-		if (set_rtc_mmss(xtime.tv_sec) == 0)
-			last_rtc_update = xtime.tv_sec;
-		else
-			/* Do it again in 60s. */
-			last_rtc_update = xtime.tv_sec - 600;
+		/*
+		 * If we have an externally synchronized Linux clock, then update
+		 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+		 * called as close as possible to 500 ms before the new second starts.
+		 */
+		if (ntp_synced() &&
+		    xtime.tv_sec > last_rtc_update + 660 &&
+		    (xtime.tv_nsec / NSEC_PER_USEC) >=
+		    500000 - ((unsigned)TICK_SIZE) / 2
+		    && (xtime.tv_nsec / NSEC_PER_USEC) <=
+		    500000 + ((unsigned)TICK_SIZE) / 2) {
+			if (set_rtc_mmss(xtime.tv_sec) == 0)
+				last_rtc_update = xtime.tv_sec;
+			else
+				/* Do it again in 60s. */
+				last_rtc_update = xtime.tv_sec - 600;
+		}
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
+		set_gptimer_status(0, TIMER_STATUS_TIMIL0);
 	}
+#endif
 	write_sequnlock(&xtime_lock);
 
-#ifndef CONFIG_SMP
+#ifdef CONFIG_IPIPE
+	update_root_process_times(get_irq_regs());
+#else
 	update_process_times(user_mode(get_irq_regs()));
 #endif
+	profile_tick(CPU_PROFILING);
 
 	return IRQ_HANDLED;
 }
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index bef025b..5b0667d 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/fs.h>
+#include <linux/rbtree.h>
 #include <asm/traps.h>
 #include <asm/cacheflush.h>
 #include <asm/cplb.h>
@@ -75,16 +76,6 @@
 	CSYNC();
 }
 
-/*
- * Used to save the RETX, SEQSTAT, I/D CPLB FAULT ADDR
- * values across the transition from exception to IRQ5.
- * We put these in L1, so they are going to be in a valid
- * location during exception context
- */
-__attribute__((l1_data))
-unsigned long saved_retx, saved_seqstat,
-	saved_icplb_fault_addr, saved_dcplb_fault_addr;
-
 static void decode_address(char *buf, unsigned long address)
 {
 #ifdef CONFIG_DEBUG_VERBOSE
@@ -93,6 +84,7 @@
 	struct mm_struct *mm;
 	unsigned long flags, offset;
 	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
+	struct rb_node *n;
 
 #ifdef CONFIG_KALLSYMS
 	unsigned long symsize;
@@ -138,9 +130,10 @@
 		if (!mm)
 			continue;
 
-		vml = mm->context.vmlist;
-		while (vml) {
-			struct vm_area_struct *vma = vml->vma;
+		for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
+			struct vm_area_struct *vma;
+
+			vma = rb_entry(n, struct vm_area_struct, vm_rb);
 
 			if (address >= vma->vm_start && address < vma->vm_end) {
 				char _tmpbuf[256];
@@ -186,8 +179,6 @@
 
 				goto done;
 			}
-
-			vml = vml->next;
 		}
 		if (!in_atomic)
 			mmput(mm);
@@ -211,18 +202,18 @@
 	printk(KERN_EMERG "\n" KERN_EMERG "Double Fault\n");
 #ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
 	if (((long)fp->seqstat &  SEQSTAT_EXCAUSE) == VEC_UNCOV) {
+		unsigned int cpu = smp_processor_id();
 		char buf[150];
-		decode_address(buf, saved_retx);
+		decode_address(buf, cpu_pda[cpu].retx);
 		printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n",
-			(int)saved_seqstat & SEQSTAT_EXCAUSE, buf);
-		decode_address(buf, saved_dcplb_fault_addr);
+			(unsigned int)cpu_pda[cpu].seqstat & SEQSTAT_EXCAUSE, buf);
+		decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
 		printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %s\n", buf);
-		decode_address(buf, saved_icplb_fault_addr);
+		decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
 		printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %s\n", buf);
 
 		decode_address(buf, fp->retx);
-		printk(KERN_NOTICE "The instruction at %s caused a double exception\n",
-			buf);
+		printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf);
 	} else
 #endif
 	{
@@ -240,6 +231,9 @@
 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
 	int j;
 #endif
+#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
+	unsigned int cpu = smp_processor_id();
+#endif
 	int sig = 0;
 	siginfo_t info;
 	unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE;
@@ -417,7 +411,7 @@
 		info.si_code = ILL_CPLB_MULHIT;
 		sig = SIGSEGV;
 #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
-		if (saved_dcplb_fault_addr < FIXED_CODE_START)
+		if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START)
 			verbose_printk(KERN_NOTICE "NULL pointer access\n");
 		else
 #endif
@@ -471,7 +465,7 @@
 		info.si_code = ILL_CPLB_MULHIT;
 		sig = SIGSEGV;
 #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
-		if (saved_icplb_fault_addr < FIXED_CODE_START)
+		if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START)
 			verbose_printk(KERN_NOTICE "Jump to NULL address\n");
 		else
 #endif
@@ -584,10 +578,15 @@
 		}
 	}
 
-	info.si_signo = sig;
-	info.si_errno = 0;
-	info.si_addr = (void __user *)fp->pc;
-	force_sig_info(sig, &info, current);
+#ifdef CONFIG_IPIPE
+	if (!ipipe_trap_notify(fp->seqstat & 0x3f, fp))
+#endif
+	{
+		info.si_signo = sig;
+		info.si_errno = 0;
+		info.si_addr = (void __user *)fp->pc;
+		force_sig_info(sig, &info, current);
+	}
 
 	trace_buffer_restore(j);
 	return;
@@ -656,13 +655,13 @@
 	return false;
 }
 
-/* 
+/*
  * decode the instruction if we are printing out the trace, as it
  * makes things easier to follow, without running it through objdump
  * These are the normal instructions which cause change of flow, which
  * would be at the source of the trace buffer
  */
-#ifdef CONFIG_DEBUG_VERBOSE
+#if defined(CONFIG_DEBUG_VERBOSE) && defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
 static void decode_instruction(unsigned short *address)
 {
 	unsigned short opcode;
@@ -846,7 +845,7 @@
 	}
 	if (fp) {
 		frame = fp;
-		printk(" FP: (0x%p)\n", fp);
+		printk(KERN_NOTICE " FP: (0x%p)\n", fp);
 	} else
 		frame = 0;
 
@@ -960,6 +959,7 @@
 		else
 			verbose_printk(KERN_NOTICE "COMM= invalid\n");
 
+		printk(KERN_NOTICE "CPU = %d\n", current_thread_info()->cpu);
 		if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START)
 			verbose_printk(KERN_NOTICE  "TEXT = 0x%p-0x%p        DATA = 0x%p-0x%p\n"
 				KERN_NOTICE " BSS = 0x%p-0x%p  USER-STACK = 0x%p\n"
@@ -1053,6 +1053,7 @@
 	struct irqaction *action;
 	unsigned int i;
 	unsigned long flags;
+	unsigned int cpu = smp_processor_id();
 
 	verbose_printk(KERN_NOTICE "\n" KERN_NOTICE "SEQUENCER STATUS:\t\t%s\n", print_tainted());
 	verbose_printk(KERN_NOTICE " SEQSTAT: %08lx  IPEND: %04lx  SYSCFG: %04lx\n",
@@ -1112,9 +1113,9 @@
 
 	if (((long)fp->seqstat &  SEQSTAT_EXCAUSE) &&
 	    (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
-		decode_address(buf, saved_dcplb_fault_addr);
+		decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
 		verbose_printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf);
-		decode_address(buf, saved_icplb_fault_addr);
+		decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
 		verbose_printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf);
 	}
 
@@ -1153,20 +1154,21 @@
 asmlinkage int sys_bfin_spinlock(int *spinlock)__attribute__((l1_text));
 #endif
 
-asmlinkage int sys_bfin_spinlock(int *spinlock)
-{
-	int ret = 0;
-	int tmp = 0;
+static DEFINE_SPINLOCK(bfin_spinlock_lock);
 
-	local_irq_disable();
-	ret = get_user(tmp, spinlock);
-	if (ret == 0) {
-		if (tmp)
+asmlinkage int sys_bfin_spinlock(int *p)
+{
+	int ret, tmp = 0;
+
+	spin_lock(&bfin_spinlock_lock);	/* This also disables kernel preemption. */
+	ret = get_user(tmp, p);
+	if (likely(ret == 0)) {
+		if (unlikely(tmp))
 			ret = 1;
-		tmp = 1;
-		put_user(tmp, spinlock);
+		else
+			put_user(1, p);
 	}
-	local_irq_enable();
+	spin_unlock(&bfin_spinlock_lock);
 	return ret;
 }
 
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 7d12c66..4b4341d 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -68,6 +68,8 @@
 		__etext = .;
 	}
 
+	NOTES
+
 	/* Just in case the first read only is a 32-bit access */
 	RO_DATA(4)
 
@@ -109,7 +111,6 @@
 #endif
 
 		DATA_DATA
-		*(.data.*)
 		CONSTRUCTORS
 
 		/* make sure the init_task is aligned to the
@@ -161,12 +162,14 @@
 		*(.con_initcall.init)
 		___con_initcall_end = .;
 	}
+	PERCPU(4)
 	SECURITY_INIT
 	.init.ramfs :
 	{
 		. = ALIGN(4);
 		___initramfs_start = .;
 		*(.init.ramfs)
+		. = ALIGN(4);
 		___initramfs_end = .;
 	}
 
@@ -212,7 +215,7 @@
 		__ebss_b_l1 = .;
 	}
 
-	__l2_lma_start = .;
+	__l2_lma_start = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);
 
 	.text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1))
 	{
@@ -240,7 +243,7 @@
 	/* Force trailing alignment of our init section so that when we
 	 * free our init memory, we don't leave behind a partial page.
 	 */
-	. = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);
+	. = LOADADDR(.text_data_l2) + SIZEOF(.text_data_l2);
 	. = ALIGN(PAGE_SIZE);
 	___init_end = .;
 
diff --git a/arch/blackfin/lib/checksum.c b/arch/blackfin/lib/checksum.c
index 5c87505..762a7f0 100644
--- a/arch/blackfin/lib/checksum.c
+++ b/arch/blackfin/lib/checksum.c
@@ -29,6 +29,7 @@
  * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
+#include <linux/module.h>
 #include <net/checksum.h>
 #include <asm/checksum.h>
 
@@ -76,6 +77,7 @@
 {
 	return (__force __sum16)~do_csum(iph, ihl * 4);
 }
+EXPORT_SYMBOL(ip_fast_csum);
 
 /*
  * computes the checksum of a memory block at buff, length len,
@@ -104,6 +106,7 @@
 
 	return sum;
 }
+EXPORT_SYMBOL(csum_partial);
 
 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
@@ -137,3 +140,4 @@
 	memcpy(dst, src, len);
 	return csum_partial(dst, len, sum);
 }
+EXPORT_SYMBOL(csum_partial_copy);
diff --git a/arch/blackfin/lib/ins.S b/arch/blackfin/lib/ins.S
index d60554d..1863a6b 100644
--- a/arch/blackfin/lib/ins.S
+++ b/arch/blackfin/lib/ins.S
@@ -1,31 +1,9 @@
 /*
- * File:         arch/blackfin/lib/ins.S
- * Based on:
- * Author:       Bas Vermeulen <bas@buyways.nl>
+ * arch/blackfin/lib/ins.S - ins{bwl} using hardware loops
  *
- * Created:      Tue Mar 22 15:27:24 CEST 2005
- * Description:  Implementation of ins{bwl} for BlackFin processors using zero overhead loops.
- *
- * Modified:
- *               Copyright 2004-2008 Analog Devices Inc.
- *               Copyright (C) 2005 Bas Vermeulen, BuyWays BV <bas@buyways.nl>
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ * Copyright 2004-2008 Analog Devices Inc.
+ * Copyright (C) 2005 Bas Vermeulen, BuyWays BV <bas@buyways.nl>
+ * Licensed under the GPL-2 or later.
  */
 
 #include <linux/linkage.h>
@@ -33,6 +11,46 @@
 
 .align 2
 
+#ifdef CONFIG_IPIPE
+# define DO_CLI \
+	[--sp] = rets; \
+	[--sp] = (P5:0); \
+	sp += -12; \
+	call ___ipipe_stall_root_raw; \
+	sp += 12; \
+	(P5:0) = [sp++];
+# define CLI_INNER_NOP
+#else
+# define DO_CLI cli R3;
+# define CLI_INNER_NOP nop; nop; nop;
+#endif
+
+#ifdef CONFIG_IPIPE
+# define DO_STI \
+	sp += -12; \
+	call ___ipipe_unstall_root_raw; \
+	sp += 12; \
+2:	rets = [sp++];
+#else
+# define DO_STI 2: sti R3;
+#endif
+
+#ifdef CONFIG_BFIN_INS_LOWOVERHEAD
+# define CLI_OUTER DO_CLI;
+# define STI_OUTER DO_STI;
+# define CLI_INNER 1:
+# if ANOMALY_05000416
+#  define STI_INNER nop; 2: nop;
+# else
+#  define STI_INNER 2:
+# endif
+#else
+# define CLI_OUTER
+# define STI_OUTER
+# define CLI_INNER 1: DO_CLI; CLI_INNER_NOP;
+# define STI_INNER DO_STI;
+#endif
+
 /*
  * Reads on the Blackfin are speculative. In Blackfin terms, this means they
  * can be interrupted at any time (even after they have been issued on to the
@@ -53,170 +71,48 @@
  * buffers in/out of FIFOs.
  */
 
-ENTRY(_insl)
-#ifdef CONFIG_BFIN_INS_LOWOVERHEAD
-	P0 = R0;	/* P0 = port */
-	cli R3;
-	P1 = R1;	/* P1 = address */
-	P2 = R2;	/* P2 = count */
-	SSYNC;
-	LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2;
-.Llong_loop_s:  R0 = [P0];
-		[P1++] = R0;
-		NOP;
-.Llong_loop_e: 	NOP;
-	sti R3;
-	RTS;
-#else
-	P0 = R0;	/* P0 = port */
-	P1 = R1;	/* P1 = address */
-	P2 = R2;	/* P2 = count */
-	SSYNC;
-	LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2;
-.Llong_loop_s:
-	CLI R3;
-	NOP; NOP; NOP;
-	R0 = [P0];
-	[P1++] = R0;
-.Llong_loop_e:
-	STI R3;
+#define COMMON_INS(func, ops) \
+ENTRY(_ins##func) \
+	P0 = R0;	/* P0 = port */ \
+	CLI_OUTER;	/* 3 instructions before first read access */ \
+	P1 = R1;	/* P1 = address */ \
+	P2 = R2;	/* P2 = count */ \
+	SSYNC; \
+ \
+	LSETUP(1f, 2f) LC0 = P2; \
+	CLI_INNER; \
+	ops; \
+	STI_INNER; \
+ \
+	STI_OUTER; \
+	RTS; \
+ENDPROC(_ins##func)
 
-	RTS;
-#endif
-ENDPROC(_insl)
+COMMON_INS(l, \
+	R0 = [P0]; \
+	[P1++] = R0; \
+)
 
-ENTRY(_insw)
-#ifdef CONFIG_BFIN_INS_LOWOVERHEAD
-	P0 = R0;	/* P0 = port */
-	cli R3;
-	P1 = R1;	/* P1 = address */
-	P2 = R2;	/* P2 = count */
-	SSYNC;
-	LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2;
-.Lword_loop_s:  R0 = W[P0];
-		W[P1++] = R0;
-		NOP;
-.Lword_loop_e: 	NOP;
-	sti R3;
-	RTS;
-#else
-	P0 = R0;	/* P0 = port */
-	P1 = R1;	/* P1 = address */
-	P2 = R2;	/* P2 = count */
-	SSYNC;
-	LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2;
-.Lword_loop_s:
-	CLI R3;
-	NOP; NOP; NOP;
-	R0 = W[P0];
-	W[P1++] = R0;
-.Lword_loop_e:
-	STI R3;
-	RTS;
+COMMON_INS(w, \
+	R0 = W[P0]; \
+	W[P1++] = R0; \
+)
 
-#endif
-ENDPROC(_insw)
+COMMON_INS(w_8, \
+	R0 = W[P0]; \
+	B[P1++] = R0; \
+	R0 = R0 >> 8; \
+	B[P1++] = R0; \
+)
 
-ENTRY(_insw_8)
-#ifdef CONFIG_BFIN_INS_LOWOVERHEAD
-	P0 = R0;	/* P0 = port */
-	cli R3;
-	P1 = R1;	/* P1 = address */
-	P2 = R2;	/* P2 = count */
-	SSYNC;
-	LSETUP( .Lword8_loop_s, .Lword8_loop_e) LC0 = P2;
-.Lword8_loop_s:  R0 = W[P0];
-		B[P1++] = R0;
-		R0 = R0 >> 8;
-		B[P1++] = R0;
-		NOP;
-.Lword8_loop_e: NOP;
-	sti R3;
-	RTS;
-#else
-	P0 = R0;	/* P0 = port */
-	P1 = R1;	/* P1 = address */
-	P2 = R2;	/* P2 = count */
-	SSYNC;
-	LSETUP( .Lword8_loop_s, .Lword8_loop_e) LC0 = P2;
-.Lword8_loop_s:
-	CLI R3;
-	NOP; NOP; NOP;
-	R0 = W[P0];
-	B[P1++] = R0;
-	R0 = R0 >> 8;
-	B[P1++] = R0;
-	NOP;
-.Lword8_loop_e:
-	STI R3;
+COMMON_INS(b, \
+	R0 = B[P0]; \
+	B[P1++] = R0; \
+)
 
-	RTS;
-#endif
-ENDPROC(_insw_8)
-
-ENTRY(_insb)
-#ifdef CONFIG_BFIN_INS_LOWOVERHEAD
-	P0 = R0;	/* P0 = port */
-	cli R3;
-	P1 = R1;	/* P1 = address */
-	P2 = R2;	/* P2 = count */
-	SSYNC;
-	LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2;
-.Lbyte_loop_s:  R0 = B[P0];
-		B[P1++] = R0;
-		NOP;
-.Lbyte_loop_e:  NOP;
-	sti R3;
-	RTS;
-#else
-	P0 = R0;        /* P0 = port */
-	P1 = R1;        /* P1 = address */
-	P2 = R2;        /* P2 = count */
-	SSYNC;
-	LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2;
-.Lbyte_loop_s:
-	CLI R3;
-	NOP; NOP; NOP;
-	R0 = B[P0];
-	B[P1++] = R0;
-.Lbyte_loop_e:
-	STI R3;
-
-	RTS;
-#endif
-ENDPROC(_insb)
-
-ENTRY(_insl_16)
-#ifdef CONFIG_BFIN_INS_LOWOVERHEAD
-	P0 = R0;	/* P0 = port */
-	cli R3;
-	P1 = R1;	/* P1 = address */
-	P2 = R2;	/* P2 = count */
-	SSYNC;
-	LSETUP( .Llong16_loop_s, .Llong16_loop_e) LC0 = P2;
-.Llong16_loop_s:  R0 = [P0];
-		  W[P1++] = R0;
-		  R0 = R0 >> 16;
-		  W[P1++] = R0;
-		  NOP;
-.Llong16_loop_e:  NOP;
-	sti R3;
-	RTS;
-#else
-	P0 = R0;	/* P0 = port */
-	P1 = R1;	/* P1 = address */
-	P2 = R2;	/* P2 = count */
-	SSYNC;
-	LSETUP( .Llong16_loop_s, .Llong16_loop_e) LC0 = P2;
-.Llong16_loop_s:
-	CLI R3;
-	NOP; NOP; NOP;
-	R0 = [P0];
-	W[P1++] = R0;
-	R0 = R0 >> 16;
-	W[P1++] = R0;
-.Llong16_loop_e:
-	STI R3;
-	RTS;
-#endif
-ENDPROC(_insl_16)
+COMMON_INS(l_16, \
+	R0 = [P0]; \
+	W[P1++] = R0; \
+	R0 = R0 >> 16; \
+	W[P1++] = R0; \
+)
diff --git a/arch/blackfin/lib/muldi3.S b/arch/blackfin/lib/muldi3.S
new file mode 100644
index 0000000..abde120
--- /dev/null
+++ b/arch/blackfin/lib/muldi3.S
@@ -0,0 +1,68 @@
+.align 2
+.global ___muldi3;
+.type ___muldi3, STT_FUNC;
+
+#ifdef CONFIG_ARITHMETIC_OPS_L1
+.section .l1.text
+#else
+.text
+#endif
+
+/*
+	   R1:R0 * R3:R2
+	 = R1.h:R1.l:R0.h:R0.l * R3.h:R3.l:R2.h:R2.l
+[X]	 = (R1.h * R3.h) * 2^96
+[X]	   + (R1.h * R3.l + R1.l * R3.h) * 2^80
+[X]	   + (R1.h * R2.h + R1.l * R3.l + R3.h * R0.h) * 2^64
+[T1]	   + (R1.h * R2.l + R3.h * R0.l + R1.l * R2.h + R3.l * R0.h) * 2^48
+[T2]	   + (R1.l * R2.l + R3.l * R0.l + R0.h * R2.h) * 2^32
+[T3]	   + (R0.l * R2.h + R2.l * R0.h) * 2^16
+[T4]	   + (R0.l * R2.l)
+
+	We can discard the first three lines marked "X" since we produce
+	only a 64 bit result.  So, we need ten 16-bit multiplies.
+
+	Individual mul-acc results:
+[E1]	 =  R1.h * R2.l + R3.h * R0.l + R1.l * R2.h + R3.l * R0.h
+[E2]	 =  R1.l * R2.l + R3.l * R0.l + R0.h * R2.h
+[E3]	 =  R0.l * R2.h + R2.l * R0.h
+[E4]	 =  R0.l * R2.l
+
+	We also need to add high parts from lower-level results to higher ones:
+	E[n]c = E[n] + (E[n+1]c >> 16), where E4c := E4
+
+	One interesting property is that all parts of the result that depend
+	on the sign of the multiplication are discarded.  Those would be the
+	multiplications involving R1.h and R3.h, but only the top 16 bit of
+	the 32 bit result depend on the sign, and since R1.h and R3.h only
+	occur in E1, the top half of these results is cut off.
+	So, we can just use FU mode for all of the 16-bit multiplies, and
+	ignore questions of when to use mixed mode.  */
+
+___muldi3:
+	/* [SP] technically is part of the caller's frame, but we can
+	   use it as scratch space.  */
+	A0 = R2.H * R1.L, A1 = R2.L * R1.H (FU) || R3 = [SP + 12];	/* E1 */
+	A0 += R3.H * R0.L, A1 += R3.L * R0.H (FU) || [SP] = R4;		/* E1 */
+	A0 += A1;							/* E1 */
+	R4 = A0.w;
+	A0 = R0.l * R3.l (FU);						/* E2 */
+	A0 += R2.l * R1.l (FU);						/* E2 */
+
+	A1 = R2.L * R0.L (FU);						/* E4 */
+	R3 = A1.w;
+	A1 = A1 >> 16;							/* E3c */
+	A0 += R2.H * R0.H, A1 += R2.L * R0.H (FU);			/* E2, E3c */
+	A1 += R0.L * R2.H (FU);						/* E3c */
+	R0 = A1.w;
+	A1 = A1 >> 16;							/* E2c */
+	A0 += A1;							/* E2c */
+	R1 = A0.w;
+
+	/* low(result) = low(E3c):low(E4) */
+	R0 = PACK (R0.l, R3.l);
+	/* high(result) = E2c + (E1 << 16) */
+	R1.h = R1.h + R4.l (NS) || R4 = [SP];
+	RTS;
+
+.size ___muldi3, .-___muldi3
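
The partial-product bookkeeping described in the comment block above can be cross-checked against a plain C model that, like the assembly, keeps only the low 64 bits of the product (a sketch, not part of the patch):

	#include <stdint.h>

	static uint64_t muldi3_model(uint64_t u, uint64_t v)
	{
		uint32_t ul = (uint32_t)u, uh = (uint32_t)(u >> 32);
		uint32_t vl = (uint32_t)v, vh = (uint32_t)(v >> 32);
		uint64_t low   = (uint64_t)ul * vl;
		/* the sign-dependent high terms (the lines marked "X" above) never
		 * reach the low 64 bits, so unsigned cross products suffice */
		uint64_t cross = (uint64_t)ul * vh + (uint64_t)uh * vl;

		return low + (cross << 32);
	}
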
diff --git a/arch/blackfin/lib/muldi3.c b/arch/blackfin/lib/muldi3.c
deleted file mode 100644
index 303d0c6..0000000
--- a/arch/blackfin/lib/muldi3.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * File:         arch/blackfin/lib/muldi3.c
- * Based on:
- * Author:
- *
- * Created:
- * Description:
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#ifndef SI_TYPE_SIZE
-#define SI_TYPE_SIZE 32
-#endif
-#define __ll_b (1L << (SI_TYPE_SIZE / 2))
-#define __ll_lowpart(t) ((usitype) (t) % __ll_b)
-#define __ll_highpart(t) ((usitype) (t) / __ll_b)
-#define BITS_PER_UNIT 8
-
-#if !defined(umul_ppmm)
-#define umul_ppmm(w1, w0, u, v)						\
-  do {									\
-    usitype __x0, __x1, __x2, __x3;					\
-    usitype __ul, __vl, __uh, __vh;					\
-									\
-    __ul = __ll_lowpart (u);						\
-    __uh = __ll_highpart (u);						\
-    __vl = __ll_lowpart (v);						\
-    __vh = __ll_highpart (v);						\
-									\
-    __x0 = (usitype) __ul * __vl;					\
-    __x1 = (usitype) __ul * __vh;					\
-    __x2 = (usitype) __uh * __vl;					\
-    __x3 = (usitype) __uh * __vh;					\
-									\
-    __x1 += __ll_highpart (__x0);/* this can't give carry */		\
-    __x1 += __x2;		/* but this indeed can */		\
-    if (__x1 < __x2)		/* did we get it? */			\
-      __x3 += __ll_b;		/* yes, add it in the proper pos. */	\
-									\
-    (w1) = __x3 + __ll_highpart (__x1);					\
-    (w0) = __ll_lowpart (__x1) * __ll_b + __ll_lowpart (__x0);		\
-  } while (0)
-#endif
-
-#if !defined(__umulsidi3)
-#define __umulsidi3(u, v) 						\
-  ({diunion __w;                                                        \
-       umul_ppmm (__w.s.high, __w.s.low, u, v);                         \
-           __w.ll; })
-#endif
-
-typedef unsigned int usitype __attribute__ ((mode(SI)));
-typedef int sitype __attribute__ ((mode(SI)));
-typedef int ditype __attribute__ ((mode(DI)));
-typedef int word_type __attribute__ ((mode(__word__)));
-
-struct distruct {
-	sitype low, high;
-};
-typedef union {
-	struct distruct s;
-	ditype ll;
-} diunion;
-
-#ifdef CONFIG_ARITHMETIC_OPS_L1
-ditype __muldi3(ditype u, ditype v)__attribute__((l1_text));
-#endif
-
-ditype __muldi3(ditype u, ditype v)
-{
-	diunion w;
-	diunion uu, vv;
-
-	uu.ll = u, vv.ll = v;
-	w.ll = __umulsidi3(uu.s.low, vv.s.low);
-	w.s.high += ((usitype) uu.s.low * (usitype) vv.s.high
-		     + (usitype) uu.s.high * (usitype) vv.s.low);
-
-	return w.ll;
-}
diff --git a/arch/blackfin/mach-bf518/Kconfig b/arch/blackfin/mach-bf518/Kconfig
new file mode 100644
index 0000000..f397ede
--- /dev/null
+++ b/arch/blackfin/mach-bf518/Kconfig
@@ -0,0 +1,233 @@
+if (BF51x)
+
+source "arch/blackfin/mach-bf518/boards/Kconfig"
+
+menu "BF518 Specific Configuration"
+
+comment "Alternative Multiplexing Scheme"
+
+choice
+	prompt "SPORT0"
+	default BF518_SPORT0_PORTG
+	help
+	  Select PORT used for SPORT0. See Hardware Reference Manual
+
+config BF518_SPORT0_PORTF
+	bool "PORT F"
+	help
+	  PORT F
+
+config BF518_SPORT0_PORTG
+	bool "PORT G"
+	help
+	  PORT G
+endchoice
+
+choice
+	prompt "SPORT0 TSCLK Location"
+	depends on BF518_SPORT0_PORTG
+	default BF518_SPORT0_TSCLK_PG10
+	help
+	  Select PIN used for SPORT0_TSCLK. See Hardware Reference Manual
+
+config BF518_SPORT0_TSCLK_PG10
+	bool "PORT PG10"
+	help
+	  PORT PG10
+
+config BF518_SPORT0_TSCLK_PG14
+	bool "PORT PG14"
+	help
+	  PORT PG14
+endchoice
+
+choice
+	prompt "UART1"
+	default BF518_UART1_PORTF
+	help
+	  Select PORT used for UART1. See Hardware Reference Manual
+
+config BF518_UART1_PORTF
+	bool "PORT F"
+	help
+	  PORT F
+
+config BF518_UART1_PORTG
+	bool "PORT G"
+	help
+	  PORT G
+endchoice
+
+comment "Interrupt Priority Assignment"
+menu "Priority"
+
+config IRQ_PLL_WAKEUP
+	int "IRQ_PLL_WAKEUP"
+	default 7
+config IRQ_DMA0_ERROR
+	int "IRQ_DMA0_ERROR"
+	default 7
+config IRQ_DMAR0_BLK
+	int "IRQ_DMAR0_BLK"
+	default 7
+config IRQ_DMAR1_BLK
+	int "IRQ_DMAR1_BLK"
+	default 7
+config IRQ_DMAR0_OVR
+	int "IRQ_DMAR0_OVR"
+	default 7
+config IRQ_DMAR1_OVR
+	int "IRQ_DMAR1_OVR"
+	default 7
+config IRQ_PPI_ERROR
+	int "IRQ_PPI_ERROR"
+	default 7
+config IRQ_MAC_ERROR
+	int "IRQ_MAC_ERROR"
+	default 7
+config IRQ_SPORT0_ERROR
+	int "IRQ_SPORT0_ERROR"
+	default 7
+config IRQ_SPORT1_ERROR
+	int "IRQ_SPORT1_ERROR"
+	default 7
+config IRQ_PTP_ERROR
+	int "IRQ_PTP_ERROR"
+	default 7
+config IRQ_UART0_ERROR
+	int "IRQ_UART0_ERROR"
+	default 7
+config IRQ_UART1_ERROR
+	int "IRQ_UART1_ERROR"
+	default 7
+config IRQ_RTC
+	int "IRQ_RTC"
+	default 8
+config IRQ_PPI
+	int "IRQ_PPI"
+	default 8
+config IRQ_SPORT0_RX
+	int "IRQ_SPORT0_RX"
+	default 9
+config IRQ_SPORT0_TX
+	int "IRQ_SPORT0_TX"
+	default 9
+config IRQ_SPORT1_RX
+	int "IRQ_SPORT1_RX"
+	default 9
+config IRQ_SPORT1_TX
+	int "IRQ_SPORT1_TX"
+	default 9
+config IRQ_TWI
+	int "IRQ_TWI"
+	default 10
+config IRQ_SPI0
+	int "IRQ_SPI"
+	default 10
+config IRQ_UART0_RX
+	int "IRQ_UART0_RX"
+	default 10
+config IRQ_UART0_TX
+	int "IRQ_UART0_TX"
+	default 10
+config IRQ_UART1_RX
+	int "IRQ_UART1_RX"
+	default 10
+config IRQ_UART1_TX
+	int "IRQ_UART1_TX"
+	default 10
+config IRQ_OPTSEC
+	int "IRQ_OPTSEC"
+	default 11
+config IRQ_CNT
+	int "IRQ_CNT"
+	default 11
+config IRQ_MAC_RX
+	int "IRQ_MAC_RX"
+	default 11
+config IRQ_PORTH_INTA
+	int "IRQ_PORTH_INTA"
+	default 11
+config IRQ_MAC_TX
+	int "IRQ_MAC_TX/NFC"
+	default 11
+config IRQ_PORTH_INTB
+	int "IRQ_PORTH_INTB"
+	default 11
+config IRQ_TIMER0
+	int "IRQ_TIMER0"
+	default 8
+config IRQ_TIMER1
+	int "IRQ_TIMER1"
+	default 12
+config IRQ_TIMER2
+	int "IRQ_TIMER2"
+	default 12
+config IRQ_TIMER3
+	int "IRQ_TIMER3"
+	default 12
+config IRQ_TIMER4
+	int "IRQ_TIMER4"
+	default 12
+config IRQ_TIMER5
+	int "IRQ_TIMER5"
+	default 12
+config IRQ_TIMER6
+	int "IRQ_TIMER6"
+	default 12
+config IRQ_TIMER7
+	int "IRQ_TIMER7"
+	default 12
+config IRQ_PORTG_INTA
+	int "IRQ_PORTG_INTA"
+	default 12
+config IRQ_PORTG_INTB
+	int "IRQ_PORTG_INTB"
+	default 12
+config IRQ_MEM_DMA0
+	int "IRQ_MEM_DMA0"
+	default 13
+config IRQ_MEM_DMA1
+	int "IRQ_MEM_DMA1"
+	default 13
+config IRQ_WATCH
+	int "IRQ_WATCH"
+	default 13
+config IRQ_PORTF_INTA
+	int "IRQ_PORTF_INTA"
+	default 13
+config IRQ_PORTF_INTB
+	int "IRQ_PORTF_INTB"
+	default 13
+config IRQ_SPI0_ERROR
+	int "IRQ_SPI0_ERROR"
+	default 7
+config IRQ_SPI1_ERROR
+	int "IRQ_SPI1_ERROR"
+	default 7
+config IRQ_RSI_INT0
+	int "IRQ_RSI_INT0"
+	default 7
+config IRQ_RSI_INT1
+	int "IRQ_RSI_INT1"
+	default 7
+config IRQ_PWM_TRIP
+	int "IRQ_PWM_TRIP"
+	default 10
+config IRQ_PWM_SYNC
+	int "IRQ_PWM_SYNC"
+	default 10
+config IRQ_PTP_STAT
+	int "IRQ_PTP_STAT"
+	default 10
+
+	help
+	  Enter the priority numbers between 7-13 ONLY.  Others are Reserved.
+	  This applies to all the above.  It is not recommended to assign the
+	  highest priority number 7 to UART or any other device.
+
+endmenu
+
+endmenu
+
+endif
diff --git a/arch/blackfin/mach-bf518/Makefile b/arch/blackfin/mach-bf518/Makefile
new file mode 100644
index 0000000..168a193
--- /dev/null
+++ b/arch/blackfin/mach-bf518/Makefile
@@ -0,0 +1,5 @@
+#
+# arch/blackfin/mach-bf518/Makefile
+#
+
+obj-y := ints-priority.o dma.o
diff --git a/arch/blackfin/mach-bf518/boards/Kconfig b/arch/blackfin/mach-bf518/boards/Kconfig
new file mode 100644
index 0000000..9616351
--- /dev/null
+++ b/arch/blackfin/mach-bf518/boards/Kconfig
@@ -0,0 +1,12 @@
+choice
+	prompt "System type"
+	default BFIN518F_EZBRD
+	help
+	  Select your board!
+
+config BFIN518F_EZBRD
+	bool "BF518F-EZBRD"
+	help
+	  BF518-EZBRD board support.
+
+endchoice
diff --git a/arch/blackfin/mach-bf518/boards/Makefile b/arch/blackfin/mach-bf518/boards/Makefile
new file mode 100644
index 0000000..172e859
--- /dev/null
+++ b/arch/blackfin/mach-bf518/boards/Makefile
@@ -0,0 +1,5 @@
+#
+# arch/blackfin/mach-bf518/boards/Makefile
+#
+
+obj-$(CONFIG_BFIN518F_EZBRD)            += ezbrd.o
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
new file mode 100644
index 0000000..15f1351
--- /dev/null
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -0,0 +1,669 @@
+/*
+ * File:         arch/blackfin/mach-bf518/boards/ezbrd.c
+ * Based on:     arch/blackfin/mach-bf527/boards/ezbrd.c
+ * Author:       Bryan Wu <cooloney@kernel.org>
+ *
+ * Created:
+ * Description:
+ *
+ * Modified:
+ *               Copyright 2005 National ICT Australia (NICTA)
+ *               Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <asm/dma.h>
+#include <asm/bfin5xx_spi.h>
+#include <asm/reboot.h>
+#include <asm/portmux.h>
+#include <asm/dpmc.h>
+#include <asm/bfin_sdh.h>
+#include <linux/spi/ad7877.h>
+
+/*
+ * Name the Board for the /proc/cpuinfo
+ */
+const char bfin_board_name[] = "ADI BF518F-EZBRD";
+
+/*
+ *  Driver needs to know address, irq and flag pin.
+ */
+
+#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+static struct mtd_partition ezbrd_partitions[] = {
+	{
+		.name       = "bootloader(nor)",
+		.size       = 0x40000,
+		.offset     = 0,
+	}, {
+		.name       = "linux kernel(nor)",
+		.size       = 0x1C0000,
+		.offset     = MTDPART_OFS_APPEND,
+	}, {
+		.name       = "file system(nor)",
+		.size       = MTDPART_SIZ_FULL,
+		.offset     = MTDPART_OFS_APPEND,
+	}
+};
+
+static struct physmap_flash_data ezbrd_flash_data = {
+	.width      = 2,
+	.parts      = ezbrd_partitions,
+	.nr_parts   = ARRAY_SIZE(ezbrd_partitions),
+};
+
+static struct resource ezbrd_flash_resource = {
+	.start = 0x20000000,
+	.end   = 0x203fffff,
+	.flags = IORESOURCE_MEM,
+};
+
+static struct platform_device ezbrd_flash_device = {
+	.name          = "physmap-flash",
+	.id            = 0,
+	.dev = {
+		.platform_data = &ezbrd_flash_data,
+	},
+	.num_resources = 1,
+	.resource      = &ezbrd_flash_resource,
+};
+#endif
+
+#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+static struct platform_device rtc_device = {
+	.name = "rtc-bfin",
+	.id   = -1,
+};
+#endif
+
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+static struct platform_device bfin_mac_device = {
+	.name = "bfin_mac",
+};
+#endif
+
+#if defined(CONFIG_MTD_M25P80) \
+	|| defined(CONFIG_MTD_M25P80_MODULE)
+static struct mtd_partition bfin_spi_flash_partitions[] = {
+	{
+		.name = "bootloader(spi)",
+		.size = 0x00040000,
+		.offset = 0,
+		.mask_flags = MTD_CAP_ROM
+	}, {
+		.name = "linux kernel(spi)",
+		.size = MTDPART_SIZ_FULL,
+		.offset = MTDPART_OFS_APPEND,
+	}
+};
+
+static struct flash_platform_data bfin_spi_flash_data = {
+	.name = "m25p80",
+	.parts = bfin_spi_flash_partitions,
+	.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
+	.type = "m25p16",
+};
+
+/* SPI flash chip (m25p16) */
+static struct bfin5xx_spi_chip spi_flash_chip_info = {
+	.enable_dma = 0,         /* don't use dma transfer with this chip */
+	.bits_per_word = 8,
+};
+#endif
+
+#if defined(CONFIG_SPI_ADC_BF533) \
+	|| defined(CONFIG_SPI_ADC_BF533_MODULE)
+/* SPI ADC chip */
+static struct bfin5xx_spi_chip spi_adc_chip_info = {
+	.enable_dma = 1,         /* use dma transfer with this chip*/
+	.bits_per_word = 16,
+};
+#endif
+
+#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
+static struct bfin5xx_spi_chip spi_mmc_chip_info = {
+	.enable_dma = 1,
+	.bits_per_word = 8,
+};
+#endif
+
+#if defined(CONFIG_PBX)
+static struct bfin5xx_spi_chip spi_si3xxx_chip_info = {
+	.ctl_reg	= 0x4, /* send zero */
+	.enable_dma	= 0,
+	.bits_per_word	= 8,
+	.cs_change_per_word = 1,
+};
+#endif
+
+#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
+static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
+	.enable_dma = 0,
+	.bits_per_word = 16,
+};
+
+static const struct ad7877_platform_data bfin_ad7877_ts_info = {
+	.model			= 7877,
+	.vref_delay_usecs	= 50,	/* internal, no capacitor */
+	.x_plate_ohms		= 419,
+	.y_plate_ohms		= 486,
+	.pressure_max		= 1000,
+	.pressure_min		= 0,
+	.stopacq_polarity 	= 1,
+	.first_conversion_delay = 3,
+	.acquisition_time 	= 1,
+	.averaging 		= 1,
+	.pen_down_acc_interval 	= 1,
+};
+#endif
+
+#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
+	 && defined(CONFIG_SND_SOC_WM8731_SPI)
+static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
+	.enable_dma = 0,
+	.bits_per_word = 16,
+};
+#endif
+
+#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
+static struct bfin5xx_spi_chip spidev_chip_info = {
+	.enable_dma = 0,
+	.bits_per_word = 8,
+};
+#endif
+
+static struct spi_board_info bfin_spi_board_info[] __initdata = {
+#if defined(CONFIG_MTD_M25P80) \
+	|| defined(CONFIG_MTD_M25P80_MODULE)
+	{
+		/* the modalias must be the same as spi device driver name */
+		.modalias = "m25p80", /* Name of spi_driver for this device */
+		.max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num = 0, /* Framework bus number */
+		.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
+		.platform_data = &bfin_spi_flash_data,
+		.controller_data = &spi_flash_chip_info,
+		.mode = SPI_MODE_3,
+	},
+#endif
+
+#if defined(CONFIG_SPI_ADC_BF533) \
+	|| defined(CONFIG_SPI_ADC_BF533_MODULE)
+	{
+		.modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
+		.max_speed_hz = 6250000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num = 0, /* Framework bus number */
+		.chip_select = 1, /* Framework chip select. */
+		.platform_data = NULL, /* No spi_driver specific config */
+		.controller_data = &spi_adc_chip_info,
+	},
+#endif
+
+#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
+	{
+		.modalias = "spi_mmc_dummy",
+		.max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num = 0,
+		.chip_select = 0,
+		.platform_data = NULL,
+		.controller_data = &spi_mmc_chip_info,
+		.mode = SPI_MODE_3,
+	},
+	{
+		.modalias = "spi_mmc",
+		.max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num = 0,
+		.chip_select = CONFIG_SPI_MMC_CS_CHAN,
+		.platform_data = NULL,
+		.controller_data = &spi_mmc_chip_info,
+		.mode = SPI_MODE_3,
+	},
+#endif
+#if defined(CONFIG_PBX)
+	{
+		.modalias = "fxs-spi",
+		.max_speed_hz = 12500000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num = 0,
+		.chip_select = 8 - CONFIG_J11_JUMPER,
+		.controller_data = &spi_si3xxx_chip_info,
+		.mode = SPI_MODE_3,
+	},
+	{
+		.modalias = "fxo-spi",
+		.max_speed_hz = 12500000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num = 0,
+		.chip_select = 8 - CONFIG_J19_JUMPER,
+		.controller_data = &spi_si3xxx_chip_info,
+		.mode = SPI_MODE_3,
+	},
+#endif
+#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
+	{
+		.modalias		= "ad7877",
+		.platform_data		= &bfin_ad7877_ts_info,
+		.irq			= IRQ_PF8,
+		.max_speed_hz	= 12500000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num	= 0,
+		.chip_select  = 2,
+		.controller_data = &spi_ad7877_chip_info,
+	},
+#endif
+#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
+	 && defined(CONFIG_SND_SOC_WM8731_SPI)
+	{
+		.modalias	= "wm8731",
+		.max_speed_hz	= 3125000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num	= 0,
+		.chip_select    = 5,
+		.controller_data = &spi_wm8731_chip_info,
+		.mode = SPI_MODE_0,
+	},
+#endif
+#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
+	{
+		.modalias = "spidev",
+		.max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num = 0,
+		.chip_select = 1,
+		.controller_data = &spidev_chip_info,
+	},
+#endif
+#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
+	{
+		.modalias = "bfin-lq035q1-spi",
+		.max_speed_hz = 20000000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num = 0,
+		.chip_select = 1,
+		.controller_data = &lq035q1_spi_chip_info,
+		.mode = SPI_CPHA | SPI_CPOL,
+	},
+#endif
+};
+
+/* SPI controller data */
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+/* SPI (0) */
+static struct bfin5xx_spi_master bfin_spi0_info = {
+	.num_chipselect = 5,
+	.enable_dma = 1,  /* master has the ability to do dma transfer */
+	.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
+};
+
+static struct resource bfin_spi0_resource[] = {
+	[0] = {
+		.start = SPI0_REGBASE,
+		.end   = SPI0_REGBASE + 0xFF,
+		.flags = IORESOURCE_MEM,
+		},
+	[1] = {
+		.start = CH_SPI0,
+		.end   = CH_SPI0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_spi0_device = {
+	.name = "bfin-spi",
+	.id = 0, /* Bus number */
+	.num_resources = ARRAY_SIZE(bfin_spi0_resource),
+	.resource = bfin_spi0_resource,
+	.dev = {
+		.platform_data = &bfin_spi0_info, /* Passed to driver */
+	},
+};
+
+/* SPI (1) */
+static struct bfin5xx_spi_master bfin_spi1_info = {
+	.num_chipselect = 5,
+	.enable_dma = 1,  /* master has the ability to do dma transfer */
+	.pin_req = {P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0},
+};
+
+static struct resource bfin_spi1_resource[] = {
+	[0] = {
+		.start = SPI1_REGBASE,
+		.end   = SPI1_REGBASE + 0xFF,
+		.flags = IORESOURCE_MEM,
+		},
+	[1] = {
+		.start = CH_SPI1,
+		.end   = CH_SPI1,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_spi1_device = {
+	.name = "bfin-spi",
+	.id = 1, /* Bus number */
+	.num_resources = ARRAY_SIZE(bfin_spi1_resource),
+	.resource = bfin_spi1_resource,
+	.dev = {
+		.platform_data = &bfin_spi1_info, /* Passed to driver */
+	},
+};
+#endif  /* spi master and devices */
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+static struct resource bfin_uart_resources[] = {
+#ifdef CONFIG_SERIAL_BFIN_UART0
+	{
+		.start = 0xFFC00400,
+		.end = 0xFFC004FF,
+		.flags = IORESOURCE_MEM,
+	},
+#endif
+#ifdef CONFIG_SERIAL_BFIN_UART1
+	{
+		.start = 0xFFC02000,
+		.end = 0xFFC020FF,
+		.flags = IORESOURCE_MEM,
+	},
+#endif
+};
+
+static struct platform_device bfin_uart_device = {
+	.name = "bfin-uart",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_uart_resources),
+	.resource = bfin_uart_resources,
+};
+#endif
+
+#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
+	{
+		.start = 0xFFC00400,
+		.end = 0xFFC004FF,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
+#endif
+#ifdef CONFIG_BFIN_SIR1
+static struct resource bfin_sir1_resources[] = {
+	{
+		.start = 0xFFC02000,
+		.end = 0xFFC020FF,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_UART1_RX,
+		.end = IRQ_UART1_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_RX,
+		.end = CH_UART1_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device bfin_sir1_device = {
+	.name = "bfin_sir",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
+	.resource = bfin_sir1_resources,
+};
+#endif
+#endif
+
+#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static struct resource bfin_twi0_resource[] = {
+	[0] = {
+		.start = TWI0_REGBASE,
+		.end   = TWI0_REGBASE,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = IRQ_TWI,
+		.end   = IRQ_TWI,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device i2c_bfin_twi_device = {
+	.name = "i2c-bfin-twi",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
+	.resource = bfin_twi0_resource,
+};
+#endif
+
+#ifdef CONFIG_I2C_BOARDINFO
+static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
+#if defined(CONFIG_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE)
+	{
+		I2C_BOARD_INFO("pcf8574_lcd", 0x22),
+	},
+#endif
+#if defined(CONFIG_TWI_KEYPAD) || defined(CONFIG_TWI_KEYPAD_MODULE)
+	{
+		I2C_BOARD_INFO("pcf8574_keypad", 0x27),
+		.irq = IRQ_PF8,
+	},
+#endif
+};
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+static struct platform_device bfin_sport0_uart_device = {
+	.name = "bfin-sport-uart",
+	.id = 0,
+};
+
+static struct platform_device bfin_sport1_uart_device = {
+	.name = "bfin-sport-uart",
+	.id = 1,
+};
+#endif
+
+#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+
+static struct gpio_keys_button bfin_gpio_keys_table[] = {
+	{BTN_0, GPIO_PG0, 1, "gpio-keys: BTN0"},
+	{BTN_1, GPIO_PG13, 1, "gpio-keys: BTN1"},
+};
+
+static struct gpio_keys_platform_data bfin_gpio_keys_data = {
+	.buttons        = bfin_gpio_keys_table,
+	.nbuttons       = ARRAY_SIZE(bfin_gpio_keys_table),
+};
+
+static struct platform_device bfin_device_gpiokeys = {
+	.name      = "gpio-keys",
+	.dev = {
+		.platform_data = &bfin_gpio_keys_data,
+	},
+};
+#endif
+
+#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE)
+
+static struct bfin_sd_host bfin_sdh_data = {
+	.dma_chan = CH_RSI,
+	.irq_int0 = IRQ_RSI_INT0,
+	.pin_req = {P_RSI_DATA0, P_RSI_DATA1, P_RSI_DATA2, P_RSI_DATA3, P_RSI_CMD, P_RSI_CLK, 0},
+};
+
+static struct platform_device bf51x_sdh_device = {
+	.name = "bfin-sdh",
+	.id = 0,
+	.dev = {
+		.platform_data = &bfin_sdh_data,
+	},
+};
+#endif
+
+static struct resource bfin_gpios_resources = {
+	.start = 0,
+	.end   = MAX_BLACKFIN_GPIOS - 1,
+	.flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device bfin_gpios_device = {
+	.name = "simple-gpio",
+	.id = -1,
+	.num_resources = 1,
+	.resource = &bfin_gpios_resources,
+};
+
+static const unsigned int cclk_vlev_datasheet[] =
+{
+	VRPAIR(VLEV_100, 400000000),
+	VRPAIR(VLEV_105, 426000000),
+	VRPAIR(VLEV_110, 500000000),
+	VRPAIR(VLEV_115, 533000000),
+	VRPAIR(VLEV_120, 600000000),
+};
+
+static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
+	.tuple_tab = cclk_vlev_datasheet,
+	.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
+	.vr_settling_time = 25 /* us */,
+};
+
+static struct platform_device bfin_dpmc = {
+	.name = "bfin dpmc",
+	.dev = {
+		.platform_data = &bfin_dmpc_vreg_data,
+	},
+};
+
+static struct platform_device *stamp_devices[] __initdata = {
+
+	&bfin_dpmc,
+
+#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+	&rtc_device,
+#endif
+
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+	&bfin_mac_device,
+#endif
+
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+	&bfin_spi0_device,
+	&bfin_spi1_device,
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+	&bfin_uart_device,
+#endif
+
+#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#ifdef CONFIG_BFIN_SIR1
+	&bfin_sir1_device,
+#endif
+#endif
+
+#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+	&i2c_bfin_twi_device,
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+	&bfin_sport0_uart_device,
+	&bfin_sport1_uart_device,
+#endif
+
+#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+	&bfin_device_gpiokeys,
+#endif
+
+#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE)
+	&bf51x_sdh_device,
+#endif
+
+#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+	&ezbrd_flash_device,
+#endif
+
+	&bfin_gpios_device,
+};
+
+static int __init ezbrd_init(void)
+{
+	printk(KERN_INFO "%s(): registering device resources\n", __func__);
+
+#ifdef CONFIG_I2C_BOARDINFO
+	i2c_register_board_info(0, bfin_i2c_board_info,
+				ARRAY_SIZE(bfin_i2c_board_info));
+#endif
+
+	platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
+	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
+	return 0;
+}
+
+arch_initcall(ezbrd_init);
+
+void native_machine_restart(char *cmd)
+{
+	/* workaround reboot hang when booting from SPI */
+	if ((bfin_read_SYSCR() & 0x7) == 0x3)
+		bfin_gpio_reset_spi0_ssel1();
+}
+
+void bfin_get_ether_addr(char *addr)
+{
+	/* the MAC is stored in OTP memory page 0xDF */
+	u32 ret;
+	u64 otp_mac;
+	u32 (*otp_read)(u32 page, u32 flags, u64 *page_content) = (void *)0xEF00001A;
+
+	ret = otp_read(0xDF, 0x00, &otp_mac);
+	if (!(ret & 0x1)) {
+		char *otp_mac_p = (char *)&otp_mac;
+		for (ret = 0; ret < 6; ++ret)
+			addr[ret] = otp_mac_p[5 - ret];
+	}
+}
+EXPORT_SYMBOL(bfin_get_ether_addr);
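[Editor's note: bfin_get_ether_addr() above fetches the board MAC from OTP page 0xDF via the on-chip ROM helper and copies the six bytes in reverse order.  The stand-alone sketch below -- illustrative only, not kernel code, and assuming a little-endian host as on Blackfin -- shows how a 64-bit OTP word maps onto the 6-byte address under that reversal; otp_mac is a made-up example value rather than real OTP contents.]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t otp_mac = 0x0000AABBCCDDEEFFULL;	/* hypothetical OTP word */
	const uint8_t *p = (const uint8_t *)&otp_mac;	/* little-endian byte view */
	uint8_t addr[6];
	int i;

	/* addr[i] = otp_mac_p[5 - i]: most significant stored byte first */
	for (i = 0; i < 6; i++)
		addr[i] = p[5 - i];

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}

[For the example word 0x0000AABBCCDDEEFF this prints aa:bb:cc:dd:ee:ff, i.e. the most significant stored MAC byte ends up in addr[0].]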
diff --git a/arch/blackfin/mach-bf518/dma.c b/arch/blackfin/mach-bf518/dma.c
new file mode 100644
index 0000000..698e88c
--- /dev/null
+++ b/arch/blackfin/mach-bf518/dma.c
@@ -0,0 +1,118 @@
+/*
+ * File:         arch/blackfin/mach-bf518/dma.c
+ * Based on:
+ * Author:       Bryan Wu <cooloney@kernel.org>
+ *
+ * Created:
+ * Description:  This file contains the simple DMA Implementation for Blackfin
+ *
+ * Modified:
+ *               Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <linux/module.h>
+
+#include <asm/blackfin.h>
+#include <asm/dma.h>
+
+struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+	(struct dma_register *) DMA0_NEXT_DESC_PTR,
+	(struct dma_register *) DMA1_NEXT_DESC_PTR,
+	(struct dma_register *) DMA2_NEXT_DESC_PTR,
+	(struct dma_register *) DMA3_NEXT_DESC_PTR,
+	(struct dma_register *) DMA4_NEXT_DESC_PTR,
+	(struct dma_register *) DMA5_NEXT_DESC_PTR,
+	(struct dma_register *) DMA6_NEXT_DESC_PTR,
+	(struct dma_register *) DMA7_NEXT_DESC_PTR,
+	(struct dma_register *) DMA8_NEXT_DESC_PTR,
+	(struct dma_register *) DMA9_NEXT_DESC_PTR,
+	(struct dma_register *) DMA10_NEXT_DESC_PTR,
+	(struct dma_register *) DMA11_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S0_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D0_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D1_NEXT_DESC_PTR,
+};
+EXPORT_SYMBOL(dma_io_base_addr);
+
+int channel2irq(unsigned int channel)
+{
+	int ret_irq = -1;
+
+	switch (channel) {
+	case CH_PPI:
+		ret_irq = IRQ_PPI;
+		break;
+
+	case CH_EMAC_RX:
+		ret_irq = IRQ_MAC_RX;
+		break;
+
+	case CH_EMAC_TX:
+		ret_irq = IRQ_MAC_TX;
+		break;
+
+	case CH_UART1_RX:
+		ret_irq = IRQ_UART1_RX;
+		break;
+
+	case CH_UART1_TX:
+		ret_irq = IRQ_UART1_TX;
+		break;
+
+	case CH_SPORT0_RX:
+		ret_irq = IRQ_SPORT0_RX;
+		break;
+
+	case CH_SPORT0_TX:
+		ret_irq = IRQ_SPORT0_TX;
+		break;
+
+	case CH_SPORT1_RX:
+		ret_irq = IRQ_SPORT1_RX;
+		break;
+
+	case CH_SPORT1_TX:
+		ret_irq = IRQ_SPORT1_TX;
+		break;
+
+	case CH_SPI0:
+		ret_irq = IRQ_SPI0;
+		break;
+
+	case CH_UART0_RX:
+		ret_irq = IRQ_UART0_RX;
+		break;
+
+	case CH_UART0_TX:
+		ret_irq = IRQ_UART0_TX;
+		break;
+
+	case CH_MEM_STREAM0_SRC:
+	case CH_MEM_STREAM0_DEST:
+		ret_irq = IRQ_MEM_DMA0;
+		break;
+
+	case CH_MEM_STREAM1_SRC:
+	case CH_MEM_STREAM1_DEST:
+		ret_irq = IRQ_MEM_DMA1;
+		break;
+	}
+	return ret_irq;
+}
diff --git a/arch/blackfin/mach-bf518/include/mach/anomaly.h b/arch/blackfin/mach-bf518/include/mach/anomaly.h
new file mode 100644
index 0000000..e5b4bef
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/anomaly.h
@@ -0,0 +1,79 @@
+/*
+ * File: include/asm-blackfin/mach-bf518/anomaly.h
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Copyright (C) 2004-2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+/* This file should be up to date with:
+ *  - ????
+ */
+
+#ifndef _MACH_ANOMALY_H_
+#define _MACH_ANOMALY_H_
+
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
+#define ANOMALY_05000074 (1)
+/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
+#define ANOMALY_05000122 (1)
+/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
+#define ANOMALY_05000245 (1)
+/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
+#define ANOMALY_05000265 (1)
+/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
+#define ANOMALY_05000310 (1)
+/* PPI Underflow Error Goes Undetected in ITU-R 656 Mode */
+#define ANOMALY_05000366 (1)
+/* Lockbox SESR Firmware Does Not Save/Restore Full Context */
+#define ANOMALY_05000405 (1)
+/* Lockbox Firmware Memory Cleanup Routine Does not Clear Registers */
+#define ANOMALY_05000408 (1)
+/* Speculative Fetches Can Cause Undesired External FIFO Operations */
+#define ANOMALY_05000416 (1)
+/* TWI Fall Time (Tof) May Violate the Minimum I2C Specification */
+#define ANOMALY_05000421 (1)
+/* TWI Input Capacitance (Ci) May Violate the Maximum I2C Specification */
+#define ANOMALY_05000422 (1)
+/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */
+#define ANOMALY_05000426 (1)
+/* Software System Reset Corrupts PLL_LOCKCNT Register */
+#define ANOMALY_05000430 (1)
+/* Incorrect Use of Stack in Lockbox Firmware During Authentication */
+#define ANOMALY_05000431 (1)
+/* Certain SIC Registers are not Reset After Soft or Core Double Fault Reset */
+#define ANOMALY_05000435 (1)
+/* PORTx_DRIVE and PORTx_HYSTERESIS Registers Read Back Incorrect Values */
+#define ANOMALY_05000438 (1)
+/* Preboot Cannot be Used to Program the PLL_DIV Register */
+#define ANOMALY_05000439 (1)
+/* bfrom_SysControl() Cannot be Used to Write the PLL_DIV Register */
+#define ANOMALY_05000440 (1)
+/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
+#define ANOMALY_05000443 (1)
+/* Incorrect L1 Instruction Bank B Memory Map Location */
+#define ANOMALY_05000444 (1)
+
+/* Anomalies that don't exist on this proc */
+#define ANOMALY_05000125 (0)
+#define ANOMALY_05000158 (0)
+#define ANOMALY_05000183 (0)
+#define ANOMALY_05000198 (0)
+#define ANOMALY_05000230 (0)
+#define ANOMALY_05000244 (0)
+#define ANOMALY_05000261 (0)
+#define ANOMALY_05000263 (0)
+#define ANOMALY_05000266 (0)
+#define ANOMALY_05000273 (0)
+#define ANOMALY_05000285 (0)
+#define ANOMALY_05000307 (0)
+#define ANOMALY_05000311 (0)
+#define ANOMALY_05000312 (0)
+#define ANOMALY_05000323 (0)
+#define ANOMALY_05000353 (0)
+#define ANOMALY_05000363 (0)
+#define ANOMALY_05000386 (0)
+#define ANOMALY_05000412 (0)
+#define ANOMALY_05000432 (0)
+
+#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/bf518.h b/arch/blackfin/mach-bf518/include/mach/bf518.h
new file mode 100644
index 0000000..78da1a0
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/bf518.h
@@ -0,0 +1,132 @@
+/*
+ * File:         include/asm-blackfin/mach-bf518/bf518.h
+ * Based on:	include/asm-blackfin/mach-bf527/bf527.h
+ * Author:	Michael Hennerich (michael.hennerich@analog.com)
+ *
+ * Created:
+ * Description:  SYSTEM MMR REGISTER AND MEMORY MAP FOR ADSP-BF518
+ *
+ * Modified:
+ *               Copyright 2004-2007 Analog Devices Inc.
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef __MACH_BF518_H__
+#define __MACH_BF518_H__
+
+#define OFFSET_(x) ((x) & 0x0000FFFF)
+
+/* some misc defines */
+#define IMASK_IVG15		0x8000
+#define IMASK_IVG14		0x4000
+#define IMASK_IVG13		0x2000
+#define IMASK_IVG12		0x1000
+
+#define IMASK_IVG11		0x0800
+#define IMASK_IVG10		0x0400
+#define IMASK_IVG9		0x0200
+#define IMASK_IVG8		0x0100
+
+#define IMASK_IVG7		0x0080
+#define IMASK_IVGTMR		0x0040
+#define IMASK_IVGHW		0x0020
+
+/***************************/
+
+#define BFIN_DSUBBANKS	4
+#define BFIN_DWAYS		2
+#define BFIN_DLINES		64
+#define BFIN_ISUBBANKS	4
+#define BFIN_IWAYS		4
+#define BFIN_ILINES		32
+
+#define WAY0_L			0x1
+#define WAY1_L			0x2
+#define WAY01_L			0x3
+#define WAY2_L			0x4
+#define WAY02_L			0x5
+#define	WAY12_L			0x6
+#define	WAY012_L		0x7
+
+#define	WAY3_L			0x8
+#define	WAY03_L			0x9
+#define	WAY13_L			0xA
+#define	WAY013_L		0xB
+
+#define	WAY32_L			0xC
+#define	WAY320_L		0xD
+#define	WAY321_L		0xE
+#define	WAYALL_L		0xF
+
+#define DMC_ENABLE (2<<2)	/*yes, 2, not 1 */
+
+/********************************* EBIU Settings ************************************/
+#define AMBCTL0VAL	((CONFIG_BANK_1 << 16) | CONFIG_BANK_0)
+#define AMBCTL1VAL	((CONFIG_BANK_3 << 16) | CONFIG_BANK_2)
+
+#ifdef CONFIG_C_AMBEN_ALL
+#define V_AMBEN AMBEN_ALL
+#endif
+#ifdef CONFIG_C_AMBEN
+#define V_AMBEN 0x0
+#endif
+#ifdef CONFIG_C_AMBEN_B0
+#define V_AMBEN AMBEN_B0
+#endif
+#ifdef CONFIG_C_AMBEN_B0_B1
+#define V_AMBEN AMBEN_B0_B1
+#endif
+#ifdef CONFIG_C_AMBEN_B0_B1_B2
+#define V_AMBEN AMBEN_B0_B1_B2
+#endif
+#ifdef CONFIG_C_AMCKEN
+#define V_AMCKEN AMCKEN
+#else
+#define V_AMCKEN 0x0
+#endif
+#ifdef CONFIG_C_CDPRIO
+#define V_CDPRIO 0x100
+#else
+#define V_CDPRIO 0x0
+#endif
+
+#define AMGCTLVAL	(V_AMBEN | V_AMCKEN | V_CDPRIO)
+
+#ifdef CONFIG_BF518
+#define CPU "BF518"
+#define CPUID 0x27e8
+#endif
+#ifdef CONFIG_BF516
+#define CPU "BF516"
+#define CPUID 0x27e8
+#endif
+#ifdef CONFIG_BF514
+#define CPU "BF514"
+#define CPUID 0x27e8
+#endif
+#ifdef CONFIG_BF512
+#define CPU "BF512"
+#define CPUID 0x27e8
+#endif
+
+#ifndef CPU
+#error "Unknown CPU type - This kernel doesn't seem to be configured properly"
+#endif
+
+#endif				/* __MACH_BF518_H__  */
diff --git a/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
new file mode 100644
index 0000000..b50a63b
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
@@ -0,0 +1,169 @@
+/*
+ * file:        include/asm-blackfin/mach-bf518/bfin_serial_5xx.h
+ * based on:
+ * author:
+ *
+ * created:
+ * description:
+ *	blackfin serial driver head file
+ * rev:
+ *
+ * modified:
+ *
+ *
+ * bugs:         enter bugs at http://blackfin.uclinux.org/
+ *
+ * this program is free software; you can redistribute it and/or modify
+ * it under the terms of the gnu general public license as published by
+ * the free software foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * this program is distributed in the hope that it will be useful,
+ * but without any warranty; without even the implied warranty of
+ * merchantability or fitness for a particular purpose.  see the
+ * gnu general public license for more details.
+ *
+ * you should have received a copy of the gnu general public license
+ * along with this program; see the file copying.
+ * if not, write to the free software foundation,
+ * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ */
+
+#include <linux/serial.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+
+#define UART_GET_CHAR(uart)     bfin_read16(((uart)->port.membase + OFFSET_RBR))
+#define UART_GET_DLL(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLL))
+#define UART_GET_IER(uart)      bfin_read16(((uart)->port.membase + OFFSET_IER))
+#define UART_GET_DLH(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLH))
+#define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
+#define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
+#define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
+
+#define UART_PUT_CHAR(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_THR), v)
+#define UART_PUT_DLL(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_DLL), v)
+#define UART_PUT_IER(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_IER), v)
+#define UART_SET_IER(uart, v)    UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
+#define UART_CLEAR_IER(uart, v)  UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
+#define UART_PUT_DLH(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_DLH), v)
+#define UART_PUT_LCR(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_LCR), v)
+#define UART_PUT_GCTL(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL), v)
+
+#define UART_SET_DLAB(uart)     do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
+#define UART_CLEAR_DLAB(uart)   do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
+
+#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
+#define UART_SET_RTS(x) gpio_set_value(x->rts_pin, 1)
+#define UART_CLEAR_RTS(x) gpio_set_value(x->rts_pin, 0)
+#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
+#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
+
+#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
+# define CONFIG_SERIAL_BFIN_CTSRTS
+
+# ifndef CONFIG_UART0_CTS_PIN
+#  define CONFIG_UART0_CTS_PIN -1
+# endif
+
+# ifndef CONFIG_UART0_RTS_PIN
+#  define CONFIG_UART0_RTS_PIN -1
+# endif
+
+# ifndef CONFIG_UART1_CTS_PIN
+#  define CONFIG_UART1_CTS_PIN -1
+# endif
+
+# ifndef CONFIG_UART1_RTS_PIN
+#  define CONFIG_UART1_RTS_PIN -1
+# endif
+#endif
+
+#define BFIN_UART_TX_FIFO_SIZE	2
+
+/*
+ * The pin configuration differs from the schematic
+ */
+struct bfin_serial_port {
+	struct uart_port port;
+	unsigned int old_status;
+	unsigned int lsr;
+#ifdef CONFIG_SERIAL_BFIN_DMA
+	int tx_done;
+	int tx_count;
+	struct circ_buf rx_dma_buf;
+	struct timer_list rx_dma_timer;
+	int rx_dma_nrows;
+	unsigned int tx_dma_channel;
+	unsigned int rx_dma_channel;
+	struct work_struct tx_dma_workqueue;
+#endif
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+	struct timer_list cts_timer;
+	int cts_pin;
+	int rts_pin;
+#endif
+};
+
+/* The hardware clears the LSR bits upon read, so we need to cache
+ * some of the more fun bits in software so they don't get lost
+ * when checking the LSR in other code paths (TX).
+ */
+static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
+{
+	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
+	uart->lsr |= (lsr & (BI|FE|PE|OE));
+	return lsr | uart->lsr;
+}
+
+static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
+{
+	uart->lsr = 0;
+	bfin_write16(uart->port.membase + OFFSET_LSR, -1);
+}
+
+struct bfin_serial_res {
+	unsigned long uart_base_addr;
+	int uart_irq;
+#ifdef CONFIG_SERIAL_BFIN_DMA
+	unsigned int uart_tx_dma_channel;
+	unsigned int uart_rx_dma_channel;
+#endif
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+	int uart_cts_pin;
+	int uart_rts_pin;
+#endif
+};
+
+struct bfin_serial_res bfin_serial_resource[] = {
+#ifdef CONFIG_SERIAL_BFIN_UART0
+	{
+	 0xFFC00400,
+	 IRQ_UART0_RX,
+#ifdef CONFIG_SERIAL_BFIN_DMA
+	 CH_UART0_TX,
+	 CH_UART0_RX,
+#endif
+#ifdef CONFIG_BFIN_UART0_CTSRTS
+	 CONFIG_UART0_CTS_PIN,
+	 CONFIG_UART0_RTS_PIN,
+#endif
+	 },
+#endif
+#ifdef CONFIG_SERIAL_BFIN_UART1
+	{
+	 0xFFC02000,
+	 IRQ_UART1_RX,
+#ifdef CONFIG_SERIAL_BFIN_DMA
+	 CH_UART1_TX,
+	 CH_UART1_RX,
+#endif
+#ifdef CONFIG_BFIN_UART1_CTSRTS
+	 CONFIG_UART1_CTS_PIN,
+	 CONFIG_UART1_RTS_PIN,
+#endif
+	 },
+#endif
+};
+
+#define DRIVER_NAME "bfin-uart"
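[Editor's note: UART_GET_LSR()/UART_CLEAR_LSR() in the header above deal with the fact that reading the LSR clears its error bits, so the driver keeps a software copy until the error path has looked at them.  The toy program below -- illustrative only, with stand-in bit masks and a simulated read-to-clear register -- demonstrates that sticky-caching pattern in isolation.]

#include <stdio.h>

#define OE 0x02		/* stand-ins for the real LSR bit masks */
#define PE 0x04
#define FE 0x08
#define BI 0x10

static unsigned int hw_lsr = OE | FE;	/* pretend errors are pending */
static unsigned int cached_lsr;

/* Simulated read-to-clear register: returning the value wipes the error bits. */
static unsigned int fake_lsr_read(void)
{
	unsigned int v = hw_lsr;

	hw_lsr &= ~(BI | FE | PE | OE);
	return v;
}

static unsigned int get_lsr(void)
{
	unsigned int lsr = fake_lsr_read();

	cached_lsr |= lsr & (BI | FE | PE | OE);	/* remember sticky bits */
	return lsr | cached_lsr;
}

static void clear_lsr(void)
{
	cached_lsr = 0;
}

int main(void)
{
	printf("first read:  %#x\n", get_lsr());	/* errors visible */
	printf("second read: %#x\n", get_lsr());	/* still visible via cache */
	clear_lsr();
	printf("after clear: %#x\n", get_lsr());	/* gone */
	return 0;
}

[The second read still reports the OE|FE bits from the cache even though the simulated register was wiped by the first read; clear_lsr() then discards them, matching what UART_CLEAR_LSR() does once the driver has handled the errors.]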
diff --git a/arch/blackfin/mach-bf518/include/mach/blackfin.h b/arch/blackfin/mach-bf518/include/mach/blackfin.h
new file mode 100644
index 0000000..d1a2b9c
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/blackfin.h
@@ -0,0 +1,105 @@
+/*
+ * File:         include/asm-blackfin/mach-bf518/blackfin.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _MACH_BLACKFIN_H_
+#define _MACH_BLACKFIN_H_
+
+#define BF518_FAMILY
+
+#include "bf518.h"
+#include "mem_map.h"
+#include "defBF512.h"
+#include "anomaly.h"
+
+#if defined(CONFIG_BF518)
+#include "defBF518.h"
+#endif
+
+#if defined(CONFIG_BF516)
+#include "defBF516.h"
+#endif
+
+#if defined(CONFIG_BF514)
+#include "defBF514.h"
+#endif
+
+#if defined(CONFIG_BF512)
+#include "defBF512.h"
+#endif
+
+#if !defined(__ASSEMBLY__)
+#include "cdefBF512.h"
+
+#if defined(CONFIG_BF518)
+#include "cdefBF518.h"
+#endif
+
+#if defined(CONFIG_BF516)
+#include "cdefBF516.h"
+#endif
+
+#if defined(CONFIG_BF514)
+#include "cdefBF514.h"
+#endif
+#endif
+
+/* UART_IIR Register */
+#define STATUS(x)	((x << 1) & 0x06)
+#define STATUS_P1	0x02
+#define STATUS_P0	0x01
+
+#define BFIN_UART_NR_PORTS	2
+
+#define OFFSET_THR              0x00	/* Transmit Holding register            */
+#define OFFSET_RBR              0x00	/* Receive Buffer register              */
+#define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
+#define OFFSET_IER              0x04	/* Interrupt Enable Register            */
+#define OFFSET_DLH              0x04	/* Divisor Latch (High-Byte)            */
+#define OFFSET_IIR              0x08	/* Interrupt Identification Register    */
+#define OFFSET_LCR              0x0C	/* Line Control Register                */
+#define OFFSET_MCR              0x10	/* Modem Control Register               */
+#define OFFSET_LSR              0x14	/* Line Status Register                 */
+#define OFFSET_MSR              0x18	/* Modem Status Register                */
+#define OFFSET_SCR              0x1C	/* SCR Scratch Register                 */
+#define OFFSET_GCTL             0x24	/* Global Control Register              */
+
+/* DPMC*/
+#define bfin_read_STOPCK_OFF() bfin_read_STOPCK()
+#define bfin_write_STOPCK_OFF(val) bfin_write_STOPCK(val)
+#define STOPCK_OFF STOPCK
+
+/* PLL_DIV Masks													*/
+#define CCLK_DIV1 CSEL_DIV1	/*          CCLK = VCO / 1                                  */
+#define CCLK_DIV2 CSEL_DIV2	/*          CCLK = VCO / 2                                  */
+#define CCLK_DIV4 CSEL_DIV4	/*          CCLK = VCO / 4                                  */
+#define CCLK_DIV8 CSEL_DIV8	/*          CCLK = VCO / 8                                  */
+
+#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF512.h b/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
new file mode 100644
index 0000000..820c13c4
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
@@ -0,0 +1,46 @@
+/*
+ * File:         include/asm-blackfin/mach-bf518/cdefbf512.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:  system mmr register map
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _CDEF_BF512_H
+#define _CDEF_BF512_H
+
+/* include all Core registers and bit definitions */
+#include "defBF512.h"
+
+/* include core specific register pointer definitions */
+#include <asm/cdef_LPBlackfin.h>
+
+/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF512 */
+
+/* include cdefBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
+#include "cdefBF51x_base.h"
+
+#endif /* _CDEF_BF512_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF514.h b/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
new file mode 100644
index 0000000..9521e17
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
@@ -0,0 +1,48 @@
+/*
+ * File:         include/asm-blackfin/mach-bf518/cdefbf514.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:  system mmr register map
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _CDEF_BF514_H
+#define _CDEF_BF514_H
+
+/* include all Core registers and bit definitions */
+#include "defBF514.h"
+
+/* include core specific register pointer definitions */
+#include <asm/cdef_LPBlackfin.h>
+
+/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF514 */
+
+/* include cdefBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
+#include "cdefBF51x_base.h"
+
+/* The following are the #defines needed by ADSP-BF514 that are not in the common header */
+
+#endif /* _CDEF_BF514_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF516.h b/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
new file mode 100644
index 0000000..4e26ccf
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
@@ -0,0 +1,213 @@
+/*
+ * File:         include/asm-blackfin/mach-bf518/cdefbf516.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:  system mmr register map
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _CDEF_BF516_H
+#define _CDEF_BF516_H
+
+/* include all Core registers and bit definitions */
+#include "defBF516.h"
+
+/* include core specific register pointer definitions */
+#include <asm/cdef_LPBlackfin.h>
+
+/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF516 */
+
+/* include cdefBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
+#include "cdefBF51x_base.h"
+
+/* The following are the #defines needed by ADSP-BF516 that are not in the common header */
+
+/* 10/100 Ethernet Controller	(0xFFC03000 - 0xFFC031FF) */
+
+#define bfin_read_EMAC_OPMODE()			bfin_read32(EMAC_OPMODE)
+#define bfin_write_EMAC_OPMODE(val)		bfin_write32(EMAC_OPMODE, val)
+#define bfin_read_EMAC_ADDRLO()			bfin_read32(EMAC_ADDRLO)
+#define bfin_write_EMAC_ADDRLO(val)		bfin_write32(EMAC_ADDRLO, val)
+#define bfin_read_EMAC_ADDRHI()			bfin_read32(EMAC_ADDRHI)
+#define bfin_write_EMAC_ADDRHI(val)		bfin_write32(EMAC_ADDRHI, val)
+#define bfin_read_EMAC_HASHLO()			bfin_read32(EMAC_HASHLO)
+#define bfin_write_EMAC_HASHLO(val)		bfin_write32(EMAC_HASHLO, val)
+#define bfin_read_EMAC_HASHHI()			bfin_read32(EMAC_HASHHI)
+#define bfin_write_EMAC_HASHHI(val)		bfin_write32(EMAC_HASHHI, val)
+#define bfin_read_EMAC_STAADD()			bfin_read32(EMAC_STAADD)
+#define bfin_write_EMAC_STAADD(val)		bfin_write32(EMAC_STAADD, val)
+#define bfin_read_EMAC_STADAT()			bfin_read32(EMAC_STADAT)
+#define bfin_write_EMAC_STADAT(val)		bfin_write32(EMAC_STADAT, val)
+#define bfin_read_EMAC_FLC()			bfin_read32(EMAC_FLC)
+#define bfin_write_EMAC_FLC(val)		bfin_write32(EMAC_FLC, val)
+#define bfin_read_EMAC_VLAN1()			bfin_read32(EMAC_VLAN1)
+#define bfin_write_EMAC_VLAN1(val)		bfin_write32(EMAC_VLAN1, val)
+#define bfin_read_EMAC_VLAN2()			bfin_read32(EMAC_VLAN2)
+#define bfin_write_EMAC_VLAN2(val)		bfin_write32(EMAC_VLAN2, val)
+#define bfin_read_EMAC_WKUP_CTL()		bfin_read32(EMAC_WKUP_CTL)
+#define bfin_write_EMAC_WKUP_CTL(val)		bfin_write32(EMAC_WKUP_CTL, val)
+#define bfin_read_EMAC_WKUP_FFMSK0()		bfin_read32(EMAC_WKUP_FFMSK0)
+#define bfin_write_EMAC_WKUP_FFMSK0(val)	bfin_write32(EMAC_WKUP_FFMSK0, val)
+#define bfin_read_EMAC_WKUP_FFMSK1()		bfin_read32(EMAC_WKUP_FFMSK1)
+#define bfin_write_EMAC_WKUP_FFMSK1(val)	bfin_write32(EMAC_WKUP_FFMSK1, val)
+#define bfin_read_EMAC_WKUP_FFMSK2()		bfin_read32(EMAC_WKUP_FFMSK2)
+#define bfin_write_EMAC_WKUP_FFMSK2(val)	bfin_write32(EMAC_WKUP_FFMSK2, val)
+#define bfin_read_EMAC_WKUP_FFMSK3()		bfin_read32(EMAC_WKUP_FFMSK3)
+#define bfin_write_EMAC_WKUP_FFMSK3(val)	bfin_write32(EMAC_WKUP_FFMSK3, val)
+#define bfin_read_EMAC_WKUP_FFCMD()		bfin_read32(EMAC_WKUP_FFCMD)
+#define bfin_write_EMAC_WKUP_FFCMD(val)		bfin_write32(EMAC_WKUP_FFCMD, val)
+#define bfin_read_EMAC_WKUP_FFOFF()		bfin_read32(EMAC_WKUP_FFOFF)
+#define bfin_write_EMAC_WKUP_FFOFF(val)		bfin_write32(EMAC_WKUP_FFOFF, val)
+#define bfin_read_EMAC_WKUP_FFCRC0()		bfin_read32(EMAC_WKUP_FFCRC0)
+#define bfin_write_EMAC_WKUP_FFCRC0(val)	bfin_write32(EMAC_WKUP_FFCRC0, val)
+#define bfin_read_EMAC_WKUP_FFCRC1()		bfin_read32(EMAC_WKUP_FFCRC1)
+#define bfin_write_EMAC_WKUP_FFCRC1(val)	bfin_write32(EMAC_WKUP_FFCRC1, val)
+
+#define bfin_read_EMAC_SYSCTL()			bfin_read32(EMAC_SYSCTL)
+#define bfin_write_EMAC_SYSCTL(val)		bfin_write32(EMAC_SYSCTL, val)
+#define bfin_read_EMAC_SYSTAT()			bfin_read32(EMAC_SYSTAT)
+#define bfin_write_EMAC_SYSTAT(val)		bfin_write32(EMAC_SYSTAT, val)
+#define bfin_read_EMAC_RX_STAT()		bfin_read32(EMAC_RX_STAT)
+#define bfin_write_EMAC_RX_STAT(val)		bfin_write32(EMAC_RX_STAT, val)
+#define bfin_read_EMAC_RX_STKY()		bfin_read32(EMAC_RX_STKY)
+#define bfin_write_EMAC_RX_STKY(val)		bfin_write32(EMAC_RX_STKY, val)
+#define bfin_read_EMAC_RX_IRQE()		bfin_read32(EMAC_RX_IRQE)
+#define bfin_write_EMAC_RX_IRQE(val)		bfin_write32(EMAC_RX_IRQE, val)
+#define bfin_read_EMAC_TX_STAT()		bfin_read32(EMAC_TX_STAT)
+#define bfin_write_EMAC_TX_STAT(val)		bfin_write32(EMAC_TX_STAT, val)
+#define bfin_read_EMAC_TX_STKY()		bfin_read32(EMAC_TX_STKY)
+#define bfin_write_EMAC_TX_STKY(val)		bfin_write32(EMAC_TX_STKY, val)
+#define bfin_read_EMAC_TX_IRQE()		bfin_read32(EMAC_TX_IRQE)
+#define bfin_write_EMAC_TX_IRQE(val)		bfin_write32(EMAC_TX_IRQE, val)
+
+#define bfin_read_EMAC_MMC_CTL()		bfin_read32(EMAC_MMC_CTL)
+#define bfin_write_EMAC_MMC_CTL(val)		bfin_write32(EMAC_MMC_CTL, val)
+#define bfin_read_EMAC_MMC_RIRQS()		bfin_read32(EMAC_MMC_RIRQS)
+#define bfin_write_EMAC_MMC_RIRQS(val)		bfin_write32(EMAC_MMC_RIRQS, val)
+#define bfin_read_EMAC_MMC_RIRQE()		bfin_read32(EMAC_MMC_RIRQE)
+#define bfin_write_EMAC_MMC_RIRQE(val)		bfin_write32(EMAC_MMC_RIRQE, val)
+#define bfin_read_EMAC_MMC_TIRQS()		bfin_read32(EMAC_MMC_TIRQS)
+#define bfin_write_EMAC_MMC_TIRQS(val)		bfin_write32(EMAC_MMC_TIRQS, val)
+#define bfin_read_EMAC_MMC_TIRQE()		bfin_read32(EMAC_MMC_TIRQE)
+#define bfin_write_EMAC_MMC_TIRQE(val)		bfin_write32(EMAC_MMC_TIRQE, val)
+
+#define bfin_read_EMAC_RXC_OK()			bfin_read32(EMAC_RXC_OK)
+#define bfin_write_EMAC_RXC_OK(val)		bfin_write32(EMAC_RXC_OK, val)
+#define bfin_read_EMAC_RXC_FCS()		bfin_read32(EMAC_RXC_FCS)
+#define bfin_write_EMAC_RXC_FCS(val)		bfin_write32(EMAC_RXC_FCS, val)
+#define bfin_read_EMAC_RXC_ALIGN()		bfin_read32(EMAC_RXC_ALIGN)
+#define bfin_write_EMAC_RXC_ALIGN(val)		bfin_write32(EMAC_RXC_ALIGN, val)
+#define bfin_read_EMAC_RXC_OCTET()		bfin_read32(EMAC_RXC_OCTET)
+#define bfin_write_EMAC_RXC_OCTET(val)		bfin_write32(EMAC_RXC_OCTET, val)
+#define bfin_read_EMAC_RXC_DMAOVF()		bfin_read32(EMAC_RXC_DMAOVF)
+#define bfin_write_EMAC_RXC_DMAOVF(val)		bfin_write32(EMAC_RXC_DMAOVF, val)
+#define bfin_read_EMAC_RXC_UNICST()		bfin_read32(EMAC_RXC_UNICST)
+#define bfin_write_EMAC_RXC_UNICST(val)		bfin_write32(EMAC_RXC_UNICST, val)
+#define bfin_read_EMAC_RXC_MULTI()		bfin_read32(EMAC_RXC_MULTI)
+#define bfin_write_EMAC_RXC_MULTI(val)		bfin_write32(EMAC_RXC_MULTI, val)
+#define bfin_read_EMAC_RXC_BROAD()		bfin_read32(EMAC_RXC_BROAD)
+#define bfin_write_EMAC_RXC_BROAD(val)		bfin_write32(EMAC_RXC_BROAD, val)
+#define bfin_read_EMAC_RXC_LNERRI()		bfin_read32(EMAC_RXC_LNERRI)
+#define bfin_write_EMAC_RXC_LNERRI(val)		bfin_write32(EMAC_RXC_LNERRI, val)
+#define bfin_read_EMAC_RXC_LNERRO()		bfin_read32(EMAC_RXC_LNERRO)
+#define bfin_write_EMAC_RXC_LNERRO(val)		bfin_write32(EMAC_RXC_LNERRO, val)
+#define bfin_read_EMAC_RXC_LONG()		bfin_read32(EMAC_RXC_LONG)
+#define bfin_write_EMAC_RXC_LONG(val)		bfin_write32(EMAC_RXC_LONG, val)
+#define bfin_read_EMAC_RXC_MACCTL()		bfin_read32(EMAC_RXC_MACCTL)
+#define bfin_write_EMAC_RXC_MACCTL(val)		bfin_write32(EMAC_RXC_MACCTL, val)
+#define bfin_read_EMAC_RXC_OPCODE()		bfin_read32(EMAC_RXC_OPCODE)
+#define bfin_write_EMAC_RXC_OPCODE(val)		bfin_write32(EMAC_RXC_OPCODE, val)
+#define bfin_read_EMAC_RXC_PAUSE()		bfin_read32(EMAC_RXC_PAUSE)
+#define bfin_write_EMAC_RXC_PAUSE(val)		bfin_write32(EMAC_RXC_PAUSE, val)
+#define bfin_read_EMAC_RXC_ALLFRM()		bfin_read32(EMAC_RXC_ALLFRM)
+#define bfin_write_EMAC_RXC_ALLFRM(val)		bfin_write32(EMAC_RXC_ALLFRM, val)
+#define bfin_read_EMAC_RXC_ALLOCT()		bfin_read32(EMAC_RXC_ALLOCT)
+#define bfin_write_EMAC_RXC_ALLOCT(val)		bfin_write32(EMAC_RXC_ALLOCT, val)
+#define bfin_read_EMAC_RXC_TYPED()		bfin_read32(EMAC_RXC_TYPED)
+#define bfin_write_EMAC_RXC_TYPED(val)		bfin_write32(EMAC_RXC_TYPED, val)
+#define bfin_read_EMAC_RXC_SHORT()		bfin_read32(EMAC_RXC_SHORT)
+#define bfin_write_EMAC_RXC_SHORT(val)		bfin_write32(EMAC_RXC_SHORT, val)
+#define bfin_read_EMAC_RXC_EQ64()		bfin_read32(EMAC_RXC_EQ64)
+#define bfin_write_EMAC_RXC_EQ64(val)		bfin_write32(EMAC_RXC_EQ64, val)
+#define bfin_read_EMAC_RXC_LT128()		bfin_read32(EMAC_RXC_LT128)
+#define bfin_write_EMAC_RXC_LT128(val)		bfin_write32(EMAC_RXC_LT128, val)
+#define bfin_read_EMAC_RXC_LT256()		bfin_read32(EMAC_RXC_LT256)
+#define bfin_write_EMAC_RXC_LT256(val)		bfin_write32(EMAC_RXC_LT256, val)
+#define bfin_read_EMAC_RXC_LT512()		bfin_read32(EMAC_RXC_LT512)
+#define bfin_write_EMAC_RXC_LT512(val)		bfin_write32(EMAC_RXC_LT512, val)
+#define bfin_read_EMAC_RXC_LT1024()		bfin_read32(EMAC_RXC_LT1024)
+#define bfin_write_EMAC_RXC_LT1024(val)		bfin_write32(EMAC_RXC_LT1024, val)
+#define bfin_read_EMAC_RXC_GE1024()		bfin_read32(EMAC_RXC_GE1024)
+#define bfin_write_EMAC_RXC_GE1024(val)		bfin_write32(EMAC_RXC_GE1024, val)
+
+#define bfin_read_EMAC_TXC_OK()			bfin_read32(EMAC_TXC_OK)
+#define bfin_write_EMAC_TXC_OK(val)		bfin_write32(EMAC_TXC_OK, val)
+#define bfin_read_EMAC_TXC_1COL()		bfin_read32(EMAC_TXC_1COL)
+#define bfin_write_EMAC_TXC_1COL(val)		bfin_write32(EMAC_TXC_1COL, val)
+#define bfin_read_EMAC_TXC_GT1COL()		bfin_read32(EMAC_TXC_GT1COL)
+#define bfin_write_EMAC_TXC_GT1COL(val)		bfin_write32(EMAC_TXC_GT1COL, val)
+#define bfin_read_EMAC_TXC_OCTET()		bfin_read32(EMAC_TXC_OCTET)
+#define bfin_write_EMAC_TXC_OCTET(val)		bfin_write32(EMAC_TXC_OCTET, val)
+#define bfin_read_EMAC_TXC_DEFER()		bfin_read32(EMAC_TXC_DEFER)
+#define bfin_write_EMAC_TXC_DEFER(val)		bfin_write32(EMAC_TXC_DEFER, val)
+#define bfin_read_EMAC_TXC_LATECL()		bfin_read32(EMAC_TXC_LATECL)
+#define bfin_write_EMAC_TXC_LATECL(val)		bfin_write32(EMAC_TXC_LATECL, val)
+#define bfin_read_EMAC_TXC_XS_COL()		bfin_read32(EMAC_TXC_XS_COL)
+#define bfin_write_EMAC_TXC_XS_COL(val)		bfin_write32(EMAC_TXC_XS_COL, val)
+#define bfin_read_EMAC_TXC_DMAUND()		bfin_read32(EMAC_TXC_DMAUND)
+#define bfin_write_EMAC_TXC_DMAUND(val)		bfin_write32(EMAC_TXC_DMAUND, val)
+#define bfin_read_EMAC_TXC_CRSERR()		bfin_read32(EMAC_TXC_CRSERR)
+#define bfin_write_EMAC_TXC_CRSERR(val)		bfin_write32(EMAC_TXC_CRSERR, val)
+#define bfin_read_EMAC_TXC_UNICST()		bfin_read32(EMAC_TXC_UNICST)
+#define bfin_write_EMAC_TXC_UNICST(val)		bfin_write32(EMAC_TXC_UNICST, val)
+#define bfin_read_EMAC_TXC_MULTI()		bfin_read32(EMAC_TXC_MULTI)
+#define bfin_write_EMAC_TXC_MULTI(val)		bfin_write32(EMAC_TXC_MULTI, val)
+#define bfin_read_EMAC_TXC_BROAD()		bfin_read32(EMAC_TXC_BROAD)
+#define bfin_write_EMAC_TXC_BROAD(val)		bfin_write32(EMAC_TXC_BROAD, val)
+#define bfin_read_EMAC_TXC_XS_DFR()		bfin_read32(EMAC_TXC_XS_DFR)
+#define bfin_write_EMAC_TXC_XS_DFR(val)		bfin_write32(EMAC_TXC_XS_DFR, val)
+#define bfin_read_EMAC_TXC_MACCTL()		bfin_read32(EMAC_TXC_MACCTL)
+#define bfin_write_EMAC_TXC_MACCTL(val)		bfin_write32(EMAC_TXC_MACCTL, val)
+#define bfin_read_EMAC_TXC_ALLFRM()		bfin_read32(EMAC_TXC_ALLFRM)
+#define bfin_write_EMAC_TXC_ALLFRM(val)		bfin_write32(EMAC_TXC_ALLFRM, val)
+#define bfin_read_EMAC_TXC_ALLOCT()		bfin_read32(EMAC_TXC_ALLOCT)
+#define bfin_write_EMAC_TXC_ALLOCT(val)		bfin_write32(EMAC_TXC_ALLOCT, val)
+#define bfin_read_EMAC_TXC_EQ64()		bfin_read32(EMAC_TXC_EQ64)
+#define bfin_write_EMAC_TXC_EQ64(val)		bfin_write32(EMAC_TXC_EQ64, val)
+#define bfin_read_EMAC_TXC_LT128()		bfin_read32(EMAC_TXC_LT128)
+#define bfin_write_EMAC_TXC_LT128(val)		bfin_write32(EMAC_TXC_LT128, val)
+#define bfin_read_EMAC_TXC_LT256()		bfin_read32(EMAC_TXC_LT256)
+#define bfin_write_EMAC_TXC_LT256(val)		bfin_write32(EMAC_TXC_LT256, val)
+#define bfin_read_EMAC_TXC_LT512()		bfin_read32(EMAC_TXC_LT512)
+#define bfin_write_EMAC_TXC_LT512(val)		bfin_write32(EMAC_TXC_LT512, val)
+#define bfin_read_EMAC_TXC_LT1024()		bfin_read32(EMAC_TXC_LT1024)
+#define bfin_write_EMAC_TXC_LT1024(val)		bfin_write32(EMAC_TXC_LT1024, val)
+#define bfin_read_EMAC_TXC_GE1024()		bfin_read32(EMAC_TXC_GE1024)
+#define bfin_write_EMAC_TXC_GE1024(val)		bfin_write32(EMAC_TXC_GE1024, val)
+#define bfin_read_EMAC_TXC_ABORT()		bfin_read32(EMAC_TXC_ABORT)
+#define bfin_write_EMAC_TXC_ABORT(val)		bfin_write32(EMAC_TXC_ABORT, val)
+
+#endif /* _CDEF_BF516_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF518.h b/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
new file mode 100644
index 0000000..bafb370
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
@@ -0,0 +1,282 @@
+/*
+ * File:         arch/blackfin/mach-bf518/include/mach/cdefBF518.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:  System MMR register map
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _CDEF_BF518_H
+#define _CDEF_BF518_H
+
+/* include all Core registers and bit definitions */
+#include "defBF518.h"
+
+/* include core specific register pointer definitions */
+#include <asm/cdef_LPBlackfin.h>
+
+/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF518 */
+
+/* include cdefBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
+#include "cdefBF51x_base.h"
+
+/* The following are the #defines needed by ADSP-BF518 that are not in the common header */
+
+
+/* 10/100 Ethernet Controller	(0xFFC03000 - 0xFFC031FF) */
+
+#define bfin_read_EMAC_OPMODE()			bfin_read32(EMAC_OPMODE)
+#define bfin_write_EMAC_OPMODE(val)		bfin_write32(EMAC_OPMODE, val)
+#define bfin_read_EMAC_ADDRLO()			bfin_read32(EMAC_ADDRLO)
+#define bfin_write_EMAC_ADDRLO(val)		bfin_write32(EMAC_ADDRLO, val)
+#define bfin_read_EMAC_ADDRHI()			bfin_read32(EMAC_ADDRHI)
+#define bfin_write_EMAC_ADDRHI(val)		bfin_write32(EMAC_ADDRHI, val)
+#define bfin_read_EMAC_HASHLO()			bfin_read32(EMAC_HASHLO)
+#define bfin_write_EMAC_HASHLO(val)		bfin_write32(EMAC_HASHLO, val)
+#define bfin_read_EMAC_HASHHI()			bfin_read32(EMAC_HASHHI)
+#define bfin_write_EMAC_HASHHI(val)		bfin_write32(EMAC_HASHHI, val)
+#define bfin_read_EMAC_STAADD()			bfin_read32(EMAC_STAADD)
+#define bfin_write_EMAC_STAADD(val)		bfin_write32(EMAC_STAADD, val)
+#define bfin_read_EMAC_STADAT()			bfin_read32(EMAC_STADAT)
+#define bfin_write_EMAC_STADAT(val)		bfin_write32(EMAC_STADAT, val)
+#define bfin_read_EMAC_FLC()			bfin_read32(EMAC_FLC)
+#define bfin_write_EMAC_FLC(val)		bfin_write32(EMAC_FLC, val)
+#define bfin_read_EMAC_VLAN1()			bfin_read32(EMAC_VLAN1)
+#define bfin_write_EMAC_VLAN1(val)		bfin_write32(EMAC_VLAN1, val)
+#define bfin_read_EMAC_VLAN2()			bfin_read32(EMAC_VLAN2)
+#define bfin_write_EMAC_VLAN2(val)		bfin_write32(EMAC_VLAN2, val)
+#define bfin_read_EMAC_WKUP_CTL()		bfin_read32(EMAC_WKUP_CTL)
+#define bfin_write_EMAC_WKUP_CTL(val)		bfin_write32(EMAC_WKUP_CTL, val)
+#define bfin_read_EMAC_WKUP_FFMSK0()		bfin_read32(EMAC_WKUP_FFMSK0)
+#define bfin_write_EMAC_WKUP_FFMSK0(val)	bfin_write32(EMAC_WKUP_FFMSK0, val)
+#define bfin_read_EMAC_WKUP_FFMSK1()		bfin_read32(EMAC_WKUP_FFMSK1)
+#define bfin_write_EMAC_WKUP_FFMSK1(val)	bfin_write32(EMAC_WKUP_FFMSK1, val)
+#define bfin_read_EMAC_WKUP_FFMSK2()		bfin_read32(EMAC_WKUP_FFMSK2)
+#define bfin_write_EMAC_WKUP_FFMSK2(val)	bfin_write32(EMAC_WKUP_FFMSK2, val)
+#define bfin_read_EMAC_WKUP_FFMSK3()		bfin_read32(EMAC_WKUP_FFMSK3)
+#define bfin_write_EMAC_WKUP_FFMSK3(val)	bfin_write32(EMAC_WKUP_FFMSK3, val)
+#define bfin_read_EMAC_WKUP_FFCMD()		bfin_read32(EMAC_WKUP_FFCMD)
+#define bfin_write_EMAC_WKUP_FFCMD(val)		bfin_write32(EMAC_WKUP_FFCMD, val)
+#define bfin_read_EMAC_WKUP_FFOFF()		bfin_read32(EMAC_WKUP_FFOFF)
+#define bfin_write_EMAC_WKUP_FFOFF(val)		bfin_write32(EMAC_WKUP_FFOFF, val)
+#define bfin_read_EMAC_WKUP_FFCRC0()		bfin_read32(EMAC_WKUP_FFCRC0)
+#define bfin_write_EMAC_WKUP_FFCRC0(val)	bfin_write32(EMAC_WKUP_FFCRC0, val)
+#define bfin_read_EMAC_WKUP_FFCRC1()		bfin_read32(EMAC_WKUP_FFCRC1)
+#define bfin_write_EMAC_WKUP_FFCRC1(val)	bfin_write32(EMAC_WKUP_FFCRC1, val)
+
+#define bfin_read_EMAC_SYSCTL()			bfin_read32(EMAC_SYSCTL)
+#define bfin_write_EMAC_SYSCTL(val)		bfin_write32(EMAC_SYSCTL, val)
+#define bfin_read_EMAC_SYSTAT()			bfin_read32(EMAC_SYSTAT)
+#define bfin_write_EMAC_SYSTAT(val)		bfin_write32(EMAC_SYSTAT, val)
+#define bfin_read_EMAC_RX_STAT()		bfin_read32(EMAC_RX_STAT)
+#define bfin_write_EMAC_RX_STAT(val)		bfin_write32(EMAC_RX_STAT, val)
+#define bfin_read_EMAC_RX_STKY()		bfin_read32(EMAC_RX_STKY)
+#define bfin_write_EMAC_RX_STKY(val)		bfin_write32(EMAC_RX_STKY, val)
+#define bfin_read_EMAC_RX_IRQE()		bfin_read32(EMAC_RX_IRQE)
+#define bfin_write_EMAC_RX_IRQE(val)		bfin_write32(EMAC_RX_IRQE, val)
+#define bfin_read_EMAC_TX_STAT()		bfin_read32(EMAC_TX_STAT)
+#define bfin_write_EMAC_TX_STAT(val)		bfin_write32(EMAC_TX_STAT, val)
+#define bfin_read_EMAC_TX_STKY()		bfin_read32(EMAC_TX_STKY)
+#define bfin_write_EMAC_TX_STKY(val)		bfin_write32(EMAC_TX_STKY, val)
+#define bfin_read_EMAC_TX_IRQE()		bfin_read32(EMAC_TX_IRQE)
+#define bfin_write_EMAC_TX_IRQE(val)		bfin_write32(EMAC_TX_IRQE, val)
+
+#define bfin_read_EMAC_MMC_CTL()		bfin_read32(EMAC_MMC_CTL)
+#define bfin_write_EMAC_MMC_CTL(val)		bfin_write32(EMAC_MMC_CTL, val)
+#define bfin_read_EMAC_MMC_RIRQS()		bfin_read32(EMAC_MMC_RIRQS)
+#define bfin_write_EMAC_MMC_RIRQS(val)		bfin_write32(EMAC_MMC_RIRQS, val)
+#define bfin_read_EMAC_MMC_RIRQE()		bfin_read32(EMAC_MMC_RIRQE)
+#define bfin_write_EMAC_MMC_RIRQE(val)		bfin_write32(EMAC_MMC_RIRQE, val)
+#define bfin_read_EMAC_MMC_TIRQS()		bfin_read32(EMAC_MMC_TIRQS)
+#define bfin_write_EMAC_MMC_TIRQS(val)		bfin_write32(EMAC_MMC_TIRQS, val)
+#define bfin_read_EMAC_MMC_TIRQE()		bfin_read32(EMAC_MMC_TIRQE)
+#define bfin_write_EMAC_MMC_TIRQE(val)		bfin_write32(EMAC_MMC_TIRQE, val)
+
+#define bfin_read_EMAC_RXC_OK()			bfin_read32(EMAC_RXC_OK)
+#define bfin_write_EMAC_RXC_OK(val)		bfin_write32(EMAC_RXC_OK, val)
+#define bfin_read_EMAC_RXC_FCS()		bfin_read32(EMAC_RXC_FCS)
+#define bfin_write_EMAC_RXC_FCS(val)		bfin_write32(EMAC_RXC_FCS, val)
+#define bfin_read_EMAC_RXC_ALIGN()		bfin_read32(EMAC_RXC_ALIGN)
+#define bfin_write_EMAC_RXC_ALIGN(val)		bfin_write32(EMAC_RXC_ALIGN, val)
+#define bfin_read_EMAC_RXC_OCTET()		bfin_read32(EMAC_RXC_OCTET)
+#define bfin_write_EMAC_RXC_OCTET(val)		bfin_write32(EMAC_RXC_OCTET, val)
+#define bfin_read_EMAC_RXC_DMAOVF()		bfin_read32(EMAC_RXC_DMAOVF)
+#define bfin_write_EMAC_RXC_DMAOVF(val)		bfin_write32(EMAC_RXC_DMAOVF, val)
+#define bfin_read_EMAC_RXC_UNICST()		bfin_read32(EMAC_RXC_UNICST)
+#define bfin_write_EMAC_RXC_UNICST(val)		bfin_write32(EMAC_RXC_UNICST, val)
+#define bfin_read_EMAC_RXC_MULTI()		bfin_read32(EMAC_RXC_MULTI)
+#define bfin_write_EMAC_RXC_MULTI(val)		bfin_write32(EMAC_RXC_MULTI, val)
+#define bfin_read_EMAC_RXC_BROAD()		bfin_read32(EMAC_RXC_BROAD)
+#define bfin_write_EMAC_RXC_BROAD(val)		bfin_write32(EMAC_RXC_BROAD, val)
+#define bfin_read_EMAC_RXC_LNERRI()		bfin_read32(EMAC_RXC_LNERRI)
+#define bfin_write_EMAC_RXC_LNERRI(val)		bfin_write32(EMAC_RXC_LNERRI, val)
+#define bfin_read_EMAC_RXC_LNERRO()		bfin_read32(EMAC_RXC_LNERRO)
+#define bfin_write_EMAC_RXC_LNERRO(val)		bfin_write32(EMAC_RXC_LNERRO, val)
+#define bfin_read_EMAC_RXC_LONG()		bfin_read32(EMAC_RXC_LONG)
+#define bfin_write_EMAC_RXC_LONG(val)		bfin_write32(EMAC_RXC_LONG, val)
+#define bfin_read_EMAC_RXC_MACCTL()		bfin_read32(EMAC_RXC_MACCTL)
+#define bfin_write_EMAC_RXC_MACCTL(val)		bfin_write32(EMAC_RXC_MACCTL, val)
+#define bfin_read_EMAC_RXC_OPCODE()		bfin_read32(EMAC_RXC_OPCODE)
+#define bfin_write_EMAC_RXC_OPCODE(val)		bfin_write32(EMAC_RXC_OPCODE, val)
+#define bfin_read_EMAC_RXC_PAUSE()		bfin_read32(EMAC_RXC_PAUSE)
+#define bfin_write_EMAC_RXC_PAUSE(val)		bfin_write32(EMAC_RXC_PAUSE, val)
+#define bfin_read_EMAC_RXC_ALLFRM()		bfin_read32(EMAC_RXC_ALLFRM)
+#define bfin_write_EMAC_RXC_ALLFRM(val)		bfin_write32(EMAC_RXC_ALLFRM, val)
+#define bfin_read_EMAC_RXC_ALLOCT()		bfin_read32(EMAC_RXC_ALLOCT)
+#define bfin_write_EMAC_RXC_ALLOCT(val)		bfin_write32(EMAC_RXC_ALLOCT, val)
+#define bfin_read_EMAC_RXC_TYPED()		bfin_read32(EMAC_RXC_TYPED)
+#define bfin_write_EMAC_RXC_TYPED(val)		bfin_write32(EMAC_RXC_TYPED, val)
+#define bfin_read_EMAC_RXC_SHORT()		bfin_read32(EMAC_RXC_SHORT)
+#define bfin_write_EMAC_RXC_SHORT(val)		bfin_write32(EMAC_RXC_SHORT, val)
+#define bfin_read_EMAC_RXC_EQ64()		bfin_read32(EMAC_RXC_EQ64)
+#define bfin_write_EMAC_RXC_EQ64(val)		bfin_write32(EMAC_RXC_EQ64, val)
+#define bfin_read_EMAC_RXC_LT128()		bfin_read32(EMAC_RXC_LT128)
+#define bfin_write_EMAC_RXC_LT128(val)		bfin_write32(EMAC_RXC_LT128, val)
+#define bfin_read_EMAC_RXC_LT256()		bfin_read32(EMAC_RXC_LT256)
+#define bfin_write_EMAC_RXC_LT256(val)		bfin_write32(EMAC_RXC_LT256, val)
+#define bfin_read_EMAC_RXC_LT512()		bfin_read32(EMAC_RXC_LT512)
+#define bfin_write_EMAC_RXC_LT512(val)		bfin_write32(EMAC_RXC_LT512, val)
+#define bfin_read_EMAC_RXC_LT1024()		bfin_read32(EMAC_RXC_LT1024)
+#define bfin_write_EMAC_RXC_LT1024(val)		bfin_write32(EMAC_RXC_LT1024, val)
+#define bfin_read_EMAC_RXC_GE1024()		bfin_read32(EMAC_RXC_GE1024)
+#define bfin_write_EMAC_RXC_GE1024(val)		bfin_write32(EMAC_RXC_GE1024, val)
+
+#define bfin_read_EMAC_TXC_OK()			bfin_read32(EMAC_TXC_OK)
+#define bfin_write_EMAC_TXC_OK(val)		bfin_write32(EMAC_TXC_OK, val)
+#define bfin_read_EMAC_TXC_1COL()		bfin_read32(EMAC_TXC_1COL)
+#define bfin_write_EMAC_TXC_1COL(val)		bfin_write32(EMAC_TXC_1COL, val)
+#define bfin_read_EMAC_TXC_GT1COL()		bfin_read32(EMAC_TXC_GT1COL)
+#define bfin_write_EMAC_TXC_GT1COL(val)		bfin_write32(EMAC_TXC_GT1COL, val)
+#define bfin_read_EMAC_TXC_OCTET()		bfin_read32(EMAC_TXC_OCTET)
+#define bfin_write_EMAC_TXC_OCTET(val)		bfin_write32(EMAC_TXC_OCTET, val)
+#define bfin_read_EMAC_TXC_DEFER()		bfin_read32(EMAC_TXC_DEFER)
+#define bfin_write_EMAC_TXC_DEFER(val)		bfin_write32(EMAC_TXC_DEFER, val)
+#define bfin_read_EMAC_TXC_LATECL()		bfin_read32(EMAC_TXC_LATECL)
+#define bfin_write_EMAC_TXC_LATECL(val)		bfin_write32(EMAC_TXC_LATECL, val)
+#define bfin_read_EMAC_TXC_XS_COL()		bfin_read32(EMAC_TXC_XS_COL)
+#define bfin_write_EMAC_TXC_XS_COL(val)		bfin_write32(EMAC_TXC_XS_COL, val)
+#define bfin_read_EMAC_TXC_DMAUND()		bfin_read32(EMAC_TXC_DMAUND)
+#define bfin_write_EMAC_TXC_DMAUND(val)		bfin_write32(EMAC_TXC_DMAUND, val)
+#define bfin_read_EMAC_TXC_CRSERR()		bfin_read32(EMAC_TXC_CRSERR)
+#define bfin_write_EMAC_TXC_CRSERR(val)		bfin_write32(EMAC_TXC_CRSERR, val)
+#define bfin_read_EMAC_TXC_UNICST()		bfin_read32(EMAC_TXC_UNICST)
+#define bfin_write_EMAC_TXC_UNICST(val)		bfin_write32(EMAC_TXC_UNICST, val)
+#define bfin_read_EMAC_TXC_MULTI()		bfin_read32(EMAC_TXC_MULTI)
+#define bfin_write_EMAC_TXC_MULTI(val)		bfin_write32(EMAC_TXC_MULTI, val)
+#define bfin_read_EMAC_TXC_BROAD()		bfin_read32(EMAC_TXC_BROAD)
+#define bfin_write_EMAC_TXC_BROAD(val)		bfin_write32(EMAC_TXC_BROAD, val)
+#define bfin_read_EMAC_TXC_XS_DFR()		bfin_read32(EMAC_TXC_XS_DFR)
+#define bfin_write_EMAC_TXC_XS_DFR(val)		bfin_write32(EMAC_TXC_XS_DFR, val)
+#define bfin_read_EMAC_TXC_MACCTL()		bfin_read32(EMAC_TXC_MACCTL)
+#define bfin_write_EMAC_TXC_MACCTL(val)		bfin_write32(EMAC_TXC_MACCTL, val)
+#define bfin_read_EMAC_TXC_ALLFRM()		bfin_read32(EMAC_TXC_ALLFRM)
+#define bfin_write_EMAC_TXC_ALLFRM(val)		bfin_write32(EMAC_TXC_ALLFRM, val)
+#define bfin_read_EMAC_TXC_ALLOCT()		bfin_read32(EMAC_TXC_ALLOCT)
+#define bfin_write_EMAC_TXC_ALLOCT(val)		bfin_write32(EMAC_TXC_ALLOCT, val)
+#define bfin_read_EMAC_TXC_EQ64()		bfin_read32(EMAC_TXC_EQ64)
+#define bfin_write_EMAC_TXC_EQ64(val)		bfin_write32(EMAC_TXC_EQ64, val)
+#define bfin_read_EMAC_TXC_LT128()		bfin_read32(EMAC_TXC_LT128)
+#define bfin_write_EMAC_TXC_LT128(val)		bfin_write32(EMAC_TXC_LT128, val)
+#define bfin_read_EMAC_TXC_LT256()		bfin_read32(EMAC_TXC_LT256)
+#define bfin_write_EMAC_TXC_LT256(val)		bfin_write32(EMAC_TXC_LT256, val)
+#define bfin_read_EMAC_TXC_LT512()		bfin_read32(EMAC_TXC_LT512)
+#define bfin_write_EMAC_TXC_LT512(val)		bfin_write32(EMAC_TXC_LT512, val)
+#define bfin_read_EMAC_TXC_LT1024()		bfin_read32(EMAC_TXC_LT1024)
+#define bfin_write_EMAC_TXC_LT1024(val)		bfin_write32(EMAC_TXC_LT1024, val)
+#define bfin_read_EMAC_TXC_GE1024()		bfin_read32(EMAC_TXC_GE1024)
+#define bfin_write_EMAC_TXC_GE1024(val)		bfin_write32(EMAC_TXC_GE1024, val)
+#define bfin_read_EMAC_TXC_ABORT()		bfin_read32(EMAC_TXC_ABORT)
+#define bfin_write_EMAC_TXC_ABORT(val)		bfin_write32(EMAC_TXC_ABORT, val)
+
+/* Removable Storage Interface Registers */
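+/* (SD / SDIO / MMC and CE-ATA host interface) */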
+
+#define bfin_read_RSI_PWR_CTL()        bfin_read16(RSI_PWR_CONTROL)
+#define bfin_write_RSI_PWR_CTL(val)    bfin_write16(RSI_PWR_CONTROL, val)
+#define bfin_read_RSI_CLK_CTL()	       bfin_read16(RSI_CLK_CONTROL)
+#define bfin_write_RSI_CLK_CTL(val)    bfin_write16(RSI_CLK_CONTROL, val)
+#define bfin_read_RSI_ARGUMENT()       bfin_read32(RSI_ARGUMENT)
+#define bfin_write_RSI_ARGUMENT(val)   bfin_write32(RSI_ARGUMENT, val)
+#define bfin_read_RSI_COMMAND()        bfin_read16(RSI_COMMAND)
+#define bfin_write_RSI_COMMAND(val)    bfin_write16(RSI_COMMAND, val)
+#define bfin_read_RSI_RESP_CMD()       bfin_read16(RSI_RESP_CMD)
+#define bfin_write_RSI_RESP_CMD(val)   bfin_write16(RSI_RESP_CMD, val)
+#define bfin_read_RSI_RESPONSE0()      bfin_read32(RSI_RESPONSE0)
+#define bfin_write_RSI_RESPONSE0(val)  bfin_write32(RSI_RESPONSE0, val)
+#define bfin_read_RSI_RESPONSE1()      bfin_read32(RSI_RESPONSE1)
+#define bfin_write_RSI_RESPONSE1(val)  bfin_write32(RSI_RESPONSE1, val)
+#define bfin_read_RSI_RESPONSE2()      bfin_read32(RSI_RESPONSE2)
+#define bfin_write_RSI_RESPONSE2(val)  bfin_write32(RSI_RESPONSE2, val)
+#define bfin_read_RSI_RESPONSE3()      bfin_read32(RSI_RESPONSE3)
+#define bfin_write_RSI_RESPONSE3(val)  bfin_write32(RSI_RESPONSE3, val)
+#define bfin_read_RSI_DATA_TIMER()     bfin_read32(RSI_DATA_TIMER)
+#define bfin_write_RSI_DATA_TIMER(val) bfin_write32(RSI_DATA_TIMER, val)
+#define bfin_read_RSI_DATA_LGTH()      bfin_read16(RSI_DATA_LGTH)
+#define bfin_write_RSI_DATA_LGTH(val)  bfin_write16(RSI_DATA_LGTH, val)
+#define bfin_read_RSI_DATA_CTL()       bfin_read16(RSI_DATA_CONTROL)
+#define bfin_write_RSI_DATA_CTL(val)   bfin_write16(RSI_DATA_CONTROL, val)
+#define bfin_read_RSI_DATA_CNT()       bfin_read16(RSI_DATA_CNT)
+#define bfin_write_RSI_DATA_CNT(val)   bfin_write16(RSI_DATA_CNT, val)
+#define bfin_read_RSI_STATUS()         bfin_read32(RSI_STATUS)
+#define bfin_write_RSI_STATUS(val)     bfin_write32(RSI_STATUS, val)
+#define bfin_read_RSI_STATUS_CLR()     bfin_read16(RSI_STATUSCL)
+#define bfin_write_RSI_STATUS_CLR(val) bfin_write16(RSI_STATUSCL, val)
+#define bfin_read_RSI_MASK0()          bfin_read32(RSI_MASK0)
+#define bfin_write_RSI_MASK0(val)      bfin_write32(RSI_MASK0, val)
+#define bfin_read_RSI_MASK1()          bfin_read32(RSI_MASK1)
+#define bfin_write_RSI_MASK1(val)      bfin_write32(RSI_MASK1, val)
+#define bfin_read_RSI_FIFO_CNT()       bfin_read16(RSI_FIFO_CNT)
+#define bfin_write_RSI_FIFO_CNT(val)   bfin_write16(RSI_FIFO_CNT, val)
+#define bfin_read_RSI_CEATA_CTL()      bfin_read16(RSI_CEATA_CONTROL)
+#define bfin_write_RSI_CEATA_CTL(val)  bfin_write16(RSI_CEATA_CONTROL, val)
+#define bfin_read_RSI_FIFO()           bfin_read32(RSI_FIFO)
+#define bfin_write_RSI_FIFO(val)       bfin_write32(RSI_FIFO, val)
+#define bfin_read_RSI_E_STATUS()       bfin_read16(RSI_ESTAT)
+#define bfin_write_RSI_E_STATUS(val)   bfin_write16(RSI_ESTAT, val)
+#define bfin_read_RSI_E_MASK()         bfin_read16(RSI_EMASK)
+#define bfin_write_RSI_E_MASK(val)     bfin_write16(RSI_EMASK, val)
+#define bfin_read_RSI_CFG()            bfin_read16(RSI_CONFIG)
+#define bfin_write_RSI_CFG(val)        bfin_write16(RSI_CONFIG, val)
+#define bfin_read_RSI_RD_WAIT_EN()     bfin_read16(RSI_RD_WAIT_EN)
+#define bfin_write_RSI_RD_WAIT_EN(val) bfin_write16(RSI_RD_WAIT_EN, val)
+#define bfin_read_RSI_PID0()           bfin_read16(RSI_PID0)
+#define bfin_write_RSI_PID0(val)       bfin_write16(RSI_PID0, val)
+#define bfin_read_RSI_PID1()           bfin_read16(RSI_PID1)
+#define bfin_write_RSI_PID1(val)       bfin_write16(RSI_PID1, val)
+#define bfin_read_RSI_PID2()           bfin_read16(RSI_PID2)
+#define bfin_write_RSI_PID2(val)       bfin_write16(RSI_PID2, val)
+#define bfin_read_RSI_PID3()           bfin_read16(RSI_PID3)
+#define bfin_write_RSI_PID3(val)       bfin_write16(RSI_PID3, val)
+#define bfin_read_RSI_PID4()           bfin_read16(RSI_PID4)
+#define bfin_write_RSI_PID4(val)       bfin_write16(RSI_PID4, val)
+#define bfin_read_RSI_PID5()           bfin_read16(RSI_PID5)
+#define bfin_write_RSI_PID5(val)       bfin_write16(RSI_PID5, val)
+#define bfin_read_RSI_PID6()           bfin_read16(RSI_PID6)
+#define bfin_write_RSI_PID6(val)       bfin_write16(RSI_PID6, val)
+#define bfin_read_RSI_PID7()           bfin_read16(RSI_PID7)
+#define bfin_write_RSI_PID7(val)       bfin_write16(RSI_PID7, val)
+
+
+#endif /* _CDEF_BF518_H */
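
[Editor's note, not part of the patch: a minimal usage sketch for the accessors added above. bfin_read_EMAC_OPMODE()/bfin_write_EMAC_OPMODE() come from the hunk above and <asm/blackfin.h> is the umbrella header these files hang off; the RE (receiver enable) bit mask and the SSYNC() barrier are assumed to be provided by the BF518 definition headers and are used purely for illustration.]

#include <asm/blackfin.h>	/* pulls in the mach-specific cdef/def headers */

/* Enable the EMAC receiver with a read-modify-write of EMAC_OPMODE. */
static void emac_rx_enable_example(void)
{
	/* RE is assumed to be the receive-enable bit from defBF518.h */
	bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | RE);
	SSYNC();	/* wait for the MMR write to complete */
}
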
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h b/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h
new file mode 100644
index 0000000..ee3d473
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h
@@ -0,0 +1,1208 @@
+/*
+ * File:         arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _CDEF_BF51X_BASE_H
+#define _CDEF_BF51X_BASE_H
+
+#include <asm/blackfin.h>
+
+#include "defBF51x_base.h"
+
+/* Include core specific register pointer definitions 								*/
+#include <asm/cdef_LPBlackfin.h>
+
+/* ==== begin from cdefBF534.h ==== */
+
+/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
+#define bfin_read_PLL_CTL()			bfin_read16(PLL_CTL)
+#define bfin_read_PLL_DIV()			bfin_read16(PLL_DIV)
+#define bfin_write_PLL_DIV(val)			bfin_write16(PLL_DIV, val)
+#define bfin_read_VR_CTL()			bfin_read16(VR_CTL)
+#define bfin_read_PLL_STAT()			bfin_read16(PLL_STAT)
+#define bfin_write_PLL_STAT(val)		bfin_write16(PLL_STAT, val)
+#define bfin_read_PLL_LOCKCNT()			bfin_read16(PLL_LOCKCNT)
+#define bfin_write_PLL_LOCKCNT(val)		bfin_write16(PLL_LOCKCNT, val)
+#define bfin_read_CHIPID()			bfin_read32(CHIPID)
+#define bfin_write_CHIPID(val)			bfin_write32(CHIPID, val)
+
+
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)							*/
+#define bfin_read_SWRST()			bfin_read16(SWRST)
+#define bfin_write_SWRST(val)			bfin_write16(SWRST, val)
+#define bfin_read_SYSCR()			bfin_read16(SYSCR)
+#define bfin_write_SYSCR(val)			bfin_write16(SYSCR, val)
+
+#define bfin_read_SIC_RVECT()			bfin_read32(SIC_RVECT)
+#define bfin_write_SIC_RVECT(val)		bfin_write32(SIC_RVECT, val)
+#define bfin_read_SIC_IMASK0()			bfin_read32(SIC_IMASK0)
+#define bfin_write_SIC_IMASK0(val)		bfin_write32(SIC_IMASK0, val)
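+/* Indexed forms: bank 1 of each SIC register (IMASK1/ISR1/IWR1) sits 0x40
+ * bytes above bank 0, hence the ((x) << 6) offset for x = 0 or 1.
+ */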
+#define bfin_read_SIC_IMASK(x)			bfin_read32(SIC_IMASK0 + ((x) << 6))
+#define bfin_write_SIC_IMASK(x, val)		bfin_write32((SIC_IMASK0 + ((x) << 6)), val)
+
+#define bfin_read_SIC_IAR0()			bfin_read32(SIC_IAR0)
+#define bfin_write_SIC_IAR0(val)		bfin_write32(SIC_IAR0, val)
+#define bfin_read_SIC_IAR1()			bfin_read32(SIC_IAR1)
+#define bfin_write_SIC_IAR1(val)		bfin_write32(SIC_IAR1, val)
+#define bfin_read_SIC_IAR2()			bfin_read32(SIC_IAR2)
+#define bfin_write_SIC_IAR2(val)		bfin_write32(SIC_IAR2, val)
+#define bfin_read_SIC_IAR3()			bfin_read32(SIC_IAR3)
+#define bfin_write_SIC_IAR3(val)		bfin_write32(SIC_IAR3, val)
+
+#define bfin_read_SIC_ISR0()			bfin_read32(SIC_ISR0)
+#define bfin_write_SIC_ISR0(val)		bfin_write32(SIC_ISR0, val)
+#define bfin_read_SIC_ISR(x)			bfin_read32(SIC_ISR0 + ((x) << 6))
+#define bfin_write_SIC_ISR(x, val)		bfin_write32((SIC_ISR0 + ((x) << 6)), val)
+
+#define bfin_read_SIC_IWR0()			bfin_read32(SIC_IWR0)
+#define bfin_write_SIC_IWR0(val)		bfin_write32(SIC_IWR0, val)
+#define bfin_read_SIC_IWR(x)			bfin_read32(SIC_IWR0 + ((x) << 6))
+#define bfin_write_SIC_IWR(x, val)		bfin_write32((SIC_IWR0 + ((x) << 6)), val)
+
+/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
+
+#define bfin_read_SIC_IMASK1()			bfin_read32(SIC_IMASK1)
+#define bfin_write_SIC_IMASK1(val)		bfin_write32(SIC_IMASK1, val)
+#define bfin_read_SIC_IAR4()			bfin_read32(SIC_IAR4)
+#define bfin_write_SIC_IAR4(val)		bfin_write32(SIC_IAR4, val)
+#define bfin_read_SIC_IAR5()			bfin_read32(SIC_IAR5)
+#define bfin_write_SIC_IAR5(val)		bfin_write32(SIC_IAR5, val)
+#define bfin_read_SIC_IAR6()			bfin_read32(SIC_IAR6)
+#define bfin_write_SIC_IAR6(val)		bfin_write32(SIC_IAR6, val)
+#define bfin_read_SIC_IAR7()			bfin_read32(SIC_IAR7)
+#define bfin_write_SIC_IAR7(val)		bfin_write32(SIC_IAR7, val)
+#define bfin_read_SIC_ISR1()			bfin_read32(SIC_ISR1)
+#define bfin_write_SIC_ISR1(val)		bfin_write32(SIC_ISR1, val)
+#define bfin_read_SIC_IWR1()			bfin_read32(SIC_IWR1)
+#define bfin_write_SIC_IWR1(val)		bfin_write32(SIC_IWR1, val)
+
+/* Watchdog Timer		(0xFFC00200 - 0xFFC002FF)									*/
+#define bfin_read_WDOG_CTL()			bfin_read16(WDOG_CTL)
+#define bfin_write_WDOG_CTL(val)		bfin_write16(WDOG_CTL, val)
+#define bfin_read_WDOG_CNT()			bfin_read32(WDOG_CNT)
+#define bfin_write_WDOG_CNT(val)		bfin_write32(WDOG_CNT, val)
+#define bfin_read_WDOG_STAT()			bfin_read32(WDOG_STAT)
+#define bfin_write_WDOG_STAT(val)		bfin_write32(WDOG_STAT, val)
+
+
+/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
+#define bfin_read_RTC_STAT()			bfin_read32(RTC_STAT)
+#define bfin_write_RTC_STAT(val)		bfin_write32(RTC_STAT, val)
+#define bfin_read_RTC_ICTL()			bfin_read16(RTC_ICTL)
+#define bfin_write_RTC_ICTL(val)		bfin_write16(RTC_ICTL, val)
+#define bfin_read_RTC_ISTAT()			bfin_read16(RTC_ISTAT)
+#define bfin_write_RTC_ISTAT(val)		bfin_write16(RTC_ISTAT, val)
+#define bfin_read_RTC_SWCNT()			bfin_read16(RTC_SWCNT)
+#define bfin_write_RTC_SWCNT(val)		bfin_write16(RTC_SWCNT, val)
+#define bfin_read_RTC_ALARM()			bfin_read32(RTC_ALARM)
+#define bfin_write_RTC_ALARM(val)		bfin_write32(RTC_ALARM, val)
+#define bfin_read_RTC_FAST()			bfin_read16(RTC_FAST)
+#define bfin_write_RTC_FAST(val)		bfin_write16(RTC_FAST, val)
+#define bfin_read_RTC_PREN()			bfin_read16(RTC_PREN)
+#define bfin_write_RTC_PREN(val)		bfin_write16(RTC_PREN, val)
+
+
+/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
+#define bfin_read_UART0_THR()			bfin_read16(UART0_THR)
+#define bfin_write_UART0_THR(val)		bfin_write16(UART0_THR, val)
+#define bfin_read_UART0_RBR()			bfin_read16(UART0_RBR)
+#define bfin_write_UART0_RBR(val)		bfin_write16(UART0_RBR, val)
+#define bfin_read_UART0_DLL()			bfin_read16(UART0_DLL)
+#define bfin_write_UART0_DLL(val)		bfin_write16(UART0_DLL, val)
+#define bfin_read_UART0_IER()			bfin_read16(UART0_IER)
+#define bfin_write_UART0_IER(val)		bfin_write16(UART0_IER, val)
+#define bfin_read_UART0_DLH()			bfin_read16(UART0_DLH)
+#define bfin_write_UART0_DLH(val)		bfin_write16(UART0_DLH, val)
+#define bfin_read_UART0_IIR()			bfin_read16(UART0_IIR)
+#define bfin_write_UART0_IIR(val)		bfin_write16(UART0_IIR, val)
+#define bfin_read_UART0_LCR()			bfin_read16(UART0_LCR)
+#define bfin_write_UART0_LCR(val)		bfin_write16(UART0_LCR, val)
+#define bfin_read_UART0_MCR()			bfin_read16(UART0_MCR)
+#define bfin_write_UART0_MCR(val)		bfin_write16(UART0_MCR, val)
+#define bfin_read_UART0_LSR()			bfin_read16(UART0_LSR)
+#define bfin_write_UART0_LSR(val)		bfin_write16(UART0_LSR, val)
+#define bfin_read_UART0_MSR()			bfin_read16(UART0_MSR)
+#define bfin_write_UART0_MSR(val)		bfin_write16(UART0_MSR, val)
+#define bfin_read_UART0_SCR()			bfin_read16(UART0_SCR)
+#define bfin_write_UART0_SCR(val)		bfin_write16(UART0_SCR, val)
+#define bfin_read_UART0_GCTL()			bfin_read16(UART0_GCTL)
+#define bfin_write_UART0_GCTL(val)		bfin_write16(UART0_GCTL, val)
+
+
+/* SPI Controller		(0xFFC00500 - 0xFFC005FF)									*/
+#define bfin_read_SPI_CTL()			bfin_read16(SPI_CTL)
+#define bfin_write_SPI_CTL(val)			bfin_write16(SPI_CTL, val)
+#define bfin_read_SPI_FLG()			bfin_read16(SPI_FLG)
+#define bfin_write_SPI_FLG(val)			bfin_write16(SPI_FLG, val)
+#define bfin_read_SPI_STAT()			bfin_read16(SPI_STAT)
+#define bfin_write_SPI_STAT(val)		bfin_write16(SPI_STAT, val)
+#define bfin_read_SPI_TDBR()			bfin_read16(SPI_TDBR)
+#define bfin_write_SPI_TDBR(val)		bfin_write16(SPI_TDBR, val)
+#define bfin_read_SPI_RDBR()			bfin_read16(SPI_RDBR)
+#define bfin_write_SPI_RDBR(val)		bfin_write16(SPI_RDBR, val)
+#define bfin_read_SPI_BAUD()			bfin_read16(SPI_BAUD)
+#define bfin_write_SPI_BAUD(val)		bfin_write16(SPI_BAUD, val)
+#define bfin_read_SPI_SHADOW()			bfin_read16(SPI_SHADOW)
+#define bfin_write_SPI_SHADOW(val)		bfin_write16(SPI_SHADOW, val)
+
+
+/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
+#define bfin_read_TIMER0_CONFIG()		bfin_read16(TIMER0_CONFIG)
+#define bfin_write_TIMER0_CONFIG(val)		bfin_write16(TIMER0_CONFIG, val)
+#define bfin_read_TIMER0_COUNTER()		bfin_read32(TIMER0_COUNTER)
+#define bfin_write_TIMER0_COUNTER(val)		bfin_write32(TIMER0_COUNTER, val)
+#define bfin_read_TIMER0_PERIOD()		bfin_read32(TIMER0_PERIOD)
+#define bfin_write_TIMER0_PERIOD(val)		bfin_write32(TIMER0_PERIOD, val)
+#define bfin_read_TIMER0_WIDTH()		bfin_read32(TIMER0_WIDTH)
+#define bfin_write_TIMER0_WIDTH(val)		bfin_write32(TIMER0_WIDTH, val)
+
+#define bfin_read_TIMER1_CONFIG()		bfin_read16(TIMER1_CONFIG)
+#define bfin_write_TIMER1_CONFIG(val)		bfin_write16(TIMER1_CONFIG, val)
+#define bfin_read_TIMER1_COUNTER()		bfin_read32(TIMER1_COUNTER)
+#define bfin_write_TIMER1_COUNTER(val)		bfin_write32(TIMER1_COUNTER, val)
+#define bfin_read_TIMER1_PERIOD()		bfin_read32(TIMER1_PERIOD)
+#define bfin_write_TIMER1_PERIOD(val)		bfin_write32(TIMER1_PERIOD, val)
+#define bfin_read_TIMER1_WIDTH()		bfin_read32(TIMER1_WIDTH)
+#define bfin_write_TIMER1_WIDTH(val)		bfin_write32(TIMER1_WIDTH, val)
+
+#define bfin_read_TIMER2_CONFIG()		bfin_read16(TIMER2_CONFIG)
+#define bfin_write_TIMER2_CONFIG(val)		bfin_write16(TIMER2_CONFIG, val)
+#define bfin_read_TIMER2_COUNTER()		bfin_read32(TIMER2_COUNTER)
+#define bfin_write_TIMER2_COUNTER(val)		bfin_write32(TIMER2_COUNTER, val)
+#define bfin_read_TIMER2_PERIOD()		bfin_read32(TIMER2_PERIOD)
+#define bfin_write_TIMER2_PERIOD(val)		bfin_write32(TIMER2_PERIOD, val)
+#define bfin_read_TIMER2_WIDTH()		bfin_read32(TIMER2_WIDTH)
+#define bfin_write_TIMER2_WIDTH(val)		bfin_write32(TIMER2_WIDTH, val)
+
+#define bfin_read_TIMER3_CONFIG()		bfin_read16(TIMER3_CONFIG)
+#define bfin_write_TIMER3_CONFIG(val)		bfin_write16(TIMER3_CONFIG, val)
+#define bfin_read_TIMER3_COUNTER()		bfin_read32(TIMER3_COUNTER)
+#define bfin_write_TIMER3_COUNTER(val)		bfin_write32(TIMER3_COUNTER, val)
+#define bfin_read_TIMER3_PERIOD()		bfin_read32(TIMER3_PERIOD)
+#define bfin_write_TIMER3_PERIOD(val)		bfin_write32(TIMER3_PERIOD, val)
+#define bfin_read_TIMER3_WIDTH()		bfin_read32(TIMER3_WIDTH)
+#define bfin_write_TIMER3_WIDTH(val)		bfin_write32(TIMER3_WIDTH, val)
+
+#define bfin_read_TIMER4_CONFIG()		bfin_read16(TIMER4_CONFIG)
+#define bfin_write_TIMER4_CONFIG(val)		bfin_write16(TIMER4_CONFIG, val)
+#define bfin_read_TIMER4_COUNTER()		bfin_read32(TIMER4_COUNTER)
+#define bfin_write_TIMER4_COUNTER(val)		bfin_write32(TIMER4_COUNTER, val)
+#define bfin_read_TIMER4_PERIOD()		bfin_read32(TIMER4_PERIOD)
+#define bfin_write_TIMER4_PERIOD(val)		bfin_write32(TIMER4_PERIOD, val)
+#define bfin_read_TIMER4_WIDTH()		bfin_read32(TIMER4_WIDTH)
+#define bfin_write_TIMER4_WIDTH(val)		bfin_write32(TIMER4_WIDTH, val)
+
+#define bfin_read_TIMER5_CONFIG()		bfin_read16(TIMER5_CONFIG)
+#define bfin_write_TIMER5_CONFIG(val)		bfin_write16(TIMER5_CONFIG, val)
+#define bfin_read_TIMER5_COUNTER()		bfin_read32(TIMER5_COUNTER)
+#define bfin_write_TIMER5_COUNTER(val)		bfin_write32(TIMER5_COUNTER, val)
+#define bfin_read_TIMER5_PERIOD()		bfin_read32(TIMER5_PERIOD)
+#define bfin_write_TIMER5_PERIOD(val)		bfin_write32(TIMER5_PERIOD, val)
+#define bfin_read_TIMER5_WIDTH()		bfin_read32(TIMER5_WIDTH)
+#define bfin_write_TIMER5_WIDTH(val)		bfin_write32(TIMER5_WIDTH, val)
+
+#define bfin_read_TIMER6_CONFIG()		bfin_read16(TIMER6_CONFIG)
+#define bfin_write_TIMER6_CONFIG(val)		bfin_write16(TIMER6_CONFIG, val)
+#define bfin_read_TIMER6_COUNTER()		bfin_read32(TIMER6_COUNTER)
+#define bfin_write_TIMER6_COUNTER(val)		bfin_write32(TIMER6_COUNTER, val)
+#define bfin_read_TIMER6_PERIOD()		bfin_read32(TIMER6_PERIOD)
+#define bfin_write_TIMER6_PERIOD(val)		bfin_write32(TIMER6_PERIOD, val)
+#define bfin_read_TIMER6_WIDTH()		bfin_read32(TIMER6_WIDTH)
+#define bfin_write_TIMER6_WIDTH(val)		bfin_write32(TIMER6_WIDTH, val)
+
+#define bfin_read_TIMER7_CONFIG()		bfin_read16(TIMER7_CONFIG)
+#define bfin_write_TIMER7_CONFIG(val)		bfin_write16(TIMER7_CONFIG, val)
+#define bfin_read_TIMER7_COUNTER()		bfin_read32(TIMER7_COUNTER)
+#define bfin_write_TIMER7_COUNTER(val)		bfin_write32(TIMER7_COUNTER, val)
+#define bfin_read_TIMER7_PERIOD()		bfin_read32(TIMER7_PERIOD)
+#define bfin_write_TIMER7_PERIOD(val)		bfin_write32(TIMER7_PERIOD, val)
+#define bfin_read_TIMER7_WIDTH()		bfin_read32(TIMER7_WIDTH)
+#define bfin_write_TIMER7_WIDTH(val)		bfin_write32(TIMER7_WIDTH, val)
+
+#define bfin_read_TIMER_ENABLE()		bfin_read16(TIMER_ENABLE)
+#define bfin_write_TIMER_ENABLE(val)		bfin_write16(TIMER_ENABLE, val)
+#define bfin_read_TIMER_DISABLE()		bfin_read16(TIMER_DISABLE)
+#define bfin_write_TIMER_DISABLE(val)		bfin_write16(TIMER_DISABLE, val)
+#define bfin_read_TIMER_STATUS()		bfin_read32(TIMER_STATUS)
+#define bfin_write_TIMER_STATUS(val)		bfin_write32(TIMER_STATUS, val)
+
+
+/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)								*/
+#define bfin_read_PORTFIO()			bfin_read16(PORTFIO)
+#define bfin_write_PORTFIO(val)			bfin_write16(PORTFIO, val)
+#define bfin_read_PORTFIO_CLEAR()		bfin_read16(PORTFIO_CLEAR)
+#define bfin_write_PORTFIO_CLEAR(val)		bfin_write16(PORTFIO_CLEAR, val)
+#define bfin_read_PORTFIO_SET()			bfin_read16(PORTFIO_SET)
+#define bfin_write_PORTFIO_SET(val)		bfin_write16(PORTFIO_SET, val)
+#define bfin_read_PORTFIO_TOGGLE()		bfin_read16(PORTFIO_TOGGLE)
+#define bfin_write_PORTFIO_TOGGLE(val)		bfin_write16(PORTFIO_TOGGLE, val)
+#define bfin_read_PORTFIO_MASKA()		bfin_read16(PORTFIO_MASKA)
+#define bfin_write_PORTFIO_MASKA(val)		bfin_write16(PORTFIO_MASKA, val)
+#define bfin_read_PORTFIO_MASKA_CLEAR()		bfin_read16(PORTFIO_MASKA_CLEAR)
+#define bfin_write_PORTFIO_MASKA_CLEAR(val)	bfin_write16(PORTFIO_MASKA_CLEAR, val)
+#define bfin_read_PORTFIO_MASKA_SET()		bfin_read16(PORTFIO_MASKA_SET)
+#define bfin_write_PORTFIO_MASKA_SET(val)	bfin_write16(PORTFIO_MASKA_SET, val)
+#define bfin_read_PORTFIO_MASKA_TOGGLE()	bfin_read16(PORTFIO_MASKA_TOGGLE)
+#define bfin_write_PORTFIO_MASKA_TOGGLE(val)	bfin_write16(PORTFIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTFIO_MASKB()		bfin_read16(PORTFIO_MASKB)
+#define bfin_write_PORTFIO_MASKB(val)		bfin_write16(PORTFIO_MASKB, val)
+#define bfin_read_PORTFIO_MASKB_CLEAR()		bfin_read16(PORTFIO_MASKB_CLEAR)
+#define bfin_write_PORTFIO_MASKB_CLEAR(val)	bfin_write16(PORTFIO_MASKB_CLEAR, val)
+#define bfin_read_PORTFIO_MASKB_SET()		bfin_read16(PORTFIO_MASKB_SET)
+#define bfin_write_PORTFIO_MASKB_SET(val)	bfin_write16(PORTFIO_MASKB_SET, val)
+#define bfin_read_PORTFIO_MASKB_TOGGLE()	bfin_read16(PORTFIO_MASKB_TOGGLE)
+#define bfin_write_PORTFIO_MASKB_TOGGLE(val)	bfin_write16(PORTFIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTFIO_DIR()			bfin_read16(PORTFIO_DIR)
+#define bfin_write_PORTFIO_DIR(val)		bfin_write16(PORTFIO_DIR, val)
+#define bfin_read_PORTFIO_POLAR()		bfin_read16(PORTFIO_POLAR)
+#define bfin_write_PORTFIO_POLAR(val)		bfin_write16(PORTFIO_POLAR, val)
+#define bfin_read_PORTFIO_EDGE()		bfin_read16(PORTFIO_EDGE)
+#define bfin_write_PORTFIO_EDGE(val)		bfin_write16(PORTFIO_EDGE, val)
+#define bfin_read_PORTFIO_BOTH()		bfin_read16(PORTFIO_BOTH)
+#define bfin_write_PORTFIO_BOTH(val)		bfin_write16(PORTFIO_BOTH, val)
+#define bfin_read_PORTFIO_INEN()		bfin_read16(PORTFIO_INEN)
+#define bfin_write_PORTFIO_INEN(val)		bfin_write16(PORTFIO_INEN, val)
+
+
+/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)								*/
+#define bfin_read_SPORT0_TCR1()			bfin_read16(SPORT0_TCR1)
+#define bfin_write_SPORT0_TCR1(val)		bfin_write16(SPORT0_TCR1, val)
+#define bfin_read_SPORT0_TCR2()			bfin_read16(SPORT0_TCR2)
+#define bfin_write_SPORT0_TCR2(val)		bfin_write16(SPORT0_TCR2, val)
+#define bfin_read_SPORT0_TCLKDIV()		bfin_read16(SPORT0_TCLKDIV)
+#define bfin_write_SPORT0_TCLKDIV(val)		bfin_write16(SPORT0_TCLKDIV, val)
+#define bfin_read_SPORT0_TFSDIV()		bfin_read16(SPORT0_TFSDIV)
+#define bfin_write_SPORT0_TFSDIV(val)		bfin_write16(SPORT0_TFSDIV, val)
+#define bfin_read_SPORT0_TX()			bfin_read32(SPORT0_TX)
+#define bfin_write_SPORT0_TX(val)		bfin_write32(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX()			bfin_read32(SPORT0_RX)
+#define bfin_write_SPORT0_RX(val)		bfin_write32(SPORT0_RX, val)
+#define bfin_read_SPORT0_TX32()			bfin_read32(SPORT0_TX32)
+#define bfin_write_SPORT0_TX32(val)		bfin_write32(SPORT0_TX32, val)
+#define bfin_read_SPORT0_RX32()			bfin_read32(SPORT0_RX32)
+#define bfin_write_SPORT0_RX32(val)		bfin_write32(SPORT0_RX32, val)
+#define bfin_read_SPORT0_TX16()			bfin_read16(SPORT0_TX16)
+#define bfin_write_SPORT0_TX16(val)		bfin_write16(SPORT0_TX16, val)
+#define bfin_read_SPORT0_RX16()			bfin_read16(SPORT0_RX16)
+#define bfin_write_SPORT0_RX16(val)		bfin_write16(SPORT0_RX16, val)
+#define bfin_read_SPORT0_RCR1()			bfin_read16(SPORT0_RCR1)
+#define bfin_write_SPORT0_RCR1(val)		bfin_write16(SPORT0_RCR1, val)
+#define bfin_read_SPORT0_RCR2()			bfin_read16(SPORT0_RCR2)
+#define bfin_write_SPORT0_RCR2(val)		bfin_write16(SPORT0_RCR2, val)
+#define bfin_read_SPORT0_RCLKDIV()		bfin_read16(SPORT0_RCLKDIV)
+#define bfin_write_SPORT0_RCLKDIV(val)		bfin_write16(SPORT0_RCLKDIV, val)
+#define bfin_read_SPORT0_RFSDIV()		bfin_read16(SPORT0_RFSDIV)
+#define bfin_write_SPORT0_RFSDIV(val)		bfin_write16(SPORT0_RFSDIV, val)
+#define bfin_read_SPORT0_STAT()			bfin_read16(SPORT0_STAT)
+#define bfin_write_SPORT0_STAT(val)		bfin_write16(SPORT0_STAT, val)
+#define bfin_read_SPORT0_CHNL()			bfin_read16(SPORT0_CHNL)
+#define bfin_write_SPORT0_CHNL(val)		bfin_write16(SPORT0_CHNL, val)
+#define bfin_read_SPORT0_MCMC1()		bfin_read16(SPORT0_MCMC1)
+#define bfin_write_SPORT0_MCMC1(val)		bfin_write16(SPORT0_MCMC1, val)
+#define bfin_read_SPORT0_MCMC2()		bfin_read16(SPORT0_MCMC2)
+#define bfin_write_SPORT0_MCMC2(val)		bfin_write16(SPORT0_MCMC2, val)
+#define bfin_read_SPORT0_MTCS0()		bfin_read32(SPORT0_MTCS0)
+#define bfin_write_SPORT0_MTCS0(val)		bfin_write32(SPORT0_MTCS0, val)
+#define bfin_read_SPORT0_MTCS1()		bfin_read32(SPORT0_MTCS1)
+#define bfin_write_SPORT0_MTCS1(val)		bfin_write32(SPORT0_MTCS1, val)
+#define bfin_read_SPORT0_MTCS2()		bfin_read32(SPORT0_MTCS2)
+#define bfin_write_SPORT0_MTCS2(val)		bfin_write32(SPORT0_MTCS2, val)
+#define bfin_read_SPORT0_MTCS3()		bfin_read32(SPORT0_MTCS3)
+#define bfin_write_SPORT0_MTCS3(val)		bfin_write32(SPORT0_MTCS3, val)
+#define bfin_read_SPORT0_MRCS0()		bfin_read32(SPORT0_MRCS0)
+#define bfin_write_SPORT0_MRCS0(val)		bfin_write32(SPORT0_MRCS0, val)
+#define bfin_read_SPORT0_MRCS1()		bfin_read32(SPORT0_MRCS1)
+#define bfin_write_SPORT0_MRCS1(val)		bfin_write32(SPORT0_MRCS1, val)
+#define bfin_read_SPORT0_MRCS2()		bfin_read32(SPORT0_MRCS2)
+#define bfin_write_SPORT0_MRCS2(val)		bfin_write32(SPORT0_MRCS2, val)
+#define bfin_read_SPORT0_MRCS3()		bfin_read32(SPORT0_MRCS3)
+#define bfin_write_SPORT0_MRCS3(val)		bfin_write32(SPORT0_MRCS3, val)
+
+
+/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)								*/
+#define bfin_read_SPORT1_TCR1()			bfin_read16(SPORT1_TCR1)
+#define bfin_write_SPORT1_TCR1(val)		bfin_write16(SPORT1_TCR1, val)
+#define bfin_read_SPORT1_TCR2()			bfin_read16(SPORT1_TCR2)
+#define bfin_write_SPORT1_TCR2(val)		bfin_write16(SPORT1_TCR2, val)
+#define bfin_read_SPORT1_TCLKDIV()		bfin_read16(SPORT1_TCLKDIV)
+#define bfin_write_SPORT1_TCLKDIV(val)		bfin_write16(SPORT1_TCLKDIV, val)
+#define bfin_read_SPORT1_TFSDIV()		bfin_read16(SPORT1_TFSDIV)
+#define bfin_write_SPORT1_TFSDIV(val)		bfin_write16(SPORT1_TFSDIV, val)
+#define bfin_read_SPORT1_TX()			bfin_read32(SPORT1_TX)
+#define bfin_write_SPORT1_TX(val)		bfin_write32(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX()			bfin_read32(SPORT1_RX)
+#define bfin_write_SPORT1_RX(val)		bfin_write32(SPORT1_RX, val)
+#define bfin_read_SPORT1_TX32()			bfin_read32(SPORT1_TX32)
+#define bfin_write_SPORT1_TX32(val)		bfin_write32(SPORT1_TX32, val)
+#define bfin_read_SPORT1_RX32()			bfin_read32(SPORT1_RX32)
+#define bfin_write_SPORT1_RX32(val)		bfin_write32(SPORT1_RX32, val)
+#define bfin_read_SPORT1_TX16()			bfin_read16(SPORT1_TX16)
+#define bfin_write_SPORT1_TX16(val)		bfin_write16(SPORT1_TX16, val)
+#define bfin_read_SPORT1_RX16()			bfin_read16(SPORT1_RX16)
+#define bfin_write_SPORT1_RX16(val)		bfin_write16(SPORT1_RX16, val)
+#define bfin_read_SPORT1_RCR1()			bfin_read16(SPORT1_RCR1)
+#define bfin_write_SPORT1_RCR1(val)		bfin_write16(SPORT1_RCR1, val)
+#define bfin_read_SPORT1_RCR2()			bfin_read16(SPORT1_RCR2)
+#define bfin_write_SPORT1_RCR2(val)		bfin_write16(SPORT1_RCR2, val)
+#define bfin_read_SPORT1_RCLKDIV()		bfin_read16(SPORT1_RCLKDIV)
+#define bfin_write_SPORT1_RCLKDIV(val)		bfin_write16(SPORT1_RCLKDIV, val)
+#define bfin_read_SPORT1_RFSDIV()		bfin_read16(SPORT1_RFSDIV)
+#define bfin_write_SPORT1_RFSDIV(val)		bfin_write16(SPORT1_RFSDIV, val)
+#define bfin_read_SPORT1_STAT()			bfin_read16(SPORT1_STAT)
+#define bfin_write_SPORT1_STAT(val)		bfin_write16(SPORT1_STAT, val)
+#define bfin_read_SPORT1_CHNL()			bfin_read16(SPORT1_CHNL)
+#define bfin_write_SPORT1_CHNL(val)		bfin_write16(SPORT1_CHNL, val)
+#define bfin_read_SPORT1_MCMC1()		bfin_read16(SPORT1_MCMC1)
+#define bfin_write_SPORT1_MCMC1(val)		bfin_write16(SPORT1_MCMC1, val)
+#define bfin_read_SPORT1_MCMC2()		bfin_read16(SPORT1_MCMC2)
+#define bfin_write_SPORT1_MCMC2(val)		bfin_write16(SPORT1_MCMC2, val)
+#define bfin_read_SPORT1_MTCS0()		bfin_read32(SPORT1_MTCS0)
+#define bfin_write_SPORT1_MTCS0(val)		bfin_write32(SPORT1_MTCS0, val)
+#define bfin_read_SPORT1_MTCS1()		bfin_read32(SPORT1_MTCS1)
+#define bfin_write_SPORT1_MTCS1(val)		bfin_write32(SPORT1_MTCS1, val)
+#define bfin_read_SPORT1_MTCS2()		bfin_read32(SPORT1_MTCS2)
+#define bfin_write_SPORT1_MTCS2(val)		bfin_write32(SPORT1_MTCS2, val)
+#define bfin_read_SPORT1_MTCS3()		bfin_read32(SPORT1_MTCS3)
+#define bfin_write_SPORT1_MTCS3(val)		bfin_write32(SPORT1_MTCS3, val)
+#define bfin_read_SPORT1_MRCS0()		bfin_read32(SPORT1_MRCS0)
+#define bfin_write_SPORT1_MRCS0(val)		bfin_write32(SPORT1_MRCS0, val)
+#define bfin_read_SPORT1_MRCS1()		bfin_read32(SPORT1_MRCS1)
+#define bfin_write_SPORT1_MRCS1(val)		bfin_write32(SPORT1_MRCS1, val)
+#define bfin_read_SPORT1_MRCS2()		bfin_read32(SPORT1_MRCS2)
+#define bfin_write_SPORT1_MRCS2(val)		bfin_write32(SPORT1_MRCS2, val)
+#define bfin_read_SPORT1_MRCS3()		bfin_read32(SPORT1_MRCS3)
+#define bfin_write_SPORT1_MRCS3(val)		bfin_write32(SPORT1_MRCS3, val)
+
+
+/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)							*/
+#define bfin_read_EBIU_AMGCTL()			bfin_read16(EBIU_AMGCTL)
+#define bfin_write_EBIU_AMGCTL(val)		bfin_write16(EBIU_AMGCTL, val)
+#define bfin_read_EBIU_AMBCTL0()		bfin_read32(EBIU_AMBCTL0)
+#define bfin_write_EBIU_AMBCTL0(val)		bfin_write32(EBIU_AMBCTL0, val)
+#define bfin_read_EBIU_AMBCTL1()		bfin_read32(EBIU_AMBCTL1)
+#define bfin_write_EBIU_AMBCTL1(val)		bfin_write32(EBIU_AMBCTL1, val)
+#define bfin_read_EBIU_SDGCTL()			bfin_read32(EBIU_SDGCTL)
+#define bfin_write_EBIU_SDGCTL(val)		bfin_write32(EBIU_SDGCTL, val)
+#define bfin_read_EBIU_SDBCTL()			bfin_read16(EBIU_SDBCTL)
+#define bfin_write_EBIU_SDBCTL(val)		bfin_write16(EBIU_SDBCTL, val)
+#define bfin_read_EBIU_SDRRC()			bfin_read16(EBIU_SDRRC)
+#define bfin_write_EBIU_SDRRC(val)		bfin_write16(EBIU_SDRRC, val)
+#define bfin_read_EBIU_SDSTAT()			bfin_read16(EBIU_SDSTAT)
+#define bfin_write_EBIU_SDSTAT(val)		bfin_write16(EBIU_SDSTAT, val)
+
+
+/* DMA Traffic Control Registers													*/
+#define bfin_read_DMA_TC_PER()			bfin_read16(DMA_TC_PER)
+#define bfin_write_DMA_TC_PER(val)		bfin_write16(DMA_TC_PER, val)
+#define bfin_read_DMA_TC_CNT()			bfin_read16(DMA_TC_CNT)
+#define bfin_write_DMA_TC_CNT(val)		bfin_write16(DMA_TC_CNT, val)
+
+/* Alternate deprecated register names (below) provided for backwards code compatibility */
+#define bfin_read_DMA_TCPER()			bfin_read16(DMA_TCPER)
+#define bfin_write_DMA_TCPER(val)		bfin_write16(DMA_TCPER, val)
+#define bfin_read_DMA_TCCNT()			bfin_read16(DMA_TCCNT)
+#define bfin_write_DMA_TCCNT(val)		bfin_write16(DMA_TCCNT, val)
+
+/* DMA Controller																	*/
+#define bfin_read_DMA0_CONFIG()			bfin_read16(DMA0_CONFIG)
+#define bfin_write_DMA0_CONFIG(val)		bfin_write16(DMA0_CONFIG, val)
+#define bfin_read_DMA0_NEXT_DESC_PTR()		bfin_read32(DMA0_NEXT_DESC_PTR)
+#define bfin_write_DMA0_NEXT_DESC_PTR(val)	bfin_write32(DMA0_NEXT_DESC_PTR, val)
+#define bfin_read_DMA0_START_ADDR()		bfin_read32(DMA0_START_ADDR)
+#define bfin_write_DMA0_START_ADDR(val)		bfin_write32(DMA0_START_ADDR, val)
+#define bfin_read_DMA0_X_COUNT()		bfin_read16(DMA0_X_COUNT)
+#define bfin_write_DMA0_X_COUNT(val)		bfin_write16(DMA0_X_COUNT, val)
+#define bfin_read_DMA0_Y_COUNT()		bfin_read16(DMA0_Y_COUNT)
+#define bfin_write_DMA0_Y_COUNT(val)		bfin_write16(DMA0_Y_COUNT, val)
+#define bfin_read_DMA0_X_MODIFY()		bfin_read16(DMA0_X_MODIFY)
+#define bfin_write_DMA0_X_MODIFY(val)		bfin_write16(DMA0_X_MODIFY, val)
+#define bfin_read_DMA0_Y_MODIFY()		bfin_read16(DMA0_Y_MODIFY)
+#define bfin_write_DMA0_Y_MODIFY(val)		bfin_write16(DMA0_Y_MODIFY, val)
+#define bfin_read_DMA0_CURR_DESC_PTR()		bfin_read32(DMA0_CURR_DESC_PTR)
+#define bfin_write_DMA0_CURR_DESC_PTR(val)	bfin_write32(DMA0_CURR_DESC_PTR, val)
+#define bfin_read_DMA0_CURR_ADDR()		bfin_read32(DMA0_CURR_ADDR)
+#define bfin_write_DMA0_CURR_ADDR(val)		bfin_write32(DMA0_CURR_ADDR, val)
+#define bfin_read_DMA0_CURR_X_COUNT()		bfin_read16(DMA0_CURR_X_COUNT)
+#define bfin_write_DMA0_CURR_X_COUNT(val)	bfin_write16(DMA0_CURR_X_COUNT, val)
+#define bfin_read_DMA0_CURR_Y_COUNT()		bfin_read16(DMA0_CURR_Y_COUNT)
+#define bfin_write_DMA0_CURR_Y_COUNT(val)	bfin_write16(DMA0_CURR_Y_COUNT, val)
+#define bfin_read_DMA0_IRQ_STATUS()		bfin_read16(DMA0_IRQ_STATUS)
+#define bfin_write_DMA0_IRQ_STATUS(val)		bfin_write16(DMA0_IRQ_STATUS, val)
+#define bfin_read_DMA0_PERIPHERAL_MAP()		bfin_read16(DMA0_PERIPHERAL_MAP)
+#define bfin_write_DMA0_PERIPHERAL_MAP(val)	bfin_write16(DMA0_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA1_CONFIG()			bfin_read16(DMA1_CONFIG)
+#define bfin_write_DMA1_CONFIG(val)		bfin_write16(DMA1_CONFIG, val)
+#define bfin_read_DMA1_NEXT_DESC_PTR()		bfin_read32(DMA1_NEXT_DESC_PTR)
+#define bfin_write_DMA1_NEXT_DESC_PTR(val)	bfin_write32(DMA1_NEXT_DESC_PTR, val)
+#define bfin_read_DMA1_START_ADDR()		bfin_read32(DMA1_START_ADDR)
+#define bfin_write_DMA1_START_ADDR(val)		bfin_write32(DMA1_START_ADDR, val)
+#define bfin_read_DMA1_X_COUNT()		bfin_read16(DMA1_X_COUNT)
+#define bfin_write_DMA1_X_COUNT(val)		bfin_write16(DMA1_X_COUNT, val)
+#define bfin_read_DMA1_Y_COUNT()		bfin_read16(DMA1_Y_COUNT)
+#define bfin_write_DMA1_Y_COUNT(val)		bfin_write16(DMA1_Y_COUNT, val)
+#define bfin_read_DMA1_X_MODIFY()		bfin_read16(DMA1_X_MODIFY)
+#define bfin_write_DMA1_X_MODIFY(val)		bfin_write16(DMA1_X_MODIFY, val)
+#define bfin_read_DMA1_Y_MODIFY()		bfin_read16(DMA1_Y_MODIFY)
+#define bfin_write_DMA1_Y_MODIFY(val)		bfin_write16(DMA1_Y_MODIFY, val)
+#define bfin_read_DMA1_CURR_DESC_PTR()		bfin_read32(DMA1_CURR_DESC_PTR)
+#define bfin_write_DMA1_CURR_DESC_PTR(val)	bfin_write32(DMA1_CURR_DESC_PTR, val)
+#define bfin_read_DMA1_CURR_ADDR()		bfin_read32(DMA1_CURR_ADDR)
+#define bfin_write_DMA1_CURR_ADDR(val)		bfin_write32(DMA1_CURR_ADDR, val)
+#define bfin_read_DMA1_CURR_X_COUNT()		bfin_read16(DMA1_CURR_X_COUNT)
+#define bfin_write_DMA1_CURR_X_COUNT(val)	bfin_write16(DMA1_CURR_X_COUNT, val)
+#define bfin_read_DMA1_CURR_Y_COUNT()		bfin_read16(DMA1_CURR_Y_COUNT)
+#define bfin_write_DMA1_CURR_Y_COUNT(val)	bfin_write16(DMA1_CURR_Y_COUNT, val)
+#define bfin_read_DMA1_IRQ_STATUS()		bfin_read16(DMA1_IRQ_STATUS)
+#define bfin_write_DMA1_IRQ_STATUS(val)		bfin_write16(DMA1_IRQ_STATUS, val)
+#define bfin_read_DMA1_PERIPHERAL_MAP()		bfin_read16(DMA1_PERIPHERAL_MAP)
+#define bfin_write_DMA1_PERIPHERAL_MAP(val)	bfin_write16(DMA1_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA2_CONFIG()			bfin_read16(DMA2_CONFIG)
+#define bfin_write_DMA2_CONFIG(val)		bfin_write16(DMA2_CONFIG, val)
+#define bfin_read_DMA2_NEXT_DESC_PTR()		bfin_read32(DMA2_NEXT_DESC_PTR)
+#define bfin_write_DMA2_NEXT_DESC_PTR(val)	bfin_write32(DMA2_NEXT_DESC_PTR, val)
+#define bfin_read_DMA2_START_ADDR()		bfin_read32(DMA2_START_ADDR)
+#define bfin_write_DMA2_START_ADDR(val)		bfin_write32(DMA2_START_ADDR, val)
+#define bfin_read_DMA2_X_COUNT()		bfin_read16(DMA2_X_COUNT)
+#define bfin_write_DMA2_X_COUNT(val)		bfin_write16(DMA2_X_COUNT, val)
+#define bfin_read_DMA2_Y_COUNT()		bfin_read16(DMA2_Y_COUNT)
+#define bfin_write_DMA2_Y_COUNT(val)		bfin_write16(DMA2_Y_COUNT, val)
+#define bfin_read_DMA2_X_MODIFY()		bfin_read16(DMA2_X_MODIFY)
+#define bfin_write_DMA2_X_MODIFY(val)		bfin_write16(DMA2_X_MODIFY, val)
+#define bfin_read_DMA2_Y_MODIFY()		bfin_read16(DMA2_Y_MODIFY)
+#define bfin_write_DMA2_Y_MODIFY(val)		bfin_write16(DMA2_Y_MODIFY, val)
+#define bfin_read_DMA2_CURR_DESC_PTR()		bfin_read32(DMA2_CURR_DESC_PTR)
+#define bfin_write_DMA2_CURR_DESC_PTR(val)	bfin_write32(DMA2_CURR_DESC_PTR, val)
+#define bfin_read_DMA2_CURR_ADDR()		bfin_read32(DMA2_CURR_ADDR)
+#define bfin_write_DMA2_CURR_ADDR(val)		bfin_write32(DMA2_CURR_ADDR, val)
+#define bfin_read_DMA2_CURR_X_COUNT()		bfin_read16(DMA2_CURR_X_COUNT)
+#define bfin_write_DMA2_CURR_X_COUNT(val)	bfin_write16(DMA2_CURR_X_COUNT, val)
+#define bfin_read_DMA2_CURR_Y_COUNT()		bfin_read16(DMA2_CURR_Y_COUNT)
+#define bfin_write_DMA2_CURR_Y_COUNT(val)	bfin_write16(DMA2_CURR_Y_COUNT, val)
+#define bfin_read_DMA2_IRQ_STATUS()		bfin_read16(DMA2_IRQ_STATUS)
+#define bfin_write_DMA2_IRQ_STATUS(val)		bfin_write16(DMA2_IRQ_STATUS, val)
+#define bfin_read_DMA2_PERIPHERAL_MAP()		bfin_read16(DMA2_PERIPHERAL_MAP)
+#define bfin_write_DMA2_PERIPHERAL_MAP(val)	bfin_write16(DMA2_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA3_CONFIG()			bfin_read16(DMA3_CONFIG)
+#define bfin_write_DMA3_CONFIG(val)		bfin_write16(DMA3_CONFIG, val)
+#define bfin_read_DMA3_NEXT_DESC_PTR()		bfin_read32(DMA3_NEXT_DESC_PTR)
+#define bfin_write_DMA3_NEXT_DESC_PTR(val)	bfin_write32(DMA3_NEXT_DESC_PTR, val)
+#define bfin_read_DMA3_START_ADDR()		bfin_read32(DMA3_START_ADDR)
+#define bfin_write_DMA3_START_ADDR(val)		bfin_write32(DMA3_START_ADDR, val)
+#define bfin_read_DMA3_X_COUNT()		bfin_read16(DMA3_X_COUNT)
+#define bfin_write_DMA3_X_COUNT(val)		bfin_write16(DMA3_X_COUNT, val)
+#define bfin_read_DMA3_Y_COUNT()		bfin_read16(DMA3_Y_COUNT)
+#define bfin_write_DMA3_Y_COUNT(val)		bfin_write16(DMA3_Y_COUNT, val)
+#define bfin_read_DMA3_X_MODIFY()		bfin_read16(DMA3_X_MODIFY)
+#define bfin_write_DMA3_X_MODIFY(val)		bfin_write16(DMA3_X_MODIFY, val)
+#define bfin_read_DMA3_Y_MODIFY()		bfin_read16(DMA3_Y_MODIFY)
+#define bfin_write_DMA3_Y_MODIFY(val)		bfin_write16(DMA3_Y_MODIFY, val)
+#define bfin_read_DMA3_CURR_DESC_PTR()		bfin_read32(DMA3_CURR_DESC_PTR)
+#define bfin_write_DMA3_CURR_DESC_PTR(val)	bfin_write32(DMA3_CURR_DESC_PTR, val)
+#define bfin_read_DMA3_CURR_ADDR()		bfin_read32(DMA3_CURR_ADDR)
+#define bfin_write_DMA3_CURR_ADDR(val)		bfin_write32(DMA3_CURR_ADDR, val)
+#define bfin_read_DMA3_CURR_X_COUNT()		bfin_read16(DMA3_CURR_X_COUNT)
+#define bfin_write_DMA3_CURR_X_COUNT(val)	bfin_write16(DMA3_CURR_X_COUNT, val)
+#define bfin_read_DMA3_CURR_Y_COUNT()		bfin_read16(DMA3_CURR_Y_COUNT)
+#define bfin_write_DMA3_CURR_Y_COUNT(val)	bfin_write16(DMA3_CURR_Y_COUNT, val)
+#define bfin_read_DMA3_IRQ_STATUS()		bfin_read16(DMA3_IRQ_STATUS)
+#define bfin_write_DMA3_IRQ_STATUS(val)		bfin_write16(DMA3_IRQ_STATUS, val)
+#define bfin_read_DMA3_PERIPHERAL_MAP()		bfin_read16(DMA3_PERIPHERAL_MAP)
+#define bfin_write_DMA3_PERIPHERAL_MAP(val)	bfin_write16(DMA3_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA4_CONFIG()			bfin_read16(DMA4_CONFIG)
+#define bfin_write_DMA4_CONFIG(val)		bfin_write16(DMA4_CONFIG, val)
+#define bfin_read_DMA4_NEXT_DESC_PTR()		bfin_read32(DMA4_NEXT_DESC_PTR)
+#define bfin_write_DMA4_NEXT_DESC_PTR(val)	bfin_write32(DMA4_NEXT_DESC_PTR, val)
+#define bfin_read_DMA4_START_ADDR()		bfin_read32(DMA4_START_ADDR)
+#define bfin_write_DMA4_START_ADDR(val)		bfin_write32(DMA4_START_ADDR, val)
+#define bfin_read_DMA4_X_COUNT()		bfin_read16(DMA4_X_COUNT)
+#define bfin_write_DMA4_X_COUNT(val)		bfin_write16(DMA4_X_COUNT, val)
+#define bfin_read_DMA4_Y_COUNT()		bfin_read16(DMA4_Y_COUNT)
+#define bfin_write_DMA4_Y_COUNT(val)		bfin_write16(DMA4_Y_COUNT, val)
+#define bfin_read_DMA4_X_MODIFY()		bfin_read16(DMA4_X_MODIFY)
+#define bfin_write_DMA4_X_MODIFY(val)		bfin_write16(DMA4_X_MODIFY, val)
+#define bfin_read_DMA4_Y_MODIFY()		bfin_read16(DMA4_Y_MODIFY)
+#define bfin_write_DMA4_Y_MODIFY(val)		bfin_write16(DMA4_Y_MODIFY, val)
+#define bfin_read_DMA4_CURR_DESC_PTR()		bfin_read32(DMA4_CURR_DESC_PTR)
+#define bfin_write_DMA4_CURR_DESC_PTR(val)	bfin_write32(DMA4_CURR_DESC_PTR, val)
+#define bfin_read_DMA4_CURR_ADDR()		bfin_read32(DMA4_CURR_ADDR)
+#define bfin_write_DMA4_CURR_ADDR(val)		bfin_write32(DMA4_CURR_ADDR, val)
+#define bfin_read_DMA4_CURR_X_COUNT()		bfin_read16(DMA4_CURR_X_COUNT)
+#define bfin_write_DMA4_CURR_X_COUNT(val)	bfin_write16(DMA4_CURR_X_COUNT, val)
+#define bfin_read_DMA4_CURR_Y_COUNT()		bfin_read16(DMA4_CURR_Y_COUNT)
+#define bfin_write_DMA4_CURR_Y_COUNT(val)	bfin_write16(DMA4_CURR_Y_COUNT, val)
+#define bfin_read_DMA4_IRQ_STATUS()		bfin_read16(DMA4_IRQ_STATUS)
+#define bfin_write_DMA4_IRQ_STATUS(val)		bfin_write16(DMA4_IRQ_STATUS, val)
+#define bfin_read_DMA4_PERIPHERAL_MAP()		bfin_read16(DMA4_PERIPHERAL_MAP)
+#define bfin_write_DMA4_PERIPHERAL_MAP(val)	bfin_write16(DMA4_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA5_CONFIG()			bfin_read16(DMA5_CONFIG)
+#define bfin_write_DMA5_CONFIG(val)		bfin_write16(DMA5_CONFIG, val)
+#define bfin_read_DMA5_NEXT_DESC_PTR()		bfin_read32(DMA5_NEXT_DESC_PTR)
+#define bfin_write_DMA5_NEXT_DESC_PTR(val)	bfin_write32(DMA5_NEXT_DESC_PTR, val)
+#define bfin_read_DMA5_START_ADDR()		bfin_read32(DMA5_START_ADDR)
+#define bfin_write_DMA5_START_ADDR(val)		bfin_write32(DMA5_START_ADDR, val)
+#define bfin_read_DMA5_X_COUNT()		bfin_read16(DMA5_X_COUNT)
+#define bfin_write_DMA5_X_COUNT(val)		bfin_write16(DMA5_X_COUNT, val)
+#define bfin_read_DMA5_Y_COUNT()		bfin_read16(DMA5_Y_COUNT)
+#define bfin_write_DMA5_Y_COUNT(val)		bfin_write16(DMA5_Y_COUNT, val)
+#define bfin_read_DMA5_X_MODIFY()		bfin_read16(DMA5_X_MODIFY)
+#define bfin_write_DMA5_X_MODIFY(val)		bfin_write16(DMA5_X_MODIFY, val)
+#define bfin_read_DMA5_Y_MODIFY()		bfin_read16(DMA5_Y_MODIFY)
+#define bfin_write_DMA5_Y_MODIFY(val)		bfin_write16(DMA5_Y_MODIFY, val)
+#define bfin_read_DMA5_CURR_DESC_PTR()		bfin_read32(DMA5_CURR_DESC_PTR)
+#define bfin_write_DMA5_CURR_DESC_PTR(val)	bfin_write32(DMA5_CURR_DESC_PTR, val)
+#define bfin_read_DMA5_CURR_ADDR()		bfin_read32(DMA5_CURR_ADDR)
+#define bfin_write_DMA5_CURR_ADDR(val)		bfin_write32(DMA5_CURR_ADDR, val)
+#define bfin_read_DMA5_CURR_X_COUNT()		bfin_read16(DMA5_CURR_X_COUNT)
+#define bfin_write_DMA5_CURR_X_COUNT(val)	bfin_write16(DMA5_CURR_X_COUNT, val)
+#define bfin_read_DMA5_CURR_Y_COUNT()		bfin_read16(DMA5_CURR_Y_COUNT)
+#define bfin_write_DMA5_CURR_Y_COUNT(val)	bfin_write16(DMA5_CURR_Y_COUNT, val)
+#define bfin_read_DMA5_IRQ_STATUS()		bfin_read16(DMA5_IRQ_STATUS)
+#define bfin_write_DMA5_IRQ_STATUS(val)		bfin_write16(DMA5_IRQ_STATUS, val)
+#define bfin_read_DMA5_PERIPHERAL_MAP()		bfin_read16(DMA5_PERIPHERAL_MAP)
+#define bfin_write_DMA5_PERIPHERAL_MAP(val)	bfin_write16(DMA5_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA6_CONFIG()			bfin_read16(DMA6_CONFIG)
+#define bfin_write_DMA6_CONFIG(val)		bfin_write16(DMA6_CONFIG, val)
+#define bfin_read_DMA6_NEXT_DESC_PTR()		bfin_read32(DMA6_NEXT_DESC_PTR)
+#define bfin_write_DMA6_NEXT_DESC_PTR(val)	bfin_write32(DMA6_NEXT_DESC_PTR, val)
+#define bfin_read_DMA6_START_ADDR()		bfin_read32(DMA6_START_ADDR)
+#define bfin_write_DMA6_START_ADDR(val)		bfin_write32(DMA6_START_ADDR, val)
+#define bfin_read_DMA6_X_COUNT()		bfin_read16(DMA6_X_COUNT)
+#define bfin_write_DMA6_X_COUNT(val)		bfin_write16(DMA6_X_COUNT, val)
+#define bfin_read_DMA6_Y_COUNT()		bfin_read16(DMA6_Y_COUNT)
+#define bfin_write_DMA6_Y_COUNT(val)		bfin_write16(DMA6_Y_COUNT, val)
+#define bfin_read_DMA6_X_MODIFY()		bfin_read16(DMA6_X_MODIFY)
+#define bfin_write_DMA6_X_MODIFY(val)		bfin_write16(DMA6_X_MODIFY, val)
+#define bfin_read_DMA6_Y_MODIFY()		bfin_read16(DMA6_Y_MODIFY)
+#define bfin_write_DMA6_Y_MODIFY(val)		bfin_write16(DMA6_Y_MODIFY, val)
+#define bfin_read_DMA6_CURR_DESC_PTR()		bfin_read32(DMA6_CURR_DESC_PTR)
+#define bfin_write_DMA6_CURR_DESC_PTR(val)	bfin_write32(DMA6_CURR_DESC_PTR, val)
+#define bfin_read_DMA6_CURR_ADDR()		bfin_read32(DMA6_CURR_ADDR)
+#define bfin_write_DMA6_CURR_ADDR(val)		bfin_write32(DMA6_CURR_ADDR, val)
+#define bfin_read_DMA6_CURR_X_COUNT()		bfin_read16(DMA6_CURR_X_COUNT)
+#define bfin_write_DMA6_CURR_X_COUNT(val)	bfin_write16(DMA6_CURR_X_COUNT, val)
+#define bfin_read_DMA6_CURR_Y_COUNT()		bfin_read16(DMA6_CURR_Y_COUNT)
+#define bfin_write_DMA6_CURR_Y_COUNT(val)	bfin_write16(DMA6_CURR_Y_COUNT, val)
+#define bfin_read_DMA6_IRQ_STATUS()		bfin_read16(DMA6_IRQ_STATUS)
+#define bfin_write_DMA6_IRQ_STATUS(val)		bfin_write16(DMA6_IRQ_STATUS, val)
+#define bfin_read_DMA6_PERIPHERAL_MAP()		bfin_read16(DMA6_PERIPHERAL_MAP)
+#define bfin_write_DMA6_PERIPHERAL_MAP(val)	bfin_write16(DMA6_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA7_CONFIG()			bfin_read16(DMA7_CONFIG)
+#define bfin_write_DMA7_CONFIG(val)		bfin_write16(DMA7_CONFIG, val)
+#define bfin_read_DMA7_NEXT_DESC_PTR()		bfin_read32(DMA7_NEXT_DESC_PTR)
+#define bfin_write_DMA7_NEXT_DESC_PTR(val)	bfin_write32(DMA7_NEXT_DESC_PTR, val)
+#define bfin_read_DMA7_START_ADDR()		bfin_read32(DMA7_START_ADDR)
+#define bfin_write_DMA7_START_ADDR(val)		bfin_write32(DMA7_START_ADDR, val)
+#define bfin_read_DMA7_X_COUNT()		bfin_read16(DMA7_X_COUNT)
+#define bfin_write_DMA7_X_COUNT(val)		bfin_write16(DMA7_X_COUNT, val)
+#define bfin_read_DMA7_Y_COUNT()		bfin_read16(DMA7_Y_COUNT)
+#define bfin_write_DMA7_Y_COUNT(val)		bfin_write16(DMA7_Y_COUNT, val)
+#define bfin_read_DMA7_X_MODIFY()		bfin_read16(DMA7_X_MODIFY)
+#define bfin_write_DMA7_X_MODIFY(val)		bfin_write16(DMA7_X_MODIFY, val)
+#define bfin_read_DMA7_Y_MODIFY()		bfin_read16(DMA7_Y_MODIFY)
+#define bfin_write_DMA7_Y_MODIFY(val)		bfin_write16(DMA7_Y_MODIFY, val)
+#define bfin_read_DMA7_CURR_DESC_PTR()		bfin_read32(DMA7_CURR_DESC_PTR)
+#define bfin_write_DMA7_CURR_DESC_PTR(val)	bfin_write32(DMA7_CURR_DESC_PTR, val)
+#define bfin_read_DMA7_CURR_ADDR()		bfin_read32(DMA7_CURR_ADDR)
+#define bfin_write_DMA7_CURR_ADDR(val)		bfin_write32(DMA7_CURR_ADDR, val)
+#define bfin_read_DMA7_CURR_X_COUNT()		bfin_read16(DMA7_CURR_X_COUNT)
+#define bfin_write_DMA7_CURR_X_COUNT(val)	bfin_write16(DMA7_CURR_X_COUNT, val)
+#define bfin_read_DMA7_CURR_Y_COUNT()		bfin_read16(DMA7_CURR_Y_COUNT)
+#define bfin_write_DMA7_CURR_Y_COUNT(val)	bfin_write16(DMA7_CURR_Y_COUNT, val)
+#define bfin_read_DMA7_IRQ_STATUS()		bfin_read16(DMA7_IRQ_STATUS)
+#define bfin_write_DMA7_IRQ_STATUS(val)		bfin_write16(DMA7_IRQ_STATUS, val)
+#define bfin_read_DMA7_PERIPHERAL_MAP()		bfin_read16(DMA7_PERIPHERAL_MAP)
+#define bfin_write_DMA7_PERIPHERAL_MAP(val)	bfin_write16(DMA7_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA8_CONFIG()			bfin_read16(DMA8_CONFIG)
+#define bfin_write_DMA8_CONFIG(val)		bfin_write16(DMA8_CONFIG, val)
+#define bfin_read_DMA8_NEXT_DESC_PTR()		bfin_read32(DMA8_NEXT_DESC_PTR)
+#define bfin_write_DMA8_NEXT_DESC_PTR(val)	bfin_write32(DMA8_NEXT_DESC_PTR, val)
+#define bfin_read_DMA8_START_ADDR()		bfin_read32(DMA8_START_ADDR)
+#define bfin_write_DMA8_START_ADDR(val)		bfin_write32(DMA8_START_ADDR, val)
+#define bfin_read_DMA8_X_COUNT()		bfin_read16(DMA8_X_COUNT)
+#define bfin_write_DMA8_X_COUNT(val)		bfin_write16(DMA8_X_COUNT, val)
+#define bfin_read_DMA8_Y_COUNT()		bfin_read16(DMA8_Y_COUNT)
+#define bfin_write_DMA8_Y_COUNT(val)		bfin_write16(DMA8_Y_COUNT, val)
+#define bfin_read_DMA8_X_MODIFY()		bfin_read16(DMA8_X_MODIFY)
+#define bfin_write_DMA8_X_MODIFY(val)		bfin_write16(DMA8_X_MODIFY, val)
+#define bfin_read_DMA8_Y_MODIFY()		bfin_read16(DMA8_Y_MODIFY)
+#define bfin_write_DMA8_Y_MODIFY(val)		bfin_write16(DMA8_Y_MODIFY, val)
+#define bfin_read_DMA8_CURR_DESC_PTR()		bfin_read32(DMA8_CURR_DESC_PTR)
+#define bfin_write_DMA8_CURR_DESC_PTR(val)	bfin_write32(DMA8_CURR_DESC_PTR, val)
+#define bfin_read_DMA8_CURR_ADDR()		bfin_read32(DMA8_CURR_ADDR)
+#define bfin_write_DMA8_CURR_ADDR(val)		bfin_write32(DMA8_CURR_ADDR, val)
+#define bfin_read_DMA8_CURR_X_COUNT()		bfin_read16(DMA8_CURR_X_COUNT)
+#define bfin_write_DMA8_CURR_X_COUNT(val)	bfin_write16(DMA8_CURR_X_COUNT, val)
+#define bfin_read_DMA8_CURR_Y_COUNT()		bfin_read16(DMA8_CURR_Y_COUNT)
+#define bfin_write_DMA8_CURR_Y_COUNT(val)	bfin_write16(DMA8_CURR_Y_COUNT, val)
+#define bfin_read_DMA8_IRQ_STATUS()		bfin_read16(DMA8_IRQ_STATUS)
+#define bfin_write_DMA8_IRQ_STATUS(val)		bfin_write16(DMA8_IRQ_STATUS, val)
+#define bfin_read_DMA8_PERIPHERAL_MAP()		bfin_read16(DMA8_PERIPHERAL_MAP)
+#define bfin_write_DMA8_PERIPHERAL_MAP(val)	bfin_write16(DMA8_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA9_CONFIG()			bfin_read16(DMA9_CONFIG)
+#define bfin_write_DMA9_CONFIG(val)		bfin_write16(DMA9_CONFIG, val)
+#define bfin_read_DMA9_NEXT_DESC_PTR()		bfin_read32(DMA9_NEXT_DESC_PTR)
+#define bfin_write_DMA9_NEXT_DESC_PTR(val)	bfin_write32(DMA9_NEXT_DESC_PTR, val)
+#define bfin_read_DMA9_START_ADDR()		bfin_read32(DMA9_START_ADDR)
+#define bfin_write_DMA9_START_ADDR(val)		bfin_write32(DMA9_START_ADDR, val)
+#define bfin_read_DMA9_X_COUNT()		bfin_read16(DMA9_X_COUNT)
+#define bfin_write_DMA9_X_COUNT(val)		bfin_write16(DMA9_X_COUNT, val)
+#define bfin_read_DMA9_Y_COUNT()		bfin_read16(DMA9_Y_COUNT)
+#define bfin_write_DMA9_Y_COUNT(val)		bfin_write16(DMA9_Y_COUNT, val)
+#define bfin_read_DMA9_X_MODIFY()		bfin_read16(DMA9_X_MODIFY)
+#define bfin_write_DMA9_X_MODIFY(val)		bfin_write16(DMA9_X_MODIFY, val)
+#define bfin_read_DMA9_Y_MODIFY()		bfin_read16(DMA9_Y_MODIFY)
+#define bfin_write_DMA9_Y_MODIFY(val)		bfin_write16(DMA9_Y_MODIFY, val)
+#define bfin_read_DMA9_CURR_DESC_PTR()		bfin_read32(DMA9_CURR_DESC_PTR)
+#define bfin_write_DMA9_CURR_DESC_PTR(val)	bfin_write32(DMA9_CURR_DESC_PTR, val)
+#define bfin_read_DMA9_CURR_ADDR()		bfin_read32(DMA9_CURR_ADDR)
+#define bfin_write_DMA9_CURR_ADDR(val)		bfin_write32(DMA9_CURR_ADDR, val)
+#define bfin_read_DMA9_CURR_X_COUNT()		bfin_read16(DMA9_CURR_X_COUNT)
+#define bfin_write_DMA9_CURR_X_COUNT(val)	bfin_write16(DMA9_CURR_X_COUNT, val)
+#define bfin_read_DMA9_CURR_Y_COUNT()		bfin_read16(DMA9_CURR_Y_COUNT)
+#define bfin_write_DMA9_CURR_Y_COUNT(val)	bfin_write16(DMA9_CURR_Y_COUNT, val)
+#define bfin_read_DMA9_IRQ_STATUS()		bfin_read16(DMA9_IRQ_STATUS)
+#define bfin_write_DMA9_IRQ_STATUS(val)		bfin_write16(DMA9_IRQ_STATUS, val)
+#define bfin_read_DMA9_PERIPHERAL_MAP()		bfin_read16(DMA9_PERIPHERAL_MAP)
+#define bfin_write_DMA9_PERIPHERAL_MAP(val)	bfin_write16(DMA9_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA10_CONFIG()		bfin_read16(DMA10_CONFIG)
+#define bfin_write_DMA10_CONFIG(val)		bfin_write16(DMA10_CONFIG, val)
+#define bfin_read_DMA10_NEXT_DESC_PTR()		bfin_read32(DMA10_NEXT_DESC_PTR)
+#define bfin_write_DMA10_NEXT_DESC_PTR(val)	bfin_write32(DMA10_NEXT_DESC_PTR, val)
+#define bfin_read_DMA10_START_ADDR()		bfin_read32(DMA10_START_ADDR)
+#define bfin_write_DMA10_START_ADDR(val)	bfin_write32(DMA10_START_ADDR, val)
+#define bfin_read_DMA10_X_COUNT()		bfin_read16(DMA10_X_COUNT)
+#define bfin_write_DMA10_X_COUNT(val)		bfin_write16(DMA10_X_COUNT, val)
+#define bfin_read_DMA10_Y_COUNT()		bfin_read16(DMA10_Y_COUNT)
+#define bfin_write_DMA10_Y_COUNT(val)		bfin_write16(DMA10_Y_COUNT, val)
+#define bfin_read_DMA10_X_MODIFY()		bfin_read16(DMA10_X_MODIFY)
+#define bfin_write_DMA10_X_MODIFY(val)		bfin_write16(DMA10_X_MODIFY, val)
+#define bfin_read_DMA10_Y_MODIFY()		bfin_read16(DMA10_Y_MODIFY)
+#define bfin_write_DMA10_Y_MODIFY(val)		bfin_write16(DMA10_Y_MODIFY, val)
+#define bfin_read_DMA10_CURR_DESC_PTR()		bfin_read32(DMA10_CURR_DESC_PTR)
+#define bfin_write_DMA10_CURR_DESC_PTR(val)	bfin_write32(DMA10_CURR_DESC_PTR, val)
+#define bfin_read_DMA10_CURR_ADDR()		bfin_read32(DMA10_CURR_ADDR)
+#define bfin_write_DMA10_CURR_ADDR(val)		bfin_write32(DMA10_CURR_ADDR, val)
+#define bfin_read_DMA10_CURR_X_COUNT()		bfin_read16(DMA10_CURR_X_COUNT)
+#define bfin_write_DMA10_CURR_X_COUNT(val)	bfin_write16(DMA10_CURR_X_COUNT, val)
+#define bfin_read_DMA10_CURR_Y_COUNT()		bfin_read16(DMA10_CURR_Y_COUNT)
+#define bfin_write_DMA10_CURR_Y_COUNT(val)	bfin_write16(DMA10_CURR_Y_COUNT, val)
+#define bfin_read_DMA10_IRQ_STATUS()		bfin_read16(DMA10_IRQ_STATUS)
+#define bfin_write_DMA10_IRQ_STATUS(val)	bfin_write16(DMA10_IRQ_STATUS, val)
+#define bfin_read_DMA10_PERIPHERAL_MAP()	bfin_read16(DMA10_PERIPHERAL_MAP)
+#define bfin_write_DMA10_PERIPHERAL_MAP(val)	bfin_write16(DMA10_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA11_CONFIG()		bfin_read16(DMA11_CONFIG)
+#define bfin_write_DMA11_CONFIG(val)		bfin_write16(DMA11_CONFIG, val)
+#define bfin_read_DMA11_NEXT_DESC_PTR()		bfin_read32(DMA11_NEXT_DESC_PTR)
+#define bfin_write_DMA11_NEXT_DESC_PTR(val)	bfin_write32(DMA11_NEXT_DESC_PTR, val)
+#define bfin_read_DMA11_START_ADDR()		bfin_read32(DMA11_START_ADDR)
+#define bfin_write_DMA11_START_ADDR(val)	bfin_write32(DMA11_START_ADDR, val)
+#define bfin_read_DMA11_X_COUNT()		bfin_read16(DMA11_X_COUNT)
+#define bfin_write_DMA11_X_COUNT(val)		bfin_write16(DMA11_X_COUNT, val)
+#define bfin_read_DMA11_Y_COUNT()		bfin_read16(DMA11_Y_COUNT)
+#define bfin_write_DMA11_Y_COUNT(val)		bfin_write16(DMA11_Y_COUNT, val)
+#define bfin_read_DMA11_X_MODIFY()		bfin_read16(DMA11_X_MODIFY)
+#define bfin_write_DMA11_X_MODIFY(val)		bfin_write16(DMA11_X_MODIFY, val)
+#define bfin_read_DMA11_Y_MODIFY()		bfin_read16(DMA11_Y_MODIFY)
+#define bfin_write_DMA11_Y_MODIFY(val)		bfin_write16(DMA11_Y_MODIFY, val)
+#define bfin_read_DMA11_CURR_DESC_PTR()		bfin_read32(DMA11_CURR_DESC_PTR)
+#define bfin_write_DMA11_CURR_DESC_PTR(val)	bfin_write32(DMA11_CURR_DESC_PTR, val)
+#define bfin_read_DMA11_CURR_ADDR()		bfin_read32(DMA11_CURR_ADDR)
+#define bfin_write_DMA11_CURR_ADDR(val)		bfin_write32(DMA11_CURR_ADDR, val)
+#define bfin_read_DMA11_CURR_X_COUNT()		bfin_read16(DMA11_CURR_X_COUNT)
+#define bfin_write_DMA11_CURR_X_COUNT(val)	bfin_write16(DMA11_CURR_X_COUNT, val)
+#define bfin_read_DMA11_CURR_Y_COUNT()		bfin_read16(DMA11_CURR_Y_COUNT)
+#define bfin_write_DMA11_CURR_Y_COUNT(val)	bfin_write16(DMA11_CURR_Y_COUNT, val)
+#define bfin_read_DMA11_IRQ_STATUS()		bfin_read16(DMA11_IRQ_STATUS)
+#define bfin_write_DMA11_IRQ_STATUS(val)	bfin_write16(DMA11_IRQ_STATUS, val)
+#define bfin_read_DMA11_PERIPHERAL_MAP()	bfin_read16(DMA11_PERIPHERAL_MAP)
+#define bfin_write_DMA11_PERIPHERAL_MAP(val)	bfin_write16(DMA11_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_D0_CONFIG()		bfin_read16(MDMA_D0_CONFIG)
+#define bfin_write_MDMA_D0_CONFIG(val)		bfin_write16(MDMA_D0_CONFIG, val)
+#define bfin_read_MDMA_D0_NEXT_DESC_PTR()	bfin_read32(MDMA_D0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D0_START_ADDR()		bfin_read32(MDMA_D0_START_ADDR)
+#define bfin_write_MDMA_D0_START_ADDR(val)	bfin_write32(MDMA_D0_START_ADDR, val)
+#define bfin_read_MDMA_D0_X_COUNT()		bfin_read16(MDMA_D0_X_COUNT)
+#define bfin_write_MDMA_D0_X_COUNT(val)		bfin_write16(MDMA_D0_X_COUNT, val)
+#define bfin_read_MDMA_D0_Y_COUNT()		bfin_read16(MDMA_D0_Y_COUNT)
+#define bfin_write_MDMA_D0_Y_COUNT(val)		bfin_write16(MDMA_D0_Y_COUNT, val)
+#define bfin_read_MDMA_D0_X_MODIFY()		bfin_read16(MDMA_D0_X_MODIFY)
+#define bfin_write_MDMA_D0_X_MODIFY(val)	bfin_write16(MDMA_D0_X_MODIFY, val)
+#define bfin_read_MDMA_D0_Y_MODIFY()		bfin_read16(MDMA_D0_Y_MODIFY)
+#define bfin_write_MDMA_D0_Y_MODIFY(val)	bfin_write16(MDMA_D0_Y_MODIFY, val)
+#define bfin_read_MDMA_D0_CURR_DESC_PTR()	bfin_read32(MDMA_D0_CURR_DESC_PTR)
+#define bfin_write_MDMA_D0_CURR_DESC_PTR(val)	bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D0_CURR_ADDR()		bfin_read32(MDMA_D0_CURR_ADDR)
+#define bfin_write_MDMA_D0_CURR_ADDR(val)	bfin_write32(MDMA_D0_CURR_ADDR, val)
+#define bfin_read_MDMA_D0_CURR_X_COUNT()	bfin_read16(MDMA_D0_CURR_X_COUNT)
+#define bfin_write_MDMA_D0_CURR_X_COUNT(val)	bfin_write16(MDMA_D0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D0_CURR_Y_COUNT()	bfin_read16(MDMA_D0_CURR_Y_COUNT)
+#define bfin_write_MDMA_D0_CURR_Y_COUNT(val)	bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D0_IRQ_STATUS()		bfin_read16(MDMA_D0_IRQ_STATUS)
+#define bfin_write_MDMA_D0_IRQ_STATUS(val)	bfin_write16(MDMA_D0_IRQ_STATUS, val)
+#define bfin_read_MDMA_D0_PERIPHERAL_MAP()	bfin_read16(MDMA_D0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_S0_CONFIG()		bfin_read16(MDMA_S0_CONFIG)
+#define bfin_write_MDMA_S0_CONFIG(val)		bfin_write16(MDMA_S0_CONFIG, val)
+#define bfin_read_MDMA_S0_NEXT_DESC_PTR()	bfin_read32(MDMA_S0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S0_START_ADDR()		bfin_read32(MDMA_S0_START_ADDR)
+#define bfin_write_MDMA_S0_START_ADDR(val)	bfin_write32(MDMA_S0_START_ADDR, val)
+#define bfin_read_MDMA_S0_X_COUNT()		bfin_read16(MDMA_S0_X_COUNT)
+#define bfin_write_MDMA_S0_X_COUNT(val)		bfin_write16(MDMA_S0_X_COUNT, val)
+#define bfin_read_MDMA_S0_Y_COUNT()		bfin_read16(MDMA_S0_Y_COUNT)
+#define bfin_write_MDMA_S0_Y_COUNT(val)		bfin_write16(MDMA_S0_Y_COUNT, val)
+#define bfin_read_MDMA_S0_X_MODIFY()		bfin_read16(MDMA_S0_X_MODIFY)
+#define bfin_write_MDMA_S0_X_MODIFY(val)	bfin_write16(MDMA_S0_X_MODIFY, val)
+#define bfin_read_MDMA_S0_Y_MODIFY()		bfin_read16(MDMA_S0_Y_MODIFY)
+#define bfin_write_MDMA_S0_Y_MODIFY(val)	bfin_write16(MDMA_S0_Y_MODIFY, val)
+#define bfin_read_MDMA_S0_CURR_DESC_PTR()	bfin_read32(MDMA_S0_CURR_DESC_PTR)
+#define bfin_write_MDMA_S0_CURR_DESC_PTR(val)	bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S0_CURR_ADDR()		bfin_read32(MDMA_S0_CURR_ADDR)
+#define bfin_write_MDMA_S0_CURR_ADDR(val)	bfin_write32(MDMA_S0_CURR_ADDR, val)
+#define bfin_read_MDMA_S0_CURR_X_COUNT()	bfin_read16(MDMA_S0_CURR_X_COUNT)
+#define bfin_write_MDMA_S0_CURR_X_COUNT(val)	bfin_write16(MDMA_S0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S0_CURR_Y_COUNT()	bfin_read16(MDMA_S0_CURR_Y_COUNT)
+#define bfin_write_MDMA_S0_CURR_Y_COUNT(val)	bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S0_IRQ_STATUS()		bfin_read16(MDMA_S0_IRQ_STATUS)
+#define bfin_write_MDMA_S0_IRQ_STATUS(val)	bfin_write16(MDMA_S0_IRQ_STATUS, val)
+#define bfin_read_MDMA_S0_PERIPHERAL_MAP()	bfin_read16(MDMA_S0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_D1_CONFIG()		bfin_read16(MDMA_D1_CONFIG)
+#define bfin_write_MDMA_D1_CONFIG(val)		bfin_write16(MDMA_D1_CONFIG, val)
+#define bfin_read_MDMA_D1_NEXT_DESC_PTR()	bfin_read32(MDMA_D1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D1_START_ADDR()		bfin_read32(MDMA_D1_START_ADDR)
+#define bfin_write_MDMA_D1_START_ADDR(val)	bfin_write32(MDMA_D1_START_ADDR, val)
+#define bfin_read_MDMA_D1_X_COUNT()		bfin_read16(MDMA_D1_X_COUNT)
+#define bfin_write_MDMA_D1_X_COUNT(val)		bfin_write16(MDMA_D1_X_COUNT, val)
+#define bfin_read_MDMA_D1_Y_COUNT()		bfin_read16(MDMA_D1_Y_COUNT)
+#define bfin_write_MDMA_D1_Y_COUNT(val)		bfin_write16(MDMA_D1_Y_COUNT, val)
+#define bfin_read_MDMA_D1_X_MODIFY()		bfin_read16(MDMA_D1_X_MODIFY)
+#define bfin_write_MDMA_D1_X_MODIFY(val)	bfin_write16(MDMA_D1_X_MODIFY, val)
+#define bfin_read_MDMA_D1_Y_MODIFY()		bfin_read16(MDMA_D1_Y_MODIFY)
+#define bfin_write_MDMA_D1_Y_MODIFY(val)	bfin_write16(MDMA_D1_Y_MODIFY, val)
+#define bfin_read_MDMA_D1_CURR_DESC_PTR()	bfin_read32(MDMA_D1_CURR_DESC_PTR)
+#define bfin_write_MDMA_D1_CURR_DESC_PTR(val)	bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D1_CURR_ADDR()		bfin_read32(MDMA_D1_CURR_ADDR)
+#define bfin_write_MDMA_D1_CURR_ADDR(val)	bfin_write32(MDMA_D1_CURR_ADDR, val)
+#define bfin_read_MDMA_D1_CURR_X_COUNT()	bfin_read16(MDMA_D1_CURR_X_COUNT)
+#define bfin_write_MDMA_D1_CURR_X_COUNT(val)	bfin_write16(MDMA_D1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D1_CURR_Y_COUNT()	bfin_read16(MDMA_D1_CURR_Y_COUNT)
+#define bfin_write_MDMA_D1_CURR_Y_COUNT(val)	bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D1_IRQ_STATUS()		bfin_read16(MDMA_D1_IRQ_STATUS)
+#define bfin_write_MDMA_D1_IRQ_STATUS(val)	bfin_write16(MDMA_D1_IRQ_STATUS, val)
+#define bfin_read_MDMA_D1_PERIPHERAL_MAP()	bfin_read16(MDMA_D1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_S1_CONFIG()		bfin_read16(MDMA_S1_CONFIG)
+#define bfin_write_MDMA_S1_CONFIG(val)		bfin_write16(MDMA_S1_CONFIG, val)
+#define bfin_read_MDMA_S1_NEXT_DESC_PTR()	bfin_read32(MDMA_S1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S1_START_ADDR()		bfin_read32(MDMA_S1_START_ADDR)
+#define bfin_write_MDMA_S1_START_ADDR(val)	bfin_write32(MDMA_S1_START_ADDR, val)
+#define bfin_read_MDMA_S1_X_COUNT()		bfin_read16(MDMA_S1_X_COUNT)
+#define bfin_write_MDMA_S1_X_COUNT(val)		bfin_write16(MDMA_S1_X_COUNT, val)
+#define bfin_read_MDMA_S1_Y_COUNT()		bfin_read16(MDMA_S1_Y_COUNT)
+#define bfin_write_MDMA_S1_Y_COUNT(val)		bfin_write16(MDMA_S1_Y_COUNT, val)
+#define bfin_read_MDMA_S1_X_MODIFY()		bfin_read16(MDMA_S1_X_MODIFY)
+#define bfin_write_MDMA_S1_X_MODIFY(val)	bfin_write16(MDMA_S1_X_MODIFY, val)
+#define bfin_read_MDMA_S1_Y_MODIFY()		bfin_read16(MDMA_S1_Y_MODIFY)
+#define bfin_write_MDMA_S1_Y_MODIFY(val)	bfin_write16(MDMA_S1_Y_MODIFY, val)
+#define bfin_read_MDMA_S1_CURR_DESC_PTR()	bfin_read32(MDMA_S1_CURR_DESC_PTR)
+#define bfin_write_MDMA_S1_CURR_DESC_PTR(val)	bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S1_CURR_ADDR()		bfin_read32(MDMA_S1_CURR_ADDR)
+#define bfin_write_MDMA_S1_CURR_ADDR(val)	bfin_write32(MDMA_S1_CURR_ADDR, val)
+#define bfin_read_MDMA_S1_CURR_X_COUNT()	bfin_read16(MDMA_S1_CURR_X_COUNT)
+#define bfin_write_MDMA_S1_CURR_X_COUNT(val)	bfin_write16(MDMA_S1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S1_CURR_Y_COUNT()	bfin_read16(MDMA_S1_CURR_Y_COUNT)
+#define bfin_write_MDMA_S1_CURR_Y_COUNT(val)	bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S1_IRQ_STATUS()		bfin_read16(MDMA_S1_IRQ_STATUS)
+#define bfin_write_MDMA_S1_IRQ_STATUS(val)	bfin_write16(MDMA_S1_IRQ_STATUS, val)
+#define bfin_read_MDMA_S1_PERIPHERAL_MAP()	bfin_read16(MDMA_S1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
+
+
+/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)							*/
+#define bfin_read_PPI_CONTROL()			bfin_read16(PPI_CONTROL)
+#define bfin_write_PPI_CONTROL(val)		bfin_write16(PPI_CONTROL, val)
+#define bfin_read_PPI_STATUS()			bfin_read16(PPI_STATUS)
+#define bfin_write_PPI_STATUS(val)		bfin_write16(PPI_STATUS, val)
+#define bfin_read_PPI_DELAY()			bfin_read16(PPI_DELAY)
+#define bfin_write_PPI_DELAY(val)		bfin_write16(PPI_DELAY, val)
+#define bfin_read_PPI_COUNT()			bfin_read16(PPI_COUNT)
+#define bfin_write_PPI_COUNT(val)		bfin_write16(PPI_COUNT, val)
+#define bfin_read_PPI_FRAME()			bfin_read16(PPI_FRAME)
+#define bfin_write_PPI_FRAME(val)		bfin_write16(PPI_FRAME, val)
+
+
+/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
+
+/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)								*/
+#define bfin_read_PORTGIO()			bfin_read16(PORTGIO)
+#define bfin_write_PORTGIO(val)			bfin_write16(PORTGIO, val)
+#define bfin_read_PORTGIO_CLEAR()		bfin_read16(PORTGIO_CLEAR)
+#define bfin_write_PORTGIO_CLEAR(val)		bfin_write16(PORTGIO_CLEAR, val)
+#define bfin_read_PORTGIO_SET()			bfin_read16(PORTGIO_SET)
+#define bfin_write_PORTGIO_SET(val)		bfin_write16(PORTGIO_SET, val)
+#define bfin_read_PORTGIO_TOGGLE()		bfin_read16(PORTGIO_TOGGLE)
+#define bfin_write_PORTGIO_TOGGLE(val)		bfin_write16(PORTGIO_TOGGLE, val)
+#define bfin_read_PORTGIO_MASKA()		bfin_read16(PORTGIO_MASKA)
+#define bfin_write_PORTGIO_MASKA(val)		bfin_write16(PORTGIO_MASKA, val)
+#define bfin_read_PORTGIO_MASKA_CLEAR()		bfin_read16(PORTGIO_MASKA_CLEAR)
+#define bfin_write_PORTGIO_MASKA_CLEAR(val)	bfin_write16(PORTGIO_MASKA_CLEAR, val)
+#define bfin_read_PORTGIO_MASKA_SET()		bfin_read16(PORTGIO_MASKA_SET)
+#define bfin_write_PORTGIO_MASKA_SET(val)	bfin_write16(PORTGIO_MASKA_SET, val)
+#define bfin_read_PORTGIO_MASKA_TOGGLE()	bfin_read16(PORTGIO_MASKA_TOGGLE)
+#define bfin_write_PORTGIO_MASKA_TOGGLE(val)	bfin_write16(PORTGIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTGIO_MASKB()		bfin_read16(PORTGIO_MASKB)
+#define bfin_write_PORTGIO_MASKB(val)		bfin_write16(PORTGIO_MASKB, val)
+#define bfin_read_PORTGIO_MASKB_CLEAR()		bfin_read16(PORTGIO_MASKB_CLEAR)
+#define bfin_write_PORTGIO_MASKB_CLEAR(val)	bfin_write16(PORTGIO_MASKB_CLEAR, val)
+#define bfin_read_PORTGIO_MASKB_SET()		bfin_read16(PORTGIO_MASKB_SET)
+#define bfin_write_PORTGIO_MASKB_SET(val)	bfin_write16(PORTGIO_MASKB_SET, val)
+#define bfin_read_PORTGIO_MASKB_TOGGLE()	bfin_read16(PORTGIO_MASKB_TOGGLE)
+#define bfin_write_PORTGIO_MASKB_TOGGLE(val)	bfin_write16(PORTGIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTGIO_DIR()			bfin_read16(PORTGIO_DIR)
+#define bfin_write_PORTGIO_DIR(val)		bfin_write16(PORTGIO_DIR, val)
+#define bfin_read_PORTGIO_POLAR()		bfin_read16(PORTGIO_POLAR)
+#define bfin_write_PORTGIO_POLAR(val)		bfin_write16(PORTGIO_POLAR, val)
+#define bfin_read_PORTGIO_EDGE()		bfin_read16(PORTGIO_EDGE)
+#define bfin_write_PORTGIO_EDGE(val)		bfin_write16(PORTGIO_EDGE, val)
+#define bfin_read_PORTGIO_BOTH()		bfin_read16(PORTGIO_BOTH)
+#define bfin_write_PORTGIO_BOTH(val)		bfin_write16(PORTGIO_BOTH, val)
+#define bfin_read_PORTGIO_INEN()		bfin_read16(PORTGIO_INEN)
+#define bfin_write_PORTGIO_INEN(val)		bfin_write16(PORTGIO_INEN, val)
+
+
+/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)								*/
+#define bfin_read_PORTHIO()			bfin_read16(PORTHIO)
+#define bfin_write_PORTHIO(val)			bfin_write16(PORTHIO, val)
+#define bfin_read_PORTHIO_CLEAR()		bfin_read16(PORTHIO_CLEAR)
+#define bfin_write_PORTHIO_CLEAR(val)		bfin_write16(PORTHIO_CLEAR, val)
+#define bfin_read_PORTHIO_SET()			bfin_read16(PORTHIO_SET)
+#define bfin_write_PORTHIO_SET(val)		bfin_write16(PORTHIO_SET, val)
+#define bfin_read_PORTHIO_TOGGLE()		bfin_read16(PORTHIO_TOGGLE)
+#define bfin_write_PORTHIO_TOGGLE(val)		bfin_write16(PORTHIO_TOGGLE, val)
+#define bfin_read_PORTHIO_MASKA()		bfin_read16(PORTHIO_MASKA)
+#define bfin_write_PORTHIO_MASKA(val)		bfin_write16(PORTHIO_MASKA, val)
+#define bfin_read_PORTHIO_MASKA_CLEAR()		bfin_read16(PORTHIO_MASKA_CLEAR)
+#define bfin_write_PORTHIO_MASKA_CLEAR(val)	bfin_write16(PORTHIO_MASKA_CLEAR, val)
+#define bfin_read_PORTHIO_MASKA_SET()		bfin_read16(PORTHIO_MASKA_SET)
+#define bfin_write_PORTHIO_MASKA_SET(val)	bfin_write16(PORTHIO_MASKA_SET, val)
+#define bfin_read_PORTHIO_MASKA_TOGGLE()	bfin_read16(PORTHIO_MASKA_TOGGLE)
+#define bfin_write_PORTHIO_MASKA_TOGGLE(val)	bfin_write16(PORTHIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTHIO_MASKB()		bfin_read16(PORTHIO_MASKB)
+#define bfin_write_PORTHIO_MASKB(val)		bfin_write16(PORTHIO_MASKB, val)
+#define bfin_read_PORTHIO_MASKB_CLEAR()		bfin_read16(PORTHIO_MASKB_CLEAR)
+#define bfin_write_PORTHIO_MASKB_CLEAR(val)	bfin_write16(PORTHIO_MASKB_CLEAR, val)
+#define bfin_read_PORTHIO_MASKB_SET()		bfin_read16(PORTHIO_MASKB_SET)
+#define bfin_write_PORTHIO_MASKB_SET(val)	bfin_write16(PORTHIO_MASKB_SET, val)
+#define bfin_read_PORTHIO_MASKB_TOGGLE()	bfin_read16(PORTHIO_MASKB_TOGGLE)
+#define bfin_write_PORTHIO_MASKB_TOGGLE(val)	bfin_write16(PORTHIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTHIO_DIR()			bfin_read16(PORTHIO_DIR)
+#define bfin_write_PORTHIO_DIR(val)		bfin_write16(PORTHIO_DIR, val)
+#define bfin_read_PORTHIO_POLAR()		bfin_read16(PORTHIO_POLAR)
+#define bfin_write_PORTHIO_POLAR(val)		bfin_write16(PORTHIO_POLAR, val)
+#define bfin_read_PORTHIO_EDGE()		bfin_read16(PORTHIO_EDGE)
+#define bfin_write_PORTHIO_EDGE(val)		bfin_write16(PORTHIO_EDGE, val)
+#define bfin_read_PORTHIO_BOTH()		bfin_read16(PORTHIO_BOTH)
+#define bfin_write_PORTHIO_BOTH(val)		bfin_write16(PORTHIO_BOTH, val)
+#define bfin_read_PORTHIO_INEN()		bfin_read16(PORTHIO_INEN)
+#define bfin_write_PORTHIO_INEN(val)		bfin_write16(PORTHIO_INEN, val)
+
+
+/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
+#define bfin_read_UART1_THR()			bfin_read16(UART1_THR)
+#define bfin_write_UART1_THR(val)		bfin_write16(UART1_THR, val)
+#define bfin_read_UART1_RBR()			bfin_read16(UART1_RBR)
+#define bfin_write_UART1_RBR(val)		bfin_write16(UART1_RBR, val)
+#define bfin_read_UART1_DLL()			bfin_read16(UART1_DLL)
+#define bfin_write_UART1_DLL(val)		bfin_write16(UART1_DLL, val)
+#define bfin_read_UART1_IER()			bfin_read16(UART1_IER)
+#define bfin_write_UART1_IER(val)		bfin_write16(UART1_IER, val)
+#define bfin_read_UART1_DLH()			bfin_read16(UART1_DLH)
+#define bfin_write_UART1_DLH(val)		bfin_write16(UART1_DLH, val)
+#define bfin_read_UART1_IIR()			bfin_read16(UART1_IIR)
+#define bfin_write_UART1_IIR(val)		bfin_write16(UART1_IIR, val)
+#define bfin_read_UART1_LCR()			bfin_read16(UART1_LCR)
+#define bfin_write_UART1_LCR(val)		bfin_write16(UART1_LCR, val)
+#define bfin_read_UART1_MCR()			bfin_read16(UART1_MCR)
+#define bfin_write_UART1_MCR(val)		bfin_write16(UART1_MCR, val)
+#define bfin_read_UART1_LSR()			bfin_read16(UART1_LSR)
+#define bfin_write_UART1_LSR(val)		bfin_write16(UART1_LSR, val)
+#define bfin_read_UART1_MSR()			bfin_read16(UART1_MSR)
+#define bfin_write_UART1_MSR(val)		bfin_write16(UART1_MSR, val)
+#define bfin_read_UART1_SCR()			bfin_read16(UART1_SCR)
+#define bfin_write_UART1_SCR(val)		bfin_write16(UART1_SCR, val)
+#define bfin_read_UART1_GCTL()			bfin_read16(UART1_GCTL)
+#define bfin_write_UART1_GCTL(val)		bfin_write16(UART1_GCTL, val)
+
+/* CAN register accessors from cdefBF534.h are omitted (CAN is not present on the ADSP-BF51x processors) */
+
+/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)								*/
+#define bfin_read_PORTF_FER()			bfin_read16(PORTF_FER)
+#define bfin_write_PORTF_FER(val)		bfin_write16(PORTF_FER, val)
+#define bfin_read_PORTG_FER()			bfin_read16(PORTG_FER)
+#define bfin_write_PORTG_FER(val)		bfin_write16(PORTG_FER, val)
+#define bfin_read_PORTH_FER()			bfin_read16(PORTH_FER)
+#define bfin_write_PORTH_FER(val)		bfin_write16(PORTH_FER, val)
+#define bfin_read_PORT_MUX()			bfin_read16(PORT_MUX)
+#define bfin_write_PORT_MUX(val)		bfin_write16(PORT_MUX, val)
+
+
+/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)								*/
+#define bfin_read_HMDMA0_CONTROL()		bfin_read16(HMDMA0_CONTROL)
+#define bfin_write_HMDMA0_CONTROL(val)		bfin_write16(HMDMA0_CONTROL, val)
+#define bfin_read_HMDMA0_ECINIT()		bfin_read16(HMDMA0_ECINIT)
+#define bfin_write_HMDMA0_ECINIT(val)		bfin_write16(HMDMA0_ECINIT, val)
+#define bfin_read_HMDMA0_BCINIT()		bfin_read16(HMDMA0_BCINIT)
+#define bfin_write_HMDMA0_BCINIT(val)		bfin_write16(HMDMA0_BCINIT, val)
+#define bfin_read_HMDMA0_ECURGENT()		bfin_read16(HMDMA0_ECURGENT)
+#define bfin_write_HMDMA0_ECURGENT(val)		bfin_write16(HMDMA0_ECURGENT, val)
+#define bfin_read_HMDMA0_ECOVERFLOW()		bfin_read16(HMDMA0_ECOVERFLOW)
+#define bfin_write_HMDMA0_ECOVERFLOW(val)	bfin_write16(HMDMA0_ECOVERFLOW, val)
+#define bfin_read_HMDMA0_ECOUNT()		bfin_read16(HMDMA0_ECOUNT)
+#define bfin_write_HMDMA0_ECOUNT(val)		bfin_write16(HMDMA0_ECOUNT, val)
+#define bfin_read_HMDMA0_BCOUNT()		bfin_read16(HMDMA0_BCOUNT)
+#define bfin_write_HMDMA0_BCOUNT(val)		bfin_write16(HMDMA0_BCOUNT, val)
+
+#define bfin_read_HMDMA1_CONTROL()		bfin_read16(HMDMA1_CONTROL)
+#define bfin_write_HMDMA1_CONTROL(val)		bfin_write16(HMDMA1_CONTROL, val)
+#define bfin_read_HMDMA1_ECINIT()		bfin_read16(HMDMA1_ECINIT)
+#define bfin_write_HMDMA1_ECINIT(val)		bfin_write16(HMDMA1_ECINIT, val)
+#define bfin_read_HMDMA1_BCINIT()		bfin_read16(HMDMA1_BCINIT)
+#define bfin_write_HMDMA1_BCINIT(val)		bfin_write16(HMDMA1_BCINIT, val)
+#define bfin_read_HMDMA1_ECURGENT()		bfin_read16(HMDMA1_ECURGENT)
+#define bfin_write_HMDMA1_ECURGENT(val)		bfin_write16(HMDMA1_ECURGENT, val)
+#define bfin_read_HMDMA1_ECOVERFLOW()		bfin_read16(HMDMA1_ECOVERFLOW)
+#define bfin_write_HMDMA1_ECOVERFLOW(val)	bfin_write16(HMDMA1_ECOVERFLOW, val)
+#define bfin_read_HMDMA1_ECOUNT()		bfin_read16(HMDMA1_ECOUNT)
+#define bfin_write_HMDMA1_ECOUNT(val)		bfin_write16(HMDMA1_ECOUNT, val)
+#define bfin_read_HMDMA1_BCOUNT()		bfin_read16(HMDMA1_BCOUNT)
+#define bfin_write_HMDMA1_BCOUNT(val)		bfin_write16(HMDMA1_BCOUNT, val)
+
+/* ==== end from cdefBF534.h ==== */
+
+/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
+
+#define bfin_read_PORTF_MUX()			bfin_read16(PORTF_MUX)
+#define bfin_write_PORTF_MUX(val)		bfin_write16(PORTF_MUX, val)
+#define bfin_read_PORTG_MUX()			bfin_read16(PORTG_MUX)
+#define bfin_write_PORTG_MUX(val)		bfin_write16(PORTG_MUX, val)
+#define bfin_read_PORTH_MUX()			bfin_read16(PORTH_MUX)
+#define bfin_write_PORTH_MUX(val)		bfin_write16(PORTH_MUX, val)
+
+#define bfin_read_PORTF_DRIVE()			bfin_read16(PORTF_DRIVE)
+#define bfin_write_PORTF_DRIVE(val)		bfin_write16(PORTF_DRIVE, val)
+#define bfin_read_PORTG_DRIVE()			bfin_read16(PORTG_DRIVE)
+#define bfin_write_PORTG_DRIVE(val)		bfin_write16(PORTG_DRIVE, val)
+#define bfin_read_PORTH_DRIVE()			bfin_read16(PORTH_DRIVE)
+#define bfin_write_PORTH_DRIVE(val)		bfin_write16(PORTH_DRIVE, val)
+#define bfin_read_PORTF_SLEW()			bfin_read16(PORTF_SLEW)
+#define bfin_write_PORTF_SLEW(val)		bfin_write16(PORTF_SLEW, val)
+#define bfin_read_PORTG_SLEW()			bfin_read16(PORTG_SLEW)
+#define bfin_write_PORTG_SLEW(val)		bfin_write16(PORTG_SLEW, val)
+#define bfin_read_PORTH_SLEW()			bfin_read16(PORTH_SLEW)
+#define bfin_write_PORTH_SLEW(val)		bfin_write16(PORTH_SLEW, val)
+#define bfin_read_PORTF_HYSTERISIS()		bfin_read16(PORTF_HYSTERISIS)
+#define bfin_write_PORTF_HYSTERISIS(val)	bfin_write16(PORTF_HYSTERISIS, val)
+#define bfin_read_PORTG_HYSTERISIS()		bfin_read16(PORTG_HYSTERISIS)
+#define bfin_write_PORTG_HYSTERISIS(val)	bfin_write16(PORTG_HYSTERISIS, val)
+#define bfin_read_PORTH_HYSTERISIS()		bfin_read16(PORTH_HYSTERISIS)
+#define bfin_write_PORTH_HYSTERISIS(val)	bfin_write16(PORTH_HYSTERISIS, val)
+#define bfin_read_MISCPORT_DRIVE()		bfin_read16(MISCPORT_DRIVE)
+#define bfin_write_MISCPORT_DRIVE(val)		bfin_write16(MISCPORT_DRIVE, val)
+#define bfin_read_MISCPORT_SLEW()		bfin_read16(MISCPORT_SLEW)
+#define bfin_write_MISCPORT_SLEW(val)		bfin_write16(MISCPORT_SLEW, val)
+#define bfin_read_MISCPORT_HYSTERISIS()		bfin_read16(MISCPORT_HYSTERISIS)
+#define bfin_write_MISCPORT_HYSTERISIS(val)	bfin_write16(MISCPORT_HYSTERISIS, val)
+
+/* HOST Port Registers */
+
+#define bfin_read_HOST_CONTROL()		bfin_read16(HOST_CONTROL)
+#define bfin_write_HOST_CONTROL(val)		bfin_write16(HOST_CONTROL, val)
+#define bfin_read_HOST_STATUS()			bfin_read16(HOST_STATUS)
+#define bfin_write_HOST_STATUS(val)		bfin_write16(HOST_STATUS, val)
+#define bfin_read_HOST_TIMEOUT()		bfin_read16(HOST_TIMEOUT)
+#define bfin_write_HOST_TIMEOUT(val)		bfin_write16(HOST_TIMEOUT, val)
+
+/* Counter Registers */
+
+#define bfin_read_CNT_CONFIG()			bfin_read16(CNT_CONFIG)
+#define bfin_write_CNT_CONFIG(val)		bfin_write16(CNT_CONFIG, val)
+#define bfin_read_CNT_IMASK()			bfin_read16(CNT_IMASK)
+#define bfin_write_CNT_IMASK(val)		bfin_write16(CNT_IMASK, val)
+#define bfin_read_CNT_STATUS()			bfin_read16(CNT_STATUS)
+#define bfin_write_CNT_STATUS(val)		bfin_write16(CNT_STATUS, val)
+#define bfin_read_CNT_COMMAND()			bfin_read16(CNT_COMMAND)
+#define bfin_write_CNT_COMMAND(val)		bfin_write16(CNT_COMMAND, val)
+#define bfin_read_CNT_DEBOUNCE()		bfin_read16(CNT_DEBOUNCE)
+#define bfin_write_CNT_DEBOUNCE(val)		bfin_write16(CNT_DEBOUNCE, val)
+#define bfin_read_CNT_COUNTER()			bfin_read32(CNT_COUNTER)
+#define bfin_write_CNT_COUNTER(val)		bfin_write32(CNT_COUNTER, val)
+#define bfin_read_CNT_MAX()			bfin_read32(CNT_MAX)
+#define bfin_write_CNT_MAX(val)			bfin_write32(CNT_MAX, val)
+#define bfin_read_CNT_MIN()			bfin_read32(CNT_MIN)
+#define bfin_write_CNT_MIN(val)			bfin_write32(CNT_MIN, val)
+
+/* OTP/FUSE Registers */
+
+#define bfin_read_OTP_CONTROL()			bfin_read16(OTP_CONTROL)
+#define bfin_write_OTP_CONTROL(val)		bfin_write16(OTP_CONTROL, val)
+#define bfin_read_OTP_BEN()			bfin_read16(OTP_BEN)
+#define bfin_write_OTP_BEN(val)			bfin_write16(OTP_BEN, val)
+#define bfin_read_OTP_STATUS()			bfin_read16(OTP_STATUS)
+#define bfin_write_OTP_STATUS(val)		bfin_write16(OTP_STATUS, val)
+#define bfin_read_OTP_TIMING()			bfin_read32(OTP_TIMING)
+#define bfin_write_OTP_TIMING(val)		bfin_write32(OTP_TIMING, val)
+
+/* Security Registers */
+
+#define bfin_read_SECURE_SYSSWT()		bfin_read32(SECURE_SYSSWT)
+#define bfin_write_SECURE_SYSSWT(val)		bfin_write32(SECURE_SYSSWT, val)
+#define bfin_read_SECURE_CONTROL()		bfin_read16(SECURE_CONTROL)
+#define bfin_write_SECURE_CONTROL(val)		bfin_write16(SECURE_CONTROL, val)
+#define bfin_read_SECURE_STATUS()		bfin_read16(SECURE_STATUS)
+#define bfin_write_SECURE_STATUS(val)		bfin_write16(SECURE_STATUS, val)
+
+/* OTP Read/Write Data Buffer Registers */
+
+#define bfin_read_OTP_DATA0()			bfin_read32(OTP_DATA0)
+#define bfin_write_OTP_DATA0(val)		bfin_write32(OTP_DATA0, val)
+#define bfin_read_OTP_DATA1()			bfin_read32(OTP_DATA1)
+#define bfin_write_OTP_DATA1(val)		bfin_write32(OTP_DATA1, val)
+#define bfin_read_OTP_DATA2()			bfin_read32(OTP_DATA2)
+#define bfin_write_OTP_DATA2(val)		bfin_write32(OTP_DATA2, val)
+#define bfin_read_OTP_DATA3()			bfin_read32(OTP_DATA3)
+#define bfin_write_OTP_DATA3(val)		bfin_write32(OTP_DATA3, val)
+
+/* NFC Registers */
+
+#define bfin_read_NFC_CTL()			bfin_read16(NFC_CTL)
+#define bfin_write_NFC_CTL(val)			bfin_write16(NFC_CTL, val)
+#define bfin_read_NFC_STAT()			bfin_read16(NFC_STAT)
+#define bfin_write_NFC_STAT(val)		bfin_write16(NFC_STAT, val)
+#define bfin_read_NFC_IRQSTAT()			bfin_read16(NFC_IRQSTAT)
+#define bfin_write_NFC_IRQSTAT(val)		bfin_write16(NFC_IRQSTAT, val)
+#define bfin_read_NFC_IRQMASK()			bfin_read16(NFC_IRQMASK)
+#define bfin_write_NFC_IRQMASK(val)		bfin_write16(NFC_IRQMASK, val)
+#define bfin_read_NFC_ECC0()			bfin_read16(NFC_ECC0)
+#define bfin_write_NFC_ECC0(val)		bfin_write16(NFC_ECC0, val)
+#define bfin_read_NFC_ECC1()			bfin_read16(NFC_ECC1)
+#define bfin_write_NFC_ECC1(val)		bfin_write16(NFC_ECC1, val)
+#define bfin_read_NFC_ECC2()			bfin_read16(NFC_ECC2)
+#define bfin_write_NFC_ECC2(val)		bfin_write16(NFC_ECC2, val)
+#define bfin_read_NFC_ECC3()			bfin_read16(NFC_ECC3)
+#define bfin_write_NFC_ECC3(val)		bfin_write16(NFC_ECC3, val)
+#define bfin_read_NFC_COUNT()			bfin_read16(NFC_COUNT)
+#define bfin_write_NFC_COUNT(val)		bfin_write16(NFC_COUNT, val)
+#define bfin_read_NFC_RST()			bfin_read16(NFC_RST)
+#define bfin_write_NFC_RST(val)			bfin_write16(NFC_RST, val)
+#define bfin_read_NFC_PGCTL()			bfin_read16(NFC_PGCTL)
+#define bfin_write_NFC_PGCTL(val)		bfin_write16(NFC_PGCTL, val)
+#define bfin_read_NFC_READ()			bfin_read16(NFC_READ)
+#define bfin_write_NFC_READ(val)		bfin_write16(NFC_READ, val)
+#define bfin_read_NFC_ADDR()			bfin_read16(NFC_ADDR)
+#define bfin_write_NFC_ADDR(val)		bfin_write16(NFC_ADDR, val)
+#define bfin_read_NFC_CMD()			bfin_read16(NFC_CMD)
+#define bfin_write_NFC_CMD(val)			bfin_write16(NFC_CMD, val)
+#define bfin_read_NFC_DATA_WR()			bfin_read16(NFC_DATA_WR)
+#define bfin_write_NFC_DATA_WR(val)		bfin_write16(NFC_DATA_WR, val)
+#define bfin_read_NFC_DATA_RD()			bfin_read16(NFC_DATA_RD)
+#define bfin_write_NFC_DATA_RD(val)		bfin_write16(NFC_DATA_RD, val)
+
+/* These need to be last due to the cdef/linux inter-dependencies */
+#include <asm/irq.h>
+
+/* Writing to PLL_CTL initiates a PLL relock sequence. */
+static __inline__ void bfin_write_PLL_CTL(unsigned int val)
+{
+	unsigned long flags, iwr0, iwr1;
+
+	if (val == bfin_read_PLL_CTL())
+		return;
+
+	local_irq_save_hw(flags);
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	iwr0 = bfin_read32(SIC_IWR0);
+	iwr1 = bfin_read32(SIC_IWR1);
+	/* Only allow PLL Wakeup */
+	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
+	bfin_write32(SIC_IWR1, 0);
+
+	bfin_write16(PLL_CTL, val);
+	SSYNC();
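+	/* IDLE stalls the core; the PLL wakeup event enabled above in SIC_IWR0 resumes execution once the relock completes */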
+	asm("IDLE;");
+
+	bfin_write32(SIC_IWR0, iwr0);
+	bfin_write32(SIC_IWR1, iwr1);
+	local_irq_restore_hw(flags);
+}
+
+/* Writing to VR_CTL initiates a PLL relock sequence. */
+static __inline__ void bfin_write_VR_CTL(unsigned int val)
+{
+	unsigned long flags, iwr0, iwr1;
+
+	if (val == bfin_read_VR_CTL())
+		return;
+
+	local_irq_save_hw(flags);
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	iwr0 = bfin_read32(SIC_IWR0);
+	iwr1 = bfin_read32(SIC_IWR1);
+	/* Only allow PLL Wakeup */
+	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
+	bfin_write32(SIC_IWR1, 0);
+
+	bfin_write16(VR_CTL, val);
+	SSYNC();
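+	/* As above, idle until the PLL wakeup event signals that the relock triggered by the VR_CTL write has finished */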
+	asm("IDLE;");
+
+	bfin_write32(SIC_IWR0, iwr0);
+	bfin_write32(SIC_IWR1, iwr1);
+	local_irq_restore_hw(flags);
+}
+
+#endif /* _CDEF_BF52X_H */
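For illustration only (not part of the patch content): a minimal sketch of how the MDMA accessors defined above could drive a polled 32-bit memory-to-memory copy. The DMAEN, WNR, WDSIZE_32 and DMA_DONE bit names are assumed to come from the existing common Blackfin DMA definitions, and the helper name is hypothetical.

static void example_mdma0_memcpy32(void *dst, const void *src, unsigned short words)
{
	/* Source channel: read 32-bit words, advancing 4 bytes per word */
	bfin_write_MDMA_S0_START_ADDR((unsigned long)src);
	bfin_write_MDMA_S0_X_COUNT(words);
	bfin_write_MDMA_S0_X_MODIFY(4);
	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_32);

	/* Destination channel: write (WNR) 32-bit words, advancing 4 bytes per word */
	bfin_write_MDMA_D0_START_ADDR((unsigned long)dst);
	bfin_write_MDMA_D0_X_COUNT(words);
	bfin_write_MDMA_D0_X_MODIFY(4);
	bfin_write_MDMA_D0_CONFIG(DMAEN | WNR | WDSIZE_32);

	/* Poll for completion, then clear the write-1-to-clear done bit */
	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
		continue;
	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE);
}

In-tree drivers would typically use the generic Blackfin DMA API rather than programming these registers directly.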
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF512.h b/arch/blackfin/mach-bf518/include/mach/defBF512.h
new file mode 100644
index 0000000..a96ca90
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/defBF512.h
@@ -0,0 +1,42 @@
+/*
+ * File:         include/asm-blackfin/mach-bf518/defBF512.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _DEF_BF512_H
+#define _DEF_BF512_H
+
+/* Include all Core registers and bit definitions */
+#include <asm/def_LPBlackfin.h>
+
+/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF512 */
+
+/* Include defBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
+#include "defBF51x_base.h"
+
+#endif /* _DEF_BF512_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF514.h b/arch/blackfin/mach-bf518/include/mach/defBF514.h
new file mode 100644
index 0000000..543f291
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/defBF514.h
@@ -0,0 +1,113 @@
+/*
+ * File:         include/asm-blackfin/mach-bf518/defBF514.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _DEF_BF514_H
+#define _DEF_BF514_H
+
+/* Include all Core registers and bit definitions */
+#include <asm/def_LPBlackfin.h>
+
+/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF514 */
+
+/* Include defBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
+#include "defBF51x_base.h"
+
+/* The following are the #defines needed by ADSP-BF514 that are not in the common header */
+
+/* SDH Registers */
+
+#define SDH_PWR_CTL                    0xFFC03900 /* SDH Power Control */
+#define SDH_CLK_CTL                    0xFFC03904 /* SDH Clock Control */
+#define SDH_ARGUMENT                   0xFFC03908 /* SDH Argument */
+#define SDH_COMMAND                    0xFFC0390C /* SDH Command */
+#define SDH_RESP_CMD                   0xFFC03910 /* SDH Response Command */
+#define SDH_RESPONSE0                  0xFFC03914 /* SDH Response0 */
+#define SDH_RESPONSE1                  0xFFC03918 /* SDH Response1 */
+#define SDH_RESPONSE2                  0xFFC0391C /* SDH Response2 */
+#define SDH_RESPONSE3                  0xFFC03920 /* SDH Response3 */
+#define SDH_DATA_TIMER                 0xFFC03924 /* SDH Data Timer */
+#define SDH_DATA_LGTH                  0xFFC03928 /* SDH Data Length */
+#define SDH_DATA_CTL                   0xFFC0392C /* SDH Data Control */
+#define SDH_DATA_CNT                   0xFFC03930 /* SDH Data Counter */
+#define SDH_STATUS                     0xFFC03934 /* SDH Status */
+#define SDH_STATUS_CLR                 0xFFC03938 /* SDH Status Clear */
+#define SDH_MASK0                      0xFFC0393C /* SDH Interrupt0 Mask */
+#define SDH_MASK1                      0xFFC03940 /* SDH Interrupt1 Mask */
+#define SDH_FIFO_CNT                   0xFFC03948 /* SDH FIFO Counter */
+#define SDH_FIFO                       0xFFC03980 /* SDH Data FIFO */
+#define SDH_E_STATUS                   0xFFC039C0 /* SDH Exception Status */
+#define SDH_E_MASK                     0xFFC039C4 /* SDH Exception Mask */
+#define SDH_CFG                        0xFFC039C8 /* SDH Configuration */
+#define SDH_RD_WAIT_EN                 0xFFC039CC /* SDH Read Wait Enable */
+#define SDH_PID0                       0xFFC039D0 /* SDH Peripheral Identification0 */
+#define SDH_PID1                       0xFFC039D4 /* SDH Peripheral Identification1 */
+#define SDH_PID2                       0xFFC039D8 /* SDH Peripheral Identification2 */
+#define SDH_PID3                       0xFFC039DC /* SDH Peripheral Identification3 */
+#define SDH_PID4                       0xFFC039E0 /* SDH Peripheral Identification4 */
+#define SDH_PID5                       0xFFC039E4 /* SDH Peripheral Identification5 */
+#define SDH_PID6                       0xFFC039E8 /* SDH Peripheral Identification6 */
+#define SDH_PID7                       0xFFC039EC /* SDH Peripheral Identification7 */
+
+/* Removable Storage Interface Registers */
+
+#define RSI_PWR_CONTROL                0xFFC03800 /* RSI Power Control Register */
+#define RSI_CLK_CONTROL                0xFFC03804 /* RSI Clock Control Register */
+#define RSI_ARGUMENT                   0xFFC03808 /* RSI Argument Register */
+#define RSI_COMMAND                    0xFFC0380C /* RSI Command Register */
+#define RSI_RESP_CMD                   0xFFC03810 /* RSI Response Command Register */
+#define RSI_RESPONSE0                  0xFFC03814 /* RSI Response Register */
+#define RSI_RESPONSE1                  0xFFC03818 /* RSI Response Register */
+#define RSI_RESPONSE2                  0xFFC0381C /* RSI Response Register */
+#define RSI_RESPONSE3                  0xFFC03820 /* RSI Response Register */
+#define RSI_DATA_TIMER                 0xFFC03824 /* RSI Data Timer Register */
+#define RSI_DATA_LGTH                  0xFFC03828 /* RSI Data Length Register */
+#define RSI_DATA_CONTROL               0xFFC0382C /* RSI Data Control Register */
+#define RSI_DATA_CNT                   0xFFC03830 /* RSI Data Counter Register */
+#define RSI_STATUS                     0xFFC03834 /* RSI Status Register */
+#define RSI_STATUSCL                   0xFFC03838 /* RSI Status Clear Register */
+#define RSI_MASK0                      0xFFC0383C /* RSI Interrupt 0 Mask Register */
+#define RSI_MASK1                      0xFFC03840 /* RSI Interrupt 1 Mask Register */
+#define RSI_FIFO_CNT                   0xFFC03848 /* RSI FIFO Counter Register */
+#define RSI_CEATA_CONTROL              0xFFC0384C /* RSI CEATA Register */
+#define RSI_FIFO                       0xFFC03880 /* RSI Data FIFO Register */
+#define RSI_ESTAT                      0xFFC038C0 /* RSI Exception Status Register */
+#define RSI_EMASK                      0xFFC038C4 /* RSI Exception Mask Register */
+#define RSI_CONFIG                     0xFFC038C8 /* RSI Configuration Register */
+#define RSI_RD_WAIT_EN                 0xFFC038CC /* RSI Read Wait Enable Register */
+#define RSI_PID0                       0xFFC03FE0 /* RSI Peripheral ID Register 0 */
+#define RSI_PID1                       0xFFC03FE4 /* RSI Peripheral ID Register 1 */
+#define RSI_PID2                       0xFFC03FE8 /* RSI Peripheral ID Register 2 */
+#define RSI_PID3                       0xFFC03FEC /* RSI Peripheral ID Register 3 */
+#define RSI_PID4                       0xFFC03FF0 /* RSI Peripheral ID Register 4 */
+#define RSI_PID5                       0xFFC03FF4 /* RSI Peripheral ID Register 5 */
+#define RSI_PID6                       0xFFC03FF8 /* RSI Peripheral ID Register 6 */
+#define RSI_PID7                       0xFFC03FFC /* RSI Peripheral ID Register 7 */
+
+#endif /* _DEF_BF514_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF516.h b/arch/blackfin/mach-bf518/include/mach/defBF516.h
new file mode 100644
index 0000000..149a269
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/defBF516.h
@@ -0,0 +1,490 @@
+/*
+ * File:         include/asm-blackfin/mach-bf518/defBF516.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _DEF_BF516_H
+#define _DEF_BF516_H
+
+/* Include all Core registers and bit definitions */
+#include <asm/def_LPBlackfin.h>
+
+/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF516 */
+
+/* Include defBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
+#include "defBF51x_base.h"
+
+/* The following are the #defines needed by ADSP-BF516 that are not in the common header */
+/* 10/100 Ethernet Controller	(0xFFC03000 - 0xFFC031FF) */
+
+#define EMAC_OPMODE             0xFFC03000       /* Operating Mode Register                              */
+#define EMAC_ADDRLO             0xFFC03004       /* Address Low (32 LSBs) Register                       */
+#define EMAC_ADDRHI             0xFFC03008       /* Address High (16 MSBs) Register                      */
+#define EMAC_HASHLO             0xFFC0300C       /* Multicast Hash Table Low (Bins 31-0) Register        */
+#define EMAC_HASHHI             0xFFC03010       /* Multicast Hash Table High (Bins 63-32) Register      */
+#define EMAC_STAADD             0xFFC03014       /* Station Management Address Register                  */
+#define EMAC_STADAT             0xFFC03018       /* Station Management Data Register                     */
+#define EMAC_FLC                0xFFC0301C       /* Flow Control Register                                */
+#define EMAC_VLAN1              0xFFC03020       /* VLAN1 Tag Register                                   */
+#define EMAC_VLAN2              0xFFC03024       /* VLAN2 Tag Register                                   */
+#define EMAC_WKUP_CTL           0xFFC0302C       /* Wake-Up Control/Status Register                      */
+#define EMAC_WKUP_FFMSK0        0xFFC03030       /* Wake-Up Frame Filter 0 Byte Mask Register            */
+#define EMAC_WKUP_FFMSK1        0xFFC03034       /* Wake-Up Frame Filter 1 Byte Mask Register            */
+#define EMAC_WKUP_FFMSK2        0xFFC03038       /* Wake-Up Frame Filter 2 Byte Mask Register            */
+#define EMAC_WKUP_FFMSK3        0xFFC0303C       /* Wake-Up Frame Filter 3 Byte Mask Register            */
+#define EMAC_WKUP_FFCMD         0xFFC03040       /* Wake-Up Frame Filter Commands Register               */
+#define EMAC_WKUP_FFOFF         0xFFC03044       /* Wake-Up Frame Filter Offsets Register                */
+#define EMAC_WKUP_FFCRC0        0xFFC03048       /* Wake-Up Frame Filter 0,1 CRC-16 Register             */
+#define EMAC_WKUP_FFCRC1        0xFFC0304C       /* Wake-Up Frame Filter 2,3 CRC-16 Register             */
+
+#define EMAC_SYSCTL             0xFFC03060       /* EMAC System Control Register                         */
+#define EMAC_SYSTAT             0xFFC03064       /* EMAC System Status Register                          */
+#define EMAC_RX_STAT            0xFFC03068       /* RX Current Frame Status Register                     */
+#define EMAC_RX_STKY            0xFFC0306C       /* RX Sticky Frame Status Register                      */
+#define EMAC_RX_IRQE            0xFFC03070       /* RX Frame Status Interrupt Enables Register           */
+#define EMAC_TX_STAT            0xFFC03074       /* TX Current Frame Status Register                     */
+#define EMAC_TX_STKY            0xFFC03078       /* TX Sticky Frame Status Register                      */
+#define EMAC_TX_IRQE            0xFFC0307C       /* TX Frame Status Interrupt Enables Register           */
+
+#define EMAC_MMC_CTL            0xFFC03080       /* MMC Counter Control Register                         */
+#define EMAC_MMC_RIRQS          0xFFC03084       /* MMC RX Interrupt Status Register                     */
+#define EMAC_MMC_RIRQE          0xFFC03088       /* MMC RX Interrupt Enables Register                    */
+#define EMAC_MMC_TIRQS          0xFFC0308C       /* MMC TX Interrupt Status Register                     */
+#define EMAC_MMC_TIRQE          0xFFC03090       /* MMC TX Interrupt Enables Register                    */
+
+#define EMAC_RXC_OK             0xFFC03100       /* RX Frame Successful Count                            */
+#define EMAC_RXC_FCS            0xFFC03104       /* RX Frame FCS Failure Count                           */
+#define EMAC_RXC_ALIGN          0xFFC03108       /* RX Alignment Error Count                             */
+#define EMAC_RXC_OCTET          0xFFC0310C       /* RX Octets Successfully Received Count                */
+#define EMAC_RXC_DMAOVF         0xFFC03110       /* Internal MAC Sublayer Error RX Frame Count           */
+#define EMAC_RXC_UNICST         0xFFC03114       /* Unicast RX Frame Count                               */
+#define EMAC_RXC_MULTI          0xFFC03118       /* Multicast RX Frame Count                             */
+#define EMAC_RXC_BROAD          0xFFC0311C       /* Broadcast RX Frame Count                             */
+#define EMAC_RXC_LNERRI         0xFFC03120       /* RX Frame In Range Error Count                        */
+#define EMAC_RXC_LNERRO         0xFFC03124       /* RX Frame Out Of Range Error Count                    */
+#define EMAC_RXC_LONG           0xFFC03128       /* RX Frame Too Long Count                              */
+#define EMAC_RXC_MACCTL         0xFFC0312C       /* MAC Control RX Frame Count                           */
+#define EMAC_RXC_OPCODE         0xFFC03130       /* Unsupported Op-Code RX Frame Count                   */
+#define EMAC_RXC_PAUSE          0xFFC03134       /* MAC Control Pause RX Frame Count                     */
+#define EMAC_RXC_ALLFRM         0xFFC03138       /* Overall RX Frame Count                               */
+#define EMAC_RXC_ALLOCT         0xFFC0313C       /* Overall RX Octet Count                               */
+#define EMAC_RXC_TYPED          0xFFC03140       /* Type/Length Consistent RX Frame Count                */
+#define EMAC_RXC_SHORT          0xFFC03144       /* RX Frame Fragment Count - Byte Count x < 64          */
+#define EMAC_RXC_EQ64           0xFFC03148       /* Good RX Frame Count - Byte Count x = 64              */
+#define EMAC_RXC_LT128          0xFFC0314C       /* Good RX Frame Count - Byte Count  64 < x < 128       */
+#define EMAC_RXC_LT256          0xFFC03150       /* Good RX Frame Count - Byte Count 128 <= x < 256      */
+#define EMAC_RXC_LT512          0xFFC03154       /* Good RX Frame Count - Byte Count 256 <= x < 512      */
+#define EMAC_RXC_LT1024         0xFFC03158       /* Good RX Frame Count - Byte Count 512 <= x < 1024     */
+#define EMAC_RXC_GE1024         0xFFC0315C       /* Good RX Frame Count - Byte Count x >= 1024           */
+
+#define EMAC_TXC_OK             0xFFC03180       /* TX Frame Successful Count                             */
+#define EMAC_TXC_1COL           0xFFC03184       /* TX Frames Successful After Single Collision Count     */
+#define EMAC_TXC_GT1COL         0xFFC03188       /* TX Frames Successful After Multiple Collisions Count  */
+#define EMAC_TXC_OCTET          0xFFC0318C       /* TX Octets Successfully Transmitted Count              */
+#define EMAC_TXC_DEFER          0xFFC03190       /* TX Frame Delayed Due To Busy Count                    */
+#define EMAC_TXC_LATECL         0xFFC03194       /* Late TX Collisions Count                              */
+#define EMAC_TXC_XS_COL         0xFFC03198       /* TX Frame Failed Due To Excessive Collisions Count     */
+#define EMAC_TXC_DMAUND         0xFFC0319C       /* Internal MAC Sublayer Error TX Frame Count            */
+#define EMAC_TXC_CRSERR         0xFFC031A0       /* Carrier Sense Deasserted During TX Frame Count        */
+#define EMAC_TXC_UNICST         0xFFC031A4       /* Unicast TX Frame Count                                */
+#define EMAC_TXC_MULTI          0xFFC031A8       /* Multicast TX Frame Count                              */
+#define EMAC_TXC_BROAD          0xFFC031AC       /* Broadcast TX Frame Count                              */
+#define EMAC_TXC_XS_DFR         0xFFC031B0       /* TX Frames With Excessive Deferral Count               */
+#define EMAC_TXC_MACCTL         0xFFC031B4       /* MAC Control TX Frame Count                            */
+#define EMAC_TXC_ALLFRM         0xFFC031B8       /* Overall TX Frame Count                                */
+#define EMAC_TXC_ALLOCT         0xFFC031BC       /* Overall TX Octet Count                                */
+#define EMAC_TXC_EQ64           0xFFC031C0       /* Good TX Frame Count - Byte Count x = 64               */
+#define EMAC_TXC_LT128          0xFFC031C4       /* Good TX Frame Count - Byte Count  64 < x < 128        */
+#define EMAC_TXC_LT256          0xFFC031C8       /* Good TX Frame Count - Byte Count 128 <= x < 256       */
+#define EMAC_TXC_LT512          0xFFC031CC       /* Good TX Frame Count - Byte Count 256 <= x < 512       */
+#define EMAC_TXC_LT1024         0xFFC031D0       /* Good TX Frame Count - Byte Count 512 <= x < 1024      */
+#define EMAC_TXC_GE1024         0xFFC031D4       /* Good TX Frame Count - Byte Count x >= 1024            */
+#define EMAC_TXC_ABORT          0xFFC031D8       /* Total TX Frames Aborted Count                         */
+
+/* Listing for IEEE-Supported Count Registers */
+
+#define FramesReceivedOK                EMAC_RXC_OK        /* RX Frame Successful Count                            */
+#define FrameCheckSequenceErrors        EMAC_RXC_FCS       /* RX Frame FCS Failure Count                           */
+#define AlignmentErrors                 EMAC_RXC_ALIGN     /* RX Alignment Error Count                             */
+#define OctetsReceivedOK                EMAC_RXC_OCTET     /* RX Octets Successfully Received Count                */
+#define FramesLostDueToIntMACRcvError   EMAC_RXC_DMAOVF    /* Internal MAC Sublayer Error RX Frame Count           */
+#define UnicastFramesReceivedOK         EMAC_RXC_UNICST    /* Unicast RX Frame Count                               */
+#define MulticastFramesReceivedOK       EMAC_RXC_MULTI     /* Multicast RX Frame Count                             */
+#define BroadcastFramesReceivedOK       EMAC_RXC_BROAD     /* Broadcast RX Frame Count                             */
+#define InRangeLengthErrors             EMAC_RXC_LNERRI    /* RX Frame In Range Error Count                        */
+#define OutOfRangeLengthField           EMAC_RXC_LNERRO    /* RX Frame Out Of Range Error Count                    */
+#define FrameTooLongErrors              EMAC_RXC_LONG      /* RX Frame Too Long Count                              */
+#define MACControlFramesReceived        EMAC_RXC_MACCTL    /* MAC Control RX Frame Count                           */
+#define UnsupportedOpcodesReceived      EMAC_RXC_OPCODE    /* Unsupported Op-Code RX Frame Count                   */
+#define PAUSEMACCtrlFramesReceived      EMAC_RXC_PAUSE     /* MAC Control Pause RX Frame Count                     */
+#define FramesReceivedAll               EMAC_RXC_ALLFRM    /* Overall RX Frame Count                               */
+#define OctetsReceivedAll               EMAC_RXC_ALLOCT    /* Overall RX Octet Count                               */
+#define TypedFramesReceived             EMAC_RXC_TYPED     /* Type/Length Consistent RX Frame Count                */
+#define FramesLenLt64Received           EMAC_RXC_SHORT     /* RX Frame Fragment Count - Byte Count x < 64          */
+#define FramesLenEq64Received           EMAC_RXC_EQ64      /* Good RX Frame Count - Byte Count x = 64              */
+#define FramesLen65_127Received         EMAC_RXC_LT128     /* Good RX Frame Count - Byte Count  64 < x < 128       */
+#define FramesLen128_255Received        EMAC_RXC_LT256     /* Good RX Frame Count - Byte Count 128 <= x < 256      */
+#define FramesLen256_511Received        EMAC_RXC_LT512     /* Good RX Frame Count - Byte Count 256 <= x < 512      */
+#define FramesLen512_1023Received       EMAC_RXC_LT1024    /* Good RX Frame Count - Byte Count 512 <= x < 1024     */
+#define FramesLen1024_MaxReceived       EMAC_RXC_GE1024    /* Good RX Frame Count - Byte Count x >= 1024           */
+
+#define FramesTransmittedOK             EMAC_TXC_OK        /* TX Frame Successful Count                            */
+#define SingleCollisionFrames           EMAC_TXC_1COL      /* TX Frames Successful After Single Collision Count    */
+#define MultipleCollisionFrames         EMAC_TXC_GT1COL    /* TX Frames Successful After Multiple Collisions Count */
+#define OctetsTransmittedOK             EMAC_TXC_OCTET     /* TX Octets Successfully Transmitted Count             */
+#define FramesWithDeferredXmissions     EMAC_TXC_DEFER     /* TX Frame Delayed Due To Busy Count                   */
+#define LateCollisions                  EMAC_TXC_LATECL    /* Late TX Collisions Count                             */
+#define FramesAbortedDueToXSColls       EMAC_TXC_XS_COL    /* TX Frame Failed Due To Excessive Collisions Count    */
+#define FramesLostDueToIntMacXmitError  EMAC_TXC_DMAUND    /* Internal MAC Sublayer Error TX Frame Count           */
+#define CarrierSenseErrors              EMAC_TXC_CRSERR    /* Carrier Sense Deasserted During TX Frame Count       */
+#define UnicastFramesXmittedOK          EMAC_TXC_UNICST    /* Unicast TX Frame Count                               */
+#define MulticastFramesXmittedOK        EMAC_TXC_MULTI     /* Multicast TX Frame Count                             */
+#define BroadcastFramesXmittedOK        EMAC_TXC_BROAD     /* Broadcast TX Frame Count                             */
+#define FramesWithExcessiveDeferral     EMAC_TXC_XS_DFR    /* TX Frames With Excessive Deferral Count              */
+#define MACControlFramesTransmitted     EMAC_TXC_MACCTL    /* MAC Control TX Frame Count                           */
+#define FramesTransmittedAll            EMAC_TXC_ALLFRM    /* Overall TX Frame Count                               */
+#define OctetsTransmittedAll            EMAC_TXC_ALLOCT    /* Overall TX Octet Count                               */
+#define FramesLenEq64Transmitted        EMAC_TXC_EQ64      /* Good TX Frame Count - Byte Count x = 64              */
+#define FramesLen65_127Transmitted      EMAC_TXC_LT128     /* Good TX Frame Count - Byte Count  64 < x < 128       */
+#define FramesLen128_255Transmitted     EMAC_TXC_LT256     /* Good TX Frame Count - Byte Count 128 <= x < 256      */
+#define FramesLen256_511Transmitted     EMAC_TXC_LT512     /* Good TX Frame Count - Byte Count 256 <= x < 512      */
+#define FramesLen512_1023Transmitted    EMAC_TXC_LT1024    /* Good TX Frame Count - Byte Count 512 <= x < 1024     */
+#define FramesLen1024_MaxTransmitted    EMAC_TXC_GE1024    /* Good TX Frame Count - Byte Count x >= 1024           */
+#define TxAbortedFrames                 EMAC_TXC_ABORT     /* Total TX Frames Aborted Count                        */
+
+/***********************************************************************************
+** System MMR Register Bits And Macros
+**
+** Disclaimer:	All macros are intended to make C and Assembly code more readable.
+**				Use these macros carefully, as any that do left shifts for field
+**				depositing will result in the lower order bits being destroyed.  Any
+**				macro that shifts left to properly position the bit-field should be
+**				used as part of an OR to initialize a register and NOT as a dynamic
+**				modifier UNLESS the lower order bits are saved and ORed back in when
+**				the macro is used.
+*************************************************************************************/
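To illustrate the disclaimer above, here is a minimal sketch of the save-and-OR-back pattern for a shifted field such as BOLMT. MMR32() is a hypothetical volatile accessor used only for illustration; in-kernel Blackfin code would go through its own MMR helpers instead.

/* Hypothetical helper: treat an MMR address as a 32-bit volatile register. */
#define MMR32(addr)  (*(volatile unsigned long *)(addr))

static void emac_set_backoff_limit(void)
{
	unsigned long opmode = MMR32(EMAC_OPMODE);   /* save the low-order bits  */

	opmode &= ~BOLMT;                            /* clear the 2-bit field    */
	opmode |= BOLMT_8;                           /* deposit the new setting  */
	MMR32(EMAC_OPMODE) = opmode;                 /* write the result back    */
}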
+
+/************************  ETHERNET 10/100 CONTROLLER MASKS  ************************/
+
+/* EMAC_OPMODE Masks */
+
+#define	RE                 0x00000001     /* Receiver Enable                                    */
+#define	ASTP               0x00000002     /* Enable Automatic Pad Stripping On RX Frames        */
+#define	HU                 0x00000010     /* Hash Filter Unicast Address                        */
+#define	HM                 0x00000020     /* Hash Filter Multicast Address                      */
+#define	PAM                0x00000040     /* Pass-All-Multicast Mode Enable                     */
+#define	PR                 0x00000080     /* Promiscuous Mode Enable                            */
+#define	IFE                0x00000100     /* Inverse Filtering Enable                           */
+#define	DBF                0x00000200     /* Disable Broadcast Frame Reception                  */
+#define	PBF                0x00000400     /* Pass Bad Frames Enable                             */
+#define	PSF                0x00000800     /* Pass Short Frames Enable                           */
+#define	RAF                0x00001000     /* Receive-All Mode                                   */
+#define	TE                 0x00010000     /* Transmitter Enable                                 */
+#define	DTXPAD             0x00020000     /* Disable Automatic TX Padding                       */
+#define	DTXCRC             0x00040000     /* Disable Automatic TX CRC Generation                */
+#define	DC                 0x00080000     /* Deferral Check                                     */
+#define	BOLMT              0x00300000     /* Back-Off Limit                                     */
+#define	BOLMT_10           0x00000000     /*		10-bit range                            */
+#define	BOLMT_8            0x00100000     /*		8-bit range                             */
+#define	BOLMT_4            0x00200000     /*		4-bit range                             */
+#define	BOLMT_1            0x00300000     /*		1-bit range                             */
+#define	DRTY               0x00400000     /* Disable TX Retry On Collision                      */
+#define	LCTRE              0x00800000     /* Enable TX Retry On Late Collision                  */
+#define	RMII               0x01000000     /* RMII/MII* Mode                                     */
+#define	RMII_10            0x02000000     /* Speed Select for RMII Port (10MBit/100MBit*)       */
+#define	FDMODE             0x04000000     /* Duplex Mode Enable (Full/Half*)                    */
+#define	LB                 0x08000000     /* Internal Loopback Enable                           */
+#define	DRO                0x10000000     /* Disable Receive Own Frames (Half-Duplex Mode)      */
+
+/* EMAC_STAADD Masks */
+
+#define	STABUSY            0x00000001     /* Initiate Station Mgt Reg Access / STA Busy Stat    */
+#define	STAOP              0x00000002     /* Station Management Operation Code (Write/Read*)    */
+#define	STADISPRE          0x00000004     /* Disable Preamble Generation                        */
+#define	STAIE              0x00000008     /* Station Mgt. Transfer Done Interrupt Enable        */
+#define	REGAD              0x000007C0     /* STA Register Address                               */
+#define	PHYAD              0x0000F800     /* PHY Device Address                                 */
+
+#define	SET_REGAD(x) (((x)&0x1F)<<  6 )   /* Set STA Register Address                           */
+#define	SET_PHYAD(x) (((x)&0x1F)<< 11 )   /* Set PHY Device Address                             */
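These two setters are normally combined into a single write to EMAC_STAADD. A sketch of an MDIO read transfer, using the hypothetical MMR32() accessor from the earlier example:

static unsigned int emac_mdio_read(int phy, int reg)
{
	/* Compose the whole register in one OR, as the disclaimer above asks;
	 * STAOP stays clear, which selects a read operation.                  */
	MMR32(EMAC_STAADD) = SET_PHYAD(phy) | SET_REGAD(reg) | STABUSY;

	while (MMR32(EMAC_STAADD) & STABUSY)
		;                                    /* poll until the transfer ends */

	return MMR32(EMAC_STADAT) & STADATA;         /* 16-bit management data   */
}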
+
+/* EMAC_STADAT Mask */
+
+#define	STADATA            0x0000FFFF     /* Station Management Data                            */
+
+/* EMAC_FLC Masks */
+
+#define	FLCBUSY            0x00000001     /* Send Flow Ctrl Frame / Flow Ctrl Busy Status       */
+#define	FLCE               0x00000002     /* Flow Control Enable                                */
+#define	PCF                0x00000004     /* Pass Control Frames                                */
+#define	BKPRSEN            0x00000008     /* Enable Backpressure                                */
+#define	FLCPAUSE           0xFFFF0000     /* Pause Time                                         */
+
+#define	SET_FLCPAUSE(x) (((x)&0xFFFF)<< 16) /* Set Pause Time                                   */
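As a sketch (hypothetical MMR32() accessor, illustrative pause value), a flow-control request composes these masks in one OR:

static void emac_send_pause(void)
{
	/* Enable flow control and request a PAUSE frame of 0x100 slot times
	 * (illustrative value), composed in a single OR per the disclaimer.  */
	MMR32(EMAC_FLC) = FLCE | SET_FLCPAUSE(0x100) | FLCBUSY;
}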
+
+/* EMAC_WKUP_CTL Masks */
+
+#define	CAPWKFRM           0x00000001    /* Capture Wake-Up Frames                              */
+#define	MPKE               0x00000002    /* Magic Packet Enable                                 */
+#define	RWKE               0x00000004    /* Remote Wake-Up Frame Enable                         */
+#define	GUWKE              0x00000008    /* Global Unicast Wake Enable                          */
+#define	MPKS               0x00000020    /* Magic Packet Received Status                        */
+#define	RWKS               0x00000F00    /* Wake-Up Frame Received Status, Filters 3:0          */
+
+/* EMAC_WKUP_FFCMD Masks */
+
+#define	WF0_E              0x00000001    /* Enable Wake-Up Filter 0                              */
+#define	WF0_T              0x00000008    /* Wake-Up Filter 0 Addr Type (Multicast/Unicast*)      */
+#define	WF1_E              0x00000100    /* Enable Wake-Up Filter 1                              */
+#define	WF1_T              0x00000800    /* Wake-Up Filter 1 Addr Type (Multicast/Unicast*)      */
+#define	WF2_E              0x00010000    /* Enable Wake-Up Filter 2                              */
+#define	WF2_T              0x00080000    /* Wake-Up Filter 2 Addr Type (Multicast/Unicast*)      */
+#define	WF3_E              0x01000000    /* Enable Wake-Up Filter 3                              */
+#define	WF3_T              0x08000000    /* Wake-Up Filter 3 Addr Type (Multicast/Unicast*)      */
+
+/* EMAC_WKUP_FFOFF Masks */
+
+#define	WF0_OFF            0x000000FF    /* Wake-Up Filter 0 Pattern Offset                      */
+#define	WF1_OFF            0x0000FF00    /* Wake-Up Filter 1 Pattern Offset                      */
+#define	WF2_OFF            0x00FF0000    /* Wake-Up Filter 2 Pattern Offset                      */
+#define	WF3_OFF            0xFF000000    /* Wake-Up Filter 3 Pattern Offset                      */
+
+#define	SET_WF0_OFF(x) (((x)&0xFF)<<  0 ) /* Set Wake-Up Filter 0 Byte Offset                    */
+#define	SET_WF1_OFF(x) (((x)&0xFF)<<  8 ) /* Set Wake-Up Filter 1 Byte Offset                    */
+#define	SET_WF2_OFF(x) (((x)&0xFF)<< 16 ) /* Set Wake-Up Filter 2 Byte Offset                    */
+#define	SET_WF3_OFF(x) (((x)&0xFF)<< 24 ) /* Set Wake-Up Filter 3 Byte Offset                    */
+/* Set ALL Offsets */
+#define	SET_WF_OFFS(x0,x1,x2,x3) (SET_WF0_OFF((x0))|SET_WF1_OFF((x1))|SET_WF2_OFF((x2))|SET_WF3_OFF((x3)))
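All four pattern offsets are deposited in one OR. A sketch with illustrative byte offsets and the hypothetical MMR32() accessor:

static void emac_wkup_set_offsets(void)
{
	/* Illustrative values: filters 0-3 match at bytes 0, 12, 14 and 0.   */
	MMR32(EMAC_WKUP_FFOFF) = SET_WF_OFFS(0, 12, 14, 0);
}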
+
+/* EMAC_WKUP_FFCRC0 Masks */
+
+#define	WF0_CRC           0x0000FFFF    /* Wake-Up Filter 0 Pattern CRC                           */
+#define	WF1_CRC           0xFFFF0000    /* Wake-Up Filter 1 Pattern CRC                           */
+
+#define	SET_WF0_CRC(x) (((x)&0xFFFF)<<   0 ) /* Set Wake-Up Filter 0 Target CRC                   */
+#define	SET_WF1_CRC(x) (((x)&0xFFFF)<<  16 ) /* Set Wake-Up Filter 1 Target CRC                   */
+
+/* EMAC_WKUP_FFCRC1 Masks */
+
+#define	WF2_CRC           0x0000FFFF    /* Wake-Up Filter 2 Pattern CRC                           */
+#define	WF3_CRC           0xFFFF0000    /* Wake-Up Filter 3 Pattern CRC                           */
+
+#define	SET_WF2_CRC(x) (((x)&0xFFFF)<<   0 ) /* Set Wake-Up Filter 2 Target CRC                   */
+#define	SET_WF3_CRC(x) (((x)&0xFFFF)<<  16 ) /* Set Wake-Up Filter 3 Target CRC                   */
+
+/* EMAC_SYSCTL Masks */
+
+#define	PHYIE             0x00000001    /* PHY_INT Interrupt Enable                               */
+#define	RXDWA             0x00000002    /* Receive Frame DMA Word Alignment (Odd/Even*)           */
+#define	RXCKS             0x00000004    /* Enable RX Frame TCP/UDP Checksum Computation           */
+#define	TXDWA             0x00000010    /* Transmit Frame DMA Word Alignment (Odd/Even*)          */
+#define	MDCDIV            0x00003F00    /* SCLK:MDC Clock Divisor [MDC=SCLK/(2*(N+1))]            */
+
+#define	SET_MDCDIV(x) (((x)&0x3F)<< 8)   /* Set MDC Clock Divisor                                 */
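The divisor follows from the comment's formula, MDC = SCLK / (2 * (N + 1)), so N = SCLK / (2 * MDC) - 1. A sketch for an assumed 100 MHz SCLK and the usual 2.5 MHz MII management-clock ceiling, using the hypothetical MMR32() accessor:

#define SCLK_HZ   100000000UL                /* assumed system clock (SCLK)  */
#define MDC_HZ      2500000UL                /* target MDC frequency         */

static void emac_set_mdc_divisor(void)
{
	/* N = SCLK / (2 * MDC) - 1 = 19 for the values assumed above.        */
	MMR32(EMAC_SYSCTL) = SET_MDCDIV(SCLK_HZ / (2 * MDC_HZ) - 1);
}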
+
+/* EMAC_SYSTAT Masks */
+
+#define	PHYINT            0x00000001    /* PHY_INT Interrupt Status                               */
+#define	MMCINT            0x00000002    /* MMC Counter Interrupt Status                           */
+#define	RXFSINT           0x00000004    /* RX Frame-Status Interrupt Status                       */
+#define	TXFSINT           0x00000008    /* TX Frame-Status Interrupt Status                       */
+#define	WAKEDET           0x00000010    /* Wake-Up Detected Status                                */
+#define	RXDMAERR          0x00000020    /* RX DMA Direction Error Status                          */
+#define	TXDMAERR          0x00000040    /* TX DMA Direction Error Status                          */
+#define	STMDONE           0x00000080    /* Station Mgt. Transfer Done Interrupt Status            */
+
+/* EMAC_RX_STAT, EMAC_RX_STKY, and EMAC_RX_IRQE Masks */
+
+#define	RX_FRLEN          0x000007FF    /* Frame Length In Bytes                                  */
+#define	RX_COMP           0x00001000    /* RX Frame Complete                                      */
+#define	RX_OK             0x00002000    /* RX Frame Received With No Errors                       */
+#define	RX_LONG           0x00004000    /* RX Frame Too Long Error                                */
+#define	RX_ALIGN          0x00008000    /* RX Frame Alignment Error                               */
+#define	RX_CRC            0x00010000    /* RX Frame CRC Error                                     */
+#define	RX_LEN            0x00020000    /* RX Frame Length Error                                  */
+#define	RX_FRAG           0x00040000    /* RX Frame Fragment Error                                */
+#define	RX_ADDR           0x00080000    /* RX Frame Address Filter Failed Error                   */
+#define	RX_DMAO           0x00100000    /* RX Frame DMA Overrun Error                             */
+#define	RX_PHY            0x00200000    /* RX Frame PHY Error                                     */
+#define	RX_LATE           0x00400000    /* RX Frame Late Collision Error                          */
+#define	RX_RANGE          0x00800000    /* RX Frame Length Field Out of Range Error               */
+#define	RX_MULTI          0x01000000    /* RX Multicast Frame Indicator                           */
+#define	RX_BROAD          0x02000000    /* RX Broadcast Frame Indicator                           */
+#define	RX_CTL            0x04000000    /* RX Control Frame Indicator                             */
+#define	RX_UCTL           0x08000000    /* Unsupported RX Control Frame Indicator                 */
+#define	RX_TYPE           0x10000000    /* RX Typed Frame Indicator                               */
+#define	RX_VLAN1          0x20000000    /* RX VLAN1 Frame Indicator                               */
+#define	RX_VLAN2          0x40000000    /* RX VLAN2 Frame Indicator                               */
+#define	RX_ACCEPT         0x80000000    /* RX Frame Accepted Indicator                            */
+
+/*  EMAC_TX_STAT, EMAC_TX_STKY, and EMAC_TX_IRQE Masks  */
+
+#define	TX_COMP           0x00000001    /* TX Frame Complete                                      */
+#define	TX_OK             0x00000002    /* TX Frame Sent With No Errors                           */
+#define	TX_ECOLL          0x00000004    /* TX Frame Excessive Collision Error                     */
+#define	TX_LATE           0x00000008    /* TX Frame Late Collision Error                          */
+#define	TX_DMAU           0x00000010    /* TX Frame DMA Underrun Error (STAT)                     */
+#define	TX_MACE           0x00000010    /* Internal MAC Error Detected (STKY and IRQE)            */
+#define	TX_EDEFER         0x00000020    /* TX Frame Excessive Deferral Error                      */
+#define	TX_BROAD          0x00000040    /* TX Broadcast Frame Indicator                           */
+#define	TX_MULTI          0x00000080    /* TX Multicast Frame Indicator                           */
+#define	TX_CCNT           0x00000F00    /* TX Frame Collision Count                               */
+#define	TX_DEFER          0x00001000    /* TX Frame Deferred Indicator                            */
+#define	TX_CRS            0x00002000    /* TX Frame Carrier Sense Not Asserted Error              */
+#define	TX_LOSS           0x00004000    /* TX Frame Carrier Lost During TX Error                  */
+#define	TX_RETRY          0x00008000    /* TX Frame Successful After Retry                        */
+#define	TX_FRLEN          0x07FF0000    /* TX Frame Length (Bytes)                                */
+
+/* EMAC_MMC_CTL Masks */
+#define	RSTC              0x00000001    /* Reset All Counters                                     */
+#define	CROLL             0x00000002    /* Counter Roll-Over Enable                               */
+#define	CCOR              0x00000004    /* Counter Clear-On-Read Mode Enable                      */
+#define	MMCE              0x00000008    /* Enable MMC Counter Operation                           */
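A typical sequence clears the counters, enables them, and later reads back one of the IEEE-named aliases listed earlier. A sketch using the hypothetical MMR32() accessor:

static unsigned long emac_sample_rx_ok(void)
{
	MMR32(EMAC_MMC_CTL) = RSTC;              /* zero every counter           */
	MMR32(EMAC_MMC_CTL) = MMCE | CROLL;      /* count, roll over at the max  */

	/* ... traffic runs ... */

	return MMR32(FramesReceivedOK);          /* IEEE alias for EMAC_RXC_OK   */
}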
+
+/* EMAC_MMC_RIRQS and EMAC_MMC_RIRQE Masks */
+#define	RX_OK_CNT         0x00000001    /* RX Frames Received With No Errors                      */
+#define	RX_FCS_CNT        0x00000002    /* RX Frames W/Frame Check Sequence Errors                */
+#define	RX_ALIGN_CNT      0x00000004    /* RX Frames With Alignment Errors                        */
+#define	RX_OCTET_CNT      0x00000008    /* RX Octets Received OK                                  */
+#define	RX_LOST_CNT       0x00000010    /* RX Frames Lost Due To Internal MAC RX Error            */
+#define	RX_UNI_CNT        0x00000020    /* Unicast RX Frames Received OK                          */
+#define	RX_MULTI_CNT      0x00000040    /* Multicast RX Frames Received OK                        */
+#define	RX_BROAD_CNT      0x00000080    /* Broadcast RX Frames Received OK                        */
+#define	RX_IRL_CNT        0x00000100    /* RX Frames With In-Range Length Errors                  */
+#define	RX_ORL_CNT        0x00000200    /* RX Frames With Out-Of-Range Length Errors              */
+#define	RX_LONG_CNT       0x00000400    /* RX Frames With Frame Too Long Errors                   */
+#define	RX_MACCTL_CNT     0x00000800    /* MAC Control RX Frames Received                         */
+#define	RX_OPCODE_CTL     0x00001000    /* Unsupported Op-Code RX Frames Received                 */
+#define	RX_PAUSE_CNT      0x00002000    /* PAUSEMAC Control RX Frames Received                    */
+#define	RX_ALLF_CNT       0x00004000    /* All RX Frames Received                                 */
+#define	RX_ALLO_CNT       0x00008000    /* All RX Octets Received                                 */
+#define	RX_TYPED_CNT      0x00010000    /* Typed RX Frames Received                               */
+#define	RX_SHORT_CNT      0x00020000    /* RX Frame Fragments (< 64 Bytes) Received               */
+#define	RX_EQ64_CNT       0x00040000    /* 64-Byte RX Frames Received                             */
+#define	RX_LT128_CNT      0x00080000    /* 65-127-Byte RX Frames Received                         */
+#define	RX_LT256_CNT      0x00100000    /* 128-255-Byte RX Frames Received                        */
+#define	RX_LT512_CNT      0x00200000    /* 256-511-Byte RX Frames Received                        */
+#define	RX_LT1024_CNT     0x00400000    /* 512-1023-Byte RX Frames Received                       */
+#define	RX_GE1024_CNT     0x00800000    /* 1024-Max-Byte RX Frames Received                       */
+
+/* EMAC_MMC_TIRQS and EMAC_MMC_TIRQE Masks  */
+
+#define	TX_OK_CNT         0x00000001    /* TX Frames Sent OK                                      */
+#define	TX_SCOLL_CNT      0x00000002    /* TX Frames With Single Collisions                       */
+#define	TX_MCOLL_CNT      0x00000004    /* TX Frames With Multiple Collisions                     */
+#define	TX_OCTET_CNT      0x00000008    /* TX Octets Sent OK                                      */
+#define	TX_DEFER_CNT      0x00000010    /* TX Frames With Deferred Transmission                   */
+#define	TX_LATE_CNT       0x00000020    /* TX Frames With Late Collisions                         */
+#define	TX_ABORTC_CNT     0x00000040    /* TX Frames Aborted Due To Excess Collisions             */
+#define	TX_LOST_CNT       0x00000080    /* TX Frames Lost Due To Internal MAC TX Error            */
+#define	TX_CRS_CNT        0x00000100    /* TX Frames With Carrier Sense Errors                    */
+#define	TX_UNI_CNT        0x00000200    /* Unicast TX Frames Sent                                 */
+#define	TX_MULTI_CNT      0x00000400    /* Multicast TX Frames Sent                               */
+#define	TX_BROAD_CNT      0x00000800    /* Broadcast TX Frames Sent                               */
+#define	TX_EXDEF_CTL      0x00001000    /* TX Frames With Excessive Deferral                      */
+#define	TX_MACCTL_CNT     0x00002000    /* MAC Control TX Frames Sent                             */
+#define	TX_ALLF_CNT       0x00004000    /* All TX Frames Sent                                     */
+#define	TX_ALLO_CNT       0x00008000    /* All TX Octets Sent                                     */
+#define	TX_EQ64_CNT       0x00010000    /* 64-Byte TX Frames Sent                                 */
+#define	TX_LT128_CNT      0x00020000    /* 65-127-Byte TX Frames Sent                             */
+#define	TX_LT256_CNT      0x00040000    /* 128-255-Byte TX Frames Sent                            */
+#define	TX_LT512_CNT      0x00080000    /* 256-511-Byte TX Frames Sent                            */
+#define	TX_LT1024_CNT     0x00100000    /* 512-1023-Byte TX Frames Sent                           */
+#define	TX_GE1024_CNT     0x00200000    /* 1024-Max-Byte TX Frames Sent                           */
+#define	TX_ABORT_CNT      0x00400000    /* TX Frames Aborted                                      */
+
+/* SDH Registers */
+
+#define SDH_PWR_CTL                    0xFFC03900 /* SDH Power Control */
+#define SDH_CLK_CTL                    0xFFC03904 /* SDH Clock Control */
+#define SDH_ARGUMENT                   0xFFC03908 /* SDH Argument */
+#define SDH_COMMAND                    0xFFC0390C /* SDH Command */
+#define SDH_RESP_CMD                   0xFFC03910 /* SDH Response Command */
+#define SDH_RESPONSE0                  0xFFC03914 /* SDH Response0 */
+#define SDH_RESPONSE1                  0xFFC03918 /* SDH Response1 */
+#define SDH_RESPONSE2                  0xFFC0391C /* SDH Response2 */
+#define SDH_RESPONSE3                  0xFFC03920 /* SDH Response3 */
+#define SDH_DATA_TIMER                 0xFFC03924 /* SDH Data Timer */
+#define SDH_DATA_LGTH                  0xFFC03928 /* SDH Data Length */
+#define SDH_DATA_CTL                   0xFFC0392C /* SDH Data Control */
+#define SDH_DATA_CNT                   0xFFC03930 /* SDH Data Counter */
+#define SDH_STATUS                     0xFFC03934 /* SDH Status */
+#define SDH_STATUS_CLR                 0xFFC03938 /* SDH Status Clear */
+#define SDH_MASK0                      0xFFC0393C /* SDH Interrupt0 Mask */
+#define SDH_MASK1                      0xFFC03940 /* SDH Interrupt1 Mask */
+#define SDH_FIFO_CNT                   0xFFC03948 /* SDH FIFO Counter */
+#define SDH_FIFO                       0xFFC03980 /* SDH Data FIFO */
+#define SDH_E_STATUS                   0xFFC039C0 /* SDH Exception Status */
+#define SDH_E_MASK                     0xFFC039C4 /* SDH Exception Mask */
+#define SDH_CFG                        0xFFC039C8 /* SDH Configuration */
+#define SDH_RD_WAIT_EN                 0xFFC039CC /* SDH Read Wait Enable */
+#define SDH_PID0                       0xFFC039D0 /* SDH Peripheral Identification0 */
+#define SDH_PID1                       0xFFC039D4 /* SDH Peripheral Identification1 */
+#define SDH_PID2                       0xFFC039D8 /* SDH Peripheral Identification2 */
+#define SDH_PID3                       0xFFC039DC /* SDH Peripheral Identification3 */
+#define SDH_PID4                       0xFFC039E0 /* SDH Peripheral Identification4 */
+#define SDH_PID5                       0xFFC039E4 /* SDH Peripheral Identification5 */
+#define SDH_PID6                       0xFFC039E8 /* SDH Peripheral Identification6 */
+#define SDH_PID7                       0xFFC039EC /* SDH Peripheral Identification7 */
+
+/* Removable Storage Interface Registers */
+
+#define RSI_PWR_CONTROL                0xFFC03800 /* RSI Power Control Register */
+#define RSI_CLK_CONTROL                0xFFC03804 /* RSI Clock Control Register */
+#define RSI_ARGUMENT                   0xFFC03808 /* RSI Argument Register */
+#define RSI_COMMAND                    0xFFC0380C /* RSI Command Register */
+#define RSI_RESP_CMD                   0xFFC03810 /* RSI Response Command Register */
+#define RSI_RESPONSE0                  0xFFC03814 /* RSI Response Register */
+#define RSI_RESPONSE1                  0xFFC03818 /* RSI Response Register */
+#define RSI_RESPONSE2                  0xFFC0381C /* RSI Response Register */
+#define RSI_RESPONSE3                  0xFFC03820 /* RSI Response Register */
+#define RSI_DATA_TIMER                 0xFFC03824 /* RSI Data Timer Register */
+#define RSI_DATA_LGTH                  0xFFC03828 /* RSI Data Length Register */
+#define RSI_DATA_CONTROL               0xFFC0382C /* RSI Data Control Register */
+#define RSI_DATA_CNT                   0xFFC03830 /* RSI Data Counter Register */
+#define RSI_STATUS                     0xFFC03834 /* RSI Status Register */
+#define RSI_STATUSCL                   0xFFC03838 /* RSI Status Clear Register */
+#define RSI_MASK0                      0xFFC0383C /* RSI Interrupt 0 Mask Register */
+#define RSI_MASK1                      0xFFC03840 /* RSI Interrupt 1 Mask Register */
+#define RSI_FIFO_CNT                   0xFFC03848 /* RSI FIFO Counter Register */
+#define RSI_CEATA_CONTROL              0xFFC0384C /* RSI CEATA Register */
+#define RSI_FIFO                       0xFFC03880 /* RSI Data FIFO Register */
+#define RSI_ESTAT                      0xFFC038C0 /* RSI Exception Status Register */
+#define RSI_EMASK                      0xFFC038C4 /* RSI Exception Mask Register */
+#define RSI_CONFIG                     0xFFC038C8 /* RSI Configuration Register */
+#define RSI_RD_WAIT_EN                 0xFFC038CC /* RSI Read Wait Enable Register */
+#define RSI_PID0                       0xFFC03FE0 /* RSI Peripheral ID Register 0 */
+#define RSI_PID1                       0xFFC03FE4 /* RSI Peripheral ID Register 1 */
+#define RSI_PID2                       0xFFC03FE8 /* RSI Peripheral ID Register 2 */
+#define RSI_PID3                       0xFFC03FEC /* RSI Peripheral ID Register 3 */
+#define RSI_PID4                       0xFFC03FF0 /* RSI Peripheral ID Register 4 */
+#define RSI_PID5                       0xFFC03FF4 /* RSI Peripheral ID Register 5 */
+#define RSI_PID6                       0xFFC03FF8 /* RSI Peripheral ID Register 6 */
+#define RSI_PID7                       0xFFC03FFC /* RSI Peripheral ID Register 7 */
+
+#endif /* _DEF_BF516_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF518.h b/arch/blackfin/mach-bf518/include/mach/defBF518.h
new file mode 100644
index 0000000..6e982ab
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/defBF518.h
@@ -0,0 +1,651 @@
+/*
+ * File:         arch/blackfin/mach-bf518/include/mach/defBF518.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _DEF_BF518_H
+#define _DEF_BF518_H
+
+/* Include all Core registers and bit definitions */
+#include <asm/def_LPBlackfin.h>
+
+/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF518 */
+
+/* Include defBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
+#include "defBF51x_base.h"
+
+/* The following are the #defines needed by ADSP-BF518 that are not in the common header */
+/* 10/100 Ethernet Controller	(0xFFC03000 - 0xFFC031FF) */
+
+#define EMAC_OPMODE             0xFFC03000       /* Operating Mode Register                              */
+#define EMAC_ADDRLO             0xFFC03004       /* Address Low (32 LSBs) Register                       */
+#define EMAC_ADDRHI             0xFFC03008       /* Address High (16 MSBs) Register                      */
+#define EMAC_HASHLO             0xFFC0300C       /* Multicast Hash Table Low (Bins 31-0) Register        */
+#define EMAC_HASHHI             0xFFC03010       /* Multicast Hash Table High (Bins 63-32) Register      */
+#define EMAC_STAADD             0xFFC03014       /* Station Management Address Register                  */
+#define EMAC_STADAT             0xFFC03018       /* Station Management Data Register                     */
+#define EMAC_FLC                0xFFC0301C       /* Flow Control Register                                */
+#define EMAC_VLAN1              0xFFC03020       /* VLAN1 Tag Register                                   */
+#define EMAC_VLAN2              0xFFC03024       /* VLAN2 Tag Register                                   */
+#define EMAC_WKUP_CTL           0xFFC0302C       /* Wake-Up Control/Status Register                      */
+#define EMAC_WKUP_FFMSK0        0xFFC03030       /* Wake-Up Frame Filter 0 Byte Mask Register            */
+#define EMAC_WKUP_FFMSK1        0xFFC03034       /* Wake-Up Frame Filter 1 Byte Mask Register            */
+#define EMAC_WKUP_FFMSK2        0xFFC03038       /* Wake-Up Frame Filter 2 Byte Mask Register            */
+#define EMAC_WKUP_FFMSK3        0xFFC0303C       /* Wake-Up Frame Filter 3 Byte Mask Register            */
+#define EMAC_WKUP_FFCMD         0xFFC03040       /* Wake-Up Frame Filter Commands Register               */
+#define EMAC_WKUP_FFOFF         0xFFC03044       /* Wake-Up Frame Filter Offsets Register                */
+#define EMAC_WKUP_FFCRC0        0xFFC03048       /* Wake-Up Frame Filter 0,1 CRC-16 Register             */
+#define EMAC_WKUP_FFCRC1        0xFFC0304C       /* Wake-Up Frame Filter 2,3 CRC-16 Register             */
+
+#define EMAC_SYSCTL             0xFFC03060       /* EMAC System Control Register                         */
+#define EMAC_SYSTAT             0xFFC03064       /* EMAC System Status Register                          */
+#define EMAC_RX_STAT            0xFFC03068       /* RX Current Frame Status Register                     */
+#define EMAC_RX_STKY            0xFFC0306C       /* RX Sticky Frame Status Register                      */
+#define EMAC_RX_IRQE            0xFFC03070       /* RX Frame Status Interrupt Enables Register           */
+#define EMAC_TX_STAT            0xFFC03074       /* TX Current Frame Status Register                     */
+#define EMAC_TX_STKY            0xFFC03078       /* TX Sticky Frame Status Register                      */
+#define EMAC_TX_IRQE            0xFFC0307C       /* TX Frame Status Interrupt Enables Register           */
+
+#define EMAC_MMC_CTL            0xFFC03080       /* MMC Counter Control Register                         */
+#define EMAC_MMC_RIRQS          0xFFC03084       /* MMC RX Interrupt Status Register                     */
+#define EMAC_MMC_RIRQE          0xFFC03088       /* MMC RX Interrupt Enables Register                    */
+#define EMAC_MMC_TIRQS          0xFFC0308C       /* MMC TX Interrupt Status Register                     */
+#define EMAC_MMC_TIRQE          0xFFC03090       /* MMC TX Interrupt Enables Register                    */
+
+#define EMAC_RXC_OK             0xFFC03100       /* RX Frame Successful Count                            */
+#define EMAC_RXC_FCS            0xFFC03104       /* RX Frame FCS Failure Count                           */
+#define EMAC_RXC_ALIGN          0xFFC03108       /* RX Alignment Error Count                             */
+#define EMAC_RXC_OCTET          0xFFC0310C       /* RX Octets Successfully Received Count                */
+#define EMAC_RXC_DMAOVF         0xFFC03110       /* Internal MAC Sublayer Error RX Frame Count           */
+#define EMAC_RXC_UNICST         0xFFC03114       /* Unicast RX Frame Count                               */
+#define EMAC_RXC_MULTI          0xFFC03118       /* Multicast RX Frame Count                             */
+#define EMAC_RXC_BROAD          0xFFC0311C       /* Broadcast RX Frame Count                             */
+#define EMAC_RXC_LNERRI         0xFFC03120       /* RX Frame In Range Error Count                        */
+#define EMAC_RXC_LNERRO         0xFFC03124       /* RX Frame Out Of Range Error Count                    */
+#define EMAC_RXC_LONG           0xFFC03128       /* RX Frame Too Long Count                              */
+#define EMAC_RXC_MACCTL         0xFFC0312C       /* MAC Control RX Frame Count                           */
+#define EMAC_RXC_OPCODE         0xFFC03130       /* Unsupported Op-Code RX Frame Count                   */
+#define EMAC_RXC_PAUSE          0xFFC03134       /* MAC Control Pause RX Frame Count                     */
+#define EMAC_RXC_ALLFRM         0xFFC03138       /* Overall RX Frame Count                               */
+#define EMAC_RXC_ALLOCT         0xFFC0313C       /* Overall RX Octet Count                               */
+#define EMAC_RXC_TYPED          0xFFC03140       /* Type/Length Consistent RX Frame Count                */
+#define EMAC_RXC_SHORT          0xFFC03144       /* RX Frame Fragment Count - Byte Count x < 64          */
+#define EMAC_RXC_EQ64           0xFFC03148       /* Good RX Frame Count - Byte Count x = 64              */
+#define EMAC_RXC_LT128          0xFFC0314C       /* Good RX Frame Count - Byte Count  64 < x < 128       */
+#define EMAC_RXC_LT256          0xFFC03150       /* Good RX Frame Count - Byte Count 128 <= x < 256      */
+#define EMAC_RXC_LT512          0xFFC03154       /* Good RX Frame Count - Byte Count 256 <= x < 512      */
+#define EMAC_RXC_LT1024         0xFFC03158       /* Good RX Frame Count - Byte Count 512 <= x < 1024     */
+#define EMAC_RXC_GE1024         0xFFC0315C       /* Good RX Frame Count - Byte Count x >= 1024           */
+
+#define EMAC_TXC_OK             0xFFC03180       /* TX Frame Successful Count                             */
+#define EMAC_TXC_1COL           0xFFC03184       /* TX Frames Successful After Single Collision Count     */
+#define EMAC_TXC_GT1COL         0xFFC03188       /* TX Frames Successful After Multiple Collisions Count  */
+#define EMAC_TXC_OCTET          0xFFC0318C       /* TX Octets Successfully Transmitted Count              */
+#define EMAC_TXC_DEFER          0xFFC03190       /* TX Frame Delayed Due To Busy Count                    */
+#define EMAC_TXC_LATECL         0xFFC03194       /* Late TX Collisions Count                              */
+#define EMAC_TXC_XS_COL         0xFFC03198       /* TX Frame Failed Due To Excessive Collisions Count     */
+#define EMAC_TXC_DMAUND         0xFFC0319C       /* Internal MAC Sublayer Error TX Frame Count            */
+#define EMAC_TXC_CRSERR         0xFFC031A0       /* Carrier Sense Deasserted During TX Frame Count        */
+#define EMAC_TXC_UNICST         0xFFC031A4       /* Unicast TX Frame Count                                */
+#define EMAC_TXC_MULTI          0xFFC031A8       /* Multicast TX Frame Count                              */
+#define EMAC_TXC_BROAD          0xFFC031AC       /* Broadcast TX Frame Count                              */
+#define EMAC_TXC_XS_DFR         0xFFC031B0       /* TX Frames With Excessive Deferral Count               */
+#define EMAC_TXC_MACCTL         0xFFC031B4       /* MAC Control TX Frame Count                            */
+#define EMAC_TXC_ALLFRM         0xFFC031B8       /* Overall TX Frame Count                                */
+#define EMAC_TXC_ALLOCT         0xFFC031BC       /* Overall TX Octet Count                                */
+#define EMAC_TXC_EQ64           0xFFC031C0       /* Good TX Frame Count - Byte Count x = 64               */
+#define EMAC_TXC_LT128          0xFFC031C4       /* Good TX Frame Count - Byte Count  64 < x < 128        */
+#define EMAC_TXC_LT256          0xFFC031C8       /* Good TX Frame Count - Byte Count 128 <= x < 256       */
+#define EMAC_TXC_LT512          0xFFC031CC       /* Good TX Frame Count - Byte Count 256 <= x < 512       */
+#define EMAC_TXC_LT1024         0xFFC031D0       /* Good TX Frame Count - Byte Count 512 <= x < 1024      */
+#define EMAC_TXC_GE1024         0xFFC031D4       /* Good TX Frame Count - Byte Count x >= 1024            */
+#define EMAC_TXC_ABORT          0xFFC031D8       /* Total TX Frames Aborted Count                         */
+
+/* Listing for IEEE-Supported Count Registers */
+
+#define FramesReceivedOK                EMAC_RXC_OK        /* RX Frame Successful Count                            */
+#define FrameCheckSequenceErrors        EMAC_RXC_FCS       /* RX Frame FCS Failure Count                           */
+#define AlignmentErrors                 EMAC_RXC_ALIGN     /* RX Alignment Error Count                             */
+#define OctetsReceivedOK                EMAC_RXC_OCTET     /* RX Octets Successfully Received Count                */
+#define FramesLostDueToIntMACRcvError   EMAC_RXC_DMAOVF    /* Internal MAC Sublayer Error RX Frame Count           */
+#define UnicastFramesReceivedOK         EMAC_RXC_UNICST    /* Unicast RX Frame Count                               */
+#define MulticastFramesReceivedOK       EMAC_RXC_MULTI     /* Multicast RX Frame Count                             */
+#define BroadcastFramesReceivedOK       EMAC_RXC_BROAD     /* Broadcast RX Frame Count                             */
+#define InRangeLengthErrors             EMAC_RXC_LNERRI    /* RX Frame In Range Error Count                        */
+#define OutOfRangeLengthField           EMAC_RXC_LNERRO    /* RX Frame Out Of Range Error Count                    */
+#define FrameTooLongErrors              EMAC_RXC_LONG      /* RX Frame Too Long Count                              */
+#define MACControlFramesReceived        EMAC_RXC_MACCTL    /* MAC Control RX Frame Count                           */
+#define UnsupportedOpcodesReceived      EMAC_RXC_OPCODE    /* Unsupported Op-Code RX Frame Count                   */
+#define PAUSEMACCtrlFramesReceived      EMAC_RXC_PAUSE     /* MAC Control Pause RX Frame Count                     */
+#define FramesReceivedAll               EMAC_RXC_ALLFRM    /* Overall RX Frame Count                               */
+#define OctetsReceivedAll               EMAC_RXC_ALLOCT    /* Overall RX Octet Count                               */
+#define TypedFramesReceived             EMAC_RXC_TYPED     /* Type/Length Consistent RX Frame Count                */
+#define FramesLenLt64Received           EMAC_RXC_SHORT     /* RX Frame Fragment Count - Byte Count x < 64          */
+#define FramesLenEq64Received           EMAC_RXC_EQ64      /* Good RX Frame Count - Byte Count x = 64              */
+#define FramesLen65_127Received         EMAC_RXC_LT128     /* Good RX Frame Count - Byte Count  64 < x < 128       */
+#define FramesLen128_255Received        EMAC_RXC_LT256     /* Good RX Frame Count - Byte Count 128 <= x < 256      */
+#define FramesLen256_511Received        EMAC_RXC_LT512     /* Good RX Frame Count - Byte Count 256 <= x < 512      */
+#define FramesLen512_1023Received       EMAC_RXC_LT1024    /* Good RX Frame Count - Byte Count 512 <= x < 1024     */
+#define FramesLen1024_MaxReceived       EMAC_RXC_GE1024    /* Good RX Frame Count - Byte Count x >= 1024           */
+
+#define FramesTransmittedOK             EMAC_TXC_OK        /* TX Frame Successful Count                            */
+#define SingleCollisionFrames           EMAC_TXC_1COL      /* TX Frames Successful After Single Collision Count    */
+#define MultipleCollisionFrames         EMAC_TXC_GT1COL    /* TX Frames Successful After Multiple Collisions Count */
+#define OctetsTransmittedOK             EMAC_TXC_OCTET     /* TX Octets Successfully Transmitted Count             */
+#define FramesWithDeferredXmissions     EMAC_TXC_DEFER     /* TX Frame Delayed Due To Busy Count                   */
+#define LateCollisions                  EMAC_TXC_LATECL    /* Late TX Collisions Count                             */
+#define FramesAbortedDueToXSColls       EMAC_TXC_XS_COL    /* TX Frame Failed Due To Excessive Collisions Count    */
+#define FramesLostDueToIntMacXmitError  EMAC_TXC_DMAUND    /* Internal MAC Sublayer Error TX Frame Count           */
+#define CarrierSenseErrors              EMAC_TXC_CRSERR    /* Carrier Sense Deasserted During TX Frame Count       */
+#define UnicastFramesXmittedOK          EMAC_TXC_UNICST    /* Unicast TX Frame Count                               */
+#define MulticastFramesXmittedOK        EMAC_TXC_MULTI     /* Multicast TX Frame Count                             */
+#define BroadcastFramesXmittedOK        EMAC_TXC_BROAD     /* Broadcast TX Frame Count                             */
+#define FramesWithExcessiveDeferral     EMAC_TXC_XS_DFR    /* TX Frames With Excessive Deferral Count              */
+#define MACControlFramesTransmitted     EMAC_TXC_MACCTL    /* MAC Control TX Frame Count                           */
+#define FramesTransmittedAll            EMAC_TXC_ALLFRM    /* Overall TX Frame Count                               */
+#define OctetsTransmittedAll            EMAC_TXC_ALLOCT    /* Overall TX Octet Count                               */
+#define FramesLenEq64Transmitted        EMAC_TXC_EQ64      /* Good TX Frame Count - Byte Count x = 64              */
+#define FramesLen65_127Transmitted      EMAC_TXC_LT128     /* Good TX Frame Count - Byte Count  64 < x < 128       */
+#define FramesLen128_255Transmitted     EMAC_TXC_LT256     /* Good TX Frame Count - Byte Count 128 <= x < 256      */
+#define FramesLen256_511Transmitted     EMAC_TXC_LT512     /* Good TX Frame Count - Byte Count 256 <= x < 512      */
+#define FramesLen512_1023Transmitted    EMAC_TXC_LT1024    /* Good TX Frame Count - Byte Count 512 <= x < 1024     */
+#define FramesLen1024_MaxTransmitted    EMAC_TXC_GE1024    /* Good TX Frame Count - Byte Count x >= 1024           */
+#define TxAbortedFrames                 EMAC_TXC_ABORT     /* Total TX Frames Aborted Count                        */
+
+/***********************************************************************************
+** System MMR Register Bits And Macros
+**
+** Disclaimer:	All macros are intended to make C and Assembly code more readable.
+**				Use these macros carefully, as any that do left shifts for field
+**				depositing will result in the lower order bits being destroyed.  Any
+**				macro that shifts left to properly position the bit-field should be
+**				used as part of an OR to initialize a register and NOT as a dynamic
+**				modifier UNLESS the lower order bits are saved and ORed back in when
+**				the macro is used.
+*************************************************************************************/
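A minimal sketch of the failure mode this disclaimer warns about, using a hypothetical volatile accessor (not a kernel API): a bare OR cannot clear a field's existing bits, so the register must be saved and masked before the new value is deposited.

#define MMR32(addr)  (*(volatile unsigned long *)(addr))   /* hypothetical accessor */

static void emac_sta_set_regad(unsigned int new_reg)
{
	/* Wrong: an OR alone cannot clear the REGAD bits already set, so the
	 * deposited value is corrupted by whatever was there before:
	 *     MMR32(EMAC_STAADD) |= SET_REGAD(new_reg);
	 *
	 * Right: save the register, clear the field, OR the new value back.  */
	MMR32(EMAC_STAADD) = (MMR32(EMAC_STAADD) & ~REGAD) | SET_REGAD(new_reg);
}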
+
+/************************  ETHERNET 10/100 CONTROLLER MASKS  ************************/
+
+/* EMAC_OPMODE Masks */
+
+#define	RE                 0x00000001     /* Receiver Enable                                    */
+#define	ASTP               0x00000002     /* Enable Automatic Pad Stripping On RX Frames        */
+#define	HU                 0x00000010     /* Hash Filter Unicast Address                        */
+#define	HM                 0x00000020     /* Hash Filter Multicast Address                      */
+#define	PAM                0x00000040     /* Pass-All-Multicast Mode Enable                     */
+#define	PR                 0x00000080     /* Promiscuous Mode Enable                            */
+#define	IFE                0x00000100     /* Inverse Filtering Enable                           */
+#define	DBF                0x00000200     /* Disable Broadcast Frame Reception                  */
+#define	PBF                0x00000400     /* Pass Bad Frames Enable                             */
+#define	PSF                0x00000800     /* Pass Short Frames Enable                           */
+#define	RAF                0x00001000     /* Receive-All Mode                                   */
+#define	TE                 0x00010000     /* Transmitter Enable                                 */
+#define	DTXPAD             0x00020000     /* Disable Automatic TX Padding                       */
+#define	DTXCRC             0x00040000     /* Disable Automatic TX CRC Generation                */
+#define	DC                 0x00080000     /* Deferral Check                                     */
+#define	BOLMT              0x00300000     /* Back-Off Limit                                     */
+#define	BOLMT_10           0x00000000     /*		10-bit range                            */
+#define	BOLMT_8            0x00100000     /*		8-bit range                             */
+#define	BOLMT_4            0x00200000     /*		4-bit range                             */
+#define	BOLMT_1            0x00300000     /*		1-bit range                             */
+#define	DRTY               0x00400000     /* Disable TX Retry On Collision                      */
+#define	LCTRE              0x00800000     /* Enable TX Retry On Late Collision                  */
+#define	RMII               0x01000000     /* RMII/MII* Mode                                     */
+#define	RMII_10            0x02000000     /* Speed Select for RMII Port (10MBit/100MBit*)       */
+#define	FDMODE             0x04000000     /* Duplex Mode Enable (Full/Half*)                    */
+#define	LB                 0x08000000     /* Internal Loopback Enable                           */
+#define	DRO                0x10000000     /* Disable Receive Own Frames (Half-Duplex Mode)      */
+
+/* EMAC_STAADD Masks */
+
+#define	STABUSY            0x00000001     /* Initiate Station Mgt Reg Access / STA Busy Stat    */
+#define	STAOP              0x00000002     /* Station Management Operation Code (Write/Read*)    */
+#define	STADISPRE          0x00000004     /* Disable Preamble Generation                        */
+#define	STAIE              0x00000008     /* Station Mgt. Transfer Done Interrupt Enable        */
+#define	REGAD              0x000007C0     /* STA Register Address                               */
+#define	PHYAD              0x0000F800     /* PHY Device Address                                 */
+
+#define	SET_REGAD(x) (((x)&0x1F)<<  6 )   /* Set STA Register Address                           */
+#define	SET_PHYAD(x) (((x)&0x1F)<< 11 )   /* Set PHY Device Address                             */
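A write transfer differs from the read sketched earlier only in loading EMAC_STADAT first and setting STAOP; again a sketch with the hypothetical MMR32() accessor:

static void emac_mdio_write(int phy, int reg, unsigned int val)
{
	MMR32(EMAC_STADAT) = val & STADATA;      /* data to send to the PHY      */
	MMR32(EMAC_STAADD) = SET_PHYAD(phy) | SET_REGAD(reg) | STAOP | STABUSY;

	while (MMR32(EMAC_STAADD) & STABUSY)
		;                                    /* wait for completion          */
}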
+
+/* EMAC_STADAT Mask */
+
+#define	STADATA            0x0000FFFF     /* Station Management Data                            */
+
+/* EMAC_FLC Masks */
+
+#define	FLCBUSY            0x00000001     /* Send Flow Ctrl Frame / Flow Ctrl Busy Status       */
+#define	FLCE               0x00000002     /* Flow Control Enable                                */
+#define	PCF                0x00000004     /* Pass Control Frames                                */
+#define	BKPRSEN            0x00000008     /* Enable Backpressure                                */
+#define	FLCPAUSE           0xFFFF0000     /* Pause Time                                         */
+
+#define	SET_FLCPAUSE(x) (((x)&0xFFFF)<< 16) /* Set Pause Time                                   */
+
+/* EMAC_WKUP_CTL Masks */
+
+#define	CAPWKFRM           0x00000001    /* Capture Wake-Up Frames                              */
+#define	MPKE               0x00000002    /* Magic Packet Enable                                 */
+#define	RWKE               0x00000004    /* Remote Wake-Up Frame Enable                         */
+#define	GUWKE              0x00000008    /* Global Unicast Wake Enable                          */
+#define	MPKS               0x00000020    /* Magic Packet Received Status                        */
+#define	RWKS               0x00000F00    /* Wake-Up Frame Received Status, Filters 3:0          */
+
+/* EMAC_WKUP_FFCMD Masks */
+
+#define	WF0_E              0x00000001    /* Enable Wake-Up Filter 0                              */
+#define	WF0_T              0x00000008    /* Wake-Up Filter 0 Addr Type (Multicast/Unicast*)      */
+#define	WF1_E              0x00000100    /* Enable Wake-Up Filter 1                              */
+#define	WF1_T              0x00000800    /* Wake-Up Filter 1 Addr Type (Multicast/Unicast*)      */
+#define	WF2_E              0x00010000    /* Enable Wake-Up Filter 2                              */
+#define	WF2_T              0x00080000    /* Wake-Up Filter 2 Addr Type (Multicast/Unicast*)      */
+#define	WF3_E              0x01000000    /* Enable Wake-Up Filter 3                              */
+#define	WF3_T              0x08000000    /* Wake-Up Filter 3 Addr Type (Multicast/Unicast*)      */
+
+/* EMAC_WKUP_FFOFF Masks */
+
+#define	WF0_OFF            0x000000FF    /* Wake-Up Filter 0 Pattern Offset                      */
+#define	WF1_OFF            0x0000FF00    /* Wake-Up Filter 1 Pattern Offset                      */
+#define	WF2_OFF            0x00FF0000    /* Wake-Up Filter 2 Pattern Offset                      */
+#define	WF3_OFF            0xFF000000    /* Wake-Up Filter 3 Pattern Offset                      */
+
+#define	SET_WF0_OFF(x) (((x)&0xFF)<<  0 ) /* Set Wake-Up Filter 0 Byte Offset                    */
+#define	SET_WF1_OFF(x) (((x)&0xFF)<<  8 ) /* Set Wake-Up Filter 1 Byte Offset                    */
+#define	SET_WF2_OFF(x) (((x)&0xFF)<< 16 ) /* Set Wake-Up Filter 2 Byte Offset                    */
+#define	SET_WF3_OFF(x) (((x)&0xFF)<< 24 ) /* Set Wake-Up Filter 3 Byte Offset                    */
+/* Set ALL Offsets */
+#define	SET_WF_OFFS(x0,x1,x2,x3) (SET_WF0_OFF((x0))|SET_WF1_OFF((x1))|SET_WF2_OFF((x2))|SET_WF3_OFF((x3)))
+
+/* EMAC_WKUP_FFCRC0 Masks */
+
+#define	WF0_CRC           0x0000FFFF    /* Wake-Up Filter 0 Pattern CRC                           */
+#define	WF1_CRC           0xFFFF0000    /* Wake-Up Filter 1 Pattern CRC                           */
+
+#define	SET_WF0_CRC(x) (((x)&0xFFFF)<<   0 ) /* Set Wake-Up Filter 0 Target CRC                   */
+#define	SET_WF1_CRC(x) (((x)&0xFFFF)<<  16 ) /* Set Wake-Up Filter 1 Target CRC                   */
+
+/* EMAC_WKUP_FFCRC1 Masks */
+
+#define	WF2_CRC           0x0000FFFF    /* Wake-Up Filter 2 Pattern CRC                           */
+#define	WF3_CRC           0xFFFF0000    /* Wake-Up Filter 3 Pattern CRC                           */
+
+#define	SET_WF2_CRC(x) (((x)&0xFFFF)<<   0 ) /* Set Wake-Up Filter 2 Target CRC                   */
+#define	SET_WF3_CRC(x) (((x)&0xFFFF)<<  16 ) /* Set Wake-Up Filter 3 Target CRC                   */
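Both 16-bit pattern CRCs of a register pair are deposited in one OR. A sketch with illustrative values and the hypothetical MMR32() accessor:

static void emac_wkup_set_crcs(void)
{
	/* Illustrative CRC values for wake-up filters 0 and 1.               */
	MMR32(EMAC_WKUP_FFCRC0) = SET_WF0_CRC(0x1234) | SET_WF1_CRC(0x5678);
}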
+
+/* EMAC_SYSCTL Masks */
+
+#define	PHYIE             0x00000001    /* PHY_INT Interrupt Enable                               */
+#define	RXDWA             0x00000002    /* Receive Frame DMA Word Alignment (Odd/Even*)           */
+#define	RXCKS             0x00000004    /* Enable RX Frame TCP/UDP Checksum Computation           */
+#define	TXDWA             0x00000010    /* Transmit Frame DMA Word Alignment (Odd/Even*)          */
+#define	MDCDIV            0x00003F00    /* SCLK:MDC Clock Divisor [MDC=SCLK/(2*(N+1))]            */
+
+#define	SET_MDCDIV(x) (((x)&0x3F)<< 8)   /* Set MDC Clock Divisor                                 */
+
+/* EMAC_SYSTAT Masks */
+
+#define	PHYINT            0x00000001    /* PHY_INT Interrupt Status                               */
+#define	MMCINT            0x00000002    /* MMC Counter Interrupt Status                           */
+#define	RXFSINT           0x00000004    /* RX Frame-Status Interrupt Status                       */
+#define	TXFSINT           0x00000008    /* TX Frame-Status Interrupt Status                       */
+#define	WAKEDET           0x00000010    /* Wake-Up Detected Status                                */
+#define	RXDMAERR          0x00000020    /* RX DMA Direction Error Status                          */
+#define	TXDMAERR          0x00000040    /* TX DMA Direction Error Status                          */
+#define	STMDONE           0x00000080    /* Station Mgt. Transfer Done Interrupt Status            */
+
+/* EMAC_RX_STAT, EMAC_RX_STKY, and EMAC_RX_IRQE Masks */
+
+#define	RX_FRLEN          0x000007FF    /* Frame Length In Bytes                                  */
+#define	RX_COMP           0x00001000    /* RX Frame Complete                                      */
+#define	RX_OK             0x00002000    /* RX Frame Received With No Errors                       */
+#define	RX_LONG           0x00004000    /* RX Frame Too Long Error                                */
+#define	RX_ALIGN          0x00008000    /* RX Frame Alignment Error                               */
+#define	RX_CRC            0x00010000    /* RX Frame CRC Error                                     */
+#define	RX_LEN            0x00020000    /* RX Frame Length Error                                  */
+#define	RX_FRAG           0x00040000    /* RX Frame Fragment Error                                */
+#define	RX_ADDR           0x00080000    /* RX Frame Address Filter Failed Error                   */
+#define	RX_DMAO           0x00100000    /* RX Frame DMA Overrun Error                             */
+#define	RX_PHY            0x00200000    /* RX Frame PHY Error                                     */
+#define	RX_LATE           0x00400000    /* RX Frame Late Collision Error                          */
+#define	RX_RANGE          0x00800000    /* RX Frame Length Field Out of Range Error               */
+#define	RX_MULTI          0x01000000    /* RX Multicast Frame Indicator                           */
+#define	RX_BROAD          0x02000000    /* RX Broadcast Frame Indicator                           */
+#define	RX_CTL            0x04000000    /* RX Control Frame Indicator                             */
+#define	RX_UCTL           0x08000000    /* Unsupported RX Control Frame Indicator                 */
+#define	RX_TYPE           0x10000000    /* RX Typed Frame Indicator                               */
+#define	RX_VLAN1          0x20000000    /* RX VLAN1 Frame Indicator                               */
+#define	RX_VLAN2          0x40000000    /* RX VLAN2 Frame Indicator                               */
+#define	RX_ACCEPT         0x80000000    /* RX Frame Accepted Indicator                            */
+
+/*  EMAC_TX_STAT, EMAC_TX_STKY, and EMAC_TX_IRQE Masks  */
+
+#define	TX_COMP           0x00000001    /* TX Frame Complete                                      */
+#define	TX_OK             0x00000002    /* TX Frame Sent With No Errors                           */
+#define	TX_ECOLL          0x00000004    /* TX Frame Excessive Collision Error                     */
+#define	TX_LATE           0x00000008    /* TX Frame Late Collision Error                          */
+#define	TX_DMAU           0x00000010    /* TX Frame DMA Underrun Error (STAT)                     */
+#define	TX_MACE           0x00000010    /* Internal MAC Error Detected (STKY and IRQE)            */
+#define	TX_EDEFER         0x00000020    /* TX Frame Excessive Deferral Error                      */
+#define	TX_BROAD          0x00000040    /* TX Broadcast Frame Indicator                           */
+#define	TX_MULTI          0x00000080    /* TX Multicast Frame Indicator                           */
+#define	TX_CCNT           0x00000F00    /* TX Frame Collision Count                               */
+#define	TX_DEFER          0x00001000    /* TX Frame Deferred Indicator                            */
+#define	TX_CRS            0x00002000    /* TX Frame Carrier Sense Not Asserted Error              */
+#define	TX_LOSS           0x00004000    /* TX Frame Carrier Lost During TX Error                  */
+#define	TX_RETRY          0x00008000    /* TX Frame Successful After Retry                        */
+#define	TX_FRLEN          0x07FF0000    /* TX Frame Length (Bytes)                                */
+
+/* EMAC_MMC_CTL Masks */
+#define	RSTC              0x00000001    /* Reset All Counters                                     */
+#define	CROLL             0x00000002    /* Counter Roll-Over Enable                               */
+#define	CCOR              0x00000004    /* Counter Clear-On-Read Mode Enable                      */
+#define	MMCE              0x00000008    /* Enable MMC Counter Operation                           */
+
+/* EMAC_MMC_RIRQS and EMAC_MMC_RIRQE Masks */
+#define	RX_OK_CNT         0x00000001    /* RX Frames Received With No Errors                      */
+#define	RX_FCS_CNT        0x00000002    /* RX Frames W/Frame Check Sequence Errors                */
+#define	RX_ALIGN_CNT      0x00000004    /* RX Frames With Alignment Errors                        */
+#define	RX_OCTET_CNT      0x00000008    /* RX Octets Received OK                                  */
+#define	RX_LOST_CNT       0x00000010    /* RX Frames Lost Due To Internal MAC RX Error            */
+#define	RX_UNI_CNT        0x00000020    /* Unicast RX Frames Received OK                          */
+#define	RX_MULTI_CNT      0x00000040    /* Multicast RX Frames Received OK                        */
+#define	RX_BROAD_CNT      0x00000080    /* Broadcast RX Frames Received OK                        */
+#define	RX_IRL_CNT        0x00000100    /* RX Frames With In-Range Length Errors                  */
+#define	RX_ORL_CNT        0x00000200    /* RX Frames With Out-Of-Range Length Errors              */
+#define	RX_LONG_CNT       0x00000400    /* RX Frames With Frame Too Long Errors                   */
+#define	RX_MACCTL_CNT     0x00000800    /* MAC Control RX Frames Received                         */
+#define	RX_OPCODE_CTL     0x00001000    /* Unsupported Op-Code RX Frames Received                 */
+#define	RX_PAUSE_CNT      0x00002000    /* PAUSE MAC Control RX Frames Received                   */
+#define	RX_ALLF_CNT       0x00004000    /* All RX Frames Received                                 */
+#define	RX_ALLO_CNT       0x00008000    /* All RX Octets Received                                 */
+#define	RX_TYPED_CNT      0x00010000    /* Typed RX Frames Received                               */
+#define	RX_SHORT_CNT      0x00020000    /* RX Frame Fragments (< 64 Bytes) Received               */
+#define	RX_EQ64_CNT       0x00040000    /* 64-Byte RX Frames Received                             */
+#define	RX_LT128_CNT      0x00080000    /* 65-127-Byte RX Frames Received                         */
+#define	RX_LT256_CNT      0x00100000    /* 128-255-Byte RX Frames Received                        */
+#define	RX_LT512_CNT      0x00200000    /* 256-511-Byte RX Frames Received                        */
+#define	RX_LT1024_CNT     0x00400000    /* 512-1023-Byte RX Frames Received                       */
+#define	RX_GE1024_CNT     0x00800000    /* 1024-Max-Byte RX Frames Received                       */
+
+/* EMAC_MMC_TIRQS and EMAC_MMC_TIRQE Masks  */
+
+#define	TX_OK_CNT         0x00000001    /* TX Frames Sent OK                                      */
+#define	TX_SCOLL_CNT      0x00000002    /* TX Frames With Single Collisions                       */
+#define	TX_MCOLL_CNT      0x00000004    /* TX Frames With Multiple Collisions                     */
+#define	TX_OCTET_CNT      0x00000008    /* TX Octets Sent OK                                      */
+#define	TX_DEFER_CNT      0x00000010    /* TX Frames With Deferred Transmission                   */
+#define	TX_LATE_CNT       0x00000020    /* TX Frames With Late Collisions                         */
+#define	TX_ABORTC_CNT     0x00000040    /* TX Frames Aborted Due To Excess Collisions             */
+#define	TX_LOST_CNT       0x00000080    /* TX Frames Lost Due To Internal MAC TX Error            */
+#define	TX_CRS_CNT        0x00000100    /* TX Frames With Carrier Sense Errors                    */
+#define	TX_UNI_CNT        0x00000200    /* Unicast TX Frames Sent                                 */
+#define	TX_MULTI_CNT      0x00000400    /* Multicast TX Frames Sent                               */
+#define	TX_BROAD_CNT      0x00000800    /* Broadcast TX Frames Sent                               */
+#define	TX_EXDEF_CTL      0x00001000    /* TX Frames With Excessive Deferral                      */
+#define	TX_MACCTL_CNT     0x00002000    /* MAC Control TX Frames Sent                             */
+#define	TX_ALLF_CNT       0x00004000    /* All TX Frames Sent                                     */
+#define	TX_ALLO_CNT       0x00008000    /* All TX Octets Sent                                     */
+#define	TX_EQ64_CNT       0x00010000    /* 64-Byte TX Frames Sent                                 */
+#define	TX_LT128_CNT      0x00020000    /* 65-127-Byte TX Frames Sent                             */
+#define	TX_LT256_CNT      0x00040000    /* 128-255-Byte TX Frames Sent                            */
+#define	TX_LT512_CNT      0x00080000    /* 256-511-Byte TX Frames Sent                            */
+#define	TX_LT1024_CNT     0x00100000    /* 512-1023-Byte TX Frames Sent                           */
+#define	TX_GE1024_CNT     0x00200000    /* 1024-Max-Byte TX Frames Sent                           */
+#define	TX_ABORT_CNT      0x00400000    /* TX Frames Aborted                                      */
+
+/* SDH Registers */
+
+#define SDH_PWR_CTL                    0xFFC03900 /* SDH Power Control */
+#define SDH_CLK_CTL                    0xFFC03904 /* SDH Clock Control */
+#define SDH_ARGUMENT                   0xFFC03908 /* SDH Argument */
+#define SDH_COMMAND                    0xFFC0390C /* SDH Command */
+#define SDH_RESP_CMD                   0xFFC03910 /* SDH Response Command */
+#define SDH_RESPONSE0                  0xFFC03914 /* SDH Response0 */
+#define SDH_RESPONSE1                  0xFFC03918 /* SDH Response1 */
+#define SDH_RESPONSE2                  0xFFC0391C /* SDH Response2 */
+#define SDH_RESPONSE3                  0xFFC03920 /* SDH Response3 */
+#define SDH_DATA_TIMER                 0xFFC03924 /* SDH Data Timer */
+#define SDH_DATA_LGTH                  0xFFC03928 /* SDH Data Length */
+#define SDH_DATA_CTL                   0xFFC0392C /* SDH Data Control */
+#define SDH_DATA_CNT                   0xFFC03930 /* SDH Data Counter */
+#define SDH_STATUS                     0xFFC03934 /* SDH Status */
+#define SDH_STATUS_CLR                 0xFFC03938 /* SDH Status Clear */
+#define SDH_MASK0                      0xFFC0393C /* SDH Interrupt0 Mask */
+#define SDH_MASK1                      0xFFC03940 /* SDH Interrupt1 Mask */
+#define SDH_FIFO_CNT                   0xFFC03948 /* SDH FIFO Counter */
+#define SDH_FIFO                       0xFFC03980 /* SDH Data FIFO */
+#define SDH_E_STATUS                   0xFFC039C0 /* SDH Exception Status */
+#define SDH_E_MASK                     0xFFC039C4 /* SDH Exception Mask */
+#define SDH_CFG                        0xFFC039C8 /* SDH Configuration */
+#define SDH_RD_WAIT_EN                 0xFFC039CC /* SDH Read Wait Enable */
+#define SDH_PID0                       0xFFC039D0 /* SDH Peripheral Identification0 */
+#define SDH_PID1                       0xFFC039D4 /* SDH Peripheral Identification1 */
+#define SDH_PID2                       0xFFC039D8 /* SDH Peripheral Identification2 */
+#define SDH_PID3                       0xFFC039DC /* SDH Peripheral Identification3 */
+#define SDH_PID4                       0xFFC039E0 /* SDH Peripheral Identification4 */
+#define SDH_PID5                       0xFFC039E4 /* SDH Peripheral Identification5 */
+#define SDH_PID6                       0xFFC039E8 /* SDH Peripheral Identification6 */
+#define SDH_PID7                       0xFFC039EC /* SDH Peripheral Identification7 */
+
+/* Removable Storage Interface Registers */
+
+#define RSI_PWR_CONTROL                0xFFC03800 /* RSI Power Control Register */
+#define RSI_CLK_CONTROL                0xFFC03804 /* RSI Clock Control Register */
+#define RSI_ARGUMENT                   0xFFC03808 /* RSI Argument Register */
+#define RSI_COMMAND                    0xFFC0380C /* RSI Command Register */
+#define RSI_RESP_CMD                   0xFFC03810 /* RSI Response Command Register */
+#define RSI_RESPONSE0                  0xFFC03814 /* RSI Response Register */
+#define RSI_RESPONSE1                  0xFFC03818 /* RSI Response Register */
+#define RSI_RESPONSE2                  0xFFC0381C /* RSI Response Register */
+#define RSI_RESPONSE3                  0xFFC03820 /* RSI Response Register */
+#define RSI_DATA_TIMER                 0xFFC03824 /* RSI Data Timer Register */
+#define RSI_DATA_LGTH                  0xFFC03828 /* RSI Data Length Register */
+#define RSI_DATA_CONTROL               0xFFC0382C /* RSI Data Control Register */
+#define RSI_DATA_CNT                   0xFFC03830 /* RSI Data Counter Register */
+#define RSI_STATUS                     0xFFC03834 /* RSI Status Register */
+#define RSI_STATUSCL                   0xFFC03838 /* RSI Status Clear Register */
+#define RSI_MASK0                      0xFFC0383C /* RSI Interrupt 0 Mask Register */
+#define RSI_MASK1                      0xFFC03840 /* RSI Interrupt 1 Mask Register */
+#define RSI_FIFO_CNT                   0xFFC03848 /* RSI FIFO Counter Register */
+#define RSI_CEATA_CONTROL              0xFFC0384C /* RSI CEATA Register */
+#define RSI_FIFO                       0xFFC03880 /* RSI Data FIFO Register */
+#define RSI_ESTAT                      0xFFC038C0 /* RSI Exception Status Register */
+#define RSI_EMASK                      0xFFC038C4 /* RSI Exception Mask Register */
+#define RSI_CONFIG                     0xFFC038C8 /* RSI Configuration Register */
+#define RSI_RD_WAIT_EN                 0xFFC038CC /* RSI Read Wait Enable Register */
+#define RSI_PID0                       0xFFC03FE0 /* RSI Peripheral ID Register 0 */
+#define RSI_PID1                       0xFFC03FE4 /* RSI Peripheral ID Register 1 */
+#define RSI_PID2                       0xFFC03FE8 /* RSI Peripheral ID Register 2 */
+#define RSI_PID3                       0xFFC03FEC /* RSI Peripheral ID Register 3 */
+#define RSI_PID4                       0xFFC03FF0 /* RSI Peripheral ID Register 4 */
+#define RSI_PID5                       0xFFC03FF4 /* RSI Peripheral ID Register 5 */
+#define RSI_PID6                       0xFFC03FF8 /* RSI Peripheral ID Register 6 */
+#define RSI_PID7                       0xFFC03FFC /* RSI Peripheral ID Register 7 */
+
+/* PTP TSYNC Registers */
+
+#define EMAC_PTP_CTL                   0xFFC030A0 /* PTP Block Control */
+#define EMAC_PTP_IE                    0xFFC030A4 /* PTP Block Interrupt Enable */
+#define EMAC_PTP_ISTAT                 0xFFC030A8 /* PTP Block Interrupt Status */
+#define EMAC_PTP_FOFF                  0xFFC030AC /* PTP Filter offset Register */
+#define EMAC_PTP_FV1                   0xFFC030B0 /* PTP Filter Value Register 1 */
+#define EMAC_PTP_FV2                   0xFFC030B4 /* PTP Filter Value Register 2 */
+#define EMAC_PTP_FV3                   0xFFC030B8 /* PTP Filter Value Register 3 */
+#define EMAC_PTP_ADDEND                0xFFC030BC /* PTP Addend for Frequency Compensation */
+#define EMAC_PTP_ACCR                  0xFFC030C0 /* PTP Accumulator for Frequency Compensation */
+#define EMAC_PTP_OFFSET                0xFFC030C4 /* PTP Time Offset Register */
+#define EMAC_PTP_TIMELO                0xFFC030C8 /* PTP Precision Clock Time Low */
+#define EMAC_PTP_TIMEHI                0xFFC030CC /* PTP Precision Clock Time High */
+#define EMAC_PTP_RXSNAPLO              0xFFC030D0 /* PTP Receive Snapshot Register Low */
+#define EMAC_PTP_RXSNAPHI              0xFFC030D4 /* PTP Receive Snapshot Register High */
+#define EMAC_PTP_TXSNAPLO              0xFFC030D8 /* PTP Transmit Snapshot Register Low */
+#define EMAC_PTP_TXSNAPHI              0xFFC030DC /* PTP Transmit Snapshot Register High */
+#define EMAC_PTP_ALARMLO               0xFFC030E0 /* PTP Alarm time Low */
+#define EMAC_PTP_ALARMHI               0xFFC030E4 /* PTP Alarm time High */
+#define EMAC_PTP_ID_OFF                0xFFC030E8 /* PTP Capture ID offset register */
+#define EMAC_PTP_ID_SNAP               0xFFC030EC /* PTP Capture ID register */
+#define EMAC_PTP_PPS_STARTLO           0xFFC030F0 /* PPS Start Time Low */
+#define EMAC_PTP_PPS_STARTHI           0xFFC030F4 /* PPS Start Time High */
+#define EMAC_PTP_PPS_PERIOD            0xFFC030F8 /* PPS Count Register */
+
+/* ********************************************************** */
+/*     SINGLE BIT MACRO PAIRS (bit mask and negated one)      */
+/*     and MULTI BIT READ MACROS                              */
+/* ********************************************************** */
+
+/* Bit masks for SDH_COMMAND */
+
+#define                   CMD_IDX  0x3f       /* Command Index */
+#define                   CMD_RSP  0x40       /* Response */
+#define                 CMD_L_RSP  0x80       /* Long Response */
+#define                 CMD_INT_E  0x100      /* Command Interrupt */
+#define                CMD_PEND_E  0x200      /* Command Pending */
+#define                     CMD_E  0x400      /* Command Enable */
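+
+/*
+ * Illustrative sketch (not part of the register map): composing an
+ * SDH_COMMAND value from the masks above.  The command index is placed in
+ * CMD_IDX and the enable/response bits are ORed in.  The helper name and
+ * the choice of bits are hypothetical; consult the hardware manual for the
+ * exact command sequence required.
+ */
+#if 0
+static inline unsigned short sdh_make_cmd(unsigned short idx, int want_resp)
+{
+	unsigned short cmd = (idx & CMD_IDX) | CMD_E;
+
+	if (want_resp)
+		cmd |= CMD_RSP;	/* expect a (short) response */
+	return cmd;
+}
+#endif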
+
+/* Bit masks for SDH_PWR_CTL */
+
+#define                    PWR_ON  0x3        /* Power On */
+#if 0
+#define                       TBD  0x3c       /* TBD */
+#endif
+#define                 SD_CMD_OD  0x40       /* Open Drain Output */
+#define                   ROD_CTL  0x80       /* Rod Control */
+
+/* Bit masks for SDH_CLK_CTL */
+
+#define                    CLKDIV  0xff       /* MC_CLK Divisor */
+#define                     CLK_E  0x100      /* MC_CLK Bus Clock Enable */
+#define                  PWR_SV_E  0x200      /* Power Save Enable */
+#define             CLKDIV_BYPASS  0x400      /* Bypass Divisor */
+#define                  WIDE_BUS  0x800      /* Wide Bus Mode Enable */
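+
+/*
+ * Illustrative sketch (not part of the register map): programming
+ * SDH_CLK_CTL with a divisor and the bus-clock enable.  No particular
+ * SCLK-to-MC_CLK relationship is assumed here; the divisor value is simply
+ * masked into CLKDIV.  The helper name is hypothetical.
+ */
+#if 0
+static inline unsigned short sdh_clk_ctl_val(unsigned char div, int wide_bus)
+{
+	unsigned short ctl = (div & CLKDIV) | CLK_E;
+
+	if (wide_bus)
+		ctl |= WIDE_BUS;	/* 4-bit bus mode */
+	return ctl;
+}
+#endif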
+
+/* Bit masks for SDH_RESP_CMD */
+
+#define                  RESP_CMD  0x3f       /* Response Command */
+
+/* Bit masks for SDH_DATA_CTL */
+
+#define                     DTX_E  0x1        /* Data Transfer Enable */
+#define                   DTX_DIR  0x2        /* Data Transfer Direction */
+#define                  DTX_MODE  0x4        /* Data Transfer Mode */
+#define                 DTX_DMA_E  0x8        /* Data Transfer DMA Enable */
+#define              DTX_BLK_LGTH  0xf0       /* Data Transfer Block Length */
+
+/* Bit masks for SDH_STATUS */
+
+#define              CMD_CRC_FAIL  0x1        /* CMD CRC Fail */
+#define              DAT_CRC_FAIL  0x2        /* Data CRC Fail */
+#define              CMD_TIME_OUT  0x4        /* CMD Time Out */
+#define              DAT_TIME_OUT  0x8        /* Data Time Out */
+#define               TX_UNDERRUN  0x10       /* Transmit Underrun */
+#define                RX_OVERRUN  0x20       /* Receive Overrun */
+#define              CMD_RESP_END  0x40       /* CMD Response End */
+#define                  CMD_SENT  0x80       /* CMD Sent */
+#define                   DAT_END  0x100      /* Data End */
+#define             START_BIT_ERR  0x200      /* Start Bit Error */
+#define               DAT_BLK_END  0x400      /* Data Block End */
+#define                   CMD_ACT  0x800      /* CMD Active */
+#define                    TX_ACT  0x1000     /* Transmit Active */
+#define                    RX_ACT  0x2000     /* Receive Active */
+#define              TX_FIFO_STAT  0x4000     /* Transmit FIFO Status */
+#define              RX_FIFO_STAT  0x8000     /* Receive FIFO Status */
+#define              TX_FIFO_FULL  0x10000    /* Transmit FIFO Full */
+#define              RX_FIFO_FULL  0x20000    /* Receive FIFO Full */
+#define              TX_FIFO_ZERO  0x40000    /* Transmit FIFO Empty */
+#define               RX_DAT_ZERO  0x80000    /* Receive FIFO Empty */
+#define                TX_DAT_RDY  0x100000   /* Transmit Data Available */
+#define               RX_FIFO_RDY  0x200000   /* Receive Data Available */
+
+/* Bit masks for SDH_STATUS_CLR */
+
+#define         CMD_CRC_FAIL_STAT  0x1        /* CMD CRC Fail Status */
+#define         DAT_CRC_FAIL_STAT  0x2        /* Data CRC Fail Status */
+#define          CMD_TIMEOUT_STAT  0x4        /* CMD Time Out Status */
+#define          DAT_TIMEOUT_STAT  0x8        /* Data Time Out Status */
+#define          TX_UNDERRUN_STAT  0x10       /* Transmit Underrun Status */
+#define           RX_OVERRUN_STAT  0x20       /* Receive Overrun Status */
+#define         CMD_RESP_END_STAT  0x40       /* CMD Response End Status */
+#define             CMD_SENT_STAT  0x80       /* CMD Sent Status */
+#define              DAT_END_STAT  0x100      /* Data End Status */
+#define        START_BIT_ERR_STAT  0x200      /* Start Bit Error Status */
+#define          DAT_BLK_END_STAT  0x400      /* Data Block End Status */
+
+/* Bit masks for SDH_MASK0 */
+
+#define         CMD_CRC_FAIL_MASK  0x1        /* CMD CRC Fail Mask */
+#define         DAT_CRC_FAIL_MASK  0x2        /* Data CRC Fail Mask */
+#define          CMD_TIMEOUT_MASK  0x4        /* CMD Time Out Mask */
+#define          DAT_TIMEOUT_MASK  0x8        /* Data Time Out Mask */
+#define          TX_UNDERRUN_MASK  0x10       /* Transmit Underrun Mask */
+#define           RX_OVERRUN_MASK  0x20       /* Receive Overrun Mask */
+#define         CMD_RESP_END_MASK  0x40       /* CMD Response End Mask */
+#define             CMD_SENT_MASK  0x80       /* CMD Sent Mask */
+#define              DAT_END_MASK  0x100      /* Data End Mask */
+#define        START_BIT_ERR_MASK  0x200      /* Start Bit Error Mask */
+#define          DAT_BLK_END_MASK  0x400      /* Data Block End Mask */
+#define              CMD_ACT_MASK  0x800      /* CMD Active Mask */
+#define               TX_ACT_MASK  0x1000     /* Transmit Active Mask */
+#define               RX_ACT_MASK  0x2000     /* Receive Active Mask */
+#define         TX_FIFO_STAT_MASK  0x4000     /* Transmit FIFO Status Mask */
+#define         RX_FIFO_STAT_MASK  0x8000     /* Receive FIFO Status Mask */
+#define         TX_FIFO_FULL_MASK  0x10000    /* Transmit FIFO Full Mask */
+#define         RX_FIFO_FULL_MASK  0x20000    /* Receive FIFO Full Mask */
+#define         TX_FIFO_ZERO_MASK  0x40000    /* Transmit FIFO Empty Mask */
+#define          RX_DAT_ZERO_MASK  0x80000    /* Receive FIFO Empty Mask */
+#define           TX_DAT_RDY_MASK  0x100000   /* Transmit Data Available Mask */
+#define          RX_FIFO_RDY_MASK  0x200000   /* Receive Data Available Mask */
+
+/* Bit masks for SDH_FIFO_CNT */
+
+#define                FIFO_COUNT  0x7fff     /* FIFO Count */
+
+/* Bit masks for SDH_E_STATUS */
+
+#define              SDIO_INT_DET  0x2        /* SDIO Int Detected */
+#define               SD_CARD_DET  0x10       /* SD Card Detect */
+
+/* Bit masks for SDH_E_MASK */
+
+#define                  SDIO_MSK  0x2        /* Mask SDIO Int Detected */
+#define                   SCD_MSK  0x40       /* Mask Card Detect */
+
+/* Bit masks for SDH_CFG */
+
+#define                   CLKS_EN  0x1        /* Clocks Enable */
+#define                      SD4E  0x4        /* SDIO 4-Bit Enable */
+#define                       MWE  0x8        /* Moving Window Enable */
+#define                    SD_RST  0x10       /* SDMMC Reset */
+#define                 PUP_SDDAT  0x20       /* Pull-up SD_DAT */
+#define                PUP_SDDAT3  0x40       /* Pull-up SD_DAT3 */
+#define                 PD_SDDAT3  0x80       /* Pull-down SD_DAT3 */
+
+/* Bit masks for SDH_RD_WAIT_EN */
+
+#define                       RWR  0x1        /* Read Wait Request */
+
+#endif /* _DEF_BF518_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h b/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
new file mode 100644
index 0000000..1bec8d1
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
@@ -0,0 +1,1940 @@
+/*
+ * File:         include/asm-blackfin/mach-bf518/defBF51x_base.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _DEF_BF51X_H
+#define _DEF_BF51X_H
+
+
+/* ************************************************************** */
+/*   SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF51x    */
+/* ************************************************************** */
+
+/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
+#define PLL_CTL				0xFFC00000	/* PLL Control Register						*/
+#define PLL_DIV				0xFFC00004	/* PLL Divide Register						*/
+#define VR_CTL				0xFFC00008	/* Voltage Regulator Control Register				*/
+#define PLL_STAT			0xFFC0000C	/* PLL Status Register						*/
+#define PLL_LOCKCNT			0xFFC00010	/* PLL Lock Count Register					*/
+#define CHIPID				0xFFC00014	/* Device ID Register */
+
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)								*/
+#define SWRST				0xFFC00100	/* Software Reset Register					*/
+#define SYSCR				0xFFC00104	/* System Configuration Register				*/
+#define SIC_RVECT			0xFFC00108	/* Interrupt Reset Vector Address Register			*/
+
+#define SIC_IMASK0			0xFFC0010C	/* Interrupt Mask Register					*/
+#define SIC_IAR0			0xFFC00110	/* Interrupt Assignment Register 0				*/
+#define SIC_IAR1			0xFFC00114	/* Interrupt Assignment Register 1				*/
+#define SIC_IAR2			0xFFC00118	/* Interrupt Assignment Register 2				*/
+#define SIC_IAR3			0xFFC0011C	/* Interrupt Assignment Register 3				*/
+#define SIC_ISR0			0xFFC00120	/* Interrupt Status Register					*/
+#define SIC_IWR0			0xFFC00124	/* Interrupt Wakeup Register					*/
+
+/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00164) */
+#define SIC_IMASK1                      0xFFC0014C     /* Interrupt Mask Register 1 */
+#define SIC_IAR4                        0xFFC00150     /* Interrupt Assignment Register 4 */
+#define SIC_IAR5                        0xFFC00154     /* Interrupt Assignment Register 5 */
+#define SIC_IAR6                        0xFFC00158     /* Interrupt Assignment Register 6 */
+#define SIC_IAR7                        0xFFC0015C     /* Interrupt Assignment Register 7 */
+#define SIC_ISR1                        0xFFC00160     /* Interrupt Status Register 1 */
+#define SIC_IWR1                        0xFFC00164     /* Interrupt Wakeup Register 1 */
+
+
+/* Watchdog Timer			(0xFFC00200 - 0xFFC002FF)								*/
+#define WDOG_CTL			0xFFC00200	/* Watchdog Control Register				*/
+#define WDOG_CNT			0xFFC00204	/* Watchdog Count Register					*/
+#define WDOG_STAT			0xFFC00208	/* Watchdog Status Register					*/
+
+
+/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
+#define RTC_STAT			0xFFC00300	/* RTC Status Register						*/
+#define RTC_ICTL			0xFFC00304	/* RTC Interrupt Control Register			*/
+#define RTC_ISTAT			0xFFC00308	/* RTC Interrupt Status Register			*/
+#define RTC_SWCNT			0xFFC0030C	/* RTC Stopwatch Count Register				*/
+#define RTC_ALARM			0xFFC00310	/* RTC Alarm Time Register					*/
+#define RTC_FAST			0xFFC00314	/* RTC Prescaler Enable Register			*/
+#define RTC_PREN			0xFFC00314	/* RTC Prescaler Enable Alternate Macro		*/
+
+
+/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
+#define UART0_THR			0xFFC00400	/* Transmit Holding register				*/
+#define UART0_RBR			0xFFC00400	/* Receive Buffer register					*/
+#define UART0_DLL			0xFFC00400	/* Divisor Latch (Low-Byte)					*/
+#define UART0_IER			0xFFC00404	/* Interrupt Enable Register				*/
+#define UART0_DLH			0xFFC00404	/* Divisor Latch (High-Byte)				*/
+#define UART0_IIR			0xFFC00408	/* Interrupt Identification Register		*/
+#define UART0_LCR			0xFFC0040C	/* Line Control Register					*/
+#define UART0_MCR			0xFFC00410	/* Modem Control Register					*/
+#define UART0_LSR			0xFFC00414	/* Line Status Register						*/
+#define UART0_MSR			0xFFC00418	/* Modem Status Register					*/
+#define UART0_SCR			0xFFC0041C	/* SCR Scratch Register						*/
+#define UART0_GCTL			0xFFC00424	/* Global Control Register					*/
+
+/* SPI0 Controller			(0xFFC00500 - 0xFFC005FF)							*/
+#define SPI0_REGBASE			0xFFC00500
+#define SPI0_CTL			0xFFC00500	/* SPI Control Register						*/
+#define SPI0_FLG			0xFFC00504	/* SPI Flag register						*/
+#define SPI0_STAT			0xFFC00508	/* SPI Status register						*/
+#define SPI0_TDBR			0xFFC0050C	/* SPI Transmit Data Buffer Register				*/
+#define SPI0_RDBR			0xFFC00510	/* SPI Receive Data Buffer Register				*/
+#define SPI0_BAUD			0xFFC00514	/* SPI Baud rate Register					*/
+#define SPI0_SHADOW			0xFFC00518	/* SPI_RDBR Shadow Register					*/
+
+/* SPI1 Controller			(0xFFC03400 - 0xFFC034FF)							*/
+#define SPI1_REGBASE			0xFFC03400
+#define SPI1_CTL			0xFFC03400	/* SPI Control Register						*/
+#define SPI1_FLG			0xFFC03404	/* SPI Flag register						*/
+#define SPI1_STAT			0xFFC03408	/* SPI Status register						*/
+#define SPI1_TDBR			0xFFC0340C	/* SPI Transmit Data Buffer Register				*/
+#define SPI1_RDBR			0xFFC03410	/* SPI Receive Data Buffer Register				*/
+#define SPI1_BAUD			0xFFC03414	/* SPI Baud rate Register					*/
+#define SPI1_SHADOW			0xFFC03418	/* SPI_RDBR Shadow Register					*/
+
+/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
+#define TIMER0_CONFIG		0xFFC00600	/* Timer 0 Configuration Register			*/
+#define TIMER0_COUNTER		0xFFC00604	/* Timer 0 Counter Register					*/
+#define TIMER0_PERIOD		0xFFC00608	/* Timer 0 Period Register					*/
+#define TIMER0_WIDTH		0xFFC0060C	/* Timer 0 Width Register					*/
+
+#define TIMER1_CONFIG		0xFFC00610	/* Timer 1 Configuration Register  			*/
+#define TIMER1_COUNTER		0xFFC00614	/* Timer 1 Counter Register        			*/
+#define TIMER1_PERIOD		0xFFC00618	/* Timer 1 Period Register         			*/
+#define TIMER1_WIDTH		0xFFC0061C	/* Timer 1 Width Register          			*/
+
+#define TIMER2_CONFIG		0xFFC00620	/* Timer 2 Configuration Register  			*/
+#define TIMER2_COUNTER		0xFFC00624	/* Timer 2 Counter Register        			*/
+#define TIMER2_PERIOD		0xFFC00628	/* Timer 2 Period Register         			*/
+#define TIMER2_WIDTH		0xFFC0062C	/* Timer 2 Width Register          			*/
+
+#define TIMER3_CONFIG		0xFFC00630	/* Timer 3 Configuration Register			*/
+#define TIMER3_COUNTER		0xFFC00634	/* Timer 3 Counter Register					*/
+#define TIMER3_PERIOD		0xFFC00638	/* Timer 3 Period Register					*/
+#define TIMER3_WIDTH		0xFFC0063C	/* Timer 3 Width Register					*/
+
+#define TIMER4_CONFIG		0xFFC00640	/* Timer 4 Configuration Register  			*/
+#define TIMER4_COUNTER		0xFFC00644	/* Timer 4 Counter Register        			*/
+#define TIMER4_PERIOD		0xFFC00648	/* Timer 4 Period Register         			*/
+#define TIMER4_WIDTH		0xFFC0064C	/* Timer 4 Width Register          			*/
+
+#define TIMER5_CONFIG		0xFFC00650	/* Timer 5 Configuration Register  			*/
+#define TIMER5_COUNTER		0xFFC00654	/* Timer 5 Counter Register        			*/
+#define TIMER5_PERIOD		0xFFC00658	/* Timer 5 Period Register         			*/
+#define TIMER5_WIDTH		0xFFC0065C	/* Timer 5 Width Register          			*/
+
+#define TIMER6_CONFIG		0xFFC00660	/* Timer 6 Configuration Register  			*/
+#define TIMER6_COUNTER		0xFFC00664	/* Timer 6 Counter Register        			*/
+#define TIMER6_PERIOD		0xFFC00668	/* Timer 6 Period Register         			*/
+#define TIMER6_WIDTH		0xFFC0066C	/* Timer 6 Width Register          			*/
+
+#define TIMER7_CONFIG		0xFFC00670	/* Timer 7 Configuration Register  			*/
+#define TIMER7_COUNTER		0xFFC00674	/* Timer 7 Counter Register        			*/
+#define TIMER7_PERIOD		0xFFC00678	/* Timer 7 Period Register         			*/
+#define TIMER7_WIDTH		0xFFC0067C	/* Timer 7 Width Register       			*/
+
+#define TIMER_ENABLE		0xFFC00680	/* Timer Enable Register					*/
+#define TIMER_DISABLE		0xFFC00684	/* Timer Disable Register					*/
+#define TIMER_STATUS		0xFFC00688	/* Timer Status Register					*/
+
+/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)												*/
+#define PORTFIO					0xFFC00700	/* Port F I/O Pin State Specify Register				*/
+#define PORTFIO_CLEAR			0xFFC00704	/* Port F I/O Peripheral Interrupt Clear Register		*/
+#define PORTFIO_SET				0xFFC00708	/* Port F I/O Peripheral Interrupt Set Register			*/
+#define PORTFIO_TOGGLE			0xFFC0070C	/* Port F I/O Pin State Toggle Register					*/
+#define PORTFIO_MASKA			0xFFC00710	/* Port F I/O Mask State Specify Interrupt A Register	*/
+#define PORTFIO_MASKA_CLEAR		0xFFC00714	/* Port F I/O Mask Disable Interrupt A Register			*/
+#define PORTFIO_MASKA_SET		0xFFC00718	/* Port F I/O Mask Enable Interrupt A Register			*/
+#define PORTFIO_MASKA_TOGGLE	0xFFC0071C	/* Port F I/O Mask Toggle Enable Interrupt A Register	*/
+#define PORTFIO_MASKB			0xFFC00720	/* Port F I/O Mask State Specify Interrupt B Register	*/
+#define PORTFIO_MASKB_CLEAR		0xFFC00724	/* Port F I/O Mask Disable Interrupt B Register			*/
+#define PORTFIO_MASKB_SET		0xFFC00728	/* Port F I/O Mask Enable Interrupt B Register			*/
+#define PORTFIO_MASKB_TOGGLE	0xFFC0072C	/* Port F I/O Mask Toggle Enable Interrupt B Register	*/
+#define PORTFIO_DIR				0xFFC00730	/* Port F I/O Direction Register						*/
+#define PORTFIO_POLAR			0xFFC00734	/* Port F I/O Source Polarity Register					*/
+#define PORTFIO_EDGE			0xFFC00738	/* Port F I/O Source Sensitivity Register				*/
+#define PORTFIO_BOTH			0xFFC0073C	/* Port F I/O Set on BOTH Edges Register				*/
+#define PORTFIO_INEN			0xFFC00740	/* Port F I/O Input Enable Register 					*/
+
+/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)										*/
+#define SPORT0_TCR1			0xFFC00800	/* SPORT0 Transmit Configuration 1 Register			*/
+#define SPORT0_TCR2			0xFFC00804	/* SPORT0 Transmit Configuration 2 Register			*/
+#define SPORT0_TCLKDIV		0xFFC00808	/* SPORT0 Transmit Clock Divider					*/
+#define SPORT0_TFSDIV		0xFFC0080C	/* SPORT0 Transmit Frame Sync Divider				*/
+#define SPORT0_TX			0xFFC00810	/* SPORT0 TX Data Register							*/
+#define SPORT0_RX			0xFFC00818	/* SPORT0 RX Data Register							*/
+#define SPORT0_RCR1			0xFFC00820	/* SPORT0 Receive Configuration 1 Register			*/
+#define SPORT0_RCR2			0xFFC00824	/* SPORT0 Receive Configuration 2 Register			*/
+#define SPORT0_RCLKDIV		0xFFC00828	/* SPORT0 Receive Clock Divider						*/
+#define SPORT0_RFSDIV		0xFFC0082C	/* SPORT0 Receive Frame Sync Divider				*/
+#define SPORT0_STAT			0xFFC00830	/* SPORT0 Status Register							*/
+#define SPORT0_CHNL			0xFFC00834	/* SPORT0 Current Channel Register					*/
+#define SPORT0_MCMC1		0xFFC00838	/* SPORT0 Multi-Channel Configuration Register 1	*/
+#define SPORT0_MCMC2		0xFFC0083C	/* SPORT0 Multi-Channel Configuration Register 2	*/
+#define SPORT0_MTCS0		0xFFC00840	/* SPORT0 Multi-Channel Transmit Select Register 0	*/
+#define SPORT0_MTCS1		0xFFC00844	/* SPORT0 Multi-Channel Transmit Select Register 1	*/
+#define SPORT0_MTCS2		0xFFC00848	/* SPORT0 Multi-Channel Transmit Select Register 2	*/
+#define SPORT0_MTCS3		0xFFC0084C	/* SPORT0 Multi-Channel Transmit Select Register 3	*/
+#define SPORT0_MRCS0		0xFFC00850	/* SPORT0 Multi-Channel Receive Select Register 0	*/
+#define SPORT0_MRCS1		0xFFC00854	/* SPORT0 Multi-Channel Receive Select Register 1	*/
+#define SPORT0_MRCS2		0xFFC00858	/* SPORT0 Multi-Channel Receive Select Register 2	*/
+#define SPORT0_MRCS3		0xFFC0085C	/* SPORT0 Multi-Channel Receive Select Register 3	*/
+
+/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)										*/
+#define SPORT1_TCR1			0xFFC00900	/* SPORT1 Transmit Configuration 1 Register			*/
+#define SPORT1_TCR2			0xFFC00904	/* SPORT1 Transmit Configuration 2 Register			*/
+#define SPORT1_TCLKDIV		0xFFC00908	/* SPORT1 Transmit Clock Divider					*/
+#define SPORT1_TFSDIV		0xFFC0090C	/* SPORT1 Transmit Frame Sync Divider				*/
+#define SPORT1_TX			0xFFC00910	/* SPORT1 TX Data Register							*/
+#define SPORT1_RX			0xFFC00918	/* SPORT1 RX Data Register							*/
+#define SPORT1_RCR1			0xFFC00920	/* SPORT1 Receive Configuration 1 Register			*/
+#define SPORT1_RCR2			0xFFC00924	/* SPORT1 Receive Configuration 2 Register			*/
+#define SPORT1_RCLKDIV		0xFFC00928	/* SPORT1 Receive Clock Divider						*/
+#define SPORT1_RFSDIV		0xFFC0092C	/* SPORT1 Receive Frame Sync Divider				*/
+#define SPORT1_STAT			0xFFC00930	/* SPORT1 Status Register							*/
+#define SPORT1_CHNL			0xFFC00934	/* SPORT1 Current Channel Register					*/
+#define SPORT1_MCMC1		0xFFC00938	/* SPORT1 Multi-Channel Configuration Register 1	*/
+#define SPORT1_MCMC2		0xFFC0093C	/* SPORT1 Multi-Channel Configuration Register 2	*/
+#define SPORT1_MTCS0		0xFFC00940	/* SPORT1 Multi-Channel Transmit Select Register 0	*/
+#define SPORT1_MTCS1		0xFFC00944	/* SPORT1 Multi-Channel Transmit Select Register 1	*/
+#define SPORT1_MTCS2		0xFFC00948	/* SPORT1 Multi-Channel Transmit Select Register 2	*/
+#define SPORT1_MTCS3		0xFFC0094C	/* SPORT1 Multi-Channel Transmit Select Register 3	*/
+#define SPORT1_MRCS0		0xFFC00950	/* SPORT1 Multi-Channel Receive Select Register 0	*/
+#define SPORT1_MRCS1		0xFFC00954	/* SPORT1 Multi-Channel Receive Select Register 1	*/
+#define SPORT1_MRCS2		0xFFC00958	/* SPORT1 Multi-Channel Receive Select Register 2	*/
+#define SPORT1_MRCS3		0xFFC0095C	/* SPORT1 Multi-Channel Receive Select Register 3	*/
+
+/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)								*/
+#define EBIU_AMGCTL			0xFFC00A00	/* Asynchronous Memory Global Control Register	*/
+#define EBIU_AMBCTL0		0xFFC00A04	/* Asynchronous Memory Bank Control Register 0	*/
+#define EBIU_AMBCTL1		0xFFC00A08	/* Asynchronous Memory Bank Control Register 1	*/
+#define EBIU_SDGCTL			0xFFC00A10	/* SDRAM Global Control Register				*/
+#define EBIU_SDBCTL			0xFFC00A14	/* SDRAM Bank Control Register					*/
+#define EBIU_SDRRC			0xFFC00A18	/* SDRAM Refresh Rate Control Register			*/
+#define EBIU_SDSTAT			0xFFC00A1C	/* SDRAM Status Register						*/
+
+/* DMA Traffic Control Registers													*/
+#define DMA_TC_PER			0xFFC00B0C	/* Traffic Control Periods Register			*/
+#define DMA_TC_CNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
+
+/* Alternate deprecated register names (below) provided for backwards code compatibility */
+#define DMA_TCPER			0xFFC00B0C	/* Traffic Control Periods Register			*/
+#define DMA_TCCNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
+
+/* DMA Controller (0xFFC00C00 - 0xFFC00FFF)															*/
+#define DMA0_NEXT_DESC_PTR		0xFFC00C00	/* DMA Channel 0 Next Descriptor Pointer Register		*/
+#define DMA0_START_ADDR			0xFFC00C04	/* DMA Channel 0 Start Address Register					*/
+#define DMA0_CONFIG				0xFFC00C08	/* DMA Channel 0 Configuration Register					*/
+#define DMA0_X_COUNT			0xFFC00C10	/* DMA Channel 0 X Count Register						*/
+#define DMA0_X_MODIFY			0xFFC00C14	/* DMA Channel 0 X Modify Register						*/
+#define DMA0_Y_COUNT			0xFFC00C18	/* DMA Channel 0 Y Count Register						*/
+#define DMA0_Y_MODIFY			0xFFC00C1C	/* DMA Channel 0 Y Modify Register						*/
+#define DMA0_CURR_DESC_PTR		0xFFC00C20	/* DMA Channel 0 Current Descriptor Pointer Register	*/
+#define DMA0_CURR_ADDR			0xFFC00C24	/* DMA Channel 0 Current Address Register				*/
+#define DMA0_IRQ_STATUS			0xFFC00C28	/* DMA Channel 0 Interrupt/Status Register				*/
+#define DMA0_PERIPHERAL_MAP		0xFFC00C2C	/* DMA Channel 0 Peripheral Map Register				*/
+#define DMA0_CURR_X_COUNT		0xFFC00C30	/* DMA Channel 0 Current X Count Register				*/
+#define DMA0_CURR_Y_COUNT		0xFFC00C38	/* DMA Channel 0 Current Y Count Register				*/
+
+#define DMA1_NEXT_DESC_PTR		0xFFC00C40	/* DMA Channel 1 Next Descriptor Pointer Register		*/
+#define DMA1_START_ADDR			0xFFC00C44	/* DMA Channel 1 Start Address Register					*/
+#define DMA1_CONFIG				0xFFC00C48	/* DMA Channel 1 Configuration Register					*/
+#define DMA1_X_COUNT			0xFFC00C50	/* DMA Channel 1 X Count Register						*/
+#define DMA1_X_MODIFY			0xFFC00C54	/* DMA Channel 1 X Modify Register						*/
+#define DMA1_Y_COUNT			0xFFC00C58	/* DMA Channel 1 Y Count Register						*/
+#define DMA1_Y_MODIFY			0xFFC00C5C	/* DMA Channel 1 Y Modify Register						*/
+#define DMA1_CURR_DESC_PTR		0xFFC00C60	/* DMA Channel 1 Current Descriptor Pointer Register	*/
+#define DMA1_CURR_ADDR			0xFFC00C64	/* DMA Channel 1 Current Address Register				*/
+#define DMA1_IRQ_STATUS			0xFFC00C68	/* DMA Channel 1 Interrupt/Status Register				*/
+#define DMA1_PERIPHERAL_MAP		0xFFC00C6C	/* DMA Channel 1 Peripheral Map Register				*/
+#define DMA1_CURR_X_COUNT		0xFFC00C70	/* DMA Channel 1 Current X Count Register				*/
+#define DMA1_CURR_Y_COUNT		0xFFC00C78	/* DMA Channel 1 Current Y Count Register				*/
+
+#define DMA2_NEXT_DESC_PTR		0xFFC00C80	/* DMA Channel 2 Next Descriptor Pointer Register		*/
+#define DMA2_START_ADDR			0xFFC00C84	/* DMA Channel 2 Start Address Register					*/
+#define DMA2_CONFIG				0xFFC00C88	/* DMA Channel 2 Configuration Register					*/
+#define DMA2_X_COUNT			0xFFC00C90	/* DMA Channel 2 X Count Register						*/
+#define DMA2_X_MODIFY			0xFFC00C94	/* DMA Channel 2 X Modify Register						*/
+#define DMA2_Y_COUNT			0xFFC00C98	/* DMA Channel 2 Y Count Register						*/
+#define DMA2_Y_MODIFY			0xFFC00C9C	/* DMA Channel 2 Y Modify Register						*/
+#define DMA2_CURR_DESC_PTR		0xFFC00CA0	/* DMA Channel 2 Current Descriptor Pointer Register	*/
+#define DMA2_CURR_ADDR			0xFFC00CA4	/* DMA Channel 2 Current Address Register				*/
+#define DMA2_IRQ_STATUS			0xFFC00CA8	/* DMA Channel 2 Interrupt/Status Register				*/
+#define DMA2_PERIPHERAL_MAP		0xFFC00CAC	/* DMA Channel 2 Peripheral Map Register				*/
+#define DMA2_CURR_X_COUNT		0xFFC00CB0	/* DMA Channel 2 Current X Count Register				*/
+#define DMA2_CURR_Y_COUNT		0xFFC00CB8	/* DMA Channel 2 Current Y Count Register				*/
+
+#define DMA3_NEXT_DESC_PTR		0xFFC00CC0	/* DMA Channel 3 Next Descriptor Pointer Register		*/
+#define DMA3_START_ADDR			0xFFC00CC4	/* DMA Channel 3 Start Address Register					*/
+#define DMA3_CONFIG				0xFFC00CC8	/* DMA Channel 3 Configuration Register					*/
+#define DMA3_X_COUNT			0xFFC00CD0	/* DMA Channel 3 X Count Register						*/
+#define DMA3_X_MODIFY			0xFFC00CD4	/* DMA Channel 3 X Modify Register						*/
+#define DMA3_Y_COUNT			0xFFC00CD8	/* DMA Channel 3 Y Count Register						*/
+#define DMA3_Y_MODIFY			0xFFC00CDC	/* DMA Channel 3 Y Modify Register						*/
+#define DMA3_CURR_DESC_PTR		0xFFC00CE0	/* DMA Channel 3 Current Descriptor Pointer Register	*/
+#define DMA3_CURR_ADDR			0xFFC00CE4	/* DMA Channel 3 Current Address Register				*/
+#define DMA3_IRQ_STATUS			0xFFC00CE8	/* DMA Channel 3 Interrupt/Status Register				*/
+#define DMA3_PERIPHERAL_MAP		0xFFC00CEC	/* DMA Channel 3 Peripheral Map Register				*/
+#define DMA3_CURR_X_COUNT		0xFFC00CF0	/* DMA Channel 3 Current X Count Register				*/
+#define DMA3_CURR_Y_COUNT		0xFFC00CF8	/* DMA Channel 3 Current Y Count Register				*/
+
+#define DMA4_NEXT_DESC_PTR		0xFFC00D00	/* DMA Channel 4 Next Descriptor Pointer Register		*/
+#define DMA4_START_ADDR			0xFFC00D04	/* DMA Channel 4 Start Address Register					*/
+#define DMA4_CONFIG				0xFFC00D08	/* DMA Channel 4 Configuration Register					*/
+#define DMA4_X_COUNT			0xFFC00D10	/* DMA Channel 4 X Count Register						*/
+#define DMA4_X_MODIFY			0xFFC00D14	/* DMA Channel 4 X Modify Register						*/
+#define DMA4_Y_COUNT			0xFFC00D18	/* DMA Channel 4 Y Count Register						*/
+#define DMA4_Y_MODIFY			0xFFC00D1C	/* DMA Channel 4 Y Modify Register						*/
+#define DMA4_CURR_DESC_PTR		0xFFC00D20	/* DMA Channel 4 Current Descriptor Pointer Register	*/
+#define DMA4_CURR_ADDR			0xFFC00D24	/* DMA Channel 4 Current Address Register				*/
+#define DMA4_IRQ_STATUS			0xFFC00D28	/* DMA Channel 4 Interrupt/Status Register				*/
+#define DMA4_PERIPHERAL_MAP		0xFFC00D2C	/* DMA Channel 4 Peripheral Map Register				*/
+#define DMA4_CURR_X_COUNT		0xFFC00D30	/* DMA Channel 4 Current X Count Register				*/
+#define DMA4_CURR_Y_COUNT		0xFFC00D38	/* DMA Channel 4 Current Y Count Register				*/
+
+#define DMA5_NEXT_DESC_PTR		0xFFC00D40	/* DMA Channel 5 Next Descriptor Pointer Register		*/
+#define DMA5_START_ADDR			0xFFC00D44	/* DMA Channel 5 Start Address Register					*/
+#define DMA5_CONFIG				0xFFC00D48	/* DMA Channel 5 Configuration Register					*/
+#define DMA5_X_COUNT			0xFFC00D50	/* DMA Channel 5 X Count Register						*/
+#define DMA5_X_MODIFY			0xFFC00D54	/* DMA Channel 5 X Modify Register						*/
+#define DMA5_Y_COUNT			0xFFC00D58	/* DMA Channel 5 Y Count Register						*/
+#define DMA5_Y_MODIFY			0xFFC00D5C	/* DMA Channel 5 Y Modify Register						*/
+#define DMA5_CURR_DESC_PTR		0xFFC00D60	/* DMA Channel 5 Current Descriptor Pointer Register	*/
+#define DMA5_CURR_ADDR			0xFFC00D64	/* DMA Channel 5 Current Address Register				*/
+#define DMA5_IRQ_STATUS			0xFFC00D68	/* DMA Channel 5 Interrupt/Status Register				*/
+#define DMA5_PERIPHERAL_MAP		0xFFC00D6C	/* DMA Channel 5 Peripheral Map Register				*/
+#define DMA5_CURR_X_COUNT		0xFFC00D70	/* DMA Channel 5 Current X Count Register				*/
+#define DMA5_CURR_Y_COUNT		0xFFC00D78	/* DMA Channel 5 Current Y Count Register				*/
+
+#define DMA6_NEXT_DESC_PTR		0xFFC00D80	/* DMA Channel 6 Next Descriptor Pointer Register		*/
+#define DMA6_START_ADDR			0xFFC00D84	/* DMA Channel 6 Start Address Register					*/
+#define DMA6_CONFIG				0xFFC00D88	/* DMA Channel 6 Configuration Register					*/
+#define DMA6_X_COUNT			0xFFC00D90	/* DMA Channel 6 X Count Register						*/
+#define DMA6_X_MODIFY			0xFFC00D94	/* DMA Channel 6 X Modify Register						*/
+#define DMA6_Y_COUNT			0xFFC00D98	/* DMA Channel 6 Y Count Register						*/
+#define DMA6_Y_MODIFY			0xFFC00D9C	/* DMA Channel 6 Y Modify Register						*/
+#define DMA6_CURR_DESC_PTR		0xFFC00DA0	/* DMA Channel 6 Current Descriptor Pointer Register	*/
+#define DMA6_CURR_ADDR			0xFFC00DA4	/* DMA Channel 6 Current Address Register				*/
+#define DMA6_IRQ_STATUS			0xFFC00DA8	/* DMA Channel 6 Interrupt/Status Register				*/
+#define DMA6_PERIPHERAL_MAP		0xFFC00DAC	/* DMA Channel 6 Peripheral Map Register				*/
+#define DMA6_CURR_X_COUNT		0xFFC00DB0	/* DMA Channel 6 Current X Count Register				*/
+#define DMA6_CURR_Y_COUNT		0xFFC00DB8	/* DMA Channel 6 Current Y Count Register				*/
+
+#define DMA7_NEXT_DESC_PTR		0xFFC00DC0	/* DMA Channel 7 Next Descriptor Pointer Register		*/
+#define DMA7_START_ADDR			0xFFC00DC4	/* DMA Channel 7 Start Address Register					*/
+#define DMA7_CONFIG				0xFFC00DC8	/* DMA Channel 7 Configuration Register					*/
+#define DMA7_X_COUNT			0xFFC00DD0	/* DMA Channel 7 X Count Register						*/
+#define DMA7_X_MODIFY			0xFFC00DD4	/* DMA Channel 7 X Modify Register						*/
+#define DMA7_Y_COUNT			0xFFC00DD8	/* DMA Channel 7 Y Count Register						*/
+#define DMA7_Y_MODIFY			0xFFC00DDC	/* DMA Channel 7 Y Modify Register						*/
+#define DMA7_CURR_DESC_PTR		0xFFC00DE0	/* DMA Channel 7 Current Descriptor Pointer Register	*/
+#define DMA7_CURR_ADDR			0xFFC00DE4	/* DMA Channel 7 Current Address Register				*/
+#define DMA7_IRQ_STATUS			0xFFC00DE8	/* DMA Channel 7 Interrupt/Status Register				*/
+#define DMA7_PERIPHERAL_MAP		0xFFC00DEC	/* DMA Channel 7 Peripheral Map Register				*/
+#define DMA7_CURR_X_COUNT		0xFFC00DF0	/* DMA Channel 7 Current X Count Register				*/
+#define DMA7_CURR_Y_COUNT		0xFFC00DF8	/* DMA Channel 7 Current Y Count Register				*/
+
+#define DMA8_NEXT_DESC_PTR		0xFFC00E00	/* DMA Channel 8 Next Descriptor Pointer Register		*/
+#define DMA8_START_ADDR			0xFFC00E04	/* DMA Channel 8 Start Address Register					*/
+#define DMA8_CONFIG				0xFFC00E08	/* DMA Channel 8 Configuration Register					*/
+#define DMA8_X_COUNT			0xFFC00E10	/* DMA Channel 8 X Count Register						*/
+#define DMA8_X_MODIFY			0xFFC00E14	/* DMA Channel 8 X Modify Register						*/
+#define DMA8_Y_COUNT			0xFFC00E18	/* DMA Channel 8 Y Count Register						*/
+#define DMA8_Y_MODIFY			0xFFC00E1C	/* DMA Channel 8 Y Modify Register						*/
+#define DMA8_CURR_DESC_PTR		0xFFC00E20	/* DMA Channel 8 Current Descriptor Pointer Register	*/
+#define DMA8_CURR_ADDR			0xFFC00E24	/* DMA Channel 8 Current Address Register				*/
+#define DMA8_IRQ_STATUS			0xFFC00E28	/* DMA Channel 8 Interrupt/Status Register				*/
+#define DMA8_PERIPHERAL_MAP		0xFFC00E2C	/* DMA Channel 8 Peripheral Map Register				*/
+#define DMA8_CURR_X_COUNT		0xFFC00E30	/* DMA Channel 8 Current X Count Register				*/
+#define DMA8_CURR_Y_COUNT		0xFFC00E38	/* DMA Channel 8 Current Y Count Register				*/
+
+#define DMA9_NEXT_DESC_PTR		0xFFC00E40	/* DMA Channel 9 Next Descriptor Pointer Register		*/
+#define DMA9_START_ADDR			0xFFC00E44	/* DMA Channel 9 Start Address Register					*/
+#define DMA9_CONFIG				0xFFC00E48	/* DMA Channel 9 Configuration Register					*/
+#define DMA9_X_COUNT			0xFFC00E50	/* DMA Channel 9 X Count Register						*/
+#define DMA9_X_MODIFY			0xFFC00E54	/* DMA Channel 9 X Modify Register						*/
+#define DMA9_Y_COUNT			0xFFC00E58	/* DMA Channel 9 Y Count Register						*/
+#define DMA9_Y_MODIFY			0xFFC00E5C	/* DMA Channel 9 Y Modify Register						*/
+#define DMA9_CURR_DESC_PTR		0xFFC00E60	/* DMA Channel 9 Current Descriptor Pointer Register	*/
+#define DMA9_CURR_ADDR			0xFFC00E64	/* DMA Channel 9 Current Address Register				*/
+#define DMA9_IRQ_STATUS			0xFFC00E68	/* DMA Channel 9 Interrupt/Status Register				*/
+#define DMA9_PERIPHERAL_MAP		0xFFC00E6C	/* DMA Channel 9 Peripheral Map Register				*/
+#define DMA9_CURR_X_COUNT		0xFFC00E70	/* DMA Channel 9 Current X Count Register				*/
+#define DMA9_CURR_Y_COUNT		0xFFC00E78	/* DMA Channel 9 Current Y Count Register				*/
+
+#define DMA10_NEXT_DESC_PTR		0xFFC00E80	/* DMA Channel 10 Next Descriptor Pointer Register		*/
+#define DMA10_START_ADDR		0xFFC00E84	/* DMA Channel 10 Start Address Register				*/
+#define DMA10_CONFIG			0xFFC00E88	/* DMA Channel 10 Configuration Register				*/
+#define DMA10_X_COUNT			0xFFC00E90	/* DMA Channel 10 X Count Register						*/
+#define DMA10_X_MODIFY			0xFFC00E94	/* DMA Channel 10 X Modify Register						*/
+#define DMA10_Y_COUNT			0xFFC00E98	/* DMA Channel 10 Y Count Register						*/
+#define DMA10_Y_MODIFY			0xFFC00E9C	/* DMA Channel 10 Y Modify Register						*/
+#define DMA10_CURR_DESC_PTR		0xFFC00EA0	/* DMA Channel 10 Current Descriptor Pointer Register	*/
+#define DMA10_CURR_ADDR			0xFFC00EA4	/* DMA Channel 10 Current Address Register				*/
+#define DMA10_IRQ_STATUS		0xFFC00EA8	/* DMA Channel 10 Interrupt/Status Register				*/
+#define DMA10_PERIPHERAL_MAP	0xFFC00EAC	/* DMA Channel 10 Peripheral Map Register				*/
+#define DMA10_CURR_X_COUNT		0xFFC00EB0	/* DMA Channel 10 Current X Count Register				*/
+#define DMA10_CURR_Y_COUNT		0xFFC00EB8	/* DMA Channel 10 Current Y Count Register				*/
+
+#define DMA11_NEXT_DESC_PTR		0xFFC00EC0	/* DMA Channel 11 Next Descriptor Pointer Register		*/
+#define DMA11_START_ADDR		0xFFC00EC4	/* DMA Channel 11 Start Address Register				*/
+#define DMA11_CONFIG			0xFFC00EC8	/* DMA Channel 11 Configuration Register				*/
+#define DMA11_X_COUNT			0xFFC00ED0	/* DMA Channel 11 X Count Register						*/
+#define DMA11_X_MODIFY			0xFFC00ED4	/* DMA Channel 11 X Modify Register						*/
+#define DMA11_Y_COUNT			0xFFC00ED8	/* DMA Channel 11 Y Count Register						*/
+#define DMA11_Y_MODIFY			0xFFC00EDC	/* DMA Channel 11 Y Modify Register						*/
+#define DMA11_CURR_DESC_PTR		0xFFC00EE0	/* DMA Channel 11 Current Descriptor Pointer Register	*/
+#define DMA11_CURR_ADDR			0xFFC00EE4	/* DMA Channel 11 Current Address Register				*/
+#define DMA11_IRQ_STATUS		0xFFC00EE8	/* DMA Channel 11 Interrupt/Status Register				*/
+#define DMA11_PERIPHERAL_MAP	0xFFC00EEC	/* DMA Channel 11 Peripheral Map Register				*/
+#define DMA11_CURR_X_COUNT		0xFFC00EF0	/* DMA Channel 11 Current X Count Register				*/
+#define DMA11_CURR_Y_COUNT		0xFFC00EF8	/* DMA Channel 11 Current Y Count Register				*/
+
+#define MDMA_D0_NEXT_DESC_PTR	0xFFC00F00	/* MemDMA Stream 0 Destination Next Descriptor Pointer Register		*/
+#define MDMA_D0_START_ADDR		0xFFC00F04	/* MemDMA Stream 0 Destination Start Address Register				*/
+#define MDMA_D0_CONFIG			0xFFC00F08	/* MemDMA Stream 0 Destination Configuration Register				*/
+#define MDMA_D0_X_COUNT			0xFFC00F10	/* MemDMA Stream 0 Destination X Count Register						*/
+#define MDMA_D0_X_MODIFY		0xFFC00F14	/* MemDMA Stream 0 Destination X Modify Register					*/
+#define MDMA_D0_Y_COUNT			0xFFC00F18	/* MemDMA Stream 0 Destination Y Count Register						*/
+#define MDMA_D0_Y_MODIFY		0xFFC00F1C	/* MemDMA Stream 0 Destination Y Modify Register					*/
+#define MDMA_D0_CURR_DESC_PTR	0xFFC00F20	/* MemDMA Stream 0 Destination Current Descriptor Pointer Register	*/
+#define MDMA_D0_CURR_ADDR		0xFFC00F24	/* MemDMA Stream 0 Destination Current Address Register				*/
+#define MDMA_D0_IRQ_STATUS		0xFFC00F28	/* MemDMA Stream 0 Destination Interrupt/Status Register			*/
+#define MDMA_D0_PERIPHERAL_MAP	0xFFC00F2C	/* MemDMA Stream 0 Destination Peripheral Map Register				*/
+#define MDMA_D0_CURR_X_COUNT	0xFFC00F30	/* MemDMA Stream 0 Destination Current X Count Register				*/
+#define MDMA_D0_CURR_Y_COUNT	0xFFC00F38	/* MemDMA Stream 0 Destination Current Y Count Register				*/
+
+#define MDMA_S0_NEXT_DESC_PTR	0xFFC00F40	/* MemDMA Stream 0 Source Next Descriptor Pointer Register			*/
+#define MDMA_S0_START_ADDR		0xFFC00F44	/* MemDMA Stream 0 Source Start Address Register					*/
+#define MDMA_S0_CONFIG			0xFFC00F48	/* MemDMA Stream 0 Source Configuration Register					*/
+#define MDMA_S0_X_COUNT			0xFFC00F50	/* MemDMA Stream 0 Source X Count Register							*/
+#define MDMA_S0_X_MODIFY		0xFFC00F54	/* MemDMA Stream 0 Source X Modify Register							*/
+#define MDMA_S0_Y_COUNT			0xFFC00F58	/* MemDMA Stream 0 Source Y Count Register							*/
+#define MDMA_S0_Y_MODIFY		0xFFC00F5C	/* MemDMA Stream 0 Source Y Modify Register							*/
+#define MDMA_S0_CURR_DESC_PTR	0xFFC00F60	/* MemDMA Stream 0 Source Current Descriptor Pointer Register		*/
+#define MDMA_S0_CURR_ADDR		0xFFC00F64	/* MemDMA Stream 0 Source Current Address Register					*/
+#define MDMA_S0_IRQ_STATUS		0xFFC00F68	/* MemDMA Stream 0 Source Interrupt/Status Register					*/
+#define MDMA_S0_PERIPHERAL_MAP	0xFFC00F6C	/* MemDMA Stream 0 Source Peripheral Map Register					*/
+#define MDMA_S0_CURR_X_COUNT	0xFFC00F70	/* MemDMA Stream 0 Source Current X Count Register					*/
+#define MDMA_S0_CURR_Y_COUNT	0xFFC00F78	/* MemDMA Stream 0 Source Current Y Count Register					*/
+
+#define MDMA_D1_NEXT_DESC_PTR	0xFFC00F80	/* MemDMA Stream 1 Destination Next Descriptor Pointer Register		*/
+#define MDMA_D1_START_ADDR		0xFFC00F84	/* MemDMA Stream 1 Destination Start Address Register				*/
+#define MDMA_D1_CONFIG			0xFFC00F88	/* MemDMA Stream 1 Destination Configuration Register				*/
+#define MDMA_D1_X_COUNT			0xFFC00F90	/* MemDMA Stream 1 Destination X Count Register						*/
+#define MDMA_D1_X_MODIFY		0xFFC00F94	/* MemDMA Stream 1 Destination X Modify Register					*/
+#define MDMA_D1_Y_COUNT			0xFFC00F98	/* MemDMA Stream 1 Destination Y Count Register						*/
+#define MDMA_D1_Y_MODIFY		0xFFC00F9C	/* MemDMA Stream 1 Destination Y Modify Register					*/
+#define MDMA_D1_CURR_DESC_PTR	0xFFC00FA0	/* MemDMA Stream 1 Destination Current Descriptor Pointer Register	*/
+#define MDMA_D1_CURR_ADDR		0xFFC00FA4	/* MemDMA Stream 1 Destination Current Address Register				*/
+#define MDMA_D1_IRQ_STATUS		0xFFC00FA8	/* MemDMA Stream 1 Destination Interrupt/Status Register			*/
+#define MDMA_D1_PERIPHERAL_MAP	0xFFC00FAC	/* MemDMA Stream 1 Destination Peripheral Map Register				*/
+#define MDMA_D1_CURR_X_COUNT	0xFFC00FB0	/* MemDMA Stream 1 Destination Current X Count Register				*/
+#define MDMA_D1_CURR_Y_COUNT	0xFFC00FB8	/* MemDMA Stream 1 Destination Current Y Count Register				*/
+
+#define MDMA_S1_NEXT_DESC_PTR	0xFFC00FC0	/* MemDMA Stream 1 Source Next Descriptor Pointer Register			*/
+#define MDMA_S1_START_ADDR		0xFFC00FC4	/* MemDMA Stream 1 Source Start Address Register					*/
+#define MDMA_S1_CONFIG			0xFFC00FC8	/* MemDMA Stream 1 Source Configuration Register					*/
+#define MDMA_S1_X_COUNT			0xFFC00FD0	/* MemDMA Stream 1 Source X Count Register							*/
+#define MDMA_S1_X_MODIFY		0xFFC00FD4	/* MemDMA Stream 1 Source X Modify Register							*/
+#define MDMA_S1_Y_COUNT			0xFFC00FD8	/* MemDMA Stream 1 Source Y Count Register							*/
+#define MDMA_S1_Y_MODIFY		0xFFC00FDC	/* MemDMA Stream 1 Source Y Modify Register							*/
+#define MDMA_S1_CURR_DESC_PTR	0xFFC00FE0	/* MemDMA Stream 1 Source Current Descriptor Pointer Register		*/
+#define MDMA_S1_CURR_ADDR		0xFFC00FE4	/* MemDMA Stream 1 Source Current Address Register					*/
+#define MDMA_S1_IRQ_STATUS		0xFFC00FE8	/* MemDMA Stream 1 Source Interrupt/Status Register					*/
+#define MDMA_S1_PERIPHERAL_MAP	0xFFC00FEC	/* MemDMA Stream 1 Source Peripheral Map Register					*/
+#define MDMA_S1_CURR_X_COUNT	0xFFC00FF0	/* MemDMA Stream 1 Source Current X Count Register					*/
+#define MDMA_S1_CURR_Y_COUNT	0xFFC00FF8	/* MemDMA Stream 1 Source Current Y Count Register					*/
+
+
+/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)				*/
+#define PPI_CONTROL			0xFFC01000	/* PPI Control Register			*/
+#define PPI_STATUS			0xFFC01004	/* PPI Status Register			*/
+#define PPI_COUNT			0xFFC01008	/* PPI Transfer Count Register	*/
+#define PPI_DELAY			0xFFC0100C	/* PPI Delay Count Register		*/
+#define PPI_FRAME			0xFFC01010	/* PPI Frame Length Register	*/
+
+
+/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
+#define TWI0_REGBASE			0xFFC01400
+#define TWI_CLKDIV			0xFFC01400	/* Serial Clock Divider Register			*/
+#define TWI_CONTROL			0xFFC01404	/* TWI Control Register						*/
+#define TWI_SLAVE_CTL		0xFFC01408	/* Slave Mode Control Register				*/
+#define TWI_SLAVE_STAT		0xFFC0140C	/* Slave Mode Status Register				*/
+#define TWI_SLAVE_ADDR		0xFFC01410	/* Slave Mode Address Register				*/
+#define TWI_MASTER_CTL		0xFFC01414	/* Master Mode Control Register				*/
+#define TWI_MASTER_STAT		0xFFC01418	/* Master Mode Status Register				*/
+#define TWI_MASTER_ADDR		0xFFC0141C	/* Master Mode Address Register				*/
+#define TWI_INT_STAT		0xFFC01420	/* TWI Interrupt Status Register			*/
+#define TWI_INT_MASK		0xFFC01424	/* TWI Master Interrupt Mask Register		*/
+#define TWI_FIFO_CTL		0xFFC01428	/* FIFO Control Register					*/
+#define TWI_FIFO_STAT		0xFFC0142C	/* FIFO Status Register						*/
+#define TWI_XMT_DATA8		0xFFC01480	/* FIFO Transmit Data Single Byte Register	*/
+#define TWI_XMT_DATA16		0xFFC01484	/* FIFO Transmit Data Double Byte Register	*/
+#define TWI_RCV_DATA8		0xFFC01488	/* FIFO Receive Data Single Byte Register	*/
+#define TWI_RCV_DATA16		0xFFC0148C	/* FIFO Receive Data Double Byte Register	*/
+
+
+/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)												*/
+#define PORTGIO					0xFFC01500	/* Port G I/O Pin State Specify Register				*/
+#define PORTGIO_CLEAR			0xFFC01504	/* Port G I/O Peripheral Interrupt Clear Register		*/
+#define PORTGIO_SET				0xFFC01508	/* Port G I/O Peripheral Interrupt Set Register			*/
+#define PORTGIO_TOGGLE			0xFFC0150C	/* Port G I/O Pin State Toggle Register					*/
+#define PORTGIO_MASKA			0xFFC01510	/* Port G I/O Mask State Specify Interrupt A Register	*/
+#define PORTGIO_MASKA_CLEAR		0xFFC01514	/* Port G I/O Mask Disable Interrupt A Register			*/
+#define PORTGIO_MASKA_SET		0xFFC01518	/* Port G I/O Mask Enable Interrupt A Register			*/
+#define PORTGIO_MASKA_TOGGLE	0xFFC0151C	/* Port G I/O Mask Toggle Enable Interrupt A Register	*/
+#define PORTGIO_MASKB			0xFFC01520	/* Port G I/O Mask State Specify Interrupt B Register	*/
+#define PORTGIO_MASKB_CLEAR		0xFFC01524	/* Port G I/O Mask Disable Interrupt B Register			*/
+#define PORTGIO_MASKB_SET		0xFFC01528	/* Port G I/O Mask Enable Interrupt B Register			*/
+#define PORTGIO_MASKB_TOGGLE	0xFFC0152C	/* Port G I/O Mask Toggle Enable Interrupt B Register	*/
+#define PORTGIO_DIR				0xFFC01530	/* Port G I/O Direction Register						*/
+#define PORTGIO_POLAR			0xFFC01534	/* Port G I/O Source Polarity Register					*/
+#define PORTGIO_EDGE			0xFFC01538	/* Port G I/O Source Sensitivity Register				*/
+#define PORTGIO_BOTH			0xFFC0153C	/* Port G I/O Set on BOTH Edges Register				*/
+#define PORTGIO_INEN			0xFFC01540	/* Port G I/O Input Enable Register						*/
+
+
+/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)												*/
+#define PORTHIO					0xFFC01700	/* Port H I/O Pin State Specify Register				*/
+#define PORTHIO_CLEAR			0xFFC01704	/* Port H I/O Peripheral Interrupt Clear Register		*/
+#define PORTHIO_SET				0xFFC01708	/* Port H I/O Peripheral Interrupt Set Register			*/
+#define PORTHIO_TOGGLE			0xFFC0170C	/* Port H I/O Pin State Toggle Register					*/
+#define PORTHIO_MASKA			0xFFC01710	/* Port H I/O Mask State Specify Interrupt A Register	*/
+#define PORTHIO_MASKA_CLEAR		0xFFC01714	/* Port H I/O Mask Disable Interrupt A Register			*/
+#define PORTHIO_MASKA_SET		0xFFC01718	/* Port H I/O Mask Enable Interrupt A Register			*/
+#define PORTHIO_MASKA_TOGGLE	0xFFC0171C	/* Port H I/O Mask Toggle Enable Interrupt A Register	*/
+#define PORTHIO_MASKB			0xFFC01720	/* Port H I/O Mask State Specify Interrupt B Register	*/
+#define PORTHIO_MASKB_CLEAR		0xFFC01724	/* Port H I/O Mask Disable Interrupt B Register			*/
+#define PORTHIO_MASKB_SET		0xFFC01728	/* Port H I/O Mask Enable Interrupt B Register			*/
+#define PORTHIO_MASKB_TOGGLE	0xFFC0172C	/* Port H I/O Mask Toggle Enable Interrupt B Register	*/
+#define PORTHIO_DIR				0xFFC01730	/* Port H I/O Direction Register						*/
+#define PORTHIO_POLAR			0xFFC01734	/* Port H I/O Source Polarity Register					*/
+#define PORTHIO_EDGE			0xFFC01738	/* Port H I/O Source Sensitivity Register				*/
+#define PORTHIO_BOTH			0xFFC0173C	/* Port H I/O Set on BOTH Edges Register				*/
+#define PORTHIO_INEN			0xFFC01740	/* Port H I/O Input Enable Register						*/
+
+
+/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
+#define UART1_THR			0xFFC02000	/* Transmit Holding register			*/
+#define UART1_RBR			0xFFC02000	/* Receive Buffer register				*/
+#define UART1_DLL			0xFFC02000	/* Divisor Latch (Low-Byte)				*/
+#define UART1_IER			0xFFC02004	/* Interrupt Enable Register			*/
+#define UART1_DLH			0xFFC02004	/* Divisor Latch (High-Byte)			*/
+#define UART1_IIR			0xFFC02008	/* Interrupt Identification Register	*/
+#define UART1_LCR			0xFFC0200C	/* Line Control Register				*/
+#define UART1_MCR			0xFFC02010	/* Modem Control Register				*/
+#define UART1_LSR			0xFFC02014	/* Line Status Register					*/
+#define UART1_MSR			0xFFC02018	/* Modem Status Register				*/
+#define UART1_SCR			0xFFC0201C	/* SCR Scratch Register					*/
+#define UART1_GCTL			0xFFC02024	/* Global Control Register				*/
+
+
+/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)											*/
+#define PORTF_FER			0xFFC03200	/* Port F Function Enable Register (Alternate/Flag*)	*/
+#define PORTG_FER			0xFFC03204	/* Port G Function Enable Register (Alternate/Flag*)	*/
+#define PORTH_FER			0xFFC03208	/* Port H Function Enable Register (Alternate/Flag*)	*/
+#define BFIN_PORT_MUX			0xFFC0320C	/* Port Multiplexer Control Register					*/
+
+
+/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)										*/
+#define HMDMA0_CONTROL		0xFFC03300	/* Handshake MDMA0 Control Register					*/
+#define HMDMA0_ECINIT		0xFFC03304	/* HMDMA0 Initial Edge Count Register				*/
+#define HMDMA0_BCINIT		0xFFC03308	/* HMDMA0 Initial Block Count Register				*/
+#define HMDMA0_ECURGENT		0xFFC0330C	/* HMDMA0 Urgent Edge Count Threshold Register		*/
+#define HMDMA0_ECOVERFLOW	0xFFC03310	/* HMDMA0 Edge Count Overflow Interrupt Register	*/
+#define HMDMA0_ECOUNT		0xFFC03314	/* HMDMA0 Current Edge Count Register				*/
+#define HMDMA0_BCOUNT		0xFFC03318	/* HMDMA0 Current Block Count Register				*/
+
+#define HMDMA1_CONTROL		0xFFC03340	/* Handshake MDMA1 Control Register					*/
+#define HMDMA1_ECINIT		0xFFC03344	/* HMDMA1 Initial Edge Count Register				*/
+#define HMDMA1_BCINIT		0xFFC03348	/* HMDMA1 Initial Block Count Register				*/
+#define HMDMA1_ECURGENT		0xFFC0334C	/* HMDMA1 Urgent Edge Count Threshold Register		*/
+#define HMDMA1_ECOVERFLOW	0xFFC03350	/* HMDMA1 Edge Count Overflow Interrupt Register	*/
+#define HMDMA1_ECOUNT		0xFFC03354	/* HMDMA1 Current Edge Count Register				*/
+#define HMDMA1_BCOUNT		0xFFC03358	/* HMDMA1 Current Block Count Register				*/
+
+
+/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
+#define PORTF_MUX               0xFFC03210      /* Port F mux control */
+#define PORTG_MUX               0xFFC03214      /* Port G mux control */
+#define PORTH_MUX               0xFFC03218      /* Port H mux control */
+#define PORTF_DRIVE             0xFFC03220      /* Port F drive strength control */
+#define PORTG_DRIVE             0xFFC03224      /* Port G drive strength control */
+#define PORTH_DRIVE             0xFFC03228      /* Port H drive strength control */
+#define PORTF_SLEW              0xFFC03230      /* Port F slew control */
+#define PORTG_SLEW              0xFFC03234      /* Port G slew control */
+#define PORTH_SLEW              0xFFC03238      /* Port H slew control */
+#define PORTF_HYSTERISIS        0xFFC03240      /* Port F Schmitt trigger control */
+#define PORTG_HYSTERISIS        0xFFC03244      /* Port G Schmitt trigger control */
+#define PORTH_HYSTERISIS        0xFFC03248      /* Port H Schmitt trigger control */
+#define MISCPORT_DRIVE          0xFFC03280      /* Misc Port drive strength control */
+#define MISCPORT_SLEW           0xFFC03284      /* Misc Port slew control */
+#define MISCPORT_HYSTERISIS     0xFFC03288      /* Misc Port Schmitt trigger control */
+
+
+/***********************************************************************************
+** System MMR Register Bits And Macros
+**
+** Disclaimer:	All macros are intended to make C and Assembly code more readable.
+**				Use these macros carefully: any macro that does a left shift for
+**				field depositing will destroy the lower-order bits.  Such a macro
+**				should be used as part of an OR to initialize a register and NOT
+**				as a dynamic modifier UNLESS the lower-order bits are saved and
+**				OR'd back in when the macro is used.
+*************************************************************************************/
+/*
+** ********************* PLL AND RESET MASKS ****************************************/
+/* PLL_CTL Masks																	*/
+#define DF				0x0001	/* 0: PLL = CLKIN, 1: PLL = CLKIN/2					*/
+#define PLL_OFF			0x0002	/* PLL Not Powered									*/
+#define STOPCK			0x0008	/* Core Clock Off									*/
+#define PDWN			0x0020	/* Enter Deep Sleep Mode							*/
+#define	IN_DELAY		0x0040	/* Add 200ps Delay To EBIU Input Latches			*/
+#define	OUT_DELAY		0x0080	/* Add 200ps Delay To EBIU Output Signals			*/
+#define BYPASS			0x0100	/* Bypass the PLL									*/
+#define	MSEL			0x7E00	/* Multiplier Select For CCLK/VCO Factors			*/
+/* PLL_CTL Macros (Only Use With Logic OR While Setting Lower Order Bits)			*/
+#define	SET_MSEL(x)		(((x)&0x3F) << 0x9)	/* Set MSEL = 0-63 --> VCO = CLKIN*MSEL		*/
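+/* Usage sketch (editor's illustration, not from the original header): per the
+** disclaimer above, a left-shifting macro such as SET_MSEL() should only be OR'd
+** into a register value built from scratch, e.g.
+**     pll_ctl_val = SET_MSEL(20) | DF;    (hypothetical multiplier of 20, CLKIN/2 input)
+** and not OR'd into a live register read without first clearing the MSEL field.	*/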
+
+/* PLL_DIV Masks														*/
+#define SSEL			0x000F	/* System Select						*/
+#define	CSEL			0x0030	/* Core Select							*/
+#define CSEL_DIV1		0x0000	/* 		CCLK = VCO / 1					*/
+#define CSEL_DIV2		0x0010	/* 		CCLK = VCO / 2					*/
+#define	CSEL_DIV4		0x0020	/* 		CCLK = VCO / 4					*/
+#define	CSEL_DIV8		0x0030	/* 		CCLK = VCO / 8					*/
+/* PLL_DIV Macros														*/
+#define SET_SSEL(x)		((x)&0xF)		/* Set SSEL = 0-15 --> SCLK = VCO/SSEL	*/
+
+/* VR_CTL Masks	*/
+#define	FREQ			0x3000	/* Switching Oscillator Frequency For Regulator	*/
+#define	HIBERNATE		0x0000	/* 		Powerdown/Bypass On-Board Regulation	*/
+
+#define	VLEV			0x00F0	/* Internal Voltage Level					*/
+#define	VLEV_085 		0x0060	/* 		VLEV = 0.85 V (-5% - +10% Accuracy)	*/
+#define	VLEV_090		0x0070	/* 		VLEV = 0.90 V (-5% - +10% Accuracy)	*/
+#define	VLEV_095		0x0080	/* 		VLEV = 0.95 V (-5% - +10% Accuracy)	*/
+#define	VLEV_100		0x0090	/* 		VLEV = 1.00 V (-5% - +10% Accuracy)	*/
+#define	VLEV_105		0x00A0	/* 		VLEV = 1.05 V (-5% - +10% Accuracy)	*/
+#define	VLEV_110		0x00B0	/* 		VLEV = 1.10 V (-5% - +10% Accuracy)	*/
+#define	VLEV_115		0x00C0	/* 		VLEV = 1.15 V (-5% - +10% Accuracy)	*/
+#define	VLEV_120		0x00D0	/* 		VLEV = 1.20 V (-5% - +10% Accuracy)	*/
+#define	VLEV_125		0x00E0	/* 		VLEV = 1.25 V (-5% - +10% Accuracy)	*/
+#define	VLEV_130		0x00F0	/* 		VLEV = 1.30 V (-5% - +10% Accuracy)	*/
+
+#define	WAKE			0x0100	/* Enable RTC/Reset Wakeup From Hibernate	*/
+#define	USBWE			0x0200	/* Enable USB Wakeup From Hibernate			*/
+#define	PHYWE			0x0400	/* Enable PHY Wakeup From Hibernate			*/
+#define	CLKBUFOE		0x4000	/* CLKIN Buffer Output Enable */
+#define	PHYCLKOE		CLKBUFOE	/* Alternative legacy name for the above */
+#define	SCKELOW		0x8000	/* Enable Drive CKE Low During Reset		*/
+
+/* PLL_STAT Masks																	*/
+#define ACTIVE_PLLENABLED	0x0001	/* Processor In Active Mode With PLL Enabled	*/
+#define	FULL_ON				0x0002	/* Processor In Full On Mode					*/
+#define ACTIVE_PLLDISABLED	0x0004	/* Processor In Active Mode With PLL Disabled	*/
+#define	PLL_LOCKED			0x0020	/* PLL_LOCKCNT Has Been Reached					*/
+
+/* CHIPID Masks */
+#define CHIPID_VERSION         0xF0000000
+#define CHIPID_FAMILY          0x0FFFF000
+#define CHIPID_MANUFACTURE     0x00000FFE
+
+/* SWRST Masks																		*/
+#define SYSTEM_RESET		0x0007	/* Initiates A System Software Reset			*/
+#define	DOUBLE_FAULT		0x0008	/* Core Double Fault Causes Reset				*/
+#define RESET_DOUBLE		0x2000	/* SW Reset Generated By Core Double-Fault		*/
+#define RESET_WDOG			0x4000	/* SW Reset Generated By Watchdog Timer			*/
+#define RESET_SOFTWARE		0x8000	/* SW Reset Occurred Since Last Read Of SWRST	*/
+
+/* SYSCR Masks																				*/
+#define BMODE				0x0007	/* Boot Mode - Latched During HW Reset From Mode Pins	*/
+#define	NOBOOT				0x0010	/* Execute From L1 or ASYNC Bank 0 When BMODE = 0		*/
+
+
+/* *************  SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
+/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK										*/
+
+#if 0
+#define IRQ_PLL_WAKEUP	0x00000001	/* PLL Wakeup Interrupt			 					*/
+
+#define IRQ_ERROR1      0x00000002  /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
+#define IRQ_ERROR2      0x00000004  /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
+#define IRQ_RTC			0x00000008	/* Real Time Clock Interrupt 						*/
+#define IRQ_DMA0		0x00000010	/* DMA Channel 0 (PPI) Interrupt 					*/
+#define IRQ_DMA3		0x00000020	/* DMA Channel 3 (SPORT0 RX) Interrupt 				*/
+#define IRQ_DMA4		0x00000040	/* DMA Channel 4 (SPORT0 TX) Interrupt 				*/
+#define IRQ_DMA5		0x00000080	/* DMA Channel 5 (SPORT1 RX) Interrupt 				*/
+
+#define IRQ_DMA6		0x00000100	/* DMA Channel 6 (SPORT1 TX) Interrupt 		 		*/
+#define IRQ_TWI			0x00000200	/* TWI Interrupt									*/
+#define IRQ_DMA7		0x00000400	/* DMA Channel 7 (SPI) Interrupt 					*/
+#define IRQ_DMA8		0x00000800	/* DMA Channel 8 (UART0 RX) Interrupt 				*/
+#define IRQ_DMA9		0x00001000	/* DMA Channel 9 (UART0 TX) Interrupt 				*/
+#define IRQ_DMA10		0x00002000	/* DMA Channel 10 (UART1 RX) Interrupt 				*/
+#define IRQ_DMA11		0x00004000	/* DMA Channel 11 (UART1 TX) Interrupt 				*/
+#define IRQ_CAN_RX		0x00008000	/* CAN Receive Interrupt 							*/
+
+#define IRQ_CAN_TX		0x00010000	/* CAN Transmit Interrupt  							*/
+#define IRQ_DMA1		0x00020000	/* DMA Channel 1 (Ethernet RX) Interrupt 			*/
+#define IRQ_PFA_PORTH	0x00020000	/* PF Port H (PF47:32) Interrupt A 					*/
+#define IRQ_DMA2		0x00040000	/* DMA Channel 2 (Ethernet TX) Interrupt 			*/
+#define IRQ_PFB_PORTH	0x00040000	/* PF Port H (PF47:32) Interrupt B 					*/
+#define IRQ_TIMER0		0x00080000	/* Timer 0 Interrupt								*/
+#define IRQ_TIMER1		0x00100000	/* Timer 1 Interrupt 								*/
+#define IRQ_TIMER2		0x00200000	/* Timer 2 Interrupt 								*/
+#define IRQ_TIMER3		0x00400000	/* Timer 3 Interrupt 								*/
+#define IRQ_TIMER4		0x00800000	/* Timer 4 Interrupt 								*/
+
+#define IRQ_TIMER5		0x01000000	/* Timer 5 Interrupt 								*/
+#define IRQ_TIMER6		0x02000000	/* Timer 6 Interrupt 								*/
+#define IRQ_TIMER7		0x04000000	/* Timer 7 Interrupt 								*/
+#define IRQ_PFA_PORTFG	0x08000000	/* PF Ports F&G (PF31:0) Interrupt A 				*/
+#define IRQ_PFB_PORTF	0x80000000	/* PF Port F (PF15:0) Interrupt B 					*/
+#define IRQ_DMA12		0x20000000	/* DMA Channels 12 (MDMA1 Source) RX Interrupt 		*/
+#define IRQ_DMA13		0x20000000	/* DMA Channels 13 (MDMA1 Destination) TX Interrupt */
+#define IRQ_DMA14		0x40000000	/* DMA Channels 14 (MDMA0 Source) RX Interrupt 		*/
+#define IRQ_DMA15		0x40000000	/* DMA Channels 15 (MDMA0 Destination) TX Interrupt */
+#define IRQ_WDOG		0x80000000	/* Software Watchdog Timer Interrupt 				*/
+#define IRQ_PFB_PORTG	0x10000000	/* PF Port G (PF31:16) Interrupt B 					*/
+#endif
+
+/* SIC_IAR0 Macros															*/
+#define P0_IVG(x)		(((x)&0xF)-7)			/* Peripheral #0 assigned IVG #x 	*/
+#define P1_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #1 assigned IVG #x 	*/
+#define P2_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #2 assigned IVG #x 	*/
+#define P3_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #3 assigned IVG #x	*/
+#define P4_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #4 assigned IVG #x	*/
+#define P5_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #5 assigned IVG #x	*/
+#define P6_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #6 assigned IVG #x	*/
+#define P7_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #7 assigned IVG #x	*/
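+/* Usage sketch (hypothetical assignments): each Px_IVG() macro deposits a 4-bit
+** field, so an SIC_IAR0 value is built by OR'ing them together, e.g.
+**     sic_iar0_val = P0_IVG(7) | P1_IVG(8) | P2_IVG(9);
+** which leaves every remaining field at the encoding for IVG7.			*/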
+
+/* SIC_IAR1 Macros															*/
+#define P8_IVG(x)		(((x)&0xF)-7)			/* Peripheral #8 assigned IVG #x 	*/
+#define P9_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #9 assigned IVG #x 	*/
+#define P10_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #10 assigned IVG #x	*/
+#define P11_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #11 assigned IVG #x 	*/
+#define P12_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #12 assigned IVG #x	*/
+#define P13_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #13 assigned IVG #x	*/
+#define P14_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #14 assigned IVG #x	*/
+#define P15_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #15 assigned IVG #x	*/
+
+/* SIC_IAR2 Macros															*/
+#define P16_IVG(x)		(((x)&0xF)-7)			/* Peripheral #16 assigned IVG #x	*/
+#define P17_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #17 assigned IVG #x	*/
+#define P18_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #18 assigned IVG #x	*/
+#define P19_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #19 assigned IVG #x	*/
+#define P20_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #20 assigned IVG #x	*/
+#define P21_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #21 assigned IVG #x	*/
+#define P22_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #22 assigned IVG #x	*/
+#define P23_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #23 assigned IVG #x	*/
+
+/* SIC_IAR3 Macros															*/
+#define P24_IVG(x)		(((x)&0xF)-7)			/* Peripheral #24 assigned IVG #x	*/
+#define P25_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #25 assigned IVG #x	*/
+#define P26_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #26 assigned IVG #x	*/
+#define P27_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #27 assigned IVG #x	*/
+#define P28_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #28 assigned IVG #x	*/
+#define P29_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #29 assigned IVG #x	*/
+#define P30_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #30 assigned IVG #x	*/
+#define P31_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #31 assigned IVG #x	*/
+
+
+/* SIC_IMASK Masks																		*/
+#define SIC_UNMASK_ALL	0x00000000					/* Unmask all peripheral interrupts	*/
+#define SIC_MASK_ALL	0xFFFFFFFF					/* Mask all peripheral interrupts	*/
+#define SIC_MASK(x)		(1 << ((x)&0x1F))					/* Mask Peripheral #x interrupt		*/
+#define SIC_UNMASK(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Unmask Peripheral #x interrupt	*/
+
+/* SIC_IWR Masks																		*/
+#define IWR_DISABLE_ALL	0x00000000					/* Wakeup Disable all peripherals	*/
+#define IWR_ENABLE_ALL	0xFFFFFFFF					/* Wakeup Enable all peripherals	*/
+#define IWR_ENABLE(x)	(1 << ((x)&0x1F))					/* Wakeup Enable Peripheral #x		*/
+#define IWR_DISABLE(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F))) 	/* Wakeup Disable Peripheral #x		*/
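+/* Usage sketch (illustrative only): SIC_MASK(x) yields a value with only bit x set,
+** and SIC_UNMASK(x) yields a value with every bit except x set, so individual
+** peripherals can be combined with the ALL constants, e.g.
+**     sic_imask_val = SIC_MASK_ALL & SIC_UNMASK(9);    (bit 9 cleared, all others set)
+** where 9 is a hypothetical peripheral index.						*/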
+
+
+/* ********* WATCHDOG TIMER MASKS ******************** */
+
+/* Watchdog Timer WDOG_CTL Register Masks */
+
+#define WDEV(x) (((x)<<1) & 0x0006) /* event generated on roll over */
+#define WDEV_RESET 0x0000 /* generate reset event on roll over */
+#define WDEV_NMI 0x0002 /* generate NMI event on roll over */
+#define WDEV_GPI 0x0004 /* generate GP IRQ on roll over */
+#define WDEV_NONE 0x0006 /* no event on roll over */
+#define WDEN 0x0FF0 /* enable watchdog */
+#define WDDIS 0x0AD0 /* disable watchdog */
+#define WDRO 0x8000 /* watchdog rolled over latch */
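+/* Usage sketch (hypothetical configuration, values per the comments above):
+**     wdog_ctl_val = WDEV_RESET | WDEN;    (reset event on rollover, watchdog enabled)
+** writing WDDIS in place of WDEN disables the timer instead.				*/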
+
+/* deprecated WDOG_CTL Register Masks for legacy code */
+
+
+#define ICTL WDEV
+#define ENABLE_RESET WDEV_RESET
+#define WDOG_RESET WDEV_RESET
+#define ENABLE_NMI WDEV_NMI
+#define WDOG_NMI WDEV_NMI
+#define ENABLE_GPI WDEV_GPI
+#define WDOG_GPI WDEV_GPI
+#define DISABLE_EVT WDEV_NONE
+#define WDOG_NONE WDEV_NONE
+
+#define TMR_EN WDEN
+#define TMR_DIS WDDIS
+#define TRO WDRO
+#define ICTL_P0 0x01
+#define ICTL_P1 0x02
+#define TRO_P 0x0F
+
+
+
+/* ***************  REAL TIME CLOCK MASKS  **************************/
+/* RTC_STAT and RTC_ALARM Masks										*/
+#define	RTC_SEC				0x0000003F	/* Real-Time Clock Seconds	*/
+#define	RTC_MIN				0x00000FC0	/* Real-Time Clock Minutes	*/
+#define	RTC_HR				0x0001F000	/* Real-Time Clock Hours	*/
+#define	RTC_DAY				0xFFFE0000	/* Real-Time Clock Days		*/
+
+/* RTC_ALARM Macro			z=day		y=hr	x=min	w=sec		*/
+#define SET_ALARM(z,y,x,w)	((((z)&0x7FFF)<<0x11)|(((y)&0x1F)<<0xC)|(((x)&0x3F)<<0x6)|((w)&0x3F))
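+/* Usage sketch (hypothetical alarm time): SET_ALARM(10, 6, 30, 0) packs day 10,
+** 06:30:00 into the RTC_ALARM day/hour/minute/second fields shown above.		*/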
+
+/* RTC_ICTL and RTC_ISTAT Masks																		*/
+#define	STOPWATCH			0x0001		/* Stopwatch Interrupt Enable								*/
+#define	ALARM				0x0002		/* Alarm Interrupt Enable									*/
+#define	SECOND				0x0004		/* Seconds (1 Hz) Interrupt Enable							*/
+#define	MINUTE				0x0008		/* Minutes Interrupt Enable									*/
+#define	HOUR				0x0010		/* Hours Interrupt Enable									*/
+#define	DAY					0x0020		/* 24 Hours (Days) Interrupt Enable							*/
+#define	DAY_ALARM			0x0040		/* Day Alarm (Day, Hour, Minute, Second) Interrupt Enable	*/
+#define	WRITE_PENDING		0x4000		/* Write Pending Status										*/
+#define	WRITE_COMPLETE		0x8000		/* Write Complete Interrupt Enable							*/
+
+/* RTC_FAST / RTC_PREN Mask												*/
+#define PREN				0x0001	/* Enable Prescaler, RTC Runs @1 Hz	*/
+
+
+/* ************** UART CONTROLLER MASKS *************************/
+/* UARTx_LCR Masks												*/
+#define WLS(x)		(((x)-5) & 0x03)	/* Word Length Select */
+#define STB			0x04				/* Stop Bits			*/
+#define PEN			0x08				/* Parity Enable		*/
+#define EPS			0x10				/* Even Parity Select	*/
+#define STP			0x20				/* Stick Parity			*/
+#define SB			0x40				/* Set Break			*/
+#define DLAB		0x80				/* Divisor Latch Access	*/
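+/* Usage sketch (hypothetical 8N1 setup): WLS(x) takes the word length directly, so
+** an 8-data-bit, 1-stop-bit, no-parity line configuration is simply
+**     uart_lcr_val = WLS(8);
+** with STB and PEN left clear; DLAB is set only while programming the divisor latch.	*/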
+
+/* UARTx_MCR Mask										*/
+#define LOOP_ENA	0x10	/* Loopback Mode Enable */
+#define LOOP_ENA_P	0x04
+
+/* UARTx_LSR Masks										*/
+#define DR			0x01	/* Data Ready				*/
+#define OE			0x02	/* Overrun Error			*/
+#define PE			0x04	/* Parity Error				*/
+#define FE			0x08	/* Framing Error			*/
+#define BI			0x10	/* Break Interrupt			*/
+#define THRE		0x20	/* THR Empty				*/
+#define TEMT		0x40	/* TSR and UART_THR Empty	*/
+
+/* UARTx_IER Masks															*/
+#define ERBFI		0x01		/* Enable Receive Buffer Full Interrupt		*/
+#define ETBEI		0x02		/* Enable Transmit Buffer Empty Interrupt	*/
+#define ELSI		0x04		/* Enable RX Status Interrupt				*/
+
+/* UARTx_IIR Masks														*/
+#define NINT		0x01		/* Pending Interrupt					*/
+#define IIR_TX_READY    0x02		/* UART_THR empty                               */
+#define IIR_RX_READY    0x04		/* Receive data ready                           */
+#define IIR_LINE_CHANGE 0x06		/* Receive line status    			*/
+#define IIR_STATUS	0x06		/* Highest Priority Pending Interrupt	*/
+
+/* UARTx_GCTL Masks													*/
+#define UCEN		0x01		/* Enable UARTx Clocks				*/
+#define IREN		0x02		/* Enable IrDA Mode					*/
+#define TPOLC		0x04		/* IrDA TX Polarity Change			*/
+#define RPOLC		0x08		/* IrDA RX Polarity Change			*/
+#define FPE			0x10		/* Force Parity Error On Transmit	*/
+#define FFE			0x20		/* Force Framing Error On Transmit	*/
+
+
+/* ***********  SERIAL PERIPHERAL INTERFACE (SPI) MASKS  ****************************/
+/* SPI_CTL Masks																	*/
+#define	TIMOD		0x0003		/* Transfer Initiate Mode							*/
+#define RDBR_CORE	0x0000		/* 		RDBR Read Initiates, IRQ When RDBR Full		*/
+#define	TDBR_CORE	0x0001		/* 		TDBR Write Initiates, IRQ When TDBR Empty	*/
+#define RDBR_DMA	0x0002		/* 		DMA Read, DMA Until FIFO Empty				*/
+#define TDBR_DMA	0x0003		/* 		DMA Write, DMA Until FIFO Full				*/
+#define SZ			0x0004		/* Send Zero (When TDBR Empty, Send Zero/Last*)		*/
+#define GM			0x0008		/* Get More (When RDBR Full, Overwrite/Discard*)	*/
+#define PSSE		0x0010		/* Slave-Select Input Enable						*/
+#define EMISO		0x0020		/* Enable MISO As Output							*/
+#define SIZE		0x0100		/* Size of Words (16/8* Bits)						*/
+#define LSBF		0x0200		/* LSB First										*/
+#define CPHA		0x0400		/* Clock Phase										*/
+#define CPOL		0x0800		/* Clock Polarity									*/
+#define MSTR		0x1000		/* Master/Slave*									*/
+#define WOM			0x2000		/* Write Open Drain Master							*/
+#define SPE			0x4000		/* SPI Enable										*/
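+/* Usage sketch (hypothetical master setup): a 16-bit master configuration with
+** transfers initiated by TDBR writes could be built as
+**     spi_ctl_val = SPE | MSTR | SIZE | TDBR_CORE;
+** leaving the remaining bits at their default (cleared) values.			*/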
+
+/* SPI_FLG Masks																	*/
+#define FLS1		0x0002		/* Enables SPI_FLOUT1 as SPI Slave-Select Output	*/
+#define FLS2		0x0004		/* Enables SPI_FLOUT2 as SPI Slave-Select Output	*/
+#define FLS3		0x0008		/* Enables SPI_FLOUT3 as SPI Slave-Select Output	*/
+#define FLS4		0x0010		/* Enables SPI_FLOUT4 as SPI Slave-Select Output	*/
+#define FLS5		0x0020		/* Enables SPI_FLOUT5 as SPI Slave-Select Output	*/
+#define FLS6		0x0040		/* Enables SPI_FLOUT6 as SPI Slave-Select Output	*/
+#define FLS7		0x0080		/* Enables SPI_FLOUT7 as SPI Slave-Select Output	*/
+#define FLG1		0xFDFF		/* Activates SPI_FLOUT1 							*/
+#define FLG2		0xFBFF		/* Activates SPI_FLOUT2								*/
+#define FLG3		0xF7FF		/* Activates SPI_FLOUT3								*/
+#define FLG4		0xEFFF		/* Activates SPI_FLOUT4								*/
+#define FLG5		0xDFFF		/* Activates SPI_FLOUT5								*/
+#define FLG6		0xBFFF		/* Activates SPI_FLOUT6								*/
+#define FLG7		0x7FFF		/* Activates SPI_FLOUT7								*/
+
+/* SPI_STAT Masks																				*/
+#define SPIF		0x0001		/* SPI Finished (Single-Word Transfer Complete)					*/
+#define MODF		0x0002		/* Mode Fault Error (Another Device Tried To Become Master)		*/
+#define TXE			0x0004		/* Transmission Error (Data Sent With No New Data In TDBR)		*/
+#define TXS			0x0008		/* SPI_TDBR Data Buffer Status (Full/Empty*)					*/
+#define RBSY		0x0010		/* Receive Error (Data Received With RDBR Full)					*/
+#define RXS			0x0020		/* SPI_RDBR Data Buffer Status (Full/Empty*)					*/
+#define TXCOL		0x0040		/* Transmit Collision Error (Corrupt Data May Have Been Sent)	*/
+
+
+/*  ****************  GENERAL PURPOSE TIMER MASKS  **********************/
+/* TIMER_ENABLE Masks													*/
+#define TIMEN0			0x0001		/* Enable Timer 0					*/
+#define TIMEN1			0x0002		/* Enable Timer 1					*/
+#define TIMEN2			0x0004		/* Enable Timer 2					*/
+#define TIMEN3			0x0008		/* Enable Timer 3					*/
+#define TIMEN4			0x0010		/* Enable Timer 4					*/
+#define TIMEN5			0x0020		/* Enable Timer 5					*/
+#define TIMEN6			0x0040		/* Enable Timer 6					*/
+#define TIMEN7			0x0080		/* Enable Timer 7					*/
+
+/* TIMER_DISABLE Masks													*/
+#define TIMDIS0			TIMEN0		/* Disable Timer 0					*/
+#define TIMDIS1			TIMEN1		/* Disable Timer 1					*/
+#define TIMDIS2			TIMEN2		/* Disable Timer 2					*/
+#define TIMDIS3			TIMEN3		/* Disable Timer 3					*/
+#define TIMDIS4			TIMEN4		/* Disable Timer 4					*/
+#define TIMDIS5			TIMEN5		/* Disable Timer 5					*/
+#define TIMDIS6			TIMEN6		/* Disable Timer 6					*/
+#define TIMDIS7			TIMEN7		/* Disable Timer 7					*/
+
+/* TIMER_STATUS Masks													*/
+#define TIMIL0			0x00000001	/* Timer 0 Interrupt				*/
+#define TIMIL1			0x00000002	/* Timer 1 Interrupt				*/
+#define TIMIL2			0x00000004	/* Timer 2 Interrupt				*/
+#define TIMIL3			0x00000008	/* Timer 3 Interrupt				*/
+#define TOVF_ERR0		0x00000010	/* Timer 0 Counter Overflow			*/
+#define TOVF_ERR1		0x00000020	/* Timer 1 Counter Overflow			*/
+#define TOVF_ERR2		0x00000040	/* Timer 2 Counter Overflow			*/
+#define TOVF_ERR3		0x00000080	/* Timer 3 Counter Overflow			*/
+#define TRUN0			0x00001000	/* Timer 0 Slave Enable Status		*/
+#define TRUN1			0x00002000	/* Timer 1 Slave Enable Status		*/
+#define TRUN2			0x00004000	/* Timer 2 Slave Enable Status		*/
+#define TRUN3			0x00008000	/* Timer 3 Slave Enable Status		*/
+#define TIMIL4			0x00010000	/* Timer 4 Interrupt				*/
+#define TIMIL5			0x00020000	/* Timer 5 Interrupt				*/
+#define TIMIL6			0x00040000	/* Timer 6 Interrupt				*/
+#define TIMIL7			0x00080000	/* Timer 7 Interrupt				*/
+#define TOVF_ERR4		0x00100000	/* Timer 4 Counter Overflow			*/
+#define TOVF_ERR5		0x00200000	/* Timer 5 Counter Overflow			*/
+#define TOVF_ERR6		0x00400000	/* Timer 6 Counter Overflow			*/
+#define TOVF_ERR7		0x00800000	/* Timer 7 Counter Overflow			*/
+#define TRUN4			0x10000000	/* Timer 4 Slave Enable Status		*/
+#define TRUN5			0x20000000	/* Timer 5 Slave Enable Status		*/
+#define TRUN6			0x40000000	/* Timer 6 Slave Enable Status		*/
+#define TRUN7			0x80000000	/* Timer 7 Slave Enable Status		*/
+
+/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
+#define TOVL_ERR0 TOVF_ERR0
+#define TOVL_ERR1 TOVF_ERR1
+#define TOVL_ERR2 TOVF_ERR2
+#define TOVL_ERR3 TOVF_ERR3
+#define TOVL_ERR4 TOVF_ERR4
+#define TOVL_ERR5 TOVF_ERR5
+#define TOVL_ERR6 TOVF_ERR6
+#define TOVL_ERR7 TOVF_ERR7
+
+/* TIMERx_CONFIG Masks													*/
+#define PWM_OUT			0x0001	/* Pulse-Width Modulation Output Mode	*/
+#define WDTH_CAP		0x0002	/* Width Capture Input Mode				*/
+#define EXT_CLK			0x0003	/* External Clock Mode					*/
+#define PULSE_HI		0x0004	/* Action Pulse (Positive/Negative*)	*/
+#define PERIOD_CNT		0x0008	/* Period Count							*/
+#define IRQ_ENA			0x0010	/* Interrupt Request Enable				*/
+#define TIN_SEL			0x0020	/* Timer Input Select					*/
+#define OUT_DIS			0x0040	/* Output Pad Disable					*/
+#define CLK_SEL			0x0080	/* Timer Clock Select					*/
+#define TOGGLE_HI		0x0100	/* PWM_OUT PULSE_HI Toggle Mode			*/
+#define EMU_RUN			0x0200	/* Emulation Behavior Select			*/
+#define ERR_TYP			0xC000	/* Error Type							*/
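+/* Usage sketch (hypothetical PWM timer setup): a periodic PWM output with an
+** interrupt on each period could be configured as
+**     timer_config_val = PWM_OUT | PERIOD_CNT | IRQ_ENA;
+** per the bit descriptions above.							*/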
+
+
+/* ******************   GPIO PORTS F, G, H MASKS  ***********************/
+/*  General Purpose IO (0xFFC00700 - 0xFFC007FF)  Masks 				*/
+/* Port F Masks 														*/
+#define PF0		0x0001
+#define PF1		0x0002
+#define PF2		0x0004
+#define PF3		0x0008
+#define PF4		0x0010
+#define PF5		0x0020
+#define PF6		0x0040
+#define PF7		0x0080
+#define PF8		0x0100
+#define PF9		0x0200
+#define PF10	0x0400
+#define PF11	0x0800
+#define PF12	0x1000
+#define PF13	0x2000
+#define PF14	0x4000
+#define PF15	0x8000
+
+/* Port G Masks															*/
+#define PG0		0x0001
+#define PG1		0x0002
+#define PG2		0x0004
+#define PG3		0x0008
+#define PG4		0x0010
+#define PG5		0x0020
+#define PG6		0x0040
+#define PG7		0x0080
+#define PG8		0x0100
+#define PG9		0x0200
+#define PG10	0x0400
+#define PG11	0x0800
+#define PG12	0x1000
+#define PG13	0x2000
+#define PG14	0x4000
+#define PG15	0x8000
+
+/* Port H Masks															*/
+#define PH0		0x0001
+#define PH1		0x0002
+#define PH2		0x0004
+#define PH3		0x0008
+#define PH4		0x0010
+#define PH5		0x0020
+#define PH6		0x0040
+#define PH7		0x0080
+
+
+/* *******************  SERIAL PORT MASKS  **************************************/
+/* SPORTx_TCR1 Masks															*/
+#define TSPEN		0x0001		/* Transmit Enable								*/
+#define ITCLK		0x0002		/* Internal Transmit Clock Select				*/
+#define DTYPE_NORM	0x0004		/* Data Format Normal							*/
+#define DTYPE_ULAW	0x0008		/* Compand Using u-Law							*/
+#define DTYPE_ALAW	0x000C		/* Compand Using A-Law							*/
+#define TLSBIT		0x0010		/* Transmit Bit Order							*/
+#define ITFS		0x0200		/* Internal Transmit Frame Sync Select			*/
+#define TFSR		0x0400		/* Transmit Frame Sync Required Select			*/
+#define DITFS		0x0800		/* Data-Independent Transmit Frame Sync Select	*/
+#define LTFS		0x1000		/* Low Transmit Frame Sync Select				*/
+#define LATFS		0x2000		/* Late Transmit Frame Sync Select				*/
+#define TCKFE		0x4000		/* Clock Falling Edge Select					*/
+
+/* SPORTx_TCR2 Masks and Macro													*/
+#define SLEN(x)		((x)&0x1F)	/* SPORT TX Word Length (2 - 31)				*/
+#define TXSE		0x0100		/* TX Secondary Enable							*/
+#define TSFSE		0x0200		/* Transmit Stereo Frame Sync Enable			*/
+#define TRFST		0x0400		/* Left/Right Order (1 = Right Channel 1st)		*/
+
+/* SPORTx_RCR1 Masks															*/
+#define RSPEN		0x0001		/* Receive Enable 								*/
+#define IRCLK		0x0002		/* Internal Receive Clock Select 				*/
+#define DTYPE_NORM	0x0004		/* Data Format Normal							*/
+#define DTYPE_ULAW	0x0008		/* Compand Using u-Law							*/
+#define DTYPE_ALAW	0x000C		/* Compand Using A-Law							*/
+#define RLSBIT		0x0010		/* Receive Bit Order							*/
+#define IRFS		0x0200		/* Internal Receive Frame Sync Select 			*/
+#define RFSR		0x0400		/* Receive Frame Sync Required Select 			*/
+#define LRFS		0x1000		/* Low Receive Frame Sync Select 				*/
+#define LARFS		0x2000		/* Late Receive Frame Sync Select 				*/
+#define RCKFE		0x4000		/* Clock Falling Edge Select 					*/
+
+/* SPORTx_RCR2 Masks															*/
+#define SLEN(x)		((x)&0x1F)	/* SPORT RX Word Length (2 - 31)				*/
+#define RXSE		0x0100		/* RX Secondary Enable							*/
+#define RSFSE		0x0200		/* RX Stereo Frame Sync Enable					*/
+#define RRFST		0x0400		/* Right-First Data Order 						*/
+
+/* SPORTx_STAT Masks															*/
+#define RXNE		0x0001		/* Receive FIFO Not Empty Status				*/
+#define RUVF		0x0002		/* Sticky Receive Underflow Status				*/
+#define ROVF		0x0004		/* Sticky Receive Overflow Status				*/
+#define TXF			0x0008		/* Transmit FIFO Full Status					*/
+#define TUVF		0x0010		/* Sticky Transmit Underflow Status				*/
+#define TOVF		0x0020		/* Sticky Transmit Overflow Status				*/
+#define TXHRE		0x0040		/* Transmit Hold Register Empty					*/
+
+/* SPORTx_MCMC1 Macros															*/
+#define SP_WOFF(x)	((x) & 0x3FF) 	/* Multichannel Window Offset Field			*/
+
+/* Only use WSIZE Macro With Logic OR While Setting Lower Order Bits						*/
+#define SP_WSIZE(x)	(((((x)>>0x3)-1)&0xF) << 0xC)	/* Multichannel Window Size = (x/8)-1	*/
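+/* Usage sketch (hypothetical multichannel window): following the OR-only note above,
+**     sport_mcmc1_val = SP_WOFF(0) | SP_WSIZE(16);
+** selects a window offset of 0 and a 16-channel window ((16/8)-1 in the size field).	*/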
+
+/* SPORTx_MCMC2 Masks															*/
+#define REC_BYPASS	0x0000		/* Bypass Mode (No Clock Recovery)				*/
+#define REC_2FROM4	0x0002		/* Recover 2 MHz Clock from 4 MHz Clock			*/
+#define REC_8FROM16	0x0003		/* Recover 8 MHz Clock from 16 MHz Clock		*/
+#define MCDTXPE		0x0004 		/* Multichannel DMA Transmit Packing			*/
+#define MCDRXPE		0x0008 		/* Multichannel DMA Receive Packing				*/
+#define MCMEN		0x0010 		/* Multichannel Frame Mode Enable				*/
+#define FSDR		0x0080 		/* Multichannel Frame Sync to Data Relationship	*/
+#define MFD_0		0x0000		/* Multichannel Frame Delay = 0					*/
+#define MFD_1		0x1000		/* Multichannel Frame Delay = 1					*/
+#define MFD_2		0x2000		/* Multichannel Frame Delay = 2					*/
+#define MFD_3		0x3000		/* Multichannel Frame Delay = 3					*/
+#define MFD_4		0x4000		/* Multichannel Frame Delay = 4					*/
+#define MFD_5		0x5000		/* Multichannel Frame Delay = 5					*/
+#define MFD_6		0x6000		/* Multichannel Frame Delay = 6					*/
+#define MFD_7		0x7000		/* Multichannel Frame Delay = 7					*/
+#define MFD_8		0x8000		/* Multichannel Frame Delay = 8					*/
+#define MFD_9		0x9000		/* Multichannel Frame Delay = 9					*/
+#define MFD_10		0xA000		/* Multichannel Frame Delay = 10				*/
+#define MFD_11		0xB000		/* Multichannel Frame Delay = 11				*/
+#define MFD_12		0xC000		/* Multichannel Frame Delay = 12				*/
+#define MFD_13		0xD000		/* Multichannel Frame Delay = 13				*/
+#define MFD_14		0xE000		/* Multichannel Frame Delay = 14				*/
+#define MFD_15		0xF000		/* Multichannel Frame Delay = 15				*/
+
+
+/* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  *************************/
+/* EBIU_AMGCTL Masks																	*/
+#define AMCKEN			0x0001		/* Enable CLKOUT									*/
+#define	AMBEN_NONE		0x0000		/* All Banks Disabled								*/
+#define AMBEN_B0		0x0002		/* Enable Async Memory Bank 0 only					*/
+#define AMBEN_B0_B1		0x0004		/* Enable Async Memory Banks 0 & 1 only				*/
+#define AMBEN_B0_B1_B2	0x0006		/* Enable Async Memory Banks 0, 1, and 2			*/
+#define AMBEN_ALL		0x0008		/* Enable Async Memory Banks (all) 0, 1, 2, and 3	*/
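+/* Usage sketch (illustrative only): enabling CLKOUT and all four async banks is
+**     ebiu_amgctl_val = AMCKEN | AMBEN_ALL;						*/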
+
+/* EBIU_AMBCTL0 Masks																	*/
+#define B0RDYEN			0x00000001  /* Bank 0 (B0) RDY Enable							*/
+#define B0RDYPOL		0x00000002  /* B0 RDY Active High								*/
+#define B0TT_1			0x00000004  /* B0 Transition Time (Read to Write) = 1 cycle		*/
+#define B0TT_2			0x00000008  /* B0 Transition Time (Read to Write) = 2 cycles	*/
+#define B0TT_3			0x0000000C  /* B0 Transition Time (Read to Write) = 3 cycles	*/
+#define B0TT_4			0x00000000  /* B0 Transition Time (Read to Write) = 4 cycles	*/
+#define B0ST_1			0x00000010  /* B0 Setup Time (AOE to Read/Write) = 1 cycle		*/
+#define B0ST_2			0x00000020  /* B0 Setup Time (AOE to Read/Write) = 2 cycles		*/
+#define B0ST_3			0x00000030  /* B0 Setup Time (AOE to Read/Write) = 3 cycles		*/
+#define B0ST_4			0x00000000  /* B0 Setup Time (AOE to Read/Write) = 4 cycles		*/
+#define B0HT_1			0x00000040  /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
+#define B0HT_2			0x00000080  /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B0HT_3			0x000000C0  /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B0HT_0			0x00000000  /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B0RAT_1			0x00000100  /* B0 Read Access Time = 1 cycle					*/
+#define B0RAT_2			0x00000200  /* B0 Read Access Time = 2 cycles					*/
+#define B0RAT_3			0x00000300  /* B0 Read Access Time = 3 cycles					*/
+#define B0RAT_4			0x00000400  /* B0 Read Access Time = 4 cycles					*/
+#define B0RAT_5			0x00000500  /* B0 Read Access Time = 5 cycles					*/
+#define B0RAT_6			0x00000600  /* B0 Read Access Time = 6 cycles					*/
+#define B0RAT_7			0x00000700  /* B0 Read Access Time = 7 cycles					*/
+#define B0RAT_8			0x00000800  /* B0 Read Access Time = 8 cycles					*/
+#define B0RAT_9			0x00000900  /* B0 Read Access Time = 9 cycles					*/
+#define B0RAT_10		0x00000A00  /* B0 Read Access Time = 10 cycles					*/
+#define B0RAT_11		0x00000B00  /* B0 Read Access Time = 11 cycles					*/
+#define B0RAT_12		0x00000C00  /* B0 Read Access Time = 12 cycles					*/
+#define B0RAT_13		0x00000D00  /* B0 Read Access Time = 13 cycles					*/
+#define B0RAT_14		0x00000E00  /* B0 Read Access Time = 14 cycles					*/
+#define B0RAT_15		0x00000F00  /* B0 Read Access Time = 15 cycles					*/
+#define B0WAT_1			0x00001000  /* B0 Write Access Time = 1 cycle					*/
+#define B0WAT_2			0x00002000  /* B0 Write Access Time = 2 cycles					*/
+#define B0WAT_3			0x00003000  /* B0 Write Access Time = 3 cycles					*/
+#define B0WAT_4			0x00004000  /* B0 Write Access Time = 4 cycles					*/
+#define B0WAT_5			0x00005000  /* B0 Write Access Time = 5 cycles					*/
+#define B0WAT_6			0x00006000  /* B0 Write Access Time = 6 cycles					*/
+#define B0WAT_7			0x00007000  /* B0 Write Access Time = 7 cycles					*/
+#define B0WAT_8			0x00008000  /* B0 Write Access Time = 8 cycles					*/
+#define B0WAT_9			0x00009000  /* B0 Write Access Time = 9 cycles					*/
+#define B0WAT_10		0x0000A000  /* B0 Write Access Time = 10 cycles					*/
+#define B0WAT_11		0x0000B000  /* B0 Write Access Time = 11 cycles					*/
+#define B0WAT_12		0x0000C000  /* B0 Write Access Time = 12 cycles					*/
+#define B0WAT_13		0x0000D000  /* B0 Write Access Time = 13 cycles					*/
+#define B0WAT_14		0x0000E000  /* B0 Write Access Time = 14 cycles					*/
+#define B0WAT_15		0x0000F000  /* B0 Write Access Time = 15 cycles					*/
+
+#define B1RDYEN			0x00010000  /* Bank 1 (B1) RDY Enable                       	*/
+#define B1RDYPOL		0x00020000  /* B1 RDY Active High                           	*/
+#define B1TT_1			0x00040000  /* B1 Transition Time (Read to Write) = 1 cycle 	*/
+#define B1TT_2			0x00080000  /* B1 Transition Time (Read to Write) = 2 cycles	*/
+#define B1TT_3			0x000C0000  /* B1 Transition Time (Read to Write) = 3 cycles	*/
+#define B1TT_4			0x00000000  /* B1 Transition Time (Read to Write) = 4 cycles	*/
+#define B1ST_1			0x00100000  /* B1 Setup Time (AOE to Read/Write) = 1 cycle  	*/
+#define B1ST_2			0x00200000  /* B1 Setup Time (AOE to Read/Write) = 2 cycles 	*/
+#define B1ST_3			0x00300000  /* B1 Setup Time (AOE to Read/Write) = 3 cycles 	*/
+#define B1ST_4			0x00000000  /* B1 Setup Time (AOE to Read/Write) = 4 cycles 	*/
+#define B1HT_1			0x00400000  /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle 	*/
+#define B1HT_2			0x00800000  /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B1HT_3			0x00C00000  /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B1HT_0			0x00000000  /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B1RAT_1			0x01000000  /* B1 Read Access Time = 1 cycle					*/
+#define B1RAT_2			0x02000000  /* B1 Read Access Time = 2 cycles					*/
+#define B1RAT_3			0x03000000  /* B1 Read Access Time = 3 cycles					*/
+#define B1RAT_4			0x04000000  /* B1 Read Access Time = 4 cycles					*/
+#define B1RAT_5			0x05000000  /* B1 Read Access Time = 5 cycles					*/
+#define B1RAT_6			0x06000000  /* B1 Read Access Time = 6 cycles					*/
+#define B1RAT_7			0x07000000  /* B1 Read Access Time = 7 cycles					*/
+#define B1RAT_8			0x08000000  /* B1 Read Access Time = 8 cycles					*/
+#define B1RAT_9			0x09000000  /* B1 Read Access Time = 9 cycles					*/
+#define B1RAT_10		0x0A000000  /* B1 Read Access Time = 10 cycles					*/
+#define B1RAT_11		0x0B000000  /* B1 Read Access Time = 11 cycles					*/
+#define B1RAT_12		0x0C000000  /* B1 Read Access Time = 12 cycles					*/
+#define B1RAT_13		0x0D000000  /* B1 Read Access Time = 13 cycles					*/
+#define B1RAT_14		0x0E000000  /* B1 Read Access Time = 14 cycles					*/
+#define B1RAT_15		0x0F000000  /* B1 Read Access Time = 15 cycles					*/
+#define B1WAT_1			0x10000000  /* B1 Write Access Time = 1 cycle					*/
+#define B1WAT_2			0x20000000  /* B1 Write Access Time = 2 cycles					*/
+#define B1WAT_3			0x30000000  /* B1 Write Access Time = 3 cycles					*/
+#define B1WAT_4			0x40000000  /* B1 Write Access Time = 4 cycles					*/
+#define B1WAT_5			0x50000000  /* B1 Write Access Time = 5 cycles					*/
+#define B1WAT_6			0x60000000  /* B1 Write Access Time = 6 cycles					*/
+#define B1WAT_7			0x70000000  /* B1 Write Access Time = 7 cycles					*/
+#define B1WAT_8			0x80000000  /* B1 Write Access Time = 8 cycles					*/
+#define B1WAT_9			0x90000000  /* B1 Write Access Time = 9 cycles					*/
+#define B1WAT_10		0xA0000000  /* B1 Write Access Time = 10 cycles					*/
+#define B1WAT_11		0xB0000000  /* B1 Write Access Time = 11 cycles					*/
+#define B1WAT_12		0xC0000000  /* B1 Write Access Time = 12 cycles					*/
+#define B1WAT_13		0xD0000000  /* B1 Write Access Time = 13 cycles					*/
+#define B1WAT_14		0xE0000000  /* B1 Write Access Time = 14 cycles					*/
+#define B1WAT_15		0xF0000000  /* B1 Write Access Time = 15 cycles					*/
+
+/* EBIU_AMBCTL1 Masks																	*/
+#define B2RDYEN			0x00000001  /* Bank 2 (B2) RDY Enable							*/
+#define B2RDYPOL		0x00000002  /* B2 RDY Active High								*/
+#define B2TT_1			0x00000004  /* B2 Transition Time (Read to Write) = 1 cycle		*/
+#define B2TT_2			0x00000008  /* B2 Transition Time (Read to Write) = 2 cycles	*/
+#define B2TT_3			0x0000000C  /* B2 Transition Time (Read to Write) = 3 cycles	*/
+#define B2TT_4			0x00000000  /* B2 Transition Time (Read to Write) = 4 cycles	*/
+#define B2ST_1			0x00000010  /* B2 Setup Time (AOE to Read/Write) = 1 cycle		*/
+#define B2ST_2			0x00000020  /* B2 Setup Time (AOE to Read/Write) = 2 cycles		*/
+#define B2ST_3			0x00000030  /* B2 Setup Time (AOE to Read/Write) = 3 cycles		*/
+#define B2ST_4			0x00000000  /* B2 Setup Time (AOE to Read/Write) = 4 cycles		*/
+#define B2HT_1			0x00000040  /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
+#define B2HT_2			0x00000080  /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B2HT_3			0x000000C0  /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B2HT_0			0x00000000  /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B2RAT_1			0x00000100  /* B2 Read Access Time = 1 cycle					*/
+#define B2RAT_2			0x00000200  /* B2 Read Access Time = 2 cycles					*/
+#define B2RAT_3			0x00000300  /* B2 Read Access Time = 3 cycles					*/
+#define B2RAT_4			0x00000400  /* B2 Read Access Time = 4 cycles					*/
+#define B2RAT_5			0x00000500  /* B2 Read Access Time = 5 cycles					*/
+#define B2RAT_6			0x00000600  /* B2 Read Access Time = 6 cycles					*/
+#define B2RAT_7			0x00000700  /* B2 Read Access Time = 7 cycles					*/
+#define B2RAT_8			0x00000800  /* B2 Read Access Time = 8 cycles					*/
+#define B2RAT_9			0x00000900  /* B2 Read Access Time = 9 cycles					*/
+#define B2RAT_10		0x00000A00  /* B2 Read Access Time = 10 cycles					*/
+#define B2RAT_11		0x00000B00  /* B2 Read Access Time = 11 cycles					*/
+#define B2RAT_12		0x00000C00  /* B2 Read Access Time = 12 cycles					*/
+#define B2RAT_13		0x00000D00  /* B2 Read Access Time = 13 cycles					*/
+#define B2RAT_14		0x00000E00  /* B2 Read Access Time = 14 cycles					*/
+#define B2RAT_15		0x00000F00  /* B2 Read Access Time = 15 cycles					*/
+#define B2WAT_1			0x00001000  /* B2 Write Access Time = 1 cycle					*/
+#define B2WAT_2			0x00002000  /* B2 Write Access Time = 2 cycles					*/
+#define B2WAT_3			0x00003000  /* B2 Write Access Time = 3 cycles					*/
+#define B2WAT_4			0x00004000  /* B2 Write Access Time = 4 cycles					*/
+#define B2WAT_5			0x00005000  /* B2 Write Access Time = 5 cycles					*/
+#define B2WAT_6			0x00006000  /* B2 Write Access Time = 6 cycles					*/
+#define B2WAT_7			0x00007000  /* B2 Write Access Time = 7 cycles					*/
+#define B2WAT_8			0x00008000  /* B2 Write Access Time = 8 cycles					*/
+#define B2WAT_9			0x00009000  /* B2 Write Access Time = 9 cycles					*/
+#define B2WAT_10		0x0000A000  /* B2 Write Access Time = 10 cycles					*/
+#define B2WAT_11		0x0000B000  /* B2 Write Access Time = 11 cycles					*/
+#define B2WAT_12		0x0000C000  /* B2 Write Access Time = 12 cycles					*/
+#define B2WAT_13		0x0000D000  /* B2 Write Access Time = 13 cycles					*/
+#define B2WAT_14		0x0000E000  /* B2 Write Access Time = 14 cycles					*/
+#define B2WAT_15		0x0000F000  /* B2 Write Access Time = 15 cycles					*/
+
+#define B3RDYEN			0x00010000  /* Bank 3 (B3) RDY Enable							*/
+#define B3RDYPOL		0x00020000  /* B3 RDY Active High								*/
+#define B3TT_1			0x00040000  /* B3 Transition Time (Read to Write) = 1 cycle		*/
+#define B3TT_2			0x00080000  /* B3 Transition Time (Read to Write) = 2 cycles	*/
+#define B3TT_3			0x000C0000  /* B3 Transition Time (Read to Write) = 3 cycles	*/
+#define B3TT_4			0x00000000  /* B3 Transition Time (Read to Write) = 4 cycles	*/
+#define B3ST_1			0x00100000  /* B3 Setup Time (AOE to Read/Write) = 1 cycle		*/
+#define B3ST_2			0x00200000  /* B3 Setup Time (AOE to Read/Write) = 2 cycles		*/
+#define B3ST_3			0x00300000  /* B3 Setup Time (AOE to Read/Write) = 3 cycles		*/
+#define B3ST_4			0x00000000  /* B3 Setup Time (AOE to Read/Write) = 4 cycles		*/
+#define B3HT_1			0x00400000  /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
+#define B3HT_2			0x00800000  /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B3HT_3			0x00C00000  /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B3HT_0			0x00000000  /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B3RAT_1			0x01000000  /* B3 Read Access Time = 1 cycle					*/
+#define B3RAT_2			0x02000000  /* B3 Read Access Time = 2 cycles					*/
+#define B3RAT_3			0x03000000  /* B3 Read Access Time = 3 cycles					*/
+#define B3RAT_4			0x04000000  /* B3 Read Access Time = 4 cycles					*/
+#define B3RAT_5			0x05000000  /* B3 Read Access Time = 5 cycles					*/
+#define B3RAT_6			0x06000000  /* B3 Read Access Time = 6 cycles					*/
+#define B3RAT_7			0x07000000  /* B3 Read Access Time = 7 cycles					*/
+#define B3RAT_8			0x08000000  /* B3 Read Access Time = 8 cycles					*/
+#define B3RAT_9			0x09000000  /* B3 Read Access Time = 9 cycles					*/
+#define B3RAT_10		0x0A000000  /* B3 Read Access Time = 10 cycles					*/
+#define B3RAT_11		0x0B000000  /* B3 Read Access Time = 11 cycles					*/
+#define B3RAT_12		0x0C000000  /* B3 Read Access Time = 12 cycles					*/
+#define B3RAT_13		0x0D000000  /* B3 Read Access Time = 13 cycles					*/
+#define B3RAT_14		0x0E000000  /* B3 Read Access Time = 14 cycles					*/
+#define B3RAT_15		0x0F000000  /* B3 Read Access Time = 15 cycles					*/
+#define B3WAT_1			0x10000000  /* B3 Write Access Time = 1 cycle					*/
+#define B3WAT_2			0x20000000  /* B3 Write Access Time = 2 cycles					*/
+#define B3WAT_3			0x30000000  /* B3 Write Access Time = 3 cycles					*/
+#define B3WAT_4			0x40000000  /* B3 Write Access Time = 4 cycles					*/
+#define B3WAT_5			0x50000000  /* B3 Write Access Time = 5 cycles					*/
+#define B3WAT_6			0x60000000  /* B3 Write Access Time = 6 cycles					*/
+#define B3WAT_7			0x70000000  /* B3 Write Access Time = 7 cycles					*/
+#define B3WAT_8			0x80000000  /* B3 Write Access Time = 8 cycles					*/
+#define B3WAT_9			0x90000000  /* B3 Write Access Time = 9 cycles					*/
+#define B3WAT_10		0xA0000000  /* B3 Write Access Time = 10 cycles					*/
+#define B3WAT_11		0xB0000000  /* B3 Write Access Time = 11 cycles					*/
+#define B3WAT_12		0xC0000000  /* B3 Write Access Time = 12 cycles					*/
+#define B3WAT_13		0xD0000000  /* B3 Write Access Time = 13 cycles					*/
+#define B3WAT_14		0xE0000000  /* B3 Write Access Time = 14 cycles					*/
+#define B3WAT_15		0xF0000000  /* B3 Write Access Time = 15 cycles					*/
+
+
+/* **********************  SDRAM CONTROLLER MASKS  **********************************************/
+/* EBIU_SDGCTL Masks																			*/
+#define SCTLE			0x00000001	/* Enable SDRAM Signals										*/
+#define CL_2			0x00000008	/* SDRAM CAS Latency = 2 cycles								*/
+#define CL_3			0x0000000C	/* SDRAM CAS Latency = 3 cycles								*/
+#define PASR_ALL		0x00000000	/* All 4 SDRAM Banks Refreshed In Self-Refresh				*/
+#define PASR_B0_B1		0x00000010	/* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh		*/
+#define PASR_B0			0x00000020	/* Only SDRAM Bank 0 Is Refreshed In Self-Refresh			*/
+#define TRAS_1			0x00000040	/* SDRAM tRAS = 1 cycle										*/
+#define TRAS_2			0x00000080	/* SDRAM tRAS = 2 cycles									*/
+#define TRAS_3			0x000000C0	/* SDRAM tRAS = 3 cycles									*/
+#define TRAS_4			0x00000100	/* SDRAM tRAS = 4 cycles									*/
+#define TRAS_5			0x00000140	/* SDRAM tRAS = 5 cycles									*/
+#define TRAS_6			0x00000180	/* SDRAM tRAS = 6 cycles									*/
+#define TRAS_7			0x000001C0	/* SDRAM tRAS = 7 cycles									*/
+#define TRAS_8			0x00000200	/* SDRAM tRAS = 8 cycles									*/
+#define TRAS_9			0x00000240	/* SDRAM tRAS = 9 cycles									*/
+#define TRAS_10			0x00000280	/* SDRAM tRAS = 10 cycles									*/
+#define TRAS_11			0x000002C0	/* SDRAM tRAS = 11 cycles									*/
+#define TRAS_12			0x00000300	/* SDRAM tRAS = 12 cycles									*/
+#define TRAS_13			0x00000340	/* SDRAM tRAS = 13 cycles									*/
+#define TRAS_14			0x00000380	/* SDRAM tRAS = 14 cycles									*/
+#define TRAS_15			0x000003C0	/* SDRAM tRAS = 15 cycles									*/
+#define TRP_1			0x00000800	/* SDRAM tRP = 1 cycle										*/
+#define TRP_2			0x00001000	/* SDRAM tRP = 2 cycles										*/
+#define TRP_3			0x00001800	/* SDRAM tRP = 3 cycles										*/
+#define TRP_4			0x00002000	/* SDRAM tRP = 4 cycles										*/
+#define TRP_5			0x00002800	/* SDRAM tRP = 5 cycles										*/
+#define TRP_6			0x00003000	/* SDRAM tRP = 6 cycles										*/
+#define TRP_7			0x00003800	/* SDRAM tRP = 7 cycles										*/
+#define TRCD_1			0x00008000	/* SDRAM tRCD = 1 cycle										*/
+#define TRCD_2			0x00010000	/* SDRAM tRCD = 2 cycles									*/
+#define TRCD_3			0x00018000	/* SDRAM tRCD = 3 cycles									*/
+#define TRCD_4			0x00020000	/* SDRAM tRCD = 4 cycles									*/
+#define TRCD_5			0x00028000	/* SDRAM tRCD = 5 cycles									*/
+#define TRCD_6			0x00030000	/* SDRAM tRCD = 6 cycles									*/
+#define TRCD_7			0x00038000	/* SDRAM tRCD = 7 cycles									*/
+#define TWR_1			0x00080000	/* SDRAM tWR = 1 cycle										*/
+#define TWR_2			0x00100000	/* SDRAM tWR = 2 cycles										*/
+#define TWR_3			0x00180000	/* SDRAM tWR = 3 cycles										*/
+#define PUPSD			0x00200000	/* Power-Up Start Delay (15 SCLK Cycles Delay)				*/
+#define PSM				0x00400000	/* Power-Up Sequence (Mode Register Before/After* Refresh)	*/
+#define PSS				0x00800000	/* Enable Power-Up Sequence on Next SDRAM Access			*/
+#define SRFS			0x01000000	/* Enable SDRAM Self-Refresh Mode							*/
+#define EBUFE			0x02000000	/* Enable External Buffering Timing							*/
+#define FBBRW			0x04000000	/* Enable Fast Back-To-Back Read To Write					*/
+#define EMREN			0x10000000	/* Extended Mode Register Enable							*/
+#define TCSR			0x20000000	/* Temp-Compensated Self-Refresh Value (85/45* Deg C)		*/
+#define CDDBG			0x40000000	/* Tristate SDRAM Controls During Bus Grant					*/
+
+/* EBIU_SDBCTL Masks																		*/
+#define EBE				0x0001		/* Enable SDRAM External Bank							*/
+#define EBSZ_16			0x0000		/* SDRAM External Bank Size = 16MB	*/
+#define EBSZ_32			0x0002		/* SDRAM External Bank Size = 32MB	*/
+#define EBSZ_64			0x0004		/* SDRAM External Bank Size = 64MB	*/
+#define EBSZ_128		0x0006		/* SDRAM External Bank Size = 128MB		*/
+#define EBSZ_256		0x0008		/* SDRAM External Bank Size = 256MB 	*/
+#define EBSZ_512		0x000A		/* SDRAM External Bank Size = 512MB		*/
+#define EBCAW_8			0x0000		/* SDRAM External Bank Column Address Width = 8 Bits	*/
+#define EBCAW_9			0x0010		/* SDRAM External Bank Column Address Width = 9 Bits	*/
+#define EBCAW_10		0x0020		/* SDRAM External Bank Column Address Width = 10 Bits	*/
+#define EBCAW_11		0x0030		/* SDRAM External Bank Column Address Width = 11 Bits	*/
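+/* Usage sketch (hypothetical 64MB part): an enabled external SDRAM bank with 10-bit
+** column addressing could be described as
+**     ebiu_sdbctl_val = EBE | EBSZ_64 | EBCAW_10;					*/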
+
+/* EBIU_SDSTAT Masks														*/
+#define SDCI			0x0001		/* SDRAM Controller Idle 				*/
+#define SDSRA			0x0002		/* SDRAM Self-Refresh Active			*/
+#define SDPUA			0x0004		/* SDRAM Power-Up Active 				*/
+#define SDRS			0x0008		/* SDRAM Will Power-Up On Next Access	*/
+#define SDEASE			0x0010		/* SDRAM EAB Sticky Error Status		*/
+#define BGSTAT			0x0020		/* Bus Grant Status						*/
+
+
+/* **************************  DMA CONTROLLER MASKS  ********************************/
+/* DMAx_CONFIG, MDMA_yy_CONFIG Masks												*/
+#define DMAEN			0x0001		/* DMA Channel Enable							*/
+#define WNR				0x0002		/* Channel Direction (W/R*)						*/
+#define WDSIZE_8		0x0000		/* Transfer Word Size = 8						*/
+#define WDSIZE_16		0x0004		/* Transfer Word Size = 16						*/
+#define WDSIZE_32		0x0008		/* Transfer Word Size = 32						*/
+#define DMA2D			0x0010		/* DMA Mode (2D/1D*)							*/
+#define RESTART			0x0020		/* DMA Buffer Clear								*/
+#define DI_SEL			0x0040		/* Data Interrupt Timing Select					*/
+#define DI_EN			0x0080		/* Data Interrupt Enable						*/
+#define NDSIZE_0		0x0000		/* Next Descriptor Size = 0 (Stop/Autobuffer)	*/
+#define NDSIZE_1		0x0100		/* Next Descriptor Size = 1						*/
+#define NDSIZE_2		0x0200		/* Next Descriptor Size = 2						*/
+#define NDSIZE_3		0x0300		/* Next Descriptor Size = 3						*/
+#define NDSIZE_4		0x0400		/* Next Descriptor Size = 4						*/
+#define NDSIZE_5		0x0500		/* Next Descriptor Size = 5						*/
+#define NDSIZE_6		0x0600		/* Next Descriptor Size = 6						*/
+#define NDSIZE_7		0x0700		/* Next Descriptor Size = 7						*/
+#define NDSIZE_8		0x0800		/* Next Descriptor Size = 8						*/
+#define NDSIZE_9		0x0900		/* Next Descriptor Size = 9						*/
+#define NDSIZE	        	0x0900	/* Next Descriptor Size */
+#define DMAFLOW	        	0x7000	/* Flow Control */
+#define DMAFLOW_STOP		0x0000		/* Stop Mode									*/
+#define DMAFLOW_AUTO		0x1000		/* Autobuffer Mode								*/
+#define DMAFLOW_ARRAY		0x4000		/* Descriptor Array Mode						*/
+#define DMAFLOW_SMALL		0x6000		/* Small Model Descriptor List Mode				*/
+#define DMAFLOW_LARGE		0x7000		/* Large Model Descriptor List Mode				*/
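+/* Usage sketch (hypothetical receive channel): a 16-bit, memory-write, autobuffer
+** DMA configuration with a completion interrupt could be built as
+**     dma_config_val = DMAEN | WNR | WDSIZE_16 | DI_EN | DMAFLOW_AUTO;
+** per the bit descriptions above.							*/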
+
+/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks								*/
+#define CTYPE			0x0040	/* DMA Channel Type Indicator (Memory/Peripheral*)	*/
+#define PMAP			0xF000	/* Peripheral Mapped To This Channel				*/
+#define PMAP_PPI		0x0000	/* 		PPI Port DMA								*/
+#define	PMAP_EMACRX		0x1000	/* 		Ethernet Receive DMA						*/
+#define PMAP_EMACTX		0x2000	/* 		Ethernet Transmit DMA						*/
+#define PMAP_SPORT0RX	0x3000	/* 		SPORT0 Receive DMA							*/
+#define PMAP_SPORT0TX	0x4000	/* 		SPORT0 Transmit DMA							*/
+#define PMAP_SPORT1RX	0x5000	/* 		SPORT1 Receive DMA							*/
+#define PMAP_SPORT1TX	0x6000	/* 		SPORT1 Transmit DMA							*/
+#define PMAP_SPI		0x7000	/* 		SPI Port DMA								*/
+#define PMAP_UART0RX	0x8000	/* 		UART0 Port Receive DMA						*/
+#define PMAP_UART0TX	0x9000	/* 		UART0 Port Transmit DMA						*/
+#define	PMAP_UART1RX	0xA000	/* 		UART1 Port Receive DMA						*/
+#define	PMAP_UART1TX	0xB000	/* 		UART1 Port Transmit DMA						*/
+
+/* DMAx_IRQ_STATUS, MDMA_yy_IRQ_STATUS Masks						*/
+#define DMA_DONE		0x0001	/* DMA Completion Interrupt Status	*/
+#define DMA_ERR			0x0002	/* DMA Error Interrupt Status		*/
+#define DFETCH			0x0004	/* DMA Descriptor Fetch Indicator	*/
+#define DMA_RUN			0x0008	/* DMA Channel Running Indicator	*/
+
+
+/*  ************  PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
+/*  PPI_CONTROL Masks													*/
+#define PORT_EN			0x0001		/* PPI Port Enable					*/
+#define PORT_DIR		0x0002		/* PPI Port Direction				*/
+#define XFR_TYPE		0x000C		/* PPI Transfer Type				*/
+#define PORT_CFG		0x0030		/* PPI Port Configuration			*/
+#define FLD_SEL			0x0040		/* PPI Active Field Select			*/
+#define PACK_EN			0x0080		/* PPI Packing Mode					*/
+#define DMA32			0x0100		/* PPI 32-bit DMA Enable			*/
+#define SKIP_EN			0x0200		/* PPI Skip Element Enable			*/
+#define SKIP_EO			0x0400		/* PPI Skip Even/Odd Elements		*/
+#define DLEN_8			0x0000		/* Data Length = 8 Bits				*/
+#define DLEN_10			0x0800		/* Data Length = 10 Bits			*/
+#define DLEN_11			0x1000		/* Data Length = 11 Bits			*/
+#define DLEN_12			0x1800		/* Data Length = 12 Bits			*/
+#define DLEN_13			0x2000		/* Data Length = 13 Bits			*/
+#define DLEN_14			0x2800		/* Data Length = 14 Bits			*/
+#define DLEN_15			0x3000		/* Data Length = 15 Bits			*/
+#define DLEN_16			0x3800		/* Data Length = 16 Bits			*/
+#define DLENGTH			0x3800		/* PPI Data Length  */
+#define POLC			0x4000		/* PPI Clock Polarity				*/
+#define POLS			0x8000		/* PPI Frame Sync Polarity			*/
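+/* Usage sketch (illustrative only): an 8-bit PPI port with packing enabled could be
+** configured as
+**     ppi_control_val = PORT_EN | PACK_EN | DLEN_8;
+** with the direction and transfer-type fields left at their default values.		*/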
+
+/* PPI_STATUS Masks														*/
+#define FLD				0x0400		/* Field Indicator					*/
+#define FT_ERR			0x0800		/* Frame Track Error				*/
+#define OVR				0x1000		/* FIFO Overflow Error				*/
+#define UNDR			0x2000		/* FIFO Underrun Error				*/
+#define ERR_DET			0x4000		/* Error Detected Indicator			*/
+#define ERR_NCOR		0x8000		/* Error Not Corrected Indicator	*/
+
+
+/*  ********************  TWO-WIRE INTERFACE (TWI) MASKS  ***********************/
+/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y);  )				*/
+#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low			*/
+#define CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low			*/
+
+/* TWI_PRESCALE Masks															*/
+#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz)	*/
+#define	TWI_ENA		0x0080		/* TWI Enable									*/
+#define	SCCB		0x0200		/* SCCB Compatibility Enable					*/
+
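The CLKLOW()/CLKHI() macros above pack the SCL low and high periods, counted in ticks of the TWI's internal 10 MHz time reference (configured through PRESCALE), into TWI_CLKDIV. A minimal sketch of the usage hinted at in the comment, assuming the pTWI_CLKDIV pointer macro from the matching cdefBF51x header:

/* Sketch only: roughly 100 kHz SCL with a 50/50 duty cycle.  One SCL
 * period at 100 kHz is 10 MHz / 100 kHz = 100 reference clocks, split
 * between the low and high phases.
 */
static void twi_set_100khz(void)
{
	unsigned short period = 10000000 / 100000;	/* 100 reference clocks */

	*pTWI_CLKDIV = CLKLOW(period / 2) | CLKHI(period - period / 2);
}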
+/* TWI_SLAVE_CTRL Masks															*/
+#define	SEN			0x0001		/* Slave Enable									*/
+#define	SADD_LEN	0x0002		/* Slave Address Length							*/
+#define	STDVAL		0x0004		/* Slave Transmit Data Valid					*/
+#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
+#define	GEN			0x0010		/* General Call Address Matching Enabled		*/
+
+/* TWI_SLAVE_STAT Masks															*/
+#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*)	*/
+#define GCALL		0x0002		/* General Call Indicator						*/
+
+/* TWI_MASTER_CTRL Masks													*/
+#define	MEN			0x0001		/* Master Mode Enable						*/
+#define	MADD_LEN	0x0002		/* Master Address Length					*/
+#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*)		*/
+#define	FAST		0x0008		/* Use Fast Mode Timing Specs				*/
+#define	STOP		0x0010		/* Issue Stop Condition						*/
+#define	RSTART		0x0020		/* Repeat Start or Stop* At End Of Transfer	*/
+#define	DCNT		0x3FC0		/* Data Bytes To Transfer					*/
+#define	SDAOVR		0x4000		/* Serial Data Override						*/
+#define	SCLOVR		0x8000		/* Serial Clock Override					*/
+
+/* TWI_MASTER_STAT Masks														*/
+#define	MPROG		0x0001		/* Master Transfer In Progress					*/
+#define	LOSTARB		0x0002		/* Lost Arbitration Indicator (Xfer Aborted)	*/
+#define	ANAK		0x0004		/* Address Not Acknowledged						*/
+#define	DNAK		0x0008		/* Data Not Acknowledged						*/
+#define	BUFRDERR	0x0010		/* Buffer Read Error							*/
+#define	BUFWRERR	0x0020		/* Buffer Write Error							*/
+#define	SDASEN		0x0040		/* Serial Data Sense							*/
+#define	SCLSEN		0x0080		/* Serial Clock Sense							*/
+#define	BUSBUSY		0x0100		/* Bus Busy Indicator							*/
+
+/* TWI_INT_SRC and TWI_INT_ENABLE Masks						*/
+#define	SINIT		0x0001		/* Slave Transfer Initiated	*/
+#define	SCOMP		0x0002		/* Slave Transfer Complete	*/
+#define	SERR		0x0004		/* Slave Transfer Error		*/
+#define	SOVF		0x0008		/* Slave Overflow			*/
+#define	MCOMP		0x0010		/* Master Transfer Complete	*/
+#define	MERR		0x0020		/* Master Transfer Error	*/
+#define	XMTSERV		0x0040		/* Transmit FIFO Service	*/
+#define	RCVSERV		0x0080		/* Receive FIFO Service		*/
+
+/* TWI_FIFO_CTRL Masks												*/
+#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush			*/
+#define	RCVFLUSH	0x0002		/* Receive Buffer Flush				*/
+#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length	*/
+#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length	*/
+
+/* TWI_FIFO_STAT Masks															*/
+#define	XMTSTAT		0x0003		/* Transmit FIFO Status							*/
+#define	XMT_EMPTY	0x0000		/* 		Transmit FIFO Empty						*/
+#define	XMT_HALF	0x0001		/* 		Transmit FIFO Has 1 Byte To Write		*/
+#define	XMT_FULL	0x0003		/* 		Transmit FIFO Full (2 Bytes To Write)	*/
+
+#define	RCVSTAT		0x000C		/* Receive FIFO Status							*/
+#define	RCV_EMPTY	0x0000		/* 		Receive FIFO Empty						*/
+#define	RCV_HALF	0x0004		/* 		Receive FIFO Has 1 Byte To Read			*/
+#define	RCV_FULL	0x000C		/* 		Receive FIFO Full (2 Bytes To Read)		*/
+
+
+/*  *******************  PIN CONTROL REGISTER MASKS  ************************/
+/* PORT_MUX Masks															*/
+#define	PJSE			0x0001			/* Port J SPI/SPORT Enable			*/
+#define	PJSE_SPORT		0x0000			/* 		Enable TFS0/DT0PRI			*/
+#define	PJSE_SPI		0x0001			/* 		Enable SPI_SSEL3:2			*/
+
+#define	PJCE(x)			(((x)&0x3)<<1)	/* Port J CAN/SPI/SPORT Enable		*/
+#define	PJCE_SPORT		0x0000			/* 		Enable DR0SEC/DT0SEC		*/
+#define	PJCE_CAN		0x0002			/* 		Enable CAN RX/TX			*/
+#define	PJCE_SPI		0x0004			/* 		Enable SPI_SSEL7			*/
+
+#define	PFDE			0x0008			/* Port F DMA Request Enable		*/
+#define	PFDE_UART		0x0000			/* 		Enable UART0 RX/TX			*/
+#define	PFDE_DMA		0x0008			/* 		Enable DMAR1:0				*/
+
+#define	PFTE			0x0010			/* Port F Timer Enable				*/
+#define	PFTE_UART		0x0000			/*		Enable UART1 RX/TX			*/
+#define	PFTE_TIMER		0x0010			/* 		Enable TMR7:6				*/
+
+#define	PFS6E			0x0020			/* Port F SPI SSEL 6 Enable			*/
+#define	PFS6E_TIMER		0x0000			/*		Enable TMR5					*/
+#define	PFS6E_SPI		0x0020			/* 		Enable SPI_SSEL6			*/
+
+#define	PFS5E			0x0040			/* Port F SPI SSEL 5 Enable			*/
+#define	PFS5E_TIMER		0x0000			/*		Enable TMR4					*/
+#define	PFS5E_SPI		0x0040			/* 		Enable SPI_SSEL5			*/
+
+#define	PFS4E			0x0080			/* Port F SPI SSEL 4 Enable			*/
+#define	PFS4E_TIMER		0x0000			/*		Enable TMR3					*/
+#define	PFS4E_SPI		0x0080			/* 		Enable SPI_SSEL4			*/
+
+#define	PFFE			0x0100			/* Port F PPI Frame Sync Enable		*/
+#define	PFFE_TIMER		0x0000			/* 		Enable TMR2					*/
+#define	PFFE_PPI		0x0100			/* 		Enable PPI FS3				*/
+
+#define	PGSE			0x0200			/* Port G SPORT1 Secondary Enable	*/
+#define	PGSE_PPI		0x0000			/* 		Enable PPI D9:8				*/
+#define	PGSE_SPORT		0x0200			/* 		Enable DR1SEC/DT1SEC		*/
+
+#define	PGRE			0x0400			/* Port G SPORT1 Receive Enable		*/
+#define	PGRE_PPI		0x0000			/* 		Enable PPI D12:10			*/
+#define	PGRE_SPORT		0x0400			/* 		Enable DR1PRI/RFS1/RSCLK1	*/
+
+#define	PGTE			0x0800			/* Port G SPORT1 Transmit Enable	*/
+#define	PGTE_PPI		0x0000			/* 		Enable PPI D15:13			*/
+#define	PGTE_SPORT		0x0800			/* 		Enable DT1PRI/TFS1/TSCLK1	*/
+
+
+/*  ******************  HANDSHAKE DMA (HDMA) MASKS  *********************/
+/* HDMAx_CTL Masks														*/
+#define	HMDMAEN		0x0001	/* Enable Handshake DMA 0/1					*/
+#define	REP			0x0002	/* HDMA Request Polarity					*/
+#define	UTE			0x0004	/* Urgency Threshold Enable					*/
+#define	OIE			0x0010	/* Overflow Interrupt Enable				*/
+#define	BDIE		0x0020	/* Block Done Interrupt Enable				*/
+#define	MBDI		0x0040	/* Mask Block Done IRQ If Pending ECNT		*/
+#define	DRQ			0x0300	/* HDMA Request Type						*/
+#define	DRQ_NONE	0x0000	/* 		No Request							*/
+#define	DRQ_SINGLE	0x0100	/* 		Channels Request Single				*/
+#define	DRQ_MULTI	0x0200	/* 		Channels Request Multi (Default)	*/
+#define	DRQ_URGENT	0x0300	/* 		Channels Request Multi Urgent		*/
+#define	RBC			0x1000	/* Reload BCNT With IBCNT					*/
+#define	PS			0x2000	/* HDMA Pin Status							*/
+#define	OI			0x4000	/* Overflow Interrupt Generated				*/
+#define	BDI			0x8000	/* Block Done Interrupt Generated			*/
+
+/* entry addresses of the user-callable Boot ROM functions */
+
+#define _BOOTROM_RESET 0xEF000000
+#define _BOOTROM_FINAL_INIT 0xEF000002
+#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
+#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
+#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
+#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
+#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
+#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
+#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
+
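These are raw entry addresses of routines resident in the on-chip Boot ROM, so calling one means casting the address to a function pointer. A heavily hedged sketch for the reset entry; the no-argument prototype is an assumption, and the Hardware Reference Manual defines the real calling conventions:

/* Assumed prototype: the reset entry is treated here as taking no
 * arguments and not returning control in any useful way.
 */
static void bootrom_reset(void)
{
	void (*reset_entry)(void) = (void (*)(void))_BOOTROM_RESET;

	reset_entry();
}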
+/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
+#define	PGDE_UART   PFDE_UART
+#define	PGDE_DMA    PFDE_DMA
+#define	CKELOW		SCKELOW
+
+/* HOST Port Registers */
+
+#define                     HOST_CONTROL  0xffc03400   /* HOST Control Register */
+#define                      HOST_STATUS  0xffc03404   /* HOST Status Register */
+#define                     HOST_TIMEOUT  0xffc03408   /* HOST Acknowledge Mode Timeout Register */
+
+/* Counter Registers */
+
+#define                       CNT_CONFIG  0xffc03500   /* Configuration Register */
+#define                        CNT_IMASK  0xffc03504   /* Interrupt Mask Register */
+#define                       CNT_STATUS  0xffc03508   /* Status Register */
+#define                      CNT_COMMAND  0xffc0350c   /* Command Register */
+#define                     CNT_DEBOUNCE  0xffc03510   /* Debounce Register */
+#define                      CNT_COUNTER  0xffc03514   /* Counter Register */
+#define                          CNT_MAX  0xffc03518   /* Maximal Count Register */
+#define                          CNT_MIN  0xffc0351c   /* Minimal Count Register */
+
+/* OTP/FUSE Registers */
+
+#define                      OTP_CONTROL  0xffc03600   /* OTP/Fuse Control Register */
+#define                          OTP_BEN  0xffc03604   /* OTP/Fuse Byte Enable */
+#define                       OTP_STATUS  0xffc03608   /* OTP/Fuse Status */
+#define                       OTP_TIMING  0xffc0360c   /* OTP/Fuse Access Timing */
+
+/* Security Registers */
+
+#define                    SECURE_SYSSWT  0xffc03620   /* Secure System Switches */
+#define                   SECURE_CONTROL  0xffc03624   /* Secure Control */
+#define                    SECURE_STATUS  0xffc03628   /* Secure Status */
+
+/* OTP Read/Write Data Buffer Registers */
+
+#define                        OTP_DATA0  0xffc03680   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define                        OTP_DATA1  0xffc03684   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define                        OTP_DATA2  0xffc03688   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define                        OTP_DATA3  0xffc0368c   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+
+/* Motor Control PWM Registers */
+
+#define                         PWM_CTRL  0xffc03700   /* PWM Control Register */
+#define                         PWM_STAT  0xffc03704   /* PWM Status Register */
+#define                           PWM_TM  0xffc03708   /* PWM Period Register */
+#define                           PWM_DT  0xffc0370c   /* PWM Dead Time Register */
+#define                         PWM_GATE  0xffc03710   /* PWM Chopping Control */
+#define                          PWM_CHA  0xffc03714   /* PWM Channel A Duty Control */
+#define                          PWM_CHB  0xffc03718   /* PWM Channel B Duty Control */
+#define                          PWM_CHC  0xffc0371c   /* PWM Channel C Duty Control */
+#define                          PWM_SEG  0xffc03720   /* PWM Crossover and Output Enable */
+#define                       PWM_SYNCWT  0xffc03724   /* PWM Sync Pulse Width Control */
+#define                         PWM_CHAL  0xffc03728   /* PWM Channel AL Duty Control (SR mode only) */
+#define                         PWM_CHBL  0xffc0372c   /* PWM Channel BL Duty Control (SR mode only) */
+#define                         PWM_CHCL  0xffc03730   /* PWM Channel CL Duty Control (SR mode only) */
+#define                          PWM_LSI  0xffc03734   /* PWM Low Side Invert (SR mode only) */
+#define                        PWM_STAT2  0xffc03738   /* PWM Status Register 2 */
+
+
+/* ********************************************************** */
+/*     SINGLE BIT MACRO PAIRS (bit mask and negated one)      */
+/*     and MULTI BIT READ MACROS                              */
+/* ********************************************************** */
+
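Each single-bit field below comes as a pair: the mask itself and an all-zero, n-prefixed counterpart. The negated macros contribute nothing numerically; OR-ing them into a register write only documents that the bit is deliberately left clear. A short sketch of the idiom, with the bfin_write_HOST_CONTROL() accessor assumed from the matching cdef header:

/* Sketch only: enable host reads and writes while recording that
 * interrupt mode stays off (HOST_CNTR_nINT_MODE evaluates to 0).
 */
static void host_port_enable(void)
{
	bfin_write_HOST_CONTROL(HOST_CNTR_HOST_EN | HOST_CNTR_EHW |
				HOST_CNTR_EHR | HOST_CNTR_nINT_MODE);
}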
+/* Bit masks for HOST_CONTROL */
+
+#define                   HOST_CNTR_HOST_EN  0x1        /* Host Enable */
+#define                  HOST_CNTR_nHOST_EN  0x0
+#define                  HOST_CNTR_HOST_END  0x2        /* Host Endianness */
+#define                 HOST_CNTR_nHOST_END  0x0
+#define                 HOST_CNTR_DATA_SIZE  0x4        /* Data Size */
+#define                HOST_CNTR_nDATA_SIZE  0x0
+#define                  HOST_CNTR_HOST_RST  0x8        /* Host Reset */
+#define                 HOST_CNTR_nHOST_RST  0x0
+#define                  HOST_CNTR_HRDY_OVR  0x20       /* Host Ready Override */
+#define                 HOST_CNTR_nHRDY_OVR  0x0
+#define                  HOST_CNTR_INT_MODE  0x40       /* Interrupt Mode */
+#define                 HOST_CNTR_nINT_MODE  0x0
+#define                     HOST_CNTR_BT_EN  0x80       /* Bus Timeout Enable */
+#define                    HOST_CNTR_nBT_EN  0x0
+#define                       HOST_CNTR_EHW  0x100      /* Enable Host Write */
+#define                      HOST_CNTR_nEHW  0x0
+#define                       HOST_CNTR_EHR  0x200      /* Enable Host Read */
+#define                      HOST_CNTR_nEHR  0x0
+#define                       HOST_CNTR_BDR  0x400      /* Burst DMA Requests */
+#define                      HOST_CNTR_nBDR  0x0
+
+/* Bit masks for HOST_STATUS */
+
+#define                     HOST_STAT_READY  0x1        /* DMA Ready */
+#define                    HOST_STAT_nREADY  0x0
+#define                  HOST_STAT_FIFOFULL  0x2        /* FIFO Full */
+#define                 HOST_STAT_nFIFOFULL  0x0
+#define                 HOST_STAT_FIFOEMPTY  0x4        /* FIFO Empty */
+#define                HOST_STAT_nFIFOEMPTY  0x0
+#define                  HOST_STAT_COMPLETE  0x8        /* DMA Complete */
+#define                 HOST_STAT_nCOMPLETE  0x0
+#define                      HOST_STAT_HSHK  0x10       /* Host Handshake */
+#define                     HOST_STAT_nHSHK  0x0
+#define                   HOST_STAT_TIMEOUT  0x20       /* Host Timeout */
+#define                  HOST_STAT_nTIMEOUT  0x0
+#define                      HOST_STAT_HIRQ  0x40       /* Host Interrupt Request */
+#define                     HOST_STAT_nHIRQ  0x0
+#define                HOST_STAT_ALLOW_CNFG  0x80       /* Allow New Configuration */
+#define               HOST_STAT_nALLOW_CNFG  0x0
+#define                   HOST_STAT_DMA_DIR  0x100      /* DMA Direction */
+#define                  HOST_STAT_nDMA_DIR  0x0
+#define                       HOST_STAT_BTE  0x200      /* Bus Timeout Enabled */
+#define                      HOST_STAT_nBTE  0x0
+#define               HOST_STAT_HOSTRD_DONE  0x8000     /* Host Read Completion Interrupt */
+#define              HOST_STAT_nHOSTRD_DONE  0x0
+
+/* Bit masks for HOST_TIMEOUT */
+
+#define             HOST_COUNT_TIMEOUT  0x7ff      /* Host Timeout count */
+
+/* Bit masks for CNT_CONFIG */
+
+#define                      CNTE  0x1        /* Counter Enable */
+#define                     nCNTE  0x0
+#define                      DEBE  0x2        /* Debounce Enable */
+#define                     nDEBE  0x0
+#define                    CDGINV  0x10       /* CDG Pin Polarity Invert */
+#define                   nCDGINV  0x0
+#define                    CUDINV  0x20       /* CUD Pin Polarity Invert */
+#define                   nCUDINV  0x0
+#define                    CZMINV  0x40       /* CZM Pin Polarity Invert */
+#define                   nCZMINV  0x0
+#define                   CNTMODE  0x700      /* Counter Operating Mode */
+#define                      ZMZC  0x800      /* CZM Zeroes Counter Enable */
+#define                     nZMZC  0x0
+#define                   BNDMODE  0x3000     /* Boundary register Mode */
+#define                    INPDIS  0x8000     /* CUG and CDG Input Disable */
+#define                   nINPDIS  0x0
+
+/* Bit masks for CNT_IMASK */
+
+#define                      ICIE  0x1        /* Illegal Gray/Binary Code Interrupt Enable */
+#define                     nICIE  0x0
+#define                      UCIE  0x2        /* Up count Interrupt Enable */
+#define                     nUCIE  0x0
+#define                      DCIE  0x4        /* Down count Interrupt Enable */
+#define                     nDCIE  0x0
+#define                    MINCIE  0x8        /* Min Count Interrupt Enable */
+#define                   nMINCIE  0x0
+#define                    MAXCIE  0x10       /* Max Count Interrupt Enable */
+#define                   nMAXCIE  0x0
+#define                   COV31IE  0x20       /* Bit 31 Overflow Interrupt Enable */
+#define                  nCOV31IE  0x0
+#define                   COV15IE  0x40       /* Bit 15 Overflow Interrupt Enable */
+#define                  nCOV15IE  0x0
+#define                   CZEROIE  0x80       /* Count to Zero Interrupt Enable */
+#define                  nCZEROIE  0x0
+#define                     CZMIE  0x100      /* CZM Pin Interrupt Enable */
+#define                    nCZMIE  0x0
+#define                    CZMEIE  0x200      /* CZM Error Interrupt Enable */
+#define                   nCZMEIE  0x0
+#define                    CZMZIE  0x400      /* CZM Zeroes Counter Interrupt Enable */
+#define                   nCZMZIE  0x0
+
+/* Bit masks for CNT_STATUS */
+
+#define                      ICII  0x1        /* Illegal Gray/Binary Code Interrupt Identifier */
+#define                     nICII  0x0
+#define                      UCII  0x2        /* Up count Interrupt Identifier */
+#define                     nUCII  0x0
+#define                      DCII  0x4        /* Down count Interrupt Identifier */
+#define                     nDCII  0x0
+#define                    MINCII  0x8        /* Min Count Interrupt Identifier */
+#define                   nMINCII  0x0
+#define                    MAXCII  0x10       /* Max Count Interrupt Identifier */
+#define                   nMAXCII  0x0
+#define                   COV31II  0x20       /* Bit 31 Overflow Interrupt Identifier */
+#define                  nCOV31II  0x0
+#define                   COV15II  0x40       /* Bit 15 Overflow Interrupt Identifier */
+#define                  nCOV15II  0x0
+#define                   CZEROII  0x80       /* Count to Zero Interrupt Identifier */
+#define                  nCZEROII  0x0
+#define                     CZMII  0x100      /* CZM Pin Interrupt Identifier */
+#define                    nCZMII  0x0
+#define                    CZMEII  0x200      /* CZM Error Interrupt Identifier */
+#define                   nCZMEII  0x0
+#define                    CZMZII  0x400      /* CZM Zeroes Counter Interrupt Identifier */
+#define                   nCZMZII  0x0
+
+/* Bit masks for CNT_COMMAND */
+
+#define                    W1LCNT  0xf        /* Load Counter Register */
+#define                    W1LMIN  0xf0       /* Load Min Register */
+#define                    W1LMAX  0xf00      /* Load Max Register */
+#define                  W1ZMONCE  0x1000     /* Enable CZM Clear Counter Once */
+#define                 nW1ZMONCE  0x0
+
+/* Bit masks for CNT_DEBOUNCE */
+
+#define                 DPRESCALE  0xf        /* Debounce Prescale */
+
+/* CNT_COMMAND bit field options */
+
+#define W1LCNT_ZERO   0x0001   /* write 1 to load CNT_COUNTER with zero */
+#define W1LCNT_MIN    0x0004   /* write 1 to load CNT_COUNTER from CNT_MIN */
+#define W1LCNT_MAX    0x0008   /* write 1 to load CNT_COUNTER from CNT_MAX */
+
+#define W1LMIN_ZERO   0x0010   /* write 1 to load CNT_MIN with zero */
+#define W1LMIN_CNT    0x0020   /* write 1 to load CNT_MIN from CNT_COUNTER */
+#define W1LMIN_MAX    0x0080   /* write 1 to load CNT_MIN from CNT_MAX */
+
+#define W1LMAX_ZERO   0x0100   /* write 1 to load CNT_MAX with zero */
+#define W1LMAX_CNT    0x0200   /* write 1 to load CNT_MAX from CNT_COUNTER */
+#define W1LMAX_MIN    0x0400   /* write 1 to load CNT_MAX from CNT_MIN */
+
+/* CNT_CONFIG bit field options */
+
+#define CNTMODE_QUADENC  0x0000  /* quadrature encoder mode */
+#define CNTMODE_BINENC   0x0100  /* binary encoder mode */
+#define CNTMODE_UDCNT    0x0200  /* up/down counter mode */
+#define CNTMODE_DIRCNT   0x0400  /* direction counter mode */
+#define CNTMODE_DIRTMR   0x0500  /* direction timer mode */
+
+#define BNDMODE_COMP     0x0000  /* boundary compare mode */
+#define BNDMODE_ZERO     0x1000  /* boundary compare and zero mode */
+#define BNDMODE_CAPT     0x2000  /* boundary capture mode */
+#define BNDMODE_AEXT     0x3000  /* boundary auto-extend mode */
+
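The CNT_COMMAND options above are write-1-to-trigger loads, and the CNT_CONFIG options select the counter's operating and boundary modes. A hedged sketch of programming the GP counter as a debounced quadrature encoder, assuming the bfin_write_CNT_* accessors from the matching cdef header:

/* Sketch only: quadrature encoder with debouncing, count zeroed at start,
 * interrupts on reaching CNT_MIN or CNT_MAX.
 */
static void quad_encoder_setup(void)
{
	bfin_write_CNT_COMMAND(W1LCNT_ZERO);		/* zero CNT_COUNTER */
	bfin_write_CNT_DEBOUNCE(4);			/* debounce prescale; value is illustrative */
	bfin_write_CNT_IMASK(MINCIE | MAXCIE);		/* boundary interrupts */
	bfin_write_CNT_CONFIG(CNTE | DEBE | CNTMODE_QUADENC | BNDMODE_COMP);
}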
+/* Bit masks for OTP_CONTROL */
+
+#define                FUSE_FADDR  0x1ff      /* OTP/Fuse Address */
+#define                      FIEN  0x800      /* OTP/Fuse Interrupt Enable */
+#define                     nFIEN  0x0
+#define                  FTESTDEC  0x1000     /* OTP/Fuse Test Decoder */
+#define                 nFTESTDEC  0x0
+#define                   FWRTEST  0x2000     /* OTP/Fuse Write Test */
+#define                  nFWRTEST  0x0
+#define                     FRDEN  0x4000     /* OTP/Fuse Read Enable */
+#define                    nFRDEN  0x0
+#define                     FWREN  0x8000     /* OTP/Fuse Write Enable */
+#define                    nFWREN  0x0
+
+/* Bit masks for OTP_BEN */
+
+#define                      FBEN  0xffff     /* OTP/Fuse Byte Enable */
+
+/* Bit masks for OTP_STATUS */
+
+#define                     FCOMP  0x1        /* OTP/Fuse Access Complete */
+#define                    nFCOMP  0x0
+#define                    FERROR  0x2        /* OTP/Fuse Access Error */
+#define                   nFERROR  0x0
+#define                  MMRGLOAD  0x10       /* Memory Mapped Register Gasket Load */
+#define                 nMMRGLOAD  0x0
+#define                  MMRGLOCK  0x20       /* Memory Mapped Register Gasket Lock */
+#define                 nMMRGLOCK  0x0
+#define                    FPGMEN  0x40       /* OTP/Fuse Program Enable */
+#define                   nFPGMEN  0x0
+
+/* Bit masks for OTP_TIMING */
+
+#define                   USECDIV  0xff       /* Micro Second Divider */
+#define                   READACC  0x7f00     /* Read Access Time */
+#define                   CPUMPRL  0x38000    /* Charge Pump Release Time */
+#define                   CPUMPSU  0xc0000    /* Charge Pump Setup Time */
+#define                   CPUMPHD  0xf00000   /* Charge Pump Hold Time */
+#define                   PGMTIME  0xff000000 /* Program Time */
+
+/* Bit masks for SECURE_SYSSWT */
+
+#define                   EMUDABL  0x1        /* Emulation Disable. */
+#define                  nEMUDABL  0x0
+#define                   RSTDABL  0x2        /* Reset Disable */
+#define                  nRSTDABL  0x0
+#define                   L1IDABL  0x1c       /* L1 Instruction Memory Disable. */
+#define                  L1DADABL  0xe0       /* L1 Data Bank A Memory Disable. */
+#define                  L1DBDABL  0x700      /* L1 Data Bank B Memory Disable. */
+#define                   DMA0OVR  0x800      /* DMA0 Memory Access Override */
+#define                  nDMA0OVR  0x0
+#define                   DMA1OVR  0x1000     /* DMA1 Memory Access Override */
+#define                  nDMA1OVR  0x0
+#define                    EMUOVR  0x4000     /* Emulation Override */
+#define                   nEMUOVR  0x0
+#define                    OTPSEN  0x8000     /* OTP Secrets Enable. */
+#define                   nOTPSEN  0x0
+#define                    L2DABL  0x70000    /* L2 Memory Disable. */
+
+/* Bit masks for SECURE_CONTROL */
+
+#define                   SECURE0  0x1        /* SECURE 0 */
+#define                  nSECURE0  0x0
+#define                   SECURE1  0x2        /* SECURE 1 */
+#define                  nSECURE1  0x0
+#define                   SECURE2  0x4        /* SECURE 2 */
+#define                  nSECURE2  0x0
+#define                   SECURE3  0x8        /* SECURE 3 */
+#define                  nSECURE3  0x0
+
+/* Bit masks for SECURE_STATUS */
+
+#define                   SECMODE  0x3        /* Secured Mode Control State */
+#define                       NMI  0x4        /* Non Maskable Interrupt */
+#define                      nNMI  0x0
+#define                   AFVALID  0x8        /* Authentication Firmware Valid */
+#define                  nAFVALID  0x0
+#define                    AFEXIT  0x10       /* Authentication Firmware Exit */
+#define                   nAFEXIT  0x0
+#define                   SECSTAT  0xe0       /* Secure Status */
+
+
+
+#endif /* _DEF_BF51X_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/dma.h b/arch/blackfin/mach-bf518/include/mach/dma.h
new file mode 100644
index 0000000..bbd33c1
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/dma.h
@@ -0,0 +1,33 @@
+/* mach/dma.h - arch-specific DMA defines
+ *
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _MACH_DMA_H_
+#define _MACH_DMA_H_
+
+#define MAX_DMA_CHANNELS 16
+
+#define CH_PPI 			0	/* PPI receive/transmit */
+#define CH_EMAC_RX 		1	/* Ethernet MAC receive */
+#define CH_EMAC_TX 		2	/* Ethernet MAC transmit */
+#define CH_SPORT0_RX 		3	/* SPORT0 receive */
+#define CH_SPORT0_TX 		4	/* SPORT0 transmit */
+#define CH_RSI 			4	/* RSI */
+#define CH_SPORT1_RX 		5	/* SPORT1 receive */
+#define CH_SPI1 		5	/* SPI1 transmit/receive */
+#define CH_SPORT1_TX 		6	/* SPORT1 transmit */
+#define CH_SPI0 		7	/* SPI0 transmit/receive */
+#define CH_UART0_RX 		8	/* UART0 receive */
+#define CH_UART0_TX 		9	/* UART0 transmit */
+#define CH_UART1_RX 		10	/* UART1 receive */
+#define CH_UART1_TX 		11	/* UART1 transmit */
+
+#define CH_MEM_STREAM0_SRC 	12	/* RX */
+#define CH_MEM_STREAM0_DEST	13	/* TX */
+#define CH_MEM_STREAM1_SRC 	14	/* RX */
+#define CH_MEM_STREAM1_DEST	15	/* TX */
+
+#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/gpio.h b/arch/blackfin/mach-bf518/include/mach/gpio.h
new file mode 100644
index 0000000..9757683
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/gpio.h
@@ -0,0 +1,60 @@
+/*
+ * File: arch/blackfin/mach-bf518/include/mach/gpio.h
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Copyright (C) 2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+
+#ifndef _MACH_GPIO_H_
+#define _MACH_GPIO_H_
+
+#define MAX_BLACKFIN_GPIOS 40
+
+#define	GPIO_PF0	0
+#define	GPIO_PF1	1
+#define	GPIO_PF2	2
+#define	GPIO_PF3	3
+#define	GPIO_PF4	4
+#define	GPIO_PF5	5
+#define	GPIO_PF6	6
+#define	GPIO_PF7	7
+#define	GPIO_PF8	8
+#define	GPIO_PF9	9
+#define	GPIO_PF10	10
+#define	GPIO_PF11	11
+#define	GPIO_PF12	12
+#define	GPIO_PF13	13
+#define	GPIO_PF14	14
+#define	GPIO_PF15	15
+#define	GPIO_PG0	16
+#define	GPIO_PG1	17
+#define	GPIO_PG2	18
+#define	GPIO_PG3	19
+#define	GPIO_PG4	20
+#define	GPIO_PG5	21
+#define	GPIO_PG6	22
+#define	GPIO_PG7	23
+#define	GPIO_PG8	24
+#define	GPIO_PG9	25
+#define	GPIO_PG10      	26
+#define	GPIO_PG11      	27
+#define	GPIO_PG12      	28
+#define	GPIO_PG13      	29
+#define	GPIO_PG14      	30
+#define	GPIO_PG15      	31
+#define	GPIO_PH0	32
+#define	GPIO_PH1	33
+#define	GPIO_PH2	34
+#define	GPIO_PH3	35
+#define	GPIO_PH4	36
+#define	GPIO_PH5	37
+#define	GPIO_PH6	38
+#define	GPIO_PH7	39
+
+#define PORT_F GPIO_PF0
+#define PORT_G GPIO_PG0
+#define PORT_H GPIO_PH0
+
+#endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf518/include/mach/irq.h b/arch/blackfin/mach-bf518/include/mach/irq.h
new file mode 100644
index 0000000..3ff0f09
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/irq.h
@@ -0,0 +1,260 @@
+/*
+ * File:	arch/blackfin/mach-bf518/include/mach/irq.h
+ * Based on:	arch/blackfin/mach-bf527/include/mach/irq.h
+ * Author:	Michael Hennerich (michael.hennerich@analog.com)
+ *
+ * Created:
+ * Description:
+ *	System interrupt (IRQ) definitions
+ * Rev:
+ *
+ * Modified:
+ *
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _BF518_IRQ_H_
+#define _BF518_IRQ_H_
+
+/*
+ * Interrupt source definitions
+ *
+ *	Event Source			Core Event Name
+ *	Emulation (highest priority)	EMU		0	**
+ *	Reset				RST		1
+ *	NMI				NMI		2
+ *	Exception			EVX		3
+ *	Reserved			--		4
+ *	Hardware Error			IVHW		5
+ *	Core Timer			IVTMR		6	*
+ *	.....
+ *	Software Interrupt 1		IVG14		31
+ *	Software Interrupt 2
+ *	  (lowest priority)		IVG15		32	*
+ */
+
+#define NR_PERI_INTS    (2 * 32)
+
+/* The ABSTRACT IRQ definitions */
+/* The first seven of the following are fixed; the rest may be changed as needed */
+#define IRQ_EMU			0	/* Emulation */
+#define IRQ_RST			1	/* reset */
+#define IRQ_NMI			2	/* Non Maskable */
+#define IRQ_EVX			3	/* Exception */
+#define IRQ_UNUSED		4	/* - unused interrupt */
+#define IRQ_HWERR		5	/* Hardware Error */
+#define IRQ_CORETMR		6	/* Core timer */
+
+#define BFIN_IRQ(x)		((x) + 7)
+
+#define IRQ_PLL_WAKEUP		BFIN_IRQ(0)	/* PLL Wakeup Interrupt */
+#define IRQ_DMA0_ERROR		BFIN_IRQ(1)	/* DMA Error 0 (generic) */
+#define IRQ_DMAR0_BLK		BFIN_IRQ(2)	/* DMAR0 Block Interrupt */
+#define IRQ_DMAR1_BLK		BFIN_IRQ(3)	/* DMAR1 Block Interrupt */
+#define IRQ_DMAR0_OVR		BFIN_IRQ(4)	/* DMAR0 Overflow Error */
+#define IRQ_DMAR1_OVR		BFIN_IRQ(5)	/* DMAR1 Overflow Error */
+#define IRQ_PPI_ERROR		BFIN_IRQ(6)	/* PPI Error */
+#define IRQ_MAC_ERROR		BFIN_IRQ(7)	/* MAC Status */
+#define IRQ_SPORT0_ERROR	BFIN_IRQ(8)	/* SPORT0 Status */
+#define IRQ_SPORT1_ERROR	BFIN_IRQ(9)	/* SPORT1 Status */
+#define IRQ_PTP_ERROR		BFIN_IRQ(10)	/* PTP Error Interrupt */
+#define IRQ_UART0_ERROR		BFIN_IRQ(12)	/* UART0 Status */
+#define IRQ_UART1_ERROR		BFIN_IRQ(13)	/* UART1 Status */
+#define IRQ_RTC			BFIN_IRQ(14)	/* RTC */
+#define IRQ_PPI      		BFIN_IRQ(15)	/* DMA Channel 0 (PPI) */
+#define IRQ_SPORT0_RX		BFIN_IRQ(16)	/* DMA 3 Channel (SPORT0 RX) */
+#define IRQ_SPORT0_TX		BFIN_IRQ(17)	/* DMA 4 Channel (SPORT0 TX) */
+#define IRQ_RSI			BFIN_IRQ(17)	/* DMA 4 Channel (RSI) */
+#define IRQ_SPORT1_RX		BFIN_IRQ(18)	/* DMA 5 Channel (SPORT1 RX/SPI) */
+#define IRQ_SPI1		BFIN_IRQ(18)	/* DMA 5 Channel (SPI1) */
+#define IRQ_SPORT1_TX		BFIN_IRQ(19)	/* DMA 6 Channel (SPORT1 TX) */
+#define IRQ_TWI      		BFIN_IRQ(20)	/* TWI */
+#define IRQ_SPI0     		BFIN_IRQ(21)	/* DMA 7 Channel (SPI0) */
+#define IRQ_UART0_RX 		BFIN_IRQ(22)	/* DMA8 Channel (UART0 RX) */
+#define IRQ_UART0_TX 		BFIN_IRQ(23)	/* DMA9 Channel (UART0 TX) */
+#define IRQ_UART1_RX 		BFIN_IRQ(24)	/* DMA10 Channel (UART1 RX) */
+#define IRQ_UART1_TX 		BFIN_IRQ(25)	/* DMA11 Channel (UART1 TX) */
+#define IRQ_OPTSEC   		BFIN_IRQ(26)	/* OTPSEC Interrupt */
+#define IRQ_CNT   		BFIN_IRQ(27)	/* GP Counter */
+#define IRQ_MAC_RX   		BFIN_IRQ(28)	/* DMA1 Channel (MAC RX) */
+#define IRQ_PORTH_INTA   	BFIN_IRQ(29)	/* Port H Interrupt A */
+#define IRQ_MAC_TX		BFIN_IRQ(30)	/* DMA2 Channel (MAC TX) */
+#define IRQ_PORTH_INTB		BFIN_IRQ(31)	/* Port H Interrupt B */
+#define IRQ_TIMER0		BFIN_IRQ(32)	/* Timer 0 */
+#define IRQ_TIMER1		BFIN_IRQ(33)	/* Timer 1 */
+#define IRQ_TIMER2		BFIN_IRQ(34)	/* Timer 2 */
+#define IRQ_TIMER3		BFIN_IRQ(35)	/* Timer 3 */
+#define IRQ_TIMER4		BFIN_IRQ(36)	/* Timer 4 */
+#define IRQ_TIMER5		BFIN_IRQ(37)	/* Timer 5 */
+#define IRQ_TIMER6		BFIN_IRQ(38)	/* Timer 6 */
+#define IRQ_TIMER7		BFIN_IRQ(39)	/* Timer 7 */
+#define IRQ_PORTG_INTA		BFIN_IRQ(40)	/* Port G Interrupt A */
+#define IRQ_PORTG_INTB		BFIN_IRQ(41)	/* Port G Interrupt B */
+#define IRQ_MEM_DMA0		BFIN_IRQ(42)	/* MDMA Stream 0 */
+#define IRQ_MEM_DMA1		BFIN_IRQ(43)	/* MDMA Stream 1 */
+#define IRQ_WATCH		BFIN_IRQ(44)	/* Software Watchdog Timer */
+#define IRQ_PORTF_INTA		BFIN_IRQ(45)	/* Port F Interrupt A */
+#define IRQ_PORTF_INTB		BFIN_IRQ(46)	/* Port F Interrupt B */
+#define IRQ_SPI0_ERROR		BFIN_IRQ(47)	/* SPI0 Status */
+#define IRQ_SPI1_ERROR		BFIN_IRQ(48)	/* SPI1 Error */
+#define IRQ_RSI_INT0		BFIN_IRQ(51)	/* RSI Interrupt0 */
+#define IRQ_RSI_INT1		BFIN_IRQ(52)	/* RSI Interrupt1 */
+#define IRQ_PWM_TRIP		BFIN_IRQ(53)	/* PWM Trip Interrupt */
+#define IRQ_PWM_SYNC		BFIN_IRQ(54)	/* PWM Sync Interrupt */
+#define IRQ_PTP_STAT		BFIN_IRQ(55)	/* PTP Stat Interrupt */
+
+#define SYS_IRQS        	BFIN_IRQ(63)	/* 70 */
+
+#define IRQ_PF0         71
+#define IRQ_PF1         72
+#define IRQ_PF2         73
+#define IRQ_PF3         74
+#define IRQ_PF4         75
+#define IRQ_PF5         76
+#define IRQ_PF6         77
+#define IRQ_PF7         78
+#define IRQ_PF8         79
+#define IRQ_PF9         80
+#define IRQ_PF10        81
+#define IRQ_PF11        82
+#define IRQ_PF12        83
+#define IRQ_PF13        84
+#define IRQ_PF14        85
+#define IRQ_PF15        86
+
+#define IRQ_PG0         87
+#define IRQ_PG1         88
+#define IRQ_PG2         89
+#define IRQ_PG3         90
+#define IRQ_PG4         91
+#define IRQ_PG5         92
+#define IRQ_PG6         93
+#define IRQ_PG7         94
+#define IRQ_PG8         95
+#define IRQ_PG9         96
+#define IRQ_PG10        97
+#define IRQ_PG11        98
+#define IRQ_PG12        99
+#define IRQ_PG13        100
+#define IRQ_PG14        101
+#define IRQ_PG15        102
+
+#define IRQ_PH0         103
+#define IRQ_PH1         104
+#define IRQ_PH2         105
+#define IRQ_PH3         106
+#define IRQ_PH4         107
+#define IRQ_PH5         108
+#define IRQ_PH6         109
+#define IRQ_PH7         110
+#define IRQ_PH8         111
+#define IRQ_PH9         112
+#define IRQ_PH10        113
+#define IRQ_PH11        114
+#define IRQ_PH12        115
+#define IRQ_PH13        116
+#define IRQ_PH14        117
+#define IRQ_PH15        118
+
+#define GPIO_IRQ_BASE	IRQ_PF0
+
+#define NR_IRQS     (IRQ_PH15 + 1)
+
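The numbers above are the logical Linux IRQs for this part: core events 0-6, the SIC peripheral interrupts via BFIN_IRQ(), then the virtual GPIO IRQs from IRQ_PF0 upward. A minimal sketch of hooking one of them; the handler and name are hypothetical:

#include <linux/interrupt.h>

static irqreturn_t ppi_handler(int irq, void *dev_id)
{
	/* service the PPI DMA interrupt here */
	return IRQ_HANDLED;
}

static int ppi_hook_irq(void)
{
	/* IRQ_PPI expands to BFIN_IRQ(15), i.e. logical IRQ 22 */
	return request_irq(IRQ_PPI, ppi_handler, 0, "ppi_example", NULL);
}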
+#define IVG7            7
+#define IVG8            8
+#define IVG9            9
+#define IVG10           10
+#define IVG11           11
+#define IVG12           12
+#define IVG13           13
+#define IVG14           14
+#define IVG15           15
+
+/* IAR0 BIT FIELDS */
+#define IRQ_PLL_WAKEUP_POS	0
+#define IRQ_DMA0_ERROR_POS	4
+#define IRQ_DMAR0_BLK_POS 	8
+#define IRQ_DMAR1_BLK_POS 	12
+#define IRQ_DMAR0_OVR_POS 	16
+#define IRQ_DMAR1_OVR_POS 	20
+#define IRQ_PPI_ERROR_POS 	24
+#define IRQ_MAC_ERROR_POS 	28
+
+/* IAR1 BIT FIELDS */
+#define IRQ_SPORT0_ERROR_POS	0
+#define IRQ_SPORT1_ERROR_POS	4
+#define IRQ_PTP_ERROR_POS	8
+#define IRQ_UART0_ERROR_POS 	16
+#define IRQ_UART1_ERROR_POS 	20
+#define IRQ_RTC_POS         	24
+#define IRQ_PPI_POS         	28
+
+/* IAR2 BIT FIELDS */
+#define IRQ_SPORT0_RX_POS	0
+#define IRQ_SPORT0_TX_POS	4
+#define IRQ_RSI_POS		4
+#define IRQ_SPORT1_RX_POS	8
+#define IRQ_SPI1_POS		8
+#define IRQ_SPORT1_TX_POS	12
+#define IRQ_TWI_POS      	16
+#define IRQ_SPI0_POS      	20
+#define IRQ_UART0_RX_POS 	24
+#define IRQ_UART0_TX_POS 	28
+
+/* IAR3 BIT FIELDS */
+#define IRQ_UART1_RX_POS  	0
+#define IRQ_UART1_TX_POS  	4
+#define IRQ_OPTSEC_POS    	8
+#define IRQ_CNT_POS       	12
+#define IRQ_MAC_RX_POS    	16
+#define IRQ_PORTH_INTA_POS	20
+#define IRQ_MAC_TX_POS    	24
+#define IRQ_PORTH_INTB_POS	28
+
+/* IAR4 BIT FIELDS */
+#define IRQ_TIMER0_POS		0
+#define IRQ_TIMER1_POS		4
+#define IRQ_TIMER2_POS		8
+#define IRQ_TIMER3_POS		12
+#define IRQ_TIMER4_POS		16
+#define IRQ_TIMER5_POS		20
+#define IRQ_TIMER6_POS		24
+#define IRQ_TIMER7_POS		28
+
+/* IAR5 BIT FIELDS */
+#define IRQ_PORTG_INTA_POS	0
+#define IRQ_PORTG_INTB_POS	4
+#define IRQ_MEM_DMA0_POS  	8
+#define IRQ_MEM_DMA1_POS  	12
+#define IRQ_WATCH_POS     	16
+#define IRQ_PORTF_INTA_POS	20
+#define IRQ_PORTF_INTB_POS	24
+#define IRQ_SPI0_ERROR_POS 	28
+
+/* IAR6 BIT FIELDS */
+#define IRQ_SPI1_ERROR_POS  	0
+#define IRQ_RSI_INT0_POS   	12
+#define IRQ_RSI_INT1_POS   	16
+#define IRQ_PWM_TRIP_POS   	20
+#define IRQ_PWM_SYNC_POS   	24
+#define IRQ_PTP_STAT_POS    	28
+
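Each *_POS value above is the bit offset of a 4-bit nibble inside one of the SIC_IARx assignment registers; the nibble stores the target core priority minus 7, so IVG7 encodes as 0 and IVG15 as 8. That is the (CONFIG_IRQ_x - 7) << IRQ_x_POS pattern used by program_IAR() later in this patch. A hypothetical helper spelling the encoding out:

/* Hypothetical helper, not defined by this header: encode one IAR nibble.
 * Routing the RTC to IVG8, for instance, gives IAR_ENTRY(8, IRQ_RTC_POS),
 * i.e. 0x1 << 24 within SIC_IAR1.
 */
#define IAR_ENTRY(ivg, pos)	((unsigned long)((ivg) - 7) << (pos))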
+#endif				/* _BF518_IRQ_H_ */
diff --git a/arch/blackfin/mach-bf518/include/mach/mem_map.h b/arch/blackfin/mach-bf518/include/mach/mem_map.h
new file mode 100644
index 0000000..62bcc78
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/mem_map.h
@@ -0,0 +1,108 @@
+/*
+ * File:         arch/blackfin/mach-bf518/include/mach/mem_map.h
+ * Based on:	arch/blackfin/mach-bf527/include/mach/mem_map.h
+ * Author:	Bryan Wu <cooloney@kernel.org>
+ *
+ * Created:
+ * Description:
+ *	Common memory map header file for the Blackfin BF518/6/4/2 processors.
+ * Rev:
+ *
+ * Modified:
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _MEM_MAP_518_H_
+#define _MEM_MAP_518_H_
+
+#define COREMMR_BASE           0xFFE00000	/* Core MMRs */
+#define SYSMMR_BASE            0xFFC00000	/* System MMRs */
+
+/* Async Memory Banks */
+#define ASYNC_BANK3_BASE	0x20300000	/* Async Bank 3 */
+#define ASYNC_BANK3_SIZE	0x00100000	/* 1M */
+#define ASYNC_BANK2_BASE	0x20200000	/* Async Bank 2 */
+#define ASYNC_BANK2_SIZE	0x00100000	/* 1M */
+#define ASYNC_BANK1_BASE	0x20100000	/* Async Bank 1 */
+#define ASYNC_BANK1_SIZE	0x00100000	/* 1M */
+#define ASYNC_BANK0_BASE	0x20000000	/* Async Bank 0 */
+#define ASYNC_BANK0_SIZE	0x00100000	/* 1M */
+
+/* Boot ROM Memory */
+
+#define BOOT_ROM_START		0xEF000000
+#define BOOT_ROM_LENGTH		0x8000
+
+/* Level 1 Memory */
+
+/* Memory Map for ADSP-BF518/6/4/2 processors */
+
+#ifdef CONFIG_BFIN_ICACHE
+#define BFIN_ICACHESIZE		(16 * 1024)
+#else
+#define BFIN_ICACHESIZE		(0)
+#endif
+
+#define L1_CODE_START		0xFFA00000
+#define L1_DATA_A_START		0xFF800000
+#define L1_DATA_B_START		0xFF900000
+
+#define L1_CODE_LENGTH		0xC000
+
+#ifdef CONFIG_BFIN_DCACHE
+
+#ifdef CONFIG_BFIN_DCACHE_BANKA
+#define DMEM_CNTR (ACACHE_BSRAM | ENDCPLB | PORT_PREF0)
+#define L1_DATA_A_LENGTH	(0x8000 - 0x4000)
+#define L1_DATA_B_LENGTH	0x8000
+#define BFIN_DCACHESIZE		(16 * 1024)
+#define BFIN_DSUPBANKS		1
+#else
+#define DMEM_CNTR (ACACHE_BCACHE | ENDCPLB | PORT_PREF0)
+#define L1_DATA_A_LENGTH	(0x8000 - 0x4000)
+#define L1_DATA_B_LENGTH	(0x8000 - 0x4000)
+#define BFIN_DCACHESIZE		(32 * 1024)
+#define BFIN_DSUPBANKS		2
+#endif
+
+#else
+#define DMEM_CNTR (ASRAM_BSRAM | ENDCPLB | PORT_PREF0)
+#define L1_DATA_A_LENGTH	0x8000
+#define L1_DATA_B_LENGTH	0x8000
+#define BFIN_DCACHESIZE		0
+#define BFIN_DSUPBANKS		0
+#endif				/*CONFIG_BFIN_DCACHE */
+
+/* Level 2 Memory - none */
+
+#define L2_START	0
+#define L2_LENGTH	0
+
+/* Scratch Pad Memory */
+
+#define L1_SCRATCH_START	0xFFB00000
+#define L1_SCRATCH_LENGTH	0x1000
+
+#define GET_PDA_SAFE(preg)		\
+	preg.l = _cpu_pda;		\
+	preg.h = _cpu_pda;
+
+#define GET_PDA(preg, dreg)	GET_PDA_SAFE(preg)
+
+#endif				/* _MEM_MAP_518_H_ */
diff --git a/arch/blackfin/mach-bf518/include/mach/portmux.h b/arch/blackfin/mach-bf518/include/mach/portmux.h
new file mode 100644
index 0000000..ac16d54
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/portmux.h
@@ -0,0 +1,188 @@
+#ifndef _MACH_PORTMUX_H_
+#define _MACH_PORTMUX_H_
+
+#define MAX_RESOURCES 	MAX_BLACKFIN_GPIOS
+
+/* EMAC MII/RMII Port Mux */
+#define P_MII0_ETxD2	(P_DEFINED | P_IDENT(GPIO_PF0) | P_FUNCT(0))
+#define P_MII0_ERxD2	(P_DEFINED | P_IDENT(GPIO_PF1) | P_FUNCT(0))
+#define P_MII0_ETxD3	(P_DEFINED | P_IDENT(GPIO_PF2) | P_FUNCT(0))
+#define P_MII0_ERxD3	(P_DEFINED | P_IDENT(GPIO_PF3) | P_FUNCT(0))
+#define P_MII0_ERxCLK	(P_DEFINED | P_IDENT(GPIO_PF4) | P_FUNCT(0))
+#define P_MII0_ERxDV	(P_DEFINED | P_IDENT(GPIO_PF5) | P_FUNCT(0))
+#define P_MII0_COL	(P_DEFINED | P_IDENT(GPIO_PF6) | P_FUNCT(0))
+
+#define P_MII0_MDC	(P_DEFINED | P_IDENT(GPIO_PF8) | P_FUNCT(0))
+#define P_MII0_MDIO	(P_DEFINED | P_IDENT(GPIO_PF9) | P_FUNCT(0))
+#define P_MII0_ETxD0	(P_DEFINED | P_IDENT(GPIO_PF10) | P_FUNCT(0))
+#define P_MII0_ERxD0	(P_DEFINED | P_IDENT(GPIO_PF11) | P_FUNCT(0))
+#define P_MII0_ETxD1	(P_DEFINED | P_IDENT(GPIO_PF12) | P_FUNCT(0))
+#define P_MII0_ERxD1	(P_DEFINED | P_IDENT(GPIO_PF13) | P_FUNCT(0))
+#define P_MII0_ETxEN	(P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(0))
+#define P_MII0_PHYINT	(P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(0))
+#define P_MII0_CRS	(P_DEFINED | P_IDENT(GPIO_PG0) | P_FUNCT(0))
+#define P_MII0_ERxER	(P_DEFINED | P_IDENT(GPIO_PG1) | P_FUNCT(0))
+#define P_MII0_TxCLK	(P_DEFINED | P_IDENT(GPIO_PG2) | P_FUNCT(0))
+
+#define P_MII0 {\
+	P_MII0_ETxD0, \
+	P_MII0_ETxD1, \
+	P_MII0_ETxD2, \
+	P_MII0_ETxD3, \
+	P_MII0_ETxEN, \
+	P_MII0_TxCLK, \
+	P_MII0_PHYINT, \
+	P_MII0_COL, \
+	P_MII0_ERxD0, \
+	P_MII0_ERxD1, \
+	P_MII0_ERxD2, \
+	P_MII0_ERxD3, \
+	P_MII0_ERxDV, \
+	P_MII0_ERxCLK, \
+	P_MII0_ERxER, \
+	P_MII0_CRS, \
+	P_MII0_MDC, \
+	P_MII0_MDIO, 0}
+
+#define P_RMII0 {\
+	P_MII0_ETxD0, \
+	P_MII0_ETxD1, \
+	P_MII0_ETxEN, \
+	P_MII0_ERxD0, \
+	P_MII0_ERxD1, \
+	P_MII0_ERxER, \
+	P_MII0_TxCLK, \
+	P_MII0_PHYINT, \
+	P_MII0_CRS, \
+	P_MII0_MDC, \
+	P_MII0_MDIO, 0}
+
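The P_MII0 and P_RMII0 initializers group the pin-mux descriptors into zero-terminated lists so a driver can claim the whole interface at once. A hedged sketch using the Blackfin portmux helper peripheral_request_list(), in the way the on-chip EMAC driver does; the label string is illustrative:

static unsigned short mii_pins[] = P_MII0;

/* Sketch only: claim every MII pin in one call; a non-zero return means
 * some pin is already owned by another function.
 */
static int mii_claim_pins(void)
{
	return peripheral_request_list(mii_pins, "bfin_mii_example");
}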
+/* PPI Port Mux */
+#define P_PPI0_D0	(P_DEFINED | P_IDENT(GPIO_PF0) | P_FUNCT(1))
+#define P_PPI0_D1	(P_DEFINED | P_IDENT(GPIO_PF1) | P_FUNCT(1))
+#define P_PPI0_D2	(P_DEFINED | P_IDENT(GPIO_PF2) | P_FUNCT(1))
+#define P_PPI0_D3	(P_DEFINED | P_IDENT(GPIO_PF3) | P_FUNCT(1))
+#define P_PPI0_D4	(P_DEFINED | P_IDENT(GPIO_PF4) | P_FUNCT(1))
+#define P_PPI0_D5	(P_DEFINED | P_IDENT(GPIO_PF5) | P_FUNCT(1))
+#define P_PPI0_D6	(P_DEFINED | P_IDENT(GPIO_PF6) | P_FUNCT(1))
+#define P_PPI0_D7	(P_DEFINED | P_IDENT(GPIO_PF7) | P_FUNCT(1))
+#define P_PPI0_D8	(P_DEFINED | P_IDENT(GPIO_PF8) | P_FUNCT(1))
+#define P_PPI0_D9	(P_DEFINED | P_IDENT(GPIO_PF9) | P_FUNCT(1))
+#define P_PPI0_D10	(P_DEFINED | P_IDENT(GPIO_PF10) | P_FUNCT(1))
+#define P_PPI0_D11	(P_DEFINED | P_IDENT(GPIO_PF11) | P_FUNCT(1))
+#define P_PPI0_D12	(P_DEFINED | P_IDENT(GPIO_PF12) | P_FUNCT(1))
+#define P_PPI0_D13	(P_DEFINED | P_IDENT(GPIO_PF13) | P_FUNCT(1))
+#define P_PPI0_D14	(P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(1))
+#define P_PPI0_D15	(P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(1))
+
+#define P_PPI0_CLK	(P_DEFINED | P_IDENT(GPIO_PG12) | P_FUNCT(1))
+#define P_PPI0_FS1	(P_DEFINED | P_IDENT(GPIO_PG13) | P_FUNCT(1))
+#define P_PPI0_FS2	(P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(1))
+#define P_PPI0_FS3	(P_DEFINED | P_IDENT(GPIO_PG15) | P_FUNCT(1))
+
+/* SPI Port Mux */
+#define P_SPI0_SS	(P_DEFINED | P_IDENT(GPIO_PG11) | P_FUNCT(0))
+#define P_SPI0_SCK	(P_DEFINED | P_IDENT(GPIO_PG12) | P_FUNCT(0))
+#define P_SPI0_MISO	(P_DEFINED | P_IDENT(GPIO_PG13) | P_FUNCT(0))
+#define P_SPI0_MOSI	(P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(0))
+
+#define P_SPI0_SSEL1	(P_DEFINED | P_IDENT(GPIO_PF7) | P_FUNCT(0))
+#define P_SPI0_SSEL2	(P_DEFINED | P_IDENT(GPIO_PG15) | P_FUNCT(0))
+#define P_SPI0_SSEL3	(P_DEFINED | P_IDENT(GPIO_PH4) | P_FUNCT(2))
+#define P_SPI0_SSEL4	(P_DEFINED | P_IDENT(GPIO_PG10) | P_FUNCT(2))
+#define P_SPI0_SSEL5	(P_DEFINED | P_IDENT(GPIO_PG3) | P_FUNCT(2))
+
+#define P_SPI1_SS	(P_DEFINED | P_IDENT(GPIO_PH0) | P_FUNCT(1))
+#define P_SPI1_SCK	(P_DEFINED | P_IDENT(GPIO_PH1) | P_FUNCT(1))
+#define P_SPI1_MISO	(P_DEFINED | P_IDENT(GPIO_PH2) | P_FUNCT(1))
+#define P_SPI1_MOSI	(P_DEFINED | P_IDENT(GPIO_PH3) | P_FUNCT(1))
+
+#define P_SPI1_SSEL1	(P_DEFINED | P_IDENT(GPIO_PH6) | P_FUNCT(2))
+#define P_SPI1_SSEL2	(P_DEFINED | P_IDENT(GPIO_PF0) | P_FUNCT(2))
+#define P_SPI1_SSEL3	(P_DEFINED | P_IDENT(GPIO_PG0) | P_FUNCT(2))
+#define P_SPI1_SSEL4	(P_DEFINED | P_IDENT(GPIO_PF8) | P_FUNCT(2))
+#define P_SPI1_SSEL5	(P_DEFINED | P_IDENT(GPIO_PG11) | P_FUNCT(2))
+
+/* SPORT Port Mux */
+#define P_SPORT0_DRPRI	(P_DEFINED | P_IDENT(GPIO_PG3) | P_FUNCT(0))
+#define P_SPORT0_RSCLK	(P_DEFINED | P_IDENT(GPIO_PG4) | P_FUNCT(0))
+#define P_SPORT0_RFS	(P_DEFINED | P_IDENT(GPIO_PG5) | P_FUNCT(0))
+#define P_SPORT0_TFS	(P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(0))
+#define P_SPORT0_DTPRI	(P_DEFINED | P_IDENT(GPIO_PG7) | P_FUNCT(0))
+#define P_SPORT0_TSCLK	(P_DEFINED | P_IDENT(GPIO_PG8) | P_FUNCT(0))
+#define P_SPORT0_DTSEC	(P_DEFINED | P_IDENT(GPIO_PG9) | P_FUNCT(0))
+#define P_SPORT0_DRSEC	(P_DEFINED | P_IDENT(GPIO_PG10) | P_FUNCT(0))
+
+#define P_SPORT1_DRPRI	(P_DEFINED | P_IDENT(GPIO_PH0) | P_FUNCT(0))
+#define P_SPORT1_RFS	(P_DEFINED | P_IDENT(GPIO_PH1) | P_FUNCT(0))
+#define P_SPORT1_RSCLK	(P_DEFINED | P_IDENT(GPIO_PH2) | P_FUNCT(0))
+#define P_SPORT1_DTPRI	(P_DEFINED | P_IDENT(GPIO_PH3) | P_FUNCT(0))
+#define P_SPORT1_TFS	(P_DEFINED | P_IDENT(GPIO_PH4) | P_FUNCT(0))
+#define P_SPORT1_TSCLK	(P_DEFINED | P_IDENT(GPIO_PH5) | P_FUNCT(0))
+#define P_SPORT1_DTSEC	(P_DEFINED | P_IDENT(GPIO_PH6) | P_FUNCT(0))
+#define P_SPORT1_DRSEC	(P_DEFINED | P_IDENT(GPIO_PH7) | P_FUNCT(0))
+
+/* UART Port Mux */
+#define P_UART0_TX	(P_DEFINED | P_IDENT(GPIO_PG9) | P_FUNCT(1))
+#define P_UART0_RX	(P_DEFINED | P_IDENT(GPIO_PG10) | P_FUNCT(1))
+
+#define P_UART1_TX	(P_DEFINED | P_IDENT(GPIO_PH6) | P_FUNCT(1))
+#define P_UART1_RX	(P_DEFINED | P_IDENT(GPIO_PH7) | P_FUNCT(1))
+
+/* Timer */
+#define P_TMRCLK	(P_DEFINED | P_IDENT(GPIO_PG5) | P_FUNCT(2))
+#define P_TMR0		(P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(2))
+#define P_TMR1		(P_DEFINED | P_IDENT(GPIO_PG7) | P_FUNCT(2))
+#define P_TMR2		(P_DEFINED | P_IDENT(GPIO_PF9) | P_FUNCT(2))
+#define P_TMR3		(P_DEFINED | P_IDENT(GPIO_PF10) | P_FUNCT(2))
+#define P_TMR4		(P_DEFINED | P_IDENT(GPIO_PG9) | P_FUNCT(2))
+#define P_TMR5		(P_DEFINED | P_IDENT(GPIO_PG4) | P_FUNCT(2))
+#define P_TMR6		(P_DEFINED | P_IDENT(GPIO_PG8) | P_FUNCT(2))
+#define P_TMR7		(P_DEFINED | P_IDENT(GPIO_PH7) | P_FUNCT(2))
+
+/* DMA */
+#define P_DMAR1		(P_DEFINED | P_IDENT(GPIO_PG1) | P_FUNCT(1))
+#define P_DMAR0		(P_DEFINED | P_IDENT(GPIO_PG2) | P_FUNCT(1))
+
+/* TWI */
+#define P_TWI0_SCL	(P_DONTCARE)
+#define P_TWI0_SDA	(P_DONTCARE)
+
+/* PWM */
+#define P_PWM0_AH		(P_DEFINED | P_IDENT(GPIO_PF1) | P_FUNCT(2))
+#define P_PWM0_AL		(P_DEFINED | P_IDENT(GPIO_PF2) | P_FUNCT(2))
+#define P_PWM0_BH		(P_DEFINED | P_IDENT(GPIO_PF3) | P_FUNCT(2))
+#define P_PWM0_BL		(P_DEFINED | P_IDENT(GPIO_PF4) | P_FUNCT(2))
+#define P_PWM0_CH		(P_DEFINED | P_IDENT(GPIO_PF5) | P_FUNCT(2))
+#define P_PWM0_CL		(P_DEFINED | P_IDENT(GPIO_PF6) | P_FUNCT(2))
+#define P_PWM0_SYNC		(P_DEFINED | P_IDENT(GPIO_PF7) | P_FUNCT(2))
+
+#define P_PWM1_AH		(P_DEFINED | P_IDENT(GPIO_PF11) | P_FUNCT(2))
+#define P_PWM1_AL		(P_DEFINED | P_IDENT(GPIO_PF12) | P_FUNCT(2))
+#define P_PWM1_BH		(P_DEFINED | P_IDENT(GPIO_PF13) | P_FUNCT(2))
+#define P_PWM1_BL		(P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(2))
+#define P_PWM1_CH		(P_DEFINED | P_IDENT(GPIO_PG1) | P_FUNCT(2))
+#define P_PWM1_CL		(P_DEFINED | P_IDENT(GPIO_PG2) | P_FUNCT(2))
+#define P_PWM1_SYNC		(P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(2))
+
+#define P_PWM_TRIPB		(P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(2))
+
+/* RSI */
+#define P_RSI_DATA0		(P_DEFINED | P_IDENT(GPIO_PG3) | P_FUNCT(1))
+#define P_RSI_DATA1		(P_DEFINED | P_IDENT(GPIO_PG4) | P_FUNCT(1))
+#define P_RSI_DATA2		(P_DEFINED | P_IDENT(GPIO_PG5) | P_FUNCT(1))
+#define P_RSI_DATA3		(P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(1))
+#define P_RSI_DATA4		(P_DEFINED | P_IDENT(GPIO_PH0) | P_FUNCT(2))
+#define P_RSI_DATA5		(P_DEFINED | P_IDENT(GPIO_PH1) | P_FUNCT(2))
+#define P_RSI_DATA6		(P_DEFINED | P_IDENT(GPIO_PH2) | P_FUNCT(2))
+#define P_RSI_DATA7		(P_DEFINED | P_IDENT(GPIO_PH3) | P_FUNCT(2))
+#define P_RSI_CMD		(P_DEFINED | P_IDENT(GPIO_PG7) | P_FUNCT(1))
+#define P_RSI_CLK		(P_DEFINED | P_IDENT(GPIO_PG8) | P_FUNCT(1))
+
+/* PTP */
+#define P_PTP_PPS		(P_DEFINED | P_IDENT(GPIO_PG12) | P_FUNCT(2))
+#define P_PTP_CLKOUT		(P_DEFINED | P_IDENT(GPIO_PG13) | P_FUNCT(2))
+
+#define P_HWAIT		(P_DEFINED | P_IDENT(GPIO_PG000000000) | P_FUNCT(1))
+
+#endif				/* _MACH_PORTMUX_H_ */
diff --git a/arch/blackfin/mach-bf518/ints-priority.c b/arch/blackfin/mach-bf518/ints-priority.c
new file mode 100644
index 0000000..3151fd5
--- /dev/null
+++ b/arch/blackfin/mach-bf518/ints-priority.c
@@ -0,0 +1,99 @@
+/*
+ * File:         arch/blackfin/mach-bf518/ints-priority.c
+ * Based on:     arch/blackfin/mach-bf527/ints-priority.c
+ * Author:       Bryan Wu <cooloney@kernel.org>
+ *
+ * Created:
+ * Description:  Set up the interrupt priorities
+ *
+ * Modified:
+ *               Copyright 2004-2007 Analog Devices Inc.
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <asm/blackfin.h>
+
+void __init program_IAR(void)
+{
+	/* Program the IAR0 Register with the configured priority */
+	bfin_write_SIC_IAR0(((CONFIG_IRQ_PLL_WAKEUP - 7) << IRQ_PLL_WAKEUP_POS) |
+			((CONFIG_IRQ_DMA0_ERROR - 7) << IRQ_DMA0_ERROR_POS) |
+			((CONFIG_IRQ_DMAR0_BLK - 7) << IRQ_DMAR0_BLK_POS) |
+			((CONFIG_IRQ_DMAR1_BLK - 7) << IRQ_DMAR1_BLK_POS) |
+			((CONFIG_IRQ_DMAR0_OVR - 7) << IRQ_DMAR0_OVR_POS) |
+			((CONFIG_IRQ_DMAR1_OVR - 7) << IRQ_DMAR1_OVR_POS) |
+			((CONFIG_IRQ_PPI_ERROR - 7) << IRQ_PPI_ERROR_POS) |
+			((CONFIG_IRQ_MAC_ERROR - 7) << IRQ_MAC_ERROR_POS));
+
+
+	bfin_write_SIC_IAR1(((CONFIG_IRQ_SPORT0_ERROR - 7) << IRQ_SPORT0_ERROR_POS) |
+			((CONFIG_IRQ_SPORT1_ERROR - 7) << IRQ_SPORT1_ERROR_POS) |
+			((CONFIG_IRQ_PTP_ERROR - 7) << IRQ_PTP_ERROR_POS) |
+			((CONFIG_IRQ_UART0_ERROR - 7) << IRQ_UART0_ERROR_POS) |
+			((CONFIG_IRQ_UART1_ERROR - 7) << IRQ_UART1_ERROR_POS) |
+			((CONFIG_IRQ_RTC - 7) << IRQ_RTC_POS) |
+			((CONFIG_IRQ_PPI - 7) << IRQ_PPI_POS));
+
+	bfin_write_SIC_IAR2(((CONFIG_IRQ_SPORT0_RX - 7) << IRQ_SPORT0_RX_POS) |
+			((CONFIG_IRQ_SPORT0_TX - 7) << IRQ_SPORT0_TX_POS) |
+			((CONFIG_IRQ_SPORT1_RX - 7) << IRQ_SPORT1_RX_POS) |
+			((CONFIG_IRQ_SPORT1_TX - 7) << IRQ_SPORT1_TX_POS) |
+			((CONFIG_IRQ_TWI - 7) << IRQ_TWI_POS) |
+			((CONFIG_IRQ_SPI0 - 7) << IRQ_SPI0_POS) |
+			((CONFIG_IRQ_UART0_RX - 7) << IRQ_UART0_RX_POS) |
+			((CONFIG_IRQ_UART0_TX - 7) << IRQ_UART0_TX_POS));
+
+	bfin_write_SIC_IAR3(((CONFIG_IRQ_UART1_RX - 7) << IRQ_UART1_RX_POS) |
+			((CONFIG_IRQ_UART1_TX - 7) << IRQ_UART1_TX_POS) |
+			((CONFIG_IRQ_OPTSEC - 7) << IRQ_OPTSEC_POS) |
+			((CONFIG_IRQ_CNT - 7) << IRQ_CNT_POS) |
+			((CONFIG_IRQ_MAC_RX - 7) << IRQ_MAC_RX_POS) |
+			((CONFIG_IRQ_PORTH_INTA - 7) << IRQ_PORTH_INTA_POS) |
+			((CONFIG_IRQ_MAC_TX - 7) << IRQ_MAC_TX_POS) |
+			((CONFIG_IRQ_PORTH_INTB - 7) << IRQ_PORTH_INTB_POS));
+
+	bfin_write_SIC_IAR4(((CONFIG_IRQ_TIMER0 - 7) << IRQ_TIMER0_POS) |
+			((CONFIG_IRQ_TIMER1 - 7) << IRQ_TIMER1_POS) |
+			((CONFIG_IRQ_TIMER2 - 7) << IRQ_TIMER2_POS) |
+			((CONFIG_IRQ_TIMER3 - 7) << IRQ_TIMER3_POS) |
+			((CONFIG_IRQ_TIMER4 - 7) << IRQ_TIMER4_POS) |
+			((CONFIG_IRQ_TIMER5 - 7) << IRQ_TIMER5_POS) |
+			((CONFIG_IRQ_TIMER6 - 7) << IRQ_TIMER6_POS) |
+			((CONFIG_IRQ_TIMER7 - 7) << IRQ_TIMER7_POS));
+
+	bfin_write_SIC_IAR5(((CONFIG_IRQ_PORTG_INTA - 7) << IRQ_PORTG_INTA_POS) |
+			((CONFIG_IRQ_PORTG_INTB - 7) << IRQ_PORTG_INTB_POS) |
+			((CONFIG_IRQ_MEM_DMA0 - 7) << IRQ_MEM_DMA0_POS) |
+			((CONFIG_IRQ_MEM_DMA1 - 7) << IRQ_MEM_DMA1_POS) |
+			((CONFIG_IRQ_WATCH - 7) << IRQ_WATCH_POS) |
+			((CONFIG_IRQ_PORTF_INTA - 7) << IRQ_PORTF_INTA_POS) |
+			((CONFIG_IRQ_PORTF_INTB - 7) << IRQ_PORTF_INTB_POS) |
+			((CONFIG_IRQ_SPI0_ERROR - 7) << IRQ_SPI0_ERROR_POS));
+
+	bfin_write_SIC_IAR6(((CONFIG_IRQ_SPI1_ERROR - 7) << IRQ_SPI1_ERROR_POS) |
+			((CONFIG_IRQ_RSI_INT0 - 7) << IRQ_RSI_INT0_POS) |
+			((CONFIG_IRQ_RSI_INT1 - 7) << IRQ_RSI_INT1_POS) |
+			((CONFIG_IRQ_PWM_TRIP - 7) << IRQ_PWM_TRIP_POS) |
+			((CONFIG_IRQ_PWM_SYNC - 7) << IRQ_PWM_SYNC_POS) |
+			((CONFIG_IRQ_PTP_STAT - 7) << IRQ_PTP_STAT_POS));
+
+	SSYNC();
+}
diff --git a/arch/blackfin/mach-bf527/Kconfig b/arch/blackfin/mach-bf527/Kconfig
index 3cde4be..8438ec6 100644
--- a/arch/blackfin/mach-bf527/Kconfig
+++ b/arch/blackfin/mach-bf527/Kconfig
@@ -168,29 +168,29 @@
 config IRQ_PORTH_INTB
 	int "IRQ_PORTH_INTB"
 	default 11
-config IRQ_TMR0
-	int "IRQ_TMR0"
+config IRQ_TIMER0
+	int "IRQ_TIMER0"
+	default 8
+config IRQ_TIMER1
+	int "IRQ_TIMER1"
 	default 12
-config IRQ_TMR1
-	int "IRQ_TMR1"
+config IRQ_TIMER2
+	int "IRQ_TIMER2"
 	default 12
-config IRQ_TMR2
-	int "IRQ_TMR2"
+config IRQ_TIMER3
+	int "IRQ_TIMER3"
 	default 12
-config IRQ_TMR3
-	int "IRQ_TMR3"
+config IRQ_TIMER4
+	int "IRQ_TIMER4"
 	default 12
-config IRQ_TMR4
-	int "IRQ_TMR4"
+config IRQ_TIMER5
+	int "IRQ_TIMER5"
 	default 12
-config IRQ_TMR5
-	int "IRQ_TMR5"
+config IRQ_TIMER6
+	int "IRQ_TIMER6"
 	default 12
-config IRQ_TMR6
-	int "IRQ_TMR6"
-	default 12
-config IRQ_TMR7
-	int "IRQ_TMR7"
+config IRQ_TIMER7
+	int "IRQ_TIMER7"
 	default 12
 config IRQ_PORTG_INTA
 	int "IRQ_PORTG_INTA"
diff --git a/arch/blackfin/mach-bf527/Makefile b/arch/blackfin/mach-bf527/Makefile
index 4eddb58..4a6cdaf 100644
--- a/arch/blackfin/mach-bf527/Makefile
+++ b/arch/blackfin/mach-bf527/Makefile
@@ -2,6 +2,4 @@
 # arch/blackfin/mach-bf527/Makefile
 #
 
-extra-y := head.o
-
 obj-y := ints-priority.o dma.o
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 9ea440b..a2c3578 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -61,51 +61,40 @@
  *  Driver needs to know address, irq and flag pin.
  */
 
-#define ISP1761_BASE       0x203C0000
-#define ISP1761_IRQ        IRQ_PF7
-
 #if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
-static struct resource bfin_isp1761_resources[] = {
+#include <linux/usb/isp1760.h>
+static struct resource bfin_isp1760_resources[] = {
 	[0] = {
-		.name	= "isp1761-regs",
-		.start  = ISP1761_BASE + 0x00000000,
-		.end    = ISP1761_BASE + 0x000fffff,
+		.start  = 0x203C0000,
+		.end    = 0x203C0000 + 0x000fffff,
 		.flags  = IORESOURCE_MEM,
 	},
 	[1] = {
-		.start  = ISP1761_IRQ,
-		.end    = ISP1761_IRQ,
+		.start  = IRQ_PF7,
+		.end    = IRQ_PF7,
 		.flags  = IORESOURCE_IRQ,
 	},
 };
 
-static struct platform_device bfin_isp1761_device = {
-	.name           = "isp1761",
+static struct isp1760_platform_data isp1760_priv = {
+	.is_isp1761 = 0,
+	.port1_disable = 0,
+	.bus_width_16 = 1,
+	.port1_otg = 0,
+	.analog_oc = 0,
+	.dack_polarity_high = 0,
+	.dreq_polarity_high = 0,
+};
+
+static struct platform_device bfin_isp1760_device = {
+	.name           = "isp1760-hcd",
 	.id             = 0,
-	.num_resources  = ARRAY_SIZE(bfin_isp1761_resources),
-	.resource       = bfin_isp1761_resources,
+	.dev = {
+		.platform_data = &isp1760_priv,
+	},
+	.num_resources  = ARRAY_SIZE(bfin_isp1760_resources),
+	.resource       = bfin_isp1760_resources,
 };
-
-static struct platform_device *bfin_isp1761_devices[] = {
-	&bfin_isp1761_device,
-};
-
-int __init bfin_isp1761_init(void)
-{
-	unsigned int num_devices = ARRAY_SIZE(bfin_isp1761_devices);
-
-	printk(KERN_INFO "%s(): registering device resources\n", __func__);
-	set_irq_type(ISP1761_IRQ, IRQF_TRIGGER_FALLING);
-
-	return platform_add_devices(bfin_isp1761_devices, num_devices);
-}
-
-void __exit bfin_isp1761_exit(void)
-{
-	platform_device_unregister(&bfin_isp1761_device);
-}
-
-arch_initcall(bfin_isp1761_init);
 #endif
 
 #if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
@@ -132,8 +121,8 @@
 	.dyn_fifo	= 0,
 	.soft_con	= 1,
 	.dma		= 1,
-	.num_eps	= 7,
-	.dma_channels	= 7,
+	.num_eps	= 8,
+	.dma_channels	= 8,
 	.gpio_vrsel	= GPIO_PF11,
 };
 
@@ -728,30 +717,59 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
 #endif
 #ifdef CONFIG_BFIN_SIR1
+static struct resource bfin_sir1_resources[] = {
 	{
 		.start = 0xFFC02000,
 		.end = 0xFFC020FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART1_RX,
+		.end = IRQ_UART1_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_RX,
+		.end = CH_UART1_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir1_device = {
 	.name = "bfin_sir",
-	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
+	.resource = bfin_sir1_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
 static struct resource bfin_twi0_resource[] = {
@@ -885,6 +903,10 @@
 	&isp1362_hcd_device,
 #endif
 
+#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
+	&bfin_isp1760_device,
+#endif
+
 #if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
 	&musb_device,
 #endif
@@ -918,7 +940,12 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#ifdef CONFIG_BFIN_SIR1
+	&bfin_sir1_device,
+#endif
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 36c87b6..0314bd3 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -51,7 +51,7 @@
 /*
  * Name the Board for the /proc/cpuinfo
  */
-const char bfin_board_name[] = "BF526-EZBRD";
+const char bfin_board_name[] = "ADI BF526-EZBRD";
 
 /*
  *  Driver needs to know address, irq and flag pin.
@@ -81,8 +81,8 @@
 	.dyn_fifo	= 0,
 	.soft_con	= 1,
 	.dma		= 1,
-	.num_eps	= 7,
-	.dma_channels	= 7,
+	.num_eps	= 8,
+	.dma_channels	= 8,
 	.gpio_vrsel	= GPIO_PG13,
 };
 
@@ -288,6 +288,30 @@
 };
 #endif
 
+#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE)
+#include <linux/spi/ad7879.h>
+static const struct ad7879_platform_data bfin_ad7879_ts_info = {
+	.model			= 7879,	/* Model = AD7879 */
+	.x_plate_ohms		= 620,	/* 620 Ohm from the touch datasheet */
+	.pressure_max		= 10000,
+	.pressure_min		= 0,
+	.first_conversion_delay = 3,	/* wait 512us before doing the first conversion */
+	.acquisition_time 	= 1,	/* 4us acquisition time per sample */
+	.median			= 2,	/* do 8 measurements */
+	.averaging 		= 1,	/* take the average of 4 middle samples */
+	.pen_down_acc_interval 	= 255,	/* 9.4 ms */
+	.gpio_output		= 1,	/* configure AUX/VBAT/GPIO as GPIO output */
+	.gpio_default 		= 1,	/* During initialization set GPIO = HIGH */
+};
+#endif
+
+#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
+static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
+	.enable_dma = 0,
+	.bits_per_word = 16,
+};
+#endif
+
 #if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
 	 && defined(CONFIG_SND_SOC_WM8731_SPI)
 static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
@@ -386,6 +410,18 @@
 		.controller_data = &spi_ad7877_chip_info,
 	},
 #endif
+#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
+	{
+		.modalias = "ad7879",
+		.platform_data = &bfin_ad7879_ts_info,
+		.irq = IRQ_PG0,
+		.max_speed_hz = 5000000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num = 0,
+		.chip_select = 5,
+		.controller_data = &spi_ad7879_chip_info,
+		.mode = SPI_CPHA | SPI_CPOL,
+	},
+#endif
 #if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
 	 && defined(CONFIG_SND_SOC_WM8731_SPI)
 	{
@@ -478,30 +514,59 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
 #endif
 #ifdef CONFIG_BFIN_SIR1
+static struct resource bfin_sir1_resources[] = {
 	{
 		.start = 0xFFC02000,
 		.end = 0xFFC020FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART1_RX,
+		.end = IRQ_UART1_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_RX,
+		.end = CH_UART1_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir1_device = {
 	.name = "bfin_sir",
-	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
+	.resource = bfin_sir1_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
 static struct resource bfin_twi0_resource[] = {
@@ -671,7 +736,12 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#ifdef CONFIG_BFIN_SIR1
+	&bfin_sir1_device,
+#endif
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 8ee2b74..9454fb7 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -54,57 +54,46 @@
 /*
  * Name the Board for the /proc/cpuinfo
  */
-const char bfin_board_name[] = "ADDS-BF527-EZKIT";
+const char bfin_board_name[] = "ADI BF527-EZKIT";
 
 /*
  *  Driver needs to know address, irq and flag pin.
  */
 
-#define ISP1761_BASE       0x203C0000
-#define ISP1761_IRQ        IRQ_PF7
-
 #if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
-static struct resource bfin_isp1761_resources[] = {
+#include <linux/usb/isp1760.h>
+static struct resource bfin_isp1760_resources[] = {
 	[0] = {
-		.name	= "isp1761-regs",
-		.start  = ISP1761_BASE + 0x00000000,
-		.end    = ISP1761_BASE + 0x000fffff,
+		.start  = 0x203C0000,
+		.end    = 0x203C0000 + 0x000fffff,
 		.flags  = IORESOURCE_MEM,
 	},
 	[1] = {
-		.start  = ISP1761_IRQ,
-		.end    = ISP1761_IRQ,
+		.start  = IRQ_PF7,
+		.end    = IRQ_PF7,
 		.flags  = IORESOURCE_IRQ,
 	},
 };
 
-static struct platform_device bfin_isp1761_device = {
-	.name           = "isp1761",
+static struct isp1760_platform_data isp1760_priv = {
+	.is_isp1761 = 0,
+	.port1_disable = 0,
+	.bus_width_16 = 1,
+	.port1_otg = 0,
+	.analog_oc = 0,
+	.dack_polarity_high = 0,
+	.dreq_polarity_high = 0,
+};
+
+static struct platform_device bfin_isp1760_device = {
+	.name           = "isp1760-hcd",
 	.id             = 0,
-	.num_resources  = ARRAY_SIZE(bfin_isp1761_resources),
-	.resource       = bfin_isp1761_resources,
+	.dev = {
+		.platform_data = &isp1760_priv,
+	},
+	.num_resources  = ARRAY_SIZE(bfin_isp1760_resources),
+	.resource       = bfin_isp1760_resources,
 };
-
-static struct platform_device *bfin_isp1761_devices[] = {
-	&bfin_isp1761_device,
-};
-
-int __init bfin_isp1761_init(void)
-{
-	unsigned int num_devices = ARRAY_SIZE(bfin_isp1761_devices);
-
-	printk(KERN_INFO "%s(): registering device resources\n", __func__);
-	set_irq_type(ISP1761_IRQ, IRQF_TRIGGER_FALLING);
-
-	return platform_add_devices(bfin_isp1761_devices, num_devices);
-}
-
-void __exit bfin_isp1761_exit(void)
-{
-	platform_device_unregister(&bfin_isp1761_device);
-}
-
-arch_initcall(bfin_isp1761_init);
 #endif
 
 #if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
@@ -131,8 +120,8 @@
 	.dyn_fifo	= 0,
 	.soft_con	= 1,
 	.dma		= 1,
-	.num_eps	= 7,
-	.dma_channels	= 7,
+	.num_eps	= 8,
+	.dma_channels	= 8,
 	.gpio_vrsel	= GPIO_PG13,
 };
 
@@ -515,13 +504,6 @@
 };
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-static struct bfin5xx_spi_chip spi_mmc_chip_info = {
-	.enable_dma = 1,
-	.bits_per_word = 8,
-};
-#endif
-
 #if defined(CONFIG_PBX)
 static struct bfin5xx_spi_chip spi_si3xxx_chip_info = {
 	.ctl_reg	= 0x4, /* send zero */
@@ -552,6 +534,30 @@
 };
 #endif
 
+#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE)
+#include <linux/spi/ad7879.h>
+static const struct ad7879_platform_data bfin_ad7879_ts_info = {
+	.model			= 7879,	/* Model = AD7879 */
+	.x_plate_ohms		= 620,	/* 620 Ohm from the touch datasheet */
+	.pressure_max		= 10000,
+	.pressure_min		= 0,
+	.first_conversion_delay = 3,	/* wait 512us before doing the first conversion */
+	.acquisition_time 	= 1,	/* 4us acquisition time per sample */
+	.median			= 2,	/* do 8 measurements */
+	.averaging 		= 1,	/* take the average of 4 middle samples */
+	.pen_down_acc_interval 	= 255,	/* 9.4 ms */
+	.gpio_output		= 1,	/* configure AUX/VBAT/GPIO as GPIO output */
+	.gpio_default 		= 1,	/* During initialization set GPIO = HIGH */
+};
+#endif
+
+#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
+static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
+	.enable_dma = 0,
+	.bits_per_word = 16,
+};
+#endif
+
 #if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
 	 && defined(CONFIG_SND_SOC_WM8731_SPI)
 static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
@@ -613,26 +619,6 @@
 		.controller_data = &ad9960_spi_chip_info,
 	},
 #endif
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-	{
-		.modalias = "spi_mmc_dummy",
-		.max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
-		.bus_num = 0,
-		.chip_select = 0,
-		.platform_data = NULL,
-		.controller_data = &spi_mmc_chip_info,
-		.mode = SPI_MODE_3,
-	},
-	{
-		.modalias = "spi_mmc",
-		.max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
-		.bus_num = 0,
-		.chip_select = CONFIG_SPI_MMC_CS_CHAN,
-		.platform_data = NULL,
-		.controller_data = &spi_mmc_chip_info,
-		.mode = SPI_MODE_3,
-	},
-#endif
 #if defined(CONFIG_PBX)
 	{
 		.modalias = "fxs-spi",
@@ -662,6 +648,18 @@
 		.controller_data = &spi_ad7877_chip_info,
 	},
 #endif
+#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
+	{
+		.modalias = "ad7879",
+		.platform_data = &bfin_ad7879_ts_info,
+		.irq = IRQ_PF8,
+		.max_speed_hz = 5000000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num = 0,
+		.chip_select = 3,
+		.controller_data = &spi_ad7879_chip_info,
+		.mode = SPI_CPHA | SPI_CPOL,
+	},
+#endif
 #if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
 	 && defined(CONFIG_SND_SOC_WM8731_SPI)
 	{
@@ -756,30 +754,59 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
 #endif
 #ifdef CONFIG_BFIN_SIR1
+static struct resource bfin_sir1_resources[] = {
 	{
 		.start = 0xFFC02000,
 		.end = 0xFFC020FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART1_RX,
+		.end = IRQ_UART1_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_RX,
+		.end = CH_UART1_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir1_device = {
 	.name = "bfin_sir",
-	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
+	.resource = bfin_sir1_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
 static struct resource bfin_twi0_resource[] = {
@@ -944,6 +971,10 @@
 	&isp1362_hcd_device,
 #endif
 
+#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
+	&bfin_isp1760_device,
+#endif
+
 #if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
 	&musb_device,
 #endif
@@ -985,7 +1016,12 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#ifdef CONFIG_BFIN_SIR1
+	&bfin_sir1_device,
+#endif
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
diff --git a/arch/blackfin/mach-bf527/dma.c b/arch/blackfin/mach-bf527/dma.c
index dfd080c..2318775 100644
--- a/arch/blackfin/mach-bf527/dma.c
+++ b/arch/blackfin/mach-bf527/dma.c
@@ -31,7 +31,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_BLACKFIN_DMA_CHANNEL] = {
+struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf527/head.S b/arch/blackfin/mach-bf527/head.S
deleted file mode 100644
index 0eb1da8..0000000
--- a/arch/blackfin/mach-bf527/head.S
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * File:         arch/blackfin/mach-bf527/head.S
- * Based on:     arch/blackfin/mach-bf533/head.S
- * Author:       Jeff Dionne <jeff@uclinux.org> COPYRIGHT 1998 D. Jeff Dionne
- *
- * Created:      1998
- * Description:  Startup code for Blackfin BF537
- *
- * Modified:
- *               Copyright 2004-2007 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/blackfin.h>
-#ifdef CONFIG_BFIN_KERNEL_CLOCK
-#include <asm/clocks.h>
-#include <mach/mem_init.h>
-#endif
-
-.section .l1.text
-#ifdef CONFIG_BFIN_KERNEL_CLOCK
-ENTRY(_start_dma_code)
-
-	/* Enable PHY CLK buffer output */
-	p0.h = hi(VR_CTL);
-	p0.l = lo(VR_CTL);
-	r0.l = w[p0];
-	bitset(r0, 14);
-	w[p0] = r0.l;
-	ssync;
-
-	p0.h = hi(SIC_IWR0);
-	p0.l = lo(SIC_IWR0);
-	r0.l = 0x1;
-	r0.h = 0x0;
-	[p0] = r0;
-	SSYNC;
-
-	/*
-	 *  Set PLL_CTL
-	 *   - [14:09] = MSEL[5:0] : CLKIN / VCO multiplication factors
-	 *   - [8]     = BYPASS    : BYPASS the PLL, run CLKIN into CCLK/SCLK
-	 *   - [7]     = output delay (add 200ps of delay to mem signals)
-	 *   - [6]     = input delay (add 200ps of input delay to mem signals)
-	 *   - [5]     = PDWN      : 1=All Clocks off
-	 *   - [3]     = STOPCK    : 1=Core Clock off
-	 *   - [1]     = PLL_OFF   : 1=Disable Power to PLL
-	 *   - [0]     = DF        : 1=Pass CLKIN/2 to PLL / 0=Pass CLKIN to PLL
-	 *   all other bits set to zero
-	 */
-
-	p0.h = hi(PLL_LOCKCNT);
-	p0.l = lo(PLL_LOCKCNT);
-	r0 = 0x300(Z);
-	w[p0] = r0.l;
-	ssync;
-
-	P2.H = hi(EBIU_SDGCTL);
-	P2.L = lo(EBIU_SDGCTL);
-	R0 = [P2];
-	BITSET (R0, 24);
-	[P2] = R0;
-	SSYNC;
-
-	r0 = CONFIG_VCO_MULT & 63;       /* Load the VCO multiplier         */
-	r0 = r0 << 9;                    /* Shift it over,                  */
-	r1 = CLKIN_HALF;                 /* Do we need to divide CLKIN by 2?*/
-	r0 = r1 | r0;
-	r1 = PLL_BYPASS;                 /* Bypass the PLL?                 */
-	r1 = r1 << 8;                    /* Shift it over                   */
-	r0 = r1 | r0;                    /* add them all together           */
-#ifdef ANOMALY_05000265
-	BITSET(r0, 15);                  /* Add 250 mV of hysteresis to SPORT input pins */
-#endif
-
-	p0.h = hi(PLL_CTL);
-	p0.l = lo(PLL_CTL);              /* Load the address                */
-	cli r2;                          /* Disable interrupts              */
-	ssync;
-	w[p0] = r0.l;                    /* Set the value                   */
-	idle;                            /* Wait for the PLL to stablize    */
-	sti r2;                          /* Enable interrupts               */
-
-.Lcheck_again:
-	p0.h = hi(PLL_STAT);
-	p0.l = lo(PLL_STAT);
-	R0 = W[P0](Z);
-	CC = BITTST(R0,5);
-	if ! CC jump .Lcheck_again;
-
-	/* Configure SCLK & CCLK Dividers */
-	r0 = (CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV);
-	p0.h = hi(PLL_DIV);
-	p0.l = lo(PLL_DIV);
-	w[p0] = r0.l;
-	ssync;
-
-	p0.l = lo(EBIU_SDRRC);
-	p0.h = hi(EBIU_SDRRC);
-	r0 = mem_SDRRC;
-	w[p0] = r0.l;
-	ssync;
-
-	P2.H = hi(EBIU_SDGCTL);
-	P2.L = lo(EBIU_SDGCTL);
-	R0 = [P2];
-	BITCLR (R0, 24);
-	p0.h = hi(EBIU_SDSTAT);
-	p0.l = lo(EBIU_SDSTAT);
-	r2.l = w[p0];
-	cc = bittst(r2,3);
-	if !cc jump .Lskip;
-	NOP;
-	BITSET (R0, 23);
-.Lskip:
-	[P2] = R0;
-	SSYNC;
-
-	R0.L = lo(mem_SDGCTL);
-	R0.H = hi(mem_SDGCTL);
-	R1 = [p2];
-	R1 = R1 | R0;
-	[P2] = R1;
-	SSYNC;
-
-	RTS;
-ENDPROC(_start_dma_code)
-#endif /* CONFIG_BFIN_KERNEL_CLOCK */
diff --git a/arch/blackfin/mach-bf527/include/mach/anomaly.h b/arch/blackfin/mach-bf527/include/mach/anomaly.h
index 62373e6..035e8d8 100644
--- a/arch/blackfin/mach-bf527/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf527/include/mach/anomaly.h
@@ -28,7 +28,7 @@
 /* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
 #define ANOMALY_05000074 (1)
 /* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
-#define ANOMALY_05000119 (1)
+#define ANOMALY_05000119 (1)	/* note: brokenness is noted in documentation, not anomaly sheet */
 /* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
 #define ANOMALY_05000122 (1)
 /* Spurious Hardware Error from an Access in the Shadow of a Conditional Branch */
@@ -37,8 +37,6 @@
 #define ANOMALY_05000265 (1)
 /* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
 #define ANOMALY_05000310 (1)
-/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
-#define ANOMALY_05000312 (ANOMALY_BF527)
 /* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
 #define ANOMALY_05000313 (__SILICON_REVISION__ < 2)
 /* Incorrect Access of OTP_STATUS During otp_write() Function */
@@ -153,6 +151,10 @@
 #define ANOMALY_05000430 (ANOMALY_BF527 && __SILICON_REVISION__ > 1)
 /* bfrom_SysControl() Does Not Clear SIC_IWR1 Before Executing PLL Programming Sequence */
 #define ANOMALY_05000432 (ANOMALY_BF526)
+/* Certain SIC Registers are not Reset After Soft or Core Double Fault Reset */
+#define ANOMALY_05000435 ((ANOMALY_BF526 && __SILICON_REVISION__ < 1) || ANOMALY_BF527)
+/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
+#define ANOMALY_05000443 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000125 (0)
@@ -168,7 +170,9 @@
 #define ANOMALY_05000285 (0)
 #define ANOMALY_05000307 (0)
 #define ANOMALY_05000311 (0)
+#define ANOMALY_05000312 (0)
 #define ANOMALY_05000323 (0)
 #define ANOMALY_05000363 (0)
+#define ANOMALY_05000412 (0)
 
 #endif
diff --git a/arch/blackfin/mach-bf527/include/mach/bf527.h b/arch/blackfin/mach-bf527/include/mach/bf527.h
index 144f08d..3832aab 100644
--- a/arch/blackfin/mach-bf527/include/mach/bf527.h
+++ b/arch/blackfin/mach-bf527/include/mach/bf527.h
@@ -110,7 +110,7 @@
 
 #ifdef CONFIG_BF527
 #define CPU "BF527"
-#define CPUID 0x27e4
+#define CPUID 0x27e0
 #endif
 #ifdef CONFIG_BF526
 #define CPU "BF526"
@@ -118,7 +118,7 @@
 #endif
 #ifdef CONFIG_BF525
 #define CPU "BF525"
-#define CPUID 0x27e4
+#define CPUID 0x27e0
 #endif
 #ifdef CONFIG_BF524
 #define CPU "BF524"
@@ -126,7 +126,7 @@
 #endif
 #ifdef CONFIG_BF523
 #define CPU "BF523"
-#define CPUID 0x27e4
+#define CPUID 0x27e0
 #endif
 #ifdef CONFIG_BF522
 #define CPU "BF522"
@@ -134,7 +134,7 @@
 #endif
 
 #ifndef CPU
-#error Unknown CPU type - This kernel doesn't seem to be configured properly
+#error "Unknown CPU type - This kernel doesn't seem to be configured properly"
 #endif
 
 #endif				/* __MACH_BF527_H__  */
diff --git a/arch/blackfin/mach-bf527/include/mach/bfin_sir.h b/arch/blackfin/mach-bf527/include/mach/bfin_sir.h
deleted file mode 100644
index cfd8ad4..0000000
--- a/arch/blackfin/mach-bf527/include/mach/bfin_sir.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Blackfin Infra-red Driver
- *
- * Copyright 2006-2008 Analog Devices Inc.
- *
- * Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
- *
- */
-
-#include <linux/serial.h>
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#define SIR_UART_GET_CHAR(port)   bfin_read16((port)->membase + OFFSET_RBR)
-#define SIR_UART_GET_DLL(port)    bfin_read16((port)->membase + OFFSET_DLL)
-#define SIR_UART_GET_IER(port)    bfin_read16((port)->membase + OFFSET_IER)
-#define SIR_UART_GET_DLH(port)    bfin_read16((port)->membase + OFFSET_DLH)
-#define SIR_UART_GET_IIR(port)    bfin_read16((port)->membase + OFFSET_IIR)
-#define SIR_UART_GET_LCR(port)    bfin_read16((port)->membase + OFFSET_LCR)
-#define SIR_UART_GET_GCTL(port)   bfin_read16((port)->membase + OFFSET_GCTL)
-
-#define SIR_UART_PUT_CHAR(port, v) bfin_write16(((port)->membase + OFFSET_THR), v)
-#define SIR_UART_PUT_DLL(port, v)  bfin_write16(((port)->membase + OFFSET_DLL), v)
-#define SIR_UART_PUT_IER(port, v)  bfin_write16(((port)->membase + OFFSET_IER), v)
-#define SIR_UART_PUT_DLH(port, v)  bfin_write16(((port)->membase + OFFSET_DLH), v)
-#define SIR_UART_PUT_LCR(port, v)  bfin_write16(((port)->membase + OFFSET_LCR), v)
-#define SIR_UART_PUT_GCTL(port, v) bfin_write16(((port)->membase + OFFSET_GCTL), v)
-
-#ifdef CONFIG_SIR_BFIN_DMA
-struct dma_rx_buf {
-	char *buf;
-	int head;
-	int tail;
-	};
-#endif /* CONFIG_SIR_BFIN_DMA */
-
-struct bfin_sir_port {
-	unsigned char __iomem   *membase;
-	unsigned int            irq;
-	unsigned int            lsr;
-	unsigned long           clk;
-	struct net_device       *dev;
-#ifdef CONFIG_SIR_BFIN_DMA
-	int                     tx_done;
-	struct dma_rx_buf       rx_dma_buf;
-	struct timer_list       rx_dma_timer;
-	int                     rx_dma_nrows;
-#endif /* CONFIG_SIR_BFIN_DMA */
-	unsigned int            tx_dma_channel;
-	unsigned int            rx_dma_channel;
-};
-
-struct bfin_sir_port sir_ports[BFIN_UART_NR_PORTS];
-
-struct bfin_sir_port_res {
-	unsigned long   base_addr;
-	int             irq;
-	unsigned int    rx_dma_channel;
-	unsigned int    tx_dma_channel;
-};
-
-struct bfin_sir_port_res bfin_sir_port_resource[] = {
-#ifdef CONFIG_BFIN_SIR0
-	{
-	0xFFC00400,
-	IRQ_UART0_RX,
-	CH_UART0_RX,
-	CH_UART0_TX,
-	},
-#endif
-#ifdef CONFIG_BFIN_SIR1
-	{
-	0xFFC02000,
-	IRQ_UART1_RX,
-	CH_UART1_RX,
-	CH_UART1_TX,
-	},
-#endif
-};
-
-int nr_sirs = ARRAY_SIZE(bfin_sir_port_resource);
-
-struct bfin_sir_self {
-	struct bfin_sir_port    *sir_port;
-	spinlock_t              lock;
-	unsigned int            open;
-	int                     speed;
-	int                     newspeed;
-
-	struct sk_buff          *txskb;
-	struct sk_buff          *rxskb;
-	struct net_device_stats stats;
-	struct device           *dev;
-	struct irlap_cb         *irlap;
-	struct qos_info         qos;
-
-	iobuff_t                tx_buff;
-	iobuff_t                rx_buff;
-
-	struct work_struct      work;
-	int                     mtt;
-};
-
-static inline unsigned int SIR_UART_GET_LSR(struct bfin_sir_port *port)
-{
-	unsigned int lsr = bfin_read16(port->membase + OFFSET_LSR);
-	port->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | port->lsr;
-}
-
-static inline void SIR_UART_CLEAR_LSR(struct bfin_sir_port *port)
-{
-	port->lsr = 0;
-	bfin_read16(port->membase + OFFSET_LSR);
-}
-
-#define DRIVER_NAME "bfin_sir"
-
-static int bfin_sir_hw_init(void)
-{
-	int ret = -ENODEV;
-#ifdef CONFIG_BFIN_SIR0
-	ret = peripheral_request(P_UART0_TX, DRIVER_NAME);
-	if (ret)
-		return ret;
-	ret = peripheral_request(P_UART0_RX, DRIVER_NAME);
-	if (ret)
-		return ret;
-#endif
-
-#ifdef CONFIG_BFIN_SIR1
-	ret = peripheral_request(P_UART1_TX, DRIVER_NAME);
-	if (ret)
-		return ret;
-	ret = peripheral_request(P_UART1_RX, DRIVER_NAME);
-	if (ret)
-		return ret;
-#endif
-	return ret;
-}
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h b/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h
index 9a814b9..1fe76d8 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h
@@ -31,7 +31,6 @@
 #ifndef _CDEF_BF52X_H
 #define _CDEF_BF52X_H
 
-#include <asm/system.h>
 #include <asm/blackfin.h>
 
 #include "defBF52x_base.h"
@@ -43,57 +42,9 @@
 
 /* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
 #define bfin_read_PLL_CTL()			bfin_read16(PLL_CTL)
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	local_irq_save(flags);
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	local_irq_restore(flags);
-}
 #define bfin_read_PLL_DIV()			bfin_read16(PLL_DIV)
 #define bfin_write_PLL_DIV(val)			bfin_write16(PLL_DIV, val)
 #define bfin_read_VR_CTL()			bfin_read16(VR_CTL)
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	local_irq_save(flags);
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	local_irq_restore(flags);
-}
 #define bfin_read_PLL_STAT()			bfin_read16(PLL_STAT)
 #define bfin_write_PLL_STAT(val)		bfin_write16(PLL_STAT, val)
 #define bfin_read_PLL_LOCKCNT()			bfin_read16(PLL_LOCKCNT)
@@ -1201,4 +1152,57 @@
 #define bfin_read_NFC_DATA_RD()			bfin_read16(NFC_DATA_RD)
 #define bfin_write_NFC_DATA_RD(val)		bfin_write16(NFC_DATA_RD, val)
 
+/* These need to be last due to the cdef/linux inter-dependencies */
+#include <asm/irq.h>
+
+/* Writing to PLL_CTL initiates a PLL relock sequence. */
+static __inline__ void bfin_write_PLL_CTL(unsigned int val)
+{
+	unsigned long flags, iwr0, iwr1;
+
+	if (val == bfin_read_PLL_CTL())
+		return;
+
+	local_irq_save_hw(flags);
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	iwr0 = bfin_read32(SIC_IWR0);
+	iwr1 = bfin_read32(SIC_IWR1);
+	/* Only allow PLL Wakeup */
+	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
+	bfin_write32(SIC_IWR1, 0);
+
+	bfin_write16(PLL_CTL, val);
+	SSYNC();
+	asm("IDLE;");
+
+	bfin_write32(SIC_IWR0, iwr0);
+	bfin_write32(SIC_IWR1, iwr1);
+	local_irq_restore_hw(flags);
+}
+
+/* Writing to VR_CTL initiates a PLL relock sequence. */
+static __inline__ void bfin_write_VR_CTL(unsigned int val)
+{
+	unsigned long flags, iwr0, iwr1;
+
+	if (val == bfin_read_VR_CTL())
+		return;
+
+	local_irq_save_hw(flags);
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	iwr0 = bfin_read32(SIC_IWR0);
+	iwr1 = bfin_read32(SIC_IWR1);
+	/* Only allow PLL Wakeup */
+	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
+	bfin_write32(SIC_IWR1, 0);
+
+	bfin_write16(VR_CTL, val);
+	SSYNC();
+	asm("IDLE;");
+
+	bfin_write32(SIC_IWR0, iwr0);
+	bfin_write32(SIC_IWR1, iwr1);
+	local_irq_restore_hw(flags);
+}
+
 #endif /* _CDEF_BF52X_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/dma.h b/arch/blackfin/mach-bf527/include/mach/dma.h
index 49dd693..eb287da 100644
--- a/arch/blackfin/mach-bf527/include/mach/dma.h
+++ b/arch/blackfin/mach-bf527/include/mach/dma.h
@@ -1,38 +1,14 @@
-/*
- * file:        include/asm-blackfin/mach-bf527/dma.h
- * based on:	include/asm-blackfin/mach-bf537/dma.h
- * author:	Michael Hennerich (michael.hennerich@analog.com)
+/* mach/dma.h - arch-specific DMA defines
  *
- * created:
- * description:
- *	system DMA map
- * rev:
+ * Copyright 2004-2008 Analog Devices Inc.
  *
- * modified:
- *
- *
- * bugs:         enter bugs at http://blackfin.uclinux.org/
- *
- * this program is free software; you can redistribute it and/or modify
- * it under the terms of the gnu general public license as published by
- * the free software foundation; either version 2, or (at your option)
- * any later version.
- *
- * this program is distributed in the hope that it will be useful,
- * but without any warranty; without even the implied warranty of
- * merchantability or fitness for a particular purpose.  see the
- * gnu general public license for more details.
- *
- * you should have received a copy of the gnu general public license
- * along with this program; see the file copying.
- * if not, write to the free software foundation,
- * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _MACH_DMA_H_
 #define _MACH_DMA_H_
 
-#define MAX_BLACKFIN_DMA_CHANNEL 16
+#define MAX_DMA_CHANNELS 16
 
 #define CH_PPI 			0	/* PPI receive/transmit or NFC */
 #define CH_EMAC_RX 		1	/* Ethernet MAC receive or HOSTDP */
diff --git a/arch/blackfin/mach-bf527/include/mach/gpio.h b/arch/blackfin/mach-bf527/include/mach/gpio.h
new file mode 100644
index 0000000..06b6eeb
--- /dev/null
+++ b/arch/blackfin/mach-bf527/include/mach/gpio.h
@@ -0,0 +1,68 @@
+/*
+ * File: arch/blackfin/mach-bf527/include/mach/gpio.h
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Copyright (C) 2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+
+#ifndef _MACH_GPIO_H_
+#define _MACH_GPIO_H_
+
+#define MAX_BLACKFIN_GPIOS 48
+
+#define	GPIO_PF0	0
+#define	GPIO_PF1	1
+#define	GPIO_PF2	2
+#define	GPIO_PF3	3
+#define	GPIO_PF4	4
+#define	GPIO_PF5	5
+#define	GPIO_PF6	6
+#define	GPIO_PF7	7
+#define	GPIO_PF8	8
+#define	GPIO_PF9	9
+#define	GPIO_PF10	10
+#define	GPIO_PF11	11
+#define	GPIO_PF12	12
+#define	GPIO_PF13	13
+#define	GPIO_PF14	14
+#define	GPIO_PF15	15
+#define	GPIO_PG0	16
+#define	GPIO_PG1	17
+#define	GPIO_PG2	18
+#define	GPIO_PG3	19
+#define	GPIO_PG4	20
+#define	GPIO_PG5	21
+#define	GPIO_PG6	22
+#define	GPIO_PG7	23
+#define	GPIO_PG8	24
+#define	GPIO_PG9	25
+#define	GPIO_PG10      	26
+#define	GPIO_PG11      	27
+#define	GPIO_PG12      	28
+#define	GPIO_PG13      	29
+#define	GPIO_PG14      	30
+#define	GPIO_PG15      	31
+#define	GPIO_PH0	32
+#define	GPIO_PH1	33
+#define	GPIO_PH2	34
+#define	GPIO_PH3	35
+#define	GPIO_PH4	36
+#define	GPIO_PH5	37
+#define	GPIO_PH6	38
+#define	GPIO_PH7	39
+#define	GPIO_PH8	40
+#define	GPIO_PH9	41
+#define	GPIO_PH10      	42
+#define	GPIO_PH11      	43
+#define	GPIO_PH12      	44
+#define	GPIO_PH13      	45
+#define	GPIO_PH14      	46
+#define	GPIO_PH15      	47
+
+#define PORT_F GPIO_PF0
+#define PORT_G GPIO_PG0
+#define PORT_H GPIO_PH0
+
+#endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf527/include/mach/irq.h b/arch/blackfin/mach-bf527/include/mach/irq.h
index 4e2b3f2..8ea660d 100644
--- a/arch/blackfin/mach-bf527/include/mach/irq.h
+++ b/arch/blackfin/mach-bf527/include/mach/irq.h
@@ -96,14 +96,14 @@
 #define IRQ_MAC_TX		BFIN_IRQ(30)	/* DMA2 Channel (MAC TX/NAND) */
 #define IRQ_NFC			BFIN_IRQ(30)	/* DMA2 Channel (MAC TX/NAND) */
 #define IRQ_PORTH_INTB		BFIN_IRQ(31)	/* Port H Interrupt B */
-#define IRQ_TMR0		BFIN_IRQ(32)	/* Timer 0 */
-#define IRQ_TMR1		BFIN_IRQ(33)	/* Timer 1 */
-#define IRQ_TMR2		BFIN_IRQ(34)	/* Timer 2 */
-#define IRQ_TMR3		BFIN_IRQ(35)	/* Timer 3 */
-#define IRQ_TMR4		BFIN_IRQ(36)	/* Timer 4 */
-#define IRQ_TMR5		BFIN_IRQ(37)	/* Timer 5 */
-#define IRQ_TMR6		BFIN_IRQ(38)	/* Timer 6 */
-#define IRQ_TMR7		BFIN_IRQ(39)	/* Timer 7 */
+#define IRQ_TIMER0		BFIN_IRQ(32)	/* Timer 0 */
+#define IRQ_TIMER1		BFIN_IRQ(33)	/* Timer 1 */
+#define IRQ_TIMER2		BFIN_IRQ(34)	/* Timer 2 */
+#define IRQ_TIMER3		BFIN_IRQ(35)	/* Timer 3 */
+#define IRQ_TIMER4		BFIN_IRQ(36)	/* Timer 4 */
+#define IRQ_TIMER5		BFIN_IRQ(37)	/* Timer 5 */
+#define IRQ_TIMER6		BFIN_IRQ(38)	/* Timer 6 */
+#define IRQ_TIMER7		BFIN_IRQ(39)	/* Timer 7 */
 #define IRQ_PORTG_INTA		BFIN_IRQ(40)	/* Port G Interrupt A */
 #define IRQ_PORTG_INTB		BFIN_IRQ(41)	/* Port G Interrupt B */
 #define IRQ_MEM_DMA0		BFIN_IRQ(42)	/* MDMA Stream 0 */
@@ -227,14 +227,14 @@
 #define IRQ_PORTH_INTB_POS	28
 
 /* IAR4 BIT FIELDS */
-#define IRQ_TMR0_POS		0
-#define IRQ_TMR1_POS		4
-#define IRQ_TMR2_POS		8
-#define IRQ_TMR3_POS		12
-#define IRQ_TMR4_POS		16
-#define IRQ_TMR5_POS		20
-#define IRQ_TMR6_POS		24
-#define IRQ_TMR7_POS		28
+#define IRQ_TIMER0_POS		0
+#define IRQ_TIMER1_POS		4
+#define IRQ_TIMER2_POS		8
+#define IRQ_TIMER3_POS		12
+#define IRQ_TIMER4_POS		16
+#define IRQ_TIMER5_POS		20
+#define IRQ_TIMER6_POS		24
+#define IRQ_TIMER7_POS		28
 
 /* IAR5 BIT FIELDS */
 #define IRQ_PORTG_INTA_POS	0
diff --git a/arch/blackfin/mach-bf527/include/mach/mem_init.h b/arch/blackfin/mach-bf527/include/mach/mem_init.h
deleted file mode 100644
index cbe03f4..0000000
--- a/arch/blackfin/mach-bf527/include/mach/mem_init.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * File:         include/asm-blackfin/mach-bf527/mem_init.h
- * Based on:
- * Author:
- *
- * Created:
- * Description:
- *
- * Rev:
- *
- * Modified:
- *               Copyright 2004-2007 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#if (CONFIG_MEM_MT48LC16M16A2TG_75 || CONFIG_MEM_MT48LC64M4A2FB_7E || CONFIG_MEM_MT48LC16M8A2TG_75 || CONFIG_MEM_GENERIC_BOARD || CONFIG_MEM_MT48LC32M8A2_75 || CONFIG_MEM_MT48LC32M16A2TG_75)
-#if (CONFIG_SCLK_HZ > 119402985)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_7
-#define SDRAM_tRAS_num  7
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 104477612) && (CONFIG_SCLK_HZ <= 119402985)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_6
-#define SDRAM_tRAS_num  6
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 89552239) && (CONFIG_SCLK_HZ <= 104477612)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_5
-#define SDRAM_tRAS_num  5
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 74626866) && (CONFIG_SCLK_HZ <= 89552239)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_4
-#define SDRAM_tRAS_num  4
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 66666667) && (CONFIG_SCLK_HZ <= 74626866)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_3
-#define SDRAM_tRAS_num  3
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 59701493) && (CONFIG_SCLK_HZ <= 66666667)
-#define SDRAM_tRP       TRP_1
-#define SDRAM_tRP_num   1
-#define SDRAM_tRAS      TRAS_4
-#define SDRAM_tRAS_num  3
-#define SDRAM_tRCD      TRCD_1
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 44776119) && (CONFIG_SCLK_HZ <= 59701493)
-#define SDRAM_tRP       TRP_1
-#define SDRAM_tRP_num   1
-#define SDRAM_tRAS      TRAS_3
-#define SDRAM_tRAS_num  3
-#define SDRAM_tRCD      TRCD_1
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 29850746) && (CONFIG_SCLK_HZ <= 44776119)
-#define SDRAM_tRP       TRP_1
-#define SDRAM_tRP_num   1
-#define SDRAM_tRAS      TRAS_2
-#define SDRAM_tRAS_num  2
-#define SDRAM_tRCD      TRCD_1
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ <= 29850746)
-#define SDRAM_tRP       TRP_1
-#define SDRAM_tRP_num   1
-#define SDRAM_tRAS      TRAS_1
-#define SDRAM_tRAS_num  1
-#define SDRAM_tRCD      TRCD_1
-#define SDRAM_tWR       TWR_2
-#endif
-#endif
-
-#if (CONFIG_MEM_MT48LC16M16A2TG_75)
-  /*SDRAM INFORMATION: */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-#if (CONFIG_MEM_MT48LC16M8A2TG_75)
-  /*SDRAM INFORMATION: */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   4096	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-#if (CONFIG_MEM_MT48LC32M8A2_75)
-  /*SDRAM INFORMATION: */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-#if (CONFIG_MEM_MT48LC64M4A2FB_7E)
-  /*SDRAM INFORMATION: */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-#if (CONFIG_MEM_GENERIC_BOARD)
-  /*SDRAM INFORMATION: Modify this for your board */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-#if (CONFIG_MEM_MT48LC32M16A2TG_75)
-  /*SDRAM INFORMATION: */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-/* Equation from section 17 (p17-46) of BF533 HRM */
-#define mem_SDRRC       (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num)
-
-/* Enable SCLK Out */
-#define mem_SDGCTL        (SCTLE | SDRAM_CL | SDRAM_tRAS | SDRAM_tRP | SDRAM_tRCD | SDRAM_tWR | PSS)
-
-#if defined CONFIG_CLKIN_HALF
-#define CLKIN_HALF       1
-#else
-#define CLKIN_HALF       0
-#endif
-
-#if defined CONFIG_PLL_BYPASS
-#define PLL_BYPASS      1
-#else
-#define PLL_BYPASS       0
-#endif
-
-/***************************************Currently Not Being Used *********************************/
-#define flash_EBIU_AMBCTL_WAT  ((CONFIG_FLASH_SPEED_BWAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_RAT  ((CONFIG_FLASH_SPEED_BRAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_HT   ((CONFIG_FLASH_SPEED_BHT  * 4) / (4000000000 / CONFIG_SCLK_HZ))
-#define flash_EBIU_AMBCTL_ST   ((CONFIG_FLASH_SPEED_BST  * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_TT   ((CONFIG_FLASH_SPEED_BTT  * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-
-#if (flash_EBIU_AMBCTL_TT > 3)
-#define flash_EBIU_AMBCTL0_TT   B0TT_4
-#endif
-#if (flash_EBIU_AMBCTL_TT == 3)
-#define flash_EBIU_AMBCTL0_TT   B0TT_3
-#endif
-#if (flash_EBIU_AMBCTL_TT == 2)
-#define flash_EBIU_AMBCTL0_TT   B0TT_2
-#endif
-#if (flash_EBIU_AMBCTL_TT < 2)
-#define flash_EBIU_AMBCTL0_TT   B0TT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_ST > 3)
-#define flash_EBIU_AMBCTL0_ST   B0ST_4
-#endif
-#if (flash_EBIU_AMBCTL_ST == 3)
-#define flash_EBIU_AMBCTL0_ST   B0ST_3
-#endif
-#if (flash_EBIU_AMBCTL_ST == 2)
-#define flash_EBIU_AMBCTL0_ST   B0ST_2
-#endif
-#if (flash_EBIU_AMBCTL_ST < 2)
-#define flash_EBIU_AMBCTL0_ST   B0ST_1
-#endif
-
-#if (flash_EBIU_AMBCTL_HT > 2)
-#define flash_EBIU_AMBCTL0_HT   B0HT_3
-#endif
-#if (flash_EBIU_AMBCTL_HT == 2)
-#define flash_EBIU_AMBCTL0_HT   B0HT_2
-#endif
-#if (flash_EBIU_AMBCTL_HT == 1)
-#define flash_EBIU_AMBCTL0_HT   B0HT_1
-#endif
-#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT == 0)
-#define flash_EBIU_AMBCTL0_HT   B0HT_0
-#endif
-#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT != 0)
-#define flash_EBIU_AMBCTL0_HT   B0HT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_WAT > 14)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_15
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 14)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_14
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 13)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_13
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 12)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_12
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 11)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_11
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 10)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_10
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 9)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_9
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 8)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_8
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 7)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_7
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 6)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_6
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 5)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_5
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 4)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_4
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 3)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_3
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 2)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_2
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 1)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_RAT > 14)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_15
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 14)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_14
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 13)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_13
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 12)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_12
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 11)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_11
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 10)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_10
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 9)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_9
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 8)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_8
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 7)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_7
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 6)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_6
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 5)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_5
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 4)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_4
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 3)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_3
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 2)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_2
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 1)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_1
-#endif
-
-#define flash_EBIU_AMBCTL0  \
-	(flash_EBIU_AMBCTL0_WAT | flash_EBIU_AMBCTL0_RAT | flash_EBIU_AMBCTL0_HT | \
-	 flash_EBIU_AMBCTL0_ST | flash_EBIU_AMBCTL0_TT | CONFIG_FLASH_SPEED_RDYEN)
diff --git a/arch/blackfin/mach-bf527/include/mach/mem_map.h b/arch/blackfin/mach-bf527/include/mach/mem_map.h
index ef46dc9..019e001 100644
--- a/arch/blackfin/mach-bf527/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf527/include/mach/mem_map.h
@@ -99,4 +99,10 @@
 #define L1_SCRATCH_START	0xFFB00000
 #define L1_SCRATCH_LENGTH	0x1000
 
+#define GET_PDA_SAFE(preg)		\
+	preg.l = _cpu_pda;		\
+	preg.h = _cpu_pda;
+
+#define GET_PDA(preg, dreg)	GET_PDA_SAFE(preg)
+
 #endif				/* _MEM_MAP_527_H_ */
diff --git a/arch/blackfin/mach-bf527/ints-priority.c b/arch/blackfin/mach-bf527/ints-priority.c
index 8a23674..f8c8acd 100644
--- a/arch/blackfin/mach-bf527/ints-priority.c
+++ b/arch/blackfin/mach-bf527/ints-priority.c
@@ -69,14 +69,14 @@
 			((CONFIG_IRQ_MAC_TX - 7) << IRQ_MAC_TX_POS) |
 			((CONFIG_IRQ_PORTH_INTB - 7) << IRQ_PORTH_INTB_POS));
 
-	bfin_write_SIC_IAR4(((CONFIG_IRQ_TMR0 - 7) << IRQ_TMR0_POS) |
-			((CONFIG_IRQ_TMR1 - 7) << IRQ_TMR1_POS) |
-			((CONFIG_IRQ_TMR2 - 7) << IRQ_TMR2_POS) |
-			((CONFIG_IRQ_TMR3 - 7) << IRQ_TMR3_POS) |
-			((CONFIG_IRQ_TMR4 - 7) << IRQ_TMR4_POS) |
-			((CONFIG_IRQ_TMR5 - 7) << IRQ_TMR5_POS) |
-			((CONFIG_IRQ_TMR6 - 7) << IRQ_TMR6_POS) |
-			((CONFIG_IRQ_TMR7 - 7) << IRQ_TMR7_POS));
+	bfin_write_SIC_IAR4(((CONFIG_IRQ_TIMER0 - 7) << IRQ_TIMER0_POS) |
+			((CONFIG_IRQ_TIMER1 - 7) << IRQ_TIMER1_POS) |
+			((CONFIG_IRQ_TIMER2 - 7) << IRQ_TIMER2_POS) |
+			((CONFIG_IRQ_TIMER3 - 7) << IRQ_TIMER3_POS) |
+			((CONFIG_IRQ_TIMER4 - 7) << IRQ_TIMER4_POS) |
+			((CONFIG_IRQ_TIMER5 - 7) << IRQ_TIMER5_POS) |
+			((CONFIG_IRQ_TIMER6 - 7) << IRQ_TIMER6_POS) |
+			((CONFIG_IRQ_TIMER7 - 7) << IRQ_TIMER7_POS));
 
 	bfin_write_SIC_IAR5(((CONFIG_IRQ_PORTG_INTA - 7) << IRQ_PORTG_INTA_POS) |
 			((CONFIG_IRQ_PORTG_INTB - 7) << IRQ_PORTG_INTB_POS) |
diff --git a/arch/blackfin/mach-bf533/Kconfig b/arch/blackfin/mach-bf533/Kconfig
index 76beb75..14427de 100644
--- a/arch/blackfin/mach-bf533/Kconfig
+++ b/arch/blackfin/mach-bf533/Kconfig
@@ -59,7 +59,7 @@
 	default 10
 config TIMER0
 	int "TIMER0"
-	default 11
+	default 8
 config TIMER1
 	int "TIMER1"
 	default 11
diff --git a/arch/blackfin/mach-bf533/Makefile b/arch/blackfin/mach-bf533/Makefile
index aa9f264..874840f 100644
--- a/arch/blackfin/mach-bf533/Makefile
+++ b/arch/blackfin/mach-bf533/Makefile
@@ -2,6 +2,4 @@
 # arch/blackfin/mach-bf533/Makefile
 #
 
-extra-y := head.o
-
 obj-y := ints-priority.o dma.o
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index 72ac3ac..0c66bf4 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -313,23 +313,33 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir0_device = {
 	.name = "bfin_sir",
 	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
 
@@ -431,7 +441,9 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
 #endif
 
 #if defined(CONFIG_KEYBOARD_OPENCORES) || defined(CONFIG_KEYBOARD_OPENCORES_MODULE)
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index d064ded..6ee607c 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -212,23 +212,33 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir0_device = {
 	.name = "bfin_sir",
 	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
 static struct platform_device bfin_sport0_uart_device = {
@@ -353,7 +363,9 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
 #endif
 
 #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index 575843f..e7061c7 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -219,6 +219,19 @@
 };
 #endif
 
+static struct resource bfin_gpios_resources = {
+	.start = 0,
+	.end   = MAX_BLACKFIN_GPIOS - 1,
+	.flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device bfin_gpios_device = {
+	.name = "simple-gpio",
+	.id = -1,
+	.num_resources = 1,
+	.resource = &bfin_gpios_resources,
+};
+
 #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
 static struct resource bfin_uart_resources[] = {
 	{
@@ -237,23 +250,33 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir0_device = {
 	.name = "bfin_sir",
 	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
 static struct platform_device bfin_sport0_uart_device = {
@@ -342,7 +365,9 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
 #endif
 
 #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
@@ -365,6 +390,8 @@
 #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
 	&bfin_spi0_device,
 #endif
+
+	&bfin_gpios_device,
 };
 
 static int __init cm_bf533_init(void)
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index cc2e7ee..08cd096 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -46,7 +46,7 @@
 /*
  * Name the Board for the /proc/cpuinfo
  */
-const char bfin_board_name[] = "ADDS-BF533-EZKIT";
+const char bfin_board_name[] = "ADI BF533-EZKIT";
 
 #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
 static struct platform_device rtc_device = {
@@ -236,23 +236,33 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir0_device = {
 	.name = "bfin_sir",
 	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
 #include <linux/input.h>
@@ -363,7 +373,9 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
 #endif
 
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
diff --git a/arch/blackfin/mach-bf533/boards/generic_board.c b/arch/blackfin/mach-bf533/boards/generic_board.c
index 82b1f6a..986eeec 100644
--- a/arch/blackfin/mach-bf533/boards/generic_board.c
+++ b/arch/blackfin/mach-bf533/boards/generic_board.c
@@ -72,6 +72,35 @@
 };
 #endif
 
+#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
+	{
+		.start = 0xFFC00400,
+		.end = 0xFFC004FF,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
+#endif
+#endif
+
 static struct platform_device *generic_board_devices[] __initdata = {
 #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
 	&rtc_device,
@@ -80,6 +109,12 @@
 #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
 	&smc91x_device,
 #endif
+
+#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#endif
 };
 
 static int __init generic_board_init(void)
diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c b/arch/blackfin/mach-bf533/boards/ip0x.c
index 5864892..e30b1b7 100644
--- a/arch/blackfin/mach-bf533/boards/ip0x.c
+++ b/arch/blackfin/mach-bf533/boards/ip0x.c
@@ -197,23 +197,33 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir0_device = {
 	.name = "bfin_sir",
 	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
 static struct resource isp1362_hcd_resources[] = {
@@ -272,7 +282,9 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
 #endif
 
 #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 050ffca..07f9ad1 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -49,7 +49,7 @@
 /*
  * Name the Board for the /proc/cpuinfo
  */
-const char bfin_board_name[] = "ADDS-BF533-STAMP";
+const char bfin_board_name[] = "ADI BF533-STAMP";
 
 #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
 static struct platform_device rtc_device = {
@@ -118,7 +118,7 @@
 		.offset = 0,
 	}, {
 		.name   = "linux kernel(nor)",
-		.size   = 0xE0000,
+		.size   = 0x180000,
 		.offset = MTDPART_OFS_APPEND,
 	}, {
 		.name   = "file system(nor)",
@@ -169,7 +169,7 @@
 		.mask_flags = MTD_CAP_ROM
 	}, {
 		.name = "linux kernel(spi)",
-		.size = 0xe0000,
+		.size = 0x180000,
 		.offset = MTDPART_OFS_APPEND,
 	}, {
 		.name = "file system(spi)",
@@ -216,13 +216,6 @@
 };
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-static struct bfin5xx_spi_chip spi_mmc_chip_info = {
-	.enable_dma = 1,
-	.bits_per_word = 8,
-};
-#endif
-
 #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
 static struct bfin5xx_spi_chip spidev_chip_info = {
 	.enable_dma = 0,
@@ -265,27 +258,6 @@
 	},
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-	{
-		.modalias = "spi_mmc_dummy",
-		.max_speed_hz = 20000000,     /* max spi clock (SCK) speed in HZ */
-		.bus_num = 0,
-		.chip_select = 0,
-		.platform_data = NULL,
-		.controller_data = &spi_mmc_chip_info,
-		.mode = SPI_MODE_3,
-	},
-	{
-		.modalias = "spi_mmc",
-		.max_speed_hz = 20000000,     /* max spi clock (SCK) speed in HZ */
-		.bus_num = 0,
-		.chip_select = CONFIG_SPI_MMC_CS_CHAN,
-		.platform_data = NULL,
-		.controller_data = &spi_mmc_chip_info,
-		.mode = SPI_MODE_3,
-	},
-#endif
-
 #if defined(CONFIG_PBX)
 	{
 		.modalias = "fxs-spi",
@@ -373,23 +345,33 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir0_device = {
 	.name = "bfin_sir",
 	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
 static struct platform_device bfin_sport0_uart_device = {
@@ -537,7 +519,9 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
 #endif
 
 #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
diff --git a/arch/blackfin/mach-bf533/dma.c b/arch/blackfin/mach-bf533/dma.c
index 28655c1..0a6eb8f 100644
--- a/arch/blackfin/mach-bf533/dma.c
+++ b/arch/blackfin/mach-bf533/dma.c
@@ -31,7 +31,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_BLACKFIN_DMA_CHANNEL] = {
+struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf533/head.S b/arch/blackfin/mach-bf533/head.S
deleted file mode 100644
index 9fc95aa..0000000
--- a/arch/blackfin/mach-bf533/head.S
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * File:         arch/blackfin/mach-bf533/head.S
- * Based on:
- * Author:       Jeff Dionne <jeff@uclinux.org> COPYRIGHT 1998 D. Jeff Dionne
- *
- * Created:      1998
- * Description:  bf533 startup file
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/blackfin.h>
-#ifdef CONFIG_BFIN_KERNEL_CLOCK
-#include <asm/clocks.h>
-#include <mach/mem_init.h>
-#endif
-
-.section .l1.text
-#ifdef CONFIG_BFIN_KERNEL_CLOCK
-ENTRY(_start_dma_code)
-	p0.h = hi(SIC_IWR);
-	p0.l = lo(SIC_IWR);
-	r0.l = 0x1;
-	r0.h = 0x0;
-	[p0] = r0;
-	SSYNC;
-
-	/*
-	 *  Set PLL_CTL
-	 *   - [14:09] = MSEL[5:0] : CLKIN / VCO multiplication factors
-	 *   - [8]     = BYPASS    : BYPASS the PLL, run CLKIN into CCLK/SCLK
-	 *   - [7]     = output delay (add 200ps of delay to mem signals)
-	 *   - [6]     = input delay (add 200ps of input delay to mem signals)
-	 *   - [5]     = PDWN      : 1=All Clocks off
-	 *   - [3]     = STOPCK    : 1=Core Clock off
-	 *   - [1]     = PLL_OFF   : 1=Disable Power to PLL
-	 *   - [0]     = DF        : 1=Pass CLKIN/2 to PLL / 0=Pass CLKIN to PLL
-	 *   all other bits set to zero
-	 */
-
-	p0.h = hi(PLL_LOCKCNT);
-	p0.l = lo(PLL_LOCKCNT);
-	r0 = 0x300(Z);
-	w[p0] = r0.l;
-	ssync;
-
-	P2.H = hi(EBIU_SDGCTL);
-	P2.L = lo(EBIU_SDGCTL);
-	R0 = [P2];
-	BITSET (R0, 24);
-	[P2] = R0;
-	SSYNC;
-
-	r0 = CONFIG_VCO_MULT & 63;       /* Load the VCO multiplier         */
-	r0 = r0 << 9;                    /* Shift it over,                  */
-	r1 = CLKIN_HALF;                 /* Do we need to divide CLKIN by 2?*/
-	r0 = r1 | r0;
-	r1 = PLL_BYPASS;                 /* Bypass the PLL?                 */
-	r1 = r1 << 8;                    /* Shift it over                   */
-	r0 = r1 | r0;                    /* add them all together           */
-#ifdef ANOMALY_05000265
-	BITSET(r0, 15);                  /* Add 250 mV of hysteresis to SPORT input pins */
-#endif
-
-	p0.h = hi(PLL_CTL);
-	p0.l = lo(PLL_CTL);              /* Load the address                */
-	cli r2;                          /* Disable interrupts              */
-	ssync;
-	w[p0] = r0.l;                    /* Set the value                   */
-	idle;                            /* Wait for the PLL to stablize    */
-	sti r2;                          /* Enable interrupts               */
-
-.Lcheck_again:
-	p0.h = hi(PLL_STAT);
-	p0.l = lo(PLL_STAT);
-	R0 = W[P0](Z);
-	CC = BITTST(R0,5);
-	if ! CC jump .Lcheck_again;
-
-	/* Configure SCLK & CCLK Dividers */
-	r0 = (CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV);
-	p0.h = hi(PLL_DIV);
-	p0.l = lo(PLL_DIV);
-	w[p0] = r0.l;
-	ssync;
-
-	p0.l = lo(EBIU_SDRRC);
-	p0.h = hi(EBIU_SDRRC);
-	r0 = mem_SDRRC;
-	w[p0] = r0.l;
-	ssync;
-
-	P2.H = hi(EBIU_SDGCTL);
-	P2.L = lo(EBIU_SDGCTL);
-	R0 = [P2];
-	BITCLR (R0, 24);
-	p0.h = hi(EBIU_SDSTAT);
-	p0.l = lo(EBIU_SDSTAT);
-	r2.l = w[p0];
-	cc = bittst(r2,3);
-	if !cc jump .Lskip;
-	NOP;
-	BITSET (R0, 23);
-.Lskip:
-	[P2] = R0;
-	SSYNC;
-
-	R0.L = lo(mem_SDGCTL);
-	R0.H = hi(mem_SDGCTL);
-	R1 = [p2];
-	R1 = R1 | R0;
-	[P2] = R1;
-	SSYNC;
-
-	RTS;
-ENDPROC(_start_dma_code)
-#endif /* CONFIG_BFIN_KERNEL_CLOCK */
diff --git a/arch/blackfin/mach-bf533/include/mach/anomaly.h b/arch/blackfin/mach-bf533/include/mach/anomaly.h
index f544fc5..0d3a034 100644
--- a/arch/blackfin/mach-bf533/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf533/include/mach/anomaly.h
@@ -7,7 +7,7 @@
  */
 
 /* This file should be up to date with:
- *  - Revision D, 06/18/2008; ADSP-BF531/BF532/BF533 Blackfin Processor Anomaly List
+ *  - Revision E, 09/18/2008; ADSP-BF531/BF532/BF533 Blackfin Processor Anomaly List
  */
 
 #ifndef _MACH_ANOMALY_H_
@@ -194,6 +194,12 @@
 #define ANOMALY_05000403 (1)
 /* Speculative Fetches Can Cause Undesired External FIFO Operations */
 #define ANOMALY_05000416 (1)
+/* Multichannel SPORT Channel Misalignment Under Specific Configuration */
+#define ANOMALY_05000425 (1)
+/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */
+#define ANOMALY_05000426 (1)
+/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
+#define ANOMALY_05000443 (1)
 
 /* These anomalies have been "phased" out of analog.com anomaly sheets and are
  * here to show running on older silicon just isn't feasible.
@@ -273,5 +279,8 @@
 #define ANOMALY_05000323 (0)
 #define ANOMALY_05000353 (1)
 #define ANOMALY_05000386 (1)
+#define ANOMALY_05000412 (0)
+#define ANOMALY_05000432 (0)
+#define ANOMALY_05000435 (0)
 
 #endif
diff --git a/arch/blackfin/mach-bf533/include/mach/bf533.h b/arch/blackfin/mach-bf533/include/mach/bf533.h
index dfc8c1a..cf4427c 100644
--- a/arch/blackfin/mach-bf533/include/mach/bf533.h
+++ b/arch/blackfin/mach-bf533/include/mach/bf533.h
@@ -145,7 +145,7 @@
 #endif
 #ifdef CONFIG_BF532
 #define CPU "BF532"
-#define CPUID 0x275A
+#define CPUID 0x27a5
 #endif
 #ifdef CONFIG_BF531
 #define CPU "BF531"
@@ -153,7 +153,7 @@
 #endif
 
 #ifndef CPU
-#error Unknown CPU type - This kernel doesn't seem to be configured properly
+#error "Unknown CPU type - This kernel doesn't seem to be configured properly"
 #endif
 
 #endif				/* __MACH_BF533_H__  */
diff --git a/arch/blackfin/mach-bf533/include/mach/bfin_sir.h b/arch/blackfin/mach-bf533/include/mach/bfin_sir.h
deleted file mode 100644
index 9bb87e9..0000000
--- a/arch/blackfin/mach-bf533/include/mach/bfin_sir.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Blackfin Infra-red Driver
- *
- * Copyright 2006-2008 Analog Devices Inc.
- *
- * Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
- *
- */
-
-#include <linux/serial.h>
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#define SIR_UART_GET_CHAR(port)   bfin_read16((port)->membase + OFFSET_RBR)
-#define SIR_UART_GET_DLL(port)    bfin_read16((port)->membase + OFFSET_DLL)
-#define SIR_UART_GET_IER(port)    bfin_read16((port)->membase + OFFSET_IER)
-#define SIR_UART_GET_DLH(port)    bfin_read16((port)->membase + OFFSET_DLH)
-#define SIR_UART_GET_IIR(port)    bfin_read16((port)->membase + OFFSET_IIR)
-#define SIR_UART_GET_LCR(port)    bfin_read16((port)->membase + OFFSET_LCR)
-#define SIR_UART_GET_GCTL(port)   bfin_read16((port)->membase + OFFSET_GCTL)
-
-#define SIR_UART_PUT_CHAR(port, v) bfin_write16(((port)->membase + OFFSET_THR), v)
-#define SIR_UART_PUT_DLL(port, v)  bfin_write16(((port)->membase + OFFSET_DLL), v)
-#define SIR_UART_PUT_IER(port, v)  bfin_write16(((port)->membase + OFFSET_IER), v)
-#define SIR_UART_PUT_DLH(port, v)  bfin_write16(((port)->membase + OFFSET_DLH), v)
-#define SIR_UART_PUT_LCR(port, v)  bfin_write16(((port)->membase + OFFSET_LCR), v)
-#define SIR_UART_PUT_GCTL(port, v) bfin_write16(((port)->membase + OFFSET_GCTL), v)
-
-#ifdef CONFIG_SIR_BFIN_DMA
-struct dma_rx_buf {
-	char *buf;
-	int head;
-	int tail;
-	};
-#endif /* CONFIG_SIR_BFIN_DMA */
-
-struct bfin_sir_port {
-	unsigned char __iomem   *membase;
-	unsigned int            irq;
-	unsigned int            lsr;
-	unsigned long           clk;
-	struct net_device       *dev;
-#ifdef CONFIG_SIR_BFIN_DMA
-	int                     tx_done;
-	struct dma_rx_buf       rx_dma_buf;
-	struct timer_list       rx_dma_timer;
-	int                     rx_dma_nrows;
-#endif /* CONFIG_SIR_BFIN_DMA */
-	unsigned int            tx_dma_channel;
-	unsigned int            rx_dma_channel;
-};
-
-struct bfin_sir_port sir_ports[BFIN_UART_NR_PORTS];
-
-struct bfin_sir_port_res {
-	unsigned long   base_addr;
-	int             irq;
-	unsigned int    rx_dma_channel;
-	unsigned int    tx_dma_channel;
-};
-
-struct bfin_sir_port_res bfin_sir_port_resource[] = {
-#ifdef CONFIG_BFIN_SIR0
-	{
-	0xFFC00400,
-	IRQ_UART_RX,
-	CH_UART_RX,
-	CH_UART_TX,
-	},
-#endif
-};
-
-int nr_sirs = ARRAY_SIZE(bfin_sir_port_resource);
-
-struct bfin_sir_self {
-	struct bfin_sir_port    *sir_port;
-	spinlock_t              lock;
-	unsigned int            open;
-	int                     speed;
-	int                     newspeed;
-
-	struct sk_buff          *txskb;
-	struct sk_buff          *rxskb;
-	struct net_device_stats stats;
-	struct device           *dev;
-	struct irlap_cb         *irlap;
-	struct qos_info         qos;
-
-	iobuff_t                tx_buff;
-	iobuff_t                rx_buff;
-
-	struct work_struct      work;
-	int                     mtt;
-};
-
-static inline unsigned int SIR_UART_GET_LSR(struct bfin_sir_port *port)
-{
-	unsigned int lsr = bfin_read16(port->membase + OFFSET_LSR);
-	port->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | port->lsr;
-}
-
-static inline void SIR_UART_CLEAR_LSR(struct bfin_sir_port *port)
-{
-	port->lsr = 0;
-	bfin_read16(port->membase + OFFSET_LSR);
-}
-
-#define DRIVER_NAME "bfin_sir"
-
-static int bfin_sir_hw_init(void)
-{
-	int ret = -ENODEV;
-#ifdef CONFIG_BFIN_SIR0
-	ret = peripheral_request(P_UART0_TX, DRIVER_NAME);
-	if (ret)
-		return ret;
-	ret = peripheral_request(P_UART0_RX, DRIVER_NAME);
-	if (ret)
-		return ret;
-#endif
-	return ret;
-}
diff --git a/arch/blackfin/mach-bf533/include/mach/blackfin.h b/arch/blackfin/mach-bf533/include/mach/blackfin.h
index d80971b..045184f 100644
--- a/arch/blackfin/mach-bf533/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf533/include/mach/blackfin.h
@@ -44,6 +44,13 @@
 
 #define BFIN_UART_NR_PORTS      1
 
+#define CH_UART_RX CH_UART0_RX
+#define CH_UART_TX CH_UART0_TX
+
+#define IRQ_UART_ERROR IRQ_UART0_ERROR
+#define IRQ_UART_RX    IRQ_UART0_RX
+#define IRQ_UART_TX    IRQ_UART0_TX
+
 #define OFFSET_THR              0x00	/* Transmit Holding register            */
 #define OFFSET_RBR              0x00	/* Receive Buffer register              */
 #define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
diff --git a/arch/blackfin/mach-bf533/include/mach/cdefBF532.h b/arch/blackfin/mach-bf533/include/mach/cdefBF532.h
index 3d8978a..bbc3c83 100644
--- a/arch/blackfin/mach-bf533/include/mach/cdefBF532.h
+++ b/arch/blackfin/mach-bf533/include/mach/cdefBF532.h
@@ -39,31 +39,8 @@
 /*include core specific register pointer definitions*/
 #include <asm/cdef_LPBlackfin.h>
 
-#include <asm/system.h>
-
 /* Clock and System Control (0xFFC0 0400-0xFFC0 07FF) */
 #define bfin_read_PLL_CTL()                  bfin_read16(PLL_CTL)
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	local_irq_save(flags);
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr = bfin_read32(SIC_IWR);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR, IWR_ENABLE(0));
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR, iwr);
-	local_irq_restore(flags);
-}
 #define bfin_read_PLL_STAT()                 bfin_read16(PLL_STAT)
 #define bfin_write_PLL_STAT(val)             bfin_write16(PLL_STAT,val)
 #define bfin_read_PLL_LOCKCNT()              bfin_read16(PLL_LOCKCNT)
@@ -72,27 +49,6 @@
 #define bfin_read_PLL_DIV()                  bfin_read16(PLL_DIV)
 #define bfin_write_PLL_DIV(val)              bfin_write16(PLL_DIV,val)
 #define bfin_read_VR_CTL()                   bfin_read16(VR_CTL)
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	local_irq_save(flags);
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr = bfin_read32(SIC_IWR);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR, IWR_ENABLE(0));
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR, iwr);
-	local_irq_restore(flags);
-}
 
 /* System Interrupt Controller (0xFFC0 0C00-0xFFC0 0FFF) */
 #define bfin_read_SWRST()                    bfin_read16(SWRST)
@@ -178,50 +134,6 @@
 #define bfin_read_FIO_MASKB_T()              bfin_read16(FIO_MASKB_T)
 #define bfin_write_FIO_MASKB_T(val)          bfin_write16(FIO_MASKB_T,val)
 
-
-#if ANOMALY_05000311
-#define BFIN_WRITE_FIO_FLAG(name) \
-static __inline__ void bfin_write_FIO_FLAG_ ## name (unsigned short val)\
-{\
-	unsigned long flags;\
-	local_irq_save(flags);\
-	bfin_write16(FIO_FLAG_ ## name,val);\
-	bfin_read_CHIPID();\
-	local_irq_restore(flags);\
-}
-BFIN_WRITE_FIO_FLAG(D)
-BFIN_WRITE_FIO_FLAG(C)
-BFIN_WRITE_FIO_FLAG(S)
-BFIN_WRITE_FIO_FLAG(T)
-
-#define BFIN_READ_FIO_FLAG(name) \
-static __inline__ unsigned short bfin_read_FIO_FLAG_ ## name (void)\
-{\
-	unsigned long flags;\
-	unsigned short ret;\
-	local_irq_save(flags);\
-	ret = bfin_read16(FIO_FLAG_ ## name);\
-	bfin_read_CHIPID();\
-	local_irq_restore(flags);\
-	return ret;\
-}
-BFIN_READ_FIO_FLAG(D)
-BFIN_READ_FIO_FLAG(C)
-BFIN_READ_FIO_FLAG(S)
-BFIN_READ_FIO_FLAG(T)
-
-#else
-#define bfin_write_FIO_FLAG_D(val)           bfin_write16(FIO_FLAG_D,val)
-#define bfin_write_FIO_FLAG_C(val)           bfin_write16(FIO_FLAG_C,val)
-#define bfin_write_FIO_FLAG_S(val)           bfin_write16(FIO_FLAG_S,val)
-#define bfin_write_FIO_FLAG_T(val)           bfin_write16(FIO_FLAG_T,val)
-#define bfin_read_FIO_FLAG_T()               bfin_read16(FIO_FLAG_T)
-#define bfin_read_FIO_FLAG_C()               bfin_read16(FIO_FLAG_C)
-#define bfin_read_FIO_FLAG_S()               bfin_read16(FIO_FLAG_S)
-#define bfin_read_FIO_FLAG_D()               bfin_read16(FIO_FLAG_D)
-#endif
-
-
 /* DMA Controller */
 #define bfin_read_DMA0_CONFIG()              bfin_read16(DMA0_CONFIG)
 #define bfin_write_DMA0_CONFIG(val)          bfin_write16(DMA0_CONFIG,val)
@@ -764,4 +676,93 @@
 #define bfin_read_PPI_FRAME()                bfin_read16(PPI_FRAME)
 #define bfin_write_PPI_FRAME(val)            bfin_write16(PPI_FRAME,val)
 
+/* These need to be last due to the cdef/linux inter-dependencies */
+#include <asm/irq.h>
+
+#if ANOMALY_05000311
+#define BFIN_WRITE_FIO_FLAG(name) \
+static inline void bfin_write_FIO_FLAG_##name(unsigned short val) \
+{ \
+	unsigned long flags; \
+	local_irq_save_hw(flags); \
+	bfin_write16(FIO_FLAG_##name, val); \
+	bfin_read_CHIPID(); \
+	local_irq_restore_hw(flags); \
+}
+BFIN_WRITE_FIO_FLAG(D)
+BFIN_WRITE_FIO_FLAG(C)
+BFIN_WRITE_FIO_FLAG(S)
+BFIN_WRITE_FIO_FLAG(T)
+
+#define BFIN_READ_FIO_FLAG(name) \
+static inline u16 bfin_read_FIO_FLAG_##name(void) \
+{ \
+	unsigned long flags; \
+	u16 ret; \
+	local_irq_save_hw(flags); \
+	ret = bfin_read16(FIO_FLAG_##name); \
+	bfin_read_CHIPID(); \
+	local_irq_restore_hw(flags); \
+	return ret; \
+}
+BFIN_READ_FIO_FLAG(D)
+BFIN_READ_FIO_FLAG(C)
+BFIN_READ_FIO_FLAG(S)
+BFIN_READ_FIO_FLAG(T)
+
+#else
+#define bfin_write_FIO_FLAG_D(val)           bfin_write16(FIO_FLAG_D, val)
+#define bfin_write_FIO_FLAG_C(val)           bfin_write16(FIO_FLAG_C, val)
+#define bfin_write_FIO_FLAG_S(val)           bfin_write16(FIO_FLAG_S, val)
+#define bfin_write_FIO_FLAG_T(val)           bfin_write16(FIO_FLAG_T, val)
+#define bfin_read_FIO_FLAG_T()               bfin_read16(FIO_FLAG_T)
+#define bfin_read_FIO_FLAG_C()               bfin_read16(FIO_FLAG_C)
+#define bfin_read_FIO_FLAG_S()               bfin_read16(FIO_FLAG_S)
+#define bfin_read_FIO_FLAG_D()               bfin_read16(FIO_FLAG_D)
+#endif
+
+/* Writing to PLL_CTL initiates a PLL relock sequence. */
+static __inline__ void bfin_write_PLL_CTL(unsigned int val)
+{
+	unsigned long flags, iwr;
+
+	if (val == bfin_read_PLL_CTL())
+		return;
+
+	local_irq_save_hw(flags);
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	iwr = bfin_read32(SIC_IWR);
+	/* Only allow PLL Wakeup */
+	bfin_write32(SIC_IWR, IWR_ENABLE(0));
+
+	bfin_write16(PLL_CTL, val);
+	SSYNC();
+	asm("IDLE;");
+
+	bfin_write32(SIC_IWR, iwr);
+	local_irq_restore_hw(flags);
+}
+
+/* Writing to VR_CTL initiates a PLL relock sequence. */
+static __inline__ void bfin_write_VR_CTL(unsigned int val)
+{
+	unsigned long flags, iwr;
+
+	if (val == bfin_read_VR_CTL())
+		return;
+
+	local_irq_save_hw(flags);
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	iwr = bfin_read32(SIC_IWR);
+	/* Only allow PLL Wakeup */
+	bfin_write32(SIC_IWR, IWR_ENABLE(0));
+
+	bfin_write16(VR_CTL, val);
+	SSYNC();
+	asm("IDLE;");
+
+	bfin_write32(SIC_IWR, iwr);
+	local_irq_restore_hw(flags);
+}
+
 #endif				/* _CDEF_BF532_H */
diff --git a/arch/blackfin/mach-bf533/include/mach/dma.h b/arch/blackfin/mach-bf533/include/mach/dma.h
index bd9d5e9..fb34934 100644
--- a/arch/blackfin/mach-bf533/include/mach/dma.h
+++ b/arch/blackfin/mach-bf533/include/mach/dma.h
@@ -1,42 +1,14 @@
-/*****************************************************************************
-*
-*        BF-533/2/1 Specific Declarations
-*
-****************************************************************************/
-/*
- * File:         include/asm-blackfin/mach-bf533/dma.h
- * Based on:
- * Author:
+/* mach/dma.h - arch-specific DMA defines
  *
- * Created:
- * Description:
+ * Copyright 2004-2008 Analog Devices Inc.
  *
- * Rev:
- *
- * Modified:
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _MACH_DMA_H_
 #define _MACH_DMA_H_
 
-#define MAX_BLACKFIN_DMA_CHANNEL 12
+#define MAX_DMA_CHANNELS 12
 
 #define CH_PPI          0
 #define CH_SPORT0_RX    1
@@ -44,8 +16,8 @@
 #define CH_SPORT1_RX    3
 #define CH_SPORT1_TX    4
 #define CH_SPI          5
-#define CH_UART_RX      6
-#define CH_UART_TX      7
+#define CH_UART0_RX     6
+#define CH_UART0_TX     7
 #define CH_MEM_STREAM0_DEST     8	 /* TX */
 #define CH_MEM_STREAM0_SRC      9	 /* RX */
 #define CH_MEM_STREAM1_DEST     10	 /* TX */
diff --git a/arch/blackfin/mach-bf533/include/mach/gpio.h b/arch/blackfin/mach-bf533/include/mach/gpio.h
new file mode 100644
index 0000000..e45c170
--- /dev/null
+++ b/arch/blackfin/mach-bf533/include/mach/gpio.h
@@ -0,0 +1,34 @@
+/*
+ * File: arch/blackfin/mach-bf533/include/mach/gpio.h
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Copyright (C) 2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+
+#ifndef _MACH_GPIO_H_
+#define _MACH_GPIO_H_
+
+#define MAX_BLACKFIN_GPIOS 16
+
+#define	GPIO_PF0	0
+#define	GPIO_PF1	1
+#define	GPIO_PF2	2
+#define	GPIO_PF3	3
+#define	GPIO_PF4	4
+#define	GPIO_PF5	5
+#define	GPIO_PF6	6
+#define	GPIO_PF7	7
+#define	GPIO_PF8	8
+#define	GPIO_PF9	9
+#define	GPIO_PF10	10
+#define	GPIO_PF11	11
+#define	GPIO_PF12	12
+#define	GPIO_PF13	13
+#define	GPIO_PF14	14
+#define	GPIO_PF15	15
+
+#define PORT_F GPIO_PF0
+
+#endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf533/include/mach/irq.h b/arch/blackfin/mach-bf533/include/mach/irq.h
index 5aa38e5..db1e346 100644
--- a/arch/blackfin/mach-bf533/include/mach/irq.h
+++ b/arch/blackfin/mach-bf533/include/mach/irq.h
@@ -90,19 +90,19 @@
 #define	IRQ_SPORT0_ERROR	10	/*SPORT0 Error Interrupt */
 #define	IRQ_SPORT1_ERROR	11	/*SPORT1 Error Interrupt */
 #define	IRQ_SPI_ERROR		12	/*SPI Error Interrupt */
-#define	IRQ_UART_ERROR		13	/*UART Error Interrupt */
+#define	IRQ_UART0_ERROR		13	/*UART Error Interrupt */
 #define	IRQ_RTC			14	/*RTC Interrupt */
 #define	IRQ_PPI			15	/*DMA0 Interrupt (PPI) */
 #define	IRQ_SPORT0_RX		16	/*DMA1 Interrupt (SPORT0 RX) */
 #define	IRQ_SPORT0_TX		17	/*DMA2 Interrupt (SPORT0 TX) */
 #define	IRQ_SPORT1_RX		18	/*DMA3 Interrupt (SPORT1 RX) */
 #define	IRQ_SPORT1_TX		19	/*DMA4 Interrupt (SPORT1 TX) */
-#define 	IRQ_SPI			20	/*DMA5 Interrupt (SPI) */
-#define	IRQ_UART_RX		21	/*DMA6 Interrupt (UART RX) */
-#define	IRQ_UART_TX		22	/*DMA7 Interrupt (UART TX) */
-#define	IRQ_TMR0		23	/*Timer 0 */
-#define	IRQ_TMR1		24	/*Timer 1 */
-#define	IRQ_TMR2		25	/*Timer 2 */
+#define	IRQ_SPI			20	/*DMA5 Interrupt (SPI) */
+#define	IRQ_UART0_RX		21	/*DMA6 Interrupt (UART RX) */
+#define	IRQ_UART0_TX		22	/*DMA7 Interrupt (UART TX) */
+#define	IRQ_TIMER0		23	/*Timer 0 */
+#define	IRQ_TIMER1		24	/*Timer 1 */
+#define	IRQ_TIMER2		25	/*Timer 2 */
 #define	IRQ_PROG_INTA		26	/*Programmable Flags A (8) */
 #define	IRQ_PROG_INTB		27	/*Programmable Flags B (8) */
 #define	IRQ_MEM_DMA0		28	/*DMA8/9 Interrupt (Memory DMA Stream 0) */
diff --git a/arch/blackfin/mach-bf533/include/mach/mem_init.h b/arch/blackfin/mach-bf533/include/mach/mem_init.h
deleted file mode 100644
index ed2034b..0000000
--- a/arch/blackfin/mach-bf533/include/mach/mem_init.h
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * File:         include/asm-blackfin/mach-bf533/mem_init.h
- * Based on:
- * Author:
- *
- * Created:
- * Description:
- *
- * Rev:
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#if (CONFIG_MEM_MT48LC16M16A2TG_75 || CONFIG_MEM_MT48LC64M4A2FB_7E || \
-     CONFIG_MEM_MT48LC32M16A2TG_75 || CONFIG_MEM_GENERIC_BOARD)
-#if (CONFIG_SCLK_HZ > 119402985)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_7
-#define SDRAM_tRAS_num  7
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 104477612) && (CONFIG_SCLK_HZ <= 119402985)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_6
-#define SDRAM_tRAS_num  6
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 89552239) && (CONFIG_SCLK_HZ <= 104477612)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_5
-#define SDRAM_tRAS_num  5
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 74626866) && (CONFIG_SCLK_HZ <= 89552239)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_4
-#define SDRAM_tRAS_num  4
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 66666667) && (CONFIG_SCLK_HZ <= 74626866)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_3
-#define SDRAM_tRAS_num  3
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 59701493) && (CONFIG_SCLK_HZ <= 66666667)
-#define SDRAM_tRP       TRP_1
-#define SDRAM_tRP_num   1
-#define SDRAM_tRAS      TRAS_4
-#define SDRAM_tRAS_num  3
-#define SDRAM_tRCD      TRCD_1
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 44776119) && (CONFIG_SCLK_HZ <= 59701493)
-#define SDRAM_tRP       TRP_1
-#define SDRAM_tRP_num   1
-#define SDRAM_tRAS      TRAS_3
-#define SDRAM_tRAS_num  3
-#define SDRAM_tRCD      TRCD_1
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 29850746) && (CONFIG_SCLK_HZ <= 44776119)
-#define SDRAM_tRP       TRP_1
-#define SDRAM_tRP_num   1
-#define SDRAM_tRAS      TRAS_2
-#define SDRAM_tRAS_num  2
-#define SDRAM_tRCD      TRCD_1
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ <= 29850746)
-#define SDRAM_tRP       TRP_1
-#define SDRAM_tRP_num   1
-#define SDRAM_tRAS      TRAS_1
-#define SDRAM_tRAS_num  1
-#define SDRAM_tRCD      TRCD_1
-#define SDRAM_tWR       TWR_2
-#endif
-#endif
-
-#if (CONFIG_MEM_MT48LC16M16A2TG_75)
-  /*SDRAM INFORMATION: */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-#if (CONFIG_MEM_MT48LC64M4A2FB_7E)
-  /*SDRAM INFORMATION: */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-#if (CONFIG_MEM_MT48LC32M16A2TG_75)
-  /*SDRAM INFORMATION: */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-#if (CONFIG_MEM_GENERIC_BOARD)
-  /*SDRAM INFORMATION: Modify this for your board */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-/* Equation from section 17 (p17-46) of BF533 HRM */
-#define mem_SDRRC       (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref)  / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num)
-
-/* Enable SCLK Out */
-#define mem_SDGCTL        (SCTLE | SDRAM_CL | SDRAM_tRAS | SDRAM_tRP | SDRAM_tRCD | SDRAM_tWR | PSS)
-
-#if defined CONFIG_CLKIN_HALF
-#define CLKIN_HALF       1
-#else
-#define CLKIN_HALF       0
-#endif
-
-#if defined CONFIG_PLL_BYPASS
-#define PLL_BYPASS      1
-#else
-#define PLL_BYPASS       0
-#endif
-
-/***************************************Currently Not Being Used *********************************/
-#define flash_EBIU_AMBCTL_WAT  ((CONFIG_FLASH_SPEED_BWAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_RAT  ((CONFIG_FLASH_SPEED_BRAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_HT   ((CONFIG_FLASH_SPEED_BHT  * 4) / (4000000000 / CONFIG_SCLK_HZ))
-#define flash_EBIU_AMBCTL_ST   ((CONFIG_FLASH_SPEED_BST  * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_TT   ((CONFIG_FLASH_SPEED_BTT  * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-
-#if (flash_EBIU_AMBCTL_TT > 3)
-#define flash_EBIU_AMBCTL0_TT   B0TT_4
-#endif
-#if (flash_EBIU_AMBCTL_TT == 3)
-#define flash_EBIU_AMBCTL0_TT   B0TT_3
-#endif
-#if (flash_EBIU_AMBCTL_TT == 2)
-#define flash_EBIU_AMBCTL0_TT   B0TT_2
-#endif
-#if (flash_EBIU_AMBCTL_TT < 2)
-#define flash_EBIU_AMBCTL0_TT   B0TT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_ST > 3)
-#define flash_EBIU_AMBCTL0_ST   B0ST_4
-#endif
-#if (flash_EBIU_AMBCTL_ST == 3)
-#define flash_EBIU_AMBCTL0_ST   B0ST_3
-#endif
-#if (flash_EBIU_AMBCTL_ST == 2)
-#define flash_EBIU_AMBCTL0_ST   B0ST_2
-#endif
-#if (flash_EBIU_AMBCTL_ST < 2)
-#define flash_EBIU_AMBCTL0_ST   B0ST_1
-#endif
-
-#if (flash_EBIU_AMBCTL_HT > 2)
-#define flash_EBIU_AMBCTL0_HT   B0HT_3
-#endif
-#if (flash_EBIU_AMBCTL_HT == 2)
-#define flash_EBIU_AMBCTL0_HT   B0HT_2
-#endif
-#if (flash_EBIU_AMBCTL_HT == 1)
-#define flash_EBIU_AMBCTL0_HT   B0HT_1
-#endif
-#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT == 0)
-#define flash_EBIU_AMBCTL0_HT   B0HT_0
-#endif
-#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT != 0)
-#define flash_EBIU_AMBCTL0_HT   B0HT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_WAT > 14)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_15
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 14)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_14
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 13)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_13
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 12)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_12
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 11)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_11
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 10)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_10
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 9)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_9
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 8)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_8
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 7)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_7
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 6)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_6
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 5)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_5
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 4)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_4
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 3)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_3
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 2)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_2
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 1)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_RAT > 14)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_15
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 14)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_14
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 13)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_13
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 12)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_12
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 11)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_11
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 10)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_10
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 9)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_9
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 8)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_8
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 7)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_7
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 6)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_6
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 5)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_5
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 4)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_4
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 3)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_3
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 2)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_2
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 1)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_1
-#endif
-
-#define flash_EBIU_AMBCTL0  \
-	(flash_EBIU_AMBCTL0_WAT | flash_EBIU_AMBCTL0_RAT | flash_EBIU_AMBCTL0_HT | \
-	 flash_EBIU_AMBCTL0_ST | flash_EBIU_AMBCTL0_TT | CONFIG_FLASH_SPEED_RDYEN)
diff --git a/arch/blackfin/mach-bf533/include/mach/mem_map.h b/arch/blackfin/mach-bf533/include/mach/mem_map.h
index 581fc6e..fc33b7c 100644
--- a/arch/blackfin/mach-bf533/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf533/include/mach/mem_map.h
@@ -168,4 +168,10 @@
 #define L1_SCRATCH_START	0xFFB00000
 #define L1_SCRATCH_LENGTH	0x1000
 
+#define GET_PDA_SAFE(preg)		\
+	preg.l = _cpu_pda;		\
+	preg.h = _cpu_pda;
+
+#define GET_PDA(preg, dreg)	GET_PDA_SAFE(preg)
+
 #endif				/* _MEM_MAP_533_H_ */
diff --git a/arch/blackfin/mach-bf537/Kconfig b/arch/blackfin/mach-bf537/Kconfig
index 8255374..bbc08fd 100644
--- a/arch/blackfin/mach-bf537/Kconfig
+++ b/arch/blackfin/mach-bf537/Kconfig
@@ -64,29 +64,29 @@
 config IRQ_MAC_TX
 	int "IRQ_MAC_TX"
 	default 11
-config IRQ_TMR0
-	int "IRQ_TMR0"
+config IRQ_TIMER0
+	int "IRQ_TIMER0"
+	default 8
+config IRQ_TIMER1
+	int "IRQ_TIMER1"
 	default 12
-config IRQ_TMR1
-	int "IRQ_TMR1"
+config IRQ_TIMER2
+	int "IRQ_TIMER2"
 	default 12
-config IRQ_TMR2
-	int "IRQ_TMR2"
+config IRQ_TIMER3
+	int "IRQ_TIMER3"
 	default 12
-config IRQ_TMR3
-	int "IRQ_TMR3"
+config IRQ_TIMER4
+	int "IRQ_TIMER4"
 	default 12
-config IRQ_TMR4
-	int "IRQ_TMR4"
+config IRQ_TIMER5
+	int "IRQ_TIMER5"
 	default 12
-config IRQ_TMR5
-	int "IRQ_TMR5"
+config IRQ_TIMER6
+	int "IRQ_TIMER6"
 	default 12
-config IRQ_TMR6
-	int "IRQ_TMR6"
-	default 12
-config IRQ_TMR7
-	int "IRQ_TMR7"
+config IRQ_TIMER7
+	int "IRQ_TIMER7"
 	default 12
 config IRQ_PROG_INTA
 	int "IRQ_PROG_INTA"
diff --git a/arch/blackfin/mach-bf537/Makefile b/arch/blackfin/mach-bf537/Makefile
index 68e5478..56994b6 100644
--- a/arch/blackfin/mach-bf537/Makefile
+++ b/arch/blackfin/mach-bf537/Makefile
@@ -2,6 +2,4 @@
 # arch/blackfin/mach-bf537/Makefile
 #
 
-extra-y := head.o
-
 obj-y := ints-priority.o dma.o
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537.c b/arch/blackfin/mach-bf537/boards/cm_bf537.c
index dde1472..6ac8e4d 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537.c
@@ -308,6 +308,19 @@
 };
 #endif
 
+static struct resource bfin_gpios_resources = {
+	.start = 0,
+	.end   = MAX_BLACKFIN_GPIOS - 1,
+	.flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device bfin_gpios_device = {
+	.name = "simple-gpio",
+	.id = -1,
+	.num_resources = 1,
+	.resource = &bfin_gpios_resources,
+};
+
 #if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
 static struct mtd_partition cm_partitions[] = {
 	{
@@ -379,30 +392,57 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
 #endif
 #ifdef CONFIG_BFIN_SIR1
+static struct resource bfin_sir1_resources[] = {
 	{
 		.start = 0xFFC02000,
 		.end = 0xFFC020FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART1_RX,
+		.end = IRQ_UART1_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_RX,
+		.end = CH_UART1_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
-
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir1_device = {
 	.name = "bfin_sir",
-	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
+	.resource = bfin_sir1_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
 static struct resource bfin_twi0_resource[] = {
@@ -525,7 +565,12 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#ifdef CONFIG_BFIN_SIR1
+	&bfin_sir1_device,
+#endif
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
@@ -564,6 +609,8 @@
 #if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
 	&cm_flash_device,
 #endif
+
+	&bfin_gpios_device,
 };
 
 static int __init cm_bf537_init(void)
diff --git a/arch/blackfin/mach-bf537/boards/generic_board.c b/arch/blackfin/mach-bf537/boards/generic_board.c
index 78a13d5..dd6e6bf 100644
--- a/arch/blackfin/mach-bf537/boards/generic_board.c
+++ b/arch/blackfin/mach-bf537/boards/generic_board.c
@@ -50,57 +50,46 @@
 /*
  * Name the Board for the /proc/cpuinfo
  */
-const char bfin_board_name[] = "GENERIC Board";
+const char bfin_board_name[] = "UNKNOWN BOARD";
 
 /*
  *  Driver needs to know address, irq and flag pin.
  */
 
-#define ISP1761_BASE       0x203C0000
-#define ISP1761_IRQ        IRQ_PF7
-
 #if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
-static struct resource bfin_isp1761_resources[] = {
+#include <linux/usb/isp1760.h>
+static struct resource bfin_isp1760_resources[] = {
 	[0] = {
-		.name	= "isp1761-regs",
-		.start  = ISP1761_BASE + 0x00000000,
-		.end    = ISP1761_BASE + 0x000fffff,
+		.start  = 0x203C0000,
+		.end    = 0x203C0000 + 0x000fffff,
 		.flags  = IORESOURCE_MEM,
 	},
 	[1] = {
-		.start  = ISP1761_IRQ,
-		.end    = ISP1761_IRQ,
+		.start  = IRQ_PF7,
+		.end    = IRQ_PF7,
 		.flags  = IORESOURCE_IRQ,
 	},
 };
 
-static struct platform_device bfin_isp1761_device = {
-	.name           = "isp1761",
+static struct isp1760_platform_data isp1760_priv = {
+	.is_isp1761 = 0,
+	.port1_disable = 0,
+	.bus_width_16 = 1,
+	.port1_otg = 0,
+	.analog_oc = 0,
+	.dack_polarity_high = 0,
+	.dreq_polarity_high = 0,
+};
+
+static struct platform_device bfin_isp1760_device = {
+	.name           = "isp1760-hcd",
 	.id             = 0,
-	.num_resources  = ARRAY_SIZE(bfin_isp1761_resources),
-	.resource       = bfin_isp1761_resources,
+	.dev = {
+		.platform_data = &isp1760_priv,
+	},
+	.num_resources  = ARRAY_SIZE(bfin_isp1760_resources),
+	.resource       = bfin_isp1760_resources,
 };
-
-static struct platform_device *bfin_isp1761_devices[] = {
-	&bfin_isp1761_device,
-};
-
-int __init bfin_isp1761_init(void)
-{
-	unsigned int num_devices = ARRAY_SIZE(bfin_isp1761_devices);
-
-	printk(KERN_INFO "%s(): registering device resources\n", __func__);
-	set_irq_type(ISP1761_IRQ, IRQF_TRIGGER_FALLING);
-
-	return platform_add_devices(bfin_isp1761_devices, num_devices);
-}
-
-void __exit bfin_isp1761_exit(void)
-{
-	platform_device_unregister(&bfin_isp1761_device);
-}
-
-arch_initcall(bfin_isp1761_init);
 #endif
 
 #if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
@@ -559,30 +548,59 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
 #endif
 #ifdef CONFIG_BFIN_SIR1
+static struct resource bfin_sir1_resources[] = {
 	{
 		.start = 0xFFC02000,
 		.end = 0xFFC020FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART1_RX,
+		.end = IRQ_UART1_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_RX,
+		.end = CH_UART1_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir1_device = {
 	.name = "bfin_sir",
-	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
+	.resource = bfin_sir1_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
 static struct resource bfin_twi0_resource[] = {
@@ -651,6 +669,10 @@
 	&net2272_bfin_device,
 #endif
 
+#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
+	&bfin_isp1760_device,
+#endif
+
 #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
 	&bfin_spi0_device,
 #endif
@@ -668,7 +690,12 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#ifdef CONFIG_BFIN_SIR1
+	&bfin_sir1_device,
+#endif
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index 48c4cd2..bb79534 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -226,30 +226,59 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
 #endif
 #ifdef CONFIG_BFIN_SIR1
+static struct resource bfin_sir1_resources[] = {
 	{
 		.start = 0xFFC02000,
 		.end = 0xFFC020FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART1_RX,
+		.end = IRQ_UART1_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_RX,
+		.end = CH_UART1_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir1_device = {
 	.name = "bfin_sir",
-	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
+	.resource = bfin_sir1_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
 static struct resource bfin_twi0_resource[] = {
@@ -311,7 +340,12 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#ifdef CONFIG_BFIN_SIR1
+	&bfin_sir1_device,
+#endif
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index f9174c1..89de94f 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -49,7 +49,7 @@
 /*
  * Name the Board for the /proc/cpuinfo
  */
-const char bfin_board_name[] = "PNAV-1.0";
+const char bfin_board_name[] = "ADI PNAV-1.0";
 
 /*
  *  Driver needs to know address, irq and flag pin.
@@ -453,30 +453,59 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
 #endif
 #ifdef CONFIG_BFIN_SIR1
+static struct resource bfin_sir1_resources[] = {
 	{
 		.start = 0xFFC02000,
 		.end = 0xFFC020FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART1_RX,
+		.end = IRQ_UART1_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_RX,
+		.end = CH_UART1_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir1_device = {
 	.name = "bfin_sir",
-	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
+	.resource = bfin_sir1_resources,
 };
 #endif
+#endif
 
 static struct platform_device *stamp_devices[] __initdata = {
 #if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
@@ -520,7 +549,12 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#ifdef CONFIG_BFIN_SIR1
+	&bfin_sir1_device,
+#endif
 #endif
 };
 
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 8d39439..d812e25 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -46,6 +46,7 @@
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
 #include <linux/usb/sl811.h>
+#include <linux/spi/mmc_spi.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/reboot.h>
@@ -55,57 +56,46 @@
 /*
  * Name the Board for the /proc/cpuinfo
  */
-const char bfin_board_name[] = "ADDS-BF537-STAMP";
+const char bfin_board_name[] = "ADI BF537-STAMP";
 
 /*
  *  Driver needs to know address, irq and flag pin.
  */
 
-#define ISP1761_BASE       0x203C0000
-#define ISP1761_IRQ        IRQ_PF7
-
 #if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
-static struct resource bfin_isp1761_resources[] = {
+#include <linux/usb/isp1760.h>
+static struct resource bfin_isp1760_resources[] = {
 	[0] = {
-		.name	= "isp1761-regs",
-		.start  = ISP1761_BASE + 0x00000000,
-		.end    = ISP1761_BASE + 0x000fffff,
+		.start  = 0x203C0000,
+		.end    = 0x203C0000 + 0x000fffff,
 		.flags  = IORESOURCE_MEM,
 	},
 	[1] = {
-		.start  = ISP1761_IRQ,
-		.end    = ISP1761_IRQ,
-		.flags  = IORESOURCE_IRQ,
+		.start  = IRQ_PF7,
+		.end    = IRQ_PF7,
+		.flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
 	},
 };
 
-static struct platform_device bfin_isp1761_device = {
-	.name           = "isp1761",
+static struct isp1760_platform_data isp1760_priv = {
+	.is_isp1761 = 0,
+	.port1_disable = 0,
+	.bus_width_16 = 1,
+	.port1_otg = 0,
+	.analog_oc = 0,
+	.dack_polarity_high = 0,
+	.dreq_polarity_high = 0,
+};
+
+static struct platform_device bfin_isp1760_device = {
+	.name           = "isp1760-hcd",
 	.id             = 0,
-	.num_resources  = ARRAY_SIZE(bfin_isp1761_resources),
-	.resource       = bfin_isp1761_resources,
+	.dev = {
+		.platform_data = &isp1760_priv,
+	},
+	.num_resources  = ARRAY_SIZE(bfin_isp1760_resources),
+	.resource       = bfin_isp1760_resources,
 };
-
-static struct platform_device *bfin_isp1761_devices[] = {
-	&bfin_isp1761_device,
-};
-
-int __init bfin_isp1761_init(void)
-{
-	unsigned int num_devices = ARRAY_SIZE(bfin_isp1761_devices);
-
-	printk(KERN_INFO "%s(): registering device resources\n", __func__);
-	set_irq_type(ISP1761_IRQ, IRQF_TRIGGER_FALLING);
-
-	return platform_add_devices(bfin_isp1761_devices, num_devices);
-}
-
-void __exit bfin_isp1761_exit(void)
-{
-	platform_device_unregister(&bfin_isp1761_device);
-}
-
-arch_initcall(bfin_isp1761_init);
 #endif
 
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
@@ -443,11 +433,11 @@
 		.offset     = 0,
 	}, {
 		.name       = "linux kernel(nor)",
-		.size       = 0xE0000,
+		.size       = 0x180000,
 		.offset     = MTDPART_OFS_APPEND,
 	}, {
 		.name       = "file system(nor)",
-		.size       = 0x400000 - 0x40000 - 0xE0000 - 0x10000,
+		.size       = 0x400000 - 0x40000 - 0x180000 - 0x10000,
 		.offset     = MTDPART_OFS_APPEND,
 	}, {
 		.name       = "MAC Address(nor)",
@@ -490,7 +480,7 @@
 		.mask_flags = MTD_CAP_ROM
 	}, {
 		.name = "linux kernel(spi)",
-		.size = 0xe0000,
+		.size = 0x180000,
 		.offset = MTDPART_OFS_APPEND,
 	}, {
 		.name = "file system(spi)",
@@ -503,7 +493,7 @@
 	.name = "m25p80",
 	.parts = bfin_spi_flash_partitions,
 	.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
-	.type = "m25p64",
+	/* .type = "m25p64", */
 };
 
 /* SPI flash chip (m25p64) */
@@ -537,9 +527,29 @@
 };
 #endif
 
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
-static struct bfin5xx_spi_chip spi_mmc_chip_info = {
-	.enable_dma = 1,
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+#define MMC_SPI_CARD_DETECT_INT IRQ_PF5
+
+static int bfin_mmc_spi_init(struct device *dev,
+	irqreturn_t (*detect_int)(int, void *), void *data)
+{
+	return request_irq(MMC_SPI_CARD_DETECT_INT, detect_int,
+		IRQF_TRIGGER_FALLING, "mmc-spi-detect", data);
+}
+
+static void bfin_mmc_spi_exit(struct device *dev, void *data)
+{
+	free_irq(MMC_SPI_CARD_DETECT_INT, data);
+}
+
+static struct mmc_spi_platform_data bfin_mmc_spi_pdata = {
+	.init = bfin_mmc_spi_init,
+	.exit = bfin_mmc_spi_exit,
+	.detect_delay = 100, /* msecs */
+};
+
+static struct bfin5xx_spi_chip  mmc_spi_chip_info = {
+	.enable_dma = 0,
 	.bits_per_word = 8,
 };
 #endif
@@ -613,6 +623,14 @@
 };
 #endif
 
+#if defined(CONFIG_ENC28J60) || defined(CONFIG_ENC28J60_MODULE)
+static struct bfin5xx_spi_chip enc28j60_spi_chip_info = {
+	.enable_dma	= 1,
+	.bits_per_word	= 8,
+	.cs_gpio = GPIO_PF10,
+};
+#endif
+
 #if defined(CONFIG_MTD_DATAFLASH) \
 	|| defined(CONFIG_MTD_DATAFLASH_MODULE)
 
@@ -624,7 +642,7 @@
 		.mask_flags = MTD_CAP_ROM
 	}, {
 		.name = "linux kernel(spi)",
-		.size = 0xe0000,
+		.size = 0x180000,
 		.offset = MTDPART_OFS_APPEND,
 	}, {
 		.name = "file system(spi)",
@@ -703,23 +721,14 @@
 		.controller_data = &ad9960_spi_chip_info,
 	},
 #endif
-#if defined(CONFIG_SPI_MMC) || defined(CONFIG_SPI_MMC_MODULE)
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
 	{
-		.modalias = "spi_mmc_dummy",
+		.modalias = "mmc_spi",
 		.max_speed_hz = 20000000,     /* max spi clock (SCK) speed in HZ */
 		.bus_num = 0,
-		.chip_select = 0,
-		.platform_data = NULL,
-		.controller_data = &spi_mmc_chip_info,
-		.mode = SPI_MODE_3,
-	},
-	{
-		.modalias = "spi_mmc",
-		.max_speed_hz = 20000000,     /* max spi clock (SCK) speed in HZ */
-		.bus_num = 0,
-		.chip_select = CONFIG_SPI_MMC_CS_CHAN,
-		.platform_data = NULL,
-		.controller_data = &spi_mmc_chip_info,
+		.chip_select = 4,
+		.platform_data = &bfin_mmc_spi_pdata,
+		.controller_data = &mmc_spi_chip_info,
 		.mode = SPI_MODE_3,
 	},
 #endif
@@ -783,6 +792,17 @@
 		.mode = SPI_CPHA | SPI_CPOL,
 	},
 #endif
+#if defined(CONFIG_ENC28J60) || defined(CONFIG_ENC28J60_MODULE)
+	{
+		.modalias = "enc28j60",
+		.max_speed_hz = 20000000,     /* max spi clock (SCK) speed in HZ */
+		.irq = IRQ_PF6,
+		.bus_num = 0,
+		.chip_select = 0,	/* GPIO controlled SSEL */
+		.controller_data = &enc28j60_spi_chip_info,
+		.mode = SPI_MODE_0,
+	},
+#endif
 };
 
 #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
@@ -885,30 +905,59 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
 #endif
 #ifdef CONFIG_BFIN_SIR1
+static struct resource bfin_sir1_resources[] = {
 	{
 		.start = 0xFFC02000,
 		.end = 0xFFC020FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART1_RX,
+		.end = IRQ_UART1_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_RX,
+		.end = CH_UART1_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir1_device = {
 	.name = "bfin_sir",
-	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
+	.resource = bfin_sir1_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
 static struct resource bfin_twi0_resource[] = {
@@ -932,6 +981,93 @@
 };
 #endif
 
+#if defined(CONFIG_KEYBOARD_ADP5588) || defined(CONFIG_KEYBOARD_ADP5588_MODULE)
+#include <linux/input.h>
+#include <linux/i2c/adp5588_keys.h>
+static const unsigned short adp5588_keymap[ADP5588_KEYMAPSIZE] = {
+	[0]	 = KEY_GRAVE,
+	[1]	 = KEY_1,
+	[2]	 = KEY_2,
+	[3]	 = KEY_3,
+	[4]	 = KEY_4,
+	[5]	 = KEY_5,
+	[6]	 = KEY_6,
+	[7]	 = KEY_7,
+	[8]	 = KEY_8,
+	[9]	 = KEY_9,
+	[10]	 = KEY_0,
+	[11]	 = KEY_MINUS,
+	[12]	 = KEY_EQUAL,
+	[13]	 = KEY_BACKSLASH,
+	[15]	 = KEY_KP0,
+	[16]	 = KEY_Q,
+	[17]	 = KEY_W,
+	[18]	 = KEY_E,
+	[19]	 = KEY_R,
+	[20]	 = KEY_T,
+	[21]	 = KEY_Y,
+	[22]	 = KEY_U,
+	[23]	 = KEY_I,
+	[24]	 = KEY_O,
+	[25]	 = KEY_P,
+	[26]	 = KEY_LEFTBRACE,
+	[27]	 = KEY_RIGHTBRACE,
+	[29]	 = KEY_KP1,
+	[30]	 = KEY_KP2,
+	[31]	 = KEY_KP3,
+	[32]	 = KEY_A,
+	[33]	 = KEY_S,
+	[34]	 = KEY_D,
+	[35]	 = KEY_F,
+	[36]	 = KEY_G,
+	[37]	 = KEY_H,
+	[38]	 = KEY_J,
+	[39]	 = KEY_K,
+	[40]	 = KEY_L,
+	[41]	 = KEY_SEMICOLON,
+	[42]	 = KEY_APOSTROPHE,
+	[43]	 = KEY_BACKSLASH,
+	[45]	 = KEY_KP4,
+	[46]	 = KEY_KP5,
+	[47]	 = KEY_KP6,
+	[48]	 = KEY_102ND,
+	[49]	 = KEY_Z,
+	[50]	 = KEY_X,
+	[51]	 = KEY_C,
+	[52]	 = KEY_V,
+	[53]	 = KEY_B,
+	[54]	 = KEY_N,
+	[55]	 = KEY_M,
+	[56]	 = KEY_COMMA,
+	[57]	 = KEY_DOT,
+	[58]	 = KEY_SLASH,
+	[60]	 = KEY_KPDOT,
+	[61]	 = KEY_KP7,
+	[62]	 = KEY_KP8,
+	[63]	 = KEY_KP9,
+	[64]	 = KEY_SPACE,
+	[65]	 = KEY_BACKSPACE,
+	[66]	 = KEY_TAB,
+	[67]	 = KEY_KPENTER,
+	[68]	 = KEY_ENTER,
+	[69]	 = KEY_ESC,
+	[70]	 = KEY_DELETE,
+	[74]	 = KEY_KPMINUS,
+	[76]	 = KEY_UP,
+	[77]	 = KEY_DOWN,
+	[78]	 = KEY_RIGHT,
+	[79]	 = KEY_LEFT,
+};
+
+static struct adp5588_kpad_platform_data adp5588_kpad_data = {
+	.rows		= 8,
+	.cols		= 10,
+	.keymap		= adp5588_keymap,
+	.keymapsize	= ARRAY_SIZE(adp5588_keymap),
+	.repeat		= 0,
+};
+#endif
+
 #ifdef CONFIG_I2C_BOARDINFO
 static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
 #if defined(CONFIG_JOYSTICK_AD7142) || defined(CONFIG_JOYSTICK_AD7142_MODULE)
@@ -958,6 +1094,13 @@
 		.platform_data = (void *)&bfin_ad7879_ts_info,
 	},
 #endif
+#if defined(CONFIG_KEYBOARD_ADP5588) || defined(CONFIG_KEYBOARD_ADP5588_MODULE)
+	{
+		I2C_BOARD_INFO("adp5588-keys", 0x34),
+		.irq = IRQ_PG0,
+		.platform_data = (void *)&adp5588_kpad_data,
+	},
+#endif
 };
 #endif
 
@@ -1057,6 +1200,10 @@
 	&isp1362_hcd_device,
 #endif
 
+#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
+	&bfin_isp1760_device,
+#endif
+
 #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
 	&smc91x_device,
 #endif
@@ -1098,7 +1245,12 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#ifdef CONFIG_BFIN_SIR1
+	&bfin_sir1_device,
+#endif
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
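Splitting the shared bfin_sir_device into per-port bfin_sir0/bfin_sir1 devices means each UART now carries its own MEM, IRQ and DMA resources. A hedged sketch (not the real bfin_sir probe) of how a driver would pull those three resources back out:

	#include <linux/platform_device.h>
	#include <linux/ioport.h>
	#include <linux/errno.h>

	static int example_sir_probe(struct platform_device *pdev)
	{
		struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
		struct resource *dma = platform_get_resource(pdev, IORESOURCE_DMA, 0);

		if (!mem || !irq || !dma)
			return -ENODEV;

		/* irq->start is the UARTx RX interrupt, dma->start the RX DMA channel */
		return 0;
	}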
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index d5ff705..2f4b066 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -308,6 +308,19 @@
 };
 #endif
 
+static struct resource bfin_gpios_resources = {
+	.start = 0,
+	.end   = MAX_BLACKFIN_GPIOS - 1,
+	.flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device bfin_gpios_device = {
+	.name = "simple-gpio",
+	.id = -1,
+	.num_resources = 1,
+	.resource = &bfin_gpios_resources,
+};
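The new simple-gpio device is bound by name on the platform bus. A sketch of the matching driver skeleton, assuming nothing beyond what the device above declares (the probe body is a stub; only the .name match is the point):

	#include <linux/platform_device.h>

	static int example_simple_gpio_probe(struct platform_device *pdev)
	{
		return 0;
	}

	static struct platform_driver example_simple_gpio_driver = {
		.probe  = example_simple_gpio_probe,
		.driver = {
			.name = "simple-gpio",	/* must match the device name above */
		},
	};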
+
 #if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
 static struct mtd_partition cm_partitions[] = {
 	{
@@ -379,30 +392,59 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
 #endif
 #ifdef CONFIG_BFIN_SIR1
+static struct resource bfin_sir1_resources[] = {
 	{
 		.start = 0xFFC02000,
 		.end = 0xFFC020FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART1_RX,
+		.end = IRQ_UART1_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_RX,
+		.end = CH_UART1_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir1_device = {
 	.name = "bfin_sir",
-	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
+	.resource = bfin_sir1_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
 static struct resource bfin_twi0_resource[] = {
@@ -525,7 +567,12 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#ifdef CONFIG_BFIN_SIR1
+	&bfin_sir1_device,
+#endif
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
@@ -564,6 +611,8 @@
 #if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
 	&cm_flash_device,
 #endif
+
+	&bfin_gpios_device,
 };
 
 static int __init cm_bf537_init(void)
diff --git a/arch/blackfin/mach-bf537/dma.c b/arch/blackfin/mach-bf537/dma.c
index 4edb363..8118505 100644
--- a/arch/blackfin/mach-bf537/dma.c
+++ b/arch/blackfin/mach-bf537/dma.c
@@ -31,7 +31,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_BLACKFIN_DMA_CHANNEL] = {
+struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
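The dma_io_base_addr table is now sized by MAX_DMA_CHANNELS, the generic name the rest of the Blackfin DMA code keys off. A sketch of the kind of bounds check that constant enables (illustrative helper, not from this patch):

	#include <asm/dma.h>

	static inline int example_valid_dma_channel(unsigned int channel)
	{
		/* MAX_DMA_CHANNELS is 16 on BF537 after this rename */
		return channel < MAX_DMA_CHANNELS;
	}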
diff --git a/arch/blackfin/mach-bf537/head.S b/arch/blackfin/mach-bf537/head.S
deleted file mode 100644
index f5c94bf..0000000
--- a/arch/blackfin/mach-bf537/head.S
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * File:         arch/blackfin/mach-bf537/head.S
- * Based on:     arch/blackfin/mach-bf533/head.S
- * Author:       Jeff Dionne <jeff@uclinux.org> COPYRIGHT 1998 D. Jeff Dionne
- *
- * Created:      1998
- * Description:  Startup code for Blackfin BF537
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/blackfin.h>
-#ifdef CONFIG_BFIN_KERNEL_CLOCK
-#include <asm/clocks.h>
-#include <mach/mem_init.h>
-#endif
-
-.section .l1.text
-#ifdef CONFIG_BFIN_KERNEL_CLOCK
-ENTRY(_start_dma_code)
-
-	/* Enable PHY CLK buffer output */
-	p0.h = hi(VR_CTL);
-	p0.l = lo(VR_CTL);
-	r0.l = w[p0];
-	bitset(r0, 14);
-	w[p0] = r0.l;
-	ssync;
-
-	p0.h = hi(SIC_IWR);
-	p0.l = lo(SIC_IWR);
-	r0.l = 0x1;
-	r0.h = 0x0;
-	[p0] = r0;
-	SSYNC;
-
-	/*
-	 *  Set PLL_CTL
-	 *   - [14:09] = MSEL[5:0] : CLKIN / VCO multiplication factors
-	 *   - [8]     = BYPASS    : BYPASS the PLL, run CLKIN into CCLK/SCLK
-	 *   - [7]     = output delay (add 200ps of delay to mem signals)
-	 *   - [6]     = input delay (add 200ps of input delay to mem signals)
-	 *   - [5]     = PDWN      : 1=All Clocks off
-	 *   - [3]     = STOPCK    : 1=Core Clock off
-	 *   - [1]     = PLL_OFF   : 1=Disable Power to PLL
-	 *   - [0]     = DF        : 1=Pass CLKIN/2 to PLL / 0=Pass CLKIN to PLL
-	 *   all other bits set to zero
-	 */
-
-	p0.h = hi(PLL_LOCKCNT);
-	p0.l = lo(PLL_LOCKCNT);
-	r0 = 0x300(Z);
-	w[p0] = r0.l;
-	ssync;
-
-	P2.H = hi(EBIU_SDGCTL);
-	P2.L = lo(EBIU_SDGCTL);
-	R0 = [P2];
-	BITSET (R0, 24);
-	[P2] = R0;
-	SSYNC;
-
-	r0 = CONFIG_VCO_MULT & 63;       /* Load the VCO multiplier         */
-	r0 = r0 << 9;                    /* Shift it over,                  */
-	r1 = CLKIN_HALF;                 /* Do we need to divide CLKIN by 2?*/
-	r0 = r1 | r0;
-	r1 = PLL_BYPASS;                 /* Bypass the PLL?                 */
-	r1 = r1 << 8;                    /* Shift it over                   */
-	r0 = r1 | r0;                    /* add them all together           */
-#ifdef ANOMALY_05000265
-	BITSET(r0, 15);                  /* Add 250 mV of hysteresis to SPORT input pins */
-#endif
-
-	p0.h = hi(PLL_CTL);
-	p0.l = lo(PLL_CTL);              /* Load the address                */
-	cli r2;                          /* Disable interrupts              */
-	ssync;
-	w[p0] = r0.l;                    /* Set the value                   */
-	idle;                            /* Wait for the PLL to stablize    */
-	sti r2;                          /* Enable interrupts               */
-
-.Lcheck_again:
-	p0.h = hi(PLL_STAT);
-	p0.l = lo(PLL_STAT);
-	R0 = W[P0](Z);
-	CC = BITTST(R0,5);
-	if ! CC jump .Lcheck_again;
-
-	/* Configure SCLK & CCLK Dividers */
-	r0 = (CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV);
-	p0.h = hi(PLL_DIV);
-	p0.l = lo(PLL_DIV);
-	w[p0] = r0.l;
-	ssync;
-
-	p0.l = lo(EBIU_SDRRC);
-	p0.h = hi(EBIU_SDRRC);
-	r0 = mem_SDRRC;
-	w[p0] = r0.l;
-	ssync;
-
-	P2.H = hi(EBIU_SDGCTL);
-	P2.L = lo(EBIU_SDGCTL);
-	R0 = [P2];
-	BITCLR (R0, 24);
-	p0.h = hi(EBIU_SDSTAT);
-	p0.l = lo(EBIU_SDSTAT);
-	r2.l = w[p0];
-	cc = bittst(r2,3);
-	if !cc jump .Lskip;
-	NOP;
-	BITSET (R0, 23);
-.Lskip:
-	[P2] = R0;
-	SSYNC;
-
-	R0.L = lo(mem_SDGCTL);
-	R0.H = hi(mem_SDGCTL);
-	R1 = [p2];
-	R1 = R1 | R0;
-	[P2] = R1;
-	SSYNC;
-
-	RTS;
-ENDPROC(_start_dma_code)
-#endif /* CONFIG_BFIN_KERNEL_CLOCK */
diff --git a/arch/blackfin/mach-bf537/include/mach/anomaly.h b/arch/blackfin/mach-bf537/include/mach/anomaly.h
index c689924..9cb3912 100644
--- a/arch/blackfin/mach-bf537/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf537/include/mach/anomaly.h
@@ -7,7 +7,7 @@
  */
 
 /* This file shoule be up to date with:
- *  - Revision C, 02/08/2008; ADSP-BF534/ADSP-BF536/ADSP-BF537 Blackfin Processor Anomaly List
+ *  - Revision D, 09/18/2008; ADSP-BF534/ADSP-BF536/ADSP-BF537 Blackfin Processor Anomaly List
  */
 
 #ifndef _MACH_ANOMALY_H_
@@ -148,6 +148,14 @@
 #define ANOMALY_05000402 (__SILICON_REVISION__ >= 5)
 /* Level-Sensitive External GPIO Wakeups May Cause Indefinite Stall */
 #define ANOMALY_05000403 (1)
+/* Speculative Fetches Can Cause Undesired External FIFO Operations */
+#define ANOMALY_05000416 (1)
+/* Multichannel SPORT Channel Misalignment Under Specific Configuration */
+#define ANOMALY_05000425 (1)
+/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */
+#define ANOMALY_05000426 (1)
+/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
+#define ANOMALY_05000443 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000125 (0)
@@ -161,5 +169,8 @@
 #define ANOMALY_05000353 (1)
 #define ANOMALY_05000363 (0)
 #define ANOMALY_05000386 (1)
+#define ANOMALY_05000412 (0)
+#define ANOMALY_05000432 (0)
+#define ANOMALY_05000435 (0)
 
 #endif
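Each ANOMALY_* macro evaluates to a compile-time 0 or 1, so workaround code can be written as plain C and optimized away where the erratum does not apply. A minimal sketch with a placeholder workaround body:

	#include <asm/blackfin.h>	/* pulls in mach/anomaly.h */

	static inline void example_icache_flush(void)
	{
		if (ANOMALY_05000443) {
			/* placeholder: avoid IFLUSH as the last instruction of a
			 * hardware loop, per the anomaly added above */
		}
		/* normal flush path here */
	}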
diff --git a/arch/blackfin/mach-bf537/include/mach/bf537.h b/arch/blackfin/mach-bf537/include/mach/bf537.h
index 24d5c9d..f194a84 100644
--- a/arch/blackfin/mach-bf537/include/mach/bf537.h
+++ b/arch/blackfin/mach-bf537/include/mach/bf537.h
@@ -133,7 +133,7 @@
 #endif
 
 #ifndef CPU
-#error Unknown CPU type - This kernel doesn't seem to be configured properly
+#error "Unknown CPU type - This kernel doesn't seem to be configured properly"
 #endif
 
 #endif				/* __MACH_BF537_H__  */
diff --git a/arch/blackfin/mach-bf537/include/mach/bfin_sir.h b/arch/blackfin/mach-bf537/include/mach/bfin_sir.h
deleted file mode 100644
index cfd8ad4..0000000
--- a/arch/blackfin/mach-bf537/include/mach/bfin_sir.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Blackfin Infra-red Driver
- *
- * Copyright 2006-2008 Analog Devices Inc.
- *
- * Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
- *
- */
-
-#include <linux/serial.h>
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#define SIR_UART_GET_CHAR(port)   bfin_read16((port)->membase + OFFSET_RBR)
-#define SIR_UART_GET_DLL(port)    bfin_read16((port)->membase + OFFSET_DLL)
-#define SIR_UART_GET_IER(port)    bfin_read16((port)->membase + OFFSET_IER)
-#define SIR_UART_GET_DLH(port)    bfin_read16((port)->membase + OFFSET_DLH)
-#define SIR_UART_GET_IIR(port)    bfin_read16((port)->membase + OFFSET_IIR)
-#define SIR_UART_GET_LCR(port)    bfin_read16((port)->membase + OFFSET_LCR)
-#define SIR_UART_GET_GCTL(port)   bfin_read16((port)->membase + OFFSET_GCTL)
-
-#define SIR_UART_PUT_CHAR(port, v) bfin_write16(((port)->membase + OFFSET_THR), v)
-#define SIR_UART_PUT_DLL(port, v)  bfin_write16(((port)->membase + OFFSET_DLL), v)
-#define SIR_UART_PUT_IER(port, v)  bfin_write16(((port)->membase + OFFSET_IER), v)
-#define SIR_UART_PUT_DLH(port, v)  bfin_write16(((port)->membase + OFFSET_DLH), v)
-#define SIR_UART_PUT_LCR(port, v)  bfin_write16(((port)->membase + OFFSET_LCR), v)
-#define SIR_UART_PUT_GCTL(port, v) bfin_write16(((port)->membase + OFFSET_GCTL), v)
-
-#ifdef CONFIG_SIR_BFIN_DMA
-struct dma_rx_buf {
-	char *buf;
-	int head;
-	int tail;
-	};
-#endif /* CONFIG_SIR_BFIN_DMA */
-
-struct bfin_sir_port {
-	unsigned char __iomem   *membase;
-	unsigned int            irq;
-	unsigned int            lsr;
-	unsigned long           clk;
-	struct net_device       *dev;
-#ifdef CONFIG_SIR_BFIN_DMA
-	int                     tx_done;
-	struct dma_rx_buf       rx_dma_buf;
-	struct timer_list       rx_dma_timer;
-	int                     rx_dma_nrows;
-#endif /* CONFIG_SIR_BFIN_DMA */
-	unsigned int            tx_dma_channel;
-	unsigned int            rx_dma_channel;
-};
-
-struct bfin_sir_port sir_ports[BFIN_UART_NR_PORTS];
-
-struct bfin_sir_port_res {
-	unsigned long   base_addr;
-	int             irq;
-	unsigned int    rx_dma_channel;
-	unsigned int    tx_dma_channel;
-};
-
-struct bfin_sir_port_res bfin_sir_port_resource[] = {
-#ifdef CONFIG_BFIN_SIR0
-	{
-	0xFFC00400,
-	IRQ_UART0_RX,
-	CH_UART0_RX,
-	CH_UART0_TX,
-	},
-#endif
-#ifdef CONFIG_BFIN_SIR1
-	{
-	0xFFC02000,
-	IRQ_UART1_RX,
-	CH_UART1_RX,
-	CH_UART1_TX,
-	},
-#endif
-};
-
-int nr_sirs = ARRAY_SIZE(bfin_sir_port_resource);
-
-struct bfin_sir_self {
-	struct bfin_sir_port    *sir_port;
-	spinlock_t              lock;
-	unsigned int            open;
-	int                     speed;
-	int                     newspeed;
-
-	struct sk_buff          *txskb;
-	struct sk_buff          *rxskb;
-	struct net_device_stats stats;
-	struct device           *dev;
-	struct irlap_cb         *irlap;
-	struct qos_info         qos;
-
-	iobuff_t                tx_buff;
-	iobuff_t                rx_buff;
-
-	struct work_struct      work;
-	int                     mtt;
-};
-
-static inline unsigned int SIR_UART_GET_LSR(struct bfin_sir_port *port)
-{
-	unsigned int lsr = bfin_read16(port->membase + OFFSET_LSR);
-	port->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | port->lsr;
-}
-
-static inline void SIR_UART_CLEAR_LSR(struct bfin_sir_port *port)
-{
-	port->lsr = 0;
-	bfin_read16(port->membase + OFFSET_LSR);
-}
-
-#define DRIVER_NAME "bfin_sir"
-
-static int bfin_sir_hw_init(void)
-{
-	int ret = -ENODEV;
-#ifdef CONFIG_BFIN_SIR0
-	ret = peripheral_request(P_UART0_TX, DRIVER_NAME);
-	if (ret)
-		return ret;
-	ret = peripheral_request(P_UART0_RX, DRIVER_NAME);
-	if (ret)
-		return ret;
-#endif
-
-#ifdef CONFIG_BFIN_SIR1
-	ret = peripheral_request(P_UART1_TX, DRIVER_NAME);
-	if (ret)
-		return ret;
-	ret = peripheral_request(P_UART1_RX, DRIVER_NAME);
-	if (ret)
-		return ret;
-#endif
-	return ret;
-}
diff --git a/arch/blackfin/mach-bf537/include/mach/blackfin.h b/arch/blackfin/mach-bf537/include/mach/blackfin.h
index cffc786..7d6069c 100644
--- a/arch/blackfin/mach-bf537/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf537/include/mach/blackfin.h
@@ -82,7 +82,7 @@
 #define STATUS_P1	0x02
 #define STATUS_P0	0x01
 
-/* DMA Channnel */
+/* DMA Channel */
 #define bfin_read_CH_UART_RX() bfin_read_CH_UART0_RX()
 #define bfin_write_CH_UART_RX(val) bfin_write_CH_UART0_RX(val)
 #define CH_UART_RX CH_UART0_RX
diff --git a/arch/blackfin/mach-bf537/include/mach/cdefBF534.h b/arch/blackfin/mach-bf537/include/mach/cdefBF534.h
index 88d491c..5f8b5f8 100644
--- a/arch/blackfin/mach-bf537/include/mach/cdefBF534.h
+++ b/arch/blackfin/mach-bf537/include/mach/cdefBF534.h
@@ -40,55 +40,11 @@
 /* Include core specific register pointer definitions 								*/
 #include <asm/cdef_LPBlackfin.h>
 
-#include <asm/system.h>
-
 /* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
 #define bfin_read_PLL_CTL()                  bfin_read16(PLL_CTL)
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	local_irq_save(flags);
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr = bfin_read32(SIC_IWR);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR, IWR_ENABLE(0));
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR, iwr);
-	local_irq_restore(flags);
-}
 #define bfin_read_PLL_DIV()                  bfin_read16(PLL_DIV)
 #define bfin_write_PLL_DIV(val)              bfin_write16(PLL_DIV,val)
 #define bfin_read_VR_CTL()                   bfin_read16(VR_CTL)
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	local_irq_save(flags);
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr = bfin_read32(SIC_IWR);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR, IWR_ENABLE(0));
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR, iwr);
-	local_irq_restore(flags);
-}
 #define bfin_read_PLL_STAT()                 bfin_read16(PLL_STAT)
 #define bfin_write_PLL_STAT(val)             bfin_write16(PLL_STAT,val)
 #define bfin_read_PLL_LOCKCNT()              bfin_read16(PLL_LOCKCNT)
@@ -1816,4 +1772,51 @@
 #define bfin_read_HMDMA1_BCOUNT()            bfin_read16(HMDMA1_BCOUNT)
 #define bfin_write_HMDMA1_BCOUNT(val)        bfin_write16(HMDMA1_BCOUNT,val)
 
+/* These need to be last due to the cdef/linux inter-dependencies */
+#include <asm/irq.h>
+
+/* Writing to PLL_CTL initiates a PLL relock sequence. */
+static __inline__ void bfin_write_PLL_CTL(unsigned int val)
+{
+	unsigned long flags, iwr;
+
+	if (val == bfin_read_PLL_CTL())
+		return;
+
+	local_irq_save_hw(flags);
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	iwr = bfin_read32(SIC_IWR);
+	/* Only allow PLL Wakeup */
+	bfin_write32(SIC_IWR, IWR_ENABLE(0));
+
+	bfin_write16(PLL_CTL, val);
+	SSYNC();
+	asm("IDLE;");
+
+	bfin_write32(SIC_IWR, iwr);
+	local_irq_restore_hw(flags);
+}
+
+/* Writing to VR_CTL initiates a PLL relock sequence. */
+static __inline__ void bfin_write_VR_CTL(unsigned int val)
+{
+	unsigned long flags, iwr;
+
+	if (val == bfin_read_VR_CTL())
+		return;
+
+	local_irq_save_hw(flags);
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	iwr = bfin_read32(SIC_IWR);
+	/* Only allow PLL Wakeup */
+	bfin_write32(SIC_IWR, IWR_ENABLE(0));
+
+	bfin_write16(VR_CTL, val);
+	SSYNC();
+	asm("IDLE;");
+
+	bfin_write32(SIC_IWR, iwr);
+	local_irq_restore_hw(flags);
+}
+
 #endif				/* _CDEF_BF534_H */
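Moving bfin_write_PLL_CTL()/bfin_write_VR_CTL() below the register defines and switching them to local_irq_save_hw() keeps the relock sequence under a real hardware interrupt mask. Callers do not change; a hedged sketch of typical use:

	#include <linux/types.h>
	#include <asm/blackfin.h>

	static void example_reprogram_pll(u16 new_ctl)
	{
		/* the helper idles until the PLL relocks and skips the write
		 * entirely if the value is unchanged */
		bfin_write_PLL_CTL(new_ctl);
	}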
diff --git a/arch/blackfin/mach-bf537/include/mach/dma.h b/arch/blackfin/mach-bf537/include/mach/dma.h
index 7a96404..5ae83b1 100644
--- a/arch/blackfin/mach-bf537/include/mach/dma.h
+++ b/arch/blackfin/mach-bf537/include/mach/dma.h
@@ -1,38 +1,14 @@
-/*
- * file:         include/asm-blackfin/mach-bf537/dma.h
- * based on:
- * author:
+/* mach/dma.h - arch-specific DMA defines
  *
- * created:
- * description:
- *	system mmr register map
- * rev:
+ * Copyright 2004-2008 Analog Devices Inc.
  *
- * modified:
- *
- *
- * bugs:         enter bugs at http://blackfin.uclinux.org/
- *
- * this program is free software; you can redistribute it and/or modify
- * it under the terms of the gnu general public license as published by
- * the free software foundation; either version 2, or (at your option)
- * any later version.
- *
- * this program is distributed in the hope that it will be useful,
- * but without any warranty; without even the implied warranty of
- * merchantability or fitness for a particular purpose.  see the
- * gnu general public license for more details.
- *
- * you should have received a copy of the gnu general public license
- * along with this program; see the file copying.
- * if not, write to the free software foundation,
- * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _MACH_DMA_H_
 #define _MACH_DMA_H_
 
-#define MAX_BLACKFIN_DMA_CHANNEL 16
+#define MAX_DMA_CHANNELS 16
 
 #define CH_PPI 			    0
 #define CH_EMAC_RX 		    1
diff --git a/arch/blackfin/mach-bf537/include/mach/gpio.h b/arch/blackfin/mach-bf537/include/mach/gpio.h
new file mode 100644
index 0000000..d77a31e
--- /dev/null
+++ b/arch/blackfin/mach-bf537/include/mach/gpio.h
@@ -0,0 +1,68 @@
+/*
+ * File: arch/blackfin/mach-bf537/include/mach/gpio.h
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Copyright (C) 2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+
+#ifndef _MACH_GPIO_H_
+#define _MACH_GPIO_H_
+
+#define MAX_BLACKFIN_GPIOS 48
+
+#define	GPIO_PF0	0
+#define	GPIO_PF1	1
+#define	GPIO_PF2	2
+#define	GPIO_PF3	3
+#define	GPIO_PF4	4
+#define	GPIO_PF5	5
+#define	GPIO_PF6	6
+#define	GPIO_PF7	7
+#define	GPIO_PF8	8
+#define	GPIO_PF9	9
+#define	GPIO_PF10	10
+#define	GPIO_PF11	11
+#define	GPIO_PF12	12
+#define	GPIO_PF13	13
+#define	GPIO_PF14	14
+#define	GPIO_PF15	15
+#define	GPIO_PG0	16
+#define	GPIO_PG1	17
+#define	GPIO_PG2	18
+#define	GPIO_PG3	19
+#define	GPIO_PG4	20
+#define	GPIO_PG5	21
+#define	GPIO_PG6	22
+#define	GPIO_PG7	23
+#define	GPIO_PG8	24
+#define	GPIO_PG9	25
+#define	GPIO_PG10      	26
+#define	GPIO_PG11      	27
+#define	GPIO_PG12      	28
+#define	GPIO_PG13      	29
+#define	GPIO_PG14      	30
+#define	GPIO_PG15      	31
+#define	GPIO_PH0	32
+#define	GPIO_PH1	33
+#define	GPIO_PH2	34
+#define	GPIO_PH3	35
+#define	GPIO_PH4	36
+#define	GPIO_PH5	37
+#define	GPIO_PH6	38
+#define	GPIO_PH7	39
+#define	GPIO_PH8	40
+#define	GPIO_PH9	41
+#define	GPIO_PH10      	42
+#define	GPIO_PH11      	43
+#define	GPIO_PH12      	44
+#define	GPIO_PH13      	45
+#define	GPIO_PH14      	46
+#define	GPIO_PH15      	47
+
+#define PORT_F GPIO_PF0
+#define PORT_G GPIO_PG0
+#define PORT_H GPIO_PH0
+
+#endif /* _MACH_GPIO_H_ */
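The new mach/gpio.h supplies the pin numbering that the generic GPIO calls operate on. A short sketch, with an arbitrary pin choice (GPIO_PF7) purely for illustration:

	#include <linux/gpio.h>

	static int example_drive_pin_high(void)
	{
		int ret = gpio_request(GPIO_PF7, "example-pin");

		if (ret)
			return ret;
		return gpio_direction_output(GPIO_PF7, 1);
	}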
diff --git a/arch/blackfin/mach-bf537/include/mach/irq.h b/arch/blackfin/mach-bf537/include/mach/irq.h
index 2e68a8a..b2a71d5 100644
--- a/arch/blackfin/mach-bf537/include/mach/irq.h
+++ b/arch/blackfin/mach-bf537/include/mach/irq.h
@@ -82,14 +82,14 @@
 #define IRQ_CAN_TX          23	/*CAN Transmit Interrupt */
 #define IRQ_MAC_RX          24	/*DMA1 (Ethernet RX) Interrupt */
 #define IRQ_MAC_TX          25	/*DMA2 (Ethernet TX) Interrupt */
-#define IRQ_TMR0            26	/*Timer 0 */
-#define IRQ_TMR1            27	/*Timer 1 */
-#define IRQ_TMR2            28	/*Timer 2 */
-#define IRQ_TMR3            29	/*Timer 3 */
-#define IRQ_TMR4            30	/*Timer 4 */
-#define IRQ_TMR5            31	/*Timer 5 */
-#define IRQ_TMR6            32	/*Timer 6 */
-#define IRQ_TMR7            33	/*Timer 7 */
+#define IRQ_TIMER0            26	/*Timer 0 */
+#define IRQ_TIMER1            27	/*Timer 1 */
+#define IRQ_TIMER2            28	/*Timer 2 */
+#define IRQ_TIMER3            29	/*Timer 3 */
+#define IRQ_TIMER4            30	/*Timer 4 */
+#define IRQ_TIMER5            31	/*Timer 5 */
+#define IRQ_TIMER6            32	/*Timer 6 */
+#define IRQ_TIMER7            33	/*Timer 7 */
 #define IRQ_PROG_INTA       34	/* PF Ports F&G (PF15:0) Interrupt A */
 #define IRQ_PORTG_INTB      35	/* PF Port G (PF15:0) Interrupt B */
 #define IRQ_MEM_DMA0        36	/*(Memory DMA Stream 0) */
@@ -195,16 +195,16 @@
 #define IRQ_CAN_TX_POS      0
 #define IRQ_MAC_RX_POS      4
 #define IRQ_MAC_TX_POS      8
-#define IRQ_TMR0_POS        12
-#define IRQ_TMR1_POS        16
-#define IRQ_TMR2_POS        20
-#define IRQ_TMR3_POS        24
-#define IRQ_TMR4_POS        28
+#define IRQ_TIMER0_POS        12
+#define IRQ_TIMER1_POS        16
+#define IRQ_TIMER2_POS        20
+#define IRQ_TIMER3_POS        24
+#define IRQ_TIMER4_POS        28
 
 /* IAR3 BIT FIELDS*/
-#define IRQ_TMR5_POS        0
-#define IRQ_TMR6_POS        4
-#define IRQ_TMR7_POS        8
+#define IRQ_TIMER5_POS        0
+#define IRQ_TIMER6_POS        4
+#define IRQ_TIMER7_POS        8
 #define IRQ_PROG_INTA_POS   12
 #define IRQ_PORTG_INTB_POS   16
 #define IRQ_MEM_DMA0_POS    20
diff --git a/arch/blackfin/mach-bf537/include/mach/mem_init.h b/arch/blackfin/mach-bf537/include/mach/mem_init.h
deleted file mode 100644
index f67698f..0000000
--- a/arch/blackfin/mach-bf537/include/mach/mem_init.h
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- * File:         include/asm-blackfin/mach-bf537/mem_init.h
- * Based on:
- * Author:
- *
- * Created:
- * Description:
- *
- * Rev:
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#if (CONFIG_MEM_MT48LC16M16A2TG_75 || CONFIG_MEM_MT48LC64M4A2FB_7E || CONFIG_MEM_MT48LC16M8A2TG_75 || CONFIG_MEM_GENERIC_BOARD || CONFIG_MEM_MT48LC32M8A2_75)
-#if (CONFIG_SCLK_HZ > 119402985)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_7
-#define SDRAM_tRAS_num  7
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 104477612) && (CONFIG_SCLK_HZ <= 119402985)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_6
-#define SDRAM_tRAS_num  6
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 89552239) && (CONFIG_SCLK_HZ <= 104477612)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_5
-#define SDRAM_tRAS_num  5
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 74626866) && (CONFIG_SCLK_HZ <= 89552239)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_4
-#define SDRAM_tRAS_num  4
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 66666667) && (CONFIG_SCLK_HZ <= 74626866)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_3
-#define SDRAM_tRAS_num  3
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 59701493) && (CONFIG_SCLK_HZ <= 66666667)
-#define SDRAM_tRP       TRP_1
-#define SDRAM_tRP_num   1
-#define SDRAM_tRAS      TRAS_4
-#define SDRAM_tRAS_num  3
-#define SDRAM_tRCD      TRCD_1
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 44776119) && (CONFIG_SCLK_HZ <= 59701493)
-#define SDRAM_tRP       TRP_1
-#define SDRAM_tRP_num   1
-#define SDRAM_tRAS      TRAS_3
-#define SDRAM_tRAS_num  3
-#define SDRAM_tRCD      TRCD_1
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 29850746) && (CONFIG_SCLK_HZ <= 44776119)
-#define SDRAM_tRP       TRP_1
-#define SDRAM_tRP_num   1
-#define SDRAM_tRAS      TRAS_2
-#define SDRAM_tRAS_num  2
-#define SDRAM_tRCD      TRCD_1
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ <= 29850746)
-#define SDRAM_tRP       TRP_1
-#define SDRAM_tRP_num   1
-#define SDRAM_tRAS      TRAS_1
-#define SDRAM_tRAS_num  1
-#define SDRAM_tRCD      TRCD_1
-#define SDRAM_tWR       TWR_2
-#endif
-#endif
-
-#if (CONFIG_MEM_MT48LC16M16A2TG_75)
-  /*SDRAM INFORMATION: */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-#if (CONFIG_MEM_MT48LC16M8A2TG_75)
-  /*SDRAM INFORMATION: */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   4096	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-#if (CONFIG_MEM_MT48LC32M8A2_75)
-  /*SDRAM INFORMATION: */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-#if (CONFIG_MEM_MT48LC64M4A2FB_7E)
-  /*SDRAM INFORMATION: */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-#if (CONFIG_MEM_GENERIC_BOARD)
-  /*SDRAM INFORMATION: Modify this for your board */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-/* Equation from section 17 (p17-46) of BF533 HRM */
-#define mem_SDRRC       (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num)
-
-/* Enable SCLK Out */
-#define mem_SDGCTL        (SCTLE | SDRAM_CL | SDRAM_tRAS | SDRAM_tRP | SDRAM_tRCD | SDRAM_tWR | PSS)
-
-#if defined CONFIG_CLKIN_HALF
-#define CLKIN_HALF       1
-#else
-#define CLKIN_HALF       0
-#endif
-
-#if defined CONFIG_PLL_BYPASS
-#define PLL_BYPASS      1
-#else
-#define PLL_BYPASS       0
-#endif
-
-/***************************************Currently Not Being Used *********************************/
-#define flash_EBIU_AMBCTL_WAT  ((CONFIG_FLASH_SPEED_BWAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_RAT  ((CONFIG_FLASH_SPEED_BRAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_HT   ((CONFIG_FLASH_SPEED_BHT  * 4) / (4000000000 / CONFIG_SCLK_HZ))
-#define flash_EBIU_AMBCTL_ST   ((CONFIG_FLASH_SPEED_BST  * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_TT   ((CONFIG_FLASH_SPEED_BTT  * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-
-#if (flash_EBIU_AMBCTL_TT > 3)
-#define flash_EBIU_AMBCTL0_TT   B0TT_4
-#endif
-#if (flash_EBIU_AMBCTL_TT == 3)
-#define flash_EBIU_AMBCTL0_TT   B0TT_3
-#endif
-#if (flash_EBIU_AMBCTL_TT == 2)
-#define flash_EBIU_AMBCTL0_TT   B0TT_2
-#endif
-#if (flash_EBIU_AMBCTL_TT < 2)
-#define flash_EBIU_AMBCTL0_TT   B0TT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_ST > 3)
-#define flash_EBIU_AMBCTL0_ST   B0ST_4
-#endif
-#if (flash_EBIU_AMBCTL_ST == 3)
-#define flash_EBIU_AMBCTL0_ST   B0ST_3
-#endif
-#if (flash_EBIU_AMBCTL_ST == 2)
-#define flash_EBIU_AMBCTL0_ST   B0ST_2
-#endif
-#if (flash_EBIU_AMBCTL_ST < 2)
-#define flash_EBIU_AMBCTL0_ST   B0ST_1
-#endif
-
-#if (flash_EBIU_AMBCTL_HT > 2)
-#define flash_EBIU_AMBCTL0_HT   B0HT_3
-#endif
-#if (flash_EBIU_AMBCTL_HT == 2)
-#define flash_EBIU_AMBCTL0_HT   B0HT_2
-#endif
-#if (flash_EBIU_AMBCTL_HT == 1)
-#define flash_EBIU_AMBCTL0_HT   B0HT_1
-#endif
-#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT == 0)
-#define flash_EBIU_AMBCTL0_HT   B0HT_0
-#endif
-#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT != 0)
-#define flash_EBIU_AMBCTL0_HT   B0HT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_WAT > 14)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_15
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 14)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_14
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 13)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_13
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 12)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_12
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 11)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_11
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 10)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_10
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 9)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_9
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 8)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_8
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 7)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_7
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 6)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_6
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 5)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_5
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 4)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_4
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 3)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_3
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 2)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_2
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 1)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_RAT > 14)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_15
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 14)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_14
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 13)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_13
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 12)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_12
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 11)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_11
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 10)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_10
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 9)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_9
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 8)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_8
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 7)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_7
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 6)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_6
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 5)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_5
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 4)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_4
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 3)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_3
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 2)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_2
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 1)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_1
-#endif
-
-#define flash_EBIU_AMBCTL0  \
-	(flash_EBIU_AMBCTL0_WAT | flash_EBIU_AMBCTL0_RAT | flash_EBIU_AMBCTL0_HT | \
-	 flash_EBIU_AMBCTL0_ST | flash_EBIU_AMBCTL0_TT | CONFIG_FLASH_SPEED_RDYEN)
diff --git a/arch/blackfin/mach-bf537/include/mach/mem_map.h b/arch/blackfin/mach-bf537/include/mach/mem_map.h
index 5078b66..f9010c4b4 100644
--- a/arch/blackfin/mach-bf537/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf537/include/mach/mem_map.h
@@ -176,4 +176,10 @@
 #define L1_SCRATCH_START	0xFFB00000
 #define L1_SCRATCH_LENGTH	0x1000
 
+#define GET_PDA_SAFE(preg)		\
+	preg.l = _cpu_pda;		\
+	preg.h = _cpu_pda;
+
+#define GET_PDA(preg, dreg)	GET_PDA_SAFE(preg)
+
 #endif				/* _MEM_MAP_537_H_ */
diff --git a/arch/blackfin/mach-bf537/ints-priority.c b/arch/blackfin/mach-bf537/ints-priority.c
index b1300b3..51c4808 100644
--- a/arch/blackfin/mach-bf537/ints-priority.c
+++ b/arch/blackfin/mach-bf537/ints-priority.c
@@ -55,15 +55,15 @@
 	bfin_write_SIC_IAR2(((CONFIG_IRQ_CAN_TX - 7) << IRQ_CAN_TX_POS) |
 			    ((CONFIG_IRQ_MAC_RX - 7) << IRQ_MAC_RX_POS) |
 			    ((CONFIG_IRQ_MAC_TX - 7) << IRQ_MAC_TX_POS) |
-			    ((CONFIG_IRQ_TMR0 - 7) << IRQ_TMR0_POS) |
-			    ((CONFIG_IRQ_TMR1 - 7) << IRQ_TMR1_POS) |
-			    ((CONFIG_IRQ_TMR2 - 7) << IRQ_TMR2_POS) |
-			    ((CONFIG_IRQ_TMR3 - 7) << IRQ_TMR3_POS) |
-			    ((CONFIG_IRQ_TMR4 - 7) << IRQ_TMR4_POS));
+			    ((CONFIG_IRQ_TIMER0 - 7) << IRQ_TIMER0_POS) |
+			    ((CONFIG_IRQ_TIMER1 - 7) << IRQ_TIMER1_POS) |
+			    ((CONFIG_IRQ_TIMER2 - 7) << IRQ_TIMER2_POS) |
+			    ((CONFIG_IRQ_TIMER3 - 7) << IRQ_TIMER3_POS) |
+			    ((CONFIG_IRQ_TIMER4 - 7) << IRQ_TIMER4_POS));
 
-	bfin_write_SIC_IAR3(((CONFIG_IRQ_TMR5 - 7) << IRQ_TMR5_POS) |
-			    ((CONFIG_IRQ_TMR6 - 7) << IRQ_TMR6_POS) |
-			    ((CONFIG_IRQ_TMR7 - 7) << IRQ_TMR7_POS) |
+	bfin_write_SIC_IAR3(((CONFIG_IRQ_TIMER5 - 7) << IRQ_TIMER5_POS) |
+			    ((CONFIG_IRQ_TIMER6 - 7) << IRQ_TIMER6_POS) |
+			    ((CONFIG_IRQ_TIMER7 - 7) << IRQ_TIMER7_POS) |
 			    ((CONFIG_IRQ_PROG_INTA - 7) << IRQ_PROG_INTA_POS) |
 			    ((CONFIG_IRQ_PORTG_INTB - 7) << IRQ_PORTG_INTB_POS) |
 			    ((CONFIG_IRQ_MEM_DMA0 - 7) << IRQ_MEM_DMA0_POS) |
diff --git a/arch/blackfin/mach-bf538/Kconfig b/arch/blackfin/mach-bf538/Kconfig
new file mode 100644
index 0000000..f068c35
--- /dev/null
+++ b/arch/blackfin/mach-bf538/Kconfig
@@ -0,0 +1,164 @@
+if (BF538 || BF539)
+
+source "arch/blackfin/mach-bf538/boards/Kconfig"
+
+menu "BF538 Specific Configuration"
+
+comment "Interrupt Priority Assignment"
+menu "Priority"
+
+config IRQ_PLL_WAKEUP
+	int "IRQ_PLL_WAKEUP"
+	default 7
+config IRQ_DMA0_ERROR
+	int "IRQ_DMA0_ERROR"
+	default 7
+config IRQ_PPI_ERROR
+	int "IRQ_PPI_ERROR"
+	default 7
+config IRQ_SPORT0_ERROR
+	int "IRQ_SPORT0_ERROR"
+	default 7
+config IRQ_SPORT1_ERROR
+	int "IRQ_SPORT1_ERROR"
+	default 7
+config IRQ_SPI0_ERROR
+	int "IRQ_SPI0_ERROR"
+	default 7
+config IRQ_UART0_ERROR
+	int "IRQ_UART0_ERROR"
+	default 7
+config IRQ_RTC
+	int "IRQ_RTC"
+	default 8
+config IRQ_PPI
+	int "IRQ_PPI"
+	default 8
+config IRQ_SPORT0_RX
+	int "IRQ_SPORT0_RX"
+	default 9
+config IRQ_SPORT0_TX
+	int "IRQ_SPORT0_TX"
+	default 9
+config IRQ_SPORT1_RX
+	int "IRQ_SPORT1_RX"
+	default 9
+config IRQ_SPORT1_TX
+	int "IRQ_SPORT1_TX"
+	default 9
+config IRQ_SPI0
+	int "IRQ_SPI0"
+	default 10
+config IRQ_UART0_RX
+	int "IRQ_UART0_RX"
+	default 10
+config IRQ_UART0_TX
+	int "IRQ_UART0_TX"
+	default 10
+config IRQ_TIMER0
+	int "IRQ_TIMER0"
+	default 8
+config IRQ_TIMER1
+	int "IRQ_TIMER1"
+	default 11
+config IRQ_TIMER2
+	int "IRQ_TIMER2"
+	default 11
+config IRQ_PORTF_INTA
+	int "IRQ_PORTF_INTA"
+	default 12
+config IRQ_PORTF_INTB
+	int "IRQ_PORTF_INTB"
+	default 12
+config IRQ_MEM0_DMA0
+	int "IRQ_MEM0_DMA0"
+	default 13
+config IRQ_MEM0_DMA1
+	int "IRQ_MEM0_DMA1"
+	default 13
+config IRQ_WATCH
+	int "IRQ_WATCH"
+	default 13
+config IRQ_DMA1_ERROR
+	int "IRQ_DMA1_ERROR"
+	default 7
+config IRQ_SPORT2_ERROR
+	int "IRQ_SPORT2_ERROR"
+	default 7
+config IRQ_SPORT3_ERROR
+	int "IRQ_SPORT3_ERROR"
+	default 7
+config IRQ_SPI1_ERROR
+	int "IRQ_SPI1_ERROR"
+	default 7
+config IRQ_SPI2_ERROR
+	int "IRQ_SPI2_ERROR"
+	default 7
+config IRQ_UART1_ERROR
+	int "IRQ_UART1_ERROR"
+	default 7
+config IRQ_UART2_ERROR
+	int "IRQ_UART2_ERROR"
+	default 7
+config IRQ_CAN_ERROR
+	int "IRQ_CAN_ERROR"
+	default 7
+config IRQ_SPORT2_RX
+	int "IRQ_SPORT2_RX"
+	default 9
+config IRQ_SPORT2_TX
+	int "IRQ_SPORT2_TX"
+	default 9
+config IRQ_SPORT3_RX
+	int "IRQ_SPORT3_RX"
+	default 9
+config IRQ_SPORT3_TX
+	int "IRQ_SPORT3_TX"
+	default 9
+config IRQ_SPI1
+	int "IRQ_SPI1"
+	default 10
+config IRQ_SPI2
+	int "IRQ_SPI2"
+	default 10
+config IRQ_UART1_RX
+	int "IRQ_UART1_RX"
+	default 10
+config IRQ_UART1_TX
+	int "IRQ_UART1_TX"
+	default 10
+config IRQ_UART2_RX
+	int "IRQ_UART2_RX"
+	default 10
+config IRQ_UART2_TX
+	int "IRQ_UART2_TX"
+	default 10
+config IRQ_TWI0
+	int "IRQ_TWI0"
+	default 11
+config IRQ_TWI1
+	int "IRQ_TWI1"
+	default 11
+config IRQ_CAN_RX
+	int "IRQ_CAN_RX"
+	default 11
+config IRQ_CAN_TX
+	int "IRQ_CAN_TX"
+	default 11
+config IRQ_MEM1_DMA0
+	int "IRQ_MEM1_DMA0"
+	default 13
+config IRQ_MEM1_DMA1
+	int "IRQ_MEM1_DMA1"
+	default 13
+
+	help
+	  Enter a priority number between 7 and 13 ONLY; other values are
+	  reserved.  This applies to all of the above.  It is not recommended
+	  to assign the highest priority (7) to the UART or any other device.
+
+endmenu
+
+endmenu
+
+endif
diff --git a/arch/blackfin/mach-bf538/Makefile b/arch/blackfin/mach-bf538/Makefile
new file mode 100644
index 0000000..8cd2719
--- /dev/null
+++ b/arch/blackfin/mach-bf538/Makefile
@@ -0,0 +1,5 @@
+#
+# arch/blackfin/mach-bf538/Makefile
+#
+
+obj-y := ints-priority.o dma.o
diff --git a/arch/blackfin/mach-bf538/boards/Kconfig b/arch/blackfin/mach-bf538/boards/Kconfig
new file mode 100644
index 0000000..215249b
--- /dev/null
+++ b/arch/blackfin/mach-bf538/boards/Kconfig
@@ -0,0 +1,12 @@
+choice
+	prompt "System type"
+	default BFIN538_EZKIT
+	help
+	  Select your board!
+
+config BFIN538_EZKIT
+	bool "BF538-EZKIT"
+	help
+	  BF538-EZKIT-LITE board support.
+
+endchoice
diff --git a/arch/blackfin/mach-bf538/boards/Makefile b/arch/blackfin/mach-bf538/boards/Makefile
new file mode 100644
index 0000000..6143b32
--- /dev/null
+++ b/arch/blackfin/mach-bf538/boards/Makefile
@@ -0,0 +1,5 @@
+#
+# arch/blackfin/mach-bf538/boards/Makefile
+#
+
+obj-$(CONFIG_BFIN538_EZKIT)            += ezkit.o
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
new file mode 100644
index 0000000..e37cb93
--- /dev/null
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -0,0 +1,606 @@
+/*
+ * File:         arch/blackfin/mach-bf538/boards/ezkit.c
+ * Based on:     arch/blackfin/mach-bf537/boards/ezkit.c
+ * Author:       Aidan Williams <aidan@nicta.com.au>
+ *
+ * Created:
+ * Description:
+ *
+ * Modified:
+ *               Copyright 2005 National ICT Australia (NICTA)
+ *               Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <asm/bfin5xx_spi.h>
+#include <asm/dma.h>
+#include <asm/gpio.h>
+#include <asm/nand.h>
+#include <asm/portmux.h>
+#include <asm/dpmc.h>
+#include <linux/input.h>
+
+/*
+ * Name the Board for the /proc/cpuinfo
+ */
+const char bfin_board_name[] = "ADI BF538-EZKIT";
+
+/*
+ *  Driver needs to know address, irq and flag pin.
+ */
+
+
+#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+static struct platform_device rtc_device = {
+	.name = "rtc-bfin",
+	.id   = -1,
+};
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+static struct resource bfin_uart_resources[] = {
+#ifdef CONFIG_SERIAL_BFIN_UART0
+	{
+		.start = 0xFFC00400,
+		.end = 0xFFC004FF,
+		.flags = IORESOURCE_MEM,
+	},
+#endif
+#ifdef CONFIG_SERIAL_BFIN_UART1
+	{
+		.start = 0xFFC02000,
+		.end = 0xFFC020FF,
+		.flags = IORESOURCE_MEM,
+	},
+#endif
+#ifdef CONFIG_SERIAL_BFIN_UART2
+	{
+		.start = 0xFFC02100,
+		.end = 0xFFC021FF,
+		.flags = IORESOURCE_MEM,
+	},
+#endif
+};
+
+static struct platform_device bfin_uart_device = {
+	.name = "bfin-uart",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_uart_resources),
+	.resource = bfin_uart_resources,
+};
+#endif
+
+#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
+	{
+		.start = 0xFFC00400,
+		.end = 0xFFC004FF,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
+#endif
+#ifdef CONFIG_BFIN_SIR1
+static struct resource bfin_sir1_resources[] = {
+	{
+		.start = 0xFFC02000,
+		.end = 0xFFC020FF,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_UART1_RX,
+		.end = IRQ_UART1_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_RX,
+		.end = CH_UART1_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+static struct platform_device bfin_sir1_device = {
+	.name = "bfin_sir",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
+	.resource = bfin_sir1_resources,
+};
+#endif
+#ifdef CONFIG_BFIN_SIR2
+static struct resource bfin_sir2_resources[] = {
+	{
+		.start = 0xFFC02100,
+		.end = 0xFFC021FF,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_UART2_RX,
+		.end = IRQ_UART2_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART2_RX,
+		.end = CH_UART2_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+static struct platform_device bfin_sir2_device = {
+	.name = "bfin_sir",
+	.id = 2,
+	.num_resources = ARRAY_SIZE(bfin_sir2_resources),
+	.resource = bfin_sir2_resources,
+};
+#endif
+#endif
+
+/*
+ *  USB-LAN EzExtender board
+ *  Driver needs to know address, irq and flag pin.
+ */
+#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+static struct resource smc91x_resources[] = {
+	{
+		.name = "smc91x-regs",
+		.start = 0x20310300,
+		.end = 0x20310300 + 16,
+		.flags = IORESOURCE_MEM,
+	}, {
+		.start = IRQ_PF0,
+		.end = IRQ_PF0,
+		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+	},
+};
+static struct platform_device smc91x_device = {
+	.name = "smc91x",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(smc91x_resources),
+	.resource = smc91x_resources,
+};
+#endif
+
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+/* all SPI peripherals info goes here */
+#if defined(CONFIG_MTD_M25P80) \
+	|| defined(CONFIG_MTD_M25P80_MODULE)
+/* SPI flash chip (m25p16) */
+static struct mtd_partition bfin_spi_flash_partitions[] = {
+	{
+		.name = "bootloader(spi)",
+		.size = 0x00040000,
+		.offset = 0,
+		.mask_flags = MTD_CAP_ROM
+	}, {
+		.name = "linux kernel(spi)",
+		.size = 0x1c0000,
+		.offset = 0x40000
+	}
+};
+
+static struct flash_platform_data bfin_spi_flash_data = {
+	.name = "m25p80",
+	.parts = bfin_spi_flash_partitions,
+	.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
+	.type = "m25p16",
+};
+
+static struct bfin5xx_spi_chip spi_flash_chip_info = {
+	.enable_dma = 0,         /* dma transfer disabled for this chip */
+	.bits_per_word = 8,
+	.cs_change_per_word = 0,
+};
+#endif
+
+#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE)
+#include <linux/spi/ad7879.h>
+static const struct ad7879_platform_data bfin_ad7879_ts_info = {
+	.model			= 7879,	/* Model = AD7879 */
+	.x_plate_ohms		= 620,	/* 620 Ohm from the touch datasheet */
+	.pressure_max		= 10000,
+	.pressure_min		= 0,
+	.first_conversion_delay = 3,	/* wait 512us before doing the first conversion */
+	.acquisition_time 	= 1,	/* 4us acquisition time per sample */
+	.median			= 2,	/* do 8 measurements */
+	.averaging 		= 1,	/* take the average of 4 middle samples */
+	.pen_down_acc_interval 	= 255,	/* 9.4 ms */
+	.gpio_output		= 1,	/* configure AUX/VBAT/GPIO as GPIO output */
+	.gpio_default 		= 1,	/* During initialization set GPIO = HIGH */
+};
+#endif
+
+#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
+static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
+	.enable_dma = 0,
+	.bits_per_word = 16,
+};
+#endif
+
+#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
+#include <asm/bfin-lq035q1.h>
+
+static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
+	.mode = 	LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB,
+	.use_bl = 	0,	/* let something else control the LCD Backlight */
+	.gpio_bl =	GPIO_PF7,
+};
+
+static struct resource bfin_lq035q1_resources[] = {
+	{
+		.start = IRQ_PPI_ERROR,
+		.end = IRQ_PPI_ERROR,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_lq035q1_device = {
+	.name		= "bfin-lq035q1",
+	.id		= -1,
+	.num_resources 	= ARRAY_SIZE(bfin_lq035q1_resources),
+	.resource 	= bfin_lq035q1_resources,
+	.dev		= {
+		.platform_data = &bfin_lq035q1_data,
+	},
+};
+#endif
+
+#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
+static struct bfin5xx_spi_chip spidev_chip_info = {
+	.enable_dma = 0,
+	.bits_per_word = 8,
+};
+#endif
+
+#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
+static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
+	.enable_dma	= 0,
+	.bits_per_word	= 8,
+};
+#endif
+
+static struct spi_board_info bf538_spi_board_info[] __initdata = {
+#if defined(CONFIG_MTD_M25P80) \
+	|| defined(CONFIG_MTD_M25P80_MODULE)
+	{
+		/* the modalias must be the same as spi device driver name */
+		.modalias = "m25p80", /* Name of spi_driver for this device */
+		.max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num = 0, /* Framework bus number */
+		.chip_select = 1, /* SPI_SSEL1*/
+		.platform_data = &bfin_spi_flash_data,
+		.controller_data = &spi_flash_chip_info,
+		.mode = SPI_MODE_3,
+	},
+#endif
+#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
+	{
+		.modalias = "ad7879",
+		.platform_data = &bfin_ad7879_ts_info,
+		.irq = IRQ_PF3,
+		.max_speed_hz = 5000000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num = 0,
+		.chip_select = 1,
+		.controller_data = &spi_ad7879_chip_info,
+		.mode = SPI_CPHA | SPI_CPOL,
+	},
+#endif
+#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
+	{
+		.modalias = "bfin-lq035q1-spi",
+		.max_speed_hz = 20000000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num = 0,
+		.chip_select = 2,
+		.controller_data = &lq035q1_spi_chip_info,
+		.mode = SPI_CPHA | SPI_CPOL,
+	},
+#endif
+#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
+	{
+		.modalias = "spidev",
+		.max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num = 0,
+		.chip_select = 1,
+		.controller_data = &spidev_chip_info,
+	},
+#endif
+};
+
+/* SPI (0) */
+static struct resource bfin_spi0_resource[] = {
+	[0] = {
+		.start = SPI0_REGBASE,
+		.end   = SPI0_REGBASE + 0xFF,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = CH_SPI0,
+		.end   = CH_SPI0,
+		.flags = IORESOURCE_IRQ,
+	}
+};
+
+/* SPI (1) */
+static struct resource bfin_spi1_resource[] = {
+	[0] = {
+		.start = SPI1_REGBASE,
+		.end   = SPI1_REGBASE + 0xFF,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = CH_SPI1,
+		.end   = CH_SPI1,
+		.flags = IORESOURCE_IRQ,
+	}
+};
+
+/* SPI (2) */
+static struct resource bfin_spi2_resource[] = {
+	[0] = {
+		.start = SPI2_REGBASE,
+		.end   = SPI2_REGBASE + 0xFF,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = CH_SPI2,
+		.end   = CH_SPI2,
+		.flags = IORESOURCE_IRQ,
+	}
+};
+
+/* SPI controller data */
+static struct bfin5xx_spi_master bf538_spi_master_info0 = {
+	.num_chipselect = 8,
+	.enable_dma = 1,  /* master has the ability to do dma transfer */
+	.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
+};
+
+static struct platform_device bf538_spi_master0 = {
+	.name = "bfin-spi",
+	.id = 0, /* Bus number */
+	.num_resources = ARRAY_SIZE(bfin_spi0_resource),
+	.resource = bfin_spi0_resource,
+	.dev = {
+		.platform_data = &bf538_spi_master_info0, /* Passed to driver */
+		},
+};
+
+static struct bfin5xx_spi_master bf538_spi_master_info1 = {
+	.num_chipselect = 8,
+	.enable_dma = 1,  /* master has the ability to do dma transfer */
+	.pin_req = {P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0},
+};
+
+static struct platform_device bf538_spi_master1 = {
+	.name = "bfin-spi",
+	.id = 1, /* Bus number */
+	.num_resources = ARRAY_SIZE(bfin_spi1_resource),
+	.resource = bfin_spi1_resource,
+	.dev = {
+		.platform_data = &bf538_spi_master_info1, /* Passed to driver */
+		},
+};
+
+static struct bfin5xx_spi_master bf538_spi_master_info2 = {
+	.num_chipselect = 8,
+	.enable_dma = 1,  /* master has the ability to do dma transfer */
+	.pin_req = {P_SPI2_SCK, P_SPI2_MISO, P_SPI2_MOSI, 0},
+};
+
+static struct platform_device bf538_spi_master2 = {
+	.name = "bfin-spi",
+	.id = 2, /* Bus number */
+	.num_resources = ARRAY_SIZE(bfin_spi2_resource),
+	.resource = bfin_spi2_resource,
+	.dev = {
+		.platform_data = &bf538_spi_master_info2, /* Passed to driver */
+		},
+};
+
+#endif  /* spi master and devices */
+
+#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static struct resource bfin_twi0_resource[] = {
+	[0] = {
+		.start = TWI0_REGBASE,
+		.end   = TWI0_REGBASE + 0xFF,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = IRQ_TWI0,
+		.end   = IRQ_TWI0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device i2c_bfin_twi0_device = {
+	.name = "i2c-bfin-twi",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
+	.resource = bfin_twi0_resource,
+};
+
+#if !defined(CONFIG_BF542)	/* The BF542 only has 1 TWI */
+static struct resource bfin_twi1_resource[] = {
+	[0] = {
+		.start = TWI1_REGBASE,
+		.end   = TWI1_REGBASE + 0xFF,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = IRQ_TWI1,
+		.end   = IRQ_TWI1,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device i2c_bfin_twi1_device = {
+	.name = "i2c-bfin-twi",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_twi1_resource),
+	.resource = bfin_twi1_resource,
+};
+#endif
+#endif
+
+static struct resource bfin_gpios_resources = {
+	.start = 0,
+	.end   = MAX_BLACKFIN_GPIOS - 1,
+	.flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device bfin_gpios_device = {
+	.name = "simple-gpio",
+	.id = -1,
+	.num_resources = 1,
+	.resource = &bfin_gpios_resources,
+};
+
+#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+#include <linux/gpio_keys.h>
+
+static struct gpio_keys_button bfin_gpio_keys_table[] = {
+	{BTN_0, GPIO_PC7, 1, "gpio-keys: BTN0"},
+};
+
+static struct gpio_keys_platform_data bfin_gpio_keys_data = {
+	.buttons        = bfin_gpio_keys_table,
+	.nbuttons       = ARRAY_SIZE(bfin_gpio_keys_table),
+};
+
+static struct platform_device bfin_device_gpiokeys = {
+	.name      = "gpio-keys",
+	.dev = {
+		.platform_data = &bfin_gpio_keys_data,
+	},
+};
+#endif
+
+static const unsigned int cclk_vlev_datasheet[] =
+{
+/*
+ * Internal VLEV BF538SBBC1533
+ ****temporarily using these values until data sheet is updated
+ */
+	VRPAIR(VLEV_100, 150000000),
+	VRPAIR(VLEV_100, 250000000),
+	VRPAIR(VLEV_110, 276000000),
+	VRPAIR(VLEV_115, 301000000),
+	VRPAIR(VLEV_120, 525000000),
+	VRPAIR(VLEV_125, 550000000),
+	VRPAIR(VLEV_130, 600000000),
+};
+
+static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
+	.tuple_tab = cclk_vlev_datasheet,
+	.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
+	.vr_settling_time = 25 /* us */,
+};
+
+static struct platform_device bfin_dpmc = {
+	.name = "bfin dpmc",
+	.dev = {
+		.platform_data = &bfin_dmpc_vreg_data,
+	},
+};
+
+static struct platform_device *cm_bf538_devices[] __initdata = {
+
+	&bfin_dpmc,
+
+#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+	&rtc_device,
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+	&bfin_uart_device,
+#endif
+
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+	&bf538_spi_master0,
+	&bf538_spi_master1,
+	&bf538_spi_master2,
+#endif
+
+#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+	&i2c_bfin_twi0_device,
+	&i2c_bfin_twi1_device,
+#endif
+
+#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#ifdef CONFIG_BFIN_SIR1
+	&bfin_sir1_device,
+#endif
+#ifdef CONFIG_BFIN_SIR2
+	&bfin_sir2_device,
+#endif
+#endif
+
+#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+	&smc91x_device,
+#endif
+
+#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
+	&bfin_lq035q1_device,
+#endif
+
+#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+	&bfin_device_gpiokeys,
+#endif
+
+	&bfin_gpios_device,
+};
+
+static int __init ezkit_init(void)
+{
+	printk(KERN_INFO "%s(): registering device resources\n", __func__);
+	platform_add_devices(cm_bf538_devices, ARRAY_SIZE(cm_bf538_devices));
+
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+	spi_register_board_info(bf538_spi_board_info,
+			ARRAY_SIZE(bf538_spi_board_info));
+#endif
+
+	return 0;
+}
+
+arch_initcall(ezkit_init);
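Among the devices the new ezkit.c registers is a spidev entry on SPI bus 0, chip select 1, which appears to userspace as a character device. A hedged userspace sketch (the /dev/spidev0.1 node name follows the usual bus.chip-select convention and is not spelled out in this patch):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/spi/spidev.h>

	int main(void)
	{
		unsigned char mode = SPI_MODE_0;
		int fd = open("/dev/spidev0.1", O_RDWR);

		if (fd < 0) {
			perror("open /dev/spidev0.1");
			return 1;
		}
		if (ioctl(fd, SPI_IOC_WR_MODE, &mode) < 0)
			perror("SPI_IOC_WR_MODE");
		close(fd);
		return 0;
	}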
diff --git a/arch/blackfin/mach-bf538/dma.c b/arch/blackfin/mach-bf538/dma.c
new file mode 100644
index 0000000..d6837fb
--- /dev/null
+++ b/arch/blackfin/mach-bf538/dma.c
@@ -0,0 +1,161 @@
+/*
+ * File:         arch/blackfin/mach-bf538/dma.c
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:  This file contains the simple DMA Implementation for Blackfin
+ *
+ * Modified:
+ *               Copyright 2008 Analog Devices Inc.
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <linux/module.h>
+
+#include <asm/blackfin.h>
+#include <asm/dma.h>
+
+struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+	(struct dma_register *) DMA0_NEXT_DESC_PTR,
+	(struct dma_register *) DMA1_NEXT_DESC_PTR,
+	(struct dma_register *) DMA2_NEXT_DESC_PTR,
+	(struct dma_register *) DMA3_NEXT_DESC_PTR,
+	(struct dma_register *) DMA4_NEXT_DESC_PTR,
+	(struct dma_register *) DMA5_NEXT_DESC_PTR,
+	(struct dma_register *) DMA6_NEXT_DESC_PTR,
+	(struct dma_register *) DMA7_NEXT_DESC_PTR,
+	(struct dma_register *) DMA8_NEXT_DESC_PTR,
+	(struct dma_register *) DMA9_NEXT_DESC_PTR,
+	(struct dma_register *) DMA10_NEXT_DESC_PTR,
+	(struct dma_register *) DMA11_NEXT_DESC_PTR,
+	(struct dma_register *) DMA12_NEXT_DESC_PTR,
+	(struct dma_register *) DMA13_NEXT_DESC_PTR,
+	(struct dma_register *) DMA14_NEXT_DESC_PTR,
+	(struct dma_register *) DMA15_NEXT_DESC_PTR,
+	(struct dma_register *) DMA16_NEXT_DESC_PTR,
+	(struct dma_register *) DMA17_NEXT_DESC_PTR,
+	(struct dma_register *) DMA18_NEXT_DESC_PTR,
+	(struct dma_register *) DMA19_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA0_D0_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA0_S0_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA0_D1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA0_S1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA1_D0_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA1_S0_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA1_D1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA1_S1_NEXT_DESC_PTR,
+};
+EXPORT_SYMBOL(dma_io_base_addr);
+
+int channel2irq(unsigned int channel)
+{
+	int ret_irq = -1;
+
+	switch (channel) {
+	case CH_PPI:
+		ret_irq = IRQ_PPI;
+		break;
+
+	case CH_UART0_RX:
+		ret_irq = IRQ_UART0_RX;
+		break;
+
+	case CH_UART0_TX:
+		ret_irq = IRQ_UART0_TX;
+		break;
+
+	case CH_UART1_RX:
+		ret_irq = IRQ_UART1_RX;
+		break;
+
+	case CH_UART1_TX:
+		ret_irq = IRQ_UART1_TX;
+		break;
+
+	case CH_UART2_RX:
+		ret_irq = IRQ_UART2_RX;
+		break;
+
+	case CH_UART2_TX:
+		ret_irq = IRQ_UART2_TX;
+		break;
+
+	case CH_SPORT0_RX:
+		ret_irq = IRQ_SPORT0_RX;
+		break;
+
+	case CH_SPORT0_TX:
+		ret_irq = IRQ_SPORT0_TX;
+		break;
+
+	case CH_SPORT1_RX:
+		ret_irq = IRQ_SPORT1_RX;
+		break;
+
+	case CH_SPORT1_TX:
+		ret_irq = IRQ_SPORT1_TX;
+		break;
+
+	case CH_SPORT2_RX:
+		ret_irq = IRQ_SPORT2_RX;
+		break;
+
+	case CH_SPORT2_TX:
+		ret_irq = IRQ_SPORT2_TX;
+		break;
+
+	case CH_SPORT3_RX:
+		ret_irq = IRQ_SPORT3_RX;
+		break;
+
+	case CH_SPORT3_TX:
+		ret_irq = IRQ_SPORT3_TX;
+		break;
+
+	case CH_SPI0:
+		ret_irq = IRQ_SPI0;
+		break;
+
+	case CH_SPI1:
+		ret_irq = IRQ_SPI1;
+		break;
+
+	case CH_SPI2:
+		ret_irq = IRQ_SPI2;
+		break;
+
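+	/* Memory DMA stream source/destination channel pairs share one IRQ */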
+	case CH_MEM_STREAM0_SRC:
+	case CH_MEM_STREAM0_DEST:
+		ret_irq = IRQ_MEM0_DMA0;
+		break;
+	case CH_MEM_STREAM1_SRC:
+	case CH_MEM_STREAM1_DEST:
+		ret_irq = IRQ_MEM0_DMA1;
+		break;
+	case CH_MEM_STREAM2_SRC:
+	case CH_MEM_STREAM2_DEST:
+		ret_irq = IRQ_MEM1_DMA0;
+		break;
+	case CH_MEM_STREAM3_SRC:
+	case CH_MEM_STREAM3_DEST:
+		ret_irq = IRQ_MEM1_DMA1;
+		break;
+	}
+	return ret_irq;
+}
diff --git a/arch/blackfin/mach-bf538/include/mach/anomaly.h b/arch/blackfin/mach-bf538/include/mach/anomaly.h
new file mode 100644
index 0000000..e130b4f
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/anomaly.h
@@ -0,0 +1,132 @@
+/*
+ * File: include/asm-blackfin/mach-bf538/anomaly.h
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Copyright (C) 2004-2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+/* This file should be up to date with:
+ *  - Revision G, 09/18/2008; ADSP-BF538/BF538F Blackfin Processor Anomaly List
+ *  - Revision L, 09/18/2008; ADSP-BF539/BF539F Blackfin Processor Anomaly List
+ */
+
+#ifndef _MACH_ANOMALY_H_
+#define _MACH_ANOMALY_H_
+
+#if __SILICON_REVISION__ < 4
+# error will not work on BF538 silicon version 0.0, 0.1, 0.2, or 0.3
+#endif
+
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
+#define ANOMALY_05000074 (1)
+/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
+#define ANOMALY_05000119 (1)
+/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
+#define ANOMALY_05000122 (1)
+/* PPI Data Lengths between 8 and 16 Do Not Zero Out Upper Bits */
+#define ANOMALY_05000166 (1)
+/* PPI_COUNT Cannot Be Programmed to 0 in General Purpose TX or RX Modes */
+#define ANOMALY_05000179 (1)
+/* PPI_DELAY Not Functional in PPI Modes with 0 Frame Syncs */
+#define ANOMALY_05000180 (1)
+/* False I/O Pin Interrupts on Edge-Sensitive Inputs When Polarity Setting Is Changed */
+#define ANOMALY_05000193 (1)
+/* Current DMA Address Shows Wrong Value During Carry Fix */
+#define ANOMALY_05000199 (__SILICON_REVISION__ < 4)
+/* NMI Event at Boot Time Results in Unpredictable State */
+#define ANOMALY_05000219 (1)
+/* SPI Slave Boot Mode Modifies Registers from Reset Value */
+#define ANOMALY_05000229 (1)
+/* PPI_FS3 Is Not Driven in 2 or 3 Internal Frame Sync Transmit Modes */
+#define ANOMALY_05000233 (1)
+/* If i-cache is on, CSYNC/SSYNC/IDLE around Change of Control causes failures */
+#define ANOMALY_05000244 (__SILICON_REVISION__ < 3)
+/* Spurious Hardware Error from an Access in the Shadow of a Conditional Branch */
+#define ANOMALY_05000245 (1)
+/* Maximum External Clock Speed for Timers */
+#define ANOMALY_05000253 (1)
+/* DCPLB_FAULT_ADDR MMR register may be corrupted */
+#define ANOMALY_05000261 (__SILICON_REVISION__ < 3)
+/* High I/O Activity Causes Output Voltage of Internal Voltage Regulator (Vddint) to Decrease */
+#define ANOMALY_05000270 (__SILICON_REVISION__ < 4)
+/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */
+#define ANOMALY_05000272 (1)
+/* Writes to Synchronous SDRAM Memory May Be Lost */
+#define ANOMALY_05000273 (__SILICON_REVISION__ < 4)
+/* Writes to an I/O Data Register One SCLK Cycle after an Edge Is Detected May Clear Interrupt */
+#define ANOMALY_05000277 (__SILICON_REVISION__ < 4)
+/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
+#define ANOMALY_05000278 (__SILICON_REVISION__ < 4)
+/* False Hardware Error Exception when ISR Context Is Not Restored */
+#define ANOMALY_05000281 (__SILICON_REVISION__ < 4)
+/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
+#define ANOMALY_05000282 (__SILICON_REVISION__ < 4)
+/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
+#define ANOMALY_05000283 (__SILICON_REVISION__ < 4)
+/* SPORTs May Receive Bad Data If FIFOs Fill Up */
+#define ANOMALY_05000288 (__SILICON_REVISION__ < 4)
+/* Reads from CAN Mailbox and Acceptance Mask Area Can Fail */
+#define ANOMALY_05000291 (__SILICON_REVISION__ < 4)
+/* Hibernate Leakage Current Is Higher Than Specified */
+#define ANOMALY_05000293 (__SILICON_REVISION__ < 4)
+/* Timer Pin Limitations for PPI TX Modes with External Frame Syncs */
+#define ANOMALY_05000294 (1)
+/* Memory-To-Memory DMA Source/Destination Descriptors Must Be in Same Memory Space */
+#define ANOMALY_05000301 (__SILICON_REVISION__ < 4)
+/* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */
+#define ANOMALY_05000304 (__SILICON_REVISION__ < 4)
+/* SCKELOW Bit Does Not Maintain State Through Hibernate */
+#define ANOMALY_05000307 (__SILICON_REVISION__ < 4)
+/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
+#define ANOMALY_05000310 (1)
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+#define ANOMALY_05000312 (__SILICON_REVISION__ < 5)
+/* PPI Is Level-Sensitive on First Transfer */
+#define ANOMALY_05000313 (__SILICON_REVISION__ < 4)
+/* Killed System MMR Write Completes Erroneously on Next System MMR Access */
+#define ANOMALY_05000315 (__SILICON_REVISION__ < 4)
+/* PFx Glitch on Write to FIO_FLAG_D or FIO_FLAG_T */
+#define ANOMALY_05000318 (__SILICON_REVISION__ < 4)
+/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */
+#define ANOMALY_05000355 (__SILICON_REVISION__ < 5)
+/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */
+#define ANOMALY_05000357 (__SILICON_REVISION__ < 5)
+/* PPI Underflow Error Goes Undetected in ITU-R 656 Mode */
+#define ANOMALY_05000366 (1)
+/* Possible RETS Register Corruption when Subroutine Is under 5 Cycles in Duration */
+#define ANOMALY_05000371 (__SILICON_REVISION__ < 5)
+/* Entering Hibernate State with Peripheral Wakeups Enabled Draws Excess Current */
+#define ANOMALY_05000374 (__SILICON_REVISION__ == 4)
+/* New Feature: Open-Drain GPIO Outputs on PC1 and PC4 (Not Available on Older Silicon) */
+#define ANOMALY_05000375 (__SILICON_REVISION__ < 4)
+/* SSYNC Stalls Processor when Executed from Non-Cacheable Memory */
+#define ANOMALY_05000402 (__SILICON_REVISION__ < 4)
+/* Level-Sensitive External GPIO Wakeups May Cause Indefinite Stall */
+#define ANOMALY_05000403 (1)
+/* Speculative Fetches Can Cause Undesired External FIFO Operations */
+#define ANOMALY_05000416 (1)
+/* Multichannel SPORT Channel Misalignment Under Specific Configuration */
+#define ANOMALY_05000425 (1)
+/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */
+#define ANOMALY_05000426 (1)
+/* Specific GPIO Pins May Change State when Entering Hibernate */
+#define ANOMALY_05000436 (__SILICON_REVISION__ > 3)
+/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
+#define ANOMALY_05000443 (1)
+
+/* Anomalies that don't exist on this proc */
+#define ANOMALY_05000158 (0)
+#define ANOMALY_05000198 (0)
+#define ANOMALY_05000230 (0)
+#define ANOMALY_05000263 (0)
+#define ANOMALY_05000311 (0)
+#define ANOMALY_05000323 (0)
+#define ANOMALY_05000353 (1)
+#define ANOMALY_05000363 (0)
+#define ANOMALY_05000386 (1)
+#define ANOMALY_05000412 (0)
+#define ANOMALY_05000432 (0)
+#define ANOMALY_05000435 (0)
+
+#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/bf538.h b/arch/blackfin/mach-bf538/include/mach/bf538.h
new file mode 100644
index 0000000..9c8abb3
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/bf538.h
@@ -0,0 +1,124 @@
+/*
+ * File:         include/asm-blackfin/mach-bf538/bf538.h
+ * Based on:	include/asm-blackfin/mach-bf537/bf537.h
+ * Author:	Michael Hennerich (michael.hennerich@analog.com)
+ *
+ * Created:
+ * Description:  SYSTEM MMR REGISTER AND MEMORY MAP FOR ADSP-BF538
+ *
+ * Modified:
+ *               Copyright 2004-2007 Analog Devices Inc.
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef __MACH_BF538_H__
+#define __MACH_BF538_H__
+
+#define OFFSET_(x) ((x) & 0x0000FFFF)
+
+/*some misc defines*/
+#define IMASK_IVG15		0x8000
+#define IMASK_IVG14		0x4000
+#define IMASK_IVG13		0x2000
+#define IMASK_IVG12		0x1000
+
+#define IMASK_IVG11		0x0800
+#define IMASK_IVG10		0x0400
+#define IMASK_IVG9		0x0200
+#define IMASK_IVG8		0x0100
+
+#define IMASK_IVG7		0x0080
+#define IMASK_IVGTMR	0x0040
+#define IMASK_IVGHW		0x0020
+
+/***************************/
+
+#define BFIN_DSUBBANKS	4
+#define BFIN_DWAYS		2
+#define BFIN_DLINES		64
+#define BFIN_ISUBBANKS	4
+#define BFIN_IWAYS		4
+#define BFIN_ILINES		32
+
+#define WAY0_L			0x1
+#define WAY1_L			0x2
+#define WAY01_L			0x3
+#define WAY2_L			0x4
+#define WAY02_L			0x5
+#define	WAY12_L			0x6
+#define	WAY012_L		0x7
+
+#define	WAY3_L			0x8
+#define	WAY03_L			0x9
+#define	WAY13_L			0xA
+#define	WAY013_L		0xB
+
+#define	WAY32_L			0xC
+#define	WAY320_L		0xD
+#define	WAY321_L		0xE
+#define	WAYALL_L		0xF
+
+#define DMC_ENABLE (2<<2)	/*yes, 2, not 1 */
+
+/********************************* EBIU Settings ************************************/
+#define AMBCTL0VAL	((CONFIG_BANK_1 << 16) | CONFIG_BANK_0)
+#define AMBCTL1VAL	((CONFIG_BANK_3 << 16) | CONFIG_BANK_2)
+
+#ifdef CONFIG_C_AMBEN_ALL
+#define V_AMBEN AMBEN_ALL
+#endif
+#ifdef CONFIG_C_AMBEN
+#define V_AMBEN 0x0
+#endif
+#ifdef CONFIG_C_AMBEN_B0
+#define V_AMBEN AMBEN_B0
+#endif
+#ifdef CONFIG_C_AMBEN_B0_B1
+#define V_AMBEN AMBEN_B0_B1
+#endif
+#ifdef CONFIG_C_AMBEN_B0_B1_B2
+#define V_AMBEN AMBEN_B0_B1_B2
+#endif
+#ifdef CONFIG_C_AMCKEN
+#define V_AMCKEN AMCKEN
+#else
+#define V_AMCKEN 0x0
+#endif
+#ifdef CONFIG_C_CDPRIO
+#define V_CDPRIO 0x100
+#else
+#define V_CDPRIO 0x0
+#endif
+
+#define AMGCTLVAL	(V_AMBEN | V_AMCKEN | V_CDPRIO)
+
+#ifdef CONFIG_BF538
+#define CPU "BF538"
+#define CPUID 0x27C4
+#endif
+#ifdef CONFIG_BF539
+#define CPU "BF539"
+#define CPUID 0x27C4	/* FIXME:? */
+#endif
+
+#ifndef CPU
+#error "Unknown CPU type - This kernel doesn't seem to be configured properly"
+#endif
+
+#endif				/* __MACH_BF538_H__  */
diff --git a/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
new file mode 100644
index 0000000..40503b6
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
@@ -0,0 +1,183 @@
+/*
+ * file:         include/asm-blackfin/mach-bf538/bfin_serial_5xx.h
+ * based on:
+ * author:
+ *
+ * created:
+ * description:
+ *	blackfin serial driver header files
+ * rev:
+ *
+ * modified:
+ *
+ *
+ * bugs:         enter bugs at http://blackfin.uclinux.org/
+ *
+ * this program is free software; you can redistribute it and/or modify
+ * it under the terms of the gnu general public license as published by
+ * the free software foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * this program is distributed in the hope that it will be useful,
+ * but without any warranty; without even the implied warranty of
+ * merchantability or fitness for a particular purpose.  see the
+ * gnu general public license for more details.
+ *
+ * you should have received a copy of the gnu general public license
+ * along with this program; see the file copying.
+ * if not, write to the free software foundation,
+ * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ */
+
+#include <linux/serial.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+
+#define UART_GET_CHAR(uart)     bfin_read16(((uart)->port.membase + OFFSET_RBR))
+#define UART_GET_DLL(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLL))
+#define UART_GET_IER(uart)      bfin_read16(((uart)->port.membase + OFFSET_IER))
+#define UART_GET_DLH(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLH))
+#define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
+#define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
+#define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
+
+#define UART_PUT_CHAR(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_THR), v)
+#define UART_PUT_DLL(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_DLL), v)
+#define UART_PUT_IER(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_IER), v)
+#define UART_SET_IER(uart, v)    UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
+#define UART_CLEAR_IER(uart, v)  UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
+#define UART_PUT_DLH(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_DLH), v)
+#define UART_PUT_LCR(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_LCR), v)
+#define UART_PUT_GCTL(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL), v)
+
+#define UART_SET_DLAB(uart)     do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
+#define UART_CLEAR_DLAB(uart)   do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
+
+#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
+#define UART_SET_RTS(x) gpio_set_value(x->rts_pin, 1)
+#define UART_CLEAR_RTS(x) gpio_set_value(x->rts_pin, 0)
+#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
+#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
+
+#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
+# define CONFIG_SERIAL_BFIN_CTSRTS
+
+# ifndef CONFIG_UART0_CTS_PIN
+#  define CONFIG_UART0_CTS_PIN -1
+# endif
+
+# ifndef CONFIG_UART0_RTS_PIN
+#  define CONFIG_UART0_RTS_PIN -1
+# endif
+
+# ifndef CONFIG_UART1_CTS_PIN
+#  define CONFIG_UART1_CTS_PIN -1
+# endif
+
+# ifndef CONFIG_UART1_RTS_PIN
+#  define CONFIG_UART1_RTS_PIN -1
+# endif
+#endif
+
+#define BFIN_UART_TX_FIFO_SIZE	2
+
+/*
+ * The pin configuration differs from the schematic
+ */
+struct bfin_serial_port {
+	struct uart_port	port;
+	unsigned int		old_status;
+	unsigned int lsr;
+#ifdef CONFIG_SERIAL_BFIN_DMA
+	int			tx_done;
+	int			tx_count;
+	struct circ_buf		rx_dma_buf;
+	struct timer_list       rx_dma_timer;
+	int			rx_dma_nrows;
+	unsigned int		tx_dma_channel;
+	unsigned int		rx_dma_channel;
+	struct work_struct	tx_dma_workqueue;
+#endif
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+	struct timer_list	cts_timer;
+	int		cts_pin;
+	int		rts_pin;
+#endif
+};
+
+/* The hardware clears the LSR bits upon read, so we need to cache
+ * some of the more fun bits in software so they don't get lost
+ * when checking the LSR in other code paths (TX).
+ */
+static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
+{
+	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
+	uart->lsr |= (lsr & (BI|FE|PE|OE));
+	return lsr | uart->lsr;
+}
+
+static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
+{
+	uart->lsr = 0;
+	bfin_write16(uart->port.membase + OFFSET_LSR, -1);
+}
+
+struct bfin_serial_res {
+	unsigned long	uart_base_addr;
+	int		uart_irq;
+#ifdef CONFIG_SERIAL_BFIN_DMA
+	unsigned int	uart_tx_dma_channel;
+	unsigned int	uart_rx_dma_channel;
+#endif
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+	int	uart_cts_pin;
+	int	uart_rts_pin;
+#endif
+};
+
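+/* One entry per enabled UART.  Initializers are positional, so each entry
+ * must match the fields compiled into struct bfin_serial_res by the
+ * DMA and CTS/RTS options above.
+ */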
+struct bfin_serial_res bfin_serial_resource[] = {
+#ifdef CONFIG_SERIAL_BFIN_UART0
+	{
+	0xFFC00400,
+	IRQ_UART0_RX,
+#ifdef CONFIG_SERIAL_BFIN_DMA
+	CH_UART0_TX,
+	CH_UART0_RX,
+#endif
+#ifdef CONFIG_BFIN_UART0_CTSRTS
+	CONFIG_UART0_CTS_PIN,
+	CONFIG_UART0_RTS_PIN,
+#endif
+	},
+#endif
+#ifdef CONFIG_SERIAL_BFIN_UART1
+	{
+	0xFFC02000,
+	IRQ_UART1_RX,
+#ifdef CONFIG_SERIAL_BFIN_DMA
+	CH_UART1_TX,
+	CH_UART1_RX,
+#endif
+#ifdef CONFIG_BFIN_UART1_CTSRTS
+	CONFIG_UART1_CTS_PIN,
+	CONFIG_UART1_RTS_PIN,
+#endif
+	},
+#endif
+#ifdef CONFIG_SERIAL_BFIN_UART2
+	{
+	0xFFC02100,
+	IRQ_UART2_RX,
+#ifdef CONFIG_SERIAL_BFIN_DMA
+	CH_UART2_TX,
+	CH_UART2_RX,
+#endif
+#ifdef CONFIG_BFIN_UART2_CTSRTS
+	CONFIG_UART2_CTS_PIN,
+	CONFIG_UART2_RTS_PIN,
+#endif
+	},
+#endif
+};
+
+#define DRIVER_NAME "bfin-uart"
diff --git a/arch/blackfin/mach-bf538/include/mach/blackfin.h b/arch/blackfin/mach-bf538/include/mach/blackfin.h
new file mode 100644
index 0000000..ea25371a9
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/blackfin.h
@@ -0,0 +1,101 @@
+/*
+ * File:         include/asm-blackfin/mach-bf538/blackfin.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _MACH_BLACKFIN_H_
+#define _MACH_BLACKFIN_H_
+
+#define BF538_FAMILY
+
+#include "bf538.h"
+#include "mem_map.h"
+#include "defBF539.h"
+#include "anomaly.h"
+
+
+#if !defined(__ASSEMBLY__)
+#include "cdefBF538.h"
+
+#if defined(CONFIG_BF539)
+#include "cdefBF539.h"
+#endif
+#endif
+
+/* UART_IIR Register */
+#define STATUS(x)	((x << 1) & 0x06)
+#define STATUS_P1	0x02
+#define STATUS_P0	0x01
+
+#define BFIN_UART_NR_PORTS	3
+
+#define OFFSET_THR              0x00	/* Transmit Holding register            */
+#define OFFSET_RBR              0x00	/* Receive Buffer register              */
+#define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
+#define OFFSET_IER              0x04	/* Interrupt Enable Register            */
+#define OFFSET_DLH              0x04	/* Divisor Latch (High-Byte)            */
+#define OFFSET_IIR              0x08	/* Interrupt Identification Register    */
+#define OFFSET_LCR              0x0C	/* Line Control Register                */
+#define OFFSET_MCR              0x10	/* Modem Control Register               */
+#define OFFSET_LSR              0x14	/* Line Status Register                 */
+#define OFFSET_MSR              0x18	/* Modem Status Register                */
+#define OFFSET_SCR              0x1C	/* SCR Scratch Register                 */
+#define OFFSET_GCTL             0x24	/* Global Control Register              */
+
+
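+/* Common code uses the generic MDMA register names; map them onto
+ * memory DMA controller 0 (MDMA0).
+ */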
+#define bfin_write_MDMA_D0_IRQ_STATUS	bfin_write_MDMA0_D0_IRQ_STATUS
+#define bfin_write_MDMA_D0_START_ADDR   bfin_write_MDMA0_D0_START_ADDR
+#define bfin_write_MDMA_S0_START_ADDR   bfin_write_MDMA0_S0_START_ADDR
+#define bfin_write_MDMA_D0_X_COUNT      bfin_write_MDMA0_D0_X_COUNT
+#define bfin_write_MDMA_S0_X_COUNT      bfin_write_MDMA0_S0_X_COUNT
+#define bfin_write_MDMA_D0_Y_COUNT      bfin_write_MDMA0_D0_Y_COUNT
+#define bfin_write_MDMA_S0_Y_COUNT      bfin_write_MDMA0_S0_Y_COUNT
+#define bfin_write_MDMA_D0_X_MODIFY     bfin_write_MDMA0_D0_X_MODIFY
+#define bfin_write_MDMA_S0_X_MODIFY     bfin_write_MDMA0_S0_X_MODIFY
+#define bfin_write_MDMA_D0_Y_MODIFY     bfin_write_MDMA0_D0_Y_MODIFY
+#define bfin_write_MDMA_S0_Y_MODIFY     bfin_write_MDMA0_S0_Y_MODIFY
+#define bfin_write_MDMA_S0_CONFIG       bfin_write_MDMA0_S0_CONFIG
+#define bfin_write_MDMA_D0_CONFIG       bfin_write_MDMA0_D0_CONFIG
+#define bfin_read_MDMA_S0_CONFIG        bfin_read_MDMA0_S0_CONFIG
+#define bfin_read_MDMA_D0_IRQ_STATUS    bfin_read_MDMA0_D0_IRQ_STATUS
+#define bfin_write_MDMA_S0_IRQ_STATUS   bfin_write_MDMA0_S0_IRQ_STATUS
+
+
+/* DPMC*/
+#define bfin_read_STOPCK_OFF() bfin_read_STOPCK()
+#define bfin_write_STOPCK_OFF(val) bfin_write_STOPCK(val)
+#define STOPCK_OFF STOPCK
+
+/* PLL_DIV Masks */
+#define CCLK_DIV1 CSEL_DIV1	/* CCLK = VCO / 1 */
+#define CCLK_DIV2 CSEL_DIV2	/* CCLK = VCO / 2 */
+#define CCLK_DIV4 CSEL_DIV4	/* CCLK = VCO / 4 */
+#define CCLK_DIV8 CSEL_DIV8	/* CCLK = VCO / 8 */
+
+#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/cdefBF538.h b/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
new file mode 100644
index 0000000..241725b
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
@@ -0,0 +1,2108 @@
+/*
+ * File:         include/asm-blackfin/mach-bf538/cdefBF538.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _CDEF_BF538_H
+#define _CDEF_BF538_H
+
+#include <asm/blackfin.h>
+
+/*include all Core registers and bit definitions*/
+#include "defBF539.h"
+
+/*include core specific register pointer definitions*/
+#include <asm/cdef_LPBlackfin.h>
+
+#define bfin_readPTR(addr) bfin_read32(addr)
+#define bfin_writePTR(addr, val) bfin_write32(addr, val)
+
+#define bfin_read_PLL_CTL()            bfin_read16(PLL_CTL)
+#define bfin_read_PLL_DIV()            bfin_read16(PLL_DIV)
+#define bfin_write_PLL_DIV(val)        bfin_write16(PLL_DIV, val)
+#define bfin_read_VR_CTL()             bfin_read16(VR_CTL)
+#define bfin_read_PLL_STAT()           bfin_read16(PLL_STAT)
+#define bfin_write_PLL_STAT(val)       bfin_write16(PLL_STAT, val)
+#define bfin_read_PLL_LOCKCNT()        bfin_read16(PLL_LOCKCNT)
+#define bfin_write_PLL_LOCKCNT(val)    bfin_write16(PLL_LOCKCNT, val)
+#define bfin_read_CHIPID()             bfin_read32(CHIPID)
+#define bfin_write_CHIPID(val)         bfin_write32(CHIPID, val)
+#define bfin_read_SWRST()              bfin_read16(SWRST)
+#define bfin_write_SWRST(val)          bfin_write16(SWRST, val)
+#define bfin_read_SYSCR()              bfin_read16(SYSCR)
+#define bfin_write_SYSCR(val)          bfin_write16(SYSCR, val)
+#define bfin_read_SIC_RVECT()          bfin_readPTR(SIC_RVECT)
+#define bfin_write_SIC_RVECT(val)      bfin_writePTR(SIC_RVECT, val)
+#define bfin_read_SIC_IMASK0()         bfin_read32(SIC_IMASK0)
+#define bfin_write_SIC_IMASK0(val)     bfin_write32(SIC_IMASK0, val)
+#define bfin_read_SIC_IMASK1()         bfin_read32(SIC_IMASK1)
+#define bfin_write_SIC_IMASK1(val)     bfin_write32(SIC_IMASK1, val)
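+/* Indexed variants: select the 0/1 register pair by its MMR address stride */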
+#define bfin_read_SIC_IMASK(x)	       bfin_read32(SIC_IMASK0 + x * (SIC_IMASK1 - SIC_IMASK0))
+#define bfin_write_SIC_IMASK(x, val)   bfin_write32(SIC_IMASK0 + x * (SIC_IMASK1 - SIC_IMASK0), val)
+#define bfin_read_SIC_ISR0()           bfin_read32(SIC_ISR0)
+#define bfin_write_SIC_ISR0(val)       bfin_write32(SIC_ISR0, val)
+#define bfin_read_SIC_ISR1()           bfin_read32(SIC_ISR1)
+#define bfin_write_SIC_ISR1(val)       bfin_write32(SIC_ISR1, val)
+#define bfin_read_SIC_ISR(x)	       bfin_read32(SIC_ISR0 + x * (SIC_ISR1 - SIC_ISR0))
+#define bfin_write_SIC_ISR(x, val)     bfin_write32(SIC_ISR0 + x * (SIC_ISR1 - SIC_ISR0), val)
+#define bfin_read_SIC_IWR0()           bfin_read32(SIC_IWR0)
+#define bfin_write_SIC_IWR0(val)       bfin_write32(SIC_IWR0, val)
+#define bfin_read_SIC_IWR1()           bfin_read32(SIC_IWR1)
+#define bfin_write_SIC_IWR1(val)       bfin_write32(SIC_IWR1, val)
+#define bfin_read_SIC_IWR(x)	       bfin_read32(SIC_IWR0 + x * (SIC_IWR1 - SIC_IWR0))
+#define bfin_write_SIC_IWR(x, val)     bfin_write32(SIC_IWR0 + x * (SIC_IWR1 - SIC_IWR0), val)
+#define bfin_read_SIC_IAR0()           bfin_read32(SIC_IAR0)
+#define bfin_write_SIC_IAR0(val)       bfin_write32(SIC_IAR0, val)
+#define bfin_read_SIC_IAR1()           bfin_read32(SIC_IAR1)
+#define bfin_write_SIC_IAR1(val)       bfin_write32(SIC_IAR1, val)
+#define bfin_read_SIC_IAR2()           bfin_read32(SIC_IAR2)
+#define bfin_write_SIC_IAR2(val)       bfin_write32(SIC_IAR2, val)
+#define bfin_read_SIC_IAR3()           bfin_read32(SIC_IAR3)
+#define bfin_write_SIC_IAR3(val)       bfin_write32(SIC_IAR3, val)
+#define bfin_read_SIC_IAR4()           bfin_read32(SIC_IAR4)
+#define bfin_write_SIC_IAR4(val)       bfin_write32(SIC_IAR4, val)
+#define bfin_read_SIC_IAR5()           bfin_read32(SIC_IAR5)
+#define bfin_write_SIC_IAR5(val)       bfin_write32(SIC_IAR5, val)
+#define bfin_read_SIC_IAR6()           bfin_read32(SIC_IAR6)
+#define bfin_write_SIC_IAR6(val)       bfin_write32(SIC_IAR6, val)
+#define bfin_read_WDOG_CTL()           bfin_read16(WDOG_CTL)
+#define bfin_write_WDOG_CTL(val)       bfin_write16(WDOG_CTL, val)
+#define bfin_read_WDOG_CNT()           bfin_read32(WDOG_CNT)
+#define bfin_write_WDOG_CNT(val)       bfin_write32(WDOG_CNT, val)
+#define bfin_read_WDOG_STAT()          bfin_read32(WDOG_STAT)
+#define bfin_write_WDOG_STAT(val)      bfin_write32(WDOG_STAT, val)
+#define bfin_read_RTC_STAT()           bfin_read32(RTC_STAT)
+#define bfin_write_RTC_STAT(val)       bfin_write32(RTC_STAT, val)
+#define bfin_read_RTC_ICTL()           bfin_read16(RTC_ICTL)
+#define bfin_write_RTC_ICTL(val)       bfin_write16(RTC_ICTL, val)
+#define bfin_read_RTC_ISTAT()          bfin_read16(RTC_ISTAT)
+#define bfin_write_RTC_ISTAT(val)      bfin_write16(RTC_ISTAT, val)
+#define bfin_read_RTC_SWCNT()          bfin_read16(RTC_SWCNT)
+#define bfin_write_RTC_SWCNT(val)      bfin_write16(RTC_SWCNT, val)
+#define bfin_read_RTC_ALARM()          bfin_read32(RTC_ALARM)
+#define bfin_write_RTC_ALARM(val)      bfin_write32(RTC_ALARM, val)
+#define bfin_read_RTC_PREN()           bfin_read16(RTC_PREN)
+#define bfin_write_RTC_PREN(val)       bfin_write16(RTC_PREN, val)
+#define bfin_read_UART0_THR()          bfin_read16(UART0_THR)
+#define bfin_write_UART0_THR(val)      bfin_write16(UART0_THR, val)
+#define bfin_read_UART0_RBR()          bfin_read16(UART0_RBR)
+#define bfin_write_UART0_RBR(val)      bfin_write16(UART0_RBR, val)
+#define bfin_read_UART0_DLL()          bfin_read16(UART0_DLL)
+#define bfin_write_UART0_DLL(val)      bfin_write16(UART0_DLL, val)
+#define bfin_read_UART0_DLH()          bfin_read16(UART0_DLH)
+#define bfin_write_UART0_DLH(val)      bfin_write16(UART0_DLH, val)
+#define bfin_read_UART0_IER()          bfin_read16(UART0_IER)
+#define bfin_write_UART0_IER(val)      bfin_write16(UART0_IER, val)
+#define bfin_read_UART0_IIR()          bfin_read16(UART0_IIR)
+#define bfin_write_UART0_IIR(val)      bfin_write16(UART0_IIR, val)
+#define bfin_read_UART0_LCR()          bfin_read16(UART0_LCR)
+#define bfin_write_UART0_LCR(val)      bfin_write16(UART0_LCR, val)
+#define bfin_read_UART0_MCR()          bfin_read16(UART0_MCR)
+#define bfin_write_UART0_MCR(val)      bfin_write16(UART0_MCR, val)
+#define bfin_read_UART0_LSR()          bfin_read16(UART0_LSR)
+#define bfin_write_UART0_LSR(val)      bfin_write16(UART0_LSR, val)
+#define bfin_read_UART0_SCR()          bfin_read16(UART0_SCR)
+#define bfin_write_UART0_SCR(val)      bfin_write16(UART0_SCR, val)
+#define bfin_read_UART0_GCTL()         bfin_read16(UART0_GCTL)
+#define bfin_write_UART0_GCTL(val)     bfin_write16(UART0_GCTL, val)
+#define bfin_read_UART1_THR()          bfin_read16(UART1_THR)
+#define bfin_write_UART1_THR(val)      bfin_write16(UART1_THR, val)
+#define bfin_read_UART1_RBR()          bfin_read16(UART1_RBR)
+#define bfin_write_UART1_RBR(val)      bfin_write16(UART1_RBR, val)
+#define bfin_read_UART1_DLL()          bfin_read16(UART1_DLL)
+#define bfin_write_UART1_DLL(val)      bfin_write16(UART1_DLL, val)
+#define bfin_read_UART1_DLH()          bfin_read16(UART1_DLH)
+#define bfin_write_UART1_DLH(val)      bfin_write16(UART1_DLH, val)
+#define bfin_read_UART1_IER()          bfin_read16(UART1_IER)
+#define bfin_write_UART1_IER(val)      bfin_write16(UART1_IER, val)
+#define bfin_read_UART1_IIR()          bfin_read16(UART1_IIR)
+#define bfin_write_UART1_IIR(val)      bfin_write16(UART1_IIR, val)
+#define bfin_read_UART1_LCR()          bfin_read16(UART1_LCR)
+#define bfin_write_UART1_LCR(val)      bfin_write16(UART1_LCR, val)
+#define bfin_read_UART1_MCR()          bfin_read16(UART1_MCR)
+#define bfin_write_UART1_MCR(val)      bfin_write16(UART1_MCR, val)
+#define bfin_read_UART1_LSR()          bfin_read16(UART1_LSR)
+#define bfin_write_UART1_LSR(val)      bfin_write16(UART1_LSR, val)
+#define bfin_read_UART1_SCR()          bfin_read16(UART1_SCR)
+#define bfin_write_UART1_SCR(val)      bfin_write16(UART1_SCR, val)
+#define bfin_read_UART1_GCTL()         bfin_read16(UART1_GCTL)
+#define bfin_write_UART1_GCTL(val)     bfin_write16(UART1_GCTL, val)
+#define bfin_read_UART2_THR()          bfin_read16(UART2_THR)
+#define bfin_write_UART2_THR(val)      bfin_write16(UART2_THR, val)
+#define bfin_read_UART2_RBR()          bfin_read16(UART2_RBR)
+#define bfin_write_UART2_RBR(val)      bfin_write16(UART2_RBR, val)
+#define bfin_read_UART2_DLL()          bfin_read16(UART2_DLL)
+#define bfin_write_UART2_DLL(val)      bfin_write16(UART2_DLL, val)
+#define bfin_read_UART2_DLH()          bfin_read16(UART2_DLH)
+#define bfin_write_UART2_DLH(val)      bfin_write16(UART2_DLH, val)
+#define bfin_read_UART2_IER()          bfin_read16(UART2_IER)
+#define bfin_write_UART2_IER(val)      bfin_write16(UART2_IER, val)
+#define bfin_read_UART2_IIR()          bfin_read16(UART2_IIR)
+#define bfin_write_UART2_IIR(val)      bfin_write16(UART2_IIR, val)
+#define bfin_read_UART2_LCR()          bfin_read16(UART2_LCR)
+#define bfin_write_UART2_LCR(val)      bfin_write16(UART2_LCR, val)
+#define bfin_read_UART2_MCR()          bfin_read16(UART2_MCR)
+#define bfin_write_UART2_MCR(val)      bfin_write16(UART2_MCR, val)
+#define bfin_read_UART2_LSR()          bfin_read16(UART2_LSR)
+#define bfin_write_UART2_LSR(val)      bfin_write16(UART2_LSR, val)
+#define bfin_read_UART2_SCR()          bfin_read16(UART2_SCR)
+#define bfin_write_UART2_SCR(val)      bfin_write16(UART2_SCR, val)
+#define bfin_read_UART2_GCTL()         bfin_read16(UART2_GCTL)
+#define bfin_write_UART2_GCTL(val)     bfin_write16(UART2_GCTL, val)
+#define bfin_read_SPI0_CTL()           bfin_read16(SPI0_CTL)
+#define bfin_write_SPI0_CTL(val)       bfin_write16(SPI0_CTL, val)
+#define bfin_read_SPI0_FLG()           bfin_read16(SPI0_FLG)
+#define bfin_write_SPI0_FLG(val)       bfin_write16(SPI0_FLG, val)
+#define bfin_read_SPI0_STAT()          bfin_read16(SPI0_STAT)
+#define bfin_write_SPI0_STAT(val)      bfin_write16(SPI0_STAT, val)
+#define bfin_read_SPI0_TDBR()          bfin_read16(SPI0_TDBR)
+#define bfin_write_SPI0_TDBR(val)      bfin_write16(SPI0_TDBR, val)
+#define bfin_read_SPI0_RDBR()          bfin_read16(SPI0_RDBR)
+#define bfin_write_SPI0_RDBR(val)      bfin_write16(SPI0_RDBR, val)
+#define bfin_read_SPI0_BAUD()          bfin_read16(SPI0_BAUD)
+#define bfin_write_SPI0_BAUD(val)      bfin_write16(SPI0_BAUD, val)
+#define bfin_read_SPI0_SHADOW()        bfin_read16(SPI0_SHADOW)
+#define bfin_write_SPI0_SHADOW(val)    bfin_write16(SPI0_SHADOW, val)
+#define bfin_read_SPI1_CTL()           bfin_read16(SPI1_CTL)
+#define bfin_write_SPI1_CTL(val)       bfin_write16(SPI1_CTL, val)
+#define bfin_read_SPI1_FLG()           bfin_read16(SPI1_FLG)
+#define bfin_write_SPI1_FLG(val)       bfin_write16(SPI1_FLG, val)
+#define bfin_read_SPI1_STAT()          bfin_read16(SPI1_STAT)
+#define bfin_write_SPI1_STAT(val)      bfin_write16(SPI1_STAT, val)
+#define bfin_read_SPI1_TDBR()          bfin_read16(SPI1_TDBR)
+#define bfin_write_SPI1_TDBR(val)      bfin_write16(SPI1_TDBR, val)
+#define bfin_read_SPI1_RDBR()          bfin_read16(SPI1_RDBR)
+#define bfin_write_SPI1_RDBR(val)      bfin_write16(SPI1_RDBR, val)
+#define bfin_read_SPI1_BAUD()          bfin_read16(SPI1_BAUD)
+#define bfin_write_SPI1_BAUD(val)      bfin_write16(SPI1_BAUD, val)
+#define bfin_read_SPI1_SHADOW()        bfin_read16(SPI1_SHADOW)
+#define bfin_write_SPI1_SHADOW(val)    bfin_write16(SPI1_SHADOW, val)
+#define bfin_read_SPI2_CTL()           bfin_read16(SPI2_CTL)
+#define bfin_write_SPI2_CTL(val)       bfin_write16(SPI2_CTL, val)
+#define bfin_read_SPI2_FLG()           bfin_read16(SPI2_FLG)
+#define bfin_write_SPI2_FLG(val)       bfin_write16(SPI2_FLG, val)
+#define bfin_read_SPI2_STAT()          bfin_read16(SPI2_STAT)
+#define bfin_write_SPI2_STAT(val)      bfin_write16(SPI2_STAT, val)
+#define bfin_read_SPI2_TDBR()          bfin_read16(SPI2_TDBR)
+#define bfin_write_SPI2_TDBR(val)      bfin_write16(SPI2_TDBR, val)
+#define bfin_read_SPI2_RDBR()          bfin_read16(SPI2_RDBR)
+#define bfin_write_SPI2_RDBR(val)      bfin_write16(SPI2_RDBR, val)
+#define bfin_read_SPI2_BAUD()          bfin_read16(SPI2_BAUD)
+#define bfin_write_SPI2_BAUD(val)      bfin_write16(SPI2_BAUD, val)
+#define bfin_read_SPI2_SHADOW()        bfin_read16(SPI2_SHADOW)
+#define bfin_write_SPI2_SHADOW(val)    bfin_write16(SPI2_SHADOW, val)
+#define bfin_read_TIMER0_CONFIG()      bfin_read16(TIMER0_CONFIG)
+#define bfin_write_TIMER0_CONFIG(val)  bfin_write16(TIMER0_CONFIG, val)
+#define bfin_read_TIMER0_COUNTER()     bfin_read32(TIMER0_COUNTER)
+#define bfin_write_TIMER0_COUNTER(val) bfin_write32(TIMER0_COUNTER, val)
+#define bfin_read_TIMER0_PERIOD()      bfin_read32(TIMER0_PERIOD)
+#define bfin_write_TIMER0_PERIOD(val)  bfin_write32(TIMER0_PERIOD, val)
+#define bfin_read_TIMER0_WIDTH()       bfin_read32(TIMER0_WIDTH)
+#define bfin_write_TIMER0_WIDTH(val)   bfin_write32(TIMER0_WIDTH, val)
+#define bfin_read_TIMER1_CONFIG()      bfin_read16(TIMER1_CONFIG)
+#define bfin_write_TIMER1_CONFIG(val)  bfin_write16(TIMER1_CONFIG, val)
+#define bfin_read_TIMER1_COUNTER()     bfin_read32(TIMER1_COUNTER)
+#define bfin_write_TIMER1_COUNTER(val) bfin_write32(TIMER1_COUNTER, val)
+#define bfin_read_TIMER1_PERIOD()      bfin_read32(TIMER1_PERIOD)
+#define bfin_write_TIMER1_PERIOD(val)  bfin_write32(TIMER1_PERIOD, val)
+#define bfin_read_TIMER1_WIDTH()       bfin_read32(TIMER1_WIDTH)
+#define bfin_write_TIMER1_WIDTH(val)   bfin_write32(TIMER1_WIDTH, val)
+#define bfin_read_TIMER2_CONFIG()      bfin_read16(TIMER2_CONFIG)
+#define bfin_write_TIMER2_CONFIG(val)  bfin_write16(TIMER2_CONFIG, val)
+#define bfin_read_TIMER2_COUNTER()     bfin_read32(TIMER2_COUNTER)
+#define bfin_write_TIMER2_COUNTER(val) bfin_write32(TIMER2_COUNTER, val)
+#define bfin_read_TIMER2_PERIOD()      bfin_read32(TIMER2_PERIOD)
+#define bfin_write_TIMER2_PERIOD(val)  bfin_write32(TIMER2_PERIOD, val)
+#define bfin_read_TIMER2_WIDTH()       bfin_read32(TIMER2_WIDTH)
+#define bfin_write_TIMER2_WIDTH(val)   bfin_write32(TIMER2_WIDTH, val)
+#define bfin_read_TIMER_ENABLE()       bfin_read16(TIMER_ENABLE)
+#define bfin_write_TIMER_ENABLE(val)   bfin_write16(TIMER_ENABLE, val)
+#define bfin_read_TIMER_DISABLE()      bfin_read16(TIMER_DISABLE)
+#define bfin_write_TIMER_DISABLE(val)  bfin_write16(TIMER_DISABLE, val)
+#define bfin_read_TIMER_STATUS()       bfin_read16(TIMER_STATUS)
+#define bfin_write_TIMER_STATUS(val)   bfin_write16(TIMER_STATUS, val)
+#define bfin_read_SPORT0_TCR1()        bfin_read16(SPORT0_TCR1)
+#define bfin_write_SPORT0_TCR1(val)    bfin_write16(SPORT0_TCR1, val)
+#define bfin_read_SPORT0_TCR2()        bfin_read16(SPORT0_TCR2)
+#define bfin_write_SPORT0_TCR2(val)    bfin_write16(SPORT0_TCR2, val)
+#define bfin_read_SPORT0_TCLKDIV()     bfin_read16(SPORT0_TCLKDIV)
+#define bfin_write_SPORT0_TCLKDIV(val) bfin_write16(SPORT0_TCLKDIV, val)
+#define bfin_read_SPORT0_TFSDIV()      bfin_read16(SPORT0_TFSDIV)
+#define bfin_write_SPORT0_TFSDIV(val)  bfin_write16(SPORT0_TFSDIV, val)
+#define bfin_read_SPORT0_TX()          bfin_read32(SPORT0_TX)
+#define bfin_write_SPORT0_TX(val)      bfin_write32(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX()          bfin_read32(SPORT0_RX)
+#define bfin_write_SPORT0_RX(val)      bfin_write32(SPORT0_RX, val)
+#define bfin_read_SPORT0_RCR1()        bfin_read16(SPORT0_RCR1)
+#define bfin_write_SPORT0_RCR1(val)    bfin_write16(SPORT0_RCR1, val)
+#define bfin_read_SPORT0_RCR2()        bfin_read16(SPORT0_RCR2)
+#define bfin_write_SPORT0_RCR2(val)    bfin_write16(SPORT0_RCR2, val)
+#define bfin_read_SPORT0_RCLKDIV()     bfin_read16(SPORT0_RCLKDIV)
+#define bfin_write_SPORT0_RCLKDIV(val) bfin_write16(SPORT0_RCLKDIV, val)
+#define bfin_read_SPORT0_RFSDIV()      bfin_read16(SPORT0_RFSDIV)
+#define bfin_write_SPORT0_RFSDIV(val)  bfin_write16(SPORT0_RFSDIV, val)
+#define bfin_read_SPORT0_STAT()        bfin_read16(SPORT0_STAT)
+#define bfin_write_SPORT0_STAT(val)    bfin_write16(SPORT0_STAT, val)
+#define bfin_read_SPORT0_CHNL()        bfin_read16(SPORT0_CHNL)
+#define bfin_write_SPORT0_CHNL(val)    bfin_write16(SPORT0_CHNL, val)
+#define bfin_read_SPORT0_MCMC1()       bfin_read16(SPORT0_MCMC1)
+#define bfin_write_SPORT0_MCMC1(val)   bfin_write16(SPORT0_MCMC1, val)
+#define bfin_read_SPORT0_MCMC2()       bfin_read16(SPORT0_MCMC2)
+#define bfin_write_SPORT0_MCMC2(val)   bfin_write16(SPORT0_MCMC2, val)
+#define bfin_read_SPORT0_MTCS0()       bfin_read32(SPORT0_MTCS0)
+#define bfin_write_SPORT0_MTCS0(val)   bfin_write32(SPORT0_MTCS0, val)
+#define bfin_read_SPORT0_MTCS1()       bfin_read32(SPORT0_MTCS1)
+#define bfin_write_SPORT0_MTCS1(val)   bfin_write32(SPORT0_MTCS1, val)
+#define bfin_read_SPORT0_MTCS2()       bfin_read32(SPORT0_MTCS2)
+#define bfin_write_SPORT0_MTCS2(val)   bfin_write32(SPORT0_MTCS2, val)
+#define bfin_read_SPORT0_MTCS3()       bfin_read32(SPORT0_MTCS3)
+#define bfin_write_SPORT0_MTCS3(val)   bfin_write32(SPORT0_MTCS3, val)
+#define bfin_read_SPORT0_MRCS0()       bfin_read32(SPORT0_MRCS0)
+#define bfin_write_SPORT0_MRCS0(val)   bfin_write32(SPORT0_MRCS0, val)
+#define bfin_read_SPORT0_MRCS1()       bfin_read32(SPORT0_MRCS1)
+#define bfin_write_SPORT0_MRCS1(val)   bfin_write32(SPORT0_MRCS1, val)
+#define bfin_read_SPORT0_MRCS2()       bfin_read32(SPORT0_MRCS2)
+#define bfin_write_SPORT0_MRCS2(val)   bfin_write32(SPORT0_MRCS2, val)
+#define bfin_read_SPORT0_MRCS3()       bfin_read32(SPORT0_MRCS3)
+#define bfin_write_SPORT0_MRCS3(val)   bfin_write32(SPORT0_MRCS3, val)
+#define bfin_read_SPORT1_TCR1()        bfin_read16(SPORT1_TCR1)
+#define bfin_write_SPORT1_TCR1(val)    bfin_write16(SPORT1_TCR1, val)
+#define bfin_read_SPORT1_TCR2()        bfin_read16(SPORT1_TCR2)
+#define bfin_write_SPORT1_TCR2(val)    bfin_write16(SPORT1_TCR2, val)
+#define bfin_read_SPORT1_TCLKDIV()     bfin_read16(SPORT1_TCLKDIV)
+#define bfin_write_SPORT1_TCLKDIV(val) bfin_write16(SPORT1_TCLKDIV, val)
+#define bfin_read_SPORT1_TFSDIV()      bfin_read16(SPORT1_TFSDIV)
+#define bfin_write_SPORT1_TFSDIV(val)  bfin_write16(SPORT1_TFSDIV, val)
+#define bfin_read_SPORT1_TX()          bfin_read32(SPORT1_TX)
+#define bfin_write_SPORT1_TX(val)      bfin_write32(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX()          bfin_read32(SPORT1_RX)
+#define bfin_write_SPORT1_RX(val)      bfin_write32(SPORT1_RX, val)
+#define bfin_read_SPORT1_RCR1()        bfin_read16(SPORT1_RCR1)
+#define bfin_write_SPORT1_RCR1(val)    bfin_write16(SPORT1_RCR1, val)
+#define bfin_read_SPORT1_RCR2()        bfin_read16(SPORT1_RCR2)
+#define bfin_write_SPORT1_RCR2(val)    bfin_write16(SPORT1_RCR2, val)
+#define bfin_read_SPORT1_RCLKDIV()     bfin_read16(SPORT1_RCLKDIV)
+#define bfin_write_SPORT1_RCLKDIV(val) bfin_write16(SPORT1_RCLKDIV, val)
+#define bfin_read_SPORT1_RFSDIV()      bfin_read16(SPORT1_RFSDIV)
+#define bfin_write_SPORT1_RFSDIV(val)  bfin_write16(SPORT1_RFSDIV, val)
+#define bfin_read_SPORT1_STAT()        bfin_read16(SPORT1_STAT)
+#define bfin_write_SPORT1_STAT(val)    bfin_write16(SPORT1_STAT, val)
+#define bfin_read_SPORT1_CHNL()        bfin_read16(SPORT1_CHNL)
+#define bfin_write_SPORT1_CHNL(val)    bfin_write16(SPORT1_CHNL, val)
+#define bfin_read_SPORT1_MCMC1()       bfin_read16(SPORT1_MCMC1)
+#define bfin_write_SPORT1_MCMC1(val)   bfin_write16(SPORT1_MCMC1, val)
+#define bfin_read_SPORT1_MCMC2()       bfin_read16(SPORT1_MCMC2)
+#define bfin_write_SPORT1_MCMC2(val)   bfin_write16(SPORT1_MCMC2, val)
+#define bfin_read_SPORT1_MTCS0()       bfin_read32(SPORT1_MTCS0)
+#define bfin_write_SPORT1_MTCS0(val)   bfin_write32(SPORT1_MTCS0, val)
+#define bfin_read_SPORT1_MTCS1()       bfin_read32(SPORT1_MTCS1)
+#define bfin_write_SPORT1_MTCS1(val)   bfin_write32(SPORT1_MTCS1, val)
+#define bfin_read_SPORT1_MTCS2()       bfin_read32(SPORT1_MTCS2)
+#define bfin_write_SPORT1_MTCS2(val)   bfin_write32(SPORT1_MTCS2, val)
+#define bfin_read_SPORT1_MTCS3()       bfin_read32(SPORT1_MTCS3)
+#define bfin_write_SPORT1_MTCS3(val)   bfin_write32(SPORT1_MTCS3, val)
+#define bfin_read_SPORT1_MRCS0()       bfin_read32(SPORT1_MRCS0)
+#define bfin_write_SPORT1_MRCS0(val)   bfin_write32(SPORT1_MRCS0, val)
+#define bfin_read_SPORT1_MRCS1()       bfin_read32(SPORT1_MRCS1)
+#define bfin_write_SPORT1_MRCS1(val)   bfin_write32(SPORT1_MRCS1, val)
+#define bfin_read_SPORT1_MRCS2()       bfin_read32(SPORT1_MRCS2)
+#define bfin_write_SPORT1_MRCS2(val)   bfin_write32(SPORT1_MRCS2, val)
+#define bfin_read_SPORT1_MRCS3()       bfin_read32(SPORT1_MRCS3)
+#define bfin_write_SPORT1_MRCS3(val)   bfin_write32(SPORT1_MRCS3, val)
+#define bfin_read_SPORT2_TCR1()        bfin_read16(SPORT2_TCR1)
+#define bfin_write_SPORT2_TCR1(val)    bfin_write16(SPORT2_TCR1, val)
+#define bfin_read_SPORT2_TCR2()        bfin_read16(SPORT2_TCR2)
+#define bfin_write_SPORT2_TCR2(val)    bfin_write16(SPORT2_TCR2, val)
+#define bfin_read_SPORT2_TCLKDIV()     bfin_read16(SPORT2_TCLKDIV)
+#define bfin_write_SPORT2_TCLKDIV(val) bfin_write16(SPORT2_TCLKDIV, val)
+#define bfin_read_SPORT2_TFSDIV()      bfin_read16(SPORT2_TFSDIV)
+#define bfin_write_SPORT2_TFSDIV(val)  bfin_write16(SPORT2_TFSDIV, val)
+#define bfin_read_SPORT2_TX()          bfin_read32(SPORT2_TX)
+#define bfin_write_SPORT2_TX(val)      bfin_write32(SPORT2_TX, val)
+#define bfin_read_SPORT2_RX()          bfin_read32(SPORT2_RX)
+#define bfin_write_SPORT2_RX(val)      bfin_write32(SPORT2_RX, val)
+#define bfin_read_SPORT2_RCR1()        bfin_read16(SPORT2_RCR1)
+#define bfin_write_SPORT2_RCR1(val)    bfin_write16(SPORT2_RCR1, val)
+#define bfin_read_SPORT2_RCR2()        bfin_read16(SPORT2_RCR2)
+#define bfin_write_SPORT2_RCR2(val)    bfin_write16(SPORT2_RCR2, val)
+#define bfin_read_SPORT2_RCLKDIV()     bfin_read16(SPORT2_RCLKDIV)
+#define bfin_write_SPORT2_RCLKDIV(val) bfin_write16(SPORT2_RCLKDIV, val)
+#define bfin_read_SPORT2_RFSDIV()      bfin_read16(SPORT2_RFSDIV)
+#define bfin_write_SPORT2_RFSDIV(val)  bfin_write16(SPORT2_RFSDIV, val)
+#define bfin_read_SPORT2_STAT()        bfin_read16(SPORT2_STAT)
+#define bfin_write_SPORT2_STAT(val)    bfin_write16(SPORT2_STAT, val)
+#define bfin_read_SPORT2_CHNL()        bfin_read16(SPORT2_CHNL)
+#define bfin_write_SPORT2_CHNL(val)    bfin_write16(SPORT2_CHNL, val)
+#define bfin_read_SPORT2_MCMC1()       bfin_read16(SPORT2_MCMC1)
+#define bfin_write_SPORT2_MCMC1(val)   bfin_write16(SPORT2_MCMC1, val)
+#define bfin_read_SPORT2_MCMC2()       bfin_read16(SPORT2_MCMC2)
+#define bfin_write_SPORT2_MCMC2(val)   bfin_write16(SPORT2_MCMC2, val)
+#define bfin_read_SPORT2_MTCS0()       bfin_read32(SPORT2_MTCS0)
+#define bfin_write_SPORT2_MTCS0(val)   bfin_write32(SPORT2_MTCS0, val)
+#define bfin_read_SPORT2_MTCS1()       bfin_read32(SPORT2_MTCS1)
+#define bfin_write_SPORT2_MTCS1(val)   bfin_write32(SPORT2_MTCS1, val)
+#define bfin_read_SPORT2_MTCS2()       bfin_read32(SPORT2_MTCS2)
+#define bfin_write_SPORT2_MTCS2(val)   bfin_write32(SPORT2_MTCS2, val)
+#define bfin_read_SPORT2_MTCS3()       bfin_read32(SPORT2_MTCS3)
+#define bfin_write_SPORT2_MTCS3(val)   bfin_write32(SPORT2_MTCS3, val)
+#define bfin_read_SPORT2_MRCS0()       bfin_read32(SPORT2_MRCS0)
+#define bfin_write_SPORT2_MRCS0(val)   bfin_write32(SPORT2_MRCS0, val)
+#define bfin_read_SPORT2_MRCS1()       bfin_read32(SPORT2_MRCS1)
+#define bfin_write_SPORT2_MRCS1(val)   bfin_write32(SPORT2_MRCS1, val)
+#define bfin_read_SPORT2_MRCS2()       bfin_read32(SPORT2_MRCS2)
+#define bfin_write_SPORT2_MRCS2(val)   bfin_write32(SPORT2_MRCS2, val)
+#define bfin_read_SPORT2_MRCS3()       bfin_read32(SPORT2_MRCS3)
+#define bfin_write_SPORT2_MRCS3(val)   bfin_write32(SPORT2_MRCS3, val)
+#define bfin_read_SPORT3_TCR1()        bfin_read16(SPORT3_TCR1)
+#define bfin_write_SPORT3_TCR1(val)    bfin_write16(SPORT3_TCR1, val)
+#define bfin_read_SPORT3_TCR2()        bfin_read16(SPORT3_TCR2)
+#define bfin_write_SPORT3_TCR2(val)    bfin_write16(SPORT3_TCR2, val)
+#define bfin_read_SPORT3_TCLKDIV()     bfin_read16(SPORT3_TCLKDIV)
+#define bfin_write_SPORT3_TCLKDIV(val) bfin_write16(SPORT3_TCLKDIV, val)
+#define bfin_read_SPORT3_TFSDIV()      bfin_read16(SPORT3_TFSDIV)
+#define bfin_write_SPORT3_TFSDIV(val)  bfin_write16(SPORT3_TFSDIV, val)
+#define bfin_read_SPORT3_TX()          bfin_read32(SPORT3_TX)
+#define bfin_write_SPORT3_TX(val)      bfin_write32(SPORT3_TX, val)
+#define bfin_read_SPORT3_RX()          bfin_read32(SPORT3_RX)
+#define bfin_write_SPORT3_RX(val)      bfin_write32(SPORT3_RX, val)
+#define bfin_read_SPORT3_RCR1()        bfin_read16(SPORT3_RCR1)
+#define bfin_write_SPORT3_RCR1(val)    bfin_write16(SPORT3_RCR1, val)
+#define bfin_read_SPORT3_RCR2()        bfin_read16(SPORT3_RCR2)
+#define bfin_write_SPORT3_RCR2(val)    bfin_write16(SPORT3_RCR2, val)
+#define bfin_read_SPORT3_RCLKDIV()     bfin_read16(SPORT3_RCLKDIV)
+#define bfin_write_SPORT3_RCLKDIV(val) bfin_write16(SPORT3_RCLKDIV, val)
+#define bfin_read_SPORT3_RFSDIV()      bfin_read16(SPORT3_RFSDIV)
+#define bfin_write_SPORT3_RFSDIV(val)  bfin_write16(SPORT3_RFSDIV, val)
+#define bfin_read_SPORT3_STAT()        bfin_read16(SPORT3_STAT)
+#define bfin_write_SPORT3_STAT(val)    bfin_write16(SPORT3_STAT, val)
+#define bfin_read_SPORT3_CHNL()        bfin_read16(SPORT3_CHNL)
+#define bfin_write_SPORT3_CHNL(val)    bfin_write16(SPORT3_CHNL, val)
+#define bfin_read_SPORT3_MCMC1()       bfin_read16(SPORT3_MCMC1)
+#define bfin_write_SPORT3_MCMC1(val)   bfin_write16(SPORT3_MCMC1, val)
+#define bfin_read_SPORT3_MCMC2()       bfin_read16(SPORT3_MCMC2)
+#define bfin_write_SPORT3_MCMC2(val)   bfin_write16(SPORT3_MCMC2, val)
+#define bfin_read_SPORT3_MTCS0()       bfin_read32(SPORT3_MTCS0)
+#define bfin_write_SPORT3_MTCS0(val)   bfin_write32(SPORT3_MTCS0, val)
+#define bfin_read_SPORT3_MTCS1()       bfin_read32(SPORT3_MTCS1)
+#define bfin_write_SPORT3_MTCS1(val)   bfin_write32(SPORT3_MTCS1, val)
+#define bfin_read_SPORT3_MTCS2()       bfin_read32(SPORT3_MTCS2)
+#define bfin_write_SPORT3_MTCS2(val)   bfin_write32(SPORT3_MTCS2, val)
+#define bfin_read_SPORT3_MTCS3()       bfin_read32(SPORT3_MTCS3)
+#define bfin_write_SPORT3_MTCS3(val)   bfin_write32(SPORT3_MTCS3, val)
+#define bfin_read_SPORT3_MRCS0()       bfin_read32(SPORT3_MRCS0)
+#define bfin_write_SPORT3_MRCS0(val)   bfin_write32(SPORT3_MRCS0, val)
+#define bfin_read_SPORT3_MRCS1()       bfin_read32(SPORT3_MRCS1)
+#define bfin_write_SPORT3_MRCS1(val)   bfin_write32(SPORT3_MRCS1, val)
+#define bfin_read_SPORT3_MRCS2()       bfin_read32(SPORT3_MRCS2)
+#define bfin_write_SPORT3_MRCS2(val)   bfin_write32(SPORT3_MRCS2, val)
+#define bfin_read_SPORT3_MRCS3()       bfin_read32(SPORT3_MRCS3)
+#define bfin_write_SPORT3_MRCS3(val)   bfin_write32(SPORT3_MRCS3, val)
+#define bfin_read_PORTFIO()            bfin_read16(PORTFIO)
+#define bfin_write_PORTFIO(val)        bfin_write16(PORTFIO, val)
+#define bfin_read_PORTFIO_CLEAR()      bfin_read16(PORTFIO_CLEAR)
+#define bfin_write_PORTFIO_CLEAR(val)  bfin_write16(PORTFIO_CLEAR, val)
+#define bfin_read_PORTFIO_SET()        bfin_read16(PORTFIO_SET)
+#define bfin_write_PORTFIO_SET(val)    bfin_write16(PORTFIO_SET, val)
+#define bfin_read_PORTFIO_TOGGLE()     bfin_read16(PORTFIO_TOGGLE)
+#define bfin_write_PORTFIO_TOGGLE(val) bfin_write16(PORTFIO_TOGGLE, val)
+#define bfin_read_PORTFIO_MASKA()      bfin_read16(PORTFIO_MASKA)
+#define bfin_write_PORTFIO_MASKA(val)  bfin_write16(PORTFIO_MASKA, val)
+#define bfin_read_PORTFIO_MASKA_CLEAR() bfin_read16(PORTFIO_MASKA_CLEAR)
+#define bfin_write_PORTFIO_MASKA_CLEAR(val) bfin_write16(PORTFIO_MASKA_CLEAR, val)
+#define bfin_read_PORTFIO_MASKA_SET()  bfin_read16(PORTFIO_MASKA_SET)
+#define bfin_write_PORTFIO_MASKA_SET(val) bfin_write16(PORTFIO_MASKA_SET, val)
+#define bfin_read_PORTFIO_MASKA_TOGGLE() bfin_read16(PORTFIO_MASKA_TOGGLE)
+#define bfin_write_PORTFIO_MASKA_TOGGLE(val) bfin_write16(PORTFIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTFIO_MASKB()      bfin_read16(PORTFIO_MASKB)
+#define bfin_write_PORTFIO_MASKB(val)  bfin_write16(PORTFIO_MASKB, val)
+#define bfin_read_PORTFIO_MASKB_CLEAR() bfin_read16(PORTFIO_MASKB_CLEAR)
+#define bfin_write_PORTFIO_MASKB_CLEAR(val) bfin_write16(PORTFIO_MASKB_CLEAR, val)
+#define bfin_read_PORTFIO_MASKB_SET()  bfin_read16(PORTFIO_MASKB_SET)
+#define bfin_write_PORTFIO_MASKB_SET(val) bfin_write16(PORTFIO_MASKB_SET, val)
+#define bfin_read_PORTFIO_MASKB_TOGGLE() bfin_read16(PORTFIO_MASKB_TOGGLE)
+#define bfin_write_PORTFIO_MASKB_TOGGLE(val) bfin_write16(PORTFIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTFIO_DIR()        bfin_read16(PORTFIO_DIR)
+#define bfin_write_PORTFIO_DIR(val)    bfin_write16(PORTFIO_DIR, val)
+#define bfin_read_PORTFIO_POLAR()      bfin_read16(PORTFIO_POLAR)
+#define bfin_write_PORTFIO_POLAR(val)  bfin_write16(PORTFIO_POLAR, val)
+#define bfin_read_PORTFIO_EDGE()       bfin_read16(PORTFIO_EDGE)
+#define bfin_write_PORTFIO_EDGE(val)   bfin_write16(PORTFIO_EDGE, val)
+#define bfin_read_PORTFIO_BOTH()       bfin_read16(PORTFIO_BOTH)
+#define bfin_write_PORTFIO_BOTH(val)   bfin_write16(PORTFIO_BOTH, val)
+#define bfin_read_PORTFIO_INEN()       bfin_read16(PORTFIO_INEN)
+#define bfin_write_PORTFIO_INEN(val)   bfin_write16(PORTFIO_INEN, val)
+#define bfin_read_PORTCIO_FER()        bfin_read16(PORTCIO_FER)
+#define bfin_write_PORTCIO_FER(val)    bfin_write16(PORTCIO_FER, val)
+#define bfin_read_PORTCIO()            bfin_read16(PORTCIO)
+#define bfin_write_PORTCIO(val)        bfin_write16(PORTCIO, val)
+#define bfin_read_PORTCIO_CLEAR()      bfin_read16(PORTCIO_CLEAR)
+#define bfin_write_PORTCIO_CLEAR(val)  bfin_write16(PORTCIO_CLEAR, val)
+#define bfin_read_PORTCIO_SET()        bfin_read16(PORTCIO_SET)
+#define bfin_write_PORTCIO_SET(val)    bfin_write16(PORTCIO_SET, val)
+#define bfin_read_PORTCIO_TOGGLE()     bfin_read16(PORTCIO_TOGGLE)
+#define bfin_write_PORTCIO_TOGGLE(val) bfin_write16(PORTCIO_TOGGLE, val)
+#define bfin_read_PORTCIO_DIR()        bfin_read16(PORTCIO_DIR)
+#define bfin_write_PORTCIO_DIR(val)    bfin_write16(PORTCIO_DIR, val)
+#define bfin_read_PORTCIO_INEN()       bfin_read16(PORTCIO_INEN)
+#define bfin_write_PORTCIO_INEN(val)   bfin_write16(PORTCIO_INEN, val)
+#define bfin_read_PORTDIO_FER()        bfin_read16(PORTDIO_FER)
+#define bfin_write_PORTDIO_FER(val)    bfin_write16(PORTDIO_FER, val)
+#define bfin_read_PORTDIO()            bfin_read16(PORTDIO)
+#define bfin_write_PORTDIO(val)        bfin_write16(PORTDIO, val)
+#define bfin_read_PORTDIO_CLEAR()      bfin_read16(PORTDIO_CLEAR)
+#define bfin_write_PORTDIO_CLEAR(val)  bfin_write16(PORTDIO_CLEAR, val)
+#define bfin_read_PORTDIO_SET()        bfin_read16(PORTDIO_SET)
+#define bfin_write_PORTDIO_SET(val)    bfin_write16(PORTDIO_SET, val)
+#define bfin_read_PORTDIO_TOGGLE()     bfin_read16(PORTDIO_TOGGLE)
+#define bfin_write_PORTDIO_TOGGLE(val) bfin_write16(PORTDIO_TOGGLE, val)
+#define bfin_read_PORTDIO_DIR()        bfin_read16(PORTDIO_DIR)
+#define bfin_write_PORTDIO_DIR(val)    bfin_write16(PORTDIO_DIR, val)
+#define bfin_read_PORTDIO_INEN()       bfin_read16(PORTDIO_INEN)
+#define bfin_write_PORTDIO_INEN(val)   bfin_write16(PORTDIO_INEN, val)
+#define bfin_read_PORTEIO_FER()        bfin_read16(PORTEIO_FER)
+#define bfin_write_PORTEIO_FER(val)    bfin_write16(PORTEIO_FER, val)
+#define bfin_read_PORTEIO()            bfin_read16(PORTEIO)
+#define bfin_write_PORTEIO(val)        bfin_write16(PORTEIO, val)
+#define bfin_read_PORTEIO_CLEAR()      bfin_read16(PORTEIO_CLEAR)
+#define bfin_write_PORTEIO_CLEAR(val)  bfin_write16(PORTEIO_CLEAR, val)
+#define bfin_read_PORTEIO_SET()        bfin_read16(PORTEIO_SET)
+#define bfin_write_PORTEIO_SET(val)    bfin_write16(PORTEIO_SET, val)
+#define bfin_read_PORTEIO_TOGGLE()     bfin_read16(PORTEIO_TOGGLE)
+#define bfin_write_PORTEIO_TOGGLE(val) bfin_write16(PORTEIO_TOGGLE, val)
+#define bfin_read_PORTEIO_DIR()        bfin_read16(PORTEIO_DIR)
+#define bfin_write_PORTEIO_DIR(val)    bfin_write16(PORTEIO_DIR, val)
+#define bfin_read_PORTEIO_INEN()       bfin_read16(PORTEIO_INEN)
+#define bfin_write_PORTEIO_INEN(val)   bfin_write16(PORTEIO_INEN, val)
+#define bfin_read_EBIU_AMGCTL()        bfin_read16(EBIU_AMGCTL)
+#define bfin_write_EBIU_AMGCTL(val)    bfin_write16(EBIU_AMGCTL, val)
+#define bfin_read_EBIU_AMBCTL0()       bfin_read32(EBIU_AMBCTL0)
+#define bfin_write_EBIU_AMBCTL0(val)   bfin_write32(EBIU_AMBCTL0, val)
+#define bfin_read_EBIU_AMBCTL1()       bfin_read32(EBIU_AMBCTL1)
+#define bfin_write_EBIU_AMBCTL1(val)   bfin_write32(EBIU_AMBCTL1, val)
+#define bfin_read_EBIU_SDGCTL()        bfin_read32(EBIU_SDGCTL)
+#define bfin_write_EBIU_SDGCTL(val)    bfin_write32(EBIU_SDGCTL, val)
+#define bfin_read_EBIU_SDBCTL()        bfin_read16(EBIU_SDBCTL)
+#define bfin_write_EBIU_SDBCTL(val)    bfin_write16(EBIU_SDBCTL, val)
+#define bfin_read_EBIU_SDRRC()         bfin_read16(EBIU_SDRRC)
+#define bfin_write_EBIU_SDRRC(val)     bfin_write16(EBIU_SDRRC, val)
+#define bfin_read_EBIU_SDSTAT()        bfin_read16(EBIU_SDSTAT)
+#define bfin_write_EBIU_SDSTAT(val)    bfin_write16(EBIU_SDSTAT, val)
+#define bfin_read_DMA0_TC_PER()        bfin_read16(DMA0_TC_PER)
+#define bfin_write_DMA0_TC_PER(val)    bfin_write16(DMA0_TC_PER, val)
+#define bfin_read_DMA0_TC_CNT()        bfin_read16(DMA0_TC_CNT)
+#define bfin_write_DMA0_TC_CNT(val)    bfin_write16(DMA0_TC_CNT, val)
+#define bfin_read_DMA0_NEXT_DESC_PTR() bfin_readPTR(DMA0_NEXT_DESC_PTR)
+#define bfin_write_DMA0_NEXT_DESC_PTR(val) bfin_writePTR(DMA0_NEXT_DESC_PTR, val)
+#define bfin_read_DMA0_START_ADDR()    bfin_readPTR(DMA0_START_ADDR)
+#define bfin_write_DMA0_START_ADDR(val) bfin_writePTR(DMA0_START_ADDR, val)
+#define bfin_read_DMA0_CONFIG()        bfin_read16(DMA0_CONFIG)
+#define bfin_write_DMA0_CONFIG(val)    bfin_write16(DMA0_CONFIG, val)
+#define bfin_read_DMA0_X_COUNT()       bfin_read16(DMA0_X_COUNT)
+#define bfin_write_DMA0_X_COUNT(val)   bfin_write16(DMA0_X_COUNT, val)
+#define bfin_read_DMA0_X_MODIFY()      bfin_read16(DMA0_X_MODIFY)
+#define bfin_write_DMA0_X_MODIFY(val)  bfin_write16(DMA0_X_MODIFY, val)
+#define bfin_read_DMA0_Y_COUNT()       bfin_read16(DMA0_Y_COUNT)
+#define bfin_write_DMA0_Y_COUNT(val)   bfin_write16(DMA0_Y_COUNT, val)
+#define bfin_read_DMA0_Y_MODIFY()      bfin_read16(DMA0_Y_MODIFY)
+#define bfin_write_DMA0_Y_MODIFY(val)  bfin_write16(DMA0_Y_MODIFY, val)
+#define bfin_read_DMA0_CURR_DESC_PTR() bfin_readPTR(DMA0_CURR_DESC_PTR)
+#define bfin_write_DMA0_CURR_DESC_PTR(val) bfin_writePTR(DMA0_CURR_DESC_PTR, val)
+#define bfin_read_DMA0_CURR_ADDR()     bfin_readPTR(DMA0_CURR_ADDR)
+#define bfin_write_DMA0_CURR_ADDR(val) bfin_writePTR(DMA0_CURR_ADDR, val)
+#define bfin_read_DMA0_IRQ_STATUS()    bfin_read16(DMA0_IRQ_STATUS)
+#define bfin_write_DMA0_IRQ_STATUS(val) bfin_write16(DMA0_IRQ_STATUS, val)
+#define bfin_read_DMA0_PERIPHERAL_MAP() bfin_read16(DMA0_PERIPHERAL_MAP)
+#define bfin_write_DMA0_PERIPHERAL_MAP(val) bfin_write16(DMA0_PERIPHERAL_MAP, val)
+#define bfin_read_DMA0_CURR_X_COUNT()  bfin_read16(DMA0_CURR_X_COUNT)
+#define bfin_write_DMA0_CURR_X_COUNT(val) bfin_write16(DMA0_CURR_X_COUNT, val)
+#define bfin_read_DMA0_CURR_Y_COUNT()  bfin_read16(DMA0_CURR_Y_COUNT)
+#define bfin_write_DMA0_CURR_Y_COUNT(val) bfin_write16(DMA0_CURR_Y_COUNT, val)
+#define bfin_read_DMA1_NEXT_DESC_PTR() bfin_readPTR(DMA1_NEXT_DESC_PTR)
+#define bfin_write_DMA1_NEXT_DESC_PTR(val) bfin_writePTR(DMA1_NEXT_DESC_PTR, val)
+#define bfin_read_DMA1_START_ADDR()    bfin_readPTR(DMA1_START_ADDR)
+#define bfin_write_DMA1_START_ADDR(val) bfin_writePTR(DMA1_START_ADDR, val)
+#define bfin_read_DMA1_CONFIG()        bfin_read16(DMA1_CONFIG)
+#define bfin_write_DMA1_CONFIG(val)    bfin_write16(DMA1_CONFIG, val)
+#define bfin_read_DMA1_X_COUNT()       bfin_read16(DMA1_X_COUNT)
+#define bfin_write_DMA1_X_COUNT(val)   bfin_write16(DMA1_X_COUNT, val)
+#define bfin_read_DMA1_X_MODIFY()      bfin_read16(DMA1_X_MODIFY)
+#define bfin_write_DMA1_X_MODIFY(val)  bfin_write16(DMA1_X_MODIFY, val)
+#define bfin_read_DMA1_Y_COUNT()       bfin_read16(DMA1_Y_COUNT)
+#define bfin_write_DMA1_Y_COUNT(val)   bfin_write16(DMA1_Y_COUNT, val)
+#define bfin_read_DMA1_Y_MODIFY()      bfin_read16(DMA1_Y_MODIFY)
+#define bfin_write_DMA1_Y_MODIFY(val)  bfin_write16(DMA1_Y_MODIFY, val)
+#define bfin_read_DMA1_CURR_DESC_PTR() bfin_readPTR(DMA1_CURR_DESC_PTR)
+#define bfin_write_DMA1_CURR_DESC_PTR(val) bfin_writePTR(DMA1_CURR_DESC_PTR, val)
+#define bfin_read_DMA1_CURR_ADDR()     bfin_readPTR(DMA1_CURR_ADDR)
+#define bfin_write_DMA1_CURR_ADDR(val) bfin_writePTR(DMA1_CURR_ADDR, val)
+#define bfin_read_DMA1_IRQ_STATUS()    bfin_read16(DMA1_IRQ_STATUS)
+#define bfin_write_DMA1_IRQ_STATUS(val) bfin_write16(DMA1_IRQ_STATUS, val)
+#define bfin_read_DMA1_PERIPHERAL_MAP() bfin_read16(DMA1_PERIPHERAL_MAP)
+#define bfin_write_DMA1_PERIPHERAL_MAP(val) bfin_write16(DMA1_PERIPHERAL_MAP, val)
+#define bfin_read_DMA1_CURR_X_COUNT()  bfin_read16(DMA1_CURR_X_COUNT)
+#define bfin_write_DMA1_CURR_X_COUNT(val) bfin_write16(DMA1_CURR_X_COUNT, val)
+#define bfin_read_DMA1_CURR_Y_COUNT()  bfin_read16(DMA1_CURR_Y_COUNT)
+#define bfin_write_DMA1_CURR_Y_COUNT(val) bfin_write16(DMA1_CURR_Y_COUNT, val)
+#define bfin_read_DMA2_NEXT_DESC_PTR() bfin_readPTR(DMA2_NEXT_DESC_PTR)
+#define bfin_write_DMA2_NEXT_DESC_PTR(val) bfin_writePTR(DMA2_NEXT_DESC_PTR, val)
+#define bfin_read_DMA2_START_ADDR()    bfin_readPTR(DMA2_START_ADDR)
+#define bfin_write_DMA2_START_ADDR(val) bfin_writePTR(DMA2_START_ADDR, val)
+#define bfin_read_DMA2_CONFIG()        bfin_read16(DMA2_CONFIG)
+#define bfin_write_DMA2_CONFIG(val)    bfin_write16(DMA2_CONFIG, val)
+#define bfin_read_DMA2_X_COUNT()       bfin_read16(DMA2_X_COUNT)
+#define bfin_write_DMA2_X_COUNT(val)   bfin_write16(DMA2_X_COUNT, val)
+#define bfin_read_DMA2_X_MODIFY()      bfin_read16(DMA2_X_MODIFY)
+#define bfin_write_DMA2_X_MODIFY(val)  bfin_write16(DMA2_X_MODIFY, val)
+#define bfin_read_DMA2_Y_COUNT()       bfin_read16(DMA2_Y_COUNT)
+#define bfin_write_DMA2_Y_COUNT(val)   bfin_write16(DMA2_Y_COUNT, val)
+#define bfin_read_DMA2_Y_MODIFY()      bfin_read16(DMA2_Y_MODIFY)
+#define bfin_write_DMA2_Y_MODIFY(val)  bfin_write16(DMA2_Y_MODIFY, val)
+#define bfin_read_DMA2_CURR_DESC_PTR() bfin_readPTR(DMA2_CURR_DESC_PTR)
+#define bfin_write_DMA2_CURR_DESC_PTR(val) bfin_writePTR(DMA2_CURR_DESC_PTR, val)
+#define bfin_read_DMA2_CURR_ADDR()     bfin_readPTR(DMA2_CURR_ADDR)
+#define bfin_write_DMA2_CURR_ADDR(val) bfin_writePTR(DMA2_CURR_ADDR, val)
+#define bfin_read_DMA2_IRQ_STATUS()    bfin_read16(DMA2_IRQ_STATUS)
+#define bfin_write_DMA2_IRQ_STATUS(val) bfin_write16(DMA2_IRQ_STATUS, val)
+#define bfin_read_DMA2_PERIPHERAL_MAP() bfin_read16(DMA2_PERIPHERAL_MAP)
+#define bfin_write_DMA2_PERIPHERAL_MAP(val) bfin_write16(DMA2_PERIPHERAL_MAP, val)
+#define bfin_read_DMA2_CURR_X_COUNT()  bfin_read16(DMA2_CURR_X_COUNT)
+#define bfin_write_DMA2_CURR_X_COUNT(val) bfin_write16(DMA2_CURR_X_COUNT, val)
+#define bfin_read_DMA2_CURR_Y_COUNT()  bfin_read16(DMA2_CURR_Y_COUNT)
+#define bfin_write_DMA2_CURR_Y_COUNT(val) bfin_write16(DMA2_CURR_Y_COUNT, val)
+#define bfin_read_DMA3_NEXT_DESC_PTR() bfin_readPTR(DMA3_NEXT_DESC_PTR)
+#define bfin_write_DMA3_NEXT_DESC_PTR(val) bfin_writePTR(DMA3_NEXT_DESC_PTR, val)
+#define bfin_read_DMA3_START_ADDR()    bfin_readPTR(DMA3_START_ADDR)
+#define bfin_write_DMA3_START_ADDR(val) bfin_writePTR(DMA3_START_ADDR, val)
+#define bfin_read_DMA3_CONFIG()        bfin_read16(DMA3_CONFIG)
+#define bfin_write_DMA3_CONFIG(val)    bfin_write16(DMA3_CONFIG, val)
+#define bfin_read_DMA3_X_COUNT()       bfin_read16(DMA3_X_COUNT)
+#define bfin_write_DMA3_X_COUNT(val)   bfin_write16(DMA3_X_COUNT, val)
+#define bfin_read_DMA3_X_MODIFY()      bfin_read16(DMA3_X_MODIFY)
+#define bfin_write_DMA3_X_MODIFY(val)  bfin_write16(DMA3_X_MODIFY, val)
+#define bfin_read_DMA3_Y_COUNT()       bfin_read16(DMA3_Y_COUNT)
+#define bfin_write_DMA3_Y_COUNT(val)   bfin_write16(DMA3_Y_COUNT, val)
+#define bfin_read_DMA3_Y_MODIFY()      bfin_read16(DMA3_Y_MODIFY)
+#define bfin_write_DMA3_Y_MODIFY(val)  bfin_write16(DMA3_Y_MODIFY, val)
+#define bfin_read_DMA3_CURR_DESC_PTR() bfin_readPTR(DMA3_CURR_DESC_PTR)
+#define bfin_write_DMA3_CURR_DESC_PTR(val) bfin_writePTR(DMA3_CURR_DESC_PTR, val)
+#define bfin_read_DMA3_CURR_ADDR()     bfin_readPTR(DMA3_CURR_ADDR)
+#define bfin_write_DMA3_CURR_ADDR(val) bfin_writePTR(DMA3_CURR_ADDR, val)
+#define bfin_read_DMA3_IRQ_STATUS()    bfin_read16(DMA3_IRQ_STATUS)
+#define bfin_write_DMA3_IRQ_STATUS(val) bfin_write16(DMA3_IRQ_STATUS, val)
+#define bfin_read_DMA3_PERIPHERAL_MAP() bfin_read16(DMA3_PERIPHERAL_MAP)
+#define bfin_write_DMA3_PERIPHERAL_MAP(val) bfin_write16(DMA3_PERIPHERAL_MAP, val)
+#define bfin_read_DMA3_CURR_X_COUNT()  bfin_read16(DMA3_CURR_X_COUNT)
+#define bfin_write_DMA3_CURR_X_COUNT(val) bfin_write16(DMA3_CURR_X_COUNT, val)
+#define bfin_read_DMA3_CURR_Y_COUNT()  bfin_read16(DMA3_CURR_Y_COUNT)
+#define bfin_write_DMA3_CURR_Y_COUNT(val) bfin_write16(DMA3_CURR_Y_COUNT, val)
+#define bfin_read_DMA4_NEXT_DESC_PTR() bfin_readPTR(DMA4_NEXT_DESC_PTR)
+#define bfin_write_DMA4_NEXT_DESC_PTR(val) bfin_writePTR(DMA4_NEXT_DESC_PTR, val)
+#define bfin_read_DMA4_START_ADDR()    bfin_readPTR(DMA4_START_ADDR)
+#define bfin_write_DMA4_START_ADDR(val) bfin_writePTR(DMA4_START_ADDR, val)
+#define bfin_read_DMA4_CONFIG()        bfin_read16(DMA4_CONFIG)
+#define bfin_write_DMA4_CONFIG(val)    bfin_write16(DMA4_CONFIG, val)
+#define bfin_read_DMA4_X_COUNT()       bfin_read16(DMA4_X_COUNT)
+#define bfin_write_DMA4_X_COUNT(val)   bfin_write16(DMA4_X_COUNT, val)
+#define bfin_read_DMA4_X_MODIFY()      bfin_read16(DMA4_X_MODIFY)
+#define bfin_write_DMA4_X_MODIFY(val)  bfin_write16(DMA4_X_MODIFY, val)
+#define bfin_read_DMA4_Y_COUNT()       bfin_read16(DMA4_Y_COUNT)
+#define bfin_write_DMA4_Y_COUNT(val)   bfin_write16(DMA4_Y_COUNT, val)
+#define bfin_read_DMA4_Y_MODIFY()      bfin_read16(DMA4_Y_MODIFY)
+#define bfin_write_DMA4_Y_MODIFY(val)  bfin_write16(DMA4_Y_MODIFY, val)
+#define bfin_read_DMA4_CURR_DESC_PTR() bfin_readPTR(DMA4_CURR_DESC_PTR)
+#define bfin_write_DMA4_CURR_DESC_PTR(val) bfin_writePTR(DMA4_CURR_DESC_PTR, val)
+#define bfin_read_DMA4_CURR_ADDR()     bfin_readPTR(DMA4_CURR_ADDR)
+#define bfin_write_DMA4_CURR_ADDR(val) bfin_writePTR(DMA4_CURR_ADDR, val)
+#define bfin_read_DMA4_IRQ_STATUS()    bfin_read16(DMA4_IRQ_STATUS)
+#define bfin_write_DMA4_IRQ_STATUS(val) bfin_write16(DMA4_IRQ_STATUS, val)
+#define bfin_read_DMA4_PERIPHERAL_MAP() bfin_read16(DMA4_PERIPHERAL_MAP)
+#define bfin_write_DMA4_PERIPHERAL_MAP(val) bfin_write16(DMA4_PERIPHERAL_MAP, val)
+#define bfin_read_DMA4_CURR_X_COUNT()  bfin_read16(DMA4_CURR_X_COUNT)
+#define bfin_write_DMA4_CURR_X_COUNT(val) bfin_write16(DMA4_CURR_X_COUNT, val)
+#define bfin_read_DMA4_CURR_Y_COUNT()  bfin_read16(DMA4_CURR_Y_COUNT)
+#define bfin_write_DMA4_CURR_Y_COUNT(val) bfin_write16(DMA4_CURR_Y_COUNT, val)
+#define bfin_read_DMA5_NEXT_DESC_PTR() bfin_readPTR(DMA5_NEXT_DESC_PTR)
+#define bfin_write_DMA5_NEXT_DESC_PTR(val) bfin_writePTR(DMA5_NEXT_DESC_PTR, val)
+#define bfin_read_DMA5_START_ADDR()    bfin_readPTR(DMA5_START_ADDR)
+#define bfin_write_DMA5_START_ADDR(val) bfin_writePTR(DMA5_START_ADDR, val)
+#define bfin_read_DMA5_CONFIG()        bfin_read16(DMA5_CONFIG)
+#define bfin_write_DMA5_CONFIG(val)    bfin_write16(DMA5_CONFIG, val)
+#define bfin_read_DMA5_X_COUNT()       bfin_read16(DMA5_X_COUNT)
+#define bfin_write_DMA5_X_COUNT(val)   bfin_write16(DMA5_X_COUNT, val)
+#define bfin_read_DMA5_X_MODIFY()      bfin_read16(DMA5_X_MODIFY)
+#define bfin_write_DMA5_X_MODIFY(val)  bfin_write16(DMA5_X_MODIFY, val)
+#define bfin_read_DMA5_Y_COUNT()       bfin_read16(DMA5_Y_COUNT)
+#define bfin_write_DMA5_Y_COUNT(val)   bfin_write16(DMA5_Y_COUNT, val)
+#define bfin_read_DMA5_Y_MODIFY()      bfin_read16(DMA5_Y_MODIFY)
+#define bfin_write_DMA5_Y_MODIFY(val)  bfin_write16(DMA5_Y_MODIFY, val)
+#define bfin_read_DMA5_CURR_DESC_PTR() bfin_readPTR(DMA5_CURR_DESC_PTR)
+#define bfin_write_DMA5_CURR_DESC_PTR(val) bfin_writePTR(DMA5_CURR_DESC_PTR, val)
+#define bfin_read_DMA5_CURR_ADDR()     bfin_readPTR(DMA5_CURR_ADDR)
+#define bfin_write_DMA5_CURR_ADDR(val) bfin_writePTR(DMA5_CURR_ADDR, val)
+#define bfin_read_DMA5_IRQ_STATUS()    bfin_read16(DMA5_IRQ_STATUS)
+#define bfin_write_DMA5_IRQ_STATUS(val) bfin_write16(DMA5_IRQ_STATUS, val)
+#define bfin_read_DMA5_PERIPHERAL_MAP() bfin_read16(DMA5_PERIPHERAL_MAP)
+#define bfin_write_DMA5_PERIPHERAL_MAP(val) bfin_write16(DMA5_PERIPHERAL_MAP, val)
+#define bfin_read_DMA5_CURR_X_COUNT()  bfin_read16(DMA5_CURR_X_COUNT)
+#define bfin_write_DMA5_CURR_X_COUNT(val) bfin_write16(DMA5_CURR_X_COUNT, val)
+#define bfin_read_DMA5_CURR_Y_COUNT()  bfin_read16(DMA5_CURR_Y_COUNT)
+#define bfin_write_DMA5_CURR_Y_COUNT(val) bfin_write16(DMA5_CURR_Y_COUNT, val)
+#define bfin_read_DMA6_NEXT_DESC_PTR() bfin_readPTR(DMA6_NEXT_DESC_PTR)
+#define bfin_write_DMA6_NEXT_DESC_PTR(val) bfin_writePTR(DMA6_NEXT_DESC_PTR, val)
+#define bfin_read_DMA6_START_ADDR()    bfin_readPTR(DMA6_START_ADDR)
+#define bfin_write_DMA6_START_ADDR(val) bfin_writePTR(DMA6_START_ADDR, val)
+#define bfin_read_DMA6_CONFIG()        bfin_read16(DMA6_CONFIG)
+#define bfin_write_DMA6_CONFIG(val)    bfin_write16(DMA6_CONFIG, val)
+#define bfin_read_DMA6_X_COUNT()       bfin_read16(DMA6_X_COUNT)
+#define bfin_write_DMA6_X_COUNT(val)   bfin_write16(DMA6_X_COUNT, val)
+#define bfin_read_DMA6_X_MODIFY()      bfin_read16(DMA6_X_MODIFY)
+#define bfin_write_DMA6_X_MODIFY(val)  bfin_write16(DMA6_X_MODIFY, val)
+#define bfin_read_DMA6_Y_COUNT()       bfin_read16(DMA6_Y_COUNT)
+#define bfin_write_DMA6_Y_COUNT(val)   bfin_write16(DMA6_Y_COUNT, val)
+#define bfin_read_DMA6_Y_MODIFY()      bfin_read16(DMA6_Y_MODIFY)
+#define bfin_write_DMA6_Y_MODIFY(val)  bfin_write16(DMA6_Y_MODIFY, val)
+#define bfin_read_DMA6_CURR_DESC_PTR() bfin_readPTR(DMA6_CURR_DESC_PTR)
+#define bfin_write_DMA6_CURR_DESC_PTR(val) bfin_writePTR(DMA6_CURR_DESC_PTR, val)
+#define bfin_read_DMA6_CURR_ADDR()     bfin_readPTR(DMA6_CURR_ADDR)
+#define bfin_write_DMA6_CURR_ADDR(val) bfin_writePTR(DMA6_CURR_ADDR, val)
+#define bfin_read_DMA6_IRQ_STATUS()    bfin_read16(DMA6_IRQ_STATUS)
+#define bfin_write_DMA6_IRQ_STATUS(val) bfin_write16(DMA6_IRQ_STATUS, val)
+#define bfin_read_DMA6_PERIPHERAL_MAP() bfin_read16(DMA6_PERIPHERAL_MAP)
+#define bfin_write_DMA6_PERIPHERAL_MAP(val) bfin_write16(DMA6_PERIPHERAL_MAP, val)
+#define bfin_read_DMA6_CURR_X_COUNT()  bfin_read16(DMA6_CURR_X_COUNT)
+#define bfin_write_DMA6_CURR_X_COUNT(val) bfin_write16(DMA6_CURR_X_COUNT, val)
+#define bfin_read_DMA6_CURR_Y_COUNT()  bfin_read16(DMA6_CURR_Y_COUNT)
+#define bfin_write_DMA6_CURR_Y_COUNT(val) bfin_write16(DMA6_CURR_Y_COUNT, val)
+#define bfin_read_DMA7_NEXT_DESC_PTR() bfin_readPTR(DMA7_NEXT_DESC_PTR)
+#define bfin_write_DMA7_NEXT_DESC_PTR(val) bfin_writePTR(DMA7_NEXT_DESC_PTR, val)
+#define bfin_read_DMA7_START_ADDR()    bfin_readPTR(DMA7_START_ADDR)
+#define bfin_write_DMA7_START_ADDR(val) bfin_writePTR(DMA7_START_ADDR, val)
+#define bfin_read_DMA7_CONFIG()        bfin_read16(DMA7_CONFIG)
+#define bfin_write_DMA7_CONFIG(val)    bfin_write16(DMA7_CONFIG, val)
+#define bfin_read_DMA7_X_COUNT()       bfin_read16(DMA7_X_COUNT)
+#define bfin_write_DMA7_X_COUNT(val)   bfin_write16(DMA7_X_COUNT, val)
+#define bfin_read_DMA7_X_MODIFY()      bfin_read16(DMA7_X_MODIFY)
+#define bfin_write_DMA7_X_MODIFY(val)  bfin_write16(DMA7_X_MODIFY, val)
+#define bfin_read_DMA7_Y_COUNT()       bfin_read16(DMA7_Y_COUNT)
+#define bfin_write_DMA7_Y_COUNT(val)   bfin_write16(DMA7_Y_COUNT, val)
+#define bfin_read_DMA7_Y_MODIFY()      bfin_read16(DMA7_Y_MODIFY)
+#define bfin_write_DMA7_Y_MODIFY(val)  bfin_write16(DMA7_Y_MODIFY, val)
+#define bfin_read_DMA7_CURR_DESC_PTR() bfin_readPTR(DMA7_CURR_DESC_PTR)
+#define bfin_write_DMA7_CURR_DESC_PTR(val) bfin_writePTR(DMA7_CURR_DESC_PTR, val)
+#define bfin_read_DMA7_CURR_ADDR()     bfin_readPTR(DMA7_CURR_ADDR)
+#define bfin_write_DMA7_CURR_ADDR(val) bfin_writePTR(DMA7_CURR_ADDR, val)
+#define bfin_read_DMA7_IRQ_STATUS()    bfin_read16(DMA7_IRQ_STATUS)
+#define bfin_write_DMA7_IRQ_STATUS(val) bfin_write16(DMA7_IRQ_STATUS, val)
+#define bfin_read_DMA7_PERIPHERAL_MAP() bfin_read16(DMA7_PERIPHERAL_MAP)
+#define bfin_write_DMA7_PERIPHERAL_MAP(val) bfin_write16(DMA7_PERIPHERAL_MAP, val)
+#define bfin_read_DMA7_CURR_X_COUNT()  bfin_read16(DMA7_CURR_X_COUNT)
+#define bfin_write_DMA7_CURR_X_COUNT(val) bfin_write16(DMA7_CURR_X_COUNT, val)
+#define bfin_read_DMA7_CURR_Y_COUNT()  bfin_read16(DMA7_CURR_Y_COUNT)
+#define bfin_write_DMA7_CURR_Y_COUNT(val) bfin_write16(DMA7_CURR_Y_COUNT, val)
+#define bfin_read_DMA1_TC_PER()        bfin_read16(DMA1_TC_PER)
+#define bfin_write_DMA1_TC_PER(val)    bfin_write16(DMA1_TC_PER, val)
+#define bfin_read_DMA1_TC_CNT()        bfin_read16(DMA1_TC_CNT)
+#define bfin_write_DMA1_TC_CNT(val)    bfin_write16(DMA1_TC_CNT, val)
+#define bfin_read_DMA8_NEXT_DESC_PTR() bfin_readPTR(DMA8_NEXT_DESC_PTR)
+#define bfin_write_DMA8_NEXT_DESC_PTR(val) bfin_writePTR(DMA8_NEXT_DESC_PTR, val)
+#define bfin_read_DMA8_START_ADDR()    bfin_readPTR(DMA8_START_ADDR)
+#define bfin_write_DMA8_START_ADDR(val) bfin_writePTR(DMA8_START_ADDR, val)
+#define bfin_read_DMA8_CONFIG()        bfin_read16(DMA8_CONFIG)
+#define bfin_write_DMA8_CONFIG(val)    bfin_write16(DMA8_CONFIG, val)
+#define bfin_read_DMA8_X_COUNT()       bfin_read16(DMA8_X_COUNT)
+#define bfin_write_DMA8_X_COUNT(val)   bfin_write16(DMA8_X_COUNT, val)
+#define bfin_read_DMA8_X_MODIFY()      bfin_read16(DMA8_X_MODIFY)
+#define bfin_write_DMA8_X_MODIFY(val)  bfin_write16(DMA8_X_MODIFY, val)
+#define bfin_read_DMA8_Y_COUNT()       bfin_read16(DMA8_Y_COUNT)
+#define bfin_write_DMA8_Y_COUNT(val)   bfin_write16(DMA8_Y_COUNT, val)
+#define bfin_read_DMA8_Y_MODIFY()      bfin_read16(DMA8_Y_MODIFY)
+#define bfin_write_DMA8_Y_MODIFY(val)  bfin_write16(DMA8_Y_MODIFY, val)
+#define bfin_read_DMA8_CURR_DESC_PTR() bfin_readPTR(DMA8_CURR_DESC_PTR)
+#define bfin_write_DMA8_CURR_DESC_PTR(val) bfin_writePTR(DMA8_CURR_DESC_PTR, val)
+#define bfin_read_DMA8_CURR_ADDR()     bfin_readPTR(DMA8_CURR_ADDR)
+#define bfin_write_DMA8_CURR_ADDR(val) bfin_writePTR(DMA8_CURR_ADDR, val)
+#define bfin_read_DMA8_IRQ_STATUS()    bfin_read16(DMA8_IRQ_STATUS)
+#define bfin_write_DMA8_IRQ_STATUS(val) bfin_write16(DMA8_IRQ_STATUS, val)
+#define bfin_read_DMA8_PERIPHERAL_MAP() bfin_read16(DMA8_PERIPHERAL_MAP)
+#define bfin_write_DMA8_PERIPHERAL_MAP(val) bfin_write16(DMA8_PERIPHERAL_MAP, val)
+#define bfin_read_DMA8_CURR_X_COUNT()  bfin_read16(DMA8_CURR_X_COUNT)
+#define bfin_write_DMA8_CURR_X_COUNT(val) bfin_write16(DMA8_CURR_X_COUNT, val)
+#define bfin_read_DMA8_CURR_Y_COUNT()  bfin_read16(DMA8_CURR_Y_COUNT)
+#define bfin_write_DMA8_CURR_Y_COUNT(val) bfin_write16(DMA8_CURR_Y_COUNT, val)
+#define bfin_read_DMA9_NEXT_DESC_PTR() bfin_readPTR(DMA9_NEXT_DESC_PTR)
+#define bfin_write_DMA9_NEXT_DESC_PTR(val) bfin_writePTR(DMA9_NEXT_DESC_PTR, val)
+#define bfin_read_DMA9_START_ADDR()    bfin_readPTR(DMA9_START_ADDR)
+#define bfin_write_DMA9_START_ADDR(val) bfin_writePTR(DMA9_START_ADDR, val)
+#define bfin_read_DMA9_CONFIG()        bfin_read16(DMA9_CONFIG)
+#define bfin_write_DMA9_CONFIG(val)    bfin_write16(DMA9_CONFIG, val)
+#define bfin_read_DMA9_X_COUNT()       bfin_read16(DMA9_X_COUNT)
+#define bfin_write_DMA9_X_COUNT(val)   bfin_write16(DMA9_X_COUNT, val)
+#define bfin_read_DMA9_X_MODIFY()      bfin_read16(DMA9_X_MODIFY)
+#define bfin_write_DMA9_X_MODIFY(val)  bfin_write16(DMA9_X_MODIFY, val)
+#define bfin_read_DMA9_Y_COUNT()       bfin_read16(DMA9_Y_COUNT)
+#define bfin_write_DMA9_Y_COUNT(val)   bfin_write16(DMA9_Y_COUNT, val)
+#define bfin_read_DMA9_Y_MODIFY()      bfin_read16(DMA9_Y_MODIFY)
+#define bfin_write_DMA9_Y_MODIFY(val)  bfin_write16(DMA9_Y_MODIFY, val)
+#define bfin_read_DMA9_CURR_DESC_PTR() bfin_readPTR(DMA9_CURR_DESC_PTR)
+#define bfin_write_DMA9_CURR_DESC_PTR(val) bfin_writePTR(DMA9_CURR_DESC_PTR, val)
+#define bfin_read_DMA9_CURR_ADDR()     bfin_readPTR(DMA9_CURR_ADDR)
+#define bfin_write_DMA9_CURR_ADDR(val) bfin_writePTR(DMA9_CURR_ADDR, val)
+#define bfin_read_DMA9_IRQ_STATUS()    bfin_read16(DMA9_IRQ_STATUS)
+#define bfin_write_DMA9_IRQ_STATUS(val) bfin_write16(DMA9_IRQ_STATUS, val)
+#define bfin_read_DMA9_PERIPHERAL_MAP() bfin_read16(DMA9_PERIPHERAL_MAP)
+#define bfin_write_DMA9_PERIPHERAL_MAP(val) bfin_write16(DMA9_PERIPHERAL_MAP, val)
+#define bfin_read_DMA9_CURR_X_COUNT()  bfin_read16(DMA9_CURR_X_COUNT)
+#define bfin_write_DMA9_CURR_X_COUNT(val) bfin_write16(DMA9_CURR_X_COUNT, val)
+#define bfin_read_DMA9_CURR_Y_COUNT()  bfin_read16(DMA9_CURR_Y_COUNT)
+#define bfin_write_DMA9_CURR_Y_COUNT(val) bfin_write16(DMA9_CURR_Y_COUNT, val)
+#define bfin_read_DMA10_NEXT_DESC_PTR() bfin_readPTR(DMA10_NEXT_DESC_PTR)
+#define bfin_write_DMA10_NEXT_DESC_PTR(val) bfin_writePTR(DMA10_NEXT_DESC_PTR, val)
+#define bfin_read_DMA10_START_ADDR()   bfin_readPTR(DMA10_START_ADDR)
+#define bfin_write_DMA10_START_ADDR(val) bfin_writePTR(DMA10_START_ADDR, val)
+#define bfin_read_DMA10_CONFIG()       bfin_read16(DMA10_CONFIG)
+#define bfin_write_DMA10_CONFIG(val)   bfin_write16(DMA10_CONFIG, val)
+#define bfin_read_DMA10_X_COUNT()      bfin_read16(DMA10_X_COUNT)
+#define bfin_write_DMA10_X_COUNT(val)  bfin_write16(DMA10_X_COUNT, val)
+#define bfin_read_DMA10_X_MODIFY()     bfin_read16(DMA10_X_MODIFY)
+#define bfin_write_DMA10_X_MODIFY(val) bfin_write16(DMA10_X_MODIFY, val)
+#define bfin_read_DMA10_Y_COUNT()      bfin_read16(DMA10_Y_COUNT)
+#define bfin_write_DMA10_Y_COUNT(val)  bfin_write16(DMA10_Y_COUNT, val)
+#define bfin_read_DMA10_Y_MODIFY()     bfin_read16(DMA10_Y_MODIFY)
+#define bfin_write_DMA10_Y_MODIFY(val) bfin_write16(DMA10_Y_MODIFY, val)
+#define bfin_read_DMA10_CURR_DESC_PTR() bfin_readPTR(DMA10_CURR_DESC_PTR)
+#define bfin_write_DMA10_CURR_DESC_PTR(val) bfin_writePTR(DMA10_CURR_DESC_PTR, val)
+#define bfin_read_DMA10_CURR_ADDR()    bfin_readPTR(DMA10_CURR_ADDR)
+#define bfin_write_DMA10_CURR_ADDR(val) bfin_writePTR(DMA10_CURR_ADDR, val)
+#define bfin_read_DMA10_IRQ_STATUS()   bfin_read16(DMA10_IRQ_STATUS)
+#define bfin_write_DMA10_IRQ_STATUS(val) bfin_write16(DMA10_IRQ_STATUS, val)
+#define bfin_read_DMA10_PERIPHERAL_MAP() bfin_read16(DMA10_PERIPHERAL_MAP)
+#define bfin_write_DMA10_PERIPHERAL_MAP(val) bfin_write16(DMA10_PERIPHERAL_MAP, val)
+#define bfin_read_DMA10_CURR_X_COUNT() bfin_read16(DMA10_CURR_X_COUNT)
+#define bfin_write_DMA10_CURR_X_COUNT(val) bfin_write16(DMA10_CURR_X_COUNT, val)
+#define bfin_read_DMA10_CURR_Y_COUNT() bfin_read16(DMA10_CURR_Y_COUNT)
+#define bfin_write_DMA10_CURR_Y_COUNT(val) bfin_write16(DMA10_CURR_Y_COUNT, val)
+#define bfin_read_DMA11_NEXT_DESC_PTR() bfin_readPTR(DMA11_NEXT_DESC_PTR)
+#define bfin_write_DMA11_NEXT_DESC_PTR(val) bfin_writePTR(DMA11_NEXT_DESC_PTR, val)
+#define bfin_read_DMA11_START_ADDR()   bfin_readPTR(DMA11_START_ADDR)
+#define bfin_write_DMA11_START_ADDR(val) bfin_writePTR(DMA11_START_ADDR, val)
+#define bfin_read_DMA11_CONFIG()       bfin_read16(DMA11_CONFIG)
+#define bfin_write_DMA11_CONFIG(val)   bfin_write16(DMA11_CONFIG, val)
+#define bfin_read_DMA11_X_COUNT()      bfin_read16(DMA11_X_COUNT)
+#define bfin_write_DMA11_X_COUNT(val)  bfin_write16(DMA11_X_COUNT, val)
+#define bfin_read_DMA11_X_MODIFY()     bfin_read16(DMA11_X_MODIFY)
+#define bfin_write_DMA11_X_MODIFY(val) bfin_write16(DMA11_X_MODIFY, val)
+#define bfin_read_DMA11_Y_COUNT()      bfin_read16(DMA11_Y_COUNT)
+#define bfin_write_DMA11_Y_COUNT(val)  bfin_write16(DMA11_Y_COUNT, val)
+#define bfin_read_DMA11_Y_MODIFY()     bfin_read16(DMA11_Y_MODIFY)
+#define bfin_write_DMA11_Y_MODIFY(val) bfin_write16(DMA11_Y_MODIFY, val)
+#define bfin_read_DMA11_CURR_DESC_PTR() bfin_readPTR(DMA11_CURR_DESC_PTR)
+#define bfin_write_DMA11_CURR_DESC_PTR(val) bfin_writePTR(DMA11_CURR_DESC_PTR, val)
+#define bfin_read_DMA11_CURR_ADDR()    bfin_readPTR(DMA11_CURR_ADDR)
+#define bfin_write_DMA11_CURR_ADDR(val) bfin_writePTR(DMA11_CURR_ADDR, val)
+#define bfin_read_DMA11_IRQ_STATUS()   bfin_read16(DMA11_IRQ_STATUS)
+#define bfin_write_DMA11_IRQ_STATUS(val) bfin_write16(DMA11_IRQ_STATUS, val)
+#define bfin_read_DMA11_PERIPHERAL_MAP() bfin_read16(DMA11_PERIPHERAL_MAP)
+#define bfin_write_DMA11_PERIPHERAL_MAP(val) bfin_write16(DMA11_PERIPHERAL_MAP, val)
+#define bfin_read_DMA11_CURR_X_COUNT() bfin_read16(DMA11_CURR_X_COUNT)
+#define bfin_write_DMA11_CURR_X_COUNT(val) bfin_write16(DMA11_CURR_X_COUNT, val)
+#define bfin_read_DMA11_CURR_Y_COUNT() bfin_read16(DMA11_CURR_Y_COUNT)
+#define bfin_write_DMA11_CURR_Y_COUNT(val) bfin_write16(DMA11_CURR_Y_COUNT, val)
+#define bfin_read_DMA12_NEXT_DESC_PTR() bfin_readPTR(DMA12_NEXT_DESC_PTR)
+#define bfin_write_DMA12_NEXT_DESC_PTR(val) bfin_writePTR(DMA12_NEXT_DESC_PTR, val)
+#define bfin_read_DMA12_START_ADDR()   bfin_readPTR(DMA12_START_ADDR)
+#define bfin_write_DMA12_START_ADDR(val) bfin_writePTR(DMA12_START_ADDR, val)
+#define bfin_read_DMA12_CONFIG()       bfin_read16(DMA12_CONFIG)
+#define bfin_write_DMA12_CONFIG(val)   bfin_write16(DMA12_CONFIG, val)
+#define bfin_read_DMA12_X_COUNT()      bfin_read16(DMA12_X_COUNT)
+#define bfin_write_DMA12_X_COUNT(val)  bfin_write16(DMA12_X_COUNT, val)
+#define bfin_read_DMA12_X_MODIFY()     bfin_read16(DMA12_X_MODIFY)
+#define bfin_write_DMA12_X_MODIFY(val) bfin_write16(DMA12_X_MODIFY, val)
+#define bfin_read_DMA12_Y_COUNT()      bfin_read16(DMA12_Y_COUNT)
+#define bfin_write_DMA12_Y_COUNT(val)  bfin_write16(DMA12_Y_COUNT, val)
+#define bfin_read_DMA12_Y_MODIFY()     bfin_read16(DMA12_Y_MODIFY)
+#define bfin_write_DMA12_Y_MODIFY(val) bfin_write16(DMA12_Y_MODIFY, val)
+#define bfin_read_DMA12_CURR_DESC_PTR() bfin_readPTR(DMA12_CURR_DESC_PTR)
+#define bfin_write_DMA12_CURR_DESC_PTR(val) bfin_writePTR(DMA12_CURR_DESC_PTR, val)
+#define bfin_read_DMA12_CURR_ADDR()    bfin_readPTR(DMA12_CURR_ADDR)
+#define bfin_write_DMA12_CURR_ADDR(val) bfin_writePTR(DMA12_CURR_ADDR, val)
+#define bfin_read_DMA12_IRQ_STATUS()   bfin_read16(DMA12_IRQ_STATUS)
+#define bfin_write_DMA12_IRQ_STATUS(val) bfin_write16(DMA12_IRQ_STATUS, val)
+#define bfin_read_DMA12_PERIPHERAL_MAP() bfin_read16(DMA12_PERIPHERAL_MAP)
+#define bfin_write_DMA12_PERIPHERAL_MAP(val) bfin_write16(DMA12_PERIPHERAL_MAP, val)
+#define bfin_read_DMA12_CURR_X_COUNT() bfin_read16(DMA12_CURR_X_COUNT)
+#define bfin_write_DMA12_CURR_X_COUNT(val) bfin_write16(DMA12_CURR_X_COUNT, val)
+#define bfin_read_DMA12_CURR_Y_COUNT() bfin_read16(DMA12_CURR_Y_COUNT)
+#define bfin_write_DMA12_CURR_Y_COUNT(val) bfin_write16(DMA12_CURR_Y_COUNT, val)
+#define bfin_read_DMA13_NEXT_DESC_PTR() bfin_readPTR(DMA13_NEXT_DESC_PTR)
+#define bfin_write_DMA13_NEXT_DESC_PTR(val) bfin_writePTR(DMA13_NEXT_DESC_PTR, val)
+#define bfin_read_DMA13_START_ADDR()   bfin_readPTR(DMA13_START_ADDR)
+#define bfin_write_DMA13_START_ADDR(val) bfin_writePTR(DMA13_START_ADDR, val)
+#define bfin_read_DMA13_CONFIG()       bfin_read16(DMA13_CONFIG)
+#define bfin_write_DMA13_CONFIG(val)   bfin_write16(DMA13_CONFIG, val)
+#define bfin_read_DMA13_X_COUNT()      bfin_read16(DMA13_X_COUNT)
+#define bfin_write_DMA13_X_COUNT(val)  bfin_write16(DMA13_X_COUNT, val)
+#define bfin_read_DMA13_X_MODIFY()     bfin_read16(DMA13_X_MODIFY)
+#define bfin_write_DMA13_X_MODIFY(val) bfin_write16(DMA13_X_MODIFY, val)
+#define bfin_read_DMA13_Y_COUNT()      bfin_read16(DMA13_Y_COUNT)
+#define bfin_write_DMA13_Y_COUNT(val)  bfin_write16(DMA13_Y_COUNT, val)
+#define bfin_read_DMA13_Y_MODIFY()     bfin_read16(DMA13_Y_MODIFY)
+#define bfin_write_DMA13_Y_MODIFY(val) bfin_write16(DMA13_Y_MODIFY, val)
+#define bfin_read_DMA13_CURR_DESC_PTR() bfin_readPTR(DMA13_CURR_DESC_PTR)
+#define bfin_write_DMA13_CURR_DESC_PTR(val) bfin_writePTR(DMA13_CURR_DESC_PTR, val)
+#define bfin_read_DMA13_CURR_ADDR()    bfin_readPTR(DMA13_CURR_ADDR)
+#define bfin_write_DMA13_CURR_ADDR(val) bfin_writePTR(DMA13_CURR_ADDR, val)
+#define bfin_read_DMA13_IRQ_STATUS()   bfin_read16(DMA13_IRQ_STATUS)
+#define bfin_write_DMA13_IRQ_STATUS(val) bfin_write16(DMA13_IRQ_STATUS, val)
+#define bfin_read_DMA13_PERIPHERAL_MAP() bfin_read16(DMA13_PERIPHERAL_MAP)
+#define bfin_write_DMA13_PERIPHERAL_MAP(val) bfin_write16(DMA13_PERIPHERAL_MAP, val)
+#define bfin_read_DMA13_CURR_X_COUNT() bfin_read16(DMA13_CURR_X_COUNT)
+#define bfin_write_DMA13_CURR_X_COUNT(val) bfin_write16(DMA13_CURR_X_COUNT, val)
+#define bfin_read_DMA13_CURR_Y_COUNT() bfin_read16(DMA13_CURR_Y_COUNT)
+#define bfin_write_DMA13_CURR_Y_COUNT(val) bfin_write16(DMA13_CURR_Y_COUNT, val)
+#define bfin_read_DMA14_NEXT_DESC_PTR() bfin_readPTR(DMA14_NEXT_DESC_PTR)
+#define bfin_write_DMA14_NEXT_DESC_PTR(val) bfin_writePTR(DMA14_NEXT_DESC_PTR, val)
+#define bfin_read_DMA14_START_ADDR()   bfin_readPTR(DMA14_START_ADDR)
+#define bfin_write_DMA14_START_ADDR(val) bfin_writePTR(DMA14_START_ADDR, val)
+#define bfin_read_DMA14_CONFIG()       bfin_read16(DMA14_CONFIG)
+#define bfin_write_DMA14_CONFIG(val)   bfin_write16(DMA14_CONFIG, val)
+#define bfin_read_DMA14_X_COUNT()      bfin_read16(DMA14_X_COUNT)
+#define bfin_write_DMA14_X_COUNT(val)  bfin_write16(DMA14_X_COUNT, val)
+#define bfin_read_DMA14_X_MODIFY()     bfin_read16(DMA14_X_MODIFY)
+#define bfin_write_DMA14_X_MODIFY(val) bfin_write16(DMA14_X_MODIFY, val)
+#define bfin_read_DMA14_Y_COUNT()      bfin_read16(DMA14_Y_COUNT)
+#define bfin_write_DMA14_Y_COUNT(val)  bfin_write16(DMA14_Y_COUNT, val)
+#define bfin_read_DMA14_Y_MODIFY()     bfin_read16(DMA14_Y_MODIFY)
+#define bfin_write_DMA14_Y_MODIFY(val) bfin_write16(DMA14_Y_MODIFY, val)
+#define bfin_read_DMA14_CURR_DESC_PTR() bfin_readPTR(DMA14_CURR_DESC_PTR)
+#define bfin_write_DMA14_CURR_DESC_PTR(val) bfin_writePTR(DMA14_CURR_DESC_PTR, val)
+#define bfin_read_DMA14_CURR_ADDR()    bfin_readPTR(DMA14_CURR_ADDR)
+#define bfin_write_DMA14_CURR_ADDR(val) bfin_writePTR(DMA14_CURR_ADDR, val)
+#define bfin_read_DMA14_IRQ_STATUS()   bfin_read16(DMA14_IRQ_STATUS)
+#define bfin_write_DMA14_IRQ_STATUS(val) bfin_write16(DMA14_IRQ_STATUS, val)
+#define bfin_read_DMA14_PERIPHERAL_MAP() bfin_read16(DMA14_PERIPHERAL_MAP)
+#define bfin_write_DMA14_PERIPHERAL_MAP(val) bfin_write16(DMA14_PERIPHERAL_MAP, val)
+#define bfin_read_DMA14_CURR_X_COUNT() bfin_read16(DMA14_CURR_X_COUNT)
+#define bfin_write_DMA14_CURR_X_COUNT(val) bfin_write16(DMA14_CURR_X_COUNT, val)
+#define bfin_read_DMA14_CURR_Y_COUNT() bfin_read16(DMA14_CURR_Y_COUNT)
+#define bfin_write_DMA14_CURR_Y_COUNT(val) bfin_write16(DMA14_CURR_Y_COUNT, val)
+#define bfin_read_DMA15_NEXT_DESC_PTR() bfin_readPTR(DMA15_NEXT_DESC_PTR)
+#define bfin_write_DMA15_NEXT_DESC_PTR(val) bfin_writePTR(DMA15_NEXT_DESC_PTR, val)
+#define bfin_read_DMA15_START_ADDR()   bfin_readPTR(DMA15_START_ADDR)
+#define bfin_write_DMA15_START_ADDR(val) bfin_writePTR(DMA15_START_ADDR, val)
+#define bfin_read_DMA15_CONFIG()       bfin_read16(DMA15_CONFIG)
+#define bfin_write_DMA15_CONFIG(val)   bfin_write16(DMA15_CONFIG, val)
+#define bfin_read_DMA15_X_COUNT()      bfin_read16(DMA15_X_COUNT)
+#define bfin_write_DMA15_X_COUNT(val)  bfin_write16(DMA15_X_COUNT, val)
+#define bfin_read_DMA15_X_MODIFY()     bfin_read16(DMA15_X_MODIFY)
+#define bfin_write_DMA15_X_MODIFY(val) bfin_write16(DMA15_X_MODIFY, val)
+#define bfin_read_DMA15_Y_COUNT()      bfin_read16(DMA15_Y_COUNT)
+#define bfin_write_DMA15_Y_COUNT(val)  bfin_write16(DMA15_Y_COUNT, val)
+#define bfin_read_DMA15_Y_MODIFY()     bfin_read16(DMA15_Y_MODIFY)
+#define bfin_write_DMA15_Y_MODIFY(val) bfin_write16(DMA15_Y_MODIFY, val)
+#define bfin_read_DMA15_CURR_DESC_PTR() bfin_readPTR(DMA15_CURR_DESC_PTR)
+#define bfin_write_DMA15_CURR_DESC_PTR(val) bfin_writePTR(DMA15_CURR_DESC_PTR, val)
+#define bfin_read_DMA15_CURR_ADDR()    bfin_readPTR(DMA15_CURR_ADDR)
+#define bfin_write_DMA15_CURR_ADDR(val) bfin_writePTR(DMA15_CURR_ADDR, val)
+#define bfin_read_DMA15_IRQ_STATUS()   bfin_read16(DMA15_IRQ_STATUS)
+#define bfin_write_DMA15_IRQ_STATUS(val) bfin_write16(DMA15_IRQ_STATUS, val)
+#define bfin_read_DMA15_PERIPHERAL_MAP() bfin_read16(DMA15_PERIPHERAL_MAP)
+#define bfin_write_DMA15_PERIPHERAL_MAP(val) bfin_write16(DMA15_PERIPHERAL_MAP, val)
+#define bfin_read_DMA15_CURR_X_COUNT() bfin_read16(DMA15_CURR_X_COUNT)
+#define bfin_write_DMA15_CURR_X_COUNT(val) bfin_write16(DMA15_CURR_X_COUNT, val)
+#define bfin_read_DMA15_CURR_Y_COUNT() bfin_read16(DMA15_CURR_Y_COUNT)
+#define bfin_write_DMA15_CURR_Y_COUNT(val) bfin_write16(DMA15_CURR_Y_COUNT, val)
+#define bfin_read_DMA16_NEXT_DESC_PTR() bfin_readPTR(DMA16_NEXT_DESC_PTR)
+#define bfin_write_DMA16_NEXT_DESC_PTR(val) bfin_writePTR(DMA16_NEXT_DESC_PTR, val)
+#define bfin_read_DMA16_START_ADDR()   bfin_readPTR(DMA16_START_ADDR)
+#define bfin_write_DMA16_START_ADDR(val) bfin_writePTR(DMA16_START_ADDR, val)
+#define bfin_read_DMA16_CONFIG()       bfin_read16(DMA16_CONFIG)
+#define bfin_write_DMA16_CONFIG(val)   bfin_write16(DMA16_CONFIG, val)
+#define bfin_read_DMA16_X_COUNT()      bfin_read16(DMA16_X_COUNT)
+#define bfin_write_DMA16_X_COUNT(val)  bfin_write16(DMA16_X_COUNT, val)
+#define bfin_read_DMA16_X_MODIFY()     bfin_read16(DMA16_X_MODIFY)
+#define bfin_write_DMA16_X_MODIFY(val) bfin_write16(DMA16_X_MODIFY, val)
+#define bfin_read_DMA16_Y_COUNT()      bfin_read16(DMA16_Y_COUNT)
+#define bfin_write_DMA16_Y_COUNT(val)  bfin_write16(DMA16_Y_COUNT, val)
+#define bfin_read_DMA16_Y_MODIFY()     bfin_read16(DMA16_Y_MODIFY)
+#define bfin_write_DMA16_Y_MODIFY(val) bfin_write16(DMA16_Y_MODIFY, val)
+#define bfin_read_DMA16_CURR_DESC_PTR() bfin_readPTR(DMA16_CURR_DESC_PTR)
+#define bfin_write_DMA16_CURR_DESC_PTR(val) bfin_writePTR(DMA16_CURR_DESC_PTR, val)
+#define bfin_read_DMA16_CURR_ADDR()    bfin_readPTR(DMA16_CURR_ADDR)
+#define bfin_write_DMA16_CURR_ADDR(val) bfin_writePTR(DMA16_CURR_ADDR, val)
+#define bfin_read_DMA16_IRQ_STATUS()   bfin_read16(DMA16_IRQ_STATUS)
+#define bfin_write_DMA16_IRQ_STATUS(val) bfin_write16(DMA16_IRQ_STATUS, val)
+#define bfin_read_DMA16_PERIPHERAL_MAP() bfin_read16(DMA16_PERIPHERAL_MAP)
+#define bfin_write_DMA16_PERIPHERAL_MAP(val) bfin_write16(DMA16_PERIPHERAL_MAP, val)
+#define bfin_read_DMA16_CURR_X_COUNT() bfin_read16(DMA16_CURR_X_COUNT)
+#define bfin_write_DMA16_CURR_X_COUNT(val) bfin_write16(DMA16_CURR_X_COUNT, val)
+#define bfin_read_DMA16_CURR_Y_COUNT() bfin_read16(DMA16_CURR_Y_COUNT)
+#define bfin_write_DMA16_CURR_Y_COUNT(val) bfin_write16(DMA16_CURR_Y_COUNT, val)
+#define bfin_read_DMA17_NEXT_DESC_PTR() bfin_readPTR(DMA17_NEXT_DESC_PTR)
+#define bfin_write_DMA17_NEXT_DESC_PTR(val) bfin_writePTR(DMA17_NEXT_DESC_PTR, val)
+#define bfin_read_DMA17_START_ADDR()   bfin_readPTR(DMA17_START_ADDR)
+#define bfin_write_DMA17_START_ADDR(val) bfin_writePTR(DMA17_START_ADDR, val)
+#define bfin_read_DMA17_CONFIG()       bfin_read16(DMA17_CONFIG)
+#define bfin_write_DMA17_CONFIG(val)   bfin_write16(DMA17_CONFIG, val)
+#define bfin_read_DMA17_X_COUNT()      bfin_read16(DMA17_X_COUNT)
+#define bfin_write_DMA17_X_COUNT(val)  bfin_write16(DMA17_X_COUNT, val)
+#define bfin_read_DMA17_X_MODIFY()     bfin_read16(DMA17_X_MODIFY)
+#define bfin_write_DMA17_X_MODIFY(val) bfin_write16(DMA17_X_MODIFY, val)
+#define bfin_read_DMA17_Y_COUNT()      bfin_read16(DMA17_Y_COUNT)
+#define bfin_write_DMA17_Y_COUNT(val)  bfin_write16(DMA17_Y_COUNT, val)
+#define bfin_read_DMA17_Y_MODIFY()     bfin_read16(DMA17_Y_MODIFY)
+#define bfin_write_DMA17_Y_MODIFY(val) bfin_write16(DMA17_Y_MODIFY, val)
+#define bfin_read_DMA17_CURR_DESC_PTR() bfin_readPTR(DMA17_CURR_DESC_PTR)
+#define bfin_write_DMA17_CURR_DESC_PTR(val) bfin_writePTR(DMA17_CURR_DESC_PTR, val)
+#define bfin_read_DMA17_CURR_ADDR()    bfin_readPTR(DMA17_CURR_ADDR)
+#define bfin_write_DMA17_CURR_ADDR(val) bfin_writePTR(DMA17_CURR_ADDR, val)
+#define bfin_read_DMA17_IRQ_STATUS()   bfin_read16(DMA17_IRQ_STATUS)
+#define bfin_write_DMA17_IRQ_STATUS(val) bfin_write16(DMA17_IRQ_STATUS, val)
+#define bfin_read_DMA17_PERIPHERAL_MAP() bfin_read16(DMA17_PERIPHERAL_MAP)
+#define bfin_write_DMA17_PERIPHERAL_MAP(val) bfin_write16(DMA17_PERIPHERAL_MAP, val)
+#define bfin_read_DMA17_CURR_X_COUNT() bfin_read16(DMA17_CURR_X_COUNT)
+#define bfin_write_DMA17_CURR_X_COUNT(val) bfin_write16(DMA17_CURR_X_COUNT, val)
+#define bfin_read_DMA17_CURR_Y_COUNT() bfin_read16(DMA17_CURR_Y_COUNT)
+#define bfin_write_DMA17_CURR_Y_COUNT(val) bfin_write16(DMA17_CURR_Y_COUNT, val)
+#define bfin_read_DMA18_NEXT_DESC_PTR() bfin_readPTR(DMA18_NEXT_DESC_PTR)
+#define bfin_write_DMA18_NEXT_DESC_PTR(val) bfin_writePTR(DMA18_NEXT_DESC_PTR, val)
+#define bfin_read_DMA18_START_ADDR()   bfin_readPTR(DMA18_START_ADDR)
+#define bfin_write_DMA18_START_ADDR(val) bfin_writePTR(DMA18_START_ADDR, val)
+#define bfin_read_DMA18_CONFIG()       bfin_read16(DMA18_CONFIG)
+#define bfin_write_DMA18_CONFIG(val)   bfin_write16(DMA18_CONFIG, val)
+#define bfin_read_DMA18_X_COUNT()      bfin_read16(DMA18_X_COUNT)
+#define bfin_write_DMA18_X_COUNT(val)  bfin_write16(DMA18_X_COUNT, val)
+#define bfin_read_DMA18_X_MODIFY()     bfin_read16(DMA18_X_MODIFY)
+#define bfin_write_DMA18_X_MODIFY(val) bfin_write16(DMA18_X_MODIFY, val)
+#define bfin_read_DMA18_Y_COUNT()      bfin_read16(DMA18_Y_COUNT)
+#define bfin_write_DMA18_Y_COUNT(val)  bfin_write16(DMA18_Y_COUNT, val)
+#define bfin_read_DMA18_Y_MODIFY()     bfin_read16(DMA18_Y_MODIFY)
+#define bfin_write_DMA18_Y_MODIFY(val) bfin_write16(DMA18_Y_MODIFY, val)
+#define bfin_read_DMA18_CURR_DESC_PTR() bfin_readPTR(DMA18_CURR_DESC_PTR)
+#define bfin_write_DMA18_CURR_DESC_PTR(val) bfin_writePTR(DMA18_CURR_DESC_PTR, val)
+#define bfin_read_DMA18_CURR_ADDR()    bfin_readPTR(DMA18_CURR_ADDR)
+#define bfin_write_DMA18_CURR_ADDR(val) bfin_writePTR(DMA18_CURR_ADDR, val)
+#define bfin_read_DMA18_IRQ_STATUS()   bfin_read16(DMA18_IRQ_STATUS)
+#define bfin_write_DMA18_IRQ_STATUS(val) bfin_write16(DMA18_IRQ_STATUS, val)
+#define bfin_read_DMA18_PERIPHERAL_MAP() bfin_read16(DMA18_PERIPHERAL_MAP)
+#define bfin_write_DMA18_PERIPHERAL_MAP(val) bfin_write16(DMA18_PERIPHERAL_MAP, val)
+#define bfin_read_DMA18_CURR_X_COUNT() bfin_read16(DMA18_CURR_X_COUNT)
+#define bfin_write_DMA18_CURR_X_COUNT(val) bfin_write16(DMA18_CURR_X_COUNT, val)
+#define bfin_read_DMA18_CURR_Y_COUNT() bfin_read16(DMA18_CURR_Y_COUNT)
+#define bfin_write_DMA18_CURR_Y_COUNT(val) bfin_write16(DMA18_CURR_Y_COUNT, val)
+#define bfin_read_DMA19_NEXT_DESC_PTR() bfin_readPTR(DMA19_NEXT_DESC_PTR)
+#define bfin_write_DMA19_NEXT_DESC_PTR(val) bfin_writePTR(DMA19_NEXT_DESC_PTR, val)
+#define bfin_read_DMA19_START_ADDR()   bfin_readPTR(DMA19_START_ADDR)
+#define bfin_write_DMA19_START_ADDR(val) bfin_writePTR(DMA19_START_ADDR, val)
+#define bfin_read_DMA19_CONFIG()       bfin_read16(DMA19_CONFIG)
+#define bfin_write_DMA19_CONFIG(val)   bfin_write16(DMA19_CONFIG, val)
+#define bfin_read_DMA19_X_COUNT()      bfin_read16(DMA19_X_COUNT)
+#define bfin_write_DMA19_X_COUNT(val)  bfin_write16(DMA19_X_COUNT, val)
+#define bfin_read_DMA19_X_MODIFY()     bfin_read16(DMA19_X_MODIFY)
+#define bfin_write_DMA19_X_MODIFY(val) bfin_write16(DMA19_X_MODIFY, val)
+#define bfin_read_DMA19_Y_COUNT()      bfin_read16(DMA19_Y_COUNT)
+#define bfin_write_DMA19_Y_COUNT(val)  bfin_write16(DMA19_Y_COUNT, val)
+#define bfin_read_DMA19_Y_MODIFY()     bfin_read16(DMA19_Y_MODIFY)
+#define bfin_write_DMA19_Y_MODIFY(val) bfin_write16(DMA19_Y_MODIFY, val)
+#define bfin_read_DMA19_CURR_DESC_PTR() bfin_readPTR(DMA19_CURR_DESC_PTR)
+#define bfin_write_DMA19_CURR_DESC_PTR(val) bfin_writePTR(DMA19_CURR_DESC_PTR, val)
+#define bfin_read_DMA19_CURR_ADDR()    bfin_readPTR(DMA19_CURR_ADDR)
+#define bfin_write_DMA19_CURR_ADDR(val) bfin_writePTR(DMA19_CURR_ADDR, val)
+#define bfin_read_DMA19_IRQ_STATUS()   bfin_read16(DMA19_IRQ_STATUS)
+#define bfin_write_DMA19_IRQ_STATUS(val) bfin_write16(DMA19_IRQ_STATUS, val)
+#define bfin_read_DMA19_PERIPHERAL_MAP() bfin_read16(DMA19_PERIPHERAL_MAP)
+#define bfin_write_DMA19_PERIPHERAL_MAP(val) bfin_write16(DMA19_PERIPHERAL_MAP, val)
+#define bfin_read_DMA19_CURR_X_COUNT() bfin_read16(DMA19_CURR_X_COUNT)
+#define bfin_write_DMA19_CURR_X_COUNT(val) bfin_write16(DMA19_CURR_X_COUNT, val)
+#define bfin_read_DMA19_CURR_Y_COUNT() bfin_read16(DMA19_CURR_Y_COUNT)
+#define bfin_write_DMA19_CURR_Y_COUNT(val) bfin_write16(DMA19_CURR_Y_COUNT, val)
+#define bfin_read_MDMA0_D0_NEXT_DESC_PTR() bfin_readPTR(MDMA0_D0_NEXT_DESC_PTR)
+#define bfin_write_MDMA0_D0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_D0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA0_D0_START_ADDR() bfin_readPTR(MDMA0_D0_START_ADDR)
+#define bfin_write_MDMA0_D0_START_ADDR(val) bfin_writePTR(MDMA0_D0_START_ADDR, val)
+#define bfin_read_MDMA0_D0_CONFIG()    bfin_read16(MDMA0_D0_CONFIG)
+#define bfin_write_MDMA0_D0_CONFIG(val) bfin_write16(MDMA0_D0_CONFIG, val)
+#define bfin_read_MDMA0_D0_X_COUNT()   bfin_read16(MDMA0_D0_X_COUNT)
+#define bfin_write_MDMA0_D0_X_COUNT(val) bfin_write16(MDMA0_D0_X_COUNT, val)
+#define bfin_read_MDMA0_D0_X_MODIFY()  bfin_read16(MDMA0_D0_X_MODIFY)
+#define bfin_write_MDMA0_D0_X_MODIFY(val) bfin_write16(MDMA0_D0_X_MODIFY, val)
+#define bfin_read_MDMA0_D0_Y_COUNT()   bfin_read16(MDMA0_D0_Y_COUNT)
+#define bfin_write_MDMA0_D0_Y_COUNT(val) bfin_write16(MDMA0_D0_Y_COUNT, val)
+#define bfin_read_MDMA0_D0_Y_MODIFY()  bfin_read16(MDMA0_D0_Y_MODIFY)
+#define bfin_write_MDMA0_D0_Y_MODIFY(val) bfin_write16(MDMA0_D0_Y_MODIFY, val)
+#define bfin_read_MDMA0_D0_CURR_DESC_PTR() bfin_readPTR(MDMA0_D0_CURR_DESC_PTR)
+#define bfin_write_MDMA0_D0_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_D0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA0_D0_CURR_ADDR() bfin_readPTR(MDMA0_D0_CURR_ADDR)
+#define bfin_write_MDMA0_D0_CURR_ADDR(val) bfin_writePTR(MDMA0_D0_CURR_ADDR, val)
+#define bfin_read_MDMA0_D0_IRQ_STATUS() bfin_read16(MDMA0_D0_IRQ_STATUS)
+#define bfin_write_MDMA0_D0_IRQ_STATUS(val) bfin_write16(MDMA0_D0_IRQ_STATUS, val)
+#define bfin_read_MDMA0_D0_PERIPHERAL_MAP() bfin_read16(MDMA0_D0_PERIPHERAL_MAP)
+#define bfin_write_MDMA0_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA0_D0_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA0_D0_CURR_X_COUNT() bfin_read16(MDMA0_D0_CURR_X_COUNT)
+#define bfin_write_MDMA0_D0_CURR_X_COUNT(val) bfin_write16(MDMA0_D0_CURR_X_COUNT, val)
+#define bfin_read_MDMA0_D0_CURR_Y_COUNT() bfin_read16(MDMA0_D0_CURR_Y_COUNT)
+#define bfin_write_MDMA0_D0_CURR_Y_COUNT(val) bfin_write16(MDMA0_D0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA0_S0_NEXT_DESC_PTR() bfin_readPTR(MDMA0_S0_NEXT_DESC_PTR)
+#define bfin_write_MDMA0_S0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_S0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA0_S0_START_ADDR() bfin_readPTR(MDMA0_S0_START_ADDR)
+#define bfin_write_MDMA0_S0_START_ADDR(val) bfin_writePTR(MDMA0_S0_START_ADDR, val)
+#define bfin_read_MDMA0_S0_CONFIG()    bfin_read16(MDMA0_S0_CONFIG)
+#define bfin_write_MDMA0_S0_CONFIG(val) bfin_write16(MDMA0_S0_CONFIG, val)
+#define bfin_read_MDMA0_S0_X_COUNT()   bfin_read16(MDMA0_S0_X_COUNT)
+#define bfin_write_MDMA0_S0_X_COUNT(val) bfin_write16(MDMA0_S0_X_COUNT, val)
+#define bfin_read_MDMA0_S0_X_MODIFY()  bfin_read16(MDMA0_S0_X_MODIFY)
+#define bfin_write_MDMA0_S0_X_MODIFY(val) bfin_write16(MDMA0_S0_X_MODIFY, val)
+#define bfin_read_MDMA0_S0_Y_COUNT()   bfin_read16(MDMA0_S0_Y_COUNT)
+#define bfin_write_MDMA0_S0_Y_COUNT(val) bfin_write16(MDMA0_S0_Y_COUNT, val)
+#define bfin_read_MDMA0_S0_Y_MODIFY()  bfin_read16(MDMA0_S0_Y_MODIFY)
+#define bfin_write_MDMA0_S0_Y_MODIFY(val) bfin_write16(MDMA0_S0_Y_MODIFY, val)
+#define bfin_read_MDMA0_S0_CURR_DESC_PTR() bfin_readPTR(MDMA0_S0_CURR_DESC_PTR)
+#define bfin_write_MDMA0_S0_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_S0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA0_S0_CURR_ADDR() bfin_readPTR(MDMA0_S0_CURR_ADDR)
+#define bfin_write_MDMA0_S0_CURR_ADDR(val) bfin_writePTR(MDMA0_S0_CURR_ADDR, val)
+#define bfin_read_MDMA0_S0_IRQ_STATUS() bfin_read16(MDMA0_S0_IRQ_STATUS)
+#define bfin_write_MDMA0_S0_IRQ_STATUS(val) bfin_write16(MDMA0_S0_IRQ_STATUS, val)
+#define bfin_read_MDMA0_S0_PERIPHERAL_MAP() bfin_read16(MDMA0_S0_PERIPHERAL_MAP)
+#define bfin_write_MDMA0_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA0_S0_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA0_S0_CURR_X_COUNT() bfin_read16(MDMA0_S0_CURR_X_COUNT)
+#define bfin_write_MDMA0_S0_CURR_X_COUNT(val) bfin_write16(MDMA0_S0_CURR_X_COUNT, val)
+#define bfin_read_MDMA0_S0_CURR_Y_COUNT() bfin_read16(MDMA0_S0_CURR_Y_COUNT)
+#define bfin_write_MDMA0_S0_CURR_Y_COUNT(val) bfin_write16(MDMA0_S0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA0_D1_NEXT_DESC_PTR() bfin_readPTR(MDMA0_D1_NEXT_DESC_PTR)
+#define bfin_write_MDMA0_D1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_D1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA0_D1_START_ADDR() bfin_readPTR(MDMA0_D1_START_ADDR)
+#define bfin_write_MDMA0_D1_START_ADDR(val) bfin_writePTR(MDMA0_D1_START_ADDR, val)
+#define bfin_read_MDMA0_D1_CONFIG()    bfin_read16(MDMA0_D1_CONFIG)
+#define bfin_write_MDMA0_D1_CONFIG(val) bfin_write16(MDMA0_D1_CONFIG, val)
+#define bfin_read_MDMA0_D1_X_COUNT()   bfin_read16(MDMA0_D1_X_COUNT)
+#define bfin_write_MDMA0_D1_X_COUNT(val) bfin_write16(MDMA0_D1_X_COUNT, val)
+#define bfin_read_MDMA0_D1_X_MODIFY()  bfin_read16(MDMA0_D1_X_MODIFY)
+#define bfin_write_MDMA0_D1_X_MODIFY(val) bfin_write16(MDMA0_D1_X_MODIFY, val)
+#define bfin_read_MDMA0_D1_Y_COUNT()   bfin_read16(MDMA0_D1_Y_COUNT)
+#define bfin_write_MDMA0_D1_Y_COUNT(val) bfin_write16(MDMA0_D1_Y_COUNT, val)
+#define bfin_read_MDMA0_D1_Y_MODIFY()  bfin_read16(MDMA0_D1_Y_MODIFY)
+#define bfin_write_MDMA0_D1_Y_MODIFY(val) bfin_write16(MDMA0_D1_Y_MODIFY, val)
+#define bfin_read_MDMA0_D1_CURR_DESC_PTR() bfin_readPTR(MDMA0_D1_CURR_DESC_PTR)
+#define bfin_write_MDMA0_D1_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_D1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA0_D1_CURR_ADDR() bfin_readPTR(MDMA0_D1_CURR_ADDR)
+#define bfin_write_MDMA0_D1_CURR_ADDR(val) bfin_writePTR(MDMA0_D1_CURR_ADDR, val)
+#define bfin_read_MDMA0_D1_IRQ_STATUS() bfin_read16(MDMA0_D1_IRQ_STATUS)
+#define bfin_write_MDMA0_D1_IRQ_STATUS(val) bfin_write16(MDMA0_D1_IRQ_STATUS, val)
+#define bfin_read_MDMA0_D1_PERIPHERAL_MAP() bfin_read16(MDMA0_D1_PERIPHERAL_MAP)
+#define bfin_write_MDMA0_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA0_D1_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA0_D1_CURR_X_COUNT() bfin_read16(MDMA0_D1_CURR_X_COUNT)
+#define bfin_write_MDMA0_D1_CURR_X_COUNT(val) bfin_write16(MDMA0_D1_CURR_X_COUNT, val)
+#define bfin_read_MDMA0_D1_CURR_Y_COUNT() bfin_read16(MDMA0_D1_CURR_Y_COUNT)
+#define bfin_write_MDMA0_D1_CURR_Y_COUNT(val) bfin_write16(MDMA0_D1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA0_S1_NEXT_DESC_PTR() bfin_readPTR(MDMA0_S1_NEXT_DESC_PTR)
+#define bfin_write_MDMA0_S1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_S1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA0_S1_START_ADDR() bfin_readPTR(MDMA0_S1_START_ADDR)
+#define bfin_write_MDMA0_S1_START_ADDR(val) bfin_writePTR(MDMA0_S1_START_ADDR, val)
+#define bfin_read_MDMA0_S1_CONFIG()    bfin_read16(MDMA0_S1_CONFIG)
+#define bfin_write_MDMA0_S1_CONFIG(val) bfin_write16(MDMA0_S1_CONFIG, val)
+#define bfin_read_MDMA0_S1_X_COUNT()   bfin_read16(MDMA0_S1_X_COUNT)
+#define bfin_write_MDMA0_S1_X_COUNT(val) bfin_write16(MDMA0_S1_X_COUNT, val)
+#define bfin_read_MDMA0_S1_X_MODIFY()  bfin_read16(MDMA0_S1_X_MODIFY)
+#define bfin_write_MDMA0_S1_X_MODIFY(val) bfin_write16(MDMA0_S1_X_MODIFY, val)
+#define bfin_read_MDMA0_S1_Y_COUNT()   bfin_read16(MDMA0_S1_Y_COUNT)
+#define bfin_write_MDMA0_S1_Y_COUNT(val) bfin_write16(MDMA0_S1_Y_COUNT, val)
+#define bfin_read_MDMA0_S1_Y_MODIFY()  bfin_read16(MDMA0_S1_Y_MODIFY)
+#define bfin_write_MDMA0_S1_Y_MODIFY(val) bfin_write16(MDMA0_S1_Y_MODIFY, val)
+#define bfin_read_MDMA0_S1_CURR_DESC_PTR() bfin_readPTR(MDMA0_S1_CURR_DESC_PTR)
+#define bfin_write_MDMA0_S1_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_S1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA0_S1_CURR_ADDR() bfin_readPTR(MDMA0_S1_CURR_ADDR)
+#define bfin_write_MDMA0_S1_CURR_ADDR(val) bfin_writePTR(MDMA0_S1_CURR_ADDR, val)
+#define bfin_read_MDMA0_S1_IRQ_STATUS() bfin_read16(MDMA0_S1_IRQ_STATUS)
+#define bfin_write_MDMA0_S1_IRQ_STATUS(val) bfin_write16(MDMA0_S1_IRQ_STATUS, val)
+#define bfin_read_MDMA0_S1_PERIPHERAL_MAP() bfin_read16(MDMA0_S1_PERIPHERAL_MAP)
+#define bfin_write_MDMA0_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA0_S1_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA0_S1_CURR_X_COUNT() bfin_read16(MDMA0_S1_CURR_X_COUNT)
+#define bfin_write_MDMA0_S1_CURR_X_COUNT(val) bfin_write16(MDMA0_S1_CURR_X_COUNT, val)
+#define bfin_read_MDMA0_S1_CURR_Y_COUNT() bfin_read16(MDMA0_S1_CURR_Y_COUNT)
+#define bfin_write_MDMA0_S1_CURR_Y_COUNT(val) bfin_write16(MDMA0_S1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA1_D0_NEXT_DESC_PTR() bfin_readPTR(MDMA1_D0_NEXT_DESC_PTR)
+#define bfin_write_MDMA1_D0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_D0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA1_D0_START_ADDR() bfin_readPTR(MDMA1_D0_START_ADDR)
+#define bfin_write_MDMA1_D0_START_ADDR(val) bfin_writePTR(MDMA1_D0_START_ADDR, val)
+#define bfin_read_MDMA1_D0_CONFIG()    bfin_read16(MDMA1_D0_CONFIG)
+#define bfin_write_MDMA1_D0_CONFIG(val) bfin_write16(MDMA1_D0_CONFIG, val)
+#define bfin_read_MDMA1_D0_X_COUNT()   bfin_read16(MDMA1_D0_X_COUNT)
+#define bfin_write_MDMA1_D0_X_COUNT(val) bfin_write16(MDMA1_D0_X_COUNT, val)
+#define bfin_read_MDMA1_D0_X_MODIFY()  bfin_read16(MDMA1_D0_X_MODIFY)
+#define bfin_write_MDMA1_D0_X_MODIFY(val) bfin_write16(MDMA1_D0_X_MODIFY, val)
+#define bfin_read_MDMA1_D0_Y_COUNT()   bfin_read16(MDMA1_D0_Y_COUNT)
+#define bfin_write_MDMA1_D0_Y_COUNT(val) bfin_write16(MDMA1_D0_Y_COUNT, val)
+#define bfin_read_MDMA1_D0_Y_MODIFY()  bfin_read16(MDMA1_D0_Y_MODIFY)
+#define bfin_write_MDMA1_D0_Y_MODIFY(val) bfin_write16(MDMA1_D0_Y_MODIFY, val)
+#define bfin_read_MDMA1_D0_CURR_DESC_PTR() bfin_readPTR(MDMA1_D0_CURR_DESC_PTR)
+#define bfin_write_MDMA1_D0_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_D0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA1_D0_CURR_ADDR() bfin_readPTR(MDMA1_D0_CURR_ADDR)
+#define bfin_write_MDMA1_D0_CURR_ADDR(val) bfin_writePTR(MDMA1_D0_CURR_ADDR, val)
+#define bfin_read_MDMA1_D0_IRQ_STATUS() bfin_read16(MDMA1_D0_IRQ_STATUS)
+#define bfin_write_MDMA1_D0_IRQ_STATUS(val) bfin_write16(MDMA1_D0_IRQ_STATUS, val)
+#define bfin_read_MDMA1_D0_PERIPHERAL_MAP() bfin_read16(MDMA1_D0_PERIPHERAL_MAP)
+#define bfin_write_MDMA1_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D0_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA1_D0_CURR_X_COUNT() bfin_read16(MDMA1_D0_CURR_X_COUNT)
+#define bfin_write_MDMA1_D0_CURR_X_COUNT(val) bfin_write16(MDMA1_D0_CURR_X_COUNT, val)
+#define bfin_read_MDMA1_D0_CURR_Y_COUNT() bfin_read16(MDMA1_D0_CURR_Y_COUNT)
+#define bfin_write_MDMA1_D0_CURR_Y_COUNT(val) bfin_write16(MDMA1_D0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA1_S0_NEXT_DESC_PTR() bfin_readPTR(MDMA1_S0_NEXT_DESC_PTR)
+#define bfin_write_MDMA1_S0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_S0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA1_S0_START_ADDR() bfin_readPTR(MDMA1_S0_START_ADDR)
+#define bfin_write_MDMA1_S0_START_ADDR(val) bfin_writePTR(MDMA1_S0_START_ADDR, val)
+#define bfin_read_MDMA1_S0_CONFIG()    bfin_read16(MDMA1_S0_CONFIG)
+#define bfin_write_MDMA1_S0_CONFIG(val) bfin_write16(MDMA1_S0_CONFIG, val)
+#define bfin_read_MDMA1_S0_X_COUNT()   bfin_read16(MDMA1_S0_X_COUNT)
+#define bfin_write_MDMA1_S0_X_COUNT(val) bfin_write16(MDMA1_S0_X_COUNT, val)
+#define bfin_read_MDMA1_S0_X_MODIFY()  bfin_read16(MDMA1_S0_X_MODIFY)
+#define bfin_write_MDMA1_S0_X_MODIFY(val) bfin_write16(MDMA1_S0_X_MODIFY, val)
+#define bfin_read_MDMA1_S0_Y_COUNT()   bfin_read16(MDMA1_S0_Y_COUNT)
+#define bfin_write_MDMA1_S0_Y_COUNT(val) bfin_write16(MDMA1_S0_Y_COUNT, val)
+#define bfin_read_MDMA1_S0_Y_MODIFY()  bfin_read16(MDMA1_S0_Y_MODIFY)
+#define bfin_write_MDMA1_S0_Y_MODIFY(val) bfin_write16(MDMA1_S0_Y_MODIFY, val)
+#define bfin_read_MDMA1_S0_CURR_DESC_PTR() bfin_readPTR(MDMA1_S0_CURR_DESC_PTR)
+#define bfin_write_MDMA1_S0_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_S0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA1_S0_CURR_ADDR() bfin_readPTR(MDMA1_S0_CURR_ADDR)
+#define bfin_write_MDMA1_S0_CURR_ADDR(val) bfin_writePTR(MDMA1_S0_CURR_ADDR, val)
+#define bfin_read_MDMA1_S0_IRQ_STATUS() bfin_read16(MDMA1_S0_IRQ_STATUS)
+#define bfin_write_MDMA1_S0_IRQ_STATUS(val) bfin_write16(MDMA1_S0_IRQ_STATUS, val)
+#define bfin_read_MDMA1_S0_PERIPHERAL_MAP() bfin_read16(MDMA1_S0_PERIPHERAL_MAP)
+#define bfin_write_MDMA1_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S0_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA1_S0_CURR_X_COUNT() bfin_read16(MDMA1_S0_CURR_X_COUNT)
+#define bfin_write_MDMA1_S0_CURR_X_COUNT(val) bfin_write16(MDMA1_S0_CURR_X_COUNT, val)
+#define bfin_read_MDMA1_S0_CURR_Y_COUNT() bfin_read16(MDMA1_S0_CURR_Y_COUNT)
+#define bfin_write_MDMA1_S0_CURR_Y_COUNT(val) bfin_write16(MDMA1_S0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA1_D1_NEXT_DESC_PTR() bfin_readPTR(MDMA1_D1_NEXT_DESC_PTR)
+#define bfin_write_MDMA1_D1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_D1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA1_D1_START_ADDR() bfin_readPTR(MDMA1_D1_START_ADDR)
+#define bfin_write_MDMA1_D1_START_ADDR(val) bfin_writePTR(MDMA1_D1_START_ADDR, val)
+#define bfin_read_MDMA1_D1_CONFIG()    bfin_read16(MDMA1_D1_CONFIG)
+#define bfin_write_MDMA1_D1_CONFIG(val) bfin_write16(MDMA1_D1_CONFIG, val)
+#define bfin_read_MDMA1_D1_X_COUNT()   bfin_read16(MDMA1_D1_X_COUNT)
+#define bfin_write_MDMA1_D1_X_COUNT(val) bfin_write16(MDMA1_D1_X_COUNT, val)
+#define bfin_read_MDMA1_D1_X_MODIFY()  bfin_read16(MDMA1_D1_X_MODIFY)
+#define bfin_write_MDMA1_D1_X_MODIFY(val) bfin_write16(MDMA1_D1_X_MODIFY, val)
+#define bfin_read_MDMA1_D1_Y_COUNT()   bfin_read16(MDMA1_D1_Y_COUNT)
+#define bfin_write_MDMA1_D1_Y_COUNT(val) bfin_write16(MDMA1_D1_Y_COUNT, val)
+#define bfin_read_MDMA1_D1_Y_MODIFY()  bfin_read16(MDMA1_D1_Y_MODIFY)
+#define bfin_write_MDMA1_D1_Y_MODIFY(val) bfin_write16(MDMA1_D1_Y_MODIFY, val)
+#define bfin_read_MDMA1_D1_CURR_DESC_PTR() bfin_readPTR(MDMA1_D1_CURR_DESC_PTR)
+#define bfin_write_MDMA1_D1_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_D1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA1_D1_CURR_ADDR() bfin_readPTR(MDMA1_D1_CURR_ADDR)
+#define bfin_write_MDMA1_D1_CURR_ADDR(val) bfin_writePTR(MDMA1_D1_CURR_ADDR, val)
+#define bfin_read_MDMA1_D1_IRQ_STATUS() bfin_read16(MDMA1_D1_IRQ_STATUS)
+#define bfin_write_MDMA1_D1_IRQ_STATUS(val) bfin_write16(MDMA1_D1_IRQ_STATUS, val)
+#define bfin_read_MDMA1_D1_PERIPHERAL_MAP() bfin_read16(MDMA1_D1_PERIPHERAL_MAP)
+#define bfin_write_MDMA1_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D1_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA1_D1_CURR_X_COUNT() bfin_read16(MDMA1_D1_CURR_X_COUNT)
+#define bfin_write_MDMA1_D1_CURR_X_COUNT(val) bfin_write16(MDMA1_D1_CURR_X_COUNT, val)
+#define bfin_read_MDMA1_D1_CURR_Y_COUNT() bfin_read16(MDMA1_D1_CURR_Y_COUNT)
+#define bfin_write_MDMA1_D1_CURR_Y_COUNT(val) bfin_write16(MDMA1_D1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA1_S1_NEXT_DESC_PTR() bfin_readPTR(MDMA1_S1_NEXT_DESC_PTR)
+#define bfin_write_MDMA1_S1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_S1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA1_S1_START_ADDR() bfin_readPTR(MDMA1_S1_START_ADDR)
+#define bfin_write_MDMA1_S1_START_ADDR(val) bfin_writePTR(MDMA1_S1_START_ADDR, val)
+#define bfin_read_MDMA1_S1_CONFIG()    bfin_read16(MDMA1_S1_CONFIG)
+#define bfin_write_MDMA1_S1_CONFIG(val) bfin_write16(MDMA1_S1_CONFIG, val)
+#define bfin_read_MDMA1_S1_X_COUNT()   bfin_read16(MDMA1_S1_X_COUNT)
+#define bfin_write_MDMA1_S1_X_COUNT(val) bfin_write16(MDMA1_S1_X_COUNT, val)
+#define bfin_read_MDMA1_S1_X_MODIFY()  bfin_read16(MDMA1_S1_X_MODIFY)
+#define bfin_write_MDMA1_S1_X_MODIFY(val) bfin_write16(MDMA1_S1_X_MODIFY, val)
+#define bfin_read_MDMA1_S1_Y_COUNT()   bfin_read16(MDMA1_S1_Y_COUNT)
+#define bfin_write_MDMA1_S1_Y_COUNT(val) bfin_write16(MDMA1_S1_Y_COUNT, val)
+#define bfin_read_MDMA1_S1_Y_MODIFY()  bfin_read16(MDMA1_S1_Y_MODIFY)
+#define bfin_write_MDMA1_S1_Y_MODIFY(val) bfin_write16(MDMA1_S1_Y_MODIFY, val)
+#define bfin_read_MDMA1_S1_CURR_DESC_PTR() bfin_readPTR(MDMA1_S1_CURR_DESC_PTR)
+#define bfin_write_MDMA1_S1_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_S1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA1_S1_CURR_ADDR() bfin_readPTR(MDMA1_S1_CURR_ADDR)
+#define bfin_write_MDMA1_S1_CURR_ADDR(val) bfin_writePTR(MDMA1_S1_CURR_ADDR, val)
+#define bfin_read_MDMA1_S1_IRQ_STATUS() bfin_read16(MDMA1_S1_IRQ_STATUS)
+#define bfin_write_MDMA1_S1_IRQ_STATUS(val) bfin_write16(MDMA1_S1_IRQ_STATUS, val)
+#define bfin_read_MDMA1_S1_PERIPHERAL_MAP() bfin_read16(MDMA1_S1_PERIPHERAL_MAP)
+#define bfin_write_MDMA1_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S1_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA1_S1_CURR_X_COUNT() bfin_read16(MDMA1_S1_CURR_X_COUNT)
+#define bfin_write_MDMA1_S1_CURR_X_COUNT(val) bfin_write16(MDMA1_S1_CURR_X_COUNT, val)
+#define bfin_read_MDMA1_S1_CURR_Y_COUNT() bfin_read16(MDMA1_S1_CURR_Y_COUNT)
+#define bfin_write_MDMA1_S1_CURR_Y_COUNT(val) bfin_write16(MDMA1_S1_CURR_Y_COUNT, val)
+#define bfin_read_PPI_CONTROL()        bfin_read16(PPI_CONTROL)
+#define bfin_write_PPI_CONTROL(val)    bfin_write16(PPI_CONTROL, val)
+#define bfin_read_PPI_STATUS()         bfin_read16(PPI_STATUS)
+#define bfin_write_PPI_STATUS(val)     bfin_write16(PPI_STATUS, val)
+#define bfin_read_PPI_DELAY()          bfin_read16(PPI_DELAY)
+#define bfin_write_PPI_DELAY(val)      bfin_write16(PPI_DELAY, val)
+#define bfin_read_PPI_COUNT()          bfin_read16(PPI_COUNT)
+#define bfin_write_PPI_COUNT(val)      bfin_write16(PPI_COUNT, val)
+#define bfin_read_PPI_FRAME()          bfin_read16(PPI_FRAME)
+#define bfin_write_PPI_FRAME(val)      bfin_write16(PPI_FRAME, val)
+#define bfin_read_TWI0_CLKDIV()        bfin_read16(TWI0_CLKDIV)
+#define bfin_write_TWI0_CLKDIV(val)    bfin_write16(TWI0_CLKDIV, val)
+#define bfin_read_TWI0_CONTROL()       bfin_read16(TWI0_CONTROL)
+#define bfin_write_TWI0_CONTROL(val)   bfin_write16(TWI0_CONTROL, val)
+#define bfin_read_TWI0_SLAVE_CTRL()    bfin_read16(TWI0_SLAVE_CTRL)
+#define bfin_write_TWI0_SLAVE_CTRL(val) bfin_write16(TWI0_SLAVE_CTRL, val)
+#define bfin_read_TWI0_SLAVE_STAT()    bfin_read16(TWI0_SLAVE_STAT)
+#define bfin_write_TWI0_SLAVE_STAT(val) bfin_write16(TWI0_SLAVE_STAT, val)
+#define bfin_read_TWI0_SLAVE_ADDR()    bfin_read16(TWI0_SLAVE_ADDR)
+#define bfin_write_TWI0_SLAVE_ADDR(val) bfin_write16(TWI0_SLAVE_ADDR, val)
+#define bfin_read_TWI0_MASTER_CTL()    bfin_read16(TWI0_MASTER_CTL)
+#define bfin_write_TWI0_MASTER_CTL(val) bfin_write16(TWI0_MASTER_CTL, val)
+#define bfin_read_TWI0_MASTER_STAT()   bfin_read16(TWI0_MASTER_STAT)
+#define bfin_write_TWI0_MASTER_STAT(val) bfin_write16(TWI0_MASTER_STAT, val)
+#define bfin_read_TWI0_MASTER_ADDR()   bfin_read16(TWI0_MASTER_ADDR)
+#define bfin_write_TWI0_MASTER_ADDR(val) bfin_write16(TWI0_MASTER_ADDR, val)
+#define bfin_read_TWI0_INT_STAT()      bfin_read16(TWI0_INT_STAT)
+#define bfin_write_TWI0_INT_STAT(val)  bfin_write16(TWI0_INT_STAT, val)
+#define bfin_read_TWI0_INT_MASK()      bfin_read16(TWI0_INT_MASK)
+#define bfin_write_TWI0_INT_MASK(val)  bfin_write16(TWI0_INT_MASK, val)
+#define bfin_read_TWI0_FIFO_CTL()      bfin_read16(TWI0_FIFO_CTL)
+#define bfin_write_TWI0_FIFO_CTL(val)  bfin_write16(TWI0_FIFO_CTL, val)
+#define bfin_read_TWI0_FIFO_STAT()     bfin_read16(TWI0_FIFO_STAT)
+#define bfin_write_TWI0_FIFO_STAT(val) bfin_write16(TWI0_FIFO_STAT, val)
+#define bfin_read_TWI0_XMT_DATA8()     bfin_read16(TWI0_XMT_DATA8)
+#define bfin_write_TWI0_XMT_DATA8(val) bfin_write16(TWI0_XMT_DATA8, val)
+#define bfin_read_TWI0_XMT_DATA16()    bfin_read16(TWI0_XMT_DATA16)
+#define bfin_write_TWI0_XMT_DATA16(val) bfin_write16(TWI0_XMT_DATA16, val)
+#define bfin_read_TWI0_RCV_DATA8()     bfin_read16(TWI0_RCV_DATA8)
+#define bfin_write_TWI0_RCV_DATA8(val) bfin_write16(TWI0_RCV_DATA8, val)
+#define bfin_read_TWI0_RCV_DATA16()    bfin_read16(TWI0_RCV_DATA16)
+#define bfin_write_TWI0_RCV_DATA16(val) bfin_write16(TWI0_RCV_DATA16, val)
+#define bfin_read_TWI1_CLKDIV()        bfin_read16(TWI1_CLKDIV)
+#define bfin_write_TWI1_CLKDIV(val)    bfin_write16(TWI1_CLKDIV, val)
+#define bfin_read_TWI1_CONTROL()       bfin_read16(TWI1_CONTROL)
+#define bfin_write_TWI1_CONTROL(val)   bfin_write16(TWI1_CONTROL, val)
+#define bfin_read_TWI1_SLAVE_CTRL()    bfin_read16(TWI1_SLAVE_CTRL)
+#define bfin_write_TWI1_SLAVE_CTRL(val) bfin_write16(TWI1_SLAVE_CTRL, val)
+#define bfin_read_TWI1_SLAVE_STAT()    bfin_read16(TWI1_SLAVE_STAT)
+#define bfin_write_TWI1_SLAVE_STAT(val) bfin_write16(TWI1_SLAVE_STAT, val)
+#define bfin_read_TWI1_SLAVE_ADDR()    bfin_read16(TWI1_SLAVE_ADDR)
+#define bfin_write_TWI1_SLAVE_ADDR(val) bfin_write16(TWI1_SLAVE_ADDR, val)
+#define bfin_read_TWI1_MASTER_CTL()    bfin_read16(TWI1_MASTER_CTL)
+#define bfin_write_TWI1_MASTER_CTL(val) bfin_write16(TWI1_MASTER_CTL, val)
+#define bfin_read_TWI1_MASTER_STAT()   bfin_read16(TWI1_MASTER_STAT)
+#define bfin_write_TWI1_MASTER_STAT(val) bfin_write16(TWI1_MASTER_STAT, val)
+#define bfin_read_TWI1_MASTER_ADDR()   bfin_read16(TWI1_MASTER_ADDR)
+#define bfin_write_TWI1_MASTER_ADDR(val) bfin_write16(TWI1_MASTER_ADDR, val)
+#define bfin_read_TWI1_INT_STAT()      bfin_read16(TWI1_INT_STAT)
+#define bfin_write_TWI1_INT_STAT(val)  bfin_write16(TWI1_INT_STAT, val)
+#define bfin_read_TWI1_INT_MASK()      bfin_read16(TWI1_INT_MASK)
+#define bfin_write_TWI1_INT_MASK(val)  bfin_write16(TWI1_INT_MASK, val)
+#define bfin_read_TWI1_FIFO_CTL()      bfin_read16(TWI1_FIFO_CTL)
+#define bfin_write_TWI1_FIFO_CTL(val)  bfin_write16(TWI1_FIFO_CTL, val)
+#define bfin_read_TWI1_FIFO_STAT()     bfin_read16(TWI1_FIFO_STAT)
+#define bfin_write_TWI1_FIFO_STAT(val) bfin_write16(TWI1_FIFO_STAT, val)
+#define bfin_read_TWI1_XMT_DATA8()     bfin_read16(TWI1_XMT_DATA8)
+#define bfin_write_TWI1_XMT_DATA8(val) bfin_write16(TWI1_XMT_DATA8, val)
+#define bfin_read_TWI1_XMT_DATA16()    bfin_read16(TWI1_XMT_DATA16)
+#define bfin_write_TWI1_XMT_DATA16(val) bfin_write16(TWI1_XMT_DATA16, val)
+#define bfin_read_TWI1_RCV_DATA8()     bfin_read16(TWI1_RCV_DATA8)
+#define bfin_write_TWI1_RCV_DATA8(val) bfin_write16(TWI1_RCV_DATA8, val)
+#define bfin_read_TWI1_RCV_DATA16()    bfin_read16(TWI1_RCV_DATA16)
+#define bfin_write_TWI1_RCV_DATA16(val) bfin_write16(TWI1_RCV_DATA16, val)
+#define bfin_read_CAN_MC1()            bfin_read16(CAN_MC1)
+#define bfin_write_CAN_MC1(val)        bfin_write16(CAN_MC1, val)
+#define bfin_read_CAN_MD1()            bfin_read16(CAN_MD1)
+#define bfin_write_CAN_MD1(val)        bfin_write16(CAN_MD1, val)
+#define bfin_read_CAN_TRS1()           bfin_read16(CAN_TRS1)
+#define bfin_write_CAN_TRS1(val)       bfin_write16(CAN_TRS1, val)
+#define bfin_read_CAN_TRR1()           bfin_read16(CAN_TRR1)
+#define bfin_write_CAN_TRR1(val)       bfin_write16(CAN_TRR1, val)
+#define bfin_read_CAN_TA1()            bfin_read16(CAN_TA1)
+#define bfin_write_CAN_TA1(val)        bfin_write16(CAN_TA1, val)
+#define bfin_read_CAN_AA1()            bfin_read16(CAN_AA1)
+#define bfin_write_CAN_AA1(val)        bfin_write16(CAN_AA1, val)
+#define bfin_read_CAN_RMP1()           bfin_read16(CAN_RMP1)
+#define bfin_write_CAN_RMP1(val)       bfin_write16(CAN_RMP1, val)
+#define bfin_read_CAN_RML1()           bfin_read16(CAN_RML1)
+#define bfin_write_CAN_RML1(val)       bfin_write16(CAN_RML1, val)
+#define bfin_read_CAN_MBTIF1()         bfin_read16(CAN_MBTIF1)
+#define bfin_write_CAN_MBTIF1(val)     bfin_write16(CAN_MBTIF1, val)
+#define bfin_read_CAN_MBRIF1()         bfin_read16(CAN_MBRIF1)
+#define bfin_write_CAN_MBRIF1(val)     bfin_write16(CAN_MBRIF1, val)
+#define bfin_read_CAN_MBIM1()          bfin_read16(CAN_MBIM1)
+#define bfin_write_CAN_MBIM1(val)      bfin_write16(CAN_MBIM1, val)
+#define bfin_read_CAN_RFH1()           bfin_read16(CAN_RFH1)
+#define bfin_write_CAN_RFH1(val)       bfin_write16(CAN_RFH1, val)
+#define bfin_read_CAN_OPSS1()          bfin_read16(CAN_OPSS1)
+#define bfin_write_CAN_OPSS1(val)      bfin_write16(CAN_OPSS1, val)
+#define bfin_read_CAN_MC2()            bfin_read16(CAN_MC2)
+#define bfin_write_CAN_MC2(val)        bfin_write16(CAN_MC2, val)
+#define bfin_read_CAN_MD2()            bfin_read16(CAN_MD2)
+#define bfin_write_CAN_MD2(val)        bfin_write16(CAN_MD2, val)
+#define bfin_read_CAN_TRS2()           bfin_read16(CAN_TRS2)
+#define bfin_write_CAN_TRS2(val)       bfin_write16(CAN_TRS2, val)
+#define bfin_read_CAN_TRR2()           bfin_read16(CAN_TRR2)
+#define bfin_write_CAN_TRR2(val)       bfin_write16(CAN_TRR2, val)
+#define bfin_read_CAN_TA2()            bfin_read16(CAN_TA2)
+#define bfin_write_CAN_TA2(val)        bfin_write16(CAN_TA2, val)
+#define bfin_read_CAN_AA2()            bfin_read16(CAN_AA2)
+#define bfin_write_CAN_AA2(val)        bfin_write16(CAN_AA2, val)
+#define bfin_read_CAN_RMP2()           bfin_read16(CAN_RMP2)
+#define bfin_write_CAN_RMP2(val)       bfin_write16(CAN_RMP2, val)
+#define bfin_read_CAN_RML2()           bfin_read16(CAN_RML2)
+#define bfin_write_CAN_RML2(val)       bfin_write16(CAN_RML2, val)
+#define bfin_read_CAN_MBTIF2()         bfin_read16(CAN_MBTIF2)
+#define bfin_write_CAN_MBTIF2(val)     bfin_write16(CAN_MBTIF2, val)
+#define bfin_read_CAN_MBRIF2()         bfin_read16(CAN_MBRIF2)
+#define bfin_write_CAN_MBRIF2(val)     bfin_write16(CAN_MBRIF2, val)
+#define bfin_read_CAN_MBIM2()          bfin_read16(CAN_MBIM2)
+#define bfin_write_CAN_MBIM2(val)      bfin_write16(CAN_MBIM2, val)
+#define bfin_read_CAN_RFH2()           bfin_read16(CAN_RFH2)
+#define bfin_write_CAN_RFH2(val)       bfin_write16(CAN_RFH2, val)
+#define bfin_read_CAN_OPSS2()          bfin_read16(CAN_OPSS2)
+#define bfin_write_CAN_OPSS2(val)      bfin_write16(CAN_OPSS2, val)
+#define bfin_read_CAN_CLOCK()          bfin_read16(CAN_CLOCK)
+#define bfin_write_CAN_CLOCK(val)      bfin_write16(CAN_CLOCK, val)
+#define bfin_read_CAN_TIMING()         bfin_read16(CAN_TIMING)
+#define bfin_write_CAN_TIMING(val)     bfin_write16(CAN_TIMING, val)
+#define bfin_read_CAN_DEBUG()          bfin_read16(CAN_DEBUG)
+#define bfin_write_CAN_DEBUG(val)      bfin_write16(CAN_DEBUG, val)
+#define bfin_read_CAN_STATUS()         bfin_read16(CAN_STATUS)
+#define bfin_write_CAN_STATUS(val)     bfin_write16(CAN_STATUS, val)
+#define bfin_read_CAN_CEC()            bfin_read16(CAN_CEC)
+#define bfin_write_CAN_CEC(val)        bfin_write16(CAN_CEC, val)
+#define bfin_read_CAN_GIS()            bfin_read16(CAN_GIS)
+#define bfin_write_CAN_GIS(val)        bfin_write16(CAN_GIS, val)
+#define bfin_read_CAN_GIM()            bfin_read16(CAN_GIM)
+#define bfin_write_CAN_GIM(val)        bfin_write16(CAN_GIM, val)
+#define bfin_read_CAN_GIF()            bfin_read16(CAN_GIF)
+#define bfin_write_CAN_GIF(val)        bfin_write16(CAN_GIF, val)
+#define bfin_read_CAN_CONTROL()        bfin_read16(CAN_CONTROL)
+#define bfin_write_CAN_CONTROL(val)    bfin_write16(CAN_CONTROL, val)
+#define bfin_read_CAN_INTR()           bfin_read16(CAN_INTR)
+#define bfin_write_CAN_INTR(val)       bfin_write16(CAN_INTR, val)
+#define bfin_read_CAN_VERSION()        bfin_read16(CAN_VERSION)
+#define bfin_write_CAN_VERSION(val)    bfin_write16(CAN_VERSION, val)
+#define bfin_read_CAN_MBTD()           bfin_read16(CAN_MBTD)
+#define bfin_write_CAN_MBTD(val)       bfin_write16(CAN_MBTD, val)
+#define bfin_read_CAN_EWR()            bfin_read16(CAN_EWR)
+#define bfin_write_CAN_EWR(val)        bfin_write16(CAN_EWR, val)
+#define bfin_read_CAN_ESR()            bfin_read16(CAN_ESR)
+#define bfin_write_CAN_ESR(val)        bfin_write16(CAN_ESR, val)
+#define bfin_read_CAN_UCREG()          bfin_read16(CAN_UCREG)
+#define bfin_write_CAN_UCREG(val)      bfin_write16(CAN_UCREG, val)
+#define bfin_read_CAN_UCCNT()          bfin_read16(CAN_UCCNT)
+#define bfin_write_CAN_UCCNT(val)      bfin_write16(CAN_UCCNT, val)
+#define bfin_read_CAN_UCRC()           bfin_read16(CAN_UCRC)
+#define bfin_write_CAN_UCRC(val)       bfin_write16(CAN_UCRC, val)
+#define bfin_read_CAN_UCCNF()          bfin_read16(CAN_UCCNF)
+#define bfin_write_CAN_UCCNF(val)      bfin_write16(CAN_UCCNF, val)
+#define bfin_read_CAN_VERSION2()       bfin_read16(CAN_VERSION2)
+#define bfin_write_CAN_VERSION2(val)   bfin_write16(CAN_VERSION2, val)
+#define bfin_read_CAN_AM00L()          bfin_read16(CAN_AM00L)
+#define bfin_write_CAN_AM00L(val)      bfin_write16(CAN_AM00L, val)
+#define bfin_read_CAN_AM00H()          bfin_read16(CAN_AM00H)
+#define bfin_write_CAN_AM00H(val)      bfin_write16(CAN_AM00H, val)
+#define bfin_read_CAN_AM01L()          bfin_read16(CAN_AM01L)
+#define bfin_write_CAN_AM01L(val)      bfin_write16(CAN_AM01L, val)
+#define bfin_read_CAN_AM01H()          bfin_read16(CAN_AM01H)
+#define bfin_write_CAN_AM01H(val)      bfin_write16(CAN_AM01H, val)
+#define bfin_read_CAN_AM02L()          bfin_read16(CAN_AM02L)
+#define bfin_write_CAN_AM02L(val)      bfin_write16(CAN_AM02L, val)
+#define bfin_read_CAN_AM02H()          bfin_read16(CAN_AM02H)
+#define bfin_write_CAN_AM02H(val)      bfin_write16(CAN_AM02H, val)
+#define bfin_read_CAN_AM03L()          bfin_read16(CAN_AM03L)
+#define bfin_write_CAN_AM03L(val)      bfin_write16(CAN_AM03L, val)
+#define bfin_read_CAN_AM03H()          bfin_read16(CAN_AM03H)
+#define bfin_write_CAN_AM03H(val)      bfin_write16(CAN_AM03H, val)
+#define bfin_read_CAN_AM04L()          bfin_read16(CAN_AM04L)
+#define bfin_write_CAN_AM04L(val)      bfin_write16(CAN_AM04L, val)
+#define bfin_read_CAN_AM04H()          bfin_read16(CAN_AM04H)
+#define bfin_write_CAN_AM04H(val)      bfin_write16(CAN_AM04H, val)
+#define bfin_read_CAN_AM05L()          bfin_read16(CAN_AM05L)
+#define bfin_write_CAN_AM05L(val)      bfin_write16(CAN_AM05L, val)
+#define bfin_read_CAN_AM05H()          bfin_read16(CAN_AM05H)
+#define bfin_write_CAN_AM05H(val)      bfin_write16(CAN_AM05H, val)
+#define bfin_read_CAN_AM06L()          bfin_read16(CAN_AM06L)
+#define bfin_write_CAN_AM06L(val)      bfin_write16(CAN_AM06L, val)
+#define bfin_read_CAN_AM06H()          bfin_read16(CAN_AM06H)
+#define bfin_write_CAN_AM06H(val)      bfin_write16(CAN_AM06H, val)
+#define bfin_read_CAN_AM07L()          bfin_read16(CAN_AM07L)
+#define bfin_write_CAN_AM07L(val)      bfin_write16(CAN_AM07L, val)
+#define bfin_read_CAN_AM07H()          bfin_read16(CAN_AM07H)
+#define bfin_write_CAN_AM07H(val)      bfin_write16(CAN_AM07H, val)
+#define bfin_read_CAN_AM08L()          bfin_read16(CAN_AM08L)
+#define bfin_write_CAN_AM08L(val)      bfin_write16(CAN_AM08L, val)
+#define bfin_read_CAN_AM08H()          bfin_read16(CAN_AM08H)
+#define bfin_write_CAN_AM08H(val)      bfin_write16(CAN_AM08H, val)
+#define bfin_read_CAN_AM09L()          bfin_read16(CAN_AM09L)
+#define bfin_write_CAN_AM09L(val)      bfin_write16(CAN_AM09L, val)
+#define bfin_read_CAN_AM09H()          bfin_read16(CAN_AM09H)
+#define bfin_write_CAN_AM09H(val)      bfin_write16(CAN_AM09H, val)
+#define bfin_read_CAN_AM10L()          bfin_read16(CAN_AM10L)
+#define bfin_write_CAN_AM10L(val)      bfin_write16(CAN_AM10L, val)
+#define bfin_read_CAN_AM10H()          bfin_read16(CAN_AM10H)
+#define bfin_write_CAN_AM10H(val)      bfin_write16(CAN_AM10H, val)
+#define bfin_read_CAN_AM11L()          bfin_read16(CAN_AM11L)
+#define bfin_write_CAN_AM11L(val)      bfin_write16(CAN_AM11L, val)
+#define bfin_read_CAN_AM11H()          bfin_read16(CAN_AM11H)
+#define bfin_write_CAN_AM11H(val)      bfin_write16(CAN_AM11H, val)
+#define bfin_read_CAN_AM12L()          bfin_read16(CAN_AM12L)
+#define bfin_write_CAN_AM12L(val)      bfin_write16(CAN_AM12L, val)
+#define bfin_read_CAN_AM12H()          bfin_read16(CAN_AM12H)
+#define bfin_write_CAN_AM12H(val)      bfin_write16(CAN_AM12H, val)
+#define bfin_read_CAN_AM13L()          bfin_read16(CAN_AM13L)
+#define bfin_write_CAN_AM13L(val)      bfin_write16(CAN_AM13L, val)
+#define bfin_read_CAN_AM13H()          bfin_read16(CAN_AM13H)
+#define bfin_write_CAN_AM13H(val)      bfin_write16(CAN_AM13H, val)
+#define bfin_read_CAN_AM14L()          bfin_read16(CAN_AM14L)
+#define bfin_write_CAN_AM14L(val)      bfin_write16(CAN_AM14L, val)
+#define bfin_read_CAN_AM14H()          bfin_read16(CAN_AM14H)
+#define bfin_write_CAN_AM14H(val)      bfin_write16(CAN_AM14H, val)
+#define bfin_read_CAN_AM15L()          bfin_read16(CAN_AM15L)
+#define bfin_write_CAN_AM15L(val)      bfin_write16(CAN_AM15L, val)
+#define bfin_read_CAN_AM15H()          bfin_read16(CAN_AM15H)
+#define bfin_write_CAN_AM15H(val)      bfin_write16(CAN_AM15H, val)
+#define bfin_read_CAN_AM16L()          bfin_read16(CAN_AM16L)
+#define bfin_write_CAN_AM16L(val)      bfin_write16(CAN_AM16L, val)
+#define bfin_read_CAN_AM16H()          bfin_read16(CAN_AM16H)
+#define bfin_write_CAN_AM16H(val)      bfin_write16(CAN_AM16H, val)
+#define bfin_read_CAN_AM17L()          bfin_read16(CAN_AM17L)
+#define bfin_write_CAN_AM17L(val)      bfin_write16(CAN_AM17L, val)
+#define bfin_read_CAN_AM17H()          bfin_read16(CAN_AM17H)
+#define bfin_write_CAN_AM17H(val)      bfin_write16(CAN_AM17H, val)
+#define bfin_read_CAN_AM18L()          bfin_read16(CAN_AM18L)
+#define bfin_write_CAN_AM18L(val)      bfin_write16(CAN_AM18L, val)
+#define bfin_read_CAN_AM18H()          bfin_read16(CAN_AM18H)
+#define bfin_write_CAN_AM18H(val)      bfin_write16(CAN_AM18H, val)
+#define bfin_read_CAN_AM19L()          bfin_read16(CAN_AM19L)
+#define bfin_write_CAN_AM19L(val)      bfin_write16(CAN_AM19L, val)
+#define bfin_read_CAN_AM19H()          bfin_read16(CAN_AM19H)
+#define bfin_write_CAN_AM19H(val)      bfin_write16(CAN_AM19H, val)
+#define bfin_read_CAN_AM20L()          bfin_read16(CAN_AM20L)
+#define bfin_write_CAN_AM20L(val)      bfin_write16(CAN_AM20L, val)
+#define bfin_read_CAN_AM20H()          bfin_read16(CAN_AM20H)
+#define bfin_write_CAN_AM20H(val)      bfin_write16(CAN_AM20H, val)
+#define bfin_read_CAN_AM21L()          bfin_read16(CAN_AM21L)
+#define bfin_write_CAN_AM21L(val)      bfin_write16(CAN_AM21L, val)
+#define bfin_read_CAN_AM21H()          bfin_read16(CAN_AM21H)
+#define bfin_write_CAN_AM21H(val)      bfin_write16(CAN_AM21H, val)
+#define bfin_read_CAN_AM22L()          bfin_read16(CAN_AM22L)
+#define bfin_write_CAN_AM22L(val)      bfin_write16(CAN_AM22L, val)
+#define bfin_read_CAN_AM22H()          bfin_read16(CAN_AM22H)
+#define bfin_write_CAN_AM22H(val)      bfin_write16(CAN_AM22H, val)
+#define bfin_read_CAN_AM23L()          bfin_read16(CAN_AM23L)
+#define bfin_write_CAN_AM23L(val)      bfin_write16(CAN_AM23L, val)
+#define bfin_read_CAN_AM23H()          bfin_read16(CAN_AM23H)
+#define bfin_write_CAN_AM23H(val)      bfin_write16(CAN_AM23H, val)
+#define bfin_read_CAN_AM24L()          bfin_read16(CAN_AM24L)
+#define bfin_write_CAN_AM24L(val)      bfin_write16(CAN_AM24L, val)
+#define bfin_read_CAN_AM24H()          bfin_read16(CAN_AM24H)
+#define bfin_write_CAN_AM24H(val)      bfin_write16(CAN_AM24H, val)
+#define bfin_read_CAN_AM25L()          bfin_read16(CAN_AM25L)
+#define bfin_write_CAN_AM25L(val)      bfin_write16(CAN_AM25L, val)
+#define bfin_read_CAN_AM25H()          bfin_read16(CAN_AM25H)
+#define bfin_write_CAN_AM25H(val)      bfin_write16(CAN_AM25H, val)
+#define bfin_read_CAN_AM26L()          bfin_read16(CAN_AM26L)
+#define bfin_write_CAN_AM26L(val)      bfin_write16(CAN_AM26L, val)
+#define bfin_read_CAN_AM26H()          bfin_read16(CAN_AM26H)
+#define bfin_write_CAN_AM26H(val)      bfin_write16(CAN_AM26H, val)
+#define bfin_read_CAN_AM27L()          bfin_read16(CAN_AM27L)
+#define bfin_write_CAN_AM27L(val)      bfin_write16(CAN_AM27L, val)
+#define bfin_read_CAN_AM27H()          bfin_read16(CAN_AM27H)
+#define bfin_write_CAN_AM27H(val)      bfin_write16(CAN_AM27H, val)
+#define bfin_read_CAN_AM28L()          bfin_read16(CAN_AM28L)
+#define bfin_write_CAN_AM28L(val)      bfin_write16(CAN_AM28L, val)
+#define bfin_read_CAN_AM28H()          bfin_read16(CAN_AM28H)
+#define bfin_write_CAN_AM28H(val)      bfin_write16(CAN_AM28H, val)
+#define bfin_read_CAN_AM29L()          bfin_read16(CAN_AM29L)
+#define bfin_write_CAN_AM29L(val)      bfin_write16(CAN_AM29L, val)
+#define bfin_read_CAN_AM29H()          bfin_read16(CAN_AM29H)
+#define bfin_write_CAN_AM29H(val)      bfin_write16(CAN_AM29H, val)
+#define bfin_read_CAN_AM30L()          bfin_read16(CAN_AM30L)
+#define bfin_write_CAN_AM30L(val)      bfin_write16(CAN_AM30L, val)
+#define bfin_read_CAN_AM30H()          bfin_read16(CAN_AM30H)
+#define bfin_write_CAN_AM30H(val)      bfin_write16(CAN_AM30H, val)
+#define bfin_read_CAN_AM31L()          bfin_read16(CAN_AM31L)
+#define bfin_write_CAN_AM31L(val)      bfin_write16(CAN_AM31L, val)
+#define bfin_read_CAN_AM31H()          bfin_read16(CAN_AM31H)
+#define bfin_write_CAN_AM31H(val)      bfin_write16(CAN_AM31H, val)
+#define bfin_read_CAN_MB00_DATA0()     bfin_read16(CAN_MB00_DATA0)
+#define bfin_write_CAN_MB00_DATA0(val) bfin_write16(CAN_MB00_DATA0, val)
+#define bfin_read_CAN_MB00_DATA1()     bfin_read16(CAN_MB00_DATA1)
+#define bfin_write_CAN_MB00_DATA1(val) bfin_write16(CAN_MB00_DATA1, val)
+#define bfin_read_CAN_MB00_DATA2()     bfin_read16(CAN_MB00_DATA2)
+#define bfin_write_CAN_MB00_DATA2(val) bfin_write16(CAN_MB00_DATA2, val)
+#define bfin_read_CAN_MB00_DATA3()     bfin_read16(CAN_MB00_DATA3)
+#define bfin_write_CAN_MB00_DATA3(val) bfin_write16(CAN_MB00_DATA3, val)
+#define bfin_read_CAN_MB00_LENGTH()    bfin_read16(CAN_MB00_LENGTH)
+#define bfin_write_CAN_MB00_LENGTH(val) bfin_write16(CAN_MB00_LENGTH, val)
+#define bfin_read_CAN_MB00_TIMESTAMP() bfin_read16(CAN_MB00_TIMESTAMP)
+#define bfin_write_CAN_MB00_TIMESTAMP(val) bfin_write16(CAN_MB00_TIMESTAMP, val)
+#define bfin_read_CAN_MB00_ID0()       bfin_read16(CAN_MB00_ID0)
+#define bfin_write_CAN_MB00_ID0(val)   bfin_write16(CAN_MB00_ID0, val)
+#define bfin_read_CAN_MB00_ID1()       bfin_read16(CAN_MB00_ID1)
+#define bfin_write_CAN_MB00_ID1(val)   bfin_write16(CAN_MB00_ID1, val)
+#define bfin_read_CAN_MB01_DATA0()     bfin_read16(CAN_MB01_DATA0)
+#define bfin_write_CAN_MB01_DATA0(val) bfin_write16(CAN_MB01_DATA0, val)
+#define bfin_read_CAN_MB01_DATA1()     bfin_read16(CAN_MB01_DATA1)
+#define bfin_write_CAN_MB01_DATA1(val) bfin_write16(CAN_MB01_DATA1, val)
+#define bfin_read_CAN_MB01_DATA2()     bfin_read16(CAN_MB01_DATA2)
+#define bfin_write_CAN_MB01_DATA2(val) bfin_write16(CAN_MB01_DATA2, val)
+#define bfin_read_CAN_MB01_DATA3()     bfin_read16(CAN_MB01_DATA3)
+#define bfin_write_CAN_MB01_DATA3(val) bfin_write16(CAN_MB01_DATA3, val)
+#define bfin_read_CAN_MB01_LENGTH()    bfin_read16(CAN_MB01_LENGTH)
+#define bfin_write_CAN_MB01_LENGTH(val) bfin_write16(CAN_MB01_LENGTH, val)
+#define bfin_read_CAN_MB01_TIMESTAMP() bfin_read16(CAN_MB01_TIMESTAMP)
+#define bfin_write_CAN_MB01_TIMESTAMP(val) bfin_write16(CAN_MB01_TIMESTAMP, val)
+#define bfin_read_CAN_MB01_ID0()       bfin_read16(CAN_MB01_ID0)
+#define bfin_write_CAN_MB01_ID0(val)   bfin_write16(CAN_MB01_ID0, val)
+#define bfin_read_CAN_MB01_ID1()       bfin_read16(CAN_MB01_ID1)
+#define bfin_write_CAN_MB01_ID1(val)   bfin_write16(CAN_MB01_ID1, val)
+#define bfin_read_CAN_MB02_DATA0()     bfin_read16(CAN_MB02_DATA0)
+#define bfin_write_CAN_MB02_DATA0(val) bfin_write16(CAN_MB02_DATA0, val)
+#define bfin_read_CAN_MB02_DATA1()     bfin_read16(CAN_MB02_DATA1)
+#define bfin_write_CAN_MB02_DATA1(val) bfin_write16(CAN_MB02_DATA1, val)
+#define bfin_read_CAN_MB02_DATA2()     bfin_read16(CAN_MB02_DATA2)
+#define bfin_write_CAN_MB02_DATA2(val) bfin_write16(CAN_MB02_DATA2, val)
+#define bfin_read_CAN_MB02_DATA3()     bfin_read16(CAN_MB02_DATA3)
+#define bfin_write_CAN_MB02_DATA3(val) bfin_write16(CAN_MB02_DATA3, val)
+#define bfin_read_CAN_MB02_LENGTH()    bfin_read16(CAN_MB02_LENGTH)
+#define bfin_write_CAN_MB02_LENGTH(val) bfin_write16(CAN_MB02_LENGTH, val)
+#define bfin_read_CAN_MB02_TIMESTAMP() bfin_read16(CAN_MB02_TIMESTAMP)
+#define bfin_write_CAN_MB02_TIMESTAMP(val) bfin_write16(CAN_MB02_TIMESTAMP, val)
+#define bfin_read_CAN_MB02_ID0()       bfin_read16(CAN_MB02_ID0)
+#define bfin_write_CAN_MB02_ID0(val)   bfin_write16(CAN_MB02_ID0, val)
+#define bfin_read_CAN_MB02_ID1()       bfin_read16(CAN_MB02_ID1)
+#define bfin_write_CAN_MB02_ID1(val)   bfin_write16(CAN_MB02_ID1, val)
+#define bfin_read_CAN_MB03_DATA0()     bfin_read16(CAN_MB03_DATA0)
+#define bfin_write_CAN_MB03_DATA0(val) bfin_write16(CAN_MB03_DATA0, val)
+#define bfin_read_CAN_MB03_DATA1()     bfin_read16(CAN_MB03_DATA1)
+#define bfin_write_CAN_MB03_DATA1(val) bfin_write16(CAN_MB03_DATA1, val)
+#define bfin_read_CAN_MB03_DATA2()     bfin_read16(CAN_MB03_DATA2)
+#define bfin_write_CAN_MB03_DATA2(val) bfin_write16(CAN_MB03_DATA2, val)
+#define bfin_read_CAN_MB03_DATA3()     bfin_read16(CAN_MB03_DATA3)
+#define bfin_write_CAN_MB03_DATA3(val) bfin_write16(CAN_MB03_DATA3, val)
+#define bfin_read_CAN_MB03_LENGTH()    bfin_read16(CAN_MB03_LENGTH)
+#define bfin_write_CAN_MB03_LENGTH(val) bfin_write16(CAN_MB03_LENGTH, val)
+#define bfin_read_CAN_MB03_TIMESTAMP() bfin_read16(CAN_MB03_TIMESTAMP)
+#define bfin_write_CAN_MB03_TIMESTAMP(val) bfin_write16(CAN_MB03_TIMESTAMP, val)
+#define bfin_read_CAN_MB03_ID0()       bfin_read16(CAN_MB03_ID0)
+#define bfin_write_CAN_MB03_ID0(val)   bfin_write16(CAN_MB03_ID0, val)
+#define bfin_read_CAN_MB03_ID1()       bfin_read16(CAN_MB03_ID1)
+#define bfin_write_CAN_MB03_ID1(val)   bfin_write16(CAN_MB03_ID1, val)
+#define bfin_read_CAN_MB04_DATA0()     bfin_read16(CAN_MB04_DATA0)
+#define bfin_write_CAN_MB04_DATA0(val) bfin_write16(CAN_MB04_DATA0, val)
+#define bfin_read_CAN_MB04_DATA1()     bfin_read16(CAN_MB04_DATA1)
+#define bfin_write_CAN_MB04_DATA1(val) bfin_write16(CAN_MB04_DATA1, val)
+#define bfin_read_CAN_MB04_DATA2()     bfin_read16(CAN_MB04_DATA2)
+#define bfin_write_CAN_MB04_DATA2(val) bfin_write16(CAN_MB04_DATA2, val)
+#define bfin_read_CAN_MB04_DATA3()     bfin_read16(CAN_MB04_DATA3)
+#define bfin_write_CAN_MB04_DATA3(val) bfin_write16(CAN_MB04_DATA3, val)
+#define bfin_read_CAN_MB04_LENGTH()    bfin_read16(CAN_MB04_LENGTH)
+#define bfin_write_CAN_MB04_LENGTH(val) bfin_write16(CAN_MB04_LENGTH, val)
+#define bfin_read_CAN_MB04_TIMESTAMP() bfin_read16(CAN_MB04_TIMESTAMP)
+#define bfin_write_CAN_MB04_TIMESTAMP(val) bfin_write16(CAN_MB04_TIMESTAMP, val)
+#define bfin_read_CAN_MB04_ID0()       bfin_read16(CAN_MB04_ID0)
+#define bfin_write_CAN_MB04_ID0(val)   bfin_write16(CAN_MB04_ID0, val)
+#define bfin_read_CAN_MB04_ID1()       bfin_read16(CAN_MB04_ID1)
+#define bfin_write_CAN_MB04_ID1(val)   bfin_write16(CAN_MB04_ID1, val)
+#define bfin_read_CAN_MB05_DATA0()     bfin_read16(CAN_MB05_DATA0)
+#define bfin_write_CAN_MB05_DATA0(val) bfin_write16(CAN_MB05_DATA0, val)
+#define bfin_read_CAN_MB05_DATA1()     bfin_read16(CAN_MB05_DATA1)
+#define bfin_write_CAN_MB05_DATA1(val) bfin_write16(CAN_MB05_DATA1, val)
+#define bfin_read_CAN_MB05_DATA2()     bfin_read16(CAN_MB05_DATA2)
+#define bfin_write_CAN_MB05_DATA2(val) bfin_write16(CAN_MB05_DATA2, val)
+#define bfin_read_CAN_MB05_DATA3()     bfin_read16(CAN_MB05_DATA3)
+#define bfin_write_CAN_MB05_DATA3(val) bfin_write16(CAN_MB05_DATA3, val)
+#define bfin_read_CAN_MB05_LENGTH()    bfin_read16(CAN_MB05_LENGTH)
+#define bfin_write_CAN_MB05_LENGTH(val) bfin_write16(CAN_MB05_LENGTH, val)
+#define bfin_read_CAN_MB05_TIMESTAMP() bfin_read16(CAN_MB05_TIMESTAMP)
+#define bfin_write_CAN_MB05_TIMESTAMP(val) bfin_write16(CAN_MB05_TIMESTAMP, val)
+#define bfin_read_CAN_MB05_ID0()       bfin_read16(CAN_MB05_ID0)
+#define bfin_write_CAN_MB05_ID0(val)   bfin_write16(CAN_MB05_ID0, val)
+#define bfin_read_CAN_MB05_ID1()       bfin_read16(CAN_MB05_ID1)
+#define bfin_write_CAN_MB05_ID1(val)   bfin_write16(CAN_MB05_ID1, val)
+#define bfin_read_CAN_MB06_DATA0()     bfin_read16(CAN_MB06_DATA0)
+#define bfin_write_CAN_MB06_DATA0(val) bfin_write16(CAN_MB06_DATA0, val)
+#define bfin_read_CAN_MB06_DATA1()     bfin_read16(CAN_MB06_DATA1)
+#define bfin_write_CAN_MB06_DATA1(val) bfin_write16(CAN_MB06_DATA1, val)
+#define bfin_read_CAN_MB06_DATA2()     bfin_read16(CAN_MB06_DATA2)
+#define bfin_write_CAN_MB06_DATA2(val) bfin_write16(CAN_MB06_DATA2, val)
+#define bfin_read_CAN_MB06_DATA3()     bfin_read16(CAN_MB06_DATA3)
+#define bfin_write_CAN_MB06_DATA3(val) bfin_write16(CAN_MB06_DATA3, val)
+#define bfin_read_CAN_MB06_LENGTH()    bfin_read16(CAN_MB06_LENGTH)
+#define bfin_write_CAN_MB06_LENGTH(val) bfin_write16(CAN_MB06_LENGTH, val)
+#define bfin_read_CAN_MB06_TIMESTAMP() bfin_read16(CAN_MB06_TIMESTAMP)
+#define bfin_write_CAN_MB06_TIMESTAMP(val) bfin_write16(CAN_MB06_TIMESTAMP, val)
+#define bfin_read_CAN_MB06_ID0()       bfin_read16(CAN_MB06_ID0)
+#define bfin_write_CAN_MB06_ID0(val)   bfin_write16(CAN_MB06_ID0, val)
+#define bfin_read_CAN_MB06_ID1()       bfin_read16(CAN_MB06_ID1)
+#define bfin_write_CAN_MB06_ID1(val)   bfin_write16(CAN_MB06_ID1, val)
+#define bfin_read_CAN_MB07_DATA0()     bfin_read16(CAN_MB07_DATA0)
+#define bfin_write_CAN_MB07_DATA0(val) bfin_write16(CAN_MB07_DATA0, val)
+#define bfin_read_CAN_MB07_DATA1()     bfin_read16(CAN_MB07_DATA1)
+#define bfin_write_CAN_MB07_DATA1(val) bfin_write16(CAN_MB07_DATA1, val)
+#define bfin_read_CAN_MB07_DATA2()     bfin_read16(CAN_MB07_DATA2)
+#define bfin_write_CAN_MB07_DATA2(val) bfin_write16(CAN_MB07_DATA2, val)
+#define bfin_read_CAN_MB07_DATA3()     bfin_read16(CAN_MB07_DATA3)
+#define bfin_write_CAN_MB07_DATA3(val) bfin_write16(CAN_MB07_DATA3, val)
+#define bfin_read_CAN_MB07_LENGTH()    bfin_read16(CAN_MB07_LENGTH)
+#define bfin_write_CAN_MB07_LENGTH(val) bfin_write16(CAN_MB07_LENGTH, val)
+#define bfin_read_CAN_MB07_TIMESTAMP() bfin_read16(CAN_MB07_TIMESTAMP)
+#define bfin_write_CAN_MB07_TIMESTAMP(val) bfin_write16(CAN_MB07_TIMESTAMP, val)
+#define bfin_read_CAN_MB07_ID0()       bfin_read16(CAN_MB07_ID0)
+#define bfin_write_CAN_MB07_ID0(val)   bfin_write16(CAN_MB07_ID0, val)
+#define bfin_read_CAN_MB07_ID1()       bfin_read16(CAN_MB07_ID1)
+#define bfin_write_CAN_MB07_ID1(val)   bfin_write16(CAN_MB07_ID1, val)
+#define bfin_read_CAN_MB08_DATA0()     bfin_read16(CAN_MB08_DATA0)
+#define bfin_write_CAN_MB08_DATA0(val) bfin_write16(CAN_MB08_DATA0, val)
+#define bfin_read_CAN_MB08_DATA1()     bfin_read16(CAN_MB08_DATA1)
+#define bfin_write_CAN_MB08_DATA1(val) bfin_write16(CAN_MB08_DATA1, val)
+#define bfin_read_CAN_MB08_DATA2()     bfin_read16(CAN_MB08_DATA2)
+#define bfin_write_CAN_MB08_DATA2(val) bfin_write16(CAN_MB08_DATA2, val)
+#define bfin_read_CAN_MB08_DATA3()     bfin_read16(CAN_MB08_DATA3)
+#define bfin_write_CAN_MB08_DATA3(val) bfin_write16(CAN_MB08_DATA3, val)
+#define bfin_read_CAN_MB08_LENGTH()    bfin_read16(CAN_MB08_LENGTH)
+#define bfin_write_CAN_MB08_LENGTH(val) bfin_write16(CAN_MB08_LENGTH, val)
+#define bfin_read_CAN_MB08_TIMESTAMP() bfin_read16(CAN_MB08_TIMESTAMP)
+#define bfin_write_CAN_MB08_TIMESTAMP(val) bfin_write16(CAN_MB08_TIMESTAMP, val)
+#define bfin_read_CAN_MB08_ID0()       bfin_read16(CAN_MB08_ID0)
+#define bfin_write_CAN_MB08_ID0(val)   bfin_write16(CAN_MB08_ID0, val)
+#define bfin_read_CAN_MB08_ID1()       bfin_read16(CAN_MB08_ID1)
+#define bfin_write_CAN_MB08_ID1(val)   bfin_write16(CAN_MB08_ID1, val)
+#define bfin_read_CAN_MB09_DATA0()     bfin_read16(CAN_MB09_DATA0)
+#define bfin_write_CAN_MB09_DATA0(val) bfin_write16(CAN_MB09_DATA0, val)
+#define bfin_read_CAN_MB09_DATA1()     bfin_read16(CAN_MB09_DATA1)
+#define bfin_write_CAN_MB09_DATA1(val) bfin_write16(CAN_MB09_DATA1, val)
+#define bfin_read_CAN_MB09_DATA2()     bfin_read16(CAN_MB09_DATA2)
+#define bfin_write_CAN_MB09_DATA2(val) bfin_write16(CAN_MB09_DATA2, val)
+#define bfin_read_CAN_MB09_DATA3()     bfin_read16(CAN_MB09_DATA3)
+#define bfin_write_CAN_MB09_DATA3(val) bfin_write16(CAN_MB09_DATA3, val)
+#define bfin_read_CAN_MB09_LENGTH()    bfin_read16(CAN_MB09_LENGTH)
+#define bfin_write_CAN_MB09_LENGTH(val) bfin_write16(CAN_MB09_LENGTH, val)
+#define bfin_read_CAN_MB09_TIMESTAMP() bfin_read16(CAN_MB09_TIMESTAMP)
+#define bfin_write_CAN_MB09_TIMESTAMP(val) bfin_write16(CAN_MB09_TIMESTAMP, val)
+#define bfin_read_CAN_MB09_ID0()       bfin_read16(CAN_MB09_ID0)
+#define bfin_write_CAN_MB09_ID0(val)   bfin_write16(CAN_MB09_ID0, val)
+#define bfin_read_CAN_MB09_ID1()       bfin_read16(CAN_MB09_ID1)
+#define bfin_write_CAN_MB09_ID1(val)   bfin_write16(CAN_MB09_ID1, val)
+#define bfin_read_CAN_MB10_DATA0()     bfin_read16(CAN_MB10_DATA0)
+#define bfin_write_CAN_MB10_DATA0(val) bfin_write16(CAN_MB10_DATA0, val)
+#define bfin_read_CAN_MB10_DATA1()     bfin_read16(CAN_MB10_DATA1)
+#define bfin_write_CAN_MB10_DATA1(val) bfin_write16(CAN_MB10_DATA1, val)
+#define bfin_read_CAN_MB10_DATA2()     bfin_read16(CAN_MB10_DATA2)
+#define bfin_write_CAN_MB10_DATA2(val) bfin_write16(CAN_MB10_DATA2, val)
+#define bfin_read_CAN_MB10_DATA3()     bfin_read16(CAN_MB10_DATA3)
+#define bfin_write_CAN_MB10_DATA3(val) bfin_write16(CAN_MB10_DATA3, val)
+#define bfin_read_CAN_MB10_LENGTH()    bfin_read16(CAN_MB10_LENGTH)
+#define bfin_write_CAN_MB10_LENGTH(val) bfin_write16(CAN_MB10_LENGTH, val)
+#define bfin_read_CAN_MB10_TIMESTAMP() bfin_read16(CAN_MB10_TIMESTAMP)
+#define bfin_write_CAN_MB10_TIMESTAMP(val) bfin_write16(CAN_MB10_TIMESTAMP, val)
+#define bfin_read_CAN_MB10_ID0()       bfin_read16(CAN_MB10_ID0)
+#define bfin_write_CAN_MB10_ID0(val)   bfin_write16(CAN_MB10_ID0, val)
+#define bfin_read_CAN_MB10_ID1()       bfin_read16(CAN_MB10_ID1)
+#define bfin_write_CAN_MB10_ID1(val)   bfin_write16(CAN_MB10_ID1, val)
+#define bfin_read_CAN_MB11_DATA0()     bfin_read16(CAN_MB11_DATA0)
+#define bfin_write_CAN_MB11_DATA0(val) bfin_write16(CAN_MB11_DATA0, val)
+#define bfin_read_CAN_MB11_DATA1()     bfin_read16(CAN_MB11_DATA1)
+#define bfin_write_CAN_MB11_DATA1(val) bfin_write16(CAN_MB11_DATA1, val)
+#define bfin_read_CAN_MB11_DATA2()     bfin_read16(CAN_MB11_DATA2)
+#define bfin_write_CAN_MB11_DATA2(val) bfin_write16(CAN_MB11_DATA2, val)
+#define bfin_read_CAN_MB11_DATA3()     bfin_read16(CAN_MB11_DATA3)
+#define bfin_write_CAN_MB11_DATA3(val) bfin_write16(CAN_MB11_DATA3, val)
+#define bfin_read_CAN_MB11_LENGTH()    bfin_read16(CAN_MB11_LENGTH)
+#define bfin_write_CAN_MB11_LENGTH(val) bfin_write16(CAN_MB11_LENGTH, val)
+#define bfin_read_CAN_MB11_TIMESTAMP() bfin_read16(CAN_MB11_TIMESTAMP)
+#define bfin_write_CAN_MB11_TIMESTAMP(val) bfin_write16(CAN_MB11_TIMESTAMP, val)
+#define bfin_read_CAN_MB11_ID0()       bfin_read16(CAN_MB11_ID0)
+#define bfin_write_CAN_MB11_ID0(val)   bfin_write16(CAN_MB11_ID0, val)
+#define bfin_read_CAN_MB11_ID1()       bfin_read16(CAN_MB11_ID1)
+#define bfin_write_CAN_MB11_ID1(val)   bfin_write16(CAN_MB11_ID1, val)
+#define bfin_read_CAN_MB12_DATA0()     bfin_read16(CAN_MB12_DATA0)
+#define bfin_write_CAN_MB12_DATA0(val) bfin_write16(CAN_MB12_DATA0, val)
+#define bfin_read_CAN_MB12_DATA1()     bfin_read16(CAN_MB12_DATA1)
+#define bfin_write_CAN_MB12_DATA1(val) bfin_write16(CAN_MB12_DATA1, val)
+#define bfin_read_CAN_MB12_DATA2()     bfin_read16(CAN_MB12_DATA2)
+#define bfin_write_CAN_MB12_DATA2(val) bfin_write16(CAN_MB12_DATA2, val)
+#define bfin_read_CAN_MB12_DATA3()     bfin_read16(CAN_MB12_DATA3)
+#define bfin_write_CAN_MB12_DATA3(val) bfin_write16(CAN_MB12_DATA3, val)
+#define bfin_read_CAN_MB12_LENGTH()    bfin_read16(CAN_MB12_LENGTH)
+#define bfin_write_CAN_MB12_LENGTH(val) bfin_write16(CAN_MB12_LENGTH, val)
+#define bfin_read_CAN_MB12_TIMESTAMP() bfin_read16(CAN_MB12_TIMESTAMP)
+#define bfin_write_CAN_MB12_TIMESTAMP(val) bfin_write16(CAN_MB12_TIMESTAMP, val)
+#define bfin_read_CAN_MB12_ID0()       bfin_read16(CAN_MB12_ID0)
+#define bfin_write_CAN_MB12_ID0(val)   bfin_write16(CAN_MB12_ID0, val)
+#define bfin_read_CAN_MB12_ID1()       bfin_read16(CAN_MB12_ID1)
+#define bfin_write_CAN_MB12_ID1(val)   bfin_write16(CAN_MB12_ID1, val)
+#define bfin_read_CAN_MB13_DATA0()     bfin_read16(CAN_MB13_DATA0)
+#define bfin_write_CAN_MB13_DATA0(val) bfin_write16(CAN_MB13_DATA0, val)
+#define bfin_read_CAN_MB13_DATA1()     bfin_read16(CAN_MB13_DATA1)
+#define bfin_write_CAN_MB13_DATA1(val) bfin_write16(CAN_MB13_DATA1, val)
+#define bfin_read_CAN_MB13_DATA2()     bfin_read16(CAN_MB13_DATA2)
+#define bfin_write_CAN_MB13_DATA2(val) bfin_write16(CAN_MB13_DATA2, val)
+#define bfin_read_CAN_MB13_DATA3()     bfin_read16(CAN_MB13_DATA3)
+#define bfin_write_CAN_MB13_DATA3(val) bfin_write16(CAN_MB13_DATA3, val)
+#define bfin_read_CAN_MB13_LENGTH()    bfin_read16(CAN_MB13_LENGTH)
+#define bfin_write_CAN_MB13_LENGTH(val) bfin_write16(CAN_MB13_LENGTH, val)
+#define bfin_read_CAN_MB13_TIMESTAMP() bfin_read16(CAN_MB13_TIMESTAMP)
+#define bfin_write_CAN_MB13_TIMESTAMP(val) bfin_write16(CAN_MB13_TIMESTAMP, val)
+#define bfin_read_CAN_MB13_ID0()       bfin_read16(CAN_MB13_ID0)
+#define bfin_write_CAN_MB13_ID0(val)   bfin_write16(CAN_MB13_ID0, val)
+#define bfin_read_CAN_MB13_ID1()       bfin_read16(CAN_MB13_ID1)
+#define bfin_write_CAN_MB13_ID1(val)   bfin_write16(CAN_MB13_ID1, val)
+#define bfin_read_CAN_MB14_DATA0()     bfin_read16(CAN_MB14_DATA0)
+#define bfin_write_CAN_MB14_DATA0(val) bfin_write16(CAN_MB14_DATA0, val)
+#define bfin_read_CAN_MB14_DATA1()     bfin_read16(CAN_MB14_DATA1)
+#define bfin_write_CAN_MB14_DATA1(val) bfin_write16(CAN_MB14_DATA1, val)
+#define bfin_read_CAN_MB14_DATA2()     bfin_read16(CAN_MB14_DATA2)
+#define bfin_write_CAN_MB14_DATA2(val) bfin_write16(CAN_MB14_DATA2, val)
+#define bfin_read_CAN_MB14_DATA3()     bfin_read16(CAN_MB14_DATA3)
+#define bfin_write_CAN_MB14_DATA3(val) bfin_write16(CAN_MB14_DATA3, val)
+#define bfin_read_CAN_MB14_LENGTH()    bfin_read16(CAN_MB14_LENGTH)
+#define bfin_write_CAN_MB14_LENGTH(val) bfin_write16(CAN_MB14_LENGTH, val)
+#define bfin_read_CAN_MB14_TIMESTAMP() bfin_read16(CAN_MB14_TIMESTAMP)
+#define bfin_write_CAN_MB14_TIMESTAMP(val) bfin_write16(CAN_MB14_TIMESTAMP, val)
+#define bfin_read_CAN_MB14_ID0()       bfin_read16(CAN_MB14_ID0)
+#define bfin_write_CAN_MB14_ID0(val)   bfin_write16(CAN_MB14_ID0, val)
+#define bfin_read_CAN_MB14_ID1()       bfin_read16(CAN_MB14_ID1)
+#define bfin_write_CAN_MB14_ID1(val)   bfin_write16(CAN_MB14_ID1, val)
+#define bfin_read_CAN_MB15_DATA0()     bfin_read16(CAN_MB15_DATA0)
+#define bfin_write_CAN_MB15_DATA0(val) bfin_write16(CAN_MB15_DATA0, val)
+#define bfin_read_CAN_MB15_DATA1()     bfin_read16(CAN_MB15_DATA1)
+#define bfin_write_CAN_MB15_DATA1(val) bfin_write16(CAN_MB15_DATA1, val)
+#define bfin_read_CAN_MB15_DATA2()     bfin_read16(CAN_MB15_DATA2)
+#define bfin_write_CAN_MB15_DATA2(val) bfin_write16(CAN_MB15_DATA2, val)
+#define bfin_read_CAN_MB15_DATA3()     bfin_read16(CAN_MB15_DATA3)
+#define bfin_write_CAN_MB15_DATA3(val) bfin_write16(CAN_MB15_DATA3, val)
+#define bfin_read_CAN_MB15_LENGTH()    bfin_read16(CAN_MB15_LENGTH)
+#define bfin_write_CAN_MB15_LENGTH(val) bfin_write16(CAN_MB15_LENGTH, val)
+#define bfin_read_CAN_MB15_TIMESTAMP() bfin_read16(CAN_MB15_TIMESTAMP)
+#define bfin_write_CAN_MB15_TIMESTAMP(val) bfin_write16(CAN_MB15_TIMESTAMP, val)
+#define bfin_read_CAN_MB15_ID0()       bfin_read16(CAN_MB15_ID0)
+#define bfin_write_CAN_MB15_ID0(val)   bfin_write16(CAN_MB15_ID0, val)
+#define bfin_read_CAN_MB15_ID1()       bfin_read16(CAN_MB15_ID1)
+#define bfin_write_CAN_MB15_ID1(val)   bfin_write16(CAN_MB15_ID1, val)
+#define bfin_read_CAN_MB16_DATA0()     bfin_read16(CAN_MB16_DATA0)
+#define bfin_write_CAN_MB16_DATA0(val) bfin_write16(CAN_MB16_DATA0, val)
+#define bfin_read_CAN_MB16_DATA1()     bfin_read16(CAN_MB16_DATA1)
+#define bfin_write_CAN_MB16_DATA1(val) bfin_write16(CAN_MB16_DATA1, val)
+#define bfin_read_CAN_MB16_DATA2()     bfin_read16(CAN_MB16_DATA2)
+#define bfin_write_CAN_MB16_DATA2(val) bfin_write16(CAN_MB16_DATA2, val)
+#define bfin_read_CAN_MB16_DATA3()     bfin_read16(CAN_MB16_DATA3)
+#define bfin_write_CAN_MB16_DATA3(val) bfin_write16(CAN_MB16_DATA3, val)
+#define bfin_read_CAN_MB16_LENGTH()    bfin_read16(CAN_MB16_LENGTH)
+#define bfin_write_CAN_MB16_LENGTH(val) bfin_write16(CAN_MB16_LENGTH, val)
+#define bfin_read_CAN_MB16_TIMESTAMP() bfin_read16(CAN_MB16_TIMESTAMP)
+#define bfin_write_CAN_MB16_TIMESTAMP(val) bfin_write16(CAN_MB16_TIMESTAMP, val)
+#define bfin_read_CAN_MB16_ID0()       bfin_read16(CAN_MB16_ID0)
+#define bfin_write_CAN_MB16_ID0(val)   bfin_write16(CAN_MB16_ID0, val)
+#define bfin_read_CAN_MB16_ID1()       bfin_read16(CAN_MB16_ID1)
+#define bfin_write_CAN_MB16_ID1(val)   bfin_write16(CAN_MB16_ID1, val)
+#define bfin_read_CAN_MB17_DATA0()     bfin_read16(CAN_MB17_DATA0)
+#define bfin_write_CAN_MB17_DATA0(val) bfin_write16(CAN_MB17_DATA0, val)
+#define bfin_read_CAN_MB17_DATA1()     bfin_read16(CAN_MB17_DATA1)
+#define bfin_write_CAN_MB17_DATA1(val) bfin_write16(CAN_MB17_DATA1, val)
+#define bfin_read_CAN_MB17_DATA2()     bfin_read16(CAN_MB17_DATA2)
+#define bfin_write_CAN_MB17_DATA2(val) bfin_write16(CAN_MB17_DATA2, val)
+#define bfin_read_CAN_MB17_DATA3()     bfin_read16(CAN_MB17_DATA3)
+#define bfin_write_CAN_MB17_DATA3(val) bfin_write16(CAN_MB17_DATA3, val)
+#define bfin_read_CAN_MB17_LENGTH()    bfin_read16(CAN_MB17_LENGTH)
+#define bfin_write_CAN_MB17_LENGTH(val) bfin_write16(CAN_MB17_LENGTH, val)
+#define bfin_read_CAN_MB17_TIMESTAMP() bfin_read16(CAN_MB17_TIMESTAMP)
+#define bfin_write_CAN_MB17_TIMESTAMP(val) bfin_write16(CAN_MB17_TIMESTAMP, val)
+#define bfin_read_CAN_MB17_ID0()       bfin_read16(CAN_MB17_ID0)
+#define bfin_write_CAN_MB17_ID0(val)   bfin_write16(CAN_MB17_ID0, val)
+#define bfin_read_CAN_MB17_ID1()       bfin_read16(CAN_MB17_ID1)
+#define bfin_write_CAN_MB17_ID1(val)   bfin_write16(CAN_MB17_ID1, val)
+#define bfin_read_CAN_MB18_DATA0()     bfin_read16(CAN_MB18_DATA0)
+#define bfin_write_CAN_MB18_DATA0(val) bfin_write16(CAN_MB18_DATA0, val)
+#define bfin_read_CAN_MB18_DATA1()     bfin_read16(CAN_MB18_DATA1)
+#define bfin_write_CAN_MB18_DATA1(val) bfin_write16(CAN_MB18_DATA1, val)
+#define bfin_read_CAN_MB18_DATA2()     bfin_read16(CAN_MB18_DATA2)
+#define bfin_write_CAN_MB18_DATA2(val) bfin_write16(CAN_MB18_DATA2, val)
+#define bfin_read_CAN_MB18_DATA3()     bfin_read16(CAN_MB18_DATA3)
+#define bfin_write_CAN_MB18_DATA3(val) bfin_write16(CAN_MB18_DATA3, val)
+#define bfin_read_CAN_MB18_LENGTH()    bfin_read16(CAN_MB18_LENGTH)
+#define bfin_write_CAN_MB18_LENGTH(val) bfin_write16(CAN_MB18_LENGTH, val)
+#define bfin_read_CAN_MB18_TIMESTAMP() bfin_read16(CAN_MB18_TIMESTAMP)
+#define bfin_write_CAN_MB18_TIMESTAMP(val) bfin_write16(CAN_MB18_TIMESTAMP, val)
+#define bfin_read_CAN_MB18_ID0()       bfin_read16(CAN_MB18_ID0)
+#define bfin_write_CAN_MB18_ID0(val)   bfin_write16(CAN_MB18_ID0, val)
+#define bfin_read_CAN_MB18_ID1()       bfin_read16(CAN_MB18_ID1)
+#define bfin_write_CAN_MB18_ID1(val)   bfin_write16(CAN_MB18_ID1, val)
+#define bfin_read_CAN_MB19_DATA0()     bfin_read16(CAN_MB19_DATA0)
+#define bfin_write_CAN_MB19_DATA0(val) bfin_write16(CAN_MB19_DATA0, val)
+#define bfin_read_CAN_MB19_DATA1()     bfin_read16(CAN_MB19_DATA1)
+#define bfin_write_CAN_MB19_DATA1(val) bfin_write16(CAN_MB19_DATA1, val)
+#define bfin_read_CAN_MB19_DATA2()     bfin_read16(CAN_MB19_DATA2)
+#define bfin_write_CAN_MB19_DATA2(val) bfin_write16(CAN_MB19_DATA2, val)
+#define bfin_read_CAN_MB19_DATA3()     bfin_read16(CAN_MB19_DATA3)
+#define bfin_write_CAN_MB19_DATA3(val) bfin_write16(CAN_MB19_DATA3, val)
+#define bfin_read_CAN_MB19_LENGTH()    bfin_read16(CAN_MB19_LENGTH)
+#define bfin_write_CAN_MB19_LENGTH(val) bfin_write16(CAN_MB19_LENGTH, val)
+#define bfin_read_CAN_MB19_TIMESTAMP() bfin_read16(CAN_MB19_TIMESTAMP)
+#define bfin_write_CAN_MB19_TIMESTAMP(val) bfin_write16(CAN_MB19_TIMESTAMP, val)
+#define bfin_read_CAN_MB19_ID0()       bfin_read16(CAN_MB19_ID0)
+#define bfin_write_CAN_MB19_ID0(val)   bfin_write16(CAN_MB19_ID0, val)
+#define bfin_read_CAN_MB19_ID1()       bfin_read16(CAN_MB19_ID1)
+#define bfin_write_CAN_MB19_ID1(val)   bfin_write16(CAN_MB19_ID1, val)
+#define bfin_read_CAN_MB20_DATA0()     bfin_read16(CAN_MB20_DATA0)
+#define bfin_write_CAN_MB20_DATA0(val) bfin_write16(CAN_MB20_DATA0, val)
+#define bfin_read_CAN_MB20_DATA1()     bfin_read16(CAN_MB20_DATA1)
+#define bfin_write_CAN_MB20_DATA1(val) bfin_write16(CAN_MB20_DATA1, val)
+#define bfin_read_CAN_MB20_DATA2()     bfin_read16(CAN_MB20_DATA2)
+#define bfin_write_CAN_MB20_DATA2(val) bfin_write16(CAN_MB20_DATA2, val)
+#define bfin_read_CAN_MB20_DATA3()     bfin_read16(CAN_MB20_DATA3)
+#define bfin_write_CAN_MB20_DATA3(val) bfin_write16(CAN_MB20_DATA3, val)
+#define bfin_read_CAN_MB20_LENGTH()    bfin_read16(CAN_MB20_LENGTH)
+#define bfin_write_CAN_MB20_LENGTH(val) bfin_write16(CAN_MB20_LENGTH, val)
+#define bfin_read_CAN_MB20_TIMESTAMP() bfin_read16(CAN_MB20_TIMESTAMP)
+#define bfin_write_CAN_MB20_TIMESTAMP(val) bfin_write16(CAN_MB20_TIMESTAMP, val)
+#define bfin_read_CAN_MB20_ID0()       bfin_read16(CAN_MB20_ID0)
+#define bfin_write_CAN_MB20_ID0(val)   bfin_write16(CAN_MB20_ID0, val)
+#define bfin_read_CAN_MB20_ID1()       bfin_read16(CAN_MB20_ID1)
+#define bfin_write_CAN_MB20_ID1(val)   bfin_write16(CAN_MB20_ID1, val)
+#define bfin_read_CAN_MB21_DATA0()     bfin_read16(CAN_MB21_DATA0)
+#define bfin_write_CAN_MB21_DATA0(val) bfin_write16(CAN_MB21_DATA0, val)
+#define bfin_read_CAN_MB21_DATA1()     bfin_read16(CAN_MB21_DATA1)
+#define bfin_write_CAN_MB21_DATA1(val) bfin_write16(CAN_MB21_DATA1, val)
+#define bfin_read_CAN_MB21_DATA2()     bfin_read16(CAN_MB21_DATA2)
+#define bfin_write_CAN_MB21_DATA2(val) bfin_write16(CAN_MB21_DATA2, val)
+#define bfin_read_CAN_MB21_DATA3()     bfin_read16(CAN_MB21_DATA3)
+#define bfin_write_CAN_MB21_DATA3(val) bfin_write16(CAN_MB21_DATA3, val)
+#define bfin_read_CAN_MB21_LENGTH()    bfin_read16(CAN_MB21_LENGTH)
+#define bfin_write_CAN_MB21_LENGTH(val) bfin_write16(CAN_MB21_LENGTH, val)
+#define bfin_read_CAN_MB21_TIMESTAMP() bfin_read16(CAN_MB21_TIMESTAMP)
+#define bfin_write_CAN_MB21_TIMESTAMP(val) bfin_write16(CAN_MB21_TIMESTAMP, val)
+#define bfin_read_CAN_MB21_ID0()       bfin_read16(CAN_MB21_ID0)
+#define bfin_write_CAN_MB21_ID0(val)   bfin_write16(CAN_MB21_ID0, val)
+#define bfin_read_CAN_MB21_ID1()       bfin_read16(CAN_MB21_ID1)
+#define bfin_write_CAN_MB21_ID1(val)   bfin_write16(CAN_MB21_ID1, val)
+#define bfin_read_CAN_MB22_DATA0()     bfin_read16(CAN_MB22_DATA0)
+#define bfin_write_CAN_MB22_DATA0(val) bfin_write16(CAN_MB22_DATA0, val)
+#define bfin_read_CAN_MB22_DATA1()     bfin_read16(CAN_MB22_DATA1)
+#define bfin_write_CAN_MB22_DATA1(val) bfin_write16(CAN_MB22_DATA1, val)
+#define bfin_read_CAN_MB22_DATA2()     bfin_read16(CAN_MB22_DATA2)
+#define bfin_write_CAN_MB22_DATA2(val) bfin_write16(CAN_MB22_DATA2, val)
+#define bfin_read_CAN_MB22_DATA3()     bfin_read16(CAN_MB22_DATA3)
+#define bfin_write_CAN_MB22_DATA3(val) bfin_write16(CAN_MB22_DATA3, val)
+#define bfin_read_CAN_MB22_LENGTH()    bfin_read16(CAN_MB22_LENGTH)
+#define bfin_write_CAN_MB22_LENGTH(val) bfin_write16(CAN_MB22_LENGTH, val)
+#define bfin_read_CAN_MB22_TIMESTAMP() bfin_read16(CAN_MB22_TIMESTAMP)
+#define bfin_write_CAN_MB22_TIMESTAMP(val) bfin_write16(CAN_MB22_TIMESTAMP, val)
+#define bfin_read_CAN_MB22_ID0()       bfin_read16(CAN_MB22_ID0)
+#define bfin_write_CAN_MB22_ID0(val)   bfin_write16(CAN_MB22_ID0, val)
+#define bfin_read_CAN_MB22_ID1()       bfin_read16(CAN_MB22_ID1)
+#define bfin_write_CAN_MB22_ID1(val)   bfin_write16(CAN_MB22_ID1, val)
+#define bfin_read_CAN_MB23_DATA0()     bfin_read16(CAN_MB23_DATA0)
+#define bfin_write_CAN_MB23_DATA0(val) bfin_write16(CAN_MB23_DATA0, val)
+#define bfin_read_CAN_MB23_DATA1()     bfin_read16(CAN_MB23_DATA1)
+#define bfin_write_CAN_MB23_DATA1(val) bfin_write16(CAN_MB23_DATA1, val)
+#define bfin_read_CAN_MB23_DATA2()     bfin_read16(CAN_MB23_DATA2)
+#define bfin_write_CAN_MB23_DATA2(val) bfin_write16(CAN_MB23_DATA2, val)
+#define bfin_read_CAN_MB23_DATA3()     bfin_read16(CAN_MB23_DATA3)
+#define bfin_write_CAN_MB23_DATA3(val) bfin_write16(CAN_MB23_DATA3, val)
+#define bfin_read_CAN_MB23_LENGTH()    bfin_read16(CAN_MB23_LENGTH)
+#define bfin_write_CAN_MB23_LENGTH(val) bfin_write16(CAN_MB23_LENGTH, val)
+#define bfin_read_CAN_MB23_TIMESTAMP() bfin_read16(CAN_MB23_TIMESTAMP)
+#define bfin_write_CAN_MB23_TIMESTAMP(val) bfin_write16(CAN_MB23_TIMESTAMP, val)
+#define bfin_read_CAN_MB23_ID0()       bfin_read16(CAN_MB23_ID0)
+#define bfin_write_CAN_MB23_ID0(val)   bfin_write16(CAN_MB23_ID0, val)
+#define bfin_read_CAN_MB23_ID1()       bfin_read16(CAN_MB23_ID1)
+#define bfin_write_CAN_MB23_ID1(val)   bfin_write16(CAN_MB23_ID1, val)
+#define bfin_read_CAN_MB24_DATA0()     bfin_read16(CAN_MB24_DATA0)
+#define bfin_write_CAN_MB24_DATA0(val) bfin_write16(CAN_MB24_DATA0, val)
+#define bfin_read_CAN_MB24_DATA1()     bfin_read16(CAN_MB24_DATA1)
+#define bfin_write_CAN_MB24_DATA1(val) bfin_write16(CAN_MB24_DATA1, val)
+#define bfin_read_CAN_MB24_DATA2()     bfin_read16(CAN_MB24_DATA2)
+#define bfin_write_CAN_MB24_DATA2(val) bfin_write16(CAN_MB24_DATA2, val)
+#define bfin_read_CAN_MB24_DATA3()     bfin_read16(CAN_MB24_DATA3)
+#define bfin_write_CAN_MB24_DATA3(val) bfin_write16(CAN_MB24_DATA3, val)
+#define bfin_read_CAN_MB24_LENGTH()    bfin_read16(CAN_MB24_LENGTH)
+#define bfin_write_CAN_MB24_LENGTH(val) bfin_write16(CAN_MB24_LENGTH, val)
+#define bfin_read_CAN_MB24_TIMESTAMP() bfin_read16(CAN_MB24_TIMESTAMP)
+#define bfin_write_CAN_MB24_TIMESTAMP(val) bfin_write16(CAN_MB24_TIMESTAMP, val)
+#define bfin_read_CAN_MB24_ID0()       bfin_read16(CAN_MB24_ID0)
+#define bfin_write_CAN_MB24_ID0(val)   bfin_write16(CAN_MB24_ID0, val)
+#define bfin_read_CAN_MB24_ID1()       bfin_read16(CAN_MB24_ID1)
+#define bfin_write_CAN_MB24_ID1(val)   bfin_write16(CAN_MB24_ID1, val)
+#define bfin_read_CAN_MB25_DATA0()     bfin_read16(CAN_MB25_DATA0)
+#define bfin_write_CAN_MB25_DATA0(val) bfin_write16(CAN_MB25_DATA0, val)
+#define bfin_read_CAN_MB25_DATA1()     bfin_read16(CAN_MB25_DATA1)
+#define bfin_write_CAN_MB25_DATA1(val) bfin_write16(CAN_MB25_DATA1, val)
+#define bfin_read_CAN_MB25_DATA2()     bfin_read16(CAN_MB25_DATA2)
+#define bfin_write_CAN_MB25_DATA2(val) bfin_write16(CAN_MB25_DATA2, val)
+#define bfin_read_CAN_MB25_DATA3()     bfin_read16(CAN_MB25_DATA3)
+#define bfin_write_CAN_MB25_DATA3(val) bfin_write16(CAN_MB25_DATA3, val)
+#define bfin_read_CAN_MB25_LENGTH()    bfin_read16(CAN_MB25_LENGTH)
+#define bfin_write_CAN_MB25_LENGTH(val) bfin_write16(CAN_MB25_LENGTH, val)
+#define bfin_read_CAN_MB25_TIMESTAMP() bfin_read16(CAN_MB25_TIMESTAMP)
+#define bfin_write_CAN_MB25_TIMESTAMP(val) bfin_write16(CAN_MB25_TIMESTAMP, val)
+#define bfin_read_CAN_MB25_ID0()       bfin_read16(CAN_MB25_ID0)
+#define bfin_write_CAN_MB25_ID0(val)   bfin_write16(CAN_MB25_ID0, val)
+#define bfin_read_CAN_MB25_ID1()       bfin_read16(CAN_MB25_ID1)
+#define bfin_write_CAN_MB25_ID1(val)   bfin_write16(CAN_MB25_ID1, val)
+#define bfin_read_CAN_MB26_DATA0()     bfin_read16(CAN_MB26_DATA0)
+#define bfin_write_CAN_MB26_DATA0(val) bfin_write16(CAN_MB26_DATA0, val)
+#define bfin_read_CAN_MB26_DATA1()     bfin_read16(CAN_MB26_DATA1)
+#define bfin_write_CAN_MB26_DATA1(val) bfin_write16(CAN_MB26_DATA1, val)
+#define bfin_read_CAN_MB26_DATA2()     bfin_read16(CAN_MB26_DATA2)
+#define bfin_write_CAN_MB26_DATA2(val) bfin_write16(CAN_MB26_DATA2, val)
+#define bfin_read_CAN_MB26_DATA3()     bfin_read16(CAN_MB26_DATA3)
+#define bfin_write_CAN_MB26_DATA3(val) bfin_write16(CAN_MB26_DATA3, val)
+#define bfin_read_CAN_MB26_LENGTH()    bfin_read16(CAN_MB26_LENGTH)
+#define bfin_write_CAN_MB26_LENGTH(val) bfin_write16(CAN_MB26_LENGTH, val)
+#define bfin_read_CAN_MB26_TIMESTAMP() bfin_read16(CAN_MB26_TIMESTAMP)
+#define bfin_write_CAN_MB26_TIMESTAMP(val) bfin_write16(CAN_MB26_TIMESTAMP, val)
+#define bfin_read_CAN_MB26_ID0()       bfin_read16(CAN_MB26_ID0)
+#define bfin_write_CAN_MB26_ID0(val)   bfin_write16(CAN_MB26_ID0, val)
+#define bfin_read_CAN_MB26_ID1()       bfin_read16(CAN_MB26_ID1)
+#define bfin_write_CAN_MB26_ID1(val)   bfin_write16(CAN_MB26_ID1, val)
+#define bfin_read_CAN_MB27_DATA0()     bfin_read16(CAN_MB27_DATA0)
+#define bfin_write_CAN_MB27_DATA0(val) bfin_write16(CAN_MB27_DATA0, val)
+#define bfin_read_CAN_MB27_DATA1()     bfin_read16(CAN_MB27_DATA1)
+#define bfin_write_CAN_MB27_DATA1(val) bfin_write16(CAN_MB27_DATA1, val)
+#define bfin_read_CAN_MB27_DATA2()     bfin_read16(CAN_MB27_DATA2)
+#define bfin_write_CAN_MB27_DATA2(val) bfin_write16(CAN_MB27_DATA2, val)
+#define bfin_read_CAN_MB27_DATA3()     bfin_read16(CAN_MB27_DATA3)
+#define bfin_write_CAN_MB27_DATA3(val) bfin_write16(CAN_MB27_DATA3, val)
+#define bfin_read_CAN_MB27_LENGTH()    bfin_read16(CAN_MB27_LENGTH)
+#define bfin_write_CAN_MB27_LENGTH(val) bfin_write16(CAN_MB27_LENGTH, val)
+#define bfin_read_CAN_MB27_TIMESTAMP() bfin_read16(CAN_MB27_TIMESTAMP)
+#define bfin_write_CAN_MB27_TIMESTAMP(val) bfin_write16(CAN_MB27_TIMESTAMP, val)
+#define bfin_read_CAN_MB27_ID0()       bfin_read16(CAN_MB27_ID0)
+#define bfin_write_CAN_MB27_ID0(val)   bfin_write16(CAN_MB27_ID0, val)
+#define bfin_read_CAN_MB27_ID1()       bfin_read16(CAN_MB27_ID1)
+#define bfin_write_CAN_MB27_ID1(val)   bfin_write16(CAN_MB27_ID1, val)
+#define bfin_read_CAN_MB28_DATA0()     bfin_read16(CAN_MB28_DATA0)
+#define bfin_write_CAN_MB28_DATA0(val) bfin_write16(CAN_MB28_DATA0, val)
+#define bfin_read_CAN_MB28_DATA1()     bfin_read16(CAN_MB28_DATA1)
+#define bfin_write_CAN_MB28_DATA1(val) bfin_write16(CAN_MB28_DATA1, val)
+#define bfin_read_CAN_MB28_DATA2()     bfin_read16(CAN_MB28_DATA2)
+#define bfin_write_CAN_MB28_DATA2(val) bfin_write16(CAN_MB28_DATA2, val)
+#define bfin_read_CAN_MB28_DATA3()     bfin_read16(CAN_MB28_DATA3)
+#define bfin_write_CAN_MB28_DATA3(val) bfin_write16(CAN_MB28_DATA3, val)
+#define bfin_read_CAN_MB28_LENGTH()    bfin_read16(CAN_MB28_LENGTH)
+#define bfin_write_CAN_MB28_LENGTH(val) bfin_write16(CAN_MB28_LENGTH, val)
+#define bfin_read_CAN_MB28_TIMESTAMP() bfin_read16(CAN_MB28_TIMESTAMP)
+#define bfin_write_CAN_MB28_TIMESTAMP(val) bfin_write16(CAN_MB28_TIMESTAMP, val)
+#define bfin_read_CAN_MB28_ID0()       bfin_read16(CAN_MB28_ID0)
+#define bfin_write_CAN_MB28_ID0(val)   bfin_write16(CAN_MB28_ID0, val)
+#define bfin_read_CAN_MB28_ID1()       bfin_read16(CAN_MB28_ID1)
+#define bfin_write_CAN_MB28_ID1(val)   bfin_write16(CAN_MB28_ID1, val)
+#define bfin_read_CAN_MB29_DATA0()     bfin_read16(CAN_MB29_DATA0)
+#define bfin_write_CAN_MB29_DATA0(val) bfin_write16(CAN_MB29_DATA0, val)
+#define bfin_read_CAN_MB29_DATA1()     bfin_read16(CAN_MB29_DATA1)
+#define bfin_write_CAN_MB29_DATA1(val) bfin_write16(CAN_MB29_DATA1, val)
+#define bfin_read_CAN_MB29_DATA2()     bfin_read16(CAN_MB29_DATA2)
+#define bfin_write_CAN_MB29_DATA2(val) bfin_write16(CAN_MB29_DATA2, val)
+#define bfin_read_CAN_MB29_DATA3()     bfin_read16(CAN_MB29_DATA3)
+#define bfin_write_CAN_MB29_DATA3(val) bfin_write16(CAN_MB29_DATA3, val)
+#define bfin_read_CAN_MB29_LENGTH()    bfin_read16(CAN_MB29_LENGTH)
+#define bfin_write_CAN_MB29_LENGTH(val) bfin_write16(CAN_MB29_LENGTH, val)
+#define bfin_read_CAN_MB29_TIMESTAMP() bfin_read16(CAN_MB29_TIMESTAMP)
+#define bfin_write_CAN_MB29_TIMESTAMP(val) bfin_write16(CAN_MB29_TIMESTAMP, val)
+#define bfin_read_CAN_MB29_ID0()       bfin_read16(CAN_MB29_ID0)
+#define bfin_write_CAN_MB29_ID0(val)   bfin_write16(CAN_MB29_ID0, val)
+#define bfin_read_CAN_MB29_ID1()       bfin_read16(CAN_MB29_ID1)
+#define bfin_write_CAN_MB29_ID1(val)   bfin_write16(CAN_MB29_ID1, val)
+#define bfin_read_CAN_MB30_DATA0()     bfin_read16(CAN_MB30_DATA0)
+#define bfin_write_CAN_MB30_DATA0(val) bfin_write16(CAN_MB30_DATA0, val)
+#define bfin_read_CAN_MB30_DATA1()     bfin_read16(CAN_MB30_DATA1)
+#define bfin_write_CAN_MB30_DATA1(val) bfin_write16(CAN_MB30_DATA1, val)
+#define bfin_read_CAN_MB30_DATA2()     bfin_read16(CAN_MB30_DATA2)
+#define bfin_write_CAN_MB30_DATA2(val) bfin_write16(CAN_MB30_DATA2, val)
+#define bfin_read_CAN_MB30_DATA3()     bfin_read16(CAN_MB30_DATA3)
+#define bfin_write_CAN_MB30_DATA3(val) bfin_write16(CAN_MB30_DATA3, val)
+#define bfin_read_CAN_MB30_LENGTH()    bfin_read16(CAN_MB30_LENGTH)
+#define bfin_write_CAN_MB30_LENGTH(val) bfin_write16(CAN_MB30_LENGTH, val)
+#define bfin_read_CAN_MB30_TIMESTAMP() bfin_read16(CAN_MB30_TIMESTAMP)
+#define bfin_write_CAN_MB30_TIMESTAMP(val) bfin_write16(CAN_MB30_TIMESTAMP, val)
+#define bfin_read_CAN_MB30_ID0()       bfin_read16(CAN_MB30_ID0)
+#define bfin_write_CAN_MB30_ID0(val)   bfin_write16(CAN_MB30_ID0, val)
+#define bfin_read_CAN_MB30_ID1()       bfin_read16(CAN_MB30_ID1)
+#define bfin_write_CAN_MB30_ID1(val)   bfin_write16(CAN_MB30_ID1, val)
+#define bfin_read_CAN_MB31_DATA0()     bfin_read16(CAN_MB31_DATA0)
+#define bfin_write_CAN_MB31_DATA0(val) bfin_write16(CAN_MB31_DATA0, val)
+#define bfin_read_CAN_MB31_DATA1()     bfin_read16(CAN_MB31_DATA1)
+#define bfin_write_CAN_MB31_DATA1(val) bfin_write16(CAN_MB31_DATA1, val)
+#define bfin_read_CAN_MB31_DATA2()     bfin_read16(CAN_MB31_DATA2)
+#define bfin_write_CAN_MB31_DATA2(val) bfin_write16(CAN_MB31_DATA2, val)
+#define bfin_read_CAN_MB31_DATA3()     bfin_read16(CAN_MB31_DATA3)
+#define bfin_write_CAN_MB31_DATA3(val) bfin_write16(CAN_MB31_DATA3, val)
+#define bfin_read_CAN_MB31_LENGTH()    bfin_read16(CAN_MB31_LENGTH)
+#define bfin_write_CAN_MB31_LENGTH(val) bfin_write16(CAN_MB31_LENGTH, val)
+#define bfin_read_CAN_MB31_TIMESTAMP() bfin_read16(CAN_MB31_TIMESTAMP)
+#define bfin_write_CAN_MB31_TIMESTAMP(val) bfin_write16(CAN_MB31_TIMESTAMP, val)
+#define bfin_read_CAN_MB31_ID0()       bfin_read16(CAN_MB31_ID0)
+#define bfin_write_CAN_MB31_ID0(val)   bfin_write16(CAN_MB31_ID0, val)
+#define bfin_read_CAN_MB31_ID1()       bfin_read16(CAN_MB31_ID1)
+#define bfin_write_CAN_MB31_ID1(val)   bfin_write16(CAN_MB31_ID1, val)
+
+/* These need to be last due to the cdef/linux inter-dependencies */
+#include <asm/irq.h>
+
+/* Writing to PLL_CTL initiates a PLL relock sequence. */
+static __inline__ void bfin_write_PLL_CTL(unsigned int val)
+{
+	unsigned long flags, iwr0, iwr1;
+
+	if (val == bfin_read_PLL_CTL())
+		return;
+
+	local_irq_save_hw(flags);
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	iwr0 = bfin_read32(SIC_IWR0);
+	iwr1 = bfin_read32(SIC_IWR1);
+	/* Only allow PLL Wakeup */
+	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
+	bfin_write32(SIC_IWR1, 0);
+
+	bfin_write16(PLL_CTL, val);
+	SSYNC();
+	asm("IDLE;");
+
+	bfin_write32(SIC_IWR0, iwr0);
+	bfin_write32(SIC_IWR1, iwr1);
+	local_irq_restore_hw(flags);
+}
+
+/* Writing to VR_CTL initiates a PLL relock sequence. */
+static __inline__ void bfin_write_VR_CTL(unsigned int val)
+{
+	unsigned long flags, iwr0, iwr1;
+
+	if (val == bfin_read_VR_CTL())
+		return;
+
+	local_irq_save_hw(flags);
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	iwr0 = bfin_read32(SIC_IWR0);
+	iwr1 = bfin_read32(SIC_IWR1);
+	/* Only allow PLL Wakeup */
+	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
+	bfin_write32(SIC_IWR1, 0);
+
+	bfin_write16(VR_CTL, val);
+	SSYNC();
+	asm("IDLE;");
+
+	bfin_write32(SIC_IWR0, iwr0);
+	bfin_write32(SIC_IWR1, iwr1);
+	local_irq_restore_hw(flags);
+}
+
+#endif
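The bfin_write_PLL_CTL()/bfin_write_VR_CTL() helpers added above wrap the Blackfin PLL relock sequence: save the SIC_IWR0/1 wakeup masks, allow only the PLL wakeup event, write the MMR, SSYNC, then IDLE until the relock wakeup fires before restoring the masks and interrupts. A minimal sketch of a caller follows; it is illustrative only and not part of this patch, and the 0x0100 BYPASS value is an assumption taken from the Blackfin PLL_CTL register layout rather than from this header.

/* Illustrative sketch, not part of the patch: put the PLL into bypass
 * through the relock-safe helper defined in the new cdef header.  The
 * BYPASS mask is assumed (PLL_CTL bit 8 per the Blackfin HRM); real
 * code should use the macro shipped in the matching defBF*.h header.
 */
static void example_enter_pll_bypass(void)
{
	unsigned int ctl = bfin_read_PLL_CTL();

	ctl |= 0x0100;			/* assumed BYPASS bit */
	bfin_write_PLL_CTL(ctl);	/* masks wakeups, writes PLL_CTL, SSYNC + IDLE, restores */
}

Since bfin_write_PLL_CTL() returns early when the requested value matches the current one, callers can invoke it unconditionally without triggering a spurious relock.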
diff --git a/arch/blackfin/mach-bf538/include/mach/cdefBF539.h b/arch/blackfin/mach-bf538/include/mach/cdefBF539.h
new file mode 100644
index 0000000..198c4bb
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/cdefBF539.h
@@ -0,0 +1,240 @@
+/* DO NOT EDIT THIS FILE
+ * Automatically generated by generate-cdef-headers.xsl
+ * DO NOT EDIT THIS FILE
+ */
+
+#ifndef _CDEF_BF539_H
+#define _CDEF_BF539_H
+
+/* Include MMRs Common to BF538 */
+#include "cdefBF538.h"
+
+
+#define bfin_read_MXVR_CONFIG()        bfin_read16(MXVR_CONFIG)
+#define bfin_write_MXVR_CONFIG(val)    bfin_write16(MXVR_CONFIG, val)
+#define bfin_read_MXVR_PLL_CTL_0()     bfin_read32(MXVR_PLL_CTL_0)
+#define bfin_write_MXVR_PLL_CTL_0(val) bfin_write32(MXVR_PLL_CTL_0, val)
+#define bfin_read_MXVR_STATE_0()       bfin_read32(MXVR_STATE_0)
+#define bfin_write_MXVR_STATE_0(val)   bfin_write32(MXVR_STATE_0, val)
+#define bfin_read_MXVR_STATE_1()       bfin_read32(MXVR_STATE_1)
+#define bfin_write_MXVR_STATE_1(val)   bfin_write32(MXVR_STATE_1, val)
+#define bfin_read_MXVR_INT_STAT_0()    bfin_read32(MXVR_INT_STAT_0)
+#define bfin_write_MXVR_INT_STAT_0(val) bfin_write32(MXVR_INT_STAT_0, val)
+#define bfin_read_MXVR_INT_STAT_1()    bfin_read32(MXVR_INT_STAT_1)
+#define bfin_write_MXVR_INT_STAT_1(val) bfin_write32(MXVR_INT_STAT_1, val)
+#define bfin_read_MXVR_INT_EN_0()      bfin_read32(MXVR_INT_EN_0)
+#define bfin_write_MXVR_INT_EN_0(val)  bfin_write32(MXVR_INT_EN_0, val)
+#define bfin_read_MXVR_INT_EN_1()      bfin_read32(MXVR_INT_EN_1)
+#define bfin_write_MXVR_INT_EN_1(val)  bfin_write32(MXVR_INT_EN_1, val)
+#define bfin_read_MXVR_POSITION()      bfin_read16(MXVR_POSITION)
+#define bfin_write_MXVR_POSITION(val)  bfin_write16(MXVR_POSITION, val)
+#define bfin_read_MXVR_MAX_POSITION()  bfin_read16(MXVR_MAX_POSITION)
+#define bfin_write_MXVR_MAX_POSITION(val) bfin_write16(MXVR_MAX_POSITION, val)
+#define bfin_read_MXVR_DELAY()         bfin_read16(MXVR_DELAY)
+#define bfin_write_MXVR_DELAY(val)     bfin_write16(MXVR_DELAY, val)
+#define bfin_read_MXVR_MAX_DELAY()     bfin_read16(MXVR_MAX_DELAY)
+#define bfin_write_MXVR_MAX_DELAY(val) bfin_write16(MXVR_MAX_DELAY, val)
+#define bfin_read_MXVR_LADDR()         bfin_read32(MXVR_LADDR)
+#define bfin_write_MXVR_LADDR(val)     bfin_write32(MXVR_LADDR, val)
+#define bfin_read_MXVR_GADDR()         bfin_read16(MXVR_GADDR)
+#define bfin_write_MXVR_GADDR(val)     bfin_write16(MXVR_GADDR, val)
+#define bfin_read_MXVR_AADDR()         bfin_read32(MXVR_AADDR)
+#define bfin_write_MXVR_AADDR(val)     bfin_write32(MXVR_AADDR, val)
+#define bfin_read_MXVR_ALLOC_0()       bfin_read32(MXVR_ALLOC_0)
+#define bfin_write_MXVR_ALLOC_0(val)   bfin_write32(MXVR_ALLOC_0, val)
+#define bfin_read_MXVR_ALLOC_1()       bfin_read32(MXVR_ALLOC_1)
+#define bfin_write_MXVR_ALLOC_1(val)   bfin_write32(MXVR_ALLOC_1, val)
+#define bfin_read_MXVR_ALLOC_2()       bfin_read32(MXVR_ALLOC_2)
+#define bfin_write_MXVR_ALLOC_2(val)   bfin_write32(MXVR_ALLOC_2, val)
+#define bfin_read_MXVR_ALLOC_3()       bfin_read32(MXVR_ALLOC_3)
+#define bfin_write_MXVR_ALLOC_3(val)   bfin_write32(MXVR_ALLOC_3, val)
+#define bfin_read_MXVR_ALLOC_4()       bfin_read32(MXVR_ALLOC_4)
+#define bfin_write_MXVR_ALLOC_4(val)   bfin_write32(MXVR_ALLOC_4, val)
+#define bfin_read_MXVR_ALLOC_5()       bfin_read32(MXVR_ALLOC_5)
+#define bfin_write_MXVR_ALLOC_5(val)   bfin_write32(MXVR_ALLOC_5, val)
+#define bfin_read_MXVR_ALLOC_6()       bfin_read32(MXVR_ALLOC_6)
+#define bfin_write_MXVR_ALLOC_6(val)   bfin_write32(MXVR_ALLOC_6, val)
+#define bfin_read_MXVR_ALLOC_7()       bfin_read32(MXVR_ALLOC_7)
+#define bfin_write_MXVR_ALLOC_7(val)   bfin_write32(MXVR_ALLOC_7, val)
+#define bfin_read_MXVR_ALLOC_8()       bfin_read32(MXVR_ALLOC_8)
+#define bfin_write_MXVR_ALLOC_8(val)   bfin_write32(MXVR_ALLOC_8, val)
+#define bfin_read_MXVR_ALLOC_9()       bfin_read32(MXVR_ALLOC_9)
+#define bfin_write_MXVR_ALLOC_9(val)   bfin_write32(MXVR_ALLOC_9, val)
+#define bfin_read_MXVR_ALLOC_10()      bfin_read32(MXVR_ALLOC_10)
+#define bfin_write_MXVR_ALLOC_10(val)  bfin_write32(MXVR_ALLOC_10, val)
+#define bfin_read_MXVR_ALLOC_11()      bfin_read32(MXVR_ALLOC_11)
+#define bfin_write_MXVR_ALLOC_11(val)  bfin_write32(MXVR_ALLOC_11, val)
+#define bfin_read_MXVR_ALLOC_12()      bfin_read32(MXVR_ALLOC_12)
+#define bfin_write_MXVR_ALLOC_12(val)  bfin_write32(MXVR_ALLOC_12, val)
+#define bfin_read_MXVR_ALLOC_13()      bfin_read32(MXVR_ALLOC_13)
+#define bfin_write_MXVR_ALLOC_13(val)  bfin_write32(MXVR_ALLOC_13, val)
+#define bfin_read_MXVR_ALLOC_14()      bfin_read32(MXVR_ALLOC_14)
+#define bfin_write_MXVR_ALLOC_14(val)  bfin_write32(MXVR_ALLOC_14, val)
+#define bfin_read_MXVR_SYNC_LCHAN_0()  bfin_read32(MXVR_SYNC_LCHAN_0)
+#define bfin_write_MXVR_SYNC_LCHAN_0(val) bfin_write32(MXVR_SYNC_LCHAN_0, val)
+#define bfin_read_MXVR_SYNC_LCHAN_1()  bfin_read32(MXVR_SYNC_LCHAN_1)
+#define bfin_write_MXVR_SYNC_LCHAN_1(val) bfin_write32(MXVR_SYNC_LCHAN_1, val)
+#define bfin_read_MXVR_SYNC_LCHAN_2()  bfin_read32(MXVR_SYNC_LCHAN_2)
+#define bfin_write_MXVR_SYNC_LCHAN_2(val) bfin_write32(MXVR_SYNC_LCHAN_2, val)
+#define bfin_read_MXVR_SYNC_LCHAN_3()  bfin_read32(MXVR_SYNC_LCHAN_3)
+#define bfin_write_MXVR_SYNC_LCHAN_3(val) bfin_write32(MXVR_SYNC_LCHAN_3, val)
+#define bfin_read_MXVR_SYNC_LCHAN_4()  bfin_read32(MXVR_SYNC_LCHAN_4)
+#define bfin_write_MXVR_SYNC_LCHAN_4(val) bfin_write32(MXVR_SYNC_LCHAN_4, val)
+#define bfin_read_MXVR_SYNC_LCHAN_5()  bfin_read32(MXVR_SYNC_LCHAN_5)
+#define bfin_write_MXVR_SYNC_LCHAN_5(val) bfin_write32(MXVR_SYNC_LCHAN_5, val)
+#define bfin_read_MXVR_SYNC_LCHAN_6()  bfin_read32(MXVR_SYNC_LCHAN_6)
+#define bfin_write_MXVR_SYNC_LCHAN_6(val) bfin_write32(MXVR_SYNC_LCHAN_6, val)
+#define bfin_read_MXVR_SYNC_LCHAN_7()  bfin_read32(MXVR_SYNC_LCHAN_7)
+#define bfin_write_MXVR_SYNC_LCHAN_7(val) bfin_write32(MXVR_SYNC_LCHAN_7, val)
+#define bfin_read_MXVR_DMA0_CONFIG()   bfin_read32(MXVR_DMA0_CONFIG)
+#define bfin_write_MXVR_DMA0_CONFIG(val) bfin_write32(MXVR_DMA0_CONFIG, val)
+#define bfin_read_MXVR_DMA0_START_ADDR() bfin_readPTR(MXVR_DMA0_START_ADDR)
+#define bfin_write_MXVR_DMA0_START_ADDR(val) bfin_writePTR(MXVR_DMA0_START_ADDR, val)
+#define bfin_read_MXVR_DMA0_COUNT()    bfin_read16(MXVR_DMA0_COUNT)
+#define bfin_write_MXVR_DMA0_COUNT(val) bfin_write16(MXVR_DMA0_COUNT, val)
+#define bfin_read_MXVR_DMA0_CURR_ADDR() bfin_readPTR(MXVR_DMA0_CURR_ADDR)
+#define bfin_write_MXVR_DMA0_CURR_ADDR(val) bfin_writePTR(MXVR_DMA0_CURR_ADDR, val)
+#define bfin_read_MXVR_DMA0_CURR_COUNT() bfin_read16(MXVR_DMA0_CURR_COUNT)
+#define bfin_write_MXVR_DMA0_CURR_COUNT(val) bfin_write16(MXVR_DMA0_CURR_COUNT, val)
+#define bfin_read_MXVR_DMA1_CONFIG()   bfin_read32(MXVR_DMA1_CONFIG)
+#define bfin_write_MXVR_DMA1_CONFIG(val) bfin_write32(MXVR_DMA1_CONFIG, val)
+#define bfin_read_MXVR_DMA1_START_ADDR() bfin_readPTR(MXVR_DMA1_START_ADDR)
+#define bfin_write_MXVR_DMA1_START_ADDR(val) bfin_writePTR(MXVR_DMA1_START_ADDR, val)
+#define bfin_read_MXVR_DMA1_COUNT()    bfin_read16(MXVR_DMA1_COUNT)
+#define bfin_write_MXVR_DMA1_COUNT(val) bfin_write16(MXVR_DMA1_COUNT, val)
+#define bfin_read_MXVR_DMA1_CURR_ADDR() bfin_readPTR(MXVR_DMA1_CURR_ADDR)
+#define bfin_write_MXVR_DMA1_CURR_ADDR(val) bfin_writePTR(MXVR_DMA1_CURR_ADDR, val)
+#define bfin_read_MXVR_DMA1_CURR_COUNT() bfin_read16(MXVR_DMA1_CURR_COUNT)
+#define bfin_write_MXVR_DMA1_CURR_COUNT(val) bfin_write16(MXVR_DMA1_CURR_COUNT, val)
+#define bfin_read_MXVR_DMA2_CONFIG()   bfin_read32(MXVR_DMA2_CONFIG)
+#define bfin_write_MXVR_DMA2_CONFIG(val) bfin_write32(MXVR_DMA2_CONFIG, val)
+#define bfin_read_MXVR_DMA2_START_ADDR() bfin_readPTR(MXVR_DMA2_START_ADDR)
+#define bfin_write_MXVR_DMA2_START_ADDR(val) bfin_writePTR(MXVR_DMA2_START_ADDR, val)
+#define bfin_read_MXVR_DMA2_COUNT()    bfin_read16(MXVR_DMA2_COUNT)
+#define bfin_write_MXVR_DMA2_COUNT(val) bfin_write16(MXVR_DMA2_COUNT, val)
+#define bfin_read_MXVR_DMA2_CURR_ADDR() bfin_readPTR(MXVR_DMA2_CURR_ADDR)
+#define bfin_write_MXVR_DMA2_CURR_ADDR(val) bfin_writePTR(MXVR_DMA2_CURR_ADDR, val)
+#define bfin_read_MXVR_DMA2_CURR_COUNT() bfin_read16(MXVR_DMA2_CURR_COUNT)
+#define bfin_write_MXVR_DMA2_CURR_COUNT(val) bfin_write16(MXVR_DMA2_CURR_COUNT, val)
+#define bfin_read_MXVR_DMA3_CONFIG()   bfin_read32(MXVR_DMA3_CONFIG)
+#define bfin_write_MXVR_DMA3_CONFIG(val) bfin_write32(MXVR_DMA3_CONFIG, val)
+#define bfin_read_MXVR_DMA3_START_ADDR() bfin_readPTR(MXVR_DMA3_START_ADDR)
+#define bfin_write_MXVR_DMA3_START_ADDR(val) bfin_writePTR(MXVR_DMA3_START_ADDR, val)
+#define bfin_read_MXVR_DMA3_COUNT()    bfin_read16(MXVR_DMA3_COUNT)
+#define bfin_write_MXVR_DMA3_COUNT(val) bfin_write16(MXVR_DMA3_COUNT, val)
+#define bfin_read_MXVR_DMA3_CURR_ADDR() bfin_readPTR(MXVR_DMA3_CURR_ADDR)
+#define bfin_write_MXVR_DMA3_CURR_ADDR(val) bfin_writePTR(MXVR_DMA3_CURR_ADDR, val)
+#define bfin_read_MXVR_DMA3_CURR_COUNT() bfin_read16(MXVR_DMA3_CURR_COUNT)
+#define bfin_write_MXVR_DMA3_CURR_COUNT(val) bfin_write16(MXVR_DMA3_CURR_COUNT, val)
+#define bfin_read_MXVR_DMA4_CONFIG()   bfin_read32(MXVR_DMA4_CONFIG)
+#define bfin_write_MXVR_DMA4_CONFIG(val) bfin_write32(MXVR_DMA4_CONFIG, val)
+#define bfin_read_MXVR_DMA4_START_ADDR() bfin_readPTR(MXVR_DMA4_START_ADDR)
+#define bfin_write_MXVR_DMA4_START_ADDR(val) bfin_writePTR(MXVR_DMA4_START_ADDR, val)
+#define bfin_read_MXVR_DMA4_COUNT()    bfin_read16(MXVR_DMA4_COUNT)
+#define bfin_write_MXVR_DMA4_COUNT(val) bfin_write16(MXVR_DMA4_COUNT, val)
+#define bfin_read_MXVR_DMA4_CURR_ADDR() bfin_readPTR(MXVR_DMA4_CURR_ADDR)
+#define bfin_write_MXVR_DMA4_CURR_ADDR(val) bfin_writePTR(MXVR_DMA4_CURR_ADDR, val)
+#define bfin_read_MXVR_DMA4_CURR_COUNT() bfin_read16(MXVR_DMA4_CURR_COUNT)
+#define bfin_write_MXVR_DMA4_CURR_COUNT(val) bfin_write16(MXVR_DMA4_CURR_COUNT, val)
+#define bfin_read_MXVR_DMA5_CONFIG()   bfin_read32(MXVR_DMA5_CONFIG)
+#define bfin_write_MXVR_DMA5_CONFIG(val) bfin_write32(MXVR_DMA5_CONFIG, val)
+#define bfin_read_MXVR_DMA5_START_ADDR() bfin_readPTR(MXVR_DMA5_START_ADDR)
+#define bfin_write_MXVR_DMA5_START_ADDR(val) bfin_writePTR(MXVR_DMA5_START_ADDR, val)
+#define bfin_read_MXVR_DMA5_COUNT()    bfin_read16(MXVR_DMA5_COUNT)
+#define bfin_write_MXVR_DMA5_COUNT(val) bfin_write16(MXVR_DMA5_COUNT, val)
+#define bfin_read_MXVR_DMA5_CURR_ADDR() bfin_readPTR(MXVR_DMA5_CURR_ADDR)
+#define bfin_write_MXVR_DMA5_CURR_ADDR(val) bfin_writePTR(MXVR_DMA5_CURR_ADDR, val)
+#define bfin_read_MXVR_DMA5_CURR_COUNT() bfin_read16(MXVR_DMA5_CURR_COUNT)
+#define bfin_write_MXVR_DMA5_CURR_COUNT(val) bfin_write16(MXVR_DMA5_CURR_COUNT, val)
+#define bfin_read_MXVR_DMA6_CONFIG()   bfin_read32(MXVR_DMA6_CONFIG)
+#define bfin_write_MXVR_DMA6_CONFIG(val) bfin_write32(MXVR_DMA6_CONFIG, val)
+#define bfin_read_MXVR_DMA6_START_ADDR() bfin_readPTR(MXVR_DMA6_START_ADDR)
+#define bfin_write_MXVR_DMA6_START_ADDR(val) bfin_writePTR(MXVR_DMA6_START_ADDR, val)
+#define bfin_read_MXVR_DMA6_COUNT()    bfin_read16(MXVR_DMA6_COUNT)
+#define bfin_write_MXVR_DMA6_COUNT(val) bfin_write16(MXVR_DMA6_COUNT, val)
+#define bfin_read_MXVR_DMA6_CURR_ADDR() bfin_readPTR(MXVR_DMA6_CURR_ADDR)
+#define bfin_write_MXVR_DMA6_CURR_ADDR(val) bfin_writePTR(MXVR_DMA6_CURR_ADDR, val)
+#define bfin_read_MXVR_DMA6_CURR_COUNT() bfin_read16(MXVR_DMA6_CURR_COUNT)
+#define bfin_write_MXVR_DMA6_CURR_COUNT(val) bfin_write16(MXVR_DMA6_CURR_COUNT, val)
+#define bfin_read_MXVR_DMA7_CONFIG()   bfin_read32(MXVR_DMA7_CONFIG)
+#define bfin_write_MXVR_DMA7_CONFIG(val) bfin_write32(MXVR_DMA7_CONFIG, val)
+#define bfin_read_MXVR_DMA7_START_ADDR() bfin_readPTR(MXVR_DMA7_START_ADDR)
+#define bfin_write_MXVR_DMA7_START_ADDR(val) bfin_writePTR(MXVR_DMA7_START_ADDR, val)
+#define bfin_read_MXVR_DMA7_COUNT()    bfin_read16(MXVR_DMA7_COUNT)
+#define bfin_write_MXVR_DMA7_COUNT(val) bfin_write16(MXVR_DMA7_COUNT, val)
+#define bfin_read_MXVR_DMA7_CURR_ADDR() bfin_readPTR(MXVR_DMA7_CURR_ADDR)
+#define bfin_write_MXVR_DMA7_CURR_ADDR(val) bfin_writePTR(MXVR_DMA7_CURR_ADDR, val)
+#define bfin_read_MXVR_DMA7_CURR_COUNT() bfin_read16(MXVR_DMA7_CURR_COUNT)
+#define bfin_write_MXVR_DMA7_CURR_COUNT(val) bfin_write16(MXVR_DMA7_CURR_COUNT, val)
+#define bfin_read_MXVR_AP_CTL()        bfin_read16(MXVR_AP_CTL)
+#define bfin_write_MXVR_AP_CTL(val)    bfin_write16(MXVR_AP_CTL, val)
+#define bfin_read_MXVR_APRB_START_ADDR() bfin_readPTR(MXVR_APRB_START_ADDR)
+#define bfin_write_MXVR_APRB_START_ADDR(val) bfin_writePTR(MXVR_APRB_START_ADDR, val)
+#define bfin_read_MXVR_APRB_CURR_ADDR() bfin_readPTR(MXVR_APRB_CURR_ADDR)
+#define bfin_write_MXVR_APRB_CURR_ADDR(val) bfin_writePTR(MXVR_APRB_CURR_ADDR, val)
+#define bfin_read_MXVR_APTB_START_ADDR() bfin_readPTR(MXVR_APTB_START_ADDR)
+#define bfin_write_MXVR_APTB_START_ADDR(val) bfin_writePTR(MXVR_APTB_START_ADDR, val)
+#define bfin_read_MXVR_APTB_CURR_ADDR() bfin_readPTR(MXVR_APTB_CURR_ADDR)
+#define bfin_write_MXVR_APTB_CURR_ADDR(val) bfin_writePTR(MXVR_APTB_CURR_ADDR, val)
+#define bfin_read_MXVR_CM_CTL()        bfin_read32(MXVR_CM_CTL)
+#define bfin_write_MXVR_CM_CTL(val)    bfin_write32(MXVR_CM_CTL, val)
+#define bfin_read_MXVR_CMRB_START_ADDR() bfin_readPTR(MXVR_CMRB_START_ADDR)
+#define bfin_write_MXVR_CMRB_START_ADDR(val) bfin_writePTR(MXVR_CMRB_START_ADDR, val)
+#define bfin_read_MXVR_CMRB_CURR_ADDR() bfin_readPTR(MXVR_CMRB_CURR_ADDR)
+#define bfin_write_MXVR_CMRB_CURR_ADDR(val) bfin_writePTR(MXVR_CMRB_CURR_ADDR, val)
+#define bfin_read_MXVR_CMTB_START_ADDR() bfin_readPTR(MXVR_CMTB_START_ADDR)
+#define bfin_write_MXVR_CMTB_START_ADDR(val) bfin_writePTR(MXVR_CMTB_START_ADDR, val)
+#define bfin_read_MXVR_CMTB_CURR_ADDR() bfin_readPTR(MXVR_CMTB_CURR_ADDR)
+#define bfin_write_MXVR_CMTB_CURR_ADDR(val) bfin_writePTR(MXVR_CMTB_CURR_ADDR, val)
+#define bfin_read_MXVR_RRDB_START_ADDR() bfin_readPTR(MXVR_RRDB_START_ADDR)
+#define bfin_write_MXVR_RRDB_START_ADDR(val) bfin_writePTR(MXVR_RRDB_START_ADDR, val)
+#define bfin_read_MXVR_RRDB_CURR_ADDR() bfin_readPTR(MXVR_RRDB_CURR_ADDR)
+#define bfin_write_MXVR_RRDB_CURR_ADDR(val) bfin_writePTR(MXVR_RRDB_CURR_ADDR, val)
+#define bfin_read_MXVR_PAT_DATA_0()    bfin_read32(MXVR_PAT_DATA_0)
+#define bfin_write_MXVR_PAT_DATA_0(val) bfin_write32(MXVR_PAT_DATA_0, val)
+#define bfin_read_MXVR_PAT_EN_0()      bfin_read32(MXVR_PAT_EN_0)
+#define bfin_write_MXVR_PAT_EN_0(val)  bfin_write32(MXVR_PAT_EN_0, val)
+#define bfin_read_MXVR_PAT_DATA_1()    bfin_read32(MXVR_PAT_DATA_1)
+#define bfin_write_MXVR_PAT_DATA_1(val) bfin_write32(MXVR_PAT_DATA_1, val)
+#define bfin_read_MXVR_PAT_EN_1()      bfin_read32(MXVR_PAT_EN_1)
+#define bfin_write_MXVR_PAT_EN_1(val)  bfin_write32(MXVR_PAT_EN_1, val)
+#define bfin_read_MXVR_FRAME_CNT_0()   bfin_read16(MXVR_FRAME_CNT_0)
+#define bfin_write_MXVR_FRAME_CNT_0(val) bfin_write16(MXVR_FRAME_CNT_0, val)
+#define bfin_read_MXVR_FRAME_CNT_1()   bfin_read16(MXVR_FRAME_CNT_1)
+#define bfin_write_MXVR_FRAME_CNT_1(val) bfin_write16(MXVR_FRAME_CNT_1, val)
+#define bfin_read_MXVR_ROUTING_0()     bfin_read32(MXVR_ROUTING_0)
+#define bfin_write_MXVR_ROUTING_0(val) bfin_write32(MXVR_ROUTING_0, val)
+#define bfin_read_MXVR_ROUTING_1()     bfin_read32(MXVR_ROUTING_1)
+#define bfin_write_MXVR_ROUTING_1(val) bfin_write32(MXVR_ROUTING_1, val)
+#define bfin_read_MXVR_ROUTING_2()     bfin_read32(MXVR_ROUTING_2)
+#define bfin_write_MXVR_ROUTING_2(val) bfin_write32(MXVR_ROUTING_2, val)
+#define bfin_read_MXVR_ROUTING_3()     bfin_read32(MXVR_ROUTING_3)
+#define bfin_write_MXVR_ROUTING_3(val) bfin_write32(MXVR_ROUTING_3, val)
+#define bfin_read_MXVR_ROUTING_4()     bfin_read32(MXVR_ROUTING_4)
+#define bfin_write_MXVR_ROUTING_4(val) bfin_write32(MXVR_ROUTING_4, val)
+#define bfin_read_MXVR_ROUTING_5()     bfin_read32(MXVR_ROUTING_5)
+#define bfin_write_MXVR_ROUTING_5(val) bfin_write32(MXVR_ROUTING_5, val)
+#define bfin_read_MXVR_ROUTING_6()     bfin_read32(MXVR_ROUTING_6)
+#define bfin_write_MXVR_ROUTING_6(val) bfin_write32(MXVR_ROUTING_6, val)
+#define bfin_read_MXVR_ROUTING_7()     bfin_read32(MXVR_ROUTING_7)
+#define bfin_write_MXVR_ROUTING_7(val) bfin_write32(MXVR_ROUTING_7, val)
+#define bfin_read_MXVR_ROUTING_8()     bfin_read32(MXVR_ROUTING_8)
+#define bfin_write_MXVR_ROUTING_8(val) bfin_write32(MXVR_ROUTING_8, val)
+#define bfin_read_MXVR_ROUTING_9()     bfin_read32(MXVR_ROUTING_9)
+#define bfin_write_MXVR_ROUTING_9(val) bfin_write32(MXVR_ROUTING_9, val)
+#define bfin_read_MXVR_ROUTING_10()    bfin_read32(MXVR_ROUTING_10)
+#define bfin_write_MXVR_ROUTING_10(val) bfin_write32(MXVR_ROUTING_10, val)
+#define bfin_read_MXVR_ROUTING_11()    bfin_read32(MXVR_ROUTING_11)
+#define bfin_write_MXVR_ROUTING_11(val) bfin_write32(MXVR_ROUTING_11, val)
+#define bfin_read_MXVR_ROUTING_12()    bfin_read32(MXVR_ROUTING_12)
+#define bfin_write_MXVR_ROUTING_12(val) bfin_write32(MXVR_ROUTING_12, val)
+#define bfin_read_MXVR_ROUTING_13()    bfin_read32(MXVR_ROUTING_13)
+#define bfin_write_MXVR_ROUTING_13(val) bfin_write32(MXVR_ROUTING_13, val)
+#define bfin_read_MXVR_ROUTING_14()    bfin_read32(MXVR_ROUTING_14)
+#define bfin_write_MXVR_ROUTING_14(val) bfin_write32(MXVR_ROUTING_14, val)
+#define bfin_read_MXVR_PLL_CTL_1()     bfin_read32(MXVR_PLL_CTL_1)
+#define bfin_write_MXVR_PLL_CTL_1(val) bfin_write32(MXVR_PLL_CTL_1, val)
+#define bfin_read_MXVR_BLOCK_CNT()     bfin_read16(MXVR_BLOCK_CNT)
+#define bfin_write_MXVR_BLOCK_CNT(val) bfin_write16(MXVR_BLOCK_CNT, val)
+
+#endif /* _CDEF_BF539_H */
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF539.h b/arch/blackfin/mach-bf538/include/mach/defBF539.h
new file mode 100644
index 0000000..6adbfcc
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/defBF539.h
@@ -0,0 +1,4243 @@
+/************************************************************************
+ *
+ * This file is subject to the terms and conditions of the GNU Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Non-GPL License also available as part of VisualDSP++
+ * http://www.analog.com/processors/resources/crosscore/visualDspDevSoftware.html
+ *
+ * (c) Copyright 2001-2005 Analog Devices, Inc. All rights reserved
+ *
+ * This file under source code control, please send bugs or changes to:
+ * dsptools.support@analog.com
+ *
+ ************************************************************************/
+/*
+ * File:         include/asm-blackfin/mach-bf538/defBF539.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+/* SYSTEM & MM REGISTER BIT & ADDRESS DEFINITIONS FOR ADSP-BF538/9 */
+
+#ifndef _DEF_BF539_H
+#define _DEF_BF539_H
+
+/* include all Core registers and bit definitions */
+#include <asm/def_LPBlackfin.h>
+
+
+/*********************************************************************************** */
+/* System MMR Register Map */
+/*********************************************************************************** */
+/* Clock/Regulator Control (0xFFC00000 - 0xFFC000FF) */
+#define	PLL_CTL			0xFFC00000	/* PLL Control register (16-bit) */
+#define	PLL_DIV			0xFFC00004	/* PLL Divide Register (16-bit) */
+#define	VR_CTL			0xFFC00008	/* Voltage Regulator Control Register (16-bit) */
+#define	PLL_STAT		0xFFC0000C	/* PLL Status register (16-bit) */
+#define	PLL_LOCKCNT		0xFFC00010	/* PLL Lock	Count register (16-bit) */
+#define	CHIPID			0xFFC00014	/* Chip	ID Register */
+
+/* CHIPID Masks */
+#define CHIPID_VERSION         0xF0000000
+#define CHIPID_FAMILY          0x0FFFF000
+#define CHIPID_MANUFACTURE     0x00000FFE
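+
+/*
+ * Illustrative sketch (not in the original file): the masks above carve
+ * CHIPID into a 4-bit silicon revision, a 16-bit family code and an
+ * 11-bit manufacturer ID, so a driver could decode it as, e.g.,
+ *
+ *	u32 id = bfin_read_CHIPID();
+ *	unsigned int rev    = (id & CHIPID_VERSION) >> 28;
+ *	unsigned int family = (id & CHIPID_FAMILY)  >> 12;
+ *
+ * bfin_read_CHIPID() is assumed to be provided by the matching cdef header.
+ */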
+
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
+#define	SWRST			0xFFC00100  /* Software	Reset Register (16-bit) */
+#define	SYSCR			0xFFC00104  /* System Configuration register */
+#define	SIC_IMASK0		0xFFC0010C  /* Interrupt Mask Register */
+#define	SIC_IAR0		0xFFC00110  /* Interrupt Assignment Register 0 */
+#define	SIC_IAR1		0xFFC00114  /* Interrupt Assignment Register 1 */
+#define	SIC_IAR2		0xFFC00118  /* Interrupt Assignment Register 2 */
+#define	SIC_IAR3			0xFFC0011C	/* Interrupt Assignment	Register 3 */
+#define	SIC_ISR0			0xFFC00120  /* Interrupt Status	Register */
+#define	SIC_IWR0			0xFFC00124  /* Interrupt Wakeup	Register */
+#define	SIC_IMASK1			0xFFC00128	/* Interrupt Mask Register 1 */
+#define	SIC_ISR1			0xFFC0012C	/* Interrupt Status Register 1 */
+#define	SIC_IWR1			0xFFC00130	/* Interrupt Wakeup Register 1 */
+#define	SIC_IAR4			0xFFC00134	/* Interrupt Assignment	Register 4 */
+#define	SIC_IAR5			0xFFC00138	/* Interrupt Assignment	Register 5 */
+#define	SIC_IAR6			0xFFC0013C	/* Interrupt Assignment	Register 6 */
+
+
+/* Watchdog Timer (0xFFC00200 -	0xFFC002FF) */
+#define	WDOG_CTL	0xFFC00200  /* Watchdog	Control	Register */
+#define	WDOG_CNT	0xFFC00204  /* Watchdog	Count Register */
+#define	WDOG_STAT	0xFFC00208  /* Watchdog	Status Register */
+
+
+/* Real	Time Clock (0xFFC00300 - 0xFFC003FF) */
+#define	RTC_STAT	0xFFC00300  /* RTC Status Register */
+#define	RTC_ICTL	0xFFC00304  /* RTC Interrupt Control Register */
+#define	RTC_ISTAT	0xFFC00308  /* RTC Interrupt Status Register */
+#define	RTC_SWCNT	0xFFC0030C  /* RTC Stopwatch Count Register */
+#define	RTC_ALARM	0xFFC00310  /* RTC Alarm Time Register */
+#define	RTC_FAST	0xFFC00314  /* RTC Prescaler Enable Register */
+#define	RTC_PREN		0xFFC00314  /* RTC Prescaler Enable Register (alternate	macro) */
+
+
+/* UART0 Controller (0xFFC00400	- 0xFFC004FF) */
+#define	UART0_THR	      0xFFC00400  /* Transmit Holding register */
+#define	UART0_RBR	      0xFFC00400  /* Receive Buffer register */
+#define	UART0_DLL	      0xFFC00400  /* Divisor Latch (Low-Byte) */
+#define	UART0_IER	      0xFFC00404  /* Interrupt Enable Register */
+#define	UART0_DLH	      0xFFC00404  /* Divisor Latch (High-Byte) */
+#define	UART0_IIR	      0xFFC00408  /* Interrupt Identification Register */
+#define	UART0_LCR	      0xFFC0040C  /* Line Control Register */
+#define	UART0_MCR			 0xFFC00410  /*	Modem Control Register */
+#define	UART0_LSR	      0xFFC00414  /* Line Status Register */
+#define	UART0_SCR	      0xFFC0041C  /* SCR Scratch Register */
+#define	UART0_GCTL		     0xFFC00424	 /* Global Control Register */
+
+
+/* SPI0	Controller (0xFFC00500 - 0xFFC005FF) */
+
+#define	SPI0_CTL			0xFFC00500  /* SPI0 Control Register */
+#define	SPI0_FLG			0xFFC00504  /* SPI0 Flag register */
+#define	SPI0_STAT			0xFFC00508  /* SPI0 Status register */
+#define	SPI0_TDBR			0xFFC0050C  /* SPI0 Transmit Data Buffer Register */
+#define	SPI0_RDBR			0xFFC00510  /* SPI0 Receive Data Buffer	Register */
+#define	SPI0_BAUD			0xFFC00514  /* SPI0 Baud rate Register */
+#define	SPI0_SHADOW			0xFFC00518  /* SPI0_RDBR Shadow	Register */
+#define SPI0_REGBASE			SPI0_CTL
+
+
+/* TIMER 0, 1, 2 Registers (0xFFC00600 - 0xFFC006FF) */
+#define	TIMER0_CONFIG			0xFFC00600     /* Timer	0 Configuration	Register */
+#define	TIMER0_COUNTER				0xFFC00604     /* Timer	0 Counter Register */
+#define	TIMER0_PERIOD			0xFFC00608     /* Timer	0 Period Register */
+#define	TIMER0_WIDTH			0xFFC0060C     /* Timer	0 Width	Register */
+
+#define	TIMER1_CONFIG			0xFFC00610	/*  Timer 1 Configuration Register   */
+#define	TIMER1_COUNTER			0xFFC00614	/*  Timer 1 Counter Register	     */
+#define	TIMER1_PERIOD			0xFFC00618	/*  Timer 1 Period Register	     */
+#define	TIMER1_WIDTH			0xFFC0061C	/*  Timer 1 Width Register	     */
+
+#define	TIMER2_CONFIG			0xFFC00620	/* Timer 2 Configuration Register   */
+#define	TIMER2_COUNTER			0xFFC00624	/* Timer 2 Counter Register	    */
+#define	TIMER2_PERIOD			0xFFC00628	/* Timer 2 Period Register	    */
+#define	TIMER2_WIDTH			0xFFC0062C	/* Timer 2 Width Register	    */
+
+#define	TIMER_ENABLE				0xFFC00640	/* Timer Enable	Register */
+#define	TIMER_DISABLE				0xFFC00644	/* Timer Disable Register */
+#define	TIMER_STATUS				0xFFC00648	/* Timer Status	Register */
+
+
+/* Programmable	Flags (0xFFC00700 - 0xFFC007FF) */
+#define	FIO_FLAG_D				0xFFC00700  /* Flag Mask to directly specify state of pins */
+#define	FIO_FLAG_C			0xFFC00704  /* Peripheral Interrupt Flag Register (clear) */
+#define	FIO_FLAG_S			0xFFC00708  /* Peripheral Interrupt Flag Register (set) */
+#define	FIO_FLAG_T					0xFFC0070C  /* Flag Mask to directly toggle state of pins */
+#define	FIO_MASKA_D			0xFFC00710  /* Flag Mask Interrupt A Register (set directly) */
+#define	FIO_MASKA_C			0xFFC00714  /* Flag Mask Interrupt A Register (clear) */
+#define	FIO_MASKA_S			0xFFC00718  /* Flag Mask Interrupt A Register (set) */
+#define	FIO_MASKA_T			0xFFC0071C  /* Flag Mask Interrupt A Register (toggle) */
+#define	FIO_MASKB_D			0xFFC00720  /* Flag Mask Interrupt B Register (set directly) */
+#define	FIO_MASKB_C			0xFFC00724  /* Flag Mask Interrupt B Register (clear) */
+#define	FIO_MASKB_S			0xFFC00728  /* Flag Mask Interrupt B Register (set) */
+#define	FIO_MASKB_T			0xFFC0072C  /* Flag Mask Interrupt B Register (toggle) */
+#define	FIO_DIR				0xFFC00730  /* Peripheral Flag Direction Register */
+#define	FIO_POLAR			0xFFC00734  /* Flag Source Polarity Register */
+#define	FIO_EDGE			0xFFC00738  /* Flag Source Sensitivity Register */
+#define	FIO_BOTH			0xFFC0073C  /* Flag Set	on BOTH	Edges Register */
+#define	FIO_INEN					0xFFC00740  /* Flag Input Enable Register  */
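+
+/*
+ * Illustrative sketch (not in the original file): the _C/_S/_T variants let
+ * code clear, set or toggle individual flag pins without a read-modify-write
+ * of FIO_FLAG_D, e.g.
+ *
+ *	bfin_write_FIO_FLAG_S(PF5);	(drive PF5 high)
+ *	bfin_write_FIO_FLAG_C(PF5);	(drive PF5 low)
+ *
+ * The bfin_write_FIO_FLAG_*() accessors and the PFx bit masks are assumed
+ * to be defined in the matching cdef/def headers.
+ */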
+
+
+/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
+#define	SPORT0_TCR1				0xFFC00800  /* SPORT0 Transmit Configuration 1 Register */
+#define	SPORT0_TCR2				0xFFC00804  /* SPORT0 Transmit Configuration 2 Register */
+#define	SPORT0_TCLKDIV			0xFFC00808  /* SPORT0 Transmit Clock Divider */
+#define	SPORT0_TFSDIV			0xFFC0080C  /* SPORT0 Transmit Frame Sync Divider */
+#define	SPORT0_TX			0xFFC00810  /* SPORT0 TX Data Register */
+#define	SPORT0_RX			0xFFC00818  /* SPORT0 RX Data Register */
+#define	SPORT0_RCR1				0xFFC00820  /* SPORT0 Receive Configuration 1 Register */
+#define	SPORT0_RCR2				0xFFC00824  /* SPORT0 Receive Configuration 2 Register */
+#define	SPORT0_RCLKDIV			0xFFC00828  /* SPORT0 Receive Clock Divider */
+#define	SPORT0_RFSDIV			0xFFC0082C  /* SPORT0 Receive Frame Sync Divider */
+#define	SPORT0_STAT			0xFFC00830  /* SPORT0 Status Register */
+#define	SPORT0_CHNL			0xFFC00834  /* SPORT0 Current Channel Register */
+#define	SPORT0_MCMC1			0xFFC00838  /* SPORT0 Multi-Channel Configuration Register 1 */
+#define	SPORT0_MCMC2			0xFFC0083C  /* SPORT0 Multi-Channel Configuration Register 2 */
+#define	SPORT0_MTCS0			0xFFC00840  /* SPORT0 Multi-Channel Transmit Select Register 0 */
+#define	SPORT0_MTCS1			0xFFC00844  /* SPORT0 Multi-Channel Transmit Select Register 1 */
+#define	SPORT0_MTCS2			0xFFC00848  /* SPORT0 Multi-Channel Transmit Select Register 2 */
+#define	SPORT0_MTCS3			0xFFC0084C  /* SPORT0 Multi-Channel Transmit Select Register 3 */
+#define	SPORT0_MRCS0			0xFFC00850  /* SPORT0 Multi-Channel Receive Select Register 0 */
+#define	SPORT0_MRCS1			0xFFC00854  /* SPORT0 Multi-Channel Receive Select Register 1 */
+#define	SPORT0_MRCS2			0xFFC00858  /* SPORT0 Multi-Channel Receive Select Register 2 */
+#define	SPORT0_MRCS3			0xFFC0085C  /* SPORT0 Multi-Channel Receive Select Register 3 */
+
+
+/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
+#define	SPORT1_TCR1				0xFFC00900  /* SPORT1 Transmit Configuration 1 Register */
+#define	SPORT1_TCR2				0xFFC00904  /* SPORT1 Transmit Configuration 2 Register */
+#define	SPORT1_TCLKDIV			0xFFC00908  /* SPORT1 Transmit Clock Divider */
+#define	SPORT1_TFSDIV			0xFFC0090C  /* SPORT1 Transmit Frame Sync Divider */
+#define	SPORT1_TX			0xFFC00910  /* SPORT1 TX Data Register */
+#define	SPORT1_RX			0xFFC00918  /* SPORT1 RX Data Register */
+#define	SPORT1_RCR1				0xFFC00920  /* SPORT1 Receive Configuration 1 Register */
+#define	SPORT1_RCR2				0xFFC00924  /* SPORT1 Receive Configuration 2 Register */
+#define	SPORT1_RCLKDIV			0xFFC00928  /* SPORT1 Receive Clock Divider */
+#define	SPORT1_RFSDIV			0xFFC0092C  /* SPORT1 Receive Frame Sync Divider */
+#define	SPORT1_STAT			0xFFC00930  /* SPORT1 Status Register */
+#define	SPORT1_CHNL			0xFFC00934  /* SPORT1 Current Channel Register */
+#define	SPORT1_MCMC1			0xFFC00938  /* SPORT1 Multi-Channel Configuration Register 1 */
+#define	SPORT1_MCMC2			0xFFC0093C  /* SPORT1 Multi-Channel Configuration Register 2 */
+#define	SPORT1_MTCS0			0xFFC00940  /* SPORT1 Multi-Channel Transmit Select Register 0 */
+#define	SPORT1_MTCS1			0xFFC00944  /* SPORT1 Multi-Channel Transmit Select Register 1 */
+#define	SPORT1_MTCS2			0xFFC00948  /* SPORT1 Multi-Channel Transmit Select Register 2 */
+#define	SPORT1_MTCS3			0xFFC0094C  /* SPORT1 Multi-Channel Transmit Select Register 3 */
+#define	SPORT1_MRCS0			0xFFC00950  /* SPORT1 Multi-Channel Receive Select Register 0 */
+#define	SPORT1_MRCS1			0xFFC00954  /* SPORT1 Multi-Channel Receive Select Register 1 */
+#define	SPORT1_MRCS2			0xFFC00958  /* SPORT1 Multi-Channel Receive Select Register 2 */
+#define	SPORT1_MRCS3			0xFFC0095C  /* SPORT1 Multi-Channel Receive Select Register 3 */
+
+
+/* External Bus	Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
+/* Asynchronous	Memory Controller  */
+#define	EBIU_AMGCTL			0xFFC00A00  /* Asynchronous Memory Global Control Register */
+#define	EBIU_AMBCTL0		0xFFC00A04  /* Asynchronous Memory Bank	Control	Register 0 */
+#define	EBIU_AMBCTL1		0xFFC00A08  /* Asynchronous Memory Bank	Control	Register 1 */
+
+/* SDRAM Controller */
+#define	EBIU_SDGCTL			0xFFC00A10  /* SDRAM Global Control Register */
+#define	EBIU_SDBCTL			0xFFC00A14  /* SDRAM Bank Control Register */
+#define	EBIU_SDRRC			0xFFC00A18  /* SDRAM Refresh Rate Control Register */
+#define	EBIU_SDSTAT			0xFFC00A1C  /* SDRAM Status Register */
+
+
+
+/* DMA Controller 0 Traffic Control Registers (0xFFC00B00 - 0xFFC00BFF) */
+
+#define	DMAC0_TC_PER			0xFFC00B0C	/* DMA Controller 0 Traffic Control Periods Register */
+#define	DMAC0_TC_CNT			0xFFC00B10	/* DMA Controller 0 Traffic Control Current Counts Register */
+
+/* Alternate deprecated	register names (below) provided	for backwards code compatibility */
+#define	DMA0_TCPER			DMAC0_TC_PER
+#define	DMA0_TCCNT			DMAC0_TC_CNT
+
+
+/* DMA Controller 0 (0xFFC00C00	- 0xFFC00FFF)							 */
+
+#define	DMA0_NEXT_DESC_PTR		0xFFC00C00	/* DMA Channel 0 Next Descriptor Pointer Register */
+#define	DMA0_START_ADDR			0xFFC00C04	/* DMA Channel 0 Start Address Register */
+#define	DMA0_CONFIG				0xFFC00C08	/* DMA Channel 0 Configuration Register */
+#define	DMA0_X_COUNT			0xFFC00C10	/* DMA Channel 0 X Count Register */
+#define	DMA0_X_MODIFY			0xFFC00C14	/* DMA Channel 0 X Modify Register */
+#define	DMA0_Y_COUNT			0xFFC00C18	/* DMA Channel 0 Y Count Register */
+#define	DMA0_Y_MODIFY			0xFFC00C1C	/* DMA Channel 0 Y Modify Register */
+#define	DMA0_CURR_DESC_PTR		0xFFC00C20	/* DMA Channel 0 Current Descriptor Pointer Register */
+#define	DMA0_CURR_ADDR			0xFFC00C24	/* DMA Channel 0 Current Address Register */
+#define	DMA0_IRQ_STATUS			0xFFC00C28	/* DMA Channel 0 Interrupt/Status Register */
+#define	DMA0_PERIPHERAL_MAP		0xFFC00C2C	/* DMA Channel 0 Peripheral Map	Register */
+#define	DMA0_CURR_X_COUNT		0xFFC00C30	/* DMA Channel 0 Current X Count Register */
+#define	DMA0_CURR_Y_COUNT		0xFFC00C38	/* DMA Channel 0 Current Y Count Register */
+
+#define	DMA1_NEXT_DESC_PTR		0xFFC00C40	/* DMA Channel 1 Next Descriptor Pointer Register */
+#define	DMA1_START_ADDR			0xFFC00C44	/* DMA Channel 1 Start Address Register */
+#define	DMA1_CONFIG				0xFFC00C48	/* DMA Channel 1 Configuration Register */
+#define	DMA1_X_COUNT			0xFFC00C50	/* DMA Channel 1 X Count Register */
+#define	DMA1_X_MODIFY			0xFFC00C54	/* DMA Channel 1 X Modify Register */
+#define	DMA1_Y_COUNT			0xFFC00C58	/* DMA Channel 1 Y Count Register */
+#define	DMA1_Y_MODIFY			0xFFC00C5C	/* DMA Channel 1 Y Modify Register */
+#define	DMA1_CURR_DESC_PTR		0xFFC00C60	/* DMA Channel 1 Current Descriptor Pointer Register */
+#define	DMA1_CURR_ADDR			0xFFC00C64	/* DMA Channel 1 Current Address Register */
+#define	DMA1_IRQ_STATUS			0xFFC00C68	/* DMA Channel 1 Interrupt/Status Register */
+#define	DMA1_PERIPHERAL_MAP		0xFFC00C6C	/* DMA Channel 1 Peripheral Map	Register */
+#define	DMA1_CURR_X_COUNT		0xFFC00C70	/* DMA Channel 1 Current X Count Register */
+#define	DMA1_CURR_Y_COUNT		0xFFC00C78	/* DMA Channel 1 Current Y Count Register */
+
+#define	DMA2_NEXT_DESC_PTR		0xFFC00C80	/* DMA Channel 2 Next Descriptor Pointer Register */
+#define	DMA2_START_ADDR			0xFFC00C84	/* DMA Channel 2 Start Address Register */
+#define	DMA2_CONFIG				0xFFC00C88	/* DMA Channel 2 Configuration Register */
+#define	DMA2_X_COUNT			0xFFC00C90	/* DMA Channel 2 X Count Register */
+#define	DMA2_X_MODIFY			0xFFC00C94	/* DMA Channel 2 X Modify Register */
+#define	DMA2_Y_COUNT			0xFFC00C98	/* DMA Channel 2 Y Count Register */
+#define	DMA2_Y_MODIFY			0xFFC00C9C	/* DMA Channel 2 Y Modify Register */
+#define	DMA2_CURR_DESC_PTR		0xFFC00CA0	/* DMA Channel 2 Current Descriptor Pointer Register */
+#define	DMA2_CURR_ADDR			0xFFC00CA4	/* DMA Channel 2 Current Address Register */
+#define	DMA2_IRQ_STATUS			0xFFC00CA8	/* DMA Channel 2 Interrupt/Status Register */
+#define	DMA2_PERIPHERAL_MAP		0xFFC00CAC	/* DMA Channel 2 Peripheral Map	Register */
+#define	DMA2_CURR_X_COUNT		0xFFC00CB0	/* DMA Channel 2 Current X Count Register */
+#define	DMA2_CURR_Y_COUNT		0xFFC00CB8	/* DMA Channel 2 Current Y Count Register */
+
+#define	DMA3_NEXT_DESC_PTR		0xFFC00CC0	/* DMA Channel 3 Next Descriptor Pointer Register */
+#define	DMA3_START_ADDR			0xFFC00CC4	/* DMA Channel 3 Start Address Register */
+#define	DMA3_CONFIG				0xFFC00CC8	/* DMA Channel 3 Configuration Register */
+#define	DMA3_X_COUNT			0xFFC00CD0	/* DMA Channel 3 X Count Register */
+#define	DMA3_X_MODIFY			0xFFC00CD4	/* DMA Channel 3 X Modify Register */
+#define	DMA3_Y_COUNT			0xFFC00CD8	/* DMA Channel 3 Y Count Register */
+#define	DMA3_Y_MODIFY			0xFFC00CDC	/* DMA Channel 3 Y Modify Register */
+#define	DMA3_CURR_DESC_PTR		0xFFC00CE0	/* DMA Channel 3 Current Descriptor Pointer Register */
+#define	DMA3_CURR_ADDR			0xFFC00CE4	/* DMA Channel 3 Current Address Register */
+#define	DMA3_IRQ_STATUS			0xFFC00CE8	/* DMA Channel 3 Interrupt/Status Register */
+#define	DMA3_PERIPHERAL_MAP		0xFFC00CEC	/* DMA Channel 3 Peripheral Map	Register */
+#define	DMA3_CURR_X_COUNT		0xFFC00CF0	/* DMA Channel 3 Current X Count Register */
+#define	DMA3_CURR_Y_COUNT		0xFFC00CF8	/* DMA Channel 3 Current Y Count Register */
+
+#define	DMA4_NEXT_DESC_PTR		0xFFC00D00	/* DMA Channel 4 Next Descriptor Pointer Register */
+#define	DMA4_START_ADDR			0xFFC00D04	/* DMA Channel 4 Start Address Register */
+#define	DMA4_CONFIG				0xFFC00D08	/* DMA Channel 4 Configuration Register */
+#define	DMA4_X_COUNT			0xFFC00D10	/* DMA Channel 4 X Count Register */
+#define	DMA4_X_MODIFY			0xFFC00D14	/* DMA Channel 4 X Modify Register */
+#define	DMA4_Y_COUNT			0xFFC00D18	/* DMA Channel 4 Y Count Register */
+#define	DMA4_Y_MODIFY			0xFFC00D1C	/* DMA Channel 4 Y Modify Register */
+#define	DMA4_CURR_DESC_PTR		0xFFC00D20	/* DMA Channel 4 Current Descriptor Pointer Register */
+#define	DMA4_CURR_ADDR			0xFFC00D24	/* DMA Channel 4 Current Address Register */
+#define	DMA4_IRQ_STATUS			0xFFC00D28	/* DMA Channel 4 Interrupt/Status Register */
+#define	DMA4_PERIPHERAL_MAP		0xFFC00D2C	/* DMA Channel 4 Peripheral Map	Register */
+#define	DMA4_CURR_X_COUNT		0xFFC00D30	/* DMA Channel 4 Current X Count Register */
+#define	DMA4_CURR_Y_COUNT		0xFFC00D38	/* DMA Channel 4 Current Y Count Register */
+
+#define	DMA5_NEXT_DESC_PTR		0xFFC00D40	/* DMA Channel 5 Next Descriptor Pointer Register */
+#define	DMA5_START_ADDR			0xFFC00D44	/* DMA Channel 5 Start Address Register */
+#define	DMA5_CONFIG				0xFFC00D48	/* DMA Channel 5 Configuration Register */
+#define	DMA5_X_COUNT			0xFFC00D50	/* DMA Channel 5 X Count Register */
+#define	DMA5_X_MODIFY			0xFFC00D54	/* DMA Channel 5 X Modify Register */
+#define	DMA5_Y_COUNT			0xFFC00D58	/* DMA Channel 5 Y Count Register */
+#define	DMA5_Y_MODIFY			0xFFC00D5C	/* DMA Channel 5 Y Modify Register */
+#define	DMA5_CURR_DESC_PTR		0xFFC00D60	/* DMA Channel 5 Current Descriptor Pointer Register */
+#define	DMA5_CURR_ADDR			0xFFC00D64	/* DMA Channel 5 Current Address Register */
+#define	DMA5_IRQ_STATUS			0xFFC00D68	/* DMA Channel 5 Interrupt/Status Register */
+#define	DMA5_PERIPHERAL_MAP		0xFFC00D6C	/* DMA Channel 5 Peripheral Map	Register */
+#define	DMA5_CURR_X_COUNT		0xFFC00D70	/* DMA Channel 5 Current X Count Register */
+#define	DMA5_CURR_Y_COUNT		0xFFC00D78	/* DMA Channel 5 Current Y Count Register */
+
+#define	DMA6_NEXT_DESC_PTR		0xFFC00D80	/* DMA Channel 6 Next Descriptor Pointer Register */
+#define	DMA6_START_ADDR			0xFFC00D84	/* DMA Channel 6 Start Address Register */
+#define	DMA6_CONFIG				0xFFC00D88	/* DMA Channel 6 Configuration Register */
+#define	DMA6_X_COUNT			0xFFC00D90	/* DMA Channel 6 X Count Register */
+#define	DMA6_X_MODIFY			0xFFC00D94	/* DMA Channel 6 X Modify Register */
+#define	DMA6_Y_COUNT			0xFFC00D98	/* DMA Channel 6 Y Count Register */
+#define	DMA6_Y_MODIFY			0xFFC00D9C	/* DMA Channel 6 Y Modify Register */
+#define	DMA6_CURR_DESC_PTR		0xFFC00DA0	/* DMA Channel 6 Current Descriptor Pointer Register */
+#define	DMA6_CURR_ADDR			0xFFC00DA4	/* DMA Channel 6 Current Address Register */
+#define	DMA6_IRQ_STATUS			0xFFC00DA8	/* DMA Channel 6 Interrupt/Status Register */
+#define	DMA6_PERIPHERAL_MAP		0xFFC00DAC	/* DMA Channel 6 Peripheral Map	Register */
+#define	DMA6_CURR_X_COUNT		0xFFC00DB0	/* DMA Channel 6 Current X Count Register */
+#define	DMA6_CURR_Y_COUNT		0xFFC00DB8	/* DMA Channel 6 Current Y Count Register */
+
+#define	DMA7_NEXT_DESC_PTR		0xFFC00DC0	/* DMA Channel 7 Next Descriptor Pointer Register */
+#define	DMA7_START_ADDR			0xFFC00DC4	/* DMA Channel 7 Start Address Register */
+#define	DMA7_CONFIG				0xFFC00DC8	/* DMA Channel 7 Configuration Register */
+#define	DMA7_X_COUNT			0xFFC00DD0	/* DMA Channel 7 X Count Register */
+#define	DMA7_X_MODIFY			0xFFC00DD4	/* DMA Channel 7 X Modify Register */
+#define	DMA7_Y_COUNT			0xFFC00DD8	/* DMA Channel 7 Y Count Register */
+#define	DMA7_Y_MODIFY			0xFFC00DDC	/* DMA Channel 7 Y Modify Register */
+#define	DMA7_CURR_DESC_PTR		0xFFC00DE0	/* DMA Channel 7 Current Descriptor Pointer Register */
+#define	DMA7_CURR_ADDR			0xFFC00DE4	/* DMA Channel 7 Current Address Register */
+#define	DMA7_IRQ_STATUS			0xFFC00DE8	/* DMA Channel 7 Interrupt/Status Register */
+#define	DMA7_PERIPHERAL_MAP		0xFFC00DEC	/* DMA Channel 7 Peripheral Map	Register */
+#define	DMA7_CURR_X_COUNT		0xFFC00DF0	/* DMA Channel 7 Current X Count Register */
+#define	DMA7_CURR_Y_COUNT		0xFFC00DF8	/* DMA Channel 7 Current Y Count Register */
+
+#define	MDMA0_D0_NEXT_DESC_PTR	0xFFC00E00	/* MemDMA0 Stream 0 Destination	Next Descriptor	Pointer	Register */
+#define	MDMA0_D0_START_ADDR		0xFFC00E04	/* MemDMA0 Stream 0 Destination	Start Address Register */
+#define	MDMA0_D0_CONFIG			0xFFC00E08	/* MemDMA0 Stream 0 Destination	Configuration Register */
+#define	MDMA0_D0_X_COUNT		0xFFC00E10	/* MemDMA0 Stream 0 Destination	X Count	Register */
+#define	MDMA0_D0_X_MODIFY		0xFFC00E14	/* MemDMA0 Stream 0 Destination	X Modify Register */
+#define	MDMA0_D0_Y_COUNT		0xFFC00E18	/* MemDMA0 Stream 0 Destination	Y Count	Register */
+#define	MDMA0_D0_Y_MODIFY		0xFFC00E1C	/* MemDMA0 Stream 0 Destination	Y Modify Register */
+#define	MDMA0_D0_CURR_DESC_PTR	0xFFC00E20	/* MemDMA0 Stream 0 Destination	Current	Descriptor Pointer Register */
+#define	MDMA0_D0_CURR_ADDR		0xFFC00E24	/* MemDMA0 Stream 0 Destination	Current	Address	Register */
+#define	MDMA0_D0_IRQ_STATUS		0xFFC00E28	/* MemDMA0 Stream 0 Destination	Interrupt/Status Register */
+#define	MDMA0_D0_PERIPHERAL_MAP	0xFFC00E2C	/* MemDMA0 Stream 0 Destination	Peripheral Map Register */
+#define	MDMA0_D0_CURR_X_COUNT	0xFFC00E30	/* MemDMA0 Stream 0 Destination	Current	X Count	Register */
+#define	MDMA0_D0_CURR_Y_COUNT	0xFFC00E38	/* MemDMA0 Stream 0 Destination	Current	Y Count	Register */
+
+#define	MDMA0_S0_NEXT_DESC_PTR	0xFFC00E40	/* MemDMA0 Stream 0 Source Next	Descriptor Pointer Register */
+#define	MDMA0_S0_START_ADDR		0xFFC00E44	/* MemDMA0 Stream 0 Source Start Address Register */
+#define	MDMA0_S0_CONFIG			0xFFC00E48	/* MemDMA0 Stream 0 Source Configuration Register */
+#define	MDMA0_S0_X_COUNT		0xFFC00E50	/* MemDMA0 Stream 0 Source X Count Register */
+#define	MDMA0_S0_X_MODIFY		0xFFC00E54	/* MemDMA0 Stream 0 Source X Modify Register */
+#define	MDMA0_S0_Y_COUNT		0xFFC00E58	/* MemDMA0 Stream 0 Source Y Count Register */
+#define	MDMA0_S0_Y_MODIFY		0xFFC00E5C	/* MemDMA0 Stream 0 Source Y Modify Register */
+#define	MDMA0_S0_CURR_DESC_PTR	0xFFC00E60	/* MemDMA0 Stream 0 Source Current Descriptor Pointer Register */
+#define	MDMA0_S0_CURR_ADDR		0xFFC00E64	/* MemDMA0 Stream 0 Source Current Address Register */
+#define	MDMA0_S0_IRQ_STATUS		0xFFC00E68	/* MemDMA0 Stream 0 Source Interrupt/Status Register */
+#define	MDMA0_S0_PERIPHERAL_MAP	0xFFC00E6C	/* MemDMA0 Stream 0 Source Peripheral Map Register */
+#define	MDMA0_S0_CURR_X_COUNT	0xFFC00E70	/* MemDMA0 Stream 0 Source Current X Count Register */
+#define	MDMA0_S0_CURR_Y_COUNT	0xFFC00E78	/* MemDMA0 Stream 0 Source Current Y Count Register */
+
+#define	MDMA0_D1_NEXT_DESC_PTR	0xFFC00E80	/* MemDMA0 Stream 1 Destination	Next Descriptor	Pointer	Register */
+#define	MDMA0_D1_START_ADDR		0xFFC00E84	/* MemDMA0 Stream 1 Destination	Start Address Register */
+#define	MDMA0_D1_CONFIG			0xFFC00E88	/* MemDMA0 Stream 1 Destination	Configuration Register */
+#define	MDMA0_D1_X_COUNT		0xFFC00E90	/* MemDMA0 Stream 1 Destination	X Count	Register */
+#define	MDMA0_D1_X_MODIFY		0xFFC00E94	/* MemDMA0 Stream 1 Destination	X Modify Register */
+#define	MDMA0_D1_Y_COUNT		0xFFC00E98	/* MemDMA0 Stream 1 Destination	Y Count	Register */
+#define	MDMA0_D1_Y_MODIFY		0xFFC00E9C	/* MemDMA0 Stream 1 Destination	Y Modify Register */
+#define	MDMA0_D1_CURR_DESC_PTR	0xFFC00EA0	/* MemDMA0 Stream 1 Destination	Current	Descriptor Pointer Register */
+#define	MDMA0_D1_CURR_ADDR		0xFFC00EA4	/* MemDMA0 Stream 1 Destination	Current	Address	Register */
+#define	MDMA0_D1_IRQ_STATUS		0xFFC00EA8	/* MemDMA0 Stream 1 Destination	Interrupt/Status Register */
+#define	MDMA0_D1_PERIPHERAL_MAP	0xFFC00EAC	/* MemDMA0 Stream 1 Destination	Peripheral Map Register */
+#define	MDMA0_D1_CURR_X_COUNT	0xFFC00EB0	/* MemDMA0 Stream 1 Destination	Current	X Count	Register */
+#define	MDMA0_D1_CURR_Y_COUNT	0xFFC00EB8	/* MemDMA0 Stream 1 Destination	Current	Y Count	Register */
+
+#define	MDMA0_S1_NEXT_DESC_PTR	0xFFC00EC0	/* MemDMA0 Stream 1 Source Next	Descriptor Pointer Register */
+#define	MDMA0_S1_START_ADDR		0xFFC00EC4	/* MemDMA0 Stream 1 Source Start Address Register */
+#define	MDMA0_S1_CONFIG			0xFFC00EC8	/* MemDMA0 Stream 1 Source Configuration Register */
+#define	MDMA0_S1_X_COUNT		0xFFC00ED0	/* MemDMA0 Stream 1 Source X Count Register */
+#define	MDMA0_S1_X_MODIFY		0xFFC00ED4	/* MemDMA0 Stream 1 Source X Modify Register */
+#define	MDMA0_S1_Y_COUNT		0xFFC00ED8	/* MemDMA0 Stream 1 Source Y Count Register */
+#define	MDMA0_S1_Y_MODIFY		0xFFC00EDC	/* MemDMA0 Stream 1 Source Y Modify Register */
+#define	MDMA0_S1_CURR_DESC_PTR	0xFFC00EE0	/* MemDMA0 Stream 1 Source Current Descriptor Pointer Register */
+#define	MDMA0_S1_CURR_ADDR		0xFFC00EE4	/* MemDMA0 Stream 1 Source Current Address Register */
+#define	MDMA0_S1_IRQ_STATUS		0xFFC00EE8	/* MemDMA0 Stream 1 Source Interrupt/Status Register */
+#define	MDMA0_S1_PERIPHERAL_MAP	0xFFC00EEC	/* MemDMA0 Stream 1 Source Peripheral Map Register */
+#define	MDMA0_S1_CURR_X_COUNT	0xFFC00EF0	/* MemDMA0 Stream 1 Source Current X Count Register */
+#define	MDMA0_S1_CURR_Y_COUNT	0xFFC00EF8	/* MemDMA0 Stream 1 Source Current Y Count Register */
+
+
+/* Parallel Peripheral Interface (PPI) (0xFFC01000 - 0xFFC010FF) */
+#define	PPI_CONTROL			0xFFC01000	/* PPI Control Register */
+#define	PPI_STATUS			0xFFC01004	/* PPI Status Register */
+#define	PPI_COUNT			0xFFC01008	/* PPI Transfer	Count Register */
+#define	PPI_DELAY			0xFFC0100C	/* PPI Delay Count Register */
+#define	PPI_FRAME			0xFFC01010	/* PPI Frame Length Register */
+
+
+/* Two-Wire Interface 0	(0xFFC01400 - 0xFFC014FF)			 */
+#define	TWI0_CLKDIV			0xFFC01400	/* Serial Clock	Divider	Register */
+#define	TWI0_CONTROL		0xFFC01404	/* TWI0	Master Internal	Time Reference Register */
+#define	TWI0_SLAVE_CTRL		0xFFC01408	/* Slave Mode Control Register */
+#define	TWI0_SLAVE_STAT		0xFFC0140C	/* Slave Mode Status Register */
+#define	TWI0_SLAVE_ADDR		0xFFC01410	/* Slave Mode Address Register */
+#define	TWI0_MASTER_CTRL	0xFFC01414	/* Master Mode Control Register */
+#define	TWI0_MASTER_STAT	0xFFC01418	/* Master Mode Status Register */
+#define	TWI0_MASTER_ADDR	0xFFC0141C	/* Master Mode Address Register */
+#define	TWI0_INT_STAT		0xFFC01420	/* TWI0	Master Interrupt Register */
+#define	TWI0_INT_MASK		0xFFC01424	/* TWI0	Master Interrupt Mask Register */
+#define	TWI0_FIFO_CTRL		0xFFC01428	/* FIFO	Control	Register */
+#define	TWI0_FIFO_STAT		0xFFC0142C	/* FIFO	Status Register */
+#define	TWI0_XMT_DATA8		0xFFC01480	/* FIFO	Transmit Data Single Byte Register */
+#define	TWI0_XMT_DATA16		0xFFC01484	/* FIFO	Transmit Data Double Byte Register */
+#define	TWI0_RCV_DATA8		0xFFC01488	/* FIFO	Receive	Data Single Byte Register */
+#define	TWI0_RCV_DATA16		0xFFC0148C	/* FIFO	Receive	Data Double Byte Register */
+
+#define TWI0_REGBASE		TWI0_CLKDIV
+
+/* the following are for backwards compatibility */
+#define	TWI0_PRESCALE	 TWI0_CONTROL
+#define	TWI0_INT_SRC	 TWI0_INT_STAT
+#define	TWI0_INT_ENABLE	 TWI0_INT_MASK
+
+
+/* General-Purpose Ports  (0xFFC01500 -	0xFFC015FF)	 */
+
+/* GPIO	Port C Register	Names */
+#define	GPIO_C_CNFG			0xFFC01500	/* GPIO	Pin Port C Configuration Register */
+#define	GPIO_C_D			0xFFC01510	/* GPIO	Pin Port C Data	Register */
+#define	GPIO_C_C			0xFFC01520	/* Clear GPIO Pin Port C Register */
+#define	GPIO_C_S			0xFFC01530	/* Set GPIO Pin	Port C Register */
+#define	GPIO_C_T			0xFFC01540	/* Toggle GPIO Pin Port	C Register */
+#define	GPIO_C_DIR			0xFFC01550	/* GPIO	Pin Port C Direction Register */
+#define	GPIO_C_INEN			0xFFC01560	/* GPIO	Pin Port C Input Enable	Register */
+
+/* GPIO	Port D Register	Names */
+#define	GPIO_D_CNFG			0xFFC01504	/* GPIO	Pin Port D Configuration Register */
+#define	GPIO_D_D			0xFFC01514	/* GPIO	Pin Port D Data	Register */
+#define	GPIO_D_C			0xFFC01524	/* Clear GPIO Pin Port D Register */
+#define	GPIO_D_S			0xFFC01534	/* Set GPIO Pin	Port D Register */
+#define	GPIO_D_T			0xFFC01544	/* Toggle GPIO Pin Port	D Register */
+#define	GPIO_D_DIR			0xFFC01554	/* GPIO	Pin Port D Direction Register */
+#define	GPIO_D_INEN			0xFFC01564	/* GPIO	Pin Port D Input Enable	Register */
+
+/* GPIO	Port E Register	Names */
+#define	GPIO_E_CNFG			0xFFC01508	/* GPIO	Pin Port E Configuration Register */
+#define	GPIO_E_D			0xFFC01518	/* GPIO	Pin Port E Data	Register */
+#define	GPIO_E_C			0xFFC01528	/* Clear GPIO Pin Port E Register */
+#define	GPIO_E_S			0xFFC01538	/* Set GPIO Pin	Port E Register */
+#define	GPIO_E_T			0xFFC01548	/* Toggle GPIO Pin Port	E Register */
+#define	GPIO_E_DIR			0xFFC01558	/* GPIO	Pin Port E Direction Register */
+#define	GPIO_E_INEN			0xFFC01568	/* GPIO	Pin Port E Input Enable	Register */
+
+/* DMA Controller 1 Traffic Control Registers (0xFFC01B00 - 0xFFC01BFF) */
+
+#define	DMAC1_TC_PER			0xFFC01B0C	/* DMA Controller 1 Traffic Control Periods Register */
+#define	DMAC1_TC_CNT			0xFFC01B10	/* DMA Controller 1 Traffic Control Current Counts Register */
+
+/* Alternate deprecated	register names (below) provided	for backwards code compatibility */
+#define	DMA1_TCPER			DMAC1_TC_PER
+#define	DMA1_TCCNT			DMAC1_TC_CNT
+
+
+/* DMA Controller 1 (0xFFC01C00	- 0xFFC01FFF)							 */
+#define	DMA8_NEXT_DESC_PTR		0xFFC01C00	/* DMA Channel 8 Next Descriptor Pointer Register */
+#define	DMA8_START_ADDR			0xFFC01C04	/* DMA Channel 8 Start Address Register */
+#define	DMA8_CONFIG				0xFFC01C08	/* DMA Channel 8 Configuration Register */
+#define	DMA8_X_COUNT			0xFFC01C10	/* DMA Channel 8 X Count Register */
+#define	DMA8_X_MODIFY			0xFFC01C14	/* DMA Channel 8 X Modify Register */
+#define	DMA8_Y_COUNT			0xFFC01C18	/* DMA Channel 8 Y Count Register */
+#define	DMA8_Y_MODIFY			0xFFC01C1C	/* DMA Channel 8 Y Modify Register */
+#define	DMA8_CURR_DESC_PTR		0xFFC01C20	/* DMA Channel 8 Current Descriptor Pointer Register */
+#define	DMA8_CURR_ADDR			0xFFC01C24	/* DMA Channel 8 Current Address Register */
+#define	DMA8_IRQ_STATUS			0xFFC01C28	/* DMA Channel 8 Interrupt/Status Register */
+#define	DMA8_PERIPHERAL_MAP		0xFFC01C2C	/* DMA Channel 8 Peripheral Map	Register */
+#define	DMA8_CURR_X_COUNT		0xFFC01C30	/* DMA Channel 8 Current X Count Register */
+#define	DMA8_CURR_Y_COUNT		0xFFC01C38	/* DMA Channel 8 Current Y Count Register */
+
+#define	DMA9_NEXT_DESC_PTR		0xFFC01C40	/* DMA Channel 9 Next Descriptor Pointer Register */
+#define	DMA9_START_ADDR			0xFFC01C44	/* DMA Channel 9 Start Address Register */
+#define	DMA9_CONFIG				0xFFC01C48	/* DMA Channel 9 Configuration Register */
+#define	DMA9_X_COUNT			0xFFC01C50	/* DMA Channel 9 X Count Register */
+#define	DMA9_X_MODIFY			0xFFC01C54	/* DMA Channel 9 X Modify Register */
+#define	DMA9_Y_COUNT			0xFFC01C58	/* DMA Channel 9 Y Count Register */
+#define	DMA9_Y_MODIFY			0xFFC01C5C	/* DMA Channel 9 Y Modify Register */
+#define	DMA9_CURR_DESC_PTR		0xFFC01C60	/* DMA Channel 9 Current Descriptor Pointer Register */
+#define	DMA9_CURR_ADDR			0xFFC01C64	/* DMA Channel 9 Current Address Register */
+#define	DMA9_IRQ_STATUS			0xFFC01C68	/* DMA Channel 9 Interrupt/Status Register */
+#define	DMA9_PERIPHERAL_MAP		0xFFC01C6C	/* DMA Channel 9 Peripheral Map	Register */
+#define	DMA9_CURR_X_COUNT		0xFFC01C70	/* DMA Channel 9 Current X Count Register */
+#define	DMA9_CURR_Y_COUNT		0xFFC01C78	/* DMA Channel 9 Current Y Count Register */
+
+#define	DMA10_NEXT_DESC_PTR		0xFFC01C80	/* DMA Channel 10 Next Descriptor Pointer Register */
+#define	DMA10_START_ADDR		0xFFC01C84	/* DMA Channel 10 Start	Address	Register */
+#define	DMA10_CONFIG			0xFFC01C88	/* DMA Channel 10 Configuration	Register */
+#define	DMA10_X_COUNT			0xFFC01C90	/* DMA Channel 10 X Count Register */
+#define	DMA10_X_MODIFY			0xFFC01C94	/* DMA Channel 10 X Modify Register */
+#define	DMA10_Y_COUNT			0xFFC01C98	/* DMA Channel 10 Y Count Register */
+#define	DMA10_Y_MODIFY			0xFFC01C9C	/* DMA Channel 10 Y Modify Register */
+#define	DMA10_CURR_DESC_PTR		0xFFC01CA0	/* DMA Channel 10 Current Descriptor Pointer Register */
+#define	DMA10_CURR_ADDR			0xFFC01CA4	/* DMA Channel 10 Current Address Register */
+#define	DMA10_IRQ_STATUS		0xFFC01CA8	/* DMA Channel 10 Interrupt/Status Register */
+#define	DMA10_PERIPHERAL_MAP	0xFFC01CAC	/* DMA Channel 10 Peripheral Map Register */
+#define	DMA10_CURR_X_COUNT		0xFFC01CB0	/* DMA Channel 10 Current X Count Register */
+#define	DMA10_CURR_Y_COUNT		0xFFC01CB8	/* DMA Channel 10 Current Y Count Register */
+
+#define	DMA11_NEXT_DESC_PTR		0xFFC01CC0	/* DMA Channel 11 Next Descriptor Pointer Register */
+#define	DMA11_START_ADDR		0xFFC01CC4	/* DMA Channel 11 Start	Address	Register */
+#define	DMA11_CONFIG			0xFFC01CC8	/* DMA Channel 11 Configuration	Register */
+#define	DMA11_X_COUNT			0xFFC01CD0	/* DMA Channel 11 X Count Register */
+#define	DMA11_X_MODIFY			0xFFC01CD4	/* DMA Channel 11 X Modify Register */
+#define	DMA11_Y_COUNT			0xFFC01CD8	/* DMA Channel 11 Y Count Register */
+#define	DMA11_Y_MODIFY			0xFFC01CDC	/* DMA Channel 11 Y Modify Register */
+#define	DMA11_CURR_DESC_PTR		0xFFC01CE0	/* DMA Channel 11 Current Descriptor Pointer Register */
+#define	DMA11_CURR_ADDR			0xFFC01CE4	/* DMA Channel 11 Current Address Register */
+#define	DMA11_IRQ_STATUS		0xFFC01CE8	/* DMA Channel 11 Interrupt/Status Register */
+#define	DMA11_PERIPHERAL_MAP	0xFFC01CEC	/* DMA Channel 11 Peripheral Map Register */
+#define	DMA11_CURR_X_COUNT		0xFFC01CF0	/* DMA Channel 11 Current X Count Register */
+#define	DMA11_CURR_Y_COUNT		0xFFC01CF8	/* DMA Channel 11 Current Y Count Register */
+
+#define	DMA12_NEXT_DESC_PTR		0xFFC01D00	/* DMA Channel 12 Next Descriptor Pointer Register */
+#define	DMA12_START_ADDR		0xFFC01D04	/* DMA Channel 12 Start	Address	Register */
+#define	DMA12_CONFIG			0xFFC01D08	/* DMA Channel 12 Configuration	Register */
+#define	DMA12_X_COUNT			0xFFC01D10	/* DMA Channel 12 X Count Register */
+#define	DMA12_X_MODIFY			0xFFC01D14	/* DMA Channel 12 X Modify Register */
+#define	DMA12_Y_COUNT			0xFFC01D18	/* DMA Channel 12 Y Count Register */
+#define	DMA12_Y_MODIFY			0xFFC01D1C	/* DMA Channel 12 Y Modify Register */
+#define	DMA12_CURR_DESC_PTR		0xFFC01D20	/* DMA Channel 12 Current Descriptor Pointer Register */
+#define	DMA12_CURR_ADDR			0xFFC01D24	/* DMA Channel 12 Current Address Register */
+#define	DMA12_IRQ_STATUS		0xFFC01D28	/* DMA Channel 12 Interrupt/Status Register */
+#define	DMA12_PERIPHERAL_MAP	0xFFC01D2C	/* DMA Channel 12 Peripheral Map Register */
+#define	DMA12_CURR_X_COUNT		0xFFC01D30	/* DMA Channel 12 Current X Count Register */
+#define	DMA12_CURR_Y_COUNT		0xFFC01D38	/* DMA Channel 12 Current Y Count Register */
+
+#define	DMA13_NEXT_DESC_PTR		0xFFC01D40	/* DMA Channel 13 Next Descriptor Pointer Register */
+#define	DMA13_START_ADDR		0xFFC01D44	/* DMA Channel 13 Start	Address	Register */
+#define	DMA13_CONFIG			0xFFC01D48	/* DMA Channel 13 Configuration	Register */
+#define	DMA13_X_COUNT			0xFFC01D50	/* DMA Channel 13 X Count Register */
+#define	DMA13_X_MODIFY			0xFFC01D54	/* DMA Channel 13 X Modify Register */
+#define	DMA13_Y_COUNT			0xFFC01D58	/* DMA Channel 13 Y Count Register */
+#define	DMA13_Y_MODIFY			0xFFC01D5C	/* DMA Channel 13 Y Modify Register */
+#define	DMA13_CURR_DESC_PTR		0xFFC01D60	/* DMA Channel 13 Current Descriptor Pointer Register */
+#define	DMA13_CURR_ADDR			0xFFC01D64	/* DMA Channel 13 Current Address Register */
+#define	DMA13_IRQ_STATUS		0xFFC01D68	/* DMA Channel 13 Interrupt/Status Register */
+#define	DMA13_PERIPHERAL_MAP	0xFFC01D6C	/* DMA Channel 13 Peripheral Map Register */
+#define	DMA13_CURR_X_COUNT		0xFFC01D70	/* DMA Channel 13 Current X Count Register */
+#define	DMA13_CURR_Y_COUNT		0xFFC01D78	/* DMA Channel 13 Current Y Count Register */
+
+#define	DMA14_NEXT_DESC_PTR		0xFFC01D80	/* DMA Channel 14 Next Descriptor Pointer Register */
+#define	DMA14_START_ADDR		0xFFC01D84	/* DMA Channel 14 Start	Address	Register */
+#define	DMA14_CONFIG			0xFFC01D88	/* DMA Channel 14 Configuration	Register */
+#define	DMA14_X_COUNT			0xFFC01D90	/* DMA Channel 14 X Count Register */
+#define	DMA14_X_MODIFY			0xFFC01D94	/* DMA Channel 14 X Modify Register */
+#define	DMA14_Y_COUNT			0xFFC01D98	/* DMA Channel 14 Y Count Register */
+#define	DMA14_Y_MODIFY			0xFFC01D9C	/* DMA Channel 14 Y Modify Register */
+#define	DMA14_CURR_DESC_PTR		0xFFC01DA0	/* DMA Channel 14 Current Descriptor Pointer Register */
+#define	DMA14_CURR_ADDR			0xFFC01DA4	/* DMA Channel 14 Current Address Register */
+#define	DMA14_IRQ_STATUS		0xFFC01DA8	/* DMA Channel 14 Interrupt/Status Register */
+#define	DMA14_PERIPHERAL_MAP	0xFFC01DAC	/* DMA Channel 14 Peripheral Map Register */
+#define	DMA14_CURR_X_COUNT		0xFFC01DB0	/* DMA Channel 14 Current X Count Register */
+#define	DMA14_CURR_Y_COUNT		0xFFC01DB8	/* DMA Channel 14 Current Y Count Register */
+
+#define	DMA15_NEXT_DESC_PTR		0xFFC01DC0	/* DMA Channel 15 Next Descriptor Pointer Register */
+#define	DMA15_START_ADDR		0xFFC01DC4	/* DMA Channel 15 Start	Address	Register */
+#define	DMA15_CONFIG			0xFFC01DC8	/* DMA Channel 15 Configuration	Register */
+#define	DMA15_X_COUNT			0xFFC01DD0	/* DMA Channel 15 X Count Register */
+#define	DMA15_X_MODIFY			0xFFC01DD4	/* DMA Channel 15 X Modify Register */
+#define	DMA15_Y_COUNT			0xFFC01DD8	/* DMA Channel 15 Y Count Register */
+#define	DMA15_Y_MODIFY			0xFFC01DDC	/* DMA Channel 15 Y Modify Register */
+#define	DMA15_CURR_DESC_PTR		0xFFC01DE0	/* DMA Channel 15 Current Descriptor Pointer Register */
+#define	DMA15_CURR_ADDR			0xFFC01DE4	/* DMA Channel 15 Current Address Register */
+#define	DMA15_IRQ_STATUS		0xFFC01DE8	/* DMA Channel 15 Interrupt/Status Register */
+#define	DMA15_PERIPHERAL_MAP	0xFFC01DEC	/* DMA Channel 15 Peripheral Map Register */
+#define	DMA15_CURR_X_COUNT		0xFFC01DF0	/* DMA Channel 15 Current X Count Register */
+#define	DMA15_CURR_Y_COUNT		0xFFC01DF8	/* DMA Channel 15 Current Y Count Register */
+
+#define	DMA16_NEXT_DESC_PTR		0xFFC01E00	/* DMA Channel 16 Next Descriptor Pointer Register */
+#define	DMA16_START_ADDR		0xFFC01E04	/* DMA Channel 16 Start	Address	Register */
+#define	DMA16_CONFIG			0xFFC01E08	/* DMA Channel 16 Configuration	Register */
+#define	DMA16_X_COUNT			0xFFC01E10	/* DMA Channel 16 X Count Register */
+#define	DMA16_X_MODIFY			0xFFC01E14	/* DMA Channel 16 X Modify Register */
+#define	DMA16_Y_COUNT			0xFFC01E18	/* DMA Channel 16 Y Count Register */
+#define	DMA16_Y_MODIFY			0xFFC01E1C	/* DMA Channel 16 Y Modify Register */
+#define	DMA16_CURR_DESC_PTR		0xFFC01E20	/* DMA Channel 16 Current Descriptor Pointer Register */
+#define	DMA16_CURR_ADDR			0xFFC01E24	/* DMA Channel 16 Current Address Register */
+#define	DMA16_IRQ_STATUS		0xFFC01E28	/* DMA Channel 16 Interrupt/Status Register */
+#define	DMA16_PERIPHERAL_MAP	0xFFC01E2C	/* DMA Channel 16 Peripheral Map Register */
+#define	DMA16_CURR_X_COUNT		0xFFC01E30	/* DMA Channel 16 Current X Count Register */
+#define	DMA16_CURR_Y_COUNT		0xFFC01E38	/* DMA Channel 16 Current Y Count Register */
+
+#define	DMA17_NEXT_DESC_PTR		0xFFC01E40	/* DMA Channel 17 Next Descriptor Pointer Register */
+#define	DMA17_START_ADDR		0xFFC01E44	/* DMA Channel 17 Start	Address	Register */
+#define	DMA17_CONFIG			0xFFC01E48	/* DMA Channel 17 Configuration	Register */
+#define	DMA17_X_COUNT			0xFFC01E50	/* DMA Channel 17 X Count Register */
+#define	DMA17_X_MODIFY			0xFFC01E54	/* DMA Channel 17 X Modify Register */
+#define	DMA17_Y_COUNT			0xFFC01E58	/* DMA Channel 17 Y Count Register */
+#define	DMA17_Y_MODIFY			0xFFC01E5C	/* DMA Channel 17 Y Modify Register */
+#define	DMA17_CURR_DESC_PTR		0xFFC01E60	/* DMA Channel 17 Current Descriptor Pointer Register */
+#define	DMA17_CURR_ADDR			0xFFC01E64	/* DMA Channel 17 Current Address Register */
+#define	DMA17_IRQ_STATUS		0xFFC01E68	/* DMA Channel 17 Interrupt/Status Register */
+#define	DMA17_PERIPHERAL_MAP	0xFFC01E6C	/* DMA Channel 17 Peripheral Map Register */
+#define	DMA17_CURR_X_COUNT		0xFFC01E70	/* DMA Channel 17 Current X Count Register */
+#define	DMA17_CURR_Y_COUNT		0xFFC01E78	/* DMA Channel 17 Current Y Count Register */
+
+#define	DMA18_NEXT_DESC_PTR		0xFFC01E80	/* DMA Channel 18 Next Descriptor Pointer Register */
+#define	DMA18_START_ADDR		0xFFC01E84	/* DMA Channel 18 Start	Address	Register */
+#define	DMA18_CONFIG			0xFFC01E88	/* DMA Channel 18 Configuration	Register */
+#define	DMA18_X_COUNT			0xFFC01E90	/* DMA Channel 18 X Count Register */
+#define	DMA18_X_MODIFY			0xFFC01E94	/* DMA Channel 18 X Modify Register */
+#define	DMA18_Y_COUNT			0xFFC01E98	/* DMA Channel 18 Y Count Register */
+#define	DMA18_Y_MODIFY			0xFFC01E9C	/* DMA Channel 18 Y Modify Register */
+#define	DMA18_CURR_DESC_PTR		0xFFC01EA0	/* DMA Channel 18 Current Descriptor Pointer Register */
+#define	DMA18_CURR_ADDR			0xFFC01EA4	/* DMA Channel 18 Current Address Register */
+#define	DMA18_IRQ_STATUS		0xFFC01EA8	/* DMA Channel 18 Interrupt/Status Register */
+#define	DMA18_PERIPHERAL_MAP	0xFFC01EAC	/* DMA Channel 18 Peripheral Map Register */
+#define	DMA18_CURR_X_COUNT		0xFFC01EB0	/* DMA Channel 18 Current X Count Register */
+#define	DMA18_CURR_Y_COUNT		0xFFC01EB8	/* DMA Channel 18 Current Y Count Register */
+
+#define	DMA19_NEXT_DESC_PTR		0xFFC01EC0	/* DMA Channel 19 Next Descriptor Pointer Register */
+#define	DMA19_START_ADDR		0xFFC01EC4	/* DMA Channel 19 Start	Address	Register */
+#define	DMA19_CONFIG			0xFFC01EC8	/* DMA Channel 19 Configuration	Register */
+#define	DMA19_X_COUNT			0xFFC01ED0	/* DMA Channel 19 X Count Register */
+#define	DMA19_X_MODIFY			0xFFC01ED4	/* DMA Channel 19 X Modify Register */
+#define	DMA19_Y_COUNT			0xFFC01ED8	/* DMA Channel 19 Y Count Register */
+#define	DMA19_Y_MODIFY			0xFFC01EDC	/* DMA Channel 19 Y Modify Register */
+#define	DMA19_CURR_DESC_PTR		0xFFC01EE0	/* DMA Channel 19 Current Descriptor Pointer Register */
+#define	DMA19_CURR_ADDR			0xFFC01EE4	/* DMA Channel 19 Current Address Register */
+#define	DMA19_IRQ_STATUS		0xFFC01EE8	/* DMA Channel 19 Interrupt/Status Register */
+#define	DMA19_PERIPHERAL_MAP	0xFFC01EEC	/* DMA Channel 19 Peripheral Map Register */
+#define	DMA19_CURR_X_COUNT		0xFFC01EF0	/* DMA Channel 19 Current X Count Register */
+#define	DMA19_CURR_Y_COUNT		0xFFC01EF8	/* DMA Channel 19 Current Y Count Register */
+
+#define	MDMA1_D0_NEXT_DESC_PTR	0xFFC01F00	/* MemDMA1 Stream 0 Destination	Next Descriptor	Pointer	Register */
+#define	MDMA1_D0_START_ADDR		0xFFC01F04	/* MemDMA1 Stream 0 Destination	Start Address Register */
+#define	MDMA1_D0_CONFIG			0xFFC01F08	/* MemDMA1 Stream 0 Destination	Configuration Register */
+#define	MDMA1_D0_X_COUNT		0xFFC01F10	/* MemDMA1 Stream 0 Destination	X Count	Register */
+#define	MDMA1_D0_X_MODIFY		0xFFC01F14	/* MemDMA1 Stream 0 Destination	X Modify Register */
+#define	MDMA1_D0_Y_COUNT		0xFFC01F18	/* MemDMA1 Stream 0 Destination	Y Count	Register */
+#define	MDMA1_D0_Y_MODIFY		0xFFC01F1C	/* MemDMA1 Stream 0 Destination	Y Modify Register */
+#define	MDMA1_D0_CURR_DESC_PTR	0xFFC01F20	/* MemDMA1 Stream 0 Destination	Current	Descriptor Pointer Register */
+#define	MDMA1_D0_CURR_ADDR		0xFFC01F24	/* MemDMA1 Stream 0 Destination	Current	Address	Register */
+#define	MDMA1_D0_IRQ_STATUS		0xFFC01F28	/* MemDMA1 Stream 0 Destination	Interrupt/Status Register */
+#define	MDMA1_D0_PERIPHERAL_MAP	0xFFC01F2C	/* MemDMA1 Stream 0 Destination	Peripheral Map Register */
+#define	MDMA1_D0_CURR_X_COUNT	0xFFC01F30	/* MemDMA1 Stream 0 Destination	Current	X Count	Register */
+#define	MDMA1_D0_CURR_Y_COUNT	0xFFC01F38	/* MemDMA1 Stream 0 Destination	Current	Y Count	Register */
+
+#define	MDMA1_S0_NEXT_DESC_PTR	0xFFC01F40	/* MemDMA1 Stream 0 Source Next	Descriptor Pointer Register */
+#define	MDMA1_S0_START_ADDR		0xFFC01F44	/* MemDMA1 Stream 0 Source Start Address Register */
+#define	MDMA1_S0_CONFIG			0xFFC01F48	/* MemDMA1 Stream 0 Source Configuration Register */
+#define	MDMA1_S0_X_COUNT		0xFFC01F50	/* MemDMA1 Stream 0 Source X Count Register */
+#define	MDMA1_S0_X_MODIFY		0xFFC01F54	/* MemDMA1 Stream 0 Source X Modify Register */
+#define	MDMA1_S0_Y_COUNT		0xFFC01F58	/* MemDMA1 Stream 0 Source Y Count Register */
+#define	MDMA1_S0_Y_MODIFY		0xFFC01F5C	/* MemDMA1 Stream 0 Source Y Modify Register */
+#define	MDMA1_S0_CURR_DESC_PTR	0xFFC01F60	/* MemDMA1 Stream 0 Source Current Descriptor Pointer Register */
+#define	MDMA1_S0_CURR_ADDR		0xFFC01F64	/* MemDMA1 Stream 0 Source Current Address Register */
+#define	MDMA1_S0_IRQ_STATUS		0xFFC01F68	/* MemDMA1 Stream 0 Source Interrupt/Status Register */
+#define	MDMA1_S0_PERIPHERAL_MAP	0xFFC01F6C	/* MemDMA1 Stream 0 Source Peripheral Map Register */
+#define	MDMA1_S0_CURR_X_COUNT	0xFFC01F70	/* MemDMA1 Stream 0 Source Current X Count Register */
+#define	MDMA1_S0_CURR_Y_COUNT	0xFFC01F78	/* MemDMA1 Stream 0 Source Current Y Count Register */
+
+#define	MDMA1_D1_NEXT_DESC_PTR	0xFFC01F80	/* MemDMA1 Stream 1 Destination	Next Descriptor	Pointer	Register */
+#define	MDMA1_D1_START_ADDR		0xFFC01F84	/* MemDMA1 Stream 1 Destination	Start Address Register */
+#define	MDMA1_D1_CONFIG			0xFFC01F88	/* MemDMA1 Stream 1 Destination	Configuration Register */
+#define	MDMA1_D1_X_COUNT		0xFFC01F90	/* MemDMA1 Stream 1 Destination	X Count	Register */
+#define	MDMA1_D1_X_MODIFY		0xFFC01F94	/* MemDMA1 Stream 1 Destination	X Modify Register */
+#define	MDMA1_D1_Y_COUNT		0xFFC01F98	/* MemDMA1 Stream 1 Destination	Y Count	Register */
+#define	MDMA1_D1_Y_MODIFY		0xFFC01F9C	/* MemDMA1 Stream 1 Destination	Y Modify Register */
+#define	MDMA1_D1_CURR_DESC_PTR	0xFFC01FA0	/* MemDMA1 Stream 1 Destination	Current	Descriptor Pointer Register */
+#define	MDMA1_D1_CURR_ADDR		0xFFC01FA4	/* MemDMA1 Stream 1 Destination	Current	Address	Register */
+#define	MDMA1_D1_IRQ_STATUS		0xFFC01FA8	/* MemDMA1 Stream 1 Destination	Interrupt/Status Register */
+#define	MDMA1_D1_PERIPHERAL_MAP	0xFFC01FAC	/* MemDMA1 Stream 1 Destination	Peripheral Map Register */
+#define	MDMA1_D1_CURR_X_COUNT	0xFFC01FB0	/* MemDMA1 Stream 1 Destination	Current	X Count	Register */
+#define	MDMA1_D1_CURR_Y_COUNT	0xFFC01FB8	/* MemDMA1 Stream 1 Destination	Current	Y Count	Register */
+
+#define	MDMA1_S1_NEXT_DESC_PTR	0xFFC01FC0	/* MemDMA1 Stream 1 Source Next	Descriptor Pointer Register */
+#define	MDMA1_S1_START_ADDR		0xFFC01FC4	/* MemDMA1 Stream 1 Source Start Address Register */
+#define	MDMA1_S1_CONFIG			0xFFC01FC8	/* MemDMA1 Stream 1 Source Configuration Register */
+#define	MDMA1_S1_X_COUNT		0xFFC01FD0	/* MemDMA1 Stream 1 Source X Count Register */
+#define	MDMA1_S1_X_MODIFY		0xFFC01FD4	/* MemDMA1 Stream 1 Source X Modify Register */
+#define	MDMA1_S1_Y_COUNT		0xFFC01FD8	/* MemDMA1 Stream 1 Source Y Count Register */
+#define	MDMA1_S1_Y_MODIFY		0xFFC01FDC	/* MemDMA1 Stream 1 Source Y Modify Register */
+#define	MDMA1_S1_CURR_DESC_PTR	0xFFC01FE0	/* MemDMA1 Stream 1 Source Current Descriptor Pointer Register */
+#define	MDMA1_S1_CURR_ADDR		0xFFC01FE4	/* MemDMA1 Stream 1 Source Current Address Register */
+#define	MDMA1_S1_IRQ_STATUS		0xFFC01FE8	/* MemDMA1 Stream 1 Source Interrupt/Status Register */
+#define	MDMA1_S1_PERIPHERAL_MAP	0xFFC01FEC	/* MemDMA1 Stream 1 Source Peripheral Map Register */
+#define	MDMA1_S1_CURR_X_COUNT	0xFFC01FF0	/* MemDMA1 Stream 1 Source Current X Count Register */
+#define	MDMA1_S1_CURR_Y_COUNT	0xFFC01FF8	/* MemDMA1 Stream 1 Source Current Y Count Register */
+
+
+/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)	 */
+#define	UART1_THR			0xFFC02000	/* Transmit Holding register */
+#define	UART1_RBR			0xFFC02000	/* Receive Buffer register */
+#define	UART1_DLL			0xFFC02000	/* Divisor Latch (Low-Byte) */
+#define	UART1_IER			0xFFC02004	/* Interrupt Enable Register */
+#define	UART1_DLH			0xFFC02004	/* Divisor Latch (High-Byte) */
+#define	UART1_IIR			0xFFC02008	/* Interrupt Identification Register */
+#define	UART1_LCR			0xFFC0200C	/* Line	Control	Register */
+#define	UART1_MCR			0xFFC02010	/* Modem Control Register */
+#define	UART1_LSR			0xFFC02014	/* Line	Status Register */
+#define	UART1_SCR			0xFFC0201C	/* SCR Scratch Register */
+#define	UART1_GCTL			0xFFC02024	/* Global Control Register */
+
+
+/* UART2 Controller		(0xFFC02100 - 0xFFC021FF)	 */
+#define	UART2_THR			0xFFC02100	/* Transmit Holding register */
+#define	UART2_RBR			0xFFC02100	/* Receive Buffer register */
+#define	UART2_DLL			0xFFC02100	/* Divisor Latch (Low-Byte) */
+#define	UART2_IER			0xFFC02104	/* Interrupt Enable Register */
+#define	UART2_DLH			0xFFC02104	/* Divisor Latch (High-Byte) */
+#define	UART2_IIR			0xFFC02108	/* Interrupt Identification Register */
+#define	UART2_LCR			0xFFC0210C	/* Line	Control	Register */
+#define	UART2_MCR			0xFFC02110	/* Modem Control Register */
+#define	UART2_LSR			0xFFC02114	/* Line	Status Register */
+#define	UART2_SCR			0xFFC0211C	/* SCR Scratch Register */
+#define	UART2_GCTL			0xFFC02124	/* Global Control Register */
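+
+/* Editor's note (illustrative, not part of this register map): UARTx_THR,
+ * UARTx_RBR and UARTx_DLL above share one address, as do UARTx_IER and
+ * UARTx_DLH.  As on 16550-style UARTs, which register is visible depends on
+ * the DLAB bit of UARTx_LCR.  A minimal baud-divisor sketch for UART1,
+ * assuming bfin_read16()/bfin_write16() accessors and a DLAB mask of 0x80:
+ *
+ *   bfin_write16(UART1_LCR, bfin_read16(UART1_LCR) | 0x80);   // DLAB = 1
+ *   bfin_write16(UART1_DLL, divisor & 0xFF);
+ *   bfin_write16(UART1_DLH, divisor >> 8);
+ *   bfin_write16(UART1_LCR, bfin_read16(UART1_LCR) & ~0x80);  // DLAB = 0
+ */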
+
+
+/* Two-Wire Interface 1	(0xFFC02200 - 0xFFC022FF)			 */
+#define	TWI1_CLKDIV			0xFFC02200	/* Serial Clock	Divider	Register */
+#define	TWI1_CONTROL		0xFFC02204	/* TWI1	Master Internal	Time Reference Register */
+#define	TWI1_SLAVE_CTRL		0xFFC02208	/* Slave Mode Control Register */
+#define	TWI1_SLAVE_STAT		0xFFC0220C	/* Slave Mode Status Register */
+#define	TWI1_SLAVE_ADDR		0xFFC02210	/* Slave Mode Address Register */
+#define	TWI1_MASTER_CTRL	0xFFC02214	/* Master Mode Control Register */
+#define	TWI1_MASTER_STAT	0xFFC02218	/* Master Mode Status Register */
+#define	TWI1_MASTER_ADDR	0xFFC0221C	/* Master Mode Address Register */
+#define	TWI1_INT_STAT		0xFFC02220	/* TWI1	Master Interrupt Register */
+#define	TWI1_INT_MASK		0xFFC02224	/* TWI1	Master Interrupt Mask Register */
+#define	TWI1_FIFO_CTRL		0xFFC02228	/* FIFO	Control	Register */
+#define	TWI1_FIFO_STAT		0xFFC0222C	/* FIFO	Status Register */
+#define	TWI1_XMT_DATA8		0xFFC02280	/* FIFO	Transmit Data Single Byte Register */
+#define	TWI1_XMT_DATA16		0xFFC02284	/* FIFO	Transmit Data Double Byte Register */
+#define	TWI1_RCV_DATA8		0xFFC02288	/* FIFO	Receive	Data Single Byte Register */
+#define	TWI1_RCV_DATA16		0xFFC0228C	/* FIFO	Receive	Data Double Byte Register */
+#define TWI1_REGBASE		TWI1_CLKDIV
+
+
+/* the following are for backwards compatibility */
+#define	TWI1_PRESCALE	  TWI1_CONTROL
+#define	TWI1_INT_SRC	  TWI1_INT_STAT
+#define	TWI1_INT_ENABLE	  TWI1_INT_MASK
+
+
+/* SPI1	Controller		(0xFFC02300 - 0xFFC023FF)	 */
+#define	SPI1_CTL			0xFFC02300  /* SPI1 Control Register */
+#define	SPI1_FLG			0xFFC02304  /* SPI1 Flag register */
+#define	SPI1_STAT			0xFFC02308  /* SPI1 Status register */
+#define	SPI1_TDBR			0xFFC0230C  /* SPI1 Transmit Data Buffer Register */
+#define	SPI1_RDBR			0xFFC02310  /* SPI1 Receive Data Buffer	Register */
+#define	SPI1_BAUD			0xFFC02314  /* SPI1 Baud rate Register */
+#define	SPI1_SHADOW			0xFFC02318  /* SPI1_RDBR Shadow	Register */
+#define SPI1_REGBASE			SPI1_CTL
+
+/* SPI2	Controller		(0xFFC02400 - 0xFFC024FF)	 */
+#define	SPI2_CTL			0xFFC02400  /* SPI2 Control Register */
+#define	SPI2_FLG			0xFFC02404  /* SPI2 Flag register */
+#define	SPI2_STAT			0xFFC02408  /* SPI2 Status register */
+#define	SPI2_TDBR			0xFFC0240C  /* SPI2 Transmit Data Buffer Register */
+#define	SPI2_RDBR			0xFFC02410  /* SPI2 Receive Data Buffer	Register */
+#define	SPI2_BAUD			0xFFC02414  /* SPI2 Baud rate Register */
+#define	SPI2_SHADOW			0xFFC02418  /* SPI2_RDBR Shadow	Register */
+#define SPI2_REGBASE			SPI2_CTL
+
+/* SPORT2 Controller		(0xFFC02500 - 0xFFC025FF)			 */
+#define	SPORT2_TCR1			0xFFC02500	/* SPORT2 Transmit Configuration 1 Register */
+#define	SPORT2_TCR2			0xFFC02504	/* SPORT2 Transmit Configuration 2 Register */
+#define	SPORT2_TCLKDIV		0xFFC02508	/* SPORT2 Transmit Clock Divider */
+#define	SPORT2_TFSDIV		0xFFC0250C	/* SPORT2 Transmit Frame Sync Divider */
+#define	SPORT2_TX			0xFFC02510	/* SPORT2 TX Data Register */
+#define	SPORT2_RX			0xFFC02518	/* SPORT2 RX Data Register */
+#define	SPORT2_RCR1			0xFFC02520	/* SPORT2 Receive Configuration 1 Register */
+#define	SPORT2_RCR2			0xFFC02524	/* SPORT2 Receive Configuration 2 Register */
+#define	SPORT2_RCLKDIV		0xFFC02528	/* SPORT2 Receive Clock	Divider */
+#define	SPORT2_RFSDIV		0xFFC0252C	/* SPORT2 Receive Frame	Sync Divider */
+#define	SPORT2_STAT			0xFFC02530	/* SPORT2 Status Register */
+#define	SPORT2_CHNL			0xFFC02534	/* SPORT2 Current Channel Register */
+#define	SPORT2_MCMC1		0xFFC02538	/* SPORT2 Multi-Channel	Configuration Register 1 */
+#define	SPORT2_MCMC2		0xFFC0253C	/* SPORT2 Multi-Channel	Configuration Register 2 */
+#define	SPORT2_MTCS0		0xFFC02540	/* SPORT2 Multi-Channel	Transmit Select	Register 0 */
+#define	SPORT2_MTCS1		0xFFC02544	/* SPORT2 Multi-Channel	Transmit Select	Register 1 */
+#define	SPORT2_MTCS2		0xFFC02548	/* SPORT2 Multi-Channel	Transmit Select	Register 2 */
+#define	SPORT2_MTCS3		0xFFC0254C	/* SPORT2 Multi-Channel	Transmit Select	Register 3 */
+#define	SPORT2_MRCS0		0xFFC02550	/* SPORT2 Multi-Channel	Receive	Select Register	0 */
+#define	SPORT2_MRCS1		0xFFC02554	/* SPORT2 Multi-Channel	Receive	Select Register	1 */
+#define	SPORT2_MRCS2		0xFFC02558	/* SPORT2 Multi-Channel	Receive	Select Register	2 */
+#define	SPORT2_MRCS3		0xFFC0255C	/* SPORT2 Multi-Channel	Receive	Select Register	3 */
+
+
+/* SPORT3 Controller		(0xFFC02600 - 0xFFC026FF)			 */
+#define	SPORT3_TCR1			0xFFC02600	/* SPORT3 Transmit Configuration 1 Register */
+#define	SPORT3_TCR2			0xFFC02604	/* SPORT3 Transmit Configuration 2 Register */
+#define	SPORT3_TCLKDIV		0xFFC02608	/* SPORT3 Transmit Clock Divider */
+#define	SPORT3_TFSDIV		0xFFC0260C	/* SPORT3 Transmit Frame Sync Divider */
+#define	SPORT3_TX			0xFFC02610	/* SPORT3 TX Data Register */
+#define	SPORT3_RX			0xFFC02618	/* SPORT3 RX Data Register */
+#define	SPORT3_RCR1			0xFFC02620	/* SPORT3 Receive Configuration 1 Register */
+#define	SPORT3_RCR2			0xFFC02624	/* SPORT3 Receive Configuration 2 Register */
+#define	SPORT3_RCLKDIV		0xFFC02628	/* SPORT3 Receive Clock	Divider */
+#define	SPORT3_RFSDIV		0xFFC0262C	/* SPORT3 Receive Frame	Sync Divider */
+#define	SPORT3_STAT			0xFFC02630	/* SPORT3 Status Register */
+#define	SPORT3_CHNL			0xFFC02634	/* SPORT3 Current Channel Register */
+#define	SPORT3_MCMC1		0xFFC02638	/* SPORT3 Multi-Channel	Configuration Register 1 */
+#define	SPORT3_MCMC2		0xFFC0263C	/* SPORT3 Multi-Channel	Configuration Register 2 */
+#define	SPORT3_MTCS0		0xFFC02640	/* SPORT3 Multi-Channel	Transmit Select	Register 0 */
+#define	SPORT3_MTCS1		0xFFC02644	/* SPORT3 Multi-Channel	Transmit Select	Register 1 */
+#define	SPORT3_MTCS2		0xFFC02648	/* SPORT3 Multi-Channel	Transmit Select	Register 2 */
+#define	SPORT3_MTCS3		0xFFC0264C	/* SPORT3 Multi-Channel	Transmit Select	Register 3 */
+#define	SPORT3_MRCS0		0xFFC02650	/* SPORT3 Multi-Channel	Receive	Select Register	0 */
+#define	SPORT3_MRCS1		0xFFC02654	/* SPORT3 Multi-Channel	Receive	Select Register	1 */
+#define	SPORT3_MRCS2		0xFFC02658	/* SPORT3 Multi-Channel	Receive	Select Register	2 */
+#define	SPORT3_MRCS3		0xFFC0265C	/* SPORT3 Multi-Channel	Receive	Select Register	3 */
+
+
+/* Media Transceiver (MXVR)   (0xFFC02700 - 0xFFC028FF) */
+
+#define	MXVR_CONFIG	      0xFFC02700  /* MXVR Configuration	Register */
+#define	MXVR_PLL_CTL_0	      0xFFC02704  /* MXVR Phase	Lock Loop Control Register 0 */
+
+#define	MXVR_STATE_0	      0xFFC02708  /* MXVR State	Register 0 */
+#define	MXVR_STATE_1	      0xFFC0270C  /* MXVR State	Register 1 */
+
+#define	MXVR_INT_STAT_0	      0xFFC02710  /* MXVR Interrupt Status Register 0 */
+#define	MXVR_INT_STAT_1	      0xFFC02714  /* MXVR Interrupt Status Register 1 */
+
+#define	MXVR_INT_EN_0	      0xFFC02718  /* MXVR Interrupt Enable Register 0 */
+#define	MXVR_INT_EN_1	      0xFFC0271C  /* MXVR Interrupt Enable Register 1 */
+
+#define	MXVR_POSITION	      0xFFC02720  /* MXVR Node Position	Register */
+#define	MXVR_MAX_POSITION     0xFFC02724  /* MXVR Maximum Node Position	Register */
+
+#define	MXVR_DELAY	      0xFFC02728  /* MXVR Node Frame Delay Register */
+#define	MXVR_MAX_DELAY	      0xFFC0272C  /* MXVR Maximum Node Frame Delay Register */
+
+#define	MXVR_LADDR	      0xFFC02730  /* MXVR Logical Address Register */
+#define	MXVR_GADDR	      0xFFC02734  /* MXVR Group	Address	Register */
+#define	MXVR_AADDR	      0xFFC02738  /* MXVR Alternate Address Register */
+
+#define	MXVR_ALLOC_0	      0xFFC0273C  /* MXVR Allocation Table Register 0 */
+#define	MXVR_ALLOC_1	      0xFFC02740  /* MXVR Allocation Table Register 1 */
+#define	MXVR_ALLOC_2	      0xFFC02744  /* MXVR Allocation Table Register 2 */
+#define	MXVR_ALLOC_3	      0xFFC02748  /* MXVR Allocation Table Register 3 */
+#define	MXVR_ALLOC_4	      0xFFC0274C  /* MXVR Allocation Table Register 4 */
+#define	MXVR_ALLOC_5	      0xFFC02750  /* MXVR Allocation Table Register 5 */
+#define	MXVR_ALLOC_6	      0xFFC02754  /* MXVR Allocation Table Register 6 */
+#define	MXVR_ALLOC_7	      0xFFC02758  /* MXVR Allocation Table Register 7 */
+#define	MXVR_ALLOC_8	      0xFFC0275C  /* MXVR Allocation Table Register 8 */
+#define	MXVR_ALLOC_9	      0xFFC02760  /* MXVR Allocation Table Register 9 */
+#define	MXVR_ALLOC_10	      0xFFC02764  /* MXVR Allocation Table Register 10 */
+#define	MXVR_ALLOC_11	      0xFFC02768  /* MXVR Allocation Table Register 11 */
+#define	MXVR_ALLOC_12	      0xFFC0276C  /* MXVR Allocation Table Register 12 */
+#define	MXVR_ALLOC_13	      0xFFC02770  /* MXVR Allocation Table Register 13 */
+#define	MXVR_ALLOC_14	      0xFFC02774  /* MXVR Allocation Table Register 14 */
+
+#define	MXVR_SYNC_LCHAN_0     0xFFC02778  /* MXVR Sync Data Logical Channel Assign Register 0 */
+#define	MXVR_SYNC_LCHAN_1     0xFFC0277C  /* MXVR Sync Data Logical Channel Assign Register 1 */
+#define	MXVR_SYNC_LCHAN_2     0xFFC02780  /* MXVR Sync Data Logical Channel Assign Register 2 */
+#define	MXVR_SYNC_LCHAN_3     0xFFC02784  /* MXVR Sync Data Logical Channel Assign Register 3 */
+#define	MXVR_SYNC_LCHAN_4     0xFFC02788  /* MXVR Sync Data Logical Channel Assign Register 4 */
+#define	MXVR_SYNC_LCHAN_5     0xFFC0278C  /* MXVR Sync Data Logical Channel Assign Register 5 */
+#define	MXVR_SYNC_LCHAN_6     0xFFC02790  /* MXVR Sync Data Logical Channel Assign Register 6 */
+#define	MXVR_SYNC_LCHAN_7     0xFFC02794  /* MXVR Sync Data Logical Channel Assign Register 7 */
+
+#define	MXVR_DMA0_CONFIG      0xFFC02798  /* MXVR Sync Data DMA0 Config	Register */
+#define	MXVR_DMA0_START_ADDR  0xFFC0279C  /* MXVR Sync Data DMA0 Start Address Register */
+#define	MXVR_DMA0_COUNT	      0xFFC027A0  /* MXVR Sync Data DMA0 Loop Count Register */
+#define	MXVR_DMA0_CURR_ADDR   0xFFC027A4  /* MXVR Sync Data DMA0 Current Address Register */
+#define	MXVR_DMA0_CURR_COUNT  0xFFC027A8  /* MXVR Sync Data DMA0 Current Loop Count Register */
+
+#define	MXVR_DMA1_CONFIG      0xFFC027AC  /* MXVR Sync Data DMA1 Config	Register */
+#define	MXVR_DMA1_START_ADDR  0xFFC027B0  /* MXVR Sync Data DMA1 Start Address Register */
+#define	MXVR_DMA1_COUNT	      0xFFC027B4  /* MXVR Sync Data DMA1 Loop Count Register */
+#define	MXVR_DMA1_CURR_ADDR   0xFFC027B8  /* MXVR Sync Data DMA1 Current Address Register */
+#define	MXVR_DMA1_CURR_COUNT  0xFFC027BC  /* MXVR Sync Data DMA1 Current Loop Count Register */
+
+#define	MXVR_DMA2_CONFIG      0xFFC027C0  /* MXVR Sync Data DMA2 Config	Register */
+#define	MXVR_DMA2_START_ADDR  0xFFC027C4  /* MXVR Sync Data DMA2 Start Address Register */
+#define	MXVR_DMA2_COUNT	      0xFFC027C8  /* MXVR Sync Data DMA2 Loop Count Register */
+#define	MXVR_DMA2_CURR_ADDR   0xFFC027CC  /* MXVR Sync Data DMA2 Current Address Register */
+#define	MXVR_DMA2_CURR_COUNT  0xFFC027D0  /* MXVR Sync Data DMA2 Current Loop Count Register */
+
+#define	MXVR_DMA3_CONFIG      0xFFC027D4  /* MXVR Sync Data DMA3 Config	Register */
+#define	MXVR_DMA3_START_ADDR  0xFFC027D8  /* MXVR Sync Data DMA3 Start Address Register */
+#define	MXVR_DMA3_COUNT	      0xFFC027DC  /* MXVR Sync Data DMA3 Loop Count Register */
+#define	MXVR_DMA3_CURR_ADDR   0xFFC027E0  /* MXVR Sync Data DMA3 Current Address Register */
+#define	MXVR_DMA3_CURR_COUNT  0xFFC027E4  /* MXVR Sync Data DMA3 Current Loop Count Register */
+
+#define	MXVR_DMA4_CONFIG      0xFFC027E8  /* MXVR Sync Data DMA4 Config	Register */
+#define	MXVR_DMA4_START_ADDR  0xFFC027EC  /* MXVR Sync Data DMA4 Start Address Register */
+#define	MXVR_DMA4_COUNT	      0xFFC027F0  /* MXVR Sync Data DMA4 Loop Count Register */
+#define	MXVR_DMA4_CURR_ADDR   0xFFC027F4  /* MXVR Sync Data DMA4 Current Address Register */
+#define	MXVR_DMA4_CURR_COUNT  0xFFC027F8  /* MXVR Sync Data DMA4 Current Loop Count Register */
+
+#define	MXVR_DMA5_CONFIG      0xFFC027FC  /* MXVR Sync Data DMA5 Config	Register */
+#define	MXVR_DMA5_START_ADDR  0xFFC02800  /* MXVR Sync Data DMA5 Start Address Register */
+#define	MXVR_DMA5_COUNT	      0xFFC02804  /* MXVR Sync Data DMA5 Loop Count Register */
+#define	MXVR_DMA5_CURR_ADDR   0xFFC02808  /* MXVR Sync Data DMA5 Current Address Register */
+#define	MXVR_DMA5_CURR_COUNT  0xFFC0280C  /* MXVR Sync Data DMA5 Current Loop Count Register */
+
+#define	MXVR_DMA6_CONFIG      0xFFC02810  /* MXVR Sync Data DMA6 Config	Register */
+#define	MXVR_DMA6_START_ADDR  0xFFC02814  /* MXVR Sync Data DMA6 Start Address Register */
+#define	MXVR_DMA6_COUNT	      0xFFC02818  /* MXVR Sync Data DMA6 Loop Count Register */
+#define	MXVR_DMA6_CURR_ADDR   0xFFC0281C  /* MXVR Sync Data DMA6 Current Address Register */
+#define	MXVR_DMA6_CURR_COUNT  0xFFC02820  /* MXVR Sync Data DMA6 Current Loop Count Register */
+
+#define	MXVR_DMA7_CONFIG      0xFFC02824  /* MXVR Sync Data DMA7 Config	Register */
+#define	MXVR_DMA7_START_ADDR  0xFFC02828  /* MXVR Sync Data DMA7 Start Address Register */
+#define	MXVR_DMA7_COUNT	      0xFFC0282C  /* MXVR Sync Data DMA7 Loop Count Register */
+#define	MXVR_DMA7_CURR_ADDR   0xFFC02830  /* MXVR Sync Data DMA7 Current Address Register */
+#define	MXVR_DMA7_CURR_COUNT  0xFFC02834  /* MXVR Sync Data DMA7 Current Loop Count Register */
+
+#define	MXVR_AP_CTL	      0xFFC02838  /* MXVR Async	Packet Control Register */
+#define	MXVR_APRB_START_ADDR  0xFFC0283C  /* MXVR Async	Packet RX Buffer Start Addr Register */
+#define	MXVR_APRB_CURR_ADDR   0xFFC02840  /* MXVR Async	Packet RX Buffer Current Addr Register */
+#define	MXVR_APTB_START_ADDR  0xFFC02844  /* MXVR Async	Packet TX Buffer Start Addr Register */
+#define	MXVR_APTB_CURR_ADDR   0xFFC02848  /* MXVR Async	Packet TX Buffer Current Addr Register */
+
+#define	MXVR_CM_CTL	      0xFFC0284C  /* MXVR Control Message Control Register */
+#define	MXVR_CMRB_START_ADDR  0xFFC02850  /* MXVR Control Message RX Buffer Start Addr Register */
+#define	MXVR_CMRB_CURR_ADDR   0xFFC02854  /* MXVR Control Message RX Buffer Current Address */
+#define	MXVR_CMTB_START_ADDR  0xFFC02858  /* MXVR Control Message TX Buffer Start Addr Register */
+#define	MXVR_CMTB_CURR_ADDR   0xFFC0285C  /* MXVR Control Message TX Buffer Current Address */
+
+#define	MXVR_RRDB_START_ADDR  0xFFC02860  /* MXVR Remote Read Buffer Start Addr	Register */
+#define	MXVR_RRDB_CURR_ADDR   0xFFC02864  /* MXVR Remote Read Buffer Current Addr Register */
+
+#define	MXVR_PAT_DATA_0	      0xFFC02868  /* MXVR Pattern Data Register	0 */
+#define	MXVR_PAT_EN_0	      0xFFC0286C  /* MXVR Pattern Enable Register 0 */
+#define	MXVR_PAT_DATA_1	      0xFFC02870  /* MXVR Pattern Data Register	1 */
+#define	MXVR_PAT_EN_1	      0xFFC02874  /* MXVR Pattern Enable Register 1 */
+
+#define	MXVR_FRAME_CNT_0      0xFFC02878  /* MXVR Frame	Counter	0 */
+#define	MXVR_FRAME_CNT_1      0xFFC0287C  /* MXVR Frame	Counter	1 */
+
+#define	MXVR_ROUTING_0	      0xFFC02880  /* MXVR Routing Table	Register 0 */
+#define	MXVR_ROUTING_1	      0xFFC02884  /* MXVR Routing Table	Register 1 */
+#define	MXVR_ROUTING_2	      0xFFC02888  /* MXVR Routing Table	Register 2 */
+#define	MXVR_ROUTING_3	      0xFFC0288C  /* MXVR Routing Table	Register 3 */
+#define	MXVR_ROUTING_4	      0xFFC02890  /* MXVR Routing Table	Register 4 */
+#define	MXVR_ROUTING_5	      0xFFC02894  /* MXVR Routing Table	Register 5 */
+#define	MXVR_ROUTING_6	      0xFFC02898  /* MXVR Routing Table	Register 6 */
+#define	MXVR_ROUTING_7	      0xFFC0289C  /* MXVR Routing Table	Register 7 */
+#define	MXVR_ROUTING_8	      0xFFC028A0  /* MXVR Routing Table	Register 8 */
+#define	MXVR_ROUTING_9	      0xFFC028A4  /* MXVR Routing Table	Register 9 */
+#define	MXVR_ROUTING_10	      0xFFC028A8  /* MXVR Routing Table	Register 10 */
+#define	MXVR_ROUTING_11	      0xFFC028AC  /* MXVR Routing Table	Register 11 */
+#define	MXVR_ROUTING_12	      0xFFC028B0  /* MXVR Routing Table	Register 12 */
+#define	MXVR_ROUTING_13	      0xFFC028B4  /* MXVR Routing Table	Register 13 */
+#define	MXVR_ROUTING_14	      0xFFC028B8  /* MXVR Routing Table	Register 14 */
+
+#define	MXVR_PLL_CTL_1	      0xFFC028BC  /* MXVR Phase	Lock Loop Control Register 1 */
+#define	MXVR_BLOCK_CNT	      0xFFC028C0  /* MXVR Block	Counter */
+#define	MXVR_PLL_CTL_2	      0xFFC028C4  /* MXVR Phase	Lock Loop Control Register 2 */
+
+
+/* CAN Controller		(0xFFC02A00 - 0xFFC02FFF)				 */
+/* For Mailboxes 0-15											 */
+#define	CAN_MC1				0xFFC02A00	/* Mailbox config reg 1	 */
+#define	CAN_MD1				0xFFC02A04	/* Mailbox direction reg 1 */
+#define	CAN_TRS1			0xFFC02A08	/* Transmit Request Set	reg 1 */
+#define	CAN_TRR1			0xFFC02A0C	/* Transmit Request Reset reg 1 */
+#define	CAN_TA1				0xFFC02A10	/* Transmit Acknowledge	reg 1 */
+#define	CAN_AA1				0xFFC02A14	/* Transmit Abort Acknowledge reg 1 */
+#define	CAN_RMP1			0xFFC02A18	/* Receive Message Pending reg 1 */
+#define	CAN_RML1			0xFFC02A1C	/* Receive Message Lost	reg 1 */
+#define	CAN_MBTIF1			0xFFC02A20	/* Mailbox Transmit Interrupt Flag reg 1 */
+#define	CAN_MBRIF1			0xFFC02A24	/* Mailbox Receive  Interrupt Flag reg 1 */
+#define	CAN_MBIM1			0xFFC02A28	/* Mailbox Interrupt Mask reg 1 */
+#define	CAN_RFH1			0xFFC02A2C	/* Remote Frame	Handling reg 1 */
+#define	CAN_OPSS1			0xFFC02A30	/* Overwrite Protection	Single Shot Xmission reg 1 */
+
+/* For Mailboxes 16-31											 */
+#define	CAN_MC2				0xFFC02A40	/* Mailbox config reg 2	 */
+#define	CAN_MD2				0xFFC02A44	/* Mailbox direction reg 2 */
+#define	CAN_TRS2			0xFFC02A48	/* Transmit Request Set	reg 2 */
+#define	CAN_TRR2			0xFFC02A4C	/* Transmit Request Reset reg 2 */
+#define	CAN_TA2				0xFFC02A50	/* Transmit Acknowledge	reg 2 */
+#define	CAN_AA2				0xFFC02A54	/* Transmit Abort Acknowledge reg 2 */
+#define	CAN_RMP2			0xFFC02A58	/* Receive Message Pending reg 2 */
+#define	CAN_RML2			0xFFC02A5C	/* Receive Message Lost	reg 2 */
+#define	CAN_MBTIF2			0xFFC02A60	/* Mailbox Transmit Interrupt Flag reg 2 */
+#define	CAN_MBRIF2			0xFFC02A64	/* Mailbox Receive  Interrupt Flag reg 2 */
+#define	CAN_MBIM2			0xFFC02A68	/* Mailbox Interrupt Mask reg 2 */
+#define	CAN_RFH2			0xFFC02A6C	/* Remote Frame	Handling reg 2 */
+#define	CAN_OPSS2			0xFFC02A70	/* Overwrite Protection	Single Shot Xmission reg 2 */
+
+#define	CAN_CLOCK			0xFFC02A80	/* Bit Timing Configuration register 0 */
+#define	CAN_TIMING			0xFFC02A84	/* Bit Timing Configuration register 1 */
+
+#define	CAN_DEBUG			0xFFC02A88	/* Debug Register		 */
+/* the following is for	backwards compatibility */
+#define	CAN_CNF		 CAN_DEBUG
+
+#define	CAN_STATUS			0xFFC02A8C	/* Global Status Register */
+#define	CAN_CEC				0xFFC02A90	/* Error Counter Register */
+#define	CAN_GIS				0xFFC02A94	/* Global Interrupt Status Register */
+#define	CAN_GIM				0xFFC02A98	/* Global Interrupt Mask Register */
+#define	CAN_GIF				0xFFC02A9C	/* Global Interrupt Flag Register */
+#define	CAN_CONTROL			0xFFC02AA0	/* Master Control Register */
+#define	CAN_INTR			0xFFC02AA4	/* Interrupt Pending Register */
+#define	CAN_MBTD			0xFFC02AAC	/* Mailbox Temporary Disable Feature */
+#define	CAN_EWR				0xFFC02AB0	/* Programmable	Warning	Level */
+#define	CAN_ESR				0xFFC02AB4	/* Error Status	Register */
+#define	CAN_UCCNT			0xFFC02AC4	/* Universal Counter	 */
+#define	CAN_UCRC			0xFFC02AC8	/* Universal Counter Reload/Capture Register */
+#define	CAN_UCCNF			0xFFC02ACC	/* Universal Counter Configuration Register */
+
+/* Mailbox Acceptance Masks					 */
+#define	CAN_AM00L			0xFFC02B00	/* Mailbox 0 Low Acceptance Mask */
+#define	CAN_AM00H			0xFFC02B04	/* Mailbox 0 High Acceptance Mask */
+#define	CAN_AM01L			0xFFC02B08	/* Mailbox 1 Low Acceptance Mask */
+#define	CAN_AM01H			0xFFC02B0C	/* Mailbox 1 High Acceptance Mask */
+#define	CAN_AM02L			0xFFC02B10	/* Mailbox 2 Low Acceptance Mask */
+#define	CAN_AM02H			0xFFC02B14	/* Mailbox 2 High Acceptance Mask */
+#define	CAN_AM03L			0xFFC02B18	/* Mailbox 3 Low Acceptance Mask */
+#define	CAN_AM03H			0xFFC02B1C	/* Mailbox 3 High Acceptance Mask */
+#define	CAN_AM04L			0xFFC02B20	/* Mailbox 4 Low Acceptance Mask */
+#define	CAN_AM04H			0xFFC02B24	/* Mailbox 4 High Acceptance Mask */
+#define	CAN_AM05L			0xFFC02B28	/* Mailbox 5 Low Acceptance Mask */
+#define	CAN_AM05H			0xFFC02B2C	/* Mailbox 5 High Acceptance Mask */
+#define	CAN_AM06L			0xFFC02B30	/* Mailbox 6 Low Acceptance Mask */
+#define	CAN_AM06H			0xFFC02B34	/* Mailbox 6 High Acceptance Mask */
+#define	CAN_AM07L			0xFFC02B38	/* Mailbox 7 Low Acceptance Mask */
+#define	CAN_AM07H			0xFFC02B3C	/* Mailbox 7 High Acceptance Mask */
+#define	CAN_AM08L			0xFFC02B40	/* Mailbox 8 Low Acceptance Mask */
+#define	CAN_AM08H			0xFFC02B44	/* Mailbox 8 High Acceptance Mask */
+#define	CAN_AM09L			0xFFC02B48	/* Mailbox 9 Low Acceptance Mask */
+#define	CAN_AM09H			0xFFC02B4C	/* Mailbox 9 High Acceptance Mask */
+#define	CAN_AM10L			0xFFC02B50	/* Mailbox 10 Low Acceptance Mask */
+#define	CAN_AM10H			0xFFC02B54	/* Mailbox 10 High Acceptance Mask */
+#define	CAN_AM11L			0xFFC02B58	/* Mailbox 11 Low Acceptance Mask */
+#define	CAN_AM11H			0xFFC02B5C	/* Mailbox 11 High Acceptance Mask */
+#define	CAN_AM12L			0xFFC02B60	/* Mailbox 12 Low Acceptance Mask */
+#define	CAN_AM12H			0xFFC02B64	/* Mailbox 12 High Acceptance Mask */
+#define	CAN_AM13L			0xFFC02B68	/* Mailbox 13 Low Acceptance Mask */
+#define	CAN_AM13H			0xFFC02B6C	/* Mailbox 13 High Acceptance Mask */
+#define	CAN_AM14L			0xFFC02B70	/* Mailbox 14 Low Acceptance Mask */
+#define	CAN_AM14H			0xFFC02B74	/* Mailbox 14 High Acceptance Mask */
+#define	CAN_AM15L			0xFFC02B78	/* Mailbox 15 Low Acceptance Mask */
+#define	CAN_AM15H			0xFFC02B7C	/* Mailbox 15 High Acceptance Mask */
+
+#define	CAN_AM16L			0xFFC02B80	/* Mailbox 16 Low Acceptance Mask */
+#define	CAN_AM16H			0xFFC02B84	/* Mailbox 16 High Acceptance Mask */
+#define	CAN_AM17L			0xFFC02B88	/* Mailbox 17 Low Acceptance Mask */
+#define	CAN_AM17H			0xFFC02B8C	/* Mailbox 17 High Acceptance Mask */
+#define	CAN_AM18L			0xFFC02B90	/* Mailbox 18 Low Acceptance Mask */
+#define	CAN_AM18H			0xFFC02B94	/* Mailbox 18 High Acceptance Mask */
+#define	CAN_AM19L			0xFFC02B98	/* Mailbox 19 Low Acceptance Mask */
+#define	CAN_AM19H			0xFFC02B9C	/* Mailbox 19 High Acceptance Mask */
+#define	CAN_AM20L			0xFFC02BA0	/* Mailbox 20 Low Acceptance Mask */
+#define	CAN_AM20H			0xFFC02BA4	/* Mailbox 20 High Acceptance Mask */
+#define	CAN_AM21L			0xFFC02BA8	/* Mailbox 21 Low Acceptance Mask */
+#define	CAN_AM21H			0xFFC02BAC	/* Mailbox 21 High Acceptance Mask */
+#define	CAN_AM22L			0xFFC02BB0	/* Mailbox 22 Low Acceptance Mask */
+#define	CAN_AM22H			0xFFC02BB4	/* Mailbox 22 High Acceptance Mask */
+#define	CAN_AM23L			0xFFC02BB8	/* Mailbox 23 Low Acceptance Mask */
+#define	CAN_AM23H			0xFFC02BBC	/* Mailbox 23 High Acceptance Mask */
+#define	CAN_AM24L			0xFFC02BC0	/* Mailbox 24 Low Acceptance Mask */
+#define	CAN_AM24H			0xFFC02BC4	/* Mailbox 24 High Acceptance Mask */
+#define	CAN_AM25L			0xFFC02BC8	/* Mailbox 25 Low Acceptance Mask */
+#define	CAN_AM25H			0xFFC02BCC	/* Mailbox 25 High Acceptance Mask */
+#define	CAN_AM26L			0xFFC02BD0	/* Mailbox 26 Low Acceptance Mask */
+#define	CAN_AM26H			0xFFC02BD4	/* Mailbox 26 High Acceptance Mask */
+#define	CAN_AM27L			0xFFC02BD8	/* Mailbox 27 Low Acceptance Mask */
+#define	CAN_AM27H			0xFFC02BDC	/* Mailbox 27 High Acceptance Mask */
+#define	CAN_AM28L			0xFFC02BE0	/* Mailbox 28 Low Acceptance Mask */
+#define	CAN_AM28H			0xFFC02BE4	/* Mailbox 28 High Acceptance Mask */
+#define	CAN_AM29L			0xFFC02BE8	/* Mailbox 29 Low Acceptance Mask */
+#define	CAN_AM29H			0xFFC02BEC	/* Mailbox 29 High Acceptance Mask */
+#define	CAN_AM30L			0xFFC02BF0	/* Mailbox 30 Low Acceptance Mask */
+#define	CAN_AM30H			0xFFC02BF4	/* Mailbox 30 High Acceptance Mask */
+#define	CAN_AM31L			0xFFC02BF8	/* Mailbox 31 Low Acceptance Mask */
+#define	CAN_AM31H			0xFFC02BFC	/* Mailbox 31 High Acceptance Mask */
+
+/* CAN Acceptance Mask Macros */
+#define	CAN_AM_L(x)			(CAN_AM00L+((x)*0x8))
+#define	CAN_AM_H(x)			(CAN_AM00H+((x)*0x8))
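+
+/* Editor's note (illustrative only): each mailbox owns an 8-byte pair of
+ * acceptance-mask registers, so CAN_AM_L(x)/CAN_AM_H(x) simply step 0x8 per
+ * mailbox from CAN_AM00L/CAN_AM00H.  For example, CAN_AM_L(3) expands to
+ * (CAN_AM00L + 3*0x8) = 0xFFC02B18, i.e. CAN_AM03L below.
+ */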
+
+/* Mailbox Registers									 */
+#define	CAN_MB00_DATA0		0xFFC02C00	/* Mailbox 0 Data Word 0 [15:0]	Register */
+#define	CAN_MB00_DATA1		0xFFC02C04	/* Mailbox 0 Data Word 1 [31:16] Register */
+#define	CAN_MB00_DATA2		0xFFC02C08	/* Mailbox 0 Data Word 2 [47:32] Register */
+#define	CAN_MB00_DATA3		0xFFC02C0C	/* Mailbox 0 Data Word 3 [63:48] Register */
+#define	CAN_MB00_LENGTH		0xFFC02C10	/* Mailbox 0 Data Length Code Register */
+#define	CAN_MB00_TIMESTAMP	0xFFC02C14	/* Mailbox 0 Time Stamp	Value Register */
+#define	CAN_MB00_ID0		0xFFC02C18	/* Mailbox 0 Identifier	Low Register */
+#define	CAN_MB00_ID1		0xFFC02C1C	/* Mailbox 0 Identifier	High Register */
+
+#define	CAN_MB01_DATA0		0xFFC02C20	/* Mailbox 1 Data Word 0 [15:0]	Register */
+#define	CAN_MB01_DATA1		0xFFC02C24	/* Mailbox 1 Data Word 1 [31:16] Register */
+#define	CAN_MB01_DATA2		0xFFC02C28	/* Mailbox 1 Data Word 2 [47:32] Register */
+#define	CAN_MB01_DATA3		0xFFC02C2C	/* Mailbox 1 Data Word 3 [63:48] Register */
+#define	CAN_MB01_LENGTH		0xFFC02C30	/* Mailbox 1 Data Length Code Register */
+#define	CAN_MB01_TIMESTAMP	0xFFC02C34	/* Mailbox 1 Time Stamp	Value Register */
+#define	CAN_MB01_ID0		0xFFC02C38	/* Mailbox 1 Identifier	Low Register */
+#define	CAN_MB01_ID1		0xFFC02C3C	/* Mailbox 1 Identifier	High Register */
+
+#define	CAN_MB02_DATA0		0xFFC02C40	/* Mailbox 2 Data Word 0 [15:0]	Register */
+#define	CAN_MB02_DATA1		0xFFC02C44	/* Mailbox 2 Data Word 1 [31:16] Register */
+#define	CAN_MB02_DATA2		0xFFC02C48	/* Mailbox 2 Data Word 2 [47:32] Register */
+#define	CAN_MB02_DATA3		0xFFC02C4C	/* Mailbox 2 Data Word 3 [63:48] Register */
+#define	CAN_MB02_LENGTH		0xFFC02C50	/* Mailbox 2 Data Length Code Register */
+#define	CAN_MB02_TIMESTAMP	0xFFC02C54	/* Mailbox 2 Time Stamp	Value Register */
+#define	CAN_MB02_ID0		0xFFC02C58	/* Mailbox 2 Identifier	Low Register */
+#define	CAN_MB02_ID1		0xFFC02C5C	/* Mailbox 2 Identifier	High Register */
+
+#define	CAN_MB03_DATA0		0xFFC02C60	/* Mailbox 3 Data Word 0 [15:0]	Register */
+#define	CAN_MB03_DATA1		0xFFC02C64	/* Mailbox 3 Data Word 1 [31:16] Register */
+#define	CAN_MB03_DATA2		0xFFC02C68	/* Mailbox 3 Data Word 2 [47:32] Register */
+#define	CAN_MB03_DATA3		0xFFC02C6C	/* Mailbox 3 Data Word 3 [63:48] Register */
+#define	CAN_MB03_LENGTH		0xFFC02C70	/* Mailbox 3 Data Length Code Register */
+#define	CAN_MB03_TIMESTAMP	0xFFC02C74	/* Mailbox 3 Time Stamp	Value Register */
+#define	CAN_MB03_ID0		0xFFC02C78	/* Mailbox 3 Identifier	Low Register */
+#define	CAN_MB03_ID1		0xFFC02C7C	/* Mailbox 3 Identifier	High Register */
+
+#define	CAN_MB04_DATA0		0xFFC02C80	/* Mailbox 4 Data Word 0 [15:0]	Register */
+#define	CAN_MB04_DATA1		0xFFC02C84	/* Mailbox 4 Data Word 1 [31:16] Register */
+#define	CAN_MB04_DATA2		0xFFC02C88	/* Mailbox 4 Data Word 2 [47:32] Register */
+#define	CAN_MB04_DATA3		0xFFC02C8C	/* Mailbox 4 Data Word 3 [63:48] Register */
+#define	CAN_MB04_LENGTH		0xFFC02C90	/* Mailbox 4 Data Length Code Register */
+#define	CAN_MB04_TIMESTAMP	0xFFC02C94	/* Mailbox 4 Time Stamp	Value Register */
+#define	CAN_MB04_ID0		0xFFC02C98	/* Mailbox 4 Identifier	Low Register */
+#define	CAN_MB04_ID1		0xFFC02C9C	/* Mailbox 4 Identifier	High Register */
+
+#define	CAN_MB05_DATA0		0xFFC02CA0	/* Mailbox 5 Data Word 0 [15:0]	Register */
+#define	CAN_MB05_DATA1		0xFFC02CA4	/* Mailbox 5 Data Word 1 [31:16] Register */
+#define	CAN_MB05_DATA2		0xFFC02CA8	/* Mailbox 5 Data Word 2 [47:32] Register */
+#define	CAN_MB05_DATA3		0xFFC02CAC	/* Mailbox 5 Data Word 3 [63:48] Register */
+#define	CAN_MB05_LENGTH		0xFFC02CB0	/* Mailbox 5 Data Length Code Register */
+#define	CAN_MB05_TIMESTAMP	0xFFC02CB4	/* Mailbox 5 Time Stamp	Value Register */
+#define	CAN_MB05_ID0		0xFFC02CB8	/* Mailbox 5 Identifier	Low Register */
+#define	CAN_MB05_ID1		0xFFC02CBC	/* Mailbox 5 Identifier	High Register */
+
+#define	CAN_MB06_DATA0		0xFFC02CC0	/* Mailbox 6 Data Word 0 [15:0]	Register */
+#define	CAN_MB06_DATA1		0xFFC02CC4	/* Mailbox 6 Data Word 1 [31:16] Register */
+#define	CAN_MB06_DATA2		0xFFC02CC8	/* Mailbox 6 Data Word 2 [47:32] Register */
+#define	CAN_MB06_DATA3		0xFFC02CCC	/* Mailbox 6 Data Word 3 [63:48] Register */
+#define	CAN_MB06_LENGTH		0xFFC02CD0	/* Mailbox 6 Data Length Code Register */
+#define	CAN_MB06_TIMESTAMP	0xFFC02CD4	/* Mailbox 6 Time Stamp	Value Register */
+#define	CAN_MB06_ID0		0xFFC02CD8	/* Mailbox 6 Identifier	Low Register */
+#define	CAN_MB06_ID1		0xFFC02CDC	/* Mailbox 6 Identifier	High Register */
+
+#define	CAN_MB07_DATA0		0xFFC02CE0	/* Mailbox 7 Data Word 0 [15:0]	Register */
+#define	CAN_MB07_DATA1		0xFFC02CE4	/* Mailbox 7 Data Word 1 [31:16] Register */
+#define	CAN_MB07_DATA2		0xFFC02CE8	/* Mailbox 7 Data Word 2 [47:32] Register */
+#define	CAN_MB07_DATA3		0xFFC02CEC	/* Mailbox 7 Data Word 3 [63:48] Register */
+#define	CAN_MB07_LENGTH		0xFFC02CF0	/* Mailbox 7 Data Length Code Register */
+#define	CAN_MB07_TIMESTAMP	0xFFC02CF4	/* Mailbox 7 Time Stamp	Value Register */
+#define	CAN_MB07_ID0		0xFFC02CF8	/* Mailbox 7 Identifier	Low Register */
+#define	CAN_MB07_ID1		0xFFC02CFC	/* Mailbox 7 Identifier	High Register */
+
+#define	CAN_MB08_DATA0		0xFFC02D00	/* Mailbox 8 Data Word 0 [15:0]	Register */
+#define	CAN_MB08_DATA1		0xFFC02D04	/* Mailbox 8 Data Word 1 [31:16] Register */
+#define	CAN_MB08_DATA2		0xFFC02D08	/* Mailbox 8 Data Word 2 [47:32] Register */
+#define	CAN_MB08_DATA3		0xFFC02D0C	/* Mailbox 8 Data Word 3 [63:48] Register */
+#define	CAN_MB08_LENGTH		0xFFC02D10	/* Mailbox 8 Data Length Code Register */
+#define	CAN_MB08_TIMESTAMP	0xFFC02D14	/* Mailbox 8 Time Stamp	Value Register */
+#define	CAN_MB08_ID0		0xFFC02D18	/* Mailbox 8 Identifier	Low Register */
+#define	CAN_MB08_ID1		0xFFC02D1C	/* Mailbox 8 Identifier	High Register */
+
+#define	CAN_MB09_DATA0		0xFFC02D20	/* Mailbox 9 Data Word 0 [15:0]	Register */
+#define	CAN_MB09_DATA1		0xFFC02D24	/* Mailbox 9 Data Word 1 [31:16] Register */
+#define	CAN_MB09_DATA2		0xFFC02D28	/* Mailbox 9 Data Word 2 [47:32] Register */
+#define	CAN_MB09_DATA3		0xFFC02D2C	/* Mailbox 9 Data Word 3 [63:48] Register */
+#define	CAN_MB09_LENGTH		0xFFC02D30	/* Mailbox 9 Data Length Code Register */
+#define	CAN_MB09_TIMESTAMP	0xFFC02D34	/* Mailbox 9 Time Stamp	Value Register */
+#define	CAN_MB09_ID0		0xFFC02D38	/* Mailbox 9 Identifier	Low Register */
+#define	CAN_MB09_ID1		0xFFC02D3C	/* Mailbox 9 Identifier	High Register */
+
+#define	CAN_MB10_DATA0		0xFFC02D40	/* Mailbox 10 Data Word	0 [15:0] Register */
+#define	CAN_MB10_DATA1		0xFFC02D44	/* Mailbox 10 Data Word	1 [31:16] Register */
+#define	CAN_MB10_DATA2		0xFFC02D48	/* Mailbox 10 Data Word	2 [47:32] Register */
+#define	CAN_MB10_DATA3		0xFFC02D4C	/* Mailbox 10 Data Word	3 [63:48] Register */
+#define	CAN_MB10_LENGTH		0xFFC02D50	/* Mailbox 10 Data Length Code Register */
+#define	CAN_MB10_TIMESTAMP	0xFFC02D54	/* Mailbox 10 Time Stamp Value Register */
+#define	CAN_MB10_ID0		0xFFC02D58	/* Mailbox 10 Identifier Low Register */
+#define	CAN_MB10_ID1		0xFFC02D5C	/* Mailbox 10 Identifier High Register */
+
+#define	CAN_MB11_DATA0		0xFFC02D60	/* Mailbox 11 Data Word	0 [15:0] Register */
+#define	CAN_MB11_DATA1		0xFFC02D64	/* Mailbox 11 Data Word	1 [31:16] Register */
+#define	CAN_MB11_DATA2		0xFFC02D68	/* Mailbox 11 Data Word	2 [47:32] Register */
+#define	CAN_MB11_DATA3		0xFFC02D6C	/* Mailbox 11 Data Word	3 [63:48] Register */
+#define	CAN_MB11_LENGTH		0xFFC02D70	/* Mailbox 11 Data Length Code Register */
+#define	CAN_MB11_TIMESTAMP	0xFFC02D74	/* Mailbox 11 Time Stamp Value Register */
+#define	CAN_MB11_ID0		0xFFC02D78	/* Mailbox 11 Identifier Low Register */
+#define	CAN_MB11_ID1		0xFFC02D7C	/* Mailbox 11 Identifier High Register */
+
+#define	CAN_MB12_DATA0		0xFFC02D80	/* Mailbox 12 Data Word	0 [15:0] Register */
+#define	CAN_MB12_DATA1		0xFFC02D84	/* Mailbox 12 Data Word	1 [31:16] Register */
+#define	CAN_MB12_DATA2		0xFFC02D88	/* Mailbox 12 Data Word	2 [47:32] Register */
+#define	CAN_MB12_DATA3		0xFFC02D8C	/* Mailbox 12 Data Word	3 [63:48] Register */
+#define	CAN_MB12_LENGTH		0xFFC02D90	/* Mailbox 12 Data Length Code Register */
+#define	CAN_MB12_TIMESTAMP	0xFFC02D94	/* Mailbox 12 Time Stamp Value Register */
+#define	CAN_MB12_ID0		0xFFC02D98	/* Mailbox 12 Identifier Low Register */
+#define	CAN_MB12_ID1		0xFFC02D9C	/* Mailbox 12 Identifier High Register */
+
+#define	CAN_MB13_DATA0		0xFFC02DA0	/* Mailbox 13 Data Word	0 [15:0] Register */
+#define	CAN_MB13_DATA1		0xFFC02DA4	/* Mailbox 13 Data Word	1 [31:16] Register */
+#define	CAN_MB13_DATA2		0xFFC02DA8	/* Mailbox 13 Data Word	2 [47:32] Register */
+#define	CAN_MB13_DATA3		0xFFC02DAC	/* Mailbox 13 Data Word	3 [63:48] Register */
+#define	CAN_MB13_LENGTH		0xFFC02DB0	/* Mailbox 13 Data Length Code Register */
+#define	CAN_MB13_TIMESTAMP	0xFFC02DB4	/* Mailbox 13 Time Stamp Value Register */
+#define	CAN_MB13_ID0		0xFFC02DB8	/* Mailbox 13 Identifier Low Register */
+#define	CAN_MB13_ID1		0xFFC02DBC	/* Mailbox 13 Identifier High Register */
+
+#define	CAN_MB14_DATA0		0xFFC02DC0	/* Mailbox 14 Data Word	0 [15:0] Register */
+#define	CAN_MB14_DATA1		0xFFC02DC4	/* Mailbox 14 Data Word	1 [31:16] Register */
+#define	CAN_MB14_DATA2		0xFFC02DC8	/* Mailbox 14 Data Word	2 [47:32] Register */
+#define	CAN_MB14_DATA3		0xFFC02DCC	/* Mailbox 14 Data Word	3 [63:48] Register */
+#define	CAN_MB14_LENGTH		0xFFC02DD0	/* Mailbox 14 Data Length Code Register */
+#define	CAN_MB14_TIMESTAMP	0xFFC02DD4	/* Mailbox 14 Time Stamp Value Register */
+#define	CAN_MB14_ID0		0xFFC02DD8	/* Mailbox 14 Identifier Low Register */
+#define	CAN_MB14_ID1		0xFFC02DDC	/* Mailbox 14 Identifier High Register */
+
+#define	CAN_MB15_DATA0		0xFFC02DE0	/* Mailbox 15 Data Word	0 [15:0] Register */
+#define	CAN_MB15_DATA1		0xFFC02DE4	/* Mailbox 15 Data Word	1 [31:16] Register */
+#define	CAN_MB15_DATA2		0xFFC02DE8	/* Mailbox 15 Data Word	2 [47:32] Register */
+#define	CAN_MB15_DATA3		0xFFC02DEC	/* Mailbox 15 Data Word	3 [63:48] Register */
+#define	CAN_MB15_LENGTH		0xFFC02DF0	/* Mailbox 15 Data Length Code Register */
+#define	CAN_MB15_TIMESTAMP	0xFFC02DF4	/* Mailbox 15 Time Stamp Value Register */
+#define	CAN_MB15_ID0		0xFFC02DF8	/* Mailbox 15 Identifier Low Register */
+#define	CAN_MB15_ID1		0xFFC02DFC	/* Mailbox 15 Identifier High Register */
+
+#define	CAN_MB16_DATA0		0xFFC02E00	/* Mailbox 16 Data Word	0 [15:0] Register */
+#define	CAN_MB16_DATA1		0xFFC02E04	/* Mailbox 16 Data Word	1 [31:16] Register */
+#define	CAN_MB16_DATA2		0xFFC02E08	/* Mailbox 16 Data Word	2 [47:32] Register */
+#define	CAN_MB16_DATA3		0xFFC02E0C	/* Mailbox 16 Data Word	3 [63:48] Register */
+#define	CAN_MB16_LENGTH		0xFFC02E10	/* Mailbox 16 Data Length Code Register */
+#define	CAN_MB16_TIMESTAMP	0xFFC02E14	/* Mailbox 16 Time Stamp Value Register */
+#define	CAN_MB16_ID0		0xFFC02E18	/* Mailbox 16 Identifier Low Register */
+#define	CAN_MB16_ID1		0xFFC02E1C	/* Mailbox 16 Identifier High Register */
+
+#define	CAN_MB17_DATA0		0xFFC02E20	/* Mailbox 17 Data Word	0 [15:0] Register */
+#define	CAN_MB17_DATA1		0xFFC02E24	/* Mailbox 17 Data Word	1 [31:16] Register */
+#define	CAN_MB17_DATA2		0xFFC02E28	/* Mailbox 17 Data Word	2 [47:32] Register */
+#define	CAN_MB17_DATA3		0xFFC02E2C	/* Mailbox 17 Data Word	3 [63:48] Register */
+#define	CAN_MB17_LENGTH		0xFFC02E30	/* Mailbox 17 Data Length Code Register */
+#define	CAN_MB17_TIMESTAMP	0xFFC02E34	/* Mailbox 17 Time Stamp Value Register */
+#define	CAN_MB17_ID0		0xFFC02E38	/* Mailbox 17 Identifier Low Register */
+#define	CAN_MB17_ID1		0xFFC02E3C	/* Mailbox 17 Identifier High Register */
+
+#define	CAN_MB18_DATA0		0xFFC02E40	/* Mailbox 18 Data Word	0 [15:0] Register */
+#define	CAN_MB18_DATA1		0xFFC02E44	/* Mailbox 18 Data Word	1 [31:16] Register */
+#define	CAN_MB18_DATA2		0xFFC02E48	/* Mailbox 18 Data Word	2 [47:32] Register */
+#define	CAN_MB18_DATA3		0xFFC02E4C	/* Mailbox 18 Data Word	3 [63:48] Register */
+#define	CAN_MB18_LENGTH		0xFFC02E50	/* Mailbox 18 Data Length Code Register */
+#define	CAN_MB18_TIMESTAMP	0xFFC02E54	/* Mailbox 18 Time Stamp Value Register */
+#define	CAN_MB18_ID0		0xFFC02E58	/* Mailbox 18 Identifier Low Register */
+#define	CAN_MB18_ID1		0xFFC02E5C	/* Mailbox 18 Identifier High Register */
+
+#define	CAN_MB19_DATA0		0xFFC02E60	/* Mailbox 19 Data Word	0 [15:0] Register */
+#define	CAN_MB19_DATA1		0xFFC02E64	/* Mailbox 19 Data Word	1 [31:16] Register */
+#define	CAN_MB19_DATA2		0xFFC02E68	/* Mailbox 19 Data Word	2 [47:32] Register */
+#define	CAN_MB19_DATA3		0xFFC02E6C	/* Mailbox 19 Data Word	3 [63:48] Register */
+#define	CAN_MB19_LENGTH		0xFFC02E70	/* Mailbox 19 Data Length Code Register */
+#define	CAN_MB19_TIMESTAMP	0xFFC02E74	/* Mailbox 19 Time Stamp Value Register */
+#define	CAN_MB19_ID0		0xFFC02E78	/* Mailbox 19 Identifier Low Register */
+#define	CAN_MB19_ID1		0xFFC02E7C	/* Mailbox 19 Identifier High Register */
+
+#define	CAN_MB20_DATA0		0xFFC02E80	/* Mailbox 20 Data Word	0 [15:0] Register */
+#define	CAN_MB20_DATA1		0xFFC02E84	/* Mailbox 20 Data Word	1 [31:16] Register */
+#define	CAN_MB20_DATA2		0xFFC02E88	/* Mailbox 20 Data Word	2 [47:32] Register */
+#define	CAN_MB20_DATA3		0xFFC02E8C	/* Mailbox 20 Data Word	3 [63:48] Register */
+#define	CAN_MB20_LENGTH		0xFFC02E90	/* Mailbox 20 Data Length Code Register */
+#define	CAN_MB20_TIMESTAMP	0xFFC02E94	/* Mailbox 20 Time Stamp Value Register */
+#define	CAN_MB20_ID0		0xFFC02E98	/* Mailbox 20 Identifier Low Register */
+#define	CAN_MB20_ID1		0xFFC02E9C	/* Mailbox 20 Identifier High Register */
+
+#define	CAN_MB21_DATA0		0xFFC02EA0	/* Mailbox 21 Data Word	0 [15:0] Register */
+#define	CAN_MB21_DATA1		0xFFC02EA4	/* Mailbox 21 Data Word	1 [31:16] Register */
+#define	CAN_MB21_DATA2		0xFFC02EA8	/* Mailbox 21 Data Word	2 [47:32] Register */
+#define	CAN_MB21_DATA3		0xFFC02EAC	/* Mailbox 21 Data Word	3 [63:48] Register */
+#define	CAN_MB21_LENGTH		0xFFC02EB0	/* Mailbox 21 Data Length Code Register */
+#define	CAN_MB21_TIMESTAMP	0xFFC02EB4	/* Mailbox 21 Time Stamp Value Register */
+#define	CAN_MB21_ID0		0xFFC02EB8	/* Mailbox 21 Identifier Low Register */
+#define	CAN_MB21_ID1		0xFFC02EBC	/* Mailbox 21 Identifier High Register */
+
+#define	CAN_MB22_DATA0		0xFFC02EC0	/* Mailbox 22 Data Word	0 [15:0] Register */
+#define	CAN_MB22_DATA1		0xFFC02EC4	/* Mailbox 22 Data Word	1 [31:16] Register */
+#define	CAN_MB22_DATA2		0xFFC02EC8	/* Mailbox 22 Data Word	2 [47:32] Register */
+#define	CAN_MB22_DATA3		0xFFC02ECC	/* Mailbox 22 Data Word	3 [63:48] Register */
+#define	CAN_MB22_LENGTH		0xFFC02ED0	/* Mailbox 22 Data Length Code Register */
+#define	CAN_MB22_TIMESTAMP	0xFFC02ED4	/* Mailbox 22 Time Stamp Value Register */
+#define	CAN_MB22_ID0		0xFFC02ED8	/* Mailbox 22 Identifier Low Register */
+#define	CAN_MB22_ID1		0xFFC02EDC	/* Mailbox 22 Identifier High Register */
+
+#define	CAN_MB23_DATA0		0xFFC02EE0	/* Mailbox 23 Data Word	0 [15:0] Register */
+#define	CAN_MB23_DATA1		0xFFC02EE4	/* Mailbox 23 Data Word	1 [31:16] Register */
+#define	CAN_MB23_DATA2		0xFFC02EE8	/* Mailbox 23 Data Word	2 [47:32] Register */
+#define	CAN_MB23_DATA3		0xFFC02EEC	/* Mailbox 23 Data Word	3 [63:48] Register */
+#define	CAN_MB23_LENGTH		0xFFC02EF0	/* Mailbox 23 Data Length Code Register */
+#define	CAN_MB23_TIMESTAMP	0xFFC02EF4	/* Mailbox 23 Time Stamp Value Register */
+#define	CAN_MB23_ID0		0xFFC02EF8	/* Mailbox 23 Identifier Low Register */
+#define	CAN_MB23_ID1		0xFFC02EFC	/* Mailbox 23 Identifier High Register */
+
+#define	CAN_MB24_DATA0		0xFFC02F00	/* Mailbox 24 Data Word	0 [15:0] Register */
+#define	CAN_MB24_DATA1		0xFFC02F04	/* Mailbox 24 Data Word	1 [31:16] Register */
+#define	CAN_MB24_DATA2		0xFFC02F08	/* Mailbox 24 Data Word	2 [47:32] Register */
+#define	CAN_MB24_DATA3		0xFFC02F0C	/* Mailbox 24 Data Word	3 [63:48] Register */
+#define	CAN_MB24_LENGTH		0xFFC02F10	/* Mailbox 24 Data Length Code Register */
+#define	CAN_MB24_TIMESTAMP	0xFFC02F14	/* Mailbox 24 Time Stamp Value Register */
+#define	CAN_MB24_ID0		0xFFC02F18	/* Mailbox 24 Identifier Low Register */
+#define	CAN_MB24_ID1		0xFFC02F1C	/* Mailbox 24 Identifier High Register */
+
+#define	CAN_MB25_DATA0		0xFFC02F20	/* Mailbox 25 Data Word	0 [15:0] Register */
+#define	CAN_MB25_DATA1		0xFFC02F24	/* Mailbox 25 Data Word	1 [31:16] Register */
+#define	CAN_MB25_DATA2		0xFFC02F28	/* Mailbox 25 Data Word	2 [47:32] Register */
+#define	CAN_MB25_DATA3		0xFFC02F2C	/* Mailbox 25 Data Word	3 [63:48] Register */
+#define	CAN_MB25_LENGTH		0xFFC02F30	/* Mailbox 25 Data Length Code Register */
+#define	CAN_MB25_TIMESTAMP	0xFFC02F34	/* Mailbox 25 Time Stamp Value Register */
+#define	CAN_MB25_ID0		0xFFC02F38	/* Mailbox 25 Identifier Low Register */
+#define	CAN_MB25_ID1		0xFFC02F3C	/* Mailbox 25 Identifier High Register */
+
+#define	CAN_MB26_DATA0		0xFFC02F40	/* Mailbox 26 Data Word	0 [15:0] Register */
+#define	CAN_MB26_DATA1		0xFFC02F44	/* Mailbox 26 Data Word	1 [31:16] Register */
+#define	CAN_MB26_DATA2		0xFFC02F48	/* Mailbox 26 Data Word	2 [47:32] Register */
+#define	CAN_MB26_DATA3		0xFFC02F4C	/* Mailbox 26 Data Word	3 [63:48] Register */
+#define	CAN_MB26_LENGTH		0xFFC02F50	/* Mailbox 26 Data Length Code Register */
+#define	CAN_MB26_TIMESTAMP	0xFFC02F54	/* Mailbox 26 Time Stamp Value Register */
+#define	CAN_MB26_ID0		0xFFC02F58	/* Mailbox 26 Identifier Low Register */
+#define	CAN_MB26_ID1		0xFFC02F5C	/* Mailbox 26 Identifier High Register */
+
+#define	CAN_MB27_DATA0		0xFFC02F60	/* Mailbox 27 Data Word	0 [15:0] Register */
+#define	CAN_MB27_DATA1		0xFFC02F64	/* Mailbox 27 Data Word	1 [31:16] Register */
+#define	CAN_MB27_DATA2		0xFFC02F68	/* Mailbox 27 Data Word	2 [47:32] Register */
+#define	CAN_MB27_DATA3		0xFFC02F6C	/* Mailbox 27 Data Word	3 [63:48] Register */
+#define	CAN_MB27_LENGTH		0xFFC02F70	/* Mailbox 27 Data Length Code Register */
+#define	CAN_MB27_TIMESTAMP	0xFFC02F74	/* Mailbox 27 Time Stamp Value Register */
+#define	CAN_MB27_ID0		0xFFC02F78	/* Mailbox 27 Identifier Low Register */
+#define	CAN_MB27_ID1		0xFFC02F7C	/* Mailbox 27 Identifier High Register */
+
+#define	CAN_MB28_DATA0		0xFFC02F80	/* Mailbox 28 Data Word	0 [15:0] Register */
+#define	CAN_MB28_DATA1		0xFFC02F84	/* Mailbox 28 Data Word	1 [31:16] Register */
+#define	CAN_MB28_DATA2		0xFFC02F88	/* Mailbox 28 Data Word	2 [47:32] Register */
+#define	CAN_MB28_DATA3		0xFFC02F8C	/* Mailbox 28 Data Word	3 [63:48] Register */
+#define	CAN_MB28_LENGTH		0xFFC02F90	/* Mailbox 28 Data Length Code Register */
+#define	CAN_MB28_TIMESTAMP	0xFFC02F94	/* Mailbox 28 Time Stamp Value Register */
+#define	CAN_MB28_ID0		0xFFC02F98	/* Mailbox 28 Identifier Low Register */
+#define	CAN_MB28_ID1		0xFFC02F9C	/* Mailbox 28 Identifier High Register */
+
+#define	CAN_MB29_DATA0		0xFFC02FA0	/* Mailbox 29 Data Word	0 [15:0] Register */
+#define	CAN_MB29_DATA1		0xFFC02FA4	/* Mailbox 29 Data Word	1 [31:16] Register */
+#define	CAN_MB29_DATA2		0xFFC02FA8	/* Mailbox 29 Data Word	2 [47:32] Register */
+#define	CAN_MB29_DATA3		0xFFC02FAC	/* Mailbox 29 Data Word	3 [63:48] Register */
+#define	CAN_MB29_LENGTH		0xFFC02FB0	/* Mailbox 29 Data Length Code Register */
+#define	CAN_MB29_TIMESTAMP	0xFFC02FB4	/* Mailbox 29 Time Stamp Value Register */
+#define	CAN_MB29_ID0		0xFFC02FB8	/* Mailbox 29 Identifier Low Register */
+#define	CAN_MB29_ID1		0xFFC02FBC	/* Mailbox 29 Identifier High Register */
+
+#define	CAN_MB30_DATA0		0xFFC02FC0	/* Mailbox 30 Data Word	0 [15:0] Register */
+#define	CAN_MB30_DATA1		0xFFC02FC4	/* Mailbox 30 Data Word	1 [31:16] Register */
+#define	CAN_MB30_DATA2		0xFFC02FC8	/* Mailbox 30 Data Word	2 [47:32] Register */
+#define	CAN_MB30_DATA3		0xFFC02FCC	/* Mailbox 30 Data Word	3 [63:48] Register */
+#define	CAN_MB30_LENGTH		0xFFC02FD0	/* Mailbox 30 Data Length Code Register */
+#define	CAN_MB30_TIMESTAMP	0xFFC02FD4	/* Mailbox 30 Time Stamp Value Register */
+#define	CAN_MB30_ID0		0xFFC02FD8	/* Mailbox 30 Identifier Low Register */
+#define	CAN_MB30_ID1		0xFFC02FDC	/* Mailbox 30 Identifier High Register */
+
+#define	CAN_MB31_DATA0		0xFFC02FE0	/* Mailbox 31 Data Word	0 [15:0] Register */
+#define	CAN_MB31_DATA1		0xFFC02FE4	/* Mailbox 31 Data Word	1 [31:16] Register */
+#define	CAN_MB31_DATA2		0xFFC02FE8	/* Mailbox 31 Data Word	2 [47:32] Register */
+#define	CAN_MB31_DATA3		0xFFC02FEC	/* Mailbox 31 Data Word	3 [63:48] Register */
+#define	CAN_MB31_LENGTH		0xFFC02FF0	/* Mailbox 31 Data Length Code Register */
+#define	CAN_MB31_TIMESTAMP	0xFFC02FF4	/* Mailbox 31 Time Stamp Value Register */
+#define	CAN_MB31_ID0		0xFFC02FF8	/* Mailbox 31 Identifier Low Register */
+#define	CAN_MB31_ID1		0xFFC02FFC	/* Mailbox 31 Identifier High Register */
+
+/* CAN Mailbox Area Macros */
+#define	CAN_MB_ID1(x)		(CAN_MB00_ID1+((x)*0x20))
+#define	CAN_MB_ID0(x)		(CAN_MB00_ID0+((x)*0x20))
+#define	CAN_MB_TIMESTAMP(x)	(CAN_MB00_TIMESTAMP+((x)*0x20))
+#define	CAN_MB_LENGTH(x)	(CAN_MB00_LENGTH+((x)*0x20))
+#define	CAN_MB_DATA3(x)		(CAN_MB00_DATA3+((x)*0x20))
+#define	CAN_MB_DATA2(x)		(CAN_MB00_DATA2+((x)*0x20))
+#define	CAN_MB_DATA1(x)		(CAN_MB00_DATA1+((x)*0x20))
+#define	CAN_MB_DATA0(x)		(CAN_MB00_DATA0+((x)*0x20))
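+
+/* Editor's note (illustrative only): each mailbox occupies a 0x20-byte block,
+ * so these macros index the mailbox area by mailbox number.  For example,
+ * CAN_MB_DATA0(16) expands to (CAN_MB00_DATA0 + 16*0x20) = 0xFFC02E00, which
+ * matches CAN_MB16_DATA0 above.
+ */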
+
+
+/*********************************************************************************** */
+/* System MMR Register Bits and	Macros */
+/******************************************************************************* */
+
+/* ********************* PLL AND RESET MASKS ************************ */
+/* PLL_CTL Masks */
+#define	PLL_CLKIN			0x0000	/* Pass	CLKIN to PLL */
+#define	PLL_CLKIN_DIV2		0x0001	/* Pass	CLKIN/2	to PLL */
+#define	DF					0x0001	 /* 0: PLL = CLKIN, 1: PLL = CLKIN/2 */
+#define	PLL_OFF				0x0002	/* Shut	off PLL	clocks */
+
+#define	STOPCK				0x0008	/* Core	Clock Off		 */
+#define	PDWN				0x0020	/* Put the PLL in a Deep Sleep state */
+#define	IN_DELAY			0x0014	/* EBIU	Input Delay Select */
+#define	OUT_DELAY			0x00C0	/* EBIU	Output Delay Select */
+#define	BYPASS				0x0100	/* Bypass the PLL */
+#define	MSEL			0x7E00	/* Multiplier Select For CCLK/VCO Factors */
+
+/* PLL_CTL Macros				 */
+#ifdef _MISRA_RULES
+#define	SET_MSEL(x)		(((x)&0x3Fu) <<	0x9)	/* Set MSEL = 0-63 --> VCO = CLKIN*MSEL */
+#define	SET_OUT_DELAY(x)	(((x)&0x03u) <<	0x6)
+#define	SET_IN_DELAY(x)		((((x)&0x02u) << 0x3) |	(((x)&0x01u) <<	0x2))
+#else
+#define	SET_MSEL(x)		(((x)&0x3F) << 0x9)	/* Set MSEL = 0-63 --> VCO = CLKIN*MSEL */
+#define	SET_OUT_DELAY(x)	(((x)&0x03) << 0x6)
+#define	SET_IN_DELAY(x)		((((x)&0x02) <<	0x3) | (((x)&0x01) << 0x2))
+#endif /* _MISRA_RULES */
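+
+/* Editor's note (illustrative usage sketch, not part of this patch): a
+ * PLL_CTL value is normally built by OR-ing the masks above with SET_MSEL().
+ * For example, with a 25 MHz CLKIN, a hypothetical 500 MHz VCO could be
+ * requested as follows (PLL_CTL is defined earlier in this header, and a
+ * bfin_write16() accessor is assumed):
+ *
+ *   unsigned short pll_ctl = SET_MSEL(20);   // VCO = CLKIN * 20
+ *   // pll_ctl |= DF;                        // uncomment to feed CLKIN/2 to the PLL
+ *   bfin_write16(PLL_CTL, pll_ctl);
+ *
+ * Reprogramming PLL_CTL on real hardware also requires the documented
+ * idle/wakeup sequence, which is outside the scope of this header.
+ */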
+
+/* PLL_DIV Masks */
+#define	SSEL				0x000F	/* System Select */
+#define	CSEL				0x0030	/* Core	Select */
+#define	CSEL_DIV1		0x0000	/*		CCLK = VCO / 1 */
+#define	CSEL_DIV2		0x0010	/*		CCLK = VCO / 2 */
+#define	CSEL_DIV4		0x0020	/*		CCLK = VCO / 4 */
+#define	CSEL_DIV8		0x0030	/*		CCLK = VCO / 8 */
+
+#define	SCLK_DIV(x)			(x)		/* SCLK	= VCO /	x */
+
+/* PLL_DIV Macros							 */
+#ifdef _MISRA_RULES
+#define	SET_SSEL(x)			((x)&0xFu)	/* Set SSEL = 0-15 --> SCLK = VCO/SSEL */
+#else
+#define	SET_SSEL(x)			((x)&0xF)	/* Set SSEL = 0-15 --> SCLK = VCO/SSEL */
+#endif /* _MISRA_RULES */
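+
+/* Editor's note (illustrative usage sketch): the core and system clock
+ * dividers live in the same PLL_DIV register, so a typical value combines a
+ * CSEL_DIVx mask with SET_SSEL(), e.g. (assuming a bfin_write16() accessor):
+ *
+ *   bfin_write16(PLL_DIV, CSEL_DIV1 | SET_SSEL(5));   // CCLK = VCO, SCLK = VCO/5
+ */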
+
+/* PLL_STAT Masks										 */
+#define	ACTIVE_PLLENABLED	0x0001	/* Processor In	Active Mode With PLL Enabled */
+#define	FULL_ON				0x0002	/* Processor In	Full On	Mode */
+#define	ACTIVE_PLLDISABLED	0x0004	/* Processor In	Active Mode With PLL Disabled */
+#define	PLL_LOCKED			0x0020	/* PLL_LOCKCNT Has Been	Reached */
+
+/* VR_CTL Masks										 */
+#define	FREQ			0x0003	/* Switching Oscillator	Frequency For Regulator */
+#define	HIBERNATE		0x0000	/*		Powerdown/Bypass On-Board Regulation */
+#define	FREQ_333		0x0001	/*		Switching Frequency Is 333 kHz */
+#define	FREQ_667		0x0002	/*		Switching Frequency Is 667 kHz */
+#define	FREQ_1000		0x0003	/*		Switching Frequency Is 1 MHz */
+
+#define	GAIN			0x000C	/* Voltage Level Gain */
+#define	GAIN_5			0x0000	/*		GAIN = 5 */
+#define	GAIN_10			0x0004	/*		GAIN = 10 */
+#define	GAIN_20			0x0008	/*		GAIN = 20 */
+#define	GAIN_50			0x000C	/*		GAIN = 50 */
+
+#define	VLEV			0x00F0	/* Internal Voltage Level - Only Program Values	Within Specifications */
+#define	VLEV_100		0x0090	/*	VLEV = 1.00 V (See Datasheet for Regulator Tolerance) */
+#define	VLEV_105		0x00A0	/*	VLEV = 1.05 V (See Datasheet for Regulator Tolerance) */
+#define	VLEV_110		0x00B0	/*	VLEV = 1.10 V (See Datasheet for Regulator Tolerance) */
+#define	VLEV_115		0x00C0	/*	VLEV = 1.15 V (See Datasheet for Regulator Tolerance) */
+#define	VLEV_120		0x00D0	/*	VLEV = 1.20 V (See Datasheet for Regulator Tolerance) */
+#define	VLEV_125		0x00E0	/*	VLEV = 1.25 V (See Datasheet for Regulator Tolerance) */
+#define	VLEV_130		0x00F0	/*	VLEV = 1.30 V (See Datasheet for Regulator Tolerance) */
+
+#define	WAKE			0x0100	/* Enable RTC/Reset Wakeup From	Hibernate */
+#define	CANWE			0x0200	/* Enable CAN Wakeup From Hibernate */
+#define	MXVRWE			0x0400	/* Enable MXVR Wakeup From Hibernate */
+#define	SCKELOW			0x8000	/* Do Not Drive	SCKE High During Reset After Hibernate */
+
+/* SWRST Mask */
+#define	SYSTEM_RESET	0x0007	/* Initiates A System Software Reset */
+#define	DOUBLE_FAULT	0x0008	/* Core	Double Fault Causes Reset */
+#define	RESET_DOUBLE	0x2000	/* SW Reset Generated By Core Double-Fault */
+#define	RESET_WDOG		0x4000	/* SW Reset Generated By Watchdog Timer */
+#define	RESET_SOFTWARE	0x8000	/* SW Reset Occurred Since Last	Read Of	SWRST */
+
+/* SYSCR Masks													 */
+#define	BMODE			0x0006	/* Boot	Mode - Latched During HW Reset From Mode Pins */
+#define	NOBOOT			0x0010	/* Execute From	L1 or ASYNC Bank 0 When	BMODE =	0 */
+
+
+/* *************  SYSTEM INTERRUPT CONTROLLER MASKS ***************** */
+
+/* Peripheral Masks For	SIC0_ISR, SIC0_IWR, SIC0_IMASK */
+#define	PLL_WAKEUP_IRQ		0x00000001	/* PLL Wakeup Interrupt	Request */
+#define	DMAC0_ERR_IRQ		0x00000002	/* DMA Controller 0 Error Interrupt Request */
+#define	PPI_ERR_IRQ		0x00000004	/* PPI Error Interrupt Request */
+#define	SPORT0_ERR_IRQ		0x00000008	/* SPORT0 Error	Interrupt Request */
+#define	SPORT1_ERR_IRQ		0x00000010	/* SPORT1 Error	Interrupt Request */
+#define	SPI0_ERR_IRQ		0x00000020	/* SPI0	Error Interrupt	Request */
+#define	UART0_ERR_IRQ		0x00000040	/* UART0 Error Interrupt Request */
+#define	RTC_IRQ			0x00000080	/* Real-Time Clock Interrupt Request */
+#define	DMA0_IRQ		0x00000100	/* DMA Channel 0 (PPI) Interrupt Request */
+#define	DMA1_IRQ		0x00000200	/* DMA Channel 1 (SPORT0 RX) Interrupt Request */
+#define	DMA2_IRQ		0x00000400	/* DMA Channel 2 (SPORT0 TX) Interrupt Request */
+#define	DMA3_IRQ		0x00000800	/* DMA Channel 3 (SPORT1 RX) Interrupt Request */
+#define	DMA4_IRQ		0x00001000	/* DMA Channel 4 (SPORT1 TX) Interrupt Request */
+#define	DMA5_IRQ		0x00002000	/* DMA Channel 5 (SPI) Interrupt Request */
+#define	DMA6_IRQ		0x00004000	/* DMA Channel 6 (UART RX) Interrupt Request */
+#define	DMA7_IRQ		0x00008000	/* DMA Channel 7 (UART TX) Interrupt Request */
+#define	TIMER0_IRQ		0x00010000	/* Timer 0 Interrupt Request */
+#define	TIMER1_IRQ		0x00020000	/* Timer 1 Interrupt Request */
+#define	TIMER2_IRQ		0x00040000	/* Timer 2 Interrupt Request */
+#define	PFA_IRQ			0x00080000	/* Programmable	Flag Interrupt Request A */
+#define	PFB_IRQ			0x00100000	/* Programmable	Flag Interrupt Request B */
+#define	MDMA0_0_IRQ		0x00200000	/* MemDMA0 Stream 0 Interrupt Request */
+#define	MDMA0_1_IRQ		0x00400000	/* MemDMA0 Stream 1 Interrupt Request */
+#define	WDOG_IRQ		0x00800000	/* Software Watchdog Timer Interrupt Request */
+#define	DMAC1_ERR_IRQ		0x01000000	/* DMA Controller 1 Error Interrupt Request */
+#define	SPORT2_ERR_IRQ		0x02000000	/* SPORT2 Error	Interrupt Request */
+#define	SPORT3_ERR_IRQ		0x04000000	/* SPORT3 Error	Interrupt Request */
+#define	MXVR_SD_IRQ		0x08000000	/* MXVR	Synchronous Data Interrupt Request */
+#define	SPI1_ERR_IRQ		0x10000000	/* SPI1	Error Interrupt	Request */
+#define	SPI2_ERR_IRQ		0x20000000	/* SPI2	Error Interrupt	Request */
+#define	UART1_ERR_IRQ		0x40000000	/* UART1 Error Interrupt Request */
+#define	UART2_ERR_IRQ		0x80000000	/* UART2 Error Interrupt Request */
+
+/* the following are for backwards compatibility */
+#define	DMA0_ERR_IRQ		DMAC0_ERR_IRQ
+#define	DMA1_ERR_IRQ		DMAC1_ERR_IRQ
+
+
+/* Peripheral Masks For	SIC_ISR1, SIC_IWR1, SIC_IMASK1	 */
+#define	CAN_ERR_IRQ			0x00000001	/* CAN Error Interrupt Request */
+#define	DMA8_IRQ			0x00000002	/* DMA Channel 8 (SPORT2 RX) Interrupt Request */
+#define	DMA9_IRQ			0x00000004	/* DMA Channel 9 (SPORT2 TX) Interrupt Request */
+#define	DMA10_IRQ			0x00000008	/* DMA Channel 10 (SPORT3 RX) Interrupt	Request */
+#define	DMA11_IRQ			0x00000010	/* DMA Channel 11 (SPORT3 TX) Interrupt	Request */
+#define	DMA12_IRQ			0x00000020	/* DMA Channel 12 Interrupt Request */
+#define	DMA13_IRQ			0x00000040	/* DMA Channel 13 Interrupt Request */
+#define	DMA14_IRQ			0x00000080	/* DMA Channel 14 (SPI1) Interrupt Request */
+#define	DMA15_IRQ			0x00000100	/* DMA Channel 15 (SPI2) Interrupt Request */
+#define	DMA16_IRQ			0x00000200	/* DMA Channel 16 (UART1 RX) Interrupt Request */
+#define	DMA17_IRQ			0x00000400	/* DMA Channel 17 (UART1 TX) Interrupt Request */
+#define	DMA18_IRQ			0x00000800	/* DMA Channel 18 (UART2 RX) Interrupt Request */
+#define	DMA19_IRQ			0x00001000	/* DMA Channel 19 (UART2 TX) Interrupt Request */
+#define	TWI0_IRQ			0x00002000	/* TWI0	Interrupt Request */
+#define	TWI1_IRQ			0x00004000	/* TWI1	Interrupt Request */
+#define	CAN_RX_IRQ			0x00008000	/* CAN Receive Interrupt Request */
+#define	CAN_TX_IRQ			0x00010000	/* CAN Transmit	Interrupt Request */
+#define	MDMA1_0_IRQ			0x00020000	/* MemDMA1 Stream 0 Interrupt Request */
+#define	MDMA1_1_IRQ			0x00040000	/* MemDMA1 Stream 1 Interrupt Request */
+#define	MXVR_STAT_IRQ			0x00080000	/* MXVR	Status Interrupt Request */
+#define	MXVR_CM_IRQ			0x00100000	/* MXVR	Control	Message	Interrupt Request */
+#define	MXVR_AP_IRQ			0x00200000	/* MXVR	Asynchronous Packet Interrupt */
+
+/* the following are for backwards compatibility */
+#define	MDMA0_IRQ		MDMA1_0_IRQ
+#define	MDMA1_IRQ		MDMA1_1_IRQ
+
+#ifdef _MISRA_RULES
+#define	_MF15 0xFu
+#define	_MF7 7u
+#else
+#define	_MF15 0xF
+#define	_MF7 7
+#endif /* _MISRA_RULES */
+
+/* SIC_IMASKx Masks											 */
+#define	SIC_UNMASK_ALL	0x00000000					/* Unmask all peripheral interrupts */
+#define	SIC_MASK_ALL	0xFFFFFFFF					/* Mask	all peripheral interrupts */
+#ifdef _MISRA_RULES
+#define	SIC_MASK(x)		(1 << ((x)&0x1Fu))					/* Mask	Peripheral #x interrupt */
+#define	SIC_UNMASK(x)	(0xFFFFFFFFu ^ (1 << ((x)&0x1Fu)))	/* Unmask Peripheral #x	interrupt */
+#else
+#define	SIC_MASK(x)		(1 << ((x)&0x1F))					/* Mask	Peripheral #x interrupt */
+#define	SIC_UNMASK(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Unmask Peripheral #x	interrupt */
+#endif /* _MISRA_RULES */
+
+/* SIC_IWRx Masks											 */
+#define	IWR_DISABLE_ALL	0x00000000					/* Wakeup Disable all peripherals */
+#define	IWR_ENABLE_ALL	0xFFFFFFFF					/* Wakeup Enable all peripherals */
+#ifdef _MISRA_RULES
+#define	IWR_ENABLE(x)	(1 << ((x)&0x1Fu))					/* Wakeup Enable Peripheral #x */
+#define	IWR_DISABLE(x)	(0xFFFFFFFFu ^ (1 << ((x)&0x1Fu)))	/* Wakeup Disable Peripheral #x */
+#else
+#define	IWR_ENABLE(x)	(1 << ((x)&0x1F))					/* Wakeup Enable Peripheral #x */
+#define	IWR_DISABLE(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Wakeup Disable Peripheral #x */
+#endif /* _MISRA_RULES */
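+
+/* Illustrative sketch (assumption, following the comments above rather than
+ * any platform code): a SIC_IMASKx value that masks only peripheral #7 while
+ * leaving the rest unmasked, and a SIC_IWRx value that lets only peripheral
+ * #0 (PLL wakeup) wake the core:
+ *
+ *	unsigned long imask = SIC_UNMASK_ALL | SIC_MASK(7);
+ *	unsigned long iwr   = IWR_DISABLE_ALL | IWR_ENABLE(0);
+ *
+ * Writing these values to the corresponding MMRs is left to platform code.
+ */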
+
+
+/* ********* WATCHDOG TIMER MASKS ******************** */
+/* Watchdog Timer WDOG_CTL Register Masks */
+#ifdef _MISRA_RULES
+#define	WDEV(x)			(((x)<<1) & 0x0006u)	/* event generated on roll over */
+#else
+#define	WDEV(x)			(((x)<<1) & 0x0006)	/* event generated on roll over */
+#endif /* _MISRA_RULES */
+#define	WDEV_RESET		0x0000				/* generate reset event	on roll	over */
+#define	WDEV_NMI		0x0002				/* generate NMI	event on roll over */
+#define	WDEV_GPI		0x0004				/* generate GP IRQ on roll over */
+#define	WDEV_NONE		0x0006				/* no event on roll over */
+#define	WDEN			0x0FF0				/* enable watchdog */
+#define	WDDIS			0x0AD0				/* disable watchdog */
+#define	WDRO			0x8000				/* watchdog rolled over	latch */
+
+/* deprecated WDOG_CTL Register	Masks for legacy code */
+#define	ICTL WDEV
+#define	ENABLE_RESET	WDEV_RESET
+#define	WDOG_RESET		WDEV_RESET
+#define	ENABLE_NMI		WDEV_NMI
+#define	WDOG_NMI		WDEV_NMI
+#define	ENABLE_GPI		WDEV_GPI
+#define	WDOG_GPI		WDEV_GPI
+#define	DISABLE_EVT	WDEV_NONE
+#define	WDOG_NONE		WDEV_NONE
+
+#define	TMR_EN			WDEN
+#define	WDOG_DISABLE		WDDIS
+#define	TRO			WDRO
+
+#define	ICTL_P0			0x01
+#define	ICTL_P1			0x02
+#define	TRO_P			0x0F
+
+
+/* ***************  REAL TIME CLOCK MASKS  **************************/
+/* RTC_STAT and	RTC_ALARM register */
+#define	RTSEC		0x0000003F	/* Real-Time Clock Seconds */
+#define	RTMIN		0x00000FC0	/* Real-Time Clock Minutes */
+#define	RTHR		0x0001F000	/* Real-Time Clock Hours */
+#define	RTDAY		0xFFFE0000	/* Real-Time Clock Days */
+
+/* RTC_ICTL register */
+#define	SWIE		0x0001		/* Stopwatch Interrupt Enable */
+#define	AIE			0x0002		/* Alarm Interrupt Enable */
+#define	SIE			0x0004		/* Seconds (1 Hz) Interrupt Enable */
+#define	MIE			0x0008		/* Minutes Interrupt Enable */
+#define	HIE			0x0010		/* Hours Interrupt Enable */
+#define	DIE			0x0020		/* 24 Hours (Days) Interrupt Enable */
+#define	DAIE		0x0040		/* Day Alarm (Day, Hour, Minute, Second) Interrupt Enable */
+#define	WCIE		0x8000		/* Write Complete Interrupt Enable */
+
+/* RTC_ISTAT register */
+#define	SWEF		0x0001		/* Stopwatch Event Flag */
+#define	AEF			0x0002		/* Alarm Event Flag */
+#define	SEF			0x0004		/* Seconds (1 Hz) Event	Flag */
+#define	MEF			0x0008		/* Minutes Event Flag */
+#define	HEF			0x0010		/* Hours Event Flag */
+#define	DEF			0x0020		/* 24 Hours (Days) Event Flag */
+#define	DAEF		0x0040		/* Day Alarm (Day, Hour, Minute, Second) Event Flag */
+#define	WPS			0x4000		/* Write Pending Status	(RO) */
+#define	WCOM		0x8000		/* Write Complete */
+
+/* RTC_FAST Mask (RTC_PREN Mask) */
+#define	ENABLE_PRESCALE	     0x00000001	 /* Enable prescaler so	RTC runs at 1 Hz */
+#define	PREN		     0x00000001
+		/* ** Must be set after	power-up for proper operation of RTC */
+
+/* Deprecated RTC_STAT and RTC_ALARM Masks			 */
+#define	RTC_SEC			RTSEC	/* Real-Time Clock Seconds */
+#define	RTC_MIN			RTMIN	/* Real-Time Clock Minutes */
+#define	RTC_HR			RTHR	/* Real-Time Clock Hours */
+#define	RTC_DAY			RTDAY	/* Real-Time Clock Days */
+
+/* Deprecated RTC_ICTL/RTC_ISTAT Masks											 */
+#define	STOPWATCH		SWIE		/* Stopwatch Interrupt Enable	 */
+#define	ALARM			AIE		/* Alarm Interrupt Enable		 */
+#define	SECOND			SIE		/* Seconds (1 Hz) Interrupt Enable */
+#define	MINUTE			MIE		/* Minutes Interrupt Enable		 */
+#define	HOUR			HIE		/* Hours Interrupt Enable		 */
+#define	DAY				DIE		/* 24 Hours (Days) Interrupt Enable */
+#define	DAY_ALARM		DAIE		/* Day Alarm (Day, Hour, Minute, Second) Interrupt Enable */
+#define	WRITE_COMPLETE	WCIE		/* Write Complete Interrupt Enable */
+
+
+/* ***************************** UART CONTROLLER MASKS ********************** */
+/* UARTx_LCR Register */
+#ifdef _MISRA_RULES
+#define	WLS(x)		(((x)-5u) & 0x03u)	/* Word	Length Select */
+#else
+#define	WLS(x)		(((x)-5) & 0x03)	/* Word	Length Select */
+#endif /* _MISRA_RULES */
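+
+/* Illustrative sketch (assumption): WLS() takes the desired word length in
+ * bits (5-8) and maps it onto the two low-order LCR bits, so an 8-data-bit,
+ * 1-stop-bit, no-parity line can be described simply as
+ *
+ *	WLS(8)
+ *
+ * since STB and PEN are left clear.
+ */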
+#define	STB			0x04				/* Stop	Bits */
+#define	PEN			0x08				/* Parity Enable */
+#define	EPS			0x10				/* Even	Parity Select */
+#define	STP			0x20				/* Stick Parity */
+#define	SB			0x40				/* Set Break */
+#define	DLAB		0x80				/* Divisor Latch Access */
+
+#define	DLAB_P		0x07
+#define	SB_P		0x06
+#define	STP_P		0x05
+#define	EPS_P		0x04
+#define	PEN_P		0x03
+#define	STB_P		0x02
+#define	WLS_P1		0x01
+#define	WLS_P0		0x00
+
+/* UARTx_MCR Register */
+#define	LOOP_ENA	0x10	/* Loopback Mode Enable */
+#define	LOOP_ENA_P	0x04
+/* Deprecated UARTx_MCR	Mask			 */
+
+/* UARTx_LSR Register */
+#define	DR			0x01	/* Data	Ready */
+#define	OE			0x02	/* Overrun Error */
+#define	PE			0x04	/* Parity Error */
+#define	FE			0x08	/* Framing Error */
+#define	BI			0x10	/* Break Interrupt */
+#define	THRE		0x20	/* THR Empty */
+#define	TEMT		0x40	/* TSR and UART_THR Empty */
+
+#define	TEMP_P		0x06
+#define	THRE_P		0x05
+#define	BI_P		0x04
+#define	FE_P		0x03
+#define	PE_P		0x02
+#define	OE_P		0x01
+#define	DR_P		0x00
+
+/* UARTx_IER Register */
+#define	ERBFI		0x01		/* Enable Receive Buffer Full Interrupt */
+#define	ETBEI		0x02		/* Enable Transmit Buffer Empty	Interrupt */
+#define	ELSI		0x04		/* Enable RX Status Interrupt */
+
+#define	ELSI_P		0x02
+#define	ETBEI_P		0x01
+#define	ERBFI_P		0x00
+
+/* UARTx_IIR Register */
+#define	NINT		0x01
+#define	STATUS_P1	0x02
+#define	STATUS_P0	0x01
+#define	NINT_P		0x00
+
+/* UARTx_GCTL Register */
+#define	UCEN		0x01		/* Enable UARTx	Clocks */
+#define	IREN		0x02		/* Enable IrDA Mode */
+#define	TPOLC		0x04		/* IrDA	TX Polarity Change */
+#define	RPOLC		0x08		/* IrDA	RX Polarity Change */
+#define	FPE			0x10		/* Force Parity	Error On Transmit */
+#define	FFE			0x20		/* Force Framing Error On Transmit */
+
+#define	FFE_P		0x05
+#define	FPE_P		0x04
+#define	RPOLC_P		0x03
+#define	TPOLC_P		0x02
+#define	IREN_P		0x01
+#define	UCEN_P		0x00
+
+
+/* **********  SERIAL PORT MASKS  ********************** */
+/* SPORTx_TCR1 Masks */
+#define	TSPEN		0x0001	/* TX enable  */
+#define	ITCLK		0x0002	/* Internal TX Clock Select  */
+#define	TDTYPE		0x000C	/* TX Data Formatting Select */
+#define	DTYPE_NORM	0x0000		/* Data	Format Normal */
+#define	DTYPE_ULAW	0x0008		/* Compand Using u-Law */
+#define	DTYPE_ALAW	0x000C		/* Compand Using A-Law */
+#define	TLSBIT		0x0010	/* TX Bit Order */
+#define	ITFS		0x0200	/* Internal TX Frame Sync Select  */
+#define	TFSR		0x0400	/* TX Frame Sync Required Select  */
+#define	DITFS		0x0800	/* Data	Independent TX Frame Sync Select  */
+#define	LTFS		0x1000	/* Low TX Frame	Sync Select  */
+#define	LATFS		0x2000	/* Late	TX Frame Sync Select  */
+#define	TCKFE		0x4000	/* TX Clock Falling Edge Select */
+/* SPORTx_RCR1 Deprecated Masks								 */
+#define	TULAW		DTYPE_ULAW		/* Compand Using u-Law */
+#define	TALAW		DTYPE_ALAW			/* Compand Using A-Law */
+
+/* SPORTx_TCR2 Masks */
+#ifdef _MISRA_RULES
+#define	SLEN(x)		((x)&0x1Fu)	/* SPORT TX Word Length	(2 - 31) */
+#else
+#define	SLEN(x)		((x)&0x1F)	/* SPORT TX Word Length	(2 - 31) */
+#endif /* _MISRA_RULES */
+#define	TXSE		0x0100	/*TX Secondary Enable */
+#define	TSFSE		0x0200	/*TX Stereo Frame Sync Enable */
+#define	TRFST		0x0400	/*TX Right-First Data Order  */
+
+/* SPORTx_RCR1 Masks */
+#define	RSPEN		0x0001	/* RX enable  */
+#define	IRCLK		0x0002	/* Internal RX Clock Select  */
+#define	RDTYPE		0x000C	/* RX Data Formatting Select */
+#define	DTYPE_NORM	0x0000		/* no companding */
+#define	DTYPE_ULAW	0x0008		/* Compand Using u-Law */
+#define	DTYPE_ALAW	0x000C		/* Compand Using A-Law */
+#define	RLSBIT		0x0010	/* RX Bit Order */
+#define	IRFS		0x0200	/* Internal RX Frame Sync Select  */
+#define	RFSR		0x0400	/* RX Frame Sync Required Select  */
+#define	LRFS		0x1000	/* Low RX Frame	Sync Select  */
+#define	LARFS		0x2000	/* Late	RX Frame Sync Select  */
+#define	RCKFE		0x4000	/* RX Clock Falling Edge Select */
+/* SPORTx_RCR1 Deprecated Masks								 */
+#define	RULAW		DTYPE_ULAW		/* Compand Using u-Law */
+#define	RALAW		DTYPE_ALAW			/* Compand Using A-Law */
+
+/* SPORTx_RCR2 Masks */
+#ifdef _MISRA_RULES
+#define	SLEN(x)		((x)&0x1Fu)	/* SPORT RX Word Length	(2 - 31) */
+#else
+#define	SLEN(x)		((x)&0x1F)	/* SPORT RX Word Length	(2 - 31) */
+#endif /* _MISRA_RULES */
+#define	RXSE		0x0100	/*RX Secondary Enable */
+#define	RSFSE		0x0200	/*RX Stereo Frame Sync Enable */
+#define	RRFST		0x0400	/*RX Right-First Data Order  */
+
+/*SPORTx_STAT Masks */
+#define	RXNE		0x0001		/*RX FIFO Not Empty Status */
+#define	RUVF		0x0002		/*RX Underflow Status */
+#define	ROVF		0x0004		/*RX Overflow Status */
+#define	TXF			0x0008		/*TX FIFO Full Status */
+#define	TUVF		0x0010		/*TX Underflow Status */
+#define	TOVF		0x0020		/*TX Overflow Status */
+#define	TXHRE		0x0040		/*TX Hold Register Empty */
+
+/*SPORTx_MCMC1 Masks */
+#define	WOFF			0x000003FF	/*Multichannel Window Offset Field */
+/* SPORTx_MCMC1	Macros								 */
+#ifdef _MISRA_RULES
+#define	SET_WOFF(x)		((x) & 0x3FFu)	/* Multichannel	Window Offset Field */
+/* Only	use SET_WSIZE Macro With Logic OR While	Setting	Lower Order Bits */
+#define	SET_WSIZE(x)	(((((x)>>0x3)-1u)&0xFu)	<< 0xC)	/* Multichannel	Window Size = (x/8)-1 */
+#else
+#define	SET_WOFF(x)		((x) & 0x3FF)	/* Multichannel	Window Offset Field */
+/* Only	use SET_WSIZE Macro With Logic OR While	Setting	Lower Order Bits */
+#define	SET_WSIZE(x)	(((((x)>>0x3)-1)&0xF) << 0xC)	/* Multichannel	Window Size = (x/8)-1 */
+#endif /* _MISRA_RULES */
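+
+/* Illustrative sketch (assumption): per the note above, SET_WSIZE() only
+ * builds the high-order window-size field and must be OR'ed onto the
+ * low-order offset bits, e.g. a 32-channel window starting at offset 4:
+ *
+ *	unsigned short mcmc1 = SET_WOFF(4) | SET_WSIZE(32);
+ */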
+
+
+/*SPORTx_MCMC2 Masks */
+#define	MCCRM		0x0003	/*Multichannel Clock Recovery Mode */
+#define	REC_BYPASS	0x0000		/* Bypass Mode (No Clock Recovery) */
+#define	REC_2FROM4	0x0002		/* Recover 2 MHz Clock from 4 MHz Clock */
+#define	REC_8FROM16	0x0003		/* Recover 8 MHz Clock from 16 MHz Clock */
+#define	MCDTXPE		0x0004	/*Multichannel DMA Transmit Packing */
+#define	MCDRXPE		0x0008	/*Multichannel DMA Receive Packing */
+#define	MCMEN		0x0010	/*Multichannel Frame Mode Enable */
+#define	FSDR		0x0080	/*Multichannel Frame Sync to Data Relationship */
+#define	MFD			0xF000	/*Multichannel Frame Delay    */
+#define	MFD_0		0x0000		/* Multichannel	Frame Delay = 0 */
+#define	MFD_1		0x1000		/* Multichannel	Frame Delay = 1 */
+#define	MFD_2		0x2000		/* Multichannel	Frame Delay = 2 */
+#define	MFD_3		0x3000		/* Multichannel	Frame Delay = 3 */
+#define	MFD_4		0x4000		/* Multichannel	Frame Delay = 4 */
+#define	MFD_5		0x5000		/* Multichannel	Frame Delay = 5 */
+#define	MFD_6		0x6000		/* Multichannel	Frame Delay = 6 */
+#define	MFD_7		0x7000		/* Multichannel	Frame Delay = 7 */
+#define	MFD_8		0x8000		/* Multichannel	Frame Delay = 8 */
+#define	MFD_9		0x9000		/* Multichannel	Frame Delay = 9 */
+#define	MFD_10		0xA000		/* Multichannel	Frame Delay = 10 */
+#define	MFD_11		0xB000		/* Multichannel	Frame Delay = 11 */
+#define	MFD_12		0xC000		/* Multichannel	Frame Delay = 12 */
+#define	MFD_13		0xD000		/* Multichannel	Frame Delay = 13 */
+#define	MFD_14		0xE000		/* Multichannel	Frame Delay = 14 */
+#define	MFD_15		0xF000		/* Multichannel	Frame Delay = 15 */
+
+
+/*  *********  PARALLEL	PERIPHERAL INTERFACE (PPI) MASKS ****************   */
+/*  PPI_CONTROL	Masks	      */
+#define	PORT_EN		0x0001	/* PPI Port Enable  */
+#define	PORT_DIR	0x0002	/* PPI Port Direction	    */
+#define	XFR_TYPE	0x000C	/* PPI Transfer	Type  */
+#define	PORT_CFG	0x0030	/* PPI Port Configuration */
+#define	FLD_SEL		0x0040	/* PPI Active Field Select */
+#define	PACK_EN		0x0080	/* PPI Packing Mode */
+/* previous versions of	defBF539.h erroneously included	DMA32 (PPI 32-bit DMA Enable) */
+#define	SKIP_EN		0x0200	/* PPI Skip Element Enable */
+#define	SKIP_EO		0x0400	/* PPI Skip Even/Odd Elements */
+#define	DLENGTH		0x3800	/* PPI Data Length  */
+#define	DLEN_8		0x0000		/* Data	Length = 8 Bits */
+#define	DLEN_10		0x0800		/* Data	Length = 10 Bits */
+#define	DLEN_11		0x1000		/* Data	Length = 11 Bits */
+#define	DLEN_12		0x1800		/* Data	Length = 12 Bits */
+#define	DLEN_13		0x2000		/* Data	Length = 13 Bits */
+#define	DLEN_14		0x2800		/* Data	Length = 14 Bits */
+#define	DLEN_15		0x3000		/* Data	Length = 15 Bits */
+#define	DLEN_16		0x3800		/* Data	Length = 16 Bits */
+#ifdef _MISRA_RULES
+#define	DLEN(x)		((((x)-9u) & 0x07u) << 11)  /* PPI Data	Length (only works for x=10-->x=16) */
+#else
+#define	DLEN(x)		((((x)-9) & 0x07) << 11)  /* PPI Data Length (only works for x=10-->x=16) */
+#endif /* _MISRA_RULES */
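+
+/* Illustrative note (assumption): DLEN() packs the PPI data-length field and,
+ * as its comment says, is only meaningful for x = 10..16; DLEN(16) evaluates
+ * to the same value as DLEN_16 (0x3800), while 8-bit data corresponds to the
+ * field being left at 0 (DLEN_8).
+ */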
+#define	POL			0xC000	/* PPI Signal Polarities       */
+#define	POLC		0x4000		/* PPI Clock Polarity */
+#define	POLS		0x8000		/* PPI Frame Sync Polarity */
+
+
+/* PPI_STATUS Masks					     */
+#define	FLD			0x0400	/* Field Indicator   */
+#define	FT_ERR		0x0800	/* Frame Track Error */
+#define	OVR			0x1000	/* FIFO	Overflow Error */
+#define	UNDR		0x2000	/* FIFO	Underrun Error */
+#define	ERR_DET		0x4000	/* Error Detected Indicator */
+#define	ERR_NCOR	0x8000	/* Error Not Corrected Indicator */
+
+
+/* **********  DMA CONTROLLER MASKS  ***********************/
+/* DMAx_CONFIG,	MDMA_yy_CONFIG Masks */
+#define	DMAEN		0x0001	/* Channel Enable */
+#define	WNR			0x0002	/* Channel Direction (W/R*) */
+#define	WDSIZE_8	0x0000	/* Word	Size 8 bits */
+#define	WDSIZE_16	0x0004	/* Word	Size 16	bits */
+#define	WDSIZE_32	0x0008	/* Word	Size 32	bits */
+#define	DMA2D		0x0010	/* 2D/1D* Mode */
+#define	RESTART		0x0020	/* Restart */
+#define	DI_SEL		0x0040	/* Data	Interrupt Select */
+#define	DI_EN		0x0080	/* Data	Interrupt Enable */
+#define	NDSIZE		0x0900	/* Next	Descriptor Size */
+#define	NDSIZE_0	0x0000	/* Next	Descriptor Size	= 0 (Stop/Autobuffer) */
+#define	NDSIZE_1	0x0100	/* Next	Descriptor Size	= 1 */
+#define	NDSIZE_2	0x0200	/* Next	Descriptor Size	= 2 */
+#define	NDSIZE_3	0x0300	/* Next	Descriptor Size	= 3 */
+#define	NDSIZE_4	0x0400	/* Next	Descriptor Size	= 4 */
+#define	NDSIZE_5	0x0500	/* Next	Descriptor Size	= 5 */
+#define	NDSIZE_6	0x0600	/* Next	Descriptor Size	= 6 */
+#define	NDSIZE_7	0x0700	/* Next	Descriptor Size	= 7 */
+#define	NDSIZE_8	0x0800	/* Next	Descriptor Size	= 8 */
+#define	NDSIZE_9	0x0900	/* Next	Descriptor Size	= 9 */
+
+#define DMAFLOW			0x7000	/* Flow Control */
+#define DMAFLOW_STOP		0x0000	/* Stop Mode */
+#define DMAFLOW_AUTO		0x1000	/* Autobuffer Mode */
+#define DMAFLOW_ARRAY		0x4000	/* Descriptor Array Mode */
+#define DMAFLOW_SMALL		0x6000	/* Small Model Descriptor List Mode */
+#define DMAFLOW_LARGE		0x7000	/* Large Model Descriptor List Mode */
+
+#define	DMAEN_P		0x0		/* Channel Enable */
+#define	WNR_P		0x1		/* Channel Direction (W/R*) */
+#define	DMA2D_P		0x4		/* 2D/1D* Mode */
+#define	RESTART_P	0x5		/* Restart */
+#define	DI_SEL_P	0x6		/* Data	Interrupt Select */
+#define	DI_EN_P		0x7		/* Data	Interrupt Enable */
+
+/* DMAx_IRQ_STATUS, MDMA_yy_IRQ_STATUS Masks */
+#define	DMA_DONE	0x0001	/* DMA Done Indicator */
+#define	DMA_ERR		0x0002	/* DMA Error Indicator */
+#define	DFETCH		0x0004	/* Descriptor Fetch Indicator */
+#define	DMA_RUN		0x0008	/* DMA Running Indicator */
+
+#define	DMA_DONE_P	0x0		/* DMA Done Indicator */
+#define	DMA_ERR_P	0x1		/* DMA Error Indicator */
+#define	DFETCH_P	0x2		/* Descriptor Fetch Indicator */
+#define	DMA_RUN_P	0x3		/* DMA Running Indicator */
+
+/* DMAx_PERIPHERAL_MAP,	MDMA_yy_PERIPHERAL_MAP Masks */
+
+#define	CTYPE			0x0040	/* DMA Channel Type Indicator */
+#define	CTYPE_P			0x6		/* DMA Channel Type Indicator BIT POSITION */
+#define	PCAP8			0x0080	/* DMA 8-bit Operation Indicator   */
+#define	PCAP16			0x0100	/* DMA 16-bit Operation	Indicator */
+#define	PCAP32			0x0200	/* DMA 32-bit Operation	Indicator */
+#define	PCAPWR			0x0400	/* DMA Write Operation Indicator */
+#define	PCAPRD			0x0800	/* DMA Read Operation Indicator */
+#define	PMAP			0xF000	/* DMA Peripheral Map Field */
+
+/* PMAP	Encodings For DMA Controller 0 */
+#define	PMAP_PPI		0x0000	/* PMAP	PPI Port DMA */
+#define	PMAP_SPORT0RX	0x1000	/* PMAP	SPORT0 Receive DMA */
+#define	PMAP_SPORT0TX	0x2000	/* PMAP	SPORT0 Transmit	DMA */
+#define	PMAP_SPORT1RX	0x3000	/* PMAP	SPORT1 Receive DMA */
+#define	PMAP_SPORT1TX	0x4000	/* PMAP	SPORT1 Transmit	DMA */
+#define	PMAP_SPI0		0x5000	/* PMAP	SPI DMA */
+#define	PMAP_UART0RX		0x6000	/* PMAP	UART Receive DMA */
+#define	PMAP_UART0TX		0x7000	/* PMAP	UART Transmit DMA */
+
+/* PMAP	Encodings For DMA Controller 1 */
+#define	PMAP_SPORT2RX	    0x0000  /* PMAP SPORT2 Receive DMA */
+#define	PMAP_SPORT2TX	    0x1000  /* PMAP SPORT2 Transmit DMA */
+#define	PMAP_SPORT3RX	    0x2000  /* PMAP SPORT3 Receive DMA */
+#define	PMAP_SPORT3TX	    0x3000  /* PMAP SPORT3 Transmit DMA */
+#define	PMAP_SPI1	    0x6000  /* PMAP SPI1 DMA */
+#define	PMAP_SPI2	    0x7000  /* PMAP SPI2 DMA */
+#define	PMAP_UART1RX	    0x8000  /* PMAP UART1 Receive DMA */
+#define	PMAP_UART1TX	    0x9000  /* PMAP UART1 Transmit DMA */
+#define	PMAP_UART2RX	    0xA000  /* PMAP UART2 Receive DMA */
+#define	PMAP_UART2TX	    0xB000  /* PMAP UART2 Transmit DMA */
+
+
+/*  *************  GENERAL PURPOSE TIMER MASKS	******************** */
+/* PWM Timer bit definitions */
+/* TIMER_ENABLE	Register */
+#define	TIMEN0			0x0001	/* Enable Timer	0 */
+#define	TIMEN1			0x0002	/* Enable Timer	1 */
+#define	TIMEN2			0x0004	/* Enable Timer	2 */
+
+#define	TIMEN0_P		0x00
+#define	TIMEN1_P		0x01
+#define	TIMEN2_P		0x02
+
+/* TIMER_DISABLE Register */
+#define	TIMDIS0			0x0001	/* Disable Timer 0 */
+#define	TIMDIS1			0x0002	/* Disable Timer 1 */
+#define	TIMDIS2			0x0004	/* Disable Timer 2 */
+
+#define	TIMDIS0_P		0x00
+#define	TIMDIS1_P		0x01
+#define	TIMDIS2_P		0x02
+
+/* TIMER_STATUS	Register */
+#define	TIMIL0			0x0001	/* Timer 0 Interrupt */
+#define	TIMIL1			0x0002	/* Timer 1 Interrupt */
+#define	TIMIL2			0x0004	/* Timer 2 Interrupt */
+#define	TOVF_ERR0		0x0010	/* Timer 0 Counter Overflow */
+#define	TOVF_ERR1		0x0020	/* Timer 1 Counter Overflow */
+#define	TOVF_ERR2		0x0040	/* Timer 2 Counter Overflow */
+#define	TRUN0			0x1000	/* Timer 0 Slave Enable	Status */
+#define	TRUN1			0x2000	/* Timer 1 Slave Enable	Status */
+#define	TRUN2			0x4000	/* Timer 2 Slave Enable	Status */
+
+#define	TIMIL0_P		0x00
+#define	TIMIL1_P		0x01
+#define	TIMIL2_P		0x02
+#define	TOVF_ERR0_P		0x04
+#define	TOVF_ERR1_P		0x05
+#define	TOVF_ERR2_P		0x06
+#define	TRUN0_P			0x0C
+#define	TRUN1_P			0x0D
+#define	TRUN2_P			0x0E
+
+/* Alternate Deprecated	Macros Provided	For Backwards Code Compatibility */
+#define	TOVL_ERR0		TOVF_ERR0
+#define	TOVL_ERR1		TOVF_ERR1
+#define	TOVL_ERR2		TOVF_ERR2
+#define	TOVL_ERR0_P		TOVF_ERR0_P
+#define	TOVL_ERR1_P	TOVF_ERR1_P
+#define	TOVL_ERR2_P	TOVF_ERR2_P
+
+/* TIMERx_CONFIG Registers */
+#define	PWM_OUT			0x0001
+#define	WDTH_CAP		0x0002
+#define	EXT_CLK			0x0003
+#define	PULSE_HI		0x0004
+#define	PERIOD_CNT		0x0008
+#define	IRQ_ENA			0x0010
+#define	TIN_SEL			0x0020
+#define	OUT_DIS			0x0040
+#define	CLK_SEL			0x0080
+#define	TOGGLE_HI		0x0100
+#define	EMU_RUN			0x0200
+#ifdef _MISRA_RULES
+#define	ERR_TYP(x)		(((x) &	0x03u) << 14)
+#else
+#define	ERR_TYP(x)		(((x) &	0x03) << 14)
+#endif /* _MISRA_RULES */
+
+#define	TMODE_P0		0x00
+#define	TMODE_P1		0x01
+#define	PULSE_HI_P		0x02
+#define	PERIOD_CNT_P	0x03
+#define	IRQ_ENA_P		0x04
+#define	TIN_SEL_P		0x05
+#define	OUT_DIS_P		0x06
+#define	CLK_SEL_P		0x07
+#define	TOGGLE_HI_P		0x08
+#define	EMU_RUN_P		0x09
+#define	ERR_TYP_P0		0x0E
+#define	ERR_TYP_P1		0x0F
+
+
+/* ******************	 GENERAL-PURPOSE I/O  ********************* */
+/*  Flag I/O (FIO_) Masks */
+#define	PF0			0x0001
+#define	PF1			0x0002
+#define	PF2			0x0004
+#define	PF3			0x0008
+#define	PF4			0x0010
+#define	PF5			0x0020
+#define	PF6			0x0040
+#define	PF7			0x0080
+#define	PF8			0x0100
+#define	PF9			0x0200
+#define	PF10		0x0400
+#define	PF11		0x0800
+#define	PF12		0x1000
+#define	PF13		0x2000
+#define	PF14		0x4000
+#define	PF15		0x8000
+
+/*  PORT F BIT POSITIONS */
+#define	PF0_P		0x0
+#define	PF1_P		0x1
+#define	PF2_P		0x2
+#define	PF3_P		0x3
+#define	PF4_P		0x4
+#define	PF5_P		0x5
+#define	PF6_P		0x6
+#define	PF7_P		0x7
+#define	PF8_P		0x8
+#define	PF9_P		0x9
+#define	PF10_P		0xA
+#define	PF11_P		0xB
+#define	PF12_P		0xC
+#define	PF13_P		0xD
+#define	PF14_P		0xE
+#define	PF15_P		0xF
+
+
+/*******************   GPIO MASKS  *********************/
+/* Port	C Masks */
+#define	PC0		0x0001
+#define	PC1		0x0002
+#define	PC4		0x0010
+#define	PC5		0x0020
+#define	PC6		0x0040
+#define	PC7		0x0080
+#define	PC8		0x0100
+#define	PC9		0x0200
+/* Port	C Bit Positions */
+#define	PC0_P	0x0
+#define	PC1_P	0x1
+#define	PC4_P	0x4
+#define	PC5_P	0x5
+#define	PC6_P	0x6
+#define	PC7_P	0x7
+#define	PC8_P	0x8
+#define	PC9_P	0x9
+
+/* Port	D */
+#define	PD0		0x0001
+#define	PD1		0x0002
+#define	PD2		0x0004
+#define	PD3		0x0008
+#define	PD4		0x0010
+#define	PD5		0x0020
+#define	PD6		0x0040
+#define	PD7		0x0080
+#define	PD8		0x0100
+#define	PD9		0x0200
+#define	PD10	0x0400
+#define	PD11	0x0800
+#define	PD12	0x1000
+#define	PD13	0x2000
+#define	PD14	0x4000
+#define	PD15	0x8000
+/* Port	D Bit Positions */
+#define	PD0_P	0x0
+#define	PD1_P	0x1
+#define	PD2_P	0x2
+#define	PD3_P	0x3
+#define	PD4_P	0x4
+#define	PD5_P	0x5
+#define	PD6_P	0x6
+#define	PD7_P	0x7
+#define	PD8_P	0x8
+#define	PD9_P	0x9
+#define	PD10_P	0xA
+#define	PD11_P	0xB
+#define	PD12_P	0xC
+#define	PD13_P	0xD
+#define	PD14_P	0xE
+#define	PD15_P	0xF
+
+/* Port	E */
+#define	PE0		0x0001
+#define	PE1		0x0002
+#define	PE2		0x0004
+#define	PE3		0x0008
+#define	PE4		0x0010
+#define	PE5		0x0020
+#define	PE6		0x0040
+#define	PE7		0x0080
+#define	PE8		0x0100
+#define	PE9		0x0200
+#define	PE10	0x0400
+#define	PE11	0x0800
+#define	PE12	0x1000
+#define	PE13	0x2000
+#define	PE14	0x4000
+#define	PE15	0x8000
+/* Port	E Bit Positions */
+#define	PE0_P	0x0
+#define	PE1_P	0x1
+#define	PE2_P	0x2
+#define	PE3_P	0x3
+#define	PE4_P	0x4
+#define	PE5_P	0x5
+#define	PE6_P	0x6
+#define	PE7_P	0x7
+#define	PE8_P	0x8
+#define	PE9_P	0x9
+#define	PE10_P	0xA
+#define	PE11_P	0xB
+#define	PE12_P	0xC
+#define	PE13_P	0xD
+#define	PE14_P	0xE
+#define	PE15_P	0xF
+
+
+/* ***********	SERIAL PERIPHERAL INTERFACE (SPI) MASKS	 **************** */
+/* SPIx_CTL Masks */
+#define	TIMOD		0x0003		/* Transfer Initiate Mode */
+#define	RDBR_CORE	0x0000		/*		RDBR Read Initiates, IRQ When RDBR Full */
+#define	TDBR_CORE	0x0001		/*		TDBR Write Initiates, IRQ When TDBR Empty */
+#define	RDBR_DMA	0x0002		/*		DMA Read, DMA Until FIFO Empty */
+#define	TDBR_DMA	0x0003		/*		DMA Write, DMA Until FIFO Full */
+#define	SZ			0x0004		/* Send	Zero (When TDBR	Empty, Send Zero/Last*) */
+#define	GM			0x0008		/* Get More (When RDBR Full, Overwrite/Discard*) */
+#define	PSSE		0x0010		/* Slave-Select	Input Enable */
+#define	EMISO		0x0020		/* Enable MISO As Output */
+#define	SIZE		0x0100		/* Size	of Words (16/8*	Bits) */
+#define	LSBF		0x0200		/* LSB First			 */
+#define	CPHA		0x0400		/* Clock Phase			 */
+#define	CPOL		0x0800		/* Clock Polarity		 */
+#define	MSTR		0x1000		/* Master/Slave*		 */
+#define	WOM			0x2000		/* Write Open Drain Master */
+#define	SPE			0x4000		/* SPI Enable			 */
+
+/* SPIx_FLG Masks */
+#define	FLS1	0x0002	/* Enables (=1)	SPI_FLOUT1 as flag output for SPI Slave-select */
+#define	FLS2	0x0004	/* Enables (=1)	SPI_FLOUT2 as flag output for SPI Slave-select */
+#define	FLS3	0x0008	/* Enables (=1)	SPI_FLOUT3 as flag output for SPI Slave-select */
+#define	FLS4	0x0010	/* Enables (=1)	SPI_FLOUT4 as flag output for SPI Slave-select */
+#define	FLS5	0x0020	/* Enables (=1)	SPI_FLOUT5 as flag output for SPI Slave-select */
+#define	FLS6	0x0040	/* Enables (=1)	SPI_FLOUT6 as flag output for SPI Slave-select */
+#define	FLS7	0x0080	/* Enables (=1)	SPI_FLOUT7 as flag output for SPI Slave-select */
+
+#define	FLG1	0x0200	/* Activates (=0) SPI_FLOUT1 as	flag output for	SPI Slave-select  */
+#define	FLG2	0x0400	/* Activates (=0) SPI_FLOUT2 as	flag output for	SPI Slave-select */
+#define	FLG3	0x0800	/* Activates (=0) SPI_FLOUT3 as	flag output for	SPI Slave-select  */
+#define	FLG4	0x1000	/* Activates (=0) SPI_FLOUT4 as	flag output for	SPI Slave-select  */
+#define	FLG5	0x2000	/* Activates (=0) SPI_FLOUT5 as	flag output for	SPI Slave-select  */
+#define	FLG6	0x4000	/* Activates (=0) SPI_FLOUT6 as	flag output for	SPI Slave-select  */
+#define	FLG7	0x8000	/* Activates (=0) SPI_FLOUT7 as	flag output for	SPI Slave-select */
+
+/* SPIx_FLG Bit	Positions */
+#define	FLS1_P	0x0001	/* Enables (=1)	SPI_FLOUT1 as flag output for SPI Slave-select */
+#define	FLS2_P	0x0002	/* Enables (=1)	SPI_FLOUT2 as flag output for SPI Slave-select */
+#define	FLS3_P	0x0003	/* Enables (=1)	SPI_FLOUT3 as flag output for SPI Slave-select */
+#define	FLS4_P	0x0004	/* Enables (=1)	SPI_FLOUT4 as flag output for SPI Slave-select */
+#define	FLS5_P	0x0005	/* Enables (=1)	SPI_FLOUT5 as flag output for SPI Slave-select */
+#define	FLS6_P	0x0006	/* Enables (=1)	SPI_FLOUT6 as flag output for SPI Slave-select */
+#define	FLS7_P	0x0007	/* Enables (=1)	SPI_FLOUT7 as flag output for SPI Slave-select */
+#define	FLG1_P	0x0009	/* Activates (=0) SPI_FLOUT1 as	flag output for	SPI Slave-select  */
+#define	FLG2_P	0x000A	/* Activates (=0) SPI_FLOUT2 as	flag output for	SPI Slave-select */
+#define	FLG3_P	0x000B	/* Activates (=0) SPI_FLOUT3 as	flag output for	SPI Slave-select  */
+#define	FLG4_P	0x000C	/* Activates (=0) SPI_FLOUT4 as	flag output for	SPI Slave-select  */
+#define	FLG5_P	0x000D	/* Activates (=0) SPI_FLOUT5 as	flag output for	SPI Slave-select  */
+#define	FLG6_P	0x000E	/* Activates (=0) SPI_FLOUT6 as	flag output for	SPI Slave-select  */
+#define	FLG7_P	0x000F	/* Activates (=0) SPI_FLOUT7 as	flag output for	SPI Slave-select */
+
+/* SPIx_STAT Masks */
+#define	SPIF	0x0001	/* Set (=1) when SPI single-word transfer complete */
+#define	MODF	0x0002	/* Set (=1) in a master	device when some other device tries to become master */
+#define	TXE		0x0004	/* Set (=1) when transmission occurs with no new data in SPI_TDBR */
+#define	TXS		0x0008	/* SPI_TDBR Data Buffer	Status (0=Empty, 1=Full) */
+#define	RBSY	0x0010	/* Set (=1) when data is received with RDBR full */
+#define	RXS		0x0020	/* SPI_RDBR Data Buffer	Status (0=Empty, 1=Full)  */
+#define	TXCOL	0x0040	/* When	set (=1), corrupt data may have	been transmitted  */
+
+/* SPIx_FLG Masks										 */
+#define	FLG1E	0xFDFF		/* Activates SPI_FLOUT1	 */
+#define	FLG2E	0xFBFF		/* Activates SPI_FLOUT2	 */
+#define	FLG3E	0xF7FF		/* Activates SPI_FLOUT3	 */
+#define	FLG4E	0xEFFF		/* Activates SPI_FLOUT4	 */
+#define	FLG5E	0xDFFF		/* Activates SPI_FLOUT5	 */
+#define	FLG6E	0xBFFF		/* Activates SPI_FLOUT6	 */
+#define	FLG7E	0x7FFF		/* Activates SPI_FLOUT7	 */
+
+
+/* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS	************* */
+/* EBIU_AMGCTL Masks */
+#define	AMCKEN		0x0001	/* Enable CLKOUT */
+#define	AMBEN_NONE	0x0000	/* All Banks Disabled */
+#define	AMBEN_B0	0x0002	/* Enable Asynchronous Memory Bank 0 only */
+#define	AMBEN_B0_B1	0x0004	/* Enable Asynchronous Memory Banks 0 &	1 only */
+#define	AMBEN_B0_B1_B2	0x0006	/* Enable Asynchronous Memory Banks 0, 1, and 2 */
+#define	AMBEN_ALL	0x0008	/* Enable Asynchronous Memory Banks (all) 0, 1,	2, and 3 */
+#define	CDPRIO		0x0100	/* DMA has priority over core for external accesses */
+
+/* EBIU_AMGCTL Bit Positions */
+#define	AMCKEN_P		0x0000	/* Enable CLKOUT */
+#define	AMBEN_P0		0x0001	/* Asynchronous	Memory Enable, 000 - banks 0-3 disabled, 001 - Bank 0 enabled */
+#define	AMBEN_P1		0x0002	/* Asynchronous	Memory Enable, 010 - banks 0&1 enabled,	 011 - banks 0-3 enabled */
+#define	AMBEN_P2		0x0003	/* Asynchronous	Memory Enable, 1xx - All banks (bank 0,	1, 2, and 3) enabled */
+
+/* EBIU_AMBCTL0	Masks */
+#define	B0RDYEN			0x00000001  /* Bank 0 RDY Enable, 0=disable, 1=enable */
+#define	B0RDYPOL		0x00000002  /* Bank 0 RDY Active high, 0=active	low, 1=active high */
+#define	B0TT_1			0x00000004  /* Bank 0 Transition Time from Read	to Write = 1 cycle */
+#define	B0TT_2			0x00000008  /* Bank 0 Transition Time from Read	to Write = 2 cycles */
+#define	B0TT_3			0x0000000C  /* Bank 0 Transition Time from Read	to Write = 3 cycles */
+#define	B0TT_4			0x00000000  /* Bank 0 Transition Time from Read	to Write = 4 cycles */
+#define	B0ST_1			0x00000010  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=1 cycle */
+#define	B0ST_2			0x00000020  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=2 cycles */
+#define	B0ST_3			0x00000030  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=3 cycles */
+#define	B0ST_4			0x00000000  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=4 cycles */
+#define	B0HT_1			0x00000040  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 1 cycle */
+#define	B0HT_2			0x00000080  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 2 cycles */
+#define	B0HT_3			0x000000C0  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 3 cycles */
+#define	B0HT_0			0x00000000  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 0 cycles */
+#define	B0RAT_1			0x00000100  /* Bank 0 Read Access Time = 1 cycle */
+#define	B0RAT_2			0x00000200  /* Bank 0 Read Access Time = 2 cycles */
+#define	B0RAT_3			0x00000300  /* Bank 0 Read Access Time = 3 cycles */
+#define	B0RAT_4			0x00000400  /* Bank 0 Read Access Time = 4 cycles */
+#define	B0RAT_5			0x00000500  /* Bank 0 Read Access Time = 5 cycles */
+#define	B0RAT_6			0x00000600  /* Bank 0 Read Access Time = 6 cycles */
+#define	B0RAT_7			0x00000700  /* Bank 0 Read Access Time = 7 cycles */
+#define	B0RAT_8			0x00000800  /* Bank 0 Read Access Time = 8 cycles */
+#define	B0RAT_9			0x00000900  /* Bank 0 Read Access Time = 9 cycles */
+#define	B0RAT_10		0x00000A00  /* Bank 0 Read Access Time = 10 cycles */
+#define	B0RAT_11		0x00000B00  /* Bank 0 Read Access Time = 11 cycles */
+#define	B0RAT_12		0x00000C00  /* Bank 0 Read Access Time = 12 cycles */
+#define	B0RAT_13		0x00000D00  /* Bank 0 Read Access Time = 13 cycles */
+#define	B0RAT_14		0x00000E00  /* Bank 0 Read Access Time = 14 cycles */
+#define	B0RAT_15		0x00000F00  /* Bank 0 Read Access Time = 15 cycles */
+#define	B0WAT_1			0x00001000  /* Bank 0 Write Access Time	= 1 cycle */
+#define	B0WAT_2			0x00002000  /* Bank 0 Write Access Time	= 2 cycles */
+#define	B0WAT_3			0x00003000  /* Bank 0 Write Access Time	= 3 cycles */
+#define	B0WAT_4			0x00004000  /* Bank 0 Write Access Time	= 4 cycles */
+#define	B0WAT_5			0x00005000  /* Bank 0 Write Access Time	= 5 cycles */
+#define	B0WAT_6			0x00006000  /* Bank 0 Write Access Time	= 6 cycles */
+#define	B0WAT_7			0x00007000  /* Bank 0 Write Access Time	= 7 cycles */
+#define	B0WAT_8			0x00008000  /* Bank 0 Write Access Time	= 8 cycles */
+#define	B0WAT_9			0x00009000  /* Bank 0 Write Access Time	= 9 cycles */
+#define	B0WAT_10		0x0000A000  /* Bank 0 Write Access Time	= 10 cycles */
+#define	B0WAT_11		0x0000B000  /* Bank 0 Write Access Time	= 11 cycles */
+#define	B0WAT_12		0x0000C000  /* Bank 0 Write Access Time	= 12 cycles */
+#define	B0WAT_13		0x0000D000  /* Bank 0 Write Access Time	= 13 cycles */
+#define	B0WAT_14		0x0000E000  /* Bank 0 Write Access Time	= 14 cycles */
+#define	B0WAT_15		0x0000F000  /* Bank 0 Write Access Time	= 15 cycles */
+#define	B1RDYEN			0x00010000  /* Bank 1 RDY enable, 0=disable, 1=enable */
+#define	B1RDYPOL		0x00020000  /* Bank 1 RDY Active high, 0=active	low, 1=active high */
+#define	B1TT_1			0x00040000  /* Bank 1 Transition Time from Read	to Write = 1 cycle */
+#define	B1TT_2			0x00080000  /* Bank 1 Transition Time from Read	to Write = 2 cycles */
+#define	B1TT_3			0x000C0000  /* Bank 1 Transition Time from Read	to Write = 3 cycles */
+#define	B1TT_4			0x00000000  /* Bank 1 Transition Time from Read	to Write = 4 cycles */
+#define	B1ST_1			0x00100000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
+#define	B1ST_2			0x00200000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
+#define	B1ST_3			0x00300000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
+#define	B1ST_4			0x00000000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
+#define	B1HT_1			0x00400000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 1 cycle */
+#define	B1HT_2			0x00800000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 2 cycles */
+#define	B1HT_3			0x00C00000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 3 cycles */
+#define	B1HT_0			0x00000000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 0 cycles */
+#define	B1RAT_1			0x01000000  /* Bank 1 Read Access Time = 1 cycle */
+#define	B1RAT_2			0x02000000  /* Bank 1 Read Access Time = 2 cycles */
+#define	B1RAT_3			0x03000000  /* Bank 1 Read Access Time = 3 cycles */
+#define	B1RAT_4			0x04000000  /* Bank 1 Read Access Time = 4 cycles */
+#define	B1RAT_5			0x05000000  /* Bank 1 Read Access Time = 5 cycles */
+#define	B1RAT_6			0x06000000  /* Bank 1 Read Access Time = 6 cycles */
+#define	B1RAT_7			0x07000000  /* Bank 1 Read Access Time = 7 cycles */
+#define	B1RAT_8			0x08000000  /* Bank 1 Read Access Time = 8 cycles */
+#define	B1RAT_9			0x09000000  /* Bank 1 Read Access Time = 9 cycles */
+#define	B1RAT_10		0x0A000000  /* Bank 1 Read Access Time = 10 cycles */
+#define	B1RAT_11		0x0B000000  /* Bank 1 Read Access Time = 11 cycles */
+#define	B1RAT_12		0x0C000000  /* Bank 1 Read Access Time = 12 cycles */
+#define	B1RAT_13		0x0D000000  /* Bank 1 Read Access Time = 13 cycles */
+#define	B1RAT_14		0x0E000000  /* Bank 1 Read Access Time = 14 cycles */
+#define	B1RAT_15		0x0F000000  /* Bank 1 Read Access Time = 15 cycles */
+#define	B1WAT_1			0x10000000 /* Bank 1 Write Access Time = 1 cycle */
+#define	B1WAT_2			0x20000000  /* Bank 1 Write Access Time	= 2 cycles */
+#define	B1WAT_3			0x30000000  /* Bank 1 Write Access Time	= 3 cycles */
+#define	B1WAT_4			0x40000000  /* Bank 1 Write Access Time	= 4 cycles */
+#define	B1WAT_5			0x50000000  /* Bank 1 Write Access Time	= 5 cycles */
+#define	B1WAT_6			0x60000000  /* Bank 1 Write Access Time	= 6 cycles */
+#define	B1WAT_7			0x70000000  /* Bank 1 Write Access Time	= 7 cycles */
+#define	B1WAT_8			0x80000000  /* Bank 1 Write Access Time	= 8 cycles */
+#define	B1WAT_9			0x90000000  /* Bank 1 Write Access Time	= 9 cycles */
+#define	B1WAT_10		0xA0000000  /* Bank 1 Write Access Time	= 10 cycles */
+#define	B1WAT_11		0xB0000000  /* Bank 1 Write Access Time	= 11 cycles */
+#define	B1WAT_12		0xC0000000  /* Bank 1 Write Access Time	= 12 cycles */
+#define	B1WAT_13		0xD0000000  /* Bank 1 Write Access Time	= 13 cycles */
+#define	B1WAT_14		0xE0000000  /* Bank 1 Write Access Time	= 14 cycles */
+#define	B1WAT_15		0xF0000000  /* Bank 1 Write Access Time	= 15 cycles */
+
+/* EBIU_AMBCTL1	Masks */
+#define	B2RDYEN			0x00000001  /* Bank 2 RDY Enable, 0=disable, 1=enable */
+#define	B2RDYPOL		0x00000002  /* Bank 2 RDY Active high, 0=active	low, 1=active high */
+#define	B2TT_1			0x00000004  /* Bank 2 Transition Time from Read	to Write = 1 cycle */
+#define	B2TT_2			0x00000008  /* Bank 2 Transition Time from Read	to Write = 2 cycles */
+#define	B2TT_3			0x0000000C  /* Bank 2 Transition Time from Read	to Write = 3 cycles */
+#define	B2TT_4			0x00000000  /* Bank 2 Transition Time from Read	to Write = 4 cycles */
+#define	B2ST_1			0x00000010  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
+#define	B2ST_2			0x00000020  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
+#define	B2ST_3			0x00000030  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
+#define	B2ST_4			0x00000000  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
+#define	B2HT_1			0x00000040  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 1 cycle */
+#define	B2HT_2			0x00000080  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 2 cycles */
+#define	B2HT_3			0x000000C0  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 3 cycles */
+#define	B2HT_0			0x00000000  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 0 cycles */
+#define	B2RAT_1			0x00000100  /* Bank 2 Read Access Time = 1 cycle */
+#define	B2RAT_2			0x00000200  /* Bank 2 Read Access Time = 2 cycles */
+#define	B2RAT_3			0x00000300  /* Bank 2 Read Access Time = 3 cycles */
+#define	B2RAT_4			0x00000400  /* Bank 2 Read Access Time = 4 cycles */
+#define	B2RAT_5			0x00000500  /* Bank 2 Read Access Time = 5 cycles */
+#define	B2RAT_6			0x00000600  /* Bank 2 Read Access Time = 6 cycles */
+#define	B2RAT_7			0x00000700  /* Bank 2 Read Access Time = 7 cycles */
+#define	B2RAT_8			0x00000800  /* Bank 2 Read Access Time = 8 cycles */
+#define	B2RAT_9			0x00000900  /* Bank 2 Read Access Time = 9 cycles */
+#define	B2RAT_10		0x00000A00  /* Bank 2 Read Access Time = 10 cycles */
+#define	B2RAT_11		0x00000B00  /* Bank 2 Read Access Time = 11 cycles */
+#define	B2RAT_12		0x00000C00  /* Bank 2 Read Access Time = 12 cycles */
+#define	B2RAT_13		0x00000D00  /* Bank 2 Read Access Time = 13 cycles */
+#define	B2RAT_14		0x00000E00  /* Bank 2 Read Access Time = 14 cycles */
+#define	B2RAT_15		0x00000F00  /* Bank 2 Read Access Time = 15 cycles */
+#define	B2WAT_1			0x00001000  /* Bank 2 Write Access Time	= 1 cycle */
+#define	B2WAT_2			0x00002000  /* Bank 2 Write Access Time	= 2 cycles */
+#define	B2WAT_3			0x00003000  /* Bank 2 Write Access Time	= 3 cycles */
+#define	B2WAT_4			0x00004000  /* Bank 2 Write Access Time	= 4 cycles */
+#define	B2WAT_5			0x00005000  /* Bank 2 Write Access Time	= 5 cycles */
+#define	B2WAT_6			0x00006000  /* Bank 2 Write Access Time	= 6 cycles */
+#define	B2WAT_7			0x00007000  /* Bank 2 Write Access Time	= 7 cycles */
+#define	B2WAT_8			0x00008000  /* Bank 2 Write Access Time	= 8 cycles */
+#define	B2WAT_9			0x00009000  /* Bank 2 Write Access Time	= 9 cycles */
+#define	B2WAT_10		0x0000A000  /* Bank 2 Write Access Time	= 10 cycles */
+#define	B2WAT_11		0x0000B000  /* Bank 2 Write Access Time	= 11 cycles */
+#define	B2WAT_12		0x0000C000  /* Bank 2 Write Access Time	= 12 cycles */
+#define	B2WAT_13		0x0000D000  /* Bank 2 Write Access Time	= 13 cycles */
+#define	B2WAT_14		0x0000E000  /* Bank 2 Write Access Time	= 14 cycles */
+#define	B2WAT_15		0x0000F000  /* Bank 2 Write Access Time	= 15 cycles */
+#define	B3RDYEN			0x00010000  /* Bank 3 RDY enable, 0=disable, 1=enable */
+#define	B3RDYPOL		0x00020000  /* Bank 3 RDY Active high, 0=active	low, 1=active high */
+#define	B3TT_1			0x00040000  /* Bank 3 Transition Time from Read	to Write = 1 cycle */
+#define	B3TT_2			0x00080000  /* Bank 3 Transition Time from Read	to Write = 2 cycles */
+#define	B3TT_3			0x000C0000  /* Bank 3 Transition Time from Read	to Write = 3 cycles */
+#define	B3TT_4			0x00000000  /* Bank 3 Transition Time from Read	to Write = 4 cycles */
+#define	B3ST_1			0x00100000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
+#define	B3ST_2			0x00200000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
+#define	B3ST_3			0x00300000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
+#define	B3ST_4			0x00000000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
+#define	B3HT_1			0x00400000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 1 cycle */
+#define	B3HT_2			0x00800000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 2 cycles */
+#define	B3HT_3			0x00C00000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 3 cycles */
+#define	B3HT_0			0x00000000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 0 cycles */
+#define	B3RAT_1			0x01000000 /* Bank 3 Read Access Time =	1 cycle */
+#define	B3RAT_2			0x02000000  /* Bank 3 Read Access Time = 2 cycles */
+#define	B3RAT_3			0x03000000  /* Bank 3 Read Access Time = 3 cycles */
+#define	B3RAT_4			0x04000000  /* Bank 3 Read Access Time = 4 cycles */
+#define	B3RAT_5			0x05000000  /* Bank 3 Read Access Time = 5 cycles */
+#define	B3RAT_6			0x06000000  /* Bank 3 Read Access Time = 6 cycles */
+#define	B3RAT_7			0x07000000  /* Bank 3 Read Access Time = 7 cycles */
+#define	B3RAT_8			0x08000000  /* Bank 3 Read Access Time = 8 cycles */
+#define	B3RAT_9			0x09000000  /* Bank 3 Read Access Time = 9 cycles */
+#define	B3RAT_10		0x0A000000  /* Bank 3 Read Access Time = 10 cycles */
+#define	B3RAT_11		0x0B000000  /* Bank 3 Read Access Time = 11 cycles */
+#define	B3RAT_12		0x0C000000  /* Bank 3 Read Access Time = 12 cycles */
+#define	B3RAT_13		0x0D000000  /* Bank 3 Read Access Time = 13 cycles */
+#define	B3RAT_14		0x0E000000  /* Bank 3 Read Access Time = 14 cycles */
+#define	B3RAT_15		0x0F000000  /* Bank 3 Read Access Time = 15 cycles */
+#define	B3WAT_1			0x10000000 /* Bank 3 Write Access Time = 1 cycle */
+#define	B3WAT_2			0x20000000  /* Bank 3 Write Access Time	= 2 cycles */
+#define	B3WAT_3			0x30000000  /* Bank 3 Write Access Time	= 3 cycles */
+#define	B3WAT_4			0x40000000  /* Bank 3 Write Access Time	= 4 cycles */
+#define	B3WAT_5			0x50000000  /* Bank 3 Write Access Time	= 5 cycles */
+#define	B3WAT_6			0x60000000  /* Bank 3 Write Access Time	= 6 cycles */
+#define	B3WAT_7			0x70000000  /* Bank 3 Write Access Time	= 7 cycles */
+#define	B3WAT_8			0x80000000  /* Bank 3 Write Access Time	= 8 cycles */
+#define	B3WAT_9			0x90000000  /* Bank 3 Write Access Time	= 9 cycles */
+#define	B3WAT_10		0xA0000000  /* Bank 3 Write Access Time	= 10 cycles */
+#define	B3WAT_11		0xB0000000  /* Bank 3 Write Access Time	= 11 cycles */
+#define	B3WAT_12		0xC0000000  /* Bank 3 Write Access Time	= 12 cycles */
+#define	B3WAT_13		0xD0000000  /* Bank 3 Write Access Time	= 13 cycles */
+#define	B3WAT_14		0xE0000000  /* Bank 3 Write Access Time	= 14 cycles */
+#define	B3WAT_15		0xF0000000  /* Bank 3 Write Access Time	= 15 cycles */
+
+/* **********************  SDRAM CONTROLLER MASKS  *************************** */
+/* EBIU_SDGCTL Masks */
+#define	SCTLE			0x00000001 /* Enable SCLK[0], /SRAS, /SCAS, /SWE, SDQM[3:0] */
+#define	CL_2			0x00000008 /* SDRAM CAS	latency	= 2 cycles */
+#define	CL_3			0x0000000C /* SDRAM CAS	latency	= 3 cycles */
+#define	PFE				0x00000010 /* Enable SDRAM prefetch */
+#define	PFP				0x00000020 /* Prefetch has priority over AMC requests */
+#define	PASR_ALL		0x00000000	/* All 4 SDRAM Banks Refreshed In Self-Refresh */
+#define	PASR_B0_B1		0x00000010	/* SDRAM Banks 0 and 1 Are Refreshed In	Self-Refresh */
+#define	PASR_B0			0x00000020	/* Only	SDRAM Bank 0 Is	Refreshed In Self-Refresh */
+#define	TRAS_1			0x00000040 /* SDRAM tRAS = 1 cycle */
+#define	TRAS_2			0x00000080 /* SDRAM tRAS = 2 cycles */
+#define	TRAS_3			0x000000C0 /* SDRAM tRAS = 3 cycles */
+#define	TRAS_4			0x00000100 /* SDRAM tRAS = 4 cycles */
+#define	TRAS_5			0x00000140 /* SDRAM tRAS = 5 cycles */
+#define	TRAS_6			0x00000180 /* SDRAM tRAS = 6 cycles */
+#define	TRAS_7			0x000001C0 /* SDRAM tRAS = 7 cycles */
+#define	TRAS_8			0x00000200 /* SDRAM tRAS = 8 cycles */
+#define	TRAS_9			0x00000240 /* SDRAM tRAS = 9 cycles */
+#define	TRAS_10			0x00000280 /* SDRAM tRAS = 10 cycles */
+#define	TRAS_11			0x000002C0 /* SDRAM tRAS = 11 cycles */
+#define	TRAS_12			0x00000300 /* SDRAM tRAS = 12 cycles */
+#define	TRAS_13			0x00000340 /* SDRAM tRAS = 13 cycles */
+#define	TRAS_14			0x00000380 /* SDRAM tRAS = 14 cycles */
+#define	TRAS_15			0x000003C0 /* SDRAM tRAS = 15 cycles */
+#define	TRP_1			0x00000800 /* SDRAM tRP	= 1 cycle */
+#define	TRP_2			0x00001000 /* SDRAM tRP	= 2 cycles */
+#define	TRP_3			0x00001800 /* SDRAM tRP	= 3 cycles */
+#define	TRP_4			0x00002000 /* SDRAM tRP	= 4 cycles */
+#define	TRP_5			0x00002800 /* SDRAM tRP	= 5 cycles */
+#define	TRP_6			0x00003000 /* SDRAM tRP	= 6 cycles */
+#define	TRP_7			0x00003800 /* SDRAM tRP	= 7 cycles */
+#define	TRCD_1			0x00008000 /* SDRAM tRCD = 1 cycle */
+#define	TRCD_2			0x00010000 /* SDRAM tRCD = 2 cycles */
+#define	TRCD_3			0x00018000 /* SDRAM tRCD = 3 cycles */
+#define	TRCD_4			0x00020000 /* SDRAM tRCD = 4 cycles */
+#define	TRCD_5			0x00028000 /* SDRAM tRCD = 5 cycles */
+#define	TRCD_6			0x00030000 /* SDRAM tRCD = 6 cycles */
+#define	TRCD_7			0x00038000 /* SDRAM tRCD = 7 cycles */
+#define	TWR_1			0x00080000 /* SDRAM tWR	= 1 cycle */
+#define	TWR_2			0x00100000 /* SDRAM tWR	= 2 cycles */
+#define	TWR_3			0x00180000 /* SDRAM tWR	= 3 cycles */
+#define	PUPSD			0x00200000 /*Power-up start delay */
+#define	PSM				0x00400000 /* SDRAM power-up sequence =	Precharge, mode	register set, 8	CBR refresh cycles */
+#define	PSS				0x00800000 /* enable SDRAM power-up sequence on	next SDRAM access */
+#define	SRFS			0x01000000 /* Start SDRAM self-refresh mode */
+#define	EBUFE			0x02000000 /* Enable external buffering	timing */
+#define	FBBRW			0x04000000 /* Fast back-to-back	read write enable */
+#define	EMREN			0x10000000 /* Extended mode register enable */
+#define	TCSR			0x20000000 /* Temp compensated self refresh value 85 deg C */
+#define	CDDBG			0x40000000 /* Tristate SDRAM controls during bus grant */
+
+/* EBIU_SDBCTL Masks */
+#define	EBE				0x00000001 /* Enable SDRAM external bank */
+#define	EBSZ_16			0x00000000 /* SDRAM external bank size = 16MB */
+#define	EBSZ_32			0x00000002 /* SDRAM external bank size = 32MB */
+#define	EBSZ_64			0x00000004 /* SDRAM external bank size = 64MB */
+#define	EBSZ_128		0x00000006 /* SDRAM external bank size = 128MB */
+#define	EBSZ_256		0x00000008 /* SDRAM External Bank Size = 256MB */
+#define	EBSZ_512		0x0000000A /* SDRAM External Bank Size = 512MB */
+#define	EBCAW_8			0x00000000 /* SDRAM external bank column address width = 8 bits */
+#define	EBCAW_9			0x00000010 /* SDRAM external bank column address width = 9 bits */
+#define	EBCAW_10		0x00000020 /* SDRAM external bank column address width = 10 bits */
+#define	EBCAW_11		0x00000030 /* SDRAM external bank column address width = 11 bits */
+
+/* EBIU_SDSTAT Masks */
+#define	SDCI			0x00000001 /* SDRAM controller is idle */
+#define	SDSRA			0x00000002 /* SDRAM self refresh is active */
+#define	SDPUA			0x00000004 /* SDRAM power up active  */
+#define	SDRS			0x00000008 /* SDRAM is in reset	state */
+#define	SDEASE			0x00000010 /* SDRAM EAB	sticky error status - W1C */
+#define	BGSTAT			0x00000020 /* Bus granted */
+
+
+/*  ********************  TWO-WIRE INTERFACE (TWIx) MASKS  ***********************/
+/* TWIx_CLKDIV Macros (Use: *pTWIx_CLKDIV = CLKLOW(x)|CLKHI(y);	 ) */
+#ifdef _MISRA_RULES
+#define	CLKLOW(x)	((x) & 0xFFu)		/* Periods Clock Is Held Low */
+#define	CLKHI(y)	(((y)&0xFFu)<<0x8)	/* Periods Before New Clock Low */
+#else
+#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low */
+#define	CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low */
+#endif /* _MISRA_RULES */
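+
+/* Illustrative sketch (assumption): following the usage note above, a TWI
+ * clock divider with 50 SCL-low periods and 50 SCL-high periods would be
+ * composed as
+ *
+ *	CLKLOW(50) | CLKHI(50)
+ *
+ * and written to TWIx_CLKDIV; the pTWIx_CLKDIV pointer named in the note is
+ * assumed to be defined elsewhere.
+ */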
+
+/* TWIx_PRESCALE Masks								 */
+#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz) */
+#define	TWI_ENA		0x0080		/* TWI Enable		 */
+#define	SCCB		0x0200		/* SCCB	Compatibility Enable */
+
+/* TWIx_SLAVE_CTRL Masks								 */
+#define	SEN			0x0001		/* Slave Enable		 */
+#define	SADD_LEN	0x0002		/* Slave Address Length */
+#define	STDVAL		0x0004		/* Slave Transmit Data Valid */
+#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
+#define	GEN			0x0010		/* General Call	Address	Matching Enabled */
+
+/* TWIx_SLAVE_STAT Masks								 */
+#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*) */
+#define	GCALL		0x0002		/* General Call	Indicator */
+
+/* TWIx_MASTER_CTRL Masks						 */
+#define	MEN			0x0001		/* Master Mode Enable */
+#define	MADD_LEN	0x0002		/* Master Address Length */
+#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*) */
+#define	FAST		0x0008		/* Use Fast Mode Timing	Specs */
+#define	STOP		0x0010		/* Issue Stop Condition */
+#define	RSTART		0x0020		/* Repeat Start	or Stop* At End	Of Transfer */
+#define	DCNT		0x3FC0		/* Data	Bytes To Transfer */
+#define	SDAOVR		0x4000		/* Serial Data Override */
+#define	SCLOVR		0x8000		/* Serial Clock	Override */
+
+/* TWIx_MASTER_STAT Masks							 */
+#define	MPROG		0x0001		/* Master Transfer In Progress */
+#define	LOSTARB		0x0002		/* Lost	Arbitration Indicator (Xfer Aborted) */
+#define	ANAK		0x0004		/* Address Not Acknowledged */
+#define	DNAK		0x0008		/* Data	Not Acknowledged */
+#define	BUFRDERR	0x0010		/* Buffer Read Error */
+#define	BUFWRERR	0x0020		/* Buffer Write	Error */
+#define	SDASEN		0x0040		/* Serial Data Sense */
+#define	SCLSEN		0x0080		/* Serial Clock	Sense */
+#define	BUSBUSY		0x0100		/* Bus Busy Indicator */
+
+/* TWIx_INT_SRC	and TWIx_INT_ENABLE Masks */
+#define	SINIT		0x0001		/* Slave Transfer Initiated */
+#define	SCOMP		0x0002		/* Slave Transfer Complete */
+#define	SERR		0x0004		/* Slave Transfer Error */
+#define	SOVF		0x0008		/* Slave Overflow */
+#define	MCOMP		0x0010		/* Master Transfer Complete */
+#define	MERR		0x0020		/* Master Transfer Error */
+#define	XMTSERV		0x0040		/* Transmit FIFO Service */
+#define	RCVSERV		0x0080		/* Receive FIFO	Service */
+
+/* TWIx_FIFO_CTRL Masks					 */
+#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush */
+#define	RCVFLUSH	0x0002		/* Receive Buffer Flush */
+#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length */
+#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length */
+
+/* TWIx_FIFO_STAT Masks								 */
+#define	XMTSTAT		0x0003		/* Transmit FIFO Status */
+#define	XMT_EMPTY	0x0000		/*		Transmit FIFO Empty */
+#define	XMT_HALF	0x0001		/*		Transmit FIFO Has 1 Byte To Write */
+#define	XMT_FULL	0x0003		/*		Transmit FIFO Full (2 Bytes To Write) */
+
+#define	RCVSTAT		0x000C		/* Receive FIFO	Status */
+#define	RCV_EMPTY	0x0000		/*		Receive	FIFO Empty */
+#define	RCV_HALF	0x0004		/*		Receive	FIFO Has 1 Byte	To Read */
+#define	RCV_FULL	0x000C		/*		Receive	FIFO Full (2 Bytes To Read) */
+
+
+/********************************* MXVR	MASKS ****************************************/
+
+/* MXVR_CONFIG Masks */
+
+#define	MXVREN	  0x00000001lu
+#define	MMSM	  0x00000002lu
+#define	ACTIVE	  0x00000004lu
+#define	SDELAY	  0x00000008lu
+#define	NCMRXEN	  0x00000010lu
+#define	RWRRXEN	  0x00000020lu
+#define	MTXEN	  0x00000040lu
+#define	MTXON	  0x00000080lu /* legacy */
+#define	MTXONB	  0x00000080lu
+#define	EPARITY	  0x00000100lu
+#define	MSB	  0x00001E00lu
+#define	APRXEN	  0x00002000lu
+#define	WAKEUP	  0x00004000lu
+#define	LMECH	  0x00008000lu
+
+#ifdef _MISRA_RULES
+#define	SET_MSB(x)     (((x)&0xFu) << 0x9)
+#else
+#define	SET_MSB(x)     (((x)&0xF) << 0x9)
+#endif /* _MISRA_RULES */
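+
+/* Illustrative check (not part of the original header): SET_MSB() shifts its
+ * argument into the MSB field of MXVR_CONFIG, so the all-ones case equals
+ * the field mask defined above:
+ *
+ *	SET_MSB(0xF) == (0xF << 0x9) == 0x00001E00 == MSB
+ */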
+
+
+/* MXVR_PLL_CTL_0 Masks */
+
+#define	MXTALCEN  0x00000001lu
+#define	MXTALFEN  0x00000002lu
+#define	MPLLMS	  0x00000008lu
+#define	MXTALMUL  0x00000030lu
+#define	MPLLEN	  0x00000040lu
+#define	MPLLEN0	  0x00000040lu /* legacy */
+#define	MPLLEN1	  0x00000080lu /* legacy */
+#define	MMCLKEN	  0x00000100lu
+#define	MMCLKMUL  0x00001E00lu
+#define	MPLLRSTB  0x00002000lu
+#define	MPLLRSTB0 0x00002000lu /* legacy */
+#define	MPLLRSTB1 0x00004000lu /* legacy */
+#define	MBCLKEN	  0x00010000lu
+#define	MBCLKDIV  0x001E0000lu
+#define	MPLLCDR	  0x00200000lu
+#define	MPLLCDR0  0x00200000lu /* legacy */
+#define	MPLLCDR1  0x00400000lu /* legacy */
+#define	INVRX	  0x00800000lu
+#define	MFSEN	  0x01000000lu
+#define	MFSDIV	  0x1E000000lu
+#define	MFSSEL	  0x60000000lu
+#define	MFSSYNC	  0x80000000lu
+
+#define	MXTALMUL_256FS	 0x00000000lu /* legacy */
+#define	MXTALMUL_384FS	 0x00000010lu /* legacy */
+#define	MXTALMUL_512FS	 0x00000020lu /* legacy */
+#define	MXTALMUL_1024FS	 0x00000030lu
+
+#define	MMCLKMUL_1024FS	 0x00000000lu
+#define	MMCLKMUL_512FS	 0x00000200lu
+#define	MMCLKMUL_256FS	 0x00000400lu
+#define	MMCLKMUL_128FS	 0x00000600lu
+#define	MMCLKMUL_64FS	 0x00000800lu
+#define	MMCLKMUL_32FS	 0x00000A00lu
+#define	MMCLKMUL_16FS	 0x00000C00lu
+#define	MMCLKMUL_8FS	 0x00000E00lu
+#define	MMCLKMUL_4FS	 0x00001000lu
+#define	MMCLKMUL_2FS	 0x00001200lu
+#define	MMCLKMUL_1FS	 0x00001400lu
+#define	MMCLKMUL_1536FS	 0x00001A00lu
+#define	MMCLKMUL_768FS	 0x00001C00lu
+#define	MMCLKMUL_384FS	 0x00001E00lu
+
+#define	MBCLKDIV_DIV2	 0x00020000lu
+#define	MBCLKDIV_DIV4	 0x00040000lu
+#define	MBCLKDIV_DIV8	 0x00060000lu
+#define	MBCLKDIV_DIV16	 0x00080000lu
+#define	MBCLKDIV_DIV32	 0x000A0000lu
+#define	MBCLKDIV_DIV64	 0x000C0000lu
+#define	MBCLKDIV_DIV128	 0x000E0000lu
+#define	MBCLKDIV_DIV256	 0x00100000lu
+#define	MBCLKDIV_DIV512	 0x00120000lu
+#define	MBCLKDIV_DIV1024 0x00140000lu
+
+#define	MFSDIV_DIV2	 0x02000000lu
+#define	MFSDIV_DIV4	 0x04000000lu
+#define	MFSDIV_DIV8	 0x06000000lu
+#define	MFSDIV_DIV16	 0x08000000lu
+#define	MFSDIV_DIV32	 0x0A000000lu
+#define	MFSDIV_DIV64	 0x0C000000lu
+#define	MFSDIV_DIV128	 0x0E000000lu
+#define	MFSDIV_DIV256	 0x10000000lu
+#define	MFSDIV_DIV512	 0x12000000lu
+#define	MFSDIV_DIV1024	 0x14000000lu
+
+#define	MFSSEL_CLOCK	 0x00000000lu
+#define	MFSSEL_PULSE_HI	 0x20000000lu
+#define	MFSSEL_PULSE_LO	 0x40000000lu
+
+
+/* MXVR_PLL_CTL_1 Masks */
+
+#define	MSTO	   0x00000001lu
+#define	MSTO0	   0x00000001lu	/* legacy */
+#define	MHOGGD	   0x00000004lu
+#define	MHOGGD0	   0x00000004lu	/* legacy */
+#define	MHOGGD1	   0x00000008lu	/* legacy */
+#define	MSHAPEREN  0x00000010lu
+#define	MSHAPEREN0 0x00000010lu	/* legacy */
+#define	MSHAPEREN1 0x00000020lu	/* legacy */
+#define	MPLLCNTEN  0x00008000lu
+#define	MPLLCNT	   0xFFFF0000lu
+
+#ifdef _MISRA_RULES
+#define	SET_MPLLCNT(x)	   (((x)&0xFFFFu) << 0x10)
+#else
+#define	SET_MPLLCNT(x)	   (((x)&0xFFFF) << 0x10)
+#endif /* _MISRA_RULES */
+
+
+/* MXVR_PLL_CTL_2 Masks */
+
+#define	MSHAPERSEL 0x00000007lu
+#define	MCPSEL	   0x000000E0lu
+
+/* MXVR_INT_STAT_0 Masks */
+
+#define	NI2A  0x00000001lu
+#define	NA2I  0x00000002lu
+#define	SBU2L 0x00000004lu
+#define	SBL2U 0x00000008lu
+#define	PRU   0x00000010lu
+#define	MPRU  0x00000020lu
+#define	DRU   0x00000040lu
+#define	MDRU  0x00000080lu
+#define	SBU   0x00000100lu
+#define	ATU   0x00000200lu
+#define	FCZ0  0x00000400lu
+#define	FCZ1  0x00000800lu
+#define	PERR  0x00001000lu
+#define	MH2L  0x00002000lu
+#define	ML2H  0x00004000lu
+#define	WUP   0x00008000lu
+#define	FU2L  0x00010000lu
+#define	FL2U  0x00020000lu
+#define	BU2L  0x00040000lu
+#define	BL2U  0x00080000lu
+#define	PCZ   0x00400000lu
+#define	FERR  0x00800000lu
+#define	CMR   0x01000000lu
+#define	CMROF 0x02000000lu
+#define	CMTS  0x04000000lu
+#define	CMTC  0x08000000lu
+#define	RWRC  0x10000000lu
+#define	BCZ   0x20000000lu
+#define	BMERR 0x40000000lu
+#define	DERR  0x80000000lu
+
+
+/* MXVR_INT_EN_0 Masks */
+
+#define	NI2AEN	NI2A
+#define	NA2IEN	NA2I
+#define	SBU2LEN	SBU2L
+#define	SBL2UEN	SBL2U
+#define	PRUEN	PRU
+#define	MPRUEN	MPRU
+#define	DRUEN	DRU
+#define	MDRUEN	MDRU
+#define	SBUEN	SBU
+#define	ATUEN	ATU
+#define	FCZ0EN	FCZ0
+#define	FCZ1EN	FCZ1
+#define	PERREN	PERR
+#define	MH2LEN	MH2L
+#define	ML2HEN	ML2H
+#define	WUPEN	WUP
+#define	FU2LEN	FU2L
+#define	FL2UEN	FL2U
+#define	BU2LEN	BU2L
+#define	BL2UEN	BL2U
+#define	PCZEN	PCZ
+#define	FERREN	FERR
+#define	CMREN	CMR
+#define	CMROFEN	CMROF
+#define	CMTSEN	CMTS
+#define	CMTCEN	CMTC
+#define	RWRCEN	RWRC
+#define	BCZEN	BCZ
+#define	BMERREN	BMERR
+#define	DERREN	DERR
+
+
+/* MXVR_INT_STAT_1 Masks */
+
+#define	APR   0x00000004lu
+#define	APROF 0x00000008lu
+#define	APTS  0x00000040lu
+#define	APTC  0x00000080lu
+#define	APRCE 0x00000400lu
+#define	APRPE 0x00000800lu
+
+#define	HDONE0 0x00000001lu
+#define	DONE0  0x00000002lu
+#define	HDONE1 0x00000010lu
+#define	DONE1  0x00000020lu
+#define	HDONE2 0x00000100lu
+#define	DONE2  0x00000200lu
+#define	HDONE3 0x00001000lu
+#define	DONE3  0x00002000lu
+#define	HDONE4 0x00010000lu
+#define	DONE4  0x00020000lu
+#define	HDONE5 0x00100000lu
+#define	DONE5  0x00200000lu
+#define	HDONE6 0x01000000lu
+#define	DONE6  0x02000000lu
+#define	HDONE7 0x10000000lu
+#define	DONE7  0x20000000lu
+
+#define	DONEX(x) (0x00000002 <<	(4 * (x)))
+#define	HDONEX(x) (0x00000001 << (4 * (x)))
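+
+/* Illustrative check (not part of the original header): for a DMA channel
+ * index x in 0..7, the indexed macros expand to the per-channel masks above,
+ * e.g.
+ *
+ *	DONEX(3)  == (0x00000002 << 12) == 0x00002000 == DONE3
+ *	HDONEX(3) == (0x00000001 << 12) == 0x00001000 == HDONE3
+ */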
+
+
+/* MXVR_INT_EN_1 Masks */
+
+#define	APREN	APR
+#define	APROFEN	APROF
+#define	APTSEN	APTS
+#define	APTCEN	APTC
+#define	APRCEEN	APRCE
+#define	APRPEEN	APRPE
+
+#define	HDONEEN0 HDONE0
+#define	DONEEN0	 DONE0
+#define	HDONEEN1 HDONE1
+#define	DONEEN1	 DONE1
+#define	HDONEEN2 HDONE2
+#define	DONEEN2	 DONE2
+#define	HDONEEN3 HDONE3
+#define	DONEEN3	 DONE3
+#define	HDONEEN4 HDONE4
+#define	DONEEN4	 DONE4
+#define	HDONEEN5 HDONE5
+#define	DONEEN5	 DONE5
+#define	HDONEEN6 HDONE6
+#define	DONEEN6	 DONE6
+#define	HDONEEN7 HDONE7
+#define	DONEEN7	 DONE7
+
+#define	DONEENX(x) (0x00000002 << (4 * (x)))
+#define	HDONEENX(x) (0x00000001	<< (4 *	(x)))
+
+
+/* MXVR_STATE_0	Masks */
+
+#define	NACT	 0x00000001lu
+#define	SBLOCK	 0x00000002lu
+#define	PFDLOCK	 0x00000004lu
+#define	PFDLOCK0 0x00000004lu /* legacy */
+#define	PDD	 0x00000008lu
+#define	PDD0	 0x00000008lu /* legacy */
+#define	PVCO	 0x00000010lu
+#define	PVCO0	 0x00000010lu /* legacy */
+#define	PFDLOCK1 0x00000020lu /* legacy */
+#define	PDD1	 0x00000040lu /* legacy */
+#define	PVCO1	 0x00000080lu /* legacy */
+#define	APBSY	 0x00000100lu
+#define	APARB	 0x00000200lu
+#define	APTX	 0x00000400lu
+#define	APRX	 0x00000800lu
+#define	CMBSY	 0x00001000lu
+#define	CMARB	 0x00002000lu
+#define	CMTX	 0x00004000lu
+#define	CMRX	 0x00008000lu
+#define	MRXONB	 0x00010000lu
+#define	RGSIP	 0x00020000lu
+#define	DALIP	 0x00040000lu
+#define	ALIP	 0x00080000lu
+#define	RRDIP	 0x00100000lu
+#define	RWRIP	 0x00200000lu
+#define	FLOCK	 0x00400000lu
+#define	BLOCK	 0x00800000lu
+#define	RSB	 0x0F000000lu
+#define	DERRNUM	 0xF0000000lu
+
+
+/* MXVR_STATE_1	Masks */
+
+#define	STXNUMB	    0x0000000Flu
+#define	SRXNUMB	    0x000000F0lu
+#define	APCONT	    0x00000100lu
+#define	DMAACTIVEX  0x00FF0000lu
+#define	DMAACTIVE0  0x00010000lu
+#define	DMAACTIVE1  0x00020000lu
+#define	DMAACTIVE2  0x00040000lu
+#define	DMAACTIVE3  0x00080000lu
+#define	DMAACTIVE4  0x00100000lu
+#define	DMAACTIVE5  0x00200000lu
+#define	DMAACTIVE6  0x00400000lu
+#define	DMAACTIVE7  0x00800000lu
+#define	DMAPMENX    0xFF000000lu
+#define	DMAPMEN0    0x01000000lu
+#define	DMAPMEN1    0x02000000lu
+#define	DMAPMEN2    0x04000000lu
+#define	DMAPMEN3    0x08000000lu
+#define	DMAPMEN4    0x10000000lu
+#define	DMAPMEN5    0x20000000lu
+#define	DMAPMEN6    0x40000000lu
+#define	DMAPMEN7    0x80000000lu
+
+
+/* MXVR_POSITION Masks */
+
+#define	PVALID	     0x8000
+#define	POSITION     0x003F
+
+
+/* MXVR_MAX_POSITION Masks */
+
+#define	MPVALID	     0x8000
+#define	MPOSITION    0x003F
+
+
+/* MXVR_DELAY Masks */
+
+#define	DVALID	     0x8000
+#define	DELAY	     0x003F
+
+
+/* MXVR_MAX_DELAY Masks */
+
+#define	MDVALID	     0x8000
+#define	MDELAY	     0x003F
+
+
+/* MXVR_LADDR Masks */
+
+#define	LVALID	     0x80000000lu
+#define	LADDR	     0x0000FFFFlu
+
+
+/* MXVR_GADDR Masks */
+
+#define	GVALID	     0x8000
+#define	GADDRL	     0x00FF
+
+
+/* MXVR_AADDR Masks */
+
+#define	AVALID	     0x80000000lu
+#define	AADDR	     0x0000FFFFlu
+
+
+/* MXVR_ALLOC_0	Masks */
+
+#define	CIU0	     0x00000080lu
+#define	CIU1	     0x00008000lu
+#define	CIU2	     0x00800000lu
+#define	CIU3	     0x80000000lu
+
+#define	CL0	     0x0000007Flu
+#define	CL1	     0x00007F00lu
+#define	CL2	     0x007F0000lu
+#define	CL3	     0x7F000000lu
+
+
+/* MXVR_ALLOC_1	Masks */
+
+#define	CIU4	     0x00000080lu
+#define	CIU5	     0x00008000lu
+#define	CIU6	     0x00800000lu
+#define	CIU7	     0x80000000lu
+
+#define	CL4	     0x0000007Flu
+#define	CL5	     0x00007F00lu
+#define	CL6	     0x007F0000lu
+#define	CL7	     0x7F000000lu
+
+
+/* MXVR_ALLOC_2	Masks */
+
+#define	CIU8	     0x00000080lu
+#define	CIU9	     0x00008000lu
+#define	CIU10	     0x00800000lu
+#define	CIU11	     0x80000000lu
+
+#define	CL8	     0x0000007Flu
+#define	CL9	     0x00007F00lu
+#define	CL10	     0x007F0000lu
+#define	CL11	     0x7F000000lu
+
+
+/* MXVR_ALLOC_3	Masks */
+
+#define	CIU12	     0x00000080lu
+#define	CIU13	     0x00008000lu
+#define	CIU14	     0x00800000lu
+#define	CIU15	     0x80000000lu
+
+#define	CL12	     0x0000007Flu
+#define	CL13	     0x00007F00lu
+#define	CL14	     0x007F0000lu
+#define	CL15	     0x7F000000lu
+
+
+/* MXVR_ALLOC_4	Masks */
+
+#define	CIU16	     0x00000080lu
+#define	CIU17	     0x00008000lu
+#define	CIU18	     0x00800000lu
+#define	CIU19	     0x80000000lu
+
+#define	CL16	     0x0000007Flu
+#define	CL17	     0x00007F00lu
+#define	CL18	     0x007F0000lu
+#define	CL19	     0x7F000000lu
+
+
+/* MXVR_ALLOC_5	Masks */
+
+#define	CIU20	     0x00000080lu
+#define	CIU21	     0x00008000lu
+#define	CIU22	     0x00800000lu
+#define	CIU23	     0x80000000lu
+
+#define	CL20	     0x0000007Flu
+#define	CL21	     0x00007F00lu
+#define	CL22	     0x007F0000lu
+#define	CL23	     0x7F000000lu
+
+
+/* MXVR_ALLOC_6	Masks */
+
+#define	CIU24	     0x00000080lu
+#define	CIU25	     0x00008000lu
+#define	CIU26	     0x00800000lu
+#define	CIU27	     0x80000000lu
+
+#define	CL24	     0x0000007Flu
+#define	CL25	     0x00007F00lu
+#define	CL26	     0x007F0000lu
+#define	CL27	     0x7F000000lu
+
+
+/* MXVR_ALLOC_7	Masks */
+
+#define	CIU28	     0x00000080lu
+#define	CIU29	     0x00008000lu
+#define	CIU30	     0x00800000lu
+#define	CIU31	     0x80000000lu
+
+#define	CL28	     0x0000007Flu
+#define	CL29	     0x00007F00lu
+#define	CL30	     0x007F0000lu
+#define	CL31	     0x7F000000lu
+
+
+/* MXVR_ALLOC_8	Masks */
+
+#define	CIU32	     0x00000080lu
+#define	CIU33	     0x00008000lu
+#define	CIU34	     0x00800000lu
+#define	CIU35	     0x80000000lu
+
+#define	CL32	     0x0000007Flu
+#define	CL33	     0x00007F00lu
+#define	CL34	     0x007F0000lu
+#define	CL35	     0x7F000000lu
+
+
+/* MXVR_ALLOC_9	Masks */
+
+#define	CIU36	     0x00000080lu
+#define	CIU37	     0x00008000lu
+#define	CIU38	     0x00800000lu
+#define	CIU39	     0x80000000lu
+
+#define	CL36	     0x0000007Flu
+#define	CL37	     0x00007F00lu
+#define	CL38	     0x007F0000lu
+#define	CL39	     0x7F000000lu
+
+
+/* MXVR_ALLOC_10 Masks */
+
+#define	CIU40	     0x00000080lu
+#define	CIU41	     0x00008000lu
+#define	CIU42	     0x00800000lu
+#define	CIU43	     0x80000000lu
+
+#define	CL40	     0x0000007Flu
+#define	CL41	     0x00007F00lu
+#define	CL42	     0x007F0000lu
+#define	CL43	     0x7F000000lu
+
+
+/* MXVR_ALLOC_11 Masks */
+
+#define	CIU44	     0x00000080lu
+#define	CIU45	     0x00008000lu
+#define	CIU46	     0x00800000lu
+#define	CIU47	     0x80000000lu
+
+#define	CL44	     0x0000007Flu
+#define	CL45	     0x00007F00lu
+#define	CL46	     0x007F0000lu
+#define	CL47	     0x7F000000lu
+
+
+/* MXVR_ALLOC_12 Masks */
+
+#define	CIU48	     0x00000080lu
+#define	CIU49	     0x00008000lu
+#define	CIU50	     0x00800000lu
+#define	CIU51	     0x80000000lu
+
+#define	CL48	     0x0000007Flu
+#define	CL49	     0x00007F00lu
+#define	CL50	     0x007F0000lu
+#define	CL51	     0x7F000000lu
+
+
+/* MXVR_ALLOC_13 Masks */
+
+#define	CIU52	     0x00000080lu
+#define	CIU53	     0x00008000lu
+#define	CIU54	     0x00800000lu
+#define	CIU55	     0x80000000lu
+
+#define	CL52	     0x0000007Flu
+#define	CL53	     0x00007F00lu
+#define	CL54	     0x007F0000lu
+#define	CL55	     0x7F000000lu
+
+
+/* MXVR_ALLOC_14 Masks */
+
+#define	CIU56	     0x00000080lu
+#define	CIU57	     0x00008000lu
+#define	CIU58	     0x00800000lu
+#define	CIU59	     0x80000000lu
+
+#define	CL56	     0x0000007Flu
+#define	CL57	     0x00007F00lu
+#define	CL58	     0x007F0000lu
+#define	CL59	     0x7F000000lu
+
+
+/* MXVR_SYNC_LCHAN_0 Masks */
+
+#define	LCHANPC0     0x0000000Flu
+#define	LCHANPC1     0x000000F0lu
+#define	LCHANPC2     0x00000F00lu
+#define	LCHANPC3     0x0000F000lu
+#define	LCHANPC4     0x000F0000lu
+#define	LCHANPC5     0x00F00000lu
+#define	LCHANPC6     0x0F000000lu
+#define	LCHANPC7     0xF0000000lu
+
+
+/* MXVR_SYNC_LCHAN_1 Masks */
+
+#define	LCHANPC8     0x0000000Flu
+#define	LCHANPC9     0x000000F0lu
+#define	LCHANPC10    0x00000F00lu
+#define	LCHANPC11    0x0000F000lu
+#define	LCHANPC12    0x000F0000lu
+#define	LCHANPC13    0x00F00000lu
+#define	LCHANPC14    0x0F000000lu
+#define	LCHANPC15    0xF0000000lu
+
+
+/* MXVR_SYNC_LCHAN_2 Masks */
+
+#define	LCHANPC16    0x0000000Flu
+#define	LCHANPC17    0x000000F0lu
+#define	LCHANPC18    0x00000F00lu
+#define	LCHANPC19    0x0000F000lu
+#define	LCHANPC20    0x000F0000lu
+#define	LCHANPC21    0x00F00000lu
+#define	LCHANPC22    0x0F000000lu
+#define	LCHANPC23    0xF0000000lu
+
+
+/* MXVR_SYNC_LCHAN_3 Masks */
+
+#define	LCHANPC24    0x0000000Flu
+#define	LCHANPC25    0x000000F0lu
+#define	LCHANPC26    0x00000F00lu
+#define	LCHANPC27    0x0000F000lu
+#define	LCHANPC28    0x000F0000lu
+#define	LCHANPC29    0x00F00000lu
+#define	LCHANPC30    0x0F000000lu
+#define	LCHANPC31    0xF0000000lu
+
+
+/* MXVR_SYNC_LCHAN_4 Masks */
+
+#define	LCHANPC32    0x0000000Flu
+#define	LCHANPC33    0x000000F0lu
+#define	LCHANPC34    0x00000F00lu
+#define	LCHANPC35    0x0000F000lu
+#define	LCHANPC36    0x000F0000lu
+#define	LCHANPC37    0x00F00000lu
+#define	LCHANPC38    0x0F000000lu
+#define	LCHANPC39    0xF0000000lu
+
+
+/* MXVR_SYNC_LCHAN_5 Masks */
+
+#define	LCHANPC40    0x0000000Flu
+#define	LCHANPC41    0x000000F0lu
+#define	LCHANPC42    0x00000F00lu
+#define	LCHANPC43    0x0000F000lu
+#define	LCHANPC44    0x000F0000lu
+#define	LCHANPC45    0x00F00000lu
+#define	LCHANPC46    0x0F000000lu
+#define	LCHANPC47    0xF0000000lu
+
+
+/* MXVR_SYNC_LCHAN_6 Masks */
+
+#define	LCHANPC48    0x0000000Flu
+#define	LCHANPC49    0x000000F0lu
+#define	LCHANPC50    0x00000F00lu
+#define	LCHANPC51    0x0000F000lu
+#define	LCHANPC52    0x000F0000lu
+#define	LCHANPC53    0x00F00000lu
+#define	LCHANPC54    0x0F000000lu
+#define	LCHANPC55    0xF0000000lu
+
+
+/* MXVR_SYNC_LCHAN_7 Masks */
+
+#define	LCHANPC56    0x0000000Flu
+#define	LCHANPC57    0x000000F0lu
+#define	LCHANPC58    0x00000F00lu
+#define	LCHANPC59    0x0000F000lu
+
+
+/* MXVR_DMAx_CONFIG Masks */
+
+#define	MDMAEN	    0x00000001lu
+#define	DD	    0x00000002lu
+#define	LCHAN	    0x000003C0lu
+#define	BITSWAPEN   0x00000400lu
+#define	BYSWAPEN    0x00000800lu
+#define	MFLOW	    0x00007000lu
+#define	FIXEDPM	    0x00080000lu
+#define	STARTPAT    0x00300000lu
+#define	STOPPAT	    0x00C00000lu
+#define	COUNTPOS    0x1C000000lu
+
+#define	DD_TX	    0x00000000lu
+#define	DD_RX	    0x00000002lu
+
+#define	LCHAN_0	    0x00000000lu
+#define	LCHAN_1	    0x00000040lu
+#define	LCHAN_2	    0x00000080lu
+#define	LCHAN_3	    0x000000C0lu
+#define	LCHAN_4	    0x00000100lu
+#define	LCHAN_5	    0x00000140lu
+#define	LCHAN_6	    0x00000180lu
+#define	LCHAN_7	    0x000001C0lu
+
+#define	MFLOW_STOP  0x00000000lu
+#define	MFLOW_AUTO  0x00001000lu
+#define	MFLOW_PVC   0x00002000lu
+#define	MFLOW_PSS   0x00003000lu
+#define	MFLOW_PFC   0x00004000lu
+
+#define	STARTPAT_0  0x00000000lu
+#define	STARTPAT_1  0x00100000lu
+
+#define	STOPPAT_0   0x00000000lu
+#define	STOPPAT_1   0x00400000lu
+
+#define	COUNTPOS_0  0x00000000lu
+#define	COUNTPOS_1  0x04000000lu
+#define	COUNTPOS_2  0x08000000lu
+#define	COUNTPOS_3  0x0C000000lu
+#define	COUNTPOS_4  0x10000000lu
+#define	COUNTPOS_5  0x14000000lu
+#define	COUNTPOS_6  0x18000000lu
+#define	COUNTPOS_7  0x1C000000lu
+
+
+/* MXVR_AP_CTL Masks */
+
+#define	STARTAP	   0x00000001lu
+#define	CANCELAP   0x00000002lu
+#define	RESETAP	   0x00000004lu
+#define	APRBE0	   0x00004000lu
+#define	APRBE1	   0x00008000lu
+#define	APRBEX	   0x0000C000lu
+
+
+/* MXVR_CM_CTL Masks */
+
+#define	STARTCM	   0x00000001lu
+#define	CANCELCM   0x00000002lu
+#define	CMRBEX	   0xFFFF0000lu
+#define	CMRBE0	   0x00010000lu
+#define	CMRBE1	   0x00020000lu
+#define	CMRBE2	   0x00040000lu
+#define	CMRBE3	   0x00080000lu
+#define	CMRBE4	   0x00100000lu
+#define	CMRBE5	   0x00200000lu
+#define	CMRBE6	   0x00400000lu
+#define	CMRBE7	   0x00800000lu
+#define	CMRBE8	   0x01000000lu
+#define	CMRBE9	   0x02000000lu
+#define	CMRBE10	   0x04000000lu
+#define	CMRBE11	   0x08000000lu
+#define	CMRBE12	   0x10000000lu
+#define	CMRBE13	   0x20000000lu
+#define	CMRBE14	   0x40000000lu
+#define	CMRBE15	   0x80000000lu
+
+
+/* MXVR_PAT_DATA_x Masks */
+
+#define	MATCH_DATA_0 0x000000FFlu
+#define	MATCH_DATA_1 0x0000FF00lu
+#define	MATCH_DATA_2 0x00FF0000lu
+#define	MATCH_DATA_3 0xFF000000lu
+
+
+
+/* MXVR_PAT_EN_x Masks */
+
+#define	MATCH_EN_0_0 0x00000001lu
+#define	MATCH_EN_0_1 0x00000002lu
+#define	MATCH_EN_0_2 0x00000004lu
+#define	MATCH_EN_0_3 0x00000008lu
+#define	MATCH_EN_0_4 0x00000010lu
+#define	MATCH_EN_0_5 0x00000020lu
+#define	MATCH_EN_0_6 0x00000040lu
+#define	MATCH_EN_0_7 0x00000080lu
+
+#define	MATCH_EN_1_0 0x00000100lu
+#define	MATCH_EN_1_1 0x00000200lu
+#define	MATCH_EN_1_2 0x00000400lu
+#define	MATCH_EN_1_3 0x00000800lu
+#define	MATCH_EN_1_4 0x00001000lu
+#define	MATCH_EN_1_5 0x00002000lu
+#define	MATCH_EN_1_6 0x00004000lu
+#define	MATCH_EN_1_7 0x00008000lu
+
+#define	MATCH_EN_2_0 0x00010000lu
+#define	MATCH_EN_2_1 0x00020000lu
+#define	MATCH_EN_2_2 0x00040000lu
+#define	MATCH_EN_2_3 0x00080000lu
+#define	MATCH_EN_2_4 0x00100000lu
+#define	MATCH_EN_2_5 0x00200000lu
+#define	MATCH_EN_2_6 0x00400000lu
+#define	MATCH_EN_2_7 0x00800000lu
+
+#define	MATCH_EN_3_0 0x01000000lu
+#define	MATCH_EN_3_1 0x02000000lu
+#define	MATCH_EN_3_2 0x04000000lu
+#define	MATCH_EN_3_3 0x08000000lu
+#define	MATCH_EN_3_4 0x10000000lu
+#define	MATCH_EN_3_5 0x20000000lu
+#define	MATCH_EN_3_6 0x40000000lu
+#define	MATCH_EN_3_7 0x80000000lu
+
+
+/* MXVR_ROUTING_0 Masks */
+
+#define	MUTE_CH0	0x00000080lu
+#define	MUTE_CH1	0x00008000lu
+#define	MUTE_CH2	0x00800000lu
+#define	MUTE_CH3	0x80000000lu
+
+#define	TX_CH0		0x0000007Flu
+#define	TX_CH1		0x00007F00lu
+#define	TX_CH2		0x007F0000lu
+#define	TX_CH3		0x7F000000lu
+
+
+/* MXVR_ROUTING_1 Masks */
+
+#define	MUTE_CH4	0x00000080lu
+#define	MUTE_CH5	0x00008000lu
+#define	MUTE_CH6	0x00800000lu
+#define	MUTE_CH7	0x80000000lu
+
+#define	TX_CH4		0x0000007Flu
+#define	TX_CH5		0x00007F00lu
+#define	TX_CH6		0x007F0000lu
+#define	TX_CH7		0x7F000000lu
+
+
+/* MXVR_ROUTING_2 Masks */
+
+#define	MUTE_CH8	0x00000080lu
+#define	MUTE_CH9	0x00008000lu
+#define	MUTE_CH10	0x00800000lu
+#define	MUTE_CH11	0x80000000lu
+
+#define	TX_CH8		0x0000007Flu
+#define	TX_CH9		0x00007F00lu
+#define	TX_CH10		0x007F0000lu
+#define	TX_CH11		0x7F000000lu
+
+/* MXVR_ROUTING_3 Masks */
+
+#define	MUTE_CH12	0x00000080lu
+#define	MUTE_CH13	0x00008000lu
+#define	MUTE_CH14	0x00800000lu
+#define	MUTE_CH15	0x80000000lu
+
+#define	TX_CH12		0x0000007Flu
+#define	TX_CH13		0x00007F00lu
+#define	TX_CH14		0x007F0000lu
+#define	TX_CH15		0x7F000000lu
+
+
+/* MXVR_ROUTING_4 Masks */
+
+#define	MUTE_CH16	0x00000080lu
+#define	MUTE_CH17	0x00008000lu
+#define	MUTE_CH18	0x00800000lu
+#define	MUTE_CH19	0x80000000lu
+
+#define	TX_CH16		0x0000007Flu
+#define	TX_CH17		0x00007F00lu
+#define	TX_CH18		0x007F0000lu
+#define	TX_CH19		0x7F000000lu
+
+
+/* MXVR_ROUTING_5 Masks */
+
+#define	MUTE_CH20	0x00000080lu
+#define	MUTE_CH21	0x00008000lu
+#define	MUTE_CH22	0x00800000lu
+#define	MUTE_CH23	0x80000000lu
+
+#define	TX_CH20		0x0000007Flu
+#define	TX_CH21		0x00007F00lu
+#define	TX_CH22		0x007F0000lu
+#define	TX_CH23		0x7F000000lu
+
+
+/* MXVR_ROUTING_6 Masks */
+
+#define	MUTE_CH24	0x00000080lu
+#define	MUTE_CH25	0x00008000lu
+#define	MUTE_CH26	0x00800000lu
+#define	MUTE_CH27	0x80000000lu
+
+#define	TX_CH24		0x0000007Flu
+#define	TX_CH25		0x00007F00lu
+#define	TX_CH26		0x007F0000lu
+#define	TX_CH27		0x7F000000lu
+
+
+/* MXVR_ROUTING_7 Masks */
+
+#define	MUTE_CH28	0x00000080lu
+#define	MUTE_CH29	0x00008000lu
+#define	MUTE_CH30	0x00800000lu
+#define	MUTE_CH31	0x80000000lu
+
+#define	TX_CH28		0x0000007Flu
+#define	TX_CH29		0x00007F00lu
+#define	TX_CH30		0x007F0000lu
+#define	TX_CH31		0x7F000000lu
+
+
+/* MXVR_ROUTING_8 Masks */
+
+#define	MUTE_CH32	0x00000080lu
+#define	MUTE_CH33	0x00008000lu
+#define	MUTE_CH34	0x00800000lu
+#define	MUTE_CH35	0x80000000lu
+
+#define	TX_CH32		0x0000007Flu
+#define	TX_CH33		0x00007F00lu
+#define	TX_CH34		0x007F0000lu
+#define	TX_CH35		0x7F000000lu
+
+
+/* MXVR_ROUTING_9 Masks */
+
+#define	MUTE_CH36	0x00000080lu
+#define	MUTE_CH37	0x00008000lu
+#define	MUTE_CH38	0x00800000lu
+#define	MUTE_CH39	0x80000000lu
+
+#define	TX_CH36		0x0000007Flu
+#define	TX_CH37		0x00007F00lu
+#define	TX_CH38		0x007F0000lu
+#define	TX_CH39		0x7F000000lu
+
+
+/* MXVR_ROUTING_10 Masks */
+
+#define	MUTE_CH40	0x00000080lu
+#define	MUTE_CH41	0x00008000lu
+#define	MUTE_CH42	0x00800000lu
+#define	MUTE_CH43	0x80000000lu
+
+#define	TX_CH40		0x0000007Flu
+#define	TX_CH41		0x00007F00lu
+#define	TX_CH42		0x007F0000lu
+#define	TX_CH43		0x7F000000lu
+
+
+/* MXVR_ROUTING_11 Masks */
+
+#define	MUTE_CH44	0x00000080lu
+#define	MUTE_CH45	0x00008000lu
+#define	MUTE_CH46	0x00800000lu
+#define	MUTE_CH47	0x80000000lu
+
+#define	TX_CH44		0x0000007Flu
+#define	TX_CH45		0x00007F00lu
+#define	TX_CH46		0x007F0000lu
+#define	TX_CH47		0x7F000000lu
+
+
+/* MXVR_ROUTING_12 Masks */
+
+#define	MUTE_CH48	0x00000080lu
+#define	MUTE_CH49	0x00008000lu
+#define	MUTE_CH50	0x00800000lu
+#define	MUTE_CH51	0x80000000lu
+
+#define	TX_CH48		0x0000007Flu
+#define	TX_CH49		0x00007F00lu
+#define	TX_CH50		0x007F0000lu
+#define	TX_CH51		0x7F000000lu
+
+
+/* MXVR_ROUTING_13 Masks */
+
+#define	MUTE_CH52	0x00000080lu
+#define	MUTE_CH53	0x00008000lu
+#define	MUTE_CH54	0x00800000lu
+#define	MUTE_CH55	0x80000000lu
+
+#define	TX_CH52		0x0000007Flu
+#define	TX_CH53		0x00007F00lu
+#define	TX_CH54		0x007F0000lu
+#define	TX_CH55		0x7F000000lu
+
+
+/* MXVR_ROUTING_14 Masks */
+
+#define	MUTE_CH56	0x00000080lu
+#define	MUTE_CH57	0x00008000lu
+#define	MUTE_CH58	0x00800000lu
+#define	MUTE_CH59	0x80000000lu
+
+#define	TX_CH56		0x0000007Flu
+#define	TX_CH57		0x00007F00lu
+#define	TX_CH58		0x007F0000lu
+#define	TX_CH59		0x7F000000lu
+
+
+/* Control Message Receive Buffer (CMRB) Address Offsets */
+
+#define	CMRB_STRIDE	  0x00000016lu
+
+#define	CMRB_DST_OFFSET	  0x00000000lu
+#define	CMRB_SRC_OFFSET	  0x00000002lu
+#define	CMRB_DATA_OFFSET  0x00000005lu
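+
+/* Illustrative sketch (not part of the original header): the receive buffer
+ * is an array of CMRB_STRIDE-byte entries, so with a hypothetical base
+ * pointer cmrb_base the fields of entry n would be addressed as:
+ *
+ *	cmrb_base + (n * CMRB_STRIDE) + CMRB_DST_OFFSET
+ *	cmrb_base + (n * CMRB_STRIDE) + CMRB_SRC_OFFSET
+ *	cmrb_base + (n * CMRB_STRIDE) + CMRB_DATA_OFFSET
+ */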
+
+
+/* Control Message Transmit Buffer (CMTB) Address Offsets */
+
+#define	CMTB_PRIO_OFFSET    0x00000000lu
+#define	CMTB_DST_OFFSET	    0x00000002lu
+#define	CMTB_SRC_OFFSET	    0x00000004lu
+#define	CMTB_TYPE_OFFSET    0x00000006lu
+#define	CMTB_DATA_OFFSET    0x00000007lu
+
+#define	CMTB_ANSWER_OFFSET  0x0000000Alu
+
+#define	CMTB_STAT_N_OFFSET  0x00000018lu
+#define	CMTB_STAT_A_OFFSET  0x00000016lu
+#define	CMTB_STAT_D_OFFSET  0x0000000Elu
+#define	CMTB_STAT_R_OFFSET  0x00000014lu
+#define	CMTB_STAT_W_OFFSET  0x00000014lu
+#define	CMTB_STAT_G_OFFSET  0x00000014lu
+
+
+/* Asynchronous	Packet Receive Buffer (APRB) Address Offsets */
+
+#define	APRB_STRIDE	  0x00000400lu
+
+#define	APRB_DST_OFFSET	  0x00000000lu
+#define	APRB_LEN_OFFSET	  0x00000002lu
+#define	APRB_SRC_OFFSET	  0x00000004lu
+#define	APRB_DATA_OFFSET  0x00000006lu
+
+
+/* Asynchronous	Packet Transmit	Buffer (APTB) Address Offsets */
+
+#define	APTB_PRIO_OFFSET  0x00000000lu
+#define	APTB_DST_OFFSET	  0x00000002lu
+#define	APTB_LEN_OFFSET	  0x00000004lu
+#define	APTB_SRC_OFFSET	  0x00000006lu
+#define	APTB_DATA_OFFSET  0x00000008lu
+
+
+/* Remote Read Buffer (RRDB) Address Offsets */
+
+#define	RRDB_WADDR_OFFSET 0x00000100lu
+#define	RRDB_WLEN_OFFSET  0x00000101lu
+
+
+
+/* ************	 CONTROLLER AREA NETWORK (CAN) MASKS  ***************/
+/* CAN_CONTROL Masks					 */
+#define	SRS			0x0001	/* Software Reset */
+#define	DNM			0x0002	/* Device Net Mode */
+#define	ABO			0x0004	/* Auto-Bus On Enable */
+#define	WBA			0x0010	/* Wake-Up On CAN Bus Activity Enable */
+#define	SMR			0x0020	/* Sleep Mode Request */
+#define	CSR			0x0040	/* CAN Suspend Mode Request */
+#define	CCR			0x0080	/* CAN Configuration Mode Request */
+
+/* CAN_STATUS Masks					 */
+#define	WT			0x0001	/* TX Warning Flag */
+#define	WR			0x0002	/* RX Warning Flag */
+#define	EP			0x0004	/* Error Passive Mode */
+#define	EBO			0x0008	/* Error Bus Off Mode */
+#define	CSA			0x0040	/* Suspend Mode	Acknowledge */
+#define	CCA			0x0080	/* Configuration Mode Acknowledge */
+#define	MBPTR		0x1F00	/* Mailbox Pointer */
+#define	TRM			0x4000	/* Transmit Mode */
+#define	REC			0x8000	/* Receive Mode */
+
+/* CAN_CLOCK Masks		 */
+#define	BRP			0x03FF	/* Bit-Rate Pre-Scaler */
+
+/* CAN_TIMING Masks				 */
+#define	TSEG1		0x000F	/* Time	Segment	1 */
+#define	TSEG2		0x0070	/* Time	Segment	2 */
+#define	SAM			0x0080	/* Sampling */
+#define	SJW			0x0300	/* Synchronization Jump	Width */
+
+/* CAN_DEBUG Masks				 */
+#define	DEC			0x0001	/* Disable CAN Error Counters */
+#define	DRI			0x0002	/* Disable CAN RX Input */
+#define	DTO			0x0004	/* Disable CAN TX Output */
+#define	DIL			0x0008	/* Disable CAN Internal	Loop */
+#define	MAA			0x0010	/* Mode	Auto-Acknowledge Enable */
+#define	MRB			0x0020	/* Mode	Read Back Enable */
+#define	CDE			0x8000	/* CAN Debug Enable */
+
+/* CAN_CEC Masks			 */
+#define	RXECNT		0x00FF	/* Receive Error Counter */
+#define	TXECNT		0xFF00	/* Transmit Error Counter */
+
+/* CAN_INTR Masks				 */
+#define	MBRIRQ	0x0001	/* Mailbox Receive Interrupt */
+#define	MBRIF		MBRIRQ	/* legacy */
+#define	MBTIRQ	0x0002	/* Mailbox Transmit Interrupt */
+#define	MBTIF		MBTIRQ	/* legacy */
+#define	GIRQ		0x0004	/* Global Interrupt */
+#define	SMACK		0x0008	/* Sleep Mode Acknowledge */
+#define	CANTX		0x0040	/* CAN TX Bus Value */
+#define	CANRX		0x0080	/* CAN RX Bus Value */
+
+/* CAN_MBxx_ID1	and CAN_MBxx_ID0 Masks			 */
+#define	DFC			0xFFFF	/* Data	Filtering Code (If Enabled) (ID0) */
+#define	EXTID_LO	0xFFFF	/* Lower 16 Bits of Extended Identifier	(ID0) */
+#define	EXTID_HI	0x0003	/* Upper 2 Bits	of Extended Identifier (ID1) */
+#define	BASEID		0x1FFC	/* Base	Identifier	 */
+#define	IDE			0x2000	/* Identifier Extension */
+#define	RTR			0x4000	/* Remote Frame	Transmission Request */
+#define	AME			0x8000	/* Acceptance Mask Enable */
+
+/* CAN_MBxx_TIMESTAMP Masks */
+#define	TSV			0xFFFF	/* Timestamp */
+
+/* CAN_MBxx_LENGTH Masks */
+#define	DLC			0x000F	/* Data	Length Code */
+
+/* CAN_AMxxH and CAN_AMxxL Masks					 */
+#define	DFM			0xFFFF	/* Data	Field Mask (If Enabled)	(CAN_AMxxL) */
+#define	EXTID_LO	0xFFFF	/* Lower 16 Bits of Extended Identifier	(CAN_AMxxL) */
+#define	EXTID_HI	0x0003	/* Upper 2 Bits	of Extended Identifier (CAN_AMxxH) */
+#define	BASEID		0x1FFC	/* Base	Identifier		 */
+#define	AMIDE		0x2000	/* Acceptance Mask ID Extension	Enable */
+#define	FMD			0x4000	/* Full	Mask Data Field	Enable */
+#define	FDF			0x8000	/* Filter On Data Field	Enable */
+
+/* CAN_MC1 Masks		 */
+#define	MC0			0x0001	/* Enable Mailbox 0 */
+#define	MC1			0x0002	/* Enable Mailbox 1 */
+#define	MC2			0x0004	/* Enable Mailbox 2 */
+#define	MC3			0x0008	/* Enable Mailbox 3 */
+#define	MC4			0x0010	/* Enable Mailbox 4 */
+#define	MC5			0x0020	/* Enable Mailbox 5 */
+#define	MC6			0x0040	/* Enable Mailbox 6 */
+#define	MC7			0x0080	/* Enable Mailbox 7 */
+#define	MC8			0x0100	/* Enable Mailbox 8 */
+#define	MC9			0x0200	/* Enable Mailbox 9 */
+#define	MC10		0x0400	/* Enable Mailbox 10 */
+#define	MC11		0x0800	/* Enable Mailbox 11 */
+#define	MC12		0x1000	/* Enable Mailbox 12 */
+#define	MC13		0x2000	/* Enable Mailbox 13 */
+#define	MC14		0x4000	/* Enable Mailbox 14 */
+#define	MC15		0x8000	/* Enable Mailbox 15 */
+
+/* CAN_MC2 Masks		 */
+#define	MC16		0x0001	/* Enable Mailbox 16 */
+#define	MC17		0x0002	/* Enable Mailbox 17 */
+#define	MC18		0x0004	/* Enable Mailbox 18 */
+#define	MC19		0x0008	/* Enable Mailbox 19 */
+#define	MC20		0x0010	/* Enable Mailbox 20 */
+#define	MC21		0x0020	/* Enable Mailbox 21 */
+#define	MC22		0x0040	/* Enable Mailbox 22 */
+#define	MC23		0x0080	/* Enable Mailbox 23 */
+#define	MC24		0x0100	/* Enable Mailbox 24 */
+#define	MC25		0x0200	/* Enable Mailbox 25 */
+#define	MC26		0x0400	/* Enable Mailbox 26 */
+#define	MC27		0x0800	/* Enable Mailbox 27 */
+#define	MC28		0x1000	/* Enable Mailbox 28 */
+#define	MC29		0x2000	/* Enable Mailbox 29 */
+#define	MC30		0x4000	/* Enable Mailbox 30 */
+#define	MC31		0x8000	/* Enable Mailbox 31 */
+
+/* CAN_MD1 Masks					 */
+#define	MD0			0x0001	/* Enable Mailbox 0 For	Receive */
+#define	MD1			0x0002	/* Enable Mailbox 1 For	Receive */
+#define	MD2			0x0004	/* Enable Mailbox 2 For	Receive */
+#define	MD3			0x0008	/* Enable Mailbox 3 For	Receive */
+#define	MD4			0x0010	/* Enable Mailbox 4 For	Receive */
+#define	MD5			0x0020	/* Enable Mailbox 5 For	Receive */
+#define	MD6			0x0040	/* Enable Mailbox 6 For	Receive */
+#define	MD7			0x0080	/* Enable Mailbox 7 For	Receive */
+#define	MD8			0x0100	/* Enable Mailbox 8 For	Receive */
+#define	MD9			0x0200	/* Enable Mailbox 9 For	Receive */
+#define	MD10		0x0400	/* Enable Mailbox 10 For Receive */
+#define	MD11		0x0800	/* Enable Mailbox 11 For Receive */
+#define	MD12		0x1000	/* Enable Mailbox 12 For Receive */
+#define	MD13		0x2000	/* Enable Mailbox 13 For Receive */
+#define	MD14		0x4000	/* Enable Mailbox 14 For Receive */
+#define	MD15		0x8000	/* Enable Mailbox 15 For Receive */
+
+/* CAN_MD2 Masks					 */
+#define	MD16		0x0001	/* Enable Mailbox 16 For Receive */
+#define	MD17		0x0002	/* Enable Mailbox 17 For Receive */
+#define	MD18		0x0004	/* Enable Mailbox 18 For Receive */
+#define	MD19		0x0008	/* Enable Mailbox 19 For Receive */
+#define	MD20		0x0010	/* Enable Mailbox 20 For Receive */
+#define	MD21		0x0020	/* Enable Mailbox 21 For Receive */
+#define	MD22		0x0040	/* Enable Mailbox 22 For Receive */
+#define	MD23		0x0080	/* Enable Mailbox 23 For Receive */
+#define	MD24		0x0100	/* Enable Mailbox 24 For Receive */
+#define	MD25		0x0200	/* Enable Mailbox 25 For Receive */
+#define	MD26		0x0400	/* Enable Mailbox 26 For Receive */
+#define	MD27		0x0800	/* Enable Mailbox 27 For Receive */
+#define	MD28		0x1000	/* Enable Mailbox 28 For Receive */
+#define	MD29		0x2000	/* Enable Mailbox 29 For Receive */
+#define	MD30		0x4000	/* Enable Mailbox 30 For Receive */
+#define	MD31		0x8000	/* Enable Mailbox 31 For Receive */
+
+/* CAN_RMP1 Masks					 */
+#define	RMP0		0x0001	/* RX Message Pending In Mailbox 0 */
+#define	RMP1		0x0002	/* RX Message Pending In Mailbox 1 */
+#define	RMP2		0x0004	/* RX Message Pending In Mailbox 2 */
+#define	RMP3		0x0008	/* RX Message Pending In Mailbox 3 */
+#define	RMP4		0x0010	/* RX Message Pending In Mailbox 4 */
+#define	RMP5		0x0020	/* RX Message Pending In Mailbox 5 */
+#define	RMP6		0x0040	/* RX Message Pending In Mailbox 6 */
+#define	RMP7		0x0080	/* RX Message Pending In Mailbox 7 */
+#define	RMP8		0x0100	/* RX Message Pending In Mailbox 8 */
+#define	RMP9		0x0200	/* RX Message Pending In Mailbox 9 */
+#define	RMP10		0x0400	/* RX Message Pending In Mailbox 10 */
+#define	RMP11		0x0800	/* RX Message Pending In Mailbox 11 */
+#define	RMP12		0x1000	/* RX Message Pending In Mailbox 12 */
+#define	RMP13		0x2000	/* RX Message Pending In Mailbox 13 */
+#define	RMP14		0x4000	/* RX Message Pending In Mailbox 14 */
+#define	RMP15		0x8000	/* RX Message Pending In Mailbox 15 */
+
+/* CAN_RMP2 Masks					 */
+#define	RMP16		0x0001	/* RX Message Pending In Mailbox 16 */
+#define	RMP17		0x0002	/* RX Message Pending In Mailbox 17 */
+#define	RMP18		0x0004	/* RX Message Pending In Mailbox 18 */
+#define	RMP19		0x0008	/* RX Message Pending In Mailbox 19 */
+#define	RMP20		0x0010	/* RX Message Pending In Mailbox 20 */
+#define	RMP21		0x0020	/* RX Message Pending In Mailbox 21 */
+#define	RMP22		0x0040	/* RX Message Pending In Mailbox 22 */
+#define	RMP23		0x0080	/* RX Message Pending In Mailbox 23 */
+#define	RMP24		0x0100	/* RX Message Pending In Mailbox 24 */
+#define	RMP25		0x0200	/* RX Message Pending In Mailbox 25 */
+#define	RMP26		0x0400	/* RX Message Pending In Mailbox 26 */
+#define	RMP27		0x0800	/* RX Message Pending In Mailbox 27 */
+#define	RMP28		0x1000	/* RX Message Pending In Mailbox 28 */
+#define	RMP29		0x2000	/* RX Message Pending In Mailbox 29 */
+#define	RMP30		0x4000	/* RX Message Pending In Mailbox 30 */
+#define	RMP31		0x8000	/* RX Message Pending In Mailbox 31 */
+
+/* CAN_RML1 Masks					 */
+#define	RML0		0x0001	/* RX Message Lost In Mailbox 0 */
+#define	RML1		0x0002	/* RX Message Lost In Mailbox 1 */
+#define	RML2		0x0004	/* RX Message Lost In Mailbox 2 */
+#define	RML3		0x0008	/* RX Message Lost In Mailbox 3 */
+#define	RML4		0x0010	/* RX Message Lost In Mailbox 4 */
+#define	RML5		0x0020	/* RX Message Lost In Mailbox 5 */
+#define	RML6		0x0040	/* RX Message Lost In Mailbox 6 */
+#define	RML7		0x0080	/* RX Message Lost In Mailbox 7 */
+#define	RML8		0x0100	/* RX Message Lost In Mailbox 8 */
+#define	RML9		0x0200	/* RX Message Lost In Mailbox 9 */
+#define	RML10		0x0400	/* RX Message Lost In Mailbox 10 */
+#define	RML11		0x0800	/* RX Message Lost In Mailbox 11 */
+#define	RML12		0x1000	/* RX Message Lost In Mailbox 12 */
+#define	RML13		0x2000	/* RX Message Lost In Mailbox 13 */
+#define	RML14		0x4000	/* RX Message Lost In Mailbox 14 */
+#define	RML15		0x8000	/* RX Message Lost In Mailbox 15 */
+
+/* CAN_RML2 Masks					 */
+#define	RML16		0x0001	/* RX Message Lost In Mailbox 16 */
+#define	RML17		0x0002	/* RX Message Lost In Mailbox 17 */
+#define	RML18		0x0004	/* RX Message Lost In Mailbox 18 */
+#define	RML19		0x0008	/* RX Message Lost In Mailbox 19 */
+#define	RML20		0x0010	/* RX Message Lost In Mailbox 20 */
+#define	RML21		0x0020	/* RX Message Lost In Mailbox 21 */
+#define	RML22		0x0040	/* RX Message Lost In Mailbox 22 */
+#define	RML23		0x0080	/* RX Message Lost In Mailbox 23 */
+#define	RML24		0x0100	/* RX Message Lost In Mailbox 24 */
+#define	RML25		0x0200	/* RX Message Lost In Mailbox 25 */
+#define	RML26		0x0400	/* RX Message Lost In Mailbox 26 */
+#define	RML27		0x0800	/* RX Message Lost In Mailbox 27 */
+#define	RML28		0x1000	/* RX Message Lost In Mailbox 28 */
+#define	RML29		0x2000	/* RX Message Lost In Mailbox 29 */
+#define	RML30		0x4000	/* RX Message Lost In Mailbox 30 */
+#define	RML31		0x8000	/* RX Message Lost In Mailbox 31 */
+
+/* CAN_OPSS1 Masks													 */
+#define	OPSS0		0x0001	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	0 */
+#define	OPSS1		0x0002	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	1 */
+#define	OPSS2		0x0004	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	2 */
+#define	OPSS3		0x0008	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	3 */
+#define	OPSS4		0x0010	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	4 */
+#define	OPSS5		0x0020	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	5 */
+#define	OPSS6		0x0040	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	6 */
+#define	OPSS7		0x0080	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	7 */
+#define	OPSS8		0x0100	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	8 */
+#define	OPSS9		0x0200	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	9 */
+#define	OPSS10		0x0400	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	10 */
+#define	OPSS11		0x0800	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	11 */
+#define	OPSS12		0x1000	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	12 */
+#define	OPSS13		0x2000	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	13 */
+#define	OPSS14		0x4000	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	14 */
+#define	OPSS15		0x8000	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	15 */
+
+/* CAN_OPSS2 Masks													 */
+#define	OPSS16		0x0001	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	16 */
+#define	OPSS17		0x0002	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	17 */
+#define	OPSS18		0x0004	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	18 */
+#define	OPSS19		0x0008	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	19 */
+#define	OPSS20		0x0010	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	20 */
+#define	OPSS21		0x0020	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	21 */
+#define	OPSS22		0x0040	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	22 */
+#define	OPSS23		0x0080	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	23 */
+#define	OPSS24		0x0100	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	24 */
+#define	OPSS25		0x0200	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	25 */
+#define	OPSS26		0x0400	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	26 */
+#define	OPSS27		0x0800	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	27 */
+#define	OPSS28		0x1000	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	28 */
+#define	OPSS29		0x2000	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	29 */
+#define	OPSS30		0x4000	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	30 */
+#define	OPSS31		0x8000	/* Enable RX Overwrite Protection or TX	Single-Shot For	Mailbox	31 */
+
+/* CAN_TRR1 Masks							 */
+#define	TRR0		0x0001	/* Deny	But Don't Lock Access To Mailbox 0 */
+#define	TRR1		0x0002	/* Deny	But Don't Lock Access To Mailbox 1 */
+#define	TRR2		0x0004	/* Deny	But Don't Lock Access To Mailbox 2 */
+#define	TRR3		0x0008	/* Deny	But Don't Lock Access To Mailbox 3 */
+#define	TRR4		0x0010	/* Deny	But Don't Lock Access To Mailbox 4 */
+#define	TRR5		0x0020	/* Deny	But Don't Lock Access To Mailbox 5 */
+#define	TRR6		0x0040	/* Deny	But Don't Lock Access To Mailbox 6 */
+#define	TRR7		0x0080	/* Deny	But Don't Lock Access To Mailbox 7 */
+#define	TRR8		0x0100	/* Deny	But Don't Lock Access To Mailbox 8 */
+#define	TRR9		0x0200	/* Deny	But Don't Lock Access To Mailbox 9 */
+#define	TRR10		0x0400	/* Deny	But Don't Lock Access To Mailbox 10 */
+#define	TRR11		0x0800	/* Deny	But Don't Lock Access To Mailbox 11 */
+#define	TRR12		0x1000	/* Deny	But Don't Lock Access To Mailbox 12 */
+#define	TRR13		0x2000	/* Deny	But Don't Lock Access To Mailbox 13 */
+#define	TRR14		0x4000	/* Deny	But Don't Lock Access To Mailbox 14 */
+#define	TRR15		0x8000	/* Deny	But Don't Lock Access To Mailbox 15 */
+
+/* CAN_TRR2 Masks							 */
+#define	TRR16		0x0001	/* Deny	But Don't Lock Access To Mailbox 16 */
+#define	TRR17		0x0002	/* Deny	But Don't Lock Access To Mailbox 17 */
+#define	TRR18		0x0004	/* Deny	But Don't Lock Access To Mailbox 18 */
+#define	TRR19		0x0008	/* Deny	But Don't Lock Access To Mailbox 19 */
+#define	TRR20		0x0010	/* Deny	But Don't Lock Access To Mailbox 20 */
+#define	TRR21		0x0020	/* Deny	But Don't Lock Access To Mailbox 21 */
+#define	TRR22		0x0040	/* Deny	But Don't Lock Access To Mailbox 22 */
+#define	TRR23		0x0080	/* Deny	But Don't Lock Access To Mailbox 23 */
+#define	TRR24		0x0100	/* Deny	But Don't Lock Access To Mailbox 24 */
+#define	TRR25		0x0200	/* Deny	But Don't Lock Access To Mailbox 25 */
+#define	TRR26		0x0400	/* Deny	But Don't Lock Access To Mailbox 26 */
+#define	TRR27		0x0800	/* Deny	But Don't Lock Access To Mailbox 27 */
+#define	TRR28		0x1000	/* Deny	But Don't Lock Access To Mailbox 28 */
+#define	TRR29		0x2000	/* Deny	But Don't Lock Access To Mailbox 29 */
+#define	TRR30		0x4000	/* Deny	But Don't Lock Access To Mailbox 30 */
+#define	TRR31		0x8000	/* Deny	But Don't Lock Access To Mailbox 31 */
+
+/* CAN_TRS1 Masks						 */
+#define	TRS0		0x0001	/* Remote Frame	Request	For Mailbox 0 */
+#define	TRS1		0x0002	/* Remote Frame	Request	For Mailbox 1 */
+#define	TRS2		0x0004	/* Remote Frame	Request	For Mailbox 2 */
+#define	TRS3		0x0008	/* Remote Frame	Request	For Mailbox 3 */
+#define	TRS4		0x0010	/* Remote Frame	Request	For Mailbox 4 */
+#define	TRS5		0x0020	/* Remote Frame	Request	For Mailbox 5 */
+#define	TRS6		0x0040	/* Remote Frame	Request	For Mailbox 6 */
+#define	TRS7		0x0080	/* Remote Frame	Request	For Mailbox 7 */
+#define	TRS8		0x0100	/* Remote Frame	Request	For Mailbox 8 */
+#define	TRS9		0x0200	/* Remote Frame	Request	For Mailbox 9 */
+#define	TRS10		0x0400	/* Remote Frame	Request	For Mailbox 10 */
+#define	TRS11		0x0800	/* Remote Frame	Request	For Mailbox 11 */
+#define	TRS12		0x1000	/* Remote Frame	Request	For Mailbox 12 */
+#define	TRS13		0x2000	/* Remote Frame	Request	For Mailbox 13 */
+#define	TRS14		0x4000	/* Remote Frame	Request	For Mailbox 14 */
+#define	TRS15		0x8000	/* Remote Frame	Request	For Mailbox 15 */
+
+/* CAN_TRS2 Masks						 */
+#define	TRS16		0x0001	/* Remote Frame	Request	For Mailbox 16 */
+#define	TRS17		0x0002	/* Remote Frame	Request	For Mailbox 17 */
+#define	TRS18		0x0004	/* Remote Frame	Request	For Mailbox 18 */
+#define	TRS19		0x0008	/* Remote Frame	Request	For Mailbox 19 */
+#define	TRS20		0x0010	/* Remote Frame	Request	For Mailbox 20 */
+#define	TRS21		0x0020	/* Remote Frame	Request	For Mailbox 21 */
+#define	TRS22		0x0040	/* Remote Frame	Request	For Mailbox 22 */
+#define	TRS23		0x0080	/* Remote Frame	Request	For Mailbox 23 */
+#define	TRS24		0x0100	/* Remote Frame	Request	For Mailbox 24 */
+#define	TRS25		0x0200	/* Remote Frame	Request	For Mailbox 25 */
+#define	TRS26		0x0400	/* Remote Frame	Request	For Mailbox 26 */
+#define	TRS27		0x0800	/* Remote Frame	Request	For Mailbox 27 */
+#define	TRS28		0x1000	/* Remote Frame	Request	For Mailbox 28 */
+#define	TRS29		0x2000	/* Remote Frame	Request	For Mailbox 29 */
+#define	TRS30		0x4000	/* Remote Frame	Request	For Mailbox 30 */
+#define	TRS31		0x8000	/* Remote Frame	Request	For Mailbox 31 */
+
+/* CAN_AA1 Masks					 */
+#define	AA0			0x0001	/* Aborted Message In Mailbox 0 */
+#define	AA1			0x0002	/* Aborted Message In Mailbox 1 */
+#define	AA2			0x0004	/* Aborted Message In Mailbox 2 */
+#define	AA3			0x0008	/* Aborted Message In Mailbox 3 */
+#define	AA4			0x0010	/* Aborted Message In Mailbox 4 */
+#define	AA5			0x0020	/* Aborted Message In Mailbox 5 */
+#define	AA6			0x0040	/* Aborted Message In Mailbox 6 */
+#define	AA7			0x0080	/* Aborted Message In Mailbox 7 */
+#define	AA8			0x0100	/* Aborted Message In Mailbox 8 */
+#define	AA9			0x0200	/* Aborted Message In Mailbox 9 */
+#define	AA10		0x0400	/* Aborted Message In Mailbox 10 */
+#define	AA11		0x0800	/* Aborted Message In Mailbox 11 */
+#define	AA12		0x1000	/* Aborted Message In Mailbox 12 */
+#define	AA13		0x2000	/* Aborted Message In Mailbox 13 */
+#define	AA14		0x4000	/* Aborted Message In Mailbox 14 */
+#define	AA15		0x8000	/* Aborted Message In Mailbox 15 */
+
+/* CAN_AA2 Masks					 */
+#define	AA16		0x0001	/* Aborted Message In Mailbox 16 */
+#define	AA17		0x0002	/* Aborted Message In Mailbox 17 */
+#define	AA18		0x0004	/* Aborted Message In Mailbox 18 */
+#define	AA19		0x0008	/* Aborted Message In Mailbox 19 */
+#define	AA20		0x0010	/* Aborted Message In Mailbox 20 */
+#define	AA21		0x0020	/* Aborted Message In Mailbox 21 */
+#define	AA22		0x0040	/* Aborted Message In Mailbox 22 */
+#define	AA23		0x0080	/* Aborted Message In Mailbox 23 */
+#define	AA24		0x0100	/* Aborted Message In Mailbox 24 */
+#define	AA25		0x0200	/* Aborted Message In Mailbox 25 */
+#define	AA26		0x0400	/* Aborted Message In Mailbox 26 */
+#define	AA27		0x0800	/* Aborted Message In Mailbox 27 */
+#define	AA28		0x1000	/* Aborted Message In Mailbox 28 */
+#define	AA29		0x2000	/* Aborted Message In Mailbox 29 */
+#define	AA30		0x4000	/* Aborted Message In Mailbox 30 */
+#define	AA31		0x8000	/* Aborted Message In Mailbox 31 */
+
+/* CAN_TA1 Masks						 */
+#define	TA0			0x0001	/* Transmit Successful From Mailbox 0 */
+#define	TA1			0x0002	/* Transmit Successful From Mailbox 1 */
+#define	TA2			0x0004	/* Transmit Successful From Mailbox 2 */
+#define	TA3			0x0008	/* Transmit Successful From Mailbox 3 */
+#define	TA4			0x0010	/* Transmit Successful From Mailbox 4 */
+#define	TA5			0x0020	/* Transmit Successful From Mailbox 5 */
+#define	TA6			0x0040	/* Transmit Successful From Mailbox 6 */
+#define	TA7			0x0080	/* Transmit Successful From Mailbox 7 */
+#define	TA8			0x0100	/* Transmit Successful From Mailbox 8 */
+#define	TA9			0x0200	/* Transmit Successful From Mailbox 9 */
+#define	TA10		0x0400	/* Transmit Successful From Mailbox 10 */
+#define	TA11		0x0800	/* Transmit Successful From Mailbox 11 */
+#define	TA12		0x1000	/* Transmit Successful From Mailbox 12 */
+#define	TA13		0x2000	/* Transmit Successful From Mailbox 13 */
+#define	TA14		0x4000	/* Transmit Successful From Mailbox 14 */
+#define	TA15		0x8000	/* Transmit Successful From Mailbox 15 */
+
+/* CAN_TA2 Masks						 */
+#define	TA16		0x0001	/* Transmit Successful From Mailbox 16 */
+#define	TA17		0x0002	/* Transmit Successful From Mailbox 17 */
+#define	TA18		0x0004	/* Transmit Successful From Mailbox 18 */
+#define	TA19		0x0008	/* Transmit Successful From Mailbox 19 */
+#define	TA20		0x0010	/* Transmit Successful From Mailbox 20 */
+#define	TA21		0x0020	/* Transmit Successful From Mailbox 21 */
+#define	TA22		0x0040	/* Transmit Successful From Mailbox 22 */
+#define	TA23		0x0080	/* Transmit Successful From Mailbox 23 */
+#define	TA24		0x0100	/* Transmit Successful From Mailbox 24 */
+#define	TA25		0x0200	/* Transmit Successful From Mailbox 25 */
+#define	TA26		0x0400	/* Transmit Successful From Mailbox 26 */
+#define	TA27		0x0800	/* Transmit Successful From Mailbox 27 */
+#define	TA28		0x1000	/* Transmit Successful From Mailbox 28 */
+#define	TA29		0x2000	/* Transmit Successful From Mailbox 29 */
+#define	TA30		0x4000	/* Transmit Successful From Mailbox 30 */
+#define	TA31		0x8000	/* Transmit Successful From Mailbox 31 */
+
+/* CAN_MBTD Masks					 */
+#define	TDPTR		0x001F	/* Mailbox To Temporarily Disable */
+#define	TDA			0x0040	/* Temporary Disable Acknowledge */
+#define	TDR			0x0080	/* Temporary Disable Request */
+
+/* CAN_RFH1 Masks											 */
+#define	RFH0		0x0001	/* Enable Automatic Remote Frame Handling For Mailbox 0 */
+#define	RFH1		0x0002	/* Enable Automatic Remote Frame Handling For Mailbox 1 */
+#define	RFH2		0x0004	/* Enable Automatic Remote Frame Handling For Mailbox 2 */
+#define	RFH3		0x0008	/* Enable Automatic Remote Frame Handling For Mailbox 3 */
+#define	RFH4		0x0010	/* Enable Automatic Remote Frame Handling For Mailbox 4 */
+#define	RFH5		0x0020	/* Enable Automatic Remote Frame Handling For Mailbox 5 */
+#define	RFH6		0x0040	/* Enable Automatic Remote Frame Handling For Mailbox 6 */
+#define	RFH7		0x0080	/* Enable Automatic Remote Frame Handling For Mailbox 7 */
+#define	RFH8		0x0100	/* Enable Automatic Remote Frame Handling For Mailbox 8 */
+#define	RFH9		0x0200	/* Enable Automatic Remote Frame Handling For Mailbox 9 */
+#define	RFH10		0x0400	/* Enable Automatic Remote Frame Handling For Mailbox 10 */
+#define	RFH11		0x0800	/* Enable Automatic Remote Frame Handling For Mailbox 11 */
+#define	RFH12		0x1000	/* Enable Automatic Remote Frame Handling For Mailbox 12 */
+#define	RFH13		0x2000	/* Enable Automatic Remote Frame Handling For Mailbox 13 */
+#define	RFH14		0x4000	/* Enable Automatic Remote Frame Handling For Mailbox 14 */
+#define	RFH15		0x8000	/* Enable Automatic Remote Frame Handling For Mailbox 15 */
+
+/* CAN_RFH2 Masks											 */
+#define	RFH16		0x0001	/* Enable Automatic Remote Frame Handling For Mailbox 16 */
+#define	RFH17		0x0002	/* Enable Automatic Remote Frame Handling For Mailbox 17 */
+#define	RFH18		0x0004	/* Enable Automatic Remote Frame Handling For Mailbox 18 */
+#define	RFH19		0x0008	/* Enable Automatic Remote Frame Handling For Mailbox 19 */
+#define	RFH20		0x0010	/* Enable Automatic Remote Frame Handling For Mailbox 20 */
+#define	RFH21		0x0020	/* Enable Automatic Remote Frame Handling For Mailbox 21 */
+#define	RFH22		0x0040	/* Enable Automatic Remote Frame Handling For Mailbox 22 */
+#define	RFH23		0x0080	/* Enable Automatic Remote Frame Handling For Mailbox 23 */
+#define	RFH24		0x0100	/* Enable Automatic Remote Frame Handling For Mailbox 24 */
+#define	RFH25		0x0200	/* Enable Automatic Remote Frame Handling For Mailbox 25 */
+#define	RFH26		0x0400	/* Enable Automatic Remote Frame Handling For Mailbox 26 */
+#define	RFH27		0x0800	/* Enable Automatic Remote Frame Handling For Mailbox 27 */
+#define	RFH28		0x1000	/* Enable Automatic Remote Frame Handling For Mailbox 28 */
+#define	RFH29		0x2000	/* Enable Automatic Remote Frame Handling For Mailbox 29 */
+#define	RFH30		0x4000	/* Enable Automatic Remote Frame Handling For Mailbox 30 */
+#define	RFH31		0x8000	/* Enable Automatic Remote Frame Handling For Mailbox 31 */
+
+/* CAN_MBTIF1 Masks						 */
+#define	MBTIF0		0x0001	/* TX Interrupt	Active In Mailbox 0 */
+#define	MBTIF1		0x0002	/* TX Interrupt	Active In Mailbox 1 */
+#define	MBTIF2		0x0004	/* TX Interrupt	Active In Mailbox 2 */
+#define	MBTIF3		0x0008	/* TX Interrupt	Active In Mailbox 3 */
+#define	MBTIF4		0x0010	/* TX Interrupt	Active In Mailbox 4 */
+#define	MBTIF5		0x0020	/* TX Interrupt	Active In Mailbox 5 */
+#define	MBTIF6		0x0040	/* TX Interrupt	Active In Mailbox 6 */
+#define	MBTIF7		0x0080	/* TX Interrupt	Active In Mailbox 7 */
+#define	MBTIF8		0x0100	/* TX Interrupt	Active In Mailbox 8 */
+#define	MBTIF9		0x0200	/* TX Interrupt	Active In Mailbox 9 */
+#define	MBTIF10		0x0400	/* TX Interrupt	Active In Mailbox 10 */
+#define	MBTIF11		0x0800	/* TX Interrupt	Active In Mailbox 11 */
+#define	MBTIF12		0x1000	/* TX Interrupt	Active In Mailbox 12 */
+#define	MBTIF13		0x2000	/* TX Interrupt	Active In Mailbox 13 */
+#define	MBTIF14		0x4000	/* TX Interrupt	Active In Mailbox 14 */
+#define	MBTIF15		0x8000	/* TX Interrupt	Active In Mailbox 15 */
+
+/* CAN_MBTIF2 Masks						 */
+#define	MBTIF16		0x0001	/* TX Interrupt	Active In Mailbox 16 */
+#define	MBTIF17		0x0002	/* TX Interrupt	Active In Mailbox 17 */
+#define	MBTIF18		0x0004	/* TX Interrupt	Active In Mailbox 18 */
+#define	MBTIF19		0x0008	/* TX Interrupt	Active In Mailbox 19 */
+#define	MBTIF20		0x0010	/* TX Interrupt	Active In Mailbox 20 */
+#define	MBTIF21		0x0020	/* TX Interrupt	Active In Mailbox 21 */
+#define	MBTIF22		0x0040	/* TX Interrupt	Active In Mailbox 22 */
+#define	MBTIF23		0x0080	/* TX Interrupt	Active In Mailbox 23 */
+#define	MBTIF24		0x0100	/* TX Interrupt	Active In Mailbox 24 */
+#define	MBTIF25		0x0200	/* TX Interrupt	Active In Mailbox 25 */
+#define	MBTIF26		0x0400	/* TX Interrupt	Active In Mailbox 26 */
+#define	MBTIF27		0x0800	/* TX Interrupt	Active In Mailbox 27 */
+#define	MBTIF28		0x1000	/* TX Interrupt	Active In Mailbox 28 */
+#define	MBTIF29		0x2000	/* TX Interrupt	Active In Mailbox 29 */
+#define	MBTIF30		0x4000	/* TX Interrupt	Active In Mailbox 30 */
+#define	MBTIF31		0x8000	/* TX Interrupt	Active In Mailbox 31 */
+
+/* CAN_MBRIF1 Masks						 */
+#define	MBRIF0		0x0001	/* RX Interrupt	Active In Mailbox 0 */
+#define	MBRIF1		0x0002	/* RX Interrupt	Active In Mailbox 1 */
+#define	MBRIF2		0x0004	/* RX Interrupt	Active In Mailbox 2 */
+#define	MBRIF3		0x0008	/* RX Interrupt	Active In Mailbox 3 */
+#define	MBRIF4		0x0010	/* RX Interrupt	Active In Mailbox 4 */
+#define	MBRIF5		0x0020	/* RX Interrupt	Active In Mailbox 5 */
+#define	MBRIF6		0x0040	/* RX Interrupt	Active In Mailbox 6 */
+#define	MBRIF7		0x0080	/* RX Interrupt	Active In Mailbox 7 */
+#define	MBRIF8		0x0100	/* RX Interrupt	Active In Mailbox 8 */
+#define	MBRIF9		0x0200	/* RX Interrupt	Active In Mailbox 9 */
+#define	MBRIF10		0x0400	/* RX Interrupt	Active In Mailbox 10 */
+#define	MBRIF11		0x0800	/* RX Interrupt	Active In Mailbox 11 */
+#define	MBRIF12		0x1000	/* RX Interrupt	Active In Mailbox 12 */
+#define	MBRIF13		0x2000	/* RX Interrupt	Active In Mailbox 13 */
+#define	MBRIF14		0x4000	/* RX Interrupt	Active In Mailbox 14 */
+#define	MBRIF15		0x8000	/* RX Interrupt	Active In Mailbox 15 */
+
+/* CAN_MBRIF2 Masks						 */
+#define	MBRIF16		0x0001	/* RX Interrupt	Active In Mailbox 16 */
+#define	MBRIF17		0x0002	/* RX Interrupt	Active In Mailbox 17 */
+#define	MBRIF18		0x0004	/* RX Interrupt	Active In Mailbox 18 */
+#define	MBRIF19		0x0008	/* RX Interrupt	Active In Mailbox 19 */
+#define	MBRIF20		0x0010	/* RX Interrupt	Active In Mailbox 20 */
+#define	MBRIF21		0x0020	/* RX Interrupt	Active In Mailbox 21 */
+#define	MBRIF22		0x0040	/* RX Interrupt	Active In Mailbox 22 */
+#define	MBRIF23		0x0080	/* RX Interrupt	Active In Mailbox 23 */
+#define	MBRIF24		0x0100	/* RX Interrupt	Active In Mailbox 24 */
+#define	MBRIF25		0x0200	/* RX Interrupt	Active In Mailbox 25 */
+#define	MBRIF26		0x0400	/* RX Interrupt	Active In Mailbox 26 */
+#define	MBRIF27		0x0800	/* RX Interrupt	Active In Mailbox 27 */
+#define	MBRIF28		0x1000	/* RX Interrupt	Active In Mailbox 28 */
+#define	MBRIF29		0x2000	/* RX Interrupt	Active In Mailbox 29 */
+#define	MBRIF30		0x4000	/* RX Interrupt	Active In Mailbox 30 */
+#define	MBRIF31		0x8000	/* RX Interrupt	Active In Mailbox 31 */
+
+/* CAN_MBIM1 Masks					 */
+#define	MBIM0		0x0001	/* Enable Interrupt For	Mailbox	0 */
+#define	MBIM1		0x0002	/* Enable Interrupt For	Mailbox	1 */
+#define	MBIM2		0x0004	/* Enable Interrupt For	Mailbox	2 */
+#define	MBIM3		0x0008	/* Enable Interrupt For	Mailbox	3 */
+#define	MBIM4		0x0010	/* Enable Interrupt For	Mailbox	4 */
+#define	MBIM5		0x0020	/* Enable Interrupt For	Mailbox	5 */
+#define	MBIM6		0x0040	/* Enable Interrupt For	Mailbox	6 */
+#define	MBIM7		0x0080	/* Enable Interrupt For	Mailbox	7 */
+#define	MBIM8		0x0100	/* Enable Interrupt For	Mailbox	8 */
+#define	MBIM9		0x0200	/* Enable Interrupt For	Mailbox	9 */
+#define	MBIM10		0x0400	/* Enable Interrupt For	Mailbox	10 */
+#define	MBIM11		0x0800	/* Enable Interrupt For	Mailbox	11 */
+#define	MBIM12		0x1000	/* Enable Interrupt For	Mailbox	12 */
+#define	MBIM13		0x2000	/* Enable Interrupt For	Mailbox	13 */
+#define	MBIM14		0x4000	/* Enable Interrupt For	Mailbox	14 */
+#define	MBIM15		0x8000	/* Enable Interrupt For	Mailbox	15 */
+
+/* CAN_MBIM2 Masks					 */
+#define	MBIM16		0x0001	/* Enable Interrupt For	Mailbox	16 */
+#define	MBIM17		0x0002	/* Enable Interrupt For	Mailbox	17 */
+#define	MBIM18		0x0004	/* Enable Interrupt For	Mailbox	18 */
+#define	MBIM19		0x0008	/* Enable Interrupt For	Mailbox	19 */
+#define	MBIM20		0x0010	/* Enable Interrupt For	Mailbox	20 */
+#define	MBIM21		0x0020	/* Enable Interrupt For	Mailbox	21 */
+#define	MBIM22		0x0040	/* Enable Interrupt For	Mailbox	22 */
+#define	MBIM23		0x0080	/* Enable Interrupt For	Mailbox	23 */
+#define	MBIM24		0x0100	/* Enable Interrupt For	Mailbox	24 */
+#define	MBIM25		0x0200	/* Enable Interrupt For	Mailbox	25 */
+#define	MBIM26		0x0400	/* Enable Interrupt For	Mailbox	26 */
+#define	MBIM27		0x0800	/* Enable Interrupt For	Mailbox	27 */
+#define	MBIM28		0x1000	/* Enable Interrupt For	Mailbox	28 */
+#define	MBIM29		0x2000	/* Enable Interrupt For	Mailbox	29 */
+#define	MBIM30		0x4000	/* Enable Interrupt For	Mailbox	30 */
+#define	MBIM31		0x8000	/* Enable Interrupt For	Mailbox	31 */
+
+/* CAN_GIM Masks									 */
+#define	EWTIM		0x0001	/* Enable TX Error Count Interrupt */
+#define	EWRIM		0x0002	/* Enable RX Error Count Interrupt */
+#define	EPIM		0x0004	/* Enable Error-Passive	Mode Interrupt */
+#define	BOIM		0x0008	/* Enable Bus Off Interrupt */
+#define	WUIM		0x0010	/* Enable Wake-Up Interrupt */
+#define	UIAIM		0x0020	/* Enable Access To Unimplemented Address Interrupt */
+#define	AAIM		0x0040	/* Enable Abort	Acknowledge Interrupt */
+#define	RMLIM		0x0080	/* Enable RX Message Lost Interrupt */
+#define	UCEIM		0x0100	/* Enable Universal Counter Overflow Interrupt */
+#define	EXTIM		0x0200	/* Enable External Trigger Output Interrupt */
+#define	ADIM		0x0400	/* Enable Access Denied	Interrupt */
+
+/* CAN_GIS Masks								 */
+#define	EWTIS		0x0001	/* TX Error Count IRQ Status */
+#define	EWRIS		0x0002	/* RX Error Count IRQ Status */
+#define	EPIS		0x0004	/* Error-Passive Mode IRQ Status */
+#define	BOIS		0x0008	/* Bus Off IRQ Status */
+#define	WUIS		0x0010	/* Wake-Up IRQ Status */
+#define	UIAIS		0x0020	/* Access To Unimplemented Address IRQ Status */
+#define	AAIS		0x0040	/* Abort Acknowledge IRQ Status */
+#define	RMLIS		0x0080	/* RX Message Lost IRQ Status */
+#define	UCEIS		0x0100	/* Universal Counter Overflow IRQ Status */
+#define	EXTIS		0x0200	/* External Trigger Output IRQ Status */
+#define	ADIS		0x0400	/* Access Denied IRQ Status */
+
+/* CAN_GIF Masks								 */
+#define	EWTIF		0x0001	/* TX Error Count IRQ Flag */
+#define	EWRIF		0x0002	/* RX Error Count IRQ Flag */
+#define	EPIF		0x0004	/* Error-Passive Mode IRQ Flag */
+#define	BOIF		0x0008	/* Bus Off IRQ Flag	 */
+#define	WUIF		0x0010	/* Wake-Up IRQ Flag	 */
+#define	UIAIF		0x0020	/* Access To Unimplemented Address IRQ Flag */
+#define	AAIF		0x0040	/* Abort Acknowledge IRQ Flag */
+#define	RMLIF		0x0080	/* RX Message Lost IRQ Flag */
+#define	UCEIF		0x0100	/* Universal Counter Overflow IRQ Flag */
+#define	EXTIF		0x0200	/* External Trigger Output IRQ Flag */
+#define	ADIF		0x0400	/* Access Denied IRQ Flag */
+
+/* CAN_UCCNF Masks								 */
+#define	UCCNF		0x000F	/* Universal Counter Mode */
+#define	UC_STAMP	0x0001	/*		Timestamp Mode */
+#define	UC_WDOG		0x0002	/*		Watchdog Mode */
+#define	UC_AUTOTX	0x0003	/*		Auto-Transmit Mode */
+#define	UC_ERROR	0x0006	/*		CAN Error Frame	Count */
+#define	UC_OVER		0x0007	/*		CAN Overload Frame Count */
+#define	UC_LOST		0x0008	/*		Arbitration Lost During	TX Count */
+#define	UC_AA		0x0009	/*		TX Abort Count */
+#define	UC_TA		0x000A	/*		TX Successful Count */
+#define	UC_REJECT	0x000B	/*		RX Message Rejected Count */
+#define	UC_RML		0x000C	/*		RX Message Lost	Count */
+#define	UC_RX		0x000D	/*		Total Successful RX Messages Count */
+#define	UC_RMP		0x000E	/*		Successful RX W/Matching ID Count */
+#define	UC_ALL		0x000F	/*		Correct	Message	On CAN Bus Line	Count */
+#define	UCRC		0x0020	/* Universal Counter Reload/Clear */
+#define	UCCT		0x0040	/* Universal Counter CAN Trigger */
+#define	UCE			0x0080	/* Universal Counter Enable */
+
+/* CAN_ESR Masks			 */
+#define	ACKE		0x0004	/* Acknowledge Error */
+#define	SER			0x0008	/* Stuff Error */
+#define	CRCE		0x0010	/* CRC Error */
+#define	SA0			0x0020	/* Stuck At Dominant Error */
+#define	BEF			0x0040	/* Bit Error Flag */
+#define	FER			0x0080	/* Form	Error Flag */
+
+/* CAN_EWR Masks					 */
+#define	EWLREC		0x00FF	/* RX Error Count Limit	(For EWRIS) */
+#define	EWLTEC		0xFF00	/* TX Error Count Limit	(For EWTIS) */
+
+#endif /* _DEF_BF539_H */
diff --git a/arch/blackfin/mach-bf538/include/mach/dma.h b/arch/blackfin/mach-bf538/include/mach/dma.h
new file mode 100644
index 0000000..eb05cac
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/dma.h
@@ -0,0 +1,41 @@
+/* mach/dma.h - arch-specific DMA defines
+ *
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _MACH_DMA_H_
+#define _MACH_DMA_H_
+
+#define CH_PPI			0
+#define CH_SPORT0_RX		1
+#define CH_SPORT0_TX		2
+#define CH_SPORT1_RX		3
+#define CH_SPORT1_TX		4
+#define CH_SPI0			5
+#define CH_UART0_RX		6
+#define CH_UART0_TX		7
+#define CH_SPORT2_RX		8
+#define CH_SPORT2_TX		9
+#define CH_SPORT3_RX		10
+#define CH_SPORT3_TX		11
+#define CH_SPI1			14
+#define CH_SPI2			15
+#define CH_UART1_RX		16
+#define CH_UART1_TX		17
+#define CH_UART2_RX		18
+#define CH_UART2_TX		19
+
+#define CH_MEM_STREAM0_DEST	20
+#define CH_MEM_STREAM0_SRC	21
+#define CH_MEM_STREAM1_DEST	22
+#define CH_MEM_STREAM1_SRC	23
+#define CH_MEM_STREAM2_DEST	24
+#define CH_MEM_STREAM2_SRC	25
+#define CH_MEM_STREAM3_DEST	26
+#define CH_MEM_STREAM3_SRC	27
+
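+/* No peripheral is assigned to DMA channels 12 and 13 on this part, so the
+ * peripheral map above jumps from channel 11 (SPORT3 TX) to 14 (SPI1). */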
+#define MAX_DMA_CHANNELS 28
+
+#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/gpio.h b/arch/blackfin/mach-bf538/include/mach/gpio.h
new file mode 100644
index 0000000..30f4f72
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/gpio.h
@@ -0,0 +1,79 @@
+/*
+ * File: arch/blackfin/mach-bf538/include/mach/gpio.h
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Copyright (C) 2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+
+#ifndef _MACH_GPIO_H_
+#define _MACH_GPIO_H_
+
+	/* FIXME:
+	 * For now, only PORTF GPIOs are supported.
+	 * Ports C, D and E are for peripheral use only.
+	 */
+#define MAX_BLACKFIN_GPIOS 16
+
+#define	GPIO_PF0	0	/* PF */
+#define	GPIO_PF1	1
+#define	GPIO_PF2	2
+#define	GPIO_PF3	3
+#define	GPIO_PF4	4
+#define	GPIO_PF5	5
+#define	GPIO_PF6	6
+#define	GPIO_PF7	7
+#define	GPIO_PF8	8
+#define	GPIO_PF9	9
+#define	GPIO_PF10	10
+#define	GPIO_PF11	11
+#define	GPIO_PF12	12
+#define	GPIO_PF13	13
+#define	GPIO_PF14	14
+#define	GPIO_PF15	15
+#define	GPIO_PC0	16	/* PC */
+#define	GPIO_PC1	17
+#define	GPIO_PC4	20
+#define	GPIO_PC5	21
+#define	GPIO_PC6	22
+#define	GPIO_PC7	23
+#define	GPIO_PC8	24
+#define	GPIO_PC9	25
+#define	GPIO_PD0	32	/* PD */
+#define	GPIO_PD1	33
+#define	GPIO_PD2	34
+#define	GPIO_PD3	35
+#define	GPIO_PD4	36
+#define	GPIO_PD5	37
+#define	GPIO_PD6	38
+#define	GPIO_PD7	39
+#define	GPIO_PD8	40
+#define	GPIO_PD9	41
+#define	GPIO_PD10      	42
+#define	GPIO_PD11      	43
+#define	GPIO_PD12      	44
+#define	GPIO_PD13      	45
+#define	GPIO_PE0	48	/* PE */
+#define	GPIO_PE1	49
+#define	GPIO_PE2	50
+#define	GPIO_PE3	51
+#define	GPIO_PE4	52
+#define	GPIO_PE5	53
+#define	GPIO_PE6	54
+#define	GPIO_PE7	55
+#define	GPIO_PE8	56
+#define	GPIO_PE9	57
+#define	GPIO_PE10      	58
+#define	GPIO_PE11      	59
+#define	GPIO_PE12      	60
+#define	GPIO_PE13      	61
+#define	GPIO_PE14      	62
+#define	GPIO_PE15      	63
+
+#define PORT_F GPIO_PF0
+#define PORT_C GPIO_PC0
+#define PORT_D GPIO_PD0
+#define PORT_E GPIO_PE0
+
+#endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf538/include/mach/irq.h b/arch/blackfin/mach-bf538/include/mach/irq.h
new file mode 100644
index 0000000..fdc87fe
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/irq.h
@@ -0,0 +1,211 @@
+/*
+ * file:	arch/blackfin/mach-bf538/include/mach/irq.h
+ * based on:	include/asm-blackfin/mach-bf537/irq.h
+ * author:	Michael Hennerich (michael.hennerich@analog.com)
+ *
+ * created:
+ * description:
+ *	system interrupt (irq) definitions
+ * rev:
+ *
+ * modified:
+ *
+ *
+ * bugs:         enter bugs at http://blackfin.uclinux.org/
+ *
+ * this program is free software; you can redistribute it and/or modify
+ * it under the terms of the gnu general public license as published by
+ * the free software foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * this program is distributed in the hope that it will be useful,
+ * but without any warranty; without even the implied warranty of
+ * merchantability or fitness for a particular purpose.  see the
+ * gnu general public license for more details.
+ *
+ * you should have received a copy of the gnu general public license
+ * along with this program; see the file copying.
+ * if not, write to the free software foundation,
+ * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ */
+
+#ifndef _BF538_IRQ_H_
+#define _BF538_IRQ_H_
+
+/*
+ * Interrupt source definitions
+ *
+ *	Event Source			Core Event Name		Number
+ *	Emulation (highest priority)	EMU			0
+ *	Reset				RST			1
+ *	NMI				NMI			2
+ *	Exception			EVX			3
+ *	Reserved			--			4
+ *	Hardware Error			IVHW			5
+ *	Core Timer			IVTMR			6
+ *	.....
+ *	Software Interrupt 1		IVG14			31
+ *	Software Interrupt 2
+ *	(lowest priority)		IVG15			32
+ */
+
+#define NR_PERI_INTS    (2 * 32)
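+/* The BF538/9 system interrupt controller provides two banks of 32
+ * peripheral interrupt sources each, hence 2 * 32 system IRQs. */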
+
+/* The ABSTRACT IRQ definitions */
+/** the first seven of the following are fixed, the rest you change if you need to **/
+#define IRQ_EMU			0	/* Emulation */
+#define IRQ_RST			1	/* reset */
+#define IRQ_NMI			2	/* Non Maskable */
+#define IRQ_EVX			3	/* Exception */
+#define IRQ_UNUSED		4	/* - unused interrupt */
+#define IRQ_HWERR		5	/* Hardware Error */
+#define IRQ_CORETMR		6	/* Core timer */
+
+#define BFIN_IRQ(x)		((x) + 7)
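+/* Peripheral (SIC) interrupts are numbered after the seven fixed core events
+ * above, so BFIN_IRQ(0) == 7 is the first system IRQ (IRQ_PLL_WAKEUP). */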
+
+#define IRQ_PLL_WAKEUP		BFIN_IRQ(0)	/* PLL Wakeup Interrupt */
+#define IRQ_DMA0_ERROR		BFIN_IRQ(1)	/* DMA Error 0 (generic) */
+#define IRQ_PPI_ERROR		BFIN_IRQ(2)	/* PPI Error */
+#define IRQ_SPORT0_ERROR	BFIN_IRQ(3)	/* SPORT0 Status */
+#define IRQ_SPORT1_ERROR	BFIN_IRQ(4)	/* SPORT1 Status */
+#define IRQ_SPI0_ERROR		BFIN_IRQ(5)	/* SPI0 Status */
+#define IRQ_UART0_ERROR		BFIN_IRQ(6)	/* UART0 Status */
+#define IRQ_RTC			BFIN_IRQ(7)	/* RTC */
+#define IRQ_PPI			BFIN_IRQ(8)	/* DMA Channel 0 (PPI) */
+#define IRQ_SPORT0_RX		BFIN_IRQ(9)	/* DMA 1 Channel (SPORT0 RX) */
+#define IRQ_SPORT0_TX		BFIN_IRQ(10)	/* DMA 2 Channel (SPORT0 TX) */
+#define IRQ_SPORT1_RX		BFIN_IRQ(11)	/* DMA 3 Channel (SPORT1 RX) */
+#define IRQ_SPORT1_TX		BFIN_IRQ(12)	/* DMA 4 Channel (SPORT1 TX) */
+#define IRQ_SPI0		BFIN_IRQ(13)	/* DMA 5 Channel (SPI0) */
+#define IRQ_UART0_RX		BFIN_IRQ(14)	/* DMA 6 Channel (UART0 RX) */
+#define IRQ_UART0_TX		BFIN_IRQ(15)	/* DMA 7 Channel (UART0 TX) */
+#define IRQ_TIMER0		BFIN_IRQ(16)	/* Timer 0 */
+#define IRQ_TIMER1		BFIN_IRQ(17)	/* Timer 1 */
+#define IRQ_TIMER2		BFIN_IRQ(18)	/* Timer 2 */
+#define IRQ_PORTF_INTA		BFIN_IRQ(19)	/* Port F Interrupt A */
+#define IRQ_PORTF_INTB		BFIN_IRQ(20)	/* Port F Interrupt B */
+#define IRQ_MEM0_DMA0		BFIN_IRQ(21)	/* MDMA0 Stream 0 */
+#define IRQ_MEM0_DMA1		BFIN_IRQ(22)	/* MDMA0 Stream 1 */
+#define IRQ_WATCH		BFIN_IRQ(23)	/* Software Watchdog Timer */
+#define IRQ_DMA1_ERROR		BFIN_IRQ(24)	/* DMA Error 1 (generic) */
+#define IRQ_SPORT2_ERROR	BFIN_IRQ(25)	/* SPORT2 Status */
+#define IRQ_SPORT3_ERROR	BFIN_IRQ(26)	/* SPORT3 Status */
+#define IRQ_SPI1_ERROR		BFIN_IRQ(28)	/* SPI1 Status */
+#define IRQ_SPI2_ERROR		BFIN_IRQ(29)	/* SPI2 Status */
+#define IRQ_UART1_ERROR		BFIN_IRQ(30)	/* UART1 Status */
+#define IRQ_UART2_ERROR		BFIN_IRQ(31)	/* UART2 Status */
+#define IRQ_CAN_ERROR		BFIN_IRQ(32)	/* CAN Status (Error) Interrupt */
+#define IRQ_SPORT2_RX		BFIN_IRQ(33)	/* DMA 8 Channel (SPORT2 RX) */
+#define IRQ_SPORT2_TX		BFIN_IRQ(34)	/* DMA 9 Channel (SPORT2 TX) */
+#define IRQ_SPORT3_RX		BFIN_IRQ(35)	/* DMA 10 Channel (SPORT3 RX) */
+#define IRQ_SPORT3_TX		BFIN_IRQ(36)	/* DMA 11 Channel (SPORT3 TX) */
+#define IRQ_SPI1		BFIN_IRQ(39)	/* DMA 14 Channel (SPI1) */
+#define IRQ_SPI2		BFIN_IRQ(40)	/* DMA 15 Channel (SPI2) */
+#define IRQ_UART1_RX		BFIN_IRQ(41)	/* DMA 16 Channel (UART1 RX) */
+#define IRQ_UART1_TX		BFIN_IRQ(42)	/* DMA 17 Channel (UART1 TX) */
+#define IRQ_UART2_RX		BFIN_IRQ(43)	/* DMA 18 Channel (UART2 RX) */
+#define IRQ_UART2_TX		BFIN_IRQ(44)	/* DMA 19 Channel (UART2 TX) */
+#define IRQ_TWI0		BFIN_IRQ(45)	/* TWI0 */
+#define IRQ_TWI1		BFIN_IRQ(46)	/* TWI1 */
+#define IRQ_CAN_RX		BFIN_IRQ(47)	/* CAN Receive Interrupt */
+#define IRQ_CAN_TX		BFIN_IRQ(48)	/* CAN Transmit Interrupt */
+#define IRQ_MEM1_DMA0		BFIN_IRQ(49)	/* MDMA1 Stream 0 */
+#define IRQ_MEM1_DMA1		BFIN_IRQ(50)	/* MDMA1 Stream 1 */
+
+#define SYS_IRQS		BFIN_IRQ(63)	/* 70 */
+
+#define IRQ_PF0         71
+#define IRQ_PF1         72
+#define IRQ_PF2         73
+#define IRQ_PF3         74
+#define IRQ_PF4         75
+#define IRQ_PF5         76
+#define IRQ_PF6         77
+#define IRQ_PF7         78
+#define IRQ_PF8         79
+#define IRQ_PF9         80
+#define IRQ_PF10        81
+#define IRQ_PF11        82
+#define IRQ_PF12        83
+#define IRQ_PF13        84
+#define IRQ_PF14        85
+#define IRQ_PF15        86
+
+#define GPIO_IRQ_BASE	IRQ_PF0
+
+#define NR_IRQS     (IRQ_PF15+1)
+
+#define IVG7            7
+#define IVG8            8
+#define IVG9            9
+#define IVG10           10
+#define IVG11           11
+#define IVG12           12
+#define IVG13           13
+#define IVG14           14
+#define IVG15           15
+
+/* IAR0 BIT FIELDS */
+#define IRQ_PLL_WAKEUP_POS	0
+#define IRQ_DMA0_ERROR_POS	4
+#define IRQ_PPI_ERROR_POS	8
+#define IRQ_SPORT0_ERROR_POS	12
+#define IRQ_SPORT1_ERROR_POS	16
+#define IRQ_SPI0_ERROR_POS	20
+#define IRQ_UART0_ERROR_POS	24
+#define IRQ_RTC_POS		28
+
+/* IAR1 BIT FIELDS */
+#define IRQ_PPI_POS		0
+#define IRQ_SPORT0_RX_POS	4
+#define IRQ_SPORT0_TX_POS	8
+#define IRQ_SPORT1_RX_POS	12
+#define IRQ_SPORT1_TX_POS	16
+#define IRQ_SPI0_POS		20
+#define IRQ_UART0_RX_POS	24
+#define IRQ_UART0_TX_POS	28
+
+/* IAR2 BIT FIELDS */
+#define IRQ_TIMER0_POS		0
+#define IRQ_TIMER1_POS		4
+#define IRQ_TIMER2_POS		8
+#define IRQ_PORTF_INTA_POS	12
+#define IRQ_PORTF_INTB_POS	16
+#define IRQ_MEM0_DMA0_POS	20
+#define IRQ_MEM0_DMA1_POS	24
+#define IRQ_WATCH_POS		28
+
+/* IAR3 BIT FIELDS */
+#define IRQ_DMA1_ERROR_POS	0
+#define IRQ_SPORT2_ERROR_POS	4
+#define IRQ_SPORT3_ERROR_POS	8
+#define IRQ_SPI1_ERROR_POS	16
+#define IRQ_SPI2_ERROR_POS	20
+#define IRQ_UART1_ERROR_POS	24
+#define IRQ_UART2_ERROR_POS	28
+
+/* IAR4 BIT FIELDS */
+#define IRQ_CAN_ERROR_POS	0
+#define IRQ_SPORT2_RX_POS	4
+#define IRQ_SPORT2_TX_POS	8
+#define IRQ_SPORT3_RX_POS	12
+#define IRQ_SPORT3_TX_POS	16
+#define IRQ_SPI1_POS		28
+
+/* IAR5 BIT FIELDS */
+#define IRQ_SPI2_POS		0
+#define IRQ_UART1_RX_POS	4
+#define IRQ_UART1_TX_POS	8
+#define IRQ_UART2_RX_POS	12
+#define IRQ_UART2_TX_POS	16
+#define IRQ_TWI0_POS		20
+#define IRQ_TWI1_POS		24
+#define IRQ_CAN_RX_POS		28
+
+/* IAR6 BIT FIELDS */
+#define IRQ_CAN_TX_POS		0
+#define IRQ_MEM1_DMA0_POS	4
+#define IRQ_MEM1_DMA1_POS	8
+#endif				/* _BF538_IRQ_H_ */
diff --git a/arch/blackfin/mach-bf538/include/mach/mem_map.h b/arch/blackfin/mach-bf538/include/mach/mem_map.h
new file mode 100644
index 0000000..7681196
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/mem_map.h
@@ -0,0 +1,113 @@
+/*
+ * File:         arch/blackfin/mach-bf538/include/mach/mem_map.h
+ * Based on:
+ * Author:
+ *
+ * Created:
+ * Description:
+ *
+ * Rev:
+ *
+ * Modified:
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _MEM_MAP_538_H_
+#define _MEM_MAP_538_H_
+
+#define COREMMR_BASE           0xFFE00000	 /* Core MMRs */
+#define SYSMMR_BASE            0xFFC00000	 /* System MMRs */
+
+/* Async Memory Banks */
+#define ASYNC_BANK3_BASE	0x20300000	 /* Async Bank 3 */
+#define ASYNC_BANK3_SIZE	0x00100000	/* 1M */
+#define ASYNC_BANK2_BASE	0x20200000	 /* Async Bank 2 */
+#define ASYNC_BANK2_SIZE	0x00100000	/* 1M */
+#define ASYNC_BANK1_BASE	0x20100000	 /* Async Bank 1 */
+#define ASYNC_BANK1_SIZE	0x00100000	/* 1M */
+#define ASYNC_BANK0_BASE	0x20000000	 /* Async Bank 0 */
+#define ASYNC_BANK0_SIZE	0x00100000	/* 1M */
+
+/* Boot ROM Memory */
+
+#define BOOT_ROM_START		0xEF000000
+#define BOOT_ROM_LENGTH		0x400
+
+/* Level 1 Memory */
+
+#ifdef CONFIG_BFIN_ICACHE
+#define BFIN_ICACHESIZE	(16*1024)
+#else
+#define BFIN_ICACHESIZE	(0*1024)
+#endif
+
+/* Memory Map for ADSP-BF538/9 processors */
+
+#define L1_CODE_START       0xFFA00000
+#define L1_DATA_A_START     0xFF800000
+#define L1_DATA_B_START     0xFF900000
+
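+/* When the instruction cache is enabled, the upper 16K (0x4000) of L1
+ * instruction SRAM is used as cache and is subtracted from the usable
+ * L1 code length below. */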
+#ifdef CONFIG_BFIN_ICACHE
+#define L1_CODE_LENGTH      (0x14000 - 0x4000)
+#else
+#define L1_CODE_LENGTH      0x14000
+#endif
+
+#ifdef CONFIG_BFIN_DCACHE
+
+#ifdef CONFIG_BFIN_DCACHE_BANKA
+#define DMEM_CNTR (ACACHE_BSRAM | ENDCPLB | PORT_PREF0)
+#define L1_DATA_A_LENGTH      (0x8000 - 0x4000)
+#define L1_DATA_B_LENGTH      0x8000
+#define BFIN_DCACHESIZE	(16*1024)
+#define BFIN_DSUPBANKS	1
+#else
+#define DMEM_CNTR (ACACHE_BCACHE | ENDCPLB | PORT_PREF0)
+#define L1_DATA_A_LENGTH      (0x8000 - 0x4000)
+#define L1_DATA_B_LENGTH      (0x8000 - 0x4000)
+#define BFIN_DCACHESIZE	(32*1024)
+#define BFIN_DSUPBANKS	2
+#endif
+
+#else
+#define DMEM_CNTR (ASRAM_BSRAM | ENDCPLB | PORT_PREF0)
+#define L1_DATA_A_LENGTH      0x8000
+#define L1_DATA_B_LENGTH      0x8000
+#define BFIN_DCACHESIZE	(0*1024)
+#define BFIN_DSUPBANKS	0
+#endif /*CONFIG_BFIN_DCACHE*/
+
+
+/* Level 2 Memory - none */
+
+#define L2_START	0
+#define L2_LENGTH	0
+
+/* Scratch Pad Memory */
+
+#define L1_SCRATCH_START	0xFFB00000
+#define L1_SCRATCH_LENGTH	0x1000
+
+#define GET_PDA_SAFE(preg)		\
+	preg.l = _cpu_pda;		\
+	preg.h = _cpu_pda;
+
+#define GET_PDA(preg, dreg)	GET_PDA_SAFE(preg)
+
+#endif				/* _MEM_MAP_538_H_ */
diff --git a/arch/blackfin/mach-bf538/include/mach/portmux.h b/arch/blackfin/mach-bf538/include/mach/portmux.h
new file mode 100644
index 0000000..1e031b5
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/portmux.h
@@ -0,0 +1,106 @@
+#ifndef _MACH_PORTMUX_H_
+#define _MACH_PORTMUX_H_
+
+#define MAX_RESOURCES	MAX_BLACKFIN_GPIOS
+
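+/* Signals marked P_DONTCARE use dedicated pins on the BF538/9 and need no
+ * port-mux setup; only the Port C/D/E/F signals further below are muxed. */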
+#define P_TMR2		(P_DONTCARE)
+#define P_TMR1		(P_DONTCARE)
+#define P_TMR0		(P_DONTCARE)
+#define P_TMRCLK	(P_DONTCARE)
+#define P_PPI0_CLK	(P_DONTCARE)
+#define P_PPI0_FS1	(P_DONTCARE)
+#define P_PPI0_FS2	(P_DONTCARE)
+
+#define P_TWI0_SCL	(P_DONTCARE)
+#define P_TWI0_SDA	(P_DONTCARE)
+#define P_TWI1_SCL	(P_DONTCARE)
+#define P_TWI1_SDA	(P_DONTCARE)
+
+#define P_SPORT1_TSCLK	(P_DONTCARE)
+#define P_SPORT1_RSCLK	(P_DONTCARE)
+#define P_SPORT0_TSCLK	(P_DONTCARE)
+#define P_SPORT0_RSCLK	(P_DONTCARE)
+#define P_SPORT1_DRSEC	(P_DONTCARE)
+#define P_SPORT1_RFS	(P_DONTCARE)
+#define P_SPORT1_DTPRI	(P_DONTCARE)
+#define P_SPORT1_DTSEC	(P_DONTCARE)
+#define P_SPORT1_TFS	(P_DONTCARE)
+#define P_SPORT1_DRPRI	(P_DONTCARE)
+#define P_SPORT0_DRSEC	(P_DONTCARE)
+#define P_SPORT0_RFS	(P_DONTCARE)
+#define P_SPORT0_DTPRI	(P_DONTCARE)
+#define P_SPORT0_DTSEC	(P_DONTCARE)
+#define P_SPORT0_TFS	(P_DONTCARE)
+#define P_SPORT0_DRPRI	(P_DONTCARE)
+
+#define P_UART0_RX	(P_DONTCARE)
+#define P_UART0_TX	(P_DONTCARE)
+
+#define P_SPI0_MOSI	(P_DONTCARE)
+#define P_SPI0_MISO	(P_DONTCARE)
+#define P_SPI0_SCK	(P_DONTCARE)
+
+#define P_PPI0_D0	(P_DONTCARE)
+#define P_PPI0_D1	(P_DONTCARE)
+#define P_PPI0_D2	(P_DONTCARE)
+#define P_PPI0_D3	(P_DONTCARE)
+
+#define P_CAN0_TX	(P_DEFINED | P_IDENT(GPIO_PC0))
+#define P_CAN0_RX	(P_DEFINED | P_IDENT(GPIO_PC1))
+
+#define P_SPI1_MOSI	(P_DEFINED | P_IDENT(GPIO_PD0))
+#define P_SPI1_MISO	(P_DEFINED | P_IDENT(GPIO_PD1))
+#define P_SPI1_SCK	(P_DEFINED | P_IDENT(GPIO_PD2))
+#define P_SPI1_SS	(P_DEFINED | P_IDENT(GPIO_PD3))
+#define P_SPI1_SSEL1	(P_DEFINED | P_IDENT(GPIO_PD4))
+#define P_SPI2_MOSI	(P_DEFINED | P_IDENT(GPIO_PD5))
+#define P_SPI2_MISO	(P_DEFINED | P_IDENT(GPIO_PD6))
+#define P_SPI2_SCK	(P_DEFINED | P_IDENT(GPIO_PD7))
+#define P_SPI2_SS	(P_DEFINED | P_IDENT(GPIO_PD8))
+#define P_SPI2_SSEL1	(P_DEFINED | P_IDENT(GPIO_PD9))
+#define P_UART1_RX	(P_DEFINED | P_IDENT(GPIO_PD10))
+#define P_UART1_TX	(P_DEFINED | P_IDENT(GPIO_PD11))
+#define P_UART2_RX	(P_DEFINED | P_IDENT(GPIO_PD12))
+#define P_UART2_TX	(P_DEFINED | P_IDENT(GPIO_PD13))
+
+#define P_SPORT2_RSCLK	(P_DEFINED | P_IDENT(GPIO_PE0))
+#define P_SPORT2_RFS	(P_DEFINED | P_IDENT(GPIO_PE1))
+#define P_SPORT2_DRPRI	(P_DEFINED | P_IDENT(GPIO_PE2))
+#define P_SPORT2_DRSEC	(P_DEFINED | P_IDENT(GPIO_PE3))
+#define P_SPORT2_TSCLK	(P_DEFINED | P_IDENT(GPIO_PE4))
+#define P_SPORT2_TFS	(P_DEFINED | P_IDENT(GPIO_PE5))
+#define P_SPORT2_DTPRI	(P_DEFINED | P_IDENT(GPIO_PE6))
+#define P_SPORT2_DTSEC	(P_DEFINED | P_IDENT(GPIO_PE7))
+#define P_SPORT3_RSCLK	(P_DEFINED | P_IDENT(GPIO_PE8))
+#define P_SPORT3_RFS	(P_DEFINED | P_IDENT(GPIO_PE9))
+#define P_SPORT3_DRPRI	(P_DEFINED | P_IDENT(GPIO_PE10))
+#define P_SPORT3_DRSEC	(P_DEFINED | P_IDENT(GPIO_PE11))
+#define P_SPORT3_TSCLK	(P_DEFINED | P_IDENT(GPIO_PE12))
+#define P_SPORT3_TFS	(P_DEFINED | P_IDENT(GPIO_PE13))
+#define P_SPORT3_DTPRI	(P_DEFINED | P_IDENT(GPIO_PE14))
+#define P_SPORT3_DTSEC	(P_DEFINED | P_IDENT(GPIO_PE15))
+
+#define P_PPI0_FS3	(P_DEFINED | P_IDENT(GPIO_PF3))
+#define P_PPI0_D15	(P_DEFINED | P_IDENT(GPIO_PF4))
+#define P_PPI0_D14	(P_DEFINED | P_IDENT(GPIO_PF5))
+#define P_PPI0_D13	(P_DEFINED | P_IDENT(GPIO_PF6))
+#define P_PPI0_D12	(P_DEFINED | P_IDENT(GPIO_PF7))
+#define P_PPI0_D11	(P_DEFINED | P_IDENT(GPIO_PF8))
+#define P_PPI0_D10	(P_DEFINED | P_IDENT(GPIO_PF9))
+#define P_PPI0_D9	(P_DEFINED | P_IDENT(GPIO_PF10))
+#define P_PPI0_D8	(P_DEFINED | P_IDENT(GPIO_PF11))
+
+#define P_PPI0_D4	(P_DEFINED | P_IDENT(GPIO_PF15))
+#define P_PPI0_D5	(P_DEFINED | P_IDENT(GPIO_PF14))
+#define P_PPI0_D6	(P_DEFINED | P_IDENT(GPIO_PF13))
+#define P_PPI0_D7	(P_DEFINED | P_IDENT(GPIO_PF12))
+#define P_SPI0_SSEL7	(P_DEFINED | P_IDENT(GPIO_PF7))
+#define P_SPI0_SSEL6	(P_DEFINED | P_IDENT(GPIO_PF6))
+#define P_SPI0_SSEL5	(P_DEFINED | P_IDENT(GPIO_PF5))
+#define P_SPI0_SSEL4	(P_DEFINED | P_IDENT(GPIO_PF4))
+#define P_SPI0_SSEL3	(P_DEFINED | P_IDENT(GPIO_PF3))
+#define P_SPI0_SSEL2	(P_DEFINED | P_IDENT(GPIO_PF2))
+#define P_SPI0_SSEL1	(P_DEFINED | P_IDENT(GPIO_PF1))
+#define P_SPI0_SS	(P_DEFINED | P_IDENT(GPIO_PF0))
+
+#endif /* _MACH_PORTMUX_H_ */
diff --git a/arch/blackfin/mach-bf538/ints-priority.c b/arch/blackfin/mach-bf538/ints-priority.c
new file mode 100644
index 0000000..70d17e5
--- /dev/null
+++ b/arch/blackfin/mach-bf538/ints-priority.c
@@ -0,0 +1,94 @@
+/*
+ * File:         arch/blackfin/mach-bf538/ints-priority.c
+ * Based on:     arch/blackfin/mach-bf533/ints-priority.c
+ * Author:       Michael Hennerich
+ *
+ * Created:
+ * Description:  Set up the interrupt priorities
+ *
+ * Modified:
+ *               Copyright 2008 Analog Devices Inc.
+ *
+ * Bugs:         Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <asm/blackfin.h>
+
+void __init program_IAR(void)
+{
+
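+	/* Each SIC_IARx register packs eight 4-bit fields (one per peripheral
+	 * interrupt); the field value is the target IVG level minus 7, so
+	 * IVG7 is encoded as 0. */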
+	/* Program the IAR0 Register with the configured priority */
+	bfin_write_SIC_IAR0(((CONFIG_IRQ_PLL_WAKEUP - 7) << IRQ_PLL_WAKEUP_POS) |
+			((CONFIG_IRQ_DMA0_ERROR - 7) << IRQ_DMA0_ERROR_POS) |
+			((CONFIG_IRQ_PPI_ERROR - 7) << IRQ_PPI_ERROR_POS) |
+			((CONFIG_IRQ_SPORT0_ERROR - 7) << IRQ_SPORT0_ERROR_POS) |
+			((CONFIG_IRQ_SPORT1_ERROR - 7) << IRQ_SPORT1_ERROR_POS) |
+			((CONFIG_IRQ_SPI0_ERROR - 7) << IRQ_SPI0_ERROR_POS) |
+			((CONFIG_IRQ_UART0_ERROR - 7) << IRQ_UART0_ERROR_POS) |
+			((CONFIG_IRQ_RTC - 7) << IRQ_RTC_POS));
+
+	bfin_write_SIC_IAR1(((CONFIG_IRQ_PPI - 7) << IRQ_PPI_POS) |
+			((CONFIG_IRQ_SPORT0_RX - 7) << IRQ_SPORT0_RX_POS) |
+			((CONFIG_IRQ_SPORT0_TX - 7) << IRQ_SPORT0_TX_POS) |
+			((CONFIG_IRQ_SPORT1_RX - 7) << IRQ_SPORT1_RX_POS) |
+			((CONFIG_IRQ_SPORT1_TX - 7) << IRQ_SPORT1_TX_POS) |
+			((CONFIG_IRQ_SPI0 - 7) << IRQ_SPI0_POS) |
+			((CONFIG_IRQ_UART0_RX - 7) << IRQ_UART0_RX_POS) |
+			((CONFIG_IRQ_UART0_TX - 7) << IRQ_UART0_TX_POS));
+
+	bfin_write_SIC_IAR2(((CONFIG_IRQ_TIMER0 - 7) << IRQ_TIMER0_POS) |
+			((CONFIG_IRQ_TIMER1 - 7) << IRQ_TIMER1_POS) |
+			((CONFIG_IRQ_TIMER2 - 7) << IRQ_TIMER2_POS) |
+			((CONFIG_IRQ_PORTF_INTA - 7) << IRQ_PORTF_INTA_POS) |
+			((CONFIG_IRQ_PORTF_INTB - 7) << IRQ_PORTF_INTB_POS) |
+			((CONFIG_IRQ_MEM0_DMA0 - 7) << IRQ_MEM0_DMA0_POS) |
+			((CONFIG_IRQ_MEM0_DMA1 - 7) << IRQ_MEM0_DMA1_POS) |
+			((CONFIG_IRQ_WATCH - 7) << IRQ_WATCH_POS));
+
+	bfin_write_SIC_IAR3(((CONFIG_IRQ_DMA1_ERROR - 7) << IRQ_DMA1_ERROR_POS) |
+			((CONFIG_IRQ_SPORT2_ERROR - 7) << IRQ_SPORT2_ERROR_POS) |
+			((CONFIG_IRQ_SPORT3_ERROR - 7) << IRQ_SPORT3_ERROR_POS) |
+			((CONFIG_IRQ_SPI1_ERROR - 7) << IRQ_SPI1_ERROR_POS) |
+			((CONFIG_IRQ_SPI2_ERROR - 7) << IRQ_SPI2_ERROR_POS) |
+			((CONFIG_IRQ_UART1_ERROR - 7) << IRQ_UART1_ERROR_POS) |
+			((CONFIG_IRQ_UART2_ERROR - 7) << IRQ_UART2_ERROR_POS));
+
+	bfin_write_SIC_IAR4(((CONFIG_IRQ_CAN_ERROR - 7) << IRQ_CAN_ERROR_POS) |
+			((CONFIG_IRQ_SPORT2_RX - 7) << IRQ_SPORT2_RX_POS) |
+			((CONFIG_IRQ_SPORT2_TX - 7) << IRQ_SPORT2_TX_POS) |
+			((CONFIG_IRQ_SPORT3_RX - 7) << IRQ_SPORT3_RX_POS) |
+			((CONFIG_IRQ_SPORT3_TX - 7) << IRQ_SPORT3_TX_POS) |
+			((CONFIG_IRQ_SPI1 - 7) << IRQ_SPI1_POS));
+
+	bfin_write_SIC_IAR5(((CONFIG_IRQ_SPI2 - 7) << IRQ_SPI2_POS) |
+			((CONFIG_IRQ_UART1_RX - 7) << IRQ_UART1_RX_POS) |
+			((CONFIG_IRQ_UART1_TX - 7) << IRQ_UART1_TX_POS) |
+			((CONFIG_IRQ_UART2_RX - 7) << IRQ_UART2_RX_POS) |
+			((CONFIG_IRQ_UART2_TX - 7) << IRQ_UART2_TX_POS) |
+			((CONFIG_IRQ_TWI0 - 7) << IRQ_TWI0_POS) |
+			((CONFIG_IRQ_TWI1 - 7) << IRQ_TWI1_POS) |
+			((CONFIG_IRQ_CAN_RX - 7) << IRQ_CAN_RX_POS));
+
+	bfin_write_SIC_IAR6(((CONFIG_IRQ_CAN_TX - 7) << IRQ_CAN_TX_POS) |
+			((CONFIG_IRQ_MEM1_DMA0 - 7) << IRQ_MEM1_DMA0_POS) |
+			((CONFIG_IRQ_MEM1_DMA1 - 7) << IRQ_MEM1_DMA1_POS));
+
+	SSYNC();
+}
diff --git a/arch/blackfin/mach-bf548/Kconfig b/arch/blackfin/mach-bf548/Kconfig
index 1bfcd8f..dcf6571 100644
--- a/arch/blackfin/mach-bf548/Kconfig
+++ b/arch/blackfin/mach-bf548/Kconfig
@@ -250,7 +250,7 @@
 	default 11
 config IRQ_TIMER0
 	int "IRQ_TIMER0"
-	default 11
+	default 8
 config IRQ_TIMER1
 	int "IRQ_TIMER1"
 	default 11
diff --git a/arch/blackfin/mach-bf548/Makefile b/arch/blackfin/mach-bf548/Makefile
index 68e5478..56994b6 100644
--- a/arch/blackfin/mach-bf548/Makefile
+++ b/arch/blackfin/mach-bf548/Makefile
@@ -2,6 +2,4 @@
 # arch/blackfin/mach-bf537/Makefile
 #
 
-extra-y := head.o
-
 obj-y := ints-priority.o dma.o
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index 24192aa..f53ad68 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -32,6 +32,7 @@
 #include <linux/platform_device.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
@@ -42,6 +43,7 @@
 #include <asm/gpio.h>
 #include <asm/nand.h>
 #include <asm/portmux.h>
+#include <asm/bfin_sdh.h>
 #include <mach/bf54x_keys.h>
 #include <asm/dpmc.h>
 #include <linux/input.h>
@@ -186,44 +188,107 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
 #endif
 #ifdef CONFIG_BFIN_SIR1
+static struct resource bfin_sir1_resources[] = {
 	{
 		.start = 0xFFC02000,
 		.end = 0xFFC020FF,
 		.flags = IORESOURCE_MEM,
 	},
+	{
+		.start = IRQ_UART1_RX,
+		.end = IRQ_UART1_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_RX,
+		.end = CH_UART1_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+static struct platform_device bfin_sir1_device = {
+	.name = "bfin_sir",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
+	.resource = bfin_sir1_resources,
+};
 #endif
 #ifdef CONFIG_BFIN_SIR2
+static struct resource bfin_sir2_resources[] = {
 	{
 		.start = 0xFFC02100,
 		.end = 0xFFC021FF,
 		.flags = IORESOURCE_MEM,
 	},
+	{
+		.start = IRQ_UART2_RX,
+		.end = IRQ_UART2_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART2_RX,
+		.end = CH_UART2_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+static struct platform_device bfin_sir2_device = {
+	.name = "bfin_sir",
+	.id = 2,
+	.num_resources = ARRAY_SIZE(bfin_sir2_resources),
+	.resource = bfin_sir2_resources,
+};
 #endif
 #ifdef CONFIG_BFIN_SIR3
+static struct resource bfin_sir3_resources[] = {
 	{
 		.start = 0xFFC03100,
 		.end = 0xFFC031FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART3_RX,
+		.end = IRQ_UART3_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART3_RX,
+		.end = CH_UART3_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
-
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir3_device = {
 	.name = "bfin_sir",
-	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.id = 3,
+	.num_resources = ARRAY_SIZE(bfin_sir3_resources),
+	.resource = bfin_sir3_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
 static struct resource smsc911x_resources[] = {
@@ -271,8 +336,8 @@
 	.dyn_fifo	= 0,
 	.soft_con	= 1,
 	.dma		= 1,
-	.num_eps	= 7,
-	.dma_channels	= 7,
+	.num_eps	= 8,
+	.dma_channels	= 8,
 	.gpio_vrsel	= GPIO_PH6,
 };
 
@@ -302,6 +367,19 @@
 };
 #endif
 
+static struct resource bfin_gpios_resources = {
+	.start = 0,
+	.end   = MAX_BLACKFIN_GPIOS - 1,
+	.flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device bfin_gpios_device = {
+	.name = "simple-gpio",
+	.id = -1,
+	.num_resources = 1,
+	.resource = &bfin_gpios_resources,
+};
+
 #if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE)
 static struct resource bfin_atapi_resources[] = {
 	{
@@ -372,9 +450,58 @@
 #endif
 
 #if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE)
+static struct bfin_sd_host bfin_sdh_data = {
+	.dma_chan = CH_SDH,
+	.irq_int0 = IRQ_SDH_MASK0,
+	.pin_req = {P_SD_D0, P_SD_D1, P_SD_D2, P_SD_D3, P_SD_CLK, P_SD_CMD, 0},
+};
+
 static struct platform_device bf54x_sdh_device = {
 	.name = "bfin-sdh",
 	.id = 0,
+	.dev = {
+		.platform_data = &bfin_sdh_data,
+	},
+};
+#endif
+
+#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+static struct mtd_partition para_partitions[] = {
+	{
+		.name       = "bootloader(nor)",
+		.size       = 0x40000,
+		.offset     = 0,
+	}, {
+		.name       = "linux kernel(nor)",
+		.size       = 0x400000,
+		.offset     = MTDPART_OFS_APPEND,
+	}, {
+		.name       = "file system(nor)",
+		.size       = MTDPART_SIZ_FULL,
+		.offset     = MTDPART_OFS_APPEND,
+	}
+};
+
+static struct physmap_flash_data para_flash_data = {
+	.width      = 2,
+	.parts      = para_partitions,
+	.nr_parts   = ARRAY_SIZE(para_partitions),
+};
+
+static struct resource para_flash_resource = {
+	.start = 0x20000000,
+	.end   = 0x207fffff,
+	.flags = IORESOURCE_MEM,
+};
+
+static struct platform_device para_flash_device = {
+	.name          = "physmap-flash",
+	.id            = 0,
+	.dev = {
+		.platform_data = &para_flash_data,
+	},
+	.num_resources = 1,
+	.resource      = &para_flash_resource,
 };
 #endif
 
@@ -642,7 +769,18 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#ifdef CONFIG_BFIN_SIR1
+	&bfin_sir1_device,
+#endif
+#ifdef CONFIG_BFIN_SIR2
+	&bfin_sir2_device,
+#endif
+#ifdef CONFIG_BFIN_SIR3
+	&bfin_sir3_device,
+#endif
 #endif
 
 #if defined(CONFIG_FB_BF54X_LQ043) || defined(CONFIG_FB_BF54X_LQ043_MODULE)
@@ -679,7 +817,7 @@
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
-/*	&i2c_bfin_twi0_device, */
+	&i2c_bfin_twi0_device,
 #if !defined(CONFIG_BF542)
 	&i2c_bfin_twi1_device,
 #endif
@@ -688,6 +826,12 @@
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
 	&bfin_device_gpiokeys,
 #endif
+
+#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+	&para_flash_device,
+#endif
+
+	&bfin_gpios_device,
 };
 
 static int __init cm_bf548_init(void)
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 5288187..309c160 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -45,6 +45,7 @@
 #include <asm/nand.h>
 #include <asm/dpmc.h>
 #include <asm/portmux.h>
+#include <asm/bfin_sdh.h>
 #include <mach/bf54x_keys.h>
 #include <linux/input.h>
 #include <linux/spi/ad7877.h>
@@ -52,16 +53,16 @@
 /*
  * Name the Board for the /proc/cpuinfo
  */
-const char bfin_board_name[] = "ADSP-BF548-EZKIT";
+const char bfin_board_name[] = "ADI BF548-EZKIT";
 
 /*
  *  Driver needs to know address, irq and flag pin.
  */
 
 #if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
-static struct resource bfin_isp1761_resources[] = {
+#include <linux/usb/isp1760.h>
+static struct resource bfin_isp1760_resources[] = {
 	[0] = {
-		.name	= "isp1761-regs",
 		.start  = 0x2C0C0000,
 		.end    = 0x2C0C0000 + 0xfffff,
 		.flags  = IORESOURCE_MEM,
@@ -73,32 +74,25 @@
 	},
 };
 
-static struct platform_device bfin_isp1761_device = {
-	.name           = "isp1761",
+static struct isp1760_platform_data isp1760_priv = {
+	.is_isp1761 = 0,
+	.port1_disable = 0,
+	.bus_width_16 = 1,
+	.port1_otg = 0,
+	.analog_oc = 0,
+	.dack_polarity_high = 0,
+	.dreq_polarity_high = 0,
+};
+
+static struct platform_device bfin_isp1760_device = {
+	.name           = "isp1760-hcd",
 	.id             = 0,
-	.num_resources  = ARRAY_SIZE(bfin_isp1761_resources),
-	.resource       = bfin_isp1761_resources,
+	.dev = {
+		.platform_data = &isp1760_priv,
+	},
+	.num_resources  = ARRAY_SIZE(bfin_isp1760_resources),
+	.resource       = bfin_isp1760_resources,
 };
-
-static struct platform_device *bfin_isp1761_devices[] = {
-	&bfin_isp1761_device,
-};
-
-int __init bfin_isp1761_init(void)
-{
-	unsigned int num_devices = ARRAY_SIZE(bfin_isp1761_devices);
-
-	printk(KERN_INFO "%s(): registering device resources\n", __func__);
-	set_irq_type(bfin_isp1761_resources[1].start, IRQF_TRIGGER_FALLING);
-
-	return platform_add_devices(bfin_isp1761_devices, num_devices);
-}
-
-void __exit bfin_isp1761_exit(void)
-{
-	platform_device_unregister(&bfin_isp1761_device);
-}
-arch_initcall(bfin_isp1761_init);
 #endif
 
 #if defined(CONFIG_FB_BF54X_LQ043) || defined(CONFIG_FB_BF54X_LQ043_MODULE)
@@ -262,44 +256,107 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
 #endif
 #ifdef CONFIG_BFIN_SIR1
+static struct resource bfin_sir1_resources[] = {
 	{
 		.start = 0xFFC02000,
 		.end = 0xFFC020FF,
 		.flags = IORESOURCE_MEM,
 	},
+	{
+		.start = IRQ_UART1_RX,
+		.end = IRQ_UART1_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_RX,
+		.end = CH_UART1_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+static struct platform_device bfin_sir1_device = {
+	.name = "bfin_sir",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
+	.resource = bfin_sir1_resources,
+};
 #endif
 #ifdef CONFIG_BFIN_SIR2
+static struct resource bfin_sir2_resources[] = {
 	{
 		.start = 0xFFC02100,
 		.end = 0xFFC021FF,
 		.flags = IORESOURCE_MEM,
 	},
+	{
+		.start = IRQ_UART2_RX,
+		.end = IRQ_UART2_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART2_RX,
+		.end = CH_UART2_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+static struct platform_device bfin_sir2_device = {
+	.name = "bfin_sir",
+	.id = 2,
+	.num_resources = ARRAY_SIZE(bfin_sir2_resources),
+	.resource = bfin_sir2_resources,
+};
 #endif
 #ifdef CONFIG_BFIN_SIR3
+static struct resource bfin_sir3_resources[] = {
 	{
 		.start = 0xFFC03100,
 		.end = 0xFFC031FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART3_RX,
+		.end = IRQ_UART3_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART3_RX,
+		.end = CH_UART3_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
-
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir3_device = {
 	.name = "bfin_sir",
-	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.id = 3,
+	.num_resources = ARRAY_SIZE(bfin_sir3_resources),
+	.resource = bfin_sir3_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
 static struct resource smsc911x_resources[] = {
@@ -347,8 +404,8 @@
 	.dyn_fifo	= 0,
 	.soft_con	= 1,
 	.dma		= 1,
-	.num_eps	= 7,
-	.dma_channels	= 7,
+	.num_eps	= 8,
+	.dma_channels	= 8,
 	.gpio_vrsel	= GPIO_PE7,
 };
 
@@ -448,9 +505,19 @@
 #endif
 
 #if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE)
+
+static struct bfin_sd_host bfin_sdh_data = {
+	.dma_chan = CH_SDH,
+	.irq_int0 = IRQ_SDH_MASK0,
+	.pin_req = {P_SD_D0, P_SD_D1, P_SD_D2, P_SD_D3, P_SD_CLK, P_SD_CMD, 0},
+};
+
 static struct platform_device bf54x_sdh_device = {
 	.name = "bfin-sdh",
 	.id = 0,
+	.dev = {
+		.platform_data = &bfin_sdh_data,
+	},
 };
 #endif
 
@@ -589,7 +656,7 @@
 {
 	.modalias		= "ad7877",
 	.platform_data		= &bfin_ad7877_ts_info,
-	.irq			= IRQ_PJ11,	/* newer boards (Rev 1.4+) use IRQ_PB4 */
+	.irq			= IRQ_PB4,	/* old boards (<=Rev 1.3) use IRQ_PJ11 */
 	.max_speed_hz		= 12500000,     /* max spi clock (SCK) speed in HZ */
 	.bus_num		= 0,
 	.chip_select  		= 2,
@@ -812,7 +879,18 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#ifdef CONFIG_BFIN_SIR1
+	&bfin_sir1_device,
+#endif
+#ifdef CONFIG_BFIN_SIR2
+	&bfin_sir2_device,
+#endif
+#ifdef CONFIG_BFIN_SIR3
+	&bfin_sir3_device,
+#endif
 #endif
 
 #if defined(CONFIG_FB_BF54X_LQ043) || defined(CONFIG_FB_BF54X_LQ043_MODULE)
@@ -827,6 +905,10 @@
 	&musb_device,
 #endif
 
+#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
+	&bfin_isp1760_device,
+#endif
+
 #if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE)
 	&bfin_atapi_device,
 #endif
diff --git a/arch/blackfin/mach-bf548/dma.c b/arch/blackfin/mach-bf548/dma.c
index 74730eb..5359806 100644
--- a/arch/blackfin/mach-bf548/dma.c
+++ b/arch/blackfin/mach-bf548/dma.c
@@ -32,7 +32,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_BLACKFIN_DMA_CHANNEL] = {
+struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf548/head.S b/arch/blackfin/mach-bf548/head.S
deleted file mode 100644
index 93b361d..0000000
--- a/arch/blackfin/mach-bf548/head.S
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * File:         arch/blackfin/mach-bf548/head.S
- * Based on:     arch/blackfin/mach-bf537/head.S
- * Author:       Jeff Dionne <jeff@uclinux.org> COPYRIGHT 1998 D. Jeff Dionne
- *
- * Created:      1998
- * Description:  Startup code for Blackfin BF548
- *
- * Modified:
- *               Copyright 2004-2007 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/blackfin.h>
-#ifdef CONFIG_BFIN_KERNEL_CLOCK
-#include <asm/clocks.h>
-#include <mach/mem_init.h>
-#endif
-
-.section .l1.text
-#ifdef CONFIG_BFIN_KERNEL_CLOCK
-ENTRY(_start_dma_code)
-
-	/* Enable PHY CLK buffer output */
-	p0.h = hi(VR_CTL);
-	p0.l = lo(VR_CTL);
-	r0.l = w[p0];
-	bitset(r0, 14);
-	w[p0] = r0.l;
-	ssync;
-
-	p0.h = hi(SIC_IWR0);
-	p0.l = lo(SIC_IWR0);
-	r0.l = 0x1;
-	r0.h = 0x0;
-	[p0] = r0;
-	SSYNC;
-
-	/*
-	 *  Set PLL_CTL
-	 *   - [14:09] = MSEL[5:0] : CLKIN / VCO multiplication factors
-	 *   - [8]     = BYPASS    : BYPASS the PLL, run CLKIN into CCLK/SCLK
-	 *   - [7]     = output delay (add 200ps of delay to mem signals)
-	 *   - [6]     = input delay (add 200ps of input delay to mem signals)
-	 *   - [5]     = PDWN      : 1=All Clocks off
-	 *   - [3]     = STOPCK    : 1=Core Clock off
-	 *   - [1]     = PLL_OFF   : 1=Disable Power to PLL
-	 *   - [0]     = DF        : 1=Pass CLKIN/2 to PLL / 0=Pass CLKIN to PLL
-	 *   all other bits set to zero
-	 */
-
-	p0.h = hi(PLL_LOCKCNT);
-	p0.l = lo(PLL_LOCKCNT);
-	r0 = 0x300(Z);
-	w[p0] = r0.l;
-	ssync;
-
-	/* enable self refresh via SRREQ */
-	P2.H = hi(EBIU_RSTCTL);
-	P2.L = lo(EBIU_RSTCTL);
-	R0 = [P2];
-	BITSET (R0, 3);
-	[P2] = R0;
-	SSYNC;
-
-	/* wait for SRACK bit to be set */
-.LSRR_MODE:
-	R0 = [P2];
-	CC = BITTST(R0, 4);
-	if !CC JUMP .LSRR_MODE;
-
-	r0 = CONFIG_VCO_MULT & 63;       /* Load the VCO multiplier         */
-	r0 = r0 << 9;                    /* Shift it over,                  */
-	r1 = CLKIN_HALF;                 /* Do we need to divide CLKIN by 2?*/
-	r0 = r1 | r0;
-	r1 = PLL_BYPASS;                 /* Bypass the PLL?                 */
-	r1 = r1 << 8;                    /* Shift it over                   */
-	r0 = r1 | r0;                    /* add them all together           */
-#ifdef ANOMALY_05000265
-	BITSET(r0, 15);                  /* Add 250 mV of hysteresis to SPORT input pins */
-#endif
-
-	p0.h = hi(PLL_CTL);
-	p0.l = lo(PLL_CTL);              /* Load the address                */
-	cli r2;                          /* Disable interrupts              */
-	ssync;
-	w[p0] = r0.l;                    /* Set the value                   */
-	idle;                            /* Wait for the PLL to stablize    */
-	sti r2;                          /* Enable interrupts               */
-
-.Lcheck_again:
-	p0.h = hi(PLL_STAT);
-	p0.l = lo(PLL_STAT);
-	R0 = W[P0](Z);
-	CC = BITTST(R0,5);
-	if ! CC jump .Lcheck_again;
-
-	/* Configure SCLK & CCLK Dividers */
-	r0 = (CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV);
-	p0.h = hi(PLL_DIV);
-	p0.l = lo(PLL_DIV);
-	w[p0] = r0.l;
-	ssync;
-
-	/* disable self refresh by clearing SRREQ */
-	P2.H = hi(EBIU_RSTCTL);
-	P2.L = lo(EBIU_RSTCTL);
-	R0 = [P2];
-	CC = BITTST(R0, 0);
-	if CC jump .Lskipddrrst;
-	BITSET (R0, 0);
-.Lskipddrrst:
-	BITCLR (R0, 3);
-	[P2] = R0;
-	SSYNC;
-
-	p0.l = lo(EBIU_DDRCTL0);
-	p0.h = hi(EBIU_DDRCTL0);
-	r0.l = lo(mem_DDRCTL0);
-	r0.h = hi(mem_DDRCTL0);
-	[p0] = r0;
-	ssync;
-
-	p0.l = lo(EBIU_DDRCTL1);
-	p0.h = hi(EBIU_DDRCTL1);
-	r0.l = lo(mem_DDRCTL1);
-	r0.h = hi(mem_DDRCTL1);
-	[p0] = r0;
-	ssync;
-
-	p0.l = lo(EBIU_DDRCTL2);
-	p0.h = hi(EBIU_DDRCTL2);
-	r0.l = lo(mem_DDRCTL2);
-	r0.h = hi(mem_DDRCTL2);
-	[p0] = r0;
-	ssync;
-
-	RTS;
-ENDPROC(_start_dma_code)
-#endif /* CONFIG_BFIN_KERNEL_CLOCK */
diff --git a/arch/blackfin/mach-bf548/include/mach/anomaly.h b/arch/blackfin/mach-bf548/include/mach/anomaly.h
index 816b092..3b54309 100644
--- a/arch/blackfin/mach-bf548/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf548/include/mach/anomaly.h
@@ -157,6 +157,8 @@
 #define ANOMALY_05000429 (__SILICON_REVISION__ < 2)
 /* Software System Reset Corrupts PLL_LOCKCNT Register */
 #define ANOMALY_05000430 (__SILICON_REVISION__ >= 2)
+/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
+#define ANOMALY_05000443 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000125 (0)
@@ -173,5 +175,8 @@
 #define ANOMALY_05000311 (0)
 #define ANOMALY_05000323 (0)
 #define ANOMALY_05000363 (0)
+#define ANOMALY_05000412 (0)
+#define ANOMALY_05000432 (0)
+#define ANOMALY_05000435 (0)
 
 #endif
diff --git a/arch/blackfin/mach-bf548/include/mach/bf548.h b/arch/blackfin/mach-bf548/include/mach/bf548.h
index 49f9b40..f0e5699 100644
--- a/arch/blackfin/mach-bf548/include/mach/bf548.h
+++ b/arch/blackfin/mach-bf548/include/mach/bf548.h
@@ -122,7 +122,7 @@
 #endif
 
 #ifndef CPU
-#error Unknown CPU type - This kernel doesn't seem to be configured properly
+#error "Unknown CPU type - This kernel doesn't seem to be configured properly"
 #endif
 
 #endif	/* __MACH_BF48_H__  */
diff --git a/arch/blackfin/mach-bf548/include/mach/bfin_sir.h b/arch/blackfin/mach-bf548/include/mach/bfin_sir.h
deleted file mode 100644
index c41f9cf..0000000
--- a/arch/blackfin/mach-bf548/include/mach/bfin_sir.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Blackfin Infra-red Driver
- *
- * Copyright 2006-2008 Analog Devices Inc.
- *
- * Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
- *
- */
-
-#include <linux/serial.h>
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#define SIR_UART_GET_CHAR(port)   bfin_read16((port)->membase + OFFSET_RBR)
-#define SIR_UART_GET_DLL(port)    bfin_read16((port)->membase + OFFSET_DLL)
-#define SIR_UART_GET_IER(port)    bfin_read16((port)->membase + OFFSET_IER_SET)
-#define SIR_UART_GET_DLH(port)    bfin_read16((port)->membase + OFFSET_DLH)
-#define SIR_UART_GET_LCR(port)    bfin_read16((port)->membase + OFFSET_LCR)
-#define SIR_UART_GET_LSR(port)    bfin_read16((port)->membase + OFFSET_LSR)
-#define SIR_UART_GET_GCTL(port)   bfin_read16((port)->membase + OFFSET_GCTL)
-
-#define SIR_UART_PUT_CHAR(port, v) bfin_write16(((port)->membase + OFFSET_THR), v)
-#define SIR_UART_PUT_DLL(port, v)  bfin_write16(((port)->membase + OFFSET_DLL), v)
-#define SIR_UART_SET_IER(port, v)  bfin_write16(((port)->membase + OFFSET_IER_SET), v)
-#define SIR_UART_CLEAR_IER(port, v)  bfin_write16(((port)->membase + OFFSET_IER_CLEAR), v)
-#define SIR_UART_PUT_DLH(port, v)  bfin_write16(((port)->membase + OFFSET_DLH), v)
-#define SIR_UART_PUT_LSR(port, v)  bfin_write16(((port)->membase + OFFSET_LSR), v)
-#define SIR_UART_PUT_LCR(port, v)  bfin_write16(((port)->membase + OFFSET_LCR), v)
-#define SIR_UART_CLEAR_LSR(port)  bfin_write16(((port)->membase + OFFSET_LSR), -1)
-#define SIR_UART_PUT_GCTL(port, v) bfin_write16(((port)->membase + OFFSET_GCTL), v)
-
-#ifdef CONFIG_SIR_BFIN_DMA
-struct dma_rx_buf {
-	char *buf;
-	int head;
-	int tail;
-	};
-#endif /* CONFIG_SIR_BFIN_DMA */
-
-struct bfin_sir_port {
-	unsigned char __iomem   *membase;
-	unsigned int            irq;
-	unsigned int            lsr;
-	unsigned long           clk;
-	struct net_device       *dev;
-#ifdef CONFIG_SIR_BFIN_DMA
-	int                     tx_done;
-	struct dma_rx_buf       rx_dma_buf;
-	struct timer_list       rx_dma_timer;
-	int                     rx_dma_nrows;
-#endif /* CONFIG_SIR_BFIN_DMA */
-	unsigned int            tx_dma_channel;
-	unsigned int            rx_dma_channel;
-};
-
-struct bfin_sir_port sir_ports[BFIN_UART_NR_PORTS];
-
-struct bfin_sir_port_res {
-	unsigned long   base_addr;
-	int             irq;
-	unsigned int    rx_dma_channel;
-	unsigned int    tx_dma_channel;
-};
-
-struct bfin_sir_port_res bfin_sir_port_resource[] = {
-#ifdef CONFIG_BFIN_SIR0
-	{
-	0xFFC00400,
-	IRQ_UART0_RX,
-	CH_UART0_RX,
-	CH_UART0_TX,
-	},
-#endif
-#ifdef CONFIG_BFIN_SIR1
-	{
-	0xFFC02000,
-	IRQ_UART1_RX,
-	CH_UART1_RX,
-	CH_UART1_TX,
-	},
-#endif
-#ifdef CONFIG_BFIN_SIR2
-	{
-	0xFFC02100,
-	IRQ_UART2_RX,
-	CH_UART2_RX,
-	CH_UART2_TX,
-	},
-#endif
-#ifdef CONFIG_BFIN_SIR3
-	{
-	0xFFC03100,
-	IRQ_UART3_RX,
-	CH_UART3_RX,
-	CH_UART3_TX,
-	},
-#endif
-};
-
-int nr_sirs = ARRAY_SIZE(bfin_sir_port_resource);
-
-struct bfin_sir_self {
-	struct bfin_sir_port    *sir_port;
-	spinlock_t              lock;
-	unsigned int            open;
-	int                     speed;
-	int                     newspeed;
-
-	struct sk_buff          *txskb;
-	struct sk_buff          *rxskb;
-	struct net_device_stats stats;
-	struct device           *dev;
-	struct irlap_cb         *irlap;
-	struct qos_info         qos;
-
-	iobuff_t                tx_buff;
-	iobuff_t                rx_buff;
-
-	struct work_struct      work;
-	int                     mtt;
-};
-
-#define DRIVER_NAME "bfin_sir"
-
-static int bfin_sir_hw_init(void)
-{
-	int ret = -ENODEV;
-#ifdef CONFIG_BFIN_SIR0
-	ret = peripheral_request(P_UART0_TX, DRIVER_NAME);
-	if (ret)
-		return ret;
-	ret = peripheral_request(P_UART0_RX, DRIVER_NAME);
-	if (ret)
-		return ret;
-#endif
-
-#ifdef CONFIG_BFIN_SIR1
-	ret = peripheral_request(P_UART1_TX, DRIVER_NAME);
-	if (ret)
-		return ret;
-	ret = peripheral_request(P_UART1_RX, DRIVER_NAME);
-	if (ret)
-		return ret;
-#endif
-
-#ifdef CONFIG_BFIN_SIR2
-	ret = peripheral_request(P_UART2_TX, DRIVER_NAME);
-	if (ret)
-		return ret;
-	ret = peripheral_request(P_UART2_RX, DRIVER_NAME);
-	if (ret)
-		return ret;
-#endif
-
-#ifdef CONFIG_BFIN_SIR3
-	ret = peripheral_request(P_UART3_TX, DRIVER_NAME);
-	if (ret)
-		return ret;
-	ret = peripheral_request(P_UART3_RX, DRIVER_NAME);
-	if (ret)
-		return ret;
-#endif
-	return ret;
-}
diff --git a/arch/blackfin/mach-bf548/include/mach/blackfin.h b/arch/blackfin/mach-bf548/include/mach/blackfin.h
index d6ee74a..0c0e3e2 100644
--- a/arch/blackfin/mach-bf548/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf548/include/mach/blackfin.h
@@ -111,7 +111,7 @@
 
 /* UART 0*/
 
-/* DMA Channnel */
+/* DMA Channel */
 #define bfin_read_CH_UART_RX()		bfin_read_CH_UART1_RX()
 #define bfin_write_CH_UART_RX(val)	bfin_write_CH_UART1_RX(val)
 #define bfin_read_CH_UART_TX()		bfin_read_CH_UART1_TX()
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h b/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
index 57ac8cb..6e636c4 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
@@ -34,7 +34,6 @@
 #include <asm/blackfin.h>
 
 #include "defBF54x_base.h"
-#include <asm/system.h>
 
 /* ************************************************************** */
 /* SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF54x    */
@@ -43,63 +42,9 @@
 /* PLL Registers */
 
 #define bfin_read_PLL_CTL()		bfin_read16(PLL_CTL)
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1, iwr2;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	local_irq_save(flags);
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	iwr2 = bfin_read32(SIC_IWR2);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-	bfin_write32(SIC_IWR2, 0);
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	bfin_write32(SIC_IWR2, iwr2);
-	local_irq_restore(flags);
-}
 #define bfin_read_PLL_DIV()		bfin_read16(PLL_DIV)
 #define bfin_write_PLL_DIV(val)		bfin_write16(PLL_DIV, val)
 #define bfin_read_VR_CTL()		bfin_read16(VR_CTL)
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1, iwr2;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	local_irq_save(flags);
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	iwr2 = bfin_read32(SIC_IWR2);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-	bfin_write32(SIC_IWR2, 0);
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	bfin_write32(SIC_IWR2, iwr2);
-	local_irq_restore(flags);
-}
 #define bfin_read_PLL_STAT()		bfin_read16(PLL_STAT)
 #define bfin_write_PLL_STAT(val)	bfin_write16(PLL_STAT, val)
 #define bfin_read_PLL_LOCKCNT()		bfin_read16(PLL_LOCKCNT)
@@ -2746,5 +2691,64 @@
 #define bfin_read_PINT3_IRQ		bfin_read_PINT3_REQUEST
 #define bfin_write_PINT3_IRQ		bfin_write_PINT3_REQUEST
 
+/* These need to be last due to the cdef/linux inter-dependencies */
+#include <asm/irq.h>
+
+/* Writing to PLL_CTL initiates a PLL relock sequence. */
+static __inline__ void bfin_write_PLL_CTL(unsigned int val)
+{
+	unsigned long flags, iwr0, iwr1, iwr2;
+
+	if (val == bfin_read_PLL_CTL())
+		return;
+
+	local_irq_save_hw(flags);
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	iwr0 = bfin_read32(SIC_IWR0);
+	iwr1 = bfin_read32(SIC_IWR1);
+	iwr2 = bfin_read32(SIC_IWR2);
+	/* Only allow PLL Wakeup */
+	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
+	bfin_write32(SIC_IWR1, 0);
+	bfin_write32(SIC_IWR2, 0);
+
+	bfin_write16(PLL_CTL, val);
+	SSYNC();
+	asm("IDLE;");
+
+	bfin_write32(SIC_IWR0, iwr0);
+	bfin_write32(SIC_IWR1, iwr1);
+	bfin_write32(SIC_IWR2, iwr2);
+	local_irq_restore_hw(flags);
+}
+
+/* Writing to VR_CTL initiates a PLL relock sequence. */
+static __inline__ void bfin_write_VR_CTL(unsigned int val)
+{
+	unsigned long flags, iwr0, iwr1, iwr2;
+
+	if (val == bfin_read_VR_CTL())
+		return;
+
+	local_irq_save_hw(flags);
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	iwr0 = bfin_read32(SIC_IWR0);
+	iwr1 = bfin_read32(SIC_IWR1);
+	iwr2 = bfin_read32(SIC_IWR2);
+	/* Only allow PLL Wakeup */
+	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
+	bfin_write32(SIC_IWR1, 0);
+	bfin_write32(SIC_IWR2, 0);
+
+	bfin_write16(VR_CTL, val);
+	SSYNC();
+	asm("IDLE;");
+
+	bfin_write32(SIC_IWR0, iwr0);
+	bfin_write32(SIC_IWR1, iwr1);
+	bfin_write32(SIC_IWR2, iwr2);
+	local_irq_restore_hw(flags);
+}
+
 #endif /* _CDEF_BF54X_H */
 
diff --git a/arch/blackfin/mach-bf548/include/mach/dma.h b/arch/blackfin/mach-bf548/include/mach/dma.h
index 36a2ef7..a30d242 100644
--- a/arch/blackfin/mach-bf548/include/mach/dma.h
+++ b/arch/blackfin/mach-bf548/include/mach/dma.h
@@ -1,32 +1,8 @@
-/*
- * file:         include/asm-blackfin/mach-bf548/dma.h
- * based on:
- * author:
+/* mach/dma.h - arch-specific DMA defines
  *
- * created:
- * description:
- *	system mmr register map
- * rev:
+ * Copyright 2004-2008 Analog Devices Inc.
  *
- * modified:
- *
- *
- * bugs:         enter bugs at http://blackfin.uclinux.org/
- *
- * this program is free software; you can redistribute it and/or modify
- * it under the terms of the gnu general public license as published by
- * the free software foundation; either version 2, or (at your option)
- * any later version.
- *
- * this program is distributed in the hope that it will be useful,
- * but without any warranty; without even the implied warranty of
- * merchantability or fitness for a particular purpose.  see the
- * gnu general public license for more details.
- *
- * you should have received a copy of the gnu general public license
- * along with this program; see the file copying.
- * if not, write to the free software foundation,
- * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _MACH_DMA_H_
@@ -71,6 +47,6 @@
 #define CH_MEM_STREAM3_DEST	30
 #define CH_MEM_STREAM3_SRC	31
 
-#define MAX_BLACKFIN_DMA_CHANNEL 32
+#define MAX_DMA_CHANNELS 32
 
 #endif
diff --git a/arch/blackfin/mach-bf548/include/mach/irq.h b/arch/blackfin/mach-bf548/include/mach/irq.h
index ad380d1..60299a7 100644
--- a/arch/blackfin/mach-bf548/include/mach/irq.h
+++ b/arch/blackfin/mach-bf548/include/mach/irq.h
@@ -158,7 +158,7 @@
 #define IRQ_PINT2		BFIN_IRQ(94)	/* PINT2 Interrupt */
 #define IRQ_PINT3		BFIN_IRQ(95)	/* PINT3 Interrupt */
 
-#define SYS_IRQS        	IRQ_PINT3
+#define SYS_IRQS		IRQ_PINT3
 
 #define BFIN_PA_IRQ(x)		((x) + SYS_IRQS + 1)
 #define IRQ_PA0			BFIN_PA_IRQ(0)
diff --git a/arch/blackfin/mach-bf548/include/mach/mem_init.h b/arch/blackfin/mach-bf548/include/mach/mem_init.h
deleted file mode 100644
index ab0b863..0000000
--- a/arch/blackfin/mach-bf548/include/mach/mem_init.h
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * File:         include/asm-blackfin/mach-bf548/mem_init.h
- * Based on:
- * Author:
- *
- * Created:
- * Description:
- *
- * Rev:
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-#define MIN_DDR_SCLK(x)	(x*(CONFIG_SCLK_HZ/1000/1000)/1000 + 1)
-#define MAX_DDR_SCLK(x)	(x*(CONFIG_SCLK_HZ/1000/1000)/1000)
-#define DDR_CLK_HZ(x)	(1000*1000*1000/x)
-
-#if (CONFIG_MEM_MT46V32M16_6T)
-#define DDR_SIZE	DEVSZ_512
-#define DDR_WIDTH	DEVWD_16
-#define DDR_MAX_tCK	13
-
-#define DDR_tRC		DDR_TRC(MIN_DDR_SCLK(60))
-#define DDR_tRAS	DDR_TRAS(MIN_DDR_SCLK(42))
-#define DDR_tRP		DDR_TRP(MIN_DDR_SCLK(15))
-#define DDR_tRFC	DDR_TRFC(MIN_DDR_SCLK(72))
-#define DDR_tREFI	DDR_TREFI(MAX_DDR_SCLK(7800))
-
-#define DDR_tRCD	DDR_TRCD(MIN_DDR_SCLK(15))
-#define DDR_tWTR	DDR_TWTR(1)
-#define DDR_tMRD	DDR_TMRD(MIN_DDR_SCLK(12))
-#define DDR_tWR		DDR_TWR(MIN_DDR_SCLK(15))
-#endif
-
-#if (CONFIG_MEM_MT46V32M16_5B)
-#define DDR_SIZE	DEVSZ_512
-#define DDR_WIDTH	DEVWD_16
-#define DDR_MAX_tCK	13
-
-#define DDR_tRC		DDR_TRC(MIN_DDR_SCLK(55))
-#define DDR_tRAS	DDR_TRAS(MIN_DDR_SCLK(40))
-#define DDR_tRP		DDR_TRP(MIN_DDR_SCLK(15))
-#define DDR_tRFC	DDR_TRFC(MIN_DDR_SCLK(70))
-#define DDR_tREFI	DDR_TREFI(MAX_DDR_SCLK(7800))
-
-#define DDR_tRCD	DDR_TRCD(MIN_DDR_SCLK(15))
-#define DDR_tWTR	DDR_TWTR(2)
-#define DDR_tMRD	DDR_TMRD(MIN_DDR_SCLK(10))
-#define DDR_tWR		DDR_TWR(MIN_DDR_SCLK(15))
-#endif
-
-#if (CONFIG_MEM_GENERIC_BOARD)
-#define DDR_SIZE	DEVSZ_512
-#define DDR_WIDTH	DEVWD_16
-#define DDR_MAX_tCK	13
-
-#define DDR_tRCD	DDR_TRCD(3)
-#define DDR_tWTR	DDR_TWTR(2)
-#define DDR_tWR		DDR_TWR(2)
-#define DDR_tMRD	DDR_TMRD(2)
-#define DDR_tRP		DDR_TRP(3)
-#define DDR_tRAS	DDR_TRAS(7)
-#define DDR_tRC		DDR_TRC(10)
-#define DDR_tRFC	DDR_TRFC(12)
-#define DDR_tREFI	DDR_TREFI(1288)
-#endif
-
-#if (CONFIG_SCLK_HZ < DDR_CLK_HZ(DDR_MAX_tCK))
-# error "CONFIG_SCLK_HZ is too small (<DDR_CLK_HZ(DDR_MAX_tCK) Hz)."
-#elif(CONFIG_SCLK_HZ <= 133333333)
-# define	DDR_CL		CL_2
-#else
-# error "CONFIG_SCLK_HZ is too large (>133333333 Hz)."
-#endif
-
-
-#define mem_DDRCTL0	(DDR_tRP | DDR_tRAS | DDR_tRC | DDR_tRFC | DDR_tREFI)
-#define mem_DDRCTL1	(DDR_DATWIDTH | EXTBANK_1 | DDR_SIZE | DDR_WIDTH | DDR_tWTR \
-			| DDR_tMRD | DDR_tWR | DDR_tRCD)
-#define mem_DDRCTL2	DDR_CL
-
-
-#if defined CONFIG_CLKIN_HALF
-#define CLKIN_HALF       1
-#else
-#define CLKIN_HALF       0
-#endif
-
-#if defined CONFIG_PLL_BYPASS
-#define PLL_BYPASS      1
-#else
-#define PLL_BYPASS       0
-#endif
-
-/***************************************Currently Not Being Used *********************************/
-#define flash_EBIU_AMBCTL_WAT  ((CONFIG_FLASH_SPEED_BWAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_RAT  ((CONFIG_FLASH_SPEED_BRAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_HT   ((CONFIG_FLASH_SPEED_BHT  * 4) / (4000000000 / CONFIG_SCLK_HZ))
-#define flash_EBIU_AMBCTL_ST   ((CONFIG_FLASH_SPEED_BST  * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_TT   ((CONFIG_FLASH_SPEED_BTT  * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-
-#if (flash_EBIU_AMBCTL_TT > 3)
-#define flash_EBIU_AMBCTL0_TT   B0TT_4
-#endif
-#if (flash_EBIU_AMBCTL_TT == 3)
-#define flash_EBIU_AMBCTL0_TT   B0TT_3
-#endif
-#if (flash_EBIU_AMBCTL_TT == 2)
-#define flash_EBIU_AMBCTL0_TT   B0TT_2
-#endif
-#if (flash_EBIU_AMBCTL_TT < 2)
-#define flash_EBIU_AMBCTL0_TT   B0TT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_ST > 3)
-#define flash_EBIU_AMBCTL0_ST   B0ST_4
-#endif
-#if (flash_EBIU_AMBCTL_ST == 3)
-#define flash_EBIU_AMBCTL0_ST   B0ST_3
-#endif
-#if (flash_EBIU_AMBCTL_ST == 2)
-#define flash_EBIU_AMBCTL0_ST   B0ST_2
-#endif
-#if (flash_EBIU_AMBCTL_ST < 2)
-#define flash_EBIU_AMBCTL0_ST   B0ST_1
-#endif
-
-#if (flash_EBIU_AMBCTL_HT > 2)
-#define flash_EBIU_AMBCTL0_HT   B0HT_3
-#endif
-#if (flash_EBIU_AMBCTL_HT == 2)
-#define flash_EBIU_AMBCTL0_HT   B0HT_2
-#endif
-#if (flash_EBIU_AMBCTL_HT == 1)
-#define flash_EBIU_AMBCTL0_HT   B0HT_1
-#endif
-#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT == 0)
-#define flash_EBIU_AMBCTL0_HT   B0HT_0
-#endif
-#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT != 0)
-#define flash_EBIU_AMBCTL0_HT   B0HT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_WAT > 14)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_15
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 14)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_14
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 13)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_13
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 12)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_12
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 11)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_11
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 10)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_10
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 9)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_9
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 8)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_8
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 7)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_7
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 6)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_6
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 5)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_5
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 4)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_4
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 3)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_3
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 2)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_2
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 1)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_RAT > 14)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_15
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 14)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_14
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 13)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_13
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 12)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_12
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 11)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_11
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 10)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_10
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 9)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_9
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 8)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_8
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 7)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_7
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 6)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_6
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 5)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_5
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 4)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_4
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 3)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_3
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 2)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_2
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 1)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_1
-#endif
-
-#define flash_EBIU_AMBCTL0  \
-	(flash_EBIU_AMBCTL0_WAT | flash_EBIU_AMBCTL0_RAT | flash_EBIU_AMBCTL0_HT | \
-	 flash_EBIU_AMBCTL0_ST | flash_EBIU_AMBCTL0_TT | CONFIG_FLASH_SPEED_RDYEN)
diff --git a/arch/blackfin/mach-bf548/include/mach/mem_map.h b/arch/blackfin/mach-bf548/include/mach/mem_map.h
index a222842..70b9c11 100644
--- a/arch/blackfin/mach-bf548/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf548/include/mach/mem_map.h
@@ -108,4 +108,10 @@
 #define L1_SCRATCH_START	0xFFB00000
 #define L1_SCRATCH_LENGTH	0x1000
 
+#define GET_PDA_SAFE(preg)		\
+	preg.l = _cpu_pda;		\
+	preg.h = _cpu_pda;
+
+#define GET_PDA(preg, dreg)	GET_PDA_SAFE(preg)
+
 #endif/* _MEM_MAP_548_H_ */
diff --git a/arch/blackfin/mach-bf561/Kconfig b/arch/blackfin/mach-bf561/Kconfig
index 3f48954..638ec38 100644
--- a/arch/blackfin/mach-bf561/Kconfig
+++ b/arch/blackfin/mach-bf561/Kconfig
@@ -4,9 +4,9 @@
 
 menu "BF561 Specific Configuration"
 
-comment "Core B Support"
+if (!SMP)
 
-menu "Core B Support"
+comment "Core B Support"
 
 config BF561_COREB
 	bool "Enable Core B support"
@@ -25,7 +25,7 @@
 	  0 is set, and will reset PC to 0xff600000 when
 	  COREB_SRAM_INIT is cleared.
 
-endmenu
+endif
 
 comment "Interrupt Priority Assignment"
 
@@ -138,7 +138,7 @@
 	default 9
 config IRQ_TIMER0
 	int "TIMER 0  Interrupt"
-	default 10
+	default 8
 config IRQ_TIMER1
 	int "TIMER 1  Interrupt"
 	default 10
diff --git a/arch/blackfin/mach-bf561/Makefile b/arch/blackfin/mach-bf561/Makefile
index f39235a..59e18af 100644
--- a/arch/blackfin/mach-bf561/Makefile
+++ b/arch/blackfin/mach-bf561/Makefile
@@ -2,8 +2,7 @@
 # arch/blackfin/mach-bf561/Makefile
 #
 
-extra-y := head.o
-
 obj-y := ints-priority.o dma.o
 
 obj-$(CONFIG_BF561_COREB) += coreb.o
+obj-$(CONFIG_SMP)  += smp.o secondary.o atomic.o
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
new file mode 100644
index 0000000..9439bc6
--- /dev/null
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -0,0 +1,919 @@
+/*
+ * File:         arch/blackfin/mach-bf561/atomic.S
+ * Author:       Philippe Gerum <rpm@xenomai.org>
+ *
+ *               Copyright 2007 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include <linux/linkage.h>
+#include <asm/blackfin.h>
+#include <asm/cache.h>
+#include <asm/asm-offsets.h>
+#include <asm/rwlock.h>
+#include <asm/cplb.h>
+
+.text
+
+.macro coreslot_loadaddr reg:req
+	\reg\().l = _corelock;
+	\reg\().h = _corelock;
+.endm
+
+/*
+ * r0 = address of atomic data to flush and invalidate (32bit).
+ *
+ * Clear interrupts and return the old mask.
+ * We assume that no atomic data can span cachelines.
+ *
+ * Clobbers: r2:0, p0
+ */
+ENTRY(_get_core_lock)
+	r1 = -L1_CACHE_BYTES;
+	r1 = r0 & r1;
+	cli r0;
+	coreslot_loadaddr p0;
+.Lretry_corelock:
+	testset (p0);
+	if cc jump .Ldone_corelock;
+	SSYNC(r2);
+	jump .Lretry_corelock
+.Ldone_corelock:
+	p0 = r1;
+	CSYNC(r2);
+	flushinv[p0];
+	SSYNC(r2);
+	rts;
+ENDPROC(_get_core_lock)
+
+/*
+ * r0 = address of atomic data in uncacheable memory region (32bit).
+ *
+ * Clear interrupts and return the old mask.
+ *
+ * Clobbers: r0, p0
+ */
+ENTRY(_get_core_lock_noflush)
+	cli r0;
+	coreslot_loadaddr p0;
+.Lretry_corelock_noflush:
+	testset (p0);
+	if cc jump .Ldone_corelock_noflush;
+	SSYNC(r2);
+	jump .Lretry_corelock_noflush
+.Ldone_corelock_noflush:
+	rts;
+ENDPROC(_get_core_lock_noflush)
+
+/*
+ * r0 = interrupt mask to restore.
+ * r1 = address of atomic data to flush and invalidate (32bit).
+ *
+ * Interrupts are masked on entry (see _get_core_lock).
+ * Clobbers: r2:0, p0
+ */
+ENTRY(_put_core_lock)
+	/* Write-through cache assumed, so no flush needed here. */
+	coreslot_loadaddr p0;
+	r1 = 0;
+	[p0] = r1;
+	SSYNC(r2);
+	sti r0;
+	rts;
+ENDPROC(_put_core_lock)
+
+#ifdef __ARCH_SYNC_CORE_DCACHE
+
+ENTRY(___raw_smp_mark_barrier_asm)
+	[--sp] = rets;
+	[--sp] = ( r7:5 );
+	[--sp] = r0;
+	[--sp] = p1;
+	[--sp] = p0;
+	call _get_core_lock_noflush;
+
+	/*
+	 * Calculate current core mask
+	 */
+	GET_CPUID(p1, r7);
+	r6 = 1;
+	r6 <<= r7;
+
+	/*
+	 * Set bit of other cores in barrier mask. Don't change current core bit.
+	 */
+	p1.l = _barrier_mask;
+	p1.h = _barrier_mask;
+	r7 = [p1];
+	r5 = r7 & r6;
+	r7 = ~r6;
+	cc = r5 == 0;
+	if cc jump 1f;
+	r7 = r7 | r6;
+1:
+	[p1] = r7;
+	SSYNC(r2);
+
+	call _put_core_lock;
+	p0 = [sp++];
+	p1 = [sp++];
+	r0 = [sp++];
+	( r7:5 ) = [sp++];
+	rets = [sp++];
+	rts;
+ENDPROC(___raw_smp_mark_barrier_asm)
+
+ENTRY(___raw_smp_check_barrier_asm)
+	[--sp] = rets;
+	[--sp] = ( r7:5 );
+	[--sp] = r0;
+	[--sp] = p1;
+	[--sp] = p0;
+	call _get_core_lock_noflush;
+
+	/*
+	 * Calculate current core mask
+	 */
+	GET_CPUID(p1, r7);
+	r6 = 1;
+	r6 <<= r7;
+
+	/*
+	 * Clear current core bit in barrier mask if it is set.
+	 */
+	p1.l = _barrier_mask;
+	p1.h = _barrier_mask;
+	r7 = [p1];
+	r5 = r7 & r6;
+	cc = r5 == 0;
+	if cc jump 1f;
+	r6 = ~r6;
+	r7 = r7 & r6;
+	[p1] = r7;
+	SSYNC(r2);
+
+	call _put_core_lock;
+
+	/*
+	 * Invalidate the entire D-cache of current core.
+	 */
+	sp += -12;
+	call _resync_core_dcache
+	sp += 12;
+	jump 2f;
+1:
+	call _put_core_lock;
+2:
+	p0 = [sp++];
+	p1 = [sp++];
+	r0 = [sp++];
+	( r7:5 ) = [sp++];
+	rets = [sp++];
+	rts;
+ENDPROC(___raw_smp_check_barrier_asm)
+
+/*
+ * r0 = irqflags
+ * r1 = address of atomic data
+ *
+ * Clobbers: r2:0, p1:0
+ */
+_start_lock_coherent:
+
+	[--sp] = rets;
+	[--sp] = ( r7:6 );
+	r7 = r0;
+	p1 = r1;
+
+	/*
+	 * Determine whether the atomic data was previously
+	 * owned by another CPU (=r6).
+	 */
+	GET_CPUID(p0, r2);
+	r1 = 1;
+	r1 <<= r2;
+	r2 = ~r1;
+
+	r1 = [p1];
+	r1 >>= 28;   /* CPU fingerprints are stored in the high nibble. */
+	r6 = r1 & r2;
+	r1 = [p1];
+	r1 <<= 4;
+	r1 >>= 4;
+	[p1] = r1;
+
+	/*
+	 * Release the core lock now, but keep IRQs disabled while we are
+	 * performing the remaining housekeeping chores for the current CPU.
+	 */
+	coreslot_loadaddr p0;
+	r1 = 0;
+	[p0] = r1;
+
+	/*
+	 * If another CPU has owned the same atomic section before us,
+	 * then our D-cached copy of the shared data protected by the
+	 * current spin/write_lock may be obsolete.
+	 */
+	cc = r6 == 0;
+	if cc jump .Lcache_synced
+
+	/*
+	 * Invalidate the entire D-cache of the current core.
+	 */
+	sp += -12;
+	call _resync_core_dcache
+	sp += 12;
+
+.Lcache_synced:
+	SSYNC(r2);
+	sti r7;
+	( r7:6 ) = [sp++];
+	rets = [sp++];
+	rts
+
+/*
+ * r0 = irqflags
+ * r1 = address of atomic data
+ *
+ * Clobbers: r2:0, p1:0
+ */
+_end_lock_coherent:
+
+	p1 = r1;
+	GET_CPUID(p0, r2);
+	r2 += 28;
+	r1 = 1;
+	r1 <<= r2;
+	r2 = [p1];
+	r2 = r1 | r2;
+	[p1] = r2;
+	r1 = p1;
+	jump _put_core_lock;
+
+#endif /* __ARCH_SYNC_CORE_DCACHE */
+
+/*
+ * r0 = &spinlock->lock
+ *
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_spin_is_locked_asm)
+	p1 = r0;
+	[--sp] = rets;
+	call _get_core_lock;
+	r3 = [p1];
+	cc = bittst( r3, 0 );
+	r3 = cc;
+	r1 = p1;
+	call _put_core_lock;
+	rets = [sp++];
+	r0 = r3;
+	rts;
+ENDPROC(___raw_spin_is_locked_asm)
+
+/*
+ * r0 = &spinlock->lock
+ *
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_spin_lock_asm)
+	p1 = r0;
+	[--sp] = rets;
+.Lretry_spinlock:
+	call _get_core_lock;
+	r1 = p1;
+	r2 = [p1];
+	cc = bittst( r2, 0 );
+	if cc jump .Lbusy_spinlock
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	r3 = p1;
+	bitset ( r2, 0 ); /* Raise the lock bit. */
+	[p1] = r2;
+	call _start_lock_coherent
+#else
+	r2 = 1;
+	[p1] = r2;
+	call _put_core_lock;
+#endif
+	rets = [sp++];
+	rts;
+
+.Lbusy_spinlock:
+	/* We don't touch the atomic area if busy, so that the flush
+	   will behave like a nop in _put_core_lock. */
+	call _put_core_lock;
+	SSYNC(r2);
+	r0 = p1;
+	jump .Lretry_spinlock
+ENDPROC(___raw_spin_lock_asm)
+
+/*
+ * r0 = &spinlock->lock
+ *
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_spin_trylock_asm)
+	p1 = r0;
+	[--sp] = rets;
+	call _get_core_lock;
+	r1 = p1;
+	r3 = [p1];
+	cc = bittst( r3, 0 );
+	if cc jump .Lfailed_trylock
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	bitset ( r3, 0 ); /* Raise the lock bit. */
+	[p1] = r3;
+	call _start_lock_coherent
+#else
+	r2 = 1;
+	[p1] = r2;
+	call _put_core_lock;
+#endif
+	r0 = 1;
+	rets = [sp++];
+	rts;
+.Lfailed_trylock:
+	call _put_core_lock;
+	r0 = 0;
+	rets = [sp++];
+	rts;
+ENDPROC(___raw_spin_trylock_asm)
+
+/*
+ * r0 = &spinlock->lock
+ *
+ * Clobbers: r2:0, p1:0
+ */
+ENTRY(___raw_spin_unlock_asm)
+	p1 = r0;
+	[--sp] = rets;
+	call _get_core_lock;
+	r2 = [p1];
+	bitclr ( r2, 0 );
+	[p1] = r2;
+	r1 = p1;
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	call _end_lock_coherent
+#else
+	call _put_core_lock;
+#endif
+	rets = [sp++];
+	rts;
+ENDPROC(___raw_spin_unlock_asm)
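
On Blackfin, assembly entry points carry one extra leading underscore, so the routines above are reachable from C as __raw_spin_lock_asm() and friends. A hedged sketch of the C-side wrappers one would layer on top (the prototypes, the example_spinlock type, and the wrapper names are assumptions for illustration, not the exact in-tree definitions):

/* Sketch only: C wrappers over the core-lock based spinlock routines above. */
struct example_spinlock {
	volatile unsigned int lock;
};

extern int  __raw_spin_is_locked_asm(volatile int *ptr);
extern void __raw_spin_lock_asm(volatile int *ptr);
extern int  __raw_spin_trylock_asm(volatile int *ptr);
extern void __raw_spin_unlock_asm(volatile int *ptr);

static inline void example_spin_lock(struct example_spinlock *lock)
{
	__raw_spin_lock_asm((volatile int *)&lock->lock);
}

static inline int example_spin_trylock(struct example_spinlock *lock)
{
	return __raw_spin_trylock_asm((volatile int *)&lock->lock);
}

static inline void example_spin_unlock(struct example_spinlock *lock)
{
	__raw_spin_unlock_asm((volatile int *)&lock->lock);
}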
+
+/*
+ * r0 = &rwlock->lock
+ *
+ * Clobbers: r2:0, p1:0
+ */
+ENTRY(___raw_read_lock_asm)
+	p1 = r0;
+	[--sp] = rets;
+	call _get_core_lock;
+.Lrdlock_try:
+	r1 = [p1];
+	r1 += -1;
+	[p1] = r1;
+	cc = r1 < 0;
+	if cc jump .Lrdlock_failed
+	r1 = p1;
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	call _start_lock_coherent
+#else
+	call _put_core_lock;
+#endif
+	rets = [sp++];
+	rts;
+
+.Lrdlock_failed:
+	r1 += 1;
+	[p1] = r1;
+.Lrdlock_wait:
+	r1 = p1;
+	call _put_core_lock;
+	SSYNC(r2);
+	r0 = p1;
+	call _get_core_lock;
+	r1 = [p1];
+	cc = r1 < 2;
+	if cc jump .Lrdlock_wait;
+	jump .Lrdlock_try
+ENDPROC(___raw_read_lock_asm)
+
+/*
+ * r0 = &rwlock->lock
+ *
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_read_trylock_asm)
+	p1 = r0;
+	[--sp] = rets;
+	call _get_core_lock;
+	r1 = [p1];
+	cc = r1 <= 0;
+	if cc jump .Lfailed_tryrdlock;
+	r1 += -1;
+	[p1] = r1;
+	r1 = p1;
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	call _start_lock_coherent
+#else
+	call _put_core_lock;
+#endif
+	rets = [sp++];
+	r0 = 1;
+	rts;
+.Lfailed_tryrdlock:
+	r1 = p1;
+	call _put_core_lock;
+	rets = [sp++];
+	r0 = 0;
+	rts;
+ENDPROC(___raw_read_trylock_asm)
+
+/*
+ * r0 = &rwlock->lock
+ *
+ * Note: processing done under a reader lock must not have side effects
+ * on the other core's cache state, so we just release the core lock and
+ * exit (no _end_lock_coherent).
+ *
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_read_unlock_asm)
+	p1 = r0;
+	[--sp] = rets;
+	call _get_core_lock;
+	r1 = [p1];
+	r1 += 1;
+	[p1] = r1;
+	r1 = p1;
+	call _put_core_lock;
+	rets = [sp++];
+	rts;
+ENDPROC(___raw_read_unlock_asm)
+
+/*
+ * r0 = &rwlock->lock
+ *
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_write_lock_asm)
+	p1 = r0;
+	r3.l = lo(RW_LOCK_BIAS);
+	r3.h = hi(RW_LOCK_BIAS);
+	[--sp] = rets;
+	call _get_core_lock;
+.Lwrlock_try:
+	r1 = [p1];
+	r1 = r1 - r3;
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	r2 = r1;
+	r2 <<= 4;
+	r2 >>= 4;
+	cc = r2 == 0;
+#else
+	cc = r1 == 0;
+#endif
+	if !cc jump .Lwrlock_wait
+	[p1] = r1;
+	r1 = p1;
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	call _start_lock_coherent
+#else
+	call _put_core_lock;
+#endif
+	rets = [sp++];
+	rts;
+
+.Lwrlock_wait:
+	r1 = p1;
+	call _put_core_lock;
+	SSYNC(r2);
+	r0 = p1;
+	call _get_core_lock;
+	r1 = [p1];
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	r1 <<= 4;
+	r1 >>= 4;
+#endif
+	cc = r1 == r3;
+	if !cc jump .Lwrlock_wait;
+	jump .Lwrlock_try
+ENDPROC(___raw_write_lock_asm)
+
+/*
+ * r0 = &rwlock->lock
+ *
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_write_trylock_asm)
+	p1 = r0;
+	[--sp] = rets;
+	call _get_core_lock;
+	r1 = [p1];
+	r2.l = lo(RW_LOCK_BIAS);
+	r2.h = hi(RW_LOCK_BIAS);
+	cc = r1 == r2;
+	if !cc jump .Lfailed_trywrlock;
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	r1 >>= 28;
+	r1 <<= 28;
+#else
+	r1 = 0;
+#endif
+	[p1] = r1;
+	r1 = p1;
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	call _start_lock_coherent
+#else
+	call _put_core_lock;
+#endif
+	rets = [sp++];
+	r0 = 1;
+	rts;
+
+.Lfailed_trywrlock:
+	r1 = p1;
+	call _put_core_lock;
+	rets = [sp++];
+	r0 = 0;
+	rts;
+ENDPROC(___raw_write_trylock_asm)
+
+/*
+ * r0 = &rwlock->lock
+ *
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_write_unlock_asm)
+	p1 = r0;
+	r3.l = lo(RW_LOCK_BIAS);
+	r3.h = hi(RW_LOCK_BIAS);
+	[--sp] = rets;
+	call _get_core_lock;
+	r1 = [p1];
+	r1 = r1 + r3;
+	[p1] = r1;
+	r1 = p1;
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	call _end_lock_coherent
+#else
+	call _put_core_lock;
+#endif
+	rets = [sp++];
+	rts;
+ENDPROC(___raw_write_unlock_asm)
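
The rwlock routines keep a single counter that starts at RW_LOCK_BIAS: every reader subtracts one, and a writer subtracts the whole bias, so the lock is write-held exactly when the low 28 bits reach zero (the top nibble is reserved for the CPU-fingerprint scheme used by the cache-coherence helpers, which is why the writer paths mask it off before comparing). A hedged C model of that invariant; the EXAMPLE_RW_LOCK_BIAS value matches the conventional 0x01000000 but is an assumption here:

/* Sketch only: C model of the counter discipline maintained by the rwlock asm. */
#define EXAMPLE_RW_LOCK_BIAS	0x01000000

static inline int example_read_can_lock(unsigned int counter)
{
	/* Readers decrement; a read slot is available while the count is positive. */
	return (int)(counter & 0x0fffffff) > 0;
}

static inline int example_write_can_lock(unsigned int counter)
{
	/* Ignore the CPU-fingerprint nibble kept in bits 31:28. */
	return (counter & 0x0fffffff) == EXAMPLE_RW_LOCK_BIAS;
}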
+
+/*
+ * r0 = ptr
+ * r1 = value
+ *
+ * Add a signed value to a 32bit word and return the new value atomically.
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_atomic_update_asm)
+	p1 = r0;
+	r3 = r1;
+	[--sp] = rets;
+	call _get_core_lock;
+	r2 = [p1];
+	r3 = r3 + r2;
+	[p1] = r3;
+	r1 = p1;
+	call _put_core_lock;
+	r0 = r3;
+	rets = [sp++];
+	rts;
+ENDPROC(___raw_atomic_update_asm)
+
+/*
+ * r0 = ptr
+ * r1 = mask
+ *
+ * Clear the mask bits from a 32bit word and return the old 32bit value
+ * atomically.
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_atomic_clear_asm)
+	p1 = r0;
+	r3 = ~r1;
+	[--sp] = rets;
+	call _get_core_lock;
+	r2 = [p1];
+	r3 = r2 & r3;
+	[p1] = r3;
+	r3 = r2;
+	r1 = p1;
+	call _put_core_lock;
+	r0 = r3;
+	rets = [sp++];
+	rts;
+ENDPROC(___raw_atomic_clear_asm)
+
+/*
+ * r0 = ptr
+ * r1 = mask
+ *
+ * Set the mask bits into a 32bit word and return the old 32bit value
+ * atomically.
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_atomic_set_asm)
+	p1 = r0;
+	r3 = r1;
+	[--sp] = rets;
+	call _get_core_lock;
+	r2 = [p1];
+	r3 = r2 | r3;
+	[p1] = r3;
+	r3 = r2;
+	r1 = p1;
+	call _put_core_lock;
+	r0 = r3;
+	rets = [sp++];
+	rts;
+ENDPROC(___raw_atomic_set_asm)
+
+/*
+ * r0 = ptr
+ * r1 = mask
+ *
+ * XOR the mask bits with a 32bit word and return the old 32bit value
+ * atomically.
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_atomic_xor_asm)
+	p1 = r0;
+	r3 = r1;
+	[--sp] = rets;
+	call _get_core_lock;
+	r2 = [p1];
+	r3 = r2 ^ r3;
+	[p1] = r3;
+	r3 = r2;
+	r1 = p1;
+	call _put_core_lock;
+	r0 = r3;
+	rets = [sp++];
+	rts;
+ENDPROC(___raw_atomic_xor_asm)
+
+/*
+ * r0 = ptr
+ * r1 = mask
+ *
+ * Perform a logical AND between the mask bits and a 32bit word, and
+ * return the masked value. We need this on this architecture in
+ * order to invalidate the local cache before testing.
+ *
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_atomic_test_asm)
+	p1 = r0;
+	r3 = r1;
+	r1 = -L1_CACHE_BYTES;
+	r1 = r0 & r1;
+	p0 = r1;
+	flushinv[p0];
+	SSYNC(r2);
+	r0 = [p1];
+	r0 = r0 & r3;
+	rts;
+ENDPROC(___raw_atomic_test_asm)
+
+/*
+ * r0 = ptr
+ * r1 = value
+ *
+ * Swap *ptr with value and return the old 32bit value atomically.
+ * Clobbers: r3:0, p1:0
+ */
+#define	__do_xchg(src, dst) 		\
+	p1 = r0;			\
+	r3 = r1;			\
+	[--sp] = rets;			\
+	call _get_core_lock;		\
+	r2 = src;			\
+	dst = r3;			\
+	r3 = r2;			\
+	r1 = p1;			\
+	call _put_core_lock;		\
+	r0 = r3;			\
+	rets = [sp++];			\
+	rts;
+
+ENTRY(___raw_xchg_1_asm)
+	__do_xchg(b[p1] (z), b[p1])
+ENDPROC(___raw_xchg_1_asm)
+
+ENTRY(___raw_xchg_2_asm)
+	__do_xchg(w[p1] (z), w[p1])
+ENDPROC(___raw_xchg_2_asm)
+
+ENTRY(___raw_xchg_4_asm)
+	__do_xchg([p1], [p1])
+ENDPROC(___raw_xchg_4_asm)
+
+/*
+ * r0 = ptr
+ * r1 = new
+ * r2 = old
+ *
+ * Swap *ptr with new if *ptr == old and return the previous *ptr
+ * value atomically.
+ *
+ * Clobbers: r3:0, p1:0
+ */
+#define	__do_cmpxchg(src, dst) 		\
+	[--sp] = rets;			\
+	[--sp] = r4;			\
+	p1 = r0;			\
+	r3 = r1;			\
+	r4 = r2;			\
+	call _get_core_lock;		\
+	r2 = src;			\
+	cc = r2 == r4;			\
+	if !cc jump 1f;			\
+	dst = r3;			\
+     1: r3 = r2;			\
+	r1 = p1;			\
+	call _put_core_lock;		\
+	r0 = r3;			\
+	r4 = [sp++];			\
+	rets = [sp++];			\
+	rts;
+
+ENTRY(___raw_cmpxchg_1_asm)
+	__do_cmpxchg(b[p1] (z), b[p1])
+ENDPROC(___raw_cmpxchg_1_asm)
+
+ENTRY(___raw_cmpxchg_2_asm)
+	__do_cmpxchg(w[p1] (z), w[p1])
+ENDPROC(___raw_cmpxchg_2_asm)
+
+ENTRY(___raw_cmpxchg_4_asm)
+	__do_cmpxchg([p1], [p1])
+ENDPROC(___raw_cmpxchg_4_asm)
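
The three xchg and three cmpxchg entry points above exist so that the generic exchange macros can dispatch on operand size while sharing one core-lock protocol. A hedged sketch of that size dispatch; the prototypes and the example_xchg() name are illustrative assumptions, not the actual header contents:

/* Sketch only: size dispatch onto the raw exchange routines defined above. */
extern unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
extern unsigned long __raw_xchg_2_asm(volatile void *ptr, unsigned long value);
extern unsigned long __raw_xchg_4_asm(volatile void *ptr, unsigned long value);

static inline unsigned long example_xchg(volatile void *ptr,
					 unsigned long value, int size)
{
	switch (size) {
	case 1:
		return __raw_xchg_1_asm(ptr, value);
	case 2:
		return __raw_xchg_2_asm(ptr, value);
	case 4:
		return __raw_xchg_4_asm(ptr, value);
	}
	return value;	/* unsupported size; real code would BUG() here */
}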
+
+/*
+ * r0 = ptr
+ * r1 = bitnr
+ *
+ * Set a bit in a 32bit word and return the old 32bit value atomically.
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_bit_set_asm)
+	r2 = r1;
+	r1 = 1;
+	r1 <<= r2;
+	jump ___raw_atomic_set_asm
+ENDPROC(___raw_bit_set_asm)
+
+/*
+ * r0 = ptr
+ * r1 = bitnr
+ *
+ * Clear a bit in a 32bit word and return the old 32bit value atomically.
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_bit_clear_asm)
+	r2 = r1;
+	r1 = 1;
+	r1 <<= r2;
+	jump ___raw_atomic_clear_asm
+ENDPROC(___raw_bit_clear_asm)
+
+/*
+ * r0 = ptr
+ * r1 = bitnr
+ *
+ * Toggle a bit in a 32bit word and return the old 32bit value atomically.
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_bit_toggle_asm)
+	r2 = r1;
+	r1 = 1;
+	r1 <<= r2;
+	jump ___raw_atomic_xor_asm
+ENDPROC(___raw_bit_toggle_asm)
+
+/*
+ * r0 = ptr
+ * r1 = bitnr
+ *
+ * Test-and-set a bit in a 32bit word and return the old bit value atomically.
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_bit_test_set_asm)
+	[--sp] = rets;
+	[--sp] = r1;
+	call ___raw_bit_set_asm
+	r1 = [sp++];
+	r2 = 1;
+	r2 <<= r1;
+	r0 = r0 & r2;
+	cc = r0 == 0;
+	if cc jump 1f
+	r0 = 1;
+1:
+	rets = [sp++];
+	rts;
+ENDPROC(___raw_bit_test_set_asm)
+
+/*
+ * r0 = ptr
+ * r1 = bitnr
+ *
+ * Test-and-clear a bit in a 32bit word and return the old bit value atomically.
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_bit_test_clear_asm)
+	[--sp] = rets;
+	[--sp] = r1;
+	call ___raw_bit_clear_asm
+	r1 = [sp++];
+	r2 = 1;
+	r2 <<= r1;
+	r0 = r0 & r2;
+	cc = r0 == 0;
+	if cc jump 1f
+	r0 = 1;
+1:
+	rets = [sp++];
+	rts;
+ENDPROC(___raw_bit_test_clear_asm)
+
+/*
+ * r0 = ptr
+ * r1 = bitnr
+ *
+ * Test-and-toggle a bit in a 32bit word,
+ * and return the old bit value atomically.
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_bit_test_toggle_asm)
+	[--sp] = rets;
+	[--sp] = r1;
+	call ___raw_bit_toggle_asm
+	r1 = [sp++];
+	r2 = 1;
+	r2 <<= r1;
+	r0 = r0 & r2;
+	cc = r0 == 0;
+	if cc jump 1f
+	r0 = 1;
+1:
+	rets = [sp++];
+	rts;
+ENDPROC(___raw_bit_test_toggle_asm)
+
+/*
+ * r0 = ptr
+ * r1 = bitnr
+ *
+ * Test a bit in a 32bit word and return its value.
+ * We need this on this architecture in order to invalidate
+ * the local cache before testing.
+ *
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_bit_test_asm)
+	r2 = r1;
+	r1 = 1;
+	r1 <<= r2;
+	jump ___raw_atomic_test_asm
+ENDPROC(___raw_bit_test_asm)
+
+/*
+ * r0 = ptr
+ *
+ * Fetch and return an uncached 32bit value.
+ *
+ * Clobbers: r2:0, p1:0
+ */
+ENTRY(___raw_uncached_fetch_asm)
+	p1 = r0;
+	r1 = -L1_CACHE_BYTES;
+	r1 = r0 & r1;
+	p0 = r1;
+	flushinv[p0];
+	SSYNC(r2);
+	r0 = [p1];
+	rts;
+ENDPROC(___raw_uncached_fetch_asm)
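
Everything in this file funnels through the shared core lock, so higher-level atomic and bitop helpers on SMP are expected to be thin wrappers around these entry points. A hedged sketch of two such wrappers, assuming the usual convention that the C-visible symbols drop one leading underscore; the example_* names are illustrative only:

/* Sketch only: atomic_add_return()/set_bit() style wrappers over the raw ops. */
extern int __raw_atomic_update_asm(volatile int *ptr, int value);
extern int __raw_atomic_set_asm(volatile int *ptr, int value);

static inline int example_atomic_add_return(int i, volatile int *counter)
{
	/* ___raw_atomic_update_asm returns the new value, per its comment above. */
	return __raw_atomic_update_asm(counter, i);
}

static inline void example_set_bit(int nr, volatile unsigned long *addr)
{
	volatile int *word = (volatile int *)addr + (nr >> 5);

	__raw_atomic_set_asm(word, 1 << (nr & 31));
}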
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 8f40990..6880d1e 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -230,6 +230,19 @@
 };
 #endif
 
+static struct resource bfin_gpios_resources = {
+	.start = 0,
+	.end   = MAX_BLACKFIN_GPIOS - 1,
+	.flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device bfin_gpios_device = {
+	.name = "simple-gpio",
+	.id = -1,
+	.num_resources = 1,
+	.resource = &bfin_gpios_resources,
+};
+
 #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
 static struct resource isp1362_hcd_resources[] = {
 	{
@@ -287,23 +300,33 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir0_device = {
 	.name = "bfin_sir",
 	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
 #define PATA_INT	IRQ_PF46
@@ -382,7 +405,9 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
 #endif
 
 #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
@@ -400,6 +425,8 @@
 #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
 	&bfin_pata_device,
 #endif
+
+	&bfin_gpios_device,
 };
 
 static int __init cm_bf561_init(void)
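
Each BF561 board now describes its SIR port as a MEM/IRQ/DMA resource triplet (added above for cm_bf561 and repeated for the other boards below), so the bfin_sir driver can discover everything through the platform API instead of a board-specific header. A hedged sketch of the probe-side lookup; the function is illustrative and not the actual driver code:

/* Sketch only: how a probe routine would pick up the resources registered above. */
#include <linux/platform_device.h>

static int example_sir_probe(struct platform_device *pdev)
{
	struct resource *io  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	struct resource *dma = platform_get_resource(pdev, IORESOURCE_DMA, 0);

	if (!io || !irq || !dma)
		return -ENODEV;

	/* For the entries above: io->start is 0xFFC00400 (UART0 MMRs),
	 * irq->start is IRQ_UART0_RX and dma->start is CH_UART0_RX.
	 */
	return 0;
}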
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 50b4cdc..0e2178a 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -43,53 +43,42 @@
 /*
  * Name the Board for the /proc/cpuinfo
  */
-const char bfin_board_name[] = "ADDS-BF561-EZKIT";
-
-#define ISP1761_BASE       0x2C0F0000
-#define ISP1761_IRQ        IRQ_PF10
+const char bfin_board_name[] = "ADI BF561-EZKIT";
 
 #if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
-static struct resource bfin_isp1761_resources[] = {
-	{
-		.name	= "isp1761-regs",
-		.start  = ISP1761_BASE + 0x00000000,
-		.end    = ISP1761_BASE + 0x000fffff,
+#include <linux/usb/isp1760.h>
+static struct resource bfin_isp1760_resources[] = {
+	[0] = {
+		.start  = 0x2C0F0000,
+		.end    = 0x2C0F0000 + 0xfffff,
 		.flags  = IORESOURCE_MEM,
 	},
-	{
-		.start  = ISP1761_IRQ,
-		.end    = ISP1761_IRQ,
+	[1] = {
+		.start  = IRQ_PF10,
+		.end    = IRQ_PF10,
 		.flags  = IORESOURCE_IRQ,
 	},
 };
 
-static struct platform_device bfin_isp1761_device = {
-	.name           = "isp1761",
+static struct isp1760_platform_data isp1760_priv = {
+	.is_isp1761 = 0,
+	.port1_disable = 0,
+	.bus_width_16 = 1,
+	.port1_otg = 0,
+	.analog_oc = 0,
+	.dack_polarity_high = 0,
+	.dreq_polarity_high = 0,
+};
+
+static struct platform_device bfin_isp1760_device = {
+	.name           = "isp1760-hcd",
 	.id             = 0,
-	.num_resources  = ARRAY_SIZE(bfin_isp1761_resources),
-	.resource       = bfin_isp1761_resources,
+	.dev = {
+		.platform_data = &isp1760_priv,
+	},
+	.num_resources  = ARRAY_SIZE(bfin_isp1760_resources),
+	.resource       = bfin_isp1760_resources,
 };
-
-static struct platform_device *bfin_isp1761_devices[] = {
-	&bfin_isp1761_device,
-};
-
-int __init bfin_isp1761_init(void)
-{
-	unsigned int num_devices = ARRAY_SIZE(bfin_isp1761_devices);
-
-	printk(KERN_INFO "%s(): registering device resources\n", __func__);
-	set_irq_type(ISP1761_IRQ, IRQF_TRIGGER_FALLING);
-
-	return platform_add_devices(bfin_isp1761_devices, num_devices);
-}
-
-void __exit bfin_isp1761_exit(void)
-{
-	platform_device_unregister(&bfin_isp1761_device);
-}
-
-arch_initcall(bfin_isp1761_init);
 #endif
 
 #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
@@ -221,23 +210,33 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-static struct resource bfin_sir_resources[] = {
 #ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
 	{
 		.start = 0xFFC00400,
 		.end = 0xFFC004FF,
 		.flags = IORESOURCE_MEM,
 	},
-#endif
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
-static struct platform_device bfin_sir_device = {
+static struct platform_device bfin_sir0_device = {
 	.name = "bfin_sir",
 	.id = 0,
-	.num_resources = ARRAY_SIZE(bfin_sir_resources),
-	.resource = bfin_sir_resources,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
 };
 #endif
+#endif
 
 #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
 static struct mtd_partition ezkit_partitions[] = {
@@ -449,6 +448,10 @@
 	&net2272_bfin_device,
 #endif
 
+#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
+	&bfin_isp1760_device,
+#endif
+
 #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
 	&bfin_spi0_device,
 #endif
@@ -458,7 +461,9 @@
 #endif
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
-	&bfin_sir_device,
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
 #endif
 
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
diff --git a/arch/blackfin/mach-bf561/boards/generic_board.c b/arch/blackfin/mach-bf561/boards/generic_board.c
index 2faa007..0ba366a 100644
--- a/arch/blackfin/mach-bf561/boards/generic_board.c
+++ b/arch/blackfin/mach-bf561/boards/generic_board.c
@@ -62,10 +62,45 @@
 };
 #endif
 
+#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
+	{
+		.start = 0xFFC00400,
+		.end = 0xFFC004FF,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
+#endif
+#endif
+
 static struct platform_device *generic_board_devices[] __initdata = {
 #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
 	&smc91x_device,
 #endif
+
+#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#endif
 };
 
 static int __init generic_board_init(void)
diff --git a/arch/blackfin/mach-bf561/boards/tepla.c b/arch/blackfin/mach-bf561/boards/tepla.c
index c9174b3..6f77dbe 100644
--- a/arch/blackfin/mach-bf561/boards/tepla.c
+++ b/arch/blackfin/mach-bf561/boards/tepla.c
@@ -44,8 +44,42 @@
 	.resource      = smc91x_resources,
 };
 
+#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
+	{
+		.start = 0xFFC00400,
+		.end = 0xFFC004FF,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
+#endif
+#endif
+
 static struct platform_device *tepla_devices[] __initdata = {
 	&smc91x_device,
+#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#endif
 };
 
 static int __init tepla_init(void)
diff --git a/arch/blackfin/mach-bf561/dma.c b/arch/blackfin/mach-bf561/dma.c
index 24415eb..42b0037 100644
--- a/arch/blackfin/mach-bf561/dma.c
+++ b/arch/blackfin/mach-bf561/dma.c
@@ -31,7 +31,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_BLACKFIN_DMA_CHANNEL] = {
+struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA1_0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf561/head.S b/arch/blackfin/mach-bf561/head.S
deleted file mode 100644
index 31a777a..0000000
--- a/arch/blackfin/mach-bf561/head.S
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * File:         arch/blackfin/mach-bf561/head.S
- * Based on:     arch/blackfin/mach-bf533/head.S
- * Author:
- *
- * Created:
- * Description:  BF561 startup file
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/blackfin.h>
-#ifdef CONFIG_BFIN_KERNEL_CLOCK
-#include <asm/clocks.h>
-#include <mach/mem_init.h>
-#endif
-
-.section .l1.text
-#ifdef CONFIG_BFIN_KERNEL_CLOCK
-ENTRY(_start_dma_code)
-	p0.h = hi(SICA_IWR0);
-	p0.l = lo(SICA_IWR0);
-	r0.l = 0x1;
-	[p0] = r0;
-	SSYNC;
-
-	/*
-	 *  Set PLL_CTL
-	 *   - [14:09] = MSEL[5:0] : CLKIN / VCO multiplication factors
-	 *   - [8]     = BYPASS    : BYPASS the PLL, run CLKIN into CCLK/SCLK
-	 *   - [7]     = output delay (add 200ps of delay to mem signals)
-	 *   - [6]     = input delay (add 200ps of input delay to mem signals)
-	 *   - [5]     = PDWN      : 1=All Clocks off
-	 *   - [3]     = STOPCK    : 1=Core Clock off
-	 *   - [1]     = PLL_OFF   : 1=Disable Power to PLL
-	 *   - [0]     = DF        : 1=Pass CLKIN/2 to PLL / 0=Pass CLKIN to PLL
-	 *   all other bits set to zero
-	 */
-
-	p0.h = hi(PLL_LOCKCNT);
-	p0.l = lo(PLL_LOCKCNT);
-	r0 = 0x300(Z);
-	w[p0] = r0.l;
-	ssync;
-
-	P2.H = hi(EBIU_SDGCTL);
-	P2.L = lo(EBIU_SDGCTL);
-	R0 = [P2];
-	BITSET (R0, 24);
-	[P2] = R0;
-	SSYNC;
-
-	r0 = CONFIG_VCO_MULT & 63;       /* Load the VCO multiplier         */
-	r0 = r0 << 9;                    /* Shift it over,                  */
-	r1 = CLKIN_HALF;                 /* Do we need to divide CLKIN by 2?*/
-	r0 = r1 | r0;
-	r1 = PLL_BYPASS;                 /* Bypass the PLL?                 */
-	r1 = r1 << 8;                    /* Shift it over                   */
-	r0 = r1 | r0;                    /* add them all together           */
-#ifdef ANOMALY_05000265
-	BITSET(r0, 15);                  /* Add 250 mV of hysteresis to SPORT input pins */
-#endif
-
-	p0.h = hi(PLL_CTL);
-	p0.l = lo(PLL_CTL);              /* Load the address                */
-	cli r2;                          /* Disable interrupts              */
-	ssync;
-	w[p0] = r0.l;                    /* Set the value                   */
-	idle;                            /* Wait for the PLL to stablize    */
-	sti r2;                          /* Enable interrupts               */
-
-.Lcheck_again:
-	p0.h = hi(PLL_STAT);
-	p0.l = lo(PLL_STAT);
-	R0 = W[P0](Z);
-	CC = BITTST(R0,5);
-	if ! CC jump .Lcheck_again;
-
-	/* Configure SCLK & CCLK Dividers */
-	r0 = (CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV);
-	p0.h = hi(PLL_DIV);
-	p0.l = lo(PLL_DIV);
-	w[p0] = r0.l;
-	ssync;
-
-	p0.l = lo(EBIU_SDRRC);
-	p0.h = hi(EBIU_SDRRC);
-	r0 = mem_SDRRC;
-	w[p0] = r0.l;
-	ssync;
-
-	P2.H = hi(EBIU_SDGCTL);
-	P2.L = lo(EBIU_SDGCTL);
-	R0 = [P2];
-	BITCLR (R0, 24);
-	p0.h = hi(EBIU_SDSTAT);
-	p0.l = lo(EBIU_SDSTAT);
-	r2.l = w[p0];
-	cc = bittst(r2,3);
-	if !cc jump .Lskip;
-	NOP;
-	BITSET (R0, 23);
-.Lskip:
-	[P2] = R0;
-	SSYNC;
-
-	R0.L = lo(mem_SDGCTL);
-	R0.H = hi(mem_SDGCTL);
-	R1 = [p2];
-	R1 = R1 | R0;
-	[P2] = R1;
-	SSYNC;
-
-	RTS;
-ENDPROC(_start_dma_code)
-#endif /* CONFIG_BFIN_KERNEL_CLOCK */
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index 22990df..1a9e175 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -7,7 +7,7 @@
  */
 
 /* This file should be up to date with:
- *  - Revision P, 02/08/2008; ADSP-BF561 Blackfin Processor Anomaly List
+ *  - Revision Q, 11/07/2008; ADSP-BF561 Blackfin Processor Anomaly List
  */
 
 #ifndef _MACH_ANOMALY_H_
@@ -264,6 +264,18 @@
 #define ANOMALY_05000371 (1)
 /* Level-Sensitive External GPIO Wakeups May Cause Indefinite Stall */
 #define ANOMALY_05000403 (1)
+/* TESTSET Instruction Causes Data Corruption with Writeback Data Cache Enabled */
+#define ANOMALY_05000412 (1)
+/* Speculative Fetches Can Cause Undesired External FIFO Operations */
+#define ANOMALY_05000416 (1)
+/* Multichannel SPORT Channel Misalignment Under Specific Configuration */
+#define ANOMALY_05000425 (1)
+/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */
+#define ANOMALY_05000426 (1)
+/* Lost/Corrupted L2/L3 Memory Write after Speculative L2 Memory Read by Core B */
+#define ANOMALY_05000428 (__SILICON_REVISION__ > 3)
+/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
+#define ANOMALY_05000443 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000158 (0)
@@ -272,5 +284,7 @@
 #define ANOMALY_05000311 (0)
 #define ANOMALY_05000353 (1)
 #define ANOMALY_05000386 (1)
+#define ANOMALY_05000432 (0)
+#define ANOMALY_05000435 (0)
 
 #endif
diff --git a/arch/blackfin/mach-bf561/include/mach/bf561.h b/arch/blackfin/mach-bf561/include/mach/bf561.h
index 18b1b3a..9968362 100644
--- a/arch/blackfin/mach-bf561/include/mach/bf561.h
+++ b/arch/blackfin/mach-bf561/include/mach/bf561.h
@@ -215,7 +215,7 @@
 #endif
 
 #ifndef CPU
-#error Unknown CPU type - This kernel doesn't seem to be configured properly
+#error "Unknown CPU type - This kernel doesn't seem to be configured properly"
 #endif
 
 #endif				/* __MACH_BF561_H__  */
diff --git a/arch/blackfin/mach-bf561/include/mach/bfin_sir.h b/arch/blackfin/mach-bf561/include/mach/bfin_sir.h
deleted file mode 100644
index 9bb87e9..0000000
--- a/arch/blackfin/mach-bf561/include/mach/bfin_sir.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Blackfin Infra-red Driver
- *
- * Copyright 2006-2008 Analog Devices Inc.
- *
- * Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
- *
- */
-
-#include <linux/serial.h>
-#include <asm/dma.h>
-#include <asm/portmux.h>
-
-#define SIR_UART_GET_CHAR(port)   bfin_read16((port)->membase + OFFSET_RBR)
-#define SIR_UART_GET_DLL(port)    bfin_read16((port)->membase + OFFSET_DLL)
-#define SIR_UART_GET_IER(port)    bfin_read16((port)->membase + OFFSET_IER)
-#define SIR_UART_GET_DLH(port)    bfin_read16((port)->membase + OFFSET_DLH)
-#define SIR_UART_GET_IIR(port)    bfin_read16((port)->membase + OFFSET_IIR)
-#define SIR_UART_GET_LCR(port)    bfin_read16((port)->membase + OFFSET_LCR)
-#define SIR_UART_GET_GCTL(port)   bfin_read16((port)->membase + OFFSET_GCTL)
-
-#define SIR_UART_PUT_CHAR(port, v) bfin_write16(((port)->membase + OFFSET_THR), v)
-#define SIR_UART_PUT_DLL(port, v)  bfin_write16(((port)->membase + OFFSET_DLL), v)
-#define SIR_UART_PUT_IER(port, v)  bfin_write16(((port)->membase + OFFSET_IER), v)
-#define SIR_UART_PUT_DLH(port, v)  bfin_write16(((port)->membase + OFFSET_DLH), v)
-#define SIR_UART_PUT_LCR(port, v)  bfin_write16(((port)->membase + OFFSET_LCR), v)
-#define SIR_UART_PUT_GCTL(port, v) bfin_write16(((port)->membase + OFFSET_GCTL), v)
-
-#ifdef CONFIG_SIR_BFIN_DMA
-struct dma_rx_buf {
-	char *buf;
-	int head;
-	int tail;
-	};
-#endif /* CONFIG_SIR_BFIN_DMA */
-
-struct bfin_sir_port {
-	unsigned char __iomem   *membase;
-	unsigned int            irq;
-	unsigned int            lsr;
-	unsigned long           clk;
-	struct net_device       *dev;
-#ifdef CONFIG_SIR_BFIN_DMA
-	int                     tx_done;
-	struct dma_rx_buf       rx_dma_buf;
-	struct timer_list       rx_dma_timer;
-	int                     rx_dma_nrows;
-#endif /* CONFIG_SIR_BFIN_DMA */
-	unsigned int            tx_dma_channel;
-	unsigned int            rx_dma_channel;
-};
-
-struct bfin_sir_port sir_ports[BFIN_UART_NR_PORTS];
-
-struct bfin_sir_port_res {
-	unsigned long   base_addr;
-	int             irq;
-	unsigned int    rx_dma_channel;
-	unsigned int    tx_dma_channel;
-};
-
-struct bfin_sir_port_res bfin_sir_port_resource[] = {
-#ifdef CONFIG_BFIN_SIR0
-	{
-	0xFFC00400,
-	IRQ_UART_RX,
-	CH_UART_RX,
-	CH_UART_TX,
-	},
-#endif
-};
-
-int nr_sirs = ARRAY_SIZE(bfin_sir_port_resource);
-
-struct bfin_sir_self {
-	struct bfin_sir_port    *sir_port;
-	spinlock_t              lock;
-	unsigned int            open;
-	int                     speed;
-	int                     newspeed;
-
-	struct sk_buff          *txskb;
-	struct sk_buff          *rxskb;
-	struct net_device_stats stats;
-	struct device           *dev;
-	struct irlap_cb         *irlap;
-	struct qos_info         qos;
-
-	iobuff_t                tx_buff;
-	iobuff_t                rx_buff;
-
-	struct work_struct      work;
-	int                     mtt;
-};
-
-static inline unsigned int SIR_UART_GET_LSR(struct bfin_sir_port *port)
-{
-	unsigned int lsr = bfin_read16(port->membase + OFFSET_LSR);
-	port->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | port->lsr;
-}
-
-static inline void SIR_UART_CLEAR_LSR(struct bfin_sir_port *port)
-{
-	port->lsr = 0;
-	bfin_read16(port->membase + OFFSET_LSR);
-}
-
-#define DRIVER_NAME "bfin_sir"
-
-static int bfin_sir_hw_init(void)
-{
-	int ret = -ENODEV;
-#ifdef CONFIG_BFIN_SIR0
-	ret = peripheral_request(P_UART0_TX, DRIVER_NAME);
-	if (ret)
-		return ret;
-	ret = peripheral_request(P_UART0_RX, DRIVER_NAME);
-	if (ret)
-		return ret;
-#endif
-	return ret;
-}
diff --git a/arch/blackfin/mach-bf561/include/mach/blackfin.h b/arch/blackfin/mach-bf561/include/mach/blackfin.h
index 0ea8666..f79f6626 100644
--- a/arch/blackfin/mach-bf561/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf561/include/mach/blackfin.h
@@ -66,8 +66,12 @@
 
 #define bfin_read_SIC_IMASK(x)		bfin_read32(SICA_IMASK0 + (x << 2))
 #define bfin_write_SIC_IMASK(x, val)	bfin_write32((SICA_IMASK0 + (x << 2)), val)
+#define bfin_read_SICB_IMASK(x)		bfin_read32(SICB_IMASK0 + (x << 2))
+#define bfin_write_SICB_IMASK(x, val)	bfin_write32((SICB_IMASK0 + (x << 2)), val)
 #define bfin_read_SIC_ISR(x)		bfin_read32(SICA_ISR0 + (x << 2))
 #define bfin_write_SIC_ISR(x, val)	bfin_write32((SICA_ISR0 + (x << 2)), val)
+#define bfin_read_SICB_ISR(x)		bfin_read32(SICB_ISR0 + (x << 2))
+#define bfin_write_SICB_ISR(x, val)	bfin_write32((SICB_ISR0 + (x << 2)), val)
 
 #define BFIN_UART_NR_PORTS      1
 
diff --git a/arch/blackfin/mach-bf561/include/mach/cdefBF561.h b/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
index c14d634..95d609f 100644
--- a/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
@@ -39,65 +39,15 @@
 /*include core specific register pointer definitions*/
 #include <asm/cdef_LPBlackfin.h>
 
-#include <asm/system.h>
-
 /*********************************************************************************** */
 /* System MMR Register Map */
 /*********************************************************************************** */
 
 /* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
 #define bfin_read_PLL_CTL()                  bfin_read16(PLL_CTL)
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	local_irq_save(flags);
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SICA_IWR0);
-	iwr1 = bfin_read32(SICA_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SICA_IWR0, IWR_ENABLE(0));
-	bfin_write32(SICA_IWR1, 0);
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SICA_IWR0, iwr0);
-	bfin_write32(SICA_IWR1, iwr1);
-	local_irq_restore(flags);
-}
 #define bfin_read_PLL_DIV()                  bfin_read16(PLL_DIV)
 #define bfin_write_PLL_DIV(val)              bfin_write16(PLL_DIV,val)
 #define bfin_read_VR_CTL()                   bfin_read16(VR_CTL)
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	local_irq_save(flags);
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SICA_IWR0);
-	iwr1 = bfin_read32(SICA_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SICA_IWR0, IWR_ENABLE(0));
-	bfin_write32(SICA_IWR1, 0);
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SICA_IWR0, iwr0);
-	bfin_write32(SICA_IWR1, iwr1);
-	local_irq_restore(flags);
-}
 #define bfin_read_PLL_STAT()                 bfin_read16(PLL_STAT)
 #define bfin_write_PLL_STAT(val)             bfin_write16(PLL_STAT,val)
 #define bfin_read_PLL_LOCKCNT()              bfin_read16(PLL_LOCKCNT)
@@ -1576,4 +1526,57 @@
 #define bfin_read_MDMA_D0_START_ADDR()  bfin_read_MDMA1_D0_START_ADDR()
 #define bfin_write_MDMA_D0_START_ADDR(val) bfin_write_MDMA1_D0_START_ADDR(val)
 
+/* These need to be last due to the cdef/linux inter-dependencies */
+#include <asm/irq.h>
+
+/* Writing to PLL_CTL initiates a PLL relock sequence. */
+static __inline__ void bfin_write_PLL_CTL(unsigned int val)
+{
+	unsigned long flags, iwr0, iwr1;
+
+	if (val == bfin_read_PLL_CTL())
+		return;
+
+	local_irq_save_hw(flags);
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	iwr0 = bfin_read32(SICA_IWR0);
+	iwr1 = bfin_read32(SICA_IWR1);
+	/* Only allow PLL Wakeup */
+	bfin_write32(SICA_IWR0, IWR_ENABLE(0));
+	bfin_write32(SICA_IWR1, 0);
+
+	bfin_write16(PLL_CTL, val);
+	SSYNC();
+	asm("IDLE;");
+
+	bfin_write32(SICA_IWR0, iwr0);
+	bfin_write32(SICA_IWR1, iwr1);
+	local_irq_restore_hw(flags);
+}
+
+/* Writing to VR_CTL initiates a PLL relock sequence. */
+static __inline__ void bfin_write_VR_CTL(unsigned int val)
+{
+	unsigned long flags, iwr0, iwr1;
+
+	if (val == bfin_read_VR_CTL())
+		return;
+
+	local_irq_save_hw(flags);
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	iwr0 = bfin_read32(SICA_IWR0);
+	iwr1 = bfin_read32(SICA_IWR1);
+	/* Only allow PLL Wakeup */
+	bfin_write32(SICA_IWR0, IWR_ENABLE(0));
+	bfin_write32(SICA_IWR1, 0);
+
+	bfin_write16(VR_CTL, val);
+	SSYNC();
+	asm("IDLE;");
+
+	bfin_write32(SICA_IWR0, iwr0);
+	bfin_write32(SICA_IWR1, iwr1);
+	local_irq_restore_hw(flags);
+}
+
 #endif				/* _CDEF_BF561_H */
diff --git a/arch/blackfin/mach-bf561/include/mach/defBF561.h b/arch/blackfin/mach-bf561/include/mach/defBF561.h
index 4eca202..d7c5097 100644
--- a/arch/blackfin/mach-bf561/include/mach/defBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/defBF561.h
@@ -912,6 +912,9 @@
 #define ACTIVE_PLLDISABLED	0x0004	/* Processor In Active Mode With PLL Disabled   */
 #define	PLL_LOCKED			0x0020	/* PLL_LOCKCNT Has Been Reached                                 */
 
+/* SICA_SYSCR Masks */
+#define COREB_SRAM_INIT		0x0020
+
 /* SWRST Mask */
 #define SYSTEM_RESET           0x0007	/* Initiates a system software reset */
 #define DOUBLE_FAULT_A         0x0008	/* Core A Double Fault Causes Reset */
diff --git a/arch/blackfin/mach-bf561/include/mach/dma.h b/arch/blackfin/mach-bf561/include/mach/dma.h
index 8bc46cd..13647c7 100644
--- a/arch/blackfin/mach-bf561/include/mach/dma.h
+++ b/arch/blackfin/mach-bf561/include/mach/dma.h
@@ -1,13 +1,17 @@
-/*****************************************************************************
-*
-*        BF-533/2/1 Specific Declarations
-*
-****************************************************************************/
+/* mach/dma.h - arch-specific DMA defines
+ *
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
 
 #ifndef _MACH_DMA_H_
 #define _MACH_DMA_H_
 
-#define MAX_BLACKFIN_DMA_CHANNEL 36
+#define MAX_DMA_CHANNELS 36
+
+/* [#4267] IMDMA channels have no PERIPHERAL_MAP MMR */
+#define MAX_DMA_SUSPEND_CHANNELS 32
 
 #define CH_PPI0			0
 #define CH_PPI			(CH_PPI0)
diff --git a/arch/blackfin/mach-bf561/include/mach/gpio.h b/arch/blackfin/mach-bf561/include/mach/gpio.h
new file mode 100644
index 0000000..7882f79
--- /dev/null
+++ b/arch/blackfin/mach-bf561/include/mach/gpio.h
@@ -0,0 +1,68 @@
+/*
+ * File: arch/blackfin/mach-bf561/include/mach/gpio.h
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Copyright (C) 2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+
+#ifndef _MACH_GPIO_H_
+#define _MACH_GPIO_H_
+
+#define MAX_BLACKFIN_GPIOS 48
+
+#define	GPIO_PF0	0
+#define	GPIO_PF1	1
+#define	GPIO_PF2	2
+#define	GPIO_PF3	3
+#define	GPIO_PF4	4
+#define	GPIO_PF5	5
+#define	GPIO_PF6	6
+#define	GPIO_PF7	7
+#define	GPIO_PF8	8
+#define	GPIO_PF9	9
+#define	GPIO_PF10	10
+#define	GPIO_PF11	11
+#define	GPIO_PF12	12
+#define	GPIO_PF13	13
+#define	GPIO_PF14	14
+#define	GPIO_PF15	15
+#define	GPIO_PF16	16
+#define	GPIO_PF17	17
+#define	GPIO_PF18	18
+#define	GPIO_PF19	19
+#define	GPIO_PF20	20
+#define	GPIO_PF21	21
+#define	GPIO_PF22	22
+#define	GPIO_PF23	23
+#define	GPIO_PF24	24
+#define	GPIO_PF25	25
+#define	GPIO_PF26	26
+#define	GPIO_PF27	27
+#define	GPIO_PF28	28
+#define	GPIO_PF29	29
+#define	GPIO_PF30	30
+#define	GPIO_PF31	31
+#define	GPIO_PF32	32
+#define	GPIO_PF33	33
+#define	GPIO_PF34	34
+#define	GPIO_PF35	35
+#define	GPIO_PF36	36
+#define	GPIO_PF37	37
+#define	GPIO_PF38	38
+#define	GPIO_PF39	39
+#define	GPIO_PF40	40
+#define	GPIO_PF41	41
+#define	GPIO_PF42	42
+#define	GPIO_PF43	43
+#define	GPIO_PF44	44
+#define	GPIO_PF45	45
+#define	GPIO_PF46	46
+#define	GPIO_PF47	47
+
+#define PORT_FIO0 GPIO_0
+#define PORT_FIO1 GPIO_16
+#define PORT_FIO2 GPIO_32
+
+#endif /* _MACH_GPIO_H_ */
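
The new header enumerates all 48 BF561 programmable flag pins so generic GPIO consumers can name them directly. A hedged usage sketch with the standard gpiolib calls; the pin choice and label string are arbitrary examples:

/* Sketch only: requesting one of the pins defined above through the GPIO API. */
#include <linux/gpio.h>

static int example_led_init(void)
{
	int ret = gpio_request(GPIO_PF5, "example-led");

	if (ret)
		return ret;

	return gpio_direction_output(GPIO_PF5, 0);	/* drive the pin low initially */
}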
diff --git a/arch/blackfin/mach-bf561/include/mach/mem_init.h b/arch/blackfin/mach-bf561/include/mach/mem_init.h
deleted file mode 100644
index e163260..0000000
--- a/arch/blackfin/mach-bf561/include/mach/mem_init.h
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * File:         include/asm-blackfin/mach-bf561/mem_init.h
- * Based on:
- * Author:
- *
- * Created:
- * Description:
- *
- * Rev:
- *
- * Modified:
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#if (CONFIG_MEM_MT48LC16M16A2TG_75 || CONFIG_MEM_MT48LC64M4A2FB_7E || CONFIG_MEM_GENERIC_BOARD || CONFIG_MEM_MT48LC8M32B2B5_7)
-#if (CONFIG_SCLK_HZ > 119402985)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_7
-#define SDRAM_tRAS_num  7
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 104477612) && (CONFIG_SCLK_HZ <= 119402985)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_6
-#define SDRAM_tRAS_num  6
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 89552239) && (CONFIG_SCLK_HZ <= 104477612)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_5
-#define SDRAM_tRAS_num  5
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 74626866) && (CONFIG_SCLK_HZ <= 89552239)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_4
-#define SDRAM_tRAS_num  4
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 66666667) && (CONFIG_SCLK_HZ <= 74626866)
-#define SDRAM_tRP       TRP_2
-#define SDRAM_tRP_num   2
-#define SDRAM_tRAS      TRAS_3
-#define SDRAM_tRAS_num  3
-#define SDRAM_tRCD      TRCD_2
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 59701493) && (CONFIG_SCLK_HZ <= 66666667)
-#define SDRAM_tRP       TRP_1
-#define SDRAM_tRP_num   1
-#define SDRAM_tRAS      TRAS_4
-#define SDRAM_tRAS_num  3
-#define SDRAM_tRCD      TRCD_1
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 44776119) && (CONFIG_SCLK_HZ <= 59701493)
-#define SDRAM_tRP       TRP_1
-#define SDRAM_tRP_num   1
-#define SDRAM_tRAS      TRAS_3
-#define SDRAM_tRAS_num  3
-#define SDRAM_tRCD      TRCD_1
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ > 29850746) && (CONFIG_SCLK_HZ <= 44776119)
-#define SDRAM_tRP       TRP_1
-#define SDRAM_tRP_num   1
-#define SDRAM_tRAS      TRAS_2
-#define SDRAM_tRAS_num  2
-#define SDRAM_tRCD      TRCD_1
-#define SDRAM_tWR       TWR_2
-#endif
-#if (CONFIG_SCLK_HZ <= 29850746)
-#define SDRAM_tRP       TRP_1
-#define SDRAM_tRP_num   1
-#define SDRAM_tRAS      TRAS_1
-#define SDRAM_tRAS_num  1
-#define SDRAM_tRCD      TRCD_1
-#define SDRAM_tWR       TWR_2
-#endif
-#endif
-
-#if (CONFIG_MEM_MT48LC16M16A2TG_75)
-  /*SDRAM INFORMATION: */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-#if (CONFIG_MEM_MT48LC64M4A2FB_7E)
-  /*SDRAM INFORMATION: */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-#if (CONFIG_MEM_MT48LC8M32B2B5_7)
-  /*SDRAM INFORMATION: */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   4096	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-#if (CONFIG_MEM_GENERIC_BOARD)
-  /*SDRAM INFORMATION: Modify this for your board */
-#define SDRAM_Tref  64		/* Refresh period in milliseconds   */
-#define SDRAM_NRA   8192	/* Number of row addresses in SDRAM */
-#define SDRAM_CL    CL_3
-#endif
-
-/* Equation from section 17 (p17-46) of BF533 HRM */
-#define mem_SDRRC       (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num)
-
-/* Enable SCLK Out */
-#define mem_SDGCTL        (SCTLE | SDRAM_CL | SDRAM_tRAS | SDRAM_tRP | SDRAM_tRCD | SDRAM_tWR | PSS)
-
-#if defined CONFIG_CLKIN_HALF
-#define CLKIN_HALF       1
-#else
-#define CLKIN_HALF       0
-#endif
-
-#if defined CONFIG_PLL_BYPASS
-#define PLL_BYPASS      1
-#else
-#define PLL_BYPASS       0
-#endif
-
-/***************************************Currently Not Being Used *********************************/
-#define flash_EBIU_AMBCTL_WAT  ((CONFIG_FLASH_SPEED_BWAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_RAT  ((CONFIG_FLASH_SPEED_BRAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_HT   ((CONFIG_FLASH_SPEED_BHT  * 4) / (4000000000 / CONFIG_SCLK_HZ))
-#define flash_EBIU_AMBCTL_ST   ((CONFIG_FLASH_SPEED_BST  * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_TT   ((CONFIG_FLASH_SPEED_BTT  * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-
-#if (flash_EBIU_AMBCTL_TT > 3)
-#define flash_EBIU_AMBCTL0_TT   B0TT_4
-#endif
-#if (flash_EBIU_AMBCTL_TT == 3)
-#define flash_EBIU_AMBCTL0_TT   B0TT_3
-#endif
-#if (flash_EBIU_AMBCTL_TT == 2)
-#define flash_EBIU_AMBCTL0_TT   B0TT_2
-#endif
-#if (flash_EBIU_AMBCTL_TT < 2)
-#define flash_EBIU_AMBCTL0_TT   B0TT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_ST > 3)
-#define flash_EBIU_AMBCTL0_ST   B0ST_4
-#endif
-#if (flash_EBIU_AMBCTL_ST == 3)
-#define flash_EBIU_AMBCTL0_ST   B0ST_3
-#endif
-#if (flash_EBIU_AMBCTL_ST == 2)
-#define flash_EBIU_AMBCTL0_ST   B0ST_2
-#endif
-#if (flash_EBIU_AMBCTL_ST < 2)
-#define flash_EBIU_AMBCTL0_ST   B0ST_1
-#endif
-
-#if (flash_EBIU_AMBCTL_HT > 2)
-#define flash_EBIU_AMBCTL0_HT   B0HT_3
-#endif
-#if (flash_EBIU_AMBCTL_HT == 2)
-#define flash_EBIU_AMBCTL0_HT   B0HT_2
-#endif
-#if (flash_EBIU_AMBCTL_HT == 1)
-#define flash_EBIU_AMBCTL0_HT   B0HT_1
-#endif
-#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT == 0)
-#define flash_EBIU_AMBCTL0_HT   B0HT_0
-#endif
-#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT != 0)
-#define flash_EBIU_AMBCTL0_HT   B0HT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_WAT > 14)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_15
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 14)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_14
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 13)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_13
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 12)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_12
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 11)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_11
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 10)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_10
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 9)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_9
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 8)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_8
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 7)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_7
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 6)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_6
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 5)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_5
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 4)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_4
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 3)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_3
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 2)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_2
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 1)
-#define flash_EBIU_AMBCTL0_WAT  B0WAT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_RAT > 14)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_15
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 14)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_14
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 13)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_13
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 12)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_12
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 11)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_11
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 10)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_10
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 9)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_9
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 8)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_8
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 7)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_7
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 6)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_6
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 5)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_5
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 4)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_4
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 3)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_3
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 2)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_2
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 1)
-#define flash_EBIU_AMBCTL0_RAT  B0RAT_1
-#endif
-
-#define flash_EBIU_AMBCTL0  \
-	(flash_EBIU_AMBCTL0_WAT | flash_EBIU_AMBCTL0_RAT | flash_EBIU_AMBCTL0_HT | \
-	 flash_EBIU_AMBCTL0_ST | flash_EBIU_AMBCTL0_TT | CONFIG_FLASH_SPEED_RDYEN)
diff --git a/arch/blackfin/mach-bf561/include/mach/mem_map.h b/arch/blackfin/mach-bf561/include/mach/mem_map.h
index f1d4c06..419dffd 100644
--- a/arch/blackfin/mach-bf561/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf561/include/mach/mem_map.h
@@ -85,4 +85,84 @@
 #define L1_SCRATCH_START	COREA_L1_SCRATCH_START
 #define L1_SCRATCH_LENGTH	0x1000
 
+#ifdef __ASSEMBLY__
+
+/*
+ * The following macros both return the address of the PDA for the
+ * current core.
+ *
+ * In its first safe (and hairy) form, the macro neither clobbers any
+ * register aside from the output Preg, nor uses the stack, since it
+ * could be called with an invalid stack pointer, or with the current
+ * stack space not covered by any CPLB (e.g. early exception handling).
+ *
+ * The constraints on the second form are a bit relaxed, and the code
+ * is allowed to use the specified Dreg for determining the PDA
+ * address to be returned into Preg.
+ */
+#ifdef CONFIG_SMP
+#define GET_PDA_SAFE(preg)		\
+	preg.l = lo(DSPID);		\
+	preg.h = hi(DSPID);		\
+	preg = [preg];			\
+	preg = preg << 2;		\
+	preg = preg << 2;		\
+	preg = preg << 2;		\
+	preg = preg << 2;		\
+	preg = preg << 2;		\
+	preg = preg << 2;		\
+	preg = preg << 2;		\
+	preg = preg << 2;		\
+	preg = preg << 2;		\
+	preg = preg << 2;		\
+	preg = preg << 2;		\
+	preg = preg << 2;		\
+	if cc jump 2f;			\
+	cc = preg == 0x0;		\
+	preg.l = _cpu_pda;		\
+	preg.h = _cpu_pda;		\
+	if !cc jump 3f;			\
+1:					\
+	/* preg = 0x0; */		\
+	cc = !cc; /* restore cc to 0 */	\
+	jump 4f;			\
+2:					\
+	cc = preg == 0x0;		\
+	preg.l = _cpu_pda;		\
+	preg.h = _cpu_pda;		\
+	if cc jump 4f;			\
+	/* preg = 0x1000000; */		\
+	cc = !cc; /* restore cc to 1 */	\
+3:					\
+	preg = [preg];			\
+4:
+
+#define GET_PDA(preg, dreg)		\
+	preg.l = lo(DSPID);		\
+	preg.h = hi(DSPID);		\
+	dreg = [preg];			\
+	preg.l = _cpu_pda;		\
+	preg.h = _cpu_pda;		\
+	cc = bittst(dreg, 0);		\
+	if !cc jump 1f;			\
+	preg = [preg];			\
+1:					\
+
+#define GET_CPUID(preg, dreg)		\
+	preg.l = lo(DSPID);		\
+	preg.h = hi(DSPID);		\
+	dreg = [preg];			\
+	dreg = ROT dreg BY -1;		\
+	dreg = CC;
+
+#else
+#define GET_PDA_SAFE(preg)		\
+	preg.l = _cpu_pda;		\
+	preg.h = _cpu_pda;
+
+#define GET_PDA(preg, dreg)	GET_PDA_SAFE(preg)
+#endif /* CONFIG_SMP */
+
+#endif /* __ASSEMBLY__ */
+
 #endif				/* _MEM_MAP_533_H_ */
diff --git a/arch/blackfin/mach-bf561/include/mach/smp.h b/arch/blackfin/mach-bf561/include/mach/smp.h
new file mode 100644
index 0000000..f9e65eb
--- /dev/null
+++ b/arch/blackfin/mach-bf561/include/mach/smp.h
@@ -0,0 +1,22 @@
+#ifndef _MACH_BF561_SMP
+#define _MACH_BF561_SMP
+
+struct task_struct;
+
+void platform_init_cpus(void);
+
+void platform_prepare_cpus(unsigned int max_cpus);
+
+int platform_boot_secondary(unsigned int cpu, struct task_struct *idle);
+
+void platform_secondary_init(unsigned int cpu);
+
+void platform_request_ipi(int (*handler)(int, void *));
+
+void platform_send_ipi(cpumask_t callmap);
+
+void platform_send_ipi_cpu(unsigned int cpu);
+
+void platform_clear_ipi(unsigned int cpu);
+
+#endif /* !_MACH_BF561_SMP */
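The header only lists prototypes, so the intended call order is easy to miss. Below is a rough, hedged sketch of how generic SMP setup code could drive these hooks; the bf561_* function names and include paths are assumptions for illustration only, and just the platform_*() calls come from this header.

	#include <linux/init.h>
	#include <linux/interrupt.h>
	#include <linux/smp.h>
	#include <mach/smp.h>		/* path assumed; the header added above */

	/* Hypothetical IPI handler passed to platform_request_ipi(). */
	static int bf561_ipi_handler(int irq, void *dev_id)
	{
		platform_clear_ipi(smp_processor_id());	/* ack the supplemental IRQ */
		/* ... dispatch cross-core messages here ... */
		return IRQ_HANDLED;
	}

	void __init bf561_smp_prepare(unsigned int max_cpus)
	{
		platform_init_cpus();			/* mark CoreA/CoreB as possible */
		platform_prepare_cpus(max_cpus);	/* copy the CoreB trampoline */
		platform_request_ipi(bf561_ipi_handler);
	}

	int bf561_boot_one_core(unsigned int cpu, struct task_struct *idle)
	{
		/* kick CoreB; platform_secondary_init(cpu) then runs on CoreB itself */
		return platform_boot_secondary(cpu, idle);
	}

Cross-core callers would then poke the other core with platform_send_ipi_cpu() (or platform_send_ipi() for a mask), which the mach-bf561/smp.c added later in this patch implements.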
diff --git a/arch/blackfin/mach-bf561/secondary.S b/arch/blackfin/mach-bf561/secondary.S
new file mode 100644
index 0000000..35280f0
--- /dev/null
+++ b/arch/blackfin/mach-bf561/secondary.S
@@ -0,0 +1,215 @@
+/*
+ * File:         arch/blackfin/mach-bf561/secondary.S
+ * Based on:     arch/blackfin/mach-bf561/head.S
+ * Author:       Philippe Gerum <rpm@xenomai.org>
+ *
+ *               Copyright 2007 Analog Devices Inc.
+ *
+ * Description:  BF561 coreB bootstrap file
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/blackfin.h>
+#include <asm/asm-offsets.h>
+
+__INIT
+
+/* Lay the initial stack into the L1 scratch area of Core B */
+#define INITIAL_STACK	(COREB_L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
+
+ENTRY(_coreb_trampoline_start)
+	/* Set the SYSCFG register */
+	R0 = 0x36;
+	SYSCFG = R0; /* Enable Cycle Counter and Nesting Of Interrupts (3rd Bit) */
+	R0 = 0;
+
+	/* Clear out all the data and pointer registers */
+	R1 = R0;
+	R2 = R0;
+	R3 = R0;
+	R4 = R0;
+	R5 = R0;
+	R6 = R0;
+	R7 = R0;
+
+	P0 = R0;
+	P1 = R0;
+	P2 = R0;
+	P3 = R0;
+	P4 = R0;
+	P5 = R0;
+
+	LC0 = r0;
+	LC1 = r0;
+	L0 = r0;
+	L1 = r0;
+	L2 = r0;
+	L3 = r0;
+
+	/* Clear out all the DAG registers */
+	B0 = r0;
+	B1 = r0;
+	B2 = r0;
+	B3 = r0;
+
+	I0 = r0;
+	I1 = r0;
+	I2 = r0;
+	I3 = r0;
+
+	M0 = r0;
+	M1 = r0;
+	M2 = r0;
+	M3 = r0;
+
+	/* Turn off the icache */
+	p0.l = LO(IMEM_CONTROL);
+	p0.h = HI(IMEM_CONTROL);
+	R1 = [p0];
+	R0 = ~ENICPLB;
+	R0 = R0 & R1;
+
+	/* Anomaly 05000125 */
+#ifdef ANOMALY_05000125
+	CLI R2;
+	SSYNC;
+#endif
+	[p0] = R0;
+	SSYNC;
+#ifdef ANOMALY_05000125
+	STI R2;
+#endif
+
+	/* Turn off the dcache */
+	p0.l = LO(DMEM_CONTROL);
+	p0.h = HI(DMEM_CONTROL);
+	R1 = [p0];
+	R0 = ~ENDCPLB;
+	R0 = R0 & R1;
+
+	/* Anomaly 05000125 */
+#ifdef ANOMALY_05000125
+	CLI R2;
+	SSYNC;
+#endif
+	[p0] = R0;
+	SSYNC;
+#ifdef ANOMALY_05000125
+	STI R2;
+#endif
+
+	/* in case of double faults, save a few things */
+	p0.l = _init_retx_coreb;
+	p0.h = _init_retx_coreb;
+	R0 = RETX;
+	[P0] = R0;
+
+#ifdef CONFIG_DEBUG_DOUBLEFAULT
+	/* Only save these if we are storing them.
+	 * This happens here, since L1 gets clobbered
+	 * below.
+	 */
+	GET_PDA(p0, r0);
+	r7 = [p0 + PDA_RETX];
+	p1.l = _init_saved_retx_coreb;
+	p1.h = _init_saved_retx_coreb;
+	[p1] = r7;
+
+	r7 = [p0 + PDA_DCPLB];
+	p1.l = _init_saved_dcplb_fault_addr_coreb;
+	p1.h = _init_saved_dcplb_fault_addr_coreb;
+	[p1] = r7;
+
+	r7 = [p0 + PDA_ICPLB];
+	p1.l = _init_saved_icplb_fault_addr_coreb;
+	p1.h = _init_saved_icplb_fault_addr_coreb;
+	[p1] = r7;
+
+	r7 = [p0 + PDA_SEQSTAT];
+	p1.l = _init_saved_seqstat_coreb;
+	p1.h = _init_saved_seqstat_coreb;
+	[p1] = r7;
+#endif
+
+	/* Initialize stack pointer */
+	sp.l = lo(INITIAL_STACK);
+	sp.h = hi(INITIAL_STACK);
+	fp = sp;
+	usp = sp;
+
+	/* This section keeps the processor in supervisor mode
+	 * during core B startup.  Branches to the idle task.
+	 */
+
+	/* EVT15 = _real_start */
+
+	p0.l = lo(EVT15);
+	p0.h = hi(EVT15);
+	p1.l = _coreb_start;
+	p1.h = _coreb_start;
+	[p0] = p1;
+	csync;
+
+	p0.l = lo(IMASK);
+	p0.h = hi(IMASK);
+	p1.l = IMASK_IVG15;
+	p1.h = 0x0;
+	[p0] = p1;
+	csync;
+
+	raise 15;
+	p0.l = .LWAIT_HERE;
+	p0.h = .LWAIT_HERE;
+	reti = p0;
+#if defined(ANOMALY_05000281)
+	nop; nop; nop;
+#endif
+	rti;
+
+.LWAIT_HERE:
+	jump .LWAIT_HERE;
+ENDPROC(_coreb_trampoline_start)
+ENTRY(_coreb_trampoline_end)
+
+ENTRY(_coreb_start)
+	[--sp] = reti;
+
+	p0.l = lo(WDOGB_CTL);
+	p0.h = hi(WDOGB_CTL);
+	r0 = 0xAD6(z);
+	w[p0] = r0;	/* Clear the watchdog. */
+	ssync;
+
+	/*
+	 * switch to IDLE stack.
+	 */
+	p0.l = _secondary_stack;
+	p0.h = _secondary_stack;
+	sp = [p0];
+	usp = sp;
+	fp = sp;
+	sp += -12;
+	call _init_pda
+	sp += 12;
+	call _secondary_start_kernel;
+.L_exit:
+	jump.s	.L_exit;
+ENDPROC(_coreb_start)
+
+__FINIT
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
new file mode 100644
index 0000000..9b27e69
--- /dev/null
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -0,0 +1,167 @@
+/*
+ * File:         arch/blackfin/mach-bf561/smp.c
+ * Author:       Philippe Gerum <rpm@xenomai.org>
+ *
+ *               Copyright 2007 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <asm/smp.h>
+#include <asm/dma.h>
+
+static DEFINE_SPINLOCK(boot_lock);
+
+static cpumask_t cpu_callin_map;
+
+/*
+ * platform_init_cpus() - Tell the world about how many cores we
+ * have. This is called while setting up the architecture support
+ * (setup_arch()), so don't be too demanding here with respect to
+ * available kernel services.
+ */
+
+void __init platform_init_cpus(void)
+{
+	cpu_set(0, cpu_possible_map); /* CoreA */
+	cpu_set(1, cpu_possible_map); /* CoreB */
+}
+
+void __init platform_prepare_cpus(unsigned int max_cpus)
+{
+	int len;
+
+	len = &coreb_trampoline_end - &coreb_trampoline_start + 1;
+	BUG_ON(len > L1_CODE_LENGTH);
+
+	dma_memcpy((void *)COREB_L1_CODE_START, &coreb_trampoline_start, len);
+
+	/* Both cores ought to be present on a bf561! */
+	cpu_set(0, cpu_present_map); /* CoreA */
+	cpu_set(1, cpu_present_map); /* CoreB */
+
+	printk(KERN_INFO "CoreB bootstrap code to SRAM %p via DMA.\n", (void *)COREB_L1_CODE_START);
+}
+
+int __init setup_profiling_timer(unsigned int multiplier) /* not supported */
+{
+	return -EINVAL;
+}
+
+void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+	local_irq_disable();
+
+	/* Clone setup for peripheral interrupt sources from CoreA. */
+	bfin_write_SICB_IMASK0(bfin_read_SICA_IMASK0());
+	bfin_write_SICB_IMASK1(bfin_read_SICA_IMASK1());
+	SSYNC();
+
+	/* Clone setup for IARs from CoreA. */
+	bfin_write_SICB_IAR0(bfin_read_SICA_IAR0());
+	bfin_write_SICB_IAR1(bfin_read_SICA_IAR1());
+	bfin_write_SICB_IAR2(bfin_read_SICA_IAR2());
+	bfin_write_SICB_IAR3(bfin_read_SICA_IAR3());
+	bfin_write_SICB_IAR4(bfin_read_SICA_IAR4());
+	bfin_write_SICB_IAR5(bfin_read_SICA_IAR5());
+	bfin_write_SICB_IAR6(bfin_read_SICA_IAR6());
+	bfin_write_SICB_IAR7(bfin_read_SICA_IAR7());
+	SSYNC();
+
+	local_irq_enable();
+
+	/* Calibrate loops per jiffy value. */
+	calibrate_delay();
+
+	/* Store CPU-private information to the cpu_data array. */
+	bfin_setup_cpudata(cpu);
+
+	/* We are done with local CPU inits, unblock the boot CPU. */
+	cpu_set(cpu, cpu_callin_map);
+	spin_lock(&boot_lock);
+	spin_unlock(&boot_lock);
+}
+
+int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	unsigned long timeout;
+
+	/* CoreB already running?! */
+	BUG_ON((bfin_read_SICA_SYSCR() & COREB_SRAM_INIT) == 0);
+
+	printk(KERN_INFO "Booting Core B.\n");
+
+	spin_lock(&boot_lock);
+
+	/* Kick CoreB, which should start execution from CORE_SRAM_BASE. */
+	SSYNC();
+	bfin_write_SICA_SYSCR(bfin_read_SICA_SYSCR() & ~COREB_SRAM_INIT);
+	SSYNC();
+
+	timeout = jiffies + 1 * HZ;
+	while (time_before(jiffies, timeout)) {
+		if (cpu_isset(cpu, cpu_callin_map))
+			break;
+		udelay(100);
+		barrier();
+	}
+
+	spin_unlock(&boot_lock);
+
+	return cpu_isset(cpu, cpu_callin_map) ? 0 : -ENOSYS;
+}
+
+void __init platform_request_ipi(irq_handler_t handler)
+{
+	int ret;
+
+	ret = request_irq(IRQ_SUPPLE_0, handler, IRQF_DISABLED,
+			  "SMP interrupt", handler);
+	if (ret)
+		panic("Cannot request supplemental interrupt 0 for IPI service\n");
+}
+
+void platform_send_ipi(cpumask_t callmap)
+{
+	unsigned int cpu;
+
+	for_each_cpu_mask(cpu, callmap) {
+		BUG_ON(cpu >= 2);
+		SSYNC();
+		bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (6 + cpu)));
+		SSYNC();
+	}
+}
+
+void platform_send_ipi_cpu(unsigned int cpu)
+{
+	BUG_ON(cpu >= 2);
+	SSYNC();
+	bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (6 + cpu)));
+	SSYNC();
+}
+
+void platform_clear_ipi(unsigned int cpu)
+{
+	BUG_ON(cpu >= 2);
+	SSYNC();
+	bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (10 + cpu)));
+	SSYNC();
+}
diff --git a/arch/blackfin/mach-common/Makefile b/arch/blackfin/mach-common/Makefile
index e6ed57c..1f3228e 100644
--- a/arch/blackfin/mach-common/Makefile
+++ b/arch/blackfin/mach-common/Makefile
@@ -3,10 +3,12 @@
 #
 
 obj-y := \
-	cache.o entry.o head.o \
+	cache.o cache-c.o entry.o head.o \
 	interrupt.o irqpanic.o arch_checks.o ints-priority.o
 
 obj-$(CONFIG_BFIN_ICACHE_LOCK) += lock.o
 obj-$(CONFIG_PM)          += pm.o dpmc_modes.o
 obj-$(CONFIG_CPU_FREQ)    += cpufreq.o
 obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o
+obj-$(CONFIG_SMP)         += smp.o
+obj-$(CONFIG_BFIN_KERNEL_CLOCK) += clocks-init.o
diff --git a/arch/blackfin/mach-common/cache-c.c b/arch/blackfin/mach-common/cache-c.c
new file mode 100644
index 0000000..e6ab1f8
--- /dev/null
+++ b/arch/blackfin/mach-common/cache-c.c
@@ -0,0 +1,24 @@
+/*
+ * Blackfin cache control code (simpler control-style functions)
+ *
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <asm/blackfin.h>
+
+/* Invalidate the Entire Data cache by
+ * clearing DMC[1:0] bits
+ */
+void blackfin_invalidate_entire_dcache(void)
+{
+	u32 dmem = bfin_read_DMEM_CONTROL();
+	SSYNC();
+	bfin_write_DMEM_CONTROL(dmem & ~0xc);
+	SSYNC();
+	bfin_write_DMEM_CONTROL(dmem);
+	SSYNC();
+}
diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S
index a028e94..3c98dac 100644
--- a/arch/blackfin/mach-common/cache.S
+++ b/arch/blackfin/mach-common/cache.S
@@ -49,13 +49,17 @@
 .ifnb \optflushins
 	\optflushins [P0];
 .endif
+#if ANOMALY_05000443
 .ifb \optnopins
 2:
 .endif
 	\flushins [P0++];
 .ifnb \optnopins
-2: \optnopins;
+2:	\optnopins;
 .endif
+#else
+2:	\flushins [P0++];
+#endif
 
 	RTS;
 .endm
diff --git a/arch/blackfin/mach-common/clocks-init.c b/arch/blackfin/mach-common/clocks-init.c
new file mode 100644
index 0000000..5d182ab
--- /dev/null
+++ b/arch/blackfin/mach-common/clocks-init.c
@@ -0,0 +1,93 @@
+/*
+ * arch/blackfin/mach-common/clocks-init.c - reprogram clocks / memory
+ *
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/blackfin.h>
+
+#include <asm/dma.h>
+#include <asm/clocks.h>
+#include <asm/mem_init.h>
+
+#define PLL_CTL_VAL \
+	(((CONFIG_VCO_MULT & 63) << 9) | CLKIN_HALF | \
+	 (PLL_BYPASS << 8) | (ANOMALY_05000265 ? 0x8000 : 0))
+
+__attribute__((l1_text))
+static void do_sync(void)
+{
+	__builtin_bfin_ssync();
+}
+
+__attribute__((l1_text))
+void init_clocks(void)
+{
+	/* Kill any active DMAs as they may trigger external memory accesses
+	 * in the middle of reprogramming things, and that'll screw us up.
+	 * For example, any automatic DMAs left by U-Boot for splash screens.
+	 */
+	size_t i;
+	for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
+		struct dma_register *dma = dma_io_base_addr[i];
+		dma->cfg = 0;
+	}
+
+	do_sync();
+
+#ifdef SIC_IWR0
+	bfin_write_SIC_IWR0(IWR_ENABLE(0));
+# ifdef SIC_IWR1
+	/* BF52x system reset does not properly reset SIC_IWR1 which
+	 * will screw up the bootrom as it relies on MDMA0/1 waking it
+	 * up from IDLE instructions.  See this report for more info:
+	 * http://blackfin.uclinux.org/gf/tracker/4323
+	 */
+	if (ANOMALY_05000435)
+		bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
+	else
+		bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
+# endif
+# ifdef SIC_IWR2
+	bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
+# endif
+#else
+	bfin_write_SIC_IWR(IWR_ENABLE(0));
+#endif
+	do_sync();
+#ifdef EBIU_SDGCTL
+	bfin_write_EBIU_SDGCTL(bfin_read_EBIU_SDGCTL() | SRFS);
+	do_sync();
+#endif
+
+#ifdef CLKBUFOE
+	bfin_write16(VR_CTL, bfin_read_VR_CTL() | CLKBUFOE);
+	do_sync();
+	__asm__ __volatile__("IDLE;");
+#endif
+	bfin_write_PLL_LOCKCNT(0x300);
+	do_sync();
+	bfin_write16(PLL_CTL, PLL_CTL_VAL);
+	__asm__ __volatile__("IDLE;");
+	bfin_write_PLL_DIV(CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV);
+#ifdef EBIU_SDGCTL
+	bfin_write_EBIU_SDRRC(mem_SDRRC);
+	bfin_write_EBIU_SDGCTL(mem_SDGCTL);
+#else
+	bfin_write_EBIU_RSTCTL(bfin_read_EBIU_RSTCTL() & ~(SRREQ));
+	do_sync();
+	bfin_write_EBIU_RSTCTL(bfin_read_EBIU_RSTCTL() | 0x1);
+	bfin_write_EBIU_DDRCTL0(mem_DDRCTL0);
+	bfin_write_EBIU_DDRCTL1(mem_DDRCTL1);
+	bfin_write_EBIU_DDRCTL2(mem_DDRCTL2);
+#ifdef CONFIG_MEM_EBIU_DDRQUE
+	bfin_write_EBIU_DDRQUE(CONFIG_MEM_EBIU_DDRQUE);
+#endif
+#endif
+	do_sync();
+	bfin_read16(0);
+}
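As a quick sanity check of the PLL_CTL_VAL encoding above (the VCO multiplier lands in bits 9..14, the bypass flag in bit 8), here is a stand-alone sketch; the concrete input values are assumptions for illustration, not taken from any real board configuration.

	#include <stdio.h>

	/* Stand-alone check of the PLL_CTL_VAL encoding used in clocks-init.c.
	 * Assumed inputs: VCO multiplier 21, full-rate CLKIN, no PLL bypass,
	 * anomaly 05000265 workaround off.
	 */
	int main(void)
	{
		unsigned int vco_mult = 21, clkin_half = 0, pll_bypass = 0, anomaly = 0;
		unsigned int pll_ctl = ((vco_mult & 63) << 9) | clkin_half |
				       (pll_bypass << 8) | (anomaly ? 0x8000 : 0);

		printf("PLL_CTL = 0x%04x\n", pll_ctl);	/* prints 0x2a00 */
		return 0;
	}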
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index dda5443..72e16605 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -104,7 +104,7 @@
 		 cclk_hz, target_freq, freqs.old);
 
 	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 		plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
 		tscale = dpm_state_table[index].tscale;
 		bfin_write_PLL_DIV(plldiv);
@@ -112,10 +112,10 @@
 		bfin_write_TSCALE(tscale);
 		cycles = get_cycles();
 		SSYNC();
-	cycles += 10; /* ~10 cycles we loose after get_cycles() */
+	cycles += 10; /* ~10 cycles we lose after get_cycles() */
 	__bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index);
 	__bfin_cycles_mod = index;
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 	/* TODO: just test case for cycles clock source, remove later */
 	pr_debug("cpufreq: done\n");
 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
diff --git a/arch/blackfin/mach-common/dpmc_modes.S b/arch/blackfin/mach-common/dpmc_modes.S
index ad5431e..4da50bc 100644
--- a/arch/blackfin/mach-common/dpmc_modes.S
+++ b/arch/blackfin/mach-common/dpmc_modes.S
@@ -247,7 +247,8 @@
 ENDPROC(_unset_dram_srfs)
 
 ENTRY(_set_sic_iwr)
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x)  || defined(CONFIG_BF561)
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) || \
+	defined(CONFIG_BF538) || defined(CONFIG_BF539) || defined(CONFIG_BF51x)
 	P0.H = hi(SIC_IWR0);
 	P0.L = lo(SIC_IWR0);
 	P1.H = hi(SIC_IWR1);
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index bde6dc4..fae7746 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -36,6 +36,7 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/unistd.h>
+#include <linux/threads.h>
 #include <asm/blackfin.h>
 #include <asm/errno.h>
 #include <asm/fixed_code.h>
@@ -75,11 +76,11 @@
 	 * handle it.
 	 */
 	P4 = R7;	/* Store EXCAUSE */
-	p5.l = _last_cplb_fault_retx;
-	p5.h = _last_cplb_fault_retx;
-	r7 = [p5];
+
+	GET_PDA(p5, r7);
+	r7 = [p5 + PDA_LFRETX];
 	r6 = retx;
-	[p5] = r6;
+	[p5 + PDA_LFRETX] = r6;
 	cc = r6 == r7;
 	if !cc jump _bfin_return_from_exception;
 	/* fall through */
@@ -111,24 +112,21 @@
 ENTRY(_ex_dcplb_miss)
 ENTRY(_ex_icplb_miss)
 	(R7:6,P5:4) = [sp++];
-	ASTAT = [sp++];
-	SAVE_ALL_SYS
-#ifdef CONFIG_MPU
+	/* We leave the previously pushed ASTAT on the stack.  */
+	SAVE_CONTEXT_CPLB
+
 	/* We must load R1 here, _before_ DEBUG_HWTRACE_SAVE, since that
 	 * will change the stack pointer.  */
 	R0 = SEQSTAT;
 	R1 = SP;
-#endif
+
 	DEBUG_HWTRACE_SAVE(p5, r7)
-#ifdef CONFIG_MPU
+
 	sp += -12;
 	call _cplb_hdr;
 	sp += 12;
 	CC = R0 == 0;
 	IF !CC JUMP _handle_bad_cplb;
-#else
-	call __cplb_hdr;
-#endif
 
 #ifdef CONFIG_DEBUG_DOUBLEFAULT
 	/* While we were processing this, did we double fault? */
@@ -142,7 +140,8 @@
 #endif
 
 	DEBUG_HWTRACE_RESTORE(p5, r7)
-	RESTORE_ALL_SYS
+	RESTORE_CONTEXT_CPLB
+	ASTAT = [SP++];
 	SP = EX_SCRATCH_REG;
 	rtx;
 ENDPROC(_ex_icplb_miss)
@@ -297,9 +296,8 @@
 	 * the stack to get ready so, we can fall through - we
 	 * need to make a CPLB exception look like a normal exception
 	 */
-
-	RESTORE_ALL_SYS
-	[--sp] = ASTAT;
+	RESTORE_CONTEXT_CPLB
+	/* ASTAT is still on the stack, where it is needed.  */
 	[--sp] = (R7:6,P5:4);
 
 ENTRY(_ex_replaceable)
@@ -324,7 +322,9 @@
 	[p4] = p5;
 	csync;
 
+	GET_PDA(p5, r6);
 #ifndef CONFIG_DEBUG_DOUBLEFAULT
+
 	/*
 	 * Save these registers, as they are only valid in exception context
 	 *  (where we are now - as soon as we defer to IRQ5, they can change)
@@ -335,29 +335,25 @@
 	p4.l = lo(DCPLB_FAULT_ADDR);
 	p4.h = hi(DCPLB_FAULT_ADDR);
 	r7 = [p4];
-	p5.h = _saved_dcplb_fault_addr;
-	p5.l = _saved_dcplb_fault_addr;
-	[p5] = r7;
+	[p5 + PDA_DCPLB] = r7;
 
-	r7 = [p4 + (ICPLB_FAULT_ADDR - DCPLB_FAULT_ADDR)];
-	p5.h = _saved_icplb_fault_addr;
-	p5.l = _saved_icplb_fault_addr;
-	[p5] = r7;
+	p4.l = lo(ICPLB_FAULT_ADDR);
+	p4.h = hi(ICPLB_FAULT_ADDR);
+	r6 = [p4];
+	[p5 + PDA_ICPLB] = r6;
 
 	r6 = retx;
-	p4.l = _saved_retx;
-	p4.h = _saved_retx;
-	[p4] = r6;
+	[p5 + PDA_RETX] = r6;
 #endif
 	r6 = SYSCFG;
-	[p4 + 4] = r6;
+	[p5 + PDA_SYSCFG] = r6;
 	BITCLR(r6, 0);
 	SYSCFG = r6;
 
 	/* Disable all interrupts, but make sure level 5 is enabled so
 	 * we can switch to that level.  Save the old mask.  */
 	cli r6;
-	[p4 + 8] = r6;
+	[p5 + PDA_EXIMASK] = r6;
 
 	p4.l = lo(SAFE_USER_INSTRUCTION);
 	p4.h = hi(SAFE_USER_INSTRUCTION);
@@ -371,9 +367,10 @@
 ENDPROC(_ex_trap_c)
 
 /* We just realized we got an exception, while we were processing a different
- * exception. This is a unrecoverable event, so crash
+ * exception. This is an unrecoverable event, so crash.
+ * Note: this cannot be ENTRY() as we jump here with "if cc jump" ...
  */
-ENTRY(_double_fault)
+_double_fault:
 	/* Turn caches & protection off, to ensure we don't get any more
 	 * double exceptions
 	 */
@@ -424,17 +421,16 @@
 ENTRY(_exception_to_level5)
 	SAVE_ALL_SYS
 
-	p4.l = _saved_retx;
-	p4.h = _saved_retx;
-	r6 = [p4];
+	GET_PDA(p4, r7);        /* Fetch current PDA */
+	r6 = [p4 + PDA_RETX];
 	[sp + PT_PC] = r6;
 
-	r6 = [p4 + 4];
+	r6 = [p4 + PDA_SYSCFG];
 	[sp + PT_SYSCFG] = r6;
 
 	/* Restore interrupt mask.  We haven't pushed RETI, so this
 	 * doesn't enable interrupts until we return from this handler.  */
-	r6 = [p4 + 8];
+	r6 = [p4 + PDA_EXIMASK];
 	sti r6;
 
 	/* Restore the hardware error vector.  */
@@ -478,8 +474,8 @@
 	 * scratch register (for want of a better option).
 	 */
 	EX_SCRATCH_REG = sp;
-	sp.l = _exception_stack_top;
-	sp.h = _exception_stack_top;
+	GET_PDA_SAFE(sp);
+	sp = [sp + PDA_EXSTACK]
 	/* Try to deal with syscalls quickly.  */
 	[--sp] = ASTAT;
 	[--sp] = (R7:6,P5:4);
@@ -501,27 +497,22 @@
 	 * but they are not very interesting, so don't save them
 	 */
 
+	GET_PDA(p5, r7);
 	p4.l = lo(DCPLB_FAULT_ADDR);
 	p4.h = hi(DCPLB_FAULT_ADDR);
 	r7 = [p4];
-	p5.h = _saved_dcplb_fault_addr;
-	p5.l = _saved_dcplb_fault_addr;
-	[p5] = r7;
+	[p5 + PDA_DCPLB] = r7;
 
-	r7 = [p4 + (ICPLB_FAULT_ADDR - DCPLB_FAULT_ADDR)];
-	p5.h = _saved_icplb_fault_addr;
-	p5.l = _saved_icplb_fault_addr;
-	[p5] = r7;
+	p4.l = lo(ICPLB_FAULT_ADDR);
+	p4.h = hi(ICPLB_FAULT_ADDR);
+	r7 = [p4];
+	[p5 + PDA_ICPLB] = r7;
 
-	p4.l = _saved_retx;
-	p4.h = _saved_retx;
 	r6 = retx;
-	[p4] = r6;
+	[p5 + PDA_RETX] = r6;
 
 	r7 = SEQSTAT;		/* reason code is in bit 5:0 */
-	p4.l = _saved_seqstat;
-	p4.h = _saved_seqstat;
-	[p4] = r7;
+	[p5 + PDA_SEQSTAT] = r7;
 #else
 	r7 = SEQSTAT;           /* reason code is in bit 5:0 */
 #endif
@@ -546,11 +537,11 @@
 	p0 = sp;
 	r3 = SIZEOF_PTREGS / 4;
 	r4 = 0(x);
-0:
+.Lclear_regs:
 	[p0++] = r4;
 	r3 += -1;
 	cc = r3 == 0;
-	if !cc jump 0b (bp);
+	if !cc jump .Lclear_regs (bp);
 
 	p0 = sp;
 	sp += -16;
@@ -558,7 +549,7 @@
 	call _do_execve;
 	SP += 16;
 	cc = r0 == 0;
-	if ! cc jump 1f;
+	if ! cc jump .Lexecve_failed;
 	/* Success.  Copy our temporary pt_regs to the top of the kernel
 	 * stack and do a normal exception return.
 	 */
@@ -574,12 +565,12 @@
 	p0 = fp;
 	r4 = [p0--];
 	r3 = SIZEOF_PTREGS / 4;
-0:
+.Lcopy_regs:
 	r4 = [p0--];
 	[p1--] = r4;
 	r3 += -1;
 	cc = r3 == 0;
-	if ! cc jump 0b (bp);
+	if ! cc jump .Lcopy_regs (bp);
 
 	r0 = (KERNEL_STACK_SIZE - SIZEOF_PTREGS) (z);
 	p1 = r0;
@@ -591,7 +582,7 @@
 
 	RESTORE_CONTEXT;
 	rti;
-1:
+.Lexecve_failed:
 	unlink;
 	rts;
 ENDPROC(_kernel_execve)
@@ -925,9 +916,14 @@
 	p1 = rets;
 	[sp + PT_RESERVED] = p1;
 
-	p0.l = _irq_flags;
-	p0.h = _irq_flags;
+#ifdef CONFIG_SMP
+	GET_PDA(p0, r0); 	/* Fetch current PDA (can't migrate to other CPU here) */
+	r0 = [p0 + PDA_IRQFLAGS];
+#else
+	p0.l = _bfin_irq_flags;
+	p0.h = _bfin_irq_flags;
 	r0 = [p0];
+#endif
 	sti r0;
 
 	r0 = sp;
@@ -1539,14 +1535,18 @@
 	.endr
 END(_sys_call_table)
 
-_exception_stack:
-	.rept 1024
-	.long 0;
+#ifdef CONFIG_EXCEPTION_L1_SCRATCH
+/* .section .l1.bss.scratch */
+.set _exception_stack_top, L1_SCRATCH_START + L1_SCRATCH_LENGTH
+#else
+#ifdef CONFIG_SYSCALL_TAB_L1
+.section .l1.bss
+#else
+.bss
+#endif
+ENTRY(_exception_stack)
+	.rept 1024 * NR_CPUS
+	.long 0
 	.endr
 _exception_stack_top:
-
-#if ANOMALY_05000261
-/* Used by the assembly entry point to work around an anomaly.  */
-_last_cplb_fault_retx:
-	.long 0;
 #endif
diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
index f123a62..e1e42c0 100644
--- a/arch/blackfin/mach-common/head.S
+++ b/arch/blackfin/mach-common/head.S
@@ -13,6 +13,7 @@
 #include <asm/blackfin.h>
 #include <asm/thread_info.h>
 #include <asm/trace.h>
+#include <asm/asm-offsets.h>
 
 __INIT
 
@@ -111,33 +112,26 @@
 	 * This happens here, since L1 gets clobbered
 	 * below
 	 */
-	p0.l = _saved_retx;
-	p0.h = _saved_retx;
+	GET_PDA(p0, r0);
+	r7 = [p0 + PDA_RETX];
 	p1.l = _init_saved_retx;
 	p1.h = _init_saved_retx;
-	r0 = [p0];
-	[p1] = r0;
+	[p1] = r7;
 
-	p0.l = _saved_dcplb_fault_addr;
-	p0.h = _saved_dcplb_fault_addr;
+	r7 = [p0 + PDA_DCPLB];
 	p1.l = _init_saved_dcplb_fault_addr;
 	p1.h = _init_saved_dcplb_fault_addr;
-	r0 = [p0];
-	[p1] = r0;
+	[p1] = r7;
 
-	p0.l = _saved_icplb_fault_addr;
-	p0.h = _saved_icplb_fault_addr;
+	r7 = [p0 + PDA_ICPLB];
 	p1.l = _init_saved_icplb_fault_addr;
 	p1.h = _init_saved_icplb_fault_addr;
-	r0 = [p0];
-	[p1] = r0;
+	[p1] = r7;
 
-	p0.l = _saved_seqstat;
-	p0.h = _saved_seqstat;
+	r7 = [p0 + PDA_SEQSTAT];
 	p1.l = _init_saved_seqstat;
 	p1.h = _init_saved_seqstat;
-	r0 = [p0];
-	[p1] = r0;
+	[p1] = r7;
 #endif
 
 	/* Initialize stack pointer */
@@ -153,7 +147,7 @@
 	/* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
 	call _bfin_relocate_l1_mem;
 #ifdef CONFIG_BFIN_KERNEL_CLOCK
-	call _start_dma_code;
+	call _init_clocks;
 #endif
 
 	/* This section keeps the processor in supervisor mode
@@ -170,12 +164,8 @@
 	[p0] = p1;
 	csync;
 
-	p0.l = lo(IMASK);
-	p0.h = hi(IMASK);
-	p1.l = IMASK_IVG15;
-	p1.h = 0x0;
-	[p0] = p1;
-	csync;
+	r0 = EVT_IVG15 (z);
+	sti r0;
 
 	raise 15;
 	p0.l = .LWAIT_HERE;
@@ -195,6 +185,19 @@
 # define WDOG_CTL WDOGA_CTL
 #endif
 
+ENTRY(__init_clear_bss)
+	r2 = r2 - r1;
+	cc = r2 == 0;
+	if cc jump .L_bss_done;
+	r2 >>= 2;
+	p1 = r1;
+	p2 = r2;
+	lsetup (1f, 1f) lc0 = p2;
+1:	[p1++] = r0;
+.L_bss_done:
+	rts;
+ENDPROC(__init_clear_bss)
+
 ENTRY(_real_start)
 	/* Enable nested interrupts */
 	[--sp] = reti;
@@ -206,87 +209,34 @@
 	w[p0] = r0;
 	ssync;
 
+	r0 = 0 (x);
+	/* Zero out all of the fun bss regions */
 #if L1_DATA_A_LENGTH > 0
 	r1.l = __sbss_l1;
 	r1.h = __sbss_l1;
 	r2.l = __ebss_l1;
 	r2.h = __ebss_l1;
-	r0 = 0 (z);
-	r2 = r2 - r1;
-	cc = r2 == 0;
-	if cc jump .L_a_l1_done;
-	r2 >>= 2;
-	p1 = r1;
-	p2 = r2;
-	lsetup (.L_clear_a_l1, .L_clear_a_l1 ) lc0 = p2;
-.L_clear_a_l1:
-	[p1++] = r0;
-.L_a_l1_done:
+	call __init_clear_bss
 #endif
-
 #if L1_DATA_B_LENGTH > 0
 	r1.l = __sbss_b_l1;
 	r1.h = __sbss_b_l1;
 	r2.l = __ebss_b_l1;
 	r2.h = __ebss_b_l1;
-	r0 = 0 (z);
-	r2 = r2 - r1;
-	cc = r2 == 0;
-	if cc jump .L_b_l1_done;
-	r2 >>= 2;
-	p1 = r1;
-	p2 = r2;
-	lsetup (.L_clear_b_l1, .L_clear_b_l1 ) lc0 = p2;
-.L_clear_b_l1:
-	[p1++] = r0;
-.L_b_l1_done:
+	call __init_clear_bss
 #endif
-
 #if L2_LENGTH > 0
 	r1.l = __sbss_l2;
 	r1.h = __sbss_l2;
 	r2.l = __ebss_l2;
 	r2.h = __ebss_l2;
-	r0 = 0 (z);
-	r2 = r2 - r1;
-	cc = r2 == 0;
-	if cc jump .L_l2_done;
-	r2 >>= 2;
-	p1 = r1;
-	p2 = r2;
-	lsetup (.L_clear_l2, .L_clear_l2 ) lc0 = p2;
-.L_clear_l2:
-	[p1++] = r0;
-.L_l2_done:
+	call __init_clear_bss
 #endif
-
-	/* Zero out the bss region
-	 * Note: this will fail if bss is 0 bytes ...
-	 */
-	r0 = 0 (z);
 	r1.l = ___bss_start;
 	r1.h = ___bss_start;
 	r2.l = ___bss_stop;
 	r2.h = ___bss_stop;
-	r2 = r2 - r1;
-	r2 >>= 2;
-	p1 = r1;
-	p2 = r2;
-	lsetup (.L_clear_bss, .L_clear_bss) lc0 = p2;
-.L_clear_bss:
-	[p1++] = r0;
-
-	/* In case there is a NULL pointer reference,
-	 * zero out region before stext
-	 */
-	p1 = r0;
-	r2.l = __stext;
-	r2.h = __stext;
-	r2 >>= 2;
-	p2 = r2;
-	lsetup (.L_clear_zero, .L_clear_zero) lc0 = p2;
-.L_clear_zero:
-	[p1++] = r0;
+	call __init_clear_bss
 
 	/* Pass the u-boot arguments to the global value command line */
 	R0 = R7;
@@ -299,6 +249,9 @@
 	sp = sp + p1;
 	usp = sp;
 	fp = sp;
+	sp += -12;
+	call _init_pda
+	sp += 12;
 	jump.l _start_kernel;
 ENDPROC(_real_start)
 
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index 4a2ec7a..473df0f 100644
--- a/arch/blackfin/mach-common/interrupt.S
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -129,8 +129,15 @@
 #endif
 	r1 =  sp;
 	SP += -12;
+#ifdef CONFIG_IPIPE
+	call ___ipipe_grab_irq
+	SP += 12;
+	cc = r0 == 0;
+	if cc jump .Lcommon_restore_context;
+#else /* CONFIG_IPIPE */
 	call _do_irq;
 	SP += 12;
+#endif /* CONFIG_IPIPE */
 	call _return_from_int;
 .Lcommon_restore_context:
 	RESTORE_CONTEXT
@@ -152,15 +159,6 @@
 1:
 #endif
 
-#ifdef CONFIG_HARDWARE_PM
-	r7 = [sp + PT_SEQSTAT];
-	r7 = r7 >>> 0xe;
-	r6 = 0x1F;
-	r7 = r7 & r6;
-	r5 = 0x12;
-	cc = r7 == r5;
-	if cc jump .Lcall_do_ovf; /* deal with performance counter overflow */
-#endif
 	# We are going to dump something out, so make sure we print IPEND properly
 	p2.l = lo(IPEND);
 	p2.h = hi(IPEND);
@@ -192,17 +190,6 @@
 .Lcommon_restore_all_sys:
 	RESTORE_ALL_SYS
 	rti;
-
-#ifdef CONFIG_HARDWARE_PM
-.Lcall_do_ovf:
-
-	SP += -12;
-	call _pm_overflow;
-	SP += 12;
-
-	jump .Lcommon_restore_all_sys;
-#endif
-
 ENDPROC(_evt_ivhw)
 
 /* Interrupt routine for evt2 (NMI).
@@ -245,3 +232,56 @@
 	call _system_call;
 	jump .Lcommon_restore_context;
 ENDPROC(_evt_system_call)
+
+#ifdef CONFIG_IPIPE
+ENTRY(___ipipe_call_irqtail)
+	r0.l = 1f;
+	r0.h = 1f;
+	reti = r0;
+	rti;
+1:
+	[--sp] = rets;
+	[--sp] = ( r7:4, p5:3 );
+	p0.l = ___ipipe_irq_tail_hook;
+	p0.h = ___ipipe_irq_tail_hook;
+	p0 = [p0];
+	sp += -12;
+	call (p0);
+	sp += 12;
+	( r7:4, p5:3 ) = [sp++];
+	rets = [sp++];
+
+	[--sp] = reti;
+	reti = [sp++];          /* IRQs are off. */
+	r0.h = 3f;
+	r0.l = 3f;
+	p0.l = lo(EVT14);
+	p0.h = hi(EVT14);
+	[p0] = r0;
+	csync;
+	r0 = 0x401f;
+	sti r0;
+	raise 14;
+	[--sp] = reti;          /* IRQs on. */
+2:
+	jump 2b;                /* Likely paranoid. */
+3:
+	sp += 4;                /* Discard saved RETI */
+	r0.h = _evt14_softirq;
+	r0.l = _evt14_softirq;
+	p0.l = lo(EVT14);
+	p0.h = hi(EVT14);
+	[p0] = r0;
+	csync;
+	p0.l = _bfin_irq_flags;
+	p0.h = _bfin_irq_flags;
+	r0 = [p0];
+	sti r0;
+#if 0 /* FIXME: this actually raises scheduling latencies */
+	/* Reenable interrupts */
+	[--sp] = reti;
+	r0 = [sp++];
+#endif
+	rts;
+ENDPROC(___ipipe_call_irqtail)
+#endif /* CONFIG_IPIPE */
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 34e8a72..1bba603 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1,9 +1,6 @@
 /*
  * File:         arch/blackfin/mach-common/ints-priority.c
- * Based on:
- * Author:
  *
- * Created:      ?
  * Description:  Set up the interrupt priorities
  *
  * Modified:
@@ -37,6 +34,9 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#ifdef CONFIG_IPIPE
+#include <linux/ipipe.h>
+#endif
 #ifdef CONFIG_KGDB
 #include <linux/kgdb.h>
 #endif
@@ -45,6 +45,8 @@
 #include <asm/gpio.h>
 #include <asm/irq_handler.h>
 
+#define SIC_SYSIRQ(irq)	(irq - (IRQ_CORETMR + 1))
+
 #ifdef BF537_FAMILY
 # define BF537_GENERIC_ERROR_INT_DEMUX
 #else
@@ -58,13 +60,16 @@
  * -
  */
 
+#ifndef CONFIG_SMP
 /* Initialize this to an actual value to force it into the .data
  * section so that we know it is properly initialized at entry into
  * the kernel but before bss is initialized to zero (which is where
  * it would live otherwise).  The 0x1f magic represents the IRQs we
  * cannot actually mask out in hardware.
  */
-unsigned long irq_flags = 0x1f;
+unsigned long bfin_irq_flags = 0x1f;
+EXPORT_SYMBOL(bfin_irq_flags);
+#endif
 
 /* The number of spurious interrupts */
 atomic_t num_spurious;
@@ -103,12 +108,14 @@
 		for (irqn = 0; irqn < NR_PERI_INTS; irqn++) {
 			int iar_shift = (irqn & 7) * 4;
 				if (ivg == (0xf &
-#ifndef CONFIG_BF52x
+#if defined(CONFIG_BF52x) || defined(CONFIG_BF538) \
+	|| defined(CONFIG_BF539) || defined(CONFIG_BF51x)
 			     bfin_read32((unsigned long *)SIC_IAR0 +
-					 (irqn >> 3)) >> iar_shift)) {
+					 ((irqn % 32) >> 3) + ((irqn / 32) *
+					 ((SIC_IAR4 - SIC_IAR0) / 4))) >> iar_shift)) {
 #else
 			     bfin_read32((unsigned long *)SIC_IAR0 +
-					 ((irqn%32) >> 3) + ((irqn / 32) * 16)) >> iar_shift)) {
+					 (irqn >> 3)) >> iar_shift)) {
 #endif
 				ivg_table[irq_pos].irqno = IVG7 + irqn;
 				ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
@@ -130,25 +137,25 @@
 
 static void bfin_core_mask_irq(unsigned int irq)
 {
-	irq_flags &= ~(1 << irq);
-	if (!irqs_disabled())
-		local_irq_enable();
+	bfin_irq_flags &= ~(1 << irq);
+	if (!irqs_disabled_hw())
+		local_irq_enable_hw();
 }
 
 static void bfin_core_unmask_irq(unsigned int irq)
 {
-	irq_flags |= 1 << irq;
+	bfin_irq_flags |= 1 << irq;
 	/*
 	 * If interrupts are enabled, IMASK must contain the same value
-	 * as irq_flags.  Make sure that invariant holds.  If interrupts
+	 * as bfin_irq_flags.  Make sure that invariant holds.  If interrupts
 	 * are currently disabled we need not do anything; one of the
 	 * callers will take care of setting IMASK to the proper value
 	 * when reenabling interrupts.
-	 * local_irq_enable just does "STI irq_flags", so it's exactly
+	 * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
 	 * what we need.
 	 */
-	if (!irqs_disabled())
-		local_irq_enable();
+	if (!irqs_disabled_hw())
+		local_irq_enable_hw();
 	return;
 }
 
@@ -163,8 +170,11 @@
 	mask_bit = SIC_SYSIRQ(irq) % 32;
 	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
 			     ~(1 << mask_bit));
+#ifdef CONFIG_SMP
+	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
+			     ~(1 << mask_bit));
 #endif
-	SSYNC();
+#endif
 }
 
 static void bfin_internal_unmask_irq(unsigned int irq)
@@ -178,14 +188,17 @@
 	mask_bit = SIC_SYSIRQ(irq) % 32;
 	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) |
 			     (1 << mask_bit));
+#ifdef CONFIG_SMP
+	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) |
+			     (1 << mask_bit));
 #endif
-	SSYNC();
+#endif
 }
 
 #ifdef CONFIG_PM
 int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 {
-	unsigned bank, bit, wakeup = 0;
+	u32 bank, bit, wakeup = 0;
 	unsigned long flags;
 	bank = SIC_SYSIRQ(irq) / 32;
 	bit = SIC_SYSIRQ(irq) % 32;
@@ -225,7 +238,7 @@
 	break;
 	}
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 
 	if (state) {
 		bfin_sic_iwr[bank] |= (1 << bit);
@@ -236,7 +249,7 @@
 		vr_wakeup  &= ~wakeup;
 	}
 
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 	return 0;
 }
@@ -262,6 +275,19 @@
 #endif
 };
 
+static void bfin_handle_irq(unsigned irq)
+{
+#ifdef CONFIG_IPIPE
+	struct pt_regs regs;    /* Contents not used. */
+	ipipe_trace_irq_entry(irq);
+	__ipipe_handle_irq(irq, &regs);
+	ipipe_trace_irq_exit(irq);
+#else /* !CONFIG_IPIPE */
+	struct irq_desc *desc = irq_desc + irq;
+	desc->handle_irq(irq, desc);
+#endif  /* !CONFIG_IPIPE */
+}
+
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
 static int error_int_mask;
 
@@ -292,8 +318,6 @@
 {
 	int irq = 0;
 
-	SSYNC();
-
 #if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
 	if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK)
 		irq = IRQ_MAC_ERROR;
@@ -317,10 +341,9 @@
 		irq = IRQ_UART1_ERROR;
 
 	if (irq) {
-		if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR))) {
-			struct irq_desc *desc = irq_desc + irq;
-			desc->handle_irq(irq, desc);
-		} else {
+		if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR)))
+			bfin_handle_irq(irq);
+		else {
 
 			switch (irq) {
 			case IRQ_PPI_ERROR:
@@ -366,62 +389,57 @@
 
 static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
 {
+#ifdef CONFIG_IPIPE
+	_set_irq_handler(irq, handle_edge_irq);
+#else
 	struct irq_desc *desc = irq_desc + irq;
 	/* May not call generic set_irq_handler() due to spinlock
 	   recursion. */
 	desc->handle_irq = handle;
+#endif
 }
 
+static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
+extern void bfin_gpio_irq_prepare(unsigned gpio);
+
 #if !defined(CONFIG_BF54x)
 
-static unsigned short gpio_enabled[gpio_bank(MAX_BLACKFIN_GPIOS)];
-static unsigned short gpio_edge_triggered[gpio_bank(MAX_BLACKFIN_GPIOS)];
-
-extern void bfin_gpio_irq_prepare(unsigned gpio);
-
 static void bfin_gpio_ack_irq(unsigned int irq)
 {
-	u16 gpionr = irq - IRQ_PF0;
-
-	if (gpio_edge_triggered[gpio_bank(gpionr)] & gpio_bit(gpionr)) {
-		set_gpio_data(gpionr, 0);
-		SSYNC();
-	}
+	/* AFAIK, when mask_ack is provided, ack_irq
+	 * only gets called for edge sense irqs
+	 */
+	set_gpio_data(irq_to_gpio(irq), 0);
 }
 
 static void bfin_gpio_mask_ack_irq(unsigned int irq)
 {
-	u16 gpionr = irq - IRQ_PF0;
+	struct irq_desc *desc = irq_desc + irq;
+	u32 gpionr = irq_to_gpio(irq);
 
-	if (gpio_edge_triggered[gpio_bank(gpionr)] & gpio_bit(gpionr)) {
+	if (desc->handle_irq == handle_edge_irq)
 		set_gpio_data(gpionr, 0);
-		SSYNC();
-	}
 
 	set_gpio_maska(gpionr, 0);
-	SSYNC();
 }
 
 static void bfin_gpio_mask_irq(unsigned int irq)
 {
-	set_gpio_maska(irq - IRQ_PF0, 0);
-	SSYNC();
+	set_gpio_maska(irq_to_gpio(irq), 0);
 }
 
 static void bfin_gpio_unmask_irq(unsigned int irq)
 {
-	set_gpio_maska(irq - IRQ_PF0, 1);
-	SSYNC();
+	set_gpio_maska(irq_to_gpio(irq), 1);
 }
 
 static unsigned int bfin_gpio_irq_startup(unsigned int irq)
 {
-	u16 gpionr = irq - IRQ_PF0;
+	u32 gpionr = irq_to_gpio(irq);
 
-	if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr)))
+	if (__test_and_set_bit(gpionr, gpio_enabled))
 		bfin_gpio_irq_prepare(gpionr);
 
-	gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr);
 	bfin_gpio_unmask_irq(irq);
 
 	return 0;
@@ -429,29 +447,39 @@
 
 static void bfin_gpio_irq_shutdown(unsigned int irq)
 {
+	u32 gpionr = irq_to_gpio(irq);
+
 	bfin_gpio_mask_irq(irq);
-	gpio_enabled[gpio_bank(irq - IRQ_PF0)] &= ~gpio_bit(irq - IRQ_PF0);
+	__clear_bit(gpionr, gpio_enabled);
+	bfin_gpio_irq_free(gpionr);
 }
 
 static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
 {
-	u16 gpionr = irq - IRQ_PF0;
+	int ret;
+	char buf[16];
+	u32 gpionr = irq_to_gpio(irq);
 
 	if (type == IRQ_TYPE_PROBE) {
 		/* only probe unenabled GPIO interrupt lines */
-		if (gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))
+		if (__test_bit(gpionr, gpio_enabled))
 			return 0;
 		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
 	}
 
 	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
 		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
-		if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr)))
+
+		snprintf(buf, 16, "gpio-irq%d", irq);
+		ret = bfin_gpio_irq_request(gpionr, buf);
+		if (ret)
+			return ret;
+
+		if (__test_and_set_bit(gpionr, gpio_enabled))
 			bfin_gpio_irq_prepare(gpionr);
 
-		gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr);
 	} else {
-		gpio_enabled[gpio_bank(gpionr)] &= ~gpio_bit(gpionr);
+		__clear_bit(gpionr, gpio_enabled);
 		return 0;
 	}
 
@@ -472,17 +500,13 @@
 	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
 		set_gpio_edge(gpionr, 1);
 		set_gpio_inen(gpionr, 1);
-		gpio_edge_triggered[gpio_bank(gpionr)] |= gpio_bit(gpionr);
 		set_gpio_data(gpionr, 0);
 
 	} else {
 		set_gpio_edge(gpionr, 0);
-		gpio_edge_triggered[gpio_bank(gpionr)] &= ~gpio_bit(gpionr);
 		set_gpio_inen(gpionr, 1);
 	}
 
-	SSYNC();
-
 	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
 		bfin_set_irq_handler(irq, handle_edge_irq);
 	else
@@ -505,22 +529,6 @@
 }
 #endif
 
-static struct irq_chip bfin_gpio_irqchip = {
-	.name = "GPIO",
-	.ack = bfin_gpio_ack_irq,
-	.mask = bfin_gpio_mask_irq,
-	.mask_ack = bfin_gpio_mask_ack_irq,
-	.unmask = bfin_gpio_unmask_irq,
-	.disable = bfin_gpio_mask_irq,
-	.enable = bfin_gpio_unmask_irq,
-	.set_type = bfin_gpio_irq_type,
-	.startup = bfin_gpio_irq_startup,
-	.shutdown = bfin_gpio_irq_shutdown,
-#ifdef CONFIG_PM
-	.set_wake = bfin_gpio_set_wake,
-#endif
-};
-
 static void bfin_demux_gpio_irq(unsigned int inta_irq,
 				struct irq_desc *desc)
 {
@@ -537,7 +545,11 @@
 		irq = IRQ_PH0;
 		break;
 # endif
-#elif defined(CONFIG_BF52x)
+#elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
+	case IRQ_PORTF_INTA:
+		irq = IRQ_PF0;
+		break;
+#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
 	case IRQ_PORTF_INTA:
 		irq = IRQ_PF0;
 		break;
@@ -567,30 +579,22 @@
 		for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
 			irq += i;
 
-			mask = get_gpiop_data(i) &
-				(gpio_enabled[gpio_bank(i)] &
-				get_gpiop_maska(i));
+			mask = get_gpiop_data(i) & get_gpiop_maska(i);
 
 			while (mask) {
-				if (mask & 1) {
-					desc = irq_desc + irq;
-					desc->handle_irq(irq, desc);
-				}
+				if (mask & 1)
+					bfin_handle_irq(irq);
 				irq++;
 				mask >>= 1;
 			}
 		}
 	} else {
 			gpio = irq_to_gpio(irq);
-			mask = get_gpiop_data(gpio) &
-				(gpio_enabled[gpio_bank(gpio)] &
-				get_gpiop_maska(gpio));
+			mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
 
 			do {
-				if (mask & 1) {
-					desc = irq_desc + irq;
-					desc->handle_irq(irq, desc);
-				}
+				if (mask & 1)
+					bfin_handle_irq(irq);
 				irq++;
 				mask >>= 1;
 			} while (mask);
@@ -612,10 +616,6 @@
 static unsigned char irq2pint_lut[NR_PINTS];
 static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];
 
-static unsigned int gpio_both_edge_triggered[NR_PINT_SYS_IRQS];
-static unsigned short gpio_enabled[gpio_bank(MAX_BLACKFIN_GPIOS)];
-
-
 struct pin_int_t {
 	unsigned int mask_set;
 	unsigned int mask_clear;
@@ -636,12 +636,9 @@
 	(struct pin_int_t *)PINT3_MASK_SET,
 };
 
-extern void bfin_gpio_irq_prepare(unsigned gpio);
-
-inline unsigned short get_irq_base(u8 bank, u8 bmap)
+inline unsigned int get_irq_base(u32 bank, u8 bmap)
 {
-
-	u16 irq_base;
+	unsigned int irq_base;
 
 	if (bank < 2) {		/*PA-PB */
 		irq_base = IRQ_PA0 + bmap * 16;
@@ -650,7 +647,6 @@
 	}
 
 	return irq_base;
-
 }
 
 	/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
@@ -677,20 +673,18 @@
 
 			pint2irq_lut[bit_pos] = irq_base - SYS_IRQS;
 			irq2pint_lut[irq_base - SYS_IRQS] = bit_pos;
-
 		}
-
 	}
-
 }
 
 static void bfin_gpio_ack_irq(unsigned int irq)
 {
-	u8 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	struct irq_desc *desc = irq_desc + irq;
+	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
 	u32 pintbit = PINT_BIT(pint_val);
-	u8 bank = PINT_2_BANK(pint_val);
+	u32 bank = PINT_2_BANK(pint_val);
 
-	if (unlikely(gpio_both_edge_triggered[bank] & pintbit)) {
+	if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
 		if (pint[bank]->invert_set & pintbit)
 			pint[bank]->invert_clear = pintbit;
 		else
@@ -698,16 +692,16 @@
 	}
 	pint[bank]->request = pintbit;
 
-	SSYNC();
 }
 
 static void bfin_gpio_mask_ack_irq(unsigned int irq)
 {
-	u8 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	struct irq_desc *desc = irq_desc + irq;
+	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
 	u32 pintbit = PINT_BIT(pint_val);
-	u8 bank = PINT_2_BANK(pint_val);
+	u32 bank = PINT_2_BANK(pint_val);
 
-	if (unlikely(gpio_both_edge_triggered[bank] & pintbit)) {
+	if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
 		if (pint[bank]->invert_set & pintbit)
 			pint[bank]->invert_clear = pintbit;
 		else
@@ -716,32 +710,29 @@
 
 	pint[bank]->request = pintbit;
 	pint[bank]->mask_clear = pintbit;
-	SSYNC();
 }
 
 static void bfin_gpio_mask_irq(unsigned int irq)
 {
-	u8 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
 
 	pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
-	SSYNC();
 }
 
 static void bfin_gpio_unmask_irq(unsigned int irq)
 {
-	u8 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
 	u32 pintbit = PINT_BIT(pint_val);
-	u8 bank = PINT_2_BANK(pint_val);
+	u32 bank = PINT_2_BANK(pint_val);
 
 	pint[bank]->request = pintbit;
 	pint[bank]->mask_set = pintbit;
-	SSYNC();
 }
 
 static unsigned int bfin_gpio_irq_startup(unsigned int irq)
 {
-	u16 gpionr = irq_to_gpio(irq);
-	u8 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 gpionr = irq_to_gpio(irq);
+	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
 
 	if (pint_val == IRQ_NOT_AVAIL) {
 		printk(KERN_ERR
@@ -750,10 +741,9 @@
 		return -ENODEV;
 	}
 
-	if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr)))
+	if (__test_and_set_bit(gpionr, gpio_enabled))
 		bfin_gpio_irq_prepare(gpionr);
 
-	gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr);
 	bfin_gpio_unmask_irq(irq);
 
 	return 0;
@@ -761,38 +751,45 @@
 
 static void bfin_gpio_irq_shutdown(unsigned int irq)
 {
-	u16 gpionr = irq_to_gpio(irq);
+	u32 gpionr = irq_to_gpio(irq);
 
 	bfin_gpio_mask_irq(irq);
-	gpio_enabled[gpio_bank(gpionr)] &= ~gpio_bit(gpionr);
+	__clear_bit(gpionr, gpio_enabled);
+	bfin_gpio_irq_free(gpionr);
 }
 
 static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
 {
-
-	u16 gpionr = irq_to_gpio(irq);
-	u8 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	int ret;
+	char buf[16];
+	u32 gpionr = irq_to_gpio(irq);
+	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
 	u32 pintbit = PINT_BIT(pint_val);
-	u8 bank = PINT_2_BANK(pint_val);
+	u32 bank = PINT_2_BANK(pint_val);
 
 	if (pint_val == IRQ_NOT_AVAIL)
 		return -ENODEV;
 
 	if (type == IRQ_TYPE_PROBE) {
 		/* only probe unenabled GPIO interrupt lines */
-		if (gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))
+		if (__test_bit(gpionr, gpio_enabled))
 			return 0;
 		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
 	}
 
 	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
 		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
-		if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr)))
+
+		snprintf(buf, 16, "gpio-irq%d", irq);
+		ret = bfin_gpio_irq_request(gpionr, buf);
+		if (ret)
+			return ret;
+
+		if (__test_and_set_bit(gpionr, gpio_enabled))
 			bfin_gpio_irq_prepare(gpionr);
 
-		gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr);
 	} else {
-		gpio_enabled[gpio_bank(gpionr)] &= ~gpio_bit(gpionr);
+		__clear_bit(gpionr, gpio_enabled);
 		return 0;
 	}
 
@@ -803,15 +800,10 @@
 
 	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
 	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
-
-		gpio_both_edge_triggered[bank] |= pintbit;
-
 		if (gpio_get_value(gpionr))
 			pint[bank]->invert_set = pintbit;
 		else
 			pint[bank]->invert_clear = pintbit;
-	} else {
-		gpio_both_edge_triggered[bank] &= ~pintbit;
 	}
 
 	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
@@ -822,8 +814,6 @@
 		bfin_set_irq_handler(irq, handle_level_irq);
 	}
 
-	SSYNC();
-
 	return 0;
 }
 
@@ -834,7 +824,7 @@
 int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
 {
 	u32 pint_irq;
-	u8 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
 	u32 bank = PINT_2_BANK(pint_val);
 	u32 pintbit = PINT_BIT(pint_val);
 
@@ -895,26 +885,10 @@
 }
 #endif
 
-static struct irq_chip bfin_gpio_irqchip = {
-	.name = "GPIO",
-	.ack = bfin_gpio_ack_irq,
-	.mask = bfin_gpio_mask_irq,
-	.mask_ack = bfin_gpio_mask_ack_irq,
-	.unmask = bfin_gpio_unmask_irq,
-	.disable = bfin_gpio_mask_irq,
-	.enable = bfin_gpio_unmask_irq,
-	.set_type = bfin_gpio_irq_type,
-	.startup = bfin_gpio_irq_startup,
-	.shutdown = bfin_gpio_irq_shutdown,
-#ifdef CONFIG_PM
-	.set_wake = bfin_gpio_set_wake,
-#endif
-};
-
 static void bfin_demux_gpio_irq(unsigned int inta_irq,
 				struct irq_desc *desc)
 {
-	u8 bank, pint_val;
+	u32 bank, pint_val;
 	u32 request, irq;
 
 	switch (inta_irq) {
@@ -941,8 +915,7 @@
 	while (request) {
 		if (request & 1) {
 			irq = pint2irq_lut[pint_val] + SYS_IRQS;
-			desc = irq_desc + irq;
-			desc->handle_irq(irq, desc);
+			bfin_handle_irq(irq);
 		}
 		pint_val++;
 		request >>= 1;
@@ -951,10 +924,24 @@
 }
 #endif
 
-void __init init_exception_vectors(void)
-{
-	SSYNC();
+static struct irq_chip bfin_gpio_irqchip = {
+	.name = "GPIO",
+	.ack = bfin_gpio_ack_irq,
+	.mask = bfin_gpio_mask_irq,
+	.mask_ack = bfin_gpio_mask_ack_irq,
+	.unmask = bfin_gpio_unmask_irq,
+	.disable = bfin_gpio_mask_irq,
+	.enable = bfin_gpio_unmask_irq,
+	.set_type = bfin_gpio_irq_type,
+	.startup = bfin_gpio_irq_startup,
+	.shutdown = bfin_gpio_irq_shutdown,
+#ifdef CONFIG_PM
+	.set_wake = bfin_gpio_set_wake,
+#endif
+};
 
+void __cpuinit init_exception_vectors(void)
+{
 	/* cannot program in software:
 	 * evt0 - emulation (jtag)
 	 * evt1 - reset
@@ -979,17 +966,23 @@
  * This function should be called during kernel startup to initialize
  * the BFin IRQ handling routines.
  */
+
 int __init init_arch_irq(void)
 {
 	int irq;
 	unsigned long ilat = 0;
 	/*  Disable all the peripheral intrs  - page 4-29 HW Ref manual */
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \
+	|| defined(BF538_FAMILY) || defined(CONFIG_BF51x)
 	bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
 	bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
 # ifdef CONFIG_BF54x
 	bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
 # endif
+# ifdef CONFIG_SMP
+	bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
+	bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
+# endif
 #else
 	bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
 #endif
@@ -1029,7 +1022,7 @@
 		case IRQ_PINT1:
 		case IRQ_PINT2:
 		case IRQ_PINT3:
-#elif defined(CONFIG_BF52x)
+#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
 		case IRQ_PORTF_INTA:
 		case IRQ_PORTG_INTA:
 		case IRQ_PORTH_INTA:
@@ -1037,18 +1030,41 @@
 		case IRQ_PROG0_INTA:
 		case IRQ_PROG1_INTA:
 		case IRQ_PROG2_INTA:
+#elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
+		case IRQ_PORTF_INTA:
 #endif
+
 			set_irq_chained_handler(irq,
 						bfin_demux_gpio_irq);
 			break;
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
 		case IRQ_GENERIC_ERROR:
-			set_irq_handler(irq, bfin_demux_error_irq);
-
+			set_irq_chained_handler(irq, bfin_demux_error_irq);
+			break;
+#endif
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
+		case IRQ_TIMER0:
+			set_irq_handler(irq, handle_percpu_irq);
+			break;
+#endif
+#ifdef CONFIG_SMP
+		case IRQ_SUPPLE_0:
+		case IRQ_SUPPLE_1:
+			set_irq_handler(irq, handle_percpu_irq);
 			break;
 #endif
 		default:
+#ifdef CONFIG_IPIPE
+	/*
+	 * We want internal interrupt sources to be masked, because
+	 * ISRs may trigger interrupts recursively (e.g. DMA), but
+	 * interrupts are _not_ masked at CPU level. So let's handle
+	 * them as level interrupts.
+	 */
+			set_irq_handler(irq, handle_level_irq);
+#else /* !CONFIG_IPIPE */
 			set_irq_handler(irq, handle_simple_irq);
+#endif /* !CONFIG_IPIPE */
 			break;
 		}
 	}
@@ -1073,7 +1089,7 @@
 	CSYNC();
 
 	printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
-	/* IMASK=xxx is equivalent to STI xx or irq_flags=xx,
+	/* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx,
 	 * local_irq_enable()
 	 */
 	program_IAR();
@@ -1081,19 +1097,23 @@
 	search_IAR();
 
 	/* Enable interrupts IVG7-15 */
-	irq_flags = irq_flags | IMASK_IVG15 |
+	bfin_irq_flags |= IMASK_IVG15 |
 	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
 	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
 
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \
+	|| defined(BF538_FAMILY) || defined(CONFIG_BF51x)
 	bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
-#if defined(CONFIG_BF52x)
-	/* BF52x system reset does not properly reset SIC_IWR1 which
+#if defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
+	/* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
 	 * will screw up the bootrom as it relies on MDMA0/1 waking it
 	 * up from IDLE instructions.  See this report for more info:
 	 * http://blackfin.uclinux.org/gf/tracker/4323
 	 */
-	bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
+	if (ANOMALY_05000435)
+		bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
+	else
+		bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
 #else
 	bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
 #endif
@@ -1104,6 +1124,14 @@
 	bfin_write_SIC_IWR(IWR_DISABLE_ALL);
 #endif
 
+#ifdef CONFIG_IPIPE
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		struct irq_desc *desc = irq_desc + irq;
+		desc->ic_prio = __ipipe_get_irq_priority(irq);
+		desc->thr_prio = __ipipe_get_irqthread_priority(irq);
+	}
+#endif /* CONFIG_IPIPE */
+
 	return 0;
 }
 
@@ -1117,11 +1145,20 @@
 	} else {
 		struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
 		struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \
+	|| defined(BF538_FAMILY) || defined(CONFIG_BF51x)
 		unsigned long sic_status[3];
 
-		sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
-		sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
+		if (smp_processor_id()) {
+#ifdef CONFIG_SMP
+			/* This will be optimized out in UP mode. */
+			sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
+			sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
+#endif
+		} else {
+			sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
+			sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
+		}
 #ifdef CONFIG_BF54x
 		sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
 #endif
@@ -1150,3 +1187,161 @@
 	}
 	asm_do_IRQ(vec, fp);
 }
+
+#ifdef CONFIG_IPIPE
+
+int __ipipe_get_irq_priority(unsigned irq)
+{
+	int ient, prio;
+
+	if (irq <= IRQ_CORETMR)
+		return irq;
+
+	for (ient = 0; ient < NR_PERI_INTS; ient++) {
+		struct ivgx *ivg = ivg_table + ient;
+		if (ivg->irqno == irq) {
+			for (prio = 0; prio <= IVG13-IVG7; prio++) {
+				if (ivg7_13[prio].ifirst <= ivg &&
+				    ivg7_13[prio].istop > ivg)
+					return IVG7 + prio;
+			}
+		}
+	}
+
+	return IVG15;
+}
+
+int __ipipe_get_irqthread_priority(unsigned irq)
+{
+	int ient, prio;
+	int demux_irq;
+
+	/* The returned priority value is rescaled to [0..IVG13+1]
+	 * with 0 being the lowest effective priority level. */
+
+	if (irq <= IRQ_CORETMR)
+		return IVG13 - irq + 1;
+
+	/* GPIO IRQs are given the priority of the demux
+	 * interrupt. */
+	if (IS_GPIOIRQ(irq)) {
+#if defined(CONFIG_BF54x)
+		u32 bank = PINT_2_BANK(irq2pint_lut[irq - SYS_IRQS]);
+		demux_irq = (bank == 0 ? IRQ_PINT0 :
+				bank == 1 ? IRQ_PINT1 :
+				bank == 2 ? IRQ_PINT2 :
+				IRQ_PINT3);
+#elif defined(CONFIG_BF561)
+		demux_irq = (irq >= IRQ_PF32 ? IRQ_PROG2_INTA :
+				irq >= IRQ_PF16 ? IRQ_PROG1_INTA :
+				IRQ_PROG0_INTA);
+#elif defined(CONFIG_BF52x)
+		demux_irq = (irq >= IRQ_PH0 ? IRQ_PORTH_INTA :
+				irq >= IRQ_PG0 ? IRQ_PORTG_INTA :
+				IRQ_PORTF_INTA);
+#else
+		demux_irq = irq;
+#endif
+		return IVG13 - PRIO_GPIODEMUX(demux_irq) + 1;
+	}
+
+	/* The GPIO demux interrupt is given a lower priority
+	 * than the GPIO IRQs, so that its threaded handler
+	 * unmasks the interrupt line after the decoded IRQs
+	 * have been processed. */
+	prio = PRIO_GPIODEMUX(irq);
+	/* demux irq? */
+	if (prio != -1)
+		return IVG13 - prio;
+
+	for (ient = 0; ient < NR_PERI_INTS; ient++) {
+		struct ivgx *ivg = ivg_table + ient;
+		if (ivg->irqno == irq) {
+			for (prio = 0; prio <= IVG13-IVG7; prio++) {
+				if (ivg7_13[prio].ifirst <= ivg &&
+				    ivg7_13[prio].istop > ivg)
+					return IVG7 - prio;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
+#ifdef CONFIG_DO_IRQ_L1
+__attribute__((l1_text))
+#endif
+asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
+{
+	struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
+	struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
+	int irq;
+
+	if (likely(vec == EVT_IVTMR_P)) {
+		irq = IRQ_CORETMR;
+		goto handle_irq;
+	}
+
+	SSYNC();
+
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
+	{
+		unsigned long sic_status[3];
+
+		sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
+		sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
+#ifdef CONFIG_BF54x
+		sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
+#endif
+		for (;; ivg++) {
+			if (ivg >= ivg_stop) {
+				atomic_inc(&num_spurious);
+				return 0;
+			}
+			if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
+				break;
+		}
+	}
+#else
+	{
+		unsigned long sic_status;
+
+		sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
+
+		for (;; ivg++) {
+			if (ivg >= ivg_stop) {
+				atomic_inc(&num_spurious);
+				return 0;
+			} else if (sic_status & ivg->isrflag)
+				break;
+		}
+	}
+#endif
+
+	irq = ivg->irqno;
+
+	if (irq == IRQ_SYSTMR) {
+		bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
+		/* This is basically what we need from the register frame. */
+		__raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
+		__raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
+		if (!ipipe_root_domain_p)
+			__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
+		else
+			__raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
+	}
+
+handle_irq:
+
+	ipipe_trace_irq_entry(irq);
+	__ipipe_handle_irq(irq, regs);
+	ipipe_trace_irq_exit(irq);
+
+	if (ipipe_root_domain_p)
+		return !test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+
+	return 0;
+}
+
+#endif /* CONFIG_IPIPE */
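The rescaling done by the two priority helpers above is easier to read with a worked case than from the arithmetic alone. A rough illustration (names and values taken from the helpers themselves; the example function is not part of the patch): a decoded GPIO IRQ ends up one priority step above the demux interrupt that decodes it, so the threaded demux handler only unmasks the line after the decoded IRQs have run.

/* Illustrative only: relative thread priorities produced by
 * __ipipe_get_irqthread_priority() for a demux IRQ and the GPIO IRQs
 * it decodes, where p == PRIO_GPIODEMUX(demux_irq).
 */
static int example_demux_runs_last(int p)
{
	int gpio_thread_prio  = IVG13 - p + 1;	/* decoded GPIO IRQs        */
	int demux_thread_prio = IVG13 - p;	/* the demux interrupt line */

	return gpio_thread_prio > demux_thread_prio;	/* always true */
}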
diff --git a/arch/blackfin/mach-common/irqpanic.c b/arch/blackfin/mach-common/irqpanic.c
index 606ded9..05004df 100644
--- a/arch/blackfin/mach-common/irqpanic.c
+++ b/arch/blackfin/mach-common/irqpanic.c
@@ -33,8 +33,6 @@
 #include <asm/traps.h>
 #include <asm/blackfin.h>
 
-#include "../oprofile/op_blackfin.h"
-
 #ifdef CONFIG_DEBUG_ICACHE_CHECK
 #define L1_ICACHE_START 0xffa10000
 #define L1_ICACHE_END   0xffa13fff
@@ -134,13 +132,3 @@
 #endif
 
 }
-
-#ifdef CONFIG_HARDWARE_PM
-/*
- * call the handler of Performance overflow
- */
-asmlinkage void pm_overflow(int irq, struct pt_regs *regs)
-{
-	pm_overflow_handler(irq, regs);
-}
-#endif
diff --git a/arch/blackfin/mach-common/lock.S b/arch/blackfin/mach-common/lock.S
index 9daf012..6c5f5f0 100644
--- a/arch/blackfin/mach-common/lock.S
+++ b/arch/blackfin/mach-common/lock.S
@@ -160,7 +160,7 @@
  * R0 - Which way to be locked
  */
 
-ENTRY(_cache_lock)
+ENTRY(_bfin_cache_lock)
 
 	[--SP]=( R7:0,P5:0 );
 
@@ -184,7 +184,7 @@
 
 	( R7:0,P5:0 ) = [SP++];
 	RTS;
-ENDPROC(_cache_lock)
+ENDPROC(_bfin_cache_lock)
 
 /* Invalidate the Entire Instruction cache by
  * disabling IMC bit
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index e28c6af..d3d70fd 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -71,7 +71,7 @@
 	gpio_pm_wakeup_request(CONFIG_PM_WAKEUP_GPIO_NUMBER, WAKEUP_TYPE);
 #endif
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	bfin_pm_standby_setup();
 
 #ifdef CONFIG_PM_BFIN_SLEEP_DEEPER
@@ -82,15 +82,19 @@
 
 	bfin_pm_standby_restore();
 
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x)  || defined(CONFIG_BF561)
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x)  || defined(CONFIG_BF561) || \
+	defined(CONFIG_BF538) || defined(CONFIG_BF539) || defined(CONFIG_BF51x)
 	bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
-#if defined(CONFIG_BF52x)
+#if defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
 	/* BF52x system reset does not properly reset SIC_IWR1 which
 	 * will screw up the bootrom as it relies on MDMA0/1 waking it
 	 * up from IDLE instructions.  See this report for more info:
 	 * http://blackfin.uclinux.org/gf/tracker/4323
 	 */
-	bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
+	if (ANOMALY_05000435)
+		bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
+	else
+		bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
 #else
 	bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
 #endif
@@ -101,7 +105,7 @@
 	bfin_write_SIC_IWR(IWR_DISABLE_ALL);
 #endif
 
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 int bf53x_suspend_l1_mem(unsigned char *memptr)
@@ -245,12 +249,12 @@
 	wakeup |= GPWE;
 #endif
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 
 	ret = blackfin_dma_suspend();
 
 	if (ret) {
-		local_irq_restore(flags);
+		local_irq_restore_hw(flags);
 		kfree(memptr);
 		return ret;
 	}
@@ -271,7 +275,7 @@
 	bfin_gpio_pm_hibernate_restore();
 	blackfin_dma_resume();
 
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 	kfree(memptr);
 
 	return 0;
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
new file mode 100644
index 0000000..77c9928
--- /dev/null
+++ b/arch/blackfin/mach-common/smp.c
@@ -0,0 +1,476 @@
+/*
+ * File:         arch/blackfin/mach-common/smp.c
+ * Author:       Philippe Gerum <rpm@xenomai.org>
+ * IPI management based on arch/arm/kernel/smp.c.
+ *
+ *               Copyright 2007 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/cache.h>
+#include <linux/profile.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/seq_file.h>
+#include <linux/irq.h>
+#include <asm/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/cpu.h>
+#include <linux/err.h>
+
+struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
+
+void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
+	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
+	*init_saved_dcplb_fault_addr_coreb;
+
+cpumask_t cpu_possible_map;
+EXPORT_SYMBOL(cpu_possible_map);
+
+cpumask_t cpu_online_map;
+EXPORT_SYMBOL(cpu_online_map);
+
+#define BFIN_IPI_RESCHEDULE   0
+#define BFIN_IPI_CALL_FUNC    1
+#define BFIN_IPI_CPU_STOP     2
+
+struct blackfin_flush_data {
+	unsigned long start;
+	unsigned long end;
+};
+
+void *secondary_stack;
+
+
+struct smp_call_struct {
+	void (*func)(void *info);
+	void *info;
+	int wait;
+	cpumask_t pending;
+	cpumask_t waitmask;
+};
+
+static struct blackfin_flush_data smp_flush_data;
+
+static DEFINE_SPINLOCK(stop_lock);
+
+struct ipi_message {
+	struct list_head list;
+	unsigned long type;
+	struct smp_call_struct call_struct;
+};
+
+struct ipi_message_queue {
+	struct list_head head;
+	spinlock_t lock;
+	unsigned long count;
+};
+
+static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
+
+static void ipi_cpu_stop(unsigned int cpu)
+{
+	spin_lock(&stop_lock);
+	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
+	dump_stack();
+	spin_unlock(&stop_lock);
+
+	cpu_clear(cpu, cpu_online_map);
+
+	local_irq_disable();
+
+	while (1)
+		SSYNC();
+}
+
+static void ipi_flush_icache(void *info)
+{
+	struct blackfin_flush_data *fdata = info;
+
+	/* Invalidate the memory holding the bounds of the flushed region. */
+	blackfin_dcache_invalidate_range((unsigned long)fdata,
+					 (unsigned long)fdata + sizeof(*fdata));
+
+	blackfin_icache_flush_range(fdata->start, fdata->end);
+}
+
+static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
+{
+	int wait;
+	void (*func)(void *info);
+	void *info;
+	func = msg->call_struct.func;
+	info = msg->call_struct.info;
+	wait = msg->call_struct.wait;
+	cpu_clear(cpu, msg->call_struct.pending);
+	func(info);
+	if (wait)
+		cpu_clear(cpu, msg->call_struct.waitmask);
+	else
+		kfree(msg);
+}
+
+static irqreturn_t ipi_handler(int irq, void *dev_instance)
+{
+	struct ipi_message *msg, *mg;
+	struct ipi_message_queue *msg_queue;
+	unsigned int cpu = smp_processor_id();
+
+	platform_clear_ipi(cpu);
+
+	msg_queue = &__get_cpu_var(ipi_msg_queue);
+	msg_queue->count++;
+
+	spin_lock(&msg_queue->lock);
+	list_for_each_entry_safe(msg, mg, &msg_queue->head, list) {
+		list_del(&msg->list);
+		switch (msg->type) {
+		case BFIN_IPI_RESCHEDULE:
+			/* That's the easiest one; leave it to
+			 * return_from_int. */
+			kfree(msg);
+			break;
+		case BFIN_IPI_CALL_FUNC:
+			ipi_call_function(cpu, msg);
+			break;
+		case BFIN_IPI_CPU_STOP:
+			ipi_cpu_stop(cpu);
+			kfree(msg);
+			break;
+		default:
+			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
+			       cpu, msg->type);
+			kfree(msg);
+			break;
+		}
+	}
+	spin_unlock(&msg_queue->lock);
+	return IRQ_HANDLED;
+}
+
+static void ipi_queue_init(void)
+{
+	unsigned int cpu;
+	struct ipi_message_queue *msg_queue;
+	for_each_possible_cpu(cpu) {
+		msg_queue = &per_cpu(ipi_msg_queue, cpu);
+		INIT_LIST_HEAD(&msg_queue->head);
+		spin_lock_init(&msg_queue->lock);
+		msg_queue->count = 0;
+	}
+}
+
+int smp_call_function(void (*func)(void *info), void *info, int wait)
+{
+	unsigned int cpu;
+	cpumask_t callmap;
+	unsigned long flags;
+	struct ipi_message_queue *msg_queue;
+	struct ipi_message *msg;
+
+	callmap = cpu_online_map;
+	cpu_clear(smp_processor_id(), callmap);
+	if (cpus_empty(callmap))
+		return 0;
+
+	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+	INIT_LIST_HEAD(&msg->list);
+	msg->call_struct.func = func;
+	msg->call_struct.info = info;
+	msg->call_struct.wait = wait;
+	msg->call_struct.pending = callmap;
+	msg->call_struct.waitmask = callmap;
+	msg->type = BFIN_IPI_CALL_FUNC;
+
+	for_each_cpu_mask(cpu, callmap) {
+		msg_queue = &per_cpu(ipi_msg_queue, cpu);
+		spin_lock_irqsave(&msg_queue->lock, flags);
+		list_add(&msg->list, &msg_queue->head);
+		spin_unlock_irqrestore(&msg_queue->lock, flags);
+		platform_send_ipi_cpu(cpu);
+	}
+	if (wait) {
+		while (!cpus_empty(msg->call_struct.waitmask))
+			blackfin_dcache_invalidate_range(
+				(unsigned long)(&msg->call_struct.waitmask),
+				(unsigned long)(&msg->call_struct.waitmask));
+		kfree(msg);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(smp_call_function);
+
+int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
+				int wait)
+{
+	unsigned int cpu = cpuid;
+	cpumask_t callmap;
+	unsigned long flags;
+	struct ipi_message_queue *msg_queue;
+	struct ipi_message *msg;
+
+	if (cpu_is_offline(cpu))
+		return 0;
+	cpus_clear(callmap);
+	cpu_set(cpu, callmap);
+
+	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+	INIT_LIST_HEAD(&msg->list);
+	msg->call_struct.func = func;
+	msg->call_struct.info = info;
+	msg->call_struct.wait = wait;
+	msg->call_struct.pending = callmap;
+	msg->call_struct.waitmask = callmap;
+	msg->type = BFIN_IPI_CALL_FUNC;
+
+	msg_queue = &per_cpu(ipi_msg_queue, cpu);
+	spin_lock_irqsave(&msg_queue->lock, flags);
+	list_add(&msg->list, &msg_queue->head);
+	spin_unlock_irqrestore(&msg_queue->lock, flags);
+	platform_send_ipi_cpu(cpu);
+
+	if (wait) {
+		while (!cpus_empty(msg->call_struct.waitmask))
+			blackfin_dcache_invalidate_range(
+				(unsigned long)(&msg->call_struct.waitmask),
+				(unsigned long)(&msg->call_struct.waitmask));
+		kfree(msg);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(smp_call_function_single);
+
+void smp_send_reschedule(int cpu)
+{
+	unsigned long flags;
+	struct ipi_message_queue *msg_queue;
+	struct ipi_message *msg;
+
+	if (cpu_is_offline(cpu))
+		return;
+
+	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+	memset(msg, 0, sizeof(*msg));
+	INIT_LIST_HEAD(&msg->list);
+	msg->type = BFIN_IPI_RESCHEDULE;
+
+	msg_queue = &per_cpu(ipi_msg_queue, cpu);
+	spin_lock_irqsave(&msg_queue->lock, flags);
+	list_add(&msg->list, &msg_queue->head);
+	spin_unlock_irqrestore(&msg_queue->lock, flags);
+	platform_send_ipi_cpu(cpu);
+
+	return;
+}
+
+void smp_send_stop(void)
+{
+	unsigned int cpu;
+	cpumask_t callmap;
+	unsigned long flags;
+	struct ipi_message_queue *msg_queue;
+	struct ipi_message *msg;
+
+	callmap = cpu_online_map;
+	cpu_clear(smp_processor_id(), callmap);
+	if (cpus_empty(callmap))
+		return;
+
+	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+	memset(msg, 0, sizeof(*msg));
+	INIT_LIST_HEAD(&msg->list);
+	msg->type = BFIN_IPI_CPU_STOP;
+
+	for_each_cpu_mask(cpu, callmap) {
+		msg_queue = &per_cpu(ipi_msg_queue, cpu);
+		spin_lock_irqsave(&msg_queue->lock, flags);
+		list_add(&msg->list, &msg_queue->head);
+		spin_unlock_irqrestore(&msg_queue->lock, flags);
+		platform_send_ipi_cpu(cpu);
+	}
+	return;
+}
+
+int __cpuinit __cpu_up(unsigned int cpu)
+{
+	struct task_struct *idle;
+	int ret;
+
+	idle = fork_idle(cpu);
+	if (IS_ERR(idle)) {
+		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
+		return PTR_ERR(idle);
+	}
+
+	secondary_stack = task_stack_page(idle) + THREAD_SIZE;
+	smp_wmb();
+
+	ret = platform_boot_secondary(cpu, idle);
+
+	if (ret) {
+		cpu_clear(cpu, cpu_present_map);
+		printk(KERN_CRIT "CPU%u: processor failed to boot (%d)\n", cpu, ret);
+		free_task(idle);
+	} else
+		cpu_set(cpu, cpu_online_map);
+
+	secondary_stack = NULL;
+
+	return ret;
+}
+
+static void __cpuinit setup_secondary(unsigned int cpu)
+{
+#if !(defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE))
+	struct irq_desc *timer_desc;
+#endif
+	unsigned long ilat;
+
+	bfin_write_IMASK(0);
+	CSYNC();
+	ilat = bfin_read_ILAT();
+	CSYNC();
+	bfin_write_ILAT(ilat);
+	CSYNC();
+
+	/* Reserve the PDA space for the secondary CPU. */
+	reserve_pda();
+
+	/* Enable interrupt levels IVG7-15. IARs have been already
+	 * programmed by the boot CPU.  */
+	bfin_irq_flags |= IMASK_IVG15 |
+	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
+	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
+
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
+	/* Power down the core timer, just to play safe. */
+	bfin_write_TCNTL(0);
+
+	/* system timer0 has been setup by CoreA. */
+#else
+	timer_desc = irq_desc + IRQ_CORETMR;
+	setup_core_timer();
+	timer_desc->chip->enable(IRQ_CORETMR);
+#endif
+}
+
+void __cpuinit secondary_start_kernel(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct mm_struct *mm = &init_mm;
+
+	if (_bfin_swrst & SWRST_DBL_FAULT_B) {
+		printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
+#ifdef CONFIG_DEBUG_DOUBLEFAULT
+		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
+			(int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE, init_saved_retx_coreb);
+		printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr_coreb);
+		printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr_coreb);
+#endif
+		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
+			init_retx_coreb);
+	}
+
+	/*
+	 * We want the D-cache to be enabled early, in case the atomic
+	 * support code emulates cache coherence (see
+	 * __ARCH_SYNC_CORE_DCACHE).
+	 */
+	init_exception_vectors();
+
+	bfin_setup_caches(cpu);
+
+	local_irq_disable();
+
+	/* Attach the new idle task to the global mm. */
+	atomic_inc(&mm->mm_users);
+	atomic_inc(&mm->mm_count);
+	current->active_mm = mm;
+	BUG_ON(current->mm);	/* Can't be, but better be safe than sorry. */
+
+	preempt_disable();
+
+	setup_secondary(cpu);
+
+	local_irq_enable();
+
+	platform_secondary_init(cpu);
+
+	cpu_idle();
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	platform_prepare_cpus(max_cpus);
+	ipi_queue_init();
+	platform_request_ipi(&ipi_handler);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+	unsigned long bogosum = 0;
+	unsigned int cpu;
+
+	for_each_online_cpu(cpu)
+		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
+
+	printk(KERN_INFO "SMP: Total of %d processors activated "
+	       "(%lu.%02lu BogoMIPS).\n",
+	       num_online_cpus(),
+	       bogosum / (500000/HZ),
+	       (bogosum / (5000/HZ)) % 100);
+}
+
+void smp_icache_flush_range_others(unsigned long start, unsigned long end)
+{
+	smp_flush_data.start = start;
+	smp_flush_data.end = end;
+
+	if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 1))
+		printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
+}
+EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
+
+#ifdef __ARCH_SYNC_CORE_DCACHE
+unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));
+
+void resync_core_dcache(void)
+{
+	unsigned int cpu = get_cpu();
+	blackfin_invalidate_entire_dcache();
+	++per_cpu(cpu_data, cpu).dcache_invld_count;
+	put_cpu();
+}
+EXPORT_SYMBOL(resync_core_dcache);
+#endif
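As a rough usage sketch of the cross-call interface defined above (the caller, the counter and the header choices are illustrative assumptions, not part of the patch), a synchronous call to all other cores looks like:

/* Illustrative caller: smp_call_function() queues a BFIN_IPI_CALL_FUNC
 * message on every other online CPU and, because wait is non-zero, spins
 * until each target has cleared its bit in call_struct.waitmask.
 */
#include <linux/smp.h>
#include <asm/atomic.h>

static void bump_counter(void *info)
{
	atomic_inc((atomic_t *)info);	/* runs from ipi_handler() on each target */
}

static void example_cross_call(atomic_t *counter)
{
	smp_call_function(bump_counter, counter, 1 /* wait */);
}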
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index bc240ab..d0532b7 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -31,7 +31,8 @@
 #include <linux/bootmem.h>
 #include <linux/uaccess.h>
 #include <asm/bfin-global.h>
-#include <asm/l1layout.h>
+#include <asm/pda.h>
+#include <asm/cplbinit.h>
 #include "blackfin_sram.h"
 
 /*
@@ -53,6 +54,11 @@
 
 unsigned long empty_zero_page;
 
+extern unsigned long exception_stack[NR_CPUS][1024];
+
+struct blackfin_pda cpu_pda[NR_CPUS];
+EXPORT_SYMBOL(cpu_pda);
+
 /*
  * paging_init() continues the virtual memory environment setup which
  * was begun by the code in arch/head.S.
@@ -98,6 +104,32 @@
 	}
 }
 
+asmlinkage void init_pda(void)
+{
+	unsigned int cpu = raw_smp_processor_id();
+
+	/* Initialize the PDA fields holding references to other parts
+	   of the memory. The content of such memory is still
+	   undefined at the time of the call, we are only setting up
+	   valid pointers to it. */
+	memset(&cpu_pda[cpu], 0, sizeof(cpu_pda[cpu]));
+
+	cpu_pda[0].next = &cpu_pda[1];
+	cpu_pda[1].next = &cpu_pda[0];
+
+	cpu_pda[cpu].ex_stack = exception_stack[cpu + 1];
+
+#ifdef CONFIG_SMP
+	cpu_pda[cpu].imask = 0x1f;
+#endif
+}
+
+void __cpuinit reserve_pda(void)
+{
+	printk(KERN_INFO "PDA for CPU%u reserved at %p\n", smp_processor_id(),
+					&cpu_pda[smp_processor_id()]);
+}
+
 void __init mem_init(void)
 {
 	unsigned int codek = 0, datak = 0, initk = 0;
@@ -141,21 +173,13 @@
 
 static int __init sram_init(void)
 {
-	unsigned long tmp;
-
 	/* Initialize the blackfin L1 Memory. */
 	bfin_sram_init();
 
-	/* Allocate this once; never free it.  We assume this gives us a
-	   pointer to the start of L1 scratchpad memory; panic if it
-	   doesn't.  */
-	tmp = (unsigned long)l1sram_alloc(sizeof(struct l1_scratch_task_info));
-	if (tmp != (unsigned long)L1_SCRATCH_TASK_INFO) {
-		printk(KERN_EMERG "mem_init(): Did not get the right address from l1sram_alloc: %08lx != %08lx\n",
-			tmp, (unsigned long)L1_SCRATCH_TASK_INFO);
-		panic("No L1, time to give up\n");
-	}
-
+	/* Reserve the PDA space for the boot CPU right after we
+	 * initialized the scratch memory allocator.
+	 */
+	reserve_pda();
 	return 0;
 }
 pure_initcall(sram_init);
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index cc6f336..834cab7 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -39,10 +39,13 @@
 #include <linux/spinlock.h>
 #include <linux/rtc.h>
 #include <asm/blackfin.h>
+#include <asm/mem_map.h>
 #include "blackfin_sram.h"
 
-static spinlock_t l1sram_lock, l1_data_sram_lock, l1_inst_sram_lock;
-static spinlock_t l2_sram_lock;
+static DEFINE_PER_CPU(spinlock_t, l1sram_lock) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(spinlock_t, l1_data_sram_lock) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(spinlock_t, l1_inst_sram_lock) ____cacheline_aligned_in_smp;
+static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
 
 /* the data structure for L1 scratchpad and DATA SRAM */
 struct sram_piece {
@@ -52,18 +55,22 @@
 	struct sram_piece *next;
 };
 
-static struct sram_piece free_l1_ssram_head, used_l1_ssram_head;
+static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
+static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);
 
 #if L1_DATA_A_LENGTH != 0
-static struct sram_piece free_l1_data_A_sram_head, used_l1_data_A_sram_head;
+static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
+static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
 #endif
 
 #if L1_DATA_B_LENGTH != 0
-static struct sram_piece free_l1_data_B_sram_head, used_l1_data_B_sram_head;
+static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
+static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
 #endif
 
 #if L1_CODE_LENGTH != 0
-static struct sram_piece free_l1_inst_sram_head, used_l1_inst_sram_head;
+static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
+static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
 #endif
 
 #if L2_LENGTH != 0
@@ -75,102 +82,117 @@
 /* L1 Scratchpad SRAM initialization function */
 static void __init l1sram_init(void)
 {
-	free_l1_ssram_head.next =
-		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
-	if (!free_l1_ssram_head.next) {
-		printk(KERN_INFO "Failed to initialize Scratchpad data SRAM\n");
-		return;
+	unsigned int cpu;
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		per_cpu(free_l1_ssram_head, cpu).next =
+			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+		if (!per_cpu(free_l1_ssram_head, cpu).next) {
+			printk(KERN_INFO "Failed to initialize Scratchpad data SRAM.\n");
+			return;
+		}
+
+		per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu);
+		per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH;
+		per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
+		per_cpu(free_l1_ssram_head, cpu).next->next = NULL;
+
+		per_cpu(used_l1_ssram_head, cpu).next = NULL;
+
+		/* mutex initialize */
+		spin_lock_init(&per_cpu(l1sram_lock, cpu));
+		printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
+			L1_SCRATCH_LENGTH >> 10);
 	}
-
-	free_l1_ssram_head.next->paddr = (void *)L1_SCRATCH_START;
-	free_l1_ssram_head.next->size = L1_SCRATCH_LENGTH;
-	free_l1_ssram_head.next->pid = 0;
-	free_l1_ssram_head.next->next = NULL;
-
-	used_l1_ssram_head.next = NULL;
-
-	/* mutex initialize */
-	spin_lock_init(&l1sram_lock);
-
-	printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
-	       L1_SCRATCH_LENGTH >> 10);
 }
 
 static void __init l1_data_sram_init(void)
 {
+#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
+	unsigned int cpu;
+#endif
 #if L1_DATA_A_LENGTH != 0
-	free_l1_data_A_sram_head.next =
-		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
-	if (!free_l1_data_A_sram_head.next) {
-		printk(KERN_INFO "Failed to initialize L1 Data A SRAM\n");
-		return;
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		per_cpu(free_l1_data_A_sram_head, cpu).next =
+			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
+			printk(KERN_INFO "Failed to initialize L1 Data A SRAM.\n");
+			return;
+		}
+
+		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
+			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
+		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
+			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
+		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
+		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;
+
+		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;
+
+		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
+			L1_DATA_A_LENGTH >> 10,
+			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
 	}
-
-	free_l1_data_A_sram_head.next->paddr =
-		(void *)L1_DATA_A_START + (_ebss_l1 - _sdata_l1);
-	free_l1_data_A_sram_head.next->size =
-		L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
-	free_l1_data_A_sram_head.next->pid = 0;
-	free_l1_data_A_sram_head.next->next = NULL;
-
-	used_l1_data_A_sram_head.next = NULL;
-
-	printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
-		L1_DATA_A_LENGTH >> 10,
-		free_l1_data_A_sram_head.next->size >> 10);
 #endif
 #if L1_DATA_B_LENGTH != 0
-	free_l1_data_B_sram_head.next =
-		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
-	if (!free_l1_data_B_sram_head.next) {
-		printk(KERN_INFO "Failed to initialize L1 Data B SRAM\n");
-		return;
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		per_cpu(free_l1_data_B_sram_head, cpu).next =
+			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
+			printk(KERN_INFO "Failed to initialize L1 Data B SRAM.\n");
+			return;
+		}
+
+		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
+			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
+		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
+			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
+		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
+		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;
+
+		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;
+
+		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
+			L1_DATA_B_LENGTH >> 10,
+			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
+		/* mutex initialize */
 	}
-
-	free_l1_data_B_sram_head.next->paddr =
-		(void *)L1_DATA_B_START + (_ebss_b_l1 - _sdata_b_l1);
-	free_l1_data_B_sram_head.next->size =
-		L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
-	free_l1_data_B_sram_head.next->pid = 0;
-	free_l1_data_B_sram_head.next->next = NULL;
-
-	used_l1_data_B_sram_head.next = NULL;
-
-	printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
-		L1_DATA_B_LENGTH >> 10,
-		free_l1_data_B_sram_head.next->size >> 10);
 #endif
 
-	/* mutex initialize */
-	spin_lock_init(&l1_data_sram_lock);
+#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
+		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
+#endif
 }
 
 static void __init l1_inst_sram_init(void)
 {
 #if L1_CODE_LENGTH != 0
-	free_l1_inst_sram_head.next =
-		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
-	if (!free_l1_inst_sram_head.next) {
-		printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
-		return;
+	unsigned int cpu;
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		per_cpu(free_l1_inst_sram_head, cpu).next =
+			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
+			printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
+			return;
+		}
+
+		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
+			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
+		per_cpu(free_l1_inst_sram_head, cpu).next->size =
+			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
+		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
+		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;
+
+		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;
+
+		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
+			L1_CODE_LENGTH >> 10,
+			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);
+
+		/* mutex initialize */
+		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
 	}
-
-	free_l1_inst_sram_head.next->paddr =
-		(void *)L1_CODE_START + (_etext_l1 - _stext_l1);
-	free_l1_inst_sram_head.next->size =
-		L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
-	free_l1_inst_sram_head.next->pid = 0;
-	free_l1_inst_sram_head.next->next = NULL;
-
-	used_l1_inst_sram_head.next = NULL;
-
-	printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
-		L1_CODE_LENGTH >> 10,
-		free_l1_inst_sram_head.next->size >> 10);
 #endif
-
-	/* mutex initialize */
-	spin_lock_init(&l1_inst_sram_lock);
 }
 
 static void __init l2_sram_init(void)
@@ -179,7 +201,7 @@
 	free_l2_sram_head.next =
 		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
 	if (!free_l2_sram_head.next) {
-		printk(KERN_INFO "Failed to initialize L2 SRAM\n");
+		printk(KERN_INFO "Failed to initialize L2 SRAM.\n");
 		return;
 	}
 
@@ -200,6 +222,7 @@
 	/* mutex initialize */
 	spin_lock_init(&l2_sram_lock);
 }
+
 void __init bfin_sram_init(void)
 {
 	sram_piece_cache = kmem_cache_create("sram_piece_cache",
@@ -353,20 +376,20 @@
 {
 
 #if L1_CODE_LENGTH != 0
-	if (addr >= (void *)L1_CODE_START
-		 && addr < (void *)(L1_CODE_START + L1_CODE_LENGTH))
+	if (addr >= (void *)get_l1_code_start()
+		 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
 		return l1_inst_sram_free(addr);
 	else
 #endif
 #if L1_DATA_A_LENGTH != 0
-	if (addr >= (void *)L1_DATA_A_START
-		 && addr < (void *)(L1_DATA_A_START + L1_DATA_A_LENGTH))
+	if (addr >= (void *)get_l1_data_a_start()
+		 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
 		return l1_data_A_sram_free(addr);
 	else
 #endif
 #if L1_DATA_B_LENGTH != 0
-	if (addr >= (void *)L1_DATA_B_START
-		 && addr < (void *)(L1_DATA_B_START + L1_DATA_B_LENGTH))
+	if (addr >= (void *)get_l1_data_b_start()
+		 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
 		return l1_data_B_sram_free(addr);
 	else
 #endif
@@ -384,17 +407,20 @@
 {
 	unsigned long flags;
 	void *addr = NULL;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_data_sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
 
 #if L1_DATA_A_LENGTH != 0
-	addr = _sram_alloc(size, &free_l1_data_A_sram_head,
-			&used_l1_data_A_sram_head);
+	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
+			&per_cpu(used_l1_data_A_sram_head, cpu));
 #endif
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_data_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
+	put_cpu();
 
 	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
 		 (long unsigned int)addr, size);
@@ -407,19 +433,22 @@
 {
 	unsigned long flags;
 	int ret;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_data_sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
 
 #if L1_DATA_A_LENGTH != 0
-	ret = _sram_free(addr, &free_l1_data_A_sram_head,
-			&used_l1_data_A_sram_head);
+	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
+			&per_cpu(used_l1_data_A_sram_head, cpu));
 #else
 	ret = -1;
 #endif
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_data_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
+	put_cpu();
 
 	return ret;
 }
@@ -430,15 +459,18 @@
 #if L1_DATA_B_LENGTH != 0
 	unsigned long flags;
 	void *addr;
+	unsigned int cpu;
+
+	cpu = get_cpu();
+	/* add mutex operation */
+	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
+
+	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
+			&per_cpu(used_l1_data_B_sram_head, cpu));
 
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_data_sram_lock, flags);
-
-	addr = _sram_alloc(size, &free_l1_data_B_sram_head,
-			&used_l1_data_B_sram_head);
-
-	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_data_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
+	put_cpu();
 
 	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
 		 (long unsigned int)addr, size);
@@ -455,15 +487,18 @@
 #if L1_DATA_B_LENGTH != 0
 	unsigned long flags;
 	int ret;
+	unsigned int cpu;
+
+	cpu = get_cpu();
+	/* add mutex operation */
+	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
+
+	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
+			&per_cpu(used_l1_data_B_sram_head, cpu));
 
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_data_sram_lock, flags);
-
-	ret = _sram_free(addr, &free_l1_data_B_sram_head,
-			&used_l1_data_B_sram_head);
-
-	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_data_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
+	put_cpu();
 
 	return ret;
 #else
@@ -509,15 +544,18 @@
 #if L1_CODE_LENGTH != 0
 	unsigned long flags;
 	void *addr;
+	unsigned int cpu;
+
+	cpu = get_cpu();
+	/* add mutex operation */
+	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);
+
+	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
+			&per_cpu(used_l1_inst_sram_head, cpu));
 
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_inst_sram_lock, flags);
-
-	addr = _sram_alloc(size, &free_l1_inst_sram_head,
-			&used_l1_inst_sram_head);
-
-	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_inst_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
+	put_cpu();
 
 	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
 		 (long unsigned int)addr, size);
@@ -534,15 +572,18 @@
 #if L1_CODE_LENGTH != 0
 	unsigned long flags;
 	int ret;
+	unsigned int cpu;
+
+	cpu = get_cpu();
+	/* add mutex operation */
+	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);
+
+	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
+			&per_cpu(used_l1_inst_sram_head, cpu));
 
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_inst_sram_lock, flags);
-
-	ret = _sram_free(addr, &free_l1_inst_sram_head,
-			&used_l1_inst_sram_head);
-
-	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_inst_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
+	put_cpu();
 
 	return ret;
 #else
@@ -556,15 +597,18 @@
 {
 	unsigned long flags;
 	void *addr;
+	unsigned int cpu;
+
+	cpu = get_cpu();
+	/* add mutex operation */
+	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
+
+	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
+			&per_cpu(used_l1_ssram_head, cpu));
 
 	/* add mutex operation */
-	spin_lock_irqsave(&l1sram_lock, flags);
-
-	addr = _sram_alloc(size, &free_l1_ssram_head,
-			&used_l1_ssram_head);
-
-	/* add mutex operation */
-	spin_unlock_irqrestore(&l1sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
+	put_cpu();
 
 	return addr;
 }
@@ -574,15 +618,18 @@
 {
 	unsigned long flags;
 	void *addr;
+	unsigned int cpu;
+
+	cpu = get_cpu();
+	/* add mutex operation */
+	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
+
+	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
+			&per_cpu(used_l1_ssram_head, cpu), psize);
 
 	/* add mutex operation */
-	spin_lock_irqsave(&l1sram_lock, flags);
-
-	addr = _sram_alloc_max(&free_l1_ssram_head,
-			&used_l1_ssram_head, psize);
-
-	/* add mutex operation */
-	spin_unlock_irqrestore(&l1sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
+	put_cpu();
 
 	return addr;
 }
@@ -592,15 +639,18 @@
 {
 	unsigned long flags;
 	int ret;
+	unsigned int cpu;
+
+	cpu = get_cpu();
+	/* add mutex operation */
+	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
+
+	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
+			&per_cpu(used_l1_ssram_head, cpu));
 
 	/* add mutex operation */
-	spin_lock_irqsave(&l1sram_lock, flags);
-
-	ret = _sram_free(addr, &free_l1_ssram_head,
-			&used_l1_ssram_head);
-
-	/* add mutex operation */
-	spin_unlock_irqrestore(&l1sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
+	put_cpu();
 
 	return ret;
 }
@@ -761,33 +811,36 @@
 		int *eof, void *data)
 {
 	int len = 0;
+	unsigned int cpu;
 
-	if (_sram_proc_read(buf, &len, count, "Scratchpad",
-			&free_l1_ssram_head, &used_l1_ssram_head))
-		goto not_done;
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		if (_sram_proc_read(buf, &len, count, "Scratchpad",
+			&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
+			goto not_done;
 #if L1_DATA_A_LENGTH != 0
-	if (_sram_proc_read(buf, &len, count, "L1 Data A",
-			&free_l1_data_A_sram_head,
-			&used_l1_data_A_sram_head))
-		goto not_done;
+		if (_sram_proc_read(buf, &len, count, "L1 Data A",
+			&per_cpu(free_l1_data_A_sram_head, cpu),
+			&per_cpu(used_l1_data_A_sram_head, cpu)))
+			goto not_done;
 #endif
 #if L1_DATA_B_LENGTH != 0
-	if (_sram_proc_read(buf, &len, count, "L1 Data B",
-			&free_l1_data_B_sram_head,
-			&used_l1_data_B_sram_head))
-		goto not_done;
+		if (_sram_proc_read(buf, &len, count, "L1 Data B",
+			&per_cpu(free_l1_data_B_sram_head, cpu),
+			&per_cpu(used_l1_data_B_sram_head, cpu)))
+			goto not_done;
 #endif
 #if L1_CODE_LENGTH != 0
-	if (_sram_proc_read(buf, &len, count, "L1 Instruction",
-			&free_l1_inst_sram_head, &used_l1_inst_sram_head))
-		goto not_done;
+		if (_sram_proc_read(buf, &len, count, "L1 Instruction",
+			&per_cpu(free_l1_inst_sram_head, cpu),
+			&per_cpu(used_l1_inst_sram_head, cpu)))
+			goto not_done;
 #endif
+	}
 #if L2_LENGTH != 0
-	if (_sram_proc_read(buf, &len, count, "L2",
-			&free_l2_sram_head, &used_l2_sram_head))
+	if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
+		&used_l2_sram_head))
 		goto not_done;
 #endif
-
 	*eof = 1;
  not_done:
 	return len;
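A minimal usage sketch of the reworked allocator (the l1_data_sram_alloc()/l1_data_sram_free() prototypes are assumed to be the ones already exported by the Blackfin headers; the caller itself is illustrative): existing callers are unchanged, since the per-CPU bank is now selected by get_cpu() inside the allocator.

/* Illustrative only: callers see the same interface; the allocation is
 * simply satisfied from the running CPU's own L1 Data A bank.
 */
#include <linux/errno.h>

static int example_l1_buffer(void)
{
	void *buf = l1_data_sram_alloc(128);

	if (!buf)
		return -ENOMEM;
	/* ... place latency-critical data here ... */
	l1_data_sram_free(buf);
	return 0;
}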
diff --git a/arch/blackfin/oprofile/Makefile b/arch/blackfin/oprofile/Makefile
index 634e300..c70af3a 100644
--- a/arch/blackfin/oprofile/Makefile
+++ b/arch/blackfin/oprofile/Makefile
@@ -10,5 +10,4 @@
 		oprofilefs.o oprofile_stats.o \
 		timer_int.o )
 
-oprofile-y := $(DRIVER_OBJS) common.o
-oprofile-$(CONFIG_HARDWARE_PM) += op_model_bf533.o
+oprofile-y := $(DRIVER_OBJS) bfin_oprofile.o
diff --git a/arch/blackfin/oprofile/bfin_oprofile.c b/arch/blackfin/oprofile/bfin_oprofile.c
new file mode 100644
index 0000000..c3b9713
--- /dev/null
+++ b/arch/blackfin/oprofile/bfin_oprofile.c
@@ -0,0 +1,18 @@
+/*
+ * bfin_oprofile.c - Blackfin oprofile code
+ *
+ * Copyright 2004-2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/oprofile.h>
+#include <linux/init.h>
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+	return -1;
+}
+
+void oprofile_arch_exit(void)
+{
+}
diff --git a/arch/blackfin/oprofile/common.c b/arch/blackfin/oprofile/common.c
deleted file mode 100644
index 0f6d303..0000000
--- a/arch/blackfin/oprofile/common.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * File:         arch/blackfin/oprofile/common.c
- * Based on:     arch/alpha/oprofile/common.c
- * Author:       Anton Blanchard <anton@au.ibm.com>
- *
- * Created:
- * Description:
- *
- * Modified:
- *               Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/oprofile.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <linux/mutex.h>
-#include <linux/ptrace.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/blackfin.h>
-
-#include "op_blackfin.h"
-
-#define BFIN_533_ID  0xE5040003
-#define BFIN_537_ID  0xE5040002
-
-static int pfmon_enabled;
-static struct mutex pfmon_lock;
-
-struct op_bfin533_model *model;
-
-struct op_counter_config ctr[OP_MAX_COUNTER];
-
-static int op_bfin_setup(void)
-{
-	int ret;
-
-	/* Pre-compute the values to stuff in the hardware registers.  */
-	spin_lock(&oprofilefs_lock);
-	ret = model->reg_setup(ctr);
-	spin_unlock(&oprofilefs_lock);
-
-	return ret;
-}
-
-static void op_bfin_shutdown(void)
-{
-#if 0
-	/* what is the difference between shutdown and stop? */
-#endif
-}
-
-static int op_bfin_start(void)
-{
-	int ret = -EBUSY;
-
-	printk(KERN_INFO "KSDBG:in %s\n", __func__);
-	mutex_lock(&pfmon_lock);
-	if (!pfmon_enabled) {
-		ret = model->start(ctr);
-		pfmon_enabled = !ret;
-	}
-	mutex_unlock(&pfmon_lock);
-
-	return ret;
-}
-
-static void op_bfin_stop(void)
-{
-	mutex_lock(&pfmon_lock);
-	if (pfmon_enabled) {
-		model->stop();
-		pfmon_enabled = 0;
-	}
-	mutex_unlock(&pfmon_lock);
-}
-
-static int op_bfin_create_files(struct super_block *sb, struct dentry *root)
-{
-	int i;
-
-	for (i = 0; i < model->num_counters; ++i) {
-		struct dentry *dir;
-		char buf[3];
-		printk(KERN_INFO "Oprofile: creating files... \n");
-
-		snprintf(buf, sizeof buf, "%d", i);
-		dir = oprofilefs_mkdir(sb, root, buf);
-
-		oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
-		oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
-		oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
-		/*
-		 * We dont support per counter user/kernel selection, but
-		 * we leave the entries because userspace expects them
-		 */
-		oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
-		oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
-		oprofilefs_create_ulong(sb, dir, "unit_mask",
-					&ctr[i].unit_mask);
-	}
-
-	return 0;
-}
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
-#ifdef CONFIG_HARDWARE_PM
-	unsigned int dspid;
-
-	mutex_init(&pfmon_lock);
-
-	dspid = bfin_read_DSPID();
-
-	printk(KERN_INFO "Oprofile got the cpu id is 0x%x. \n", dspid);
-
-	switch (dspid) {
-	case BFIN_533_ID:
-		model = &op_model_bfin533;
-		model->num_counters = 2;
-		break;
-	case BFIN_537_ID:
-		model = &op_model_bfin533;
-		model->num_counters = 2;
-		break;
-	default:
-		return -ENODEV;
-	}
-
-	ops->cpu_type = model->name;
-	ops->create_files = op_bfin_create_files;
-	ops->setup = op_bfin_setup;
-	ops->shutdown = op_bfin_shutdown;
-	ops->start = op_bfin_start;
-	ops->stop = op_bfin_stop;
-
-	printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
-	       ops->cpu_type);
-
-	return 0;
-#else
-	return -1;
-#endif
-}
-
-void oprofile_arch_exit(void)
-{
-}
diff --git a/arch/blackfin/oprofile/op_blackfin.h b/arch/blackfin/oprofile/op_blackfin.h
deleted file mode 100644
index 05dd08c..0000000
--- a/arch/blackfin/oprofile/op_blackfin.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * File:         arch/blackfin/oprofile/op_blackfin.h
- * Based on:
- * Author:       Anton Blanchard <anton@au.ibm.com>
- *
- * Created:
- * Description:
- *
- * Modified:
- *               Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#ifndef OP_BLACKFIN_H
-#define OP_BLACKFIN_H 1
-
-#define OP_MAX_COUNTER 2
-
-#include <asm/blackfin.h>
-
-/* Per-counter configuration as set via oprofilefs.  */
-struct op_counter_config {
-	unsigned long valid;
-	unsigned long enabled;
-	unsigned long event;
-	unsigned long count;
-	unsigned long kernel;
-	unsigned long user;
-	unsigned long unit_mask;
-};
-
-/* System-wide configuration as set via oprofilefs.  */
-struct op_system_config {
-	unsigned long enable_kernel;
-	unsigned long enable_user;
-};
-
-/* Per-arch configuration */
-struct op_bfin533_model {
-	int (*reg_setup) (struct op_counter_config *);
-	int (*start) (struct op_counter_config *);
-	void (*stop) (void);
-	int num_counters;
-	char *name;
-};
-
-extern struct op_bfin533_model op_model_bfin533;
-
-static inline unsigned int ctr_read(void)
-{
-	unsigned int tmp;
-
-	tmp = bfin_read_PFCTL();
-	CSYNC();
-
-	return tmp;
-}
-
-static inline void ctr_write(unsigned int val)
-{
-	bfin_write_PFCTL(val);
-	CSYNC();
-}
-
-static inline void count_read(unsigned int *count)
-{
-	count[0] = bfin_read_PFCNTR0();
-	count[1] = bfin_read_PFCNTR1();
-	CSYNC();
-}
-
-static inline void count_write(unsigned int *count)
-{
-	bfin_write_PFCNTR0(count[0]);
-	bfin_write_PFCNTR1(count[1]);
-	CSYNC();
-}
-
-extern int pm_overflow_handler(int irq, struct pt_regs *regs);
-
-#endif
diff --git a/arch/blackfin/oprofile/op_model_bf533.c b/arch/blackfin/oprofile/op_model_bf533.c
deleted file mode 100644
index d1c698b..0000000
--- a/arch/blackfin/oprofile/op_model_bf533.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * File:         arch/blackfin/oprofile/op_model_bf533.c
- * Based on:
- * Author:       Anton Blanchard <anton@au.ibm.com>
- *
- * Created:
- * Description:
- *
- * Modified:
- *               Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/oprofile.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <asm/system.h>
-#include <asm/processor.h>
-#include <asm/blackfin.h>
-
-#include "op_blackfin.h"
-
-#define PM_ENABLE 0x01;
-#define PM_CTL1_ENABLE  0x18
-#define PM_CTL0_ENABLE  0xC000
-#define COUNT_EDGE_ONLY 0x3000000
-
-static int oprofile_running;
-
-static unsigned curr_pfctl, curr_count[2];
-
-static int bfin533_reg_setup(struct op_counter_config *ctr)
-{
-	unsigned int pfctl = ctr_read();
-	unsigned int count[2];
-
-	/* set Blackfin perf monitor regs with ctr */
-	if (ctr[0].enabled) {
-		pfctl |= (PM_CTL0_ENABLE | ((char)ctr[0].event << 5));
-		count[0] = 0xFFFFFFFF - ctr[0].count;
-		curr_count[0] = count[0];
-	}
-	if (ctr[1].enabled) {
-		pfctl |= (PM_CTL1_ENABLE | ((char)ctr[1].event << 16));
-		count[1] = 0xFFFFFFFF - ctr[1].count;
-		curr_count[1] = count[1];
-	}
-
-	pr_debug("ctr[0].enabled=%d,ctr[1].enabled=%d,ctr[0].event<<5=0x%x,ctr[1].event<<16=0x%x\n", ctr[0].enabled, ctr[1].enabled, ctr[0].event << 5, ctr[1].event << 16);
-	pfctl |= COUNT_EDGE_ONLY;
-	curr_pfctl = pfctl;
-
-	pr_debug("write 0x%x to pfctl\n", pfctl);
-	ctr_write(pfctl);
-	count_write(count);
-
-	return 0;
-}
-
-static int bfin533_start(struct op_counter_config *ctr)
-{
-	unsigned int pfctl = ctr_read();
-
-	pfctl |= PM_ENABLE;
-	curr_pfctl = pfctl;
-
-	ctr_write(pfctl);
-
-	oprofile_running = 1;
-	pr_debug("start oprofile counter \n");
-
-	return 0;
-}
-
-static void bfin533_stop(void)
-{
-	int pfctl;
-
-	pfctl = ctr_read();
-	pfctl &= ~PM_ENABLE;
-	/* freeze counters */
-	ctr_write(pfctl);
-
-	oprofile_running = 0;
-	pr_debug("stop oprofile counter \n");
-}
-
-static int get_kernel(void)
-{
-	int ipend, is_kernel;
-
-	ipend = bfin_read_IPEND();
-
-	/* test bit 15 */
-	is_kernel = ((ipend & 0x8000) != 0);
-
-	return is_kernel;
-}
-
-int pm_overflow_handler(int irq, struct pt_regs *regs)
-{
-	int is_kernel;
-	int i, cpu;
-	unsigned int pc, pfctl;
-	unsigned int count[2];
-
-	pr_debug("get interrupt in %s\n", __func__);
-	if (oprofile_running == 0) {
-		pr_debug("error: entering interrupt when oprofile is stopped.\n\r");
-		return -1;
-	}
-
-	is_kernel = get_kernel();
-	cpu = smp_processor_id();
-	pc = regs->pc;
-	pfctl = ctr_read();
-
-	/* read the two event counter regs */
-	count_read(count);
-
-	/* if the counter overflows, add sample to oprofile buffer */
-	for (i = 0; i < 2; ++i) {
-		if (oprofile_running) {
-			oprofile_add_sample(regs, i);
-		}
-	}
-
-	/* reset the perfmon counter */
-	ctr_write(curr_pfctl);
-	count_write(curr_count);
-	return 0;
-}
-
-struct op_bfin533_model op_model_bfin533 = {
-	.reg_setup = bfin533_reg_setup,
-	.start = bfin533_start,
-	.stop = bfin533_stop,
-	.num_counters = 2,
-	.name = "blackfin/bf533"
-};
diff --git a/arch/blackfin/oprofile/timer_int.c b/arch/blackfin/oprofile/timer_int.c
deleted file mode 100644
index 6c6f860..0000000
--- a/arch/blackfin/oprofile/timer_int.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * File:         arch/blackfin/oprofile/timer_int.c
- * Based on:
- * Author:       Michael Kang
- *
- * Created:
- * Description:
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/irq.h>
-#include <linux/oprofile.h>
-#include <linux/ptrace.h>
-
-static void enable_sys_timer0()
-{
-}
-static void disable_sys_timer0()
-{
-}
-
-static irqreturn_t sys_timer0_int_handler(int irq, void *dev_id,
-					  struct pt_regs *regs)
-{
-	oprofile_add_sample(regs, 0);
-	return IRQ_HANDLED;
-}
-
-static int sys_timer0_start(void)
-{
-	enable_sys_timer0();
-	return request_irq(IVG11, sys_timer0_int_handler, 0, "sys_timer0", NULL);
-}
-
-static void sys_timer0_stop(void)
-{
-	disable_sys_timer();
-}
-
-int __init sys_timer0_init(struct oprofile_operations *ops)
-{
-	extern int nmi_active;
-
-	if (nmi_active <= 0)
-		return -ENODEV;
-
-	ops->start = timer_start;
-	ops->stop = timer_stop;
-	ops->cpu_type = "timer";
-	printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");
-	return 0;
-}
diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S
index ed171d3..72f5cd3 100644
--- a/arch/cris/arch-v10/kernel/entry.S
+++ b/arch/cris/arch-v10/kernel/entry.S
@@ -691,7 +691,7 @@
 	.long sys_uselib
 	.long sys_swapon
 	.long sys_reboot
-	.long old_readdir
+	.long sys_old_readdir
 	.long old_mmap		/* 90 */
 	.long sys_munmap
 	.long sys_truncate
diff --git a/arch/cris/arch-v32/kernel/entry.S b/arch/cris/arch-v32/kernel/entry.S
index 7f6f93e..5e674c8 100644
--- a/arch/cris/arch-v32/kernel/entry.S
+++ b/arch/cris/arch-v32/kernel/entry.S
@@ -614,7 +614,7 @@
 	.long sys_uselib
 	.long sys_swapon
 	.long sys_reboot
-	.long old_readdir
+	.long sys_old_readdir
 	.long old_mmap		/* 90 */
 	.long sys_munmap
 	.long sys_truncate
diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c
index da7d2be..372d0ca 100644
--- a/arch/cris/arch-v32/kernel/signal.c
+++ b/arch/cris/arch-v32/kernel/signal.c
@@ -456,7 +456,7 @@
 	return -EFAULT;
 }
 
-/* Invoke a singal handler to, well, handle the signal. */
+/* Invoke a signal handler to, well, handle the signal. */
 static inline int
 handle_signal(int canrestart, unsigned long sig,
 	      siginfo_t *info, struct k_sigaction *ka,
diff --git a/arch/cris/include/arch-v10/arch/byteorder.h b/arch/cris/include/arch-v10/arch/byteorder.h
deleted file mode 100644
index 255b646..0000000
--- a/arch/cris/include/arch-v10/arch/byteorder.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef _CRIS_ARCH_BYTEORDER_H
-#define _CRIS_ARCH_BYTEORDER_H
-
-#include <asm/types.h>
-#include <linux/compiler.h>
-
-/* we just define these two (as we can do the swap in a single
- * asm instruction in CRIS) and the arch-independent files will put
- * them together into ntohl etc.
- */
-
-static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
-{
-	__asm__ ("swapwb %0" : "=r" (x) : "0" (x));
-  
-	return(x);
-}
-
-static inline __attribute_const__ __u16 ___arch__swab16(__u16 x)
-{
-	__asm__ ("swapb %0" : "=r" (x) : "0" (x));
-	
-	return(x);
-}
-
-#endif
diff --git a/arch/cris/include/arch-v10/arch/swab.h b/arch/cris/include/arch-v10/arch/swab.h
new file mode 100644
index 0000000..e4e847d
--- /dev/null
+++ b/arch/cris/include/arch-v10/arch/swab.h
@@ -0,0 +1,30 @@
+#ifndef _CRIS_ARCH_SWAB_H
+#define _CRIS_ARCH_SWAB_H
+
+#include <asm/types.h>
+#include <linux/compiler.h>
+
+#define __SWAB_64_THRU_32__
+
+/* we just define these two (as we can do the swap in a single
+ * asm instruction in CRIS) and the arch-independent files will put
+ * them together into ntohl etc.
+ */
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+	__asm__ ("swapwb %0" : "=r" (x) : "0" (x));
+
+	return(x);
+}
+#define __arch_swab32 __arch_swab32
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+	__asm__ ("swapb %0" : "=r" (x) : "0" (x));
+
+	return(x);
+}
+#define __arch_swab16 __arch_swab16
+
+#endif
diff --git a/arch/cris/include/arch-v32/arch/byteorder.h b/arch/cris/include/arch-v32/arch/byteorder.h
deleted file mode 100644
index 6ef8fb4..0000000
--- a/arch/cris/include/arch-v32/arch/byteorder.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef _ASM_CRIS_ARCH_BYTEORDER_H
-#define _ASM_CRIS_ARCH_BYTEORDER_H
-
-#include <asm/types.h>
-
-static inline __const__ __u32
-___arch__swab32(__u32 x)
-{
-	__asm__ __volatile__ ("swapwb %0" : "=r" (x) : "0" (x));
-	return (x);
-}
-
-static inline __const__ __u16
-___arch__swab16(__u16 x)
-{
-	__asm__ __volatile__ ("swapb %0" : "=r" (x) : "0" (x));
-	return (x);
-}
-
-#endif /* _ASM_CRIS_ARCH_BYTEORDER_H */
diff --git a/arch/cris/include/arch-v32/arch/swab.h b/arch/cris/include/arch-v32/arch/swab.h
new file mode 100644
index 0000000..9a4ea5e
--- /dev/null
+++ b/arch/cris/include/arch-v32/arch/swab.h
@@ -0,0 +1,24 @@
+#ifndef _ASM_CRIS_ARCH_SWAB_H
+#define _ASM_CRIS_ARCH_SWAB_H
+
+#include <asm/types.h>
+
+#define __SWAB_64_THRU_32__
+
+static inline __const__ __u32
+__arch_swab32(__u32 x)
+{
+	__asm__ __volatile__ ("swapwb %0" : "=r" (x) : "0" (x));
+	return (x);
+}
+#define __arch_swab32 __arch_swab32
+
+static inline __const__ __u16
+__arch_swab16(__u16 x)
+{
+	__asm__ __volatile__ ("swapb %0" : "=r" (x) : "0" (x));
+	return (x);
+}
+#define __arch_swab16 __arch_swab16
+
+#endif /* _ASM_CRIS_ARCH_SWAB_H */
diff --git a/arch/cris/include/asm/byteorder.h b/arch/cris/include/asm/byteorder.h
index cc8e418..bcd1897 100644
--- a/arch/cris/include/asm/byteorder.h
+++ b/arch/cris/include/asm/byteorder.h
@@ -1,25 +1,6 @@
 #ifndef _CRIS_BYTEORDER_H
 #define _CRIS_BYTEORDER_H
 
-#ifdef __GNUC__
-
-#ifdef __KERNEL__
-#include <arch/byteorder.h>
-
-/* defines are necessary because the other files detect the presence
- * of a defined __arch_swab32, not an inline
- */
-#define __arch__swab32(x) ___arch__swab32(x)
-#define __arch__swab16(x) ___arch__swab16(x)
-#endif /* __KERNEL__ */
-
-#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-#  define __BYTEORDER_HAS_U64__
-#  define __SWAB_64_THRU_32__
-#endif
-
-#endif /* __GNUC__ */
-
 #include <linux/byteorder/little_endian.h>
 
 #endif
diff --git a/arch/cris/include/asm/swab.h b/arch/cris/include/asm/swab.h
new file mode 100644
index 0000000..80668e8
--- /dev/null
+++ b/arch/cris/include/asm/swab.h
@@ -0,0 +1,8 @@
+#ifndef _CRIS_SWAB_H
+#define _CRIS_SWAB_H
+
+#ifdef __KERNEL__
+#include <arch/swab.h>
+#endif /* __KERNEL__ */
+
+#endif /* _CRIS_SWAB_H */
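
Note on the new arch swab.h headers above: each inline is paired with a same-named #define because, as the comment being removed from asm/byteorder.h below explains, the generic byte-swap code detects an arch override by testing for a defined __arch_swab32/__arch_swab16, not for an inline. The detection idiom in include/linux/swab.h looks approximately like this (paraphrased sketch, shown only for illustration):

	static inline __attribute_const__ __u32 __fswab32(__u32 val)
	{
	#ifdef __arch_swab32
		return __arch_swab32(val);		/* CRIS: single "swapwb" instruction */
	#else
		return ___constant_swab32(val);		/* generic shift-and-mask fallback */
	#endif
	}
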
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
index 709e9bd..5e7d401 100644
--- a/arch/frv/kernel/ptrace.c
+++ b/arch/frv/kernel/ptrace.c
@@ -69,7 +69,8 @@
 }
 
 /*
- * check that an address falls within the bounds of the target process's memory mappings
+ * check that an address falls within the bounds of the target process's memory
+ * mappings
  */
 static inline int is_user_addr_valid(struct task_struct *child,
 				     unsigned long start, unsigned long len)
@@ -79,11 +80,11 @@
 		return -EIO;
 	return 0;
 #else
-	struct vm_list_struct *vml;
+	struct vm_area_struct *vma;
 
-	for (vml = child->mm->context.vmlist; vml; vml = vml->next)
-		if (start >= vml->vma->vm_start && start + len <= vml->vma->vm_end)
-			return 0;
+	vma = find_vma(child->mm, start);
+	if (vma && start >= vma->vm_start && start + len <= vma->vm_end)
+		return 0;
 
 	return -EIO;
 #endif
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 27b108a..c68e168 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -1,2 +1 @@
 include include/asm-generic/Kbuild.asm
-unifdef-y += swab.h
diff --git a/arch/h8300/include/asm/byteorder.h b/arch/h8300/include/asm/byteorder.h
index c36b80a..13539da 100644
--- a/arch/h8300/include/asm/byteorder.h
+++ b/arch/h8300/include/asm/byteorder.h
@@ -1,7 +1,6 @@
 #ifndef _H8300_BYTEORDER_H
 #define _H8300_BYTEORDER_H
 
-#include <asm/swab.h>
 #include <linux/byteorder/big_endian.h>
 
 #endif /* _H8300_BYTEORDER_H */
diff --git a/arch/h8300/include/asm/mmu.h b/arch/h8300/include/asm/mmu.h
index 2ce06ea..3130996 100644
--- a/arch/h8300/include/asm/mmu.h
+++ b/arch/h8300/include/asm/mmu.h
@@ -4,7 +4,6 @@
 /* Copyright (C) 2002, David McCullough <davidm@snapgear.com> */
 
 typedef struct {
-	struct vm_list_struct	*vmlist;
 	unsigned long		end_brk;
 } mm_context_t;
 
diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S
index 54e21c3..4eb67fa 100644
--- a/arch/h8300/kernel/syscalls.S
+++ b/arch/h8300/kernel/syscalls.S
@@ -103,7 +103,7 @@
 	.long SYMBOL_NAME(sys_uselib)
 	.long SYMBOL_NAME(sys_swapon)
 	.long SYMBOL_NAME(sys_reboot)
-	.long SYMBOL_NAME(old_readdir)
+	.long SYMBOL_NAME(sys_old_readdir)
 	.long SYMBOL_NAME(old_mmap)		/* 90 */
 	.long SYMBOL_NAME(sys_munmap)
 	.long SYMBOL_NAME(sys_truncate)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 3d31636..6183aec 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -17,6 +17,7 @@
 	select ACPI if (!IA64_HP_SIM)
 	select PM if (!IA64_HP_SIM)
 	select ARCH_SUPPORTS_MSI
+	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 27eb676..a109db3 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -578,7 +578,7 @@
 # CONFIG_SATA_SIS is not set
 # CONFIG_SATA_ULI is not set
 # CONFIG_SATA_VIA is not set
-# CONFIG_SATA_VITESSE is not set
+CONFIG_SATA_VITESSE=y
 # CONFIG_SATA_INIC162X is not set
 # CONFIG_PATA_ACPI is not set
 # CONFIG_PATA_ALI is not set
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index a8cf199..a46f839 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -220,7 +220,7 @@
 	data8 sys_mkdir
 	data8 sys_rmdir		  /* 40 */
 	data8 sys_dup
-	data8 sys_pipe
+	data8 sys_ia64_pipe
 	data8 compat_sys_times
 	data8 sys_ni_syscall	  /* old prof syscall holder */
 	data8 sys32_brk		  /* 45 */
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 3b25bd9..ccbe8ae 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -14,4 +14,3 @@
 unifdef-y += intrinsics.h
 unifdef-y += perfmon.h
 unifdef-y += ustack.h
-unifdef-y += swab.h
diff --git a/arch/ia64/include/asm/acpi-ext.h b/arch/ia64/include/asm/acpi-ext.h
index 734d137..7f8362b 100644
--- a/arch/ia64/include/asm/acpi-ext.h
+++ b/arch/ia64/include/asm/acpi-ext.h
@@ -14,7 +14,6 @@
 #define _ASM_IA64_ACPI_EXT_H
 
 #include <linux/types.h>
-#include <acpi/actypes.h>
 
 extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);
 
diff --git a/arch/ia64/include/asm/byteorder.h b/arch/ia64/include/asm/byteorder.h
index 0f84c5c..a8dd735 100644
--- a/arch/ia64/include/asm/byteorder.h
+++ b/arch/ia64/include/asm/byteorder.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_IA64_BYTEORDER_H
 #define _ASM_IA64_BYTEORDER_H
 
-#include <asm/swab.h>
 #include <linux/byteorder/little_endian.h>
 
 #endif /* _ASM_IA64_BYTEORDER_H */
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index bbab7e2..1f912d9 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -9,6 +9,8 @@
 #include <linux/scatterlist.h>
 #include <asm/swiotlb.h>
 
+#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+
 struct dma_mapping_ops {
 	int             (*mapping_error)(struct device *dev,
 					 dma_addr_t dma_addr);
diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h
index 36429a5..5282546 100644
--- a/arch/ia64/include/asm/irq.h
+++ b/arch/ia64/include/asm/irq.h
@@ -27,7 +27,7 @@
 }
 
 extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
-bool is_affinity_mask_valid(cpumask_var_t cpumask);
+bool is_affinity_mask_valid(const struct cpumask *cpumask);
 
 #define is_affinity_mask_valid is_affinity_mask_valid
 
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 59c17e4..fe87b21 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -62,6 +62,7 @@
 typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
 typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
 typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+typedef u64 ia64_mv_dma_get_required_mask (struct device *);
 
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
@@ -159,6 +160,7 @@
 #  define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device
 #  define platform_dma_mapping_error		ia64_mv.dma_mapping_error
 #  define platform_dma_supported	ia64_mv.dma_supported
+#  define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
 #  define platform_irq_to_vector	ia64_mv.irq_to_vector
 #  define platform_local_vector_to_irq	ia64_mv.local_vector_to_irq
 #  define platform_pci_get_legacy_mem	ia64_mv.pci_get_legacy_mem
@@ -213,6 +215,7 @@
 	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
 	ia64_mv_dma_mapping_error *dma_mapping_error;
 	ia64_mv_dma_supported *dma_supported;
+	ia64_mv_dma_get_required_mask *dma_get_required_mask;
 	ia64_mv_irq_to_vector *irq_to_vector;
 	ia64_mv_local_vector_to_irq *local_vector_to_irq;
 	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
@@ -263,6 +266,7 @@
 	platform_dma_sync_sg_for_device,	\
 	platform_dma_mapping_error,			\
 	platform_dma_supported,			\
+	platform_dma_get_required_mask,		\
 	platform_irq_to_vector,			\
 	platform_local_vector_to_irq,		\
 	platform_pci_get_legacy_mem,		\
@@ -366,6 +370,9 @@
 #ifndef platform_dma_supported
 # define  platform_dma_supported	swiotlb_dma_supported
 #endif
+#ifndef platform_dma_get_required_mask
+# define  platform_dma_get_required_mask	ia64_dma_get_required_mask
+#endif
 #ifndef platform_irq_to_vector
 # define platform_irq_to_vector		__ia64_irq_to_vector
 #endif
diff --git a/arch/ia64/include/asm/machvec_init.h b/arch/ia64/include/asm/machvec_init.h
index ef964b2..37a4698 100644
--- a/arch/ia64/include/asm/machvec_init.h
+++ b/arch/ia64/include/asm/machvec_init.h
@@ -3,6 +3,7 @@
 
 extern ia64_mv_send_ipi_t ia64_send_ipi;
 extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
+extern ia64_mv_dma_get_required_mask ia64_dma_get_required_mask;
 extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
 extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
 extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index 781308e..f1a6e0d 100644
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -67,6 +67,7 @@
 extern ia64_mv_dma_sync_sg_for_device	sn_dma_sync_sg_for_device;
 extern ia64_mv_dma_mapping_error	sn_dma_mapping_error;
 extern ia64_mv_dma_supported		sn_dma_supported;
+extern ia64_mv_dma_get_required_mask	sn_dma_get_required_mask;
 extern ia64_mv_migrate_t		sn_migrate;
 extern ia64_mv_kernel_launch_event_t	sn_kernel_launch_event;
 extern ia64_mv_setup_msi_irq_t		sn_setup_msi_irq;
@@ -123,6 +124,7 @@
 #define platform_dma_sync_sg_for_device	sn_dma_sync_sg_for_device
 #define platform_dma_mapping_error		sn_dma_mapping_error
 #define platform_dma_supported		sn_dma_supported
+#define platform_dma_get_required_mask	sn_dma_get_required_mask
 #define platform_migrate		sn_migrate
 #define platform_kernel_launch_event    sn_kernel_launch_event
 #ifdef CONFIG_PCI_MSI
diff --git a/arch/ia64/include/asm/sn/acpi.h b/arch/ia64/include/asm/sn/acpi.h
index 9ce2801..fd480db2 100644
--- a/arch/ia64/include/asm/sn/acpi.h
+++ b/arch/ia64/include/asm/sn/acpi.h
@@ -9,8 +9,6 @@
 #ifndef _ASM_IA64_SN_ACPI_H
 #define _ASM_IA64_SN_ACPI_H
 
-#include "acpi/acglobal.h"
-
 extern int sn_acpi_rev;
 #define SN_ACPI_BASE_SUPPORT()   (sn_acpi_rev >= 0x20101)
 
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 76a33a9..32f3af1 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -124,7 +124,7 @@
 
 #define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?		\
 				 cpu_all_mask :				\
-				 cpumask_from_node(pcibus_to_node(bus)))
+				 cpumask_of_node(pcibus_to_node(bus)))
 
 #include <asm-generic/topology.h>
 
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index f791576..9015979 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -364,7 +364,7 @@
 struct sigaction;
 long sys_execve(char __user *filename, char __user * __user *argv,
 			   char __user * __user *envp, struct pt_regs *regs);
-asmlinkage long sys_pipe(void);
+asmlinkage long sys_ia64_pipe(void);
 asmlinkage long sys_rt_sigaction(int sig,
 				 const struct sigaction __user *act,
 				 struct sigaction __user *oact,
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 0553648..d541671 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -65,6 +65,7 @@
 void (*pm_power_off) (void);
 EXPORT_SYMBOL(pm_power_off);
 
+u32 acpi_rsdt_forced;
 unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;
 
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index d435f4a..e5341e2 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1442,7 +1442,7 @@
 	data8 sys_mkdir				// 1055
 	data8 sys_rmdir
 	data8 sys_dup
-	data8 sys_pipe
+	data8 sys_ia64_pipe
 	data8 sys_times
 	data8 ia64_brk				// 1060
 	data8 sys_setgid
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 95ff16c..a58f64c 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -102,17 +102,14 @@
 
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
-	cpumask_t mask = CPU_MASK_NONE;
-
-	cpu_set(cpu_logical_id(hwid), mask);
-
 	if (irq < NR_IRQS) {
-		irq_desc[irq].affinity = mask;
+		cpumask_copy(&irq_desc[irq].affinity,
+			     cpumask_of(cpu_logical_id(hwid)));
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
 }
 
-bool is_affinity_mask_valid(cpumask_var_t cpumask)
+bool is_affinity_mask_valid(const struct cpumask *cpumask)
 {
 	if (ia64_platform_is("sn2")) {
 		/* Only allow one CPU to be specified in the smp_affinity mask */
@@ -128,7 +125,7 @@
 unsigned int vectors_in_migration[NR_IRQS];
 
 /*
- * Since cpu_online_map is already updated, we just need to check for
+ * Since cpu_online_mask is already updated, we just need to check for
  * affinity that has zeros
  */
 static void migrate_irqs(void)
@@ -158,7 +155,7 @@
 			 */
 			vectors_in_migration[irq] = irq;
 
-			new_cpu = any_online_cpu(cpu_online_map);
+			new_cpu = cpumask_any(cpu_online_mask);
 
 			/*
 			 * Al three are essential, currently WARN_ON.. maybe panic?
@@ -191,7 +188,7 @@
 	 * Find a new timesync master
 	 */
 	if (smp_processor_id() == time_keeper_id) {
-		time_keeper_id = first_cpu(cpu_online_map);
+		time_keeper_id = cpumask_first(cpu_online_mask);
 		printk ("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
 	}
 
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 097b84d..9adac44 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -434,7 +434,7 @@
 	/*
 	 * It is possible to have multiple instances associated with a given
 	 * task either because an multiple functions in the call path
-	 * have a return probe installed on them, and/or more then one return
+	 * have a return probe installed on them, and/or more than one
 	 * return probe was registered for a target function.
 	 *
 	 * We can handle this because:
@@ -870,7 +870,7 @@
 		return 1;
 
 ss_probe:
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER)
 	if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
 		/* Boost up -- we can execute copied instructions directly */
 		ia64_psr(regs)->ri = p->ainsn.slot;
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index bcbb6d8..92ed83f 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -154,7 +154,7 @@
  * and r9) as this is faster than doing a copy_to_user().
  */
 asmlinkage long
-sys_pipe (void)
+sys_ia64_pipe (void)
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	int fd[2];
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index ff0e7c1..6db0859 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -59,6 +59,7 @@
  *  (i.e. don't allow attacker to fill up logs with unaligned accesses).
  */
 int no_unaligned_warning;
+int unaligned_dump_stack;
 static int noprint_warning;
 
 /*
@@ -1371,9 +1372,12 @@
 			}
 		}
 	} else {
-		if (within_logging_rate_limit())
+		if (within_logging_rate_limit()) {
 			printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
 			       ifa, regs->cr_iip + ipsr->ri);
+			if (unaligned_dump_stack)
+				dump_stack();
+		}
 		set_fs(KERNEL_DS);
 	}
 
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 211fcfd..61f1af5 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -19,6 +19,7 @@
 #include <linux/ioport.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/bootmem.h>
 
 #include <asm/machvec.h>
 #include <asm/page.h>
@@ -748,6 +749,32 @@
 	pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
 }
 
+u64 ia64_dma_get_required_mask(struct device *dev)
+{
+	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
+	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
+	u64 mask;
+
+	if (!high_totalram) {
+		/* convert to mask just covering totalram */
+		low_totalram = (1 << (fls(low_totalram) - 1));
+		low_totalram += low_totalram - 1;
+		mask = low_totalram;
+	} else {
+		high_totalram = (1 << (fls(high_totalram) - 1));
+		high_totalram += high_totalram - 1;
+		mask = (((u64)high_totalram) << 32) + 0xffffffff;
+	}
+	return mask;
+}
+EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
+
+u64 dma_get_required_mask(struct device *dev)
+{
+	return platform_dma_get_required_mask(dev);
+}
+EXPORT_SYMBOL_GPL(dma_get_required_mask);
+
 static int __init pcibios_init(void)
 {
 	set_pci_cacheline_size();
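
For reference, the new ia64_dma_get_required_mask() above widens the highest populated physical address into an all-ones mask. A small userspace sketch of the same arithmetic follows; the page size and max_pfn are illustrative assumptions (not values from the patch), and fls() is modelled with a compiler builtin:

	#include <stdio.h>
	#include <stdint.h>

	static int fls32(uint32_t x)			/* 1-based index of the highest set bit */
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	int main(void)
	{
		const unsigned page_shift = 14;		/* 16 KiB pages; illustrative only */
		uint64_t max_pfn = 65536;		/* hypothetical machine with 1 GiB of RAM */
		uint32_t low_totalram = (max_pfn - 1) << page_shift;
		uint32_t high_totalram = (max_pfn - 1) >> (32 - page_shift);
		uint64_t mask;

		if (!high_totalram) {
			low_totalram = 1u << (fls32(low_totalram) - 1);
			low_totalram += low_totalram - 1;	/* all ones up to the top address bit */
			mask = low_totalram;
		} else {
			high_totalram = 1u << (fls32(high_totalram) - 1);
			high_totalram += high_totalram - 1;
			mask = ((uint64_t)high_totalram << 32) + 0xffffffff;
		}
		printf("required DMA mask: %#llx\n", (unsigned long long)mask);	/* 0x3fffffff */
		return 0;
	}
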
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
index bc610a6..c5a2140 100644
--- a/arch/ia64/sn/kernel/io_acpi_init.c
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -13,7 +13,6 @@
 #include <asm/sn/sn_sal.h>
 #include "xtalk/hubdev.h"
 #include <linux/acpi.h>
-#include <acpi/acnamesp.h>
 
 
 /*
@@ -64,6 +63,7 @@
 sn_acpi_hubdev_init(acpi_handle handle, u32 depth, void *context, void **ret)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	u64 addr;
 	struct hubdev_info *hubdev;
 	struct hubdev_info *hubdev_ptr;
@@ -77,11 +77,12 @@
 	status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
 					  &sn_uuid, &buffer);
 	if (ACPI_FAILURE(status)) {
+		acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
 		printk(KERN_ERR
 		       "sn_acpi_hubdev_init: acpi_get_vendor_resource() "
-		       "(0x%x) failed for: ", status);
-		acpi_ns_print_node_pathname(handle, NULL);
-		printk("\n");
+		       "(0x%x) failed for: %s\n", status,
+			(char *)name_buffer.pointer);
+		kfree(name_buffer.pointer);
 		return AE_OK;		/* Continue walking namespace */
 	}
 
@@ -89,11 +90,12 @@
 	vendor = &resource->data.vendor_typed;
 	if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
 	    sizeof(struct hubdev_info *)) {
+		acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
 		printk(KERN_ERR
-		       "sn_acpi_hubdev_init: Invalid vendor data length: %d for: ",
-		        vendor->byte_length);
-		acpi_ns_print_node_pathname(handle, NULL);
-		printk("\n");
+		       "sn_acpi_hubdev_init: Invalid vendor data length: "
+		       "%d for: %s\n",
+			vendor->byte_length, (char *)name_buffer.pointer);
+		kfree(name_buffer.pointer);
 		goto exit;
 	}
 
@@ -120,6 +122,7 @@
 {
 	u64 addr;
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	acpi_handle handle;
 	struct pcibus_bussoft *prom_bussoft_ptr;
 	struct acpi_resource *resource;
@@ -131,11 +134,11 @@
 	status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
 					  &sn_uuid, &buffer);
 	if (ACPI_FAILURE(status)) {
+		acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
 		printk(KERN_ERR "%s: "
-		       "acpi_get_vendor_resource() failed (0x%x) for: ",
-		       __func__, status);
-		acpi_ns_print_node_pathname(handle, NULL);
-		printk("\n");
+		       "acpi_get_vendor_resource() failed (0x%x) for: %s\n",
+		       __func__, status, (char *)name_buffer.pointer);
+		kfree(name_buffer.pointer);
 		return NULL;
 	}
 	resource = buffer.pointer;
@@ -168,6 +171,7 @@
 {
 	u64 addr;
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	struct sn_irq_info *irq_info, *irq_info_prom;
 	struct pcidev_info *pcidev_ptr, *pcidev_prom_ptr;
 	struct acpi_resource *resource;
@@ -182,11 +186,11 @@
 	status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
 					  &sn_uuid, &buffer);
 	if (ACPI_FAILURE(status)) {
+		acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
 		printk(KERN_ERR
-		       "%s: acpi_get_vendor_resource() failed (0x%x) for: ",
-		        __func__, status);
-		acpi_ns_print_node_pathname(handle, NULL);
-		printk("\n");
+		       "%s: acpi_get_vendor_resource() failed (0x%x) for: %s\n",
+			__func__, status, (char *)name_buffer.pointer);
+		kfree(name_buffer.pointer);
 		return 1;
 	}
 
@@ -194,11 +198,12 @@
 	vendor = &resource->data.vendor_typed;
 	if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
 	    sizeof(struct pci_devdev_info *)) {
+		acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
 		printk(KERN_ERR
-		       "%s: Invalid vendor data length: %d for: ",
-		        __func__, vendor->byte_length);
-		acpi_ns_print_node_pathname(handle, NULL);
-		printk("\n");
+		       "%s: Invalid vendor data length: %d for: %s\n",
+			 __func__, vendor->byte_length,
+			(char *)name_buffer.pointer);
+		kfree(name_buffer.pointer);
 		ret = 1;
 		goto exit;
 	}
@@ -239,6 +244,9 @@
 	acpi_handle parent;
 	int slot;
 	acpi_status status;
+	struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	acpi_get_name(device_handle, ACPI_FULL_PATHNAME, &name_buffer);
 
 	/*
 	 * Do an upward search to find the root bus device, and
@@ -249,9 +257,8 @@
 		status = acpi_get_parent(child, &parent);
 		if (ACPI_FAILURE(status)) {
 			printk(KERN_ERR "%s: acpi_get_parent() failed "
-			       "(0x%x) for: ", __func__, status);
-			acpi_ns_print_node_pathname(child, NULL);
-			printk("\n");
+			       "(0x%x) for: %s\n", __func__, status,
+				(char *)name_buffer.pointer);
 			panic("%s: Unable to find host devfn\n", __func__);
 		}
 		if (parent == rootbus_handle)
@@ -259,22 +266,20 @@
 		child = parent;
 	}
 	if (!child) {
-		printk(KERN_ERR "%s: Unable to find root bus for: ",
-		       __func__);
-		acpi_ns_print_node_pathname(device_handle, NULL);
-		printk("\n");
+		printk(KERN_ERR "%s: Unable to find root bus for: %s\n",
+		       __func__, (char *)name_buffer.pointer);
 		BUG();
 	}
 
 	status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr);
 	if (ACPI_FAILURE(status)) {
-		printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: ",
-		       __func__, status);
-		acpi_ns_print_node_pathname(child, NULL);
-		printk("\n");
+		printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: %s\n",
+		       __func__, status, (char *)name_buffer.pointer);
 		panic("%s: Unable to find host devfn\n", __func__);
 	}
 
+	kfree(name_buffer.pointer);
+
 	slot = (adr >> 16) & 0xffff;
 	function = adr & 0xffff;
 	devfn = PCI_DEVFN(slot, function);
@@ -300,27 +305,28 @@
 	int function;
 	int slot;
 	struct sn_pcidev_match *info = context;
+	struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 
         status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
                                        &adr);
         if (ACPI_SUCCESS(status)) {
 		status = acpi_get_parent(handle, &parent);
 		if (ACPI_FAILURE(status)) {
+			acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
 			printk(KERN_ERR
-			       "%s: acpi_get_parent() failed (0x%x) for: ",
-					__func__, status);
-			acpi_ns_print_node_pathname(handle, NULL);
-			printk("\n");
+			       "%s: acpi_get_parent() failed (0x%x) for: %s\n",
+				__func__, status, (char *)name_buffer.pointer);
+			kfree(name_buffer.pointer);
 			return AE_OK;
 		}
 		status = acpi_evaluate_integer(parent, METHOD_NAME__BBN,
 					       NULL, &bbn);
 		if (ACPI_FAILURE(status)) {
+			acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
 			printk(KERN_ERR
-			  "%s: Failed to find _BBN in parent of: ",
-					__func__);
-			acpi_ns_print_node_pathname(handle, NULL);
-			printk("\n");
+			  "%s: Failed to find _BBN in parent of: %s\n",
+					__func__, (char *)name_buffer.pointer);
+			kfree(name_buffer.pointer);
 			return AE_OK;
 		}
 
@@ -350,24 +356,27 @@
 	acpi_handle rootbus_handle;
 	unsigned long long segment;
 	acpi_status status;
+	struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 
 	rootbus_handle = PCI_CONTROLLER(dev)->acpi_handle;
         status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL,
                                        &segment);
         if (ACPI_SUCCESS(status)) {
 		if (segment != pci_domain_nr(dev)) {
+			acpi_get_name(rootbus_handle, ACPI_FULL_PATHNAME,
+				&name_buffer);
 			printk(KERN_ERR
-			       "%s: Segment number mismatch, 0x%llx vs 0x%x for: ",
-			       __func__, segment, pci_domain_nr(dev));
-			acpi_ns_print_node_pathname(rootbus_handle, NULL);
-			printk("\n");
+			       "%s: Segment number mismatch, 0x%llx vs 0x%x for: %s\n",
+			       __func__, segment, pci_domain_nr(dev),
+			       (char *)name_buffer.pointer);
+			kfree(name_buffer.pointer);
 			return 1;
 		}
 	} else {
-		printk(KERN_ERR "%s: Unable to get __SEG from: ",
-		       __func__);
-		acpi_ns_print_node_pathname(rootbus_handle, NULL);
-		printk("\n");
+		acpi_get_name(rootbus_handle, ACPI_FULL_PATHNAME, &name_buffer);
+		printk(KERN_ERR "%s: Unable to get __SEG from: %s\n",
+		       __func__, (char *)name_buffer.pointer);
+		kfree(name_buffer.pointer);
 		return 1;
 	}
 
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 8a924a5..0d4ffa4 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -26,7 +26,6 @@
 #include <linux/acpi.h>
 #include <asm/sn/sn2/sn_hwperf.h>
 #include <asm/sn/acpi.h>
-#include "acpi/acglobal.h"
 
 extern void sn_init_cpei_timer(void);
 extern void register_sn_procfs(void);
@@ -473,7 +472,7 @@
 	{
 		struct acpi_table_header *header = NULL;
 
-		acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
+		acpi_get_table(ACPI_SIG_DSDT, 1, &header);
 		BUG_ON(header == NULL);
 		sn_acpi_rev = header->oem_revision;
 	}
@@ -505,7 +504,7 @@
 
 	{
 		struct acpi_table_header *header;
-		(void)acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
+		(void)acpi_get_table(ACPI_SIG_DSDT, 1, &header);
 		printk(KERN_INFO "ACPI  DSDT OEM Rev 0x%x\n",
 			header->oem_revision);
 	}
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 53ebb64..863f501 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -356,6 +356,12 @@
 }
 EXPORT_SYMBOL(sn_dma_mapping_error);
 
+u64 sn_dma_get_required_mask(struct device *dev)
+{
+	return DMA_64BIT_MASK;
+}
+EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);
+
 char *sn_pci_get_legacy_mem(struct pci_bus *bus)
 {
 	if (!SN_PCIBUS_BUSSOFT(bus))
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index d15a94c..68d6204 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -129,8 +129,8 @@
 		blocked = stolentick;
 
 	if (stolen > 0 || blocked > 0) {
-		account_steal_time(NULL, jiffies_to_cputime(stolen));
-		account_steal_time(idle_task(cpu), jiffies_to_cputime(blocked));
+		account_steal_ticks(stolen);
+		account_idle_ticks(blocked);
 		run_local_timers();
 
 		if (rcu_pending(cpu))
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index c825bde..fb87c08 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -303,7 +303,7 @@
 	  correct rounding, the emulator can (often) do the same but this
 	  extra calculation can cost quite some time, so you can disable
 	  it here. The emulator will then "only" calculate with a 64 bit
-	  mantissa and round slightly incorrect, what is more then enough
+	  mantissa and round slightly incorrectly, which is more than enough
 	  for normal usage.
 
 config M68KFPU_EMU_ONLY
diff --git a/arch/m68k/amiga/amiints.c b/arch/m68k/amiga/amiints.c
index 907a553..c5b5212 100644
--- a/arch/m68k/amiga/amiints.c
+++ b/arch/m68k/amiga/amiints.c
@@ -72,10 +72,14 @@
 
 void __init amiga_init_IRQ(void)
 {
-	request_irq(IRQ_AUTO_1, ami_int1, 0, "int1", NULL);
-	request_irq(IRQ_AUTO_3, ami_int3, 0, "int3", NULL);
-	request_irq(IRQ_AUTO_4, ami_int4, 0, "int4", NULL);
-	request_irq(IRQ_AUTO_5, ami_int5, 0, "int5", NULL);
+	if (request_irq(IRQ_AUTO_1, ami_int1, 0, "int1", NULL))
+		pr_err("Couldn't register int%d\n", 1);
+	if (request_irq(IRQ_AUTO_3, ami_int3, 0, "int3", NULL))
+		pr_err("Couldn't register int%d\n", 3);
+	if (request_irq(IRQ_AUTO_4, ami_int4, 0, "int4", NULL))
+		pr_err("Couldn't register int%d\n", 4);
+	if (request_irq(IRQ_AUTO_5, ami_int5, 0, "int5", NULL))
+		pr_err("Couldn't register int%d\n", 5);
 
 	m68k_setup_irq_controller(&amiga_irq_controller, IRQ_USER, AMI_STD_IRQS);
 
diff --git a/arch/m68k/amiga/cia.c b/arch/m68k/amiga/cia.c
index 343fab4..ecd0f7c 100644
--- a/arch/m68k/amiga/cia.c
+++ b/arch/m68k/amiga/cia.c
@@ -176,5 +176,7 @@
 	/* override auto int and install CIA handler */
 	m68k_setup_irq_controller(&auto_irq_controller, base->handler_irq, 1);
 	m68k_irq_startup(base->handler_irq);
-	request_irq(base->handler_irq, cia_handler, IRQF_SHARED, base->name, base);
+	if (request_irq(base->handler_irq, cia_handler, IRQF_SHARED,
+			base->name, base))
+		pr_err("Couldn't register %s interrupt\n", base->name);
 }
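
The Amiga changes above, and the m68k interrupt changes that follow, all apply the same fix: the return value of request_irq() is now checked and a pr_err() is logged on failure instead of being silently dropped. Purely to illustrate that recurring pattern, a wrapper could look like the sketch below; the helper name is hypothetical and not part of the patch:

	#include <linux/init.h>
	#include <linux/interrupt.h>
	#include <linux/kernel.h>

	/* Hypothetical helper mirroring the error-reporting pattern used by the
	 * request_irq() conversions in this series. */
	static int __init register_irq_or_warn(unsigned int irq, irq_handler_t handler,
					       unsigned long flags, const char *name,
					       void *dev_id)
	{
		int error = request_irq(irq, handler, flags, name, dev_id);

		if (error)
			pr_err("Couldn't register %s interrupt\n", name);
		return error;
	}
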
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index ab9862c..6e56275 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -493,7 +493,8 @@
 	 * Please don't change this to use ciaa, as it interferes with the
 	 * SCSI code. We'll have to take a look at this later
 	 */
-	request_irq(IRQ_AMIGA_CIAB_TA, timer_routine, 0, "timer", NULL);
+	if (request_irq(IRQ_AMIGA_CIAB_TA, timer_routine, 0, "timer", NULL))
+		pr_err("Couldn't register timer interrupt\n");
 	/* start timer */
 	ciab.cra |= 0x11;
 }
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index 78df98f..8d3eafa 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -31,10 +31,6 @@
 extern int dn_dummy_hwclk(int, struct rtc_time *);
 extern int dn_dummy_set_clock_mmss(unsigned long);
 extern void dn_dummy_reset(void);
-extern void dn_dummy_waitbut(void);
-extern struct fb_info *dn_fb_init(long *);
-extern void dn_dummy_debug_init(void);
-extern irqreturn_t dn_process_int(int irq, struct pt_regs *fp);
 #ifdef CONFIG_HEARTBEAT
 static void dn_heartbeat(int on);
 #endif
@@ -204,7 +200,8 @@
 	printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
 #endif
 
-	request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine);
+	if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine))
+		pr_err("Couldn't register timer interrupt\n");
 }
 
 unsigned long dn_gettimeoffset(void) {
diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c
index c038b7c..a5f33c0 100644
--- a/arch/m68k/atari/atakeyb.c
+++ b/arch/m68k/atari/atakeyb.c
@@ -33,7 +33,6 @@
 #include <asm/atari_joystick.h>
 #include <asm/irq.h>
 
-extern unsigned int keymap_count;
 
 /* Hook for MIDI serial driver */
 void (*atari_MIDI_interrupt_hook) (void);
@@ -567,14 +566,19 @@
 
 int atari_keyb_init(void)
 {
+	int error;
+
 	if (atari_keyb_done)
 		return 0;
 
 	kb_state.state = KEYBOARD;
 	kb_state.len = 0;
 
-	request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, IRQ_TYPE_SLOW,
-		    "keyboard/mouse/MIDI", atari_keyboard_interrupt);
+	error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt,
+			    IRQ_TYPE_SLOW, "keyboard/mouse/MIDI",
+			    atari_keyboard_interrupt);
+	if (error)
+		return error;
 
 	atari_turnoff_irq(IRQ_MFP_ACIA);
 	do {
diff --git a/arch/m68k/atari/stdma.c b/arch/m68k/atari/stdma.c
index d1bd029..604329f 100644
--- a/arch/m68k/atari/stdma.c
+++ b/arch/m68k/atari/stdma.c
@@ -179,8 +179,9 @@
 void __init stdma_init(void)
 {
 	stdma_isr = NULL;
-	request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED,
-	            "ST-DMA: floppy/ACSI/IDE/Falcon-SCSI", stdma_int);
+	if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED,
+			"ST-DMA: floppy/ACSI/IDE/Falcon-SCSI", stdma_int))
+		pr_err("Couldn't register ST-DMA interrupt\n");
 }
 
 
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
index 1edde27..d076ff8 100644
--- a/arch/m68k/atari/time.c
+++ b/arch/m68k/atari/time.c
@@ -31,8 +31,9 @@
     /* start timer C, div = 1:100 */
     mfp.tim_ct_cd = (mfp.tim_ct_cd & 15) | 0x60;
     /* install interrupt service routine for MFP Timer C */
-    request_irq(IRQ_MFP_TIMC, timer_routine, IRQ_TYPE_SLOW,
-                "timer", timer_routine);
+    if (request_irq(IRQ_MFP_TIMC, timer_routine, IRQ_TYPE_SLOW,
+		    "timer", timer_routine))
+	pr_err("Couldn't register timer interrupt\n");
 }
 
 /* ++andreas: gettimeoffset fixed to check for pending interrupt */
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index c072595..9fe6fef 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -43,7 +43,6 @@
 extern int bvme6000_hwclk (int, struct rtc_time *);
 extern int bvme6000_set_clock_mmss (unsigned long);
 extern void bvme6000_reset (void);
-extern void bvme6000_waitbut(void);
 void bvme6000_set_vectors (void);
 
 /* Save tick handler routine pointer, will point to do_timer() in
diff --git a/arch/m68k/hp300/time.c b/arch/m68k/hp300/time.c
index dd7c8a2..f6312c7 100644
--- a/arch/m68k/hp300/time.c
+++ b/arch/m68k/hp300/time.c
@@ -70,7 +70,8 @@
 
   asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE));
 
-  request_irq(IRQ_AUTO_6, hp300_tick, IRQ_FLG_STD, "timer tick", vector);
+  if (request_irq(IRQ_AUTO_6, hp300_tick, IRQ_FLG_STD, "timer tick", vector))
+    pr_err("Couldn't register timer interrupt\n");
 
   out_8(CLOCKBASE + CLKCR2, 0x1);		/* select CR1 */
   out_8(CLOCKBASE + CLKCR1, 0x40);		/* enable irq */
diff --git a/arch/m68k/kernel/.gitignore b/arch/m68k/kernel/.gitignore
new file mode 100644
index 0000000..c5f676c
--- /dev/null
+++ b/arch/m68k/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index f28404d..5c332f2 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -424,7 +424,7 @@
 .data
 ALIGN
 sys_call_table:
-	.long sys_ni_syscall	/* 0  -  old "setup()" system call*/
+	.long sys_restart_syscall	/* 0 - old "setup()" system call, used for restarting */
 	.long sys_exit
 	.long sys_fork
 	.long sys_read
@@ -513,7 +513,7 @@
 	.long sys_uselib
 	.long sys_swapon
 	.long sys_reboot
-	.long old_readdir
+	.long sys_old_readdir
 	.long old_mmap		/* 90 */
 	.long sys_munmap
 	.long sys_truncate
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
index 4d97bd2..303730a 100644
--- a/arch/m68k/kernel/setup.c
+++ b/arch/m68k/kernel/setup.c
@@ -26,6 +26,7 @@
 #include <linux/initrd.h>
 
 #include <asm/bootinfo.h>
+#include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/fpu.h>
 #include <asm/irq.h>
@@ -62,7 +63,6 @@
 int m68k_is040or060;
 EXPORT_SYMBOL(m68k_is040or060);
 
-extern int end;
 extern unsigned long availmem;
 
 int m68k_num_memory;
@@ -215,11 +215,10 @@
 
 void __init setup_arch(char **cmdline_p)
 {
-	extern int _etext, _edata, _end;
 	int i;
 
 	/* The bootinfo is located right after the kernel bss */
-	m68k_parse_bootinfo((const struct bi_record *)&_end);
+	m68k_parse_bootinfo((const struct bi_record *)_end);
 
 	if (CPU_IS_040)
 		m68k_is040or060 = 4;
@@ -252,9 +251,9 @@
 	}
 
 	init_mm.start_code = PAGE_OFFSET;
-	init_mm.end_code = (unsigned long) &_etext;
-	init_mm.end_data = (unsigned long) &_edata;
-	init_mm.brk = (unsigned long) &_end;
+	init_mm.end_code = (unsigned long)_etext;
+	init_mm.end_data = (unsigned long)_edata;
+	init_mm.brk = (unsigned long)_end;
 
 	*cmdline_p = m68k_command_line;
 	memcpy(boot_command_line, *cmdline_p, CL_SIZE);
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index f9af893..de2d05d 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -326,6 +326,9 @@
 	struct sigcontext context;
 	int err;
 
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
 	/* get previous context */
 	if (copy_from_user(&context, usc, sizeof(context)))
 		goto badframe;
@@ -411,6 +414,9 @@
 	unsigned long usp;
 	int err;
 
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
 	err = __get_user(temp, &uc->uc_mcontext.version);
 	if (temp != MCONTEXT_VERSION)
 		goto badframe;
@@ -937,6 +943,15 @@
 		regs->d0 = -EINTR;
 		break;
 
+	case -ERESTART_RESTARTBLOCK:
+		if (!has_handler) {
+			regs->d0 = __NR_restart_syscall;
+			regs->pc -= 2;
+			break;
+		}
+		regs->d0 = -EINTR;
+		break;
+
 	case -ERESTARTSYS:
 		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
 			regs->d0 = -EINTR;
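
The signal.c hunk above gives m68k restart-block support: the sigreturn paths arm restart_block with do_no_restart_syscall, and a syscall that returned -ERESTART_RESTARTBLOCK with no handler pending is re-issued by loading __NR_restart_syscall into d0 and backing the PC up over the 2-byte trap instruction. A standalone userspace model of that decision (the constants below are illustrative stand-ins, not kernel definitions):

	#include <stdio.h>

	struct fake_regs { long d0; unsigned long pc; };

	#define ERESTART_RESTARTBLOCK	516	/* as in <linux/errno.h> */
	#define NR_RESTART_SYSCALL	0	/* m68k slot 0, per the entry.S change above */
	#define EINTR_VAL		4
	#define TRAP_SIZE		2	/* the m68k trap instruction is two bytes */

	static void resolve_restartblock(struct fake_regs *regs, int has_handler)
	{
		if (!has_handler) {
			regs->d0 = NR_RESTART_SYSCALL;	/* re-enter via sys_restart_syscall */
			regs->pc -= TRAP_SIZE;		/* re-execute the trap instruction */
		} else {
			regs->d0 = -EINTR_VAL;		/* a handler sees plain -EINTR */
		}
	}

	int main(void)
	{
		struct fake_regs r = { .d0 = -ERESTART_RESTARTBLOCK, .pc = 0x1000 };

		resolve_restartblock(&r, 0);
		printf("d0=%ld pc=%#lx\n", r.d0, r.pc);	/* d0=0 pc=0xffe */
		return 0;
	}
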
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 6d813de..184acc9 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -401,7 +401,7 @@
  * called from sigreturn(), must ensure userspace code didn't
  * manipulate exception frame to circumvent protection, then complete
  * pending writebacks
- * we just clear TM2 to turn it into an userspace access
+ * we just clear TM2 to turn it into a userspace access
  */
 asmlinkage void berr_040cleanup(struct frame *fp)
 {
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index 8a4919e..d9368c0 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -33,6 +33,7 @@
 	} :data
   /* End of data goes *here* so that freeing init code works properly. */
   _edata = .;
+  NOTES
 
   /* will be freed after init */
   . = ALIGN(PAGE_SIZE);	/* Init code and data */
diff --git a/arch/m68k/mac/baboon.c b/arch/m68k/mac/baboon.c
index 245d16d..2a96beb 100644
--- a/arch/m68k/mac/baboon.c
+++ b/arch/m68k/mac/baboon.c
@@ -92,7 +92,8 @@
 void __init baboon_register_interrupts(void)
 {
 	baboon_disabled = 0;
-	request_irq(IRQ_NUBUS_C, baboon_irq, 0, "baboon", (void *)baboon);
+	if (request_irq(IRQ_NUBUS_C, baboon_irq, 0, "baboon", (void *)baboon))
+		pr_err("Couldn't register baboon interrupt\n");
 }
 
 /*
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 8819b97..98b6bcf 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -47,13 +47,6 @@
 
 struct mac_booter_data mac_bi_data;
 
-/* New m68k bootinfo stuff and videobase */
-
-extern int m68k_num_memory;
-extern struct mem_info m68k_memory[NUM_MEMINFO];
-
-extern struct mem_info m68k_ramdisk;
-
 /* The phys. video addr. - might be bogus on some machines */
 static unsigned long mac_orig_videoaddr;
 
@@ -61,7 +54,6 @@
 extern unsigned long mac_gettimeoffset(void);
 extern int mac_hwclk(int, struct rtc_time *);
 extern int mac_set_clock_mmss(unsigned long);
-extern int show_mac_interrupts(struct seq_file *, void *);
 extern void iop_preinit(void);
 extern void iop_init(void);
 extern void via_init(void);
@@ -805,10 +797,6 @@
 		mac_bi_data.boottime, mac_bi_data.gmtbias);
 	printk(KERN_DEBUG " Machine ID: %ld CPUid: 0x%lx memory size: 0x%lx \n",
 		mac_bi_data.id, mac_bi_data.cpuid, mac_bi_data.memsize);
-#if 0
-	printk("Ramdisk: addr 0x%lx size 0x%lx\n",
-		m68k_ramdisk.addr, m68k_ramdisk.size);
-#endif
 
 	iop_init();
 	via_init();
diff --git a/arch/m68k/mac/debug.c b/arch/m68k/mac/debug.c
index 65dd77a..bce074c 100644
--- a/arch/m68k/mac/debug.c
+++ b/arch/m68k/mac/debug.c
@@ -27,7 +27,6 @@
 #include <asm/macints.h>
 
 extern unsigned long mac_videobase;
-extern unsigned long mac_videodepth;
 extern unsigned long mac_rowbytes;
 
 extern void mac_serial_print(const char *);
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
index 326fb99..1ad4e9d 100644
--- a/arch/m68k/mac/iop.c
+++ b/arch/m68k/mac/iop.c
@@ -305,14 +305,16 @@
 {
 	if (iop_ism_present) {
 		if (oss_present) {
-			request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq,
+			if (request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq,
 					IRQ_FLG_LOCK, "ISM IOP",
-					(void *) IOP_NUM_ISM);
+					(void *) IOP_NUM_ISM))
+				pr_err("Couldn't register ISM IOP interrupt\n");
 			oss_irq_enable(IRQ_MAC_ADB);
 		} else {
-			request_irq(IRQ_VIA2_0, iop_ism_irq,
+			if (request_irq(IRQ_VIA2_0, iop_ism_irq,
 					IRQ_FLG_LOCK|IRQ_FLG_FAST, "ISM IOP",
-					(void *) IOP_NUM_ISM);
+					(void *) IOP_NUM_ISM))
+				pr_err("Couldn't register ISM IOP interrupt\n");
 		}
 		if (!iop_alive(iop_base[IOP_NUM_ISM])) {
 			printk("IOP: oh my god, they killed the ISM IOP!\n");
diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c
index 82e560c..2371107 100644
--- a/arch/m68k/mac/macints.c
+++ b/arch/m68k/mac/macints.c
@@ -134,6 +134,7 @@
 #include <asm/errno.h>
 #include <asm/macints.h>
 #include <asm/irq_regs.h>
+#include <asm/mac_oss.h>
 
 #define DEBUG_SPURIOUS
 #define SHUTUP_SONIC
@@ -146,7 +147,6 @@
  * VIA/RBV hooks
  */
 
-extern void via_init(void);
 extern void via_register_interrupts(void);
 extern void via_irq_enable(int);
 extern void via_irq_disable(int);
@@ -157,9 +157,6 @@
  * OSS hooks
  */
 
-extern int oss_present;
-
-extern void oss_init(void);
 extern void oss_register_interrupts(void);
 extern void oss_irq_enable(int);
 extern void oss_irq_disable(int);
@@ -170,9 +167,6 @@
  * PSC hooks
  */
 
-extern int psc_present;
-
-extern void psc_init(void);
 extern void psc_register_interrupts(void);
 extern void psc_irq_enable(int);
 extern void psc_irq_disable(int);
@@ -191,12 +185,10 @@
 
 extern int baboon_present;
 
-extern void baboon_init(void);
 extern void baboon_register_interrupts(void);
 extern void baboon_irq_enable(int);
 extern void baboon_irq_disable(int);
 extern void baboon_irq_clear(int);
-extern int  baboon_irq_pending(int);
 
 /*
  * SCC interrupt routines
@@ -258,8 +250,9 @@
 	if (baboon_present)
 		baboon_register_interrupts();
 	iop_register_interrupts();
-	request_irq(IRQ_AUTO_7, mac_nmi_handler, 0, "NMI",
-			mac_nmi_handler);
+	if (request_irq(IRQ_AUTO_7, mac_nmi_handler, 0, "NMI",
+			mac_nmi_handler))
+		pr_err("Couldn't register NMI\n");
 #ifdef DEBUG_MACINTS
 	printk("mac_init_IRQ(): Done!\n");
 #endif
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index a44c708..5d818568 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -35,7 +35,6 @@
 
 #define RTC_OFFSET 2082844800
 
-extern struct mac_booter_data mac_bi_data;
 static void (*rom_reset)(void);
 
 #ifdef CONFIG_ADB_CUDA
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index 8426501..f3d23d6 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -66,16 +66,21 @@
 
 void __init oss_register_interrupts(void)
 {
-	request_irq(OSS_IRQLEV_SCSI, oss_irq, IRQ_FLG_LOCK,
-			"scsi", (void *) oss);
-	request_irq(OSS_IRQLEV_IOPSCC, mac_scc_dispatch, IRQ_FLG_LOCK,
-			"scc", mac_scc_dispatch);
-	request_irq(OSS_IRQLEV_NUBUS, oss_nubus_irq, IRQ_FLG_LOCK,
-			"nubus", (void *) oss);
-	request_irq(OSS_IRQLEV_SOUND, oss_irq, IRQ_FLG_LOCK,
-			"sound", (void *) oss);
-	request_irq(OSS_IRQLEV_VIA1, via1_irq, IRQ_FLG_LOCK,
-			"via1", (void *) via1);
+	if (request_irq(OSS_IRQLEV_SCSI, oss_irq, IRQ_FLG_LOCK,
+			"scsi", (void *) oss))
+		pr_err("Couldn't register %s interrupt\n", "scsi");
+	if (request_irq(OSS_IRQLEV_IOPSCC, mac_scc_dispatch, IRQ_FLG_LOCK,
+			"scc", mac_scc_dispatch))
+		pr_err("Couldn't register %s interrupt\n", "scc");
+	if (request_irq(OSS_IRQLEV_NUBUS, oss_nubus_irq, IRQ_FLG_LOCK,
+			"nubus", (void *) oss))
+		pr_err("Couldn't register %s interrupt\n", "nubus");
+	if (request_irq(OSS_IRQLEV_SOUND, oss_irq, IRQ_FLG_LOCK,
+			"sound", (void *) oss))
+		pr_err("Couldn't register %s interrupt\n", "sound");
+	if (request_irq(OSS_IRQLEV_VIA1, via1_irq, IRQ_FLG_LOCK,
+			"via1", (void *) via1))
+		pr_err("Couldn't register %s interrupt\n", "via1");
 }
 
 /*
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
index f84a4dd..ba6ccab 100644
--- a/arch/m68k/mac/psc.c
+++ b/arch/m68k/mac/psc.c
@@ -117,10 +117,14 @@
 
 void __init psc_register_interrupts(void)
 {
-	request_irq(IRQ_AUTO_3, psc_irq, 0, "psc3", (void *) 0x30);
-	request_irq(IRQ_AUTO_4, psc_irq, 0, "psc4", (void *) 0x40);
-	request_irq(IRQ_AUTO_5, psc_irq, 0, "psc5", (void *) 0x50);
-	request_irq(IRQ_AUTO_6, psc_irq, 0, "psc6", (void *) 0x60);
+	if (request_irq(IRQ_AUTO_3, psc_irq, 0, "psc3", (void *) 0x30))
+		pr_err("Couldn't register psc%d interrupt\n", 3);
+	if (request_irq(IRQ_AUTO_4, psc_irq, 0, "psc4", (void *) 0x40))
+		pr_err("Couldn't register psc%d interrupt\n", 4);
+	if (request_irq(IRQ_AUTO_5, psc_irq, 0, "psc5", (void *) 0x50))
+		pr_err("Couldn't register psc%d interrupt\n", 5);
+	if (request_irq(IRQ_AUTO_6, psc_irq, 0, "psc6", (void *) 0x60))
+		pr_err("Couldn't register psc%d interrupt\n", 6);
 }
 
 /*
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index f01d418..7d97ba5 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -34,6 +34,7 @@
 #include <asm/macints.h>
 #include <asm/mac_via.h>
 #include <asm/mac_psc.h>
+#include <asm/mac_oss.h>
 
 volatile __u8 *via1, *via2;
 int rbv_present;
@@ -84,7 +85,6 @@
 void via_irq_clear(int irq);
 
 extern irqreturn_t mac_scc_dispatch(int, void *);
-extern int oss_present;
 
 /*
  * Initialize the VIAs
@@ -283,7 +283,8 @@
 	via1[vT1CL] = MAC_CLOCK_LOW;
 	via1[vT1CH] = MAC_CLOCK_HIGH;
 
-	request_irq(IRQ_MAC_TIMER_1, func, IRQ_FLG_LOCK, "timer", func);
+	if (request_irq(IRQ_MAC_TIMER_1, func, IRQ_FLG_LOCK, "timer", func))
+		pr_err("Couldn't register %s interrupt\n", "timer");
 }
 
 /*
@@ -293,25 +294,31 @@
 void __init via_register_interrupts(void)
 {
 	if (via_alt_mapping) {
-		request_irq(IRQ_AUTO_1, via1_irq,
+		if (request_irq(IRQ_AUTO_1, via1_irq,
 				IRQ_FLG_LOCK|IRQ_FLG_FAST, "software",
-				(void *) via1);
-		request_irq(IRQ_AUTO_6, via1_irq,
+				(void *) via1))
+			pr_err("Couldn't register %s interrupt\n", "software");
+		if (request_irq(IRQ_AUTO_6, via1_irq,
 				IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
-				(void *) via1);
+				(void *) via1))
+			pr_err("Couldn't register %s interrupt\n", "via1");
 	} else {
-		request_irq(IRQ_AUTO_1, via1_irq,
+		if (request_irq(IRQ_AUTO_1, via1_irq,
 				IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
-				(void *) via1);
+				(void *) via1))
+			pr_err("Couldn't register %s interrupt\n", "via1");
 	}
-	request_irq(IRQ_AUTO_2, via2_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
-			"via2", (void *) via2);
+	if (request_irq(IRQ_AUTO_2, via2_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
+			"via2", (void *) via2))
+		pr_err("Couldn't register %s interrupt\n", "via2");
 	if (!psc_present) {
-		request_irq(IRQ_AUTO_4, mac_scc_dispatch, IRQ_FLG_LOCK,
-				"scc", mac_scc_dispatch);
+		if (request_irq(IRQ_AUTO_4, mac_scc_dispatch, IRQ_FLG_LOCK,
+				"scc", mac_scc_dispatch))
+			pr_err("Couldn't register %s interrupt\n", "scc");
 	}
-	request_irq(IRQ_MAC_NUBUS, via_nubus_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
-			"nubus", (void *) via2);
+	if (request_irq(IRQ_MAC_NUBUS, via_nubus_irq,
+			IRQ_FLG_LOCK|IRQ_FLG_FAST, "nubus", (void *) via2))
+		pr_err("Couldn't register %s interrupt\n", "nubus");
 }
 
 /*
diff --git a/arch/m68k/math-emu/fp_log.c b/arch/m68k/math-emu/fp_log.c
index b1033ae..367ecee 100644
--- a/arch/m68k/math-emu/fp_log.c
+++ b/arch/m68k/math-emu/fp_log.c
@@ -24,7 +24,6 @@
 
 extern struct fp_ext *fp_fadd(struct fp_ext *dest, const struct fp_ext *src);
 extern struct fp_ext *fp_fdiv(struct fp_ext *dest, const struct fp_ext *src);
-extern struct fp_ext *fp_fmul(struct fp_ext *dest, const struct fp_ext *src);
 
 struct fp_ext *
 fp_fsqrt(struct fp_ext *dest, struct fp_ext *src)
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 81bb08c..0007b2a 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -28,6 +28,7 @@
 #ifdef CONFIG_ATARI
 #include <asm/atari_stram.h>
 #endif
+#include <asm/sections.h>
 #include <asm/tlb.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -73,9 +74,6 @@
 
 /* References to section boundaries */
 
-extern char _text[], _etext[];
-extern char __init_begin[], __init_end[];
-
 extern pmd_t *zero_pgtable;
 
 void __init mem_init(void)
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index c5dbb9b..4665fc8 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -30,6 +30,7 @@
 #ifdef CONFIG_ATARI
 #include <asm/atari_stram.h>
 #endif
+#include <asm/sections.h>
 
 #undef DEBUG
 
@@ -301,14 +302,12 @@
 	}
 }
 
-extern char __init_begin, __init_end;
-
 void free_initmem(void)
 {
 	unsigned long addr;
 
-	addr = (unsigned long)&__init_begin;
-	for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
+	addr = (unsigned long)__init_begin;
+	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
 		virt_to_page(addr)->flags &= ~(1 << PG_reserved);
 		init_page_count(virt_to_page(addr));
 		free_page(addr);
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index 43cdf47..100baaa 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -42,7 +42,6 @@
 extern int mvme147_hwclk (int, struct rtc_time *);
 extern int mvme147_set_clock_mmss (unsigned long);
 extern void mvme147_reset (void);
-extern void mvme147_waitbut(void);
 
 
 static int bcd2int (unsigned char b);
@@ -115,8 +114,9 @@
 void mvme147_sched_init (irq_handler_t timer_routine)
 {
 	tick_handler = timer_routine;
-	request_irq (PCC_IRQ_TIMER1, mvme147_timer_int,
-		IRQ_FLG_REPLACE, "timer 1", NULL);
+	if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, IRQ_FLG_REPLACE,
+			"timer 1", NULL))
+		pr_err("Couldn't register timer interrupt\n");
 
 	/* Init the clock with a value */
 	/* our clock goes off every 6.25us */
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 1521826..11edf61 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -48,7 +48,6 @@
 extern int mvme16x_hwclk (int, struct rtc_time *);
 extern int mvme16x_set_clock_mmss (unsigned long);
 extern void mvme16x_reset (void);
-extern void mvme16x_waitbut(void);
 
 int bcd2int (unsigned char b);
 
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 7110546..31ab3f0 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -36,7 +36,6 @@
 #include <asm/machdep.h>
 #include <asm/q40_master.h>
 
-extern irqreturn_t q40_process_int(int level, struct pt_regs *regs);
 extern void q40_init_IRQ(void);
 static void q40_get_model(char *model);
 extern void q40_sched_init(irq_handler_t handler);
@@ -47,8 +46,6 @@
 static int q40_set_clock_mmss(unsigned long);
 static int q40_get_rtc_pll(struct rtc_pll_info *pll);
 static int q40_set_rtc_pll(struct rtc_pll_info *pll);
-extern void q40_waitbut(void);
-void q40_set_vectors(void);
 
 extern void q40_mksound(unsigned int /*freq*/, unsigned int /*ticks*/);
 
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 8dfaa20..2ca25bd 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -27,23 +27,21 @@
 #include <asm/sun3mmu.h>
 #include <asm/rtc.h>
 #include <asm/machdep.h>
+#include <asm/idprom.h>
 #include <asm/intersil.h>
 #include <asm/irq.h>
+#include <asm/sections.h>
 #include <asm/segment.h>
 #include <asm/sun3ints.h>
 
-extern char _text, _end;
-
 char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
 
 extern unsigned long sun3_gettimeoffset(void);
 static void sun3_sched_init(irq_handler_t handler);
 extern void sun3_get_model (char* model);
-extern void idprom_init (void);
 extern int sun3_hwclk(int set, struct rtc_time *t);
 
 volatile char* clock_va;
-extern volatile unsigned char* sun3_intreg;
 extern unsigned long availmem;
 unsigned long num_pages;
 
@@ -149,7 +147,7 @@
 	mach_halt	     =  sun3_halt;
 	mach_get_hardware_list = sun3_get_hardware_list;
 
-	memory_start = ((((int)&_end) + 0x2000) & ~0x1fff);
+	memory_start = ((((unsigned long)_end) + 0x2000) & ~0x1fff);
 // PROM seems to want the last couple of physical pages. --m
 	memory_end   = *(romvec->pv_sun3mem) + PAGE_OFFSET - 2*PAGE_SIZE;
 
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
index 60f9d45..3cd1939 100644
--- a/arch/m68k/sun3/mmu_emu.c
+++ b/arch/m68k/sun3/mmu_emu.c
@@ -27,7 +27,6 @@
 #include <asm/mmu_context.h>
 #include <asm/dvma.h>
 
-extern void prom_reboot (char *) __attribute__ ((__noreturn__));
 
 #undef DEBUG_MMU_EMU
 #define DEBUG_PROM_MAPS
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
index 7364cd6..ad90393 100644
--- a/arch/m68k/sun3/sun3ints.c
+++ b/arch/m68k/sun3/sun3ints.c
@@ -105,7 +105,10 @@
 	m68k_setup_irq_controller(&sun3_irq_controller, IRQ_AUTO_1, 7);
 	m68k_setup_user_interrupt(VEC_USER, 128, NULL);
 
-	request_irq(IRQ_AUTO_5, sun3_int5, 0, "int5", NULL);
-	request_irq(IRQ_AUTO_7, sun3_int7, 0, "int7", NULL);
-	request_irq(IRQ_USER+127, sun3_vec255, 0, "vec255", NULL);
+	if (request_irq(IRQ_AUTO_5, sun3_int5, 0, "int5", NULL))
+		pr_err("Couldn't register %s interrupt\n", "int5");
+	if (request_irq(IRQ_AUTO_7, sun3_int7, 0, "int7", NULL))
+		pr_err("Couldn't register %s interrupt\n", "int7");
+	if (request_irq(IRQ_USER+127, sun3_vec255, 0, "vec255", NULL))
+		pr_err("Couldn't register %s interrupt\n", "vec255");
 }
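
The request_irq() hunks above (mvme147 and sun3) now check the return value and log a failure instead of silently ignoring it. A minimal sketch of the same pattern for a hypothetical board interrupt; the IRQ number, handler and name below are placeholders, not real kernel symbols:

	static int __init example_board_irq_init(void)
	{
		/* EXAMPLE_IRQ and example_handler are hypothetical */
		if (request_irq(EXAMPLE_IRQ, example_handler, 0, "example", NULL)) {
			pr_err("Couldn't register %s interrupt\n", "example");
			return -ENODEV;
		}
		return 0;
	}
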
diff --git a/arch/m68k/sun3x/config.c b/arch/m68k/sun3x/config.c
index 2b1ca2d..fc599fa 100644
--- a/arch/m68k/sun3x/config.c
+++ b/arch/m68k/sun3x/config.c
@@ -23,7 +23,6 @@
 #include "time.h"
 
 volatile char *clock_va;
-extern volatile unsigned char *sun3_intreg;
 
 extern void sun3_get_model(char *model);
 
diff --git a/arch/m68knommu/include/asm/Kbuild b/arch/m68knommu/include/asm/Kbuild
index 58c02a4..c68e168 100644
--- a/arch/m68knommu/include/asm/Kbuild
+++ b/arch/m68knommu/include/asm/Kbuild
@@ -1,3 +1 @@
 include include/asm-generic/Kbuild.asm
-
-unifdef-y += swab.h
diff --git a/arch/m68knommu/include/asm/byteorder.h b/arch/m68knommu/include/asm/byteorder.h
index a6f0b8f..9c6c76a 100644
--- a/arch/m68knommu/include/asm/byteorder.h
+++ b/arch/m68knommu/include/asm/byteorder.h
@@ -1,7 +1,6 @@
 #ifndef _M68KNOMMU_BYTEORDER_H
 #define _M68KNOMMU_BYTEORDER_H
 
-#include <asm/swab.h>
 #include <linux/byteorder/big_endian.h>
 
 #endif /* _M68KNOMMU_BYTEORDER_H */
diff --git a/arch/m68knommu/include/asm/mmu.h b/arch/m68knommu/include/asm/mmu.h
index 5fa6b68..e2da1e6 100644
--- a/arch/m68knommu/include/asm/mmu.h
+++ b/arch/m68knommu/include/asm/mmu.h
@@ -4,7 +4,6 @@
 /* Copyright (C) 2002, David McCullough <davidm@snapgear.com> */
 
 typedef struct {
-	struct vm_list_struct	*vmlist;
 	unsigned long		end_brk;
 } mm_context_t;
 
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S
index 812f8d8..5c3e3f6 100644
--- a/arch/m68knommu/kernel/syscalltable.S
+++ b/arch/m68knommu/kernel/syscalltable.S
@@ -107,7 +107,7 @@
 	.long sys_uselib
 	.long sys_ni_syscall	/* sys_swapon */
 	.long sys_reboot
-	.long old_readdir
+	.long sys_old_readdir
 	.long old_mmap		/* 90 */
 	.long sys_munmap
 	.long sys_truncate
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a5255e7..52c80c2 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -595,6 +595,44 @@
 	  This enables support for the Wind River MIPS32 4KC PPMC evaluation
 	  board, which is based on GT64120 bridge chip.
 
+config CAVIUM_OCTEON_SIMULATOR
+	bool "Support for the Cavium Networks Octeon Simulator"
+	select CEVT_R4K
+	select 64BIT_PHYS_ADDR
+	select DMA_COHERENT
+	select SYS_SUPPORTS_64BIT_KERNEL
+	select SYS_SUPPORTS_BIG_ENDIAN
+	select SYS_SUPPORTS_HIGHMEM
+	select CPU_CAVIUM_OCTEON
+	help
+	  The Octeon simulator is a software performance model of the Cavium
+	  Octeon Processor. It supports simulating Octeon processors on x86
+	  hardware.
+
+config CAVIUM_OCTEON_REFERENCE_BOARD
+	bool "Support for the Cavium Networks Octeon reference board"
+	select CEVT_R4K
+	select 64BIT_PHYS_ADDR
+	select DMA_COHERENT
+	select SYS_SUPPORTS_64BIT_KERNEL
+	select SYS_SUPPORTS_BIG_ENDIAN
+	select SYS_SUPPORTS_HIGHMEM
+	select SYS_HAS_EARLY_PRINTK
+	select CPU_CAVIUM_OCTEON
+	select SWAP_IO_SPACE
+	help
+	  This option supports all of the Octeon reference boards from Cavium
+	  Networks. It builds a kernel that dynamically determines the Octeon
+	  CPU type and supports all known board reference implementations.
+	  Some of the supported boards are:
+		EBT3000
+		EBH3000
+		EBH3100
+		Thunder
+		Kodama
+		Hikari
+	  Say Y here for most Octeon reference boards.
+
 endchoice
 
 source "arch/mips/alchemy/Kconfig"
@@ -607,6 +645,7 @@
 source "arch/mips/sibyte/Kconfig"
 source "arch/mips/txx9/Kconfig"
 source "arch/mips/vr41xx/Kconfig"
+source "arch/mips/cavium-octeon/Kconfig"
 
 endmenu
 
@@ -682,7 +721,11 @@
 config CEVT_GT641XX
 	bool
 
+config CEVT_R4K_LIB
+	bool
+
 config CEVT_R4K
+	select CEVT_R4K_LIB
 	bool
 
 config CEVT_SB1250
@@ -697,7 +740,11 @@
 config CSRC_IOASIC
 	bool
 
+config CSRC_R4K_LIB
+	bool
+
 config CSRC_R4K
+	select CSRC_R4K_LIB
 	bool
 
 config CSRC_SB1250
@@ -835,6 +882,9 @@
 config IRQ_GIC
 	bool
 
+config IRQ_CPU_OCTEON
+	bool
+
 config MIPS_BOARDS_GEN
 	bool
 
@@ -924,7 +974,7 @@
 config MIPS_L1_CACHE_SHIFT
 	int
 	default "4" if MACH_DECSTATION || MIKROTIK_RB532
-	default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM
+	default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM || CPU_CAVIUM_OCTEON
 	default "4" if PMC_MSP4200_EVAL
 	default "5"
 
@@ -1185,6 +1235,23 @@
 	select CPU_SUPPORTS_HIGHMEM
 	select WEAK_ORDERING
 
+config CPU_CAVIUM_OCTEON
+	bool "Cavium Octeon processor"
+	select IRQ_CPU
+	select IRQ_CPU_OCTEON
+	select CPU_HAS_PREFETCH
+	select CPU_SUPPORTS_64BIT_KERNEL
+	select SYS_SUPPORTS_SMP
+	select NR_CPUS_DEFAULT_16
+	select WEAK_ORDERING
+	select WEAK_REORDERING_BEYOND_LLSC
+	select CPU_SUPPORTS_HIGHMEM
+	help
+	  The Cavium Octeon processor is a highly integrated chip containing
+	  many ethernet hardware widgets for networking tasks. The processor
+	  can have up to 16 MIPS64v2 cores and 8 integrated gigabit ethernets.
+	  Full details can be found at http://www.caviumnetworks.com.
+
 endchoice
 
 config SYS_HAS_CPU_LOONGSON2
@@ -1285,7 +1352,7 @@
 
 config CPU_MIPSR2
 	bool
-	default y if CPU_MIPS32_R2 || CPU_MIPS64_R2
+	default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
 
 config SYS_SUPPORTS_32BIT_KERNEL
 	bool
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 28c55f6..21b00e9 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -144,6 +144,10 @@
 cflags-$(CONFIG_CPU_R8000)	+= -march=r8000 -Wa,--trap
 cflags-$(CONFIG_CPU_R10000)	+= $(call cc-option,-march=r10000,-march=r8000) \
 			-Wa,--trap
+cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += $(call cc-option,-march=octeon) -Wa,--trap
+ifeq (,$(findstring march=octeon, $(cflags-$(CONFIG_CPU_CAVIUM_OCTEON))))
+cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -Wa,-march=octeon
+endif
 
 cflags-$(CONFIG_CPU_R4000_WORKAROUNDS)	+= $(call cc-option,-mfix-r4000,)
 cflags-$(CONFIG_CPU_R4400_WORKAROUNDS)	+= $(call cc-option,-mfix-r4400,)
@@ -184,84 +188,84 @@
 #
 # AMD Alchemy Pb1000 eval board
 #
-libs-$(CONFIG_MIPS_PB1000)	+= arch/mips/alchemy/pb1000/
+core-$(CONFIG_MIPS_PB1000)	+= arch/mips/alchemy/devboards/
 cflags-$(CONFIG_MIPS_PB1000)	+= -I$(srctree)/arch/mips/include/asm/mach-pb1x00
 load-$(CONFIG_MIPS_PB1000)	+= 0xffffffff80100000
 
 #
 # AMD Alchemy Pb1100 eval board
 #
-libs-$(CONFIG_MIPS_PB1100)	+= arch/mips/alchemy/pb1100/
+core-$(CONFIG_MIPS_PB1100)	+= arch/mips/alchemy/devboards/
 cflags-$(CONFIG_MIPS_PB1100)	+= -I$(srctree)/arch/mips/include/asm/mach-pb1x00
 load-$(CONFIG_MIPS_PB1100)	+= 0xffffffff80100000
 
 #
 # AMD Alchemy Pb1500 eval board
 #
-libs-$(CONFIG_MIPS_PB1500)	+= arch/mips/alchemy/pb1500/
+core-$(CONFIG_MIPS_PB1500)	+= arch/mips/alchemy/devboards/
 cflags-$(CONFIG_MIPS_PB1500)	+= -I$(srctree)/arch/mips/include/asm/mach-pb1x00
 load-$(CONFIG_MIPS_PB1500)	+= 0xffffffff80100000
 
 #
 # AMD Alchemy Pb1550 eval board
 #
-libs-$(CONFIG_MIPS_PB1550)	+= arch/mips/alchemy/pb1550/
+core-$(CONFIG_MIPS_PB1550)	+= arch/mips/alchemy/devboards/
 cflags-$(CONFIG_MIPS_PB1550)	+= -I$(srctree)/arch/mips/include/asm/mach-pb1x00
 load-$(CONFIG_MIPS_PB1550)	+= 0xffffffff80100000
 
 #
 # AMD Alchemy Pb1200 eval board
 #
-libs-$(CONFIG_MIPS_PB1200)	+= arch/mips/alchemy/pb1200/
+core-$(CONFIG_MIPS_PB1200)	+= arch/mips/alchemy/devboards/
 cflags-$(CONFIG_MIPS_PB1200)	+= -I$(srctree)/arch/mips/include/asm/mach-pb1x00
 load-$(CONFIG_MIPS_PB1200)	+= 0xffffffff80100000
 
 #
 # AMD Alchemy Db1000 eval board
 #
-libs-$(CONFIG_MIPS_DB1000)	+= arch/mips/alchemy/db1x00/
+core-$(CONFIG_MIPS_DB1000)	+= arch/mips/alchemy/devboards/
 cflags-$(CONFIG_MIPS_DB1000)	+= -I$(srctree)/arch/mips/include/asm/mach-db1x00
 load-$(CONFIG_MIPS_DB1000)	+= 0xffffffff80100000
 
 #
 # AMD Alchemy Db1100 eval board
 #
-libs-$(CONFIG_MIPS_DB1100)	+= arch/mips/alchemy/db1x00/
+core-$(CONFIG_MIPS_DB1100)	+= arch/mips/alchemy/devboards/
 cflags-$(CONFIG_MIPS_DB1100)	+= -I$(srctree)/arch/mips/include/asm/mach-db1x00
 load-$(CONFIG_MIPS_DB1100)	+= 0xffffffff80100000
 
 #
 # AMD Alchemy Db1500 eval board
 #
-libs-$(CONFIG_MIPS_DB1500)	+= arch/mips/alchemy/db1x00/
+core-$(CONFIG_MIPS_DB1500)	+= arch/mips/alchemy/devboards/
 cflags-$(CONFIG_MIPS_DB1500)	+= -I$(srctree)/arch/mips/include/asm/mach-db1x00
 load-$(CONFIG_MIPS_DB1500)	+= 0xffffffff80100000
 
 #
 # AMD Alchemy Db1550 eval board
 #
-libs-$(CONFIG_MIPS_DB1550)	+= arch/mips/alchemy/db1x00/
+core-$(CONFIG_MIPS_DB1550)	+= arch/mips/alchemy/devboards/
 cflags-$(CONFIG_MIPS_DB1550)	+= -I$(srctree)/arch/mips/include/asm/mach-db1x00
 load-$(CONFIG_MIPS_DB1550)	+= 0xffffffff80100000
 
 #
 # AMD Alchemy Db1200 eval board
 #
-libs-$(CONFIG_MIPS_DB1200)	+= arch/mips/alchemy/pb1200/
+core-$(CONFIG_MIPS_DB1200)	+= arch/mips/alchemy/devboards/
 cflags-$(CONFIG_MIPS_DB1200)	+= -I$(srctree)/arch/mips/include/asm/mach-db1x00
 load-$(CONFIG_MIPS_DB1200)	+= 0xffffffff80100000
 
 #
 # AMD Alchemy Bosporus eval board
 #
-libs-$(CONFIG_MIPS_BOSPORUS)	+= arch/mips/alchemy/db1x00/
+core-$(CONFIG_MIPS_BOSPORUS)	+= arch/mips/alchemy/devboards/
 cflags-$(CONFIG_MIPS_BOSPORUS)	+= -I$(srctree)/arch/mips/include/asm/mach-db1x00
 load-$(CONFIG_MIPS_BOSPORUS)	+= 0xffffffff80100000
 
 #
 # AMD Alchemy Mirage eval board
 #
-libs-$(CONFIG_MIPS_MIRAGE)	+= arch/mips/alchemy/db1x00/
+core-$(CONFIG_MIPS_MIRAGE)	+= arch/mips/alchemy/devboards/
 cflags-$(CONFIG_MIPS_MIRAGE)	+= -I$(srctree)/arch/mips/include/asm/mach-db1x00
 load-$(CONFIG_MIPS_MIRAGE)	+= 0xffffffff80100000
 
@@ -586,6 +590,18 @@
 core-$(CONFIG_TOSHIBA_RBTX4938) += arch/mips/txx9/rbtx4938/
 core-$(CONFIG_TOSHIBA_RBTX4939) += arch/mips/txx9/rbtx4939/
 
+#
+# Cavium Octeon
+#
+core-$(CONFIG_CPU_CAVIUM_OCTEON)	+= arch/mips/cavium-octeon/
+cflags-$(CONFIG_CPU_CAVIUM_OCTEON)	+= -I$(srctree)/arch/mips/include/asm/mach-cavium-octeon
+core-$(CONFIG_CPU_CAVIUM_OCTEON)	+= arch/mips/cavium-octeon/executive/
+ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
+load-$(CONFIG_CPU_CAVIUM_OCTEON)	+= 0xffffffff84100000
+else
+load-$(CONFIG_CPU_CAVIUM_OCTEON) 	+= 0xffffffff81100000
+endif
+
 cflags-y			+= -I$(srctree)/arch/mips/include/asm/mach-generic
 drivers-$(CONFIG_PCI)		+= arch/mips/pci/
 
diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig
index e4a057d..7f8ef13 100644
--- a/arch/mips/alchemy/Kconfig
+++ b/arch/mips/alchemy/Kconfig
@@ -128,9 +128,10 @@
 config SOC_AU1X00
 	bool
 	select 64BIT_PHYS_ADDR
-	select CEVT_R4K
-	select CSRC_R4K
+	select CEVT_R4K_LIB
+	select CSRC_R4K_LIB
 	select IRQ_CPU
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_APM_EMULATION
+	select GENERIC_HARDIRQS_NO__DO_IRQ
diff --git a/arch/mips/alchemy/common/Makefile b/arch/mips/alchemy/common/Makefile
index df48fd6..d50d476 100644
--- a/arch/mips/alchemy/common/Makefile
+++ b/arch/mips/alchemy/common/Makefile
@@ -6,8 +6,8 @@
 #
 
 obj-y += prom.o irq.o puts.o time.o reset.o \
-	au1xxx_irqmap.o clocks.o platform.o power.o setup.o \
-	sleeper.o cputable.o dma.o dbdma.o gpio.o
+	clocks.o platform.o power.o setup.o \
+	sleeper.o dma.o dbdma.o gpio.o
 
 obj-$(CONFIG_PCI)		+= pci.o
 
diff --git a/arch/mips/alchemy/common/au1xxx_irqmap.c b/arch/mips/alchemy/common/au1xxx_irqmap.c
deleted file mode 100644
index c7ca159..0000000
--- a/arch/mips/alchemy/common/au1xxx_irqmap.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- *	Au1xxx processor specific IRQ tables
- *
- * Copyright 2004 Embedded Edge, LLC
- *	dan@embeddededge.com
- *
- *  This program is free software; you can redistribute	 it and/or modify it
- *  under  the terms of	 the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the	License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED	  ``AS	IS'' AND   ANY	EXPRESS OR IMPLIED
- *  WARRANTIES,	  INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO	EVENT  SHALL   THE AUTHOR  BE	 LIABLE FOR ANY	  DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED	  TO, PROCUREMENT OF  SUBSTITUTE GOODS	OR SERVICES; LOSS OF
- *  USE, DATA,	OR PROFITS; OR	BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN	 CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <au1000.h>
-
-/* The IC0 interrupt table.  This is processor, rather than
- * board dependent, so no reason to keep this info in the board
- * dependent files.
- *
- * Careful if you change match 2 request!
- * The interrupt handler is called directly from the low level dispatch code.
- */
-struct au1xxx_irqmap __initdata au1xxx_ic0_map[] = {
-
-#if defined(CONFIG_SOC_AU1000)
-	{ AU1000_UART0_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_UART1_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_UART2_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_UART3_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_SSI0_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_SSI1_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+1, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+2, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+3, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+4, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+5, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+6, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+7, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
-	{ AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_IRDA_TX_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_IRDA_RX_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_USB_DEV_REQ_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_USB_DEV_SUS_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_USB_HOST_INT, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1000_ACSYNC_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_MAC0_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_MAC1_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_AC97C_INT, INTC_INT_RISE_EDGE, 0 },
-
-#elif defined(CONFIG_SOC_AU1500)
-
-	{ AU1500_UART0_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_PCI_INTA, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1000_PCI_INTB, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1500_UART3_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_PCI_INTC, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1000_PCI_INTD, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+1, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+2, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+3, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+4, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+5, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+6, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+7, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
-	{ AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_USB_DEV_REQ_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_USB_DEV_SUS_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_USB_HOST_INT, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1000_ACSYNC_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1500_MAC0_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1500_MAC1_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_AC97C_INT, INTC_INT_RISE_EDGE, 0 },
-
-#elif defined(CONFIG_SOC_AU1100)
-
-	{ AU1100_UART0_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1100_UART1_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1100_SD_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1100_UART3_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_SSI0_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_SSI1_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+1, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+2, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+3, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+4, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+5, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+6, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_DMA_INT_BASE+7, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
-	{ AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_IRDA_TX_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_IRDA_RX_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_USB_DEV_REQ_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_USB_DEV_SUS_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_USB_HOST_INT, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1000_ACSYNC_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1100_MAC0_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
-	/* { AU1000_GPIO215_208_INT, INTC_INT_HIGH_LEVEL, 0 }, */
-	{ AU1100_LCD_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_AC97C_INT, INTC_INT_RISE_EDGE, 0 },
-
-#elif defined(CONFIG_SOC_AU1550)
-
-	{ AU1550_UART0_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1550_PCI_INTA, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1550_PCI_INTB, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1550_DDMA_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1550_CRYPTO_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1550_PCI_INTC, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1550_PCI_INTD, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1550_PCI_RST_INT, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1550_UART1_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1550_UART3_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1550_PSC0_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1550_PSC1_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1550_PSC2_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1550_PSC3_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
-	{ AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1550_NAND_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1550_USB_DEV_REQ_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1550_USB_DEV_SUS_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1550_USB_HOST_INT, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1550_MAC0_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1550_MAC1_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
-
-#elif defined(CONFIG_SOC_AU1200)
-
-	{ AU1200_UART0_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1200_SWT_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1200_SD_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1200_DDMA_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1200_MAE_BE_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1200_UART1_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1200_MAE_FE_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1200_PSC0_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1200_PSC1_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1200_AES_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1200_CAMERA_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
-	{ AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1200_NAND_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1200_USB_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1200_LCD_INT, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1200_MAE_BOTH_INT, INTC_INT_HIGH_LEVEL, 0 },
-
-#else
-#error "Error: Unknown Alchemy SOC"
-#endif
-
-};
-
-int __initdata au1xxx_ic0_nr_irqs = ARRAY_SIZE(au1xxx_ic0_map);
diff --git a/arch/mips/alchemy/common/clocks.c b/arch/mips/alchemy/common/clocks.c
index 043429d..d899185 100644
--- a/arch/mips/alchemy/common/clocks.c
+++ b/arch/mips/alchemy/common/clocks.c
@@ -27,12 +27,21 @@
  */
 
 #include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/time.h>
 #include <asm/mach-au1x00/au1000.h>
 
+/*
+ * I haven't found anyone that doesn't use a 12 MHz source clock,
+ * but just in case.....
+ */
+#define AU1000_SRC_CLK	12000000
+
 static unsigned int au1x00_clock; /*  Hz */
-static unsigned int lcd_clock;    /* KHz */
 static unsigned long uart_baud_base;
 
+static DEFINE_SPINLOCK(time_lock);
+
 /*
  * Set the au1000_clock
  */
@@ -63,31 +72,45 @@
 }
 
 /*
- * Calculate the Au1x00's LCD clock based on the current
- * cpu clock and the system bus clock, and try to keep it
- * below 40 MHz (the Pb1000 board can lock-up if the LCD
- * clock is over 40 MHz).
+ * We read the real processor speed from the PLL.  This is important
+ * because it is more accurate than computing it from the 32 KHz
+ * counter, if it exists.  If we don't have an accurate processor
+ * speed, all of the peripherals that derive their clocks based on
+ * this advertised speed will introduce error and sometimes not work
+ * properly.  This function is further convoluted to still allow configurations
+ * to do that in case they have really, really old silicon with a
+ * write-only PLL register.			-- Dan
  */
-void set_au1x00_lcd_clock(void)
+unsigned long au1xxx_calc_clock(void)
 {
-	unsigned int static_cfg0;
-	unsigned int sys_busclk = (get_au1x00_speed() / 1000) /
-				  ((int)(au_readl(SYS_POWERCTRL) & 0x03) + 2);
+	unsigned long cpu_speed;
+	unsigned long flags;
 
-	static_cfg0 = au_readl(MEM_STCFG0);
+	spin_lock_irqsave(&time_lock, flags);
 
-	if (static_cfg0 & (1 << 11))
-		lcd_clock = sys_busclk / 5; /* note: BCLK switching fails with D5 */
+	/*
+	 * On early Au1000, sys_cpupll was write-only. Since these
+	 * silicon versions of Au1000 are not sold by AMD, we don't bend
+	 * over backwards trying to determine the frequency.
+	 */
+	if (au1xxx_cpu_has_pll_wo())
+#ifdef CONFIG_SOC_AU1000_FREQUENCY
+		cpu_speed = CONFIG_SOC_AU1000_FREQUENCY;
+#else
+		cpu_speed = 396000000;
+#endif
 	else
-		lcd_clock = sys_busclk / 4;
+		cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) * AU1000_SRC_CLK;
 
-	if (lcd_clock > 50000) /* Epson MAX */
-		printk(KERN_WARNING "warning: LCD clock too high (%u KHz)\n",
-				    lcd_clock);
-}
+	/* On Alchemy, the CPU:counter ratio is 1:1 */
+	mips_hpt_frequency = cpu_speed;
+	/* Equation: Baudrate = CPU / (SD * 2 * CLKDIV * 16) */
+	set_au1x00_uart_baud_base(cpu_speed / (2 * ((int)(au_readl(SYS_POWERCTRL)
+							  & 0x03) + 2) * 16));
 
-unsigned int get_au1x00_lcd_clock(void)
-{
-	return lcd_clock;
+	spin_unlock_irqrestore(&time_lock, flags);
+
+	set_au1x00_speed(cpu_speed);
+
+	return cpu_speed;
 }
-EXPORT_SYMBOL(get_au1x00_lcd_clock);
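
As the comment in au1xxx_calc_clock() explains, the CPU clock is now read back from SYS_CPUPLL (the low-order multiplier times the 12 MHz source clock), and the UART baud base is then derived from it. A worked user-space sketch of that arithmetic with hypothetical register values, not values read from hardware:

	#include <stdio.h>

	int main(void)
	{
		unsigned long src_clk = 12000000;	/* AU1000_SRC_CLK */
		unsigned long cpupll = 0x21;		/* SYS_CPUPLL & 0x3f = 33 */
		unsigned long powerctrl = 0x01;		/* SYS_POWERCTRL & 0x03 */

		unsigned long cpu_speed = (cpupll & 0x3f) * src_clk; /* 396 MHz */
		/* Baudrate = CPU / (SD * 2 * CLKDIV * 16), with SD = powerctrl + 2 */
		unsigned long baud_base = cpu_speed / (2 * (powerctrl + 2) * 16);

		/* prints: cpu 396000000 Hz, baud base 4125000 */
		printf("cpu %lu Hz, baud base %lu\n", cpu_speed, baud_base);
		return 0;
	}
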
diff --git a/arch/mips/alchemy/common/cputable.c b/arch/mips/alchemy/common/cputable.c
deleted file mode 100644
index ba6430b..0000000
--- a/arch/mips/alchemy/common/cputable.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- *  arch/mips/au1000/common/cputable.c
- *
- *  Copyright (C) 2004 Dan Malek (dan@embeddededge.com)
- *	Copied from PowerPC and updated for Alchemy Au1xxx processors.
- *
- *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- */
-
-#include <asm/mach-au1x00/au1000.h>
-
-struct cpu_spec *cur_cpu_spec[NR_CPUS];
-
-/* With some thought, we can probably use the mask to reduce the
- * size of the table.
- */
-struct cpu_spec cpu_specs[] = {
-	{ 0xffffffff, 0x00030100, "Au1000 DA", 1, 0, 1 },
-	{ 0xffffffff, 0x00030201, "Au1000 HA", 1, 0, 1 },
-	{ 0xffffffff, 0x00030202, "Au1000 HB", 1, 0, 1 },
-	{ 0xffffffff, 0x00030203, "Au1000 HC", 1, 1, 0 },
-	{ 0xffffffff, 0x00030204, "Au1000 HD", 1, 1, 0 },
-	{ 0xffffffff, 0x01030200, "Au1500 AB", 1, 1, 0 },
-	{ 0xffffffff, 0x01030201, "Au1500 AC", 0, 1, 0 },
-	{ 0xffffffff, 0x01030202, "Au1500 AD", 0, 1, 0 },
-	{ 0xffffffff, 0x02030200, "Au1100 AB", 1, 1, 0 },
-	{ 0xffffffff, 0x02030201, "Au1100 BA", 1, 1, 0 },
-	{ 0xffffffff, 0x02030202, "Au1100 BC", 1, 1, 0 },
-	{ 0xffffffff, 0x02030203, "Au1100 BD", 0, 1, 0 },
-	{ 0xffffffff, 0x02030204, "Au1100 BE", 0, 1, 0 },
-	{ 0xffffffff, 0x03030200, "Au1550 AA", 0, 1, 0 },
-	{ 0xffffffff, 0x04030200, "Au1200 AB", 0, 0, 0 },
-	{ 0xffffffff, 0x04030201, "Au1200 AC", 1, 0, 0 },
-	{ 0x00000000, 0x00000000, "Unknown Au1xxx", 1, 0, 0 }
-};
-
-void set_cpuspec(void)
-{
-	struct	cpu_spec *sp;
-	u32	prid;
-
-	prid = read_c0_prid();
-	sp = cpu_specs;
-	while ((prid & sp->prid_mask) != sp->prid_value)
-		sp++;
-	cur_cpu_spec[0] = sp;
-}
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index 601ee91..3ab6d80 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -174,6 +174,11 @@
 
 #define DBDEV_TAB_SIZE	ARRAY_SIZE(dbdev_tab)
 
+#ifdef CONFIG_PM
+static u32 au1xxx_dbdma_pm_regs[NUM_DBDMA_CHANS + 1][8];
+#endif
+
+
 static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];
 
 static dbdev_tab_t *find_dbdev_id(u32 id)
@@ -975,4 +980,64 @@
 	return nbytes;
 }
 
+#ifdef CONFIG_PM
+void au1xxx_dbdma_suspend(void)
+{
+	int i;
+	u32 addr;
+
+	addr = DDMA_GLOBAL_BASE;
+	au1xxx_dbdma_pm_regs[0][0] = au_readl(addr + 0x00);
+	au1xxx_dbdma_pm_regs[0][1] = au_readl(addr + 0x04);
+	au1xxx_dbdma_pm_regs[0][2] = au_readl(addr + 0x08);
+	au1xxx_dbdma_pm_regs[0][3] = au_readl(addr + 0x0c);
+
+	/* save channel configurations */
+	for (i = 1, addr = DDMA_CHANNEL_BASE; i < NUM_DBDMA_CHANS; i++) {
+		au1xxx_dbdma_pm_regs[i][0] = au_readl(addr + 0x00);
+		au1xxx_dbdma_pm_regs[i][1] = au_readl(addr + 0x04);
+		au1xxx_dbdma_pm_regs[i][2] = au_readl(addr + 0x08);
+		au1xxx_dbdma_pm_regs[i][3] = au_readl(addr + 0x0c);
+		au1xxx_dbdma_pm_regs[i][4] = au_readl(addr + 0x10);
+		au1xxx_dbdma_pm_regs[i][5] = au_readl(addr + 0x14);
+		au1xxx_dbdma_pm_regs[i][6] = au_readl(addr + 0x18);
+
+		/* halt channel */
+		au_writel(au1xxx_dbdma_pm_regs[i][0] & ~1, addr + 0x00);
+		au_sync();
+		while (!(au_readl(addr + 0x14) & 1))
+			au_sync();
+
+		addr += 0x100;	/* next channel base */
+	}
+	/* disable channel interrupts */
+	au_writel(0, DDMA_GLOBAL_BASE + 0x0c);
+	au_sync();
+}
+
+void au1xxx_dbdma_resume(void)
+{
+	int i;
+	u32 addr;
+
+	addr = DDMA_GLOBAL_BASE;
+	au_writel(au1xxx_dbdma_pm_regs[0][0], addr + 0x00);
+	au_writel(au1xxx_dbdma_pm_regs[0][1], addr + 0x04);
+	au_writel(au1xxx_dbdma_pm_regs[0][2], addr + 0x08);
+	au_writel(au1xxx_dbdma_pm_regs[0][3], addr + 0x0c);
+
+	/* restore channel configurations */
+	for (i = 1, addr = DDMA_CHANNEL_BASE; i < NUM_DBDMA_CHANS; i++) {
+		au_writel(au1xxx_dbdma_pm_regs[i][0], addr + 0x00);
+		au_writel(au1xxx_dbdma_pm_regs[i][1], addr + 0x04);
+		au_writel(au1xxx_dbdma_pm_regs[i][2], addr + 0x08);
+		au_writel(au1xxx_dbdma_pm_regs[i][3], addr + 0x0c);
+		au_writel(au1xxx_dbdma_pm_regs[i][4], addr + 0x10);
+		au_writel(au1xxx_dbdma_pm_regs[i][5], addr + 0x14);
+		au_writel(au1xxx_dbdma_pm_regs[i][6], addr + 0x18);
+		au_sync();
+		addr += 0x100;	/* next channel base */
+	}
+}
+#endif	/* CONFIG_PM */
 #endif /* defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) */
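
The new au1xxx_dbdma_suspend()/au1xxx_dbdma_resume() helpers halt each channel and save or restore the global and per-channel DDMA register blocks across sleep. The power.c hunk later in this patch calls them from save_core_regs()/restore_core_regs(); the wrapper below is only an illustrative sketch of that ordering, not code from the patch:

	#ifdef CONFIG_PM
	static void example_alchemy_sleep(void)
	{
		au1xxx_dbdma_suspend();	/* halt channels, save DDMA registers */
		/* ... put the SoC into its low-power state ... */
		au1xxx_dbdma_resume();	/* restore DDMA registers */
	}
	#endif
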
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
index 40c6cec..c88c821 100644
--- a/arch/mips/alchemy/common/irq.c
+++ b/arch/mips/alchemy/common/irq.c
@@ -24,6 +24,7 @@
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/bitops.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -36,15 +37,172 @@
 #include <asm/mach-pb1x00/pb1000.h>
 #endif
 
-#define EXT_INTC0_REQ0 2 /* IP 2 */
-#define EXT_INTC0_REQ1 3 /* IP 3 */
-#define EXT_INTC1_REQ0 4 /* IP 4 */
-#define EXT_INTC1_REQ1 5 /* IP 5 */
-#define MIPS_TIMER_IP  7 /* IP 7 */
+static int au1x_ic_settype(unsigned int irq, unsigned int flow_type);
 
-void (*board_init_irq)(void) __initdata = NULL;
+/* per-processor fixed function irqs */
+struct au1xxx_irqmap au1xxx_ic0_map[] __initdata = {
 
-static DEFINE_SPINLOCK(irq_lock);
+#if defined(CONFIG_SOC_AU1000)
+	{ AU1000_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
+	{ AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
+	{ AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 0 },
+
+#elif defined(CONFIG_SOC_AU1500)
+
+	{ AU1500_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 0 },
+	{ AU1000_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 0 },
+	{ AU1500_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 0 },
+	{ AU1000_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 0 },
+	{ AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
+	{ AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
+	{ AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1500_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1500_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 0 },
+
+#elif defined(CONFIG_SOC_AU1100)
+
+	{ AU1100_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1100_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1100_SD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1100_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
+	{ AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
+	{ AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1100_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1100_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 0 },
+
+#elif defined(CONFIG_SOC_AU1550)
+
+	{ AU1550_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1550_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 0 },
+	{ AU1550_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 0 },
+	{ AU1550_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1550_CRYPTO_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1550_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 0 },
+	{ AU1550_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 0 },
+	{ AU1550_PCI_RST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
+	{ AU1550_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1550_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1550_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1550_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1550_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1550_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
+	{ AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1550_NAND_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1550_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1550_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1550_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
+	{ AU1550_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1550_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+
+#elif defined(CONFIG_SOC_AU1200)
+
+	{ AU1200_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1200_SWT_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1200_SD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1200_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1200_MAE_BE_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1200_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1200_MAE_FE_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1200_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1200_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1200_AES_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1200_CAMERA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
+	{ AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1200_NAND_INT, IRQ_TYPE_EDGE_RISING, 0 },
+	{ AU1200_USB_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1200_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+	{ AU1200_MAE_BOTH_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+
+#else
+#error "Error: Unknown Alchemy SOC"
+#endif
+};
+
 
 #ifdef CONFIG_PM
 
@@ -130,67 +288,47 @@
 #endif /* CONFIG_PM */
 
 
-inline void local_enable_irq(unsigned int irq_nr)
+static void au1x_ic0_unmask(unsigned int irq_nr)
 {
 	unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
-
-	if (bit >= 32) {
-		au_writel(1 << (bit - 32), IC1_MASKSET);
-		au_writel(1 << (bit - 32), IC1_WAKESET);
-	} else {
-		au_writel(1 << bit, IC0_MASKSET);
-		au_writel(1 << bit, IC0_WAKESET);
-	}
+	au_writel(1 << bit, IC0_MASKSET);
+	au_writel(1 << bit, IC0_WAKESET);
 	au_sync();
 }
 
-
-inline void local_disable_irq(unsigned int irq_nr)
+static void au1x_ic1_unmask(unsigned int irq_nr)
 {
-	unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
+	unsigned int bit = irq_nr - AU1000_INTC1_INT_BASE;
+	au_writel(1 << bit, IC1_MASKSET);
+	au_writel(1 << bit, IC1_WAKESET);
 
-	if (bit >= 32) {
-		au_writel(1 << (bit - 32), IC1_MASKCLR);
-		au_writel(1 << (bit - 32), IC1_WAKECLR);
-	} else {
-		au_writel(1 << bit, IC0_MASKCLR);
-		au_writel(1 << bit, IC0_WAKECLR);
-	}
+/* very hacky. does the pb1000 cpld auto-disable this int?
+ * nowhere in the current kernel sources is it disabled.	--mlau
+ */
+#if defined(CONFIG_MIPS_PB1000)
+	if (irq_nr == AU1000_GPIO_15)
+		au_writel(0x4000, PB1000_MDR); /* enable int */
+#endif
 	au_sync();
 }
 
-
-static inline void mask_and_ack_rise_edge_irq(unsigned int irq_nr)
+static void au1x_ic0_mask(unsigned int irq_nr)
 {
 	unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
-
-	if (bit >= 32) {
-		au_writel(1 << (bit - 32), IC1_RISINGCLR);
-		au_writel(1 << (bit - 32), IC1_MASKCLR);
-	} else {
-		au_writel(1 << bit, IC0_RISINGCLR);
-		au_writel(1 << bit, IC0_MASKCLR);
-	}
+	au_writel(1 << bit, IC0_MASKCLR);
+	au_writel(1 << bit, IC0_WAKECLR);
 	au_sync();
 }
 
-
-static inline void mask_and_ack_fall_edge_irq(unsigned int irq_nr)
+static void au1x_ic1_mask(unsigned int irq_nr)
 {
-	unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
-
-	if (bit >= 32) {
-		au_writel(1 << (bit - 32), IC1_FALLINGCLR);
-		au_writel(1 << (bit - 32), IC1_MASKCLR);
-	} else {
-		au_writel(1 << bit, IC0_FALLINGCLR);
-		au_writel(1 << bit, IC0_MASKCLR);
-	}
+	unsigned int bit = irq_nr - AU1000_INTC1_INT_BASE;
+	au_writel(1 << bit, IC1_MASKCLR);
+	au_writel(1 << bit, IC1_WAKECLR);
 	au_sync();
 }
 
-
-static inline void mask_and_ack_either_edge_irq(unsigned int irq_nr)
+static void au1x_ic0_ack(unsigned int irq_nr)
 {
 	unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
 
@@ -198,349 +336,229 @@
 	 * This may assume that we don't get interrupts from
 	 * both edges at once, or if we do, that we don't care.
 	 */
-	if (bit >= 32) {
-		au_writel(1 << (bit - 32), IC1_FALLINGCLR);
-		au_writel(1 << (bit - 32), IC1_RISINGCLR);
-		au_writel(1 << (bit - 32), IC1_MASKCLR);
-	} else {
-		au_writel(1 << bit, IC0_FALLINGCLR);
-		au_writel(1 << bit, IC0_RISINGCLR);
-		au_writel(1 << bit, IC0_MASKCLR);
-	}
+	au_writel(1 << bit, IC0_FALLINGCLR);
+	au_writel(1 << bit, IC0_RISINGCLR);
 	au_sync();
 }
 
-static inline void mask_and_ack_level_irq(unsigned int irq_nr)
+static void au1x_ic1_ack(unsigned int irq_nr)
 {
-	local_disable_irq(irq_nr);
+	unsigned int bit = irq_nr - AU1000_INTC1_INT_BASE;
+
+	/*
+	 * This may assume that we don't get interrupts from
+	 * both edges at once, or if we do, that we don't care.
+	 */
+	au_writel(1 << bit, IC1_FALLINGCLR);
+	au_writel(1 << bit, IC1_RISINGCLR);
 	au_sync();
-#if defined(CONFIG_MIPS_PB1000)
-	if (irq_nr == AU1000_GPIO_15) {
-		au_writel(0x8000, PB1000_MDR); /* ack int */
-		au_sync();
-	}
-#endif
 }
 
-static void end_irq(unsigned int irq_nr)
+static int au1x_ic1_setwake(unsigned int irq, unsigned int on)
 {
-	if (!(irq_desc[irq_nr].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		local_enable_irq(irq_nr);
+	unsigned int bit = irq - AU1000_INTC1_INT_BASE;
+	unsigned long wakemsk, flags;
 
-#if defined(CONFIG_MIPS_PB1000)
-	if (irq_nr == AU1000_GPIO_15) {
-		au_writel(0x4000, PB1000_MDR); /* enable int */
-		au_sync();
-	}
-#endif
-}
+	/* only GPIO 0-7 can act as wakeup source: */
+	if ((irq < AU1000_GPIO_0) || (irq > AU1000_GPIO_7))
+		return -EINVAL;
 
-unsigned long save_local_and_disable(int controller)
-{
-	int i;
-	unsigned long flags, mask;
-
-	spin_lock_irqsave(&irq_lock, flags);
-	if (controller) {
-		mask = au_readl(IC1_MASKSET);
-		for (i = 32; i < 64; i++)
-			local_disable_irq(i);
-	} else {
-		mask = au_readl(IC0_MASKSET);
-		for (i = 0; i < 32; i++)
-			local_disable_irq(i);
-	}
-	spin_unlock_irqrestore(&irq_lock, flags);
-
-	return mask;
-}
-
-void restore_local_and_enable(int controller, unsigned long mask)
-{
-	int i;
-	unsigned long flags, new_mask;
-
-	spin_lock_irqsave(&irq_lock, flags);
-	for (i = 0; i < 32; i++)
-		if (mask & (1 << i)) {
-			if (controller)
-				local_enable_irq(i + 32);
-			else
-				local_enable_irq(i);
-		}
-
-	if (controller)
-		new_mask = au_readl(IC1_MASKSET);
+	local_irq_save(flags);
+	wakemsk = au_readl(SYS_WAKEMSK);
+	if (on)
+		wakemsk |= 1 << bit;
 	else
-		new_mask = au_readl(IC0_MASKSET);
-
-	spin_unlock_irqrestore(&irq_lock, flags);
-}
-
-
-static struct irq_chip rise_edge_irq_type = {
-	.name		= "Au1000 Rise Edge",
-	.ack		= mask_and_ack_rise_edge_irq,
-	.mask		= local_disable_irq,
-	.mask_ack	= mask_and_ack_rise_edge_irq,
-	.unmask		= local_enable_irq,
-	.end		= end_irq,
-};
-
-static struct irq_chip fall_edge_irq_type = {
-	.name		= "Au1000 Fall Edge",
-	.ack		= mask_and_ack_fall_edge_irq,
-	.mask		= local_disable_irq,
-	.mask_ack	= mask_and_ack_fall_edge_irq,
-	.unmask		= local_enable_irq,
-	.end		= end_irq,
-};
-
-static struct irq_chip either_edge_irq_type = {
-	.name		= "Au1000 Rise or Fall Edge",
-	.ack		= mask_and_ack_either_edge_irq,
-	.mask		= local_disable_irq,
-	.mask_ack	= mask_and_ack_either_edge_irq,
-	.unmask		= local_enable_irq,
-	.end		= end_irq,
-};
-
-static struct irq_chip level_irq_type = {
-	.name		= "Au1000 Level",
-	.ack		= mask_and_ack_level_irq,
-	.mask		= local_disable_irq,
-	.mask_ack	= mask_and_ack_level_irq,
-	.unmask		= local_enable_irq,
-	.end		= end_irq,
-};
-
-static void __init setup_local_irq(unsigned int irq_nr, int type, int int_req)
-{
-	unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
-
-	if (irq_nr > AU1000_MAX_INTR)
-		return;
-
-	/* Config2[n], Config1[n], Config0[n] */
-	if (bit >= 32) {
-		switch (type) {
-		case INTC_INT_RISE_EDGE: /* 0:0:1 */
-			au_writel(1 << (bit - 32), IC1_CFG2CLR);
-			au_writel(1 << (bit - 32), IC1_CFG1CLR);
-			au_writel(1 << (bit - 32), IC1_CFG0SET);
-			set_irq_chip(irq_nr, &rise_edge_irq_type);
-			break;
-		case INTC_INT_FALL_EDGE: /* 0:1:0 */
-			au_writel(1 << (bit - 32), IC1_CFG2CLR);
-			au_writel(1 << (bit - 32), IC1_CFG1SET);
-			au_writel(1 << (bit - 32), IC1_CFG0CLR);
-			set_irq_chip(irq_nr, &fall_edge_irq_type);
-			break;
-		case INTC_INT_RISE_AND_FALL_EDGE: /* 0:1:1 */
-			au_writel(1 << (bit - 32), IC1_CFG2CLR);
-			au_writel(1 << (bit - 32), IC1_CFG1SET);
-			au_writel(1 << (bit - 32), IC1_CFG0SET);
-			set_irq_chip(irq_nr, &either_edge_irq_type);
-			break;
-		case INTC_INT_HIGH_LEVEL: /* 1:0:1 */
-			au_writel(1 << (bit - 32), IC1_CFG2SET);
-			au_writel(1 << (bit - 32), IC1_CFG1CLR);
-			au_writel(1 << (bit - 32), IC1_CFG0SET);
-			set_irq_chip(irq_nr, &level_irq_type);
-			break;
-		case INTC_INT_LOW_LEVEL: /* 1:1:0 */
-			au_writel(1 << (bit - 32), IC1_CFG2SET);
-			au_writel(1 << (bit - 32), IC1_CFG1SET);
-			au_writel(1 << (bit - 32), IC1_CFG0CLR);
-			set_irq_chip(irq_nr, &level_irq_type);
-			break;
-		case INTC_INT_DISABLED: /* 0:0:0 */
-			au_writel(1 << (bit - 32), IC1_CFG0CLR);
-			au_writel(1 << (bit - 32), IC1_CFG1CLR);
-			au_writel(1 << (bit - 32), IC1_CFG2CLR);
-			break;
-		default: /* disable the interrupt */
-			printk(KERN_WARNING "unexpected int type %d (irq %d)\n",
-			       type, irq_nr);
-			au_writel(1 << (bit - 32), IC1_CFG0CLR);
-			au_writel(1 << (bit - 32), IC1_CFG1CLR);
-			au_writel(1 << (bit - 32), IC1_CFG2CLR);
-			return;
-		}
-		if (int_req) /* assign to interrupt request 1 */
-			au_writel(1 << (bit - 32), IC1_ASSIGNCLR);
-		else	     /* assign to interrupt request 0 */
-			au_writel(1 << (bit - 32), IC1_ASSIGNSET);
-		au_writel(1 << (bit - 32), IC1_SRCSET);
-		au_writel(1 << (bit - 32), IC1_MASKCLR);
-		au_writel(1 << (bit - 32), IC1_WAKECLR);
-	} else {
-		switch (type) {
-		case INTC_INT_RISE_EDGE: /* 0:0:1 */
-			au_writel(1 << bit, IC0_CFG2CLR);
-			au_writel(1 << bit, IC0_CFG1CLR);
-			au_writel(1 << bit, IC0_CFG0SET);
-			set_irq_chip(irq_nr, &rise_edge_irq_type);
-			break;
-		case INTC_INT_FALL_EDGE: /* 0:1:0 */
-			au_writel(1 << bit, IC0_CFG2CLR);
-			au_writel(1 << bit, IC0_CFG1SET);
-			au_writel(1 << bit, IC0_CFG0CLR);
-			set_irq_chip(irq_nr, &fall_edge_irq_type);
-			break;
-		case INTC_INT_RISE_AND_FALL_EDGE: /* 0:1:1 */
-			au_writel(1 << bit, IC0_CFG2CLR);
-			au_writel(1 << bit, IC0_CFG1SET);
-			au_writel(1 << bit, IC0_CFG0SET);
-			set_irq_chip(irq_nr, &either_edge_irq_type);
-			break;
-		case INTC_INT_HIGH_LEVEL: /* 1:0:1 */
-			au_writel(1 << bit, IC0_CFG2SET);
-			au_writel(1 << bit, IC0_CFG1CLR);
-			au_writel(1 << bit, IC0_CFG0SET);
-			set_irq_chip(irq_nr, &level_irq_type);
-			break;
-		case INTC_INT_LOW_LEVEL: /* 1:1:0 */
-			au_writel(1 << bit, IC0_CFG2SET);
-			au_writel(1 << bit, IC0_CFG1SET);
-			au_writel(1 << bit, IC0_CFG0CLR);
-			set_irq_chip(irq_nr, &level_irq_type);
-			break;
-		case INTC_INT_DISABLED: /* 0:0:0 */
-			au_writel(1 << bit, IC0_CFG0CLR);
-			au_writel(1 << bit, IC0_CFG1CLR);
-			au_writel(1 << bit, IC0_CFG2CLR);
-			break;
-		default: /* disable the interrupt */
-			printk(KERN_WARNING "unexpected int type %d (irq %d)\n",
-			       type, irq_nr);
-			au_writel(1 << bit, IC0_CFG0CLR);
-			au_writel(1 << bit, IC0_CFG1CLR);
-			au_writel(1 << bit, IC0_CFG2CLR);
-			return;
-		}
-		if (int_req) /* assign to interrupt request 1 */
-			au_writel(1 << bit, IC0_ASSIGNCLR);
-		else	     /* assign to interrupt request 0 */
-			au_writel(1 << bit, IC0_ASSIGNSET);
-		au_writel(1 << bit, IC0_SRCSET);
-		au_writel(1 << bit, IC0_MASKCLR);
-		au_writel(1 << bit, IC0_WAKECLR);
-	}
+		wakemsk &= ~(1 << bit);
+	au_writel(wakemsk, SYS_WAKEMSK);
 	au_sync();
+	local_irq_restore(flags);
+
+	return 0;
 }
 
 /*
- * Interrupts are nested. Even if an interrupt handler is registered
- * as "fast", we might get another interrupt before we return from
- * intcX_reqX_irqdispatch().
+ * irq_chips for both ICs; this way the mask handlers can be
+ * as short as possible.
+ *
+ * NOTE: the ->ack() callback is used by the handle_edge_irq
+ *	 flowhandler only, the ->mask_ack() one by handle_level_irq,
+ *	 so no need for an irq_chip for each type of irq (level/edge).
  */
+static struct irq_chip au1x_ic0_chip = {
+	.name		= "Alchemy-IC0",
+	.ack		= au1x_ic0_ack,		/* edge */
+	.mask		= au1x_ic0_mask,
+	.mask_ack	= au1x_ic0_mask,	/* level */
+	.unmask		= au1x_ic0_unmask,
+	.set_type	= au1x_ic_settype,
+};
 
-static void intc0_req0_irqdispatch(void)
+static struct irq_chip au1x_ic1_chip = {
+	.name		= "Alchemy-IC1",
+	.ack		= au1x_ic1_ack,		/* edge */
+	.mask		= au1x_ic1_mask,
+	.mask_ack	= au1x_ic1_mask,	/* level */
+	.unmask		= au1x_ic1_unmask,
+	.set_type	= au1x_ic_settype,
+	.set_wake	= au1x_ic1_setwake,
+};
+
+static int au1x_ic_settype(unsigned int irq, unsigned int flow_type)
 {
-	static unsigned long intc0_req0;
-	unsigned int bit;
+	struct irq_chip *chip;
+	unsigned long icr[6];
+	unsigned int bit, ic;
+	int ret;
 
-	intc0_req0 |= au_readl(IC0_REQ0INT);
+	if (irq >= AU1000_INTC1_INT_BASE) {
+		bit = irq - AU1000_INTC1_INT_BASE;
+		chip = &au1x_ic1_chip;
+		ic = 1;
+	} else {
+		bit = irq - AU1000_INTC0_INT_BASE;
+		chip = &au1x_ic0_chip;
+		ic = 0;
+	}
 
-	if (!intc0_req0)
+	if (bit > 31)
+		return -EINVAL;
+
+	icr[0] = ic ? IC1_CFG0SET : IC0_CFG0SET;
+	icr[1] = ic ? IC1_CFG1SET : IC0_CFG1SET;
+	icr[2] = ic ? IC1_CFG2SET : IC0_CFG2SET;
+	icr[3] = ic ? IC1_CFG0CLR : IC0_CFG0CLR;
+	icr[4] = ic ? IC1_CFG1CLR : IC0_CFG1CLR;
+	icr[5] = ic ? IC1_CFG2CLR : IC0_CFG2CLR;
+
+	ret = 0;
+
+	switch (flow_type) {	/* cfgregs 2:1:0 */
+	case IRQ_TYPE_EDGE_RISING:	/* 0:0:1 */
+		au_writel(1 << bit, icr[5]);
+		au_writel(1 << bit, icr[4]);
+		au_writel(1 << bit, icr[0]);
+		set_irq_chip_and_handler_name(irq, chip,
+				handle_edge_irq, "riseedge");
+		break;
+	case IRQ_TYPE_EDGE_FALLING:	/* 0:1:0 */
+		au_writel(1 << bit, icr[5]);
+		au_writel(1 << bit, icr[1]);
+		au_writel(1 << bit, icr[3]);
+		set_irq_chip_and_handler_name(irq, chip,
+				handle_edge_irq, "falledge");
+		break;
+	case IRQ_TYPE_EDGE_BOTH:	/* 0:1:1 */
+		au_writel(1 << bit, icr[5]);
+		au_writel(1 << bit, icr[1]);
+		au_writel(1 << bit, icr[0]);
+		set_irq_chip_and_handler_name(irq, chip,
+				handle_edge_irq, "bothedge");
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:	/* 1:0:1 */
+		au_writel(1 << bit, icr[2]);
+		au_writel(1 << bit, icr[4]);
+		au_writel(1 << bit, icr[0]);
+		set_irq_chip_and_handler_name(irq, chip,
+				handle_level_irq, "hilevel");
+		break;
+	case IRQ_TYPE_LEVEL_LOW:	/* 1:1:0 */
+		au_writel(1 << bit, icr[2]);
+		au_writel(1 << bit, icr[1]);
+		au_writel(1 << bit, icr[3]);
+		set_irq_chip_and_handler_name(irq, chip,
+				handle_level_irq, "lowlevel");
+		break;
+	case IRQ_TYPE_NONE:		/* 0:0:0 */
+		au_writel(1 << bit, icr[5]);
+		au_writel(1 << bit, icr[4]);
+		au_writel(1 << bit, icr[3]);
+		/* set at least chip so we can call set_irq_type() on it */
+		set_irq_chip(irq, chip);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	au_sync();
+
+	return ret;
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+	unsigned int pending = read_c0_status() & read_c0_cause();
+	unsigned long s, off, bit;
+
+	if (pending & CAUSEF_IP7) {
+		do_IRQ(MIPS_CPU_IRQ_BASE + 7);
 		return;
+	} else if (pending & CAUSEF_IP2) {
+		s = IC0_REQ0INT;
+		off = AU1000_INTC0_INT_BASE;
+	} else if (pending & CAUSEF_IP3) {
+		s = IC0_REQ1INT;
+		off = AU1000_INTC0_INT_BASE;
+	} else if (pending & CAUSEF_IP4) {
+		s = IC1_REQ0INT;
+		off = AU1000_INTC1_INT_BASE;
+	} else if (pending & CAUSEF_IP5) {
+		s = IC1_REQ1INT;
+		off = AU1000_INTC1_INT_BASE;
+	} else
+		goto spurious;
 
+	bit = 0;
+	s = au_readl(s);
+	if (unlikely(!s)) {
+spurious:
+		spurious_interrupt();
+		return;
+	}
 #ifdef AU1000_USB_DEV_REQ_INT
 	/*
 	 * Because of the tight timing of SETUP token to reply
 	 * transactions, the USB devices-side packet complete
 	 * interrupt needs the highest priority.
 	 */
-	if ((intc0_req0 & (1 << AU1000_USB_DEV_REQ_INT))) {
-		intc0_req0 &= ~(1 << AU1000_USB_DEV_REQ_INT);
+	bit = 1 << (AU1000_USB_DEV_REQ_INT - AU1000_INTC0_INT_BASE);
+	if ((pending & CAUSEF_IP2) && (s & bit)) {
 		do_IRQ(AU1000_USB_DEV_REQ_INT);
 		return;
 	}
 #endif
-	bit = __ffs(intc0_req0);
-	intc0_req0 &= ~(1 << bit);
-	do_IRQ(AU1000_INTC0_INT_BASE + bit);
+	do_IRQ(__ffs(s) + off);
 }
 
-
-static void intc0_req1_irqdispatch(void)
+/* setup edge/level and assign request 0/1 */
+void __init au1xxx_setup_irqmap(struct au1xxx_irqmap *map, int count)
 {
-	static unsigned long intc0_req1;
-	unsigned int bit;
+	unsigned int bit, irq_nr;
 
-	intc0_req1 |= au_readl(IC0_REQ1INT);
+	while (count--) {
+		irq_nr = map[count].im_irq;
 
-	if (!intc0_req1)
-		return;
+		if (((irq_nr < AU1000_INTC0_INT_BASE) ||
+		     (irq_nr >= AU1000_INTC0_INT_BASE + 32)) &&
+		    ((irq_nr < AU1000_INTC1_INT_BASE) ||
+		     (irq_nr >= AU1000_INTC1_INT_BASE + 32)))
+			continue;
 
-	bit = __ffs(intc0_req1);
-	intc0_req1 &= ~(1 << bit);
-	do_IRQ(AU1000_INTC0_INT_BASE + bit);
-}
+		if (irq_nr >= AU1000_INTC1_INT_BASE) {
+			bit = irq_nr - AU1000_INTC1_INT_BASE;
+			if (map[count].im_request)
+				au_writel(1 << bit, IC1_ASSIGNCLR);
+		} else {
+			bit = irq_nr - AU1000_INTC0_INT_BASE;
+			if (map[count].im_request)
+				au_writel(1 << bit, IC0_ASSIGNCLR);
+		}
 
-
-/*
- * Interrupt Controller 1:
- * interrupts 32 - 63
- */
-static void intc1_req0_irqdispatch(void)
-{
-	static unsigned long intc1_req0;
-	unsigned int bit;
-
-	intc1_req0 |= au_readl(IC1_REQ0INT);
-
-	if (!intc1_req0)
-		return;
-
-	bit = __ffs(intc1_req0);
-	intc1_req0 &= ~(1 << bit);
-	do_IRQ(AU1000_INTC1_INT_BASE + bit);
-}
-
-
-static void intc1_req1_irqdispatch(void)
-{
-	static unsigned long intc1_req1;
-	unsigned int bit;
-
-	intc1_req1 |= au_readl(IC1_REQ1INT);
-
-	if (!intc1_req1)
-		return;
-
-	bit = __ffs(intc1_req1);
-	intc1_req1 &= ~(1 << bit);
-	do_IRQ(AU1000_INTC1_INT_BASE + bit);
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
-	unsigned int pending = read_c0_status() & read_c0_cause();
-
-	if (pending & CAUSEF_IP7)
-		do_IRQ(MIPS_CPU_IRQ_BASE + 7);
-	else if (pending & CAUSEF_IP2)
-		intc0_req0_irqdispatch();
-	else if (pending & CAUSEF_IP3)
-		intc0_req1_irqdispatch();
-	else if (pending & CAUSEF_IP4)
-		intc1_req0_irqdispatch();
-	else if (pending  & CAUSEF_IP5)
-		intc1_req1_irqdispatch();
-	else
-		spurious_interrupt();
+		au1x_ic_settype(irq_nr, map[count].im_type);
+	}
 }
 
 void __init arch_init_irq(void)
 {
 	int i;
-	struct au1xxx_irqmap *imp;
-	extern struct au1xxx_irqmap au1xxx_irq_map[];
-	extern struct au1xxx_irqmap au1xxx_ic0_map[];
-	extern int au1xxx_nr_irqs;
-	extern int au1xxx_ic0_nr_irqs;
 
 	/*
 	 * Initialize interrupt controllers to a safe state.
@@ -569,28 +587,25 @@
 
 	mips_cpu_irq_init();
 
+	/* register all 64 possible IC0+IC1 irq sources as type "none".
+	 * Use set_irq_type() to set edge/level behaviour at runtime.
+	 */
+	for (i = AU1000_INTC0_INT_BASE;
+	     (i < AU1000_INTC0_INT_BASE + 32); i++)
+		au1x_ic_settype(i, IRQ_TYPE_NONE);
+
+	for (i = AU1000_INTC1_INT_BASE;
+	     (i < AU1000_INTC1_INT_BASE + 32); i++)
+		au1x_ic_settype(i, IRQ_TYPE_NONE);
+
 	/*
 	 * Initialize IC0, which is fixed per processor.
 	 */
-	imp = au1xxx_ic0_map;
-	for (i = 0; i < au1xxx_ic0_nr_irqs; i++) {
-		setup_local_irq(imp->im_irq, imp->im_type, imp->im_request);
-		imp++;
-	}
+	au1xxx_setup_irqmap(au1xxx_ic0_map, ARRAY_SIZE(au1xxx_ic0_map));
 
-	/*
-	 * Now set up the irq mapping for the board.
-	 */
-	imp = au1xxx_irq_map;
-	for (i = 0; i < au1xxx_nr_irqs; i++) {
-		setup_local_irq(imp->im_irq, imp->im_type, imp->im_request);
-		imp++;
-	}
-
-	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4);
-
-	/* Board specific IRQ initialization.
+	/* Boards can register additional (GPIO-based) IRQs.
 	*/
-	if (board_init_irq)
-		board_init_irq();
+	board_init_irq();
+
+	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3);
 }
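
With the interrupt-controller rewrite above, edge/level behaviour is no longer baked into per-type irq_chips; it is selected at runtime through au1x_ic_settype(), reached from the generic set_irq_type() path via the chips' ->set_type() hook, while GPIO0-7 wakeup goes through ->set_wake(). A hedged sketch of how a board or driver could use the generic calls; the IRQ number and handler are placeholders:

	static int __init example_gpio_irq_setup(void)
	{
		int irq = AU1000_GPIO_3;	/* assumed wakeup-capable GPIO irq */

		set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
		if (request_irq(irq, example_isr, 0, "example-gpio", NULL))
			return -ENODEV;

		enable_irq_wake(irq);		/* ends up in au1x_ic1_setwake() */
		return 0;
	}
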
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
index bd854a6..6ab7b42 100644
--- a/arch/mips/alchemy/common/power.c
+++ b/arch/mips/alchemy/common/power.c
@@ -35,25 +35,12 @@
 #include <linux/jiffies.h>
 
 #include <asm/uaccess.h>
-#include <asm/cacheflush.h>
 #include <asm/mach-au1x00/au1000.h>
-
-#ifdef CONFIG_PM
-
-#define DEBUG 1
-#ifdef	DEBUG
-#define DPRINTK(fmt, args...)	printk(KERN_DEBUG "%s: " fmt, __func__, ## args)
-#else
-#define DPRINTK(fmt, args...)
+#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
 #endif
 
-static void au1000_calibrate_delay(void);
-
-extern unsigned long save_local_and_disable(int controller);
-extern void restore_local_and_enable(int controller, unsigned long mask);
-extern void local_enable_irq(unsigned int irq_nr);
-
-static DEFINE_SPINLOCK(pm_lock);
+#ifdef CONFIG_PM
 
 /*
  * We need to save/restore a bunch of core registers that are
@@ -65,29 +52,16 @@
  * We only have to save/restore registers that aren't otherwise
  * done as part of a driver pm_* function.
  */
-static unsigned int	sleep_aux_pll_cntrl;
-static unsigned int	sleep_cpu_pll_cntrl;
-static unsigned int	sleep_pin_function;
-static unsigned int	sleep_uart0_inten;
-static unsigned int	sleep_uart0_fifoctl;
-static unsigned int	sleep_uart0_linectl;
-static unsigned int	sleep_uart0_clkdiv;
-static unsigned int	sleep_uart0_enable;
-static unsigned int	sleep_usbhost_enable;
-static unsigned int	sleep_usbdev_enable;
-static unsigned int	sleep_static_memctlr[4][3];
+static unsigned int sleep_uart0_inten;
+static unsigned int sleep_uart0_fifoctl;
+static unsigned int sleep_uart0_linectl;
+static unsigned int sleep_uart0_clkdiv;
+static unsigned int sleep_uart0_enable;
+static unsigned int sleep_usb[2];
+static unsigned int sleep_sys_clocks[5];
+static unsigned int sleep_sys_pinfunc;
+static unsigned int sleep_static_memctlr[4][3];
 
-/*
- * Define this to cause the value you write to /proc/sys/pm/sleep to
- * set the TOY timer for the amount of time you want to sleep.
- * This is done mainly for testing, but may be useful in other cases.
- * The value is number of 32KHz ticks to sleep.
- */
-#define SLEEP_TEST_TIMEOUT 1
-#ifdef	SLEEP_TEST_TIMEOUT
-static int sleep_ticks;
-void wakeup_counter0_set(int ticks);
-#endif
 
 static void save_core_regs(void)
 {
@@ -105,31 +79,45 @@
 	sleep_uart0_linectl = au_readl(UART0_ADDR + UART_LCR);
 	sleep_uart0_clkdiv = au_readl(UART0_ADDR + UART_CLK);
 	sleep_uart0_enable = au_readl(UART0_ADDR + UART_MOD_CNTRL);
+	au_sync();
 
+#ifndef CONFIG_SOC_AU1200
 	/* Shutdown USB host/device. */
-	sleep_usbhost_enable = au_readl(USB_HOST_CONFIG);
+	sleep_usb[0] = au_readl(USB_HOST_CONFIG);
 
 	/* There appears to be some undocumented reset register.... */
-	au_writel(0, 0xb0100004); au_sync();
-	au_writel(0, USB_HOST_CONFIG); au_sync();
+	au_writel(0, 0xb0100004);
+	au_sync();
+	au_writel(0, USB_HOST_CONFIG);
+	au_sync();
 
-	sleep_usbdev_enable = au_readl(USBD_ENABLE);
-	au_writel(0, USBD_ENABLE); au_sync();
+	sleep_usb[1] = au_readl(USBD_ENABLE);
+	au_writel(0, USBD_ENABLE);
+	au_sync();
+
+#else	/* AU1200 */
+
+	/* enable access to OTG mmio so we can save OTG CAP/MUX.
+	 * FIXME: write an OTG driver and move this stuff there!
+	 */
+	au_writel(au_readl(USB_MSR_BASE + 4) | (1 << 6), USB_MSR_BASE + 4);
+	au_sync();
+	sleep_usb[0] = au_readl(0xb4020020);	/* OTG_CAP */
+	sleep_usb[1] = au_readl(0xb4020024);	/* OTG_MUX */
+#endif
 
 	/* Save interrupt controller state. */
 	save_au1xxx_intctl();
 
 	/* Clocks and PLLs. */
-	sleep_aux_pll_cntrl = au_readl(SYS_AUXPLL);
+	sleep_sys_clocks[0] = au_readl(SYS_FREQCTRL0);
+	sleep_sys_clocks[1] = au_readl(SYS_FREQCTRL1);
+	sleep_sys_clocks[2] = au_readl(SYS_CLKSRC);
+	sleep_sys_clocks[3] = au_readl(SYS_CPUPLL);
+	sleep_sys_clocks[4] = au_readl(SYS_AUXPLL);
 
-	/*
-	 * We don't really need to do this one, but unless we
-	 * write it again it won't have a valid value if we
-	 * happen to read it.
-	 */
-	sleep_cpu_pll_cntrl = au_readl(SYS_CPUPLL);
-
-	sleep_pin_function = au_readl(SYS_PINFUNC);
+	/* pin mux config */
+	sleep_sys_pinfunc = au_readl(SYS_PINFUNC);
 
 	/* Save the static memory controller configuration. */
 	sleep_static_memctlr[0][0] = au_readl(MEM_STCFG0);
@@ -144,16 +132,45 @@
 	sleep_static_memctlr[3][0] = au_readl(MEM_STCFG3);
 	sleep_static_memctlr[3][1] = au_readl(MEM_STTIME3);
 	sleep_static_memctlr[3][2] = au_readl(MEM_STADDR3);
+
+#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
+	au1xxx_dbdma_suspend();
+#endif
 }
 
 static void restore_core_regs(void)
 {
-	extern void restore_au1xxx_intctl(void);
-	extern void wakeup_counter0_adjust(void);
+	/* restore clock configuration.  Writing CPUPLL last will
+	 * stall a bit and stabilize other clocks (unless this is
+	 * one of those Au1000s with a write-only PLL, where we don't
+	 * have a valid value)
+	 */
+	au_writel(sleep_sys_clocks[0], SYS_FREQCTRL0);
+	au_writel(sleep_sys_clocks[1], SYS_FREQCTRL1);
+	au_writel(sleep_sys_clocks[2], SYS_CLKSRC);
+	au_writel(sleep_sys_clocks[4], SYS_AUXPLL);
+	if (!au1xxx_cpu_has_pll_wo())
+		au_writel(sleep_sys_clocks[3], SYS_CPUPLL);
+	au_sync();
 
-	au_writel(sleep_aux_pll_cntrl, SYS_AUXPLL); au_sync();
-	au_writel(sleep_cpu_pll_cntrl, SYS_CPUPLL); au_sync();
-	au_writel(sleep_pin_function, SYS_PINFUNC); au_sync();
+	au_writel(sleep_sys_pinfunc, SYS_PINFUNC);
+	au_sync();
+
+#ifndef CONFIG_SOC_AU1200
+	au_writel(sleep_usb[0], USB_HOST_CONFIG);
+	au_writel(sleep_usb[1], USBD_ENABLE);
+	au_sync();
+#else
+	/* enable access to OTG memory */
+	au_writel(au_readl(USB_MSR_BASE + 4) | (1 << 6), USB_MSR_BASE + 4);
+	au_sync();
+
+	/* restore OTG caps and port mux. */
+	au_writel(sleep_usb[0], 0xb4020020 + 0);	/* OTG_CAP */
+	au_sync();
+	au_writel(sleep_usb[1], 0xb4020020 + 4);	/* OTG_MUX */
+	au_sync();
+#endif
 
 	/* Restore the static memory controller configuration. */
 	au_writel(sleep_static_memctlr[0][0], MEM_STCFG0);
@@ -184,282 +201,17 @@
 	}
 
 	restore_au1xxx_intctl();
-	wakeup_counter0_adjust();
+
+#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
+	au1xxx_dbdma_resume();
+#endif
 }
 
-unsigned long suspend_mode;
-
-void wakeup_from_suspend(void)
+void au_sleep(void)
 {
-	suspend_mode = 0;
-}
-
-int au_sleep(void)
-{
-	unsigned long wakeup, flags;
-	extern void save_and_sleep(void);
-
-	spin_lock_irqsave(&pm_lock, flags);
-
 	save_core_regs();
-
-	flush_cache_all();
-
-	/**
-	 ** The code below is all system dependent and we should probably
-	 ** have a function call out of here to set this up.  You need
-	 ** to configure the GPIO or timer interrupts that will bring
-	 ** you out of sleep.
-	 ** For testing, the TOY counter wakeup is useful.
-	 **/
-#if 0
-	au_writel(au_readl(SYS_PINSTATERD) & ~(1 << 11), SYS_PINSTATERD);
-
-	/* GPIO 6 can cause a wake up event */
-	wakeup = au_readl(SYS_WAKEMSK);
-	wakeup &= ~(1 << 8);	/* turn off match20 wakeup */
-	wakeup |= 1 << 6;	/* turn on  GPIO  6 wakeup */
-#else
-	/* For testing, allow match20 to wake us up. */
-#ifdef SLEEP_TEST_TIMEOUT
-	wakeup_counter0_set(sleep_ticks);
-#endif
-	wakeup = 1 << 8;	/* turn on match20 wakeup   */
-	wakeup = 0;
-#endif
-	au_writel(1, SYS_WAKESRC);	/* clear cause */
-	au_sync();
-	au_writel(wakeup, SYS_WAKEMSK);
-	au_sync();
-
-	save_and_sleep();
-
-	/*
-	 * After a wakeup, the cpu vectors back to 0x1fc00000, so
-	 * it's up to the boot code to get us back here.
-	 */
+	au1xxx_save_and_sleep();
 	restore_core_regs();
-	spin_unlock_irqrestore(&pm_lock, flags);
-	return 0;
 }
 
-static int pm_do_sleep(ctl_table *ctl, int write, struct file *file,
-		       void __user *buffer, size_t *len, loff_t *ppos)
-{
-#ifdef SLEEP_TEST_TIMEOUT
-#define TMPBUFLEN2 16
-	char buf[TMPBUFLEN2], *p;
-#endif
-
-	if (!write)
-		*len = 0;
-	else {
-#ifdef SLEEP_TEST_TIMEOUT
-		if (*len > TMPBUFLEN2 - 1)
-			return -EFAULT;
-		if (copy_from_user(buf, buffer, *len))
-			return -EFAULT;
-		buf[*len] = 0;
-		p = buf;
-		sleep_ticks = simple_strtoul(p, &p, 0);
-#endif
-
-		au_sleep();
-	}
-	return 0;
-}
-
-static int pm_do_freq(ctl_table *ctl, int write, struct file *file,
-		      void __user *buffer, size_t *len, loff_t *ppos)
-{
-	int retval = 0, i;
-	unsigned long val, pll;
-#define TMPBUFLEN 64
-#define MAX_CPU_FREQ 396
-	char buf[TMPBUFLEN], *p;
-	unsigned long flags, intc0_mask, intc1_mask;
-	unsigned long old_baud_base, old_cpu_freq, old_clk, old_refresh;
-	unsigned long new_baud_base, new_cpu_freq, new_clk, new_refresh;
-	unsigned long baud_rate;
-
-	spin_lock_irqsave(&pm_lock, flags);
-	if (!write)
-		*len = 0;
-	else {
-		/* Parse the new frequency */
-		if (*len > TMPBUFLEN - 1) {
-			spin_unlock_irqrestore(&pm_lock, flags);
-			return -EFAULT;
-		}
-		if (copy_from_user(buf, buffer, *len)) {
-			spin_unlock_irqrestore(&pm_lock, flags);
-			return -EFAULT;
-		}
-		buf[*len] = 0;
-		p = buf;
-		val = simple_strtoul(p, &p, 0);
-		if (val > MAX_CPU_FREQ) {
-			spin_unlock_irqrestore(&pm_lock, flags);
-			return -EFAULT;
-		}
-
-		pll = val / 12;
-		if ((pll > 33) || (pll < 7)) {	/* 396 MHz max, 84 MHz min */
-			/* Revisit this for higher speed CPUs */
-			spin_unlock_irqrestore(&pm_lock, flags);
-			return -EFAULT;
-		}
-
-		old_baud_base = get_au1x00_uart_baud_base();
-		old_cpu_freq = get_au1x00_speed();
-
-		new_cpu_freq = pll * 12 * 1000000;
-	        new_baud_base = (new_cpu_freq / (2 * ((int)(au_readl(SYS_POWERCTRL)
-							    & 0x03) + 2) * 16));
-		set_au1x00_speed(new_cpu_freq);
-		set_au1x00_uart_baud_base(new_baud_base);
-
-		old_refresh = au_readl(MEM_SDREFCFG) & 0x1ffffff;
-		new_refresh = ((old_refresh * new_cpu_freq) / old_cpu_freq) |
-			      (au_readl(MEM_SDREFCFG) & ~0x1ffffff);
-
-		au_writel(pll, SYS_CPUPLL);
-		au_sync_delay(1);
-		au_writel(new_refresh, MEM_SDREFCFG);
-		au_sync_delay(1);
-
-		for (i = 0; i < 4; i++)
-			if (au_readl(UART_BASE + UART_MOD_CNTRL +
-				     i * 0x00100000) == 3) {
-				old_clk = au_readl(UART_BASE + UART_CLK +
-						   i * 0x00100000);
-				baud_rate = old_baud_base / old_clk;
-				/*
-				 * We won't get an exact baud rate and the error
-				 * could be significant enough that our new
-				 * calculation will result in a clock that will
-				 * give us a baud rate that's too far off from
-				 * what we really want.
-				 */
-				if (baud_rate > 100000)
-					baud_rate = 115200;
-				else if (baud_rate > 50000)
-					baud_rate = 57600;
-				else if (baud_rate > 30000)
-					baud_rate = 38400;
-				else if (baud_rate > 17000)
-					baud_rate = 19200;
-				else
-					baud_rate = 9600;
-				new_clk = new_baud_base / baud_rate;
-				au_writel(new_clk, UART_BASE + UART_CLK +
-					  i * 0x00100000);
-				au_sync_delay(10);
-			}
-	}
-
-	/*
-	 * We don't want _any_ interrupts other than match20. Otherwise our
-	 * au1000_calibrate_delay() calculation will be off, potentially a lot.
-	 */
-	intc0_mask = save_local_and_disable(0);
-	intc1_mask = save_local_and_disable(1);
-	local_enable_irq(AU1000_TOY_MATCH2_INT);
-	spin_unlock_irqrestore(&pm_lock, flags);
-	au1000_calibrate_delay();
-	restore_local_and_enable(0, intc0_mask);
-	restore_local_and_enable(1, intc1_mask);
-
-	return retval;
-}
-
-
-static struct ctl_table pm_table[] = {
-	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "sleep",
-		.data		= NULL,
-		.maxlen		= 0,
-		.mode		= 0600,
-		.proc_handler	= &pm_do_sleep
-	},
-	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "freq",
-		.data		= NULL,
-		.maxlen		= 0,
-		.mode		= 0600,
-		.proc_handler	= &pm_do_freq
-	},
-	{}
-};
-
-static struct ctl_table pm_dir_table[] = {
-	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "pm",
-		.mode		= 0555,
-		.child		= pm_table
-	},
-	{}
-};
-
-/*
- * Initialize power interface
- */
-static int __init pm_init(void)
-{
-	register_sysctl_table(pm_dir_table);
-	return 0;
-}
-
-__initcall(pm_init);
-
-/*
- * This is right out of init/main.c
- */
-
-/*
- * This is the number of bits of precision for the loops_per_jiffy.
- * Each bit takes on average 1.5/HZ seconds.  This (like the original)
- * is a little better than 1%.
- */
-#define LPS_PREC 8
-
-static void au1000_calibrate_delay(void)
-{
-	unsigned long ticks, loopbit;
-	int lps_precision = LPS_PREC;
-
-	loops_per_jiffy = 1 << 12;
-
-	while (loops_per_jiffy <<= 1) {
-		/* Wait for "start of" clock tick */
-		ticks = jiffies;
-		while (ticks == jiffies)
-			/* nothing */ ;
-		/* Go ... */
-		ticks = jiffies;
-		__delay(loops_per_jiffy);
-		ticks = jiffies - ticks;
-		if (ticks)
-			break;
-	}
-
-	/*
-	 * Do a binary approximation to get loops_per_jiffy set to be equal
-	 * one clock (up to lps_precision bits)
-	 */
-	loops_per_jiffy >>= 1;
-	loopbit = loops_per_jiffy;
-	while (lps_precision-- && (loopbit >>= 1)) {
-		loops_per_jiffy |= loopbit;
-		ticks = jiffies;
-		while (ticks == jiffies);
-		ticks = jiffies;
-		__delay(loops_per_jiffy);
-		if (jiffies != ticks)	/* longer than 1 tick */
-			loops_per_jiffy &= ~loopbit;
-	}
-}
 #endif	/* CONFIG_PM */
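
With the old /proc/sys/pm interface gone, au_sleep() is left to be driven by a standard suspend hook (the new devboards pm.o object added later in this series is the likely home). A rough sketch of such a hook, offered only as an assumption of how the pieces fit together, not as the actual implementation:

#include <linux/init.h>
#include <linux/suspend.h>

extern void au_sleep(void);

/* sketch only: wakeup sources (SYS_WAKEMSK etc.) must be armed beforehand */
static int alchemy_pm_enter(suspend_state_t state)
{
	au_sleep();
	return 0;
}

static struct platform_suspend_ops alchemy_pm_ops = {
	.valid	= suspend_valid_only_mem,
	.enter	= alchemy_pm_enter,
};

static int __init alchemy_pm_init(void)
{
	suspend_set_ops(&alchemy_pm_ops);
	return 0;
}
arch_initcall(alchemy_pm_init);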
diff --git a/arch/mips/alchemy/common/reset.c b/arch/mips/alchemy/common/reset.c
index d555429..0191c93 100644
--- a/arch/mips/alchemy/common/reset.c
+++ b/arch/mips/alchemy/common/reset.c
@@ -31,8 +31,6 @@
 
 #include <asm/mach-au1x00/au1000.h>
 
-extern int au_sleep(void);
-
 void au1000_restart(char *command)
 {
 	/* Set all integrated peripherals to disabled states */
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index 1ac6b06..3f036b3 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -35,7 +35,6 @@
 #include <asm/time.h>
 
 #include <au1000.h>
-#include <prom.h>
 
 extern void __init board_setup(void);
 extern void au1000_restart(char *);
@@ -45,80 +44,34 @@
 
 void __init plat_mem_setup(void)
 {
-	struct	cpu_spec *sp;
-	char *argptr;
-	unsigned long prid, cpufreq, bclk;
+	unsigned long est_freq;
 
-	set_cpuspec();
-	sp = cur_cpu_spec[0];
+	/* determine core clock */
+	est_freq = au1xxx_calc_clock();
+	est_freq += 5000;    /* round */
+	est_freq -= est_freq % 10000;
+	printk(KERN_INFO "(PRId %08x) @ %lu.%02lu MHz\n", read_c0_prid(),
+	       est_freq / 1000000, ((est_freq % 1000000) * 100) / 1000000);
+
+	_machine_restart = au1000_restart;
+	_machine_halt = au1000_halt;
+	pm_power_off = au1000_power_off;
 
 	board_setup();  /* board specific setup */
 
-	prid = read_c0_prid();
-	if (sp->cpu_pll_wo)
-#ifdef CONFIG_SOC_AU1000_FREQUENCY
-		cpufreq = CONFIG_SOC_AU1000_FREQUENCY / 1000000;
-#else
-		cpufreq = 396;
-#endif
-	else
-		cpufreq = (au_readl(SYS_CPUPLL) & 0x3F) * 12;
-	printk(KERN_INFO "(PRID %08lx) @ %ld MHz\n", prid, cpufreq);
-
-	if (sp->cpu_bclk) {
-		/* Enable BCLK switching */
-		bclk = au_readl(SYS_POWERCTRL);
-		au_writel(bclk | 0x60, SYS_POWERCTRL);
-		printk(KERN_INFO "BCLK switching enabled!\n");
-	}
-
-	if (sp->cpu_od)
+	if (au1xxx_cpu_needs_config_od())
 		/* Various early Au1xx0 errata corrected by this */
 		set_c0_config(1 << 19); /* Set Config[OD] */
 	else
 		/* Clear to obtain best system bus performance */
 		clear_c0_config(1 << 19); /* Clear Config[OD] */
 
-	argptr = prom_getcmdline();
-
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-	argptr = strstr(argptr, "console=");
-	if (argptr == NULL) {
-		argptr = prom_getcmdline();
-		strcat(argptr, " console=ttyS0,115200");
-	}
-#endif
-
-#ifdef CONFIG_FB_AU1100
-	argptr = strstr(argptr, "video=");
-	if (argptr == NULL) {
-		argptr = prom_getcmdline();
-		/* default panel */
-		/*strcat(argptr, " video=au1100fb:panel:Sharp_320x240_16");*/
-	}
-#endif
-
-#if defined(CONFIG_SOUND_AU1X00) && !defined(CONFIG_SOC_AU1000)
-	/* au1000 does not support vra, au1500 and au1100 do */
-	strcat(argptr, " au1000_audio=vra");
-	argptr = prom_getcmdline();
-#endif
-	_machine_restart = au1000_restart;
-	_machine_halt = au1000_halt;
-	pm_power_off = au1000_power_off;
-
 	/* IO/MEM resources. */
 	set_io_port_base(0);
 	ioport_resource.start = IOPORT_RESOURCE_START;
 	ioport_resource.end = IOPORT_RESOURCE_END;
 	iomem_resource.start = IOMEM_RESOURCE_START;
 	iomem_resource.end = IOMEM_RESOURCE_END;
-
-	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_E0S);
-	au_writel(SYS_CNTRL_E0 | SYS_CNTRL_EN0, SYS_COUNTER_CNTRL);
-	au_sync();
-	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T0S);
-	au_writel(0, SYS_TOYTRIM);
 }
 
 #if defined(CONFIG_64BIT_PHYS_ADDR)
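
The two rounding lines in plat_mem_setup() snap the estimated core clock to the nearest 10 kHz before printing it. A quick worked example with a hypothetical raw estimate (not part of the patch):

/*
 *	est_freq = 396578123;		   raw au1xxx_calc_clock() result (hypothetical)
 *	est_freq += 5000;		-> 396583123
 *	est_freq -= est_freq % 10000;	-> 396580000, printed as "396.58 MHz"
 */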
diff --git a/arch/mips/alchemy/common/sleeper.S b/arch/mips/alchemy/common/sleeper.S
index 3006e27..4f4b167 100644
--- a/arch/mips/alchemy/common/sleeper.S
+++ b/arch/mips/alchemy/common/sleeper.S
@@ -15,16 +15,17 @@
 #include <asm/regdef.h>
 #include <asm/stackframe.h>
 
+	.extern __flush_cache_all
+
 	.text
-	.set	macro
-	.set	noat
+	.set noreorder
+	.set noat
 	.align	5
 
 /* Save all of the processor general registers and go to sleep.
  * A wakeup condition will get us back here to restore the registers.
  */
-LEAF(save_and_sleep)
-
+LEAF(au1xxx_save_and_sleep)
 	subu	sp, PT_SIZE
 	sw	$1, PT_R1(sp)
 	sw	$2, PT_R2(sp)
@@ -33,14 +34,6 @@
 	sw	$5, PT_R5(sp)
 	sw	$6, PT_R6(sp)
 	sw	$7, PT_R7(sp)
-	sw	$8, PT_R8(sp)
-	sw	$9, PT_R9(sp)
-	sw	$10, PT_R10(sp)
-	sw	$11, PT_R11(sp)
-	sw	$12, PT_R12(sp)
-	sw	$13, PT_R13(sp)
-	sw	$14, PT_R14(sp)
-	sw	$15, PT_R15(sp)
 	sw	$16, PT_R16(sp)
 	sw	$17, PT_R17(sp)
 	sw	$18, PT_R18(sp)
@@ -49,12 +42,9 @@
 	sw	$21, PT_R21(sp)
 	sw	$22, PT_R22(sp)
 	sw	$23, PT_R23(sp)
-	sw	$24, PT_R24(sp)
-	sw	$25, PT_R25(sp)
 	sw	$26, PT_R26(sp)
 	sw	$27, PT_R27(sp)
 	sw	$28, PT_R28(sp)
-	sw	$29, PT_R29(sp)
 	sw	$30, PT_R30(sp)
 	sw	$31, PT_R31(sp)
 	mfc0	k0, CP0_STATUS
@@ -66,20 +56,26 @@
 	mfc0	k0, CP0_CONFIG
 	sw	k0, 0x14(sp)
 
+	/* flush caches to make sure context is in memory */
+	la	t1, __flush_cache_all
+	lw	t0, 0(t1)
+	jalr	t0
+	 nop
+
 	/* Now set up the scratch registers so the boot rom will
 	 * return to this point upon wakeup.
+	 * sys_scratch0 : SP
+	 * sys_scratch1 : RA
 	 */
-	la	k0, 1f
-	lui	k1, 0xb190
-	ori	k1, 0x18
-	sw	sp, 0(k1)
-	ori 	k1, 0x1c
-	sw	k0, 0(k1)
+	lui	t3, 0xb190		/* sys_xxx */
+	sw	sp, 0x0018(t3)
+	la	k0, 3f			/* resume path */
+	sw	k0, 0x001c(t3)
 
-/* Put SDRAM into self refresh.  Preload instructions into cache,
- * issue a precharge, then auto refresh, then sleep commands to it.
- */
-	la	t0, sdsleep
+	/* Put SDRAM into self refresh:  Preload instructions into cache,
+	 * issue a precharge, auto/self refresh, then sleep commands to it.
+	 */
+	la	t0, 1f
 	.set	mips3
 	cache	0x14, 0(t0)
 	cache	0x14, 32(t0)
@@ -87,24 +83,57 @@
 	cache	0x14, 96(t0)
 	.set	mips0
 
-sdsleep:
-	lui 	k0, 0xb400
-	sw	zero, 0x001c(k0)	/* Precharge */
-	sw	zero, 0x0020(k0)	/* Auto refresh */
-	sw	zero, 0x0030(k0)	/* SDRAM sleep */
+1:	lui 	a0, 0xb400		/* mem_xxx */
+#if defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100) ||	\
+    defined(CONFIG_SOC_AU1500)
+	sw	zero, 0x001c(a0) 	/* Precharge */
+	sync
+	sw	zero, 0x0020(a0)	/* Auto Refresh */
+	sync
+	sw	zero, 0x0030(a0)  	/* Sleep */
+	sync
+#endif
+
+#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
+	sw	zero, 0x08c0(a0) 	/* Precharge */
+	sync
+	sw	zero, 0x08d0(a0)	/* Self Refresh */
 	sync
 
-	lui 	k1, 0xb190
-	sw	zero, 0x0078(k1)	/* get ready  to sleep */
+	/* wait for sdram to enter self-refresh mode */
+	lui 	t0, 0x0100
+2:	lw 	t1, 0x0850(a0)		/* mem_sdstat */
+	and	t2, t1, t0
+	beq	t2, zero, 2b
+	 nop
+
+	/* disable SDRAM clocks */
+	lui	t0, 0xcfff
+	ori	t0, t0, 0xffff
+	lw 	t1, 0x0840(a0)		/* mem_sdconfiga */
+	and 	t1, t0, t1		/* clear CE[1:0] */
+	sw 	t1, 0x0840(a0)		/* mem_sdconfiga */
 	sync
-	sw	zero, 0x007c(k1)	/* Put processor to sleep */
+#endif
+
+	/* put power supply and processor to sleep */
+	sw	zero, 0x0078(t3)	/* sys_slppwr */
 	sync
+	sw	zero, 0x007c(t3)	/* sys_sleep */
+	sync
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
 
 	/* This is where we return upon wakeup.
 	 * Reload all of the registers and return.
 	 */
-1:	nop
-	lw	k0, 0x20(sp)
+3:	lw	k0, 0x20(sp)
 	mtc0	k0, CP0_STATUS
 	lw	k0, 0x1c(sp)
 	mtc0	k0, CP0_CONTEXT
@@ -113,10 +142,11 @@
 	lw	k0, 0x14(sp)
 	mtc0	k0, CP0_CONFIG
 
-	/* We need to catch the ealry Alchemy SOCs with
+	/* We need to catch the early Alchemy SOCs with
 	 * the write-only Config[OD] bit and set it back to one...
 	 */
 	jal	au1x00_fixup_config_od
+	 nop
 	lw	$1, PT_R1(sp)
 	lw	$2, PT_R2(sp)
 	lw	$3, PT_R3(sp)
@@ -124,14 +154,6 @@
 	lw	$5, PT_R5(sp)
 	lw	$6, PT_R6(sp)
 	lw	$7, PT_R7(sp)
-	lw	$8, PT_R8(sp)
-	lw	$9, PT_R9(sp)
-	lw	$10, PT_R10(sp)
-	lw	$11, PT_R11(sp)
-	lw	$12, PT_R12(sp)
-	lw	$13, PT_R13(sp)
-	lw	$14, PT_R14(sp)
-	lw	$15, PT_R15(sp)
 	lw	$16, PT_R16(sp)
 	lw	$17, PT_R17(sp)
 	lw	$18, PT_R18(sp)
@@ -140,15 +162,11 @@
 	lw	$21, PT_R21(sp)
 	lw	$22, PT_R22(sp)
 	lw	$23, PT_R23(sp)
-	lw	$24, PT_R24(sp)
-	lw	$25, PT_R25(sp)
 	lw	$26, PT_R26(sp)
 	lw	$27, PT_R27(sp)
 	lw	$28, PT_R28(sp)
-	lw	$29, PT_R29(sp)
 	lw	$30, PT_R30(sp)
 	lw	$31, PT_R31(sp)
-	addiu	sp, PT_SIZE
-
 	jr	ra
-END(save_and_sleep)
+	 addiu	sp, PT_SIZE
+END(au1xxx_save_and_sleep)
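
The Au1550/Au1200 branch above parks the SDRAM in self-refresh and gates its clocks before sleeping. A rough C-level paraphrase, for readability only; the real sequence must run from the preloaded cache lines (hence the "cache 0x14" prefetches), so it cannot actually live in C. Addresses and masks are taken from the instructions above:

#include <asm/mach-au1x00/au1000.h>

/* illustration of the Au1550/Au1200 path; offsets as in the asm above */
static void sdram_selfrefresh_sketch(void)
{
	au_writel(0, 0xb40008c0);		/* precharge */
	au_sync();
	au_writel(0, 0xb40008d0);		/* self refresh */
	au_sync();

	/* wait for mem_sdstat to report self-refresh mode */
	while (!(au_readl(0xb4000850) & 0x01000000))
		;

	/* clear CE[1:0] in mem_sdconfiga to stop the SDRAM clocks */
	au_writel(au_readl(0xb4000840) & 0xcfffffff, 0xb4000840);
	au_sync();
}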
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index 563d939..3288014 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -1,5 +1,7 @@
 /*
+ * Copyright (C) 2008 Manuel Lauss <mano@roarinelk.homelinux.net>
  *
+ * Previous incarnations were:
  * Copyright (C) 2001, 2006, 2008 MontaVista Software, <source@mvista.com>
  * Copied and modified Carsten Langgaard's time.c
  *
@@ -23,244 +25,141 @@
  *
  * ########################################################################
  *
- * Setting up the clock on the MIPS boards.
- *
- * We provide the clock interrupt processing and the timer offset compute
- * functions.  If CONFIG_PM is selected, we also ensure the 32KHz timer is
- * available.  -- Dan
+ * Clocksource/event using the 32.768kHz-clocked Counter1 ('RTC' in the
+ * databooks).  Firmware/Board init code must enable the counters in the
+ * counter control register, otherwise the CP0 counter clocksource/event
+ * will be installed instead (and use of the 'wait' instruction is prohibited).
  */
 
-#include <linux/types.h>
-#include <linux/init.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
 #include <linux/spinlock.h>
 
-#include <asm/mipsregs.h>
 #include <asm/time.h>
 #include <asm/mach-au1x00/au1000.h>
 
-static int no_au1xxx_32khz;
+/* 32kHz clock enabled and detected */
+#define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)
+
 extern int allow_au1k_wait; /* default off for CP0 Counter */
 
-#ifdef CONFIG_PM
-#if HZ < 100 || HZ > 1000
-#error "unsupported HZ value! Must be in [100,1000]"
-#endif
-#define MATCH20_INC (328 * 100 / HZ) /* magic number 328 is for HZ=100... */
-static unsigned long last_pc0, last_match20;
-#endif
-
-static DEFINE_SPINLOCK(time_lock);
-
-unsigned long wtimer;
-
-#ifdef CONFIG_PM
-static irqreturn_t counter0_irq(int irq, void *dev_id)
+static cycle_t au1x_counter1_read(void)
 {
-	unsigned long pc0;
-	int time_elapsed;
-	static int jiffie_drift;
+	return au_readl(SYS_RTCREAD);
+}
 
-	if (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20) {
-		/* should never happen! */
-		printk(KERN_WARNING "counter 0 w status error\n");
-		return IRQ_NONE;
-	}
+static struct clocksource au1x_counter1_clocksource = {
+	.name		= "alchemy-counter1",
+	.read		= au1x_counter1_read,
+	.mask		= CLOCKSOURCE_MASK(32),
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+	.rating		= 100,
+};
 
-	pc0 = au_readl(SYS_TOYREAD);
-	if (pc0 < last_match20)
-		/* counter overflowed */
-		time_elapsed = (0xffffffff - last_match20) + pc0;
-	else
-		time_elapsed = pc0 - last_match20;
-
-	while (time_elapsed > 0) {
-		do_timer(1);
-#ifndef CONFIG_SMP
-		update_process_times(user_mode(get_irq_regs()));
-#endif
-		time_elapsed -= MATCH20_INC;
-		last_match20 += MATCH20_INC;
-		jiffie_drift++;
-	}
-
-	last_pc0 = pc0;
-	au_writel(last_match20 + MATCH20_INC, SYS_TOYMATCH2);
+static int au1x_rtcmatch2_set_next_event(unsigned long delta,
+					 struct clock_event_device *cd)
+{
+	delta += au_readl(SYS_RTCREAD);
+	/* wait for register access */
+	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M21)
+		;
+	au_writel(delta, SYS_RTCMATCH2);
 	au_sync();
 
-	/*
-	 * Our counter ticks at 10.009765625 ms/tick, we we're running
-	 * almost 10 uS too slow per tick.
-	 */
+	return 0;
+}
 
-	if (jiffie_drift >= 999) {
-		jiffie_drift -= 999;
-		do_timer(1); /* increment jiffies by one */
-#ifndef CONFIG_SMP
-		update_process_times(user_mode(get_irq_regs()));
-#endif
-	}
+static void au1x_rtcmatch2_set_mode(enum clock_event_mode mode,
+				    struct clock_event_device *cd)
+{
+}
 
+static irqreturn_t au1x_rtcmatch2_irq(int irq, void *dev_id)
+{
+	struct clock_event_device *cd = dev_id;
+	cd->event_handler(cd);
 	return IRQ_HANDLED;
 }
 
-struct irqaction counter0_action = {
-	.handler	= counter0_irq,
-	.flags		= IRQF_DISABLED,
-	.name		= "alchemy-toy",
-	.dev_id		= NULL,
+static struct clock_event_device au1x_rtcmatch2_clockdev = {
+	.name		= "rtcmatch2",
+	.features	= CLOCK_EVT_FEAT_ONESHOT,
+	.rating		= 100,
+	.irq		= AU1000_RTC_MATCH2_INT,
+	.set_next_event	= au1x_rtcmatch2_set_next_event,
+	.set_mode	= au1x_rtcmatch2_set_mode,
+	.cpumask	= CPU_MASK_ALL,
 };
 
-/* When we wakeup from sleep, we have to "catch up" on all of the
- * timer ticks we have missed.
- */
-void wakeup_counter0_adjust(void)
-{
-	unsigned long pc0;
-	int time_elapsed;
-
-	pc0 = au_readl(SYS_TOYREAD);
-	if (pc0 < last_match20)
-		/* counter overflowed */
-		time_elapsed = (0xffffffff - last_match20) + pc0;
-	else
-		time_elapsed = pc0 - last_match20;
-
-	while (time_elapsed > 0) {
-		time_elapsed -= MATCH20_INC;
-		last_match20 += MATCH20_INC;
-	}
-
-	last_pc0 = pc0;
-	au_writel(last_match20 + MATCH20_INC, SYS_TOYMATCH2);
-	au_sync();
-
-}
-
-/* This is just for debugging to set the timer for a sleep delay. */
-void wakeup_counter0_set(int ticks)
-{
-	unsigned long pc0;
-
-	pc0 = au_readl(SYS_TOYREAD);
-	last_pc0 = pc0;
-	au_writel(last_match20 + (MATCH20_INC * ticks), SYS_TOYMATCH2);
-	au_sync();
-}
-#endif
-
-/*
- * I haven't found anyone that doesn't use a 12 MHz source clock,
- * but just in case.....
- */
-#define AU1000_SRC_CLK	12000000
-
-/*
- * We read the real processor speed from the PLL.  This is important
- * because it is more accurate than computing it from the 32 KHz
- * counter, if it exists.  If we don't have an accurate processor
- * speed, all of the peripherals that derive their clocks based on
- * this advertised speed will introduce error and sometimes not work
- * properly.  This function is futher convoluted to still allow configurations
- * to do that in case they have really, really old silicon with a
- * write-only PLL register, that we need the 32 KHz when power management
- * "wait" is enabled, and we need to detect if the 32 KHz isn't present
- * but requested......got it? :-)		-- Dan
- */
-unsigned long calc_clock(void)
-{
-	unsigned long cpu_speed;
-	unsigned long flags;
-	unsigned long counter;
-
-	spin_lock_irqsave(&time_lock, flags);
-
-	/* Power management cares if we don't have a 32 KHz counter. */
-	no_au1xxx_32khz = 0;
-	counter = au_readl(SYS_COUNTER_CNTRL);
-	if (counter & SYS_CNTRL_E0) {
-		int trim_divide = 16;
-
-		au_writel(counter | SYS_CNTRL_EN1, SYS_COUNTER_CNTRL);
-
-		while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S);
-		/* RTC now ticks at 32.768/16 kHz */
-		au_writel(trim_divide - 1, SYS_RTCTRIM);
-		while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S);
-
-		while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S);
-		au_writel(0, SYS_TOYWRITE);
-		while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S);
-	} else
-		no_au1xxx_32khz = 1;
-
-	/*
-	 * On early Au1000, sys_cpupll was write-only. Since these
-	 * silicon versions of Au1000 are not sold by AMD, we don't bend
-	 * over backwards trying to determine the frequency.
-	 */
-	if (cur_cpu_spec[0]->cpu_pll_wo)
-#ifdef CONFIG_SOC_AU1000_FREQUENCY
-		cpu_speed = CONFIG_SOC_AU1000_FREQUENCY;
-#else
-		cpu_speed = 396000000;
-#endif
-	else
-		cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) * AU1000_SRC_CLK;
-	/* On Alchemy CPU:counter ratio is 1:1 */
-	mips_hpt_frequency = cpu_speed;
-	/* Equation: Baudrate = CPU / (SD * 2 * CLKDIV * 16) */
-	set_au1x00_uart_baud_base(cpu_speed / (2 * ((int)(au_readl(SYS_POWERCTRL)
-							  & 0x03) + 2) * 16));
-	spin_unlock_irqrestore(&time_lock, flags);
-	return cpu_speed;
-}
+static struct irqaction au1x_rtcmatch2_irqaction = {
+	.handler	= au1x_rtcmatch2_irq,
+	.flags		= IRQF_DISABLED | IRQF_TIMER,
+	.name		= "timer",
+	.dev_id		= &au1x_rtcmatch2_clockdev,
+};
 
 void __init plat_time_init(void)
 {
-	unsigned int est_freq = calc_clock();
+	struct clock_event_device *cd = &au1x_rtcmatch2_clockdev;
+	unsigned long t;
 
-	est_freq += 5000;    /* round */
-	est_freq -= est_freq%10000;
-	printk(KERN_INFO "CPU frequency %u.%02u MHz\n",
-	       est_freq / 1000000, ((est_freq % 1000000) * 100) / 1000000);
-	set_au1x00_speed(est_freq);
-	set_au1x00_lcd_clock(); /* program the LCD clock */
-
-#ifdef CONFIG_PM
-	/*
-	 * setup counter 0, since it keeps ticking after a
-	 * 'wait' instruction has been executed. The CP0 timer and
-	 * counter 1 do NOT continue running after 'wait'
-	 *
-	 * It's too early to call request_irq() here, so we handle
-	 * counter 0 interrupt as a special irq and it doesn't show
-	 * up under /proc/interrupts.
-	 *
-	 * Check to ensure we really have a 32 KHz oscillator before
-	 * we do this.
+	/* Check if firmware (YAMON, ...) has enabled 32kHz and clock
+	 * has been detected.  If so install the rtcmatch2 clocksource,
+	 * otherwise don't bother.  Note that both bits being set is by
+	 * no means a definite guarantee that the counters actually work
+	 * (the 32S bit seems to be stuck set to 1 once a single clock-
+	 * edge is detected, hence the timeouts).
 	 */
-	if (no_au1xxx_32khz)
-		printk(KERN_WARNING "WARNING: no 32KHz clock found.\n");
-	else {
-		while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S);
-		au_writel(0, SYS_TOYWRITE);
-		while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S);
+	if (CNTR_OK != (au_readl(SYS_COUNTER_CNTRL) & CNTR_OK))
+		goto cntr_err;
 
-		au_writel(au_readl(SYS_WAKEMSK) | (1 << 8), SYS_WAKEMSK);
-		au_writel(~0, SYS_WAKESRC);
-		au_sync();
-		while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20);
+	/*
+	 * setup counter 1 (RTC) to tick at full speed
+	 */
+	t = 0xffffff;
+	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S) && t--)
+		asm volatile ("nop");
+	if (!t)
+		goto cntr_err;
 
-		/* Setup match20 to interrupt once every HZ */
-		last_pc0 = last_match20 = au_readl(SYS_TOYREAD);
-		au_writel(last_match20 + MATCH20_INC, SYS_TOYMATCH2);
-		au_sync();
-		while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20);
-		setup_irq(AU1000_TOY_MATCH2_INT, &counter0_action);
+	au_writel(0, SYS_RTCTRIM);	/* 32.768 kHz */
+	au_sync();
 
-		/* We can use the real 'wait' instruction. */
-		allow_au1k_wait = 1;
-	}
+	t = 0xffffff;
+	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && t--)
+		asm volatile ("nop");
+	if (!t)
+		goto cntr_err;
+	au_writel(0, SYS_RTCWRITE);
+	au_sync();
 
-#endif
+	t = 0xffffff;
+	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && t--)
+		asm volatile ("nop");
+	if (!t)
+		goto cntr_err;
+
+	/* register counter1 clocksource and event device */
+	clocksource_set_clock(&au1x_counter1_clocksource, 32768);
+	clocksource_register(&au1x_counter1_clocksource);
+
+	cd->shift = 32;
+	cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift);
+	cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd);
+	cd->min_delta_ns = clockevent_delta2ns(8, cd);	/* ~0.25ms */
+	clockevents_register_device(cd);
+	setup_irq(AU1000_RTC_MATCH2_INT, &au1x_rtcmatch2_irqaction);
+
+	printk(KERN_INFO "Alchemy clocksource installed\n");
+
+	/* can now use 'wait' */
+	allow_au1k_wait = 1;
+	return;
+
+cntr_err:
+	/* counters unusable, use C0 counter */
+	r4k_clockevent_init();
+	init_r4k_clocksource();
+	allow_au1k_wait = 0;
 }
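
As a sanity check on the clockevent parameters chosen above (a worked example, not part of the patch):

/*
 *	mult = div_sc(32768, NSEC_PER_SEC, 32)
 *	     = (32768 << 32) / 1000000000	~= 140737
 *
 *	min_delta_ns = clockevent_delta2ns(8, cd)
 *	             = (8 << 32) / 140737	~= 244140 ns
 *
 * i.e. roughly 0.24 ms, matching the "~0.25ms" note next to the
 * minimum delta of 8 counter ticks.
 */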
diff --git a/arch/mips/alchemy/db1x00/Makefile b/arch/mips/alchemy/db1x00/Makefile
deleted file mode 100644
index 274db3b..0000000
--- a/arch/mips/alchemy/db1x00/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-#  Copyright 2000, 2008 MontaVista Software Inc.
-#  Author: MontaVista Software, Inc. <source@mvista.com>
-#
-# Makefile for the Alchemy Semiconductor DBAu1xx0 boards.
-#
-
-lib-y := init.o board_setup.o irqmap.o
diff --git a/arch/mips/alchemy/db1x00/board_setup.c b/arch/mips/alchemy/db1x00/board_setup.c
deleted file mode 100644
index 9e5ccbb..0000000
--- a/arch/mips/alchemy/db1x00/board_setup.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *	Alchemy Db1x00 board setup.
- *
- * Copyright 2000, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-
-#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-db1x00/db1x00.h>
-
-static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
-
-void board_reset(void)
-{
-	/* Hit BCSR.SW_RESET[RESET] */
-	bcsr->swreset = 0x0000;
-}
-
-void __init board_setup(void)
-{
-	u32 pin_func = 0;
-
-	/* Not valid for Au1550 */
-#if defined(CONFIG_IRDA) && \
-   (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100))
-	/* Set IRFIRSEL instead of GPIO15 */
-	pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF;
-	au_writel(pin_func, SYS_PINFUNC);
-	/* Power off until the driver is in use */
-	bcsr->resets &= ~BCSR_RESETS_IRDA_MODE_MASK;
-	bcsr->resets |=  BCSR_RESETS_IRDA_MODE_OFF;
-	au_sync();
-#endif
-	bcsr->pcmcia = 0x0000; /* turn off PCMCIA power */
-
-#ifdef CONFIG_MIPS_MIRAGE
-	/* Enable GPIO[31:0] inputs */
-	au_writel(0, SYS_PININPUTEN);
-
-	/* GPIO[20] is output, tristate the other input primary GPIOs */
-	au_writel(~(1 << 20), SYS_TRIOUTCLR);
-
-	/* Set GPIO[210:208] instead of SSI_0 */
-	pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0;
-
-	/* Set GPIO[215:211] for LEDs */
-	pin_func |= 5 << 2;
-
-	/* Set GPIO[214:213] for more LEDs */
-	pin_func |= 5 << 12;
-
-	/* Set GPIO[207:200] instead of PCMCIA/LCD */
-	pin_func |= SYS_PF_LCD | SYS_PF_PC;
-	au_writel(pin_func, SYS_PINFUNC);
-
-	/*
-	 * Enable speaker amplifier.  This should
-	 * be part of the audio driver.
-	 */
-	au_writel(au_readl(GPIO2_DIR) | 0x200, GPIO2_DIR);
-	au_writel(0x02000200, GPIO2_OUTPUT);
-#endif
-
-	au_sync();
-
-#ifdef CONFIG_MIPS_DB1000
-	printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n");
-#endif
-#ifdef CONFIG_MIPS_DB1500
-	printk(KERN_INFO "AMD Alchemy Au1500/Db1500 Board\n");
-#endif
-#ifdef CONFIG_MIPS_DB1100
-	printk(KERN_INFO "AMD Alchemy Au1100/Db1100 Board\n");
-#endif
-#ifdef CONFIG_MIPS_BOSPORUS
-	printk(KERN_INFO "AMD Alchemy Bosporus Board\n");
-#endif
-#ifdef CONFIG_MIPS_MIRAGE
-	printk(KERN_INFO "AMD Alchemy Mirage Board\n");
-#endif
-#ifdef CONFIG_MIPS_DB1550
-	printk(KERN_INFO "AMD Alchemy Au1550/Db1550 Board\n");
-#endif
-}
diff --git a/arch/mips/alchemy/db1x00/init.c b/arch/mips/alchemy/db1x00/init.c
deleted file mode 100644
index 8474135..0000000
--- a/arch/mips/alchemy/db1x00/init.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- *	PB1000 board setup
- *
- * Copyright 2001, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <asm/bootinfo.h>
-
-#include <prom.h>
-
-const char *get_system_type(void)
-{
-#ifdef CONFIG_MIPS_BOSPORUS
-	return "Alchemy Bosporus Gateway Reference";
-#else
-	return "Alchemy Db1x00";
-#endif
-}
-
-void __init prom_init(void)
-{
-	unsigned char *memsize_str;
-	unsigned long memsize;
-
-	prom_argc = fw_arg0;
-	prom_argv = (char **)fw_arg1;
-	prom_envp = (char **)fw_arg2;
-
-	prom_init_cmdline();
-
-	memsize_str = prom_getenv("memsize");
-	if (!memsize_str)
-		memsize = 0x04000000;
-	else
-		strict_strtol(memsize_str, 0, &memsize);
-	add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
diff --git a/arch/mips/alchemy/db1x00/irqmap.c b/arch/mips/alchemy/db1x00/irqmap.c
deleted file mode 100644
index 94c090e..0000000
--- a/arch/mips/alchemy/db1x00/irqmap.c
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- *	Au1xxx irq map table
- *
- * Copyright 2003 Embedded Edge, LLC
- *		dan@embeddededge.com
- *
- *  This program is free software; you can redistribute	 it and/or modify it
- *  under  the terms of	 the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the	License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED	  ``AS	IS'' AND   ANY	EXPRESS OR IMPLIED
- *  WARRANTIES,	  INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO	EVENT  SHALL   THE AUTHOR  BE	 LIABLE FOR ANY	  DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED	  TO, PROCUREMENT OF  SUBSTITUTE GOODS	OR SERVICES; LOSS OF
- *  USE, DATA,	OR PROFITS; OR	BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN	 CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-
-#include <asm/mach-au1x00/au1000.h>
-
-#ifdef CONFIG_MIPS_DB1500
-char irq_tab_alchemy[][5] __initdata = {
-	[12] = { -1, INTA, INTX, INTX, INTX }, /* IDSEL 12 - HPT371   */
-	[13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot */
-};
-#endif
-
-#ifdef CONFIG_MIPS_BOSPORUS
-char irq_tab_alchemy[][5] __initdata = {
-	[11] = { -1, INTA, INTB, INTX, INTX }, /* IDSEL 11 - miniPCI  */
-	[12] = { -1, INTA, INTX, INTX, INTX }, /* IDSEL 12 - SN1741   */
-	[13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot */
-};
-#endif
-
-#ifdef CONFIG_MIPS_MIRAGE
-char irq_tab_alchemy[][5] __initdata = {
-	[11] = { -1, INTD, INTX, INTX, INTX }, /* IDSEL 11 - SMI VGX */
-	[12] = { -1, INTX, INTX, INTC, INTX }, /* IDSEL 12 - PNX1300 */
-	[13] = { -1, INTA, INTB, INTX, INTX }, /* IDSEL 13 - miniPCI */
-};
-#endif
-
-#ifdef CONFIG_MIPS_DB1550
-char irq_tab_alchemy[][5] __initdata = {
-	[11] = { -1, INTC, INTX, INTX, INTX }, /* IDSEL 11 - on-board HPT371 */
-	[12] = { -1, INTB, INTC, INTD, INTA }, /* IDSEL 12 - PCI slot 2 (left) */
-	[13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot 1 (right) */
-};
-#endif
-
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
-
-#ifndef CONFIG_MIPS_MIRAGE
-#ifdef CONFIG_MIPS_DB1550
-	{ AU1000_GPIO_3, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 0 IRQ# */
-	{ AU1000_GPIO_5, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 1 IRQ# */
-#else
-	{ AU1000_GPIO_0, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 0 Fully_Interted# */
-	{ AU1000_GPIO_1, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 0 STSCHG# */
-	{ AU1000_GPIO_2, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 0 IRQ# */
-
-	{ AU1000_GPIO_3, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 1 Fully_Interted# */
-	{ AU1000_GPIO_4, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 1 STSCHG# */
-	{ AU1000_GPIO_5, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 1 IRQ# */
-#endif
-#else
-	{ AU1000_GPIO_7, INTC_INT_RISE_EDGE, 0 }, /* touchscreen pen down */
-#endif
-
-};
-
-int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
diff --git a/arch/mips/alchemy/devboards/Makefile b/arch/mips/alchemy/devboards/Makefile
new file mode 100644
index 0000000..730f9f2
--- /dev/null
+++ b/arch/mips/alchemy/devboards/Makefile
@@ -0,0 +1,18 @@
+#
+# Alchemy Develboards
+#
+
+obj-y += prom.o
+obj-$(CONFIG_PM)		+= pm.o
+obj-$(CONFIG_MIPS_PB1000)	+= pb1000/
+obj-$(CONFIG_MIPS_PB1100)	+= pb1100/
+obj-$(CONFIG_MIPS_PB1200)	+= pb1200/
+obj-$(CONFIG_MIPS_PB1500)	+= pb1500/
+obj-$(CONFIG_MIPS_PB1550)	+= pb1550/
+obj-$(CONFIG_MIPS_DB1000)	+= db1x00/
+obj-$(CONFIG_MIPS_DB1100)	+= db1x00/
+obj-$(CONFIG_MIPS_DB1200)	+= pb1200/
+obj-$(CONFIG_MIPS_DB1500)	+= db1x00/
+obj-$(CONFIG_MIPS_DB1550)	+= db1x00/
+obj-$(CONFIG_MIPS_BOSPORUS)	+= db1x00/
+obj-$(CONFIG_MIPS_MIRAGE)	+= db1x00/
diff --git a/arch/mips/alchemy/devboards/db1x00/Makefile b/arch/mips/alchemy/devboards/db1x00/Makefile
new file mode 100644
index 0000000..432241a
--- /dev/null
+++ b/arch/mips/alchemy/devboards/db1x00/Makefile
@@ -0,0 +1,8 @@
+#
+#  Copyright 2000, 2008 MontaVista Software Inc.
+#  Author: MontaVista Software, Inc. <source@mvista.com>
+#
+# Makefile for the Alchemy Semiconductor DBAu1xx0 boards.
+#
+
+obj-y := board_setup.o irqmap.o
diff --git a/arch/mips/alchemy/devboards/db1x00/board_setup.c b/arch/mips/alchemy/devboards/db1x00/board_setup.c
new file mode 100644
index 0000000..a75ffbf
--- /dev/null
+++ b/arch/mips/alchemy/devboards/db1x00/board_setup.c
@@ -0,0 +1,145 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ *	Alchemy Db1x00 board setup.
+ *
+ * Copyright 2000, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-db1x00/db1x00.h>
+
+#include <prom.h>
+
+
+static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
+
+const char *get_system_type(void)
+{
+#ifdef CONFIG_MIPS_BOSPORUS
+	return "Alchemy Bosporus Gateway Reference";
+#else
+	return "Alchemy Db1x00";
+#endif
+}
+
+void board_reset(void)
+{
+	/* Hit BCSR.SW_RESET[RESET] */
+	bcsr->swreset = 0x0000;
+}
+
+void __init board_setup(void)
+{
+	u32 pin_func = 0;
+	char *argptr;
+
+	argptr = prom_getcmdline();
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+	argptr = strstr(argptr, "console=");
+	if (argptr == NULL) {
+		argptr = prom_getcmdline();
+		strcat(argptr, " console=ttyS0,115200");
+	}
+#endif
+
+#ifdef CONFIG_FB_AU1100
+	argptr = strstr(argptr, "video=");
+	if (argptr == NULL) {
+		argptr = prom_getcmdline();
+		/* default panel */
+		/*strcat(argptr, " video=au1100fb:panel:Sharp_320x240_16");*/
+	}
+#endif
+
+#if defined(CONFIG_SOUND_AU1X00) && !defined(CONFIG_SOC_AU1000)
+	/* au1000 does not support vra, au1500 and au1100 do */
+	strcat(argptr, " au1000_audio=vra");
+	argptr = prom_getcmdline();
+#endif
+
+	/* Not valid for Au1550 */
+#if defined(CONFIG_IRDA) && \
+   (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100))
+	/* Set IRFIRSEL instead of GPIO15 */
+	pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF;
+	au_writel(pin_func, SYS_PINFUNC);
+	/* Power off until the driver is in use */
+	bcsr->resets &= ~BCSR_RESETS_IRDA_MODE_MASK;
+	bcsr->resets |=  BCSR_RESETS_IRDA_MODE_OFF;
+	au_sync();
+#endif
+	bcsr->pcmcia = 0x0000; /* turn off PCMCIA power */
+
+#ifdef CONFIG_MIPS_MIRAGE
+	/* Enable GPIO[31:0] inputs */
+	au_writel(0, SYS_PININPUTEN);
+
+	/* GPIO[20] is output, tristate the other input primary GPIOs */
+	au_writel(~(1 << 20), SYS_TRIOUTCLR);
+
+	/* Set GPIO[210:208] instead of SSI_0 */
+	pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0;
+
+	/* Set GPIO[215:211] for LEDs */
+	pin_func |= 5 << 2;
+
+	/* Set GPIO[214:213] for more LEDs */
+	pin_func |= 5 << 12;
+
+	/* Set GPIO[207:200] instead of PCMCIA/LCD */
+	pin_func |= SYS_PF_LCD | SYS_PF_PC;
+	au_writel(pin_func, SYS_PINFUNC);
+
+	/*
+	 * Enable speaker amplifier.  This should
+	 * be part of the audio driver.
+	 */
+	au_writel(au_readl(GPIO2_DIR) | 0x200, GPIO2_DIR);
+	au_writel(0x02000200, GPIO2_OUTPUT);
+#endif
+
+	au_sync();
+
+#ifdef CONFIG_MIPS_DB1000
+	printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n");
+#endif
+#ifdef CONFIG_MIPS_DB1500
+	printk(KERN_INFO "AMD Alchemy Au1500/Db1500 Board\n");
+#endif
+#ifdef CONFIG_MIPS_DB1100
+	printk(KERN_INFO "AMD Alchemy Au1100/Db1100 Board\n");
+#endif
+#ifdef CONFIG_MIPS_BOSPORUS
+	printk(KERN_INFO "AMD Alchemy Bosporus Board\n");
+#endif
+#ifdef CONFIG_MIPS_MIRAGE
+	printk(KERN_INFO "AMD Alchemy Mirage Board\n");
+#endif
+#ifdef CONFIG_MIPS_DB1550
+	printk(KERN_INFO "AMD Alchemy Au1550/Db1550 Board\n");
+#endif
+}
diff --git a/arch/mips/alchemy/devboards/db1x00/irqmap.c b/arch/mips/alchemy/devboards/db1x00/irqmap.c
new file mode 100644
index 0000000..0b09025
--- /dev/null
+++ b/arch/mips/alchemy/devboards/db1x00/irqmap.c
@@ -0,0 +1,90 @@
+/*
+ * BRIEF MODULE DESCRIPTION
+ *	Au1xxx irq map table
+ *
+ * Copyright 2003 Embedded Edge, LLC
+ *		dan@embeddededge.com
+ *
+ *  This program is free software; you can redistribute	 it and/or modify it
+ *  under  the terms of	 the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the	License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED	  ``AS	IS'' AND   ANY	EXPRESS OR IMPLIED
+ *  WARRANTIES,	  INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO	EVENT  SHALL   THE AUTHOR  BE	 LIABLE FOR ANY	  DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED	  TO, PROCUREMENT OF  SUBSTITUTE GOODS	OR SERVICES; LOSS OF
+ *  USE, DATA,	OR PROFITS; OR	BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN	 CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach-au1x00/au1000.h>
+
+#ifdef CONFIG_MIPS_DB1500
+char irq_tab_alchemy[][5] __initdata = {
+	[12] = { -1, INTA, INTX, INTX, INTX }, /* IDSEL 12 - HPT371   */
+	[13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot */
+};
+#endif
+
+#ifdef CONFIG_MIPS_BOSPORUS
+char irq_tab_alchemy[][5] __initdata = {
+	[11] = { -1, INTA, INTB, INTX, INTX }, /* IDSEL 11 - miniPCI  */
+	[12] = { -1, INTA, INTX, INTX, INTX }, /* IDSEL 12 - SN1741   */
+	[13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot */
+};
+#endif
+
+#ifdef CONFIG_MIPS_MIRAGE
+char irq_tab_alchemy[][5] __initdata = {
+	[11] = { -1, INTD, INTX, INTX, INTX }, /* IDSEL 11 - SMI VGX */
+	[12] = { -1, INTX, INTX, INTC, INTX }, /* IDSEL 12 - PNX1300 */
+	[13] = { -1, INTA, INTB, INTX, INTX }, /* IDSEL 13 - miniPCI */
+};
+#endif
+
+#ifdef CONFIG_MIPS_DB1550
+char irq_tab_alchemy[][5] __initdata = {
+	[11] = { -1, INTC, INTX, INTX, INTX }, /* IDSEL 11 - on-board HPT371 */
+	[12] = { -1, INTB, INTC, INTD, INTA }, /* IDSEL 12 - PCI slot 2 (left) */
+	[13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot 1 (right) */
+};
+#endif
+
+
+struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
+
+#ifndef CONFIG_MIPS_MIRAGE
+#ifdef CONFIG_MIPS_DB1550
+	{ AU1000_GPIO_3, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 0 IRQ# */
+	{ AU1000_GPIO_5, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 1 IRQ# */
+#else
+	{ AU1000_GPIO_0, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 0 Fully_Inserted# */
+	{ AU1000_GPIO_1, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 0 STSCHG# */
+	{ AU1000_GPIO_2, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 0 IRQ# */
+
+	{ AU1000_GPIO_3, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 1 Fully_Inserted# */
+	{ AU1000_GPIO_4, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 1 STSCHG# */
+	{ AU1000_GPIO_5, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 1 IRQ# */
+#endif
+#else
+	{ AU1000_GPIO_7, IRQF_TRIGGER_RISING, 0 }, /* touchscreen pen down */
+#endif
+
+};
+
+void __init board_init_irq(void)
+{
+	au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+}
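
A hypothetical note on extending this table (not part of the patch): a board needing one more GPIO interrupt would simply add another entry and let board_init_irq() hand the whole array to au1xxx_setup_irqmap(), which presumably applies the requested trigger type to each source:

/*
 * Hypothetical extension, for illustration only:
 *
 *	struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
 *		...
 *		{ AU1000_GPIO_8, IRQF_TRIGGER_FALLING, 0 },	(extra card-detect line)
 *	};
 */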
diff --git a/arch/mips/alchemy/devboards/pb1000/Makefile b/arch/mips/alchemy/devboards/pb1000/Makefile
new file mode 100644
index 0000000..97c6615
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pb1000/Makefile
@@ -0,0 +1,8 @@
+#
+#  Copyright 2000, 2008 MontaVista Software Inc.
+#  Author: MontaVista Software, Inc. <source@mvista.com>
+#
+# Makefile for the Alchemy Semiconductor Pb1000 board.
+#
+
+obj-y := board_setup.o
diff --git a/arch/mips/alchemy/devboards/pb1000/board_setup.c b/arch/mips/alchemy/devboards/pb1000/board_setup.c
new file mode 100644
index 0000000..aed2fde
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pb1000/board_setup.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2000, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-pb1x00/pb1000.h>
+#include <prom.h>
+
+
+struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
+	{ AU1000_GPIO_15, IRQF_TRIGGER_LOW, 0 },
+};
+
+
+const char *get_system_type(void)
+{
+	return "Alchemy Pb1000";
+}
+
+void board_reset(void)
+{
+}
+
+void __init board_init_irq(void)
+{
+	au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+}
+
+void __init board_setup(void)
+{
+	u32 pin_func, static_cfg0;
+	u32 sys_freqctrl, sys_clksrc;
+	u32 prid = read_c0_prid();
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+	char *argptr = prom_getcmdline();
+	argptr = strstr(argptr, "console=");
+	if (argptr == NULL) {
+		argptr = prom_getcmdline();
+		strcat(argptr, " console=ttyS0,115200");
+	}
+#endif
+
+	/* Set AUX clock to 12 MHz * 8 = 96 MHz */
+	au_writel(8, SYS_AUXPLL);
+	au_writel(0, SYS_PINSTATERD);
+	udelay(100);
+
+#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+	/* Zero and disable FREQ2 */
+	sys_freqctrl = au_readl(SYS_FREQCTRL0);
+	sys_freqctrl &= ~0xFFF00000;
+	au_writel(sys_freqctrl, SYS_FREQCTRL0);
+
+	/* Zero and disable USBH/USBD clocks */
+	sys_clksrc = au_readl(SYS_CLKSRC);
+	sys_clksrc &= ~(SYS_CS_CUD | SYS_CS_DUD | SYS_CS_MUD_MASK |
+		        SYS_CS_CUH | SYS_CS_DUH | SYS_CS_MUH_MASK);
+	au_writel(sys_clksrc, SYS_CLKSRC);
+
+	sys_freqctrl = au_readl(SYS_FREQCTRL0);
+	sys_freqctrl &= ~0xFFF00000;
+
+	sys_clksrc = au_readl(SYS_CLKSRC);
+	sys_clksrc &= ~(SYS_CS_CUD | SYS_CS_DUD | SYS_CS_MUD_MASK |
+		        SYS_CS_CUH | SYS_CS_DUH | SYS_CS_MUH_MASK);
+
+	switch (prid & 0x000000FF) {
+	case 0x00: /* DA */
+	case 0x01: /* HA */
+	case 0x02: /* HB */
+		/* CPU core freq to 48 MHz to slow it way down... */
+		au_writel(4, SYS_CPUPLL);
+
+		/*
+		 * Setup 48 MHz FREQ2 from CPUPLL for USB Host
+		 * FRDIV2 = 3 -> div by 8 of 384 MHz -> 48 MHz
+		 */
+		sys_freqctrl |= (3 << SYS_FC_FRDIV2_BIT) | SYS_FC_FE2;
+		au_writel(sys_freqctrl, SYS_FREQCTRL0);
+
+		/* CPU core freq to 384 MHz */
+		au_writel(0x20, SYS_CPUPLL);
+
+		printk(KERN_INFO "Au1000: 48 MHz OHCI workaround enabled\n");
+		break;
+
+	default: /* HC and newer */
+		/* FREQ2 = aux / 2 = 48 MHz */
+		sys_freqctrl |= (0 << SYS_FC_FRDIV2_BIT) |
+				 SYS_FC_FE2 | SYS_FC_FS2;
+		au_writel(sys_freqctrl, SYS_FREQCTRL0);
+		break;
+	}
+
+	/*
+	 * Route 48 MHz FREQ2 into USB Host and/or Device
+	 */
+	sys_clksrc |= SYS_CS_MUX_FQ2 << SYS_CS_MUH_BIT;
+	au_writel(sys_clksrc, SYS_CLKSRC);
+
+	/* Configure pins GPIO[14:9] as GPIO */
+	pin_func = au_readl(SYS_PINFUNC) & ~(SYS_PF_UR3 | SYS_PF_USB);
+
+	/* 2nd USB port is USB host */
+	pin_func |= SYS_PF_USB;
+
+	au_writel(pin_func, SYS_PINFUNC);
+	au_writel(0x2800, SYS_TRIOUTCLR);
+	au_writel(0x0030, SYS_OUTPUTCLR);
+#endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */
+
+	/* Make GPIO 15 an input (for interrupt line) */
+	pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_IRF;
+	/* We don't need I2S, so make it available for GPIO[31:29] */
+	pin_func |= SYS_PF_I2S;
+	au_writel(pin_func, SYS_PINFUNC);
+
+	au_writel(0x8000, SYS_TRIOUTCLR);
+
+	static_cfg0 = au_readl(MEM_STCFG0) & ~0xc00;
+	au_writel(static_cfg0, MEM_STCFG0);
+
+	/* configure RCE2* for LCD */
+	au_writel(0x00000004, MEM_STCFG2);
+
+	/* MEM_STTIME2 */
+	au_writel(0x09000000, MEM_STTIME2);
+
+	/* Set 32-bit base address decoding for RCE2* */
+	au_writel(0x10003ff0, MEM_STADDR2);
+
+	/*
+	 * PCI CPLD setup
+	 * Expand CE0 to cover PCI
+	 */
+	au_writel(0x11803e40, MEM_STADDR1);
+
+	/* Burst visibility on */
+	au_writel(au_readl(MEM_STCFG0) | 0x1000, MEM_STCFG0);
+
+	au_writel(0x83, MEM_STCFG1);	     /* ewait enabled, flash timing */
+	au_writel(0x33030a10, MEM_STTIME1);  /* slower timing for FPGA */
+
+	/* Setup the static bus controller */
+	au_writel(0x00000002, MEM_STCFG3);  /* type = PCMCIA */
+	au_writel(0x280E3D07, MEM_STTIME3); /* 250ns cycle time */
+	au_writel(0x10000000, MEM_STADDR3); /* any PCMCIA select */
+
+	/*
+	 * Enable Au1000 BCLK switching - note: sed1356 must not use
+	 * its BCLK (Au1000 LCLK) for any timings
+	 */
+	switch (prid & 0x000000FF) {
+	case 0x00: /* DA */
+	case 0x01: /* HA */
+	case 0x02: /* HB */
+		break;
+	default:  /* HC and newer */
+		/*
+		 * Enable sys bus clock divider when IDLE state or no bus
+		 * activity.
+		 */
+		au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
+		break;
+	}
+}
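
For reference, the clock arithmetic behind the 48 MHz USB clock set up above, worked out from the code and its comments (not part of the patch):

/*
 *	SYS_AUXPLL = 8		-> AUX PLL = 8 * 12 MHz = 96 MHz
 *
 *  - old silicon (DA/HA/HB): SYS_CPUPLL = 0x20 -> 32 * 12 MHz = 384 MHz core;
 *    FRDIV2 = 3 -> divide by 2 * (3 + 1) = 8 -> FREQ2 = 48 MHz.
 *  - newer silicon: FS2 selects the AUX PLL and FRDIV2 = 0 divides by 2,
 *    so FREQ2 = 96 MHz / 2 = 48 MHz.
 *
 * Either way the OHCI block receives its required 48 MHz clock once
 * SYS_CS_MUX_FQ2 routes FREQ2 to the USB host mux.
 */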
diff --git a/arch/mips/alchemy/devboards/pb1100/Makefile b/arch/mips/alchemy/devboards/pb1100/Makefile
new file mode 100644
index 0000000..c586dd7
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pb1100/Makefile
@@ -0,0 +1,8 @@
+#
+#  Copyright 2000, 2001, 2008 MontaVista Software Inc.
+#  Author: MontaVista Software, Inc. <source@mvista.com>
+#
+# Makefile for the Alchemy Semiconductor Pb1100 board.
+#
+
+obj-y := board_setup.o
diff --git a/arch/mips/alchemy/devboards/pb1100/board_setup.c b/arch/mips/alchemy/devboards/pb1100/board_setup.c
new file mode 100644
index 0000000..4df57fa
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pb1100/board_setup.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2002, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-pb1x00/pb1100.h>
+
+#include <prom.h>
+
+
+struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
+	{ AU1000_GPIO_9,  IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card Fully_Inserted# */
+	{ AU1000_GPIO_10, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card STSCHG# */
+	{ AU1000_GPIO_11, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card IRQ# */
+	{ AU1000_GPIO_13, IRQF_TRIGGER_LOW, 0 }, /* DC_IRQ# */
+};
+
+
+const char *get_system_type(void)
+{
+	return "Alchemy Pb1100";
+}
+
+void board_reset(void)
+{
+	/* Hit BCSR.RST_VDDI[SOFT_RESET] */
+	au_writel(0x00000000, PB1100_RST_VDDI);
+}
+
+void __init board_init_irq(void)
+{
+	au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+}
+
+void __init board_setup(void)
+{
+	volatile void __iomem *base = (volatile void __iomem *)0xac000000UL;
+	char *argptr;
+
+	argptr = prom_getcmdline();
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+	argptr = strstr(argptr, "console=");
+	if (argptr == NULL) {
+		argptr = prom_getcmdline();
+		strcat(argptr, " console=ttyS0,115200");
+	}
+#endif
+
+#ifdef CONFIG_FB_AU1100
+	argptr = strstr(argptr, "video=");
+	if (argptr == NULL) {
+		argptr = prom_getcmdline();
+		/* default panel */
+		/*strcat(argptr, " video=au1100fb:panel:Sharp_320x240_16");*/
+	}
+#endif
+
+#if defined(CONFIG_SOUND_AU1X00) && !defined(CONFIG_SOC_AU1000)
+	/* au1000 does not support vra, au1500 and au1100 do */
+	strcat(argptr, " au1000_audio=vra");
+	argptr = prom_getcmdline();
+#endif
+
+	/* Set AUX clock to 12 MHz * 8 = 96 MHz */
+	au_writel(8, SYS_AUXPLL);
+	au_writel(0, SYS_PININPUTEN);
+	udelay(100);
+
+#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+	{
+		u32 pin_func, sys_freqctrl, sys_clksrc;
+
+		/* Configure pins GPIO[14:9] as GPIO */
+		pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_UR3;
+
+		/* Zero and disable FREQ2 */
+		sys_freqctrl = au_readl(SYS_FREQCTRL0);
+		sys_freqctrl &= ~0xFFF00000;
+		au_writel(sys_freqctrl, SYS_FREQCTRL0);
+
+		/* Zero and disable USBH/USBD/IrDA clock */
+		sys_clksrc = au_readl(SYS_CLKSRC);
+		sys_clksrc &= ~(SYS_CS_CIR | SYS_CS_DIR | SYS_CS_MIR_MASK);
+		au_writel(sys_clksrc, SYS_CLKSRC);
+
+		sys_freqctrl = au_readl(SYS_FREQCTRL0);
+		sys_freqctrl &= ~0xFFF00000;
+
+		sys_clksrc = au_readl(SYS_CLKSRC);
+		sys_clksrc &= ~(SYS_CS_CIR | SYS_CS_DIR | SYS_CS_MIR_MASK);
+
+		/* FREQ2 = aux / 2 = 48 MHz */
+		sys_freqctrl |= (0 << SYS_FC_FRDIV2_BIT) |
+				SYS_FC_FE2 | SYS_FC_FS2;
+		au_writel(sys_freqctrl, SYS_FREQCTRL0);
+
+		/*
+		 * Route 48 MHz FREQ2 into USBH/USBD/IrDA
+		 */
+		sys_clksrc |= SYS_CS_MUX_FQ2 << SYS_CS_MIR_BIT;
+		au_writel(sys_clksrc, SYS_CLKSRC);
+
+		/* Setup the static bus controller */
+		au_writel(0x00000002, MEM_STCFG3);  /* type = PCMCIA */
+		au_writel(0x280E3D07, MEM_STTIME3); /* 250ns cycle time */
+		au_writel(0x10000000, MEM_STADDR3); /* any PCMCIA select */
+
+		/*
+		 * Get USB Functionality pin state (device vs host drive pins).
+		 */
+		pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_USB;
+		/* 2nd USB port is USB host. */
+		pin_func |= SYS_PF_USB;
+		au_writel(pin_func, SYS_PINFUNC);
+	}
+#endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */
+
+	/* Enable sys bus clock divider when IDLE state or no bus activity. */
+	au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
+
+	/* Enable the RTC if not already enabled. */
+	if (!(readb(base + 0x28) & 0x20)) {
+		writeb(readb(base + 0x28) | 0x20, base + 0x28);
+		au_sync();
+	}
+	/* Put the clock in BCD mode. */
+	if (readb(base + 0x2C) & 0x4) { /* reg B */
+		writeb(readb(base + 0x2c) & ~0x4, base + 0x2c);
+		au_sync();
+	}
+}
diff --git a/arch/mips/alchemy/devboards/pb1200/Makefile b/arch/mips/alchemy/devboards/pb1200/Makefile
new file mode 100644
index 0000000..c8c3a99
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pb1200/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Alchemy Semiconductor Pb1200/DBAu1200 boards.
+#
+
+obj-y := board_setup.o irqmap.o platform.o
+
+EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/alchemy/devboards/pb1200/board_setup.c b/arch/mips/alchemy/devboards/pb1200/board_setup.c
new file mode 100644
index 0000000..94e6b7e
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pb1200/board_setup.c
@@ -0,0 +1,164 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ *	Alchemy Pb1200/Db1200 board setup.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include <prom.h>
+#include <au1xxx.h>
+
+
+const char *get_system_type(void)
+{
+	return "Alchemy Pb1200";
+}
+
+void board_reset(void)
+{
+	bcsr->resets = 0;
+	bcsr->system = 0;
+}
+
+void __init board_setup(void)
+{
+	char *argptr;
+
+	argptr = prom_getcmdline();
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+	argptr = strstr(argptr, "console=");
+	if (argptr == NULL) {
+		argptr = prom_getcmdline();
+		strcat(argptr, " console=ttyS0,115200");
+	}
+#endif
+#ifdef CONFIG_FB_AU1200
+	strcat(argptr, " video=au1200fb:panel:bs");
+#endif
+
+#if 0
+	{
+		u32 pin_func;
+
+		/*
+		 * Enable PSC1 SYNC for AC97.  Normally done in the audio driver,
+		 * but it is board-specific code, so put it here.
+		 */
+		pin_func = au_readl(SYS_PINFUNC);
+		au_sync();
+		pin_func |= SYS_PF_MUST_BE_SET | SYS_PF_PSC1_S1;
+		au_writel(pin_func, SYS_PINFUNC);
+
+		au_writel(0, (u32)bcsr | 0x10); /* turn off PCMCIA power */
+		au_sync();
+	}
+#endif
+
+#if defined(CONFIG_I2C_AU1550)
+	{
+		u32 freq0, clksrc;
+		u32 pin_func;
+
+		/* Select SMBus in CPLD */
+		bcsr->resets &= ~BCSR_RESETS_PCS0MUX;
+
+		pin_func = au_readl(SYS_PINFUNC);
+		au_sync();
+		pin_func &= ~(SYS_PINFUNC_P0A | SYS_PINFUNC_P0B);
+		/* Set GPIOs correctly */
+		pin_func |= 2 << 17;
+		au_writel(pin_func, SYS_PINFUNC);
+		au_sync();
+
+		/* The I2C driver depends on a 50 MHz clock */
+		freq0 = au_readl(SYS_FREQCTRL0);
+		au_sync();
+		freq0 &= ~(SYS_FC_FRDIV1_MASK | SYS_FC_FS1 | SYS_FC_FE1);
+		freq0 |= 3 << SYS_FC_FRDIV1_BIT;
+		/* 396 MHz / ((3 + 1) * 2) == 49.5 MHz */
+		au_writel(freq0, SYS_FREQCTRL0);
+		au_sync();
+		freq0 |= SYS_FC_FE1;
+		au_writel(freq0, SYS_FREQCTRL0);
+		au_sync();
+
+		clksrc = au_readl(SYS_CLKSRC);
+		au_sync();
+		clksrc &= ~(SYS_CS_CE0 | SYS_CS_DE0 | SYS_CS_ME0_MASK);
+		/* Bit 22 is EXTCLK0 for PSC0 */
+		clksrc |= SYS_CS_MUX_FQ1 << SYS_CS_ME0_BIT;
+		au_writel(clksrc, SYS_CLKSRC);
+		au_sync();
+	}
+#endif
+
+	/*
+	 * The Pb1200 development board uses external MUX for PSC0 to
+	 * support SMB/SPI. bcsr->resets bit 12: 0=SMB 1=SPI
+	 */
+#ifdef CONFIG_I2C_AU1550
+	bcsr->resets &= ~BCSR_RESETS_PCS0MUX;
+#endif
+	au_sync();
+
+#ifdef CONFIG_MIPS_PB1200
+	printk(KERN_INFO "AMD Alchemy Pb1200 Board\n");
+#endif
+#ifdef CONFIG_MIPS_DB1200
+	printk(KERN_INFO "AMD Alchemy Db1200 Board\n");
+#endif
+}
+
+int board_au1200fb_panel(void)
+{
+	BCSR *bcsr = (BCSR *)BCSR_KSEG1_ADDR;
+	int p;
+
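+	/* The panel type is selected by board switch bits [11:8]. */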
+	p = bcsr->switches;
+	p >>= 8;
+	p &= 0x0F;
+	return p;
+}
+
+int board_au1200fb_panel_init(void)
+{
+	/* Apply power */
+	BCSR *bcsr = (BCSR *)BCSR_KSEG1_ADDR;
+
+	bcsr->board |= BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD | BCSR_BOARD_LCDBL;
+	/* printk(KERN_DEBUG "board_au1200fb_panel_init()\n"); */
+	return 0;
+}
+
+int board_au1200fb_panel_shutdown(void)
+{
+	/* Remove power */
+	BCSR *bcsr = (BCSR *)BCSR_KSEG1_ADDR;
+
+	bcsr->board &= ~(BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD |
+			 BCSR_BOARD_LCDBL);
+	/* printk(KERN_DEBUG "board_au1200fb_panel_shutdown()\n"); */
+	return 0;
+}
diff --git a/arch/mips/alchemy/devboards/pb1200/irqmap.c b/arch/mips/alchemy/devboards/pb1200/irqmap.c
new file mode 100644
index 0000000..fe47498
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pb1200/irqmap.c
@@ -0,0 +1,134 @@
+/*
+ * BRIEF MODULE DESCRIPTION
+ *	Au1xxx irq map table
+ *
+ *  This program is free software; you can redistribute	 it and/or modify it
+ *  under  the terms of	 the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the	License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED	  ``AS	IS'' AND   ANY	EXPRESS OR IMPLIED
+ *  WARRANTIES,	  INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO	EVENT  SHALL   THE AUTHOR  BE	 LIABLE FOR ANY	  DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED	  TO, PROCUREMENT OF  SUBSTITUTE GOODS	OR SERVICES; LOSS OF
+ *  USE, DATA,	OR PROFITS; OR	BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN	 CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach-au1x00/au1000.h>
+
+#ifdef CONFIG_MIPS_PB1200
+#include <asm/mach-pb1x00/pb1200.h>
+#endif
+
+#ifdef CONFIG_MIPS_DB1200
+#include <asm/mach-db1x00/db1200.h>
+#define PB1200_INT_BEGIN DB1200_INT_BEGIN
+#define PB1200_INT_END DB1200_INT_END
+#endif
+
+struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
+	/* This is external interrupt cascade */
+	{ AU1000_GPIO_7, IRQF_TRIGGER_LOW, 0 },
+};
+
+
+/*
+ * Support for External interrupts on the Pb1200 Development platform.
+ */
+
+static void pb1200_cascade_handler(unsigned int irq, struct irq_desc *d)
+{
+	unsigned short bisr = bcsr->int_status;
+
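+	/* Dispatch every pending CPLD interrupt; "bisr &= bisr - 1" clears
+	 * the lowest set bit each pass.
+	 */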
+	for ( ; bisr; bisr &= bisr - 1)
+		generic_handle_irq(PB1200_INT_BEGIN + __ffs(bisr));
+}
+
+/* NOTE: both the enable and mask bits must be cleared, otherwise the
+ * CPLD generates tons of spurious interrupts (at least on the DB1200).
+ */
+static void pb1200_mask_irq(unsigned int irq_nr)
+{
+	bcsr->intclr_mask = 1 << (irq_nr - PB1200_INT_BEGIN);
+	bcsr->intclr = 1 << (irq_nr - PB1200_INT_BEGIN);
+	au_sync();
+}
+
+static void pb1200_maskack_irq(unsigned int irq_nr)
+{
+	bcsr->intclr_mask = 1 << (irq_nr - PB1200_INT_BEGIN);
+	bcsr->intclr = 1 << (irq_nr - PB1200_INT_BEGIN);
+	bcsr->int_status = 1 << (irq_nr - PB1200_INT_BEGIN);	/* ack */
+	au_sync();
+}
+
+static void pb1200_unmask_irq(unsigned int irq_nr)
+{
+	bcsr->intset = 1 << (irq_nr - PB1200_INT_BEGIN);
+	bcsr->intset_mask = 1 << (irq_nr - PB1200_INT_BEGIN);
+	au_sync();
+}
+
+static struct irq_chip pb1200_cpld_irq_type = {
+#ifdef CONFIG_MIPS_PB1200
+	.name = "Pb1200 Ext",
+#endif
+#ifdef CONFIG_MIPS_DB1200
+	.name = "Db1200 Ext",
+#endif
+	.mask		= pb1200_mask_irq,
+	.mask_ack	= pb1200_maskack_irq,
+	.unmask		= pb1200_unmask_irq,
+};
+
+void __init board_init_irq(void)
+{
+	unsigned int irq;
+
+	au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+
+#ifdef CONFIG_MIPS_PB1200
+	/* We have a problem with CPLD rev 3. */
+	if (((bcsr->whoami & BCSR_WHOAMI_CPLD) >> 4) <= 3) {
+		printk(KERN_ERR "WARNING!!!\n");
+		printk(KERN_ERR "WARNING!!!\n");
+		printk(KERN_ERR "WARNING!!!\n");
+		printk(KERN_ERR "WARNING!!!\n");
+		printk(KERN_ERR "WARNING!!!\n");
+		printk(KERN_ERR "WARNING!!!\n");
+		printk(KERN_ERR "Pb1200 must be at CPLD rev 4. Please have Pb1200\n");
+		printk(KERN_ERR "updated to the latest revision. This software will\n");
+		printk(KERN_ERR "not work on anything less than CPLD rev 4.\n");
+		printk(KERN_ERR "WARNING!!!\n");
+		printk(KERN_ERR "WARNING!!!\n");
+		printk(KERN_ERR "WARNING!!!\n");
+		printk(KERN_ERR "WARNING!!!\n");
+		printk(KERN_ERR "WARNING!!!\n");
+		printk(KERN_ERR "WARNING!!!\n");
+		panic("Game over.  Your score is 0.");
+	}
+#endif
+	/* mask & disable & ack all */
+	bcsr->intclr_mask = 0xffff;
+	bcsr->intclr = 0xffff;
+	bcsr->int_status = 0xffff;
+	au_sync();
+
+	for (irq = PB1200_INT_BEGIN; irq <= PB1200_INT_END; irq++)
+		set_irq_chip_and_handler_name(irq, &pb1200_cpld_irq_type,
+					 handle_level_irq, "level");
+
+	set_irq_chained_handler(AU1000_GPIO_7, pb1200_cascade_handler);
+}
diff --git a/arch/mips/alchemy/pb1200/platform.c b/arch/mips/alchemy/devboards/pb1200/platform.c
similarity index 100%
rename from arch/mips/alchemy/pb1200/platform.c
rename to arch/mips/alchemy/devboards/pb1200/platform.c
diff --git a/arch/mips/alchemy/devboards/pb1500/Makefile b/arch/mips/alchemy/devboards/pb1500/Makefile
new file mode 100644
index 0000000..173b419
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pb1500/Makefile
@@ -0,0 +1,8 @@
+#
+#  Copyright 2000, 2001, 2008 MontaVista Software Inc.
+#  Author: MontaVista Software, Inc. <source@mvista.com>
+#
+# Makefile for the Alchemy Semiconductor Pb1500 board.
+#
+
+obj-y := board_setup.o
diff --git a/arch/mips/alchemy/devboards/pb1500/board_setup.c b/arch/mips/alchemy/devboards/pb1500/board_setup.c
new file mode 100644
index 0000000..fed3b09
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pb1500/board_setup.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2000, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-pb1x00/pb1500.h>
+
+#include <prom.h>
+
+
+char irq_tab_alchemy[][5] __initdata = {
+	[12] = { -1, INTA, INTX, INTX, INTX },   /* IDSEL 12 - HPT370	*/
+	[13] = { -1, INTA, INTB, INTC, INTD },   /* IDSEL 13 - PCI slot */
+};
+
+struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
+	{ AU1500_GPIO_204, IRQF_TRIGGER_HIGH, 0 },
+	{ AU1500_GPIO_201, IRQF_TRIGGER_LOW, 0 },
+	{ AU1500_GPIO_202, IRQF_TRIGGER_LOW, 0 },
+	{ AU1500_GPIO_203, IRQF_TRIGGER_LOW, 0 },
+	{ AU1500_GPIO_205, IRQF_TRIGGER_LOW, 0 },
+};
+
+
+const char *get_system_type(void)
+{
+	return "Alchemy Pb1500";
+}
+
+void board_reset(void)
+{
+	/* Hit BCSR.RST_VDDI[SOFT_RESET] */
+	au_writel(0x00000000, PB1500_RST_VDDI);
+}
+
+void __init board_init_irq(void)
+{
+	au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+}
+
+void __init board_setup(void)
+{
+	u32 pin_func;
+	u32 sys_freqctrl, sys_clksrc;
+	char *argptr;
+
+	argptr = prom_getcmdline();
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+	argptr = strstr(argptr, "console=");
+	if (argptr == NULL) {
+		argptr = prom_getcmdline();
+		strcat(argptr, " console=ttyS0,115200");
+	}
+#endif
+
+#if defined(CONFIG_SOUND_AU1X00) && !defined(CONFIG_SOC_AU1000)
+	/* au1000 does not support vra, au1500 and au1100 do */
+	strcat(argptr, " au1000_audio=vra");
+	argptr = prom_getcmdline();
+#endif
+
+	sys_clksrc = sys_freqctrl = pin_func = 0;
+	/* Set AUX clock to 12 MHz * 8 = 96 MHz */
+	au_writel(8, SYS_AUXPLL);
+	au_writel(0, SYS_PINSTATERD);
+	udelay(100);
+
+#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+
+	/* GPIO201 is input for PCMCIA card detect */
+	/* GPIO203 is input for PCMCIA interrupt request */
+	au_writel(au_readl(GPIO2_DIR) & ~((1 << 1) | (1 << 3)), GPIO2_DIR);
+
+	/* Zero and disable FREQ2 */
+	sys_freqctrl = au_readl(SYS_FREQCTRL0);
+	sys_freqctrl &= ~0xFFF00000;
+	au_writel(sys_freqctrl, SYS_FREQCTRL0);
+
+	/* zero and disable USBH/USBD clocks */
+	sys_clksrc = au_readl(SYS_CLKSRC);
+	sys_clksrc &= ~(SYS_CS_CUD | SYS_CS_DUD | SYS_CS_MUD_MASK |
+			SYS_CS_CUH | SYS_CS_DUH | SYS_CS_MUH_MASK);
+	au_writel(sys_clksrc, SYS_CLKSRC);
+
+	sys_freqctrl = au_readl(SYS_FREQCTRL0);
+	sys_freqctrl &= ~0xFFF00000;
+
+	sys_clksrc = au_readl(SYS_CLKSRC);
+	sys_clksrc &= ~(SYS_CS_CUD | SYS_CS_DUD | SYS_CS_MUD_MASK |
+			SYS_CS_CUH | SYS_CS_DUH | SYS_CS_MUH_MASK);
+
+	/* FREQ2 = aux/2 = 48 MHz */
+	sys_freqctrl |= (0 << SYS_FC_FRDIV2_BIT) | SYS_FC_FE2 | SYS_FC_FS2;
+	au_writel(sys_freqctrl, SYS_FREQCTRL0);
+
+	/*
+	 * Route 48MHz FREQ2 into USB Host and/or Device
+	 */
+	sys_clksrc |= SYS_CS_MUX_FQ2 << SYS_CS_MUH_BIT;
+	au_writel(sys_clksrc, SYS_CLKSRC);
+
+	pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_USB;
+	/* 2nd USB port is USB host */
+	pin_func |= SYS_PF_USB;
+	au_writel(pin_func, SYS_PINFUNC);
+#endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */
+
+#ifdef CONFIG_PCI
+	/* Setup PCI bus controller */
+	au_writel(0, Au1500_PCI_CMEM);
+	au_writel(0x00003fff, Au1500_CFG_BASE);
+#if defined(__MIPSEB__)
+	au_writel(0xf | (2 << 6) | (1 << 4), Au1500_PCI_CFG);
+#else
+	au_writel(0xf, Au1500_PCI_CFG);
+#endif
+	au_writel(0xf0000000, Au1500_PCI_MWMASK_DEV);
+	au_writel(0, Au1500_PCI_MWBASE_REV_CCL);
+	au_writel(0x02a00356, Au1500_PCI_STATCMD);
+	au_writel(0x00003c04, Au1500_PCI_HDRTYPE);
+	au_writel(0x00000008, Au1500_PCI_MBAR);
+	au_sync();
+#endif
+
+	/* Enable sys bus clock divider when IDLE state or no bus activity. */
+	au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
+
+	/* Enable the RTC if not already enabled */
+	if (!(au_readl(0xac000028) & 0x20)) {
+		printk(KERN_INFO "enabling clock ...\n");
+		au_writel((au_readl(0xac000028) | 0x20), 0xac000028);
+	}
+	/* Put the clock in BCD mode */
+	if (au_readl(0xac00002c) & 0x4) { /* reg B */
+		au_writel(au_readl(0xac00002c) & ~0x4, 0xac00002c);
+		au_sync();
+	}
+}
diff --git a/arch/mips/alchemy/devboards/pb1550/Makefile b/arch/mips/alchemy/devboards/pb1550/Makefile
new file mode 100644
index 0000000..cff95bc
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pb1550/Makefile
@@ -0,0 +1,8 @@
+#
+#  Copyright 2000, 2008 MontaVista Software Inc.
+#  Author: MontaVista Software, Inc. <source@mvista.com>
+#
+# Makefile for the Alchemy Semiconductor Pb1550 board.
+#
+
+obj-y := board_setup.o
diff --git a/arch/mips/alchemy/devboards/pb1550/board_setup.c b/arch/mips/alchemy/devboards/pb1550/board_setup.c
new file mode 100644
index 0000000..b6e9e7d
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pb1550/board_setup.c
@@ -0,0 +1,92 @@
+/*
+ *
+ * BRIEF MODULE DESCRIPTION
+ *	Alchemy Pb1550 board setup.
+ *
+ * Copyright 2000, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-pb1x00/pb1550.h>
+
+#include <prom.h>
+
+
+char irq_tab_alchemy[][5] __initdata = {
+	[12] = { -1, INTB, INTC, INTD, INTA }, /* IDSEL 12 - PCI slot 2 (left)  */
+	[13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot 1 (right) */
+};
+
+struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
+	{ AU1000_GPIO_0, IRQF_TRIGGER_LOW, 0 },
+	{ AU1000_GPIO_1, IRQF_TRIGGER_LOW, 0 },
+};
+
+const char *get_system_type(void)
+{
+	return "Alchemy Pb1550";
+}
+
+void board_reset(void)
+{
+	/* Hit BCSR.SYSTEM[RESET] */
+	au_writew(au_readw(0xAF00001C) & ~BCSR_SYSTEM_RESET, 0xAF00001C);
+}
+
+void __init board_init_irq(void)
+{
+	au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+}
+
+void __init board_setup(void)
+{
+	u32 pin_func;
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+	char *argptr;
+	argptr = prom_getcmdline();
+	argptr = strstr(argptr, "console=");
+	if (argptr == NULL) {
+		argptr = prom_getcmdline();
+		strcat(argptr, " console=ttyS0,115200");
+	}
+#endif
+
+	/*
+	 * Enable PSC1 SYNC for AC'97.  Normally done in the audio driver,
+	 * but it is board-specific code, so put it here.
+	 */
+	pin_func = au_readl(SYS_PINFUNC);
+	au_sync();
+	pin_func |= SYS_PF_MUST_BE_SET | SYS_PF_PSC1_S1;
+	au_writel(pin_func, SYS_PINFUNC);
+
+	au_writel(0, (u32)bcsr | 0x10); /* turn off PCMCIA power */
+	au_sync();
+
+	printk(KERN_INFO "AMD Alchemy Pb1550 Board\n");
+}
diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
new file mode 100644
index 0000000..d5eb9c3
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pm.c
@@ -0,0 +1,229 @@
+/*
+ * Alchemy Development Board example suspend userspace interface.
+ *
+ * (c) 2008 Manuel Lauss <mano@roarinelk.homelinux.net>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/suspend.h>
+#include <linux/sysfs.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/*
+ * Generic suspend userspace interface for Alchemy development boards.
+ * This code exports a few sysfs nodes under /sys/power/db1x/ which
+ * can be used by userspace to enable or disable the Au1x-provided wakeup
+ * sources and to configure the timeout after which the TOYMATCH2 irq
+ * is to trigger a wakeup.
+ */
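+
+/*
+ * A minimal usage sketch (node names as registered below, exposed under
+ * /sys/power/db1x/):
+ *
+ *   echo 1  > /sys/power/db1x/gpio0          # arm GPIO0 as a wakeup source
+ *   echo 30 > /sys/power/db1x/timer_timeout  # wake up 30 seconds after suspend
+ *   echo 1  > /sys/power/db1x/timer          # arm the TOYMATCH2 timer wakeup
+ *   echo mem > /sys/power/state              # suspend
+ *   cat /sys/power/db1x/wakesrc              # see which source woke the board
+ */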
+
+
+static unsigned long db1x_pm_sleep_secs;
+static unsigned long db1x_pm_wakemsk;
+static unsigned long db1x_pm_last_wakesrc;
+
+static int db1x_pm_enter(suspend_state_t state)
+{
+	/* enable GPIO based wakeup */
+	au_writel(1, SYS_PININPUTEN);
+
+	/* clear and setup wake cause and source */
+	au_writel(0, SYS_WAKEMSK);
+	au_sync();
+	au_writel(0, SYS_WAKESRC);
+	au_sync();
+
+	au_writel(db1x_pm_wakemsk, SYS_WAKEMSK);
+	au_sync();
+
+	/* setup 1Hz-timer-based wakeup: wait for reg access */
+	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20)
+		asm volatile ("nop");
+
+	au_writel(au_readl(SYS_TOYREAD) + db1x_pm_sleep_secs, SYS_TOYMATCH2);
+	au_sync();
+
+	/* wait for value to really hit the register */
+	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20)
+		asm volatile ("nop");
+
+	/* ...and now the sandman can come! */
+	au_sleep();
+
+	return 0;
+}
+
+static int db1x_pm_begin(suspend_state_t state)
+{
+	if (!db1x_pm_wakemsk) {
+		printk(KERN_ERR "db1x: no wakeup source activated!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void db1x_pm_end(void)
+{
+	/* Read and store the wakeup source, then clear the register.
+	 * To be able to clear it, WAKEMSK must be cleared first.
+	 */
+	db1x_pm_last_wakesrc = au_readl(SYS_WAKESRC);
+
+	au_writel(0, SYS_WAKEMSK);
+	au_writel(0, SYS_WAKESRC);
+	au_sync();
+}
+
+static struct platform_suspend_ops db1x_pm_ops = {
+	.valid		= suspend_valid_only_mem,
+	.begin		= db1x_pm_begin,
+	.enter		= db1x_pm_enter,
+	.end		= db1x_pm_end,
+};
+
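+/* Compare the sysfs attribute name against the given token, e.g. ATTRCMP(timer). */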
+#define ATTRCMP(x) (0 == strcmp(attr->attr.name, #x))
+
+static ssize_t db1x_pmattr_show(struct kobject *kobj,
+				struct kobj_attribute *attr,
+				char *buf)
+{
+	int idx;
+
+	if (ATTRCMP(timer_timeout))
+		return sprintf(buf, "%lu\n", db1x_pm_sleep_secs);
+
+	else if (ATTRCMP(timer))
+		return sprintf(buf, "%u\n",
+				!!(db1x_pm_wakemsk & SYS_WAKEMSK_M2));
+
+	else if (ATTRCMP(wakesrc))
+		return sprintf(buf, "%lu\n", db1x_pm_last_wakesrc);
+
+	else if (ATTRCMP(gpio0) || ATTRCMP(gpio1) || ATTRCMP(gpio2) ||
+		 ATTRCMP(gpio3) || ATTRCMP(gpio4) || ATTRCMP(gpio5) ||
+		 ATTRCMP(gpio6) || ATTRCMP(gpio7)) {
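+		/* the attribute is named "gpioN"; character 4 holds the GPIO number */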
+		idx = (attr->attr.name)[4] - '0';
+		return sprintf(buf, "%d\n",
+			!!(db1x_pm_wakemsk & SYS_WAKEMSK_GPIO(idx)));
+
+	} else if (ATTRCMP(wakemsk)) {
+		return sprintf(buf, "%08lx\n", db1x_pm_wakemsk);
+	}
+
+	return -ENOENT;
+}
+
+static ssize_t db1x_pmattr_store(struct kobject *kobj,
+				 struct kobj_attribute *attr,
+				 const char *instr,
+				 size_t bytes)
+{
+	unsigned long l;
+	int tmp;
+
+	if (ATTRCMP(timer_timeout)) {
+		tmp = strict_strtoul(instr, 0, &l);
+		if (tmp)
+			return tmp;
+
+		db1x_pm_sleep_secs = l;
+
+	} else if (ATTRCMP(timer)) {
+		if (instr[0] != '0')
+			db1x_pm_wakemsk |= SYS_WAKEMSK_M2;
+		else
+			db1x_pm_wakemsk &= ~SYS_WAKEMSK_M2;
+
+	} else if (ATTRCMP(gpio0) || ATTRCMP(gpio1) || ATTRCMP(gpio2) ||
+		   ATTRCMP(gpio3) || ATTRCMP(gpio4) || ATTRCMP(gpio5) ||
+		   ATTRCMP(gpio6) || ATTRCMP(gpio7)) {
+		tmp = (attr->attr.name)[4] - '0';
+		if (instr[0] != '0') {
+			db1x_pm_wakemsk |= SYS_WAKEMSK_GPIO(tmp);
+		} else {
+			db1x_pm_wakemsk &= ~SYS_WAKEMSK_GPIO(tmp);
+		}
+
+	} else if (ATTRCMP(wakemsk)) {
+		tmp = strict_strtoul(instr, 0, &l);
+		if (tmp)
+			return tmp;
+
+		db1x_pm_wakemsk = l & 0x0000003f;
+
+	} else
+		bytes = -ENOENT;
+
+	return bytes;
+}
+
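+/* Declare one sysfs attribute wired to the common show/store handlers above. */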
+#define ATTR(x)							\
+	static struct kobj_attribute x##_attribute = 		\
+		__ATTR(x, 0664, db1x_pmattr_show,		\
+				db1x_pmattr_store);
+
+ATTR(gpio0)		/* GPIO-based wakeup enable */
+ATTR(gpio1)
+ATTR(gpio2)
+ATTR(gpio3)
+ATTR(gpio4)
+ATTR(gpio5)
+ATTR(gpio6)
+ATTR(gpio7)
+ATTR(timer)		/* TOYMATCH2-based wakeup enable */
+ATTR(timer_timeout)	/* timer-based wakeup timeout value, in seconds */
+ATTR(wakesrc)		/* contents of SYS_WAKESRC after last wakeup */
+ATTR(wakemsk)		/* direct access to SYS_WAKEMSK */
+
+#define ATTR_LIST(x)	& x ## _attribute.attr
+static struct attribute *db1x_pmattrs[] = {
+	ATTR_LIST(gpio0),
+	ATTR_LIST(gpio1),
+	ATTR_LIST(gpio2),
+	ATTR_LIST(gpio3),
+	ATTR_LIST(gpio4),
+	ATTR_LIST(gpio5),
+	ATTR_LIST(gpio6),
+	ATTR_LIST(gpio7),
+	ATTR_LIST(timer),
+	ATTR_LIST(timer_timeout),
+	ATTR_LIST(wakesrc),
+	ATTR_LIST(wakemsk),
+	NULL,		/* terminator */
+};
+
+static struct attribute_group db1x_pmattr_group = {
+	.name	= "db1x",
+	.attrs	= db1x_pmattrs,
+};
+
+/*
+ * Initialize suspend interface
+ */
+static int __init pm_init(void)
+{
+	/* init TOY to tick at 1Hz if not already done. No need to wait
+	 * for confirmation since there's plenty of time from here to
+	 * the next suspend cycle.
+	 */
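+	/* A trim of 32767 divides the 32.768 kHz TOY clock down to 1 Hz. */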
+	if (au_readl(SYS_TOYTRIM) != 32767) {
+		au_writel(32767, SYS_TOYTRIM);
+		au_sync();
+	}
+
+	db1x_pm_last_wakesrc = au_readl(SYS_WAKESRC);
+
+	au_writel(0, SYS_WAKESRC);
+	au_sync();
+	au_writel(0, SYS_WAKEMSK);
+	au_sync();
+
+	suspend_set_ops(&db1x_pm_ops);
+
+	return sysfs_create_group(power_kobj, &db1x_pmattr_group);
+}
+
+late_initcall(pm_init);
diff --git a/arch/mips/alchemy/devboards/prom.c b/arch/mips/alchemy/devboards/prom.c
new file mode 100644
index 0000000..0042bd6
--- /dev/null
+++ b/arch/mips/alchemy/devboards/prom.c
@@ -0,0 +1,62 @@
+/*
+ * Common code used by all Alchemy development boards.
+ *
+ * Extracted from files which had this to say:
+ *
+ * Copyright 2000, 2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/bootinfo.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <prom.h>
+
+#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_DB1000) || \
+    defined(CONFIG_MIPS_PB1100) || defined(CONFIG_MIPS_DB1100) || \
+    defined(CONFIG_MIPS_PB1500) || defined(CONFIG_MIPS_DB1500) || \
+    defined(CONFIG_MIPS_BOSPORUS) || defined(CONFIG_MIPS_MIRAGE)
+#define ALCHEMY_BOARD_DEFAULT_MEMSIZE	0x04000000
+
+#else	/* Au1550/Au1200-based development boards */
+#define ALCHEMY_BOARD_DEFAULT_MEMSIZE	0x08000000
+#endif
+
+void __init prom_init(void)
+{
+	unsigned char *memsize_str;
+	unsigned long memsize;
+
+	prom_argc = (int)fw_arg0;
+	prom_argv = (char **)fw_arg1;
+	prom_envp = (char **)fw_arg2;
+
+	prom_init_cmdline();
+	memsize_str = prom_getenv("memsize");
+	if (!memsize_str)
+		memsize = ALCHEMY_BOARD_DEFAULT_MEMSIZE;
+	else
+		strict_strtoul(memsize_str, 0, &memsize);
+	add_memory_region(0, memsize, BOOT_MEM_RAM);
+}
diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c
index 3f80791..8ed1ae1 100644
--- a/arch/mips/alchemy/mtx-1/board_setup.c
+++ b/arch/mips/alchemy/mtx-1/board_setup.c
@@ -32,6 +32,8 @@
 
 #include <asm/mach-au1x00/au1000.h>
 
+#include <prom.h>
+
 extern int (*board_pci_idsel)(unsigned int devsel, int assert);
 int mtx1_pci_idsel(unsigned int devsel, int assert);
 
@@ -43,6 +45,16 @@
 
 void __init board_setup(void)
 {
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+	char *argptr;
+	argptr = prom_getcmdline();
+	argptr = strstr(argptr, "console=");
+	if (argptr == NULL) {
+		argptr = prom_getcmdline();
+		strcat(argptr, " console=ttyS0,115200");
+	}
+#endif
+
 #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
 	/* Enable USB power switch */
 	au_writel(au_readl(GPIO2_DIR) | 0x10, GPIO2_DIR);
diff --git a/arch/mips/alchemy/mtx-1/init.c b/arch/mips/alchemy/mtx-1/init.c
index 3bae13c..5e871c8 100644
--- a/arch/mips/alchemy/mtx-1/init.c
+++ b/arch/mips/alchemy/mtx-1/init.c
@@ -55,6 +55,6 @@
 	if (!memsize_str)
 		memsize = 0x04000000;
 	else
-		strict_strtol(memsize_str, 0, &memsize);
+		strict_strtoul(memsize_str, 0, &memsize);
 	add_memory_region(0, memsize, BOOT_MEM_RAM);
 }
diff --git a/arch/mips/alchemy/mtx-1/irqmap.c b/arch/mips/alchemy/mtx-1/irqmap.c
index f2bf029..f1ab12a 100644
--- a/arch/mips/alchemy/mtx-1/irqmap.c
+++ b/arch/mips/alchemy/mtx-1/irqmap.c
@@ -27,7 +27,7 @@
  */
 
 #include <linux/init.h>
-
+#include <linux/interrupt.h>
 #include <asm/mach-au1x00/au1000.h>
 
 char irq_tab_alchemy[][5] __initdata = {
@@ -42,11 +42,15 @@
 };
 
 struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
-       { AU1500_GPIO_204, INTC_INT_HIGH_LEVEL, 0 },
-       { AU1500_GPIO_201, INTC_INT_LOW_LEVEL, 0 },
-       { AU1500_GPIO_202, INTC_INT_LOW_LEVEL, 0 },
-       { AU1500_GPIO_203, INTC_INT_LOW_LEVEL, 0 },
-       { AU1500_GPIO_205, INTC_INT_LOW_LEVEL, 0 },
+       { AU1500_GPIO_204, IRQF_TRIGGER_HIGH, 0 },
+       { AU1500_GPIO_201, IRQF_TRIGGER_LOW, 0 },
+       { AU1500_GPIO_202, IRQF_TRIGGER_LOW, 0 },
+       { AU1500_GPIO_203, IRQF_TRIGGER_LOW, 0 },
+       { AU1500_GPIO_205, IRQF_TRIGGER_LOW, 0 },
 };
 
-int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
+
+void __init board_init_irq(void)
+{
+	au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+}
diff --git a/arch/mips/alchemy/pb1000/Makefile b/arch/mips/alchemy/pb1000/Makefile
deleted file mode 100644
index 99bbec0..0000000
--- a/arch/mips/alchemy/pb1000/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-#  Copyright 2000, 2008 MontaVista Software Inc.
-#  Author: MontaVista Software, Inc. <source@mvista.com>
-#
-# Makefile for the Alchemy Semiconductor Pb1000 board.
-#
-
-lib-y := init.o board_setup.o irqmap.o
diff --git a/arch/mips/alchemy/pb1000/board_setup.c b/arch/mips/alchemy/pb1000/board_setup.c
deleted file mode 100644
index 25df167..0000000
--- a/arch/mips/alchemy/pb1000/board_setup.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright 2000, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/delay.h>
-
-#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-pb1x00/pb1000.h>
-
-void board_reset(void)
-{
-}
-
-void __init board_setup(void)
-{
-	u32 pin_func, static_cfg0;
-	u32 sys_freqctrl, sys_clksrc;
-	u32 prid = read_c0_prid();
-
-	/* Set AUX clock to 12 MHz * 8 = 96 MHz */
-	au_writel(8, SYS_AUXPLL);
-	au_writel(0, SYS_PINSTATERD);
-	udelay(100);
-
-#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
-	/* Zero and disable FREQ2 */
-	sys_freqctrl = au_readl(SYS_FREQCTRL0);
-	sys_freqctrl &= ~0xFFF00000;
-	au_writel(sys_freqctrl, SYS_FREQCTRL0);
-
-	/* Zero and disable USBH/USBD clocks */
-	sys_clksrc = au_readl(SYS_CLKSRC);
-	sys_clksrc &= ~(SYS_CS_CUD | SYS_CS_DUD | SYS_CS_MUD_MASK |
-		        SYS_CS_CUH | SYS_CS_DUH | SYS_CS_MUH_MASK);
-	au_writel(sys_clksrc, SYS_CLKSRC);
-
-	sys_freqctrl = au_readl(SYS_FREQCTRL0);
-	sys_freqctrl &= ~0xFFF00000;
-
-	sys_clksrc = au_readl(SYS_CLKSRC);
-	sys_clksrc &= ~(SYS_CS_CUD | SYS_CS_DUD | SYS_CS_MUD_MASK |
-		        SYS_CS_CUH | SYS_CS_DUH | SYS_CS_MUH_MASK);
-
-	switch (prid & 0x000000FF) {
-	case 0x00: /* DA */
-	case 0x01: /* HA */
-	case 0x02: /* HB */
-		/* CPU core freq to 48 MHz to slow it way down... */
-		au_writel(4, SYS_CPUPLL);
-
-		/*
-		 * Setup 48 MHz FREQ2 from CPUPLL for USB Host
-		 * FRDIV2 = 3 -> div by 8 of 384 MHz -> 48 MHz
-		 */
-		sys_freqctrl |= (3 << SYS_FC_FRDIV2_BIT) | SYS_FC_FE2;
-		au_writel(sys_freqctrl, SYS_FREQCTRL0);
-
-		/* CPU core freq to 384 MHz */
-		au_writel(0x20, SYS_CPUPLL);
-
-		printk(KERN_INFO "Au1000: 48 MHz OHCI workaround enabled\n");
-		break;
-
-	default: /* HC and newer */
-		/* FREQ2 = aux / 2 = 48 MHz */
-		sys_freqctrl |= (0 << SYS_FC_FRDIV2_BIT) |
-				 SYS_FC_FE2 | SYS_FC_FS2;
-		au_writel(sys_freqctrl, SYS_FREQCTRL0);
-		break;
-	}
-
-	/*
-	 * Route 48 MHz FREQ2 into USB Host and/or Device
-	 */
-	sys_clksrc |= SYS_CS_MUX_FQ2 << SYS_CS_MUH_BIT;
-	au_writel(sys_clksrc, SYS_CLKSRC);
-
-	/* Configure pins GPIO[14:9] as GPIO */
-	pin_func = au_readl(SYS_PINFUNC) & ~(SYS_PF_UR3 | SYS_PF_USB);
-
-	/* 2nd USB port is USB host */
-	pin_func |= SYS_PF_USB;
-
-	au_writel(pin_func, SYS_PINFUNC);
-	au_writel(0x2800, SYS_TRIOUTCLR);
-	au_writel(0x0030, SYS_OUTPUTCLR);
-#endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */
-
-	/* Make GPIO 15 an input (for interrupt line) */
-	pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_IRF;
-	/* We don't need I2S, so make it available for GPIO[31:29] */
-	pin_func |= SYS_PF_I2S;
-	au_writel(pin_func, SYS_PINFUNC);
-
-	au_writel(0x8000, SYS_TRIOUTCLR);
-
-	static_cfg0 = au_readl(MEM_STCFG0) & ~0xc00;
-	au_writel(static_cfg0, MEM_STCFG0);
-
-	/* configure RCE2* for LCD */
-	au_writel(0x00000004, MEM_STCFG2);
-
-	/* MEM_STTIME2 */
-	au_writel(0x09000000, MEM_STTIME2);
-
-	/* Set 32-bit base address decoding for RCE2* */
-	au_writel(0x10003ff0, MEM_STADDR2);
-
-	/*
-	 * PCI CPLD setup
-	 * Expand CE0 to cover PCI
-	 */
-	au_writel(0x11803e40, MEM_STADDR1);
-
-	/* Burst visibility on */
-	au_writel(au_readl(MEM_STCFG0) | 0x1000, MEM_STCFG0);
-
-	au_writel(0x83, MEM_STCFG1);	     /* ewait enabled, flash timing */
-	au_writel(0x33030a10, MEM_STTIME1);  /* slower timing for FPGA */
-
-	/* Setup the static bus controller */
-	au_writel(0x00000002, MEM_STCFG3);  /* type = PCMCIA */
-	au_writel(0x280E3D07, MEM_STTIME3); /* 250ns cycle time */
-	au_writel(0x10000000, MEM_STADDR3); /* any PCMCIA select */
-
-	/*
-	 * Enable Au1000 BCLK switching - note: sed1356 must not use
-	 * its BCLK (Au1000 LCLK) for any timings
-	 */
-	switch (prid & 0x000000FF) {
-	case 0x00: /* DA */
-	case 0x01: /* HA */
-	case 0x02: /* HB */
-		break;
-	default:  /* HC and newer */
-		/*
-		 * Enable sys bus clock divider when IDLE state or no bus
-		 * activity.
-		 */
-		au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
-		break;
-	}
-}
diff --git a/arch/mips/alchemy/pb1000/init.c b/arch/mips/alchemy/pb1000/init.c
deleted file mode 100644
index 8a9c7d5..0000000
--- a/arch/mips/alchemy/pb1000/init.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- *	Pb1000 board setup
- *
- * Copyright 2001, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <asm/bootinfo.h>
-
-#include <prom.h>
-
-const char *get_system_type(void)
-{
-	return "Alchemy Pb1000";
-}
-
-void __init prom_init(void)
-{
-	unsigned char *memsize_str;
-	unsigned long memsize;
-
-	prom_argc = (int)fw_arg0;
-	prom_argv = (char **)fw_arg1;
-	prom_envp = (char **)fw_arg2;
-
-	prom_init_cmdline();
-	memsize_str = prom_getenv("memsize");
-	if (!memsize_str)
-		memsize = 0x04000000;
-	else
-		strict_strtol(memsize_str, 0, &memsize);
-	add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
diff --git a/arch/mips/alchemy/pb1000/irqmap.c b/arch/mips/alchemy/pb1000/irqmap.c
deleted file mode 100644
index b3d56b0..0000000
--- a/arch/mips/alchemy/pb1000/irqmap.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- *	Au1xxx irq map table
- *
- * Copyright 2003 Embedded Edge, LLC
- *		dan@embeddededge.com
- *
- *  This program is free software; you can redistribute	 it and/or modify it
- *  under  the terms of	 the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the	License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED	  ``AS	IS'' AND   ANY	EXPRESS OR IMPLIED
- *  WARRANTIES,	  INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO	EVENT  SHALL   THE AUTHOR  BE	 LIABLE FOR ANY	  DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED	  TO, PROCUREMENT OF  SUBSTITUTE GOODS	OR SERVICES; LOSS OF
- *  USE, DATA,	OR PROFITS; OR	BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN	 CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-
-#include <asm/mach-au1x00/au1000.h>
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
-	{ AU1000_GPIO_15, INTC_INT_LOW_LEVEL, 0 },
-};
-
-int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
diff --git a/arch/mips/alchemy/pb1100/Makefile b/arch/mips/alchemy/pb1100/Makefile
deleted file mode 100644
index 793e97c..0000000
--- a/arch/mips/alchemy/pb1100/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-#  Copyright 2000, 2001, 2008 MontaVista Software Inc.
-#  Author: MontaVista Software, Inc. <source@mvista.com>
-#
-# Makefile for the Alchemy Semiconductor Pb1100 board.
-#
-
-lib-y := init.o board_setup.o irqmap.o
diff --git a/arch/mips/alchemy/pb1100/board_setup.c b/arch/mips/alchemy/pb1100/board_setup.c
deleted file mode 100644
index c0bfd59..0000000
--- a/arch/mips/alchemy/pb1100/board_setup.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright 2002, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/delay.h>
-
-#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-pb1x00/pb1100.h>
-
-void board_reset(void)
-{
-	/* Hit BCSR.RST_VDDI[SOFT_RESET] */
-	au_writel(0x00000000, PB1100_RST_VDDI);
-}
-
-void __init board_setup(void)
-{
-	volatile void __iomem *base = (volatile void __iomem *)0xac000000UL;
-
-	/* Set AUX clock to 12 MHz * 8 = 96 MHz */
-	au_writel(8, SYS_AUXPLL);
-	au_writel(0, SYS_PININPUTEN);
-	udelay(100);
-
-#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
-	{
-		u32 pin_func, sys_freqctrl, sys_clksrc;
-
-		/* Configure pins GPIO[14:9] as GPIO */
-		pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_UR3;
-
-		/* Zero and disable FREQ2 */
-		sys_freqctrl = au_readl(SYS_FREQCTRL0);
-		sys_freqctrl &= ~0xFFF00000;
-		au_writel(sys_freqctrl, SYS_FREQCTRL0);
-
-		/* Zero and disable USBH/USBD/IrDA clock */
-		sys_clksrc = au_readl(SYS_CLKSRC);
-		sys_clksrc &= ~(SYS_CS_CIR | SYS_CS_DIR | SYS_CS_MIR_MASK);
-		au_writel(sys_clksrc, SYS_CLKSRC);
-
-		sys_freqctrl = au_readl(SYS_FREQCTRL0);
-		sys_freqctrl &= ~0xFFF00000;
-
-		sys_clksrc = au_readl(SYS_CLKSRC);
-		sys_clksrc &= ~(SYS_CS_CIR | SYS_CS_DIR | SYS_CS_MIR_MASK);
-
-		/* FREQ2 = aux / 2 = 48 MHz */
-		sys_freqctrl |= (0 << SYS_FC_FRDIV2_BIT) |
-				SYS_FC_FE2 | SYS_FC_FS2;
-		au_writel(sys_freqctrl, SYS_FREQCTRL0);
-
-		/*
-		 * Route 48 MHz FREQ2 into USBH/USBD/IrDA
-		 */
-		sys_clksrc |= SYS_CS_MUX_FQ2 << SYS_CS_MIR_BIT;
-		au_writel(sys_clksrc, SYS_CLKSRC);
-
-		/* Setup the static bus controller */
-		au_writel(0x00000002, MEM_STCFG3);  /* type = PCMCIA */
-		au_writel(0x280E3D07, MEM_STTIME3); /* 250ns cycle time */
-		au_writel(0x10000000, MEM_STADDR3); /* any PCMCIA select */
-
-		/*
-		 * Get USB Functionality pin state (device vs host drive pins).
-		 */
-		pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_USB;
-		/* 2nd USB port is USB host. */
-		pin_func |= SYS_PF_USB;
-		au_writel(pin_func, SYS_PINFUNC);
-	}
-#endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */
-
-	/* Enable sys bus clock divider when IDLE state or no bus activity. */
-	au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
-
-	/* Enable the RTC if not already enabled. */
-	if (!(readb(base + 0x28) & 0x20)) {
-		writeb(readb(base + 0x28) | 0x20, base + 0x28);
-		au_sync();
-	}
-	/* Put the clock in BCD mode. */
-	if (readb(base + 0x2C) & 0x4) { /* reg B */
-		writeb(readb(base + 0x2c) & ~0x4, base + 0x2c);
-		au_sync();
-	}
-}
diff --git a/arch/mips/alchemy/pb1100/init.c b/arch/mips/alchemy/pb1100/init.c
deleted file mode 100644
index 7c67923..0000000
--- a/arch/mips/alchemy/pb1100/init.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *	Pb1100 board setup
- *
- * Copyright 2002, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <asm/bootinfo.h>
-
-#include <prom.h>
-
-const char *get_system_type(void)
-{
-	return "Alchemy Pb1100";
-}
-
-void __init prom_init(void)
-{
-	unsigned char *memsize_str;
-	unsigned long memsize;
-
-	prom_argc = fw_arg0;
-	prom_argv = (char **)fw_arg1;
-	prom_envp = (char **)fw_arg3;
-
-	prom_init_cmdline();
-
-	memsize_str = prom_getenv("memsize");
-	if (!memsize_str)
-		memsize = 0x04000000;
-	else
-		strict_strtol(memsize_str, 0, &memsize);
-
-	add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
diff --git a/arch/mips/alchemy/pb1100/irqmap.c b/arch/mips/alchemy/pb1100/irqmap.c
deleted file mode 100644
index 9b7dd8b..0000000
--- a/arch/mips/alchemy/pb1100/irqmap.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- *	Au1xx0 IRQ map table
- *
- * Copyright 2003 Embedded Edge, LLC
- *		dan@embeddededge.com
- *
- *  This program is free software; you can redistribute	 it and/or modify it
- *  under  the terms of	 the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the	License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED	  ``AS	IS'' AND   ANY	EXPRESS OR IMPLIED
- *  WARRANTIES,	  INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO	EVENT  SHALL   THE AUTHOR  BE	 LIABLE FOR ANY	  DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED	  TO, PROCUREMENT OF  SUBSTITUTE GOODS	OR SERVICES; LOSS OF
- *  USE, DATA,	OR PROFITS; OR	BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN	 CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-
-#include <asm/mach-au1x00/au1000.h>
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
-	{ AU1000_GPIO_9,  INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card Fully_Inserted# */
-	{ AU1000_GPIO_10, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card STSCHG# */
-	{ AU1000_GPIO_11, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card IRQ# */
-	{ AU1000_GPIO_13, INTC_INT_LOW_LEVEL, 0 }, /* DC_IRQ# */
-};
-
-int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
diff --git a/arch/mips/alchemy/pb1200/Makefile b/arch/mips/alchemy/pb1200/Makefile
deleted file mode 100644
index d678adf..0000000
--- a/arch/mips/alchemy/pb1200/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for the Alchemy Semiconductor Pb1200/DBAu1200 boards.
-#
-
-lib-y := init.o board_setup.o irqmap.o
-obj-y += platform.o
-
-EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/alchemy/pb1200/board_setup.c b/arch/mips/alchemy/pb1200/board_setup.c
deleted file mode 100644
index 6cb21150..0000000
--- a/arch/mips/alchemy/pb1200/board_setup.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *	Alchemy Pb1200/Db1200 board setup.
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include <prom.h>
-#include <au1xxx.h>
-
-extern void _board_init_irq(void);
-extern void (*board_init_irq)(void);
-
-void board_reset(void)
-{
-	bcsr->resets = 0;
-	bcsr->system = 0;
-}
-
-void __init board_setup(void)
-{
-	char *argptr = NULL;
-
-#if 0
-	{
-		u32 pin_func;
-
-		/*
-		 * Enable PSC1 SYNC for AC97.  Normaly done in audio driver,
-		 * but it is board specific code, so put it here.
-		 */
-		pin_func = au_readl(SYS_PINFUNC);
-		au_sync();
-		pin_func |= SYS_PF_MUST_BE_SET | SYS_PF_PSC1_S1;
-		au_writel(pin_func, SYS_PINFUNC);
-
-		au_writel(0, (u32)bcsr | 0x10); /* turn off PCMCIA power */
-		au_sync();
-	}
-#endif
-
-#if defined(CONFIG_I2C_AU1550)
-	{
-		u32 freq0, clksrc;
-		u32 pin_func;
-
-		/* Select SMBus in CPLD */
-		bcsr->resets &= ~BCSR_RESETS_PCS0MUX;
-
-		pin_func = au_readl(SYS_PINFUNC);
-		au_sync();
-		pin_func &= ~(SYS_PINFUNC_P0A | SYS_PINFUNC_P0B);
-		/* Set GPIOs correctly */
-		pin_func |= 2 << 17;
-		au_writel(pin_func, SYS_PINFUNC);
-		au_sync();
-
-		/* The I2C driver depends on 50 MHz clock */
-		freq0 = au_readl(SYS_FREQCTRL0);
-		au_sync();
-		freq0 &= ~(SYS_FC_FRDIV1_MASK | SYS_FC_FS1 | SYS_FC_FE1);
-		freq0 |= 3 << SYS_FC_FRDIV1_BIT;
-		/* 396 MHz / (3 + 1) * 2 == 49.5 MHz */
-		au_writel(freq0, SYS_FREQCTRL0);
-		au_sync();
-		freq0 |= SYS_FC_FE1;
-		au_writel(freq0, SYS_FREQCTRL0);
-		au_sync();
-
-		clksrc = au_readl(SYS_CLKSRC);
-		au_sync();
-		clksrc &= ~(SYS_CS_CE0 | SYS_CS_DE0 | SYS_CS_ME0_MASK);
-		/* Bit 22 is EXTCLK0 for PSC0 */
-		clksrc |= SYS_CS_MUX_FQ1 << SYS_CS_ME0_BIT;
-		au_writel(clksrc, SYS_CLKSRC);
-		au_sync();
-	}
-#endif
-
-#ifdef CONFIG_FB_AU1200
-	argptr = prom_getcmdline();
-#ifdef CONFIG_MIPS_PB1200
-	strcat(argptr, " video=au1200fb:panel:bs");
-#endif
-#ifdef CONFIG_MIPS_DB1200
-	strcat(argptr, " video=au1200fb:panel:bs");
-#endif
-#endif
-
-	/*
-	 * The Pb1200 development board uses external MUX for PSC0 to
-	 * support SMB/SPI. bcsr->resets bit 12: 0=SMB 1=SPI
-	 */
-#ifdef CONFIG_I2C_AU1550
-	bcsr->resets &= ~BCSR_RESETS_PCS0MUX;
-#endif
-	au_sync();
-
-#ifdef CONFIG_MIPS_PB1200
-	printk(KERN_INFO "AMD Alchemy Pb1200 Board\n");
-#endif
-#ifdef CONFIG_MIPS_DB1200
-	printk(KERN_INFO "AMD Alchemy Db1200 Board\n");
-#endif
-
-	/* Setup Pb1200 External Interrupt Controller */
-	board_init_irq = _board_init_irq;
-}
-
-int board_au1200fb_panel(void)
-{
-	BCSR *bcsr = (BCSR *)BCSR_KSEG1_ADDR;
-	int p;
-
-	p = bcsr->switches;
-	p >>= 8;
-	p &= 0x0F;
-	return p;
-}
-
-int board_au1200fb_panel_init(void)
-{
-	/* Apply power */
-	BCSR *bcsr = (BCSR *)BCSR_KSEG1_ADDR;
-
-	bcsr->board |= BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD | BCSR_BOARD_LCDBL;
-	/* printk(KERN_DEBUG "board_au1200fb_panel_init()\n"); */
-	return 0;
-}
-
-int board_au1200fb_panel_shutdown(void)
-{
-	/* Remove power */
-	BCSR *bcsr = (BCSR *)BCSR_KSEG1_ADDR;
-
-	bcsr->board &= ~(BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD |
-			 BCSR_BOARD_LCDBL);
-	/* printk(KERN_DEBUG "board_au1200fb_panel_shutdown()\n"); */
-	return 0;
-}
diff --git a/arch/mips/alchemy/pb1200/init.c b/arch/mips/alchemy/pb1200/init.c
deleted file mode 100644
index e9b2a0f..0000000
--- a/arch/mips/alchemy/pb1200/init.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *	PB1200 board setup
- *
- * Copyright 2001, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <asm/bootinfo.h>
-
-#include <prom.h>
-
-const char *get_system_type(void)
-{
-	return "Alchemy Pb1200";
-}
-
-void __init prom_init(void)
-{
-	unsigned char *memsize_str;
-	unsigned long memsize;
-
-	prom_argc = (int)fw_arg0;
-	prom_argv = (char **)fw_arg1;
-	prom_envp = (char **)fw_arg2;
-
-	prom_init_cmdline();
-	memsize_str = prom_getenv("memsize");
-	if (!memsize_str)
-		memsize = 0x08000000;
-	else
-		strict_strtol(memsize_str, 0, &memsize);
-	add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
diff --git a/arch/mips/alchemy/pb1200/irqmap.c b/arch/mips/alchemy/pb1200/irqmap.c
deleted file mode 100644
index 2a505ad..0000000
--- a/arch/mips/alchemy/pb1200/irqmap.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- *	Au1xxx irq map table
- *
- *  This program is free software; you can redistribute	 it and/or modify it
- *  under  the terms of	 the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the	License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED	  ``AS	IS'' AND   ANY	EXPRESS OR IMPLIED
- *  WARRANTIES,	  INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO	EVENT  SHALL   THE AUTHOR  BE	 LIABLE FOR ANY	  DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED	  TO, PROCUREMENT OF  SUBSTITUTE GOODS	OR SERVICES; LOSS OF
- *  USE, DATA,	OR PROFITS; OR	BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN	 CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-
-#include <asm/mach-au1x00/au1000.h>
-
-#ifdef CONFIG_MIPS_PB1200
-#include <asm/mach-pb1x00/pb1200.h>
-#endif
-
-#ifdef CONFIG_MIPS_DB1200
-#include <asm/mach-db1x00/db1200.h>
-#define PB1200_INT_BEGIN DB1200_INT_BEGIN
-#define PB1200_INT_END DB1200_INT_END
-#endif
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
-	/* This is external interrupt cascade */
-	{ AU1000_GPIO_7, INTC_INT_LOW_LEVEL, 0 },
-};
-
-int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
-
-/*
- * Support for External interrupts on the Pb1200 Development platform.
- */
-static volatile int pb1200_cascade_en;
-
-irqreturn_t pb1200_cascade_handler(int irq, void *dev_id)
-{
-	unsigned short bisr = bcsr->int_status;
-	int extirq_nr = 0;
-
-	/* Clear all the edge interrupts. This has no effect on level. */
-	bcsr->int_status = bisr;
-	for ( ; bisr; bisr &= bisr - 1) {
-		extirq_nr = PB1200_INT_BEGIN + __ffs(bisr);
-		/* Ack and dispatch IRQ */
-		do_IRQ(extirq_nr);
-	}
-
-	return IRQ_RETVAL(1);
-}
-
-inline void pb1200_enable_irq(unsigned int irq_nr)
-{
-	bcsr->intset_mask = 1 << (irq_nr - PB1200_INT_BEGIN);
-	bcsr->intset = 1 << (irq_nr - PB1200_INT_BEGIN);
-}
-
-inline void pb1200_disable_irq(unsigned int irq_nr)
-{
-	bcsr->intclr_mask = 1 << (irq_nr - PB1200_INT_BEGIN);
-	bcsr->intclr = 1 << (irq_nr - PB1200_INT_BEGIN);
-}
-
-static unsigned int pb1200_setup_cascade(void)
-{
-	return request_irq(AU1000_GPIO_7, &pb1200_cascade_handler,
-			   0, "Pb1200 Cascade", &pb1200_cascade_handler);
-}
-
-static unsigned int pb1200_startup_irq(unsigned int irq)
-{
-	if (++pb1200_cascade_en == 1) {
-		int res;
-
-		res = pb1200_setup_cascade();
-		if (res)
-			return res;
-	}
-
-	pb1200_enable_irq(irq);
-
-	return 0;
-}
-
-static void pb1200_shutdown_irq(unsigned int irq)
-{
-	pb1200_disable_irq(irq);
-	if (--pb1200_cascade_en == 0)
-		free_irq(AU1000_GPIO_7, &pb1200_cascade_handler);
-}
-
-static struct irq_chip external_irq_type = {
-#ifdef CONFIG_MIPS_PB1200
-	.name = "Pb1200 Ext",
-#endif
-#ifdef CONFIG_MIPS_DB1200
-	.name = "Db1200 Ext",
-#endif
-	.startup  = pb1200_startup_irq,
-	.shutdown = pb1200_shutdown_irq,
-	.ack      = pb1200_disable_irq,
-	.mask     = pb1200_disable_irq,
-	.mask_ack = pb1200_disable_irq,
-	.unmask   = pb1200_enable_irq,
-};
-
-void _board_init_irq(void)
-{
-	unsigned int irq;
-
-#ifdef CONFIG_MIPS_PB1200
-	/* We have a problem with CPLD rev 3. */
-	if (((bcsr->whoami & BCSR_WHOAMI_CPLD) >> 4) <= 3) {
-		printk(KERN_ERR "WARNING!!!\n");
-		printk(KERN_ERR "WARNING!!!\n");
-		printk(KERN_ERR "WARNING!!!\n");
-		printk(KERN_ERR "WARNING!!!\n");
-		printk(KERN_ERR "WARNING!!!\n");
-		printk(KERN_ERR "WARNING!!!\n");
-		printk(KERN_ERR "Pb1200 must be at CPLD rev 4. Please have Pb1200\n");
-		printk(KERN_ERR "updated to latest revision. This software will\n");
-		printk(KERN_ERR "not work on anything less than CPLD rev 4.\n");
-		printk(KERN_ERR "WARNING!!!\n");
-		printk(KERN_ERR "WARNING!!!\n");
-		printk(KERN_ERR "WARNING!!!\n");
-		printk(KERN_ERR "WARNING!!!\n");
-		printk(KERN_ERR "WARNING!!!\n");
-		printk(KERN_ERR "WARNING!!!\n");
-		panic("Game over.  Your score is 0.");
-	}
-#endif
-
-	for (irq = PB1200_INT_BEGIN; irq <= PB1200_INT_END; irq++) {
-		set_irq_chip_and_handler(irq, &external_irq_type,
-					 handle_level_irq);
-		pb1200_disable_irq(irq);
-	}
-
-	/*
-	 * GPIO_7 can not be hooked here, so it is hooked upon first
-	 * request of any source attached to the cascade.
-	 */
-}
diff --git a/arch/mips/alchemy/pb1500/Makefile b/arch/mips/alchemy/pb1500/Makefile
deleted file mode 100644
index 602f38d..0000000
--- a/arch/mips/alchemy/pb1500/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-#  Copyright 2000, 2001, 2008 MontaVista Software Inc.
-#  Author: MontaVista Software, Inc. <source@mvista.com>
-#
-# Makefile for the Alchemy Semiconductor Pb1500 board.
-#
-
-lib-y := init.o board_setup.o irqmap.o
diff --git a/arch/mips/alchemy/pb1500/board_setup.c b/arch/mips/alchemy/pb1500/board_setup.c
deleted file mode 100644
index 035771c..0000000
--- a/arch/mips/alchemy/pb1500/board_setup.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright 2000, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/delay.h>
-
-#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-pb1x00/pb1500.h>
-
-void board_reset(void)
-{
-	/* Hit BCSR.RST_VDDI[SOFT_RESET] */
-	au_writel(0x00000000, PB1500_RST_VDDI);
-}
-
-void __init board_setup(void)
-{
-	u32 pin_func;
-	u32 sys_freqctrl, sys_clksrc;
-
-	sys_clksrc = sys_freqctrl = pin_func = 0;
-	/* Set AUX clock to 12 MHz * 8 = 96 MHz */
-	au_writel(8, SYS_AUXPLL);
-	au_writel(0, SYS_PINSTATERD);
-	udelay(100);
-
-#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
-
-	/* GPIO201 is input for PCMCIA card detect */
-	/* GPIO203 is input for PCMCIA interrupt request */
-	au_writel(au_readl(GPIO2_DIR) & ~((1 << 1) | (1 << 3)), GPIO2_DIR);
-
-	/* Zero and disable FREQ2 */
-	sys_freqctrl = au_readl(SYS_FREQCTRL0);
-	sys_freqctrl &= ~0xFFF00000;
-	au_writel(sys_freqctrl, SYS_FREQCTRL0);
-
-	/* zero and disable USBH/USBD clocks */
-	sys_clksrc = au_readl(SYS_CLKSRC);
-	sys_clksrc &= ~(SYS_CS_CUD | SYS_CS_DUD | SYS_CS_MUD_MASK |
-			SYS_CS_CUH | SYS_CS_DUH | SYS_CS_MUH_MASK);
-	au_writel(sys_clksrc, SYS_CLKSRC);
-
-	sys_freqctrl = au_readl(SYS_FREQCTRL0);
-	sys_freqctrl &= ~0xFFF00000;
-
-	sys_clksrc = au_readl(SYS_CLKSRC);
-	sys_clksrc &= ~(SYS_CS_CUD | SYS_CS_DUD | SYS_CS_MUD_MASK |
-			SYS_CS_CUH | SYS_CS_DUH | SYS_CS_MUH_MASK);
-
-	/* FREQ2 = aux/2 = 48 MHz */
-	sys_freqctrl |= (0 << SYS_FC_FRDIV2_BIT) | SYS_FC_FE2 | SYS_FC_FS2;
-	au_writel(sys_freqctrl, SYS_FREQCTRL0);
-
-	/*
-	 * Route 48MHz FREQ2 into USB Host and/or Device
-	 */
-	sys_clksrc |= SYS_CS_MUX_FQ2 << SYS_CS_MUH_BIT;
-	au_writel(sys_clksrc, SYS_CLKSRC);
-
-	pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_USB;
-	/* 2nd USB port is USB host */
-	pin_func |= SYS_PF_USB;
-	au_writel(pin_func, SYS_PINFUNC);
-#endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */
-
-#ifdef CONFIG_PCI
-	/* Setup PCI bus controller */
-	au_writel(0, Au1500_PCI_CMEM);
-	au_writel(0x00003fff, Au1500_CFG_BASE);
-#if defined(__MIPSEB__)
-	au_writel(0xf | (2 << 6) | (1 << 4), Au1500_PCI_CFG);
-#else
-	au_writel(0xf, Au1500_PCI_CFG);
-#endif
-	au_writel(0xf0000000, Au1500_PCI_MWMASK_DEV);
-	au_writel(0, Au1500_PCI_MWBASE_REV_CCL);
-	au_writel(0x02a00356, Au1500_PCI_STATCMD);
-	au_writel(0x00003c04, Au1500_PCI_HDRTYPE);
-	au_writel(0x00000008, Au1500_PCI_MBAR);
-	au_sync();
-#endif
-
-	/* Enable sys bus clock divider when IDLE state or no bus activity. */
-	au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
-
-	/* Enable the RTC if not already enabled */
-	if (!(au_readl(0xac000028) & 0x20)) {
-		printk(KERN_INFO "enabling clock ...\n");
-		au_writel((au_readl(0xac000028) | 0x20), 0xac000028);
-	}
-	/* Put the clock in BCD mode */
-	if (au_readl(0xac00002c) & 0x4) { /* reg B */
-		au_writel(au_readl(0xac00002c) & ~0x4, 0xac00002c);
-		au_sync();
-	}
-}
diff --git a/arch/mips/alchemy/pb1500/init.c b/arch/mips/alchemy/pb1500/init.c
deleted file mode 100644
index 3b6e395..0000000
--- a/arch/mips/alchemy/pb1500/init.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *	Pb1500 board setup
- *
- * Copyright 2001, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <asm/bootinfo.h>
-
-#include <prom.h>
-
-const char *get_system_type(void)
-{
-	return "Alchemy Pb1500";
-}
-
-void __init prom_init(void)
-{
-	unsigned char *memsize_str;
-	unsigned long memsize;
-
-	prom_argc = (int)fw_arg0;
-	prom_argv = (char **)fw_arg1;
-	prom_envp = (char **)fw_arg2;
-
-	prom_init_cmdline();
-	memsize_str = prom_getenv("memsize");
-	if (!memsize_str)
-		memsize = 0x04000000;
-	else
-		strict_strtol(memsize_str, 0, &memsize);
-	add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
diff --git a/arch/mips/alchemy/pb1500/irqmap.c b/arch/mips/alchemy/pb1500/irqmap.c
deleted file mode 100644
index 39c4682..0000000
--- a/arch/mips/alchemy/pb1500/irqmap.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- *	Au1xxx irq map table
- *
- * Copyright 2003 Embedded Edge, LLC
- *		dan@embeddededge.com
- *
- *  This program is free software; you can redistribute	 it and/or modify it
- *  under  the terms of	 the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the	License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED	  ``AS	IS'' AND   ANY	EXPRESS OR IMPLIED
- *  WARRANTIES,	  INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO	EVENT  SHALL   THE AUTHOR  BE	 LIABLE FOR ANY	  DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED	  TO, PROCUREMENT OF  SUBSTITUTE GOODS	OR SERVICES; LOSS OF
- *  USE, DATA,	OR PROFITS; OR	BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN	 CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-
-#include <asm/mach-au1x00/au1000.h>
-
-char irq_tab_alchemy[][5] __initdata = {
-	[12] = { -1, INTA, INTX, INTX, INTX },   /* IDSEL 12 - HPT370	*/
-	[13] = { -1, INTA, INTB, INTC, INTD },   /* IDSEL 13 - PCI slot */
-};
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
-	{ AU1500_GPIO_204, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1500_GPIO_201, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1500_GPIO_202, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1500_GPIO_203, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1500_GPIO_205, INTC_INT_LOW_LEVEL, 0 },
-};
-
-int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
diff --git a/arch/mips/alchemy/pb1550/Makefile b/arch/mips/alchemy/pb1550/Makefile
deleted file mode 100644
index 7d8beca..0000000
--- a/arch/mips/alchemy/pb1550/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-#  Copyright 2000, 2008 MontaVista Software Inc.
-#  Author: MontaVista Software, Inc. <source@mvista.com>
-#
-# Makefile for the Alchemy Semiconductor Pb1550 board.
-#
-
-lib-y := init.o board_setup.o irqmap.o
diff --git a/arch/mips/alchemy/pb1550/board_setup.c b/arch/mips/alchemy/pb1550/board_setup.c
deleted file mode 100644
index 0ed76b6..0000000
--- a/arch/mips/alchemy/pb1550/board_setup.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *	Alchemy Pb1550 board setup.
- *
- * Copyright 2000, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-
-#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-pb1x00/pb1550.h>
-
-void board_reset(void)
-{
-	/* Hit BCSR.SYSTEM[RESET] */
-	au_writew(au_readw(0xAF00001C) & ~BCSR_SYSTEM_RESET, 0xAF00001C);
-}
-
-void __init board_setup(void)
-{
-	u32 pin_func;
-
-	/*
-	 * Enable PSC1 SYNC for AC'97.  Normaly done in audio driver,
-	 * but it is board specific code, so put it here.
-	 */
-	pin_func = au_readl(SYS_PINFUNC);
-	au_sync();
-	pin_func |= SYS_PF_MUST_BE_SET | SYS_PF_PSC1_S1;
-	au_writel(pin_func, SYS_PINFUNC);
-
-	au_writel(0, (u32)bcsr | 0x10); /* turn off PCMCIA power */
-	au_sync();
-
-	printk(KERN_INFO "AMD Alchemy Pb1550 Board\n");
-}
diff --git a/arch/mips/alchemy/pb1550/init.c b/arch/mips/alchemy/pb1550/init.c
deleted file mode 100644
index e1055a1..0000000
--- a/arch/mips/alchemy/pb1550/init.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *	Pb1550 board setup
- *
- * Copyright 2001, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <asm/bootinfo.h>
-
-#include <prom.h>
-
-const char *get_system_type(void)
-{
-	return "Alchemy Pb1550";
-}
-
-void __init prom_init(void)
-{
-	unsigned char *memsize_str;
-	unsigned long memsize;
-
-	prom_argc = (int)fw_arg0;
-	prom_argv = (char **)fw_arg1;
-	prom_envp = (char **)fw_arg2;
-
-	prom_init_cmdline();
-	memsize_str = prom_getenv("memsize");
-	if (!memsize_str)
-		memsize = 0x08000000;
-	else
-		strict_strtol(memsize_str, 0, &memsize);
-	add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
diff --git a/arch/mips/alchemy/pb1550/irqmap.c b/arch/mips/alchemy/pb1550/irqmap.c
deleted file mode 100644
index a02a4d1..0000000
--- a/arch/mips/alchemy/pb1550/irqmap.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- *	Au1xx0 IRQ map table
- *
- * Copyright 2003 Embedded Edge, LLC
- *		dan@embeddededge.com
- *
- *  This program is free software; you can redistribute	 it and/or modify it
- *  under  the terms of	 the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the	License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED	  ``AS	IS'' AND   ANY	EXPRESS OR IMPLIED
- *  WARRANTIES,	  INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO	EVENT  SHALL   THE AUTHOR  BE	 LIABLE FOR ANY	  DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED	  TO, PROCUREMENT OF  SUBSTITUTE GOODS	OR SERVICES; LOSS OF
- *  USE, DATA,	OR PROFITS; OR	BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN	 CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-
-#include <asm/mach-au1x00/au1000.h>
-
-char irq_tab_alchemy[][5] __initdata = {
-	[12] = { -1, INTB, INTC, INTD, INTA }, /* IDSEL 12 - PCI slot 2 (left)  */
-	[13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot 1 (right) */
-};
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
-	{ AU1000_GPIO_0, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1000_GPIO_1, INTC_INT_LOW_LEVEL, 0 },
-};
-
-int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c
index 4c587ac..a2634fa 100644
--- a/arch/mips/alchemy/xxs1500/board_setup.c
+++ b/arch/mips/alchemy/xxs1500/board_setup.c
@@ -28,6 +28,8 @@
 
 #include <asm/mach-au1x00/au1000.h>
 
+#include <prom.h>
+
 void board_reset(void)
 {
 	/* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
@@ -38,6 +40,16 @@
 {
 	u32 pin_func;
 
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+	char *argptr;
+	argptr = prom_getcmdline();
+	argptr = strstr(argptr, "console=");
+	if (argptr == NULL) {
+		argptr = prom_getcmdline();
+		strcat(argptr, " console=ttyS0,115200");
+	}
+#endif
+
 	/* Set multiple use pins (UART3/GPIO) to UART (it's used as UART too) */
 	pin_func  = au_readl(SYS_PINFUNC) & ~SYS_PF_UR3;
 	pin_func |= SYS_PF_UR3;
diff --git a/arch/mips/alchemy/xxs1500/init.c b/arch/mips/alchemy/xxs1500/init.c
index 7516434..456fa14 100644
--- a/arch/mips/alchemy/xxs1500/init.c
+++ b/arch/mips/alchemy/xxs1500/init.c
@@ -53,6 +53,6 @@
 	if (!memsize_str)
 		memsize = 0x04000000;
 	else
-		strict_strtol(memsize_str, 0, &memsize);
+		strict_strtoul(memsize_str, 0, &memsize);
 	add_memory_region(0, memsize, BOOT_MEM_RAM);
 }
diff --git a/arch/mips/alchemy/xxs1500/irqmap.c b/arch/mips/alchemy/xxs1500/irqmap.c
index edf06ed..0f0f301 100644
--- a/arch/mips/alchemy/xxs1500/irqmap.c
+++ b/arch/mips/alchemy/xxs1500/irqmap.c
@@ -27,23 +27,26 @@
  */
 
 #include <linux/init.h>
-
+#include <linux/interrupt.h>
 #include <asm/mach-au1x00/au1000.h>
 
 struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
-	{ AU1500_GPIO_204, INTC_INT_HIGH_LEVEL, 0 },
-	{ AU1500_GPIO_201, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1500_GPIO_202, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1500_GPIO_203, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1500_GPIO_205, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1500_GPIO_207, INTC_INT_LOW_LEVEL, 0 },
+	{ AU1500_GPIO_204, IRQF_TRIGGER_HIGH, 0 },
+	{ AU1500_GPIO_201, IRQF_TRIGGER_LOW, 0 },
+	{ AU1500_GPIO_202, IRQF_TRIGGER_LOW, 0 },
+	{ AU1500_GPIO_203, IRQF_TRIGGER_LOW, 0 },
+	{ AU1500_GPIO_205, IRQF_TRIGGER_LOW, 0 },
+	{ AU1500_GPIO_207, IRQF_TRIGGER_LOW, 0 },
 
-	{ AU1000_GPIO_0, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1000_GPIO_1, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1000_GPIO_2, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1000_GPIO_3, INTC_INT_LOW_LEVEL, 0 },
-	{ AU1000_GPIO_4, INTC_INT_LOW_LEVEL, 0 }, /* CF interrupt */
-	{ AU1000_GPIO_5, INTC_INT_LOW_LEVEL, 0 },
+	{ AU1000_GPIO_0, IRQF_TRIGGER_LOW, 0 },
+	{ AU1000_GPIO_1, IRQF_TRIGGER_LOW, 0 },
+	{ AU1000_GPIO_2, IRQF_TRIGGER_LOW, 0 },
+	{ AU1000_GPIO_3, IRQF_TRIGGER_LOW, 0 },
+	{ AU1000_GPIO_4, IRQF_TRIGGER_LOW, 0 }, /* CF interrupt */
+	{ AU1000_GPIO_5, IRQF_TRIGGER_LOW, 0 },
 };
 
-int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
+void __init board_init_irq(void)
+{
+	au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+}
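Note: the xxs1500 hunk above replaces the old INTC_INT_* trigger constants with generic IRQF_TRIGGER_* flags and registers the table explicitly through au1xxx_setup_irqmap() instead of exporting au1xxx_nr_irqs. A minimal sketch of the same pattern for a hypothetical Alchemy board follows; the names example_irq_map and example_board_init_irq are illustrative and not part of this patch.

#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>

/* Hypothetical board irq map, example only */
static struct au1xxx_irqmap example_irq_map[] __initdata = {
	/* GPIO7 as an active-low external interrupt cascade */
	{ AU1000_GPIO_7, IRQF_TRIGGER_LOW, 0 },
};

static void __init example_board_init_irq(void)
{
	au1xxx_setup_irqmap(example_irq_map, ARRAY_SIZE(example_irq_map));
}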
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
new file mode 100644
index 0000000..094c17e
--- /dev/null
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -0,0 +1,85 @@
+config CAVIUM_OCTEON_SPECIFIC_OPTIONS
+	bool "Enable Octeon specific options"
+	depends on CPU_CAVIUM_OCTEON
+	default "y"
+
+config CAVIUM_OCTEON_2ND_KERNEL
+	bool "Build the kernel to be used as a 2nd kernel on the same chip"
+	depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
+	default "n"
+	help
+	  This option configures this kernel to be linked at a different
+	  address and use the 2nd uart for output. This allows a kernel built
+	  with this option to be run at the same time as one built without this
+	  option.
+
+config CAVIUM_OCTEON_HW_FIX_UNALIGNED
+	bool "Enable hardware fixups of unaligned loads and stores"
+	depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
+	default "y"
+	help
+	  Configure the Octeon hardware to automatically fix unaligned loads
+	  and stores. Normally unaligned accesses are fixed using a kernel
+	  exception handler. This option enables the automatic hardware fixups,
+	  which require only an extra 3 cycles. Disable this option if you
+	  are running code that relies on address exceptions on unaligned
+	  accesses.
+
+config CAVIUM_OCTEON_CVMSEG_SIZE
+	int "Number of L1 cache lines reserved for CVMSEG memory"
+	depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
+	range 0 54
+	default 1
+	help
+	  CVMSEG LM is a segment that accesses portions of the dcache as a
+	  local memory; the larger CVMSEG is, the smaller the cache is.
+	  This selects the size of CVMSEG LM, which is in cache blocks. The
+	  legal range is from zero to 54 cache blocks (i.e. CVMSEG LM is
+	  between zero and 6912 bytes).
+
+config CAVIUM_OCTEON_LOCK_L2
+	bool "Lock often used kernel code in the L2"
+	depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
+	default "y"
+	help
+	  Enable locking parts of the kernel into the L2 cache.
+
+config CAVIUM_OCTEON_LOCK_L2_TLB
+	bool "Lock the TLB handler in L2"
+	depends on CAVIUM_OCTEON_LOCK_L2
+	default "y"
+	help
+	  Lock the low level TLB fast path into L2.
+
+config CAVIUM_OCTEON_LOCK_L2_EXCEPTION
+	bool "Lock the exception handler in L2"
+	depends on CAVIUM_OCTEON_LOCK_L2
+	default "y"
+	help
+	  Lock the low level exception handler into L2.
+
+config CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
+	bool "Lock the interrupt handler in L2"
+	depends on CAVIUM_OCTEON_LOCK_L2
+	default "y"
+	help
+	  Lock the low level interrupt handler into L2.
+
+config CAVIUM_OCTEON_LOCK_L2_INTERRUPT
+	bool "Lock the 2nd level interrupt handler in L2"
+	depends on CAVIUM_OCTEON_LOCK_L2
+	default "y"
+	help
+	  Lock the 2nd level interrupt handler in L2.
+
+config CAVIUM_OCTEON_LOCK_L2_MEMCPY
+	bool "Lock memcpy() in L2"
+	depends on CAVIUM_OCTEON_LOCK_L2
+	default "y"
+	help
+	  Lock the kernel's implementation of memcpy() into L2.
+
+config ARCH_SPARSEMEM_ENABLE
+	def_bool y
+	select SPARSEMEM_STATIC
+	depends on CPU_CAVIUM_OCTEON
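For reference, CAVIUM_OCTEON_CVMSEG_SIZE above is expressed in cache blocks; assuming the Octeon 128-byte dcache line size (an assumption, not stated in this patch), the maximum of 54 blocks corresponds to 54 * 128 = 6912 bytes. The helper below is purely illustrative and not code from the patch.

#define OCTEON_CACHE_BLOCK_BYTES 128	/* assumed Octeon dcache line size */

/* Hypothetical helper: bytes reserved for CVMSEG LM given the Kconfig
 * value in cache blocks.
 */
static inline unsigned int cvmseg_bytes(unsigned int cache_blocks)
{
	return cache_blocks * OCTEON_CACHE_BLOCK_BYTES;
}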
diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile
new file mode 100644
index 0000000..1c2a7fa
--- /dev/null
+++ b/arch/mips/cavium-octeon/Makefile
@@ -0,0 +1,16 @@
+#
+# Makefile for the Cavium Octeon specific kernel interface routines
+# under Linux.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2005-2008 Cavium Networks
+#
+
+obj-y := setup.o serial.o octeon-irq.o csrc-octeon.o
+obj-y += dma-octeon.o flash_setup.o
+obj-y += octeon-memcpy.o
+
+obj-$(CONFIG_SMP)                     += smp.o
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
new file mode 100644
index 0000000..70fd92c
--- /dev/null
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -0,0 +1,58 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 by Ralf Baechle
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-ipd-defs.h>
+
+/*
+ * Set the current core's cvmcount counter to the value of the
+ * IPD_CLK_COUNT.  We do this on all cores as they are brought
+ * on-line.  This allows for a read from a local cpu register to
+ * access a synchronized counter.
+ *
+ */
+void octeon_init_cvmcount(void)
+{
+	unsigned long flags;
+	unsigned loops = 2;
+
+	/* Clobber loops so GCC will not unroll the following while loop. */
+	asm("" : "+r" (loops));
+
+	local_irq_save(flags);
+	/*
+	 * Loop several times so we are executing from the cache,
+	 * which should give more deterministic timing.
+	 */
+	while (loops--)
+		write_c0_cvmcount(cvmx_read_csr(CVMX_IPD_CLK_COUNT));
+	local_irq_restore(flags);
+}
+
+static cycle_t octeon_cvmcount_read(void)
+{
+	return read_c0_cvmcount();
+}
+
+static struct clocksource clocksource_mips = {
+	.name		= "OCTEON_CVMCOUNT",
+	.read		= octeon_cvmcount_read,
+	.mask		= CLOCKSOURCE_MASK(64),
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init plat_time_init(void)
+{
+	clocksource_mips.rating = 300;
+	clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
+	clocksource_register(&clocksource_mips);
+}
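A hedged usage sketch for the clocksource above: converting a delta of the synchronized cvmcount counter into nanoseconds, assuming the counter ticks at mips_hpt_frequency as registered in plat_time_init(). The helper name is hypothetical and not part of this patch.

#include <linux/types.h>
#include <linux/math64.h>
#include <linux/time.h>
#include <asm/time.h>

/* Hypothetical helper, not part of this patch. */
static inline u64 octeon_cvmcount_delta_to_ns(u64 start, u64 end)
{
	/* ns = cycles * NSEC_PER_SEC / Hz; may overflow for very large deltas */
	return div64_u64((end - start) * NSEC_PER_SEC, mips_hpt_frequency);
}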
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
new file mode 100644
index 0000000..01b1ef9
--- /dev/null
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -0,0 +1,32 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
+ * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
+ * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
+ * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
+ * IP32 changes by Ilya.
+ * Cavium Networks: Create new dma setup for Cavium Networks Octeon based on
+ * the kernel's original.
+ */
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <dma-coherence.h>
+
+dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
+{
+	/* Without PCI/PCIe this function can be called for Octeon internal
+	 * devices such as USB. These devices all support 64bit addressing. */
+	mb();
+	return virt_to_phys(ptr);
+}
+
+void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
+{
+	/* Without PCI/PCIe this function can be called for Octeon internal
+	 * devices such as USB. These devices all support 64bit addressing */
+	return;
+}
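Since this path has no PCI/PCIe, octeon_map_dma_mem() above is an identity translation (virt_to_phys) behind a write barrier, and the unmap is a no-op. A hypothetical usage sketch for an on-chip device driver, using only the two functions introduced above (the example_* names are not part of this patch):

#include <linux/types.h>
#include <linux/device.h>

/* Hypothetical usage, not part of this patch: map a kernel buffer for an
 * on-chip (non-PCI) device, then release the mapping for symmetry.
 */
static dma_addr_t example_map_buffer(struct device *dev, void *buf, size_t len)
{
	return octeon_map_dma_mem(dev, buf, len);	/* bus address == physical */
}

static void example_unmap_buffer(struct device *dev, dma_addr_t handle)
{
	octeon_unmap_dma_mem(dev, handle);	/* currently a no-op */
}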
diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile
new file mode 100644
index 0000000..80d6cb2
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the Cavium Octeon specific kernel interface routines
+# under Linux.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2005-2008 Cavium Networks
+#
+
+obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
+
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
new file mode 100644
index 0000000..4f5a08b
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -0,0 +1,586 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Simple allocate only memory allocator.  Used to allocate memory at
+ * application start time.
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-spinlock.h>
+#include <asm/octeon/cvmx-bootmem.h>
+
+/*#define DEBUG */
+
+
+static struct cvmx_bootmem_desc *cvmx_bootmem_desc;
+
+/* See header file for descriptions of functions */
+
+/*
+ * Wrapper functions are provided for reading/writing the size and
+ * next block values as these may not be directly addressable (in 32
+ * bit applications, for instance).  Offsets of data elements in the
+ * bootmem list must match cvmx_bootmem_block_header_t.
+ */
+#define NEXT_OFFSET 0
+#define SIZE_OFFSET 8
+
+static void cvmx_bootmem_phy_set_size(uint64_t addr, uint64_t size)
+{
+	cvmx_write64_uint64((addr + SIZE_OFFSET) | (1ull << 63), size);
+}
+
+static void cvmx_bootmem_phy_set_next(uint64_t addr, uint64_t next)
+{
+	cvmx_write64_uint64((addr + NEXT_OFFSET) | (1ull << 63), next);
+}
+
+static uint64_t cvmx_bootmem_phy_get_size(uint64_t addr)
+{
+	return cvmx_read64_uint64((addr + SIZE_OFFSET) | (1ull << 63));
+}
+
+static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr)
+{
+	return cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63));
+}
+
+void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
+			       uint64_t min_addr, uint64_t max_addr)
+{
+	int64_t address;
+	address =
+	    cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, 0);
+
+	if (address > 0)
+		return cvmx_phys_to_ptr(address);
+	else
+		return NULL;
+}
+
+void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address,
+				 uint64_t alignment)
+{
+	return cvmx_bootmem_alloc_range(size, alignment, address,
+					address + size);
+}
+
+void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment)
+{
+	return cvmx_bootmem_alloc_range(size, alignment, 0, 0);
+}
+
+int cvmx_bootmem_free_named(char *name)
+{
+	return cvmx_bootmem_phy_named_block_free(name, 0);
+}
+
+struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name)
+{
+	return cvmx_bootmem_phy_named_block_find(name, 0);
+}
+
+void cvmx_bootmem_lock(void)
+{
+	cvmx_spinlock_lock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
+}
+
+void cvmx_bootmem_unlock(void)
+{
+	cvmx_spinlock_unlock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
+}
+
+int cvmx_bootmem_init(void *mem_desc_ptr)
+{
+	/* Here we set the global pointer to the bootmem descriptor
+	 * block.  This pointer will be used directly, so we will set
+	 * it up to be directly usable by the application.  It is set
+	 * up as follows for the various runtime/ABI combinations:
+	 *
+	 * Linux 64 bit: Set XKPHYS bit
+	 * Linux 32 bit: use mmap to create mapping, use virtual address
+	 * CVMX 64 bit:  use physical address directly
+	 * CVMX 32 bit:  use physical address directly
+	 *
+	 * Note that the CVMX environment assumes the use of 1-1 TLB
+	 * mappings so that the physical addresses can be used
+	 * directly
+	 */
+	if (!cvmx_bootmem_desc) {
+#if   defined(CVMX_ABI_64)
+		/* Set XKPHYS bit */
+		cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));
+#else
+		cvmx_bootmem_desc = (struct cvmx_bootmem_desc *) mem_desc_ptr;
+#endif
+	}
+
+	return 0;
+}
+
+/*
+ * The cvmx_bootmem_phy* functions below return 64 bit physical
+ * addresses, and expose more features than the cvmx_bootmem functions
+ * above.  These are required for full memory space access in 32 bit
+ * applications, as well as for using some advanced features.  Most
+ * applications should not need to use these.
+ */
+
+int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
+			       uint64_t address_max, uint64_t alignment,
+			       uint32_t flags)
+{
+
+	uint64_t head_addr;
+	uint64_t ent_addr;
+	/* previous list entry; 0 when the current entry is the list head */
+	uint64_t prev_addr = 0;
+	uint64_t new_ent_addr = 0;
+	uint64_t desired_min_addr;
+
+#ifdef DEBUG
+	cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, "
+		     "min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
+		     (unsigned long long)req_size,
+		     (unsigned long long)address_min,
+		     (unsigned long long)address_max,
+		     (unsigned long long)alignment);
+#endif
+
+	if (cvmx_bootmem_desc->major_version > 3) {
+		cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
+			     "version: %d.%d at addr: %p\n",
+			     (int)cvmx_bootmem_desc->major_version,
+			     (int)cvmx_bootmem_desc->minor_version,
+			     cvmx_bootmem_desc);
+		goto error_out;
+	}
+
+	/*
+	 * Do a variety of checks to validate the arguments.  The
+	 * allocator code will later assume that these checks have
+	 * been made.  We validate that the requested constraints are
+	 * not self-contradictory before we look through the list of
+	 * available memory.
+	 */
+
+	/* 0 is not a valid req_size for this allocator */
+	if (!req_size)
+		goto error_out;
+
+	/* Round req_size up to mult of minimum alignment bytes */
+	req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
+		~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
+
+	/*
+	 * Convert !0 address_min and 0 address_max to special case of
+	 * range that specifies an exact memory block to allocate.  Do
+	 * this before other checks and adjustments so that this
+	 * transformation will be validated.
+	 */
+	if (address_min && !address_max)
+		address_max = address_min + req_size;
+	else if (!address_min && !address_max)
+		address_max = ~0ull;  /* If no limits given, use max limits */
+
+
+	/*
+	 * Enforce the minimum alignment (this also keeps the minimum free
+	 * block req_size the same as the alignment req_size).
+	 */
+	if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
+		alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;
+
+	/*
+	 * Adjust address minimum based on requested alignment (round
+	 * up to meet alignment).  Do this here so we can reject
+	 * impossible requests up front. (NOP for address_min == 0)
+	 */
+	if (alignment)
+		address_min = __ALIGN_MASK(address_min, (alignment - 1));
+
+	/*
+	 * Reject inconsistent args.  We have adjusted these, so this
+	 * may fail due to our internal changes even if this check
+	 * would pass for the values the user supplied.
+	 */
+	if (req_size > address_max - address_min)
+		goto error_out;
+
+	/* Walk through the list entries - first fit found is returned */
+
+	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+		cvmx_bootmem_lock();
+	head_addr = cvmx_bootmem_desc->head_addr;
+	ent_addr = head_addr;
+	for (; ent_addr;
+	     prev_addr = ent_addr,
+	     ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) {
+		uint64_t usable_base, usable_max;
+		uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);
+
+		if (cvmx_bootmem_phy_get_next(ent_addr)
+		    && ent_addr > cvmx_bootmem_phy_get_next(ent_addr)) {
+			cvmx_dprintf("Internal bootmem_alloc() error: ent: "
+				"0x%llx, next: 0x%llx\n",
+				(unsigned long long)ent_addr,
+				(unsigned long long)
+				cvmx_bootmem_phy_get_next(ent_addr));
+			goto error_out;
+		}
+
+		/*
+		 * Determine if this is an entry that can satisfy the
+		 * request.  Check to make sure the entry is large enough
+		 * to satisfy the request.
+		 */
+		usable_base =
+		    __ALIGN_MASK(max(address_min, ent_addr), alignment - 1);
+		usable_max = min(address_max, ent_addr + ent_size);
+		/*
+		 * We should be able to allocate block at address
+		 * usable_base.
+		 */
+
+		desired_min_addr = usable_base;
+		/*
+		 * Determine if request can be satisfied from the
+		 * current entry.
+		 */
+		if (!((ent_addr + ent_size) > usable_base
+				&& ent_addr < address_max
+				&& req_size <= usable_max - usable_base))
+			continue;
+		/*
+		 * We have found an entry that has room to satisfy the
+		 * request, so allocate it from this entry.  If
+		 * CVMX_BOOTMEM_FLAG_END_ALLOC is set, then allocate from
+		 * the end of this block rather than the beginning.
+		 */
+		if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC) {
+			desired_min_addr = usable_max - req_size;
+			/*
+			 * Align desired address down to required
+			 * alignment.
+			 */
+			desired_min_addr &= ~(alignment - 1);
+		}
+
+		/* Match at start of entry */
+		if (desired_min_addr == ent_addr) {
+			if (req_size < ent_size) {
+				/*
+				 * The entry is big enough to create a
+				 * new block from its top portion.
+				 */
+				new_ent_addr = ent_addr + req_size;
+				cvmx_bootmem_phy_set_next(new_ent_addr,
+					cvmx_bootmem_phy_get_next(ent_addr));
+				cvmx_bootmem_phy_set_size(new_ent_addr,
+							ent_size -
+							req_size);
+
+				/*
+				 * Adjust next pointer as following
+				 * code uses this.
+				 */
+				cvmx_bootmem_phy_set_next(ent_addr,
+							new_ent_addr);
+			}
+
+			/*
+			 * adjust prev ptr or head to remove this
+			 * entry from list.
+			 */
+			if (prev_addr)
+				cvmx_bootmem_phy_set_next(prev_addr,
+					cvmx_bootmem_phy_get_next(ent_addr));
+			else
+				/*
+				 * head of list being returned, so
+				 * update head ptr.
+				 */
+				cvmx_bootmem_desc->head_addr =
+					cvmx_bootmem_phy_get_next(ent_addr);
+
+			if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+				cvmx_bootmem_unlock();
+			return desired_min_addr;
+		}
+		/*
+		 * The block being returned doesn't start at the
+		 * beginning of this entry, so we will be splitting a
+		 * block off the front of it.  Create a new block from
+		 * the high portion of the entry, so that the new block
+		 * starts at the desired address, add it to the list,
+		 * and go to the top of the loop again to do the actual
+		 * allocation from the new block.
+		 */
+		new_ent_addr = desired_min_addr;
+		cvmx_bootmem_phy_set_next(new_ent_addr,
+					cvmx_bootmem_phy_get_next
+					(ent_addr));
+		cvmx_bootmem_phy_set_size(new_ent_addr,
+					cvmx_bootmem_phy_get_size
+					(ent_addr) -
+					(desired_min_addr -
+						ent_addr));
+		cvmx_bootmem_phy_set_size(ent_addr,
+					desired_min_addr - ent_addr);
+		cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
+		/* Loop again to handle actual alloc from new block */
+	}
+error_out:
+	/* We didn't find anything, so return error */
+	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+		cvmx_bootmem_unlock();
+	return -1;
+}
+
+int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
+{
+	uint64_t cur_addr;
+	uint64_t prev_addr = 0;	/* zero is invalid */
+	int retval = 0;
+
+#ifdef DEBUG
+	cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n",
+		     (unsigned long long)phy_addr, (unsigned long long)size);
+#endif
+	if (cvmx_bootmem_desc->major_version > 3) {
+		cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
+			     "version: %d.%d at addr: %p\n",
+			     (int)cvmx_bootmem_desc->major_version,
+			     (int)cvmx_bootmem_desc->minor_version,
+			     cvmx_bootmem_desc);
+		return 0;
+	}
+
+	/* 0 is not a valid size for this allocator */
+	if (!size)
+		return 0;
+
+	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+		cvmx_bootmem_lock();
+	cur_addr = cvmx_bootmem_desc->head_addr;
+	if (cur_addr == 0 || phy_addr < cur_addr) {
+		/* add at front of list - special case with changing head ptr */
+		if (cur_addr && phy_addr + size > cur_addr)
+			goto bootmem_free_done;	/* error, overlapping section */
+		else if (phy_addr + size == cur_addr) {
+			/* Add to front of existing first block */
+			cvmx_bootmem_phy_set_next(phy_addr,
+						  cvmx_bootmem_phy_get_next
+						  (cur_addr));
+			cvmx_bootmem_phy_set_size(phy_addr,
+						  cvmx_bootmem_phy_get_size
+						  (cur_addr) + size);
+			cvmx_bootmem_desc->head_addr = phy_addr;
+
+		} else {
+			/* New block before first block.  OK if cur_addr is 0 */
+			cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
+			cvmx_bootmem_phy_set_size(phy_addr, size);
+			cvmx_bootmem_desc->head_addr = phy_addr;
+		}
+		retval = 1;
+		goto bootmem_free_done;
+	}
+
+	/* Find place in list to add block */
+	while (cur_addr && phy_addr > cur_addr) {
+		prev_addr = cur_addr;
+		cur_addr = cvmx_bootmem_phy_get_next(cur_addr);
+	}
+
+	if (!cur_addr) {
+		/*
+		 * We have reached the end of the list, add on to end,
+		 * checking to see if we need to combine with last
+		 * block
+		 */
+		if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
+		    phy_addr) {
+			cvmx_bootmem_phy_set_size(prev_addr,
+						  cvmx_bootmem_phy_get_size
+						  (prev_addr) + size);
+		} else {
+			cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+			cvmx_bootmem_phy_set_size(phy_addr, size);
+			cvmx_bootmem_phy_set_next(phy_addr, 0);
+		}
+		retval = 1;
+		goto bootmem_free_done;
+	} else {
+		/*
+		 * insert between prev and cur nodes, checking for
+		 * merge with either/both.
+		 */
+		if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
+		    phy_addr) {
+			/* Merge with previous */
+			cvmx_bootmem_phy_set_size(prev_addr,
+						  cvmx_bootmem_phy_get_size
+						  (prev_addr) + size);
+			if (phy_addr + size == cur_addr) {
+				/* Also merge with current */
+				cvmx_bootmem_phy_set_size(prev_addr,
+					cvmx_bootmem_phy_get_size(cur_addr) +
+					cvmx_bootmem_phy_get_size(prev_addr));
+				cvmx_bootmem_phy_set_next(prev_addr,
+					cvmx_bootmem_phy_get_next(cur_addr));
+			}
+			retval = 1;
+			goto bootmem_free_done;
+		} else if (phy_addr + size == cur_addr) {
+			/* Merge with current */
+			cvmx_bootmem_phy_set_size(phy_addr,
+						  cvmx_bootmem_phy_get_size
+						  (cur_addr) + size);
+			cvmx_bootmem_phy_set_next(phy_addr,
+						  cvmx_bootmem_phy_get_next
+						  (cur_addr));
+			cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+			retval = 1;
+			goto bootmem_free_done;
+		}
+
+		/* It is a standalone block, add in between prev and cur */
+		cvmx_bootmem_phy_set_size(phy_addr, size);
+		cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
+		cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+
+	}
+	retval = 1;
+
+bootmem_free_done:
+	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+		cvmx_bootmem_unlock();
+	return retval;
+
+}
+
+struct cvmx_bootmem_named_block_desc *
+	cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags)
+{
+	unsigned int i;
+	struct cvmx_bootmem_named_block_desc *named_block_array_ptr;
+
+#ifdef DEBUG
+	cvmx_dprintf("cvmx_bootmem_phy_named_block_find: %s\n", name);
+#endif
+	/*
+	 * Lock the structure to make sure that it is not being
+	 * changed while we are examining it.
+	 */
+	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+		cvmx_bootmem_lock();
+
+	/* Use XKPHYS for 64 bit linux */
+	named_block_array_ptr = (struct cvmx_bootmem_named_block_desc *)
+	    cvmx_phys_to_ptr(cvmx_bootmem_desc->named_block_array_addr);
+
+#ifdef DEBUG
+	cvmx_dprintf
+	    ("cvmx_bootmem_phy_named_block_find: named_block_array_ptr: %p\n",
+	     named_block_array_ptr);
+#endif
+	if (cvmx_bootmem_desc->major_version == 3) {
+		for (i = 0;
+		     i < cvmx_bootmem_desc->named_block_num_blocks; i++) {
+			if ((name && named_block_array_ptr[i].size
+			     && !strncmp(name, named_block_array_ptr[i].name,
+					 cvmx_bootmem_desc->named_block_name_len
+					 - 1))
+			    || (!name && !named_block_array_ptr[i].size)) {
+				if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+					cvmx_bootmem_unlock();
+
+				return &(named_block_array_ptr[i]);
+			}
+		}
+	} else {
+		cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
+			     "version: %d.%d at addr: %p\n",
+			     (int)cvmx_bootmem_desc->major_version,
+			     (int)cvmx_bootmem_desc->minor_version,
+			     cvmx_bootmem_desc);
+	}
+	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+		cvmx_bootmem_unlock();
+
+	return NULL;
+}
+
+int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
+{
+	struct cvmx_bootmem_named_block_desc *named_block_ptr;
+
+	if (cvmx_bootmem_desc->major_version != 3) {
+		cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
+			     "%d.%d at addr: %p\n",
+			     (int)cvmx_bootmem_desc->major_version,
+			     (int)cvmx_bootmem_desc->minor_version,
+			     cvmx_bootmem_desc);
+		return 0;
+	}
+#ifdef DEBUG
+	cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s\n", name);
+#endif
+
+	/*
+	 * Take lock here, as name lookup/block free/name free need to
+	 * be atomic.
+	 */
+	cvmx_bootmem_lock();
+
+	named_block_ptr =
+	    cvmx_bootmem_phy_named_block_find(name,
+					      CVMX_BOOTMEM_FLAG_NO_LOCKING);
+	if (named_block_ptr) {
+#ifdef DEBUG
+		cvmx_dprintf("cvmx_bootmem_phy_named_block_free: "
+			     "%s, base: 0x%llx, size: 0x%llx\n",
+			     name,
+			     (unsigned long long)named_block_ptr->base_addr,
+			     (unsigned long long)named_block_ptr->size);
+#endif
+		__cvmx_bootmem_phy_free(named_block_ptr->base_addr,
+					named_block_ptr->size,
+					CVMX_BOOTMEM_FLAG_NO_LOCKING);
+		named_block_ptr->size = 0;
+		/* Set size to zero to indicate block not used. */
+	}
+
+	cvmx_bootmem_unlock();
+	return named_block_ptr != NULL;	/* 0 on failure, 1 on success */
+}
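The allocator above keeps its free list inside free memory itself: each free block begins with a small header holding the next-block physical address at offset 0 (NEXT_OFFSET) and the block size at offset 8 (SIZE_OFFSET), accessed through the XKPHYS window (the 1ull << 63 OR). cvmx_bootmem_block_header_t is referenced but not defined in this patch, so the struct below is only an assumed sketch of that layout.

#include <linux/types.h>

/* Assumed layout matching NEXT_OFFSET == 0 and SIZE_OFFSET == 8 above;
 * the authoritative definition lives elsewhere (cvmx-bootmem.h) and may differ.
 */
struct example_bootmem_block_header {
	uint64_t next_block_addr;	/* physical address of next free block, 0 at list end */
	uint64_t size;			/* size of this free block in bytes */
};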
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
new file mode 100644
index 0000000..6abe56f
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -0,0 +1,734 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Implementation of the Level 2 Cache (L2C) control, measurement, and
+ * debugging facilities.
+ */
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-l2c.h>
+#include <asm/octeon/cvmx-spinlock.h>
+
+/*
+ * This spinlock is used internally to ensure that only one core is
+ * performing certain L2 operations at a time.
+ *
+ * NOTE: This only protects calls from within a single application -
+ * if multiple applications or operating systems are running, then it
+ * is up to the user program to coordinate between them.
+ */
+static cvmx_spinlock_t cvmx_l2c_spinlock;
+
+static inline int l2_size_half(void)
+{
+	uint64_t val = cvmx_read_csr(CVMX_L2D_FUS3);
+	return !!(val & (1ull << 34));
+}
+
+int cvmx_l2c_get_core_way_partition(uint32_t core)
+{
+	uint32_t field;
+
+	/* Validate the core number */
+	if (core >= cvmx_octeon_num_cores())
+		return -1;
+
+	/*
+	 * Use the lower two bits of the coreNumber to determine the
+	 * bit offset of the UMSK[] field in the L2C_SPAR register.
+	 */
+	field = (core & 0x3) * 8;
+
+	/*
+	 * Return the UMSK[] field from the appropriate L2C_SPAR
+	 * register based on the coreNumber.
+	 */
+
+	switch (core & 0xC) {
+	case 0x0:
+		return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >>
+			field;
+	case 0x4:
+		return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >>
+			field;
+	case 0x8:
+		return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >>
+			field;
+	case 0xC:
+		return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >>
+			field;
+	}
+	return 0;
+}
+
+int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
+{
+	uint32_t field;
+	uint32_t valid_mask;
+
+	valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
+
+	mask &= valid_mask;
+
+	/* A UMSK setting which blocks all L2C Ways is an error. */
+	if (mask == valid_mask)
+		return -1;
+
+	/* Validate the core number */
+	if (core >= cvmx_octeon_num_cores())
+		return -1;
+
+	/* Check to make sure current mask & new mask don't block all ways */
+	if (((mask | cvmx_l2c_get_core_way_partition(core)) & valid_mask) ==
+	    valid_mask)
+		return -1;
+
+	/* Use the lower two bits of core to determine the bit offset of the
+	 * UMSK[] field in the L2C_SPAR register.
+	 */
+	field = (core & 0x3) * 8;
+
+	/* Assign the new mask setting to the UMSK[] field in the appropriate
+	 * L2C_SPAR register based on the core_num.
+	 */
+	switch (core & 0xC) {
+	case 0x0:
+		cvmx_write_csr(CVMX_L2C_SPAR0,
+			       (cvmx_read_csr(CVMX_L2C_SPAR0) &
+				~(0xFF << field)) | mask << field);
+		break;
+	case 0x4:
+		cvmx_write_csr(CVMX_L2C_SPAR1,
+			       (cvmx_read_csr(CVMX_L2C_SPAR1) &
+				~(0xFF << field)) | mask << field);
+		break;
+	case 0x8:
+		cvmx_write_csr(CVMX_L2C_SPAR2,
+			       (cvmx_read_csr(CVMX_L2C_SPAR2) &
+				~(0xFF << field)) | mask << field);
+		break;
+	case 0xC:
+		cvmx_write_csr(CVMX_L2C_SPAR3,
+			       (cvmx_read_csr(CVMX_L2C_SPAR3) &
+				~(0xFF << field)) | mask << field);
+		break;
+	}
+	return 0;
+}
+
+int cvmx_l2c_set_hw_way_partition(uint32_t mask)
+{
+	uint32_t valid_mask;
+
+	valid_mask = 0xff;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+		if (l2_size_half())
+			valid_mask = 0xf;
+	} else if (l2_size_half())
+		valid_mask = 0x3;
+
+	mask &= valid_mask;
+
+	/* A UMSK setting which blocks all L2C Ways is an error. */
+	if (mask == valid_mask)
+		return -1;
+	/* Check to make sure current mask & new mask don't block all ways */
+	if (((mask | cvmx_l2c_get_hw_way_partition()) & valid_mask) ==
+	    valid_mask)
+		return -1;
+
+	cvmx_write_csr(CVMX_L2C_SPAR4,
+		       (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
+	return 0;
+}
+
+int cvmx_l2c_get_hw_way_partition(void)
+{
+	return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
+}
+
+void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
+			  uint32_t clear_on_read)
+{
+	union cvmx_l2c_pfctl pfctl;
+
+	pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
+
+	switch (counter) {
+	case 0:
+		pfctl.s.cnt0sel = event;
+		pfctl.s.cnt0ena = 1;
+		if (!cvmx_octeon_is_pass1())
+			pfctl.s.cnt0rdclr = clear_on_read;
+		break;
+	case 1:
+		pfctl.s.cnt1sel = event;
+		pfctl.s.cnt1ena = 1;
+		if (!cvmx_octeon_is_pass1())
+			pfctl.s.cnt1rdclr = clear_on_read;
+		break;
+	case 2:
+		pfctl.s.cnt2sel = event;
+		pfctl.s.cnt2ena = 1;
+		if (!cvmx_octeon_is_pass1())
+			pfctl.s.cnt2rdclr = clear_on_read;
+		break;
+	case 3:
+	default:
+		pfctl.s.cnt3sel = event;
+		pfctl.s.cnt3ena = 1;
+		if (!cvmx_octeon_is_pass1())
+			pfctl.s.cnt3rdclr = clear_on_read;
+		break;
+	}
+
+	cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
+}
+
+uint64_t cvmx_l2c_read_perf(uint32_t counter)
+{
+	switch (counter) {
+	case 0:
+		return cvmx_read_csr(CVMX_L2C_PFC0);
+	case 1:
+		return cvmx_read_csr(CVMX_L2C_PFC1);
+	case 2:
+		return cvmx_read_csr(CVMX_L2C_PFC2);
+	case 3:
+	default:
+		return cvmx_read_csr(CVMX_L2C_PFC3);
+	}
+}
+
+/**
+ * @INTERNAL
+ * Helper function used to fault in cache lines for L2 cache locking.
+ *
+ * @addr:   Address of base of memory region to read into L2 cache
+ * @len:    Length (in bytes) of region to fault in
+ */
+static void fault_in(uint64_t addr, int len)
+{
+	volatile char *ptr;
+	volatile char dummy;
+	/*
+	 * Adjust addr and length so we get all cache lines even for
+	 * small ranges spanning two cache lines
+	 */
+	len += addr & CVMX_CACHE_LINE_MASK;
+	addr &= ~CVMX_CACHE_LINE_MASK;
+	ptr = (volatile char *)cvmx_phys_to_ptr(addr);
+	/*
+	 * Invalidate L1 cache to make sure all loads result in data
+	 * being in L2.
+	 */
+	CVMX_DCACHE_INVALIDATE;
+	while (len > 0) {
+		dummy += *ptr;
+		len -= CVMX_CACHE_LINE_SIZE;
+		ptr += CVMX_CACHE_LINE_SIZE;
+	}
+}
+
+int cvmx_l2c_lock_line(uint64_t addr)
+{
+	int retval = 0;
+	union cvmx_l2c_dbg l2cdbg;
+	union cvmx_l2c_lckbase lckbase;
+	union cvmx_l2c_lckoff lckoff;
+	union cvmx_l2t_err l2t_err;
+	l2cdbg.u64 = 0;
+	lckbase.u64 = 0;
+	lckoff.u64 = 0;
+
+	cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+
+	/* Clear l2t error bits if set */
+	l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+	l2t_err.s.lckerr = 1;
+	l2t_err.s.lckerr2 = 1;
+	cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
+
+	addr &= ~CVMX_CACHE_LINE_MASK;
+
+	/* Set this core as debug core */
+	l2cdbg.s.ppnum = cvmx_get_core_num();
+	CVMX_SYNC;
+	cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+	cvmx_read_csr(CVMX_L2C_DBG);
+
+	lckoff.s.lck_offset = 0;	/* Only lock 1 line at a time */
+	cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
+	cvmx_read_csr(CVMX_L2C_LCKOFF);
+
+	if (((union cvmx_l2c_cfg) (cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
+		int alias_shift =
+		    CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
+		uint64_t addr_tmp =
+		    addr ^ (addr & ((1 << alias_shift) - 1)) >>
+		    CVMX_L2_SET_BITS;
+		lckbase.s.lck_base = addr_tmp >> 7;
+	} else {
+		lckbase.s.lck_base = addr >> 7;
+	}
+
+	lckbase.s.lck_ena = 1;
+	cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+	cvmx_read_csr(CVMX_L2C_LCKBASE);	/* Make sure it gets there */
+
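+	/* Touch the line while lck_ena is set so the load pulls it into the L2 and locks it. */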
+	fault_in(addr, CVMX_CACHE_LINE_SIZE);
+
+	lckbase.s.lck_ena = 0;
+	cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+	cvmx_read_csr(CVMX_L2C_LCKBASE);	/* Make sure it gets there */
+
+	/* Stop being debug core */
+	cvmx_write_csr(CVMX_L2C_DBG, 0);
+	cvmx_read_csr(CVMX_L2C_DBG);
+
+	l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+	if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
+		retval = 1;	/* We were unable to lock the line */
+
+	cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+
+	return retval;
+}
+
+int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
+{
+	int retval = 0;
+
+	/* Round start/end to cache line boundaries */
+	len += start & CVMX_CACHE_LINE_MASK;
+	start &= ~CVMX_CACHE_LINE_MASK;
+	len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
+
+	while (len) {
+		retval += cvmx_l2c_lock_line(start);
+		start += CVMX_CACHE_LINE_SIZE;
+		len -= CVMX_CACHE_LINE_SIZE;
+	}
+
+	return retval;
+}
+
+void cvmx_l2c_flush(void)
+{
+	uint64_t assoc, set;
+	uint64_t n_assoc, n_set;
+	union cvmx_l2c_dbg l2cdbg;
+
+	cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+
+	l2cdbg.u64 = 0;
+	if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
+		l2cdbg.s.ppnum = cvmx_get_core_num();
+	l2cdbg.s.finv = 1;
+	n_set = CVMX_L2_SETS;
+	n_assoc = l2_size_half() ? (CVMX_L2_ASSOC / 2) : CVMX_L2_ASSOC;
+	for (set = 0; set < n_set; set++) {
+		for (assoc = 0; assoc < n_assoc; assoc++) {
+			l2cdbg.s.set = assoc;
+			/*
+			 * Enter debug mode, and make sure all other
+			 * writes complete before we enter debug mode.
+			 */
+			CVMX_SYNCW;
+			cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+			cvmx_read_csr(CVMX_L2C_DBG);
+
+			CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
+					       (CVMX_MIPS_SPACE_XKPHYS,
+						set * CVMX_CACHE_LINE_SIZE), 0);
+			CVMX_SYNCW;	/* Push STF out to L2 */
+			/* Exit debug mode */
+			CVMX_SYNC;
+			cvmx_write_csr(CVMX_L2C_DBG, 0);
+			cvmx_read_csr(CVMX_L2C_DBG);
+		}
+	}
+
+	cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+}
+
+int cvmx_l2c_unlock_line(uint64_t address)
+{
+	int assoc;
+	union cvmx_l2c_tag tag;
+	union cvmx_l2c_dbg l2cdbg;
+	uint32_t tag_addr;
+
+	uint32_t index = cvmx_l2c_address_to_index(address);
+
+	cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+	/* Compute portion of address that is stored in tag */
+	tag_addr =
+	    ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) &
+	     ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
+	for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
+		tag = cvmx_get_l2c_tag(assoc, index);
+
+		if (tag.s.V && (tag.s.addr == tag_addr)) {
+			l2cdbg.u64 = 0;
+			l2cdbg.s.ppnum = cvmx_get_core_num();
+			l2cdbg.s.set = assoc;
+			l2cdbg.s.finv = 1;
+
+			CVMX_SYNC;
+			/* Enter debug mode */
+			cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+			cvmx_read_csr(CVMX_L2C_DBG);
+
+			CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
+					       (CVMX_MIPS_SPACE_XKPHYS,
+						address), 0);
+			CVMX_SYNC;
+			/* Exit debug mode */
+			cvmx_write_csr(CVMX_L2C_DBG, 0);
+			cvmx_read_csr(CVMX_L2C_DBG);
+			cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+			return tag.s.L;
+		}
+	}
+	cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+	return 0;
+}
+
+int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
+{
+	int num_unlocked = 0;
+	/* Round start/end to cache line boundaries */
+	len += start & CVMX_CACHE_LINE_MASK;
+	start &= ~CVMX_CACHE_LINE_MASK;
+	len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
+	while (len > 0) {
+		num_unlocked += cvmx_l2c_unlock_line(start);
+		start += CVMX_CACHE_LINE_SIZE;
+		len -= CVMX_CACHE_LINE_SIZE;
+	}
+
+	return num_unlocked;
+}
+
+/*
+ * Internal l2c tag types.  These are converted to a generic structure
+ * that can be used on all chips.
+ */
+union __cvmx_l2c_tag {
+	uint64_t u64;
+	struct cvmx_l2c_tag_cn50xx {
+		uint64_t reserved:40;
+		uint64_t V:1;	/* Line valid */
+		uint64_t D:1;	/* Line dirty */
+		uint64_t L:1;	/* Line locked */
+		uint64_t U:1;	/* Use, LRU eviction */
+		uint64_t addr:20;	/* Phys mem addr (33..14) */
+	} cn50xx;
+	struct cvmx_l2c_tag_cn30xx {
+		uint64_t reserved:41;
+		uint64_t V:1;	/* Line valid */
+		uint64_t D:1;	/* Line dirty */
+		uint64_t L:1;	/* Line locked */
+		uint64_t U:1;	/* Use, LRU eviction */
+		uint64_t addr:19;	/* Phys mem addr (33..15) */
+	} cn30xx;
+	struct cvmx_l2c_tag_cn31xx {
+		uint64_t reserved:42;
+		uint64_t V:1;	/* Line valid */
+		uint64_t D:1;	/* Line dirty */
+		uint64_t L:1;	/* Line locked */
+		uint64_t U:1;	/* Use, LRU eviction */
+		uint64_t addr:18;	/* Phys mem addr (33..16) */
+	} cn31xx;
+	struct cvmx_l2c_tag_cn38xx {
+		uint64_t reserved:43;
+		uint64_t V:1;	/* Line valid */
+		uint64_t D:1;	/* Line dirty */
+		uint64_t L:1;	/* Line locked */
+		uint64_t U:1;	/* Use, LRU eviction */
+		uint64_t addr:17;	/* Phys mem addr (33..17) */
+	} cn38xx;
+	struct cvmx_l2c_tag_cn58xx {
+		uint64_t reserved:44;
+		uint64_t V:1;	/* Line valid */
+		uint64_t D:1;	/* Line dirty */
+		uint64_t L:1;	/* Line locked */
+		uint64_t U:1;	/* Use, LRU eviction */
+		uint64_t addr:16;	/* Phys mem addr (33..18) */
+	} cn58xx;
+	struct cvmx_l2c_tag_cn58xx cn56xx;	/* 2048 sets */
+	struct cvmx_l2c_tag_cn31xx cn52xx;	/* 512 sets */
+};
+
+/**
+ * @INTERNAL
+ * Function to read an L2C tag.  This code makes the current core
+ * the 'debug core' for the L2.  This code must only be executed by
+ * one core at a time.
+ *
+ * @assoc:  Association (way) of the tag to dump
+ * @index:  Index of the cacheline
+ *
+ * Returns The Octeon model specific tag structure.  This is
+ *         translated by a wrapper function to a generic form that is
+ *         easier for applications to use.
+ */
+static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
+{
+
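+	/*
+	 * Debug read address: bit 63 gives an XKPHYS (unmapped) access,
+	 * index << 7 selects the 128-byte cache line, and the load at
+	 * offset 96 returns the tag data while in L2 debug mode.
+	 */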
+	uint64_t debug_tag_addr = (((1ULL << 63) | (index << 7)) + 96);
+	uint64_t core = cvmx_get_core_num();
+	union __cvmx_l2c_tag tag_val;
+	uint64_t dbg_addr = CVMX_L2C_DBG;
+	unsigned long flags;
+
+	union cvmx_l2c_dbg debug_val;
+	debug_val.u64 = 0;
+	/*
+	 * For low core count parts, the core number is always small enough
+	 * to stay in the correct field and not set any reserved bits.
+	 */
+	debug_val.s.ppnum = core;
+	debug_val.s.l2t = 1;
+	debug_val.s.set = assoc;
+	/*
+	 * Make sure core is quiet (no prefetches, etc.) before
+	 * entering debug mode.
+	 */
+	CVMX_SYNC;
+	/* Flush L1 to make sure debug load misses L1 */
+	CVMX_DCACHE_INVALIDATE;
+
+	local_irq_save(flags);
+
+	/*
+	 * The following must be done in assembly as when in debug
+	 * mode all data loads from L2 return special debug data, not
+	 * normal memory contents.  Also, interrupts must be
+	 * disabled, since if an interrupt occurs while in debug mode
+	 * the ISR will get debug data from all its memory reads
+	 * instead of the contents of memory
+	 */
+
+	asm volatile (".set push              \n"
+		"        .set mips64              \n"
+		"        .set noreorder           \n"
+		/* Enter debug mode, wait for store */
+		"        sd    %[dbg_val], 0(%[dbg_addr])  \n"
+		"        ld    $0, 0(%[dbg_addr]) \n"
+		/* Read L2C tag data */
+		"        ld    %[tag_val], 0(%[tag_addr]) \n"
+		/* Exit debug mode, wait for store */
+		"        sd    $0, 0(%[dbg_addr])  \n"
+		"        ld    $0, 0(%[dbg_addr]) \n"
+		/* Invalidate dcache to discard debug data */
+		"        cache 9, 0($0) \n"
+		"        .set pop" :
+		[tag_val] "=r"(tag_val.u64) : [dbg_addr] "r"(dbg_addr),
+		[dbg_val] "r"(debug_val.u64),
+		[tag_addr] "r"(debug_tag_addr) : "memory");
+
+	local_irq_restore(flags);
+	return tag_val;
+
+}
+
+union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
+{
+	union __cvmx_l2c_tag tmp_tag;
+	union cvmx_l2c_tag tag;
+	tag.u64 = 0;
+
+	if ((int)association >= cvmx_l2c_get_num_assoc()) {
+		cvmx_dprintf
+		    ("ERROR: cvmx_get_l2c_tag association out of range\n");
+		return tag;
+	}
+	if ((int)index >= cvmx_l2c_get_num_sets()) {
+		cvmx_dprintf("ERROR: cvmx_get_l2c_tag "
+			     "index out of range (arg: %d, max: %d)\n",
+			     index, cvmx_l2c_get_num_sets());
+		return tag;
+	}
+	/* __read_l2_tag is intended for internal use only */
+	tmp_tag = __read_l2_tag(association, index);
+
+	/*
+	 * Convert all tag structure types to generic version, as it
+	 * can represent all models.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+		tag.s.V = tmp_tag.cn58xx.V;
+		tag.s.D = tmp_tag.cn58xx.D;
+		tag.s.L = tmp_tag.cn58xx.L;
+		tag.s.U = tmp_tag.cn58xx.U;
+		tag.s.addr = tmp_tag.cn58xx.addr;
+	} else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+		tag.s.V = tmp_tag.cn38xx.V;
+		tag.s.D = tmp_tag.cn38xx.D;
+		tag.s.L = tmp_tag.cn38xx.L;
+		tag.s.U = tmp_tag.cn38xx.U;
+		tag.s.addr = tmp_tag.cn38xx.addr;
+	} else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+		   || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+		tag.s.V = tmp_tag.cn31xx.V;
+		tag.s.D = tmp_tag.cn31xx.D;
+		tag.s.L = tmp_tag.cn31xx.L;
+		tag.s.U = tmp_tag.cn31xx.U;
+		tag.s.addr = tmp_tag.cn31xx.addr;
+	} else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
+		tag.s.V = tmp_tag.cn30xx.V;
+		tag.s.D = tmp_tag.cn30xx.D;
+		tag.s.L = tmp_tag.cn30xx.L;
+		tag.s.U = tmp_tag.cn30xx.U;
+		tag.s.addr = tmp_tag.cn30xx.addr;
+	} else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+		tag.s.V = tmp_tag.cn50xx.V;
+		tag.s.D = tmp_tag.cn50xx.D;
+		tag.s.L = tmp_tag.cn50xx.L;
+		tag.s.U = tmp_tag.cn50xx.U;
+		tag.s.addr = tmp_tag.cn50xx.addr;
+	} else {
+		cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+	}
+
+	return tag;
+}
+
+uint32_t cvmx_l2c_address_to_index(uint64_t addr)
+{
+	uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
+	union cvmx_l2c_cfg l2c_cfg;
+	l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
+
+	if (l2c_cfg.s.idxalias) {
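+		/* Index aliasing XORs upper physical address bits into the set index. */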
+		idx ^=
+		    ((addr & CVMX_L2C_ALIAS_MASK) >>
+		     CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
+	}
+	idx &= CVMX_L2C_IDX_MASK;
+	return idx;
+}
+
+int cvmx_l2c_get_cache_size_bytes(void)
+{
+	return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() *
+		CVMX_CACHE_LINE_SIZE;
+}
+
+/**
+ * Return log base 2 of the number of sets in the L2 cache.
+ *
+ * Returns the number of set index bits.
+ */
+int cvmx_l2c_get_set_bits(void)
+{
+	int l2_set_bits;
+	if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
+		l2_set_bits = 11;	/* 2048 sets */
+	else if (OCTEON_IS_MODEL(OCTEON_CN38XX))
+		l2_set_bits = 10;	/* 1024 sets */
+	else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+		 || OCTEON_IS_MODEL(OCTEON_CN52XX))
+		l2_set_bits = 9;	/* 512 sets */
+	else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
+		l2_set_bits = 8;	/* 256 sets */
+	else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
+		l2_set_bits = 7;	/* 128 sets */
+	else {
+		cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+		l2_set_bits = 11;	/* 2048 sets */
+	}
+	return l2_set_bits;
+
+}
+
+/* Return the number of sets in the L2 Cache */
+int cvmx_l2c_get_num_sets(void)
+{
+	return 1 << cvmx_l2c_get_set_bits();
+}
+
+/* Return the number of associations in the L2 Cache */
+int cvmx_l2c_get_num_assoc(void)
+{
+	int l2_assoc;
+	if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
+	    OCTEON_IS_MODEL(OCTEON_CN52XX) ||
+	    OCTEON_IS_MODEL(OCTEON_CN58XX) ||
+	    OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN38XX))
+		l2_assoc = 8;
+	else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
+		 OCTEON_IS_MODEL(OCTEON_CN30XX))
+		l2_assoc = 4;
+	else {
+		cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+		l2_assoc = 8;
+	}
+
+	/* Check to see if part of the cache is disabled */
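+	/* Fuse 265 set: only a quarter of the ways remain; fuse 264 set: half. */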
+	if (cvmx_fuse_read(265))
+		l2_assoc = l2_assoc >> 2;
+	else if (cvmx_fuse_read(264))
+		l2_assoc = l2_assoc >> 1;
+
+	return l2_assoc;
+}
+
+/**
+ * Flush a line from the L2 cache
+ * This should only be called from one core at a time, as this routine
+ * sets the core to the 'debug' core in order to flush the line.
+ *
+ * @assoc:  Association (or way) to flush
+ * @index:  Index to flush
+ */
+void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
+{
+	union cvmx_l2c_dbg l2cdbg;
+
+	l2cdbg.u64 = 0;
+	l2cdbg.s.ppnum = cvmx_get_core_num();
+	l2cdbg.s.finv = 1;
+
+	l2cdbg.s.set = assoc;
+	/*
+	 * Enter debug mode, and make sure all other writes complete
+	 * before we enter debug mode.
+	 */
+	asm volatile ("sync" : : : "memory");
+	cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+	cvmx_read_csr(CVMX_L2C_DBG);
+
+	CVMX_PREPARE_FOR_STORE(((1ULL << 63) + (index) * 128), 0);
+	/* Exit debug mode */
+	asm volatile ("sync" : : : "memory");
+	cvmx_write_csr(CVMX_L2C_DBG, 0);
+	cvmx_read_csr(CVMX_L2C_DBG);
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
new file mode 100644
index 0000000..48123707
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
@@ -0,0 +1,116 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * This module provides system/board/application information obtained
+ * by the bootloader.
+ */
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-spinlock.h>
+#include <asm/octeon/cvmx-sysinfo.h>
+
+/**
+ * This structure defines the private state maintained by the sysinfo module.
+ */
+static struct {
+	struct cvmx_sysinfo sysinfo;	   /* system information */
+	cvmx_spinlock_t lock;	   /* mutex spinlock */
+
+} state = {
+	.lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER
+};
+
+
+/*
+ * Global variables that define the min/max of the memory region set
+ * up for 32 bit userspace access.
+ */
+uint64_t linux_mem32_min;
+uint64_t linux_mem32_max;
+uint64_t linux_mem32_wired;
+uint64_t linux_mem32_offset;
+
+/**
+ * This function returns the application information as obtained
+ * by the bootloader.  This provides the core mask of the cores
+ * running the same application image, as well as the physical
+ * memory regions available to the core.
+ *
+ * Returns  Pointer to the boot information structure
+ *
+ */
+struct cvmx_sysinfo *cvmx_sysinfo_get(void)
+{
+	return &(state.sysinfo);
+}
+
+/**
+ * This function is used in non-simple executive environments (such as
+ * Linux kernel, u-boot, etc.)  to configure the minimal fields that
+ * are required to use simple executive files directly.
+ *
+ * Locking (if required) must be handled outside of this
+ * function
+ *
+ * @phy_mem_desc_ptr:
+ *                   Pointer to global physical memory descriptor
+ *                   (bootmem descriptor)
+ * @board_type:
+ *                   Octeon board type enumeration
+ *
+ * @board_rev_major:
+ *                   Board major revision
+ * @board_rev_minor:
+ *                   Board minor revision
+ * @cpu_clock_hz:
+ *                   CPU clock frequency in hertz
+ *
+ * Returns 0: Failure
+ *         1: success
+ */
+int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
+				    uint16_t board_type,
+				    uint8_t board_rev_major,
+				    uint8_t board_rev_minor,
+				    uint32_t cpu_clock_hz)
+{
+
+	/* The sysinfo structure was already initialized */
+	if (state.sysinfo.board_type)
+		return 0;
+
+	memset(&(state.sysinfo), 0x0, sizeof(state.sysinfo));
+	state.sysinfo.phy_mem_desc_ptr = phy_mem_desc_ptr;
+	state.sysinfo.board_type = board_type;
+	state.sysinfo.board_rev_major = board_rev_major;
+	state.sysinfo.board_rev_minor = board_rev_minor;
+	state.sysinfo.cpu_clock_hz = cpu_clock_hz;
+
+	return 1;
+}
+
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c
new file mode 100644
index 0000000..9afc379
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -0,0 +1,358 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * File defining functions for working with different Octeon
+ * models.
+ */
+#include <asm/octeon/octeon.h>
+
+/**
+ * Given the chip processor ID from COP0, this function returns a
+ * string representing the chip model number. The string is of the
+ * form CNXXXXpX.X-FREQ-SUFFIX.
+ * - XXXX = The chip model number
+ * - X.X = Chip pass number
+ * - FREQ = Current frequency in MHz
+ * - SUFFIX = NSP, EXP, SCP, SSP, or CP
+ *
+ * @chip_id: Chip ID
+ *
+ * Returns Model string
+ */
+const char *octeon_model_get_string(uint32_t chip_id)
+{
+	static char buffer[32];
+	return octeon_model_get_string_buffer(chip_id, buffer);
+}
+
+/*
+ * Version of octeon_model_get_string() that takes the buffer as an
+ * argument; when running early in u-boot from flash, static/global
+ * variables don't work.
+ */
+const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer)
+{
+	const char *family;
+	const char *core_model;
+	char pass[4];
+	int clock_mhz;
+	const char *suffix;
+	union cvmx_l2d_fus3 fus3;
+	int num_cores;
+	union cvmx_mio_fus_dat2 fus_dat2;
+	union cvmx_mio_fus_dat3 fus_dat3;
+	char fuse_model[10];
+	uint32_t fuse_data = 0;
+
+	fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
+	fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
+	fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
+
+	num_cores = cvmx_octeon_num_cores();
+
+	/* Make sure the non-existent devices look disabled */
+	switch ((chip_id >> 8) & 0xff) {
+	case 6:		/* CN50XX */
+	case 2:		/* CN30XX */
+		fus_dat3.s.nodfa_dte = 1;
+		fus_dat3.s.nozip = 1;
+		break;
+	case 4:		/* CN57XX or CN56XX */
+		fus_dat3.s.nodfa_dte = 1;
+		break;
+	default:
+		break;
+	}
+
+	/* Make a guess at the suffix */
+	/* NSP = everything */
+	/* EXP = No crypto */
+	/* SCP = No DFA, No zip */
+	/* CP = No DFA, No crypto, No zip */
+	if (fus_dat3.s.nodfa_dte) {
+		if (fus_dat2.s.nocrypto)
+			suffix = "CP";
+		else
+			suffix = "SCP";
+	} else if (fus_dat2.s.nocrypto)
+		suffix = "EXP";
+	else
+		suffix = "NSP";
+
+	/*
+	 * Assume pass number is encoded using <5:3><2:0>. Exceptions
+	 * will be fixed later.
+	 */
+	sprintf(pass, "%u.%u", ((chip_id >> 3) & 7) + 1, chip_id & 7);
+
+	/*
+	 * Use the number of cores to determine the last 2 digits of
+	 * the model number. There are some exceptions that are fixed
+	 * later.
+	 */
+	switch (num_cores) {
+	case 16:
+		core_model = "60";
+		break;
+	case 15:
+		core_model = "58";
+		break;
+	case 14:
+		core_model = "55";
+		break;
+	case 13:
+		core_model = "52";
+		break;
+	case 12:
+		core_model = "50";
+		break;
+	case 11:
+		core_model = "48";
+		break;
+	case 10:
+		core_model = "45";
+		break;
+	case 9:
+		core_model = "42";
+		break;
+	case 8:
+		core_model = "40";
+		break;
+	case 7:
+		core_model = "38";
+		break;
+	case 6:
+		core_model = "34";
+		break;
+	case 5:
+		core_model = "32";
+		break;
+	case 4:
+		core_model = "30";
+		break;
+	case 3:
+		core_model = "25";
+		break;
+	case 2:
+		core_model = "20";
+		break;
+	case 1:
+		core_model = "10";
+		break;
+	default:
+		core_model = "XX";
+		break;
+	}
+
+	/* Now figure out the family, the first two digits */
+	switch ((chip_id >> 8) & 0xff) {
+	case 0:		/* CN38XX, CN37XX or CN36XX */
+		if (fus3.cn38xx.crip_512k) {
+			/*
+			 * For some unknown reason, the 16 core one is
+			 * called 37 instead of 36.
+			 */
+			if (num_cores >= 16)
+				family = "37";
+			else
+				family = "36";
+		} else
+			family = "38";
+		/*
+		 * This series of chips didn't follow the standard
+		 * pass numbering.
+		 */
+		switch (chip_id & 0xf) {
+		case 0:
+			strcpy(pass, "1.X");
+			break;
+		case 1:
+			strcpy(pass, "2.X");
+			break;
+		case 3:
+			strcpy(pass, "3.X");
+			break;
+		default:
+			strcpy(pass, "X.X");
+			break;
+		}
+		break;
+	case 1:		/* CN31XX or CN3020 */
+		if ((chip_id & 0x10) || fus3.cn31xx.crip_128k)
+			family = "30";
+		else
+			family = "31";
+		/*
+		 * This series of chips didn't follow the standard
+		 * pass numbering.
+		 */
+		switch (chip_id & 0xf) {
+		case 0:
+			strcpy(pass, "1.0");
+			break;
+		case 2:
+			strcpy(pass, "1.1");
+			break;
+		default:
+			strcpy(pass, "X.X");
+			break;
+		}
+		break;
+	case 2:		/* CN3010 or CN3005 */
+		family = "30";
+		/* A chip with half cache is an 05 */
+		if (fus3.cn30xx.crip_64k)
+			core_model = "05";
+		/*
+		 * This series of chips didn't follow the standard
+		 * pass numbering.
+		 */
+		switch (chip_id & 0xf) {
+		case 0:
+			strcpy(pass, "1.0");
+			break;
+		case 2:
+			strcpy(pass, "1.1");
+			break;
+		default:
+			strcpy(pass, "X.X");
+			break;
+		}
+		break;
+	case 3:		/* CN58XX */
+		family = "58";
+		/* Special case. 4 core, no crypto */
+		if ((num_cores == 4) && fus_dat2.cn38xx.nocrypto)
+			core_model = "29";
+
+		/* Pass 1 uses different encodings for pass numbers */
+		if ((chip_id & 0xFF) < 0x8) {
+			switch (chip_id & 0x3) {
+			case 0:
+				strcpy(pass, "1.0");
+				break;
+			case 1:
+				strcpy(pass, "1.1");
+				break;
+			case 3:
+				strcpy(pass, "1.2");
+				break;
+			default:
+				strcpy(pass, "1.X");
+				break;
+			}
+		}
+		break;
+	case 4:		/* CN57XX, CN56XX, CN55XX, CN54XX */
+		if (fus_dat2.cn56xx.raid_en) {
+			if (fus3.cn56xx.crip_1024k)
+				family = "55";
+			else
+				family = "57";
+			if (fus_dat2.cn56xx.nocrypto)
+				suffix = "SP";
+			else
+				suffix = "SSP";
+		} else {
+			if (fus_dat2.cn56xx.nocrypto)
+				suffix = "CP";
+			else {
+				suffix = "NSP";
+				if (fus_dat3.s.nozip)
+					suffix = "SCP";
+			}
+			if (fus3.cn56xx.crip_1024k)
+				family = "54";
+			else
+				family = "56";
+		}
+		break;
+	case 6:		/* CN50XX */
+		family = "50";
+		break;
+	case 7:		/* CN52XX */
+		if (fus3.cn52xx.crip_256k)
+			family = "51";
+		else
+			family = "52";
+		break;
+	default:
+		family = "XX";
+		core_model = "XX";
+		strcpy(pass, "X.X");
+		suffix = "XXX";
+		break;
+	}
+
+	clock_mhz = octeon_get_clock_rate() / 1000000;
+
+	if (family[0] != '3') {
+		/* Check for model in fuses, overrides normal decode */
+		/* This is _not_ valid for Octeon CN3XXX models */
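+		/* Assemble fuse bytes 48-51 into one 32-bit value, byte 51 most significant. */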
+		fuse_data |= cvmx_fuse_read_byte(51);
+		fuse_data = fuse_data << 8;
+		fuse_data |= cvmx_fuse_read_byte(50);
+		fuse_data = fuse_data << 8;
+		fuse_data |= cvmx_fuse_read_byte(49);
+		fuse_data = fuse_data << 8;
+		fuse_data |= cvmx_fuse_read_byte(48);
+		if (fuse_data & 0x7ffff) {
+			int model = fuse_data & 0x3fff;
+			int suffix = (fuse_data >> 14) & 0x1f;
+			if (suffix && model) {
+				/*
+				 * Have both the number and suffix in
+				 * the fuses, so use both.
+				 */
+				sprintf(fuse_model, "%d%c",
+					model, 'A' + suffix - 1);
+				core_model = "";
+				family = fuse_model;
+			} else if (suffix && !model) {
+				/*
+				 * Only have suffix, so add suffix to
+				 * 'normal' model number.
+				 */
+				sprintf(fuse_model, "%s%c", core_model,
+					'A' + suffix - 1);
+				core_model = fuse_model;
+			} else {
+				/*
+				 * Don't have suffix, so just use
+				 * model from fuses.
+				 */
+				sprintf(fuse_model, "%d", model);
+				core_model = "";
+				family = fuse_model;
+			}
+		}
+	}
+	sprintf(buffer, "CN%s%sp%s-%d-%s",
+		family, core_model, pass, clock_mhz, suffix);
+	return buffer;
+}
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
new file mode 100644
index 0000000..553d36c
--- /dev/null
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -0,0 +1,84 @@
+/*
+ *   Octeon Bootbus flash setup
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007, 2008 Cavium Networks
+ */
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/octeon/octeon.h>
+
+static struct map_info flash_map;
+static struct mtd_info *mymtd;
+#ifdef CONFIG_MTD_PARTITIONS
+static int nr_parts;
+static struct mtd_partition *parts;
+static const char *part_probe_types[] = {
+	"cmdlinepart",
+#ifdef CONFIG_MTD_REDBOOT_PARTS
+	"RedBoot",
+#endif
+	NULL
+};
+#endif
+
+/**
+ * Module/driver initialization.
+ *
+ * Returns Zero on success
+ */
+static int __init flash_init(void)
+{
+	/*
+	 * Read the bootbus region 0 setup to determine the base
+	 * address of the flash.
+	 */
+	union cvmx_mio_boot_reg_cfgx region_cfg;
+	region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(0));
+	if (region_cfg.s.en) {
+		/*
+		 * The bootloader always takes the flash and sets its
+		 * address so the entire flash fits below
+		 * 0x1fc00000. This way the flash aliases to
+		 * 0x1fc00000 for booting. Software can access the
+		 * full flash at the true address, while core boot can
+		 * access 4MB.
+		 */
+		/* Use this name so old part lines work */
+		flash_map.name = "phys_mapped_flash";
+		flash_map.phys = region_cfg.s.base << 16;
+		flash_map.size = 0x1fc00000 - flash_map.phys;
+		flash_map.bankwidth = 1;
+		flash_map.virt = ioremap(flash_map.phys, flash_map.size);
+		pr_notice("Bootbus flash: Setting flash for %luMB flash at "
+			  "0x%08lx\n", flash_map.size >> 20, flash_map.phys);
+		simple_map_init(&flash_map);
+		mymtd = do_map_probe("cfi_probe", &flash_map);
+		if (mymtd) {
+			mymtd->owner = THIS_MODULE;
+
+#ifdef CONFIG_MTD_PARTITIONS
+			nr_parts = parse_mtd_partitions(mymtd,
+							part_probe_types,
+							&parts, 0);
+			if (nr_parts > 0)
+				add_mtd_partitions(mymtd, parts, nr_parts);
+			else
+				add_mtd_device(mymtd);
+#else
+			add_mtd_device(mymtd);
+#endif
+		} else {
+			pr_err("Failed to register MTD device for flash\n");
+		}
+	}
+	return 0;
+}
+
+late_initcall(flash_init);
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
new file mode 100644
index 0000000..fc72984
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -0,0 +1,497 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2008 Cavium Networks
+ */
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/hardirq.h>
+
+#include <asm/octeon/octeon.h>
+
+DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
+DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
+DEFINE_SPINLOCK(octeon_irq_msi_lock);
+
+static void octeon_irq_core_ack(unsigned int irq)
+{
+	unsigned int bit = irq - OCTEON_IRQ_SW0;
+	/*
+	 * We don't need to disable IRQs to make these atomic since
+	 * they are already disabled earlier in the low level
+	 * interrupt code.
+	 */
+	clear_c0_status(0x100 << bit);
+	/* The two user interrupts must be cleared manually. */
+	if (bit < 2)
+		clear_c0_cause(0x100 << bit);
+}
+
+static void octeon_irq_core_eoi(unsigned int irq)
+{
+	irq_desc_t *desc = irq_desc + irq;
+	unsigned int bit = irq - OCTEON_IRQ_SW0;
+	/*
+	 * If an IRQ is being processed while we are disabling it, the
+	 * handler will attempt to unmask the interrupt after it has
+	 * been disabled.
+	 */
+	if (desc->status & IRQ_DISABLED)
+		return;
+
+	/* There is a race here.  We should fix it.  */
+
+	/*
+	 * We don't need to disable IRQs to make these atomic since
+	 * they are already disabled earlier in the low level
+	 * interrupt code.
+	 */
+	set_c0_status(0x100 << bit);
+}
+
+static void octeon_irq_core_enable(unsigned int irq)
+{
+	unsigned long flags;
+	unsigned int bit = irq - OCTEON_IRQ_SW0;
+
+	/*
+	 * We need to disable interrupts to make sure our updates are
+	 * atomic.
+	 */
+	local_irq_save(flags);
+	set_c0_status(0x100 << bit);
+	local_irq_restore(flags);
+}
+
+static void octeon_irq_core_disable_local(unsigned int irq)
+{
+	unsigned long flags;
+	unsigned int bit = irq - OCTEON_IRQ_SW0;
+	/*
+	 * We need to disable interrupts to make sure our updates are
+	 * atomic.
+	 */
+	local_irq_save(flags);
+	clear_c0_status(0x100 << bit);
+	local_irq_restore(flags);
+}
+
+static void octeon_irq_core_disable(unsigned int irq)
+{
+#ifdef CONFIG_SMP
+	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
+		    (void *) (long) irq, 1);
+#else
+	octeon_irq_core_disable_local(irq);
+#endif
+}
+
+static struct irq_chip octeon_irq_chip_core = {
+	.name = "Core",
+	.enable = octeon_irq_core_enable,
+	.disable = octeon_irq_core_disable,
+	.ack = octeon_irq_core_ack,
+	.eoi = octeon_irq_core_eoi,
+};
+
+
+static void octeon_irq_ciu0_ack(unsigned int irq)
+{
+	/*
+	 * In order to avoid any locking accessing the CIU, we
+	 * acknowledge CIU interrupts by disabling all of them.  This
+	 * way we can use a per core register and avoid any out of
+	 * core locking requirements.  This has the side effect that
+	 * CIU interrupts can't be processed recursively.
+	 *
+	 * We don't need to disable IRQs to make these atomic since
+	 * they are already disabled earlier in the low level
+	 * interrupt code.
+	 */
+	clear_c0_status(0x100 << 2);
+}
+
+static void octeon_irq_ciu0_eoi(unsigned int irq)
+{
+	/*
+	 * Enable all CIU interrupts again.  We don't need to disable
+	 * IRQs to make these atomic since they are already disabled
+	 * earlier in the low level interrupt code.
+	 */
+	set_c0_status(0x100 << 2);
+}
+
+static void octeon_irq_ciu0_enable(unsigned int irq)
+{
+	int coreid = cvmx_get_core_num();
+	unsigned long flags;
+	uint64_t en0;
+	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
+
+	/*
+	 * A read lock is used here to make sure only one core is ever
+	 * updating the CIU enable bits at a time. During an enable
+	 * the cores don't interfere with each other. During a disable
+	 * the write lock stops any enables that might cause a
+	 * problem.
+	 */
+	read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
+	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+	en0 |= 1ull << bit;
+	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
+	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+	read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
+}
+
+static void octeon_irq_ciu0_disable(unsigned int irq)
+{
+	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
+	unsigned long flags;
+	uint64_t en0;
+#ifdef CONFIG_SMP
+	int cpu;
+	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
+	for_each_online_cpu(cpu) {
+		int coreid = cpu_logical_map(cpu);
+		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+		en0 &= ~(1ull << bit);
+		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
+	}
+	/*
+	 * We need to do a read after the last update to make sure all
+	 * of them are done.
+	 */
+	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
+	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
+#else
+	int coreid = cvmx_get_core_num();
+	local_irq_save(flags);
+	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+	en0 &= ~(1ull << bit);
+	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
+	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+	local_irq_restore(flags);
+#endif
+}
+
+#ifdef CONFIG_SMP
+static void octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
+{
+	int cpu;
+	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
+
+	write_lock(&octeon_irq_ciu0_rwlock);
+	for_each_online_cpu(cpu) {
+		int coreid = cpu_logical_map(cpu);
+		uint64_t en0 =
+			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+		if (cpumask_test_cpu(cpu, dest))
+			en0 |= 1ull << bit;
+		else
+			en0 &= ~(1ull << bit);
+		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
+	}
+	/*
+	 * We need to do a read after the last update to make sure all
+	 * of them are done.
+	 */
+	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
+	write_unlock(&octeon_irq_ciu0_rwlock);
+}
+#endif
+
+static struct irq_chip octeon_irq_chip_ciu0 = {
+	.name = "CIU0",
+	.enable = octeon_irq_ciu0_enable,
+	.disable = octeon_irq_ciu0_disable,
+	.ack = octeon_irq_ciu0_ack,
+	.eoi = octeon_irq_ciu0_eoi,
+#ifdef CONFIG_SMP
+	.set_affinity = octeon_irq_ciu0_set_affinity,
+#endif
+};
+
+
+static void octeon_irq_ciu1_ack(unsigned int irq)
+{
+	/*
+	 * In order to avoid any locking accessing the CIU, we
+	 * acknowledge CIU interrupts by disabling all of them.  This
+	 * way we can use a per core register and avoid any out of
+	 * core locking requirements.  This has the side effect that
+	 * CIU interrupts can't be processed recursively.  We don't
+	 * need to disable IRQs to make these atomic since they are
+	 * already disabled earlier in the low level interrupt code.
+	 */
+	clear_c0_status(0x100 << 3);
+}
+
+static void octeon_irq_ciu1_eoi(unsigned int irq)
+{
+	/*
+	 * Enable all CIU interrupts again.  We don't need to disable
+	 * IRQs to make these atomic since they are already disabled
+	 * earlier in the low level interrupt code.
+	 */
+	set_c0_status(0x100 << 3);
+}
+
+static void octeon_irq_ciu1_enable(unsigned int irq)
+{
+	int coreid = cvmx_get_core_num();
+	unsigned long flags;
+	uint64_t en1;
+	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
+
+	/*
+	 * A read lock is used here to make sure only one core is ever
+	 * updating the CIU enable bits at a time.  During an enable
+	 * the cores don't interfere with each other.  During a disable
+	 * the write lock stops any enables that might cause a
+	 * problem.
+	 */
+	read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
+	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+	en1 |= 1ull << bit;
+	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
+	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+	read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
+}
+
+static void octeon_irq_ciu1_disable(unsigned int irq)
+{
+	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
+	unsigned long flags;
+	uint64_t en1;
+#ifdef CONFIG_SMP
+	int cpu;
+	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
+	for_each_online_cpu(cpu) {
+		int coreid = cpu_logical_map(cpu);
+		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+		en1 &= ~(1ull << bit);
+		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
+	}
+	/*
+	 * We need to do a read after the last update to make sure all
+	 * of them are done.
+	 */
+	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
+	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
+#else
+	int coreid = cvmx_get_core_num();
+	local_irq_save(flags);
+	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+	en1 &= ~(1ull << bit);
+	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
+	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+	local_irq_restore(flags);
+#endif
+}
+
+#ifdef CONFIG_SMP
+static void octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest)
+{
+	int cpu;
+	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
+
+	write_lock(&octeon_irq_ciu1_rwlock);
+	for_each_online_cpu(cpu) {
+		int coreid = cpu_logical_map(cpu);
+		uint64_t en1 =
+			cvmx_read_csr(CVMX_CIU_INTX_EN1
+				(coreid * 2 + 1));
+		if (cpumask_test_cpu(cpu, dest))
+			en1 |= 1ull << bit;
+		else
+			en1 &= ~(1ull << bit);
+		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
+	}
+	/*
+	 * We need to do a read after the last update to make sure all
+	 * of them are done.
+	 */
+	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
+	write_unlock(&octeon_irq_ciu1_rwlock);
+}
+#endif
+
+static struct irq_chip octeon_irq_chip_ciu1 = {
+	.name = "CIU1",
+	.enable = octeon_irq_ciu1_enable,
+	.disable = octeon_irq_ciu1_disable,
+	.ack = octeon_irq_ciu1_ack,
+	.eoi = octeon_irq_ciu1_eoi,
+#ifdef CONFIG_SMP
+	.set_affinity = octeon_irq_ciu1_set_affinity,
+#endif
+};
+
+#ifdef CONFIG_PCI_MSI
+
+static void octeon_irq_msi_ack(unsigned int irq)
+{
+	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
+		/* These chips have PCI */
+		cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
+			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
+	} else {
+		/*
+		 * These chips have PCIe. Thankfully the ACK doesn't
+		 * need any locking.
+		 */
+		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
+			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
+	}
+}
+
+static void octeon_irq_msi_eoi(unsigned int irq)
+{
+	/* Nothing needed */
+}
+
+static void octeon_irq_msi_enable(unsigned int irq)
+{
+	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
+		/*
+		 * Octeon PCI doesn't have the ability to mask/unmask
+		 * MSI interrupts individually.  Instead of
+		 * masking/unmasking them in groups of 16, we simply
+		 * assume MSI devices are well behaved.  MSI
+		 * interrupts are always enabled and the ACK is assumed
+		 * to be enough.
+		 */
+	} else {
+		/*
+		 * These chips have PCIe.  Note that we only support
+		 * the first 64 MSI interrupts.  Unfortunately all the
+		 * MSI enables are in the same register.  We use
+		 * MSI0's lock to control access to them all.
+		 */
+		uint64_t en;
+		unsigned long flags;
+		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
+		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
+		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
+		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
+		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
+		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
+	}
+}
+
+static void octeon_irq_msi_disable(unsigned int irq)
+{
+	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
+		/* See comment in enable */
+	} else {
+		/*
+		 * These chips have PCIe.  Note that we only support
+		 * the first 64 MSI interrupts.  Unfortunately all the
+		 * MSI enables are in the same register.  We use
+		 * MSI0's lock to control access to them all.
+		 */
+		uint64_t en;
+		unsigned long flags;
+		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
+		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
+		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
+		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
+		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
+		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
+	}
+}
+
+static struct irq_chip octeon_irq_chip_msi = {
+	.name = "MSI",
+	.enable = octeon_irq_msi_enable,
+	.disable = octeon_irq_msi_disable,
+	.ack = octeon_irq_msi_ack,
+	.eoi = octeon_irq_msi_eoi,
+};
+#endif
+
+void __init arch_init_irq(void)
+{
+	int irq;
+
+#ifdef CONFIG_SMP
+	/* Set the default affinity to the boot cpu. */
+	cpumask_clear(irq_default_affinity);
+	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+#endif
+
+	if (NR_IRQS < OCTEON_IRQ_LAST)
+		pr_err("octeon_irq_init: NR_IRQS is set too low\n");
+
+	/* 0 - 15 reserved for i8259 master and slave controller. */
+
+	/* 17 - 23 Mips internal */
+	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
+		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
+					 handle_percpu_irq);
+	}
+
+	/* 24 - 87 CIU_INT_SUM0 */
+	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
+		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu0,
+					 handle_percpu_irq);
+	}
+
+	/* 88 - 151 CIU_INT_SUM1 */
+	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
+		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu1,
+					 handle_percpu_irq);
+	}
+
+#ifdef CONFIG_PCI_MSI
+	/* 152 - 215 PCI/PCIe MSI interrupts */
+	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
+		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
+					 handle_percpu_irq);
+	}
+#endif
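+	/* Enable the two CIU summary lines (IP2 and IP3) in the CP0 Status IM field. */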
+	set_c0_status(0x300 << 2);
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+	const unsigned long core_id = cvmx_get_core_num();
+	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
+	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
+	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
+	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
+	unsigned long cop0_cause;
+	unsigned long cop0_status;
+	uint64_t ciu_en;
+	uint64_t ciu_sum;
+
+	while (1) {
+		cop0_cause = read_c0_cause();
+		cop0_status = read_c0_status();
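+		/* Only lines that are both pending (Cause.IP) and enabled (Status.IM) count. */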
+		cop0_cause &= cop0_status;
+		cop0_cause &= ST0_IM;
+
+		if (unlikely(cop0_cause & STATUSF_IP2)) {
+			ciu_sum = cvmx_read_csr(ciu_sum0_address);
+			ciu_en = cvmx_read_csr(ciu_en0_address);
+			ciu_sum &= ciu_en;
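+			/* fls64() gives the 1-based index of the highest pending bit; map it to its Linux IRQ. */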
+			if (likely(ciu_sum))
+				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
+			else
+				spurious_interrupt();
+		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
+			ciu_sum = cvmx_read_csr(ciu_sum1_address);
+			ciu_en = cvmx_read_csr(ciu_en1_address);
+			ciu_sum &= ciu_en;
+			if (likely(ciu_sum))
+				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
+			else
+				spurious_interrupt();
+		} else if (likely(cop0_cause)) {
+			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
+		} else {
+			break;
+		}
+	}
+}
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
new file mode 100644
index 0000000..88e0cdd
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -0,0 +1,521 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Unified implementation of memcpy, memmove and the __copy_user backend.
+ *
+ * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
+ * Copyright (C) 2002 Broadcom, Inc.
+ *   memcpy/copy_user author: Mark Vandevoorde
+ *
+ * Mnemonic names for arguments to memcpy/__copy_user
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+
+#define dst a0
+#define src a1
+#define len a2
+
+/*
+ * Spec
+ *
+ * memcpy copies len bytes from src to dst and sets v0 to dst.
+ * It assumes that
+ *   - src and dst don't overlap
+ *   - src is readable
+ *   - dst is writable
+ * memcpy uses the standard calling convention
+ *
+ * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
+ * the number of uncopied bytes due to an exception caused by a read or write.
+ * __copy_user assumes that src and dst don't overlap, and that the call is
+ * implementing one of the following:
+ *   copy_to_user
+ *     - src is readable  (no exceptions when reading src)
+ *   copy_from_user
+ *     - dst is writable  (no exceptions when writing dst)
+ * __copy_user uses a non-standard calling convention; see
+ * arch/mips/include/asm/uaccess.h
+ *
+ * When an exception happens on a load, the handler must
+ * ensure that all of the destination buffer is overwritten to prevent
+ * leaking information to user mode programs.
+ */
+
+/*
+ * Implementation
+ */
+
+/*
+ * The exception handler for loads requires that:
+ *  1- AT contain the address of the byte just past the end of the source
+ *     of the copy,
+ *  2- src_entry <= src < AT, and
+ *  3- (dst - src) == (dst_entry - src_entry),
+ * The _entry suffix denotes values when __copy_user was called.
+ *
+ * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
+ * (2) is met by incrementing src by the number of bytes copied
+ * (3) is met by not doing loads between a pair of increments of dst and src
+ *
+ * The exception handlers for stores adjust len (if necessary) and return.
+ * These handlers do not need to overwrite any data.
+ *
+ * For __rmemcpy and memmove an exception is always a kernel bug, therefore
+ * they're not protected.
+ */
+
+#define EXC(inst_reg,addr,handler)		\
+9:	inst_reg, addr;				\
+	.section __ex_table,"a";		\
+	PTR	9b, handler;			\
+	.previous
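+
+/*
+ * Each EXC() expands to the instruction at a numbered local label plus a
+ * __ex_table entry, so a fault in that instruction jumps to 'handler'.
+ */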
+
+/*
+ * Only on the 64-bit kernel can we make use of 64-bit registers.
+ */
+#ifdef CONFIG_64BIT
+#define USE_DOUBLE
+#endif
+
+#ifdef USE_DOUBLE
+
+#define LOAD   ld
+#define LOADL  ldl
+#define LOADR  ldr
+#define STOREL sdl
+#define STORER sdr
+#define STORE  sd
+#define ADD    daddu
+#define SUB    dsubu
+#define SRL    dsrl
+#define SRA    dsra
+#define SLL    dsll
+#define SLLV   dsllv
+#define SRLV   dsrlv
+#define NBYTES 8
+#define LOG_NBYTES 3
+
+/*
+ * As we are sharing the code base with the mips32 tree (which uses the o32
+ * ABI register definitions), we need to redefine the register definitions
+ * from the n64 ABI register naming to the o32 ABI register naming.
+ */
+#undef t0
+#undef t1
+#undef t2
+#undef t3
+#define t0	$8
+#define t1	$9
+#define t2	$10
+#define t3	$11
+#define t4	$12
+#define t5	$13
+#define t6	$14
+#define t7	$15
+
+#else
+
+#define LOAD   lw
+#define LOADL  lwl
+#define LOADR  lwr
+#define STOREL swl
+#define STORER swr
+#define STORE  sw
+#define ADD    addu
+#define SUB    subu
+#define SRL    srl
+#define SLL    sll
+#define SRA    sra
+#define SLLV   sllv
+#define SRLV   srlv
+#define NBYTES 4
+#define LOG_NBYTES 2
+
+#endif /* USE_DOUBLE */
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST LOADR
+#define LDREST  LOADL
+#define STFIRST STORER
+#define STREST  STOREL
+#define SHIFT_DISCARD SLLV
+#else
+#define LDFIRST LOADL
+#define LDREST  LOADR
+#define STFIRST STOREL
+#define STREST  STORER
+#define SHIFT_DISCARD SRLV
+#endif
+
+#define FIRST(unit) ((unit)*NBYTES)
+#define REST(unit)  (FIRST(unit)+NBYTES-1)
+#define UNIT(unit)  FIRST(unit)
+
+#define ADDRMASK (NBYTES-1)
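+
+/*
+ * FIRST(n) is the offset of word n's lowest-addressed byte and REST(n) the
+ * offset of its highest-addressed byte; the LDFIRST/LDREST pairs use these
+ * for the partial loads (lwl/lwr or ldl/ldr) on unaligned sources.
+ */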
+
+	.text
+	.set	noreorder
+	.set	noat
+
+/*
+ * A combined memcpy/__copy_user
+ * __copy_user sets len to 0 for success; else to an upper bound of
+ * the number of uncopied bytes.
+ * memcpy sets v0 to dst.
+ */
+	.align	5
+LEAF(memcpy)					/* a0=dst a1=src a2=len */
+	move	v0, dst				/* return value */
+__memcpy:
+FEXPORT(__copy_user)
+	/*
+	 * Note: dst & src may be unaligned, len may be 0
+	 * Temps
+	 */
+	#
+	# Octeon doesn't care if the destination is unaligned. The hardware
+	# can fix it faster than we can special case the assembly.
+	#
+	pref	0, 0(src)
+	sltu	t0, len, NBYTES		# Check if < 1 word
+	bnez	t0, copy_bytes_checklen
+	 and	t0, src, ADDRMASK	# Check if src unaligned
+	bnez	t0, src_unaligned
+	 sltu	t0, len, 4*NBYTES	# Check if < 4 words
+	bnez	t0, less_than_4units
+	 sltu	t0, len, 8*NBYTES	# Check if < 8 words
+	bnez	t0, less_than_8units
+	 sltu	t0, len, 16*NBYTES	# Check if < 16 words
+	bnez	t0, cleanup_both_aligned
+	 sltu	t0, len, 128+1		# Check if len < 129
+	bnez	t0, 1f			# Skip prefetch if len is too short
+	 sltu	t0, len, 256+1		# Check if len < 257
+	bnez	t0, 1f			# Skip prefetch if len is too short
+	 pref	0, 128(src)		# We must not prefetch invalid addresses
+	#
+	# This is where we loop if there are more than 128 bytes left
+2:	pref	0, 256(src)		# We must not prefetch invalid addresses
+	#
+	# This is where we loop if we can't prefetch anymore
+1:
+EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
+EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
+EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
+EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
+	SUB	len, len, 16*NBYTES
+EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p16u)
+EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p15u)
+EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p14u)
+EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p13u)
+EXC(	LOAD	t0, UNIT(4)(src),	l_exc_copy)
+EXC(	LOAD	t1, UNIT(5)(src),	l_exc_copy)
+EXC(	LOAD	t2, UNIT(6)(src),	l_exc_copy)
+EXC(	LOAD	t3, UNIT(7)(src),	l_exc_copy)
+EXC(	STORE	t0, UNIT(4)(dst),	s_exc_p12u)
+EXC(	STORE	t1, UNIT(5)(dst),	s_exc_p11u)
+EXC(	STORE	t2, UNIT(6)(dst),	s_exc_p10u)
+	ADD	src, src, 16*NBYTES
+EXC(	STORE	t3, UNIT(7)(dst),	s_exc_p9u)
+	ADD	dst, dst, 16*NBYTES
+EXC(	LOAD	t0, UNIT(-8)(src),	l_exc_copy)
+EXC(	LOAD	t1, UNIT(-7)(src),	l_exc_copy)
+EXC(	LOAD	t2, UNIT(-6)(src),	l_exc_copy)
+EXC(	LOAD	t3, UNIT(-5)(src),	l_exc_copy)
+EXC(	STORE	t0, UNIT(-8)(dst),	s_exc_p8u)
+EXC(	STORE	t1, UNIT(-7)(dst),	s_exc_p7u)
+EXC(	STORE	t2, UNIT(-6)(dst),	s_exc_p6u)
+EXC(	STORE	t3, UNIT(-5)(dst),	s_exc_p5u)
+EXC(	LOAD	t0, UNIT(-4)(src),	l_exc_copy)
+EXC(	LOAD	t1, UNIT(-3)(src),	l_exc_copy)
+EXC(	LOAD	t2, UNIT(-2)(src),	l_exc_copy)
+EXC(	LOAD	t3, UNIT(-1)(src),	l_exc_copy)
+EXC(	STORE	t0, UNIT(-4)(dst),	s_exc_p4u)
+EXC(	STORE	t1, UNIT(-3)(dst),	s_exc_p3u)
+EXC(	STORE	t2, UNIT(-2)(dst),	s_exc_p2u)
+EXC(	STORE	t3, UNIT(-1)(dst),	s_exc_p1u)
+	sltu	t0, len, 256+1		# See if we can prefetch more
+	beqz	t0, 2b
+	 sltu	t0, len, 128		# See if we can loop one more time
+	beqz	t0, 1b
+	 nop
+	#
+	# Jump here if there are less than 16*NBYTES left.
+	#
+cleanup_both_aligned:
+	beqz	len, done
+	 sltu	t0, len, 8*NBYTES
+	bnez	t0, less_than_8units
+	 nop
+EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
+EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
+EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
+EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
+	SUB	len, len, 8*NBYTES
+EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p8u)
+EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p7u)
+EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p6u)
+EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p5u)
+EXC(	LOAD	t0, UNIT(4)(src),	l_exc_copy)
+EXC(	LOAD	t1, UNIT(5)(src),	l_exc_copy)
+EXC(	LOAD	t2, UNIT(6)(src),	l_exc_copy)
+EXC(	LOAD	t3, UNIT(7)(src),	l_exc_copy)
+EXC(	STORE	t0, UNIT(4)(dst),	s_exc_p4u)
+EXC(	STORE	t1, UNIT(5)(dst),	s_exc_p3u)
+EXC(	STORE	t2, UNIT(6)(dst),	s_exc_p2u)
+EXC(	STORE	t3, UNIT(7)(dst),	s_exc_p1u)
+	ADD	src, src, 8*NBYTES
+	beqz	len, done
+	 ADD	dst, dst, 8*NBYTES
+	#
+	# Jump here if there are less than 8*NBYTES left.
+	#
+less_than_8units:
+	sltu	t0, len, 4*NBYTES
+	bnez	t0, less_than_4units
+	 nop
+EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
+EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
+EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
+EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
+	SUB	len, len, 4*NBYTES
+EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p4u)
+EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p3u)
+EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p2u)
+EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p1u)
+	ADD	src, src, 4*NBYTES
+	beqz	len, done
+	 ADD	dst, dst, 4*NBYTES
+	#
+	# Jump here if there are less than 4*NBYTES left. This means
+	# we may need to copy up to 3 NBYTES words.
+	#
+less_than_4units:
+	sltu	t0, len, 1*NBYTES
+	bnez	t0, copy_bytes_checklen
+	 nop
+	#
+	# 1) Copy NBYTES, then check length again
+	#
+EXC(	LOAD	t0, 0(src),		l_exc)
+	SUB	len, len, NBYTES
+	sltu	t1, len, 8
+EXC(	STORE	t0, 0(dst),		s_exc_p1u)
+	ADD	src, src, NBYTES
+	bnez	t1, copy_bytes_checklen
+	 ADD	dst, dst, NBYTES
+	#
+	# 2) Copy NBYTES, then check length again
+	#
+EXC(	LOAD	t0, 0(src),		l_exc)
+	SUB	len, len, NBYTES
+	sltu	t1, len, 8
+EXC(	STORE	t0, 0(dst),		s_exc_p1u)
+	ADD	src, src, NBYTES
+	bnez	t1, copy_bytes_checklen
+	 ADD	dst, dst, NBYTES
+	#
+	# 3) Copy NBYTES, then check length again
+	#
+EXC(	LOAD	t0, 0(src),		l_exc)
+	SUB	len, len, NBYTES
+	ADD	src, src, NBYTES
+	ADD	dst, dst, NBYTES
+	b copy_bytes_checklen
+EXC(	 STORE	t0, -8(dst),		s_exc_p1u)
+
+src_unaligned:
+#define rem t8
+	SRL	t0, len, LOG_NBYTES+2    # +2 for 4 units/iter
+	beqz	t0, cleanup_src_unaligned
+	 and	rem, len, (4*NBYTES-1)   # rem = len % 4*NBYTES
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+EXC(	LDFIRST	t0, FIRST(0)(src),	l_exc)
+EXC(	LDFIRST	t1, FIRST(1)(src),	l_exc_copy)
+	SUB     len, len, 4*NBYTES
+EXC(	LDREST	t0, REST(0)(src),	l_exc_copy)
+EXC(	LDREST	t1, REST(1)(src),	l_exc_copy)
+EXC(	LDFIRST	t2, FIRST(2)(src),	l_exc_copy)
+EXC(	LDFIRST	t3, FIRST(3)(src),	l_exc_copy)
+EXC(	LDREST	t2, REST(2)(src),	l_exc_copy)
+EXC(	LDREST	t3, REST(3)(src),	l_exc_copy)
+	ADD	src, src, 4*NBYTES
+EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p4u)
+EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p3u)
+EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p2u)
+EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p1u)
+	bne	len, rem, 1b
+	 ADD	dst, dst, 4*NBYTES
+
+cleanup_src_unaligned:
+	beqz	len, done
+	 and	rem, len, NBYTES-1  # rem = len % NBYTES
+	beq	rem, len, copy_bytes
+	 nop
+1:
+EXC(	LDFIRST t0, FIRST(0)(src),	l_exc)
+EXC(	LDREST	t0, REST(0)(src),	l_exc_copy)
+	SUB	len, len, NBYTES
+EXC(	STORE	t0, 0(dst),		s_exc_p1u)
+	ADD	src, src, NBYTES
+	bne	len, rem, 1b
+	 ADD	dst, dst, NBYTES
+
+copy_bytes_checklen:
+	beqz	len, done
+	 nop
+copy_bytes:
+	/* 0 < len < NBYTES  */
+#define COPY_BYTE(N)			\
+EXC(	lb	t0, N(src), l_exc);	\
+	SUB	len, len, 1;		\
+	beqz	len, done;		\
+EXC(	 sb	t0, N(dst), s_exc_p1)
+
+	COPY_BYTE(0)
+	COPY_BYTE(1)
+#ifdef USE_DOUBLE
+	COPY_BYTE(2)
+	COPY_BYTE(3)
+	COPY_BYTE(4)
+	COPY_BYTE(5)
+#endif
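+	/* At most one byte can remain here; it lives at offset NBYTES-2. */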
+EXC(	lb	t0, NBYTES-2(src), l_exc)
+	SUB	len, len, 1
+	jr	ra
+EXC(	 sb	t0, NBYTES-2(dst), s_exc_p1)
+done:
+	jr	ra
+	 nop
+	END(memcpy)
+
+l_exc_copy:
+	/*
+	 * Copy bytes from src until faulting load address (or until a
+	 * lb faults)
+	 *
+	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
+	 * may be more than a byte beyond the last address.
+	 * Hence, the lb below may get an exception.
+	 *
+	 * Assumes src < THREAD_BUADDR($28)
+	 */
+	LOAD	t0, TI_TASK($28)
+	 nop
+	LOAD	t0, THREAD_BUADDR(t0)
+1:
+EXC(	lb	t1, 0(src),	l_exc)
+	ADD	src, src, 1
+	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
+	bne	src, t0, 1b
+	 ADD	dst, dst, 1
+l_exc:
+	LOAD	t0, TI_TASK($28)
+	 nop
+	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
+	 nop
+	SUB	len, AT, t0		# len = number of uncopied bytes
+	/*
+	 * Here's where we rely on src and dst being incremented in tandem,
+	 *   See (3) above.
+	 * dst += (fault addr - src) to put dst at first byte to clear
+	 */
+	ADD	dst, t0			# compute start address in a1
+	SUB	dst, src
+	/*
+	 * Clear len bytes starting at dst.  Can't call __bzero because it
+	 * might modify len.  An inefficient loop for these rare times...
+	 */
+	beqz	len, done
+	 SUB	src, len, 1
+1:	sb	zero, 0(dst)
+	ADD	dst, dst, 1
+	bnez	src, 1b
+	 SUB	src, src, 1
+	jr	ra
+	 nop
+
+
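+/*
+ * Store-fault fixups: s_exc_pNu is reached when a STORE faults with N
+ * units (including the faulting one) still unstored.  Adding n*NBYTES
+ * back onto len leaves len holding the number of bytes not copied.
+ */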
+#define SEXC(n)				\
+s_exc_p ## n ## u:			\
+	jr	ra;			\
+	 ADD	len, len, n*NBYTES
+
+SEXC(16)
+SEXC(15)
+SEXC(14)
+SEXC(13)
+SEXC(12)
+SEXC(11)
+SEXC(10)
+SEXC(9)
+SEXC(8)
+SEXC(7)
+SEXC(6)
+SEXC(5)
+SEXC(4)
+SEXC(3)
+SEXC(2)
+SEXC(1)
+
+s_exc_p1:
+	jr	ra
+	 ADD	len, len, 1
+s_exc:
+	jr	ra
+	 nop
+
+	.align	5
+LEAF(memmove)
+	ADD	t0, a0, a2
+	ADD	t1, a1, a2
+	sltu	t0, a1, t0			# dst + len <= src -> memcpy
+	sltu	t1, a0, t1			# dst >= src + len -> memcpy
+	and	t0, t1
+	beqz	t0, __memcpy
+	 move	v0, a0				/* return value */
+	beqz	a2, r_out
+	END(memmove)
+
+	/* fall through to __rmemcpy */
+LEAF(__rmemcpy)					/* a0=dst a1=src a2=len */
+	 sltu	t0, a1, a0
+	beqz	t0, r_end_bytes_up		# src >= dst
+	 nop
+	ADD	a0, a2				# dst = dst + len
+	ADD	a1, a2				# src = src + len
+
+r_end_bytes:
+	lb	t0, -1(a1)
+	SUB	a2, a2, 0x1
+	sb	t0, -1(a0)
+	SUB	a1, a1, 0x1
+	bnez	a2, r_end_bytes
+	 SUB	a0, a0, 0x1
+
+r_out:
+	jr	ra
+	 move	a2, zero
+
+r_end_bytes_up:
+	lb	t0, (a1)
+	SUB	a2, a2, 0x1
+	sb	t0, (a0)
+	ADD	a1, a1, 0x1
+	bnez	a2, r_end_bytes_up
+	 ADD	a0, a0, 0x1
+
+	jr	ra
+	 move	a2, zero
+	END(__rmemcpy)
diff --git a/arch/mips/cavium-octeon/serial.c b/arch/mips/cavium-octeon/serial.c
new file mode 100644
index 0000000..8240728
--- /dev/null
+++ b/arch/mips/cavium-octeon/serial.c
@@ -0,0 +1,136 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2007 Cavium Networks
+ */
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_reg.h>
+#include <linux/tty.h>
+
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+
+#ifdef CONFIG_GDB_CONSOLE
+#define DEBUG_UART 0
+#else
+#define DEBUG_UART 1
+#endif
+
+unsigned int octeon_serial_in(struct uart_port *up, int offset)
+{
+	int rv = cvmx_read_csr((uint64_t)(up->membase + (offset << 3)));
+	if (offset == UART_IIR && (rv & 0xf) == 7) {
+		/* Busy interrupt, read the USR (39) and try again. */
+		cvmx_read_csr((uint64_t)(up->membase + (39 << 3)));
+		rv = cvmx_read_csr((uint64_t)(up->membase + (offset << 3)));
+	}
+	return rv;
+}
+
+void octeon_serial_out(struct uart_port *up, int offset, int value)
+{
+	/*
+	 * If bits 6 or 7 of the OCTEON UART's LCR are set, it quits
+	 * working.
+	 */
+	if (offset == UART_LCR)
+		value &= 0x9f;
+	cvmx_write_csr((uint64_t)(up->membase + (offset << 3)), (u8)value);
+}
+
+/*
+ * Allocated in .bss, so it is all zeroed.
+ */
+#define OCTEON_MAX_UARTS 3
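+/* The spare, always-zero element terminates the plat_serial8250_port list. */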
+static struct plat_serial8250_port octeon_uart8250_data[OCTEON_MAX_UARTS + 1];
+static struct platform_device octeon_uart8250_device = {
+	.name			= "serial8250",
+	.id			= PLAT8250_DEV_PLATFORM,
+	.dev			= {
+		.platform_data	= octeon_uart8250_data,
+	},
+};
+
+static void __init octeon_uart_set_common(struct plat_serial8250_port *p)
+{
+	p->flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
+	p->type = PORT_OCTEON;
+	p->iotype = UPIO_MEM;
+	p->regshift = 3;	/* I/O addresses are every 8 bytes */
+	p->uartclk = mips_hpt_frequency;
+	p->serial_in = octeon_serial_in;
+	p->serial_out = octeon_serial_out;
+}
+
+static int __init octeon_serial_init(void)
+{
+	int enable_uart0;
+	int enable_uart1;
+	int enable_uart2;
+	struct plat_serial8250_port *p;
+
+#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
+	/*
+	 * If we are configured to run as the second of two kernels,
+	 * disable uart0 and enable uart1. Uart0 is owned by the first
+	 * kernel
+	 */
+	enable_uart0 = 0;
+	enable_uart1 = 1;
+#else
+	/*
+	 * We are configured for the first kernel. We'll enable uart0
+	 * if the bootloader told us to use 0, otherwise we'll enable
+	 * uart1.
+	 */
+	enable_uart0 = (octeon_get_boot_uart() == 0);
+	enable_uart1 = (octeon_get_boot_uart() == 1);
+#ifdef CONFIG_KGDB
+	enable_uart1 = 1;
+#endif
+#endif
+
+	/* Right now CN52XX is the only chip with a third uart */
+	enable_uart2 = OCTEON_IS_MODEL(OCTEON_CN52XX);
+
+	p = octeon_uart8250_data;
+	if (enable_uart0) {
+		/* Add a ttyS device for hardware uart 0 */
+		octeon_uart_set_common(p);
+		p->membase = (void *) CVMX_MIO_UARTX_RBR(0);
+		p->mapbase = CVMX_MIO_UARTX_RBR(0) & ((1ull << 49) - 1);
+		p->irq = OCTEON_IRQ_UART0;
+		p++;
+	}
+
+	if (enable_uart1) {
+		/* Add a ttyS device for hardware uart 1 */
+		octeon_uart_set_common(p);
+		p->membase = (void *) CVMX_MIO_UARTX_RBR(1);
+		p->mapbase = CVMX_MIO_UARTX_RBR(1) & ((1ull << 49) - 1);
+		p->irq = OCTEON_IRQ_UART1;
+		p++;
+	}
+	if (enable_uart2) {
+		/* Add a ttyS device for hardware uart 2 */
+		octeon_uart_set_common(p);
+		p->membase = (void *) CVMX_MIO_UART2_RBR;
+		p->mapbase = CVMX_MIO_UART2_RBR & ((1ull << 49) - 1);
+		p->irq = OCTEON_IRQ_UART2;
+		p++;
+	}
+
+	BUG_ON(p > &octeon_uart8250_data[OCTEON_MAX_UARTS]);
+
+	return platform_device_register(&octeon_uart8250_device);
+}
+
+device_initcall(octeon_serial_init);
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
new file mode 100644
index 0000000..e085fed
--- /dev/null
+++ b/arch/mips/cavium-octeon/setup.c
@@ -0,0 +1,929 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2007 Cavium Networks
+ * Copyright (C) 2008 Wind River Systems
+ */
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/serial.h>
+#include <linux/types.h>
+#include <linux/string.h>	/* for memset */
+#include <linux/serial.h>
+#include <linux/tty.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/serial_8250.h>
+#include <linux/string.h>
+
+#include <asm/processor.h>
+#include <asm/reboot.h>
+#include <asm/smp-ops.h>
+#include <asm/system.h>
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/bootinfo.h>
+#include <asm/sections.h>
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+
+#ifdef CONFIG_CAVIUM_DECODE_RSL
+extern void cvmx_interrupt_rsl_decode(void);
+extern int __cvmx_interrupt_ecc_report_single_bit_errors;
+extern void cvmx_interrupt_rsl_enable(void);
+#endif
+
+extern struct plat_smp_ops octeon_smp_ops;
+
+#ifdef CONFIG_PCI
+extern void pci_console_init(const char *arg);
+#endif
+
+#ifdef CONFIG_CAVIUM_RESERVE32
+extern uint64_t octeon_reserve32_memory;
+#endif
+static unsigned long long MAX_MEMORY = 512ull << 20;
+
+struct octeon_boot_descriptor *octeon_boot_desc_ptr;
+
+struct cvmx_bootinfo *octeon_bootinfo;
+EXPORT_SYMBOL(octeon_bootinfo);
+
+#ifdef CONFIG_CAVIUM_RESERVE32
+uint64_t octeon_reserve32_memory;
+EXPORT_SYMBOL(octeon_reserve32_memory);
+#endif
+
+static int octeon_uart;
+
+extern asmlinkage void handle_int(void);
+extern asmlinkage void plat_irq_dispatch(void);
+
+/**
+ * Return non-zero if we are currently running in the Octeon simulator.
+ */
+int octeon_is_simulation(void)
+{
+	return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
+}
+EXPORT_SYMBOL(octeon_is_simulation);
+
+/**
+ * Return true if Octeon is in PCI Host mode. This means
+ * Linux can control the PCI bus.
+ *
+ * Returns non-zero if Octeon is in host mode.
+ */
+int octeon_is_pci_host(void)
+{
+#ifdef CONFIG_PCI
+	return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
+#else
+	return 0;
+#endif
+}
+
+/**
+ * Get the clock rate of Octeon
+ *
+ * Returns Clock rate in HZ
+ */
+uint64_t octeon_get_clock_rate(void)
+{
+	if (octeon_is_simulation())
+		octeon_bootinfo->eclock_hz = 6000000;
+	return octeon_bootinfo->eclock_hz;
+}
+EXPORT_SYMBOL(octeon_get_clock_rate);
+
+/**
+ * Write to the LCD display connected to the bootbus. This display
+ * exists on most Cavium evaluation boards. If it doesn't exist, then
+ * this function doesn't do anything.
+ *
+ * @s:      String to write
+ */
+void octeon_write_lcd(const char *s)
+{
+	if (octeon_bootinfo->led_display_base_addr) {
+		void __iomem *lcd_address =
+			ioremap_nocache(octeon_bootinfo->led_display_base_addr,
+					8);
+		int i;
+		for (i = 0; i < 8; i++, s++) {
+			if (*s)
+				iowrite8(*s, lcd_address + i);
+			else
+				iowrite8(' ', lcd_address + i);
+		}
+		iounmap(lcd_address);
+	}
+}
+
+/**
+ * Return the console uart passed by the bootloader
+ *
+ * Returns uart   (0 or 1)
+ */
+int octeon_get_boot_uart(void)
+{
+	int uart;
+#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
+	uart = 1;
+#else
+	uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
+		1 : 0;
+#endif
+	return uart;
+}
+
+/**
+ * Get the coremask Linux was booted on.
+ *
+ * Returns Core mask
+ */
+int octeon_get_boot_coremask(void)
+{
+	return octeon_boot_desc_ptr->core_mask;
+}
+
+/**
+ * Check the hardware BIST results for a CPU
+ */
+void octeon_check_cpu_bist(void)
+{
+	const int coreid = cvmx_get_core_num();
+	unsigned long long mask;
+	unsigned long long bist_val;
+
+	/* Check BIST results for COP0 registers */
+	mask = 0x1f00000000ull;
+	bist_val = read_octeon_c0_icacheerr();
+	if (bist_val & mask)
+		pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
+		       coreid, bist_val);
+
+	bist_val = read_octeon_c0_dcacheerr();
+	if (bist_val & 1)
+		pr_err("Core%d L1 Dcache parity error: "
+		       "CacheErr(dcache) = 0x%llx\n",
+		       coreid, bist_val);
+
+	mask = 0xfc00000000000000ull;
+	bist_val = read_c0_cvmmemctl();
+	if (bist_val & mask)
+		pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
+		       coreid, bist_val);
+
+	write_octeon_c0_dcacheerr(0);
+}
+
+#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
+/**
+ * Called on every core to setup the wired tlb entry needed
+ * if CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB is set.
+ *
+ */
+static void octeon_hal_setup_per_cpu_reserved32(void *unused)
+{
+	/*
+	 * The config has selected to wire the reserve32 memory for all
+	 * userspace applications. We need to put a wired TLB entry in for each
+	 * 512MB of reserve32 memory. We only handle double 256MB pages here,
+	 * so reserve32 must be a multiple of 512MB.
+	 */
+	uint32_t size = CONFIG_CAVIUM_RESERVE32;
+	uint32_t entrylo0 =
+		0x7 | ((octeon_reserve32_memory & ((1ul << 40) - 1)) >> 6);
+	uint32_t entrylo1 = entrylo0 + (256 << 14);
+	uint32_t entryhi = (0x80000000UL - (CONFIG_CAVIUM_RESERVE32 << 20));
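+	/* EntryLo holds PA >> 6, so a step of 256 << 14 advances by 256MB. */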
+	while (size >= 512) {
+#if 0
+		pr_info("CPU%d: Adding double wired TLB entry for 0x%lx\n",
+			smp_processor_id(), entryhi);
+#endif
+		add_wired_entry(entrylo0, entrylo1, entryhi, PM_256M);
+		entrylo0 += 512 << 14;
+		entrylo1 += 512 << 14;
+		entryhi += 512 << 20;
+		size -= 512;
+	}
+}
+#endif /* CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB */
+
+/**
+ * Called to release the named block which was used to make sure
+ * that nobody used the memory for something else during
+ * init. Now we'll free it so userspace apps can use this
+ * memory region with bootmem_alloc.
+ *
+ * This function is called only once from prom_free_prom_memory().
+ */
+void octeon_hal_setup_reserved32(void)
+{
+#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
+	on_each_cpu(octeon_hal_setup_per_cpu_reserved32, NULL, 0, 1);
+#endif
+}
+
+/**
+ * Reboot Octeon
+ *
+ * @command: Command to pass to the bootloader. Currently ignored.
+ */
+static void octeon_restart(char *command)
+{
+	/* Disable all watchdogs before soft reset. They don't get cleared */
+#ifdef CONFIG_SMP
+	int cpu;
+	for_each_online_cpu(cpu)
+		cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
+#else
+	cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+#endif
+
+	mb();
+	while (1)
+		cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
+}
+
+
+/**
+ * Permanently stop a core.
+ *
+ * @arg: Ignored.
+ */
+static void octeon_kill_core(void *arg)
+{
+	mb();
+	if (octeon_is_simulation()) {
+		/* The simulator needs the watchdog to stop for dead cores */
+		cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+		/* A break instruction causes the simulator to stop a core */
+		asm volatile ("sync\nbreak");
+	}
+}
+
+
+/**
+ * Halt the system
+ */
+static void octeon_halt(void)
+{
+	smp_call_function(octeon_kill_core, NULL, 0);
+
+	switch (octeon_bootinfo->board_type) {
+	case CVMX_BOARD_TYPE_NAO38:
+		/* Driving a 1 to GPIO 12 shuts off this board */
+		cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
+		cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
+		break;
+	default:
+		octeon_write_lcd("PowerOff");
+		break;
+	}
+
+	octeon_kill_core(NULL);
+}
+
+#if 0
+/**
+ * Platform time init specifics.
+ */
+void __init plat_time_init(void)
+{
+	/* Nothing special here, but we are required to have one */
+}
+
+#endif
+
+/**
+ * Handle all the error condition interrupts that might occur.
+ *
+ */
+#ifdef CONFIG_CAVIUM_DECODE_RSL
+static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id)
+{
+	cvmx_interrupt_rsl_decode();
+	return IRQ_HANDLED;
+}
+#endif
+
+/**
+ * Return a string representing the system type.
+ */
+const char *octeon_board_type_string(void)
+{
+	static char name[80];
+	sprintf(name, "%s (%s)",
+		cvmx_board_type_to_string(octeon_bootinfo->board_type),
+		octeon_model_get_string(read_c0_prid()));
+	return name;
+}
+
+const char *get_system_type(void)
+	__attribute__ ((alias("octeon_board_type_string")));
+
+void octeon_user_io_init(void)
+{
+	union octeon_cvmemctl cvmmemctl;
+	union cvmx_iob_fau_timeout fau_timeout;
+	union cvmx_pow_nw_tim nm_tim;
+	uint64_t cvmctl;
+
+	/* Get the current settings for CP0_CVMMEMCTL_REG */
+	cvmmemctl.u64 = read_c0_cvmmemctl();
+	/* R/W If set, marked write-buffer entries time out the same
+	 * as other entries; if clear, marked write-buffer entries
+	 * use the maximum timeout. */
+	cvmmemctl.s.dismarkwblongto = 1;
+	/* R/W If set, a merged store does not clear the write-buffer
+	 * entry timeout state. */
+	cvmmemctl.s.dismrgclrwbto = 0;
+	/* R/W Two bits that are the MSBs of the resultant CVMSEG LM
+	 * word location for an IOBDMA. The other 8 bits come from the
+	 * SCRADDR field of the IOBDMA. */
+	cvmmemctl.s.iobdmascrmsb = 0;
+	/* R/W If set, SYNCWS and SYNCS only order marked stores; if
+	 * clear, SYNCWS and SYNCS only order unmarked
+	 * stores. SYNCWSMARKED has no effect when DISSYNCWS is
+	 * set. */
+	cvmmemctl.s.syncwsmarked = 0;
+	/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
+	cvmmemctl.s.dissyncws = 0;
+	/* R/W If set, no stall happens on write buffer full. */
+	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
+		cvmmemctl.s.diswbfst = 1;
+	else
+		cvmmemctl.s.diswbfst = 0;
+	/* R/W If set (and SX set), supervisor-level loads/stores can
+	 * use XKPHYS addresses with <48>==0 */
+	cvmmemctl.s.xkmemenas = 0;
+
+	/* R/W If set (and UX set), user-level loads/stores can use
+	 * XKPHYS addresses with VA<48>==0 */
+	cvmmemctl.s.xkmemenau = 0;
+
+	/* R/W If set (and SX set), supervisor-level loads/stores can
+	 * use XKPHYS addresses with VA<48>==1 */
+	cvmmemctl.s.xkioenas = 0;
+
+	/* R/W If set (and UX set), user-level loads/stores can use
+	 * XKPHYS addresses with VA<48>==1 */
+	cvmmemctl.s.xkioenau = 0;
+
+	/* R/W If set, all stores act as SYNCW (NOMERGE must be set
+	 * when this is set) RW, reset to 0. */
+	cvmmemctl.s.allsyncw = 0;
+
+	/* R/W If set, no stores merge, and all stores reach the
+	 * coherent bus in order. */
+	cvmmemctl.s.nomerge = 0;
+	/* R/W Selects the bit in the counter used for DID time-outs:
+	 * 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. Actual time-out is
+	 * between 1x and 2x this interval. For example, with
+	 * DIDTTO=3, expiration interval is between 16K and 32K. */
+	cvmmemctl.s.didtto = 0;
+	/* R/W If set, the (mem) CSR clock never turns off. */
+	cvmmemctl.s.csrckalwys = 0;
+	/* R/W If set, mclk never turns off. */
+	cvmmemctl.s.mclkalwys = 0;
+	/* R/W Selects the bit in the counter used for write buffer
+	 * flush time-outs (WBFLT+11) is the bit position in an
+	 * internal counter used to determine expiration. The write
+	 * buffer expires between 1x and 2x this interval. For
+	 * example, with WBFLT = 0, a write buffer expires between 2K
+	 * and 4K cycles after the write buffer entry is allocated. */
+	cvmmemctl.s.wbfltime = 0;
+	/* R/W If set, do not put Istream in the L2 cache. */
+	cvmmemctl.s.istrnol2 = 0;
+	/* R/W The write buffer threshold. */
+	cvmmemctl.s.wbthresh = 10;
+	/* R/W If set, CVMSEG is available for loads/stores in
+	 * kernel/debug mode. */
+#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
+	cvmmemctl.s.cvmsegenak = 1;
+#else
+	cvmmemctl.s.cvmsegenak = 0;
+#endif
+	/* R/W If set, CVMSEG is available for loads/stores in
+	 * supervisor mode. */
+	cvmmemctl.s.cvmsegenas = 0;
+	/* R/W If set, CVMSEG is available for loads/stores in user
+	 * mode. */
+	cvmmemctl.s.cvmsegenau = 0;
+	/* R/W Size of local memory in cache blocks, 54 (6912 bytes)
+	 * is max legal value. */
+	cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;
+
+
+	if (smp_processor_id() == 0)
+		pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
+			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
+			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
+
+	write_c0_cvmmemctl(cvmmemctl.u64);
+
+	/* Move the performance counter interrupts to IRQ 6 */
+	cvmctl = read_c0_cvmctl();
+	cvmctl &= ~(7 << 7);
+	cvmctl |= 6 << 7;
+	write_c0_cvmctl(cvmctl);
+
+	/* Set a default for the hardware timeouts */
+	fau_timeout.u64 = 0;
+	fau_timeout.s.tout_val = 0xfff;
+	/* Disable tagwait FAU timeout */
+	fau_timeout.s.tout_enb = 0;
+	cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
+
+	nm_tim.u64 = 0;
+	/* 4096 cycles */
+	nm_tim.s.nw_tim = 3;
+	cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
+
+	write_octeon_c0_icacheerr(0);
+	write_c0_derraddr1(0);
+}
+
+/**
+ * Early entry point for arch setup
+ */
+void __init prom_init(void)
+{
+	struct cvmx_sysinfo *sysinfo;
+	const int coreid = cvmx_get_core_num();
+	int i;
+	int argc;
+	struct uart_port octeon_port;
+#ifdef CONFIG_CAVIUM_RESERVE32
+	int64_t addr = -1;
+#endif
+	/*
+	 * The bootloader passes a pointer to the boot descriptor in
+	 * $a3, this is available as fw_arg3.
+	 */
+	octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
+	octeon_bootinfo =
+		cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
+	cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
+
+	/*
+	 * Only enable the LED controller if we're running on a CN38XX, CN58XX,
+	 * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
+	 */
+	if (!octeon_is_simulation() &&
+	    octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
+		cvmx_write_csr(CVMX_LED_EN, 0);
+		cvmx_write_csr(CVMX_LED_PRT, 0);
+		cvmx_write_csr(CVMX_LED_DBG, 0);
+		cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
+		cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
+		cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
+		cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
+		cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
+		cvmx_write_csr(CVMX_LED_EN, 1);
+	}
+#ifdef CONFIG_CAVIUM_RESERVE32
+	/*
+	 * We need to temporarily allocate all memory in the reserve32
+	 * region. This makes sure the kernel doesn't allocate this
+	 * memory when it is getting memory from the
+	 * bootloader. Later, after the memory allocations are
+	 * complete, the reserve32 will be freed.
+	 */
+#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
+	if (CONFIG_CAVIUM_RESERVE32 & 0x1ff)
+		pr_err("CAVIUM_RESERVE32 isn't a multiple of 512MB. "
+		       "This is required if CAVIUM_RESERVE32_USE_WIRED_TLB "
+		       "is set\n");
+	else
+		addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
+							0, 0, 512 << 20,
+							"CAVIUM_RESERVE32", 0);
+#else
+	/*
+	 * Allocate memory for RESERVED32 aligned on 2MB boundary. This
+	 * is in case we later use hugetlb entries with it.
+	 */
+	addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
+						0, 0, 2 << 20,
+						"CAVIUM_RESERVE32", 0);
+#endif
+	if (addr < 0)
+		pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
+	else
+		octeon_reserve32_memory = addr;
+#endif
+
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
+	if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
+		pr_info("Skipping L2 locking due to reduced L2 cache size\n");
+	} else {
+		uint32_t ebase = read_c0_ebase() & 0x3ffff000;
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
+		/* TLB refill */
+		cvmx_l2c_lock_mem_region(ebase, 0x100);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
+		/* General exception */
+		cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
+		/* Interrupt handler */
+		cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
+		cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
+		cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
+		cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
+#endif
+	}
+#endif
+
+	sysinfo = cvmx_sysinfo_get();
+	memset(sysinfo, 0, sizeof(*sysinfo));
+	sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
+	sysinfo->phy_mem_desc_ptr =
+		cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
+	sysinfo->core_mask = octeon_bootinfo->core_mask;
+	sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
+	sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
+	sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
+	sysinfo->board_type = octeon_bootinfo->board_type;
+	sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
+	sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
+	memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
+	       sizeof(sysinfo->mac_addr_base));
+	sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
+	memcpy(sysinfo->board_serial_number,
+	       octeon_bootinfo->board_serial_number,
+	       sizeof(sysinfo->board_serial_number));
+	sysinfo->compact_flash_common_base_addr =
+		octeon_bootinfo->compact_flash_common_base_addr;
+	sysinfo->compact_flash_attribute_base_addr =
+		octeon_bootinfo->compact_flash_attribute_base_addr;
+	sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
+	sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
+	sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
+
+
+	octeon_check_cpu_bist();
+
+	octeon_uart = octeon_get_boot_uart();
+
+	/*
+	 * Disable All CIU Interrupts. The ones we need will be
+	 * enabled later.  Read the SUM register so we know the write
+	 * completed.
+	 */
+	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
+	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
+	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
+	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
+	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
+
+#ifdef CONFIG_SMP
+	octeon_write_lcd("LinuxSMP");
+#else
+	octeon_write_lcd("Linux");
+#endif
+
+#ifdef CONFIG_CAVIUM_GDB
+	/*
+	 * When debugging the linux kernel, force the cores to enter
+	 * the debug exception handler to break in.
+	 */
+	if (octeon_get_boot_debug_flag()) {
+		cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num());
+		cvmx_read_csr(CVMX_CIU_DINT);
+	}
+#endif
+
+	/*
+	 * BIST should always be enabled when doing a soft reset. L2
+	 * Cache locking for instance is not cleared unless BIST is
+	 * enabled.  Unfortunately, due to chip erratum G-200 for
+	 * CN38XX and CN31XX, BIST must be disabled on these parts.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
+	    OCTEON_IS_MODEL(OCTEON_CN31XX))
+		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
+	else
+		cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);
+
+	/* Default to 64MB in the simulator to speed things up */
+	if (octeon_is_simulation())
+		MAX_MEMORY = 64ull << 20;
+
+	arcs_cmdline[0] = 0;
+	argc = octeon_boot_desc_ptr->argc;
+	for (i = 0; i < argc; i++) {
+		const char *arg =
+			cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
+		if ((strncmp(arg, "MEM=", 4) == 0) ||
+		    (strncmp(arg, "mem=", 4) == 0)) {
+			sscanf(arg + 4, "%llu", &MAX_MEMORY);
+			MAX_MEMORY <<= 20;
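+			/* mem=0 is treated as unlimited: fall back to a 32GB cap. */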
+			if (MAX_MEMORY == 0)
+				MAX_MEMORY = 32ull << 30;
+		} else if (strcmp(arg, "ecc_verbose") == 0) {
+#ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC
+			__cvmx_interrupt_ecc_report_single_bit_errors = 1;
+			pr_notice("Reporting of single bit ECC errors is "
+				  "turned on\n");
+#endif
+		} else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
+			   sizeof(arcs_cmdline) - 1) {
+			strcat(arcs_cmdline, " ");
+			strcat(arcs_cmdline, arg);
+		}
+	}
+
+	if (strstr(arcs_cmdline, "console=") == NULL) {
+#ifdef CONFIG_GDB_CONSOLE
+		strcat(arcs_cmdline, " console=gdb");
+#else
+#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
+		strcat(arcs_cmdline, " console=ttyS0,115200");
+#else
+		if (octeon_uart == 1)
+			strcat(arcs_cmdline, " console=ttyS1,115200");
+		else
+			strcat(arcs_cmdline, " console=ttyS0,115200");
+#endif
+#endif
+	}
+
+	if (octeon_is_simulation()) {
+		/*
+		 * The simulator uses an mtdram device pre-filled with
+		 * the filesystem. Also specify the calibration delay
+		 * to avoid calculating it every time.
+		 */
+		strcat(arcs_cmdline, " rw root=1f00"
+		       " lpj=60176 slram=root,0x40000000,+1073741824");
+	}
+
+	mips_hpt_frequency = octeon_get_clock_rate();
+
+	octeon_init_cvmcount();
+
+	_machine_restart = octeon_restart;
+	_machine_halt = octeon_halt;
+
+	memset(&octeon_port, 0, sizeof(octeon_port));
+	/*
+	 * For early_serial_setup we don't set the port type or
+	 * UPF_FIXED_TYPE.
+	 */
+	octeon_port.flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ;
+	octeon_port.iotype = UPIO_MEM;
+	/* I/O addresses are every 8 bytes */
+	octeon_port.regshift = 3;
+	/* Clock rate of the chip */
+	octeon_port.uartclk = mips_hpt_frequency;
+	octeon_port.fifosize = 64;
+	octeon_port.mapbase = 0x0001180000000800ull + (1024 * octeon_uart);
+	octeon_port.membase = cvmx_phys_to_ptr(octeon_port.mapbase);
+	octeon_port.serial_in = octeon_serial_in;
+	octeon_port.serial_out = octeon_serial_out;
+#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
+	octeon_port.line = 0;
+#else
+	octeon_port.line = octeon_uart;
+#endif
+	octeon_port.irq = 42 + octeon_uart;
+	early_serial_setup(&octeon_port);
+
+	octeon_user_io_init();
+	register_smp_ops(&octeon_smp_ops);
+}
+
+void __init plat_mem_setup(void)
+{
+	uint64_t mem_alloc_size;
+	uint64_t total;
+	int64_t memory;
+
+	total = 0;
+
+	/* First add the init memory we will be returning.  */
+	memory = __pa_symbol(&__init_begin) & PAGE_MASK;
+	mem_alloc_size = (__pa_symbol(&__init_end) & PAGE_MASK) - memory;
+	if (mem_alloc_size > 0) {
+		add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
+		total += mem_alloc_size;
+	}
+
+	/*
+	 * The MIPS memory init uses the first memory location for
+	 * some memory vectors. When SPARSEMEM is in use, it doesn't
+	 * verify that the size is big enough for the final
+	 * vectors. Making the smallest chunk 4MB seems to be enough
+	 * to consistently work.
+	 */
+	mem_alloc_size = 4 << 20;
+	if (mem_alloc_size > MAX_MEMORY)
+		mem_alloc_size = MAX_MEMORY;
+
+	/*
+	 * When allocating memory, we want incrementing addresses from
+	 * bootmem_alloc so the code in add_memory_region can merge
+	 * regions next to each other.
+	 */
+	cvmx_bootmem_lock();
+	while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
+		&& (total < MAX_MEMORY)) {
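+		/*
+		 * Address limits for the allocation: unrestricted on 64-bit
+		 * kernels, below 2GB with HIGHMEM, otherwise below the 512MB
+		 * KSEG0 window.
+		 */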
+#if defined(CONFIG_64BIT) || defined(CONFIG_64BIT_PHYS_ADDR)
+		memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
+						__pa_symbol(&__init_end), -1,
+						0x100000,
+						CVMX_BOOTMEM_FLAG_NO_LOCKING);
+#elif defined(CONFIG_HIGHMEM)
+		memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 1ull << 31,
+						0x100000,
+						CVMX_BOOTMEM_FLAG_NO_LOCKING);
+#else
+		memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 512 << 20,
+						0x100000,
+						CVMX_BOOTMEM_FLAG_NO_LOCKING);
+#endif
+		if (memory >= 0) {
+			/*
+			 * This function automatically merges address
+			 * regions next to each other if they are
+			 * received in incrementing order.
+			 */
+			add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
+			total += mem_alloc_size;
+		} else {
+			break;
+		}
+	}
+	cvmx_bootmem_unlock();
+
+#ifdef CONFIG_CAVIUM_RESERVE32
+	/*
+	 * Now that we've allocated the kernel memory it is safe to
+	 * free the reserved region. We free it here so that builtin
+	 * drivers can use the memory.
+	 */
+	if (octeon_reserve32_memory)
+		cvmx_bootmem_free_named("CAVIUM_RESERVE32");
+#endif /* CONFIG_CAVIUM_RESERVE32 */
+
+	if (total == 0)
+		panic("Unable to allocate memory from "
+		      "cvmx_bootmem_phy_alloc\n");
+}
+
+
+int prom_putchar(char c)
+{
+	uint64_t lsrval;
+
+	/* Spin until there is room */
+	do {
+		lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
+	} while ((lsrval & 0x20) == 0);
+
+	/* Write the byte */
+	cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c);
+	return 1;
+}
+
+void prom_free_prom_memory(void)
+{
+#ifdef CONFIG_CAVIUM_DECODE_RSL
+	cvmx_interrupt_rsl_enable();
+
+	/* Add an interrupt handler for general failures. */
+	if (request_irq(OCTEON_IRQ_RML, octeon_rlm_interrupt, IRQF_SHARED,
+			"RML/RSL", octeon_rlm_interrupt)) {
+		panic("Unable to request_irq(OCTEON_IRQ_RML)\n");
+	}
+#endif
+
+	/* This call is here so that it is performed after any TLB
+	   initializations. It needs to be after these in case the
+	   CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB option is set */
+	octeon_hal_setup_reserved32();
+}
+
+static struct octeon_cf_data octeon_cf_data;
+
+static int __init octeon_cf_device_init(void)
+{
+	union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
+	unsigned long base_ptr, region_base, region_size;
+	struct platform_device *pd;
+	struct resource cf_resources[3];
+	unsigned int num_resources;
+	int i;
+	int ret = 0;
+
+	/* Setup octeon-cf platform device if present. */
+	base_ptr = 0;
+	if (octeon_bootinfo->major_version == 1
+		&& octeon_bootinfo->minor_version >= 1) {
+		if (octeon_bootinfo->compact_flash_common_base_addr)
+			base_ptr =
+				octeon_bootinfo->compact_flash_common_base_addr;
+	} else {
+		base_ptr = 0x1d000800;
+	}
+
+	if (!base_ptr)
+		return ret;
+
+	/* Find CS0 region. */
+	for (i = 0; i < 8; i++) {
+		mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i));
+		region_base = mio_boot_reg_cfg.s.base << 16;
+		region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
+		if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
+		    && base_ptr < region_base + region_size)
+			break;
+	}
+	if (i >= 7) {
+		/* i and i + 1 are CS0 and CS1, both must be less than 8. */
+		goto out;
+	}
+	octeon_cf_data.base_region = i;
+	octeon_cf_data.is16bit = mio_boot_reg_cfg.s.width;
+	octeon_cf_data.base_region_bias = base_ptr - region_base;
+	memset(cf_resources, 0, sizeof(cf_resources));
+	num_resources = 0;
+	cf_resources[num_resources].flags	= IORESOURCE_MEM;
+	cf_resources[num_resources].start	= region_base;
+	cf_resources[num_resources].end	= region_base + region_size - 1;
+	num_resources++;
+
+
+	if (!(base_ptr & 0xfffful)) {
+		/*
+		 * Boot loader signals availability of DMA (true_ide
+		 * mode) by setting low order bits of base_ptr to
+		 * zero.
+		 */
+
+		/* Assume that CS1 immediately follows. */
+		mio_boot_reg_cfg.u64 =
+			cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i + 1));
+		region_base = mio_boot_reg_cfg.s.base << 16;
+		region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
+		if (!mio_boot_reg_cfg.s.en)
+			goto out;
+
+		cf_resources[num_resources].flags	= IORESOURCE_MEM;
+		cf_resources[num_resources].start	= region_base;
+		cf_resources[num_resources].end	= region_base + region_size - 1;
+		num_resources++;
+
+		octeon_cf_data.dma_engine = 0;
+		cf_resources[num_resources].flags	= IORESOURCE_IRQ;
+		cf_resources[num_resources].start	= OCTEON_IRQ_BOOTDMA;
+		cf_resources[num_resources].end	= OCTEON_IRQ_BOOTDMA;
+		num_resources++;
+	} else {
+		octeon_cf_data.dma_engine = -1;
+	}
+
+	pd = platform_device_alloc("pata_octeon_cf", -1);
+	if (!pd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	pd->dev.platform_data = &octeon_cf_data;
+
+	ret = platform_device_add_resources(pd, cf_resources, num_resources);
+	if (ret)
+		goto fail;
+
+	ret = platform_device_add(pd);
+	if (ret)
+		goto fail;
+
+	return ret;
+fail:
+	platform_device_put(pd);
+out:
+	return ret;
+}
+device_initcall(octeon_cf_device_init);
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
new file mode 100644
index 0000000..24e0ad6
--- /dev/null
+++ b/arch/mips/cavium-octeon/smp.c
@@ -0,0 +1,211 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2008 Cavium Networks
+ */
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+#include <asm/mmu_context.h>
+#include <asm/system.h>
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+
+volatile unsigned long octeon_processor_boot = 0xff;
+volatile unsigned long octeon_processor_sp;
+volatile unsigned long octeon_processor_gp;
+
+static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
+{
+	const int coreid = cvmx_get_core_num();
+	uint64_t action;
+
+	/* Load the mailbox register to figure out what we're supposed to do */
+	action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid));
+
+	/* Clear the mailbox to clear the interrupt */
+	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
+
+	if (action & SMP_CALL_FUNCTION)
+		smp_call_function_interrupt();
+
+	/* Check if we've been told to flush the icache */
+	if (action & SMP_ICACHE_FLUSH)
+		asm volatile ("synci 0($0)\n");
+	return IRQ_HANDLED;
+}
+
+/**
+ * Send an inter-processor interrupt (IPI) to the given cpu, setting the
+ * requested action bits in that core's mailbox register.
+ */
+void octeon_send_ipi_single(int cpu, unsigned int action)
+{
+	int coreid = cpu_logical_map(cpu);
+	/*
+	pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
+	       coreid, action);
+	*/
+	cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
+}
+
+static inline void octeon_send_ipi_mask(cpumask_t mask, unsigned int action)
+{
+	unsigned int i;
+
+	for_each_cpu_mask(i, mask)
+		octeon_send_ipi_single(i, action);
+}
+
+/**
+ * Detect available CPUs, populate phys_cpu_present_map
+ */
+static void octeon_smp_setup(void)
+{
+	const int coreid = cvmx_get_core_num();
+	int cpus;
+	int id;
+
+	int core_mask = octeon_get_boot_coremask();
+
+	cpus_clear(cpu_possible_map);
+	__cpu_number_map[coreid] = 0;
+	__cpu_logical_map[0] = coreid;
+	cpu_set(0, cpu_possible_map);
+
+	cpus = 1;
+	for (id = 0; id < 16; id++) {
+		if ((id != coreid) && (core_mask & (1 << id))) {
+			cpu_set(cpus, cpu_possible_map);
+			__cpu_number_map[id] = cpus;
+			__cpu_logical_map[cpus] = id;
+			cpus++;
+		}
+	}
+}
+
+/**
+ * Firmware CPU startup hook
+ *
+ */
+static void octeon_boot_secondary(int cpu, struct task_struct *idle)
+{
+	int count;
+
+	pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
+		cpu_logical_map(cpu));
+
+	octeon_processor_sp = __KSTK_TOS(idle);
+	octeon_processor_gp = (unsigned long)(task_thread_info(idle));
+	octeon_processor_boot = cpu_logical_map(cpu);
+	mb();
+
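+	/* The started core is expected to clear octeon_processor_sp once it
+	   has picked up its SP and GP; the loop below waits for that. */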
+	count = 10000;
+	while (octeon_processor_sp && count) {
+		/* Waiting for processor to get the SP and GP */
+		udelay(1);
+		count--;
+	}
+	if (count == 0)
+		pr_err("Secondary boot timeout\n");
+}
+
+/**
+ * After we've done initial boot, this function is called to allow the
+ * board code to clean up state, if needed
+ */
+static void octeon_init_secondary(void)
+{
+	const int coreid = cvmx_get_core_num();
+	union cvmx_ciu_intx_sum0 interrupt_enable;
+
+	octeon_check_cpu_bist();
+	octeon_init_cvmcount();
+	/*
+	pr_info("SMP: CPU%d (CoreId %lu) started\n", cpu, coreid);
+	*/
+	/* Enable Mailbox interrupts to this core. These are the only
+	   interrupts allowed on line 3 */
+	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), 0xffffffff);
+	interrupt_enable.u64 = 0;
+	interrupt_enable.s.mbox = 0x3;
+	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), interrupt_enable.u64);
+	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
+	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
+	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
+	/* Enable core interrupt processing for 2,3 and 7 */
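+	/* 0x8c01 == ST0_IE | STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP7 */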
+	set_c0_status(0x8c01);
+}
+
+/**
+ * Callout to firmware before smp_init
+ *
+ */
+void octeon_prepare_cpus(unsigned int max_cpus)
+{
+	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff);
+	if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_SHARED,
+			"mailbox0", mailbox_interrupt)) {
+		panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n");
+	}
+	if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_SHARED,
+			"mailbox1", mailbox_interrupt)) {
+		panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n");
+	}
+}
+
+/**
+ * Last chance for the board code to finish SMP initialization before
+ * the CPU is "online".
+ */
+static void octeon_smp_finish(void)
+{
+#ifdef CONFIG_CAVIUM_GDB
+	unsigned long tmp;
+	/* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also set the MCD0
+	   so that it is not masked by this core, so we know the signal is
+	   received by someone. */
+	asm volatile ("dmfc0 %0, $22\n"
+		      "ori   %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp));
+#endif
+
+	octeon_user_io_init();
+
+	/* to generate the first CPU timer interrupt */
+	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+}
+
+/**
+ * Hook for after all CPUs are online
+ */
+static void octeon_cpus_done(void)
+{
+#ifdef CONFIG_CAVIUM_GDB
+	unsigned long tmp;
+	/* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also set the MCD0
+	   so that it is not masked by this core, so we know the signal is
+	   received by someone. */
+	asm volatile ("dmfc0 %0, $22\n"
+		      "ori   %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp));
+#endif
+}
+
+struct plat_smp_ops octeon_smp_ops = {
+	.send_ipi_single	= octeon_send_ipi_single,
+	.send_ipi_mask		= octeon_send_ipi_mask,
+	.init_secondary		= octeon_init_secondary,
+	.smp_finish		= octeon_smp_finish,
+	.cpus_done		= octeon_cpus_done,
+	.boot_secondary		= octeon_boot_secondary,
+	.smp_setup		= octeon_smp_setup,
+	.prepare_cpus		= octeon_prepare_cpus,
+};
diff --git a/arch/mips/configs/cavium-octeon_defconfig b/arch/mips/configs/cavium-octeon_defconfig
new file mode 100644
index 0000000..7afaa28
--- /dev/null
+++ b/arch/mips/configs/cavium-octeon_defconfig
@@ -0,0 +1,943 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.28-rc6
+# Wed Dec  3 11:00:58 2008
+#
+CONFIG_MIPS=y
+
+#
+# Machine selection
+#
+# CONFIG_MACH_ALCHEMY is not set
+# CONFIG_BASLER_EXCITE is not set
+# CONFIG_BCM47XX is not set
+# CONFIG_MIPS_COBALT is not set
+# CONFIG_MACH_DECSTATION is not set
+# CONFIG_MACH_JAZZ is not set
+# CONFIG_LASAT is not set
+# CONFIG_LEMOTE_FULONG is not set
+# CONFIG_MIPS_MALTA is not set
+# CONFIG_MIPS_SIM is not set
+# CONFIG_MACH_EMMA is not set
+# CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
+# CONFIG_PNX8550_JBS is not set
+# CONFIG_PNX8550_STB810 is not set
+# CONFIG_PMC_MSP is not set
+# CONFIG_PMC_YOSEMITE is not set
+# CONFIG_SGI_IP22 is not set
+# CONFIG_SGI_IP27 is not set
+# CONFIG_SGI_IP28 is not set
+# CONFIG_SGI_IP32 is not set
+# CONFIG_SIBYTE_CRHINE is not set
+# CONFIG_SIBYTE_CARMEL is not set
+# CONFIG_SIBYTE_CRHONE is not set
+# CONFIG_SIBYTE_RHONE is not set
+# CONFIG_SIBYTE_SWARM is not set
+# CONFIG_SIBYTE_LITTLESUR is not set
+# CONFIG_SIBYTE_SENTOSA is not set
+# CONFIG_SIBYTE_BIGSUR is not set
+# CONFIG_SNI_RM is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
+# CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD=y
+CONFIG_CAVIUM_OCTEON_SPECIFIC_OPTIONS=y
+# CONFIG_CAVIUM_OCTEON_2ND_KERNEL is not set
+CONFIG_CAVIUM_OCTEON_HW_FIX_UNALIGNED=y
+CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE=2
+CONFIG_CAVIUM_OCTEON_LOCK_L2=y
+CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB=y
+CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION=y
+CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT=y
+CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT=y
+CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_SUPPORTS_OPROFILE=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
+CONFIG_CEVT_R4K=y
+CONFIG_CSRC_R4K=y
+CONFIG_DMA_COHERENT=y
+# CONFIG_EARLY_PRINTK is not set
+CONFIG_SYS_HAS_EARLY_PRINTK=y
+# CONFIG_HOTPLUG_CPU is not set
+# CONFIG_NO_IOPORT is not set
+CONFIG_CPU_BIG_ENDIAN=y
+# CONFIG_CPU_LITTLE_ENDIAN is not set
+CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
+CONFIG_IRQ_CPU=y
+CONFIG_IRQ_CPU_OCTEON=y
+CONFIG_SWAP_IO_SPACE=y
+CONFIG_MIPS_L1_CACHE_SHIFT=7
+
+#
+# CPU selection
+#
+# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_MIPS32_R1 is not set
+# CONFIG_CPU_MIPS32_R2 is not set
+# CONFIG_CPU_MIPS64_R1 is not set
+# CONFIG_CPU_MIPS64_R2 is not set
+# CONFIG_CPU_R3000 is not set
+# CONFIG_CPU_TX39XX is not set
+# CONFIG_CPU_VR41XX is not set
+# CONFIG_CPU_R4300 is not set
+# CONFIG_CPU_R4X00 is not set
+# CONFIG_CPU_TX49XX is not set
+# CONFIG_CPU_R5000 is not set
+# CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
+# CONFIG_CPU_R6000 is not set
+# CONFIG_CPU_NEVADA is not set
+# CONFIG_CPU_R8000 is not set
+# CONFIG_CPU_R10000 is not set
+# CONFIG_CPU_RM7000 is not set
+# CONFIG_CPU_RM9000 is not set
+# CONFIG_CPU_SB1 is not set
+CONFIG_CPU_CAVIUM_OCTEON=y
+CONFIG_WEAK_ORDERING=y
+CONFIG_WEAK_REORDERING_BEYOND_LLSC=y
+CONFIG_CPU_MIPSR2=y
+CONFIG_SYS_SUPPORTS_64BIT_KERNEL=y
+CONFIG_CPU_SUPPORTS_64BIT_KERNEL=y
+
+#
+# Kernel type
+#
+# CONFIG_32BIT is not set
+CONFIG_64BIT=y
+CONFIG_PAGE_SIZE_4KB=y
+# CONFIG_PAGE_SIZE_8KB is not set
+# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_64KB is not set
+CONFIG_CPU_HAS_PREFETCH=y
+CONFIG_MIPS_MT_DISABLED=y
+# CONFIG_MIPS_MT_SMP is not set
+# CONFIG_MIPS_MT_SMTC is not set
+CONFIG_64BIT_PHYS_ADDR=y
+CONFIG_CPU_HAS_SYNC=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
+CONFIG_CPU_SUPPORTS_HIGHMEM=y
+CONFIG_SYS_SUPPORTS_HIGHMEM=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+# CONFIG_FLATMEM_MANUAL is not set
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+CONFIG_SPARSEMEM_STATIC=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_RESOURCES_64BIT=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_SMP=y
+CONFIG_SYS_SUPPORTS_SMP=y
+CONFIG_NR_CPUS_DEFAULT_16=y
+CONFIG_NR_CPUS=16
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_HZ_48 is not set
+# CONFIG_HZ_100 is not set
+# CONFIG_HZ_128 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_256 is not set
+# CONFIG_HZ_1000 is not set
+# CONFIG_HZ_1024 is not set
+CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
+CONFIG_HZ=250
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_KEXEC is not set
+CONFIG_SECCOMP=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+# CONFIG_PCSPKR_PLATFORM is not set
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+CONFIG_BLOCK_COMPAT=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_CLASSIC_RCU=y
+# CONFIG_PROBE_INITRD_HEADER is not set
+# CONFIG_FREEZER is not set
+
+#
+# Bus options (PCI, PCMCIA, EISA, ISA, TC)
+#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+CONFIG_MMU=y
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_MIPS32_COMPAT=y
+CONFIG_COMPAT=y
+CONFIG_SYSVIPC_COMPAT=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+CONFIG_BINFMT_ELF32=y
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETLABEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_PHONET is not set
+CONFIG_FIB_RULES=y
+# CONFIG_WIRELESS is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0x8000000
+CONFIG_MTD_PHYSMAP_LEN=0x0
+CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+CONFIG_NETDEV_1000=y
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_SOUND is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+CONFIG_STAGING_EXCLUDE_BUILD=y
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+
+#
+# Tracers
+#
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_PREEMPT_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_CMDLINE=""
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_RUNTIME_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
+CONFIG_SECURITY_NETWORK=y
+# CONFIG_SECURITY_NETWORK_XFRM is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_MANAGER=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 023866c..7897f05 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -1,4 +1,3 @@
 include include/asm-generic/Kbuild.asm
 
 header-y += cachectl.h sgidefs.h sysmips.h
-header-y += swab.h
diff --git a/arch/mips/include/asm/byteorder.h b/arch/mips/include/asm/byteorder.h
index 607b718..9579051 100644
--- a/arch/mips/include/asm/byteorder.h
+++ b/arch/mips/include/asm/byteorder.h
@@ -8,8 +8,6 @@
 #ifndef _ASM_BYTEORDER_H
 #define _ASM_BYTEORDER_H
 
-#include <asm/swab.h>
-
 #if defined(__MIPSEB__)
 #include <linux/byteorder/big_endian.h>
 #elif defined(__MIPSEL__)
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 12d12df..a0d14f8 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -38,6 +38,9 @@
 #ifndef cpu_has_tx39_cache
 #define cpu_has_tx39_cache	(cpu_data[0].options & MIPS_CPU_TX39_CACHE)
 #endif
+#ifndef cpu_has_octeon_cache
+#define cpu_has_octeon_cache	0
+#endif
 #ifndef cpu_has_fpu
 #define cpu_has_fpu		(current_cpu_data.options & MIPS_CPU_FPU)
 #define raw_cpu_has_fpu		(raw_current_cpu_data.options & MIPS_CPU_FPU)
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 229a786..c018727 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -33,6 +33,7 @@
 #define PRID_COMP_TOSHIBA	0x070000
 #define PRID_COMP_LSI		0x080000
 #define PRID_COMP_LEXRA		0x0b0000
+#define PRID_COMP_CAVIUM	0x0d0000
 
 
 /*
@@ -114,6 +115,18 @@
 #define PRID_IMP_BCM3302	0x9000
 
 /*
+ * These are the PRID's for when 23:16 == PRID_COMP_CAVIUM
+ */
+
+#define PRID_IMP_CAVIUM_CN38XX 0x0000
+#define PRID_IMP_CAVIUM_CN31XX 0x0100
+#define PRID_IMP_CAVIUM_CN30XX 0x0200
+#define PRID_IMP_CAVIUM_CN58XX 0x0300
+#define PRID_IMP_CAVIUM_CN56XX 0x0400
+#define PRID_IMP_CAVIUM_CN50XX 0x0600
+#define PRID_IMP_CAVIUM_CN52XX 0x0700
+
+/*
  * Definitions for 7:0 on legacy processors
  */
 
@@ -203,6 +216,7 @@
 	 * MIPS64 class processors
 	 */
 	CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
+	CPU_CAVIUM_OCTEON,
 
 	CPU_LAST
 };
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index 2de638f..43baed1 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -42,7 +42,7 @@
 /*
  * TLB hazards
  */
-#if defined(CONFIG_CPU_MIPSR2)
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
 
 /*
  * MIPSR2 defines ehb for hazard avoidance
@@ -138,7 +138,7 @@
 		__instruction_hazard();					\
 } while (0)
 
-#elif defined(CONFIG_CPU_R10000)
+#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_CAVIUM_OCTEON)
 
 /*
  * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 501a40b..436878e 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -295,6 +295,12 @@
 #undef __IS_KSEG1
 }
 
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#define war_octeon_io_reorder_wmb()  		wmb()
+#else
+#define war_octeon_io_reorder_wmb()		do { } while (0)
+#endif
+
 #define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)			\
 									\
 static inline void pfx##write##bwlq(type val,				\
@@ -303,6 +309,8 @@
 	volatile type *__mem;						\
 	type __val;							\
 									\
+	war_octeon_io_reorder_wmb();					\
+									\
 	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
 									\
 	__val = pfx##ioswab##bwlq(__mem, val);				\
@@ -370,6 +378,8 @@
 	volatile type *__addr;						\
 	type __val;							\
 									\
+	war_octeon_io_reorder_wmb();					\
+									\
 	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
 									\
 	__val = pfx##ioswab##bwlq(__addr, val);				\
@@ -504,8 +514,12 @@
 #endif
 
 
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#define mmiowb() wmb()
+#else
 /* Depends on MIPS II instruction set */
 #define mmiowb() asm volatile ("sync" ::: "memory")
+#endif
 
 static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
 {
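
A minimal sketch (illustrative only, not part of the patch) of what the writeb() path effectively becomes on CONFIG_CPU_CAVIUM_OCTEON once war_octeon_io_reorder_wmb() is expanded by __BUILD_MEMORY_SINGLE above; the swizzle and byte-swap steps are elided:

	static inline void example_octeon_writeb(u8 val, volatile void __iomem *mem)
	{
		wmb();				/* drain pending stores so the device
						 * write is not reordered ahead of them */
		*(volatile u8 *)mem = val;	/* the actual MMIO store */
	}
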
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
index 0d302ba..62f91f5 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -91,14 +91,57 @@
 	return *(volatile u32 *)reg;
 }
 
+/* Early Au1000 revisions have a write-only SYS_CPUPLL register. */
+static inline int au1xxx_cpu_has_pll_wo(void)
+{
+	switch (read_c0_prid()) {
+	case 0x00030100:	/* Au1000 DA */
+	case 0x00030201:	/* Au1000 HA */
+	case 0x00030202:	/* Au1000 HB */
+		return 1;
+	}
+	return 0;
+}
+
+/* does the CPU need Config[OD] set to fix tons of errata? */
+static inline int au1xxx_cpu_needs_config_od(void)
+{
+	/*
+	 * c0_config.od (bit 19) was write-only (and read as 0) on the
+	 * early revisions of Alchemy SOCs.  It disables the bus trans-
+	 * action overlapping and needs to be set to fix various errata.
+	 */
+	switch (read_c0_prid()) {
+	case 0x00030100: /* Au1000 DA */
+	case 0x00030201: /* Au1000 HA */
+	case 0x00030202: /* Au1000 HB */
+	case 0x01030200: /* Au1500 AB */
+	/*
+	 * The Au1100/Au1200 errata are actually silent about this bit,
+	 * so we set it just in case for those revisions that required
+	 * it to be set according to the (now gone) cpu_table.
+	 */
+	case 0x02030200: /* Au1100 AB */
+	case 0x02030201: /* Au1100 BA */
+	case 0x02030202: /* Au1100 BC */
+	case 0x04030201: /* Au1200 AC */
+		return 1;
+	}
+	return 0;
+}
 
 /* arch/mips/au1000/common/clocks.c */
 extern void set_au1x00_speed(unsigned int new_freq);
 extern unsigned int get_au1x00_speed(void);
 extern void set_au1x00_uart_baud_base(unsigned long new_baud_base);
 extern unsigned long get_au1x00_uart_baud_base(void);
-extern void set_au1x00_lcd_clock(void);
-extern unsigned int get_au1x00_lcd_clock(void);
+extern unsigned long au1xxx_calc_clock(void);
+
+/* PM: arch/mips/alchemy/common/sleeper.S, power.c, irq.c */
+void au1xxx_save_and_sleep(void);
+void au_sleep(void);
+void save_au1xxx_intctl(void);
+void restore_au1xxx_intctl(void);
 
 /*
  * Every board describes its IRQ mapping with this table.
@@ -109,10 +152,11 @@
 	int	im_request;
 };
 
-/*
- * init_IRQ looks for a table with this name.
- */
-extern struct au1xxx_irqmap au1xxx_irq_map[];
+/* core calls this function to let boards initialize other IRQ sources */
+void board_init_irq(void);
+
+/* boards call this to register additional (GPIO) interrupts */
+void au1xxx_setup_irqmap(struct au1xxx_irqmap *map, int count);
 
 #endif /* !defined (_LANGUAGE_ASSEMBLY) */
 
@@ -505,15 +549,6 @@
 
 #define IC1_TESTBIT		0xB1800080
 
-/* Interrupt Configuration Modes */
-#define INTC_INT_DISABLED		0x0
-#define INTC_INT_RISE_EDGE		0x1
-#define INTC_INT_FALL_EDGE		0x2
-#define INTC_INT_RISE_AND_FALL_EDGE	0x3
-#define INTC_INT_HIGH_LEVEL		0x5
-#define INTC_INT_LOW_LEVEL		0x6
-#define INTC_INT_HIGH_AND_LOW_LEVEL	0x7
-
 /* Interrupt Numbers */
 /* Au1000 */
 #ifdef CONFIG_SOC_AU1000
@@ -1525,6 +1560,10 @@
 #define SYS_SLPPWR		0xB1900078
 #define SYS_SLEEP		0xB190007C
 
+#define SYS_WAKEMSK_D2		(1 << 9)
+#define SYS_WAKEMSK_M2		(1 << 8)
+#define SYS_WAKEMSK_GPIO(x)	(1 << (x))
+
 /* Clock Controller */
 #define SYS_FREQCTRL0		0xB1900020
 #  define SYS_FC_FRDIV2_BIT	22
@@ -1749,24 +1788,4 @@
 
 #endif
 
-/*
- * Processor information based on PRID.
- * Copied from PowerPC.
- */
-#ifndef _LANGUAGE_ASSEMBLY
-struct cpu_spec {
-	/* CPU is matched via (PRID & prid_mask) == prid_value */
-	unsigned int	prid_mask;
-	unsigned int	prid_value;
-
-	char		*cpu_name;
-	unsigned char	cpu_od;		/* Set Config[OD] */
-	unsigned char	cpu_bclk;	/* Enable BCLK switching */
-	unsigned char	cpu_pll_wo;	/* sys_cpupll reg. write-only */
-};
-
-extern struct cpu_spec	cpu_specs[];
-extern struct cpu_spec	*cur_cpu_spec[];
-#endif
-
 #endif
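
A hedged sketch of how platform setup code might consume the two helpers added above (the real call sites live elsewhere in the Alchemy code and may differ); bit 19 is Config[OD] per the comment:

	static void example_alchemy_config_od_fixup(void)
	{
		if (au1xxx_cpu_needs_config_od())
			set_c0_config(1 << 19);	/* set Config[OD] on affected revisions */
	}
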
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
index 44a67bf..06f68f4 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
@@ -357,6 +357,11 @@
 u32 au1xxx_ddma_add_device(dbdev_tab_t *dev);
 extern void au1xxx_ddma_del_device(u32 devid);
 void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp);
+#ifdef CONFIG_PM
+void au1xxx_dbdma_suspend(void);
+void au1xxx_dbdma_resume(void);
+#endif
+
 
 /*
  * Some compatibilty macros -- needed to make changes to API
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
new file mode 100644
index 0000000..04ce6e6
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -0,0 +1,78 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 Cavium Networks
+ */
+#ifndef __ASM_MACH_CAVIUM_OCTEON_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_CAVIUM_OCTEON_CPU_FEATURE_OVERRIDES_H
+
+#include <linux/types.h>
+#include <asm/mipsregs.h>
+
+/*
+ * Cavium Octeons are MIPS64 Release 2 processors
+ */
+#define cpu_dcache_line_size()	128
+#define cpu_icache_line_size()	128
+
+
+#define cpu_has_4kex		1
+#define cpu_has_3k_cache	0
+#define cpu_has_4k_cache	0
+#define cpu_has_tx39_cache	0
+#define cpu_has_fpu		0
+#define cpu_has_counter		1
+#define cpu_has_watch		1
+#define cpu_has_divec		1
+#define cpu_has_vce		0
+#define cpu_has_cache_cdex_p	0
+#define cpu_has_cache_cdex_s	0
+#define cpu_has_prefetch	1
+
+/*
+ * We should disable LL/SC on non-SMP systems as it is faster to
+ * disable interrupts for atomic access than a LL/SC.  Unfortunately we
+ * cannot as this breaks asm/futex.h.
+ */
+#define cpu_has_llsc		1
+#define cpu_has_vtag_icache	1
+#define cpu_has_dc_aliases	0
+#define cpu_has_ic_fills_f_dc	0
+#define cpu_has_64bits		1
+#define cpu_has_octeon_cache	1
+#define cpu_has_saa		octeon_has_saa()
+#define cpu_has_mips32r1	0
+#define cpu_has_mips32r2	0
+#define cpu_has_mips64r1	0
+#define cpu_has_mips64r2	1
+#define cpu_has_dsp		0
+#define cpu_has_mipsmt		0
+#define cpu_has_userlocal	0
+#define cpu_has_vint		0
+#define cpu_has_veic		0
+#define ARCH_HAS_READ_CURRENT_TIMER 1
+#define ARCH_HAS_IRQ_PER_CPU	1
+#define ARCH_HAS_SPINLOCK_PREFETCH 1
+#define spin_lock_prefetch(x) prefetch(x)
+#define PREFETCH_STRIDE 128
+
+static inline int read_current_timer(unsigned long *result)
+{
+	asm volatile ("rdhwr %0,$31\n"
+#ifndef CONFIG_64BIT
+		      "\tsll %0, 0"
+#endif
+		      : "=r" (*result));
+	return 0;
+}
+
+static inline int octeon_has_saa(void)
+{
+	int id;
+	asm volatile ("mfc0 %0, $15,0" : "=r" (id));
+	return id >= 0x000d0300;
+}
+
+#endif
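
A small usage sketch (hypothetical, not from the patch) of the rdhwr-based read_current_timer() defined above, timing a code region in CPU cycles; do_work() is a stand-in:

	static void example_time_region(void)
	{
		unsigned long before, after;

		read_current_timer(&before);
		do_work();			/* hypothetical work being timed */
		read_current_timer(&after);
		pr_info("region took %lu cycles\n", after - before);
	}
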
diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
new file mode 100644
index 0000000..f30fce9
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
@@ -0,0 +1,64 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
+ *
+ *
+ * Similar to mach-generic/dma-coherence.h, except that
+ * plat_device_is_coherent() is hard-coded to return 1.
+ *
+ */
+#ifndef __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H
+#define __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H
+
+struct device;
+
+dma_addr_t octeon_map_dma_mem(struct device *, void *, size_t);
+void octeon_unmap_dma_mem(struct device *, dma_addr_t);
+
+static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
+	size_t size)
+{
+	return octeon_map_dma_mem(dev, addr, size);
+}
+
+static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
+	struct page *page)
+{
+	return octeon_map_dma_mem(dev, page_address(page), PAGE_SIZE);
+}
+
+static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
+{
+	return dma_addr;
+}
+
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
+{
+	octeon_unmap_dma_mem(dev, dma_addr);
+}
+
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	mb();
+}
+
+static inline int plat_device_is_coherent(struct device *dev)
+{
+	return 1;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+					 dma_addr_t dma_addr)
+{
+	return dma_addr == -1;
+}
+
+#endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
new file mode 100644
index 0000000..d32220f
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -0,0 +1,244 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2008 Cavium Networks
+ */
+#ifndef __OCTEON_IRQ_H__
+#define __OCTEON_IRQ_H__
+
+#define NR_IRQS OCTEON_IRQ_LAST
+#define MIPS_CPU_IRQ_BASE OCTEON_IRQ_SW0
+
+/* 0 - 7 represent the i8259 master */
+#define OCTEON_IRQ_I8259M0	0
+#define OCTEON_IRQ_I8259M1	1
+#define OCTEON_IRQ_I8259M2	2
+#define OCTEON_IRQ_I8259M3	3
+#define OCTEON_IRQ_I8259M4	4
+#define OCTEON_IRQ_I8259M5	5
+#define OCTEON_IRQ_I8259M6	6
+#define OCTEON_IRQ_I8259M7	7
+/* 8 - 15 represent the i8259 slave */
+#define OCTEON_IRQ_I8259S0	8
+#define OCTEON_IRQ_I8259S1	9
+#define OCTEON_IRQ_I8259S2	10
+#define OCTEON_IRQ_I8259S3	11
+#define OCTEON_IRQ_I8259S4	12
+#define OCTEON_IRQ_I8259S5	13
+#define OCTEON_IRQ_I8259S6	14
+#define OCTEON_IRQ_I8259S7	15
+/* 16 - 23 represent the 8 MIPS standard interrupt sources */
+#define OCTEON_IRQ_SW0		16
+#define OCTEON_IRQ_SW1		17
+#define OCTEON_IRQ_CIU0		18
+#define OCTEON_IRQ_CIU1		19
+#define OCTEON_IRQ_CIU4		20
+#define OCTEON_IRQ_5		21
+#define OCTEON_IRQ_PERF		22
+#define OCTEON_IRQ_TIMER	23
+/* 24 - 87 represent the sources in CIU_INTX_EN0 */
+#define OCTEON_IRQ_WORKQ0	24
+#define OCTEON_IRQ_WORKQ1	25
+#define OCTEON_IRQ_WORKQ2	26
+#define OCTEON_IRQ_WORKQ3	27
+#define OCTEON_IRQ_WORKQ4	28
+#define OCTEON_IRQ_WORKQ5	29
+#define OCTEON_IRQ_WORKQ6	30
+#define OCTEON_IRQ_WORKQ7	31
+#define OCTEON_IRQ_WORKQ8	32
+#define OCTEON_IRQ_WORKQ9	33
+#define OCTEON_IRQ_WORKQ10	34
+#define OCTEON_IRQ_WORKQ11	35
+#define OCTEON_IRQ_WORKQ12	36
+#define OCTEON_IRQ_WORKQ13	37
+#define OCTEON_IRQ_WORKQ14	38
+#define OCTEON_IRQ_WORKQ15	39
+#define OCTEON_IRQ_GPIO0	40
+#define OCTEON_IRQ_GPIO1	41
+#define OCTEON_IRQ_GPIO2	42
+#define OCTEON_IRQ_GPIO3	43
+#define OCTEON_IRQ_GPIO4	44
+#define OCTEON_IRQ_GPIO5	45
+#define OCTEON_IRQ_GPIO6	46
+#define OCTEON_IRQ_GPIO7	47
+#define OCTEON_IRQ_GPIO8	48
+#define OCTEON_IRQ_GPIO9	49
+#define OCTEON_IRQ_GPIO10	50
+#define OCTEON_IRQ_GPIO11	51
+#define OCTEON_IRQ_GPIO12	52
+#define OCTEON_IRQ_GPIO13	53
+#define OCTEON_IRQ_GPIO14	54
+#define OCTEON_IRQ_GPIO15	55
+#define OCTEON_IRQ_MBOX0	56
+#define OCTEON_IRQ_MBOX1	57
+#define OCTEON_IRQ_UART0	58
+#define OCTEON_IRQ_UART1	59
+#define OCTEON_IRQ_PCI_INT0	60
+#define OCTEON_IRQ_PCI_INT1	61
+#define OCTEON_IRQ_PCI_INT2	62
+#define OCTEON_IRQ_PCI_INT3	63
+#define OCTEON_IRQ_PCI_MSI0	64
+#define OCTEON_IRQ_PCI_MSI1	65
+#define OCTEON_IRQ_PCI_MSI2	66
+#define OCTEON_IRQ_PCI_MSI3	67
+#define OCTEON_IRQ_RESERVED68	68	/* Summary of CIU_INT_SUM1 */
+#define OCTEON_IRQ_TWSI		69
+#define OCTEON_IRQ_RML		70
+#define OCTEON_IRQ_TRACE	71
+#define OCTEON_IRQ_GMX_DRP0	72
+#define OCTEON_IRQ_GMX_DRP1	73
+#define OCTEON_IRQ_IPD_DRP	74
+#define OCTEON_IRQ_KEY_ZERO	75
+#define OCTEON_IRQ_TIMER0	76
+#define OCTEON_IRQ_TIMER1	77
+#define OCTEON_IRQ_TIMER2	78
+#define OCTEON_IRQ_TIMER3	79
+#define OCTEON_IRQ_USB0		80
+#define OCTEON_IRQ_PCM		81
+#define OCTEON_IRQ_MPI		82
+#define OCTEON_IRQ_TWSI2	83
+#define OCTEON_IRQ_POWIQ	84
+#define OCTEON_IRQ_IPDPPTHR	85
+#define OCTEON_IRQ_MII0		86
+#define OCTEON_IRQ_BOOTDMA	87
+/* 88 - 151 represent the sources in CIU_INTX_EN1 */
+#define OCTEON_IRQ_WDOG0	88
+#define OCTEON_IRQ_WDOG1	89
+#define OCTEON_IRQ_WDOG2	90
+#define OCTEON_IRQ_WDOG3	91
+#define OCTEON_IRQ_WDOG4	92
+#define OCTEON_IRQ_WDOG5	93
+#define OCTEON_IRQ_WDOG6	94
+#define OCTEON_IRQ_WDOG7	95
+#define OCTEON_IRQ_WDOG8	96
+#define OCTEON_IRQ_WDOG9	97
+#define OCTEON_IRQ_WDOG10	98
+#define OCTEON_IRQ_WDOG11	99
+#define OCTEON_IRQ_WDOG12	100
+#define OCTEON_IRQ_WDOG13	101
+#define OCTEON_IRQ_WDOG14	102
+#define OCTEON_IRQ_WDOG15	103
+#define OCTEON_IRQ_UART2	104
+#define OCTEON_IRQ_USB1		105
+#define OCTEON_IRQ_MII1		106
+#define OCTEON_IRQ_RESERVED107	107
+#define OCTEON_IRQ_RESERVED108	108
+#define OCTEON_IRQ_RESERVED109	109
+#define OCTEON_IRQ_RESERVED110	110
+#define OCTEON_IRQ_RESERVED111	111
+#define OCTEON_IRQ_RESERVED112	112
+#define OCTEON_IRQ_RESERVED113	113
+#define OCTEON_IRQ_RESERVED114	114
+#define OCTEON_IRQ_RESERVED115	115
+#define OCTEON_IRQ_RESERVED116	116
+#define OCTEON_IRQ_RESERVED117	117
+#define OCTEON_IRQ_RESERVED118	118
+#define OCTEON_IRQ_RESERVED119	119
+#define OCTEON_IRQ_RESERVED120	120
+#define OCTEON_IRQ_RESERVED121	121
+#define OCTEON_IRQ_RESERVED122	122
+#define OCTEON_IRQ_RESERVED123	123
+#define OCTEON_IRQ_RESERVED124	124
+#define OCTEON_IRQ_RESERVED125	125
+#define OCTEON_IRQ_RESERVED126	126
+#define OCTEON_IRQ_RESERVED127	127
+#define OCTEON_IRQ_RESERVED128	128
+#define OCTEON_IRQ_RESERVED129	129
+#define OCTEON_IRQ_RESERVED130	130
+#define OCTEON_IRQ_RESERVED131	131
+#define OCTEON_IRQ_RESERVED132	132
+#define OCTEON_IRQ_RESERVED133	133
+#define OCTEON_IRQ_RESERVED134	134
+#define OCTEON_IRQ_RESERVED135	135
+#define OCTEON_IRQ_RESERVED136	136
+#define OCTEON_IRQ_RESERVED137	137
+#define OCTEON_IRQ_RESERVED138	138
+#define OCTEON_IRQ_RESERVED139	139
+#define OCTEON_IRQ_RESERVED140	140
+#define OCTEON_IRQ_RESERVED141	141
+#define OCTEON_IRQ_RESERVED142	142
+#define OCTEON_IRQ_RESERVED143	143
+#define OCTEON_IRQ_RESERVED144	144
+#define OCTEON_IRQ_RESERVED145	145
+#define OCTEON_IRQ_RESERVED146	146
+#define OCTEON_IRQ_RESERVED147	147
+#define OCTEON_IRQ_RESERVED148	148
+#define OCTEON_IRQ_RESERVED149	149
+#define OCTEON_IRQ_RESERVED150	150
+#define OCTEON_IRQ_RESERVED151	151
+
+#ifdef CONFIG_PCI_MSI
+/* 152 - 215 represent the MSI interrupts 0-63 */
+#define OCTEON_IRQ_MSI_BIT0	152
+#define OCTEON_IRQ_MSI_BIT1	153
+#define OCTEON_IRQ_MSI_BIT2	154
+#define OCTEON_IRQ_MSI_BIT3	155
+#define OCTEON_IRQ_MSI_BIT4	156
+#define OCTEON_IRQ_MSI_BIT5	157
+#define OCTEON_IRQ_MSI_BIT6	158
+#define OCTEON_IRQ_MSI_BIT7	159
+#define OCTEON_IRQ_MSI_BIT8	160
+#define OCTEON_IRQ_MSI_BIT9	161
+#define OCTEON_IRQ_MSI_BIT10	162
+#define OCTEON_IRQ_MSI_BIT11	163
+#define OCTEON_IRQ_MSI_BIT12	164
+#define OCTEON_IRQ_MSI_BIT13	165
+#define OCTEON_IRQ_MSI_BIT14	166
+#define OCTEON_IRQ_MSI_BIT15	167
+#define OCTEON_IRQ_MSI_BIT16	168
+#define OCTEON_IRQ_MSI_BIT17	169
+#define OCTEON_IRQ_MSI_BIT18	170
+#define OCTEON_IRQ_MSI_BIT19	171
+#define OCTEON_IRQ_MSI_BIT20	172
+#define OCTEON_IRQ_MSI_BIT21	173
+#define OCTEON_IRQ_MSI_BIT22	174
+#define OCTEON_IRQ_MSI_BIT23	175
+#define OCTEON_IRQ_MSI_BIT24	176
+#define OCTEON_IRQ_MSI_BIT25	177
+#define OCTEON_IRQ_MSI_BIT26	178
+#define OCTEON_IRQ_MSI_BIT27	179
+#define OCTEON_IRQ_MSI_BIT28	180
+#define OCTEON_IRQ_MSI_BIT29	181
+#define OCTEON_IRQ_MSI_BIT30	182
+#define OCTEON_IRQ_MSI_BIT31	183
+#define OCTEON_IRQ_MSI_BIT32	184
+#define OCTEON_IRQ_MSI_BIT33	185
+#define OCTEON_IRQ_MSI_BIT34	186
+#define OCTEON_IRQ_MSI_BIT35	187
+#define OCTEON_IRQ_MSI_BIT36	188
+#define OCTEON_IRQ_MSI_BIT37	189
+#define OCTEON_IRQ_MSI_BIT38	190
+#define OCTEON_IRQ_MSI_BIT39	191
+#define OCTEON_IRQ_MSI_BIT40	192
+#define OCTEON_IRQ_MSI_BIT41	193
+#define OCTEON_IRQ_MSI_BIT42	194
+#define OCTEON_IRQ_MSI_BIT43	195
+#define OCTEON_IRQ_MSI_BIT44	196
+#define OCTEON_IRQ_MSI_BIT45	197
+#define OCTEON_IRQ_MSI_BIT46	198
+#define OCTEON_IRQ_MSI_BIT47	199
+#define OCTEON_IRQ_MSI_BIT48	200
+#define OCTEON_IRQ_MSI_BIT49	201
+#define OCTEON_IRQ_MSI_BIT50	202
+#define OCTEON_IRQ_MSI_BIT51	203
+#define OCTEON_IRQ_MSI_BIT52	204
+#define OCTEON_IRQ_MSI_BIT53	205
+#define OCTEON_IRQ_MSI_BIT54	206
+#define OCTEON_IRQ_MSI_BIT55	207
+#define OCTEON_IRQ_MSI_BIT56	208
+#define OCTEON_IRQ_MSI_BIT57	209
+#define OCTEON_IRQ_MSI_BIT58	210
+#define OCTEON_IRQ_MSI_BIT59	211
+#define OCTEON_IRQ_MSI_BIT60	212
+#define OCTEON_IRQ_MSI_BIT61	213
+#define OCTEON_IRQ_MSI_BIT62	214
+#define OCTEON_IRQ_MSI_BIT63	215
+
+#define OCTEON_IRQ_LAST         216
+#else
+#define OCTEON_IRQ_LAST         152
+#endif
+
+#endif
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
new file mode 100644
index 0000000..0b2b5eb
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
@@ -0,0 +1,131 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2008 Cavium Networks, Inc
+ */
+#ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
+#define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
+
+
+#define CP0_CYCLE_COUNTER $9, 6
+#define CP0_CVMCTL_REG $9, 7
+#define CP0_CVMMEMCTL_REG $11,7
+#define CP0_PRID_REG $15, 0
+#define CP0_PRID_OCTEON_PASS1 0x000d0000
+#define CP0_PRID_OCTEON_CN30XX 0x000d0200
+
+.macro  kernel_entry_setup
+	# Registers set by bootloader:
+	# (only 32 bits set by bootloader, all addresses are physical
+	# addresses, and need to have the appropriate memory region set
+	# by the kernel)
+	# a0 = argc
+	# a1 = argv (kseg0 compat addr)
+	# a2 = 1 if init core, zero otherwise
+	# a3 = address of boot descriptor block
+	.set push
+	.set arch=octeon
+	# Read the cavium mem control register
+	dmfc0   v0, CP0_CVMMEMCTL_REG
+	# Clear the lower 6 bits, the CVMSEG size
+	dins    v0, $0, 0, 6
+	ori     v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
+	dmtc0   v0, CP0_CVMMEMCTL_REG	# Write the cavium mem control register
+	dmfc0   v0, CP0_CVMCTL_REG	# Read the cavium control register
+#ifdef CONFIG_CAVIUM_OCTEON_HW_FIX_UNALIGNED
+	# Disable unaligned load/store support but leave HW fixup enabled
+	or  v0, v0, 0x5001
+	xor v0, v0, 0x1001
+#else
+	# Disable unaligned load/store and HW fixup support
+	or  v0, v0, 0x5001
+	xor v0, v0, 0x5001
+#endif
+	# Read the processor ID register
+	mfc0 v1, CP0_PRID_REG
+	# Disable instruction prefetching (Octeon Pass1 errata)
+	or  v0, v0, 0x2000
+	# Skip reenable of prefetching for Octeon Pass1
+	beq v1, CP0_PRID_OCTEON_PASS1, skip
+	nop
+	# Reenable instruction prefetching, not on Pass1
+	xor v0, v0, 0x2000
+	# Strip the pass number off of the processor id
+	srl v1, 8
+	sll v1, 8
+	# CN30XX needs some extra stuff turned off for better performance
+	bne v1, CP0_PRID_OCTEON_CN30XX, skip
+	nop
+	# CN30XX Use random Icache replacement
+	or  v0, v0, 0x400
+	# CN30XX Disable instruction prefetching
+	or  v0, v0, 0x2000
+skip:
+	# Write the cavium control register
+	dmtc0   v0, CP0_CVMCTL_REG
+	sync
+	# Flush dcache after config change
+	cache   9, 0($0)
+	# Get my core id
+	rdhwr   v0, $0
+	# Jump the master to kernel_entry
+	bne     a2, zero, octeon_main_processor
+	nop
+
+#ifdef CONFIG_SMP
+
+	#
+	# All cores other than the master need to wait here for SMP bootstrap
+	# to begin
+	#
+
+	# This is the variable where the next core to boot is stored
+	PTR_LA  t0, octeon_processor_boot
+octeon_spin_wait_boot:
+	# Get the core id of the next to be booted
+	LONG_L  t1, (t0)
+	# Keep looping if it isn't me
+	bne t1, v0, octeon_spin_wait_boot
+	nop
+	# Get my GP from the global variable
+	PTR_LA  t0, octeon_processor_gp
+	LONG_L  gp, (t0)
+	# Get my SP from the global variable
+	PTR_LA  t0, octeon_processor_sp
+	LONG_L  sp, (t0)
+	# Set the SP global variable to zero so the master knows we've started
+	LONG_S  zero, (t0)
+#ifdef __OCTEON__
+	syncw
+	syncw
+#else
+	sync
+#endif
+	# Jump to the normal Linux SMP entry point
+	j   smp_bootstrap
+	nop
+#else /* CONFIG_SMP */
+
+	#
+	# Someone tried to boot SMP with a non-SMP kernel. All extra cores
+	# will halt here.
+	#
+octeon_wait_forever:
+	wait
+	b   octeon_wait_forever
+	nop
+
+#endif /* CONFIG_SMP */
+octeon_main_processor:
+	.set pop
+.endm
+
+/*
+ * Do the SMP slave processor setup necessary before we can safely execute C code.
+ */
+	.macro  smp_slave_setup
+	.endm
+
+#endif /* __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H */
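
A hedged sketch of the master-side handshake implied by octeon_spin_wait_boot above; the real release code lives in the Octeon SMP support (not part of this hunk) and may differ in detail:

	extern unsigned long octeon_processor_boot;	/* core id to release next */
	extern unsigned long octeon_processor_sp;	/* stack pointer for that core */
	extern unsigned long octeon_processor_gp;	/* gp for that core */

	static void example_release_secondary(int coreid, unsigned long sp, unsigned long gp)
	{
		octeon_processor_gp = gp;
		octeon_processor_sp = sp;
		smp_wmb();			/* publish sp/gp before the core id */
		octeon_processor_boot = coreid;	/* spinning core sees its id and proceeds */
		while (octeon_processor_sp)	/* it zeroes sp once it has taken the stack */
			cpu_relax();
	}
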
diff --git a/arch/mips/include/asm/mach-cavium-octeon/war.h b/arch/mips/include/asm/mach-cavium-octeon/war.h
new file mode 100644
index 0000000..c4712d7
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/war.h
@@ -0,0 +1,26 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2008 Cavium Networks <support@caviumnetworks.com>
+ */
+#ifndef __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H
+#define __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H
+
+#define R4600_V1_INDEX_ICACHEOP_WAR	0
+#define R4600_V1_HIT_CACHEOP_WAR	0
+#define R4600_V2_HIT_CACHEOP_WAR	0
+#define R5432_CP0_INTERRUPT_WAR		0
+#define BCM1250_M3_WAR			0
+#define SIBYTE_1956_WAR			0
+#define MIPS4K_ICACHE_REFILL_WAR	0
+#define MIPS_CACHE_SYNC_WAR		0
+#define TX49XX_ICACHE_INDEX_INV_WAR	0
+#define RM9000_CDEX_SMP_WAR		0
+#define ICACHE_REFILLS_WORKAROUND_WAR	0
+#define R10000_LLSC_WAR			0
+#define MIPS34K_MISSED_ITLB_WAR		0
+
+#endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index 76e04e7..36c611b 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -28,10 +28,34 @@
 	return dma_addr;
 }
 
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA.
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+					 dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 #ifdef CONFIG_DMA_COHERENT
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
index ed7e622..4c21bfc 100644
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h
@@ -38,10 +38,34 @@
 	return dma_addr & ~(0xffUL << 56);
 }
 
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA.
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+					 dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 	return 1;		/* IP27 non-cohernet mode is unsupported */
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
index a5511eb..7ae40f4 100644
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -60,10 +60,34 @@
 	return addr;
 }
 
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA.
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+					 dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 	return 0;		/* IP32 is non-cohernet */
diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h
index d66979a..1c7cd27 100644
--- a/arch/mips/include/asm/mach-jazz/dma-coherence.h
+++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h
@@ -27,11 +27,35 @@
 	return vdma_log2phys(dma_addr);
 }
 
-static void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 	vdma_free(dma_addr);
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA.
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+					 dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 	return 0;
diff --git a/arch/mips/include/asm/mach-lemote/dma-coherence.h b/arch/mips/include/asm/mach-lemote/dma-coherence.h
index 7e91477..38fad7d 100644
--- a/arch/mips/include/asm/mach-lemote/dma-coherence.h
+++ b/arch/mips/include/asm/mach-lemote/dma-coherence.h
@@ -30,10 +30,34 @@
 	return dma_addr & 0x7fffffff;
 }
 
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA.
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+					 dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 	return 0;
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 9316324..0417516 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1000,6 +1000,26 @@
 #define read_c0_ebase()		__read_32bit_c0_register($15, 1)
 #define write_c0_ebase(val)	__write_32bit_c0_register($15, 1, val)
 
+
+/* Cavium OCTEON (cnMIPS) */
+#define read_c0_cvmcount()	__read_ulong_c0_register($9, 6)
+#define write_c0_cvmcount(val)	__write_ulong_c0_register($9, 6, val)
+
+#define read_c0_cvmctl()	__read_64bit_c0_register($9, 7)
+#define write_c0_cvmctl(val)	__write_64bit_c0_register($9, 7, val)
+
+#define read_c0_cvmmemctl()	__read_64bit_c0_register($11, 7)
+#define write_c0_cvmmemctl(val)	__write_64bit_c0_register($11, 7, val)
+/*
+ * The cacheerr registers are not standardized.  On OCTEON, they are
+ * 64 bits wide.
+ */
+#define read_octeon_c0_icacheerr()	__read_64bit_c0_register($27, 0)
+#define write_octeon_c0_icacheerr(val)	__write_64bit_c0_register($27, 0, val)
+
+#define read_octeon_c0_dcacheerr()	__read_64bit_c0_register($27, 1)
+#define write_octeon_c0_dcacheerr(val)	__write_64bit_c0_register($27, 1, val)
+
 /*
  * Macros to access the floating point coprocessor control registers
  */
@@ -1008,6 +1028,8 @@
 	__asm__ __volatile__(                                   \
 	".set\tpush\n\t"					\
 	".set\treorder\n\t"					\
+	/* gas fails to assemble cfc1 for some archs (octeon). */ \
+	".set\tmips1\n\t"					\
         "cfc1\t%0,"STR(source)"\n\t"                            \
 	".set\tpop"						\
         : "=r" (__res));                                        \
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index e2e09b2..d94085a 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -116,6 +116,8 @@
 #define MODULE_PROC_FAMILY "SB1 "
 #elif defined CONFIG_CPU_LOONGSON2
 #define MODULE_PROC_FAMILY "LOONGSON2 "
+#elif defined CONFIG_CPU_CAVIUM_OCTEON
+#define MODULE_PROC_FAMILY "OCTEON "
 #else
 #error MODULE_PROC_FAMILY undefined for your processor configuration
 #endif
diff --git a/arch/mips/include/asm/octeon/cvmx-asm.h b/arch/mips/include/asm/octeon/cvmx-asm.h
new file mode 100644
index 0000000..b21d3fc
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-asm.h
@@ -0,0 +1,128 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * This file defines ASM primitives for the executive.
+ */
+#ifndef __CVMX_ASM_H__
+#define __CVMX_ASM_H__
+
+#include "octeon-model.h"
+
+/* other useful stuff */
+#define CVMX_SYNC asm volatile ("sync" : : : "memory")
+/* String version of SYNCW macro for using in inline asm constructs */
+#define CVMX_SYNCW_STR "syncw\nsyncw\n"
+#ifdef __OCTEON__
+
+/* Deprecated, will be removed in future release */
+#define CVMX_SYNCIO asm volatile ("nop")
+
+#define CVMX_SYNCIOBDMA asm volatile ("synciobdma" : : : "memory")
+
+/* Deprecated, will be removed in future release */
+#define CVMX_SYNCIOALL asm volatile ("nop")
+
+/*
+ * We actually use two syncw instructions in a row when we need a write
+ * memory barrier. This is because the CN3XXX series of Octeons is
+ * affected by erratum Core-401, which can cause a single syncw not to
+ * enforce ordering under very rare conditions. Even if it is rare, better safe
+ * than sorry.
+ */
+#define CVMX_SYNCW asm volatile ("syncw\n\tsyncw" : : : "memory")
+
+/*
+ * Define new sync instructions to be normal SYNC instructions for
+ * operating systems that use threads.
+ */
+#define CVMX_SYNCWS CVMX_SYNCW
+#define CVMX_SYNCS  CVMX_SYNC
+#define CVMX_SYNCWS_STR CVMX_SYNCW_STR
+#else
+/*
+ * When not using a Cavium compiler, always use the slower sync so the
+ * assembler stays happy.
+ */
+/* Deprecated, will be removed in future release */
+#define CVMX_SYNCIO asm volatile ("nop")
+
+#define CVMX_SYNCIOBDMA asm volatile ("sync" : : : "memory")
+
+/* Deprecated, will be removed in future release */
+#define CVMX_SYNCIOALL asm volatile ("nop")
+
+#define CVMX_SYNCW asm volatile ("sync" : : : "memory")
+#define CVMX_SYNCWS CVMX_SYNCW
+#define CVMX_SYNCS  CVMX_SYNC
+#define CVMX_SYNCWS_STR CVMX_SYNCW_STR
+#endif
+
+/*
+ * CVMX_PREPARE_FOR_STORE makes each byte of the block unpredictable
+ * (actually old value or zero) until that byte is stored to (by this or
+ * another processor). Note that the value of each byte is not only
+ * unpredictable, but may also change again - up until the point when one
+ * of the cores stores to the byte.
+ */
+#define CVMX_PREPARE_FOR_STORE(address, offset) \
+	asm volatile ("pref 30, " CVMX_TMP_STR(offset) "(%[rbase])" : : \
+	[rbase] "d" (address))
+/*
+ * This is a command headed to the L2 controller to tell it to clear
+ * its dirty bit for a block. Basically, SW is telling HW that the
+ * current version of the block will not be used.
+ */
+#define CVMX_DONT_WRITE_BACK(address, offset) \
+	asm volatile ("pref 29, " CVMX_TMP_STR(offset) "(%[rbase])" : : \
+	[rbase] "d" (address))
+
+/* flush stores, invalidate entire icache */
+#define CVMX_ICACHE_INVALIDATE \
+	{ CVMX_SYNC; asm volatile ("synci 0($0)" : : ); }
+
+/* flush stores, invalidate entire icache */
+#define CVMX_ICACHE_INVALIDATE2 \
+	{ CVMX_SYNC; asm volatile ("cache 0, 0($0)" : : ); }
+
+/* complete prefetches, invalidate entire dcache */
+#define CVMX_DCACHE_INVALIDATE \
+	{ CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); }
+
+
+#define CVMX_POP(result, input) \
+	asm ("pop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+#define CVMX_DPOP(result, input) \
+	asm ("dpop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+
+/* some new cop0-like stuff */
+#define CVMX_RDHWR(result, regstr) \
+	asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
+#define CVMX_RDHWRNV(result, regstr) \
+	asm ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
+#endif /* __CVMX_ASM_H__ */
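
An illustrative (hypothetical) producer snippet showing why the double-syncw form of CVMX_SYNCW matters: the payload must be globally visible before the flag that another core polls.

	static volatile int example_payload;
	static volatile int example_ready;

	static void example_publish(int value)
	{
		example_payload = value;
		CVMX_SYNCW;		/* order the payload store before the flag store,
					 * using two syncw's to sidestep erratum Core-401 */
		example_ready = 1;
	}
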
diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
new file mode 100644
index 0000000..692989a
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
@@ -0,0 +1,262 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Header file containing the ABI with the bootloader.
+ */
+
+#ifndef __CVMX_BOOTINFO_H__
+#define __CVMX_BOOTINFO_H__
+
+/*
+ * Current major and minor versions of the CVMX bootinfo block that is
+ * passed from the bootloader to the application.  This is versioned
+ * so that applications can properly handle multiple bootloader
+ * versions.
+ */
+#define CVMX_BOOTINFO_MAJ_VER 1
+#define CVMX_BOOTINFO_MIN_VER 2
+
+#if (CVMX_BOOTINFO_MAJ_VER == 1)
+#define CVMX_BOOTINFO_OCTEON_SERIAL_LEN 20
+/*
+ * This structure is populated by the bootloader.  For binary
+ * compatibility the only changes that should be made are
+ * adding members to the end of the structure, and the minor
+ * version should be incremented at that time.
+ * If an incompatible change is made, the major version
+ * must be incremented, and the minor version should be reset
+ * to 0.
+ */
+struct cvmx_bootinfo {
+	uint32_t major_version;
+	uint32_t minor_version;
+
+	uint64_t stack_top;
+	uint64_t heap_base;
+	uint64_t heap_end;
+	uint64_t desc_vaddr;
+
+	uint32_t exception_base_addr;
+	uint32_t stack_size;
+	uint32_t flags;
+	uint32_t core_mask;
+	/* DRAM size in megabytes */
+	uint32_t dram_size;
+	/* physical address of free memory descriptor block*/
+	uint32_t phy_mem_desc_addr;
+	/* used to pass flags from app to debugger */
+	uint32_t debugger_flags_base_addr;
+
+	/* CPU clock speed, in hz */
+	uint32_t eclock_hz;
+
+	/* DRAM clock speed, in hz */
+	uint32_t dclock_hz;
+
+	uint32_t reserved0;
+	uint16_t board_type;
+	uint8_t board_rev_major;
+	uint8_t board_rev_minor;
+	uint16_t reserved1;
+	uint8_t reserved2;
+	uint8_t reserved3;
+	char board_serial_number[CVMX_BOOTINFO_OCTEON_SERIAL_LEN];
+	uint8_t mac_addr_base[6];
+	uint8_t mac_addr_count;
+#if (CVMX_BOOTINFO_MIN_VER >= 1)
+	/*
+	 * Several boards support compact flash on the Octeon boot
+	 * bus.  The CF memory spaces may be mapped to different
+	 * addresses on different boards.  These are the physical
+	 * addresses, so care must be taken to use the correct
+	 * XKPHYS/KSEG0 addressing depending on the application's
+	 * ABI.  These values will be 0 if CF is not present.
+	 */
+	uint64_t compact_flash_common_base_addr;
+	uint64_t compact_flash_attribute_base_addr;
+	/*
+	 * Base address of the LED display (as on EBT3000 board)
+	 * This will be 0 if LED display not present.
+	 */
+	uint64_t led_display_base_addr;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 2)
+	/* DFA reference clock in hz (if applicable)*/
+	uint32_t dfa_ref_clock_hz;
+
+	/*
+	 * flags indicating various configuration options.  These
+	 * flags supersede the 'flags' variable and should be used
+	 * instead if available.
+	 */
+	uint32_t config_flags;
+#endif
+
+};
+
+#define CVMX_BOOTINFO_CFG_FLAG_PCI_HOST			(1ull << 0)
+#define CVMX_BOOTINFO_CFG_FLAG_PCI_TARGET		(1ull << 1)
+#define CVMX_BOOTINFO_CFG_FLAG_DEBUG			(1ull << 2)
+#define CVMX_BOOTINFO_CFG_FLAG_NO_MAGIC			(1ull << 3)
+/* This flag is set if the TLB mappings are not contained in the
+ * 0x10000000 - 0x20000000 boot bus region. */
+#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING     (1ull << 4)
+#define CVMX_BOOTINFO_CFG_FLAG_BREAK			(1ull << 5)
+
+#endif /*   (CVMX_BOOTINFO_MAJ_VER == 1) */
+
+/* Type defines for board and chip types */
+enum cvmx_board_types_enum {
+	CVMX_BOARD_TYPE_NULL = 0,
+	CVMX_BOARD_TYPE_SIM = 1,
+	CVMX_BOARD_TYPE_EBT3000 = 2,
+	CVMX_BOARD_TYPE_KODAMA = 3,
+	CVMX_BOARD_TYPE_NIAGARA = 4,
+	CVMX_BOARD_TYPE_NAC38 = 5,	/* formerly NAO38 */
+	CVMX_BOARD_TYPE_THUNDER = 6,
+	CVMX_BOARD_TYPE_TRANTOR = 7,
+	CVMX_BOARD_TYPE_EBH3000 = 8,
+	CVMX_BOARD_TYPE_EBH3100 = 9,
+	CVMX_BOARD_TYPE_HIKARI = 10,
+	CVMX_BOARD_TYPE_CN3010_EVB_HS5 = 11,
+	CVMX_BOARD_TYPE_CN3005_EVB_HS5 = 12,
+	CVMX_BOARD_TYPE_KBP = 13,
+	/* Deprecated, CVMX_BOARD_TYPE_CN3010_EVB_HS5 supports the CN3020 */
+	CVMX_BOARD_TYPE_CN3020_EVB_HS5 = 14,
+	CVMX_BOARD_TYPE_EBT5800 = 15,
+	CVMX_BOARD_TYPE_NICPRO2 = 16,
+	CVMX_BOARD_TYPE_EBH5600 = 17,
+	CVMX_BOARD_TYPE_EBH5601 = 18,
+	CVMX_BOARD_TYPE_EBH5200 = 19,
+	CVMX_BOARD_TYPE_BBGW_REF = 20,
+	CVMX_BOARD_TYPE_NIC_XLE_4G = 21,
+	CVMX_BOARD_TYPE_EBT5600 = 22,
+	CVMX_BOARD_TYPE_EBH5201 = 23,
+	CVMX_BOARD_TYPE_MAX,
+
+	/*
+	 * The range from CVMX_BOARD_TYPE_MAX to
+	 * CVMX_BOARD_TYPE_CUST_DEFINED_MIN is reserved for future
+	 * SDK use.
+	 */
+
+	/*
+	 * Set aside a range for customer boards.  These numbers are managed
+	 * by Cavium.
+	 */
+	CVMX_BOARD_TYPE_CUST_DEFINED_MIN = 10000,
+	CVMX_BOARD_TYPE_CUST_WSX16 = 10001,
+	CVMX_BOARD_TYPE_CUST_NS0216 = 10002,
+	CVMX_BOARD_TYPE_CUST_NB5 = 10003,
+	CVMX_BOARD_TYPE_CUST_WMR500 = 10004,
+	CVMX_BOARD_TYPE_CUST_DEFINED_MAX = 20000,
+
+	/*
+	 * Set aside a range for customer private use.  The SDK won't
+	 * use any numbers in this range.
+	 */
+	CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
+	CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000,
+
+	/* The remaining range is reserved for future use. */
+};
+
+enum cvmx_chip_types_enum {
+	CVMX_CHIP_TYPE_NULL = 0,
+	CVMX_CHIP_SIM_TYPE_DEPRECATED = 1,
+	CVMX_CHIP_TYPE_OCTEON_SAMPLE = 2,
+	CVMX_CHIP_TYPE_MAX,
+};
+
+/* Compatibility alias for NAC38 name change, planned to be removed
+ * from SDK 1.7 */
+#define CVMX_BOARD_TYPE_NAO38	CVMX_BOARD_TYPE_NAC38
+
+/* Functions to return string based on type */
+#define ENUM_BRD_TYPE_CASE(x) \
+	case x: return(#x + 16);	/* Skip CVMX_BOARD_TYPE_ */
+static inline const char *cvmx_board_type_to_string(enum
+						    cvmx_board_types_enum type)
+{
+	switch (type) {
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NULL)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_SIM)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT3000)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KODAMA)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIAGARA)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NAC38)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_THUNDER)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_TRANTOR)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3000)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3100)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_HIKARI)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3010_EVB_HS5)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3005_EVB_HS5)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KBP)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3020_EVB_HS5)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5800)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NICPRO2)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5600)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5601)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5200)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_BBGW_REF)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_4G)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5600)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5201)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MAX)
+
+			/* Customer boards listed here */
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MIN)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WSX16)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NS0216)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NB5)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WMR500)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MAX)
+
+		    /* Customer private range */
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX)
+	}
+	return "Unsupported Board";
+}
+
+#define ENUM_CHIP_TYPE_CASE(x) \
+	case x: return(#x + 15);	/* Skip CVMX_CHIP_TYPE */
+static inline const char *cvmx_chip_type_to_string(enum
+						   cvmx_chip_types_enum type)
+{
+	switch (type) {
+		ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_NULL)
+		ENUM_CHIP_TYPE_CASE(CVMX_CHIP_SIM_TYPE_DEPRECATED)
+		ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_OCTEON_SAMPLE)
+		ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_MAX)
+	}
+	return "Unsupported Chip";
+}
+
+#endif /* __CVMX_BOOTINFO_H__ */
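
A worked example of the "#x + 16" trick in ENUM_BRD_TYPE_CASE above: stringizing the enumerator yields "CVMX_BOARD_TYPE_EBT3000", and the 16-byte offset skips the 16-character "CVMX_BOARD_TYPE_" prefix.

	const char *name = cvmx_board_type_to_string(CVMX_BOARD_TYPE_EBT3000);
	/* name now points at "EBT3000" */
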
diff --git a/arch/mips/include/asm/octeon/cvmx-bootmem.h b/arch/mips/include/asm/octeon/cvmx-bootmem.h
new file mode 100644
index 0000000..1cbe4b5
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-bootmem.h
@@ -0,0 +1,288 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Simple allocate-only memory allocator.  Used to allocate memory at
+ * application start time.
+ */
+
+#ifndef __CVMX_BOOTMEM_H__
+#define __CVMX_BOOTMEM_H__
+/* Must be multiple of 8, changing breaks ABI */
+#define CVMX_BOOTMEM_NAME_LEN 128
+
+/* Can change without breaking ABI */
+#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64
+
+/* minimum alignment of bootmem alloced blocks */
+#define CVMX_BOOTMEM_ALIGNMENT_SIZE     (16ull)
+
+/* Flags for cvmx_bootmem_phy_mem* functions */
+/* Allocate from end of block instead of beginning */
+#define CVMX_BOOTMEM_FLAG_END_ALLOC    (1 << 0)
+
+/* Don't do any locking. */
+#define CVMX_BOOTMEM_FLAG_NO_LOCKING   (1 << 1)
+
+/* First bytes of each free physical block of memory contain this structure,
+ * which is used to maintain the free memory list.  Since the bootloader is
+ * only 32 bits, there is a union providing 64 and 32 bit versions.  The
+ * application init code converts addresses to 64 bit addresses before the
+ * application starts.
+ */
+struct cvmx_bootmem_block_header {
+	/*
+	 * Note: these are referenced from assembly routines in the
+	 * bootloader, so this structure should not be changed
+	 * without changing those routines as well.
+	 */
+	uint64_t next_block_addr;
+	uint64_t size;
+
+};
+
+/*
+ * Structure for named memory blocks.  Number of descriptors available
+ * can be changed without affecting compatibility, but name length
+ * changes require a bump in the bootmem descriptor version.  Note: this
+ * structure must be naturally 64 bit aligned, as a single memory
+ * image will be used by both 32 and 64 bit programs.
+ */
+struct cvmx_bootmem_named_block_desc {
+	/* Base address of named block */
+	uint64_t base_addr;
+	/*
+	 * Size actually allocated for named block (may differ from
+	 * requested).
+	 */
+	uint64_t size;
+	/* name of named block */
+	char name[CVMX_BOOTMEM_NAME_LEN];
+};
+
+/* Current descriptor versions */
+/* CVMX bootmem descriptor major version */
+#define CVMX_BOOTMEM_DESC_MAJ_VER   3
+
+/* CVMX bootmem descriptor minor version */
+#define CVMX_BOOTMEM_DESC_MIN_VER   0
+
+/* First three members of cvmx_bootmem_desc_t are left in original
+ * positions for backwards compatibility.
+ */
+struct cvmx_bootmem_desc {
+	/* spinlock to control access to list */
+	uint32_t lock;
+	/* flags for indicating various conditions */
+	uint32_t flags;
+	uint64_t head_addr;
+
+	/* Incremented when incompatible changes made */
+	uint32_t major_version;
+
+	/*
+	 * Incremented when compatible changes are made; reset to
+	 * zero when major version is incremented.
+	 */
+	uint32_t minor_version;
+
+	uint64_t app_data_addr;
+	uint64_t app_data_size;
+
+	/* number of elements in named blocks array */
+	uint32_t named_block_num_blocks;
+
+	/* length of name array in bootmem blocks */
+	uint32_t named_block_name_len;
+	/* address of named memory block descriptors */
+	uint64_t named_block_array_addr;
+
+};
+
+/**
+ * Initialize the boot alloc memory structures. This is
+ * normally called inside of cvmx_user_app_init()
+ *
+ * @mem_desc_ptr:	Address of the free memory list
+ */
+extern int cvmx_bootmem_init(void *mem_desc_ptr);
+
+/**
+ * Allocate a block of memory from the free list that was passed
+ * to the application by the bootloader.
+ * This is an allocate-only algorithm, so freeing memory is not possible.
+ *
+ * @size:      Size in bytes of block to allocate
+ * @alignment: Alignment required - must be power of 2
+ *
+ * Returns pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment);
+
+/**
+ * Allocate a block of memory from the free list that was
+ * passed to the application by the bootloader at a specific
+ * address. This is an allocate-only algorithm, so
+ * freeing memory is not possible. Allocation will fail if
+ * memory cannot be allocated at the specified address.
+ *
+ * @size:      Size in bytes of block to allocate
+ * @address:   Physical address to allocate memory at.  If this memory is not
+ *                  available, the allocation fails.
+ * @alignment: Alignment required - must be power of 2
+ * Returns pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address,
+					uint64_t alignment);
+
+/**
+ * Allocate a block of memory from the free list that was
+ * passed to the application by the bootloader within a specified
+ * address range. This is an allocate-only algorithm, so
+ * freeing memory is not possible. Allocation will fail if
+ * memory cannot be allocated in the requested range.
+ *
+ * @size:      Size in bytes of block to allocate
+ * @min_addr:  defines the minimum address of the range
+ * @max_addr:  defines the maximum address of the range
+ * @alignment: Alignment required - must be power of 2
+ * Returns pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
+				      uint64_t min_addr, uint64_t max_addr);
+
+/**
+ * Frees a previously allocated named bootmem block.
+ *
+ * @name:   name of block to free
+ *
+ * Returns 0 on failure,
+ *         !0 on success
+ */
+extern int cvmx_bootmem_free_named(char *name);
+
+/**
+ * Finds a named bootmem block by name.
+ *
+ * @name:   name of block to find
+ *
+ * Returns pointer to named block descriptor on success
+ *         0 on failure
+ */
+struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name);
+
+/**
+ * Allocates a block of physical memory from the free list, at
+ * (optional) requested address and alignment.
+ *
+ * @req_size: size of region to allocate.  All requests are rounded up
+ *            to be a multiple of CVMX_BOOTMEM_ALIGNMENT_SIZE bytes in size
+ *
+ * @address_min: Minimum address that block can occupy.
+ *
+ * @address_max: Specifies the maximum address (inclusive) that
+ *               the allocation can use.
+ *
+ * @alignment: Requested alignment of the block.  If this alignment
+ *             cannot be met, the allocation fails.  This must be a
+ *             power of 2.  (Note: Alignment of
+ *             CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
+ *             internally enforced.  Requested alignments of less than
+ *             CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
+ *             CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ *
+ * @flags:     Flags to control options for the allocation.
+ *
+ * Returns physical address of block allocated, or -1 on failure
+ */
+int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
+			       uint64_t address_max, uint64_t alignment,
+			       uint32_t flags);
+
+/**
+ * Finds a named memory block by name.
+ * Also used for finding an unused entry in the named block table.
+ *
+ * @name: Name of memory block to find.  If NULL pointer given, then
+ *        finds unused descriptor, if available.
+ *
+ * @flags: Flags to control options for the allocation.
+ *
+ * Returns Pointer to memory block descriptor, NULL if not found.
+ *         If NULL returned when name parameter is NULL, then no memory
+ *         block descriptors are available.
+ */
+struct cvmx_bootmem_named_block_desc *
+cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags);
+
+/**
+ * Frees a named block.
+ *
+ * @name:   name of block to free
+ * @flags:  flags for passing options
+ *
+ * Returns 0 on failure
+ *         1 on success
+ */
+int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags);
+
+/**
+ * Frees a block to the bootmem allocator list.  This must
+ * be used with care, as the size provided must match the size
+ * of the block that was allocated, or the list will become
+ * corrupted.
+ *
+ * IMPORTANT:  This is only intended to be used as part of named block
+ * frees and initial population of the free memory list.
+ *
+ * @phy_addr: physical address of block
+ * @size:     size of block in bytes.
+ * @flags:    flags for passing options
+ *
+ * Returns 1 on success,
+ *         0 on failure
+ */
+int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags);
+
+/**
+ * Locks the bootmem allocator.  This is useful in certain situations
+ * where multiple allocations must be made without being interrupted.
+ * Allocation calls made while the lock is held should pass the
+ * CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
+ *
+ */
+void cvmx_bootmem_lock(void);
+
+/**
+ * Unlocks the bootmem allocator.  This is useful in certain situations
+ * where multiple allocations must be made without being interrupted.
+ * Allocation calls made while the lock is held should pass the
+ * CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
+ *
+ */
+void cvmx_bootmem_unlock(void);
+
+#endif /*   __CVMX_BOOTMEM_H__ */
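As a quick orientation for reviewers, here is a minimal usage sketch of the allocate-only API declared above; the sizes, the 256 MiB window, and the example_reserve_buffers() helper are illustrative assumptions, not part of this patch:

	/* Illustrative only: reserve a scratch area and a low-memory DMA buffer. */
	static void *scratch, *dma_buf;

	static int example_reserve_buffers(void)
	{
		/* 1 MiB block, 128-byte aligned; NULL means the request could not be satisfied. */
		scratch = cvmx_bootmem_alloc(1ull << 20, 128);
		if (!scratch)
			return -1;

		/* 64 KiB block that must land below 256 MiB physical. */
		dma_buf = cvmx_bootmem_alloc_range(64ull << 10, CVMX_BOOTMEM_ALIGNMENT_SIZE,
						   0, 256ull << 20);
		return dma_buf ? 0 : -1;
	}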
diff --git a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
new file mode 100644
index 0000000..f8f05b7
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
@@ -0,0 +1,1616 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_CIU_DEFS_H__
+#define __CVMX_CIU_DEFS_H__
+
+#define CVMX_CIU_BIST \
+	 CVMX_ADD_IO_SEG(0x0001070000000730ull)
+#define CVMX_CIU_DINT \
+	 CVMX_ADD_IO_SEG(0x0001070000000720ull)
+#define CVMX_CIU_FUSE \
+	 CVMX_ADD_IO_SEG(0x0001070000000728ull)
+#define CVMX_CIU_GSTOP \
+	 CVMX_ADD_IO_SEG(0x0001070000000710ull)
+#define CVMX_CIU_INTX_EN0(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000000200ull + (((offset) & 63) * 16))
+#define CVMX_CIU_INTX_EN0_W1C(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000002200ull + (((offset) & 63) * 16))
+#define CVMX_CIU_INTX_EN0_W1S(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000006200ull + (((offset) & 63) * 16))
+#define CVMX_CIU_INTX_EN1(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000000208ull + (((offset) & 63) * 16))
+#define CVMX_CIU_INTX_EN1_W1C(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000002208ull + (((offset) & 63) * 16))
+#define CVMX_CIU_INTX_EN1_W1S(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000006208ull + (((offset) & 63) * 16))
+#define CVMX_CIU_INTX_EN4_0(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000000C80ull + (((offset) & 15) * 16))
+#define CVMX_CIU_INTX_EN4_0_W1C(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000002C80ull + (((offset) & 15) * 16))
+#define CVMX_CIU_INTX_EN4_0_W1S(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000006C80ull + (((offset) & 15) * 16))
+#define CVMX_CIU_INTX_EN4_1(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000000C88ull + (((offset) & 15) * 16))
+#define CVMX_CIU_INTX_EN4_1_W1C(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000002C88ull + (((offset) & 15) * 16))
+#define CVMX_CIU_INTX_EN4_1_W1S(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000006C88ull + (((offset) & 15) * 16))
+#define CVMX_CIU_INTX_SUM0(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000000000ull + (((offset) & 63) * 8))
+#define CVMX_CIU_INTX_SUM4(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000000C00ull + (((offset) & 15) * 8))
+#define CVMX_CIU_INT_SUM1 \
+	 CVMX_ADD_IO_SEG(0x0001070000000108ull)
+#define CVMX_CIU_MBOX_CLRX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000000680ull + (((offset) & 15) * 8))
+#define CVMX_CIU_MBOX_SETX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000000600ull + (((offset) & 15) * 8))
+#define CVMX_CIU_NMI \
+	 CVMX_ADD_IO_SEG(0x0001070000000718ull)
+#define CVMX_CIU_PCI_INTA \
+	 CVMX_ADD_IO_SEG(0x0001070000000750ull)
+#define CVMX_CIU_PP_DBG \
+	 CVMX_ADD_IO_SEG(0x0001070000000708ull)
+#define CVMX_CIU_PP_POKEX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000000580ull + (((offset) & 15) * 8))
+#define CVMX_CIU_PP_RST \
+	 CVMX_ADD_IO_SEG(0x0001070000000700ull)
+#define CVMX_CIU_QLM_DCOK \
+	 CVMX_ADD_IO_SEG(0x0001070000000760ull)
+#define CVMX_CIU_QLM_JTGC \
+	 CVMX_ADD_IO_SEG(0x0001070000000768ull)
+#define CVMX_CIU_QLM_JTGD \
+	 CVMX_ADD_IO_SEG(0x0001070000000770ull)
+#define CVMX_CIU_SOFT_BIST \
+	 CVMX_ADD_IO_SEG(0x0001070000000738ull)
+#define CVMX_CIU_SOFT_PRST \
+	 CVMX_ADD_IO_SEG(0x0001070000000748ull)
+#define CVMX_CIU_SOFT_PRST1 \
+	 CVMX_ADD_IO_SEG(0x0001070000000758ull)
+#define CVMX_CIU_SOFT_RST \
+	 CVMX_ADD_IO_SEG(0x0001070000000740ull)
+#define CVMX_CIU_TIMX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000000480ull + (((offset) & 3) * 8))
+#define CVMX_CIU_WDOGX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000000500ull + (((offset) & 15) * 8))
+
+union cvmx_ciu_bist {
+	uint64_t u64;
+	struct cvmx_ciu_bist_s {
+		uint64_t reserved_4_63:60;
+		uint64_t bist:4;
+	} s;
+	struct cvmx_ciu_bist_s cn30xx;
+	struct cvmx_ciu_bist_s cn31xx;
+	struct cvmx_ciu_bist_s cn38xx;
+	struct cvmx_ciu_bist_s cn38xxp2;
+	struct cvmx_ciu_bist_cn50xx {
+		uint64_t reserved_2_63:62;
+		uint64_t bist:2;
+	} cn50xx;
+	struct cvmx_ciu_bist_cn52xx {
+		uint64_t reserved_3_63:61;
+		uint64_t bist:3;
+	} cn52xx;
+	struct cvmx_ciu_bist_cn52xx cn52xxp1;
+	struct cvmx_ciu_bist_s cn56xx;
+	struct cvmx_ciu_bist_s cn56xxp1;
+	struct cvmx_ciu_bist_s cn58xx;
+	struct cvmx_ciu_bist_s cn58xxp1;
+};
+
+union cvmx_ciu_dint {
+	uint64_t u64;
+	struct cvmx_ciu_dint_s {
+		uint64_t reserved_16_63:48;
+		uint64_t dint:16;
+	} s;
+	struct cvmx_ciu_dint_cn30xx {
+		uint64_t reserved_1_63:63;
+		uint64_t dint:1;
+	} cn30xx;
+	struct cvmx_ciu_dint_cn31xx {
+		uint64_t reserved_2_63:62;
+		uint64_t dint:2;
+	} cn31xx;
+	struct cvmx_ciu_dint_s cn38xx;
+	struct cvmx_ciu_dint_s cn38xxp2;
+	struct cvmx_ciu_dint_cn31xx cn50xx;
+	struct cvmx_ciu_dint_cn52xx {
+		uint64_t reserved_4_63:60;
+		uint64_t dint:4;
+	} cn52xx;
+	struct cvmx_ciu_dint_cn52xx cn52xxp1;
+	struct cvmx_ciu_dint_cn56xx {
+		uint64_t reserved_12_63:52;
+		uint64_t dint:12;
+	} cn56xx;
+	struct cvmx_ciu_dint_cn56xx cn56xxp1;
+	struct cvmx_ciu_dint_s cn58xx;
+	struct cvmx_ciu_dint_s cn58xxp1;
+};
+
+union cvmx_ciu_fuse {
+	uint64_t u64;
+	struct cvmx_ciu_fuse_s {
+		uint64_t reserved_16_63:48;
+		uint64_t fuse:16;
+	} s;
+	struct cvmx_ciu_fuse_cn30xx {
+		uint64_t reserved_1_63:63;
+		uint64_t fuse:1;
+	} cn30xx;
+	struct cvmx_ciu_fuse_cn31xx {
+		uint64_t reserved_2_63:62;
+		uint64_t fuse:2;
+	} cn31xx;
+	struct cvmx_ciu_fuse_s cn38xx;
+	struct cvmx_ciu_fuse_s cn38xxp2;
+	struct cvmx_ciu_fuse_cn31xx cn50xx;
+	struct cvmx_ciu_fuse_cn52xx {
+		uint64_t reserved_4_63:60;
+		uint64_t fuse:4;
+	} cn52xx;
+	struct cvmx_ciu_fuse_cn52xx cn52xxp1;
+	struct cvmx_ciu_fuse_cn56xx {
+		uint64_t reserved_12_63:52;
+		uint64_t fuse:12;
+	} cn56xx;
+	struct cvmx_ciu_fuse_cn56xx cn56xxp1;
+	struct cvmx_ciu_fuse_s cn58xx;
+	struct cvmx_ciu_fuse_s cn58xxp1;
+};
+
+union cvmx_ciu_gstop {
+	uint64_t u64;
+	struct cvmx_ciu_gstop_s {
+		uint64_t reserved_1_63:63;
+		uint64_t gstop:1;
+	} s;
+	struct cvmx_ciu_gstop_s cn30xx;
+	struct cvmx_ciu_gstop_s cn31xx;
+	struct cvmx_ciu_gstop_s cn38xx;
+	struct cvmx_ciu_gstop_s cn38xxp2;
+	struct cvmx_ciu_gstop_s cn50xx;
+	struct cvmx_ciu_gstop_s cn52xx;
+	struct cvmx_ciu_gstop_s cn52xxp1;
+	struct cvmx_ciu_gstop_s cn56xx;
+	struct cvmx_ciu_gstop_s cn56xxp1;
+	struct cvmx_ciu_gstop_s cn58xx;
+	struct cvmx_ciu_gstop_s cn58xxp1;
+};
+
+union cvmx_ciu_intx_en0 {
+	uint64_t u64;
+	struct cvmx_ciu_intx_en0_s {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t mpi:1;
+		uint64_t pcm:1;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} s;
+	struct cvmx_ciu_intx_en0_cn30xx {
+		uint64_t reserved_59_63:5;
+		uint64_t mpi:1;
+		uint64_t pcm:1;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t reserved_51_51:1;
+		uint64_t ipd_drp:1;
+		uint64_t reserved_49_49:1;
+		uint64_t gmx_drp:1;
+		uint64_t reserved_47_47:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn30xx;
+	struct cvmx_ciu_intx_en0_cn31xx {
+		uint64_t reserved_59_63:5;
+		uint64_t mpi:1;
+		uint64_t pcm:1;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t reserved_51_51:1;
+		uint64_t ipd_drp:1;
+		uint64_t reserved_49_49:1;
+		uint64_t gmx_drp:1;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn31xx;
+	struct cvmx_ciu_intx_en0_cn38xx {
+		uint64_t reserved_56_63:8;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn38xx;
+	struct cvmx_ciu_intx_en0_cn38xx cn38xxp2;
+	struct cvmx_ciu_intx_en0_cn30xx cn50xx;
+	struct cvmx_ciu_intx_en0_cn52xx {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t reserved_57_58:2;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t reserved_51_51:1;
+		uint64_t ipd_drp:1;
+		uint64_t reserved_49_49:1;
+		uint64_t gmx_drp:1;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn52xx;
+	struct cvmx_ciu_intx_en0_cn52xx cn52xxp1;
+	struct cvmx_ciu_intx_en0_cn56xx {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t reserved_57_58:2;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn56xx;
+	struct cvmx_ciu_intx_en0_cn56xx cn56xxp1;
+	struct cvmx_ciu_intx_en0_cn38xx cn58xx;
+	struct cvmx_ciu_intx_en0_cn38xx cn58xxp1;
+};
+
+union cvmx_ciu_intx_en0_w1c {
+	uint64_t u64;
+	struct cvmx_ciu_intx_en0_w1c_s {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t reserved_57_58:2;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} s;
+	struct cvmx_ciu_intx_en0_w1c_cn52xx {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t reserved_57_58:2;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t reserved_51_51:1;
+		uint64_t ipd_drp:1;
+		uint64_t reserved_49_49:1;
+		uint64_t gmx_drp:1;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn52xx;
+	struct cvmx_ciu_intx_en0_w1c_s cn56xx;
+	struct cvmx_ciu_intx_en0_w1c_cn58xx {
+		uint64_t reserved_56_63:8;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn58xx;
+};
+
+union cvmx_ciu_intx_en0_w1s {
+	uint64_t u64;
+	struct cvmx_ciu_intx_en0_w1s_s {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t reserved_57_58:2;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} s;
+	struct cvmx_ciu_intx_en0_w1s_cn52xx {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t reserved_57_58:2;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t reserved_51_51:1;
+		uint64_t ipd_drp:1;
+		uint64_t reserved_49_49:1;
+		uint64_t gmx_drp:1;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn52xx;
+	struct cvmx_ciu_intx_en0_w1s_s cn56xx;
+	struct cvmx_ciu_intx_en0_w1s_cn58xx {
+		uint64_t reserved_56_63:8;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn58xx;
+};
+
+union cvmx_ciu_intx_en1 {
+	uint64_t u64;
+	struct cvmx_ciu_intx_en1_s {
+		uint64_t reserved_20_63:44;
+		uint64_t nand:1;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t wdog:16;
+	} s;
+	struct cvmx_ciu_intx_en1_cn30xx {
+		uint64_t reserved_1_63:63;
+		uint64_t wdog:1;
+	} cn30xx;
+	struct cvmx_ciu_intx_en1_cn31xx {
+		uint64_t reserved_2_63:62;
+		uint64_t wdog:2;
+	} cn31xx;
+	struct cvmx_ciu_intx_en1_cn38xx {
+		uint64_t reserved_16_63:48;
+		uint64_t wdog:16;
+	} cn38xx;
+	struct cvmx_ciu_intx_en1_cn38xx cn38xxp2;
+	struct cvmx_ciu_intx_en1_cn31xx cn50xx;
+	struct cvmx_ciu_intx_en1_cn52xx {
+		uint64_t reserved_20_63:44;
+		uint64_t nand:1;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t reserved_4_15:12;
+		uint64_t wdog:4;
+	} cn52xx;
+	struct cvmx_ciu_intx_en1_cn52xxp1 {
+		uint64_t reserved_19_63:45;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t reserved_4_15:12;
+		uint64_t wdog:4;
+	} cn52xxp1;
+	struct cvmx_ciu_intx_en1_cn56xx {
+		uint64_t reserved_12_63:52;
+		uint64_t wdog:12;
+	} cn56xx;
+	struct cvmx_ciu_intx_en1_cn56xx cn56xxp1;
+	struct cvmx_ciu_intx_en1_cn38xx cn58xx;
+	struct cvmx_ciu_intx_en1_cn38xx cn58xxp1;
+};
+
+union cvmx_ciu_intx_en1_w1c {
+	uint64_t u64;
+	struct cvmx_ciu_intx_en1_w1c_s {
+		uint64_t reserved_20_63:44;
+		uint64_t nand:1;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t wdog:16;
+	} s;
+	struct cvmx_ciu_intx_en1_w1c_cn52xx {
+		uint64_t reserved_20_63:44;
+		uint64_t nand:1;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t reserved_4_15:12;
+		uint64_t wdog:4;
+	} cn52xx;
+	struct cvmx_ciu_intx_en1_w1c_cn56xx {
+		uint64_t reserved_12_63:52;
+		uint64_t wdog:12;
+	} cn56xx;
+	struct cvmx_ciu_intx_en1_w1c_cn58xx {
+		uint64_t reserved_16_63:48;
+		uint64_t wdog:16;
+	} cn58xx;
+};
+
+union cvmx_ciu_intx_en1_w1s {
+	uint64_t u64;
+	struct cvmx_ciu_intx_en1_w1s_s {
+		uint64_t reserved_20_63:44;
+		uint64_t nand:1;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t wdog:16;
+	} s;
+	struct cvmx_ciu_intx_en1_w1s_cn52xx {
+		uint64_t reserved_20_63:44;
+		uint64_t nand:1;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t reserved_4_15:12;
+		uint64_t wdog:4;
+	} cn52xx;
+	struct cvmx_ciu_intx_en1_w1s_cn56xx {
+		uint64_t reserved_12_63:52;
+		uint64_t wdog:12;
+	} cn56xx;
+	struct cvmx_ciu_intx_en1_w1s_cn58xx {
+		uint64_t reserved_16_63:48;
+		uint64_t wdog:16;
+	} cn58xx;
+};
+
+union cvmx_ciu_intx_en4_0 {
+	uint64_t u64;
+	struct cvmx_ciu_intx_en4_0_s {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t mpi:1;
+		uint64_t pcm:1;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} s;
+	struct cvmx_ciu_intx_en4_0_cn50xx {
+		uint64_t reserved_59_63:5;
+		uint64_t mpi:1;
+		uint64_t pcm:1;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t reserved_51_51:1;
+		uint64_t ipd_drp:1;
+		uint64_t reserved_49_49:1;
+		uint64_t gmx_drp:1;
+		uint64_t reserved_47_47:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn50xx;
+	struct cvmx_ciu_intx_en4_0_cn52xx {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t reserved_57_58:2;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t reserved_51_51:1;
+		uint64_t ipd_drp:1;
+		uint64_t reserved_49_49:1;
+		uint64_t gmx_drp:1;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn52xx;
+	struct cvmx_ciu_intx_en4_0_cn52xx cn52xxp1;
+	struct cvmx_ciu_intx_en4_0_cn56xx {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t reserved_57_58:2;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn56xx;
+	struct cvmx_ciu_intx_en4_0_cn56xx cn56xxp1;
+	struct cvmx_ciu_intx_en4_0_cn58xx {
+		uint64_t reserved_56_63:8;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn58xx;
+	struct cvmx_ciu_intx_en4_0_cn58xx cn58xxp1;
+};
+
+union cvmx_ciu_intx_en4_0_w1c {
+	uint64_t u64;
+	struct cvmx_ciu_intx_en4_0_w1c_s {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t reserved_57_58:2;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} s;
+	struct cvmx_ciu_intx_en4_0_w1c_cn52xx {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t reserved_57_58:2;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t reserved_51_51:1;
+		uint64_t ipd_drp:1;
+		uint64_t reserved_49_49:1;
+		uint64_t gmx_drp:1;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn52xx;
+	struct cvmx_ciu_intx_en4_0_w1c_s cn56xx;
+	struct cvmx_ciu_intx_en4_0_w1c_cn58xx {
+		uint64_t reserved_56_63:8;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn58xx;
+};
+
+union cvmx_ciu_intx_en4_0_w1s {
+	uint64_t u64;
+	struct cvmx_ciu_intx_en4_0_w1s_s {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t reserved_57_58:2;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} s;
+	struct cvmx_ciu_intx_en4_0_w1s_cn52xx {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t reserved_57_58:2;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t reserved_51_51:1;
+		uint64_t ipd_drp:1;
+		uint64_t reserved_49_49:1;
+		uint64_t gmx_drp:1;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn52xx;
+	struct cvmx_ciu_intx_en4_0_w1s_s cn56xx;
+	struct cvmx_ciu_intx_en4_0_w1s_cn58xx {
+		uint64_t reserved_56_63:8;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t reserved_44_44:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn58xx;
+};
+
+union cvmx_ciu_intx_en4_1 {
+	uint64_t u64;
+	struct cvmx_ciu_intx_en4_1_s {
+		uint64_t reserved_20_63:44;
+		uint64_t nand:1;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t wdog:16;
+	} s;
+	struct cvmx_ciu_intx_en4_1_cn50xx {
+		uint64_t reserved_2_63:62;
+		uint64_t wdog:2;
+	} cn50xx;
+	struct cvmx_ciu_intx_en4_1_cn52xx {
+		uint64_t reserved_20_63:44;
+		uint64_t nand:1;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t reserved_4_15:12;
+		uint64_t wdog:4;
+	} cn52xx;
+	struct cvmx_ciu_intx_en4_1_cn52xxp1 {
+		uint64_t reserved_19_63:45;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t reserved_4_15:12;
+		uint64_t wdog:4;
+	} cn52xxp1;
+	struct cvmx_ciu_intx_en4_1_cn56xx {
+		uint64_t reserved_12_63:52;
+		uint64_t wdog:12;
+	} cn56xx;
+	struct cvmx_ciu_intx_en4_1_cn56xx cn56xxp1;
+	struct cvmx_ciu_intx_en4_1_cn58xx {
+		uint64_t reserved_16_63:48;
+		uint64_t wdog:16;
+	} cn58xx;
+	struct cvmx_ciu_intx_en4_1_cn58xx cn58xxp1;
+};
+
+union cvmx_ciu_intx_en4_1_w1c {
+	uint64_t u64;
+	struct cvmx_ciu_intx_en4_1_w1c_s {
+		uint64_t reserved_20_63:44;
+		uint64_t nand:1;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t wdog:16;
+	} s;
+	struct cvmx_ciu_intx_en4_1_w1c_cn52xx {
+		uint64_t reserved_20_63:44;
+		uint64_t nand:1;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t reserved_4_15:12;
+		uint64_t wdog:4;
+	} cn52xx;
+	struct cvmx_ciu_intx_en4_1_w1c_cn56xx {
+		uint64_t reserved_12_63:52;
+		uint64_t wdog:12;
+	} cn56xx;
+	struct cvmx_ciu_intx_en4_1_w1c_cn58xx {
+		uint64_t reserved_16_63:48;
+		uint64_t wdog:16;
+	} cn58xx;
+};
+
+union cvmx_ciu_intx_en4_1_w1s {
+	uint64_t u64;
+	struct cvmx_ciu_intx_en4_1_w1s_s {
+		uint64_t reserved_20_63:44;
+		uint64_t nand:1;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t wdog:16;
+	} s;
+	struct cvmx_ciu_intx_en4_1_w1s_cn52xx {
+		uint64_t reserved_20_63:44;
+		uint64_t nand:1;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t reserved_4_15:12;
+		uint64_t wdog:4;
+	} cn52xx;
+	struct cvmx_ciu_intx_en4_1_w1s_cn56xx {
+		uint64_t reserved_12_63:52;
+		uint64_t wdog:12;
+	} cn56xx;
+	struct cvmx_ciu_intx_en4_1_w1s_cn58xx {
+		uint64_t reserved_16_63:48;
+		uint64_t wdog:16;
+	} cn58xx;
+};
+
+union cvmx_ciu_intx_sum0 {
+	uint64_t u64;
+	struct cvmx_ciu_intx_sum0_s {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t mpi:1;
+		uint64_t pcm:1;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t wdog_sum:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} s;
+	struct cvmx_ciu_intx_sum0_cn30xx {
+		uint64_t reserved_59_63:5;
+		uint64_t mpi:1;
+		uint64_t pcm:1;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t reserved_51_51:1;
+		uint64_t ipd_drp:1;
+		uint64_t reserved_49_49:1;
+		uint64_t gmx_drp:1;
+		uint64_t reserved_47_47:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t wdog_sum:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn30xx;
+	struct cvmx_ciu_intx_sum0_cn31xx {
+		uint64_t reserved_59_63:5;
+		uint64_t mpi:1;
+		uint64_t pcm:1;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t reserved_51_51:1;
+		uint64_t ipd_drp:1;
+		uint64_t reserved_49_49:1;
+		uint64_t gmx_drp:1;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t wdog_sum:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn31xx;
+	struct cvmx_ciu_intx_sum0_cn38xx {
+		uint64_t reserved_56_63:8;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t wdog_sum:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn38xx;
+	struct cvmx_ciu_intx_sum0_cn38xx cn38xxp2;
+	struct cvmx_ciu_intx_sum0_cn30xx cn50xx;
+	struct cvmx_ciu_intx_sum0_cn52xx {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t reserved_57_58:2;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t reserved_51_51:1;
+		uint64_t ipd_drp:1;
+		uint64_t reserved_49_49:1;
+		uint64_t gmx_drp:1;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t wdog_sum:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn52xx;
+	struct cvmx_ciu_intx_sum0_cn52xx cn52xxp1;
+	struct cvmx_ciu_intx_sum0_cn56xx {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t reserved_57_58:2;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t wdog_sum:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn56xx;
+	struct cvmx_ciu_intx_sum0_cn56xx cn56xxp1;
+	struct cvmx_ciu_intx_sum0_cn38xx cn58xx;
+	struct cvmx_ciu_intx_sum0_cn38xx cn58xxp1;
+};
+
+union cvmx_ciu_intx_sum4 {
+	uint64_t u64;
+	struct cvmx_ciu_intx_sum4_s {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t mpi:1;
+		uint64_t pcm:1;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t wdog_sum:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} s;
+	struct cvmx_ciu_intx_sum4_cn50xx {
+		uint64_t reserved_59_63:5;
+		uint64_t mpi:1;
+		uint64_t pcm:1;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t reserved_51_51:1;
+		uint64_t ipd_drp:1;
+		uint64_t reserved_49_49:1;
+		uint64_t gmx_drp:1;
+		uint64_t reserved_47_47:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t wdog_sum:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn50xx;
+	struct cvmx_ciu_intx_sum4_cn52xx {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t reserved_57_58:2;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t reserved_51_51:1;
+		uint64_t ipd_drp:1;
+		uint64_t reserved_49_49:1;
+		uint64_t gmx_drp:1;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t wdog_sum:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn52xx;
+	struct cvmx_ciu_intx_sum4_cn52xx cn52xxp1;
+	struct cvmx_ciu_intx_sum4_cn56xx {
+		uint64_t bootdma:1;
+		uint64_t mii:1;
+		uint64_t ipdppthr:1;
+		uint64_t powiq:1;
+		uint64_t twsi2:1;
+		uint64_t reserved_57_58:2;
+		uint64_t usb:1;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t wdog_sum:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn56xx;
+	struct cvmx_ciu_intx_sum4_cn56xx cn56xxp1;
+	struct cvmx_ciu_intx_sum4_cn58xx {
+		uint64_t reserved_56_63:8;
+		uint64_t timer:4;
+		uint64_t key_zero:1;
+		uint64_t ipd_drp:1;
+		uint64_t gmx_drp:2;
+		uint64_t trace:1;
+		uint64_t rml:1;
+		uint64_t twsi:1;
+		uint64_t wdog_sum:1;
+		uint64_t pci_msi:4;
+		uint64_t pci_int:4;
+		uint64_t uart:2;
+		uint64_t mbox:2;
+		uint64_t gpio:16;
+		uint64_t workq:16;
+	} cn58xx;
+	struct cvmx_ciu_intx_sum4_cn58xx cn58xxp1;
+};
+
+union cvmx_ciu_int_sum1 {
+	uint64_t u64;
+	struct cvmx_ciu_int_sum1_s {
+		uint64_t reserved_20_63:44;
+		uint64_t nand:1;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t wdog:16;
+	} s;
+	struct cvmx_ciu_int_sum1_cn30xx {
+		uint64_t reserved_1_63:63;
+		uint64_t wdog:1;
+	} cn30xx;
+	struct cvmx_ciu_int_sum1_cn31xx {
+		uint64_t reserved_2_63:62;
+		uint64_t wdog:2;
+	} cn31xx;
+	struct cvmx_ciu_int_sum1_cn38xx {
+		uint64_t reserved_16_63:48;
+		uint64_t wdog:16;
+	} cn38xx;
+	struct cvmx_ciu_int_sum1_cn38xx cn38xxp2;
+	struct cvmx_ciu_int_sum1_cn31xx cn50xx;
+	struct cvmx_ciu_int_sum1_cn52xx {
+		uint64_t reserved_20_63:44;
+		uint64_t nand:1;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t reserved_4_15:12;
+		uint64_t wdog:4;
+	} cn52xx;
+	struct cvmx_ciu_int_sum1_cn52xxp1 {
+		uint64_t reserved_19_63:45;
+		uint64_t mii1:1;
+		uint64_t usb1:1;
+		uint64_t uart2:1;
+		uint64_t reserved_4_15:12;
+		uint64_t wdog:4;
+	} cn52xxp1;
+	struct cvmx_ciu_int_sum1_cn56xx {
+		uint64_t reserved_12_63:52;
+		uint64_t wdog:12;
+	} cn56xx;
+	struct cvmx_ciu_int_sum1_cn56xx cn56xxp1;
+	struct cvmx_ciu_int_sum1_cn38xx cn58xx;
+	struct cvmx_ciu_int_sum1_cn38xx cn58xxp1;
+};
+
+union cvmx_ciu_mbox_clrx {
+	uint64_t u64;
+	struct cvmx_ciu_mbox_clrx_s {
+		uint64_t reserved_32_63:32;
+		uint64_t bits:32;
+	} s;
+	struct cvmx_ciu_mbox_clrx_s cn30xx;
+	struct cvmx_ciu_mbox_clrx_s cn31xx;
+	struct cvmx_ciu_mbox_clrx_s cn38xx;
+	struct cvmx_ciu_mbox_clrx_s cn38xxp2;
+	struct cvmx_ciu_mbox_clrx_s cn50xx;
+	struct cvmx_ciu_mbox_clrx_s cn52xx;
+	struct cvmx_ciu_mbox_clrx_s cn52xxp1;
+	struct cvmx_ciu_mbox_clrx_s cn56xx;
+	struct cvmx_ciu_mbox_clrx_s cn56xxp1;
+	struct cvmx_ciu_mbox_clrx_s cn58xx;
+	struct cvmx_ciu_mbox_clrx_s cn58xxp1;
+};
+
+union cvmx_ciu_mbox_setx {
+	uint64_t u64;
+	struct cvmx_ciu_mbox_setx_s {
+		uint64_t reserved_32_63:32;
+		uint64_t bits:32;
+	} s;
+	struct cvmx_ciu_mbox_setx_s cn30xx;
+	struct cvmx_ciu_mbox_setx_s cn31xx;
+	struct cvmx_ciu_mbox_setx_s cn38xx;
+	struct cvmx_ciu_mbox_setx_s cn38xxp2;
+	struct cvmx_ciu_mbox_setx_s cn50xx;
+	struct cvmx_ciu_mbox_setx_s cn52xx;
+	struct cvmx_ciu_mbox_setx_s cn52xxp1;
+	struct cvmx_ciu_mbox_setx_s cn56xx;
+	struct cvmx_ciu_mbox_setx_s cn56xxp1;
+	struct cvmx_ciu_mbox_setx_s cn58xx;
+	struct cvmx_ciu_mbox_setx_s cn58xxp1;
+};
+
+union cvmx_ciu_nmi {
+	uint64_t u64;
+	struct cvmx_ciu_nmi_s {
+		uint64_t reserved_16_63:48;
+		uint64_t nmi:16;
+	} s;
+	struct cvmx_ciu_nmi_cn30xx {
+		uint64_t reserved_1_63:63;
+		uint64_t nmi:1;
+	} cn30xx;
+	struct cvmx_ciu_nmi_cn31xx {
+		uint64_t reserved_2_63:62;
+		uint64_t nmi:2;
+	} cn31xx;
+	struct cvmx_ciu_nmi_s cn38xx;
+	struct cvmx_ciu_nmi_s cn38xxp2;
+	struct cvmx_ciu_nmi_cn31xx cn50xx;
+	struct cvmx_ciu_nmi_cn52xx {
+		uint64_t reserved_4_63:60;
+		uint64_t nmi:4;
+	} cn52xx;
+	struct cvmx_ciu_nmi_cn52xx cn52xxp1;
+	struct cvmx_ciu_nmi_cn56xx {
+		uint64_t reserved_12_63:52;
+		uint64_t nmi:12;
+	} cn56xx;
+	struct cvmx_ciu_nmi_cn56xx cn56xxp1;
+	struct cvmx_ciu_nmi_s cn58xx;
+	struct cvmx_ciu_nmi_s cn58xxp1;
+};
+
+union cvmx_ciu_pci_inta {
+	uint64_t u64;
+	struct cvmx_ciu_pci_inta_s {
+		uint64_t reserved_2_63:62;
+		uint64_t intr:2;
+	} s;
+	struct cvmx_ciu_pci_inta_s cn30xx;
+	struct cvmx_ciu_pci_inta_s cn31xx;
+	struct cvmx_ciu_pci_inta_s cn38xx;
+	struct cvmx_ciu_pci_inta_s cn38xxp2;
+	struct cvmx_ciu_pci_inta_s cn50xx;
+	struct cvmx_ciu_pci_inta_s cn52xx;
+	struct cvmx_ciu_pci_inta_s cn52xxp1;
+	struct cvmx_ciu_pci_inta_s cn56xx;
+	struct cvmx_ciu_pci_inta_s cn56xxp1;
+	struct cvmx_ciu_pci_inta_s cn58xx;
+	struct cvmx_ciu_pci_inta_s cn58xxp1;
+};
+
+union cvmx_ciu_pp_dbg {
+	uint64_t u64;
+	struct cvmx_ciu_pp_dbg_s {
+		uint64_t reserved_16_63:48;
+		uint64_t ppdbg:16;
+	} s;
+	struct cvmx_ciu_pp_dbg_cn30xx {
+		uint64_t reserved_1_63:63;
+		uint64_t ppdbg:1;
+	} cn30xx;
+	struct cvmx_ciu_pp_dbg_cn31xx {
+		uint64_t reserved_2_63:62;
+		uint64_t ppdbg:2;
+	} cn31xx;
+	struct cvmx_ciu_pp_dbg_s cn38xx;
+	struct cvmx_ciu_pp_dbg_s cn38xxp2;
+	struct cvmx_ciu_pp_dbg_cn31xx cn50xx;
+	struct cvmx_ciu_pp_dbg_cn52xx {
+		uint64_t reserved_4_63:60;
+		uint64_t ppdbg:4;
+	} cn52xx;
+	struct cvmx_ciu_pp_dbg_cn52xx cn52xxp1;
+	struct cvmx_ciu_pp_dbg_cn56xx {
+		uint64_t reserved_12_63:52;
+		uint64_t ppdbg:12;
+	} cn56xx;
+	struct cvmx_ciu_pp_dbg_cn56xx cn56xxp1;
+	struct cvmx_ciu_pp_dbg_s cn58xx;
+	struct cvmx_ciu_pp_dbg_s cn58xxp1;
+};
+
+union cvmx_ciu_pp_pokex {
+	uint64_t u64;
+	struct cvmx_ciu_pp_pokex_s {
+		uint64_t reserved_0_63:64;
+	} s;
+	struct cvmx_ciu_pp_pokex_s cn30xx;
+	struct cvmx_ciu_pp_pokex_s cn31xx;
+	struct cvmx_ciu_pp_pokex_s cn38xx;
+	struct cvmx_ciu_pp_pokex_s cn38xxp2;
+	struct cvmx_ciu_pp_pokex_s cn50xx;
+	struct cvmx_ciu_pp_pokex_s cn52xx;
+	struct cvmx_ciu_pp_pokex_s cn52xxp1;
+	struct cvmx_ciu_pp_pokex_s cn56xx;
+	struct cvmx_ciu_pp_pokex_s cn56xxp1;
+	struct cvmx_ciu_pp_pokex_s cn58xx;
+	struct cvmx_ciu_pp_pokex_s cn58xxp1;
+};
+
+union cvmx_ciu_pp_rst {
+	uint64_t u64;
+	struct cvmx_ciu_pp_rst_s {
+		uint64_t reserved_16_63:48;
+		uint64_t rst:15;
+		uint64_t rst0:1;
+	} s;
+	struct cvmx_ciu_pp_rst_cn30xx {
+		uint64_t reserved_1_63:63;
+		uint64_t rst0:1;
+	} cn30xx;
+	struct cvmx_ciu_pp_rst_cn31xx {
+		uint64_t reserved_2_63:62;
+		uint64_t rst:1;
+		uint64_t rst0:1;
+	} cn31xx;
+	struct cvmx_ciu_pp_rst_s cn38xx;
+	struct cvmx_ciu_pp_rst_s cn38xxp2;
+	struct cvmx_ciu_pp_rst_cn31xx cn50xx;
+	struct cvmx_ciu_pp_rst_cn52xx {
+		uint64_t reserved_4_63:60;
+		uint64_t rst:3;
+		uint64_t rst0:1;
+	} cn52xx;
+	struct cvmx_ciu_pp_rst_cn52xx cn52xxp1;
+	struct cvmx_ciu_pp_rst_cn56xx {
+		uint64_t reserved_12_63:52;
+		uint64_t rst:11;
+		uint64_t rst0:1;
+	} cn56xx;
+	struct cvmx_ciu_pp_rst_cn56xx cn56xxp1;
+	struct cvmx_ciu_pp_rst_s cn58xx;
+	struct cvmx_ciu_pp_rst_s cn58xxp1;
+};
+
+union cvmx_ciu_qlm_dcok {
+	uint64_t u64;
+	struct cvmx_ciu_qlm_dcok_s {
+		uint64_t reserved_4_63:60;
+		uint64_t qlm_dcok:4;
+	} s;
+	struct cvmx_ciu_qlm_dcok_cn52xx {
+		uint64_t reserved_2_63:62;
+		uint64_t qlm_dcok:2;
+	} cn52xx;
+	struct cvmx_ciu_qlm_dcok_cn52xx cn52xxp1;
+	struct cvmx_ciu_qlm_dcok_s cn56xx;
+	struct cvmx_ciu_qlm_dcok_s cn56xxp1;
+};
+
+union cvmx_ciu_qlm_jtgc {
+	uint64_t u64;
+	struct cvmx_ciu_qlm_jtgc_s {
+		uint64_t reserved_11_63:53;
+		uint64_t clk_div:3;
+		uint64_t reserved_6_7:2;
+		uint64_t mux_sel:2;
+		uint64_t bypass:4;
+	} s;
+	struct cvmx_ciu_qlm_jtgc_cn52xx {
+		uint64_t reserved_11_63:53;
+		uint64_t clk_div:3;
+		uint64_t reserved_5_7:3;
+		uint64_t mux_sel:1;
+		uint64_t reserved_2_3:2;
+		uint64_t bypass:2;
+	} cn52xx;
+	struct cvmx_ciu_qlm_jtgc_cn52xx cn52xxp1;
+	struct cvmx_ciu_qlm_jtgc_s cn56xx;
+	struct cvmx_ciu_qlm_jtgc_s cn56xxp1;
+};
+
+union cvmx_ciu_qlm_jtgd {
+	uint64_t u64;
+	struct cvmx_ciu_qlm_jtgd_s {
+		uint64_t capture:1;
+		uint64_t shift:1;
+		uint64_t update:1;
+		uint64_t reserved_44_60:17;
+		uint64_t select:4;
+		uint64_t reserved_37_39:3;
+		uint64_t shft_cnt:5;
+		uint64_t shft_reg:32;
+	} s;
+	struct cvmx_ciu_qlm_jtgd_cn52xx {
+		uint64_t capture:1;
+		uint64_t shift:1;
+		uint64_t update:1;
+		uint64_t reserved_42_60:19;
+		uint64_t select:2;
+		uint64_t reserved_37_39:3;
+		uint64_t shft_cnt:5;
+		uint64_t shft_reg:32;
+	} cn52xx;
+	struct cvmx_ciu_qlm_jtgd_cn52xx cn52xxp1;
+	struct cvmx_ciu_qlm_jtgd_s cn56xx;
+	struct cvmx_ciu_qlm_jtgd_cn56xxp1 {
+		uint64_t capture:1;
+		uint64_t shift:1;
+		uint64_t update:1;
+		uint64_t reserved_37_60:24;
+		uint64_t shft_cnt:5;
+		uint64_t shft_reg:32;
+	} cn56xxp1;
+};
+
+union cvmx_ciu_soft_bist {
+	uint64_t u64;
+	struct cvmx_ciu_soft_bist_s {
+		uint64_t reserved_1_63:63;
+		uint64_t soft_bist:1;
+	} s;
+	struct cvmx_ciu_soft_bist_s cn30xx;
+	struct cvmx_ciu_soft_bist_s cn31xx;
+	struct cvmx_ciu_soft_bist_s cn38xx;
+	struct cvmx_ciu_soft_bist_s cn38xxp2;
+	struct cvmx_ciu_soft_bist_s cn50xx;
+	struct cvmx_ciu_soft_bist_s cn52xx;
+	struct cvmx_ciu_soft_bist_s cn52xxp1;
+	struct cvmx_ciu_soft_bist_s cn56xx;
+	struct cvmx_ciu_soft_bist_s cn56xxp1;
+	struct cvmx_ciu_soft_bist_s cn58xx;
+	struct cvmx_ciu_soft_bist_s cn58xxp1;
+};
+
+union cvmx_ciu_soft_prst {
+	uint64_t u64;
+	struct cvmx_ciu_soft_prst_s {
+		uint64_t reserved_3_63:61;
+		uint64_t host64:1;
+		uint64_t npi:1;
+		uint64_t soft_prst:1;
+	} s;
+	struct cvmx_ciu_soft_prst_s cn30xx;
+	struct cvmx_ciu_soft_prst_s cn31xx;
+	struct cvmx_ciu_soft_prst_s cn38xx;
+	struct cvmx_ciu_soft_prst_s cn38xxp2;
+	struct cvmx_ciu_soft_prst_s cn50xx;
+	struct cvmx_ciu_soft_prst_cn52xx {
+		uint64_t reserved_1_63:63;
+		uint64_t soft_prst:1;
+	} cn52xx;
+	struct cvmx_ciu_soft_prst_cn52xx cn52xxp1;
+	struct cvmx_ciu_soft_prst_cn52xx cn56xx;
+	struct cvmx_ciu_soft_prst_cn52xx cn56xxp1;
+	struct cvmx_ciu_soft_prst_s cn58xx;
+	struct cvmx_ciu_soft_prst_s cn58xxp1;
+};
+
+union cvmx_ciu_soft_prst1 {
+	uint64_t u64;
+	struct cvmx_ciu_soft_prst1_s {
+		uint64_t reserved_1_63:63;
+		uint64_t soft_prst:1;
+	} s;
+	struct cvmx_ciu_soft_prst1_s cn52xx;
+	struct cvmx_ciu_soft_prst1_s cn52xxp1;
+	struct cvmx_ciu_soft_prst1_s cn56xx;
+	struct cvmx_ciu_soft_prst1_s cn56xxp1;
+};
+
+union cvmx_ciu_soft_rst {
+	uint64_t u64;
+	struct cvmx_ciu_soft_rst_s {
+		uint64_t reserved_1_63:63;
+		uint64_t soft_rst:1;
+	} s;
+	struct cvmx_ciu_soft_rst_s cn30xx;
+	struct cvmx_ciu_soft_rst_s cn31xx;
+	struct cvmx_ciu_soft_rst_s cn38xx;
+	struct cvmx_ciu_soft_rst_s cn38xxp2;
+	struct cvmx_ciu_soft_rst_s cn50xx;
+	struct cvmx_ciu_soft_rst_s cn52xx;
+	struct cvmx_ciu_soft_rst_s cn52xxp1;
+	struct cvmx_ciu_soft_rst_s cn56xx;
+	struct cvmx_ciu_soft_rst_s cn56xxp1;
+	struct cvmx_ciu_soft_rst_s cn58xx;
+	struct cvmx_ciu_soft_rst_s cn58xxp1;
+};
+
+union cvmx_ciu_timx {
+	uint64_t u64;
+	struct cvmx_ciu_timx_s {
+		uint64_t reserved_37_63:27;
+		uint64_t one_shot:1;
+		uint64_t len:36;
+	} s;
+	struct cvmx_ciu_timx_s cn30xx;
+	struct cvmx_ciu_timx_s cn31xx;
+	struct cvmx_ciu_timx_s cn38xx;
+	struct cvmx_ciu_timx_s cn38xxp2;
+	struct cvmx_ciu_timx_s cn50xx;
+	struct cvmx_ciu_timx_s cn52xx;
+	struct cvmx_ciu_timx_s cn52xxp1;
+	struct cvmx_ciu_timx_s cn56xx;
+	struct cvmx_ciu_timx_s cn56xxp1;
+	struct cvmx_ciu_timx_s cn58xx;
+	struct cvmx_ciu_timx_s cn58xxp1;
+};
+
+union cvmx_ciu_wdogx {
+	uint64_t u64;
+	struct cvmx_ciu_wdogx_s {
+		uint64_t reserved_46_63:18;
+		uint64_t gstopen:1;
+		uint64_t dstop:1;
+		uint64_t cnt:24;
+		uint64_t len:16;
+		uint64_t state:2;
+		uint64_t mode:2;
+	} s;
+	struct cvmx_ciu_wdogx_s cn30xx;
+	struct cvmx_ciu_wdogx_s cn31xx;
+	struct cvmx_ciu_wdogx_s cn38xx;
+	struct cvmx_ciu_wdogx_s cn38xxp2;
+	struct cvmx_ciu_wdogx_s cn50xx;
+	struct cvmx_ciu_wdogx_s cn52xx;
+	struct cvmx_ciu_wdogx_s cn52xxp1;
+	struct cvmx_ciu_wdogx_s cn56xx;
+	struct cvmx_ciu_wdogx_s cn56xxp1;
+	struct cvmx_ciu_wdogx_s cn58xx;
+	struct cvmx_ciu_wdogx_s cn58xxp1;
+};
+
+#endif
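The unions above follow a common pattern: a raw u64 view plus per-chip bitfield views of the same CSR. A minimal sketch of programming one of the CIU timers this way; cvmx_read_csr()/cvmx_write_csr() are the CSR accessors assumed to come from elsewhere in the Octeon support code, and example_arm_ciu_timer() is a hypothetical helper:

	static void example_arm_ciu_timer(int timer, uint64_t cycles)
	{
		union cvmx_ciu_timx tim;

		tim.u64 = cvmx_read_csr(CVMX_CIU_TIMX(timer));
		tim.s.one_shot = 1;	/* fire once instead of periodically */
		tim.s.len = cycles;	/* 36-bit timeout value */
		cvmx_write_csr(CVMX_CIU_TIMX(timer), tim.u64);
	}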
diff --git a/arch/mips/include/asm/octeon/cvmx-gpio-defs.h b/arch/mips/include/asm/octeon/cvmx-gpio-defs.h
new file mode 100644
index 0000000..5fdd6ba
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-gpio-defs.h
@@ -0,0 +1,219 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_GPIO_DEFS_H__
+#define __CVMX_GPIO_DEFS_H__
+
+#define CVMX_GPIO_BIT_CFGX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000000800ull + (((offset) & 15) * 8))
+#define CVMX_GPIO_BOOT_ENA \
+	 CVMX_ADD_IO_SEG(0x00010700000008A8ull)
+#define CVMX_GPIO_CLK_GENX(offset) \
+	 CVMX_ADD_IO_SEG(0x00010700000008C0ull + (((offset) & 3) * 8))
+#define CVMX_GPIO_DBG_ENA \
+	 CVMX_ADD_IO_SEG(0x00010700000008A0ull)
+#define CVMX_GPIO_INT_CLR \
+	 CVMX_ADD_IO_SEG(0x0001070000000898ull)
+#define CVMX_GPIO_RX_DAT \
+	 CVMX_ADD_IO_SEG(0x0001070000000880ull)
+#define CVMX_GPIO_TX_CLR \
+	 CVMX_ADD_IO_SEG(0x0001070000000890ull)
+#define CVMX_GPIO_TX_SET \
+	 CVMX_ADD_IO_SEG(0x0001070000000888ull)
+#define CVMX_GPIO_XBIT_CFGX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001070000000900ull + (((offset) & 31) * 8) - 8 * 16)
+
+union cvmx_gpio_bit_cfgx {
+	uint64_t u64;
+	struct cvmx_gpio_bit_cfgx_s {
+		uint64_t reserved_15_63:49;
+		uint64_t clk_gen:1;
+		uint64_t clk_sel:2;
+		uint64_t fil_sel:4;
+		uint64_t fil_cnt:4;
+		uint64_t int_type:1;
+		uint64_t int_en:1;
+		uint64_t rx_xor:1;
+		uint64_t tx_oe:1;
+	} s;
+	struct cvmx_gpio_bit_cfgx_cn30xx {
+		uint64_t reserved_12_63:52;
+		uint64_t fil_sel:4;
+		uint64_t fil_cnt:4;
+		uint64_t int_type:1;
+		uint64_t int_en:1;
+		uint64_t rx_xor:1;
+		uint64_t tx_oe:1;
+	} cn30xx;
+	struct cvmx_gpio_bit_cfgx_cn30xx cn31xx;
+	struct cvmx_gpio_bit_cfgx_cn30xx cn38xx;
+	struct cvmx_gpio_bit_cfgx_cn30xx cn38xxp2;
+	struct cvmx_gpio_bit_cfgx_cn30xx cn50xx;
+	struct cvmx_gpio_bit_cfgx_s cn52xx;
+	struct cvmx_gpio_bit_cfgx_s cn52xxp1;
+	struct cvmx_gpio_bit_cfgx_s cn56xx;
+	struct cvmx_gpio_bit_cfgx_s cn56xxp1;
+	struct cvmx_gpio_bit_cfgx_cn30xx cn58xx;
+	struct cvmx_gpio_bit_cfgx_cn30xx cn58xxp1;
+};
+
+union cvmx_gpio_boot_ena {
+	uint64_t u64;
+	struct cvmx_gpio_boot_ena_s {
+		uint64_t reserved_12_63:52;
+		uint64_t boot_ena:4;
+		uint64_t reserved_0_7:8;
+	} s;
+	struct cvmx_gpio_boot_ena_s cn30xx;
+	struct cvmx_gpio_boot_ena_s cn31xx;
+	struct cvmx_gpio_boot_ena_s cn50xx;
+};
+
+union cvmx_gpio_clk_genx {
+	uint64_t u64;
+	struct cvmx_gpio_clk_genx_s {
+		uint64_t reserved_32_63:32;
+		uint64_t n:32;
+	} s;
+	struct cvmx_gpio_clk_genx_s cn52xx;
+	struct cvmx_gpio_clk_genx_s cn52xxp1;
+	struct cvmx_gpio_clk_genx_s cn56xx;
+	struct cvmx_gpio_clk_genx_s cn56xxp1;
+};
+
+union cvmx_gpio_dbg_ena {
+	uint64_t u64;
+	struct cvmx_gpio_dbg_ena_s {
+		uint64_t reserved_21_63:43;
+		uint64_t dbg_ena:21;
+	} s;
+	struct cvmx_gpio_dbg_ena_s cn30xx;
+	struct cvmx_gpio_dbg_ena_s cn31xx;
+	struct cvmx_gpio_dbg_ena_s cn50xx;
+};
+
+union cvmx_gpio_int_clr {
+	uint64_t u64;
+	struct cvmx_gpio_int_clr_s {
+		uint64_t reserved_16_63:48;
+		uint64_t type:16;
+	} s;
+	struct cvmx_gpio_int_clr_s cn30xx;
+	struct cvmx_gpio_int_clr_s cn31xx;
+	struct cvmx_gpio_int_clr_s cn38xx;
+	struct cvmx_gpio_int_clr_s cn38xxp2;
+	struct cvmx_gpio_int_clr_s cn50xx;
+	struct cvmx_gpio_int_clr_s cn52xx;
+	struct cvmx_gpio_int_clr_s cn52xxp1;
+	struct cvmx_gpio_int_clr_s cn56xx;
+	struct cvmx_gpio_int_clr_s cn56xxp1;
+	struct cvmx_gpio_int_clr_s cn58xx;
+	struct cvmx_gpio_int_clr_s cn58xxp1;
+};
+
+union cvmx_gpio_rx_dat {
+	uint64_t u64;
+	struct cvmx_gpio_rx_dat_s {
+		uint64_t reserved_24_63:40;
+		uint64_t dat:24;
+	} s;
+	struct cvmx_gpio_rx_dat_s cn30xx;
+	struct cvmx_gpio_rx_dat_s cn31xx;
+	struct cvmx_gpio_rx_dat_cn38xx {
+		uint64_t reserved_16_63:48;
+		uint64_t dat:16;
+	} cn38xx;
+	struct cvmx_gpio_rx_dat_cn38xx cn38xxp2;
+	struct cvmx_gpio_rx_dat_s cn50xx;
+	struct cvmx_gpio_rx_dat_cn38xx cn52xx;
+	struct cvmx_gpio_rx_dat_cn38xx cn52xxp1;
+	struct cvmx_gpio_rx_dat_cn38xx cn56xx;
+	struct cvmx_gpio_rx_dat_cn38xx cn56xxp1;
+	struct cvmx_gpio_rx_dat_cn38xx cn58xx;
+	struct cvmx_gpio_rx_dat_cn38xx cn58xxp1;
+};
+
+union cvmx_gpio_tx_clr {
+	uint64_t u64;
+	struct cvmx_gpio_tx_clr_s {
+		uint64_t reserved_24_63:40;
+		uint64_t clr:24;
+	} s;
+	struct cvmx_gpio_tx_clr_s cn30xx;
+	struct cvmx_gpio_tx_clr_s cn31xx;
+	struct cvmx_gpio_tx_clr_cn38xx {
+		uint64_t reserved_16_63:48;
+		uint64_t clr:16;
+	} cn38xx;
+	struct cvmx_gpio_tx_clr_cn38xx cn38xxp2;
+	struct cvmx_gpio_tx_clr_s cn50xx;
+	struct cvmx_gpio_tx_clr_cn38xx cn52xx;
+	struct cvmx_gpio_tx_clr_cn38xx cn52xxp1;
+	struct cvmx_gpio_tx_clr_cn38xx cn56xx;
+	struct cvmx_gpio_tx_clr_cn38xx cn56xxp1;
+	struct cvmx_gpio_tx_clr_cn38xx cn58xx;
+	struct cvmx_gpio_tx_clr_cn38xx cn58xxp1;
+};
+
+union cvmx_gpio_tx_set {
+	uint64_t u64;
+	struct cvmx_gpio_tx_set_s {
+		uint64_t reserved_24_63:40;
+		uint64_t set:24;
+	} s;
+	struct cvmx_gpio_tx_set_s cn30xx;
+	struct cvmx_gpio_tx_set_s cn31xx;
+	struct cvmx_gpio_tx_set_cn38xx {
+		uint64_t reserved_16_63:48;
+		uint64_t set:16;
+	} cn38xx;
+	struct cvmx_gpio_tx_set_cn38xx cn38xxp2;
+	struct cvmx_gpio_tx_set_s cn50xx;
+	struct cvmx_gpio_tx_set_cn38xx cn52xx;
+	struct cvmx_gpio_tx_set_cn38xx cn52xxp1;
+	struct cvmx_gpio_tx_set_cn38xx cn56xx;
+	struct cvmx_gpio_tx_set_cn38xx cn56xxp1;
+	struct cvmx_gpio_tx_set_cn38xx cn58xx;
+	struct cvmx_gpio_tx_set_cn38xx cn58xxp1;
+};
+
+union cvmx_gpio_xbit_cfgx {
+	uint64_t u64;
+	struct cvmx_gpio_xbit_cfgx_s {
+		uint64_t reserved_12_63:52;
+		uint64_t fil_sel:4;
+		uint64_t fil_cnt:4;
+		uint64_t reserved_2_3:2;
+		uint64_t rx_xor:1;
+		uint64_t tx_oe:1;
+	} s;
+	struct cvmx_gpio_xbit_cfgx_s cn30xx;
+	struct cvmx_gpio_xbit_cfgx_s cn31xx;
+	struct cvmx_gpio_xbit_cfgx_s cn50xx;
+};
+
+#endif
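The GPIO block uses the same layout scheme; a hedged sketch that configures one pin as an output and raises it, again assuming the common cvmx_read_csr()/cvmx_write_csr() accessors (example_gpio_drive_high() is a hypothetical helper):

	static void example_gpio_drive_high(int pin)
	{
		union cvmx_gpio_bit_cfgx cfg;

		cfg.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(pin));
		cfg.s.tx_oe = 1;	/* enable the output driver */
		cfg.s.rx_xor = 0;	/* do not invert the pin */
		cvmx_write_csr(CVMX_GPIO_BIT_CFGX(pin), cfg.u64);

		/* GPIO_TX_SET is write-one-to-set: bit N drives pin N high. */
		cvmx_write_csr(CVMX_GPIO_TX_SET, 1ull << pin);
	}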
diff --git a/arch/mips/include/asm/octeon/cvmx-iob-defs.h b/arch/mips/include/asm/octeon/cvmx-iob-defs.h
new file mode 100644
index 0000000..0ee36ba
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-iob-defs.h
@@ -0,0 +1,530 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_IOB_DEFS_H__
+#define __CVMX_IOB_DEFS_H__
+
+#define CVMX_IOB_BIST_STATUS \
+	 CVMX_ADD_IO_SEG(0x00011800F00007F8ull)
+#define CVMX_IOB_CTL_STATUS \
+	 CVMX_ADD_IO_SEG(0x00011800F0000050ull)
+#define CVMX_IOB_DWB_PRI_CNT \
+	 CVMX_ADD_IO_SEG(0x00011800F0000028ull)
+#define CVMX_IOB_FAU_TIMEOUT \
+	 CVMX_ADD_IO_SEG(0x00011800F0000000ull)
+#define CVMX_IOB_I2C_PRI_CNT \
+	 CVMX_ADD_IO_SEG(0x00011800F0000010ull)
+#define CVMX_IOB_INB_CONTROL_MATCH \
+	 CVMX_ADD_IO_SEG(0x00011800F0000078ull)
+#define CVMX_IOB_INB_CONTROL_MATCH_ENB \
+	 CVMX_ADD_IO_SEG(0x00011800F0000088ull)
+#define CVMX_IOB_INB_DATA_MATCH \
+	 CVMX_ADD_IO_SEG(0x00011800F0000070ull)
+#define CVMX_IOB_INB_DATA_MATCH_ENB \
+	 CVMX_ADD_IO_SEG(0x00011800F0000080ull)
+#define CVMX_IOB_INT_ENB \
+	 CVMX_ADD_IO_SEG(0x00011800F0000060ull)
+#define CVMX_IOB_INT_SUM \
+	 CVMX_ADD_IO_SEG(0x00011800F0000058ull)
+#define CVMX_IOB_N2C_L2C_PRI_CNT \
+	 CVMX_ADD_IO_SEG(0x00011800F0000020ull)
+#define CVMX_IOB_N2C_RSP_PRI_CNT \
+	 CVMX_ADD_IO_SEG(0x00011800F0000008ull)
+#define CVMX_IOB_OUTB_COM_PRI_CNT \
+	 CVMX_ADD_IO_SEG(0x00011800F0000040ull)
+#define CVMX_IOB_OUTB_CONTROL_MATCH \
+	 CVMX_ADD_IO_SEG(0x00011800F0000098ull)
+#define CVMX_IOB_OUTB_CONTROL_MATCH_ENB \
+	 CVMX_ADD_IO_SEG(0x00011800F00000A8ull)
+#define CVMX_IOB_OUTB_DATA_MATCH \
+	 CVMX_ADD_IO_SEG(0x00011800F0000090ull)
+#define CVMX_IOB_OUTB_DATA_MATCH_ENB \
+	 CVMX_ADD_IO_SEG(0x00011800F00000A0ull)
+#define CVMX_IOB_OUTB_FPA_PRI_CNT \
+	 CVMX_ADD_IO_SEG(0x00011800F0000048ull)
+#define CVMX_IOB_OUTB_REQ_PRI_CNT \
+	 CVMX_ADD_IO_SEG(0x00011800F0000038ull)
+#define CVMX_IOB_P2C_REQ_PRI_CNT \
+	 CVMX_ADD_IO_SEG(0x00011800F0000018ull)
+#define CVMX_IOB_PKT_ERR \
+	 CVMX_ADD_IO_SEG(0x00011800F0000068ull)
+
+union cvmx_iob_bist_status {
+	uint64_t u64;
+	struct cvmx_iob_bist_status_s {
+		uint64_t reserved_18_63:46;
+		uint64_t icnrcb:1;
+		uint64_t icr0:1;
+		uint64_t icr1:1;
+		uint64_t icnr1:1;
+		uint64_t icnr0:1;
+		uint64_t ibdr0:1;
+		uint64_t ibdr1:1;
+		uint64_t ibr0:1;
+		uint64_t ibr1:1;
+		uint64_t icnrt:1;
+		uint64_t ibrq0:1;
+		uint64_t ibrq1:1;
+		uint64_t icrn0:1;
+		uint64_t icrn1:1;
+		uint64_t icrp0:1;
+		uint64_t icrp1:1;
+		uint64_t ibd:1;
+		uint64_t icd:1;
+	} s;
+	struct cvmx_iob_bist_status_s cn30xx;
+	struct cvmx_iob_bist_status_s cn31xx;
+	struct cvmx_iob_bist_status_s cn38xx;
+	struct cvmx_iob_bist_status_s cn38xxp2;
+	struct cvmx_iob_bist_status_s cn50xx;
+	struct cvmx_iob_bist_status_s cn52xx;
+	struct cvmx_iob_bist_status_s cn52xxp1;
+	struct cvmx_iob_bist_status_s cn56xx;
+	struct cvmx_iob_bist_status_s cn56xxp1;
+	struct cvmx_iob_bist_status_s cn58xx;
+	struct cvmx_iob_bist_status_s cn58xxp1;
+};
+
+union cvmx_iob_ctl_status {
+	uint64_t u64;
+	struct cvmx_iob_ctl_status_s {
+		uint64_t reserved_5_63:59;
+		uint64_t outb_mat:1;
+		uint64_t inb_mat:1;
+		uint64_t pko_enb:1;
+		uint64_t dwb_enb:1;
+		uint64_t fau_end:1;
+	} s;
+	struct cvmx_iob_ctl_status_s cn30xx;
+	struct cvmx_iob_ctl_status_s cn31xx;
+	struct cvmx_iob_ctl_status_s cn38xx;
+	struct cvmx_iob_ctl_status_s cn38xxp2;
+	struct cvmx_iob_ctl_status_s cn50xx;
+	struct cvmx_iob_ctl_status_s cn52xx;
+	struct cvmx_iob_ctl_status_s cn52xxp1;
+	struct cvmx_iob_ctl_status_s cn56xx;
+	struct cvmx_iob_ctl_status_s cn56xxp1;
+	struct cvmx_iob_ctl_status_s cn58xx;
+	struct cvmx_iob_ctl_status_s cn58xxp1;
+};
+
+union cvmx_iob_dwb_pri_cnt {
+	uint64_t u64;
+	struct cvmx_iob_dwb_pri_cnt_s {
+		uint64_t reserved_16_63:48;
+		uint64_t cnt_enb:1;
+		uint64_t cnt_val:15;
+	} s;
+	struct cvmx_iob_dwb_pri_cnt_s cn38xx;
+	struct cvmx_iob_dwb_pri_cnt_s cn38xxp2;
+	struct cvmx_iob_dwb_pri_cnt_s cn52xx;
+	struct cvmx_iob_dwb_pri_cnt_s cn52xxp1;
+	struct cvmx_iob_dwb_pri_cnt_s cn56xx;
+	struct cvmx_iob_dwb_pri_cnt_s cn56xxp1;
+	struct cvmx_iob_dwb_pri_cnt_s cn58xx;
+	struct cvmx_iob_dwb_pri_cnt_s cn58xxp1;
+};
+
+union cvmx_iob_fau_timeout {
+	uint64_t u64;
+	struct cvmx_iob_fau_timeout_s {
+		uint64_t reserved_13_63:51;
+		uint64_t tout_enb:1;
+		uint64_t tout_val:12;
+	} s;
+	struct cvmx_iob_fau_timeout_s cn30xx;
+	struct cvmx_iob_fau_timeout_s cn31xx;
+	struct cvmx_iob_fau_timeout_s cn38xx;
+	struct cvmx_iob_fau_timeout_s cn38xxp2;
+	struct cvmx_iob_fau_timeout_s cn50xx;
+	struct cvmx_iob_fau_timeout_s cn52xx;
+	struct cvmx_iob_fau_timeout_s cn52xxp1;
+	struct cvmx_iob_fau_timeout_s cn56xx;
+	struct cvmx_iob_fau_timeout_s cn56xxp1;
+	struct cvmx_iob_fau_timeout_s cn58xx;
+	struct cvmx_iob_fau_timeout_s cn58xxp1;
+};
+
+union cvmx_iob_i2c_pri_cnt {
+	uint64_t u64;
+	struct cvmx_iob_i2c_pri_cnt_s {
+		uint64_t reserved_16_63:48;
+		uint64_t cnt_enb:1;
+		uint64_t cnt_val:15;
+	} s;
+	struct cvmx_iob_i2c_pri_cnt_s cn38xx;
+	struct cvmx_iob_i2c_pri_cnt_s cn38xxp2;
+	struct cvmx_iob_i2c_pri_cnt_s cn52xx;
+	struct cvmx_iob_i2c_pri_cnt_s cn52xxp1;
+	struct cvmx_iob_i2c_pri_cnt_s cn56xx;
+	struct cvmx_iob_i2c_pri_cnt_s cn56xxp1;
+	struct cvmx_iob_i2c_pri_cnt_s cn58xx;
+	struct cvmx_iob_i2c_pri_cnt_s cn58xxp1;
+};
+
+union cvmx_iob_inb_control_match {
+	uint64_t u64;
+	struct cvmx_iob_inb_control_match_s {
+		uint64_t reserved_29_63:35;
+		uint64_t mask:8;
+		uint64_t opc:4;
+		uint64_t dst:9;
+		uint64_t src:8;
+	} s;
+	struct cvmx_iob_inb_control_match_s cn30xx;
+	struct cvmx_iob_inb_control_match_s cn31xx;
+	struct cvmx_iob_inb_control_match_s cn38xx;
+	struct cvmx_iob_inb_control_match_s cn38xxp2;
+	struct cvmx_iob_inb_control_match_s cn50xx;
+	struct cvmx_iob_inb_control_match_s cn52xx;
+	struct cvmx_iob_inb_control_match_s cn52xxp1;
+	struct cvmx_iob_inb_control_match_s cn56xx;
+	struct cvmx_iob_inb_control_match_s cn56xxp1;
+	struct cvmx_iob_inb_control_match_s cn58xx;
+	struct cvmx_iob_inb_control_match_s cn58xxp1;
+};
+
+union cvmx_iob_inb_control_match_enb {
+	uint64_t u64;
+	struct cvmx_iob_inb_control_match_enb_s {
+		uint64_t reserved_29_63:35;
+		uint64_t mask:8;
+		uint64_t opc:4;
+		uint64_t dst:9;
+		uint64_t src:8;
+	} s;
+	struct cvmx_iob_inb_control_match_enb_s cn30xx;
+	struct cvmx_iob_inb_control_match_enb_s cn31xx;
+	struct cvmx_iob_inb_control_match_enb_s cn38xx;
+	struct cvmx_iob_inb_control_match_enb_s cn38xxp2;
+	struct cvmx_iob_inb_control_match_enb_s cn50xx;
+	struct cvmx_iob_inb_control_match_enb_s cn52xx;
+	struct cvmx_iob_inb_control_match_enb_s cn52xxp1;
+	struct cvmx_iob_inb_control_match_enb_s cn56xx;
+	struct cvmx_iob_inb_control_match_enb_s cn56xxp1;
+	struct cvmx_iob_inb_control_match_enb_s cn58xx;
+	struct cvmx_iob_inb_control_match_enb_s cn58xxp1;
+};
+
+union cvmx_iob_inb_data_match {
+	uint64_t u64;
+	struct cvmx_iob_inb_data_match_s {
+		uint64_t data:64;
+	} s;
+	struct cvmx_iob_inb_data_match_s cn30xx;
+	struct cvmx_iob_inb_data_match_s cn31xx;
+	struct cvmx_iob_inb_data_match_s cn38xx;
+	struct cvmx_iob_inb_data_match_s cn38xxp2;
+	struct cvmx_iob_inb_data_match_s cn50xx;
+	struct cvmx_iob_inb_data_match_s cn52xx;
+	struct cvmx_iob_inb_data_match_s cn52xxp1;
+	struct cvmx_iob_inb_data_match_s cn56xx;
+	struct cvmx_iob_inb_data_match_s cn56xxp1;
+	struct cvmx_iob_inb_data_match_s cn58xx;
+	struct cvmx_iob_inb_data_match_s cn58xxp1;
+};
+
+union cvmx_iob_inb_data_match_enb {
+	uint64_t u64;
+	struct cvmx_iob_inb_data_match_enb_s {
+		uint64_t data:64;
+	} s;
+	struct cvmx_iob_inb_data_match_enb_s cn30xx;
+	struct cvmx_iob_inb_data_match_enb_s cn31xx;
+	struct cvmx_iob_inb_data_match_enb_s cn38xx;
+	struct cvmx_iob_inb_data_match_enb_s cn38xxp2;
+	struct cvmx_iob_inb_data_match_enb_s cn50xx;
+	struct cvmx_iob_inb_data_match_enb_s cn52xx;
+	struct cvmx_iob_inb_data_match_enb_s cn52xxp1;
+	struct cvmx_iob_inb_data_match_enb_s cn56xx;
+	struct cvmx_iob_inb_data_match_enb_s cn56xxp1;
+	struct cvmx_iob_inb_data_match_enb_s cn58xx;
+	struct cvmx_iob_inb_data_match_enb_s cn58xxp1;
+};
+
+union cvmx_iob_int_enb {
+	uint64_t u64;
+	struct cvmx_iob_int_enb_s {
+		uint64_t reserved_6_63:58;
+		uint64_t p_dat:1;
+		uint64_t np_dat:1;
+		uint64_t p_eop:1;
+		uint64_t p_sop:1;
+		uint64_t np_eop:1;
+		uint64_t np_sop:1;
+	} s;
+	struct cvmx_iob_int_enb_cn30xx {
+		uint64_t reserved_4_63:60;
+		uint64_t p_eop:1;
+		uint64_t p_sop:1;
+		uint64_t np_eop:1;
+		uint64_t np_sop:1;
+	} cn30xx;
+	struct cvmx_iob_int_enb_cn30xx cn31xx;
+	struct cvmx_iob_int_enb_cn30xx cn38xx;
+	struct cvmx_iob_int_enb_cn30xx cn38xxp2;
+	struct cvmx_iob_int_enb_s cn50xx;
+	struct cvmx_iob_int_enb_s cn52xx;
+	struct cvmx_iob_int_enb_s cn52xxp1;
+	struct cvmx_iob_int_enb_s cn56xx;
+	struct cvmx_iob_int_enb_s cn56xxp1;
+	struct cvmx_iob_int_enb_s cn58xx;
+	struct cvmx_iob_int_enb_s cn58xxp1;
+};
+
+union cvmx_iob_int_sum {
+	uint64_t u64;
+	struct cvmx_iob_int_sum_s {
+		uint64_t reserved_6_63:58;
+		uint64_t p_dat:1;
+		uint64_t np_dat:1;
+		uint64_t p_eop:1;
+		uint64_t p_sop:1;
+		uint64_t np_eop:1;
+		uint64_t np_sop:1;
+	} s;
+	struct cvmx_iob_int_sum_cn30xx {
+		uint64_t reserved_4_63:60;
+		uint64_t p_eop:1;
+		uint64_t p_sop:1;
+		uint64_t np_eop:1;
+		uint64_t np_sop:1;
+	} cn30xx;
+	struct cvmx_iob_int_sum_cn30xx cn31xx;
+	struct cvmx_iob_int_sum_cn30xx cn38xx;
+	struct cvmx_iob_int_sum_cn30xx cn38xxp2;
+	struct cvmx_iob_int_sum_s cn50xx;
+	struct cvmx_iob_int_sum_s cn52xx;
+	struct cvmx_iob_int_sum_s cn52xxp1;
+	struct cvmx_iob_int_sum_s cn56xx;
+	struct cvmx_iob_int_sum_s cn56xxp1;
+	struct cvmx_iob_int_sum_s cn58xx;
+	struct cvmx_iob_int_sum_s cn58xxp1;
+};
+
+union cvmx_iob_n2c_l2c_pri_cnt {
+	uint64_t u64;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s {
+		uint64_t reserved_16_63:48;
+		uint64_t cnt_enb:1;
+		uint64_t cnt_val:15;
+	} s;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn38xx;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn38xxp2;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn52xx;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn52xxp1;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xx;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xxp1;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xx;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xxp1;
+};
+
+union cvmx_iob_n2c_rsp_pri_cnt {
+	uint64_t u64;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s {
+		uint64_t reserved_16_63:48;
+		uint64_t cnt_enb:1;
+		uint64_t cnt_val:15;
+	} s;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn38xx;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn38xxp2;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn52xx;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn52xxp1;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xx;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xxp1;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xx;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xxp1;
+};
+
+union cvmx_iob_outb_com_pri_cnt {
+	uint64_t u64;
+	struct cvmx_iob_outb_com_pri_cnt_s {
+		uint64_t reserved_16_63:48;
+		uint64_t cnt_enb:1;
+		uint64_t cnt_val:15;
+	} s;
+	struct cvmx_iob_outb_com_pri_cnt_s cn38xx;
+	struct cvmx_iob_outb_com_pri_cnt_s cn38xxp2;
+	struct cvmx_iob_outb_com_pri_cnt_s cn52xx;
+	struct cvmx_iob_outb_com_pri_cnt_s cn52xxp1;
+	struct cvmx_iob_outb_com_pri_cnt_s cn56xx;
+	struct cvmx_iob_outb_com_pri_cnt_s cn56xxp1;
+	struct cvmx_iob_outb_com_pri_cnt_s cn58xx;
+	struct cvmx_iob_outb_com_pri_cnt_s cn58xxp1;
+};
+
+union cvmx_iob_outb_control_match {
+	uint64_t u64;
+	struct cvmx_iob_outb_control_match_s {
+		uint64_t reserved_26_63:38;
+		uint64_t mask:8;
+		uint64_t eot:1;
+		uint64_t dst:8;
+		uint64_t src:9;
+	} s;
+	struct cvmx_iob_outb_control_match_s cn30xx;
+	struct cvmx_iob_outb_control_match_s cn31xx;
+	struct cvmx_iob_outb_control_match_s cn38xx;
+	struct cvmx_iob_outb_control_match_s cn38xxp2;
+	struct cvmx_iob_outb_control_match_s cn50xx;
+	struct cvmx_iob_outb_control_match_s cn52xx;
+	struct cvmx_iob_outb_control_match_s cn52xxp1;
+	struct cvmx_iob_outb_control_match_s cn56xx;
+	struct cvmx_iob_outb_control_match_s cn56xxp1;
+	struct cvmx_iob_outb_control_match_s cn58xx;
+	struct cvmx_iob_outb_control_match_s cn58xxp1;
+};
+
+union cvmx_iob_outb_control_match_enb {
+	uint64_t u64;
+	struct cvmx_iob_outb_control_match_enb_s {
+		uint64_t reserved_26_63:38;
+		uint64_t mask:8;
+		uint64_t eot:1;
+		uint64_t dst:8;
+		uint64_t src:9;
+	} s;
+	struct cvmx_iob_outb_control_match_enb_s cn30xx;
+	struct cvmx_iob_outb_control_match_enb_s cn31xx;
+	struct cvmx_iob_outb_control_match_enb_s cn38xx;
+	struct cvmx_iob_outb_control_match_enb_s cn38xxp2;
+	struct cvmx_iob_outb_control_match_enb_s cn50xx;
+	struct cvmx_iob_outb_control_match_enb_s cn52xx;
+	struct cvmx_iob_outb_control_match_enb_s cn52xxp1;
+	struct cvmx_iob_outb_control_match_enb_s cn56xx;
+	struct cvmx_iob_outb_control_match_enb_s cn56xxp1;
+	struct cvmx_iob_outb_control_match_enb_s cn58xx;
+	struct cvmx_iob_outb_control_match_enb_s cn58xxp1;
+};
+
+union cvmx_iob_outb_data_match {
+	uint64_t u64;
+	struct cvmx_iob_outb_data_match_s {
+		uint64_t data:64;
+	} s;
+	struct cvmx_iob_outb_data_match_s cn30xx;
+	struct cvmx_iob_outb_data_match_s cn31xx;
+	struct cvmx_iob_outb_data_match_s cn38xx;
+	struct cvmx_iob_outb_data_match_s cn38xxp2;
+	struct cvmx_iob_outb_data_match_s cn50xx;
+	struct cvmx_iob_outb_data_match_s cn52xx;
+	struct cvmx_iob_outb_data_match_s cn52xxp1;
+	struct cvmx_iob_outb_data_match_s cn56xx;
+	struct cvmx_iob_outb_data_match_s cn56xxp1;
+	struct cvmx_iob_outb_data_match_s cn58xx;
+	struct cvmx_iob_outb_data_match_s cn58xxp1;
+};
+
+union cvmx_iob_outb_data_match_enb {
+	uint64_t u64;
+	struct cvmx_iob_outb_data_match_enb_s {
+		uint64_t data:64;
+	} s;
+	struct cvmx_iob_outb_data_match_enb_s cn30xx;
+	struct cvmx_iob_outb_data_match_enb_s cn31xx;
+	struct cvmx_iob_outb_data_match_enb_s cn38xx;
+	struct cvmx_iob_outb_data_match_enb_s cn38xxp2;
+	struct cvmx_iob_outb_data_match_enb_s cn50xx;
+	struct cvmx_iob_outb_data_match_enb_s cn52xx;
+	struct cvmx_iob_outb_data_match_enb_s cn52xxp1;
+	struct cvmx_iob_outb_data_match_enb_s cn56xx;
+	struct cvmx_iob_outb_data_match_enb_s cn56xxp1;
+	struct cvmx_iob_outb_data_match_enb_s cn58xx;
+	struct cvmx_iob_outb_data_match_enb_s cn58xxp1;
+};
+
+union cvmx_iob_outb_fpa_pri_cnt {
+	uint64_t u64;
+	struct cvmx_iob_outb_fpa_pri_cnt_s {
+		uint64_t reserved_16_63:48;
+		uint64_t cnt_enb:1;
+		uint64_t cnt_val:15;
+	} s;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn38xx;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn38xxp2;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn52xx;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn52xxp1;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn56xx;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn56xxp1;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn58xx;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn58xxp1;
+};
+
+union cvmx_iob_outb_req_pri_cnt {
+	uint64_t u64;
+	struct cvmx_iob_outb_req_pri_cnt_s {
+		uint64_t reserved_16_63:48;
+		uint64_t cnt_enb:1;
+		uint64_t cnt_val:15;
+	} s;
+	struct cvmx_iob_outb_req_pri_cnt_s cn38xx;
+	struct cvmx_iob_outb_req_pri_cnt_s cn38xxp2;
+	struct cvmx_iob_outb_req_pri_cnt_s cn52xx;
+	struct cvmx_iob_outb_req_pri_cnt_s cn52xxp1;
+	struct cvmx_iob_outb_req_pri_cnt_s cn56xx;
+	struct cvmx_iob_outb_req_pri_cnt_s cn56xxp1;
+	struct cvmx_iob_outb_req_pri_cnt_s cn58xx;
+	struct cvmx_iob_outb_req_pri_cnt_s cn58xxp1;
+};
+
+union cvmx_iob_p2c_req_pri_cnt {
+	uint64_t u64;
+	struct cvmx_iob_p2c_req_pri_cnt_s {
+		uint64_t reserved_16_63:48;
+		uint64_t cnt_enb:1;
+		uint64_t cnt_val:15;
+	} s;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn38xx;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn38xxp2;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn52xx;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn52xxp1;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn56xx;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn56xxp1;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn58xx;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn58xxp1;
+};
+
+union cvmx_iob_pkt_err {
+	uint64_t u64;
+	struct cvmx_iob_pkt_err_s {
+		uint64_t reserved_6_63:58;
+		uint64_t port:6;
+	} s;
+	struct cvmx_iob_pkt_err_s cn30xx;
+	struct cvmx_iob_pkt_err_s cn31xx;
+	struct cvmx_iob_pkt_err_s cn38xx;
+	struct cvmx_iob_pkt_err_s cn38xxp2;
+	struct cvmx_iob_pkt_err_s cn50xx;
+	struct cvmx_iob_pkt_err_s cn52xx;
+	struct cvmx_iob_pkt_err_s cn52xxp1;
+	struct cvmx_iob_pkt_err_s cn56xx;
+	struct cvmx_iob_pkt_err_s cn56xxp1;
+	struct cvmx_iob_pkt_err_s cn58xx;
+	struct cvmx_iob_pkt_err_s cn58xxp1;
+};
+
+#endif
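
Each cvmx_iob_* union follows the same convention, so a status register can be read once as u64 and then decoded through whichever layout applies. A hedged sketch, not part of this patch, assuming cvmx_read_csr() from asm/octeon/cvmx.h is the usual Octeon CSR read accessor:

#include <asm/octeon/cvmx.h>		/* assumed to provide cvmx_read_csr() */
#include <asm/octeon/cvmx-iob-defs.h>

/* Illustrative only: check whether the NP-SOP error bit is latched in IOB_INT_SUM. */
static inline int example_iob_np_sop_pending(void)
{
	union cvmx_iob_int_sum sum;

	sum.u64 = cvmx_read_csr(CVMX_IOB_INT_SUM);	/* raw 64-bit CSR read */
	return sum.s.np_sop;				/* decode via the superset 's' view */
}
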
diff --git a/arch/mips/include/asm/octeon/cvmx-ipd-defs.h b/arch/mips/include/asm/octeon/cvmx-ipd-defs.h
new file mode 100644
index 0000000..f8b8fc6
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-ipd-defs.h
@@ -0,0 +1,877 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_IPD_DEFS_H__
+#define __CVMX_IPD_DEFS_H__
+
+#define CVMX_IPD_1ST_MBUFF_SKIP \
+	 CVMX_ADD_IO_SEG(0x00014F0000000000ull)
+#define CVMX_IPD_1st_NEXT_PTR_BACK \
+	 CVMX_ADD_IO_SEG(0x00014F0000000150ull)
+#define CVMX_IPD_2nd_NEXT_PTR_BACK \
+	 CVMX_ADD_IO_SEG(0x00014F0000000158ull)
+#define CVMX_IPD_BIST_STATUS \
+	 CVMX_ADD_IO_SEG(0x00014F00000007F8ull)
+#define CVMX_IPD_BP_PRT_RED_END \
+	 CVMX_ADD_IO_SEG(0x00014F0000000328ull)
+#define CVMX_IPD_CLK_COUNT \
+	 CVMX_ADD_IO_SEG(0x00014F0000000338ull)
+#define CVMX_IPD_CTL_STATUS \
+	 CVMX_ADD_IO_SEG(0x00014F0000000018ull)
+#define CVMX_IPD_INT_ENB \
+	 CVMX_ADD_IO_SEG(0x00014F0000000160ull)
+#define CVMX_IPD_INT_SUM \
+	 CVMX_ADD_IO_SEG(0x00014F0000000168ull)
+#define CVMX_IPD_NOT_1ST_MBUFF_SKIP \
+	 CVMX_ADD_IO_SEG(0x00014F0000000008ull)
+#define CVMX_IPD_PACKET_MBUFF_SIZE \
+	 CVMX_ADD_IO_SEG(0x00014F0000000010ull)
+#define CVMX_IPD_PKT_PTR_VALID \
+	 CVMX_ADD_IO_SEG(0x00014F0000000358ull)
+#define CVMX_IPD_PORTX_BP_PAGE_CNT(offset) \
+	 CVMX_ADD_IO_SEG(0x00014F0000000028ull + (((offset) & 63) * 8))
+#define CVMX_IPD_PORTX_BP_PAGE_CNT2(offset) \
+	 CVMX_ADD_IO_SEG(0x00014F0000000368ull + (((offset) & 63) * 8) - 8 * 36)
+#define CVMX_IPD_PORT_BP_COUNTERS2_PAIRX(offset) \
+	 CVMX_ADD_IO_SEG(0x00014F0000000388ull + (((offset) & 63) * 8) - 8 * 36)
+#define CVMX_IPD_PORT_BP_COUNTERS_PAIRX(offset) \
+	 CVMX_ADD_IO_SEG(0x00014F00000001B8ull + (((offset) & 63) * 8))
+#define CVMX_IPD_PORT_QOS_INTX(offset) \
+	 CVMX_ADD_IO_SEG(0x00014F0000000808ull + (((offset) & 7) * 8))
+#define CVMX_IPD_PORT_QOS_INT_ENBX(offset) \
+	 CVMX_ADD_IO_SEG(0x00014F0000000848ull + (((offset) & 7) * 8))
+#define CVMX_IPD_PORT_QOS_X_CNT(offset) \
+	 CVMX_ADD_IO_SEG(0x00014F0000000888ull + (((offset) & 511) * 8))
+#define CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL \
+	 CVMX_ADD_IO_SEG(0x00014F0000000348ull)
+#define CVMX_IPD_PRC_PORT_PTR_FIFO_CTL \
+	 CVMX_ADD_IO_SEG(0x00014F0000000350ull)
+#define CVMX_IPD_PTR_COUNT \
+	 CVMX_ADD_IO_SEG(0x00014F0000000320ull)
+#define CVMX_IPD_PWP_PTR_FIFO_CTL \
+	 CVMX_ADD_IO_SEG(0x00014F0000000340ull)
+#define CVMX_IPD_QOS0_RED_MARKS \
+	 CVMX_ADD_IO_SEG(0x00014F0000000178ull)
+#define CVMX_IPD_QOS1_RED_MARKS \
+	 CVMX_ADD_IO_SEG(0x00014F0000000180ull)
+#define CVMX_IPD_QOS2_RED_MARKS \
+	 CVMX_ADD_IO_SEG(0x00014F0000000188ull)
+#define CVMX_IPD_QOS3_RED_MARKS \
+	 CVMX_ADD_IO_SEG(0x00014F0000000190ull)
+#define CVMX_IPD_QOS4_RED_MARKS \
+	 CVMX_ADD_IO_SEG(0x00014F0000000198ull)
+#define CVMX_IPD_QOS5_RED_MARKS \
+	 CVMX_ADD_IO_SEG(0x00014F00000001A0ull)
+#define CVMX_IPD_QOS6_RED_MARKS \
+	 CVMX_ADD_IO_SEG(0x00014F00000001A8ull)
+#define CVMX_IPD_QOS7_RED_MARKS \
+	 CVMX_ADD_IO_SEG(0x00014F00000001B0ull)
+#define CVMX_IPD_QOSX_RED_MARKS(offset) \
+	 CVMX_ADD_IO_SEG(0x00014F0000000178ull + (((offset) & 7) * 8))
+#define CVMX_IPD_QUE0_FREE_PAGE_CNT \
+	 CVMX_ADD_IO_SEG(0x00014F0000000330ull)
+#define CVMX_IPD_RED_PORT_ENABLE \
+	 CVMX_ADD_IO_SEG(0x00014F00000002D8ull)
+#define CVMX_IPD_RED_PORT_ENABLE2 \
+	 CVMX_ADD_IO_SEG(0x00014F00000003A8ull)
+#define CVMX_IPD_RED_QUE0_PARAM \
+	 CVMX_ADD_IO_SEG(0x00014F00000002E0ull)
+#define CVMX_IPD_RED_QUE1_PARAM \
+	 CVMX_ADD_IO_SEG(0x00014F00000002E8ull)
+#define CVMX_IPD_RED_QUE2_PARAM \
+	 CVMX_ADD_IO_SEG(0x00014F00000002F0ull)
+#define CVMX_IPD_RED_QUE3_PARAM \
+	 CVMX_ADD_IO_SEG(0x00014F00000002F8ull)
+#define CVMX_IPD_RED_QUE4_PARAM \
+	 CVMX_ADD_IO_SEG(0x00014F0000000300ull)
+#define CVMX_IPD_RED_QUE5_PARAM \
+	 CVMX_ADD_IO_SEG(0x00014F0000000308ull)
+#define CVMX_IPD_RED_QUE6_PARAM \
+	 CVMX_ADD_IO_SEG(0x00014F0000000310ull)
+#define CVMX_IPD_RED_QUE7_PARAM \
+	 CVMX_ADD_IO_SEG(0x00014F0000000318ull)
+#define CVMX_IPD_RED_QUEX_PARAM(offset) \
+	 CVMX_ADD_IO_SEG(0x00014F00000002E0ull + (((offset) & 7) * 8))
+#define CVMX_IPD_SUB_PORT_BP_PAGE_CNT \
+	 CVMX_ADD_IO_SEG(0x00014F0000000148ull)
+#define CVMX_IPD_SUB_PORT_FCS \
+	 CVMX_ADD_IO_SEG(0x00014F0000000170ull)
+#define CVMX_IPD_SUB_PORT_QOS_CNT \
+	 CVMX_ADD_IO_SEG(0x00014F0000000800ull)
+#define CVMX_IPD_WQE_FPA_QUEUE \
+	 CVMX_ADD_IO_SEG(0x00014F0000000020ull)
+#define CVMX_IPD_WQE_PTR_VALID \
+	 CVMX_ADD_IO_SEG(0x00014F0000000360ull)
+
+union cvmx_ipd_1st_mbuff_skip {
+	uint64_t u64;
+	struct cvmx_ipd_1st_mbuff_skip_s {
+		uint64_t reserved_6_63:58;
+		uint64_t skip_sz:6;
+	} s;
+	struct cvmx_ipd_1st_mbuff_skip_s cn30xx;
+	struct cvmx_ipd_1st_mbuff_skip_s cn31xx;
+	struct cvmx_ipd_1st_mbuff_skip_s cn38xx;
+	struct cvmx_ipd_1st_mbuff_skip_s cn38xxp2;
+	struct cvmx_ipd_1st_mbuff_skip_s cn50xx;
+	struct cvmx_ipd_1st_mbuff_skip_s cn52xx;
+	struct cvmx_ipd_1st_mbuff_skip_s cn52xxp1;
+	struct cvmx_ipd_1st_mbuff_skip_s cn56xx;
+	struct cvmx_ipd_1st_mbuff_skip_s cn56xxp1;
+	struct cvmx_ipd_1st_mbuff_skip_s cn58xx;
+	struct cvmx_ipd_1st_mbuff_skip_s cn58xxp1;
+};
+
+union cvmx_ipd_1st_next_ptr_back {
+	uint64_t u64;
+	struct cvmx_ipd_1st_next_ptr_back_s {
+		uint64_t reserved_4_63:60;
+		uint64_t back:4;
+	} s;
+	struct cvmx_ipd_1st_next_ptr_back_s cn30xx;
+	struct cvmx_ipd_1st_next_ptr_back_s cn31xx;
+	struct cvmx_ipd_1st_next_ptr_back_s cn38xx;
+	struct cvmx_ipd_1st_next_ptr_back_s cn38xxp2;
+	struct cvmx_ipd_1st_next_ptr_back_s cn50xx;
+	struct cvmx_ipd_1st_next_ptr_back_s cn52xx;
+	struct cvmx_ipd_1st_next_ptr_back_s cn52xxp1;
+	struct cvmx_ipd_1st_next_ptr_back_s cn56xx;
+	struct cvmx_ipd_1st_next_ptr_back_s cn56xxp1;
+	struct cvmx_ipd_1st_next_ptr_back_s cn58xx;
+	struct cvmx_ipd_1st_next_ptr_back_s cn58xxp1;
+};
+
+union cvmx_ipd_2nd_next_ptr_back {
+	uint64_t u64;
+	struct cvmx_ipd_2nd_next_ptr_back_s {
+		uint64_t reserved_4_63:60;
+		uint64_t back:4;
+	} s;
+	struct cvmx_ipd_2nd_next_ptr_back_s cn30xx;
+	struct cvmx_ipd_2nd_next_ptr_back_s cn31xx;
+	struct cvmx_ipd_2nd_next_ptr_back_s cn38xx;
+	struct cvmx_ipd_2nd_next_ptr_back_s cn38xxp2;
+	struct cvmx_ipd_2nd_next_ptr_back_s cn50xx;
+	struct cvmx_ipd_2nd_next_ptr_back_s cn52xx;
+	struct cvmx_ipd_2nd_next_ptr_back_s cn52xxp1;
+	struct cvmx_ipd_2nd_next_ptr_back_s cn56xx;
+	struct cvmx_ipd_2nd_next_ptr_back_s cn56xxp1;
+	struct cvmx_ipd_2nd_next_ptr_back_s cn58xx;
+	struct cvmx_ipd_2nd_next_ptr_back_s cn58xxp1;
+};
+
+union cvmx_ipd_bist_status {
+	uint64_t u64;
+	struct cvmx_ipd_bist_status_s {
+		uint64_t reserved_18_63:46;
+		uint64_t csr_mem:1;
+		uint64_t csr_ncmd:1;
+		uint64_t pwq_wqed:1;
+		uint64_t pwq_wp1:1;
+		uint64_t pwq_pow:1;
+		uint64_t ipq_pbe1:1;
+		uint64_t ipq_pbe0:1;
+		uint64_t pbm3:1;
+		uint64_t pbm2:1;
+		uint64_t pbm1:1;
+		uint64_t pbm0:1;
+		uint64_t pbm_word:1;
+		uint64_t pwq1:1;
+		uint64_t pwq0:1;
+		uint64_t prc_off:1;
+		uint64_t ipd_old:1;
+		uint64_t ipd_new:1;
+		uint64_t pwp:1;
+	} s;
+	struct cvmx_ipd_bist_status_cn30xx {
+		uint64_t reserved_16_63:48;
+		uint64_t pwq_wqed:1;
+		uint64_t pwq_wp1:1;
+		uint64_t pwq_pow:1;
+		uint64_t ipq_pbe1:1;
+		uint64_t ipq_pbe0:1;
+		uint64_t pbm3:1;
+		uint64_t pbm2:1;
+		uint64_t pbm1:1;
+		uint64_t pbm0:1;
+		uint64_t pbm_word:1;
+		uint64_t pwq1:1;
+		uint64_t pwq0:1;
+		uint64_t prc_off:1;
+		uint64_t ipd_old:1;
+		uint64_t ipd_new:1;
+		uint64_t pwp:1;
+	} cn30xx;
+	struct cvmx_ipd_bist_status_cn30xx cn31xx;
+	struct cvmx_ipd_bist_status_cn30xx cn38xx;
+	struct cvmx_ipd_bist_status_cn30xx cn38xxp2;
+	struct cvmx_ipd_bist_status_cn30xx cn50xx;
+	struct cvmx_ipd_bist_status_s cn52xx;
+	struct cvmx_ipd_bist_status_s cn52xxp1;
+	struct cvmx_ipd_bist_status_s cn56xx;
+	struct cvmx_ipd_bist_status_s cn56xxp1;
+	struct cvmx_ipd_bist_status_cn30xx cn58xx;
+	struct cvmx_ipd_bist_status_cn30xx cn58xxp1;
+};
+
+union cvmx_ipd_bp_prt_red_end {
+	uint64_t u64;
+	struct cvmx_ipd_bp_prt_red_end_s {
+		uint64_t reserved_40_63:24;
+		uint64_t prt_enb:40;
+	} s;
+	struct cvmx_ipd_bp_prt_red_end_cn30xx {
+		uint64_t reserved_36_63:28;
+		uint64_t prt_enb:36;
+	} cn30xx;
+	struct cvmx_ipd_bp_prt_red_end_cn30xx cn31xx;
+	struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xx;
+	struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xxp2;
+	struct cvmx_ipd_bp_prt_red_end_cn30xx cn50xx;
+	struct cvmx_ipd_bp_prt_red_end_s cn52xx;
+	struct cvmx_ipd_bp_prt_red_end_s cn52xxp1;
+	struct cvmx_ipd_bp_prt_red_end_s cn56xx;
+	struct cvmx_ipd_bp_prt_red_end_s cn56xxp1;
+	struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xx;
+	struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xxp1;
+};
+
+union cvmx_ipd_clk_count {
+	uint64_t u64;
+	struct cvmx_ipd_clk_count_s {
+		uint64_t clk_cnt:64;
+	} s;
+	struct cvmx_ipd_clk_count_s cn30xx;
+	struct cvmx_ipd_clk_count_s cn31xx;
+	struct cvmx_ipd_clk_count_s cn38xx;
+	struct cvmx_ipd_clk_count_s cn38xxp2;
+	struct cvmx_ipd_clk_count_s cn50xx;
+	struct cvmx_ipd_clk_count_s cn52xx;
+	struct cvmx_ipd_clk_count_s cn52xxp1;
+	struct cvmx_ipd_clk_count_s cn56xx;
+	struct cvmx_ipd_clk_count_s cn56xxp1;
+	struct cvmx_ipd_clk_count_s cn58xx;
+	struct cvmx_ipd_clk_count_s cn58xxp1;
+};
+
+union cvmx_ipd_ctl_status {
+	uint64_t u64;
+	struct cvmx_ipd_ctl_status_s {
+		uint64_t reserved_15_63:49;
+		uint64_t no_wptr:1;
+		uint64_t pq_apkt:1;
+		uint64_t pq_nabuf:1;
+		uint64_t ipd_full:1;
+		uint64_t pkt_off:1;
+		uint64_t len_m8:1;
+		uint64_t reset:1;
+		uint64_t addpkt:1;
+		uint64_t naddbuf:1;
+		uint64_t pkt_lend:1;
+		uint64_t wqe_lend:1;
+		uint64_t pbp_en:1;
+		uint64_t opc_mode:2;
+		uint64_t ipd_en:1;
+	} s;
+	struct cvmx_ipd_ctl_status_cn30xx {
+		uint64_t reserved_10_63:54;
+		uint64_t len_m8:1;
+		uint64_t reset:1;
+		uint64_t addpkt:1;
+		uint64_t naddbuf:1;
+		uint64_t pkt_lend:1;
+		uint64_t wqe_lend:1;
+		uint64_t pbp_en:1;
+		uint64_t opc_mode:2;
+		uint64_t ipd_en:1;
+	} cn30xx;
+	struct cvmx_ipd_ctl_status_cn30xx cn31xx;
+	struct cvmx_ipd_ctl_status_cn30xx cn38xx;
+	struct cvmx_ipd_ctl_status_cn38xxp2 {
+		uint64_t reserved_9_63:55;
+		uint64_t reset:1;
+		uint64_t addpkt:1;
+		uint64_t naddbuf:1;
+		uint64_t pkt_lend:1;
+		uint64_t wqe_lend:1;
+		uint64_t pbp_en:1;
+		uint64_t opc_mode:2;
+		uint64_t ipd_en:1;
+	} cn38xxp2;
+	struct cvmx_ipd_ctl_status_s cn50xx;
+	struct cvmx_ipd_ctl_status_s cn52xx;
+	struct cvmx_ipd_ctl_status_s cn52xxp1;
+	struct cvmx_ipd_ctl_status_s cn56xx;
+	struct cvmx_ipd_ctl_status_s cn56xxp1;
+	struct cvmx_ipd_ctl_status_cn58xx {
+		uint64_t reserved_12_63:52;
+		uint64_t ipd_full:1;
+		uint64_t pkt_off:1;
+		uint64_t len_m8:1;
+		uint64_t reset:1;
+		uint64_t addpkt:1;
+		uint64_t naddbuf:1;
+		uint64_t pkt_lend:1;
+		uint64_t wqe_lend:1;
+		uint64_t pbp_en:1;
+		uint64_t opc_mode:2;
+		uint64_t ipd_en:1;
+	} cn58xx;
+	struct cvmx_ipd_ctl_status_cn58xx cn58xxp1;
+};
+
+union cvmx_ipd_int_enb {
+	uint64_t u64;
+	struct cvmx_ipd_int_enb_s {
+		uint64_t reserved_12_63:52;
+		uint64_t pq_sub:1;
+		uint64_t pq_add:1;
+		uint64_t bc_ovr:1;
+		uint64_t d_coll:1;
+		uint64_t c_coll:1;
+		uint64_t cc_ovr:1;
+		uint64_t dc_ovr:1;
+		uint64_t bp_sub:1;
+		uint64_t prc_par3:1;
+		uint64_t prc_par2:1;
+		uint64_t prc_par1:1;
+		uint64_t prc_par0:1;
+	} s;
+	struct cvmx_ipd_int_enb_cn30xx {
+		uint64_t reserved_5_63:59;
+		uint64_t bp_sub:1;
+		uint64_t prc_par3:1;
+		uint64_t prc_par2:1;
+		uint64_t prc_par1:1;
+		uint64_t prc_par0:1;
+	} cn30xx;
+	struct cvmx_ipd_int_enb_cn30xx cn31xx;
+	struct cvmx_ipd_int_enb_cn38xx {
+		uint64_t reserved_10_63:54;
+		uint64_t bc_ovr:1;
+		uint64_t d_coll:1;
+		uint64_t c_coll:1;
+		uint64_t cc_ovr:1;
+		uint64_t dc_ovr:1;
+		uint64_t bp_sub:1;
+		uint64_t prc_par3:1;
+		uint64_t prc_par2:1;
+		uint64_t prc_par1:1;
+		uint64_t prc_par0:1;
+	} cn38xx;
+	struct cvmx_ipd_int_enb_cn30xx cn38xxp2;
+	struct cvmx_ipd_int_enb_cn38xx cn50xx;
+	struct cvmx_ipd_int_enb_s cn52xx;
+	struct cvmx_ipd_int_enb_s cn52xxp1;
+	struct cvmx_ipd_int_enb_s cn56xx;
+	struct cvmx_ipd_int_enb_s cn56xxp1;
+	struct cvmx_ipd_int_enb_cn38xx cn58xx;
+	struct cvmx_ipd_int_enb_cn38xx cn58xxp1;
+};
+
+union cvmx_ipd_int_sum {
+	uint64_t u64;
+	struct cvmx_ipd_int_sum_s {
+		uint64_t reserved_12_63:52;
+		uint64_t pq_sub:1;
+		uint64_t pq_add:1;
+		uint64_t bc_ovr:1;
+		uint64_t d_coll:1;
+		uint64_t c_coll:1;
+		uint64_t cc_ovr:1;
+		uint64_t dc_ovr:1;
+		uint64_t bp_sub:1;
+		uint64_t prc_par3:1;
+		uint64_t prc_par2:1;
+		uint64_t prc_par1:1;
+		uint64_t prc_par0:1;
+	} s;
+	struct cvmx_ipd_int_sum_cn30xx {
+		uint64_t reserved_5_63:59;
+		uint64_t bp_sub:1;
+		uint64_t prc_par3:1;
+		uint64_t prc_par2:1;
+		uint64_t prc_par1:1;
+		uint64_t prc_par0:1;
+	} cn30xx;
+	struct cvmx_ipd_int_sum_cn30xx cn31xx;
+	struct cvmx_ipd_int_sum_cn38xx {
+		uint64_t reserved_10_63:54;
+		uint64_t bc_ovr:1;
+		uint64_t d_coll:1;
+		uint64_t c_coll:1;
+		uint64_t cc_ovr:1;
+		uint64_t dc_ovr:1;
+		uint64_t bp_sub:1;
+		uint64_t prc_par3:1;
+		uint64_t prc_par2:1;
+		uint64_t prc_par1:1;
+		uint64_t prc_par0:1;
+	} cn38xx;
+	struct cvmx_ipd_int_sum_cn30xx cn38xxp2;
+	struct cvmx_ipd_int_sum_cn38xx cn50xx;
+	struct cvmx_ipd_int_sum_s cn52xx;
+	struct cvmx_ipd_int_sum_s cn52xxp1;
+	struct cvmx_ipd_int_sum_s cn56xx;
+	struct cvmx_ipd_int_sum_s cn56xxp1;
+	struct cvmx_ipd_int_sum_cn38xx cn58xx;
+	struct cvmx_ipd_int_sum_cn38xx cn58xxp1;
+};
+
+union cvmx_ipd_not_1st_mbuff_skip {
+	uint64_t u64;
+	struct cvmx_ipd_not_1st_mbuff_skip_s {
+		uint64_t reserved_6_63:58;
+		uint64_t skip_sz:6;
+	} s;
+	struct cvmx_ipd_not_1st_mbuff_skip_s cn30xx;
+	struct cvmx_ipd_not_1st_mbuff_skip_s cn31xx;
+	struct cvmx_ipd_not_1st_mbuff_skip_s cn38xx;
+	struct cvmx_ipd_not_1st_mbuff_skip_s cn38xxp2;
+	struct cvmx_ipd_not_1st_mbuff_skip_s cn50xx;
+	struct cvmx_ipd_not_1st_mbuff_skip_s cn52xx;
+	struct cvmx_ipd_not_1st_mbuff_skip_s cn52xxp1;
+	struct cvmx_ipd_not_1st_mbuff_skip_s cn56xx;
+	struct cvmx_ipd_not_1st_mbuff_skip_s cn56xxp1;
+	struct cvmx_ipd_not_1st_mbuff_skip_s cn58xx;
+	struct cvmx_ipd_not_1st_mbuff_skip_s cn58xxp1;
+};
+
+union cvmx_ipd_packet_mbuff_size {
+	uint64_t u64;
+	struct cvmx_ipd_packet_mbuff_size_s {
+		uint64_t reserved_12_63:52;
+		uint64_t mb_size:12;
+	} s;
+	struct cvmx_ipd_packet_mbuff_size_s cn30xx;
+	struct cvmx_ipd_packet_mbuff_size_s cn31xx;
+	struct cvmx_ipd_packet_mbuff_size_s cn38xx;
+	struct cvmx_ipd_packet_mbuff_size_s cn38xxp2;
+	struct cvmx_ipd_packet_mbuff_size_s cn50xx;
+	struct cvmx_ipd_packet_mbuff_size_s cn52xx;
+	struct cvmx_ipd_packet_mbuff_size_s cn52xxp1;
+	struct cvmx_ipd_packet_mbuff_size_s cn56xx;
+	struct cvmx_ipd_packet_mbuff_size_s cn56xxp1;
+	struct cvmx_ipd_packet_mbuff_size_s cn58xx;
+	struct cvmx_ipd_packet_mbuff_size_s cn58xxp1;
+};
+
+union cvmx_ipd_pkt_ptr_valid {
+	uint64_t u64;
+	struct cvmx_ipd_pkt_ptr_valid_s {
+		uint64_t reserved_29_63:35;
+		uint64_t ptr:29;
+	} s;
+	struct cvmx_ipd_pkt_ptr_valid_s cn30xx;
+	struct cvmx_ipd_pkt_ptr_valid_s cn31xx;
+	struct cvmx_ipd_pkt_ptr_valid_s cn38xx;
+	struct cvmx_ipd_pkt_ptr_valid_s cn50xx;
+	struct cvmx_ipd_pkt_ptr_valid_s cn52xx;
+	struct cvmx_ipd_pkt_ptr_valid_s cn52xxp1;
+	struct cvmx_ipd_pkt_ptr_valid_s cn56xx;
+	struct cvmx_ipd_pkt_ptr_valid_s cn56xxp1;
+	struct cvmx_ipd_pkt_ptr_valid_s cn58xx;
+	struct cvmx_ipd_pkt_ptr_valid_s cn58xxp1;
+};
+
+union cvmx_ipd_portx_bp_page_cnt {
+	uint64_t u64;
+	struct cvmx_ipd_portx_bp_page_cnt_s {
+		uint64_t reserved_18_63:46;
+		uint64_t bp_enb:1;
+		uint64_t page_cnt:17;
+	} s;
+	struct cvmx_ipd_portx_bp_page_cnt_s cn30xx;
+	struct cvmx_ipd_portx_bp_page_cnt_s cn31xx;
+	struct cvmx_ipd_portx_bp_page_cnt_s cn38xx;
+	struct cvmx_ipd_portx_bp_page_cnt_s cn38xxp2;
+	struct cvmx_ipd_portx_bp_page_cnt_s cn50xx;
+	struct cvmx_ipd_portx_bp_page_cnt_s cn52xx;
+	struct cvmx_ipd_portx_bp_page_cnt_s cn52xxp1;
+	struct cvmx_ipd_portx_bp_page_cnt_s cn56xx;
+	struct cvmx_ipd_portx_bp_page_cnt_s cn56xxp1;
+	struct cvmx_ipd_portx_bp_page_cnt_s cn58xx;
+	struct cvmx_ipd_portx_bp_page_cnt_s cn58xxp1;
+};
+
+union cvmx_ipd_portx_bp_page_cnt2 {
+	uint64_t u64;
+	struct cvmx_ipd_portx_bp_page_cnt2_s {
+		uint64_t reserved_18_63:46;
+		uint64_t bp_enb:1;
+		uint64_t page_cnt:17;
+	} s;
+	struct cvmx_ipd_portx_bp_page_cnt2_s cn52xx;
+	struct cvmx_ipd_portx_bp_page_cnt2_s cn52xxp1;
+	struct cvmx_ipd_portx_bp_page_cnt2_s cn56xx;
+	struct cvmx_ipd_portx_bp_page_cnt2_s cn56xxp1;
+};
+
+union cvmx_ipd_port_bp_counters2_pairx {
+	uint64_t u64;
+	struct cvmx_ipd_port_bp_counters2_pairx_s {
+		uint64_t reserved_25_63:39;
+		uint64_t cnt_val:25;
+	} s;
+	struct cvmx_ipd_port_bp_counters2_pairx_s cn52xx;
+	struct cvmx_ipd_port_bp_counters2_pairx_s cn52xxp1;
+	struct cvmx_ipd_port_bp_counters2_pairx_s cn56xx;
+	struct cvmx_ipd_port_bp_counters2_pairx_s cn56xxp1;
+};
+
+union cvmx_ipd_port_bp_counters_pairx {
+	uint64_t u64;
+	struct cvmx_ipd_port_bp_counters_pairx_s {
+		uint64_t reserved_25_63:39;
+		uint64_t cnt_val:25;
+	} s;
+	struct cvmx_ipd_port_bp_counters_pairx_s cn30xx;
+	struct cvmx_ipd_port_bp_counters_pairx_s cn31xx;
+	struct cvmx_ipd_port_bp_counters_pairx_s cn38xx;
+	struct cvmx_ipd_port_bp_counters_pairx_s cn38xxp2;
+	struct cvmx_ipd_port_bp_counters_pairx_s cn50xx;
+	struct cvmx_ipd_port_bp_counters_pairx_s cn52xx;
+	struct cvmx_ipd_port_bp_counters_pairx_s cn52xxp1;
+	struct cvmx_ipd_port_bp_counters_pairx_s cn56xx;
+	struct cvmx_ipd_port_bp_counters_pairx_s cn56xxp1;
+	struct cvmx_ipd_port_bp_counters_pairx_s cn58xx;
+	struct cvmx_ipd_port_bp_counters_pairx_s cn58xxp1;
+};
+
+union cvmx_ipd_port_qos_x_cnt {
+	uint64_t u64;
+	struct cvmx_ipd_port_qos_x_cnt_s {
+		uint64_t wmark:32;
+		uint64_t cnt:32;
+	} s;
+	struct cvmx_ipd_port_qos_x_cnt_s cn52xx;
+	struct cvmx_ipd_port_qos_x_cnt_s cn52xxp1;
+	struct cvmx_ipd_port_qos_x_cnt_s cn56xx;
+	struct cvmx_ipd_port_qos_x_cnt_s cn56xxp1;
+};
+
+union cvmx_ipd_port_qos_intx {
+	uint64_t u64;
+	struct cvmx_ipd_port_qos_intx_s {
+		uint64_t intr:64;
+	} s;
+	struct cvmx_ipd_port_qos_intx_s cn52xx;
+	struct cvmx_ipd_port_qos_intx_s cn52xxp1;
+	struct cvmx_ipd_port_qos_intx_s cn56xx;
+	struct cvmx_ipd_port_qos_intx_s cn56xxp1;
+};
+
+union cvmx_ipd_port_qos_int_enbx {
+	uint64_t u64;
+	struct cvmx_ipd_port_qos_int_enbx_s {
+		uint64_t enb:64;
+	} s;
+	struct cvmx_ipd_port_qos_int_enbx_s cn52xx;
+	struct cvmx_ipd_port_qos_int_enbx_s cn52xxp1;
+	struct cvmx_ipd_port_qos_int_enbx_s cn56xx;
+	struct cvmx_ipd_port_qos_int_enbx_s cn56xxp1;
+};
+
+union cvmx_ipd_prc_hold_ptr_fifo_ctl {
+	uint64_t u64;
+	struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s {
+		uint64_t reserved_39_63:25;
+		uint64_t max_pkt:3;
+		uint64_t praddr:3;
+		uint64_t ptr:29;
+		uint64_t cena:1;
+		uint64_t raddr:3;
+	} s;
+	struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn30xx;
+	struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn31xx;
+	struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn38xx;
+	struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn50xx;
+	struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn52xx;
+	struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn52xxp1;
+	struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn56xx;
+	struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn56xxp1;
+	struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xx;
+	struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xxp1;
+};
+
+union cvmx_ipd_prc_port_ptr_fifo_ctl {
+	uint64_t u64;
+	struct cvmx_ipd_prc_port_ptr_fifo_ctl_s {
+		uint64_t reserved_44_63:20;
+		uint64_t max_pkt:7;
+		uint64_t ptr:29;
+		uint64_t cena:1;
+		uint64_t raddr:7;
+	} s;
+	struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn30xx;
+	struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn31xx;
+	struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn38xx;
+	struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn50xx;
+	struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn52xx;
+	struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn52xxp1;
+	struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn56xx;
+	struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn56xxp1;
+	struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xx;
+	struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xxp1;
+};
+
+union cvmx_ipd_ptr_count {
+	uint64_t u64;
+	struct cvmx_ipd_ptr_count_s {
+		uint64_t reserved_19_63:45;
+		uint64_t pktv_cnt:1;
+		uint64_t wqev_cnt:1;
+		uint64_t pfif_cnt:3;
+		uint64_t pkt_pcnt:7;
+		uint64_t wqe_pcnt:7;
+	} s;
+	struct cvmx_ipd_ptr_count_s cn30xx;
+	struct cvmx_ipd_ptr_count_s cn31xx;
+	struct cvmx_ipd_ptr_count_s cn38xx;
+	struct cvmx_ipd_ptr_count_s cn38xxp2;
+	struct cvmx_ipd_ptr_count_s cn50xx;
+	struct cvmx_ipd_ptr_count_s cn52xx;
+	struct cvmx_ipd_ptr_count_s cn52xxp1;
+	struct cvmx_ipd_ptr_count_s cn56xx;
+	struct cvmx_ipd_ptr_count_s cn56xxp1;
+	struct cvmx_ipd_ptr_count_s cn58xx;
+	struct cvmx_ipd_ptr_count_s cn58xxp1;
+};
+
+union cvmx_ipd_pwp_ptr_fifo_ctl {
+	uint64_t u64;
+	struct cvmx_ipd_pwp_ptr_fifo_ctl_s {
+		uint64_t reserved_61_63:3;
+		uint64_t max_cnts:7;
+		uint64_t wraddr:8;
+		uint64_t praddr:8;
+		uint64_t ptr:29;
+		uint64_t cena:1;
+		uint64_t raddr:8;
+	} s;
+	struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn30xx;
+	struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn31xx;
+	struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn38xx;
+	struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn50xx;
+	struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn52xx;
+	struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn52xxp1;
+	struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn56xx;
+	struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn56xxp1;
+	struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xx;
+	struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xxp1;
+};
+
+union cvmx_ipd_qosx_red_marks {
+	uint64_t u64;
+	struct cvmx_ipd_qosx_red_marks_s {
+		uint64_t drop:32;
+		uint64_t pass:32;
+	} s;
+	struct cvmx_ipd_qosx_red_marks_s cn30xx;
+	struct cvmx_ipd_qosx_red_marks_s cn31xx;
+	struct cvmx_ipd_qosx_red_marks_s cn38xx;
+	struct cvmx_ipd_qosx_red_marks_s cn38xxp2;
+	struct cvmx_ipd_qosx_red_marks_s cn50xx;
+	struct cvmx_ipd_qosx_red_marks_s cn52xx;
+	struct cvmx_ipd_qosx_red_marks_s cn52xxp1;
+	struct cvmx_ipd_qosx_red_marks_s cn56xx;
+	struct cvmx_ipd_qosx_red_marks_s cn56xxp1;
+	struct cvmx_ipd_qosx_red_marks_s cn58xx;
+	struct cvmx_ipd_qosx_red_marks_s cn58xxp1;
+};
+
+union cvmx_ipd_que0_free_page_cnt {
+	uint64_t u64;
+	struct cvmx_ipd_que0_free_page_cnt_s {
+		uint64_t reserved_32_63:32;
+		uint64_t q0_pcnt:32;
+	} s;
+	struct cvmx_ipd_que0_free_page_cnt_s cn30xx;
+	struct cvmx_ipd_que0_free_page_cnt_s cn31xx;
+	struct cvmx_ipd_que0_free_page_cnt_s cn38xx;
+	struct cvmx_ipd_que0_free_page_cnt_s cn38xxp2;
+	struct cvmx_ipd_que0_free_page_cnt_s cn50xx;
+	struct cvmx_ipd_que0_free_page_cnt_s cn52xx;
+	struct cvmx_ipd_que0_free_page_cnt_s cn52xxp1;
+	struct cvmx_ipd_que0_free_page_cnt_s cn56xx;
+	struct cvmx_ipd_que0_free_page_cnt_s cn56xxp1;
+	struct cvmx_ipd_que0_free_page_cnt_s cn58xx;
+	struct cvmx_ipd_que0_free_page_cnt_s cn58xxp1;
+};
+
+union cvmx_ipd_red_port_enable {
+	uint64_t u64;
+	struct cvmx_ipd_red_port_enable_s {
+		uint64_t prb_dly:14;
+		uint64_t avg_dly:14;
+		uint64_t prt_enb:36;
+	} s;
+	struct cvmx_ipd_red_port_enable_s cn30xx;
+	struct cvmx_ipd_red_port_enable_s cn31xx;
+	struct cvmx_ipd_red_port_enable_s cn38xx;
+	struct cvmx_ipd_red_port_enable_s cn38xxp2;
+	struct cvmx_ipd_red_port_enable_s cn50xx;
+	struct cvmx_ipd_red_port_enable_s cn52xx;
+	struct cvmx_ipd_red_port_enable_s cn52xxp1;
+	struct cvmx_ipd_red_port_enable_s cn56xx;
+	struct cvmx_ipd_red_port_enable_s cn56xxp1;
+	struct cvmx_ipd_red_port_enable_s cn58xx;
+	struct cvmx_ipd_red_port_enable_s cn58xxp1;
+};
+
+union cvmx_ipd_red_port_enable2 {
+	uint64_t u64;
+	struct cvmx_ipd_red_port_enable2_s {
+		uint64_t reserved_4_63:60;
+		uint64_t prt_enb:4;
+	} s;
+	struct cvmx_ipd_red_port_enable2_s cn52xx;
+	struct cvmx_ipd_red_port_enable2_s cn52xxp1;
+	struct cvmx_ipd_red_port_enable2_s cn56xx;
+	struct cvmx_ipd_red_port_enable2_s cn56xxp1;
+};
+
+union cvmx_ipd_red_quex_param {
+	uint64_t u64;
+	struct cvmx_ipd_red_quex_param_s {
+		uint64_t reserved_49_63:15;
+		uint64_t use_pcnt:1;
+		uint64_t new_con:8;
+		uint64_t avg_con:8;
+		uint64_t prb_con:32;
+	} s;
+	struct cvmx_ipd_red_quex_param_s cn30xx;
+	struct cvmx_ipd_red_quex_param_s cn31xx;
+	struct cvmx_ipd_red_quex_param_s cn38xx;
+	struct cvmx_ipd_red_quex_param_s cn38xxp2;
+	struct cvmx_ipd_red_quex_param_s cn50xx;
+	struct cvmx_ipd_red_quex_param_s cn52xx;
+	struct cvmx_ipd_red_quex_param_s cn52xxp1;
+	struct cvmx_ipd_red_quex_param_s cn56xx;
+	struct cvmx_ipd_red_quex_param_s cn56xxp1;
+	struct cvmx_ipd_red_quex_param_s cn58xx;
+	struct cvmx_ipd_red_quex_param_s cn58xxp1;
+};
+
+union cvmx_ipd_sub_port_bp_page_cnt {
+	uint64_t u64;
+	struct cvmx_ipd_sub_port_bp_page_cnt_s {
+		uint64_t reserved_31_63:33;
+		uint64_t port:6;
+		uint64_t page_cnt:25;
+	} s;
+	struct cvmx_ipd_sub_port_bp_page_cnt_s cn30xx;
+	struct cvmx_ipd_sub_port_bp_page_cnt_s cn31xx;
+	struct cvmx_ipd_sub_port_bp_page_cnt_s cn38xx;
+	struct cvmx_ipd_sub_port_bp_page_cnt_s cn38xxp2;
+	struct cvmx_ipd_sub_port_bp_page_cnt_s cn50xx;
+	struct cvmx_ipd_sub_port_bp_page_cnt_s cn52xx;
+	struct cvmx_ipd_sub_port_bp_page_cnt_s cn52xxp1;
+	struct cvmx_ipd_sub_port_bp_page_cnt_s cn56xx;
+	struct cvmx_ipd_sub_port_bp_page_cnt_s cn56xxp1;
+	struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xx;
+	struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xxp1;
+};
+
+union cvmx_ipd_sub_port_fcs {
+	uint64_t u64;
+	struct cvmx_ipd_sub_port_fcs_s {
+		uint64_t reserved_40_63:24;
+		uint64_t port_bit2:4;
+		uint64_t reserved_32_35:4;
+		uint64_t port_bit:32;
+	} s;
+	struct cvmx_ipd_sub_port_fcs_cn30xx {
+		uint64_t reserved_3_63:61;
+		uint64_t port_bit:3;
+	} cn30xx;
+	struct cvmx_ipd_sub_port_fcs_cn30xx cn31xx;
+	struct cvmx_ipd_sub_port_fcs_cn38xx {
+		uint64_t reserved_32_63:32;
+		uint64_t port_bit:32;
+	} cn38xx;
+	struct cvmx_ipd_sub_port_fcs_cn38xx cn38xxp2;
+	struct cvmx_ipd_sub_port_fcs_cn30xx cn50xx;
+	struct cvmx_ipd_sub_port_fcs_s cn52xx;
+	struct cvmx_ipd_sub_port_fcs_s cn52xxp1;
+	struct cvmx_ipd_sub_port_fcs_s cn56xx;
+	struct cvmx_ipd_sub_port_fcs_s cn56xxp1;
+	struct cvmx_ipd_sub_port_fcs_cn38xx cn58xx;
+	struct cvmx_ipd_sub_port_fcs_cn38xx cn58xxp1;
+};
+
+union cvmx_ipd_sub_port_qos_cnt {
+	uint64_t u64;
+	struct cvmx_ipd_sub_port_qos_cnt_s {
+		uint64_t reserved_41_63:23;
+		uint64_t port_qos:9;
+		uint64_t cnt:32;
+	} s;
+	struct cvmx_ipd_sub_port_qos_cnt_s cn52xx;
+	struct cvmx_ipd_sub_port_qos_cnt_s cn52xxp1;
+	struct cvmx_ipd_sub_port_qos_cnt_s cn56xx;
+	struct cvmx_ipd_sub_port_qos_cnt_s cn56xxp1;
+};
+
+union cvmx_ipd_wqe_fpa_queue {
+	uint64_t u64;
+	struct cvmx_ipd_wqe_fpa_queue_s {
+		uint64_t reserved_3_63:61;
+		uint64_t wqe_pool:3;
+	} s;
+	struct cvmx_ipd_wqe_fpa_queue_s cn30xx;
+	struct cvmx_ipd_wqe_fpa_queue_s cn31xx;
+	struct cvmx_ipd_wqe_fpa_queue_s cn38xx;
+	struct cvmx_ipd_wqe_fpa_queue_s cn38xxp2;
+	struct cvmx_ipd_wqe_fpa_queue_s cn50xx;
+	struct cvmx_ipd_wqe_fpa_queue_s cn52xx;
+	struct cvmx_ipd_wqe_fpa_queue_s cn52xxp1;
+	struct cvmx_ipd_wqe_fpa_queue_s cn56xx;
+	struct cvmx_ipd_wqe_fpa_queue_s cn56xxp1;
+	struct cvmx_ipd_wqe_fpa_queue_s cn58xx;
+	struct cvmx_ipd_wqe_fpa_queue_s cn58xxp1;
+};
+
+union cvmx_ipd_wqe_ptr_valid {
+	uint64_t u64;
+	struct cvmx_ipd_wqe_ptr_valid_s {
+		uint64_t reserved_29_63:35;
+		uint64_t ptr:29;
+	} s;
+	struct cvmx_ipd_wqe_ptr_valid_s cn30xx;
+	struct cvmx_ipd_wqe_ptr_valid_s cn31xx;
+	struct cvmx_ipd_wqe_ptr_valid_s cn38xx;
+	struct cvmx_ipd_wqe_ptr_valid_s cn50xx;
+	struct cvmx_ipd_wqe_ptr_valid_s cn52xx;
+	struct cvmx_ipd_wqe_ptr_valid_s cn52xxp1;
+	struct cvmx_ipd_wqe_ptr_valid_s cn56xx;
+	struct cvmx_ipd_wqe_ptr_valid_s cn56xxp1;
+	struct cvmx_ipd_wqe_ptr_valid_s cn58xx;
+	struct cvmx_ipd_wqe_ptr_valid_s cn58xxp1;
+};
+
+#endif
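
The IPD definitions support the same read-modify-write idiom for configuration registers: read the CSR into u64, update a field through the bitfield view, and write the image back. A hedged sketch, not part of this patch, assuming cvmx_read_csr()/cvmx_write_csr() from asm/octeon/cvmx.h are the standard accessors:

#include <asm/octeon/cvmx.h>		/* assumed: cvmx_read_csr()/cvmx_write_csr() */
#include <asm/octeon/cvmx-ipd-defs.h>

/* Illustrative only: set IPD_CTL_STATUS[IPD_EN] with a read-modify-write. */
static inline void example_ipd_enable(void)
{
	union cvmx_ipd_ctl_status ctl;

	ctl.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
	ctl.s.ipd_en = 1;	/* ipd_en is the low-order bit in every layout above */
	cvmx_write_csr(CVMX_IPD_CTL_STATUS, ctl.u64);
}
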
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
new file mode 100644
index 0000000..3375838
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
@@ -0,0 +1,963 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_L2C_DEFS_H__
+#define __CVMX_L2C_DEFS_H__
+
+#define CVMX_L2C_BST0 \
+	 CVMX_ADD_IO_SEG(0x00011800800007F8ull)
+#define CVMX_L2C_BST1 \
+	 CVMX_ADD_IO_SEG(0x00011800800007F0ull)
+#define CVMX_L2C_BST2 \
+	 CVMX_ADD_IO_SEG(0x00011800800007E8ull)
+#define CVMX_L2C_CFG \
+	 CVMX_ADD_IO_SEG(0x0001180080000000ull)
+#define CVMX_L2C_DBG \
+	 CVMX_ADD_IO_SEG(0x0001180080000030ull)
+#define CVMX_L2C_DUT \
+	 CVMX_ADD_IO_SEG(0x0001180080000050ull)
+#define CVMX_L2C_GRPWRR0 \
+	 CVMX_ADD_IO_SEG(0x00011800800000C8ull)
+#define CVMX_L2C_GRPWRR1 \
+	 CVMX_ADD_IO_SEG(0x00011800800000D0ull)
+#define CVMX_L2C_INT_EN \
+	 CVMX_ADD_IO_SEG(0x0001180080000100ull)
+#define CVMX_L2C_INT_STAT \
+	 CVMX_ADD_IO_SEG(0x00011800800000F8ull)
+#define CVMX_L2C_LCKBASE \
+	 CVMX_ADD_IO_SEG(0x0001180080000058ull)
+#define CVMX_L2C_LCKOFF \
+	 CVMX_ADD_IO_SEG(0x0001180080000060ull)
+#define CVMX_L2C_LFB0 \
+	 CVMX_ADD_IO_SEG(0x0001180080000038ull)
+#define CVMX_L2C_LFB1 \
+	 CVMX_ADD_IO_SEG(0x0001180080000040ull)
+#define CVMX_L2C_LFB2 \
+	 CVMX_ADD_IO_SEG(0x0001180080000048ull)
+#define CVMX_L2C_LFB3 \
+	 CVMX_ADD_IO_SEG(0x00011800800000B8ull)
+#define CVMX_L2C_OOB \
+	 CVMX_ADD_IO_SEG(0x00011800800000D8ull)
+#define CVMX_L2C_OOB1 \
+	 CVMX_ADD_IO_SEG(0x00011800800000E0ull)
+#define CVMX_L2C_OOB2 \
+	 CVMX_ADD_IO_SEG(0x00011800800000E8ull)
+#define CVMX_L2C_OOB3 \
+	 CVMX_ADD_IO_SEG(0x00011800800000F0ull)
+#define CVMX_L2C_PFC0 \
+	 CVMX_ADD_IO_SEG(0x0001180080000098ull)
+#define CVMX_L2C_PFC1 \
+	 CVMX_ADD_IO_SEG(0x00011800800000A0ull)
+#define CVMX_L2C_PFC2 \
+	 CVMX_ADD_IO_SEG(0x00011800800000A8ull)
+#define CVMX_L2C_PFC3 \
+	 CVMX_ADD_IO_SEG(0x00011800800000B0ull)
+#define CVMX_L2C_PFCTL \
+	 CVMX_ADD_IO_SEG(0x0001180080000090ull)
+#define CVMX_L2C_PFCX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180080000098ull + (((offset) & 3) * 8))
+#define CVMX_L2C_PPGRP \
+	 CVMX_ADD_IO_SEG(0x00011800800000C0ull)
+#define CVMX_L2C_SPAR0 \
+	 CVMX_ADD_IO_SEG(0x0001180080000068ull)
+#define CVMX_L2C_SPAR1 \
+	 CVMX_ADD_IO_SEG(0x0001180080000070ull)
+#define CVMX_L2C_SPAR2 \
+	 CVMX_ADD_IO_SEG(0x0001180080000078ull)
+#define CVMX_L2C_SPAR3 \
+	 CVMX_ADD_IO_SEG(0x0001180080000080ull)
+#define CVMX_L2C_SPAR4 \
+	 CVMX_ADD_IO_SEG(0x0001180080000088ull)
+
+union cvmx_l2c_bst0 {
+	uint64_t u64;
+	struct cvmx_l2c_bst0_s {
+		uint64_t reserved_24_63:40;
+		uint64_t dtbnk:1;
+		uint64_t wlb_msk:4;
+		uint64_t dtcnt:13;
+		uint64_t dt:1;
+		uint64_t stin_msk:1;
+		uint64_t wlb_dat:4;
+	} s;
+	struct cvmx_l2c_bst0_cn30xx {
+		uint64_t reserved_23_63:41;
+		uint64_t wlb_msk:4;
+		uint64_t reserved_15_18:4;
+		uint64_t dtcnt:9;
+		uint64_t dt:1;
+		uint64_t reserved_4_4:1;
+		uint64_t wlb_dat:4;
+	} cn30xx;
+	struct cvmx_l2c_bst0_cn31xx {
+		uint64_t reserved_23_63:41;
+		uint64_t wlb_msk:4;
+		uint64_t reserved_16_18:3;
+		uint64_t dtcnt:10;
+		uint64_t dt:1;
+		uint64_t stin_msk:1;
+		uint64_t wlb_dat:4;
+	} cn31xx;
+	struct cvmx_l2c_bst0_cn38xx {
+		uint64_t reserved_19_63:45;
+		uint64_t dtcnt:13;
+		uint64_t dt:1;
+		uint64_t stin_msk:1;
+		uint64_t wlb_dat:4;
+	} cn38xx;
+	struct cvmx_l2c_bst0_cn38xx cn38xxp2;
+	struct cvmx_l2c_bst0_cn50xx {
+		uint64_t reserved_24_63:40;
+		uint64_t dtbnk:1;
+		uint64_t wlb_msk:4;
+		uint64_t reserved_16_18:3;
+		uint64_t dtcnt:10;
+		uint64_t dt:1;
+		uint64_t stin_msk:1;
+		uint64_t wlb_dat:4;
+	} cn50xx;
+	struct cvmx_l2c_bst0_cn50xx cn52xx;
+	struct cvmx_l2c_bst0_cn50xx cn52xxp1;
+	struct cvmx_l2c_bst0_s cn56xx;
+	struct cvmx_l2c_bst0_s cn56xxp1;
+	struct cvmx_l2c_bst0_s cn58xx;
+	struct cvmx_l2c_bst0_s cn58xxp1;
+};
+
+union cvmx_l2c_bst1 {
+	uint64_t u64;
+	struct cvmx_l2c_bst1_s {
+		uint64_t reserved_9_63:55;
+		uint64_t l2t:9;
+	} s;
+	struct cvmx_l2c_bst1_cn30xx {
+		uint64_t reserved_16_63:48;
+		uint64_t vwdf:4;
+		uint64_t lrf:2;
+		uint64_t vab_vwcf:1;
+		uint64_t reserved_5_8:4;
+		uint64_t l2t:5;
+	} cn30xx;
+	struct cvmx_l2c_bst1_cn30xx cn31xx;
+	struct cvmx_l2c_bst1_cn38xx {
+		uint64_t reserved_16_63:48;
+		uint64_t vwdf:4;
+		uint64_t lrf:2;
+		uint64_t vab_vwcf:1;
+		uint64_t l2t:9;
+	} cn38xx;
+	struct cvmx_l2c_bst1_cn38xx cn38xxp2;
+	struct cvmx_l2c_bst1_cn38xx cn50xx;
+	struct cvmx_l2c_bst1_cn52xx {
+		uint64_t reserved_19_63:45;
+		uint64_t plc2:1;
+		uint64_t plc1:1;
+		uint64_t plc0:1;
+		uint64_t vwdf:4;
+		uint64_t reserved_11_11:1;
+		uint64_t ilc:1;
+		uint64_t vab_vwcf:1;
+		uint64_t l2t:9;
+	} cn52xx;
+	struct cvmx_l2c_bst1_cn52xx cn52xxp1;
+	struct cvmx_l2c_bst1_cn56xx {
+		uint64_t reserved_24_63:40;
+		uint64_t plc2:1;
+		uint64_t plc1:1;
+		uint64_t plc0:1;
+		uint64_t ilc:1;
+		uint64_t vwdf1:4;
+		uint64_t vwdf0:4;
+		uint64_t vab_vwcf1:1;
+		uint64_t reserved_10_10:1;
+		uint64_t vab_vwcf0:1;
+		uint64_t l2t:9;
+	} cn56xx;
+	struct cvmx_l2c_bst1_cn56xx cn56xxp1;
+	struct cvmx_l2c_bst1_cn38xx cn58xx;
+	struct cvmx_l2c_bst1_cn38xx cn58xxp1;
+};
+
+union cvmx_l2c_bst2 {
+	uint64_t u64;
+	struct cvmx_l2c_bst2_s {
+		uint64_t reserved_16_63:48;
+		uint64_t mrb:4;
+		uint64_t reserved_4_11:8;
+		uint64_t ipcbst:1;
+		uint64_t picbst:1;
+		uint64_t xrdmsk:1;
+		uint64_t xrddat:1;
+	} s;
+	struct cvmx_l2c_bst2_cn30xx {
+		uint64_t reserved_16_63:48;
+		uint64_t mrb:4;
+		uint64_t rmdf:4;
+		uint64_t reserved_4_7:4;
+		uint64_t ipcbst:1;
+		uint64_t reserved_2_2:1;
+		uint64_t xrdmsk:1;
+		uint64_t xrddat:1;
+	} cn30xx;
+	struct cvmx_l2c_bst2_cn30xx cn31xx;
+	struct cvmx_l2c_bst2_cn38xx {
+		uint64_t reserved_16_63:48;
+		uint64_t mrb:4;
+		uint64_t rmdf:4;
+		uint64_t rhdf:4;
+		uint64_t ipcbst:1;
+		uint64_t picbst:1;
+		uint64_t xrdmsk:1;
+		uint64_t xrddat:1;
+	} cn38xx;
+	struct cvmx_l2c_bst2_cn38xx cn38xxp2;
+	struct cvmx_l2c_bst2_cn30xx cn50xx;
+	struct cvmx_l2c_bst2_cn30xx cn52xx;
+	struct cvmx_l2c_bst2_cn30xx cn52xxp1;
+	struct cvmx_l2c_bst2_cn56xx {
+		uint64_t reserved_16_63:48;
+		uint64_t mrb:4;
+		uint64_t rmdb:4;
+		uint64_t rhdb:4;
+		uint64_t ipcbst:1;
+		uint64_t picbst:1;
+		uint64_t xrdmsk:1;
+		uint64_t xrddat:1;
+	} cn56xx;
+	struct cvmx_l2c_bst2_cn56xx cn56xxp1;
+	struct cvmx_l2c_bst2_cn56xx cn58xx;
+	struct cvmx_l2c_bst2_cn56xx cn58xxp1;
+};
+
+union cvmx_l2c_cfg {
+	uint64_t u64;
+	struct cvmx_l2c_cfg_s {
+		uint64_t reserved_20_63:44;
+		uint64_t bstrun:1;
+		uint64_t lbist:1;
+		uint64_t xor_bank:1;
+		uint64_t dpres1:1;
+		uint64_t dpres0:1;
+		uint64_t dfill_dis:1;
+		uint64_t fpexp:4;
+		uint64_t fpempty:1;
+		uint64_t fpen:1;
+		uint64_t idxalias:1;
+		uint64_t mwf_crd:4;
+		uint64_t rsp_arb_mode:1;
+		uint64_t rfb_arb_mode:1;
+		uint64_t lrf_arb_mode:1;
+	} s;
+	struct cvmx_l2c_cfg_cn30xx {
+		uint64_t reserved_14_63:50;
+		uint64_t fpexp:4;
+		uint64_t fpempty:1;
+		uint64_t fpen:1;
+		uint64_t idxalias:1;
+		uint64_t mwf_crd:4;
+		uint64_t rsp_arb_mode:1;
+		uint64_t rfb_arb_mode:1;
+		uint64_t lrf_arb_mode:1;
+	} cn30xx;
+	struct cvmx_l2c_cfg_cn30xx cn31xx;
+	struct cvmx_l2c_cfg_cn30xx cn38xx;
+	struct cvmx_l2c_cfg_cn30xx cn38xxp2;
+	struct cvmx_l2c_cfg_cn50xx {
+		uint64_t reserved_20_63:44;
+		uint64_t bstrun:1;
+		uint64_t lbist:1;
+		uint64_t reserved_14_17:4;
+		uint64_t fpexp:4;
+		uint64_t fpempty:1;
+		uint64_t fpen:1;
+		uint64_t idxalias:1;
+		uint64_t mwf_crd:4;
+		uint64_t rsp_arb_mode:1;
+		uint64_t rfb_arb_mode:1;
+		uint64_t lrf_arb_mode:1;
+	} cn50xx;
+	struct cvmx_l2c_cfg_cn50xx cn52xx;
+	struct cvmx_l2c_cfg_cn50xx cn52xxp1;
+	struct cvmx_l2c_cfg_s cn56xx;
+	struct cvmx_l2c_cfg_s cn56xxp1;
+	struct cvmx_l2c_cfg_cn58xx {
+		uint64_t reserved_20_63:44;
+		uint64_t bstrun:1;
+		uint64_t lbist:1;
+		uint64_t reserved_15_17:3;
+		uint64_t dfill_dis:1;
+		uint64_t fpexp:4;
+		uint64_t fpempty:1;
+		uint64_t fpen:1;
+		uint64_t idxalias:1;
+		uint64_t mwf_crd:4;
+		uint64_t rsp_arb_mode:1;
+		uint64_t rfb_arb_mode:1;
+		uint64_t lrf_arb_mode:1;
+	} cn58xx;
+	struct cvmx_l2c_cfg_cn58xxp1 {
+		uint64_t reserved_15_63:49;
+		uint64_t dfill_dis:1;
+		uint64_t fpexp:4;
+		uint64_t fpempty:1;
+		uint64_t fpen:1;
+		uint64_t idxalias:1;
+		uint64_t mwf_crd:4;
+		uint64_t rsp_arb_mode:1;
+		uint64_t rfb_arb_mode:1;
+		uint64_t lrf_arb_mode:1;
+	} cn58xxp1;
+};
+
+union cvmx_l2c_dbg {
+	uint64_t u64;
+	struct cvmx_l2c_dbg_s {
+		uint64_t reserved_15_63:49;
+		uint64_t lfb_enum:4;
+		uint64_t lfb_dmp:1;
+		uint64_t ppnum:4;
+		uint64_t set:3;
+		uint64_t finv:1;
+		uint64_t l2d:1;
+		uint64_t l2t:1;
+	} s;
+	struct cvmx_l2c_dbg_cn30xx {
+		uint64_t reserved_13_63:51;
+		uint64_t lfb_enum:2;
+		uint64_t lfb_dmp:1;
+		uint64_t reserved_5_9:5;
+		uint64_t set:2;
+		uint64_t finv:1;
+		uint64_t l2d:1;
+		uint64_t l2t:1;
+	} cn30xx;
+	struct cvmx_l2c_dbg_cn31xx {
+		uint64_t reserved_14_63:50;
+		uint64_t lfb_enum:3;
+		uint64_t lfb_dmp:1;
+		uint64_t reserved_7_9:3;
+		uint64_t ppnum:1;
+		uint64_t reserved_5_5:1;
+		uint64_t set:2;
+		uint64_t finv:1;
+		uint64_t l2d:1;
+		uint64_t l2t:1;
+	} cn31xx;
+	struct cvmx_l2c_dbg_s cn38xx;
+	struct cvmx_l2c_dbg_s cn38xxp2;
+	struct cvmx_l2c_dbg_cn50xx {
+		uint64_t reserved_14_63:50;
+		uint64_t lfb_enum:3;
+		uint64_t lfb_dmp:1;
+		uint64_t reserved_7_9:3;
+		uint64_t ppnum:1;
+		uint64_t set:3;
+		uint64_t finv:1;
+		uint64_t l2d:1;
+		uint64_t l2t:1;
+	} cn50xx;
+	struct cvmx_l2c_dbg_cn52xx {
+		uint64_t reserved_14_63:50;
+		uint64_t lfb_enum:3;
+		uint64_t lfb_dmp:1;
+		uint64_t reserved_8_9:2;
+		uint64_t ppnum:2;
+		uint64_t set:3;
+		uint64_t finv:1;
+		uint64_t l2d:1;
+		uint64_t l2t:1;
+	} cn52xx;
+	struct cvmx_l2c_dbg_cn52xx cn52xxp1;
+	struct cvmx_l2c_dbg_s cn56xx;
+	struct cvmx_l2c_dbg_s cn56xxp1;
+	struct cvmx_l2c_dbg_s cn58xx;
+	struct cvmx_l2c_dbg_s cn58xxp1;
+};
+
+union cvmx_l2c_dut {
+	uint64_t u64;
+	struct cvmx_l2c_dut_s {
+		uint64_t reserved_32_63:32;
+		uint64_t dtena:1;
+		uint64_t reserved_30_30:1;
+		uint64_t dt_vld:1;
+		uint64_t dt_tag:29;
+	} s;
+	struct cvmx_l2c_dut_s cn30xx;
+	struct cvmx_l2c_dut_s cn31xx;
+	struct cvmx_l2c_dut_s cn38xx;
+	struct cvmx_l2c_dut_s cn38xxp2;
+	struct cvmx_l2c_dut_s cn50xx;
+	struct cvmx_l2c_dut_s cn52xx;
+	struct cvmx_l2c_dut_s cn52xxp1;
+	struct cvmx_l2c_dut_s cn56xx;
+	struct cvmx_l2c_dut_s cn56xxp1;
+	struct cvmx_l2c_dut_s cn58xx;
+	struct cvmx_l2c_dut_s cn58xxp1;
+};
+
+union cvmx_l2c_grpwrr0 {
+	uint64_t u64;
+	struct cvmx_l2c_grpwrr0_s {
+		uint64_t plc1rmsk:32;
+		uint64_t plc0rmsk:32;
+	} s;
+	struct cvmx_l2c_grpwrr0_s cn52xx;
+	struct cvmx_l2c_grpwrr0_s cn52xxp1;
+	struct cvmx_l2c_grpwrr0_s cn56xx;
+	struct cvmx_l2c_grpwrr0_s cn56xxp1;
+};
+
+union cvmx_l2c_grpwrr1 {
+	uint64_t u64;
+	struct cvmx_l2c_grpwrr1_s {
+		uint64_t ilcrmsk:32;
+		uint64_t plc2rmsk:32;
+	} s;
+	struct cvmx_l2c_grpwrr1_s cn52xx;
+	struct cvmx_l2c_grpwrr1_s cn52xxp1;
+	struct cvmx_l2c_grpwrr1_s cn56xx;
+	struct cvmx_l2c_grpwrr1_s cn56xxp1;
+};
+
+union cvmx_l2c_int_en {
+	uint64_t u64;
+	struct cvmx_l2c_int_en_s {
+		uint64_t reserved_9_63:55;
+		uint64_t lck2ena:1;
+		uint64_t lckena:1;
+		uint64_t l2ddeden:1;
+		uint64_t l2dsecen:1;
+		uint64_t l2tdeden:1;
+		uint64_t l2tsecen:1;
+		uint64_t oob3en:1;
+		uint64_t oob2en:1;
+		uint64_t oob1en:1;
+	} s;
+	struct cvmx_l2c_int_en_s cn52xx;
+	struct cvmx_l2c_int_en_s cn52xxp1;
+	struct cvmx_l2c_int_en_s cn56xx;
+	struct cvmx_l2c_int_en_s cn56xxp1;
+};
+
+union cvmx_l2c_int_stat {
+	uint64_t u64;
+	struct cvmx_l2c_int_stat_s {
+		uint64_t reserved_9_63:55;
+		uint64_t lck2:1;
+		uint64_t lck:1;
+		uint64_t l2dded:1;
+		uint64_t l2dsec:1;
+		uint64_t l2tded:1;
+		uint64_t l2tsec:1;
+		uint64_t oob3:1;
+		uint64_t oob2:1;
+		uint64_t oob1:1;
+	} s;
+	struct cvmx_l2c_int_stat_s cn52xx;
+	struct cvmx_l2c_int_stat_s cn52xxp1;
+	struct cvmx_l2c_int_stat_s cn56xx;
+	struct cvmx_l2c_int_stat_s cn56xxp1;
+};
+
+union cvmx_l2c_lckbase {
+	uint64_t u64;
+	struct cvmx_l2c_lckbase_s {
+		uint64_t reserved_31_63:33;
+		uint64_t lck_base:27;
+		uint64_t reserved_1_3:3;
+		uint64_t lck_ena:1;
+	} s;
+	struct cvmx_l2c_lckbase_s cn30xx;
+	struct cvmx_l2c_lckbase_s cn31xx;
+	struct cvmx_l2c_lckbase_s cn38xx;
+	struct cvmx_l2c_lckbase_s cn38xxp2;
+	struct cvmx_l2c_lckbase_s cn50xx;
+	struct cvmx_l2c_lckbase_s cn52xx;
+	struct cvmx_l2c_lckbase_s cn52xxp1;
+	struct cvmx_l2c_lckbase_s cn56xx;
+	struct cvmx_l2c_lckbase_s cn56xxp1;
+	struct cvmx_l2c_lckbase_s cn58xx;
+	struct cvmx_l2c_lckbase_s cn58xxp1;
+};
+
+union cvmx_l2c_lckoff {
+	uint64_t u64;
+	struct cvmx_l2c_lckoff_s {
+		uint64_t reserved_10_63:54;
+		uint64_t lck_offset:10;
+	} s;
+	struct cvmx_l2c_lckoff_s cn30xx;
+	struct cvmx_l2c_lckoff_s cn31xx;
+	struct cvmx_l2c_lckoff_s cn38xx;
+	struct cvmx_l2c_lckoff_s cn38xxp2;
+	struct cvmx_l2c_lckoff_s cn50xx;
+	struct cvmx_l2c_lckoff_s cn52xx;
+	struct cvmx_l2c_lckoff_s cn52xxp1;
+	struct cvmx_l2c_lckoff_s cn56xx;
+	struct cvmx_l2c_lckoff_s cn56xxp1;
+	struct cvmx_l2c_lckoff_s cn58xx;
+	struct cvmx_l2c_lckoff_s cn58xxp1;
+};
+
+union cvmx_l2c_lfb0 {
+	uint64_t u64;
+	struct cvmx_l2c_lfb0_s {
+		uint64_t reserved_32_63:32;
+		uint64_t stcpnd:1;
+		uint64_t stpnd:1;
+		uint64_t stinv:1;
+		uint64_t stcfl:1;
+		uint64_t vam:1;
+		uint64_t inxt:4;
+		uint64_t itl:1;
+		uint64_t ihd:1;
+		uint64_t set:3;
+		uint64_t vabnum:4;
+		uint64_t sid:9;
+		uint64_t cmd:4;
+		uint64_t vld:1;
+	} s;
+	struct cvmx_l2c_lfb0_cn30xx {
+		uint64_t reserved_32_63:32;
+		uint64_t stcpnd:1;
+		uint64_t stpnd:1;
+		uint64_t stinv:1;
+		uint64_t stcfl:1;
+		uint64_t vam:1;
+		uint64_t reserved_25_26:2;
+		uint64_t inxt:2;
+		uint64_t itl:1;
+		uint64_t ihd:1;
+		uint64_t reserved_20_20:1;
+		uint64_t set:2;
+		uint64_t reserved_16_17:2;
+		uint64_t vabnum:2;
+		uint64_t sid:9;
+		uint64_t cmd:4;
+		uint64_t vld:1;
+	} cn30xx;
+	struct cvmx_l2c_lfb0_cn31xx {
+		uint64_t reserved_32_63:32;
+		uint64_t stcpnd:1;
+		uint64_t stpnd:1;
+		uint64_t stinv:1;
+		uint64_t stcfl:1;
+		uint64_t vam:1;
+		uint64_t reserved_26_26:1;
+		uint64_t inxt:3;
+		uint64_t itl:1;
+		uint64_t ihd:1;
+		uint64_t reserved_20_20:1;
+		uint64_t set:2;
+		uint64_t reserved_17_17:1;
+		uint64_t vabnum:3;
+		uint64_t sid:9;
+		uint64_t cmd:4;
+		uint64_t vld:1;
+	} cn31xx;
+	struct cvmx_l2c_lfb0_s cn38xx;
+	struct cvmx_l2c_lfb0_s cn38xxp2;
+	struct cvmx_l2c_lfb0_cn50xx {
+		uint64_t reserved_32_63:32;
+		uint64_t stcpnd:1;
+		uint64_t stpnd:1;
+		uint64_t stinv:1;
+		uint64_t stcfl:1;
+		uint64_t vam:1;
+		uint64_t reserved_26_26:1;
+		uint64_t inxt:3;
+		uint64_t itl:1;
+		uint64_t ihd:1;
+		uint64_t set:3;
+		uint64_t reserved_17_17:1;
+		uint64_t vabnum:3;
+		uint64_t sid:9;
+		uint64_t cmd:4;
+		uint64_t vld:1;
+	} cn50xx;
+	struct cvmx_l2c_lfb0_cn50xx cn52xx;
+	struct cvmx_l2c_lfb0_cn50xx cn52xxp1;
+	struct cvmx_l2c_lfb0_s cn56xx;
+	struct cvmx_l2c_lfb0_s cn56xxp1;
+	struct cvmx_l2c_lfb0_s cn58xx;
+	struct cvmx_l2c_lfb0_s cn58xxp1;
+};
+
+union cvmx_l2c_lfb1 {
+	uint64_t u64;
+	struct cvmx_l2c_lfb1_s {
+		uint64_t reserved_19_63:45;
+		uint64_t dsgoing:1;
+		uint64_t bid:2;
+		uint64_t wtrsp:1;
+		uint64_t wtdw:1;
+		uint64_t wtdq:1;
+		uint64_t wtwhp:1;
+		uint64_t wtwhf:1;
+		uint64_t wtwrm:1;
+		uint64_t wtstm:1;
+		uint64_t wtrda:1;
+		uint64_t wtstdt:1;
+		uint64_t wtstrsp:1;
+		uint64_t wtstrsc:1;
+		uint64_t wtvtm:1;
+		uint64_t wtmfl:1;
+		uint64_t prbrty:1;
+		uint64_t wtprb:1;
+		uint64_t vld:1;
+	} s;
+	struct cvmx_l2c_lfb1_s cn30xx;
+	struct cvmx_l2c_lfb1_s cn31xx;
+	struct cvmx_l2c_lfb1_s cn38xx;
+	struct cvmx_l2c_lfb1_s cn38xxp2;
+	struct cvmx_l2c_lfb1_s cn50xx;
+	struct cvmx_l2c_lfb1_s cn52xx;
+	struct cvmx_l2c_lfb1_s cn52xxp1;
+	struct cvmx_l2c_lfb1_s cn56xx;
+	struct cvmx_l2c_lfb1_s cn56xxp1;
+	struct cvmx_l2c_lfb1_s cn58xx;
+	struct cvmx_l2c_lfb1_s cn58xxp1;
+};
+
+union cvmx_l2c_lfb2 {
+	uint64_t u64;
+	struct cvmx_l2c_lfb2_s {
+		uint64_t reserved_0_63:64;
+	} s;
+	struct cvmx_l2c_lfb2_cn30xx {
+		uint64_t reserved_27_63:37;
+		uint64_t lfb_tag:19;
+		uint64_t lfb_idx:8;
+	} cn30xx;
+	struct cvmx_l2c_lfb2_cn31xx {
+		uint64_t reserved_27_63:37;
+		uint64_t lfb_tag:17;
+		uint64_t lfb_idx:10;
+	} cn31xx;
+	struct cvmx_l2c_lfb2_cn31xx cn38xx;
+	struct cvmx_l2c_lfb2_cn31xx cn38xxp2;
+	struct cvmx_l2c_lfb2_cn50xx {
+		uint64_t reserved_27_63:37;
+		uint64_t lfb_tag:20;
+		uint64_t lfb_idx:7;
+	} cn50xx;
+	struct cvmx_l2c_lfb2_cn52xx {
+		uint64_t reserved_27_63:37;
+		uint64_t lfb_tag:18;
+		uint64_t lfb_idx:9;
+	} cn52xx;
+	struct cvmx_l2c_lfb2_cn52xx cn52xxp1;
+	struct cvmx_l2c_lfb2_cn56xx {
+		uint64_t reserved_27_63:37;
+		uint64_t lfb_tag:16;
+		uint64_t lfb_idx:11;
+	} cn56xx;
+	struct cvmx_l2c_lfb2_cn56xx cn56xxp1;
+	struct cvmx_l2c_lfb2_cn56xx cn58xx;
+	struct cvmx_l2c_lfb2_cn56xx cn58xxp1;
+};
+
+union cvmx_l2c_lfb3 {
+	uint64_t u64;
+	struct cvmx_l2c_lfb3_s {
+		uint64_t reserved_5_63:59;
+		uint64_t stpartdis:1;
+		uint64_t lfb_hwm:4;
+	} s;
+	struct cvmx_l2c_lfb3_cn30xx {
+		uint64_t reserved_5_63:59;
+		uint64_t stpartdis:1;
+		uint64_t reserved_2_3:2;
+		uint64_t lfb_hwm:2;
+	} cn30xx;
+	struct cvmx_l2c_lfb3_cn31xx {
+		uint64_t reserved_5_63:59;
+		uint64_t stpartdis:1;
+		uint64_t reserved_3_3:1;
+		uint64_t lfb_hwm:3;
+	} cn31xx;
+	struct cvmx_l2c_lfb3_s cn38xx;
+	struct cvmx_l2c_lfb3_s cn38xxp2;
+	struct cvmx_l2c_lfb3_cn31xx cn50xx;
+	struct cvmx_l2c_lfb3_cn31xx cn52xx;
+	struct cvmx_l2c_lfb3_cn31xx cn52xxp1;
+	struct cvmx_l2c_lfb3_s cn56xx;
+	struct cvmx_l2c_lfb3_s cn56xxp1;
+	struct cvmx_l2c_lfb3_s cn58xx;
+	struct cvmx_l2c_lfb3_s cn58xxp1;
+};
+
+union cvmx_l2c_oob {
+	uint64_t u64;
+	struct cvmx_l2c_oob_s {
+		uint64_t reserved_2_63:62;
+		uint64_t dwbena:1;
+		uint64_t stena:1;
+	} s;
+	struct cvmx_l2c_oob_s cn52xx;
+	struct cvmx_l2c_oob_s cn52xxp1;
+	struct cvmx_l2c_oob_s cn56xx;
+	struct cvmx_l2c_oob_s cn56xxp1;
+};
+
+union cvmx_l2c_oob1 {
+	uint64_t u64;
+	struct cvmx_l2c_oob1_s {
+		uint64_t fadr:27;
+		uint64_t fsrc:1;
+		uint64_t reserved_34_35:2;
+		uint64_t sadr:14;
+		uint64_t reserved_14_19:6;
+		uint64_t size:14;
+	} s;
+	struct cvmx_l2c_oob1_s cn52xx;
+	struct cvmx_l2c_oob1_s cn52xxp1;
+	struct cvmx_l2c_oob1_s cn56xx;
+	struct cvmx_l2c_oob1_s cn56xxp1;
+};
+
+union cvmx_l2c_oob2 {
+	uint64_t u64;
+	struct cvmx_l2c_oob2_s {
+		uint64_t fadr:27;
+		uint64_t fsrc:1;
+		uint64_t reserved_34_35:2;
+		uint64_t sadr:14;
+		uint64_t reserved_14_19:6;
+		uint64_t size:14;
+	} s;
+	struct cvmx_l2c_oob2_s cn52xx;
+	struct cvmx_l2c_oob2_s cn52xxp1;
+	struct cvmx_l2c_oob2_s cn56xx;
+	struct cvmx_l2c_oob2_s cn56xxp1;
+};
+
+union cvmx_l2c_oob3 {
+	uint64_t u64;
+	struct cvmx_l2c_oob3_s {
+		uint64_t fadr:27;
+		uint64_t fsrc:1;
+		uint64_t reserved_34_35:2;
+		uint64_t sadr:14;
+		uint64_t reserved_14_19:6;
+		uint64_t size:14;
+	} s;
+	struct cvmx_l2c_oob3_s cn52xx;
+	struct cvmx_l2c_oob3_s cn52xxp1;
+	struct cvmx_l2c_oob3_s cn56xx;
+	struct cvmx_l2c_oob3_s cn56xxp1;
+};
+
+union cvmx_l2c_pfcx {
+	uint64_t u64;
+	struct cvmx_l2c_pfcx_s {
+		uint64_t reserved_36_63:28;
+		uint64_t pfcnt0:36;
+	} s;
+	struct cvmx_l2c_pfcx_s cn30xx;
+	struct cvmx_l2c_pfcx_s cn31xx;
+	struct cvmx_l2c_pfcx_s cn38xx;
+	struct cvmx_l2c_pfcx_s cn38xxp2;
+	struct cvmx_l2c_pfcx_s cn50xx;
+	struct cvmx_l2c_pfcx_s cn52xx;
+	struct cvmx_l2c_pfcx_s cn52xxp1;
+	struct cvmx_l2c_pfcx_s cn56xx;
+	struct cvmx_l2c_pfcx_s cn56xxp1;
+	struct cvmx_l2c_pfcx_s cn58xx;
+	struct cvmx_l2c_pfcx_s cn58xxp1;
+};
+
+union cvmx_l2c_pfctl {
+	uint64_t u64;
+	struct cvmx_l2c_pfctl_s {
+		uint64_t reserved_36_63:28;
+		uint64_t cnt3rdclr:1;
+		uint64_t cnt2rdclr:1;
+		uint64_t cnt1rdclr:1;
+		uint64_t cnt0rdclr:1;
+		uint64_t cnt3ena:1;
+		uint64_t cnt3clr:1;
+		uint64_t cnt3sel:6;
+		uint64_t cnt2ena:1;
+		uint64_t cnt2clr:1;
+		uint64_t cnt2sel:6;
+		uint64_t cnt1ena:1;
+		uint64_t cnt1clr:1;
+		uint64_t cnt1sel:6;
+		uint64_t cnt0ena:1;
+		uint64_t cnt0clr:1;
+		uint64_t cnt0sel:6;
+	} s;
+	struct cvmx_l2c_pfctl_s cn30xx;
+	struct cvmx_l2c_pfctl_s cn31xx;
+	struct cvmx_l2c_pfctl_s cn38xx;
+	struct cvmx_l2c_pfctl_s cn38xxp2;
+	struct cvmx_l2c_pfctl_s cn50xx;
+	struct cvmx_l2c_pfctl_s cn52xx;
+	struct cvmx_l2c_pfctl_s cn52xxp1;
+	struct cvmx_l2c_pfctl_s cn56xx;
+	struct cvmx_l2c_pfctl_s cn56xxp1;
+	struct cvmx_l2c_pfctl_s cn58xx;
+	struct cvmx_l2c_pfctl_s cn58xxp1;
+};
+
+union cvmx_l2c_ppgrp {
+	uint64_t u64;
+	struct cvmx_l2c_ppgrp_s {
+		uint64_t reserved_24_63:40;
+		uint64_t pp11grp:2;
+		uint64_t pp10grp:2;
+		uint64_t pp9grp:2;
+		uint64_t pp8grp:2;
+		uint64_t pp7grp:2;
+		uint64_t pp6grp:2;
+		uint64_t pp5grp:2;
+		uint64_t pp4grp:2;
+		uint64_t pp3grp:2;
+		uint64_t pp2grp:2;
+		uint64_t pp1grp:2;
+		uint64_t pp0grp:2;
+	} s;
+	struct cvmx_l2c_ppgrp_cn52xx {
+		uint64_t reserved_8_63:56;
+		uint64_t pp3grp:2;
+		uint64_t pp2grp:2;
+		uint64_t pp1grp:2;
+		uint64_t pp0grp:2;
+	} cn52xx;
+	struct cvmx_l2c_ppgrp_cn52xx cn52xxp1;
+	struct cvmx_l2c_ppgrp_s cn56xx;
+	struct cvmx_l2c_ppgrp_s cn56xxp1;
+};
+
+union cvmx_l2c_spar0 {
+	uint64_t u64;
+	struct cvmx_l2c_spar0_s {
+		uint64_t reserved_32_63:32;
+		uint64_t umsk3:8;
+		uint64_t umsk2:8;
+		uint64_t umsk1:8;
+		uint64_t umsk0:8;
+	} s;
+	struct cvmx_l2c_spar0_cn30xx {
+		uint64_t reserved_4_63:60;
+		uint64_t umsk0:4;
+	} cn30xx;
+	struct cvmx_l2c_spar0_cn31xx {
+		uint64_t reserved_12_63:52;
+		uint64_t umsk1:4;
+		uint64_t reserved_4_7:4;
+		uint64_t umsk0:4;
+	} cn31xx;
+	struct cvmx_l2c_spar0_s cn38xx;
+	struct cvmx_l2c_spar0_s cn38xxp2;
+	struct cvmx_l2c_spar0_cn50xx {
+		uint64_t reserved_16_63:48;
+		uint64_t umsk1:8;
+		uint64_t umsk0:8;
+	} cn50xx;
+	struct cvmx_l2c_spar0_s cn52xx;
+	struct cvmx_l2c_spar0_s cn52xxp1;
+	struct cvmx_l2c_spar0_s cn56xx;
+	struct cvmx_l2c_spar0_s cn56xxp1;
+	struct cvmx_l2c_spar0_s cn58xx;
+	struct cvmx_l2c_spar0_s cn58xxp1;
+};
+
+union cvmx_l2c_spar1 {
+	uint64_t u64;
+	struct cvmx_l2c_spar1_s {
+		uint64_t reserved_32_63:32;
+		uint64_t umsk7:8;
+		uint64_t umsk6:8;
+		uint64_t umsk5:8;
+		uint64_t umsk4:8;
+	} s;
+	struct cvmx_l2c_spar1_s cn38xx;
+	struct cvmx_l2c_spar1_s cn38xxp2;
+	struct cvmx_l2c_spar1_s cn56xx;
+	struct cvmx_l2c_spar1_s cn56xxp1;
+	struct cvmx_l2c_spar1_s cn58xx;
+	struct cvmx_l2c_spar1_s cn58xxp1;
+};
+
+union cvmx_l2c_spar2 {
+	uint64_t u64;
+	struct cvmx_l2c_spar2_s {
+		uint64_t reserved_32_63:32;
+		uint64_t umsk11:8;
+		uint64_t umsk10:8;
+		uint64_t umsk9:8;
+		uint64_t umsk8:8;
+	} s;
+	struct cvmx_l2c_spar2_s cn38xx;
+	struct cvmx_l2c_spar2_s cn38xxp2;
+	struct cvmx_l2c_spar2_s cn56xx;
+	struct cvmx_l2c_spar2_s cn56xxp1;
+	struct cvmx_l2c_spar2_s cn58xx;
+	struct cvmx_l2c_spar2_s cn58xxp1;
+};
+
+union cvmx_l2c_spar3 {
+	uint64_t u64;
+	struct cvmx_l2c_spar3_s {
+		uint64_t reserved_32_63:32;
+		uint64_t umsk15:8;
+		uint64_t umsk14:8;
+		uint64_t umsk13:8;
+		uint64_t umsk12:8;
+	} s;
+	struct cvmx_l2c_spar3_s cn38xx;
+	struct cvmx_l2c_spar3_s cn38xxp2;
+	struct cvmx_l2c_spar3_s cn58xx;
+	struct cvmx_l2c_spar3_s cn58xxp1;
+};
+
+union cvmx_l2c_spar4 {
+	uint64_t u64;
+	struct cvmx_l2c_spar4_s {
+		uint64_t reserved_8_63:56;
+		uint64_t umskiob:8;
+	} s;
+	struct cvmx_l2c_spar4_cn30xx {
+		uint64_t reserved_4_63:60;
+		uint64_t umskiob:4;
+	} cn30xx;
+	struct cvmx_l2c_spar4_cn30xx cn31xx;
+	struct cvmx_l2c_spar4_s cn38xx;
+	struct cvmx_l2c_spar4_s cn38xxp2;
+	struct cvmx_l2c_spar4_s cn50xx;
+	struct cvmx_l2c_spar4_s cn52xx;
+	struct cvmx_l2c_spar4_s cn52xxp1;
+	struct cvmx_l2c_spar4_s cn56xx;
+	struct cvmx_l2c_spar4_s cn56xxp1;
+	struct cvmx_l2c_spar4_s cn58xx;
+	struct cvmx_l2c_spar4_s cn58xxp1;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c.h b/arch/mips/include/asm/octeon/cvmx-l2c.h
new file mode 100644
index 0000000..2a8c090
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-l2c.h
@@ -0,0 +1,325 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Interface to the Level 2 Cache (L2C) control, measurement, and debugging
+ * facilities.
+ */
+
+#ifndef __CVMX_L2C_H__
+#define __CVMX_L2C_H__
+
+/* Deprecated macro, use function */
+#define CVMX_L2_ASSOC     cvmx_l2c_get_num_assoc()
+
+/* Deprecated macro, use function */
+#define CVMX_L2_SET_BITS  cvmx_l2c_get_set_bits()
+
+/* Deprecated macro, use function */
+#define CVMX_L2_SETS      cvmx_l2c_get_num_sets()
+
+#define CVMX_L2C_IDX_ADDR_SHIFT 7  /* based on 128 byte cache line size */
+#define CVMX_L2C_IDX_MASK       (cvmx_l2c_get_num_sets() - 1)
+
+/* Defines for index aliasing computations */
+#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT \
+	(CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
+
+#define CVMX_L2C_ALIAS_MASK \
+	(CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
+
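+/*
+ * Illustrative note (editor's sketch, not part of the original header): the
+ * macros above give the unaliased L2 index of a physical address.  When
+ * index aliasing is enabled the hardware also folds in higher address bits,
+ * so cvmx_l2c_address_to_index() (declared below) should be preferred.
+ *
+ *	uint32_t raw_idx = (addr >> CVMX_L2C_IDX_ADDR_SHIFT) & CVMX_L2C_IDX_MASK;
+ */
+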
+union cvmx_l2c_tag {
+	uint64_t u64;
+	struct {
+		uint64_t reserved:28;
+		uint64_t V:1;	/* Line valid */
+		uint64_t D:1;	/* Line dirty */
+		uint64_t L:1;	/* Line locked */
+		uint64_t U:1;	/* Use, LRU eviction */
+		uint64_t addr:32;	/* Phys mem (not all bits valid) */
+	} s;
+};
+
+/* L2C Performance Counter events. */
+enum cvmx_l2c_event {
+	CVMX_L2C_EVENT_CYCLES = 0,
+	CVMX_L2C_EVENT_INSTRUCTION_MISS = 1,
+	CVMX_L2C_EVENT_INSTRUCTION_HIT = 2,
+	CVMX_L2C_EVENT_DATA_MISS = 3,
+	CVMX_L2C_EVENT_DATA_HIT = 4,
+	CVMX_L2C_EVENT_MISS = 5,
+	CVMX_L2C_EVENT_HIT = 6,
+	CVMX_L2C_EVENT_VICTIM_HIT = 7,
+	CVMX_L2C_EVENT_INDEX_CONFLICT = 8,
+	CVMX_L2C_EVENT_TAG_PROBE = 9,
+	CVMX_L2C_EVENT_TAG_UPDATE = 10,
+	CVMX_L2C_EVENT_TAG_COMPLETE = 11,
+	CVMX_L2C_EVENT_TAG_DIRTY = 12,
+	CVMX_L2C_EVENT_DATA_STORE_NOP = 13,
+	CVMX_L2C_EVENT_DATA_STORE_READ = 14,
+	CVMX_L2C_EVENT_DATA_STORE_WRITE = 15,
+	CVMX_L2C_EVENT_FILL_DATA_VALID = 16,
+	CVMX_L2C_EVENT_WRITE_REQUEST = 17,
+	CVMX_L2C_EVENT_READ_REQUEST = 18,
+	CVMX_L2C_EVENT_WRITE_DATA_VALID = 19,
+	CVMX_L2C_EVENT_XMC_NOP = 20,
+	CVMX_L2C_EVENT_XMC_LDT = 21,
+	CVMX_L2C_EVENT_XMC_LDI = 22,
+	CVMX_L2C_EVENT_XMC_LDD = 23,
+	CVMX_L2C_EVENT_XMC_STF = 24,
+	CVMX_L2C_EVENT_XMC_STT = 25,
+	CVMX_L2C_EVENT_XMC_STP = 26,
+	CVMX_L2C_EVENT_XMC_STC = 27,
+	CVMX_L2C_EVENT_XMC_DWB = 28,
+	CVMX_L2C_EVENT_XMC_PL2 = 29,
+	CVMX_L2C_EVENT_XMC_PSL1 = 30,
+	CVMX_L2C_EVENT_XMC_IOBLD = 31,
+	CVMX_L2C_EVENT_XMC_IOBST = 32,
+	CVMX_L2C_EVENT_XMC_IOBDMA = 33,
+	CVMX_L2C_EVENT_XMC_IOBRSP = 34,
+	CVMX_L2C_EVENT_XMC_BUS_VALID = 35,
+	CVMX_L2C_EVENT_XMC_MEM_DATA = 36,
+	CVMX_L2C_EVENT_XMC_REFL_DATA = 37,
+	CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38,
+	CVMX_L2C_EVENT_RSC_NOP = 39,
+	CVMX_L2C_EVENT_RSC_STDN = 40,
+	CVMX_L2C_EVENT_RSC_FILL = 41,
+	CVMX_L2C_EVENT_RSC_REFL = 42,
+	CVMX_L2C_EVENT_RSC_STIN = 43,
+	CVMX_L2C_EVENT_RSC_SCIN = 44,
+	CVMX_L2C_EVENT_RSC_SCFL = 45,
+	CVMX_L2C_EVENT_RSC_SCDN = 46,
+	CVMX_L2C_EVENT_RSC_DATA_VALID = 47,
+	CVMX_L2C_EVENT_RSC_VALID_FILL = 48,
+	CVMX_L2C_EVENT_RSC_VALID_STRSP = 49,
+	CVMX_L2C_EVENT_RSC_VALID_REFL = 50,
+	CVMX_L2C_EVENT_LRF_REQ = 51,
+	CVMX_L2C_EVENT_DT_RD_ALLOC = 52,
+	CVMX_L2C_EVENT_DT_WR_INVAL = 53
+};
+
+/**
+ * Configure one of the four L2 Cache performance counters to capture event
+ * occurrences.
+ *
+ * @counter:        The counter to configure. Range 0..3.
+ * @event:          The type of L2 Cache event occurrence to count.
+ * @clear_on_read:  When asserted, any read of the performance counter
+ *                       clears the counter.
+ *
+ * The routine does not clear the counter.
+ */
+void cvmx_l2c_config_perf(uint32_t counter,
+			  enum cvmx_l2c_event event, uint32_t clear_on_read);
+/**
+ * Read the given L2 Cache performance counter. The counter must be configured
+ * before reading, but this routine does not enforce this requirement.
+ *
+ * @counter:  The counter to read. Range 0..3.
+ *
+ * Returns The current counter value.
+ */
+uint64_t cvmx_l2c_read_perf(uint32_t counter);
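+
+/*
+ * Illustrative usage sketch (editor's example, not part of the original
+ * header): count L2 data misses on performance counter 0, then read the
+ * result after running the workload of interest.
+ *
+ *	cvmx_l2c_config_perf(0, CVMX_L2C_EVENT_DATA_MISS, 0);
+ *	... run the workload ...
+ *	uint64_t data_misses = cvmx_l2c_read_perf(0);
+ */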
+
+/**
+ * Return the L2 Cache way partitioning for a given core.
+ *
+ * @core:  The core processor of interest.
+ *
+ * Returns    The mask specifying the partitioning. 0 bits in the mask indicate
+ *              the cache 'ways' that the core can evict from.
+ *            -1 on error
+ */
+int cvmx_l2c_get_core_way_partition(uint32_t core);
+
+/**
+ * Partitions the L2 cache for a core
+ *
+ * @core:  The core that the partitioning applies to.
+ *
+ * @mask: The partitioning of the ways expressed as a binary mask. A 0
+ *        bit allows the core to evict cache lines from a way, while a
+ *        1 bit blocks the core from evicting any lines from that
+ *        way. There must be at least one allowed way (0 bit) in the
+ *        mask.
+ *
+ * If any ways are blocked for all cores and the HW blocks, then those
+ * ways will never have any cache lines evicted from them.  All cores
+ * and the hardware blocks are free to read from all ways regardless
+ * of the partitioning.
+ */
+int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
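+
+/*
+ * Illustrative sketch (editor's example, not part of the original header):
+ * on a part with an 8-way L2, restrict core 2 to ways 0-3.  A 1 bit blocks
+ * eviction from the corresponding way, so mask 0xf0 blocks ways 4-7.
+ *
+ *	cvmx_l2c_set_core_way_partition(2, 0xf0);
+ */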
+
+/**
+ * Return the L2 Cache way partitioning for the hw blocks.
+ *
+ * Returns    The mask specifying the partitioning. 0 bits in the mask indicate
+ *              the cache 'ways' that the hardware blocks can evict from.
+ *            -1 on error
+ */
+int cvmx_l2c_get_hw_way_partition(void);
+
+/**
+ * Partitions the L2 cache for the hardware blocks.
+ *
+ * @mask: The partitioning of the ways expressed as a binary mask. A 0
+ *        bit allows the hardware blocks to evict cache lines from a way,
+ *        while a 1 bit blocks them from evicting any lines from that
+ *        way. There must be at least one allowed way (0 bit) in the
+ *        mask.
+ *
+ * If any ways are blocked for all cores and the HW blocks, then those
+ * ways will never have any cache lines evicted from them.  All cores
+ * and the hardware blocks are free to read from all ways regardless
+ * of the partitioning.
+ */
+int cvmx_l2c_set_hw_way_partition(uint32_t mask);
+
+/**
+ * Locks a line in the L2 cache at the specified physical address
+ *
+ * @addr:   physical address of line to lock
+ *
+ * Returns 0 on success,
+ *         1 if line not locked.
+ */
+int cvmx_l2c_lock_line(uint64_t addr);
+
+/**
+ * Locks a specified memory region in the L2 cache.
+ *
+ * Note that if not all lines can be locked, that means that all
+ * but one of the ways (associations) available to the locking
+ * core are locked.  Having only 1 association available for
+ * normal caching may have a significant adverse effect on performance.
+ * Care should be taken to ensure that enough of the L2 cache is left
+ * unlocked to allow for normal caching of DRAM.
+ *
+ * @start:  Physical address of the start of the region to lock
+ * @len:    Length (in bytes) of region to lock
+ *
+ * Returns Number of requested lines that were not locked.
+ *         0 on success (all locked)
+ */
+int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len);
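+
+/*
+ * Illustrative usage sketch (editor's example, not part of the original
+ * header): pin a 4 KB buffer in the L2 cache.  'buf_phys' is a hypothetical
+ * physical address supplied by the caller.
+ *
+ *	int missed = cvmx_l2c_lock_mem_region(buf_phys, 4096);
+ *	if (missed != 0)
+ *		... some of the requested lines were not locked ...
+ */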
+
+/**
+ * Unlock and flush a cache line from the L2 cache.
+ * IMPORTANT: Must only be run by one core at a time due to use
+ * of L2C debug features.
+ * Note that this function will flush a matching but unlocked cache line.
+ * (If address is not in L2, no lines are flushed.)
+ *
+ * @address: Physical address to unlock
+ *
+ * Returns 0: line not unlocked
+ *         1: line unlocked
+ */
+int cvmx_l2c_unlock_line(uint64_t address);
+
+/**
+ * Unlocks a region of memory that is locked in the L2 cache
+ *
+ * @start:  start physical address
+ * @len:    length (in bytes) to unlock
+ *
+ * Returns Number of locked lines that the call unlocked
+ */
+int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
+
+/**
+ * Read the L2 controller tag for a given location in L2
+ *
+ * @association:  Which association (way) to read the line from
+ * @index:        Which index (set) to read from.
+ *
+ * Returns the L2C tag structure for the requested line.
+ */
+union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index);
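+
+/*
+ * Illustrative sketch (editor's example, not part of the original header):
+ * read the tag for way 0 at the index a physical address maps to, using
+ * cvmx_l2c_address_to_index() declared below, and test the valid/dirty bits.
+ *
+ *	union cvmx_l2c_tag tag =
+ *		cvmx_l2c_get_tag(0, cvmx_l2c_address_to_index(addr));
+ *	if (tag.s.V && tag.s.D)
+ *		... the line is present and modified ...
+ */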
+
+/* Wrapper around deprecated old function name */
+static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association,
+					      uint32_t index)
+{
+	return cvmx_l2c_get_tag(association, index);
+}
+
+/**
+ * Returns the cache index for a given physical address
+ *
+ * @addr:   physical address
+ *
+ * Returns L2 cache index
+ */
+uint32_t cvmx_l2c_address_to_index(uint64_t addr);
+
+/**
+ * Flushes (and unlocks) the entire L2 cache.
+ * IMPORTANT: Must only be run by one core at a time due to use
+ * of L2C debug features.
+ */
+void cvmx_l2c_flush(void);
+
+/**
+ * Return the size of the L2 cache.
+ *
+ * Returns the size of the L2 cache in bytes,
+ * -1 on error (unrecognized model)
+ */
+int cvmx_l2c_get_cache_size_bytes(void);
+
+/**
+ * Return the number of sets in the L2 Cache
+ *
+ * Returns the number of sets in the L2 cache.
+ */
+int cvmx_l2c_get_num_sets(void);
+
+/**
+ * Return log base 2 of the number of sets in the L2 cache.
+ *
+ * Returns log2 of the number of sets in the L2 cache.
+ */
+int cvmx_l2c_get_set_bits(void);
+/**
+ * Return the number of associations in the L2 Cache
+ *
+ * Returns the number of associations (ways) in the L2 cache.
+ */
+int cvmx_l2c_get_num_assoc(void);
+
+/**
+ * Flush a line from the L2 cache
+ * This should only be called from one core at a time, as this routine
+ * sets the core to the 'debug' core in order to flush the line.
+ *
+ * @assoc:  Association (or way) to flush
+ * @index:  Index to flush
+ */
+void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index);
+
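+/*
+ * Illustrative sketch (editor's example, not part of the original header):
+ * flush every way at the index a physical address maps to.  As noted above,
+ * this must only be done by one core at a time.
+ *
+ *	uint32_t idx = cvmx_l2c_address_to_index(addr);
+ *	int way;
+ *
+ *	for (way = 0; way < cvmx_l2c_get_num_assoc(); way++)
+ *		cvmx_l2c_flush_line(way, idx);
+ */
+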
+#endif /* __CVMX_L2C_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
new file mode 100644
index 0000000..d7102d4
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
@@ -0,0 +1,369 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_L2D_DEFS_H__
+#define __CVMX_L2D_DEFS_H__
+
+#define CVMX_L2D_BST0 \
+	 CVMX_ADD_IO_SEG(0x0001180080000780ull)
+#define CVMX_L2D_BST1 \
+	 CVMX_ADD_IO_SEG(0x0001180080000788ull)
+#define CVMX_L2D_BST2 \
+	 CVMX_ADD_IO_SEG(0x0001180080000790ull)
+#define CVMX_L2D_BST3 \
+	 CVMX_ADD_IO_SEG(0x0001180080000798ull)
+#define CVMX_L2D_ERR \
+	 CVMX_ADD_IO_SEG(0x0001180080000010ull)
+#define CVMX_L2D_FADR \
+	 CVMX_ADD_IO_SEG(0x0001180080000018ull)
+#define CVMX_L2D_FSYN0 \
+	 CVMX_ADD_IO_SEG(0x0001180080000020ull)
+#define CVMX_L2D_FSYN1 \
+	 CVMX_ADD_IO_SEG(0x0001180080000028ull)
+#define CVMX_L2D_FUS0 \
+	 CVMX_ADD_IO_SEG(0x00011800800007A0ull)
+#define CVMX_L2D_FUS1 \
+	 CVMX_ADD_IO_SEG(0x00011800800007A8ull)
+#define CVMX_L2D_FUS2 \
+	 CVMX_ADD_IO_SEG(0x00011800800007B0ull)
+#define CVMX_L2D_FUS3 \
+	 CVMX_ADD_IO_SEG(0x00011800800007B8ull)
+
+union cvmx_l2d_bst0 {
+	uint64_t u64;
+	struct cvmx_l2d_bst0_s {
+		uint64_t reserved_35_63:29;
+		uint64_t ftl:1;
+		uint64_t q0stat:34;
+	} s;
+	struct cvmx_l2d_bst0_s cn30xx;
+	struct cvmx_l2d_bst0_s cn31xx;
+	struct cvmx_l2d_bst0_s cn38xx;
+	struct cvmx_l2d_bst0_s cn38xxp2;
+	struct cvmx_l2d_bst0_s cn50xx;
+	struct cvmx_l2d_bst0_s cn52xx;
+	struct cvmx_l2d_bst0_s cn52xxp1;
+	struct cvmx_l2d_bst0_s cn56xx;
+	struct cvmx_l2d_bst0_s cn56xxp1;
+	struct cvmx_l2d_bst0_s cn58xx;
+	struct cvmx_l2d_bst0_s cn58xxp1;
+};
+
+union cvmx_l2d_bst1 {
+	uint64_t u64;
+	struct cvmx_l2d_bst1_s {
+		uint64_t reserved_34_63:30;
+		uint64_t q1stat:34;
+	} s;
+	struct cvmx_l2d_bst1_s cn30xx;
+	struct cvmx_l2d_bst1_s cn31xx;
+	struct cvmx_l2d_bst1_s cn38xx;
+	struct cvmx_l2d_bst1_s cn38xxp2;
+	struct cvmx_l2d_bst1_s cn50xx;
+	struct cvmx_l2d_bst1_s cn52xx;
+	struct cvmx_l2d_bst1_s cn52xxp1;
+	struct cvmx_l2d_bst1_s cn56xx;
+	struct cvmx_l2d_bst1_s cn56xxp1;
+	struct cvmx_l2d_bst1_s cn58xx;
+	struct cvmx_l2d_bst1_s cn58xxp1;
+};
+
+union cvmx_l2d_bst2 {
+	uint64_t u64;
+	struct cvmx_l2d_bst2_s {
+		uint64_t reserved_34_63:30;
+		uint64_t q2stat:34;
+	} s;
+	struct cvmx_l2d_bst2_s cn30xx;
+	struct cvmx_l2d_bst2_s cn31xx;
+	struct cvmx_l2d_bst2_s cn38xx;
+	struct cvmx_l2d_bst2_s cn38xxp2;
+	struct cvmx_l2d_bst2_s cn50xx;
+	struct cvmx_l2d_bst2_s cn52xx;
+	struct cvmx_l2d_bst2_s cn52xxp1;
+	struct cvmx_l2d_bst2_s cn56xx;
+	struct cvmx_l2d_bst2_s cn56xxp1;
+	struct cvmx_l2d_bst2_s cn58xx;
+	struct cvmx_l2d_bst2_s cn58xxp1;
+};
+
+union cvmx_l2d_bst3 {
+	uint64_t u64;
+	struct cvmx_l2d_bst3_s {
+		uint64_t reserved_34_63:30;
+		uint64_t q3stat:34;
+	} s;
+	struct cvmx_l2d_bst3_s cn30xx;
+	struct cvmx_l2d_bst3_s cn31xx;
+	struct cvmx_l2d_bst3_s cn38xx;
+	struct cvmx_l2d_bst3_s cn38xxp2;
+	struct cvmx_l2d_bst3_s cn50xx;
+	struct cvmx_l2d_bst3_s cn52xx;
+	struct cvmx_l2d_bst3_s cn52xxp1;
+	struct cvmx_l2d_bst3_s cn56xx;
+	struct cvmx_l2d_bst3_s cn56xxp1;
+	struct cvmx_l2d_bst3_s cn58xx;
+	struct cvmx_l2d_bst3_s cn58xxp1;
+};
+
+union cvmx_l2d_err {
+	uint64_t u64;
+	struct cvmx_l2d_err_s {
+		uint64_t reserved_6_63:58;
+		uint64_t bmhclsel:1;
+		uint64_t ded_err:1;
+		uint64_t sec_err:1;
+		uint64_t ded_intena:1;
+		uint64_t sec_intena:1;
+		uint64_t ecc_ena:1;
+	} s;
+	struct cvmx_l2d_err_s cn30xx;
+	struct cvmx_l2d_err_s cn31xx;
+	struct cvmx_l2d_err_s cn38xx;
+	struct cvmx_l2d_err_s cn38xxp2;
+	struct cvmx_l2d_err_s cn50xx;
+	struct cvmx_l2d_err_s cn52xx;
+	struct cvmx_l2d_err_s cn52xxp1;
+	struct cvmx_l2d_err_s cn56xx;
+	struct cvmx_l2d_err_s cn56xxp1;
+	struct cvmx_l2d_err_s cn58xx;
+	struct cvmx_l2d_err_s cn58xxp1;
+};
+
+union cvmx_l2d_fadr {
+	uint64_t u64;
+	struct cvmx_l2d_fadr_s {
+		uint64_t reserved_19_63:45;
+		uint64_t fadru:1;
+		uint64_t fowmsk:4;
+		uint64_t fset:3;
+		uint64_t fadr:11;
+	} s;
+	struct cvmx_l2d_fadr_cn30xx {
+		uint64_t reserved_18_63:46;
+		uint64_t fowmsk:4;
+		uint64_t reserved_13_13:1;
+		uint64_t fset:2;
+		uint64_t reserved_9_10:2;
+		uint64_t fadr:9;
+	} cn30xx;
+	struct cvmx_l2d_fadr_cn31xx {
+		uint64_t reserved_18_63:46;
+		uint64_t fowmsk:4;
+		uint64_t reserved_13_13:1;
+		uint64_t fset:2;
+		uint64_t reserved_10_10:1;
+		uint64_t fadr:10;
+	} cn31xx;
+	struct cvmx_l2d_fadr_cn38xx {
+		uint64_t reserved_18_63:46;
+		uint64_t fowmsk:4;
+		uint64_t fset:3;
+		uint64_t fadr:11;
+	} cn38xx;
+	struct cvmx_l2d_fadr_cn38xx cn38xxp2;
+	struct cvmx_l2d_fadr_cn50xx {
+		uint64_t reserved_18_63:46;
+		uint64_t fowmsk:4;
+		uint64_t fset:3;
+		uint64_t reserved_8_10:3;
+		uint64_t fadr:8;
+	} cn50xx;
+	struct cvmx_l2d_fadr_cn52xx {
+		uint64_t reserved_18_63:46;
+		uint64_t fowmsk:4;
+		uint64_t fset:3;
+		uint64_t reserved_10_10:1;
+		uint64_t fadr:10;
+	} cn52xx;
+	struct cvmx_l2d_fadr_cn52xx cn52xxp1;
+	struct cvmx_l2d_fadr_s cn56xx;
+	struct cvmx_l2d_fadr_s cn56xxp1;
+	struct cvmx_l2d_fadr_s cn58xx;
+	struct cvmx_l2d_fadr_s cn58xxp1;
+};
+
+union cvmx_l2d_fsyn0 {
+	uint64_t u64;
+	struct cvmx_l2d_fsyn0_s {
+		uint64_t reserved_20_63:44;
+		uint64_t fsyn_ow1:10;
+		uint64_t fsyn_ow0:10;
+	} s;
+	struct cvmx_l2d_fsyn0_s cn30xx;
+	struct cvmx_l2d_fsyn0_s cn31xx;
+	struct cvmx_l2d_fsyn0_s cn38xx;
+	struct cvmx_l2d_fsyn0_s cn38xxp2;
+	struct cvmx_l2d_fsyn0_s cn50xx;
+	struct cvmx_l2d_fsyn0_s cn52xx;
+	struct cvmx_l2d_fsyn0_s cn52xxp1;
+	struct cvmx_l2d_fsyn0_s cn56xx;
+	struct cvmx_l2d_fsyn0_s cn56xxp1;
+	struct cvmx_l2d_fsyn0_s cn58xx;
+	struct cvmx_l2d_fsyn0_s cn58xxp1;
+};
+
+union cvmx_l2d_fsyn1 {
+	uint64_t u64;
+	struct cvmx_l2d_fsyn1_s {
+		uint64_t reserved_20_63:44;
+		uint64_t fsyn_ow3:10;
+		uint64_t fsyn_ow2:10;
+	} s;
+	struct cvmx_l2d_fsyn1_s cn30xx;
+	struct cvmx_l2d_fsyn1_s cn31xx;
+	struct cvmx_l2d_fsyn1_s cn38xx;
+	struct cvmx_l2d_fsyn1_s cn38xxp2;
+	struct cvmx_l2d_fsyn1_s cn50xx;
+	struct cvmx_l2d_fsyn1_s cn52xx;
+	struct cvmx_l2d_fsyn1_s cn52xxp1;
+	struct cvmx_l2d_fsyn1_s cn56xx;
+	struct cvmx_l2d_fsyn1_s cn56xxp1;
+	struct cvmx_l2d_fsyn1_s cn58xx;
+	struct cvmx_l2d_fsyn1_s cn58xxp1;
+};
+
+union cvmx_l2d_fus0 {
+	uint64_t u64;
+	struct cvmx_l2d_fus0_s {
+		uint64_t reserved_34_63:30;
+		uint64_t q0fus:34;
+	} s;
+	struct cvmx_l2d_fus0_s cn30xx;
+	struct cvmx_l2d_fus0_s cn31xx;
+	struct cvmx_l2d_fus0_s cn38xx;
+	struct cvmx_l2d_fus0_s cn38xxp2;
+	struct cvmx_l2d_fus0_s cn50xx;
+	struct cvmx_l2d_fus0_s cn52xx;
+	struct cvmx_l2d_fus0_s cn52xxp1;
+	struct cvmx_l2d_fus0_s cn56xx;
+	struct cvmx_l2d_fus0_s cn56xxp1;
+	struct cvmx_l2d_fus0_s cn58xx;
+	struct cvmx_l2d_fus0_s cn58xxp1;
+};
+
+union cvmx_l2d_fus1 {
+	uint64_t u64;
+	struct cvmx_l2d_fus1_s {
+		uint64_t reserved_34_63:30;
+		uint64_t q1fus:34;
+	} s;
+	struct cvmx_l2d_fus1_s cn30xx;
+	struct cvmx_l2d_fus1_s cn31xx;
+	struct cvmx_l2d_fus1_s cn38xx;
+	struct cvmx_l2d_fus1_s cn38xxp2;
+	struct cvmx_l2d_fus1_s cn50xx;
+	struct cvmx_l2d_fus1_s cn52xx;
+	struct cvmx_l2d_fus1_s cn52xxp1;
+	struct cvmx_l2d_fus1_s cn56xx;
+	struct cvmx_l2d_fus1_s cn56xxp1;
+	struct cvmx_l2d_fus1_s cn58xx;
+	struct cvmx_l2d_fus1_s cn58xxp1;
+};
+
+union cvmx_l2d_fus2 {
+	uint64_t u64;
+	struct cvmx_l2d_fus2_s {
+		uint64_t reserved_34_63:30;
+		uint64_t q2fus:34;
+	} s;
+	struct cvmx_l2d_fus2_s cn30xx;
+	struct cvmx_l2d_fus2_s cn31xx;
+	struct cvmx_l2d_fus2_s cn38xx;
+	struct cvmx_l2d_fus2_s cn38xxp2;
+	struct cvmx_l2d_fus2_s cn50xx;
+	struct cvmx_l2d_fus2_s cn52xx;
+	struct cvmx_l2d_fus2_s cn52xxp1;
+	struct cvmx_l2d_fus2_s cn56xx;
+	struct cvmx_l2d_fus2_s cn56xxp1;
+	struct cvmx_l2d_fus2_s cn58xx;
+	struct cvmx_l2d_fus2_s cn58xxp1;
+};
+
+union cvmx_l2d_fus3 {
+	uint64_t u64;
+	struct cvmx_l2d_fus3_s {
+		uint64_t reserved_40_63:24;
+		uint64_t ema_ctl:3;
+		uint64_t reserved_34_36:3;
+		uint64_t q3fus:34;
+	} s;
+	struct cvmx_l2d_fus3_cn30xx {
+		uint64_t reserved_35_63:29;
+		uint64_t crip_64k:1;
+		uint64_t q3fus:34;
+	} cn30xx;
+	struct cvmx_l2d_fus3_cn31xx {
+		uint64_t reserved_35_63:29;
+		uint64_t crip_128k:1;
+		uint64_t q3fus:34;
+	} cn31xx;
+	struct cvmx_l2d_fus3_cn38xx {
+		uint64_t reserved_36_63:28;
+		uint64_t crip_256k:1;
+		uint64_t crip_512k:1;
+		uint64_t q3fus:34;
+	} cn38xx;
+	struct cvmx_l2d_fus3_cn38xx cn38xxp2;
+	struct cvmx_l2d_fus3_cn50xx {
+		uint64_t reserved_40_63:24;
+		uint64_t ema_ctl:3;
+		uint64_t reserved_36_36:1;
+		uint64_t crip_32k:1;
+		uint64_t crip_64k:1;
+		uint64_t q3fus:34;
+	} cn50xx;
+	struct cvmx_l2d_fus3_cn52xx {
+		uint64_t reserved_40_63:24;
+		uint64_t ema_ctl:3;
+		uint64_t reserved_36_36:1;
+		uint64_t crip_128k:1;
+		uint64_t crip_256k:1;
+		uint64_t q3fus:34;
+	} cn52xx;
+	struct cvmx_l2d_fus3_cn52xx cn52xxp1;
+	struct cvmx_l2d_fus3_cn56xx {
+		uint64_t reserved_40_63:24;
+		uint64_t ema_ctl:3;
+		uint64_t reserved_36_36:1;
+		uint64_t crip_512k:1;
+		uint64_t crip_1024k:1;
+		uint64_t q3fus:34;
+	} cn56xx;
+	struct cvmx_l2d_fus3_cn56xx cn56xxp1;
+	struct cvmx_l2d_fus3_cn58xx {
+		uint64_t reserved_39_63:25;
+		uint64_t ema_ctl:2;
+		uint64_t reserved_36_36:1;
+		uint64_t crip_512k:1;
+		uint64_t crip_1024k:1;
+		uint64_t q3fus:34;
+	} cn58xx;
+	struct cvmx_l2d_fus3_cn58xx cn58xxp1;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-l2t-defs.h b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
new file mode 100644
index 0000000..2639a3f
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
@@ -0,0 +1,141 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_L2T_DEFS_H__
+#define __CVMX_L2T_DEFS_H__
+
+#define CVMX_L2T_ERR \
+	 CVMX_ADD_IO_SEG(0x0001180080000008ull)
+
+union cvmx_l2t_err {
+	uint64_t u64;
+	struct cvmx_l2t_err_s {
+		uint64_t reserved_29_63:35;
+		uint64_t fadru:1;
+		uint64_t lck_intena2:1;
+		uint64_t lckerr2:1;
+		uint64_t lck_intena:1;
+		uint64_t lckerr:1;
+		uint64_t fset:3;
+		uint64_t fadr:10;
+		uint64_t fsyn:6;
+		uint64_t ded_err:1;
+		uint64_t sec_err:1;
+		uint64_t ded_intena:1;
+		uint64_t sec_intena:1;
+		uint64_t ecc_ena:1;
+	} s;
+	struct cvmx_l2t_err_cn30xx {
+		uint64_t reserved_28_63:36;
+		uint64_t lck_intena2:1;
+		uint64_t lckerr2:1;
+		uint64_t lck_intena:1;
+		uint64_t lckerr:1;
+		uint64_t reserved_23_23:1;
+		uint64_t fset:2;
+		uint64_t reserved_19_20:2;
+		uint64_t fadr:8;
+		uint64_t fsyn:6;
+		uint64_t ded_err:1;
+		uint64_t sec_err:1;
+		uint64_t ded_intena:1;
+		uint64_t sec_intena:1;
+		uint64_t ecc_ena:1;
+	} cn30xx;
+	struct cvmx_l2t_err_cn31xx {
+		uint64_t reserved_28_63:36;
+		uint64_t lck_intena2:1;
+		uint64_t lckerr2:1;
+		uint64_t lck_intena:1;
+		uint64_t lckerr:1;
+		uint64_t reserved_23_23:1;
+		uint64_t fset:2;
+		uint64_t reserved_20_20:1;
+		uint64_t fadr:9;
+		uint64_t fsyn:6;
+		uint64_t ded_err:1;
+		uint64_t sec_err:1;
+		uint64_t ded_intena:1;
+		uint64_t sec_intena:1;
+		uint64_t ecc_ena:1;
+	} cn31xx;
+	struct cvmx_l2t_err_cn38xx {
+		uint64_t reserved_28_63:36;
+		uint64_t lck_intena2:1;
+		uint64_t lckerr2:1;
+		uint64_t lck_intena:1;
+		uint64_t lckerr:1;
+		uint64_t fset:3;
+		uint64_t fadr:10;
+		uint64_t fsyn:6;
+		uint64_t ded_err:1;
+		uint64_t sec_err:1;
+		uint64_t ded_intena:1;
+		uint64_t sec_intena:1;
+		uint64_t ecc_ena:1;
+	} cn38xx;
+	struct cvmx_l2t_err_cn38xx cn38xxp2;
+	struct cvmx_l2t_err_cn50xx {
+		uint64_t reserved_28_63:36;
+		uint64_t lck_intena2:1;
+		uint64_t lckerr2:1;
+		uint64_t lck_intena:1;
+		uint64_t lckerr:1;
+		uint64_t fset:3;
+		uint64_t reserved_18_20:3;
+		uint64_t fadr:7;
+		uint64_t fsyn:6;
+		uint64_t ded_err:1;
+		uint64_t sec_err:1;
+		uint64_t ded_intena:1;
+		uint64_t sec_intena:1;
+		uint64_t ecc_ena:1;
+	} cn50xx;
+	struct cvmx_l2t_err_cn52xx {
+		uint64_t reserved_28_63:36;
+		uint64_t lck_intena2:1;
+		uint64_t lckerr2:1;
+		uint64_t lck_intena:1;
+		uint64_t lckerr:1;
+		uint64_t fset:3;
+		uint64_t reserved_20_20:1;
+		uint64_t fadr:9;
+		uint64_t fsyn:6;
+		uint64_t ded_err:1;
+		uint64_t sec_err:1;
+		uint64_t ded_intena:1;
+		uint64_t sec_intena:1;
+		uint64_t ecc_ena:1;
+	} cn52xx;
+	struct cvmx_l2t_err_cn52xx cn52xxp1;
+	struct cvmx_l2t_err_s cn56xx;
+	struct cvmx_l2t_err_s cn56xxp1;
+	struct cvmx_l2t_err_s cn58xx;
+	struct cvmx_l2t_err_s cn58xxp1;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-led-defs.h b/arch/mips/include/asm/octeon/cvmx-led-defs.h
new file mode 100644
index 0000000..16f174a
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-led-defs.h
@@ -0,0 +1,240 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_LED_DEFS_H__
+#define __CVMX_LED_DEFS_H__
+
+#define CVMX_LED_BLINK \
+	 CVMX_ADD_IO_SEG(0x0001180000001A48ull)
+#define CVMX_LED_CLK_PHASE \
+	 CVMX_ADD_IO_SEG(0x0001180000001A08ull)
+#define CVMX_LED_CYLON \
+	 CVMX_ADD_IO_SEG(0x0001180000001AF8ull)
+#define CVMX_LED_DBG \
+	 CVMX_ADD_IO_SEG(0x0001180000001A18ull)
+#define CVMX_LED_EN \
+	 CVMX_ADD_IO_SEG(0x0001180000001A00ull)
+#define CVMX_LED_POLARITY \
+	 CVMX_ADD_IO_SEG(0x0001180000001A50ull)
+#define CVMX_LED_PRT \
+	 CVMX_ADD_IO_SEG(0x0001180000001A10ull)
+#define CVMX_LED_PRT_FMT \
+	 CVMX_ADD_IO_SEG(0x0001180000001A30ull)
+#define CVMX_LED_PRT_STATUSX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000001A80ull + (((offset) & 7) * 8))
+#define CVMX_LED_UDD_CNTX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000001A20ull + (((offset) & 1) * 8))
+#define CVMX_LED_UDD_DATX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000001A38ull + (((offset) & 1) * 8))
+#define CVMX_LED_UDD_DAT_CLRX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000001AC8ull + (((offset) & 1) * 16))
+#define CVMX_LED_UDD_DAT_SETX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000001AC0ull + (((offset) & 1) * 16))
+
+union cvmx_led_blink {
+	uint64_t u64;
+	struct cvmx_led_blink_s {
+		uint64_t reserved_8_63:56;
+		uint64_t rate:8;
+	} s;
+	struct cvmx_led_blink_s cn38xx;
+	struct cvmx_led_blink_s cn38xxp2;
+	struct cvmx_led_blink_s cn56xx;
+	struct cvmx_led_blink_s cn56xxp1;
+	struct cvmx_led_blink_s cn58xx;
+	struct cvmx_led_blink_s cn58xxp1;
+};
+
+union cvmx_led_clk_phase {
+	uint64_t u64;
+	struct cvmx_led_clk_phase_s {
+		uint64_t reserved_7_63:57;
+		uint64_t phase:7;
+	} s;
+	struct cvmx_led_clk_phase_s cn38xx;
+	struct cvmx_led_clk_phase_s cn38xxp2;
+	struct cvmx_led_clk_phase_s cn56xx;
+	struct cvmx_led_clk_phase_s cn56xxp1;
+	struct cvmx_led_clk_phase_s cn58xx;
+	struct cvmx_led_clk_phase_s cn58xxp1;
+};
+
+union cvmx_led_cylon {
+	uint64_t u64;
+	struct cvmx_led_cylon_s {
+		uint64_t reserved_16_63:48;
+		uint64_t rate:16;
+	} s;
+	struct cvmx_led_cylon_s cn38xx;
+	struct cvmx_led_cylon_s cn38xxp2;
+	struct cvmx_led_cylon_s cn56xx;
+	struct cvmx_led_cylon_s cn56xxp1;
+	struct cvmx_led_cylon_s cn58xx;
+	struct cvmx_led_cylon_s cn58xxp1;
+};
+
+union cvmx_led_dbg {
+	uint64_t u64;
+	struct cvmx_led_dbg_s {
+		uint64_t reserved_1_63:63;
+		uint64_t dbg_en:1;
+	} s;
+	struct cvmx_led_dbg_s cn38xx;
+	struct cvmx_led_dbg_s cn38xxp2;
+	struct cvmx_led_dbg_s cn56xx;
+	struct cvmx_led_dbg_s cn56xxp1;
+	struct cvmx_led_dbg_s cn58xx;
+	struct cvmx_led_dbg_s cn58xxp1;
+};
+
+union cvmx_led_en {
+	uint64_t u64;
+	struct cvmx_led_en_s {
+		uint64_t reserved_1_63:63;
+		uint64_t en:1;
+	} s;
+	struct cvmx_led_en_s cn38xx;
+	struct cvmx_led_en_s cn38xxp2;
+	struct cvmx_led_en_s cn56xx;
+	struct cvmx_led_en_s cn56xxp1;
+	struct cvmx_led_en_s cn58xx;
+	struct cvmx_led_en_s cn58xxp1;
+};
+
+union cvmx_led_polarity {
+	uint64_t u64;
+	struct cvmx_led_polarity_s {
+		uint64_t reserved_1_63:63;
+		uint64_t polarity:1;
+	} s;
+	struct cvmx_led_polarity_s cn38xx;
+	struct cvmx_led_polarity_s cn38xxp2;
+	struct cvmx_led_polarity_s cn56xx;
+	struct cvmx_led_polarity_s cn56xxp1;
+	struct cvmx_led_polarity_s cn58xx;
+	struct cvmx_led_polarity_s cn58xxp1;
+};
+
+union cvmx_led_prt {
+	uint64_t u64;
+	struct cvmx_led_prt_s {
+		uint64_t reserved_8_63:56;
+		uint64_t prt_en:8;
+	} s;
+	struct cvmx_led_prt_s cn38xx;
+	struct cvmx_led_prt_s cn38xxp2;
+	struct cvmx_led_prt_s cn56xx;
+	struct cvmx_led_prt_s cn56xxp1;
+	struct cvmx_led_prt_s cn58xx;
+	struct cvmx_led_prt_s cn58xxp1;
+};
+
+union cvmx_led_prt_fmt {
+	uint64_t u64;
+	struct cvmx_led_prt_fmt_s {
+		uint64_t reserved_4_63:60;
+		uint64_t format:4;
+	} s;
+	struct cvmx_led_prt_fmt_s cn38xx;
+	struct cvmx_led_prt_fmt_s cn38xxp2;
+	struct cvmx_led_prt_fmt_s cn56xx;
+	struct cvmx_led_prt_fmt_s cn56xxp1;
+	struct cvmx_led_prt_fmt_s cn58xx;
+	struct cvmx_led_prt_fmt_s cn58xxp1;
+};
+
+union cvmx_led_prt_statusx {
+	uint64_t u64;
+	struct cvmx_led_prt_statusx_s {
+		uint64_t reserved_6_63:58;
+		uint64_t status:6;
+	} s;
+	struct cvmx_led_prt_statusx_s cn38xx;
+	struct cvmx_led_prt_statusx_s cn38xxp2;
+	struct cvmx_led_prt_statusx_s cn56xx;
+	struct cvmx_led_prt_statusx_s cn56xxp1;
+	struct cvmx_led_prt_statusx_s cn58xx;
+	struct cvmx_led_prt_statusx_s cn58xxp1;
+};
+
+union cvmx_led_udd_cntx {
+	uint64_t u64;
+	struct cvmx_led_udd_cntx_s {
+		uint64_t reserved_6_63:58;
+		uint64_t cnt:6;
+	} s;
+	struct cvmx_led_udd_cntx_s cn38xx;
+	struct cvmx_led_udd_cntx_s cn38xxp2;
+	struct cvmx_led_udd_cntx_s cn56xx;
+	struct cvmx_led_udd_cntx_s cn56xxp1;
+	struct cvmx_led_udd_cntx_s cn58xx;
+	struct cvmx_led_udd_cntx_s cn58xxp1;
+};
+
+union cvmx_led_udd_datx {
+	uint64_t u64;
+	struct cvmx_led_udd_datx_s {
+		uint64_t reserved_32_63:32;
+		uint64_t dat:32;
+	} s;
+	struct cvmx_led_udd_datx_s cn38xx;
+	struct cvmx_led_udd_datx_s cn38xxp2;
+	struct cvmx_led_udd_datx_s cn56xx;
+	struct cvmx_led_udd_datx_s cn56xxp1;
+	struct cvmx_led_udd_datx_s cn58xx;
+	struct cvmx_led_udd_datx_s cn58xxp1;
+};
+
+union cvmx_led_udd_dat_clrx {
+	uint64_t u64;
+	struct cvmx_led_udd_dat_clrx_s {
+		uint64_t reserved_32_63:32;
+		uint64_t clr:32;
+	} s;
+	struct cvmx_led_udd_dat_clrx_s cn38xx;
+	struct cvmx_led_udd_dat_clrx_s cn38xxp2;
+	struct cvmx_led_udd_dat_clrx_s cn56xx;
+	struct cvmx_led_udd_dat_clrx_s cn56xxp1;
+	struct cvmx_led_udd_dat_clrx_s cn58xx;
+	struct cvmx_led_udd_dat_clrx_s cn58xxp1;
+};
+
+union cvmx_led_udd_dat_setx {
+	uint64_t u64;
+	struct cvmx_led_udd_dat_setx_s {
+		uint64_t reserved_32_63:32;
+		uint64_t set:32;
+	} s;
+	struct cvmx_led_udd_dat_setx_s cn38xx;
+	struct cvmx_led_udd_dat_setx_s cn38xxp2;
+	struct cvmx_led_udd_dat_setx_s cn56xx;
+	struct cvmx_led_udd_dat_setx_s cn56xxp1;
+	struct cvmx_led_udd_dat_setx_s cn58xx;
+	struct cvmx_led_udd_dat_setx_s cn58xxp1;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-mio-defs.h b/arch/mips/include/asm/octeon/cvmx-mio-defs.h
new file mode 100644
index 0000000..6555f05
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-mio-defs.h
@@ -0,0 +1,2004 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_MIO_DEFS_H__
+#define __CVMX_MIO_DEFS_H__
+
+#define CVMX_MIO_BOOT_BIST_STAT \
+	 CVMX_ADD_IO_SEG(0x00011800000000F8ull)
+#define CVMX_MIO_BOOT_COMP \
+	 CVMX_ADD_IO_SEG(0x00011800000000B8ull)
+#define CVMX_MIO_BOOT_DMA_CFGX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000100ull + (((offset) & 3) * 8))
+#define CVMX_MIO_BOOT_DMA_INTX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000138ull + (((offset) & 3) * 8))
+#define CVMX_MIO_BOOT_DMA_INT_ENX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000150ull + (((offset) & 3) * 8))
+#define CVMX_MIO_BOOT_DMA_TIMX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000120ull + (((offset) & 3) * 8))
+#define CVMX_MIO_BOOT_ERR \
+	 CVMX_ADD_IO_SEG(0x00011800000000A0ull)
+#define CVMX_MIO_BOOT_INT \
+	 CVMX_ADD_IO_SEG(0x00011800000000A8ull)
+#define CVMX_MIO_BOOT_LOC_ADR \
+	 CVMX_ADD_IO_SEG(0x0001180000000090ull)
+#define CVMX_MIO_BOOT_LOC_CFGX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000080ull + (((offset) & 1) * 8))
+#define CVMX_MIO_BOOT_LOC_DAT \
+	 CVMX_ADD_IO_SEG(0x0001180000000098ull)
+#define CVMX_MIO_BOOT_PIN_DEFS \
+	 CVMX_ADD_IO_SEG(0x00011800000000C0ull)
+#define CVMX_MIO_BOOT_REG_CFGX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000000ull + (((offset) & 7) * 8))
+#define CVMX_MIO_BOOT_REG_TIMX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000040ull + (((offset) & 7) * 8))
+#define CVMX_MIO_BOOT_THR \
+	 CVMX_ADD_IO_SEG(0x00011800000000B0ull)
+#define CVMX_MIO_FUS_BNK_DATX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000001520ull + (((offset) & 3) * 8))
+#define CVMX_MIO_FUS_DAT0 \
+	 CVMX_ADD_IO_SEG(0x0001180000001400ull)
+#define CVMX_MIO_FUS_DAT1 \
+	 CVMX_ADD_IO_SEG(0x0001180000001408ull)
+#define CVMX_MIO_FUS_DAT2 \
+	 CVMX_ADD_IO_SEG(0x0001180000001410ull)
+#define CVMX_MIO_FUS_DAT3 \
+	 CVMX_ADD_IO_SEG(0x0001180000001418ull)
+#define CVMX_MIO_FUS_EMA \
+	 CVMX_ADD_IO_SEG(0x0001180000001550ull)
+#define CVMX_MIO_FUS_PDF \
+	 CVMX_ADD_IO_SEG(0x0001180000001420ull)
+#define CVMX_MIO_FUS_PLL \
+	 CVMX_ADD_IO_SEG(0x0001180000001580ull)
+#define CVMX_MIO_FUS_PROG \
+	 CVMX_ADD_IO_SEG(0x0001180000001510ull)
+#define CVMX_MIO_FUS_PROG_TIMES \
+	 CVMX_ADD_IO_SEG(0x0001180000001518ull)
+#define CVMX_MIO_FUS_RCMD \
+	 CVMX_ADD_IO_SEG(0x0001180000001500ull)
+#define CVMX_MIO_FUS_SPR_REPAIR_RES \
+	 CVMX_ADD_IO_SEG(0x0001180000001548ull)
+#define CVMX_MIO_FUS_SPR_REPAIR_SUM \
+	 CVMX_ADD_IO_SEG(0x0001180000001540ull)
+#define CVMX_MIO_FUS_UNLOCK \
+	 CVMX_ADD_IO_SEG(0x0001180000001578ull)
+#define CVMX_MIO_FUS_WADR \
+	 CVMX_ADD_IO_SEG(0x0001180000001508ull)
+#define CVMX_MIO_NDF_DMA_CFG \
+	 CVMX_ADD_IO_SEG(0x0001180000000168ull)
+#define CVMX_MIO_NDF_DMA_INT \
+	 CVMX_ADD_IO_SEG(0x0001180000000170ull)
+#define CVMX_MIO_NDF_DMA_INT_EN \
+	 CVMX_ADD_IO_SEG(0x0001180000000178ull)
+#define CVMX_MIO_PLL_CTL \
+	 CVMX_ADD_IO_SEG(0x0001180000001448ull)
+#define CVMX_MIO_PLL_SETTING \
+	 CVMX_ADD_IO_SEG(0x0001180000001440ull)
+#define CVMX_MIO_TWSX_INT(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000001010ull + (((offset) & 1) * 512))
+#define CVMX_MIO_TWSX_SW_TWSI(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000001000ull + (((offset) & 1) * 512))
+#define CVMX_MIO_TWSX_SW_TWSI_EXT(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000001018ull + (((offset) & 1) * 512))
+#define CVMX_MIO_TWSX_TWSI_SW(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000001008ull + (((offset) & 1) * 512))
+#define CVMX_MIO_UART2_DLH \
+	 CVMX_ADD_IO_SEG(0x0001180000000488ull)
+#define CVMX_MIO_UART2_DLL \
+	 CVMX_ADD_IO_SEG(0x0001180000000480ull)
+#define CVMX_MIO_UART2_FAR \
+	 CVMX_ADD_IO_SEG(0x0001180000000520ull)
+#define CVMX_MIO_UART2_FCR \
+	 CVMX_ADD_IO_SEG(0x0001180000000450ull)
+#define CVMX_MIO_UART2_HTX \
+	 CVMX_ADD_IO_SEG(0x0001180000000708ull)
+#define CVMX_MIO_UART2_IER \
+	 CVMX_ADD_IO_SEG(0x0001180000000408ull)
+#define CVMX_MIO_UART2_IIR \
+	 CVMX_ADD_IO_SEG(0x0001180000000410ull)
+#define CVMX_MIO_UART2_LCR \
+	 CVMX_ADD_IO_SEG(0x0001180000000418ull)
+#define CVMX_MIO_UART2_LSR \
+	 CVMX_ADD_IO_SEG(0x0001180000000428ull)
+#define CVMX_MIO_UART2_MCR \
+	 CVMX_ADD_IO_SEG(0x0001180000000420ull)
+#define CVMX_MIO_UART2_MSR \
+	 CVMX_ADD_IO_SEG(0x0001180000000430ull)
+#define CVMX_MIO_UART2_RBR \
+	 CVMX_ADD_IO_SEG(0x0001180000000400ull)
+#define CVMX_MIO_UART2_RFL \
+	 CVMX_ADD_IO_SEG(0x0001180000000608ull)
+#define CVMX_MIO_UART2_RFW \
+	 CVMX_ADD_IO_SEG(0x0001180000000530ull)
+#define CVMX_MIO_UART2_SBCR \
+	 CVMX_ADD_IO_SEG(0x0001180000000620ull)
+#define CVMX_MIO_UART2_SCR \
+	 CVMX_ADD_IO_SEG(0x0001180000000438ull)
+#define CVMX_MIO_UART2_SFE \
+	 CVMX_ADD_IO_SEG(0x0001180000000630ull)
+#define CVMX_MIO_UART2_SRR \
+	 CVMX_ADD_IO_SEG(0x0001180000000610ull)
+#define CVMX_MIO_UART2_SRT \
+	 CVMX_ADD_IO_SEG(0x0001180000000638ull)
+#define CVMX_MIO_UART2_SRTS \
+	 CVMX_ADD_IO_SEG(0x0001180000000618ull)
+#define CVMX_MIO_UART2_STT \
+	 CVMX_ADD_IO_SEG(0x0001180000000700ull)
+#define CVMX_MIO_UART2_TFL \
+	 CVMX_ADD_IO_SEG(0x0001180000000600ull)
+#define CVMX_MIO_UART2_TFR \
+	 CVMX_ADD_IO_SEG(0x0001180000000528ull)
+#define CVMX_MIO_UART2_THR \
+	 CVMX_ADD_IO_SEG(0x0001180000000440ull)
+#define CVMX_MIO_UART2_USR \
+	 CVMX_ADD_IO_SEG(0x0001180000000538ull)
+#define CVMX_MIO_UARTX_DLH(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000888ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_DLL(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000880ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_FAR(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000920ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_FCR(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000850ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_HTX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000B08ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_IER(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000808ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_IIR(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000810ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_LCR(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000818ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_LSR(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000828ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_MCR(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000820ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_MSR(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000830ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_RBR(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000800ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_RFL(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000A08ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_RFW(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000930ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_SBCR(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000A20ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_SCR(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000838ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_SFE(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000A30ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_SRR(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000A10ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_SRT(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000A38ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_SRTS(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000A18ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_STT(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000B00ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_TFL(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000A00ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_TFR(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000928ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_THR(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000840ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_USR(offset) \
+	 CVMX_ADD_IO_SEG(0x0001180000000938ull + (((offset) & 1) * 1024))
+
+union cvmx_mio_boot_bist_stat {
+	uint64_t u64;
+	struct cvmx_mio_boot_bist_stat_s {
+		uint64_t reserved_2_63:62;
+		uint64_t loc:1;
+		uint64_t ncbi:1;
+	} s;
+	struct cvmx_mio_boot_bist_stat_cn30xx {
+		uint64_t reserved_4_63:60;
+		uint64_t ncbo_1:1;
+		uint64_t ncbo_0:1;
+		uint64_t loc:1;
+		uint64_t ncbi:1;
+	} cn30xx;
+	struct cvmx_mio_boot_bist_stat_cn30xx cn31xx;
+	struct cvmx_mio_boot_bist_stat_cn38xx {
+		uint64_t reserved_3_63:61;
+		uint64_t ncbo_0:1;
+		uint64_t loc:1;
+		uint64_t ncbi:1;
+	} cn38xx;
+	struct cvmx_mio_boot_bist_stat_cn38xx cn38xxp2;
+	struct cvmx_mio_boot_bist_stat_cn50xx {
+		uint64_t reserved_6_63:58;
+		uint64_t pcm_1:1;
+		uint64_t pcm_0:1;
+		uint64_t ncbo_1:1;
+		uint64_t ncbo_0:1;
+		uint64_t loc:1;
+		uint64_t ncbi:1;
+	} cn50xx;
+	struct cvmx_mio_boot_bist_stat_cn52xx {
+		uint64_t reserved_6_63:58;
+		uint64_t ndf:2;
+		uint64_t ncbo_0:1;
+		uint64_t dma:1;
+		uint64_t loc:1;
+		uint64_t ncbi:1;
+	} cn52xx;
+	struct cvmx_mio_boot_bist_stat_cn52xxp1 {
+		uint64_t reserved_4_63:60;
+		uint64_t ncbo_0:1;
+		uint64_t dma:1;
+		uint64_t loc:1;
+		uint64_t ncbi:1;
+	} cn52xxp1;
+	struct cvmx_mio_boot_bist_stat_cn52xxp1 cn56xx;
+	struct cvmx_mio_boot_bist_stat_cn52xxp1 cn56xxp1;
+	struct cvmx_mio_boot_bist_stat_cn38xx cn58xx;
+	struct cvmx_mio_boot_bist_stat_cn38xx cn58xxp1;
+};
+
+union cvmx_mio_boot_comp {
+	uint64_t u64;
+	struct cvmx_mio_boot_comp_s {
+		uint64_t reserved_10_63:54;
+		uint64_t pctl:5;
+		uint64_t nctl:5;
+	} s;
+	struct cvmx_mio_boot_comp_s cn50xx;
+	struct cvmx_mio_boot_comp_s cn52xx;
+	struct cvmx_mio_boot_comp_s cn52xxp1;
+	struct cvmx_mio_boot_comp_s cn56xx;
+	struct cvmx_mio_boot_comp_s cn56xxp1;
+};
+
+union cvmx_mio_boot_dma_cfgx {
+	uint64_t u64;
+	struct cvmx_mio_boot_dma_cfgx_s {
+		uint64_t en:1;
+		uint64_t rw:1;
+		uint64_t clr:1;
+		uint64_t reserved_60_60:1;
+		uint64_t swap32:1;
+		uint64_t swap16:1;
+		uint64_t swap8:1;
+		uint64_t endian:1;
+		uint64_t size:20;
+		uint64_t adr:36;
+	} s;
+	struct cvmx_mio_boot_dma_cfgx_s cn52xx;
+	struct cvmx_mio_boot_dma_cfgx_s cn52xxp1;
+	struct cvmx_mio_boot_dma_cfgx_s cn56xx;
+	struct cvmx_mio_boot_dma_cfgx_s cn56xxp1;
+};
+
+union cvmx_mio_boot_dma_intx {
+	uint64_t u64;
+	struct cvmx_mio_boot_dma_intx_s {
+		uint64_t reserved_2_63:62;
+		uint64_t dmarq:1;
+		uint64_t done:1;
+	} s;
+	struct cvmx_mio_boot_dma_intx_s cn52xx;
+	struct cvmx_mio_boot_dma_intx_s cn52xxp1;
+	struct cvmx_mio_boot_dma_intx_s cn56xx;
+	struct cvmx_mio_boot_dma_intx_s cn56xxp1;
+};
+
+union cvmx_mio_boot_dma_int_enx {
+	uint64_t u64;
+	struct cvmx_mio_boot_dma_int_enx_s {
+		uint64_t reserved_2_63:62;
+		uint64_t dmarq:1;
+		uint64_t done:1;
+	} s;
+	struct cvmx_mio_boot_dma_int_enx_s cn52xx;
+	struct cvmx_mio_boot_dma_int_enx_s cn52xxp1;
+	struct cvmx_mio_boot_dma_int_enx_s cn56xx;
+	struct cvmx_mio_boot_dma_int_enx_s cn56xxp1;
+};
+
+union cvmx_mio_boot_dma_timx {
+	uint64_t u64;
+	struct cvmx_mio_boot_dma_timx_s {
+		uint64_t dmack_pi:1;
+		uint64_t dmarq_pi:1;
+		uint64_t tim_mult:2;
+		uint64_t rd_dly:3;
+		uint64_t ddr:1;
+		uint64_t width:1;
+		uint64_t reserved_48_54:7;
+		uint64_t pause:6;
+		uint64_t dmack_h:6;
+		uint64_t we_n:6;
+		uint64_t we_a:6;
+		uint64_t oe_n:6;
+		uint64_t oe_a:6;
+		uint64_t dmack_s:6;
+		uint64_t dmarq:6;
+	} s;
+	struct cvmx_mio_boot_dma_timx_s cn52xx;
+	struct cvmx_mio_boot_dma_timx_s cn52xxp1;
+	struct cvmx_mio_boot_dma_timx_s cn56xx;
+	struct cvmx_mio_boot_dma_timx_s cn56xxp1;
+};
+
+union cvmx_mio_boot_err {
+	uint64_t u64;
+	struct cvmx_mio_boot_err_s {
+		uint64_t reserved_2_63:62;
+		uint64_t wait_err:1;
+		uint64_t adr_err:1;
+	} s;
+	struct cvmx_mio_boot_err_s cn30xx;
+	struct cvmx_mio_boot_err_s cn31xx;
+	struct cvmx_mio_boot_err_s cn38xx;
+	struct cvmx_mio_boot_err_s cn38xxp2;
+	struct cvmx_mio_boot_err_s cn50xx;
+	struct cvmx_mio_boot_err_s cn52xx;
+	struct cvmx_mio_boot_err_s cn52xxp1;
+	struct cvmx_mio_boot_err_s cn56xx;
+	struct cvmx_mio_boot_err_s cn56xxp1;
+	struct cvmx_mio_boot_err_s cn58xx;
+	struct cvmx_mio_boot_err_s cn58xxp1;
+};
+
+union cvmx_mio_boot_int {
+	uint64_t u64;
+	struct cvmx_mio_boot_int_s {
+		uint64_t reserved_2_63:62;
+		uint64_t wait_int:1;
+		uint64_t adr_int:1;
+	} s;
+	struct cvmx_mio_boot_int_s cn30xx;
+	struct cvmx_mio_boot_int_s cn31xx;
+	struct cvmx_mio_boot_int_s cn38xx;
+	struct cvmx_mio_boot_int_s cn38xxp2;
+	struct cvmx_mio_boot_int_s cn50xx;
+	struct cvmx_mio_boot_int_s cn52xx;
+	struct cvmx_mio_boot_int_s cn52xxp1;
+	struct cvmx_mio_boot_int_s cn56xx;
+	struct cvmx_mio_boot_int_s cn56xxp1;
+	struct cvmx_mio_boot_int_s cn58xx;
+	struct cvmx_mio_boot_int_s cn58xxp1;
+};
+
+union cvmx_mio_boot_loc_adr {
+	uint64_t u64;
+	struct cvmx_mio_boot_loc_adr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t adr:5;
+		uint64_t reserved_0_2:3;
+	} s;
+	struct cvmx_mio_boot_loc_adr_s cn30xx;
+	struct cvmx_mio_boot_loc_adr_s cn31xx;
+	struct cvmx_mio_boot_loc_adr_s cn38xx;
+	struct cvmx_mio_boot_loc_adr_s cn38xxp2;
+	struct cvmx_mio_boot_loc_adr_s cn50xx;
+	struct cvmx_mio_boot_loc_adr_s cn52xx;
+	struct cvmx_mio_boot_loc_adr_s cn52xxp1;
+	struct cvmx_mio_boot_loc_adr_s cn56xx;
+	struct cvmx_mio_boot_loc_adr_s cn56xxp1;
+	struct cvmx_mio_boot_loc_adr_s cn58xx;
+	struct cvmx_mio_boot_loc_adr_s cn58xxp1;
+};
+
+union cvmx_mio_boot_loc_cfgx {
+	uint64_t u64;
+	struct cvmx_mio_boot_loc_cfgx_s {
+		uint64_t reserved_32_63:32;
+		uint64_t en:1;
+		uint64_t reserved_28_30:3;
+		uint64_t base:25;
+		uint64_t reserved_0_2:3;
+	} s;
+	struct cvmx_mio_boot_loc_cfgx_s cn30xx;
+	struct cvmx_mio_boot_loc_cfgx_s cn31xx;
+	struct cvmx_mio_boot_loc_cfgx_s cn38xx;
+	struct cvmx_mio_boot_loc_cfgx_s cn38xxp2;
+	struct cvmx_mio_boot_loc_cfgx_s cn50xx;
+	struct cvmx_mio_boot_loc_cfgx_s cn52xx;
+	struct cvmx_mio_boot_loc_cfgx_s cn52xxp1;
+	struct cvmx_mio_boot_loc_cfgx_s cn56xx;
+	struct cvmx_mio_boot_loc_cfgx_s cn56xxp1;
+	struct cvmx_mio_boot_loc_cfgx_s cn58xx;
+	struct cvmx_mio_boot_loc_cfgx_s cn58xxp1;
+};
+
+union cvmx_mio_boot_loc_dat {
+	uint64_t u64;
+	struct cvmx_mio_boot_loc_dat_s {
+		uint64_t data:64;
+	} s;
+	struct cvmx_mio_boot_loc_dat_s cn30xx;
+	struct cvmx_mio_boot_loc_dat_s cn31xx;
+	struct cvmx_mio_boot_loc_dat_s cn38xx;
+	struct cvmx_mio_boot_loc_dat_s cn38xxp2;
+	struct cvmx_mio_boot_loc_dat_s cn50xx;
+	struct cvmx_mio_boot_loc_dat_s cn52xx;
+	struct cvmx_mio_boot_loc_dat_s cn52xxp1;
+	struct cvmx_mio_boot_loc_dat_s cn56xx;
+	struct cvmx_mio_boot_loc_dat_s cn56xxp1;
+	struct cvmx_mio_boot_loc_dat_s cn58xx;
+	struct cvmx_mio_boot_loc_dat_s cn58xxp1;
+};
+
+union cvmx_mio_boot_pin_defs {
+	uint64_t u64;
+	struct cvmx_mio_boot_pin_defs_s {
+		uint64_t reserved_16_63:48;
+		uint64_t ale:1;
+		uint64_t width:1;
+		uint64_t dmack_p2:1;
+		uint64_t dmack_p1:1;
+		uint64_t dmack_p0:1;
+		uint64_t term:2;
+		uint64_t nand:1;
+		uint64_t reserved_0_7:8;
+	} s;
+	struct cvmx_mio_boot_pin_defs_cn52xx {
+		uint64_t reserved_16_63:48;
+		uint64_t ale:1;
+		uint64_t width:1;
+		uint64_t reserved_13_13:1;
+		uint64_t dmack_p1:1;
+		uint64_t dmack_p0:1;
+		uint64_t term:2;
+		uint64_t nand:1;
+		uint64_t reserved_0_7:8;
+	} cn52xx;
+	struct cvmx_mio_boot_pin_defs_cn56xx {
+		uint64_t reserved_16_63:48;
+		uint64_t ale:1;
+		uint64_t width:1;
+		uint64_t dmack_p2:1;
+		uint64_t dmack_p1:1;
+		uint64_t dmack_p0:1;
+		uint64_t term:2;
+		uint64_t reserved_0_8:9;
+	} cn56xx;
+};
+
+union cvmx_mio_boot_reg_cfgx {
+	uint64_t u64;
+	struct cvmx_mio_boot_reg_cfgx_s {
+		uint64_t reserved_44_63:20;
+		uint64_t dmack:2;
+		uint64_t tim_mult:2;
+		uint64_t rd_dly:3;
+		uint64_t sam:1;
+		uint64_t we_ext:2;
+		uint64_t oe_ext:2;
+		uint64_t en:1;
+		uint64_t orbit:1;
+		uint64_t ale:1;
+		uint64_t width:1;
+		uint64_t size:12;
+		uint64_t base:16;
+	} s;
+	struct cvmx_mio_boot_reg_cfgx_cn30xx {
+		uint64_t reserved_37_63:27;
+		uint64_t sam:1;
+		uint64_t we_ext:2;
+		uint64_t oe_ext:2;
+		uint64_t en:1;
+		uint64_t orbit:1;
+		uint64_t ale:1;
+		uint64_t width:1;
+		uint64_t size:12;
+		uint64_t base:16;
+	} cn30xx;
+	struct cvmx_mio_boot_reg_cfgx_cn30xx cn31xx;
+	struct cvmx_mio_boot_reg_cfgx_cn38xx {
+		uint64_t reserved_32_63:32;
+		uint64_t en:1;
+		uint64_t orbit:1;
+		uint64_t reserved_28_29:2;
+		uint64_t size:12;
+		uint64_t base:16;
+	} cn38xx;
+	struct cvmx_mio_boot_reg_cfgx_cn38xx cn38xxp2;
+	struct cvmx_mio_boot_reg_cfgx_cn50xx {
+		uint64_t reserved_42_63:22;
+		uint64_t tim_mult:2;
+		uint64_t rd_dly:3;
+		uint64_t sam:1;
+		uint64_t we_ext:2;
+		uint64_t oe_ext:2;
+		uint64_t en:1;
+		uint64_t orbit:1;
+		uint64_t ale:1;
+		uint64_t width:1;
+		uint64_t size:12;
+		uint64_t base:16;
+	} cn50xx;
+	struct cvmx_mio_boot_reg_cfgx_s cn52xx;
+	struct cvmx_mio_boot_reg_cfgx_s cn52xxp1;
+	struct cvmx_mio_boot_reg_cfgx_s cn56xx;
+	struct cvmx_mio_boot_reg_cfgx_s cn56xxp1;
+	struct cvmx_mio_boot_reg_cfgx_cn30xx cn58xx;
+	struct cvmx_mio_boot_reg_cfgx_cn30xx cn58xxp1;
+};
+
+union cvmx_mio_boot_reg_timx {
+	uint64_t u64;
+	struct cvmx_mio_boot_reg_timx_s {
+		uint64_t pagem:1;
+		uint64_t waitm:1;
+		uint64_t pages:2;
+		uint64_t ale:6;
+		uint64_t page:6;
+		uint64_t wait:6;
+		uint64_t pause:6;
+		uint64_t wr_hld:6;
+		uint64_t rd_hld:6;
+		uint64_t we:6;
+		uint64_t oe:6;
+		uint64_t ce:6;
+		uint64_t adr:6;
+	} s;
+	struct cvmx_mio_boot_reg_timx_s cn30xx;
+	struct cvmx_mio_boot_reg_timx_s cn31xx;
+	struct cvmx_mio_boot_reg_timx_cn38xx {
+		uint64_t pagem:1;
+		uint64_t waitm:1;
+		uint64_t pages:2;
+		uint64_t reserved_54_59:6;
+		uint64_t page:6;
+		uint64_t wait:6;
+		uint64_t pause:6;
+		uint64_t wr_hld:6;
+		uint64_t rd_hld:6;
+		uint64_t we:6;
+		uint64_t oe:6;
+		uint64_t ce:6;
+		uint64_t adr:6;
+	} cn38xx;
+	struct cvmx_mio_boot_reg_timx_cn38xx cn38xxp2;
+	struct cvmx_mio_boot_reg_timx_s cn50xx;
+	struct cvmx_mio_boot_reg_timx_s cn52xx;
+	struct cvmx_mio_boot_reg_timx_s cn52xxp1;
+	struct cvmx_mio_boot_reg_timx_s cn56xx;
+	struct cvmx_mio_boot_reg_timx_s cn56xxp1;
+	struct cvmx_mio_boot_reg_timx_s cn58xx;
+	struct cvmx_mio_boot_reg_timx_s cn58xxp1;
+};
+
+union cvmx_mio_boot_thr {
+	uint64_t u64;
+	struct cvmx_mio_boot_thr_s {
+		uint64_t reserved_22_63:42;
+		uint64_t dma_thr:6;
+		uint64_t reserved_14_15:2;
+		uint64_t fif_cnt:6;
+		uint64_t reserved_6_7:2;
+		uint64_t fif_thr:6;
+	} s;
+	struct cvmx_mio_boot_thr_cn30xx {
+		uint64_t reserved_14_63:50;
+		uint64_t fif_cnt:6;
+		uint64_t reserved_6_7:2;
+		uint64_t fif_thr:6;
+	} cn30xx;
+	struct cvmx_mio_boot_thr_cn30xx cn31xx;
+	struct cvmx_mio_boot_thr_cn30xx cn38xx;
+	struct cvmx_mio_boot_thr_cn30xx cn38xxp2;
+	struct cvmx_mio_boot_thr_cn30xx cn50xx;
+	struct cvmx_mio_boot_thr_s cn52xx;
+	struct cvmx_mio_boot_thr_s cn52xxp1;
+	struct cvmx_mio_boot_thr_s cn56xx;
+	struct cvmx_mio_boot_thr_s cn56xxp1;
+	struct cvmx_mio_boot_thr_cn30xx cn58xx;
+	struct cvmx_mio_boot_thr_cn30xx cn58xxp1;
+};
+
+union cvmx_mio_fus_bnk_datx {
+	uint64_t u64;
+	struct cvmx_mio_fus_bnk_datx_s {
+		uint64_t dat:64;
+	} s;
+	struct cvmx_mio_fus_bnk_datx_s cn50xx;
+	struct cvmx_mio_fus_bnk_datx_s cn52xx;
+	struct cvmx_mio_fus_bnk_datx_s cn52xxp1;
+	struct cvmx_mio_fus_bnk_datx_s cn56xx;
+	struct cvmx_mio_fus_bnk_datx_s cn56xxp1;
+	struct cvmx_mio_fus_bnk_datx_s cn58xx;
+	struct cvmx_mio_fus_bnk_datx_s cn58xxp1;
+};
+
+union cvmx_mio_fus_dat0 {
+	uint64_t u64;
+	struct cvmx_mio_fus_dat0_s {
+		uint64_t reserved_32_63:32;
+		uint64_t man_info:32;
+	} s;
+	struct cvmx_mio_fus_dat0_s cn30xx;
+	struct cvmx_mio_fus_dat0_s cn31xx;
+	struct cvmx_mio_fus_dat0_s cn38xx;
+	struct cvmx_mio_fus_dat0_s cn38xxp2;
+	struct cvmx_mio_fus_dat0_s cn50xx;
+	struct cvmx_mio_fus_dat0_s cn52xx;
+	struct cvmx_mio_fus_dat0_s cn52xxp1;
+	struct cvmx_mio_fus_dat0_s cn56xx;
+	struct cvmx_mio_fus_dat0_s cn56xxp1;
+	struct cvmx_mio_fus_dat0_s cn58xx;
+	struct cvmx_mio_fus_dat0_s cn58xxp1;
+};
+
+union cvmx_mio_fus_dat1 {
+	uint64_t u64;
+	struct cvmx_mio_fus_dat1_s {
+		uint64_t reserved_32_63:32;
+		uint64_t man_info:32;
+	} s;
+	struct cvmx_mio_fus_dat1_s cn30xx;
+	struct cvmx_mio_fus_dat1_s cn31xx;
+	struct cvmx_mio_fus_dat1_s cn38xx;
+	struct cvmx_mio_fus_dat1_s cn38xxp2;
+	struct cvmx_mio_fus_dat1_s cn50xx;
+	struct cvmx_mio_fus_dat1_s cn52xx;
+	struct cvmx_mio_fus_dat1_s cn52xxp1;
+	struct cvmx_mio_fus_dat1_s cn56xx;
+	struct cvmx_mio_fus_dat1_s cn56xxp1;
+	struct cvmx_mio_fus_dat1_s cn58xx;
+	struct cvmx_mio_fus_dat1_s cn58xxp1;
+};
+
+union cvmx_mio_fus_dat2 {
+	uint64_t u64;
+	struct cvmx_mio_fus_dat2_s {
+		uint64_t reserved_34_63:30;
+		uint64_t fus318:1;
+		uint64_t raid_en:1;
+		uint64_t reserved_30_31:2;
+		uint64_t nokasu:1;
+		uint64_t nodfa_cp2:1;
+		uint64_t nomul:1;
+		uint64_t nocrypto:1;
+		uint64_t rst_sht:1;
+		uint64_t bist_dis:1;
+		uint64_t chip_id:8;
+		uint64_t reserved_0_15:16;
+	} s;
+	struct cvmx_mio_fus_dat2_cn30xx {
+		uint64_t reserved_29_63:35;
+		uint64_t nodfa_cp2:1;
+		uint64_t nomul:1;
+		uint64_t nocrypto:1;
+		uint64_t rst_sht:1;
+		uint64_t bist_dis:1;
+		uint64_t chip_id:8;
+		uint64_t pll_off:4;
+		uint64_t reserved_1_11:11;
+		uint64_t pp_dis:1;
+	} cn30xx;
+	struct cvmx_mio_fus_dat2_cn31xx {
+		uint64_t reserved_29_63:35;
+		uint64_t nodfa_cp2:1;
+		uint64_t nomul:1;
+		uint64_t nocrypto:1;
+		uint64_t rst_sht:1;
+		uint64_t bist_dis:1;
+		uint64_t chip_id:8;
+		uint64_t pll_off:4;
+		uint64_t reserved_2_11:10;
+		uint64_t pp_dis:2;
+	} cn31xx;
+	struct cvmx_mio_fus_dat2_cn38xx {
+		uint64_t reserved_29_63:35;
+		uint64_t nodfa_cp2:1;
+		uint64_t nomul:1;
+		uint64_t nocrypto:1;
+		uint64_t rst_sht:1;
+		uint64_t bist_dis:1;
+		uint64_t chip_id:8;
+		uint64_t pp_dis:16;
+	} cn38xx;
+	struct cvmx_mio_fus_dat2_cn38xx cn38xxp2;
+	struct cvmx_mio_fus_dat2_cn50xx {
+		uint64_t reserved_34_63:30;
+		uint64_t fus318:1;
+		uint64_t raid_en:1;
+		uint64_t reserved_30_31:2;
+		uint64_t nokasu:1;
+		uint64_t nodfa_cp2:1;
+		uint64_t nomul:1;
+		uint64_t nocrypto:1;
+		uint64_t rst_sht:1;
+		uint64_t bist_dis:1;
+		uint64_t chip_id:8;
+		uint64_t reserved_2_15:14;
+		uint64_t pp_dis:2;
+	} cn50xx;
+	struct cvmx_mio_fus_dat2_cn52xx {
+		uint64_t reserved_34_63:30;
+		uint64_t fus318:1;
+		uint64_t raid_en:1;
+		uint64_t reserved_30_31:2;
+		uint64_t nokasu:1;
+		uint64_t nodfa_cp2:1;
+		uint64_t nomul:1;
+		uint64_t nocrypto:1;
+		uint64_t rst_sht:1;
+		uint64_t bist_dis:1;
+		uint64_t chip_id:8;
+		uint64_t reserved_4_15:12;
+		uint64_t pp_dis:4;
+	} cn52xx;
+	struct cvmx_mio_fus_dat2_cn52xx cn52xxp1;
+	struct cvmx_mio_fus_dat2_cn56xx {
+		uint64_t reserved_34_63:30;
+		uint64_t fus318:1;
+		uint64_t raid_en:1;
+		uint64_t reserved_30_31:2;
+		uint64_t nokasu:1;
+		uint64_t nodfa_cp2:1;
+		uint64_t nomul:1;
+		uint64_t nocrypto:1;
+		uint64_t rst_sht:1;
+		uint64_t bist_dis:1;
+		uint64_t chip_id:8;
+		uint64_t reserved_12_15:4;
+		uint64_t pp_dis:12;
+	} cn56xx;
+	struct cvmx_mio_fus_dat2_cn56xx cn56xxp1;
+	struct cvmx_mio_fus_dat2_cn58xx {
+		uint64_t reserved_30_63:34;
+		uint64_t nokasu:1;
+		uint64_t nodfa_cp2:1;
+		uint64_t nomul:1;
+		uint64_t nocrypto:1;
+		uint64_t rst_sht:1;
+		uint64_t bist_dis:1;
+		uint64_t chip_id:8;
+		uint64_t pp_dis:16;
+	} cn58xx;
+	struct cvmx_mio_fus_dat2_cn58xx cn58xxp1;
+};
+
+union cvmx_mio_fus_dat3 {
+	uint64_t u64;
+	struct cvmx_mio_fus_dat3_s {
+		uint64_t reserved_32_63:32;
+		uint64_t pll_div4:1;
+		uint64_t zip_crip:2;
+		uint64_t bar2_en:1;
+		uint64_t efus_lck:1;
+		uint64_t efus_ign:1;
+		uint64_t nozip:1;
+		uint64_t nodfa_dte:1;
+		uint64_t icache:24;
+	} s;
+	struct cvmx_mio_fus_dat3_cn30xx {
+		uint64_t reserved_32_63:32;
+		uint64_t pll_div4:1;
+		uint64_t reserved_29_30:2;
+		uint64_t bar2_en:1;
+		uint64_t efus_lck:1;
+		uint64_t efus_ign:1;
+		uint64_t nozip:1;
+		uint64_t nodfa_dte:1;
+		uint64_t icache:24;
+	} cn30xx;
+	struct cvmx_mio_fus_dat3_s cn31xx;
+	struct cvmx_mio_fus_dat3_cn38xx {
+		uint64_t reserved_31_63:33;
+		uint64_t zip_crip:2;
+		uint64_t bar2_en:1;
+		uint64_t efus_lck:1;
+		uint64_t efus_ign:1;
+		uint64_t nozip:1;
+		uint64_t nodfa_dte:1;
+		uint64_t icache:24;
+	} cn38xx;
+	struct cvmx_mio_fus_dat3_cn38xxp2 {
+		uint64_t reserved_29_63:35;
+		uint64_t bar2_en:1;
+		uint64_t efus_lck:1;
+		uint64_t efus_ign:1;
+		uint64_t nozip:1;
+		uint64_t nodfa_dte:1;
+		uint64_t icache:24;
+	} cn38xxp2;
+	struct cvmx_mio_fus_dat3_cn38xx cn50xx;
+	struct cvmx_mio_fus_dat3_cn38xx cn52xx;
+	struct cvmx_mio_fus_dat3_cn38xx cn52xxp1;
+	struct cvmx_mio_fus_dat3_cn38xx cn56xx;
+	struct cvmx_mio_fus_dat3_cn38xx cn56xxp1;
+	struct cvmx_mio_fus_dat3_cn38xx cn58xx;
+	struct cvmx_mio_fus_dat3_cn38xx cn58xxp1;
+};
+
+union cvmx_mio_fus_ema {
+	uint64_t u64;
+	struct cvmx_mio_fus_ema_s {
+		uint64_t reserved_7_63:57;
+		uint64_t eff_ema:3;
+		uint64_t reserved_3_3:1;
+		uint64_t ema:3;
+	} s;
+	struct cvmx_mio_fus_ema_s cn50xx;
+	struct cvmx_mio_fus_ema_s cn52xx;
+	struct cvmx_mio_fus_ema_s cn52xxp1;
+	struct cvmx_mio_fus_ema_s cn56xx;
+	struct cvmx_mio_fus_ema_s cn56xxp1;
+	struct cvmx_mio_fus_ema_cn58xx {
+		uint64_t reserved_2_63:62;
+		uint64_t ema:2;
+	} cn58xx;
+	struct cvmx_mio_fus_ema_cn58xx cn58xxp1;
+};
+
+union cvmx_mio_fus_pdf {
+	uint64_t u64;
+	struct cvmx_mio_fus_pdf_s {
+		uint64_t pdf:64;
+	} s;
+	struct cvmx_mio_fus_pdf_s cn50xx;
+	struct cvmx_mio_fus_pdf_s cn52xx;
+	struct cvmx_mio_fus_pdf_s cn52xxp1;
+	struct cvmx_mio_fus_pdf_s cn56xx;
+	struct cvmx_mio_fus_pdf_s cn56xxp1;
+	struct cvmx_mio_fus_pdf_s cn58xx;
+};
+
+union cvmx_mio_fus_pll {
+	uint64_t u64;
+	struct cvmx_mio_fus_pll_s {
+		uint64_t reserved_2_63:62;
+		uint64_t rfslip:1;
+		uint64_t fbslip:1;
+	} s;
+	struct cvmx_mio_fus_pll_s cn50xx;
+	struct cvmx_mio_fus_pll_s cn52xx;
+	struct cvmx_mio_fus_pll_s cn52xxp1;
+	struct cvmx_mio_fus_pll_s cn56xx;
+	struct cvmx_mio_fus_pll_s cn56xxp1;
+	struct cvmx_mio_fus_pll_s cn58xx;
+	struct cvmx_mio_fus_pll_s cn58xxp1;
+};
+
+union cvmx_mio_fus_prog {
+	uint64_t u64;
+	struct cvmx_mio_fus_prog_s {
+		uint64_t reserved_1_63:63;
+		uint64_t prog:1;
+	} s;
+	struct cvmx_mio_fus_prog_s cn30xx;
+	struct cvmx_mio_fus_prog_s cn31xx;
+	struct cvmx_mio_fus_prog_s cn38xx;
+	struct cvmx_mio_fus_prog_s cn38xxp2;
+	struct cvmx_mio_fus_prog_s cn50xx;
+	struct cvmx_mio_fus_prog_s cn52xx;
+	struct cvmx_mio_fus_prog_s cn52xxp1;
+	struct cvmx_mio_fus_prog_s cn56xx;
+	struct cvmx_mio_fus_prog_s cn56xxp1;
+	struct cvmx_mio_fus_prog_s cn58xx;
+	struct cvmx_mio_fus_prog_s cn58xxp1;
+};
+
+union cvmx_mio_fus_prog_times {
+	uint64_t u64;
+	struct cvmx_mio_fus_prog_times_s {
+		uint64_t reserved_33_63:31;
+		uint64_t prog_pin:1;
+		uint64_t out:8;
+		uint64_t sclk_lo:4;
+		uint64_t sclk_hi:12;
+		uint64_t setup:8;
+	} s;
+	struct cvmx_mio_fus_prog_times_s cn50xx;
+	struct cvmx_mio_fus_prog_times_s cn52xx;
+	struct cvmx_mio_fus_prog_times_s cn52xxp1;
+	struct cvmx_mio_fus_prog_times_s cn56xx;
+	struct cvmx_mio_fus_prog_times_s cn56xxp1;
+	struct cvmx_mio_fus_prog_times_s cn58xx;
+	struct cvmx_mio_fus_prog_times_s cn58xxp1;
+};
+
+union cvmx_mio_fus_rcmd {
+	uint64_t u64;
+	struct cvmx_mio_fus_rcmd_s {
+		uint64_t reserved_24_63:40;
+		uint64_t dat:8;
+		uint64_t reserved_13_15:3;
+		uint64_t pend:1;
+		uint64_t reserved_9_11:3;
+		uint64_t efuse:1;
+		uint64_t addr:8;
+	} s;
+	struct cvmx_mio_fus_rcmd_cn30xx {
+		uint64_t reserved_24_63:40;
+		uint64_t dat:8;
+		uint64_t reserved_13_15:3;
+		uint64_t pend:1;
+		uint64_t reserved_9_11:3;
+		uint64_t efuse:1;
+		uint64_t reserved_7_7:1;
+		uint64_t addr:7;
+	} cn30xx;
+	struct cvmx_mio_fus_rcmd_cn30xx cn31xx;
+	struct cvmx_mio_fus_rcmd_cn30xx cn38xx;
+	struct cvmx_mio_fus_rcmd_cn30xx cn38xxp2;
+	struct cvmx_mio_fus_rcmd_cn30xx cn50xx;
+	struct cvmx_mio_fus_rcmd_s cn52xx;
+	struct cvmx_mio_fus_rcmd_s cn52xxp1;
+	struct cvmx_mio_fus_rcmd_s cn56xx;
+	struct cvmx_mio_fus_rcmd_s cn56xxp1;
+	struct cvmx_mio_fus_rcmd_cn30xx cn58xx;
+	struct cvmx_mio_fus_rcmd_cn30xx cn58xxp1;
+};
+
+union cvmx_mio_fus_spr_repair_res {
+	uint64_t u64;
+	struct cvmx_mio_fus_spr_repair_res_s {
+		uint64_t reserved_42_63:22;
+		uint64_t repair2:14;
+		uint64_t repair1:14;
+		uint64_t repair0:14;
+	} s;
+	struct cvmx_mio_fus_spr_repair_res_s cn30xx;
+	struct cvmx_mio_fus_spr_repair_res_s cn31xx;
+	struct cvmx_mio_fus_spr_repair_res_s cn38xx;
+	struct cvmx_mio_fus_spr_repair_res_s cn50xx;
+	struct cvmx_mio_fus_spr_repair_res_s cn52xx;
+	struct cvmx_mio_fus_spr_repair_res_s cn52xxp1;
+	struct cvmx_mio_fus_spr_repair_res_s cn56xx;
+	struct cvmx_mio_fus_spr_repair_res_s cn56xxp1;
+	struct cvmx_mio_fus_spr_repair_res_s cn58xx;
+	struct cvmx_mio_fus_spr_repair_res_s cn58xxp1;
+};
+
+union cvmx_mio_fus_spr_repair_sum {
+	uint64_t u64;
+	struct cvmx_mio_fus_spr_repair_sum_s {
+		uint64_t reserved_1_63:63;
+		uint64_t too_many:1;
+	} s;
+	struct cvmx_mio_fus_spr_repair_sum_s cn30xx;
+	struct cvmx_mio_fus_spr_repair_sum_s cn31xx;
+	struct cvmx_mio_fus_spr_repair_sum_s cn38xx;
+	struct cvmx_mio_fus_spr_repair_sum_s cn50xx;
+	struct cvmx_mio_fus_spr_repair_sum_s cn52xx;
+	struct cvmx_mio_fus_spr_repair_sum_s cn52xxp1;
+	struct cvmx_mio_fus_spr_repair_sum_s cn56xx;
+	struct cvmx_mio_fus_spr_repair_sum_s cn56xxp1;
+	struct cvmx_mio_fus_spr_repair_sum_s cn58xx;
+	struct cvmx_mio_fus_spr_repair_sum_s cn58xxp1;
+};
+
+union cvmx_mio_fus_unlock {
+	uint64_t u64;
+	struct cvmx_mio_fus_unlock_s {
+		uint64_t reserved_24_63:40;
+		uint64_t key:24;
+	} s;
+	struct cvmx_mio_fus_unlock_s cn30xx;
+	struct cvmx_mio_fus_unlock_s cn31xx;
+};
+
+union cvmx_mio_fus_wadr {
+	uint64_t u64;
+	struct cvmx_mio_fus_wadr_s {
+		uint64_t reserved_10_63:54;
+		uint64_t addr:10;
+	} s;
+	struct cvmx_mio_fus_wadr_s cn30xx;
+	struct cvmx_mio_fus_wadr_s cn31xx;
+	struct cvmx_mio_fus_wadr_s cn38xx;
+	struct cvmx_mio_fus_wadr_s cn38xxp2;
+	struct cvmx_mio_fus_wadr_cn50xx {
+		uint64_t reserved_2_63:62;
+		uint64_t addr:2;
+	} cn50xx;
+	struct cvmx_mio_fus_wadr_cn52xx {
+		uint64_t reserved_3_63:61;
+		uint64_t addr:3;
+	} cn52xx;
+	struct cvmx_mio_fus_wadr_cn52xx cn52xxp1;
+	struct cvmx_mio_fus_wadr_cn52xx cn56xx;
+	struct cvmx_mio_fus_wadr_cn52xx cn56xxp1;
+	struct cvmx_mio_fus_wadr_cn50xx cn58xx;
+	struct cvmx_mio_fus_wadr_cn50xx cn58xxp1;
+};
+
+union cvmx_mio_ndf_dma_cfg {
+	uint64_t u64;
+	struct cvmx_mio_ndf_dma_cfg_s {
+		uint64_t en:1;
+		uint64_t rw:1;
+		uint64_t clr:1;
+		uint64_t reserved_60_60:1;
+		uint64_t swap32:1;
+		uint64_t swap16:1;
+		uint64_t swap8:1;
+		uint64_t endian:1;
+		uint64_t size:20;
+		uint64_t adr:36;
+	} s;
+	struct cvmx_mio_ndf_dma_cfg_s cn52xx;
+};
+
+union cvmx_mio_ndf_dma_int {
+	uint64_t u64;
+	struct cvmx_mio_ndf_dma_int_s {
+		uint64_t reserved_1_63:63;
+		uint64_t done:1;
+	} s;
+	struct cvmx_mio_ndf_dma_int_s cn52xx;
+};
+
+union cvmx_mio_ndf_dma_int_en {
+	uint64_t u64;
+	struct cvmx_mio_ndf_dma_int_en_s {
+		uint64_t reserved_1_63:63;
+		uint64_t done:1;
+	} s;
+	struct cvmx_mio_ndf_dma_int_en_s cn52xx;
+};
+
+union cvmx_mio_pll_ctl {
+	uint64_t u64;
+	struct cvmx_mio_pll_ctl_s {
+		uint64_t reserved_5_63:59;
+		uint64_t bw_ctl:5;
+	} s;
+	struct cvmx_mio_pll_ctl_s cn30xx;
+	struct cvmx_mio_pll_ctl_s cn31xx;
+};
+
+union cvmx_mio_pll_setting {
+	uint64_t u64;
+	struct cvmx_mio_pll_setting_s {
+		uint64_t reserved_17_63:47;
+		uint64_t setting:17;
+	} s;
+	struct cvmx_mio_pll_setting_s cn30xx;
+	struct cvmx_mio_pll_setting_s cn31xx;
+};
+
+union cvmx_mio_twsx_int {
+	uint64_t u64;
+	struct cvmx_mio_twsx_int_s {
+		uint64_t reserved_12_63:52;
+		uint64_t scl:1;
+		uint64_t sda:1;
+		uint64_t scl_ovr:1;
+		uint64_t sda_ovr:1;
+		uint64_t reserved_7_7:1;
+		uint64_t core_en:1;
+		uint64_t ts_en:1;
+		uint64_t st_en:1;
+		uint64_t reserved_3_3:1;
+		uint64_t core_int:1;
+		uint64_t ts_int:1;
+		uint64_t st_int:1;
+	} s;
+	struct cvmx_mio_twsx_int_s cn30xx;
+	struct cvmx_mio_twsx_int_s cn31xx;
+	struct cvmx_mio_twsx_int_s cn38xx;
+	struct cvmx_mio_twsx_int_cn38xxp2 {
+		uint64_t reserved_7_63:57;
+		uint64_t core_en:1;
+		uint64_t ts_en:1;
+		uint64_t st_en:1;
+		uint64_t reserved_3_3:1;
+		uint64_t core_int:1;
+		uint64_t ts_int:1;
+		uint64_t st_int:1;
+	} cn38xxp2;
+	struct cvmx_mio_twsx_int_s cn50xx;
+	struct cvmx_mio_twsx_int_s cn52xx;
+	struct cvmx_mio_twsx_int_s cn52xxp1;
+	struct cvmx_mio_twsx_int_s cn56xx;
+	struct cvmx_mio_twsx_int_s cn56xxp1;
+	struct cvmx_mio_twsx_int_s cn58xx;
+	struct cvmx_mio_twsx_int_s cn58xxp1;
+};
+
+union cvmx_mio_twsx_sw_twsi {
+	uint64_t u64;
+	struct cvmx_mio_twsx_sw_twsi_s {
+		uint64_t v:1;
+		uint64_t slonly:1;
+		uint64_t eia:1;
+		uint64_t op:4;
+		uint64_t r:1;
+		uint64_t sovr:1;
+		uint64_t size:3;
+		uint64_t scr:2;
+		uint64_t a:10;
+		uint64_t ia:5;
+		uint64_t eop_ia:3;
+		uint64_t d:32;
+	} s;
+	struct cvmx_mio_twsx_sw_twsi_s cn30xx;
+	struct cvmx_mio_twsx_sw_twsi_s cn31xx;
+	struct cvmx_mio_twsx_sw_twsi_s cn38xx;
+	struct cvmx_mio_twsx_sw_twsi_s cn38xxp2;
+	struct cvmx_mio_twsx_sw_twsi_s cn50xx;
+	struct cvmx_mio_twsx_sw_twsi_s cn52xx;
+	struct cvmx_mio_twsx_sw_twsi_s cn52xxp1;
+	struct cvmx_mio_twsx_sw_twsi_s cn56xx;
+	struct cvmx_mio_twsx_sw_twsi_s cn56xxp1;
+	struct cvmx_mio_twsx_sw_twsi_s cn58xx;
+	struct cvmx_mio_twsx_sw_twsi_s cn58xxp1;
+};
+
+union cvmx_mio_twsx_sw_twsi_ext {
+	uint64_t u64;
+	struct cvmx_mio_twsx_sw_twsi_ext_s {
+		uint64_t reserved_40_63:24;
+		uint64_t ia:8;
+		uint64_t d:32;
+	} s;
+	struct cvmx_mio_twsx_sw_twsi_ext_s cn30xx;
+	struct cvmx_mio_twsx_sw_twsi_ext_s cn31xx;
+	struct cvmx_mio_twsx_sw_twsi_ext_s cn38xx;
+	struct cvmx_mio_twsx_sw_twsi_ext_s cn38xxp2;
+	struct cvmx_mio_twsx_sw_twsi_ext_s cn50xx;
+	struct cvmx_mio_twsx_sw_twsi_ext_s cn52xx;
+	struct cvmx_mio_twsx_sw_twsi_ext_s cn52xxp1;
+	struct cvmx_mio_twsx_sw_twsi_ext_s cn56xx;
+	struct cvmx_mio_twsx_sw_twsi_ext_s cn56xxp1;
+	struct cvmx_mio_twsx_sw_twsi_ext_s cn58xx;
+	struct cvmx_mio_twsx_sw_twsi_ext_s cn58xxp1;
+};
+
+union cvmx_mio_twsx_twsi_sw {
+	uint64_t u64;
+	struct cvmx_mio_twsx_twsi_sw_s {
+		uint64_t v:2;
+		uint64_t reserved_32_61:30;
+		uint64_t d:32;
+	} s;
+	struct cvmx_mio_twsx_twsi_sw_s cn30xx;
+	struct cvmx_mio_twsx_twsi_sw_s cn31xx;
+	struct cvmx_mio_twsx_twsi_sw_s cn38xx;
+	struct cvmx_mio_twsx_twsi_sw_s cn38xxp2;
+	struct cvmx_mio_twsx_twsi_sw_s cn50xx;
+	struct cvmx_mio_twsx_twsi_sw_s cn52xx;
+	struct cvmx_mio_twsx_twsi_sw_s cn52xxp1;
+	struct cvmx_mio_twsx_twsi_sw_s cn56xx;
+	struct cvmx_mio_twsx_twsi_sw_s cn56xxp1;
+	struct cvmx_mio_twsx_twsi_sw_s cn58xx;
+	struct cvmx_mio_twsx_twsi_sw_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_dlh {
+	uint64_t u64;
+	struct cvmx_mio_uartx_dlh_s {
+		uint64_t reserved_8_63:56;
+		uint64_t dlh:8;
+	} s;
+	struct cvmx_mio_uartx_dlh_s cn30xx;
+	struct cvmx_mio_uartx_dlh_s cn31xx;
+	struct cvmx_mio_uartx_dlh_s cn38xx;
+	struct cvmx_mio_uartx_dlh_s cn38xxp2;
+	struct cvmx_mio_uartx_dlh_s cn50xx;
+	struct cvmx_mio_uartx_dlh_s cn52xx;
+	struct cvmx_mio_uartx_dlh_s cn52xxp1;
+	struct cvmx_mio_uartx_dlh_s cn56xx;
+	struct cvmx_mio_uartx_dlh_s cn56xxp1;
+	struct cvmx_mio_uartx_dlh_s cn58xx;
+	struct cvmx_mio_uartx_dlh_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_dll {
+	uint64_t u64;
+	struct cvmx_mio_uartx_dll_s {
+		uint64_t reserved_8_63:56;
+		uint64_t dll:8;
+	} s;
+	struct cvmx_mio_uartx_dll_s cn30xx;
+	struct cvmx_mio_uartx_dll_s cn31xx;
+	struct cvmx_mio_uartx_dll_s cn38xx;
+	struct cvmx_mio_uartx_dll_s cn38xxp2;
+	struct cvmx_mio_uartx_dll_s cn50xx;
+	struct cvmx_mio_uartx_dll_s cn52xx;
+	struct cvmx_mio_uartx_dll_s cn52xxp1;
+	struct cvmx_mio_uartx_dll_s cn56xx;
+	struct cvmx_mio_uartx_dll_s cn56xxp1;
+	struct cvmx_mio_uartx_dll_s cn58xx;
+	struct cvmx_mio_uartx_dll_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_far {
+	uint64_t u64;
+	struct cvmx_mio_uartx_far_s {
+		uint64_t reserved_1_63:63;
+		uint64_t far:1;
+	} s;
+	struct cvmx_mio_uartx_far_s cn30xx;
+	struct cvmx_mio_uartx_far_s cn31xx;
+	struct cvmx_mio_uartx_far_s cn38xx;
+	struct cvmx_mio_uartx_far_s cn38xxp2;
+	struct cvmx_mio_uartx_far_s cn50xx;
+	struct cvmx_mio_uartx_far_s cn52xx;
+	struct cvmx_mio_uartx_far_s cn52xxp1;
+	struct cvmx_mio_uartx_far_s cn56xx;
+	struct cvmx_mio_uartx_far_s cn56xxp1;
+	struct cvmx_mio_uartx_far_s cn58xx;
+	struct cvmx_mio_uartx_far_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_fcr {
+	uint64_t u64;
+	struct cvmx_mio_uartx_fcr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t rxtrig:2;
+		uint64_t txtrig:2;
+		uint64_t reserved_3_3:1;
+		uint64_t txfr:1;
+		uint64_t rxfr:1;
+		uint64_t en:1;
+	} s;
+	struct cvmx_mio_uartx_fcr_s cn30xx;
+	struct cvmx_mio_uartx_fcr_s cn31xx;
+	struct cvmx_mio_uartx_fcr_s cn38xx;
+	struct cvmx_mio_uartx_fcr_s cn38xxp2;
+	struct cvmx_mio_uartx_fcr_s cn50xx;
+	struct cvmx_mio_uartx_fcr_s cn52xx;
+	struct cvmx_mio_uartx_fcr_s cn52xxp1;
+	struct cvmx_mio_uartx_fcr_s cn56xx;
+	struct cvmx_mio_uartx_fcr_s cn56xxp1;
+	struct cvmx_mio_uartx_fcr_s cn58xx;
+	struct cvmx_mio_uartx_fcr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_htx {
+	uint64_t u64;
+	struct cvmx_mio_uartx_htx_s {
+		uint64_t reserved_1_63:63;
+		uint64_t htx:1;
+	} s;
+	struct cvmx_mio_uartx_htx_s cn30xx;
+	struct cvmx_mio_uartx_htx_s cn31xx;
+	struct cvmx_mio_uartx_htx_s cn38xx;
+	struct cvmx_mio_uartx_htx_s cn38xxp2;
+	struct cvmx_mio_uartx_htx_s cn50xx;
+	struct cvmx_mio_uartx_htx_s cn52xx;
+	struct cvmx_mio_uartx_htx_s cn52xxp1;
+	struct cvmx_mio_uartx_htx_s cn56xx;
+	struct cvmx_mio_uartx_htx_s cn56xxp1;
+	struct cvmx_mio_uartx_htx_s cn58xx;
+	struct cvmx_mio_uartx_htx_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_ier {
+	uint64_t u64;
+	struct cvmx_mio_uartx_ier_s {
+		uint64_t reserved_8_63:56;
+		uint64_t ptime:1;
+		uint64_t reserved_4_6:3;
+		uint64_t edssi:1;
+		uint64_t elsi:1;
+		uint64_t etbei:1;
+		uint64_t erbfi:1;
+	} s;
+	struct cvmx_mio_uartx_ier_s cn30xx;
+	struct cvmx_mio_uartx_ier_s cn31xx;
+	struct cvmx_mio_uartx_ier_s cn38xx;
+	struct cvmx_mio_uartx_ier_s cn38xxp2;
+	struct cvmx_mio_uartx_ier_s cn50xx;
+	struct cvmx_mio_uartx_ier_s cn52xx;
+	struct cvmx_mio_uartx_ier_s cn52xxp1;
+	struct cvmx_mio_uartx_ier_s cn56xx;
+	struct cvmx_mio_uartx_ier_s cn56xxp1;
+	struct cvmx_mio_uartx_ier_s cn58xx;
+	struct cvmx_mio_uartx_ier_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_iir {
+	uint64_t u64;
+	struct cvmx_mio_uartx_iir_s {
+		uint64_t reserved_8_63:56;
+		uint64_t fen:2;
+		uint64_t reserved_4_5:2;
+		uint64_t iid:4;
+	} s;
+	struct cvmx_mio_uartx_iir_s cn30xx;
+	struct cvmx_mio_uartx_iir_s cn31xx;
+	struct cvmx_mio_uartx_iir_s cn38xx;
+	struct cvmx_mio_uartx_iir_s cn38xxp2;
+	struct cvmx_mio_uartx_iir_s cn50xx;
+	struct cvmx_mio_uartx_iir_s cn52xx;
+	struct cvmx_mio_uartx_iir_s cn52xxp1;
+	struct cvmx_mio_uartx_iir_s cn56xx;
+	struct cvmx_mio_uartx_iir_s cn56xxp1;
+	struct cvmx_mio_uartx_iir_s cn58xx;
+	struct cvmx_mio_uartx_iir_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_lcr {
+	uint64_t u64;
+	struct cvmx_mio_uartx_lcr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t dlab:1;
+		uint64_t brk:1;
+		uint64_t reserved_5_5:1;
+		uint64_t eps:1;
+		uint64_t pen:1;
+		uint64_t stop:1;
+		uint64_t cls:2;
+	} s;
+	struct cvmx_mio_uartx_lcr_s cn30xx;
+	struct cvmx_mio_uartx_lcr_s cn31xx;
+	struct cvmx_mio_uartx_lcr_s cn38xx;
+	struct cvmx_mio_uartx_lcr_s cn38xxp2;
+	struct cvmx_mio_uartx_lcr_s cn50xx;
+	struct cvmx_mio_uartx_lcr_s cn52xx;
+	struct cvmx_mio_uartx_lcr_s cn52xxp1;
+	struct cvmx_mio_uartx_lcr_s cn56xx;
+	struct cvmx_mio_uartx_lcr_s cn56xxp1;
+	struct cvmx_mio_uartx_lcr_s cn58xx;
+	struct cvmx_mio_uartx_lcr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_lsr {
+	uint64_t u64;
+	struct cvmx_mio_uartx_lsr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t ferr:1;
+		uint64_t temt:1;
+		uint64_t thre:1;
+		uint64_t bi:1;
+		uint64_t fe:1;
+		uint64_t pe:1;
+		uint64_t oe:1;
+		uint64_t dr:1;
+	} s;
+	struct cvmx_mio_uartx_lsr_s cn30xx;
+	struct cvmx_mio_uartx_lsr_s cn31xx;
+	struct cvmx_mio_uartx_lsr_s cn38xx;
+	struct cvmx_mio_uartx_lsr_s cn38xxp2;
+	struct cvmx_mio_uartx_lsr_s cn50xx;
+	struct cvmx_mio_uartx_lsr_s cn52xx;
+	struct cvmx_mio_uartx_lsr_s cn52xxp1;
+	struct cvmx_mio_uartx_lsr_s cn56xx;
+	struct cvmx_mio_uartx_lsr_s cn56xxp1;
+	struct cvmx_mio_uartx_lsr_s cn58xx;
+	struct cvmx_mio_uartx_lsr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_mcr {
+	uint64_t u64;
+	struct cvmx_mio_uartx_mcr_s {
+		uint64_t reserved_6_63:58;
+		uint64_t afce:1;
+		uint64_t loop:1;
+		uint64_t out2:1;
+		uint64_t out1:1;
+		uint64_t rts:1;
+		uint64_t dtr:1;
+	} s;
+	struct cvmx_mio_uartx_mcr_s cn30xx;
+	struct cvmx_mio_uartx_mcr_s cn31xx;
+	struct cvmx_mio_uartx_mcr_s cn38xx;
+	struct cvmx_mio_uartx_mcr_s cn38xxp2;
+	struct cvmx_mio_uartx_mcr_s cn50xx;
+	struct cvmx_mio_uartx_mcr_s cn52xx;
+	struct cvmx_mio_uartx_mcr_s cn52xxp1;
+	struct cvmx_mio_uartx_mcr_s cn56xx;
+	struct cvmx_mio_uartx_mcr_s cn56xxp1;
+	struct cvmx_mio_uartx_mcr_s cn58xx;
+	struct cvmx_mio_uartx_mcr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_msr {
+	uint64_t u64;
+	struct cvmx_mio_uartx_msr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t dcd:1;
+		uint64_t ri:1;
+		uint64_t dsr:1;
+		uint64_t cts:1;
+		uint64_t ddcd:1;
+		uint64_t teri:1;
+		uint64_t ddsr:1;
+		uint64_t dcts:1;
+	} s;
+	struct cvmx_mio_uartx_msr_s cn30xx;
+	struct cvmx_mio_uartx_msr_s cn31xx;
+	struct cvmx_mio_uartx_msr_s cn38xx;
+	struct cvmx_mio_uartx_msr_s cn38xxp2;
+	struct cvmx_mio_uartx_msr_s cn50xx;
+	struct cvmx_mio_uartx_msr_s cn52xx;
+	struct cvmx_mio_uartx_msr_s cn52xxp1;
+	struct cvmx_mio_uartx_msr_s cn56xx;
+	struct cvmx_mio_uartx_msr_s cn56xxp1;
+	struct cvmx_mio_uartx_msr_s cn58xx;
+	struct cvmx_mio_uartx_msr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_rbr {
+	uint64_t u64;
+	struct cvmx_mio_uartx_rbr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t rbr:8;
+	} s;
+	struct cvmx_mio_uartx_rbr_s cn30xx;
+	struct cvmx_mio_uartx_rbr_s cn31xx;
+	struct cvmx_mio_uartx_rbr_s cn38xx;
+	struct cvmx_mio_uartx_rbr_s cn38xxp2;
+	struct cvmx_mio_uartx_rbr_s cn50xx;
+	struct cvmx_mio_uartx_rbr_s cn52xx;
+	struct cvmx_mio_uartx_rbr_s cn52xxp1;
+	struct cvmx_mio_uartx_rbr_s cn56xx;
+	struct cvmx_mio_uartx_rbr_s cn56xxp1;
+	struct cvmx_mio_uartx_rbr_s cn58xx;
+	struct cvmx_mio_uartx_rbr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_rfl {
+	uint64_t u64;
+	struct cvmx_mio_uartx_rfl_s {
+		uint64_t reserved_7_63:57;
+		uint64_t rfl:7;
+	} s;
+	struct cvmx_mio_uartx_rfl_s cn30xx;
+	struct cvmx_mio_uartx_rfl_s cn31xx;
+	struct cvmx_mio_uartx_rfl_s cn38xx;
+	struct cvmx_mio_uartx_rfl_s cn38xxp2;
+	struct cvmx_mio_uartx_rfl_s cn50xx;
+	struct cvmx_mio_uartx_rfl_s cn52xx;
+	struct cvmx_mio_uartx_rfl_s cn52xxp1;
+	struct cvmx_mio_uartx_rfl_s cn56xx;
+	struct cvmx_mio_uartx_rfl_s cn56xxp1;
+	struct cvmx_mio_uartx_rfl_s cn58xx;
+	struct cvmx_mio_uartx_rfl_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_rfw {
+	uint64_t u64;
+	struct cvmx_mio_uartx_rfw_s {
+		uint64_t reserved_10_63:54;
+		uint64_t rffe:1;
+		uint64_t rfpe:1;
+		uint64_t rfwd:8;
+	} s;
+	struct cvmx_mio_uartx_rfw_s cn30xx;
+	struct cvmx_mio_uartx_rfw_s cn31xx;
+	struct cvmx_mio_uartx_rfw_s cn38xx;
+	struct cvmx_mio_uartx_rfw_s cn38xxp2;
+	struct cvmx_mio_uartx_rfw_s cn50xx;
+	struct cvmx_mio_uartx_rfw_s cn52xx;
+	struct cvmx_mio_uartx_rfw_s cn52xxp1;
+	struct cvmx_mio_uartx_rfw_s cn56xx;
+	struct cvmx_mio_uartx_rfw_s cn56xxp1;
+	struct cvmx_mio_uartx_rfw_s cn58xx;
+	struct cvmx_mio_uartx_rfw_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_sbcr {
+	uint64_t u64;
+	struct cvmx_mio_uartx_sbcr_s {
+		uint64_t reserved_1_63:63;
+		uint64_t sbcr:1;
+	} s;
+	struct cvmx_mio_uartx_sbcr_s cn30xx;
+	struct cvmx_mio_uartx_sbcr_s cn31xx;
+	struct cvmx_mio_uartx_sbcr_s cn38xx;
+	struct cvmx_mio_uartx_sbcr_s cn38xxp2;
+	struct cvmx_mio_uartx_sbcr_s cn50xx;
+	struct cvmx_mio_uartx_sbcr_s cn52xx;
+	struct cvmx_mio_uartx_sbcr_s cn52xxp1;
+	struct cvmx_mio_uartx_sbcr_s cn56xx;
+	struct cvmx_mio_uartx_sbcr_s cn56xxp1;
+	struct cvmx_mio_uartx_sbcr_s cn58xx;
+	struct cvmx_mio_uartx_sbcr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_scr {
+	uint64_t u64;
+	struct cvmx_mio_uartx_scr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t scr:8;
+	} s;
+	struct cvmx_mio_uartx_scr_s cn30xx;
+	struct cvmx_mio_uartx_scr_s cn31xx;
+	struct cvmx_mio_uartx_scr_s cn38xx;
+	struct cvmx_mio_uartx_scr_s cn38xxp2;
+	struct cvmx_mio_uartx_scr_s cn50xx;
+	struct cvmx_mio_uartx_scr_s cn52xx;
+	struct cvmx_mio_uartx_scr_s cn52xxp1;
+	struct cvmx_mio_uartx_scr_s cn56xx;
+	struct cvmx_mio_uartx_scr_s cn56xxp1;
+	struct cvmx_mio_uartx_scr_s cn58xx;
+	struct cvmx_mio_uartx_scr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_sfe {
+	uint64_t u64;
+	struct cvmx_mio_uartx_sfe_s {
+		uint64_t reserved_1_63:63;
+		uint64_t sfe:1;
+	} s;
+	struct cvmx_mio_uartx_sfe_s cn30xx;
+	struct cvmx_mio_uartx_sfe_s cn31xx;
+	struct cvmx_mio_uartx_sfe_s cn38xx;
+	struct cvmx_mio_uartx_sfe_s cn38xxp2;
+	struct cvmx_mio_uartx_sfe_s cn50xx;
+	struct cvmx_mio_uartx_sfe_s cn52xx;
+	struct cvmx_mio_uartx_sfe_s cn52xxp1;
+	struct cvmx_mio_uartx_sfe_s cn56xx;
+	struct cvmx_mio_uartx_sfe_s cn56xxp1;
+	struct cvmx_mio_uartx_sfe_s cn58xx;
+	struct cvmx_mio_uartx_sfe_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_srr {
+	uint64_t u64;
+	struct cvmx_mio_uartx_srr_s {
+		uint64_t reserved_3_63:61;
+		uint64_t stfr:1;
+		uint64_t srfr:1;
+		uint64_t usr:1;
+	} s;
+	struct cvmx_mio_uartx_srr_s cn30xx;
+	struct cvmx_mio_uartx_srr_s cn31xx;
+	struct cvmx_mio_uartx_srr_s cn38xx;
+	struct cvmx_mio_uartx_srr_s cn38xxp2;
+	struct cvmx_mio_uartx_srr_s cn50xx;
+	struct cvmx_mio_uartx_srr_s cn52xx;
+	struct cvmx_mio_uartx_srr_s cn52xxp1;
+	struct cvmx_mio_uartx_srr_s cn56xx;
+	struct cvmx_mio_uartx_srr_s cn56xxp1;
+	struct cvmx_mio_uartx_srr_s cn58xx;
+	struct cvmx_mio_uartx_srr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_srt {
+	uint64_t u64;
+	struct cvmx_mio_uartx_srt_s {
+		uint64_t reserved_2_63:62;
+		uint64_t srt:2;
+	} s;
+	struct cvmx_mio_uartx_srt_s cn30xx;
+	struct cvmx_mio_uartx_srt_s cn31xx;
+	struct cvmx_mio_uartx_srt_s cn38xx;
+	struct cvmx_mio_uartx_srt_s cn38xxp2;
+	struct cvmx_mio_uartx_srt_s cn50xx;
+	struct cvmx_mio_uartx_srt_s cn52xx;
+	struct cvmx_mio_uartx_srt_s cn52xxp1;
+	struct cvmx_mio_uartx_srt_s cn56xx;
+	struct cvmx_mio_uartx_srt_s cn56xxp1;
+	struct cvmx_mio_uartx_srt_s cn58xx;
+	struct cvmx_mio_uartx_srt_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_srts {
+	uint64_t u64;
+	struct cvmx_mio_uartx_srts_s {
+		uint64_t reserved_1_63:63;
+		uint64_t srts:1;
+	} s;
+	struct cvmx_mio_uartx_srts_s cn30xx;
+	struct cvmx_mio_uartx_srts_s cn31xx;
+	struct cvmx_mio_uartx_srts_s cn38xx;
+	struct cvmx_mio_uartx_srts_s cn38xxp2;
+	struct cvmx_mio_uartx_srts_s cn50xx;
+	struct cvmx_mio_uartx_srts_s cn52xx;
+	struct cvmx_mio_uartx_srts_s cn52xxp1;
+	struct cvmx_mio_uartx_srts_s cn56xx;
+	struct cvmx_mio_uartx_srts_s cn56xxp1;
+	struct cvmx_mio_uartx_srts_s cn58xx;
+	struct cvmx_mio_uartx_srts_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_stt {
+	uint64_t u64;
+	struct cvmx_mio_uartx_stt_s {
+		uint64_t reserved_2_63:62;
+		uint64_t stt:2;
+	} s;
+	struct cvmx_mio_uartx_stt_s cn30xx;
+	struct cvmx_mio_uartx_stt_s cn31xx;
+	struct cvmx_mio_uartx_stt_s cn38xx;
+	struct cvmx_mio_uartx_stt_s cn38xxp2;
+	struct cvmx_mio_uartx_stt_s cn50xx;
+	struct cvmx_mio_uartx_stt_s cn52xx;
+	struct cvmx_mio_uartx_stt_s cn52xxp1;
+	struct cvmx_mio_uartx_stt_s cn56xx;
+	struct cvmx_mio_uartx_stt_s cn56xxp1;
+	struct cvmx_mio_uartx_stt_s cn58xx;
+	struct cvmx_mio_uartx_stt_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_tfl {
+	uint64_t u64;
+	struct cvmx_mio_uartx_tfl_s {
+		uint64_t reserved_7_63:57;
+		uint64_t tfl:7;
+	} s;
+	struct cvmx_mio_uartx_tfl_s cn30xx;
+	struct cvmx_mio_uartx_tfl_s cn31xx;
+	struct cvmx_mio_uartx_tfl_s cn38xx;
+	struct cvmx_mio_uartx_tfl_s cn38xxp2;
+	struct cvmx_mio_uartx_tfl_s cn50xx;
+	struct cvmx_mio_uartx_tfl_s cn52xx;
+	struct cvmx_mio_uartx_tfl_s cn52xxp1;
+	struct cvmx_mio_uartx_tfl_s cn56xx;
+	struct cvmx_mio_uartx_tfl_s cn56xxp1;
+	struct cvmx_mio_uartx_tfl_s cn58xx;
+	struct cvmx_mio_uartx_tfl_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_tfr {
+	uint64_t u64;
+	struct cvmx_mio_uartx_tfr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t tfr:8;
+	} s;
+	struct cvmx_mio_uartx_tfr_s cn30xx;
+	struct cvmx_mio_uartx_tfr_s cn31xx;
+	struct cvmx_mio_uartx_tfr_s cn38xx;
+	struct cvmx_mio_uartx_tfr_s cn38xxp2;
+	struct cvmx_mio_uartx_tfr_s cn50xx;
+	struct cvmx_mio_uartx_tfr_s cn52xx;
+	struct cvmx_mio_uartx_tfr_s cn52xxp1;
+	struct cvmx_mio_uartx_tfr_s cn56xx;
+	struct cvmx_mio_uartx_tfr_s cn56xxp1;
+	struct cvmx_mio_uartx_tfr_s cn58xx;
+	struct cvmx_mio_uartx_tfr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_thr {
+	uint64_t u64;
+	struct cvmx_mio_uartx_thr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t thr:8;
+	} s;
+	struct cvmx_mio_uartx_thr_s cn30xx;
+	struct cvmx_mio_uartx_thr_s cn31xx;
+	struct cvmx_mio_uartx_thr_s cn38xx;
+	struct cvmx_mio_uartx_thr_s cn38xxp2;
+	struct cvmx_mio_uartx_thr_s cn50xx;
+	struct cvmx_mio_uartx_thr_s cn52xx;
+	struct cvmx_mio_uartx_thr_s cn52xxp1;
+	struct cvmx_mio_uartx_thr_s cn56xx;
+	struct cvmx_mio_uartx_thr_s cn56xxp1;
+	struct cvmx_mio_uartx_thr_s cn58xx;
+	struct cvmx_mio_uartx_thr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_usr {
+	uint64_t u64;
+	struct cvmx_mio_uartx_usr_s {
+		uint64_t reserved_5_63:59;
+		uint64_t rff:1;
+		uint64_t rfne:1;
+		uint64_t tfe:1;
+		uint64_t tfnf:1;
+		uint64_t busy:1;
+	} s;
+	struct cvmx_mio_uartx_usr_s cn30xx;
+	struct cvmx_mio_uartx_usr_s cn31xx;
+	struct cvmx_mio_uartx_usr_s cn38xx;
+	struct cvmx_mio_uartx_usr_s cn38xxp2;
+	struct cvmx_mio_uartx_usr_s cn50xx;
+	struct cvmx_mio_uartx_usr_s cn52xx;
+	struct cvmx_mio_uartx_usr_s cn52xxp1;
+	struct cvmx_mio_uartx_usr_s cn56xx;
+	struct cvmx_mio_uartx_usr_s cn56xxp1;
+	struct cvmx_mio_uartx_usr_s cn58xx;
+	struct cvmx_mio_uartx_usr_s cn58xxp1;
+};
+
+union cvmx_mio_uart2_dlh {
+	uint64_t u64;
+	struct cvmx_mio_uart2_dlh_s {
+		uint64_t reserved_8_63:56;
+		uint64_t dlh:8;
+	} s;
+	struct cvmx_mio_uart2_dlh_s cn52xx;
+	struct cvmx_mio_uart2_dlh_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_dll {
+	uint64_t u64;
+	struct cvmx_mio_uart2_dll_s {
+		uint64_t reserved_8_63:56;
+		uint64_t dll:8;
+	} s;
+	struct cvmx_mio_uart2_dll_s cn52xx;
+	struct cvmx_mio_uart2_dll_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_far {
+	uint64_t u64;
+	struct cvmx_mio_uart2_far_s {
+		uint64_t reserved_1_63:63;
+		uint64_t far:1;
+	} s;
+	struct cvmx_mio_uart2_far_s cn52xx;
+	struct cvmx_mio_uart2_far_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_fcr {
+	uint64_t u64;
+	struct cvmx_mio_uart2_fcr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t rxtrig:2;
+		uint64_t txtrig:2;
+		uint64_t reserved_3_3:1;
+		uint64_t txfr:1;
+		uint64_t rxfr:1;
+		uint64_t en:1;
+	} s;
+	struct cvmx_mio_uart2_fcr_s cn52xx;
+	struct cvmx_mio_uart2_fcr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_htx {
+	uint64_t u64;
+	struct cvmx_mio_uart2_htx_s {
+		uint64_t reserved_1_63:63;
+		uint64_t htx:1;
+	} s;
+	struct cvmx_mio_uart2_htx_s cn52xx;
+	struct cvmx_mio_uart2_htx_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_ier {
+	uint64_t u64;
+	struct cvmx_mio_uart2_ier_s {
+		uint64_t reserved_8_63:56;
+		uint64_t ptime:1;
+		uint64_t reserved_4_6:3;
+		uint64_t edssi:1;
+		uint64_t elsi:1;
+		uint64_t etbei:1;
+		uint64_t erbfi:1;
+	} s;
+	struct cvmx_mio_uart2_ier_s cn52xx;
+	struct cvmx_mio_uart2_ier_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_iir {
+	uint64_t u64;
+	struct cvmx_mio_uart2_iir_s {
+		uint64_t reserved_8_63:56;
+		uint64_t fen:2;
+		uint64_t reserved_4_5:2;
+		uint64_t iid:4;
+	} s;
+	struct cvmx_mio_uart2_iir_s cn52xx;
+	struct cvmx_mio_uart2_iir_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_lcr {
+	uint64_t u64;
+	struct cvmx_mio_uart2_lcr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t dlab:1;
+		uint64_t brk:1;
+		uint64_t reserved_5_5:1;
+		uint64_t eps:1;
+		uint64_t pen:1;
+		uint64_t stop:1;
+		uint64_t cls:2;
+	} s;
+	struct cvmx_mio_uart2_lcr_s cn52xx;
+	struct cvmx_mio_uart2_lcr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_lsr {
+	uint64_t u64;
+	struct cvmx_mio_uart2_lsr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t ferr:1;
+		uint64_t temt:1;
+		uint64_t thre:1;
+		uint64_t bi:1;
+		uint64_t fe:1;
+		uint64_t pe:1;
+		uint64_t oe:1;
+		uint64_t dr:1;
+	} s;
+	struct cvmx_mio_uart2_lsr_s cn52xx;
+	struct cvmx_mio_uart2_lsr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_mcr {
+	uint64_t u64;
+	struct cvmx_mio_uart2_mcr_s {
+		uint64_t reserved_6_63:58;
+		uint64_t afce:1;
+		uint64_t loop:1;
+		uint64_t out2:1;
+		uint64_t out1:1;
+		uint64_t rts:1;
+		uint64_t dtr:1;
+	} s;
+	struct cvmx_mio_uart2_mcr_s cn52xx;
+	struct cvmx_mio_uart2_mcr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_msr {
+	uint64_t u64;
+	struct cvmx_mio_uart2_msr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t dcd:1;
+		uint64_t ri:1;
+		uint64_t dsr:1;
+		uint64_t cts:1;
+		uint64_t ddcd:1;
+		uint64_t teri:1;
+		uint64_t ddsr:1;
+		uint64_t dcts:1;
+	} s;
+	struct cvmx_mio_uart2_msr_s cn52xx;
+	struct cvmx_mio_uart2_msr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_rbr {
+	uint64_t u64;
+	struct cvmx_mio_uart2_rbr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t rbr:8;
+	} s;
+	struct cvmx_mio_uart2_rbr_s cn52xx;
+	struct cvmx_mio_uart2_rbr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_rfl {
+	uint64_t u64;
+	struct cvmx_mio_uart2_rfl_s {
+		uint64_t reserved_7_63:57;
+		uint64_t rfl:7;
+	} s;
+	struct cvmx_mio_uart2_rfl_s cn52xx;
+	struct cvmx_mio_uart2_rfl_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_rfw {
+	uint64_t u64;
+	struct cvmx_mio_uart2_rfw_s {
+		uint64_t reserved_10_63:54;
+		uint64_t rffe:1;
+		uint64_t rfpe:1;
+		uint64_t rfwd:8;
+	} s;
+	struct cvmx_mio_uart2_rfw_s cn52xx;
+	struct cvmx_mio_uart2_rfw_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_sbcr {
+	uint64_t u64;
+	struct cvmx_mio_uart2_sbcr_s {
+		uint64_t reserved_1_63:63;
+		uint64_t sbcr:1;
+	} s;
+	struct cvmx_mio_uart2_sbcr_s cn52xx;
+	struct cvmx_mio_uart2_sbcr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_scr {
+	uint64_t u64;
+	struct cvmx_mio_uart2_scr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t scr:8;
+	} s;
+	struct cvmx_mio_uart2_scr_s cn52xx;
+	struct cvmx_mio_uart2_scr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_sfe {
+	uint64_t u64;
+	struct cvmx_mio_uart2_sfe_s {
+		uint64_t reserved_1_63:63;
+		uint64_t sfe:1;
+	} s;
+	struct cvmx_mio_uart2_sfe_s cn52xx;
+	struct cvmx_mio_uart2_sfe_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_srr {
+	uint64_t u64;
+	struct cvmx_mio_uart2_srr_s {
+		uint64_t reserved_3_63:61;
+		uint64_t stfr:1;
+		uint64_t srfr:1;
+		uint64_t usr:1;
+	} s;
+	struct cvmx_mio_uart2_srr_s cn52xx;
+	struct cvmx_mio_uart2_srr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_srt {
+	uint64_t u64;
+	struct cvmx_mio_uart2_srt_s {
+		uint64_t reserved_2_63:62;
+		uint64_t srt:2;
+	} s;
+	struct cvmx_mio_uart2_srt_s cn52xx;
+	struct cvmx_mio_uart2_srt_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_srts {
+	uint64_t u64;
+	struct cvmx_mio_uart2_srts_s {
+		uint64_t reserved_1_63:63;
+		uint64_t srts:1;
+	} s;
+	struct cvmx_mio_uart2_srts_s cn52xx;
+	struct cvmx_mio_uart2_srts_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_stt {
+	uint64_t u64;
+	struct cvmx_mio_uart2_stt_s {
+		uint64_t reserved_2_63:62;
+		uint64_t stt:2;
+	} s;
+	struct cvmx_mio_uart2_stt_s cn52xx;
+	struct cvmx_mio_uart2_stt_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_tfl {
+	uint64_t u64;
+	struct cvmx_mio_uart2_tfl_s {
+		uint64_t reserved_7_63:57;
+		uint64_t tfl:7;
+	} s;
+	struct cvmx_mio_uart2_tfl_s cn52xx;
+	struct cvmx_mio_uart2_tfl_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_tfr {
+	uint64_t u64;
+	struct cvmx_mio_uart2_tfr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t tfr:8;
+	} s;
+	struct cvmx_mio_uart2_tfr_s cn52xx;
+	struct cvmx_mio_uart2_tfr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_thr {
+	uint64_t u64;
+	struct cvmx_mio_uart2_thr_s {
+		uint64_t reserved_8_63:56;
+		uint64_t thr:8;
+	} s;
+	struct cvmx_mio_uart2_thr_s cn52xx;
+	struct cvmx_mio_uart2_thr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_usr {
+	uint64_t u64;
+	struct cvmx_mio_uart2_usr_s {
+		uint64_t reserved_5_63:59;
+		uint64_t rff:1;
+		uint64_t rfne:1;
+		uint64_t tfe:1;
+		uint64_t tfnf:1;
+		uint64_t busy:1;
+	} s;
+	struct cvmx_mio_uart2_usr_s cn52xx;
+	struct cvmx_mio_uart2_usr_s cn52xxp1;
+};
+
+#endif
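
Illustrative sketch (not part of the patch): the register unions above are intended to be used together with the CVMX_* address macros and the cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h. Assuming those accessors and the include paths used elsewhere in this series, polling the busy flag of UART 0 through CVMX_MIO_UARTX_USR and union cvmx_mio_uartx_usr could look like the following; the helper name uart0_is_busy() is hypothetical.

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-mio-defs.h>

static int uart0_is_busy(void)
{
	union cvmx_mio_uartx_usr usr;

	/* Read the 64-bit CSR, then decode it through the bitfield view. */
	usr.u64 = cvmx_read_csr(CVMX_MIO_UARTX_USR(0));
	return usr.s.busy;
}

The same pattern (read into .u64, inspect or modify the per-chip or common .s view, optionally write back) applies to the other unions defined in this header.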
diff --git a/arch/mips/include/asm/octeon/cvmx-packet.h b/arch/mips/include/asm/octeon/cvmx-packet.h
new file mode 100644
index 0000000..38aefa1
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-packet.h
@@ -0,0 +1,61 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Packet buffer defines.
+ */
+
+#ifndef __CVMX_PACKET_H__
+#define __CVMX_PACKET_H__
+
+/**
+ * This structure defines a buffer pointer on Octeon
+ */
+union cvmx_buf_ptr {
+	void *ptr;
+	uint64_t u64;
+	struct {
+		/* If set, invert the "free" decision applied to the
+		 * overall packet. HW always clears this bit on
+		 * inbound packets. */
+		uint64_t i:1;
+
+		/* Indicates the amount to back up to get to the
+		 * buffer start in cache lines. In most cases this is
+		 * less than one complete cache line, so the value is
+		 * zero */
+		uint64_t back:4;
+		/* The pool that the buffer came from / goes to */
+		uint64_t pool:3;
+		/* The size of the segment pointed to by addr (in bytes) */
+		uint64_t size:16;
+		/* Pointer to the first byte of the data, NOT buffer */
+		uint64_t addr:40;
+	} s;
+};
+
+#endif /*  __CVMX_PACKET_H__ */
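
Illustrative sketch (not part of the patch): "addr" points at the first data byte, while "back" counts 128-byte Octeon cache lines to back up to reach the start of the underlying buffer, so the buffer start is the cache-line-aligned address minus "back" cache lines. A minimal helper, with the hypothetical name cvmx_buf_ptr_start(), might look like this:

#include <linux/types.h>
#include <asm/octeon/cvmx-packet.h>

static inline uint64_t cvmx_buf_ptr_start(union cvmx_buf_ptr bp)
{
	/* Align addr down to a 128-byte cache line, then back up
	 * "back" cache lines to reach the buffer start. */
	return ((bp.s.addr >> 7) - bp.s.back) << 7;
}

The returned value is a physical address; callers would typically convert it to a kernel pointer before freeing the buffer back to its pool.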
diff --git a/arch/mips/include/asm/octeon/cvmx-pow-defs.h b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
new file mode 100644
index 0000000..2d82e24
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
@@ -0,0 +1,698 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_POW_DEFS_H__
+#define __CVMX_POW_DEFS_H__
+
+#define CVMX_POW_BIST_STAT \
+	 CVMX_ADD_IO_SEG(0x00016700000003F8ull)
+#define CVMX_POW_DS_PC \
+	 CVMX_ADD_IO_SEG(0x0001670000000398ull)
+#define CVMX_POW_ECC_ERR \
+	 CVMX_ADD_IO_SEG(0x0001670000000218ull)
+#define CVMX_POW_INT_CTL \
+	 CVMX_ADD_IO_SEG(0x0001670000000220ull)
+#define CVMX_POW_IQ_CNTX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001670000000340ull + (((offset) & 7) * 8))
+#define CVMX_POW_IQ_COM_CNT \
+	 CVMX_ADD_IO_SEG(0x0001670000000388ull)
+#define CVMX_POW_IQ_INT \
+	 CVMX_ADD_IO_SEG(0x0001670000000238ull)
+#define CVMX_POW_IQ_INT_EN \
+	 CVMX_ADD_IO_SEG(0x0001670000000240ull)
+#define CVMX_POW_IQ_THRX(offset) \
+	 CVMX_ADD_IO_SEG(0x00016700000003A0ull + (((offset) & 7) * 8))
+#define CVMX_POW_NOS_CNT \
+	 CVMX_ADD_IO_SEG(0x0001670000000228ull)
+#define CVMX_POW_NW_TIM \
+	 CVMX_ADD_IO_SEG(0x0001670000000210ull)
+#define CVMX_POW_PF_RST_MSK \
+	 CVMX_ADD_IO_SEG(0x0001670000000230ull)
+#define CVMX_POW_PP_GRP_MSKX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001670000000000ull + (((offset) & 15) * 8))
+#define CVMX_POW_QOS_RNDX(offset) \
+	 CVMX_ADD_IO_SEG(0x00016700000001C0ull + (((offset) & 7) * 8))
+#define CVMX_POW_QOS_THRX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001670000000180ull + (((offset) & 7) * 8))
+#define CVMX_POW_TS_PC \
+	 CVMX_ADD_IO_SEG(0x0001670000000390ull)
+#define CVMX_POW_WA_COM_PC \
+	 CVMX_ADD_IO_SEG(0x0001670000000380ull)
+#define CVMX_POW_WA_PCX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001670000000300ull + (((offset) & 7) * 8))
+#define CVMX_POW_WQ_INT \
+	 CVMX_ADD_IO_SEG(0x0001670000000200ull)
+#define CVMX_POW_WQ_INT_CNTX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001670000000100ull + (((offset) & 15) * 8))
+#define CVMX_POW_WQ_INT_PC \
+	 CVMX_ADD_IO_SEG(0x0001670000000208ull)
+#define CVMX_POW_WQ_INT_THRX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001670000000080ull + (((offset) & 15) * 8))
+#define CVMX_POW_WS_PCX(offset) \
+	 CVMX_ADD_IO_SEG(0x0001670000000280ull + (((offset) & 15) * 8))
+
+union cvmx_pow_bist_stat {
+	uint64_t u64;
+	struct cvmx_pow_bist_stat_s {
+		uint64_t reserved_32_63:32;
+		uint64_t pp:16;
+		uint64_t reserved_0_15:16;
+	} s;
+	struct cvmx_pow_bist_stat_cn30xx {
+		uint64_t reserved_17_63:47;
+		uint64_t pp:1;
+		uint64_t reserved_9_15:7;
+		uint64_t cam:1;
+		uint64_t nbt1:1;
+		uint64_t nbt0:1;
+		uint64_t index:1;
+		uint64_t fidx:1;
+		uint64_t nbr1:1;
+		uint64_t nbr0:1;
+		uint64_t pend:1;
+		uint64_t adr:1;
+	} cn30xx;
+	struct cvmx_pow_bist_stat_cn31xx {
+		uint64_t reserved_18_63:46;
+		uint64_t pp:2;
+		uint64_t reserved_9_15:7;
+		uint64_t cam:1;
+		uint64_t nbt1:1;
+		uint64_t nbt0:1;
+		uint64_t index:1;
+		uint64_t fidx:1;
+		uint64_t nbr1:1;
+		uint64_t nbr0:1;
+		uint64_t pend:1;
+		uint64_t adr:1;
+	} cn31xx;
+	struct cvmx_pow_bist_stat_cn38xx {
+		uint64_t reserved_32_63:32;
+		uint64_t pp:16;
+		uint64_t reserved_10_15:6;
+		uint64_t cam:1;
+		uint64_t nbt:1;
+		uint64_t index:1;
+		uint64_t fidx:1;
+		uint64_t nbr1:1;
+		uint64_t nbr0:1;
+		uint64_t pend1:1;
+		uint64_t pend0:1;
+		uint64_t adr1:1;
+		uint64_t adr0:1;
+	} cn38xx;
+	struct cvmx_pow_bist_stat_cn38xx cn38xxp2;
+	struct cvmx_pow_bist_stat_cn31xx cn50xx;
+	struct cvmx_pow_bist_stat_cn52xx {
+		uint64_t reserved_20_63:44;
+		uint64_t pp:4;
+		uint64_t reserved_9_15:7;
+		uint64_t cam:1;
+		uint64_t nbt1:1;
+		uint64_t nbt0:1;
+		uint64_t index:1;
+		uint64_t fidx:1;
+		uint64_t nbr1:1;
+		uint64_t nbr0:1;
+		uint64_t pend:1;
+		uint64_t adr:1;
+	} cn52xx;
+	struct cvmx_pow_bist_stat_cn52xx cn52xxp1;
+	struct cvmx_pow_bist_stat_cn56xx {
+		uint64_t reserved_28_63:36;
+		uint64_t pp:12;
+		uint64_t reserved_10_15:6;
+		uint64_t cam:1;
+		uint64_t nbt:1;
+		uint64_t index:1;
+		uint64_t fidx:1;
+		uint64_t nbr1:1;
+		uint64_t nbr0:1;
+		uint64_t pend1:1;
+		uint64_t pend0:1;
+		uint64_t adr1:1;
+		uint64_t adr0:1;
+	} cn56xx;
+	struct cvmx_pow_bist_stat_cn56xx cn56xxp1;
+	struct cvmx_pow_bist_stat_cn38xx cn58xx;
+	struct cvmx_pow_bist_stat_cn38xx cn58xxp1;
+};
+
+union cvmx_pow_ds_pc {
+	uint64_t u64;
+	struct cvmx_pow_ds_pc_s {
+		uint64_t reserved_32_63:32;
+		uint64_t ds_pc:32;
+	} s;
+	struct cvmx_pow_ds_pc_s cn30xx;
+	struct cvmx_pow_ds_pc_s cn31xx;
+	struct cvmx_pow_ds_pc_s cn38xx;
+	struct cvmx_pow_ds_pc_s cn38xxp2;
+	struct cvmx_pow_ds_pc_s cn50xx;
+	struct cvmx_pow_ds_pc_s cn52xx;
+	struct cvmx_pow_ds_pc_s cn52xxp1;
+	struct cvmx_pow_ds_pc_s cn56xx;
+	struct cvmx_pow_ds_pc_s cn56xxp1;
+	struct cvmx_pow_ds_pc_s cn58xx;
+	struct cvmx_pow_ds_pc_s cn58xxp1;
+};
+
+union cvmx_pow_ecc_err {
+	uint64_t u64;
+	struct cvmx_pow_ecc_err_s {
+		uint64_t reserved_45_63:19;
+		uint64_t iop_ie:13;
+		uint64_t reserved_29_31:3;
+		uint64_t iop:13;
+		uint64_t reserved_14_15:2;
+		uint64_t rpe_ie:1;
+		uint64_t rpe:1;
+		uint64_t reserved_9_11:3;
+		uint64_t syn:5;
+		uint64_t dbe_ie:1;
+		uint64_t sbe_ie:1;
+		uint64_t dbe:1;
+		uint64_t sbe:1;
+	} s;
+	struct cvmx_pow_ecc_err_s cn30xx;
+	struct cvmx_pow_ecc_err_cn31xx {
+		uint64_t reserved_14_63:50;
+		uint64_t rpe_ie:1;
+		uint64_t rpe:1;
+		uint64_t reserved_9_11:3;
+		uint64_t syn:5;
+		uint64_t dbe_ie:1;
+		uint64_t sbe_ie:1;
+		uint64_t dbe:1;
+		uint64_t sbe:1;
+	} cn31xx;
+	struct cvmx_pow_ecc_err_s cn38xx;
+	struct cvmx_pow_ecc_err_cn31xx cn38xxp2;
+	struct cvmx_pow_ecc_err_s cn50xx;
+	struct cvmx_pow_ecc_err_s cn52xx;
+	struct cvmx_pow_ecc_err_s cn52xxp1;
+	struct cvmx_pow_ecc_err_s cn56xx;
+	struct cvmx_pow_ecc_err_s cn56xxp1;
+	struct cvmx_pow_ecc_err_s cn58xx;
+	struct cvmx_pow_ecc_err_s cn58xxp1;
+};
+
+union cvmx_pow_int_ctl {
+	uint64_t u64;
+	struct cvmx_pow_int_ctl_s {
+		uint64_t reserved_6_63:58;
+		uint64_t pfr_dis:1;
+		uint64_t nbr_thr:5;
+	} s;
+	struct cvmx_pow_int_ctl_s cn30xx;
+	struct cvmx_pow_int_ctl_s cn31xx;
+	struct cvmx_pow_int_ctl_s cn38xx;
+	struct cvmx_pow_int_ctl_s cn38xxp2;
+	struct cvmx_pow_int_ctl_s cn50xx;
+	struct cvmx_pow_int_ctl_s cn52xx;
+	struct cvmx_pow_int_ctl_s cn52xxp1;
+	struct cvmx_pow_int_ctl_s cn56xx;
+	struct cvmx_pow_int_ctl_s cn56xxp1;
+	struct cvmx_pow_int_ctl_s cn58xx;
+	struct cvmx_pow_int_ctl_s cn58xxp1;
+};
+
+union cvmx_pow_iq_cntx {
+	uint64_t u64;
+	struct cvmx_pow_iq_cntx_s {
+		uint64_t reserved_32_63:32;
+		uint64_t iq_cnt:32;
+	} s;
+	struct cvmx_pow_iq_cntx_s cn30xx;
+	struct cvmx_pow_iq_cntx_s cn31xx;
+	struct cvmx_pow_iq_cntx_s cn38xx;
+	struct cvmx_pow_iq_cntx_s cn38xxp2;
+	struct cvmx_pow_iq_cntx_s cn50xx;
+	struct cvmx_pow_iq_cntx_s cn52xx;
+	struct cvmx_pow_iq_cntx_s cn52xxp1;
+	struct cvmx_pow_iq_cntx_s cn56xx;
+	struct cvmx_pow_iq_cntx_s cn56xxp1;
+	struct cvmx_pow_iq_cntx_s cn58xx;
+	struct cvmx_pow_iq_cntx_s cn58xxp1;
+};
+
+union cvmx_pow_iq_com_cnt {
+	uint64_t u64;
+	struct cvmx_pow_iq_com_cnt_s {
+		uint64_t reserved_32_63:32;
+		uint64_t iq_cnt:32;
+	} s;
+	struct cvmx_pow_iq_com_cnt_s cn30xx;
+	struct cvmx_pow_iq_com_cnt_s cn31xx;
+	struct cvmx_pow_iq_com_cnt_s cn38xx;
+	struct cvmx_pow_iq_com_cnt_s cn38xxp2;
+	struct cvmx_pow_iq_com_cnt_s cn50xx;
+	struct cvmx_pow_iq_com_cnt_s cn52xx;
+	struct cvmx_pow_iq_com_cnt_s cn52xxp1;
+	struct cvmx_pow_iq_com_cnt_s cn56xx;
+	struct cvmx_pow_iq_com_cnt_s cn56xxp1;
+	struct cvmx_pow_iq_com_cnt_s cn58xx;
+	struct cvmx_pow_iq_com_cnt_s cn58xxp1;
+};
+
+union cvmx_pow_iq_int {
+	uint64_t u64;
+	struct cvmx_pow_iq_int_s {
+		uint64_t reserved_8_63:56;
+		uint64_t iq_int:8;
+	} s;
+	struct cvmx_pow_iq_int_s cn52xx;
+	struct cvmx_pow_iq_int_s cn52xxp1;
+	struct cvmx_pow_iq_int_s cn56xx;
+	struct cvmx_pow_iq_int_s cn56xxp1;
+};
+
+union cvmx_pow_iq_int_en {
+	uint64_t u64;
+	struct cvmx_pow_iq_int_en_s {
+		uint64_t reserved_8_63:56;
+		uint64_t int_en:8;
+	} s;
+	struct cvmx_pow_iq_int_en_s cn52xx;
+	struct cvmx_pow_iq_int_en_s cn52xxp1;
+	struct cvmx_pow_iq_int_en_s cn56xx;
+	struct cvmx_pow_iq_int_en_s cn56xxp1;
+};
+
+union cvmx_pow_iq_thrx {
+	uint64_t u64;
+	struct cvmx_pow_iq_thrx_s {
+		uint64_t reserved_32_63:32;
+		uint64_t iq_thr:32;
+	} s;
+	struct cvmx_pow_iq_thrx_s cn52xx;
+	struct cvmx_pow_iq_thrx_s cn52xxp1;
+	struct cvmx_pow_iq_thrx_s cn56xx;
+	struct cvmx_pow_iq_thrx_s cn56xxp1;
+};
+
+union cvmx_pow_nos_cnt {
+	uint64_t u64;
+	struct cvmx_pow_nos_cnt_s {
+		uint64_t reserved_12_63:52;
+		uint64_t nos_cnt:12;
+	} s;
+	struct cvmx_pow_nos_cnt_cn30xx {
+		uint64_t reserved_7_63:57;
+		uint64_t nos_cnt:7;
+	} cn30xx;
+	struct cvmx_pow_nos_cnt_cn31xx {
+		uint64_t reserved_9_63:55;
+		uint64_t nos_cnt:9;
+	} cn31xx;
+	struct cvmx_pow_nos_cnt_s cn38xx;
+	struct cvmx_pow_nos_cnt_s cn38xxp2;
+	struct cvmx_pow_nos_cnt_cn31xx cn50xx;
+	struct cvmx_pow_nos_cnt_cn52xx {
+		uint64_t reserved_10_63:54;
+		uint64_t nos_cnt:10;
+	} cn52xx;
+	struct cvmx_pow_nos_cnt_cn52xx cn52xxp1;
+	struct cvmx_pow_nos_cnt_s cn56xx;
+	struct cvmx_pow_nos_cnt_s cn56xxp1;
+	struct cvmx_pow_nos_cnt_s cn58xx;
+	struct cvmx_pow_nos_cnt_s cn58xxp1;
+};
+
+union cvmx_pow_nw_tim {
+	uint64_t u64;
+	struct cvmx_pow_nw_tim_s {
+		uint64_t reserved_10_63:54;
+		uint64_t nw_tim:10;
+	} s;
+	struct cvmx_pow_nw_tim_s cn30xx;
+	struct cvmx_pow_nw_tim_s cn31xx;
+	struct cvmx_pow_nw_tim_s cn38xx;
+	struct cvmx_pow_nw_tim_s cn38xxp2;
+	struct cvmx_pow_nw_tim_s cn50xx;
+	struct cvmx_pow_nw_tim_s cn52xx;
+	struct cvmx_pow_nw_tim_s cn52xxp1;
+	struct cvmx_pow_nw_tim_s cn56xx;
+	struct cvmx_pow_nw_tim_s cn56xxp1;
+	struct cvmx_pow_nw_tim_s cn58xx;
+	struct cvmx_pow_nw_tim_s cn58xxp1;
+};
+
+union cvmx_pow_pf_rst_msk {
+	uint64_t u64;
+	struct cvmx_pow_pf_rst_msk_s {
+		uint64_t reserved_8_63:56;
+		uint64_t rst_msk:8;
+	} s;
+	struct cvmx_pow_pf_rst_msk_s cn50xx;
+	struct cvmx_pow_pf_rst_msk_s cn52xx;
+	struct cvmx_pow_pf_rst_msk_s cn52xxp1;
+	struct cvmx_pow_pf_rst_msk_s cn56xx;
+	struct cvmx_pow_pf_rst_msk_s cn56xxp1;
+	struct cvmx_pow_pf_rst_msk_s cn58xx;
+	struct cvmx_pow_pf_rst_msk_s cn58xxp1;
+};
+
+union cvmx_pow_pp_grp_mskx {
+	uint64_t u64;
+	struct cvmx_pow_pp_grp_mskx_s {
+		uint64_t reserved_48_63:16;
+		uint64_t qos7_pri:4;
+		uint64_t qos6_pri:4;
+		uint64_t qos5_pri:4;
+		uint64_t qos4_pri:4;
+		uint64_t qos3_pri:4;
+		uint64_t qos2_pri:4;
+		uint64_t qos1_pri:4;
+		uint64_t qos0_pri:4;
+		uint64_t grp_msk:16;
+	} s;
+	struct cvmx_pow_pp_grp_mskx_cn30xx {
+		uint64_t reserved_16_63:48;
+		uint64_t grp_msk:16;
+	} cn30xx;
+	struct cvmx_pow_pp_grp_mskx_cn30xx cn31xx;
+	struct cvmx_pow_pp_grp_mskx_cn30xx cn38xx;
+	struct cvmx_pow_pp_grp_mskx_cn30xx cn38xxp2;
+	struct cvmx_pow_pp_grp_mskx_s cn50xx;
+	struct cvmx_pow_pp_grp_mskx_s cn52xx;
+	struct cvmx_pow_pp_grp_mskx_s cn52xxp1;
+	struct cvmx_pow_pp_grp_mskx_s cn56xx;
+	struct cvmx_pow_pp_grp_mskx_s cn56xxp1;
+	struct cvmx_pow_pp_grp_mskx_s cn58xx;
+	struct cvmx_pow_pp_grp_mskx_s cn58xxp1;
+};
+
+union cvmx_pow_qos_rndx {
+	uint64_t u64;
+	struct cvmx_pow_qos_rndx_s {
+		uint64_t reserved_32_63:32;
+		uint64_t rnd_p3:8;
+		uint64_t rnd_p2:8;
+		uint64_t rnd_p1:8;
+		uint64_t rnd:8;
+	} s;
+	struct cvmx_pow_qos_rndx_s cn30xx;
+	struct cvmx_pow_qos_rndx_s cn31xx;
+	struct cvmx_pow_qos_rndx_s cn38xx;
+	struct cvmx_pow_qos_rndx_s cn38xxp2;
+	struct cvmx_pow_qos_rndx_s cn50xx;
+	struct cvmx_pow_qos_rndx_s cn52xx;
+	struct cvmx_pow_qos_rndx_s cn52xxp1;
+	struct cvmx_pow_qos_rndx_s cn56xx;
+	struct cvmx_pow_qos_rndx_s cn56xxp1;
+	struct cvmx_pow_qos_rndx_s cn58xx;
+	struct cvmx_pow_qos_rndx_s cn58xxp1;
+};
+
+union cvmx_pow_qos_thrx {
+	uint64_t u64;
+	struct cvmx_pow_qos_thrx_s {
+		uint64_t reserved_60_63:4;
+		uint64_t des_cnt:12;
+		uint64_t buf_cnt:12;
+		uint64_t free_cnt:12;
+		uint64_t reserved_23_23:1;
+		uint64_t max_thr:11;
+		uint64_t reserved_11_11:1;
+		uint64_t min_thr:11;
+	} s;
+	struct cvmx_pow_qos_thrx_cn30xx {
+		uint64_t reserved_55_63:9;
+		uint64_t des_cnt:7;
+		uint64_t reserved_43_47:5;
+		uint64_t buf_cnt:7;
+		uint64_t reserved_31_35:5;
+		uint64_t free_cnt:7;
+		uint64_t reserved_18_23:6;
+		uint64_t max_thr:6;
+		uint64_t reserved_6_11:6;
+		uint64_t min_thr:6;
+	} cn30xx;
+	struct cvmx_pow_qos_thrx_cn31xx {
+		uint64_t reserved_57_63:7;
+		uint64_t des_cnt:9;
+		uint64_t reserved_45_47:3;
+		uint64_t buf_cnt:9;
+		uint64_t reserved_33_35:3;
+		uint64_t free_cnt:9;
+		uint64_t reserved_20_23:4;
+		uint64_t max_thr:8;
+		uint64_t reserved_8_11:4;
+		uint64_t min_thr:8;
+	} cn31xx;
+	struct cvmx_pow_qos_thrx_s cn38xx;
+	struct cvmx_pow_qos_thrx_s cn38xxp2;
+	struct cvmx_pow_qos_thrx_cn31xx cn50xx;
+	struct cvmx_pow_qos_thrx_cn52xx {
+		uint64_t reserved_58_63:6;
+		uint64_t des_cnt:10;
+		uint64_t reserved_46_47:2;
+		uint64_t buf_cnt:10;
+		uint64_t reserved_34_35:2;
+		uint64_t free_cnt:10;
+		uint64_t reserved_21_23:3;
+		uint64_t max_thr:9;
+		uint64_t reserved_9_11:3;
+		uint64_t min_thr:9;
+	} cn52xx;
+	struct cvmx_pow_qos_thrx_cn52xx cn52xxp1;
+	struct cvmx_pow_qos_thrx_s cn56xx;
+	struct cvmx_pow_qos_thrx_s cn56xxp1;
+	struct cvmx_pow_qos_thrx_s cn58xx;
+	struct cvmx_pow_qos_thrx_s cn58xxp1;
+};
+
+union cvmx_pow_ts_pc {
+	uint64_t u64;
+	struct cvmx_pow_ts_pc_s {
+		uint64_t reserved_32_63:32;
+		uint64_t ts_pc:32;
+	} s;
+	struct cvmx_pow_ts_pc_s cn30xx;
+	struct cvmx_pow_ts_pc_s cn31xx;
+	struct cvmx_pow_ts_pc_s cn38xx;
+	struct cvmx_pow_ts_pc_s cn38xxp2;
+	struct cvmx_pow_ts_pc_s cn50xx;
+	struct cvmx_pow_ts_pc_s cn52xx;
+	struct cvmx_pow_ts_pc_s cn52xxp1;
+	struct cvmx_pow_ts_pc_s cn56xx;
+	struct cvmx_pow_ts_pc_s cn56xxp1;
+	struct cvmx_pow_ts_pc_s cn58xx;
+	struct cvmx_pow_ts_pc_s cn58xxp1;
+};
+
+union cvmx_pow_wa_com_pc {
+	uint64_t u64;
+	struct cvmx_pow_wa_com_pc_s {
+		uint64_t reserved_32_63:32;
+		uint64_t wa_pc:32;
+	} s;
+	struct cvmx_pow_wa_com_pc_s cn30xx;
+	struct cvmx_pow_wa_com_pc_s cn31xx;
+	struct cvmx_pow_wa_com_pc_s cn38xx;
+	struct cvmx_pow_wa_com_pc_s cn38xxp2;
+	struct cvmx_pow_wa_com_pc_s cn50xx;
+	struct cvmx_pow_wa_com_pc_s cn52xx;
+	struct cvmx_pow_wa_com_pc_s cn52xxp1;
+	struct cvmx_pow_wa_com_pc_s cn56xx;
+	struct cvmx_pow_wa_com_pc_s cn56xxp1;
+	struct cvmx_pow_wa_com_pc_s cn58xx;
+	struct cvmx_pow_wa_com_pc_s cn58xxp1;
+};
+
+union cvmx_pow_wa_pcx {
+	uint64_t u64;
+	struct cvmx_pow_wa_pcx_s {
+		uint64_t reserved_32_63:32;
+		uint64_t wa_pc:32;
+	} s;
+	struct cvmx_pow_wa_pcx_s cn30xx;
+	struct cvmx_pow_wa_pcx_s cn31xx;
+	struct cvmx_pow_wa_pcx_s cn38xx;
+	struct cvmx_pow_wa_pcx_s cn38xxp2;
+	struct cvmx_pow_wa_pcx_s cn50xx;
+	struct cvmx_pow_wa_pcx_s cn52xx;
+	struct cvmx_pow_wa_pcx_s cn52xxp1;
+	struct cvmx_pow_wa_pcx_s cn56xx;
+	struct cvmx_pow_wa_pcx_s cn56xxp1;
+	struct cvmx_pow_wa_pcx_s cn58xx;
+	struct cvmx_pow_wa_pcx_s cn58xxp1;
+};
+
+union cvmx_pow_wq_int {
+	uint64_t u64;
+	struct cvmx_pow_wq_int_s {
+		uint64_t reserved_32_63:32;
+		uint64_t iq_dis:16;
+		uint64_t wq_int:16;
+	} s;
+	struct cvmx_pow_wq_int_s cn30xx;
+	struct cvmx_pow_wq_int_s cn31xx;
+	struct cvmx_pow_wq_int_s cn38xx;
+	struct cvmx_pow_wq_int_s cn38xxp2;
+	struct cvmx_pow_wq_int_s cn50xx;
+	struct cvmx_pow_wq_int_s cn52xx;
+	struct cvmx_pow_wq_int_s cn52xxp1;
+	struct cvmx_pow_wq_int_s cn56xx;
+	struct cvmx_pow_wq_int_s cn56xxp1;
+	struct cvmx_pow_wq_int_s cn58xx;
+	struct cvmx_pow_wq_int_s cn58xxp1;
+};
+
+union cvmx_pow_wq_int_cntx {
+	uint64_t u64;
+	struct cvmx_pow_wq_int_cntx_s {
+		uint64_t reserved_28_63:36;
+		uint64_t tc_cnt:4;
+		uint64_t ds_cnt:12;
+		uint64_t iq_cnt:12;
+	} s;
+	struct cvmx_pow_wq_int_cntx_cn30xx {
+		uint64_t reserved_28_63:36;
+		uint64_t tc_cnt:4;
+		uint64_t reserved_19_23:5;
+		uint64_t ds_cnt:7;
+		uint64_t reserved_7_11:5;
+		uint64_t iq_cnt:7;
+	} cn30xx;
+	struct cvmx_pow_wq_int_cntx_cn31xx {
+		uint64_t reserved_28_63:36;
+		uint64_t tc_cnt:4;
+		uint64_t reserved_21_23:3;
+		uint64_t ds_cnt:9;
+		uint64_t reserved_9_11:3;
+		uint64_t iq_cnt:9;
+	} cn31xx;
+	struct cvmx_pow_wq_int_cntx_s cn38xx;
+	struct cvmx_pow_wq_int_cntx_s cn38xxp2;
+	struct cvmx_pow_wq_int_cntx_cn31xx cn50xx;
+	struct cvmx_pow_wq_int_cntx_cn52xx {
+		uint64_t reserved_28_63:36;
+		uint64_t tc_cnt:4;
+		uint64_t reserved_22_23:2;
+		uint64_t ds_cnt:10;
+		uint64_t reserved_10_11:2;
+		uint64_t iq_cnt:10;
+	} cn52xx;
+	struct cvmx_pow_wq_int_cntx_cn52xx cn52xxp1;
+	struct cvmx_pow_wq_int_cntx_s cn56xx;
+	struct cvmx_pow_wq_int_cntx_s cn56xxp1;
+	struct cvmx_pow_wq_int_cntx_s cn58xx;
+	struct cvmx_pow_wq_int_cntx_s cn58xxp1;
+};
+
+union cvmx_pow_wq_int_pc {
+	uint64_t u64;
+	struct cvmx_pow_wq_int_pc_s {
+		uint64_t reserved_60_63:4;
+		uint64_t pc:28;
+		uint64_t reserved_28_31:4;
+		uint64_t pc_thr:20;
+		uint64_t reserved_0_7:8;
+	} s;
+	struct cvmx_pow_wq_int_pc_s cn30xx;
+	struct cvmx_pow_wq_int_pc_s cn31xx;
+	struct cvmx_pow_wq_int_pc_s cn38xx;
+	struct cvmx_pow_wq_int_pc_s cn38xxp2;
+	struct cvmx_pow_wq_int_pc_s cn50xx;
+	struct cvmx_pow_wq_int_pc_s cn52xx;
+	struct cvmx_pow_wq_int_pc_s cn52xxp1;
+	struct cvmx_pow_wq_int_pc_s cn56xx;
+	struct cvmx_pow_wq_int_pc_s cn56xxp1;
+	struct cvmx_pow_wq_int_pc_s cn58xx;
+	struct cvmx_pow_wq_int_pc_s cn58xxp1;
+};
+
+union cvmx_pow_wq_int_thrx {
+	uint64_t u64;
+	struct cvmx_pow_wq_int_thrx_s {
+		uint64_t reserved_29_63:35;
+		uint64_t tc_en:1;
+		uint64_t tc_thr:4;
+		uint64_t reserved_23_23:1;
+		uint64_t ds_thr:11;
+		uint64_t reserved_11_11:1;
+		uint64_t iq_thr:11;
+	} s;
+	struct cvmx_pow_wq_int_thrx_cn30xx {
+		uint64_t reserved_29_63:35;
+		uint64_t tc_en:1;
+		uint64_t tc_thr:4;
+		uint64_t reserved_18_23:6;
+		uint64_t ds_thr:6;
+		uint64_t reserved_6_11:6;
+		uint64_t iq_thr:6;
+	} cn30xx;
+	struct cvmx_pow_wq_int_thrx_cn31xx {
+		uint64_t reserved_29_63:35;
+		uint64_t tc_en:1;
+		uint64_t tc_thr:4;
+		uint64_t reserved_20_23:4;
+		uint64_t ds_thr:8;
+		uint64_t reserved_8_11:4;
+		uint64_t iq_thr:8;
+	} cn31xx;
+	struct cvmx_pow_wq_int_thrx_s cn38xx;
+	struct cvmx_pow_wq_int_thrx_s cn38xxp2;
+	struct cvmx_pow_wq_int_thrx_cn31xx cn50xx;
+	struct cvmx_pow_wq_int_thrx_cn52xx {
+		uint64_t reserved_29_63:35;
+		uint64_t tc_en:1;
+		uint64_t tc_thr:4;
+		uint64_t reserved_21_23:3;
+		uint64_t ds_thr:9;
+		uint64_t reserved_9_11:3;
+		uint64_t iq_thr:9;
+	} cn52xx;
+	struct cvmx_pow_wq_int_thrx_cn52xx cn52xxp1;
+	struct cvmx_pow_wq_int_thrx_s cn56xx;
+	struct cvmx_pow_wq_int_thrx_s cn56xxp1;
+	struct cvmx_pow_wq_int_thrx_s cn58xx;
+	struct cvmx_pow_wq_int_thrx_s cn58xxp1;
+};
+
+union cvmx_pow_ws_pcx {
+	uint64_t u64;
+	struct cvmx_pow_ws_pcx_s {
+		uint64_t reserved_32_63:32;
+		uint64_t ws_pc:32;
+	} s;
+	struct cvmx_pow_ws_pcx_s cn30xx;
+	struct cvmx_pow_ws_pcx_s cn31xx;
+	struct cvmx_pow_ws_pcx_s cn38xx;
+	struct cvmx_pow_ws_pcx_s cn38xxp2;
+	struct cvmx_pow_ws_pcx_s cn50xx;
+	struct cvmx_pow_ws_pcx_s cn52xx;
+	struct cvmx_pow_ws_pcx_s cn52xxp1;
+	struct cvmx_pow_ws_pcx_s cn56xx;
+	struct cvmx_pow_ws_pcx_s cn56xxp1;
+	struct cvmx_pow_ws_pcx_s cn58xx;
+	struct cvmx_pow_ws_pcx_s cn58xxp1;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-spinlock.h b/arch/mips/include/asm/octeon/cvmx-spinlock.h
new file mode 100644
index 0000000..2fbf087
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-spinlock.h
@@ -0,0 +1,232 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * Implementation of spinlocks for Octeon CVMX.  Although similar in
+ * function to Linux kernel spinlocks, they are not compatible.
+ * Octeon CVMX spinlocks are only used to synchronize with the boot
+ * monitor and other non-Linux programs running in the system.
+ */
+
+#ifndef __CVMX_SPINLOCK_H__
+#define __CVMX_SPINLOCK_H__
+
+#include "cvmx-asm.h"
+
+/* Spinlocks for Octeon */
+
+/* define these to enable recursive spinlock debugging */
+/*#define CVMX_SPINLOCK_DEBUG */
+
+/**
+ * Spinlocks for Octeon CVMX
+ */
+typedef struct {
+	volatile uint32_t value;
+} cvmx_spinlock_t;
+
+/* note - macros not expanded in inline ASM, so values hardcoded */
+#define  CVMX_SPINLOCK_UNLOCKED_VAL  0
+#define  CVMX_SPINLOCK_LOCKED_VAL    1
+
+#define CVMX_SPINLOCK_UNLOCKED_INITIALIZER  {CVMX_SPINLOCK_UNLOCKED_VAL}
+
+/**
+ * Initialize a spinlock
+ *
+ * @lock:   Lock to initialize
+ */
+static inline void cvmx_spinlock_init(cvmx_spinlock_t *lock)
+{
+	lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
+}
+
+/**
+ * Return non-zero if the spinlock is currently locked
+ *
+ * @lock:   Lock to check
+ * Returns Non-zero if locked
+ */
+static inline int cvmx_spinlock_locked(cvmx_spinlock_t *lock)
+{
+	return lock->value != CVMX_SPINLOCK_UNLOCKED_VAL;
+}
+
+/**
+ * Releases lock
+ *
+ * @lock:   pointer to lock structure
+ */
+static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock)
+{
+	CVMX_SYNCWS;
+	lock->value = 0;
+	CVMX_SYNCWS;
+}
+
+/**
+ * Attempts to take the lock, but does not spin if lock is not available.
+ * May take some time to acquire the lock even if it is available
+ * due to the ll/sc not succeeding.
+ *
+ * @lock:   pointer to lock structure
+ *
+ * Returns 0: lock successfully taken
+ *         1: lock not taken, held by someone else
+ * These return values match the Linux semantics.
+ */
+
+static inline unsigned int cvmx_spinlock_trylock(cvmx_spinlock_t *lock)
+{
+	unsigned int tmp;
+
+	__asm__ __volatile__(".set noreorder         \n"
+			     "1: ll   %[tmp], %[val] \n"
+			/* if lock held, fail immediately */
+			     "   bnez %[tmp], 2f     \n"
+			     "   li   %[tmp], 1      \n"
+			     "   sc   %[tmp], %[val] \n"
+			     "   beqz %[tmp], 1b     \n"
+			     "   li   %[tmp], 0      \n"
+			     "2:                     \n"
+			     ".set reorder           \n" :
+			[val] "+m"(lock->value), [tmp] "=&r"(tmp)
+			     : : "memory");
+
+	return tmp != 0;		/* normalize to 0 or 1 */
+}
+
+/**
+ * Gets lock, spins until lock is taken
+ *
+ * @lock:   pointer to lock structure
+ */
+static inline void cvmx_spinlock_lock(cvmx_spinlock_t *lock)
+{
+	unsigned int tmp;
+
+	__asm__ __volatile__(".set noreorder         \n"
+			     "1: ll   %[tmp], %[val]  \n"
+			     "   bnez %[tmp], 1b     \n"
+			     "   li   %[tmp], 1      \n"
+			     "   sc   %[tmp], %[val] \n"
+			     "   beqz %[tmp], 1b     \n"
+			     "   nop                \n"
+			     ".set reorder           \n" :
+			[val] "+m"(lock->value), [tmp] "=&r"(tmp)
+			: : "memory");
+
+}
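+
+/*
+ * Minimal usage sketch; the lock below and the data it guards are
+ * illustrative assumptions, not part of this header:
+ *
+ *	static cvmx_spinlock_t boot_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
+ *
+ *	cvmx_spinlock_lock(&boot_lock);
+ *	... touch state shared with the boot monitor ...
+ *	cvmx_spinlock_unlock(&boot_lock);
+ *
+ *	if (cvmx_spinlock_trylock(&boot_lock) == 0) {
+ *		... 0 means the lock was taken ...
+ *		cvmx_spinlock_unlock(&boot_lock);
+ *	}
+ */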
+
+/** ********************************************************************
+ * Bit spinlocks
+ * These spinlocks use a single bit (bit 31) of a 32 bit word for locking.
+ * The rest of the bits in the word are left undisturbed.  This enables more
+ * compact data structures as only 1 bit is consumed for the lock.
+ *
+ */
+
+/**
+ * Gets lock, spins until lock is taken
+ * Preserves the low 31 bits of the 32 bit
+ * word used for the lock.
+ *
+ *
+ * @word:  word to lock bit 31 of
+ */
+static inline void cvmx_spinlock_bit_lock(uint32_t *word)
+{
+	unsigned int tmp;
+	unsigned int sav;
+
+	__asm__ __volatile__(".set noreorder         \n"
+			     ".set noat              \n"
+			     "1: ll    %[tmp], %[val]  \n"
+			     "   bbit1 %[tmp], 31, 1b    \n"
+			     "   li    $at, 1      \n"
+			     "   ins   %[tmp], $at, 31, 1  \n"
+			     "   sc    %[tmp], %[val] \n"
+			     "   beqz  %[tmp], 1b     \n"
+			     "   nop                \n"
+			     ".set at              \n"
+			     ".set reorder           \n" :
+			[val] "+m"(*word), [tmp] "=&r"(tmp), [sav] "=&r"(sav)
+			     : : "memory");
+
+}
+
+/**
+ * Attempts to get lock, returns immediately with success/failure
+ * Preserves the low 31 bits of the 32 bit
+ * word used for the lock.
+ *
+ *
+ * @word:  word to lock bit 31 of
+ * Returns 0: lock successfully taken
+ *         1: lock not taken, held by someone else
+ * These return values match the Linux semantics.
+ */
+static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
+{
+	unsigned int tmp;
+
+	__asm__ __volatile__(".set noreorder\n\t"
+			     ".set noat\n"
+			     "1: ll    %[tmp], %[val] \n"
+			/* if lock held, fail immediately */
+			     "   bbit1 %[tmp], 31, 2f     \n"
+			     "   li    $at, 1      \n"
+			     "   ins   %[tmp], $at, 31, 1  \n"
+			     "   sc    %[tmp], %[val] \n"
+			     "   beqz  %[tmp], 1b     \n"
+			     "   li    %[tmp], 0      \n"
+			     "2:                     \n"
+			     ".set at              \n"
+			     ".set reorder           \n" :
+			[val] "+m"(*word), [tmp] "=&r"(tmp)
+			: : "memory");
+
+	return tmp != 0;		/* normalize to 0 or 1 */
+}
+
+/**
+ * Releases bit lock
+ *
+ * Unconditionally clears bit 31 of the lock word.  Note that this is
+ * done non-atomically, as this implementation assumes that the rest
+ * of the bits in the word are protected by the lock.
+ *
+ * @word:  word to unlock bit 31 in
+ */
+static inline void cvmx_spinlock_bit_unlock(uint32_t *word)
+{
+	CVMX_SYNCWS;
+	*word &= ~(1UL << 31);
+	CVMX_SYNCWS;
+}
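+
+/*
+ * Usage sketch for the bit locks; the flags word is an assumption made
+ * for illustration, and only bit 31 is owned by the lock:
+ *
+ *	static uint32_t block_flags;	bits <30:0> hold state, bit 31 is the lock
+ *
+ *	cvmx_spinlock_bit_lock(&block_flags);
+ *	block_flags |= 0x1;		update state while the bit lock is held
+ *	cvmx_spinlock_bit_unlock(&block_flags);
+ */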
+
+#endif /* __CVMX_SPINLOCK_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-sysinfo.h b/arch/mips/include/asm/octeon/cvmx-sysinfo.h
new file mode 100644
index 0000000..61dd574
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-sysinfo.h
@@ -0,0 +1,152 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * This module provides system/board information obtained by the bootloader.
+ */
+
+#ifndef __CVMX_SYSINFO_H__
+#define __CVMX_SYSINFO_H__
+
+#define OCTEON_SERIAL_LEN 20
+/**
+ * Structure describing application specific information.
+ * __cvmx_app_init() populates this from the cvmx boot descriptor.
+ * This structure is private to simple executive applications, so
+ * no versioning is required.
+ *
+ * This structure must be provided with some fields set in order to
+ * use simple executive functions in other applications (Linux kernel,
+ * u-boot, etc.)  The cvmx_sysinfo_minimal_initialize() function is
+ * provided to set the required values in these cases.
+ */
+struct cvmx_sysinfo {
+	/* System wide variables */
+	/* installed DRAM in system, in bytes */
+	uint64_t system_dram_size;
+
+	/* ptr to memory descriptor block */
+	void *phy_mem_desc_ptr;
+
+
+	/* Application image specific variables */
+	/* stack top address (virtual) */
+	uint64_t stack_top;
+	/* heap base address (virtual) */
+	uint64_t heap_base;
+	/* stack size in bytes */
+	uint32_t stack_size;
+	/* heap size in bytes */
+	uint32_t heap_size;
+	/* coremask defining cores running application */
+	uint32_t core_mask;
+	/* Deprecated, use cvmx_coremask_first_core() to select init core */
+	uint32_t init_core;
+
+	/* exception base address, as set by bootloader */
+	uint64_t exception_base_addr;
+
+	/* cpu clock speed in hz */
+	uint32_t cpu_clock_hz;
+
+	/* dram data rate in hz (data rate = 2 * clock rate) */
+	uint32_t dram_data_rate_hz;
+
+
+	uint16_t board_type;
+	uint8_t board_rev_major;
+	uint8_t board_rev_minor;
+	uint8_t mac_addr_base[6];
+	uint8_t mac_addr_count;
+	char board_serial_number[OCTEON_SERIAL_LEN];
+	/*
+	 * Several boards support compact flash on the Octeon boot
+	 * bus.  The CF memory spaces may be mapped to different
+	 * addresses on different boards.  These values will be 0 if
+	 * CF is not present.  Note that these addresses are physical
+	 * addresses, and it is up to the application to use the
+	 * proper addressing mode (XKPHYS, KSEG0, etc.)
+	 */
+	uint64_t compact_flash_common_base_addr;
+	uint64_t compact_flash_attribute_base_addr;
+	/*
+	 * Base address of the LED display (as on EBT3000 board) This
+	 * will be 0 if LED display not present.  Note that this
+	 * address is a physical address, and it is up to the
+	 * application to use the proper addressing mode (XKPHYS,
+	 * KSEG0, etc.)
+	 */
+	uint64_t led_display_base_addr;
+	/* DFA reference clock in hz (if applicable) */
+	uint32_t dfa_ref_clock_hz;
+	/* configuration flags from bootloader */
+	uint32_t bootloader_config_flags;
+
+	/* Uart number used for console */
+	uint8_t console_uart_num;
+};
+
+/**
+ * This function returns the system/board information as obtained
+ * by the bootloader.
+ *
+ *
+ * Returns  Pointer to the boot information structure
+ *
+ */
+
+extern struct cvmx_sysinfo *cvmx_sysinfo_get(void);
+
+/**
+ * This function is used in non-simple executive environments (such as
+ * Linux kernel, u-boot, etc.)  to configure the minimal fields that
+ * are required to use simple executive files directly.
+ *
+ * Locking (if required) must be handled outside of this
+ * function
+ *
+ * @phy_mem_desc_ptr: Pointer to global physical memory descriptor
+ *                   (bootmem descriptor)
+ * @board_type:
+ *                   Octeon board type enumeration
+ *
+ * @board_rev_major:
+ *                   Board major revision
+ * @board_rev_minor:
+ *                   Board minor revision
+ * @cpu_clock_hz:
+ *                   CPU clock frequency in hertz
+ *
+ * Returns 0: Failure
+ *         1: success
+ */
+extern int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
+					   uint16_t board_type,
+					   uint8_t board_rev_major,
+					   uint8_t board_rev_minor,
+					   uint32_t cpu_clock_hz);
+
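+/*
+ * Minimal call sketch for a non simple-executive environment.  The
+ * argument values below are placeholders for whatever the bootloader
+ * handed over (typically taken from the boot descriptor):
+ *
+ *	if (!cvmx_sysinfo_minimal_initialize(mem_desc_ptr, board_type,
+ *					     rev_major, rev_minor, cpu_hz))
+ *		panic("cvmx sysinfo initialization failed");
+ */
+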
+#endif /* __CVMX_SYSINFO_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
new file mode 100644
index 0000000..03fddfa
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -0,0 +1,505 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_H__
+#define __CVMX_H__
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include "cvmx-asm.h"
+#include "cvmx-packet.h"
+#include "cvmx-sysinfo.h"
+
+#include "cvmx-ciu-defs.h"
+#include "cvmx-gpio-defs.h"
+#include "cvmx-iob-defs.h"
+#include "cvmx-ipd-defs.h"
+#include "cvmx-l2c-defs.h"
+#include "cvmx-l2d-defs.h"
+#include "cvmx-l2t-defs.h"
+#include "cvmx-led-defs.h"
+#include "cvmx-mio-defs.h"
+#include "cvmx-pow-defs.h"
+
+#include "cvmx-bootinfo.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-l2c.h"
+
+#ifndef CVMX_ENABLE_DEBUG_PRINTS
+#define CVMX_ENABLE_DEBUG_PRINTS 1
+#endif
+
+#if CVMX_ENABLE_DEBUG_PRINTS
+#define cvmx_dprintf        printk
+#else
+#define cvmx_dprintf(...)   {}
+#endif
+
+#define CVMX_MAX_CORES          (16)
+#define CVMX_CACHE_LINE_SIZE    (128)	/* In bytes */
+#define CVMX_CACHE_LINE_MASK    (CVMX_CACHE_LINE_SIZE - 1)	/* In bytes */
+#define CVMX_CACHE_LINE_ALIGNED __attribute__ ((aligned(CVMX_CACHE_LINE_SIZE)))
+#define CAST64(v) ((long long)(long)(v))
+#define CASTPTR(type, v) ((type *)(long)(v))
+
+/*
+ * Returns processor ID, different Linux and simple exec versions
+ * provided in the cvmx-app-init*.c files.
+ */
+static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
+static inline uint32_t cvmx_get_proc_id(void)
+{
+	uint32_t id;
+	asm("mfc0 %0, $15,0" : "=r"(id));
+	return id;
+}
+
+/* turn the variable name into a string */
+#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
+#define CVMX_TMP_STR2(x) #x
+
+/**
+ * Builds a bit mask given the required size in bits.
+ *
+ * @bits:   Number of bits in the mask
+ * Returns The mask
+ */
+static inline uint64_t cvmx_build_mask(uint64_t bits)
+{
+	return ~((~0x0ull) << bits);
+}
+
+/**
+ * Builds a memory address for I/O based on the Major and Sub DID.
+ *
+ * @major_did: 5 bit major did
+ * @sub_did:   3 bit sub did
+ * Returns I/O base address
+ */
+static inline uint64_t cvmx_build_io_address(uint64_t major_did,
+					     uint64_t sub_did)
+{
+	return (0x1ull << 48) | (major_did << 43) | (sub_did << 40);
+}
+
+/**
+ * Perform mask and shift to place the supplied value into
+ * the supplied bit range.
+ *
+ * Example: cvmx_build_bits(39,24,value)
+ * <pre>
+ * 6       5       4       3       3       2       1
+ * 3       5       7       9       1       3       5       7      0
+ * +-------+-------+-------+-------+-------+-------+-------+------+
+ * 000000000000000000000000___________value000000000000000000000000
+ * </pre>
+ *
+ * @high_bit: Highest bit value can occupy (inclusive) 0-63
+ * @low_bit:  Lowest bit value can occupy inclusive 0-high_bit
+ * @value:    Value to use
+ * Returns Value masked and shifted
+ */
+static inline uint64_t cvmx_build_bits(uint64_t high_bit,
+				       uint64_t low_bit, uint64_t value)
+{
+	return (value & cvmx_build_mask(high_bit - low_bit + 1)) << low_bit;
+}
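+
+/*
+ * Worked example: placing the 16 bit value 0x1234 into bits <39:24>.
+ *
+ *	cvmx_build_mask(16)             == 0x000000000000ffffull
+ *	cvmx_build_bits(39, 24, 0x1234) == 0x0000001234000000ull
+ */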
+
+enum cvmx_mips_space {
+	CVMX_MIPS_SPACE_XKSEG = 3LL,
+	CVMX_MIPS_SPACE_XKPHYS = 2LL,
+	CVMX_MIPS_SPACE_XSSEG = 1LL,
+	CVMX_MIPS_SPACE_XUSEG = 0LL
+};
+
+/* These macros are for use when using 32 bit pointers. */
+#define CVMX_MIPS32_SPACE_KSEG0 1l
+#define CVMX_ADD_SEG32(segment, add) \
+	(((int32_t)segment << 31) | (int32_t)(add))
+
+#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS
+
+/* These macros simplify the process of creating common IO addresses */
+#define CVMX_ADD_SEG(segment, add) \
+	((((uint64_t)segment) << 62) | (add))
+#ifndef CVMX_ADD_IO_SEG
+#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add))
+#endif
+
+/**
+ * Convert a memory pointer (void*) into a hardware compatible
+ * memory address (uint64_t). Octeon hardware widgets don't
+ * understand logical addresses.
+ *
+ * @ptr:    C style memory pointer
+ * Returns Hardware physical address
+ */
+static inline uint64_t cvmx_ptr_to_phys(void *ptr)
+{
+	if (sizeof(void *) == 8) {
+		/*
+		 * We're running in 64 bit mode. Normally this means
+		 * that we can use 40 bits of address space (the
+		 * hardware limit). Unfortunately there is one case
+		 * where we need to limit this to 30 bits, sign
+		 * extended 32 bit. Although these are 64 bits wide,
+		 * only 30 bits can be used.
+		 */
+		if ((CAST64(ptr) >> 62) == 3)
+			return CAST64(ptr) & cvmx_build_mask(30);
+		else
+			return CAST64(ptr) & cvmx_build_mask(40);
+	} else {
+		return (long)(ptr) & 0x1fffffff;
+	}
+}
+
+/**
+ * Convert a hardware physical address (uint64_t) into a
+ * memory pointer (void *).
+ *
+ * @physical_address:
+ *               Hardware physical address to memory
+ * Returns Pointer to memory
+ */
+static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
+{
+	if (sizeof(void *) == 8) {
+		/* Just set the top bit, avoiding any TLB ugliness */
+		return CASTPTR(void,
+			       CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+					    physical_address));
+	} else {
+		return CASTPTR(void,
+			       CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0,
+					      physical_address));
+	}
+}
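+
+/*
+ * Typical conversions; "buffer" is a placeholder for any memory handed
+ * to or received from an Octeon hardware unit:
+ *
+ *	uint64_t hw_addr = cvmx_ptr_to_phys(buffer);	address for hardware
+ *	void *cpu_ptr = cvmx_phys_to_ptr(hw_addr);	pointer usable by the CPU
+ */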
+
+/* The following macro, CVMX_BUILD_WRITE64, builds a store operation to
+    a full 64bit address. With a 64bit ABI, this can be done with a simple
+    pointer access. 32bit ABIs require more complicated assembly */
+
+/* We have a full 64bit ABI. Writing to a 64bit address can be done with
+    a simple volatile pointer */
+#define CVMX_BUILD_WRITE64(TYPE, ST)                                    \
+static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val)     \
+{                                                                       \
+    *CASTPTR(volatile TYPE##_t, addr) = val;                            \
+}
+
+
+/* The following macro, CVMX_BUILD_READ64, builds a load operation from
+    a full 64bit address. With a 64bit ABI, this can be done with a simple
+    pointer access. 32bit ABIs require more complicated assembly */
+
+/* We have a full 64bit ABI. Reading from a 64bit address can be done with
+    a simple volatile pointer */
+#define CVMX_BUILD_READ64(TYPE, LT)                                     \
+static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr)                \
+{                                                                       \
+	return *CASTPTR(volatile TYPE##_t, addr);			\
+}
+
+
+/* The following defines 8 functions for writing to a 64bit address. Each
+    takes two arguments, the address and the value to write.
+    cvmx_write64_int64      cvmx_write64_uint64
+    cvmx_write64_int32      cvmx_write64_uint32
+    cvmx_write64_int16      cvmx_write64_uint16
+    cvmx_write64_int8       cvmx_write64_uint8 */
+CVMX_BUILD_WRITE64(int64, "sd");
+CVMX_BUILD_WRITE64(int32, "sw");
+CVMX_BUILD_WRITE64(int16, "sh");
+CVMX_BUILD_WRITE64(int8, "sb");
+CVMX_BUILD_WRITE64(uint64, "sd");
+CVMX_BUILD_WRITE64(uint32, "sw");
+CVMX_BUILD_WRITE64(uint16, "sh");
+CVMX_BUILD_WRITE64(uint8, "sb");
+#define cvmx_write64 cvmx_write64_uint64
+
+/* The following defines 8 functions for reading from a 64bit address. Each
+    takes the address as the only argument
+    cvmx_read64_int64       cvmx_read64_uint64
+    cvmx_read64_int32       cvmx_read64_uint32
+    cvmx_read64_int16       cvmx_read64_uint16
+    cvmx_read64_int8        cvmx_read64_uint8 */
+CVMX_BUILD_READ64(int64, "ld");
+CVMX_BUILD_READ64(int32, "lw");
+CVMX_BUILD_READ64(int16, "lh");
+CVMX_BUILD_READ64(int8, "lb");
+CVMX_BUILD_READ64(uint64, "ld");
+CVMX_BUILD_READ64(uint32, "lw");
+CVMX_BUILD_READ64(uint16, "lhu");
+CVMX_BUILD_READ64(uint8, "lbu");
+#define cvmx_read64 cvmx_read64_uint64
+
+
+static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
+{
+	cvmx_write64(csr_addr, val);
+
+	/*
+	 * Perform an immediate read after every write to an RSL
+	 * register to force the write to complete. It doesn't matter
+	 * what RSL read we do, so we choose CVMX_MIO_BOOT_BIST_STAT
+	 * because it is fast and harmless.
+	 */
+	if ((csr_addr >> 40) == (0x800118))
+		cvmx_read64(CVMX_MIO_BOOT_BIST_STAT);
+}
+
+static inline void cvmx_write_io(uint64_t io_addr, uint64_t val)
+{
+	cvmx_write64(io_addr, val);
+
+}
+
+static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
+{
+	uint64_t val = cvmx_read64(csr_addr);
+	return val;
+}
+
+
+static inline void cvmx_send_single(uint64_t data)
+{
+	const uint64_t CVMX_IOBDMA_SENDSINGLE = 0xffffffffffffa200ull;
+	cvmx_write64(CVMX_IOBDMA_SENDSINGLE, data);
+}
+
+static inline void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr)
+{
+	union {
+		uint64_t u64;
+		struct {
+			uint64_t scraddr:8;
+			uint64_t len:8;
+			uint64_t addr:48;
+		} s;
+	} addr;
+	addr.u64 = csr_addr;
+	addr.s.scraddr = scraddr >> 3;
+	addr.s.len = 1;
+	cvmx_send_single(addr.u64);
+}
+
+/* Return true if Octeon is CN38XX pass 1 */
+static inline int cvmx_octeon_is_pass1(void)
+{
+#if OCTEON_IS_COMMON_BINARY()
+	return 0;	/* Pass 1 isn't supported for common binaries */
+#else
+/* Now that we know we're built for a specific model, only check CN38XX */
+#if OCTEON_IS_MODEL(OCTEON_CN38XX)
+	return cvmx_get_proc_id() == OCTEON_CN38XX_PASS1;
+#else
+	return 0;	/* Built for non CN38XX chip, we're not CN38XX pass1 */
+#endif
+#endif
+}
+
+static inline unsigned int cvmx_get_core_num(void)
+{
+	unsigned int core_num;
+	CVMX_RDHWRNV(core_num, 0);
+	return core_num;
+}
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for POP instruction.
+ *
+ * @val:    32 bit value to count set bits in
+ *
+ * Returns Number of bits set
+ */
+static inline uint32_t cvmx_pop(uint32_t val)
+{
+	uint32_t pop;
+	CVMX_POP(pop, val);
+	return pop;
+}
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for DPOP instruction.
+ *
+ * @val:    64 bit value to count set bits in
+ *
+ * Returns Number of bits set
+ */
+static inline int cvmx_dpop(uint64_t val)
+{
+	int pop;
+	CVMX_DPOP(pop, val);
+	return pop;
+}
+
+/**
+ * Provide current cycle counter as a return value
+ *
+ * Returns current cycle counter
+ */
+
+static inline uint64_t cvmx_get_cycle(void)
+{
+	uint64_t cycle;
+	CVMX_RDHWR(cycle, 31);
+	return cycle;
+}
+
+/**
+ * Reads a chip global cycle counter.  This counts CPU cycles since
+ * chip reset.  The counter is 64 bit.
+ * This register does not exist on CN38XX pass 1 silicon.
+ *
+ * Returns Global chip cycle count since chip reset.
+ */
+static inline uint64_t cvmx_get_cycle_global(void)
+{
+	if (cvmx_octeon_is_pass1())
+		return 0;
+	else
+		return cvmx_read64(CVMX_IPD_CLK_COUNT);
+}
+
+/**
+ * This macro spins on a field waiting for it to reach a value. It
+ * is common in code to need to wait for a specific field in a CSR
+ * to match a specific value. Conceptually this macro expands to:
+ *
+ * 1) read csr at "address" with a csr typedef of "type"
+ * 2) Check if ("type".s."field" "op" "value")
+ * 3) If #2 isn't true loop to #1 unless too much time has passed.
+ */
+#define CVMX_WAIT_FOR_FIELD64(address, type, field, op, value, timeout_usec)\
+    (									\
+{									\
+	int result;							\
+	do {								\
+		uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
+			cvmx_sysinfo_get()->cpu_clock_hz / 1000000;	\
+		type c;							\
+		while (1) {						\
+			c.u64 = cvmx_read_csr(address);			\
+			if ((c.s.field) op(value)) {			\
+				result = 0;				\
+				break;					\
+			} else if (cvmx_get_cycle() > done) {		\
+				result = -1;				\
+				break;					\
+			} else						\
+				cvmx_wait(100);				\
+		}							\
+	} while (0);							\
+	result;								\
+})
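+
+/*
+ * Example use, mirroring the polling loop in cvmx_fuse_read_byte()
+ * below: wait up to 1000 microseconds for a fuse read to finish.
+ *
+ *	if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_FUS_RCMD, union cvmx_mio_fus_rcmd,
+ *				  pend, ==, 0, 1000))
+ *		cvmx_dprintf("fuse read timed out\n");
+ */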
+
+/***************************************************************************/
+
+static inline void cvmx_reset_octeon(void)
+{
+	union cvmx_ciu_soft_rst ciu_soft_rst;
+	ciu_soft_rst.u64 = 0;
+	ciu_soft_rst.s.soft_rst = 1;
+	cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64);
+}
+
+/* Return the number of cores available in the chip */
+static inline uint32_t cvmx_octeon_num_cores(void)
+{
+	uint32_t ciu_fuse = (uint32_t) cvmx_read_csr(CVMX_CIU_FUSE) & 0xffff;
+	return cvmx_pop(ciu_fuse);
+}
+
+/**
+ * Read a byte of fuse data
+ * @byte_addr:   address to read
+ *
+ * Returns fuse value: 0 or 1
+ */
+static inline uint8_t cvmx_fuse_read_byte(int byte_addr)
+{
+	union cvmx_mio_fus_rcmd read_cmd;
+
+	read_cmd.u64 = 0;
+	read_cmd.s.addr = byte_addr;
+	read_cmd.s.pend = 1;
+	cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
+	while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD))
+	       && read_cmd.s.pend)
+		;
+	return read_cmd.s.dat;
+}
+
+/**
+ * Read a single fuse bit
+ *
+ * @fuse:   Fuse number (0-1024)
+ *
+ * Returns fuse value: 0 or 1
+ */
+static inline int cvmx_fuse_read(int fuse)
+{
+	return (cvmx_fuse_read_byte(fuse >> 3) >> (fuse & 0x7)) & 1;
+}
+
+static inline int cvmx_octeon_model_CN36XX(void)
+{
+	return OCTEON_IS_MODEL(OCTEON_CN38XX)
+		&& !cvmx_octeon_is_pass1()
+		&& cvmx_fuse_read(264);
+}
+
+static inline int cvmx_octeon_zip_present(void)
+{
+	return octeon_has_feature(OCTEON_FEATURE_ZIP);
+}
+
+static inline int cvmx_octeon_dfa_present(void)
+{
+	if (!OCTEON_IS_MODEL(OCTEON_CN38XX)
+	    && !OCTEON_IS_MODEL(OCTEON_CN31XX)
+	    && !OCTEON_IS_MODEL(OCTEON_CN58XX))
+		return 0;
+	else if (OCTEON_IS_MODEL(OCTEON_CN3020))
+		return 0;
+	else if (cvmx_octeon_is_pass1())
+		return 1;
+	else
+		return !cvmx_fuse_read(120);
+}
+
+static inline int cvmx_octeon_crypto_present(void)
+{
+	return octeon_has_feature(OCTEON_FEATURE_CRYPTO);
+}
+
+#endif /*  __CVMX_H__  */
diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h
new file mode 100644
index 0000000..04fac68
--- /dev/null
+++ b/arch/mips/include/asm/octeon/octeon-feature.h
@@ -0,0 +1,119 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * File defining checks for different Octeon features.
+ */
+
+#ifndef __OCTEON_FEATURE_H__
+#define __OCTEON_FEATURE_H__
+
+enum octeon_feature {
+	/*
+	 * Octeon models in the CN5XXX family and higher support
+	 * atomic add instructions to memory (saa/saad).
+	 */
+	OCTEON_FEATURE_SAAD,
+	/* Does this Octeon support the ZIP offload engine? */
+	OCTEON_FEATURE_ZIP,
+	/* Does this Octeon support crypto acceleration using COP2? */
+	OCTEON_FEATURE_CRYPTO,
+	/* Does this Octeon support PCI express? */
+	OCTEON_FEATURE_PCIE,
+	/* Some Octeon models support internal memory for storing
+	 * cryptographic keys */
+	OCTEON_FEATURE_KEY_MEMORY,
+	/* Octeon has a LED controller for banks of external LEDs */
+	OCTEON_FEATURE_LED_CONTROLLER,
+	/* Octeon has a trace buffer */
+	OCTEON_FEATURE_TRA,
+	/* Octeon has a management port */
+	OCTEON_FEATURE_MGMT_PORT,
+	/* Octeon has a raid unit */
+	OCTEON_FEATURE_RAID,
+	/* Octeon has a builtin USB */
+	OCTEON_FEATURE_USB,
+};
+
+static inline int cvmx_fuse_read(int fuse);
+
+/**
+ * Determine if the current Octeon supports a specific feature. These
+ * checks have been optimized to be fairly quick, but they should still
+ * be kept out of fast path code.
+ *
+ * @feature: Feature to check for. This should always be a constant so the
+ *                compiler can remove the switch statement through optimization.
+ *
+ * Returns Non zero if the feature exists. Zero if the feature does not
+ *         exist.
+ */
+static inline int octeon_has_feature(enum octeon_feature feature)
+{
+	switch (feature) {
+	case OCTEON_FEATURE_SAAD:
+		return !OCTEON_IS_MODEL(OCTEON_CN3XXX);
+
+	case OCTEON_FEATURE_ZIP:
+		if (OCTEON_IS_MODEL(OCTEON_CN30XX)
+		    || OCTEON_IS_MODEL(OCTEON_CN50XX)
+		    || OCTEON_IS_MODEL(OCTEON_CN52XX))
+			return 0;
+		else if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
+			return 1;
+		else
+			return !cvmx_fuse_read(121);
+
+	case OCTEON_FEATURE_CRYPTO:
+		return !cvmx_fuse_read(90);
+
+	case OCTEON_FEATURE_PCIE:
+		return OCTEON_IS_MODEL(OCTEON_CN56XX)
+			|| OCTEON_IS_MODEL(OCTEON_CN52XX);
+
+	case OCTEON_FEATURE_KEY_MEMORY:
+	case OCTEON_FEATURE_LED_CONTROLLER:
+		return OCTEON_IS_MODEL(OCTEON_CN38XX)
+			|| OCTEON_IS_MODEL(OCTEON_CN58XX)
+			|| OCTEON_IS_MODEL(OCTEON_CN56XX);
+	case OCTEON_FEATURE_TRA:
+		return !(OCTEON_IS_MODEL(OCTEON_CN30XX)
+			 || OCTEON_IS_MODEL(OCTEON_CN50XX));
+	case OCTEON_FEATURE_MGMT_PORT:
+		return OCTEON_IS_MODEL(OCTEON_CN56XX)
+			|| OCTEON_IS_MODEL(OCTEON_CN52XX);
+	case OCTEON_FEATURE_RAID:
+		return OCTEON_IS_MODEL(OCTEON_CN56XX)
+			|| OCTEON_IS_MODEL(OCTEON_CN52XX);
+	case OCTEON_FEATURE_USB:
+		return !(OCTEON_IS_MODEL(OCTEON_CN38XX)
+			 || OCTEON_IS_MODEL(OCTEON_CN58XX));
+	}
+	return 0;
+}
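+
+/*
+ * Typical use, with a constant feature so the switch above collapses
+ * to a simple model check; the setup routines named here are
+ * hypothetical:
+ *
+ *	if (octeon_has_feature(OCTEON_FEATURE_PCIE))
+ *		setup_pcie_ports();
+ *	else
+ *		setup_legacy_pci();
+ */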
+
+#endif /* __OCTEON_FEATURE_H__ */
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h
new file mode 100644
index 0000000..cf50336
--- /dev/null
+++ b/arch/mips/include/asm/octeon/octeon-model.h
@@ -0,0 +1,321 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * File defining different Octeon model IDs and macros to
+ * compare them.
+ *
+ */
+
+#ifndef __OCTEON_MODEL_H__
+#define __OCTEON_MODEL_H__
+
+/* NOTE: These must match what is checked in common-config.mk */
+/* Defines to represent the different versions of Octeon.  */
+
+/*
+ * IMPORTANT: When the default pass is updated for an Octeon Model,
+ * the corresponding change must also be made in the oct-sim script.
+ */
+
+/*
+ * The defines below should be used with the OCTEON_IS_MODEL() macro
+ * to determine what model of chip the software is running on.  Models
+ * ending in 'XX' match multiple models (families), while specific
+ * models match only that model.  If a pass (revision) is specified,
+ * then only that revision will be matched.  Care should be taken when
+ * checking for both specific models and families that the specific
+ * models are checked for first.  While these defines are similar to
+ * the processor ID, they are not intended to be used by anything
+ * other than the OCTEON_IS_MODEL framework, and the values are
+ * subject to change at any time without notice.
+ *
+ * NOTE: only the OCTEON_IS_MODEL() macro/function and the OCTEON_CN*
+ * macros should be used outside of this file.  All other macros are
+ * for internal use only, and may change without notice.
+ */
+
+/* Flag bits in top byte */
+/* Ignores revision in model checks */
+#define OM_IGNORE_REVISION        0x01000000
+/* Check submodels */
+#define OM_CHECK_SUBMODEL         0x02000000
+/* Match all models previous than the one specified */
+#define OM_MATCH_PREVIOUS_MODELS  0x04000000
+/* Ignores the minor revision on newer parts */
+#define OM_IGNORE_MINOR_REVISION  0x08000000
+#define OM_FLAG_MASK              0xff000000
+
+/*
+ * CN5XXX models with new revision encoding
+ */
+#define OCTEON_CN58XX_PASS1_0   0x000d0300
+#define OCTEON_CN58XX_PASS1_1   0x000d0301
+#define OCTEON_CN58XX_PASS1_2   0x000d0303
+#define OCTEON_CN58XX_PASS2_0   0x000d0308
+#define OCTEON_CN58XX_PASS2_1   0x000d0309
+#define OCTEON_CN58XX_PASS2_2   0x000d030a
+#define OCTEON_CN58XX_PASS2_3   0x000d030b
+
+#define OCTEON_CN58XX           (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN58XX_PASS1_X   (OCTEON_CN58XX_PASS1_0 \
+				 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN58XX_PASS2_X   (OCTEON_CN58XX_PASS2_0 \
+				 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN58XX_PASS1     OCTEON_CN58XX_PASS1_X
+#define OCTEON_CN58XX_PASS2     OCTEON_CN58XX_PASS2_X
+
+#define OCTEON_CN56XX_PASS1_0   0x000d0400
+#define OCTEON_CN56XX_PASS1_1   0x000d0401
+#define OCTEON_CN56XX_PASS2_0   0x000d0408
+#define OCTEON_CN56XX_PASS2_1   0x000d0409
+
+#define OCTEON_CN56XX           (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN56XX_PASS1_X   (OCTEON_CN56XX_PASS1_0 \
+				 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN56XX_PASS2_X   (OCTEON_CN56XX_PASS2_0 \
+				 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN56XX_PASS1     OCTEON_CN56XX_PASS1_X
+#define OCTEON_CN56XX_PASS2     OCTEON_CN56XX_PASS2_X
+
+#define OCTEON_CN57XX           OCTEON_CN56XX
+#define OCTEON_CN57XX_PASS1     OCTEON_CN56XX_PASS1
+#define OCTEON_CN57XX_PASS2     OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN55XX           OCTEON_CN56XX
+#define OCTEON_CN55XX_PASS1     OCTEON_CN56XX_PASS1
+#define OCTEON_CN55XX_PASS2     OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN54XX           OCTEON_CN56XX
+#define OCTEON_CN54XX_PASS1     OCTEON_CN56XX_PASS1
+#define OCTEON_CN54XX_PASS2     OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN50XX_PASS1_0   0x000d0600
+
+#define OCTEON_CN50XX           (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN50XX_PASS1_X   (OCTEON_CN50XX_PASS1_0 \
+				 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN50XX_PASS1     OCTEON_CN50XX_PASS1_X
+
+/*
+ * NOTE: The Octeon CN5000F model is not identifiable using the
+ * OCTEON_IS_MODEL() functions, but is treated as CN50XX.
+ */
+
+#define OCTEON_CN52XX_PASS1_0   0x000d0700
+#define OCTEON_CN52XX_PASS2_0   0x000d0708
+
+#define OCTEON_CN52XX           (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN52XX_PASS1_X   (OCTEON_CN52XX_PASS1_0 \
+				 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN52XX_PASS2_X   (OCTEON_CN52XX_PASS2_0 \
+				 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN52XX_PASS1     OCTEON_CN52XX_PASS1_X
+#define OCTEON_CN52XX_PASS2     OCTEON_CN52XX_PASS2_X
+
+/*
+ * CN3XXX models with old revision encoding
+ */
+#define OCTEON_CN38XX_PASS1     0x000d0000
+#define OCTEON_CN38XX_PASS2     0x000d0001
+#define OCTEON_CN38XX_PASS3     0x000d0003
+#define OCTEON_CN38XX           (OCTEON_CN38XX_PASS3 | OM_IGNORE_REVISION)
+
+#define OCTEON_CN36XX           OCTEON_CN38XX
+#define OCTEON_CN36XX_PASS2     OCTEON_CN38XX_PASS2
+#define OCTEON_CN36XX_PASS3     OCTEON_CN38XX_PASS3
+
+/* The OCTEON_CN31XX matches CN31XX models and the CN3020 */
+#define OCTEON_CN31XX_PASS1     0x000d0100
+#define OCTEON_CN31XX_PASS1_1   0x000d0102
+#define OCTEON_CN31XX           (OCTEON_CN31XX_PASS1 | OM_IGNORE_REVISION)
+
+/*
+ * This model is only used for internal checks; it is not a valid
+ * model for the OCTEON_MODEL environment variable.  This matches the
+ * CN3010 and CN3005 but NOT the CN3020.
+ */
+#define OCTEON_CN30XX_PASS1     0x000d0200
+#define OCTEON_CN30XX_PASS1_1   0x000d0202
+#define OCTEON_CN30XX           (OCTEON_CN30XX_PASS1 | OM_IGNORE_REVISION)
+
+#define OCTEON_CN3005_PASS1     (0x000d0210 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1_0   (0x000d0210 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1_1   (0x000d0212 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005           (OCTEON_CN3005_PASS1 | OM_IGNORE_REVISION \
+				 | OM_CHECK_SUBMODEL)
+
+#define OCTEON_CN3010_PASS1     (0x000d0200 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1_0   (0x000d0200 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1_1   (0x000d0202 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010           (OCTEON_CN3010_PASS1 | OM_IGNORE_REVISION \
+				 | OM_CHECK_SUBMODEL)
+
+#define OCTEON_CN3020_PASS1     (0x000d0110 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1_0   (0x000d0110 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1_1   (0x000d0112 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020           (OCTEON_CN3020_PASS1 | OM_IGNORE_REVISION \
+				 | OM_CHECK_SUBMODEL)
+
+
+
+/* This matches the complete family of CN3xxx CPUs, and not subsequent models */
+#define OCTEON_CN3XXX           (OCTEON_CN58XX_PASS1_0 \
+				 | OM_MATCH_PREVIOUS_MODELS \
+				 | OM_IGNORE_REVISION)
+
+/* The revision byte (low byte) has two different encodings.
+ * CN3XXX:
+ *
+ *     bits
+ *     <7:5>: reserved (0)
+ *     <4>:   alternate package
+ *     <3:0>: revision
+ *
+ * CN5XXX:
+ *
+ *     bits
+ *     <7>:   reserved (0)
+ *     <6>:   alternate package
+ *     <5:3>: major revision
+ *     <2:0>: minor revision
+ *
+ */
+
+/* Masks used for the various types of model/family/revision matching */
+#define OCTEON_38XX_FAMILY_MASK      0x00ffff00
+#define OCTEON_38XX_FAMILY_REV_MASK  0x00ffff0f
+#define OCTEON_38XX_MODEL_MASK       0x00ffff10
+#define OCTEON_38XX_MODEL_REV_MASK   (OCTEON_38XX_FAMILY_REV_MASK \
+				      | OCTEON_38XX_MODEL_MASK)
+
+/* CN5XXX and later use different layout of bits in the revision ID field */
+#define OCTEON_58XX_FAMILY_MASK      OCTEON_38XX_FAMILY_MASK
+#define OCTEON_58XX_FAMILY_REV_MASK  0x00ffff3f
+#define OCTEON_58XX_MODEL_MASK       0x00ffffc0
+#define OCTEON_58XX_MODEL_REV_MASK   (OCTEON_58XX_FAMILY_REV_MASK \
+				      | OCTEON_58XX_MODEL_MASK)
+#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK \
+					  & 0x00fffff8)
+
+#define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z)))
+
+/* NOTE: This is for internal (to this file) use only. */
+static inline int __OCTEON_IS_MODEL_COMPILE__(uint32_t arg_model,
+					      uint32_t chip_model)
+{
+	uint32_t rev_and_sub = OM_IGNORE_REVISION | OM_CHECK_SUBMODEL;
+
+	if ((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) {
+		if (((arg_model & OM_FLAG_MASK) == rev_and_sub) &&
+		    __OCTEON_MATCH_MASK__(chip_model, arg_model,
+					  OCTEON_38XX_MODEL_MASK))
+			return 1;
+		if (((arg_model & OM_FLAG_MASK) == 0) &&
+		    __OCTEON_MATCH_MASK__(chip_model, arg_model,
+					  OCTEON_38XX_FAMILY_REV_MASK))
+			return 1;
+		if (((arg_model & OM_FLAG_MASK) == OM_IGNORE_REVISION) &&
+		    __OCTEON_MATCH_MASK__(chip_model, arg_model,
+					  OCTEON_38XX_FAMILY_MASK))
+			return 1;
+		if (((arg_model & OM_FLAG_MASK) == OM_CHECK_SUBMODEL) &&
+		    __OCTEON_MATCH_MASK__((chip_model), (arg_model),
+					  OCTEON_38XX_MODEL_REV_MASK))
+			return 1;
+		if ((arg_model & OM_MATCH_PREVIOUS_MODELS) &&
+		    ((chip_model & OCTEON_38XX_MODEL_MASK) <
+			    (arg_model & OCTEON_38XX_MODEL_MASK)))
+			return 1;
+	} else {
+		if (((arg_model & OM_FLAG_MASK) == rev_and_sub) &&
+		    __OCTEON_MATCH_MASK__((chip_model), (arg_model),
+					  OCTEON_58XX_MODEL_MASK))
+			return 1;
+		if (((arg_model & OM_FLAG_MASK) == 0) &&
+		    __OCTEON_MATCH_MASK__((chip_model), (arg_model),
+					  OCTEON_58XX_FAMILY_REV_MASK))
+			return 1;
+		if (((arg_model & OM_FLAG_MASK) == OM_IGNORE_MINOR_REVISION) &&
+		    __OCTEON_MATCH_MASK__((chip_model), (arg_model),
+					  OCTEON_58XX_MODEL_MINOR_REV_MASK))
+			return 1;
+		if (((arg_model & OM_FLAG_MASK) == OM_IGNORE_REVISION) &&
+		    __OCTEON_MATCH_MASK__((chip_model), (arg_model),
+					  OCTEON_58XX_FAMILY_MASK))
+			return 1;
+		if (((arg_model & OM_FLAG_MASK) == OM_CHECK_SUBMODEL) &&
+		    __OCTEON_MATCH_MASK__((chip_model), (arg_model),
+					  OCTEON_58XX_MODEL_REV_MASK))
+			return 1;
+		if ((arg_model & OM_MATCH_PREVIOUS_MODELS) &&
+		    ((chip_model & OCTEON_58XX_MODEL_MASK) <
+			    (arg_model & OCTEON_58XX_MODEL_MASK)))
+			return 1;
+	}
+	return 0;
+}
+
+/* forward declarations */
+static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
+static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
+
+/* NOTE: This is for internal use only! */
+static inline int __octeon_is_model_runtime__(uint32_t model)
+{
+	uint32_t cpuid = cvmx_get_proc_id();
+
+	/*
+	 * Check for special case of mismarked 3005 samples. We only
+	 * need to check if the sub model isn't being ignored.
+	 */
+	if ((model & OM_CHECK_SUBMODEL) == OM_CHECK_SUBMODEL) {
+		if (cpuid == OCTEON_CN3010_PASS1 \
+		    && (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34)))
+			cpuid |= 0x10;
+	}
+	return __OCTEON_IS_MODEL_COMPILE__(model, cpuid);
+}
+
+/*
+ * The OCTEON_IS_MODEL macro should be used for all Octeon model
+ * checking done in a program.  This should be kept runtime if at all
+ * possible.  Any compile time (#if OCTEON_IS_MODEL) usage must be
+ * condtionalized with OCTEON_IS_COMMON_BINARY() if runtime checking
+ * support is required.
+ */
+#define OCTEON_IS_MODEL(x) __octeon_is_model_runtime__(x)
+#define OCTEON_IS_COMMON_BINARY() 1
+#undef OCTEON_MODEL
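+
+/*
+ * Runtime usage example; the specific CN3020 model is checked before
+ * the CN31XX family that also matches it:
+ *
+ *	if (OCTEON_IS_MODEL(OCTEON_CN3020))
+ *		... CN3020 specific handling ...
+ *	else if (OCTEON_IS_MODEL(OCTEON_CN31XX))
+ *		... rest of the CN31XX family ...
+ */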
+
+const char *octeon_model_get_string(uint32_t chip_id);
+const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer);
+
+#include "octeon-feature.h"
+
+#endif /* __OCTEON_MODEL_H__ */
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
new file mode 100644
index 0000000..edc6760
--- /dev/null
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -0,0 +1,248 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2008 Cavium Networks
+ */
+#ifndef __ASM_OCTEON_OCTEON_H
+#define __ASM_OCTEON_OCTEON_H
+
+#include "cvmx.h"
+
+extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size,
+						uint64_t alignment,
+						uint64_t min_addr,
+						uint64_t max_addr,
+						int do_locking);
+extern void *octeon_bootmem_alloc(uint64_t size, uint64_t alignment,
+				  int do_locking);
+extern void *octeon_bootmem_alloc_range(uint64_t size, uint64_t alignment,
+					uint64_t min_addr, uint64_t max_addr,
+					int do_locking);
+extern void *octeon_bootmem_alloc_named(uint64_t size, uint64_t alignment,
+					char *name);
+extern void *octeon_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr,
+					      uint64_t max_addr, uint64_t align,
+					      char *name);
+extern void *octeon_bootmem_alloc_named_address(uint64_t size, uint64_t address,
+						char *name);
+extern int octeon_bootmem_free_named(char *name);
+extern void octeon_bootmem_lock(void);
+extern void octeon_bootmem_unlock(void);
+
+extern int octeon_is_simulation(void);
+extern int octeon_is_pci_host(void);
+extern int octeon_usb_is_ref_clk(void);
+extern uint64_t octeon_get_clock_rate(void);
+extern const char *octeon_board_type_string(void);
+extern const char *octeon_get_pci_interrupts(void);
+extern int octeon_get_southbridge_interrupt(void);
+extern int octeon_get_boot_coremask(void);
+extern int octeon_get_boot_num_arguments(void);
+extern const char *octeon_get_boot_argument(int arg);
+extern void octeon_hal_setup_reserved32(void);
+extern void octeon_user_io_init(void);
+struct octeon_cop2_state;
+extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state);
+extern void octeon_crypto_disable(struct octeon_cop2_state *state,
+				  unsigned long flags);
+
+extern void octeon_init_cvmcount(void);
+
+#define OCTEON_ARGV_MAX_ARGS	64
+#define OCTOEN_SERIAL_LEN	20
+
+struct octeon_boot_descriptor {
+	/* Start of block referenced by assembly code - do not change! */
+	uint32_t desc_version;
+	uint32_t desc_size;
+	uint64_t stack_top;
+	uint64_t heap_base;
+	uint64_t heap_end;
+	/* Only used by bootloader */
+	uint64_t entry_point;
+	uint64_t desc_vaddr;
+	/* End of this block referenced by assembly code - do not change! */
+	uint32_t exception_base_addr;
+	uint32_t stack_size;
+	uint32_t heap_size;
+	/* Argc count for application. */
+	uint32_t argc;
+	uint32_t argv[OCTEON_ARGV_MAX_ARGS];
+
+#define  BOOT_FLAG_INIT_CORE		(1 << 0)
+#define  OCTEON_BL_FLAG_DEBUG		(1 << 1)
+#define  OCTEON_BL_FLAG_NO_MAGIC	(1 << 2)
+	/* If set, use uart1 for console */
+#define  OCTEON_BL_FLAG_CONSOLE_UART1	(1 << 3)
+	/* If set, use PCI console */
+#define  OCTEON_BL_FLAG_CONSOLE_PCI	(1 << 4)
+	/* Call exit on break on serial port */
+#define  OCTEON_BL_FLAG_BREAK		(1 << 5)
+
+	uint32_t flags;
+	uint32_t core_mask;
+	/* DRAM size in megabytes. */
+	uint32_t dram_size;
+	/* physical address of free memory descriptor block. */
+	uint32_t phy_mem_desc_addr;
+	/* used to pass flags from app to debugger. */
+	uint32_t debugger_flags_base_addr;
+	/* CPU clock speed, in hz. */
+	uint32_t eclock_hz;
+	/* DRAM clock speed, in hz. */
+	uint32_t dclock_hz;
+	/* SPI4 clock in hz. */
+	uint32_t spi_clock_hz;
+	uint16_t board_type;
+	uint8_t board_rev_major;
+	uint8_t board_rev_minor;
+	uint16_t chip_type;
+	uint8_t chip_rev_major;
+	uint8_t chip_rev_minor;
+	char board_serial_number[OCTOEN_SERIAL_LEN];
+	uint8_t mac_addr_base[6];
+	uint8_t mac_addr_count;
+	uint64_t cvmx_desc_vaddr;
+};
+
+union octeon_cvmemctl {
+	uint64_t u64;
+	struct {
+		/* RO 1 = BIST fail, 0 = BIST pass */
+		uint64_t tlbbist:1;
+		/* RO 1 = BIST fail, 0 = BIST pass */
+		uint64_t l1cbist:1;
+		/* RO 1 = BIST fail, 0 = BIST pass */
+		uint64_t l1dbist:1;
+		/* RO 1 = BIST fail, 0 = BIST pass */
+		uint64_t dcmbist:1;
+		/* RO 1 = BIST fail, 0 = BIST pass */
+		uint64_t ptgbist:1;
+		/* RO 1 = BIST fail, 0 = BIST pass */
+		uint64_t wbfbist:1;
+		/* Reserved */
+		uint64_t reserved:22;
+		/* R/W If set, marked write-buffer entries time out
+		 * the same as other entries; if clear, marked
+		 * write-buffer entries use the maximum timeout. */
+		uint64_t dismarkwblongto:1;
+		/* R/W If set, a merged store does not clear the
+		 * write-buffer entry timeout state. */
+		uint64_t dismrgclrwbto:1;
+		/* R/W Two bits that are the MSBs of the resultant
+		 * CVMSEG LM word location for an IOBDMA. The other 8
+		 * bits come from the SCRADDR field of the IOBDMA. */
+		uint64_t iobdmascrmsb:2;
+		/* R/W If set, SYNCWS and SYNCS only order marked
+		 * stores; if clear, SYNCWS and SYNCS only order
+		 * unmarked stores. SYNCWSMARKED has no effect when
+		 * DISSYNCWS is set. */
+		uint64_t syncwsmarked:1;
+		/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as
+		 * SYNC. */
+		uint64_t dissyncws:1;
+		/* R/W If set, no stall happens on write buffer
+		 * full. */
+		uint64_t diswbfst:1;
+		/* R/W If set (and SX set), supervisor-level
+		 * loads/stores can use XKPHYS addresses with
+		 * VA<48>==0 */
+		uint64_t xkmemenas:1;
+		/* R/W If set (and UX set), user-level loads/stores
+		 * can use XKPHYS addresses with VA<48>==0 */
+		uint64_t xkmemenau:1;
+		/* R/W If set (and SX set), supervisor-level
+		 * loads/stores can use XKPHYS addresses with
+		 * VA<48>==1 */
+		uint64_t xkioenas:1;
+		/* R/W If set (and UX set), user-level loads/stores
+		 * can use XKPHYS addresses with VA<48>==1 */
+		uint64_t xkioenau:1;
+		/* R/W If set, all stores act as SYNCW (NOMERGE must
+		 * be set when this is set) RW, reset to 0. */
+		uint64_t allsyncw:1;
+		/* R/W If set, no stores merge, and all stores reach
+		 * the coherent bus in order. */
+		uint64_t nomerge:1;
+		/* R/W Selects the bit in the counter used for DID
+		 * time-outs: 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 =
+		 * 2^14. Actual time-out is between 1x and 2x this
+		 * interval. For example, with DIDTTO=3, expiration
+		 * interval is between 16K and 32K. */
+		uint64_t didtto:2;
+		/* R/W If set, the (mem) CSR clock never turns off. */
+		uint64_t csrckalwys:1;
+		/* R/W If set, mclk never turns off. */
+		uint64_t mclkalwys:1;
+		/* R/W Selects the bit in the counter used for write
+		 * buffer flush time-outs; (WBFLT+11) is the bit
+		 * position in an internal counter used to determine
+		 * expiration. The write buffer expires between 1x and
+		 * 2x this interval. For example, with WBFLT = 0, a
+		 * write buffer expires between 2K and 4K cycles after
+		 * the write buffer entry is allocated. */
+		uint64_t wbfltime:3;
+		/* R/W If set, do not put Istream in the L2 cache. */
+		uint64_t istrnol2:1;
+		/* R/W The write buffer threshold. */
+		uint64_t wbthresh:4;
+		/* Reserved */
+		uint64_t reserved2:2;
+		/* R/W If set, CVMSEG is available for loads/stores in
+		 * kernel/debug mode. */
+		uint64_t cvmsegenak:1;
+		/* R/W If set, CVMSEG is available for loads/stores in
+		 * supervisor mode. */
+		uint64_t cvmsegenas:1;
+		/* R/W If set, CVMSEG is available for loads/stores in
+		 * user mode. */
+		uint64_t cvmsegenau:1;
+		/* R/W Size of local memory in cache blocks, 54 (6912
+		 * bytes) is max legal value. */
+		uint64_t lmemsz:6;
+	} s;
+};
+
+struct octeon_cf_data {
+	unsigned long	base_region_bias;
+	unsigned int	base_region;	/* The chip select region used by CF */
+	int		is16bit;	/* 0 - 8bit, !0 - 16bit */
+	int		dma_engine;	/* -1 for no DMA */
+};
+
+extern void octeon_write_lcd(const char *s);
+extern void octeon_check_cpu_bist(void);
+extern int octeon_get_boot_debug_flag(void);
+extern int octeon_get_boot_uart(void);
+
+struct uart_port;
+extern unsigned int octeon_serial_in(struct uart_port *, int);
+extern void octeon_serial_out(struct uart_port *, int, int);
+
+/**
+ * Write a 32bit value to the Octeon NPI register space
+ *
+ * @address: Address to write to
+ * @val:     Value to write
+ */
+static inline void octeon_npi_write32(uint64_t address, uint32_t val)
+{
+	cvmx_write64_uint32(address ^ 4, val);
+	cvmx_read64_uint32(address ^ 4);
+}
+
+
+/**
+ * Read a 32bit value from the Octeon NPI register space
+ *
+ * @address: Address to read
+ * Returns The result
+ */
+static inline uint32_t octeon_npi_read32(uint64_t address)
+{
+	return cvmx_read64_uint32(address ^ 4);
+}
+
+#endif /* __ASM_OCTEON_OCTEON_H */
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 18ee58e..0f926aa 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -118,6 +118,60 @@
 	struct mips3264_watch_reg_state mips3264;
 };
 
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+
+struct octeon_cop2_state {
+	/* DMFC2 rt, 0x0201 */
+	unsigned long   cop2_crc_iv;
+	/* DMFC2 rt, 0x0202 (Set with DMTC2 rt, 0x1202) */
+	unsigned long   cop2_crc_length;
+	/* DMFC2 rt, 0x0200 (set with DMTC2 rt, 0x4200) */
+	unsigned long   cop2_crc_poly;
+	/* DMFC2 rt, 0x0402; DMFC2 rt, 0x040A */
+	unsigned long   cop2_llm_dat[2];
+       /* DMFC2 rt, 0x0084 */
+	unsigned long   cop2_3des_iv;
+	/* DMFC2 rt, 0x0080; DMFC2 rt, 0x0081; DMFC2 rt, 0x0082 */
+	unsigned long   cop2_3des_key[3];
+	/* DMFC2 rt, 0x0088 (Set with DMTC2 rt, 0x0098) */
+	unsigned long   cop2_3des_result;
+	/* DMFC2 rt, 0x0111 (FIXME: Read Pass1 Errata) */
+	unsigned long   cop2_aes_inp0;
+	/* DMFC2 rt, 0x0102; DMFC2 rt, 0x0103 */
+	unsigned long   cop2_aes_iv[2];
+	/* DMFC2 rt, 0x0104; DMFC2 rt, 0x0105; DMFC2 rt, 0x0106; DMFC2
+	 * rt, 0x0107 */
+	unsigned long   cop2_aes_key[4];
+	/* DMFC2 rt, 0x0110 */
+	unsigned long   cop2_aes_keylen;
+	/* DMFC2 rt, 0x0100; DMFC2 rt, 0x0101 */
+	unsigned long   cop2_aes_result[2];
+	/* DMFC2 rt, 0x0240; DMFC2 rt, 0x0241; DMFC2 rt, 0x0242; DMFC2
+	 * rt, 0x0243; DMFC2 rt, 0x0244; DMFC2 rt, 0x0245; DMFC2 rt,
+	 * 0x0246; DMFC2 rt, 0x0247; DMFC2 rt, 0x0248; DMFC2 rt,
+	 * 0x0249; DMFC2 rt, 0x024A; DMFC2 rt, 0x024B; DMFC2 rt,
+	 * 0x024C; DMFC2 rt, 0x024D; DMFC2 rt, 0x024E - Pass2 */
+	unsigned long   cop2_hsh_datw[15];
+	/* DMFC2 rt, 0x0250; DMFC2 rt, 0x0251; DMFC2 rt, 0x0252; DMFC2
+	 * rt, 0x0253; DMFC2 rt, 0x0254; DMFC2 rt, 0x0255; DMFC2 rt,
+	 * 0x0256; DMFC2 rt, 0x0257 - Pass2 */
+	unsigned long   cop2_hsh_ivw[8];
+	/* DMFC2 rt, 0x0258; DMFC2 rt, 0x0259 - Pass2 */
+	unsigned long   cop2_gfm_mult[2];
+	/* DMFC2 rt, 0x025E - Pass2 */
+	unsigned long   cop2_gfm_poly;
+	/* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
+	unsigned long   cop2_gfm_result[2];
+};
+#define INIT_OCTEON_COP2 {0,}
+
+struct octeon_cvmseg_state {
+	unsigned long cvmseg[CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE]
+			    [cpu_dcache_line_size() / sizeof(unsigned long)];
+};
+
+#endif
+
 typedef struct {
 	unsigned long seg;
 } mm_segment_t;
@@ -160,6 +214,10 @@
 	unsigned long trap_no;
 	unsigned long irix_trampoline;  /* Wheee... */
 	unsigned long irix_oldctx;
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128)));
+	struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128)));
+#endif
 	struct mips_abi *abi;
 };
 
@@ -171,6 +229,13 @@
 #define FPAFF_INIT
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#define OCTEON_INIT						\
+	.cp2			= INIT_OCTEON_COP2,
+#else
+#define OCTEON_INIT
+#endif /* CONFIG_CPU_CAVIUM_OCTEON */
+
 #define INIT_THREAD  {						\
         /*							\
          * Saved main processor registers			\
@@ -221,6 +286,10 @@
 	.trap_no		= 0,				\
 	.irix_trampoline	= 0,				\
 	.irix_oldctx		= 0,				\
+	/*							\
+	 * Cavium Octeon specifics (null if not Octeon)		\
+	 */							\
+	OCTEON_INIT						\
 }
 
 struct task_struct;
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index c2c8bac..1f30d16 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -48,6 +48,10 @@
 #ifdef CONFIG_MIPS_MT_SMTC
 	unsigned long cp0_tcstatus;
 #endif /* CONFIG_MIPS_MT_SMTC */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	unsigned long long mpl[3];        /* MTM{0,1,2} */
+	unsigned long long mtp[3];        /* MTP{0,1,2} */
+#endif
 } __attribute__ ((aligned (8)));
 
 /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 86557b5..40e5ef1 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -37,6 +37,9 @@
 
 #define SMP_RESCHEDULE_YOURSELF	0x1	/* XXX braindead */
 #define SMP_CALL_FUNCTION	0x2
+/* Octeon - Tell another core to flush its icache */
+#define SMP_ICACHE_FLUSH	0x4
+
 
 extern void asmlinkage smp_bootstrap(void);
 
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 4c37c4e5..db0fa7b 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -194,6 +194,19 @@
 		LONG_S	$31, PT_R31(sp)
 		ori	$28, sp, _THREAD_MASK
 		xori	$28, _THREAD_MASK
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+		.set    mips64
+		pref    0, 0($28)       /* Prefetch the current pointer */
+		pref    0, PT_R31(sp)   /* Prefetch the $31(ra) */
+		/* The Octeon multiplier state is affected by general multiply
+		    instructions. It must be saved here, before kernel code
+		    can corrupt it. */
+		jal     octeon_mult_save
+		LONG_L  v1, 0($28)  /* Load the current pointer */
+			 /* Restore $31(ra) that was changed by the jal */
+		LONG_L  ra, PT_R31(sp)
+		pref    0, 0(v1)    /* Prefetch the current thread */
+#endif
 		.set	pop
 		.endm
 
@@ -324,6 +337,10 @@
 		DVPE	5				# dvpe a1
 		jal	mips_ihb
 #endif /* CONFIG_MIPS_MT_SMTC */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+		/* Restore the Octeon multiplier state */
+		jal	octeon_mult_restore
+#endif
 		mfc0	a0, CP0_STATUS
 		ori	a0, STATMASK
 		xori	a0, STATMASK
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index 9601ea9..38a30d2 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -50,27 +50,35 @@
 /*
  * Initialize the calling CPU's compare interrupt as clockevent device
  */
-#ifdef CONFIG_CEVT_R4K
-extern int mips_clockevent_init(void);
+#ifdef CONFIG_CEVT_R4K_LIB
 extern unsigned int __weak get_c0_compare_int(void);
-#else
+extern int r4k_clockevent_init(void);
+#endif
+
 static inline int mips_clockevent_init(void)
 {
+#ifdef CONFIG_CEVT_R4K
+	return r4k_clockevent_init();
+#else
 	return -ENXIO;
-}
 #endif
+}
 
 /*
  * Initialize the count register as a clocksource
  */
-#ifdef CONFIG_CSRC_R4K
-extern int init_mips_clocksource(void);
-#else
+#ifdef CONFIG_CSRC_R4K_LIB
+extern int init_r4k_clocksource(void);
+#endif
+
 static inline int init_mips_clocksource(void)
 {
+#ifdef CONFIG_CSRC_R4K
+	return init_r4k_clocksource();
+#else
 	return 0;
-}
 #endif
+}
 
 extern void clocksource_set_clock(struct clocksource *cs, unsigned int clock);
 extern void clockevent_set_clock(struct clock_event_device *cd,
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index b1372c2..e961221 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -9,7 +9,7 @@
 		   time.o topology.o traps.o unaligned.o watch.o
 
 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
-obj-$(CONFIG_CEVT_R4K)		+= cevt-r4k.o
+obj-$(CONFIG_CEVT_R4K_LIB)	+= cevt-r4k.o
 obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287)	+= cevt-ds1287.o
 obj-$(CONFIG_CEVT_GT641XX)	+= cevt-gt641xx.o
@@ -17,7 +17,7 @@
 obj-$(CONFIG_CEVT_TXX9)		+= cevt-txx9.o
 obj-$(CONFIG_CSRC_BCM1480)	+= csrc-bcm1480.o
 obj-$(CONFIG_CSRC_IOASIC)	+= csrc-ioasic.o
-obj-$(CONFIG_CSRC_R4K)		+= csrc-r4k.o
+obj-$(CONFIG_CSRC_R4K_LIB)	+= csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)	+= csrc-sb1250.o
 obj-$(CONFIG_SYNC_R4K)		+= sync-r4k.o
 
@@ -43,6 +43,7 @@
 obj-$(CONFIG_CPU_TX39XX)	+= r2300_fpu.o r2300_switch.o
 obj-$(CONFIG_CPU_TX49XX)	+= r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_VR41XX)	+= r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON)	+= octeon_switch.o
 
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_SMP_UP)		+= smp-up.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 7294222..c901c22 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -64,6 +64,10 @@
 #ifdef CONFIG_MIPS_MT_SMTC
 	OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus);
 #endif /* CONFIG_MIPS_MT_SMTC */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	OFFSET(PT_MPL, pt_regs, mpl);
+	OFFSET(PT_MTP, pt_regs, mtp);
+#endif /* CONFIG_CPU_CAVIUM_OCTEON */
 	DEFINE(PT_SIZE, sizeof(struct pt_regs));
 	BLANK();
 }
@@ -295,3 +299,30 @@
 	DEFINE(IC_IRQ_CPUSTAT_T, sizeof(irq_cpustat_t));
 	BLANK();
 }
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+void output_octeon_cop2_state_defines(void)
+{
+	COMMENT("Octeon specific octeon_cop2_state offsets.");
+	OFFSET(OCTEON_CP2_CRC_IV,	octeon_cop2_state, cop2_crc_iv);
+	OFFSET(OCTEON_CP2_CRC_LENGTH,	octeon_cop2_state, cop2_crc_length);
+	OFFSET(OCTEON_CP2_CRC_POLY,	octeon_cop2_state, cop2_crc_poly);
+	OFFSET(OCTEON_CP2_LLM_DAT,	octeon_cop2_state, cop2_llm_dat);
+	OFFSET(OCTEON_CP2_3DES_IV,	octeon_cop2_state, cop2_3des_iv);
+	OFFSET(OCTEON_CP2_3DES_KEY,	octeon_cop2_state, cop2_3des_key);
+	OFFSET(OCTEON_CP2_3DES_RESULT,	octeon_cop2_state, cop2_3des_result);
+	OFFSET(OCTEON_CP2_AES_INP0,	octeon_cop2_state, cop2_aes_inp0);
+	OFFSET(OCTEON_CP2_AES_IV,	octeon_cop2_state, cop2_aes_iv);
+	OFFSET(OCTEON_CP2_AES_KEY,	octeon_cop2_state, cop2_aes_key);
+	OFFSET(OCTEON_CP2_AES_KEYLEN,	octeon_cop2_state, cop2_aes_keylen);
+	OFFSET(OCTEON_CP2_AES_RESULT,	octeon_cop2_state, cop2_aes_result);
+	OFFSET(OCTEON_CP2_GFM_MULT,	octeon_cop2_state, cop2_gfm_mult);
+	OFFSET(OCTEON_CP2_GFM_POLY,	octeon_cop2_state, cop2_gfm_poly);
+	OFFSET(OCTEON_CP2_GFM_RESULT,	octeon_cop2_state, cop2_gfm_result);
+	OFFSET(OCTEON_CP2_HSH_DATW,	octeon_cop2_state, cop2_hsh_datw);
+	OFFSET(OCTEON_CP2_HSH_IVW,	octeon_cop2_state, cop2_hsh_ivw);
+	OFFSET(THREAD_CP2,	task_struct, thread.cp2);
+	OFFSET(THREAD_CVMSEG,	task_struct, thread.cvmseg.cvmseg);
+	BLANK();
+}
+#endif
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 6b5df8b..0176ed0 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -205,6 +205,39 @@
 			break;
 		}
 		break;
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	case lwc2_op: /* This is bbit0 on Octeon */
+		if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+		     == 0)
+			epc = epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			epc += 8;
+		regs->cp0_epc = epc;
+		break;
+	case ldc2_op: /* This is bbit032 on Octeon */
+		if ((regs->regs[insn.i_format.rs] &
+		    (1ull<<(insn.i_format.rt+32))) == 0)
+			epc = epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			epc += 8;
+		regs->cp0_epc = epc;
+		break;
+	case swc2_op: /* This is bbit1 on Octeon */
+		if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+			epc = epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			epc += 8;
+		regs->cp0_epc = epc;
+		break;
+	case sdc2_op: /* This is bbit132 on Octeon */
+		if (regs->regs[insn.i_format.rs] &
+		    (1ull<<(insn.i_format.rt+32)))
+			epc = epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			epc += 8;
+		regs->cp0_epc = epc;
+		break;
+#endif
 	}
 
 	return 0;
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index e1ec83b..0015e44 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -160,7 +160,7 @@
 
 #ifndef CONFIG_MIPS_MT_SMTC
 
-int __cpuinit mips_clockevent_init(void)
+int __cpuinit r4k_clockevent_init(void)
 {
 	uint64_t mips_freq = mips_hpt_frequency;
 	unsigned int cpu = smp_processor_id();
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index c9207b5..a7162a4 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -96,6 +96,9 @@
 
 static void au1k_wait(void)
 {
+	if (!allow_au1k_wait)
+		return;
+
 	/* using the wait instruction makes CP0 counter unusable */
 	__asm__("	.set	mips3			\n"
 		"	cache	0x14, 0(%0)		\n"
@@ -154,6 +157,7 @@
 	case CPU_25KF:
 	case CPU_PR4450:
 	case CPU_BCM3302:
+	case CPU_CAVIUM_OCTEON:
 		cpu_wait = r4k_wait;
 		break;
 
@@ -185,8 +189,7 @@
 	case CPU_AU1200:
 	case CPU_AU1210:
 	case CPU_AU1250:
-		if (allow_au1k_wait)
-			cpu_wait = au1k_wait;
+		cpu_wait = au1k_wait;
 		break;
 	case CPU_20KC:
 		/*
@@ -875,6 +878,27 @@
 	}
 }
 
+static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu)
+{
+	decode_configs(c);
+	switch (c->processor_id & 0xff00) {
+	case PRID_IMP_CAVIUM_CN38XX:
+	case PRID_IMP_CAVIUM_CN31XX:
+	case PRID_IMP_CAVIUM_CN30XX:
+	case PRID_IMP_CAVIUM_CN58XX:
+	case PRID_IMP_CAVIUM_CN56XX:
+	case PRID_IMP_CAVIUM_CN50XX:
+	case PRID_IMP_CAVIUM_CN52XX:
+		c->cputype = CPU_CAVIUM_OCTEON;
+		__cpu_name[cpu] = "Cavium Octeon";
+		break;
+	default:
+		printk(KERN_INFO "Unknown Octeon chip!\n");
+		c->cputype = CPU_UNKNOWN;
+		break;
+	}
+}
+
 const char *__cpu_name[NR_CPUS];
 
 __cpuinit void cpu_probe(void)
@@ -909,6 +933,9 @@
 	case PRID_COMP_NXP:
 		cpu_probe_nxp(c, cpu);
 		break;
+	case PRID_COMP_CAVIUM:
+		cpu_probe_cavium(c, cpu);
+		break;
 	}
 
 	BUG_ON(!__cpu_name[cpu]);
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
index 74fb745..f1a2893 100644
--- a/arch/mips/kernel/csrc-r4k.c
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -22,7 +22,7 @@
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-int __init init_mips_clocksource(void)
+int __init init_r4k_clocksource(void)
 {
 	if (!cpu_has_counter || !mips_hpt_frequency)
 		return -ENXIO;
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 757d48f..fb6f731 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -385,10 +385,14 @@
 	.endm
 
 	.macro	__build_clear_fpe
+	.set	push
+	/* gas fails to assemble cfc1 for some archs (octeon). */
+	.set	mips1
 	cfc1	a1, fcr31
 	li	a2, ~(0x3f << 12)
 	and	a2, a1
 	ctc1	a2, fcr31
+	.set	pop
 	TRACE_IRQS_ON
 	STI
 	.endm
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 4b4007b..a0ff2b6 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -111,6 +111,7 @@
 			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 		seq_printf(p, " %14s", irq_desc[i].chip->name);
+		seq_printf(p, "-%-8s", irq_desc[i].name);
 		seq_printf(p, "  %s", action->name);
 
 		for (action=action->next; action; action = action->next)
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
new file mode 100644
index 0000000..d523896
--- /dev/null
+++ b/arch/mips/kernel/octeon_switch.S
@@ -0,0 +1,506 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ * Copyright (C) 1994, 1995, 1996, by Andreas Busse
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ *    written by Carsten Langgaard, carstenl@mips.com
+ */
+#include <asm/asm.h>
+#include <asm/cachectl.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/pgtable-bits.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+#include <asm/asmmacro.h>
+
+/*
+ * Offset to the current process status flags, the first 32 bytes of the
+ * stack are not used.
+ */
+#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
+
+/*
+ * task_struct *resume(task_struct *prev, task_struct *next,
+ *                     struct thread_info *next_ti)
+ */
+	.align	7
+	LEAF(resume)
+	.set arch=octeon
+#ifndef CONFIG_CPU_HAS_LLSC
+	sw	zero, ll_bit
+#endif
+	mfc0	t1, CP0_STATUS
+	LONG_S	t1, THREAD_STATUS(a0)
+	cpu_save_nonscratch a0
+	LONG_S	ra, THREAD_REG31(a0)
+
+	/* check if we need to save COP2 registers */
+	PTR_L	t2, TASK_THREAD_INFO(a0)
+	LONG_L	t0, ST_OFF(t2)
+	bbit0	t0, 30, 1f
+
+	/* Disable COP2 in the stored process state */
+	li	t1, ST0_CU2
+	xor	t0, t1
+	LONG_S	t0, ST_OFF(t2)
+
+	/* Enable COP2 so we can save it */
+	mfc0	t0, CP0_STATUS
+	or	t0, t1
+	mtc0	t0, CP0_STATUS
+
+	/* Save COP2 */
+	daddu	a0, THREAD_CP2
+	jal octeon_cop2_save
+	dsubu	a0, THREAD_CP2
+
+	/* Disable COP2 now that we are done */
+	mfc0	t0, CP0_STATUS
+	li	t1, ST0_CU2
+	xor	t0, t1
+	mtc0	t0, CP0_STATUS
+
+1:
+#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
+	/* Check if we need to store CVMSEG state */
+	mfc0	t0, $11,7 	/* CvmMemCtl */
+	bbit0	t0, 6, 3f	/* Is user access enabled? */
+
+	/* Store the CVMSEG state */
+	/* Extract the size of CVMSEG */
+	andi	t0, 0x3f
+	/* Multiply by (cache line size/sizeof(long)/2) */
+	sll	t0, 7-LONGLOG-1
+	li	t1, -32768 	/* Base address of CVMSEG */
+	LONG_ADDI t2, a0, THREAD_CVMSEG	/* Where to store CVMSEG to */
+	synciobdma
+2:
+	.set noreorder
+	LONG_L	t8, 0(t1)	/* Load from CVMSEG */
+	subu	t0, 1		/* Decrement loop var */
+	LONG_L	t9, LONGSIZE(t1)/* Load from CVMSEG */
+	LONG_ADDU t1, LONGSIZE*2 /* Increment loc in CVMSEG */
+	LONG_S	t8, 0(t2)	/* Store CVMSEG to thread storage */
+	LONG_ADDU t2, LONGSIZE*2 /* Increment loc in thread storage */
+	bnez	t0, 2b		/* Loop until we've copied it all */
+	 LONG_S	t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */
+	.set reorder
+
+	/* Disable access to CVMSEG */
+	mfc0	t0, $11,7 	/* CvmMemCtl */
+	xori	t0, t0, 0x40	/* Bit 6 is CVMSEG user enable */
+	mtc0	t0, $11,7 	/* CvmMemCtl */
+#endif
+3:
+	/*
+	 * The order of restoring the registers takes care of the race
+	 * updating $28, $29 and kernelsp without disabling ints.
+	 */
+	move	$28, a2
+	cpu_restore_nonscratch a1
+
+#if (_THREAD_SIZE - 32) < 0x8000
+	PTR_ADDIU	t0, $28, _THREAD_SIZE - 32
+#else
+	PTR_LI		t0, _THREAD_SIZE - 32
+	PTR_ADDU	t0, $28
+#endif
+	set_saved_sp	t0, t1, t2
+
+	mfc0	t1, CP0_STATUS		/* Do we really need this? */
+	li	a3, 0xff01
+	and	t1, a3
+	LONG_L	a2, THREAD_STATUS(a1)
+	nor	a3, $0, a3
+	and	a2, a3
+	or	a2, t1
+	mtc0	a2, CP0_STATUS
+	move	v0, a0
+	jr	ra
+	END(resume)
+
+/*
+ * void octeon_cop2_save(struct octeon_cop2_state *a0)
+ */
+	.align	7
+	LEAF(octeon_cop2_save)
+
+	dmfc0	t9, $9,7	/* CvmCtl register. */
+
+        /* Save the COP2 CRC state */
+	dmfc2	t0, 0x0201
+	dmfc2	t1, 0x0202
+	dmfc2	t2, 0x0200
+	sd	t0, OCTEON_CP2_CRC_IV(a0)
+	sd	t1, OCTEON_CP2_CRC_LENGTH(a0)
+	sd	t2, OCTEON_CP2_CRC_POLY(a0)
+	/* Skip next instructions if CvmCtl[NODFA_CP2] set */
+	bbit1	t9, 28, 1f
+
+	/* Save the LLM state */
+	dmfc2	t0, 0x0402
+	dmfc2	t1, 0x040A
+	sd	t0, OCTEON_CP2_LLM_DAT(a0)
+	sd	t1, OCTEON_CP2_LLM_DAT+8(a0)
+
+1:      bbit1	t9, 26, 3f	/* done if CvmCtl[NOCRYPTO] set */
+
+	/* Save the COP2 crypto state */
+        /* this part is mostly common to both pass 1 and later revisions */
+	dmfc2 	t0, 0x0084
+	dmfc2 	t1, 0x0080
+	dmfc2 	t2, 0x0081
+	dmfc2 	t3, 0x0082
+	sd	t0, OCTEON_CP2_3DES_IV(a0)
+	dmfc2 	t0, 0x0088
+	sd	t1, OCTEON_CP2_3DES_KEY(a0)
+	dmfc2 	t1, 0x0111                      /* only necessary for pass 1 */
+	sd	t2, OCTEON_CP2_3DES_KEY+8(a0)
+	dmfc2 	t2, 0x0102
+	sd	t3, OCTEON_CP2_3DES_KEY+16(a0)
+	dmfc2 	t3, 0x0103
+	sd	t0, OCTEON_CP2_3DES_RESULT(a0)
+	dmfc2 	t0, 0x0104
+	sd	t1, OCTEON_CP2_AES_INP0(a0)     /* only necessary for pass 1 */
+	dmfc2 	t1, 0x0105
+	sd	t2, OCTEON_CP2_AES_IV(a0)
+	dmfc2	t2, 0x0106
+	sd	t3, OCTEON_CP2_AES_IV+8(a0)
+	dmfc2 	t3, 0x0107
+	sd	t0, OCTEON_CP2_AES_KEY(a0)
+	dmfc2	t0, 0x0110
+	sd	t1, OCTEON_CP2_AES_KEY+8(a0)
+	dmfc2	t1, 0x0100
+	sd	t2, OCTEON_CP2_AES_KEY+16(a0)
+	dmfc2	t2, 0x0101
+	sd	t3, OCTEON_CP2_AES_KEY+24(a0)
+	mfc0	t3, $15,0 	/* Get the processor ID register */
+	sd	t0, OCTEON_CP2_AES_KEYLEN(a0)
+	li	t0, 0x000d0000	/* This is the processor ID of Octeon Pass1 */
+	sd	t1, OCTEON_CP2_AES_RESULT(a0)
+	sd	t2, OCTEON_CP2_AES_RESULT+8(a0)
+	/* Skip to the Pass1 version of the remainder of the COP2 state */
+	beq	t3, t0, 2f
+
+        /* the non-pass1 state when !CvmCtl[NOCRYPTO] */
+	dmfc2	t1, 0x0240
+	dmfc2	t2, 0x0241
+	dmfc2	t3, 0x0242
+	dmfc2	t0, 0x0243
+	sd	t1, OCTEON_CP2_HSH_DATW(a0)
+	dmfc2	t1, 0x0244
+	sd	t2, OCTEON_CP2_HSH_DATW+8(a0)
+	dmfc2	t2, 0x0245
+	sd	t3, OCTEON_CP2_HSH_DATW+16(a0)
+	dmfc2	t3, 0x0246
+	sd	t0, OCTEON_CP2_HSH_DATW+24(a0)
+	dmfc2	t0, 0x0247
+	sd	t1, OCTEON_CP2_HSH_DATW+32(a0)
+	dmfc2	t1, 0x0248
+	sd	t2, OCTEON_CP2_HSH_DATW+40(a0)
+	dmfc2	t2, 0x0249
+	sd	t3, OCTEON_CP2_HSH_DATW+48(a0)
+	dmfc2	t3, 0x024A
+	sd	t0, OCTEON_CP2_HSH_DATW+56(a0)
+	dmfc2	t0, 0x024B
+	sd	t1, OCTEON_CP2_HSH_DATW+64(a0)
+	dmfc2	t1, 0x024C
+	sd	t2, OCTEON_CP2_HSH_DATW+72(a0)
+	dmfc2	t2, 0x024D
+	sd	t3, OCTEON_CP2_HSH_DATW+80(a0)
+	dmfc2 	t3, 0x024E
+	sd	t0, OCTEON_CP2_HSH_DATW+88(a0)
+	dmfc2	t0, 0x0250
+	sd	t1, OCTEON_CP2_HSH_DATW+96(a0)
+	dmfc2	t1, 0x0251
+	sd	t2, OCTEON_CP2_HSH_DATW+104(a0)
+	dmfc2	t2, 0x0252
+	sd	t3, OCTEON_CP2_HSH_DATW+112(a0)
+	dmfc2	t3, 0x0253
+	sd	t0, OCTEON_CP2_HSH_IVW(a0)
+	dmfc2	t0, 0x0254
+	sd	t1, OCTEON_CP2_HSH_IVW+8(a0)
+	dmfc2	t1, 0x0255
+	sd	t2, OCTEON_CP2_HSH_IVW+16(a0)
+	dmfc2	t2, 0x0256
+	sd	t3, OCTEON_CP2_HSH_IVW+24(a0)
+	dmfc2	t3, 0x0257
+	sd	t0, OCTEON_CP2_HSH_IVW+32(a0)
+	dmfc2 	t0, 0x0258
+	sd	t1, OCTEON_CP2_HSH_IVW+40(a0)
+	dmfc2 	t1, 0x0259
+	sd	t2, OCTEON_CP2_HSH_IVW+48(a0)
+	dmfc2	t2, 0x025E
+	sd	t3, OCTEON_CP2_HSH_IVW+56(a0)
+	dmfc2	t3, 0x025A
+	sd	t0, OCTEON_CP2_GFM_MULT(a0)
+	dmfc2	t0, 0x025B
+	sd	t1, OCTEON_CP2_GFM_MULT+8(a0)
+	sd	t2, OCTEON_CP2_GFM_POLY(a0)
+	sd	t3, OCTEON_CP2_GFM_RESULT(a0)
+	sd	t0, OCTEON_CP2_GFM_RESULT+8(a0)
+	jr	ra
+
+2:      /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
+	dmfc2	t3, 0x0040
+	dmfc2	t0, 0x0041
+	dmfc2	t1, 0x0042
+	dmfc2	t2, 0x0043
+	sd	t3, OCTEON_CP2_HSH_DATW(a0)
+	dmfc2	t3, 0x0044
+	sd	t0, OCTEON_CP2_HSH_DATW+8(a0)
+	dmfc2	t0, 0x0045
+	sd	t1, OCTEON_CP2_HSH_DATW+16(a0)
+	dmfc2	t1, 0x0046
+	sd	t2, OCTEON_CP2_HSH_DATW+24(a0)
+	dmfc2	t2, 0x0048
+	sd	t3, OCTEON_CP2_HSH_DATW+32(a0)
+	dmfc2	t3, 0x0049
+	sd	t0, OCTEON_CP2_HSH_DATW+40(a0)
+	dmfc2	t0, 0x004A
+	sd	t1, OCTEON_CP2_HSH_DATW+48(a0)
+	sd	t2, OCTEON_CP2_HSH_IVW(a0)
+	sd	t3, OCTEON_CP2_HSH_IVW+8(a0)
+	sd	t0, OCTEON_CP2_HSH_IVW+16(a0)
+
+3:      /* pass 1 or CvmCtl[NOCRYPTO] set */
+	jr	ra
+	END(octeon_cop2_save)
+
+/*
+ * void octeon_cop2_restore(struct octeon_cop2_state *a0)
+ */
+	.align	7
+	.set push
+	.set noreorder
+	LEAF(octeon_cop2_restore)
+        /* First cache line was prefetched before the call */
+        pref    4,  128(a0)
+	dmfc0	t9, $9,7	/* CvmCtl register. */
+
+        pref    4,  256(a0)
+	ld	t0, OCTEON_CP2_CRC_IV(a0)
+        pref    4,  384(a0)
+	ld	t1, OCTEON_CP2_CRC_LENGTH(a0)
+	ld	t2, OCTEON_CP2_CRC_POLY(a0)
+
+	/* Restore the COP2 CRC state */
+	dmtc2	t0, 0x0201
+	dmtc2 	t1, 0x1202
+	bbit1	t9, 28, 2f	/* Skip LLM if CvmCtl[NODFA_CP2] is set */
+	 dmtc2	t2, 0x4200
+
+	/* Restore the LLM state */
+	ld	t0, OCTEON_CP2_LLM_DAT(a0)
+	ld	t1, OCTEON_CP2_LLM_DAT+8(a0)
+	dmtc2	t0, 0x0402
+	dmtc2	t1, 0x040A
+
+2:
+	bbit1	t9, 26, done_restore	/* done if CvmCtl[NOCRYPTO] set */
+	 nop
+
+	/* Restore the COP2 crypto state common to pass 1 and pass 2 */
+	ld	t0, OCTEON_CP2_3DES_IV(a0)
+	ld	t1, OCTEON_CP2_3DES_KEY(a0)
+	ld	t2, OCTEON_CP2_3DES_KEY+8(a0)
+	dmtc2 	t0, 0x0084
+	ld	t0, OCTEON_CP2_3DES_KEY+16(a0)
+	dmtc2 	t1, 0x0080
+	ld	t1, OCTEON_CP2_3DES_RESULT(a0)
+	dmtc2 	t2, 0x0081
+	ld	t2, OCTEON_CP2_AES_INP0(a0) /* only really needed for pass 1 */
+	dmtc2	t0, 0x0082
+	ld	t0, OCTEON_CP2_AES_IV(a0)
+	dmtc2 	t1, 0x0098
+	ld	t1, OCTEON_CP2_AES_IV+8(a0)
+	dmtc2 	t2, 0x010A                  /* only really needed for pass 1 */
+	ld	t2, OCTEON_CP2_AES_KEY(a0)
+	dmtc2 	t0, 0x0102
+	ld	t0, OCTEON_CP2_AES_KEY+8(a0)
+	dmtc2	t1, 0x0103
+	ld	t1, OCTEON_CP2_AES_KEY+16(a0)
+	dmtc2	t2, 0x0104
+	ld	t2, OCTEON_CP2_AES_KEY+24(a0)
+	dmtc2	t0, 0x0105
+	ld	t0, OCTEON_CP2_AES_KEYLEN(a0)
+	dmtc2	t1, 0x0106
+	ld	t1, OCTEON_CP2_AES_RESULT(a0)
+	dmtc2	t2, 0x0107
+	ld	t2, OCTEON_CP2_AES_RESULT+8(a0)
+	mfc0	t3, $15,0 	/* Get the processor ID register */
+	dmtc2	t0, 0x0110
+	li	t0, 0x000d0000	/* This is the processor ID of Octeon Pass1 */
+	dmtc2	t1, 0x0100
+	bne	t0, t3, 3f	/* Skip the next stuff for non-pass1 */
+	 dmtc2	t2, 0x0101
+
+        /* this code is specific for pass 1 */
+	ld	t0, OCTEON_CP2_HSH_DATW(a0)
+	ld	t1, OCTEON_CP2_HSH_DATW+8(a0)
+	ld	t2, OCTEON_CP2_HSH_DATW+16(a0)
+	dmtc2	t0, 0x0040
+	ld	t0, OCTEON_CP2_HSH_DATW+24(a0)
+	dmtc2	t1, 0x0041
+	ld	t1, OCTEON_CP2_HSH_DATW+32(a0)
+	dmtc2	t2, 0x0042
+	ld	t2, OCTEON_CP2_HSH_DATW+40(a0)
+	dmtc2	t0, 0x0043
+	ld	t0, OCTEON_CP2_HSH_DATW+48(a0)
+	dmtc2	t1, 0x0044
+	ld	t1, OCTEON_CP2_HSH_IVW(a0)
+	dmtc2	t2, 0x0045
+	ld	t2, OCTEON_CP2_HSH_IVW+8(a0)
+	dmtc2	t0, 0x0046
+	ld	t0, OCTEON_CP2_HSH_IVW+16(a0)
+	dmtc2	t1, 0x0048
+	dmtc2	t2, 0x0049
+        b done_restore   /* unconditional branch */
+	 dmtc2	t0, 0x004A
+
+3:      /* this is post-pass1 code */
+	ld	t2, OCTEON_CP2_HSH_DATW(a0)
+	ld	t0, OCTEON_CP2_HSH_DATW+8(a0)
+	ld	t1, OCTEON_CP2_HSH_DATW+16(a0)
+	dmtc2	t2, 0x0240
+	ld	t2, OCTEON_CP2_HSH_DATW+24(a0)
+	dmtc2	t0, 0x0241
+	ld	t0, OCTEON_CP2_HSH_DATW+32(a0)
+	dmtc2	t1, 0x0242
+	ld	t1, OCTEON_CP2_HSH_DATW+40(a0)
+	dmtc2	t2, 0x0243
+	ld	t2, OCTEON_CP2_HSH_DATW+48(a0)
+	dmtc2	t0, 0x0244
+	ld	t0, OCTEON_CP2_HSH_DATW+56(a0)
+	dmtc2	t1, 0x0245
+	ld	t1, OCTEON_CP2_HSH_DATW+64(a0)
+	dmtc2	t2, 0x0246
+	ld	t2, OCTEON_CP2_HSH_DATW+72(a0)
+	dmtc2	t0, 0x0247
+	ld	t0, OCTEON_CP2_HSH_DATW+80(a0)
+	dmtc2	t1, 0x0248
+	ld	t1, OCTEON_CP2_HSH_DATW+88(a0)
+	dmtc2	t2, 0x0249
+	ld	t2, OCTEON_CP2_HSH_DATW+96(a0)
+	dmtc2	t0, 0x024A
+	ld	t0, OCTEON_CP2_HSH_DATW+104(a0)
+	dmtc2	t1, 0x024B
+	ld	t1, OCTEON_CP2_HSH_DATW+112(a0)
+	dmtc2	t2, 0x024C
+	ld	t2, OCTEON_CP2_HSH_IVW(a0)
+	dmtc2	t0, 0x024D
+	ld	t0, OCTEON_CP2_HSH_IVW+8(a0)
+	dmtc2	t1, 0x024E
+	ld	t1, OCTEON_CP2_HSH_IVW+16(a0)
+	dmtc2	t2, 0x0250
+	ld	t2, OCTEON_CP2_HSH_IVW+24(a0)
+	dmtc2	t0, 0x0251
+	ld	t0, OCTEON_CP2_HSH_IVW+32(a0)
+	dmtc2	t1, 0x0252
+	ld	t1, OCTEON_CP2_HSH_IVW+40(a0)
+	dmtc2	t2, 0x0253
+	ld	t2, OCTEON_CP2_HSH_IVW+48(a0)
+	dmtc2	t0, 0x0254
+	ld	t0, OCTEON_CP2_HSH_IVW+56(a0)
+	dmtc2	t1, 0x0255
+	ld	t1, OCTEON_CP2_GFM_MULT(a0)
+	dmtc2	t2, 0x0256
+	ld	t2, OCTEON_CP2_GFM_MULT+8(a0)
+	dmtc2	t0, 0x0257
+	ld	t0, OCTEON_CP2_GFM_POLY(a0)
+	dmtc2	t1, 0x0258
+	ld	t1, OCTEON_CP2_GFM_RESULT(a0)
+	dmtc2	t2, 0x0259
+	ld	t2, OCTEON_CP2_GFM_RESULT+8(a0)
+	dmtc2	t0, 0x025E
+	dmtc2	t1, 0x025A
+	dmtc2	t2, 0x025B
+
+done_restore:
+	jr	ra
+	 nop
+	END(octeon_cop2_restore)
+	.set pop
+
+/*
+ * void octeon_mult_save()
+ * sp is assumed to point to a struct pt_regs
+ *
+ * NOTE: This is called in SAVE_SOME in stackframe.h. It can only
+ *       safely modify k0 and k1.
+ */
+	.align	7
+	.set push
+	.set noreorder
+	LEAF(octeon_mult_save)
+	dmfc0	k0, $9,7	/* CvmCtl register. */
+	bbit1	k0, 27, 1f	/* Skip CvmCtl[NOMUL] */
+	 nop
+
+	/* Save the multiplier state */
+	v3mulu	k0, $0, $0
+	v3mulu	k1, $0, $0
+	sd	k0, PT_MTP(sp)        /* PT_MTP    has P0 */
+	v3mulu	k0, $0, $0
+	sd	k1, PT_MTP+8(sp)      /* PT_MTP+8  has P1 */
+	ori	k1, $0, 1
+	v3mulu	k1, k1, $0
+	sd	k0, PT_MTP+16(sp)     /* PT_MTP+16 has P2 */
+	v3mulu	k0, $0, $0
+	sd	k1, PT_MPL(sp)        /* PT_MPL    has MPL0 */
+	v3mulu	k1, $0, $0
+	sd	k0, PT_MPL+8(sp)      /* PT_MPL+8  has MPL1 */
+	jr	ra
+	 sd	k1, PT_MPL+16(sp)     /* PT_MPL+16 has MPL2 */
+
+1:	/* Resume here if CvmCtl[NOMUL] */
+	jr	ra
+	END(octeon_mult_save)
+	.set pop
+
+/*
+ * void octeon_mult_restore()
+ * sp is assumed to point to a struct pt_regs
+ *
+ * NOTE: This is called in RESTORE_SOME in stackframe.h.
+ */
+	.align	7
+	.set push
+	.set noreorder
+	LEAF(octeon_mult_restore)
+	dmfc0	k1, $9,7		/* CvmCtl register. */
+	ld	v0, PT_MPL(sp)        	/* MPL0 */
+	ld	v1, PT_MPL+8(sp)      	/* MPL1 */
+	ld	k0, PT_MPL+16(sp)     	/* MPL2 */
+	bbit1	k1, 27, 1f		/* Skip CvmCtl[NOMUL] */
+	/* Normally falls through, so no time wasted here */
+	nop
+
+	/* Restore the multiplier state */
+	ld	k1, PT_MTP+16(sp)     	/* P2 */
+	MTM0	v0			/* MPL0 */
+	ld	v0, PT_MTP+8(sp)	/* P1 */
+	MTM1	v1			/* MPL1 */
+	ld	v1, PT_MTP(sp)   	/* P0 */
+	MTM2	k0			/* MPL2 */
+	MTP2	k1			/* P2 */
+	MTP1	v0			/* P1 */
+	jr	ra
+	 MTP0	v1			/* P0 */
+
+1:	/* Resume here if CvmCtl[NOMUL] */
+	jr	ra
+	 nop
+	END(octeon_mult_restore)
+	.set pop
+
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 1ca3410..c4f9ac1 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -49,19 +49,6 @@
 	int ret;
 
 	switch (request) {
-	/* when I and D space are separate, these will need to be fixed. */
-	case PTRACE_PEEKTEXT: /* read word at location addr. */
-	case PTRACE_PEEKDATA: {
-		unsigned int tmp;
-		int copied;
-
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-		ret = -EIO;
-		if (copied != sizeof(tmp))
-			break;
-		ret = put_user(tmp, (unsigned int __user *) (unsigned long) data);
-		break;
-	}
 
 	/*
 	 * Read 4 bytes of the other process' storage
@@ -208,16 +195,6 @@
 		break;
 	}
 
-	/* when I and D space are separate, this will have to be fixed. */
-	case PTRACE_POKETEXT: /* write the word at location addr. */
-	case PTRACE_POKEDATA:
-		ret = 0;
-		if (access_process_vm(child, addr, &data, sizeof(data), 1)
-		    == sizeof(data))
-			break;
-		ret = -EIO;
-		break;
-
 	/*
 	 * Write 4 bytes into the other process' storage
 	 *  data is the 4 bytes that the user wants written
@@ -332,50 +309,11 @@
 		ret = ptrace_setfpregs(child, (__u32 __user *) (__u64) data);
 		break;
 
-	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
-	case PTRACE_CONT: { /* restart after signal. */
-		ret = -EIO;
-		if (!valid_signal(data))
-			break;
-		if (request == PTRACE_SYSCALL) {
-			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		}
-		else {
-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-		}
-		child->exit_code = data;
-		wake_up_process(child);
-		ret = 0;
-		break;
-	}
-
-	/*
-	 * make the child exit.  Best I can do is send it a sigkill.
-	 * perhaps it should be put in the status that it wants to
-	 * exit.
-	 */
-	case PTRACE_KILL:
-		ret = 0;
-		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
-			break;
-		child->exit_code = SIGKILL;
-		wake_up_process(child);
-		break;
-
 	case PTRACE_GET_THREAD_AREA:
 		ret = put_user(task_thread_info(child)->tp_value,
 				(unsigned int __user *) (unsigned long) data);
 		break;
 
-	case PTRACE_DETACH: /* detach a process that was attached. */
-		ret = ptrace_detach(child, data);
-		break;
-
-	case PTRACE_GETEVENTMSG:
-		ret = put_user(child->ptrace_message,
-			       (unsigned int __user *) (unsigned long) data);
-		break;
-
 	case PTRACE_GET_THREAD_AREA_3264:
 		ret = put_user(task_thread_info(child)->tp_value,
 				(unsigned long __user *) (unsigned long) data);
@@ -392,7 +330,7 @@
 		break;
 
 	default:
-		ret = ptrace_request(child, request, addr, data);
+		ret = compat_ptrace_request(child, request, addr, data);
 		break;
 	}
 out:
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index d0916a5..51d1ba4 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -398,7 +398,7 @@
 	sys	sys_uselib		1
 	sys	sys_swapon		2
 	sys	sys_reboot		3
-	sys	old_readdir		3
+	sys	sys_old_readdir		3
 	sys	old_mmap		6	/* 4090 */
 	sys	sys_munmap		2
 	sys	sys_truncate		2
diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c
index 0632e2a..58f5cd7 100644
--- a/arch/mips/kernel/stacktrace.c
+++ b/arch/mips/kernel/stacktrace.c
@@ -32,7 +32,8 @@
 	}
 }
 
-static void save_context_stack(struct stack_trace *trace, struct pt_regs *regs)
+static void save_context_stack(struct stack_trace *trace,
+	struct task_struct *tsk, struct pt_regs *regs)
 {
 	unsigned long sp = regs->regs[29];
 #ifdef CONFIG_KALLSYMS
@@ -41,7 +42,7 @@
 
 	if (raw_show_trace || !__kernel_text_address(pc)) {
 		unsigned long stack_page =
-			(unsigned long)task_stack_page(current);
+			(unsigned long)task_stack_page(tsk);
 		if (stack_page && sp >= stack_page &&
 		    sp <= stack_page + THREAD_SIZE - 32)
 			save_raw_context_stack(trace, sp);
@@ -54,7 +55,7 @@
 			trace->entries[trace->nr_entries++] = pc;
 		if (trace->nr_entries >= trace->max_entries)
 			break;
-		pc = unwind_stack(current, &sp, pc, &ra);
+		pc = unwind_stack(tsk, &sp, pc, &ra);
 	} while (pc);
 #else
 	save_raw_context_stack(trace, sp);
@@ -66,12 +67,23 @@
  */
 void save_stack_trace(struct stack_trace *trace)
 {
+	save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
 	struct pt_regs dummyregs;
 	struct pt_regs *regs = &dummyregs;
 
 	WARN_ON(trace->nr_entries || !trace->max_entries);
 
-	prepare_frametrace(regs);
-	save_context_stack(trace, regs);
+	if (tsk != current) {
+		regs->regs[29] = tsk->thread.reg29;
+		regs->regs[31] = 0;
+		regs->cp0_epc = tsk->thread.reg31;
+	} else
+		prepare_frametrace(regs);
+	save_context_stack(trace, tsk, regs);
 }
-EXPORT_SYMBOL_GPL(save_stack_trace);
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 3530561..f6083c6 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -47,6 +47,7 @@
 #include <asm/mmu_context.h>
 #include <asm/types.h>
 #include <asm/stacktrace.h>
+#include <asm/irq.h>
 
 extern void check_wait(void);
 extern asmlinkage void r4k_wait(void);
@@ -78,6 +79,10 @@
 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
 	struct mips_fpu_struct *ctx, int has_fpu);
 
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+extern asmlinkage void octeon_cop2_restore(struct octeon_cop2_state *task);
+#endif
+
 void (*board_be_init)(void);
 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 void (*board_nmi_handler_setup)(void);
@@ -860,6 +865,7 @@
 	unsigned int opcode;
 	unsigned int cpid;
 	int status;
+	unsigned long __maybe_unused flags;
 
 	die_if_kernel("do_cpu invoked from kernel context!", regs);
 
@@ -915,6 +921,17 @@
 		return;
 
 	case 2:
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+		prefetch(&current->thread.cp2);
+		local_irq_save(flags);
+		KSTK_STATUS(current) |= ST0_CU2;
+		status = read_c0_status();
+		write_c0_status(status | ST0_CU2);
+		octeon_cop2_restore(&(current->thread.cp2));
+		write_c0_status(status & ~ST0_CU2);
+		local_irq_restore(flags);
+		return;
+#endif
 	case 3:
 		break;
 	}
@@ -1488,6 +1505,10 @@
 		write_c0_hwrena(enable);
 	}
 
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	write_c0_hwrena(0xc000000f); /* Octeon has register 30 and 31 */
+#endif
+
 #ifdef CONFIG_MIPS_MT_SMTC
 	if (!secondaryTC) {
 #endif /* CONFIG_MIPS_MT_SMTC */
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index dbcf651..c13c7ad 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -27,6 +27,7 @@
 obj-$(CONFIG_CPU_TX39XX)	+= r3k_dump_tlb.o
 obj-$(CONFIG_CPU_TX49XX)	+= dump_tlb.o
 obj-$(CONFIG_CPU_VR41XX)	+= dump_tlb.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON)	+= dump_tlb.o
 
 # libgcc-style stuff needed in the kernel
 obj-y += ashldi3.o ashrdi3.o cmpdi2.o lshrdi3.o ucmpdi2.o
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 95ba32b..d7ec955 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -27,6 +27,7 @@
 obj-$(CONFIG_CPU_TX39XX)	+= c-tx39.o tlb-r3k.o
 obj-$(CONFIG_CPU_TX49XX)	+= c-r4k.o cex-gen.o tlb-r4k.o
 obj-$(CONFIG_CPU_VR41XX)	+= c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON)	+= c-octeon.o cex-oct.o tlb-r4k.o
 
 obj-$(CONFIG_IP22_CPU_SCACHE)	+= sc-ip22.o
 obj-$(CONFIG_R5000_CPU_SCACHE)  += sc-r5k.o
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
new file mode 100644
index 0000000..44d01a0
--- /dev/null
+++ b/arch/mips/mm/c-octeon.c
@@ -0,0 +1,307 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2007 Cavium Networks
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/io.h>
+
+#include <asm/bcache.h>
+#include <asm/bootinfo.h>
+#include <asm/cacheops.h>
+#include <asm/cpu-features.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/r4kcache.h>
+#include <asm/system.h>
+#include <asm/mmu_context.h>
+#include <asm/war.h>
+
+#include <asm/octeon/octeon.h>
+
+unsigned long long cache_err_dcache[NR_CPUS];
+
+/**
+ * Octeon automatically flushes the dcache on tlb changes, so
+ * from Linux's viewpoint it acts much like a physically
+ * tagged cache. No flushing is needed.
+ *
+ */
+static void octeon_flush_data_cache_page(unsigned long addr)
+{
+    /* Nothing to do */
+}
+
+static inline void octeon_local_flush_icache(void)
+{
+	asm volatile ("synci 0($0)");
+}
+
+/*
+ * Flush local I-cache for the specified range.
+ */
+static void local_octeon_flush_icache_range(unsigned long start,
+					    unsigned long end)
+{
+	octeon_local_flush_icache();
+}
+
+/**
+ * Flush caches as necessary for all cores affected by a
+ * vma. If no vma is supplied, all cores are flushed.
+ *
+ * @vma:    VMA to flush or NULL to flush all icaches.
+ */
+static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
+{
+	extern void octeon_send_ipi_single(int cpu, unsigned int action);
+#ifdef CONFIG_SMP
+	int cpu;
+	cpumask_t mask;
+#endif
+
+	mb();
+	octeon_local_flush_icache();
+#ifdef CONFIG_SMP
+	preempt_disable();
+	cpu = smp_processor_id();
+
+	/*
+	 * If we have a vma structure, we only need to worry about
+	 * cores it has been used on
+	 */
+	if (vma)
+		mask = vma->vm_mm->cpu_vm_mask;
+	else
+		mask = cpu_online_map;
+	cpu_clear(cpu, mask);
+	for_each_cpu_mask(cpu, mask)
+		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
+
+	preempt_enable();
+#endif
+}
+
+
+/**
+ * Called to flush the icache on all cores
+ */
+static void octeon_flush_icache_all(void)
+{
+	octeon_flush_icache_all_cores(NULL);
+}
+
+
+/**
+ * Called to flush all memory associated with a memory
+ * context.
+ *
+ * @mm:     Memory context to flush
+ */
+static void octeon_flush_cache_mm(struct mm_struct *mm)
+{
+	/*
+	 * According to the R4K version of this file, CPUs without
+	 * dcache aliases don't need to do anything here
+	 */
+}
+
+
+/**
+ * Flush a range of kernel addresses out of the icache
+ *
+ */
+static void octeon_flush_icache_range(unsigned long start, unsigned long end)
+{
+	octeon_flush_icache_all_cores(NULL);
+}
+
+
+/**
+ * Flush the icache for a trampoline. These are used for interrupt
+ * and exception hooking.
+ *
+ * @addr:   Address to flush
+ */
+static void octeon_flush_cache_sigtramp(unsigned long addr)
+{
+	struct vm_area_struct *vma;
+
+	vma = find_vma(current->mm, addr);
+	octeon_flush_icache_all_cores(vma);
+}
+
+
+/**
+ * Flush a range out of a vma
+ *
+ * @vma:    VMA to flush
+ * @start:
+ * @end:
+ */
+static void octeon_flush_cache_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end)
+{
+	if (vma->vm_flags & VM_EXEC)
+		octeon_flush_icache_all_cores(vma);
+}
+
+
+/**
+ * Flush a specific page of a vma
+ *
+ * @vma:    VMA to flush page for
+ * @page:   Page to flush
+ * @pfn:
+ */
+static void octeon_flush_cache_page(struct vm_area_struct *vma,
+				    unsigned long page, unsigned long pfn)
+{
+	if (vma->vm_flags & VM_EXEC)
+		octeon_flush_icache_all_cores(vma);
+}
+
+
+/**
+ * Probe Octeon's caches
+ *
+ */
+static void __devinit probe_octeon(void)
+{
+	unsigned long icache_size;
+	unsigned long dcache_size;
+	unsigned int config1;
+	struct cpuinfo_mips *c = &current_cpu_data;
+
+	switch (c->cputype) {
+	case CPU_CAVIUM_OCTEON:
+		config1 = read_c0_config1();
+		c->icache.linesz = 2 << ((config1 >> 19) & 7);
+		c->icache.sets = 64 << ((config1 >> 22) & 7);
+		c->icache.ways = 1 + ((config1 >> 16) & 7);
+		c->icache.flags |= MIPS_CACHE_VTAG;
+		icache_size =
+			c->icache.sets * c->icache.ways * c->icache.linesz;
+		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
+		c->dcache.linesz = 128;
+		if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
+			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
+		else
+			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
+		c->dcache.ways = 64;
+		dcache_size =
+			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
+		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
+		c->options |= MIPS_CPU_PREFETCH;
+		break;
+
+	default:
+		panic("Unsupported Cavium Networks CPU type\n");
+		break;
+	}
+
+	/* compute a couple of other cache variables */
+	c->icache.waysize = icache_size / c->icache.ways;
+	c->dcache.waysize = dcache_size / c->dcache.ways;
+
+	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
+	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);
+
+	if (smp_processor_id() == 0) {
+		pr_notice("Primary instruction cache %ldkB, %s, %d way, "
+			  "%d sets, linesize %d bytes.\n",
+			  icache_size >> 10,
+			  cpu_has_vtag_icache ?
+				"virtually tagged" : "physically tagged",
+			  c->icache.ways, c->icache.sets, c->icache.linesz);
+
+		pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
+			  "linesize %d bytes.\n",
+			  dcache_size >> 10, c->dcache.ways,
+			  c->dcache.sets, c->dcache.linesz);
+	}
+}
+
+
+/**
+ * Setup the Octeon cache flush routines
+ *
+ */
+void __devinit octeon_cache_init(void)
+{
+	extern unsigned long ebase;
+	extern char except_vec2_octeon;
+
+	memcpy((void *)(ebase + 0x100), &except_vec2_octeon, 0x80);
+	octeon_flush_cache_sigtramp(ebase + 0x100);
+
+	probe_octeon();
+
+	shm_align_mask = PAGE_SIZE - 1;
+
+	flush_cache_all			= octeon_flush_icache_all;
+	__flush_cache_all		= octeon_flush_icache_all;
+	flush_cache_mm			= octeon_flush_cache_mm;
+	flush_cache_page		= octeon_flush_cache_page;
+	flush_cache_range		= octeon_flush_cache_range;
+	flush_cache_sigtramp		= octeon_flush_cache_sigtramp;
+	flush_icache_all		= octeon_flush_icache_all;
+	flush_data_cache_page		= octeon_flush_data_cache_page;
+	flush_icache_range		= octeon_flush_icache_range;
+	local_flush_icache_range	= local_octeon_flush_icache_range;
+
+	build_clear_page();
+	build_copy_page();
+}
+
+/**
+ * Handle a cache error exception
+ */
+
+static void  cache_parity_error_octeon(int non_recoverable)
+{
+	unsigned long coreid = cvmx_get_core_num();
+	uint64_t icache_err = read_octeon_c0_icacheerr();
+
+	pr_err("Cache error exception:\n");
+	pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
+	if (icache_err & 1) {
+		pr_err("CacheErr (Icache) == %llx\n",
+		       (unsigned long long)icache_err);
+		write_octeon_c0_icacheerr(0);
+	}
+	if (cache_err_dcache[coreid] & 1) {
+		pr_err("CacheErr (Dcache) == %llx\n",
+		       (unsigned long long)cache_err_dcache[coreid]);
+		cache_err_dcache[coreid] = 0;
+	}
+
+	if (non_recoverable)
+		panic("Can't handle cache error: nested exception");
+}
+
+/**
+ * Called when the exception is recoverable
+ */
+
+asmlinkage void cache_parity_error_octeon_recoverable(void)
+{
+	cache_parity_error_octeon(0);
+}
+
+/**
+ * Called when the exception is not recoverable
+ */
+
+asmlinkage void cache_parity_error_octeon_non_recoverable(void)
+{
+	cache_parity_error_octeon(1);
+}
+
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 1eb7c71..98ad0a8 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -182,6 +182,12 @@
 		tx39_cache_init();
 	}
 
+	if (cpu_has_octeon_cache) {
+		extern void __weak octeon_cache_init(void);
+
+		octeon_cache_init();
+	}
+
 	setup_protection_map();
 }
 
diff --git a/arch/mips/mm/cex-oct.S b/arch/mips/mm/cex-oct.S
new file mode 100644
index 0000000..3db8553
--- /dev/null
+++ b/arch/mips/mm/cex-oct.S
@@ -0,0 +1,70 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Cavium Networks
+ * Cache error handler
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+
+/*
+ * Handle cache error. Indicate to the second level handler whether
+ * the exception is recoverable.
+ */
+	LEAF(except_vec2_octeon)
+
+	.set    push
+	.set	mips64r2
+	.set	noreorder
+	.set	noat
+
+
+	/* due to an erratum we need to read the COP0 CacheErr (Dcache)
+	 * before any cache/DRAM access	 */
+
+	rdhwr   k0, $0        /* get core_id */
+	PTR_LA  k1, cache_err_dcache
+	sll     k0, k0, 3
+	PTR_ADDU k1, k0, k1    /* k1 = &cache_err_dcache[core_id] */
+
+	dmfc0   k0, CP0_CACHEERR, 1
+	sd      k0, (k1)
+	dmtc0   $0, CP0_CACHEERR, 1
+
+        /* check whether this is a nested exception */
+	mfc0    k1, CP0_STATUS
+	andi    k1, k1, ST0_EXL
+	beqz    k1, 1f
+	 nop
+	j	cache_parity_error_octeon_non_recoverable
+	 nop
+
+	/* exception is recoverable */
+1:	j	handle_cache_err
+	 nop
+
+	.set    pop
+	END(except_vec2_octeon)
+
+ /* We need to jump to handle_cache_err so that the previous handler
+  * can fit within 0x80 bytes. We also move from 0xFFFFFFFFAXXXXXXX
+  * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached).  */
+	LEAF(handle_cache_err)
+	.set    push
+        .set    noreorder
+        .set    noat
+
+	SAVE_ALL
+	KMODE
+	jal     cache_parity_error_octeon_recoverable
+	nop
+	j       ret_from_exception
+	nop
+
+	.set pop
+	END(handle_cache_err)
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index e6708b3..546e697 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -111,7 +111,7 @@
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 	dma_addr_t dma_handle)
 {
-	plat_unmap_dma_mem(dma_handle);
+	plat_unmap_dma_mem(dev, dma_handle);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -122,7 +122,7 @@
 {
 	unsigned long addr = (unsigned long) vaddr;
 
-	plat_unmap_dma_mem(dma_handle);
+	plat_unmap_dma_mem(dev, dma_handle);
 
 	if (!plat_device_is_coherent(dev))
 		addr = CAC_ADDR(addr);
@@ -173,7 +173,7 @@
 		__dma_sync(dma_addr_to_virt(dma_addr), size,
 		           direction);
 
-	plat_unmap_dma_mem(dma_addr);
+	plat_unmap_dma_mem(dev, dma_addr);
 }
 
 EXPORT_SYMBOL(dma_unmap_single);
@@ -229,7 +229,7 @@
 		dma_cache_wback_inv(addr, size);
 	}
 
-	plat_unmap_dma_mem(dma_address);
+	plat_unmap_dma_mem(dev, dma_address);
 }
 
 EXPORT_SYMBOL(dma_unmap_page);
@@ -249,7 +249,7 @@
 			if (addr)
 				__dma_sync(addr, sg->length, direction);
 		}
-		plat_unmap_dma_mem(sg->dma_address);
+		plat_unmap_dma_mem(dev, sg->dma_address);
 	}
 }
 
@@ -275,6 +275,7 @@
 {
 	BUG_ON(direction == DMA_NONE);
 
+	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev)) {
 		unsigned long addr;
 
@@ -305,6 +306,7 @@
 {
 	BUG_ON(direction == DMA_NONE);
 
+	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev)) {
 		unsigned long addr;
 
@@ -351,22 +353,14 @@
 
 int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	return 0;
+	return plat_dma_mapping_error(dev, dma_addr);
 }
 
 EXPORT_SYMBOL(dma_mapping_error);
 
 int dma_supported(struct device *dev, u64 mask)
 {
-	/*
-	 * we fall back to GFP_DMA when the mask isn't all 1s,
-	 * so we can't guarantee allocations that must be
-	 * within a tighter range than GFP_DMA..
-	 */
-	if (mask < DMA_BIT_MASK(24))
-		return 0;
-
-	return 1;
+	return plat_dma_supported(dev, mask);
 }
 
 EXPORT_SYMBOL(dma_supported);
@@ -383,6 +377,7 @@
 {
 	BUG_ON(direction == DMA_NONE);
 
+	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev))
 		__dma_sync((unsigned long)vaddr, size, direction);
 }
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 5ce2fa7..9619f66 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -478,7 +478,10 @@
 	probe_tlb(config);
 	write_c0_pagemask(PM_DEFAULT_MASK);
 	write_c0_wired(0);
-	write_c0_framemask(0);
+	if (current_cpu_type() == CPU_R10000 ||
+	    current_cpu_type() == CPU_R12000 ||
+	    current_cpu_type() == CPU_R14000)
+		write_c0_framemask(0);
 	temp_tlb_entry = current_cpu_data.tlbsize - 1;
 
         /* From this point on the ARC firmware is dead.  */
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 979cf91..4294203 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -317,6 +317,7 @@
 	case CPU_BCM3302:
 	case CPU_BCM4710:
 	case CPU_LOONGSON2:
+	case CPU_CAVIUM_OCTEON:
 		if (m4kc_tlbp_war())
 			uasm_i_nop(p);
 		tlbw(p);
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index f97ab14..dda6f20 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -146,12 +146,6 @@
 	return 0;
 }
 
-/* Most MIPS systems have straight-forward swizzling needs.  */
-static inline u8 bridge_swizzle(u8 pin, u8 slot)
-{
-	return (((pin - 1) + slot) % 4) + 1;
-}
-
 static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev)
 {
 	while (dev->bus->parent) {
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 62cae74..b0eb9e7 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -149,28 +149,6 @@
 	       "Skipping PCI bus scan due to resource conflict\n");
 }
 
-/* Most MIPS systems have straight-forward swizzling needs.  */
-
-static inline u8 bridge_swizzle(u8 pin, u8 slot)
-{
-	return (((pin - 1) + slot) % 4) + 1;
-}
-
-static u8 __init common_swizzle(struct pci_dev *dev, u8 *pinp)
-{
-	u8 pin = *pinp;
-
-	while (dev->bus->parent) {
-		pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
-		/* Move up the chain of bridges. */
-		dev = dev->bus->self;
-        }
-	*pinp = pin;
-
-	/* The slot is the slot of the last bridge. */
-	return PCI_SLOT(dev->devfn);
-}
-
 static int __init pcibios_init(void)
 {
 	struct pci_controller *hose;
@@ -179,7 +157,7 @@
 	for (hose = hose_head; hose; hose = hose->next)
 		pcibios_scanbus(hose);
 
-	pci_fixup_irqs(common_swizzle, pcibios_map_irq);
+	pci_fixup_irqs(pci_common_swizzle, pcibios_map_irq);
 
 	pci_initialized = 1;
 
diff --git a/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c b/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c
index 97862f4..caf5e9a 100644
--- a/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c
+++ b/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c
@@ -148,7 +148,7 @@
 	send_byte(W_HEADER);
 	recv_ack();
 
-	/* EEPROM with size of more then 2K need two byte addressing */
+	/* EEPROM with size of more than 2K need two byte addressing */
 	if (eeprom_size > 2048) {
 		send_byte(0x00);
 		recv_ack();
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index 62fba8a..ceeaaaa 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -478,7 +478,7 @@
 	.long sys_uselib
 	.long sys_swapon
 	.long sys_reboot
-	.long old_readdir
+	.long sys_old_readdir
 	.long old_mmap		/* 90 */
 	.long sys_munmap
 	.long sys_truncate
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 5ddad7b..0d42827 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -77,7 +77,7 @@
 
 drivers-$(CONFIG_OPROFILE)		+= arch/parisc/oprofile/
 
-PALO := $(shell if which palo; then : ; \
+PALO := $(shell if (which palo 2>&1); then : ; \
 	elif [ -x /sbin/palo ]; then echo /sbin/palo; \
 	fi)
 
diff --git a/arch/parisc/include/asm/byteorder.h b/arch/parisc/include/asm/byteorder.h
index db14831..58af2c5 100644
--- a/arch/parisc/include/asm/byteorder.h
+++ b/arch/parisc/include/asm/byteorder.h
@@ -1,82 +1,6 @@
 #ifndef _PARISC_BYTEORDER_H
 #define _PARISC_BYTEORDER_H
 
-#include <asm/types.h>
-#include <linux/compiler.h>
-
-#ifdef __GNUC__
-
-static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
-{
-	__asm__("dep %0, 15, 8, %0\n\t"		/* deposit 00ab -> 0bab */
-		"shd %%r0, %0, 8, %0"		/* shift 000000ab -> 00ba */
-		: "=r" (x)
-		: "0" (x));
-	return x;
-}
-
-static __inline__ __attribute_const__ __u32 ___arch__swab24(__u32 x)
-{
-	__asm__("shd %0, %0, 8, %0\n\t"		/* shift xabcxabc -> cxab */
-		"dep %0, 15, 8, %0\n\t"		/* deposit cxab -> cbab */
-		"shd %%r0, %0, 8, %0"		/* shift 0000cbab -> 0cba */
-		: "=r" (x)
-		: "0" (x));
-	return x;
-}
-
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
-{
-	unsigned int temp;
-	__asm__("shd %0, %0, 16, %1\n\t"	/* shift abcdabcd -> cdab */
-		"dep %1, 15, 8, %1\n\t"		/* deposit cdab -> cbab */
-		"shd %0, %1, 8, %0"		/* shift abcdcbab -> dcba */
-		: "=r" (x), "=&r" (temp)
-		: "0" (x));
-	return x;
-}
-
-
-#if BITS_PER_LONG > 32
-/*
-** From "PA-RISC 2.0 Architecture", HP Professional Books.
-** See Appendix I page 8 , "Endian Byte Swapping".
-**
-** Pretty cool algorithm: (* == zero'd bits)
-**      PERMH   01234567 -> 67452301 into %0
-**      HSHL    67452301 -> 7*5*3*1* into %1
-**      HSHR    67452301 -> *6*4*2*0 into %0
-**      OR      %0 | %1  -> 76543210 into %0 (all done!)
-*/
-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) {
-	__u64 temp;
-	__asm__("permh,3210 %0, %0\n\t"
-		"hshl %0, 8, %1\n\t"
-		"hshr,u %0, 8, %0\n\t"
-		"or %1, %0, %0"
-		: "=r" (x), "=&r" (temp)
-		: "0" (x));
-	return x;
-}
-#define __arch__swab64(x) ___arch__swab64(x)
-#define __BYTEORDER_HAS_U64__
-#elif !defined(__STRICT_ANSI__)
-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
-{
-	__u32 t1 = ___arch__swab32((__u32) x);
-	__u32 t2 = ___arch__swab32((__u32) (x >> 32));
-	return (((__u64) t1 << 32) | t2);
-}
-#define __arch__swab64(x) ___arch__swab64(x)
-#define __BYTEORDER_HAS_U64__
-#endif
-
-#define __arch__swab16(x) ___arch__swab16(x)
-#define __arch__swab24(x) ___arch__swab24(x)
-#define __arch__swab32(x) ___arch__swab32(x)
-
-#endif /* __GNUC__ */
-
 #include <linux/byteorder/big_endian.h>
 
 #endif /* _PARISC_BYTEORDER_H */
diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
index e9639cc..c84b2fc 100644
--- a/arch/parisc/include/asm/checksum.h
+++ b/arch/parisc/include/asm/checksum.h
@@ -182,7 +182,7 @@
 #endif
 	: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
 	: "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
-	: "r19", "r20", "r21", "r22");
+	: "r19", "r20", "r21", "r22", "memory");
 	return csum_fold(sum);
 }
 
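The added "memory" clobber matters because the asm dereferences its address operands; it forces gcc to flush pending stores to the headers before the asm runs and not to cache their contents across it. The degenerate case is the classic compiler barrier (illustrative only):

static inline void compiler_barrier(void)
{
	/* No instructions; the "memory" clobber alone stops the compiler
	 * from caching or reordering memory accesses across this point. */
	asm volatile("" : : : "memory");
}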
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 55ddb18..d3031d1 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -4,12 +4,6 @@
 #include <linux/types.h>
 #include <asm/pgtable.h>
 
-extern unsigned long parisc_vmerge_boundary;
-extern unsigned long parisc_vmerge_max_size;
-
-#define BIO_VMERGE_BOUNDARY	parisc_vmerge_boundary
-#define BIO_VMERGE_MAX_SIZE	parisc_vmerge_max_size
-
 #define virt_to_phys(a) ((unsigned long)__pa(a))
 #define phys_to_virt(a) __va(a)
 #define virt_to_bus virt_to_phys
@@ -182,9 +176,9 @@
 
 /* readb can never be const, so use __fswab instead of le*_to_cpu */
 #define readb(addr) __raw_readb(addr)
-#define readw(addr) __fswab16(__raw_readw(addr))
-#define readl(addr) __fswab32(__raw_readl(addr))
-#define readq(addr) __fswab64(__raw_readq(addr))
+#define readw(addr) le16_to_cpu(__raw_readw(addr))
+#define readl(addr) le32_to_cpu(__raw_readl(addr))
+#define readq(addr) le64_to_cpu(__raw_readq(addr))
 #define writeb(b, addr) __raw_writeb(b, addr)
 #define writew(b, addr) __raw_writew(cpu_to_le16(b), addr)
 #define writel(b, addr) __raw_writel(cpu_to_le32(b), addr)
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 85856c7..354b2ac 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -34,14 +34,19 @@
 	mm->context = 0;
 }
 
+static inline unsigned long __space_to_prot(mm_context_t context)
+{
+#if SPACEID_SHIFT == 0
+	return context << 1;
+#else
+	return context >> (SPACEID_SHIFT - 1);
+#endif
+}
+
 static inline void load_context(mm_context_t context)
 {
 	mtsp(context, 3);
-#if SPACEID_SHIFT == 0
-	mtctl(context << 1,8);
-#else
-	mtctl(context >> (SPACEID_SHIFT - 1),8);
-#endif
+	mtctl(__space_to_prot(context), 8);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 3c9d348..9d64df8 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -17,6 +17,7 @@
 #include <asm/ptrace.h>
 #include <asm/types.h>
 #include <asm/system.h>
+#include <asm/percpu.h>
 #endif /* __ASSEMBLY__ */
 
 #define KERNEL_STACK_SIZE 	(4*PAGE_SIZE)
@@ -109,8 +110,7 @@
 };
 
 extern struct system_cpuinfo_parisc boot_cpu_data;
-extern struct cpuinfo_parisc cpu_data[NR_CPUS];
-#define current_cpu_data cpu_data[smp_processor_id()]
+DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data);
 
 #define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
 
diff --git a/arch/parisc/include/asm/swab.h b/arch/parisc/include/asm/swab.h
new file mode 100644
index 0000000..3ff16c5
--- /dev/null
+++ b/arch/parisc/include/asm/swab.h
@@ -0,0 +1,66 @@
+#ifndef _PARISC_SWAB_H
+#define _PARISC_SWAB_H
+
+#include <asm/types.h>
+#include <linux/compiler.h>
+
+#define __SWAB_64_THRU_32__
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+	__asm__("dep %0, 15, 8, %0\n\t"		/* deposit 00ab -> 0bab */
+		"shd %%r0, %0, 8, %0"		/* shift 000000ab -> 00ba */
+		: "=r" (x)
+		: "0" (x));
+	return x;
+}
+#define __arch_swab16 __arch_swab16
+
+static inline __attribute_const__ __u32 __arch_swab24(__u32 x)
+{
+	__asm__("shd %0, %0, 8, %0\n\t"		/* shift xabcxabc -> cxab */
+		"dep %0, 15, 8, %0\n\t"		/* deposit cxab -> cbab */
+		"shd %%r0, %0, 8, %0"		/* shift 0000cbab -> 0cba */
+		: "=r" (x)
+		: "0" (x));
+	return x;
+}
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+	unsigned int temp;
+	__asm__("shd %0, %0, 16, %1\n\t"	/* shift abcdabcd -> cdab */
+		"dep %1, 15, 8, %1\n\t"		/* deposit cdab -> cbab */
+		"shd %0, %1, 8, %0"		/* shift abcdcbab -> dcba */
+		: "=r" (x), "=&r" (temp)
+		: "0" (x));
+	return x;
+}
+#define __arch_swab32 __arch_swab32
+
+#if BITS_PER_LONG > 32
+/*
+** From "PA-RISC 2.0 Architecture", HP Professional Books.
+** See Appendix I page 8, "Endian Byte Swapping".
+**
+** Pretty cool algorithm: (* == zero'd bits)
+**      PERMH   01234567 -> 67452301 into %0
+**      HSHL    67452301 -> 7*5*3*1* into %1
+**      HSHR    67452301 -> *6*4*2*0 into %0
+**      OR      %0 | %1  -> 76543210 into %0 (all done!)
+*/
+static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
+{
+	__u64 temp;
+	__asm__("permh,3210 %0, %0\n\t"
+		"hshl %0, 8, %1\n\t"
+		"hshr,u %0, 8, %0\n\t"
+		"or %1, %0, %0"
+		: "=r" (x), "=&r" (temp)
+		: "0" (x));
+	return x;
+}
+#define __arch_swab64 __arch_swab64
+#endif /* BITS_PER_LONG > 32 */
+
+#endif /* _PARISC_SWAB_H */
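For reference, a portable C equivalent of the PERMH/HSHL/HSHR sequence above (illustrative only, not part of the patch):

/* Reverse the four 16-bit halves, then swap the bytes inside each half;
 * the result is the full 64-bit byte reversal the asm produces. */
static inline unsigned long long swab64_reference(unsigned long long x)
{
	/* PERMH,3210: halfwords 0123 -> 3210 */
	x = ((x & 0xffffULL) << 48) |
	    ((x & 0xffff0000ULL) << 16) |
	    ((x >> 16) & 0xffff0000ULL) |
	    (x >> 48);

	/* HSHL/HSHR/OR: byte swap within each halfword */
	return ((x & 0x00ff00ff00ff00ffULL) << 8) |
	       ((x >> 8) & 0x00ff00ff00ff00ffULL);
}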
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 4878b95..1c6dbb6 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -241,4 +241,6 @@
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
+int fixup_exception(struct pt_regs *regs);
+
 #endif /* __PARISC_UACCESS_H */
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 884b7ce..994bcd9 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -549,6 +549,38 @@
 	return match_device(to_parisc_driver(drv), to_parisc_device(dev));
 }
 
+static ssize_t make_modalias(struct device *dev, char *buf)
+{
+	const struct parisc_device *padev = to_parisc_device(dev);
+	const struct parisc_device_id *id = &padev->id;
+
+	return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
+		(u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
+		(u32)id->sversion);
+}
+
+static int parisc_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	const struct parisc_device *padev;
+	char modalias[40];
+
+	if (!dev)
+		return -ENODEV;
+
+	padev = to_parisc_device(dev);
+	if (!padev)
+		return -ENODEV;
+
+	if (add_uevent_var(env, "PARISC_NAME=%s", padev->name))
+		return -ENOMEM;
+
+	make_modalias(dev, modalias);
+	if (add_uevent_var(env, "MODALIAS=%s", modalias))
+		return -ENOMEM;
+
+	return 0;
+}
+
 #define pa_dev_attr(name, field, format_string)				\
 static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf)		\
 {									\
@@ -566,12 +598,7 @@
 
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct parisc_device *padev = to_parisc_device(dev);
-	struct parisc_device_id *id = &padev->id;
-
-	return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
-		(u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
-		(u32)id->sversion);
+	return make_modalias(dev, buf);
 }
 
 static struct device_attribute parisc_device_attrs[] = {
@@ -587,6 +614,7 @@
 struct bus_type parisc_bus_type = {
 	.name = "parisc",
 	.match = parisc_generic_match,
+	.uevent = parisc_uevent,
 	.dev_attrs = parisc_device_attrs,
 	.probe = parisc_driver_probe,
 	.remove = parisc_driver_remove,
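With MODALIAS in the hotplug environment, udev can autoload a module whose alias table matches the device. A minimal sketch of the consumer side; the driver name and IDs are hypothetical, the string format is the one built by make_modalias() above:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* A device whose uevent carries e.g. MODALIAS=parisc:t00hv0004rev00sv00000077
 * (values made up) is matched by the sversion 0x77 entry below and the
 * module loaded on demand. */
static const struct parisc_device_id example_ids[] = {
	{ PA_HWTYPE_ANY_ID, PA_HVERSION_REV_ANY_ID, PA_HVERSION_ANY_ID, 0x00077 },
	{ 0, }
};
MODULE_DEVICE_TABLE(parisc, example_ids);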
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index 2cbf13b..5595a2f 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -80,6 +80,7 @@
 
 	.import intr_save, code
 ENTRY(os_hpmc)
+.os_hpmc:
 
 	/*
 	 * registers modified:
@@ -295,5 +296,10 @@
 	b .
 	nop
 ENDPROC(os_hpmc)
-ENTRY(os_hpmc_end)	/* this label used to compute os_hpmc checksum */
+.os_hpmc_end:
 	nop
+.data
+.align 4
+	.export os_hpmc_size
+os_hpmc_size:
+	.word .os_hpmc_end-.os_hpmc
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 4cea935..ac2c822 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -298,7 +298,7 @@
 	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
 #endif
 
-	return cpu_data[cpu].txn_addr;
+	return per_cpu(cpu_data, cpu).txn_addr;
 }
 
 
@@ -309,8 +309,9 @@
 	next_cpu++; /* assign to "next" CPU we want this bugger on */
 
 	/* validate entry */
-	while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr || 
-		!cpu_online(next_cpu)))
+	while ((next_cpu < NR_CPUS) &&
+		(!per_cpu(cpu_data, next_cpu).txn_addr ||
+		 !cpu_online(next_cpu)))
 		next_cpu++;
 
 	if (next_cpu >= NR_CPUS) 
@@ -359,7 +360,7 @@
 		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
 		       irq, smp_processor_id(), cpu);
 		gsc_writel(irq + CPU_IRQ_BASE,
-			   cpu_data[cpu].hpa);
+			   per_cpu(cpu_data, cpu).hpa);
 		goto set_out;
 	}
 #endif
@@ -421,5 +422,5 @@
 
 void ack_bad_irq(unsigned int irq)
 {
-	printk("unexpected IRQ %d\n", irq);
+	printk(KERN_WARNING "unexpected IRQ %d\n", irq);
 }
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index ccb6809..1ff366c 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -52,7 +52,7 @@
 #include <linux/tty.h>
 #include <asm/pdc.h>		/* for iodc_call() proto and friends */
 
-static spinlock_t pdc_console_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(pdc_console_lock);
 
 static void pdc_console_write(struct console *co, const char *s, unsigned count)
 {
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index f696f57..75099ef 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -541,9 +541,9 @@
 	spin_lock_init(&perf_lock);
 
 	/* TODO: this only lets us access the first cpu.. what to do for SMP? */
-	cpu_device = cpu_data[0].dev;
+	cpu_device = per_cpu(cpu_data, 0).dev;
 	printk("Performance monitoring counters enabled for %s\n",
-		cpu_data[0].dev->name);
+		per_cpu(cpu_data, 0).dev->name);
 
 	return 0;
 }
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 370086f..ecb6093 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -3,7 +3,7 @@
  *    Initial setup-routines for HP 9000 based hardware.
  *
  *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
- *    Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de>
+ *    Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de>
  *    Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
  *    Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
  *    Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
@@ -46,7 +46,7 @@
 struct system_cpuinfo_parisc boot_cpu_data __read_mostly;
 EXPORT_SYMBOL(boot_cpu_data);
 
-struct cpuinfo_parisc cpu_data[NR_CPUS] __read_mostly;
+DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
 
 extern int update_cr16_clocksource(void);	/* from time.c */
 
@@ -69,6 +69,23 @@
 */
 
 /**
+ * init_cpu_profiler - enable/setup per cpu profiling hooks.
+ * @cpunum: The processor instance.
+ *
+ * FIXME: doesn't do much yet...
+ */
+static void __cpuinit
+init_percpu_prof(unsigned long cpunum)
+{
+	struct cpuinfo_parisc *p;
+
+	p = &per_cpu(cpu_data, cpunum);
+	p->prof_counter = 1;
+	p->prof_multiplier = 1;
+}
+
+
+/**
  * processor_probe - Determine if processor driver should claim this device.
  * @dev: The device which has been found.
  *
@@ -147,7 +164,7 @@
 	}
 #endif
 
-	p = &cpu_data[cpuid];
+	p = &per_cpu(cpu_data, cpuid);
 	boot_cpu_data.cpu_count++;
 
 	/* initialize counters - CPU 0 gets it_value set in time_init() */
@@ -162,12 +179,9 @@
 #ifdef CONFIG_SMP
 	/*
 	** FIXME: review if any other initialization is clobbered
-	**	for boot_cpu by the above memset().
+	**	  for boot_cpu by the above memset().
 	*/
-
-	/* stolen from init_percpu_prof() */
-	cpu_data[cpuid].prof_counter = 1;
-	cpu_data[cpuid].prof_multiplier = 1;
+	init_percpu_prof(cpuid);
 #endif
 
 	/*
@@ -261,19 +275,6 @@
 }
 
 
-/**
- * init_cpu_profiler - enable/setup per cpu profiling hooks.
- * @cpunum: The processor instance.
- *
- * FIXME: doesn't do much yet...
- */
-static inline void __init
-init_percpu_prof(int cpunum)
-{
-	cpu_data[cpunum].prof_counter = 1;
-	cpu_data[cpunum].prof_multiplier = 1;
-}
-
 
 /**
  * init_per_cpu - Handle individual processor initializations.
@@ -293,7 +294,7 @@
  *
  * o Enable CPU profiling hooks.
  */
-int __init init_per_cpu(int cpunum)
+int __cpuinit init_per_cpu(int cpunum)
 {
 	int ret;
 	struct pdc_coproc_cfg coproc_cfg;
@@ -307,8 +308,8 @@
 		/* FWIW, FP rev/model is a more accurate way to determine
 		** CPU type. CPU rev/model has some ambiguous cases.
 		*/
-		cpu_data[cpunum].fp_rev = coproc_cfg.revision;
-		cpu_data[cpunum].fp_model = coproc_cfg.model;
+		per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
+		per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
 
 		printk(KERN_INFO  "FP[%d] enabled: Rev %ld Model %ld\n",
 			cpunum, coproc_cfg.revision, coproc_cfg.model);
@@ -344,16 +345,17 @@
 int
 show_cpuinfo (struct seq_file *m, void *v)
 {
-	int	n;
+	unsigned long cpu;
 
-	for(n=0; n<boot_cpu_data.cpu_count; n++) {
+	for_each_online_cpu(cpu) {
+		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
 #ifdef CONFIG_SMP
-		if (0 == cpu_data[n].hpa)
+		if (0 == cpuinfo->hpa)
 			continue;
 #endif
-		seq_printf(m, "processor\t: %d\n"
+		seq_printf(m, "processor\t: %lu\n"
 				"cpu family\t: PA-RISC %s\n",
-				 n, boot_cpu_data.family_name);
+				 cpu, boot_cpu_data.family_name);
 
 		seq_printf(m, "cpu\t\t: %s\n",  boot_cpu_data.cpu_name );
 
@@ -365,8 +367,8 @@
 		seq_printf(m, "model\t\t: %s\n"
 				"model name\t: %s\n",
 				 boot_cpu_data.pdc.sys_model_name,
-				 cpu_data[n].dev ? 
-				 cpu_data[n].dev->name : "Unknown" );
+				 cpuinfo->dev ?
+				 cpuinfo->dev->name : "Unknown");
 
 		seq_printf(m, "hversion\t: 0x%08x\n"
 			        "sversion\t: 0x%08x\n",
@@ -377,8 +379,8 @@
 		show_cache_info(m);
 
 		seq_printf(m, "bogomips\t: %lu.%02lu\n",
-			     cpu_data[n].loops_per_jiffy / (500000 / HZ),
-			     (cpu_data[n].loops_per_jiffy / (5000 / HZ)) % 100);
+			     cpuinfo->loops_per_jiffy / (500000 / HZ),
+			     (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100);
 
 		seq_printf(m, "software id\t: %ld\n\n",
 				boot_cpu_data.pdc.model.sw_id);
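The conversion replaces the static cpu_data[NR_CPUS] array with a per-CPU variable throughout the parisc code; the access pattern changes as in this minimal sketch (illustrative only, function names hypothetical):

#include <linux/percpu.h>

DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);

/* Another CPU's copy, by CPU number (replaces cpu_data[cpu].field): */
static void mark_cpu(int cpu)
{
	per_cpu(cpu_data, cpu).prof_counter = 1;
}

/* The running CPU's own copy: */
static void mark_this_cpu(void)
{
	__get_cpu_var(cpu_data).prof_multiplier = 1;
}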
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 7d27853..82131ca 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -58,11 +58,6 @@
 EXPORT_SYMBOL(parisc_bus_is_phys);
 #endif
 
-/* This sets the vmerge boundary and size, it's here because it has to
- * be available on all platforms (zero means no-virtual merging) */
-unsigned long parisc_vmerge_boundary = 0;
-unsigned long parisc_vmerge_max_size = 0;
-
 void __init setup_cmdline(char **cmdline_p)
 {
 	extern unsigned int boot_args[];
@@ -321,7 +316,7 @@
 	
 	processor_init();
 	printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n",
-			boot_cpu_data.cpu_count,
+			num_present_cpus(),
 			boot_cpu_data.cpu_name,
 			boot_cpu_data.cpu_hz / 1000000,
 			boot_cpu_data.cpu_hz % 1000000	);
@@ -387,8 +382,8 @@
 	if (ret >= 0 && coproc_cfg.ccr_functional) {
 		mtctl(coproc_cfg.ccr_functional, 10);
 
-		cpu_data[cpunum].fp_rev = coproc_cfg.revision;
-		cpu_data[cpunum].fp_model = coproc_cfg.model;
+		per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
+		per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
 
 		asm volatile ("fstd	%fr0,8(%sp)");
 	} else {
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 80bc000..9995d7e 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -56,16 +56,17 @@
 		if (lvl >= smp_debug_lvl)	\
 			printk(printargs);
 #else
-#define smp_debug(lvl, ...)
+#define smp_debug(lvl, ...)	do { } while(0)
 #endif /* DEBUG_SMP */
 
 DEFINE_SPINLOCK(smp_lock);
 
 volatile struct task_struct *smp_init_current_idle_task;
 
-static volatile int cpu_now_booting __read_mostly = 0;	/* track which CPU is booting */
+/* track which CPU is booting */
+static volatile int cpu_now_booting __cpuinitdata;
 
-static int parisc_max_cpus __read_mostly = 1;
+static int parisc_max_cpus __cpuinitdata = 1;
 
 DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
 
@@ -123,7 +124,7 @@
 ipi_interrupt(int irq, void *dev_id) 
 {
 	int this_cpu = smp_processor_id();
-	struct cpuinfo_parisc *p = &cpu_data[this_cpu];
+	struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
 	unsigned long ops;
 	unsigned long flags;
 
@@ -202,13 +203,13 @@
 static inline void
 ipi_send(int cpu, enum ipi_message_type op)
 {
-	struct cpuinfo_parisc *p = &cpu_data[cpu];
+	struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
 	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
 	unsigned long flags;
 
 	spin_lock_irqsave(lock, flags);
 	p->pending_ipi |= 1 << op;
-	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa);
+	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
 	spin_unlock_irqrestore(lock, flags);
 }
 
@@ -224,10 +225,7 @@
 static inline void
 send_IPI_single(int dest_cpu, enum ipi_message_type op)
 {
-	if (dest_cpu == NO_PROC_ID) {
-		BUG();
-		return;
-	}
+	BUG_ON(dest_cpu == NO_PROC_ID);
 
 	ipi_send(dest_cpu, op);
 }
@@ -309,8 +307,7 @@
 	/* Initialise the idle task for this CPU */
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
-	if(current->mm)
-		BUG();
+	BUG_ON(current->mm);
 	enter_lazy_tlb(&init_mm, current);
 
 	init_IRQ();   /* make sure no IRQs are enabled or pending */
@@ -345,6 +342,7 @@
  */
 int __cpuinit smp_boot_one_cpu(int cpuid)
 {
+	const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
 	struct task_struct *idle;
 	long timeout;
 
@@ -376,7 +374,7 @@
 	smp_init_current_idle_task = idle ;
 	mb();
 
-	printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa);
+	printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);
 
 	/*
 	** This gets PDC to release the CPU from a very tight loop.
@@ -387,7 +385,7 @@
 	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the 
 	** contents of memory are valid."
 	*/
-	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa);
+	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
 	mb();
 
 	/* 
@@ -419,12 +417,12 @@
 	return 0;
 }
 
-void __devinit smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
 {
-	int bootstrap_processor=cpu_data[0].cpuid;	/* CPU ID of BSP */
+	int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;
 
 	/* Setup BSP mappings */
-	printk("SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
+	printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
 
 	cpu_set(bootstrap_processor, cpu_online_map);
 	cpu_set(bootstrap_processor, cpu_present_map);
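Turning the empty smp_debug() stub into do { } while (0) keeps it behaving like a single statement when DEBUG_SMP is off, so code like the following compiles without empty-body warnings (illustrative only, names hypothetical):

static void report_cpu(int cpu)
{
	if (cpu_online(cpu))
		smp_debug(100, KERN_DEBUG "cpu %d is online\n", cpu);
	else
		printk(KERN_DEBUG "cpu %d is offline\n", cpu);
}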
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 4d09203..9d46c43 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -60,7 +60,7 @@
 	unsigned long cycles_elapsed, ticks_elapsed;
 	unsigned long cycles_remainder;
 	unsigned int cpu = smp_processor_id();
-	struct cpuinfo_parisc *cpuinfo = &cpu_data[cpu];
+	struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
 
 	/* gcc can optimize for "read-only" case with a local clocktick */
 	unsigned long cpt = clocktick;
@@ -213,7 +213,7 @@
 
 	mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */
 
-	cpu_data[cpu].it_value = next_tick;
+	per_cpu(cpu_data, cpu).it_value = next_tick;
 }
 
 struct platform_device rtc_parisc_dev = {
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c
index d71cb01..f515938 100644
--- a/arch/parisc/kernel/topology.c
+++ b/arch/parisc/kernel/topology.c
@@ -22,14 +22,14 @@
 #include <linux/cpu.h>
 #include <linux/cache.h>
 
-static struct cpu cpu_devices[NR_CPUS] __read_mostly;
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
 static int __init topology_init(void)
 {
 	int num;
 
 	for_each_present_cpu(num) {
-		register_cpu(&cpu_devices[num], num);
+		register_cpu(&per_cpu(cpu_devices, num), num);
 	}
 	return 0;
 }
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 4c771cd..ba658d2 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -745,6 +745,10 @@
 		/* Fall Through */
 	case 27: 
 		/* Data memory protection ID trap */
+		if (code == 27 && !user_mode(regs) &&
+			fixup_exception(regs))
+			return;
+
 		die_if_kernel("Protection id trap", regs, code);
 		si.si_code = SEGV_MAPERR;
 		si.si_signo = SIGSEGV;
@@ -821,8 +825,8 @@
 
 int __init check_ivt(void *iva)
 {
+	extern u32 os_hpmc_size;
 	extern const u32 os_hpmc[];
-	extern const u32 os_hpmc_end[];
 
 	int i;
 	u32 check = 0;
@@ -839,8 +843,7 @@
 	    *ivap++ = 0;
 
 	/* Compute Checksum for HPMC handler */
-
-	length = os_hpmc_end - os_hpmc;
+	length = os_hpmc_size;
 	ivap[7] = length;
 
 	hpmcp = (u32 *)os_hpmc;
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 6773c58..69dad5a 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -372,7 +372,7 @@
 	struct pt_regs *r = &t->thread.regs;
 	struct pt_regs *r2;
 
-	r2 = kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
+	r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
 	if (!r2)
 		return;
 	*r2 = *r;
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index 9abed07..5069e8b 100644
--- a/arch/parisc/lib/iomap.c
+++ b/arch/parisc/lib/iomap.c
@@ -261,7 +261,7 @@
 	iomem_write32r,
 };
 
-const struct iomap_ops *iomap_ops[8] = {
+static const struct iomap_ops *iomap_ops[8] = {
 	[0] = &ioport_ops,
 	[7] = &iomem_ops
 };
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index 2d68431..bbda909 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -275,7 +275,7 @@
 
 
 /* Returns 0 for success, otherwise, returns number of bytes not transferred. */
-unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
 {
 	register unsigned long src, dst, t1, t2, t3;
 	register unsigned char *pcs, *pcd;
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index b2e3e9a..92c7fa4 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -139,13 +139,41 @@
 			}
 #endif
 
+int fixup_exception(struct pt_regs *regs)
+{
+	const struct exception_table_entry *fix;
+
+	fix = search_exception_tables(regs->iaoq[0]);
+	if (fix) {
+		struct exception_data *d;
+		d = &__get_cpu_var(exception_data);
+		d->fault_ip = regs->iaoq[0];
+		d->fault_space = regs->isr;
+		d->fault_addr = regs->ior;
+
+		regs->iaoq[0] = ((fix->fixup) & ~3);
+		/*
+		 * NOTE: In some cases the faulting instruction
+		 * may be in the delay slot of a branch. We
+		 * don't want to take the branch, so we don't
+		 * increment iaoq[1], instead we set it to be
+		 * iaoq[0]+4, and clear the B bit in the PSW
+		 */
+		regs->iaoq[1] = regs->iaoq[0] + 4;
+		regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
+
+		return 1;
+	}
+
+	return 0;
+}
+
 void do_page_fault(struct pt_regs *regs, unsigned long code,
 			      unsigned long address)
 {
 	struct vm_area_struct *vma, *prev_vma;
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
-	const struct exception_table_entry *fix;
 	unsigned long acc_type;
 	int fault;
 
@@ -229,32 +257,8 @@
 
 no_context:
 
-	if (!user_mode(regs)) {
-		fix = search_exception_tables(regs->iaoq[0]);
-
-		if (fix) {
-			struct exception_data *d;
-
-			d = &__get_cpu_var(exception_data);
-			d->fault_ip = regs->iaoq[0];
-			d->fault_space = regs->isr;
-			d->fault_addr = regs->ior;
-
-			regs->iaoq[0] = ((fix->fixup) & ~3);
-
-			/*
-			 * NOTE: In some cases the faulting instruction
-			 * may be in the delay slot of a branch. We
-			 * don't want to take the branch, so we don't
-			 * increment iaoq[1], instead we set it to be
-			 * iaoq[0]+4, and clear the B bit in the PSW
-			 */
-
-			regs->iaoq[1] = regs->iaoq[0] + 4;
-			regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
-
-			return;
-		}
+	if (!user_mode(regs) && fixup_exception(regs)) {
+		return;
 	}
 
 	parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
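Factoring the exception-table walk into fixup_exception() lets other trap handlers reuse it, as the protection-ID trap in traps.c above now does. The calling pattern is simply (illustrative only, handler name hypothetical):

static void handle_example_trap(struct pt_regs *regs, unsigned long code)
{
	/* A kernel-mode fault with a matching __ex_table entry resumes at
	 * the fixup address (delay-slot handling is done inside
	 * fixup_exception()) instead of taking the kernel down. */
	if (!user_mode(regs) && fixup_exception(regs))
		return;

	die_if_kernel("example trap", regs, code);
}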
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 79f25ce..e39b73b 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -108,6 +108,8 @@
 config PPC
 	bool
 	default y
+	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACER
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select HAVE_IDE
@@ -121,6 +123,7 @@
 	select HAVE_DMA_ATTRS if PPC64
 	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_OPROFILE
+	select HAVE_SYSCALL_WRAPPERS if PPC64
 
 config EARLY_PRINTK
 	bool
@@ -326,7 +329,8 @@
 
 config CRASH_DUMP
 	bool "Build a kdump crash kernel"
-	depends on (PPC64 && RELOCATABLE) || 6xx
+	depends on PPC64 || 6xx
+	select RELOCATABLE if PPC64
 	help
 	  Build a kernel suitable for use as a kdump capture kernel.
 	  The same kernel binary can be used as production kernel and dump
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index f328299..e84df33 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -208,7 +208,7 @@
 #
 # Theses are default targets to build images which embed device tree blobs.
 # They are only required on boards which do not have FDT support in firmware.
-# Boards with newish u-boot firmare can use the uImage target above
+# Boards with newish u-boot firmware can use the uImage target above
 #
 
 # Board ports in arch/powerpc/platform/40x/Kconfig
@@ -356,7 +356,7 @@
 	@rm -f $@; ln $< $@
 
 install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
-	sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $<
+	sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $^
 
 # anything not in $(targets)
 clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
diff --git a/arch/powerpc/boot/dts/gef_sbc610.dts b/arch/powerpc/boot/dts/gef_sbc610.dts
index 9708b34..e78c355 100644
--- a/arch/powerpc/boot/dts/gef_sbc610.dts
+++ b/arch/powerpc/boot/dts/gef_sbc610.dts
@@ -88,6 +88,21 @@
 			compatible = "gef,fpga-regs";
 			reg = <0x4 0x0 0x40>;
 		};
+
+		wdt@4,2000 {
+			compatible = "gef,fpga-wdt";
+			reg = <0x4 0x2000 0x8>;
+			interrupts = <0x1a 0x4>;
+			interrupt-parent = <&gef_pic>;
+		};
+		/* A second watchdog is available; the driver currently supports only one.
+		wdt@4,2010 {
+			compatible = "gef,fpga-wdt";
+			reg = <0x4 0x2010 0x8>;
+			interrupts = <0x1b 0x4>;
+			interrupt-parent = <&gef_pic>;
+		};
+		*/
 		gef_pic: pic@4,4000 {
 			#interrupt-cells = <1>;
 			interrupt-controller;
diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts
index 072c9b0..7178416 100644
--- a/arch/powerpc/boot/dts/mpc8315erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8315erdb.dts
@@ -255,7 +255,7 @@
 			device_type = "serial";
 			compatible = "ns16550";
 			reg = <0x4500 0x100>;
-			clock-frequency = <0>;
+			clock-frequency = <133333333>;
 			interrupts = <9 0x8>;
 			interrupt-parent = <&ipic>;
 		};
@@ -265,7 +265,7 @@
 			device_type = "serial";
 			compatible = "ns16550";
 			reg = <0x4600 0x100>;
-			clock-frequency = <0>;
+			clock-frequency = <133333333>;
 			interrupts = <10 0x8>;
 			interrupt-parent = <&ipic>;
 		};
diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts
index 14534d0..6e34f17 100644
--- a/arch/powerpc/boot/dts/mpc836x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc836x_mds.dts
@@ -69,8 +69,18 @@
 		};
 
 		bcsr@1,0 {
+			#address-cells = <1>;
+			#size-cells = <1>;
  			compatible = "fsl,mpc8360mds-bcsr";
 			reg = <1 0 0x8000>;
+			ranges = <0 1 0 0x8000>;
+
+			bcsr13: gpio-controller@d {
+				#gpio-cells = <2>;
+				compatible = "fsl,mpc8360mds-bcsr-gpio";
+				reg = <0xd 1>;
+				gpio-controller;
+			};
 		};
 	};
 
@@ -195,10 +205,21 @@
 		};
 
 		par_io@1400 {
+			#address-cells = <1>;
+			#size-cells = <1>;
 			reg = <0x1400 0x100>;
+			ranges = <0 0x1400 0x100>;
 			device_type = "par_io";
 			num-ports = <7>;
 
+			qe_pio_b: gpio-controller@18 {
+				#gpio-cells = <2>;
+				compatible = "fsl,mpc8360-qe-pario-bank",
+					     "fsl,mpc8323-qe-pario-bank";
+				reg = <0x18 0x18>;
+				gpio-controller;
+			};
+
 			pio1: ucc_pin@01 {
 				pio-map = <
 			/* port  pin  dir  open_drain  assignment  has_irq */
@@ -282,6 +303,15 @@
 			};
 		};
 
+		timer@440 {
+			compatible = "fsl,mpc8360-qe-gtm",
+				     "fsl,qe-gtm", "fsl,gtm";
+			reg = <0x440 0x40>;
+			clock-frequency = <132000000>;
+			interrupts = <12 13 14 15>;
+			interrupt-parent = <&qeic>;
+		};
+
 		spi@4c0 {
 			cell-index = <0>;
 			compatible = "fsl,spi";
@@ -301,11 +331,20 @@
 		};
 
 		usb@6c0 {
-			compatible = "qe_udc";
+			compatible = "fsl,mpc8360-qe-usb",
+				     "fsl,mpc8323-qe-usb";
 			reg = <0x6c0 0x40 0x8b00 0x100>;
 			interrupts = <11>;
 			interrupt-parent = <&qeic>;
-			mode = "slave";
+			fsl,fullspeed-clock = "clk21";
+			fsl,lowspeed-clock = "brg9";
+			gpios = <&qe_pio_b  2 0   /* USBOE */
+				 &qe_pio_b  3 0   /* USBTP */
+				 &qe_pio_b  8 0   /* USBTN */
+				 &qe_pio_b  9 0   /* USBRP */
+				 &qe_pio_b 11 0   /* USBRN */
+				 &bcsr13    5 0   /* SPEED */
+				 &bcsr13    4 1>; /* POWER */
 		};
 
 		enet0: ucc@2000 {
diff --git a/arch/powerpc/boot/dts/mpc836x_rdk.dts b/arch/powerpc/boot/dts/mpc836x_rdk.dts
index decadf3..37b7895 100644
--- a/arch/powerpc/boot/dts/mpc836x_rdk.dts
+++ b/arch/powerpc/boot/dts/mpc836x_rdk.dts
@@ -218,8 +218,23 @@
 				reg = <0x440 0x40>;
 				interrupts = <12 13 14 15>;
 				interrupt-parent = <&qeic>;
-				/* filled by u-boot */
-				clock-frequency = <0>;
+				clock-frequency = <166666666>;
+			};
+
+			usb@6c0 {
+				compatible = "fsl,mpc8360-qe-usb",
+					     "fsl,mpc8323-qe-usb";
+				reg = <0x6c0 0x40 0x8b00 0x100>;
+				interrupts = <11>;
+				interrupt-parent = <&qeic>;
+				fsl,fullspeed-clock = "clk21";
+				gpios = <&qe_pio_b  2 0 /* USBOE */
+					 &qe_pio_b  3 0 /* USBTP */
+					 &qe_pio_b  8 0 /* USBTN */
+					 &qe_pio_b  9 0 /* USBRP */
+					 &qe_pio_b 11 0 /* USBRN */
+					 &qe_pio_e 20 0 /* SPEED */
+					 &qe_pio_e 21 1 /* POWER */>;
 			};
 
 			spi@4c0 {
diff --git a/arch/powerpc/boot/dts/mpc8544ds.dts b/arch/powerpc/boot/dts/mpc8544ds.dts
index b9da421..0668d10 100644
--- a/arch/powerpc/boot/dts/mpc8544ds.dts
+++ b/arch/powerpc/boot/dts/mpc8544ds.dts
@@ -313,7 +313,7 @@
 			  0x1000000 0x0 0x0 0xe1010000 0x0 0x10000>;
 		clock-frequency = <33333333>;
 		interrupt-parent = <&mpic>;
-		interrupts = <26 2>;
+		interrupts = <25 2>;
 		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
 		interrupt-map = <
 			/* IDSEL 0x0 */
@@ -350,7 +350,7 @@
 			  0x1000000 0x0 0x0 0xe1020000 0x0 0x10000>;
 		clock-frequency = <33333333>;
 		interrupt-parent = <&mpic>;
-		interrupts = <25 2>;
+		interrupts = <26 2>;
 		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
 		interrupt-map = <
 			/* IDSEL 0x0 */
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dts b/arch/powerpc/boot/dts/mpc8572ds.dts
index 21459e1..3dcc001 100644
--- a/arch/powerpc/boot/dts/mpc8572ds.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds.dts
@@ -724,7 +724,7 @@
 			  0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x00010000>;
 		clock-frequency = <33333333>;
 		interrupt-parent = <&mpic>;
-		interrupts = <26 2>;
+		interrupts = <25 2>;
 		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
 		interrupt-map = <
 			/* IDSEL 0x0 */
@@ -761,7 +761,7 @@
 			  0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x00010000>;
 		clock-frequency = <33333333>;
 		interrupt-parent = <&mpic>;
-		interrupts = <27 2>;
+		interrupts = <26 2>;
 		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
 		interrupt-map = <
 			/* IDSEL 0x0 */
diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
index c114c4e..fd462ef 100644
--- a/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
@@ -457,7 +457,7 @@
 			  0x1000000 0x0 0x0 0xffc10000 0x0 0x10000>;
 		clock-frequency = <33333333>;
 		interrupt-parent = <&mpic>;
-		interrupts = <26 2>;
+		interrupts = <25 2>;
 		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
 		interrupt-map = <
 			/* IDSEL 0x0 */
diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
index 04ecda1..e35230f 100644
--- a/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
@@ -208,7 +208,7 @@
 			  0x1000000 0x0 0x0 0xffc20000 0x0 0x10000>;
 		clock-frequency = <33333333>;
 		interrupt-parent = <&mpic>;
-		interrupts = <27 2>;
+		interrupts = <26 2>;
 		interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
 		interrupt-map = <
 			/* IDSEL 0x0 */
diff --git a/arch/powerpc/boot/dts/mpc8641_hpcn.dts b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
index 35d5e24..4481532 100644
--- a/arch/powerpc/boot/dts/mpc8641_hpcn.dts
+++ b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
@@ -26,7 +26,13 @@
 		serial1 = &serial1;
 		pci0 = &pci0;
 		pci1 = &pci1;
-		rapidio0 = &rapidio0;
+/*
+ * Only one of RapidIO or PCI can be present, due to HW limitations and
+ * to the fact that the two now share address space in the new memory
+ * map.  The most likely case is that we have PCI, so comment out the
+ * rapidio node.  Leave it here for reference.
+ */
+		/* rapidio0 = &rapidio0; */
 	};
 
 	cpus {
@@ -62,18 +68,17 @@
 		reg = <0x00000000 0x40000000>;	// 1G at 0x0
 	};
 
-	localbus@f8005000 {
+	localbus@ffe05000 {
 		#address-cells = <2>;
 		#size-cells = <1>;
 		compatible = "fsl,mpc8641-localbus", "simple-bus";
-		reg = <0xf8005000 0x1000>;
+		reg = <0xffe05000 0x1000>;
 		interrupts = <19 2>;
 		interrupt-parent = <&mpic>;
 
-		ranges = <0 0 0xff800000 0x00800000
-			  1 0 0xfe000000 0x01000000
-			  2 0 0xf8200000 0x00100000
-			  3 0 0xf8100000 0x00100000>;
+		ranges = <0 0 0xef800000 0x00800000
+			  2 0 0xffdf8000 0x00008000
+			  3 0 0xffdf0000 0x00008000>;
 
 		flash@0,0 {
 			compatible = "cfi-flash";
@@ -103,13 +108,13 @@
 		};
 	};
 
-	soc8641@f8000000 {
+	soc8641@ffe00000 {
 		#address-cells = <1>;
 		#size-cells = <1>;
 		device_type = "soc";
 		compatible = "simple-bus";
-		ranges = <0x00000000 0xf8000000 0x00100000>;
-		reg = <0xf8000000 0x00001000>;	// CCSRBAR
+		ranges = <0x00000000 0xffe00000 0x00100000>;
+		reg = <0xffe00000 0x00001000>;	// CCSRBAR
 		bus-frequency = <0>;
 
 		i2c@3000 {
@@ -340,17 +345,17 @@
 		};
 	};
 
-	pci0: pcie@f8008000 {
+	pci0: pcie@ffe08000 {
 		cell-index = <0>;
 		compatible = "fsl,mpc8641-pcie";
 		device_type = "pci";
 		#interrupt-cells = <1>;
 		#size-cells = <2>;
 		#address-cells = <3>;
-		reg = <0xf8008000 0x1000>;
+		reg = <0xffe08000 0x1000>;
 		bus-range = <0x0 0xff>;
 		ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x20000000
-			  0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>;
+			  0x01000000 0x0 0x00000000 0xffc00000 0x0 0x00010000>;
 		clock-frequency = <33333333>;
 		interrupt-parent = <&mpic>;
 		interrupts = <24 2>;
@@ -481,7 +486,7 @@
 
 				  0x01000000 0x0 0x00000000
 				  0x01000000 0x0 0x00000000
-				  0x0 0x00100000>;
+				  0x0 0x00010000>;
 			uli1575@0 {
 				reg = <0 0 0 0 0>;
 				#size-cells = <2>;
@@ -491,7 +496,7 @@
 					  0x0 0x20000000
 					  0x01000000 0x0 0x00000000
 					  0x01000000 0x0 0x00000000
-					  0x0 0x00100000>;
+					  0x0 0x00010000>;
 				isa@1e {
 					device_type = "isa";
 					#interrupt-cells = <2>;
@@ -549,17 +554,17 @@
 
 	};
 
-	pci1: pcie@f8009000 {
+	pci1: pcie@ffe09000 {
 		cell-index = <1>;
 		compatible = "fsl,mpc8641-pcie";
 		device_type = "pci";
 		#interrupt-cells = <1>;
 		#size-cells = <2>;
 		#address-cells = <3>;
-		reg = <0xf8009000 0x1000>;
+		reg = <0xffe09000 0x1000>;
 		bus-range = <0 0xff>;
 		ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000
-			  0x01000000 0x0 0x00000000 0xe3000000 0x0 0x00100000>;
+			  0x01000000 0x0 0x00000000 0xffc10000 0x0 0x00010000>;
 		clock-frequency = <33333333>;
 		interrupt-parent = <&mpic>;
 		interrupts = <25 2>;
@@ -582,18 +587,21 @@
 
 				  0x01000000 0x0 0x00000000
 				  0x01000000 0x0 0x00000000
-				  0x0 0x00100000>;
+				  0x0 0x00010000>;
 		};
 	};
-	rapidio0: rapidio@f80c0000 {
+/*
+	rapidio0: rapidio@ffec0000 {
 		#address-cells = <2>;
 		#size-cells = <2>;
 		compatible = "fsl,rapidio-delta";
-		reg = <0xf80c0000 0x20000>;
-		ranges = <0 0 0xc0000000 0 0x20000000>;
+		reg = <0xffec0000 0x20000>;
+		ranges = <0 0 0x80000000 0 0x20000000>;
 		interrupt-parent = <&mpic>;
-		/* err_irq bell_outb_irq bell_inb_irq
-			msg1_tx_irq msg1_rx_irq	msg2_tx_irq msg2_rx_irq */
+		// err_irq bell_outb_irq bell_inb_irq
+		//	msg1_tx_irq msg1_rx_irq	msg2_tx_irq msg2_rx_irq
 		interrupts = <48 2 49 2 50 2 53 2 54 2 55 2 56 2>;
 	};
+*/
+
 };
diff --git a/arch/powerpc/boot/dts/sequoia.dts b/arch/powerpc/boot/dts/sequoia.dts
index 3b295e8..43cc68b 100644
--- a/arch/powerpc/boot/dts/sequoia.dts
+++ b/arch/powerpc/boot/dts/sequoia.dts
@@ -134,7 +134,7 @@
 		};
 
 		USB1: usb@e0000400 {
-			compatible = "ohci-be";
+			compatible = "ibm,usb-ohci-440epx", "ohci-be";
 			reg = <0x00000000 0xe0000400 0x00000060>;
 			interrupt-parent = <&UIC0>;
 			interrupts = <0x15 0x8>;
diff --git a/arch/powerpc/boot/install.sh b/arch/powerpc/boot/install.sh
index b002bfd..51b2387 100644
--- a/arch/powerpc/boot/install.sh
+++ b/arch/powerpc/boot/install.sh
@@ -15,7 +15,7 @@
 #   $2 - kernel image file
 #   $3 - kernel map file
 #   $4 - default install path (blank if root directory)
-#   $5 - kernel boot file, the zImage
+#   $5 and more - kernel boot files; zImage*, uImage, cuImage.*, etc.
 #
 
 # User may have a custom install script
@@ -38,3 +38,15 @@
 
 cat $2 > $4/$image_name
 cp $3 $4/System.map
+
+# Copy all the bootable image files
+path=$4
+shift 4
+while [ $# -ne 0 ]; do
+	image_name=`basename $1`
+	if [ -f $path/$image_name ]; then
+		mv $path/$image_name $path/$image_name.old
+	fi
+	cat $1 > $path/$image_name
+	shift
+done;
diff --git a/arch/powerpc/configs/85xx/mpc8572_ds_defconfig b/arch/powerpc/configs/85xx/mpc8572_ds_defconfig
index 6355883..32aeb79 100644
--- a/arch/powerpc/configs/85xx/mpc8572_ds_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8572_ds_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.28-rc3
-# Sat Nov  8 12:40:13 2008
+# Linux kernel version: 2.6.28-rc8
+# Tue Dec 30 11:17:46 2008
 #
 # CONFIG_PPC64 is not set
 
@@ -21,7 +21,10 @@
 CONFIG_FSL_EMB_PERFMON=y
 # CONFIG_PHYS_64BIT is not set
 CONFIG_SPE=y
+CONFIG_PPC_MMU_NOHASH=y
 # CONFIG_PPC_MM_SLICES is not set
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
 CONFIG_PPC32=y
 CONFIG_WORD_SIZE=32
 # CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
@@ -50,7 +53,7 @@
 CONFIG_PPC_OF=y
 CONFIG_OF=y
 CONFIG_PPC_UDBG_16550=y
-# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_GENERIC_TBSYNC=y
 CONFIG_AUDIT_ARCH=y
 CONFIG_GENERIC_BUG=y
 CONFIG_DEFAULT_UIMAGE=y
@@ -62,7 +65,7 @@
 # General setup
 #
 CONFIG_EXPERIMENTAL=y
-CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
 CONFIG_INIT_ENV_ARG_LIMIT=32
 CONFIG_LOCALVERSION=""
 CONFIG_LOCALVERSION_AUTO=y
@@ -126,6 +129,7 @@
 CONFIG_HAVE_KPROBES=y
 CONFIG_HAVE_KRETPROBES=y
 CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
 # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
@@ -138,6 +142,7 @@
 CONFIG_MODVERSIONS=y
 # CONFIG_MODULE_SRCVERSION_ALL is not set
 CONFIG_KMOD=y
+CONFIG_STOP_MACHINE=y
 CONFIG_BLOCK=y
 CONFIG_LBD=y
 # CONFIG_BLK_DEV_IO_TRACE is not set
@@ -197,6 +202,7 @@
 # CONFIG_CPM2 is not set
 CONFIG_FSL_ULI1575=y
 # CONFIG_MPC8xxx_GPIO is not set
+# CONFIG_SIMPLE_GPIO is not set
 
 #
 # Kernel options
@@ -224,6 +230,7 @@
 CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
 CONFIG_ARCH_HAS_WALK_MEMORY=y
 CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+# CONFIG_IRQ_ALL_CPUS is not set
 CONFIG_ARCH_FLATMEM_ENABLE=y
 CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_SELECT_MEMORY_MODEL=y
@@ -241,6 +248,9 @@
 CONFIG_BOUNCE=y
 CONFIG_VIRT_TO_BUS=y
 CONFIG_UNEVICTABLE_LRU=y
+CONFIG_PPC_4K_PAGES=y
+# CONFIG_PPC_16K_PAGES is not set
+# CONFIG_PPC_64K_PAGES is not set
 CONFIG_FORCE_MAX_ZONEORDER=11
 CONFIG_PROC_DEVICETREE=y
 # CONFIG_CMDLINE_BOOL is not set
@@ -443,8 +453,10 @@
 # CONFIG_EEPROM_93CX6 is not set
 # CONFIG_SGI_IOC4 is not set
 # CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
 # CONFIG_HP_ILO is not set
+# CONFIG_C2PORT is not set
 CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
@@ -784,6 +796,7 @@
 CONFIG_UNIX98_PTYS=y
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_HVC_UDBG is not set
 # CONFIG_IPMI_HANDLER is not set
 CONFIG_HW_RANDOM=y
 CONFIG_NVRAM=y
@@ -869,11 +882,11 @@
 # CONFIG_THERMAL is not set
 # CONFIG_THERMAL_HWMON is not set
 # CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
 
 #
 # Sonics Silicon Backplane
 #
-CONFIG_SSB_POSSIBLE=y
 # CONFIG_SSB is not set
 
 #
@@ -886,14 +899,7 @@
 # CONFIG_PMIC_DA903X is not set
 # CONFIG_MFD_WM8400 is not set
 # CONFIG_MFD_WM8350_I2C is not set
-
-#
-# Voltage and Current regulators
-#
 # CONFIG_REGULATOR is not set
-# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
-# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
-# CONFIG_REGULATOR_BQ24022 is not set
 
 #
 # Multimedia devices
@@ -1252,11 +1258,11 @@
 # CONFIG_USB_TMC is not set
 
 #
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
 #
 
 #
-# may also be needed; see USB_STORAGE Help for more information
+# see USB_STORAGE Help for more information
 #
 CONFIG_USB_STORAGE=y
 # CONFIG_USB_STORAGE_DEBUG is not set
@@ -1348,6 +1354,7 @@
 # CONFIG_RTC_DRV_M41T80 is not set
 # CONFIG_RTC_DRV_S35390A is not set
 # CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
 
 #
 # SPI RTC drivers
@@ -1624,6 +1631,7 @@
 # CONFIG_SAMPLES is not set
 CONFIG_HAVE_ARCH_KGDB=y
 # CONFIG_KGDB is not set
+CONFIG_PRINT_STACK_DEPTH=64
 # CONFIG_DEBUG_STACKOVERFLOW is not set
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_DEBUG_PAGEALLOC is not set
@@ -1649,11 +1657,16 @@
 #
 # CONFIG_CRYPTO_FIPS is not set
 CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
 CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
 CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
 CONFIG_CRYPTO_HASH=y
-CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
 CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
 # CONFIG_CRYPTO_GF128MUL is not set
 # CONFIG_CRYPTO_NULL is not set
 # CONFIG_CRYPTO_CRYPTD is not set
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
index cd1ffa44..391874c 100644
--- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
@@ -1164,6 +1164,7 @@
 # CONFIG_SOFT_WATCHDOG is not set
 # CONFIG_ALIM7101_WDT is not set
 # CONFIG_8xxx_WDT is not set
+CONFIG_GEF_WDT=y
 
 #
 # PCI-based Watchdog Cards
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 069ae1b..d4685d1 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -211,11 +211,28 @@
 CONFIG_PPC_PASEMI_IOMMU=y
 # CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE is not set
 CONFIG_PPC_PASEMI_MDIO=y
-# CONFIG_PPC_PS3 is not set
+CONFIG_PPC_PS3=y
+
+#
+# PS3 Platform Options
+#
+# CONFIG_PS3_ADVANCED is not set
+CONFIG_PS3_HTAB_SIZE=20
+# CONFIG_PS3_DYNAMIC_DMA is not set
+CONFIG_PS3_VUART=y
+CONFIG_PS3_PS3AV=y
+CONFIG_PS3_SYS_MANAGER=y
+CONFIG_PS3_STORAGE=m
+CONFIG_PS3_DISK=m
+CONFIG_PS3_ROM=m
+CONFIG_PS3_FLASH=m
+CONFIG_PS3_LPM=m
 CONFIG_PPC_CELL=y
+CONFIG_PPC_CELL_COMMON=y
 CONFIG_PPC_CELL_NATIVE=y
 CONFIG_PPC_IBM_CELL_BLADE=y
 CONFIG_PPC_CELLEB=y
+CONFIG_PPC_CELL_QPACE=y
 
 #
 # Cell Broadband Engine options
@@ -981,6 +998,9 @@
 CONFIG_TIGON3=y
 # CONFIG_BNX2 is not set
 CONFIG_SPIDER_NET=m
+CONFIG_GELIC_NET=m
+CONFIG_GELIC_WIRELESS=y
+# CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE is not set
 # CONFIG_QLA3XXX is not set
 # CONFIG_ATL1 is not set
 # CONFIG_ATL1E is not set
@@ -1370,6 +1390,8 @@
 # CONFIG_FB_PM3 is not set
 # CONFIG_FB_CARMINE is not set
 CONFIG_FB_IBM_GXT4500=y
+CONFIG_FB_PS3=m
+CONFIG_FB_PS3_DEFAULT_SIZE_M=9
 # CONFIG_FB_VIRTUAL is not set
 # CONFIG_FB_METRONOME is not set
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -1492,6 +1514,8 @@
 CONFIG_SND_PPC=y
 CONFIG_SND_POWERMAC=m
 CONFIG_SND_POWERMAC_AUTO_DRC=y
+CONFIG_SND_PS3=m
+CONFIG_SND_PS3_DEFAULT_START_DELAY=2000
 CONFIG_SND_AOA=m
 CONFIG_SND_AOA_FABRIC_LAYOUT=m
 CONFIG_SND_AOA_ONYX=m
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 9268602..5ab7d7f 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -35,4 +35,3 @@
 unifdef-y += termios.h
 unifdef-y += types.h
 unifdef-y += unistd.h
-unifdef-y += swab.h
diff --git a/arch/powerpc/include/asm/byteorder.h b/arch/powerpc/include/asm/byteorder.h
index 5cca27a..aa6cc4f 100644
--- a/arch/powerpc/include/asm/byteorder.h
+++ b/arch/powerpc/include/asm/byteorder.h
@@ -7,8 +7,6 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
-
-#include <asm/swab.h>
 #include <linux/byteorder/big_endian.h>
 
 #endif /* _ASM_POWERPC_BYTEORDER_H */
diff --git a/arch/powerpc/include/asm/cell-pmu.h b/arch/powerpc/include/asm/cell-pmu.h
index 8066eed..b4b7338 100644
--- a/arch/powerpc/include/asm/cell-pmu.h
+++ b/arch/powerpc/include/asm/cell-pmu.h
@@ -37,9 +37,11 @@
 #define CBE_PM_STOP_AT_MAX                 0x40000000
 #define CBE_PM_TRACE_MODE_GET(pm_control)  (((pm_control) >> 28) & 0x3)
 #define CBE_PM_TRACE_MODE_SET(mode)        (((mode)  & 0x3) << 28)
+#define CBE_PM_TRACE_BUF_OVFLW(bit)        (((bit) & 0x1) << 17)
 #define CBE_PM_COUNT_MODE_SET(count)       (((count) & 0x3) << 18)
 #define CBE_PM_FREEZE_ALL_CTRS             0x00100000
 #define CBE_PM_ENABLE_EXT_TRACE            0x00008000
+#define CBE_PM_SPU_ADDR_TRACE_SET(msk)     (((msk) & 0x3) << 9)
 
 /* Macros for the trace_address register. */
 #define CBE_PM_TRACE_BUF_FULL              0x00000800
diff --git a/arch/powerpc/include/asm/ioctls.h b/arch/powerpc/include/asm/ioctls.h
index 279a622..1842186d 100644
--- a/arch/powerpc/include/asm/ioctls.h
+++ b/arch/powerpc/include/asm/ioctls.h
@@ -89,6 +89,8 @@
 #define TIOCSBRK	0x5427  /* BSD compatibility */
 #define TIOCCBRK	0x5428  /* BSD compatibility */
 #define TIOCGSID	0x5429  /* Return the session ID of FD */
+#define TIOCGRS485	0x542e
+#define TIOCSRS485	0x542f
 #define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
 #define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */
 
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 6dbffc9..7e06b43 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -48,63 +48,8 @@
 {
 	if (oldregs)
 		memcpy(newregs, oldregs, sizeof(*newregs));
-#ifdef __powerpc64__
-	else {
-		/* FIXME Merge this with xmon_save_regs ?? */
-		unsigned long tmp1, tmp2;
-		__asm__ __volatile__ (
-			"std    0,0(%2)\n"
-			"std    1,8(%2)\n"
-			"std    2,16(%2)\n"
-			"std    3,24(%2)\n"
-			"std    4,32(%2)\n"
-			"std    5,40(%2)\n"
-			"std    6,48(%2)\n"
-			"std    7,56(%2)\n"
-			"std    8,64(%2)\n"
-			"std    9,72(%2)\n"
-			"std    10,80(%2)\n"
-			"std    11,88(%2)\n"
-			"std    12,96(%2)\n"
-			"std    13,104(%2)\n"
-			"std    14,112(%2)\n"
-			"std    15,120(%2)\n"
-			"std    16,128(%2)\n"
-			"std    17,136(%2)\n"
-			"std    18,144(%2)\n"
-			"std    19,152(%2)\n"
-			"std    20,160(%2)\n"
-			"std    21,168(%2)\n"
-			"std    22,176(%2)\n"
-			"std    23,184(%2)\n"
-			"std    24,192(%2)\n"
-			"std    25,200(%2)\n"
-			"std    26,208(%2)\n"
-			"std    27,216(%2)\n"
-			"std    28,224(%2)\n"
-			"std    29,232(%2)\n"
-			"std    30,240(%2)\n"
-			"std    31,248(%2)\n"
-			"mfmsr  %0\n"
-			"std    %0, 264(%2)\n"
-			"mfctr  %0\n"
-			"std    %0, 280(%2)\n"
-			"mflr   %0\n"
-			"std    %0, 288(%2)\n"
-			"bl     1f\n"
-		"1:     mflr   %1\n"
-			"std    %1, 256(%2)\n"
-			"mtlr   %0\n"
-			"mfxer  %0\n"
-			"std    %0, 296(%2)\n"
-			: "=&r" (tmp1), "=&r" (tmp2)
-			: "b" (newregs)
-			: "memory");
-	}
-#else
 	else
 		ppc_save_regs(newregs);
-#endif /* __powerpc64__ */
 }
 
 extern void kexec_smp_wait(void);	/* get and clear naca physid, wait for
diff --git a/arch/powerpc/include/asm/oprofile_impl.h b/arch/powerpc/include/asm/oprofile_impl.h
index 95035c6..639dc96 100644
--- a/arch/powerpc/include/asm/oprofile_impl.h
+++ b/arch/powerpc/include/asm/oprofile_impl.h
@@ -32,6 +32,12 @@
 	unsigned long mmcr0;
 	unsigned long mmcr1;
 	unsigned long mmcra;
+#ifdef CONFIG_OPROFILE_CELL
+	/* Register for oprofile user tool to check cell kernel profiling
+	 * suport.
+	 * support.
+	unsigned long cell_support;
+#endif
 #endif
 	unsigned long enable_kernel;
 	unsigned long enable_user;
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index cff30c0..67f1812 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -103,10 +103,10 @@
 	int (*map)(struct ps3_dma_region *,
 		   unsigned long virt_addr,
 		   unsigned long len,
-		   unsigned long *bus_addr,
+		   dma_addr_t *bus_addr,
 		   u64 iopte_pp);
 	int (*unmap)(struct ps3_dma_region *,
-		     unsigned long bus_addr,
+		     dma_addr_t bus_addr,
 		     unsigned long len);
 };
 /**
@@ -124,9 +124,9 @@
 int ps3_dma_region_create(struct ps3_dma_region *r);
 int ps3_dma_region_free(struct ps3_dma_region *r);
 int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
-	unsigned long len, unsigned long *bus_addr,
+	unsigned long len, dma_addr_t *bus_addr,
 	u64 iopte_pp);
-int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
+int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
 	unsigned long len);
 
 /* mmio routines */
@@ -320,6 +320,7 @@
 
 enum ps3_match_sub_id {
 	PS3_MATCH_SUB_ID_GPU_FB		= 1,
+	PS3_MATCH_SUB_ID_GPU_RAMDISK	= 2,
 };
 
 #define PS3_MODULE_ALIAS_EHCI		"ps3:1:0"
@@ -332,6 +333,7 @@
 #define PS3_MODULE_ALIAS_STOR_FLASH	"ps3:8:0"
 #define PS3_MODULE_ALIAS_SOUND		"ps3:9:0"
 #define PS3_MODULE_ALIAS_GPU_FB		"ps3:10:1"
+#define PS3_MODULE_ALIAS_GPU_RAMDISK	"ps3:10:2"
 #define PS3_MODULE_ALIAS_LPM		"ps3:11:0"
 
 enum ps3_system_bus_device_type {
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h
index edee15d..2701753 100644
--- a/arch/powerpc/include/asm/qe.h
+++ b/arch/powerpc/include/asm/qe.h
@@ -17,6 +17,8 @@
 #ifdef __KERNEL__
 
 #include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/err.h>
 #include <asm/cpm.h>
 #include <asm/immap_qe.h>
 
@@ -84,7 +86,11 @@
 extern spinlock_t cmxgcr_lock;
 
 /* Export QE common operations */
+#ifdef CONFIG_QUICC_ENGINE
 extern void __init qe_reset(void);
+#else
+static inline void qe_reset(void) {}
+#endif
 
 /* QE PIO */
 #define QE_PIO_PINS 32
@@ -101,16 +107,43 @@
 #endif
 };
 
-extern int par_io_init(struct device_node *np);
-extern int par_io_of_config(struct device_node *np);
 #define QE_PIO_DIR_IN	2
 #define QE_PIO_DIR_OUT	1
 extern void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin,
 				int dir, int open_drain, int assignment,
 				int has_irq);
+#ifdef CONFIG_QUICC_ENGINE
+extern int par_io_init(struct device_node *np);
+extern int par_io_of_config(struct device_node *np);
 extern int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
 			     int assignment, int has_irq);
 extern int par_io_data_set(u8 port, u8 pin, u8 val);
+#else
+static inline int par_io_init(struct device_node *np) { return -ENOSYS; }
+static inline int par_io_of_config(struct device_node *np) { return -ENOSYS; }
+static inline int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
+		int assignment, int has_irq) { return -ENOSYS; }
+static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; }
+#endif /* CONFIG_QUICC_ENGINE */
+
+/*
+ * Pin multiplexing functions.
+ */
+struct qe_pin;
+#ifdef CONFIG_QE_GPIO
+extern struct qe_pin *qe_pin_request(struct device_node *np, int index);
+extern void qe_pin_free(struct qe_pin *qe_pin);
+extern void qe_pin_set_gpio(struct qe_pin *qe_pin);
+extern void qe_pin_set_dedicated(struct qe_pin *pin);
+#else
+static inline struct qe_pin *qe_pin_request(struct device_node *np, int index)
+{
+	return ERR_PTR(-ENOSYS);
+}
+static inline void qe_pin_free(struct qe_pin *qe_pin) {}
+static inline void qe_pin_set_gpio(struct qe_pin *qe_pin) {}
+static inline void qe_pin_set_dedicated(struct qe_pin *pin) {}
+#endif /* CONFIG_QE_GPIO */
 
 /* QE internal API */
 int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input);
@@ -591,7 +624,7 @@
 #define UCC_GETH_UCCE_RXF1      0x00000002
 #define UCC_GETH_UCCE_RXF0      0x00000001
 
-/* UPSMR, when used as a UART */
+/* UCC Protocol Specific Mode Register (UPSMR), when used for UART */
 #define UCC_UART_UPSMR_FLC		0x8000
 #define UCC_UART_UPSMR_SL		0x4000
 #define UCC_UART_UPSMR_CL_MASK		0x3000
@@ -619,6 +652,23 @@
 #define UCC_UART_UPSMR_TPM_EVEN		0x0002
 #define UCC_UART_UPSMR_TPM_HIGH		0x0003
 
+/* UCC Protocol Specific Mode Register (UPSMR), when used for Ethernet */
+#define UCC_GETH_UPSMR_FTFE     0x80000000
+#define UCC_GETH_UPSMR_PTPE     0x40000000
+#define UCC_GETH_UPSMR_ECM      0x04000000
+#define UCC_GETH_UPSMR_HSE      0x02000000
+#define UCC_GETH_UPSMR_PRO      0x00400000
+#define UCC_GETH_UPSMR_CAP      0x00200000
+#define UCC_GETH_UPSMR_RSH      0x00100000
+#define UCC_GETH_UPSMR_RPM      0x00080000
+#define UCC_GETH_UPSMR_R10M     0x00040000
+#define UCC_GETH_UPSMR_RLPB     0x00020000
+#define UCC_GETH_UPSMR_TBIM     0x00010000
+#define UCC_GETH_UPSMR_RES1     0x00002000
+#define UCC_GETH_UPSMR_RMM      0x00001000
+#define UCC_GETH_UPSMR_CAM      0x00000400
+#define UCC_GETH_UPSMR_BRO      0x00000200
+
 /* UCC Transmit On Demand Register (UTODR) */
 #define UCC_SLOW_TOD	0x8000
 #define UCC_FAST_TOD	0x8000
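Because the !CONFIG_QUICC_ENGINE and !CONFIG_QE_GPIO variants either return errors (-ENOSYS, ERR_PTR(-ENOSYS)) or do nothing, callers need no #ifdefs. A minimal sketch (illustrative only, names hypothetical):

#include <linux/err.h>
#include <asm/qe.h>

static int example_claim_pin(struct device_node *np)
{
	struct qe_pin *pin = qe_pin_request(np, 0);

	if (IS_ERR(pin))
		return PTR_ERR(pin);	/* -ENOSYS when QE GPIO is not built in */

	qe_pin_set_gpio(pin);		/* real call only when CONFIG_QE_GPIO is set */
	return 0;
}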
diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h
index 56a7745..cf51966 100644
--- a/arch/powerpc/include/asm/qe_ic.h
+++ b/arch/powerpc/include/asm/qe_ic.h
@@ -17,6 +17,9 @@
 
 #include <linux/irq.h>
 
+struct device_node;
+struct qe_ic;
+
 #define NUM_OF_QE_IC_GROUPS	6
 
 /* Flags when we init the QE IC */
@@ -54,17 +57,27 @@
 	QE_IC_GRP_RISCB		/* QE interrupt controller RISC group B */
 };
 
+#ifdef CONFIG_QUICC_ENGINE
 void qe_ic_init(struct device_node *node, unsigned int flags,
 		void (*low_handler)(unsigned int irq, struct irq_desc *desc),
 		void (*high_handler)(unsigned int irq, struct irq_desc *desc));
+unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
+unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
+#else
+static inline void qe_ic_init(struct device_node *node, unsigned int flags,
+		void (*low_handler)(unsigned int irq, struct irq_desc *desc),
+		void (*high_handler)(unsigned int irq, struct irq_desc *desc))
+{}
+static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
+{ return 0; }
+static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
+{ return 0; }
+#endif /* CONFIG_QUICC_ENGINE */
+
 void qe_ic_set_highest_priority(unsigned int virq, int high);
 int qe_ic_set_priority(unsigned int virq, unsigned int priority);
 int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
 
-struct qe_ic;
-unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
-unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
-
 static inline void qe_ic_cascade_low_ipic(unsigned int irq,
 					  struct irq_desc *desc)
 {
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index e0175be..0aa0315 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -18,7 +18,7 @@
  */
 
 #define RTAS_UNKNOWN_SERVICE (-1)
-#define RTAS_INSTANTIATE_MAX (1UL<<30) /* Don't instantiate rtas at/above this value */
+#define RTAS_INSTANTIATE_MAX (1ULL<<30) /* Don't instantiate rtas at/above this value */
 
 /* Buffer size for ppc_rtas system call. */
 #define RTAS_RMOBUF_MAX (64 * 1024)
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 8b2eb04..0ab8d869 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -128,7 +128,7 @@
 	int number;
 	unsigned int irqs[3];
 	u32 node;
-	u64 flags;
+	unsigned long flags;
 	u64 class_0_pending;
 	u64 class_0_dar;
 	u64 class_1_dar;
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 803def23..72353f6 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -92,7 +92,7 @@
 SYSCALL(uselib)
 SYSCALL(swapon)
 SYSCALL(reboot)
-SYSX(sys_ni_syscall,compat_sys_old_readdir,old_readdir)
+SYSX(sys_ni_syscall,compat_sys_old_readdir,sys_old_readdir)
 SYSCALL_SPU(mmap)
 SYSCALL_SPU(munmap)
 SYSCALL_SPU(truncate)
diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h
index c004c13..7ce27a5 100644
--- a/arch/powerpc/include/asm/types.h
+++ b/arch/powerpc/include/asm/types.h
@@ -1,7 +1,12 @@
 #ifndef _ASM_POWERPC_TYPES_H
 #define _ASM_POWERPC_TYPES_H
 
-#ifdef __powerpc64__
+/*
+ * This is here because we used to use l64 for 64-bit powerpc
+ * and we don't want to impact user mode with our change to ll64
+ * in the kernel.
+ */
+#if defined(__powerpc64__) && !defined(__KERNEL__)
 # include <asm-generic/int-l64.h>
 #else
 # include <asm-generic/int-ll64.h>
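
Note: switching the kernel to int-ll64.h means u64 is 'unsigned long long' even on 64-bit powerpc, which is exactly why many printk format strings later in this patch move from %lx to %llx. A small illustration, not part of the patch:

	u64 addr = 0x10000000ULL;

	printk(KERN_INFO "addr: 0x%llx\n", addr);                /* matches ll64 u64 */
	printk(KERN_INFO "addr: 0x%lx\n", (unsigned long)addr);  /* or cast explicitly */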
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 1308a86..8d1a419 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -29,7 +29,7 @@
 obj-y				:= cputable.o ptrace.o syscalls.o \
 				   irq.o align.o signal_32.o pmc.o vdso.o \
 				   init_task.o process.o systbl.o idle.o \
-				   signal.o sysfs.o
+				   signal.o sysfs.o cacheinfo.o
 obj-y				+= vdso32/
 obj-$(CONFIG_PPC64)		+= setup_64.o sys_ppc32.o \
 				   signal_64.o ptrace32.o \
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9937fe4..19ee491 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -56,6 +56,10 @@
 #include "head_booke.h"
 #endif
 
+#if defined(CONFIG_FSL_BOOKE)
+#include "../mm/mmu_decl.h"
+#endif
+
 int main(void)
 {
 	DEFINE(THREAD, offsetof(struct task_struct, thread));
@@ -382,6 +386,9 @@
 	DEFINE(PGD_T_LOG2, PGD_T_LOG2);
 	DEFINE(PTE_T_LOG2, PTE_T_LOG2);
 #endif
+#ifdef CONFIG_FSL_BOOKE
+	DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
+#endif
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
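
Note: the TLBCAM_SIZE DEFINE() exposes sizeof(struct tlbcam) to assembly through the generated asm-offsets.h, roughly as below (the concrete value assumes the current five-u32 layout, 5 * 4 = 20 bytes):

	#define TLBCAM_SIZE 20 /* sizeof(struct tlbcam) */

That is what lets the head_fsl_booke.S hunk further down replace the hand-maintained 'mulli r5,r3,20' with 'mulli r5,r3,TLBCAM_SIZE'.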
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
new file mode 100644
index 0000000..bb37b1d
--- /dev/null
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -0,0 +1,837 @@
+/*
+ * Processor cache information made available to userspace via sysfs;
+ * intended to be compatible with x86 intel_cacheinfo implementation.
+ *
+ * Copyright 2008 IBM Corporation
+ * Author: Nathan Lynch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/percpu.h>
+#include <asm/prom.h>
+
+#include "cacheinfo.h"
+
+/* per-cpu object for tracking:
+ * - a "cache" kobject for the top-level directory
+ * - a list of "index" objects representing the cpu's local cache hierarchy
+ */
+struct cache_dir {
+	struct kobject *kobj; /* bare (not embedded) kobject for cache
+			       * directory */
+	struct cache_index_dir *index; /* list of index objects */
+};
+
+/* "index" object: each cpu's cache directory has an index
+ * subdirectory corresponding to a cache object associated with the
+ * cpu.  This object's lifetime is managed via the embedded kobject.
+ */
+struct cache_index_dir {
+	struct kobject kobj;
+	struct cache_index_dir *next; /* next index in parent directory */
+	struct cache *cache;
+};
+
+/* Template for determining which OF properties to query for a given
+ * cache type */
+struct cache_type_info {
+	const char *name;
+	const char *size_prop;
+
+	/* Allow for both [di]-cache-line-size and
+	 * [di]-cache-block-size properties.  According to the PowerPC
+	 * Processor binding, -line-size should be provided if it
+	 * differs from the cache block size (that which is operated
+	 * on by cache instructions), so we look for -line-size first.
+	 * See cache_get_line_size(). */
+
+	const char *line_size_props[2];
+	const char *nr_sets_prop;
+};
+
+/* These are used to index the cache_type_info array. */
+#define CACHE_TYPE_UNIFIED     0
+#define CACHE_TYPE_INSTRUCTION 1
+#define CACHE_TYPE_DATA        2
+
+static const struct cache_type_info cache_type_info[] = {
+	{
+		/* PowerPC Processor binding says the [di]-cache-*
+		 * must be equal on unified caches, so just use
+		 * d-cache properties. */
+		.name            = "Unified",
+		.size_prop       = "d-cache-size",
+		.line_size_props = { "d-cache-line-size",
+				     "d-cache-block-size", },
+		.nr_sets_prop    = "d-cache-sets",
+	},
+	{
+		.name            = "Instruction",
+		.size_prop       = "i-cache-size",
+		.line_size_props = { "i-cache-line-size",
+				     "i-cache-block-size", },
+		.nr_sets_prop    = "i-cache-sets",
+	},
+	{
+		.name            = "Data",
+		.size_prop       = "d-cache-size",
+		.line_size_props = { "d-cache-line-size",
+				     "d-cache-block-size", },
+		.nr_sets_prop    = "d-cache-sets",
+	},
+};
+
+/* Cache object: each instance of this corresponds to a distinct cache
+ * in the system.  There are separate objects for Harvard caches: one
+ * each for instruction and data, and each refers to the same OF node.
+ * The refcount of the OF node is elevated for the lifetime of the
+ * cache object.  A cache object is released when its shared_cpu_map
+ * is cleared (see cache_cpu_clear).
+ *
+ * A cache object is on two lists: an unsorted global list
+ * (cache_list) of cache objects; and a singly-linked list
+ * representing the local cache hierarchy, which is ordered by level
+ * (e.g. L1d -> L1i -> L2 -> L3).
+ */
+struct cache {
+	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
+	struct cpumask shared_cpu_map; /* online CPUs using this cache */
+	int type;                      /* split cache disambiguation */
+	int level;                     /* level not explicit in device tree */
+	struct list_head list;         /* global list of cache objects */
+	struct cache *next_local;      /* next cache of >= level */
+};
+
+static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);
+
+/* traversal/modification of this list occurs only at cpu hotplug time;
+ * access is serialized by cpu hotplug locking
+ */
+static LIST_HEAD(cache_list);
+
+static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
+{
+	return container_of(k, struct cache_index_dir, kobj);
+}
+
+static const char *cache_type_string(const struct cache *cache)
+{
+	return cache_type_info[cache->type].name;
+}
+
+static void __cpuinit cache_init(struct cache *cache, int type, int level, struct device_node *ofnode)
+{
+	cache->type = type;
+	cache->level = level;
+	cache->ofnode = of_node_get(ofnode);
+	INIT_LIST_HEAD(&cache->list);
+	list_add(&cache->list, &cache_list);
+}
+
+static struct cache *__cpuinit new_cache(int type, int level, struct device_node *ofnode)
+{
+	struct cache *cache;
+
+	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+	if (cache)
+		cache_init(cache, type, level, ofnode);
+
+	return cache;
+}
+
+static void release_cache_debugcheck(struct cache *cache)
+{
+	struct cache *iter;
+
+	list_for_each_entry(iter, &cache_list, list)
+		WARN_ONCE(iter->next_local == cache,
+			  "cache for %s(%s) refers to cache for %s(%s)\n",
+			  iter->ofnode->full_name,
+			  cache_type_string(iter),
+			  cache->ofnode->full_name,
+			  cache_type_string(cache));
+}
+
+static void release_cache(struct cache *cache)
+{
+	if (!cache)
+		return;
+
+	pr_debug("freeing L%d %s cache for %s\n", cache->level,
+		 cache_type_string(cache), cache->ofnode->full_name);
+
+	release_cache_debugcheck(cache);
+	list_del(&cache->list);
+	of_node_put(cache->ofnode);
+	kfree(cache);
+}
+
+static void cache_cpu_set(struct cache *cache, int cpu)
+{
+	struct cache *next = cache;
+
+	while (next) {
+		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
+			  "CPU %i already accounted in %s(%s)\n",
+			  cpu, next->ofnode->full_name,
+			  cache_type_string(next));
+		cpumask_set_cpu(cpu, &next->shared_cpu_map);
+		next = next->next_local;
+	}
+}
+
+static int cache_size(const struct cache *cache, unsigned int *ret)
+{
+	const char *propname;
+	const u32 *cache_size;
+
+	propname = cache_type_info[cache->type].size_prop;
+
+	cache_size = of_get_property(cache->ofnode, propname, NULL);
+	if (!cache_size)
+		return -ENODEV;
+
+	*ret = *cache_size;
+	return 0;
+}
+
+static int cache_size_kb(const struct cache *cache, unsigned int *ret)
+{
+	unsigned int size;
+
+	if (cache_size(cache, &size))
+		return -ENODEV;
+
+	*ret = size / 1024;
+	return 0;
+}
+
+/* not cache_line_size() because that's a macro in include/linux/cache.h */
+static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
+{
+	const u32 *line_size;
+	int i, lim;
+
+	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);
+
+	for (i = 0; i < lim; i++) {
+		const char *propname;
+
+		propname = cache_type_info[cache->type].line_size_props[i];
+		line_size = of_get_property(cache->ofnode, propname, NULL);
+		if (line_size)
+			break;
+	}
+
+	if (!line_size)
+		return -ENODEV;
+
+	*ret = *line_size;
+	return 0;
+}
+
+static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
+{
+	const char *propname;
+	const u32 *nr_sets;
+
+	propname = cache_type_info[cache->type].nr_sets_prop;
+
+	nr_sets = of_get_property(cache->ofnode, propname, NULL);
+	if (!nr_sets)
+		return -ENODEV;
+
+	*ret = *nr_sets;
+	return 0;
+}
+
+static int cache_associativity(const struct cache *cache, unsigned int *ret)
+{
+	unsigned int line_size;
+	unsigned int nr_sets;
+	unsigned int size;
+
+	if (cache_nr_sets(cache, &nr_sets))
+		goto err;
+
+	/* If the cache is fully associative, there is no need to
+	 * check the other properties.
+	 */
+	if (nr_sets == 1) {
+		*ret = 0;
+		return 0;
+	}
+
+	if (cache_get_line_size(cache, &line_size))
+		goto err;
+	if (cache_size(cache, &size))
+		goto err;
+
+	if (!(nr_sets > 0 && size > 0 && line_size > 0))
+		goto err;
+
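+	/* Informational example (editorial, not from the original source):
+	 * a 32 KiB cache with 64-byte lines and 64 sets gives
+	 * (32768 / 64) / 64 = 8, i.e. an 8-way set-associative cache. */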
+	*ret = (size / nr_sets) / line_size;
+	return 0;
+err:
+	return -ENODEV;
+}
+
+/* helper for dealing with split caches */
+static struct cache *cache_find_first_sibling(struct cache *cache)
+{
+	struct cache *iter;
+
+	if (cache->type == CACHE_TYPE_UNIFIED)
+		return cache;
+
+	list_for_each_entry(iter, &cache_list, list)
+		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
+			return iter;
+
+	return cache;
+}
+
+/* return the first cache on a local list matching node */
+static struct cache *cache_lookup_by_node(const struct device_node *node)
+{
+	struct cache *cache = NULL;
+	struct cache *iter;
+
+	list_for_each_entry(iter, &cache_list, list) {
+		if (iter->ofnode != node)
+			continue;
+		cache = cache_find_first_sibling(iter);
+		break;
+	}
+
+	return cache;
+}
+
+static bool cache_node_is_unified(const struct device_node *np)
+{
+	return of_get_property(np, "cache-unified", NULL);
+}
+
+static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level)
+{
+	struct cache *cache;
+
+	pr_debug("creating L%d ucache for %s\n", level, node->full_name);
+
+	cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
+
+	return cache;
+}
+
+static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level)
+{
+	struct cache *dcache, *icache;
+
+	pr_debug("creating L%d dcache and icache for %s\n", level,
+		 node->full_name);
+
+	dcache = new_cache(CACHE_TYPE_DATA, level, node);
+	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);
+
+	if (!dcache || !icache)
+		goto err;
+
+	dcache->next_local = icache;
+
+	return dcache;
+err:
+	release_cache(dcache);
+	release_cache(icache);
+	return NULL;
+}
+
+static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, int level)
+{
+	struct cache *cache;
+
+	if (cache_node_is_unified(node))
+		cache = cache_do_one_devnode_unified(node, level);
+	else
+		cache = cache_do_one_devnode_split(node, level);
+
+	return cache;
+}
+
+static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node, int level)
+{
+	struct cache *cache;
+
+	cache = cache_lookup_by_node(node);
+
+	WARN_ONCE(cache && cache->level != level,
+		  "cache level mismatch on lookup (got %d, expected %d)\n",
+		  cache->level, level);
+
+	if (!cache)
+		cache = cache_do_one_devnode(node, level);
+
+	return cache;
+}
+
+static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger)
+{
+	while (smaller->next_local) {
+		if (smaller->next_local == bigger)
+			return; /* already linked */
+		smaller = smaller->next_local;
+	}
+
+	smaller->next_local = bigger;
+}
+
+static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache)
+{
+	WARN_ON_ONCE(cache->level != 1);
+	WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
+}
+
+static void __cpuinit do_subsidiary_caches(struct cache *cache)
+{
+	struct device_node *subcache_node;
+	int level = cache->level;
+
+	do_subsidiary_caches_debugcheck(cache);
+
+	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
+		struct cache *subcache;
+
+		level++;
+		subcache = cache_lookup_or_instantiate(subcache_node, level);
+		of_node_put(subcache_node);
+		if (!subcache)
+			break;
+
+		link_cache_lists(cache, subcache);
+		cache = subcache;
+	}
+}
+
+static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id)
+{
+	struct device_node *cpu_node;
+	struct cache *cpu_cache = NULL;
+
+	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);
+
+	cpu_node = of_get_cpu_node(cpu_id, NULL);
+	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
+	if (!cpu_node)
+		goto out;
+
+	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
+	if (!cpu_cache)
+		goto out;
+
+	do_subsidiary_caches(cpu_cache);
+
+	cache_cpu_set(cpu_cache, cpu_id);
+out:
+	of_node_put(cpu_node);
+
+	return cpu_cache;
+}
+
+static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
+{
+	struct cache_dir *cache_dir;
+	struct sys_device *sysdev;
+	struct kobject *kobj = NULL;
+
+	sysdev = get_cpu_sysdev(cpu_id);
+	WARN_ONCE(!sysdev, "no sysdev for CPU %i\n", cpu_id);
+	if (!sysdev)
+		goto err;
+
+	kobj = kobject_create_and_add("cache", &sysdev->kobj);
+	if (!kobj)
+		goto err;
+
+	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
+	if (!cache_dir)
+		goto err;
+
+	cache_dir->kobj = kobj;
+
+	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);
+
+	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;
+
+	return cache_dir;
+err:
+	kobject_put(kobj);
+	return NULL;
+}
+
+static void cache_index_release(struct kobject *kobj)
+{
+	struct cache_index_dir *index;
+
+	index = kobj_to_cache_index_dir(kobj);
+
+	pr_debug("freeing index directory for L%d %s cache\n",
+		 index->cache->level, cache_type_string(index->cache));
+
+	kfree(index);
+}
+
+static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
+{
+	struct kobj_attribute *kobj_attr;
+
+	kobj_attr = container_of(attr, struct kobj_attribute, attr);
+
+	return kobj_attr->show(k, kobj_attr, buf);
+}
+
+static struct cache *index_kobj_to_cache(struct kobject *k)
+{
+	struct cache_index_dir *index;
+
+	index = kobj_to_cache_index_dir(k);
+
+	return index->cache;
+}
+
+static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	unsigned int size_kb;
+	struct cache *cache;
+
+	cache = index_kobj_to_cache(k);
+
+	if (cache_size_kb(cache, &size_kb))
+		return -ENODEV;
+
+	return sprintf(buf, "%uK\n", size_kb);
+}
+
+static struct kobj_attribute cache_size_attr =
+	__ATTR(size, 0444, size_show, NULL);
+
+
+static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	unsigned int line_size;
+	struct cache *cache;
+
+	cache = index_kobj_to_cache(k);
+
+	if (cache_get_line_size(cache, &line_size))
+		return -ENODEV;
+
+	return sprintf(buf, "%u\n", line_size);
+}
+
+static struct kobj_attribute cache_line_size_attr =
+	__ATTR(coherency_line_size, 0444, line_size_show, NULL);
+
+static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	unsigned int nr_sets;
+	struct cache *cache;
+
+	cache = index_kobj_to_cache(k);
+
+	if (cache_nr_sets(cache, &nr_sets))
+		return -ENODEV;
+
+	return sprintf(buf, "%u\n", nr_sets);
+}
+
+static struct kobj_attribute cache_nr_sets_attr =
+	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);
+
+static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	unsigned int associativity;
+	struct cache *cache;
+
+	cache = index_kobj_to_cache(k);
+
+	if (cache_associativity(cache, &associativity))
+		return -ENODEV;
+
+	return sprintf(buf, "%u\n", associativity);
+}
+
+static struct kobj_attribute cache_assoc_attr =
+	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);
+
+static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	struct cache *cache;
+
+	cache = index_kobj_to_cache(k);
+
+	return sprintf(buf, "%s\n", cache_type_string(cache));
+}
+
+static struct kobj_attribute cache_type_attr =
+	__ATTR(type, 0444, type_show, NULL);
+
+static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	struct cache_index_dir *index;
+	struct cache *cache;
+
+	index = kobj_to_cache_index_dir(k);
+	cache = index->cache;
+
+	return sprintf(buf, "%d\n", cache->level);
+}
+
+static struct kobj_attribute cache_level_attr =
+	__ATTR(level, 0444, level_show, NULL);
+
+static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	struct cache_index_dir *index;
+	struct cache *cache;
+	int len;
+	int n = 0;
+
+	index = kobj_to_cache_index_dir(k);
+	cache = index->cache;
+	len = PAGE_SIZE - 2;
+
+	if (len > 1) {
+		n = cpumask_scnprintf(buf, len, &cache->shared_cpu_map);
+		buf[n++] = '\n';
+		buf[n] = '\0';
+	}
+	return n;
+}
+
+static struct kobj_attribute cache_shared_cpu_map_attr =
+	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
+
+/* Attributes which should always be created -- the kobject/sysfs core
+ * does this automatically via kobj_type->default_attrs.  This is the
+ * minimum data required to uniquely identify a cache.
+ */
+static struct attribute *cache_index_default_attrs[] = {
+	&cache_type_attr.attr,
+	&cache_level_attr.attr,
+	&cache_shared_cpu_map_attr.attr,
+	NULL,
+};
+
+/* Attributes which should be created if the cache device node has the
+ * right properties -- see cacheinfo_create_index_opt_attrs
+ */
+static struct kobj_attribute *cache_index_opt_attrs[] = {
+	&cache_size_attr,
+	&cache_line_size_attr,
+	&cache_nr_sets_attr,
+	&cache_assoc_attr,
+};
+
+static struct sysfs_ops cache_index_ops = {
+	.show = cache_index_show,
+};
+
+static struct kobj_type cache_index_type = {
+	.release = cache_index_release,
+	.sysfs_ops = &cache_index_ops,
+	.default_attrs = cache_index_default_attrs,
+};
+
+static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
+{
+	const char *cache_name;
+	const char *cache_type;
+	struct cache *cache;
+	char *buf;
+	int i;
+
+	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	cache = dir->cache;
+	cache_name = cache->ofnode->full_name;
+	cache_type = cache_type_string(cache);
+
+	/* We don't want to create an attribute that can't provide a
+	 * meaningful value.  Check the return value of each optional
+	 * attribute's ->show method before registering the
+	 * attribute.
+	 */
+	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
+		struct kobj_attribute *attr;
+		ssize_t rc;
+
+		attr = cache_index_opt_attrs[i];
+
+		rc = attr->show(&dir->kobj, attr, buf);
+		if (rc <= 0) {
+			pr_debug("not creating %s attribute for "
+				 "%s(%s) (rc = %zd)\n",
+				 attr->attr.name, cache_name,
+				 cache_type, rc);
+			continue;
+		}
+		if (sysfs_create_file(&dir->kobj, &attr->attr))
+			pr_debug("could not create %s attribute for %s(%s)\n",
+				 attr->attr.name, cache_name, cache_type);
+	}
+
+	kfree(buf);
+}
+
+static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir)
+{
+	struct cache_index_dir *index_dir;
+	int rc;
+
+	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
+	if (!index_dir)
+		goto err;
+
+	index_dir->cache = cache;
+
+	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
+				  cache_dir->kobj, "index%d", index);
+	if (rc)
+		goto err;
+
+	index_dir->next = cache_dir->index;
+	cache_dir->index = index_dir;
+
+	cacheinfo_create_index_opt_attrs(index_dir);
+
+	return;
+err:
+	kfree(index_dir);
+}
+
+static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache *cache_list)
+{
+	struct cache_dir *cache_dir;
+	struct cache *cache;
+	int index = 0;
+
+	cache_dir = cacheinfo_create_cache_dir(cpu_id);
+	if (!cache_dir)
+		return;
+
+	cache = cache_list;
+	while (cache) {
+		cacheinfo_create_index_dir(cache, index, cache_dir);
+		index++;
+		cache = cache->next_local;
+	}
+}
+
+void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id)
+{
+	struct cache *cache;
+
+	cache = cache_chain_instantiate(cpu_id);
+	if (!cache)
+		return;
+
+	cacheinfo_sysfs_populate(cpu_id, cache);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU /* functions needed for cpu offline */
+
+static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
+{
+	struct device_node *cpu_node;
+	struct cache *cache;
+
+	cpu_node = of_get_cpu_node(cpu_id, NULL);
+	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
+	if (!cpu_node)
+		return NULL;
+
+	cache = cache_lookup_by_node(cpu_node);
+	of_node_put(cpu_node);
+
+	return cache;
+}
+
+static void remove_index_dirs(struct cache_dir *cache_dir)
+{
+	struct cache_index_dir *index;
+
+	index = cache_dir->index;
+
+	while (index) {
+		struct cache_index_dir *next;
+
+		next = index->next;
+		kobject_put(&index->kobj);
+		index = next;
+	}
+}
+
+static void remove_cache_dir(struct cache_dir *cache_dir)
+{
+	remove_index_dirs(cache_dir);
+
+	kobject_put(cache_dir->kobj);
+
+	kfree(cache_dir);
+}
+
+static void cache_cpu_clear(struct cache *cache, int cpu)
+{
+	while (cache) {
+		struct cache *next = cache->next_local;
+
+		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
+			  "CPU %i not accounted in %s(%s)\n",
+			  cpu, cache->ofnode->full_name,
+			  cache_type_string(cache));
+
+		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);
+
+		/* Release the cache object if all the cpus using it
+		 * are offline */
+		if (cpumask_empty(&cache->shared_cpu_map))
+			release_cache(cache);
+
+		cache = next;
+	}
+}
+
+void cacheinfo_cpu_offline(unsigned int cpu_id)
+{
+	struct cache_dir *cache_dir;
+	struct cache *cache;
+
+	/* Prevent userspace from seeing inconsistent state - remove
+	 * the sysfs hierarchy first */
+	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);
+
+	/* careful, sysfs population may have failed */
+	if (cache_dir)
+		remove_cache_dir(cache_dir);
+
+	per_cpu(cache_dir_pcpu, cpu_id) = NULL;
+
+	/* clear the CPU's bit in its cache chain, possibly freeing
+	 * cache objects */
+	cache = cache_lookup_by_cpu(cpu_id);
+	if (cache)
+		cache_cpu_clear(cache, cpu_id);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
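
Note: once cacheinfo.c is wired into CPU sysfs registration (see the sysfs.c changes below), each online CPU exports its cache hierarchy under /sys/devices/system/cpu/cpuN/cache/indexM/. A minimal userspace sketch reading that layout; the program is illustrative only and assumes the paths created by the code above:

	#include <stdio.h>

	int main(void)
	{
		char buf[64];
		FILE *f = fopen("/sys/devices/system/cpu/cpu0/cache/index0/size", "r");

		if (f && fgets(buf, sizeof(buf), f))
			printf("cpu0 index0 size: %s", buf);	/* e.g. "32K" */
		if (f)
			fclose(f);
		return 0;
	}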
diff --git a/arch/powerpc/kernel/cacheinfo.h b/arch/powerpc/kernel/cacheinfo.h
new file mode 100644
index 0000000..a7b74d3
--- /dev/null
+++ b/arch/powerpc/kernel/cacheinfo.h
@@ -0,0 +1,8 @@
+#ifndef _PPC_CACHEINFO_H
+#define _PPC_CACHEINFO_H
+
+/* These are just hooks for sysfs.c to use. */
+extern void cacheinfo_cpu_online(unsigned int cpu_id);
+extern void cacheinfo_cpu_offline(unsigned int cpu_id);
+
+#endif /* _PPC_CACHEINFO_H */
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 14183af..2983ada 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -79,10 +79,10 @@
 		       "Warning: IOMMU offset too big for device mask\n");
 		if (tbl)
 			printk(KERN_INFO
-			       "mask: 0x%08lx, table offset: 0x%08lx\n",
+			       "mask: 0x%08llx, table offset: 0x%08lx\n",
 				mask, tbl->it_offset);
 		else
-			printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
+			printk(KERN_INFO "mask: 0x%08llx, table unavailable\n",
 				mask);
 		return 0;
 	} else
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index b4bcf5a..ebaedaf 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -1518,6 +1518,15 @@
 	/* turn on 64-bit mode */
 	bl	.enable_64b_mode
 
+	li	r0,0
+	mfspr	r3,SPRN_HID4
+	rldimi	r3,r0,40,23	/* clear bit 23 (rm_ci) */
+	sync
+	mtspr	SPRN_HID4,r3
+	isync
+	sync
+	slbia
+
 	/* get TOC pointer (real address) */
 	bl	.relative_toc
 
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 11b549ac..36ffb35 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -389,10 +389,6 @@
 #endif
 #endif
 
-	mfspr	r3,SPRN_TLB1CFG
-	andi.	r3,r3,0xfff
-	lis	r4,num_tlbcam_entries@ha
-	stw	r3,num_tlbcam_entries@l(r4)
 /*
  * Decide what sort of machine this is and initialize the MMU.
  */
@@ -711,7 +707,7 @@
 	EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
 
 #ifdef CONFIG_PPC_E500MC
-	EXCEPTION(0x2070, Doorbell, unknown_exception, EXC_XFER_EE)
+	EXCEPTION(0x2070, Doorbell, unknown_exception, EXC_XFER_STD)
 #endif
 
 	/* Debug Interrupt */
@@ -909,7 +905,7 @@
 _GLOBAL(loadcam_entry)
 	lis	r4,TLBCAM@ha
 	addi	r4,r4,TLBCAM@l
-	mulli	r5,r3,20
+	mulli	r5,r3,TLBCAM_SIZE
 	add	r3,r5,r4
 	lwz	r4,0(r3)
 	mtspr	SPRN_MAS0,r4
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 1bfa706..fd51578 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -239,12 +239,12 @@
 		if (printk_ratelimit()) {
 			printk(KERN_INFO "iommu_free: invalid entry\n");
 			printk(KERN_INFO "\tentry     = 0x%lx\n", entry); 
-			printk(KERN_INFO "\tdma_addr  = 0x%lx\n", (u64)dma_addr);
-			printk(KERN_INFO "\tTable     = 0x%lx\n", (u64)tbl);
-			printk(KERN_INFO "\tbus#      = 0x%lx\n", (u64)tbl->it_busno);
-			printk(KERN_INFO "\tsize      = 0x%lx\n", (u64)tbl->it_size);
-			printk(KERN_INFO "\tstartOff  = 0x%lx\n", (u64)tbl->it_offset);
-			printk(KERN_INFO "\tindex     = 0x%lx\n", (u64)tbl->it_index);
+			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
+			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
+			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
+			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
+			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
+			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
 			WARN_ON(1);
 		}
 		return;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 989edcd..c932978 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -317,7 +317,7 @@
 	/*
 	 * It is possible to have multiple instances associated with a given
 	 * task either because an multiple functions in the call path
-	 * have a return probe installed on them, and/or more then one return
+	 * have a return probe installed on them, and/or more than one return
 	 * return probe was registered for a target function.
 	 *
 	 * We can handle this because:
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index d051e8c..182e0f6 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -240,7 +240,7 @@
 	if (rc)
 		return;
 
-	seq_printf(m, "partition_entitled_capacity=%ld\n",
+	seq_printf(m, "partition_entitled_capacity=%lld\n",
 	           ppp_data.entitlement);
 	seq_printf(m, "group=%d\n", ppp_data.group_num);
 	seq_printf(m, "system_active_processors=%d\n",
@@ -265,7 +265,7 @@
 		   ppp_data.unallocated_weight);
 	seq_printf(m, "capacity_weight=%d\n", ppp_data.weight);
 	seq_printf(m, "capped=%d\n", ppp_data.capped);
-	seq_printf(m, "unallocated_capacity=%ld\n",
+	seq_printf(m, "unallocated_capacity=%lld\n",
 		   ppp_data.unallocated_entitlement);
 }
 
@@ -509,10 +509,10 @@
 	} else
 		return -EINVAL;
 
-	pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
+	pr_debug("%s: current_entitled = %llu, current_weight = %u\n",
 		 __func__, ppp_data.entitlement, ppp_data.weight);
 
-	pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
+	pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
 		 __func__, new_entitled, new_weight);
 
 	retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight);
@@ -558,7 +558,7 @@
 	pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
 	         __func__, mpp_data.entitled_mem, mpp_data.mem_weight);
 
-	pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
+	pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
 		 __func__, new_entitled, new_weight);
 
 	rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index b3abebb..d59e2b1 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -93,10 +93,35 @@
 				KDUMP_KERNELBASE);
 
 	crashk_res.start = KDUMP_KERNELBASE;
+#else
+	if (!crashk_res.start) {
+		/*
+		 * unspecified address, choose a region of specified size
+		 * can overlap with initrd (ignoring corruption when retained)
+		 * ppc64 requires kernel and some stacks to be in first segment
+		 */
+		crashk_res.start = KDUMP_KERNELBASE;
+	}
+
+	crash_base = PAGE_ALIGN(crashk_res.start);
+	if (crash_base != crashk_res.start) {
+		printk("Crash kernel base must be aligned to 0x%lx\n",
+				PAGE_SIZE);
+		crashk_res.start = crash_base;
+	}
+
 #endif
 	crash_size = PAGE_ALIGN(crash_size);
 	crashk_res.end = crashk_res.start + crash_size - 1;
 
+	/* The crash region must not overlap the current kernel */
+	if (overlaps_crashkernel(__pa(_stext), _end - _stext)) {
+		printk(KERN_WARNING
+			"Crash kernel can not overlap current kernel\n");
+		crashk_res.start = crashk_res.end = 0;
+		return;
+	}
+
 	/* Crash kernel trumps memory limit */
 	if (memory_limit && memory_limit <= crashk_res.end) {
 		memory_limit = crashk_res.end + 1;
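
Note: the new #else branch supplies a default base when none was given on the command line and then page-aligns whatever base ends up being used. Illustrative boot parameters (standard crashkernel= syntax; the sizes are made up):

	crashkernel=128M        /* base defaults to KDUMP_KERNELBASE, then page-aligned */
	crashkernel=128M@32M    /* explicit base, handled as before */

In either case the region is then rejected if it overlaps the running kernel, per the overlaps_crashkernel() check above.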
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 2538030..da5a385 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -16,7 +16,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#undef DEBUG
+#define DEBUG
 
 #include <linux/kernel.h>
 #include <linux/pci.h>
@@ -1356,6 +1356,63 @@
 	}
 }
 
+static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
+{
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	resource_size_t	offset;
+	struct resource *res, *pres;
+	int i;
+
+	pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));
+
+	/* Check for IO */
+	if (!(hose->io_resource.flags & IORESOURCE_IO))
+		goto no_io;
+	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+	BUG_ON(res == NULL);
+	res->name = "Legacy IO";
+	res->flags = IORESOURCE_IO;
+	res->start = offset;
+	res->end = (offset + 0xfff) & 0xfffffffful;
+	pr_debug("Candidate legacy IO: %pR\n", res);
+	if (request_resource(&hose->io_resource, res)) {
+		printk(KERN_DEBUG
+		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
+		       pci_domain_nr(bus), bus->number, res);
+		kfree(res);
+	}
+
+ no_io:
+	/* Check for memory */
+	offset = hose->pci_mem_offset;
+	pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
+	for (i = 0; i < 3; i++) {
+		pres = &hose->mem_resources[i];
+		if (!(pres->flags & IORESOURCE_MEM))
+			continue;
+		pr_debug("hose mem res: %pR\n", pres);
+		if ((pres->start - offset) <= 0xa0000 &&
+		    (pres->end - offset) >= 0xbffff)
+			break;
+	}
+	if (i >= 3)
+		return;
+	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+	BUG_ON(res == NULL);
+	res->name = "Legacy VGA memory";
+	res->flags = IORESOURCE_MEM;
+	res->start = 0xa0000 + offset;
+	res->end = 0xbffff + offset;
+	pr_debug("Candidate VGA memory: %pR\n", res);
+	if (request_resource(pres, res)) {
+		printk(KERN_DEBUG
+		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
+		       pci_domain_nr(bus), bus->number, res);
+		kfree(res);
+	}
+}
+
 void __init pcibios_resource_survey(void)
 {
 	struct pci_bus *b;
@@ -1371,6 +1428,18 @@
 		pcibios_allocate_resources(1);
 	}
 
+	/* Before we start assigning unassigned resources, we try to reserve
+	 * the low IO area and the VGA memory area if they intersect the
+	 * bus's available resources, to avoid allocating things on top of them
+	 */
+	if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
+		list_for_each_entry(b, &pci_root_buses, node)
+			pcibios_reserve_legacy_regions(b);
+	}
+
+	/* Now, if the platform didn't decide to blindly trust the firmware,
+	 * we proceed to assigning things that were left unassigned
+	 */
 	if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
 		pr_debug("PCI: Assigning unassigned resouces...\n");
 		pci_assign_unassigned_resources();
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 39fadc6..ea8eda8 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -470,7 +470,7 @@
 	if (bus->self) {
 		pr_debug("IO mapping for PCI-PCI bridge %s\n",
 			 pci_name(bus->self));
-		pr_debug("  virt=0x%016lx...0x%016lx\n",
+		pr_debug("  virt=0x%016llx...0x%016llx\n",
 			 bus->resource[0]->start + _IO_BASE,
 			 bus->resource[0]->end + _IO_BASE);
 		return 0;
@@ -502,7 +502,7 @@
 					      hose->io_base_phys - phys_page);
 
 	pr_debug("IO mapping for PHB %s\n", hose->dn->full_name);
-	pr_debug("  phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
+	pr_debug("  phys=0x%016llx, virt=0x%p (alloc=0x%p)\n",
 		 hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
 	pr_debug("  size=0x%016lx (alloc=0x%016lx)\n",
 		 hose->pci_io_size, size_page);
@@ -517,7 +517,7 @@
 	hose->io_resource.start += io_virt_offset;
 	hose->io_resource.end += io_virt_offset;
 
-	pr_debug("  hose->io_resource=0x%016lx...0x%016lx\n",
+	pr_debug("  hose->io_resource=0x%016llx...0x%016llx\n",
 		 hose->io_resource.start, hose->io_resource.end);
 
 	return 0;
@@ -560,9 +560,14 @@
 	 * G5 machines... So when something asks for bus 0 io base
 	 * (bus 0 is HT root), we return the AGP one instead.
 	 */
-	if (machine_is_compatible("MacRISC4"))
-		if (in_bus == 0)
+	if (in_bus == 0 && machine_is_compatible("MacRISC4")) {
+		struct device_node *agp;
+
+		agp = of_find_compatible_node(NULL, NULL, "u3-agp");
+		if (agp)
 			in_bus = 0xf0;
+		of_node_put(agp);
+	}
 
 	/* That syscall isn't quite compatible with PCI domains, but it's
 	 * used on pre-domains setup. We return the first match
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index dcec132..c8b27bb 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -165,6 +165,7 @@
 EXPORT_SYMBOL(irq_desc);
 EXPORT_SYMBOL(tb_ticks_per_jiffy);
 EXPORT_SYMBOL(cacheable_memcpy);
+EXPORT_SYMBOL(cacheable_memzero);
 #endif
 
 #ifdef CONFIG_PPC32
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 6f73c73..f00f831 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -590,6 +590,11 @@
 {
 	u32 *slb_size_ptr;
 
+	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
+	if (slb_size_ptr != NULL) {
+		mmu_slb_size = *slb_size_ptr;
+		return;
+	}
 	slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
 	if (slb_size_ptr != NULL) {
 		mmu_slb_size = *slb_size_ptr;
@@ -824,11 +829,11 @@
 #endif
 
 #ifdef CONFIG_KEXEC
-	lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
+	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
 	if (lprop)
 		crashk_res.start = *lprop;
 
-	lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
+	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
 	if (lprop)
 		crashk_res.end = crashk_res.start + *lprop - 1;
 #endif
@@ -893,12 +898,12 @@
 	u64 base, size, lmb_size;
 	unsigned int is_kexec_kdump = 0, rngs;
 
-	ls = (cell_t *)of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
+	ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
 	if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
 		return 0;
 	lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
 
-	dm = (cell_t *)of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
+	dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
 	if (dm == NULL || l < sizeof(cell_t))
 		return 0;
 
@@ -907,7 +912,7 @@
 		return 0;
 
 	/* check if this is a kexec/kdump kernel. */
-	usm = (cell_t *)of_get_flat_dt_prop(node, "linux,drconf-usable-memory",
+	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory",
 						 &l);
 	if (usm != NULL)
 		is_kexec_kdump = 1;
@@ -981,9 +986,9 @@
 	} else if (strcmp(type, "memory") != 0)
 		return 0;
 
-	reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
+	reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
 	if (reg == NULL)
-		reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
+		reg = of_get_flat_dt_prop(node, "reg", &l);
 	if (reg == NULL)
 		return 0;
 
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 2445945..7f1b33d 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1210,7 +1210,7 @@
 		/* Initialize the table to have a one-to-one mapping
 		 * over the allocated size.
 		 */
-		tce_entryp = (unsigned long *)base;
+		tce_entryp = (u64 *)base;
 		for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
 			tce_entry = (i << PAGE_SHIFT);
 			tce_entry |= 0x3;
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 8c13355..8f0856f 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -232,11 +232,6 @@
 }
 EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
 
-static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
-{
-	return (((pin - 1) + slot) % 4) + 1;
-}
-
 int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
 {
 	struct device_node *dn, *ppnode;
@@ -306,7 +301,7 @@
 		/* We can only get here if we hit a P2P bridge with no node,
 		 * let's do standard swizzling and try again
 		 */
-		lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
+		lspec = pci_swizzle_interrupt_pin(pdev, lspec);
 		pdev = ppdev;
 	}
 
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index d8bd216..2d34196 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -434,8 +434,8 @@
 	printk("Starting Linux PPC64 %s\n", init_utsname()->version);
 
 	printk("-----------------------------------------------------\n");
-	printk("ppc64_pft_size                = 0x%lx\n", ppc64_pft_size);
-	printk("physicalMemorySize            = 0x%lx\n", lmb_phys_mem_size());
+	printk("ppc64_pft_size                = 0x%llx\n", ppc64_pft_size);
+	printk("physicalMemorySize            = 0x%llx\n", lmb_phys_mem_size());
 	if (ppc64_caches.dline_size != 0x80)
 		printk("ppc64_caches.dcache_line_size = 0x%x\n",
 		       ppc64_caches.dline_size);
@@ -493,7 +493,7 @@
 	 * bringup, we need to get at them in real mode. This means they
 	 * must also be within the RMO region.
 	 */
-	limit = min(0x10000000UL, lmb.rmo_size);
+	limit = min(0x10000000ULL, lmb.rmo_size);
 
 	for_each_possible_cpu(i) {
 		unsigned long sp;
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 0c64f10..4a2ee08 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -18,6 +18,8 @@
 #include <asm/machdep.h>
 #include <asm/smp.h>
 
+#include "cacheinfo.h"
+
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
 #include <asm/lppaca.h>
@@ -25,8 +27,6 @@
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
-static DEFINE_PER_CPU(struct kobject *, cache_toplevel);
-
 /*
  * SMT snooze delay stuff, 64-bit only for now
  */
@@ -343,283 +343,6 @@
 #endif /* HAS_PPC_PMC_PA6T */
 #endif /* HAS_PPC_PMC_CLASSIC */
 
-struct cache_desc {
-	struct kobject kobj;
-	struct cache_desc *next;
-	const char *type;	/* Instruction, Data, or Unified */
-	u32 size;		/* total cache size in KB */
-	u32 line_size;		/* in bytes */
-	u32 nr_sets;		/* number of sets */
-	u32 level;		/* e.g. 1, 2, 3... */
-	u32 associativity;	/* e.g. 8-way... 0 is fully associative */
-};
-
-DEFINE_PER_CPU(struct cache_desc *, cache_desc);
-
-static struct cache_desc *kobj_to_cache_desc(struct kobject *k)
-{
-	return container_of(k, struct cache_desc, kobj);
-}
-
-static void cache_desc_release(struct kobject *k)
-{
-	struct cache_desc *desc = kobj_to_cache_desc(k);
-
-	pr_debug("%s: releasing %s\n", __func__, kobject_name(k));
-
-	if (desc->next)
-		kobject_put(&desc->next->kobj);
-
-	kfree(kobj_to_cache_desc(k));
-}
-
-static ssize_t cache_desc_show(struct kobject *k, struct attribute *attr, char *buf)
-{
-	struct kobj_attribute *kobj_attr;
-
-	kobj_attr = container_of(attr, struct kobj_attribute, attr);
-
-	return kobj_attr->show(k, kobj_attr, buf);
-}
-
-static struct sysfs_ops cache_desc_sysfs_ops = {
-	.show = cache_desc_show,
-};
-
-static struct kobj_type cache_desc_type = {
-	.release = cache_desc_release,
-	.sysfs_ops = &cache_desc_sysfs_ops,
-};
-
-static ssize_t cache_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
-	struct cache_desc *cache = kobj_to_cache_desc(k);
-
-	return sprintf(buf, "%uK\n", cache->size);
-}
-
-static struct kobj_attribute cache_size_attr =
-	__ATTR(size, 0444, cache_size_show, NULL);
-
-static ssize_t cache_line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
-	struct cache_desc *cache = kobj_to_cache_desc(k);
-
-	return sprintf(buf, "%u\n", cache->line_size);
-}
-
-static struct kobj_attribute cache_line_size_attr =
-	__ATTR(coherency_line_size, 0444, cache_line_size_show, NULL);
-
-static ssize_t cache_nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
-	struct cache_desc *cache = kobj_to_cache_desc(k);
-
-	return sprintf(buf, "%u\n", cache->nr_sets);
-}
-
-static struct kobj_attribute cache_nr_sets_attr =
-	__ATTR(number_of_sets, 0444, cache_nr_sets_show, NULL);
-
-static ssize_t cache_type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
-	struct cache_desc *cache = kobj_to_cache_desc(k);
-
-	return sprintf(buf, "%s\n", cache->type);
-}
-
-static struct kobj_attribute cache_type_attr =
-	__ATTR(type, 0444, cache_type_show, NULL);
-
-static ssize_t cache_level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
-	struct cache_desc *cache = kobj_to_cache_desc(k);
-
-	return sprintf(buf, "%u\n", cache->level);
-}
-
-static struct kobj_attribute cache_level_attr =
-	__ATTR(level, 0444, cache_level_show, NULL);
-
-static ssize_t cache_assoc_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
-	struct cache_desc *cache = kobj_to_cache_desc(k);
-
-	return sprintf(buf, "%u\n", cache->associativity);
-}
-
-static struct kobj_attribute cache_assoc_attr =
-	__ATTR(ways_of_associativity, 0444, cache_assoc_show, NULL);
-
-struct cache_desc_info {
-	const char *type;
-	const char *size_prop;
-	const char *line_size_prop;
-	const char *nr_sets_prop;
-};
-
-/* PowerPC Processor binding says the [di]-cache-* must be equal on
- * unified caches, so just use d-cache properties. */
-static struct cache_desc_info ucache_info = {
-	.type = "Unified",
-	.size_prop = "d-cache-size",
-	.line_size_prop = "d-cache-line-size",
-	.nr_sets_prop = "d-cache-sets",
-};
-
-static struct cache_desc_info dcache_info = {
-	.type = "Data",
-	.size_prop = "d-cache-size",
-	.line_size_prop = "d-cache-line-size",
-	.nr_sets_prop = "d-cache-sets",
-};
-
-static struct cache_desc_info icache_info = {
-	.type = "Instruction",
-	.size_prop = "i-cache-size",
-	.line_size_prop = "i-cache-line-size",
-	.nr_sets_prop = "i-cache-sets",
-};
-
-static struct cache_desc * __cpuinit create_cache_desc(struct device_node *np, struct kobject *parent, int index, int level, struct cache_desc_info *info)
-{
-	const u32 *cache_line_size;
-	struct cache_desc *new;
-	const u32 *cache_size;
-	const u32 *nr_sets;
-	int rc;
-
-	new = kzalloc(sizeof(*new), GFP_KERNEL);
-	if (!new)
-		return NULL;
-
-	rc = kobject_init_and_add(&new->kobj, &cache_desc_type, parent,
-				  "index%d", index);
-	if (rc)
-		goto err;
-
-	/* type */
-	new->type = info->type;
-	rc = sysfs_create_file(&new->kobj, &cache_type_attr.attr);
-	WARN_ON(rc);
-
-	/* level */
-	new->level = level;
-	rc = sysfs_create_file(&new->kobj, &cache_level_attr.attr);
-	WARN_ON(rc);
-
-	/* size */
-	cache_size = of_get_property(np, info->size_prop, NULL);
-	if (cache_size) {
-		new->size = *cache_size / 1024;
-		rc = sysfs_create_file(&new->kobj,
-				       &cache_size_attr.attr);
-		WARN_ON(rc);
-	}
-
-	/* coherency_line_size */
-	cache_line_size = of_get_property(np, info->line_size_prop, NULL);
-	if (cache_line_size) {
-		new->line_size = *cache_line_size;
-		rc = sysfs_create_file(&new->kobj,
-				       &cache_line_size_attr.attr);
-		WARN_ON(rc);
-	}
-
-	/* number_of_sets */
-	nr_sets = of_get_property(np, info->nr_sets_prop, NULL);
-	if (nr_sets) {
-		new->nr_sets = *nr_sets;
-		rc = sysfs_create_file(&new->kobj,
-				       &cache_nr_sets_attr.attr);
-		WARN_ON(rc);
-	}
-
-	/* ways_of_associativity */
-	if (new->nr_sets == 1) {
-		/* fully associative */
-		new->associativity = 0;
-		goto create_assoc;
-	}
-
-	if (new->nr_sets && new->size && new->line_size) {
-		/* If we have values for all of these we can derive
-		 * the associativity. */
-		new->associativity =
-			((new->size * 1024) / new->nr_sets) / new->line_size;
-create_assoc:
-		rc = sysfs_create_file(&new->kobj,
-				       &cache_assoc_attr.attr);
-		WARN_ON(rc);
-	}
-
-	return new;
-err:
-	kfree(new);
-	return NULL;
-}
-
-static bool cache_is_unified(struct device_node *np)
-{
-	return of_get_property(np, "cache-unified", NULL);
-}
-
-static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level)
-{
-	struct device_node *next_cache;
-	struct cache_desc *new, **end;
-
-	pr_debug("%s(node = %s, index = %d)\n", __func__, np->full_name, index);
-
-	if (cache_is_unified(np)) {
-		new = create_cache_desc(np, parent, index, level,
-					&ucache_info);
-	} else {
-		new = create_cache_desc(np, parent, index, level,
-					&dcache_info);
-		if (new) {
-			index++;
-			new->next = create_cache_desc(np, parent, index, level,
-						      &icache_info);
-		}
-	}
-	if (!new)
-		return NULL;
-
-	end = &new->next;
-	while (*end)
-		end = &(*end)->next;
-
-	next_cache = of_find_next_cache_node(np);
-	if (!next_cache)
-		goto out;
-
-	*end = create_cache_index_info(next_cache, parent, ++index, ++level);
-
-	of_node_put(next_cache);
-out:
-	return new;
-}
-
-static void __cpuinit create_cache_info(struct sys_device *sysdev)
-{
-	struct kobject *cache_toplevel;
-	struct device_node *np = NULL;
-	int cpu = sysdev->id;
-
-	cache_toplevel = kobject_create_and_add("cache", &sysdev->kobj);
-	if (!cache_toplevel)
-		return;
-	per_cpu(cache_toplevel, cpu) = cache_toplevel;
-	np = of_get_cpu_node(cpu, NULL);
-	if (np != NULL) {
-		per_cpu(cache_desc, cpu) =
-			create_cache_index_info(np, cache_toplevel, 0, 1);
-		of_node_put(np);
-	}
-	return;
-}
-
 static void __cpuinit register_cpu_online(unsigned int cpu)
 {
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -684,25 +407,10 @@
 		sysdev_create_file(s, &attr_dscr);
 #endif /* CONFIG_PPC64 */
 
-	create_cache_info(s);
+	cacheinfo_cpu_online(cpu);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void remove_cache_info(struct sys_device *sysdev)
-{
-	struct kobject *cache_toplevel;
-	struct cache_desc *cache_desc;
-	int cpu = sysdev->id;
-
-	cache_desc = per_cpu(cache_desc, cpu);
-	if (cache_desc != NULL)
-		kobject_put(&cache_desc->kobj);
-
-	cache_toplevel = per_cpu(cache_toplevel, cpu);
-	if (cache_toplevel != NULL)
-		kobject_put(cache_toplevel);
-}
-
 static void unregister_cpu_online(unsigned int cpu)
 {
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -769,7 +477,7 @@
 		sysdev_remove_file(s, &attr_dscr);
 #endif /* CONFIG_PPC64 */
 
-	remove_cache_info(s);
+	cacheinfo_cpu_offline(cpu);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 47bf15c..161b9b9 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -87,7 +87,9 @@
 	/* The dummy segment contents for the bug workaround mentioned above
 	   near PHDRS.  */
 	.dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
-		LONG(0xf177)
+		LONG(0)
+		LONG(0)
+		LONG(0)
 	} :kernel :dummy
 
 /*
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 23cee39..1971e4ee3 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -56,18 +56,11 @@
 
 extern void loadcam_entry(unsigned int index);
 unsigned int tlbcam_index;
-unsigned int num_tlbcam_entries;
 static unsigned long __cam0, __cam1, __cam2;
 
 #define NUM_TLBCAMS	(16)
 
-struct tlbcam {
-   	u32	MAS0;
-	u32	MAS1;
-	u32	MAS2;
-	u32	MAS3;
-	u32	MAS7;
-} TLBCAM[NUM_TLBCAMS];
+struct tlbcam TLBCAM[NUM_TLBCAMS];
 
 struct tlbcamrange {
    	unsigned long start;
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 4314b39..d1f9c62 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -30,11 +30,11 @@
 #if defined(CONFIG_40x) || defined(CONFIG_8xx)
 static inline void _tlbil_all(void)
 {
-	asm volatile ("sync; tlbia; isync" : : : "memory")
+	asm volatile ("sync; tlbia; isync" : : : "memory");
 }
 static inline void _tlbil_pid(unsigned int pid)
 {
-	asm volatile ("sync; tlbia; isync" : : : "memory")
+	asm volatile ("sync; tlbia; isync" : : : "memory");
 }
 #else /* CONFIG_40x || CONFIG_8xx */
 extern void _tlbil_all(void);
@@ -47,7 +47,7 @@
 #ifdef CONFIG_8xx
 static inline void _tlbil_va(unsigned long address, unsigned int pid)
 {
-	asm volatile ("tlbie %0; sync" : : "r" (address) : "memory")
+	asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
 }
 #else /* CONFIG_8xx */
 extern void _tlbil_va(unsigned long address, unsigned int pid);
@@ -75,6 +75,15 @@
 #endif /* CONFIG_PPC_MMU_NOHASH */
 
 #ifdef CONFIG_PPC32
+
+struct tlbcam {
+	u32	MAS0;
+	u32	MAS1;
+	u32	MAS2;
+	u32	MAS3;
+	u32	MAS7;
+};
+
 extern void mapin_ram(void);
 extern int map_page(unsigned long va, phys_addr_t pa, int flags);
 extern void setbat(int index, unsigned long virt, phys_addr_t phys,
@@ -90,8 +99,6 @@
 struct hash_pte;
 extern struct hash_pte *Hash, *Hash_end;
 extern unsigned long Hash_size, Hash_mask;
-
-extern unsigned int num_tlbcam_entries;
 #endif
 
 extern unsigned long ioremap_bot;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index cf81049..7393bd7 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -822,42 +822,50 @@
  * required. nid is the preferred node and end is the physical address of
  * the highest address in the node.
  *
- * Returns the physical address of the memory.
+ * Returns the virtual address of the memory.
  */
-static void __init *careful_allocation(int nid, unsigned long size,
+static void __init *careful_zallocation(int nid, unsigned long size,
 				       unsigned long align,
 				       unsigned long end_pfn)
 {
+	void *ret;
 	int new_nid;
-	unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+	unsigned long ret_paddr;
+
+	ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
 	/* retry over all memory */
-	if (!ret)
-		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
+	if (!ret_paddr)
+		ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
 
-	if (!ret)
-		panic("numa.c: cannot allocate %lu bytes on node %d",
+	if (!ret_paddr)
+		panic("numa.c: cannot allocate %lu bytes for node %d",
 		      size, nid);
 
+	ret = __va(ret_paddr);
+
 	/*
-	 * If the memory came from a previously allocated node, we must
-	 * retry with the bootmem allocator.
+	 * We initialize the nodes in numeric order: 0, 1, 2...
+	 * and hand over control from the LMB allocator to the
+	 * bootmem allocator.  If this function is called for
+	 * node 5, then we know that all nodes <5 are using the
+	 * bootmem allocator instead of the LMB allocator.
+	 *
+	 * So, check the nid from which this allocation came
+	 * and double check to see if we need to use bootmem
+	 * instead of the LMB.  We don't free the LMB memory
+	 * since it would be useless.
 	 */
-	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
+	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
 	if (new_nid < nid) {
-		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
+		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
 				size, align, 0);
 
-		if (!ret)
-			panic("numa.c: cannot allocate %lu bytes on node %d",
-			      size, new_nid);
-
-		ret = __pa(ret);
-
-		dbg("alloc_bootmem %lx %lx\n", ret, size);
+		dbg("alloc_bootmem %p %lx\n", ret, size);
 	}
 
-	return (void *)ret;
+	memset(ret, 0, size);
+	return ret;
 }
 
 static struct notifier_block __cpuinitdata ppc64_numa_nb = {
@@ -952,7 +960,7 @@
 
 	for_each_online_node(nid) {
 		unsigned long start_pfn, end_pfn;
-		unsigned long bootmem_paddr;
+		void *bootmem_vaddr;
 		unsigned long bootmap_pages;
 
 		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
@@ -964,11 +972,9 @@
 		 * previous nodes' bootmem to be initialized and have
 		 * all reserved areas marked.
 		 */
-		NODE_DATA(nid) = careful_allocation(nid,
+		NODE_DATA(nid) = careful_zallocation(nid,
 					sizeof(struct pglist_data),
 					SMP_CACHE_BYTES, end_pfn);
-		NODE_DATA(nid) = __va(NODE_DATA(nid));
-		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
   		dbg("node %d\n", nid);
 		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
@@ -984,20 +990,20 @@
   		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
 
 		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-		bootmem_paddr = (unsigned long)careful_allocation(nid,
+		bootmem_vaddr = careful_zallocation(nid,
 					bootmap_pages << PAGE_SHIFT,
 					PAGE_SIZE, end_pfn);
-		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);
 
-		dbg("bootmap_paddr = %lx\n", bootmem_paddr);
+		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
 
-		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
+		init_bootmem_node(NODE_DATA(nid),
+				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
 				  start_pfn, end_pfn);
 
 		free_bootmem_with_active_regions(nid, end_pfn);
 		/*
 		 * Be very careful about moving this around.  Future
-		 * calls to careful_allocation() depend on this getting
+		 * calls to careful_zallocation() depend on this getting
 		 * done correctly.
 		 */
 		mark_reserved_regions_for_nid(nid);
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 38ff35f..22972cd 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -266,7 +266,8 @@
 		/* The PTE should never be already set nor present in the
 		 * hash table
 		 */
-		BUG_ON(pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE));
+		BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
+		       flags);
 		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
 						     __pgprot(flags)));
 	}
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index db44e02..ba51948 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -710,9 +710,18 @@
 			   unsigned long len)
 {
 	struct slice_mask mask, available;
+	unsigned int psize = mm->context.user_psize;
 
 	mask = slice_range_to_mask(addr, len);
-	available = slice_mask_for_size(mm, mm->context.user_psize);
+	available = slice_mask_for_size(mm, psize);
+#ifdef CONFIG_PPC_64K_PAGES
+	/* We need to account for 4k slices too */
+	if (psize == MMU_PAGE_64K) {
+		struct slice_mask compat_mask;
+		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
+		or_mask(available, compat_mask);
+	}
+#endif
 
 #if 0 /* too verbose */
 	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 60e6032..98cd1dc 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -251,8 +251,8 @@
 
 		paca[cpu].stab_addr = newstab;
 		paca[cpu].stab_real = virt_to_abs(newstab);
-		printk(KERN_INFO "Segment table for CPU %d at 0x%lx "
-		       "virtual, 0x%lx absolute\n",
+		printk(KERN_INFO "Segment table for CPU %d at 0x%llx "
+		       "virtual, 0x%llx absolute\n",
 		       cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
 	}
 }
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 803a64c..39ac22b 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -189,8 +189,9 @@
 	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
 	_tlbil_pid(0);
 	preempt_enable();
-#endif
+#else
 	_tlbil_pid(0);
+#endif
 }
 EXPORT_SYMBOL(flush_tlb_kernel_range);
 
diff --git a/arch/powerpc/oprofile/cell/pr_util.h b/arch/powerpc/oprofile/cell/pr_util.h
index 628009c..964b939 100644
--- a/arch/powerpc/oprofile/cell/pr_util.h
+++ b/arch/powerpc/oprofile/cell/pr_util.h
@@ -30,6 +30,10 @@
 extern struct delayed_work spu_work;
 extern int spu_prof_running;
 
+#define TRACE_ARRAY_SIZE 1024
+
+extern spinlock_t oprof_spu_smpl_arry_lck;
+
 struct spu_overlay_info {	/* map of sections within an SPU overlay */
 	unsigned int vma;	/* SPU virtual memory address from elf */
 	unsigned int size;	/* size of section from elf */
@@ -79,7 +83,7 @@
  * the vma-to-fileoffset map.
  */
 struct vma_to_fileoffset_map *create_vma_map(const struct spu *spu,
-					     u64 objectid);
+					     unsigned long objectid);
 unsigned int vma_map_lookup(struct vma_to_fileoffset_map *map,
 			    unsigned int vma, const struct spu *aSpu,
 			    int *grd_val);
@@ -89,10 +93,11 @@
  * Entry point for SPU profiling.
  * cycles_reset is the SPU_CYCLES count value specified by the user.
  */
-int start_spu_profiling(unsigned int cycles_reset);
+int start_spu_profiling_cycles(unsigned int cycles_reset);
+void start_spu_profiling_events(void);
 
-void stop_spu_profiling(void);
-
+void stop_spu_profiling_cycles(void);
+void stop_spu_profiling_events(void);
 
 /* add the necessary profiling hooks */
 int spu_sync_start(void);
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index dd499c3..9305dda 100644
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -18,11 +18,21 @@
 #include <asm/cell-pmu.h>
 #include "pr_util.h"
 
-#define TRACE_ARRAY_SIZE 1024
 #define SCALE_SHIFT 14
 
 static u32 *samples;
 
+/* spu_prof_running is a flag used to indicate if spu profiling is enabled
+ * or not.  It is set by the routines start_spu_profiling_cycles() and
+ * start_spu_profiling_events().  The flag is cleared by the routines
+ * stop_spu_profiling_cycles() and stop_spu_profiling_events().  These
+ * routines are called via global_start() and global_stop() which are called in
+ * op_powerpc_start() and op_powerpc_stop().  These routines are called once
+ * per system as a result of the user starting/stopping oprofile.  Hence, only
+ * one CPU per user at a time will be changing the value of spu_prof_running.
+ * In general, OProfile does not protect against multiple users trying to run
+ * OProfile at a time.
+ */
 int spu_prof_running;
 static unsigned int profiling_interval;
 
@@ -31,8 +41,8 @@
 
 #define SPU_PC_MASK	     0xFFFF
 
-static DEFINE_SPINLOCK(sample_array_lock);
-unsigned long sample_array_lock_flags;
+DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
+unsigned long oprof_spu_smpl_arry_lck_flags;
 
 void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
 {
@@ -49,7 +59,7 @@
 	 * of precision.  This is close enough for the purpose at hand.
 	 *
 	 * The value of the timeout should be small enough that the hw
-	 * trace buffer will not get more then about 1/3 full for the
+	 * trace buffer will not get more than about 1/3 full for the
 	 * maximum user specified (the LFSR value) hw sampling frequency.
 	 * This is to ensure the trace buffer will never fill even if the
 	 * kernel thread scheduling varies under a heavy system load.
@@ -145,13 +155,13 @@
 		 * sample array must be loaded and then processed for a given
 		 * cpu.	 The sample array is not per cpu.
 		 */
-		spin_lock_irqsave(&sample_array_lock,
-				  sample_array_lock_flags);
+		spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
+				  oprof_spu_smpl_arry_lck_flags);
 		num_samples = cell_spu_pc_collection(cpu);
 
 		if (num_samples == 0) {
-			spin_unlock_irqrestore(&sample_array_lock,
-					       sample_array_lock_flags);
+			spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+					       oprof_spu_smpl_arry_lck_flags);
 			continue;
 		}
 
@@ -162,8 +172,8 @@
 					num_samples);
 		}
 
-		spin_unlock_irqrestore(&sample_array_lock,
-				       sample_array_lock_flags);
+		spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+				       oprof_spu_smpl_arry_lck_flags);
 
 	}
 	smp_wmb();	/* insure spu event buffer updates are written */
@@ -182,13 +192,13 @@
 
 static struct hrtimer timer;
 /*
- * Entry point for SPU profiling.
+ * Entry point for SPU cycle profiling.
  * NOTE:  SPU profiling is done system-wide, not per-CPU.
  *
  * cycles_reset is the count value specified by the user when
  * setting up OProfile to count SPU_CYCLES.
  */
-int start_spu_profiling(unsigned int cycles_reset)
+int start_spu_profiling_cycles(unsigned int cycles_reset)
 {
 	ktime_t kt;
 
@@ -212,10 +222,30 @@
 	return 0;
 }
 
-void stop_spu_profiling(void)
+/*
+ * Entry point for SPU event profiling.
+ * NOTE:  SPU profiling is done system-wide, not per-CPU.
+ */
+void start_spu_profiling_events(void)
+{
+	spu_prof_running = 1;
+	schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
+
+	return;
+}
+
+void stop_spu_profiling_cycles(void)
 {
 	spu_prof_running = 0;
 	hrtimer_cancel(&timer);
 	kfree(samples);
-	pr_debug("SPU_PROF: stop_spu_profiling issued\n");
+	pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n");
+}
+
+void stop_spu_profiling_events(void)
+{
+	spu_prof_running = 0;
 }
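
The cycles path above keeps its timer-driven collection: a high-resolution timer periodically drains the hardware trace buffer, while the new events path leaves sample collection to the performance-counter interrupt handler in op_model_cell.c. For reference, a generic sketch of the hrtimer pattern the cycles path relies on (all names below are placeholders, not the driver's own symbols):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct hrtimer sample_timer;
	static unsigned long sample_period_ns = 100000;	/* placeholder period */

	static enum hrtimer_restart sample_timer_fn(struct hrtimer *t)
	{
		/* drain the trace buffer / record samples here, then re-arm */
		hrtimer_forward_now(t, ns_to_ktime(sample_period_ns));
		return HRTIMER_RESTART;
	}

	static void sample_timer_start(void)
	{
		hrtimer_init(&sample_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		sample_timer.function = sample_timer_fn;
		hrtimer_start(&sample_timer, ns_to_ktime(sample_period_ns),
			      HRTIMER_MODE_REL);
	}

	static void sample_timer_stop(void)
	{
		hrtimer_cancel(&sample_timer);
	}
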
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 17807ac..21f16ed 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -132,6 +132,28 @@
 	oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
 	oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
 	oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
+#ifdef CONFIG_OPROFILE_CELL
+	/* create a file the user tool can check to see what level of profiling
+	 * support exists in this kernel. Initialize the bit mask to indicate
+	 * what support the kernel has:
+	 * bit 0      -  Supports SPU event profiling in addition to PPU
+	 *               event and cycles; and SPU cycle profiling
+	 * bits 1-31  -  Currently unused.
+	 *
+	 * If the file does not exist, then the kernel only supports SPU
+	 * cycle profiling, PPU event and cycle profiling.
+	 */
+	oprofilefs_create_ulong(sb, root, "cell_support", &sys.cell_support);
+	sys.cell_support = 0x1; /* Note, the user OProfile tool must check
+				 * that this bit is set before attempting to
+				 * use SPU event profiling.  Older kernels
+				 * will not have this file, hence the user
+				 * tool is not allowed to do SPU event
+				 * profiling on older kernels.  Older kernels
+				 * will accept SPU events but collected data
+				 * is garbage.
+				 */
+#endif
 #endif
 
 	for (i = 0; i < model->num_counters; ++i) {
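
The cell_support flag above is meant to be read by the user-space oprofile tool before it requests SPU event profiling. A rough user-space sketch of that check follows; the /dev/oprofile path is only the customary oprofilefs mount point and the helper name is made up for illustration:

	#include <stdio.h>

	static int spu_event_profiling_supported(void)
	{
		FILE *f = fopen("/dev/oprofile/cell_support", "r");
		unsigned long mask = 0;

		if (!f)
			return 0;	/* old kernel: only SPU cycle / PPU profiling */
		if (fscanf(f, "%lu", &mask) != 1)
			mask = 0;
		fclose(f);
		return (mask & 0x1) != 0;	/* bit 0: SPU event profiling supported */
	}
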
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 25a4ec2..ae06c62 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -40,14 +40,15 @@
 #include "../platforms/cell/interrupt.h"
 #include "cell/pr_util.h"
 
-static void cell_global_stop_spu(void);
+#define PPU_PROFILING            0
+#define SPU_PROFILING_CYCLES     1
+#define SPU_PROFILING_EVENTS     2
 
-/*
- * spu_cycle_reset is the number of cycles between samples.
- * This variable is used for SPU profiling and should ONLY be set
- * at the beginning of cell_reg_setup; otherwise, it's read-only.
- */
-static unsigned int spu_cycle_reset;
+#define SPU_EVENT_NUM_START      4100
+#define SPU_EVENT_NUM_STOP       4399
+#define SPU_PROFILE_EVENT_ADDR          4363  /* spu, address trace, decimal */
+#define SPU_PROFILE_EVENT_ADDR_MASK_A   0x146 /* sub unit set to zero */
+#define SPU_PROFILE_EVENT_ADDR_MASK_B   0x186 /* sub unit set to zero */
 
 #define NUM_SPUS_PER_NODE    8
 #define SPU_CYCLES_EVENT_NUM 2	/*  event number for SPU_CYCLES */
@@ -66,6 +67,21 @@
 
 #define MAX_SPU_COUNT 0xFFFFFF	/* maximum 24 bit LFSR value */
 
+/* Minimum HW interval timer setting to send a value to the trace buffer
+ * is 10 cycles.  To configure the counter to send a value every N cycles,
+ * set the counter to 2^32 - 1 - N.
+ */
+#define NUM_INTERVAL_CYC  (0xFFFFFFFF - 10)
+
+/*
+ * spu_cycle_reset is the number of cycles between samples.
+ * This variable is used for SPU profiling and should ONLY be set
+ * at the beginning of cell_reg_setup; otherwise, it's read-only.
+ */
+static unsigned int spu_cycle_reset;
+static unsigned int profiling_mode;
+static int spu_evnt_phys_spu_indx;
+
 struct pmc_cntrl_data {
 	unsigned long vcntr;
 	unsigned long evnts;
@@ -105,6 +121,8 @@
 	u16 trace_mode;
 	u16 freeze;
 	u16 count_mode;
+	u16 spu_addr_trace;
+	u8  trace_buf_ovflw;
 };
 
 static struct {
@@ -122,7 +140,7 @@
 #define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
 
 static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
-
+static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE];
 static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
 
 /*
@@ -152,6 +170,7 @@
 
 static u32 virt_cntr_inter_mask;
 static struct timer_list timer_virt_cntr;
+static struct timer_list timer_spu_event_swap;
 
 /*
  * pm_signal needs to be global since it is initialized in
@@ -165,7 +184,7 @@
 static u32 reset_value[NR_PHYS_CTRS];
 static int num_counters;
 static int oprofile_running;
-static DEFINE_SPINLOCK(virt_cntr_lock);
+static DEFINE_SPINLOCK(cntr_lock);
 
 static u32 ctr_enabled;
 
@@ -336,13 +355,13 @@
 	for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
 		if (bus_word & (1 << i)) {
 			pm_regs.debug_bus_control |=
-			    (bus_type << (30 - (2 * i)));
+				(bus_type << (30 - (2 * i)));
 
 			for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
 				if (input_bus[j] == 0xff) {
 					input_bus[j] = i;
 					pm_regs.group_control |=
-					    (i << (30 - (2 * j)));
+						(i << (30 - (2 * j)));
 
 					break;
 				}
@@ -367,12 +386,16 @@
 	if (pm_regs.pm_cntrl.stop_at_max == 1)
 		val |= CBE_PM_STOP_AT_MAX;
 
-	if (pm_regs.pm_cntrl.trace_mode == 1)
+	if (pm_regs.pm_cntrl.trace_mode != 0)
 		val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);
 
+	if (pm_regs.pm_cntrl.trace_buf_ovflw == 1)
+		val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw);
 	if (pm_regs.pm_cntrl.freeze == 1)
 		val |= CBE_PM_FREEZE_ALL_CTRS;
 
+	val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace);
+
 	/*
 	 * Routine set_count_mode must be called previously to set
 	 * the count mode based on the user selection of user and kernel.
@@ -441,7 +464,7 @@
 	 * not both playing with the counters on the same node.
 	 */
 
-	spin_lock_irqsave(&virt_cntr_lock, flags);
+	spin_lock_irqsave(&cntr_lock, flags);
 
 	prev_hdw_thread = hdw_thread;
 
@@ -480,7 +503,7 @@
 		cbe_disable_pm_interrupts(cpu);
 		for (i = 0; i < num_counters; i++) {
 			per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
-			    = cbe_read_ctr(cpu, i);
+				= cbe_read_ctr(cpu, i);
 
 			if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
 			    == 0xFFFFFFFF)
@@ -527,7 +550,7 @@
 		cbe_enable_pm(cpu);
 	}
 
-	spin_unlock_irqrestore(&virt_cntr_lock, flags);
+	spin_unlock_irqrestore(&cntr_lock, flags);
 
 	mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
 }
@@ -541,38 +564,146 @@
 	add_timer(&timer_virt_cntr);
 }
 
-/* This function is called once for all cpus combined */
-static int cell_reg_setup(struct op_counter_config *ctr,
+static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
 			struct op_system_config *sys, int num_ctrs)
 {
-	int i, j, cpu;
-	spu_cycle_reset = 0;
-
-	if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
-		spu_cycle_reset = ctr[0].count;
-
-		/*
-		 * Each node will need to make the rtas call to start
-		 * and stop SPU profiling.  Get the token once and store it.
-		 */
-		spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
-
-		if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
-			printk(KERN_ERR
-			       "%s: rtas token ibm,cbe-spu-perftools unknown\n",
-			       __func__);
-			return -EIO;
-		}
-	}
-
-	pm_rtas_token = rtas_token("ibm,cbe-perftools");
+	spu_cycle_reset = ctr[0].count;
 
 	/*
-	 * For all events excetp PPU CYCLEs, each node will need to make
+	 * Each node will need to make the rtas call to start
+	 * and stop SPU profiling.  Get the token once and store it.
+	 */
+	spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
+
+	if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
+		printk(KERN_ERR
+		       "%s: rtas token ibm,cbe-spu-perftools unknown\n",
+		       __func__);
+		return -EIO;
+	}
+	return 0;
+}
+
+/* Unfortunately, the hardware will only support event profiling
+ * on one SPU per node at a time.  Therefore, we must time slice
+ * the profiling across all SPUs in the node.  Note, we do this
+ * in parallel for each node.  The following routine is called
+ * periodically, based on a kernel timer, to switch which SPU is
+ * being monitored in a round robin fashion.
+ */
+static void spu_evnt_swap(unsigned long data)
+{
+	int node;
+	int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
+	unsigned long flags;
+	int cpu;
+	int ret;
+	u32 interrupt_mask;
+
+
+	/* enable interrupts on cntr 0 */
+	interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0);
+
+	hdw_thread = 0;
+
+	/* Make sure spu event interrupt handler and spu event swap
+	 * don't access the counters simultaneously.
+	 */
+	spin_lock_irqsave(&cntr_lock, flags);
+
+	cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx;
+
+	if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE)
+		spu_evnt_phys_spu_indx = 0;
+
+	pm_signal[0].sub_unit = spu_evnt_phys_spu_indx;
+	pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
+	pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
+
+	/* switch the SPU being profiled on each node */
+	for_each_online_cpu(cpu) {
+		if (cbe_get_hw_thread_id(cpu))
+			continue;
+
+		node = cbe_cpu_to_node(cpu);
+		cur_phys_spu = (node * NUM_SPUS_PER_NODE)
+			+ cur_spu_evnt_phys_spu_indx;
+		nxt_phys_spu = (node * NUM_SPUS_PER_NODE)
+			+ spu_evnt_phys_spu_indx;
+
+		/*
+		 * stop counters, save counter values, restore counts
+		 * for previous physical SPU
+		 */
+		cbe_disable_pm(cpu);
+		cbe_disable_pm_interrupts(cpu);
+
+		spu_pm_cnt[cur_phys_spu]
+			= cbe_read_ctr(cpu, 0);
+
+		/* restore previous count for the next spu to sample */
+		/* NOTE, hardware issue, counter will not start if the
+		 * counter value is at max (0xFFFFFFFF).
+		 */
+		if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF)
+			cbe_write_ctr(cpu, 0, 0xFFFFFFF0);
+		else
+			cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]);
+
+		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+		/* setup the debug bus measure the one event and
+		 * the two events to route the next SPU's PC on
+		 * the debug bus
+		 */
+		ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3);
+		if (ret)
+			printk(KERN_ERR "%s: pm_rtas_activate_signals failed, "
+			       "SPU event swap\n", __func__);
+
+		/* clear the trace buffer, don't want to take PC for
+		 * previous SPU */
+		cbe_write_pm(cpu, trace_address, 0);
+
+		enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
+
+		/* Enable interrupts on the CPU thread that is starting */
+		cbe_enable_pm_interrupts(cpu, hdw_thread,
+					 interrupt_mask);
+		cbe_enable_pm(cpu);
+	}
+
+	spin_unlock_irqrestore(&cntr_lock, flags);
+
+	/* swap again in HZ/25 jiffies (roughly every 0.04 seconds) */
+	mod_timer(&timer_spu_event_swap, jiffies + HZ / 25);
+}
+
+static void start_spu_event_swap(void)
+{
+	init_timer(&timer_spu_event_swap);
+	timer_spu_event_swap.function = spu_evnt_swap;
+	timer_spu_event_swap.data = 0UL;
+	timer_spu_event_swap.expires = jiffies + HZ / 25;
+	add_timer(&timer_spu_event_swap);
+}
+
+static int cell_reg_setup_spu_events(struct op_counter_config *ctr,
+			struct op_system_config *sys, int num_ctrs)
+{
+	int i;
+
+	/* routine is called once for all nodes */
+
+	spu_evnt_phys_spu_indx = 0;
+	/*
+	 * For all events except PPU CYCLEs, each node will need to make
 	 * the rtas cbe-perftools call to setup and reset the debug bus.
 	 * Make the token lookup call once and store it in the global
 	 * variable pm_rtas_token.
 	 */
+	pm_rtas_token = rtas_token("ibm,cbe-perftools");
+
 	if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
 		printk(KERN_ERR
 		       "%s: rtas token ibm,cbe-perftools unknown\n",
@@ -580,6 +711,58 @@
 		return -EIO;
 	}
 
+	/* setup the pm_control register settings,
+	 * settings will be written per node by the
+	 * cell_cpu_setup() function.
+	 */
+	pm_regs.pm_cntrl.trace_buf_ovflw = 1;
+
+	/* Use the occurrence trace mode to have SPU PC saved
+	 * to the trace buffer.  Occurrence data in trace buffer
+	 * is not used.  Bit 2 must be set to store SPU addresses.
+	 */
+	pm_regs.pm_cntrl.trace_mode = 2;
+
+	pm_regs.pm_cntrl.spu_addr_trace = 0x1;  /* using debug bus
+						   event 2 & 3 */
+
+	/* setup the debug bus event array with the SPU PC routing events.
+	 * Note, pm_signal[0] will be filled in by the set_pm_event() call below.
+	 */
+	pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
+	pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A);
+	pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100;
+	pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
+
+	pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
+	pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B);
+	pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100;
+	pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
+
+	/* Set the user selected spu event to profile on,
+	 * note, only one SPU profiling event is supported
+	 */
+	num_counters = 1;  /* Only support one SPU event at a time */
+	set_pm_event(0, ctr[0].event, ctr[0].unit_mask);
+
+	reset_value[0] = 0xFFFFFFFF - ctr[0].count;
+
+	/* global, used by cell_cpu_setup */
+	ctr_enabled |= 1;
+
+	/* Initialize the count for each SPU to the reset value */
+	for (i = 0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++)
+		spu_pm_cnt[i] = reset_value[0];
+
+	return 0;
+}
+
+static int cell_reg_setup_ppu(struct op_counter_config *ctr,
+			struct op_system_config *sys, int num_ctrs)
+{
+	/* routine is called once for all nodes */
+	int i, j, cpu;
+
 	num_counters = num_ctrs;
 
 	if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
@@ -589,14 +772,6 @@
 		       __func__);
 		return -EIO;
 	}
-	pm_regs.group_control = 0;
-	pm_regs.debug_bus_control = 0;
-
-	/* setup the pm_control register */
-	memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl));
-	pm_regs.pm_cntrl.stop_at_max = 1;
-	pm_regs.pm_cntrl.trace_mode = 0;
-	pm_regs.pm_cntrl.freeze = 1;
 
 	set_count_mode(sys->enable_kernel, sys->enable_user);
 
@@ -665,6 +840,63 @@
 }
 
 
+/* This function is called once for all cpus combined */
+static int cell_reg_setup(struct op_counter_config *ctr,
+			struct op_system_config *sys, int num_ctrs)
+{
+	int ret = 0;
+	spu_cycle_reset = 0;
+
+	/* initialize the spu_addr_trace value; it will be reset if
+	 * doing spu event profiling.
+	 */
+	pm_regs.group_control = 0;
+	pm_regs.debug_bus_control = 0;
+	pm_regs.pm_cntrl.stop_at_max = 1;
+	pm_regs.pm_cntrl.trace_mode = 0;
+	pm_regs.pm_cntrl.freeze = 1;
+	pm_regs.pm_cntrl.trace_buf_ovflw = 0;
+	pm_regs.pm_cntrl.spu_addr_trace = 0;
+
+	/*
+	 * For all events except PPU CYCLEs, each node will need to make
+	 * the rtas cbe-perftools call to setup and reset the debug bus.
+	 * Make the token lookup call once and store it in the global
+	 * variable pm_rtas_token.
+	 */
+	pm_rtas_token = rtas_token("ibm,cbe-perftools");
+
+	if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
+		printk(KERN_ERR
+		       "%s: rtas token ibm,cbe-perftools unknown\n",
+		       __func__);
+		return -EIO;
+	}
+
+	if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
+		profiling_mode = SPU_PROFILING_CYCLES;
+		ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
+	} else if ((ctr[0].event >= SPU_EVENT_NUM_START) &&
+		   (ctr[0].event <= SPU_EVENT_NUM_STOP)) {
+		profiling_mode = SPU_PROFILING_EVENTS;
+		spu_cycle_reset = ctr[0].count;
+
+		/* for SPU event profiling, need to setup the
+		 * pm_signal array with the events to route the
+		 * SPU PC before making the FW call.  Note, only
+		 * one SPU event for profiling can be specified
+		 * at a time.
+		 */
+		cell_reg_setup_spu_events(ctr, sys, num_ctrs);
+	} else {
+		profiling_mode = PPU_PROFILING;
+		ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);
+	}
+
+	return ret;
+}
+
+
 
 /* This function is called once for each cpu */
 static int cell_cpu_setup(struct op_counter_config *cntr)
@@ -672,8 +904,13 @@
 	u32 cpu = smp_processor_id();
 	u32 num_enabled = 0;
 	int i;
+	int ret;
 
-	if (spu_cycle_reset)
+	/* Cycle based SPU profiling does not use the performance
+	 * counters.  The trace array is configured to collect
+	 * the data.
+	 */
+	if (profiling_mode == SPU_PROFILING_CYCLES)
 		return 0;
 
 	/* There is one performance monitor per processor chip (i.e. node),
@@ -686,7 +923,6 @@
 	cbe_disable_pm(cpu);
 	cbe_disable_pm_interrupts(cpu);
 
-	cbe_write_pm(cpu, pm_interval, 0);
 	cbe_write_pm(cpu, pm_start_stop, 0);
 	cbe_write_pm(cpu, group_control, pm_regs.group_control);
 	cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
@@ -703,7 +939,20 @@
 	 * The pm_rtas_activate_signals will return -EIO if the FW
 	 * call failed.
 	 */
-	return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled);
+	if (profiling_mode == SPU_PROFILING_EVENTS) {
+		/* For SPU event profiling also need to setup the
+		 * pm interval timer
+		 */
+		ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
+					       num_enabled + 2);
+		/* store PC from debug bus to Trace buffer as often
+		 * as possible (every 10 cycles)
+		 */
+		cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
+		return ret;
+	} else
+		return pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
+						num_enabled);
 }
 
 #define ENTRIES	 303
@@ -885,7 +1134,122 @@
 };
 #endif
 
-static int cell_global_start_spu(struct op_counter_config *ctr)
+/*
+ * Note the generic OProfile stop calls do not support returning
+ * an error on stop.  Hence, will not return an error if the FW
+ * calls fail on stop.  Failure to reset the debug bus is not an issue.
+ * Failure to disable the SPU profiling is not an issue.  The FW calls
+ * to enable the performance counters and debug bus will work even if
+ * the hardware was not cleanly reset.
+ */
+static void cell_global_stop_spu_cycles(void)
+{
+	int subfunc, rtn_value;
+	unsigned int lfsr_value;
+	int cpu;
+
+	oprofile_running = 0;
+	smp_wmb();
+
+#ifdef CONFIG_CPU_FREQ
+	cpufreq_unregister_notifier(&cpu_freq_notifier_block,
+				    CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+
+	for_each_online_cpu(cpu) {
+		if (cbe_get_hw_thread_id(cpu))
+			continue;
+
+		subfunc = 3;	/*
+				 * 2 - activate SPU tracing,
+				 * 3 - deactivate
+				 */
+		lfsr_value = 0x8f100000;
+
+		rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
+				      subfunc, cbe_cpu_to_node(cpu),
+				      lfsr_value);
+
+		if (unlikely(rtn_value != 0)) {
+			printk(KERN_ERR
+			       "%s: rtas call ibm,cbe-spu-perftools " \
+			       "failed, return = %d\n",
+			       __func__, rtn_value);
+		}
+
+		/* Deactivate the signals */
+		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+	}
+
+	stop_spu_profiling_cycles();
+}
+
+static void cell_global_stop_spu_events(void)
+{
+	int cpu;
+	oprofile_running = 0;
+
+	stop_spu_profiling_events();
+	smp_wmb();
+
+	for_each_online_cpu(cpu) {
+		if (cbe_get_hw_thread_id(cpu))
+			continue;
+
+		cbe_sync_irq(cbe_cpu_to_node(cpu));
+		/* Stop the counters */
+		cbe_disable_pm(cpu);
+		cbe_write_pm07_control(cpu, 0, 0);
+
+		/* Deactivate the signals */
+		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+		/* Deactivate interrupts */
+		cbe_disable_pm_interrupts(cpu);
+	}
+	del_timer_sync(&timer_spu_event_swap);
+}
+
+static void cell_global_stop_ppu(void)
+{
+	int cpu;
+
+	/*
+	 * This routine will be called once for the system.
+	 * There is one performance monitor per node, so we
+	 * only need to perform this function once per node.
+	 */
+	del_timer_sync(&timer_virt_cntr);
+	oprofile_running = 0;
+	smp_wmb();
+
+	for_each_online_cpu(cpu) {
+		if (cbe_get_hw_thread_id(cpu))
+			continue;
+
+		cbe_sync_irq(cbe_cpu_to_node(cpu));
+		/* Stop the counters */
+		cbe_disable_pm(cpu);
+
+		/* Deactivate the signals */
+		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+		/* Deactivate interrupts */
+		cbe_disable_pm_interrupts(cpu);
+	}
+}
+
+static void cell_global_stop(void)
+{
+	if (profiling_mode == PPU_PROFILING)
+		cell_global_stop_ppu();
+	else if (profiling_mode == SPU_PROFILING_EVENTS)
+		cell_global_stop_spu_events();
+	else
+		cell_global_stop_spu_cycles();
+}
+
+static int cell_global_start_spu_cycles(struct op_counter_config *ctr)
 {
 	int subfunc;
 	unsigned int lfsr_value;
@@ -951,18 +1315,18 @@
 
 		/* start profiling */
 		ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
-		  cbe_cpu_to_node(cpu), lfsr_value);
+				cbe_cpu_to_node(cpu), lfsr_value);
 
 		if (unlikely(ret != 0)) {
 			printk(KERN_ERR
-			       "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
-			       __func__, ret);
+			       "%s: rtas call ibm,cbe-spu-perftools failed, " \
+			       "return = %d\n", __func__, ret);
 			rtas_error = -EIO;
 			goto out;
 		}
 	}
 
-	rtas_error = start_spu_profiling(spu_cycle_reset);
+	rtas_error = start_spu_profiling_cycles(spu_cycle_reset);
 	if (rtas_error)
 		goto out_stop;
 
@@ -970,11 +1334,74 @@
 	return 0;
 
 out_stop:
-	cell_global_stop_spu();		/* clean up the PMU/debug bus */
+	cell_global_stop_spu_cycles();	/* clean up the PMU/debug bus */
 out:
 	return rtas_error;
 }
 
+static int cell_global_start_spu_events(struct op_counter_config *ctr)
+{
+	int cpu;
+	u32 interrupt_mask = 0;
+	int rtn = 0;
+
+	hdw_thread = 0;
+
+	/* SPU event profiling uses the performance counters to generate
+	 * an interrupt.  The hardware is set up to store the SPU program
+	 * counter into the trace array.  The occurrence mode is used to
+	 * enable storing data to the trace buffer.  The bits are set
+	 * to send/store the SPU address in the trace buffer.  The debug
+	 * bus must be setup to route the SPU program counter onto the
+	 * debug bus.  The occurrence data in the trace buffer is not used.
+	 */
+
+	/* This routine gets called once for the system.
+	 * There is one performance monitor per node, so we
+	 * only need to perform this function once per node.
+	 */
+
+	for_each_online_cpu(cpu) {
+		if (cbe_get_hw_thread_id(cpu))
+			continue;
+
+		/*
+		 * Setup SPU event-based profiling.
+		 * Set perf_mon_control bit 0 to a zero before
+		 * enabling spu collection hardware.
+		 *
+		 * Only support one SPU event on one SPU per node.
+		 */
+		if (ctr_enabled & 1) {
+			cbe_write_ctr(cpu, 0, reset_value[0]);
+			enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
+			interrupt_mask |=
+				CBE_PM_CTR_OVERFLOW_INTR(0);
+		} else {
+			/* Disable counter */
+			cbe_write_pm07_control(cpu, 0, 0);
+		}
+
+		cbe_get_and_clear_pm_interrupts(cpu);
+		cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
+		cbe_enable_pm(cpu);
+
+		/* clear the trace buffer */
+		cbe_write_pm(cpu, trace_address, 0);
+	}
+
+	/* Start the timer to time slice collecting the event profile
+	 * on each of the SPUs.  Note, can collect profile on one SPU
+	 * per node at a time.
+	 */
+	start_spu_event_swap();
+	start_spu_profiling_events();
+	oprofile_running = 1;
+	smp_wmb();
+
+	return rtn;
+}
+
 static int cell_global_start_ppu(struct op_counter_config *ctr)
 {
 	u32 cpu, i;
@@ -994,8 +1421,7 @@
 			if (ctr_enabled & (1 << i)) {
 				cbe_write_ctr(cpu, i, reset_value[i]);
 				enable_ctr(cpu, i, pm_regs.pm07_cntrl);
-				interrupt_mask |=
-				    CBE_PM_CTR_OVERFLOW_INTR(i);
+				interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i);
 			} else {
 				/* Disable counter */
 				cbe_write_pm07_control(cpu, i, 0);
@@ -1024,99 +1450,162 @@
 
 static int cell_global_start(struct op_counter_config *ctr)
 {
-	if (spu_cycle_reset)
-		return cell_global_start_spu(ctr);
+	if (profiling_mode == SPU_PROFILING_CYCLES)
+		return cell_global_start_spu_cycles(ctr);
+	else if (profiling_mode == SPU_PROFILING_EVENTS)
+		return cell_global_start_spu_events(ctr);
 	else
 		return cell_global_start_ppu(ctr);
 }
 
-/*
- * Note the generic OProfile stop calls do not support returning
- * an error on stop.  Hence, will not return an error if the FW
- * calls fail on stop.	Failure to reset the debug bus is not an issue.
- * Failure to disable the SPU profiling is not an issue.  The FW calls
- * to enable the performance counters and debug bus will work even if
- * the hardware was not cleanly reset.
+
+/* The SPU interrupt handler
+ *
+ * SPU event profiling works as follows:
+ * The pm_signal[0] holds the one SPU event to be measured.  It is routed on
+ * the debug bus using word 0 or 1.  The value of pm_signal[1] and
+ * pm_signal[2] contain the necessary events to route the SPU program
+ * counter for the selected SPU onto the debug bus using words 2 and 3.
+ * The pm_interval register is setup to write the SPU PC value into the
+ * trace buffer at the maximum rate possible.  The trace buffer is configured
+ * to store the PCs, wrapping when it is full.  The performance counter is
+ * initialized to the max hardware count minus the number of events, N, between
+ * samples.  Once the N events have occurred, a HW counter overflow occurs
+ * causing the generation of a HW counter interrupt which also stops the
+ * writing of the SPU PC values to the trace buffer.  Hence the last PC
+ * written to the trace buffer is the SPU PC that we want.  Unfortunately,
+ * we have to read from the beginning of the trace buffer to get to the
+ * last value written.  We just hope the PPU has nothing better to do then
+ * service this interrupt. The PC for the specific SPU being profiled is
+ * extracted from the trace buffer processed and stored.  The trace buffer
+ * is cleared, interrupts are cleared, the counter is reset to max - N.
+ * A kernel timer is used to periodically call the routine spu_evnt_swap()
+ * to switch to the next physical SPU in the node to profile in round robin
+ * order.  This way data is collected for all SPUs on the node. It does mean
+ * that we need to use a relatively small value of N to ensure enough samples
+ * on each SPU are collected each SPU is being profiled 1/8 of the time.
+ * It may also be necessary to use a longer sample collection period.
  */
-static void cell_global_stop_spu(void)
+static void cell_handle_interrupt_spu(struct pt_regs *regs,
+				      struct op_counter_config *ctr)
 {
-	int subfunc, rtn_value;
-	unsigned int lfsr_value;
-	int cpu;
+	u32 cpu, cpu_tmp;
+	u64 trace_entry;
+	u32 interrupt_mask;
+	u64 trace_buffer[2];
+	u64 last_trace_buffer;
+	u32 sample;
+	u32 trace_addr;
+	unsigned long sample_array_lock_flags;
+	int spu_num;
+	unsigned long flags;
 
-	oprofile_running = 0;
+	/* Make sure spu event interrupt handler and spu event swap
+	 * don't access the counters simultaneously.
+	 */
+	cpu = smp_processor_id();
+	spin_lock_irqsave(&cntr_lock, flags);
 
-#ifdef CONFIG_CPU_FREQ
-	cpufreq_unregister_notifier(&cpu_freq_notifier_block,
-				    CPUFREQ_TRANSITION_NOTIFIER);
-#endif
+	cpu_tmp = cpu;
+	cbe_disable_pm(cpu);
 
-	for_each_online_cpu(cpu) {
-		if (cbe_get_hw_thread_id(cpu))
-			continue;
+	interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
 
-		subfunc = 3;	/*
-				 * 2 - activate SPU tracing,
-				 * 3 - deactivate
-				 */
-		lfsr_value = 0x8f100000;
+	sample = 0xABCDEF;
+	trace_entry = 0xfedcba;
+	last_trace_buffer = 0xdeadbeaf;
 
-		rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
-				      subfunc, cbe_cpu_to_node(cpu),
-				      lfsr_value);
+	if ((oprofile_running == 1) && (interrupt_mask != 0)) {
+		/* disable writes to trace buff */
+		cbe_write_pm(cpu, pm_interval, 0);
 
-		if (unlikely(rtn_value != 0)) {
-			printk(KERN_ERR
-			       "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
-			       __func__, rtn_value);
+		/* only have one perf cntr being used, cntr 0 */
+		if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0))
+		    && ctr[0].enabled)
+			/* The SPU PC values will be read
+			 * from the trace buffer, reset counter
+			 */
+			cbe_write_ctr(cpu, 0, reset_value[0]);
+
+		trace_addr = cbe_read_pm(cpu, trace_address);
+
+		while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
+			/* There is data in the trace buffer to process
+			 * Read the buffer until you get to the last
+			 * entry.  This is the value we want.
+			 */
+
+			cbe_read_trace_buffer(cpu, trace_buffer);
+			trace_addr = cbe_read_pm(cpu, trace_address);
 		}
 
-		/* Deactivate the signals */
-		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+		/* SPU Address 16 bit count format for 128 bit
+		 * HW trace buffer is used for the SPU PC storage
+		 *    HDR bits          0:15
+		 *    SPU Addr 0 bits   16:31
+		 *    SPU Addr 1 bits   32:47
+		 *    unused bits       48:127
+		 *
+		 * HDR: bit4 = 1 SPU Address 0 valid
+		 * HDR: bit5 = 1 SPU Address 1 valid
+		 *  - unfortunately, the valid bits don't seem to work
+		 *
+		 * Note trace_buffer[0] holds bits 0:63 of the HW
+		 * trace buffer, trace_buffer[1] holds bits 64:127
+		 */
+
+		trace_entry = trace_buffer[0]
+			& 0x00000000FFFF0000;
+
+		/* only the top 16 bits of the 18-bit SPU PC address
+		 * are stored in the trace buffer, hence shift right
+		 * by 16 - 2 = 14 bits to recover the address */
+		sample = trace_entry >> 14;
+		last_trace_buffer = trace_buffer[0];
+
+		spu_num = spu_evnt_phys_spu_indx
+			+ (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE);
+
+		/* make sure only one process at a time is calling
+		 * spu_sync_buffer()
+		 */
+		spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
+				  sample_array_lock_flags);
+		spu_sync_buffer(spu_num, &sample, 1);
+		spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+				       sample_array_lock_flags);
+
+		smp_wmb();    /* ensure spu event buffer updates are written;
+			       * don't want events intermingled... */
+
+		/* The counters were frozen by the interrupt.
+		 * Reenable the interrupt and restart the counters.
+		 */
+		cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
+		cbe_enable_pm_interrupts(cpu, hdw_thread,
+					 virt_cntr_inter_mask);
+
+		/* clear the trace buffer, re-enable writes to trace buff */
+		cbe_write_pm(cpu, trace_address, 0);
+		cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
+
+		/* The writes to the various performance counters only writes
+		 * to a latch.  The new values (interrupt setting bits, reset
+		 * counter value etc.) are not copied to the actual registers
+		 * until the performance monitor is enabled.  In order to get
+		 * this to work as desired, the performance monitor needs to
+		 * be disabled while writing to the latches.  This is a
+		 * HW design issue.
+		 */
+		write_pm_cntrl(cpu);
+		cbe_enable_pm(cpu);
 	}
-
-	stop_spu_profiling();
+	spin_unlock_irqrestore(&cntr_lock, flags);
 }
 
-static void cell_global_stop_ppu(void)
-{
-	int cpu;
-
-	/*
-	 * This routine will be called once for the system.
-	 * There is one performance monitor per node, so we
-	 * only need to perform this function once per node.
-	 */
-	del_timer_sync(&timer_virt_cntr);
-	oprofile_running = 0;
-	smp_wmb();
-
-	for_each_online_cpu(cpu) {
-		if (cbe_get_hw_thread_id(cpu))
-			continue;
-
-		cbe_sync_irq(cbe_cpu_to_node(cpu));
-		/* Stop the counters */
-		cbe_disable_pm(cpu);
-
-		/* Deactivate the signals */
-		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
-
-		/* Deactivate interrupts */
-		cbe_disable_pm_interrupts(cpu);
-	}
-}
-
-static void cell_global_stop(void)
-{
-	if (spu_cycle_reset)
-		cell_global_stop_spu();
-	else
-		cell_global_stop_ppu();
-}
-
-static void cell_handle_interrupt(struct pt_regs *regs,
-				struct op_counter_config *ctr)
+static void cell_handle_interrupt_ppu(struct pt_regs *regs,
+				      struct op_counter_config *ctr)
 {
 	u32 cpu;
 	u64 pc;
@@ -1132,7 +1621,7 @@
 	 * routine are not running at the same time. See the
 	 * cell_virtual_cntr() routine for additional comments.
 	 */
-	spin_lock_irqsave(&virt_cntr_lock, flags);
+	spin_lock_irqsave(&cntr_lock, flags);
 
 	/*
 	 * Need to disable and reenable the performance counters
@@ -1185,7 +1674,16 @@
 		 */
 		cbe_enable_pm(cpu);
 	}
-	spin_unlock_irqrestore(&virt_cntr_lock, flags);
+	spin_unlock_irqrestore(&cntr_lock, flags);
+}
+
+static void cell_handle_interrupt(struct pt_regs *regs,
+				  struct op_counter_config *ctr)
+{
+	if (profiling_mode == PPU_PROFILING)
+		cell_handle_interrupt_ppu(regs, ctr);
+	else
+		cell_handle_interrupt_spu(regs, ctr);
 }
 
 /*
@@ -1195,7 +1693,8 @@
  */
 static int cell_sync_start(void)
 {
-	if (spu_cycle_reset)
+	if ((profiling_mode == SPU_PROFILING_CYCLES) ||
+	    (profiling_mode == SPU_PROFILING_EVENTS))
 		return spu_sync_start();
 	else
 		return DO_GENERIC_SYNC;
@@ -1203,7 +1702,8 @@
 
 static int cell_sync_stop(void)
 {
-	if (spu_cycle_reset)
+	if ((profiling_mode == SPU_PROFILING_CYCLES) ||
+	    (profiling_mode == SPU_PROFILING_EVENTS))
 		return spu_sync_stop();
 	else
 		return 1;
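
Two pieces of counter arithmetic above are easy to misread: reset_value[0] = 0xFFFFFFFF - ctr[0].count preloads the 32-bit, up-counting PMC so that it overflows (and interrupts) after roughly ctr[0].count more events, and NUM_INTERVAL_CYC = 0xFFFFFFFF - 10 programs the interval timer to push the SPU PC into the trace buffer about every 10 cycles. A self-contained sketch of that preload calculation (illustrative helper, not kernel API):

	#include <stdint.h>

	/* Preload an up-counting 32-bit counter so it overflows after
	 * approximately n more counts.
	 */
	static uint32_t preload_for(uint32_t n)
	{
		return 0xFFFFFFFFu - n;
	}

	/* preload_for(10) matches NUM_INTERVAL_CYC above; preload_for(count)
	 * matches reset_value[0] for the user-requested sample count.
	 */
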
diff --git a/arch/powerpc/oprofile/op_model_pa6t.c b/arch/powerpc/oprofile/op_model_pa6t.c
index c40de46..42f778d 100644
--- a/arch/powerpc/oprofile/op_model_pa6t.c
+++ b/arch/powerpc/oprofile/op_model_pa6t.c
@@ -132,7 +132,7 @@
 	for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++) {
 		/* counters are 40 bit. Move to cputable at some point? */
 		reset_value[pmc] = (0x1UL << 39) - ctr[pmc].count;
-		pr_debug("reset_value for pmc%u inited to 0x%lx\n",
+		pr_debug("reset_value for pmc%u inited to 0x%llx\n",
 				 pmc, reset_value[pmc]);
 	}
 
@@ -177,7 +177,7 @@
 
 	oprofile_running = 1;
 
-	pr_debug("start on cpu %d, mmcr0 %lx\n", smp_processor_id(), mmcr0);
+	pr_debug("start on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
 
 	return 0;
 }
@@ -193,7 +193,7 @@
 
 	oprofile_running = 0;
 
-	pr_debug("stop on cpu %d, mmcr0 %lx\n", smp_processor_id(), mmcr0);
+	pr_debug("stop on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
 }
 
 /* handle the perfmon overflow vector */
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c
index f416014..1bcff94 100644
--- a/arch/powerpc/platforms/512x/clock.c
+++ b/arch/powerpc/platforms/512x/clock.c
@@ -56,12 +56,12 @@
 	int dev_match = 0;
 	int id_match = 0;
 
-	if (dev == NULL && id == NULL)
+	if (dev == NULL || id == NULL)
 		return NULL;
 
 	mutex_lock(&clocks_mutex);
 	list_for_each_entry(p, &clocks, node) {
-		if (dev && dev == p->dev)
+		if (dev == p->dev)
 			dev_match++;
 		if (strcmp(id, p->name) == 0)
 			id_match++;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index ae7c34f..98367a0 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -42,7 +42,7 @@
  * from interrupt context while node mapping (which calls ioremap())
  * cannot be used at such point.
  */
-static spinlock_t mpc52xx_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(mpc52xx_lock);
 static struct mpc52xx_gpt __iomem *mpc52xx_wdt;
 static struct mpc52xx_cdm __iomem *mpc52xx_cdm;
 
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
index 8a455eb..07f89ae 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
@@ -363,11 +363,8 @@
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
 	struct mpc52xx_gpt __iomem *regs = mm_gc->regs;
-	unsigned int ret;
 
 	return (in_be32(&regs->status) & (1 << (31 - 23))) ? 1 : 0;
-
-	return ret;
 }
 
 static void
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 72865e8..0a093f0 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -196,6 +196,7 @@
 
 static int mpc52xx_extirq_set_type(unsigned int virq, unsigned int flow_type)
 {
+	struct irq_desc *desc = get_irq_desc(virq);
 	u32 ctrl_reg, type;
 	int irq;
 	int l2irq;
@@ -222,6 +223,11 @@
 		type = 0;
 	}
 
+	desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
+	desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
+	if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+		desc->status |= IRQ_LEVEL;
+
 	ctrl_reg = in_be32(&intr->ctrl);
 	ctrl_reg &= ~(0x3 << (22 - (l2irq * 2)));
 	ctrl_reg |= (type << (22 - (l2irq * 2)));
@@ -231,7 +237,7 @@
 }
 
 static struct irq_chip mpc52xx_extirq_irqchip = {
-	.typename = " MPC52xx IRQ[0-3] ",
+	.typename = "MPC52xx External",
 	.mask = mpc52xx_extirq_mask,
 	.unmask = mpc52xx_extirq_unmask,
 	.ack = mpc52xx_extirq_ack,
diff --git a/arch/powerpc/platforms/83xx/Makefile b/arch/powerpc/platforms/83xx/Makefile
index ba5028e..051777c 100644
--- a/arch/powerpc/platforms/83xx/Makefile
+++ b/arch/powerpc/platforms/83xx/Makefile
@@ -3,6 +3,7 @@
 #
 obj-y				:= misc.o usb.o
 obj-$(CONFIG_SUSPEND)		+= suspend.o suspend-asm.o
+obj-$(CONFIG_MCU_MPC8349EMITX)	+= mcu_mpc8349emitx.o
 obj-$(CONFIG_MPC831x_RDB)	+= mpc831x_rdb.o
 obj-$(CONFIG_MPC832x_RDB)	+= mpc832x_rdb.o
 obj-$(CONFIG_MPC834x_MDS)	+= mpc834x_mds.o
diff --git a/drivers/i2c/chips/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
similarity index 100%
rename from drivers/i2c/chips/mcu_mpc8349emitx.c
rename to arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index a428f8d..5177bdd 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -42,7 +42,7 @@
 	mpc831x_usb_cfg();
 }
 
-void __init mpc831x_rdb_init_IRQ(void)
+static void __init mpc831x_rdb_init_IRQ(void)
 {
 	struct device_node *np;
 
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index ec43477..ec0b401b 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -49,8 +49,6 @@
 #define DBG(fmt...)
 #endif
 
-static u8 *bcsr_regs = NULL;
-
 /* ************************************************************************
  *
  * Setup the architecture
@@ -59,13 +57,14 @@
 static void __init mpc832x_sys_setup_arch(void)
 {
 	struct device_node *np;
+	u8 __iomem *bcsr_regs = NULL;
 
 	if (ppc_md.progress)
 		ppc_md.progress("mpc832x_sys_setup_arch()", 0);
 
 	/* Map BCSR area */
 	np = of_find_node_by_name(NULL, "bcsr");
-	if (np != 0) {
+	if (np) {
 		struct resource res;
 
 		of_address_to_resource(np, 0, &res);
@@ -93,9 +92,9 @@
 			!= NULL){
 		/* Reset the Ethernet PHYs */
 #define BCSR8_FETH_RST 0x50
-		bcsr_regs[8] &= ~BCSR8_FETH_RST;
+		clrbits8(&bcsr_regs[8], BCSR8_FETH_RST);
 		udelay(1000);
-		bcsr_regs[8] |= BCSR8_FETH_RST;
+		setbits8(&bcsr_regs[8], BCSR8_FETH_RST);
 		iounmap(bcsr_regs);
 		of_node_put(np);
 	}
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 0300268..2a1295f 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -38,6 +38,7 @@
 #define DBG(fmt...)
 #endif
 
+#ifdef CONFIG_QUICC_ENGINE
 static void mpc83xx_spi_activate_cs(u8 cs, u8 polarity)
 {
 	pr_debug("%s %d %d\n", __func__, cs, polarity);
@@ -77,8 +78,8 @@
 			    mpc83xx_spi_activate_cs,
 			    mpc83xx_spi_deactivate_cs);
 }
-
 machine_device_initcall(mpc832x_rdb, mpc832x_spi_init);
+#endif /* CONFIG_QUICC_ENGINE */
 
 /* ************************************************************************
  *
@@ -130,7 +131,7 @@
 }
 machine_device_initcall(mpc832x_rdb, mpc832x_declare_of_platform_devices);
 
-void __init mpc832x_rdb_init_IRQ(void)
+static void __init mpc832x_rdb_init_IRQ(void)
 {
 
 	struct device_node *np;
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index 9d46e5b..09e9d6f 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -18,6 +18,7 @@
 
 #include <linux/stddef.h>
 #include <linux/kernel.h>
+#include <linux/compiler.h>
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/reboot.h>
@@ -43,6 +44,7 @@
 #include <asm/udbg.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
+#include <sysdev/simple_gpio.h>
 #include <asm/qe.h>
 #include <asm/qe_ic.h>
 
@@ -55,8 +57,6 @@
 #define DBG(fmt...)
 #endif
 
-static u8 *bcsr_regs = NULL;
-
 /* ************************************************************************
  *
  * Setup the architecture
@@ -65,13 +65,14 @@
 static void __init mpc836x_mds_setup_arch(void)
 {
 	struct device_node *np;
+	u8 __iomem *bcsr_regs = NULL;
 
 	if (ppc_md.progress)
 		ppc_md.progress("mpc836x_mds_setup_arch()", 0);
 
 	/* Map BCSR area */
 	np = of_find_node_by_name(NULL, "bcsr");
-	if (np != 0) {
+	if (np) {
 		struct resource res;
 
 		of_address_to_resource(np, 0, &res);
@@ -93,6 +94,16 @@
 
 		for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;)
 			par_io_of_config(np);
+#ifdef CONFIG_QE_USB
+		/* Must fixup Par IO before QE GPIO chips are registered. */
+		par_io_config_pin(1,  2, 1, 0, 3, 0); /* USBOE  */
+		par_io_config_pin(1,  3, 1, 0, 3, 0); /* USBTP  */
+		par_io_config_pin(1,  8, 1, 0, 1, 0); /* USBTN  */
+		par_io_config_pin(1, 10, 2, 0, 3, 0); /* USBRXD */
+		par_io_config_pin(1,  9, 2, 1, 3, 0); /* USBRP  */
+		par_io_config_pin(1, 11, 2, 1, 3, 0); /* USBRN  */
+		par_io_config_pin(2, 20, 2, 0, 1, 0); /* CLK21  */
+#endif /* CONFIG_QE_USB */
 	}
 
 	if ((np = of_find_compatible_node(NULL, "network", "ucc_geth"))
@@ -151,6 +162,70 @@
 }
 machine_device_initcall(mpc836x_mds, mpc836x_declare_of_platform_devices);
 
+#ifdef CONFIG_QE_USB
+static int __init mpc836x_usb_cfg(void)
+{
+	u8 __iomem *bcsr;
+	struct device_node *np;
+	const char *mode;
+	int ret = 0;
+
+	np = of_find_compatible_node(NULL, NULL, "fsl,mpc8360mds-bcsr");
+	if (!np)
+		return -ENODEV;
+
+	bcsr = of_iomap(np, 0);
+	of_node_put(np);
+	if (!bcsr)
+		return -ENOMEM;
+
+	np = of_find_compatible_node(NULL, NULL, "fsl,mpc8323-qe-usb");
+	if (!np) {
+		ret = -ENODEV;
+		goto err;
+	}
+
+#define BCSR8_TSEC1M_MASK	(0x3 << 6)
+#define BCSR8_TSEC1M_RGMII	(0x0 << 6)
+#define BCSR8_TSEC2M_MASK	(0x3 << 4)
+#define BCSR8_TSEC2M_RGMII	(0x0 << 4)
+	/*
+	 * Default is GMII (2), but we should set it to RGMII (0) if we use
+	 * USB (Eth PHY is in RGMII mode anyway).
+	 */
+	clrsetbits_8(&bcsr[8], BCSR8_TSEC1M_MASK | BCSR8_TSEC2M_MASK,
+			       BCSR8_TSEC1M_RGMII | BCSR8_TSEC2M_RGMII);
+
+#define BCSR13_USBMASK	0x0f
+#define BCSR13_nUSBEN	0x08 /* 1 - Disable, 0 - Enable			*/
+#define BCSR13_USBSPEED	0x04 /* 1 - Full, 0 - Low			*/
+#define BCSR13_USBMODE	0x02 /* 1 - Host, 0 - Function			*/
+#define BCSR13_nUSBVCC	0x01 /* 1 - gets VBUS, 0 - supplies VBUS 	*/
+
+	clrsetbits_8(&bcsr[13], BCSR13_USBMASK, BCSR13_USBSPEED);
+
+	mode = of_get_property(np, "mode", NULL);
+	if (mode && !strcmp(mode, "peripheral")) {
+		setbits8(&bcsr[13], BCSR13_nUSBVCC);
+		qe_usb_clock_set(QE_CLK21, 48000000);
+	} else {
+		setbits8(&bcsr[13], BCSR13_USBMODE);
+		/*
+		 * The BCSR GPIOs are used to control power and
+		 * speed of the USB transceiver. This is needed for
+		 * the USB Host only.
+		 */
+		simple_gpiochip_init("fsl,mpc8360mds-bcsr-gpio");
+	}
+
+	of_node_put(np);
+err:
+	iounmap(bcsr);
+	return ret;
+}
+machine_arch_initcall(mpc836x_mds, mpc836x_usb_cfg);
+#endif /* CONFIG_QE_USB */
+
 static void __init mpc836x_mds_init_IRQ(void)
 {
 	struct device_node *np;
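
The BCSR pokes above are plain read-modify-write accesses to memory-mapped 8-bit registers. As a reminder of what the helpers do (an illustrative equivalent, not the asm/io.h implementation):

	#include <linux/types.h>
	#include <asm/io.h>

	/* roughly what clrsetbits_8(reg, clr, set) performs */
	static inline void bcsr_clrset(u8 __iomem *reg, u8 clr, u8 set)
	{
		out_8(reg, (in_8(reg) & ~clr) | set);
	}

	/* setbits8(reg, set) and clrbits8(reg, clr) are the same pattern with
	 * only the OR step or only the AND-NOT step, respectively.
	 */
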
diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
index a5273bb..b0090aa 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
@@ -51,8 +51,9 @@
 	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
 		mpc83xx_add_bridge(np);
 #endif
-
+#ifdef CONFIG_QUICC_ENGINE
 	qe_reset();
+#endif
 }
 
 static void __init mpc836x_rdk_init_IRQ(void)
@@ -71,13 +72,14 @@
 	 */
 	ipic_set_default_priority();
 	of_node_put(np);
-
+#ifdef CONFIG_QUICC_ENGINE
 	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
 	if (!np)
 		return;
 
 	qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
 	of_node_put(np);
+#endif
 }
 
 /*
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index 8bb13c8..530ef99 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -26,7 +26,6 @@
 #define BCSR12_USB_SER_MASK	0x8a
 #define BCSR12_USB_SER_PIN	0x80
 #define BCSR12_USB_SER_DEVICE	0x02
-extern int mpc837x_usb_cfg(void);
 
 static int mpc837xmds_usb_cfg(void)
 {
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index da030af..1d09654 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -21,8 +21,6 @@
 
 #include "mpc83xx.h"
 
-extern int mpc837x_usb_cfg(void);
-
 /* ************************************************************************
  *
  * Setup the architecture
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 2a7cbab..83cfe51 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -61,6 +61,7 @@
 
 extern void mpc83xx_restart(char *cmd);
 extern long mpc83xx_time_init(void);
+extern int mpc837x_usb_cfg(void);
 extern int mpc834x_usb_cfg(void);
 extern int mpc831x_usb_cfg(void);
 
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index a8301c8..7326d90 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -148,6 +148,9 @@
 /*
  * Setup the architecture
  */
+#ifdef CONFIG_SMP
+extern void __init mpc85xx_smp_init(void);
+#endif
 static void __init mpc85xx_ds_setup_arch(void)
 {
 #ifdef CONFIG_PCI
@@ -173,6 +176,10 @@
 	ppc_md.pci_exclude_device = mpc85xx_exclude_device;
 #endif
 
+#ifdef CONFIG_SMP
+	mpc85xx_smp_init();
+#endif
+
 	printk("MPC85xx DS board from Freescale Semiconductor\n");
 }
 
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index d652c71..79a0df1 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -58,6 +58,7 @@
 
 	if (cpu_rel_addr == NULL) {
 		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
+		local_irq_restore(flags);
 		return;
 	}
 
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 47e956c..200b9cb 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -312,4 +312,26 @@
 	  Say Y here if you're going to use hardware that connects to the
 	  MPC831x/834x/837x/8572/8610 GPIOs.
 
+config SIMPLE_GPIO
+	bool "Support for simple, memory-mapped GPIO controllers"
+	depends on PPC
+	select GENERIC_GPIO
+	select ARCH_REQUIRE_GPIOLIB
+	help
+	  Say Y here to support simple, memory-mapped GPIO controllers.
+	  These are usually BCSRs used to control a board's switches, LEDs,
+	  chip-selects, Ethernet/USB PHY power and various other small
+	  on-board peripherals.
+
+config MCU_MPC8349EMITX
+	tristate "MPC8349E-mITX MCU driver"
+	depends on I2C && PPC_83xx
+	select GENERIC_GPIO
+	select ARCH_REQUIRE_GPIOLIB
+	help
+	  Say Y here to enable soft power-off functionality on the Freescale
+	  boards with the MPC8349E-mITX-compatible MCU chips. This driver will
+	  also register MCU GPIOs with the generic GPIO API, so you'll be able
+	  to use MCU pins as GPIOs.
+
 endmenu
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 3d0c776..e868b5c 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -231,7 +231,7 @@
 	  If in doubt, say Y here.
 
 config SMP
-	depends on PPC_STD_MMU
+	depends on PPC_STD_MMU || FSL_BOOKE
 	bool "Symmetric multi-processing support"
 	---help---
 	  This enables support for systems with more than one CPU. If you have
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index 2e67bd8..35b1ec4 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -44,8 +44,8 @@
 
 static inline unsigned int beat_read_mask(unsigned hpte_group)
 {
-	unsigned long hpte_v[5];
 	unsigned long rmask = 0;
+	u64 hpte_v[5];
 
 	beat_read_htab_entries(0, hpte_group + 0, hpte_v);
 	if (!(hpte_v[0] & HPTE_V_BOLTED))
@@ -93,8 +93,7 @@
 				  int psize, int ssize)
 {
 	unsigned long lpar_rc;
-	unsigned long slot;
-	unsigned long hpte_v, hpte_r;
+	u64 hpte_v, hpte_r, slot;
 
 	/* same as iseries */
 	if (vflags & HPTE_V_SECONDARY)
@@ -153,8 +152,9 @@
 
 static unsigned long beat_lpar_hpte_getword0(unsigned long slot)
 {
-	unsigned long dword0, dword[5];
+	unsigned long dword0;
 	unsigned long lpar_rc;
+	u64 dword[5];
 
 	lpar_rc = beat_read_htab_entries(0, slot & ~3UL, dword);
 
@@ -170,7 +170,7 @@
 	unsigned long size_bytes = 1UL << ppc64_pft_size;
 	unsigned long hpte_count = size_bytes >> 4;
 	int i;
-	unsigned long dummy0, dummy1;
+	u64 dummy0, dummy1;
 
 	/* TODO: Use bulk call */
 	for (i = 0; i < hpte_count; i++)
@@ -189,7 +189,8 @@
 				    int psize, int ssize, int local)
 {
 	unsigned long lpar_rc;
-	unsigned long dummy0, dummy1, want_v;
+	u64 dummy0, dummy1;
+	unsigned long want_v;
 
 	want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
 
@@ -255,7 +256,8 @@
 					  unsigned long ea,
 					  int psize, int ssize)
 {
-	unsigned long lpar_rc, slot, vsid, va, dummy0, dummy1;
+	unsigned long lpar_rc, slot, vsid, va;
+	u64 dummy0, dummy1;
 
 	vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
 	va = (vsid << 28) | (ea & 0x0fffffff);
@@ -276,7 +278,7 @@
 {
 	unsigned long want_v;
 	unsigned long lpar_rc;
-	unsigned long dummy1, dummy2;
+	u64 dummy1, dummy2;
 	unsigned long flags;
 
 	DBG_LOW("    inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
@@ -315,8 +317,7 @@
 				  int psize, int ssize)
 {
 	unsigned long lpar_rc;
-	unsigned long slot;
-	unsigned long hpte_v, hpte_r;
+	u64 hpte_v, hpte_r, slot;
 
 	/* same as iseries */
 	if (vflags & HPTE_V_SECONDARY)
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index 192a935..7225484 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -99,7 +99,7 @@
 	err = beat_downcount_of_interrupt(irq_plug);
 	if (err != 0) {
 		if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */
-			panic("Failed to downcount IRQ! Error = %16lx", err);
+			panic("Failed to downcount IRQ! Error = %16llx", err);
 
 		printk(KERN_ERR "IRQ over-downcounted, plug %d\n", irq_plug);
 	}
diff --git a/arch/powerpc/platforms/cell/beat_udbg.c b/arch/powerpc/platforms/cell/beat_udbg.c
index 6b418f6..350735b 100644
--- a/arch/powerpc/platforms/cell/beat_udbg.c
+++ b/arch/powerpc/platforms/cell/beat_udbg.c
@@ -40,8 +40,8 @@
 }
 
 /* Buffered chars getc */
-static long inbuflen;
-static long inbuf[2];	/* must be 2 longs */
+static u64 inbuflen;
+static u64 inbuf[2];	/* must be 2 u64s */
 
 static int udbg_getc_poll_beat(void)
 {
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
index ec7c8f4..e6506cd 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -118,7 +118,7 @@
 	policy->cur = cbe_freqs[cur_pmode].frequency;
 
 #ifdef CONFIG_SMP
-	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
+	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
 #endif
 
 	cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
index 70fa7ae..20472e4 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
@@ -54,7 +54,7 @@
 {
 	struct cbe_pmd_regs __iomem *pmd_regs;
 	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
-	u64 flags;
+	unsigned long flags;
 	u64 value;
 #ifdef DEBUG
 	long time;
diff --git a/arch/powerpc/platforms/cell/celleb_scc_epci.c b/arch/powerpc/platforms/cell/celleb_scc_epci.c
index 08c285b..48ec88a 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_epci.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_epci.c
@@ -405,7 +405,7 @@
 	hose->cfg_addr = ioremap(r.start, (r.end - r.start + 1));
 	if (!hose->cfg_addr)
 		goto error;
-	pr_debug("EPCI: cfg_addr map 0x%016lx->0x%016lx + 0x%016lx\n",
+	pr_debug("EPCI: cfg_addr map 0x%016llx->0x%016lx + 0x%016llx\n",
 		 r.start, (unsigned long)hose->cfg_addr, (r.end - r.start + 1));
 
 	if (of_address_to_resource(node, 2, &r))
@@ -413,7 +413,7 @@
 	hose->cfg_data = ioremap(r.start, (r.end - r.start + 1));
 	if (!hose->cfg_data)
 		goto error;
-	pr_debug("EPCI: cfg_data map 0x%016lx->0x%016lx + 0x%016lx\n",
+	pr_debug("EPCI: cfg_data map 0x%016llx->0x%016lx + 0x%016llx\n",
 		 r.start, (unsigned long)hose->cfg_data, (r.end - r.start + 1));
 
 	hose->ops = &celleb_epci_ops;
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index a3c6c01..968c1c0 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -110,7 +110,7 @@
 		}
 
 		/* initialize spu_gov_info for all affected cpus */
-		for_each_cpu_mask(i, policy->cpus) {
+		for_each_cpu(i, policy->cpus) {
 			affected_info = &per_cpu(spu_gov_info, i);
 			affected_info->policy = policy;
 		}
@@ -127,7 +127,7 @@
 		spu_gov_cancel_work(info);
 
 		/* clean spu_gov_info for all affected cpus */
-		for_each_cpu_mask (i, policy->cpus) {
+		for_each_cpu (i, policy->cpus) {
 			info = &per_cpu(spu_gov_info, i);
 			info->policy = NULL;
 		}
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 2d5bb22..28c04da 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -148,7 +148,7 @@
 
 	iic = &__get_cpu_var(iic);
 	*(unsigned long *) &pending =
-		in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
+		in_be64((u64 __iomem *) &iic->regs->pending_destr);
 	if (!(pending.flags & CBE_IIC_IRQ_VALID))
 		return NO_IRQ;
 	virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c
index b5f84e8..059cad6 100644
--- a/arch/powerpc/platforms/cell/io-workarounds.c
+++ b/arch/powerpc/platforms/cell/io-workarounds.c
@@ -130,14 +130,14 @@
 
 };
 
-static void __iomem *iowa_ioremap(unsigned long addr, unsigned long size,
+static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
 						unsigned long flags)
 {
 	struct iowa_bus *bus;
 	void __iomem *res = __ioremap(addr, size, flags);
 	int busno;
 
-	bus = iowa_pci_find(0, addr);
+	bus = iowa_pci_find(0, (unsigned long)addr);
 	if (bus != NULL) {
 		busno = bus - iowa_busses;
 		PCI_SET_ADDR_TOKEN(res, busno + 1);
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 86db4dd..ee5033e 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -150,8 +150,8 @@
 static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
 		long n_ptes)
 {
-	unsigned long __iomem *reg;
-	unsigned long val;
+	u64 __iomem *reg;
+	u64 val;
 	long n;
 
 	reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
@@ -855,7 +855,7 @@
 	 */
 	if (np && size < lmb_end_of_DRAM()) {
 		printk(KERN_WARNING "iommu: force-enabled, dma window"
-		       " (%ldMB) smaller than total memory (%ldMB)\n",
+		       " (%ldMB) smaller than total memory (%lldMB)\n",
 		       size >> 20, lmb_end_of_DRAM() >> 20);
 		return -ENODEV;
 	}
@@ -985,7 +985,7 @@
 	addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
 	archdata->dma_data = (void *)addr;
 
-	dev_dbg(dev, "iommu: fixed addr = %lx\n", addr);
+	dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
 }
 
 static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 7b4cefa..5f961c4 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -38,16 +38,16 @@
 	/* Todo: do some nicer parsing of bits and based on them go down
 	 * to other sub-units FIRs and not only IIC
 	 */
-	printk(KERN_ERR "Global Checkstop FIR    : 0x%016lx\n",
+	printk(KERN_ERR "Global Checkstop FIR    : 0x%016llx\n",
 	       in_be64(&pregs->checkstop_fir));
-	printk(KERN_ERR "Global Recoverable FIR  : 0x%016lx\n",
+	printk(KERN_ERR "Global Recoverable FIR  : 0x%016llx\n",
 	       in_be64(&pregs->checkstop_fir));
-	printk(KERN_ERR "Global MachineCheck FIR : 0x%016lx\n",
+	printk(KERN_ERR "Global MachineCheck FIR : 0x%016llx\n",
 	       in_be64(&pregs->spec_att_mchk_fir));
 
 	if (iregs == NULL)
 		return;
-	printk(KERN_ERR "IOC FIR                 : 0x%016lx\n",
+	printk(KERN_ERR "IOC FIR                 : 0x%016llx\n",
 	       in_be64(&iregs->ioc_fir));
 
 }
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index a5bdb89..e487ad6 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -151,7 +151,7 @@
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
 
-	pr_debug("%s: adding SLB[%d] 0x%016lx 0x%016lx\n",
+	pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
 			__func__, slbe, slb->vsid, slb->esid);
 
 	out_be64(&priv2->slb_index_W, slbe);
@@ -221,7 +221,7 @@
 {
 	int ret;
 
-	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);
+	pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);
 
 	/*
 	 * Handle kernel space hash faults immediately. User hash
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index 19f6bfd..fec1495 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -54,7 +54,7 @@
 	long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6);
 
 	if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) {
-		pr_debug("%s: invalid syscall #%ld", __func__, s->nr_ret);
+		pr_debug("%s: invalid syscall #%lld", __func__, s->nr_ret);
 		return -ENOSYS;
 	}
 
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index af116aa..c4d4a19 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -42,7 +42,7 @@
 		return spufs_coredump_read[num].read(ctx, buffer, size, off);
 
 	data = spufs_coredump_read[num].get(ctx);
-	ret = snprintf(buffer, size, "0x%.16lx", data);
+	ret = snprintf(buffer, size, "0x%.16llx", data);
 	if (ret >= size)
 		return size;
 	return ++ret; /* count trailing NULL */
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index f093a58..a4dd3ae 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -132,7 +132,7 @@
 
 	spuctx_switch_state(ctx, SPU_UTIL_IOWAIT);
 
-	pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea,
+	pr_debug("ctx %p: ea %016llx, dsisr %016llx state %d\n", ctx, ea,
 		dsisr, ctx->state);
 
 	ctx->stats.hash_flt++;
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 7106b63..0da7f2b 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1654,7 +1654,7 @@
 
 static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
 {
-	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
+	pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
 		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
 
 	switch (cmd->cmd) {
@@ -1671,7 +1671,7 @@
 	}
 
 	if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
-		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
+		pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
 				cmd->ea, cmd->lsa);
 		return -EIO;
 	}
@@ -2633,7 +2633,7 @@
 	}
 
 	seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
-		" %c %lx %lx %lx %lx %x %x\n",
+		" %c %llx %llx %llx %llx %x %x\n",
 		ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
 		ctx->flags,
 		ctx->sched_flags,
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 15c62d3..3bf908e 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -314,7 +314,7 @@
  *	we need to call spu_release(ctx) before sleeping, and
  *	then spu_acquire(ctx) when awoken.
  *
- * 	Returns with state_mutex re-acquired when successfull or
+ * 	Returns with state_mutex re-acquired when successful or
  * 	with -ERESTARTSYS and the state_mutex dropped when interrupted.
  */
 
diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c
index 8c61996..1db6b9e 100644
--- a/arch/powerpc/platforms/fsl_uli1575.c
+++ b/arch/powerpc/platforms/fsl_uli1575.c
@@ -249,6 +249,7 @@
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5229, quirk_uli5229);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, 0x5249, quirk_final_uli5249);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, 0x1575, quirk_final_uli1575);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AL, 0x5229, quirk_uli5229);
 
 static void __devinit hpcd_quirk_uli1575(struct pci_dev *dev)
 {
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
index ed3753d..7ddd0a2 100644
--- a/arch/powerpc/platforms/iseries/Kconfig
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -10,18 +10,21 @@
 config VIODASD
 	tristate "iSeries Virtual I/O disk support"
 	depends on BLOCK
+	select VIOPATH
 	help
 	  If you are running on an iSeries system and you want to use
 	  virtual disks created and managed by OS/400, say Y.
 
 config VIOCD
 	tristate "iSeries Virtual I/O CD support"
+	select VIOPATH
 	help
 	  If you are running Linux on an IBM iSeries system and you want to
 	  read a CD drive owned by OS/400, say Y here.
 
 config VIOTAPE
 	tristate "iSeries Virtual Tape Support"
+	select VIOPATH
 	help
 	  If you are running Linux on an iSeries system and you want Linux
 	  to read and/or write a tape drive owned by OS/400, say Y here.
@@ -30,5 +33,3 @@
 
 config VIOPATH
 	bool
-	depends on VIODASD || VIOCD || VIOTAPE || ISERIES_VETH
-	default y
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index bbe828f..6ed75bf 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -66,7 +66,7 @@
 
 		rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, tce);
 		if (rc)
-			panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n",
+			panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
 					rc);
 		index++;
 		uaddr += TCE_PAGE_SIZE;
@@ -81,7 +81,7 @@
 	while (npages--) {
 		rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
 		if (rc)
-			panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n",
+			panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
 					rc);
 		index++;
 	}
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 70b688c..24519b9 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -23,6 +23,7 @@
 #include <linux/string.h>
 #include <linux/seq_file.h>
 #include <linux/kdev_t.h>
+#include <linux/kexec.h>
 #include <linux/major.h>
 #include <linux/root_dev.h>
 #include <linux/kernel.h>
@@ -638,6 +639,13 @@
 	return 1;
 }
 
+#ifdef CONFIG_KEXEC
+static int iseries_kexec_prepare(struct kimage *image)
+{
+	return -ENOSYS;
+}
+#endif
+
 define_machine(iseries) {
 	.name			= "iSeries",
 	.setup_arch		= iSeries_setup_arch,
@@ -658,6 +666,9 @@
 	.probe			= iseries_probe,
 	.ioremap		= iseries_ioremap,
 	.iounmap		= iseries_iounmap,
+#ifdef CONFIG_KEXEC
+	.machine_kexec_prepare	= iseries_kexec_prepare,
+#endif
 	/* XXX Implement enable_pmcs for iSeries */
 };
 
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
index 58556b0..be2527a 100644
--- a/arch/powerpc/platforms/pasemi/cpufreq.c
+++ b/arch/powerpc/platforms/pasemi/cpufreq.c
@@ -112,7 +112,7 @@
 
 static void set_astate(int cpu, unsigned int astate)
 {
-	u64 flags;
+	unsigned long flags;
 
 	/* Return if called before init has run */
 	if (unlikely(!sdcasr_mapbase))
@@ -213,7 +213,7 @@
 	pr_debug("current astate is at %d\n",cur_astate);
 
 	policy->cur = pas_freqs[cur_astate].frequency;
-	policy->cpus = cpu_online_map;
+	cpumask_copy(policy->cpus, &cpu_online_map);
 
 	ppc_proc_freq = policy->cur * 1000ul;
 
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index 217af32..a6152d9 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -509,7 +509,7 @@
  */
 int pasemi_dma_init(void)
 {
-	static spinlock_t init_lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(init_lock);
 	struct pci_dev *iob_pdev;
 	struct pci_dev *pdev;
 	struct resource res;
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
index 4dfb4bc..beb3833 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_64.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_64.c
@@ -362,7 +362,7 @@
 	/* secondary CPUs are tied to the primary one by the
 	 * cpufreq core if in the secondary policy we tell it that
 	 * it actually must be one policy together with all others. */
-	policy->cpus = cpu_online_map;
+	cpumask_copy(policy->cpus, &cpu_online_map);
 	cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
 
 	return cpufreq_frequency_table_cpuinfo(policy,
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 54b7b76..04cdd32 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -661,6 +661,7 @@
 			pci_find_hose_for_OF_device(np);
 		if (!hose) {
 			printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
+			of_node_put(np);
 			return;
 		}
 		early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
@@ -669,6 +670,7 @@
 		early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
 	}
 	has_second_ohare = 1;
+	of_node_put(np);
 }
 
 /*
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 6b0711c..bd8817b 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -53,7 +53,7 @@
 #include <asm/pmac_low_i2c.h>
 #include <asm/pmac_pfunc.h>
 
-#define DEBUG
+#undef DEBUG
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index 59eb840..1810e42 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -265,12 +265,15 @@
 	struct resource rsrc;
 
 	vias = of_find_node_by_name(NULL, "via-cuda");
-	if (vias == 0)
+	if (vias == NULL)
 		vias = of_find_node_by_name(NULL, "via-pmu");
-	if (vias == 0)
+	if (vias == NULL)
 		vias = of_find_node_by_name(NULL, "via");
-	if (vias == 0 || of_address_to_resource(vias, 0, &rsrc))
+	if (vias == NULL || of_address_to_resource(vias, 0, &rsrc)) {
+	        of_node_put(vias);
 		return 0;
+	}
+	of_node_put(vias);
 	via = ioremap(rsrc.start, rsrc.end - rsrc.start + 1);
 	if (via == NULL) {
 		printk(KERN_ERR "Failed to map VIA for timer calibration !\n");
@@ -297,7 +300,7 @@
 	ppc_tb_freq = (dstart - dend) * 100 / 6;
 
 	iounmap(via);
-	
+
 	return 1;
 }
 #endif
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index dbc124e..bb028f1 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -82,7 +82,7 @@
 		goto fail_rights;
 	}
 
-	pr_debug("%s:%d: pu_id %lu, rights %lu(%lxh)\n",
+	pr_debug("%s:%d: pu_id %llu, rights %llu(%llxh)\n",
 		__func__, __LINE__, dev->lpm.pu_id, dev->lpm.rights,
 		dev->lpm.rights);
 
@@ -348,7 +348,7 @@
 		return -ENODEV;
 	}
 
-	pr_debug("%s:%u: (%u:%u:%u): port %lu blk_size %lu num_blocks %lu "
+	pr_debug("%s:%u: (%u:%u:%u): port %llu blk_size %llu num_blocks %llu "
 		 "num_regions %u\n", __func__, __LINE__, repo->bus_index,
 		 repo->dev_index, repo->dev_type, port, blk_size, num_blocks,
 		 num_regions);
@@ -394,7 +394,7 @@
 			result = -ENODEV;
 			goto fail_read_region;
 		}
-		pr_debug("%s:%u: region %u: id %u start %lu size %lu\n",
+		pr_debug("%s:%u: region %u: id %u start %llu size %llu\n",
 			 __func__, __LINE__, i, id, start, size);
 
 		p->regions[i].id = id;
@@ -518,6 +518,41 @@
 	return result;
 }
 
+static int __init ps3_register_ramdisk_device(void)
+{
+	int result;
+	struct layout {
+		struct ps3_system_bus_device dev;
+	} *p;
+
+	pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+	p = kzalloc(sizeof(struct layout), GFP_KERNEL);
+
+	if (!p)
+		return -ENOMEM;
+
+	p->dev.match_id = PS3_MATCH_ID_GPU;
+	p->dev.match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK;
+	p->dev.dev_type = PS3_DEVICE_TYPE_IOC0;
+
+	result = ps3_system_bus_device_register(&p->dev);
+
+	if (result) {
+		pr_debug("%s:%d ps3_system_bus_device_register failed\n",
+			__func__, __LINE__);
+		goto fail_device_register;
+	}
+
+	pr_debug(" <- %s:%d\n", __func__, __LINE__);
+	return 0;
+
+fail_device_register:
+	kfree(p);
+	pr_debug(" <- %s:%d failed\n", __func__, __LINE__);
+	return result;
+}
+
 /**
  * ps3_setup_dynamic_device - Setup a dynamic device from the repository
  */
@@ -627,13 +662,13 @@
 		if (rem)
 			break;
 	}
-	pr_warning("%s:%u: device %lu:%lu not found\n", __func__, __LINE__,
+	pr_warning("%s:%u: device %llu:%llu not found\n", __func__, __LINE__,
 		   bus_id, dev_id);
 	return;
 
 found:
 	if (retries)
-		pr_debug("%s:%u: device %lu:%lu found after %u retries\n",
+		pr_debug("%s:%u: device %llu:%llu found after %u retries\n",
 			 __func__, __LINE__, bus_id, dev_id, retries);
 
 	ps3_setup_dynamic_device(&repo);
@@ -680,14 +715,14 @@
 	res = lv1_storage_get_async_status(PS3_NOTIFICATION_DEV_ID, &tag,
 					   &status);
 	if (tag != dev->tag)
-		pr_err("%s:%u: tag mismatch, got %lx, expected %lx\n",
+		pr_err("%s:%u: tag mismatch, got %llx, expected %llx\n",
 		       __func__, __LINE__, tag, dev->tag);
 
 	if (res) {
-		pr_err("%s:%u: res %d status 0x%lx\n", __func__, __LINE__, res,
+		pr_err("%s:%u: res %d status 0x%llx\n", __func__, __LINE__, res,
 		       status);
 	} else {
-		pr_debug("%s:%u: completed, status 0x%lx\n", __func__,
+		pr_debug("%s:%u: completed, status 0x%llx\n", __func__,
 			 __LINE__, status);
 		dev->lv1_status = status;
 		complete(&dev->done);
@@ -726,7 +761,7 @@
 	}
 
 	if (dev->lv1_status) {
-		pr_err("%s:%u: %s not completed, status 0x%lx\n", __func__,
+		pr_err("%s:%u: %s not completed, status 0x%llx\n", __func__,
 		       __LINE__, op, dev->lv1_status);
 		return -EIO;
 	}
@@ -815,16 +850,16 @@
 		if (res)
 			break;
 
-		pr_debug("%s:%u: notify event type 0x%lx bus id %lu dev id %lu"
-			 " type %lu port %lu\n", __func__, __LINE__,
+		pr_debug("%s:%u: notify event type 0x%llx bus id %llu dev id %llu"
+			 " type %llu port %llu\n", __func__, __LINE__,
 			 notify_event->event_type, notify_event->bus_id,
 			 notify_event->dev_id, notify_event->dev_type,
 			 notify_event->dev_port);
 
 		if (notify_event->event_type != notify_region_probe ||
 		    notify_event->bus_id != dev.sbd.bus_id) {
-			pr_warning("%s:%u: bad notify_event: event %lu, "
-				   "dev_id %lu, dev_type %lu\n",
+			pr_warning("%s:%u: bad notify_event: event %llu, "
+				   "dev_id %llu, dev_type %llu\n",
 				   __func__, __LINE__, notify_event->event_type,
 				   notify_event->dev_id,
 				   notify_event->dev_type);
@@ -946,6 +981,8 @@
 
 	ps3_register_lpm_devices();
 
+	ps3_register_ramdisk_device();
+
 	pr_debug(" <- %s:%d\n", __func__, __LINE__);
 	return 0;
 }
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index 6eb1d4d..1e8a1e3 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -75,7 +75,7 @@
 
 	if (result) {
 		/* all entries bolted !*/
-		pr_info("%s:result=%d va=%lx pa=%lx ix=%lx v=%lx r=%lx\n",
+		pr_info("%s:result=%d va=%lx pa=%lx ix=%lx v=%llx r=%llx\n",
 			__func__, result, va, pa, hpte_group, hpte_v, hpte_r);
 		BUG();
 	}
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index e59634f..8ec5ccf 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -60,6 +60,8 @@
  * gives a usable range of plug values of  {NUM_ISA_INTERRUPTS..63}.  Note
  * that there is no constraint on how many in this set an individual thread
  * can acquire.
+ *
+ * The mask is declared as unsigned long so we can use set/clear_bit on it.
  */
 
 #define PS3_BMP_MINALIGN 64
@@ -68,7 +70,7 @@
 	struct {
 		u64 status;
 		u64 unused_1[3];
-		u64 mask;
+		unsigned long mask;
 		u64 unused_2[3];
 	};
 	u64 ipi_debug_brk_mask;
@@ -102,7 +104,7 @@
 	struct ps3_private *pd = get_irq_chip_data(virq);
 	unsigned long flags;
 
-	pr_debug("%s:%d: thread_id %lu, virq %d\n", __func__, __LINE__,
+	pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
 		pd->thread_id, virq);
 
 	local_irq_save(flags);
@@ -123,7 +125,7 @@
 	struct ps3_private *pd = get_irq_chip_data(virq);
 	unsigned long flags;
 
-	pr_debug("%s:%d: thread_id %lu, virq %d\n", __func__, __LINE__,
+	pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
 		pd->thread_id, virq);
 
 	local_irq_save(flags);
@@ -221,7 +223,7 @@
 {
 	const struct ps3_private *pd = get_irq_chip_data(virq);
 
-	pr_debug("%s:%d: ppe_id %lu, thread_id %lu, virq %u\n", __func__,
+	pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
 		__LINE__, pd->ppe_id, pd->thread_id, virq);
 
 	set_irq_chip_data(virq, NULL);
@@ -291,7 +293,7 @@
 	int result;
 	const struct ps3_private *pd = get_irq_chip_data(virq);
 
-	pr_debug("%s:%d: ppe_id %lu, thread_id %lu, virq %u\n", __func__,
+	pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
 		__LINE__, pd->ppe_id, pd->thread_id, virq);
 
 	ps3_chip_mask(virq);
@@ -322,7 +324,7 @@
 int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq)
 {
 	int result;
-	unsigned long outlet;
+	u64 outlet;
 
 	result = lv1_construct_event_receive_port(&outlet);
 
@@ -468,7 +470,7 @@
 	unsigned int *virq)
 {
 	int result;
-	unsigned long outlet;
+	u64 outlet;
 
 	result = lv1_construct_io_irq_outlet(interrupt_id, &outlet);
 
@@ -525,7 +527,7 @@
 	unsigned int *virq)
 {
 	int result;
-	unsigned long outlet;
+	u64 outlet;
 	u64 lpar_addr;
 
 	BUG_ON(!is_kernel_addr((u64)virt_addr_bmp));
@@ -581,7 +583,7 @@
 	unsigned int class, unsigned int *virq)
 {
 	int result;
-	unsigned long outlet;
+	u64 outlet;
 
 	BUG_ON(class > 2);
 
@@ -691,7 +693,7 @@
 
 	pd->bmp.ipi_debug_brk_mask = 0x8000000000000000UL >> virq;
 
-	pr_debug("%s:%d: cpu %u, virq %u, mask %lxh\n", __func__, __LINE__,
+	pr_debug("%s:%d: cpu %u, virq %u, mask %llxh\n", __func__, __LINE__,
 		cpu, virq, pd->bmp.ipi_debug_brk_mask);
 }
 
@@ -710,7 +712,7 @@
 	plug &= 0x3f;
 
 	if (unlikely(plug == NO_IRQ)) {
-		pr_debug("%s:%d: no plug found: thread_id %lu\n", __func__,
+		pr_debug("%s:%d: no plug found: thread_id %llu\n", __func__,
 			__LINE__, pd->thread_id);
 		dump_bmp(&per_cpu(ps3_private, 0));
 		dump_bmp(&per_cpu(ps3_private, 1));
@@ -745,7 +747,7 @@
 		pd->thread_id = get_hard_smp_processor_id(cpu);
 		spin_lock_init(&pd->bmp.lock);
 
-		pr_debug("%s:%d: ppe_id %lu, thread_id %lu, bmp %lxh\n",
+		pr_debug("%s:%d: ppe_id %llu, thread_id %llu, bmp %lxh\n",
 			__func__, __LINE__, pd->ppe_id, pd->thread_id,
 			ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
 
@@ -770,6 +772,6 @@
 	lv1_get_logical_ppe_id(&ppe_id);
 	result = lv1_configure_irq_state_bitmap(ppe_id, thread_id, 0);
 
-	DBG("%s:%d: lv1_configure_irq_state_bitmap (%lu:%lu/%d) %s\n", __func__,
+	DBG("%s:%d: lv1_configure_irq_state_bitmap (%llu:%llu/%d) %s\n", __func__,
 		__LINE__, ppe_id, thread_id, cpu, ps3_result(result));
 }
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index a4d49dd..67de6bf3 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -79,8 +79,8 @@
  */
 
 struct mem_region {
-	unsigned long base;
-	unsigned long size;
+	u64 base;
+	u64 size;
 	unsigned long offset;
 };
 
@@ -103,9 +103,9 @@
  */
 
 struct map {
-	unsigned long total;
-	unsigned long vas_id;
-	unsigned long htab_size;
+	u64 total;
+	u64 vas_id;
+	u64 htab_size;
 	struct mem_region rm;
 	struct mem_region r1;
 };
@@ -114,13 +114,13 @@
 static void __maybe_unused _debug_dump_map(const struct map *m,
 	const char *func, int line)
 {
-	DBG("%s:%d: map.total     = %lxh\n", func, line, m->total);
-	DBG("%s:%d: map.rm.size   = %lxh\n", func, line, m->rm.size);
-	DBG("%s:%d: map.vas_id    = %lu\n", func, line, m->vas_id);
-	DBG("%s:%d: map.htab_size = %lxh\n", func, line, m->htab_size);
-	DBG("%s:%d: map.r1.base   = %lxh\n", func, line, m->r1.base);
+	DBG("%s:%d: map.total     = %llxh\n", func, line, m->total);
+	DBG("%s:%d: map.rm.size   = %llxh\n", func, line, m->rm.size);
+	DBG("%s:%d: map.vas_id    = %llu\n", func, line, m->vas_id);
+	DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size);
+	DBG("%s:%d: map.r1.base   = %llxh\n", func, line, m->r1.base);
 	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
-	DBG("%s:%d: map.r1.size   = %lxh\n", func, line, m->r1.size);
+	DBG("%s:%d: map.r1.size   = %llxh\n", func, line, m->r1.size);
 }
 
 static struct map map;
@@ -146,11 +146,11 @@
 void __init ps3_mm_vas_create(unsigned long* htab_size)
 {
 	int result;
-	unsigned long start_address;
-	unsigned long size;
-	unsigned long access_right;
-	unsigned long max_page_size;
-	unsigned long flags;
+	u64 start_address;
+	u64 size;
+	u64 access_right;
+	u64 max_page_size;
+	u64 flags;
 
 	result = lv1_query_logical_partition_address_region_info(0,
 		&start_address, &size, &access_right, &max_page_size,
@@ -164,7 +164,7 @@
 	}
 
 	if (max_page_size < PAGE_SHIFT_16M) {
-		DBG("%s:%d: bad max_page_size %lxh\n", __func__, __LINE__,
+		DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__,
 			max_page_size);
 		goto fail;
 	}
@@ -208,7 +208,7 @@
 {
 	int result;
 
-	DBG("%s:%d: map.vas_id    = %lu\n", __func__, __LINE__, map.vas_id);
+	DBG("%s:%d: map.vas_id    = %llu\n", __func__, __LINE__, map.vas_id);
 
 	if (map.vas_id) {
 		result = lv1_select_virtual_address_space(0);
@@ -235,15 +235,14 @@
 static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
 {
 	int result;
-	unsigned long muid;
+	u64 muid;
 
 	r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);
 
 	DBG("%s:%d requested  %lxh\n", __func__, __LINE__, size);
-	DBG("%s:%d actual     %lxh\n", __func__, __LINE__, r->size);
-	DBG("%s:%d difference %lxh (%luMB)\n", __func__, __LINE__,
-		(unsigned long)(size - r->size),
-		(size - r->size) / 1024 / 1024);
+	DBG("%s:%d actual     %llxh\n", __func__, __LINE__, r->size);
+	DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__,
+		size - r->size, (size - r->size) / 1024 / 1024);
 
 	if (r->size == 0) {
 		DBG("%s:%d: size == 0\n", __func__, __LINE__);
@@ -277,7 +276,7 @@
 {
 	int result;
 
-	DBG("%s:%d: r->base = %lxh\n", __func__, __LINE__, r->base);
+	DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base);
 	if (r->base) {
 		result = lv1_release_memory(r->base);
 		BUG_ON(result);
@@ -355,7 +354,7 @@
 static void  __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
 	const char *func, int line)
 {
-	DBG("%s:%d: dev        %lu:%lu\n", func, line, r->dev->bus_id,
+	DBG("%s:%d: dev        %llu:%llu\n", func, line, r->dev->bus_id,
 		r->dev->dev_id);
 	DBG("%s:%d: page_size  %u\n", func, line, r->page_size);
 	DBG("%s:%d: bus_addr   %lxh\n", func, line, r->bus_addr);
@@ -390,7 +389,7 @@
 static void _dma_dump_chunk (const struct dma_chunk* c, const char* func,
 	int line)
 {
-	DBG("%s:%d: r.dev        %lu:%lu\n", func, line,
+	DBG("%s:%d: r.dev        %llu:%llu\n", func, line,
 		c->region->dev->bus_id, c->region->dev->dev_id);
 	DBG("%s:%d: r.bus_addr   %lxh\n", func, line, c->region->bus_addr);
 	DBG("%s:%d: r.page_size  %u\n", func, line, c->region->page_size);
@@ -596,7 +595,7 @@
 
 	/* build ioptes for the area */
 	pages = len >> r->page_size;
-	DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#lx\n", __func__,
+	DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__,
 	    r->page_size, r->len, pages, iopte_flag);
 	for (iopage = 0; iopage < pages; iopage++) {
 		offset = (1 << r->page_size) * iopage;
@@ -648,13 +647,14 @@
 static int dma_sb_region_create(struct ps3_dma_region *r)
 {
 	int result;
+	u64 bus_addr;
 
 	DBG(" -> %s:%d:\n", __func__, __LINE__);
 
 	BUG_ON(!r);
 
 	if (!r->dev->bus_id) {
-		pr_info("%s:%d: %lu:%lu no dma\n", __func__, __LINE__,
+		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
 			r->dev->bus_id, r->dev->dev_id);
 		return 0;
 	}
@@ -671,7 +671,8 @@
 
 	result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
 		roundup_pow_of_two(r->len), r->page_size, r->region_type,
-		&r->bus_addr);
+		&bus_addr);
+	r->bus_addr = bus_addr;
 
 	if (result) {
 		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
@@ -685,6 +686,7 @@
 static int dma_ioc0_region_create(struct ps3_dma_region *r)
 {
 	int result;
+	u64 bus_addr;
 
 	INIT_LIST_HEAD(&r->chunk_list.head);
 	spin_lock_init(&r->chunk_list.lock);
@@ -692,7 +694,8 @@
 	result = lv1_allocate_io_segment(0,
 					 r->len,
 					 r->page_size,
-					 &r->bus_addr);
+					 &bus_addr);
+	r->bus_addr = bus_addr;
 	if (result) {
 		DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
 			__func__, __LINE__, ps3_result(result));
@@ -720,7 +723,7 @@
 	BUG_ON(!r);
 
 	if (!r->dev->bus_id) {
-		pr_info("%s:%d: %lu:%lu no dma\n", __func__, __LINE__,
+		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
 			r->dev->bus_id, r->dev->dev_id);
 		return 0;
 	}
@@ -777,7 +780,7 @@
  */
 
 static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
-	   unsigned long len, unsigned long *bus_addr,
+	   unsigned long len, dma_addr_t *bus_addr,
 	   u64 iopte_flag)
 {
 	int result;
@@ -800,7 +803,7 @@
 		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
 			lpar_addr);
 		DBG("%s:%d len       %lxh\n", __func__, __LINE__, len);
-		DBG("%s:%d bus_addr  %lxh (%lxh)\n", __func__, __LINE__,
+		DBG("%s:%d bus_addr  %llxh (%lxh)\n", __func__, __LINE__,
 		*bus_addr, len);
 	}
 
@@ -832,7 +835,7 @@
 }
 
 static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
-	     unsigned long len, unsigned long *bus_addr,
+	     unsigned long len, dma_addr_t *bus_addr,
 	     u64 iopte_flag)
 {
 	int result;
@@ -872,7 +875,7 @@
 		return result;
 	}
 	*bus_addr = c->bus_addr + phys_addr - aligned_phys;
-	DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#lx\n", __func__,
+	DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__,
 	    virt_addr, phys_addr, aligned_phys, *bus_addr);
 	c->usage_count = 1;
 
@@ -889,7 +892,7 @@
  * This is the common dma unmap routine.
  */
 
-static int dma_sb_unmap_area(struct ps3_dma_region *r, unsigned long bus_addr,
+static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
 	unsigned long len)
 {
 	unsigned long flags;
@@ -903,7 +906,7 @@
 			1 << r->page_size);
 		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
 			- aligned_bus, 1 << r->page_size);
-		DBG("%s:%d: not found: bus_addr %lxh\n",
+		DBG("%s:%d: not found: bus_addr %llxh\n",
 			__func__, __LINE__, bus_addr);
 		DBG("%s:%d: not found: len %lxh\n",
 			__func__, __LINE__, len);
@@ -926,12 +929,12 @@
 }
 
 static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
-			unsigned long bus_addr, unsigned long len)
+			dma_addr_t bus_addr, unsigned long len)
 {
 	unsigned long flags;
 	struct dma_chunk *c;
 
-	DBG("%s: start a=%#lx l=%#lx\n", __func__, bus_addr, len);
+	DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len);
 	spin_lock_irqsave(&r->chunk_list.lock, flags);
 	c = dma_find_chunk(r, bus_addr, len);
 
@@ -941,7 +944,7 @@
 		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
 						      - aligned_bus,
 						      1 << r->page_size);
-		DBG("%s:%d: not found: bus_addr %lxh\n",
+		DBG("%s:%d: not found: bus_addr %llxh\n",
 		    __func__, __LINE__, bus_addr);
 		DBG("%s:%d: not found: len %lxh\n",
 		    __func__, __LINE__, len);
@@ -975,7 +978,8 @@
 static int dma_sb_region_create_linear(struct ps3_dma_region *r)
 {
 	int result;
-	unsigned long virt_addr, len, tmp;
+	unsigned long virt_addr, len;
+	dma_addr_t tmp;
 
 	if (r->len > 16*1024*1024) {	/* FIXME: need proper fix */
 		/* force 16M dma pages for linear mapping */
@@ -1027,7 +1031,8 @@
 static int dma_sb_region_free_linear(struct ps3_dma_region *r)
 {
 	int result;
-	unsigned long bus_addr, len, lpar_addr;
+	dma_addr_t bus_addr;
+	unsigned long len, lpar_addr;
 
 	if (r->offset < map.rm.size) {
 		/* Unmap (part of) 1st RAM chunk */
@@ -1072,7 +1077,7 @@
  */
 
 static int dma_sb_map_area_linear(struct ps3_dma_region *r,
-	unsigned long virt_addr, unsigned long len, unsigned long *bus_addr,
+	unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr,
 	u64 iopte_flag)
 {
 	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
@@ -1091,7 +1096,7 @@
  */
 
 static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
-	unsigned long bus_addr, unsigned long len)
+	dma_addr_t bus_addr, unsigned long len)
 {
 	return 0;
 };
@@ -1169,13 +1174,13 @@
 EXPORT_SYMBOL(ps3_dma_region_free);
 
 int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
-	unsigned long len, unsigned long *bus_addr,
+	unsigned long len, dma_addr_t *bus_addr,
 	u64 iopte_flag)
 {
 	return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
 }
 
-int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
+int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
 	unsigned long len)
 {
 	return r->region_ops->unmap(r, bus_addr, len);
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
index 1d20178..e1c83c2 100644
--- a/arch/powerpc/platforms/ps3/os-area.c
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -306,7 +306,7 @@
 {
 	pr_debug("%s:%d: p.boot_flag:       %u\n", func, line, p->boot_flag);
 	pr_debug("%s:%d: p.num_params:      %u\n", func, line, p->num_params);
-	pr_debug("%s:%d: p.rtc_diff         %ld\n", func, line, p->rtc_diff);
+	pr_debug("%s:%d: p.rtc_diff         %lld\n", func, line, p->rtc_diff);
 	pr_debug("%s:%d: p.av_multi_out     %u\n", func, line, p->av_multi_out);
 	pr_debug("%s:%d: p.ctrl_button:     %u\n", func, line, p->ctrl_button);
 	pr_debug("%s:%d: p.static_ip_addr:  %u.%u.%u.%u\n", func, line,
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
index 22063ad..5e304c2 100644
--- a/arch/powerpc/platforms/ps3/repository.c
+++ b/arch/powerpc/platforms/ps3/repository.c
@@ -44,7 +44,7 @@
 		s[i] = (in[i] <= 126 && in[i] >= 32) ? in[i] : '.';
 	s[i] = 0;
 
-	pr_debug("%s:%d: %s%016lx : %s\n", func, line, hdr, n, s);
+	pr_debug("%s:%d: %s%016llx : %s\n", func, line, hdr, n, s);
 #endif
 }
 
@@ -70,8 +70,8 @@
 	_dump_field("n2: ", n2, func, line);
 	_dump_field("n3: ", n3, func, line);
 	_dump_field("n4: ", n4, func, line);
-	pr_debug("%s:%d: v1: %016lx\n", func, line, v1);
-	pr_debug("%s:%d: v2: %016lx\n", func, line, v2);
+	pr_debug("%s:%d: v1: %016llx\n", func, line, v1);
+	pr_debug("%s:%d: v2: %016llx\n", func, line, v2);
 }
 
 /**
@@ -149,10 +149,10 @@
 		*_v2 = v2;
 
 	if (v1 && !_v1)
-		pr_debug("%s:%d: warning: discarding non-zero v1: %016lx\n",
+		pr_debug("%s:%d: warning: discarding non-zero v1: %016llx\n",
 			__func__, __LINE__, v1);
 	if (v2 && !_v2)
-		pr_debug("%s:%d: warning: discarding non-zero v2: %016lx\n",
+		pr_debug("%s:%d: warning: discarding non-zero v2: %016llx\n",
 			__func__, __LINE__, v2);
 
 	return 0;
@@ -327,7 +327,7 @@
 		return result;
 	}
 
-	pr_debug("%s:%d: bus_type %u, bus_index %u, bus_id %lu, num_dev %u\n",
+	pr_debug("%s:%d: bus_type %u, bus_index %u, bus_id %llu, num_dev %u\n",
 		__func__, __LINE__, tmp.bus_type, tmp.bus_index, tmp.bus_id,
 		num_dev);
 
@@ -353,7 +353,7 @@
 		return result;
 	}
 
-	pr_debug("%s:%d: found: dev_type %u, dev_index %u, dev_id %lu\n",
+	pr_debug("%s:%d: found: dev_type %u, dev_index %u, dev_id %llu\n",
 		__func__, __LINE__, tmp.dev_type, tmp.dev_index, tmp.dev_id);
 
 	*repo = tmp;
@@ -367,7 +367,7 @@
 	struct ps3_repository_device tmp;
 	unsigned int num_dev;
 
-	pr_debug(" -> %s:%u: find device by id %lu:%lu\n", __func__, __LINE__,
+	pr_debug(" -> %s:%u: find device by id %llu:%llu\n", __func__, __LINE__,
 		 bus_id, dev_id);
 
 	for (tmp.bus_index = 0; tmp.bus_index < 10; tmp.bus_index++) {
@@ -382,7 +382,7 @@
 		if (tmp.bus_id == bus_id)
 			goto found_bus;
 
-		pr_debug("%s:%u: skip, bus_id %lu\n", __func__, __LINE__,
+		pr_debug("%s:%u: skip, bus_id %llu\n", __func__, __LINE__,
 			 tmp.bus_id);
 	}
 	pr_debug(" <- %s:%u: bus not found\n", __func__, __LINE__);
@@ -416,7 +416,7 @@
 		if (tmp.dev_id == dev_id)
 			goto found_dev;
 
-		pr_debug("%s:%u: skip, dev_id %lu\n", __func__, __LINE__,
+		pr_debug("%s:%u: skip, dev_id %llu\n", __func__, __LINE__,
 			 tmp.dev_id);
 	}
 	pr_debug(" <- %s:%u: dev not found\n", __func__, __LINE__);
@@ -430,7 +430,7 @@
 		return result;
 	}
 
-	pr_debug(" <- %s:%u: found: type (%u:%u) index (%u:%u) id (%lu:%lu)\n",
+	pr_debug(" <- %s:%u: found: type (%u:%u) index (%u:%u) id (%llu:%llu)\n",
 		 __func__, __LINE__, tmp.bus_type, tmp.dev_type, tmp.bus_index,
 		 tmp.dev_index, tmp.bus_id, tmp.dev_id);
 	*repo = tmp;
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 35f3e85..3331ccb 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -186,7 +186,7 @@
 #define prealloc_ps3flash_bounce_buffer()	do { } while (0)
 #endif
 
-static int ps3_set_dabr(u64 dabr)
+static int ps3_set_dabr(unsigned long dabr)
 {
 	enum {DABR_USER = 1, DABR_KERNEL = 2,};
 
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index ccae3d4..b3c6a99 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -149,7 +149,7 @@
 
 static unsigned long get_vas_id(void)
 {
-	unsigned long id;
+	u64 id;
 
 	lv1_get_logical_ppe_id(&id);
 	lv1_get_virtual_address_space_id_of_ppe(id, &id);
@@ -160,14 +160,18 @@
 static int __init construct_spu(struct spu *spu)
 {
 	int result;
-	unsigned long unused;
+	u64 unused;
+	u64 problem_phys;
+	u64 local_store_phys;
 
 	result = lv1_construct_logical_spe(PAGE_SHIFT, PAGE_SHIFT, PAGE_SHIFT,
 		PAGE_SHIFT, PAGE_SHIFT, get_vas_id(), SPE_TYPE_LOGICAL,
-		&spu_pdata(spu)->priv2_addr, &spu->problem_phys,
-		&spu->local_store_phys, &unused,
+		&spu_pdata(spu)->priv2_addr, &problem_phys,
+		&local_store_phys, &unused,
 		&spu_pdata(spu)->shadow_addr,
 		&spu_pdata(spu)->spe_id);
+	spu->problem_phys = problem_phys;
+	spu->local_store_phys = local_store_phys;
 
 	if (result) {
 		pr_debug("%s:%d: lv1_construct_logical_spe failed: %s\n",
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index ee0d229..58311a8 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -182,7 +182,7 @@
 	case PS3_MATCH_ID_SYSTEM_MANAGER:
 		pr_debug("%s:%d: unsupported match_id: %u\n", __func__,
 			__LINE__, dev->match_id);
-		pr_debug("%s:%d: bus_id: %lu\n", __func__, __LINE__,
+		pr_debug("%s:%d: bus_id: %llu\n", __func__, __LINE__,
 			dev->bus_id);
 		BUG();
 		return -EINVAL;
@@ -220,7 +220,7 @@
 	case PS3_MATCH_ID_SYSTEM_MANAGER:
 		pr_debug("%s:%d: unsupported match_id: %u\n", __func__,
 			__LINE__, dev->match_id);
-		pr_debug("%s:%d: bus_id: %lu\n", __func__, __LINE__,
+		pr_debug("%s:%d: bus_id: %llu\n", __func__, __LINE__,
 			dev->bus_id);
 		BUG();
 		return -EINVAL;
@@ -240,7 +240,7 @@
 static void _dump_mmio_region(const struct ps3_mmio_region* r,
 	const char* func, int line)
 {
-	pr_debug("%s:%d: dev       %lu:%lu\n", func, line, r->dev->bus_id,
+	pr_debug("%s:%d: dev       %llu:%llu\n", func, line, r->dev->bus_id,
 		r->dev->dev_id);
 	pr_debug("%s:%d: bus_addr  %lxh\n", func, line, r->bus_addr);
 	pr_debug("%s:%d: len       %lxh\n", func, line, r->len);
@@ -250,9 +250,11 @@
 static int ps3_sb_mmio_region_create(struct ps3_mmio_region *r)
 {
 	int result;
+	u64 lpar_addr;
 
 	result = lv1_map_device_mmio_region(r->dev->bus_id, r->dev->dev_id,
-		r->bus_addr, r->len, r->page_size, &r->lpar_addr);
+		r->bus_addr, r->len, r->page_size, &lpar_addr);
+	r->lpar_addr = lpar_addr;
 
 	if (result) {
 		pr_debug("%s:%d: lv1_map_device_mmio_region failed: %s\n",
@@ -568,7 +570,7 @@
 {
 	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
 	int result;
-	unsigned long bus_addr;
+	dma_addr_t bus_addr;
 	void *ptr = page_address(page) + offset;
 
 	result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
@@ -590,7 +592,7 @@
 {
 	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
 	int result;
-	unsigned long bus_addr;
+	dma_addr_t bus_addr;
 	u64 iopte_flag;
 	void *ptr = page_address(page) + offset;
 
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index c90817a..3ee01b4 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -127,10 +127,10 @@
 		}
 
 		if (rc && printk_ratelimit()) {
-			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
-			printk("\tindex   = 0x%lx\n", (u64)tbl->it_index);
-			printk("\ttcenum  = 0x%lx\n", (u64)tcenum);
-			printk("\ttce val = 0x%lx\n", tce );
+			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
+			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
+			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
+			printk("\ttce val = 0x%llx\n", tce );
 			show_stack(current, (unsigned long *)__get_SP());
 		}
 
@@ -210,10 +210,10 @@
 	}
 
 	if (rc && printk_ratelimit()) {
-		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
-		printk("\tindex   = 0x%lx\n", (u64)tbl->it_index);
-		printk("\tnpages  = 0x%lx\n", (u64)npages);
-		printk("\ttce[0] val = 0x%lx\n", tcep[0]);
+		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
+		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
+		printk("\tnpages  = 0x%llx\n", (u64)npages);
+		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
 		show_stack(current, (unsigned long *)__get_SP());
 	}
 	return ret;
@@ -227,9 +227,9 @@
 		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
 
 		if (rc && printk_ratelimit()) {
-			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
-			printk("\tindex   = 0x%lx\n", (u64)tbl->it_index);
-			printk("\ttcenum  = 0x%lx\n", (u64)tcenum);
+			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
+			printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
+			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
 			show_stack(current, (unsigned long *)__get_SP());
 		}
 
@@ -246,9 +246,9 @@
 
 	if (rc && printk_ratelimit()) {
 		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
-		printk("\trc      = %ld\n", rc);
-		printk("\tindex   = 0x%lx\n", (u64)tbl->it_index);
-		printk("\tnpages  = 0x%lx\n", (u64)npages);
+		printk("\trc      = %lld\n", rc);
+		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
+		printk("\tnpages  = 0x%llx\n", (u64)npages);
 		show_stack(current, (unsigned long *)__get_SP());
 	}
 }
@@ -261,10 +261,9 @@
 	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);
 
 	if (rc && printk_ratelimit()) {
-		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%ld\n",
-			rc);
-		printk("\tindex   = 0x%lx\n", (u64)tbl->it_index);
-		printk("\ttcenum  = 0x%lx\n", (u64)tcenum);
+		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
+		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
+		printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
 		show_stack(current, (unsigned long *)__get_SP());
 	}
 
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 5afce11..b33b28a 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -17,6 +17,7 @@
 obj-$(CONFIG_FSL_LBC)		+= fsl_lbc.o
 obj-$(CONFIG_FSL_GTM)		+= fsl_gtm.o
 obj-$(CONFIG_MPC8xxx_GPIO)	+= mpc8xxx_gpio.o
+obj-$(CONFIG_SIMPLE_GPIO)	+= simple_gpio.o
 obj-$(CONFIG_RAPIDIO)		+= fsl_rio.o
 obj-$(CONFIG_TSI108_BRIDGE)	+= tsi108_pci.o tsi108_dev.o
 obj-$(CONFIG_QUICC_ENGINE)	+= qe_lib/
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index d5f9ae0..9817f63 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -28,65 +28,107 @@
 #include <sysdev/fsl_pci.h>
 
 #if defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_86xx)
+static int __init setup_one_atmu(struct ccsr_pci __iomem *pci,
+	unsigned int index, const struct resource *res,
+	resource_size_t offset)
+{
+	resource_size_t pci_addr = res->start - offset;
+	resource_size_t phys_addr = res->start;
+	resource_size_t size = res->end - res->start + 1;
+	u32 flags = 0x80044000; /* enable & mem R/W */
+	unsigned int i;
+
+	pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
+		(u64)res->start, (u64)size);
+
+	if (res->flags & IORESOURCE_PREFETCH)
+		flags |= 0x10000000; /* enable relaxed ordering */
+
+	for (i = 0; size > 0; i++) {
+		unsigned int bits = min(__ilog2(size),
+					__ffs(pci_addr | phys_addr));
+
+		if (index + i >= 5)
+			return -1;
+
+		out_be32(&pci->pow[index + i].potar, pci_addr >> 12);
+		out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44);
+		out_be32(&pci->pow[index + i].powbar, phys_addr >> 12);
+		out_be32(&pci->pow[index + i].powar, flags | (bits - 1));
+
+		pci_addr += (resource_size_t)1U << bits;
+		phys_addr += (resource_size_t)1U << bits;
+		size -= (resource_size_t)1U << bits;
+	}
+
+	return i;
+}
+
 /* atmu setup for fsl pci/pcie controller */
-void __init setup_pci_atmu(struct pci_controller *hose, struct resource *rsrc)
+static void __init setup_pci_atmu(struct pci_controller *hose,
+				  struct resource *rsrc)
 {
 	struct ccsr_pci __iomem *pci;
-	int i;
+	int i, j, n;
 
 	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
 		    (u64)rsrc->start, (u64)rsrc->end - (u64)rsrc->start + 1);
 	pci = ioremap(rsrc->start, rsrc->end - rsrc->start + 1);
+	if (!pci) {
+	    dev_err(hose->parent, "Unable to map ATMU registers\n");
+	    return;
+	}
 
-	/* Disable all windows (except powar0 since its ignored) */
+	/* Disable all windows (except powar0 since it's ignored) */
 	for(i = 1; i < 5; i++)
 		out_be32(&pci->pow[i].powar, 0);
 	for(i = 0; i < 3; i++)
 		out_be32(&pci->piw[i].piwar, 0);
 
 	/* Setup outbound MEM window */
-	for(i = 0; i < 3; i++)
-		if (hose->mem_resources[i].flags & IORESOURCE_MEM){
-			resource_size_t pci_addr_start =
-				 hose->mem_resources[i].start -
-				 hose->pci_mem_offset;
-			pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
-				(u64)hose->mem_resources[i].start,
-				(u64)hose->mem_resources[i].end
-				  - (u64)hose->mem_resources[i].start + 1);
-			out_be32(&pci->pow[i+1].potar, (pci_addr_start >> 12));
-			out_be32(&pci->pow[i+1].potear, 0);
-			out_be32(&pci->pow[i+1].powbar,
-				(hose->mem_resources[i].start >> 12));
-			/* Enable, Mem R/W */
-			out_be32(&pci->pow[i+1].powar, 0x80044000
-				| (__ilog2(hose->mem_resources[i].end
-				- hose->mem_resources[i].start + 1) - 1));
-		}
+	for(i = 0, j = 1; i < 3; i++) {
+		if (!(hose->mem_resources[i].flags & IORESOURCE_MEM))
+			continue;
+
+		n = setup_one_atmu(pci, j, &hose->mem_resources[i],
+				   hose->pci_mem_offset);
+
+		if (n < 0 || j >= 5) {
+			pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
+			hose->mem_resources[i].flags |= IORESOURCE_DISABLED;
+		} else
+			j += n;
+	}
 
 	/* Setup outbound IO window */
-	if (hose->io_resource.flags & IORESOURCE_IO){
-		pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
-			 "phy base 0x%016llx.\n",
-			(u64)hose->io_resource.start,
-			(u64)hose->io_resource.end - (u64)hose->io_resource.start + 1,
-			(u64)hose->io_base_phys);
-		out_be32(&pci->pow[i+1].potar, (hose->io_resource.start >> 12));
-		out_be32(&pci->pow[i+1].potear, 0);
-		out_be32(&pci->pow[i+1].powbar, (hose->io_base_phys >> 12));
-		/* Enable, IO R/W */
-		out_be32(&pci->pow[i+1].powar, 0x80088000
-			| (__ilog2(hose->io_resource.end
-			- hose->io_resource.start + 1) - 1));
+	if (hose->io_resource.flags & IORESOURCE_IO) {
+		if (j >= 5) {
+			pr_err("Ran out of outbound PCI ATMUs for IO resource\n");
+		} else {
+			pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
+				 "phy base 0x%016llx.\n",
+				(u64)hose->io_resource.start,
+				(u64)hose->io_resource.end - (u64)hose->io_resource.start + 1,
+				(u64)hose->io_base_phys);
+			out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
+			out_be32(&pci->pow[j].potear, 0);
+			out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
+			/* Enable, IO R/W */
+			out_be32(&pci->pow[j].powar, 0x80088000
+				| (__ilog2(hose->io_resource.end
+				- hose->io_resource.start + 1) - 1));
+		}
 	}
 
 	/* Setup 2G inbound Memory Window @ 1 */
 	out_be32(&pci->piw[2].pitar, 0x00000000);
 	out_be32(&pci->piw[2].piwbar,0x00000000);
 	out_be32(&pci->piw[2].piwar, PIWAR_2G);
+
+	iounmap(pci);
 }
 
-void __init setup_pci_cmd(struct pci_controller *hose)
+static void __init setup_pci_cmd(struct pci_controller *hose)
 {
 	u16 cmd;
 	int cap_x;
@@ -130,7 +172,7 @@
 	return ;
 }
 
-int __init fsl_pcie_check_link(struct pci_controller *hose)
+static int __init fsl_pcie_check_link(struct pci_controller *hose)
 {
 	u32 val;
 	early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index 60f7f22..9c744e4 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -5,8 +5,13 @@
 #include <asm/mmu.h>
 
 extern phys_addr_t get_immrbase(void);
+#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx)
 extern u32 get_brgfreq(void);
 extern u32 get_baudrate(void);
+#else
+static inline u32 get_brgfreq(void) { return -1; }
+static inline u32 get_baudrate(void) { return -1; }
+#endif
 extern u32 fsl_get_sys_freq(void);
 
 struct spi_board_info;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 3e0d89d..a35297d 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -435,7 +435,7 @@
 		addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32);
 	}
 
-	printk(KERN_DEBUG "mpic:   - HT:%02x.%x %s MSI mapping found @ 0x%lx\n",
+	printk(KERN_DEBUG "mpic:   - HT:%02x.%x %s MSI mapping found @ 0x%llx\n",
 		PCI_SLOT(devfn), PCI_FUNC(devfn),
 		flags & HT_MSI_FLAGS_ENABLE ? "enabled" : "disabled", addr);
 
diff --git a/arch/powerpc/sysdev/qe_lib/Kconfig b/arch/powerpc/sysdev/qe_lib/Kconfig
index 76ffbc4..41ac3df 100644
--- a/arch/powerpc/sysdev/qe_lib/Kconfig
+++ b/arch/powerpc/sysdev/qe_lib/Kconfig
@@ -22,5 +22,6 @@
 
 config QE_USB
 	bool
+	default y if USB_GADGET_FSL_QE
 	help
-	  QE USB Host Controller support
+	  QE USB Controller support
diff --git a/arch/powerpc/sysdev/qe_lib/gpio.c b/arch/powerpc/sysdev/qe_lib/gpio.c
index 8e5a0bc..3485288 100644
--- a/arch/powerpc/sysdev/qe_lib/gpio.c
+++ b/arch/powerpc/sysdev/qe_lib/gpio.c
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
+#include <linux/err.h>
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
@@ -24,8 +25,14 @@
 	struct of_mm_gpio_chip mm_gc;
 	spinlock_t lock;
 
+	unsigned long pin_flags[QE_PIO_PINS];
+#define QE_PIN_REQUESTED 0
+
 	/* shadowed data register to clear/set bits safely */
 	u32 cpdata;
+
+	/* saved_regs used to restore dedicated functions */
+	struct qe_pio_regs saved_regs;
 };
 
 static inline struct qe_gpio_chip *
@@ -40,6 +47,12 @@
 	struct qe_pio_regs __iomem *regs = mm_gc->regs;
 
 	qe_gc->cpdata = in_be32(&regs->cpdata);
+	qe_gc->saved_regs.cpdata = qe_gc->cpdata;
+	qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1);
+	qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2);
+	qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1);
+	qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2);
+	qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr);
 }
 
 static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
@@ -103,6 +116,188 @@
 	return 0;
 }
 
+struct qe_pin {
+	/*
+	 * The qe_gpio_chip name is unfortunate, we should change that to
+	 * something like qe_pio_controller. Someday.
+	 */
+	struct qe_gpio_chip *controller;
+	int num;
+};
+
+/**
+ * qe_pin_request - Request a QE pin
+ * @np:		device node to get a pin from
+ * @index:	index of a pin in the device tree
+ * Context:	non-atomic
+ *
+ * This function returns a qe_pin so that it can be used with the rest of
+ * the QE Pin Multiplexing API.
+ */
+struct qe_pin *qe_pin_request(struct device_node *np, int index)
+{
+	struct qe_pin *qe_pin;
+	struct device_node *gc;
+	struct of_gpio_chip *of_gc = NULL;
+	struct of_mm_gpio_chip *mm_gc;
+	struct qe_gpio_chip *qe_gc;
+	int err;
+	int size;
+	const void *gpio_spec;
+	const u32 *gpio_cells;
+	unsigned long flags;
+
+	qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL);
+	if (!qe_pin) {
+		pr_debug("%s: can't allocate memory\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	err = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index,
+					  &gc, &gpio_spec);
+	if (err) {
+		pr_debug("%s: can't parse gpios property\n", __func__);
+		goto err0;
+	}
+
+	if (!of_device_is_compatible(gc, "fsl,mpc8323-qe-pario-bank")) {
+		pr_debug("%s: tried to get a non-qe pin\n", __func__);
+		err = -EINVAL;
+		goto err1;
+	}
+
+	of_gc = gc->data;
+	if (!of_gc) {
+		pr_debug("%s: gpio controller %s isn't registered\n",
+			 np->full_name, gc->full_name);
+		err = -ENODEV;
+		goto err1;
+	}
+
+	gpio_cells = of_get_property(gc, "#gpio-cells", &size);
+	if (!gpio_cells || size != sizeof(*gpio_cells) ||
+			*gpio_cells != of_gc->gpio_cells) {
+		pr_debug("%s: wrong #gpio-cells for %s\n",
+			 np->full_name, gc->full_name);
+		err = -EINVAL;
+		goto err1;
+	}
+
+	err = of_gc->xlate(of_gc, np, gpio_spec, NULL);
+	if (err < 0)
+		goto err1;
+
+	mm_gc = to_of_mm_gpio_chip(&of_gc->gc);
+	qe_gc = to_qe_gpio_chip(mm_gc);
+
+	spin_lock_irqsave(&qe_gc->lock, flags);
+
+	if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) {
+		qe_pin->controller = qe_gc;
+		qe_pin->num = err;
+		err = 0;
+	} else {
+		err = -EBUSY;
+	}
+
+	spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+	if (!err)
+		return qe_pin;
+err1:
+	of_node_put(gc);
+err0:
+	kfree(qe_pin);
+	pr_debug("%s failed with status %d\n", __func__, err);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL(qe_pin_request);
+
+/**
+ * qe_pin_free - Free a pin
+ * @qe_pin:	pointer to the qe_pin structure
+ * Context:	any
+ *
+ * This function frees the qe_pin structure and makes a pin available
+ * for further qe_pin_request() calls.
+ */
+void qe_pin_free(struct qe_pin *qe_pin)
+{
+	struct qe_gpio_chip *qe_gc = qe_pin->controller;
+	unsigned long flags;
+	const int pin = qe_pin->num;
+
+	spin_lock_irqsave(&qe_gc->lock, flags);
+	test_and_clear_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[pin]);
+	spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+	kfree(qe_pin);
+}
+EXPORT_SYMBOL(qe_pin_free);
+
+/**
+ * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode
+ * @qe_pin:	pointer to the qe_pin structure
+ * Context:	any
+ *
+ * This function resets a pin to a dedicated peripheral function that
+ * has been set up by the firmware.
+ */
+void qe_pin_set_dedicated(struct qe_pin *qe_pin)
+{
+	struct qe_gpio_chip *qe_gc = qe_pin->controller;
+	struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+	struct qe_pio_regs *sregs = &qe_gc->saved_regs;
+	int pin = qe_pin->num;
+	u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
+	u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2);
+	bool second_reg = pin > (QE_PIO_PINS / 2) - 1;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qe_gc->lock, flags);
+
+	if (second_reg) {
+		clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2);
+		clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2);
+	} else {
+		clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2);
+		clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2);
+	}
+
+	if (sregs->cpdata & mask1)
+		qe_gc->cpdata |= mask1;
+	else
+		qe_gc->cpdata &= ~mask1;
+
+	out_be32(&regs->cpdata, qe_gc->cpdata);
+	clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
+
+	spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_dedicated);
+
+/**
+ * qe_pin_set_gpio - Set a pin to the GPIO mode
+ * @qe_pin:	pointer to the qe_pin structure
+ * Context:	any
+ *
+ * This function sets a pin to the GPIO mode.
+ */
+void qe_pin_set_gpio(struct qe_pin *qe_pin)
+{
+	struct qe_gpio_chip *qe_gc = qe_pin->controller;
+	struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qe_gc->lock, flags);
+
+	/* Let's make it input by default, GPIO API is able to change that. */
+	__par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0);
+
+	spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_gpio);
+
 static int __init qe_add_gpiochips(void)
 {
 	struct device_node *np;
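
The four exported helpers above form a small ownership API for QE pins: a driver claims a pin described in the device tree, can flip it between GPIO mode and its firmware-configured dedicated function, and releases it when done. A minimal usage sketch follows (hypothetical caller: the node pointer, the "gpios" index and the error handling are illustrative only, and the request helper's signature is inferred from the parsing code above):

	struct qe_pin *pin;

	/* Claim entry 0 of this node's "gpios" property (assumed example). */
	pin = qe_pin_request(np, 0);
	if (IS_ERR(pin))
		return PTR_ERR(pin);

	/* Temporarily drive the pin through the normal GPIO API... */
	qe_pin_set_gpio(pin);

	/* ...then hand it back to the firmware-configured peripheral function
	 * and release ownership so others may request it again.
	 */
	qe_pin_set_dedicated(pin);
	qe_pin_free(pin);
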
diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
new file mode 100644
index 0000000..43c4569
--- /dev/null
+++ b/arch/powerpc/sysdev/simple_gpio.c
@@ -0,0 +1,155 @@
+/*
+ * Simple Memory-Mapped GPIOs
+ *
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <asm/prom.h>
+#include "simple_gpio.h"
+
+struct u8_gpio_chip {
+	struct of_mm_gpio_chip mm_gc;
+	spinlock_t lock;
+
+	/* shadowed data register to clear/set bits safely */
+	u8 data;
+};
+
+static struct u8_gpio_chip *to_u8_gpio_chip(struct of_mm_gpio_chip *mm_gc)
+{
+	return container_of(mm_gc, struct u8_gpio_chip, mm_gc);
+}
+
+static u8 u8_pin2mask(unsigned int pin)
+{
+	return 1 << (8 - 1 - pin);
+}
+
+static int u8_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+
+	return in_8(mm_gc->regs) & u8_pin2mask(gpio);
+}
+
+static void u8_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+	struct u8_gpio_chip *u8_gc = to_u8_gpio_chip(mm_gc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&u8_gc->lock, flags);
+
+	if (val)
+		u8_gc->data |= u8_pin2mask(gpio);
+	else
+		u8_gc->data &= ~u8_pin2mask(gpio);
+
+	out_8(mm_gc->regs, u8_gc->data);
+
+	spin_unlock_irqrestore(&u8_gc->lock, flags);
+}
+
+static int u8_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+	return 0;
+}
+
+static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	u8_gpio_set(gc, gpio, val);
+	return 0;
+}
+
+static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+	struct u8_gpio_chip *u8_gc = to_u8_gpio_chip(mm_gc);
+
+	u8_gc->data = in_8(mm_gc->regs);
+}
+
+static int __init u8_simple_gpiochip_add(struct device_node *np)
+{
+	int ret;
+	struct u8_gpio_chip *u8_gc;
+	struct of_mm_gpio_chip *mm_gc;
+	struct of_gpio_chip *of_gc;
+	struct gpio_chip *gc;
+
+	u8_gc = kzalloc(sizeof(*u8_gc), GFP_KERNEL);
+	if (!u8_gc)
+		return -ENOMEM;
+
+	spin_lock_init(&u8_gc->lock);
+
+	mm_gc = &u8_gc->mm_gc;
+	of_gc = &mm_gc->of_gc;
+	gc = &of_gc->gc;
+
+	mm_gc->save_regs = u8_gpio_save_regs;
+	of_gc->gpio_cells = 2;
+	gc->ngpio = 8;
+	gc->direction_input = u8_gpio_dir_in;
+	gc->direction_output = u8_gpio_dir_out;
+	gc->get = u8_gpio_get;
+	gc->set = u8_gpio_set;
+
+	ret = of_mm_gpiochip_add(np, mm_gc);
+	if (ret)
+		goto err;
+	return 0;
+err:
+	kfree(u8_gc);
+	return ret;
+}
+
+void __init simple_gpiochip_init(const char *compatible)
+{
+	struct device_node *np;
+
+	for_each_compatible_node(np, NULL, compatible) {
+		int ret;
+		struct resource r;
+
+		ret = of_address_to_resource(np, 0, &r);
+		if (ret)
+			goto err;
+
+		switch (resource_size(&r)) {
+		case 1:
+			ret = u8_simple_gpiochip_add(np);
+			if (ret)
+				goto err;
+			break;
+		default:
+			/*
+			 * Whenever you need support for a GPIO bank width > 1,
+			 * please just turn the u8_ code into macros and
+			 * construct the needed uX_ variants from them.
+			 */
+			ret = -ENOSYS;
+			goto err;
+		}
+		continue;
+err:
+		pr_err("%s: registration failed, status %d\n",
+		       np->full_name, ret);
+	}
+}
diff --git a/arch/powerpc/sysdev/simple_gpio.h b/arch/powerpc/sysdev/simple_gpio.h
new file mode 100644
index 0000000..3a7b0c5
--- /dev/null
+++ b/arch/powerpc/sysdev/simple_gpio.h
@@ -0,0 +1,12 @@
+#ifndef __SYSDEV_SIMPLE_GPIO_H
+#define __SYSDEV_SIMPLE_GPIO_H
+
+#include <linux/errno.h>
+
+#ifdef CONFIG_SIMPLE_GPIO
+extern void simple_gpiochip_init(const char *compatible);
+#else
+static inline void simple_gpiochip_init(const char *compatible) {}
+#endif /* CONFIG_SIMPLE_GPIO */
+
+#endif /* __SYSDEV_SIMPLE_GPIO_H */
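
The stub in the !CONFIG_SIMPLE_GPIO case lets platform code call the initializer unconditionally. A sketch of a hypothetical call site (the compatible string and setup function below are made up for illustration, not an existing binding or board file):

	#include "simple_gpio.h"

	static void __init example_board_setup_arch(void)
	{
		/* Register every 8-bit bank matching the (made-up) binding. */
		simple_gpiochip_init("example,fpga-gpio-bank");
	}
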
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 19577ae..6b0a353 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -77,6 +77,7 @@
 config S390
 	def_bool y
 	select USE_GENERIC_SMP_HELPERS if SMP
+	select HAVE_SYSCALL_WRAPPERS
 	select HAVE_FUNCTION_TRACER
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
@@ -299,7 +300,7 @@
 	  This option enables the compiler options -mwarn-framesize and
 	  -mwarn-dynamicstack. If the compiler supports these options it
 	  will generate warnings for function which either use alloca or
-	  create a stack frame bigger then CONFIG_WARN_STACK_SIZE.
+	  create a stack frame bigger than CONFIG_WARN_STACK_SIZE.
 
 	  Say N if you are unsure.
 
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index f2af416..63a2341 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -13,4 +13,3 @@
 unifdef-y += debug.h
 unifdef-y += chpid.h
 unifdef-y += schid.h
-unifdef-y += swab.h
diff --git a/arch/s390/include/asm/byteorder.h b/arch/s390/include/asm/byteorder.h
index b95a2b2..a332e59 100644
--- a/arch/s390/include/asm/byteorder.h
+++ b/arch/s390/include/asm/byteorder.h
@@ -1,7 +1,6 @@
 #ifndef _S390_BYTEORDER_H
 #define _S390_BYTEORDER_H
 
-#include <asm/swab.h>
 #include <linux/byteorder/big_endian.h>
 
 #endif /* _S390_BYTEORDER_H */
diff --git a/arch/s390/include/asm/chpid.h b/arch/s390/include/asm/chpid.h
index dfe3c7f..fc71d8a 100644
--- a/arch/s390/include/asm/chpid.h
+++ b/arch/s390/include/asm/chpid.h
@@ -9,7 +9,7 @@
 #define _ASM_S390_CHPID_H _ASM_S390_CHPID_H
 
 #include <linux/string.h>
-#include <asm/types.h>
+#include <linux/types.h>
 
 #define __MAX_CHPID 255
 
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
index d38d0cf..807997f 100644
--- a/arch/s390/include/asm/chsc.h
+++ b/arch/s390/include/asm/chsc.h
@@ -8,6 +8,7 @@
 #ifndef _ASM_CHSC_H
 #define _ASM_CHSC_H
 
+#include <linux/types.h>
 #include <asm/chpid.h>
 #include <asm/schid.h>
 
diff --git a/arch/s390/include/asm/cmb.h b/arch/s390/include/asm/cmb.h
index 5019685..39ae032 100644
--- a/arch/s390/include/asm/cmb.h
+++ b/arch/s390/include/asm/cmb.h
@@ -1,5 +1,8 @@
 #ifndef S390_CMB_H
 #define S390_CMB_H
+
+#include <linux/types.h>
+
 /**
  * struct cmbdata - channel measurement block data for user space
  * @size: size of the stored data
diff --git a/arch/s390/include/asm/dasd.h b/arch/s390/include/asm/dasd.h
index 55b2b80..e2db6f1 100644
--- a/arch/s390/include/asm/dasd.h
+++ b/arch/s390/include/asm/dasd.h
@@ -14,6 +14,7 @@
 
 #ifndef DASD_H
 #define DASD_H
+#include <linux/types.h>
 #include <linux/ioctl.h>
 
 #define DASD_IOCTL_LETTER 'D'
@@ -78,6 +79,7 @@
 #define DASD_FEATURE_USEDIAG	     0x02
 #define DASD_FEATURE_INITIAL_ONLINE  0x04
 #define DASD_FEATURE_ERPLOG	     0x08
+#define DASD_FEATURE_FAILFAST	     0x10
 
 #define DASD_PARTN_BITS 2
 
diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h
index d74002f..e1f5465 100644
--- a/arch/s390/include/asm/kvm.h
+++ b/arch/s390/include/asm/kvm.h
@@ -13,7 +13,7 @@
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
  */
-#include <asm/types.h>
+#include <linux/types.h>
 
 /* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
 struct kvm_pic_state {
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h
index 397d93f..8cc113f 100644
--- a/arch/s390/include/asm/posix_types.h
+++ b/arch/s390/include/asm/posix_types.h
@@ -68,11 +68,7 @@
 #endif /* __s390x__ */
 
 typedef struct {
-#if defined(__KERNEL__) || defined(__USE_ALL)
         int     val[2];
-#else                        /* !defined(__KERNEL__) && !defined(__USE_ALL)*/
-        int     __val[2];
-#endif                       /* !defined(__KERNEL__) && !defined(__USE_ALL)*/
 } __kernel_fsid_t;
 
 
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 5396f9f..8920025 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -272,12 +272,15 @@
 #define PSW_ASC_SECONDARY	0x0000800000000000UL
 #define PSW_ASC_HOME		0x0000C00000000000UL
 
-extern long psw_user32_bits;
-
 #endif /* __s390x__ */
 
+#ifdef __KERNEL__
 extern long psw_kernel_bits;
 extern long psw_user_bits;
+#ifdef CONFIG_64BIT
+extern long psw_user32_bits;
+#endif
+#endif
 
 /* This macro merges a NEW PSW mask specified by the user into
    the currently active PSW mask CURRENT, modifying only those
diff --git a/arch/s390/include/asm/qeth.h b/arch/s390/include/asm/qeth.h
index 930d378..06cbd1e 100644
--- a/arch/s390/include/asm/qeth.h
+++ b/arch/s390/include/asm/qeth.h
@@ -10,6 +10,7 @@
  */
 #ifndef __ASM_S390_QETH_IOCTL_H__
 #define __ASM_S390_QETH_IOCTL_H__
+#include <linux/types.h>
 #include <linux/ioctl.h>
 
 #define SIOC_QETH_ARP_SET_NO_ENTRIES    (SIOCDEVPRIVATE)
diff --git a/arch/s390/include/asm/schid.h b/arch/s390/include/asm/schid.h
index 825503c..3e4d401 100644
--- a/arch/s390/include/asm/schid.h
+++ b/arch/s390/include/asm/schid.h
@@ -1,6 +1,8 @@
 #ifndef ASM_SCHID_H
 #define ASM_SCHID_H
 
+#include <linux/types.h>
+
 struct subchannel_id {
 	__u32 cssid : 8;
 	__u32 : 4;
diff --git a/arch/s390/include/asm/swab.h b/arch/s390/include/asm/swab.h
index bd9321a..eb18dc1 100644
--- a/arch/s390/include/asm/swab.h
+++ b/arch/s390/include/asm/swab.h
@@ -9,7 +9,7 @@
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  */
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 #ifndef __s390x__
 # define __SWAB_64_THRU_32__
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index 41c5476..3dc3fc2 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -9,11 +9,7 @@
 #ifndef _S390_TYPES_H
 #define _S390_TYPES_H
 
-#ifndef __s390x__
-# include <asm-generic/int-ll64.h>
-#else
-# include <asm-generic/int-l64.h>
-#endif
+#include <asm-generic/int-ll64.h>
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index fc2c971..6035cd2 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -547,7 +547,7 @@
 	.globl	sys32_newuname_wrapper
 sys32_newuname_wrapper:
 	llgtr	%r2,%r2			# struct new_utsname *
-	jg	s390x_newuname		# branch to system call
+	jg	sys_s390_newuname	# branch to system call
 
 	.globl	compat_sys_adjtimex_wrapper
 compat_sys_adjtimex_wrapper:
@@ -615,7 +615,7 @@
 	.globl	sys32_personality_wrapper
 sys32_personality_wrapper:
 	llgfr	%r2,%r2			# unsigned long
-	jg	s390x_personality	# branch to system call
+	jg	sys_s390_personality	# branch to system call
 
 	.globl	sys32_setfsuid16_wrapper
 sys32_setfsuid16_wrapper:
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 6b18963..950c59c 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -30,23 +30,23 @@
 struct old_sigaction;
 struct sel_arg_struct;
 
-long sys_pipe(unsigned long __user *fildes);
 long sys_mmap2(struct mmap_arg_struct __user  *arg);
-long old_mmap(struct mmap_arg_struct __user *arg);
+long sys_s390_old_mmap(struct mmap_arg_struct __user *arg);
 long sys_ipc(uint call, int first, unsigned long second,
 	     unsigned long third, void __user *ptr);
-long s390x_newuname(struct new_utsname __user *name);
-long s390x_personality(unsigned long personality);
-long s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
+long sys_s390_newuname(struct new_utsname __user *name);
+long sys_s390_personality(unsigned long personality);
+long sys_s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
 		    size_t len, int advice);
-long s390_fadvise64_64(struct fadvise64_64_args __user *args);
-long s390_fallocate(int fd, int mode, loff_t offset, u32 len_high, u32 len_low);
+long sys_s390_fadvise64_64(struct fadvise64_64_args __user *args);
+long sys_s390_fallocate(int fd, int mode, loff_t offset, u32 len_high,
+			u32 len_low);
 long sys_fork(void);
 long sys_clone(void);
 long sys_vfork(void);
 void execve_tail(void);
 long sys_execve(void);
-int sys_sigsuspend(int history0, int history1, old_sigset_t mask);
+long sys_sigsuspend(int history0, int history1, old_sigset_t mask);
 long sys_sigaction(int sig, const struct old_sigaction __user *act,
 		   struct old_sigaction __user *oact);
 long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss);
@@ -54,7 +54,5 @@
 long sys_rt_sigreturn(void);
 long sys32_sigreturn(void);
 long sys32_rt_sigreturn(void);
-long old_select(struct sel_arg_struct __user *arg);
-long sys_ptrace(long request, long pid, long addr, long data);
 
 #endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 9b92856..a01cf02 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -382,7 +382,7 @@
 	/*
 	 * It is possible to have multiple instances associated with a given
 	 * task either because an multiple functions in the call path
-	 * have a return probe installed on them, and/or more then one return
+	 * have a return probe installed on them, and/or more than one
 	 * return probe was registered for a target function.
 	 *
 	 * We can handle this because:
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index b6110bd..5cd38a9 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -39,6 +39,7 @@
 #include <linux/tick.h>
 #include <linux/elfcore.h>
 #include <linux/kernel_stat.h>
+#include <linux/syscalls.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -225,13 +226,13 @@
         return 0;
 }
 
-asmlinkage long sys_fork(void)
+SYSCALL_DEFINE0(fork)
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	return do_fork(SIGCHLD, regs->gprs[15], regs, 0, NULL, NULL);
 }
 
-asmlinkage long sys_clone(void)
+SYSCALL_DEFINE0(clone)
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	unsigned long clone_flags;
@@ -258,7 +259,7 @@
  * do not have enough call-clobbered registers to hold all
  * the information you need.
  */
-asmlinkage long sys_vfork(void)
+SYSCALL_DEFINE0(vfork)
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
@@ -278,7 +279,7 @@
 /*
  * sys_execve() executes a new program.
  */
-asmlinkage long sys_execve(void)
+SYSCALL_DEFINE0(execve)
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	char *filename;
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 8e6812a..3cf74c3 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -25,6 +25,7 @@
 #include <linux/personality.h>
 #include <linux/binfmts.h>
 #include <linux/tracehook.h>
+#include <linux/syscalls.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 #include <asm/lowcore.h>
@@ -53,8 +54,7 @@
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
  */
-asmlinkage int
-sys_sigsuspend(int history0, int history1, old_sigset_t mask)
+SYSCALL_DEFINE3(sigsuspend, int, history0, int, history1, old_sigset_t, mask)
 {
 	mask &= _BLOCKABLE;
 	spin_lock_irq(&current->sighand->siglock);
@@ -70,9 +70,8 @@
 	return -ERESTARTNOHAND;
 }
 
-asmlinkage long
-sys_sigaction(int sig, const struct old_sigaction __user *act,
-	      struct old_sigaction __user *oact)
+SYSCALL_DEFINE3(sigaction, int, sig, const struct old_sigaction __user *, act,
+		struct old_sigaction __user *, oact)
 {
 	struct k_sigaction new_ka, old_ka;
 	int ret;
@@ -102,15 +101,13 @@
 	return ret;
 }
 
-asmlinkage long
-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
+SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss,
+		stack_t __user *, uoss)
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	return do_sigaltstack(uss, uoss, regs->gprs[15]);
 }
 
-
-
 /* Returns non-zero on fault. */
 static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 {
@@ -164,7 +161,7 @@
 	return 0;
 }
 
-asmlinkage long sys_sigreturn(void)
+SYSCALL_DEFINE0(sigreturn)
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	sigframe __user *frame = (sigframe __user *)regs->gprs[15];
@@ -191,7 +188,7 @@
 	return 0;
 }
 
-asmlinkage long sys_rt_sigreturn(void)
+SYSCALL_DEFINE0(rt_sigreturn)
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15];
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 9c0ccb5..2d337cb 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -685,7 +685,8 @@
 	if (MACHINE_HAS_IEEE)
 		lowcore->extended_save_area_addr = (u32) save_area;
 #else
-	BUG_ON(vdso_alloc_per_cpu(smp_processor_id(), lowcore));
+	if (vdso_alloc_per_cpu(smp_processor_id(), lowcore))
+		BUG();
 #endif
 	set_prefix((u32)(unsigned long) lowcore);
 	local_mcck_enable();
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 4fe952e..c7ae4b1 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -29,6 +29,7 @@
 #include <linux/personality.h>
 #include <linux/unistd.h>
 #include <linux/ipc.h>
+#include <linux/syscalls.h>
 #include <asm/uaccess.h>
 #include "entry.h"
 
@@ -74,7 +75,7 @@
 	unsigned long offset;
 };
 
-asmlinkage long sys_mmap2(struct mmap_arg_struct __user  *arg)
+SYSCALL_DEFINE1(mmap2, struct mmap_arg_struct __user *, arg)
 {
 	struct mmap_arg_struct a;
 	int error = -EFAULT;
@@ -86,7 +87,7 @@
 	return error;
 }
 
-asmlinkage long old_mmap(struct mmap_arg_struct __user *arg)
+SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct __user *, arg)
 {
 	struct mmap_arg_struct a;
 	long error = -EFAULT;
@@ -103,32 +104,13 @@
 	return error;
 }
 
-#ifndef CONFIG_64BIT
-struct sel_arg_struct {
-	unsigned long n;
-	fd_set __user *inp, *outp, *exp;
-	struct timeval __user *tvp;
-};
-
-asmlinkage long old_select(struct sel_arg_struct __user *arg)
-{
-	struct sel_arg_struct a;
-
-	if (copy_from_user(&a, arg, sizeof(a)))
-		return -EFAULT;
-	/* sys_select() does the appropriate kernel locking */
-	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
-
-}
-#endif /* CONFIG_64BIT */
-
 /*
  * sys_ipc() is the de-multiplexer for the SysV IPC calls..
  *
  * This is really horribly ugly.
  */
-asmlinkage long sys_ipc(uint call, int first, unsigned long second,
-				  unsigned long third, void __user *ptr)
+SYSCALL_DEFINE5(ipc, uint, call, int, first, unsigned long, second,
+		unsigned long, third, void __user *, ptr)
 {
         struct ipc_kludge tmp;
 	int ret;
@@ -194,7 +176,7 @@
 }
 
 #ifdef CONFIG_64BIT
-asmlinkage long s390x_newuname(struct new_utsname __user *name)
+SYSCALL_DEFINE1(s390_newuname, struct new_utsname __user *, name)
 {
 	int ret = sys_newuname(name);
 
@@ -205,7 +187,7 @@
 	return ret;
 }
 
-asmlinkage long s390x_personality(unsigned long personality)
+SYSCALL_DEFINE1(s390_personality, unsigned long, personality)
 {
 	int ret;
 
@@ -224,15 +206,13 @@
  */
 #ifndef CONFIG_64BIT
 
-asmlinkage long
-s390_fadvise64(int fd, u32 offset_high, u32 offset_low, size_t len, int advice)
+SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, offset_high, u32, offset_low,
+		size_t, len, int, advice)
 {
 	return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low,
 			len, advice);
 }
 
-#endif
-
 struct fadvise64_64_args {
 	int fd;
 	long long offset;
@@ -240,8 +220,7 @@
 	int advice;
 };
 
-asmlinkage long
-s390_fadvise64_64(struct fadvise64_64_args __user *args)
+SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args)
 {
 	struct fadvise64_64_args a;
 
@@ -250,7 +229,6 @@
 	return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
 }
 
-#ifndef CONFIG_64BIT
 /*
  * This is a wrapper to call sys_fallocate(). For 31 bit s390 the last
  * 64 bit argument "len" is split into the upper and lower 32 bits. The
@@ -263,9 +241,19 @@
  * to
  *   %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len
  */
-asmlinkage long s390_fallocate(int fd, int mode, loff_t offset,
+SYSCALL_DEFINE(s390_fallocate)(int fd, int mode, loff_t offset,
 			       u32 len_high, u32 len_low)
 {
 	return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low);
 }
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_s390_fallocate(long fd, long mode, loff_t offset,
+				   long len_high, long len_low)
+{
+	return SYSC_s390_fallocate((int) fd, (int) mode, offset,
+				   (u32) len_high, (u32) len_low);
+}
+SYSCALL_ALIAS(sys_s390_fallocate, SyS_s390_fallocate);
+#endif
+
 #endif
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 2d61787..76d16e0 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -98,7 +98,7 @@
 SYSCALL(sys_swapon,sys_swapon,sys32_swapon_wrapper)
 SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper)
 SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper)	/* old readdir syscall */
-SYSCALL(old_mmap,old_mmap,old32_mmap_wrapper)			/* 90 */
+SYSCALL(sys_s390_old_mmap,sys_s390_old_mmap,old32_mmap_wrapper)	/* 90 */
 SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper)
 SYSCALL(sys_truncate,sys_truncate,sys32_truncate_wrapper)
 SYSCALL(sys_ftruncate,sys_ftruncate,sys32_ftruncate_wrapper)
@@ -130,7 +130,7 @@
 SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn)
 SYSCALL(sys_clone,sys_clone,sys32_clone)			/* 120 */
 SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
-SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper)
+SYSCALL(sys_newuname,sys_s390_newuname,sys32_newuname_wrapper)
 NI_SYSCALL							/* modify_ldt for i386 */
 SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper)
 SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper)	/* 125 */
@@ -144,7 +144,7 @@
 SYSCALL(sys_fchdir,sys_fchdir,sys32_fchdir_wrapper)
 SYSCALL(sys_bdflush,sys_bdflush,sys32_bdflush_wrapper)
 SYSCALL(sys_sysfs,sys_sysfs,sys32_sysfs_wrapper)		/* 135 */
-SYSCALL(sys_personality,s390x_personality,sys32_personality_wrapper)
+SYSCALL(sys_personality,sys_s390_personality,sys32_personality_wrapper)
 NI_SYSCALL							/* for afs_syscall */
 SYSCALL(sys_setfsuid16,sys_ni_syscall,sys32_setfsuid16_wrapper)	/* old setfsuid16 syscall */
 SYSCALL(sys_setfsgid16,sys_ni_syscall,sys32_setfsgid16_wrapper)	/* old setfsgid16 syscall */
@@ -261,7 +261,7 @@
 SYSCALL(sys_epoll_ctl,sys_epoll_ctl,sys_epoll_ctl_wrapper)	/* 250 */
 SYSCALL(sys_epoll_wait,sys_epoll_wait,sys_epoll_wait_wrapper)
 SYSCALL(sys_set_tid_address,sys_set_tid_address,sys32_set_tid_address_wrapper)
-SYSCALL(s390_fadvise64,sys_fadvise64_64,sys32_fadvise64_wrapper)
+SYSCALL(sys_s390_fadvise64,sys_fadvise64_64,sys32_fadvise64_wrapper)
 SYSCALL(sys_timer_create,sys_timer_create,sys32_timer_create_wrapper)
 SYSCALL(sys_timer_settime,sys_timer_settime,sys32_timer_settime_wrapper)	/* 255 */
 SYSCALL(sys_timer_gettime,sys_timer_gettime,sys32_timer_gettime_wrapper)
@@ -272,7 +272,7 @@
 SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper)
 SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper)
 NI_SYSCALL							/* reserved for vserver */
-SYSCALL(s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
+SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
 SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper)
 SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper)
 SYSCALL(sys_remap_file_pages,sys_remap_file_pages,sys32_remap_file_pages_wrapper)
@@ -322,7 +322,7 @@
 SYSCALL(sys_getcpu,sys_getcpu,sys_getcpu_wrapper)
 SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait_wrapper)
 SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes_wrapper)
-SYSCALL(s390_fallocate,sys_fallocate,sys_fallocate_wrapper)
+SYSCALL(sys_s390_fallocate,sys_fallocate,sys_fallocate_wrapper)
 SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat_wrapper)	/* 315 */
 SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd_wrapper)
 NI_SYSCALL						/* 317 old sys_timer_fd */
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 25a6a82..690e178 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -322,7 +322,8 @@
 	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
 	vdso64_pagelist[vdso64_pages] = NULL;
 #ifndef CONFIG_SMP
-	BUG_ON(vdso_alloc_per_cpu(0, S390_lowcore));
+	if (vdso_alloc_per_cpu(0, &S390_lowcore))
+		BUG();
 #endif
 	vdso_init_cr5();
 #endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index c32f29c..ad8acfc 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -13,10 +13,6 @@
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 
-#include <asm/vdso.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-
 	.text
 	.align 4
 	.globl __kernel_gettimeofday
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index a0775e1..8300309 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -47,7 +47,7 @@
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
 	vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
-	VCPU_EVENT(vcpu, 3, "requesting userspace resets %lx",
+	VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
 	  vcpu->run->s390_reset_flags);
 	return -EREMOTE;
 }
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 2960702..f4fe28a 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -160,7 +160,7 @@
 		break;
 
 	case KVM_S390_INT_VIRTIO:
-		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx",
+		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
 			   inti->ext.ext_params, inti->ext.ext_params2);
 		vcpu->stat.deliver_virtio_interrupt++;
 		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
@@ -360,7 +360,7 @@
 	vcpu->arch.ckc_timer.expires = jiffies + sltime;
 
 	add_timer(&vcpu->arch.ckc_timer);
-	VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime);
+	VCPU_EVENT(vcpu, 5, "enabled wait timer:%llx jiffies", sltime);
 no_timer:
 	spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
@@ -491,7 +491,7 @@
 
 	switch (s390int->type) {
 	case KVM_S390_INT_VIRTIO:
-		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx",
+		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
 			 s390int->parm, s390int->parm64);
 		inti->type = s390int->type;
 		inti->ext.ext_params = s390int->parm;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index cce40ff..3605df45 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -118,7 +118,7 @@
 		goto out;
 	}
 
-	VCPU_EVENT(vcpu, 5, "storing cpu address to %lx", useraddr);
+	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
 out:
 	return 0;
 }
diff --git a/arch/sh/drivers/pci/ops-cayman.c b/arch/sh/drivers/pci/ops-cayman.c
index 5ccf9ea..38ef762 100644
--- a/arch/sh/drivers/pci/ops-cayman.c
+++ b/arch/sh/drivers/pci/ops-cayman.c
@@ -5,11 +5,6 @@
 #include <cpu/irq.h>
 #include "pci-sh5.h"
 
-static inline u8 bridge_swizzle(u8 pin, u8 slot)
-{
-	return (((pin - 1) + slot) % 4) + 1;
-}
-
 int __init pcibios_map_platform_irq(struct pci_dev *dev, u8 slot, u8 pin)
 {
 	int result = -1;
@@ -42,7 +37,7 @@
 	while (dev->bus->number > 0) {
 
 		slot = path[i].slot = PCI_SLOT(dev->devfn);
-		pin = path[i].pin = bridge_swizzle(pin, slot);
+		pin = path[i].pin = pci_swizzle_interrupt_pin(dev, pin);
 		dev = dev->bus->self;
 		i++;
 		if (i > 3) panic("PCI path to root bus too long!\n");
@@ -56,7 +51,7 @@
 	if ((slot < 3) || (i == 0)) {
 		/* Bus 0 (incl. PCI-PCI bridge itself) : perform the final
 		   swizzle now. */
-		result = IRQ_INTA + bridge_swizzle(pin, slot) - 1;
+		result = IRQ_INTA + pci_swizzle_interrupt_pin(dev, pin) - 1;
 	} else {
 		i--;
 		slot = path[i].slot;
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index d3839e6..e36c7b8 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -21,26 +21,6 @@
 #include <linux/init.h>
 #include <asm/io.h>
 
-static inline u8 bridge_swizzle(u8 pin, u8 slot)
-{
-	return (((pin - 1) + slot) % 4) + 1;
-}
-
-static u8 __init simple_swizzle(struct pci_dev *dev, u8 *pinp)
-{
-	u8 pin = *pinp;
-
-	while (dev->bus->parent) {
-		pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
-		/* Move up the chain of bridges. */
-		dev = dev->bus->self;
-	}
-	*pinp = pin;
-
-	/* The slot is the slot of the last bridge. */
-	return PCI_SLOT(dev->devfn);
-}
-
 static int __init pcibios_init(void)
 {
 	struct pci_channel *p;
@@ -61,7 +41,7 @@
 		busno = bus->subordinate + 1;
 	}
 
-	pci_fixup_irqs(simple_swizzle, pcibios_map_platform_irq);
+	pci_fixup_irqs(pci_common_swizzle, pcibios_map_platform_irq);
 
 	return 0;
 }
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index f1a2a0d..43910cd 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -6,4 +6,3 @@
 unifdef-y += unistd_64.h
 unifdef-y += posix_types_32.h
 unifdef-y += posix_types_64.h
-unifdef-y += swab.h
diff --git a/arch/sh/include/asm/byteorder.h b/arch/sh/include/asm/byteorder.h
index e95c41a..db2f5d7 100644
--- a/arch/sh/include/asm/byteorder.h
+++ b/arch/sh/include/asm/byteorder.h
@@ -1,8 +1,6 @@
 #ifndef __ASM_SH_BYTEORDER_H
 #define __ASM_SH_BYTEORDER_H
 
-#include <asm/swab.h>
-
 #ifdef __LITTLE_ENDIAN__
 #include <linux/byteorder/little_endian.h>
 #else
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index fdcb93b..6c43625 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -9,7 +9,6 @@
 	mm_context_id_t		id;
 	void			*vdso;
 #else
-	struct vm_list_struct	*vmlist;
 	unsigned long		end_brk;
 #endif
 #ifdef CONFIG_BINFMT_ELF_FDPIC
diff --git a/arch/sh/include/asm/syscalls_32.h b/arch/sh/include/asm/syscalls_32.h
index 104c5e6..8b30200 100644
--- a/arch/sh/include/asm/syscalls_32.h
+++ b/arch/sh/include/asm/syscalls_32.h
@@ -36,9 +36,9 @@
 asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
 				unsigned long r6, unsigned long r7,
 				struct pt_regs __regs);
-asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
-			unsigned long r6, unsigned long r7,
-			struct pt_regs __regs);
+asmlinkage int sys_sh_pipe(unsigned long r4, unsigned long r5,
+			   unsigned long r6, unsigned long r7,
+			   struct pt_regs __regs);
 asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf,
 				     size_t count, long dummy, loff_t pos);
 asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf,
diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
index dbba1e1..63ba128 100644
--- a/arch/sh/kernel/sys_sh32.c
+++ b/arch/sh/kernel/sys_sh32.c
@@ -22,7 +22,7 @@
  * sys_pipe() is the normal C calling standard for creating
  * a pipe. It's not the way Unix traditionally does this, though.
  */
-asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
+asmlinkage int sys_sh_pipe(unsigned long r4, unsigned long r5,
 	unsigned long r6, unsigned long r7,
 	struct pt_regs __regs)
 {
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 0af693e..e67c173 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -58,7 +58,7 @@
 	.long sys_mkdir
 	.long sys_rmdir		/* 40 */
 	.long sys_dup
-	.long sys_pipe
+	.long sys_sh_pipe
 	.long sys_times
 	.long sys_ni_syscall	/* old prof syscall holder */
 	.long sys_brk		/* 45 */
@@ -105,7 +105,7 @@
 	.long sys_uselib
 	.long sys_swapon
 	.long sys_reboot
-	.long old_readdir
+	.long sys_old_readdir
 	.long old_mmap		/* 90 */
 	.long sys_munmap
 	.long sys_truncate
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 0b436aa..557cb91 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -109,7 +109,7 @@
 	.long sys_uselib
 	.long sys_swapon
 	.long sys_reboot
-	.long old_readdir
+	.long sys_old_readdir
 	.long old_mmap			/* 90 */
 	.long sys_munmap
 	.long sys_truncate
diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig
index 2e3a149..09ab46e 100644
--- a/arch/sparc/configs/sparc32_defconfig
+++ b/arch/sparc/configs/sparc32_defconfig
@@ -1,15 +1,21 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25
-# Tue Apr 29 01:28:58 2008
+# Linux kernel version: 2.6.28
+# Thu Jan  8 16:45:44 2009
 #
+# CONFIG_64BIT is not set
+CONFIG_SPARC=y
+CONFIG_SPARC32=y
+# CONFIG_SPARC64 is not set
+CONFIG_ARCH_DEFCONFIG="arch/sparc/configs/sparc32_defconfig"
+CONFIG_BITS=32
+CONFIG_AUDIT_ARCH=y
 CONFIG_MMU=y
 CONFIG_HIGHMEM=y
 CONFIG_ZONE_DMA=y
 CONFIG_GENERIC_ISA_DMA=y
 CONFIG_ARCH_NO_VIRT_TO_BUS=y
 CONFIG_OF=y
-CONFIG_HZ=100
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 
 #
@@ -66,31 +72,30 @@
 CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
+CONFIG_AIO=y
 CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
 # CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
 CONFIG_HAVE_OPROFILE=y
-# CONFIG_HAVE_KPROBES is not set
-# CONFIG_HAVE_KRETPROBES is not set
-CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 # CONFIG_MODVERSIONS is not set
 # CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
 CONFIG_BLOCK=y
 # CONFIG_LBD is not set
 # CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -105,59 +110,73 @@
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="cfq"
 CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+# CONFIG_FREEZER is not set
 
 #
-# General machine setup
+# Processor type and features
 #
 # CONFIG_SMP is not set
-CONFIG_SPARC=y
-CONFIG_SPARC32=y
-CONFIG_SBUS=y
-CONFIG_SBUSCHAR=y
-CONFIG_SERIAL_CONSOLE=y
-CONFIG_SUN_AUXIO=y
-CONFIG_SUN_IO=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
 CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
 CONFIG_ARCH_MAY_HAVE_PC_FDC=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
 CONFIG_EMULATED_CMPXCHG=y
-CONFIG_SUN_PM=y
-# CONFIG_SUN4 is not set
-CONFIG_PCI=y
-CONFIG_PCI_SYSCALL=y
-# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
-# CONFIG_PCI_DEBUG is not set
-# CONFIG_NO_DMA is not set
-CONFIG_SUN_OPENPROMFS=m
-# CONFIG_SPARC_LED is not set
-CONFIG_BINFMT_ELF=y
-CONFIG_BINFMT_MISC=m
 CONFIG_SELECT_MEMORY_MODEL=y
 CONFIG_FLATMEM_MANUAL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_BOUNCE=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_SUN_PM=y
+# CONFIG_SPARC_LED is not set
+CONFIG_SERIAL_CONSOLE=y
 
 #
-# Networking
+# Bus options (PCI etc.)
 #
+CONFIG_SBUS=y
+CONFIG_SBUSCHAR=y
+CONFIG_PCI=y
+CONFIG_PCI_SYSCALL=y
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCCARD is not set
+CONFIG_SUN_OPENPROMFS=m
+CONFIG_SPARC32_PCI=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+CONFIG_BINFMT_MISC=m
 CONFIG_NET=y
 
 #
 # Networking options
 #
+# CONFIG_NET_NS is not set
+CONFIG_COMPAT_NET_DEV_OPS=y
 CONFIG_PACKET=y
 # CONFIG_PACKET_MMAP is not set
 CONFIG_UNIX=y
@@ -166,6 +185,7 @@
 # CONFIG_XFRM_SUB_POLICY is not set
 # CONFIG_XFRM_MIGRATE is not set
 # CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
 CONFIG_NET_KEY=m
 # CONFIG_NET_KEY_MIGRATE is not set
 CONFIG_INET=y
@@ -221,6 +241,7 @@
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_DECNET is not set
 # CONFIG_LLC2 is not set
@@ -231,6 +252,7 @@
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
 # CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
 
 #
 # Network testing
@@ -241,14 +263,14 @@
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
+# CONFIG_PHONET is not set
+CONFIG_WIRELESS=y
 # CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_OLD_REGULATORY=y
 # CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
 # CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIMAX is not set
 # CONFIG_RFKILL is not set
 # CONFIG_NET_9P is not set
 
@@ -262,7 +284,9 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
 # CONFIG_DEBUG_DRIVER is not set
 # CONFIG_DEBUG_DEVRES is not set
 # CONFIG_SYS_HYPERVISOR is not set
@@ -286,12 +310,15 @@
 # CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_PHANTOM is not set
 # CONFIG_EEPROM_93CX6 is not set
 # CONFIG_SGI_IOC4 is not set
 # CONFIG_TIFM_CORE is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_C2PORT is not set
 CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
@@ -335,6 +362,7 @@
 # CONFIG_SCSI_SRP_ATTRS is not set
 CONFIG_SCSI_LOWLEVEL=y
 # CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_CXGB3_ISCSI is not set
 # CONFIG_BLK_DEV_3W_XXXX_RAID is not set
 # CONFIG_SCSI_3W_9XXX is not set
 # CONFIG_SCSI_ACARD is not set
@@ -348,6 +376,8 @@
 # CONFIG_MEGARAID_LEGACY is not set
 # CONFIG_MEGARAID_SAS is not set
 # CONFIG_SCSI_HPTIOP is not set
+# CONFIG_LIBFC is not set
+# CONFIG_FCOE is not set
 # CONFIG_SCSI_DMX3191D is not set
 # CONFIG_SCSI_FUTURE_DOMAIN is not set
 # CONFIG_SCSI_IPS is not set
@@ -367,6 +397,7 @@
 # CONFIG_SCSI_DEBUG is not set
 CONFIG_SCSI_SUNESP=y
 # CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_DH is not set
 # CONFIG_ATA is not set
 # CONFIG_MD is not set
 # CONFIG_FUSION is not set
@@ -374,11 +405,14 @@
 #
 # IEEE 1394 (FireWire) support
 #
+
+#
+# Enable only one of the two stacks, unless you know what you are doing
+#
 # CONFIG_FIREWIRE is not set
 # CONFIG_IEEE1394 is not set
 # CONFIG_I2O is not set
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 CONFIG_DUMMY=m
 # CONFIG_BONDING is not set
 # CONFIG_MACVLAN is not set
@@ -402,14 +436,16 @@
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 # CONFIG_NET_PCI is not set
 # CONFIG_B44 is not set
+# CONFIG_ATL2 is not set
 CONFIG_NETDEV_1000=y
 # CONFIG_ACENIC is not set
 # CONFIG_DL2K is not set
 # CONFIG_E1000 is not set
-# CONFIG_E1000E is not set
-# CONFIG_E1000E_ENABLED is not set
 # CONFIG_IP1000 is not set
 # CONFIG_IGB is not set
 # CONFIG_MYRI_SBUS is not set
@@ -425,18 +461,25 @@
 # CONFIG_BNX2 is not set
 # CONFIG_QLA3XXX is not set
 # CONFIG_ATL1 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_JME is not set
 CONFIG_NETDEV_10000=y
 # CONFIG_CHELSIO_T1 is not set
+CONFIG_CHELSIO_T3_DEPENDS=y
 # CONFIG_CHELSIO_T3 is not set
+# CONFIG_ENIC is not set
 # CONFIG_IXGBE is not set
 # CONFIG_IXGB is not set
 # CONFIG_S2IO is not set
 # CONFIG_MYRI10GE is not set
 # CONFIG_NETXEN_NIC is not set
 # CONFIG_NIU is not set
+# CONFIG_MLX4_EN is not set
 # CONFIG_MLX4_CORE is not set
 # CONFIG_TEHUTI is not set
 # CONFIG_BNX2X is not set
+# CONFIG_QLGE is not set
+# CONFIG_SFC is not set
 # CONFIG_TR is not set
 
 #
@@ -445,6 +488,10 @@
 # CONFIG_WLAN_PRE80211 is not set
 # CONFIG_WLAN_80211 is not set
 # CONFIG_IWLWIFI_LEDS is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
 # CONFIG_WAN is not set
 # CONFIG_FDDI is not set
 # CONFIG_HIPPI is not set
@@ -492,9 +539,11 @@
 CONFIG_MOUSE_PS2_SYNAPTICS=y
 CONFIG_MOUSE_PS2_LIFEBOOK=y
 CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
 # CONFIG_MOUSE_PS2_TOUCHKIT is not set
 CONFIG_MOUSE_SERIAL=m
 # CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
 # CONFIG_MOUSE_VSXXXAA is not set
 # CONFIG_INPUT_JOYSTICK is not set
 # CONFIG_INPUT_TABLET is not set
@@ -516,15 +565,18 @@
 # Character devices
 #
 CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
 CONFIG_VT_CONSOLE=y
 CONFIG_HW_CONSOLE=y
 # CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
 # CONFIG_SERIAL_NONSTANDARD is not set
 # CONFIG_NOZOMI is not set
 
 #
 # Serial drivers
 #
+# CONFIG_SERIAL_8250 is not set
 
 #
 # Non-8250 serial port support
@@ -540,23 +592,20 @@
 CONFIG_CONSOLE_POLL=y
 # CONFIG_SERIAL_JSM is not set
 CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
 # CONFIG_IPMI_HANDLER is not set
 CONFIG_HW_RANDOM=m
-CONFIG_JS_RTC=m
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
 # CONFIG_RAW_DRIVER is not set
 # CONFIG_TCG_TPM is not set
 CONFIG_DEVPORT=y
 # CONFIG_I2C is not set
-
-#
-# SPI support
-#
 # CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
 # CONFIG_W1 is not set
 # CONFIG_POWER_SUPPLY is not set
 CONFIG_HWMON=y
@@ -577,25 +626,38 @@
 # CONFIG_SENSORS_W83627EHF is not set
 # CONFIG_HWMON_DEBUG_CHIP is not set
 # CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
 # CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
 
 #
 # Sonics Silicon Backplane
 #
-CONFIG_SSB_POSSIBLE=y
 # CONFIG_SSB is not set
 
 #
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
 
 #
 # Multimedia devices
 #
+
+#
+# Multimedia core support
+#
 # CONFIG_VIDEO_DEV is not set
 # CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
 # CONFIG_DAB is not set
 
 #
@@ -616,15 +678,17 @@
 #
 # CONFIG_PROM_CONSOLE is not set
 CONFIG_DUMMY_CONSOLE=y
-
-#
-# Sound
-#
 # CONFIG_SOUND is not set
 CONFIG_HID_SUPPORT=y
 CONFIG_HID=y
 # CONFIG_HID_DEBUG is not set
 # CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
+CONFIG_HID_COMPAT=y
 CONFIG_USB_SUPPORT=y
 CONFIG_USB_ARCH_HAS_HCD=y
 CONFIG_USB_ARCH_HAS_OHCI=y
@@ -632,32 +696,71 @@
 # CONFIG_USB is not set
 
 #
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# Enable Host or Gadget support to see Inventra options
+#
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
 #
 # CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_UWB is not set
 # CONFIG_MMC is not set
 # CONFIG_MEMSTICK is not set
 # CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
 # CONFIG_INFINIBAND is not set
-# CONFIG_RTC_CLASS is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+CONFIG_RTC_DRV_M48T59=y
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
 # CONFIG_UIO is not set
+# CONFIG_STAGING is not set
 
 #
 # Misc Linux/SPARC drivers
 #
 CONFIG_SUN_OPENPROMIO=m
-CONFIG_SUN_MOSTEK_RTC=m
-# CONFIG_SUN_BPP is not set
-# CONFIG_SUN_VIDEOPIX is not set
 # CONFIG_TADPOLE_TS102_UCTRL is not set
 # CONFIG_SUN_JSFLASH is not set
 
 #
-# Unix98 PTY support
-#
-CONFIG_UNIX98_PTY_COUNT=256
-
-#
 # File systems
 #
 CONFIG_EXT2_FS=y
@@ -666,11 +769,12 @@
 CONFIG_EXT2_FS_SECURITY=y
 # CONFIG_EXT2_FS_XIP is not set
 # CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
 CONFIG_FS_MBCACHE=y
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
 # CONFIG_XFS_FS is not set
 # CONFIG_OCFS2_FS is not set
 CONFIG_DNOTIFY=y
@@ -702,14 +806,12 @@
 CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_SYSFS=y
 # CONFIG_TMPFS is not set
 # CONFIG_HUGETLB_PAGE is not set
 # CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
 # CONFIG_ADFS_FS is not set
 # CONFIG_AFFS_FS is not set
 # CONFIG_HFS_FS is not set
@@ -720,6 +822,7 @@
 # CONFIG_CRAMFS is not set
 # CONFIG_VXFS_FS is not set
 # CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
 CONFIG_ROMFS_FS=m
@@ -729,13 +832,13 @@
 CONFIG_NFS_FS=y
 # CONFIG_NFS_V3 is not set
 # CONFIG_NFS_V4 is not set
-# CONFIG_NFSD is not set
 CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
 CONFIG_LOCKD=y
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=y
 CONFIG_SUNRPC_GSS=m
-# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_SUNRPC_REGISTER_V4 is not set
 CONFIG_RPCSEC_GSS_KRB5=m
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 # CONFIG_SMB_FS is not set
@@ -806,9 +909,12 @@
 # CONFIG_HEADERS_CHECK is not set
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
 # CONFIG_DEBUG_SLAB is not set
 # CONFIG_DEBUG_RT_MUTEXES is not set
 # CONFIG_RT_MUTEX_TESTER is not set
@@ -822,37 +928,59 @@
 # CONFIG_DEBUG_INFO is not set
 # CONFIG_DEBUG_VM is not set
 # CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
 # CONFIG_DEBUG_LIST is not set
 # CONFIG_DEBUG_SG is not set
-CONFIG_FRAME_POINTER=y
+# CONFIG_DEBUG_NOTIFIERS is not set
 # CONFIG_BOOT_PRINTK_DELAY is not set
 # CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
 # CONFIG_FAULT_INJECTION is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+
+#
+# Tracers
+#
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
 # CONFIG_SAMPLES is not set
-CONFIG_KGDB=y
 CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_KGDB=y
 CONFIG_KGDB_SERIAL_CONSOLE=y
 CONFIG_KGDB_TESTS=y
 # CONFIG_KGDB_TESTS_ON_BOOT is not set
 # CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_STACK_DEBUG is not set
 
 #
 # Security options
 #
 # CONFIG_KEYS is not set
 # CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
 # CONFIG_SECURITY_FILE_CAPABILITIES is not set
 CONFIG_CRYPTO=y
 
 #
 # Crypto core or helper
 #
+# CONFIG_CRYPTO_FIPS is not set
 CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
 CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
 CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
 CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
 CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
 # CONFIG_CRYPTO_GF128MUL is not set
 CONFIG_CRYPTO_NULL=m
 # CONFIG_CRYPTO_CRYPTD is not set
@@ -890,6 +1018,10 @@
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
 CONFIG_CRYPTO_SHA1=y
 CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
@@ -921,15 +1053,21 @@
 #
 CONFIG_CRYPTO_DEFLATE=y
 # CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
 # CONFIG_CRYPTO_HW is not set
 
 #
 # Library routines
 #
 CONFIG_BITREVERSE=y
-# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+CONFIG_GENERIC_FIND_LAST_BIT=y
 # CONFIG_CRC_CCITT is not set
 # CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
 # CONFIG_CRC_ITU_T is not set
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 95e38a4..deeb0fb 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -17,4 +17,3 @@
 header-y += uctx.h
 header-y += utrap.h
 header-y += watchdog.h
-header-y += swab.h
diff --git a/arch/sparc/include/asm/byteorder.h b/arch/sparc/include/asm/byteorder.h
index 48a047c..ccc1b6b 100644
--- a/arch/sparc/include/asm/byteorder.h
+++ b/arch/sparc/include/asm/byteorder.h
@@ -1,7 +1,6 @@
 #ifndef _SPARC_BYTEORDER_H
 #define _SPARC_BYTEORDER_H
 
-#include <asm/swab.h>
 #include <linux/byteorder/big_endian.h>
 
 #endif /* _SPARC_BYTEORDER_H */
diff --git a/arch/sparc/include/asm/oplib_32.h b/arch/sparc/include/asm/oplib_32.h
index 73d4552..33e31ce 100644
--- a/arch/sparc/include/asm/oplib_32.h
+++ b/arch/sparc/include/asm/oplib_32.h
@@ -177,17 +177,6 @@
 
 /* PROM device tree traversal functions... */
 
-#ifdef PROMLIB_INTERNAL
-
-/* Internal version of prom_getchild. */
-extern int __prom_getchild(int parent_node);
-
-/* Internal version of prom_getsibling. */
-extern int __prom_getsibling(int node);
-
-#endif
-
-
 /* Get the child node of the given node, or zero if no child exists. */
 extern int prom_getchild(int parent_node);
 
diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
index 6d2c2ca..a5db031 100644
--- a/arch/sparc/include/asm/oplib_64.h
+++ b/arch/sparc/include/asm/oplib_64.h
@@ -218,16 +218,6 @@
 
 /* PROM device tree traversal functions... */
 
-#ifdef PROMLIB_INTERNAL
-
-/* Internal version of prom_getchild. */
-extern int __prom_getchild(int parent_node);
-
-/* Internal version of prom_getsibling. */
-extern int __prom_getsibling(int node);
-
-#endif
-
 /* Get the child node of the given node, or zero if no child exists. */
 extern int prom_getchild(int parent_node);
 
diff --git a/arch/sparc/include/asm/signal.h b/arch/sparc/include/asm/signal.h
index 41535e7..cba4520 100644
--- a/arch/sparc/include/asm/signal.h
+++ b/arch/sparc/include/asm/signal.h
@@ -84,7 +84,11 @@
 
 #define __OLD_NSIG	32
 #define __NEW_NSIG      64
+#ifdef __arch64__
 #define _NSIG_BPW       64
+#else
+#define _NSIG_BPW       32
+#endif
 #define _NSIG_WORDS     (__NEW_NSIG / _NSIG_BPW)
 
 #define SIGRTMIN       32
diff --git a/arch/sparc/include/asm/timer_64.h b/arch/sparc/include/asm/timer_64.h
index 5b779fd..ef3c368 100644
--- a/arch/sparc/include/asm/timer_64.h
+++ b/arch/sparc/include/asm/timer_64.h
@@ -10,7 +10,7 @@
 #include <linux/init.h>
 
 struct sparc64_tick_ops {
-	unsigned long (*get_tick)(void);
+	unsigned long long (*get_tick)(void);
 	int (*add_compare)(unsigned long);
 	unsigned long softint_mask;
 	void (*disable_irq)(void);
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index b8a65b6..5bc0b8f 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -47,6 +47,10 @@
 	(pcibus_to_node(bus) == -1 ? \
 	 CPU_MASK_ALL : \
 	 node_to_cpumask(pcibus_to_node(bus)))
+#define cpumask_of_pcibus(bus)	\
+	(pcibus_to_node(bus) == -1 ? \
+	 CPU_MASK_ALL_PTR : \
+	 cpumask_of_node(pcibus_to_node(bus)))
 
 #define SD_NODE_INIT (struct sched_domain) {		\
 	.min_interval		= 8,			\
diff --git a/arch/sparc/include/asm/types.h b/arch/sparc/include/asm/types.h
index 8c28fde..22371188 100644
--- a/arch/sparc/include/asm/types.h
+++ b/arch/sparc/include/asm/types.h
@@ -11,7 +11,7 @@
 #if defined(__sparc__) && defined(__arch64__)
 
 /*** SPARC 64 bit ***/
-#include <asm-generic/int-l64.h>
+#include <asm-generic/int-ll64.h>
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c
index 09c8572..45c4123 100644
--- a/arch/sparc/kernel/auxio_32.c
+++ b/arch/sparc/kernel/auxio_32.c
@@ -76,6 +76,7 @@
 		return sbus_readb(auxio_register);
 	return 0;
 }
+EXPORT_SYMBOL(get_auxio);
 
 void set_auxio(unsigned char bits_on, unsigned char bits_off)
 {
@@ -102,7 +103,7 @@
 	};
 	spin_unlock_irqrestore(&auxio_lock, flags);
 }
-
+EXPORT_SYMBOL(set_auxio);
 
 /* sun4m power control register (AUXIO2) */
 
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
index 8b67347..9f52db2 100644
--- a/arch/sparc/kernel/auxio_64.c
+++ b/arch/sparc/kernel/auxio_64.c
@@ -72,6 +72,7 @@
 	bit = (ebus ? AUXIO_PCIO_LED : AUXIO_AUX1_LED);
 	__auxio_set_bit(bit, on, ebus);
 }
+EXPORT_SYMBOL(auxio_set_led);
 
 static void __auxio_sbus_set_lte(int on)
 {
@@ -90,6 +91,7 @@
 		break;
 	}
 }
+EXPORT_SYMBOL(auxio_set_lte);
 
 static struct of_device_id __initdata auxio_match[] = {
 	{
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 6c2da24..f0b8255 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -5,6 +5,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/threads.h>
@@ -20,6 +21,7 @@
 #include "kernel.h"
 
 DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
+EXPORT_PER_CPU_SYMBOL(__cpu_data);
 
 struct cpu_info {
 	int psr_vers;
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index f52e053..57c3984 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -286,7 +286,7 @@
 
 	rp = (struct ds_md_update_req *) (dpkt + 1);
 
-	printk(KERN_INFO "ds-%lu: Machine description update.\n", dp->id);
+	printk(KERN_INFO "ds-%llu: Machine description update.\n", dp->id);
 
 	mdesc_update();
 
@@ -325,7 +325,7 @@
 
 	rp = (struct ds_shutdown_req *) (dpkt + 1);
 
-	printk(KERN_ALERT "ds-%lu: Shutdown request from "
+	printk(KERN_ALERT "ds-%llu: Shutdown request from "
 	       "LDOM manager received.\n", dp->id);
 
 	memset(&pkt, 0, sizeof(pkt));
@@ -365,7 +365,7 @@
 
 	rp = (struct ds_panic_req *) (dpkt + 1);
 
-	printk(KERN_ALERT "ds-%lu: Panic request from "
+	printk(KERN_ALERT "ds-%llu: Panic request from "
 	       "LDOM manager received.\n", dp->id);
 
 	memset(&pkt, 0, sizeof(pkt));
@@ -549,7 +549,7 @@
 	for_each_cpu_mask(cpu, *mask) {
 		int err;
 
-		printk(KERN_INFO "ds-%lu: Starting cpu %d...\n",
+		printk(KERN_INFO "ds-%llu: Starting cpu %d...\n",
 		       dp->id, cpu);
 		err = cpu_up(cpu);
 		if (err) {
@@ -565,7 +565,7 @@
 				res = DR_CPU_RES_CPU_NOT_RESPONDING;
 			}
 
-			printk(KERN_INFO "ds-%lu: CPU startup failed err=%d\n",
+			printk(KERN_INFO "ds-%llu: CPU startup failed err=%d\n",
 			       dp->id, err);
 			dr_cpu_mark(resp, cpu, ncpus, res, stat);
 		}
@@ -605,7 +605,7 @@
 	for_each_cpu_mask(cpu, *mask) {
 		int err;
 
-		printk(KERN_INFO "ds-%lu: Shutting down cpu %d...\n",
+		printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n",
 		       dp->id, cpu);
 		err = cpu_down(cpu);
 		if (err)
@@ -684,7 +684,7 @@
 
 	rp = (struct ds_pri_msg *) (dpkt + 1);
 
-	printk(KERN_INFO "ds-%lu: PRI REQ [%lx:%lx], len=%d\n",
+	printk(KERN_INFO "ds-%llu: PRI REQ [%llx:%llx], len=%d\n",
 	       dp->id, rp->req_num, rp->type, len);
 }
 
@@ -816,7 +816,7 @@
 
 		if (ds_var_doorbell == 0 ||
 		    ds_var_response != DS_VAR_SUCCESS)
-			printk(KERN_ERR "ds-%lu: var-config [%s:%s] "
+			printk(KERN_ERR "ds-%llu: var-config [%s:%s] "
 			       "failed, response(%d).\n",
 			       dp->id, var, value,
 			       ds_var_response);
@@ -850,7 +850,7 @@
 
 static void ds_conn_reset(struct ds_info *dp)
 {
-	printk(KERN_ERR "ds-%lu: ds_conn_reset() from %p\n",
+	printk(KERN_ERR "ds-%llu: ds_conn_reset() from %p\n",
 	       dp->id, __builtin_return_address(0));
 }
 
@@ -912,11 +912,11 @@
 		struct ds_cap_state *cp = find_cap(dp, ap->handle);
 
 		if (!cp) {
-			printk(KERN_ERR "ds-%lu: REG ACK for unknown "
-			       "handle %lx\n", dp->id, ap->handle);
+			printk(KERN_ERR "ds-%llu: REG ACK for unknown "
+			       "handle %llx\n", dp->id, ap->handle);
 			return 0;
 		}
-		printk(KERN_INFO "ds-%lu: Registered %s service.\n",
+		printk(KERN_INFO "ds-%llu: Registered %s service.\n",
 		       dp->id, cp->service_id);
 		cp->state = CAP_STATE_REGISTERED;
 	} else if (pkt->type == DS_REG_NACK) {
@@ -924,8 +924,8 @@
 		struct ds_cap_state *cp = find_cap(dp, np->handle);
 
 		if (!cp) {
-			printk(KERN_ERR "ds-%lu: REG NACK for "
-			       "unknown handle %lx\n",
+			printk(KERN_ERR "ds-%llu: REG NACK for "
+			       "unknown handle %llx\n",
 			       dp->id, np->handle);
 			return 0;
 		}
@@ -982,8 +982,8 @@
 		int req_len = qp->req_len;
 
 		if (!cp) {
-			printk(KERN_ERR "ds-%lu: Data for unknown "
-			       "handle %lu\n",
+			printk(KERN_ERR "ds-%llu: Data for unknown "
+			       "handle %llu\n",
 			       dp->id, dpkt->handle);
 
 			spin_lock_irqsave(&ds_lock, flags);
@@ -1085,7 +1085,7 @@
 	}
 
 	if (event != LDC_EVENT_DATA_READY) {
-		printk(KERN_WARNING "ds-%lu: Unexpected LDC event %d\n",
+		printk(KERN_WARNING "ds-%llu: Unexpected LDC event %d\n",
 		       dp->id, event);
 		spin_unlock_irqrestore(&ds_lock, flags);
 		return;
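
The %lu to %llu and %lx to %llx conversions in ds.c (and in the files that follow) appear to accompany sparc's switch to unsigned long long for u64 (the int-ll64.h convention): dp->id and the handle fields are u64, so printk needs the ll length modifier to satisfy gcc's format checking. A small userspace sketch of the same rule, assuming a matching typedef:

	#include <stdio.h>

	typedef unsigned long long u64;	/* assumption: mirrors the int-ll64.h definition */

	int main(void)
	{
		u64 id = 42;

		/* %llu matches unsigned long long; plain %lu would be a format mismatch */
		printf("ds-%llu: example message\n", id);
		return 0;
	}
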
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index faf9ccd..f41ecc5 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1088,8 +1088,8 @@
 	 ld	[%sp + STACKFRAME_SZ + PT_I0], %o0
 
 	.align	4
-	.globl	sys_pipe
-sys_pipe:
+	.globl	sys_sparc_pipe
+sys_sparc_pipe:
 	mov	%o7, %l5
 	add	%sp, STACKFRAME_SZ, %o0		! pt_regs *regs arg
 	call	sparc_pipe
diff --git a/arch/sparc/kernel/idprom.c b/arch/sparc/kernel/idprom.c
index c16135e..57922f6 100644
--- a/arch/sparc/kernel/idprom.c
+++ b/arch/sparc/kernel/idprom.c
@@ -8,11 +8,14 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/init.h>
+#include <linux/module.h>
 
 #include <asm/oplib.h>
 #include <asm/idprom.h>
 
 struct idprom *idprom;
+EXPORT_SYMBOL(idprom);
+
 static struct idprom idprom_buffer;
 
 #ifdef CONFIG_SPARC32
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 1cc1995..d8900e1 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -434,7 +434,7 @@
 		val = iommu_read(matchreg);
 		if (unlikely(val)) {
 			printk(KERN_WARNING "strbuf_flush: ctx flush "
-			       "timeout matchreg[%lx] ctx[%lx]\n",
+			       "timeout matchreg[%llx] ctx[%lx]\n",
 			       val, ctx);
 			goto do_page_flush;
 		}
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 7ce14f0..87ea0d0 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -120,6 +120,7 @@
 	sprintf(name, "phys_%08x", (u32)offset);
 	return _sparc_alloc_io(0, offset, size, name);
 }
+EXPORT_SYMBOL(ioremap);
 
 /*
  * Complementary to ioremap().
@@ -141,6 +142,7 @@
 		kfree(res);
 	}
 }
+EXPORT_SYMBOL(iounmap);
 
 void __iomem *of_ioremap(struct resource *res, unsigned long offset,
 			 unsigned long size, char *name)
@@ -237,6 +239,7 @@
 {
 	printk("sbus_set_sbus64: unsupported\n");
 }
+EXPORT_SYMBOL(sbus_set_sbus64);
 
 /*
  * Allocate a chunk of memory suitable for DMA.
@@ -436,6 +439,7 @@
 	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
 	return (void *) res->start;
 }
+EXPORT_SYMBOL(pci_alloc_consistent);
 
 /* Free and unmap a consistent DMA buffer.
  * cpu_addr is what was returned from pci_alloc_consistent,
@@ -477,6 +481,7 @@
 
 	free_pages(pgp, get_order(n));
 }
+EXPORT_SYMBOL(pci_free_consistent);
 
 /* Map a single buffer of the indicated size for DMA in streaming mode.
  * The 32-bit bus address to use is returned.
@@ -491,6 +496,7 @@
 	/* IIep is write-through, not flushing. */
 	return virt_to_phys(ptr);
 }
+EXPORT_SYMBOL(pci_map_single);
 
 /* Unmap a single streaming mode DMA translation.  The dma_addr and size
  * must match what was provided for in a previous pci_map_single call.  All
@@ -508,6 +514,7 @@
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
 	}
 }
+EXPORT_SYMBOL(pci_unmap_single);
 
 /*
  * Same as pci_map_single, but with pages.
@@ -519,6 +526,7 @@
 	/* IIep is write-through, not flushing. */
 	return page_to_phys(page) + offset;
 }
+EXPORT_SYMBOL(pci_map_page);
 
 void pci_unmap_page(struct pci_dev *hwdev,
 			dma_addr_t dma_address, size_t size, int direction)
@@ -526,6 +534,7 @@
 	BUG_ON(direction == PCI_DMA_NONE);
 	/* mmu_inval_dma_area XXX */
 }
+EXPORT_SYMBOL(pci_unmap_page);
 
 /* Map a set of buffers described by scatterlist in streaming
  * mode for DMA.  This is the scatter-gather version of the
@@ -557,6 +566,7 @@
 	}
 	return nents;
 }
+EXPORT_SYMBOL(pci_map_sg);
 
 /* Unmap a set of streaming mode DMA translations.
  * Again, cpu read rules concerning calls here are the same as for
@@ -578,6 +588,7 @@
 		}
 	}
 }
+EXPORT_SYMBOL(pci_unmap_sg);
 
 /* Make physical memory consistent for a single
  * streaming mode DMA translation before or after a transfer.
@@ -597,6 +608,7 @@
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
 	}
 }
+EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
 
 void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
 {
@@ -606,6 +618,7 @@
 		    (size + PAGE_SIZE-1) & PAGE_MASK);
 	}
 }
+EXPORT_SYMBOL(pci_dma_sync_single_for_device);
 
 /* Make physical memory consistent for a set of streaming
  * mode DMA translations after a transfer.
@@ -628,6 +641,7 @@
 		}
 	}
 }
+EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
 
 void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
 {
@@ -644,6 +658,7 @@
 		}
 	}
 }
+EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
 #endif /* CONFIG_PCI */
 
 #ifdef CONFIG_PROC_FS
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index 1eff942..44dd5ee 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -294,6 +294,7 @@
 	while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
 		cpu_relax();
 }
+EXPORT_SYMBOL(synchronize_irq);
 #endif /* SMP */
 
 void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index 201a6e5..3bc6527 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -517,7 +517,7 @@
 	/*
 	 * It is possible to have multiple instances associated with a given
 	 * task either because multiple functions in the call path
-	 * have a return probe installed on them, and/or more then one return
+	 * have a return probe installed on them, and/or more than one return
 	 * return probe was registered for a target function.
 	 *
 	 * We can handle this because:
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index d6898233..6ce5d25 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -625,22 +625,23 @@
 static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
 {
 	struct ldc_version *vap;
+	struct ldc_packet *p;
+	unsigned long new_tail;
 
-	if ((vp->major == 0 && vp->minor == 0) ||
-	    !(vap = find_by_major(vp->major))) {
+	if (vp->major == 0 && vp->minor == 0)
 		return ldc_abort(lp);
-	} else {
-		struct ldc_packet *p;
-		unsigned long new_tail;
 
-		p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
+	vap = find_by_major(vp->major);
+	if (!vap)
+		return ldc_abort(lp);
+
+	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
 					   vap, sizeof(*vap),
 					   &new_tail);
-		if (p)
-			return send_tx_packet(lp, p, new_tail);
-		else
-			return ldc_abort(lp);
-	}
+	if (!p)
+		return ldc_abort(lp);
+
+	return send_tx_packet(lp, p, new_tail);
 }
 
 static int process_version(struct ldc_channel *lp,
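
The process_ver_nack() rewrite above keeps the same logic but flattens it into early returns: each failure aborts the channel as soon as it is detected, and the success path reads straight down with no nesting. A self-contained sketch of that shape, with hypothetical names standing in for the LDC helpers:

	struct version { unsigned int major, minor; };
	struct packet  { int payload; };

	static int abort_channel(void) { return -1; }

	static struct version *find_supported(unsigned int major)
	{
		static struct version v1 = { .major = 1, .minor = 0 };
		return major == 1 ? &v1 : NULL;	/* only major 1 is "known" here */
	}

	static struct packet *compose_reply(const struct version *v)
	{
		static struct packet p;
		p.payload = (int)v->minor;
		return &p;
	}

	static int send_packet(const struct packet *p) { return p->payload; }

	static int process_reply(const struct version *v)
	{
		struct version *known;
		struct packet *p;

		if (v->major == 0 && v->minor == 0)
			return abort_channel();	/* nothing left to negotiate */

		known = find_supported(v->major);
		if (!known)
			return abort_channel();	/* no common major version */

		p = compose_reply(known);
		if (!p)
			return abort_channel();	/* could not build a reply */

		return send_packet(p);		/* success path, straight down */
	}
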
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 3c539a6..3f79f0c 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -536,24 +536,24 @@
 
 	v = mdesc_get_property(hp, pn, "hostid", NULL);
 	if (v)
-		printk("PLATFORM: hostid [%08lx]\n", *v);
+		printk("PLATFORM: hostid [%08llx]\n", *v);
 	v = mdesc_get_property(hp, pn, "serial#", NULL);
 	if (v)
-		printk("PLATFORM: serial# [%08lx]\n", *v);
+		printk("PLATFORM: serial# [%08llx]\n", *v);
 	v = mdesc_get_property(hp, pn, "stick-frequency", NULL);
-	printk("PLATFORM: stick-frequency [%08lx]\n", *v);
+	printk("PLATFORM: stick-frequency [%08llx]\n", *v);
 	v = mdesc_get_property(hp, pn, "mac-address", NULL);
 	if (v)
-		printk("PLATFORM: mac-address [%lx]\n", *v);
+		printk("PLATFORM: mac-address [%llx]\n", *v);
 	v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL);
 	if (v)
-		printk("PLATFORM: watchdog-resolution [%lu ms]\n", *v);
+		printk("PLATFORM: watchdog-resolution [%llu ms]\n", *v);
 	v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL);
 	if (v)
-		printk("PLATFORM: watchdog-max-timeout [%lu ms]\n", *v);
+		printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v);
 	v = mdesc_get_property(hp, pn, "max-cpus", NULL);
 	if (v)
-		printk("PLATFORM: max-cpus [%lu]\n", *v);
+		printk("PLATFORM: max-cpus [%llu]\n", *v);
 
 #ifdef CONFIG_SMP
 	{
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 4873f28..b4a12c9 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -554,7 +554,7 @@
 		memset(r, 0, sizeof(*r));
 
 		if (of_resource_verbose)
-			printk("%s reg[%d] -> %lx\n",
+			printk("%s reg[%d] -> %llx\n",
 			       op->node->full_name, index,
 			       result);
 
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index bdb7c0a..4638fba 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -223,7 +223,7 @@
 			continue;
 		i = addrs[0] & 0xff;
 		if (ofpci_verbose)
-			printk("  start: %lx, end: %lx, i: %x\n",
+			printk("  start: %llx, end: %llx, i: %x\n",
 			       op_res->start, op_res->end, i);
 
 		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
@@ -1077,6 +1077,7 @@
 
 	return (device_mask & dma_addr_mask) == dma_addr_mask;
 }
+EXPORT_SYMBOL(pci_dma_supported);
 
 void pci_resource_to_user(const struct pci_dev *pdev, int bar,
 			  const struct resource *rp, resource_size_t *start,
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index 23b8808..64e6edf 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -457,7 +457,7 @@
 		prom_halt();
 	}
 
-	printk("%s: PCI IO[%lx] MEM[%lx]\n",
+	printk("%s: PCI IO[%llx] MEM[%llx]\n",
 	       pbm->name,
 	       pbm->io_space.start,
 	       pbm->mem_space.start);
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index 4ef282e..f1be37a 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -426,8 +426,8 @@
 		       pbm->name,
 		       pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
 		       pbm->msix_data_width);
-		printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
-		       "addr64[0x%lx:0x%x]\n",
+		printk(KERN_INFO "%s: MSI addr32[0x%llx:0x%x] "
+		       "addr64[0x%llx:0x%x]\n",
 		       pbm->name,
 		       pbm->msi32_start, pbm->msi32_len,
 		       pbm->msi64_start, pbm->msi64_len);
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index 45d9dba..2b5cdde 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -794,7 +794,7 @@
 		   pbm->controller_regs + SCHIZO_SAFARI_ERRLOG);
 
 	if (!(errlog & BUS_ERROR_UNMAP)) {
-		printk("%s: Unexpected Safari/JBUS error interrupt, errlog[%016lx]\n",
+		printk("%s: Unexpected Safari/JBUS error interrupt, errlog[%016llx]\n",
 		       pbm->name, errlog);
 
 		return IRQ_HANDLED;
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 34a1fde..0ef0ab3 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -73,7 +73,7 @@
 		if (unlikely(num < 0)) {
 			if (printk_ratelimit())
 				printk("iommu_batch_flush: IOMMU map of "
-				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
+				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
 				       "status %ld\n",
 				       devhandle, HV_PCI_TSBID(0, entry),
 				       npages, prot, __pa(pglist), num);
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 75ed98b..85e7037 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -956,6 +956,7 @@
 		/* addr += 1; */
 	}
 }
+EXPORT_SYMBOL(outsb);
 
 void outsw(unsigned long addr, const void *src, unsigned long count)
 {
@@ -966,6 +967,7 @@
 		/* addr += 2; */
 	}
 }
+EXPORT_SYMBOL(outsw);
 
 void outsl(unsigned long addr, const void *src, unsigned long count)
 {
@@ -976,6 +978,7 @@
 		/* addr += 4; */
 	}
 }
+EXPORT_SYMBOL(outsl);
 
 void insb(unsigned long addr, void *dst, unsigned long count)
 {
@@ -986,6 +989,7 @@
 		/* addr += 1; */
 	}
 }
+EXPORT_SYMBOL(insb);
 
 void insw(unsigned long addr, void *dst, unsigned long count)
 {
@@ -996,6 +1000,7 @@
 		/* addr += 2; */
 	}
 }
+EXPORT_SYMBOL(insw);
 
 void insl(unsigned long addr, void *dst, unsigned long count)
 {
@@ -1009,5 +1014,6 @@
 		/* addr += 4; */
 	}
 }
+EXPORT_SYMBOL(insl);
 
 subsys_initcall(pcic_init);
diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c
index 076cad7..ae88f06 100644
--- a/arch/sparc/kernel/power.c
+++ b/arch/sparc/kernel/power.c
@@ -40,7 +40,7 @@
 
 	power_reg = of_ioremap(res, 0, 0x4, "power");
 
-	printk(KERN_INFO "%s: Control reg at %lx\n",
+	printk(KERN_INFO "%s: Control reg at %llx\n",
 	       op->node->name, res->start);
 
 	if (has_button_interrupt(irq, op->node)) {
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 5a8d8ce..f4bee35 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -44,6 +44,7 @@
  * Set in pm platform drivers (apc.c and pmc.c)
  */
 void (*pm_idle)(void);
+EXPORT_SYMBOL(pm_idle);
 
 /* 
  * Power-off handler instantiation for pm.h compliance
@@ -673,6 +674,7 @@
 			     "g1", "g2", "g3", "o0", "o1", "memory", "cc");
 	return retval;
 }
+EXPORT_SYMBOL(kernel_thread);
 
 unsigned long get_wchan(struct task_struct *task)
 {
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index d5e2ace..cc8b560 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -678,6 +678,7 @@
 			     "g1", "g2", "g3", "o0", "o1", "memory", "cc");
 	return retval;
 }
+EXPORT_SYMBOL(kernel_thread);
 
 typedef struct {
 	union {
@@ -743,6 +744,7 @@
 	}
 	return 1;
 }
+EXPORT_SYMBOL(dump_fpu);
 
 /*
  * sparc_execve() executes a new program after the asm stub has set
diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c
index 96958c4..5702ad4 100644
--- a/arch/sparc/kernel/prom_irqtrans.c
+++ b/arch/sparc/kernel/prom_irqtrans.c
@@ -346,7 +346,7 @@
 			break;
 	}
 	if (limit <= 0) {
-		printk("tomatillo_wsync_handler: DMA won't sync [%lx:%lx]\n",
+		printk("tomatillo_wsync_handler: DMA won't sync [%llx:%llx]\n",
 		       val, mask);
 	}
 
diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c
index 7909964..8f1478475 100644
--- a/arch/sparc/kernel/psycho_common.c
+++ b/arch/sparc/kernel/psycho_common.c
@@ -11,19 +11,19 @@
 #include "iommu_common.h"
 #include "psycho_common.h"
 
-#define  PSYCHO_STRBUF_CTRL_DENAB	0x0000000000000002UL
-#define  PSYCHO_STCERR_WRITE		0x0000000000000002UL
-#define  PSYCHO_STCERR_READ		0x0000000000000001UL
-#define  PSYCHO_STCTAG_PPN		0x0fffffff00000000UL
-#define  PSYCHO_STCTAG_VPN		0x00000000ffffe000UL
-#define  PSYCHO_STCTAG_VALID		0x0000000000000002UL
-#define  PSYCHO_STCTAG_WRITE		0x0000000000000001UL
-#define  PSYCHO_STCLINE_LINDX		0x0000000001e00000UL
-#define  PSYCHO_STCLINE_SPTR		0x00000000001f8000UL
-#define  PSYCHO_STCLINE_LADDR		0x0000000000007f00UL
-#define  PSYCHO_STCLINE_EPTR		0x00000000000000fcUL
-#define  PSYCHO_STCLINE_VALID		0x0000000000000002UL
-#define  PSYCHO_STCLINE_FOFN		0x0000000000000001UL
+#define  PSYCHO_STRBUF_CTRL_DENAB	0x0000000000000002ULL
+#define  PSYCHO_STCERR_WRITE		0x0000000000000002ULL
+#define  PSYCHO_STCERR_READ		0x0000000000000001ULL
+#define  PSYCHO_STCTAG_PPN		0x0fffffff00000000ULL
+#define  PSYCHO_STCTAG_VPN		0x00000000ffffe000ULL
+#define  PSYCHO_STCTAG_VALID		0x0000000000000002ULL
+#define  PSYCHO_STCTAG_WRITE		0x0000000000000001ULL
+#define  PSYCHO_STCLINE_LINDX		0x0000000001e00000ULL
+#define  PSYCHO_STCLINE_SPTR		0x00000000001f8000ULL
+#define  PSYCHO_STCLINE_LADDR		0x0000000000007f00ULL
+#define  PSYCHO_STCLINE_EPTR		0x00000000000000fcULL
+#define  PSYCHO_STCLINE_VALID		0x0000000000000002ULL
+#define  PSYCHO_STCLINE_FOFN		0x0000000000000001ULL
 
 static DEFINE_SPINLOCK(stc_buf_lock);
 static unsigned long stc_error_buf[128];
@@ -94,7 +94,7 @@
 		if (saw_error != 0) {
 			u64 tagval = stc_tag_buf[i];
 			u64 lineval = stc_line_buf[i];
-			printk(KERN_ERR "%s: STC_TAG(%d)[PA(%016lx)VA(%08lx)"
+			printk(KERN_ERR "%s: STC_TAG(%d)[PA(%016llx)VA(%08llx)"
 			       "V(%d)W(%d)]\n",
 			       pbm->name,
 			       i,
@@ -102,8 +102,8 @@
 			       (tagval & PSYCHO_STCTAG_VPN),
 			       ((tagval & PSYCHO_STCTAG_VALID) ? 1 : 0),
 			       ((tagval & PSYCHO_STCTAG_WRITE) ? 1 : 0));
-			printk(KERN_ERR "%s: STC_LINE(%d)[LIDX(%lx)SP(%lx)"
-			       "LADDR(%lx)EP(%lx)V(%d)FOFN(%d)]\n",
+			printk(KERN_ERR "%s: STC_LINE(%d)[LIDX(%llx)SP(%llx)"
+			       "LADDR(%llx)EP(%llx)V(%d)FOFN(%d)]\n",
 			       pbm->name,
 			       i,
 			       ((lineval & PSYCHO_STCLINE_LINDX) >> 21UL),
@@ -144,10 +144,10 @@
 #define  PSYCHO_IOMMU_TAG_WRITE	 (0x1UL << 21UL)
 #define  PSYCHO_IOMMU_TAG_STREAM (0x1UL << 20UL)
 #define  PSYCHO_IOMMU_TAG_SIZE	 (0x1UL << 19UL)
-#define  PSYCHO_IOMMU_TAG_VPAGE	 0x7ffffUL
+#define  PSYCHO_IOMMU_TAG_VPAGE	 0x7ffffULL
 #define  PSYCHO_IOMMU_DATA_VALID (1UL << 30UL)
 #define  PSYCHO_IOMMU_DATA_CACHE (1UL << 28UL)
-#define  PSYCHO_IOMMU_DATA_PPAGE 0xfffffffUL
+#define  PSYCHO_IOMMU_DATA_PPAGE 0xfffffffULL
 
 static void psycho_dump_iommu_tags_and_data(struct pci_pbm_info *pbm,
 					    u64 *tag, u64 *data)
@@ -179,18 +179,18 @@
 		}
 
 		printk(KERN_ERR "%s: IOMMU TAG(%d)[error(%s) wr(%d) "
-		       "str(%d) sz(%dK) vpg(%08lx)]\n",
+		       "str(%d) sz(%dK) vpg(%08llx)]\n",
 		       pbm->name, i, type_str,
 		       ((tag_val & PSYCHO_IOMMU_TAG_WRITE) ? 1 : 0),
 		       ((tag_val & PSYCHO_IOMMU_TAG_STREAM) ? 1 : 0),
 		       ((tag_val & PSYCHO_IOMMU_TAG_SIZE) ? 64 : 8),
 		       (tag_val & PSYCHO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
 		printk(KERN_ERR "%s: IOMMU DATA(%d)[valid(%d) cache(%d) "
-		       "ppg(%016lx)]\n",
+		       "ppg(%016llx)]\n",
 		       pbm->name, i,
 		       ((data_val & PSYCHO_IOMMU_DATA_VALID) ? 1 : 0),
 		       ((data_val & PSYCHO_IOMMU_DATA_CACHE) ? 1 : 0),
-		       (data_val & PSYCHO_IOMMU_DATA_PPAGE)<<IOMMU_PAGE_SHIFT);
+		       (data_val & PSYCHO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
 	}
 }
 
@@ -285,20 +285,20 @@
 	return ret;
 }
 
-#define  PSYCHO_PCIAFSR_PMA	0x8000000000000000UL
-#define  PSYCHO_PCIAFSR_PTA	0x4000000000000000UL
-#define  PSYCHO_PCIAFSR_PRTRY	0x2000000000000000UL
-#define  PSYCHO_PCIAFSR_PPERR	0x1000000000000000UL
-#define  PSYCHO_PCIAFSR_SMA	0x0800000000000000UL
-#define  PSYCHO_PCIAFSR_STA	0x0400000000000000UL
-#define  PSYCHO_PCIAFSR_SRTRY	0x0200000000000000UL
-#define  PSYCHO_PCIAFSR_SPERR	0x0100000000000000UL
-#define  PSYCHO_PCIAFSR_RESV1	0x00ff000000000000UL
-#define  PSYCHO_PCIAFSR_BMSK	0x0000ffff00000000UL
-#define  PSYCHO_PCIAFSR_BLK	0x0000000080000000UL
-#define  PSYCHO_PCIAFSR_RESV2	0x0000000040000000UL
-#define  PSYCHO_PCIAFSR_MID	0x000000003e000000UL
-#define  PSYCHO_PCIAFSR_RESV3	0x0000000001ffffffUL
+#define  PSYCHO_PCIAFSR_PMA	0x8000000000000000ULL
+#define  PSYCHO_PCIAFSR_PTA	0x4000000000000000ULL
+#define  PSYCHO_PCIAFSR_PRTRY	0x2000000000000000ULL
+#define  PSYCHO_PCIAFSR_PPERR	0x1000000000000000ULL
+#define  PSYCHO_PCIAFSR_SMA	0x0800000000000000ULL
+#define  PSYCHO_PCIAFSR_STA	0x0400000000000000ULL
+#define  PSYCHO_PCIAFSR_SRTRY	0x0200000000000000ULL
+#define  PSYCHO_PCIAFSR_SPERR	0x0100000000000000ULL
+#define  PSYCHO_PCIAFSR_RESV1	0x00ff000000000000ULL
+#define  PSYCHO_PCIAFSR_BMSK	0x0000ffff00000000ULL
+#define  PSYCHO_PCIAFSR_BLK	0x0000000080000000ULL
+#define  PSYCHO_PCIAFSR_RESV2	0x0000000040000000ULL
+#define  PSYCHO_PCIAFSR_MID	0x000000003e000000ULL
+#define  PSYCHO_PCIAFSR_RESV3	0x0000000001ffffffULL
 
 irqreturn_t psycho_pcierr_intr(int irq, void *dev_id)
 {
@@ -326,12 +326,12 @@
 		   "Excessive Retries" :
 		   ((error_bits & PSYCHO_PCIAFSR_PPERR) ?
 		    "Parity Error" : "???"))))));
-	printk(KERN_ERR "%s: bytemask[%04lx] UPA_MID[%02lx] was_block(%d)\n",
+	printk(KERN_ERR "%s: bytemask[%04llx] UPA_MID[%02llx] was_block(%d)\n",
 	       pbm->name,
 	       (afsr & PSYCHO_PCIAFSR_BMSK) >> 32UL,
 	       (afsr & PSYCHO_PCIAFSR_MID) >> 25UL,
 	       (afsr & PSYCHO_PCIAFSR_BLK) ? 1 : 0);
-	printk(KERN_ERR "%s: PCI AFAR [%016lx]\n", pbm->name, afar);
+	printk(KERN_ERR "%s: PCI AFAR [%016llx]\n", pbm->name, afar);
 	printk(KERN_ERR "%s: PCI Secondary errors [", pbm->name);
 	reported = 0;
 	if (afsr & PSYCHO_PCIAFSR_SMA) {
diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c
index 2ead310..406e087 100644
--- a/arch/sparc/kernel/sbus.c
+++ b/arch/sparc/kernel/sbus.c
@@ -117,6 +117,7 @@
 		val |= (1UL << 4UL);
 	upa_writeq(val, cfg_reg);
 }
+EXPORT_SYMBOL(sbus_set_sbus64);
 
 /* INO number to IMAP register offset for SYSIO external IRQ's.
  * This should conform to both Sunfire/Wildfire server and Fusion
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index c96c65d..998cadb 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -199,7 +199,9 @@
 extern int root_mountflags;
 
 char reboot_command[COMMAND_LINE_SIZE];
+
 enum sparc_cpu sparc_cpu_model;
+EXPORT_SYMBOL(sparc_cpu_model);
 
 struct tt_entry *sparc_ttable;
 
@@ -391,6 +393,7 @@
 
 	prom_cmdline();
 }
+EXPORT_SYMBOL(sun_do_break);
 
 int stop_a_enabled = 1;
 
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 555db74..49d061f 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -58,6 +58,7 @@
  * operations in asm/ns87303.h
  */
 DEFINE_SPINLOCK(ns87303_lock);
+EXPORT_SYMBOL(ns87303_lock);
 
 struct screen_info screen_info = {
 	0, 0,			/* orig-x, orig-y */
@@ -425,5 +426,7 @@
 
 	prom_cmdline();
 }
+EXPORT_SYMBOL(sun_do_break);
 
 int stop_a_enabled = 1;
+EXPORT_SYMBOL(stop_a_enabled);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 4632979..6cd1a5b 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -449,7 +449,7 @@
 	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 			     : : "r" (pstate));
 	if (stuck == 0) {
-		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
+		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
 		       smp_processor_id(), result);
 	} else {
 		udelay(2);
@@ -584,7 +584,7 @@
 			/* Busy bits will not clear, continue instead
 			 * of freezing up on this cpu.
 			 */
-			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
+			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
 			       smp_processor_id(), dispatch_stat);
 		} else {
 			int i, this_busy_nack = 0;
diff --git a/arch/sparc/kernel/sparc_ksyms_32.c b/arch/sparc/kernel/sparc_ksyms_32.c
index e1e9763..baeab87 100644
--- a/arch/sparc/kernel/sparc_ksyms_32.c
+++ b/arch/sparc/kernel/sparc_ksyms_32.c
@@ -5,49 +5,14 @@
  * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
  */
 
-/* Tell string.h we don't want memcpy etc. as cpp defines */
-#define EXPORT_SYMTAB_STROPS
-#define PROMLIB_INTERNAL
-
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/in6.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/syscalls.h>
-#ifdef CONFIG_PCI
-#include <linux/pci.h>
-#endif
-#include <linux/pm.h>
-#ifdef CONFIG_HIGHMEM
-#include <linux/highmem.h>
-#endif
 
-#include <asm/oplib.h>
-#include <asm/delay.h>
-#include <asm/system.h>
-#include <asm/auxio.h>
 #include <asm/pgtable.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/idprom.h>
-#include <asm/head.h>
-#include <asm/smp.h>
-#include <asm/ptrace.h>
 #include <asm/uaccess.h>
-#include <asm/checksum.h>
-#ifdef CONFIG_SBUS
+#include <asm/delay.h>
+#include <asm/head.h>
 #include <asm/dma.h>
-#endif
-#include <asm/io-unit.h>
-#include <asm/bug.h>
-
-extern spinlock_t rtc_lock;
 
 struct poll {
 	int fd;
@@ -55,72 +20,15 @@
 	short revents;
 };
 
-extern void (*__copy_1page)(void *, const void *);
-extern void __memmove(void *, const void *, __kernel_size_t);
-extern void (*bzero_1page)(void *);
-extern void *__bzero(void *, size_t);
-extern void *__memscan_zero(void *, size_t);
-extern void *__memscan_generic(void *, int, size_t);
-extern int __strncmp(const char *, const char *, __kernel_size_t);
-
-extern int __ashrdi3(int, int);
-extern int __ashldi3(int, int);
-extern int __lshrdi3(int, int);
-extern int __muldi3(int, int);
-extern int __divdi3(int, int);
-
-/* Private functions with odd calling conventions. */
-extern void ___atomic24_add(void);
-extern void ___atomic24_sub(void);
-extern void ___rw_read_enter(void);
-extern void ___rw_read_try(void);
-extern void ___rw_read_exit(void);
-extern void ___rw_write_enter(void);
-
-/* Alias functions whose names begin with "." and export the aliases.
- * The module references will be fixed up by module_frob_arch_sections.
- */
-extern int _Div(int, int);
-extern int _Mul(int, int);
-extern int _Rem(int, int);
-extern unsigned _Udiv(unsigned, unsigned);
-extern unsigned _Umul(unsigned, unsigned);
-extern unsigned _Urem(unsigned, unsigned);
-
-/* used by various drivers */
-EXPORT_SYMBOL(sparc_cpu_model);
-EXPORT_SYMBOL(kernel_thread);
-#ifdef CONFIG_SMP
-// XXX find what uses (or used) these.   AV: see asm/spinlock.h
-EXPORT_SYMBOL(___rw_read_enter);
-EXPORT_SYMBOL(___rw_read_try);
-EXPORT_SYMBOL(___rw_read_exit);
-EXPORT_SYMBOL(___rw_write_enter);
-#endif
-
-EXPORT_SYMBOL(sparc_valid_addr_bitmap);
-EXPORT_SYMBOL(phys_base);
-EXPORT_SYMBOL(pfn_base);
-
-/* Atomic operations. */
-EXPORT_SYMBOL(___atomic24_add);
-EXPORT_SYMBOL(___atomic24_sub);
-
-/* Per-CPU information table */
-EXPORT_PER_CPU_SYMBOL(__cpu_data);
-
-#ifdef CONFIG_SMP
-/* IRQ implementation. */
-EXPORT_SYMBOL(synchronize_irq);
-#endif
-
+/* from entry.S */
 EXPORT_SYMBOL(__udelay);
 EXPORT_SYMBOL(__ndelay);
-EXPORT_SYMBOL(rtc_lock);
-EXPORT_SYMBOL(set_auxio);
-EXPORT_SYMBOL(get_auxio);
-EXPORT_SYMBOL(io_remap_pfn_range);
 
+/* from head_32.S */
+EXPORT_SYMBOL(__ret_efault);
+EXPORT_SYMBOL(empty_zero_page);
+
+/* Defined using magic */
 #ifndef CONFIG_SMP
 EXPORT_SYMBOL(BTFIXUP_CALL(___xchg32));
 #else
@@ -132,122 +40,7 @@
 EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_one));
 EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_sgl));
 EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_one));
-
 EXPORT_SYMBOL(BTFIXUP_CALL(pgprot_noncached));
 
-#ifdef CONFIG_SBUS
-EXPORT_SYMBOL(sbus_set_sbus64);
-#endif
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(insb);
-EXPORT_SYMBOL(outsb);
-EXPORT_SYMBOL(insw);
-EXPORT_SYMBOL(outsw);
-EXPORT_SYMBOL(insl);
-EXPORT_SYMBOL(outsl);
-EXPORT_SYMBOL(pci_alloc_consistent);
-EXPORT_SYMBOL(pci_free_consistent);
-EXPORT_SYMBOL(pci_map_single);
-EXPORT_SYMBOL(pci_unmap_single);
-EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
-EXPORT_SYMBOL(pci_dma_sync_single_for_device);
-EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
-EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
-EXPORT_SYMBOL(pci_map_sg);
-EXPORT_SYMBOL(pci_unmap_sg);
-EXPORT_SYMBOL(pci_map_page);
-EXPORT_SYMBOL(pci_unmap_page);
-/* Actually, ioremap/iounmap are not PCI specific. But it is ok for drivers. */
-EXPORT_SYMBOL(ioremap);
-EXPORT_SYMBOL(iounmap);
-#endif
-
-/* in arch/sparc/mm/highmem.c */
-#ifdef CONFIG_HIGHMEM
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic);
-#endif
-
-/* prom symbols */
-EXPORT_SYMBOL(idprom);
-EXPORT_SYMBOL(prom_root_node);
-EXPORT_SYMBOL(prom_getchild);
-EXPORT_SYMBOL(prom_getsibling);
-EXPORT_SYMBOL(prom_searchsiblings);
-EXPORT_SYMBOL(prom_firstprop);
-EXPORT_SYMBOL(prom_nextprop);
-EXPORT_SYMBOL(prom_getproplen);
-EXPORT_SYMBOL(prom_getproperty);
-EXPORT_SYMBOL(prom_node_has_property);
-EXPORT_SYMBOL(prom_setprop);
+/* Exporting a symbol from /init/main.c */
 EXPORT_SYMBOL(saved_command_line);
-EXPORT_SYMBOL(prom_apply_obio_ranges);
-EXPORT_SYMBOL(prom_feval);
-EXPORT_SYMBOL(prom_getbool);
-EXPORT_SYMBOL(prom_getstring);
-EXPORT_SYMBOL(prom_getint);
-EXPORT_SYMBOL(prom_getintdefault);
-EXPORT_SYMBOL(prom_finddevice);
-EXPORT_SYMBOL(romvec);
-EXPORT_SYMBOL(__prom_getchild);
-EXPORT_SYMBOL(__prom_getsibling);
-
-/* sparc library symbols */
-EXPORT_SYMBOL(memscan);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncmp);
-EXPORT_SYMBOL(page_kernel);
-
-/* Special internal versions of library functions. */
-EXPORT_SYMBOL(__copy_1page);
-EXPORT_SYMBOL(__memcpy);
-EXPORT_SYMBOL(__memset);
-EXPORT_SYMBOL(bzero_1page);
-EXPORT_SYMBOL(__bzero);
-EXPORT_SYMBOL(__memscan_zero);
-EXPORT_SYMBOL(__memscan_generic);
-EXPORT_SYMBOL(__strncmp);
-EXPORT_SYMBOL(__memmove);
-
-/* Moving data to/from userspace. */
-EXPORT_SYMBOL(__copy_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__strnlen_user);
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
-EXPORT_SYMBOL(csum_partial);
-
-/* Cache flushing.  */
-EXPORT_SYMBOL(sparc_flush_page_to_ram);
-
-/* For when serial stuff is built as modules. */
-EXPORT_SYMBOL(sun_do_break);
-
-EXPORT_SYMBOL(__ret_efault);
-
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__muldi3);
-EXPORT_SYMBOL(__divdi3);
-
-EXPORT_SYMBOL(_Rem);
-EXPORT_SYMBOL(_Urem);
-EXPORT_SYMBOL(_Mul);
-EXPORT_SYMBOL(_Umul);
-EXPORT_SYMBOL(_Div);
-EXPORT_SYMBOL(_Udiv);
-
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-EXPORT_SYMBOL(do_BUG);
-#endif
-
-/* Sun Power Management Idle Handler */
-EXPORT_SYMBOL(pm_idle);
-
-EXPORT_SYMBOL(empty_zero_page);
diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c
index 0133211..0f26066 100644
--- a/arch/sparc/kernel/sparc_ksyms_64.c
+++ b/arch/sparc/kernel/sparc_ksyms_64.c
@@ -5,50 +5,15 @@
  * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
  */
 
-/* Tell string.h we don't want memcpy etc. as cpp defines */
-#define EXPORT_SYMTAB_STROPS
-#define PROMLIB_INTERNAL
-
 #include <linux/module.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/sched.h>
-#include <linux/in6.h>
 #include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/fs_struct.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/syscalls.h>
-#include <linux/percpu.h>
 #include <linux/init.h>
-#include <linux/rwsem.h>
-#include <net/compat.h>
 
-#include <asm/oplib.h>
 #include <asm/system.h>
-#include <asm/auxio.h>
-#include <asm/pgtable.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/idprom.h>
-#include <asm/elf.h>
-#include <asm/head.h>
-#include <asm/smp.h>
-#include <asm/ptrace.h>
-#include <asm/uaccess.h>
-#include <asm/checksum.h>
-#include <asm/fpumacro.h>
-#include <asm/pgalloc.h>
-#include <asm/cacheflush.h>
-#ifdef CONFIG_SBUS
-#include <asm/dma.h>
-#endif
-#include <asm/ns87303.h>
-#include <asm/timer.h>
 #include <asm/cpudata.h>
-#include <asm/ftrace.h>
+#include <asm/uaccess.h>
+#include <asm/spitfire.h>
+#include <asm/oplib.h>
 #include <asm/hypervisor.h>
 
 struct poll {
@@ -57,114 +22,24 @@
 	short revents;
 };
 
-extern void die_if_kernel(char *str, struct pt_regs *regs);
-extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-extern void *__bzero(void *, size_t);
-extern void *__memscan_zero(void *, size_t);
-extern void *__memscan_generic(void *, int, size_t);
-extern __kernel_size_t strlen(const char *);
-extern void sys_sigsuspend(void);
-extern int compat_sys_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
-extern int (*handle_mathemu)(struct pt_regs *, struct fpustate *);
-extern long sparc32_open(const char __user * filename, int flags, int mode);
-extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
-	unsigned long pfn, unsigned long size, pgprot_t prot);
-
-extern int __ashrdi3(int, int);
-
-extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
-
-extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
-		      unsigned long *);
-extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
-		      unsigned long *, unsigned long *);
-extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
-		      unsigned long *, unsigned long *, unsigned long *);
-
-extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
-			  unsigned long *);
-extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
-			  unsigned long *, unsigned long *);
-extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
-			  unsigned long *, unsigned long *, unsigned long *);
-
-/* Per-CPU information table */
-EXPORT_PER_CPU_SYMBOL(__cpu_data);
-
-/* used by various drivers */
-#ifdef CONFIG_SMP
-/* Out of line rw-locking implementation. */
-EXPORT_SYMBOL(__read_lock);
-EXPORT_SYMBOL(__read_unlock);
-EXPORT_SYMBOL(__write_lock);
-EXPORT_SYMBOL(__write_unlock);
-EXPORT_SYMBOL(__write_trylock);
-#endif /* CONFIG_SMP */
-
-#ifdef CONFIG_MCOUNT
-EXPORT_SYMBOL(_mcount);
-#endif
-
-EXPORT_SYMBOL(sparc64_get_clock_tick);
-
-/* RW semaphores */
-EXPORT_SYMBOL(__down_read);
-EXPORT_SYMBOL(__down_read_trylock);
-EXPORT_SYMBOL(__down_write);
-EXPORT_SYMBOL(__down_write_trylock);
-EXPORT_SYMBOL(__up_read);
-EXPORT_SYMBOL(__up_write);
-EXPORT_SYMBOL(__downgrade_write);
-
-/* Atomic counter implementation. */
-EXPORT_SYMBOL(atomic_add);
-EXPORT_SYMBOL(atomic_add_ret);
-EXPORT_SYMBOL(atomic_sub);
-EXPORT_SYMBOL(atomic_sub_ret);
-EXPORT_SYMBOL(atomic64_add);
-EXPORT_SYMBOL(atomic64_add_ret);
-EXPORT_SYMBOL(atomic64_sub);
-EXPORT_SYMBOL(atomic64_sub_ret);
-
-/* Atomic bit operations. */
-EXPORT_SYMBOL(test_and_set_bit);
-EXPORT_SYMBOL(test_and_clear_bit);
-EXPORT_SYMBOL(test_and_change_bit);
-EXPORT_SYMBOL(set_bit);
-EXPORT_SYMBOL(clear_bit);
-EXPORT_SYMBOL(change_bit);
-
+/* from helpers.S */
 EXPORT_SYMBOL(__flushw_user);
+EXPORT_SYMBOL_GPL(real_hard_smp_processor_id);
 
+/* from head_64.S */
+EXPORT_SYMBOL(__ret_efault);
 EXPORT_SYMBOL(tlb_type);
 EXPORT_SYMBOL(sun4v_chip_type);
-EXPORT_SYMBOL(get_fb_unmapped_area);
-EXPORT_SYMBOL(flush_icache_range);
+EXPORT_SYMBOL(prom_root_node);
 
-EXPORT_SYMBOL(flush_dcache_page);
-#ifdef DCACHE_ALIASING_POSSIBLE
-EXPORT_SYMBOL(__flush_dcache_range);
-#endif
-
+/* from hvcalls.S */
 EXPORT_SYMBOL(sun4v_niagara_getperf);
 EXPORT_SYMBOL(sun4v_niagara_setperf);
 EXPORT_SYMBOL(sun4v_niagara2_getperf);
 EXPORT_SYMBOL(sun4v_niagara2_setperf);
 
-EXPORT_SYMBOL(auxio_set_led);
-EXPORT_SYMBOL(auxio_set_lte);
-#ifdef CONFIG_SBUS
-EXPORT_SYMBOL(sbus_set_sbus64);
-#endif
-EXPORT_SYMBOL(outsb);
-EXPORT_SYMBOL(outsw);
-EXPORT_SYMBOL(outsl);
-EXPORT_SYMBOL(insb);
-EXPORT_SYMBOL(insw);
-EXPORT_SYMBOL(insl);
 #ifdef CONFIG_PCI
+/* inline functions in asm/pci_64.h */
 EXPORT_SYMBOL(pci_alloc_consistent);
 EXPORT_SYMBOL(pci_free_consistent);
 EXPORT_SYMBOL(pci_map_single);
@@ -173,112 +48,7 @@
 EXPORT_SYMBOL(pci_unmap_sg);
 EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
 EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
-EXPORT_SYMBOL(pci_dma_supported);
 #endif
 
-/* I/O device mmaping on Sparc64. */
-EXPORT_SYMBOL(io_remap_pfn_range);
-
-EXPORT_SYMBOL(dump_fpu);
-
-/* math-emu wants this */
-EXPORT_SYMBOL(die_if_kernel);
-
-/* Kernel thread creation. */
-EXPORT_SYMBOL(kernel_thread);
-
-/* prom symbols */
-EXPORT_SYMBOL(idprom);
-EXPORT_SYMBOL(prom_root_node);
-EXPORT_SYMBOL(prom_getchild);
-EXPORT_SYMBOL(prom_getsibling);
-EXPORT_SYMBOL(prom_searchsiblings);
-EXPORT_SYMBOL(prom_firstprop);
-EXPORT_SYMBOL(prom_nextprop);
-EXPORT_SYMBOL(prom_getproplen);
-EXPORT_SYMBOL(prom_getproperty);
-EXPORT_SYMBOL(prom_node_has_property);
-EXPORT_SYMBOL(prom_setprop);
+/* Exporting a symbol from /init/main.c */
 EXPORT_SYMBOL(saved_command_line);
-EXPORT_SYMBOL(prom_finddevice);
-EXPORT_SYMBOL(prom_feval);
-EXPORT_SYMBOL(prom_getbool);
-EXPORT_SYMBOL(prom_getstring);
-EXPORT_SYMBOL(prom_getint);
-EXPORT_SYMBOL(prom_getintdefault);
-EXPORT_SYMBOL(__prom_getchild);
-EXPORT_SYMBOL(__prom_getsibling);
-
-/* sparc library symbols */
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(__strlen_user);
-EXPORT_SYMBOL(__strnlen_user);
-
-/* Special internal versions of library functions. */
-EXPORT_SYMBOL(_clear_page);
-EXPORT_SYMBOL(clear_user_page);
-EXPORT_SYMBOL(copy_user_page);
-EXPORT_SYMBOL(__bzero);
-EXPORT_SYMBOL(__memscan_zero);
-EXPORT_SYMBOL(__memscan_generic);
-EXPORT_SYMBOL(__memset);
-
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(__csum_partial_copy_from_user);
-EXPORT_SYMBOL(__csum_partial_copy_to_user);
-EXPORT_SYMBOL(ip_fast_csum);
-
-/* Moving data to/from/in userspace. */
-EXPORT_SYMBOL(___copy_to_user);
-EXPORT_SYMBOL(___copy_from_user);
-EXPORT_SYMBOL(___copy_in_user);
-EXPORT_SYMBOL(copy_to_user_fixup);
-EXPORT_SYMBOL(copy_from_user_fixup);
-EXPORT_SYMBOL(copy_in_user_fixup);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__clear_user);
-
-/* Various address conversion macros use this. */
-EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
-
-/* No version information on this, heavily used in inline asm,
- * and will always be 'void __ret_efault(void)'.
- */
-EXPORT_SYMBOL(__ret_efault);
-
-/* No version information on these, as gcc produces such symbols. */
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(strncmp);
-
-void VISenter(void);
-/* RAID code needs this */
-EXPORT_SYMBOL(VISenter);
-
-/* for input/keybdev */
-EXPORT_SYMBOL(sun_do_break);
-EXPORT_SYMBOL(stop_a_enabled);
-
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-EXPORT_SYMBOL(do_BUG);
-#endif
-
-/* for ns8703 */
-EXPORT_SYMBOL(ns87303_lock);
-
-EXPORT_SYMBOL(tick_ops);
-
-EXPORT_SYMBOL(xor_vis_2);
-EXPORT_SYMBOL(xor_vis_3);
-EXPORT_SYMBOL(xor_vis_4);
-EXPORT_SYMBOL(xor_vis_5);
-
-EXPORT_SYMBOL(xor_niagara_2);
-EXPORT_SYMBOL(xor_niagara_3);
-EXPORT_SYMBOL(xor_niagara_4);
-EXPORT_SYMBOL(xor_niagara_5);
-
-EXPORT_SYMBOL_GPL(real_hard_smp_processor_id);
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 16ab0cb..50afaed 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -60,7 +60,7 @@
 #define SMP_PRINTK(x)
 #endif
 
-static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
+static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned long val)
 {
 	__asm__ __volatile__("swap [%1], %0\n\t" :
 			     "=&r" (val), "=&r" (ptr) :
@@ -115,7 +115,7 @@
 	local_flush_tlb_all();
 
 	/* Allow master to continue. */
-	swap((unsigned long *)&cpu_callin_map[cpuid], 1);
+	sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1);
 	local_flush_cache_all();
 	local_flush_tlb_all();
 	
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 4f8d605..8040376 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -54,7 +54,8 @@
 #define SMP_PRINTK(x)
 #endif
 
-static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
+static inline unsigned long
+swap_ulong(volatile unsigned long *ptr, unsigned long val)
 {
 	__asm__ __volatile__("swap [%1], %0\n\t" :
 			     "=&r" (val), "=&r" (ptr) :
@@ -90,7 +91,7 @@
 	 * to call the scheduler code.
 	 */
 	/* Allow master to continue. */
-	swap(&cpu_callin_map[cpuid], 1);
+	swap_ulong(&cpu_callin_map[cpuid], 1);
 
 	/* XXX: What's up with all the flushes? */
 	local_flush_cache_all();
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 39749e3..09058fc 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -23,6 +23,7 @@
 #include <linux/ipc.h>
 #include <linux/personality.h>
 #include <linux/random.h>
+#include <linux/module.h>
 
 #include <asm/uaccess.h>
 #include <asm/utrap.h>
@@ -354,6 +355,7 @@
 
 	return addr;
 }
+EXPORT_SYMBOL(get_fb_unmapped_area);
 
 /* Essentially the same as PowerPC... */
 void arch_pick_mmap_layout(struct mm_struct *mm)
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index 7a6786a..87f5a3b 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -20,7 +20,7 @@
 	 add	%sp, PTREGS_OFF, %o0
 
 	.align	32
-sys_pipe:
+sys_sparc_pipe:
 	ba,pt	%xcc, sparc_pipe
 	 add	%sp, PTREGS_OFF, %o0
 sys_nis_syscall:
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 7d08075..dccc95d 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -24,7 +24,7 @@
 /*25*/	.long sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause
 /*30*/	.long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
 /*35*/	.long sys_chown, sys_sync, sys_kill, sys_newstat, sys_sendfile
-/*40*/	.long sys_newlstat, sys_dup, sys_pipe, sys_times, sys_getuid
+/*40*/	.long sys_newlstat, sys_dup, sys_sparc_pipe, sys_times, sys_getuid
 /*45*/	.long sys_umount, sys_setgid16, sys_getgid16, sys_signal, sys_geteuid16
 /*50*/	.long sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, sys_ioctl
 /*55*/	.long sys_reboot, sys_mmap2, sys_symlink, sys_readlink, sys_execve
@@ -56,7 +56,7 @@
 /*185*/	.long sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sys_newuname
 /*190*/	.long sys_init_module, sys_personality, sparc_remap_file_pages, sys_epoll_create, sys_epoll_ctl
 /*195*/	.long sys_epoll_wait, sys_ioprio_set, sys_getppid, sparc_sigaction, sys_sgetmask
-/*200*/	.long sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, old_readdir
+/*200*/	.long sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, sys_old_readdir
 /*205*/	.long sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64
 /*210*/	.long sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo
 /*215*/	.long sys_ipc, sys_sigreturn, sys_clone, sys_ioprio_get, sys_adjtimex
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 9fc78cf..e6007bb3 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -26,7 +26,7 @@
 /*25*/	.word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys_pause
 /*30*/	.word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice
 	.word sys_chown, sys_sync, sys32_kill, compat_sys_newstat, sys32_sendfile
-/*40*/	.word compat_sys_newlstat, sys_dup, sys_pipe, compat_sys_times, sys_getuid
+/*40*/	.word compat_sys_newlstat, sys_dup, sys_sparc_pipe, compat_sys_times, sys_getuid
 	.word sys32_umount, sys_setgid16, sys_getgid16, sys32_signal, sys_geteuid16
 /*50*/	.word sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, compat_sys_ioctl
 	.word sys32_reboot, sys32_mmap2, sys_symlink, sys32_readlink, sys32_execve
@@ -100,7 +100,7 @@
 /*25*/	.word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
 /*30*/	.word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
 	.word sys_nis_syscall, sys_sync, sys_kill, sys_newstat, sys_sendfile64
-/*40*/	.word sys_newlstat, sys_dup, sys_pipe, sys_times, sys_nis_syscall
+/*40*/	.word sys_newlstat, sys_dup, sys_sparc_pipe, sys_times, sys_nis_syscall
 	.word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid
 /*50*/	.word sys_getegid, sys_acct, sys_memory_ordering, sys_nis_syscall, sys_ioctl
 	.word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys_execve
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 00f7383..614ac7b 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -48,6 +48,8 @@
 #include "irq.h"
 
 DEFINE_SPINLOCK(rtc_lock);
+EXPORT_SYMBOL(rtc_lock);
+
 static int set_rtc_mmss(unsigned long);
 static int sbus_do_settimeofday(struct timespec *tv);
 
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 9df8f09..2db3c22 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -106,7 +106,7 @@
 	tick_disable_irq();
 }
 
-static unsigned long tick_get_tick(void)
+static unsigned long long tick_get_tick(void)
 {
 	unsigned long ret;
 
@@ -176,6 +176,7 @@
 };
 
 struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;
+EXPORT_SYMBOL(tick_ops);
 
 static void stick_disable_irq(void)
 {
@@ -208,7 +209,7 @@
 	stick_disable_irq();
 }
 
-static unsigned long stick_get_tick(void)
+static unsigned long long stick_get_tick(void)
 {
 	unsigned long ret;
 
@@ -352,7 +353,7 @@
 	hbtick_disable_irq();
 }
 
-static unsigned long hbtick_get_tick(void)
+static unsigned long long hbtick_get_tick(void)
 {
 	return __hbird_read_stick() & ~TICK_PRIV_BIT;
 }
@@ -422,7 +423,7 @@
 {
 	struct resource *r;
 
-	printk(KERN_INFO "%s: RTC regs at 0x%lx\n",
+	printk(KERN_INFO "%s: RTC regs at 0x%llx\n",
 	       op->node->full_name, op->resource[0].start);
 
 	/* The CMOS RTC driver only accepts IORESOURCE_IO, so cons
@@ -478,7 +479,7 @@
 static int __devinit bq4802_probe(struct of_device *op, const struct of_device_id *match)
 {
 
-	printk(KERN_INFO "%s: BQ4802 regs at 0x%lx\n",
+	printk(KERN_INFO "%s: BQ4802 regs at 0x%llx\n",
 	       op->node->full_name, op->resource[0].start);
 
 	rtc_bq4802_device.resource = &op->resource[0];
@@ -542,7 +543,7 @@
 	    strcmp(dp->parent->parent->name, "central") != 0)
 		return -ENODEV;
 
-	printk(KERN_INFO "%s: Mostek regs at 0x%lx\n",
+	printk(KERN_INFO "%s: Mostek regs at 0x%llx\n",
 	       dp->full_name, op->resource[0].start);
 
 	m48t59_rtc.resource = &op->resource[0];
@@ -639,6 +640,7 @@
 		return ft->clock_tick_ref;
 	return cpu_data(cpu).clock_tick;
 }
+EXPORT_SYMBOL(sparc64_get_clock_tick);
 
 #ifdef CONFIG_CPU_FREQ
 
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index 213645b..3582833 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -424,6 +424,7 @@
         // bust_spinlocks(1);   XXX Not in our original BUG()
         printk("kernel BUG at %s:%d!\n", file, line);
 }
+EXPORT_SYMBOL(do_BUG);
 #endif
 
 /* Since we have our mappings set up, on multiprocessors we can spin them
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 4638af2..c2d153d 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -128,6 +128,7 @@
 	bust_spinlocks(1);
 	printk("kernel BUG at %s:%d!\n", file, line);
 }
+EXPORT_SYMBOL(do_BUG);
 #endif
 
 static DEFINE_SPINLOCK(dimm_handler_lock);
@@ -1168,20 +1169,20 @@
 	}
 
 	/* Now dump the cache snapshots. */
-	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
+	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n",
 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
 	       (int) info->dcache_index,
 	       info->dcache_tag,
 	       info->dcache_utag,
 	       info->dcache_stag);
-	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
+	printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
 	       info->dcache_data[0],
 	       info->dcache_data[1],
 	       info->dcache_data[2],
 	       info->dcache_data[3]);
-	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
-	       "u[%016lx] l[%016lx]\n",
+	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] "
+	       "u[%016llx] l[%016llx]\n",
 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
 	       (int) info->icache_index,
 	       info->icache_tag,
@@ -1189,22 +1190,22 @@
 	       info->icache_stag,
 	       info->icache_upper,
 	       info->icache_lower);
-	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
+	printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n",
 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
 	       info->icache_data[0],
 	       info->icache_data[1],
 	       info->icache_data[2],
 	       info->icache_data[3]);
-	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
+	printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n",
 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
 	       info->icache_data[4],
 	       info->icache_data[5],
 	       info->icache_data[6],
 	       info->icache_data[7]);
-	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
+	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n",
 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
 	       (int) info->ecache_index, info->ecache_tag);
-	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
+	printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
 	       info->ecache_data[0],
 	       info->ecache_data[1],
@@ -1794,7 +1795,7 @@
 	int cnt;
 
 	printk("%s: Reporting on cpu %d\n", pfx, cpu);
-	printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
+	printk("%s: err_handle[%llx] err_stick[%llx] err_type[%08x:%s]\n",
 	       pfx,
 	       ent->err_handle, ent->err_stick,
 	       ent->err_type,
@@ -1818,7 +1819,7 @@
 		"privileged" : ""),
 	       ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
 		"queue-full" : ""));
-	printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
+	printk("%s: err_raddr[%016llx] err_size[%u] err_cpu[%u]\n",
 	       pfx,
 	       ent->err_raddr, ent->err_size, ent->err_cpu);
 
@@ -2261,6 +2262,7 @@
 		do_exit(SIGKILL);
 	do_exit(SIGSEGV);
 }
+EXPORT_SYMBOL(die_if_kernel);
 
 #define VIS_OPCODE_MASK	((0x3 << 30) | (0x3f << 19))
 #define VIS_OPCODE_VAL	((0x2 << 30) | (0x36 << 19))
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 203ddfa..3792099 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -589,7 +589,6 @@
 	unsigned long pc = regs->tpc;
 	unsigned long tstate = regs->tstate;
 	u32 insn;
-	u32 first, second;
 	u64 value;
 	u8 freg;
 	int flag;
@@ -601,15 +600,20 @@
 		pc = (u32)pc;
 	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
 		int asi = decode_asi(insn, regs);
+		u32 first, second;
+		int err;
+
 		if ((asi > ASI_SNFL) ||
 		    (asi < ASI_P))
 			goto daex;
-		if (get_user(first, (u32 __user *)sfar) ||
-		     get_user(second, (u32 __user *)(sfar + 4))) {
-			if (asi & 0x2) /* NF */ {
-				first = 0; second = 0;
-			} else
+		first = second = 0;
+		err = get_user(first, (u32 __user *)sfar);
+		if (!err)
+			err = get_user(second, (u32 __user *)(sfar + 4));
+		if (err) {
+			if (!(asi & 0x2))
 				goto daex;
+			first = second = 0;
 		}
 		save_and_clear_fpu();
 		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
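
The unaligned-load fixup above trades the combined get_user() || get_user() test for an explicit err variable, sequential reads, and upfront zeroing, so the non-faulting-ASI fallback is just the pre-set zeroes. A userspace analogue of that control flow, with a stubbed read_word() standing in for get_user() (hypothetical, not kernel code):

	#include <stdint.h>

	/* stand-in for get_user(): 0 on success, -1 on "fault" */
	static int read_word(uint32_t *dst, const uint32_t *src)
	{
		if (!src)
			return -1;
		*dst = *src;
		return 0;
	}

	static int fetch_pair(const uint32_t *src, int nofault,
			      uint32_t *first, uint32_t *second)
	{
		int err;

		*first = *second = 0;			/* zero up front */
		err = read_word(first, src);
		if (!err)
			err = read_word(second, src ? src + 1 : NULL);
		if (err) {
			if (!nofault)
				return -1;		/* faulting access: report the error */
			*first = *second = 0;		/* non-faulting access reads as zero */
		}
		return 0;
	}
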
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index 92b1f8e..753d128 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -263,10 +263,10 @@
 		dev_set_name(&vdev->dev, "%s", bus_id_name);
 		vdev->dev_no = ~(u64)0;
 	} else if (!cfg_handle) {
-		dev_set_name(&vdev->dev, "%s-%lu", bus_id_name, *id);
+		dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
 		vdev->dev_no = *id;
 	} else {
-		dev_set_name(&vdev->dev, "%s-%lu-%lu", bus_id_name,
+		dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
 			     *cfg_handle, *id);
 		vdev->dev_no = *cfg_handle;
 	}
diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
index 708fa17..aa6ac70 100644
--- a/arch/sparc/kernel/viohs.c
+++ b/arch/sparc/kernel/viohs.c
@@ -337,8 +337,10 @@
 	viodbg(HS, "GOT VERSION NACK maj[%u] min[%u] devclass[%u]\n",
 	       pkt->major, pkt->minor, pkt->dev_class);
 
-	if ((pkt->major == 0 && pkt->minor == 0) ||
-	    !(nver = find_by_major(vio, pkt->major)))
+	if (pkt->major == 0 && pkt->minor == 0)
+		return handshake_failure(vio);
+	nver = find_by_major(vio, pkt->major);
+	if (!nver)
 		return handshake_failure(vio);
 
 	if (send_version(vio, nver->major, nver->minor) < 0)
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 375016e..273fc85 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -42,3 +42,4 @@
 
 obj-y                 += iomap.o
 obj-$(CONFIG_SPARC32) += atomic32.o
+obj-y                 += ksyms.o
diff --git a/arch/sparc/lib/PeeCeeI.c b/arch/sparc/lib/PeeCeeI.c
index 46053e6..6529f86 100644
--- a/arch/sparc/lib/PeeCeeI.c
+++ b/arch/sparc/lib/PeeCeeI.c
@@ -4,6 +4,8 @@
  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
  */
 
+#include <linux/module.h>
+
 #include <asm/io.h>
 #include <asm/byteorder.h>
 
@@ -15,6 +17,7 @@
 	while (count--)
 		outb(*p++, addr);
 }
+EXPORT_SYMBOL(outsb);
 
 void outsw(unsigned long __addr, const void *src, unsigned long count)
 {
@@ -25,6 +28,7 @@
 		src += sizeof(u16);
 	}
 }
+EXPORT_SYMBOL(outsw);
 
 void outsl(unsigned long __addr, const void *src, unsigned long count)
 {
@@ -78,6 +82,7 @@
 		break;
 	}
 }
+EXPORT_SYMBOL(outsl);
 
 void insb(unsigned long __addr, void *dst, unsigned long count)
 {
@@ -105,6 +110,7 @@
 			*pb++ = inb(addr);
 	}
 }
+EXPORT_SYMBOL(insb);
 
 void insw(unsigned long __addr, void *dst, unsigned long count)
 {
@@ -132,6 +138,7 @@
 			*ps = le16_to_cpu(inw(addr));
 	}
 }
+EXPORT_SYMBOL(insw);
 
 void insl(unsigned long __addr, void *dst, unsigned long count)
 {
@@ -200,4 +207,5 @@
 		}
 	}
 }
+EXPORT_SYMBOL(insl);
 
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
new file mode 100644
index 0000000..704b126
--- /dev/null
+++ b/arch/sparc/lib/ksyms.c
@@ -0,0 +1,196 @@
+/*
+ * Export of symbols defined in assembler
+ */
+
+/* Tell string.h we don't want memcpy etc. as cpp defines */
+#define EXPORT_SYMTAB_STROPS
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/checksum.h>
+#include <asm/uaccess.h>
+#include <asm/ftrace.h>
+
+/* string functions */
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(__strlen_user);
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(strncmp);
+
+/* mem* functions */
+extern void *__memscan_zero(void *, size_t);
+extern void *__memscan_generic(void *, int, size_t);
+extern void *__bzero(void *, size_t);
+
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(__memscan_zero);
+EXPORT_SYMBOL(__memscan_generic);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__bzero);
+
+/* Moving data to/from/in userspace. */
+EXPORT_SYMBOL(__strncpy_from_user);
+
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_partial);
+
+#ifdef CONFIG_MCOUNT
+EXPORT_SYMBOL(_mcount);
+#endif
+
+/*
+ * sparc
+ */
+#ifdef CONFIG_SPARC32
+extern int __ashrdi3(int, int);
+extern int __ashldi3(int, int);
+extern int __lshrdi3(int, int);
+extern int __muldi3(int, int);
+extern int __divdi3(int, int);
+
+extern void (*__copy_1page)(void *, const void *);
+extern void (*bzero_1page)(void *);
+
+extern int __strncmp(const char *, const char *, __kernel_size_t);
+
+extern void ___rw_read_enter(void);
+extern void ___rw_read_try(void);
+extern void ___rw_read_exit(void);
+extern void ___rw_write_enter(void);
+extern void ___atomic24_add(void);
+extern void ___atomic24_sub(void);
+
+/* Alias functions whose names begin with "." and export the aliases.
+ * The module references will be fixed up by module_frob_arch_sections.
+ */
+extern int _Div(int, int);
+extern int _Mul(int, int);
+extern int _Rem(int, int);
+extern unsigned _Udiv(unsigned, unsigned);
+extern unsigned _Umul(unsigned, unsigned);
+extern unsigned _Urem(unsigned, unsigned);
+
+/* Networking helper routines. */
+EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
+
+/* Special internal versions of library functions. */
+EXPORT_SYMBOL(__copy_1page);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
+EXPORT_SYMBOL(bzero_1page);
+
+/* string functions */
+EXPORT_SYMBOL(__strncmp);
+
+/* Moving data to/from/in userspace. */
+EXPORT_SYMBOL(__copy_user);
+
+/* Used by asm/spinlock.h */
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(___rw_read_enter);
+EXPORT_SYMBOL(___rw_read_try);
+EXPORT_SYMBOL(___rw_read_exit);
+EXPORT_SYMBOL(___rw_write_enter);
+#endif
+
+/* Atomic operations. */
+EXPORT_SYMBOL(___atomic24_add);
+EXPORT_SYMBOL(___atomic24_sub);
+
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__divdi3);
+
+EXPORT_SYMBOL(_Rem);
+EXPORT_SYMBOL(_Urem);
+EXPORT_SYMBOL(_Mul);
+EXPORT_SYMBOL(_Umul);
+EXPORT_SYMBOL(_Div);
+EXPORT_SYMBOL(_Udiv);
+#endif
+
+/*
+ * sparc64
+ */
+#ifdef CONFIG_SPARC64
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(__csum_partial_copy_from_user);
+EXPORT_SYMBOL(__csum_partial_copy_to_user);
+EXPORT_SYMBOL(ip_fast_csum);
+
+/* Moving data to/from/in userspace. */
+EXPORT_SYMBOL(___copy_to_user);
+EXPORT_SYMBOL(___copy_from_user);
+EXPORT_SYMBOL(___copy_in_user);
+EXPORT_SYMBOL(__clear_user);
+
+/* RW semaphores */
+EXPORT_SYMBOL(__down_read);
+EXPORT_SYMBOL(__down_read_trylock);
+EXPORT_SYMBOL(__down_write);
+EXPORT_SYMBOL(__down_write_trylock);
+EXPORT_SYMBOL(__up_read);
+EXPORT_SYMBOL(__up_write);
+EXPORT_SYMBOL(__downgrade_write);
+
+/* Atomic counter implementation. */
+EXPORT_SYMBOL(atomic_add);
+EXPORT_SYMBOL(atomic_add_ret);
+EXPORT_SYMBOL(atomic_sub);
+EXPORT_SYMBOL(atomic_sub_ret);
+EXPORT_SYMBOL(atomic64_add);
+EXPORT_SYMBOL(atomic64_add_ret);
+EXPORT_SYMBOL(atomic64_sub);
+EXPORT_SYMBOL(atomic64_sub_ret);
+
+/* Atomic bit operations. */
+EXPORT_SYMBOL(test_and_set_bit);
+EXPORT_SYMBOL(test_and_clear_bit);
+EXPORT_SYMBOL(test_and_change_bit);
+EXPORT_SYMBOL(set_bit);
+EXPORT_SYMBOL(clear_bit);
+EXPORT_SYMBOL(change_bit);
+
+/* Special internal versions of library functions. */
+EXPORT_SYMBOL(_clear_page);
+EXPORT_SYMBOL(clear_user_page);
+EXPORT_SYMBOL(copy_user_page);
+
+/* RAID code needs this */
+void VISenter(void);
+EXPORT_SYMBOL(VISenter);
+
+extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
+extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
+		unsigned long *);
+extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
+		unsigned long *, unsigned long *);
+extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
+		unsigned long *, unsigned long *, unsigned long *);
+EXPORT_SYMBOL(xor_vis_2);
+EXPORT_SYMBOL(xor_vis_3);
+EXPORT_SYMBOL(xor_vis_4);
+EXPORT_SYMBOL(xor_vis_5);
+
+extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
+extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
+		unsigned long *);
+extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
+		unsigned long *, unsigned long *);
+extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
+		unsigned long *, unsigned long *, unsigned long *);
+
+EXPORT_SYMBOL(xor_niagara_2);
+EXPORT_SYMBOL(xor_niagara_3);
+EXPORT_SYMBOL(xor_niagara_4);
+EXPORT_SYMBOL(xor_niagara_5);
+#endif
diff --git a/arch/sparc/lib/user_fixup.c b/arch/sparc/lib/user_fixup.c
index 05a361b..ac96ae2 100644
--- a/arch/sparc/lib/user_fixup.c
+++ b/arch/sparc/lib/user_fixup.c
@@ -7,6 +7,8 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/errno.h>
+#include <linux/module.h>
+
 #include <asm/uaccess.h>
 
 /* Calculating the exact fault address when using
@@ -40,6 +42,7 @@
 
 	return size;
 }
+EXPORT_SYMBOL(copy_from_user_fixup);
 
 unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
 {
@@ -47,6 +50,7 @@
 
 	return compute_size((unsigned long) to, size, &offset);
 }
+EXPORT_SYMBOL(copy_to_user_fixup);
 
 unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
 {
@@ -64,3 +68,4 @@
 
 	return size;
 }
+EXPORT_SYMBOL(copy_in_user_fixup);
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index a507e11..12e447f 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -283,7 +283,8 @@
 	/* Is this in ex_table? */
 no_context:
 	g2 = regs->u_regs[UREG_G2];
-	if (!from_user && (fixup = search_extables_range(regs->pc, &g2))) {
+	if (!from_user) {
+		fixup = search_extables_range(regs->pc, &g2);
 		if (fixup > 10) { /* Values below are reserved for other things */
 			extern const unsigned __memset_start[];
 			extern const unsigned __memset_end[];
diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c
index a289261..5edcac1 100644
--- a/arch/sparc/mm/generic_32.c
+++ b/arch/sparc/mm/generic_32.c
@@ -95,3 +95,4 @@
 	flush_tlb_range(vma, beg, end);
 	return error;
 }
+EXPORT_SYMBOL(io_remap_pfn_range);
diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c
index f362c20..04f2bf4 100644
--- a/arch/sparc/mm/generic_64.c
+++ b/arch/sparc/mm/generic_64.c
@@ -161,3 +161,4 @@
 	flush_tlb_range(vma, beg, end);
 	return error;
 }
+EXPORT_SYMBOL(io_remap_pfn_range);
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 01fc6c2..752d0c9 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -62,6 +62,7 @@
 
 	return (void*) vaddr;
 }
+EXPORT_SYMBOL(kmap_atomic);
 
 void kunmap_atomic(void *kvaddr, enum km_type type)
 {
@@ -98,6 +99,7 @@
 
 	pagefault_enable();
 }
+EXPORT_SYMBOL(kunmap_atomic);
 
 /* We may be fed a pagetable here by ptep_to_xxx and others. */
 struct page *kmap_atomic_to_page(void *ptr)
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index fec9260..cbb282d 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -38,11 +38,16 @@
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 unsigned long *sparc_valid_addr_bitmap;
+EXPORT_SYMBOL(sparc_valid_addr_bitmap);
 
 unsigned long phys_base;
+EXPORT_SYMBOL(phys_base);
+
 unsigned long pfn_base;
+EXPORT_SYMBOL(pfn_base);
 
 unsigned long page_kernel;
+EXPORT_SYMBOL(page_kernel);
 
 struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
 unsigned long sparc_unmapped_base;
@@ -522,3 +527,4 @@
 	if (vaddr)
 		__flush_page_to_ram(vaddr);
 }
+EXPORT_SYMBOL(sparc_flush_page_to_ram);
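EXPORT_SYMBOL() works on data objects as well as functions, which is why the variables above (phys_base, pfn_base, page_kernel, the valid-address bitmap) are exported right next to their definitions: modular drivers that expand the pfn/phys conversion macros end up referencing them directly. The general pattern, shown with hypothetical names rather than the sparc symbols:

#include <linux/module.h>

/* Core kernel side: define the variable and export it. */
unsigned long example_phys_base;
EXPORT_SYMBOL(example_phys_base);

/* Module side (normally via a shared header): declare it extern and use it;
 * the reference is resolved against the export at module load time. */
extern unsigned long example_phys_base;

static inline unsigned long example_offset_to_phys(unsigned long offset)
{
	return example_phys_base + offset;
}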
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 6ea73da..00373ce 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -146,6 +146,7 @@
 }
 
 unsigned long *sparc64_valid_addr_bitmap __read_mostly;
+EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
 
 /* Kernel physical address base and size in bytes.  */
 unsigned long kern_base __read_mostly;
@@ -258,21 +259,16 @@
 unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
 unsigned long _PAGE_SZBITS __read_mostly;
 
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+static void flush_dcache(unsigned long pfn)
 {
-	struct mm_struct *mm;
-	struct tsb *tsb;
-	unsigned long tag, flags;
-	unsigned long tsb_index, tsb_hash_shift;
+	struct page *page;
 
-	if (tlb_type != hypervisor) {
-		unsigned long pfn = pte_pfn(pte);
+	page = pfn_to_page(pfn);
+	if (page && page_mapping(page)) {
 		unsigned long pg_flags;
-		struct page *page;
 
-		if (pfn_valid(pfn) &&
-		    (page = pfn_to_page(pfn), page_mapping(page)) &&
-		    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
+		pg_flags = page->flags;
+		if (pg_flags & (1UL << PG_dcache_dirty)) {
 			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
 				   PG_dcache_cpu_mask);
 			int this_cpu = get_cpu();
@@ -290,6 +286,21 @@
 			put_cpu();
 		}
 	}
+}
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+	struct mm_struct *mm;
+	struct tsb *tsb;
+	unsigned long tag, flags;
+	unsigned long tsb_index, tsb_hash_shift;
+
+	if (tlb_type != hypervisor) {
+		unsigned long pfn = pte_pfn(pte);
+
+		if (pfn_valid(pfn))
+			flush_dcache(pfn);
+	}
 
 	mm = vma->vm_mm;
 
@@ -359,6 +370,7 @@
 out:
 	put_cpu();
 }
+EXPORT_SYMBOL(flush_dcache_page);
 
 void __kprobes flush_icache_range(unsigned long start, unsigned long end)
 {
@@ -386,6 +398,7 @@
 		}
 	}
 }
+EXPORT_SYMBOL(flush_icache_range);
 
 void mmu_info(struct seq_file *m)
 {
@@ -589,6 +602,7 @@
 					       "i" (ASI_DCACHE_INVALIDATE));
 	}
 }
+EXPORT_SYMBOL(__flush_dcache_range);
 
 /* get_new_mmu_context() uses "cache + 1".  */
 DEFINE_SPINLOCK(ctx_alloc_lock);
@@ -769,8 +783,8 @@
 	return -1;
 }
 
-static unsigned long nid_range(unsigned long start, unsigned long end,
-			       int *nid)
+static unsigned long long nid_range(unsigned long long start,
+				    unsigned long long end, int *nid)
 {
 	*nid = find_node(start);
 	start += PAGE_SIZE;
@@ -788,8 +802,8 @@
 	return start;
 }
 #else
-static unsigned long nid_range(unsigned long start, unsigned long end,
-			       int *nid)
+static unsigned long long nid_range(unsigned long long start,
+				    unsigned long long end, int *nid)
 {
 	*nid = 0;
 	return end;
@@ -1016,8 +1030,8 @@
 		val = mdesc_get_property(md, node, "address-mask", NULL);
 		m->mask = *val;
 
-		numadbg("MLGROUP[%d]: node[%lx] latency[%lx] "
-			"match[%lx] mask[%lx]\n",
+		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
+			"match[%llx] mask[%llx]\n",
 			count - 1, m->node, m->latency, m->match, m->mask);
 	}
 
@@ -1056,7 +1070,7 @@
 					 "address-congruence-offset", NULL);
 		m->offset = *val;
 
-		numadbg("MBLOCK[%d]: base[%lx] size[%lx] offset[%lx]\n",
+		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
 			count - 1, m->base, m->size, m->offset);
 	}
 
@@ -1127,7 +1141,7 @@
 	n->mask = candidate->mask;
 	n->val = candidate->match;
 
-	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%lx])\n",
+	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
 		index, n->mask, n->val, candidate->latency);
 
 	return 0;
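The numadbg() format-string changes above follow from the values involved being 64-bit (the mdesc properties and the widened nid_range() arguments), so the specifiers move to %llx to match the types being printed. The usual convention, shown in isolation (a sketch, not code from this patch):

#include <linux/kernel.h>
#include <linux/types.h>

static void show_range(u64 base, u64 size)
{
	/* u64 may be unsigned long or unsigned long long depending on the
	 * architecture, so pair %llx with an explicit cast when the variable
	 * is declared as u64 rather than unsigned long long. */
	printk(KERN_DEBUG "range base[%llx] size[%llx]\n",
	       (unsigned long long)base, (unsigned long long)size);
}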
diff --git a/arch/sparc/prom/init_32.c b/arch/sparc/prom/init_32.c
index 873217c..6193c33 100644
--- a/arch/sparc/prom/init_32.c
+++ b/arch/sparc/prom/init_32.c
@@ -8,16 +8,20 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/module.h>
 
 #include <asm/openprom.h>
 #include <asm/oplib.h>
 
 struct linux_romvec *romvec;
+EXPORT_SYMBOL(romvec);
+
 enum prom_major_version prom_vers;
 unsigned int prom_rev, prom_prev;
 
 /* The root node of the prom device tree. */
 int prom_root_node;
+EXPORT_SYMBOL(prom_root_node);
 
 /* Pointer to the device tree operations structure. */
 struct linux_nodeops *prom_nodeops;
diff --git a/arch/sparc/prom/misc_32.c b/arch/sparc/prom/misc_32.c
index cf6c3f6..4d61c54 100644
--- a/arch/sparc/prom/misc_32.c
+++ b/arch/sparc/prom/misc_32.c
@@ -8,6 +8,8 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/module.h>
+
 #include <asm/openprom.h>
 #include <asm/oplib.h>
 #include <asm/auxio.h>
@@ -44,6 +46,7 @@
 	restore_current();
 	spin_unlock_irqrestore(&prom_lock, flags);
 }
+EXPORT_SYMBOL(prom_feval);
 
 /* Drop into the prom, with the chance to continue with the 'go'
  * prom command.
diff --git a/arch/sparc/prom/misc_64.c b/arch/sparc/prom/misc_64.c
index 9b0c076..eedffb4 100644
--- a/arch/sparc/prom/misc_64.c
+++ b/arch/sparc/prom/misc_64.c
@@ -11,6 +11,8 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <linux/module.h>
+
 #include <asm/openprom.h>
 #include <asm/oplib.h>
 #include <asm/system.h>
@@ -54,6 +56,7 @@
 	p1275_cmd("interpret", P1275_ARG(0, P1275_ARG_IN_STRING) |
 		  P1275_INOUT(1, 1), fstring);
 }
+EXPORT_SYMBOL(prom_feval);
 
 #ifdef CONFIG_SMP
 extern void smp_capture(void);
diff --git a/arch/sparc/prom/ranges.c b/arch/sparc/prom/ranges.c
index 64579a3..cd57908 100644
--- a/arch/sparc/prom/ranges.c
+++ b/arch/sparc/prom/ranges.c
@@ -6,6 +6,8 @@
  */
 
 #include <linux/init.h>
+#include <linux/module.h>
+
 #include <asm/openprom.h>
 #include <asm/oplib.h>
 #include <asm/types.h>
@@ -62,6 +64,7 @@
 	if(num_obio_ranges)
 		prom_adjust_regs(regs, nregs, promlib_obio_ranges, num_obio_ranges);
 }
+EXPORT_SYMBOL(prom_apply_obio_ranges);
 
 void __init prom_ranges_init(void)
 {
diff --git a/arch/sparc/prom/tree_32.c b/arch/sparc/prom/tree_32.c
index 6d81873..646d244 100644
--- a/arch/sparc/prom/tree_32.c
+++ b/arch/sparc/prom/tree_32.c
@@ -5,13 +5,12 @@
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
  */
 
-#define PROMLIB_INTERNAL
-
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/ctype.h>
+#include <linux/module.h>
 
 #include <asm/openprom.h>
 #include <asm/oplib.h>
@@ -50,6 +49,7 @@
 
 	return cnode;
 }
+EXPORT_SYMBOL(prom_getchild);
 
 /* Internal version of prom_getsibling that does not alter return values. */
 int __prom_getsibling(int node)
@@ -81,6 +81,7 @@
 
 	return sibnode;
 }
+EXPORT_SYMBOL(prom_getsibling);
 
 /* Return the length in bytes of property 'prop' at node 'node'.
  * Return -1 on error.
@@ -99,6 +100,7 @@
 	spin_unlock_irqrestore(&prom_lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL(prom_getproplen);
 
 /* Acquire a property 'prop' at node 'node' and place it in
  * 'buffer' which has a size of 'bufsize'.  If the acquisition
@@ -119,6 +121,7 @@
 	spin_unlock_irqrestore(&prom_lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL(prom_getproperty);
 
 /* Acquire an integer property and return its value.  Returns -1
  * on failure.
@@ -132,6 +135,7 @@
 
 	return -1;
 }
+EXPORT_SYMBOL(prom_getint);
 
 /* Acquire an integer property, upon error return the passed default
  * integer.
@@ -145,6 +149,7 @@
 
 	return retval;
 }
+EXPORT_SYMBOL(prom_getintdefault);
 
 /* Acquire a boolean property, 1=TRUE 0=FALSE. */
 int prom_getbool(int node, char *prop)
@@ -155,6 +160,7 @@
 	if(retval == -1) return 0;
 	return 1;
 }
+EXPORT_SYMBOL(prom_getbool);
 
 /* Acquire a property whose value is a string, returns a null
  * string on error.  The char pointer is the user supplied string
@@ -169,6 +175,7 @@
 	user_buf[0] = 0;
 	return;
 }
+EXPORT_SYMBOL(prom_getstring);
 
 
 /* Does the device at node 'node' have name 'name'?
@@ -204,6 +211,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(prom_searchsiblings);
 
 /* Internal version of nextprop that does not alter return values. */
 /* Internal version of nextprop that does not alter return values. */
 char * __prom_nextprop(int node, char * oprop)
@@ -228,6 +236,7 @@
 
 	return __prom_nextprop(node, "");
 }
+EXPORT_SYMBOL(prom_firstprop);
 
 /* Return the property type string after property type 'oprop'
  * at node 'node' .  Returns empty string if no more
@@ -240,6 +249,7 @@
 
 	return __prom_nextprop(node, oprop);
 }
+EXPORT_SYMBOL(prom_nextprop);
 
 int prom_finddevice(char *name)
 {
@@ -287,6 +297,7 @@
 	}
 	return node;
 }
+EXPORT_SYMBOL(prom_finddevice);
 
 int prom_node_has_property(int node, char *prop)
 {
@@ -299,6 +310,7 @@
 	} while (*current_property);
 	return 0;
 }
+EXPORT_SYMBOL(prom_node_has_property);
 
 /* Set property 'pname' at node 'node' to value 'value' which has a length
  * of 'size' bytes.  Return the number of bytes the prom accepted.
@@ -316,6 +328,7 @@
 	spin_unlock_irqrestore(&prom_lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL(prom_setprop);
 
 int prom_inst2pkg(int inst)
 {
diff --git a/arch/sparc/prom/tree_64.c b/arch/sparc/prom/tree_64.c
index 281aea4..8ea73dd 100644
--- a/arch/sparc/prom/tree_64.c
+++ b/arch/sparc/prom/tree_64.c
@@ -10,6 +10,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/module.h>
 
 #include <asm/openprom.h>
 #include <asm/oplib.h>
@@ -32,6 +33,7 @@
 	if(cnode == -1) return 0;
 	return (int)cnode;
 }
+EXPORT_SYMBOL(prom_getchild);
 
 inline int prom_getparent(int node)
 {
@@ -63,6 +65,7 @@
 
 	return sibnode;
 }
+EXPORT_SYMBOL(prom_getsibling);
 
 /* Return the length in bytes of property 'prop' at node 'node'.
  * Return -1 on error.
@@ -75,6 +78,7 @@
 			  P1275_INOUT(2, 1), 
 			  node, prop);
 }
+EXPORT_SYMBOL(prom_getproplen);
 
 /* Acquire a property 'prop' at node 'node' and place it in
  * 'buffer' which has a size of 'bufsize'.  If the acquisition
@@ -97,6 +101,7 @@
 				 node, prop, buffer, P1275_SIZE(plen));
 	}
 }
+EXPORT_SYMBOL(prom_getproperty);
 
 /* Acquire an integer property and return its value.  Returns -1
  * on failure.
@@ -110,6 +115,7 @@
 
 	return -1;
 }
+EXPORT_SYMBOL(prom_getint);
 
 /* Acquire an integer property, upon error return the passed default
  * integer.
@@ -124,6 +130,7 @@
 
 	return retval;
 }
+EXPORT_SYMBOL(prom_getintdefault);
 
 /* Acquire a boolean property, 1=TRUE 0=FALSE. */
 int prom_getbool(int node, const char *prop)
@@ -134,6 +141,7 @@
 	if(retval == -1) return 0;
 	return 1;
 }
+EXPORT_SYMBOL(prom_getbool);
 
 /* Acquire a property whose value is a string, returns a null
  * string on error.  The char pointer is the user supplied string
@@ -148,7 +156,7 @@
 	user_buf[0] = 0;
 	return;
 }
-
+EXPORT_SYMBOL(prom_getstring);
 
 /* Does the device at node 'node' have name 'name'?
  * YES = 1   NO = 0
@@ -181,6 +189,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(prom_searchsiblings);
 
 /* Return the first property type for node 'node'.
  * buffer should be at least 32B in length
@@ -194,6 +203,7 @@
 			       node, (char *) 0x0, buffer);
 	return buffer;
 }
+EXPORT_SYMBOL(prom_firstprop);
 
 /* Return the property type string after property type 'oprop'
  * at node 'node' .  Returns NULL string if no more
@@ -217,6 +227,7 @@
 				    node, oprop, buffer); 
 	return buffer;
 }
+EXPORT_SYMBOL(prom_nextprop);
 
 int
 prom_finddevice(const char *name)
@@ -228,6 +239,7 @@
 			 P1275_INOUT(1, 1), 
 			 name);
 }
+EXPORT_SYMBOL(prom_finddevice);
 
 int prom_node_has_property(int node, const char *prop)
 {
@@ -241,7 +253,8 @@
 	} while (*buf);
 	return 0;
 }
-                                                                                           
+EXPORT_SYMBOL(prom_node_has_property);
+
 /* Set property 'pname' at node 'node' to value 'value' which has a length
  * of 'size' bytes.  Return the number of bytes the prom accepted.
  */
@@ -264,6 +277,7 @@
 					  P1275_INOUT(4, 1), 
 					  node, pname, value, P1275_SIZE(size));
 }
+EXPORT_SYMBOL(prom_setprop);
 
 inline int prom_inst2pkg(int inst)
 {
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 862adb9..73f7fe8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -27,6 +27,7 @@
 	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
 	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select ARCH_WANT_FRAME_POINTERS
 	select HAVE_KRETPROBES
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index a9f8a81..4a8e80c 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -22,4 +22,3 @@
 unifdef-y += unistd_64.h
 unifdef-y += vm86.h
 unifdef-y += vsyscall.h
-unifdef-y += swab.h
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 9fa9dcd..e02a359 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -300,7 +300,7 @@
 	return oldbit;
 }
 
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr % BITS_PER_LONG)) &
 		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
diff --git a/arch/x86/include/asm/byteorder.h b/arch/x86/include/asm/byteorder.h
index 7c49917..b13a7a8 100644
--- a/arch/x86/include/asm/byteorder.h
+++ b/arch/x86/include/asm/byteorder.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_X86_BYTEORDER_H
 #define _ASM_X86_BYTEORDER_H
 
-#include <asm/swab.h>
 #include <linux/byteorder/little_endian.h>
 
 #endif /* _ASM_X86_BYTEORDER_H */
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
index bc53d5e..c58b9cc 100644
--- a/arch/x86/include/asm/es7000/apic.h
+++ b/arch/x86/include/asm/es7000/apic.h
@@ -1,6 +1,8 @@
 #ifndef __ASM_ES7000_APIC_H
 #define __ASM_ES7000_APIC_H
 
+#include <linux/gfp.h>
+
 #define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
 #define esr_disable (1)
 
diff --git a/arch/x86/include/asm/es7000/mpparse.h b/arch/x86/include/asm/es7000/mpparse.h
index ed5a3ca..c1629b0 100644
--- a/arch/x86/include/asm/es7000/mpparse.h
+++ b/arch/x86/include/asm/es7000/mpparse.h
@@ -10,8 +10,7 @@
 
 #ifndef CONFIG_X86_GENERICARCH
 extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
-extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
-				char *productid);
+extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid);
 #endif
 
 #ifdef CONFIG_ACPI
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
index 746f37a..2c05b73 100644
--- a/arch/x86/include/asm/genapic_32.h
+++ b/arch/x86/include/asm/genapic_32.h
@@ -15,9 +15,9 @@
  * Copyright 2003 Andi Kleen, SuSE Labs.
  */
 
-struct mpc_config_bus;
-struct mp_config_table;
-struct mpc_config_processor;
+struct mpc_bus;
+struct mpc_table;
+struct mpc_cpu;
 
 struct genapic {
 	char *name;
@@ -51,7 +51,7 @@
 	/* When one of the next two hooks returns 1 the genapic
 	   is switched to this. Essentially they are additional probe
 	   functions. */
-	int (*mps_oem_check)(struct mp_config_table *mpc, char *oem,
+	int (*mps_oem_check)(struct mpc_table *mpc, char *oem,
 			     char *productid);
 	int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
 
diff --git a/arch/x86/include/asm/mach-default/mach_mpparse.h b/arch/x86/include/asm/mach-default/mach_mpparse.h
index 8c1ea212..c70a263 100644
--- a/arch/x86/include/asm/mach-default/mach_mpparse.h
+++ b/arch/x86/include/asm/mach-default/mach_mpparse.h
@@ -1,8 +1,8 @@
 #ifndef _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
 #define _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
 
-static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, 
-		char *productid)
+static inline int
+mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
 {
 	return 0;
 }
diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h
index ceb0136..89897a6 100644
--- a/arch/x86/include/asm/mach-default/mach_wakecpu.h
+++ b/arch/x86/include/asm/mach-default/mach_wakecpu.h
@@ -24,7 +24,13 @@
 {
 }
 
+#ifdef CONFIG_SMP
 extern void __inquire_remote_apic(int apicid);
+#else /* CONFIG_SMP */
+static inline void __inquire_remote_apic(int apicid)
+{
+}
+#endif /* CONFIG_SMP */
 
 static inline void inquire_remote_apic(int apicid)
 {
diff --git a/arch/x86/include/asm/mach-generic/mach_mpparse.h b/arch/x86/include/asm/mach-generic/mach_mpparse.h
index 048f1d4..9444ab8 100644
--- a/arch/x86/include/asm/mach-generic/mach_mpparse.h
+++ b/arch/x86/include/asm/mach-generic/mach_mpparse.h
@@ -2,9 +2,8 @@
 #define _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
 
 
-extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
-			 char *productid);
+extern int mps_oem_check(struct mpc_table *, char *, char *);
 
-extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
+extern int acpi_madt_oem_check(char *, char *);
 
 #endif /* _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpspec.h b/arch/x86/include/asm/mach-generic/mach_mpspec.h
index bbab5cc..3bc4072 100644
--- a/arch/x86/include/asm/mach-generic/mach_mpspec.h
+++ b/arch/x86/include/asm/mach-generic/mach_mpspec.h
@@ -7,6 +7,6 @@
 /* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
 #define MAX_MP_BUSSES 260
 
-extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
-				char *productid);
+extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
+
 #endif /* _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h
index e3ace7d..59568bc 100644
--- a/arch/x86/include/asm/mpspec_def.h
+++ b/arch/x86/include/asm/mpspec_def.h
@@ -39,17 +39,17 @@
 
 #define MPC_SIGNATURE "PCMP"
 
-struct mp_config_table {
-	char mpc_signature[4];
-	unsigned short mpc_length;	/* Size of table */
-	char mpc_spec;			/* 0x01 */
-	char mpc_checksum;
-	char mpc_oem[8];
-	char mpc_productid[12];
-	unsigned int mpc_oemptr;	/* 0 if not present */
-	unsigned short mpc_oemsize;	/* 0 if not present */
-	unsigned short mpc_oemcount;
-	unsigned int mpc_lapic;	/* APIC address */
+struct mpc_table {
+	char signature[4];
+	unsigned short length;		/* Size of table */
+	char spec;			/* 0x01 */
+	char checksum;
+	char oem[8];
+	char productid[12];
+	unsigned int oemptr;		/* 0 if not present */
+	unsigned short oemsize;		/* 0 if not present */
+	unsigned short oemcount;
+	unsigned int lapic;		/* APIC address */
 	unsigned int reserved;
 };
 
@@ -70,20 +70,20 @@
 #define CPU_MODEL_MASK		0x00F0
 #define CPU_FAMILY_MASK		0x0F00
 
-struct mpc_config_processor {
-	unsigned char mpc_type;
-	unsigned char mpc_apicid;	/* Local APIC number */
-	unsigned char mpc_apicver;	/* Its versions */
-	unsigned char mpc_cpuflag;
-	unsigned int mpc_cpufeature;
-	unsigned int mpc_featureflag;	/* CPUID feature value */
-	unsigned int mpc_reserved[2];
+struct mpc_cpu {
+	unsigned char type;
+	unsigned char apicid;		/* Local APIC number */
+	unsigned char apicver;		/* Its versions */
+	unsigned char cpuflag;
+	unsigned int cpufeature;
+	unsigned int featureflag;	/* CPUID feature value */
+	unsigned int reserved[2];
 };
 
-struct mpc_config_bus {
-	unsigned char mpc_type;
-	unsigned char mpc_busid;
-	unsigned char mpc_bustype[6];
+struct mpc_bus {
+	unsigned char type;
+	unsigned char busid;
+	unsigned char bustype[6];
 };
 
 /* List of Bus Type string values, Intel MP Spec. */
@@ -108,22 +108,22 @@
 
 #define MPC_APIC_USABLE		0x01
 
-struct mpc_config_ioapic {
-	unsigned char mpc_type;
-	unsigned char mpc_apicid;
-	unsigned char mpc_apicver;
-	unsigned char mpc_flags;
-	unsigned int mpc_apicaddr;
+struct mpc_ioapic {
+	unsigned char type;
+	unsigned char apicid;
+	unsigned char apicver;
+	unsigned char flags;
+	unsigned int apicaddr;
 };
 
-struct mpc_config_intsrc {
-	unsigned char mpc_type;
-	unsigned char mpc_irqtype;
-	unsigned short mpc_irqflag;
-	unsigned char mpc_srcbus;
-	unsigned char mpc_srcbusirq;
-	unsigned char mpc_dstapic;
-	unsigned char mpc_dstirq;
+struct mpc_intsrc {
+	unsigned char type;
+	unsigned char irqtype;
+	unsigned short irqflag;
+	unsigned char srcbus;
+	unsigned char srcbusirq;
+	unsigned char dstapic;
+	unsigned char dstirq;
 };
 
 enum mp_irq_source_types {
@@ -139,24 +139,24 @@
 
 #define MP_APIC_ALL	0xFF
 
-struct mpc_config_lintsrc {
-	unsigned char mpc_type;
-	unsigned char mpc_irqtype;
-	unsigned short mpc_irqflag;
-	unsigned char mpc_srcbusid;
-	unsigned char mpc_srcbusirq;
-	unsigned char mpc_destapic;
-	unsigned char mpc_destapiclint;
+struct mpc_lintsrc {
+	unsigned char type;
+	unsigned char irqtype;
+	unsigned short irqflag;
+	unsigned char srcbusid;
+	unsigned char srcbusirq;
+	unsigned char destapic;
+	unsigned char destapiclint;
 };
 
 #define MPC_OEM_SIGNATURE "_OEM"
 
-struct mp_config_oemtable {
-	char oem_signature[4];
-	unsigned short oem_length;	/* Size of table */
-	char  oem_rev;			/* 0x01 */
-	char  oem_checksum;
-	char  mpc_oem[8];
+struct mpc_oemtable {
+	char signature[4];
+	unsigned short length;		/* Size of table */
+	char  rev;			/* 0x01 */
+	char  checksum;
+	char  mpc[8];
 };
 
 /*
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index cb988aa..14080d2 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -58,15 +58,15 @@
 #endif /* !__i386__ */
 
 struct mtrr_var_range {
-	u32 base_lo;
-	u32 base_hi;
-	u32 mask_lo;
-	u32 mask_hi;
+	__u32 base_lo;
+	__u32 base_hi;
+	__u32 mask_lo;
+	__u32 mask_hi;
 };
 
 /* In the Intel processor's MTRR interface, the MTRR type is always held in
    an 8 bit field: */
-typedef u8 mtrr_type;
+typedef __u8 mtrr_type;
 
 #define MTRR_NUM_FIXED_RANGES 88
 #define MTRR_MAX_VAR_RANGES 256
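mtrr.h is among the headers exported to userspace, so the ioctl-visible structure switches to the __u32/__u8 types from <linux/types.h>; the kernel-internal u32/u8 aliases are not defined for userspace builds. A small userspace-side sketch of the distinction (hypothetical struct name):

/* Userspace view: only the double-underscore fixed-width types are
 * guaranteed to exist after including <linux/types.h>. */
#include <linux/types.h>
#include <stdio.h>

struct example_var_range {
	__u32 base_lo;
	__u32 base_hi;
	__u32 mask_lo;
	__u32 mask_hi;
};

int main(void)
{
	printf("sizeof(struct example_var_range) = %zu\n",
	       sizeof(struct example_var_range));
	return 0;
}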
diff --git a/arch/x86/include/asm/numaq/mpparse.h b/arch/x86/include/asm/numaq/mpparse.h
index 252292e..a2eeefc 100644
--- a/arch/x86/include/asm/numaq/mpparse.h
+++ b/arch/x86/include/asm/numaq/mpparse.h
@@ -1,7 +1,6 @@
 #ifndef __ASM_NUMAQ_MPPARSE_H
 #define __ASM_NUMAQ_MPPARSE_H
 
-extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
-				char *productid);
+extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
 
 #endif /* __ASM_NUMAQ_MPPARSE_H */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 83e69f4..06bbcbd 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -341,6 +341,25 @@
 
 #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
 
+static inline int is_new_memtype_allowed(unsigned long flags,
+						unsigned long new_flags)
+{
+	/*
+	 * Certain new memtypes are not allowed with certain
+	 * requested memtype:
+	 * - request is uncached, return cannot be write-back
+	 * - request is write-combine, return cannot be write-back
+	 */
+	if ((flags == _PAGE_CACHE_UC_MINUS &&
+	     new_flags == _PAGE_CACHE_WB) ||
+	    (flags == _PAGE_CACHE_WC &&
+	     new_flags == _PAGE_CACHE_WB)) {
+		return 0;
+	}
+
+	return 1;
+}
+
 #ifndef __ASSEMBLY__
 /* Indicate that x86 has its own track and untrack pfn vma functions */
 #define __HAVE_PFNMAP_TRACKING
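is_new_memtype_allowed() captures the PAT compatibility rule spelled out in its comment: a request for UC- or WC must not end up backed by a write-back mapping. A caller would typically compare the memtype it asked for against the one the reservation code actually granted, roughly like this (illustrative helper, not the real call site):

#include <linux/errno.h>
#include <asm/pgtable.h>

/* 'want' is the cache attribute the caller requested, 'got' is what the
 * memtype reservation handed back. */
static int check_returned_memtype(unsigned long want, unsigned long got)
{
	if (want != got && !is_new_memtype_allowed(want, got))
		return -EINVAL;	/* e.g. asked for WC, was offered WB */

	return 0;
}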
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 4fcd53f..ebe858c 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -25,9 +25,9 @@
 /*
  * Any setup quirks to be performed?
  */
-struct mpc_config_processor;
-struct mpc_config_bus;
-struct mp_config_oemtable;
+struct mpc_cpu;
+struct mpc_bus;
+struct mpc_oemtable;
 struct x86_quirks {
 	int (*arch_pre_time_init)(void);
 	int (*arch_time_init)(void);
@@ -39,10 +39,10 @@
 	int (*mach_find_smp_config)(unsigned int reserve);
 
 	int *mpc_record;
-	int (*mpc_apic_id)(struct mpc_config_processor *m);
-	void (*mpc_oem_bus_info)(struct mpc_config_bus *m, char *name);
-	void (*mpc_oem_pci_bus)(struct mpc_config_bus *m);
-	void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable,
+	int (*mpc_apic_id)(struct mpc_cpu *m);
+	void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
+	void (*mpc_oem_pci_bus)(struct mpc_bus *m);
+	void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
                                     unsigned short oemsize);
 	int (*setup_ioapic_ids)(void);
 	int (*update_genapic)(void);
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 830b9fc..19953df 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -18,9 +18,26 @@
 #include <asm/pda.h>
 #include <asm/thread_info.h>
 
+#ifdef CONFIG_X86_64
+
+extern cpumask_var_t cpu_callin_mask;
+extern cpumask_var_t cpu_callout_mask;
+extern cpumask_var_t cpu_initialized_mask;
+extern cpumask_var_t cpu_sibling_setup_mask;
+
+#else /* CONFIG_X86_32 */
+
+extern cpumask_t cpu_callin_map;
 extern cpumask_t cpu_callout_map;
 extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_callin_map;
+extern cpumask_t cpu_sibling_setup_map;
+
+#define cpu_callin_mask		((struct cpumask *)&cpu_callin_map)
+#define cpu_callout_mask	((struct cpumask *)&cpu_callout_map)
+#define cpu_initialized_mask	((struct cpumask *)&cpu_initialized)
+#define cpu_sibling_setup_mask	((struct cpumask *)&cpu_sibling_setup_map)
+
+#endif /* CONFIG_X86_32 */
 
 extern void (*mtrr_hook)(void);
 extern void zap_low_mappings(void);
@@ -29,7 +46,6 @@
 
 extern int smp_num_siblings;
 extern unsigned int num_processors;
-extern cpumask_t cpu_initialized;
 
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
@@ -38,6 +54,16 @@
 DECLARE_PER_CPU(int, cpu_number);
 #endif
 
+static inline struct cpumask *cpu_sibling_mask(int cpu)
+{
+	return &per_cpu(cpu_sibling_map, cpu);
+}
+
+static inline struct cpumask *cpu_core_mask(int cpu)
+{
+	return &per_cpu(cpu_core_map, cpu);
+}
+
 DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
 
@@ -149,7 +175,7 @@
 /* We don't mark CPUs online until __cpu_up(), so we need another measure */
 static inline int num_booting_cpus(void)
 {
-	return cpus_weight(cpu_callout_map);
+	return cpumask_weight(cpu_callout_mask);
 }
 #else
 static inline void prefill_possible_map(void)
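The cpu_sibling_mask()/cpu_core_mask() helpers above hand out struct cpumask pointers so that callers stop copying whole cpumask_t values, and num_booting_cpus() now counts through cpumask_weight() on the callout mask; on 32-bit the old bitmaps are simply cast to the new type by the compatibility macros. A short sketch of iterating with the pointer-based API (hypothetical helper):

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <asm/smp.h>

static void print_siblings(int cpu)
{
	int sibling;

	/* for_each_cpu() walks a struct cpumask pointer, so the accessor can
	 * be passed straight through without a stack copy. */
	for_each_cpu(sibling, cpu_sibling_mask(cpu))
		printk(KERN_DEBUG "CPU%d sibling: %d\n", cpu, sibling);
}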
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
index 4bb5fb3..93d2c86 100644
--- a/arch/x86/include/asm/summit/apic.h
+++ b/arch/x86/include/asm/summit/apic.h
@@ -2,6 +2,7 @@
 #define __ASM_SUMMIT_APIC_H
 
 #include <asm/smp.h>
+#include <linux/gfp.h>
 
 #define esr_disable (1)
 #define NO_BALANCE_IRQ (0)
diff --git a/arch/x86/include/asm/summit/mpparse.h b/arch/x86/include/asm/summit/mpparse.h
index 013ce6f..380e86c 100644
--- a/arch/x86/include/asm/summit/mpparse.h
+++ b/arch/x86/include/asm/summit/mpparse.h
@@ -11,7 +11,7 @@
 #define setup_summit()	{}
 #endif
 
-static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
+static inline int mps_oem_check(struct mpc_table *mpc, char *oem,
 		char *productid)
 {
 	if (!strncmp(oem, "IBM ENSW", 8) &&
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 29dc0c8..d37593c 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -47,7 +47,7 @@
 #endif
 
 static int __initdata acpi_force = 0;
-
+u32 acpi_rsdt_forced;
 #ifdef	CONFIG_ACPI
 int acpi_disabled = 0;
 #else
@@ -1374,6 +1374,17 @@
 			       "Invalid BIOS MADT, disabling ACPI\n");
 			disable_acpi();
 		}
+	} else {
+		/*
+		 * ACPI found no MADT, and so ACPI wants UP PIC mode.
+		 * In the event an MPS table was found, forget it.
+		 * Boot with "acpi=off" to use MPS on such a system.
+		 */
+		if (smp_found_config) {
+			printk(KERN_WARNING PREFIX
+				"No APIC-table, disabling MPS\n");
+			smp_found_config = 0;
+		}
 	}
 
 	/*
@@ -1809,6 +1820,10 @@
 			disable_acpi();
 		acpi_ht = 1;
 	}
+	/* acpi=rsdt use RSDT instead of XSDT */
+	else if (strcmp(arg, "rsdt") == 0) {
+		acpi_rsdt_forced = 1;
+	}
 	/* "acpi=noirq" disables ACPI interrupt routing */
 	else if (strcmp(arg, "noirq") == 0) {
 		acpi_noirq_set();
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index c2502eb..bbbe4bb 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -56,6 +56,7 @@
 static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
 
 #define MWAIT_SUBSTATE_MASK	(0xf)
+#define MWAIT_CSTATE_MASK	(0xf)
 #define MWAIT_SUBSTATE_SIZE	(4)
 
 #define CPUID_MWAIT_LEAF (5)
@@ -66,39 +67,20 @@
 
 #define NATIVE_CSTATE_BEYOND_HALT	(2)
 
-int acpi_processor_ffh_cstate_probe(unsigned int cpu,
-		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
+static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
 {
-	struct cstate_entry *percpu_entry;
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-	cpumask_t saved_mask;
-	int retval;
+	struct acpi_processor_cx *cx = _cx;
+	long retval;
 	unsigned int eax, ebx, ecx, edx;
 	unsigned int edx_part;
 	unsigned int cstate_type; /* C-state type and not ACPI C-state type */
 	unsigned int num_cstate_subtype;
 
-	if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF )
-		return -1;
-
-	if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
-		return -1;
-
-	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
-	percpu_entry->states[cx->index].eax = 0;
-	percpu_entry->states[cx->index].ecx = 0;
-
-	/* Make sure we are running on right CPU */
-	saved_mask = current->cpus_allowed;
-	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-	if (retval)
-		return -1;
-
 	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
 
 	/* Check whether this particular cx_type (in CST) is supported or not */
-	cstate_type = (cx->address >> MWAIT_SUBSTATE_SIZE) + 1;
+	cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) &
+			MWAIT_CSTATE_MASK) + 1;
 	edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
 	num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
 
@@ -114,21 +96,45 @@
 		retval = -1;
 		goto out;
 	}
-	percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
-
-	/* Use the hint in CST */
-	percpu_entry->states[cx->index].eax = cx->address;
 
 	if (!mwait_supported[cstate_type]) {
 		mwait_supported[cstate_type] = 1;
-		printk(KERN_DEBUG "Monitor-Mwait will be used to enter C-%d "
-		       "state\n", cx->type);
+		printk(KERN_DEBUG
+			"Monitor-Mwait will be used to enter C-%d "
+			"state\n", cx->type);
 	}
-	snprintf(cx->desc, ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
-		 cx->address);
-
+	snprintf(cx->desc,
+			ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
+			cx->address);
 out:
-	set_cpus_allowed_ptr(current, &saved_mask);
+	return retval;
+}
+
+int acpi_processor_ffh_cstate_probe(unsigned int cpu,
+		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
+{
+	struct cstate_entry *percpu_entry;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	long retval;
+
+	if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF)
+		return -1;
+
+	if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
+		return -1;
+
+	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
+	percpu_entry->states[cx->index].eax = 0;
+	percpu_entry->states[cx->index].ecx = 0;
+
+	/* Make sure we are running on right CPU */
+
+	retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx);
+	if (retval == 0) {
+		/* Use the hint in CST */
+		percpu_entry->states[cx->index].eax = cx->address;
+		percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
+	}
 	return retval;
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
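The probe path no longer migrates the calling task with set_cpus_allowed_ptr(); work_on_cpu() runs the CPUID interrogation on the target CPU and hands the helper's long return value back to the caller. The shape of that pattern, reduced to a sketch (hypothetical names, assuming the callback only needs to execute on the given CPU):

#include <linux/workqueue.h>
#include <asm/processor.h>

struct probe_args {
	unsigned int leaf;
	unsigned int eax, ebx, ecx, edx;
};

/* Runs on the target CPU; the return value propagates back through
 * work_on_cpu(). */
static long probe_on_cpu(void *_args)
{
	struct probe_args *args = _args;

	cpuid(args->leaf, &args->eax, &args->ebx, &args->ecx, &args->edx);
	return 0;
}

static long probe_cpu_feature(unsigned int cpu, struct probe_args *args)
{
	/* Blocks until probe_on_cpu() has completed on 'cpu'. */
	return work_on_cpu(cpu, probe_on_cpu, args);
}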
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 806b4e9..707c1f6 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -159,6 +159,8 @@
 #endif
 		if (strncmp(str, "old_ordering", 12) == 0)
 			acpi_old_suspend_ordering();
+		if (strncmp(str, "s4_nonvs", 8) == 0)
+			acpi_s4_no_nvs();
 		str = strchr(str, ',');
 		if (str != NULL)
 			str += strspn(str, ", \t");
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index b13d3c4..0f830e4 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -31,9 +31,11 @@
 #include <linux/dmi.h>
 #include <linux/dmar.h>
 #include <linux/ftrace.h>
+#include <linux/smp.h>
+#include <linux/nmi.h>
+#include <linux/timex.h>
 
 #include <asm/atomic.h>
-#include <asm/smp.h>
 #include <asm/mtrr.h>
 #include <asm/mpspec.h>
 #include <asm/desc.h>
@@ -41,12 +43,11 @@
 #include <asm/hpet.h>
 #include <asm/pgalloc.h>
 #include <asm/i8253.h>
-#include <asm/nmi.h>
 #include <asm/idle.h>
 #include <asm/proto.h>
-#include <asm/timex.h>
 #include <asm/apic.h>
 #include <asm/i8259.h>
+#include <asm/smp.h>
 
 #include <mach_apic.h>
 #include <mach_apicdef.h>
@@ -687,7 +688,7 @@
 		local_irq_enable();
 
 	if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
-		pr_warning("APIC timer disabled due to verification failure.\n");
+		pr_warning("APIC timer disabled due to verification failure\n");
 			return -1;
 	}
 
@@ -2087,14 +2088,12 @@
 		/* are we being called early in kernel startup? */
 		if (bios_cpu_apicid) {
 			id = bios_cpu_apicid[i];
-		}
-		else if (i < nr_cpu_ids) {
+		} else if (i < nr_cpu_ids) {
 			if (cpu_present(i))
 				id = per_cpu(x86_bios_cpu_apicid, i);
 			else
 				continue;
-		}
-		else
+		} else
 			break;
 
 		if (id != BAD_APICID)
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 3a26525..98807bb 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -160,9 +160,9 @@
  *         Work around byte swap bug in one of the Vaio's BIOS's
  *         (Marc Boucher <marc@mbsi.ca>).
  *         Exposed the disable flag to dmi so that we can handle known
- *         broken APM (Alan Cox <alan@redhat.com>).
+ *         broken APM (Alan Cox <alan@lxorguk.ukuu.org.uk>).
  *   1.14ac: If the BIOS says "I slowed the CPU down" then don't spin
- *         calling it - instead idle. (Alan Cox <alan@redhat.com>)
+ *         calling it - instead idle. (Alan Cox <alan@lxorguk.ukuu.org.uk>)
  *         If an APM idle fails log it and idle sensibly
  *   1.15: Don't queue events to clients who open the device O_WRONLY.
  *         Don't expect replies from clients who open the device O_RDONLY.
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 3f95a40..83492b1 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -40,6 +40,26 @@
 
 #include "cpu.h"
 
+#ifdef CONFIG_X86_64
+
+/* all of these masks are initialized in setup_cpu_local_masks() */
+cpumask_var_t cpu_callin_mask;
+cpumask_var_t cpu_callout_mask;
+cpumask_var_t cpu_initialized_mask;
+
+/* representing cpus for which sibling maps can be computed */
+cpumask_var_t cpu_sibling_setup_mask;
+
+#else /* CONFIG_X86_32 */
+
+cpumask_t cpu_callin_map;
+cpumask_t cpu_callout_map;
+cpumask_t cpu_initialized;
+cpumask_t cpu_sibling_setup_map;
+
+#endif /* CONFIG_X86_32 */
+
+
 static struct cpu_dev *this_cpu __cpuinitdata;
 
 #ifdef CONFIG_X86_64
@@ -856,8 +876,6 @@
 }
 __setup("clearcpuid=", setup_disablecpuid);
 
-cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
-
 #ifdef CONFIG_X86_64
 struct x8664_pda **_cpu_pda __read_mostly;
 EXPORT_SYMBOL(_cpu_pda);
@@ -976,7 +994,7 @@
 
 	me = current;
 
-	if (cpu_test_and_set(cpu, cpu_initialized))
+	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
 		panic("CPU#%d already initialized!\n", cpu);
 
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
@@ -1085,7 +1103,7 @@
 	struct tss_struct *t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &curr->thread;
 
-	if (cpu_test_and_set(cpu, cpu_initialized)) {
+	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
 		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
 		for (;;) local_irq_enable();
 	}
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 28102ad..6f11e02 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -145,7 +145,7 @@
 
 struct drv_cmd {
 	unsigned int type;
-	cpumask_t mask;
+	cpumask_var_t mask;
 	drv_addr_union addr;
 	u32 val;
 };
@@ -193,7 +193,7 @@
 	cpumask_t saved_mask = current->cpus_allowed;
 	cmd->val = 0;
 
-	set_cpus_allowed_ptr(current, &cmd->mask);
+	set_cpus_allowed_ptr(current, cmd->mask);
 	do_drv_read(cmd);
 	set_cpus_allowed_ptr(current, &saved_mask);
 }
@@ -203,8 +203,8 @@
 	cpumask_t saved_mask = current->cpus_allowed;
 	unsigned int i;
 
-	for_each_cpu_mask_nr(i, cmd->mask) {
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+	for_each_cpu(i, cmd->mask) {
+		set_cpus_allowed_ptr(current, cpumask_of(i));
 		do_drv_write(cmd);
 	}
 
@@ -212,22 +212,22 @@
 	return;
 }
 
-static u32 get_cur_val(const cpumask_t *mask)
+static u32 get_cur_val(const struct cpumask *mask)
 {
 	struct acpi_processor_performance *perf;
 	struct drv_cmd cmd;
 
-	if (unlikely(cpus_empty(*mask)))
+	if (unlikely(cpumask_empty(mask)))
 		return 0;
 
-	switch (per_cpu(drv_data, first_cpu(*mask))->cpu_feature) {
+	switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
 		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
-		perf = per_cpu(drv_data, first_cpu(*mask))->acpi_data;
+		perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data;
 		cmd.addr.io.port = perf->control_register.address;
 		cmd.addr.io.bit_width = perf->control_register.bit_width;
 		break;
@@ -235,7 +235,7 @@
 		return 0;
 	}
 
-	cmd.mask = *mask;
+	cpumask_copy(cmd.mask, mask);
 
 	drv_read(&cmd);
 
@@ -244,6 +244,30 @@
 	return cmd.val;
 }
 
+struct perf_cur {
+	union {
+		struct {
+			u32 lo;
+			u32 hi;
+		} split;
+		u64 whole;
+	} aperf_cur, mperf_cur;
+};
+
+
+static long read_measured_perf_ctrs(void *_cur)
+{
+	struct perf_cur *cur = _cur;
+
+	rdmsr(MSR_IA32_APERF, cur->aperf_cur.split.lo, cur->aperf_cur.split.hi);
+	rdmsr(MSR_IA32_MPERF, cur->mperf_cur.split.lo, cur->mperf_cur.split.hi);
+
+	wrmsr(MSR_IA32_APERF, 0, 0);
+	wrmsr(MSR_IA32_MPERF, 0, 0);
+
+	return 0;
+}
+
 /*
  * Return the measured active (C0) frequency on this CPU since last call
  * to this function.
@@ -260,31 +284,12 @@
 static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 				      unsigned int cpu)
 {
-	union {
-		struct {
-			u32 lo;
-			u32 hi;
-		} split;
-		u64 whole;
-	} aperf_cur, mperf_cur;
-
-	cpumask_t saved_mask;
+	struct perf_cur cur;
 	unsigned int perf_percent;
 	unsigned int retval;
 
-	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-	if (get_cpu() != cpu) {
-		/* We were not able to run on requested processor */
-		put_cpu();
+	if (!work_on_cpu(cpu, read_measured_perf_ctrs, &cur))
 		return 0;
-	}
-
-	rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi);
-	rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi);
-
-	wrmsr(MSR_IA32_APERF, 0,0);
-	wrmsr(MSR_IA32_MPERF, 0,0);
 
 #ifdef __i386__
 	/*
@@ -292,37 +297,39 @@
 	 * Get an approximate value. Return failure in case we cannot get
 	 * an approximate value.
 	 */
-	if (unlikely(aperf_cur.split.hi || mperf_cur.split.hi)) {
+	if (unlikely(cur.aperf_cur.split.hi || cur.mperf_cur.split.hi)) {
 		int shift_count;
 		u32 h;
 
-		h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi);
+		h = max_t(u32, cur.aperf_cur.split.hi, cur.mperf_cur.split.hi);
 		shift_count = fls(h);
 
-		aperf_cur.whole >>= shift_count;
-		mperf_cur.whole >>= shift_count;
+		cur.aperf_cur.whole >>= shift_count;
+		cur.mperf_cur.whole >>= shift_count;
 	}
 
-	if (((unsigned long)(-1) / 100) < aperf_cur.split.lo) {
+	if (((unsigned long)(-1) / 100) < cur.aperf_cur.split.lo) {
 		int shift_count = 7;
-		aperf_cur.split.lo >>= shift_count;
-		mperf_cur.split.lo >>= shift_count;
+		cur.aperf_cur.split.lo >>= shift_count;
+		cur.mperf_cur.split.lo >>= shift_count;
 	}
 
-	if (aperf_cur.split.lo && mperf_cur.split.lo)
-		perf_percent = (aperf_cur.split.lo * 100) / mperf_cur.split.lo;
+	if (cur.aperf_cur.split.lo && cur.mperf_cur.split.lo)
+		perf_percent = (cur.aperf_cur.split.lo * 100) /
+				cur.mperf_cur.split.lo;
 	else
 		perf_percent = 0;
 
 #else
-	if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) {
+	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf_cur.whole)) {
 		int shift_count = 7;
-		aperf_cur.whole >>= shift_count;
-		mperf_cur.whole >>= shift_count;
+		cur.aperf_cur.whole >>= shift_count;
+		cur.mperf_cur.whole >>= shift_count;
 	}
 
-	if (aperf_cur.whole && mperf_cur.whole)
-		perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole;
+	if (cur.aperf_cur.whole && cur.mperf_cur.whole)
+		perf_percent = (cur.aperf_cur.whole * 100) /
+				cur.mperf_cur.whole;
 	else
 		perf_percent = 0;
 
@@ -330,10 +337,6 @@
 
 	retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;
 
-	put_cpu();
-	set_cpus_allowed_ptr(current, &saved_mask);
-
-	dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
 	return retval;
 }
 
@@ -351,7 +354,7 @@
 	}
 
 	cached_freq = data->freq_table[data->acpi_data->state].frequency;
-	freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
+	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
 	if (freq != cached_freq) {
 		/*
 		 * The dreaded BIOS frequency change behind our back.
@@ -386,7 +389,6 @@
 	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
 	struct acpi_processor_performance *perf;
 	struct cpufreq_freqs freqs;
-	cpumask_t online_policy_cpus;
 	struct drv_cmd cmd;
 	unsigned int next_state = 0; /* Index into freq_table */
 	unsigned int next_perf_state = 0; /* Index into perf table */
@@ -401,20 +403,18 @@
 		return -ENODEV;
 	}
 
+	if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL)))
+		return -ENOMEM;
+
 	perf = data->acpi_data;
 	result = cpufreq_frequency_table_target(policy,
 						data->freq_table,
 						target_freq,
 						relation, &next_state);
-	if (unlikely(result))
-		return -ENODEV;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	/* cpufreq holds the hotplug lock, so we are safe from here on */
-	cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
-#else
-	online_policy_cpus = policy->cpus;
-#endif
+	if (unlikely(result)) {
+		result = -ENODEV;
+		goto out;
+	}
 
 	next_perf_state = data->freq_table[next_state].index;
 	if (perf->state == next_perf_state) {
@@ -425,7 +425,7 @@
 		} else {
 			dprintk("Already at target state (P%d)\n",
 				next_perf_state);
-			return 0;
+			goto out;
 		}
 	}
 
@@ -444,19 +444,19 @@
 		cmd.val = (u32) perf->states[next_perf_state].control;
 		break;
 	default:
-		return -ENODEV;
+		result = -ENODEV;
+		goto out;
 	}
 
-	cpus_clear(cmd.mask);
-
+	/* cpufreq holds the hotplug lock, so we are safe from here on */
 	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
-		cmd.mask = online_policy_cpus;
+		cpumask_and(cmd.mask, cpu_online_mask, policy->cpus);
 	else
-		cpu_set(policy->cpu, cmd.mask);
+		cpumask_copy(cmd.mask, cpumask_of(policy->cpu));
 
 	freqs.old = perf->states[perf->state].core_frequency * 1000;
 	freqs.new = data->freq_table[next_state].frequency;
-	for_each_cpu_mask_nr(i, cmd.mask) {
+	for_each_cpu(i, cmd.mask) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -464,19 +464,22 @@
 	drv_write(&cmd);
 
 	if (acpi_pstate_strict) {
-		if (!check_freqs(&cmd.mask, freqs.new, data)) {
+		if (!check_freqs(cmd.mask, freqs.new, data)) {
 			dprintk("acpi_cpufreq_target failed (%d)\n",
 				policy->cpu);
-			return -EAGAIN;
+			result = -EAGAIN;
+			goto out;
 		}
 	}
 
-	for_each_cpu_mask_nr(i, cmd.mask) {
+	for_each_cpu(i, cmd.mask) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
 	perf->state = next_perf_state;
 
+out:
+	free_cpumask_var(cmd.mask);
 	return result;
 }
 
@@ -626,15 +629,15 @@
 	 */
 	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
 	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
-		cpumask_copy(&policy->cpus, perf->shared_cpu_map);
+		cpumask_copy(policy->cpus, perf->shared_cpu_map);
 	}
-	cpumask_copy(&policy->related_cpus, perf->shared_cpu_map);
+	cpumask_copy(policy->related_cpus, perf->shared_cpu_map);
 
 #ifdef CONFIG_SMP
 	dmi_check_system(sw_any_bug_dmi_table);
-	if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) {
+	if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
 		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-		policy->cpus = per_cpu(cpu_core_map, cpu);
+		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
 	}
 #endif
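With CONFIG_CPUMASK_OFFSTACK the mask embedded in struct drv_cmd becomes a cpumask_var_t that must be allocated and freed explicitly, which is why acpi_cpufreq_target() now funnels every exit through the out: label. The allocation discipline in isolation (minimal sketch with a hypothetical helper):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int run_on_policy_cpus(const struct cpumask *policy_cpus)
{
	cpumask_var_t mask;
	int cpu;

	/* With CONFIG_CPUMASK_OFFSTACK=y this really allocates; otherwise it
	 * is effectively a no-op that returns true. The free below is safe
	 * either way. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(mask, policy_cpus, cpu_online_mask);
	for_each_cpu(cpu, mask) {
		/* ... per-CPU work would go here ... */
	}

	free_cpumask_var(mask);
	return 0;
}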
 
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index b046185..a4cff5d 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -982,7 +982,7 @@
 	case 10:
 		printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n");
 	default:
-		;;
+		;
 	}
 
 	return -ENODEV;
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index beea446..b585e04c 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -122,7 +122,7 @@
 		return 0;
 
 	/* notifiers */
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -130,11 +130,11 @@
 	/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
 	 * Developer's Manual, Volume 3
 	 */
-	for_each_cpu_mask_nr(i, policy->cpus)
+	for_each_cpu(i, policy->cpus)
 		cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
 
 	/* notifiers */
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
@@ -203,7 +203,7 @@
 	unsigned int i;
 
 #ifdef CONFIG_SMP
-	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
+	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
 #endif
 
 	/* Errata workaround */
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index c3c9adb..5c28b37 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1199,10 +1199,10 @@
 	set_cpus_allowed_ptr(current, &oldmask);
 
 	if (cpu_family == CPU_HW_PSTATE)
-		pol->cpus = cpumask_of_cpu(pol->cpu);
+		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
 	else
-		pol->cpus = per_cpu(cpu_core_map, pol->cpu);
-	data->available_cores = &(pol->cpus);
+		cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
+	data->available_cores = pol->cpus;
 
 	/* Take a crude guess here.
 	 * That guess was in microseconds, so multiply with 1000 */
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index 65cfb5d..8ecc75b 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -53,7 +53,7 @@
 	/* we need to keep track of associated cores, but let cpufreq
 	 * handle hotplug events - so just point at cpufreq pol->cpus
 	 * structure */
-	cpumask_t *available_cores;
+	struct cpumask *available_cores;
 };
 
 
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index f0ea6fa..f089982 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -458,11 +458,6 @@
  *
  * Sets a new CPUFreq policy.
  */
-struct allmasks {
-	cpumask_t		saved_mask;
-	cpumask_t		covered_cpus;
-};
-
 static int centrino_target (struct cpufreq_policy *policy,
 			    unsigned int target_freq,
 			    unsigned int relation)
@@ -472,12 +467,15 @@
 	struct cpufreq_freqs	freqs;
 	int			retval = 0;
 	unsigned int		j, k, first_cpu, tmp;
-	CPUMASK_ALLOC(allmasks);
-	CPUMASK_PTR(saved_mask, allmasks);
-	CPUMASK_PTR(covered_cpus, allmasks);
+	cpumask_var_t saved_mask, covered_cpus;
 
-	if (unlikely(allmasks == NULL))
+	if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL)))
 		return -ENOMEM;
+	if (unlikely(!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
+		free_cpumask_var(saved_mask);
+		return -ENOMEM;
+	}
+	cpumask_copy(saved_mask, &current->cpus_allowed);
 
 	if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
 		retval = -ENODEV;
@@ -493,11 +491,9 @@
 		goto out;
 	}
 
-	*saved_mask = current->cpus_allowed;
 	first_cpu = 1;
-	cpus_clear(*covered_cpus);
-	for_each_cpu_mask_nr(j, policy->cpus) {
-		const cpumask_t *mask;
+	for_each_cpu(j, policy->cpus) {
+		const struct cpumask *mask;
 
 		/* cpufreq holds the hotplug lock, so we are safe here */
 		if (!cpu_online(j))
@@ -508,9 +504,9 @@
 		 * Make sure we are running on CPU that wants to change freq
 		 */
 		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-			mask = &policy->cpus;
+			mask = policy->cpus;
 		else
-			mask = &cpumask_of_cpu(j);
+			mask = cpumask_of(j);
 
 		set_cpus_allowed_ptr(current, mask);
 		preempt_disable();
@@ -542,7 +538,7 @@
 			dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
 				target_freq, freqs.old, freqs.new, msr);
 
-			for_each_cpu_mask_nr(k, policy->cpus) {
+			for_each_cpu(k, policy->cpus) {
 				if (!cpu_online(k))
 					continue;
 				freqs.cpu = k;
@@ -567,7 +563,7 @@
 		preempt_enable();
 	}
 
-	for_each_cpu_mask_nr(k, policy->cpus) {
+	for_each_cpu(k, policy->cpus) {
 		if (!cpu_online(k))
 			continue;
 		freqs.cpu = k;
@@ -590,7 +586,7 @@
 		tmp = freqs.new;
 		freqs.new = freqs.old;
 		freqs.old = tmp;
-		for_each_cpu_mask_nr(j, policy->cpus) {
+		for_each_cpu(j, policy->cpus) {
 			if (!cpu_online(j))
 				continue;
 			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
@@ -605,7 +601,8 @@
 	preempt_enable();
 	set_cpus_allowed_ptr(current, saved_mask);
 out:
-	CPUMASK_FREE(allmasks);
+	free_cpumask_var(saved_mask);
+	free_cpumask_var(covered_cpus);
 	return retval;
 }
 
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 04d0376..dedc1e9 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -229,7 +229,7 @@
 	return 0;
 }
 
-static unsigned int _speedstep_get(const cpumask_t *cpus)
+static unsigned int _speedstep_get(const struct cpumask *cpus)
 {
 	unsigned int speed;
 	cpumask_t cpus_allowed;
@@ -244,7 +244,7 @@
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-	return _speedstep_get(&cpumask_of_cpu(cpu));
+	return _speedstep_get(cpumask_of(cpu));
 }
 
 /**
@@ -267,7 +267,7 @@
 	if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate))
 		return -EINVAL;
 
-	freqs.old = _speedstep_get(&policy->cpus);
+	freqs.old = _speedstep_get(policy->cpus);
 	freqs.new = speedstep_freqs[newstate].frequency;
 	freqs.cpu = policy->cpu;
 
@@ -279,20 +279,20 @@
 
 	cpus_allowed = current->cpus_allowed;
 
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
 
 	/* switch to physical CPU where state is to be changed */
-	set_cpus_allowed_ptr(current, &policy->cpus);
+	set_cpus_allowed_ptr(current, policy->cpus);
 
 	speedstep_set_state(newstate);
 
 	/* allow to be run on all CPUs */
 	set_cpus_allowed_ptr(current, &cpus_allowed);
 
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
@@ -322,11 +322,11 @@
 
 	/* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
-	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
+	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
 #endif
 
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &policy->cpus);
+	set_cpus_allowed_ptr(current, policy->cpus);
 
 	/* detect low and high frequency and transition latency */
 	result = speedstep_get_freqs(speedstep_processor,
@@ -339,7 +339,7 @@
 		return result;
 
 	/* get current speed setting */
-	speed = _speedstep_get(&policy->cpus);
+	speed = _speedstep_get(policy->cpus);
 	if (!speed)
 		return -EIO;
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c
index 0ebf3fc..dfaebce 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_32.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_32.c
@@ -1,6 +1,6 @@
 /*
  * mce.c - x86 Machine Check Exception Reporting
- * (c) 2002 Alan Cox <alan@redhat.com>, Dave Jones <davej@redhat.com>
+ * (c) 2002 Alan Cox <alan@lxorguk.ukuu.org.uk>, Dave Jones <davej@redhat.com>
  */
 
 #include <linux/init.h>
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index a5a5e05..8ae8c4f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -462,7 +462,7 @@
 	return err;
 }
 
-static long local_allocate_threshold_blocks(void *_bank)
+static __cpuinit long local_allocate_threshold_blocks(void *_bank)
 {
 	unsigned int *bank = _bank;
 
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
index bfa5817..c9f77ea 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -1,6 +1,6 @@
 /*
  * P5 specific Machine Check Exception Reporting
- * (C) Copyright 2002 Alan Cox <alan@redhat.com>
+ * (C) Copyright 2002 Alan Cox <alan@lxorguk.ukuu.org.uk>
  */
 
 #include <linux/init.h>
diff --git a/arch/x86/kernel/cpu/mcheck/p6.c b/arch/x86/kernel/cpu/mcheck/p6.c
index 62efc9c..2ac52d7 100644
--- a/arch/x86/kernel/cpu/mcheck/p6.c
+++ b/arch/x86/kernel/cpu/mcheck/p6.c
@@ -1,6 +1,6 @@
 /*
  * P6 specific Machine Check Exception Reporting
- * (C) Copyright 2002 Alan Cox <alan@redhat.com>
+ * (C) Copyright 2002 Alan Cox <alan@lxorguk.ukuu.org.uk>
  */
 
 #include <linux/init.h>
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
index f2be3e1..2a043d8 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
@@ -1,6 +1,6 @@
 /*
  * IDT Winchip specific Machine Check Exception Reporting
- * (C) Copyright 2002 Alan Cox <alan@redhat.com>
+ * (C) Copyright 2002 Alan Cox <alan@lxorguk.ukuu.org.uk>
  */
 
 #include <linux/init.h>
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 65a1394..e858268 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -665,6 +665,27 @@
 }
 #endif
 
+#ifdef CONFIG_HIBERNATION
+/**
+ * Mark ACPI NVS memory region, so that we can save/restore it during
+ * hibernation and the subsequent resume.
+ */
+static int __init e820_mark_nvs_memory(void)
+{
+	int i;
+
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+
+		if (ei->type == E820_NVS)
+			hibernate_nvs_register(ei->addr, ei->size);
+	}
+
+	return 0;
+}
+core_initcall(e820_mark_nvs_memory);
+#endif
+
 /*
  * Early reserved memory areas.
  */
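The new initcall runs once the final e820 map is settled and hands every E820_NVS range to the hibernation core, so the ACPI NVS area is saved with the image and restored on resume. Purely as an illustration of the same map-walking idiom (the counting function below is hypothetical, not part of the patch):

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <asm/e820.h>

	/* Sketch: total up how many bytes of ACPI NVS the firmware reported. */
	static int __init count_nvs_bytes_example(void)
	{
		u64 nvs_bytes = 0;
		int i;

		for (i = 0; i < e820.nr_map; i++)
			if (e820.map[i].type == E820_NVS)
				nvs_bytes += e820.map[i].size;

		printk(KERN_DEBUG "e820: %llu bytes of ACPI NVS\n",
		       (unsigned long long)nvs_bytes);
		return 0;
	}
	core_initcall(count_nvs_bytes_example);
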
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 744aa7f..76b8cd9 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -201,6 +201,12 @@
 	void (*f)(int num, int slot, int func);
 };
 
+/*
+ * Only works for devices on the root bus. If you add any devices
+ * not on bus 0, re-add another loop level in early_quirks(). But
+ * be careful because at least the Nvidia quirk here relies on
+ * only matching on bus 0.
+ */
 static struct chipset early_qrk[] __initdata = {
 	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
 	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
@@ -267,17 +273,17 @@
 
 void __init early_quirks(void)
 {
-	int num, slot, func;
+	int slot, func;
 
 	if (!early_pci_allowed())
 		return;
 
 	/* Poor man's PCI discovery */
-	for (num = 0; num < 32; num++)
-		for (slot = 0; slot < 32; slot++)
-			for (func = 0; func < 8; func++) {
-				/* Only probe function 0 on single fn devices */
-				if (check_dev_quirk(num, slot, func))
-					break;
-			}
+	/* Only scan the root bus */
+	for (slot = 0; slot < 32; slot++)
+		for (func = 0; func < 8; func++) {
+			/* Only probe function 0 on single fn devices */
+			if (check_dev_quirk(0, slot, func))
+				break;
+		}
 }
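The scan in early_quirks() is now restricted to bus 0 because every entry in early_qrk[] describes a root-bus device, and the Nvidia quirk explicitly depends on matching only there. A hedged sketch of the kind of probe check_dev_quirk() performs per slot/function with the early config-space accessors (everything here other than read_pci_config/read_pci_config_16 and the standard register offsets is illustrative):

	#include <linux/init.h>
	#include <linux/types.h>
	#include <linux/pci_regs.h>
	#include <asm/pci-direct.h>

	/* Sketch: is there a device at bus 0, @slot/@func worth a quirk lookup? */
	static int __init probe_root_bus_fn_example(int slot, int func)
	{
		u32 class;
		u16 vendor;

		class = read_pci_config(0, slot, func, PCI_CLASS_REVISION);
		if (class == 0xffffffff)	/* nothing responded at this address */
			return 0;

		vendor = read_pci_config_16(0, slot, func, PCI_VENDOR_ID);

		/* a real quirk walk would compare vendor/class against a table here */
		return vendor != 0xffff;
	}
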
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index d6f0490..4646902 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1203,7 +1203,6 @@
 	pushl %eax
 	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
-	TRACE_IRQS_OFF
 	xorl %edx,%edx		# zero error code
 	movl %esp,%eax		# pt_regs pointer
 	call do_nmi
@@ -1244,7 +1243,6 @@
 	pushl %eax
 	CFI_ADJUST_CFA_OFFSET 4
 	SAVE_ALL
-	TRACE_IRQS_OFF
 	FIXUP_ESPFIX_STACK		# %eax == %esp
 	xorl %edx,%edx			# zero error code
 	call do_nmi
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 26cfdc1..0e275d4 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -305,7 +305,7 @@
 	call dump_stack
 #ifdef CONFIG_KALLSYMS	
 	leaq early_idt_ripmsg(%rip),%rdi
-	movq 8(%rsp),%rsi	# get rip again
+	movq 0(%rsp),%rsi	# get rip again
 	call __print_symbol
 #endif
 #endif /* EARLY_PRINTK */
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 4b8a53d..11d5093 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -11,15 +11,15 @@
 #include <linux/kernel_stat.h>
 #include <linux/sysdev.h>
 #include <linux/bitops.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/delay.h>
 
-#include <asm/acpi.h>
 #include <asm/atomic.h>
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/timer.h>
 #include <asm/hw_irq.h>
 #include <asm/pgtable.h>
-#include <asm/delay.h>
 #include <asm/desc.h>
 #include <asm/apic.h>
 #include <asm/arch_hooks.h>
@@ -323,7 +323,7 @@
 	outb_pic(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
 
 	/* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 on x86-64,
-	                       to 0x20-0x27 on i386 */
+	   to 0x20-0x27 on i386 */
 	outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);
 
 	/* 8259A-1 (the master) has a slave on IR2 */
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 3639442..1c4a130 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -129,7 +129,6 @@
 	node = cpu_to_node(cpu);
 
 	pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
-	printk(KERN_DEBUG "  alloc irq_2_pin on cpu %d node %d\n", cpu, node);
 
 	return pin;
 }
@@ -227,7 +226,6 @@
 			cpumask_clear(cfg->old_domain);
 		}
 	}
-	printk(KERN_DEBUG "  alloc irq_cfg on cpu %d node %d\n", cpu, node);
 
 	return cfg;
 }
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 1919143..b12208f 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -35,8 +35,8 @@
  */
 asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
 {
-	struct thread_struct * t = &current->thread;
-	struct tss_struct * tss;
+	struct thread_struct *t = &current->thread;
+	struct tss_struct *tss;
 	unsigned int i, max_long, bytes, bytes_updated;
 
 	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index bce53e1..3973e2d 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -5,10 +5,10 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
+#include <linux/smp.h>
 
 #include <asm/apic.h>
 #include <asm/io_apic.h>
-#include <asm/smp.h>
 #include <asm/irq.h>
 
 atomic_t irq_err_count;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 9dc5588..74b9ff7 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -15,9 +15,9 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/delay.h>
+#include <linux/uaccess.h>
 
 #include <asm/apic.h>
-#include <asm/uaccess.h>
 
 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
@@ -93,7 +93,7 @@
 		return 0;
 
 	/* build the stack frame on the IRQ stack */
-	isp = (u32 *) ((char*)irqctx + sizeof(*irqctx));
+	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
 	irqctx->tinfo.task = curctx->tinfo.task;
 	irqctx->tinfo.previous_esp = current_stack_pointer;
 
@@ -137,7 +137,7 @@
 
 	hardirq_ctx[cpu] = irqctx;
 
-	irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
+	irqctx = (union irq_ctx *) &softirq_stack[cpu*THREAD_SIZE];
 	irqctx->tinfo.task		= NULL;
 	irqctx->tinfo.exec_domain	= NULL;
 	irqctx->tinfo.cpu		= cpu;
@@ -147,7 +147,7 @@
 	softirq_ctx[cpu] = irqctx;
 
 	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
-	       cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
+	       cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
 }
 
 void irq_ctx_exit(int cpu)
@@ -174,7 +174,7 @@
 		irqctx->tinfo.previous_esp = current_stack_pointer;
 
 		/* build the stack frame on the softirq stack */
-		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
+		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
 
 		call_on_stack(__do_softirq, isp);
 		/*
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 6383d50..63c88e6 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -14,10 +14,10 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/ftrace.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
+#include <linux/smp.h>
 #include <asm/io_apic.h>
 #include <asm/idle.h>
-#include <asm/smp.h>
 
 /*
  * Probabilistic stack overflow check:
@@ -142,18 +142,18 @@
 
 asmlinkage void do_softirq(void)
 {
- 	__u32 pending;
- 	unsigned long flags;
+	__u32 pending;
+	unsigned long flags;
 
- 	if (in_interrupt())
- 		return;
+	if (in_interrupt())
+		return;
 
- 	local_irq_save(flags);
- 	pending = local_softirq_pending();
- 	/* Switch to interrupt stack */
- 	if (pending) {
+	local_irq_save(flags);
+	pending = local_softirq_pending();
+	/* Switch to interrupt stack */
+	if (pending) {
 		call_softirq();
 		WARN_ON_ONCE(softirq_count());
 	}
- 	local_irq_restore(flags);
+	local_irq_restore(flags);
 }
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 8472329..1507ad4e 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -9,18 +9,18 @@
 #include <linux/kernel_stat.h>
 #include <linux/sysdev.h>
 #include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/delay.h>
 
 #include <asm/atomic.h>
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/timer.h>
 #include <asm/pgtable.h>
-#include <asm/delay.h>
 #include <asm/desc.h>
 #include <asm/apic.h>
 #include <asm/arch_hooks.h>
 #include <asm/i8259.h>
-
+#include <asm/traps.h>
 
 
 /*
@@ -34,12 +34,10 @@
  * leads to races. IBM designers who came up with it should
  * be shot.
  */
- 
 
 static irqreturn_t math_error_irq(int cpl, void *dev_id)
 {
-	extern void math_error(void __user *);
-	outb(0,0xF0);
+	outb(0, 0xF0);
 	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
 		return IRQ_NONE;
 	math_error((void __user *)get_irq_regs()->ip);
@@ -56,7 +54,7 @@
 	.name = "fpu",
 };
 
-void __init init_ISA_irqs (void)
+void __init init_ISA_irqs(void)
 {
 	int i;
 
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c
index 31ebfe3..da481a1 100644
--- a/arch/x86/kernel/irqinit_64.c
+++ b/arch/x86/kernel/irqinit_64.c
@@ -11,14 +11,14 @@
 #include <linux/kernel_stat.h>
 #include <linux/sysdev.h>
 #include <linux/bitops.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/delay.h>
 
-#include <asm/acpi.h>
 #include <asm/atomic.h>
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/hw_irq.h>
 #include <asm/pgtable.h>
-#include <asm/delay.h>
 #include <asm/desc.h>
 #include <asm/apic.h>
 #include <asm/i8259.h>
@@ -81,7 +81,7 @@
 	return 0;
 }
 
-void __init init_ISA_irqs(void)
+static void __init init_ISA_irqs(void)
 {
 	int i;
 
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index eead6f8..e948b28 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -446,7 +446,7 @@
 static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
 				       struct kprobe_ctlblk *kcb)
 {
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER)
 	if (p->ainsn.boostable == 1 && !p->post_handler) {
 		/* Boost up -- we can execute copied instructions directly */
 		reset_current_kprobe();
@@ -695,7 +695,7 @@
 	/*
 	 * It is possible to have multiple instances associated with a given
 	 * task either because multiple functions in the call path have
-	 * return probes installed on them, and/or more then one
+	 * return probes installed on them, and/or more than one
 	 * return probe was registered for a target function.
 	 *
 	 * We can handle this because:
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index c12314c..8815f3c 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -252,7 +252,7 @@
 /*
  * The MFPGT timers on the CS5536 provide us with suitable timers to use
  * as clock event sources - not as good as a HPET or APIC, but certainly
- * better then the PIT.  This isn't a general purpose MFGPT driver, but
+ * better than the PIT.  This isn't a general purpose MFGPT driver, but
  * a simplified one designed specifically to act as a clock event source.
  * For full details about the MFGPT, please consult the CS5536 data sheet.
  */
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index c5c5b8d..a649a4c 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -2,7 +2,7 @@
  *	Intel Multiprocessor Specification 1.1 and 1.4
  *	compliant MP-table parsing routines.
  *
- *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
  *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
  *      (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
  */
@@ -17,7 +17,6 @@
 #include <linux/acpi.h>
 #include <linux/module.h>
 #include <linux/smp.h>
-#include <linux/acpi.h>
 
 #include <asm/mtrr.h>
 #include <asm/mpspec.h>
@@ -28,6 +27,7 @@
 #include <asm/e820.h>
 #include <asm/trampoline.h>
 #include <asm/setup.h>
+#include <asm/smp.h>
 
 #include <mach_apic.h>
 #ifdef CONFIG_X86_32
@@ -49,12 +49,12 @@
 	return sum & 0xFF;
 }
 
-static void __init MP_processor_info(struct mpc_config_processor *m)
+static void __init MP_processor_info(struct mpc_cpu *m)
 {
 	int apicid;
 	char *bootup_cpu = "";
 
-	if (!(m->mpc_cpuflag & CPU_ENABLED)) {
+	if (!(m->cpuflag & CPU_ENABLED)) {
 		disabled_cpus++;
 		return;
 	}
@@ -62,54 +62,54 @@
 	if (x86_quirks->mpc_apic_id)
 		apicid = x86_quirks->mpc_apic_id(m);
 	else
-		apicid = m->mpc_apicid;
+		apicid = m->apicid;
 
-	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
+	if (m->cpuflag & CPU_BOOTPROCESSOR) {
 		bootup_cpu = " (Bootup-CPU)";
-		boot_cpu_physical_apicid = m->mpc_apicid;
+		boot_cpu_physical_apicid = m->apicid;
 	}
 
-	printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu);
-	generic_processor_info(apicid, m->mpc_apicver);
+	printk(KERN_INFO "Processor #%d%s\n", m->apicid, bootup_cpu);
+	generic_processor_info(apicid, m->apicver);
 }
 
 #ifdef CONFIG_X86_IO_APIC
-static void __init MP_bus_info(struct mpc_config_bus *m)
+static void __init MP_bus_info(struct mpc_bus *m)
 {
 	char str[7];
-	memcpy(str, m->mpc_bustype, 6);
+	memcpy(str, m->bustype, 6);
 	str[6] = 0;
 
 	if (x86_quirks->mpc_oem_bus_info)
 		x86_quirks->mpc_oem_bus_info(m, str);
 	else
-		apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->mpc_busid, str);
+		apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
 
 #if MAX_MP_BUSSES < 256
-	if (m->mpc_busid >= MAX_MP_BUSSES) {
+	if (m->busid >= MAX_MP_BUSSES) {
 		printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
 		       " is too large, max. supported is %d\n",
-		       m->mpc_busid, str, MAX_MP_BUSSES - 1);
+		       m->busid, str, MAX_MP_BUSSES - 1);
 		return;
 	}
 #endif
 
 	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
-		set_bit(m->mpc_busid, mp_bus_not_pci);
+		set_bit(m->busid, mp_bus_not_pci);
 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
-		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
+		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
 #endif
 	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
 		if (x86_quirks->mpc_oem_pci_bus)
 			x86_quirks->mpc_oem_pci_bus(m);
 
-		clear_bit(m->mpc_busid, mp_bus_not_pci);
+		clear_bit(m->busid, mp_bus_not_pci);
 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
-		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
+		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
 	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
-		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
+		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
 	} else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) {
-		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
+		mp_bus_id_to_type[m->busid] = MP_BUS_MCA;
 #endif
 	} else
 		printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
@@ -133,32 +133,31 @@
 	return 0;
 }
 
-static void __init MP_ioapic_info(struct mpc_config_ioapic *m)
+static void __init MP_ioapic_info(struct mpc_ioapic *m)
 {
-	if (!(m->mpc_flags & MPC_APIC_USABLE))
+	if (!(m->flags & MPC_APIC_USABLE))
 		return;
 
 	printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
-	       m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
+	       m->apicid, m->apicver, m->apicaddr);
 
-	if (bad_ioapic(m->mpc_apicaddr))
+	if (bad_ioapic(m->apicaddr))
 		return;
 
-	mp_ioapics[nr_ioapics].mp_apicaddr = m->mpc_apicaddr;
-	mp_ioapics[nr_ioapics].mp_apicid = m->mpc_apicid;
-	mp_ioapics[nr_ioapics].mp_type = m->mpc_type;
-	mp_ioapics[nr_ioapics].mp_apicver = m->mpc_apicver;
-	mp_ioapics[nr_ioapics].mp_flags = m->mpc_flags;
+	mp_ioapics[nr_ioapics].mp_apicaddr = m->apicaddr;
+	mp_ioapics[nr_ioapics].mp_apicid = m->apicid;
+	mp_ioapics[nr_ioapics].mp_type = m->type;
+	mp_ioapics[nr_ioapics].mp_apicver = m->apicver;
+	mp_ioapics[nr_ioapics].mp_flags = m->flags;
 	nr_ioapics++;
 }
 
-static void print_MP_intsrc_info(struct mpc_config_intsrc *m)
+static void print_MP_intsrc_info(struct mpc_intsrc *m)
 {
 	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
 		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
-		m->mpc_irqtype, m->mpc_irqflag & 3,
-		(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
-		m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
+		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
+		m->srcbusirq, m->dstapic, m->dstirq);
 }
 
 static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
@@ -170,52 +169,52 @@
 		mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq);
 }
 
-static void __init assign_to_mp_irq(struct mpc_config_intsrc *m,
+static void __init assign_to_mp_irq(struct mpc_intsrc *m,
 				    struct mp_config_intsrc *mp_irq)
 {
-	mp_irq->mp_dstapic = m->mpc_dstapic;
-	mp_irq->mp_type = m->mpc_type;
-	mp_irq->mp_irqtype = m->mpc_irqtype;
-	mp_irq->mp_irqflag = m->mpc_irqflag;
-	mp_irq->mp_srcbus = m->mpc_srcbus;
-	mp_irq->mp_srcbusirq = m->mpc_srcbusirq;
-	mp_irq->mp_dstirq = m->mpc_dstirq;
+	mp_irq->mp_dstapic = m->dstapic;
+	mp_irq->mp_type = m->type;
+	mp_irq->mp_irqtype = m->irqtype;
+	mp_irq->mp_irqflag = m->irqflag;
+	mp_irq->mp_srcbus = m->srcbus;
+	mp_irq->mp_srcbusirq = m->srcbusirq;
+	mp_irq->mp_dstirq = m->dstirq;
 }
 
 static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq,
-					struct mpc_config_intsrc *m)
+					struct mpc_intsrc *m)
 {
-	m->mpc_dstapic = mp_irq->mp_dstapic;
-	m->mpc_type = mp_irq->mp_type;
-	m->mpc_irqtype = mp_irq->mp_irqtype;
-	m->mpc_irqflag = mp_irq->mp_irqflag;
-	m->mpc_srcbus = mp_irq->mp_srcbus;
-	m->mpc_srcbusirq = mp_irq->mp_srcbusirq;
-	m->mpc_dstirq = mp_irq->mp_dstirq;
+	m->dstapic = mp_irq->mp_dstapic;
+	m->type = mp_irq->mp_type;
+	m->irqtype = mp_irq->mp_irqtype;
+	m->irqflag = mp_irq->mp_irqflag;
+	m->srcbus = mp_irq->mp_srcbus;
+	m->srcbusirq = mp_irq->mp_srcbusirq;
+	m->dstirq = mp_irq->mp_dstirq;
 }
 
 static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq,
-					struct mpc_config_intsrc *m)
+					struct mpc_intsrc *m)
 {
-	if (mp_irq->mp_dstapic != m->mpc_dstapic)
+	if (mp_irq->mp_dstapic != m->dstapic)
 		return 1;
-	if (mp_irq->mp_type != m->mpc_type)
+	if (mp_irq->mp_type != m->type)
 		return 2;
-	if (mp_irq->mp_irqtype != m->mpc_irqtype)
+	if (mp_irq->mp_irqtype != m->irqtype)
 		return 3;
-	if (mp_irq->mp_irqflag != m->mpc_irqflag)
+	if (mp_irq->mp_irqflag != m->irqflag)
 		return 4;
-	if (mp_irq->mp_srcbus != m->mpc_srcbus)
+	if (mp_irq->mp_srcbus != m->srcbus)
 		return 5;
-	if (mp_irq->mp_srcbusirq != m->mpc_srcbusirq)
+	if (mp_irq->mp_srcbusirq != m->srcbusirq)
 		return 6;
-	if (mp_irq->mp_dstirq != m->mpc_dstirq)
+	if (mp_irq->mp_dstirq != m->dstirq)
 		return 7;
 
 	return 0;
 }
 
-static void __init MP_intsrc_info(struct mpc_config_intsrc *m)
+static void __init MP_intsrc_info(struct mpc_intsrc *m)
 {
 	int i;
 
@@ -233,57 +232,55 @@
 
 #endif
 
-static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m)
+static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
 {
 	apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x,"
 		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
-		m->mpc_irqtype, m->mpc_irqflag & 3,
-		(m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid,
-		m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
+		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
+		m->srcbusirq, m->destapic, m->destapiclint);
 }
 
 /*
  * Read/parse the MPC
  */
 
-static int __init smp_check_mpc(struct mp_config_table *mpc, char *oem,
-				char *str)
+static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
 {
 
-	if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) {
+	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
 		printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n",
-		       mpc->mpc_signature[0], mpc->mpc_signature[1],
-		       mpc->mpc_signature[2], mpc->mpc_signature[3]);
+		       mpc->signature[0], mpc->signature[1],
+		       mpc->signature[2], mpc->signature[3]);
 		return 0;
 	}
-	if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) {
+	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
 		printk(KERN_ERR "MPTABLE: checksum error!\n");
 		return 0;
 	}
-	if (mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04) {
+	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
 		printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n",
-		       mpc->mpc_spec);
+		       mpc->spec);
 		return 0;
 	}
-	if (!mpc->mpc_lapic) {
+	if (!mpc->lapic) {
 		printk(KERN_ERR "MPTABLE: null local APIC address!\n");
 		return 0;
 	}
-	memcpy(oem, mpc->mpc_oem, 8);
+	memcpy(oem, mpc->oem, 8);
 	oem[8] = 0;
 	printk(KERN_INFO "MPTABLE: OEM ID: %s\n", oem);
 
-	memcpy(str, mpc->mpc_productid, 12);
+	memcpy(str, mpc->productid, 12);
 	str[12] = 0;
 
 	printk(KERN_INFO "MPTABLE: Product ID: %s\n", str);
 
-	printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->mpc_lapic);
+	printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->lapic);
 
 	return 1;
 }
 
-static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early)
+static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
 {
 	char str[16];
 	char oem[10];
@@ -308,14 +305,14 @@
 #endif
 	/* save the local APIC address, it might be non-default */
 	if (!acpi_lapic)
-		mp_lapic_addr = mpc->mpc_lapic;
+		mp_lapic_addr = mpc->lapic;
 
 	if (early)
 		return 1;
 
-	if (mpc->mpc_oemptr && x86_quirks->smp_read_mpc_oem) {
-		struct mp_config_oemtable *oem_table = (struct mp_config_oemtable *)(unsigned long)mpc->mpc_oemptr;
-		x86_quirks->smp_read_mpc_oem(oem_table, mpc->mpc_oemsize);
+	if (mpc->oemptr && x86_quirks->smp_read_mpc_oem) {
+		struct mpc_oemtable *oem_table = (void *)(long)mpc->oemptr;
+		x86_quirks->smp_read_mpc_oem(oem_table, mpc->oemsize);
 	}
 
 	/*
@@ -324,12 +321,11 @@
 	if (x86_quirks->mpc_record)
 		*x86_quirks->mpc_record = 0;
 
-	while (count < mpc->mpc_length) {
+	while (count < mpc->length) {
 		switch (*mpt) {
 		case MP_PROCESSOR:
 			{
-				struct mpc_config_processor *m =
-				    (struct mpc_config_processor *)mpt;
+				struct mpc_cpu *m = (struct mpc_cpu *)mpt;
 				/* ACPI may have already provided this data */
 				if (!acpi_lapic)
 					MP_processor_info(m);
@@ -339,8 +335,7 @@
 			}
 		case MP_BUS:
 			{
-				struct mpc_config_bus *m =
-				    (struct mpc_config_bus *)mpt;
+				struct mpc_bus *m = (struct mpc_bus *)mpt;
 #ifdef CONFIG_X86_IO_APIC
 				MP_bus_info(m);
 #endif
@@ -351,30 +346,28 @@
 		case MP_IOAPIC:
 			{
 #ifdef CONFIG_X86_IO_APIC
-				struct mpc_config_ioapic *m =
-				    (struct mpc_config_ioapic *)mpt;
+				struct mpc_ioapic *m = (struct mpc_ioapic *)mpt;
 				MP_ioapic_info(m);
 #endif
-				mpt += sizeof(struct mpc_config_ioapic);
-				count += sizeof(struct mpc_config_ioapic);
+				mpt += sizeof(struct mpc_ioapic);
+				count += sizeof(struct mpc_ioapic);
 				break;
 			}
 		case MP_INTSRC:
 			{
 #ifdef CONFIG_X86_IO_APIC
-				struct mpc_config_intsrc *m =
-				    (struct mpc_config_intsrc *)mpt;
+				struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
 
 				MP_intsrc_info(m);
 #endif
-				mpt += sizeof(struct mpc_config_intsrc);
-				count += sizeof(struct mpc_config_intsrc);
+				mpt += sizeof(struct mpc_intsrc);
+				count += sizeof(struct mpc_intsrc);
 				break;
 			}
 		case MP_LINTSRC:
 			{
-				struct mpc_config_lintsrc *m =
-				    (struct mpc_config_lintsrc *)mpt;
+				struct mpc_lintsrc *m =
+				    (struct mpc_lintsrc *)mpt;
 				MP_lintsrc_info(m);
 				mpt += sizeof(*m);
 				count += sizeof(*m);
@@ -385,8 +378,8 @@
 			printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n");
 			printk(KERN_ERR "type %x\n", *mpt);
 			print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_ADDRESS, 16,
-					1, mpc, mpc->mpc_length, 1);
-			count = mpc->mpc_length;
+					1, mpc, mpc->length, 1);
+			count = mpc->length;
 			break;
 		}
 		if (x86_quirks->mpc_record)
@@ -417,16 +410,16 @@
 
 static void __init construct_default_ioirq_mptable(int mpc_default_type)
 {
-	struct mpc_config_intsrc intsrc;
+	struct mpc_intsrc intsrc;
 	int i;
 	int ELCR_fallback = 0;
 
-	intsrc.mpc_type = MP_INTSRC;
-	intsrc.mpc_irqflag = 0;	/* conforming */
-	intsrc.mpc_srcbus = 0;
-	intsrc.mpc_dstapic = mp_ioapics[0].mp_apicid;
+	intsrc.type = MP_INTSRC;
+	intsrc.irqflag = 0;	/* conforming */
+	intsrc.srcbus = 0;
+	intsrc.dstapic = mp_ioapics[0].mp_apicid;
 
-	intsrc.mpc_irqtype = mp_INT;
+	intsrc.irqtype = mp_INT;
 
 	/*
 	 *  If true, we have an ISA/PCI system with no IRQ entries
@@ -469,30 +462,30 @@
 			 *  irqflag field (level sensitive, active high polarity).
 			 */
 			if (ELCR_trigger(i))
-				intsrc.mpc_irqflag = 13;
+				intsrc.irqflag = 13;
 			else
-				intsrc.mpc_irqflag = 0;
+				intsrc.irqflag = 0;
 		}
 
-		intsrc.mpc_srcbusirq = i;
-		intsrc.mpc_dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
+		intsrc.srcbusirq = i;
+		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
 		MP_intsrc_info(&intsrc);
 	}
 
-	intsrc.mpc_irqtype = mp_ExtINT;
-	intsrc.mpc_srcbusirq = 0;
-	intsrc.mpc_dstirq = 0;	/* 8259A to INTIN0 */
+	intsrc.irqtype = mp_ExtINT;
+	intsrc.srcbusirq = 0;
+	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
 	MP_intsrc_info(&intsrc);
 }
 
 
 static void __init construct_ioapic_table(int mpc_default_type)
 {
-	struct mpc_config_ioapic ioapic;
-	struct mpc_config_bus bus;
+	struct mpc_ioapic ioapic;
+	struct mpc_bus bus;
 
-	bus.mpc_type = MP_BUS;
-	bus.mpc_busid = 0;
+	bus.type = MP_BUS;
+	bus.busid = 0;
 	switch (mpc_default_type) {
 	default:
 		printk(KERN_ERR "???\nUnknown standard configuration %d\n",
@@ -500,29 +493,29 @@
 		/* fall through */
 	case 1:
 	case 5:
-		memcpy(bus.mpc_bustype, "ISA   ", 6);
+		memcpy(bus.bustype, "ISA   ", 6);
 		break;
 	case 2:
 	case 6:
 	case 3:
-		memcpy(bus.mpc_bustype, "EISA  ", 6);
+		memcpy(bus.bustype, "EISA  ", 6);
 		break;
 	case 4:
 	case 7:
-		memcpy(bus.mpc_bustype, "MCA   ", 6);
+		memcpy(bus.bustype, "MCA   ", 6);
 	}
 	MP_bus_info(&bus);
 	if (mpc_default_type > 4) {
-		bus.mpc_busid = 1;
-		memcpy(bus.mpc_bustype, "PCI   ", 6);
+		bus.busid = 1;
+		memcpy(bus.bustype, "PCI   ", 6);
 		MP_bus_info(&bus);
 	}
 
-	ioapic.mpc_type = MP_IOAPIC;
-	ioapic.mpc_apicid = 2;
-	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-	ioapic.mpc_flags = MPC_APIC_USABLE;
-	ioapic.mpc_apicaddr = 0xFEC00000;
+	ioapic.type = MP_IOAPIC;
+	ioapic.apicid = 2;
+	ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
+	ioapic.flags = MPC_APIC_USABLE;
+	ioapic.apicaddr = 0xFEC00000;
 	MP_ioapic_info(&ioapic);
 
 	/*
@@ -536,8 +529,8 @@
 
 static inline void __init construct_default_ISA_mptable(int mpc_default_type)
 {
-	struct mpc_config_processor processor;
-	struct mpc_config_lintsrc lintsrc;
+	struct mpc_cpu processor;
+	struct mpc_lintsrc lintsrc;
 	int linttypes[2] = { mp_ExtINT, mp_NMI };
 	int i;
 
@@ -549,30 +542,30 @@
 	/*
 	 * 2 CPUs, numbered 0 & 1.
 	 */
-	processor.mpc_type = MP_PROCESSOR;
+	processor.type = MP_PROCESSOR;
 	/* Either an integrated APIC or a discrete 82489DX. */
-	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-	processor.mpc_cpuflag = CPU_ENABLED;
-	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
+	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
+	processor.cpuflag = CPU_ENABLED;
+	processor.cpufeature = (boot_cpu_data.x86 << 8) |
 	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
-	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-	processor.mpc_reserved[0] = 0;
-	processor.mpc_reserved[1] = 0;
+	processor.featureflag = boot_cpu_data.x86_capability[0];
+	processor.reserved[0] = 0;
+	processor.reserved[1] = 0;
 	for (i = 0; i < 2; i++) {
-		processor.mpc_apicid = i;
+		processor.apicid = i;
 		MP_processor_info(&processor);
 	}
 
 	construct_ioapic_table(mpc_default_type);
 
-	lintsrc.mpc_type = MP_LINTSRC;
-	lintsrc.mpc_irqflag = 0;	/* conforming */
-	lintsrc.mpc_srcbusid = 0;
-	lintsrc.mpc_srcbusirq = 0;
-	lintsrc.mpc_destapic = MP_APIC_ALL;
+	lintsrc.type = MP_LINTSRC;
+	lintsrc.irqflag = 0;		/* conforming */
+	lintsrc.srcbusid = 0;
+	lintsrc.srcbusirq = 0;
+	lintsrc.destapic = MP_APIC_ALL;
 	for (i = 0; i < 2; i++) {
-		lintsrc.mpc_irqtype = linttypes[i];
-		lintsrc.mpc_destapiclint = i;
+		lintsrc.irqtype = linttypes[i];
+		lintsrc.destapiclint = i;
 		MP_lintsrc_info(&lintsrc);
 	}
 }
@@ -657,15 +650,15 @@
 		 * ISA defaults and hope it will work.
 		 */
 		if (!mp_irq_entries) {
-			struct mpc_config_bus bus;
+			struct mpc_bus bus;
 
 			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, "
 			       "using default mptable. "
 			       "(tell your hw vendor)\n");
 
-			bus.mpc_type = MP_BUS;
-			bus.mpc_busid = 0;
-			memcpy(bus.mpc_bustype, "ISA   ", 6);
+			bus.type = MP_BUS;
+			bus.busid = 0;
+			memcpy(bus.bustype, "ISA   ", 6);
 			MP_bus_info(&bus);
 
 			construct_default_ioirq_mptable(0);
@@ -803,14 +796,14 @@
 #ifdef CONFIG_X86_IO_APIC
 static u8 __initdata irq_used[MAX_IRQ_SOURCES];
 
-static int  __init get_MP_intsrc_index(struct mpc_config_intsrc *m)
+static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
 {
 	int i;
 
-	if (m->mpc_irqtype != mp_INT)
+	if (m->irqtype != mp_INT)
 		return 0;
 
-	if (m->mpc_irqflag != 0x0f)
+	if (m->irqflag != 0x0f)
 		return 0;
 
 	/* not legacy */
@@ -822,9 +815,9 @@
 		if (mp_irqs[i].mp_irqflag != 0x0f)
 			continue;
 
-		if (mp_irqs[i].mp_srcbus != m->mpc_srcbus)
+		if (mp_irqs[i].mp_srcbus != m->srcbus)
 			continue;
-		if (mp_irqs[i].mp_srcbusirq != m->mpc_srcbusirq)
+		if (mp_irqs[i].mp_srcbusirq != m->srcbusirq)
 			continue;
 		if (irq_used[i]) {
 			/* already claimed */
@@ -840,10 +833,10 @@
 
 #define SPARE_SLOT_NUM 20
 
-static struct mpc_config_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
+static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
 #endif
 
-static int  __init replace_intsrc_all(struct mp_config_table *mpc,
+static int  __init replace_intsrc_all(struct mpc_table *mpc,
 					unsigned long mpc_new_phys,
 					unsigned long mpc_new_length)
 {
@@ -855,36 +848,33 @@
 	int count = sizeof(*mpc);
 	unsigned char *mpt = ((unsigned char *)mpc) + count;
 
-	printk(KERN_INFO "mpc_length %x\n", mpc->mpc_length);
-	while (count < mpc->mpc_length) {
+	printk(KERN_INFO "mpc_length %x\n", mpc->length);
+	while (count < mpc->length) {
 		switch (*mpt) {
 		case MP_PROCESSOR:
 			{
-				struct mpc_config_processor *m =
-				    (struct mpc_config_processor *)mpt;
+				struct mpc_cpu *m = (struct mpc_cpu *)mpt;
 				mpt += sizeof(*m);
 				count += sizeof(*m);
 				break;
 			}
 		case MP_BUS:
 			{
-				struct mpc_config_bus *m =
-				    (struct mpc_config_bus *)mpt;
+				struct mpc_bus *m = (struct mpc_bus *)mpt;
 				mpt += sizeof(*m);
 				count += sizeof(*m);
 				break;
 			}
 		case MP_IOAPIC:
 			{
-				mpt += sizeof(struct mpc_config_ioapic);
-				count += sizeof(struct mpc_config_ioapic);
+				mpt += sizeof(struct mpc_ioapic);
+				count += sizeof(struct mpc_ioapic);
 				break;
 			}
 		case MP_INTSRC:
 			{
 #ifdef CONFIG_X86_IO_APIC
-				struct mpc_config_intsrc *m =
-				    (struct mpc_config_intsrc *)mpt;
+				struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
 
 				printk(KERN_INFO "OLD ");
 				print_MP_intsrc_info(m);
@@ -905,14 +895,14 @@
 					nr_m_spare++;
 				}
 #endif
-				mpt += sizeof(struct mpc_config_intsrc);
-				count += sizeof(struct mpc_config_intsrc);
+				mpt += sizeof(struct mpc_intsrc);
+				count += sizeof(struct mpc_intsrc);
 				break;
 			}
 		case MP_LINTSRC:
 			{
-				struct mpc_config_lintsrc *m =
-				    (struct mpc_config_lintsrc *)mpt;
+				struct mpc_lintsrc *m =
+				    (struct mpc_lintsrc *)mpt;
 				mpt += sizeof(*m);
 				count += sizeof(*m);
 				break;
@@ -922,7 +912,7 @@
 			printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n");
 			printk(KERN_ERR "type %x\n", *mpt);
 			print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_ADDRESS, 16,
-					1, mpc, mpc->mpc_length, 1);
+					1, mpc, mpc->length, 1);
 			goto out;
 		}
 	}
@@ -944,9 +934,8 @@
 			assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]);
 			m_spare[nr_m_spare] = NULL;
 		} else {
-			struct mpc_config_intsrc *m =
-			    (struct mpc_config_intsrc *)mpt;
-			count += sizeof(struct mpc_config_intsrc);
+			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
+			count += sizeof(struct mpc_intsrc);
 			if (!mpc_new_phys) {
 				printk(KERN_INFO "No spare slots, try to append...take your risk, new mpc_length %x\n", count);
 			} else {
@@ -958,17 +947,16 @@
 				}
 			}
 			assign_to_mpc_intsrc(&mp_irqs[i], m);
-			mpc->mpc_length = count;
-			mpt += sizeof(struct mpc_config_intsrc);
+			mpc->length = count;
+			mpt += sizeof(struct mpc_intsrc);
 		}
 		print_mp_irq_info(&mp_irqs[i]);
 	}
 #endif
 out:
 	/* update checksum */
-	mpc->mpc_checksum = 0;
-	mpc->mpc_checksum -= mpf_checksum((unsigned char *)mpc,
-					   mpc->mpc_length);
+	mpc->checksum = 0;
+	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);
 
 	return 0;
 }
@@ -1014,8 +1002,7 @@
 	char str[16];
 	char oem[10];
 	struct intel_mp_floating *mpf;
-	struct mp_config_table *mpc;
-	struct mp_config_table *mpc_new;
+	struct mpc_table *mpc, *mpc_new;
 
 	if (!enable_update_mptable)
 		return 0;
@@ -1041,7 +1028,7 @@
 	printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf));
 	printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr);
 
-	if (mpc_new_phys && mpc->mpc_length > mpc_new_length) {
+	if (mpc_new_phys && mpc->length > mpc_new_length) {
 		mpc_new_phys = 0;
 		printk(KERN_INFO "mpc_new_length is %ld, please use alloc_mptable=8k\n",
 			 mpc_new_length);
@@ -1050,10 +1037,10 @@
 	if (!mpc_new_phys) {
 		unsigned char old, new;
 		/* check if we can change the postion */
-		mpc->mpc_checksum = 0;
-		old = mpf_checksum((unsigned char *)mpc, mpc->mpc_length);
-		mpc->mpc_checksum = 0xff;
-		new = mpf_checksum((unsigned char *)mpc, mpc->mpc_length);
+		mpc->checksum = 0;
+		old = mpf_checksum((unsigned char *)mpc, mpc->length);
+		mpc->checksum = 0xff;
+		new = mpf_checksum((unsigned char *)mpc, mpc->length);
 		if (old == new) {
 			printk(KERN_INFO "mpc is readonly, please try alloc_mptable instead\n");
 			return 0;
@@ -1062,7 +1049,7 @@
 	} else {
 		mpf->mpf_physptr = mpc_new_phys;
 		mpc_new = phys_to_virt(mpc_new_phys);
-		memcpy(mpc_new, mpc, mpc->mpc_length);
+		memcpy(mpc_new, mpc, mpc->length);
 		mpc = mpc_new;
 		/* check if we can modify that */
 		if (mpc_new_phys - mpf->mpf_physptr) {
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 45a09cc..7228979 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -26,7 +26,6 @@
 #include <linux/kernel_stat.h>
 #include <linux/kdebug.h>
 #include <linux/smp.h>
-#include <linux/nmi.h>
 
 #include <asm/i8259.h>
 #include <asm/io_apic.h>
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c
index 0deea37..f2191d4 100644
--- a/arch/x86/kernel/numaq_32.c
+++ b/arch/x86/kernel/numaq_32.c
@@ -117,16 +117,15 @@
 }
 
 /* x86_quirks member */
-static int mpc_apic_id(struct mpc_config_processor *m)
+static int mpc_apic_id(struct mpc_cpu *m)
 {
 	int quad = translation_table[mpc_record]->trans_quad;
-	int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid);
+	int logical_apicid = generate_logical_apicid(quad, m->apicid);
 
 	printk(KERN_DEBUG "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n",
-	       m->mpc_apicid,
-	       (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
-	       (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
-	       m->mpc_apicver, quad, logical_apicid);
+	       m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8,
+	       (m->cpufeature & CPU_MODEL_MASK) >> 4,
+	       m->apicver, quad, logical_apicid);
 	return logical_apicid;
 }
 
@@ -135,26 +134,26 @@
 int mp_bus_id_to_local[MAX_MP_BUSSES];
 
 /* x86_quirks member */
-static void mpc_oem_bus_info(struct mpc_config_bus *m, char *name)
+static void mpc_oem_bus_info(struct mpc_bus *m, char *name)
 {
 	int quad = translation_table[mpc_record]->trans_quad;
 	int local = translation_table[mpc_record]->trans_local;
 
-	mp_bus_id_to_node[m->mpc_busid] = quad;
-	mp_bus_id_to_local[m->mpc_busid] = local;
+	mp_bus_id_to_node[m->busid] = quad;
+	mp_bus_id_to_local[m->busid] = local;
 	printk(KERN_INFO "Bus #%d is %s (node %d)\n",
-	       m->mpc_busid, name, quad);
+	       m->busid, name, quad);
 }
 
 int quad_local_to_mp_bus_id [NR_CPUS/4][4];
 
 /* x86_quirks member */
-static void mpc_oem_pci_bus(struct mpc_config_bus *m)
+static void mpc_oem_pci_bus(struct mpc_bus *m)
 {
 	int quad = translation_table[mpc_record]->trans_quad;
 	int local = translation_table[mpc_record]->trans_local;
 
-	quad_local_to_mp_bus_id[quad][local] = m->mpc_busid;
+	quad_local_to_mp_bus_id[quad][local] = m->busid;
 }
 
 static void __init MP_translation_info(struct mpc_config_translation *m)
@@ -186,7 +185,7 @@
  * Read/parse the MPC oem tables
  */
 
-static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable,
+static void __init smp_read_mpc_oem(struct mpc_oemtable *oemtable,
 				    unsigned short oemsize)
 {
 	int count = sizeof(*oemtable);	/* the header size */
@@ -195,18 +194,18 @@
 	mpc_record = 0;
 	printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n",
 	       oemtable);
-	if (memcmp(oemtable->oem_signature, MPC_OEM_SIGNATURE, 4)) {
+	if (memcmp(oemtable->signature, MPC_OEM_SIGNATURE, 4)) {
 		printk(KERN_WARNING
 		       "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
-		       oemtable->oem_signature[0], oemtable->oem_signature[1],
-		       oemtable->oem_signature[2], oemtable->oem_signature[3]);
+		       oemtable->signature[0], oemtable->signature[1],
+		       oemtable->signature[2], oemtable->signature[3]);
 		return;
 	}
-	if (mpf_checksum((unsigned char *)oemtable, oemtable->oem_length)) {
+	if (mpf_checksum((unsigned char *)oemtable, oemtable->length)) {
 		printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
 		return;
 	}
-	while (count < oemtable->oem_length) {
+	while (count < oemtable->length) {
 		switch (*oemptr) {
 		case MP_TRANSLATION:
 			{
@@ -260,8 +259,7 @@
 	.update_genapic		= numaq_update_genapic,
 };
 
-void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
-				 char *productid)
+void numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
 {
 	if (strncmp(oem, "IBM NUMA", 8))
 		printk("Warning!  Not a NUMA-Q system!\n");
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 19a1044..b254285 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -38,7 +38,7 @@
    be probably a smaller DMA mask, but this is bug-to-bug compatible
    to older i386. */
 struct device x86_dma_fallback_dev = {
-	.bus_id = "fallback device",
+	.init_name = "fallback device",
 	.coherent_dma_mask = DMA_32BIT_MASK,
 	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
 };
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 3ba155d..a546f55 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -39,11 +39,12 @@
 #include <linux/prctl.h>
 #include <linux/dmi.h>
 #include <linux/ftrace.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/kdebug.h>
 
-#include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/ldt.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
@@ -56,10 +57,8 @@
 
 #include <asm/tlbflush.h>
 #include <asm/cpu.h>
-#include <asm/kdebug.h>
 #include <asm/idle.h>
 #include <asm/syscalls.h>
-#include <asm/smp.h>
 #include <asm/ds.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
@@ -205,7 +204,7 @@
 /*
  * Create a kernel thread
  */
-int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 {
 	struct pt_regs regs;
 
@@ -266,7 +265,7 @@
 	tsk->thread.debugreg3 = 0;
 	tsk->thread.debugreg6 = 0;
 	tsk->thread.debugreg7 = 0;
-	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
+	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
 	clear_tsk_thread_flag(tsk, TIF_DEBUG);
 	/*
 	 * Forget coprocessor state..
@@ -293,9 +292,9 @@
 
 int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	unsigned long unused,
-	struct task_struct * p, struct pt_regs * regs)
+	struct task_struct *p, struct pt_regs *regs)
 {
-	struct pt_regs * childregs;
+	struct pt_regs *childregs;
 	struct task_struct *tsk;
 	int err;
 
@@ -347,7 +346,7 @@
 void
 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
-	__asm__("movl %0, %%gs" :: "r"(0));
+	__asm__("movl %0, %%gs" : : "r"(0));
 	regs->fs		= 0;
 	set_fs(USER_DS);
 	regs->ds		= __USER_DS;
@@ -638,7 +637,7 @@
 asmlinkage int sys_execve(struct pt_regs regs)
 {
 	int error;
-	char * filename;
+	char *filename;
 
 	filename = getname((char __user *) regs.bx);
 	error = PTR_ERR(filename);
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index a4b619c..55c4607 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -5,12 +5,11 @@
 #include <linux/percpu.h>
 #include <linux/kexec.h>
 #include <linux/crash_dump.h>
-#include <asm/smp.h>
-#include <asm/percpu.h>
+#include <linux/smp.h>
+#include <linux/topology.h>
 #include <asm/sections.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
-#include <asm/topology.h>
 #include <asm/mpspec.h>
 #include <asm/apicdef.h>
 #include <asm/highmem.h>
@@ -20,8 +19,8 @@
 unsigned disabled_cpus __cpuinitdata;
 /* Processor that is doing the boot up */
 unsigned int boot_cpu_physical_apicid = -1U;
-unsigned int max_physical_apicid;
 EXPORT_SYMBOL(boot_cpu_physical_apicid);
+unsigned int max_physical_apicid;
 
 /* Bitmask of physically existing CPUs */
 physid_mask_t phys_cpu_present_map;
@@ -131,7 +130,27 @@
 	/* point to new pointer table */
 	_cpu_pda = new_cpu_pda;
 }
-#endif
+
+#endif /* CONFIG_SMP && CONFIG_X86_64 */
+
+#ifdef CONFIG_X86_64
+
+/* correctly size the local cpu masks */
+static void setup_cpu_local_masks(void)
+{
+	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+	alloc_bootmem_cpumask_var(&cpu_callin_mask);
+	alloc_bootmem_cpumask_var(&cpu_callout_mask);
+	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
+
+#else /* CONFIG_X86_32 */
+
+static inline void setup_cpu_local_masks(void)
+{
+}
+
+#endif /* CONFIG_X86_32 */
 
 /*
  * Great future plan:
@@ -187,6 +206,9 @@
 
 	/* Setup node to cpumask map */
 	setup_node_to_cpumask_map();
+
+	/* Setup cpu initialized, callin, callout masks */
+	setup_cpu_local_masks();
 }
 
 #endif
@@ -280,8 +302,8 @@
 
 	cpulist_scnprintf(buf, sizeof(buf), mask);
 	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
-		enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf);
- }
+		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
+}
 
 void __cpuinit numa_add_cpu(int cpu)
 {
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index beea2649a..e6faa33 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -1,7 +1,7 @@
 /*
  *	Intel SMP support routines.
  *
- *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
  *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
  *      (c) 2002,2003 Andi Kleen, SuSE Labs.
  *
@@ -128,16 +128,23 @@
 
 void native_send_call_func_ipi(const struct cpumask *mask)
 {
-	cpumask_t allbutself;
+	cpumask_var_t allbutself;
 
-	allbutself = cpu_online_map;
-	cpu_clear(smp_processor_id(), allbutself);
+	if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
+		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+		return;
+	}
 
-	if (cpus_equal(*mask, allbutself) &&
-	    cpus_equal(cpu_online_map, cpu_callout_map))
+	cpumask_copy(allbutself, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), allbutself);
+
+	if (cpumask_equal(mask, allbutself) &&
+	    cpumask_equal(cpu_online_mask, cpu_callout_mask))
 		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
 	else
 		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+
+	free_cpumask_var(allbutself);
 }
 
 /*
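native_send_call_func_ipi() previously built its "all but self" scratch mask in a cpumask_t on the stack; it now allocates a cpumask_var_t with GFP_ATOMIC and, if that fails, falls back to the targeted IPI, which is always correct even when the allbutself broadcast shortcut would have applied. The same degrade-gracefully pattern, sketched with illustrative callbacks rather than the real IPI senders:

	#include <linux/cpumask.h>
	#include <linux/smp.h>
	#include <linux/gfp.h>

	/* Sketch: prefer a broadcast when @mask covers every other online CPU. */
	static void send_ipi_example(const struct cpumask *mask,
				     void (*broadcast)(void),
				     void (*targeted)(const struct cpumask *))
	{
		cpumask_var_t allbutself;

		/* Atomic context: if no scratch space, take the safe targeted path. */
		if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
			targeted(mask);
			return;
		}

		cpumask_copy(allbutself, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), allbutself);

		if (cpumask_equal(mask, allbutself))
			broadcast();
		else
			targeted(mask);

		free_cpumask_var(allbutself);
	}
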
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6bd4d9b..bb1a3b1 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1,7 +1,7 @@
 /*
  *	x86 SMP booting functions
  *
- *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
  *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
  *	Copyright 2001 Andi Kleen, SuSE Labs.
  *
@@ -102,9 +102,6 @@
 /* Last level cache ID of each logical CPU */
 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 
-cpumask_t cpu_callin_map;
-cpumask_t cpu_callout_map;
-
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
@@ -120,9 +117,6 @@
 static atomic_t init_deasserted;
 
 
-/* representing cpus for which sibling maps can be computed */
-static cpumask_t cpu_sibling_setup_map;
-
 /* Set if we find a B stepping CPU */
 static int __cpuinitdata smp_b_stepping;
 
@@ -140,7 +134,7 @@
 static void map_cpu_to_node(int cpu, int node)
 {
 	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
-	cpu_set(cpu, node_to_cpumask_map[node]);
+	cpumask_set_cpu(cpu, &node_to_cpumask_map[node]);
 	cpu_to_node_map[cpu] = node;
 }
 
@@ -151,7 +145,7 @@
 
 	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
 	for (node = 0; node < MAX_NUMNODES; node++)
-		cpu_clear(cpu, node_to_cpumask_map[node]);
+		cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]);
 	cpu_to_node_map[cpu] = 0;
 }
 #else /* !(CONFIG_NUMA && CONFIG_X86_32) */
@@ -209,7 +203,7 @@
 	 */
 	phys_id = read_apic_id();
 	cpuid = smp_processor_id();
-	if (cpu_isset(cpuid, cpu_callin_map)) {
+	if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
 		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
 					phys_id, cpuid);
 	}
@@ -231,7 +225,7 @@
 		/*
 		 * Has the boot CPU finished it's STARTUP sequence?
 		 */
-		if (cpu_isset(cpuid, cpu_callout_map))
+		if (cpumask_test_cpu(cpuid, cpu_callout_mask))
 			break;
 		cpu_relax();
 	}
@@ -274,7 +268,7 @@
 	/*
 	 * Allow the master to continue.
 	 */
-	cpu_set(cpuid, cpu_callin_map);
+	cpumask_set_cpu(cpuid, cpu_callin_mask);
 }
 
 static int __cpuinitdata unsafe_smp;
@@ -332,7 +326,7 @@
 	ipi_call_lock();
 	lock_vector_lock();
 	__setup_vector_irq(smp_processor_id());
-	cpu_set(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), true);
 	unlock_vector_lock();
 	ipi_call_unlock();
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@ -438,50 +432,52 @@
 	int i;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	cpu_set(cpu, cpu_sibling_setup_map);
+	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 
 	if (smp_num_siblings > 1) {
-		for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
-			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
-			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
-				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
-				cpu_set(i, per_cpu(cpu_core_map, cpu));
-				cpu_set(cpu, per_cpu(cpu_core_map, i));
-				cpu_set(i, c->llc_shared_map);
-				cpu_set(cpu, cpu_data(i).llc_shared_map);
+		for_each_cpu(i, cpu_sibling_setup_mask) {
+			struct cpuinfo_x86 *o = &cpu_data(i);
+
+			if (c->phys_proc_id == o->phys_proc_id &&
+			    c->cpu_core_id == o->cpu_core_id) {
+				cpumask_set_cpu(i, cpu_sibling_mask(cpu));
+				cpumask_set_cpu(cpu, cpu_sibling_mask(i));
+				cpumask_set_cpu(i, cpu_core_mask(cpu));
+				cpumask_set_cpu(cpu, cpu_core_mask(i));
+				cpumask_set_cpu(i, &c->llc_shared_map);
+				cpumask_set_cpu(cpu, &o->llc_shared_map);
 			}
 		}
 	} else {
-		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
+		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
 	}
 
-	cpu_set(cpu, c->llc_shared_map);
+	cpumask_set_cpu(cpu, &c->llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
-		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
+		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
 		c->booted_cores = 1;
 		return;
 	}
 
-	for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
+	for_each_cpu(i, cpu_sibling_setup_mask) {
 		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-			cpu_set(i, c->llc_shared_map);
-			cpu_set(cpu, cpu_data(i).llc_shared_map);
+			cpumask_set_cpu(i, &c->llc_shared_map);
+			cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map);
 		}
 		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
-			cpu_set(i, per_cpu(cpu_core_map, cpu));
-			cpu_set(cpu, per_cpu(cpu_core_map, i));
+			cpumask_set_cpu(i, cpu_core_mask(cpu));
+			cpumask_set_cpu(cpu, cpu_core_mask(i));
 			/*
 			 *  Does this new cpu bringup a new core?
 			 */
-			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
+			if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
 				/*
 				 * for each core in package, increment
 				 * the booted_cores for this new cpu
 				 */
-				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
+				if (cpumask_first(cpu_sibling_mask(i)) == i)
 					c->booted_cores++;
 				/*
 				 * increment the core count for all
@@ -504,7 +500,7 @@
 	 * And for power savings, we return cpu_core_map
 	 */
 	if (sched_mc_power_savings || sched_smt_power_savings)
-		return &per_cpu(cpu_core_map, cpu);
+		return cpu_core_mask(cpu);
 	else
 		return &c->llc_shared_map;
 }
@@ -523,7 +519,7 @@
 	 */
 	pr_debug("Before bogomips.\n");
 	for_each_possible_cpu(cpu)
-		if (cpu_isset(cpu, cpu_callout_map))
+		if (cpumask_test_cpu(cpu, cpu_callout_mask))
 			bogosum += cpu_data(cpu).loops_per_jiffy;
 	printk(KERN_INFO
 		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
@@ -904,19 +900,19 @@
 		 * allow APs to start initializing.
 		 */
 		pr_debug("Before Callout %d.\n", cpu);
-		cpu_set(cpu, cpu_callout_map);
+		cpumask_set_cpu(cpu, cpu_callout_mask);
 		pr_debug("After Callout %d.\n", cpu);
 
 		/*
 		 * Wait 5s total for a response
 		 */
 		for (timeout = 0; timeout < 50000; timeout++) {
-			if (cpu_isset(cpu, cpu_callin_map))
+			if (cpumask_test_cpu(cpu, cpu_callin_mask))
 				break;	/* It has booted */
 			udelay(100);
 		}
 
-		if (cpu_isset(cpu, cpu_callin_map)) {
+		if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
 			/* number CPUs logically, starting from 1 (BSP is 0) */
 			pr_debug("OK.\n");
 			printk(KERN_INFO "CPU%d: ", cpu);
@@ -941,9 +937,14 @@
 	if (boot_error) {
 		/* Try to put things back the way they were before ... */
 		numa_remove_cpu(cpu); /* was set by numa_add_cpu */
-		cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */
-		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
-		cpu_clear(cpu, cpu_present_map);
+
+		/* was set by do_boot_cpu() */
+		cpumask_clear_cpu(cpu, cpu_callout_mask);
+
+		/* was set by cpu_init() */
+		cpumask_clear_cpu(cpu, cpu_initialized_mask);
+
+		set_cpu_present(cpu, false);
 		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
 	}
 
@@ -977,7 +978,7 @@
 	/*
 	 * Already booted CPU?
 	 */
-	if (cpu_isset(cpu, cpu_callin_map)) {
+	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
 		pr_debug("do_boot_cpu %d Already started\n", cpu);
 		return -ENOSYS;
 	}
@@ -1032,8 +1033,9 @@
  */
 static __init void disable_smp(void)
 {
-	cpu_present_map = cpumask_of_cpu(0);
-	cpu_possible_map = cpumask_of_cpu(0);
+	/* use the read/write pointers to the present and possible maps */
+	cpumask_copy(&cpu_present_map, cpumask_of(0));
+	cpumask_copy(&cpu_possible_map, cpumask_of(0));
 	smpboot_clear_io_apic_irqs();
 
 	if (smp_found_config)
@@ -1041,8 +1043,8 @@
 	else
 		physid_set_mask_of_physid(0, &phys_cpu_present_map);
 	map_cpu_to_logical_apicid();
-	cpu_set(0, per_cpu(cpu_sibling_map, 0));
-	cpu_set(0, per_cpu(cpu_core_map, 0));
+	cpumask_set_cpu(0, cpu_sibling_mask(0));
+	cpumask_set_cpu(0, cpu_core_mask(0));
 }
 
 /*
@@ -1064,14 +1066,14 @@
 		nr = 0;
 		for_each_present_cpu(cpu) {
 			if (nr >= 8)
-				cpu_clear(cpu, cpu_present_map);
+				set_cpu_present(cpu, false);
 			nr++;
 		}
 
 		nr = 0;
 		for_each_possible_cpu(cpu) {
 			if (nr >= 8)
-				cpu_clear(cpu, cpu_possible_map);
+				set_cpu_possible(cpu, false);
 			nr++;
 		}
 
@@ -1167,7 +1169,7 @@
 	preempt_disable();
 	smp_cpu_index_default();
 	current_cpu_data = boot_cpu_data;
-	cpu_callin_map = cpumask_of_cpu(0);
+	cpumask_copy(cpu_callin_mask, cpumask_of(0));
 	mb();
 	/*
 	 * Setup boot CPU information
@@ -1242,8 +1244,8 @@
 	init_gdt(me);
 #endif
 	switch_to_new_gdt();
-	/* already set me in cpu_online_map in boot_cpu_init() */
-	cpu_set(me, cpu_callout_map);
+	/* already set me in cpu_online_mask in boot_cpu_init() */
+	cpumask_set_cpu(me, cpu_callout_mask);
 	per_cpu(cpu_state, me) = CPU_ONLINE;
 }
 
@@ -1311,7 +1313,7 @@
 		possible, max_t(int, possible - num_processors, 0));
 
 	for (i = 0; i < possible; i++)
-		cpu_set(i, cpu_possible_map);
+		set_cpu_possible(i, true);
 
 	nr_cpu_ids = possible;
 }
@@ -1323,31 +1325,31 @@
 	int sibling;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
-		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+	for_each_cpu(sibling, cpu_core_mask(cpu)) {
+		cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
 		/*/
 		 * last thread sibling in this cpu core going down
 		 */
-		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
+		if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
 			cpu_data(sibling).booted_cores--;
 	}
 
-	for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
-		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-	cpus_clear(per_cpu(cpu_sibling_map, cpu));
-	cpus_clear(per_cpu(cpu_core_map, cpu));
+	for_each_cpu(sibling, cpu_sibling_mask(cpu))
+		cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
+	cpumask_clear(cpu_sibling_mask(cpu));
+	cpumask_clear(cpu_core_mask(cpu));
 	c->phys_proc_id = 0;
 	c->cpu_core_id = 0;
-	cpu_clear(cpu, cpu_sibling_setup_map);
+	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
 }
 
 static void __ref remove_cpu_from_maps(int cpu)
 {
-	cpu_clear(cpu, cpu_online_map);
-	cpu_clear(cpu, cpu_callout_map);
-	cpu_clear(cpu, cpu_callin_map);
+	set_cpu_online(cpu, false);
+	cpumask_clear_cpu(cpu, cpu_callout_mask);
+	cpumask_clear_cpu(cpu, cpu_callin_mask);
 	/* was set by cpu_init() */
-	cpu_clear(cpu, cpu_initialized);
+	cpumask_clear_cpu(cpu, cpu_initialized_mask);
 	numa_remove_cpu(cpu);
 }
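
A minimal sketch of the API correspondence applied throughout this file, assuming the
generic cpumask helpers (cpumask_set_cpu(), cpumask_clear_cpu(), set_cpu_present(),
set_cpu_online()) used above:

	static void cpumask_api_example(int cpu)
	{
		cpumask_set_cpu(cpu, cpu_callout_mask);		/* was cpu_set(cpu, cpu_callout_map) */
		cpumask_clear_cpu(cpu, cpu_initialized_mask);	/* was cpu_clear(cpu, cpu_initialized) */
		set_cpu_present(cpu, false);			/* was cpu_clear(cpu, cpu_present_map) */
		set_cpu_online(cpu, false);			/* was cpu_clear(cpu, cpu_online_map) */
	}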
 
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index d44395f..e2e86a0 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -88,7 +88,7 @@
 	.long sys_uselib
 	.long sys_swapon
 	.long sys_reboot
-	.long old_readdir
+	.long sys_old_readdir
 	.long old_mmap		/* 90 */
 	.long sys_munmap
 	.long sys_truncate
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index 65309e4..3985cac 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -105,8 +105,8 @@
 		high bit of the PPI port B (0x61).  Note that some PS/2s,
 		notably the 55SX, work fine if this is removed.  */
 
-		u8 irq_v = inb_p( 0x61 );	/* read the current state */
-		outb_p( irq_v|0x80, 0x61 );	/* reset the IRQ */
+		u8 irq_v = inb_p(0x61);		/* read the current state */
+		outb_p(irq_v | 0x80, 0x61);	/* reset the IRQ */
 	}
 #endif
 
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index 891e7a7..e6e695a 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -17,10 +17,10 @@
 #include <linux/module.h>
 #include <linux/time.h>
 #include <linux/mca.h>
+#include <linux/nmi.h>
 
 #include <asm/i8253.h>
 #include <asm/hpet.h>
-#include <asm/nmi.h>
 #include <asm/vgtod.h>
 #include <asm/time.h>
 #include <asm/timer.h>
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c9a666c..98c2d055 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -63,9 +63,6 @@
 #else
 #include <asm/processor-flags.h>
 #include <asm/arch_hooks.h>
-#include <asm/nmi.h>
-#include <asm/smp.h>
-#include <asm/io.h>
 #include <asm/traps.h>
 
 #include "cpu/mcheck/mce.h"
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 0c9667f..d801d06 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -176,33 +176,31 @@
  * No problem for Linux.
  */
 
-static void __init MP_processor_info(struct mpc_config_processor *m)
+static void __init MP_processor_info(struct mpc_cpu *m)
 {
 	int ver, logical_apicid;
 	physid_mask_t apic_cpus;
 
-	if (!(m->mpc_cpuflag & CPU_ENABLED))
+	if (!(m->cpuflag & CPU_ENABLED))
 		return;
 
-	logical_apicid = m->mpc_apicid;
+	logical_apicid = m->apicid;
 	printk(KERN_INFO "%sCPU #%d %u:%u APIC version %d\n",
-	       m->mpc_cpuflag & CPU_BOOTPROCESSOR ? "Bootup " : "",
-	       m->mpc_apicid,
-	       (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
-	       (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
-	       m->mpc_apicver);
+	       m->cpuflag & CPU_BOOTPROCESSOR ? "Bootup " : "",
+	       m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8,
+	       (m->cpufeature & CPU_MODEL_MASK) >> 4, m->apicver);
 
-	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR)
-		boot_cpu_physical_apicid = m->mpc_apicid;
+	if (m->cpuflag & CPU_BOOTPROCESSOR)
+		boot_cpu_physical_apicid = m->apicid;
 
-	ver = m->mpc_apicver;
-	if ((ver >= 0x14 && m->mpc_apicid >= 0xff) || m->mpc_apicid >= 0xf) {
+	ver = m->apicver;
+	if ((ver >= 0x14 && m->apicid >= 0xff) || m->apicid >= 0xf) {
 		printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
-			m->mpc_apicid, MAX_APICS);
+			m->apicid, MAX_APICS);
 		return;
 	}
 
-	apic_cpus = apicid_to_cpu_present(m->mpc_apicid);
+	apic_cpus = apicid_to_cpu_present(m->apicid);
 	physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus);
 	/*
 	 * Validate version
@@ -210,15 +208,15 @@
 	if (ver == 0x0) {
 		printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! "
 			"fixing up to 0x10. (tell your hw vendor)\n",
-			m->mpc_apicid);
+			m->apicid);
 		ver = 0x10;
 	}
-	apic_version[m->mpc_apicid] = ver;
+	apic_version[m->apicid] = ver;
 }
 
 static int __init visws_find_smp_config(unsigned int reserve)
 {
-	struct mpc_config_processor *mp = phys_to_virt(CO_CPU_TAB_PHYS);
+	struct mpc_cpu *mp = phys_to_virt(CO_CPU_TAB_PHYS);
 	unsigned short ncpus = readw(phys_to_virt(CO_CPU_NUM_PHYS));
 
 	if (ncpus > CO_CPU_MAX) {
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c
index 4ba5cca..c2ded14 100644
--- a/arch/x86/mach-generic/es7000.c
+++ b/arch/x86/mach-generic/es7000.c
@@ -43,12 +43,12 @@
 	return;
 }
 
-static __init int mps_oem_check(struct mp_config_table *mpc, char *oem,
-		char *productid)
+static __init int
+mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
 {
-	if (mpc->mpc_oemptr) {
-		struct mp_config_oemtable *oem_table =
-			(struct mp_config_oemtable *)mpc->mpc_oemptr;
+	if (mpc->oemptr) {
+		struct mpc_oemtable *oem_table =
+			(struct mpc_oemtable *)mpc->oemptr;
 		if (!strncmp(oem, "UNISYS", 6))
 			return parse_unisys_oem((char *)oem_table);
 	}
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c
index 511d794..3679e22 100644
--- a/arch/x86/mach-generic/numaq.c
+++ b/arch/x86/mach-generic/numaq.c
@@ -19,8 +19,7 @@
 #include <asm/numaq/wakecpu.h>
 #include <asm/numaq.h>
 
-static int mps_oem_check(struct mp_config_table *mpc, char *oem,
-		char *productid)
+static int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
 {
 	numaq_mps_oem_check(mpc, oem, productid);
 	return found_numaq;
diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c
index c346d9d..15a38da 100644
--- a/arch/x86/mach-generic/probe.c
+++ b/arch/x86/mach-generic/probe.c
@@ -110,8 +110,7 @@
 
 /* These functions can switch the APIC even after the initial ->probe() */
 
-int __init mps_oem_check(struct mp_config_table *mpc, char *oem,
-				 char *productid)
+int __init mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
 {
 	int i;
 	for (i = 0; apic_probe[i]; ++i) {
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9e268b6b..90dfae5 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -534,7 +534,7 @@
 	   happen within a race in page table update. In the latter
 	   case just flush. */
 
-	pgd = pgd_offset(current->mm ?: &init_mm, address);
+	pgd = pgd_offset(current->active_mm, address);
 	pgd_ref = pgd_offset_k(address);
 	if (pgd_none(*pgd_ref))
 		return -1;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 544d724..88f1b10 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -328,6 +328,8 @@
 {
 	if (pagenr <= 256)
 		return 1;
+	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+		return 0;
 	if (!page_is_ram(pagenr))
 		return 1;
 	return 0;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 54c437e..23f68e7 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -888,6 +888,8 @@
 {
 	if (pagenr <= 256)
 		return 1;
+	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+		return 0;
 	if (!page_is_ram(pagenr))
 		return 1;
 	return 0;
diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c
index 41f1b5c..268f825 100644
--- a/arch/x86/mm/k8topology_64.c
+++ b/arch/x86/mm/k8topology_64.c
@@ -81,7 +81,6 @@
 	unsigned numnodes, cores, bits, apicid_base;
 	unsigned long prevbase;
 	struct bootnode nodes[8];
-	unsigned char nodeids[8];
 	int i, j, nb, found = 0;
 	u32 nodeid, reg;
 
@@ -110,7 +109,6 @@
 		limit = read_pci_config(0, nb, 1, 0x44 + i*8);
 
 		nodeid = limit & 7;
-		nodeids[i] = nodeid;
 		if ((base & 3) == 0) {
 			if (i < numnodes)
 				printk("Skipping disabled node %d\n", i);
@@ -179,9 +177,6 @@
 
 		nodes[nodeid].start = base;
 		nodes[nodeid].end = limit;
-		e820_register_active_regions(nodeid,
-				nodes[nodeid].start >> PAGE_SHIFT,
-				nodes[nodeid].end >> PAGE_SHIFT);
 
 		prevbase = base;
 
@@ -211,12 +206,15 @@
 	}
 
 	for (i = 0; i < 8; i++) {
-		if (nodes[i].start != nodes[i].end) {
-			nodeid = nodeids[i];
-			for (j = apicid_base; j < cores + apicid_base; j++)
-				apicid_to_node[(nodeid << bits) + j] = i;
-			setup_node_bootmem(i, nodes[i].start, nodes[i].end);
-		}
+		if (nodes[i].start == nodes[i].end)
+			continue;
+
+		e820_register_active_regions(i,
+				nodes[i].start >> PAGE_SHIFT,
+				nodes[i].end >> PAGE_SHIFT);
+		for (j = apicid_base; j < cores + apicid_base; j++)
+			apicid_to_node[(i << bits) + j] = i;
+		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 	}
 
 	numa_init_array();
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 8518c67..d1f7439 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -239,7 +239,7 @@
 		start_pfn = node_remap_start_pfn[node];
 		size = node_remap_size[node];
 
-		printk(KERN_DEBUG "%s: node %d\n", __FUNCTION__, node);
+		printk(KERN_DEBUG "%s: node %d\n", __func__, node);
 
 		for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
 			unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
@@ -251,7 +251,7 @@
 						PAGE_KERNEL_LARGE_EXEC));
 
 			printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
-				__FUNCTION__, vaddr, start_pfn + pfn);
+				__func__, vaddr, start_pfn + pfn);
 		}
 	}
 }
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 85cbd3c..8b08fb9 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -601,12 +601,13 @@
  * Reserved non RAM regions only and after successful reserve_memtype,
  * this func also keeps identity mapping (if any) in sync with this new prot.
  */
-static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
+static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+				int strict_prot)
 {
 	int is_ram = 0;
 	int id_sz, ret;
 	unsigned long flags;
-	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
+	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
 	is_ram = pagerange_is_ram(paddr, paddr + size);
 
@@ -625,15 +626,24 @@
 		return ret;
 
 	if (flags != want_flags) {
-		free_memtype(paddr, paddr + size);
-		printk(KERN_ERR
-		"%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n",
-			current->comm, current->pid,
-			cattr_name(want_flags),
-			(unsigned long long)paddr,
-			(unsigned long long)(paddr + size),
-			cattr_name(flags));
-		return -EINVAL;
+		if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
+			free_memtype(paddr, paddr + size);
+			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
+				" for %Lx-%Lx, got %s\n",
+				current->comm, current->pid,
+				cattr_name(want_flags),
+				(unsigned long long)paddr,
+				(unsigned long long)(paddr + size),
+				cattr_name(flags));
+			return -EINVAL;
+		}
+		/*
+		 * We allow returning a different type than the one requested
+		 * in the non-strict case.
+		 */
+		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
+				      (~_PAGE_CACHE_MASK)) |
+				     flags);
 	}
 
 	/* Need to keep identity mapping in sync */
@@ -689,6 +699,7 @@
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
+	pgprot_t pgprot;
 
 	if (!pat_enabled)
 		return 0;
@@ -702,7 +713,8 @@
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
-		return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
+		pgprot = __pgprot(prot);
+		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
 	}
 
 	/* reserve entire vma page by page, using pfn and prot from pte */
@@ -710,7 +722,8 @@
 		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
 			continue;
 
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
+		pgprot = __pgprot(prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
 		if (retval)
 			goto cleanup_ret;
 	}
@@ -741,7 +754,7 @@
  * Note that this function can be called with caller trying to map only a
  * subrange/page inside the vma.
  */
-int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
 	int retval = 0;
@@ -758,14 +771,14 @@
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot);
+		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
 	/* reserve page by page using pfn and size */
 	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
 		if (retval)
 			goto cleanup_ret;
 	}
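
With the prototype change above, callers pass the protection by reference so the
non-strict path can hand back the memory type actually reserved; a sketch of the
expected call pattern (caller context assumed, not shown in this hunk):

	static int example_map_pfn(struct vm_area_struct *vma,
				   unsigned long pfn, unsigned long size)
	{
		pgprot_t prot = vma->vm_page_prot;
		int err;

		err = track_pfn_vma_new(vma, &prot, pfn, size);
		if (!err)
			vma->vm_page_prot = prot;	/* may have been relaxed */
		return err;
	}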
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 98658f2..8fdf06e 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -2,7 +2,7 @@
  * @file op_model_amd.c
  * athlon / K7 / K8 / Family 10h model-specific MSR operations
  *
- * @remark Copyright 2002-2008 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon
@@ -10,7 +10,7 @@
  * @author Graydon Hoare
  * @author Robert Richter <robert.richter@amd.com>
  * @author Barry Kasindorf
-*/
+ */
 
 #include <linux/oprofile.h>
 #include <linux/device.h>
@@ -60,53 +60,10 @@
 #define IBS_OP_LOW_VALID_BIT		(1ULL<<18)	/* bit 18 */
 #define IBS_OP_LOW_ENABLE		(1ULL<<17)	/* bit 17 */
 
-/* Codes used in cpu_buffer.c */
-/* This produces duplicate code, need to be fixed */
-#define IBS_FETCH_BEGIN 3
-#define IBS_OP_BEGIN    4
+#define IBS_FETCH_SIZE	6
+#define IBS_OP_SIZE	12
 
-/*
- * The function interface needs to be fixed, something like add
- * data. Should then be added to linux/oprofile.h.
- */
-extern void
-oprofile_add_ibs_sample(struct pt_regs * const regs,
-			unsigned int * const ibs_sample, int ibs_code);
-
-struct ibs_fetch_sample {
-	/* MSRC001_1031 IBS Fetch Linear Address Register */
-	unsigned int ibs_fetch_lin_addr_low;
-	unsigned int ibs_fetch_lin_addr_high;
-	/* MSRC001_1030 IBS Fetch Control Register */
-	unsigned int ibs_fetch_ctl_low;
-	unsigned int ibs_fetch_ctl_high;
-	/* MSRC001_1032 IBS Fetch Physical Address Register */
-	unsigned int ibs_fetch_phys_addr_low;
-	unsigned int ibs_fetch_phys_addr_high;
-};
-
-struct ibs_op_sample {
-	/* MSRC001_1034 IBS Op Logical Address Register (IbsRIP) */
-	unsigned int ibs_op_rip_low;
-	unsigned int ibs_op_rip_high;
-	/* MSRC001_1035 IBS Op Data Register */
-	unsigned int ibs_op_data1_low;
-	unsigned int ibs_op_data1_high;
-	/* MSRC001_1036 IBS Op Data 2 Register */
-	unsigned int ibs_op_data2_low;
-	unsigned int ibs_op_data2_high;
-	/* MSRC001_1037 IBS Op Data 3 Register */
-	unsigned int ibs_op_data3_low;
-	unsigned int ibs_op_data3_high;
-	/* MSRC001_1038 IBS DC Linear Address Register (IbsDcLinAd) */
-	unsigned int ibs_dc_linear_low;
-	unsigned int ibs_dc_linear_high;
-	/* MSRC001_1039 IBS DC Physical Address Register (IbsDcPhysAd) */
-	unsigned int ibs_dc_phys_low;
-	unsigned int ibs_dc_phys_high;
-};
-
-static int ibs_allowed;	/* AMD Family10h and later */
+static int has_ibs;	/* AMD Family10h and later */
 
 struct op_ibs_config {
 	unsigned long op_enabled;
@@ -197,31 +154,29 @@
 op_amd_handle_ibs(struct pt_regs * const regs,
 		  struct op_msrs const * const msrs)
 {
-	unsigned int low, high;
-	struct ibs_fetch_sample ibs_fetch;
-	struct ibs_op_sample ibs_op;
+	u32 low, high;
+	u64 msr;
+	struct op_entry entry;
 
-	if (!ibs_allowed)
+	if (!has_ibs)
 		return 1;
 
 	if (ibs_config.fetch_enabled) {
 		rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 		if (high & IBS_FETCH_HIGH_VALID_BIT) {
-			ibs_fetch.ibs_fetch_ctl_high = high;
-			ibs_fetch.ibs_fetch_ctl_low = low;
-			rdmsr(MSR_AMD64_IBSFETCHLINAD, low, high);
-			ibs_fetch.ibs_fetch_lin_addr_high = high;
-			ibs_fetch.ibs_fetch_lin_addr_low = low;
-			rdmsr(MSR_AMD64_IBSFETCHPHYSAD, low, high);
-			ibs_fetch.ibs_fetch_phys_addr_high = high;
-			ibs_fetch.ibs_fetch_phys_addr_low = low;
-
-			oprofile_add_ibs_sample(regs,
-						(unsigned int *)&ibs_fetch,
-						IBS_FETCH_BEGIN);
+			rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr);
+			oprofile_write_reserve(&entry, regs, msr,
+					       IBS_FETCH_CODE, IBS_FETCH_SIZE);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			oprofile_add_data(&entry, low);
+			oprofile_add_data(&entry, high);
+			rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			oprofile_write_commit(&entry);
 
 			/* reenable the IRQ */
-			rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 			high &= ~IBS_FETCH_HIGH_VALID_BIT;
 			high |= IBS_FETCH_HIGH_ENABLE;
 			low &= IBS_FETCH_LOW_MAX_CNT_MASK;
@@ -232,30 +187,29 @@
 	if (ibs_config.op_enabled) {
 		rdmsr(MSR_AMD64_IBSOPCTL, low, high);
 		if (low & IBS_OP_LOW_VALID_BIT) {
-			rdmsr(MSR_AMD64_IBSOPRIP, low, high);
-			ibs_op.ibs_op_rip_low = low;
-			ibs_op.ibs_op_rip_high = high;
-			rdmsr(MSR_AMD64_IBSOPDATA, low, high);
-			ibs_op.ibs_op_data1_low = low;
-			ibs_op.ibs_op_data1_high = high;
-			rdmsr(MSR_AMD64_IBSOPDATA2, low, high);
-			ibs_op.ibs_op_data2_low = low;
-			ibs_op.ibs_op_data2_high = high;
-			rdmsr(MSR_AMD64_IBSOPDATA3, low, high);
-			ibs_op.ibs_op_data3_low = low;
-			ibs_op.ibs_op_data3_high = high;
-			rdmsr(MSR_AMD64_IBSDCLINAD, low, high);
-			ibs_op.ibs_dc_linear_low = low;
-			ibs_op.ibs_dc_linear_high = high;
-			rdmsr(MSR_AMD64_IBSDCPHYSAD, low, high);
-			ibs_op.ibs_dc_phys_low = low;
-			ibs_op.ibs_dc_phys_high = high;
+			rdmsrl(MSR_AMD64_IBSOPRIP, msr);
+			oprofile_write_reserve(&entry, regs, msr,
+					       IBS_OP_CODE, IBS_OP_SIZE);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSOPDATA, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSOPDATA2, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSOPDATA3, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSDCLINAD, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr);
+			oprofile_add_data(&entry, (u32)msr);
+			oprofile_add_data(&entry, (u32)(msr >> 32));
+			oprofile_write_commit(&entry);
 
 			/* reenable the IRQ */
-			oprofile_add_ibs_sample(regs,
-						(unsigned int *)&ibs_op,
-						IBS_OP_BEGIN);
-			rdmsr(MSR_AMD64_IBSOPCTL, low, high);
 			high = 0;
 			low &= ~IBS_OP_LOW_VALID_BIT;
 			low |= IBS_OP_LOW_ENABLE;
@@ -305,14 +259,14 @@
 	}
 
 #ifdef CONFIG_OPROFILE_IBS
-	if (ibs_allowed && ibs_config.fetch_enabled) {
+	if (has_ibs && ibs_config.fetch_enabled) {
 		low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
 		high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */
 			+ IBS_FETCH_HIGH_ENABLE;
 		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 	}
 
-	if (ibs_allowed && ibs_config.op_enabled) {
+	if (has_ibs && ibs_config.op_enabled) {
 		low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF)
 			+ ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */
 			+ IBS_OP_LOW_ENABLE;
@@ -341,14 +295,14 @@
 	}
 
 #ifdef CONFIG_OPROFILE_IBS
-	if (ibs_allowed && ibs_config.fetch_enabled) {
+	if (has_ibs && ibs_config.fetch_enabled) {
 		/* clear max count and enable */
 		low = 0;
 		high = 0;
 		wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
 	}
 
-	if (ibs_allowed && ibs_config.op_enabled) {
+	if (has_ibs && ibs_config.op_enabled) {
 		/* clear max count and enable */
 		low = 0;
 		high = 0;
@@ -409,6 +363,7 @@
 				       | IBSCTL_LVTOFFSETVAL);
 		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
 		if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
+			pci_dev_put(cpu_cfg);
 			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
 				"IBSCTL = 0x%08x", value);
 			return 1;
@@ -436,20 +391,20 @@
 /* uninitialize the APIC for the IBS interrupts if needed */
 static void clear_ibs_nmi(void)
 {
-	if (ibs_allowed)
+	if (has_ibs)
 		on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
 }
 
 /* initialize the APIC for the IBS interrupts if available */
 static void ibs_init(void)
 {
-	ibs_allowed = boot_cpu_has(X86_FEATURE_IBS);
+	has_ibs = boot_cpu_has(X86_FEATURE_IBS);
 
-	if (!ibs_allowed)
+	if (!has_ibs)
 		return;
 
 	if (init_ibs_nmi()) {
-		ibs_allowed = 0;
+		has_ibs = 0;
 		return;
 	}
 
@@ -458,7 +413,7 @@
 
 static void ibs_exit(void)
 {
-	if (!ibs_allowed)
+	if (!has_ibs)
 		return;
 
 	clear_ibs_nmi();
@@ -478,7 +433,7 @@
 	if (ret)
 		return ret;
 
-	if (!ibs_allowed)
+	if (!has_ibs)
 		return ret;
 
 	/* model specific files */
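
Both the fetch and op paths above repeat the same split of each 64-bit MSR value into
two 32-bit sample words; a hypothetical helper (name invented, not part of the patch)
capturing that pattern:

	static void ibs_add_msr(struct op_entry *entry, u64 val)
	{
		oprofile_add_data(entry, (u32)val);		/* low 32 bits */
		oprofile_add_data(entry, (u32)(val >> 32));	/* high 32 bits */
	}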
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 9e5752f..c0ecf25 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -210,11 +210,10 @@
 	if (bus && node != -1) {
 #ifdef CONFIG_ACPI_NUMA
 		if (pxm >= 0)
-			printk(KERN_DEBUG "bus %02x -> pxm %d -> node %d\n",
-				busnum, pxm, node);
+			dev_printk(KERN_DEBUG, &bus->dev,
+				   "on NUMA node %d (pxm %d)\n", node, pxm);
 #else
-		printk(KERN_DEBUG "bus %02x -> node %d\n",
-			busnum, node);
+		dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
 #endif
 	}
 
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 62ddb73..82d22fc 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -551,17 +551,25 @@
 	if ((err = pci_enable_resources(dev, mask)) < 0)
 		return err;
 
-	if (!dev->msi_enabled)
+	if (!pci_dev_msi_enabled(dev))
 		return pcibios_enable_irq(dev);
 	return 0;
 }
 
 void pcibios_disable_device (struct pci_dev *dev)
 {
-	if (!dev->msi_enabled && pcibios_disable_irq)
+	if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
 		pcibios_disable_irq(dev);
 }
 
+int pci_ext_cfg_avail(struct pci_dev *dev)
+{
+	if (raw_pci_ext_ops)
+		return 1;
+	else
+		return 0;
+}
+
 struct pci_bus * __devinit pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node)
 {
 	struct pci_bus *bus = NULL;
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index e51bf2c..5ead808 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -129,7 +129,7 @@
 				pr = pci_find_parent_resource(dev, r);
 				if (!r->start || !pr ||
 				    request_resource(pr, r) < 0) {
-					dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx);
+					dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx);
 					/*
 					 * Something is wrong with the region.
 					 * Invalidate the resource to prevent
@@ -170,7 +170,7 @@
 					r->flags, disabled, pass);
 				pr = pci_find_parent_resource(dev, r);
 				if (!pr || request_resource(pr, r) < 0) {
-					dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx);
+					dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx);
 					/* We'll assign a new address later */
 					r->end -= r->start;
 					r->start = 0;
@@ -314,17 +314,7 @@
 		return retval;
 
 	if (flags != new_flags) {
-		/*
-		 * Do not fallback to certain memory types with certain
-		 * requested type:
-		 * - request is uncached, return cannot be write-back
-		 * - request is uncached, return cannot be write-combine
-		 * - request is write-combine, return cannot be write-back
-		 */
-		if ((flags == _PAGE_CACHE_UC_MINUS &&
-		     (new_flags == _PAGE_CACHE_WB)) ||
-		    (flags == _PAGE_CACHE_WC &&
-		     new_flags == _PAGE_CACHE_WB)) {
+		if (!is_new_memtype_allowed(flags, new_flags)) {
 			free_memtype(addr, addr+len);
 			return -EINVAL;
 		}
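
The open-coded fallback rules removed here move into is_new_memtype_allowed(), which
the pat.c hunk above also uses; based on the conditions deleted in this hunk, the
helper is assumed to check roughly:

	static inline int is_new_memtype_allowed(unsigned long flags,
						 unsigned long new_flags)
	{
		/* A UC- or WC request must not fall back to a WB mapping. */
		if ((flags == _PAGE_CACHE_UC_MINUS && new_flags == _PAGE_CACHE_WB) ||
		    (flags == _PAGE_CACHE_WC && new_flags == _PAGE_CACHE_WB))
			return 0;
		return 1;
	}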
diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c
index bec3b04..25a1f8e 100644
--- a/arch/x86/pci/init.c
+++ b/arch/x86/pci/init.c
@@ -12,7 +12,8 @@
 	type = pci_direct_probe();
 #endif
 
-	pci_mmcfg_early_init();
+	if (!(pci_probe & PCI_PROBE_NOEARLY))
+		pci_mmcfg_early_init();
 
 #ifdef CONFIG_PCI_OLPC
 	if (!pci_olpc_init())
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 373b9af..4064345 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -533,7 +533,7 @@
 {
 	struct pci_dev *bridge;
 	int pin = pci_get_interrupt_pin(dev, &bridge);
-	return pcibios_set_irq_routing(bridge, pin, irq);
+	return pcibios_set_irq_routing(bridge, pin - 1, irq);
 }
 
 #endif
@@ -887,7 +887,6 @@
 		dev_dbg(&dev->dev, "no interrupt pin\n");
 		return 0;
 	}
-	pin = pin - 1;
 
 	/* Find IRQ routing entry */
 
@@ -897,17 +896,17 @@
 	info = pirq_get_info(dev);
 	if (!info) {
 		dev_dbg(&dev->dev, "PCI INT %c not found in routing table\n",
-			'A' + pin);
+			'A' + pin - 1);
 		return 0;
 	}
-	pirq = info->irq[pin].link;
-	mask = info->irq[pin].bitmap;
+	pirq = info->irq[pin - 1].link;
+	mask = info->irq[pin - 1].bitmap;
 	if (!pirq) {
-		dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + pin);
+		dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + pin - 1);
 		return 0;
 	}
 	dev_dbg(&dev->dev, "PCI INT %c -> PIRQ %02x, mask %04x, excl %04x",
-		'A' + pin, pirq, mask, pirq_table->exclusive_irqs);
+		'A' + pin - 1, pirq, mask, pirq_table->exclusive_irqs);
 	mask &= pcibios_irq_mask;
 
 	/* Work around broken HP Pavilion Notebooks which assign USB to
@@ -949,7 +948,7 @@
 				newirq = i;
 		}
 	}
-	dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + pin, newirq);
+	dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + pin - 1, newirq);
 
 	/* Check if it is hardcoded */
 	if ((pirq & 0xf0) == 0xf0) {
@@ -977,18 +976,18 @@
 			return 0;
 		}
 	}
-	dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin, irq);
+	dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin - 1, irq);
 
 	/* Update IRQ for all devices with the same pirq value */
 	while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) {
 		pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin);
 		if (!pin)
 			continue;
-		pin--;
+
 		info = pirq_get_info(dev2);
 		if (!info)
 			continue;
-		if (info->irq[pin].link == pirq) {
+		if (info->irq[pin - 1].link == pirq) {
 			/*
 			 * We refuse to override the dev->irq
 			 * information. Give a warning!
@@ -1042,6 +1041,9 @@
 	dev = NULL;
 	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
 		pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+		if (!pin)
+			continue;
+
 #ifdef CONFIG_X86_IO_APIC
 		/*
 		 * Recalculate IRQ numbers if we use the I/O APIC.
@@ -1049,15 +1051,11 @@
 		if (io_apic_assign_pci_irqs) {
 			int irq;
 
-			if (!pin)
-				continue;
-
 			/*
 			 * interrupt pins are numbered starting from 1
 			 */
-			pin--;
 			irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
-				PCI_SLOT(dev->devfn), pin);
+				PCI_SLOT(dev->devfn), pin - 1);
 			/*
 			 * Busses behind bridges are typically not listed in the
 			 * MP-table.  In this case we have to look up the IRQ
@@ -1070,22 +1068,22 @@
 				struct pci_dev *bridge = dev->bus->self;
 				int bus;
 
-				pin = (pin + PCI_SLOT(dev->devfn)) % 4;
+				pin = pci_swizzle_interrupt_pin(dev, pin);
 				bus = bridge->bus->number;
 				irq = IO_APIC_get_PCI_irq_vector(bus,
-						PCI_SLOT(bridge->devfn), pin);
+						PCI_SLOT(bridge->devfn), pin - 1);
 				if (irq >= 0)
 					dev_warn(&dev->dev,
 						"using bridge %s INT %c to "
 							"get IRQ %d\n",
 						 pci_name(bridge),
-						 'A' + pin, irq);
+						 'A' + pin - 1, irq);
 			}
 			if (irq >= 0) {
 				dev_info(&dev->dev,
 					"PCI->APIC IRQ transform: INT %c "
 						"-> IRQ %d\n",
-					'A' + pin, irq);
+					'A' + pin - 1, irq);
 				dev->irq = irq;
 			}
 		}
@@ -1093,7 +1091,7 @@
 		/*
 		 * Still no IRQ? Try to lookup one...
 		 */
-		if (pin && !dev->irq)
+		if (!dev->irq)
 			pcibios_lookup_irq(dev, 0);
 	}
 }
@@ -1220,12 +1218,10 @@
 	if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
 		char *msg = "";
 
-		pin--; /* interrupt pins are numbered starting from 1 */
-
 		if (io_apic_assign_pci_irqs) {
 			int irq;
 
-			irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
+			irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin - 1);
 			/*
 			 * Busses behind bridges are typically not listed in the MP-table.
 			 * In this case we have to look up the IRQ based on the parent bus,
@@ -1236,20 +1232,20 @@
 			while (irq < 0 && dev->bus->parent) { /* go back to the bridge */
 				struct pci_dev *bridge = dev->bus->self;
 
-				pin = (pin + PCI_SLOT(dev->devfn)) % 4;
+				pin = pci_swizzle_interrupt_pin(dev, pin);
 				irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
-						PCI_SLOT(bridge->devfn), pin);
+						PCI_SLOT(bridge->devfn), pin - 1);
 				if (irq >= 0)
 					dev_warn(&dev->dev, "using bridge %s "
 						 "INT %c to get IRQ %d\n",
-						 pci_name(bridge), 'A' + pin,
+						 pci_name(bridge), 'A' + pin - 1,
 						 irq);
 				dev = bridge;
 			}
 			dev = temp_dev;
 			if (irq >= 0) {
 				dev_info(&dev->dev, "PCI->APIC IRQ transform: "
-					 "INT %c -> IRQ %d\n", 'A' + pin, irq);
+					 "INT %c -> IRQ %d\n", 'A' + pin - 1, irq);
 				dev->irq = irq;
 				return 0;
 			} else
@@ -1268,7 +1264,7 @@
 			return 0;
 
 		dev_warn(&dev->dev, "can't find IRQ for PCI INT %c%s\n",
-			 'A' + pin, msg);
+			 'A' + pin - 1, msg);
 	}
 	return 0;
 }
diff --git a/arch/x86/pci/visws.c b/arch/x86/pci/visws.c
index 16d0c0e..bcead7a 100644
--- a/arch/x86/pci/visws.c
+++ b/arch/x86/pci/visws.c
@@ -24,24 +24,6 @@
 
 unsigned int pci_bus0, pci_bus1;
 
-static inline u8 bridge_swizzle(u8 pin, u8 slot) 
-{
-	return (((pin - 1) + slot) % 4) + 1;
-}
-
-static u8 __init visws_swizzle(struct pci_dev *dev, u8 *pinp)
-{
-	u8 pin = *pinp;
-
-	while (dev->bus->self) {	/* Move up the chain of bridges. */
-		pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
-		dev = dev->bus->self;
-	}
-	*pinp = pin;
-
-	return PCI_SLOT(dev->devfn);
-}
-
 static int __init visws_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 {
 	int irq, bus = dev->bus->number;
@@ -106,7 +88,7 @@
 	raw_pci_ops = &pci_direct_conf1;
 	pci_scan_bus_with_sysdata(pci_bus0);
 	pci_scan_bus_with_sysdata(pci_bus1);
-	pci_fixup_irqs(visws_swizzle, visws_map_irq);
+	pci_fixup_irqs(pci_common_swizzle, visws_map_irq);
 	pcibios_resource_survey();
 	return 0;
 }
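
The helpers deleted above are superseded by the generic pci_common_swizzle() (and
pci_swizzle_interrupt_pin() in the irq.c hunks above); assuming they apply the usual
bridge swizzle, the per-bridge step is equivalent to the removed code:

	static u8 bridge_swizzle_sketch(u8 pin, u8 slot)
	{
		/* rotate a 1-based INTx pin across one PCI-PCI bridge */
		return (((pin - 1) + slot) % 4) + 1;
	}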
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 015b6b2..1da55fe 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -33,6 +33,15 @@
 
 KBUILD_CFLAGS += -pipe -mlongcalls
 
+vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y))
+plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y))
+
+ifeq ($(KBUILD_SRC),)
+KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(vardirs) $(plfdirs))
+else
+KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(vardirs) $(plfdirs))
+endif
+
 KBUILD_DEFCONFIG := iss_defconfig
 
 # ramdisk/initrd support
@@ -66,21 +75,6 @@
 
 boot		:= arch/xtensa/boot
 
-archinc		:= include/asm-xtensa
-
-archprepare: $(archinc)/.platform
-
-# Update processor variant and platform symlinks if something which affects
-# them changed.
-
-$(archinc)/.platform: $(wildcard include/config/arch/*.h) include/config/auto.conf
-	@echo '  SYMLINK $(archinc)/variant -> $(archinc)/variant-$(VARIANT)'
-	$(Q)mkdir -p $(archinc)
-	$(Q)ln -fsn $(srctree)/$(archinc)/variant-$(VARIANT) $(archinc)/variant
-	@echo '  SYMLINK $(archinc)/platform -> $(archinc)/platform-$(PLATFORM)'
-	$(Q)ln -fsn $(srctree)/$(archinc)/platform-$(PLATFORM) $(archinc)/platform
-	@touch $@
-
 
 all: zImage
 
@@ -89,10 +83,6 @@
 zImage zImage.initrd: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $@
 
-CLEAN_FILES	+= arch/xtensa/vmlinux.lds                      \
-		   $(archinc)/platform $(archinc)/variant	\
-		   $(archinc)/.platform
-
 define archhelp
   @echo '* zImage      - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
 endef
diff --git a/arch/xtensa/boot/boot-elf/boot.lds.S b/arch/xtensa/boot/boot-elf/boot.lds.S
index 849dfca..4e53b74 100644
--- a/arch/xtensa/boot/boot-elf/boot.lds.S
+++ b/arch/xtensa/boot/boot-elf/boot.lds.S
@@ -1,4 +1,4 @@
-#include <asm/variant/core.h>
+#include <variant/core.h>
 OUTPUT_ARCH(xtensa)
 ENTRY(_ResetVector)
 
diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S
index 8484812..5582e8c 100644
--- a/arch/xtensa/boot/boot-redboot/bootstrap.S
+++ b/arch/xtensa/boot/boot-redboot/bootstrap.S
@@ -1,4 +1,4 @@
-#include <asm/variant/core.h>
+#include <variant/core.h>
 #include <asm/regs.h>
 #include <asm/asmmacro.h>
 #include <asm/cacheasm.h>
diff --git a/include/asm-xtensa/Kbuild b/arch/xtensa/include/asm/Kbuild
similarity index 100%
rename from include/asm-xtensa/Kbuild
rename to arch/xtensa/include/asm/Kbuild
diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h
new file mode 100644
index 0000000..755320f
--- /dev/null
+++ b/arch/xtensa/include/asm/asmmacro.h
@@ -0,0 +1,153 @@
+/*
+ * include/asm-xtensa/asmmacro.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ASMMACRO_H
+#define _XTENSA_ASMMACRO_H
+
+#include <variant/core.h>
+
+/*
+ * Some little helpers for loops. Use zero-overhead-loops
+ * where applicable and if supported by the processor.
+ *
+ * __loopi ar, at, size, inc
+ *         ar	register initialized with the start address
+ *	   at	scratch register used by macro
+ *	   size	size immediate value
+ *	   inc	increment
+ *
+ * __loops ar, as, at, inc_log2[, mask_log2][, cond][, ncond]
+ *	   ar	register initialized with the start address
+ *	   as	register initialized with the size
+ *	   at	scratch register used by macro
+ *	   inc_log2	increment [in log2]
+ *	   mask_log2	mask [in log2]
+ *	   cond		true condition (used in loop'cond')
+ *	   ncond	false condition (used in b'ncond')
+ *
+ * __loop  as
+ *	   restart loop. 'as' register must not have been modified!
+ *
+ * __endla ar, as, incr
+ *	   ar	start address (modified)
+ *	   as	scratch register used by macro
+ *	   inc	increment
+ */
+
+/*
+ * loop for given size as immediate
+ */
+
+	.macro	__loopi ar, at, size, incr
+
+#if XCHAL_HAVE_LOOPS
+		movi	\at, ((\size + \incr - 1) / (\incr))
+		loop	\at, 99f
+#else
+		addi	\at, \ar, \size
+		98:
+#endif
+
+	.endm
+
+/*
+ * loop for given size in register
+ */
+
+	.macro	__loops	ar, as, at, incr_log2, mask_log2, cond, ncond
+
+#if XCHAL_HAVE_LOOPS
+		.ifgt \incr_log2 - 1
+			addi	\at, \as, (1 << \incr_log2) - 1
+			.ifnc \mask_log2,
+				extui	\at, \at, \incr_log2, \mask_log2
+			.else
+				srli	\at, \at, \incr_log2
+			.endif
+		.endif
+		loop\cond	\at, 99f
+#else
+		.ifnc \mask_log2,
+			extui	\at, \as, \incr_log2, \mask_log2
+		.else
+			.ifnc \ncond,
+				srli	\at, \as, \incr_log2
+			.endif
+		.endif
+		.ifnc \ncond,
+			b\ncond	\at, 99f
+
+		.endif
+		.ifnc \mask_log2,
+			slli	\at, \at, \incr_log2
+			add	\at, \ar, \at
+		.else
+			add	\at, \ar, \as
+		.endif
+#endif
+		98:
+
+	.endm
+
+/*
+ * loop from ar to ax
+ */
+
+	.macro	__loopt	ar, as, at, incr_log2
+
+#if XCHAL_HAVE_LOOPS
+		sub	\at, \as, \ar
+		.ifgt	\incr_log2 - 1
+			addi	\at, \at, (1 << \incr_log2) - 1
+			srli	\at, \at, \incr_log2
+		.endif
+		loop	\at, 99f
+#else
+		98:
+#endif
+
+	.endm
+
+/*
+ * restart loop. registers must be unchanged
+ */
+
+	.macro	__loop	as
+
+#if XCHAL_HAVE_LOOPS
+		loop	\as, 99f
+#else
+		98:
+#endif
+
+	.endm
+
+/*
+ * end of loop with no increment of the address.
+ */
+
+	.macro	__endl	ar, as
+#if !XCHAL_HAVE_LOOPS
+		bltu	\ar, \as, 98b
+#endif
+		99:
+	.endm
+
+/*
+ * end of loop with increment of the address.
+ */
+
+	.macro	__endla	ar, as, incr
+		addi	\ar, \ar, \incr
+		__endl	\ar \as
+	.endm
+
+
+#endif /* _XTENSA_ASMMACRO_H */
diff --git a/include/asm-xtensa/atomic.h b/arch/xtensa/include/asm/atomic.h
similarity index 100%
rename from include/asm-xtensa/atomic.h
rename to arch/xtensa/include/asm/atomic.h
diff --git a/include/asm-xtensa/auxvec.h b/arch/xtensa/include/asm/auxvec.h
similarity index 100%
rename from include/asm-xtensa/auxvec.h
rename to arch/xtensa/include/asm/auxvec.h
diff --git a/include/asm-xtensa/bitops.h b/arch/xtensa/include/asm/bitops.h
similarity index 100%
rename from include/asm-xtensa/bitops.h
rename to arch/xtensa/include/asm/bitops.h
diff --git a/include/asm-xtensa/bootparam.h b/arch/xtensa/include/asm/bootparam.h
similarity index 100%
rename from include/asm-xtensa/bootparam.h
rename to arch/xtensa/include/asm/bootparam.h
diff --git a/include/asm-xtensa/bug.h b/arch/xtensa/include/asm/bug.h
similarity index 100%
rename from include/asm-xtensa/bug.h
rename to arch/xtensa/include/asm/bug.h
diff --git a/include/asm-xtensa/bugs.h b/arch/xtensa/include/asm/bugs.h
similarity index 100%
rename from include/asm-xtensa/bugs.h
rename to arch/xtensa/include/asm/bugs.h
diff --git a/arch/xtensa/include/asm/byteorder.h b/arch/xtensa/include/asm/byteorder.h
new file mode 100644
index 0000000..54eb631
--- /dev/null
+++ b/arch/xtensa/include/asm/byteorder.h
@@ -0,0 +1,12 @@
+#ifndef _XTENSA_BYTEORDER_H
+#define _XTENSA_BYTEORDER_H
+
+#ifdef __XTENSA_EL__
+#include <linux/byteorder/little_endian.h>
+#elif defined(__XTENSA_EB__)
+#include <linux/byteorder/big_endian.h>
+#else
+# error processor byte order undefined!
+#endif
+
+#endif /* _XTENSA_BYTEORDER_H */
diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
new file mode 100644
index 0000000..f04c989
--- /dev/null
+++ b/arch/xtensa/include/asm/cache.h
@@ -0,0 +1,33 @@
+/*
+ * include/asm-xtensa/cache.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CACHE_H
+#define _XTENSA_CACHE_H
+
+#include <variant/core.h>
+
+#define L1_CACHE_SHIFT	XCHAL_DCACHE_LINEWIDTH
+#define L1_CACHE_BYTES	XCHAL_DCACHE_LINESIZE
+#define SMP_CACHE_BYTES	L1_CACHE_BYTES
+
+#define DCACHE_WAY_SIZE	(XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)
+#define ICACHE_WAY_SIZE	(XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS)
+#define DCACHE_WAY_SHIFT (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH)
+#define ICACHE_WAY_SHIFT (XCHAL_ICACHE_SETWIDTH + XCHAL_ICACHE_LINEWIDTH)
+
+/* Maximum cache size per way. */
+#if DCACHE_WAY_SIZE >= ICACHE_WAY_SIZE
+# define CACHE_WAY_SIZE DCACHE_WAY_SIZE
+#else
+# define CACHE_WAY_SIZE ICACHE_WAY_SIZE
+#endif
+
+
+#endif	/* _XTENSA_CACHE_H */
diff --git a/include/asm-xtensa/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
similarity index 100%
rename from include/asm-xtensa/cacheasm.h
rename to arch/xtensa/include/asm/cacheasm.h
diff --git a/include/asm-xtensa/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
similarity index 100%
rename from include/asm-xtensa/cacheflush.h
rename to arch/xtensa/include/asm/cacheflush.h
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
new file mode 100644
index 0000000..f84d3f0
--- /dev/null
+++ b/arch/xtensa/include/asm/checksum.h
@@ -0,0 +1,250 @@
+/*
+ * include/asm-xtensa/checksum.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CHECKSUM_H
+#define _XTENSA_CHECKSUM_H
+
+#include <linux/in6.h>
+#include <variant/core.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums, and handles user-space pointer exceptions correctly, when needed.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum,
+						   int *src_err_ptr, int *dst_err_ptr);
+
+/*
+ *	Note: when you get a NULL pointer exception here this means someone
+ *	passed in an incorrect kernel address to one of these functions.
+ *
+ *	If you use these functions directly please don't forget the access_ok().
+ */
+static inline
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+					int len, __wsum sum)
+{
+	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+}
+
+static inline
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+						int len, __wsum sum, int *err_ptr)
+{
+	return csum_partial_copy_generic((__force const void *)src, dst,
+					len, sum, err_ptr, NULL);
+}
+
+/*
+ *	Fold a partial checksum
+ */
+
+static __inline__ __sum16 csum_fold(__wsum sum)
+{
+	unsigned int __dummy;
+	__asm__("extui	%1, %0, 16, 16\n\t"
+		"extui	%0 ,%0, 0, 16\n\t"
+		"add	%0, %0, %1\n\t"
+		"slli	%1, %0, 16\n\t"
+		"add	%0, %0, %1\n\t"
+		"extui	%0, %0, 16, 16\n\t"
+		"neg	%0, %0\n\t"
+		"addi	%0, %0, -1\n\t"
+		"extui	%0, %0, 0, 16\n\t"
+		: "=r" (sum), "=&r" (__dummy)
+		: "0" (sum));
+	return (__force __sum16)sum;
+}
+
+/*
+ *	This is a version of ip_compute_csum() optimized for IP headers,
+ *	which always checksum on 4 octet boundaries.
+ */
+static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+	unsigned int sum, tmp, endaddr;
+
+	__asm__ __volatile__(
+		"sub		%0, %0, %0\n\t"
+#if XCHAL_HAVE_LOOPS
+		"loopgtz	%2, 2f\n\t"
+#else
+		"beqz		%2, 2f\n\t"
+		"slli		%4, %2, 2\n\t"
+		"add		%4, %4, %1\n\t"
+		"0:\t"
+#endif
+		"l32i		%3, %1, 0\n\t"
+		"add		%0, %0, %3\n\t"
+		"bgeu		%0, %3, 1f\n\t"
+		"addi		%0, %0, 1\n\t"
+		"1:\t"
+		"addi		%1, %1, 4\n\t"
+#if !XCHAL_HAVE_LOOPS
+		"blt		%1, %4, 0b\n\t"
+#endif
+		"2:\t"
+	/* Since the input registers which are loaded with iph and ihl
+	   are modified, we must also specify them as outputs, or gcc
+	   will assume they contain their original values. */
+		: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), "=&r" (endaddr)
+		: "1" (iph), "2" (ihl));
+
+	return	csum_fold(sum);
+}
+
+static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+						   unsigned short len,
+						   unsigned short proto,
+						   __wsum sum)
+{
+
+#ifdef __XTENSA_EL__
+	unsigned long len_proto = (len + proto) << 8;
+#elif defined(__XTENSA_EB__)
+	unsigned long len_proto = len + proto;
+#else
+# error processor byte order undefined!
+#endif
+	__asm__("add	%0, %0, %1\n\t"
+		"bgeu	%0, %1, 1f\n\t"
+		"addi	%0, %0, 1\n\t"
+		"1:\t"
+		"add	%0, %0, %2\n\t"
+		"bgeu	%0, %2, 1f\n\t"
+		"addi	%0, %0, 1\n\t"
+		"1:\t"
+		"add	%0, %0, %3\n\t"
+		"bgeu	%0, %3, 1f\n\t"
+		"addi	%0, %0, 1\n\t"
+		"1:\t"
+		: "=r" (sum), "=r" (len_proto)
+		: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum));
+	return sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+						       unsigned short len,
+						       unsigned short proto,
+						       __wsum sum)
+{
+	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
+{
+    return csum_fold (csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+					  const struct in6_addr *daddr,
+					  __u32 len, unsigned short proto,
+					  __wsum sum)
+{
+	unsigned int __dummy;
+	__asm__("l32i	%1, %2, 0\n\t"
+		"add	%0, %0, %1\n\t"
+		"bgeu	%0, %1, 1f\n\t"
+		"addi	%0, %0, 1\n\t"
+		"1:\t"
+		"l32i	%1, %2, 4\n\t"
+		"add	%0, %0, %1\n\t"
+		"bgeu	%0, %1, 1f\n\t"
+		"addi	%0, %0, 1\n\t"
+		"1:\t"
+		"l32i	%1, %2, 8\n\t"
+		"add	%0, %0, %1\n\t"
+		"bgeu	%0, %1, 1f\n\t"
+		"addi	%0, %0, 1\n\t"
+		"1:\t"
+		"l32i	%1, %2, 12\n\t"
+		"add	%0, %0, %1\n\t"
+		"bgeu	%0, %1, 1f\n\t"
+		"addi	%0, %0, 1\n\t"
+		"1:\t"
+		"l32i	%1, %3, 0\n\t"
+		"add	%0, %0, %1\n\t"
+		"bgeu	%0, %1, 1f\n\t"
+		"addi	%0, %0, 1\n\t"
+		"1:\t"
+		"l32i	%1, %3, 4\n\t"
+		"add	%0, %0, %1\n\t"
+		"bgeu	%0, %1, 1f\n\t"
+		"addi	%0, %0, 1\n\t"
+		"1:\t"
+		"l32i	%1, %3, 8\n\t"
+		"add	%0, %0, %1\n\t"
+		"bgeu	%0, %1, 1f\n\t"
+		"addi	%0, %0, 1\n\t"
+		"1:\t"
+		"l32i	%1, %3, 12\n\t"
+		"add	%0, %0, %1\n\t"
+		"bgeu	%0, %1, 1f\n\t"
+		"addi	%0, %0, 1\n\t"
+		"1:\t"
+		"add	%0, %0, %4\n\t"
+		"bgeu	%0, %4, 1f\n\t"
+		"addi	%0, %0, 1\n\t"
+		"1:\t"
+		"add	%0, %0, %5\n\t"
+		"bgeu	%0, %5, 1f\n\t"
+		"addi	%0, %0, 1\n\t"
+		"1:\t"
+		: "=r" (sum), "=&r" (__dummy)
+		: "r" (saddr), "r" (daddr),
+		  "r" (htonl(len)), "r" (htonl(proto)), "0" (sum));
+
+	return csum_fold(sum);
+}
+
+/*
+ *	Copy and checksum to user
+ */
+#define HAVE_CSUM_COPY_USER
+static __inline__ __wsum csum_and_copy_to_user(const void *src, void __user *dst,
+				    int len, __wsum sum, int *err_ptr)
+{
+	if (access_ok(VERIFY_WRITE, dst, len))
+		return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr);
+
+	if (len)
+		*err_ptr = -EFAULT;
+
+	return (__force __wsum)-1; /* invalid checksum */
+}
+#endif
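
csum_fold() above does the fold in Xtensa assembly; a portable C rendering of the same
operation, for illustration only (not part of the header):

	static inline __sum16 csum_fold_ref(__wsum csum)
	{
		u32 sum = (__force u32)csum;

		sum = (sum & 0xffff) + (sum >> 16);	/* add the two halves */
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the carry */
		return (__force __sum16)~sum;		/* ones' complement */
	}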
diff --git a/arch/xtensa/include/asm/coprocessor.h b/arch/xtensa/include/asm/coprocessor.h
new file mode 100644
index 0000000..65a285d
--- /dev/null
+++ b/arch/xtensa/include/asm/coprocessor.h
@@ -0,0 +1,177 @@
+/*
+ * include/asm-xtensa/coprocessor.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2007 Tensilica Inc.
+ */
+
+
+#ifndef _XTENSA_COPROCESSOR_H
+#define _XTENSA_COPROCESSOR_H
+
+#include <linux/stringify.h>
+#include <variant/tie.h>
+#include <asm/types.h>
+
+#ifdef __ASSEMBLY__
+# include <variant/tie-asm.h>
+
+.macro	xchal_sa_start  a b
+	.set .Lxchal_pofs_, 0
+	.set .Lxchal_ofs_, 0
+.endm
+
+.macro	xchal_sa_align  ptr minofs maxofs ofsalign totalign
+	.set	.Lxchal_ofs_, .Lxchal_ofs_ + .Lxchal_pofs_ + \totalign - 1
+	.set	.Lxchal_ofs_, (.Lxchal_ofs_ & -\totalign) - .Lxchal_pofs_
+.endm
+
+#define _SELECT	(  XTHAL_SAS_TIE | XTHAL_SAS_OPT \
+		 | XTHAL_SAS_CC \
+		 | XTHAL_SAS_CALR | XTHAL_SAS_CALE )
+
+.macro save_xtregs_opt ptr clb at1 at2 at3 at4 offset
+	.if XTREGS_OPT_SIZE > 0
+		addi	\clb, \ptr, \offset
+		xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
+	.endif
+.endm
+
+.macro load_xtregs_opt ptr clb at1 at2 at3 at4 offset
+	.if XTREGS_OPT_SIZE > 0
+		addi	\clb, \ptr, \offset
+		xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
+	.endif
+.endm
+#undef _SELECT
+
+#define _SELECT	(  XTHAL_SAS_TIE | XTHAL_SAS_OPT \
+		 | XTHAL_SAS_NOCC \
+		 | XTHAL_SAS_CALR | XTHAL_SAS_CALE | XTHAL_SAS_GLOB )
+
+.macro save_xtregs_user ptr clb at1 at2 at3 at4 offset
+	.if XTREGS_USER_SIZE > 0
+		addi	\clb, \ptr, \offset
+		xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
+	.endif
+.endm
+
+.macro load_xtregs_user ptr clb at1 at2 at3 at4 offset
+	.if XTREGS_USER_SIZE > 0
+		addi	\clb, \ptr, \offset
+		xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
+	.endif
+.endm
+#undef _SELECT
+
+
+
+#endif	/* __ASSEMBLY__ */
+
+/*
+ * XTENSA_HAVE_COPROCESSOR(x) returns 1 if coprocessor x is configured.
+ *
+ * XTENSA_HAVE_IO_PORT(x) returns 1 if io-port x is configured.
+ *
+ */
+
+#define XTENSA_HAVE_COPROCESSOR(x)					\
+	((XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK) & (1 << (x)))
+#define XTENSA_HAVE_COPROCESSORS					\
+	(XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK)
+#define XTENSA_HAVE_IO_PORT(x)						\
+	(XCHAL_CP_PORT_MASK & (1 << (x)))
+#define XTENSA_HAVE_IO_PORTS						\
+	XCHAL_CP_PORT_MASK
+
+#ifndef __ASSEMBLY__
+
+
+#if XCHAL_HAVE_CP
+
+#define RSR_CPENABLE(x)	do {						  \
+	__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
+	} while(0);
+#define WSR_CPENABLE(x)	do {						  \
+  	__asm__ __volatile__("wsr %0," __stringify(CPENABLE) "; rsync" 	  \
+	    		     :: "a" (x));				  \
+	} while(0);
+
+#endif /* XCHAL_HAVE_CP */
+
+
+/*
+ * Additional registers.
+ * We define three types of additional registers:
+ *  ext: extra registers that are used by the compiler
+ *  cpn: optional registers that can be used by a user application
+ *  cpX: coprocessor registers that can only be used if the corresponding
+ *       CPENABLE bit is set.
+ */
+
+#define XCHAL_SA_REG(list,cc,abi,type,y,name,z,align,size,...)	\
+	__REG ## list (cc, abi, type, name, size, align)
+
+#define __REG0(cc,abi,t,name,s,a)	__REG0_ ## cc (abi,name)
+#define __REG1(cc,abi,t,name,s,a)	__REG1_ ## cc (name)
+#define __REG2(cc,abi,type,...)		__REG2_ ## type (__VA_ARGS__)
+
+#define __REG0_0(abi,name)
+#define __REG0_1(abi,name)		__REG0_1 ## abi (name)
+#define __REG0_10(name)	__u32 name;
+#define __REG0_11(name)	__u32 name;
+#define __REG0_12(name)
+
+#define __REG1_0(name)	__u32 name;
+#define __REG1_1(name)
+
+#define __REG2_0(n,s,a)	__u32 name;
+#define __REG2_1(n,s,a)	unsigned char n[s] __attribute__ ((aligned(a)));
+#define __REG2_2(n,s,a) unsigned char n[s] __attribute__ ((aligned(a)));
+
+typedef struct { XCHAL_NCP_SA_LIST(0) } xtregs_opt_t
+	__attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
+typedef struct { XCHAL_NCP_SA_LIST(1) } xtregs_user_t
+	__attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
+
+#if XTENSA_HAVE_COPROCESSORS
+
+typedef struct { XCHAL_CP0_SA_LIST(2) } xtregs_cp0_t
+	__attribute__ ((aligned (XCHAL_CP0_SA_ALIGN)));
+typedef struct { XCHAL_CP1_SA_LIST(2) } xtregs_cp1_t
+	__attribute__ ((aligned (XCHAL_CP1_SA_ALIGN)));
+typedef struct { XCHAL_CP2_SA_LIST(2) } xtregs_cp2_t
+	__attribute__ ((aligned (XCHAL_CP2_SA_ALIGN)));
+typedef struct { XCHAL_CP3_SA_LIST(2) } xtregs_cp3_t
+	__attribute__ ((aligned (XCHAL_CP3_SA_ALIGN)));
+typedef struct { XCHAL_CP4_SA_LIST(2) } xtregs_cp4_t
+	__attribute__ ((aligned (XCHAL_CP4_SA_ALIGN)));
+typedef struct { XCHAL_CP5_SA_LIST(2) } xtregs_cp5_t
+	__attribute__ ((aligned (XCHAL_CP5_SA_ALIGN)));
+typedef struct { XCHAL_CP6_SA_LIST(2) } xtregs_cp6_t
+	__attribute__ ((aligned (XCHAL_CP6_SA_ALIGN)));
+typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t
+	__attribute__ ((aligned (XCHAL_CP7_SA_ALIGN)));
+
+extern struct thread_info* coprocessor_owner[XCHAL_CP_MAX];
+extern void coprocessor_save(void*, int);
+extern void coprocessor_load(void*, int);
+extern void coprocessor_flush(struct thread_info*, int);
+extern void coprocessor_restore(struct thread_info*, int);
+
+extern void coprocessor_release_all(struct thread_info*);
+extern void coprocessor_flush_all(struct thread_info*);
+
+static inline void coprocessor_clear_cpenable(void)
+{
+	unsigned long i = 0;
+	WSR_CPENABLE(i);
+}
+
+#endif	/* XTENSA_HAVE_COPROCESSORS */
+
+#endif	/* !__ASSEMBLY__ */
+#endif	/* _XTENSA_COPROCESSOR_H */
diff --git a/include/asm-xtensa/cpumask.h b/arch/xtensa/include/asm/cpumask.h
similarity index 100%
rename from include/asm-xtensa/cpumask.h
rename to arch/xtensa/include/asm/cpumask.h
diff --git a/include/asm-xtensa/cputime.h b/arch/xtensa/include/asm/cputime.h
similarity index 100%
rename from include/asm-xtensa/cputime.h
rename to arch/xtensa/include/asm/cputime.h
diff --git a/include/asm-xtensa/current.h b/arch/xtensa/include/asm/current.h
similarity index 100%
rename from include/asm-xtensa/current.h
rename to arch/xtensa/include/asm/current.h
diff --git a/include/asm-xtensa/delay.h b/arch/xtensa/include/asm/delay.h
similarity index 100%
rename from include/asm-xtensa/delay.h
rename to arch/xtensa/include/asm/delay.h
diff --git a/include/asm-xtensa/device.h b/arch/xtensa/include/asm/device.h
similarity index 100%
rename from include/asm-xtensa/device.h
rename to arch/xtensa/include/asm/device.h
diff --git a/include/asm-xtensa/div64.h b/arch/xtensa/include/asm/div64.h
similarity index 100%
rename from include/asm-xtensa/div64.h
rename to arch/xtensa/include/asm/div64.h
diff --git a/include/asm-xtensa/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
similarity index 100%
rename from include/asm-xtensa/dma-mapping.h
rename to arch/xtensa/include/asm/dma-mapping.h
diff --git a/include/asm-xtensa/dma.h b/arch/xtensa/include/asm/dma.h
similarity index 100%
rename from include/asm-xtensa/dma.h
rename to arch/xtensa/include/asm/dma.h
diff --git a/include/asm-xtensa/elf.h b/arch/xtensa/include/asm/elf.h
similarity index 100%
rename from include/asm-xtensa/elf.h
rename to arch/xtensa/include/asm/elf.h
diff --git a/include/asm-xtensa/emergency-restart.h b/arch/xtensa/include/asm/emergency-restart.h
similarity index 100%
rename from include/asm-xtensa/emergency-restart.h
rename to arch/xtensa/include/asm/emergency-restart.h
diff --git a/include/asm-xtensa/errno.h b/arch/xtensa/include/asm/errno.h
similarity index 100%
rename from include/asm-xtensa/errno.h
rename to arch/xtensa/include/asm/errno.h
diff --git a/include/asm-xtensa/fb.h b/arch/xtensa/include/asm/fb.h
similarity index 100%
rename from include/asm-xtensa/fb.h
rename to arch/xtensa/include/asm/fb.h
diff --git a/include/asm-xtensa/fcntl.h b/arch/xtensa/include/asm/fcntl.h
similarity index 100%
rename from include/asm-xtensa/fcntl.h
rename to arch/xtensa/include/asm/fcntl.h
diff --git a/include/asm-xtensa/futex.h b/arch/xtensa/include/asm/futex.h
similarity index 100%
rename from include/asm-xtensa/futex.h
rename to arch/xtensa/include/asm/futex.h
diff --git a/include/asm-xtensa/hardirq.h b/arch/xtensa/include/asm/hardirq.h
similarity index 100%
rename from include/asm-xtensa/hardirq.h
rename to arch/xtensa/include/asm/hardirq.h
diff --git a/include/asm-xtensa/highmem.h b/arch/xtensa/include/asm/highmem.h
similarity index 100%
rename from include/asm-xtensa/highmem.h
rename to arch/xtensa/include/asm/highmem.h
diff --git a/include/asm-xtensa/hw_irq.h b/arch/xtensa/include/asm/hw_irq.h
similarity index 100%
rename from include/asm-xtensa/hw_irq.h
rename to arch/xtensa/include/asm/hw_irq.h
diff --git a/include/asm-xtensa/io.h b/arch/xtensa/include/asm/io.h
similarity index 100%
rename from include/asm-xtensa/io.h
rename to arch/xtensa/include/asm/io.h
diff --git a/include/asm-xtensa/ioctl.h b/arch/xtensa/include/asm/ioctl.h
similarity index 100%
rename from include/asm-xtensa/ioctl.h
rename to arch/xtensa/include/asm/ioctl.h
diff --git a/include/asm-xtensa/ioctls.h b/arch/xtensa/include/asm/ioctls.h
similarity index 100%
rename from include/asm-xtensa/ioctls.h
rename to arch/xtensa/include/asm/ioctls.h
diff --git a/include/asm-xtensa/ipcbuf.h b/arch/xtensa/include/asm/ipcbuf.h
similarity index 100%
rename from include/asm-xtensa/ipcbuf.h
rename to arch/xtensa/include/asm/ipcbuf.h
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
new file mode 100644
index 0000000..1620d1e
--- /dev/null
+++ b/arch/xtensa/include/asm/irq.h
@@ -0,0 +1,30 @@
+/*
+ * include/asm-xtensa/irq.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_IRQ_H
+#define _XTENSA_IRQ_H
+
+#include <platform/hardware.h>
+#include <variant/core.h>
+
+#ifndef PLATFORM_NR_IRQS
+# define PLATFORM_NR_IRQS 0
+#endif
+#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
+#define NR_IRQS (XTENSA_NR_IRQS + PLATFORM_NR_IRQS)
+
+static __inline__ int irq_canonicalize(int irq)
+{
+	return (irq);
+}
+
+struct irqaction;
+
+#endif	/* _XTENSA_IRQ_H */
diff --git a/include/asm-xtensa/irq_regs.h b/arch/xtensa/include/asm/irq_regs.h
similarity index 100%
rename from include/asm-xtensa/irq_regs.h
rename to arch/xtensa/include/asm/irq_regs.h
diff --git a/include/asm-xtensa/kdebug.h b/arch/xtensa/include/asm/kdebug.h
similarity index 100%
rename from include/asm-xtensa/kdebug.h
rename to arch/xtensa/include/asm/kdebug.h
diff --git a/include/asm-xtensa/kmap_types.h b/arch/xtensa/include/asm/kmap_types.h
similarity index 100%
rename from include/asm-xtensa/kmap_types.h
rename to arch/xtensa/include/asm/kmap_types.h
diff --git a/include/asm-xtensa/linkage.h b/arch/xtensa/include/asm/linkage.h
similarity index 100%
rename from include/asm-xtensa/linkage.h
rename to arch/xtensa/include/asm/linkage.h
diff --git a/include/asm-xtensa/local.h b/arch/xtensa/include/asm/local.h
similarity index 100%
rename from include/asm-xtensa/local.h
rename to arch/xtensa/include/asm/local.h
diff --git a/include/asm-xtensa/mman.h b/arch/xtensa/include/asm/mman.h
similarity index 100%
rename from include/asm-xtensa/mman.h
rename to arch/xtensa/include/asm/mman.h
diff --git a/include/asm-xtensa/mmu.h b/arch/xtensa/include/asm/mmu.h
similarity index 100%
rename from include/asm-xtensa/mmu.h
rename to arch/xtensa/include/asm/mmu.h
diff --git a/include/asm-xtensa/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
similarity index 100%
rename from include/asm-xtensa/mmu_context.h
rename to arch/xtensa/include/asm/mmu_context.h
diff --git a/include/asm-xtensa/module.h b/arch/xtensa/include/asm/module.h
similarity index 100%
rename from include/asm-xtensa/module.h
rename to arch/xtensa/include/asm/module.h
diff --git a/include/asm-xtensa/msgbuf.h b/arch/xtensa/include/asm/msgbuf.h
similarity index 100%
rename from include/asm-xtensa/msgbuf.h
rename to arch/xtensa/include/asm/msgbuf.h
diff --git a/include/asm-xtensa/mutex.h b/arch/xtensa/include/asm/mutex.h
similarity index 100%
rename from include/asm-xtensa/mutex.h
rename to arch/xtensa/include/asm/mutex.h
diff --git a/include/asm-xtensa/page.h b/arch/xtensa/include/asm/page.h
similarity index 100%
rename from include/asm-xtensa/page.h
rename to arch/xtensa/include/asm/page.h
diff --git a/include/asm-xtensa/param.h b/arch/xtensa/include/asm/param.h
similarity index 100%
rename from include/asm-xtensa/param.h
rename to arch/xtensa/include/asm/param.h
diff --git a/include/asm-xtensa/pci-bridge.h b/arch/xtensa/include/asm/pci-bridge.h
similarity index 100%
rename from include/asm-xtensa/pci-bridge.h
rename to arch/xtensa/include/asm/pci-bridge.h
diff --git a/include/asm-xtensa/pci.h b/arch/xtensa/include/asm/pci.h
similarity index 100%
rename from include/asm-xtensa/pci.h
rename to arch/xtensa/include/asm/pci.h
diff --git a/include/asm-xtensa/percpu.h b/arch/xtensa/include/asm/percpu.h
similarity index 100%
rename from include/asm-xtensa/percpu.h
rename to arch/xtensa/include/asm/percpu.h
diff --git a/include/asm-xtensa/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
similarity index 100%
rename from include/asm-xtensa/pgalloc.h
rename to arch/xtensa/include/asm/pgalloc.h
diff --git a/include/asm-xtensa/pgtable.h b/arch/xtensa/include/asm/pgtable.h
similarity index 100%
rename from include/asm-xtensa/pgtable.h
rename to arch/xtensa/include/asm/pgtable.h
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
new file mode 100644
index 0000000..e3d5a48
--- /dev/null
+++ b/arch/xtensa/include/asm/platform.h
@@ -0,0 +1,89 @@
+/*
+ * Platform specific functions
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PLATFORM_H
+#define _XTENSA_PLATFORM_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include <asm/bootparam.h>
+
+/*
+ * platform_init is called before the MMU is initialized to give the
+ * platform an early hook-up. bp_tag_t is a list of configuration tags
+ * passed from the boot loader.
+ */
+extern void platform_init(bp_tag_t*);
+
+/*
+ * platform_setup is called from setup_arch with a pointer to the command-line
+ * string.
+ */
+extern void platform_setup (char **);
+
+/*
+ * platform_init_irq is called from init_IRQ.
+ */
+extern void platform_init_irq (void);
+
+/*
+ * platform_restart is called to restart the system.
+ */
+extern void platform_restart (void);
+
+/*
+ * platform_halt is called to stop the system and halt.
+ */
+extern void platform_halt (void);
+
+/*
+ * platform_power_off is called to stop the system and power it off.
+ */
+extern void platform_power_off (void);
+
+/*
+ * platform_idle is called from the idle function.
+ */
+extern void platform_idle (void);
+
+/*
+ * platform_heartbeat is called every HZ
+ */
+extern void platform_heartbeat (void);
+
+/*
+ * platform_pcibios_init is called to allow the platform to set up the PCI bus.
+ */
+extern void platform_pcibios_init (void);
+
+/*
+ * platform_pcibios_fixup allows the platform to modify the PCI configuration.
+ */
+extern int platform_pcibios_fixup (void);
+
+/*
+ * platform_calibrate_ccount calibrates cpu clock freq (CONFIG_XTENSA_CALIBRATE)
+ */
+extern void platform_calibrate_ccount (void);
+
+/*
+ * platform_get_rtc_time returns RTC seconds (returns 0 for no error)
+ */
+extern int platform_get_rtc_time(time_t*);
+
+/*
+ * platform_set_rtc_time sets RTC seconds (returns 0 for no error)
+ */
+extern int platform_set_rtc_time(time_t);
+
+
+#endif	/* _XTENSA_PLATFORM_H */
+
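These are the hooks a board port supplies. As a point of reference, here is a stripped-down sketch of the two simplest ones for a hypothetical board; this is not part of the patch (the real XT2000 implementations appear further down in this series):

/* Hypothetical board stubs, for illustration only. */
#include <linux/init.h>
#include <linux/irqflags.h>
#include <asm/platform.h>

void platform_halt(void)
{
	local_irq_disable();
	for (;;)
		;	/* no power controller to talk to, just spin */
}

void __init platform_setup(char **cmdline)
{
	/* nothing board-specific to patch into the command line */
}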
diff --git a/include/asm-xtensa/poll.h b/arch/xtensa/include/asm/poll.h
similarity index 100%
rename from include/asm-xtensa/poll.h
rename to arch/xtensa/include/asm/poll.h
diff --git a/include/asm-xtensa/posix_types.h b/arch/xtensa/include/asm/posix_types.h
similarity index 100%
rename from include/asm-xtensa/posix_types.h
rename to arch/xtensa/include/asm/posix_types.h
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
new file mode 100644
index 0000000..07387d3
--- /dev/null
+++ b/arch/xtensa/include/asm/processor.h
@@ -0,0 +1,193 @@
+/*
+ * include/asm-xtensa/processor.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PROCESSOR_H
+#define _XTENSA_PROCESSOR_H
+
+#include <variant/core.h>
+#include <asm/coprocessor.h>
+
+#include <linux/compiler.h>
+#include <asm/ptrace.h>
+#include <asm/types.h>
+#include <asm/regs.h>
+
+/* Assertions. */
+
+#if (XCHAL_HAVE_WINDOWED != 1)
+# error Linux requires the Xtensa Windowed Registers Option.
+#endif
+
+/*
+ * User space process size: 1 GB.
+ * Windowed call ABI requires caller and callee to be located within the same
+ * 1 GB region. The C compiler places trampoline code on the stack for sources
+ * that take the address of a nested C function (a feature used by glibc), so
+ * the 1 GB requirement applies to the stack as well.
+ */
+
+#define TASK_SIZE	__XTENSA_UL_CONST(0x40000000)
+#define STACK_TOP	TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
+
+/*
+ * General exception cause assigned to debug exceptions. Debug exceptions go
+ * to their own vector, rather than the general exception vectors (user,
+ * kernel, double); and their specific causes are reported via DEBUGCAUSE
+ * rather than EXCCAUSE.  However it is sometimes convenient to redirect debug
+ * exceptions to the general exception mechanism.  To do this, an otherwise
+ * unused EXCCAUSE value was assigned to debug exceptions for this purpose.
+ */
+
+#define EXCCAUSE_MAPPED_DEBUG	63
+
+/*
+ * We use DEPC also as a flag to distinguish between double and regular
+ * exceptions. For performance reasons, DEPC might contain the value of
+ * EXCCAUSE for regular exceptions, so we use this definition to mark a
+ * valid double exception address.
+ * (Note: We use it in bgeui, so it should be 64, 128, or 256)
+ */
+
+#define VALID_DOUBLE_EXCEPTION_ADDRESS	64
+
+/* LOCKLEVEL defines the interrupt level that masks all
+ * general-purpose interrupts.
+ */
+#define LOCKLEVEL 1
+
+/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
+ * registers
+ */
+#define WSBITS  (XCHAL_NUM_AREGS / 4)      /* width of WINDOWSTART in bits */
+#define WBBITS  (XCHAL_NUM_AREGS_LOG2 - 2) /* width of WINDOWBASE in bits */
+
+#ifndef __ASSEMBLY__
+
+/* Build a valid return address for the specified call winsize.
+ * winsize must be 1 (call4), 2 (call8), or 3 (call12)
+ */
+#define MAKE_RA_FOR_CALL(ra,ws)   (((ra) & 0x3fffffff) | (ws) << 30)
+
+/* Convert return address to a valid pc
+ * Note: We assume that the stack pointer is in the same 1 GB range as the ra
+ */
+#define MAKE_PC_FROM_RA(ra,sp)    (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
+
+typedef struct {
+    unsigned long seg;
+} mm_segment_t;
+
+struct thread_struct {
+
+	/* kernel's return address and stack pointer for context switching */
+	unsigned long ra; /* kernel's a0: return address and window call size */
+	unsigned long sp; /* kernel's a1: stack pointer */
+
+	mm_segment_t current_ds;    /* see uaccess.h for example uses */
+
+	/* struct xtensa_cpuinfo info; */
+
+	unsigned long bad_vaddr; /* last user fault */
+	unsigned long bad_uaddr; /* last kernel fault accessing user space */
+	unsigned long error_code;
+
+	unsigned long ibreak[XCHAL_NUM_IBREAK];
+	unsigned long dbreaka[XCHAL_NUM_DBREAK];
+	unsigned long dbreakc[XCHAL_NUM_DBREAK];
+
+	/* Make structure 16 bytes aligned. */
+	int align[0] __attribute__ ((aligned(16)));
+};
+
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr()  ({ __label__ _l; _l: &&_l;})
+
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE	(TASK_SIZE / 2)
+
+#define INIT_THREAD  \
+{									\
+	ra:		0, 						\
+	sp:		sizeof(init_stack) + (long) &init_stack,	\
+	current_ds:	{0},						\
+	/*info:		{0}, */						\
+	bad_vaddr:	0,						\
+	bad_uaddr:	0,						\
+	error_code:	0,						\
+}
+
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ * Note: We set up ps as if we did a call4 to the new pc.
+ *       set_thread_state in signal.c depends on it.
+ */
+#define USER_PS_VALUE ((1 << PS_WOE_BIT) |				\
+                       (1 << PS_CALLINC_SHIFT) |			\
+                       (USER_RING << PS_RING_SHIFT) |			\
+                       (1 << PS_UM_BIT) |				\
+                       (1 << PS_EXCM_BIT))
+
+/* Clearing a0 terminates the backtrace. */
+#define start_thread(regs, new_pc, new_sp) \
+	regs->pc = new_pc; \
+	regs->ps = USER_PS_VALUE; \
+	regs->areg[1] = new_sp; \
+	regs->areg[0] = 0; \
+	regs->wmask = 1; \
+	regs->depc = 0; \
+	regs->windowbase = 0; \
+	regs->windowstart = 1;
+
+/* Forward declaration */
+struct task_struct;
+struct mm_struct;
+
+/* Free all resources held by a thread. */
+#define release_thread(thread) do { } while(0)
+
+/* Prepare to copy thread state - unlazy all lazy status */
+extern void prepare_to_copy(struct task_struct*);
+
+/* Create a kernel thread without removing it from tasklists */
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+/* Copy and release all segment info associated with a VM */
+#define copy_segments(p, mm)	do { } while(0)
+#define release_segments(mm)	do { } while(0)
+#define forget_segments()	do { } while (0)
+
+#define thread_saved_pc(tsk)	(task_pt_regs(tsk)->pc)
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk)		(task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk)		(task_pt_regs(tsk)->areg[1])
+
+#define cpu_relax()  barrier()
+
+/* Special register access. */
+
+#define WSR(v,sr) __asm__ __volatile__ ("wsr %0,"__stringify(sr) :: "a"(v));
+#define RSR(v,sr) __asm__ __volatile__ ("rsr %0,"__stringify(sr) : "=a"(v));
+
+#define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);})
+#define get_sr(sr) ({unsigned int v; RSR(v,sr); v; })
+
+#endif	/* __ASSEMBLY__ */
+#endif	/* _XTENSA_PROCESSOR_H */
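The window-call return-address encoding above packs the call increment into the top two bits of a0 and relies on the stack pointer to recover the 1 GB region bits. A small user-space sketch (same masks as the macros, nothing kernel-specific; the sample addresses are made up) shows the round trip:

#include <stdio.h>

#define MAKE_RA_FOR_CALL(ra, ws)  (((ra) & 0x3fffffff) | (ws) << 30)
#define MAKE_PC_FROM_RA(ra, sp)   (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))

int main(void)
{
	unsigned long pc = 0x10001234, sp = 0x3ffff000;	/* same 1 GB region */
	unsigned long ra = MAKE_RA_FOR_CALL(pc, 1);	/* 1 == call4 */

	/* ra now carries the window increment in bits 31:30; combining it
	 * with the stack pointer's region bits gives back the original pc. */
	printf("ra=%#lx pc=%#lx\n", ra, MAKE_PC_FROM_RA(ra, sp));
	return 0;
}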
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
new file mode 100644
index 0000000..905e1e6
--- /dev/null
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -0,0 +1,135 @@
+/*
+ * include/asm-xtensa/ptrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PTRACE_H
+#define _XTENSA_PTRACE_H
+
+/*
+ * Kernel stack
+ *
+ * 		+-----------------------+  -------- STACK_SIZE
+ * 		|     register file     |  |
+ * 		+-----------------------+  |
+ * 		|    struct pt_regs     |  |
+ * 		+-----------------------+  | ------ PT_REGS_OFFSET
+ * double 	:  16 bytes spill area  :  |  ^
+ * exception	:- - - - - - - - - - - -:  |  |
+ * frame	:    struct pt_regs     :  |  |
+ * 		:- - - - - - - - - - - -:  |  |
+ * 		|                       |  |  |
+ * 		|     memory stack      |  |  |
+ * 		|                       |  |  |
+ * 		~                       ~  ~  ~
+ * 		~                       ~  ~  ~
+ * 		|                       |  |  |
+ * 		|                       |  |  |
+ * 		+-----------------------+  |  | --- STACK_BIAS
+ * 		|  struct task_struct   |  |  |  ^
+ *  current --> +-----------------------+  |  |  |
+ * 		|  struct thread_info   |  |  |  |
+ *		+-----------------------+ --------
+ */
+
+#define KERNEL_STACK_SIZE (2 * PAGE_SIZE)
+
+/*  Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */
+
+#define EXC_TABLE_KSTK		0x004	/* Kernel Stack */
+#define EXC_TABLE_DOUBLE_SAVE	0x008	/* Double exception save area for a0 */
+#define EXC_TABLE_FIXUP		0x00c	/* Fixup handler */
+#define EXC_TABLE_PARAM		0x010	/* For passing a parameter to fixup */
+#define EXC_TABLE_SYSCALL_SAVE	0x014	/* For fast syscall handler */
+#define EXC_TABLE_FAST_USER	0x100	/* Fast user exception handler */
+#define EXC_TABLE_FAST_KERNEL	0x200	/* Fast kernel exception handler */
+#define EXC_TABLE_DEFAULT	0x300	/* Default C-Handler */
+#define EXC_TABLE_SIZE		0x400
+
+/* Registers used by strace */
+
+#define REG_A_BASE	0x0000
+#define REG_AR_BASE	0x0100
+#define REG_PC		0x0020
+#define REG_PS		0x02e6
+#define REG_WB		0x0248
+#define REG_WS		0x0249
+#define REG_LBEG	0x0200
+#define REG_LEND	0x0201
+#define REG_LCOUNT	0x0202
+#define REG_SAR		0x0203
+
+#define SYSCALL_NR	0x00ff
+
+/* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */
+
+#define PTRACE_GETREGS		12
+#define PTRACE_SETREGS		13
+#define PTRACE_GETXTREGS	18
+#define PTRACE_SETXTREGS	19
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ */
+struct pt_regs {
+	unsigned long pc;		/*   4 */
+	unsigned long ps;		/*   8 */
+	unsigned long depc;		/*  12 */
+	unsigned long exccause;		/*  16 */
+	unsigned long excvaddr;		/*  20 */
+	unsigned long debugcause;	/*  24 */
+	unsigned long wmask;		/*  28 */
+	unsigned long lbeg;		/*  32 */
+	unsigned long lend;		/*  36 */
+	unsigned long lcount;		/*  40 */
+	unsigned long sar;		/*  44 */
+	unsigned long windowbase;	/*  48 */
+	unsigned long windowstart;	/*  52 */
+	unsigned long syscall;		/*  56 */
+	unsigned long icountlevel;	/*  60 */
+	int reserved[1];		/*  64 */
+
+	/* Additional configurable registers that are used by the compiler. */
+	xtregs_opt_t xtregs_opt;
+
+	/* Make sure the areg field is 16 bytes aligned. */
+	int align[0] __attribute__ ((aligned(16)));
+
+	/* current register frame.
+	 * Note: The ESF for kernel exceptions ends after 16 registers!
+	 */
+	unsigned long areg[16];		/* 128 (64) */
+};
+
+#include <variant/core.h>
+
+# define task_pt_regs(tsk) ((struct pt_regs*) \
+  (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
+# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
+# define instruction_pointer(regs) ((regs)->pc)
+extern void show_regs(struct pt_regs *);
+
+# ifndef CONFIG_SMP
+#  define profile_pc(regs) instruction_pointer(regs)
+# endif
+
+#else	/* __ASSEMBLY__ */
+
+# include <asm/asm-offsets.h>
+#define PT_REGS_OFFSET	  (KERNEL_STACK_SIZE - PT_USER_SIZE)
+
+#endif	/* !__ASSEMBLY__ */
+
+#endif  /* __KERNEL__ */
+
+#endif	/* _XTENSA_PTRACE_H */
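A hedged usage sketch, not in this patch: pc_of() is a made-up helper showing how the task_pt_regs(), user_mode() and instruction_pointer() accessors defined above are typically combined in kernel code:

#include <linux/errno.h>
#include <linux/sched.h>
#include <asm/ptrace.h>

static int pc_of(struct task_struct *tsk, unsigned long *pc)
{
	struct pt_regs *regs = task_pt_regs(tsk);

	if (!user_mode(regs))		/* PS.UM clear: kernel context */
		return -EINVAL;
	*pc = instruction_pointer(regs);
	return 0;
}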
diff --git a/include/asm-xtensa/regs.h b/arch/xtensa/include/asm/regs.h
similarity index 100%
rename from include/asm-xtensa/regs.h
rename to arch/xtensa/include/asm/regs.h
diff --git a/include/asm-xtensa/resource.h b/arch/xtensa/include/asm/resource.h
similarity index 100%
rename from include/asm-xtensa/resource.h
rename to arch/xtensa/include/asm/resource.h
diff --git a/include/asm-xtensa/rmap.h b/arch/xtensa/include/asm/rmap.h
similarity index 100%
rename from include/asm-xtensa/rmap.h
rename to arch/xtensa/include/asm/rmap.h
diff --git a/include/asm-xtensa/rwsem.h b/arch/xtensa/include/asm/rwsem.h
similarity index 100%
rename from include/asm-xtensa/rwsem.h
rename to arch/xtensa/include/asm/rwsem.h
diff --git a/include/asm-xtensa/scatterlist.h b/arch/xtensa/include/asm/scatterlist.h
similarity index 100%
rename from include/asm-xtensa/scatterlist.h
rename to arch/xtensa/include/asm/scatterlist.h
diff --git a/include/asm-xtensa/sections.h b/arch/xtensa/include/asm/sections.h
similarity index 100%
rename from include/asm-xtensa/sections.h
rename to arch/xtensa/include/asm/sections.h
diff --git a/include/asm-xtensa/segment.h b/arch/xtensa/include/asm/segment.h
similarity index 100%
rename from include/asm-xtensa/segment.h
rename to arch/xtensa/include/asm/segment.h
diff --git a/include/asm-xtensa/sembuf.h b/arch/xtensa/include/asm/sembuf.h
similarity index 100%
rename from include/asm-xtensa/sembuf.h
rename to arch/xtensa/include/asm/sembuf.h
diff --git a/arch/xtensa/include/asm/serial.h b/arch/xtensa/include/asm/serial.h
new file mode 100644
index 0000000..a8a2493
--- /dev/null
+++ b/arch/xtensa/include/asm/serial.h
@@ -0,0 +1,18 @@
+/*
+ * include/asm-xtensa/serial.h
+ *
+ * Configuration details for 8250, 16450, 16550, etc. serial ports
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SERIAL_H
+#define _XTENSA_SERIAL_H
+
+#include <platform/serial.h>
+
+#endif	/* _XTENSA_SERIAL_H */
diff --git a/include/asm-xtensa/setup.h b/arch/xtensa/include/asm/setup.h
similarity index 100%
rename from include/asm-xtensa/setup.h
rename to arch/xtensa/include/asm/setup.h
diff --git a/include/asm-xtensa/shmbuf.h b/arch/xtensa/include/asm/shmbuf.h
similarity index 100%
rename from include/asm-xtensa/shmbuf.h
rename to arch/xtensa/include/asm/shmbuf.h
diff --git a/include/asm-xtensa/shmparam.h b/arch/xtensa/include/asm/shmparam.h
similarity index 100%
rename from include/asm-xtensa/shmparam.h
rename to arch/xtensa/include/asm/shmparam.h
diff --git a/include/asm-xtensa/sigcontext.h b/arch/xtensa/include/asm/sigcontext.h
similarity index 100%
rename from include/asm-xtensa/sigcontext.h
rename to arch/xtensa/include/asm/sigcontext.h
diff --git a/include/asm-xtensa/siginfo.h b/arch/xtensa/include/asm/siginfo.h
similarity index 100%
rename from include/asm-xtensa/siginfo.h
rename to arch/xtensa/include/asm/siginfo.h
diff --git a/include/asm-xtensa/signal.h b/arch/xtensa/include/asm/signal.h
similarity index 100%
rename from include/asm-xtensa/signal.h
rename to arch/xtensa/include/asm/signal.h
diff --git a/include/asm-xtensa/smp.h b/arch/xtensa/include/asm/smp.h
similarity index 100%
rename from include/asm-xtensa/smp.h
rename to arch/xtensa/include/asm/smp.h
diff --git a/include/asm-xtensa/socket.h b/arch/xtensa/include/asm/socket.h
similarity index 100%
rename from include/asm-xtensa/socket.h
rename to arch/xtensa/include/asm/socket.h
diff --git a/include/asm-xtensa/sockios.h b/arch/xtensa/include/asm/sockios.h
similarity index 100%
rename from include/asm-xtensa/sockios.h
rename to arch/xtensa/include/asm/sockios.h
diff --git a/include/asm-xtensa/spinlock.h b/arch/xtensa/include/asm/spinlock.h
similarity index 100%
rename from include/asm-xtensa/spinlock.h
rename to arch/xtensa/include/asm/spinlock.h
diff --git a/include/asm-xtensa/stat.h b/arch/xtensa/include/asm/stat.h
similarity index 100%
rename from include/asm-xtensa/stat.h
rename to arch/xtensa/include/asm/stat.h
diff --git a/include/asm-xtensa/statfs.h b/arch/xtensa/include/asm/statfs.h
similarity index 100%
rename from include/asm-xtensa/statfs.h
rename to arch/xtensa/include/asm/statfs.h
diff --git a/include/asm-xtensa/string.h b/arch/xtensa/include/asm/string.h
similarity index 100%
rename from include/asm-xtensa/string.h
rename to arch/xtensa/include/asm/string.h
diff --git a/arch/xtensa/include/asm/swab.h b/arch/xtensa/include/asm/swab.h
new file mode 100644
index 0000000..f50b697
--- /dev/null
+++ b/arch/xtensa/include/asm/swab.h
@@ -0,0 +1,70 @@
+/*
+ * include/asm-xtensa/swab.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SWAB_H
+#define _XTENSA_SWAB_H
+
+#include <asm/types.h>
+#include <linux/compiler.h>
+
+#define __SWAB_64_THRU_32__
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+    __u32 res;
+    /* instruction sequence from Xtensa ISA release 2/2000 */
+    __asm__("ssai     8           \n\t"
+	    "srli     %0, %1, 16  \n\t"
+	    "src      %0, %0, %1  \n\t"
+	    "src      %0, %0, %0  \n\t"
+	    "src      %0, %1, %0  \n"
+	    : "=&a" (res)
+	    : "a" (x)
+	    );
+    return res;
+}
+#define __arch_swab32 __arch_swab32
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+    /* Given that 'short' values are signed (i.e., can be negative),
+     * we cannot assume that the upper 16-bits of the register are
+     * zero.  We are careful to mask values after shifting.
+     */
+
+    /* There exists an anomaly between xt-gcc and xt-xcc.  xt-gcc
+     * inserts an extui instruction after putting this function inline
+     * to ensure that it uses only the least-significant 16 bits of
+     * the result.  xt-xcc doesn't use an extui, but assumes the
+     * __asm__ macro follows the convention that the upper 16 bits of an
+     * 'unsigned short' result are still zero.  This macro doesn't
+     * follow that convention; indeed, it leaves garbage in the upper 16
+     * bits of the register.
+
+     * Declaring the temporary variables 'res' and 'tmp' to be 32-bit
+     * types while the return type of the function is a 16-bit type
+     * forces both compilers to insert exactly one extui instruction
+     * (or equivalent) to mask off the upper 16 bits. */
+
+    __u32 res;
+    __u32 tmp;
+
+    __asm__("extui    %1, %2, 8, 8\n\t"
+	    "slli     %0, %2, 8   \n\t"
+	    "or       %0, %0, %1  \n"
+	    : "=&a" (res), "=&a" (tmp)
+	    : "a" (x)
+	    );
+
+    return res;
+}
+#define __arch_swab16 __arch_swab16
+
+#endif /* _XTENSA_SWAB_H */
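The assembly sequences above implement 32- and 16-bit byte swaps. For reference, a portable user-space sketch (not the kernel code) of the values they must produce:

#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00) |
	       ((x << 8) & 0x00ff0000) | (x << 24);
}

static uint16_t swab16(uint16_t x)
{
	return (uint16_t)((x >> 8) | (x << 8));
}

int main(void)
{
	/* __arch_swab32(0x12345678) must yield 0x78563412,
	 * __arch_swab16(0xbeef) must yield 0xefbe. */
	printf("%08x %04x\n", swab32(0x12345678), swab16(0xbeef));
	return 0;
}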
diff --git a/include/asm-xtensa/syscall.h b/arch/xtensa/include/asm/syscall.h
similarity index 100%
rename from include/asm-xtensa/syscall.h
rename to arch/xtensa/include/asm/syscall.h
diff --git a/include/asm-xtensa/system.h b/arch/xtensa/include/asm/system.h
similarity index 100%
rename from include/asm-xtensa/system.h
rename to arch/xtensa/include/asm/system.h
diff --git a/include/asm-xtensa/termbits.h b/arch/xtensa/include/asm/termbits.h
similarity index 100%
rename from include/asm-xtensa/termbits.h
rename to arch/xtensa/include/asm/termbits.h
diff --git a/include/asm-xtensa/termios.h b/arch/xtensa/include/asm/termios.h
similarity index 100%
rename from include/asm-xtensa/termios.h
rename to arch/xtensa/include/asm/termios.h
diff --git a/include/asm-xtensa/thread_info.h b/arch/xtensa/include/asm/thread_info.h
similarity index 100%
rename from include/asm-xtensa/thread_info.h
rename to arch/xtensa/include/asm/thread_info.h
diff --git a/include/asm-xtensa/timex.h b/arch/xtensa/include/asm/timex.h
similarity index 100%
rename from include/asm-xtensa/timex.h
rename to arch/xtensa/include/asm/timex.h
diff --git a/include/asm-xtensa/tlb.h b/arch/xtensa/include/asm/tlb.h
similarity index 100%
rename from include/asm-xtensa/tlb.h
rename to arch/xtensa/include/asm/tlb.h
diff --git a/include/asm-xtensa/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h
similarity index 100%
rename from include/asm-xtensa/tlbflush.h
rename to arch/xtensa/include/asm/tlbflush.h
diff --git a/include/asm-xtensa/topology.h b/arch/xtensa/include/asm/topology.h
similarity index 100%
rename from include/asm-xtensa/topology.h
rename to arch/xtensa/include/asm/topology.h
diff --git a/include/asm-xtensa/types.h b/arch/xtensa/include/asm/types.h
similarity index 100%
rename from include/asm-xtensa/types.h
rename to arch/xtensa/include/asm/types.h
diff --git a/include/asm-xtensa/uaccess.h b/arch/xtensa/include/asm/uaccess.h
similarity index 100%
rename from include/asm-xtensa/uaccess.h
rename to arch/xtensa/include/asm/uaccess.h
diff --git a/include/asm-xtensa/ucontext.h b/arch/xtensa/include/asm/ucontext.h
similarity index 100%
rename from include/asm-xtensa/ucontext.h
rename to arch/xtensa/include/asm/ucontext.h
diff --git a/arch/xtensa/include/asm/unaligned.h b/arch/xtensa/include/asm/unaligned.h
new file mode 100644
index 0000000..8e7ed04
--- /dev/null
+++ b/arch/xtensa/include/asm/unaligned.h
@@ -0,0 +1,29 @@
+/*
+ * Xtensa doesn't handle unaligned accesses efficiently.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+#ifndef _ASM_XTENSA_UNALIGNED_H
+#define _ASM_XTENSA_UNALIGNED_H
+
+#include <asm/byteorder.h>
+
+#ifdef __LITTLE_ENDIAN
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_le
+# define put_unaligned	__put_unaligned_le
+#else
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_be
+# define put_unaligned	__put_unaligned_be
+#endif
+
+#endif	/* _ASM_XTENSA_UNALIGNED_H */
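A hedged usage sketch, not part of this patch, assuming the <linux/unaligned/*> helpers pulled in above provide the _le32 accessor: reading a 32-bit little-endian field from a byte buffer that may not be naturally aligned.

#include <linux/types.h>
#include <asm/unaligned.h>

static u32 read_le32_field(const u8 *buf)
{
	return get_unaligned_le32(buf);	/* safe on any alignment */
}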
diff --git a/include/asm-xtensa/unistd.h b/arch/xtensa/include/asm/unistd.h
similarity index 100%
rename from include/asm-xtensa/unistd.h
rename to arch/xtensa/include/asm/unistd.h
diff --git a/include/asm-xtensa/user.h b/arch/xtensa/include/asm/user.h
similarity index 100%
rename from include/asm-xtensa/user.h
rename to arch/xtensa/include/asm/user.h
diff --git a/include/asm-xtensa/vga.h b/arch/xtensa/include/asm/vga.h
similarity index 100%
rename from include/asm-xtensa/vga.h
rename to arch/xtensa/include/asm/vga.h
diff --git a/include/asm-xtensa/xor.h b/arch/xtensa/include/asm/xor.h
similarity index 100%
rename from include/asm-xtensa/xor.h
rename to arch/xtensa/include/asm/xor.h
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index dfd35dc..a51d36a 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -25,7 +25,7 @@
 #include <asm/page.h>
 #include <asm/signal.h>
 #include <asm/tlbflush.h>
-#include <asm/variant/tie-asm.h>
+#include <variant/tie-asm.h>
 
 /* Unimplemented features. */
 
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 51f4fb6..d506774 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -16,7 +16,7 @@
 
 #include <asm-generic/vmlinux.lds.h>
 
-#include <asm/variant/core.h>
+#include <variant/core.h>
 OUTPUT_ARCH(xtensa)
 ENTRY(_start)
 
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
index 9d9cd99..df397f9 100644
--- a/arch/xtensa/lib/checksum.S
+++ b/arch/xtensa/lib/checksum.S
@@ -16,7 +16,7 @@
 
 #include <asm/errno.h>
 #include <linux/linkage.h>
-#include <asm/variant/core.h>
+#include <variant/core.h>
 
 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
index ddda8f4..ea59dcd 100644
--- a/arch/xtensa/lib/memcopy.S
+++ b/arch/xtensa/lib/memcopy.S
@@ -9,7 +9,7 @@
  * Copyright (C) 2002 - 2005 Tensilica Inc.
  */
 
-#include <asm/variant/core.h>
+#include <variant/core.h>
 
 	.macro	src_b	r, w0, w1
 #ifdef __XTENSA_EB__
diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S
index 56a1749..10b8c40 100644
--- a/arch/xtensa/lib/memset.S
+++ b/arch/xtensa/lib/memset.S
@@ -11,7 +11,7 @@
  *  Copyright (C) 2002 Tensilica Inc.
  */
 
-#include <asm/variant/core.h>
+#include <variant/core.h>
 
 /*
  * void *memset(void *dst, int c, size_t length)
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
index b2655d9..9f603cd 100644
--- a/arch/xtensa/lib/strncpy_user.S
+++ b/arch/xtensa/lib/strncpy_user.S
@@ -11,7 +11,7 @@
  *  Copyright (C) 2002 Tensilica Inc.
  */
 
-#include <asm/variant/core.h>
+#include <variant/core.h>
 #include <linux/errno.h>
 
 /* Load or store instructions that may cause exceptions use the EX macro. */
diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S
index ad3f616..23f2a89 100644
--- a/arch/xtensa/lib/strnlen_user.S
+++ b/arch/xtensa/lib/strnlen_user.S
@@ -11,7 +11,7 @@
  *  Copyright (C) 2002 Tensilica Inc.
  */
 
-#include <asm/variant/core.h>
+#include <variant/core.h>
 
 /* Load or store instructions that may cause exceptions use the EX macro. */
 
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
index a8ab1d4..46d6031 100644
--- a/arch/xtensa/lib/usercopy.S
+++ b/arch/xtensa/lib/usercopy.S
@@ -53,7 +53,7 @@
  *	a11/ original length
  */
 
-#include <asm/variant/core.h>
+#include <variant/core.h>
 
 #ifdef __XTENSA_EB__
 #define ALIGN(R, W0, W1) src	R, W0, W1
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 9141e36..efed889 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -1,5 +1,5 @@
 /*
- * arch/xtensa/platform-iss/console.c
+ * arch/xtensa/platforms/iss/console.c
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -24,7 +24,7 @@
 #include <asm/uaccess.h>
 #include <asm/irq.h>
 
-#include <asm/platform/simcall.h>
+#include <platform/simcall.h>
 
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
diff --git a/include/asm-xtensa/platform-iss/hardware.h b/arch/xtensa/platforms/iss/include/platform/hardware.h
similarity index 100%
rename from include/asm-xtensa/platform-iss/hardware.h
rename to arch/xtensa/platforms/iss/include/platform/hardware.h
diff --git a/include/asm-xtensa/platform-iss/simcall.h b/arch/xtensa/platforms/iss/include/platform/simcall.h
similarity index 100%
rename from include/asm-xtensa/platform-iss/simcall.h
rename to arch/xtensa/platforms/iss/include/platform/simcall.h
diff --git a/arch/xtensa/platforms/iss/io.c b/arch/xtensa/platforms/iss/io.c
index 5b161a5..571d0b2 100644
--- a/arch/xtensa/platforms/iss/io.c
+++ b/arch/xtensa/platforms/iss/io.c
@@ -3,7 +3,7 @@
 #if 0
 
 #include <asm/io.h>
-#include <xtensa/simcall.h>
+#include <platform/platform-iss/simcall.h>
 
 extern int __simc ();
 
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index 64f057d..edad415 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -1,6 +1,6 @@
 /*
  *
- * arch/xtensa/platform-iss/network.c
+ * arch/xtensa/platforms/iss/network.c
  *
  * Platform specific initialization.
  *
@@ -33,7 +33,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/platform_device.h>
 
-#include <asm/platform/simcall.h>
+#include <platform/simcall.h>
 
 #define DRIVER_NAME "iss-netdev"
 #define ETH_MAX_PACKET 1500
diff --git a/arch/xtensa/platforms/xt2000/Makefile b/arch/xtensa/platforms/xt2000/Makefile
new file mode 100644
index 0000000..54d018e
--- /dev/null
+++ b/arch/xtensa/platforms/xt2000/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Tensilica XT2000 Emulation Board
+#
+
+obj-y 			= setup.o
diff --git a/arch/xtensa/platforms/xt2000/include/platform/hardware.h b/arch/xtensa/platforms/xt2000/include/platform/hardware.h
new file mode 100644
index 0000000..41459ad
--- /dev/null
+++ b/arch/xtensa/platforms/xt2000/include/platform/hardware.h
@@ -0,0 +1,55 @@
+/*
+ * platform/hardware.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 Tensilica Inc.
+ */
+
+/*
+ * This file contains the hardware configuration of the XT2000 board.
+ */
+
+#ifndef _XTENSA_XT2000_HARDWARE_H
+#define _XTENSA_XT2000_HARDWARE_H
+
+#include <variant/core.h>
+#include <asm/io.h>
+
+/* 
+ * Memory configuration.
+ */
+
+#define PLATFORM_DEFAULT_MEM_START 0x00000000
+#define PLATFORM_DEFAULT_MEM_SIZE 0x08000000
+
+/*
+ * Number of platform IRQs
+ */
+#define PLATFORM_NR_IRQS 3
+/*
+ * On-board components.
+ */
+
+#define SONIC83934_INTNUM	XCHAL_EXTINT3_NUM
+#define SONIC83934_ADDR		IOADDR(0x0d030000)
+
+/*
+ * V3-PCI
+ */
+
+/* The XT2000 uses the V3 as a cascaded interrupt controller for the PCI bus */
+
+#define IRQ_PCI_A		(XCHAL_NUM_INTERRUPTS + 0)
+#define IRQ_PCI_B		(XCHAL_NUM_INTERRUPTS + 1)
+#define IRQ_PCI_C		(XCHAL_NUM_INTERRUPTS + 2)
+
+/*
+ * Various other components.
+ */
+
+#define XT2000_LED_ADDR		IOADDR(0x0d040000)
+
+#endif /* _XTENSA_XT2000_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xt2000/include/platform/serial.h b/arch/xtensa/platforms/xt2000/include/platform/serial.h
new file mode 100644
index 0000000..7226cf7
--- /dev/null
+++ b/arch/xtensa/platforms/xt2000/include/platform/serial.h
@@ -0,0 +1,28 @@
+/*
+ * platform/serial.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_XT2000_SERIAL_H
+#define _XTENSA_XT2000_SERIAL_H
+
+#include <variant/core.h>
+#include <asm/io.h>
+
+/*  National-Semi PC16552D DUART:  */
+
+#define DUART16552_1_INTNUM	XCHAL_EXTINT4_NUM
+#define DUART16552_2_INTNUM	XCHAL_EXTINT5_NUM
+
+#define DUART16552_1_ADDR	IOADDR(0x0d050020)	/* channel 1 */
+#define DUART16552_2_ADDR	IOADDR(0x0d050000)	/* channel 2 */
+
+#define DUART16552_XTAL_FREQ	18432000	/* crystal frequency in Hz */
+#define BASE_BAUD ( DUART16552_XTAL_FREQ / 16 )
+
+#endif /* _XTENSA_XT2000_SERIAL_H */
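As a quick, purely illustrative check on the numbers above: BASE_BAUD comes out to 18432000 / 16 = 1152000, and the classic 16550 divisor for a given rate is BASE_BAUD / rate, e.g. 1152000 / 9600 = 120.

/* Illustrative arithmetic check, not part of the patch. */
#include <stdio.h>

int main(void)
{
	unsigned int base_baud = 18432000 / 16;			/* 1152000 */
	printf("divisor for 9600 baud: %u\n", base_baud / 9600);	/* 120 */
	return 0;
}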
diff --git a/arch/xtensa/platforms/xt2000/setup.c b/arch/xtensa/platforms/xt2000/setup.c
new file mode 100644
index 0000000..9e83940
--- /dev/null
+++ b/arch/xtensa/platforms/xt2000/setup.c
@@ -0,0 +1,181 @@
+/*
+ * arch/xtensa/platforms/xt2000/setup.c
+ *
+ * Platform specific functions for the XT2000 board.
+ *
+ * Authors:	Chris Zankel <chris@zankel.net>
+ *		Joe Taylor <joe@tensilica.com>
+ *
+ * Copyright 2001 - 2004 Tensilica Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/kdev_t.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/stringify.h>
+#include <linux/platform_device.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+
+#include <asm/processor.h>
+#include <asm/platform.h>
+#include <asm/bootparam.h>
+#include <platform/hardware.h>
+#include <platform/serial.h>
+
+/* Assumes s points to an 8-character string.  No checking for NULL. */
+
+static void led_print (int f, char *s)
+{
+	unsigned long* led_addr = (unsigned long*) (XT2000_LED_ADDR + 0xE0) + f;
+	int i;
+	for (i = f; i < 8; i++)
+		if ((*led_addr++ = *s++) == 0)
+		    break;
+}
+
+void platform_halt(void)
+{
+	led_print (0, "  HALT  ");
+	local_irq_disable();
+	while (1);
+}
+
+void platform_power_off(void)
+{
+	led_print (0, "POWEROFF");
+	local_irq_disable();
+	while (1);
+}
+
+void platform_restart(void)
+{
+	/* Flush and reset the mmu, simulate a processor reset, and
+	 * jump to the reset vector. */
+
+	__asm__ __volatile__ ("movi	a2, 15\n\t"
+			      "wsr	a2, " __stringify(ICOUNTLEVEL) "\n\t"
+			      "movi	a2, 0\n\t"
+			      "wsr	a2, " __stringify(ICOUNT) "\n\t"
+			      "wsr	a2, " __stringify(IBREAKENABLE) "\n\t"
+			      "wsr	a2, " __stringify(LCOUNT) "\n\t"
+			      "movi	a2, 0x1f\n\t"
+			      "wsr	a2, " __stringify(PS) "\n\t"
+			      "isync\n\t"
+			      "jx	%0\n\t"
+			      :
+			      : "a" (XCHAL_RESET_VECTOR_VADDR)
+			      : "a2"
+			      );
+
+	/* control never gets here */
+}
+
+void __init platform_setup(char** cmdline)
+{
+	led_print (0, "LINUX   ");
+}
+
+/* early initialization */
+
+extern sysmem_info_t __initdata sysmem;
+
+void platform_init(bp_tag_t* first)
+{
+	/* Set default memory block if not provided by the bootloader. */
+
+	if (sysmem.nr_banks == 0) {
+		sysmem.nr_banks = 1;
+		sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
+		sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
+				     + PLATFORM_DEFAULT_MEM_SIZE;
+	}
+}
+
+/* Heartbeat. Let the LED blink. */
+
+void platform_heartbeat(void)
+{
+	static int i=0, t = 0;
+
+	if (--t < 0)
+	{
+		t = 59;
+		led_print(7, i ? ".": " ");
+		i ^= 1;
+	}
+}
+
+//#define RS_TABLE_SIZE 2
+//#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF|ASYNC_SKIP_TEST)
+
+#define _SERIAL_PORT(_base,_irq)					\
+{									\
+	.mapbase	= (_base),					\
+	.membase	= (void*)(_base),				\
+	.irq		= (_irq),					\
+	.uartclk	= DUART16552_XTAL_FREQ,				\
+	.iotype		= UPIO_MEM,					\
+	.flags		= UPF_BOOT_AUTOCONF,				\
+	.regshift	= 2,						\
+}
+
+static struct plat_serial8250_port xt2000_serial_data[] = {
+#if XCHAL_HAVE_BE
+	_SERIAL_PORT(DUART16552_1_ADDR + 3, DUART16552_1_INTNUM),
+	_SERIAL_PORT(DUART16552_2_ADDR + 3, DUART16552_2_INTNUM),
+#else
+	_SERIAL_PORT(DUART16552_1_ADDR, DUART16552_1_INTNUM),
+	_SERIAL_PORT(DUART16552_2_ADDR, DUART16552_2_INTNUM),
+#endif
+	{ }
+};
+
+static struct platform_device xt2000_serial8250_device = {
+	.name		= "serial8250",
+	.id		= PLAT8250_DEV_PLATFORM,
+	.dev		= {
+	    .platform_data = xt2000_serial_data,
+	},
+};
+
+static struct resource xt2000_sonic_res[] = {
+	{
+		.start = SONIC83934_ADDR,
+		.end   = SONIC83934_ADDR + 0xff,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = SONIC83934_INTNUM,
+		.end = SONIC83934_INTNUM,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device xt2000_sonic_device = {
+	.name		= "xtsonic",
+	.num_resources	= ARRAY_SIZE(xt2000_sonic_res),
+	.resource		= xt2000_sonic_res,
+};
+
+static int __init xt2000_setup_devinit(void)
+{
+	platform_device_register(&xt2000_serial8250_device);
+	platform_device_register(&xt2000_sonic_device);
+
+	return 0;
+}
+
+device_initcall(xt2000_setup_devinit);
diff --git a/include/asm-xtensa/variant-dc232b/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
similarity index 100%
rename from include/asm-xtensa/variant-dc232b/core.h
rename to arch/xtensa/variants/dc232b/include/variant/core.h
diff --git a/include/asm-xtensa/variant-dc232b/tie-asm.h b/arch/xtensa/variants/dc232b/include/variant/tie-asm.h
similarity index 100%
rename from include/asm-xtensa/variant-dc232b/tie-asm.h
rename to arch/xtensa/variants/dc232b/include/variant/tie-asm.h
diff --git a/include/asm-xtensa/variant-dc232b/tie.h b/arch/xtensa/variants/dc232b/include/variant/tie.h
similarity index 100%
rename from include/asm-xtensa/variant-dc232b/tie.h
rename to arch/xtensa/variants/dc232b/include/variant/tie.h
diff --git a/include/asm-xtensa/variant-fsf/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
similarity index 100%
rename from include/asm-xtensa/variant-fsf/core.h
rename to arch/xtensa/variants/fsf/include/variant/core.h
diff --git a/include/asm-xtensa/variant-fsf/tie-asm.h b/arch/xtensa/variants/fsf/include/variant/tie-asm.h
similarity index 100%
rename from include/asm-xtensa/variant-fsf/tie-asm.h
rename to arch/xtensa/variants/fsf/include/variant/tie-asm.h
diff --git a/include/asm-xtensa/variant-fsf/tie.h b/arch/xtensa/variants/fsf/include/variant/tie.h
similarity index 100%
rename from include/asm-xtensa/variant-fsf/tie.h
rename to arch/xtensa/variants/fsf/include/variant/tie.h
diff --git a/block/Kconfig b/block/Kconfig
index ac0956f..0cbb3b8 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -36,6 +36,12 @@
 	  This option also enables support for single files larger than
 	  2TB.
 
+	  The ext4 filesystem requires that this feature be enabled in
+	  order to support filesystems that have the huge_file feature
+	  enabled.    Otherwise, it will refuse to mount any filesystems
+	  that use the huge_file feature, which is enabled by default
+	  by mke2fs.ext4.   The GFS2 filesystem also requires this feature.
+
 	  If unsure, say N.
 
 config BLK_DEV_IO_TRACE
diff --git a/block/blk-map.c b/block/blk-map.c
index 2990447..f103729 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -42,7 +42,7 @@
 
 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     struct rq_map_data *map_data, void __user *ubuf,
-			     unsigned int len, int null_mapped, gfp_t gfp_mask)
+			     unsigned int len, gfp_t gfp_mask)
 {
 	unsigned long uaddr;
 	struct bio *bio, *orig_bio;
@@ -63,7 +63,7 @@
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
-	if (null_mapped)
+	if (map_data && map_data->null_mapped)
 		bio->bi_flags |= (1 << BIO_NULL_MAPPED);
 
 	orig_bio = bio;
@@ -114,17 +114,15 @@
 {
 	unsigned long bytes_read = 0;
 	struct bio *bio = NULL;
-	int ret, null_mapped = 0;
+	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len)
 		return -EINVAL;
-	if (!ubuf) {
-		if (!map_data || rq_data_dir(rq) != READ)
-			return -EINVAL;
-		null_mapped = 1;
-	}
+
+	if (!ubuf && (!map_data || !map_data->null_mapped))
+		return -EINVAL;
 
 	while (bytes_read != len) {
 		unsigned long map_len, end, start;
@@ -143,13 +141,16 @@
 			map_len -= PAGE_SIZE;
 
 		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
-					null_mapped, gfp_mask);
+					gfp_mask);
 		if (ret < 0)
 			goto unmap_rq;
 		if (!bio)
 			bio = rq->bio;
 		bytes_read += ret;
 		ubuf += ret;
+
+		if (map_data)
+			map_data->offset += ret;
 	}
 
 	if (!bio_flagged(bio, BIO_USER_MAPPED))
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index dcbf1be..f21147f 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -28,351 +28,18 @@
 #include <linux/async_tx.h>
 
 #ifdef CONFIG_DMA_ENGINE
-static enum dma_state_client
-dma_channel_add_remove(struct dma_client *client,
-	struct dma_chan *chan, enum dma_state state);
-
-static struct dma_client async_tx_dma = {
-	.event_callback = dma_channel_add_remove,
-	/* .cap_mask == 0 defaults to all channels */
-};
-
-/**
- * dma_cap_mask_all - enable iteration over all operation types
- */
-static dma_cap_mask_t dma_cap_mask_all;
-
-/**
- * chan_ref_percpu - tracks channel allocations per core/opertion
- */
-struct chan_ref_percpu {
-	struct dma_chan_ref *ref;
-};
-
-static int channel_table_initialized;
-static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END];
-
-/**
- * async_tx_lock - protect modification of async_tx_master_list and serialize
- *	rebalance operations
- */
-static spinlock_t async_tx_lock;
-
-static LIST_HEAD(async_tx_master_list);
-
-/* async_tx_issue_pending_all - start all transactions on all channels */
-void async_tx_issue_pending_all(void)
+static int __init async_tx_init(void)
 {
-	struct dma_chan_ref *ref;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-		ref->chan->device->device_issue_pending(ref->chan);
-	rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(async_tx_issue_pending_all);
-
-/* dma_wait_for_async_tx - spin wait for a transcation to complete
- * @tx: transaction to wait on
- */
-enum dma_status
-dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
-{
-	enum dma_status status;
-	struct dma_async_tx_descriptor *iter;
-	struct dma_async_tx_descriptor *parent;
-
-	if (!tx)
-		return DMA_SUCCESS;
-
-	/* poll through the dependency chain, return when tx is complete */
-	do {
-		iter = tx;
-
-		/* find the root of the unsubmitted dependency chain */
-		do {
-			parent = iter->parent;
-			if (!parent)
-				break;
-			else
-				iter = parent;
-		} while (parent);
-
-		/* there is a small window for ->parent == NULL and
-		 * ->cookie == -EBUSY
-		 */
-		while (iter->cookie == -EBUSY)
-			cpu_relax();
-
-		status = dma_sync_wait(iter->chan, iter->cookie);
-	} while (status == DMA_IN_PROGRESS || (iter != tx));
-
-	return status;
-}
-EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
-
-/* async_tx_run_dependencies - helper routine for dma drivers to process
- *	(start) dependent operations on their target channel
- * @tx: transaction with dependencies
- */
-void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
-{
-	struct dma_async_tx_descriptor *dep = tx->next;
-	struct dma_async_tx_descriptor *dep_next;
-	struct dma_chan *chan;
-
-	if (!dep)
-		return;
-
-	chan = dep->chan;
-
-	/* keep submitting up until a channel switch is detected
-	 * in that case we will be called again as a result of
-	 * processing the interrupt from async_tx_channel_switch
-	 */
-	for (; dep; dep = dep_next) {
-		spin_lock_bh(&dep->lock);
-		dep->parent = NULL;
-		dep_next = dep->next;
-		if (dep_next && dep_next->chan == chan)
-			dep->next = NULL; /* ->next will be submitted */
-		else
-			dep_next = NULL; /* submit current dep and terminate */
-		spin_unlock_bh(&dep->lock);
-
-		dep->tx_submit(dep);
-	}
-
-	chan->device->device_issue_pending(chan);
-}
-EXPORT_SYMBOL_GPL(async_tx_run_dependencies);
-
-static void
-free_dma_chan_ref(struct rcu_head *rcu)
-{
-	struct dma_chan_ref *ref;
-	ref = container_of(rcu, struct dma_chan_ref, rcu);
-	kfree(ref);
-}
-
-static void
-init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan)
-{
-	INIT_LIST_HEAD(&ref->node);
-	INIT_RCU_HEAD(&ref->rcu);
-	ref->chan = chan;
-	atomic_set(&ref->count, 0);
-}
-
-/**
- * get_chan_ref_by_cap - returns the nth channel of the given capability
- * 	defaults to returning the channel with the desired capability and the
- * 	lowest reference count if the index can not be satisfied
- * @cap: capability to match
- * @index: nth channel desired, passing -1 has the effect of forcing the
- *  default return value
- */
-static struct dma_chan_ref *
-get_chan_ref_by_cap(enum dma_transaction_type cap, int index)
-{
-	struct dma_chan_ref *ret_ref = NULL, *min_ref = NULL, *ref;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-		if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
-			if (!min_ref)
-				min_ref = ref;
-			else if (atomic_read(&ref->count) <
-				atomic_read(&min_ref->count))
-				min_ref = ref;
-
-			if (index-- == 0) {
-				ret_ref = ref;
-				break;
-			}
-		}
-	rcu_read_unlock();
-
-	if (!ret_ref)
-		ret_ref = min_ref;
-
-	if (ret_ref)
-		atomic_inc(&ret_ref->count);
-
-	return ret_ref;
-}
-
-/**
- * async_tx_rebalance - redistribute the available channels, optimize
- * for cpu isolation in the SMP case, and opertaion isolation in the
- * uniprocessor case
- */
-static void async_tx_rebalance(void)
-{
-	int cpu, cap, cpu_idx = 0;
-	unsigned long flags;
-
-	if (!channel_table_initialized)
-		return;
-
-	spin_lock_irqsave(&async_tx_lock, flags);
-
-	/* undo the last distribution */
-	for_each_dma_cap_mask(cap, dma_cap_mask_all)
-		for_each_possible_cpu(cpu) {
-			struct dma_chan_ref *ref =
-				per_cpu_ptr(channel_table[cap], cpu)->ref;
-			if (ref) {
-				atomic_set(&ref->count, 0);
-				per_cpu_ptr(channel_table[cap], cpu)->ref =
-									NULL;
-			}
-		}
-
-	for_each_dma_cap_mask(cap, dma_cap_mask_all)
-		for_each_online_cpu(cpu) {
-			struct dma_chan_ref *new;
-			if (NR_CPUS > 1)
-				new = get_chan_ref_by_cap(cap, cpu_idx++);
-			else
-				new = get_chan_ref_by_cap(cap, -1);
-
-			per_cpu_ptr(channel_table[cap], cpu)->ref = new;
-		}
-
-	spin_unlock_irqrestore(&async_tx_lock, flags);
-}
-
-static enum dma_state_client
-dma_channel_add_remove(struct dma_client *client,
-	struct dma_chan *chan, enum dma_state state)
-{
-	unsigned long found, flags;
-	struct dma_chan_ref *master_ref, *ref;
-	enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-	switch (state) {
-	case DMA_RESOURCE_AVAILABLE:
-		found = 0;
-		rcu_read_lock();
-		list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-			if (ref->chan == chan) {
-				found = 1;
-				break;
-			}
-		rcu_read_unlock();
-
-		pr_debug("async_tx: dma resource available [%s]\n",
-			found ? "old" : "new");
-
-		if (!found)
-			ack = DMA_ACK;
-		else
-			break;
-
-		/* add the channel to the generic management list */
-		master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL);
-		if (master_ref) {
-			/* keep a reference until async_tx is unloaded */
-			dma_chan_get(chan);
-			init_dma_chan_ref(master_ref, chan);
-			spin_lock_irqsave(&async_tx_lock, flags);
-			list_add_tail_rcu(&master_ref->node,
-				&async_tx_master_list);
-			spin_unlock_irqrestore(&async_tx_lock,
-				flags);
-		} else {
-			printk(KERN_WARNING "async_tx: unable to create"
-				" new master entry in response to"
-				" a DMA_RESOURCE_ADDED event"
-				" (-ENOMEM)\n");
-			return 0;
-		}
-
-		async_tx_rebalance();
-		break;
-	case DMA_RESOURCE_REMOVED:
-		found = 0;
-		spin_lock_irqsave(&async_tx_lock, flags);
-		list_for_each_entry(ref, &async_tx_master_list, node)
-			if (ref->chan == chan) {
-				/* permit backing devices to go away */
-				dma_chan_put(ref->chan);
-				list_del_rcu(&ref->node);
-				call_rcu(&ref->rcu, free_dma_chan_ref);
-				found = 1;
-				break;
-			}
-		spin_unlock_irqrestore(&async_tx_lock, flags);
-
-		pr_debug("async_tx: dma resource removed [%s]\n",
-			found ? "ours" : "not ours");
-
-		if (found)
-			ack = DMA_ACK;
-		else
-			break;
-
-		async_tx_rebalance();
-		break;
-	case DMA_RESOURCE_SUSPEND:
-	case DMA_RESOURCE_RESUME:
-		printk(KERN_WARNING "async_tx: does not support dma channel"
-			" suspend/resume\n");
-		break;
-	default:
-		BUG();
-	}
-
-	return ack;
-}
-
-static int __init
-async_tx_init(void)
-{
-	enum dma_transaction_type cap;
-
-	spin_lock_init(&async_tx_lock);
-	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
-
-	/* an interrupt will never be an explicit operation type.
-	 * clearing this bit prevents allocation to a slot in 'channel_table'
-	 */
-	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
-
-	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
-		channel_table[cap] = alloc_percpu(struct chan_ref_percpu);
-		if (!channel_table[cap])
-			goto err;
-	}
-
-	channel_table_initialized = 1;
-	dma_async_client_register(&async_tx_dma);
-	dma_async_client_chan_request(&async_tx_dma);
+	dmaengine_get();
 
 	printk(KERN_INFO "async_tx: api initialized (async)\n");
 
 	return 0;
-err:
-	printk(KERN_ERR "async_tx: initialization failure\n");
-
-	while (--cap >= 0)
-		free_percpu(channel_table[cap]);
-
-	return 1;
 }
 
 static void __exit async_tx_exit(void)
 {
-	enum dma_transaction_type cap;
-
-	channel_table_initialized = 0;
-
-	for_each_dma_cap_mask(cap, dma_cap_mask_all)
-		if (channel_table[cap])
-			free_percpu(channel_table[cap]);
-
-	dma_async_client_unregister(&async_tx_dma);
+	dmaengine_put();
 }
 
 /**
@@ -387,16 +54,9 @@
 {
 	/* see if we can keep the chain on one channel */
 	if (depend_tx &&
-		dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
+	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
 		return depend_tx->chan;
-	else if (likely(channel_table_initialized)) {
-		struct dma_chan_ref *ref;
-		int cpu = get_cpu();
-		ref = per_cpu_ptr(channel_table[tx_type], cpu)->ref;
-		put_cpu();
-		return ref ? ref->chan : NULL;
-	} else
-		return NULL;
+	return dma_find_channel(tx_type);
 }
 EXPORT_SYMBOL_GPL(__async_tx_find_channel);
 #else
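The rewrite above hands channel selection to the dmaengine core. A hedged sketch (assuming the dmaengine API introduced alongside this change; grab_memcpy_chan() is a made-up helper) of the pattern clients now follow: dmaengine_get() takes the global client reference, and dma_find_channel() returns the per-cpu channel for the requested operation type, or NULL if none is available.

#include <linux/dmaengine.h>

static struct dma_chan *grab_memcpy_chan(void)
{
	dmaengine_get();
	return dma_find_channel(DMA_MEMCPY);
}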
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 2f557f5..00cf955 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -107,4 +107,6 @@
 source "drivers/xen/Kconfig"
 
 source "drivers/staging/Kconfig"
+
+source "drivers/platform/Kconfig"
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index fceb71a..c1bf417 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -18,6 +18,9 @@
 
 obj-$(CONFIG_XEN)		+= xen/
 
+# regulators early, since some subsystems rely on them to initialize
+obj-$(CONFIG_REGULATOR)		+= regulator/
+
 # char/ comes before serial/ etc so that the VT console is the boot-time
 # default.
 obj-y				+= char/
@@ -57,6 +60,7 @@
 obj-$(CONFIG_PARIDE) 		+= block/paride/
 obj-$(CONFIG_TC)		+= tc/
 obj-$(CONFIG_UWB)		+= uwb/
+obj-$(CONFIG_USB_OTG_UTILS)	+= usb/otg/
 obj-$(CONFIG_USB)		+= usb/
 obj-$(CONFIG_USB_MUSB_HDRC)	+= usb/musb/
 obj-$(CONFIG_PCI)		+= usb/
@@ -100,5 +104,5 @@
 obj-$(CONFIG_OF)		+= of/
 obj-$(CONFIG_SSB)		+= ssb/
 obj-$(CONFIG_VIRTIO)		+= virtio/
-obj-$(CONFIG_REGULATOR)		+= regulator/
 obj-$(CONFIG_STAGING)		+= staging/
+obj-y				+= platform/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b0243fd..d7f9839 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -196,90 +196,6 @@
 	depends on (X86 || IA64)
 	default y if IA64_GENERIC || IA64_SGI_SN2
 
-config ACPI_WMI
-	tristate "WMI (EXPERIMENTAL)"
-	depends on X86
-	depends on EXPERIMENTAL
-	help
-	  This driver adds support for the ACPI-WMI (Windows Management
-	  Instrumentation) mapper device (PNP0C14) found on some systems.
-
-	  ACPI-WMI is a proprietary extension to ACPI to expose parts of the
-	  ACPI firmware to userspace - this is done through various vendor
-	  defined methods and data blocks in a PNP0C14 device, which are then
-	  made available for userspace to call.
-
-	  The implementation of this in Linux currently only exposes this to
-	  other kernel space drivers.
-
-	  This driver is a required dependency to build the firmware specific
-	  drivers needed on many machines, including Acer and HP laptops.
-
-	  It is safe to enable this driver even if your DSDT doesn't define
-	  any ACPI-WMI devices.
-
-config ACPI_ASUS
-        tristate "ASUS/Medion Laptop Extras"
-	depends on X86
-	select BACKLIGHT_CLASS_DEVICE
-        ---help---
-          This driver provides support for extra features of ACPI-compatible
-          ASUS laptops. As some of Medion laptops are made by ASUS, it may also
-          support some Medion laptops (such as 9675 for example).  It makes all
-          the extra buttons generate standard ACPI events that go through
-          /proc/acpi/events, and (on some models) adds support for changing the
-          display brightness and output, switching the LCD backlight on and off,
-          and most importantly, allows you to blink those fancy LEDs intended
-          for reporting mail and wireless status.
-
-	  Note: display switching code is currently considered EXPERIMENTAL,
-	  toying with these values may even lock your machine.
-
-          All settings are changed via /proc/acpi/asus directory entries. Owner
-          and group for these entries can be set with asus_uid and asus_gid
-          parameters.
-
-          More information and a userspace daemon for handling the extra buttons
-          at <http://sourceforge.net/projects/acpi4asus/>.
-
-          If you have an ACPI-compatible ASUS laptop, say Y or M here. This
-          driver is still under development, so if your laptop is unsupported or
-          something works not quite as expected, please use the mailing list
-          available on the above page (acpi4asus-user@lists.sourceforge.net).
-
-	  NOTE: This driver is deprecated and will probably be removed soon,
-	  use asus-laptop instead.
-
-config ACPI_TOSHIBA
-	tristate "Toshiba Laptop Extras"
-	depends on X86 && INPUT
-	select INPUT_POLLDEV
-	select NET
-	select RFKILL
-	select BACKLIGHT_CLASS_DEVICE
-	---help---
-	  This driver adds support for access to certain system settings
-	  on "legacy free" Toshiba laptops.  These laptops can be recognized by
-	  their lack of a BIOS setup menu and APM support.
-
-	  On these machines, all system configuration is handled through the
-	  ACPI.  This driver is required for access to controls not covered
-	  by the general ACPI drivers, such as LCD brightness, video output,
-	  etc.
-
-	  This driver differs from the non-ACPI Toshiba laptop driver (located
-	  under "Processor type and features") in several aspects.
-	  Configuration is accessed by reading and writing text files in the
-	  /proc tree instead of by program interface to /dev.  Furthermore, no
-	  power management functions are exposed, as those are handled by the
-	  general ACPI drivers.
-
-	  More information about this driver is available at
-	  <http://memebeam.org/toys/ToshibaAcpiDriver>.
-
-	  If you have a legacy free Toshiba laptop (such as the Libretto L1
-	  series), say Y.
-
 config ACPI_CUSTOM_DSDT_FILE
 	string "Custom DSDT Table file to include"
 	default ""
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 3c0c933..65d90c7 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -2,15 +2,8 @@
 # Makefile for the Linux ACPI interpreter
 #
 
-export ACPI_CFLAGS
-
-ACPI_CFLAGS	:= -Os
-
-ifdef CONFIG_ACPI_DEBUG
-  ACPI_CFLAGS	+= -DACPI_DEBUG_OUTPUT
-endif
-
-EXTRA_CFLAGS	+= $(ACPI_CFLAGS)
+ccflags-y			:= -Os
+ccflags-$(CONFIG_ACPI_DEBUG)	+= -DACPI_DEBUG_OUTPUT
 
 #
 # ACPI Boot-Time Table Parsing
@@ -22,9 +15,13 @@
 # ACPI Core Subsystem (Interpreter)
 #
 obj-y				+= osl.o utils.o reboot.o\
-				   dispatcher/ events/ executer/ hardware/ \
-				   namespace/ parser/ resources/ tables/ \
-				   utilities/
+					acpica/
+
+# sleep related files
+obj-y				+= wakeup.o
+obj-y				+= sleep.o
+obj-$(CONFIG_ACPI_SLEEP)	+= proc.o
+
 
 #
 # ACPI Bus and Device Drivers
@@ -35,7 +32,6 @@
 processor-objs	+= processor_perflib.o
 endif
 
-obj-y				+= sleep/
 obj-y				+= bus.o glue.o
 obj-y				+= scan.o
 # Keep EC driver first. Initialization of others depend on it.
@@ -59,9 +55,6 @@
 obj-$(CONFIG_ACPI_SYSTEM)	+= system.o event.o
 obj-$(CONFIG_ACPI_DEBUG)	+= debug.o
 obj-$(CONFIG_ACPI_NUMA)		+= numa.o
-obj-$(CONFIG_ACPI_WMI)		+= wmi.o
-obj-$(CONFIG_ACPI_ASUS)		+= asus_acpi.o
-obj-$(CONFIG_ACPI_TOSHIBA)	+= toshiba_acpi.o
 obj-$(CONFIG_ACPI_HOTPLUG_MEMORY)	+= acpi_memhotplug.o
 obj-$(CONFIG_ACPI_PROCFS_POWER)	+= cm_sbs.o
 obj-$(CONFIG_ACPI_SBS)		+= sbshc.o
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 63a17b5..7a0f4aa 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -20,7 +20,7 @@
  *
  *
  * ACPI based HotPlug driver that supports Memory Hotplug
- * This driver fields notifications from firmare for memory add
+ * This driver fields notifications from firmware for memory add
  * and remove operations and alerts the VM of the affected memory
  * ranges.
  */
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
new file mode 100644
index 0000000..3f23298
--- /dev/null
+++ b/drivers/acpi/acpica/Makefile
@@ -0,0 +1,44 @@
+#
+# Makefile for ACPICA Core interpreter
+#
+
+ccflags-y			:= -Os
+ccflags-$(CONFIG_ACPI_DEBUG)	+= -DACPI_DEBUG_OUTPUT
+
+obj-y := dsfield.o   dsmthdat.o  dsopcode.o  dswexec.o  dswscope.o \
+	 dsmethod.o  dsobject.o  dsutils.o   dswload.o  dswstate.o \
+	 dsinit.o
+
+obj-y += evevent.o  evregion.o  evsci.o    evxfevnt.o \
+	 evmisc.o   evrgnini.o  evxface.o  evxfregn.o \
+	 evgpe.o    evgpeblk.o
+
+obj-y += exconfig.o  exfield.o  exnames.o   exoparg6.o  exresolv.o  exstorob.o\
+	 exconvrt.o  exfldio.o  exoparg1.o  exprep.o    exresop.o   exsystem.o\
+	 excreate.o  exmisc.o   exoparg2.o  exregion.o  exstore.o   exutils.o \
+	 exdump.o    exmutex.o  exoparg3.o  exresnte.o  exstoren.o
+
+obj-y += hwacpi.o  hwgpe.o  hwregs.o  hwsleep.o hwxface.o
+
+obj-$(ACPI_FUTURE_USAGE) += hwtimer.o
+
+obj-y += nsaccess.o  nsload.o    nssearch.o  nsxfeval.o \
+	 nsalloc.o   nseval.o    nsnames.o   nsutils.o   nsxfname.o \
+	 nsdump.o    nsinit.o    nsobject.o  nswalk.o    nsxfobj.o  \
+	 nsparse.o   nspredef.o
+
+obj-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
+
+obj-y += psargs.o    psparse.o  psloop.o pstree.o   pswalk.o  \
+	 psopcode.o  psscope.o  psutils.o  psxface.o
+
+obj-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \
+	 rscalc.o  rsirq.o  rsmemory.o  rsutils.o
+
+obj-$(ACPI_FUTURE_USAGE) += rsdump.o
+
+obj-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
+
+obj-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
+		utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
+		utstate.o utmutex.o utresrc.o
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
new file mode 100644
index 0000000..3b20786
--- /dev/null
+++ b/drivers/acpi/acpica/accommon.h
@@ -0,0 +1,63 @@
+/******************************************************************************
+ *
+ * Name: accommon.h - Common include files for generation of ACPICA source
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACCOMMON_H__
+#define __ACCOMMON_H__
+
+/*
+ * Common set of includes for all ACPICA source files.
+ * We put them here because we don't want to duplicate them
+ * in the source code again and again.
+ *
+ * Note: The order of these include files is important.
+ */
+#include "acconfig.h"		/* Global configuration constants */
+#include "acmacros.h"		/* C macros */
+#include "aclocal.h"		/* Internal data types */
+#include "acobject.h"		/* ACPI internal object */
+#include "acstruct.h"		/* Common structures */
+#include "acglobal.h"		/* All global variables */
+#include "achware.h"		/* Hardware defines and interfaces */
+#include "acutils.h"		/* Utility interfaces */
+
+#endif				/* __ACCOMMON_H__ */
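As a reading aid only: the include-order rule in the comment above means every source file in the new acpica/ directory starts the same way. A minimal sketch, with a made-up file and component name:

	/* hypothetical ACPICA source file, e.g. "exexample.c" */
	#include <acpi/acpi.h>		/* public ACPICA interfaces first */
	#include "accommon.h"		/* internal headers, in the required order */

	#define _COMPONENT		ACPI_EXECUTER
	ACPI_MODULE_NAME("exexample")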
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
new file mode 100644
index 0000000..e6777fb
--- /dev/null
+++ b/drivers/acpi/acpica/acconfig.h
@@ -0,0 +1,217 @@
+/******************************************************************************
+ *
+ * Name: acconfig.h - Global configuration constants
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef _ACCONFIG_H
+#define _ACCONFIG_H
+
+/******************************************************************************
+ *
+ * Configuration options
+ *
+ *****************************************************************************/
+
+/*
+ * ACPI_DEBUG_OUTPUT    - This switch enables all the debug facilities of the
+ *                        ACPI subsystem.  This includes the DEBUG_PRINT output
+ *                        statements.  When disabled, all DEBUG_PRINT
+ *                        statements are compiled out.
+ *
+ * ACPI_APPLICATION     - Use this switch if the subsystem is going to be run
+ *                        at the application level.
+ *
+ */
+
+/*
+ * OS name, used for the _OS object.  The _OS object is essentially obsolete,
+ * but there is a large base of ASL/AML code in existing machines that check
+ * for the string below.  The use of this string usually guarantees that
+ * the ASL will execute down the most tested code path.  Also, there is some
+ * code that will not execute the _OSI method unless _OS matches the string
+ * below.  Therefore, change this string at your own risk.
+ */
+#define ACPI_OS_NAME                    "Microsoft Windows NT"
+
+/* Maximum objects in the various object caches */
+
+#define ACPI_MAX_STATE_CACHE_DEPTH      96	/* State objects */
+#define ACPI_MAX_PARSE_CACHE_DEPTH      96	/* Parse tree objects */
+#define ACPI_MAX_EXTPARSE_CACHE_DEPTH   96	/* Parse tree objects */
+#define ACPI_MAX_OBJECT_CACHE_DEPTH     96	/* Interpreter operand objects */
+#define ACPI_MAX_NAMESPACE_CACHE_DEPTH  96	/* Namespace objects */
+
+/*
+ * Should the subsystem abort the loading of an ACPI table if the
+ * table checksum is incorrect?
+ */
+#define ACPI_CHECKSUM_ABORT             FALSE
+
+/******************************************************************************
+ *
+ * Subsystem Constants
+ *
+ *****************************************************************************/
+
+/* Version of ACPI supported */
+
+#define ACPI_CA_SUPPORT_LEVEL           3
+
+/* Maximum count for a semaphore object */
+
+#define ACPI_MAX_SEMAPHORE_COUNT        256
+
+/* Maximum object reference count (detects object deletion issues) */
+
+#define ACPI_MAX_REFERENCE_COUNT        0x1000
+
+/* Size of cached memory mapping for system memory operation region */
+
+#define ACPI_SYSMEM_REGION_WINDOW_SIZE  4096
+
+/* owner_id tracking. 8 entries allows for 255 owner_ids */
+
+#define ACPI_NUM_OWNERID_MASKS          8
+
+/* Size of the root table array is increased by this increment */
+
+#define ACPI_ROOT_TABLE_SIZE_INCREMENT  4
+
+/* Maximum number of While() loop iterations before forced abort */
+
+#define ACPI_MAX_LOOP_ITERATIONS        0xFFFF
+
+/******************************************************************************
+ *
+ * ACPI Specification constants (Do not change unless the specification changes)
+ *
+ *****************************************************************************/
+
+/* Number of distinct GPE register blocks and register width */
+
+#define ACPI_MAX_GPE_BLOCKS             2
+#define ACPI_GPE_REGISTER_WIDTH         8
+
+/* Method info (in WALK_STATE), containing local variables and arguments */
+
+#define ACPI_METHOD_NUM_LOCALS          8
+#define ACPI_METHOD_MAX_LOCAL           7
+
+#define ACPI_METHOD_NUM_ARGS            7
+#define ACPI_METHOD_MAX_ARG             6
+
+/* Length of _HID, _UID, _CID, and UUID values */
+
+#define ACPI_DEVICE_ID_LENGTH           0x09
+#define ACPI_MAX_CID_LENGTH             48
+#define ACPI_UUID_LENGTH                16
+
+/*
+ * Operand Stack (in WALK_STATE), Must be large enough to contain METHOD_MAX_ARG
+ */
+#define ACPI_OBJ_NUM_OPERANDS           8
+#define ACPI_OBJ_MAX_OPERAND            7
+
+/* Number of elements in the Result Stack frame, can be an arbitrary value */
+
+#define ACPI_RESULTS_FRAME_OBJ_NUM      8
+
+/*
+ * Maximum number of elements the Result Stack can contain;
+ * it may be an arbitrary value not exceeding the types of
+ * result_size and result_count (now u8).
+ */
+#define ACPI_RESULTS_OBJ_NUM_MAX        255
+
+/* Names within the namespace are 4 bytes long */
+
+#define ACPI_NAME_SIZE                  4
+#define ACPI_PATH_SEGMENT_LENGTH        5	/* 4 chars for name + 1 char for separator */
+#define ACPI_PATH_SEPARATOR             '.'
+
+/* Sizes for ACPI table headers */
+
+#define ACPI_OEM_ID_SIZE                6
+#define ACPI_OEM_TABLE_ID_SIZE          8
+
+/* Constants used in searching for the RSDP in low memory */
+
+#define ACPI_EBDA_PTR_LOCATION          0x0000040E	/* Physical Address */
+#define ACPI_EBDA_PTR_LENGTH            2
+#define ACPI_EBDA_WINDOW_SIZE           1024
+#define ACPI_HI_RSDP_WINDOW_BASE        0x000E0000	/* Physical Address */
+#define ACPI_HI_RSDP_WINDOW_SIZE        0x00020000
+#define ACPI_RSDP_SCAN_STEP             16
+
+/* Operation regions */
+
+#define ACPI_NUM_PREDEFINED_REGIONS     8
+#define ACPI_USER_REGION_BEGIN          0x80
+
+/* Maximum space_ids for Operation Regions */
+
+#define ACPI_MAX_ADDRESS_SPACE          255
+
+/* Array sizes.  Used for range checking also */
+
+#define ACPI_MAX_MATCH_OPCODE           5
+
+/* RSDP checksums */
+
+#define ACPI_RSDP_CHECKSUM_LENGTH       20
+#define ACPI_RSDP_XCHECKSUM_LENGTH      36
+
+/* SMBus bidirectional buffer size */
+
+#define ACPI_SMBUS_BUFFER_SIZE          34
+
+/******************************************************************************
+ *
+ * ACPI AML Debugger
+ *
+ *****************************************************************************/
+
+#define ACPI_DEBUGGER_MAX_ARGS          8	/* Must be max method args + 1 */
+
+#define ACPI_DEBUGGER_COMMAND_PROMPT    '-'
+#define ACPI_DEBUGGER_EXECUTE_PROMPT    '%'
+
+#endif				/* _ACCONFIG_H */
diff --git a/include/acpi/acdebug.h b/drivers/acpi/acpica/acdebug.h
similarity index 100%
rename from include/acpi/acdebug.h
rename to drivers/acpi/acpica/acdebug.h
diff --git a/include/acpi/acdispat.h b/drivers/acpi/acpica/acdispat.h
similarity index 100%
rename from include/acpi/acdispat.h
rename to drivers/acpi/acpica/acdispat.h
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
new file mode 100644
index 0000000..07e2013
--- /dev/null
+++ b/drivers/acpi/acpica/acevents.h
@@ -0,0 +1,218 @@
+/******************************************************************************
+ *
+ * Name: acevents.h - Event subcomponent prototypes and defines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACEVENTS_H__
+#define __ACEVENTS_H__
+
+/*
+ * evevent
+ */
+acpi_status acpi_ev_initialize_events(void);
+
+acpi_status acpi_ev_install_xrupt_handlers(void);
+
+acpi_status acpi_ev_install_fadt_gpes(void);
+
+u32 acpi_ev_fixed_event_detect(void);
+
+/*
+ * evmisc
+ */
+u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node);
+
+acpi_status acpi_ev_acquire_global_lock(u16 timeout);
+
+acpi_status acpi_ev_release_global_lock(void);
+
+acpi_status acpi_ev_init_global_lock_handler(void);
+
+u32 acpi_ev_get_gpe_number_index(u32 gpe_number);
+
+acpi_status
+acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
+			     u32 notify_value);
+
+/*
+ * evgpe - GPE handling and dispatch
+ */
+acpi_status
+acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
+				u8 type);
+
+acpi_status
+acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
+		   u8 write_to_hardware);
+
+acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
+
+struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
+						       u32 gpe_number);
+
+/*
+ * evgpeblk
+ */
+u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
+
+acpi_status
+acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
+
+acpi_status
+acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+			    struct acpi_gpe_block_info *gpe_block,
+			    void *context);
+
+acpi_status
+acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
+			 struct acpi_generic_address *gpe_block_address,
+			 u32 register_count,
+			 u8 gpe_block_base_number,
+			 u32 interrupt_number,
+			 struct acpi_gpe_block_info **return_gpe_block);
+
+acpi_status
+acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
+			     struct acpi_gpe_block_info *gpe_block);
+
+acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block);
+
+u32
+acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
+		     u32 gpe_number);
+
+u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
+
+acpi_status
+acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type);
+
+acpi_status
+acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info);
+
+acpi_status acpi_ev_gpe_initialize(void);
+
+/*
+ * evregion - Address Space handling
+ */
+acpi_status acpi_ev_install_region_handlers(void);
+
+acpi_status acpi_ev_initialize_op_regions(void);
+
+acpi_status
+acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+			       u32 function,
+			       acpi_physical_address address,
+			       u32 bit_width, acpi_integer * value);
+
+acpi_status
+acpi_ev_attach_region(union acpi_operand_object *handler_obj,
+		      union acpi_operand_object *region_obj,
+		      u8 acpi_ns_is_locked);
+
+void
+acpi_ev_detach_region(union acpi_operand_object *region_obj,
+		      u8 acpi_ns_is_locked);
+
+acpi_status
+acpi_ev_install_space_handler(struct acpi_namespace_node *node,
+			      acpi_adr_space_type space_id,
+			      acpi_adr_space_handler handler,
+			      acpi_adr_space_setup setup, void *context);
+
+acpi_status
+acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
+			    acpi_adr_space_type space_id);
+
+acpi_status
+acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function);
+
+/*
+ * evregini - Region initialization and setup
+ */
+acpi_status
+acpi_ev_system_memory_region_setup(acpi_handle handle,
+				   u32 function,
+				   void *handler_context,
+				   void **region_context);
+
+acpi_status
+acpi_ev_io_space_region_setup(acpi_handle handle,
+			      u32 function,
+			      void *handler_context, void **region_context);
+
+acpi_status
+acpi_ev_pci_config_region_setup(acpi_handle handle,
+				u32 function,
+				void *handler_context, void **region_context);
+
+acpi_status
+acpi_ev_cmos_region_setup(acpi_handle handle,
+			  u32 function,
+			  void *handler_context, void **region_context);
+
+acpi_status
+acpi_ev_pci_bar_region_setup(acpi_handle handle,
+			     u32 function,
+			     void *handler_context, void **region_context);
+
+acpi_status
+acpi_ev_default_region_setup(acpi_handle handle,
+			     u32 function,
+			     void *handler_context, void **region_context);
+
+acpi_status
+acpi_ev_initialize_region(union acpi_operand_object *region_obj,
+			  u8 acpi_ns_locked);
+
+/*
+ * evsci - SCI (System Control Interrupt) handling/dispatch
+ */
+u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context);
+
+u32 acpi_ev_install_sci_handler(void);
+
+acpi_status acpi_ev_remove_sci_handler(void);
+
+u32 acpi_ev_initialize_sCI(u32 program_sCI);
+
+void acpi_ev_terminate(void);
+
+#endif				/* __ACEVENTS_H__  */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
new file mode 100644
index 0000000..ddb40f5
--- /dev/null
+++ b/drivers/acpi/acpica/acglobal.h
@@ -0,0 +1,394 @@
+/******************************************************************************
+ *
+ * Name: acglobal.h - Declarations for global variables
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACGLOBAL_H__
+#define __ACGLOBAL_H__
+
+/*
+ * Ensure that the globals are actually defined and initialized only once.
+ *
+ * The use of these macros allows a single list of globals (here) in order
+ * to simplify maintenance of the code.
+ */
+#ifdef DEFINE_ACPI_GLOBALS
+#define ACPI_EXTERN
+#define ACPI_INIT_GLOBAL(a,b) a=b
+#else
+#define ACPI_EXTERN extern
+#define ACPI_INIT_GLOBAL(a,b) a
+#endif
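The intent of this macro pair is the classic single-definition idiom: exactly one translation unit defines DEFINE_ACPI_GLOBALS before pulling in the headers (utglobal.c traditionally plays that role), so the list below produces definitions with initializers there and plain extern declarations everywhere else. A minimal sketch of the idiom, assuming nothing beyond what this header declares:

	/* in exactly one source file */
	#define DEFINE_ACPI_GLOBALS
	#include "accommon.h"	/* ACPI_EXTERN expands to nothing, initializers kept */

	/* in every other source file */
	#include "accommon.h"	/* ACPI_EXTERN expands to extern, initializers dropped */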
+
+/*****************************************************************************
+ *
+ * Runtime configuration (static defaults that can be overridden at runtime)
+ *
+ ****************************************************************************/
+
+/*
+ * Enable "slack" in the AML interpreter?  Default is FALSE, and the
+ * interpreter strictly follows the ACPI specification.  Setting to TRUE
+ * allows the interpreter to ignore certain errors and/or bad AML constructs.
+ *
+ * Currently, these features are enabled by this flag:
+ *
+ * 1) Allow "implicit return" of last value in a control method
+ * 2) Allow access beyond the end of an operation region
+ * 3) Allow access to uninitialized locals/args (auto-init to integer 0)
+ * 4) Allow ANY object type to be a source operand for the Store() operator
+ * 5) Allow unresolved references (invalid target name) in package objects
+ * 6) Enable warning messages for behavior that is not ACPI spec compliant
+ */
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE);
+
+/*
+ * Automatically serialize ALL control methods? Default is FALSE, meaning
+ * to use the Serialized/not_serialized method flags on a per method basis.
+ * Only change this if the ASL code is poorly written and cannot handle
+ * reentrancy even though methods are marked "NotSerialized".
+ */
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_all_methods_serialized, FALSE);
+
+/*
+ * Create the predefined _OSI method in the namespace? Default is TRUE
+ * because ACPI CA is fully compatible with other ACPI implementations.
+ * Changing this will revert ACPI CA (and machine ASL) to pre-OSI behavior.
+ */
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE);
+
+/*
+ * Disable wakeup GPEs during runtime? Default is TRUE because WAKE and
+ * RUNTIME GPEs should never be shared, and WAKE GPEs should typically only
+ * be enabled just before going to sleep.
+ */
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
+
+/*
+ * Optionally use default values for the ACPI register widths. Set this to
+ * TRUE to use the defaults, if an FADT contains incorrect widths/lengths.
+ */
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
+
+/*****************************************************************************
+ *
+ * Debug support
+ *
+ ****************************************************************************/
+
+/* Runtime configuration of debug print levels */
+
+extern u32 acpi_dbg_level;
+extern u32 acpi_dbg_layer;
+
+/* Procedure nesting level for debug output */
+
+extern u32 acpi_gbl_nesting_level;
+
+/* Support for dynamic control method tracing mechanism */
+
+ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
+ACPI_EXTERN u32 acpi_gbl_original_dbg_layer;
+ACPI_EXTERN acpi_name acpi_gbl_trace_method_name;
+ACPI_EXTERN u32 acpi_gbl_trace_dbg_level;
+ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
+ACPI_EXTERN u32 acpi_gbl_trace_flags;
+
+/*****************************************************************************
+ *
+ * ACPI Table globals
+ *
+ ****************************************************************************/
+
+/*
+ * acpi_gbl_root_table_list is the master list of ACPI tables found in the
+ * RSDT/XSDT.
+ *
+ * acpi_gbl_FADT is a local copy of the FADT, converted to a common format.
+ */
+ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list;
+ACPI_EXTERN struct acpi_table_fadt acpi_gbl_FADT;
+ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;
+
+/* These addresses are calculated from FADT address values */
+
+ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
+ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
+
+/*
+ * Handle both ACPI 1.0 and ACPI 2.0 Integer widths. The integer width is
+ * determined by the revision of the DSDT: If the DSDT revision is less than
+ * 2, use only the lower 32 bits of the internal 64-bit Integer.
+ */
+ACPI_EXTERN u8 acpi_gbl_integer_bit_width;
+ACPI_EXTERN u8 acpi_gbl_integer_byte_width;
+ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
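The three width globals are tied together by the DSDT-revision rule in the comment above; a sketch of the relationship (the helper name is invented, the real update happens in the ACPICA utilities):

	static void example_set_integer_width(u8 dsdt_revision)
	{
		acpi_gbl_integer_bit_width = (dsdt_revision < 2) ? 32 : 64;
		acpi_gbl_integer_byte_width = acpi_gbl_integer_bit_width / 8;	/* 4 or 8 */
		acpi_gbl_integer_nybble_width = acpi_gbl_integer_bit_width / 4;	/* 8 or 16 */
	}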
+
+/*****************************************************************************
+ *
+ * Mutual exclusion within the ACPICA subsystem
+ *
+ ****************************************************************************/
+
+/*
+ * Predefined mutex objects. This array contains the
+ * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs.
+ * (The table maps local handles to the real OS handles)
+ */
+ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
+
+/*
+ * Global lock mutex is an actual AML mutex object
+ * Global lock semaphore works in conjunction with the HW global lock
+ */
+ACPI_EXTERN union acpi_operand_object *acpi_gbl_global_lock_mutex;
+ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
+ACPI_EXTERN u16 acpi_gbl_global_lock_handle;
+ACPI_EXTERN u8 acpi_gbl_global_lock_acquired;
+ACPI_EXTERN u8 acpi_gbl_global_lock_present;
+
+/*
+ * Spinlocks are used for interfaces that may be called at
+ * interrupt level
+ */
+ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock;	/* For GPE data structs and registers */
+ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock;	/* For ACPI H/W except GPE registers */
+#define acpi_gbl_gpe_lock	&_acpi_gbl_gpe_lock
+#define acpi_gbl_hardware_lock	&_acpi_gbl_hardware_lock
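Since these two locks guard data that may also be touched from interrupt context, callers are expected to take them through the OSL spinlock wrappers rather than the raw spinlock_t; a sketch of the usual pattern, assuming the acpi_os_acquire_lock()/acpi_os_release_lock() OSL helpers:

	acpi_cpu_flags flags;

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	/* ... manipulate GPE data structures or registers ... */
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);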
+
+/*****************************************************************************
+ *
+ * Miscellaneous globals
+ *
+ ****************************************************************************/
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+
+/* Lists for tracking memory allocations */
+
+ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
+ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
+ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats;
+#endif
+
+/* Object caches */
+
+ACPI_EXTERN acpi_cache_t *acpi_gbl_namespace_cache;
+ACPI_EXTERN acpi_cache_t *acpi_gbl_state_cache;
+ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_cache;
+ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_ext_cache;
+ACPI_EXTERN acpi_cache_t *acpi_gbl_operand_cache;
+
+/* Global handlers */
+
+ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_device_notify;
+ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_system_notify;
+ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;
+ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
+ACPI_EXTERN acpi_tbl_handler acpi_gbl_table_handler;
+ACPI_EXTERN void *acpi_gbl_table_handler_context;
+ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
+
+/* Owner ID support */
+
+ACPI_EXTERN u32 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS];
+ACPI_EXTERN u8 acpi_gbl_last_owner_id_index;
+ACPI_EXTERN u8 acpi_gbl_next_owner_id_offset;
+
+/* Misc */
+
+ACPI_EXTERN u32 acpi_gbl_original_mode;
+ACPI_EXTERN u32 acpi_gbl_rsdp_original_location;
+ACPI_EXTERN u32 acpi_gbl_ns_lookup_count;
+ACPI_EXTERN u32 acpi_gbl_ps_find_count;
+ACPI_EXTERN u16 acpi_gbl_pm1_enable_register_save;
+ACPI_EXTERN u8 acpi_gbl_debugger_configuration;
+ACPI_EXTERN u8 acpi_gbl_step_to_next_call;
+ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
+ACPI_EXTERN u8 acpi_gbl_events_initialized;
+ACPI_EXTERN u8 acpi_gbl_system_awake_and_running;
+
+#ifndef DEFINE_ACPI_GLOBALS
+
+/* Other miscellaneous */
+
+extern u8 acpi_gbl_shutdown;
+extern u32 acpi_gbl_startup_flags;
+extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT];
+extern const char *acpi_gbl_highest_dstate_names[4];
+extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
+extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
+
+#endif
+
+/* Exception codes */
+
+extern char const *acpi_gbl_exception_names_env[];
+extern char const *acpi_gbl_exception_names_pgm[];
+extern char const *acpi_gbl_exception_names_tbl[];
+extern char const *acpi_gbl_exception_names_aml[];
+extern char const *acpi_gbl_exception_names_ctrl[];
+
+/*****************************************************************************
+ *
+ * Namespace globals
+ *
+ ****************************************************************************/
+
+#if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)
+#define NUM_PREDEFINED_NAMES            10
+#else
+#define NUM_PREDEFINED_NAMES            9
+#endif
+
+ACPI_EXTERN struct acpi_namespace_node acpi_gbl_root_node_struct;
+ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_root_node;
+ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_fadt_gpe_device;
+
+extern const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES];
+extern const struct acpi_predefined_names
+    acpi_gbl_pre_defined_names[NUM_PREDEFINED_NAMES];
+
+#ifdef ACPI_DEBUG_OUTPUT
+ACPI_EXTERN u32 acpi_gbl_current_node_count;
+ACPI_EXTERN u32 acpi_gbl_current_node_size;
+ACPI_EXTERN u32 acpi_gbl_max_concurrent_node_count;
+ACPI_EXTERN acpi_size *acpi_gbl_entry_stack_pointer;
+ACPI_EXTERN acpi_size *acpi_gbl_lowest_stack_pointer;
+ACPI_EXTERN u32 acpi_gbl_deepest_nesting;
+#endif
+
+/*****************************************************************************
+ *
+ * Interpreter globals
+ *
+ ****************************************************************************/
+
+ACPI_EXTERN struct acpi_thread_state *acpi_gbl_current_walk_list;
+
+/* Control method single step flag */
+
+ACPI_EXTERN u8 acpi_gbl_cm_single_step;
+
+/*****************************************************************************
+ *
+ * Hardware globals
+ *
+ ****************************************************************************/
+
+extern struct acpi_bit_register_info
+    acpi_gbl_bit_register_info[ACPI_NUM_BITREG];
+ACPI_EXTERN u8 acpi_gbl_sleep_type_a;
+ACPI_EXTERN u8 acpi_gbl_sleep_type_b;
+
+/*****************************************************************************
+ *
+ * Event and GPE globals
+ *
+ ****************************************************************************/
+
+extern struct acpi_fixed_event_info
+    acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
+ACPI_EXTERN struct acpi_fixed_event_handler
+    acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS];
+ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
+ACPI_EXTERN struct acpi_gpe_block_info
+*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
+ACPI_EXTERN u32 acpi_current_gpe_count;
+
+/*****************************************************************************
+ *
+ * Debugger globals
+ *
+ ****************************************************************************/
+
+ACPI_EXTERN u8 acpi_gbl_db_output_flags;
+
+#ifdef ACPI_DISASSEMBLER
+
+ACPI_EXTERN u8 acpi_gbl_db_opt_disasm;
+ACPI_EXTERN u8 acpi_gbl_db_opt_verbose;
+#endif
+
+#ifdef ACPI_DEBUGGER
+
+extern u8 acpi_gbl_method_executing;
+extern u8 acpi_gbl_abort_method;
+extern u8 acpi_gbl_db_terminate_threads;
+
+ACPI_EXTERN int optind;
+ACPI_EXTERN char *optarg;
+
+ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
+ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
+ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
+
+ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
+ACPI_EXTERN char acpi_gbl_db_line_buf[80];
+ACPI_EXTERN char acpi_gbl_db_parsed_buf[80];
+ACPI_EXTERN char acpi_gbl_db_scope_buf[40];
+ACPI_EXTERN char acpi_gbl_db_debug_filename[40];
+ACPI_EXTERN u8 acpi_gbl_db_output_to_file;
+ACPI_EXTERN char *acpi_gbl_db_buffer;
+ACPI_EXTERN char *acpi_gbl_db_filename;
+ACPI_EXTERN u32 acpi_gbl_db_debug_level;
+ACPI_EXTERN u32 acpi_gbl_db_console_debug_level;
+ACPI_EXTERN struct acpi_table_header *acpi_gbl_db_table_ptr;
+ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_db_scope_node;
+
+/*
+ * Statistic globals
+ */
+ACPI_EXTERN u16 acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX + 1];
+ACPI_EXTERN u16 acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX + 1];
+ACPI_EXTERN u16 acpi_gbl_obj_type_count_misc;
+ACPI_EXTERN u16 acpi_gbl_node_type_count_misc;
+ACPI_EXTERN u32 acpi_gbl_num_nodes;
+ACPI_EXTERN u32 acpi_gbl_num_objects;
+
+ACPI_EXTERN u32 acpi_gbl_size_of_parse_tree;
+ACPI_EXTERN u32 acpi_gbl_size_of_method_trees;
+ACPI_EXTERN u32 acpi_gbl_size_of_node_entries;
+ACPI_EXTERN u32 acpi_gbl_size_of_acpi_objects;
+
+#endif				/* ACPI_DEBUGGER */
+
+#endif				/* __ACGLOBAL_H__ */
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
new file mode 100644
index 0000000..58c69dc
--- /dev/null
+++ b/drivers/acpi/acpica/achware.h
@@ -0,0 +1,119 @@
+/******************************************************************************
+ *
+ * Name: achware.h -- hardware specific interfaces
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACHWARE_H__
+#define __ACHWARE_H__
+
+/* Values for the _SST predefined method */
+
+#define ACPI_SST_INDICATOR_OFF  0
+#define ACPI_SST_WORKING        1
+#define ACPI_SST_WAKING         2
+#define ACPI_SST_SLEEPING       3
+#define ACPI_SST_SLEEP_CONTEXT  4
+
+/*
+ * hwacpi - high level functions
+ */
+acpi_status acpi_hw_set_mode(u32 mode);
+
+u32 acpi_hw_get_mode(void);
+
+/*
+ * hwregs - ACPI Register I/O
+ */
+struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id);
+
+acpi_status
+acpi_hw_register_read(u32 register_id, u32 * return_value);
+
+acpi_status acpi_hw_register_write(u32 register_id, u32 value);
+
+acpi_status acpi_hw_clear_acpi_status(void);
+
+/*
+ * hwgpe - GPE support
+ */
+acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
+
+acpi_status
+acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info);
+
+acpi_status
+acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+			  struct acpi_gpe_block_info *gpe_block, void *context);
+
+acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info *gpe_event_info);
+
+acpi_status
+acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+			struct acpi_gpe_block_info *gpe_block, void *context);
+
+acpi_status
+acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
+		       acpi_event_status * event_status);
+
+acpi_status acpi_hw_disable_all_gpes(void);
+
+acpi_status acpi_hw_enable_all_runtime_gpes(void);
+
+acpi_status acpi_hw_enable_all_wakeup_gpes(void);
+
+acpi_status
+acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+				 struct acpi_gpe_block_info *gpe_block,
+				 void *context);
+
+#ifdef	ACPI_FUTURE_USAGE
+/*
+ * hwtimer - ACPI Timer prototypes
+ */
+acpi_status acpi_get_timer_resolution(u32 * resolution);
+
+acpi_status acpi_get_timer(u32 * ticks);
+
+acpi_status
+acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed);
+#endif				/* ACPI_FUTURE_USAGE */
+
+#endif				/* __ACHWARE_H__ */
diff --git a/include/acpi/acinterp.h b/drivers/acpi/acpica/acinterp.h
similarity index 100%
rename from include/acpi/acinterp.h
rename to drivers/acpi/acpica/acinterp.h
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
new file mode 100644
index 0000000..492d027
--- /dev/null
+++ b/drivers/acpi/acpica/aclocal.h
@@ -0,0 +1,990 @@
+/******************************************************************************
+ *
+ * Name: aclocal.h - Internal data types used across the ACPI subsystem
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACLOCAL_H__
+#define __ACLOCAL_H__
+
+/* acpisrc:struct_defs -- for acpisrc conversion */
+
+#define ACPI_SERIALIZED                 0xFF
+
+typedef u32 acpi_mutex_handle;
+#define ACPI_GLOBAL_LOCK                (acpi_semaphore) (-1)
+
+/* Total number of aml opcodes defined */
+
+#define AML_NUM_OPCODES                 0x7F
+
+/* Forward declarations */
+
+struct acpi_walk_state;
+struct acpi_obj_mutex;
+union acpi_parse_object;
+
+/*****************************************************************************
+ *
+ * Mutex typedefs and structs
+ *
+ ****************************************************************************/
+
+/*
+ * Predefined handles for the mutex objects used within the subsystem
+ * All mutex objects are automatically created by acpi_ut_mutex_initialize.
+ *
+ * The acquire/release ordering protocol is implied via this list. Mutexes
+ * with a lower value must be acquired before mutexes with a higher value.
+ *
+ * NOTE: any changes here must be reflected in the acpi_gbl_mutex_names
+ * table below also!
+ */
+#define ACPI_MTX_INTERPRETER            0	/* AML Interpreter, main lock */
+#define ACPI_MTX_NAMESPACE              1	/* ACPI Namespace */
+#define ACPI_MTX_TABLES                 2	/* Data for ACPI tables */
+#define ACPI_MTX_EVENTS                 3	/* Data for ACPI events */
+#define ACPI_MTX_CACHES                 4	/* Internal caches, general purposes */
+#define ACPI_MTX_MEMORY                 5	/* Debug memory tracking lists */
+#define ACPI_MTX_DEBUG_CMD_COMPLETE     6	/* AML debugger */
+#define ACPI_MTX_DEBUG_CMD_READY        7	/* AML debugger */
+
+#define ACPI_MAX_MUTEX                  7
+#define ACPI_NUM_MUTEX                  ACPI_MAX_MUTEX+1
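To make the ordering protocol concrete: code that needs both the namespace and table mutexes must take ACPI_MTX_NAMESPACE (1) before ACPI_MTX_TABLES (2) and release them in reverse order. An illustrative sketch only, assuming the acpi_ut_acquire_mutex()/acpi_ut_release_mutex() helpers from acutils.h:

	static acpi_status example_touch_namespace_and_tables(void)
	{
		acpi_status status;

		status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
		if (ACPI_FAILURE(status))
			return status;

		status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
		if (ACPI_SUCCESS(status)) {
			/* ... work that needs both locks ... */
			acpi_ut_release_mutex(ACPI_MTX_TABLES);
		}

		acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
		return status;
	}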
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+#ifdef DEFINE_ACPI_GLOBALS
+
+/* Debug names for the mutexes above */
+
+static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
+	"ACPI_MTX_Interpreter",
+	"ACPI_MTX_Namespace",
+	"ACPI_MTX_Tables",
+	"ACPI_MTX_Events",
+	"ACPI_MTX_Caches",
+	"ACPI_MTX_Memory",
+	"ACPI_MTX_CommandComplete",
+	"ACPI_MTX_CommandReady"
+};
+
+#endif
+#endif
+
+/*
+ * Predefined handles for spinlocks used within the subsystem.
+ * These spinlocks are created by acpi_ut_mutex_initialize
+ */
+#define ACPI_LOCK_GPES                  0
+#define ACPI_LOCK_HARDWARE              1
+
+#define ACPI_MAX_LOCK                   1
+#define ACPI_NUM_LOCK                   ACPI_MAX_LOCK+1
+
+/* This Thread ID means that the mutex is not in use (unlocked) */
+
+#define ACPI_MUTEX_NOT_ACQUIRED         (acpi_thread_id) 0
+
+/* Table for the global mutexes */
+
+struct acpi_mutex_info {
+	acpi_mutex mutex;
+	u32 use_count;
+	acpi_thread_id thread_id;
+};
+
+/* Lock flag parameter for various interfaces */
+
+#define ACPI_MTX_DO_NOT_LOCK            0
+#define ACPI_MTX_LOCK                   1
+
+/* Field access granularities */
+
+#define ACPI_FIELD_BYTE_GRANULARITY     1
+#define ACPI_FIELD_WORD_GRANULARITY     2
+#define ACPI_FIELD_DWORD_GRANULARITY    4
+#define ACPI_FIELD_QWORD_GRANULARITY    8
+
+#define ACPI_ENTRY_NOT_FOUND            NULL
+
+/*****************************************************************************
+ *
+ * Namespace typedefs and structs
+ *
+ ****************************************************************************/
+
+/* Operational modes of the AML interpreter/scanner */
+
+typedef enum {
+	ACPI_IMODE_LOAD_PASS1 = 0x01,
+	ACPI_IMODE_LOAD_PASS2 = 0x02,
+	ACPI_IMODE_EXECUTE = 0x03
+} acpi_interpreter_mode;
+
+/*
+ * The Namespace Node describes a named object that appears in the AML.
+ * descriptor_type is used to differentiate between internal descriptors.
+ *
+ * The node is optimized for both 32-bit and 64-bit platforms:
+ * 20 bytes for the 32-bit case, 32 bytes for the 64-bit case.
+ *
+ * Note: The descriptor_type and Type fields must appear in the identical
+ * position in both the struct acpi_namespace_node and union acpi_operand_object
+ * structures.
+ */
+struct acpi_namespace_node {
+	union acpi_operand_object *object;	/* Interpreter object */
+	u8 descriptor_type;	/* Differentiate object descriptor types */
+	u8 type;		/* ACPI Type associated with this name */
+	u8 flags;		/* Miscellaneous flags */
+	acpi_owner_id owner_id;	/* Node creator */
+	union acpi_name_union name;	/* ACPI Name, always 4 chars per ACPI spec */
+	struct acpi_namespace_node *child;	/* First child */
+	struct acpi_namespace_node *peer;	/* Peer. Parent if ANOBJ_END_OF_PEER_LIST set */
+
+	/*
+	 * The following fields are used by the ASL compiler and disassembler only
+	 */
+#ifdef ACPI_LARGE_NAMESPACE_NODE
+	union acpi_parse_object *op;
+	u32 value;
+	u32 length;
+#endif
+};
+
+/* Namespace Node flags */
+
+#define ANOBJ_END_OF_PEER_LIST          0x01	/* End-of-list, Peer field points to parent */
+#define ANOBJ_TEMPORARY                 0x02	/* Node is created by a method and is temporary */
+#define ANOBJ_METHOD_ARG                0x04	/* Node is a method argument */
+#define ANOBJ_METHOD_LOCAL              0x08	/* Node is a method local */
+#define ANOBJ_SUBTREE_HAS_INI           0x10	/* Used to optimize device initialization */
+#define ANOBJ_EVALUATED                 0x20	/* Set on first evaluation of node */
+
+#define ANOBJ_IS_EXTERNAL               0x08	/* i_aSL only: This object created via External() */
+#define ANOBJ_METHOD_NO_RETVAL          0x10	/* i_aSL only: Method has no return value */
+#define ANOBJ_METHOD_SOME_NO_RETVAL     0x20	/* i_aSL only: Method has at least one return value */
+#define ANOBJ_IS_BIT_OFFSET             0x40	/* i_aSL only: Reference is a bit offset */
+#define ANOBJ_IS_REFERENCED             0x80	/* i_aSL only: Object was referenced */
+
+/* One internal RSDT for table management */
+
+struct acpi_internal_rsdt {
+	struct acpi_table_desc *tables;
+	u32 count;
+	u32 size;
+	u8 flags;
+};
+
+/* Flags for above */
+
+#define ACPI_ROOT_ORIGIN_UNKNOWN        (0)	/* ~ORIGIN_ALLOCATED */
+#define ACPI_ROOT_ORIGIN_ALLOCATED      (1)
+#define ACPI_ROOT_ALLOW_RESIZE          (2)
+
+/* Predefined (fixed) table indexes */
+
+#define ACPI_TABLE_INDEX_DSDT           (0)
+#define ACPI_TABLE_INDEX_FACS           (1)
+
+struct acpi_find_context {
+	char *search_for;
+	acpi_handle *list;
+	u32 *count;
+};
+
+struct acpi_ns_search_data {
+	struct acpi_namespace_node *node;
+};
+
+/* Object types used during package copies */
+
+#define ACPI_COPY_TYPE_SIMPLE           0
+#define ACPI_COPY_TYPE_PACKAGE          1
+
+/* Info structure used to convert external<->internal namestrings */
+
+struct acpi_namestring_info {
+	const char *external_name;
+	const char *next_external_char;
+	char *internal_name;
+	u32 length;
+	u32 num_segments;
+	u32 num_carats;
+	u8 fully_qualified;
+};
+
+/* Field creation info */
+
+struct acpi_create_field_info {
+	struct acpi_namespace_node *region_node;
+	struct acpi_namespace_node *field_node;
+	struct acpi_namespace_node *register_node;
+	struct acpi_namespace_node *data_register_node;
+	u32 bank_value;
+	u32 field_bit_position;
+	u32 field_bit_length;
+	u8 field_flags;
+	u8 attribute;
+	u8 field_type;
+};
+
+typedef
+acpi_status(*ACPI_INTERNAL_METHOD) (struct acpi_walk_state * walk_state);
+
+/*
+ * Bitmapped ACPI types.  Used internally only
+ */
+#define ACPI_BTYPE_ANY                  0x00000000
+#define ACPI_BTYPE_INTEGER              0x00000001
+#define ACPI_BTYPE_STRING               0x00000002
+#define ACPI_BTYPE_BUFFER               0x00000004
+#define ACPI_BTYPE_PACKAGE              0x00000008
+#define ACPI_BTYPE_FIELD_UNIT           0x00000010
+#define ACPI_BTYPE_DEVICE               0x00000020
+#define ACPI_BTYPE_EVENT                0x00000040
+#define ACPI_BTYPE_METHOD               0x00000080
+#define ACPI_BTYPE_MUTEX                0x00000100
+#define ACPI_BTYPE_REGION               0x00000200
+#define ACPI_BTYPE_POWER                0x00000400
+#define ACPI_BTYPE_PROCESSOR            0x00000800
+#define ACPI_BTYPE_THERMAL              0x00001000
+#define ACPI_BTYPE_BUFFER_FIELD         0x00002000
+#define ACPI_BTYPE_DDB_HANDLE           0x00004000
+#define ACPI_BTYPE_DEBUG_OBJECT         0x00008000
+#define ACPI_BTYPE_REFERENCE            0x00010000
+#define ACPI_BTYPE_RESOURCE             0x00020000
+
+#define ACPI_BTYPE_COMPUTE_DATA         (ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING | ACPI_BTYPE_BUFFER)
+
+#define ACPI_BTYPE_DATA                 (ACPI_BTYPE_COMPUTE_DATA  | ACPI_BTYPE_PACKAGE)
+#define ACPI_BTYPE_DATA_REFERENCE       (ACPI_BTYPE_DATA | ACPI_BTYPE_REFERENCE | ACPI_BTYPE_DDB_HANDLE)
+#define ACPI_BTYPE_DEVICE_OBJECTS       (ACPI_BTYPE_DEVICE | ACPI_BTYPE_THERMAL | ACPI_BTYPE_PROCESSOR)
+#define ACPI_BTYPE_OBJECTS_AND_REFS     0x0001FFFF	/* ARG or LOCAL */
+#define ACPI_BTYPE_ALL_OBJECTS          0x0000FFFF
+
+/*
+ * Information structure for ACPI predefined names.
+ * Each entry in the table contains the following items:
+ *
+ * Name                 - The ACPI reserved name
+ * param_count          - Number of arguments to the method
+ * expected_return_btypes - Allowed type(s) for the return value
+ */
+struct acpi_name_info {
+	char name[ACPI_NAME_SIZE];
+	u8 param_count;
+	u8 expected_btypes;
+};
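As an illustration of how an entry is encoded (a hypothetical example, not a row copied from the real predefined-name table), a zero-argument method expected to return an Integer would look like:

	static const struct acpi_name_info example_entry = {
		"_STA", 0, ACPI_RTYPE_INTEGER	/* ACPI_RTYPE_* bits are defined below */
	};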
+
+/*
+ * Secondary information structures for ACPI predefined objects that return
+ * package objects. This structure appears as the next entry in the table
+ * after the NAME_INFO structure above.
+ *
+ * The reason for this is to minimize the size of the predefined name table.
+ */
+
+/*
+ * Used for ACPI_PTYPE1_FIXED, ACPI_PTYPE1_VAR, ACPI_PTYPE2,
+ * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT
+ */
+struct acpi_package_info {
+	u8 type;
+	u8 object_type1;
+	u8 count1;
+	u8 object_type2;
+	u8 count2;
+	u8 reserved;
+};
+
+/* Used for ACPI_PTYPE2_FIXED */
+
+struct acpi_package_info2 {
+	u8 type;
+	u8 count;
+	u8 object_type[4];
+};
+
+/* Used for ACPI_PTYPE1_OPTION */
+
+struct acpi_package_info3 {
+	u8 type;
+	u8 count;
+	u8 object_type[2];
+	u8 tail_object_type;
+	u8 reserved;
+};
+
+union acpi_predefined_info {
+	struct acpi_name_info info;
+	struct acpi_package_info ret_info;
+	struct acpi_package_info2 ret_info2;
+	struct acpi_package_info3 ret_info3;
+};
+
+/*
+ * Bitmapped return value types
+ * Note: the actual data types must be contiguous, a loop in nspredef.c
+ * depends on this.
+ */
+#define ACPI_RTYPE_ANY                  0x00
+#define ACPI_RTYPE_NONE                 0x01
+#define ACPI_RTYPE_INTEGER              0x02
+#define ACPI_RTYPE_STRING               0x04
+#define ACPI_RTYPE_BUFFER               0x08
+#define ACPI_RTYPE_PACKAGE              0x10
+#define ACPI_RTYPE_REFERENCE            0x20
+#define ACPI_RTYPE_ALL                  0x3F
+
+#define ACPI_NUM_RTYPES                 5	/* Number of actual object types */
+
+/*****************************************************************************
+ *
+ * Event typedefs and structs
+ *
+ ****************************************************************************/
+
+/* Dispatch info for each GPE -- either a method or handler, cannot be both */
+
+struct acpi_handler_info {
+	acpi_event_handler address;	/* Address of handler, if any */
+	void *context;		/* Context to be passed to handler */
+	struct acpi_namespace_node *method_node;	/* Method node for this GPE level (saved) */
+};
+
+union acpi_gpe_dispatch_info {
+	struct acpi_namespace_node *method_node;	/* Method node for this GPE level */
+	struct acpi_handler_info *handler;
+};
+
+/*
+ * Information about a GPE, one per each GPE in an array.
+ * NOTE: Important to keep this struct as small as possible.
+ */
+struct acpi_gpe_event_info {
+	union acpi_gpe_dispatch_info dispatch;	/* Either Method or Handler */
+	struct acpi_gpe_register_info *register_info;	/* Backpointer to register info */
+	u8 flags;		/* Misc info about this GPE */
+	u8 gpe_number;		/* This GPE */
+};
+
+/* Information about a GPE register pair, one per each status/enable pair in an array */
+
+struct acpi_gpe_register_info {
+	struct acpi_generic_address status_address;	/* Address of status reg */
+	struct acpi_generic_address enable_address;	/* Address of enable reg */
+	u8 enable_for_wake;	/* GPEs to keep enabled when sleeping */
+	u8 enable_for_run;	/* GPEs to keep enabled when running */
+	u8 base_gpe_number;	/* Base GPE number for this register */
+};
+
+/*
+ * Information about a GPE register block, one per each installed block --
+ * GPE0, GPE1, and one per each installed GPE Block Device.
+ */
+struct acpi_gpe_block_info {
+	struct acpi_namespace_node *node;
+	struct acpi_gpe_block_info *previous;
+	struct acpi_gpe_block_info *next;
+	struct acpi_gpe_xrupt_info *xrupt_block;	/* Backpointer to interrupt block */
+	struct acpi_gpe_register_info *register_info;	/* One per GPE register pair */
+	struct acpi_gpe_event_info *event_info;	/* One for each GPE */
+	struct acpi_generic_address block_address;	/* Base address of the block */
+	u32 register_count;	/* Number of register pairs in block */
+	u8 block_base_number;	/* Base GPE number for this block */
+};
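The block is essentially a header over two parallel arrays: register_count status/enable pairs, each covering ACPI_GPE_REGISTER_WIDTH GPEs, with one event_info entry per GPE. A sketch of walking them (illustrative only, not code from this patch):

	static void example_walk_gpe_block(struct acpi_gpe_block_info *gpe_block)
	{
		u32 i, j;

		for (i = 0; i < gpe_block->register_count; i++) {
			struct acpi_gpe_register_info *reg =
			    &gpe_block->register_info[i];

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
				struct acpi_gpe_event_info *event =
				    &gpe_block->event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j];

				/* event->gpe_number == reg->base_gpe_number + j */
				(void)reg;
				(void)event;
			}
		}
	}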
+
+/* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */
+
+struct acpi_gpe_xrupt_info {
+	struct acpi_gpe_xrupt_info *previous;
+	struct acpi_gpe_xrupt_info *next;
+	struct acpi_gpe_block_info *gpe_block_list_head;	/* List of GPE blocks for this xrupt */
+	u32 interrupt_number;	/* System interrupt number */
+};
+
+struct acpi_gpe_walk_info {
+	struct acpi_namespace_node *gpe_device;
+	struct acpi_gpe_block_info *gpe_block;
+};
+
+struct acpi_gpe_device_info {
+	u32 index;
+	u32 next_block_base_index;
+	acpi_status status;
+	struct acpi_namespace_node *gpe_device;
+};
+
+typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+		struct acpi_gpe_block_info *gpe_block, void *context);
+
+/* Information about each particular fixed event */
+
+struct acpi_fixed_event_handler {
+	acpi_event_handler handler;	/* Address of handler. */
+	void *context;		/* Context to be passed to handler */
+};
+
+struct acpi_fixed_event_info {
+	u8 status_register_id;
+	u8 enable_register_id;
+	u16 status_bit_mask;
+	u16 enable_bit_mask;
+};
+
+/* Information used during field processing */
+
+struct acpi_field_info {
+	u8 skip_field;
+	u8 field_flag;
+	u32 pkg_length;
+};
+
+/*****************************************************************************
+ *
+ * Generic "state" object for stacks
+ *
+ ****************************************************************************/
+
+#define ACPI_CONTROL_NORMAL                  0xC0
+#define ACPI_CONTROL_CONDITIONAL_EXECUTING   0xC1
+#define ACPI_CONTROL_PREDICATE_EXECUTING     0xC2
+#define ACPI_CONTROL_PREDICATE_FALSE         0xC3
+#define ACPI_CONTROL_PREDICATE_TRUE          0xC4
+
+#define ACPI_STATE_COMMON \
+	void                            *next; \
+	u8                              descriptor_type; /* To differentiate various internal objs */\
+	u8                              flags; \
+	u16                             value; \
+	u16                             state;
+
+	/* There are 2 bytes available here until the next natural alignment boundary */
+
+struct acpi_common_state {
+ACPI_STATE_COMMON};
+
+/*
+ * Update state - used to traverse complex objects such as packages
+ */
+struct acpi_update_state {
+	ACPI_STATE_COMMON union acpi_operand_object *object;
+};
+
+/*
+ * Pkg state - used to traverse nested package structures
+ */
+struct acpi_pkg_state {
+	ACPI_STATE_COMMON u16 index;
+	union acpi_operand_object *source_object;
+	union acpi_operand_object *dest_object;
+	struct acpi_walk_state *walk_state;
+	void *this_target_obj;
+	u32 num_packages;
+};
+
+/*
+ * Control state - one per if/else and while construct.
+ * Allows nesting of these constructs.
+ */
+struct acpi_control_state {
+	ACPI_STATE_COMMON u16 opcode;
+	union acpi_parse_object *predicate_op;
+	u8 *aml_predicate_start;	/* Start of if/while predicate */
+	u8 *package_end;	/* End of if/while block */
+	u32 loop_count;		/* While() loop counter */
+};
+
+/*
+ * Scope state - current scope during namespace lookups
+ */
+struct acpi_scope_state {
+	ACPI_STATE_COMMON struct acpi_namespace_node *node;
+};
+
+struct acpi_pscope_state {
+	ACPI_STATE_COMMON u32 arg_count;	/* Number of fixed arguments */
+	union acpi_parse_object *op;	/* Current op being parsed */
+	u8 *arg_end;		/* Current argument end */
+	u8 *pkg_end;		/* Current package end */
+	u32 arg_list;		/* Next argument to parse */
+};
+
+/*
+ * Thread state - one per thread across multiple walk states.  Multiple walk
+ * states are created when there are nested control methods executing.
+ */
+struct acpi_thread_state {
+	ACPI_STATE_COMMON u8 current_sync_level;	/* Mutex Sync (nested acquire) level */
+	struct acpi_walk_state *walk_state_list;	/* Head of list of walk_states for this thread */
+	union acpi_operand_object *acquired_mutex_list;	/* List of all currently acquired mutexes */
+	acpi_thread_id thread_id;	/* Running thread ID */
+};
+
+/*
+ * Result values - used to accumulate the results of nested
+ * AML arguments
+ */
+struct acpi_result_values {
+	ACPI_STATE_COMMON
+	    union acpi_operand_object *obj_desc[ACPI_RESULTS_FRAME_OBJ_NUM];
+};
+
+typedef
+acpi_status(*acpi_parse_downwards) (struct acpi_walk_state * walk_state,
+				    union acpi_parse_object ** out_op);
+
+typedef acpi_status(*acpi_parse_upwards) (struct acpi_walk_state * walk_state);
+
+/*
+ * Notify info - used to pass info to the deferred notify
+ * handler/dispatcher.
+ */
+struct acpi_notify_info {
+	ACPI_STATE_COMMON struct acpi_namespace_node *node;
+	union acpi_operand_object *handler_obj;
+};
+
+/* Generic state is union of structs above */
+
+union acpi_generic_state {
+	struct acpi_common_state common;
+	struct acpi_control_state control;
+	struct acpi_update_state update;
+	struct acpi_scope_state scope;
+	struct acpi_pscope_state parse_scope;
+	struct acpi_pkg_state pkg;
+	struct acpi_thread_state thread;
+	struct acpi_result_values results;
+	struct acpi_notify_info notify;
+};
+
+/*****************************************************************************
+ *
+ * Interpreter typedefs and structs
+ *
+ ****************************************************************************/
+
+typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state);
+
+/*****************************************************************************
+ *
+ * Parser typedefs and structs
+ *
+ ****************************************************************************/
+
+/*
+ * AML opcode, name, and argument layout
+ */
+struct acpi_opcode_info {
+#if defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUG_OUTPUT)
+	char *name;		/* Opcode name (disassembler/debug only) */
+#endif
+	u32 parse_args;		/* Grammar/Parse time arguments */
+	u32 runtime_args;	/* Interpret time arguments */
+	u16 flags;		/* Misc flags */
+	u8 object_type;		/* Corresponding internal object type */
+	u8 class;		/* Opcode class */
+	u8 type;		/* Opcode type */
+};
+
+union acpi_parse_value {
+	acpi_integer integer;	/* Integer constant (Up to 64 bits) */
+	struct uint64_struct integer64;	/* Structure overlay for 2 32-bit Dwords */
+	u32 size;		/* bytelist or field size */
+	char *string;		/* NULL terminated string */
+	u8 *buffer;		/* buffer or string */
+	char *name;		/* NULL terminated string */
+	union acpi_parse_object *arg;	/* arguments and contained ops */
+};
+
+#ifdef ACPI_DISASSEMBLER
+#define ACPI_DISASM_ONLY_MEMBERS(a)     a;
+#else
+#define ACPI_DISASM_ONLY_MEMBERS(a)
+#endif
+
+#define ACPI_PARSE_COMMON \
+	union acpi_parse_object         *parent;        /* Parent op */\
+	u8                              descriptor_type; /* To differentiate various internal objs */\
+	u8                              flags;          /* Type of Op */\
+	u16                             aml_opcode;     /* AML opcode */\
+	u32                             aml_offset;     /* Offset of declaration in AML */\
+	union acpi_parse_object         *next;          /* Next op */\
+	struct acpi_namespace_node      *node;          /* For use by interpreter */\
+	union acpi_parse_value          value;          /* Value or args associated with the opcode */\
+	u8                              arg_list_length; /* Number of elements in the arg list */\
+	ACPI_DISASM_ONLY_MEMBERS (\
+	u8                              disasm_flags;   /* Used during AML disassembly */\
+	u8                              disasm_opcode;  /* Subtype used for disassembly */\
+	char                            aml_op_name[16])	/* Op name (debug only) */
+
+#define ACPI_DASM_BUFFER                0x00
+#define ACPI_DASM_RESOURCE              0x01
+#define ACPI_DASM_STRING                0x02
+#define ACPI_DASM_UNICODE               0x03
+#define ACPI_DASM_EISAID                0x04
+#define ACPI_DASM_MATCHOP               0x05
+#define ACPI_DASM_LNOT_PREFIX           0x06
+#define ACPI_DASM_LNOT_SUFFIX           0x07
+#define ACPI_DASM_IGNORE                0x08
+
+/*
+ * Generic operation (for example:  If, While, Store)
+ */
+struct acpi_parse_obj_common {
+ACPI_PARSE_COMMON};
+
+/*
+ * Extended Op for named ops (Scope, Method, etc.), deferred ops (Methods and op_regions),
+ * and bytelists.
+ */
+struct acpi_parse_obj_named {
+	ACPI_PARSE_COMMON u8 *path;
+	u8 *data;		/* AML body or bytelist data */
+	u32 length;		/* AML length */
+	u32 name;		/* 4-byte name or zero if no name */
+};
+
+/* This version is used by the iASL compiler only */
+
+#define ACPI_MAX_PARSEOP_NAME   20
+
+struct acpi_parse_obj_asl {
+	ACPI_PARSE_COMMON union acpi_parse_object *child;
+	union acpi_parse_object *parent_method;
+	char *filename;
+	char *external_name;
+	char *namepath;
+	char name_seg[4];
+	u32 extra_value;
+	u32 column;
+	u32 line_number;
+	u32 logical_line_number;
+	u32 logical_byte_offset;
+	u32 end_line;
+	u32 end_logical_line;
+	u32 acpi_btype;
+	u32 aml_length;
+	u32 aml_subtree_length;
+	u32 final_aml_length;
+	u32 final_aml_offset;
+	u32 compile_flags;
+	u16 parse_opcode;
+	u8 aml_opcode_length;
+	u8 aml_pkg_len_bytes;
+	u8 extra;
+	char parse_op_name[ACPI_MAX_PARSEOP_NAME];
+};
+
+union acpi_parse_object {
+	struct acpi_parse_obj_common common;
+	struct acpi_parse_obj_named named;
+	struct acpi_parse_obj_asl asl;
+};
+
+/*
+ * Parse state - one state per parser invocation and each control
+ * method.
+ */
+struct acpi_parse_state {
+	u8 *aml_start;		/* First AML byte */
+	u8 *aml;		/* Next AML byte */
+	u8 *aml_end;		/* (last + 1) AML byte */
+	u8 *pkg_start;		/* Current package begin */
+	u8 *pkg_end;		/* Current package end */
+	union acpi_parse_object *start_op;	/* Root of parse tree */
+	struct acpi_namespace_node *start_node;
+	union acpi_generic_state *scope;	/* Current scope */
+	union acpi_parse_object *start_scope;
+	u32 aml_size;
+};
+
+/* Parse object flags */
+
+#define ACPI_PARSEOP_GENERIC            0x01
+#define ACPI_PARSEOP_NAMED              0x02
+#define ACPI_PARSEOP_DEFERRED           0x04
+#define ACPI_PARSEOP_BYTELIST           0x08
+#define ACPI_PARSEOP_IN_STACK           0x10
+#define ACPI_PARSEOP_TARGET             0x20
+#define ACPI_PARSEOP_IN_CACHE           0x80
+
+/* Parse object disasm_flags */
+
+#define ACPI_PARSEOP_IGNORE             0x01
+#define ACPI_PARSEOP_PARAMLIST          0x02
+#define ACPI_PARSEOP_EMPTY_TERMLIST     0x04
+#define ACPI_PARSEOP_SPECIAL            0x10
+
+/*****************************************************************************
+ *
+ * Hardware (ACPI registers) and PNP
+ *
+ ****************************************************************************/
+
+struct acpi_bit_register_info {
+	u8 parent_register;
+	u8 bit_position;
+	u16 access_bit_mask;
+};
+
+/*
+ * Some ACPI registers have bits that must be ignored on writes -- that is,
+ * their existing values must be preserved.
+ */
+#define ACPI_PM1_STATUS_PRESERVED_BITS          0x0800	/* Bit 11 */
+#define ACPI_PM1_CONTROL_PRESERVED_BITS         0x0200	/* Bit 9 (whatever) */
+
+/*
+ * Register IDs
+ * These are the full ACPI registers
+ */
+#define ACPI_REGISTER_PM1_STATUS                0x01
+#define ACPI_REGISTER_PM1_ENABLE                0x02
+#define ACPI_REGISTER_PM1_CONTROL               0x03
+#define ACPI_REGISTER_PM1A_CONTROL              0x04
+#define ACPI_REGISTER_PM1B_CONTROL              0x05
+#define ACPI_REGISTER_PM2_CONTROL               0x06
+#define ACPI_REGISTER_PM_TIMER                  0x07
+#define ACPI_REGISTER_PROCESSOR_BLOCK           0x08
+#define ACPI_REGISTER_SMI_COMMAND_BLOCK         0x09
+
+/* Masks used to access the bit_registers */
+
+#define ACPI_BITMASK_TIMER_STATUS               0x0001
+#define ACPI_BITMASK_BUS_MASTER_STATUS          0x0010
+#define ACPI_BITMASK_GLOBAL_LOCK_STATUS         0x0020
+#define ACPI_BITMASK_POWER_BUTTON_STATUS        0x0100
+#define ACPI_BITMASK_SLEEP_BUTTON_STATUS        0x0200
+#define ACPI_BITMASK_RT_CLOCK_STATUS            0x0400
+#define ACPI_BITMASK_PCIEXP_WAKE_STATUS         0x4000	/* ACPI 3.0 */
+#define ACPI_BITMASK_WAKE_STATUS                0x8000
+
+#define ACPI_BITMASK_ALL_FIXED_STATUS           (\
+	ACPI_BITMASK_TIMER_STATUS          | \
+	ACPI_BITMASK_BUS_MASTER_STATUS     | \
+	ACPI_BITMASK_GLOBAL_LOCK_STATUS    | \
+	ACPI_BITMASK_POWER_BUTTON_STATUS   | \
+	ACPI_BITMASK_SLEEP_BUTTON_STATUS   | \
+	ACPI_BITMASK_RT_CLOCK_STATUS       | \
+	ACPI_BITMASK_WAKE_STATUS)
+
+#define ACPI_BITMASK_TIMER_ENABLE               0x0001
+#define ACPI_BITMASK_GLOBAL_LOCK_ENABLE         0x0020
+#define ACPI_BITMASK_POWER_BUTTON_ENABLE        0x0100
+#define ACPI_BITMASK_SLEEP_BUTTON_ENABLE        0x0200
+#define ACPI_BITMASK_RT_CLOCK_ENABLE            0x0400
+#define ACPI_BITMASK_PCIEXP_WAKE_DISABLE        0x4000	/* ACPI 3.0 */
+
+#define ACPI_BITMASK_SCI_ENABLE                 0x0001
+#define ACPI_BITMASK_BUS_MASTER_RLD             0x0002
+#define ACPI_BITMASK_GLOBAL_LOCK_RELEASE        0x0004
+#define ACPI_BITMASK_SLEEP_TYPE_X               0x1C00
+#define ACPI_BITMASK_SLEEP_ENABLE               0x2000
+
+#define ACPI_BITMASK_ARB_DISABLE                0x0001
+
+/* Raw bit position of each bit_register */
+
+#define ACPI_BITPOSITION_TIMER_STATUS           0x00
+#define ACPI_BITPOSITION_BUS_MASTER_STATUS      0x04
+#define ACPI_BITPOSITION_GLOBAL_LOCK_STATUS     0x05
+#define ACPI_BITPOSITION_POWER_BUTTON_STATUS    0x08
+#define ACPI_BITPOSITION_SLEEP_BUTTON_STATUS    0x09
+#define ACPI_BITPOSITION_RT_CLOCK_STATUS        0x0A
+#define ACPI_BITPOSITION_PCIEXP_WAKE_STATUS     0x0E	/* ACPI 3.0 */
+#define ACPI_BITPOSITION_WAKE_STATUS            0x0F
+
+#define ACPI_BITPOSITION_TIMER_ENABLE           0x00
+#define ACPI_BITPOSITION_GLOBAL_LOCK_ENABLE     0x05
+#define ACPI_BITPOSITION_POWER_BUTTON_ENABLE    0x08
+#define ACPI_BITPOSITION_SLEEP_BUTTON_ENABLE    0x09
+#define ACPI_BITPOSITION_RT_CLOCK_ENABLE        0x0A
+#define ACPI_BITPOSITION_PCIEXP_WAKE_DISABLE    0x0E	/* ACPI 3.0 */
+
+#define ACPI_BITPOSITION_SCI_ENABLE             0x00
+#define ACPI_BITPOSITION_BUS_MASTER_RLD         0x01
+#define ACPI_BITPOSITION_GLOBAL_LOCK_RELEASE    0x02
+#define ACPI_BITPOSITION_SLEEP_TYPE_X           0x0A
+#define ACPI_BITPOSITION_SLEEP_ENABLE           0x0D
+
+#define ACPI_BITPOSITION_ARB_DISABLE            0x00
+
+/*****************************************************************************
+ *
+ * Resource descriptors
+ *
+ ****************************************************************************/
+
+/* resource_type values */
+
+#define ACPI_ADDRESS_TYPE_MEMORY_RANGE          0
+#define ACPI_ADDRESS_TYPE_IO_RANGE              1
+#define ACPI_ADDRESS_TYPE_BUS_NUMBER_RANGE      2
+
+/* Resource descriptor types and masks */
+
+#define ACPI_RESOURCE_NAME_LARGE                0x80
+#define ACPI_RESOURCE_NAME_SMALL                0x00
+
+#define ACPI_RESOURCE_NAME_SMALL_MASK           0x78	/* Bits 6:3 contain the type */
+#define ACPI_RESOURCE_NAME_SMALL_LENGTH_MASK    0x07	/* Bits 2:0 contain the length */
+#define ACPI_RESOURCE_NAME_LARGE_MASK           0x7F	/* Bits 6:0 contain the type */
+
+/*
+ * Small resource descriptor "names" as defined by the ACPI specification.
+ * Note: Bits 2:0 are used for the descriptor length
+ */
+#define ACPI_RESOURCE_NAME_IRQ                  0x20
+#define ACPI_RESOURCE_NAME_DMA                  0x28
+#define ACPI_RESOURCE_NAME_START_DEPENDENT      0x30
+#define ACPI_RESOURCE_NAME_END_DEPENDENT        0x38
+#define ACPI_RESOURCE_NAME_IO                   0x40
+#define ACPI_RESOURCE_NAME_FIXED_IO             0x48
+#define ACPI_RESOURCE_NAME_RESERVED_S1          0x50
+#define ACPI_RESOURCE_NAME_RESERVED_S2          0x58
+#define ACPI_RESOURCE_NAME_RESERVED_S3          0x60
+#define ACPI_RESOURCE_NAME_RESERVED_S4          0x68
+#define ACPI_RESOURCE_NAME_VENDOR_SMALL         0x70
+#define ACPI_RESOURCE_NAME_END_TAG              0x78
+
+/*
+ * Large resource descriptor "names" as defined by the ACPI specification.
+ * Note: includes the Large Descriptor bit in bit[7]
+ */
+#define ACPI_RESOURCE_NAME_MEMORY24             0x81
+#define ACPI_RESOURCE_NAME_GENERIC_REGISTER     0x82
+#define ACPI_RESOURCE_NAME_RESERVED_L1          0x83
+#define ACPI_RESOURCE_NAME_VENDOR_LARGE         0x84
+#define ACPI_RESOURCE_NAME_MEMORY32             0x85
+#define ACPI_RESOURCE_NAME_FIXED_MEMORY32       0x86
+#define ACPI_RESOURCE_NAME_ADDRESS32            0x87
+#define ACPI_RESOURCE_NAME_ADDRESS16            0x88
+#define ACPI_RESOURCE_NAME_EXTENDED_IRQ         0x89
+#define ACPI_RESOURCE_NAME_ADDRESS64            0x8A
+#define ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64   0x8B
+#define ACPI_RESOURCE_NAME_LARGE_MAX            0x8B
+
+/*****************************************************************************
+ *
+ * Miscellaneous
+ *
+ ****************************************************************************/
+
+#define ACPI_ASCII_ZERO                 0x30
+
+/*****************************************************************************
+ *
+ * Debugger
+ *
+ ****************************************************************************/
+
+struct acpi_db_method_info {
+	acpi_handle main_thread_gate;
+	acpi_handle thread_complete_gate;
+	u32 *threads;
+	u32 num_threads;
+	u32 num_created;
+	u32 num_completed;
+
+	char *name;
+	u32 flags;
+	u32 num_loops;
+	char pathname[128];
+	char **args;
+
+	/*
+	 * Arguments to be passed to the method for the command.
+	 * Threads -
+	 *   the number of threads, the ID of the current thread, and
+	 *   the index of the current thread among all threads created.
+	 */
+	char init_args;
+	char *arguments[4];
+	char num_threads_str[11];
+	char id_of_thread_str[11];
+	char index_of_thread_str[11];
+};
+
+struct acpi_integrity_info {
+	u32 nodes;
+	u32 objects;
+};
+
+#define ACPI_DB_REDIRECTABLE_OUTPUT     0x01
+#define ACPI_DB_CONSOLE_OUTPUT          0x02
+#define ACPI_DB_DUPLICATE_OUTPUT        0x03
+
+/*****************************************************************************
+ *
+ * Debug
+ *
+ ****************************************************************************/
+
+/* Entry for a memory allocation (debug only) */
+
+#define ACPI_MEM_MALLOC                 0
+#define ACPI_MEM_CALLOC                 1
+#define ACPI_MAX_MODULE_NAME            16
+
+#define ACPI_COMMON_DEBUG_MEM_HEADER \
+	struct acpi_debug_mem_block     *previous; \
+	struct acpi_debug_mem_block     *next; \
+	u32                             size; \
+	u32                             component; \
+	u32                             line; \
+	char                            module[ACPI_MAX_MODULE_NAME]; \
+	u8                              alloc_type;
+
+struct acpi_debug_mem_header {
+ACPI_COMMON_DEBUG_MEM_HEADER};
+
+struct acpi_debug_mem_block {
+	ACPI_COMMON_DEBUG_MEM_HEADER u64 user_space;
+};
+
+#define ACPI_MEM_LIST_GLOBAL            0
+#define ACPI_MEM_LIST_NSNODE            1
+#define ACPI_MEM_LIST_MAX               1
+#define ACPI_NUM_MEM_LISTS              2
+
+#endif				/* __ACLOCAL_H__ */
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
new file mode 100644
index 0000000..9c127e8
--- /dev/null
+++ b/drivers/acpi/acpica/acmacros.h
@@ -0,0 +1,577 @@
+/******************************************************************************
+ *
+ * Name: acmacros.h - C macros for the entire subsystem.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACMACROS_H__
+#define __ACMACROS_H__
+
+/*
+ * Extract data using a pointer. Any more than a byte and we
+ * get into potential alignment issues -- see the MOVE macros below.
+ * Use with care.
+ */
+#define ACPI_GET8(ptr)                  *ACPI_CAST_PTR (u8, ptr)
+#define ACPI_GET16(ptr)                 *ACPI_CAST_PTR (u16, ptr)
+#define ACPI_GET32(ptr)                 *ACPI_CAST_PTR (u32, ptr)
+#define ACPI_GET64(ptr)                 *ACPI_CAST_PTR (u64, ptr)
+#define ACPI_SET8(ptr)                  *ACPI_CAST_PTR (u8, ptr)
+#define ACPI_SET16(ptr)                 *ACPI_CAST_PTR (u16, ptr)
+#define ACPI_SET32(ptr)                 *ACPI_CAST_PTR (u32, ptr)
+#define ACPI_SET64(ptr)                 *ACPI_CAST_PTR (u64, ptr)
+
+/*
+ * printf() format helpers
+ */
+
+/* Split 64-bit integer into two 32-bit values. Use with %8.8_x%8.8_x */
+
+#define ACPI_FORMAT_UINT64(i)           ACPI_HIDWORD(i), ACPI_LODWORD(i)
+
+#if ACPI_MACHINE_WIDTH == 64
+#define ACPI_FORMAT_NATIVE_UINT(i)      ACPI_FORMAT_UINT64(i)
+#else
+#define ACPI_FORMAT_NATIVE_UINT(i)      0, (i)
+#endif
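
A minimal usage sketch (hypothetical call site; acpi_os_printf is the OSL printf routine): because the macro expands to two 32-bit halves, the format string needs two 32-bit hex conversions.

static void acpi_example_print_address(u64 address)
{
	/* Prints the upper and lower dwords as one 64-bit hex value */
	acpi_os_printf("Address: %8.8X%8.8X\n", ACPI_FORMAT_UINT64(address));
}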
+
+/*
+ * Macros for moving data around to/from buffers that are possibly unaligned.
+ * If the hardware supports the transfer of unaligned data, just do the store.
+ * Otherwise, we have to move one byte at a time.
+ */
+#ifdef ACPI_BIG_ENDIAN
+/*
+ * Macros for big-endian machines
+ */
+
+/* These macros reverse the bytes during the move, converting little-endian to big-endian */
+
+			  /* Big Endian      <==        Little Endian */
+			  /*  Hi...Lo                     Lo...Hi     */
+/* 16-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_16_TO_16(d, s)        {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[1];\
+					   ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];}
+
+#define ACPI_MOVE_16_TO_32(d, s)        {(*(u32 *)(void *)(d))=0;\
+							   ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
+							   ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
+
+#define ACPI_MOVE_16_TO_64(d, s)        {(*(u64 *)(void *)(d))=0;\
+									 ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
+									 ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
+
+/* 32-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_32_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)	/* Truncate to 16 */
+
+#define ACPI_MOVE_32_TO_32(d, s)        {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[3];\
+										 ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\
+										 ((  u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
+										 ((  u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
+
+#define ACPI_MOVE_32_TO_64(d, s)        {(*(u64 *)(void *)(d))=0;\
+										   ((u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\
+										   ((u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[2];\
+										   ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
+										   ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
+
+/* 64-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_64_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)	/* Truncate to 16 */
+
+#define ACPI_MOVE_64_TO_32(d, s)        ACPI_MOVE_32_TO_32(d, s)	/* Truncate to 32 */
+
+#define ACPI_MOVE_64_TO_64(d, s)        {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[7];\
+										 ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[6];\
+										 ((  u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[5];\
+										 ((  u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[4];\
+										 ((  u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\
+										 ((  u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[2];\
+										 ((  u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
+										 ((  u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
+#else
+/*
+ * Macros for little-endian machines
+ */
+
+#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
+
+/* The hardware supports unaligned transfers, just do the little-endian move */
+
+/* 16-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_16_TO_16(d, s)        *(u16 *)(void *)(d) = *(u16 *)(void *)(s)
+#define ACPI_MOVE_16_TO_32(d, s)        *(u32 *)(void *)(d) = *(u16 *)(void *)(s)
+#define ACPI_MOVE_16_TO_64(d, s)        *(u64 *)(void *)(d) = *(u16 *)(void *)(s)
+
+/* 32-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_32_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)	/* Truncate to 16 */
+#define ACPI_MOVE_32_TO_32(d, s)        *(u32 *)(void *)(d) = *(u32 *)(void *)(s)
+#define ACPI_MOVE_32_TO_64(d, s)        *(u64 *)(void *)(d) = *(u32 *)(void *)(s)
+
+/* 64-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_64_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)	/* Truncate to 16 */
+#define ACPI_MOVE_64_TO_32(d, s)        ACPI_MOVE_32_TO_32(d, s)	/* Truncate to 32 */
+#define ACPI_MOVE_64_TO_64(d, s)        *(u64 *)(void *)(d) = *(u64 *)(void *)(s)
+
+#else
+/*
+ * The hardware does not support unaligned transfers. We must move the
+ * data one byte at a time. These macros work whether the source or
+ * the destination (or both) is/are unaligned. (Little-endian move)
+ */
+
+/* 16-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_16_TO_16(d, s)        {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\
+										 ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];}
+
+#define ACPI_MOVE_16_TO_32(d, s)        {(*(u32 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d, s);}
+#define ACPI_MOVE_16_TO_64(d, s)        {(*(u64 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d, s);}
+
+/* 32-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_32_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)	/* Truncate to 16 */
+
+#define ACPI_MOVE_32_TO_32(d, s)        {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\
+										 ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];\
+										 ((  u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[2];\
+										 ((  u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[3];}
+
+#define ACPI_MOVE_32_TO_64(d, s)        {(*(u64 *)(void *)(d)) = 0; ACPI_MOVE_32_TO_32(d, s);}
+
+/* 64-bit source, 16/32/64 destination */
+
+#define ACPI_MOVE_64_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)	/* Truncate to 16 */
+#define ACPI_MOVE_64_TO_32(d, s)        ACPI_MOVE_32_TO_32(d, s)	/* Truncate to 32 */
+#define ACPI_MOVE_64_TO_64(d, s)        {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\
+										 ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];\
+										 ((  u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[2];\
+										 ((  u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[3];\
+										 ((  u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[4];\
+										 ((  u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[5];\
+										 ((  u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[6];\
+										 ((  u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[7];}
+#endif
+#endif
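
A short sketch of how these macros are typically used (hypothetical helper): extracting a 16-bit little-endian field from a raw table buffer into a host-order variable, without making assumptions about source alignment or host byte order.

static u32 acpi_example_get_le16(u8 *table, acpi_size offset)
{
	u32 value;

	/* Zero-extends into a u32; byte-swaps on big-endian hosts */
	ACPI_MOVE_16_TO_32(&value, table + offset);
	return (value);
}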
+
+/* Macros based on machine integer width */
+
+#if ACPI_MACHINE_WIDTH == 32
+#define ACPI_MOVE_SIZE_TO_16(d, s)       ACPI_MOVE_32_TO_16(d, s)
+
+#elif ACPI_MACHINE_WIDTH == 64
+#define ACPI_MOVE_SIZE_TO_16(d, s)       ACPI_MOVE_64_TO_16(d, s)
+
+#else
+#error unknown ACPI_MACHINE_WIDTH
+#endif
+
+/*
+ * Fast power-of-two math macros for non-optimized compilers
+ */
+#define _ACPI_DIV(value, power_of2)      ((u32) ((value) >> (power_of2)))
+#define _ACPI_MUL(value, power_of2)      ((u32) ((value) << (power_of2)))
+#define _ACPI_MOD(value, divisor)        ((u32) ((value) & ((divisor) -1)))
+
+#define ACPI_DIV_2(a)                   _ACPI_DIV(a, 1)
+#define ACPI_MUL_2(a)                   _ACPI_MUL(a, 1)
+#define ACPI_MOD_2(a)                   _ACPI_MOD(a, 2)
+
+#define ACPI_DIV_4(a)                   _ACPI_DIV(a, 2)
+#define ACPI_MUL_4(a)                   _ACPI_MUL(a, 2)
+#define ACPI_MOD_4(a)                   _ACPI_MOD(a, 4)
+
+#define ACPI_DIV_8(a)                   _ACPI_DIV(a, 3)
+#define ACPI_MUL_8(a)                   _ACPI_MUL(a, 3)
+#define ACPI_MOD_8(a)                   _ACPI_MOD(a, 8)
+
+#define ACPI_DIV_16(a)                  _ACPI_DIV(a, 4)
+#define ACPI_MUL_16(a)                  _ACPI_MUL(a, 4)
+#define ACPI_MOD_16(a)                  _ACPI_MOD(a, 16)
+
+#define ACPI_DIV_32(a)                  _ACPI_DIV(a, 5)
+#define ACPI_MUL_32(a)                  _ACPI_MUL(a, 5)
+#define ACPI_MOD_32(a)                  _ACPI_MOD(a, 32)
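
Worked expansions, for clarity only (illustrative values, not part of the patch):

/*
 *   ACPI_DIV_8(17)   -> 17 >> 3       == 2
 *   ACPI_MUL_4(5)    ->  5 << 2       == 20
 *   ACPI_MOD_16(35)  -> 35 & (16 - 1) == 3
 */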
+
+/*
+ * Rounding macros (Power of two boundaries only)
+ */
+#define ACPI_ROUND_DOWN(value, boundary)     (((acpi_size)(value)) & \
+						(~(((acpi_size) boundary)-1)))
+
+#define ACPI_ROUND_UP(value, boundary)	     ((((acpi_size)(value)) + \
+						(((acpi_size) boundary)-1)) & \
+						(~(((acpi_size) boundary)-1)))
+
+/* Note: sizeof(acpi_size) evaluates to either 4 or 8 (32- vs 64-bit mode) */
+
+#define ACPI_ROUND_DOWN_TO_32BIT(a)         ACPI_ROUND_DOWN(a, 4)
+#define ACPI_ROUND_DOWN_TO_64BIT(a)         ACPI_ROUND_DOWN(a, 8)
+#define ACPI_ROUND_DOWN_TO_NATIVE_WORD(a)   ACPI_ROUND_DOWN(a, sizeof(acpi_size))
+
+#define ACPI_ROUND_UP_TO_32BIT(a)           ACPI_ROUND_UP(a, 4)
+#define ACPI_ROUND_UP_TO_64BIT(a)           ACPI_ROUND_UP(a, 8)
+#define ACPI_ROUND_UP_TO_NATIVE_WORD(a)     ACPI_ROUND_UP(a, sizeof(acpi_size))
+
+#define ACPI_ROUND_BITS_UP_TO_BYTES(a)      ACPI_DIV_8((a) + 7)
+#define ACPI_ROUND_BITS_DOWN_TO_BYTES(a)    ACPI_DIV_8((a))
+
+#define ACPI_ROUND_UP_TO_1K(a)              (((a) + 1023) >> 10)
+
+/* Generic (non-power-of-two) rounding */
+
+#define ACPI_ROUND_UP_TO(value, boundary)   (((value) + ((boundary)-1)) / (boundary))
+
+#define ACPI_IS_MISALIGNED(value)	    (((acpi_size) value) & (sizeof(acpi_size)-1))
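
Worked expansions of the rounding helpers (illustrative values only):

/*
 *   ACPI_ROUND_UP(17, 8)            -> (17 + 7) & ~7   == 24
 *   ACPI_ROUND_DOWN(17, 8)          -> 17 & ~7         == 16
 *   ACPI_ROUND_BITS_UP_TO_BYTES(13) -> (13 + 7) >> 3   == 2
 *   ACPI_ROUND_UP_TO(10, 3)         -> (10 + 2) / 3    == 4
 */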
+
+/*
+ * Bitmask creation
+ * Bit positions start at zero.
+ * MASK_BITS_ABOVE creates a mask starting AT the position and above
+ * MASK_BITS_BELOW creates a mask starting one bit BELOW the position
+ */
+#define ACPI_MASK_BITS_ABOVE(position)      (~((ACPI_INTEGER_MAX) << ((u32) (position))))
+#define ACPI_MASK_BITS_BELOW(position)      ((ACPI_INTEGER_MAX) << ((u32) (position)))
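
Illustrative expansions, assuming a 64-bit acpi_integer (so ACPI_INTEGER_MAX is 0xFFFFFFFFFFFFFFFF):

/*
 *   ACPI_MASK_BITS_ABOVE(4)  -> ~(MAX << 4) == 0x000000000000000F
 *   ACPI_MASK_BITS_BELOW(4)  ->  (MAX << 4) == 0xFFFFFFFFFFFFFFF0
 */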
+
+/* Bitfields within ACPI registers */
+
+#define ACPI_REGISTER_PREPARE_BITS(val, pos, mask)      ((val << pos) & mask)
+#define ACPI_REGISTER_INSERT_VALUE(reg, pos, mask, val)  reg = (reg & (~(mask))) | ACPI_REGISTER_PREPARE_BITS(val, pos, mask)
+
+#define ACPI_INSERT_BITS(target, mask, source)          target = ((target & (~(mask))) | (source & mask))
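
A worked example (hypothetical values), inserting the value 5 into the SLP_TYP field -- bits 12:10, mask 0x1C00 -- of a zeroed register image:

/*
 *   reg = 0;
 *   ACPI_REGISTER_INSERT_VALUE (reg, 10, 0x1C00, 5);
 *   --> reg == ((0 & ~0x1C00) | ((5 << 10) & 0x1C00)) == 0x1400
 */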
+
+/*
+ * A struct acpi_namespace_node can appear in some contexts
+ * where a pointer to a union acpi_operand_object can also
+ * appear. This macro is used to distinguish them.
+ *
+ * The "Descriptor" field is the first field in both structures.
+ */
+#define ACPI_GET_DESCRIPTOR_TYPE(d)     (((union acpi_descriptor *)(void *)(d))->common.descriptor_type)
+#define ACPI_SET_DESCRIPTOR_TYPE(d, t)  (((union acpi_descriptor *)(void *)(d))->common.descriptor_type = t)
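
A minimal sketch (hypothetical helper) of the disambiguation this enables; ACPI_DESC_TYPE_NAMED is the descriptor type ACPICA assigns to namespace nodes:

static u8 acpi_example_is_namespace_node(void *object)
{
	/* The first byte of both structures identifies what the pointer is */
	return (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED);
}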
+
+/* Macro to test the object type */
+
+#define ACPI_GET_OBJECT_TYPE(d)         (((union acpi_operand_object *)(void *)(d))->common.type)
+
+/*
+ * Macros for the master AML opcode table
+ */
+#if defined (ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
+#define ACPI_OP(name, Pargs, Iargs, obj_type, class, type, flags) \
+	{name, (u32)(Pargs), (u32)(Iargs), (u32)(flags), obj_type, class, type}
+#else
+#define ACPI_OP(name, Pargs, Iargs, obj_type, class, type, flags) \
+	{(u32)(Pargs), (u32)(Iargs), (u32)(flags), obj_type, class, type}
+#endif
+
+#define ARG_TYPE_WIDTH                  5
+#define ARG_1(x)                        ((u32)(x))
+#define ARG_2(x)                        ((u32)(x) << (1 * ARG_TYPE_WIDTH))
+#define ARG_3(x)                        ((u32)(x) << (2 * ARG_TYPE_WIDTH))
+#define ARG_4(x)                        ((u32)(x) << (3 * ARG_TYPE_WIDTH))
+#define ARG_5(x)                        ((u32)(x) << (4 * ARG_TYPE_WIDTH))
+#define ARG_6(x)                        ((u32)(x) << (5 * ARG_TYPE_WIDTH))
+
+#define ARGI_LIST1(a)                   (ARG_1(a))
+#define ARGI_LIST2(a, b)                (ARG_1(b)|ARG_2(a))
+#define ARGI_LIST3(a, b, c)             (ARG_1(c)|ARG_2(b)|ARG_3(a))
+#define ARGI_LIST4(a, b, c, d)          (ARG_1(d)|ARG_2(c)|ARG_3(b)|ARG_4(a))
+#define ARGI_LIST5(a, b, c, d, e)       (ARG_1(e)|ARG_2(d)|ARG_3(c)|ARG_4(b)|ARG_5(a))
+#define ARGI_LIST6(a, b, c, d, e, f)    (ARG_1(f)|ARG_2(e)|ARG_3(d)|ARG_4(c)|ARG_5(b)|ARG_6(a))
+
+#define ARGP_LIST1(a)                   (ARG_1(a))
+#define ARGP_LIST2(a, b)                (ARG_1(a)|ARG_2(b))
+#define ARGP_LIST3(a, b, c)             (ARG_1(a)|ARG_2(b)|ARG_3(c))
+#define ARGP_LIST4(a, b, c, d)          (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d))
+#define ARGP_LIST5(a, b, c, d, e)       (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e))
+#define ARGP_LIST6(a, b, c, d, e, f)    (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e)|ARG_6(f))
+
+#define GET_CURRENT_ARG_TYPE(list)      (list & ((u32) 0x1F))
+#define INCREMENT_ARG_LIST(list)        (list >>= ((u32) ARG_TYPE_WIDTH))
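
A sketch of how a packed argument-type list is typically consumed, five bits at a time (hypothetical helper; assumes, as in ACPICA, that a zero type code terminates the list):

static u32 acpi_example_count_args(u32 arg_list)
{
	u32 count = 0;

	while (GET_CURRENT_ARG_TYPE(arg_list)) {
		count++;
		INCREMENT_ARG_LIST(arg_list);
	}
	return (count);
}

With this, a list built by ARGP_LIST3() from three nonzero type codes would count as 3.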
+
+/*
+ * Ascii error messages can be configured out
+ */
+#ifndef ACPI_NO_ERROR_MESSAGES
+
+/*
+ * Error reporting. Caller's module and line number are inserted by AE_INFO,
+ * the plist contains a set of parens to allow variable-length lists.
+ * These macros are used for both the debug and non-debug versions of the code.
+ */
+#define ACPI_ERROR_NAMESPACE(s, e)      acpi_ns_report_error (AE_INFO, s, e);
+#define ACPI_ERROR_METHOD(s, n, p, e)   acpi_ns_report_method_error (AE_INFO, s, n, p, e);
+
+#else
+
+/* No error messages */
+
+#define ACPI_ERROR_NAMESPACE(s, e)
+#define ACPI_ERROR_METHOD(s, n, p, e)
+#endif		/* ACPI_NO_ERROR_MESSAGES */
+
+/*
+ * Debug macros that are conditionally compiled
+ */
+#ifdef ACPI_DEBUG_OUTPUT
+
+/*
+ * Function entry tracing
+ */
+#ifdef CONFIG_ACPI_DEBUG_FUNC_TRACE
+
+#define ACPI_FUNCTION_TRACE(a)          ACPI_FUNCTION_NAME(a) \
+			  acpi_ut_trace(ACPI_DEBUG_PARAMETERS)
+#define ACPI_FUNCTION_TRACE_PTR(a, b)   ACPI_FUNCTION_NAME(a) \
+					   acpi_ut_trace_ptr(ACPI_DEBUG_PARAMETERS, (void *)b)
+#define ACPI_FUNCTION_TRACE_U32(a, b)   ACPI_FUNCTION_NAME(a) \
+							 acpi_ut_trace_u32(ACPI_DEBUG_PARAMETERS, (u32)b)
+#define ACPI_FUNCTION_TRACE_STR(a, b)   ACPI_FUNCTION_NAME(a) \
+									  acpi_ut_trace_str(ACPI_DEBUG_PARAMETERS, (char *)b)
+
+#define ACPI_FUNCTION_ENTRY()           acpi_ut_track_stack_ptr()
+
+/*
+ * Function exit tracing.
+ * WARNING: These macros include a return statement. This is usually considered
+ * bad form, but having a separate exit macro is very ugly and difficult to maintain.
+ * One of the FUNCTION_TRACE macros above must be used in conjunction with these macros
+ * so that "_AcpiFunctionName" is defined.
+ *
+ * Note: the DO_WHILE0 macro is used to prevent some compilers from complaining
+ * about these constructs.
+ */
+#ifdef ACPI_USE_DO_WHILE_0
+#define ACPI_DO_WHILE0(a)               do a while(0)
+#else
+#define ACPI_DO_WHILE0(a)               a
+#endif
+
+#define return_VOID                     ACPI_DO_WHILE0 ({ \
+											acpi_ut_exit (ACPI_DEBUG_PARAMETERS); \
+											return;})
+/*
+ * There are two versions of most of the return macros. The default version is
+ * safer, since it avoids side-effects by guaranteeing that the argument will
+ * not be evaluated twice.
+ *
+ * A less-safe version of the macros is provided for optional use if the
+ * compiler uses excessive CPU stack (for example, this may happen in the
+ * debug case if code optimization is disabled.)
+ */
+#ifndef ACPI_SIMPLE_RETURN_MACROS
+
+#define return_ACPI_STATUS(s)           ACPI_DO_WHILE0 ({ \
+											register acpi_status _s = (s); \
+											acpi_ut_status_exit (ACPI_DEBUG_PARAMETERS, _s); \
+											return (_s); })
+#define return_PTR(s)                   ACPI_DO_WHILE0 ({ \
+											register void *_s = (void *) (s); \
+											acpi_ut_ptr_exit (ACPI_DEBUG_PARAMETERS, (u8 *) _s); \
+											return (_s); })
+#define return_VALUE(s)                 ACPI_DO_WHILE0 ({ \
+											register acpi_integer _s = (s); \
+											acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, _s); \
+											return (_s); })
+#define return_UINT8(s)                 ACPI_DO_WHILE0 ({ \
+											register u8 _s = (u8) (s); \
+											acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (acpi_integer) _s); \
+											return (_s); })
+#define return_UINT32(s)                ACPI_DO_WHILE0 ({ \
+											register u32 _s = (u32) (s); \
+											acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (acpi_integer) _s); \
+											return (_s); })
+#else				/* Use original less-safe macros */
+
+#define return_ACPI_STATUS(s)           ACPI_DO_WHILE0 ({ \
+											acpi_ut_status_exit (ACPI_DEBUG_PARAMETERS, (s)); \
+											return((s)); })
+#define return_PTR(s)                   ACPI_DO_WHILE0 ({ \
+											acpi_ut_ptr_exit (ACPI_DEBUG_PARAMETERS, (u8 *) (s)); \
+											return((s)); })
+#define return_VALUE(s)                 ACPI_DO_WHILE0 ({ \
+											acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (acpi_integer) (s)); \
+											return((s)); })
+#define return_UINT8(s)                 return_VALUE(s)
+#define return_UINT32(s)                return_VALUE(s)
+
+#endif				/* ACPI_SIMPLE_RETURN_MACROS */
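
An illustrative call-site sketch (hypothetical function) showing how the entry and exit macros pair up; the exit macro performs the actual return:

/*
 * acpi_status acpi_example_do_work(u32 arg)
 * {
 *	ACPI_FUNCTION_TRACE(example_do_work);
 *
 *	if (!arg) {
 *		return_ACPI_STATUS(AE_BAD_PARAMETER);
 *	}
 *
 *	return_ACPI_STATUS(AE_OK);
 * }
 */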
+
+#else /* !CONFIG_ACPI_DEBUG_FUNC_TRACE */
+
+#define ACPI_FUNCTION_TRACE(a)
+#define ACPI_FUNCTION_TRACE_PTR(a,b)
+#define ACPI_FUNCTION_TRACE_U32(a,b)
+#define ACPI_FUNCTION_TRACE_STR(a,b)
+#define ACPI_FUNCTION_EXIT
+#define ACPI_FUNCTION_STATUS_EXIT(s)
+#define ACPI_FUNCTION_VALUE_EXIT(s)
+#define ACPI_FUNCTION_TRACE(a)
+#define ACPI_FUNCTION_ENTRY()
+
+#define return_VOID                     return
+#define return_ACPI_STATUS(s)           return(s)
+#define return_VALUE(s)                 return(s)
+#define return_UINT8(s)                 return(s)
+#define return_UINT32(s)                return(s)
+#define return_PTR(s)                   return(s)
+
+#endif /* CONFIG_ACPI_DEBUG_FUNC_TRACE */
+
+/* Conditional execution */
+
+#define ACPI_DEBUG_EXEC(a)              a
+#define ACPI_NORMAL_EXEC(a)
+
+#define ACPI_DEBUG_DEFINE(a)            a;
+#define ACPI_DEBUG_ONLY_MEMBERS(a)      a;
+#define _VERBOSE_STRUCTURES
+
+/* Stack and buffer dumping */
+
+#define ACPI_DUMP_STACK_ENTRY(a)        acpi_ex_dump_operand((a), 0)
+#define ACPI_DUMP_OPERANDS(a, b, c)	acpi_ex_dump_operands(a, b, c)
+
+#define ACPI_DUMP_ENTRY(a, b)           acpi_ns_dump_entry (a, b)
+#define ACPI_DUMP_PATHNAME(a, b, c, d)  acpi_ns_dump_pathname(a, b, c, d)
+#define ACPI_DUMP_RESOURCE_LIST(a)      acpi_rs_dump_resource_list(a)
+#define ACPI_DUMP_BUFFER(a, b)          acpi_ut_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
+
+#else
+/*
+ * This is the non-debug case -- make everything go away,
+ * leaving no executable debug code!
+ */
+#define ACPI_DEBUG_EXEC(a)
+#define ACPI_NORMAL_EXEC(a)             a;
+
+#define ACPI_DEBUG_DEFINE(a)		do { } while(0)
+#define ACPI_DEBUG_ONLY_MEMBERS(a)	do { } while(0)
+#define ACPI_FUNCTION_TRACE(a)		do { } while(0)
+#define ACPI_FUNCTION_TRACE_PTR(a, b)	do { } while(0)
+#define ACPI_FUNCTION_TRACE_U32(a, b)	do { } while(0)
+#define ACPI_FUNCTION_TRACE_STR(a, b)	do { } while(0)
+#define ACPI_FUNCTION_EXIT		do { } while(0)
+#define ACPI_FUNCTION_STATUS_EXIT(s)	do { } while(0)
+#define ACPI_FUNCTION_VALUE_EXIT(s)	do { } while(0)
+#define ACPI_FUNCTION_ENTRY()		do { } while(0)
+#define ACPI_DUMP_STACK_ENTRY(a)	do { } while(0)
+#define ACPI_DUMP_OPERANDS(a, b, c)     do { } while(0)
+#define ACPI_DUMP_ENTRY(a, b)		do { } while(0)
+#define ACPI_DUMP_TABLES(a, b)		do { } while(0)
+#define ACPI_DUMP_PATHNAME(a, b, c, d)	do { } while(0)
+#define ACPI_DUMP_RESOURCE_LIST(a)	do { } while(0)
+#define ACPI_DUMP_BUFFER(a, b)		do { } while(0)
+
+#define return_VOID                     return
+#define return_ACPI_STATUS(s)           return(s)
+#define return_VALUE(s)                 return(s)
+#define return_UINT8(s)                 return(s)
+#define return_UINT32(s)                return(s)
+#define return_PTR(s)                   return(s)
+
+#endif				/* ACPI_DEBUG_OUTPUT */
+
+/*
+ * Some code only gets executed when the debugger is built in.
+ * Note that this is entirely independent of whether the
+ * DEBUG_PRINT stuff (set by ACPI_DEBUG_OUTPUT) is on, or not.
+ */
+#ifdef ACPI_DEBUGGER
+#define ACPI_DEBUGGER_EXEC(a)           a
+#else
+#define ACPI_DEBUGGER_EXEC(a)
+#endif
+
+#ifdef ACPI_DEBUG_OUTPUT
+/*
+ * 1) Set name to blanks
+ * 2) Copy the object name
+ */
+#define ACPI_ADD_OBJECT_NAME(a,b)       ACPI_MEMSET (a->common.name, ' ', sizeof (a->common.name));\
+										ACPI_STRNCPY (a->common.name, acpi_gbl_ns_type_names[b], sizeof (a->common.name))
+#else
+
+#define ACPI_ADD_OBJECT_NAME(a,b)
+#endif
+
+/*
+ * Memory allocation tracking (DEBUG ONLY)
+ */
+#define ACPI_MEM_PARAMETERS         _COMPONENT, _acpi_module_name, __LINE__
+
+#ifndef ACPI_DBG_TRACK_ALLOCATIONS
+
+/* Memory allocation */
+
+#ifndef ACPI_ALLOCATE
+#define ACPI_ALLOCATE(a)            acpi_ut_allocate((acpi_size)(a), ACPI_MEM_PARAMETERS)
+#endif
+#ifndef ACPI_ALLOCATE_ZEROED
+#define ACPI_ALLOCATE_ZEROED(a)     acpi_ut_allocate_zeroed((acpi_size)(a), ACPI_MEM_PARAMETERS)
+#endif
+#ifndef ACPI_FREE
+#define ACPI_FREE(a)                acpi_os_free(a)
+#endif
+#define ACPI_MEM_TRACKING(a)
+
+#else
+
+/* Memory allocation */
+
+#define ACPI_ALLOCATE(a)            acpi_ut_allocate_and_track((acpi_size)(a), ACPI_MEM_PARAMETERS)
+#define ACPI_ALLOCATE_ZEROED(a)     acpi_ut_allocate_zeroed_and_track((acpi_size)(a), ACPI_MEM_PARAMETERS)
+#define ACPI_FREE(a)                acpi_ut_free_and_track(a, ACPI_MEM_PARAMETERS)
+#define ACPI_MEM_TRACKING(a)        a
+
+#endif				/* ACPI_DBG_TRACK_ALLOCATIONS */
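
A minimal usage sketch (hypothetical helper): call sites use ACPI_ALLOCATE_ZEROED / ACPI_FREE so the same code works whether or not allocation tracking is compiled in.

static char *acpi_example_dup_name_string(void)
{
	char *name = ACPI_ALLOCATE_ZEROED(ACPI_NAME_SIZE + 1);

	if (!name) {
		return (NULL);
	}

	/* ... fill in the 4-character name ... */
	return (name);	/* Caller eventually releases it with ACPI_FREE() */
}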
+
+/* Preemption point */
+#ifndef ACPI_PREEMPTION_POINT
+#define ACPI_PREEMPTION_POINT() /* no preemption */
+#endif
+
+#endif				/* __ACMACROS_H__ */
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
new file mode 100644
index 0000000..46cb5b4
--- /dev/null
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -0,0 +1,324 @@
+/******************************************************************************
+ *
+ * Name: acnamesp.h - Namespace subcomponent prototypes and defines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACNAMESP_H__
+#define __ACNAMESP_H__
+
+/* To search the entire name space, pass this as search_base */
+
+#define ACPI_NS_ALL                 ((acpi_handle)0)
+
+/*
+ * Elements of acpi_ns_properties are bit significant
+ * and should be one-to-one with values of acpi_object_type
+ */
+#define ACPI_NS_NORMAL              0
+#define ACPI_NS_NEWSCOPE            1	/* a definition of this type opens a name scope */
+#define ACPI_NS_LOCAL               2	/* suppress search of enclosing scopes */
+
+/* Flags for acpi_ns_lookup, acpi_ns_search_and_enter */
+
+#define ACPI_NS_NO_UPSEARCH         0
+#define ACPI_NS_SEARCH_PARENT       0x01
+#define ACPI_NS_DONT_OPEN_SCOPE     0x02
+#define ACPI_NS_NO_PEER_SEARCH      0x04
+#define ACPI_NS_ERROR_IF_FOUND      0x08
+#define ACPI_NS_PREFIX_IS_SCOPE     0x10
+#define ACPI_NS_EXTERNAL            0x20
+#define ACPI_NS_TEMPORARY           0x40
+
+/* Flags for acpi_ns_walk_namespace */
+
+#define ACPI_NS_WALK_NO_UNLOCK      0
+#define ACPI_NS_WALK_UNLOCK         0x01
+#define ACPI_NS_WALK_TEMP_NODES     0x02
+
+/*
+ * nsinit - Namespace initialization
+ */
+acpi_status acpi_ns_initialize_objects(void);
+
+acpi_status acpi_ns_initialize_devices(void);
+
+/*
+ * nsload -  Namespace loading
+ */
+acpi_status acpi_ns_load_namespace(void);
+
+acpi_status
+acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node);
+
+/*
+ * nswalk - walk the namespace
+ */
+acpi_status
+acpi_ns_walk_namespace(acpi_object_type type,
+		       acpi_handle start_object,
+		       u32 max_depth,
+		       u32 flags,
+		       acpi_walk_callback user_function,
+		       void *context, void **return_value);
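
An illustrative sketch (hypothetical callback) of the user_function argument: it must match the acpi_walk_callback signature and is invoked once per matching node, here simply counting devices through the context pointer.

static acpi_status
acpi_example_count_device(acpi_handle obj_handle,
			  u32 nesting_level, void *context, void **return_value)
{
	u32 *count = context;

	(*count)++;
	return (AE_OK);
}

A caller would pass it along with &count as the context, e.g. acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, acpi_example_count_device, &count, NULL), per the prototype above.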
+
+struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, struct acpi_namespace_node
+						  *parent, struct acpi_namespace_node
+						  *child);
+
+/*
+ * nsparse - table parsing
+ */
+acpi_status
+acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node);
+
+acpi_status
+acpi_ns_one_complete_parse(u32 pass_number,
+			   u32 table_index,
+			   struct acpi_namespace_node *start_node);
+
+/*
+ * nsaccess - Top-level namespace access
+ */
+acpi_status acpi_ns_root_initialize(void);
+
+acpi_status
+acpi_ns_lookup(union acpi_generic_state *scope_info,
+	       char *name,
+	       acpi_object_type type,
+	       acpi_interpreter_mode interpreter_mode,
+	       u32 flags,
+	       struct acpi_walk_state *walk_state,
+	       struct acpi_namespace_node **ret_node);
+
+/*
+ * nsalloc - Named object allocation/deallocation
+ */
+struct acpi_namespace_node *acpi_ns_create_node(u32 name);
+
+void acpi_ns_delete_node(struct acpi_namespace_node *node);
+
+void
+acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_handle);
+
+void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id);
+
+void acpi_ns_detach_object(struct acpi_namespace_node *node);
+
+void acpi_ns_delete_children(struct acpi_namespace_node *parent);
+
+int acpi_ns_compare_names(char *name1, char *name2);
+
+/*
+ * nsdump - Namespace dump/print utilities
+ */
+#ifdef	ACPI_FUTURE_USAGE
+void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth);
+#endif				/* ACPI_FUTURE_USAGE */
+
+void acpi_ns_dump_entry(acpi_handle handle, u32 debug_level);
+
+void
+acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component);
+
+void acpi_ns_print_pathname(u32 num_segments, char *pathname);
+
+acpi_status
+acpi_ns_dump_one_object(acpi_handle obj_handle,
+			u32 level, void *context, void **return_value);
+
+#ifdef	ACPI_FUTURE_USAGE
+void
+acpi_ns_dump_objects(acpi_object_type type,
+		     u8 display_type,
+		     u32 max_depth,
+		     acpi_owner_id owner_id, acpi_handle start_handle);
+#endif				/* ACPI_FUTURE_USAGE */
+
+/*
+ * nseval - Namespace evaluation functions
+ */
+acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info);
+
+/*
+ * nspredef - Support for predefined/reserved names
+ */
+acpi_status
+acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
+			       u32 user_param_count,
+			       acpi_status return_status,
+			       union acpi_operand_object **return_object);
+
+const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
+								    acpi_namespace_node
+								    *node);
+
+void
+acpi_ns_check_parameter_count(char *pathname,
+			      struct acpi_namespace_node *node,
+			      u32 user_param_count,
+			      const union acpi_predefined_info *info);
+
+/*
+ * nsnames - Name and Scope manipulation
+ */
+u32 acpi_ns_opens_scope(acpi_object_type type);
+
+acpi_status
+acpi_ns_build_external_path(struct acpi_namespace_node *node,
+			    acpi_size size, char *name_buffer);
+
+char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node);
+
+char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ns_handle_to_pathname(acpi_handle target_handle,
+			   struct acpi_buffer *buffer);
+
+u8
+acpi_ns_pattern_match(struct acpi_namespace_node *obj_node, char *search_for);
+
+acpi_status
+acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
+		 const char *external_pathname,
+		 u32 flags, struct acpi_namespace_node **out_node);
+
+acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node);
+
+/*
+ * nsobject - Object management for namespace nodes
+ */
+acpi_status
+acpi_ns_attach_object(struct acpi_namespace_node *node,
+		      union acpi_operand_object *object, acpi_object_type type);
+
+union acpi_operand_object *acpi_ns_get_attached_object(struct
+						       acpi_namespace_node
+						       *node);
+
+union acpi_operand_object *acpi_ns_get_secondary_object(union
+							acpi_operand_object
+							*obj_desc);
+
+acpi_status
+acpi_ns_attach_data(struct acpi_namespace_node *node,
+		    acpi_object_handler handler, void *data);
+
+acpi_status
+acpi_ns_detach_data(struct acpi_namespace_node *node,
+		    acpi_object_handler handler);
+
+acpi_status
+acpi_ns_get_attached_data(struct acpi_namespace_node *node,
+			  acpi_object_handler handler, void **data);
+
+/*
+ * nssearch - Namespace searching and entry
+ */
+acpi_status
+acpi_ns_search_and_enter(u32 entry_name,
+			 struct acpi_walk_state *walk_state,
+			 struct acpi_namespace_node *node,
+			 acpi_interpreter_mode interpreter_mode,
+			 acpi_object_type type,
+			 u32 flags, struct acpi_namespace_node **ret_node);
+
+acpi_status
+acpi_ns_search_one_scope(u32 entry_name,
+			 struct acpi_namespace_node *node,
+			 acpi_object_type type,
+			 struct acpi_namespace_node **ret_node);
+
+void
+acpi_ns_install_node(struct acpi_walk_state *walk_state,
+		     struct acpi_namespace_node *parent_node,
+		     struct acpi_namespace_node *node, acpi_object_type type);
+
+/*
+ * nsutils - Utility functions
+ */
+u8 acpi_ns_valid_root_prefix(char prefix);
+
+acpi_object_type acpi_ns_get_type(struct acpi_namespace_node *node);
+
+u32 acpi_ns_local(acpi_object_type type);
+
+void
+acpi_ns_report_error(const char *module_name,
+		     u32 line_number,
+		     const char *internal_name, acpi_status lookup_status);
+
+void
+acpi_ns_report_method_error(const char *module_name,
+			    u32 line_number,
+			    const char *message,
+			    struct acpi_namespace_node *node,
+			    const char *path, acpi_status lookup_status);
+
+void
+acpi_ns_print_node_pathname(struct acpi_namespace_node *node, const char *msg);
+
+acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info);
+
+void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info);
+
+acpi_status
+acpi_ns_internalize_name(const char *dotted_name, char **converted_name);
+
+acpi_status
+acpi_ns_externalize_name(u32 internal_name_length,
+			 const char *internal_name,
+			 u32 * converted_name_length, char **converted_name);
+
+struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle);
+
+acpi_handle acpi_ns_convert_entry_to_handle(struct acpi_namespace_node *node);
+
+void acpi_ns_terminate(void);
+
+struct acpi_namespace_node *acpi_ns_get_parent_node(struct acpi_namespace_node
+						    *node);
+
+struct acpi_namespace_node *acpi_ns_get_next_valid_node(struct
+							acpi_namespace_node
+							*node);
+
+#endif				/* __ACNAMESP_H__ */
diff --git a/include/acpi/acobject.h b/drivers/acpi/acpica/acobject.h
similarity index 100%
rename from include/acpi/acobject.h
rename to drivers/acpi/acpica/acobject.h
diff --git a/include/acpi/acopcode.h b/drivers/acpi/acpica/acopcode.h
similarity index 100%
rename from include/acpi/acopcode.h
rename to drivers/acpi/acpica/acopcode.h
diff --git a/include/acpi/acparser.h b/drivers/acpi/acpica/acparser.h
similarity index 100%
rename from include/acpi/acparser.h
rename to drivers/acpi/acpica/acparser.h
diff --git a/include/acpi/acpredef.h b/drivers/acpi/acpica/acpredef.h
similarity index 100%
rename from include/acpi/acpredef.h
rename to drivers/acpi/acpica/acpredef.h
diff --git a/include/acpi/acresrc.h b/drivers/acpi/acpica/acresrc.h
similarity index 100%
rename from include/acpi/acresrc.h
rename to drivers/acpi/acpica/acresrc.h
diff --git a/include/acpi/acstruct.h b/drivers/acpi/acpica/acstruct.h
similarity index 100%
rename from include/acpi/acstruct.h
rename to drivers/acpi/acpica/acstruct.h
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
new file mode 100644
index 0000000..7ce6e33
--- /dev/null
+++ b/drivers/acpi/acpica/actables.h
@@ -0,0 +1,117 @@
+/******************************************************************************
+ *
+ * Name: actables.h - ACPI table management
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACTABLES_H__
+#define __ACTABLES_H__
+
+acpi_status acpi_allocate_root_table(u32 initial_table_count);
+
+/*
+ * tbfadt - FADT parse/convert/validate
+ */
+void acpi_tb_parse_fadt(u32 table_index, u8 flags);
+
+void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length);
+
+/*
+ * tbfind - find ACPI table
+ */
+acpi_status
+acpi_tb_find_table(char *signature,
+		   char *oem_id, char *oem_table_id, u32 *table_index);
+
+/*
+ * tbinstal - Table removal and deletion
+ */
+acpi_status acpi_tb_resize_root_table_list(void);
+
+acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc);
+
+acpi_status
+acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index);
+
+acpi_status
+acpi_tb_store_table(acpi_physical_address address,
+		    struct acpi_table_header *table,
+		    u32 length, u8 flags, u32 *table_index);
+
+void acpi_tb_delete_table(struct acpi_table_desc *table_desc);
+
+void acpi_tb_terminate(void);
+
+void acpi_tb_delete_namespace_by_owner(u32 table_index);
+
+acpi_status acpi_tb_allocate_owner_id(u32 table_index);
+
+acpi_status acpi_tb_release_owner_id(u32 table_index);
+
+acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id);
+
+u8 acpi_tb_is_table_loaded(u32 table_index);
+
+void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded);
+
+/*
+ * tbutils - table manager utilities
+ */
+acpi_status acpi_tb_initialize_facs(void);
+
+u8 acpi_tb_tables_loaded(void);
+
+void
+acpi_tb_print_table_header(acpi_physical_address address,
+			   struct acpi_table_header *header);
+
+u8 acpi_tb_checksum(u8 *buffer, u32 length);
+
+acpi_status
+acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length);
+
+void
+acpi_tb_install_table(acpi_physical_address address,
+		      u8 flags, char *signature, u32 table_index);
+
+acpi_status
+acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags);
+
+#endif				/* __ACTABLES_H__ */
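
The checksum helpers declared above (acpi_tb_checksum, acpi_tb_verify_checksum) rely on the standard ACPI rule that the byte-wise sum of an entire table, checksum field included, must be zero modulo 0x100. A minimal stand-alone sketch of that rule, using stdint.h types in place of the kernel's u8/u32; this is illustrative only, not the ACPICA implementation:

#include <stdint.h>
#include <stdio.h>

/* Sum every byte of a table; a valid ACPI table sums to 0 (mod 256). */
static uint8_t example_table_checksum(const uint8_t *buffer, uint32_t length)
{
	uint8_t sum = 0;
	uint32_t i;

	for (i = 0; i < length; i++)
		sum = (uint8_t)(sum + buffer[i]);

	return sum;
}

int main(void)
{
	uint8_t table[4] = { 0x10, 0x20, 0x30, 0x00 };

	/* Patch the last byte so the whole buffer sums to zero. */
	table[3] = (uint8_t)(0x100 - example_table_checksum(table, 4));

	printf("checksum ok: %d\n", example_table_checksum(table, 4) == 0);
	return 0;
}
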
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
new file mode 100644
index 0000000..80d8813
--- /dev/null
+++ b/drivers/acpi/acpica/acutils.h
@@ -0,0 +1,549 @@
+/******************************************************************************
+ *
+ * Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef _ACUTILS_H
+#define _ACUTILS_H
+
+extern const u8 acpi_gbl_resource_aml_sizes[];
+
+/* Strings used by the disassembler and debugger resource dump routines */
+
+#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
+
+extern const char *acpi_gbl_bm_decode[];
+extern const char *acpi_gbl_config_decode[];
+extern const char *acpi_gbl_consume_decode[];
+extern const char *acpi_gbl_dec_decode[];
+extern const char *acpi_gbl_he_decode[];
+extern const char *acpi_gbl_io_decode[];
+extern const char *acpi_gbl_ll_decode[];
+extern const char *acpi_gbl_max_decode[];
+extern const char *acpi_gbl_mem_decode[];
+extern const char *acpi_gbl_min_decode[];
+extern const char *acpi_gbl_mtp_decode[];
+extern const char *acpi_gbl_rng_decode[];
+extern const char *acpi_gbl_rw_decode[];
+extern const char *acpi_gbl_shr_decode[];
+extern const char *acpi_gbl_siz_decode[];
+extern const char *acpi_gbl_trs_decode[];
+extern const char *acpi_gbl_ttp_decode[];
+extern const char *acpi_gbl_typ_decode[];
+#endif
+
+/* Types for Resource descriptor entries */
+
+#define ACPI_INVALID_RESOURCE           0
+#define ACPI_FIXED_LENGTH               1
+#define ACPI_VARIABLE_LENGTH            2
+#define ACPI_SMALL_VARIABLE_LENGTH      3
+
+typedef
+acpi_status(*acpi_walk_aml_callback) (u8 * aml,
+				      u32 length,
+				      u32 offset,
+				      u8 resource_index, void **context);
+
+typedef
+acpi_status(*acpi_pkg_callback) (u8 object_type,
+				 union acpi_operand_object * source_object,
+				 union acpi_generic_state * state,
+				 void *context);
+
+struct acpi_pkg_info {
+	u8 *free_space;
+	acpi_size length;
+	u32 object_space;
+	u32 num_packages;
+};
+
+#define REF_INCREMENT       (u16) 0
+#define REF_DECREMENT       (u16) 1
+#define REF_FORCE_DELETE    (u16) 2
+
+/* acpi_ut_dump_buffer */
+
+#define DB_BYTE_DISPLAY     1
+#define DB_WORD_DISPLAY     2
+#define DB_DWORD_DISPLAY    4
+#define DB_QWORD_DISPLAY    8
+
+/*
+ * utglobal - Global data structures and procedures
+ */
+acpi_status acpi_ut_init_globals(void);
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+char *acpi_ut_get_mutex_name(u32 mutex_id);
+
+const char *acpi_ut_get_notify_name(u32 notify_value);
+
+#endif
+
+char *acpi_ut_get_type_name(acpi_object_type type);
+
+char *acpi_ut_get_node_name(void *object);
+
+char *acpi_ut_get_descriptor_name(void *object);
+
+const char *acpi_ut_get_reference_name(union acpi_operand_object *object);
+
+char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc);
+
+char *acpi_ut_get_region_name(u8 space_id);
+
+char *acpi_ut_get_event_name(u32 event_id);
+
+char acpi_ut_hex_to_ascii_char(acpi_integer integer, u32 position);
+
+u8 acpi_ut_valid_object_type(acpi_object_type type);
+
+/*
+ * utinit - miscellaneous initialization and shutdown
+ */
+acpi_status acpi_ut_hardware_initialize(void);
+
+void acpi_ut_subsystem_shutdown(void);
+
+/*
+ * utclib - Local implementations of C library functions
+ */
+#ifndef ACPI_USE_SYSTEM_CLIBRARY
+
+acpi_size acpi_ut_strlen(const char *string);
+
+char *acpi_ut_strcpy(char *dst_string, const char *src_string);
+
+char *acpi_ut_strncpy(char *dst_string,
+		      const char *src_string, acpi_size count);
+
+int acpi_ut_memcmp(const char *buffer1, const char *buffer2, acpi_size count);
+
+int acpi_ut_strncmp(const char *string1, const char *string2, acpi_size count);
+
+int acpi_ut_strcmp(const char *string1, const char *string2);
+
+char *acpi_ut_strcat(char *dst_string, const char *src_string);
+
+char *acpi_ut_strncat(char *dst_string,
+		      const char *src_string, acpi_size count);
+
+u32 acpi_ut_strtoul(const char *string, char **terminator, u32 base);
+
+char *acpi_ut_strstr(char *string1, char *string2);
+
+void *acpi_ut_memcpy(void *dest, const void *src, acpi_size count);
+
+void *acpi_ut_memset(void *dest, u8 value, acpi_size count);
+
+int acpi_ut_to_upper(int c);
+
+int acpi_ut_to_lower(int c);
+
+extern const u8 _acpi_ctype[];
+
+#define _ACPI_XA     0x00	/* extra alphabetic - not supported */
+#define _ACPI_XS     0x40	/* extra space */
+#define _ACPI_BB     0x00	/* BEL, BS, etc. - not supported */
+#define _ACPI_CN     0x20	/* CR, FF, HT, NL, VT */
+#define _ACPI_DI     0x04	/* '0'-'9' */
+#define _ACPI_LO     0x02	/* 'a'-'z' */
+#define _ACPI_PU     0x10	/* punctuation */
+#define _ACPI_SP     0x08	/* space */
+#define _ACPI_UP     0x01	/* 'A'-'Z' */
+#define _ACPI_XD     0x80	/* '0'-'9', 'A'-'F', 'a'-'f' */
+
+#define ACPI_IS_DIGIT(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_DI))
+#define ACPI_IS_SPACE(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_SP))
+#define ACPI_IS_XDIGIT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_XD))
+#define ACPI_IS_UPPER(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_UP))
+#define ACPI_IS_LOWER(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO))
+#define ACPI_IS_PRINT(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_SP | _ACPI_PU))
+#define ACPI_IS_ALPHA(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP))
+
+#endif				/* ACPI_USE_SYSTEM_CLIBRARY */
+
+/*
+ * utcopy - Object construction and conversion interfaces
+ */
+acpi_status
+acpi_ut_build_simple_object(union acpi_operand_object *obj,
+			    union acpi_object *user_obj,
+			    u8 * data_space, u32 * buffer_space_used);
+
+acpi_status
+acpi_ut_build_package_object(union acpi_operand_object *obj,
+			     u8 * buffer, u32 * space_used);
+
+acpi_status
+acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *obj,
+				struct acpi_buffer *ret_buffer);
+
+acpi_status
+acpi_ut_copy_eobject_to_iobject(union acpi_object *obj,
+				union acpi_operand_object **internal_obj);
+
+acpi_status
+acpi_ut_copy_isimple_to_isimple(union acpi_operand_object *source_obj,
+				union acpi_operand_object *dest_obj);
+
+acpi_status
+acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
+				union acpi_operand_object **dest_desc,
+				struct acpi_walk_state *walk_state);
+
+/*
+ * utcreate - Object creation
+ */
+acpi_status
+acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action);
+
+/*
+ * utdebug - Debug interfaces
+ */
+void acpi_ut_init_stack_ptr_trace(void);
+
+void acpi_ut_track_stack_ptr(void);
+
+void
+acpi_ut_trace(u32 line_number,
+	      const char *function_name,
+	      const char *module_name, u32 component_id);
+
+void
+acpi_ut_trace_ptr(u32 line_number,
+		  const char *function_name,
+		  const char *module_name, u32 component_id, void *pointer);
+
+void
+acpi_ut_trace_u32(u32 line_number,
+		  const char *function_name,
+		  const char *module_name, u32 component_id, u32 integer);
+
+void
+acpi_ut_trace_str(u32 line_number,
+		  const char *function_name,
+		  const char *module_name, u32 component_id, char *string);
+
+void
+acpi_ut_exit(u32 line_number,
+	     const char *function_name,
+	     const char *module_name, u32 component_id);
+
+void
+acpi_ut_status_exit(u32 line_number,
+		    const char *function_name,
+		    const char *module_name,
+		    u32 component_id, acpi_status status);
+
+void
+acpi_ut_value_exit(u32 line_number,
+		   const char *function_name,
+		   const char *module_name,
+		   u32 component_id, acpi_integer value);
+
+void
+acpi_ut_ptr_exit(u32 line_number,
+		 const char *function_name,
+		 const char *module_name, u32 component_id, u8 *ptr);
+
+void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id);
+
+void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display);
+
+void acpi_ut_report_error(char *module_name, u32 line_number);
+
+void acpi_ut_report_info(char *module_name, u32 line_number);
+
+void acpi_ut_report_warning(char *module_name, u32 line_number);
+
+/*
+ * utdelete - Object deletion and reference counts
+ */
+void acpi_ut_add_reference(union acpi_operand_object *object);
+
+void acpi_ut_remove_reference(union acpi_operand_object *object);
+
+void acpi_ut_delete_internal_package_object(union acpi_operand_object *object);
+
+void acpi_ut_delete_internal_simple_object(union acpi_operand_object *object);
+
+void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list);
+
+/*
+ * uteval - object evaluation
+ */
+acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state);
+
+acpi_status
+acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
+			char *path,
+			u32 expected_return_btypes,
+			union acpi_operand_object **return_desc);
+
+acpi_status
+acpi_ut_evaluate_numeric_object(char *object_name,
+				struct acpi_namespace_node *device_node,
+				acpi_integer * address);
+
+acpi_status
+acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
+		    struct acpica_device_id *hid);
+
+acpi_status
+acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
+		    struct acpi_compatible_id_list **return_cid_list);
+
+acpi_status
+acpi_ut_execute_STA(struct acpi_namespace_node *device_node,
+		    u32 * status_flags);
+
+acpi_status
+acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
+		    struct acpica_device_id *uid);
+
+acpi_status
+acpi_ut_execute_sxds(struct acpi_namespace_node *device_node, u8 * highest);
+
+/*
+ * utobject - internal object create/delete/cache routines
+ */
+union acpi_operand_object *acpi_ut_create_internal_object_dbg(const char
+							      *module_name,
+							      u32 line_number,
+							      u32 component_id,
+							      acpi_object_type
+							      type);
+
+void *acpi_ut_allocate_object_desc_dbg(const char *module_name,
+				       u32 line_number, u32 component_id);
+
+#define acpi_ut_create_internal_object(t) acpi_ut_create_internal_object_dbg (_acpi_module_name,__LINE__,_COMPONENT,t)
+#define acpi_ut_allocate_object_desc()  acpi_ut_allocate_object_desc_dbg (_acpi_module_name,__LINE__,_COMPONENT)
+
+void acpi_ut_delete_object_desc(union acpi_operand_object *object);
+
+u8 acpi_ut_valid_internal_object(void *object);
+
+union acpi_operand_object *acpi_ut_create_package_object(u32 count);
+
+union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size);
+
+union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size);
+
+acpi_status
+acpi_ut_get_object_size(union acpi_operand_object *obj, acpi_size * obj_length);
+
+/*
+ * utstate - Generic state creation/cache routines
+ */
+void
+acpi_ut_push_generic_state(union acpi_generic_state **list_head,
+			   union acpi_generic_state *state);
+
+union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
+						    **list_head);
+
+union acpi_generic_state *acpi_ut_create_generic_state(void);
+
+struct acpi_thread_state *acpi_ut_create_thread_state(void);
+
+union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
+						      *object, u16 action);
+
+union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
+						   void *external_object,
+						   u16 index);
+
+acpi_status
+acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
+				     u16 action,
+				     union acpi_generic_state **state_list);
+
+#ifdef	ACPI_FUTURE_USAGE
+acpi_status
+acpi_ut_create_pkg_state_and_push(void *internal_object,
+				  void *external_object,
+				  u16 index,
+				  union acpi_generic_state **state_list);
+#endif				/* ACPI_FUTURE_USAGE */
+
+union acpi_generic_state *acpi_ut_create_control_state(void);
+
+void acpi_ut_delete_generic_state(union acpi_generic_state *state);
+
+/*
+ * utmath
+ */
+acpi_status
+acpi_ut_divide(acpi_integer in_dividend,
+	       acpi_integer in_divisor,
+	       acpi_integer * out_quotient, acpi_integer * out_remainder);
+
+acpi_status
+acpi_ut_short_divide(acpi_integer in_dividend,
+		     u32 divisor,
+		     acpi_integer * out_quotient, u32 * out_remainder);
+
+/*
+ * utmisc
+ */
+const char *acpi_ut_validate_exception(acpi_status status);
+
+u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
+
+acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
+
+void acpi_ut_release_owner_id(acpi_owner_id * owner_id);
+
+acpi_status
+acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
+			  void *target_object,
+			  acpi_pkg_callback walk_callback, void *context);
+
+void acpi_ut_strupr(char *src_string);
+
+void acpi_ut_print_string(char *string, u8 max_length);
+
+u8 acpi_ut_valid_acpi_name(u32 name);
+
+acpi_name acpi_ut_repair_name(char *name);
+
+u8 acpi_ut_valid_acpi_char(char character, u32 position);
+
+acpi_status
+acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer);
+
+/* Values for Base above (16=Hex, 10=Decimal) */
+
+#define ACPI_ANY_BASE        0
+
+u32 acpi_ut_dword_byte_swap(u32 value);
+
+void acpi_ut_set_integer_width(u8 revision);
+
+#ifdef ACPI_DEBUG_OUTPUT
+void
+acpi_ut_display_init_pathname(u8 type,
+			      struct acpi_namespace_node *obj_handle,
+			      char *path);
+#endif
+
+/*
+ * utresrc
+ */
+acpi_status
+acpi_ut_walk_aml_resources(u8 * aml,
+			   acpi_size aml_length,
+			   acpi_walk_aml_callback user_function,
+			   void **context);
+
+acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index);
+
+u32 acpi_ut_get_descriptor_length(void *aml);
+
+u16 acpi_ut_get_resource_length(void *aml);
+
+u8 acpi_ut_get_resource_header_length(void *aml);
+
+u8 acpi_ut_get_resource_type(void *aml);
+
+acpi_status
+acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc,
+			     u8 ** end_tag);
+
+/*
+ * utmutex - mutex support
+ */
+acpi_status acpi_ut_mutex_initialize(void);
+
+void acpi_ut_mutex_terminate(void);
+
+acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id);
+
+acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id);
+
+/*
+ * utalloc - memory allocation and object caching
+ */
+acpi_status acpi_ut_create_caches(void);
+
+acpi_status acpi_ut_delete_caches(void);
+
+acpi_status acpi_ut_validate_buffer(struct acpi_buffer *buffer);
+
+acpi_status
+acpi_ut_initialize_buffer(struct acpi_buffer *buffer,
+			  acpi_size required_length);
+
+void *acpi_ut_allocate(acpi_size size,
+		       u32 component, const char *module, u32 line);
+
+void *acpi_ut_allocate_zeroed(acpi_size size,
+			      u32 component, const char *module, u32 line);
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+void *acpi_ut_allocate_and_track(acpi_size size,
+				 u32 component, const char *module, u32 line);
+
+void *acpi_ut_allocate_zeroed_and_track(acpi_size size,
+					u32 component,
+					const char *module, u32 line);
+
+void
+acpi_ut_free_and_track(void *address,
+		       u32 component, const char *module, u32 line);
+
+#ifdef	ACPI_FUTURE_USAGE
+void acpi_ut_dump_allocation_info(void);
+#endif				/* ACPI_FUTURE_USAGE */
+
+void acpi_ut_dump_allocations(u32 component, const char *module);
+
+acpi_status
+acpi_ut_create_list(char *list_name,
+		    u16 object_size, struct acpi_memory_list **return_cache);
+
+#endif
+
+#endif				/* _ACUTILS_H */
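
The ACPI_IS_* macros above classify a character with a single table lookup: each entry of _acpi_ctype[] is a bitmask assembled from the _ACPI_* flags, so every test costs one array index and one AND. A self-contained sketch of the same scheme, with a hypothetical table initializer rather than the kernel's _acpi_ctype[] data:

#include <stdio.h>

#define EX_DI 0x04	/* '0'-'9' */
#define EX_LO 0x02	/* 'a'-'z' */
#define EX_UP 0x01	/* 'A'-'Z' */

static unsigned char ex_ctype[256];

/* Build the classification table once; each entry ORs in the flags that apply. */
static void ex_ctype_init(void)
{
	int c;

	for (c = '0'; c <= '9'; c++)
		ex_ctype[c] |= EX_DI;
	for (c = 'a'; c <= 'z'; c++)
		ex_ctype[c] |= EX_LO;
	for (c = 'A'; c <= 'Z'; c++)
		ex_ctype[c] |= EX_UP;
}

#define EX_IS_DIGIT(c) (ex_ctype[(unsigned char)(c)] & EX_DI)
#define EX_IS_ALPHA(c) (ex_ctype[(unsigned char)(c)] & (EX_LO | EX_UP))

int main(void)
{
	ex_ctype_init();
	printf("'7' digit? %d  'Q' alpha? %d\n",
	       EX_IS_DIGIT('7') ? 1 : 0, EX_IS_ALPHA('Q') ? 1 : 0);
	return 0;
}
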
diff --git a/include/acpi/amlcode.h b/drivers/acpi/acpica/amlcode.h
similarity index 100%
rename from include/acpi/amlcode.h
rename to drivers/acpi/acpica/amlcode.h
diff --git a/include/acpi/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
similarity index 100%
rename from include/acpi/amlresrc.h
rename to drivers/acpi/acpica/amlresrc.h
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
new file mode 100644
index 0000000..53e27bc
--- /dev/null
+++ b/drivers/acpi/acpica/dsfield.c
@@ -0,0 +1,650 @@
+/******************************************************************************
+ *
+ * Module Name: dsfield - Dispatcher field routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "acnamesp.h"
+#include "acparser.h"
+
+#define _COMPONENT          ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsfield")
+
+/* Local prototypes */
+static acpi_status
+acpi_ds_get_field_names(struct acpi_create_field_info *info,
+			struct acpi_walk_state *walk_state,
+			union acpi_parse_object *arg);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_create_buffer_field
+ *
+ * PARAMETERS:  Op                  - Current parse op (create_xXField)
+ *              walk_state          - Current state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute the create_field operators:
+ *              create_bit_field_op,
+ *              create_byte_field_op,
+ *              create_word_field_op,
+ *              create_dword_field_op,
+ *              create_qword_field_op,
+ *              create_field_op     (all of which define a field in a buffer)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_create_buffer_field(union acpi_parse_object *op,
+			    struct acpi_walk_state *walk_state)
+{
+	union acpi_parse_object *arg;
+	struct acpi_namespace_node *node;
+	acpi_status status;
+	union acpi_operand_object *obj_desc;
+	union acpi_operand_object *second_desc = NULL;
+	u32 flags;
+
+	ACPI_FUNCTION_TRACE(ds_create_buffer_field);
+
+	/*
+	 * Get the name_string argument (name of the new buffer_field)
+	 */
+	if (op->common.aml_opcode == AML_CREATE_FIELD_OP) {
+
+		/* For create_field, name is the 4th argument */
+
+		arg = acpi_ps_get_arg(op, 3);
+	} else {
+		/* For all other create_xXXField operators, name is the 3rd argument */
+
+		arg = acpi_ps_get_arg(op, 2);
+	}
+
+	if (!arg) {
+		return_ACPI_STATUS(AE_AML_NO_OPERAND);
+	}
+
+	if (walk_state->deferred_node) {
+		node = walk_state->deferred_node;
+		status = AE_OK;
+	} else {
+		/* Execute flag should always be set when this function is entered */
+
+		if (!(walk_state->parse_flags & ACPI_PARSE_EXECUTE)) {
+			return_ACPI_STATUS(AE_AML_INTERNAL);
+		}
+
+		/* Creating new namespace node, should not already exist */
+
+		flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
+		    ACPI_NS_ERROR_IF_FOUND;
+
+		/* Mark node temporary if we are executing a method */
+
+		if (walk_state->method_node) {
+			flags |= ACPI_NS_TEMPORARY;
+		}
+
+		/* Enter the name_string into the namespace */
+
+		status =
+		    acpi_ns_lookup(walk_state->scope_info,
+				   arg->common.value.string, ACPI_TYPE_ANY,
+				   ACPI_IMODE_LOAD_PASS1, flags, walk_state,
+				   &node);
+		if (ACPI_FAILURE(status)) {
+			ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/*
+	 * We could put the returned object (Node) on the object stack for later,
+	 * but for now, we will put it in the "op" object that the parser uses,
+	 * so we can get it again at the end of this scope.
+	 */
+	op->common.node = node;
+
+	/*
+	 * If there is no object attached to the node, this node was just created
+	 * and we need to create the field object. Otherwise, this was a lookup
+	 * of an existing node and we don't want to create the field object again.
+	 */
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (obj_desc) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/*
+	 * The Field definition is not fully parsed at this time.
+	 * (We must save the address of the AML for the buffer and index operands)
+	 */
+
+	/* Create the buffer field object */
+
+	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER_FIELD);
+	if (!obj_desc) {
+		status = AE_NO_MEMORY;
+		goto cleanup;
+	}
+
+	/*
+	 * Remember location in AML stream of the field unit opcode and operands --
+	 * since the buffer and index operands must be evaluated.
+	 */
+	second_desc = obj_desc->common.next_object;
+	second_desc->extra.aml_start = op->named.data;
+	second_desc->extra.aml_length = op->named.length;
+	obj_desc->buffer_field.node = node;
+
+	/* Attach constructed field descriptors to parent node */
+
+	status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_BUFFER_FIELD);
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+      cleanup:
+
+	/* Remove local reference to the object */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_get_field_names
+ *
+ * PARAMETERS:  Info            - create_field info structure
+ *              walk_state      - Current method state
+ *              Arg             - First parser arg for the field name list
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Process all named fields in a field declaration.  Names are
+ *              entered into the namespace.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_get_field_names(struct acpi_create_field_info *info,
+			struct acpi_walk_state *walk_state,
+			union acpi_parse_object *arg)
+{
+	acpi_status status;
+	acpi_integer position;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
+
+	/* First field starts at bit zero */
+
+	info->field_bit_position = 0;
+
+	/* Process all elements in the field list (of parse nodes) */
+
+	while (arg) {
+		/*
+		 * Three types of field elements are handled:
+		 * 1) Offset - specifies a bit offset
+		 * 2) access_as - changes the access mode
+		 * 3) Name - Enters a new named field into the namespace
+		 */
+		switch (arg->common.aml_opcode) {
+		case AML_INT_RESERVEDFIELD_OP:
+
+			position = (acpi_integer) info->field_bit_position
+			    + (acpi_integer) arg->common.value.size;
+
+			if (position > ACPI_UINT32_MAX) {
+				ACPI_ERROR((AE_INFO,
+					    "Bit offset within field too large (> 0xFFFFFFFF)"));
+				return_ACPI_STATUS(AE_SUPPORT);
+			}
+
+			info->field_bit_position = (u32) position;
+			break;
+
+		case AML_INT_ACCESSFIELD_OP:
+
+			/*
+			 * Get a new access_type and access_attribute -- to be used for all
+			 * field units that follow, until field end or another access_as
+			 * keyword.
+			 *
+			 * In field_flags, preserve the flag bits other than the
+			 * ACCESS_TYPE bits
+			 */
+			info->field_flags = (u8)
+			    ((info->
+			      field_flags & ~(AML_FIELD_ACCESS_TYPE_MASK)) |
+			     ((u8) ((u32) arg->common.value.integer >> 8)));
+
+			info->attribute = (u8) (arg->common.value.integer);
+			break;
+
+		case AML_INT_NAMEDFIELD_OP:
+
+			/* Lookup the name, it should already exist */
+
+			status = acpi_ns_lookup(walk_state->scope_info,
+						(char *)&arg->named.name,
+						info->field_type,
+						ACPI_IMODE_EXECUTE,
+						ACPI_NS_DONT_OPEN_SCOPE,
+						walk_state, &info->field_node);
+			if (ACPI_FAILURE(status)) {
+				ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+						     status);
+				return_ACPI_STATUS(status);
+			} else {
+				arg->common.node = info->field_node;
+				info->field_bit_length = arg->common.value.size;
+
+				/*
+				 * If there is no object attached to the node, this node was
+				 * just created and we need to create the field object.
+				 * Otherwise, this was a lookup of an existing node and we
+				 * don't want to create the field object again.
+				 */
+				if (!acpi_ns_get_attached_object
+				    (info->field_node)) {
+					status = acpi_ex_prep_field_value(info);
+					if (ACPI_FAILURE(status)) {
+						return_ACPI_STATUS(status);
+					}
+				}
+			}
+
+			/* Keep track of bit position for the next field */
+
+			position = (acpi_integer) info->field_bit_position
+			    + (acpi_integer) arg->common.value.size;
+
+			if (position > ACPI_UINT32_MAX) {
+				ACPI_ERROR((AE_INFO,
+					    "Field [%4.4s] bit offset too large (> 0xFFFFFFFF)",
+					    ACPI_CAST_PTR(char,
+							  &info->field_node->
+							  name)));
+				return_ACPI_STATUS(AE_SUPPORT);
+			}
+
+			info->field_bit_position += info->field_bit_length;
+			break;
+
+		default:
+
+			ACPI_ERROR((AE_INFO,
+				    "Invalid opcode in field list: %X",
+				    arg->common.aml_opcode));
+			return_ACPI_STATUS(AE_AML_BAD_OPCODE);
+		}
+
+		arg = arg->common.next;
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_create_field
+ *
+ * PARAMETERS:  Op              - Op containing the Field definition and args
+ *              region_node     - Object for the containing Operation Region
+ *              walk_state      - Current method state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a new field in the specified operation region
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_create_field(union acpi_parse_object *op,
+		     struct acpi_namespace_node *region_node,
+		     struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	union acpi_parse_object *arg;
+	struct acpi_create_field_info info;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_create_field, op);
+
+	/* First arg is the name of the parent op_region (must already exist) */
+
+	arg = op->common.value.arg;
+	if (!region_node) {
+		status =
+		    acpi_ns_lookup(walk_state->scope_info,
+				   arg->common.value.name, ACPI_TYPE_REGION,
+				   ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
+				   walk_state, &region_node);
+		if (ACPI_FAILURE(status)) {
+			ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/* Second arg is the field flags */
+
+	arg = arg->common.next;
+	info.field_flags = (u8) arg->common.value.integer;
+	info.attribute = 0;
+
+	/* Each remaining arg is a Named Field */
+
+	info.field_type = ACPI_TYPE_LOCAL_REGION_FIELD;
+	info.region_node = region_node;
+
+	status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_init_field_objects
+ *
+ * PARAMETERS:  Op              - Op containing the Field definition and args
+ *              walk_state      - Current method state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: For each "Field Unit" name in the argument list that is
+ *              part of the field declaration, enter the name into the
+ *              namespace.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_init_field_objects(union acpi_parse_object *op,
+			   struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	union acpi_parse_object *arg = NULL;
+	struct acpi_namespace_node *node;
+	u8 type = 0;
+	u32 flags;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_init_field_objects, op);
+
+	/* Execute flag should always be set when this function is entered */
+
+	if (!(walk_state->parse_flags & ACPI_PARSE_EXECUTE)) {
+		if (walk_state->parse_flags & ACPI_PARSE_DEFERRED_OP) {
+
+			/* bank_field Op is deferred, just return OK */
+
+			return_ACPI_STATUS(AE_OK);
+		}
+
+		return_ACPI_STATUS(AE_AML_INTERNAL);
+	}
+
+	/*
+	 * Get the field_list argument for this opcode. This is the start of the
+	 * list of field elements.
+	 */
+	switch (walk_state->opcode) {
+	case AML_FIELD_OP:
+		arg = acpi_ps_get_arg(op, 2);
+		type = ACPI_TYPE_LOCAL_REGION_FIELD;
+		break;
+
+	case AML_BANK_FIELD_OP:
+		arg = acpi_ps_get_arg(op, 4);
+		type = ACPI_TYPE_LOCAL_BANK_FIELD;
+		break;
+
+	case AML_INDEX_FIELD_OP:
+		arg = acpi_ps_get_arg(op, 3);
+		type = ACPI_TYPE_LOCAL_INDEX_FIELD;
+		break;
+
+	default:
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Creating new namespace node(s), should not already exist */
+
+	flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
+	    ACPI_NS_ERROR_IF_FOUND;
+
+	/* Mark node(s) temporary if we are executing a method */
+
+	if (walk_state->method_node) {
+		flags |= ACPI_NS_TEMPORARY;
+	}
+
+	/*
+	 * Walk the list of entries in the field_list
+	 * Note: field_list can be of zero length. In this case, Arg will be NULL.
+	 */
+	while (arg) {
+		/*
+		 * Ignore OFFSET and ACCESSAS terms here; we are only interested in the
+		 * field names in order to enter them into the namespace.
+		 */
+		if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
+			status = acpi_ns_lookup(walk_state->scope_info,
+						(char *)&arg->named.name, type,
+						ACPI_IMODE_LOAD_PASS1, flags,
+						walk_state, &node);
+			if (ACPI_FAILURE(status)) {
+				ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+						     status);
+				if (status != AE_ALREADY_EXISTS) {
+					return_ACPI_STATUS(status);
+				}
+
+				/* Name already exists, just ignore this error */
+
+				status = AE_OK;
+			}
+
+			arg->common.node = node;
+		}
+
+		/* Get the next field element in the list */
+
+		arg = arg->common.next;
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_create_bank_field
+ *
+ * PARAMETERS:  Op              - Op containing the Field definition and args
+ *              region_node     - Object for the containing Operation Region
+ *              walk_state      - Current method state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a new bank field in the specified operation region
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_create_bank_field(union acpi_parse_object *op,
+			  struct acpi_namespace_node *region_node,
+			  struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	union acpi_parse_object *arg;
+	struct acpi_create_field_info info;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_create_bank_field, op);
+
+	/* First arg is the name of the parent op_region (must already exist) */
+
+	arg = op->common.value.arg;
+	if (!region_node) {
+		status =
+		    acpi_ns_lookup(walk_state->scope_info,
+				   arg->common.value.name, ACPI_TYPE_REGION,
+				   ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
+				   walk_state, &region_node);
+		if (ACPI_FAILURE(status)) {
+			ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/* Second arg is the Bank Register (Field) (must already exist) */
+
+	arg = arg->common.next;
+	status =
+	    acpi_ns_lookup(walk_state->scope_info, arg->common.value.string,
+			   ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
+			   ACPI_NS_SEARCH_PARENT, walk_state,
+			   &info.register_node);
+	if (ACPI_FAILURE(status)) {
+		ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Third arg is the bank_value
+	 * This arg is a term_arg, not a constant
+	 * It will be evaluated later, by acpi_ds_eval_bank_field_operands
+	 */
+	arg = arg->common.next;
+
+	/* Fourth arg is the field flags */
+
+	arg = arg->common.next;
+	info.field_flags = (u8) arg->common.value.integer;
+
+	/* Each remaining arg is a Named Field */
+
+	info.field_type = ACPI_TYPE_LOCAL_BANK_FIELD;
+	info.region_node = region_node;
+
+	/*
+	 * Use Info.data_register_node to store the bank_field Op. This is safe
+	 * because data_register_node is never used while a bank field is being
+	 * created. We store aml_start and aml_length in the bank_field Op for
+	 * later evaluation in acpi_ex_prep_field_value(Info).
+	 *
+	 * TBD: Or, should we add a field in struct acpi_create_field_info, like "void *ParentOp"?
+	 */
+	info.data_register_node = (struct acpi_namespace_node *)op;
+
+	status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_create_index_field
+ *
+ * PARAMETERS:  Op              - Op containing the Field definition and args
+ *              region_node     - Object for the containing Operation Region
+ *              walk_state      - Current method state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a new index field in the specified operation region
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_create_index_field(union acpi_parse_object *op,
+			   struct acpi_namespace_node *region_node,
+			   struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	union acpi_parse_object *arg;
+	struct acpi_create_field_info info;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_create_index_field, op);
+
+	/* First arg is the name of the Index register (must already exist) */
+
+	arg = op->common.value.arg;
+	status =
+	    acpi_ns_lookup(walk_state->scope_info, arg->common.value.string,
+			   ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
+			   ACPI_NS_SEARCH_PARENT, walk_state,
+			   &info.register_node);
+	if (ACPI_FAILURE(status)) {
+		ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+		return_ACPI_STATUS(status);
+	}
+
+	/* Second arg is the data register (must already exist) */
+
+	arg = arg->common.next;
+	status =
+	    acpi_ns_lookup(walk_state->scope_info, arg->common.value.string,
+			   ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
+			   ACPI_NS_SEARCH_PARENT, walk_state,
+			   &info.data_register_node);
+	if (ACPI_FAILURE(status)) {
+		ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+		return_ACPI_STATUS(status);
+	}
+
+	/* Next arg is the field flags */
+
+	arg = arg->common.next;
+	info.field_flags = (u8) arg->common.value.integer;
+
+	/* Each remaining arg is a Named Field */
+
+	info.field_type = ACPI_TYPE_LOCAL_INDEX_FIELD;
+	info.region_node = region_node;
+
+	status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
+
+	return_ACPI_STATUS(status);
+}
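
acpi_ds_get_field_names() above widens the running bit offset to acpi_integer before adding each element's size, so a field list whose total offset would not fit in 32 bits is rejected (AE_SUPPORT) rather than silently wrapped. A stand-alone sketch of that overflow guard, using stdint.h types in place of the kernel's u32/acpi_integer:

#include <stdint.h>
#include <stdio.h>

/* Add 'bits' to the running field offset, rejecting 32-bit overflow. */
static int example_advance_bit_position(uint32_t *bit_position, uint32_t bits)
{
	uint64_t position = (uint64_t)*bit_position + (uint64_t)bits;

	if (position > UINT32_MAX)
		return -1;	/* corresponds to AE_SUPPORT in the code above */

	*bit_position = (uint32_t)position;
	return 0;
}

int main(void)
{
	uint32_t offset = 0xFFFFFFF0;

	printf("small step: %d\n", example_advance_bit_position(&offset, 0x08));
	printf("overflow:   %d\n", example_advance_bit_position(&offset, 0x20));
	return 0;
}
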
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
new file mode 100644
index 0000000..eb144b1
--- /dev/null
+++ b/drivers/acpi/acpica/dsinit.c
@@ -0,0 +1,205 @@
+/******************************************************************************
+ *
+ * Module Name: dsinit - Object initialization namespace walk
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acdispat.h"
+#include "acnamesp.h"
+#include "actables.h"
+
+#define _COMPONENT          ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsinit")
+
+/* Local prototypes */
+static acpi_status
+acpi_ds_init_one_object(acpi_handle obj_handle,
+			u32 level, void *context, void **return_value);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_init_one_object
+ *
+ * PARAMETERS:  obj_handle      - Node for the object
+ *              Level           - Current nesting level
+ *              Context         - Points to a init info struct
+ *              return_value    - Not used
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Callback from acpi_walk_namespace. Invoked for every object
+ *              within the namespace.
+ *
+ *              Currently, the only objects that require initialization are:
+ *              1) Methods
+ *              2) Operation Regions
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_init_one_object(acpi_handle obj_handle,
+			u32 level, void *context, void **return_value)
+{
+	struct acpi_init_walk_info *info =
+	    (struct acpi_init_walk_info *)context;
+	struct acpi_namespace_node *node =
+	    (struct acpi_namespace_node *)obj_handle;
+	acpi_object_type type;
+	acpi_status status;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/*
+	 * We are only interested in NS nodes owned by the table that
+	 * was just loaded
+	 */
+	if (node->owner_id != info->owner_id) {
+		return (AE_OK);
+	}
+
+	info->object_count++;
+
+	/* And even then, we are only interested in a few object types */
+
+	type = acpi_ns_get_type(obj_handle);
+
+	switch (type) {
+	case ACPI_TYPE_REGION:
+
+		status = acpi_ds_initialize_region(obj_handle);
+		if (ACPI_FAILURE(status)) {
+			ACPI_EXCEPTION((AE_INFO, status,
+					"During Region initialization %p [%4.4s]",
+					obj_handle,
+					acpi_ut_get_node_name(obj_handle)));
+		}
+
+		info->op_region_count++;
+		break;
+
+	case ACPI_TYPE_METHOD:
+
+		info->method_count++;
+		break;
+
+	case ACPI_TYPE_DEVICE:
+
+		info->device_count++;
+		break;
+
+	default:
+		break;
+	}
+
+	/*
+	 * We ignore errors from above, and always return OK, since
+	 * we don't want to abort the walk on a single error.
+	 */
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_initialize_objects
+ *
+ * PARAMETERS:  table_desc      - Descriptor for parent ACPI table
+ *              start_node      - Root of subtree to be initialized.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Walk the namespace starting at "StartNode" and perform any
+ *              necessary initialization on the objects found therein
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_initialize_objects(u32 table_index,
+			   struct acpi_namespace_node * start_node)
+{
+	acpi_status status;
+	struct acpi_init_walk_info info;
+	struct acpi_table_header *table;
+	acpi_owner_id owner_id;
+
+	ACPI_FUNCTION_TRACE(ds_initialize_objects);
+
+	status = acpi_tb_get_owner_id(table_index, &owner_id);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "**** Starting initialization of namespace objects ****\n"));
+	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:"));
+
+	info.method_count = 0;
+	info.op_region_count = 0;
+	info.object_count = 0;
+	info.device_count = 0;
+	info.table_index = table_index;
+	info.owner_id = owner_id;
+
+	/* Walk entire namespace from the supplied root */
+
+	status = acpi_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
+				     acpi_ds_init_one_object, &info, NULL);
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
+	}
+
+	status = acpi_get_table_by_index(table_index, &table);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+			      "\nTable [%4.4s](id %4.4X) - %hd Objects with %hd Devices %hd Methods %hd Regions\n",
+			      table->signature, owner_id, info.object_count,
+			      info.device_count, info.method_count,
+			      info.op_region_count));
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "%hd Methods, %hd Regions\n", info.method_count,
+			  info.op_region_count));
+
+	return_ACPI_STATUS(AE_OK);
+}
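
acpi_ds_initialize_objects() above performs a single namespace walk and leaves the filtering to the per-node callback: nodes owned by other tables are skipped, and the callback always reports success so one failing object cannot abort the walk. A self-contained sketch of that callback pattern over a plain array, with a hypothetical node type rather than the ACPICA namespace:

#include <stdio.h>

struct ex_node { unsigned owner_id; int is_method; };

struct ex_counts { unsigned owner_id; unsigned methods; unsigned objects; };

/* Callback: ignore nodes from other owners, never signal a fatal error. */
static int ex_init_one(const struct ex_node *node, struct ex_counts *info)
{
	if (node->owner_id != info->owner_id)
		return 0;

	info->objects++;
	if (node->is_method)
		info->methods++;

	return 0;	/* always "AE_OK" so the walk continues */
}

static void ex_walk(const struct ex_node *nodes, unsigned count,
		    struct ex_counts *info)
{
	unsigned i;

	for (i = 0; i < count; i++)
		ex_init_one(&nodes[i], info);
}

int main(void)
{
	struct ex_node nodes[] = { {1, 1}, {2, 0}, {1, 0} };
	struct ex_counts info = { 1, 0, 0 };

	ex_walk(nodes, 3, &info);
	printf("%u objects, %u methods for owner 1\n", info.objects, info.methods);
	return 0;
}
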
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
new file mode 100644
index 0000000..14b8b8e
--- /dev/null
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -0,0 +1,629 @@
+/******************************************************************************
+ *
+ * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "acnamesp.h"
+#ifdef	ACPI_DISASSEMBLER
+#include <acpi/acdisasm.h>
+#endif
+
+#define _COMPONENT          ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsmethod")
+
+/* Local prototypes */
+static acpi_status
+acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_method_error
+ *
+ * PARAMETERS:  Status          - Execution status
+ *              walk_state      - Current state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Called on method error. Invoke the global exception handler if
+ *              present, dump the method data if the disassembler is configured
+ *
+ *              Note: Allows the exception handler to change the status code
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	/* Ignore AE_OK and control exception codes */
+
+	if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
+		return (status);
+	}
+
+	/* Invoke the global exception handler */
+
+	if (acpi_gbl_exception_handler) {
+
+		/* Exit the interpreter, allow handler to execute methods */
+
+		acpi_ex_exit_interpreter();
+
+		/*
+		 * Handler can map the exception code to anything it wants, including
+		 * AE_OK, in which case the executing method will not be aborted.
+		 */
+		status = acpi_gbl_exception_handler(status,
+						    walk_state->method_node ?
+						    walk_state->method_node->
+						    name.integer : 0,
+						    walk_state->opcode,
+						    walk_state->aml_offset,
+						    NULL);
+		acpi_ex_enter_interpreter();
+	}
+
+	acpi_ds_clear_implicit_return(walk_state);
+
+#ifdef ACPI_DISASSEMBLER
+	if (ACPI_FAILURE(status)) {
+
+		/* Display method locals/args if disassembler is present */
+
+		acpi_dm_dump_method_info(status, walk_state, walk_state->op);
+	}
+#endif
+
+	return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_create_method_mutex
+ *
+ * PARAMETERS:  obj_desc            - The method object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a mutex object for a serialized control method
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
+{
+	union acpi_operand_object *mutex_desc;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ds_create_method_mutex);
+
+	/* Create the new mutex object */
+
+	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
+	if (!mutex_desc) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Create the actual OS Mutex */
+
+	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	mutex_desc->mutex.sync_level = method_desc->method.sync_level;
+	method_desc->method.mutex = mutex_desc;
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_begin_method_execution
+ *
+ * PARAMETERS:  method_node         - Node of the method
+ *              obj_desc            - The method object
+ *              walk_state          - current state, NULL if not yet executing
+ *                                    a method.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Prepare a method for execution.  Parses the method if necessary,
+ *              increments the thread count, and waits at the method semaphore
+ *              for clearance to execute.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
+			       union acpi_operand_object *obj_desc,
+			       struct acpi_walk_state *walk_state)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);
+
+	if (!method_node) {
+		return_ACPI_STATUS(AE_NULL_ENTRY);
+	}
+
+	/* Prevent wraparound of thread count */
+
+	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
+		ACPI_ERROR((AE_INFO,
+			    "Method reached maximum reentrancy limit (255)"));
+		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
+	}
+
+	/*
+	 * If this method is serialized, we need to acquire the method mutex.
+	 */
+	if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) {
+		/*
+		 * Create a mutex for the method if it is defined to be Serialized
+		 * and a mutex has not already been created. We defer the mutex creation
+		 * until a method is actually executed, to minimize the object count
+		 */
+		if (!obj_desc->method.mutex) {
+			status = acpi_ds_create_method_mutex(obj_desc);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+		}
+
+		/*
+		 * The current_sync_level (per-thread) must be less than or equal to
+		 * the sync level of the method. This mechanism provides some
+		 * deadlock prevention
+		 *
+		 * Top-level method invocation has no walk state at this point
+		 */
+		if (walk_state &&
+		    (walk_state->thread->current_sync_level >
+		     obj_desc->method.mutex->mutex.sync_level)) {
+			ACPI_ERROR((AE_INFO,
+				    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)",
+				    acpi_ut_get_node_name(method_node),
+				    walk_state->thread->current_sync_level));
+
+			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
+		}
+
+		/*
+		 * Obtain the method mutex if necessary. Do not acquire mutex for a
+		 * recursive call.
+		 */
+		if (!walk_state ||
+		    !obj_desc->method.mutex->mutex.thread_id ||
+		    (walk_state->thread->thread_id !=
+		     obj_desc->method.mutex->mutex.thread_id)) {
+			/*
+			 * Acquire the method mutex. This releases the interpreter if we
+			 * block (and reacquires it before it returns)
+			 */
+			status =
+			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
+						      mutex.os_mutex,
+						      ACPI_WAIT_FOREVER);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+
+			/* Update the mutex and walk info and save the original sync_level */
+
+			if (walk_state) {
+				obj_desc->method.mutex->mutex.
+				    original_sync_level =
+				    walk_state->thread->current_sync_level;
+
+				obj_desc->method.mutex->mutex.thread_id =
+				    walk_state->thread->thread_id;
+				walk_state->thread->current_sync_level =
+				    obj_desc->method.sync_level;
+			} else {
+				obj_desc->method.mutex->mutex.
+				    original_sync_level =
+				    obj_desc->method.mutex->mutex.sync_level;
+			}
+		}
+
+		/* Always increase acquisition depth */
+
+		obj_desc->method.mutex->mutex.acquisition_depth++;
+	}
+
+	/*
+	 * Allocate an Owner ID for this method, only if this is the first thread
+	 * to begin concurrent execution. We only need one owner_id, even if the
+	 * method is invoked recursively.
+	 */
+	if (!obj_desc->method.owner_id) {
+		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
+		if (ACPI_FAILURE(status)) {
+			goto cleanup;
+		}
+	}
+
+	/*
+	 * Increment the method parse tree thread count since it has been
+	 * reentered one more time (even if it is the same thread)
+	 */
+	obj_desc->method.thread_count++;
+	return_ACPI_STATUS(status);
+
+      cleanup:
+	/* On error, must release the method mutex (if present) */
+
+	if (obj_desc->method.mutex) {
+		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
+	}
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_call_control_method
+ *
+ * PARAMETERS:  Thread              - Info for this thread
+ *              this_walk_state     - Current walk state
+ *              Op                  - Current Op to be walked
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Transfer execution to a called control method
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_call_control_method(struct acpi_thread_state *thread,
+			    struct acpi_walk_state *this_walk_state,
+			    union acpi_parse_object *op)
+{
+	acpi_status status;
+	struct acpi_namespace_node *method_node;
+	struct acpi_walk_state *next_walk_state = NULL;
+	union acpi_operand_object *obj_desc;
+	struct acpi_evaluate_info *info;
+	u32 i;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "Calling method %p, currentstate=%p\n",
+			  this_walk_state->prev_op, this_walk_state));
+
+	/*
+	 * Get the namespace entry for the control method we are about to call
+	 */
+	method_node = this_walk_state->method_call_node;
+	if (!method_node) {
+		return_ACPI_STATUS(AE_NULL_ENTRY);
+	}
+
+	obj_desc = acpi_ns_get_attached_object(method_node);
+	if (!obj_desc) {
+		return_ACPI_STATUS(AE_NULL_OBJECT);
+	}
+
+	/* Init for new method, possibly wait on method mutex */
+
+	status = acpi_ds_begin_method_execution(method_node, obj_desc,
+						this_walk_state);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Begin method parse/execution. Create a new walk state */
+
+	next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
+						    NULL, obj_desc, thread);
+	if (!next_walk_state) {
+		status = AE_NO_MEMORY;
+		goto cleanup;
+	}
+
+	/*
+	 * The resolved arguments were put on the previous walk state's operand
+	 * stack. Operands on the previous walk state stack always
+	 * start at index 0. Also, null terminate the list of arguments
+	 */
+	this_walk_state->operands[this_walk_state->num_operands] = NULL;
+
+	/*
+	 * Allocate and initialize the evaluation information block
+	 * TBD: this is somewhat inefficient, should change interface to
+	 * ds_init_aml_walk. For now, keeps this struct off the CPU stack
+	 */
+	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+	if (!info) {
+		status = AE_NO_MEMORY;
+		goto cleanup;
+	}
+
+	info->parameters = &this_walk_state->operands[0];
+
+	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
+				       obj_desc->method.aml_start,
+				       obj_desc->method.aml_length, info,
+				       ACPI_IMODE_EXECUTE);
+
+	ACPI_FREE(info);
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	/*
+	 * Delete the operands on the previous walkstate operand stack
+	 * (they were copied to new objects)
+	 */
+	for (i = 0; i < obj_desc->method.param_count; i++) {
+		acpi_ut_remove_reference(this_walk_state->operands[i]);
+		this_walk_state->operands[i] = NULL;
+	}
+
+	/* Clear the operand stack */
+
+	this_walk_state->num_operands = 0;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
+			  method_node->name.ascii, next_walk_state));
+
+	/* Invoke an internal method if necessary */
+
+	if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
+		status = obj_desc->method.implementation(next_walk_state);
+		if (status == AE_OK) {
+			status = AE_CTRL_TERMINATE;
+		}
+	}
+
+	return_ACPI_STATUS(status);
+
+      cleanup:
+
+	/* On error, we must terminate the method properly */
+
+	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
+	if (next_walk_state) {
+		acpi_ds_delete_walk_state(next_walk_state);
+	}
+
+	return_ACPI_STATUS(status);
+}
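+
+/*
+ * Note on the "internal method" path above: methods flagged
+ * AML_METHOD_INTERNAL_ONLY are implemented in C inside ACPICA rather than
+ * in AML; the predefined _OSI method is the usual example. Its
+ * Implementation handler is invoked directly, and the nested walk is then
+ * terminated via AE_CTRL_TERMINATE.
+ */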
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_restart_control_method
+ *
+ * PARAMETERS:  walk_state          - State for preempted method (caller)
+ *              return_desc         - Return value from the called method
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Restart a method that was preempted by another (nested) method
+ *              invocation.  Handle the return value (if any) from the callee.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
+			       union acpi_operand_object *return_desc)
+{
+	acpi_status status;
+	int same_as_implicit_return;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
+			  acpi_ut_get_node_name(walk_state->method_node),
+			  walk_state->method_call_op, return_desc));
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "    ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
+			  walk_state->return_used,
+			  walk_state->results, walk_state));
+
+	/* Did the called method return a value? */
+
+	if (return_desc) {
+
+		/* Is the implicit return object the same as the return desc? */
+
+		same_as_implicit_return =
+		    (walk_state->implicit_return_obj == return_desc);
+
+		/* Are we actually going to use the return value? */
+
+		if (walk_state->return_used) {
+
+			/* Save the return value from the previous method */
+
+			status = acpi_ds_result_push(return_desc, walk_state);
+			if (ACPI_FAILURE(status)) {
+				acpi_ut_remove_reference(return_desc);
+				return_ACPI_STATUS(status);
+			}
+
+			/*
+			 * Save as THIS method's return value in case it is returned
+			 * immediately to yet another method
+			 */
+			walk_state->return_desc = return_desc;
+		}
+
+		/*
+		 * The following code is the optional support for the so-called
+		 * "implicit return". Some AML code assumes that the last value of the
+		 * method is "implicitly" returned to the caller, in the absence of an
+		 * explicit return value.
+		 *
+		 * Just save the last result of the method as the return value.
+		 *
+		 * NOTE: this is optional because the ASL language does not actually
+		 * support this behavior.
+		 */
+		else if (!acpi_ds_do_implicit_return
+			 (return_desc, walk_state, FALSE)
+			 || same_as_implicit_return) {
+			/*
+			 * Delete the return value if it will not be used by the
+			 * calling method or remove one reference if the explicit return
+			 * is the same as the implicit return value.
+			 */
+			acpi_ut_remove_reference(return_desc);
+		}
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
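+
+/*
+ * Hypothetical ASL sketch (illustrative only) of the "implicit return"
+ * handling above: with implicit-return support enabled, the caller of MTH0
+ * receives 5 even though MTH0 never executes an explicit Return (), because
+ * the last value computed by the method is saved as the implicit return
+ * object.
+ *
+ *     Method (MTH0) { Store (5, Local0) }
+ *     Store (MTH0 (), Local1)     // Local1 becomes 5
+ */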
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_terminate_control_method
+ *
+ * PARAMETERS:  method_desc         - Method object
+ *              walk_state          - State associated with the method
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Terminate a control method.  Delete everything that the method
+ *              created, delete all locals and arguments, and delete the parse
+ *              tree if requested.
+ *
+ * MUTEX:       Interpreter is locked
+ *
+ ******************************************************************************/
+
+void
+acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
+				 struct acpi_walk_state *walk_state)
+{
+
+	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);
+
+	/* method_desc is required, walk_state is optional */
+
+	if (!method_desc) {
+		return_VOID;
+	}
+
+	if (walk_state) {
+
+		/* Delete all arguments and locals */
+
+		acpi_ds_method_data_delete_all(walk_state);
+
+		/*
+		 * If method is serialized, release the mutex and restore the
+		 * current sync level for this thread
+		 */
+		if (method_desc->method.mutex) {
+
+			/* Acquisition Depth handles recursive calls */
+
+			method_desc->method.mutex->mutex.acquisition_depth--;
+			if (!method_desc->method.mutex->mutex.acquisition_depth) {
+				walk_state->thread->current_sync_level =
+				    method_desc->method.mutex->mutex.
+				    original_sync_level;
+
+				acpi_os_release_mutex(method_desc->method.
+						      mutex->mutex.os_mutex);
+				method_desc->method.mutex->mutex.thread_id = NULL;
+			}
+		}
+
+		/*
+		 * Delete any namespace objects created anywhere within
+		 * the namespace by the execution of this method
+		 */
+		acpi_ns_delete_namespace_by_owner(method_desc->method.owner_id);
+	}
+
+	/* Decrement the thread count on the method */
+
+	if (method_desc->method.thread_count) {
+		method_desc->method.thread_count--;
+	} else {
+		ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
+	}
+
+	/* Are there any other threads currently executing this method? */
+
+	if (method_desc->method.thread_count) {
+		/*
+		 * Additional threads. Do not release the owner_id in this case,
+		 * we immediately reuse it for the next thread executing this method
+		 */
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "*** Completed execution of one thread, %d threads remaining\n",
+				  method_desc->method.thread_count));
+	} else {
+		/* This is the only executing thread for this method */
+
+		/*
+		 * Support to dynamically change a method from not_serialized to
+		 * Serialized if it appears that the method is incorrectly written and
+		 * does not support multiple thread execution. The best example of this
+		 * is if such a method creates namespace objects and blocks. A second
+		 * thread will fail with an AE_ALREADY_EXISTS exception
+		 *
+		 * This code is here because we must wait until the last thread exits
+		 * before creating the synchronization semaphore.
+		 */
+		if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED)
+		    && (!method_desc->method.mutex)) {
+			(void)acpi_ds_create_method_mutex(method_desc);
+		}
+
+		/* No more threads, we can free the owner_id */
+
+		acpi_ut_release_owner_id(&method_desc->method.owner_id);
+	}
+
+	return_VOID;
+}
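+
+/*
+ * Hypothetical ASL sketch (illustrative only) of the auto-serialization case
+ * handled above: a NotSerialized method that creates a named object and then
+ * blocks. If a second thread enters before the first exits, its
+ * CreateWordField fails with AE_ALREADY_EXISTS, so the method is given a
+ * mutex (effectively made Serialized) once the last thread exits.
+ *
+ *     Method (MTH1, 0, NotSerialized)
+ *     {
+ *         CreateWordField (BUF0, 0, FLD0)
+ *         Sleep (100)
+ *     }
+ */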
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
new file mode 100644
index 0000000..da0f546
--- /dev/null
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -0,0 +1,718 @@
+/*******************************************************************************
+ *
+ * Module Name: dsmthdat - control method arguments and local variables
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acdispat.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+
+#define _COMPONENT          ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsmthdat")
+
+/* Local prototypes */
+static void
+acpi_ds_method_data_delete_value(u8 type,
+				 u32 index, struct acpi_walk_state *walk_state);
+
+static acpi_status
+acpi_ds_method_data_set_value(u8 type,
+			      u32 index,
+			      union acpi_operand_object *object,
+			      struct acpi_walk_state *walk_state);
+
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+acpi_object_type
+acpi_ds_method_data_get_type(u16 opcode,
+			     u32 index, struct acpi_walk_state *walk_state);
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_method_data_init
+ *
+ * PARAMETERS:  walk_state          - Current walk state object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Initialize the data structures that hold the method's arguments
+ *              and locals.  The data struct is an array of namespace nodes, one
+ *              for each argument and local; this allows ref_of and de_ref_of to
+ *              work properly for these special data types.
+ *
+ * NOTES:       walk_state fields are initialized to zero by the
+ *              ACPI_ALLOCATE_ZEROED().
+ *
+ *              A pseudo-Namespace Node is assigned to each argument and local
+ *              so that ref_of() can return a pointer to the Node.
+ *
+ ******************************************************************************/
+
+void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
+{
+	u32 i;
+
+	ACPI_FUNCTION_TRACE(ds_method_data_init);
+
+	/* Init the method arguments */
+
+	for (i = 0; i < ACPI_METHOD_NUM_ARGS; i++) {
+		ACPI_MOVE_32_TO_32(&walk_state->arguments[i].name,
+				   NAMEOF_ARG_NTE);
+		walk_state->arguments[i].name.integer |= (i << 24);
+		walk_state->arguments[i].descriptor_type = ACPI_DESC_TYPE_NAMED;
+		walk_state->arguments[i].type = ACPI_TYPE_ANY;
+		walk_state->arguments[i].flags =
+		    ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_ARG;
+	}
+
+	/* Init the method locals */
+
+	for (i = 0; i < ACPI_METHOD_NUM_LOCALS; i++) {
+		ACPI_MOVE_32_TO_32(&walk_state->local_variables[i].name,
+				   NAMEOF_LOCAL_NTE);
+
+		walk_state->local_variables[i].name.integer |= (i << 24);
+		walk_state->local_variables[i].descriptor_type =
+		    ACPI_DESC_TYPE_NAMED;
+		walk_state->local_variables[i].type = ACPI_TYPE_ANY;
+		walk_state->local_variables[i].flags =
+		    ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_LOCAL;
+	}
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_method_data_delete_all
+ *
+ * PARAMETERS:  walk_state          - Current walk state object
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Delete method locals and arguments.  Arguments are only
+ *              deleted if this method was called from another method.
+ *
+ ******************************************************************************/
+
+void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
+{
+	u32 index;
+
+	ACPI_FUNCTION_TRACE(ds_method_data_delete_all);
+
+	/* Detach the locals */
+
+	for (index = 0; index < ACPI_METHOD_NUM_LOCALS; index++) {
+		if (walk_state->local_variables[index].object) {
+			ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Deleting Local%d=%p\n",
+					  index,
+					  walk_state->local_variables[index].
+					  object));
+
+			/* Detach object (if present) and remove a reference */
+
+			acpi_ns_detach_object(&walk_state->
+					      local_variables[index]);
+		}
+	}
+
+	/* Detach the arguments */
+
+	for (index = 0; index < ACPI_METHOD_NUM_ARGS; index++) {
+		if (walk_state->arguments[index].object) {
+			ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Deleting Arg%d=%p\n",
+					  index,
+					  walk_state->arguments[index].object));
+
+			/* Detach object (if present) and remove a reference */
+
+			acpi_ns_detach_object(&walk_state->arguments[index]);
+		}
+	}
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_method_data_init_args
+ *
+ * PARAMETERS:  *Params         - Pointer to a parameter list for the method
+ *              max_param_count - The arg count for this method
+ *              walk_state      - Current walk state object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Initialize arguments for a method.  The parameter list is a list
+ *              of ACPI operand objects, either null terminated or whose length
+ *              is defined by max_param_count.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_method_data_init_args(union acpi_operand_object **params,
+			      u32 max_param_count,
+			      struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	u32 index = 0;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_method_data_init_args, params);
+
+	if (!params) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "No param list passed to method\n"));
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Copy passed parameters into the new method stack frame */
+
+	while ((index < ACPI_METHOD_NUM_ARGS) &&
+	       (index < max_param_count) && params[index]) {
+		/*
+		 * A valid parameter.
+		 * Store the argument in the method/walk descriptor.
+		 * Do not copy the arg in order to implement call by reference
+		 */
+		status = acpi_ds_method_data_set_value(ACPI_REFCLASS_ARG, index,
+						       params[index],
+						       walk_state);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		index++;
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%d args passed to method\n", index));
+	return_ACPI_STATUS(AE_OK);
+}
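+
+/*
+ * Hypothetical ASL sketch (illustrative only): because incoming parameters
+ * are installed by reference (no copy), MTH2 below operates on the caller's
+ * Buffer object itself, so the caller sees the modification.
+ *
+ *     Name (BUF0, Buffer (4) {})
+ *     Method (MTH2, 1)
+ *     {
+ *         CreateByteField (Arg0, 0, BYT0)
+ *         Store (1, BYT0)
+ *     }
+ *     MTH2 (BUF0)     // BUF0 [0] is now 1
+ */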
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_method_data_get_node
+ *
+ * PARAMETERS:  Type                - Either ACPI_REFCLASS_LOCAL or
+ *                                    ACPI_REFCLASS_ARG
+ *              Index               - Which Local or Arg to get the Node for
+ *              walk_state          - Current walk state object
+ *              Node                - Where the node is returned.
+ *
+ * RETURN:      Status and node
+ *
+ * DESCRIPTION: Get the Node associated with a local or arg.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_method_data_get_node(u8 type,
+			     u32 index,
+			     struct acpi_walk_state *walk_state,
+			     struct acpi_namespace_node **node)
+{
+	ACPI_FUNCTION_TRACE(ds_method_data_get_node);
+
+	/*
+	 * Method Locals and Arguments are supported
+	 */
+	switch (type) {
+	case ACPI_REFCLASS_LOCAL:
+
+		if (index > ACPI_METHOD_MAX_LOCAL) {
+			ACPI_ERROR((AE_INFO,
+				    "Local index %d is invalid (max %d)",
+				    index, ACPI_METHOD_MAX_LOCAL));
+			return_ACPI_STATUS(AE_AML_INVALID_INDEX);
+		}
+
+		/* Return a pointer to the pseudo-node */
+
+		*node = &walk_state->local_variables[index];
+		break;
+
+	case ACPI_REFCLASS_ARG:
+
+		if (index > ACPI_METHOD_MAX_ARG) {
+			ACPI_ERROR((AE_INFO,
+				    "Arg index %d is invalid (max %d)",
+				    index, ACPI_METHOD_MAX_ARG));
+			return_ACPI_STATUS(AE_AML_INVALID_INDEX);
+		}
+
+		/* Return a pointer to the pseudo-node */
+
+		*node = &walk_state->arguments[index];
+		break;
+
+	default:
+		ACPI_ERROR((AE_INFO, "Type %d is invalid", type));
+		return_ACPI_STATUS(AE_TYPE);
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_method_data_set_value
+ *
+ * PARAMETERS:  Type                - Either ACPI_REFCLASS_LOCAL or
+ *                                    ACPI_REFCLASS_ARG
+ *              Index               - Which Local or Arg to get
+ *              Object              - Object to be inserted into the stack entry
+ *              walk_state          - Current walk state object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Insert an object onto the method stack at entry Opcode:Index.
+ *              Note: There is no "implicit conversion" for locals.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_method_data_set_value(u8 type,
+			      u32 index,
+			      union acpi_operand_object *object,
+			      struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+
+	ACPI_FUNCTION_TRACE(ds_method_data_set_value);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+			  "NewObj %p Type %2.2X, Refs=%d [%s]\n", object,
+			  type, object->common.reference_count,
+			  acpi_ut_get_type_name(object->common.type)));
+
+	/* Get the namespace node for the arg/local */
+
+	status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Increment ref count so object can't be deleted while installed.
+	 * NOTE: We do not copy the object in order to preserve the call by
+	 * reference semantics of ACPI Control Method invocation.
+	 * (See ACPI Specification 2.0_c)
+	 */
+	acpi_ut_add_reference(object);
+
+	/* Install the object */
+
+	node->object = object;
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_method_data_get_value
+ *
+ * PARAMETERS:  Type                - Either ACPI_REFCLASS_LOCAL or
+ *                                    ACPI_REFCLASS_ARG
+ *              Index               - Which local_var or argument to get
+ *              walk_state          - Current walk state object
+ *              dest_desc           - Where Arg or Local value is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Retrieve value of selected Arg or Local for this method
+ *              Used only in acpi_ex_resolve_to_value().
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_method_data_get_value(u8 type,
+			      u32 index,
+			      struct acpi_walk_state *walk_state,
+			      union acpi_operand_object **dest_desc)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+	union acpi_operand_object *object;
+
+	ACPI_FUNCTION_TRACE(ds_method_data_get_value);
+
+	/* Validate the object descriptor */
+
+	if (!dest_desc) {
+		ACPI_ERROR((AE_INFO, "Null object descriptor pointer"));
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Get the namespace node for the arg/local */
+
+	status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Get the object from the node */
+
+	object = node->object;
+
+	/* Examine the returned object, it must be valid. */
+
+	if (!object) {
+		/*
+		 * Index points to uninitialized object.
+		 * This means that either 1) The expected argument was
+		 * not passed to the method, or 2) A local variable
+		 * was referenced by the method (via the ASL)
+		 * before it was initialized.  Either case is an error.
+		 */
+
+		/* If slack enabled, init the local_x/arg_x to an Integer of value zero */
+
+		if (acpi_gbl_enable_interpreter_slack) {
+			object =
+			    acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+			if (!object) {
+				return_ACPI_STATUS(AE_NO_MEMORY);
+			}
+
+			object->integer.value = 0;
+			node->object = object;
+		}
+
+		/* Otherwise, return the error */
+
+		else
+			switch (type) {
+			case ACPI_REFCLASS_ARG:
+
+				ACPI_ERROR((AE_INFO,
+					    "Uninitialized Arg[%d] at node %p",
+					    index, node));
+
+				return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
+
+			case ACPI_REFCLASS_LOCAL:
+
+				ACPI_ERROR((AE_INFO,
+					    "Uninitialized Local[%d] at node %p",
+					    index, node));
+
+				return_ACPI_STATUS(AE_AML_UNINITIALIZED_LOCAL);
+
+			default:
+
+				ACPI_ERROR((AE_INFO,
+					    "Not an Arg/Local opcode: %X",
+					    type));
+				return_ACPI_STATUS(AE_AML_INTERNAL);
+			}
+	}
+
+	/*
+	 * The Index points to an initialized and valid object.
+	 * Return an additional reference to the object
+	 */
+	*dest_desc = object;
+	acpi_ut_add_reference(object);
+
+	return_ACPI_STATUS(AE_OK);
+}
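+
+/*
+ * Hypothetical ASL sketch (illustrative only) of the uninitialized-value
+ * handling above: with interpreter slack enabled, the use of Local0 below
+ * yields an Integer 0; without slack it raises AE_AML_UNINITIALIZED_LOCAL.
+ *
+ *     Method (MTH3) { Return (Add (Local0, 1)) }
+ */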
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_method_data_delete_value
+ *
+ * PARAMETERS:  Type                - Either ACPI_REFCLASS_LOCAL or
+ *                                    ACPI_REFCLASS_ARG
+ *              Index               - Which local_var or argument to delete
+ *              walk_state          - Current walk state object
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Delete the entry at Opcode:Index.  Inserts
+ *              a null into the stack slot after the object is deleted.
+ *
+ ******************************************************************************/
+
+static void
+acpi_ds_method_data_delete_value(u8 type,
+				 u32 index, struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+	union acpi_operand_object *object;
+
+	ACPI_FUNCTION_TRACE(ds_method_data_delete_value);
+
+	/* Get the namespace node for the arg/local */
+
+	status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
+	if (ACPI_FAILURE(status)) {
+		return_VOID;
+	}
+
+	/* Get the associated object */
+
+	object = acpi_ns_get_attached_object(node);
+
+	/*
+	 * Undefine the Arg or Local by setting its descriptor
+	 * pointer to NULL. Locals/Args can contain both
+	 * ACPI_OPERAND_OBJECTS and ACPI_NAMESPACE_NODEs
+	 */
+	node->object = NULL;
+
+	if ((object) &&
+	    (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_OPERAND)) {
+		/*
+		 * There is a valid object.
+		 * Decrement the reference count by one to balance the
+		 * increment when the object was stored.
+		 */
+		acpi_ut_remove_reference(object);
+	}
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_store_object_to_local
+ *
+ * PARAMETERS:  Type                - Either ACPI_REFCLASS_LOCAL or
+ *                                    ACPI_REFCLASS_ARG
+ *              Index               - Which Local or Arg to set
+ *              obj_desc            - Value to be stored
+ *              walk_state          - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Store a value in an Arg or Local.  The obj_desc is installed
+ *              as the new value for the Arg or Local and the reference count
+ *              for obj_desc is incremented.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_store_object_to_local(u8 type,
+			      u32 index,
+			      union acpi_operand_object *obj_desc,
+			      struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+	union acpi_operand_object *current_obj_desc;
+	union acpi_operand_object *new_obj_desc;
+
+	ACPI_FUNCTION_TRACE(ds_store_object_to_local);
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Type=%2.2X Index=%d Obj=%p\n",
+			  type, index, obj_desc));
+
+	/* Parameter validation */
+
+	if (!obj_desc) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Get the namespace node for the arg/local */
+
+	status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	current_obj_desc = acpi_ns_get_attached_object(node);
+	if (current_obj_desc == obj_desc) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p already installed!\n",
+				  obj_desc));
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * If the reference count on the object is more than one, we must
+	 * take a copy of the object before we store.  A reference count
+	 * of exactly 1 means that the object was just created during the
+	 * evaluation of an expression, and we can safely use it since it
+	 * is not used anywhere else.
+	 */
+	new_obj_desc = obj_desc;
+	if (obj_desc->common.reference_count > 1) {
+		status =
+		    acpi_ut_copy_iobject_to_iobject(obj_desc, &new_obj_desc,
+						    walk_state);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/*
+	 * If there is an object already in this slot, we either
+	 * have to delete it, or if this is an argument and there
+	 * is an object reference stored there, we have to do
+	 * an indirect store!
+	 */
+	if (current_obj_desc) {
+		/*
+		 * Check for an indirect store if an argument
+		 * contains an object reference (stored as an Node).
+		 * We don't allow this automatic dereferencing for
+		 * locals, since a store to a local should overwrite
+		 * anything there, including an object reference.
+		 *
+		 * If both Arg0 and Local0 contain ref_of (Local4):
+		 *
+		 * Store (1, Arg0)             - Causes indirect store to local4
+		 * Store (1, Local0)           - Stores 1 in local0, overwriting
+		 *                                  the reference to local4
+		 * Store (1, de_refof (Local0)) - Causes indirect store to local4
+		 *
+		 * Weird, but true.
+		 */
+		if (type == ACPI_REFCLASS_ARG) {
+			/*
+			 * If we have a valid reference object that came from ref_of(),
+			 * do the indirect store
+			 */
+			if ((ACPI_GET_DESCRIPTOR_TYPE(current_obj_desc) ==
+			     ACPI_DESC_TYPE_OPERAND)
+			    && (current_obj_desc->common.type ==
+				ACPI_TYPE_LOCAL_REFERENCE)
+			    && (current_obj_desc->reference.class ==
+				ACPI_REFCLASS_REFOF)) {
+				ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+						  "Arg (%p) is an ObjRef(Node), storing in node %p\n",
+						  new_obj_desc,
+						  current_obj_desc));
+
+				/*
+				 * Store this object to the Node (perform the indirect store)
+				 * NOTE: No implicit conversion is performed, as per the ACPI
+				 * specification rules on storing to Locals/Args.
+				 */
+				status =
+				    acpi_ex_store_object_to_node(new_obj_desc,
+								 current_obj_desc->
+								 reference.
+								 object,
+								 walk_state,
+								 ACPI_NO_IMPLICIT_CONVERSION);
+
+				/* Remove local reference if we copied the object above */
+
+				if (new_obj_desc != obj_desc) {
+					acpi_ut_remove_reference(new_obj_desc);
+				}
+				return_ACPI_STATUS(status);
+			}
+		}
+
+		/* Delete the existing object before storing the new one */
+
+		acpi_ds_method_data_delete_value(type, index, walk_state);
+	}
+
+	/*
+	 * Install the Obj descriptor (*new_obj_desc) into
+	 * the descriptor for the Arg or Local.
+	 * (increments the object reference count by one)
+	 */
+	status =
+	    acpi_ds_method_data_set_value(type, index, new_obj_desc,
+					  walk_state);
+
+	/* Remove local reference if we copied the object above */
+
+	if (new_obj_desc != obj_desc) {
+		acpi_ut_remove_reference(new_obj_desc);
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_method_data_get_type
+ *
+ * PARAMETERS:  Opcode              - Either AML_LOCAL_OP or AML_ARG_OP
+ *              Index               - Which Local or Arg whose type to get
+ *              walk_state          - Current walk state object
+ *
+ * RETURN:      Data type of current value of the selected Arg or Local
+ *
+ * DESCRIPTION: Get the type of the object stored in the Local or Arg
+ *
+ ******************************************************************************/
+
+acpi_object_type
+acpi_ds_method_data_get_type(u16 opcode,
+			     u32 index, struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+	union acpi_operand_object *object;
+
+	ACPI_FUNCTION_TRACE(ds_method_data_get_type);
+
+	/* Get the namespace node for the arg/local */
+
+	status = acpi_ds_method_data_get_node(opcode, index, walk_state, &node);
+	if (ACPI_FAILURE(status)) {
+		return_VALUE((ACPI_TYPE_NOT_FOUND));
+	}
+
+	/* Get the object */
+
+	object = acpi_ns_get_attached_object(node);
+	if (!object) {
+
+		/* Uninitialized local/arg, return TYPE_ANY */
+
+		return_VALUE(ACPI_TYPE_ANY);
+	}
+
+	/* Get the object type */
+
+	return_VALUE(ACPI_GET_OBJECT_TYPE(object));
+}
+#endif
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
new file mode 100644
index 0000000..15c628e
--- /dev/null
+++ b/drivers/acpi/acpica/dsobject.c
@@ -0,0 +1,813 @@
+/******************************************************************************
+ *
+ * Module Name: dsobject - Dispatcher object management routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+
+#define _COMPONENT          ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsobject")
+
+/* Local prototypes */
+static acpi_status
+acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
+			      union acpi_parse_object *op,
+			      union acpi_operand_object **obj_desc_ptr);
+
+#ifndef ACPI_NO_METHOD_EXECUTION
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_build_internal_object
+ *
+ * PARAMETERS:  walk_state      - Current walk state
+ *              Op              - Parser object to be translated
+ *              obj_desc_ptr    - Where the ACPI internal object is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Translate a parser Op object to the equivalent namespace object
+ *              Simple objects are any objects other than a package object!
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
+			      union acpi_parse_object *op,
+			      union acpi_operand_object **obj_desc_ptr)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ds_build_internal_object);
+
+	*obj_desc_ptr = NULL;
+	if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
+		/*
+		 * This is a named object reference. If this name was
+		 * previously looked up in the namespace, it was stored in this op.
+		 * Otherwise, go ahead and look it up now
+		 */
+		if (!op->common.node) {
+			status = acpi_ns_lookup(walk_state->scope_info,
+						op->common.value.string,
+						ACPI_TYPE_ANY,
+						ACPI_IMODE_EXECUTE,
+						ACPI_NS_SEARCH_PARENT |
+						ACPI_NS_DONT_OPEN_SCOPE, NULL,
+						ACPI_CAST_INDIRECT_PTR(struct
+								       acpi_namespace_node,
+								       &(op->
+									 common.
+									 node)));
+			if (ACPI_FAILURE(status)) {
+
+				/* Check if we are resolving a named reference within a package */
+
+				if ((status == AE_NOT_FOUND)
+				    && (acpi_gbl_enable_interpreter_slack)
+				    &&
+				    ((op->common.parent->common.aml_opcode ==
+				      AML_PACKAGE_OP)
+				     || (op->common.parent->common.aml_opcode ==
+					 AML_VAR_PACKAGE_OP))) {
+					/*
+					 * We didn't find the target and we are populating elements
+					 * of a package - ignore if slack enabled. Some ASL code
+					 * contains dangling invalid references in packages and
+					 * expects that no exception will be issued. Leave the
+					 * element as a null element. It cannot be used, but it
+					 * can be overwritten by subsequent ASL code - this is
+					 * typically the case.
+					 */
+					ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+							  "Ignoring unresolved reference in package [%4.4s]\n",
+							  walk_state->
+							  scope_info->scope.
+							  node->name.ascii));
+
+					return_ACPI_STATUS(AE_OK);
+				} else {
+					ACPI_ERROR_NAMESPACE(op->common.value.
+							     string, status);
+				}
+
+				return_ACPI_STATUS(status);
+			}
+		}
+
+		/* Special object resolution for elements of a package */
+
+		if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
+		    (op->common.parent->common.aml_opcode ==
+		     AML_VAR_PACKAGE_OP)) {
+			/*
+			 * Attempt to resolve the node to a value before we insert it into
+			 * the package. If this is a reference to a common data type,
+			 * resolve it immediately. According to the ACPI spec, package
+			 * elements can only be "data objects" or method references.
+			 * Attempt to resolve to an Integer, Buffer, String or Package.
+			 * If cannot, return the named reference (for things like Devices,
+			 * Methods, etc.) Buffer Fields and Fields will resolve to simple
+			 * objects (int/buf/str/pkg).
+			 *
+			 * NOTE: References to things like Devices, Methods, Mutexes, etc.
+			 * will remain as named references. This behavior is not described
+			 * in the ACPI spec, but it appears to be an oversight.
+			 */
+			obj_desc =
+			    ACPI_CAST_PTR(union acpi_operand_object,
+					  op->common.node);
+
+			status =
+			    acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
+							  (struct
+							   acpi_namespace_node,
+							   &obj_desc),
+							  walk_state);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+
+			switch (op->common.node->type) {
+				/*
+				 * For these types, we need the actual node, not the subobject.
+				 * However, the subobject did not get an extra reference count above.
+				 *
+				 * TBD: should ex_resolve_node_to_value be changed to fix this?
+				 */
+			case ACPI_TYPE_DEVICE:
+			case ACPI_TYPE_THERMAL:
+
+				acpi_ut_add_reference(op->common.node->object);
+
+				/*lint -fallthrough */
+				/*
+				 * For these types, we need the actual node, not the subobject.
+				 * The subobject got an extra reference count in ex_resolve_node_to_value.
+				 */
+			case ACPI_TYPE_MUTEX:
+			case ACPI_TYPE_METHOD:
+			case ACPI_TYPE_POWER:
+			case ACPI_TYPE_PROCESSOR:
+			case ACPI_TYPE_EVENT:
+			case ACPI_TYPE_REGION:
+
+				/* We will create a reference object for these types below */
+				break;
+
+			default:
+				/*
+				 * All other types - the node was resolved to an actual
+				 * object, we are done.
+				 */
+				goto exit;
+			}
+		}
+	}
+
+	/* Create and init a new internal ACPI object */
+
+	obj_desc = acpi_ut_create_internal_object((acpi_ps_get_opcode_info
+						   (op->common.aml_opcode))->
+						  object_type);
+	if (!obj_desc) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	status =
+	    acpi_ds_init_object_from_op(walk_state, op, op->common.aml_opcode,
+					&obj_desc);
+	if (ACPI_FAILURE(status)) {
+		acpi_ut_remove_reference(obj_desc);
+		return_ACPI_STATUS(status);
+	}
+
+      exit:
+	*obj_desc_ptr = obj_desc;
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_build_internal_buffer_obj
+ *
+ * PARAMETERS:  walk_state      - Current walk state
+ *              Op              - Parser object to be translated
+ *              buffer_length   - Length of the buffer
+ *              obj_desc_ptr    - Where the ACPI internal object is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Translate a parser Op package object to the equivalent
+ *              namespace object
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
+				  union acpi_parse_object *op,
+				  u32 buffer_length,
+				  union acpi_operand_object **obj_desc_ptr)
+{
+	union acpi_parse_object *arg;
+	union acpi_operand_object *obj_desc;
+	union acpi_parse_object *byte_list;
+	u32 byte_list_length = 0;
+
+	ACPI_FUNCTION_TRACE(ds_build_internal_buffer_obj);
+
+	/*
+	 * If we are evaluating a Named buffer object "Name (xxxx, Buffer)",
+	 * the buffer object already exists (from the NS node); otherwise it must
+	 * be created.
+	 */
+	obj_desc = *obj_desc_ptr;
+	if (!obj_desc) {
+
+		/* Create a new buffer object */
+
+		obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
+		*obj_desc_ptr = obj_desc;
+		if (!obj_desc) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+	}
+
+	/*
+	 * Second arg is the buffer data (optional). The byte_list can be either
+	 * individual bytes or a string initializer.  In either case, a
+	 * byte_list appears in the AML.
+	 */
+	arg = op->common.value.arg;	/* skip first arg */
+
+	byte_list = arg->named.next;
+	if (byte_list) {
+		if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) {
+			ACPI_ERROR((AE_INFO,
+				    "Expecting bytelist, got AML opcode %X in op %p",
+				    byte_list->common.aml_opcode, byte_list));
+
+			acpi_ut_remove_reference(obj_desc);
+			return (AE_TYPE);
+		}
+
+		byte_list_length = (u32) byte_list->common.value.integer;
+	}
+
+	/*
+	 * The buffer length (number of bytes) will be the larger of:
+	 * 1) The specified buffer length and
+	 * 2) The length of the initializer byte list
+	 */
+	obj_desc->buffer.length = buffer_length;
+	if (byte_list_length > buffer_length) {
+		obj_desc->buffer.length = byte_list_length;
+	}
+
+	/* Allocate the buffer */
+
+	if (obj_desc->buffer.length == 0) {
+		obj_desc->buffer.pointer = NULL;
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "Buffer defined with zero length in AML, creating\n"));
+	} else {
+		obj_desc->buffer.pointer =
+		    ACPI_ALLOCATE_ZEROED(obj_desc->buffer.length);
+		if (!obj_desc->buffer.pointer) {
+			acpi_ut_delete_object_desc(obj_desc);
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		/* Initialize buffer from the byte_list (if present) */
+
+		if (byte_list) {
+			ACPI_MEMCPY(obj_desc->buffer.pointer,
+				    byte_list->named.data, byte_list_length);
+		}
+	}
+
+	obj_desc->buffer.flags |= AOPOBJ_DATA_VALID;
+	op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
+	return_ACPI_STATUS(AE_OK);
+}
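+
+/*
+ * Hypothetical ASL sketches (illustrative only) of the buffer sizing rule
+ * above: the internal buffer length is the larger of the declared length
+ * and the initializer length, so BUF1 becomes 8 bytes with the last four
+ * zero-filled, and BUF2 becomes 4 bytes (initializer longer than declared).
+ *
+ *     Name (BUF1, Buffer (8) { 0x01, 0x02, 0x03, 0x04 })
+ *     Name (BUF2, Buffer (2) { 0x01, 0x02, 0x03, 0x04 })
+ */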
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_build_internal_package_obj
+ *
+ * PARAMETERS:  walk_state      - Current walk state
+ *              Op              - Parser object to be translated
+ *              element_count   - Number of elements in the package - this is
+ *                                the num_elements argument to Package()
+ *              obj_desc_ptr    - Where the ACPI internal object is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Translate a parser Op package object to the equivalent
+ *              namespace object
+ *
+ * NOTE: The number of elements in the package will always be the num_elements
+ * count, regardless of the number of elements in the package list. If
+ * num_elements is smaller, only that many package list elements are used.
+ * If num_elements is larger, the Package object is padded out with
+ * objects of type Uninitialized (as per the ACPI spec.)
+ *
+ * Even though the ASL compilers do not allow num_elements to be smaller
+ * than the Package list length (for the fixed length package opcode), some
+ * BIOS code modifies the AML on the fly to adjust the num_elements, and
+ * this code compensates for that. This also provides compatibility with
+ * other AML interpreters.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
+				   union acpi_parse_object *op,
+				   u32 element_count,
+				   union acpi_operand_object **obj_desc_ptr)
+{
+	union acpi_parse_object *arg;
+	union acpi_parse_object *parent;
+	union acpi_operand_object *obj_desc = NULL;
+	acpi_status status = AE_OK;
+	unsigned i;
+	u16 index;
+	u16 reference_count;
+
+	ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
+
+	/* Find the parent of a possibly nested package */
+
+	parent = op->common.parent;
+	while ((parent->common.aml_opcode == AML_PACKAGE_OP) ||
+	       (parent->common.aml_opcode == AML_VAR_PACKAGE_OP)) {
+		parent = parent->common.parent;
+	}
+
+	/*
+	 * If we are evaluating a Named package object "Name (xxxx, Package)",
+	 * the package object already exists, otherwise it must be created.
+	 */
+	obj_desc = *obj_desc_ptr;
+	if (!obj_desc) {
+		obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
+		*obj_desc_ptr = obj_desc;
+		if (!obj_desc) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		obj_desc->package.node = parent->common.node;
+	}
+
+	/*
+	 * Allocate the element array (array of pointers to the individual
+	 * objects) based on the num_elements parameter. Add an extra pointer slot
+	 * so that the list is always null terminated.
+	 */
+	obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
+							   element_count +
+							   1) * sizeof(void *));
+
+	if (!obj_desc->package.elements) {
+		acpi_ut_delete_object_desc(obj_desc);
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	obj_desc->package.count = element_count;
+
+	/*
+	 * Initialize the elements of the package, up to the num_elements count.
+	 * Package is automatically padded with uninitialized (NULL) elements
+	 * if num_elements is greater than the package list length. Likewise,
+	 * Package is truncated if num_elements is less than the list length.
+	 */
+	arg = op->common.value.arg;
+	arg = arg->common.next;
+	for (i = 0; arg && (i < element_count); i++) {
+		if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
+			if (arg->common.node->type == ACPI_TYPE_METHOD) {
+				/*
+				 * A method reference "looks" to the parser to be a method
+				 * invocation, so we special case it here
+				 */
+				arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
+				status =
+				    acpi_ds_build_internal_object(walk_state,
+								  arg,
+								  &obj_desc->
+								  package.
+								  elements[i]);
+			} else {
+				/* This package element is already built, just get it */
+
+				obj_desc->package.elements[i] =
+				    ACPI_CAST_PTR(union acpi_operand_object,
+						  arg->common.node);
+			}
+		} else {
+			status = acpi_ds_build_internal_object(walk_state, arg,
+							       &obj_desc->
+							       package.
+							       elements[i]);
+		}
+
+		if (*obj_desc_ptr) {
+
+			/* Existing package, get existing reference count */
+
+			reference_count =
+			    (*obj_desc_ptr)->common.reference_count;
+			if (reference_count > 1) {
+
+				/* Make new element ref count match original ref count */
+
+				for (index = 0; index < (reference_count - 1);
+				     index++) {
+					acpi_ut_add_reference((obj_desc->
+							       package.
+							       elements[i]));
+				}
+			}
+		}
+
+		arg = arg->common.next;
+	}
+
+	/* Check for match between num_elements and actual length of package_list */
+
+	if (arg) {
+		/*
+		 * num_elements was exhausted, but there are remaining elements in the
+		 * package_list.
+		 *
+		 * Note: technically, this is an error, from ACPI spec: "It is an error
+		 * for NumElements to be less than the number of elements in the
+		 * PackageList". However, for now, we just print an error message and
+		 * no exception is returned.
+		 */
+		while (arg) {
+
+			/* Find out how many elements there really are */
+
+			i++;
+			arg = arg->common.next;
+		}
+
+		ACPI_WARNING((AE_INFO,
+			    "Package List length (%X) larger than NumElements count (%X), truncated\n",
+			    i, element_count));
+	} else if (i < element_count) {
+		/*
+		 * Arg list (elements) was exhausted, but we did not reach num_elements count.
+		 * Note: this is not an error, the package is padded out with NULLs.
+		 */
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "Package List length (%X) smaller than NumElements count (%X), padded with null elements\n",
+				  i, element_count));
+	}
+
+	obj_desc->package.flags |= AOPOBJ_DATA_VALID;
+	op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
+	return_ACPI_STATUS(status);
+}
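+
+/*
+ * Hypothetical ASL sketches (illustrative only) of the NumElements handling
+ * above:
+ *
+ *     Name (PKG1, Package (4) { 1, 2 })       // padded with 2 Uninitialized
+ *     Name (PKG2, Package (1) { 1, 2, 3 })    // warning; extra elements dropped
+ */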
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_create_node
+ *
+ * PARAMETERS:  walk_state      - Current walk state
+ *              Node            - NS Node to be initialized
+ *              Op              - Parser object to be translated
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create the object to be associated with a namespace node
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_create_node(struct acpi_walk_state *walk_state,
+		    struct acpi_namespace_node *node,
+		    union acpi_parse_object *op)
+{
+	acpi_status status;
+	union acpi_operand_object *obj_desc;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_create_node, op);
+
+	/*
+	 * Because of the execution pass through the non-control-method
+	 * parts of the table, we can arrive here twice.  Only init
+	 * the named object node the first time through
+	 */
+	if (acpi_ns_get_attached_object(node)) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	if (!op->common.value.arg) {
+
+		/* No arguments, there is nothing to do */
+
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Build an internal object for the argument(s) */
+
+	status = acpi_ds_build_internal_object(walk_state, op->common.value.arg,
+					       &obj_desc);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Re-type the object according to its argument */
+
+	node->type = ACPI_GET_OBJECT_TYPE(obj_desc);
+
+	/* Attach obj to node */
+
+	status = acpi_ns_attach_object(node, obj_desc, node->type);
+
+	/* Remove local reference to the object */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
+
+#endif				/* ACPI_NO_METHOD_EXECUTION */
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_init_object_from_op
+ *
+ * PARAMETERS:  walk_state      - Current walk state
+ *              Op              - Parser op used to init the internal object
+ *              Opcode          - AML opcode associated with the object
+ *              ret_obj_desc    - Namespace object to be initialized
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Initialize a namespace object from a parser Op and its
+ *              associated arguments.  The namespace object is a more compact
+ *              representation of the Op and its arguments.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
+			    union acpi_parse_object *op,
+			    u16 opcode,
+			    union acpi_operand_object **ret_obj_desc)
+{
+	const struct acpi_opcode_info *op_info;
+	union acpi_operand_object *obj_desc;
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(ds_init_object_from_op);
+
+	obj_desc = *ret_obj_desc;
+	op_info = acpi_ps_get_opcode_info(opcode);
+	if (op_info->class == AML_CLASS_UNKNOWN) {
+
+		/* Unknown opcode */
+
+		return_ACPI_STATUS(AE_TYPE);
+	}
+
+	/* Perform per-object initialization */
+
+	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+	case ACPI_TYPE_BUFFER:
+
+		/*
+		 * Defer evaluation of Buffer term_arg operand
+		 */
+		obj_desc->buffer.node =
+		    ACPI_CAST_PTR(struct acpi_namespace_node,
+				  walk_state->operands[0]);
+		obj_desc->buffer.aml_start = op->named.data;
+		obj_desc->buffer.aml_length = op->named.length;
+		break;
+
+	case ACPI_TYPE_PACKAGE:
+
+		/*
+		 * Defer evaluation of Package term_arg operand
+		 */
+		obj_desc->package.node =
+		    ACPI_CAST_PTR(struct acpi_namespace_node,
+				  walk_state->operands[0]);
+		obj_desc->package.aml_start = op->named.data;
+		obj_desc->package.aml_length = op->named.length;
+		break;
+
+	case ACPI_TYPE_INTEGER:
+
+		switch (op_info->type) {
+		case AML_TYPE_CONSTANT:
+			/*
+			 * Resolve AML Constants here - AND ONLY HERE!
+			 * All constants are integers.
+			 * We mark the integer with a flag that indicates that it started
+			 * life as a constant -- so that stores to constants will perform
+			 * as expected (noop). zero_op is used as a placeholder for optional
+			 * target operands.
+			 */
+			obj_desc->common.flags = AOPOBJ_AML_CONSTANT;
+
+			switch (opcode) {
+			case AML_ZERO_OP:
+
+				obj_desc->integer.value = 0;
+				break;
+
+			case AML_ONE_OP:
+
+				obj_desc->integer.value = 1;
+				break;
+
+			case AML_ONES_OP:
+
+				obj_desc->integer.value = ACPI_INTEGER_MAX;
+
+				/* Truncate value if we are executing from a 32-bit ACPI table */
+
+#ifndef ACPI_NO_METHOD_EXECUTION
+				acpi_ex_truncate_for32bit_table(obj_desc);
+#endif
+				break;
+
+			case AML_REVISION_OP:
+
+				obj_desc->integer.value = ACPI_CA_VERSION;
+				break;
+
+			default:
+
+				ACPI_ERROR((AE_INFO,
+					    "Unknown constant opcode %X",
+					    opcode));
+				status = AE_AML_OPERAND_TYPE;
+				break;
+			}
+			break;
+
+		case AML_TYPE_LITERAL:
+
+			obj_desc->integer.value = op->common.value.integer;
+#ifndef ACPI_NO_METHOD_EXECUTION
+			acpi_ex_truncate_for32bit_table(obj_desc);
+#endif
+			break;
+
+		default:
+			ACPI_ERROR((AE_INFO, "Unknown Integer type %X",
+				    op_info->type));
+			status = AE_AML_OPERAND_TYPE;
+			break;
+		}
+		break;
+
+	case ACPI_TYPE_STRING:
+
+		obj_desc->string.pointer = op->common.value.string;
+		obj_desc->string.length =
+		    (u32) ACPI_STRLEN(op->common.value.string);
+
+		/*
+		 * The string is contained in the ACPI table, don't ever try
+		 * to delete it
+		 */
+		obj_desc->common.flags |= AOPOBJ_STATIC_POINTER;
+		break;
+
+	case ACPI_TYPE_METHOD:
+		break;
+
+	case ACPI_TYPE_LOCAL_REFERENCE:
+
+		switch (op_info->type) {
+		case AML_TYPE_LOCAL_VARIABLE:
+
+			/* Local ID (0-7) is (AML opcode - base AML_LOCAL_OP) */
+
+			obj_desc->reference.value = opcode - AML_LOCAL_OP;
+			obj_desc->reference.class = ACPI_REFCLASS_LOCAL;
+
+#ifndef ACPI_NO_METHOD_EXECUTION
+			status =
+			    acpi_ds_method_data_get_node(ACPI_REFCLASS_LOCAL,
+							 obj_desc->reference.
+							 value, walk_state,
+							 ACPI_CAST_INDIRECT_PTR
+							 (struct
+							  acpi_namespace_node,
+							  &obj_desc->reference.
+							  object));
+#endif
+			break;
+
+		case AML_TYPE_METHOD_ARGUMENT:
+
+			/* Arg ID (0-6) is (AML opcode - base AML_ARG_OP) */
+
+			obj_desc->reference.value = opcode - AML_ARG_OP;
+			obj_desc->reference.class = ACPI_REFCLASS_ARG;
+
+#ifndef ACPI_NO_METHOD_EXECUTION
+			status = acpi_ds_method_data_get_node(ACPI_REFCLASS_ARG,
+							      obj_desc->
+							      reference.value,
+							      walk_state,
+							      ACPI_CAST_INDIRECT_PTR
+							      (struct
+							       acpi_namespace_node,
+							       &obj_desc->
+							       reference.
+							       object));
+#endif
+			break;
+
+		default:	/* Object name or Debug object */
+
+			switch (op->common.aml_opcode) {
+			case AML_INT_NAMEPATH_OP:
+
+				/* Node was saved in Op */
+
+				obj_desc->reference.node = op->common.node;
+				obj_desc->reference.object =
+				    op->common.node->object;
+				obj_desc->reference.class = ACPI_REFCLASS_NAME;
+				break;
+
+			case AML_DEBUG_OP:
+
+				obj_desc->reference.class = ACPI_REFCLASS_DEBUG;
+				break;
+
+			default:
+
+				ACPI_ERROR((AE_INFO,
+					    "Unimplemented reference type for AML opcode: %4.4X",
+					    opcode));
+				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+			}
+			break;
+		}
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Unimplemented data type: %X",
+			    ACPI_GET_OBJECT_TYPE(obj_desc)));
+
+		status = AE_AML_OPERAND_TYPE;
+		break;
+	}
+
+	return_ACPI_STATUS(status);
+}
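+
+/*
+ * Hypothetical ASL sketch (illustrative only) of the AML constant handling
+ * above: One is created with the AOPOBJ_AML_CONSTANT flag, so the store
+ * below is treated as a no-op instead of modifying the constant.
+ *
+ *     Store (Local0, One)
+ */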
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
new file mode 100644
index 0000000..0c3b4dd
--- /dev/null
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -0,0 +1,1469 @@
+/******************************************************************************
+ *
+ * Module Name: dsopcode - Dispatcher Op Region support and handling of
+ *                         "control" opcodes
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "acnamesp.h"
+#include "acevents.h"
+#include "actables.h"
+
+#define _COMPONENT          ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsopcode")
+
+/* Local prototypes */
+static acpi_status
+acpi_ds_execute_arguments(struct acpi_namespace_node *node,
+			  struct acpi_namespace_node *scope_node,
+			  u32 aml_length, u8 * aml_start);
+
+static acpi_status
+acpi_ds_init_buffer_field(u16 aml_opcode,
+			  union acpi_operand_object *obj_desc,
+			  union acpi_operand_object *buffer_desc,
+			  union acpi_operand_object *offset_desc,
+			  union acpi_operand_object *length_desc,
+			  union acpi_operand_object *result_desc);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_execute_arguments
+ *
+ * PARAMETERS:  Node                - Object NS node
+ *              scope_node          - Parent NS node
+ *              aml_length          - Length of executable AML
+ *              aml_start           - Pointer to the AML
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Late (deferred) execution of region or field arguments
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_execute_arguments(struct acpi_namespace_node *node,
+			  struct acpi_namespace_node *scope_node,
+			  u32 aml_length, u8 * aml_start)
+{
+	acpi_status status;
+	union acpi_parse_object *op;
+	struct acpi_walk_state *walk_state;
+
+	ACPI_FUNCTION_TRACE(ds_execute_arguments);
+
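+	/*
+	 * The deferred AML is processed in two walks below: it is first
+	 * parsed in ACPI_IMODE_LOAD_PASS1 mode, then re-parsed and executed
+	 * in ACPI_IMODE_EXECUTE mode to evaluate the argument TermArgs.
+	 */
+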
+	/*
+	 * Allocate a new parser op to be the root of the parsed tree
+	 */
+	op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
+	if (!op) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Save the Node for use in acpi_ps_parse_aml */
+
+	op->common.node = scope_node;
+
+	/* Create and initialize a new parser state */
+
+	walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
+	if (!walk_state) {
+		status = AE_NO_MEMORY;
+		goto cleanup;
+	}
+
+	status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
+				       aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
+	if (ACPI_FAILURE(status)) {
+		acpi_ds_delete_walk_state(walk_state);
+		goto cleanup;
+	}
+
+	/* Mark this parse as a deferred opcode */
+
+	walk_state->parse_flags = ACPI_PARSE_DEFERRED_OP;
+	walk_state->deferred_node = node;
+
+	/* Pass1: Parse the entire declaration */
+
+	status = acpi_ps_parse_aml(walk_state);
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	/* Get and init the Op created above */
+
+	op->common.node = node;
+	acpi_ps_delete_parse_tree(op);
+
+	/* Evaluate the deferred arguments */
+
+	op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
+	if (!op) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	op->common.node = scope_node;
+
+	/* Create and initialize a new parser state */
+
+	walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
+	if (!walk_state) {
+		status = AE_NO_MEMORY;
+		goto cleanup;
+	}
+
+	/* Execute the opcode and arguments */
+
+	status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
+				       aml_length, NULL, ACPI_IMODE_EXECUTE);
+	if (ACPI_FAILURE(status)) {
+		acpi_ds_delete_walk_state(walk_state);
+		goto cleanup;
+	}
+
+	/* Mark this execution as a deferred opcode */
+
+	walk_state->deferred_node = node;
+	status = acpi_ps_parse_aml(walk_state);
+
+      cleanup:
+	acpi_ps_delete_parse_tree(op);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_get_buffer_field_arguments
+ *
+ * PARAMETERS:  obj_desc        - A valid buffer_field object
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Get buffer_field Buffer and Index. This implements the late
+ *              evaluation of these field attributes.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
+{
+	union acpi_operand_object *extra_desc;
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_field_arguments, obj_desc);
+
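+	/* Nothing more to do if the field arguments were already evaluated */
+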
+	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Get the AML pointer (method object) and buffer_field node */
+
+	extra_desc = acpi_ns_get_secondary_object(obj_desc);
+	node = obj_desc->buffer_field.node;
+
+	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+			(ACPI_TYPE_BUFFER_FIELD, node, NULL));
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n",
+			  acpi_ut_get_node_name(node)));
+
+	/* Execute the AML code for the term_arg arguments */
+
+	status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
+					   extra_desc->extra.aml_length,
+					   extra_desc->extra.aml_start);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_get_bank_field_arguments
+ *
+ * PARAMETERS:  obj_desc        - A valid bank_field object
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Get bank_field bank_value. This implements the late
+ *              evaluation of these field attributes.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
+{
+	union acpi_operand_object *extra_desc;
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_get_bank_field_arguments, obj_desc);
+
+	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Get the AML pointer (method object) and bank_field node */
+
+	extra_desc = acpi_ns_get_secondary_object(obj_desc);
+	node = obj_desc->bank_field.node;
+
+	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+			(ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL));
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BankField Arg Init\n",
+			  acpi_ut_get_node_name(node)));
+
+	/* Execute the AML code for the term_arg arguments */
+
+	status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
+					   extra_desc->extra.aml_length,
+					   extra_desc->extra.aml_start);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_get_buffer_arguments
+ *
+ * PARAMETERS:  obj_desc        - A valid Buffer object
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Get Buffer length and initializer byte list.  This implements
+ *              the late evaluation of these attributes.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
+{
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_arguments, obj_desc);
+
+	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Get the Buffer node */
+
+	node = obj_desc->buffer.node;
+	if (!node) {
+		ACPI_ERROR((AE_INFO,
+			    "No pointer back to NS node in buffer obj %p",
+			    obj_desc));
+		return_ACPI_STATUS(AE_AML_INTERNAL);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Buffer Arg Init\n"));
+
+	/* Execute the AML code for the term_arg arguments */
+
+	status = acpi_ds_execute_arguments(node, node,
+					   obj_desc->buffer.aml_length,
+					   obj_desc->buffer.aml_start);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_get_package_arguments
+ *
+ * PARAMETERS:  obj_desc        - A valid Package object
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Get Package length and initializer byte list.  This implements
+ *              the late evaluation of these attributes.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
+{
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_get_package_arguments, obj_desc);
+
+	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Get the Package node */
+
+	node = obj_desc->package.node;
+	if (!node) {
+		ACPI_ERROR((AE_INFO,
+			    "No pointer back to NS node in package %p",
+			    obj_desc));
+		return_ACPI_STATUS(AE_AML_INTERNAL);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Package Arg Init\n"));
+
+	/* Execute the AML code for the term_arg arguments */
+
+	status = acpi_ds_execute_arguments(node, node,
+					   obj_desc->package.aml_length,
+					   obj_desc->package.aml_start);
+	return_ACPI_STATUS(status);
+}
+
+/*****************************************************************************
+ *
+ * FUNCTION:    acpi_ds_get_region_arguments
+ *
+ * PARAMETERS:  obj_desc        - A valid region object
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Get region address and length.  This implements the late
+ *              evaluation of these region attributes.
+ *
+ ****************************************************************************/
+
+acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
+{
+	struct acpi_namespace_node *node;
+	acpi_status status;
+	union acpi_operand_object *extra_desc;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_get_region_arguments, obj_desc);
+
+	if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	extra_desc = acpi_ns_get_secondary_object(obj_desc);
+	if (!extra_desc) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	/* Get the Region node */
+
+	node = obj_desc->region.node;
+
+	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+			(ACPI_TYPE_REGION, node, NULL));
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n",
+			  acpi_ut_get_node_name(node),
+			  extra_desc->extra.aml_start));
+
+	/* Execute the argument AML */
+
+	status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
+					   extra_desc->extra.aml_length,
+					   extra_desc->extra.aml_start);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Validate the region address/length via the host OS */
+
+	status = acpi_os_validate_address(obj_desc->region.space_id,
+					  obj_desc->region.address,
+					  (acpi_size) obj_desc->region.length,
+					  acpi_ut_get_node_name(node));
+
+	if (ACPI_FAILURE(status)) {
+		/*
+		 * Invalid address/length. We will emit an error message and mark
+		 * the region as invalid, so that it will cause an additional error if
+		 * it is ever used. Then return AE_OK.
+		 */
+		ACPI_EXCEPTION((AE_INFO, status,
+				"During address validation of OpRegion [%4.4s]",
+				node->name.ascii));
+		obj_desc->common.flags |= AOPOBJ_INVALID;
+		status = AE_OK;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_initialize_region
+ *
+ * PARAMETERS:  obj_handle      - Region namespace node
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Front end to ev_initialize_region
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_initialize_region(acpi_handle obj_handle)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+
+	obj_desc = acpi_ns_get_attached_object(obj_handle);
+
+	/* Namespace is NOT locked */
+
+	status = acpi_ev_initialize_region(obj_desc, FALSE);
+	return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_init_buffer_field
+ *
+ * PARAMETERS:  aml_opcode      - create_xxx_field
+ *              obj_desc        - buffer_field object
+ *              buffer_desc     - Host Buffer
+ *              offset_desc     - Offset into buffer
+ *              length_desc     - Length of field (CREATE_FIELD_OP only)
+ *              result_desc     - Where to store the result
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Perform actual initialization of a buffer field
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_init_buffer_field(u16 aml_opcode,
+			  union acpi_operand_object *obj_desc,
+			  union acpi_operand_object *buffer_desc,
+			  union acpi_operand_object *offset_desc,
+			  union acpi_operand_object *length_desc,
+			  union acpi_operand_object *result_desc)
+{
+	u32 offset;
+	u32 bit_offset;
+	u32 bit_count;
+	u8 field_flags;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_init_buffer_field, obj_desc);
+
+	/* Host object must be a Buffer */
+
+	if (ACPI_GET_OBJECT_TYPE(buffer_desc) != ACPI_TYPE_BUFFER) {
+		ACPI_ERROR((AE_INFO,
+			    "Target of Create Field is not a Buffer object - %s",
+			    acpi_ut_get_object_type_name(buffer_desc)));
+
+		status = AE_AML_OPERAND_TYPE;
+		goto cleanup;
+	}
+
+	/*
+	 * The last parameter to all of these opcodes (result_desc) started
+	 * out as a name_string, and should therefore now be a NS node
+	 * after resolution in acpi_ex_resolve_operands().
+	 */
+	if (ACPI_GET_DESCRIPTOR_TYPE(result_desc) != ACPI_DESC_TYPE_NAMED) {
+		ACPI_ERROR((AE_INFO,
+			    "(%s) destination not a NS Node [%s]",
+			    acpi_ps_get_opcode_name(aml_opcode),
+			    acpi_ut_get_descriptor_name(result_desc)));
+
+		status = AE_AML_OPERAND_TYPE;
+		goto cleanup;
+	}
+
+	offset = (u32) offset_desc->integer.value;
+
+	/*
+	 * Setup the Bit offsets and counts, according to the opcode
+	 */
+	switch (aml_opcode) {
+	case AML_CREATE_FIELD_OP:
+
+		/* Offset is in bits, count is in bits */
+
+		field_flags = AML_FIELD_ACCESS_BYTE;
+		bit_offset = offset;
+		bit_count = (u32) length_desc->integer.value;
+
+		/* Must have a valid (>0) bit count */
+
+		if (bit_count == 0) {
+			ACPI_ERROR((AE_INFO,
+				    "Attempt to CreateField of length zero"));
+			status = AE_AML_OPERAND_VALUE;
+			goto cleanup;
+		}
+		break;
+
+	case AML_CREATE_BIT_FIELD_OP:
+
+		/* Offset is in bits, Field is one bit */
+
+		bit_offset = offset;
+		bit_count = 1;
+		field_flags = AML_FIELD_ACCESS_BYTE;
+		break;
+
+	case AML_CREATE_BYTE_FIELD_OP:
+
+		/* Offset is in bytes, field is one byte */
+
+		bit_offset = 8 * offset;
+		bit_count = 8;
+		field_flags = AML_FIELD_ACCESS_BYTE;
+		break;
+
+	case AML_CREATE_WORD_FIELD_OP:
+
+		/* Offset is in bytes, field is one word */
+
+		bit_offset = 8 * offset;
+		bit_count = 16;
+		field_flags = AML_FIELD_ACCESS_WORD;
+		break;
+
+	case AML_CREATE_DWORD_FIELD_OP:
+
+		/* Offset is in bytes, field is one dword */
+
+		bit_offset = 8 * offset;
+		bit_count = 32;
+		field_flags = AML_FIELD_ACCESS_DWORD;
+		break;
+
+	case AML_CREATE_QWORD_FIELD_OP:
+
+		/* Offset is in bytes, field is one qword */
+
+		bit_offset = 8 * offset;
+		bit_count = 64;
+		field_flags = AML_FIELD_ACCESS_QWORD;
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO,
+			    "Unknown field creation opcode %02x", aml_opcode));
+		status = AE_AML_BAD_OPCODE;
+		goto cleanup;
+	}
+
+	/* Entire field must fit within the current length of the buffer */
+
+	if ((bit_offset + bit_count) > (8 * (u32) buffer_desc->buffer.length)) {
+		ACPI_ERROR((AE_INFO,
+			    "Field [%4.4s] at %d exceeds Buffer [%4.4s] size %d (bits)",
+			    acpi_ut_get_node_name(result_desc),
+			    bit_offset + bit_count,
+			    acpi_ut_get_node_name(buffer_desc->buffer.node),
+			    8 * (u32) buffer_desc->buffer.length));
+		status = AE_AML_BUFFER_LIMIT;
+		goto cleanup;
+	}
+
+	/*
+	 * Initialize areas of the field object that are common to all fields
+	 * For field_flags, use LOCK_RULE = 0 (NO_LOCK),
+	 * UPDATE_RULE = 0 (UPDATE_PRESERVE)
+	 */
+	status = acpi_ex_prep_common_field_object(obj_desc, field_flags, 0,
+						  bit_offset, bit_count);
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	obj_desc->buffer_field.buffer_obj = buffer_desc;
+
+	/* Reference count for buffer_desc inherits obj_desc count */
+
+	buffer_desc->common.reference_count = (u16)
+	    (buffer_desc->common.reference_count +
+	     obj_desc->common.reference_count);
+
+      cleanup:
+
+	/* Always delete the operands */
+
+	acpi_ut_remove_reference(offset_desc);
+	acpi_ut_remove_reference(buffer_desc);
+
+	if (aml_opcode == AML_CREATE_FIELD_OP) {
+		acpi_ut_remove_reference(length_desc);
+	}
+
+	/* On failure, delete the result descriptor */
+
+	if (ACPI_FAILURE(status)) {
+		acpi_ut_remove_reference(result_desc);	/* Result descriptor */
+	} else {
+		/* Now the address and length are valid for this buffer_field */
+
+		obj_desc->buffer_field.flags |= AOPOBJ_DATA_VALID;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_eval_buffer_field_operands
+ *
+ * PARAMETERS:  walk_state      - Current walk
+ *              Op              - A valid buffer_field Op object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get buffer_field Buffer and Index
+ *              Called from acpi_ds_exec_end_op during buffer_field parse tree walk
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
+				   union acpi_parse_object *op)
+{
+	acpi_status status;
+	union acpi_operand_object *obj_desc;
+	struct acpi_namespace_node *node;
+	union acpi_parse_object *next_op;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_eval_buffer_field_operands, op);
+
+	/*
+	 * This is where we evaluate the address and length fields of the
+	 * create_xxx_field declaration
+	 */
+	node = op->common.node;
+
+	/* next_op points to the op that holds the Buffer */
+
+	next_op = op->common.value.arg;
+
+	/* Evaluate/create the address and length operands */
+
+	status = acpi_ds_create_operands(walk_state, next_op);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (!obj_desc) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	/* Resolve the operands */
+
+	status = acpi_ex_resolve_operands(op->common.aml_opcode,
+					  ACPI_WALK_OPERANDS, walk_state);
+	if (ACPI_FAILURE(status)) {
+		ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)",
+			    acpi_ps_get_opcode_name(op->common.aml_opcode),
+			    status));
+
+		return_ACPI_STATUS(status);
+	}
+
+	/* Initialize the Buffer Field */
+
+	if (op->common.aml_opcode == AML_CREATE_FIELD_OP) {
+
+		/*
+		 * NOTE: CreateField has an extra Length operand:
+		 * (Buffer, Offset, Length, Result)
+		 */
+
+		status =
+		    acpi_ds_init_buffer_field(op->common.aml_opcode, obj_desc,
+					      walk_state->operands[0],
+					      walk_state->operands[1],
+					      walk_state->operands[2],
+					      walk_state->operands[3]);
+	} else {
+		/* All other create_xxx_field opcodes: (Buffer, Offset, Result) */
+
+		status =
+		    acpi_ds_init_buffer_field(op->common.aml_opcode, obj_desc,
+					      walk_state->operands[0],
+					      walk_state->operands[1], NULL,
+					      walk_state->operands[2]);
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_eval_region_operands
+ *
+ * PARAMETERS:  walk_state      - Current walk
+ *              Op              - A valid region Op object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get region address and length
+ *              Called from acpi_ds_exec_end_op during op_region parse tree walk
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
+			     union acpi_parse_object *op)
+{
+	acpi_status status;
+	union acpi_operand_object *obj_desc;
+	union acpi_operand_object *operand_desc;
+	struct acpi_namespace_node *node;
+	union acpi_parse_object *next_op;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_eval_region_operands, op);
+
+	/*
+	 * This is where we evaluate the address and length fields of the
+	 * op_region declaration
+	 */
+	node = op->common.node;
+
+	/* next_op points to the op that holds the space_iD */
+
+	next_op = op->common.value.arg;
+
+	/* next_op points to address op */
+
+	next_op = next_op->common.next;
+
+	/* Evaluate/create the address and length operands */
+
+	status = acpi_ds_create_operands(walk_state, next_op);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Resolve the length and address operands to numbers */
+
+	status = acpi_ex_resolve_operands(op->common.aml_opcode,
+					  ACPI_WALK_OPERANDS, walk_state);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (!obj_desc) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	/*
+	 * Get the length operand and save it
+	 * (at Top of stack)
+	 */
+	operand_desc = walk_state->operands[walk_state->num_operands - 1];
+
+	obj_desc->region.length = (u32) operand_desc->integer.value;
+	acpi_ut_remove_reference(operand_desc);
+
+	/*
+	 * Get the address and save it
+	 * (at top of stack - 1)
+	 */
+	operand_desc = walk_state->operands[walk_state->num_operands - 2];
+
+	obj_desc->region.address = (acpi_physical_address)
+	    operand_desc->integer.value;
+	acpi_ut_remove_reference(operand_desc);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
+			  obj_desc,
+			  ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
+			  obj_desc->region.length));
+
+	/* Now the address and length are valid for this opregion */
+
+	obj_desc->region.flags |= AOPOBJ_DATA_VALID;
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_eval_table_region_operands
+ *
+ * PARAMETERS:  walk_state      - Current walk
+ *              Op              - A valid region Op object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get region address and length
+ *              Called from acpi_ds_exec_end_op during data_table_region parse tree walk
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
+				   union acpi_parse_object *op)
+{
+	acpi_status status;
+	union acpi_operand_object *obj_desc;
+	union acpi_operand_object **operand;
+	struct acpi_namespace_node *node;
+	union acpi_parse_object *next_op;
+	u32 table_index;
+	struct acpi_table_header *table;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op);
+
+	/*
+	 * This is where we evaluate the signature_string and oem_iDString
+	 * and oem_table_iDString of the data_table_region declaration
+	 */
+	node = op->common.node;
+
+	/* next_op points to signature_string op */
+
+	next_op = op->common.value.arg;
+
+	/*
+	 * Evaluate/create the signature_string and oem_iDString
+	 * and oem_table_iDString operands
+	 */
+	status = acpi_ds_create_operands(walk_state, next_op);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Resolve the signature_string and oem_iDString
+	 * and oem_table_iDString operands
+	 */
+	status = acpi_ex_resolve_operands(op->common.aml_opcode,
+					  ACPI_WALK_OPERANDS, walk_state);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
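+	/* Operands [0-2] are the Signature, oem_id, and oem_table_id strings */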
+	operand = &walk_state->operands[0];
+
+	/* Find the ACPI table */
+
+	status = acpi_tb_find_table(operand[0]->string.pointer,
+				    operand[1]->string.pointer,
+				    operand[2]->string.pointer, &table_index);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	acpi_ut_remove_reference(operand[0]);
+	acpi_ut_remove_reference(operand[1]);
+	acpi_ut_remove_reference(operand[2]);
+
+	status = acpi_get_table_by_index(table_index, &table);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (!obj_desc) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
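+	/*
+	 * The data_table_region maps directly onto the matched table in
+	 * memory: the region address is the table's address and the region
+	 * length is the table's total length.
+	 */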
+	obj_desc->region.address =
+	    (acpi_physical_address) ACPI_TO_INTEGER(table);
+	obj_desc->region.length = table->length;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
+			  obj_desc,
+			  ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
+			  obj_desc->region.length));
+
+	/* Now the address and length are valid for this opregion */
+
+	obj_desc->region.flags |= AOPOBJ_DATA_VALID;
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_eval_data_object_operands
+ *
+ * PARAMETERS:  walk_state      - Current walk
+ *              Op              - A valid data_object Op object
+ *              obj_desc        - data_object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get the operands and complete the following data object types:
+ *              Buffer, Package.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
+				  union acpi_parse_object *op,
+				  union acpi_operand_object *obj_desc)
+{
+	acpi_status status;
+	union acpi_operand_object *arg_desc;
+	u32 length;
+
+	ACPI_FUNCTION_TRACE(ds_eval_data_object_operands);
+
+	/* The first operand (for all of these data objects) is the length */
+
+	/*
+	 * Set proper index into operand stack for acpi_ds_obj_stack_push
+	 * invoked inside acpi_ds_create_operand.
+	 */
+	walk_state->operand_index = walk_state->num_operands;
+
+	status = acpi_ds_create_operand(walk_state, op->common.value.arg, 1);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_ex_resolve_operands(walk_state->opcode,
+					  &(walk_state->
+					    operands[walk_state->num_operands -
+						     1]), walk_state);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Extract length operand */
+
+	arg_desc = walk_state->operands[walk_state->num_operands - 1];
+	length = (u32) arg_desc->integer.value;
+
+	/* Cleanup for length operand */
+
+	status = acpi_ds_obj_stack_pop(1, walk_state);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	acpi_ut_remove_reference(arg_desc);
+
+	/*
+	 * Create the actual data object
+	 */
+	switch (op->common.aml_opcode) {
+	case AML_BUFFER_OP:
+
+		status =
+		    acpi_ds_build_internal_buffer_obj(walk_state, op, length,
+						      &obj_desc);
+		break;
+
+	case AML_PACKAGE_OP:
+	case AML_VAR_PACKAGE_OP:
+
+		status =
+		    acpi_ds_build_internal_package_obj(walk_state, op, length,
+						       &obj_desc);
+		break;
+
+	default:
+		return_ACPI_STATUS(AE_AML_BAD_OPCODE);
+	}
+
+	if (ACPI_SUCCESS(status)) {
+		/*
+		 * Return the object in the walk_state, unless the parent is a package -
+		 * in this case, the return object will be stored in the parse tree
+		 * for the package.
+		 */
+		if ((!op->common.parent) ||
+		    ((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) &&
+		     (op->common.parent->common.aml_opcode !=
+		      AML_VAR_PACKAGE_OP)
+		     && (op->common.parent->common.aml_opcode != AML_NAME_OP))) {
+			walk_state->result_obj = obj_desc;
+		}
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_eval_bank_field_operands
+ *
+ * PARAMETERS:  walk_state      - Current walk
+ *              Op              - A valid bank_field Op object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get bank_field bank_value
+ *              Called from acpi_ds_exec_end_op during bank_field parse tree walk
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_eval_bank_field_operands(struct acpi_walk_state *walk_state,
+				 union acpi_parse_object *op)
+{
+	acpi_status status;
+	union acpi_operand_object *obj_desc;
+	union acpi_operand_object *operand_desc;
+	struct acpi_namespace_node *node;
+	union acpi_parse_object *next_op;
+	union acpi_parse_object *arg;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_eval_bank_field_operands, op);
+
+	/*
+	 * This is where we evaluate the bank_value field of the
+	 * bank_field declaration
+	 */
+
+	/* next_op points to the op that holds the Region */
+
+	next_op = op->common.value.arg;
+
+	/* next_op points to the op that holds the Bank Register */
+
+	next_op = next_op->common.next;
+
+	/* next_op points to the op that holds the Bank Value */
+
+	next_op = next_op->common.next;
+
+	/*
+	 * Set proper index into operand stack for acpi_ds_obj_stack_push
+	 * invoked inside acpi_ds_create_operand.
+	 *
+	 * We use walk_state->Operands[0] to store the evaluated bank_value
+	 */
+	walk_state->operand_index = 0;
+
+	status = acpi_ds_create_operand(walk_state, next_op, 0);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_ex_resolve_to_value(&walk_state->operands[0], walk_state);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS,
+			   acpi_ps_get_opcode_name(op->common.aml_opcode), 1);
+	/*
+	 * Get the bank_value operand and save it
+	 * (at Top of stack)
+	 */
+	operand_desc = walk_state->operands[0];
+
+	/* Arg points to the start of the field list (argument 4 of BankField) */
+
+	arg = acpi_ps_get_arg(op, 4);
+	while (arg) {
+
+		/* Ignore OFFSET and ACCESSAS terms here */
+
+		if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
+			node = arg->common.node;
+
+			obj_desc = acpi_ns_get_attached_object(node);
+			if (!obj_desc) {
+				return_ACPI_STATUS(AE_NOT_EXIST);
+			}
+
+			obj_desc->bank_field.value =
+			    (u32) operand_desc->integer.value;
+		}
+
+		/* Move to next field in the list */
+
+		arg = arg->common.next;
+	}
+
+	acpi_ut_remove_reference(operand_desc);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_exec_begin_control_op
+ *
+ * PARAMETERS:  walk_list       - The list that owns the walk stack
+ *              Op              - The control Op
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Handles all control ops encountered during control method
+ *              execution.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
+			      union acpi_parse_object *op)
+{
+	acpi_status status = AE_OK;
+	union acpi_generic_state *control_state;
+
+	ACPI_FUNCTION_NAME(ds_exec_begin_control_op);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p Opcode=%2.2X State=%p\n", op,
+			  op->common.aml_opcode, walk_state));
+
+	switch (op->common.aml_opcode) {
+	case AML_WHILE_OP:
+
+		/*
+		 * If this is an additional iteration of a while loop, continue.
+		 * There is no need to allocate a new control state.
+		 */
+		if (walk_state->control_state) {
+			if (walk_state->control_state->control.aml_predicate_start
+				== (walk_state->parser_state.aml - 1)) {
+
+				/* Reset the state to start-of-loop */
+
+				walk_state->control_state->common.state =
+				    ACPI_CONTROL_CONDITIONAL_EXECUTING;
+				break;
+			}
+		}
+
+		/*lint -fallthrough */
+
+	case AML_IF_OP:
+
+		/*
+		 * IF/WHILE: Create a new control state to manage these
+		 * constructs. We need to manage these as a stack, in order
+		 * to handle nesting.
+		 */
+		control_state = acpi_ut_create_control_state();
+		if (!control_state) {
+			status = AE_NO_MEMORY;
+			break;
+		}
+		/*
+		 * Save a pointer to the predicate for multiple executions
+		 * of a loop
+		 */
+		control_state->control.aml_predicate_start =
+		    walk_state->parser_state.aml - 1;
+		control_state->control.package_end =
+		    walk_state->parser_state.pkg_end;
+		control_state->control.opcode = op->common.aml_opcode;
+
+		/* Push the control state on this walk's control stack */
+
+		acpi_ut_push_generic_state(&walk_state->control_state,
+					   control_state);
+		break;
+
+	case AML_ELSE_OP:
+
+		/* Predicate is in the state object */
+		/* If predicate is true, the IF was executed, ignore ELSE part */
+
+		if (walk_state->last_predicate) {
+			status = AE_CTRL_TRUE;
+		}
+
+		break;
+
+	case AML_RETURN_OP:
+
+		break;
+
+	default:
+		break;
+	}
+
+	return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_exec_end_control_op
+ *
+ * PARAMETERS:  walk_list       - The list that owns the walk stack
+ *              Op              - The control Op
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Handles all control ops encountered during control method
+ *              execution.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
+			    union acpi_parse_object * op)
+{
+	acpi_status status = AE_OK;
+	union acpi_generic_state *control_state;
+
+	ACPI_FUNCTION_NAME(ds_exec_end_control_op);
+
+	switch (op->common.aml_opcode) {
+	case AML_IF_OP:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[IF_OP] Op=%p\n", op));
+
+		/*
+		 * Save the result of the predicate in case there is an
+		 * ELSE to come
+		 */
+		walk_state->last_predicate =
+		    (u8) walk_state->control_state->common.value;
+
+		/*
+		 * Pop the control state that was created at the start
+		 * of the IF and free it
+		 */
+		control_state =
+		    acpi_ut_pop_generic_state(&walk_state->control_state);
+		acpi_ut_delete_generic_state(control_state);
+		break;
+
+	case AML_ELSE_OP:
+
+		break;
+
+	case AML_WHILE_OP:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op));
+
+		control_state = walk_state->control_state;
+		if (control_state->common.value) {
+
+			/* Predicate was true, the body of the loop was just executed */
+
+			/*
+			 * This loop counter mechanism allows the interpreter to escape
+			 * possibly infinite loops. This can occur in poorly written AML
+			 * when the hardware does not respond within a while loop and the
+			 * loop does not implement a timeout.
+			 */
+			control_state->control.loop_count++;
+			if (control_state->control.loop_count >
+				ACPI_MAX_LOOP_ITERATIONS) {
+				status = AE_AML_INFINITE_LOOP;
+				break;
+			}
+
+			/*
+			 * Go back and evaluate the predicate and maybe execute the loop
+			 * another time
+			 */
+			status = AE_CTRL_PENDING;
+			walk_state->aml_last_while =
+			    control_state->control.aml_predicate_start;
+			break;
+		}
+
+		/* Predicate was false, terminate this while loop */
+
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "[WHILE_OP] termination! Op=%p\n", op));
+
+		/* Pop this control state and free it */
+
+		control_state =
+		    acpi_ut_pop_generic_state(&walk_state->control_state);
+		acpi_ut_delete_generic_state(control_state);
+		break;
+
+	case AML_RETURN_OP:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "[RETURN_OP] Op=%p Arg=%p\n", op,
+				  op->common.value.arg));
+
+		/*
+		 * One optional operand -- the return value
+		 * It can be either an immediate operand or a result that
+		 * has been bubbled up the tree
+		 */
+		if (op->common.value.arg) {
+
+			/* Since we have a real Return(), delete any implicit return */
+
+			acpi_ds_clear_implicit_return(walk_state);
+
+			/* Return statement has an immediate operand */
+
+			status =
+			    acpi_ds_create_operands(walk_state,
+						    op->common.value.arg);
+			if (ACPI_FAILURE(status)) {
+				return (status);
+			}
+
+			/*
+			 * If value being returned is a Reference (such as
+			 * an arg or local), resolve it now because it may
+			 * cease to exist at the end of the method.
+			 */
+			status =
+			    acpi_ex_resolve_to_value(&walk_state->operands[0],
+						     walk_state);
+			if (ACPI_FAILURE(status)) {
+				return (status);
+			}
+
+			/*
+			 * Get the return value and save as the last result
+			 * value.  This is the only place where walk_state->return_desc
+			 * is set to anything other than zero!
+			 */
+			walk_state->return_desc = walk_state->operands[0];
+		} else if (walk_state->result_count) {
+
+			/* Since we have a real Return(), delete any implicit return */
+
+			acpi_ds_clear_implicit_return(walk_state);
+
+			/*
+			 * The return value has come from a previous calculation.
+			 *
+			 * If value being returned is a Reference (such as
+			 * an arg or local), resolve it now because it may
+			 * cease to exist at the end of the method.
+			 *
+			 * Allow references created by the Index operator to return unchanged.
+			 */
+			if ((ACPI_GET_DESCRIPTOR_TYPE
+			     (walk_state->results->results.obj_desc[0]) ==
+			     ACPI_DESC_TYPE_OPERAND)
+			    &&
+			    (ACPI_GET_OBJECT_TYPE
+			     (walk_state->results->results.obj_desc[0]) ==
+			     ACPI_TYPE_LOCAL_REFERENCE)
+			    && ((walk_state->results->results.obj_desc[0])->
+				reference.class != ACPI_REFCLASS_INDEX)) {
+				status =
+				    acpi_ex_resolve_to_value(&walk_state->
+							     results->results.
+							     obj_desc[0],
+							     walk_state);
+				if (ACPI_FAILURE(status)) {
+					return (status);
+				}
+			}
+
+			walk_state->return_desc =
+			    walk_state->results->results.obj_desc[0];
+		} else {
+			/* No return operand */
+
+			if (walk_state->num_operands) {
+				acpi_ut_remove_reference(walk_state->
+							 operands[0]);
+			}
+
+			walk_state->operands[0] = NULL;
+			walk_state->num_operands = 0;
+			walk_state->return_desc = NULL;
+		}
+
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "Completed RETURN_OP State=%p, RetVal=%p\n",
+				  walk_state, walk_state->return_desc));
+
+		/* End the control method execution right now */
+
+		status = AE_CTRL_TERMINATE;
+		break;
+
+	case AML_NOOP_OP:
+
+		/* Just do nothing! */
+		break;
+
+	case AML_BREAK_POINT_OP:
+
+		/* Call up to the OS service layer to handle this */
+
+		status =
+		    acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
+				   "Executed AML Breakpoint opcode");
+
+		/* If and when it returns, all done. */
+
+		break;
+
+	case AML_BREAK_OP:
+	case AML_CONTINUE_OP:	/* ACPI 2.0 */
+
+		/* Pop and delete control states until we find a while */
+
+		while (walk_state->control_state &&
+		       (walk_state->control_state->control.opcode !=
+			AML_WHILE_OP)) {
+			control_state =
+			    acpi_ut_pop_generic_state(&walk_state->
+						      control_state);
+			acpi_ut_delete_generic_state(control_state);
+		}
+
+		/* No while found? */
+
+		if (!walk_state->control_state) {
+			return (AE_AML_NO_WHILE);
+		}
+
+		/* Was: walk_state->aml_last_while = walk_state->control_state->Control.aml_predicate_start; */
+
+		walk_state->aml_last_while =
+		    walk_state->control_state->control.package_end;
+
+		/* Return status depending on opcode */
+
+		if (op->common.aml_opcode == AML_BREAK_OP) {
+			status = AE_CTRL_BREAK;
+		} else {
+			status = AE_CTRL_CONTINUE;
+		}
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Unknown control opcode=%X Op=%p",
+			    op->common.aml_opcode, op));
+
+		status = AE_AML_BAD_OPCODE;
+		break;
+	}
+
+	return (status);
+}
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
new file mode 100644
index 0000000..dabc23a
--- /dev/null
+++ b/drivers/acpi/acpica/dsutils.c
@@ -0,0 +1,869 @@
+/*******************************************************************************
+ *
+ * Module Name: dsutils - Dispatcher utilities
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "acnamesp.h"
+#include "acdebug.h"
+
+#define _COMPONENT          ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsutils")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_clear_implicit_return
+ *
+ * PARAMETERS:  walk_state          - Current State
+ *
+ * RETURN:      None.
+ *
+ * DESCRIPTION: Clear and remove a reference on an implicit return value.  Used
+ *              to delete "stale" return values (if enabled, the return value
+ *              from every operator is saved at least momentarily, in case the
+ *              parent method exits.)
+ *
+ ******************************************************************************/
+void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state)
+{
+	ACPI_FUNCTION_NAME(ds_clear_implicit_return);
+
+	/*
+	 * Slack must be enabled for this feature
+	 */
+	if (!acpi_gbl_enable_interpreter_slack) {
+		return;
+	}
+
+	if (walk_state->implicit_return_obj) {
+		/*
+		 * Delete any "stale" implicit return. However, in
+		 * complex statements, the implicit return value can be
+		 * bubbled up several levels.
+		 */
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "Removing reference on stale implicit return obj %p\n",
+				  walk_state->implicit_return_obj));
+
+		acpi_ut_remove_reference(walk_state->implicit_return_obj);
+		walk_state->implicit_return_obj = NULL;
+	}
+}
+
+#ifndef ACPI_NO_METHOD_EXECUTION
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_do_implicit_return
+ *
+ * PARAMETERS:  return_desc         - The return value
+ *              walk_state          - Current State
+ *              add_reference       - True if a reference should be added to the
+ *                                    return object
+ *
+ * RETURN:      TRUE if implicit return enabled, FALSE otherwise
+ *
+ * DESCRIPTION: Implements the optional "implicit return".  We save the result
+ *              of every ASL operator and control method invocation in case the
+ *              parent method exits.  Before storing a new return value, we
+ *              delete the previous return value.
+ *
+ ******************************************************************************/
+
+u8
+acpi_ds_do_implicit_return(union acpi_operand_object *return_desc,
+			   struct acpi_walk_state *walk_state, u8 add_reference)
+{
+	ACPI_FUNCTION_NAME(ds_do_implicit_return);
+
+	/*
+	 * Slack must be enabled for this feature, and we must
+	 * have a valid return object
+	 */
+	if ((!acpi_gbl_enable_interpreter_slack) || (!return_desc)) {
+		return (FALSE);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "Result %p will be implicitly returned; Prev=%p\n",
+			  return_desc, walk_state->implicit_return_obj));
+
+	/*
+	 * Delete any "stale" implicit return value first. However, in
+	 * complex statements, the implicit return value can be
+	 * bubbled up several levels, so we don't clear the value if it
+	 * is the same as the return_desc.
+	 */
+	if (walk_state->implicit_return_obj) {
+		if (walk_state->implicit_return_obj == return_desc) {
+			return (TRUE);
+		}
+		acpi_ds_clear_implicit_return(walk_state);
+	}
+
+	/* Save the implicit return value, add a reference if requested */
+
+	walk_state->implicit_return_obj = return_desc;
+	if (add_reference) {
+		acpi_ut_add_reference(return_desc);
+	}
+
+	return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_is_result_used
+ *
+ * PARAMETERS:  Op                  - Current Op
+ *              walk_state          - Current State
+ *
+ * RETURN:      TRUE if result is used, FALSE otherwise
+ *
+ * DESCRIPTION: Check if a result object will be used by the parent
+ *
+ ******************************************************************************/
+
+u8
+acpi_ds_is_result_used(union acpi_parse_object * op,
+		       struct acpi_walk_state * walk_state)
+{
+	const struct acpi_opcode_info *parent_info;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_is_result_used, op);
+
+	/* Must have both an Op and a Result Object */
+
+	if (!op) {
+		ACPI_ERROR((AE_INFO, "Null Op"));
+		return_UINT8(TRUE);
+	}
+
+	/*
+	 * We know that this operator is not a
+	 * Return() operator (would not come here.) The following code is the
+	 * optional support for a so-called "implicit return". Some AML code
+	 * assumes that the last value of the method is "implicitly" returned
+	 * to the caller. Just save the last result as the return value.
+	 * NOTE: this is optional because the ASL language does not actually
+	 * support this behavior.
+	 */
+	(void)acpi_ds_do_implicit_return(walk_state->result_obj, walk_state,
+					 TRUE);
+
+	/*
+	 * Now determine if the parent will use the result
+	 *
+	 * If there is no parent, or the parent is a scope_op, we are executing
+	 * at the method level. An executing method typically has no parent,
+	 * since each method is parsed separately.  A method invoked externally
+	 * via execute_control_method has a scope_op as the parent.
+	 */
+	if ((!op->common.parent) ||
+	    (op->common.parent->common.aml_opcode == AML_SCOPE_OP)) {
+
+		/* No parent, the return value cannot possibly be used */
+
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "At Method level, result of [%s] not used\n",
+				  acpi_ps_get_opcode_name(op->common.
+							  aml_opcode)));
+		return_UINT8(FALSE);
+	}
+
+	/* Get info on the parent. The root_op is AML_SCOPE */
+
+	parent_info =
+	    acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode);
+	if (parent_info->class == AML_CLASS_UNKNOWN) {
+		ACPI_ERROR((AE_INFO, "Unknown parent opcode Op=%p", op));
+		return_UINT8(FALSE);
+	}
+
+	/*
+	 * Decide what to do with the result based on the parent.  If
+	 * the parent opcode will not use the result, delete the object.
+	 * Otherwise leave it as is, it will be deleted when it is used
+	 * as an operand later.
+	 */
+	switch (parent_info->class) {
+	case AML_CLASS_CONTROL:
+
+		switch (op->common.parent->common.aml_opcode) {
+		case AML_RETURN_OP:
+
+			/* Never delete the return value associated with a return opcode */
+
+			goto result_used;
+
+		case AML_IF_OP:
+		case AML_WHILE_OP:
+
+			/*
+			 * If we are executing the predicate AND this is the predicate op,
+			 * we will use the return value
+			 */
+			if ((walk_state->control_state->common.state ==
+			     ACPI_CONTROL_PREDICATE_EXECUTING)
+			    && (walk_state->control_state->control.
+				predicate_op == op)) {
+				goto result_used;
+			}
+			break;
+
+		default:
+			/* Ignore other control opcodes */
+			break;
+		}
+
+		/* The general control opcode returns no result */
+
+		goto result_not_used;
+
+	case AML_CLASS_CREATE:
+
+		/*
+		 * These opcodes allow term_arg(s) as operands and therefore
+		 * the operands can be method calls.  The result is used.
+		 */
+		goto result_used;
+
+	case AML_CLASS_NAMED_OBJECT:
+
+		if ((op->common.parent->common.aml_opcode == AML_REGION_OP) ||
+		    (op->common.parent->common.aml_opcode == AML_DATA_REGION_OP)
+		    || (op->common.parent->common.aml_opcode == AML_PACKAGE_OP)
+		    || (op->common.parent->common.aml_opcode ==
+			AML_VAR_PACKAGE_OP)
+		    || (op->common.parent->common.aml_opcode == AML_BUFFER_OP)
+		    || (op->common.parent->common.aml_opcode ==
+			AML_INT_EVAL_SUBTREE_OP)
+		    || (op->common.parent->common.aml_opcode ==
+			AML_BANK_FIELD_OP)) {
+			/*
+			 * These opcodes allow term_arg(s) as operands and therefore
+			 * the operands can be method calls.  The result is used.
+			 */
+			goto result_used;
+		}
+
+		goto result_not_used;
+
+	default:
+
+		/*
+		 * In all other cases, the parent will actually use the return
+		 * object, so keep it.
+		 */
+		goto result_used;
+	}
+
+      result_used:
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "Result of [%s] used by Parent [%s] Op=%p\n",
+			  acpi_ps_get_opcode_name(op->common.aml_opcode),
+			  acpi_ps_get_opcode_name(op->common.parent->common.
+						  aml_opcode), op));
+
+	return_UINT8(TRUE);
+
+      result_not_used:
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "Result of [%s] not used by Parent [%s] Op=%p\n",
+			  acpi_ps_get_opcode_name(op->common.aml_opcode),
+			  acpi_ps_get_opcode_name(op->common.parent->common.
+						  aml_opcode), op));
+
+	return_UINT8(FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_delete_result_if_not_used
+ *
+ * PARAMETERS:  Op              - Current parse Op
+ *              result_obj      - Result of the operation
+ *              walk_state      - Current state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Used after interpretation of an opcode.  If there is an internal
+ *              result descriptor, check if the parent opcode will actually use
+ *              this result.  If not, delete the result now so that it will
+ *              not become orphaned.
+ *
+ ******************************************************************************/
+
+void
+acpi_ds_delete_result_if_not_used(union acpi_parse_object *op,
+				  union acpi_operand_object *result_obj,
+				  struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_delete_result_if_not_used, result_obj);
+
+	if (!op) {
+		ACPI_ERROR((AE_INFO, "Null Op"));
+		return_VOID;
+	}
+
+	if (!result_obj) {
+		return_VOID;
+	}
+
+	if (!acpi_ds_is_result_used(op, walk_state)) {
+
+		/* Must pop the result stack (obj_desc should be equal to result_obj) */
+
+		status = acpi_ds_result_pop(&obj_desc, walk_state);
+		if (ACPI_SUCCESS(status)) {
+			acpi_ut_remove_reference(result_obj);
+		}
+	}
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_resolve_operands
+ *
+ * PARAMETERS:  walk_state          - Current walk state with operands on stack
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Resolve all operands to their values.  Used to prepare
+ *              arguments to a control method invocation (a call from one
+ *              method to another.)
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_resolve_operands(struct acpi_walk_state *walk_state)
+{
+	u32 i;
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_resolve_operands, walk_state);
+
+	/*
+	 * Attempt to resolve each of the valid operands
+	 * Method arguments are passed by reference, not by value.  This means
+	 * that the actual objects are passed, not copies of the objects.
+	 */
+	for (i = 0; i < walk_state->num_operands; i++) {
+		status =
+		    acpi_ex_resolve_to_value(&walk_state->operands[i],
+					     walk_state);
+		if (ACPI_FAILURE(status)) {
+			break;
+		}
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_clear_operands
+ *
+ * PARAMETERS:  walk_state          - Current walk state with operands on stack
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Clear all operands on the current walk state operand stack.
+ *
+ ******************************************************************************/
+
+void acpi_ds_clear_operands(struct acpi_walk_state *walk_state)
+{
+	u32 i;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_clear_operands, walk_state);
+
+	/* Remove a reference on each operand on the stack */
+
+	for (i = 0; i < walk_state->num_operands; i++) {
+		/*
+		 * Remove a reference to all operands, including both
+		 * "Arguments" and "Targets".
+		 */
+		acpi_ut_remove_reference(walk_state->operands[i]);
+		walk_state->operands[i] = NULL;
+	}
+
+	walk_state->num_operands = 0;
+	return_VOID;
+}
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_create_operand
+ *
+ * PARAMETERS:  walk_state      - Current walk state
+ *              Arg             - Parse object for the argument
+ *              arg_index       - Which argument (zero based)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Translate a parse tree object that is an argument to an AML
+ *              opcode to the equivalent interpreter object.  This may include
+ *              looking up a name or entering a new name into the internal
+ *              namespace.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_create_operand(struct acpi_walk_state *walk_state,
+		       union acpi_parse_object *arg, u32 arg_index)
+{
+	acpi_status status = AE_OK;
+	char *name_string;
+	u32 name_length;
+	union acpi_operand_object *obj_desc;
+	union acpi_parse_object *parent_op;
+	u16 opcode;
+	acpi_interpreter_mode interpreter_mode;
+	const struct acpi_opcode_info *op_info;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_create_operand, arg);
+
+	/* A valid name must be looked up in the namespace */
+
+	if ((arg->common.aml_opcode == AML_INT_NAMEPATH_OP) &&
+	    (arg->common.value.string) &&
+	    !(arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Getting a name: Arg=%p\n",
+				  arg));
+
+		/* Get the entire name string from the AML stream */
+
+		status =
+		    acpi_ex_get_name_string(ACPI_TYPE_ANY,
+					    arg->common.value.buffer,
+					    &name_string, &name_length);
+
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		/* All prefixes have been handled, and the name is in name_string */
+
+		/*
+		 * Special handling for buffer_field declarations. This is a deferred
+		 * opcode that unfortunately defines the field name as the last
+		 * parameter instead of the first.  We get here when we are performing
+		 * the deferred execution, so the actual name of the field is already
+		 * in the namespace.  We don't want to attempt to look it up again
+		 * because we may be executing in a different scope than where the
+		 * actual opcode exists.
+		 */
+		if ((walk_state->deferred_node) &&
+		    (walk_state->deferred_node->type == ACPI_TYPE_BUFFER_FIELD)
+		    && (arg_index ==
+			(u32) ((walk_state->opcode ==
+				AML_CREATE_FIELD_OP) ? 3 : 2))) {
+			obj_desc =
+			    ACPI_CAST_PTR(union acpi_operand_object,
+					  walk_state->deferred_node);
+			status = AE_OK;
+		} else {	/* All other opcodes */
+
+			/*
+			 * Differentiate between a namespace "create" operation
+			 * versus a "lookup" operation (IMODE_LOAD_PASS2 vs.
+			 * IMODE_EXECUTE) in order to support the creation of
+			 * namespace objects during the execution of control methods.
+			 */
+			parent_op = arg->common.parent;
+			op_info =
+			    acpi_ps_get_opcode_info(parent_op->common.
+						    aml_opcode);
+			if ((op_info->flags & AML_NSNODE)
+			    && (parent_op->common.aml_opcode !=
+				AML_INT_METHODCALL_OP)
+			    && (parent_op->common.aml_opcode != AML_REGION_OP)
+			    && (parent_op->common.aml_opcode !=
+				AML_INT_NAMEPATH_OP)) {
+
+				/* Enter name into namespace if not found */
+
+				interpreter_mode = ACPI_IMODE_LOAD_PASS2;
+			} else {
+				/* Return a failure if name not found */
+
+				interpreter_mode = ACPI_IMODE_EXECUTE;
+			}
+
+			status =
+			    acpi_ns_lookup(walk_state->scope_info, name_string,
+					   ACPI_TYPE_ANY, interpreter_mode,
+					   ACPI_NS_SEARCH_PARENT |
+					   ACPI_NS_DONT_OPEN_SCOPE, walk_state,
+					   ACPI_CAST_INDIRECT_PTR(struct
+								  acpi_namespace_node,
+								  &obj_desc));
+			/*
+			 * The only case where we pass through (ignore) a NOT_FOUND
+			 * error is for the cond_ref_of opcode.
+			 */
+			if (status == AE_NOT_FOUND) {
+				if (parent_op->common.aml_opcode ==
+				    AML_COND_REF_OF_OP) {
+					/*
+					 * For the Conditional Reference op, it's OK if
+					 * the name is not found; we just need a way to
+					 * indicate this to the interpreter, so set the
+					 * object to the root node
+					 */
+					obj_desc = ACPI_CAST_PTR(union
+								 acpi_operand_object,
+								 acpi_gbl_root_node);
+					status = AE_OK;
+				} else {
+					/*
+					 * We just plain didn't find it -- which is a
+					 * very serious error at this point
+					 */
+					status = AE_AML_NAME_NOT_FOUND;
+				}
+			}
+
+			if (ACPI_FAILURE(status)) {
+				ACPI_ERROR_NAMESPACE(name_string, status);
+			}
+		}
+
+		/* Free the namestring created above */
+
+		ACPI_FREE(name_string);
+
+		/* Check status from the lookup */
+
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		/* Put the resulting object onto the current object stack */
+
+		status = acpi_ds_obj_stack_push(obj_desc, walk_state);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+		ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object
+				   (obj_desc, walk_state));
+	} else {
+		/* Check for null name case */
+
+		if ((arg->common.aml_opcode == AML_INT_NAMEPATH_OP) &&
+		    !(arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
+			/*
+			 * If the name is null, this means that this is an
+			 * optional result parameter that was not specified
+			 * in the original ASL.  Create a Zero Constant for a
+			 * placeholder.  (Store to a constant is a Noop.)
+			 */
+			opcode = AML_ZERO_OP;	/* Has no arguments! */
+
+			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+					  "Null namepath: Arg=%p\n", arg));
+		} else {
+			opcode = arg->common.aml_opcode;
+		}
+
+		/* Get the object type of the argument */
+
+		op_info = acpi_ps_get_opcode_info(opcode);
+		if (op_info->object_type == ACPI_TYPE_INVALID) {
+			return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
+		}
+
+		if ((op_info->flags & AML_HAS_RETVAL)
+		    || (arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
+			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+					  "Argument previously created, already stacked\n"));
+
+			ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object
+					   (walk_state->
+					    operands[walk_state->num_operands -
+						     1], walk_state));
+
+			/*
+			 * Use the value that was previously returned by
+			 * the evaluation of this argument
+			 */
+			status = acpi_ds_result_pop(&obj_desc, walk_state);
+			if (ACPI_FAILURE(status)) {
+				/*
+				 * Only error is underflow, and this indicates
+				 * a missing or null operand!
+				 */
+				ACPI_EXCEPTION((AE_INFO, status,
+						"Missing or null operand"));
+				return_ACPI_STATUS(status);
+			}
+		} else {
+			/* Create an ACPI_INTERNAL_OBJECT for the argument */
+
+			obj_desc =
+			    acpi_ut_create_internal_object(op_info->
+							   object_type);
+			if (!obj_desc) {
+				return_ACPI_STATUS(AE_NO_MEMORY);
+			}
+
+			/* Initialize the new object */
+
+			status =
+			    acpi_ds_init_object_from_op(walk_state, arg, opcode,
+							&obj_desc);
+			if (ACPI_FAILURE(status)) {
+				acpi_ut_delete_object_desc(obj_desc);
+				return_ACPI_STATUS(status);
+			}
+		}
+
+		/* Put the operand object on the object stack */
+
+		status = acpi_ds_obj_stack_push(obj_desc, walk_state);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object
+				   (obj_desc, walk_state));
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_create_operands
+ *
+ * PARAMETERS:  walk_state          - Current state
+ *              first_arg           - First argument of a parser argument tree
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Convert an operator's arguments from a parse tree format to
+ *              namespace objects and place those argument objects on the object
+ *              stack in preparation for evaluation by the interpreter.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_create_operands(struct acpi_walk_state *walk_state,
+			union acpi_parse_object *first_arg)
+{
+	acpi_status status = AE_OK;
+	union acpi_parse_object *arg;
+	union acpi_parse_object *arguments[ACPI_OBJ_NUM_OPERANDS];
+	u32 arg_count = 0;
+	u32 index = walk_state->num_operands;
+	u32 i;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg);
+
+	/* Get all arguments in the list */
+
+	arg = first_arg;
+	while (arg) {
+		if (index >= ACPI_OBJ_NUM_OPERANDS) {
+			return_ACPI_STATUS(AE_BAD_DATA);
+		}
+
+		arguments[index] = arg;
+		walk_state->operands[index] = NULL;
+
+		/* Move on to next argument, if any */
+
+		arg = arg->common.next;
+		arg_count++;
+		index++;
+	}
+
+	index--;
+
+	/* Process the arguments in reverse order, matching the order in which objects come off the Result stack */
+
+	for (i = 0; i < arg_count; i++) {
+		arg = arguments[index];
+
+		/* Force the filling of the operand stack in inverse order */
+
+		walk_state->operand_index = (u8) index;
+
+		status = acpi_ds_create_operand(walk_state, arg, index);
+		if (ACPI_FAILURE(status)) {
+			goto cleanup;
+		}
+
+		index--;
+
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "Arg #%d (%p) done, Arg1=%p\n", index, arg,
+				  first_arg));
+	}
+
+	return_ACPI_STATUS(status);
+
+      cleanup:
+	/*
+	 * We must undo everything done above; meaning that we must
+	 * pop everything off of the operand stack and delete those
+	 * objects
+	 */
+	acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
+
+	ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %d", index));
+	return_ACPI_STATUS(status);
+}
+
+/*****************************************************************************
+ *
+ * FUNCTION:    acpi_ds_evaluate_name_path
+ *
+ * PARAMETERS:  walk_state      - Current state of the parse tree walk,
+ *                                the opcode of current operation should be
+ *                                AML_INT_NAMEPATH_OP
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Translate the -name_path- parse tree object to the equivalent
+ *              interpreter object, convert it to a value if needed, duplicate
+ *              it if needed, and push it onto the current result stack.
+ *
+ ****************************************************************************/
+
+acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state)
+{
+	acpi_status status = AE_OK;
+	union acpi_parse_object *op = walk_state->op;
+	union acpi_operand_object **operand = &walk_state->operands[0];
+	union acpi_operand_object *new_obj_desc;
+	u8 type;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_evaluate_name_path, walk_state);
+
+	if (!op->common.parent) {
+
+		/* This happens after certain exception processing */
+
+		goto exit;
+	}
+
+	if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
+	    (op->common.parent->common.aml_opcode == AML_VAR_PACKAGE_OP) ||
+	    (op->common.parent->common.aml_opcode == AML_REF_OF_OP)) {
+
+		/* TBD: Should we specify this feature as a bit of op_info->Flags of these opcodes? */
+
+		goto exit;
+	}
+
+	status = acpi_ds_create_operand(walk_state, op, 0);
+	if (ACPI_FAILURE(status)) {
+		goto exit;
+	}
+
+	if (op->common.flags & ACPI_PARSEOP_TARGET) {
+		new_obj_desc = *operand;
+		goto push_result;
+	}
+
+	type = ACPI_GET_OBJECT_TYPE(*operand);
+
+	status = acpi_ex_resolve_to_value(operand, walk_state);
+	if (ACPI_FAILURE(status)) {
+		goto exit;
+	}
+
+	if (type == ACPI_TYPE_INTEGER) {
+
+		/* It was incremented by acpi_ex_resolve_to_value */
+
+		acpi_ut_remove_reference(*operand);
+
+		status =
+		    acpi_ut_copy_iobject_to_iobject(*operand, &new_obj_desc,
+						    walk_state);
+		if (ACPI_FAILURE(status)) {
+			goto exit;
+		}
+	} else {
+		/*
+		 * The object was either newly created or is a
+		 * Namespace node - don't decrement its reference count.
+		 */
+		new_obj_desc = *operand;
+	}
+
+	/* Cleanup for name-path operand */
+
+	status = acpi_ds_obj_stack_pop(1, walk_state);
+	if (ACPI_FAILURE(status)) {
+		walk_state->result_obj = new_obj_desc;
+		goto exit;
+	}
+
+      push_result:
+
+	walk_state->result_obj = new_obj_desc;
+
+	status = acpi_ds_result_push(walk_state->result_obj, walk_state);
+	if (ACPI_SUCCESS(status)) {
+
+		/* Force to take it from stack */
+
+		op->common.flags |= ACPI_PARSEOP_IN_STACK;
+	}
+
+      exit:
+
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
new file mode 100644
index 0000000..350e665
--- /dev/null
+++ b/drivers/acpi/acpica/dswexec.c
@@ -0,0 +1,746 @@
+/******************************************************************************
+ *
+ * Module Name: dswexec - Dispatcher method execution callbacks;
+ *                        dispatch to interpreter.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "acnamesp.h"
+#include "acdebug.h"
+
+#define _COMPONENT          ACPI_DISPATCHER
+ACPI_MODULE_NAME("dswexec")
+
+/*
+ * Dispatch table for opcode classes
+ */
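+/*
+ * Each handler name encodes its operand signature: for example,
+ * acpi_ex_opcode_2A_1T_1R handles opcodes that take two Arguments and
+ * one Target and produce one Return value. The table is indexed by the
+ * opcode "type" from the opcode info table.
+ */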
+static ACPI_EXECUTE_OP acpi_gbl_op_type_dispatch[] = {
+	acpi_ex_opcode_0A_0T_1R,
+	acpi_ex_opcode_1A_0T_0R,
+	acpi_ex_opcode_1A_0T_1R,
+	acpi_ex_opcode_1A_1T_0R,
+	acpi_ex_opcode_1A_1T_1R,
+	acpi_ex_opcode_2A_0T_0R,
+	acpi_ex_opcode_2A_0T_1R,
+	acpi_ex_opcode_2A_1T_1R,
+	acpi_ex_opcode_2A_2T_1R,
+	acpi_ex_opcode_3A_0T_0R,
+	acpi_ex_opcode_3A_1T_1R,
+	acpi_ex_opcode_6A_0T_1R
+};
+
+/*****************************************************************************
+ *
+ * FUNCTION:    acpi_ds_get_predicate_value
+ *
+ * PARAMETERS:  walk_state      - Current state of the parse tree walk
+ *              result_obj      - if non-zero, pop result from result stack
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get the result of a predicate evaluation
+ *
+ ****************************************************************************/
+
+acpi_status
+acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
+			    union acpi_operand_object *result_obj)
+{
+	acpi_status status = AE_OK;
+	union acpi_operand_object *obj_desc;
+	union acpi_operand_object *local_obj_desc = NULL;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_get_predicate_value, walk_state);
+
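+	/*
+	 * Clear the conditional state while the predicate result is
+	 * retrieved and converted; it is set to ACPI_CONTROL_NORMAL
+	 * before returning
+	 */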
+	walk_state->control_state->common.state = 0;
+
+	if (result_obj) {
+		status = acpi_ds_result_pop(&obj_desc, walk_state);
+		if (ACPI_FAILURE(status)) {
+			ACPI_EXCEPTION((AE_INFO, status,
+					"Could not get result from predicate evaluation"));
+
+			return_ACPI_STATUS(status);
+		}
+	} else {
+		status = acpi_ds_create_operand(walk_state, walk_state->op, 0);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		status =
+		    acpi_ex_resolve_to_value(&walk_state->operands[0],
+					     walk_state);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		obj_desc = walk_state->operands[0];
+	}
+
+	if (!obj_desc) {
+		ACPI_ERROR((AE_INFO,
+			    "No predicate ObjDesc=%p State=%p",
+			    obj_desc, walk_state));
+
+		return_ACPI_STATUS(AE_AML_NO_OPERAND);
+	}
+
+	/*
+	 * Result of predicate evaluation must be an Integer
+	 * object. Implicitly convert the argument if necessary.
+	 */
+	status = acpi_ex_convert_to_integer(obj_desc, &local_obj_desc, 16);
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	if (ACPI_GET_OBJECT_TYPE(local_obj_desc) != ACPI_TYPE_INTEGER) {
+		ACPI_ERROR((AE_INFO,
+			    "Bad predicate (not an integer) ObjDesc=%p State=%p Type=%X",
+			    obj_desc, walk_state,
+			    ACPI_GET_OBJECT_TYPE(obj_desc)));
+
+		status = AE_AML_OPERAND_TYPE;
+		goto cleanup;
+	}
+
+	/* Truncate the predicate to 32-bits if necessary */
+
+	acpi_ex_truncate_for32bit_table(local_obj_desc);
+
+	/*
+	 * Save the result of the predicate evaluation on
+	 * the control stack
+	 */
+	if (local_obj_desc->integer.value) {
+		walk_state->control_state->common.value = TRUE;
+	} else {
+		/*
+		 * Predicate is FALSE, we will just toss the
+		 * rest of the package
+		 */
+		walk_state->control_state->common.value = FALSE;
+		status = AE_CTRL_FALSE;
+	}
+
+	/* Predicate can be used for an implicit return value */
+
+	(void)acpi_ds_do_implicit_return(local_obj_desc, walk_state, TRUE);
+
+      cleanup:
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%p\n",
+			  walk_state->control_state->common.value,
+			  walk_state->op));
+
+	/* Break to debugger to display result */
+
+	ACPI_DEBUGGER_EXEC(acpi_db_display_result_object
+			   (local_obj_desc, walk_state));
+
+	/*
+	 * Delete the predicate result object (we know that
+	 * we don't need it anymore)
+	 */
+	if (local_obj_desc != obj_desc) {
+		acpi_ut_remove_reference(local_obj_desc);
+	}
+	acpi_ut_remove_reference(obj_desc);
+
+	walk_state->control_state->common.state = ACPI_CONTROL_NORMAL;
+	return_ACPI_STATUS(status);
+}
+
+/*****************************************************************************
+ *
+ * FUNCTION:    acpi_ds_exec_begin_op
+ *
+ * PARAMETERS:  walk_state      - Current state of the parse tree walk
+ *              out_op          - Where to return op if a new one is created
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Descending callback used during the execution of control
+ *              methods.  This is where most operators and operands are
+ *              dispatched to the interpreter.
+ *
+ ****************************************************************************/
+
+acpi_status
+acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
+		      union acpi_parse_object **out_op)
+{
+	union acpi_parse_object *op;
+	acpi_status status = AE_OK;
+	u32 opcode_class;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_exec_begin_op, walk_state);
+
+	op = walk_state->op;
+	if (!op) {
+		status = acpi_ds_load2_begin_op(walk_state, out_op);
+		if (ACPI_FAILURE(status)) {
+			goto error_exit;
+		}
+
+		op = *out_op;
+		walk_state->op = op;
+		walk_state->opcode = op->common.aml_opcode;
+		walk_state->op_info =
+		    acpi_ps_get_opcode_info(op->common.aml_opcode);
+
+		if (acpi_ns_opens_scope(walk_state->op_info->object_type)) {
+			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+					  "(%s) Popping scope for Op %p\n",
+					  acpi_ut_get_type_name(walk_state->
+								op_info->
+								object_type),
+					  op));
+
+			status = acpi_ds_scope_stack_pop(walk_state);
+			if (ACPI_FAILURE(status)) {
+				goto error_exit;
+			}
+		}
+	}
+
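+	/* Nothing to execute yet if this Op is the start of the walk itself */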
+	if (op == walk_state->origin) {
+		if (out_op) {
+			*out_op = op;
+		}
+
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/*
+	 * If the previous opcode was a conditional, this opcode
+	 * must be the beginning of the associated predicate.
+	 * Save this knowledge in the current scope descriptor
+	 */
+	if ((walk_state->control_state) &&
+	    (walk_state->control_state->common.state ==
+	     ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "Exec predicate Op=%p State=%p\n", op,
+				  walk_state));
+
+		walk_state->control_state->common.state =
+		    ACPI_CONTROL_PREDICATE_EXECUTING;
+
+		/* Save start of predicate */
+
+		walk_state->control_state->control.predicate_op = op;
+	}
+
+	opcode_class = walk_state->op_info->class;
+
+	/* We want to send namepaths to the load code */
+
+	if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
+		opcode_class = AML_CLASS_NAMED_OBJECT;
+	}
+
+	/*
+	 * Handle the opcode based upon the opcode type
+	 */
+	switch (opcode_class) {
+	case AML_CLASS_CONTROL:
+
+		status = acpi_ds_exec_begin_control_op(walk_state, op);
+		break;
+
+	case AML_CLASS_NAMED_OBJECT:
+
+		if (walk_state->walk_type & ACPI_WALK_METHOD) {
+			/*
+			 * Found a named object declaration during method execution;
+			 * we must enter this object into the namespace.  The created
+			 * object is temporary and will be deleted upon completion of
+			 * the execution of this method.
+			 */
+			status = acpi_ds_load2_begin_op(walk_state, NULL);
+		}
+
+		break;
+
+	case AML_CLASS_EXECUTE:
+	case AML_CLASS_CREATE:
+
+		break;
+
+	default:
+		break;
+	}
+
+	/* Nothing to do here during method execution */
+
+	return_ACPI_STATUS(status);
+
+      error_exit:
+	status = acpi_ds_method_error(status, walk_state);
+	return_ACPI_STATUS(status);
+}
+
+/*****************************************************************************
+ *
+ * FUNCTION:    acpi_ds_exec_end_op
+ *
+ * PARAMETERS:  walk_state      - Current state of the parse tree walk
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Ascending callback used during the execution of control
+ *              methods.  The only thing we really need to do here is to
+ *              notice the beginning of IF, ELSE, and WHILE blocks.
+ *
+ ****************************************************************************/
+
+acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
+{
+	union acpi_parse_object *op;
+	acpi_status status = AE_OK;
+	u32 op_type;
+	u32 op_class;
+	union acpi_parse_object *next_op;
+	union acpi_parse_object *first_arg;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_exec_end_op, walk_state);
+
+	op = walk_state->op;
+	op_type = walk_state->op_info->type;
+	op_class = walk_state->op_info->class;
+
+	if (op_class == AML_CLASS_UNKNOWN) {
+		ACPI_ERROR((AE_INFO, "Unknown opcode %X",
+			    op->common.aml_opcode));
+		return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
+	}
+
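+	/* The first argument of this Op (if any) is chained off of Value.Arg */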
+	first_arg = op->common.value.arg;
+
+	/* Init the walk state */
+
+	walk_state->num_operands = 0;
+	walk_state->operand_index = 0;
+	walk_state->return_desc = NULL;
+	walk_state->result_obj = NULL;
+
+	/* Call debugger for single step support (DEBUG build only) */
+
+	ACPI_DEBUGGER_EXEC(status =
+			   acpi_db_single_step(walk_state, op, op_class));
+	ACPI_DEBUGGER_EXEC(if (ACPI_FAILURE(status)) {
+			   return_ACPI_STATUS(status);
+			   });
+
+	/* Decode the Opcode Class */
+
+	switch (op_class) {
+	case AML_CLASS_ARGUMENT:	/* Constants, literals, etc. */
+
+		if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
+			status = acpi_ds_evaluate_name_path(walk_state);
+			if (ACPI_FAILURE(status)) {
+				goto cleanup;
+			}
+		}
+		break;
+
+	case AML_CLASS_EXECUTE:	/* Most operators with arguments */
+
+		/* Build resolved operand stack */
+
+		status = acpi_ds_create_operands(walk_state, first_arg);
+		if (ACPI_FAILURE(status)) {
+			goto cleanup;
+		}
+
+		/*
+		 * All opcodes require operand resolution, with the only exceptions
+		 * being the object_type and size_of operators.
+		 */
+		if (!(walk_state->op_info->flags & AML_NO_OPERAND_RESOLVE)) {
+
+			/* Resolve all operands */
+
+			status = acpi_ex_resolve_operands(walk_state->opcode,
+							  &(walk_state->
+							    operands
+							    [walk_state->
+							     num_operands - 1]),
+							  walk_state);
+		}
+
+		if (ACPI_SUCCESS(status)) {
+			/*
+			 * Dispatch the request to the appropriate interpreter handler
+			 * routine.  There is one routine per opcode "type" based upon the
+			 * number of opcode arguments and return type.
+			 */
+			status =
+			    acpi_gbl_op_type_dispatch[op_type] (walk_state);
+		} else {
+			/*
+			 * Treat constructs of the form "Store(LocalX,LocalX)" as noops when the
+			 * Local is uninitialized.
+			 */
+			if ((status == AE_AML_UNINITIALIZED_LOCAL) &&
+			    (walk_state->opcode == AML_STORE_OP) &&
+			    (walk_state->operands[0]->common.type ==
+			     ACPI_TYPE_LOCAL_REFERENCE)
+			    && (walk_state->operands[1]->common.type ==
+				ACPI_TYPE_LOCAL_REFERENCE)
+			    && (walk_state->operands[0]->reference.class ==
+				walk_state->operands[1]->reference.class)
+			    && (walk_state->operands[0]->reference.value ==
+				walk_state->operands[1]->reference.value)) {
+				status = AE_OK;
+			} else {
+				ACPI_EXCEPTION((AE_INFO, status,
+						"While resolving operands for [%s]",
+						acpi_ps_get_opcode_name
+						(walk_state->opcode)));
+			}
+		}
+
+		/* Always delete the argument objects and clear the operand stack */
+
+		acpi_ds_clear_operands(walk_state);
+
+		/*
+		 * If a result object was returned from above, push it on the
+		 * current result stack
+		 */
+		if (ACPI_SUCCESS(status) && walk_state->result_obj) {
+			status =
+			    acpi_ds_result_push(walk_state->result_obj,
+						walk_state);
+		}
+		break;
+
+	default:
+
+		switch (op_type) {
+		case AML_TYPE_CONTROL:	/* Type 1 opcode, IF/ELSE/WHILE/NOOP */
+
+			/* 1 Operand, 0 external_result, 0 internal_result */
+
+			status = acpi_ds_exec_end_control_op(walk_state, op);
+
+			break;
+
+		case AML_TYPE_METHOD_CALL:
+
+			/*
+			 * If the method is referenced from within a package
+			 * declaration, it is not an invocation of the method, just
+			 * a reference to it.
+			 */
+			if ((op->asl.parent) &&
+			    ((op->asl.parent->asl.aml_opcode == AML_PACKAGE_OP)
+			     || (op->asl.parent->asl.aml_opcode ==
+				 AML_VAR_PACKAGE_OP))) {
+				ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+						  "Method Reference in a Package, Op=%p\n",
+						  op));
+
+				op->common.node =
+				    (struct acpi_namespace_node *)op->asl.value.
+				    arg->asl.node;
+				acpi_ut_add_reference(op->asl.value.arg->asl.
+						      node->object);
+				return_ACPI_STATUS(AE_OK);
+			}
+
+			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+					  "Method invocation, Op=%p\n", op));
+
+			/*
+			 * (AML_METHODCALL) Op->Asl.Value.Arg->Asl.Node contains
+			 * the method Node pointer
+			 */
+			/* next_op points to the op that holds the method name */
+
+			next_op = first_arg;
+
+			/* next_op points to first argument op */
+
+			next_op = next_op->common.next;
+
+			/*
+			 * Get the method's arguments and put them on the operand stack
+			 */
+			status = acpi_ds_create_operands(walk_state, next_op);
+			if (ACPI_FAILURE(status)) {
+				break;
+			}
+
+			/*
+			 * Since the operands will be passed to another control method,
+			 * we must resolve all local references here (Local variables,
+			 * arguments to *this* method, etc.)
+			 */
+			status = acpi_ds_resolve_operands(walk_state);
+			if (ACPI_FAILURE(status)) {
+
+				/* On error, clear all resolved operands */
+
+				acpi_ds_clear_operands(walk_state);
+				break;
+			}
+
+			/*
+			 * Tell the walk loop to preempt this running method and
+			 * execute the new method
+			 */
+			status = AE_CTRL_TRANSFER;
+
+			/*
+			 * Return now; we don't want to disturb anything,
+			 * especially the operand count!
+			 */
+			return_ACPI_STATUS(status);
+
+		case AML_TYPE_CREATE_FIELD:
+
+			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+					  "Executing CreateField Buffer/Index Op=%p\n",
+					  op));
+
+			status = acpi_ds_load2_end_op(walk_state);
+			if (ACPI_FAILURE(status)) {
+				break;
+			}
+
+			status =
+			    acpi_ds_eval_buffer_field_operands(walk_state, op);
+			break;
+
+		case AML_TYPE_CREATE_OBJECT:
+
+			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+					  "Executing CreateObject (Buffer/Package) Op=%p\n",
+					  op));
+
+			switch (op->common.parent->common.aml_opcode) {
+			case AML_NAME_OP:
+
+				/*
+				 * Put the Node on the object stack (Contains the ACPI Name
+				 * of this object)
+				 */
+				walk_state->operands[0] =
+				    (void *)op->common.parent->common.node;
+				walk_state->num_operands = 1;
+
+				status = acpi_ds_create_node(walk_state,
+							     op->common.parent->
+							     common.node,
+							     op->common.parent);
+				if (ACPI_FAILURE(status)) {
+					break;
+				}
+
+				/* Fall through */
+				/*lint -fallthrough */
+
+			case AML_INT_EVAL_SUBTREE_OP:
+
+				status =
+				    acpi_ds_eval_data_object_operands
+				    (walk_state, op,
+				     acpi_ns_get_attached_object(op->common.
+								 parent->common.
+								 node));
+				break;
+
+			default:
+
+				status =
+				    acpi_ds_eval_data_object_operands
+				    (walk_state, op, NULL);
+				break;
+			}
+
+			/*
+			 * If a result object was returned from above, push it on the
+			 * current result stack
+			 */
+			if (walk_state->result_obj) {
+				status =
+				    acpi_ds_result_push(walk_state->result_obj,
+							walk_state);
+			}
+			break;
+
+		case AML_TYPE_NAMED_FIELD:
+		case AML_TYPE_NAMED_COMPLEX:
+		case AML_TYPE_NAMED_SIMPLE:
+		case AML_TYPE_NAMED_NO_OBJ:
+
+			status = acpi_ds_load2_end_op(walk_state);
+			if (ACPI_FAILURE(status)) {
+				break;
+			}
+
+			if (op->common.aml_opcode == AML_REGION_OP) {
+				ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+						  "Executing OpRegion Address/Length Op=%p\n",
+						  op));
+
+				status =
+				    acpi_ds_eval_region_operands(walk_state,
+								 op);
+				if (ACPI_FAILURE(status)) {
+					break;
+				}
+			} else if (op->common.aml_opcode == AML_DATA_REGION_OP) {
+				ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+						  "Executing DataTableRegion Strings Op=%p\n",
+						  op));
+
+				status =
+				    acpi_ds_eval_table_region_operands
+				    (walk_state, op);
+				if (ACPI_FAILURE(status)) {
+					break;
+				}
+			} else if (op->common.aml_opcode == AML_BANK_FIELD_OP) {
+				ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+						  "Executing BankField Op=%p\n",
+						  op));
+
+				status =
+				    acpi_ds_eval_bank_field_operands(walk_state,
+								     op);
+				if (ACPI_FAILURE(status)) {
+					break;
+				}
+			}
+			break;
+
+		case AML_TYPE_UNDEFINED:
+
+			ACPI_ERROR((AE_INFO,
+				    "Undefined opcode type Op=%p", op));
+			return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
+
+		case AML_TYPE_BOGUS:
+
+			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+					  "Internal opcode=%X type Op=%p\n",
+					  walk_state->opcode, op));
+			break;
+
+		default:
+
+			ACPI_ERROR((AE_INFO,
+				    "Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p",
+				    op_class, op_type, op->common.aml_opcode,
+				    op));
+
+			status = AE_NOT_IMPLEMENTED;
+			break;
+		}
+	}
+
+	/*
+	 * ACPI 2.0 support for 64-bit integers: Truncate numeric
+	 * result value if we are executing from a 32-bit ACPI table
+	 */
+	acpi_ex_truncate_for32bit_table(walk_state->result_obj);
+
+	/*
+	 * Check if we just completed the evaluation of a
+	 * conditional predicate
+	 */
+	if ((ACPI_SUCCESS(status)) &&
+	    (walk_state->control_state) &&
+	    (walk_state->control_state->common.state ==
+	     ACPI_CONTROL_PREDICATE_EXECUTING) &&
+	    (walk_state->control_state->control.predicate_op == op)) {
+		status =
+		    acpi_ds_get_predicate_value(walk_state,
+						walk_state->result_obj);
+		walk_state->result_obj = NULL;
+	}
+
+      cleanup:
+
+	if (walk_state->result_obj) {
+
+		/* Break to debugger to display result */
+
+		ACPI_DEBUGGER_EXEC(acpi_db_display_result_object
+				   (walk_state->result_obj, walk_state));
+
+		/*
+		 * Delete the result op if and only if:
+		 * Parent will not use the result -- such as any
+		 * non-nested type2 op in a method (parent will be method)
+		 */
+		acpi_ds_delete_result_if_not_used(op, walk_state->result_obj,
+						  walk_state);
+	}
+#ifdef _UNDER_DEVELOPMENT
+
+	if (walk_state->parser_state.aml == walk_state->parser_state.aml_end) {
+		acpi_db_method_end(walk_state);
+	}
+#endif
+
+	/* Invoke exception handler on error */
+
+	if (ACPI_FAILURE(status)) {
+		status = acpi_ds_method_error(status, walk_state);
+	}
+
+	/* Always clear the object stack */
+
+	walk_state->num_operands = 0;
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
new file mode 100644
index 0000000..3023cea
--- /dev/null
+++ b/drivers/acpi/acpica/dswload.c
@@ -0,0 +1,1203 @@
+/******************************************************************************
+ *
+ * Module Name: dswload - Dispatcher namespace load callbacks
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "acnamesp.h"
+#include "acevents.h"
+
+#ifdef ACPI_ASL_COMPILER
+#include <acpi/acdisasm.h>
+#endif
+
+#define _COMPONENT          ACPI_DISPATCHER
+ACPI_MODULE_NAME("dswload")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_init_callbacks
+ *
+ * PARAMETERS:  walk_state      - Current state of the parse tree walk
+ *              pass_number     - 1, 2, or 3
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Init walk state callbacks
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number)
+{
+
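+	/*
+	 * Passes 1 and 2 install the namespace load callbacks; pass 3
+	 * installs the execution callbacks (unless method execution has
+	 * been compiled out)
+	 */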
+	switch (pass_number) {
+	case 1:
+		walk_state->parse_flags = ACPI_PARSE_LOAD_PASS1 |
+		    ACPI_PARSE_DELETE_TREE;
+		walk_state->descending_callback = acpi_ds_load1_begin_op;
+		walk_state->ascending_callback = acpi_ds_load1_end_op;
+		break;
+
+	case 2:
+		walk_state->parse_flags = ACPI_PARSE_LOAD_PASS1 |
+		    ACPI_PARSE_DELETE_TREE;
+		walk_state->descending_callback = acpi_ds_load2_begin_op;
+		walk_state->ascending_callback = acpi_ds_load2_end_op;
+		break;
+
+	case 3:
+#ifndef ACPI_NO_METHOD_EXECUTION
+		walk_state->parse_flags |= ACPI_PARSE_EXECUTE |
+		    ACPI_PARSE_DELETE_TREE;
+		walk_state->descending_callback = acpi_ds_exec_begin_op;
+		walk_state->ascending_callback = acpi_ds_exec_end_op;
+#endif
+		break;
+
+	default:
+		return (AE_BAD_PARAMETER);
+	}
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_load1_begin_op
+ *
+ * PARAMETERS:  walk_state      - Current state of the parse tree walk
+ *              out_op          - Where to return op if a new one is created
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Descending callback used during the loading of ACPI tables.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
+		       union acpi_parse_object ** out_op)
+{
+	union acpi_parse_object *op;
+	struct acpi_namespace_node *node;
+	acpi_status status;
+	acpi_object_type object_type;
+	char *path;
+	u32 flags;
+
+	ACPI_FUNCTION_TRACE(ds_load1_begin_op);
+
+	op = walk_state->op;
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
+			  walk_state));
+
+	/* We are only interested in opcodes that have an associated name */
+
+	if (op) {
+		if (!(walk_state->op_info->flags & AML_NAMED)) {
+			*out_op = op;
+			return_ACPI_STATUS(AE_OK);
+		}
+
+		/* Check if this object has already been installed in the namespace */
+
+		if (op->common.node) {
+			*out_op = op;
+			return_ACPI_STATUS(AE_OK);
+		}
+	}
+
+	path = acpi_ps_get_next_namestring(&walk_state->parser_state);
+
+	/* Map the raw opcode into an internal object type */
+
+	object_type = walk_state->op_info->object_type;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "State=%p Op=%p [%s]\n", walk_state, op,
+			  acpi_ut_get_type_name(object_type)));
+
+	switch (walk_state->opcode) {
+	case AML_SCOPE_OP:
+
+		/*
+		 * The target name of the Scope() operator must exist at this point so
+		 * that we can actually open the scope to enter new names underneath it.
+		 * Allow search-to-root for single namesegs.
+		 */
+		status =
+		    acpi_ns_lookup(walk_state->scope_info, path, object_type,
+				   ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
+				   walk_state, &(node));
+#ifdef ACPI_ASL_COMPILER
+		if (status == AE_NOT_FOUND) {
+			/*
+			 * Table disassembly:
+			 * Target of Scope() not found. Generate an External for it, and
+			 * insert the name into the namespace.
+			 */
+			acpi_dm_add_to_external_list(path, ACPI_TYPE_DEVICE, 0);
+			status =
+			    acpi_ns_lookup(walk_state->scope_info, path,
+					   object_type, ACPI_IMODE_LOAD_PASS1,
+					   ACPI_NS_SEARCH_PARENT, walk_state,
+					   &node);
+		}
+#endif
+		if (ACPI_FAILURE(status)) {
+			ACPI_ERROR_NAMESPACE(path, status);
+			return_ACPI_STATUS(status);
+		}
+
+		/*
+		 * Check to make sure that the target is
+		 * one of the opcodes that actually opens a scope
+		 */
+		switch (node->type) {
+		case ACPI_TYPE_ANY:
+		case ACPI_TYPE_LOCAL_SCOPE:	/* Scope  */
+		case ACPI_TYPE_DEVICE:
+		case ACPI_TYPE_POWER:
+		case ACPI_TYPE_PROCESSOR:
+		case ACPI_TYPE_THERMAL:
+
+			/* These are acceptable types */
+			break;
+
+		case ACPI_TYPE_INTEGER:
+		case ACPI_TYPE_STRING:
+		case ACPI_TYPE_BUFFER:
+
+			/*
+			 * These types we will allow, but we will change the type. This
+			 * enables some existing code of the form:
+			 *
+			 *  Name (DEB, 0)
+			 *  Scope (DEB) { ... }
+			 *
+			 * Note: silently change the type here. On the second pass, we will report
+			 * a warning
+			 */
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+					  "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)\n",
+					  path,
+					  acpi_ut_get_type_name(node->type)));
+
+			node->type = ACPI_TYPE_ANY;
+			walk_state->scope_info->common.value = ACPI_TYPE_ANY;
+			break;
+
+		default:
+
+			/* All other types are an error */
+
+			ACPI_ERROR((AE_INFO,
+				    "Invalid type (%s) for target of Scope operator [%4.4s] (Cannot override)",
+				    acpi_ut_get_type_name(node->type), path));
+
+			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+		}
+		break;
+
+	default:
+		/*
+		 * For all other named opcodes, we will enter the name into
+		 * the namespace.
+		 *
+		 * Setup the search flags.
+		 * Since we are entering a name into the namespace, we do not want to
+		 * enable the search-to-root upsearch.
+		 *
+		 * There are only two conditions where it is acceptable that the name
+		 * already exists:
+		 *    1) the Scope() operator can reopen a scoping object that was
+		 *       previously defined (Scope, Method, Device, etc.)
+		 *    2) Whenever we are parsing a deferred opcode (op_region, Buffer,
+		 *       buffer_field, or Package), the name of the object is already
+		 *       in the namespace.
+		 */
+		if (walk_state->deferred_node) {
+
+			/* This name is already in the namespace, get the node */
+
+			node = walk_state->deferred_node;
+			status = AE_OK;
+			break;
+		}
+
+		/*
+		 * If we are executing a method, do not create any namespace objects
+		 * during the load phase, only during execution.
+		 */
+		if (walk_state->method_node) {
+			node = NULL;
+			status = AE_OK;
+			break;
+		}
+
+		flags = ACPI_NS_NO_UPSEARCH;
+		if ((walk_state->opcode != AML_SCOPE_OP) &&
+		    (!(walk_state->parse_flags & ACPI_PARSE_DEFERRED_OP))) {
+			flags |= ACPI_NS_ERROR_IF_FOUND;
+			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+					  "[%s] Cannot already exist\n",
+					  acpi_ut_get_type_name(object_type)));
+		} else {
+			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+					  "[%s] Both Find or Create allowed\n",
+					  acpi_ut_get_type_name(object_type)));
+		}
+
+		/*
+		 * Enter the named type into the internal namespace. We enter the name
+		 * as we go downward in the parse tree. Any necessary subobjects that
+		 * involve arguments to the opcode must be created as we go back up the
+		 * parse tree later.
+		 */
+		status =
+		    acpi_ns_lookup(walk_state->scope_info, path, object_type,
+				   ACPI_IMODE_LOAD_PASS1, flags, walk_state,
+				   &node);
+		if (ACPI_FAILURE(status)) {
+			if (status == AE_ALREADY_EXISTS) {
+
+				/* The name already exists in this scope */
+
+				if (node->flags & ANOBJ_IS_EXTERNAL) {
+					/*
+					 * Allow one create on an object or segment that was
+					 * previously declared External
+					 */
+					node->flags &= ~ANOBJ_IS_EXTERNAL;
+					node->type = (u8) object_type;
+
+					/* Just retyped a node, probably will need to open a scope */
+
+					if (acpi_ns_opens_scope(object_type)) {
+						status =
+						    acpi_ds_scope_stack_push
+						    (node, object_type,
+						     walk_state);
+						if (ACPI_FAILURE(status)) {
+							return_ACPI_STATUS
+							    (status);
+						}
+					}
+
+					status = AE_OK;
+				}
+			}
+
+			if (ACPI_FAILURE(status)) {
+				ACPI_ERROR_NAMESPACE(path, status);
+				return_ACPI_STATUS(status);
+			}
+		}
+		break;
+	}
+
+	/* Common exit */
+
+	if (!op) {
+
+		/* Create a new op */
+
+		op = acpi_ps_alloc_op(walk_state->opcode);
+		if (!op) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+	}
+
+	/* Initialize the op */
+
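+	/* Keep the namepath in the op for no-execution/constant-eval builds */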
+#if (defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY))
+	op->named.path = ACPI_CAST_PTR(u8, path);
+#endif
+
+	if (node) {
+		/*
+		 * Put the Node in the "op" object that the parser uses, so we
+		 * can get it again quickly when this scope is closed
+		 */
+		op->common.node = node;
+		op->named.name = node->name.integer;
+	}
+
+	acpi_ps_append_arg(acpi_ps_get_parent_scope(&walk_state->parser_state),
+			   op);
+	*out_op = op;
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_load1_end_op
+ *
+ * PARAMETERS:  walk_state      - Current state of the parse tree walk
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Ascending callback used during the loading of the namespace,
+ *              both control methods and everything else.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
+{
+	union acpi_parse_object *op;
+	acpi_object_type object_type;
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(ds_load1_end_op);
+
+	op = walk_state->op;
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
+			  walk_state));
+
+	/* We are only interested in opcodes that have an associated name */
+
+	if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Get the object type to determine if we should pop the scope */
+
+	object_type = walk_state->op_info->object_type;
+
+#ifndef ACPI_NO_METHOD_EXECUTION
+	if (walk_state->op_info->flags & AML_FIELD) {
+		/*
+		 * If we are executing a method, do not create any namespace objects
+		 * during the load phase, only during execution.
+		 */
+		if (!walk_state->method_node) {
+			if (walk_state->opcode == AML_FIELD_OP ||
+			    walk_state->opcode == AML_BANK_FIELD_OP ||
+			    walk_state->opcode == AML_INDEX_FIELD_OP) {
+				status =
+				    acpi_ds_init_field_objects(op, walk_state);
+			}
+		}
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * If we are executing a method, do not create any namespace objects
+	 * during the load phase, only during execution.
+	 */
+	if (!walk_state->method_node) {
+		if (op->common.aml_opcode == AML_REGION_OP) {
+			status =
+			    acpi_ex_create_region(op->named.data,
+						  op->named.length,
+						  (acpi_adr_space_type) ((op->
+									  common.
+									  value.
+									  arg)->
+									 common.
+									 value.
+									 integer),
+						  walk_state);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+		} else if (op->common.aml_opcode == AML_DATA_REGION_OP) {
+			status =
+			    acpi_ex_create_region(op->named.data,
+						  op->named.length,
+						  REGION_DATA_TABLE,
+						  walk_state);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+		}
+	}
+#endif
+
+	if (op->common.aml_opcode == AML_NAME_OP) {
+
+		/* For Name opcode, get the object type from the argument */
+
+		if (op->common.value.arg) {
+			object_type = (acpi_ps_get_opcode_info((op->common.
+								value.arg)->
+							       common.
+							       aml_opcode))->
+			    object_type;
+
+			/* Set node type if we have a namespace node */
+
+			if (op->common.node) {
+				op->common.node->type = (u8) object_type;
+			}
+		}
+	}
+
+	/*
+	 * If we are executing a method, do not create any namespace objects
+	 * during the load phase, only during execution.
+	 */
+	if (!walk_state->method_node) {
+		if (op->common.aml_opcode == AML_METHOD_OP) {
+			/*
+			 * method_op pkg_length name_string method_flags term_list
+			 *
+			 * Note: We must create the method node/object pair as soon as we
+			 * see the method declaration. This allows later pass1 parsing
+			 * of invocations of the method (need to know the number of
+			 * arguments.)
+			 */
+			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+					  "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
+					  walk_state, op, op->named.node));
+
+			if (!acpi_ns_get_attached_object(op->named.node)) {
+				walk_state->operands[0] =
+				    ACPI_CAST_PTR(void, op->named.node);
+				walk_state->num_operands = 1;
+
+				status =
+				    acpi_ds_create_operands(walk_state,
+							    op->common.value.
+							    arg);
+				if (ACPI_SUCCESS(status)) {
+					status =
+					    acpi_ex_create_method(op->named.
+								  data,
+								  op->named.
+								  length,
+								  walk_state);
+				}
+
+				walk_state->operands[0] = NULL;
+				walk_state->num_operands = 0;
+
+				if (ACPI_FAILURE(status)) {
+					return_ACPI_STATUS(status);
+				}
+			}
+		}
+	}
+
+	/* Pop the scope stack (only if loading a table) */
+
+	if (!walk_state->method_node && acpi_ns_opens_scope(object_type)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "(%s): Popping scope for Op %p\n",
+				  acpi_ut_get_type_name(object_type), op));
+
+		status = acpi_ds_scope_stack_pop(walk_state);
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_load2_begin_op
+ *
+ * PARAMETERS:  walk_state      - Current state of the parse tree walk
+ *              out_op          - Where to return op if a new one is created
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Descending callback used during the loading of ACPI tables.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
+		       union acpi_parse_object **out_op)
+{
+	union acpi_parse_object *op;
+	struct acpi_namespace_node *node;
+	acpi_status status;
+	acpi_object_type object_type;
+	char *buffer_ptr;
+	u32 flags;
+
+	ACPI_FUNCTION_TRACE(ds_load2_begin_op);
+
+	op = walk_state->op;
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
+			  walk_state));
+
+	if (op) {
+		if ((walk_state->control_state) &&
+		    (walk_state->control_state->common.state ==
+		     ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
+
+			/* We are executing a while loop outside of a method */
+
+			status = acpi_ds_exec_begin_op(walk_state, out_op);
+			return_ACPI_STATUS(status);
+		}
+
+		/* We only care about Namespace opcodes here */
+
+		if ((!(walk_state->op_info->flags & AML_NSOPCODE) &&
+		     (walk_state->opcode != AML_INT_NAMEPATH_OP)) ||
+		    (!(walk_state->op_info->flags & AML_NAMED))) {
+#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE
+			if ((walk_state->op_info->class == AML_CLASS_EXECUTE) ||
+			    (walk_state->op_info->class == AML_CLASS_CONTROL)) {
+				ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+						  "Begin/EXEC: %s (fl %8.8X)\n",
+						  walk_state->op_info->name,
+						  walk_state->op_info->flags));
+
+				/* Executing a type1 or type2 opcode outside of a method */
+
+				status =
+				    acpi_ds_exec_begin_op(walk_state, out_op);
+				return_ACPI_STATUS(status);
+			}
+#endif
+			return_ACPI_STATUS(AE_OK);
+		}
+
+		/* Get the name we are going to enter or lookup in the namespace */
+
+		if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
+
+			/* For Namepath op, get the path string */
+
+			buffer_ptr = op->common.value.string;
+			if (!buffer_ptr) {
+
+				/* No name, just exit */
+
+				return_ACPI_STATUS(AE_OK);
+			}
+		} else {
+			/* Get name from the op */
+
+			buffer_ptr = ACPI_CAST_PTR(char, &op->named.name);
+		}
+	} else {
+		/* Get the namestring from the raw AML */
+
+		buffer_ptr =
+		    acpi_ps_get_next_namestring(&walk_state->parser_state);
+	}
+
+	/* Map the opcode into an internal object type */
+
+	object_type = walk_state->op_info->object_type;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "State=%p Op=%p Type=%X\n", walk_state, op,
+			  object_type));
+
+	switch (walk_state->opcode) {
+	case AML_FIELD_OP:
+	case AML_BANK_FIELD_OP:
+	case AML_INDEX_FIELD_OP:
+
+		node = NULL;
+		status = AE_OK;
+		break;
+
+	case AML_INT_NAMEPATH_OP:
+		/*
+		 * The name_path is an object reference to an existing object.
+		 * Don't enter the name into the namespace, but look it up
+		 * for use later.
+		 */
+		status =
+		    acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
+				   object_type, ACPI_IMODE_EXECUTE,
+				   ACPI_NS_SEARCH_PARENT, walk_state, &(node));
+		break;
+
+	case AML_SCOPE_OP:
+		/*
+		 * The Path is an object reference to an existing object.
+		 * Don't enter the name into the namespace, but look it up
+		 * for use later.
+		 */
+		status =
+		    acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
+				   object_type, ACPI_IMODE_EXECUTE,
+				   ACPI_NS_SEARCH_PARENT, walk_state, &(node));
+		if (ACPI_FAILURE(status)) {
+#ifdef ACPI_ASL_COMPILER
+			if (status == AE_NOT_FOUND) {
+				status = AE_OK;
+			} else {
+				ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+			}
+#else
+			ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+#endif
+			return_ACPI_STATUS(status);
+		}
+
+		/*
+		 * We must check to make sure that the target is
+		 * one of the opcodes that actually opens a scope
+		 */
+		switch (node->type) {
+		case ACPI_TYPE_ANY:
+		case ACPI_TYPE_LOCAL_SCOPE:	/* Scope */
+		case ACPI_TYPE_DEVICE:
+		case ACPI_TYPE_POWER:
+		case ACPI_TYPE_PROCESSOR:
+		case ACPI_TYPE_THERMAL:
+
+			/* These are acceptable types */
+			break;
+
+		case ACPI_TYPE_INTEGER:
+		case ACPI_TYPE_STRING:
+		case ACPI_TYPE_BUFFER:
+
+			/*
+			 * These types we will allow, but we will change the type. This
+			 * enables some existing code of the form:
+			 *
+			 *  Name (DEB, 0)
+			 *  Scope (DEB) { ... }
+			 */
+			ACPI_WARNING((AE_INFO,
+				      "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)",
+				      buffer_ptr,
+				      acpi_ut_get_type_name(node->type)));
+
+			node->type = ACPI_TYPE_ANY;
+			walk_state->scope_info->common.value = ACPI_TYPE_ANY;
+			break;
+
+		default:
+
+			/* All other types are an error */
+
+			ACPI_ERROR((AE_INFO,
+				    "Invalid type (%s) for target of Scope operator [%4.4s]",
+				    acpi_ut_get_type_name(node->type),
+				    buffer_ptr));
+
+			return (AE_AML_OPERAND_TYPE);
+		}
+		break;
+
+	default:
+
+		/* All other opcodes */
+
+		if (op && op->common.node) {
+
+			/* This op/node was previously entered into the namespace */
+
+			node = op->common.node;
+
+			if (acpi_ns_opens_scope(object_type)) {
+				status =
+				    acpi_ds_scope_stack_push(node, object_type,
+							     walk_state);
+				if (ACPI_FAILURE(status)) {
+					return_ACPI_STATUS(status);
+				}
+			}
+
+			return_ACPI_STATUS(AE_OK);
+		}
+
+		/*
+		 * Enter the named type into the internal namespace. We enter the name
+		 * as we go downward in the parse tree. Any necessary subobjects that
+		 * involve arguments to the opcode must be created as we go back up the
+		 * parse tree later.
+		 *
+		 * Note: Name may already exist if we are executing a deferred opcode.
+		 */
+		if (walk_state->deferred_node) {
+
+			/* This name is already in the namespace, get the node */
+
+			node = walk_state->deferred_node;
+			status = AE_OK;
+			break;
+		}
+
+		flags = ACPI_NS_NO_UPSEARCH;
+		if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
+
+			/* Execution mode, node cannot already exist, node is temporary */
+
+			flags |= (ACPI_NS_ERROR_IF_FOUND | ACPI_NS_TEMPORARY);
+		}
+
+		/* Add new entry or lookup existing entry */
+
+		status =
+		    acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
+				   object_type, ACPI_IMODE_LOAD_PASS2, flags,
+				   walk_state, &node);
+
+		if (ACPI_SUCCESS(status) && (flags & ACPI_NS_TEMPORARY)) {
+			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+					  "***New Node [%4.4s] %p is temporary\n",
+					  acpi_ut_get_node_name(node), node));
+		}
+		break;
+	}
+
+	if (ACPI_FAILURE(status)) {
+		ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+		return_ACPI_STATUS(status);
+	}
+
+	if (!op) {
+
+		/* Create a new op */
+
+		op = acpi_ps_alloc_op(walk_state->opcode);
+		if (!op) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		/* Initialize the new op */
+
+		if (node) {
+			op->named.name = node->name.integer;
+		}
+		*out_op = op;
+	}
+
+	/*
+	 * Put the Node in the "op" object that the parser uses, so we
+	 * can get it again quickly when this scope is closed
+	 */
+	op->common.node = node;
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_load2_end_op
+ *
+ * PARAMETERS:  walk_state      - Current state of the parse tree walk
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Ascending callback used during the loading of the namespace,
+ *              both control methods and everything else.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
+{
+	union acpi_parse_object *op;
+	acpi_status status = AE_OK;
+	acpi_object_type object_type;
+	struct acpi_namespace_node *node;
+	union acpi_parse_object *arg;
+	struct acpi_namespace_node *new_node;
+#ifndef ACPI_NO_METHOD_EXECUTION
+	u32 i;
+	u8 region_space;
+#endif
+
+	ACPI_FUNCTION_TRACE(ds_load2_end_op);
+
+	op = walk_state->op;
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Opcode [%s] Op %p State %p\n",
+			  walk_state->op_info->name, op, walk_state));
+
+	/* Check if opcode had an associated namespace object */
+
+	if (!(walk_state->op_info->flags & AML_NSOBJECT)) {
+#ifndef ACPI_NO_METHOD_EXECUTION
+#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE
+		/* No namespace object. Executable opcode? */
+
+		if ((walk_state->op_info->class == AML_CLASS_EXECUTE) ||
+		    (walk_state->op_info->class == AML_CLASS_CONTROL)) {
+			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+					  "End/EXEC:   %s (fl %8.8X)\n",
+					  walk_state->op_info->name,
+					  walk_state->op_info->flags));
+
+			/* Executing a type1 or type2 opcode outside of a method */
+
+			status = acpi_ds_exec_end_op(walk_state);
+			return_ACPI_STATUS(status);
+		}
+#endif
+#endif
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	if (op->common.aml_opcode == AML_SCOPE_OP) {
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "Ending scope Op=%p State=%p\n", op,
+				  walk_state));
+	}
+
+	object_type = walk_state->op_info->object_type;
+
+	/*
+	 * Get the Node/name from the earlier lookup
+	 * (It was saved in the *op structure)
+	 */
+	node = op->common.node;
+
+	/*
+	 * Put the Node on the object stack (Contains the ACPI Name of
+	 * this object)
+	 */
+	walk_state->operands[0] = (void *)node;
+	walk_state->num_operands = 1;
+
+	/* Pop the scope stack */
+
+	if (acpi_ns_opens_scope(object_type) &&
+	    (op->common.aml_opcode != AML_INT_METHODCALL_OP)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "(%s) Popping scope for Op %p\n",
+				  acpi_ut_get_type_name(object_type), op));
+
+		status = acpi_ds_scope_stack_pop(walk_state);
+		if (ACPI_FAILURE(status)) {
+			goto cleanup;
+		}
+	}
+
+	/*
+	 * Named operations are as follows:
+	 *
+	 * AML_ALIAS
+	 * AML_BANKFIELD
+	 * AML_CREATEBITFIELD
+	 * AML_CREATEBYTEFIELD
+	 * AML_CREATEDWORDFIELD
+	 * AML_CREATEFIELD
+	 * AML_CREATEQWORDFIELD
+	 * AML_CREATEWORDFIELD
+	 * AML_DATA_REGION
+	 * AML_DEVICE
+	 * AML_EVENT
+	 * AML_FIELD
+	 * AML_INDEXFIELD
+	 * AML_METHOD
+	 * AML_METHODCALL
+	 * AML_MUTEX
+	 * AML_NAME
+	 * AML_NAMEDFIELD
+	 * AML_OPREGION
+	 * AML_POWERRES
+	 * AML_PROCESSOR
+	 * AML_SCOPE
+	 * AML_THERMALZONE
+	 */
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "Create-Load [%s] State=%p Op=%p NamedObj=%p\n",
+			  acpi_ps_get_opcode_name(op->common.aml_opcode),
+			  walk_state, op, node));
+
+	/* Decode the opcode */
+
+	arg = op->common.value.arg;
+
+	switch (walk_state->op_info->type) {
+#ifndef ACPI_NO_METHOD_EXECUTION
+
+	case AML_TYPE_CREATE_FIELD:
+		/*
+		 * Create the field object, but the field buffer and index must
+		 * be evaluated later during the execution phase
+		 */
+		status = acpi_ds_create_buffer_field(op, walk_state);
+		break;
+
+	case AML_TYPE_NAMED_FIELD:
+		/*
+		 * If we are executing a method, initialize the field
+		 */
+		if (walk_state->method_node) {
+			status = acpi_ds_init_field_objects(op, walk_state);
+		}
+
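+		/* Create the field object appropriate for this specific field opcode */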
+		switch (op->common.aml_opcode) {
+		case AML_INDEX_FIELD_OP:
+
+			status =
+			    acpi_ds_create_index_field(op,
+						       (acpi_handle) arg->
+						       common.node, walk_state);
+			break;
+
+		case AML_BANK_FIELD_OP:
+
+			status =
+			    acpi_ds_create_bank_field(op, arg->common.node,
+						      walk_state);
+			break;
+
+		case AML_FIELD_OP:
+
+			status =
+			    acpi_ds_create_field(op, arg->common.node,
+						 walk_state);
+			break;
+
+		default:
+			/* All NAMED_FIELD opcodes must be handled above */
+			break;
+		}
+		break;
+
+	case AML_TYPE_NAMED_SIMPLE:
+
+		status = acpi_ds_create_operands(walk_state, arg);
+		if (ACPI_FAILURE(status)) {
+			goto cleanup;
+		}
+
+		switch (op->common.aml_opcode) {
+		case AML_PROCESSOR_OP:
+
+			status = acpi_ex_create_processor(walk_state);
+			break;
+
+		case AML_POWER_RES_OP:
+
+			status = acpi_ex_create_power_resource(walk_state);
+			break;
+
+		case AML_MUTEX_OP:
+
+			status = acpi_ex_create_mutex(walk_state);
+			break;
+
+		case AML_EVENT_OP:
+
+			status = acpi_ex_create_event(walk_state);
+			break;
+
+		case AML_ALIAS_OP:
+
+			status = acpi_ex_create_alias(walk_state);
+			break;
+
+		default:
+			/* Unknown opcode */
+
+			status = AE_OK;
+			goto cleanup;
+		}
+
+		/* Delete the operands created above (operand 0 is the namespace Node) */
+
+		for (i = 1; i < walk_state->num_operands; i++) {
+			acpi_ut_remove_reference(walk_state->operands[i]);
+			walk_state->operands[i] = NULL;
+		}
+
+		break;
+#endif				/* ACPI_NO_METHOD_EXECUTION */
+
+	case AML_TYPE_NAMED_COMPLEX:
+
+		switch (op->common.aml_opcode) {
+#ifndef ACPI_NO_METHOD_EXECUTION
+		case AML_REGION_OP:
+		case AML_DATA_REGION_OP:
+
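+			/*
+			 * The SpaceId is the first argument of OperationRegion;
+			 * DataTableRegion always uses the Data Table address space.
+			 */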
+			if (op->common.aml_opcode == AML_REGION_OP) {
+				region_space = (acpi_adr_space_type)
+				    ((op->common.value.arg)->common.value.
+				     integer);
+			} else {
+				region_space = REGION_DATA_TABLE;
+			}
+
+			/*
+			 * If we are executing a method, initialize the region
+			 */
+			if (walk_state->method_node) {
+				status =
+				    acpi_ex_create_region(op->named.data,
+							  op->named.length,
+							  region_space,
+							  walk_state);
+				if (ACPI_FAILURE(status)) {
+					return (status);
+				}
+			}
+
+			/*
+			 * The op_region is not fully parsed at this time. The only valid
+			 * argument is the space_id. (We must save the address of the
+			 * AML for the address and length operands.)
+			 */
+
+			/*
+			 * If we have a valid region, initialize it
+			 * Namespace is NOT locked at this point.
+			 */
+			status =
+			    acpi_ev_initialize_region
+			    (acpi_ns_get_attached_object(node), FALSE);
+			if (ACPI_FAILURE(status)) {
+				/*
+				 *  If AE_NOT_EXIST is returned, it is not fatal
+				 *  because many regions get created before a handler
+				 *  is installed for said region.
+				 */
+				if (AE_NOT_EXIST == status) {
+					status = AE_OK;
+				}
+			}
+			break;
+
+		case AML_NAME_OP:
+
+			status = acpi_ds_create_node(walk_state, node, op);
+			break;
+
+		case AML_METHOD_OP:
+			/*
+			 * method_op pkg_length name_string method_flags term_list
+			 *
+			 * Note: We must create the method node/object pair as soon as we
+			 * see the method declaration. This allows later pass1 parsing
+			 * of invocations of the method (need to know the number of
+			 * arguments.)
+			 */
+			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+					  "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
+					  walk_state, op, op->named.node));
+
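+			/*
+			 * Create and attach the method object only if the Method
+			 * node does not already have one attached.
+			 */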
+			if (!acpi_ns_get_attached_object(op->named.node)) {
+				walk_state->operands[0] =
+				    ACPI_CAST_PTR(void, op->named.node);
+				walk_state->num_operands = 1;
+
+				status =
+				    acpi_ds_create_operands(walk_state,
+							    op->common.value.
+							    arg);
+				if (ACPI_SUCCESS(status)) {
+					status =
+					    acpi_ex_create_method(op->named.
+								  data,
+								  op->named.
+								  length,
+								  walk_state);
+				}
+				walk_state->operands[0] = NULL;
+				walk_state->num_operands = 0;
+
+				if (ACPI_FAILURE(status)) {
+					return_ACPI_STATUS(status);
+				}
+			}
+			break;
+
+#endif				/* ACPI_NO_METHOD_EXECUTION */
+
+		default:
+			/* All NAMED_COMPLEX opcodes must be handled above */
+			break;
+		}
+		break;
+
+	case AML_CLASS_INTERNAL:
+
+		/* case AML_INT_NAMEPATH_OP: */
+		break;
+
+	case AML_CLASS_METHOD_CALL:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "RESOLVING-MethodCall: State=%p Op=%p NamedObj=%p\n",
+				  walk_state, op, node));
+
+		/*
+		 * Lookup the method name and save the Node
+		 */
+		status =
+		    acpi_ns_lookup(walk_state->scope_info,
+				   arg->common.value.string, ACPI_TYPE_ANY,
+				   ACPI_IMODE_LOAD_PASS2,
+				   ACPI_NS_SEARCH_PARENT |
+				   ACPI_NS_DONT_OPEN_SCOPE, walk_state,
+				   &(new_node));
+		if (ACPI_SUCCESS(status)) {
+			/*
+			 * Make sure that what we found is indeed a method.
+			 * We deliberately searched with ACPI_TYPE_ANY so that we could
+			 * see whether the name resolves at all.
+			 */
+			if (new_node->type != ACPI_TYPE_METHOD) {
+				status = AE_AML_OPERAND_TYPE;
+			}
+
+			/* We could put the returned object (Node) on the object stack for
+			 * later, but for now, we will put it in the "op" object that the
+			 * parser uses, so we can get it again at the end of this scope
+			 */
+			op->common.node = new_node;
+		} else {
+			ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+		}
+		break;
+
+	default:
+		break;
+	}
+
+      cleanup:
+
+	/* Remove the Node pushed at the very beginning */
+
+	walk_state->operands[0] = NULL;
+	walk_state->num_operands = 0;
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
new file mode 100644
index 0000000..908645e
--- /dev/null
+++ b/drivers/acpi/acpica/dswscope.c
@@ -0,0 +1,214 @@
+/******************************************************************************
+ *
+ * Module Name: dswscope - Scope stack manipulation
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acdispat.h"
+
+#define _COMPONENT          ACPI_DISPATCHER
+ACPI_MODULE_NAME("dswscope")
+
+/****************************************************************************
+ *
+ * FUNCTION:    acpi_ds_scope_stack_clear
+ *
+ * PARAMETERS:  walk_state      - Current state
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Pop (and free) everything on the scope stack except the
+ *              root scope object (which remains at the stack top.)
+ *
+ ***************************************************************************/
+void acpi_ds_scope_stack_clear(struct acpi_walk_state *walk_state)
+{
+	union acpi_generic_state *scope_info;
+
+	ACPI_FUNCTION_NAME(ds_scope_stack_clear);
+
+	while (walk_state->scope_info) {
+
+		/* Pop a scope off the stack */
+
+		scope_info = walk_state->scope_info;
+		walk_state->scope_info = scope_info->scope.next;
+
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "Popped object type (%s)\n",
+				  acpi_ut_get_type_name(scope_info->common.
+							value)));
+		acpi_ut_delete_generic_state(scope_info);
+	}
+}
+
+/****************************************************************************
+ *
+ * FUNCTION:    acpi_ds_scope_stack_push
+ *
+ * PARAMETERS:  Node            - Name to be made current
+ *              Type            - Type of frame being pushed
+ *              walk_state      - Current state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Push the current scope on the scope stack, and make the
+ *              passed Node current.
+ *
+ ***************************************************************************/
+
+acpi_status
+acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
+			 acpi_object_type type,
+			 struct acpi_walk_state *walk_state)
+{
+	union acpi_generic_state *scope_info;
+	union acpi_generic_state *old_scope_info;
+
+	ACPI_FUNCTION_TRACE(ds_scope_stack_push);
+
+	if (!node) {
+
+		/* Invalid scope */
+
+		ACPI_ERROR((AE_INFO, "Null scope parameter"));
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Make sure object type is valid */
+
+	if (!acpi_ut_valid_object_type(type)) {
+		ACPI_WARNING((AE_INFO, "Invalid object type: 0x%X", type));
+	}
+
+	/* Allocate a new scope object */
+
+	scope_info = acpi_ut_create_generic_state();
+	if (!scope_info) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Init new scope object */
+
+	scope_info->common.descriptor_type = ACPI_DESC_TYPE_STATE_WSCOPE;
+	scope_info->scope.node = node;
+	scope_info->common.value = (u16) type;
+
+	walk_state->scope_depth++;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+			  "[%.2d] Pushed scope ",
+			  (u32) walk_state->scope_depth));
+
+	old_scope_info = walk_state->scope_info;
+	if (old_scope_info) {
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC,
+				      "[%4.4s] (%s)",
+				      acpi_ut_get_node_name(old_scope_info->
+							    scope.node),
+				      acpi_ut_get_type_name(old_scope_info->
+							    common.value)));
+	} else {
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "[\\___] (%s)", "ROOT"));
+	}
+
+	ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC,
+			      ", New scope -> [%4.4s] (%s)\n",
+			      acpi_ut_get_node_name(scope_info->scope.node),
+			      acpi_ut_get_type_name(scope_info->common.value)));
+
+	/* Push new scope object onto stack */
+
+	acpi_ut_push_generic_state(&walk_state->scope_info, scope_info);
+	return_ACPI_STATUS(AE_OK);
+}
+
+/****************************************************************************
+ *
+ * FUNCTION:    acpi_ds_scope_stack_pop
+ *
+ * PARAMETERS:  walk_state      - Current state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Pop the scope stack once.
+ *
+ ***************************************************************************/
+
+acpi_status acpi_ds_scope_stack_pop(struct acpi_walk_state *walk_state)
+{
+	union acpi_generic_state *scope_info;
+	union acpi_generic_state *new_scope_info;
+
+	ACPI_FUNCTION_TRACE(ds_scope_stack_pop);
+
+	/*
+	 * Pop scope info object off the stack.
+	 */
+	scope_info = acpi_ut_pop_generic_state(&walk_state->scope_info);
+	if (!scope_info) {
+		return_ACPI_STATUS(AE_STACK_UNDERFLOW);
+	}
+
+	walk_state->scope_depth--;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+			  "[%.2d] Popped scope [%4.4s] (%s), New scope -> ",
+			  (u32) walk_state->scope_depth,
+			  acpi_ut_get_node_name(scope_info->scope.node),
+			  acpi_ut_get_type_name(scope_info->common.value)));
+
+	new_scope_info = walk_state->scope_info;
+	if (new_scope_info) {
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC,
+				      "[%4.4s] (%s)\n",
+				      acpi_ut_get_node_name(new_scope_info->
+							    scope.node),
+				      acpi_ut_get_type_name(new_scope_info->
+							    common.value)));
+	} else {
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "[\\___] (ROOT)\n"));
+	}
+
+	acpi_ut_delete_generic_state(scope_info);
+	return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
new file mode 100644
index 0000000..40f92bf
--- /dev/null
+++ b/drivers/acpi/acpica/dswstate.c
@@ -0,0 +1,753 @@
+/******************************************************************************
+ *
+ * Module Name: dswstate - Dispatcher parse tree walk management routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "acdispat.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_DISPATCHER
+ACPI_MODULE_NAME("dswstate")
+
+/* Local prototypes */
+static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *ws);
+static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *ws);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_result_pop
+ *
+ * PARAMETERS:  Object              - Where to return the popped object
+ *              walk_state          - Current Walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Pop an object off the top of this walk's result stack
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_result_pop(union acpi_operand_object **object,
+		   struct acpi_walk_state *walk_state)
+{
+	u32 index;
+	union acpi_generic_state *state;
+	acpi_status status;
+
+	ACPI_FUNCTION_NAME(ds_result_pop);
+
+	state = walk_state->results;
+
+	/* Incorrect state of result stack */
+
+	if (state && !walk_state->result_count) {
+		ACPI_ERROR((AE_INFO, "No results on result stack"));
+		return (AE_AML_INTERNAL);
+	}
+
+	if (!state && walk_state->result_count) {
+		ACPI_ERROR((AE_INFO, "No result state for result stack"));
+		return (AE_AML_INTERNAL);
+	}
+
+	/* Empty result stack */
+
+	if (!state) {
+		ACPI_ERROR((AE_INFO, "Result stack is empty! State=%p",
+			    walk_state));
+		return (AE_AML_NO_RETURN_VALUE);
+	}
+
+	/* Return the object from the top element and clear that element of the result stack */
+
+	walk_state->result_count--;
+	index = walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
+
+	*object = state->results.obj_desc[index];
+	if (!*object) {
+		ACPI_ERROR((AE_INFO,
+			    "No result objects on result stack, State=%p",
+			    walk_state));
+		return (AE_AML_NO_RETURN_VALUE);
+	}
+
+	state->results.obj_desc[index] = NULL;
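+
+	/* Index zero means the current frame is now empty; pop and free it */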
+	if (index == 0) {
+		status = acpi_ds_result_stack_pop(walk_state);
+		if (ACPI_FAILURE(status)) {
+			return (status);
+		}
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+			  "Obj=%p [%s] Index=%X State=%p Num=%X\n", *object,
+			  acpi_ut_get_object_type_name(*object),
+			  index, walk_state, walk_state->result_count));
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_result_push
+ *
+ * PARAMETERS:  Object              - Object to push onto the result stack
+ *              walk_state          - Current Walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Push an object onto the current result stack
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_result_push(union acpi_operand_object * object,
+		    struct acpi_walk_state * walk_state)
+{
+	union acpi_generic_state *state;
+	acpi_status status;
+	u32 index;
+
+	ACPI_FUNCTION_NAME(ds_result_push);
+
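+	/*
+	 * The result stack grows in frames of ACPI_RESULTS_FRAME_OBJ_NUM objects;
+	 * result_size is the total capacity and result_count is the number of
+	 * result objects currently stored.
+	 */
+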
+	if (walk_state->result_count > walk_state->result_size) {
+		ACPI_ERROR((AE_INFO, "Result stack is full"));
+		return (AE_AML_INTERNAL);
+	} else if (walk_state->result_count == walk_state->result_size) {
+
+		/* Extend the result stack */
+
+		status = acpi_ds_result_stack_push(walk_state);
+		if (ACPI_FAILURE(status)) {
+			ACPI_ERROR((AE_INFO,
+				    "Failed to extend the result stack"));
+			return (status);
+		}
+	}
+
+	if (walk_state->result_count >= walk_state->result_size) {
+		ACPI_ERROR((AE_INFO, "No free elements in result stack"));
+		return (AE_AML_INTERNAL);
+	}
+
+	state = walk_state->results;
+	if (!state) {
+		ACPI_ERROR((AE_INFO, "No result stack frame during push"));
+		return (AE_AML_INTERNAL);
+	}
+
+	if (!object) {
+		ACPI_ERROR((AE_INFO,
+			    "Null Object! Obj=%p State=%p Num=%X",
+			    object, walk_state, walk_state->result_count));
+		return (AE_BAD_PARAMETER);
+	}
+
+	/* Place the object in the next free element of the result stack */
+
+	index = walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
+	state->results.obj_desc[index] = object;
+	walk_state->result_count++;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] State=%p Num=%X Cur=%X\n",
+			  object,
+			  acpi_ut_get_object_type_name((union
+							acpi_operand_object *)
+						       object), walk_state,
+			  walk_state->result_count,
+			  walk_state->current_result));
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_result_stack_push
+ *
+ * PARAMETERS:  walk_state          - Current Walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Push a new (empty) result frame onto the walk_state result stack
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *walk_state)
+{
+	union acpi_generic_state *state;
+
+	ACPI_FUNCTION_NAME(ds_result_stack_push);
+
+	/* Check for stack overflow */
+
+	if (((u32) walk_state->result_size + ACPI_RESULTS_FRAME_OBJ_NUM) >
+	    ACPI_RESULTS_OBJ_NUM_MAX) {
+		ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%X",
+			    walk_state, walk_state->result_size));
+		return (AE_STACK_OVERFLOW);
+	}
+
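+	/* Allocate a new result frame and link it into the result list */
+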
+	state = acpi_ut_create_generic_state();
+	if (!state) {
+		return (AE_NO_MEMORY);
+	}
+
+	state->common.descriptor_type = ACPI_DESC_TYPE_STATE_RESULT;
+	acpi_ut_push_generic_state(&walk_state->results, state);
+
+	/* Increase the length of the result stack by the length of one frame */
+
+	walk_state->result_size += ACPI_RESULTS_FRAME_OBJ_NUM;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Results=%p State=%p\n",
+			  state, walk_state));
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_result_stack_pop
+ *
+ * PARAMETERS:  walk_state          - Current Walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Pop a result frame off of the walk_state result stack and free it
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state)
+{
+	union acpi_generic_state *state;
+
+	ACPI_FUNCTION_NAME(ds_result_stack_pop);
+
+	/* Check for stack underflow */
+
+	if (walk_state->results == NULL) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "Result stack underflow - State=%p\n",
+				  walk_state));
+		return (AE_AML_NO_OPERAND);
+	}
+
+	if (walk_state->result_size < ACPI_RESULTS_FRAME_OBJ_NUM) {
+		ACPI_ERROR((AE_INFO, "Insufficient result stack size"));
+		return (AE_AML_INTERNAL);
+	}
+
+	state = acpi_ut_pop_generic_state(&walk_state->results);
+	acpi_ut_delete_generic_state(state);
+
+	/* Decrease the length of the result stack by the length of one frame */
+
+	walk_state->result_size -= ACPI_RESULTS_FRAME_OBJ_NUM;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+			  "Result=%p RemainingResults=%X State=%p\n",
+			  state, walk_state->result_count, walk_state));
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_obj_stack_push
+ *
+ * PARAMETERS:  Object              - Object to push
+ *              walk_state          - Current Walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Push an object onto this walk's object/operand stack
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
+{
+	ACPI_FUNCTION_NAME(ds_obj_stack_push);
+
+	/* Check for stack overflow */
+
+	if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) {
+		ACPI_ERROR((AE_INFO,
+			    "Object stack overflow! Obj=%p State=%p #Ops=%X",
+			    object, walk_state, walk_state->num_operands));
+		return (AE_STACK_OVERFLOW);
+	}
+
+	/* Put the object onto the stack */
+
+	walk_state->operands[walk_state->operand_index] = object;
+	walk_state->num_operands++;
+
+	/* Advance the index for the next push (normal fill order of the operand stack) */
+
+	walk_state->operand_index++;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] State=%p #Ops=%X\n",
+			  object,
+			  acpi_ut_get_object_type_name((union
+							acpi_operand_object *)
+						       object), walk_state,
+			  walk_state->num_operands));
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_obj_stack_pop
+ *
+ * PARAMETERS:  pop_count           - Number of objects/entries to pop
+ *              walk_state          - Current Walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Pop this walk's object stack.  Objects on the stack are NOT
+ *              deleted by this routine.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
+{
+	u32 i;
+
+	ACPI_FUNCTION_NAME(ds_obj_stack_pop);
+
+	for (i = 0; i < pop_count; i++) {
+
+		/* Check for stack underflow */
+
+		if (walk_state->num_operands == 0) {
+			ACPI_ERROR((AE_INFO,
+				    "Object stack underflow! Count=%X State=%p #Ops=%X",
+				    pop_count, walk_state,
+				    walk_state->num_operands));
+			return (AE_STACK_UNDERFLOW);
+		}
+
+		/* Just set the stack entry to null */
+
+		walk_state->num_operands--;
+		walk_state->operands[walk_state->num_operands] = NULL;
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%X\n",
+			  pop_count, walk_state, walk_state->num_operands));
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_obj_stack_pop_and_delete
+ *
+ * PARAMETERS:  pop_count           - Number of objects/entries to pop
+ *              walk_state          - Current Walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Pop this walk's object stack and delete each object that is
+ *              popped off.
+ *
+ ******************************************************************************/
+
+void
+acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
+				 struct acpi_walk_state *walk_state)
+{
+	s32 i;
+	union acpi_operand_object *obj_desc;
+
+	ACPI_FUNCTION_NAME(ds_obj_stack_pop_and_delete);
+
+	if (pop_count == 0) {
+		return;
+	}
+
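+	/*
+	 * Pop the requested entries from the top down, removing one
+	 * reference from each object found.
+	 */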
+	for (i = (s32) pop_count - 1; i >= 0; i--) {
+		if (walk_state->num_operands == 0) {
+			return;
+		}
+
+		/* Pop the stack and delete an object if present in this stack entry */
+
+		walk_state->num_operands--;
+		obj_desc = walk_state->operands[i];
+		if (obj_desc) {
+			acpi_ut_remove_reference(walk_state->operands[i]);
+			walk_state->operands[i] = NULL;
+		}
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%X\n",
+			  pop_count, walk_state, walk_state->num_operands));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_get_current_walk_state
+ *
+ * PARAMETERS:  Thread          - Get current active state for this Thread
+ *
+ * RETURN:      Pointer to the current walk state
+ *
+ * DESCRIPTION: Get the walk state that is at the head of the list (the "current"
+ *              walk state.)
+ *
+ ******************************************************************************/
+
+struct acpi_walk_state *acpi_ds_get_current_walk_state(struct acpi_thread_state
+						       *thread)
+{
+	ACPI_FUNCTION_NAME(ds_get_current_walk_state);
+
+	if (!thread) {
+		return (NULL);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Current WalkState %p\n",
+			  thread->walk_state_list));
+
+	return (thread->walk_state_list);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_push_walk_state
+ *
+ * PARAMETERS:  walk_state      - State to push
+ *              Thread          - Thread state object
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Place the Thread state at the head of the state list
+ *
+ ******************************************************************************/
+
+void
+acpi_ds_push_walk_state(struct acpi_walk_state *walk_state,
+			struct acpi_thread_state *thread)
+{
+	ACPI_FUNCTION_TRACE(ds_push_walk_state);
+
+	walk_state->next = thread->walk_state_list;
+	thread->walk_state_list = walk_state;
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_pop_walk_state
+ *
+ * PARAMETERS:  Thread      - Current thread state
+ *
+ * RETURN:      A walk_state object popped from the thread's stack
+ *
+ * DESCRIPTION: Remove and return the walk state object that is at the head of
+ *              the walk stack for the given walk list.  NULL indicates that
+ *              the list is empty.
+ *
+ ******************************************************************************/
+
+struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
+{
+	struct acpi_walk_state *walk_state;
+
+	ACPI_FUNCTION_TRACE(ds_pop_walk_state);
+
+	walk_state = thread->walk_state_list;
+
+	if (walk_state) {
+
+		/* Next walk state becomes the current walk state */
+
+		thread->walk_state_list = walk_state->next;
+
+		/*
+		 * Don't clear the NEXT field, this serves as an indicator
+		 * that there is a parent WALK STATE
+		 * Do Not: walk_state->Next = NULL;
+		 */
+	}
+
+	return_PTR(walk_state);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_create_walk_state
+ *
+ * PARAMETERS:  owner_id        - ID for object creation
+ *              Origin          - Starting point for this walk
+ *              method_desc     - Method object
+ *              Thread          - Current thread state
+ *
+ * RETURN:      Pointer to the new walk state.
+ *
+ * DESCRIPTION: Allocate and initialize a new walk state.  The current walk
+ *              state is set to this new state.
+ *
+ ******************************************************************************/
+
+struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object
+						  *origin, union acpi_operand_object
+						  *method_desc, struct acpi_thread_state
+						  *thread)
+{
+	struct acpi_walk_state *walk_state;
+
+	ACPI_FUNCTION_TRACE(ds_create_walk_state);
+
+	walk_state = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_walk_state));
+	if (!walk_state) {
+		return_PTR(NULL);
+	}
+
+	walk_state->descriptor_type = ACPI_DESC_TYPE_WALK;
+	walk_state->method_desc = method_desc;
+	walk_state->owner_id = owner_id;
+	walk_state->origin = origin;
+	walk_state->thread = thread;
+
+	walk_state->parser_state.start_op = origin;
+
+	/* Init the method args/local */
+
+#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
+	acpi_ds_method_data_init(walk_state);
+#endif
+
+	/* Put the new state at the head of the walk list */
+
+	if (thread) {
+		acpi_ds_push_walk_state(walk_state, thread);
+	}
+
+	return_PTR(walk_state);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_init_aml_walk
+ *
+ * PARAMETERS:  walk_state      - New state to be initialized
+ *              Op              - Current parse op
+ *              method_node     - Control method NS node, if any
+ *              aml_start       - Start of AML
+ *              aml_length      - Length of AML
+ *              Info            - Method info block (params, etc.)
+ *              pass_number     - 1, 2, or 3
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Initialize a walk state for a pass 1 or 2 parse tree walk
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
+		      union acpi_parse_object *op,
+		      struct acpi_namespace_node *method_node,
+		      u8 * aml_start,
+		      u32 aml_length,
+		      struct acpi_evaluate_info *info, u8 pass_number)
+{
+	acpi_status status;
+	struct acpi_parse_state *parser_state = &walk_state->parser_state;
+	union acpi_parse_object *extra_op;
+
+	ACPI_FUNCTION_TRACE(ds_init_aml_walk);
+
+	walk_state->parser_state.aml =
+	    walk_state->parser_state.aml_start = aml_start;
+	walk_state->parser_state.aml_end =
+	    walk_state->parser_state.pkg_end = aml_start + aml_length;
+
+	/* The next_op of the new walk will be the beginning of the method */
+
+	walk_state->next_op = NULL;
+	walk_state->pass_number = pass_number;
+
+	if (info) {
+		walk_state->params = info->parameters;
+		walk_state->caller_return_desc = &info->return_object;
+	}
+
+	status = acpi_ps_init_scope(&walk_state->parser_state, op);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
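+	/*
+	 * A method_node indicates a control-method walk: set up the method
+	 * scope and initialize its arguments. Otherwise, derive the starting
+	 * scope by searching the parse tree for a parent Op with a namespace node.
+	 */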
+	if (method_node) {
+		walk_state->parser_state.start_node = method_node;
+		walk_state->walk_type = ACPI_WALK_METHOD;
+		walk_state->method_node = method_node;
+		walk_state->method_desc =
+		    acpi_ns_get_attached_object(method_node);
+
+		/* Push start scope on scope stack and make it current  */
+
+		status =
+		    acpi_ds_scope_stack_push(method_node, ACPI_TYPE_METHOD,
+					     walk_state);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		/* Init the method arguments */
+
+		status = acpi_ds_method_data_init_args(walk_state->params,
+						       ACPI_METHOD_NUM_ARGS,
+						       walk_state);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	} else {
+		/*
+		 * Setup the current scope.
+		 * Find a Named Op that has a namespace node associated with it,
+		 * searching upwards from this Op. The current scope is the first
+		 * Op with a namespace node.
+		 */
+		extra_op = parser_state->start_op;
+		while (extra_op && !extra_op->common.node) {
+			extra_op = extra_op->common.parent;
+		}
+
+		if (!extra_op) {
+			parser_state->start_node = NULL;
+		} else {
+			parser_state->start_node = extra_op->common.node;
+		}
+
+		if (parser_state->start_node) {
+
+			/* Push start scope on scope stack and make it current  */
+
+			status =
+			    acpi_ds_scope_stack_push(parser_state->start_node,
+						     parser_state->start_node->
+						     type, walk_state);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+		}
+	}
+
+	status = acpi_ds_init_callbacks(walk_state, pass_number);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_delete_walk_state
+ *
+ * PARAMETERS:  walk_state      - State to delete
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Delete a walk state including all internal data structures
+ *
+ ******************************************************************************/
+
+void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
+{
+	union acpi_generic_state *state;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_delete_walk_state, walk_state);
+
+	if (!walk_state) {
+		return;
+	}
+
+	if (walk_state->descriptor_type != ACPI_DESC_TYPE_WALK) {
+		ACPI_ERROR((AE_INFO, "%p is not a valid walk state",
+			    walk_state));
+		return;
+	}
+
+	/* There should not be any open scopes */
+
+	if (walk_state->parser_state.scope) {
+		ACPI_ERROR((AE_INFO, "%p walk still has a scope list",
+			    walk_state));
+		acpi_ps_cleanup_scope(&walk_state->parser_state);
+	}
+
+	/* Must always free any linked control states */
+
+	while (walk_state->control_state) {
+		state = walk_state->control_state;
+		walk_state->control_state = state->common.next;
+
+		acpi_ut_delete_generic_state(state);
+	}
+
+	/* Must always free any linked parse states */
+
+	while (walk_state->scope_info) {
+		state = walk_state->scope_info;
+		walk_state->scope_info = state->common.next;
+
+		acpi_ut_delete_generic_state(state);
+	}
+
+	/* Must always free any stacked result states */
+
+	while (walk_state->results) {
+		state = walk_state->results;
+		walk_state->results = state->common.next;
+
+		acpi_ut_delete_generic_state(state);
+	}
+
+	ACPI_FREE(walk_state);
+	return_VOID;
+}
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
new file mode 100644
index 0000000..803edd9
--- /dev/null
+++ b/drivers/acpi/acpica/evevent.c
@@ -0,0 +1,313 @@
+/******************************************************************************
+ *
+ * Module Name: evevent - Fixed Event handling and dispatch
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+
+#define _COMPONENT          ACPI_EVENTS
+ACPI_MODULE_NAME("evevent")
+
+/* Local prototypes */
+static acpi_status acpi_ev_fixed_event_initialize(void);
+
+static u32 acpi_ev_fixed_event_dispatch(u32 event);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_initialize_events
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Initialize global data structures for ACPI events (Fixed, GPE)
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_initialize_events(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_initialize_events);
+
+	/*
+	 * Initialize the Fixed and General Purpose Events. This is done prior to
+	 * enabling SCIs to prevent interrupts from occurring before the handlers
+	 * are installed.
+	 */
+	status = acpi_ev_fixed_event_initialize();
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"Unable to initialize fixed events"));
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_ev_gpe_initialize();
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"Unable to initialize general purpose events"));
+		return_ACPI_STATUS(status);
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_install_fadt_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Completes initialization of the FADT-defined GPE blocks
+ *              (0 and 1). This causes the _PRW methods to be run, so the HW
+ *              must be fully initialized at this point, including global lock
+ *              support.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_install_fadt_gpes(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_install_fadt_gpes);
+
+	/* Namespace must be locked */
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* FADT GPE Block 0 */
+
+	(void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
+					   acpi_gbl_gpe_fadt_blocks[0]);
+
+	/* FADT GPE Block 1 */
+
+	(void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
+					   acpi_gbl_gpe_fadt_blocks[1]);
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_install_xrupt_handlers
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install interrupt handlers for the SCI and Global Lock
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_install_xrupt_handlers(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers);
+
+	/* Install the SCI handler */
+
+	status = acpi_ev_install_sci_handler();
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"Unable to install System Control Interrupt handler"));
+		return_ACPI_STATUS(status);
+	}
+
+	/* Install the handler for the Global Lock */
+
+	status = acpi_ev_init_global_lock_handler();
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"Unable to initialize Global Lock handler"));
+		return_ACPI_STATUS(status);
+	}
+
+	acpi_gbl_events_initialized = TRUE;
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_fixed_event_initialize
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install the fixed event handlers and disable all fixed events.
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ev_fixed_event_initialize(void)
+{
+	u32 i;
+	acpi_status status;
+
+	/*
+	 * Initialize the structure that keeps track of fixed event handlers and
+	 * disable all of the fixed events.
+	 */
+	for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+		acpi_gbl_fixed_event_handlers[i].handler = NULL;
+		acpi_gbl_fixed_event_handlers[i].context = NULL;
+
+		/* Disable the fixed event; no handler is installed yet */
+
+		if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) {
+			status =
+			    acpi_set_register(acpi_gbl_fixed_event_info[i].
+					      enable_register_id, 0);
+			if (ACPI_FAILURE(status)) {
+				return (status);
+			}
+		}
+	}
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_fixed_event_detect
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
+ *
+ * DESCRIPTION: Checks the PM status register for active fixed events
+ *
+ ******************************************************************************/
+
+u32 acpi_ev_fixed_event_detect(void)
+{
+	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
+	u32 fixed_status;
+	u32 fixed_enable;
+	u32 i;
+
+	ACPI_FUNCTION_NAME(ev_fixed_event_detect);
+
+	/*
+	 * Read the fixed feature status and enable registers, as all the cases
+	 * depend on their values. Ignore errors here.
+	 */
+	(void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
+	(void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
+			  "Fixed Event Block: Enable %08X Status %08X\n",
+			  fixed_enable, fixed_status));
+
+	/*
+	 * Check for all possible Fixed Events and dispatch those that are active
+	 */
+	for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+
+		/* Both the status and enable bits must be on for this event */
+
+		if ((fixed_status & acpi_gbl_fixed_event_info[i].
+		     status_bit_mask)
+		    && (fixed_enable & acpi_gbl_fixed_event_info[i].
+			enable_bit_mask)) {
+
+			/* Found an active (signalled) event */
+			acpi_os_fixed_event_count(i);
+			int_status |= acpi_ev_fixed_event_dispatch(i);
+		}
+	}
+
+	return (int_status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_fixed_event_dispatch
+ *
+ * PARAMETERS:  Event               - Event type
+ *
+ * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
+ *
+ * DESCRIPTION: Clears the status bit for the requested event, calls the
+ *              handler that previously registered for the event.
+ *
+ ******************************************************************************/
+
+static u32 acpi_ev_fixed_event_dispatch(u32 event)
+{
+
+	ACPI_FUNCTION_ENTRY();
+
+	/* Clear the status bit */
+
+	(void)acpi_set_register(acpi_gbl_fixed_event_info[event].
+				status_register_id, 1);
+
+	/*
+	 * Make sure we've got a handler. If not, report an error. The event is
+	 * disabled to prevent further interrupts.
+	 */
+	if (NULL == acpi_gbl_fixed_event_handlers[event].handler) {
+		(void)acpi_set_register(acpi_gbl_fixed_event_info[event].
+					enable_register_id, 0);
+
+		ACPI_ERROR((AE_INFO,
+			    "No installed handler for fixed event [%08X]",
+			    event));
+
+		return (ACPI_INTERRUPT_NOT_HANDLED);
+	}
+
+	/* Invoke the Fixed Event handler */
+
+	return ((acpi_gbl_fixed_event_handlers[event].
+		 handler) (acpi_gbl_fixed_event_handlers[event].context));
+}
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
new file mode 100644
index 0000000..f345ced
--- /dev/null
+++ b/drivers/acpi/acpica/evgpe.c
@@ -0,0 +1,722 @@
+/******************************************************************************
+ *
+ * Module Name: evgpe - General Purpose Event handling and dispatch
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_EVENTS
+ACPI_MODULE_NAME("evgpe")
+
+/* Local prototypes */
+static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_set_gpe_type
+ *
+ * PARAMETERS:  gpe_event_info          - GPE to set
+ *              Type                    - New type
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Sets the new type for the GPE (wake, run, or wake/run)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_set_gpe_type);
+
+	/* Validate type and update register enable masks */
+
+	switch (type) {
+	case ACPI_GPE_TYPE_WAKE:
+	case ACPI_GPE_TYPE_RUNTIME:
+	case ACPI_GPE_TYPE_WAKE_RUN:
+		break;
+
+	default:
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Disable the GPE if currently enabled */
+
+	status = acpi_ev_disable_gpe(gpe_event_info);
+
+	/* Type was validated above */
+
+	gpe_event_info->flags &= ~ACPI_GPE_TYPE_MASK;	/* Clear type bits */
+	gpe_event_info->flags |= type;	/* Insert type */
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_update_gpe_enable_masks
+ *
+ * PARAMETERS:  gpe_event_info          - GPE to update
+ *              Type                    - What to do: ACPI_GPE_DISABLE or
+ *                                        ACPI_GPE_ENABLE
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Updates GPE register enable masks based on the GPE type
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
+				u8 type)
+{
+	struct acpi_gpe_register_info *gpe_register_info;
+	u8 register_bit;
+
+	ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);
+
+	gpe_register_info = gpe_event_info->register_info;
+	if (!gpe_register_info) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
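+
+	/* Compute the mask for this GPE's bit within its status/enable registers */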
+	register_bit = (u8)
+	    (1 <<
+	     (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
+
+	/* 1) Disable case. Simply clear all enable bits */
+
+	if (type == ACPI_GPE_DISABLE) {
+		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
+			       register_bit);
+		ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* 2) Enable case. Set/Clear the appropriate enable bits */
+
+	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
+	case ACPI_GPE_TYPE_WAKE:
+		ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
+		ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
+		break;
+
+	case ACPI_GPE_TYPE_RUNTIME:
+		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
+			       register_bit);
+		ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
+		break;
+
+	case ACPI_GPE_TYPE_WAKE_RUN:
+		ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
+		ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
+		break;
+
+	default:
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_enable_gpe
+ *
+ * PARAMETERS:  gpe_event_info          - GPE to enable
+ *              write_to_hardware       - Enable now, or just mark data structs
+ *                                        (WAKE GPEs should be deferred)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Enable a GPE based on the GPE type
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
+		   u8 write_to_hardware)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_enable_gpe);
+
+	/* Make sure HW enable masks are updated */
+
+	status =
+	    acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_ENABLE);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Mark wake-enabled or HW enable, or both */
+
+	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
+	case ACPI_GPE_TYPE_WAKE:
+
+		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
+		break;
+
+	case ACPI_GPE_TYPE_WAKE_RUN:
+
+		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
+
+		/*lint -fallthrough */
+
+	case ACPI_GPE_TYPE_RUNTIME:
+
+		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
+
+		if (write_to_hardware) {
+
+			/* Clear the GPE (of stale events), then enable it */
+
+			status = acpi_hw_clear_gpe(gpe_event_info);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+
+			/* Enable the requested runtime GPE */
+
+			status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
+		}
+		break;
+
+	default:
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_disable_gpe
+ *
+ * PARAMETERS:  gpe_event_info          - GPE to disable
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Disable a GPE based on the GPE type
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_disable_gpe);
+
+	/* Make sure HW enable masks are updated */
+
+	status =
+	    acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_DISABLE);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Clear the appropriate enabled flags for this GPE */
+
+	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
+	case ACPI_GPE_TYPE_WAKE:
+		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
+		break;
+
+	case ACPI_GPE_TYPE_WAKE_RUN:
+		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
+
+		/* fallthrough */
+
+	case ACPI_GPE_TYPE_RUNTIME:
+
+		/* Disable the requested runtime GPE */
+
+		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
+		break;
+
+	default:
+		break;
+	}
+
+	/*
+	 * Even if we don't know the GPE type, make sure that we always
+	 * disable it. low_disable_gpe will just clear the enable bit for this
+	 * GPE and write it. It will not write out the current GPE enable mask,
+	 * since this may inadvertently enable GPEs too early, if a rogue GPE has
+	 * come in during ACPICA initialization - possibly as a result of AML or
+	 * other code that has enabled the GPE.
+	 */
+	status = acpi_hw_low_disable_gpe(gpe_event_info);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_get_gpe_event_info
+ *
+ * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
+ *              gpe_number          - Raw GPE number
+ *
+ * RETURN:      A GPE event_info struct. NULL if not a valid GPE
+ *
+ * DESCRIPTION: Returns the event_info struct associated with this GPE.
+ *              Validates the gpe_block and the gpe_number
+ *
+ *              Should be called only when the GPE lists are semaphore locked
+ *              and not subject to change.
+ *
+ ******************************************************************************/
+
+struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
+						       u32 gpe_number)
+{
+	union acpi_operand_object *obj_desc;
+	struct acpi_gpe_block_info *gpe_block;
+	u32 i;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/* A NULL gpe_device means use the FADT-defined GPE block(s) */
+
+	if (!gpe_device) {
+
+		/* Examine GPE Block 0 and 1 (These blocks are permanent) */
+
+		for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
+			gpe_block = acpi_gbl_gpe_fadt_blocks[i];
+			if (gpe_block) {
+				if ((gpe_number >= gpe_block->block_base_number)
+				    && (gpe_number <
+					gpe_block->block_base_number +
+					(gpe_block->register_count * 8))) {
+					return (&gpe_block->
+						event_info[gpe_number -
+							   gpe_block->
+							   block_base_number]);
+				}
+			}
+		}
+
+		/* The gpe_number was not in the range of either FADT GPE block */
+
+		return (NULL);
+	}
+
+	/* A Non-NULL gpe_device means this is a GPE Block Device */
+
+	obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
+					       gpe_device);
+	if (!obj_desc || !obj_desc->device.gpe_block) {
+		return (NULL);
+	}
+
+	gpe_block = obj_desc->device.gpe_block;
+
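+	/* Is the GPE number within this block's range? */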
+	if ((gpe_number >= gpe_block->block_base_number) &&
+	    (gpe_number <
+	     gpe_block->block_base_number + (gpe_block->register_count * 8))) {
+		return (&gpe_block->
+			event_info[gpe_number - gpe_block->block_base_number]);
+	}
+
+	return (NULL);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_gpe_detect
+ *
+ * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
+ *                                    Can have multiple GPE blocks attached.
+ *
+ * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
+ *
+ * DESCRIPTION: Detect if any GP events have occurred. This function is
+ *              executed at interrupt level.
+ *
+ ******************************************************************************/
+
+u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
+{
+	acpi_status status;
+	struct acpi_gpe_block_info *gpe_block;
+	struct acpi_gpe_register_info *gpe_register_info;
+	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
+	u8 enabled_status_byte;
+	u32 status_reg;
+	u32 enable_reg;
+	acpi_cpu_flags flags;
+	u32 i;
+	u32 j;
+
+	ACPI_FUNCTION_NAME(ev_gpe_detect);
+
+	/* Check for the case where there are no GPEs */
+
+	if (!gpe_xrupt_list) {
+		return (int_status);
+	}
+
+	/*
+	 * We need to obtain the GPE lock for both the data structs and registers
+	 * Note: Not necessary to obtain the hardware lock, since the GPE
+	 * registers are owned by the gpe_lock.
+	 */
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Examine all GPE blocks attached to this interrupt level */
+
+	gpe_block = gpe_xrupt_list->gpe_block_list_head;
+	while (gpe_block) {
+		/*
+		 * Read all of the 8-bit GPE status and enable registers in this GPE
+		 * block, saving all of them. Find all currently active GP events.
+		 */
+		for (i = 0; i < gpe_block->register_count; i++) {
+
+			/* Get the next status/enable pair */
+
+			gpe_register_info = &gpe_block->register_info[i];
+
+			/* Read the Status Register */
+
+			status =
+			    acpi_read(&status_reg,
+				      &gpe_register_info->status_address);
+			if (ACPI_FAILURE(status)) {
+				goto unlock_and_exit;
+			}
+
+			/* Read the Enable Register */
+
+			status =
+			    acpi_read(&enable_reg,
+				      &gpe_register_info->enable_address);
+			if (ACPI_FAILURE(status)) {
+				goto unlock_and_exit;
+			}
+
+			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
+					  "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n",
+					  gpe_register_info->base_gpe_number,
+					  status_reg, enable_reg));
+
+			/* Check if there is anything active at all in this register */
+
+			enabled_status_byte = (u8) (status_reg & enable_reg);
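+
+			/*
+			 * Example (hypothetical register values): Status == 0x28
+			 * and Enable == 0x08 yield enabled_status_byte == 0x08,
+			 * so only bit 3 of this register is dispatched below;
+			 * the pending but disabled bit 5 is ignored.
+			 */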
+			if (!enabled_status_byte) {
+
+				/* No active GPEs in this register, move on */
+
+				continue;
+			}
+
+			/* Now look at the individual GPEs in this byte register */
+
+			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+
+				/* Examine one GPE bit */
+
+				if (enabled_status_byte & (1 << j)) {
+					/*
+					 * Found an active GPE. Dispatch the event to a handler
+					 * or method.
+					 */
+					int_status |=
+					    acpi_ev_gpe_dispatch(&gpe_block->
+						event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
+				}
+			}
+		}
+
+		gpe_block = gpe_block->next;
+	}
+
+      unlock_and_exit:
+
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return (int_status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_asynch_execute_gpe_method
+ *
+ * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Perform the actual execution of a GPE control method. This
+ *              function is called from an invocation of acpi_os_execute and
+ *              therefore does NOT execute at interrupt level - so that
+ *              the control method itself is not executed in the context of
+ *              an interrupt handler.
+ *
+ ******************************************************************************/
+static void acpi_ev_asynch_enable_gpe(void *context);
+
+static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
+{
+	struct acpi_gpe_event_info *gpe_event_info = (void *)context;
+	acpi_status status;
+	struct acpi_gpe_event_info local_gpe_event_info;
+	struct acpi_evaluate_info *info;
+
+	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_VOID;
+	}
+
+	/* Must revalidate the gpe_number/gpe_block */
+
+	if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
+		status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+		return_VOID;
+	}
+
+	/* Set the GPE flags for return to enabled state */
+
+	(void)acpi_ev_enable_gpe(gpe_event_info, FALSE);
+
+	/*
+	 * Take a snapshot of the GPE info for this level - we copy the info to
+	 * prevent a race condition with remove_handler/remove_block.
+	 */
+	ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info,
+		    sizeof(struct acpi_gpe_event_info));
+
+	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_VOID;
+	}
+
+	/*
+	 * Must check for control method type dispatch one more time to avoid a
+	 * race with ev_gpe_install_handler
+	 */
+	if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
+	    ACPI_GPE_DISPATCH_METHOD) {
+
+		/* Allocate the evaluation information block */
+
+		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+		if (!info) {
+			status = AE_NO_MEMORY;
+		} else {
+			/*
+			 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
+			 * control method that corresponds to this GPE
+			 */
+			info->prefix_node =
+			    local_gpe_event_info.dispatch.method_node;
+			info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+			status = acpi_ns_evaluate(info);
+			ACPI_FREE(info);
+		}
+
+		if (ACPI_FAILURE(status)) {
+			ACPI_EXCEPTION((AE_INFO, status,
+					"while evaluating GPE method [%4.4s]",
+					acpi_ut_get_node_name
+					(local_gpe_event_info.dispatch.
+					 method_node)));
+		}
+	}
+	/* Defer enabling of GPE until all notify handlers are done */
+	acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
+				gpe_event_info);
+	return_VOID;
+}
+
+static void acpi_ev_asynch_enable_gpe(void *context)
+{
+	struct acpi_gpe_event_info *gpe_event_info = context;
+	acpi_status status;
+	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
+	    ACPI_GPE_LEVEL_TRIGGERED) {
+		/*
+		 * GPE is level-triggered, we clear the GPE status bit after handling
+		 * the event.
+		 */
+		status = acpi_hw_clear_gpe(gpe_event_info);
+		if (ACPI_FAILURE(status)) {
+			return_VOID;
+		}
+	}
+
+	/* Enable this GPE */
+	(void)acpi_hw_write_gpe_enable_reg(gpe_event_info);
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_gpe_dispatch
+ *
+ * PARAMETERS:  gpe_event_info  - Info for this GPE
+ *              gpe_number      - Number relative to the parent GPE block
+ *
+ * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
+ *
+ * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
+ *              or method (e.g. _Lxx/_Exx) handler.
+ *
+ *              This function executes at interrupt level.
+ *
+ ******************************************************************************/
+
+u32
+acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
+
+	acpi_os_gpe_count(gpe_number);
+
+	/*
+	 * If edge-triggered, clear the GPE status bit now. Note that
+	 * level-triggered events are cleared after the GPE is serviced.
+	 */
+	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
+	    ACPI_GPE_EDGE_TRIGGERED) {
+		status = acpi_hw_clear_gpe(gpe_event_info);
+		if (ACPI_FAILURE(status)) {
+			ACPI_EXCEPTION((AE_INFO, status,
+					"Unable to clear GPE[%2X]",
+					gpe_number));
+			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+		}
+	}
+
+	/*
+	 * Dispatch the GPE to either an installed handler, or the control method
+	 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
+	 * it and do not attempt to run the method. If there is neither a handler
+	 * nor a method, we disable this GPE to prevent further such pointless
+	 * events from firing.
+	 */
+	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
+	case ACPI_GPE_DISPATCH_HANDLER:
+
+		/*
+		 * Invoke the installed handler (at interrupt level)
+		 * Ignore return status for now.
+		 * TBD: leave GPE disabled on error?
+		 */
+		(void)gpe_event_info->dispatch.handler->address(gpe_event_info->
+								dispatch.
+								handler->
+								context);
+
+		/* It is now safe to clear level-triggered events. */
+
+		if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
+		    ACPI_GPE_LEVEL_TRIGGERED) {
+			status = acpi_hw_clear_gpe(gpe_event_info);
+			if (ACPI_FAILURE(status)) {
+				ACPI_EXCEPTION((AE_INFO, status,
+						"Unable to clear GPE[%2X]",
+						gpe_number));
+				return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+			}
+		}
+		break;
+
+	case ACPI_GPE_DISPATCH_METHOD:
+
+		/*
+		 * Disable the GPE, so it doesn't keep firing before the method has a
+		 * chance to run (it runs asynchronously with interrupts enabled).
+		 */
+		status = acpi_ev_disable_gpe(gpe_event_info);
+		if (ACPI_FAILURE(status)) {
+			ACPI_EXCEPTION((AE_INFO, status,
+					"Unable to disable GPE[%2X]",
+					gpe_number));
+			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+		}
+
+		/*
+		 * Execute the method associated with the GPE
+		 * NOTE: Level-triggered GPEs are cleared after the method completes.
+		 */
+		status = acpi_os_execute(OSL_GPE_HANDLER,
+					 acpi_ev_asynch_execute_gpe_method,
+					 gpe_event_info);
+		if (ACPI_FAILURE(status)) {
+			ACPI_EXCEPTION((AE_INFO, status,
+					"Unable to queue handler for GPE[%2X] - event disabled",
+					gpe_number));
+		}
+		break;
+
+	default:
+
+		/* No handler or method to run! */
+
+		ACPI_ERROR((AE_INFO,
+			    "No handler or method for GPE[%2X], disabling event",
+			    gpe_number));
+
+		/*
+		 * Disable the GPE. The GPE will remain disabled until the ACPICA
+		 * Core Subsystem is restarted, or a handler is installed.
+		 */
+		status = acpi_ev_disable_gpe(gpe_event_info);
+		if (ACPI_FAILURE(status)) {
+			ACPI_EXCEPTION((AE_INFO, status,
+					"Unable to disable GPE[%2X]",
+					gpe_number));
+			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+		}
+		break;
+	}
+
+	return_UINT32(ACPI_INTERRUPT_HANDLED);
+}
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
new file mode 100644
index 0000000..484cc05
--- /dev/null
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -0,0 +1,1227 @@
+/******************************************************************************
+ *
+ * Module Name: evgpeblk - GPE block creation and initialization.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_EVENTS
+ACPI_MODULE_NAME("evgpeblk")
+
+/* Local prototypes */
+static acpi_status
+acpi_ev_save_method_info(acpi_handle obj_handle,
+			 u32 level, void *obj_desc, void **return_value);
+
+static acpi_status
+acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
+			  u32 level, void *info, void **return_value);
+
+static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
+							       interrupt_number);
+
+static acpi_status
+acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
+
+static acpi_status
+acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
+			  u32 interrupt_number);
+
+static acpi_status
+acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_valid_gpe_event
+ *
+ * PARAMETERS:  gpe_event_info              - Info for this GPE
+ *
+ * RETURN:      TRUE if the gpe_event is valid
+ *
+ * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
+ *              Should be called only when the GPE lists are semaphore locked
+ *              and not subject to change.
+ *
+ ******************************************************************************/
+
+u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
+{
+	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
+	struct acpi_gpe_block_info *gpe_block;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/* No need for spin lock since we are not changing any list elements */
+
+	/* Walk the GPE interrupt levels */
+
+	gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
+	while (gpe_xrupt_block) {
+		gpe_block = gpe_xrupt_block->gpe_block_list_head;
+
+		/* Walk the GPE blocks on this interrupt level */
+
+		while (gpe_block) {
+			if ((&gpe_block->event_info[0] <= gpe_event_info) &&
+			    (&gpe_block->
+			     event_info[((acpi_size) gpe_block->
+					 register_count) * 8] >
+			     gpe_event_info)) {
+				return (TRUE);
+			}
+
+			gpe_block = gpe_block->next;
+		}
+
+		gpe_xrupt_block = gpe_xrupt_block->next;
+	}
+
+	return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_walk_gpe_list
+ *
+ * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
+ *              Context             - Value passed to callback
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Walk the GPE lists.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
+{
+	struct acpi_gpe_block_info *gpe_block;
+	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+	acpi_status status = AE_OK;
+	acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Walk the interrupt level descriptor list */
+
+	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+	while (gpe_xrupt_info) {
+
+		/* Walk all Gpe Blocks attached to this interrupt level */
+
+		gpe_block = gpe_xrupt_info->gpe_block_list_head;
+		while (gpe_block) {
+
+			/* One callback per GPE block */
+
+			status =
+			    gpe_walk_callback(gpe_xrupt_info, gpe_block,
+					      context);
+			if (ACPI_FAILURE(status)) {
+				if (status == AE_CTRL_END) {	/* Callback abort */
+					status = AE_OK;
+				}
+				goto unlock_and_exit;
+			}
+
+			gpe_block = gpe_block->next;
+		}
+
+		gpe_xrupt_info = gpe_xrupt_info->next;
+	}
+
+      unlock_and_exit:
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_delete_gpe_handlers
+ *
+ * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
+ *              gpe_block           - Gpe Block info
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
+ *              Used only prior to termination.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+			    struct acpi_gpe_block_info *gpe_block,
+			    void *context)
+{
+	struct acpi_gpe_event_info *gpe_event_info;
+	u32 i;
+	u32 j;
+
+	ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
+
+	/* Examine each GPE Register within the block */
+
+	for (i = 0; i < gpe_block->register_count; i++) {
+
+		/* Now look at the individual GPEs in this byte register */
+
+		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+			gpe_event_info =
+			    &gpe_block->
+			    event_info[((acpi_size) i *
+					ACPI_GPE_REGISTER_WIDTH) + j];
+
+			if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+			    ACPI_GPE_DISPATCH_HANDLER) {
+				ACPI_FREE(gpe_event_info->dispatch.handler);
+				gpe_event_info->dispatch.handler = NULL;
+				gpe_event_info->flags &=
+				    ~ACPI_GPE_DISPATCH_MASK;
+			}
+		}
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_save_method_info
+ *
+ * PARAMETERS:  Callback from walk_namespace
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
+ *              control method under the _GPE portion of the namespace.
+ *              Extract the name and GPE type from the object, saving this
+ *              information for quick lookup during GPE dispatch
+ *
+ *              The name of each GPE control method is of the form:
+ *              "_Lxx" or "_Exx"
+ *              Where:
+ *                  L      - means that the GPE is level triggered
+ *                  E      - means that the GPE is edge triggered
+ *                  xx     - is the GPE number [in HEX]
+ *
+ ******************************************************************************/
+
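+/*
+ * Illustrative decoding (hypothetical method names): "_L1A" is registered
+ * below as a level-triggered GPE number 0x1A, and "_E02" as an edge-triggered
+ * GPE number 0x02; the trigger type comes from the second character and the
+ * GPE number from the hex value of the last two characters.
+ */
+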
+static acpi_status
+acpi_ev_save_method_info(acpi_handle obj_handle,
+			 u32 level, void *obj_desc, void **return_value)
+{
+	struct acpi_gpe_block_info *gpe_block = (void *)obj_desc;
+	struct acpi_gpe_event_info *gpe_event_info;
+	u32 gpe_number;
+	char name[ACPI_NAME_SIZE + 1];
+	u8 type;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_save_method_info);
+
+	/*
+	 * _Lxx and _Exx GPE method support
+	 *
+	 * 1) Extract the name from the object and convert to a string
+	 */
+	ACPI_MOVE_32_TO_32(name,
+			   &((struct acpi_namespace_node *)obj_handle)->name.
+			   integer);
+	name[ACPI_NAME_SIZE] = 0;
+
+	/*
+	 * 2) Edge/Level determination is based on the 2nd character
+	 *    of the method name
+	 *
+	 * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
+	 * if a _PRW object is found that points to this GPE.
+	 */
+	switch (name[1]) {
+	case 'L':
+		type = ACPI_GPE_LEVEL_TRIGGERED;
+		break;
+
+	case 'E':
+		type = ACPI_GPE_EDGE_TRIGGERED;
+		break;
+
+	default:
+		/* Unknown method type, just ignore it! */
+
+		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+				  "Ignoring unknown GPE method type: %s (name not of form _Lxx or _Exx)",
+				  name));
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Convert the last two characters of the name to the GPE Number */
+
+	gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
+	if (gpe_number == ACPI_UINT32_MAX) {
+
+		/* Conversion failed; invalid method, just ignore it */
+
+		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+				  "Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)",
+				  name));
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Ensure that we have a valid GPE number for this GPE block */
+
+	if ((gpe_number < gpe_block->block_base_number) ||
+	    (gpe_number >=
+	     (gpe_block->block_base_number +
+	      (gpe_block->register_count * 8)))) {
+		/*
+		 * Not valid for this GPE block, just ignore it. However, it may be
+		 * valid for a different GPE block, since GPE0 and GPE1 methods both
+		 * appear under \_GPE.
+		 */
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/*
+	 * Now we can add this information to the gpe_event_info block for use
+	 * during dispatch of this GPE. Default type is RUNTIME, although this may
+	 * change when the _PRW methods are executed later.
+	 */
+	gpe_event_info =
+	    &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
+
+	gpe_event_info->flags = (u8)
+	    (type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME);
+
+	gpe_event_info->dispatch.method_node =
+	    (struct acpi_namespace_node *)obj_handle;
+
+	/* Update enable mask, but don't enable the HW GPE as of yet */
+
+	status = acpi_ev_enable_gpe(gpe_event_info, FALSE);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+			  "Registered GPE method %s as GPE number 0x%.2X\n",
+			  name, gpe_number));
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_match_prw_and_gpe
+ *
+ * PARAMETERS:  Callback from walk_namespace
+ *
+ * RETURN:      Status. NOTE: We ignore errors so that the _PRW walk is
+ *              not aborted on a single _PRW failure.
+ *
+ * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
+ *              Device. Run the _PRW method. If present, extract the GPE
+ *              number and mark the GPE as a WAKE GPE.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
+			  u32 level, void *info, void **return_value)
+{
+	struct acpi_gpe_walk_info *gpe_info = (void *)info;
+	struct acpi_namespace_node *gpe_device;
+	struct acpi_gpe_block_info *gpe_block;
+	struct acpi_namespace_node *target_gpe_device;
+	struct acpi_gpe_event_info *gpe_event_info;
+	union acpi_operand_object *pkg_desc;
+	union acpi_operand_object *obj_desc;
+	u32 gpe_number;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
+
+	/* Check for a _PRW method under this device */
+
+	status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
+					 ACPI_BTYPE_PACKAGE, &pkg_desc);
+	if (ACPI_FAILURE(status)) {
+
+		/* Ignore all errors from _PRW, we don't want to abort the subsystem */
+
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* The returned _PRW package must have at least two elements */
+
+	if (pkg_desc->package.count < 2) {
+		goto cleanup;
+	}
+
+	/* Extract pointers from the input context */
+
+	gpe_device = gpe_info->gpe_device;
+	gpe_block = gpe_info->gpe_block;
+
+	/*
+	 * The _PRW object must return a package, we are only interested in the
+	 * first element
+	 */
+	obj_desc = pkg_desc->package.elements[0];
+
+	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
+		/* Use FADT-defined GPE device (from definition of _PRW) */
+
+		target_gpe_device = acpi_gbl_fadt_gpe_device;
+
+		/* Integer is the GPE number in the FADT described GPE blocks */
+
+		gpe_number = (u32) obj_desc->integer.value;
+	} else if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
+
+		/* Package contains a GPE reference and GPE number within a GPE block */
+
+		if ((obj_desc->package.count < 2) ||
+		    (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[0]) !=
+		     ACPI_TYPE_LOCAL_REFERENCE)
+		    || (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[1]) !=
+			ACPI_TYPE_INTEGER)) {
+			goto cleanup;
+		}
+
+		/* Get GPE block reference and decode */
+
+		target_gpe_device =
+		    obj_desc->package.elements[0]->reference.node;
+		gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
+	} else {
+		/* Unknown type, just ignore it */
+
+		goto cleanup;
+	}
+
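+	/*
+	 * Example (hypothetical _PRW contents): Package () {0x1A, 3} names
+	 * GPE 0x1A within the FADT-defined GPE blocks, while
+	 * Package () {Package () {\_SB.GPEB, 0x05}, 4} names GPE 0x05 within
+	 * the hypothetical GPE block device \_SB.GPEB.
+	 */
+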
+	/*
+	 * Is this GPE within this block?
+	 *
+	 * TRUE if and only if these conditions are true:
+	 *     1) The GPE devices match.
+	 *     2) The GPE index(number) is within the range of the Gpe Block
+	 *          associated with the GPE device.
+	 */
+	if ((gpe_device == target_gpe_device) &&
+	    (gpe_number >= gpe_block->block_base_number) &&
+	    (gpe_number <
+	     gpe_block->block_base_number + (gpe_block->register_count * 8))) {
+		gpe_event_info =
+		    &gpe_block->event_info[gpe_number -
+					   gpe_block->block_base_number];
+
+		/* Mark GPE for WAKE-ONLY but WAKE_DISABLED */
+
+		gpe_event_info->flags &=
+		    ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED);
+
+		status =
+		    acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
+		if (ACPI_FAILURE(status)) {
+			goto cleanup;
+		}
+
+		status =
+		    acpi_ev_update_gpe_enable_masks(gpe_event_info,
+						    ACPI_GPE_DISABLE);
+	}
+
+      cleanup:
+	acpi_ut_remove_reference(pkg_desc);
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_get_gpe_xrupt_block
+ *
+ * PARAMETERS:  interrupt_number     - Interrupt for a GPE block
+ *
+ * RETURN:      A GPE interrupt block
+ *
+ * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
+ *              block per unique interrupt level used for GPEs. Should be
+ *              called only when the GPE lists are semaphore locked and not
+ *              subject to change.
+ *
+ ******************************************************************************/
+
+static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
+							       interrupt_number)
+{
+	struct acpi_gpe_xrupt_info *next_gpe_xrupt;
+	struct acpi_gpe_xrupt_info *gpe_xrupt;
+	acpi_status status;
+	acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
+
+	/* No need for lock since we are not changing any list elements here */
+
+	next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
+	while (next_gpe_xrupt) {
+		if (next_gpe_xrupt->interrupt_number == interrupt_number) {
+			return_PTR(next_gpe_xrupt);
+		}
+
+		next_gpe_xrupt = next_gpe_xrupt->next;
+	}
+
+	/* Not found, must allocate a new xrupt descriptor */
+
+	gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
+	if (!gpe_xrupt) {
+		return_PTR(NULL);
+	}
+
+	gpe_xrupt->interrupt_number = interrupt_number;
+
+	/* Install new interrupt descriptor with spin lock */
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+	if (acpi_gbl_gpe_xrupt_list_head) {
+		next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
+		while (next_gpe_xrupt->next) {
+			next_gpe_xrupt = next_gpe_xrupt->next;
+		}
+
+		next_gpe_xrupt->next = gpe_xrupt;
+		gpe_xrupt->previous = next_gpe_xrupt;
+	} else {
+		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
+	}
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+	/* Install new interrupt handler if not SCI_INT */
+
+	if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
+		status = acpi_os_install_interrupt_handler(interrupt_number,
+							   acpi_ev_gpe_xrupt_handler,
+							   gpe_xrupt);
+		if (ACPI_FAILURE(status)) {
+			ACPI_ERROR((AE_INFO,
+				    "Could not install GPE interrupt handler at level 0x%X",
+				    interrupt_number));
+			return_PTR(NULL);
+		}
+	}
+
+	return_PTR(gpe_xrupt);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_delete_gpe_xrupt
+ *
+ * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
+ *              interrupt handler if not the SCI interrupt.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
+{
+	acpi_status status;
+	acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
+
+	/* We never want to remove the SCI interrupt handler */
+
+	if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
+		gpe_xrupt->gpe_block_list_head = NULL;
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Disable this interrupt */
+
+	status =
+	    acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
+					     acpi_ev_gpe_xrupt_handler);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Unlink the interrupt block with lock */
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+	if (gpe_xrupt->previous) {
+		gpe_xrupt->previous->next = gpe_xrupt->next;
+	} else {
+		/* No previous, update list head */
+
+		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
+	}
+
+	if (gpe_xrupt->next) {
+		gpe_xrupt->next->previous = gpe_xrupt->previous;
+	}
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+	/* Free the block */
+
+	ACPI_FREE(gpe_xrupt);
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_install_gpe_block
+ *
+ * PARAMETERS:  gpe_block               - New GPE block
+ *              interrupt_number        - Xrupt to be associated with this
+ *                                        GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install new GPE block with mutex support
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
+			  u32 interrupt_number)
+{
+	struct acpi_gpe_block_info *next_gpe_block;
+	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
+	acpi_status status;
+	acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(ev_install_gpe_block);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
+	if (!gpe_xrupt_block) {
+		status = AE_NO_MEMORY;
+		goto unlock_and_exit;
+	}
+
+	/* Install the new block at the end of the list with lock */
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+	if (gpe_xrupt_block->gpe_block_list_head) {
+		next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
+		while (next_gpe_block->next) {
+			next_gpe_block = next_gpe_block->next;
+		}
+
+		next_gpe_block->next = gpe_block;
+		gpe_block->previous = next_gpe_block;
+	} else {
+		gpe_xrupt_block->gpe_block_list_head = gpe_block;
+	}
+
+	gpe_block->xrupt_block = gpe_xrupt_block;
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+      unlock_and_exit:
+	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_delete_gpe_block
+ *
+ * PARAMETERS:  gpe_block           - Existing GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove a GPE block
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
+{
+	acpi_status status;
+	acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(ev_delete_gpe_block);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Disable all GPEs in this block */
+
+	status =
+	    acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);
+
+	if (!gpe_block->previous && !gpe_block->next) {
+
+		/* This is the last gpe_block on this interrupt */
+
+		status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
+		if (ACPI_FAILURE(status)) {
+			goto unlock_and_exit;
+		}
+	} else {
+		/* Remove the block on this interrupt with lock */
+
+		flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+		if (gpe_block->previous) {
+			gpe_block->previous->next = gpe_block->next;
+		} else {
+			gpe_block->xrupt_block->gpe_block_list_head =
+			    gpe_block->next;
+		}
+
+		if (gpe_block->next) {
+			gpe_block->next->previous = gpe_block->previous;
+		}
+		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	}
+
+	acpi_current_gpe_count -=
+	    gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH;
+
+	/* Free the gpe_block */
+
+	ACPI_FREE(gpe_block->register_info);
+	ACPI_FREE(gpe_block->event_info);
+	ACPI_FREE(gpe_block);
+
+      unlock_and_exit:
+	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_create_gpe_info_blocks
+ *
+ * PARAMETERS:  gpe_block   - New GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
+{
+	struct acpi_gpe_register_info *gpe_register_info = NULL;
+	struct acpi_gpe_event_info *gpe_event_info = NULL;
+	struct acpi_gpe_event_info *this_event;
+	struct acpi_gpe_register_info *this_register;
+	u32 i;
+	u32 j;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);
+
+	/* Allocate the GPE register information block */
+
+	gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
+						 register_count *
+						 sizeof(struct
+							acpi_gpe_register_info));
+	if (!gpe_register_info) {
+		ACPI_ERROR((AE_INFO,
+			    "Could not allocate the GpeRegisterInfo table"));
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/*
+	 * Allocate the GPE event_info block. There are eight distinct GPEs
+	 * per register. Initialization to zeros is sufficient.
+	 */
+	gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->
+					       register_count *
+					       ACPI_GPE_REGISTER_WIDTH) *
+					      sizeof(struct
+						     acpi_gpe_event_info));
+	if (!gpe_event_info) {
+		ACPI_ERROR((AE_INFO,
+			    "Could not allocate the GpeEventInfo table"));
+		status = AE_NO_MEMORY;
+		goto error_exit;
+	}
+
+	/* Save the new Info arrays in the GPE block */
+
+	gpe_block->register_info = gpe_register_info;
+	gpe_block->event_info = gpe_event_info;
+
+	/*
+	 * Initialize the GPE Register and Event structures. A goal of these
+	 * tables is to hide the fact that there are two separate GPE register
+	 * sets in a given GPE hardware block, the status registers occupy the
+	 * first half, and the enable registers occupy the second half.
+	 */
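+	/*
+	 * Worked example (hypothetical addresses): for a block at 0x1800 with
+	 * register_count == 2, the status registers end up at 0x1800 and
+	 * 0x1801 and the enable registers at 0x1802 and 0x1803, i.e.
+	 * enable_address == block_address + i + register_count.
+	 */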
+	this_register = gpe_register_info;
+	this_event = gpe_event_info;
+
+	for (i = 0; i < gpe_block->register_count; i++) {
+
+		/* Init the register_info for this GPE register (8 GPEs) */
+
+		this_register->base_gpe_number =
+		    (u8) (gpe_block->block_base_number +
+			  (i * ACPI_GPE_REGISTER_WIDTH));
+
+		this_register->status_address.address =
+		    gpe_block->block_address.address + i;
+
+		this_register->enable_address.address =
+		    gpe_block->block_address.address + i +
+		    gpe_block->register_count;
+
+		this_register->status_address.space_id =
+		    gpe_block->block_address.space_id;
+		this_register->enable_address.space_id =
+		    gpe_block->block_address.space_id;
+		this_register->status_address.bit_width =
+		    ACPI_GPE_REGISTER_WIDTH;
+		this_register->enable_address.bit_width =
+		    ACPI_GPE_REGISTER_WIDTH;
+		this_register->status_address.bit_offset = 0;
+		this_register->enable_address.bit_offset = 0;
+
+		/* Init the event_info for each GPE within this register */
+
+		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+			this_event->gpe_number =
+			    (u8) (this_register->base_gpe_number + j);
+			this_event->register_info = this_register;
+			this_event++;
+		}
+
+		/* Disable all GPEs within this register */
+
+		status = acpi_write(0x00, &this_register->enable_address);
+		if (ACPI_FAILURE(status)) {
+			goto error_exit;
+		}
+
+		/* Clear any pending GPE events within this register */
+
+		status = acpi_write(0xFF, &this_register->status_address);
+		if (ACPI_FAILURE(status)) {
+			goto error_exit;
+		}
+
+		this_register++;
+	}
+
+	return_ACPI_STATUS(AE_OK);
+
+      error_exit:
+	if (gpe_register_info) {
+		ACPI_FREE(gpe_register_info);
+	}
+	if (gpe_event_info) {
+		ACPI_FREE(gpe_event_info);
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_create_gpe_block
+ *
+ * PARAMETERS:  gpe_device          - Handle to the parent GPE block
+ *              gpe_block_address   - Address and space_ID
+ *              register_count      - Number of GPE register pairs in the block
+ *              gpe_block_base_number - Starting GPE number for the block
+ *              interrupt_number    - H/W interrupt for the block
+ *              return_gpe_block    - Where the new block descriptor is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
+ *              the block are disabled at exit.
+ *              Note: Assumes namespace is locked.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
+			 struct acpi_generic_address *gpe_block_address,
+			 u32 register_count,
+			 u8 gpe_block_base_number,
+			 u32 interrupt_number,
+			 struct acpi_gpe_block_info **return_gpe_block)
+{
+	acpi_status status;
+	struct acpi_gpe_block_info *gpe_block;
+
+	ACPI_FUNCTION_TRACE(ev_create_gpe_block);
+
+	if (!register_count) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Allocate a new GPE block */
+
+	gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
+	if (!gpe_block) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Initialize the new GPE block */
+
+	gpe_block->node = gpe_device;
+	gpe_block->register_count = register_count;
+	gpe_block->block_base_number = gpe_block_base_number;
+
+	ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
+		    sizeof(struct acpi_generic_address));
+
+	/*
+	 * Create the register_info and event_info sub-structures
+	 * Note: disables and clears all GPEs in the block
+	 */
+	status = acpi_ev_create_gpe_info_blocks(gpe_block);
+	if (ACPI_FAILURE(status)) {
+		ACPI_FREE(gpe_block);
+		return_ACPI_STATUS(status);
+	}
+
+	/* Install the new block in the global lists */
+
+	status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
+	if (ACPI_FAILURE(status)) {
+		ACPI_FREE(gpe_block);
+		return_ACPI_STATUS(status);
+	}
+
+	/* Find all GPE methods (_Lxx, _Exx) for this block */
+
+	status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
+					ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
+					acpi_ev_save_method_info, gpe_block,
+					NULL);
+
+	/* Return the new block */
+
+	if (return_gpe_block) {
+		(*return_gpe_block) = gpe_block;
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+			  "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
+			  (u32) gpe_block->block_base_number,
+			  (u32) (gpe_block->block_base_number +
+				 ((gpe_block->register_count *
+				   ACPI_GPE_REGISTER_WIDTH) - 1)),
+			  gpe_device->name.ascii, gpe_block->register_count,
+			  interrupt_number));
+
+	/* Update global count of currently available GPEs */
+
+	acpi_current_gpe_count += register_count * ACPI_GPE_REGISTER_WIDTH;
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_initialize_gpe_block
+ *
+ * PARAMETERS:  gpe_device          - Handle to the parent GPE block
+ *              gpe_block           - Gpe Block info
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Initialize and enable a GPE block. First find and run any
+ *              _PRW methods associated with the block, then enable the
+ *              appropriate GPEs.
+ *              Note: Assumes namespace is locked.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
+			     struct acpi_gpe_block_info *gpe_block)
+{
+	acpi_status status;
+	struct acpi_gpe_event_info *gpe_event_info;
+	struct acpi_gpe_walk_info gpe_info;
+	u32 wake_gpe_count;
+	u32 gpe_enabled_count;
+	u32 i;
+	u32 j;
+
+	ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
+
+	/* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
+
+	if (!gpe_block) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/*
+	 * Runtime option: Should wake GPEs be enabled at runtime?  The default
+	 * is no, they should only be enabled just as the machine goes to sleep.
+	 */
+	if (acpi_gbl_leave_wake_gpes_disabled) {
+		/*
+		 * Differentiate runtime vs wake GPEs, via the _PRW control methods.
+		 * Each GPE that has one or more _PRWs that reference it is by
+		 * definition a wake GPE and will not be enabled while the machine
+		 * is running.
+		 */
+		gpe_info.gpe_block = gpe_block;
+		gpe_info.gpe_device = gpe_device;
+
+		status =
+		    acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+					   ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
+					   acpi_ev_match_prw_and_gpe, &gpe_info,
+					   NULL);
+	}
+
+	/*
+	 * Enable all GPEs in this block that have these attributes:
+	 * 1) are "runtime" or "run/wake" GPEs, and
+	 * 2) have a corresponding _Lxx or _Exx method
+	 *
+	 * Any other GPEs within this block must be enabled via the acpi_enable_gpe()
+	 * external interface.
+	 */
+	wake_gpe_count = 0;
+	gpe_enabled_count = 0;
+
+	for (i = 0; i < gpe_block->register_count; i++) {
+		for (j = 0; j < 8; j++) {
+
+			/* Get the info block for this particular GPE */
+
+			gpe_event_info =
+			    &gpe_block->
+			    event_info[((acpi_size) i *
+					ACPI_GPE_REGISTER_WIDTH) + j];
+
+			if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+			     ACPI_GPE_DISPATCH_METHOD)
+			    && (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) {
+				gpe_enabled_count++;
+			}
+
+			if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) {
+				wake_gpe_count++;
+			}
+		}
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+			  "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
+			  wake_gpe_count, gpe_enabled_count));
+
+	/* Enable all valid runtime GPEs found above */
+
+	status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block, NULL);
+	if (ACPI_FAILURE(status)) {
+		ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p",
+			    gpe_block));
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_gpe_initialize
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Initialize the GPE data structures
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_gpe_initialize(void)
+{
+	u32 register_count0 = 0;
+	u32 register_count1 = 0;
+	u32 gpe_number_max = 0;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_gpe_initialize);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Initialize the GPE Block(s) defined in the FADT
+	 *
+	 * Why the GPE register block lengths are divided by 2:  From the ACPI Spec,
+	 * section "General-Purpose Event Registers", we have:
+	 *
+	 * "Each register block contains two registers of equal length
+	 *  GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
+	 *  GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
+	 *  The length of the GPE1_STS and GPE1_EN registers is equal to
+	 *  half the GPE1_LEN. If a generic register block is not supported
+	 *  then its respective block pointer and block length values in the
+	 *  FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
+	 *  to be the same size."
+	 */
+
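+	/*
+	 * Worked example (hypothetical FADT values): gpe0_block_length == 8
+	 * bytes gives register_count0 == 4 status/enable register pairs,
+	 * i.e. 4 * 8 == 32 GPEs numbered 0x00 through 0x1F.
+	 */
+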
+	/*
+	 * Determine the maximum GPE number for this machine.
+	 *
+	 * Note: both GPE0 and GPE1 are optional, and either can exist without
+	 * the other.
+	 *
+	 * If EITHER the register length OR the block address are zero, then that
+	 * particular block is not supported.
+	 */
+	if (acpi_gbl_FADT.gpe0_block_length &&
+	    acpi_gbl_FADT.xgpe0_block.address) {
+
+		/* GPE block 0 exists (has both length and address > 0) */
+
+		register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
+
+		gpe_number_max =
+		    (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
+
+		/* Install GPE Block 0 */
+
+		status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
+						  &acpi_gbl_FADT.xgpe0_block,
+						  register_count0, 0,
+						  acpi_gbl_FADT.sci_interrupt,
+						  &acpi_gbl_gpe_fadt_blocks[0]);
+
+		if (ACPI_FAILURE(status)) {
+			ACPI_EXCEPTION((AE_INFO, status,
+					"Could not create GPE Block 0"));
+		}
+	}
+
+	if (acpi_gbl_FADT.gpe1_block_length &&
+	    acpi_gbl_FADT.xgpe1_block.address) {
+
+		/* GPE block 1 exists (has both length and address > 0) */
+
+		register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
+
+		/* Check for GPE0/GPE1 overlap (if both banks exist) */
+
+		if ((register_count0) &&
+		    (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
+			ACPI_ERROR((AE_INFO,
+				    "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1",
+				    gpe_number_max, acpi_gbl_FADT.gpe1_base,
+				    acpi_gbl_FADT.gpe1_base +
+				    ((register_count1 *
+				      ACPI_GPE_REGISTER_WIDTH) - 1)));
+
+			/* Ignore GPE1 block by setting the register count to zero */
+
+			register_count1 = 0;
+		} else {
+			/* Install GPE Block 1 */
+
+			status =
+			    acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
+						     &acpi_gbl_FADT.xgpe1_block,
+						     register_count1,
+						     acpi_gbl_FADT.gpe1_base,
+						     acpi_gbl_FADT.
+						     sci_interrupt,
+						     &acpi_gbl_gpe_fadt_blocks
+						     [1]);
+
+			if (ACPI_FAILURE(status)) {
+				ACPI_EXCEPTION((AE_INFO, status,
+						"Could not create GPE Block 1"));
+			}
+
+			/*
+			 * GPE0 and GPE1 do not have to be contiguous in the GPE number
+			 * space. However, GPE0 always starts at GPE number zero.
+			 */
+			gpe_number_max = acpi_gbl_FADT.gpe1_base +
+			    ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
+		}
+	}
+
+	/* Exit if there are no GPE registers */
+
+	if ((register_count0 + register_count1) == 0) {
+
+		/* GPEs are not required by ACPI, this is OK */
+
+		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+				  "There are no GPE blocks defined in the FADT\n"));
+		status = AE_OK;
+		goto cleanup;
+	}
+
+	/* Check for Max GPE number out-of-range */
+
+	if (gpe_number_max > ACPI_GPE_MAX) {
+		ACPI_ERROR((AE_INFO,
+			    "Maximum GPE number from FADT is too large: 0x%X",
+			    gpe_number_max));
+		status = AE_BAD_VALUE;
+		goto cleanup;
+	}
+
+      cleanup:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
new file mode 100644
index 0000000..5f89305
--- /dev/null
+++ b/drivers/acpi/acpica/evmisc.c
@@ -0,0 +1,621 @@
+/******************************************************************************
+ *
+ * Module Name: evmisc - Miscellaneous event manager support functions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+
+#define _COMPONENT          ACPI_EVENTS
+ACPI_MODULE_NAME("evmisc")
+
+/* Local prototypes */
+static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
+
+static u32 acpi_ev_global_lock_handler(void *context);
+
+static acpi_status acpi_ev_remove_global_lock_handler(void);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_is_notify_object
+ *
+ * PARAMETERS:  Node            - Node to check
+ *
+ * RETURN:      TRUE if notifies allowed on this object
+ *
+ * DESCRIPTION: Check the node type for an object that supports notifies.
+ *
+ *              TBD: This could be replaced by a flag bit in the node.
+ *
+ ******************************************************************************/
+
+u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node)
+{
+	switch (node->type) {
+	case ACPI_TYPE_DEVICE:
+	case ACPI_TYPE_PROCESSOR:
+	case ACPI_TYPE_THERMAL:
+		/*
+		 * These are the ONLY objects that can receive ACPI notifications
+		 */
+		return (TRUE);
+
+	default:
+		return (FALSE);
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_queue_notify_request
+ *
+ * PARAMETERS:  Node            - NS node for the notified object
+ *              notify_value    - Value from the Notify() request
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Dispatch a device notification event to a previously
+ *              installed handler.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
+			     u32 notify_value)
+{
+	union acpi_operand_object *obj_desc;
+	union acpi_operand_object *handler_obj = NULL;
+	union acpi_generic_state *notify_info;
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_NAME(ev_queue_notify_request);
+
+	/*
+	 * For value 3 (Ejection Request), some device method may need to be run.
+	 * For value 2 (Device Wake) if _PRW exists, the _PS0 method may need
+	 *   to be run.
+	 * For value 0x80 (Status Change) on the power button or sleep button,
+	 *   initiate soft-off or sleep operation?
+	 */
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			  "Dispatching Notify on [%4.4s] Node %p Value 0x%2.2X (%s)\n",
+			  acpi_ut_get_node_name(node), node, notify_value,
+			  acpi_ut_get_notify_name(notify_value)));
+
+	/* Get the notify object attached to the NS Node */
+
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (obj_desc) {
+
+		/* We have the notify object, get the right handler */
+
+		switch (node->type) {
+
+			/* Notify allowed only on these types */
+
+		case ACPI_TYPE_DEVICE:
+		case ACPI_TYPE_THERMAL:
+		case ACPI_TYPE_PROCESSOR:
+
+			if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
+				handler_obj =
+				    obj_desc->common_notify.system_notify;
+			} else {
+				handler_obj =
+				    obj_desc->common_notify.device_notify;
+			}
+			break;
+
+		default:
+
+			/* All other types are not supported */
+
+			return (AE_TYPE);
+		}
+	}
+
+	/*
+	 * If there is any handler to run, schedule the dispatcher.
+	 * Check for:
+	 * 1) Global system notify handler
+	 * 2) Global device notify handler
+	 * 3) Per-device notify handler
+	 */
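+	/*
+	 * Example: assuming the usual ACPICA convention that values at or
+	 * below ACPI_MAX_SYS_NOTIFY are "system" notifies, a Notify value of
+	 * 0x02 (Device Wake) is offered to the global system notify handler,
+	 * while 0x80 and above go to the global device notify handler, in
+	 * addition to any per-device handler found above.
+	 */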
+	if ((acpi_gbl_system_notify.handler
+	     && (notify_value <= ACPI_MAX_SYS_NOTIFY))
+	    || (acpi_gbl_device_notify.handler
+		&& (notify_value > ACPI_MAX_SYS_NOTIFY)) || handler_obj) {
+		notify_info = acpi_ut_create_generic_state();
+		if (!notify_info) {
+			return (AE_NO_MEMORY);
+		}
+
+		if (!handler_obj) {
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+					  "Executing system notify handler for Notify (%4.4s, %X) node %p\n",
+					  acpi_ut_get_node_name(node),
+					  notify_value, node));
+		}
+
+		notify_info->common.descriptor_type =
+		    ACPI_DESC_TYPE_STATE_NOTIFY;
+		notify_info->notify.node = node;
+		notify_info->notify.value = (u16) notify_value;
+		notify_info->notify.handler_obj = handler_obj;
+
+		status =
+		    acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
+				    notify_info);
+		if (ACPI_FAILURE(status)) {
+			acpi_ut_delete_generic_state(notify_info);
+		}
+	} else {
+		/* There is no notify handler (per-device or system) for this device */
+
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "No notify handler for Notify (%4.4s, %X) node %p\n",
+				  acpi_ut_get_node_name(node), notify_value,
+				  node));
+	}
+
+	return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_notify_dispatch
+ *
+ * PARAMETERS:  Context         - To be passed to the notify handler
+ *
+ * RETURN:      None.
+ *
+ * DESCRIPTION: Dispatch a device notification event to a previously
+ *              installed handler.
+ *
+ ******************************************************************************/
+
+static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
+{
+	union acpi_generic_state *notify_info =
+	    (union acpi_generic_state *)context;
+	acpi_notify_handler global_handler = NULL;
+	void *global_context = NULL;
+	union acpi_operand_object *handler_obj;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/*
+	 * We will invoke a global notify handler if installed. This is done
+	 * _before_ we invoke the per-device handler attached to the device.
+	 */
+	if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) {
+
+		/* Global system notification handler */
+
+		if (acpi_gbl_system_notify.handler) {
+			global_handler = acpi_gbl_system_notify.handler;
+			global_context = acpi_gbl_system_notify.context;
+		}
+	} else {
+		/* Global driver notification handler */
+
+		if (acpi_gbl_device_notify.handler) {
+			global_handler = acpi_gbl_device_notify.handler;
+			global_context = acpi_gbl_device_notify.context;
+		}
+	}
+
+	/* Invoke the system handler first, if present */
+
+	if (global_handler) {
+		global_handler(notify_info->notify.node,
+			       notify_info->notify.value, global_context);
+	}
+
+	/* Now invoke the per-device handler, if present */
+
+	handler_obj = notify_info->notify.handler_obj;
+	if (handler_obj) {
+		handler_obj->notify.handler(notify_info->notify.node,
+					    notify_info->notify.value,
+					    handler_obj->notify.context);
+	}
+
+	/* All done with the info object */
+
+	acpi_ut_delete_generic_state(notify_info);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_global_lock_handler
+ *
+ * PARAMETERS:  Context         - From thread interface, not used
+ *
+ * RETURN:      ACPI_INTERRUPT_HANDLED
+ *
+ * DESCRIPTION: Invoked directly from the SCI handler when a global lock
+ *              release interrupt occurs. Attempt to acquire the global lock,
+ *              if successful, signal the thread waiting for the lock.
+ *
+ * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
+ * this is not possible for some reason, a separate thread will have to be
+ * scheduled to do this.
+ *
+ ******************************************************************************/
+
+static u32 acpi_ev_global_lock_handler(void *context)
+{
+	u8 acquired = FALSE;
+
+	/*
+	 * Attempt to get the lock.
+	 *
+	 * If we don't get it now, it will be marked pending and we will
+	 * take another interrupt when it becomes free.
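+	 *
+	 * (The Global Lock lives in a field of the FACS; per the ACPI spec,
+	 * bit 0 is the "pending" bit and bit 1 is the "owned" bit, and the
+	 * ACPI_ACQUIRE_GLOBAL_LOCK macro manipulates them atomically with a
+	 * compare-and-exchange loop.)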
+	 */
+	ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
+	if (acquired) {
+
+		/* Got the lock, now wake all threads waiting for it */
+
+		acpi_gbl_global_lock_acquired = TRUE;
+		/* Send a unit to the semaphore */
+
+		if (ACPI_FAILURE
+		    (acpi_os_signal_semaphore
+		     (acpi_gbl_global_lock_semaphore, 1))) {
+			ACPI_ERROR((AE_INFO,
+				    "Could not signal Global Lock semaphore"));
+		}
+	}
+
+	return (ACPI_INTERRUPT_HANDLED);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_init_global_lock_handler
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install a handler for the global lock release event
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_init_global_lock_handler(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
+
+	/* Attempt installation of the global lock handler */
+
+	status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
+						  acpi_ev_global_lock_handler,
+						  NULL);
+
+	/*
+	 * If the global lock does not exist on this platform, the attempt to
+	 * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
+	 * Map to AE_OK, but mark global lock as not present. Any attempt to
+	 * actually use the global lock will be flagged with an error.
+	 */
+	if (status == AE_NO_HARDWARE_RESPONSE) {
+		ACPI_ERROR((AE_INFO,
+			    "No response from Global Lock hardware, disabling lock"));
+
+		acpi_gbl_global_lock_present = FALSE;
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	acpi_gbl_global_lock_present = TRUE;
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_remove_global_lock_handler
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove the handler for the Global Lock
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ev_remove_global_lock_handler(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
+
+	acpi_gbl_global_lock_present = FALSE;
+	status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
+						 acpi_ev_global_lock_handler);
+
+	return_ACPI_STATUS(status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_acquire_global_lock
+ *
+ * PARAMETERS:  Timeout         - Max time to wait for the lock, in millisec.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Attempt to gain ownership of the Global Lock.
+ *
+ * MUTEX:       Interpreter must be locked
+ *
+ * Note: The original implementation allowed multiple threads to "acquire" the
+ * Global Lock, and the OS would hold the lock until the last thread had
+ * released it. However, this could potentially starve the BIOS out of the
+ * lock, especially in the case where there is a tight handshake between the
+ * Embedded Controller driver and the BIOS. Therefore, this implementation
+ * allows only one thread to acquire the HW Global Lock at a time, and makes
+ * the global lock appear as a standard mutex on the OS side.
+ *
+ *****************************************************************************/
+static acpi_thread_id acpi_ev_global_lock_thread_id;
+static int acpi_ev_global_lock_acquired;
+
+acpi_status acpi_ev_acquire_global_lock(u16 timeout)
+{
+	acpi_status status = AE_OK;
+	u8 acquired = FALSE;
+
+	ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
+
+	/*
+	 * Only one thread can acquire the GL at a time, the global_lock_mutex
+	 * enforces this. This interface releases the interpreter if we must wait.
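+	 *
+	 * The zero-timeout attempt below lets a thread that already owns the
+	 * mutex detect the AE_TIME result, recognize itself via the saved
+	 * thread id, and simply bump the recursion count instead of
+	 * deadlocking on a nested acquisition.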
+	 */
+	status = acpi_ex_system_wait_mutex(
+			acpi_gbl_global_lock_mutex->mutex.os_mutex, 0);
+	if (status == AE_TIME) {
+		if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) {
+			acpi_ev_global_lock_acquired++;
+			return AE_OK;
+		}
+	}
+
+	if (ACPI_FAILURE(status)) {
+		status = acpi_ex_system_wait_mutex(
+				acpi_gbl_global_lock_mutex->mutex.os_mutex,
+				timeout);
+	}
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	acpi_ev_global_lock_thread_id = acpi_os_get_thread_id();
+	acpi_ev_global_lock_acquired++;
+
+	/*
+	 * Update the global lock handle and check for wraparound. The handle is
+	 * only used for the external global lock interfaces, but it is updated
+	 * here to properly handle the case where a single thread may acquire the
+	 * lock via both the AML and the acpi_acquire_global_lock interfaces. The
+	 * handle is therefore updated on the first acquire from a given thread
+	 * regardless of where the acquisition request originated.
+	 */
+	acpi_gbl_global_lock_handle++;
+	if (acpi_gbl_global_lock_handle == 0) {
+		acpi_gbl_global_lock_handle = 1;
+	}
+
+	/*
+	 * Make sure that a global lock actually exists. If not, just treat the
+	 * lock as a standard mutex.
+	 */
+	if (!acpi_gbl_global_lock_present) {
+		acpi_gbl_global_lock_acquired = TRUE;
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Attempt to acquire the actual hardware lock */
+
+	ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
+	if (acquired) {
+
+		/* We got the lock */
+
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "Acquired hardware Global Lock\n"));
+
+		acpi_gbl_global_lock_acquired = TRUE;
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/*
+	 * Did not get the lock. The pending bit was set above, and we must now
+	 * wait until we get the global lock released interrupt.
+	 */
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));
+
+	/*
+	 * Wait for handshake with the global lock interrupt handler.
+	 * This interface releases the interpreter if we must wait.
+	 */
+	status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
+					       ACPI_WAIT_FOREVER);
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_release_global_lock
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Releases ownership of the Global Lock.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_release_global_lock(void)
+{
+	u8 pending = FALSE;
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(ev_release_global_lock);
+
+	/* Lock must be already acquired */
+
+	if (!acpi_gbl_global_lock_acquired) {
+		ACPI_WARNING((AE_INFO,
+			      "Cannot release the ACPI Global Lock, it has not been acquired"));
+		return_ACPI_STATUS(AE_NOT_ACQUIRED);
+	}
+
+	acpi_ev_global_lock_acquired--;
+	if (acpi_ev_global_lock_acquired > 0) {
+		return AE_OK;
+	}
+
+	if (acpi_gbl_global_lock_present) {
+
+		/* Allow any thread to release the lock */
+
+		ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending);
+
+		/*
+		 * If the pending bit was set, we must write GBL_RLS to the control
+		 * register
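+	 * (writing GBL_RLS raises an event -- typically an SMI -- so the
+	 * firmware knows the lock has been released)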
+		 */
+		if (pending) {
+			status =
+			    acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE,
+					      1);
+		}
+
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "Released hardware Global Lock\n"));
+	}
+
+	acpi_gbl_global_lock_acquired = FALSE;
+
+	/* Release the local GL mutex */
+	acpi_ev_global_lock_thread_id = NULL;
+	acpi_ev_global_lock_acquired = 0;
+	acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
+	return_ACPI_STATUS(status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_terminate
+ *
+ * PARAMETERS:  none
+ *
+ * RETURN:      none
+ *
+ * DESCRIPTION: Disable events and free memory allocated for table storage.
+ *
+ ******************************************************************************/
+
+void acpi_ev_terminate(void)
+{
+	u32 i;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_terminate);
+
+	if (acpi_gbl_events_initialized) {
+		/*
+		 * Disable all event-related functionality. In all cases, on error,
+		 * print a message but do not abort.
+		 */
+
+		/* Disable all fixed events */
+
+		for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+			status = acpi_disable_event(i, 0);
+			if (ACPI_FAILURE(status)) {
+				ACPI_ERROR((AE_INFO,
+					    "Could not disable fixed event %d",
+					    (u32) i));
+			}
+		}
+
+		/* Disable all GPEs in all GPE blocks */
+
+		status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);
+
+		/* Remove SCI handler */
+
+		status = acpi_ev_remove_sci_handler();
+		if (ACPI_FAILURE(status)) {
+			ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
+		}
+
+		status = acpi_ev_remove_global_lock_handler();
+		if (ACPI_FAILURE(status)) {
+			ACPI_ERROR((AE_INFO,
+				    "Could not remove Global Lock handler"));
+		}
+	}
+
+	/* Deallocate all handler objects installed within GPE info structs */
+
+	status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
+
+	/* Return to original mode if necessary */
+
+	if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) {
+		status = acpi_disable();
+		if (ACPI_FAILURE(status)) {
+			ACPI_WARNING((AE_INFO, "AcpiDisable failed"));
+		}
+	}
+	return_VOID;
+}
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
new file mode 100644
index 0000000..665c088
--- /dev/null
+++ b/drivers/acpi/acpica/evregion.c
@@ -0,0 +1,1070 @@
+/******************************************************************************
+ *
+ * Module Name: evregion - ACPI address_space (op_region) handler dispatch
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+
+#define _COMPONENT          ACPI_EVENTS
+ACPI_MODULE_NAME("evregion")
+
+/* Local prototypes */
+static acpi_status
+acpi_ev_reg_run(acpi_handle obj_handle,
+		u32 level, void *context, void **return_value);
+
+static acpi_status
+acpi_ev_install_handler(acpi_handle obj_handle,
+			u32 level, void *context, void **return_value);
+
+/* These are the address spaces that will get default handlers */
+
+#define ACPI_NUM_DEFAULT_SPACES     4
+
+static u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPACES] = {
+	ACPI_ADR_SPACE_SYSTEM_MEMORY,
+	ACPI_ADR_SPACE_SYSTEM_IO,
+	ACPI_ADR_SPACE_PCI_CONFIG,
+	ACPI_ADR_SPACE_DATA_TABLE
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_install_region_handlers
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Installs the core subsystem default address space handlers.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_install_region_handlers(void)
+{
+	acpi_status status;
+	u32 i;
+
+	ACPI_FUNCTION_TRACE(ev_install_region_handlers);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * All address spaces (PCI Config, EC, SMBus) are scope dependent and
+	 * registration must occur for a specific device.
+	 *
+	 * In the case of the system memory and IO address spaces there is
+	 * currently no device associated with the address space. For these we
+	 * use the root.
+	 *
+	 * We install the default PCI config space handler at the root so that
+	 * this space is immediately available even though the we have not
+	 * this space is immediately available even though we have not
+	 * enumerated all the PCI Root Buses yet. This is to conform to the ACPI
+	 * specification, which states that the PCI config space must always be
+	 * available -- even though we are nowhere near ready to find the PCI root
+	 *
+	 * NOTE: We ignore AE_ALREADY_EXISTS because this means that a handler
+	 * has already been installed (via acpi_install_address_space_handler).
+	 * Similarly for AE_SAME_HANDLER.
+	 */
+	for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
+		status = acpi_ev_install_space_handler(acpi_gbl_root_node,
+						       acpi_gbl_default_address_spaces
+						       [i],
+						       ACPI_DEFAULT_HANDLER,
+						       NULL, NULL);
+		switch (status) {
+		case AE_OK:
+		case AE_SAME_HANDLER:
+		case AE_ALREADY_EXISTS:
+
+			/* These exceptions are all OK */
+
+			status = AE_OK;
+			break;
+
+		default:
+
+			goto unlock_and_exit;
+		}
+	}
+
+      unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_initialize_op_regions
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute _REG methods for all Operation Regions that have
+ *              an installed default region handler.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_initialize_op_regions(void)
+{
+	acpi_status status;
+	u32 i;
+
+	ACPI_FUNCTION_TRACE(ev_initialize_op_regions);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Run the _REG methods for op_regions in each default address space */
+
+	for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
+		/*
+		 * TBD: Make sure handler is the DEFAULT handler, otherwise
+		 * _REG will have already been run.
+		 */
+		status = acpi_ev_execute_reg_methods(acpi_gbl_root_node,
+						     acpi_gbl_default_address_spaces
+						     [i]);
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_execute_reg_method
+ *
+ * PARAMETERS:  region_obj          - Region object
+ *              Function            - Passed to _REG: On (1) or Off (0)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute _REG method for a region
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
+{
+	struct acpi_evaluate_info *info;
+	union acpi_operand_object *args[3];
+	union acpi_operand_object *region_obj2;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_execute_reg_method);
+
+	region_obj2 = acpi_ns_get_secondary_object(region_obj);
+	if (!region_obj2) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	if (region_obj2->extra.method_REG == NULL) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Allocate and initialize the evaluation information block */
+
+	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+	if (!info) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	info->prefix_node = region_obj2->extra.method_REG;
+	info->pathname = NULL;
+	info->parameters = args;
+	info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+	/*
+	 * The _REG method has two arguments:
+	 *
+	 * Arg0 - Integer:
+	 *  Operation region space ID. Same value as region_obj->Region.space_id
+	 *
+	 * Arg1 - Integer:
+	 *  connection status: 1 for connecting the handler, 0 for disconnecting
+	 *  the handler (Passed as a parameter)
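+	 *
+	 * For illustration, a firmware _REG method typically looks like:
+	 *     Method (_REG, 2) { If (LEqual (Arg0, 0x03)) { Store (Arg1, ECAV) } }
+	 * i.e. Arg0 = 0x03 (EmbeddedControl space) and Arg1 = 1 tells the AML
+	 * code that EC accesses are now allowed. (ECAV is a hypothetical name.)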
+	 */
+	args[0] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+	if (!args[0]) {
+		status = AE_NO_MEMORY;
+		goto cleanup1;
+	}
+
+	args[1] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+	if (!args[1]) {
+		status = AE_NO_MEMORY;
+		goto cleanup2;
+	}
+
+	/* Setup the parameter objects */
+
+	args[0]->integer.value = region_obj->region.space_id;
+	args[1]->integer.value = function;
+	args[2] = NULL;
+
+	/* Execute the method, no return value */
+
+	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+			(ACPI_TYPE_METHOD, info->prefix_node, NULL));
+
+	status = acpi_ns_evaluate(info);
+	acpi_ut_remove_reference(args[1]);
+
+      cleanup2:
+	acpi_ut_remove_reference(args[0]);
+
+      cleanup1:
+	ACPI_FREE(info);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_address_space_dispatch
+ *
+ * PARAMETERS:  region_obj          - Internal region object
+ *              Function            - Read or Write operation
+ *              Address             - Where in the space to read or write
+ *              bit_width           - Field width in bits (8, 16, 32, or 64)
+ *              Value               - Pointer to in or out value, must be
+ *                                    full 64-bit acpi_integer
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Dispatch an address space or operation region access to
+ *              a previously installed handler.
+ *
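+ *
+ * NOTE: The handler is invoked (at the call site below) as
+ *       handler(function, address, bit_width, value, handler_context,
+ *       region_context), with Value acting as the in/out data buffer.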
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+			       u32 function,
+			       acpi_physical_address address,
+			       u32 bit_width, acpi_integer * value)
+{
+	acpi_status status;
+	acpi_adr_space_handler handler;
+	acpi_adr_space_setup region_setup;
+	union acpi_operand_object *handler_desc;
+	union acpi_operand_object *region_obj2;
+	void *region_context = NULL;
+
+	ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
+
+	region_obj2 = acpi_ns_get_secondary_object(region_obj);
+	if (!region_obj2) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	/* Ensure that there is a handler associated with this region */
+
+	handler_desc = region_obj->region.handler;
+	if (!handler_desc) {
+		ACPI_ERROR((AE_INFO,
+			    "No handler for Region [%4.4s] (%p) [%s]",
+			    acpi_ut_get_node_name(region_obj->region.node),
+			    region_obj,
+			    acpi_ut_get_region_name(region_obj->region.
+						    space_id)));
+
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	/*
+	 * It may be the case that the region has never been initialized.
+	 * Some types of regions require special init code
+	 */
+	if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
+
+		/* This region has not been initialized yet, do it */
+
+		region_setup = handler_desc->address_space.setup;
+		if (!region_setup) {
+
+			/* No initialization routine, exit with error */
+
+			ACPI_ERROR((AE_INFO,
+				    "No init routine for region(%p) [%s]",
+				    region_obj,
+				    acpi_ut_get_region_name(region_obj->region.
+							    space_id)));
+			return_ACPI_STATUS(AE_NOT_EXIST);
+		}
+
+		/*
+		 * We must exit the interpreter because the region setup will
+		 * potentially execute control methods (for example, the _REG method
+		 * for this region)
+		 */
+		acpi_ex_exit_interpreter();
+
+		status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
+				      handler_desc->address_space.context,
+				      &region_context);
+
+		/* Re-enter the interpreter */
+
+		acpi_ex_enter_interpreter();
+
+		/* Check for failure of the Region Setup */
+
+		if (ACPI_FAILURE(status)) {
+			ACPI_EXCEPTION((AE_INFO, status,
+					"During region initialization: [%s]",
+					acpi_ut_get_region_name(region_obj->
+								region.
+								space_id)));
+			return_ACPI_STATUS(status);
+		}
+
+		/* Region initialization may have been completed by region_setup */
+
+		if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
+			region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE;
+
+			if (region_obj2->extra.region_context) {
+
+				/* The handler for this region was already installed */
+
+				ACPI_FREE(region_context);
+			} else {
+				/*
+				 * Save the returned context for use in all accesses to
+				 * this particular region
+				 */
+				region_obj2->extra.region_context =
+				    region_context;
+			}
+		}
+	}
+
+	/* We have everything we need, we can invoke the address space handler */
+
+	handler = handler_desc->address_space.handler;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+			  "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
+			  &region_obj->region.handler->address_space, handler,
+			  ACPI_FORMAT_NATIVE_UINT(address),
+			  acpi_ut_get_region_name(region_obj->region.
+						  space_id)));
+
+	if (!(handler_desc->address_space.handler_flags &
+	      ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+		/*
+		 * For handlers other than the default (supplied) handlers, we must
+		 * exit the interpreter because the handler *might* block -- we don't
+		 * know what it will do, so we can't hold the lock on the interpreter.
+		 */
+		acpi_ex_exit_interpreter();
+	}
+
+	/* Call the handler */
+
+	status = handler(function, address, bit_width, value,
+			 handler_desc->address_space.context,
+			 region_obj2->extra.region_context);
+
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
+				acpi_ut_get_region_name(region_obj->region.
+							space_id)));
+	}
+
+	if (!(handler_desc->address_space.handler_flags &
+	      ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+		/*
+		 * We just returned from a non-default handler, we must re-enter the
+		 * interpreter
+		 */
+		acpi_ex_enter_interpreter();
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_detach_region
+ *
+ * PARAMETERS:  region_obj          - Region Object
+ *              acpi_ns_is_locked   - Namespace Region Already Locked?
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Break the association between the handler and the region
+ *              (this is a two-way association).
+ *
+ ******************************************************************************/
+
+void
+acpi_ev_detach_region(union acpi_operand_object *region_obj,
+		      u8 acpi_ns_is_locked)
+{
+	union acpi_operand_object *handler_obj;
+	union acpi_operand_object *obj_desc;
+	union acpi_operand_object **last_obj_ptr;
+	acpi_adr_space_setup region_setup;
+	void **region_context;
+	union acpi_operand_object *region_obj2;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_detach_region);
+
+	region_obj2 = acpi_ns_get_secondary_object(region_obj);
+	if (!region_obj2) {
+		return_VOID;
+	}
+	region_context = &region_obj2->extra.region_context;
+
+	/* Get the address handler from the region object */
+
+	handler_obj = region_obj->region.handler;
+	if (!handler_obj) {
+
+		/* This region has no handler, all done */
+
+		return_VOID;
+	}
+
+	/* Find this region in the handler's list */
+
+	obj_desc = handler_obj->address_space.region_list;
+	last_obj_ptr = &handler_obj->address_space.region_list;
+
+	while (obj_desc) {
+
+		/* Is this the correct Region? */
+
+		if (obj_desc == region_obj) {
+			ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+					  "Removing Region %p from address handler %p\n",
+					  region_obj, handler_obj));
+
+			/* This is it, remove it from the handler's list */
+
+			*last_obj_ptr = obj_desc->region.next;
+			obj_desc->region.next = NULL;	/* Must clear field */
+
+			if (acpi_ns_is_locked) {
+				status =
+				    acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+				if (ACPI_FAILURE(status)) {
+					return_VOID;
+				}
+			}
+
+			/* Now stop region accesses by executing the _REG method */
+
+			status = acpi_ev_execute_reg_method(region_obj, 0);
+			if (ACPI_FAILURE(status)) {
+				ACPI_EXCEPTION((AE_INFO, status,
+						"from region _REG, [%s]",
+						acpi_ut_get_region_name
+						(region_obj->region.space_id)));
+			}
+
+			if (acpi_ns_is_locked) {
+				status =
+				    acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+				if (ACPI_FAILURE(status)) {
+					return_VOID;
+				}
+			}
+
+			/*
+			 * If the region has been activated, call the setup handler with
+			 * the deactivate notification
+			 */
+			if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) {
+				region_setup = handler_obj->address_space.setup;
+				status =
+				    region_setup(region_obj,
+						 ACPI_REGION_DEACTIVATE,
+						 handler_obj->address_space.
+						 context, region_context);
+
+				/* Init routine may fail, just ignore errors */
+
+				if (ACPI_FAILURE(status)) {
+					ACPI_EXCEPTION((AE_INFO, status,
+							"from region handler - deactivate, [%s]",
+							acpi_ut_get_region_name
+							(region_obj->region.
+							 space_id)));
+				}
+
+				region_obj->region.flags &=
+				    ~(AOPOBJ_SETUP_COMPLETE);
+			}
+
+			/*
+			 * Remove handler reference in the region
+			 *
+			 * NOTE: this doesn't mean that the region goes away, the region
+			 * is just inaccessible as indicated to the _REG method
+			 *
+			 * If the region is on the handler's list, this must be the
+			 * region's handler
+			 */
+			region_obj->region.handler = NULL;
+			acpi_ut_remove_reference(handler_obj);
+
+			return_VOID;
+		}
+
+		/* Walk the linked list of handlers */
+
+		last_obj_ptr = &obj_desc->region.next;
+		obj_desc = obj_desc->region.next;
+	}
+
+	/* If we get here, the region was not in the handler's region list */
+
+	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+			  "Cannot remove region %p from address handler %p\n",
+			  region_obj, handler_obj));
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_attach_region
+ *
+ * PARAMETERS:  handler_obj         - Handler Object
+ *              region_obj          - Region Object
+ *              acpi_ns_is_locked   - Namespace Region Already Locked?
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Create the association between the handler and the region
+ *              (this is a two-way association).
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_attach_region(union acpi_operand_object *handler_obj,
+		      union acpi_operand_object *region_obj,
+		      u8 acpi_ns_is_locked)
+{
+
+	ACPI_FUNCTION_TRACE(ev_attach_region);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+			  "Adding Region [%4.4s] %p to address handler %p [%s]\n",
+			  acpi_ut_get_node_name(region_obj->region.node),
+			  region_obj, handler_obj,
+			  acpi_ut_get_region_name(region_obj->region.
+						  space_id)));
+
+	/* Link this region to the front of the handler's list */
+
+	region_obj->region.next = handler_obj->address_space.region_list;
+	handler_obj->address_space.region_list = region_obj;
+
+	/* Install the region's handler */
+
+	if (region_obj->region.handler) {
+		return_ACPI_STATUS(AE_ALREADY_EXISTS);
+	}
+
+	region_obj->region.handler = handler_obj;
+	acpi_ut_add_reference(handler_obj);
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_install_handler
+ *
+ * PARAMETERS:  walk_namespace callback
+ *
+ * DESCRIPTION: This routine installs an address handler into objects that are
+ *              of type Region or Device.
+ *
+ *              If the Object is a Device, and the device has a handler of
+ *              the same type then the search is terminated in that branch.
+ *
+ *              This is because the existing handler is closer (in namespace
+ *              proximity) to any regions below that device than the one we
+ *              are trying to install.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_install_handler(acpi_handle obj_handle,
+			u32 level, void *context, void **return_value)
+{
+	union acpi_operand_object *handler_obj;
+	union acpi_operand_object *next_handler_obj;
+	union acpi_operand_object *obj_desc;
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	ACPI_FUNCTION_NAME(ev_install_handler);
+
+	handler_obj = (union acpi_operand_object *)context;
+
+	/* Parameter validation */
+
+	if (!handler_obj) {
+		return (AE_OK);
+	}
+
+	/* Convert and validate the device handle */
+
+	node = acpi_ns_map_handle_to_node(obj_handle);
+	if (!node) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	/*
+	 * We only care about regions and objects that are allowed to have
+	 * address space handlers
+	 */
+	if ((node->type != ACPI_TYPE_DEVICE) &&
+	    (node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
+		return (AE_OK);
+	}
+
+	/* Check for an existing internal object */
+
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (!obj_desc) {
+
+		/* No object, just exit */
+
+		return (AE_OK);
+	}
+
+	/* Devices are handled differently from regions */
+
+	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_DEVICE) {
+
+		/* Check if this Device already has a handler for this address space */
+
+		next_handler_obj = obj_desc->device.handler;
+		while (next_handler_obj) {
+
+			/* Found a handler, is it for the same address space? */
+
+			if (next_handler_obj->address_space.space_id ==
+			    handler_obj->address_space.space_id) {
+				ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+						  "Found handler for region [%s] in device %p(%p) handler %p\n",
+						  acpi_ut_get_region_name
+						  (handler_obj->address_space.
+						   space_id), obj_desc,
+						  next_handler_obj,
+						  handler_obj));
+
+				/*
+				 * Since the object we found it on was a device, then it
+				 * means that someone has already installed a handler for
+				 * the branch of the namespace from this device on. Just
+				 * bail out telling the walk routine to not traverse this
+				 * branch. This preserves the scoping rule for handlers.
+				 */
+				return (AE_CTRL_DEPTH);
+			}
+
+			/* Walk the linked list of handlers attached to this device */
+
+			next_handler_obj = next_handler_obj->address_space.next;
+		}
+
+		/*
+		 * As long as the device didn't have a handler for this space we
+		 * don't care about it. We just ignore it and proceed.
+		 */
+		return (AE_OK);
+	}
+
+	/* Object is a Region */
+
+	if (obj_desc->region.space_id != handler_obj->address_space.space_id) {
+
+		/* This region is for a different address space, just ignore it */
+
+		return (AE_OK);
+	}
+
+	/*
+	 * Now we have a region and it is for the handler's address space type.
+	 *
+	 * First disconnect region for any previous handler (if any)
+	 */
+	acpi_ev_detach_region(obj_desc, FALSE);
+
+	/* Connect the region to the new handler */
+
+	status = acpi_ev_attach_region(handler_obj, obj_desc, FALSE);
+	return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_install_space_handler
+ *
+ * PARAMETERS:  Node            - Namespace node for the device
+ *              space_id        - The address space ID
+ *              Handler         - Address of the handler
+ *              Setup           - Address of the setup function
+ *              Context         - Value passed to the handler on each access
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install a handler for all op_regions of a given space_id.
+ *              Assumes namespace is locked
+ *
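+ * NOTE: Kernel drivers normally reach this path through the public
+ *       acpi_install_address_space_handler() interface; for example, the
+ *       EC driver registers a handler for the EmbeddedControl space on its
+ *       device node. (Example for illustration only; see the public API for
+ *       the exact calling convention.)
+ *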
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_install_space_handler(struct acpi_namespace_node * node,
+			      acpi_adr_space_type space_id,
+			      acpi_adr_space_handler handler,
+			      acpi_adr_space_setup setup, void *context)
+{
+	union acpi_operand_object *obj_desc;
+	union acpi_operand_object *handler_obj;
+	acpi_status status;
+	acpi_object_type type;
+	u8 flags = 0;
+
+	ACPI_FUNCTION_TRACE(ev_install_space_handler);
+
+	/*
+	 * This registration is valid only for the types below and the root. This
+	 * is where the default handlers get placed.
+	 */
+	if ((node->type != ACPI_TYPE_DEVICE) &&
+	    (node->type != ACPI_TYPE_PROCESSOR) &&
+	    (node->type != ACPI_TYPE_THERMAL) && (node != acpi_gbl_root_node)) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	if (handler == ACPI_DEFAULT_HANDLER) {
+		flags = ACPI_ADDR_HANDLER_DEFAULT_INSTALLED;
+
+		switch (space_id) {
+		case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+			handler = acpi_ex_system_memory_space_handler;
+			setup = acpi_ev_system_memory_region_setup;
+			break;
+
+		case ACPI_ADR_SPACE_SYSTEM_IO:
+			handler = acpi_ex_system_io_space_handler;
+			setup = acpi_ev_io_space_region_setup;
+			break;
+
+		case ACPI_ADR_SPACE_PCI_CONFIG:
+			handler = acpi_ex_pci_config_space_handler;
+			setup = acpi_ev_pci_config_region_setup;
+			break;
+
+		case ACPI_ADR_SPACE_CMOS:
+			handler = acpi_ex_cmos_space_handler;
+			setup = acpi_ev_cmos_region_setup;
+			break;
+
+		case ACPI_ADR_SPACE_PCI_BAR_TARGET:
+			handler = acpi_ex_pci_bar_space_handler;
+			setup = acpi_ev_pci_bar_region_setup;
+			break;
+
+		case ACPI_ADR_SPACE_DATA_TABLE:
+			handler = acpi_ex_data_table_space_handler;
+			setup = NULL;
+			break;
+
+		default:
+			status = AE_BAD_PARAMETER;
+			goto unlock_and_exit;
+		}
+	}
+
+	/* If the caller hasn't specified a setup routine, use the default */
+
+	if (!setup) {
+		setup = acpi_ev_default_region_setup;
+	}
+
+	/* Check for an existing internal object */
+
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (obj_desc) {
+		/*
+		 * The attached device object already exists. Make sure the handler
+		 * is not already installed.
+		 */
+		handler_obj = obj_desc->device.handler;
+
+		/* Walk the handler list for this device */
+
+		while (handler_obj) {
+
+			/* Same space_id indicates a handler already installed */
+
+			if (handler_obj->address_space.space_id == space_id) {
+				if (handler_obj->address_space.handler ==
+				    handler) {
+					/*
+					 * It is (relatively) OK to attempt to install the SAME
+					 * handler twice. This can easily happen with the
+					 * PCI_Config space.
+					 */
+					status = AE_SAME_HANDLER;
+					goto unlock_and_exit;
+				} else {
+					/* A handler is already installed */
+
+					status = AE_ALREADY_EXISTS;
+				}
+				goto unlock_and_exit;
+			}
+
+			/* Walk the linked list of handlers */
+
+			handler_obj = handler_obj->address_space.next;
+		}
+	} else {
+		ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+				  "Creating object on Device %p while installing handler\n",
+				  node));
+
+		/* obj_desc does not exist, create one */
+
+		if (node->type == ACPI_TYPE_ANY) {
+			type = ACPI_TYPE_DEVICE;
+		} else {
+			type = node->type;
+		}
+
+		obj_desc = acpi_ut_create_internal_object(type);
+		if (!obj_desc) {
+			status = AE_NO_MEMORY;
+			goto unlock_and_exit;
+		}
+
+		/* Init new descriptor */
+
+		obj_desc->common.type = (u8) type;
+
+		/* Attach the new object to the Node */
+
+		status = acpi_ns_attach_object(node, obj_desc, type);
+
+		/* Remove local reference to the object */
+
+		acpi_ut_remove_reference(obj_desc);
+
+		if (ACPI_FAILURE(status)) {
+			goto unlock_and_exit;
+		}
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+			  "Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n",
+			  acpi_ut_get_region_name(space_id), space_id,
+			  acpi_ut_get_node_name(node), node, obj_desc));
+
+	/*
+	 * Install the handler
+	 *
+	 * At this point there is no existing handler. Just allocate the object
+	 * for the handler and link it into the list.
+	 */
+	handler_obj =
+	    acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_ADDRESS_HANDLER);
+	if (!handler_obj) {
+		status = AE_NO_MEMORY;
+		goto unlock_and_exit;
+	}
+
+	/* Init handler obj */
+
+	handler_obj->address_space.space_id = (u8) space_id;
+	handler_obj->address_space.handler_flags = flags;
+	handler_obj->address_space.region_list = NULL;
+	handler_obj->address_space.node = node;
+	handler_obj->address_space.handler = handler;
+	handler_obj->address_space.context = context;
+	handler_obj->address_space.setup = setup;
+
+	/* Install at head of Device.address_space list */
+
+	handler_obj->address_space.next = obj_desc->device.handler;
+
+	/*
+	 * The Device object is the first reference on the handler_obj.
+	 * Each region that uses the handler adds a reference.
+	 */
+	obj_desc->device.handler = handler_obj;
+
+	/*
+	 * Walk the namespace finding all of the regions this
+	 * handler will manage.
+	 *
+	 * Start at the device and search the branch toward
+	 * the leaf nodes until either the leaf is encountered or
+	 * a device is detected that has an address handler of the
+	 * same type.
+	 *
+	 * In either case, back up and search down the remainder
+	 * of the branch
+	 */
+	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
+					ACPI_NS_WALK_UNLOCK,
+					acpi_ev_install_handler, handler_obj,
+					NULL);
+
+      unlock_and_exit:
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_execute_reg_methods
+ *
+ * PARAMETERS:  Node            - Namespace node for the device
+ *              space_id        - The address space ID
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Run all _REG methods for the input Space ID;
+ *              Note: assumes namespace is locked, or system init time.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
+			    acpi_adr_space_type space_id)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
+
+	/*
+	 * Run all _REG methods for all Operation Regions for this space ID. This
+	 * is a separate walk in order to handle any interdependencies between
+	 * regions and _REG methods. (i.e. handlers must be installed for all
+	 * regions of this Space ID before we can run any _REG methods)
+	 */
+	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
+					ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
+					&space_id, NULL);
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_reg_run
+ *
+ * PARAMETERS:  walk_namespace callback
+ *
+ * DESCRIPTION: Run _REG method for region objects of the requested space_id
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_reg_run(acpi_handle obj_handle,
+		u32 level, void *context, void **return_value)
+{
+	union acpi_operand_object *obj_desc;
+	struct acpi_namespace_node *node;
+	acpi_adr_space_type space_id;
+	acpi_status status;
+
+	space_id = *ACPI_CAST_PTR(acpi_adr_space_type, context);
+
+	/* Convert and validate the device handle */
+
+	node = acpi_ns_map_handle_to_node(obj_handle);
+	if (!node) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	/*
+	 * We only care about regions and objects that are allowed to have address
+	 * space handlers
+	 */
+	if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
+		return (AE_OK);
+	}
+
+	/* Check for an existing internal object */
+
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (!obj_desc) {
+
+		/* No object, just exit */
+
+		return (AE_OK);
+	}
+
+	/* Object is a Region */
+
+	if (obj_desc->region.space_id != space_id) {
+
+		/* This region is for a different address space, just ignore it */
+
+		return (AE_OK);
+	}
+
+	status = acpi_ev_execute_reg_method(obj_desc, 1);
+	return (status);
+}
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
new file mode 100644
index 0000000..f3f1fb4
--- /dev/null
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -0,0 +1,684 @@
+/******************************************************************************
+ *
+ * Module Name: evrgnini - ACPI address_space (op_region) init
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_EVENTS
+ACPI_MODULE_NAME("evrgnini")
+
+/* Local prototypes */
+static u8 acpi_ev_match_pci_root_bridge(char *id);
+
+static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_system_memory_region_setup
+ *
+ * PARAMETERS:  Handle              - Region we are interested in
+ *              Function            - Start or stop
+ *              handler_context     - Address space handler context
+ *              region_context      - Region specific context
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Setup a system_memory operation region
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_system_memory_region_setup(acpi_handle handle,
+				   u32 function,
+				   void *handler_context, void **region_context)
+{
+	union acpi_operand_object *region_desc =
+	    (union acpi_operand_object *)handle;
+	struct acpi_mem_space_context *local_region_context;
+
+	ACPI_FUNCTION_TRACE(ev_system_memory_region_setup);
+
+	if (function == ACPI_REGION_DEACTIVATE) {
+		if (*region_context) {
+			local_region_context =
+			    (struct acpi_mem_space_context *)*region_context;
+
+			/* Delete a cached mapping if present */
+
+			if (local_region_context->mapped_length) {
+				acpi_os_unmap_memory(local_region_context->
+						     mapped_logical_address,
+						     local_region_context->
+						     mapped_length);
+			}
+			ACPI_FREE(local_region_context);
+			*region_context = NULL;
+		}
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Create a new context */
+
+	local_region_context =
+	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_mem_space_context));
+	if (!(local_region_context)) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Save the region length and address for use in the handler */
+
+	local_region_context->length = region_desc->region.length;
+	local_region_context->address = region_desc->region.address;
+
+	*region_context = local_region_context;
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_io_space_region_setup
+ *
+ * PARAMETERS:  Handle              - Region we are interested in
+ *              Function            - Start or stop
+ *              handler_context     - Address space handler context
+ *              region_context      - Region specific context
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Setup an IO operation region
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_io_space_region_setup(acpi_handle handle,
+			      u32 function,
+			      void *handler_context, void **region_context)
+{
+	ACPI_FUNCTION_TRACE(ev_io_space_region_setup);
+
+	if (function == ACPI_REGION_DEACTIVATE) {
+		*region_context = NULL;
+	} else {
+		*region_context = handler_context;
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_pci_config_region_setup
+ *
+ * PARAMETERS:  Handle              - Region we are interested in
+ *              Function            - Start or stop
+ *              handler_context     - Address space handler context
+ *              region_context      - Region specific context
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Setup a PCI_Config operation region
+ *
+ * MUTEX:       Assumes namespace is not locked
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_pci_config_region_setup(acpi_handle handle,
+				u32 function,
+				void *handler_context, void **region_context)
+{
+	acpi_status status = AE_OK;
+	acpi_integer pci_value;
+	struct acpi_pci_id *pci_id = *region_context;
+	union acpi_operand_object *handler_obj;
+	struct acpi_namespace_node *parent_node;
+	struct acpi_namespace_node *pci_root_node;
+	struct acpi_namespace_node *pci_device_node;
+	union acpi_operand_object *region_obj =
+	    (union acpi_operand_object *)handle;
+
+	ACPI_FUNCTION_TRACE(ev_pci_config_region_setup);
+
+	handler_obj = region_obj->region.handler;
+	if (!handler_obj) {
+		/*
+		 * No installed handler. This shouldn't happen because the dispatch
+		 * routine checks before we get here, but we check again just in case.
+		 */
+		ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+				  "Attempting to init a region %p, with no handler\n",
+				  region_obj));
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	*region_context = NULL;
+	if (function == ACPI_REGION_DEACTIVATE) {
+		if (pci_id) {
+			ACPI_FREE(pci_id);
+		}
+		return_ACPI_STATUS(status);
+	}
+
+	parent_node = acpi_ns_get_parent_node(region_obj->region.node);
+
+	/*
+	 * Get the _SEG and _BBN values from the device upon which the handler
+	 * is installed.
+	 *
+	 * We need to get the _SEG and _BBN objects relative to the PCI BUS device.
+	 * This is the device the handler has been registered to handle.
+	 */
+
+	/*
+	 * If the address_space.Node is still pointing to the root, we need
+	 * to scan upward for a PCI Root bridge and re-associate the op_region
+	 * handlers with that device.
+	 */
+	if (handler_obj->address_space.node == acpi_gbl_root_node) {
+
+		/* Start search from the parent object */
+
+		pci_root_node = parent_node;
+		while (pci_root_node != acpi_gbl_root_node) {
+
+			/* Get the _HID/_CID in order to detect a root_bridge */
+
+			if (acpi_ev_is_pci_root_bridge(pci_root_node)) {
+
+				/* Install a handler for this PCI root bridge */
+
+				status =
+				    acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
+				if (ACPI_FAILURE(status)) {
+					if (status == AE_SAME_HANDLER) {
+						/*
+						 * It is OK if the handler is already installed on the
+						 * root bridge. Still need to return a context object
+						 * for the new PCI_Config operation region, however.
+						 */
+						status = AE_OK;
+					} else {
+						ACPI_EXCEPTION((AE_INFO, status,
+								"Could not install PciConfig handler for Root Bridge %4.4s",
+								acpi_ut_get_node_name
+								(pci_root_node)));
+					}
+				}
+				break;
+			}
+
+			pci_root_node = acpi_ns_get_parent_node(pci_root_node);
+		}
+
+		/* PCI root bridge not found, use namespace root node */
+	} else {
+		pci_root_node = handler_obj->address_space.node;
+	}
+
+	/*
+	 * If this region is now initialized, we are done.
+	 * (install_address_space_handler could have initialized it)
+	 */
+	if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Region is still not initialized. Create a new context */
+
+	pci_id = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pci_id));
+	if (!pci_id) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/*
+	 * For PCI_Config space access, we need the segment, bus, device and
+	 * function numbers. Acquire them here.
+	 *
+	 * Find the parent device object. (This allows the operation region to be
+	 * within a subscope under the device, such as a control method.)
+	 */
+	pci_device_node = region_obj->region.node;
+	while (pci_device_node && (pci_device_node->type != ACPI_TYPE_DEVICE)) {
+		pci_device_node = acpi_ns_get_parent_node(pci_device_node);
+	}
+
+	if (!pci_device_node) {
+		ACPI_FREE(pci_id);
+		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+	}
+
+	/*
+	 * Get the PCI device and function numbers from the _ADR object contained
+	 * in the parent's scope.
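+	 *
+	 * _ADR packs the device number in the high word and the function
+	 * number in the low word of its low dword; e.g. a value of 0x001F0003
+	 * means device 0x1F, function 3.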
+	 */
+	status =
+	    acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, pci_device_node,
+					    &pci_value);
+
+	/*
+	 * The default is zero, and since the allocation above zeroed the data,
+	 * just do nothing on failure.
+	 */
+	if (ACPI_SUCCESS(status)) {
+		pci_id->device = ACPI_HIWORD(ACPI_LODWORD(pci_value));
+		pci_id->function = ACPI_LOWORD(ACPI_LODWORD(pci_value));
+	}
+
+	/* The PCI segment number comes from the _SEG method */
+
+	status =
+	    acpi_ut_evaluate_numeric_object(METHOD_NAME__SEG, pci_root_node,
+					    &pci_value);
+	if (ACPI_SUCCESS(status)) {
+		pci_id->segment = ACPI_LOWORD(pci_value);
+	}
+
+	/* The PCI bus number comes from the _BBN method */
+
+	status =
+	    acpi_ut_evaluate_numeric_object(METHOD_NAME__BBN, pci_root_node,
+					    &pci_value);
+	if (ACPI_SUCCESS(status)) {
+		pci_id->bus = ACPI_LOWORD(pci_value);
+	}
+
+	/* Complete this device's pci_id */
+
+	acpi_os_derive_pci_id(pci_root_node, region_obj->region.node, &pci_id);
+
+	*region_context = pci_id;
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_match_pci_root_bridge
+ *
+ * PARAMETERS:  Id              - The HID/CID in string format
+ *
+ * RETURN:      TRUE if the Id is a match for a PCI/PCI-Express Root Bridge
+ *
+ * DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID.
+ *
+ ******************************************************************************/
+
+static u8 acpi_ev_match_pci_root_bridge(char *id)
+{
+
+	/*
+	 * Check if this is a PCI root.
+	 * ACPI 3.0+: check for a PCI Express root also.
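+	 * (These HID strings are normally "PNP0A03" for PCI and "PNP0A08"
+	 * for PCI Express root bridges.)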
+	 */
+	if (!(ACPI_STRNCMP(id,
+			   PCI_ROOT_HID_STRING,
+			   sizeof(PCI_ROOT_HID_STRING))) ||
+	    !(ACPI_STRNCMP(id,
+			   PCI_EXPRESS_ROOT_HID_STRING,
+			   sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
+		return (TRUE);
+	}
+
+	return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_is_pci_root_bridge
+ *
+ * PARAMETERS:  Node            - Device node being examined
+ *
+ * RETURN:      TRUE if device is a PCI/PCI-Express Root Bridge
+ *
+ * DESCRIPTION: Determine if the input device represents a PCI Root Bridge by
+ *              examining the _HID and _CID for the device.
+ *
+ ******************************************************************************/
+
+static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
+{
+	acpi_status status;
+	struct acpica_device_id hid;
+	struct acpi_compatible_id_list *cid;
+	u32 i;
+
+	/* Get the _HID and check for a PCI Root Bridge */
+
+	status = acpi_ut_execute_HID(node, &hid);
+	if (ACPI_FAILURE(status)) {
+		return (FALSE);
+	}
+
+	if (acpi_ev_match_pci_root_bridge(hid.value)) {
+		return (TRUE);
+	}
+
+	/* The _HID did not match. Get the _CID and check for a PCI Root Bridge */
+
+	status = acpi_ut_execute_CID(node, &cid);
+	if (ACPI_FAILURE(status)) {
+		return (FALSE);
+	}
+
+	/* Check all _CIDs in the returned list */
+
+	for (i = 0; i < cid->count; i++) {
+		if (acpi_ev_match_pci_root_bridge(cid->id[i].value)) {
+			ACPI_FREE(cid);
+			return (TRUE);
+		}
+	}
+
+	ACPI_FREE(cid);
+	return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_pci_bar_region_setup
+ *
+ * PARAMETERS:  Handle              - Region we are interested in
+ *              Function            - Start or stop
+ *              handler_context     - Address space handler context
+ *              region_context      - Region specific context
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Setup a PCI_BAR operation region
+ *
+ * MUTEX:       Assumes namespace is not locked
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_pci_bar_region_setup(acpi_handle handle,
+			     u32 function,
+			     void *handler_context, void **region_context)
+{
+	ACPI_FUNCTION_TRACE(ev_pci_bar_region_setup);
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_cmos_region_setup
+ *
+ * PARAMETERS:  Handle              - Region we are interested in
+ *              Function            - Start or stop
+ *              handler_context     - Address space handler context
+ *              region_context      - Region specific context
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Setup a CMOS operation region
+ *
+ * MUTEX:       Assumes namespace is not locked
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_cmos_region_setup(acpi_handle handle,
+			  u32 function,
+			  void *handler_context, void **region_context)
+{
+	ACPI_FUNCTION_TRACE(ev_cmos_region_setup);
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_default_region_setup
+ *
+ * PARAMETERS:  Handle              - Region we are interested in
+ *              Function            - Start or stop
+ *              handler_context     - Address space handler context
+ *              region_context      - Region specific context
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Default region initialization
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_default_region_setup(acpi_handle handle,
+			     u32 function,
+			     void *handler_context, void **region_context)
+{
+	ACPI_FUNCTION_TRACE(ev_default_region_setup);
+
+	if (function == ACPI_REGION_DEACTIVATE) {
+		*region_context = NULL;
+	} else {
+		*region_context = handler_context;
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_initialize_region
+ *
+ * PARAMETERS:  region_obj      - Region we are initializing
+ *              acpi_ns_locked  - Is namespace locked?
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Initializes the region, finds any _REG methods and saves them
+ *              for execution at a later time
+ *
+ *              Get the appropriate address space handler for a newly
+ *              created region.
+ *
+ *              This also performs address space specific initialization. For
+ *              example, PCI regions must have an _ADR object that contains
+ *              a PCI address in the scope of the definition. This address is
+ *              required to perform an access to PCI config space.
+ *
+ * MUTEX:       Interpreter should be unlocked, because we may run the _REG
+ *              method for this region.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_initialize_region(union acpi_operand_object *region_obj,
+			  u8 acpi_ns_locked)
+{
+	union acpi_operand_object *handler_obj;
+	union acpi_operand_object *obj_desc;
+	acpi_adr_space_type space_id;
+	struct acpi_namespace_node *node;
+	acpi_status status;
+	struct acpi_namespace_node *method_node;
+	acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG;
+	union acpi_operand_object *region_obj2;
+
+	ACPI_FUNCTION_TRACE_U32(ev_initialize_region, acpi_ns_locked);
+
+	if (!region_obj) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	if (region_obj->common.flags & AOPOBJ_OBJECT_INITIALIZED) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	region_obj2 = acpi_ns_get_secondary_object(region_obj);
+	if (!region_obj2) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	node = acpi_ns_get_parent_node(region_obj->region.node);
+	space_id = region_obj->region.space_id;
+
+	/* Setup defaults */
+
+	region_obj->region.handler = NULL;
+	region_obj2->extra.method_REG = NULL;
+	region_obj->common.flags &= ~(AOPOBJ_SETUP_COMPLETE);
+	region_obj->common.flags |= AOPOBJ_OBJECT_INITIALIZED;
+
+	/* Find any "_REG" method associated with this region definition */
+
+	status =
+	    acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD,
+				     &method_node);
+	if (ACPI_SUCCESS(status)) {
+		/*
+		 * The _REG method is optional and there can be only one per region
+		 * definition. This will be executed when the handler is attached
+		 * or removed
+		 */
+		region_obj2->extra.method_REG = method_node;
+	}
+
+	/*
+	 * The following loop depends upon the root Node having no parent
+	 * ie: acpi_gbl_root_node->parent_entry being set to NULL
+	 */
+	while (node) {
+
+		/* Check to see if a handler exists */
+
+		handler_obj = NULL;
+		obj_desc = acpi_ns_get_attached_object(node);
+		if (obj_desc) {
+
+			/* Can only be a handler if the object exists */
+
+			switch (node->type) {
+			case ACPI_TYPE_DEVICE:
+
+				handler_obj = obj_desc->device.handler;
+				break;
+
+			case ACPI_TYPE_PROCESSOR:
+
+				handler_obj = obj_desc->processor.handler;
+				break;
+
+			case ACPI_TYPE_THERMAL:
+
+				handler_obj = obj_desc->thermal_zone.handler;
+				break;
+
+			default:
+				/* Ignore other objects */
+				break;
+			}
+
+			while (handler_obj) {
+
+				/* Is this handler of the correct type? */
+
+				if (handler_obj->address_space.space_id ==
+				    space_id) {
+
+					/* Found correct handler */
+
+					ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+							  "Found handler %p for region %p in obj %p\n",
+							  handler_obj,
+							  region_obj,
+							  obj_desc));
+
+					status =
+					    acpi_ev_attach_region(handler_obj,
+								  region_obj,
+								  acpi_ns_locked);
+
+					/*
+					 * Tell all users that this region is usable by running the _REG
+					 * method
+					 */
+					if (acpi_ns_locked) {
+						status =
+						    acpi_ut_release_mutex
+						    (ACPI_MTX_NAMESPACE);
+						if (ACPI_FAILURE(status)) {
+							return_ACPI_STATUS
+							    (status);
+						}
+					}
+
+					status =
+					    acpi_ev_execute_reg_method
+					    (region_obj, 1);
+
+					if (acpi_ns_locked) {
+						status =
+						    acpi_ut_acquire_mutex
+						    (ACPI_MTX_NAMESPACE);
+						if (ACPI_FAILURE(status)) {
+							return_ACPI_STATUS
+							    (status);
+						}
+					}
+
+					return_ACPI_STATUS(AE_OK);
+				}
+
+				/* Try next handler in the list */
+
+				handler_obj = handler_obj->address_space.next;
+			}
+		}
+
+		/* This node does not have the handler we need; Pop up one level */
+
+		node = acpi_ns_get_parent_node(node);
+	}
+
+	/* If we get here, there is no handler for this region */
+
+	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+			  "No handler for RegionType %s(%X) (RegionObj %p)\n",
+			  acpi_ut_get_region_name(space_id), space_id,
+			  region_obj));
+
+	return_ACPI_STATUS(AE_NOT_EXIST);
+}
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
new file mode 100644
index 0000000..567b356
--- /dev/null
+++ b/drivers/acpi/acpica/evsci.c
@@ -0,0 +1,183 @@
+/*******************************************************************************
+ *
+ * Module Name: evsci - System Control Interrupt configuration and
+ *                      legacy to ACPI mode state transition functions
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+
+#define _COMPONENT          ACPI_EVENTS
+ACPI_MODULE_NAME("evsci")
+
+/* Local prototypes */
+static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_sci_xrupt_handler
+ *
+ * PARAMETERS:  Context   - Calling Context
+ *
+ * RETURN:      Status code indicates whether interrupt was handled.
+ *
+ * DESCRIPTION: Interrupt handler that will figure out what function or
+ *              control method to call to deal with a SCI.
+ *
+ ******************************************************************************/
+
+static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
+{
+	struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
+	u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;
+
+	ACPI_FUNCTION_TRACE(ev_sci_xrupt_handler);
+
+	/*
+	 * We are guaranteed by the ACPI CA initialization/shutdown code that
+	 * if this interrupt handler is installed, ACPI is enabled.
+	 */
+
+	/*
+	 * Fixed Events:
+	 * Check for and dispatch any Fixed Events that have occurred
+	 */
+	interrupt_handled |= acpi_ev_fixed_event_detect();
+
+	/*
+	 * General Purpose Events:
+	 * Check for and dispatch any GPEs that have occurred
+	 */
+	interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
+
+	return_UINT32(interrupt_handled);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_gpe_xrupt_handler
+ *
+ * PARAMETERS:  Context   - Calling Context
+ *
+ * RETURN:      Status code indicates whether interrupt was handled.
+ *
+ * DESCRIPTION: Handler for GPE Block Device interrupts
+ *
+ ******************************************************************************/
+
+u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
+{
+	struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
+	u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;
+
+	ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler);
+
+	/*
+	 * We are guaranteed by the ACPI CA initialization/shutdown code that
+	 * if this interrupt handler is installed, ACPI is enabled.
+	 */
+
+	/* GPEs: Check for and dispatch any GPEs that have occurred */
+
+	interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
+
+	return_UINT32(interrupt_handled);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_install_sci_handler
+ *
+ * PARAMETERS:  none
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Installs SCI handler.
+ *
+ ******************************************************************************/
+
+u32 acpi_ev_install_sci_handler(void)
+{
+	u32 status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(ev_install_sci_handler);
+
+	status =
+	    acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
+					      acpi_ev_sci_xrupt_handler,
+					      acpi_gbl_gpe_xrupt_list_head);
+	return_ACPI_STATUS(status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_remove_sci_handler
+ *
+ * PARAMETERS:  none
+ *
+ * RETURN:      AE_OK if the handler was uninstalled, AE_ERROR if the handler
+ *              was not installed to begin with
+ *
+ * DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be
+ *              taken.
+ *
+ * Note:  It doesn't seem important to disable all events or set the event
+ *        enable registers to their original values. The OS should disable
+ *        the SCI interrupt level when the handler is removed, so no more
+ *        events will come in.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_remove_sci_handler(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ev_remove_sci_handler);
+
+	/* Just let the OS remove the handler and disable the level */
+
+	status =
+	    acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
+					     acpi_ev_sci_xrupt_handler);
+
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
new file mode 100644
index 0000000..3aca901
--- /dev/null
+++ b/drivers/acpi/acpica/evxface.c
@@ -0,0 +1,821 @@
+/******************************************************************************
+ *
+ * Module Name: evxface - External interfaces for ACPI events
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acevents.h"
+#include "acinterp.h"
+
+#define _COMPONENT          ACPI_EVENTS
+ACPI_MODULE_NAME("evxface")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_exception_handler
+ *
+ * PARAMETERS:  Handler         - Pointer to the handler function for the
+ *                                event
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Saves the pointer to the handler function
+ *
+ ******************************************************************************/
+#ifdef ACPI_FUTURE_USAGE
+acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_install_exception_handler);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Don't allow two handlers. */
+
+	if (acpi_gbl_exception_handler) {
+		status = AE_ALREADY_EXISTS;
+		goto cleanup;
+	}
+
+	/* Install the handler */
+
+	acpi_gbl_exception_handler = handler;
+
+      cleanup:
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
+#endif				/*  ACPI_FUTURE_USAGE  */
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_fixed_event_handler
+ *
+ * PARAMETERS:  Event           - Event type to enable.
+ *              Handler         - Pointer to the handler function for the
+ *                                event
+ *              Context         - Value passed to the handler on each GPE
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Saves the pointer to the handler function and then enables the
+ *              event.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_fixed_event_handler(u32 event,
+				 acpi_event_handler handler, void *context)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler);
+
+	/* Parameter validation */
+
+	if (event > ACPI_EVENT_MAX) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Don't allow two handlers. */
+
+	if (NULL != acpi_gbl_fixed_event_handlers[event].handler) {
+		status = AE_ALREADY_EXISTS;
+		goto cleanup;
+	}
+
+	/* Install the handler before enabling the event */
+
+	acpi_gbl_fixed_event_handlers[event].handler = handler;
+	acpi_gbl_fixed_event_handlers[event].context = context;
+
+	status = acpi_clear_event(event);
+	if (ACPI_SUCCESS(status))
+		status = acpi_enable_event(event, 0);
+	if (ACPI_FAILURE(status)) {
+		ACPI_WARNING((AE_INFO, "Could not enable fixed event %X",
+			      event));
+
+		/* Remove the handler */
+
+		acpi_gbl_fixed_event_handlers[event].handler = NULL;
+		acpi_gbl_fixed_event_handlers[event].context = NULL;
+	} else {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "Enabled fixed event %X, Handler=%p\n", event,
+				  handler));
+	}
+
+      cleanup:
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
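
As a caller-side illustration of the fixed-event interface exported above, the sketch below installs and later removes a handler for the power button fixed event. It is a minimal sketch, not part of the patch: the example_ names are invented, ACPICA is assumed to be initialized already, and only the interfaces defined in this file plus the public acpi_event_handler type and the ACPI_EVENT_POWER_BUTTON / ACPI_INTERRUPT_HANDLED constants are used.

#include <acpi/acpi.h>

/* Illustrative handler: called when the power button fixed event fires */
static u32 example_power_button_handler(void *context)
{
	/* A real handler would queue deferred work here; "context" is the
	 * value passed to acpi_install_fixed_event_handler() below. */
	return ACPI_INTERRUPT_HANDLED;
}

static acpi_status example_power_button_setup(void)
{
	acpi_status status;

	/* Install the handler; on success the event is cleared and enabled */
	status = acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
						  example_power_button_handler,
						  NULL);
	if (ACPI_FAILURE(status))
		return status;

	/* ... on teardown, disable the event and detach the handler */
	return acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
					       example_power_button_handler);
}
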
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_remove_fixed_event_handler
+ *
+ * PARAMETERS:  Event           - Event type to disable.
+ *              Handler         - Address of the handler
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Disables the event and unregisters the event handler.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler);
+
+	/* Parameter validation */
+
+	if (event > ACPI_EVENT_MAX) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Disable the event before removing the handler */
+
+	status = acpi_disable_event(event, 0);
+
+	/* Always Remove the handler */
+
+	acpi_gbl_fixed_event_handlers[event].handler = NULL;
+	acpi_gbl_fixed_event_handlers[event].context = NULL;
+
+	if (ACPI_FAILURE(status)) {
+		ACPI_WARNING((AE_INFO,
+			      "Could not write to fixed event enable register %X",
+			      event));
+	} else {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
+				  event));
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_notify_handler
+ *
+ * PARAMETERS:  Device          - The device for which notifies will be handled
+ *              handler_type    - The type of handler:
+ *                                  ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
+ *                                  ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
+ *                                  ACPI_ALL_NOTIFY:  both system and device
+ *              Handler         - Address of the handler
+ *              Context         - Value passed to the handler on each GPE
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install a handler for notifies on an ACPI device
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_notify_handler(acpi_handle device,
+			    u32 handler_type,
+			    acpi_notify_handler handler, void *context)
+{
+	union acpi_operand_object *obj_desc;
+	union acpi_operand_object *notify_obj;
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_install_notify_handler);
+
+	/* Parameter validation */
+
+	if ((!device) ||
+	    (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Convert and validate the device handle */
+
+	node = acpi_ns_map_handle_to_node(device);
+	if (!node) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/*
+	 * Root Object:
+	 * Registering a notify handler on the root object indicates that the
+	 * caller wishes to receive notifications for all objects. Note that
+	 * only one <external> global handler can be registered (per notify type).
+	 */
+	if (device == ACPI_ROOT_OBJECT) {
+
+		/* Make sure the handler is not already installed */
+
+		if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
+		     acpi_gbl_system_notify.handler) ||
+		    ((handler_type & ACPI_DEVICE_NOTIFY) &&
+		     acpi_gbl_device_notify.handler)) {
+			status = AE_ALREADY_EXISTS;
+			goto unlock_and_exit;
+		}
+
+		if (handler_type & ACPI_SYSTEM_NOTIFY) {
+			acpi_gbl_system_notify.node = node;
+			acpi_gbl_system_notify.handler = handler;
+			acpi_gbl_system_notify.context = context;
+		}
+
+		if (handler_type & ACPI_DEVICE_NOTIFY) {
+			acpi_gbl_device_notify.node = node;
+			acpi_gbl_device_notify.handler = handler;
+			acpi_gbl_device_notify.context = context;
+		}
+
+		/* Global notify handler installed */
+	}
+
+	/*
+	 * All Other Objects:
+	 * Caller will only receive notifications specific to the target object.
+	 * Note that only certain object types can receive notifications.
+	 */
+	else {
+		/* Notifies allowed on this object? */
+
+		if (!acpi_ev_is_notify_object(node)) {
+			status = AE_TYPE;
+			goto unlock_and_exit;
+		}
+
+		/* Check for an existing internal object */
+
+		obj_desc = acpi_ns_get_attached_object(node);
+		if (obj_desc) {
+
+			/* Object exists - make sure there's no handler */
+
+			if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
+			     obj_desc->common_notify.system_notify) ||
+			    ((handler_type & ACPI_DEVICE_NOTIFY) &&
+			     obj_desc->common_notify.device_notify)) {
+				status = AE_ALREADY_EXISTS;
+				goto unlock_and_exit;
+			}
+		} else {
+			/* Create a new object */
+
+			obj_desc = acpi_ut_create_internal_object(node->type);
+			if (!obj_desc) {
+				status = AE_NO_MEMORY;
+				goto unlock_and_exit;
+			}
+
+			/* Attach new object to the Node */
+
+			status =
+			    acpi_ns_attach_object(device, obj_desc, node->type);
+
+			/* Remove local reference to the object */
+
+			acpi_ut_remove_reference(obj_desc);
+			if (ACPI_FAILURE(status)) {
+				goto unlock_and_exit;
+			}
+		}
+
+		/* Install the handler */
+
+		notify_obj =
+		    acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_NOTIFY);
+		if (!notify_obj) {
+			status = AE_NO_MEMORY;
+			goto unlock_and_exit;
+		}
+
+		notify_obj->notify.node = node;
+		notify_obj->notify.handler = handler;
+		notify_obj->notify.context = context;
+
+		if (handler_type & ACPI_SYSTEM_NOTIFY) {
+			obj_desc->common_notify.system_notify = notify_obj;
+		}
+
+		if (handler_type & ACPI_DEVICE_NOTIFY) {
+			obj_desc->common_notify.device_notify = notify_obj;
+		}
+
+		if (handler_type == ACPI_ALL_NOTIFY) {
+
+			/* Extra ref if installed in both */
+
+			acpi_ut_add_reference(notify_obj);
+		}
+	}
+
+      unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_notify_handler)
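
For reference, a minimal illustrative use of the notify interface exported above (assuming the same <acpi/acpi.h> include as this file). The example_ names are invented and the device handle is assumed to have been obtained elsewhere; only acpi_install_notify_handler()/acpi_remove_notify_handler() and the public acpi_notify_handler type are relied upon.

/* Illustrative notify handler; "value" is the notify code from AML
 * (0x80-0xFF for device-specific notifies) */
static void example_notify_handler(acpi_handle device, u32 value, void *context)
{
	/* React to the notify, typically by scheduling a re-scan or event */
}

static acpi_status example_notify_setup(acpi_handle device)
{
	acpi_status status;

	/* Receive only device-specific notifies; ACPI_ALL_NOTIFY would
	 * cover both the system (00-7f) and device (80-ff) ranges */
	status = acpi_install_notify_handler(device, ACPI_DEVICE_NOTIFY,
					     example_notify_handler, NULL);
	if (ACPI_FAILURE(status))
		return status;

	/* ... and on teardown: */
	return acpi_remove_notify_handler(device, ACPI_DEVICE_NOTIFY,
					  example_notify_handler);
}
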
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_remove_notify_handler
+ *
+ * PARAMETERS:  Device          - The device for which notifies will be handled
+ *              handler_type    - The type of handler:
+ *                                  ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
+ *                                  ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
+ *                                  ACPI_ALL_NOTIFY:  both system and device
+ *              Handler         - Address of the handler
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove a handler for notifies on an ACPI device
+ *
+ ******************************************************************************/
+acpi_status
+acpi_remove_notify_handler(acpi_handle device,
+			   u32 handler_type, acpi_notify_handler handler)
+{
+	union acpi_operand_object *notify_obj;
+	union acpi_operand_object *obj_desc;
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_remove_notify_handler);
+
+	/* Parameter validation */
+
+	if ((!device) ||
+	    (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
+		status = AE_BAD_PARAMETER;
+		goto exit;
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		goto exit;
+	}
+
+	/* Convert and validate the device handle */
+
+	node = acpi_ns_map_handle_to_node(device);
+	if (!node) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/* Root Object */
+
+	if (device == ACPI_ROOT_OBJECT) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "Removing notify handler for namespace root object\n"));
+
+		if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
+		     !acpi_gbl_system_notify.handler) ||
+		    ((handler_type & ACPI_DEVICE_NOTIFY) &&
+		     !acpi_gbl_device_notify.handler)) {
+			status = AE_NOT_EXIST;
+			goto unlock_and_exit;
+		}
+
+		/* Make sure all deferred tasks are completed */
+
+		(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+		acpi_os_wait_events_complete(NULL);
+		status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+		if (ACPI_FAILURE(status)) {
+			goto exit;
+		}
+
+		if (handler_type & ACPI_SYSTEM_NOTIFY) {
+			acpi_gbl_system_notify.node = NULL;
+			acpi_gbl_system_notify.handler = NULL;
+			acpi_gbl_system_notify.context = NULL;
+		}
+
+		if (handler_type & ACPI_DEVICE_NOTIFY) {
+			acpi_gbl_device_notify.node = NULL;
+			acpi_gbl_device_notify.handler = NULL;
+			acpi_gbl_device_notify.context = NULL;
+		}
+	}
+
+	/* All Other Objects */
+
+	else {
+		/* Notifies allowed on this object? */
+
+		if (!acpi_ev_is_notify_object(node)) {
+			status = AE_TYPE;
+			goto unlock_and_exit;
+		}
+
+		/* Check for an existing internal object */
+
+		obj_desc = acpi_ns_get_attached_object(node);
+		if (!obj_desc) {
+			status = AE_NOT_EXIST;
+			goto unlock_and_exit;
+		}
+
+		/* Object exists - make sure there's an existing handler */
+
+		if (handler_type & ACPI_SYSTEM_NOTIFY) {
+			notify_obj = obj_desc->common_notify.system_notify;
+			if (!notify_obj) {
+				status = AE_NOT_EXIST;
+				goto unlock_and_exit;
+			}
+
+			if (notify_obj->notify.handler != handler) {
+				status = AE_BAD_PARAMETER;
+				goto unlock_and_exit;
+			}
+			/* Make sure all deferred tasks are completed */
+
+			(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+			acpi_os_wait_events_complete(NULL);
+			status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+			if (ACPI_FAILURE(status)) {
+				goto exit;
+			}
+
+			/* Remove the handler */
+			obj_desc->common_notify.system_notify = NULL;
+			acpi_ut_remove_reference(notify_obj);
+		}
+
+		if (handler_type & ACPI_DEVICE_NOTIFY) {
+			notify_obj = obj_desc->common_notify.device_notify;
+			if (!notify_obj) {
+				status = AE_NOT_EXIST;
+				goto unlock_and_exit;
+			}
+
+			if (notify_obj->notify.handler != handler) {
+				status = AE_BAD_PARAMETER;
+				goto unlock_and_exit;
+			}
+			/* Make sure all deferred tasks are completed */
+
+			(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+			acpi_os_wait_events_complete(NULL);
+			status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+			if (ACPI_FAILURE(status)) {
+				goto exit;
+			}
+
+			/* Remove the handler */
+			obj_desc->common_notify.device_notify = NULL;
+			acpi_ut_remove_reference(notify_obj);
+		}
+	}
+
+      unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+      exit:
+	if (ACPI_FAILURE(status))
+		ACPI_EXCEPTION((AE_INFO, status, "Removing notify handler"));
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_gpe_handler
+ *
+ * PARAMETERS:  gpe_device      - Namespace node for the GPE (NULL for FADT
+ *                                defined GPEs)
+ *              gpe_number      - The GPE number within the GPE block
+ *              Type            - Whether this GPE should be treated as an
+ *                                edge- or level-triggered interrupt.
+ *              Address         - Address of the handler
+ *              Context         - Value passed to the handler on each GPE
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install a handler for a General Purpose Event.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_gpe_handler(acpi_handle gpe_device,
+			 u32 gpe_number,
+			 u32 type, acpi_event_handler address, void *context)
+{
+	struct acpi_gpe_event_info *gpe_event_info;
+	struct acpi_handler_info *handler;
+	acpi_status status;
+	acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);
+
+	/* Parameter validation */
+
+	if ((!address) || (type > ACPI_GPE_XRUPT_TYPE_MASK)) {
+		status = AE_BAD_PARAMETER;
+		goto exit;
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		goto exit;
+	}
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (!gpe_event_info) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/* Make sure that there isn't a handler there already */
+
+	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+	    ACPI_GPE_DISPATCH_HANDLER) {
+		status = AE_ALREADY_EXISTS;
+		goto unlock_and_exit;
+	}
+
+	/* Allocate and init handler object */
+
+	handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info));
+	if (!handler) {
+		status = AE_NO_MEMORY;
+		goto unlock_and_exit;
+	}
+
+	handler->address = address;
+	handler->context = context;
+	handler->method_node = gpe_event_info->dispatch.method_node;
+
+	/* Disable the GPE before installing the handler */
+
+	status = acpi_ev_disable_gpe(gpe_event_info);
+	if (ACPI_FAILURE(status)) {
+		goto unlock_and_exit;
+	}
+
+	/* Install the handler */
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+	gpe_event_info->dispatch.handler = handler;
+
+	/* Set up dispatch flags to indicate handler (vs. method) */
+
+	gpe_event_info->flags &= ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);	/* Clear bits */
+	gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER);
+
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+      unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+      exit:
+	if (ACPI_FAILURE(status))
+		ACPI_EXCEPTION((AE_INFO, status,
+				"Installing GPE handler failed"));
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
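
A hedged caller-side sketch of the GPE handler interface exported above (assuming <acpi/acpi.h>, as included by this file). The GPE number 0x16 and the example_ names are purely illustrative; NULL as gpe_device selects the FADT-defined GPE blocks. As implemented above, installing a handler leaves the GPE disabled, so the caller still sets the type and enables it via acpi_set_gpe_type()/acpi_enable_gpe() from evxfevnt.c.

/* Illustrative GPE handler */
static u32 example_gpe_handler(void *context)
{
	/* Quiesce the event source, then report the GPE as handled */
	return ACPI_INTERRUPT_HANDLED;
}

static acpi_status example_gpe_setup(void)
{
	acpi_status status;

	/* NULL gpe_device = FADT-defined GPE block; 0x16 is an example number */
	status = acpi_install_gpe_handler(NULL, 0x16, ACPI_GPE_LEVEL_TRIGGERED,
					  example_gpe_handler, NULL);
	if (ACPI_FAILURE(status))
		return status;

	/* The GPE is left disabled by the install; type it and enable it */
	status = acpi_set_gpe_type(NULL, 0x16, ACPI_GPE_TYPE_RUNTIME);
	if (ACPI_FAILURE(status))
		return status;

	return acpi_enable_gpe(NULL, 0x16);
}
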
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_remove_gpe_handler
+ *
+ * PARAMETERS:  gpe_device      - Namespace node for the GPE (NULL for FADT
+ *                                defined GPEs)
+ *              gpe_number      - The GPE number to remove the handler from
+ *              Address         - Address of the handler
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove a handler for a General Purpose Event.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_remove_gpe_handler(acpi_handle gpe_device,
+			u32 gpe_number, acpi_event_handler address)
+{
+	struct acpi_gpe_event_info *gpe_event_info;
+	struct acpi_handler_info *handler;
+	acpi_status status;
+	acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(acpi_remove_gpe_handler);
+
+	/* Parameter validation */
+
+	if (!address) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (!gpe_event_info) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/* Make sure that a handler is indeed installed */
+
+	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
+	    ACPI_GPE_DISPATCH_HANDLER) {
+		status = AE_NOT_EXIST;
+		goto unlock_and_exit;
+	}
+
+	/* Make sure that the installed handler is the same */
+
+	if (gpe_event_info->dispatch.handler->address != address) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/* Disable the GPE before removing the handler */
+
+	status = acpi_ev_disable_gpe(gpe_event_info);
+	if (ACPI_FAILURE(status)) {
+		goto unlock_and_exit;
+	}
+
+	/* Make sure all deferred tasks are completed */
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	acpi_os_wait_events_complete(NULL);
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Remove the handler */
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+	handler = gpe_event_info->dispatch.handler;
+
+	/* Restore Method node (if any), set dispatch flags */
+
+	gpe_event_info->dispatch.method_node = handler->method_node;
+	gpe_event_info->flags &= ~ACPI_GPE_DISPATCH_MASK;	/* Clear bits */
+	if (handler->method_node) {
+		gpe_event_info->flags |= ACPI_GPE_DISPATCH_METHOD;
+	}
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+	/* Now we can free the handler object */
+
+	ACPI_FREE(handler);
+
+      unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_acquire_global_lock
+ *
+ * PARAMETERS:  Timeout         - How long the caller is willing to wait
+ *              Handle          - Where the handle to the lock is returned
+ *                                (if acquired)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Acquire the ACPI Global Lock
+ *
+ * Note: Allows callers with the same thread ID to acquire the global lock
+ * multiple times. In other words, externally, the behavior of the global lock
+ * is identical to an AML mutex. On the first acquire, a new handle is
+ * returned. On any subsequent calls to acquire by the same thread, the same
+ * handle is returned.
+ *
+ ******************************************************************************/
+acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
+{
+	acpi_status status;
+
+	if (!handle) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	/* Must lock interpreter to prevent race conditions */
+
+	acpi_ex_enter_interpreter();
+
+	status = acpi_ex_acquire_mutex_object(timeout,
+					      acpi_gbl_global_lock_mutex,
+					      acpi_os_get_thread_id());
+
+	if (ACPI_SUCCESS(status)) {
+
+		/* Return the global lock handle (updated in acpi_ev_acquire_global_lock) */
+
+		*handle = acpi_gbl_global_lock_handle;
+	}
+
+	acpi_ex_exit_interpreter();
+	return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_acquire_global_lock)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_release_global_lock
+ *
+ * PARAMETERS:  Handle      - Returned from acpi_acquire_global_lock
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Release the ACPI Global Lock. The handle must be valid.
+ *
+ ******************************************************************************/
+acpi_status acpi_release_global_lock(u32 handle)
+{
+	acpi_status status;
+
+	if (!handle || (handle != acpi_gbl_global_lock_handle)) {
+		return (AE_NOT_ACQUIRED);
+	}
+
+	status = acpi_ex_release_mutex_object(acpi_gbl_global_lock_mutex);
+	return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_release_global_lock)
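
A minimal sketch of the acquire/release pairing for the Global Lock interfaces above (assuming <acpi/acpi.h>). The 1000 ms timeout and the example_ name are illustrative; the handle returned by acpi_acquire_global_lock() must be passed back to acpi_release_global_lock().

static acpi_status example_with_global_lock(void)
{
	u32 lock_handle;
	acpi_status status;

	/* Wait up to ~1 second for the firmware Global Lock */
	status = acpi_acquire_global_lock(1000, &lock_handle);
	if (ACPI_FAILURE(status))
		return status;

	/* ... access hardware or regions shared with the firmware ... */

	/* Release using the handle returned by the acquire */
	return acpi_release_global_lock(lock_handle);
}
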
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
new file mode 100644
index 0000000..35485e4
--- /dev/null
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -0,0 +1,871 @@
+/******************************************************************************
+ *
+ * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+#include "actables.h"
+
+#define _COMPONENT          ACPI_EVENTS
+ACPI_MODULE_NAME("evxfevnt")
+
+/* Local prototypes */
+acpi_status
+acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+		       struct acpi_gpe_block_info *gpe_block, void *context);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_enable
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Transfers the system into ACPI mode.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_enable(void)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(acpi_enable);
+
+	/* ACPI tables must be present */
+
+	if (!acpi_tb_tables_loaded()) {
+		return_ACPI_STATUS(AE_NO_ACPI_TABLES);
+	}
+
+	/* Check current mode */
+
+	if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+				  "System is already in ACPI mode\n"));
+	} else {
+		/* Transition to ACPI mode */
+
+		status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
+		if (ACPI_FAILURE(status)) {
+			ACPI_ERROR((AE_INFO,
+				    "Could not transition to ACPI mode"));
+			return_ACPI_STATUS(status);
+		}
+
+		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+				  "Transition to ACPI mode successful\n"));
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enable)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_disable
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Transfers the system into LEGACY (non-ACPI) mode.
+ *
+ ******************************************************************************/
+acpi_status acpi_disable(void)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(acpi_disable);
+
+	if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+				  "System is already in legacy (non-ACPI) mode\n"));
+	} else {
+		/* Transition to LEGACY mode */
+
+		status = acpi_hw_set_mode(ACPI_SYS_MODE_LEGACY);
+
+		if (ACPI_FAILURE(status)) {
+			ACPI_ERROR((AE_INFO,
+				    "Could not exit ACPI mode to legacy mode"));
+			return_ACPI_STATUS(status);
+		}
+
+		ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI mode disabled\n"));
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_disable)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_enable_event
+ *
+ * PARAMETERS:  Event           - The fixed event to be enabled
+ *              Flags           - Reserved
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Enable an ACPI event (fixed)
+ *
+ ******************************************************************************/
+acpi_status acpi_enable_event(u32 event, u32 flags)
+{
+	acpi_status status = AE_OK;
+	u32 value;
+
+	ACPI_FUNCTION_TRACE(acpi_enable_event);
+
+	/* Decode the Fixed Event */
+
+	if (event > ACPI_EVENT_MAX) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/*
+	 * Enable the requested fixed event (by writing a one to the enable
+	 * register bit)
+	 */
+	status =
+	    acpi_set_register(acpi_gbl_fixed_event_info[event].
+			      enable_register_id, 1);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Make sure that the hardware responded */
+
+	status =
+	    acpi_get_register(acpi_gbl_fixed_event_info[event].
+			      enable_register_id, &value);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	if (value != 1) {
+		ACPI_ERROR((AE_INFO,
+			    "Could not enable %s event",
+			    acpi_ut_get_event_name(event)));
+		return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enable_event)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_set_gpe_type
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device
+ *              gpe_number      - GPE level within the GPE block
+ *              Type            - New GPE type
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Set the type of an individual GPE
+ *
+ ******************************************************************************/
+acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type)
+{
+	acpi_status status = AE_OK;
+	struct acpi_gpe_event_info *gpe_event_info;
+
+	ACPI_FUNCTION_TRACE(acpi_set_gpe_type);
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (!gpe_event_info) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	if ((gpe_event_info->flags & ACPI_GPE_TYPE_MASK) == type) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Set the new type (will disable GPE if currently enabled) */
+
+	status = acpi_ev_set_gpe_type(gpe_event_info, type);
+
+      unlock_and_exit:
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_gpe_type)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_enable_gpe
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device
+ *              gpe_number      - GPE level within the GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Enable an ACPI event (general purpose)
+ *
+ ******************************************************************************/
+acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+	acpi_status status = AE_OK;
+	acpi_cpu_flags flags;
+	struct acpi_gpe_event_info *gpe_event_info;
+
+	ACPI_FUNCTION_TRACE(acpi_enable_gpe);
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (!gpe_event_info) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/* Perform the enable */
+
+	status = acpi_ev_enable_gpe(gpe_event_info, TRUE);
+
+      unlock_and_exit:
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
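
Illustrative only: a typical enable sequence for a runtime GPE, combining acpi_set_gpe_type() and acpi_enable_gpe() as exported above. The example_ name is invented and gpe_device may be NULL for FADT-defined GPEs; <acpi/acpi.h> is assumed, as in this file.

static acpi_status example_enable_runtime_gpe(acpi_handle gpe_device,
					      u32 gpe_number)
{
	acpi_status status;

	/* Mark the GPE as a runtime (non-wake) event ... */
	status = acpi_set_gpe_type(gpe_device, gpe_number, ACPI_GPE_TYPE_RUNTIME);
	if (ACPI_FAILURE(status))
		return status;

	/* ... then set its bit in the hardware enable register */
	return acpi_enable_gpe(gpe_device, gpe_number);
}
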
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_disable_gpe
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device
+ *              gpe_number      - GPE level within the GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Disable an ACPI event (general purpose)
+ *
+ ******************************************************************************/
+acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+	acpi_status status = AE_OK;
+	acpi_cpu_flags flags;
+	struct acpi_gpe_event_info *gpe_event_info;
+
+	ACPI_FUNCTION_TRACE(acpi_disable_gpe);
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (!gpe_event_info) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	status = acpi_ev_disable_gpe(gpe_event_info);
+
+unlock_and_exit:
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_disable_event
+ *
+ * PARAMETERS:  Event           - The fixed event to be disabled
+ *              Flags           - Reserved
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Disable an ACPI event (fixed)
+ *
+ ******************************************************************************/
+acpi_status acpi_disable_event(u32 event, u32 flags)
+{
+	acpi_status status = AE_OK;
+	u32 value;
+
+	ACPI_FUNCTION_TRACE(acpi_disable_event);
+
+	/* Decode the Fixed Event */
+
+	if (event > ACPI_EVENT_MAX) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/*
+	 * Disable the requested fixed event (by writing a zero to the enable
+	 * register bit)
+	 */
+	status =
+	    acpi_set_register(acpi_gbl_fixed_event_info[event].
+			      enable_register_id, 0);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status =
+	    acpi_get_register(acpi_gbl_fixed_event_info[event].
+			      enable_register_id, &value);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	if (value != 0) {
+		ACPI_ERROR((AE_INFO,
+			    "Could not disable %s events",
+			    acpi_ut_get_event_name(event)));
+		return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_disable_event)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_clear_event
+ *
+ * PARAMETERS:  Event           - The fixed event to be cleared
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Clear an ACPI event (fixed)
+ *
+ ******************************************************************************/
+acpi_status acpi_clear_event(u32 event)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(acpi_clear_event);
+
+	/* Decode the Fixed Event */
+
+	if (event > ACPI_EVENT_MAX) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/*
+	 * Clear the requested fixed event (By writing a one to the status
+	 * register bit)
+	 */
+	status =
+	    acpi_set_register(acpi_gbl_fixed_event_info[event].
+			      status_register_id, 1);
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_clear_event)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_clear_gpe
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device
+ *              gpe_number      - GPE level within the GPE block
+ *              Flags           - Called from an ISR or not
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Clear an ACPI event (general purpose)
+ *
+ ******************************************************************************/
+acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
+{
+	acpi_status status = AE_OK;
+	struct acpi_gpe_event_info *gpe_event_info;
+
+	ACPI_FUNCTION_TRACE(acpi_clear_gpe);
+
+	/* Use semaphore lock if not executing at interrupt level */
+
+	if (flags & ACPI_NOT_ISR) {
+		status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (!gpe_event_info) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	status = acpi_hw_clear_gpe(gpe_event_info);
+
+      unlock_and_exit:
+	if (flags & ACPI_NOT_ISR) {
+		(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	}
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_event_status
+ *
+ * PARAMETERS:  Event           - The fixed event
+ *              event_status    - Where the current status of the event will
+ *                                be returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Obtains and returns the current status of the event
+ *
+ ******************************************************************************/
+acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
+{
+	acpi_status status = AE_OK;
+	u32 value;
+
+	ACPI_FUNCTION_TRACE(acpi_get_event_status);
+
+	if (!event_status) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Decode the Fixed Event */
+
+	if (event > ACPI_EVENT_MAX) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Get the status of the requested fixed event */
+
+	status =
+	    acpi_get_register(acpi_gbl_fixed_event_info[event].
+			      enable_register_id, &value);
+	if (ACPI_FAILURE(status))
+		return_ACPI_STATUS(status);
+
+	*event_status = value;
+
+	status =
+	    acpi_get_register(acpi_gbl_fixed_event_info[event].
+			      status_register_id, &value);
+	if (ACPI_FAILURE(status))
+		return_ACPI_STATUS(status);
+
+	if (value)
+		*event_status |= ACPI_EVENT_FLAG_SET;
+
+	if (acpi_gbl_fixed_event_handlers[event].handler)
+		*event_status |= ACPI_EVENT_FLAG_HANDLE;
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_event_status)
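
A small, hypothetical example of interpreting the status word returned by acpi_get_event_status(); only the ACPI_EVENT_FLAG_SET bit used elsewhere in this file is examined, and the example_ name is invented.

static void example_check_power_button_status(void)
{
	acpi_event_status event_status;

	if (ACPI_FAILURE(acpi_get_event_status(ACPI_EVENT_POWER_BUTTON,
					       &event_status)))
		return;

	if (event_status & ACPI_EVENT_FLAG_SET) {
		/* The status bit is set: the event has fired and can be
		 * acknowledged with acpi_clear_event() */
	}
}
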
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_gpe_status
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device
+ *              gpe_number      - GPE level within the GPE block
+ *              Flags           - Called from an ISR or not
+ *              event_status    - Where the current status of the event will
+ *                                be returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get status of an event (general purpose)
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_gpe_status(acpi_handle gpe_device,
+		    u32 gpe_number, u32 flags, acpi_event_status * event_status)
+{
+	acpi_status status = AE_OK;
+	struct acpi_gpe_event_info *gpe_event_info;
+
+	ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
+
+	/* Use semaphore lock if not executing at interrupt level */
+
+	if (flags & ACPI_NOT_ISR) {
+		status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (!gpe_event_info) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/* Obtain status on the requested GPE number */
+
+	status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
+
+	if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
+		*event_status |= ACPI_EVENT_FLAG_HANDLE;
+
+      unlock_and_exit:
+	if (flags & ACPI_NOT_ISR) {
+		(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	}
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_gpe_block
+ *
+ * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
+ *              gpe_block_address   - Address and space ID
+ *              register_count      - Number of GPE register pairs in the block
+ *              interrupt_number    - H/W interrupt for the block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create and Install a block of GPE registers
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_gpe_block(acpi_handle gpe_device,
+		       struct acpi_generic_address *gpe_block_address,
+		       u32 register_count, u32 interrupt_number)
+{
+	acpi_status status;
+	union acpi_operand_object *obj_desc;
+	struct acpi_namespace_node *node;
+	struct acpi_gpe_block_info *gpe_block;
+
+	ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
+
+	if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	node = acpi_ns_map_handle_to_node(gpe_device);
+	if (!node) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/*
+	 * For user-installed GPE Block Devices, the gpe_block_base_number
+	 * is always zero
+	 */
+	status =
+	    acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0,
+				     interrupt_number, &gpe_block);
+	if (ACPI_FAILURE(status)) {
+		goto unlock_and_exit;
+	}
+
+	/* Run the _PRW methods and enable the GPEs */
+
+	status = acpi_ev_initialize_gpe_block(node, gpe_block);
+	if (ACPI_FAILURE(status)) {
+		goto unlock_and_exit;
+	}
+
+	/* Get the device_object attached to the node */
+
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (!obj_desc) {
+
+		/* No object, create a new one */
+
+		obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
+		if (!obj_desc) {
+			status = AE_NO_MEMORY;
+			goto unlock_and_exit;
+		}
+
+		status =
+		    acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
+
+		/* Remove local reference to the object */
+
+		acpi_ut_remove_reference(obj_desc);
+
+		if (ACPI_FAILURE(status)) {
+			goto unlock_and_exit;
+		}
+	}
+
+	/* Install the GPE block in the device_object */
+
+	obj_desc->device.gpe_block = gpe_block;
+
+      unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_remove_gpe_block
+ *
+ * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove a previously installed block of GPE registers
+ *
+ ******************************************************************************/
+acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+	struct acpi_namespace_node *node;
+
+	ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
+
+	if (!gpe_device) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	node = acpi_ns_map_handle_to_node(gpe_device);
+	if (!node) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/* Get the device_object attached to the node */
+
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (!obj_desc || !obj_desc->device.gpe_block) {
+		return_ACPI_STATUS(AE_NULL_OBJECT);
+	}
+
+	/* Delete the GPE block (but not the device_object) */
+
+	status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
+	if (ACPI_SUCCESS(status)) {
+		obj_desc->device.gpe_block = NULL;
+	}
+
+      unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
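
For illustration, a sketch pairing acpi_install_gpe_block() with acpi_remove_gpe_block(). Every value here (I/O address 0x1000, two register pairs, interrupt 9) and every example_ name is an invented placeholder; a real GPE block device would take these from its resource and device description.

static acpi_status example_add_gpe_block(acpi_handle gpe_device)
{
	struct acpi_generic_address block_address = {
		.space_id = ACPI_ADR_SPACE_SYSTEM_IO,
		.bit_width = 0,
		.bit_offset = 0,
		.address = 0x1000,	/* placeholder I/O base */
	};

	/* Two GPE register pairs (status + enable), routed to interrupt 9 */
	return acpi_install_gpe_block(gpe_device, &block_address, 2, 9);
}

static acpi_status example_del_gpe_block(acpi_handle gpe_device)
{
	/* Deletes the GPE block but leaves the device object in place */
	return acpi_remove_gpe_block(gpe_device);
}
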
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_gpe_device
+ *
+ * PARAMETERS:  Index               - System GPE index (0-current_gpe_count)
+ *              gpe_device          - Where the parent GPE Device is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL
+ *              gpe_device indicates that the GPE number is contained in one of
+ *              the FADT-defined GPE blocks; otherwise, the GPE block device is
+ *              returned.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
+{
+	struct acpi_gpe_device_info info;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_get_gpe_device);
+
+	if (!gpe_device) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	if (index >= acpi_current_gpe_count) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	/* Setup and walk the GPE list */
+
+	info.index = index;
+	info.status = AE_NOT_EXIST;
+	info.gpe_device = NULL;
+	info.next_block_base_index = 0;
+
+	status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	*gpe_device = info.gpe_device;
+	return_ACPI_STATUS(info.status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
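+
+/*
+ * Illustrative usage (editor's sketch, not part of the ACPICA sources):
+ * walk every system GPE index and find its parent block device. A NULL
+ * handle means the GPE lives in one of the FADT-defined GPE blocks.
+ *
+ *	acpi_handle gpe_device;
+ *	acpi_status status;
+ *	u32 i;
+ *
+ *	for (i = 0; i < acpi_current_gpe_count; i++) {
+ *		status = acpi_get_gpe_device(i, &gpe_device);
+ *		if (ACPI_FAILURE(status))
+ *			break;
+ *		if (!gpe_device)
+ *			continue;	// index i is a FADT-defined GPE
+ *		// gpe_device is the parent GPE Block Device handle
+ *	}
+ */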
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_get_gpe_device
+ *
+ * PARAMETERS:  GPE_WALK_CALLBACK
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
+ *              block device. NULL if the GPE is one of the FADT-defined GPEs.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+		       struct acpi_gpe_block_info *gpe_block, void *context)
+{
+	struct acpi_gpe_device_info *info = context;
+
+	/* Increment Index by the number of GPEs in this block */
+
+	info->next_block_base_index +=
+	    (gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH);
+
+	if (info->index < info->next_block_base_index) {
+		/*
+		 * The GPE index is within this block, get the node. Leave the node
+		 * NULL for the FADT-defined GPEs
+		 */
+		if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
+			info->gpe_device = gpe_block->node;
+		}
+
+		info->status = AE_OK;
+		return (AE_CTRL_END);
+	}
+
+	return (AE_OK);
+}
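+
+/*
+ * Worked example (editor's illustration): with ACPI_GPE_REGISTER_WIDTH == 8,
+ * a block with register_count == 2 spans 16 GPE numbers. If the blocks
+ * walked so far covered indices 0-15, this callback raises
+ * next_block_base_index from 16 to 32, so an input Index of 20 matches the
+ * current block.
+ */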
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_disable_all_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_disable_all_gpes(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_disable_all_gpes);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_hw_disable_all_gpes();
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+	return_ACPI_STATUS(status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_enable_all_runtime_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_enable_all_runtime_gpes(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_hw_enable_all_runtime_gpes();
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+	return_ACPI_STATUS(status);
+}
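+
+/*
+ * Illustrative usage (editor's sketch, not part of the ACPICA sources): an
+ * OSPM sleep path would typically quiesce all GPEs before entering a sleep
+ * state and restore the runtime set on wake:
+ *
+ *	status = acpi_disable_all_gpes();		// before entering Sx
+ *	... enter and leave the sleep state ...
+ *	status = acpi_enable_all_runtime_gpes();	// after wake
+ */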
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
new file mode 100644
index 0000000..479e7a3
--- /dev/null
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -0,0 +1,254 @@
+/******************************************************************************
+ *
+ * Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
+ *                         Address Spaces.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acevents.h"
+
+#define _COMPONENT          ACPI_EVENTS
+ACPI_MODULE_NAME("evxfregn")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_address_space_handler
+ *
+ * PARAMETERS:  Device          - Handle for the device
+ *              space_id        - The address space ID
+ *              Handler         - Address of the handler
+ *              Setup           - Address of the setup function
+ *              Context         - Value passed to the handler on each access
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install a handler for all op_regions of a given space_id.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_address_space_handler(acpi_handle device,
+				   acpi_adr_space_type space_id,
+				   acpi_adr_space_handler handler,
+				   acpi_adr_space_setup setup, void *context)
+{
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_install_address_space_handler);
+
+	/* Parameter validation */
+
+	if (!device) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Convert and validate the device handle */
+
+	node = acpi_ns_map_handle_to_node(device);
+	if (!node) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/* Install the handler for all Regions for this Space ID */
+
+	status =
+	    acpi_ev_install_space_handler(node, space_id, handler, setup,
+					  context);
+	if (ACPI_FAILURE(status)) {
+		goto unlock_and_exit;
+	}
+
+	/* Run all _REG methods for this address space */
+
+	status = acpi_ev_execute_reg_methods(node, space_id);
+
+      unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler)
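+
+/*
+ * Illustrative usage (editor's sketch, not part of the ACPICA sources): a
+ * hypothetical driver could install a handler for the Embedded Controller
+ * address space on its device handle. The "foo" names, the context pointer
+ * and the trivial handler body are assumptions; only the callback signature
+ * follows the acpi_adr_space_handler typedef used by this tree.
+ *
+ *	static acpi_status
+ *	foo_space_handler(u32 function, acpi_physical_address address,
+ *			  u32 bit_width, acpi_integer *value,
+ *			  void *handler_context, void *region_context)
+ *	{
+ *		if (function != ACPI_READ && function != ACPI_WRITE)
+ *			return (AE_BAD_PARAMETER);
+ *		return (AE_OK);
+ *	}
+ *
+ *	status = acpi_install_address_space_handler(foo_handle,
+ *						    ACPI_ADR_SPACE_EC,
+ *						    foo_space_handler,
+ *						    NULL, foo_context);
+ */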
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_remove_address_space_handler
+ *
+ * PARAMETERS:  Device          - Handle for the device
+ *              space_id        - The address space ID
+ *              Handler         - Address of the handler
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove a previously installed handler.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_remove_address_space_handler(acpi_handle device,
+				  acpi_adr_space_type space_id,
+				  acpi_adr_space_handler handler)
+{
+	union acpi_operand_object *obj_desc;
+	union acpi_operand_object *handler_obj;
+	union acpi_operand_object *region_obj;
+	union acpi_operand_object **last_obj_ptr;
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_remove_address_space_handler);
+
+	/* Parameter validation */
+
+	if (!device) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Convert and validate the device handle */
+
+	node = acpi_ns_map_handle_to_node(device);
+	if (!node ||
+	    ((node->type != ACPI_TYPE_DEVICE) &&
+	     (node->type != ACPI_TYPE_PROCESSOR) &&
+	     (node->type != ACPI_TYPE_THERMAL) &&
+	     (node != acpi_gbl_root_node))) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/* Make sure the internal object exists */
+
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (!obj_desc) {
+		status = AE_NOT_EXIST;
+		goto unlock_and_exit;
+	}
+
+	/* Find the address handler the user requested */
+
+	handler_obj = obj_desc->device.handler;
+	last_obj_ptr = &obj_desc->device.handler;
+	while (handler_obj) {
+
+		/* We have a handler, see if user requested this one */
+
+		if (handler_obj->address_space.space_id == space_id) {
+
+			/* Handler must be the same as the installed handler */
+
+			if (handler_obj->address_space.handler != handler) {
+				status = AE_BAD_PARAMETER;
+				goto unlock_and_exit;
+			}
+
+			/* Matched space_id, first dereference this in the Regions */
+
+			ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+					  "Removing address handler %p(%p) for region %s on Device %p(%p)\n",
+					  handler_obj, handler,
+					  acpi_ut_get_region_name(space_id),
+					  node, obj_desc));
+
+			region_obj = handler_obj->address_space.region_list;
+
+			/* Walk the handler's region list */
+
+			while (region_obj) {
+				/*
+				 * First disassociate the handler from the region.
+				 *
+				 * NOTE: this doesn't mean that the region goes away
+				 * The region is just inaccessible as indicated to
+				 * the _REG method
+				 */
+				acpi_ev_detach_region(region_obj, TRUE);
+
+				/*
+				 * Walk the list: Just grab the head because the
+				 * detach_region removed the previous head.
+				 */
+				region_obj =
+				    handler_obj->address_space.region_list;
+
+			}
+
+			/* Remove this Handler object from the list */
+
+			*last_obj_ptr = handler_obj->address_space.next;
+
+			/* Now we can delete the handler object */
+
+			acpi_ut_remove_reference(handler_obj);
+			goto unlock_and_exit;
+		}
+
+		/* Walk the linked list of handlers */
+
+		last_obj_ptr = &handler_obj->address_space.next;
+		handler_obj = handler_obj->address_space.next;
+	}
+
+	/* The handler does not exist */
+
+	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+			  "Unable to remove address handler %p for %s(%X), DevNode %p, obj %p\n",
+			  handler, acpi_ut_get_region_name(space_id), space_id,
+			  node, obj_desc));
+
+	status = AE_NOT_EXIST;
+
+      unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
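+
+/*
+ * Illustrative counterpart (editor's sketch, not part of the ACPICA
+ * sources): on teardown the same hypothetical driver removes its handler,
+ * passing the identical handler pointer so the list search above matches:
+ *
+ *	status = acpi_remove_address_space_handler(foo_handle,
+ *						   ACPI_ADR_SPACE_EC,
+ *						   foo_space_handler);
+ */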
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
new file mode 100644
index 0000000..932bbc2
--- /dev/null
+++ b/drivers/acpi/acpica/exconfig.c
@@ -0,0 +1,536 @@
+/******************************************************************************
+ *
+ * Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "acnamesp.h"
+#include "actables.h"
+#include "acdispat.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exconfig")
+
+/* Local prototypes */
+static acpi_status
+acpi_ex_add_table(u32 table_index,
+		  struct acpi_namespace_node *parent_node,
+		  union acpi_operand_object **ddb_handle);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_add_table
+ *
+ * PARAMETERS:  Table               - Pointer to raw table
+ *              parent_node         - Where to load the table (scope)
+ *              ddb_handle          - Where to return the table handle.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Common function to Install and Load an ACPI table with a
+ *              returned table handle.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_add_table(u32 table_index,
+		  struct acpi_namespace_node *parent_node,
+		  union acpi_operand_object **ddb_handle)
+{
+	acpi_status status;
+	union acpi_operand_object *obj_desc;
+
+	ACPI_FUNCTION_TRACE(ex_add_table);
+
+	/* Create an object to be the table handle */
+
+	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE);
+	if (!obj_desc) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Init the table handle */
+
+	obj_desc->reference.class = ACPI_REFCLASS_TABLE;
+	*ddb_handle = obj_desc;
+
+	/* Install the new table into the local data structures */
+
+	obj_desc->reference.value = table_index;
+
+	/* Add the table to the namespace */
+
+	status = acpi_ns_load_table(table_index, parent_node);
+	if (ACPI_FAILURE(status)) {
+		acpi_ut_remove_reference(obj_desc);
+		*ddb_handle = NULL;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_load_table_op
+ *
+ * PARAMETERS:  walk_state          - Current state with operands
+ *              return_desc         - Where to store the return object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Load an ACPI table from the RSDT/XSDT
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
+		      union acpi_operand_object **return_desc)
+{
+	acpi_status status;
+	union acpi_operand_object **operand = &walk_state->operands[0];
+	struct acpi_namespace_node *parent_node;
+	struct acpi_namespace_node *start_node;
+	struct acpi_namespace_node *parameter_node = NULL;
+	union acpi_operand_object *ddb_handle;
+	struct acpi_table_header *table;
+	u32 table_index;
+
+	ACPI_FUNCTION_TRACE(ex_load_table_op);
+
+	/* Validate lengths for the signature_string, oem_id_string, and oem_table_id */
+
+	if ((operand[0]->string.length > ACPI_NAME_SIZE) ||
+	    (operand[1]->string.length > ACPI_OEM_ID_SIZE) ||
+	    (operand[2]->string.length > ACPI_OEM_TABLE_ID_SIZE)) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Find the ACPI table in the RSDT/XSDT */
+
+	status = acpi_tb_find_table(operand[0]->string.pointer,
+				    operand[1]->string.pointer,
+				    operand[2]->string.pointer, &table_index);
+	if (ACPI_FAILURE(status)) {
+		if (status != AE_NOT_FOUND) {
+			return_ACPI_STATUS(status);
+		}
+
+		/* Table not found, return an Integer=0 and AE_OK */
+
+		ddb_handle = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+		if (!ddb_handle) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		ddb_handle->integer.value = 0;
+		*return_desc = ddb_handle;
+
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Default nodes */
+
+	start_node = walk_state->scope_info->scope.node;
+	parent_node = acpi_gbl_root_node;
+
+	/* root_path (optional parameter) */
+
+	if (operand[3]->string.length > 0) {
+		/*
+		 * Find the node referenced by the root_path_string. This is the
+		 * location within the namespace where the table will be loaded.
+		 */
+		status =
+		    acpi_ns_get_node(start_node, operand[3]->string.pointer,
+				     ACPI_NS_SEARCH_PARENT, &parent_node);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/* parameter_path (optional parameter) */
+
+	if (operand[4]->string.length > 0) {
+		if ((operand[4]->string.pointer[0] != '\\') &&
+		    (operand[4]->string.pointer[0] != '^')) {
+			/*
+			 * Path is not absolute, so it will be relative to the node
+			 * referenced by the root_path_string (or the NS root if omitted)
+			 */
+			start_node = parent_node;
+		}
+
+		/* Find the node referenced by the parameter_path_string */
+
+		status =
+		    acpi_ns_get_node(start_node, operand[4]->string.pointer,
+				     ACPI_NS_SEARCH_PARENT, &parameter_node);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/* Load the table into the namespace */
+
+	status = acpi_ex_add_table(table_index, parent_node, &ddb_handle);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Parameter Data (optional) */
+
+	if (parameter_node) {
+
+		/* Store the parameter data into the optional parameter object */
+
+		status = acpi_ex_store(operand[5],
+				       ACPI_CAST_PTR(union acpi_operand_object,
+						     parameter_node),
+				       walk_state);
+		if (ACPI_FAILURE(status)) {
+			(void)acpi_ex_unload_table(ddb_handle);
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	status = acpi_get_table_by_index(table_index, &table);
+	if (ACPI_SUCCESS(status)) {
+		ACPI_INFO((AE_INFO,
+			   "Dynamic OEM Table Load - [%.4s] OemId [%.6s] OemTableId [%.8s]",
+			   table->signature, table->oem_id,
+			   table->oem_table_id));
+	}
+
+	/* Invoke table handler if present */
+
+	if (acpi_gbl_table_handler) {
+		(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table,
+					     acpi_gbl_table_handler_context);
+	}
+
+	*return_desc = ddb_handle;
+	return_ACPI_STATUS(status);
+}
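+
+/*
+ * Illustrative ASL (editor's sketch; table and path names are hypothetical):
+ * the load_table operator handled above takes signature, OEM ID and OEM
+ * table ID strings plus optional root and parameter paths, e.g.
+ *
+ *	Store (LoadTable ("OEM1", "MYOEM", "TABLE1",
+ *			  "\_SB.PCI0", "MYD", 0x1234), Local0)
+ *
+ * If no matching table exists, Local0 receives the Integer 0 per the
+ * AE_NOT_FOUND path above; otherwise it receives a DDBHandle usable with
+ * Unload.
+ */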
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_load_op
+ *
+ * PARAMETERS:  obj_desc        - Region or Buffer/Field from which the table
+ *                                will be obtained
+ *              Target          - Where a handle to the table will be stored
+ *              walk_state      - Current state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Load an ACPI table from a field or operation region
+ *
+ * NOTE: Region Fields (Field, bank_field, index_fields) are resolved to buffer
+ *       objects before this code is reached.
+ *
+ *       If source is an operation region, it must refer to system_memory, as
+ *       per the ACPI specification.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_load_op(union acpi_operand_object *obj_desc,
+		union acpi_operand_object *target,
+		struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object *ddb_handle;
+	struct acpi_table_header *table;
+	struct acpi_table_desc table_desc;
+	u32 table_index;
+	acpi_status status;
+	u32 length;
+
+	ACPI_FUNCTION_TRACE(ex_load_op);
+
+	ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
+
+	/* Source Object can be either an op_region or a Buffer/Field */
+
+	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+	case ACPI_TYPE_REGION:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "Load table from Region %p\n", obj_desc));
+
+		/* Region must be system_memory (from ACPI spec) */
+
+		if (obj_desc->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+		}
+
+		/*
+		 * If the Region Address and Length have not been previously evaluated,
+		 * evaluate them now and save the results.
+		 */
+		if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
+			status = acpi_ds_get_region_arguments(obj_desc);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+		}
+
+		/*
+		 * Map the table header and get the actual table length. The region
+		 * length is not guaranteed to be the same as the table length.
+		 */
+		table = acpi_os_map_memory(obj_desc->region.address,
+					   sizeof(struct acpi_table_header));
+		if (!table) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		length = table->length;
+		acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+
+		/* Must have at least an ACPI table header */
+
+		if (length < sizeof(struct acpi_table_header)) {
+			return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
+		}
+
+		/*
+		 * The memory region is not guaranteed to remain stable and we must
+		 * copy the table to a local buffer. For example, the memory region
+		 * is corrupted after suspend on some machines. Dynamically loaded
+		 * tables are usually small, so this overhead is minimal.
+		 */
+
+		/* Allocate a buffer for the table */
+
+		table_desc.pointer = ACPI_ALLOCATE(length);
+		if (!table_desc.pointer) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		/* Map the entire table and copy it */
+
+		table = acpi_os_map_memory(obj_desc->region.address, length);
+		if (!table) {
+			ACPI_FREE(table_desc.pointer);
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		ACPI_MEMCPY(table_desc.pointer, table, length);
+		acpi_os_unmap_memory(table, length);
+
+		table_desc.address = obj_desc->region.address;
+		break;
+
+	case ACPI_TYPE_BUFFER:	/* Buffer or resolved region_field */
+
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "Load table from Buffer or Field %p\n",
+				  obj_desc));
+
+		/* Must have at least an ACPI table header */
+
+		if (obj_desc->buffer.length < sizeof(struct acpi_table_header)) {
+			return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
+		}
+
+		/* Get the actual table length from the table header */
+
+		table =
+		    ACPI_CAST_PTR(struct acpi_table_header,
+				  obj_desc->buffer.pointer);
+		length = table->length;
+
+		/* Table cannot extend beyond the buffer */
+
+		if (length > obj_desc->buffer.length) {
+			return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
+		}
+		if (length < sizeof(struct acpi_table_header)) {
+			return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
+		}
+
+		/*
+		 * Copy the table from the buffer because the buffer could be modified
+		 * or even deleted in the future
+		 */
+		table_desc.pointer = ACPI_ALLOCATE(length);
+		if (!table_desc.pointer) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		ACPI_MEMCPY(table_desc.pointer, table, length);
+		table_desc.address = ACPI_TO_INTEGER(table_desc.pointer);
+		break;
+
+	default:
+		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+	}
+
+	/* Validate table checksum (will not get validated in tb_add_table) */
+
+	status = acpi_tb_verify_checksum(table_desc.pointer, length);
+	if (ACPI_FAILURE(status)) {
+		ACPI_FREE(table_desc.pointer);
+		return_ACPI_STATUS(status);
+	}
+
+	/* Complete the table descriptor */
+
+	table_desc.length = length;
+	table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED;
+
+	/* Install the new table into the local data structures */
+
+	status = acpi_tb_add_table(&table_desc, &table_index);
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	/*
+	 * Add the table to the namespace.
+	 *
+	 * Note: Load the table objects relative to the root of the namespace.
+	 * This appears to go against the ACPI specification, but we do it for
+	 * compatibility with other ACPI implementations.
+	 */
+	status =
+	    acpi_ex_add_table(table_index, acpi_gbl_root_node, &ddb_handle);
+	if (ACPI_FAILURE(status)) {
+
+		/* On error, table_ptr was deallocated above */
+
+		return_ACPI_STATUS(status);
+	}
+
+	/* Store the ddb_handle into the Target operand */
+
+	status = acpi_ex_store(ddb_handle, target, walk_state);
+	if (ACPI_FAILURE(status)) {
+		(void)acpi_ex_unload_table(ddb_handle);
+
+		/* table_ptr was deallocated above */
+
+		acpi_ut_remove_reference(ddb_handle);
+		return_ACPI_STATUS(status);
+	}
+
+	/* Invoke table handler if present */
+
+	if (acpi_gbl_table_handler) {
+		(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD,
+					     table_desc.pointer,
+					     acpi_gbl_table_handler_context);
+	}
+
+      cleanup:
+	if (ACPI_FAILURE(status)) {
+
+		/* Delete allocated table buffer */
+
+		acpi_tb_delete_table(&table_desc);
+	}
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_unload_table
+ *
+ * PARAMETERS:  ddb_handle          - Handle to a previously loaded table
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Unload an ACPI table
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
+{
+	acpi_status status = AE_OK;
+	union acpi_operand_object *table_desc = ddb_handle;
+	u32 table_index;
+	struct acpi_table_header *table;
+
+	ACPI_FUNCTION_TRACE(ex_unload_table);
+
+	/*
+	 * Validate the handle.
+	 * Although the handle is partially validated in acpi_ex_reconfiguration()
+	 * (when it calls acpi_ex_resolve_operands()), it is validated more
+	 * completely here.
+	 */
+	if ((!ddb_handle) ||
+	    (ACPI_GET_DESCRIPTOR_TYPE(ddb_handle) != ACPI_DESC_TYPE_OPERAND) ||
+	    (ACPI_GET_OBJECT_TYPE(ddb_handle) != ACPI_TYPE_LOCAL_REFERENCE)) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Get the table index from the ddb_handle */
+
+	table_index = table_desc->reference.value;
+
+	/* Invoke table handler if present */
+
+	if (acpi_gbl_table_handler) {
+		status = acpi_get_table_by_index(table_index, &table);
+		if (ACPI_SUCCESS(status)) {
+			(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
+						     table,
+						     acpi_gbl_table_handler_context);
+		}
+	}
+
+	/*
+	 * Delete the entire namespace under this table Node
+	 * (Offset contains the table_id)
+	 */
+	acpi_tb_delete_namespace_by_owner(table_index);
+	(void)acpi_tb_release_owner_id(table_index);
+
+	acpi_tb_set_table_loaded_flag(table_index, FALSE);
+
+	/* Table unloaded, remove a reference to the ddb_handle object */
+
+	acpi_ut_remove_reference(ddb_handle);
+	return_ACPI_STATUS(AE_OK);
+}
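+
+/*
+ * Illustrative ASL (editor's sketch; names are hypothetical): the Load and
+ * Unload operators backed by acpi_ex_load_op() and acpi_ex_unload_table()
+ * take their table image from a SystemMemory operation region or from a
+ * Buffer/Field, e.g.
+ *
+ *	OperationRegion (OPR1, SystemMemory, 0xDF008000, 0x1000)
+ *	...
+ *	Load (OPR1, Local0)	// Local0 receives the DDBHandle
+ *	...
+ *	Unload (Local0)
+ */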
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
new file mode 100644
index 0000000..0be1018
--- /dev/null
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -0,0 +1,692 @@
+/******************************************************************************
+ *
+ * Module Name: exconvrt - Object conversion routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exconvrt")
+
+/* Local prototypes */
+static u32
+acpi_ex_convert_to_ascii(acpi_integer integer,
+			 u16 base, u8 * string, u8 max_length);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_convert_to_integer
+ *
+ * PARAMETERS:  obj_desc        - Object to be converted. Must be an
+ *                                Integer, Buffer, or String
+ *              result_desc     - Where the new Integer object is returned
+ *              Flags           - Used for string conversion
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Convert an ACPI Object to an integer.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
+			   union acpi_operand_object **result_desc, u32 flags)
+{
+	union acpi_operand_object *return_desc;
+	u8 *pointer;
+	acpi_integer result;
+	u32 i;
+	u32 count;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_convert_to_integer, obj_desc);
+
+	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+	case ACPI_TYPE_INTEGER:
+
+		/* No conversion necessary */
+
+		*result_desc = obj_desc;
+		return_ACPI_STATUS(AE_OK);
+
+	case ACPI_TYPE_BUFFER:
+	case ACPI_TYPE_STRING:
+
+		/* Note: Takes advantage of common buffer/string fields */
+
+		pointer = obj_desc->buffer.pointer;
+		count = obj_desc->buffer.length;
+		break;
+
+	default:
+		return_ACPI_STATUS(AE_TYPE);
+	}
+
+	/*
+	 * Convert the buffer/string to an integer. Note that both buffers and
+	 * strings are treated as raw data - we don't convert ascii to hex for
+	 * strings.
+	 *
+	 * There are two terminating conditions for the loop:
+	 * 1) The size of an integer has been reached, or
+	 * 2) The end of the buffer or string has been reached
+	 */
+	result = 0;
+
+	/* String conversion is different than Buffer conversion */
+
+	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+	case ACPI_TYPE_STRING:
+
+		/*
+		 * Convert string to an integer - for most cases, the string must be
+		 * hexadecimal as per the ACPI specification. The only exception (as
+		 * of ACPI 3.0) is that the to_integer() operator allows both decimal
+		 * and hexadecimal strings (hex prefixed with "0x").
+		 */
+		status = acpi_ut_strtoul64((char *)pointer, flags, &result);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		/* Check for zero-length buffer */
+
+		if (!count) {
+			return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
+		}
+
+		/* Transfer no more than an integer's worth of data */
+
+		if (count > acpi_gbl_integer_byte_width) {
+			count = acpi_gbl_integer_byte_width;
+		}
+
+		/*
+		 * Convert buffer to an integer - we simply grab enough raw data
+		 * from the buffer to fill an integer
+		 */
+		for (i = 0; i < count; i++) {
+			/*
+			 * Get next byte and shift it into the Result.
+			 * Little endian is used, meaning that the first byte of the buffer
+			 * is the LSB of the integer
+			 */
+			result |= (((acpi_integer) pointer[i]) << (i * 8));
+		}
+		break;
+
+	default:
+
+		/* No other types can get here */
+		break;
+	}
+
+	/* Create a new integer */
+
+	return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+	if (!return_desc) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
+			  ACPI_FORMAT_UINT64(result)));
+
+	/* Save the Result */
+
+	return_desc->integer.value = result;
+	acpi_ex_truncate_for32bit_table(return_desc);
+	*result_desc = return_desc;
+	return_ACPI_STATUS(AE_OK);
+}
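+
+/*
+ * Worked example (editor's illustration): converting the Buffer
+ * { 0x34, 0x12 } yields the Integer 0x1234, because the first buffer byte
+ * becomes the least significant byte of the result in the little-endian
+ * loop above.
+ */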
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_convert_to_buffer
+ *
+ * PARAMETERS:  obj_desc        - Object to be converted. Must be an
+ *                                Integer, Buffer, or String
+ *              result_desc     - Where the new buffer object is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Convert an ACPI Object to a Buffer
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
+			  union acpi_operand_object **result_desc)
+{
+	union acpi_operand_object *return_desc;
+	u8 *new_buf;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_convert_to_buffer, obj_desc);
+
+	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+	case ACPI_TYPE_BUFFER:
+
+		/* No conversion necessary */
+
+		*result_desc = obj_desc;
+		return_ACPI_STATUS(AE_OK);
+
+	case ACPI_TYPE_INTEGER:
+
+		/*
+		 * Create a new Buffer object.
+		 * Need enough space for one integer
+		 */
+		return_desc =
+		    acpi_ut_create_buffer_object(acpi_gbl_integer_byte_width);
+		if (!return_desc) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		/* Copy the integer to the buffer, LSB first */
+
+		new_buf = return_desc->buffer.pointer;
+		ACPI_MEMCPY(new_buf,
+			    &obj_desc->integer.value,
+			    acpi_gbl_integer_byte_width);
+		break;
+
+	case ACPI_TYPE_STRING:
+
+		/*
+		 * Create a new Buffer object
+		 * Size will be the string length
+		 *
+		 * NOTE: Add one to the string length to include the null terminator.
+		 * The ACPI spec is unclear on this subject, but there is existing
+		 * ASL/AML code that depends on the null being transferred to the new
+		 * buffer.
+		 */
+		return_desc = acpi_ut_create_buffer_object((acpi_size)
+							   obj_desc->string.
+							   length + 1);
+		if (!return_desc) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		/* Copy the string to the buffer */
+
+		new_buf = return_desc->buffer.pointer;
+		ACPI_STRNCPY((char *)new_buf, (char *)obj_desc->string.pointer,
+			     obj_desc->string.length);
+		break;
+
+	default:
+		return_ACPI_STATUS(AE_TYPE);
+	}
+
+	/* Mark buffer initialized */
+
+	return_desc->common.flags |= AOPOBJ_DATA_VALID;
+	*result_desc = return_desc;
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_convert_to_ascii
+ *
+ * PARAMETERS:  Integer         - Value to be converted
+ *              Base            - ACPI_STRING_DECIMAL or ACPI_STRING_HEX
+ *              String          - Where the string is returned
+ *              data_width      - Size of data item to be converted, in bytes
+ *
+ * RETURN:      Actual string length
+ *
+ * DESCRIPTION: Convert an ACPI Integer to a hex or decimal string
+ *
+ ******************************************************************************/
+
+static u32
+acpi_ex_convert_to_ascii(acpi_integer integer,
+			 u16 base, u8 * string, u8 data_width)
+{
+	acpi_integer digit;
+	u32 i;
+	u32 j;
+	u32 k = 0;
+	u32 hex_length;
+	u32 decimal_length;
+	u32 remainder;
+	u8 suppress_zeros;
+
+	ACPI_FUNCTION_ENTRY();
+
+	switch (base) {
+	case 10:
+
+		/* Setup max length for the decimal number */
+
+		switch (data_width) {
+		case 1:
+			decimal_length = ACPI_MAX8_DECIMAL_DIGITS;
+			break;
+
+		case 4:
+			decimal_length = ACPI_MAX32_DECIMAL_DIGITS;
+			break;
+
+		case 8:
+		default:
+			decimal_length = ACPI_MAX64_DECIMAL_DIGITS;
+			break;
+		}
+
+		suppress_zeros = TRUE;	/* No leading zeros */
+		remainder = 0;
+
+		for (i = decimal_length; i > 0; i--) {
+
+			/* Divide by nth factor of 10 */
+
+			digit = integer;
+			for (j = 0; j < i; j++) {
+				(void)acpi_ut_short_divide(digit, 10, &digit,
+							   &remainder);
+			}
+
+			/* Handle leading zeros */
+
+			if (remainder != 0) {
+				suppress_zeros = FALSE;
+			}
+
+			if (!suppress_zeros) {
+				string[k] = (u8) (ACPI_ASCII_ZERO + remainder);
+				k++;
+			}
+		}
+		break;
+
+	case 16:
+
+		/* hex_length: 2 ascii hex chars per data byte */
+
+		hex_length = ACPI_MUL_2(data_width);
+		for (i = 0, j = (hex_length - 1); i < hex_length; i++, j--) {
+
+			/* Get one hex digit, most significant digits first */
+
+			string[k] =
+			    (u8) acpi_ut_hex_to_ascii_char(integer,
+							   ACPI_MUL_4(j));
+			k++;
+		}
+		break;
+
+	default:
+		return (0);
+	}
+
+	/*
+	 * Since leading zeros are suppressed, we must check for the case where
+	 * the integer equals 0
+	 *
+	 * Finally, null terminate the string and return the length
+	 */
+	if (!k) {
+		string[0] = ACPI_ASCII_ZERO;
+		k = 1;
+	}
+
+	string[k] = 0;
+	return ((u32) k);
+}
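+
+/*
+ * Worked example (editor's illustration): for the Integer 0xA05F, Base 10
+ * with a data_width of 8 produces "41055" (leading zeros suppressed), while
+ * Base 16 with a data_width of 4 produces "0000A05F" (fixed width, two hex
+ * characters per byte).
+ */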
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_convert_to_string
+ *
+ * PARAMETERS:  obj_desc        - Object to be converted. Must be an
+ *                                Integer, Buffer, or String
+ *              result_desc     - Where the string object is returned
+ *              Type            - String flags (base and conversion type)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Convert an ACPI Object to a string
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
+			  union acpi_operand_object ** result_desc, u32 type)
+{
+	union acpi_operand_object *return_desc;
+	u8 *new_buf;
+	u32 i;
+	u32 string_length = 0;
+	u16 base = 16;
+	u8 separator = ',';
+
+	ACPI_FUNCTION_TRACE_PTR(ex_convert_to_string, obj_desc);
+
+	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+	case ACPI_TYPE_STRING:
+
+		/* No conversion necessary */
+
+		*result_desc = obj_desc;
+		return_ACPI_STATUS(AE_OK);
+
+	case ACPI_TYPE_INTEGER:
+
+		switch (type) {
+		case ACPI_EXPLICIT_CONVERT_DECIMAL:
+
+			/* Make room for maximum decimal number */
+
+			string_length = ACPI_MAX_DECIMAL_DIGITS;
+			base = 10;
+			break;
+
+		default:
+
+			/* Two hex string characters for each integer byte */
+
+			string_length = ACPI_MUL_2(acpi_gbl_integer_byte_width);
+			break;
+		}
+
+		/*
+		 * Create a new String
+		 * Need enough space for one ASCII integer (plus null terminator)
+		 */
+		return_desc =
+		    acpi_ut_create_string_object((acpi_size) string_length);
+		if (!return_desc) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		new_buf = return_desc->buffer.pointer;
+
+		/* Convert integer to string */
+
+		string_length =
+		    acpi_ex_convert_to_ascii(obj_desc->integer.value, base,
+					     new_buf,
+					     acpi_gbl_integer_byte_width);
+
+		/* Null terminate at the correct place */
+
+		return_desc->string.length = string_length;
+		new_buf[string_length] = 0;
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		/* Setup string length, base, and separator */
+
+		switch (type) {
+		case ACPI_EXPLICIT_CONVERT_DECIMAL:	/* Used by to_decimal_string */
+			/*
+			 * From ACPI: "If Data is a buffer, it is converted to a string of
+			 * decimal values separated by commas."
+			 */
+			base = 10;
+
+			/*
+			 * Calculate the final string length. Individual string values
+			 * are variable length (include separator for each)
+			 */
+			for (i = 0; i < obj_desc->buffer.length; i++) {
+				if (obj_desc->buffer.pointer[i] >= 100) {
+					string_length += 4;
+				} else if (obj_desc->buffer.pointer[i] >= 10) {
+					string_length += 3;
+				} else {
+					string_length += 2;
+				}
+			}
+			break;
+
+		case ACPI_IMPLICIT_CONVERT_HEX:
+			/*
+			 * From the ACPI spec:
+			 * "The entire contents of the buffer are converted to a string of
+			 * two-character hexadecimal numbers, each separated by a space."
+			 */
+			separator = ' ';
+			string_length = (obj_desc->buffer.length * 3);
+			break;
+
+		case ACPI_EXPLICIT_CONVERT_HEX:	/* Used by to_hex_string */
+			/*
+			 * From ACPI: "If Data is a buffer, it is converted to a string of
+			 * hexadecimal values separated by commas."
+			 */
+			string_length = (obj_desc->buffer.length * 3);
+			break;
+
+		default:
+			return_ACPI_STATUS(AE_BAD_PARAMETER);
+		}
+
+		/*
+		 * Create a new string object and string buffer
+		 * (-1 because of extra separator included in string_length from above)
+		 * Allow creation of zero-length strings from zero-length buffers.
+		 */
+		if (string_length) {
+			string_length--;
+		}
+
+		return_desc = acpi_ut_create_string_object((acpi_size)
+							   string_length);
+		if (!return_desc) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		new_buf = return_desc->buffer.pointer;
+
+		/*
+		 * Convert buffer bytes to hex or decimal values
+		 * (separated by commas or spaces)
+		 */
+		for (i = 0; i < obj_desc->buffer.length; i++) {
+			new_buf += acpi_ex_convert_to_ascii((acpi_integer)
+							    obj_desc->buffer.
+							    pointer[i], base,
+							    new_buf, 1);
+			*new_buf++ = separator;	/* each separated by a comma or space */
+		}
+
+		/*
+		 * Null terminate the string
+		 * (overwrites final comma/space from above)
+		 */
+		if (obj_desc->buffer.length) {
+			new_buf--;
+		}
+		*new_buf = 0;
+		break;
+
+	default:
+		return_ACPI_STATUS(AE_TYPE);
+	}
+
+	*result_desc = return_desc;
+	return_ACPI_STATUS(AE_OK);
+}
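+
+/*
+ * Worked example (editor's illustration): for the Buffer { 0x0A, 0x1B },
+ * an explicit to_hex_string conversion produces "0A,1B", to_decimal_string
+ * produces "10,27", and the implicit hex conversion produces "0A 1B" (the
+ * same digits, space-separated, per the separator selection above).
+ */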
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_convert_to_target_type
+ *
+ * PARAMETERS:  destination_type    - Current type of the destination
+ *              source_desc         - Source object to be converted.
+ *              result_desc         - Where the converted object is returned
+ *              walk_state          - Current method state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Implements "implicit conversion" rules for storing an object.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_convert_to_target_type(acpi_object_type destination_type,
+			       union acpi_operand_object *source_desc,
+			       union acpi_operand_object **result_desc,
+			       struct acpi_walk_state *walk_state)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(ex_convert_to_target_type);
+
+	/* Default behavior */
+
+	*result_desc = source_desc;
+
+	/*
+	 * If required by the target,
+	 * perform implicit conversion on the source before we store it.
+	 */
+	switch (GET_CURRENT_ARG_TYPE(walk_state->op_info->runtime_args)) {
+	case ARGI_SIMPLE_TARGET:
+	case ARGI_FIXED_TARGET:
+	case ARGI_INTEGER_REF:	/* Handles Increment, Decrement cases */
+
+		switch (destination_type) {
+		case ACPI_TYPE_LOCAL_REGION_FIELD:
+			/*
+			 * Named field can always handle conversions
+			 */
+			break;
+
+		default:
+			/* No conversion allowed for these types */
+
+			if (destination_type !=
+			    ACPI_GET_OBJECT_TYPE(source_desc)) {
+				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+						  "Explicit operator, will store (%s) over existing type (%s)\n",
+						  acpi_ut_get_object_type_name
+						  (source_desc),
+						  acpi_ut_get_type_name
+						  (destination_type)));
+				status = AE_TYPE;
+			}
+		}
+		break;
+
+	case ARGI_TARGETREF:
+
+		switch (destination_type) {
+		case ACPI_TYPE_INTEGER:
+		case ACPI_TYPE_BUFFER_FIELD:
+		case ACPI_TYPE_LOCAL_BANK_FIELD:
+		case ACPI_TYPE_LOCAL_INDEX_FIELD:
+			/*
+			 * These types require an Integer operand. We can convert
+			 * a Buffer or a String to an Integer if necessary.
+			 */
+			status =
+			    acpi_ex_convert_to_integer(source_desc, result_desc,
+						       16);
+			break;
+
+		case ACPI_TYPE_STRING:
+			/*
+			 * The operand must be a String. We can convert an
+			 * Integer or Buffer if necessary
+			 */
+			status =
+			    acpi_ex_convert_to_string(source_desc, result_desc,
+						      ACPI_IMPLICIT_CONVERT_HEX);
+			break;
+
+		case ACPI_TYPE_BUFFER:
+			/*
+			 * The operand must be a Buffer. We can convert an
+			 * Integer or String if necessary
+			 */
+			status =
+			    acpi_ex_convert_to_buffer(source_desc, result_desc);
+			break;
+
+		default:
+			ACPI_ERROR((AE_INFO,
+				    "Bad destination type during conversion: %X",
+				    destination_type));
+			status = AE_AML_INTERNAL;
+			break;
+		}
+		break;
+
+	case ARGI_REFERENCE:
+		/*
+		 * create_xxxx_field cases - we are storing the field object into the name
+		 */
+		break;
+
+	default:
+		ACPI_ERROR((AE_INFO,
+			    "Unknown Target type ID 0x%X AmlOpcode %X DestType %s",
+			    GET_CURRENT_ARG_TYPE(walk_state->op_info->
+						 runtime_args),
+			    walk_state->opcode,
+			    acpi_ut_get_type_name(destination_type)));
+		status = AE_AML_INTERNAL;
+	}
+
+	/*
+	 * Source-to-Target conversion semantics:
+	 *
+	 * If conversion to the target type cannot be performed, then simply
+	 * overwrite the target with the new object and type.
+	 */
+	if (status == AE_TYPE) {
+		status = AE_OK;
+	}
+
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
new file mode 100644
index 0000000..a57ad25
--- /dev/null
+++ b/drivers/acpi/acpica/excreate.c
@@ -0,0 +1,522 @@
+/******************************************************************************
+ *
+ * Module Name: excreate - Named object creation
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("excreate")
+#ifndef ACPI_NO_METHOD_EXECUTION
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_create_alias
+ *
+ * PARAMETERS:  walk_state           - Current state, contains operands
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a new named alias
+ *
+ ******************************************************************************/
+acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
+{
+	struct acpi_namespace_node *target_node;
+	struct acpi_namespace_node *alias_node;
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(ex_create_alias);
+
+	/* Get the source/alias operands (both namespace nodes) */
+
+	alias_node = (struct acpi_namespace_node *)walk_state->operands[0];
+	target_node = (struct acpi_namespace_node *)walk_state->operands[1];
+
+	if ((target_node->type == ACPI_TYPE_LOCAL_ALIAS) ||
+	    (target_node->type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) {
+		/*
+		 * Dereference an existing alias so that we don't create a chain
+		 * of aliases.  With this code, we guarantee that an alias is
+		 * always exactly one level of indirection away from the
+		 * actual aliased name.
+		 */
+		target_node =
+		    ACPI_CAST_PTR(struct acpi_namespace_node,
+				  target_node->object);
+	}
+
+	/*
+	 * For objects that can never change (i.e., the NS node will
+	 * permanently point to the same object), we can simply attach
+	 * the object to the new NS node.  For other objects (such as
+	 * Integers, buffers, etc.), we have to point the Alias node
+	 * to the original Node.
+	 */
+	switch (target_node->type) {
+
+		/* For these types, the sub-object can change dynamically via a Store */
+
+	case ACPI_TYPE_INTEGER:
+	case ACPI_TYPE_STRING:
+	case ACPI_TYPE_BUFFER:
+	case ACPI_TYPE_PACKAGE:
+	case ACPI_TYPE_BUFFER_FIELD:
+
+		/*
+		 * These types open a new scope, so we need the NS node in order to access
+		 * any children.
+		 */
+	case ACPI_TYPE_DEVICE:
+	case ACPI_TYPE_POWER:
+	case ACPI_TYPE_PROCESSOR:
+	case ACPI_TYPE_THERMAL:
+	case ACPI_TYPE_LOCAL_SCOPE:
+
+		/*
+		 * The new alias has the type ALIAS and points to the original
+		 * NS node, not the object itself.
+		 */
+		alias_node->type = ACPI_TYPE_LOCAL_ALIAS;
+		alias_node->object =
+		    ACPI_CAST_PTR(union acpi_operand_object, target_node);
+		break;
+
+	case ACPI_TYPE_METHOD:
+
+		/*
+		 * Control method aliases need to be differentiated
+		 */
+		alias_node->type = ACPI_TYPE_LOCAL_METHOD_ALIAS;
+		alias_node->object =
+		    ACPI_CAST_PTR(union acpi_operand_object, target_node);
+		break;
+
+	default:
+
+		/* Attach the original source object to the new Alias Node */
+
+		/*
+		 * The new alias assumes the type of the target and points to
+		 * the same object. The object's reference count is incremented
+		 * to prevent deletion out from under either the target node or
+		 * the alias node.
+		 */
+		status = acpi_ns_attach_object(alias_node,
+					       acpi_ns_get_attached_object
+					       (target_node),
+					       target_node->type);
+		break;
+	}
+
+	/* Since both operands are Nodes, we don't need to delete them */
+
+	return_ACPI_STATUS(status);
+}
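+
+/*
+ * Illustrative ASL (editor's sketch; names are hypothetical):
+ *
+ *	Name (ORIG, 0x1234)
+ *	Alias (ORIG, ALI0)
+ *
+ * Because ORIG is an Integer (one of the types listed above whose
+ * sub-object can change via a Store), the ALI0 node is typed
+ * ACPI_TYPE_LOCAL_ALIAS and points back to the ORIG namespace node rather
+ * than to the Integer object itself.
+ */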
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_create_event
+ *
+ * PARAMETERS:  walk_state          - Current state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a new event object
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	union acpi_operand_object *obj_desc;
+
+	ACPI_FUNCTION_TRACE(ex_create_event);
+
+	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_EVENT);
+	if (!obj_desc) {
+		status = AE_NO_MEMORY;
+		goto cleanup;
+	}
+
+	/*
+	 * Create the actual OS semaphore, with zero initial units -- meaning
+	 * that the event is created in an unsignalled state
+	 */
+	status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0,
+					  &obj_desc->event.os_semaphore);
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	/* Attach object to the Node */
+
+	status =
+	    acpi_ns_attach_object((struct acpi_namespace_node *)walk_state->
+				  operands[0], obj_desc, ACPI_TYPE_EVENT);
+
+      cleanup:
+	/*
+	 * Remove local reference to the object (on error, will cause deletion
+	 * of both object and semaphore if present.)
+	 */
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_create_mutex
+ *
+ * PARAMETERS:  walk_state          - Current state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a new mutex object
+ *
+ *              Mutex (Name[0], sync_level[1])
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
+{
+	acpi_status status = AE_OK;
+	union acpi_operand_object *obj_desc;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_create_mutex, ACPI_WALK_OPERANDS);
+
+	/* Create the new mutex object */
+
+	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
+	if (!obj_desc) {
+		status = AE_NO_MEMORY;
+		goto cleanup;
+	}
+
+	/* Create the actual OS Mutex */
+
+	status = acpi_os_create_mutex(&obj_desc->mutex.os_mutex);
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	/* Init object and attach to NS node */
+
+	obj_desc->mutex.sync_level =
+	    (u8) walk_state->operands[1]->integer.value;
+	obj_desc->mutex.node =
+	    (struct acpi_namespace_node *)walk_state->operands[0];
+
+	status =
+	    acpi_ns_attach_object(obj_desc->mutex.node, obj_desc,
+				  ACPI_TYPE_MUTEX);
+
+      cleanup:
+	/*
+	 * Remove local reference to the object (on error, will cause deletion
+	 * of both object and semaphore if present.)
+	 */
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_create_region
+ *
+ * PARAMETERS:  aml_start           - Pointer to the region declaration AML
+ *              aml_length          - Max length of the declaration AML
+ *              region_space        - space_id for the region
+ *              walk_state          - Current state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a new operation region object
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_create_region(u8 * aml_start,
+		      u32 aml_length,
+		      u8 region_space, struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	union acpi_operand_object *obj_desc;
+	struct acpi_namespace_node *node;
+	union acpi_operand_object *region_obj2;
+
+	ACPI_FUNCTION_TRACE(ex_create_region);
+
+	/* Get the Namespace Node */
+
+	node = walk_state->op->common.node;
+
+	/*
+	 * If the region object is already attached to this node,
+	 * just return
+	 */
+	if (acpi_ns_get_attached_object(node)) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/*
+	 * Space ID must be one of the predefined IDs, or in the user-defined
+	 * range
+	 */
+	if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
+	    (region_space < ACPI_USER_REGION_BEGIN)) {
+		ACPI_ERROR((AE_INFO, "Invalid AddressSpace type %X",
+			    region_space));
+		return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (%X)\n",
+			  acpi_ut_get_region_name(region_space), region_space));
+
+	/* Create the region descriptor */
+
+	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_REGION);
+	if (!obj_desc) {
+		status = AE_NO_MEMORY;
+		goto cleanup;
+	}
+
+	/*
+	 * Remember location in AML stream of address & length
+	 * operands since they need to be evaluated at run time.
+	 */
+	region_obj2 = obj_desc->common.next_object;
+	region_obj2->extra.aml_start = aml_start;
+	region_obj2->extra.aml_length = aml_length;
+
+	/* Init the region from the operands */
+
+	obj_desc->region.space_id = region_space;
+	obj_desc->region.address = 0;
+	obj_desc->region.length = 0;
+	obj_desc->region.node = node;
+
+	/* Install the new region object in the parent Node */
+
+	status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_REGION);
+
+      cleanup:
+
+	/* Remove local reference to the object */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
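+
+/*
+ * Illustrative ASL (hypothetical declaration):
+ *
+ *     OperationRegion (GPIO, SystemIO, 0x80, 0x04)
+ *
+ * The address (0x80) and length (0x04) operands are TermArgs, so only their
+ * location in the AML stream is recorded here and the values are evaluated
+ * at run time, as noted above.
+ */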
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_create_processor
+ *
+ * PARAMETERS:  walk_state          - Current state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a new processor object and populate the fields
+ *
+ *              Processor (Name[0], cpu_id[1], pblock_addr[2], pblock_length[3])
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_create_processor(struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object **operand = &walk_state->operands[0];
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_create_processor, walk_state);
+
+	/* Create the processor object */
+
+	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PROCESSOR);
+	if (!obj_desc) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Initialize the processor object from the operands */
+
+	obj_desc->processor.proc_id = (u8) operand[1]->integer.value;
+	obj_desc->processor.length = (u8) operand[3]->integer.value;
+	obj_desc->processor.address =
+	    (acpi_io_address) operand[2]->integer.value;
+
+	/* Install the processor object in the parent Node */
+
+	status = acpi_ns_attach_object((struct acpi_namespace_node *)operand[0],
+				       obj_desc, ACPI_TYPE_PROCESSOR);
+
+	/* Remove local reference to the object */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
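+
+/*
+ * Illustrative ASL (hypothetical declaration):
+ *
+ *     Processor (CPU0, 0x01, 0x00000410, 0x06) { }
+ *
+ * maps to proc_id = 0x01 (operand[1]), address = 0x410 (operand[2]) and
+ * length = 0x06 (operand[3]), as initialized above.
+ */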
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_create_power_resource
+ *
+ * PARAMETERS:  walk_state          - Current state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a new power_resource object and populate the fields
+ *
+ *              power_resource (Name[0], system_level[1], resource_order[2])
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_create_power_resource(struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object **operand = &walk_state->operands[0];
+	acpi_status status;
+	union acpi_operand_object *obj_desc;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_create_power_resource, walk_state);
+
+	/* Create the power resource object */
+
+	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_POWER);
+	if (!obj_desc) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Initialize the power object from the operands */
+
+	obj_desc->power_resource.system_level = (u8) operand[1]->integer.value;
+	obj_desc->power_resource.resource_order =
+	    (u16) operand[2]->integer.value;
+
+	/* Install the power resource object in the parent Node */
+
+	status = acpi_ns_attach_object((struct acpi_namespace_node *)operand[0],
+				       obj_desc, ACPI_TYPE_POWER);
+
+	/* Remove local reference to the object */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
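+
+/*
+ * Illustrative ASL (hypothetical declaration):
+ *
+ *     PowerResource (PWR0, 0, 0) {...}
+ *
+ * system_level (0) comes from operand[1] and resource_order (0) from
+ * operand[2], matching the initialization above.
+ */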
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_create_method
+ *
+ * PARAMETERS:  aml_start       - First byte of the method's AML
+ *              aml_length      - AML byte count for this method
+ *              walk_state      - Current state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a new method object
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_create_method(u8 * aml_start,
+		      u32 aml_length, struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object **operand = &walk_state->operands[0];
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+	u8 method_flags;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_create_method, walk_state);
+
+	/* Create a new method object */
+
+	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
+	if (!obj_desc) {
+		status = AE_NO_MEMORY;
+		goto exit;
+	}
+
+	/* Save the method's AML pointer and length  */
+
+	obj_desc->method.aml_start = aml_start;
+	obj_desc->method.aml_length = aml_length;
+
+	/*
+	 * Disassemble the method flags. Split off the Arg Count
+	 * for efficiency
+	 */
+	method_flags = (u8) operand[1]->integer.value;
+
+	obj_desc->method.method_flags =
+	    (u8) (method_flags & ~AML_METHOD_ARG_COUNT);
+	obj_desc->method.param_count =
+	    (u8) (method_flags & AML_METHOD_ARG_COUNT);
+
+	/*
+	 * Get the sync_level. If method is serialized, a mutex will be
+	 * created for this method when it is parsed.
+	 */
+	if (method_flags & AML_METHOD_SERIALIZED) {
+		/*
+		 * ACPI 1.0: sync_level = 0
+		 * ACPI 2.0: sync_level = sync_level in method declaration
+		 */
+		obj_desc->method.sync_level = (u8)
+		    ((method_flags & AML_METHOD_SYNCH_LEVEL) >> 4);
+	}
+
+	/* Attach the new object to the method Node */
+
+	status = acpi_ns_attach_object((struct acpi_namespace_node *)operand[0],
+				       obj_desc, ACPI_TYPE_METHOD);
+
+	/* Remove local reference to the object */
+
+	acpi_ut_remove_reference(obj_desc);
+
+      exit:
+	/* Remove a reference to the operand */
+
+	acpi_ut_remove_reference(operand[1]);
+	return_ACPI_STATUS(status);
+}
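+
+/*
+ * Illustrative decode (hypothetical flags byte): method_flags = 0x1A yields
+ * param_count = 2 (low three bits), the serialized flag set (bit 3), and
+ * sync_level = 1 (high nibble shifted down), per the masks used above.
+ */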
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
new file mode 100644
index 0000000..aa31357
--- /dev/null
+++ b/drivers/acpi/acpica/exdump.c
@@ -0,0 +1,1060 @@
+/******************************************************************************
+ *
+ * Module Name: exdump - Interpreter debug output routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exdump")
+
+/*
+ * The following routines are used for debug output only
+ */
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+/* Local prototypes */
+static void acpi_ex_out_string(char *title, char *value);
+
+static void acpi_ex_out_pointer(char *title, void *value);
+
+static void
+acpi_ex_dump_object(union acpi_operand_object *obj_desc,
+		    struct acpi_exdump_info *info);
+
+static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc);
+
+static void
+acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
+			 u32 level, u32 index);
+
+/*******************************************************************************
+ *
+ * Object Descriptor info tables
+ *
+ * Note: The first table entry must be an INIT opcode and must contain
+ * the table length (number of table entries)
+ *
+ ******************************************************************************/
+
+static struct acpi_exdump_info acpi_ex_dump_integer[2] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_integer), NULL},
+	{ACPI_EXD_UINT64, ACPI_EXD_OFFSET(integer.value), "Value"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_string[4] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_string), NULL},
+	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(string.length), "Length"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(string.pointer), "Pointer"},
+	{ACPI_EXD_STRING, 0, NULL}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_buffer[5] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL},
+	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
+	{ACPI_EXD_BUFFER, 0, NULL}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_package[5] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_package), NULL},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(package.flags), "Flags"},
+	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Elements"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(package.elements), "Element List"},
+	{ACPI_EXD_PACKAGE, 0, NULL}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_device[4] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_device), NULL},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.handler), "Handler"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.system_notify),
+	 "System Notify"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.device_notify),
+	 "Device Notify"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_event[2] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_event), NULL},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.os_semaphore), "OsSemaphore"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_method[8] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "ParamCount"},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.mutex), "Mutex"},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.thread_count), "Thread Count"},
+	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(method.aml_length), "Aml Length"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.aml_start), "Aml Start"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_mutex[5] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_mutex), NULL},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.sync_level), "Sync Level"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"},
+	{ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth),
+	 "Acquire Depth"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_region[7] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region), NULL},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.space_id), "Space Id"},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.flags), "Flags"},
+	{ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(region.address), "Address"},
+	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(region.length), "Length"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.handler), "Handler"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.next), "Next"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_power[5] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_power), NULL},
+	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.system_level),
+	 "System Level"},
+	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.resource_order),
+	 "Resource Order"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.system_notify),
+	 "System Notify"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.device_notify),
+	 "Device Notify"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_processor[7] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_processor), NULL},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.length), "Length"},
+	{ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.system_notify),
+	 "System Notify"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.device_notify),
+	 "Device Notify"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.handler), "Handler"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_thermal[4] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_thermal), NULL},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.system_notify),
+	 "System Notify"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.device_notify),
+	 "Device Notify"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.handler), "Handler"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_buffer_field[3] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer_field), NULL},
+	{ACPI_EXD_FIELD, 0, NULL},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer_field.buffer_obj),
+	 "Buffer Object"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_region_field[3] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region_field), NULL},
+	{ACPI_EXD_FIELD, 0, NULL},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_bank_field[5] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_bank_field), NULL},
+	{ACPI_EXD_FIELD, 0, NULL},
+	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(bank_field.value), "Value"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(bank_field.region_obj),
+	 "Region Object"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(bank_field.bank_obj), "Bank Object"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_index_field[5] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_bank_field), NULL},
+	{ACPI_EXD_FIELD, 0, NULL},
+	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(index_field.value), "Value"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(index_field.index_obj),
+	 "Index Object"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(index_field.data_obj), "Data Object"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_reference[8] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_reference), NULL},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.class), "Class"},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.target_type), "Target Type"},
+	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(reference.value), "Value"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.object), "Object Desc"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.node), "Node"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.where), "Where"},
+	{ACPI_EXD_REFERENCE, 0, NULL}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_address_handler[6] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_address_handler),
+	 NULL},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(address_space.space_id), "Space Id"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.next), "Next"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.region_list),
+	 "Region List"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.node), "Node"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.context), "Context"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_notify[3] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_notify), NULL},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.node), "Node"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.context), "Context"}
+};
+
+/* Miscellaneous tables */
+
+static struct acpi_exdump_info acpi_ex_dump_common[4] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_common), NULL},
+	{ACPI_EXD_TYPE, 0, NULL},
+	{ACPI_EXD_UINT16, ACPI_EXD_OFFSET(common.reference_count),
+	 "Reference Count"},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common.flags), "Flags"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_field_common[7] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_field_common), NULL},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common_field.field_flags),
+	 "Field Flags"},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common_field.access_byte_width),
+	 "Access Byte Width"},
+	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(common_field.bit_length),
+	 "Bit Length"},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common_field.start_field_bit_offset),
+	 "Field Bit Offset"},
+	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(common_field.base_byte_offset),
+	 "Base Byte Offset"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(common_field.node), "Parent Node"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_node[5] = {
+	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_node), NULL},
+	{ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(flags), "Flags"},
+	{ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(owner_id), "Owner Id"},
+	{ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(child), "Child List"},
+	{ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(peer), "Next Peer"}
+};
+
+/* Dispatch table, indexed by object type */
+
+static struct acpi_exdump_info *acpi_ex_dump_info[] = {
+	NULL,
+	acpi_ex_dump_integer,
+	acpi_ex_dump_string,
+	acpi_ex_dump_buffer,
+	acpi_ex_dump_package,
+	NULL,
+	acpi_ex_dump_device,
+	acpi_ex_dump_event,
+	acpi_ex_dump_method,
+	acpi_ex_dump_mutex,
+	acpi_ex_dump_region,
+	acpi_ex_dump_power,
+	acpi_ex_dump_processor,
+	acpi_ex_dump_thermal,
+	acpi_ex_dump_buffer_field,
+	NULL,
+	NULL,
+	acpi_ex_dump_region_field,
+	acpi_ex_dump_bank_field,
+	acpi_ex_dump_index_field,
+	acpi_ex_dump_reference,
+	NULL,
+	NULL,
+	acpi_ex_dump_notify,
+	acpi_ex_dump_address_handler,
+	NULL,
+	NULL,
+	NULL
+};
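+
+/*
+ * The dispatch table above is indexed by the object's internal type value
+ * (obj_desc->common.type); assuming the standard ACPI type numbering, a
+ * Mutex (type 9) selects acpi_ex_dump_mutex.  NULL entries mark types for
+ * which no dump table exists.
+ */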
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_dump_object
+ *
+ * PARAMETERS:  obj_desc            - Descriptor to dump
+ *              Info                - Info table corresponding to this object
+ *                                    type
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Walk the info table for this object
+ *
+ ******************************************************************************/
+
+static void
+acpi_ex_dump_object(union acpi_operand_object *obj_desc,
+		    struct acpi_exdump_info *info)
+{
+	u8 *target;
+	char *name;
+	u8 count;
+
+	if (!info) {
+		acpi_os_printf
+		    ("ExDumpObject: Display not implemented for object type %s\n",
+		     acpi_ut_get_object_type_name(obj_desc));
+		return;
+	}
+
+	/* First table entry must contain the table length (# of table entries) */
+
+	count = info->offset;
+
+	while (count) {
+		target = ACPI_ADD_PTR(u8, obj_desc, info->offset);
+		name = info->name;
+
+		switch (info->opcode) {
+		case ACPI_EXD_INIT:
+			break;
+
+		case ACPI_EXD_TYPE:
+			acpi_ex_out_string("Type",
+					   acpi_ut_get_object_type_name
+					   (obj_desc));
+			break;
+
+		case ACPI_EXD_UINT8:
+
+			acpi_os_printf("%20s : %2.2X\n", name, *target);
+			break;
+
+		case ACPI_EXD_UINT16:
+
+			acpi_os_printf("%20s : %4.4X\n", name,
+				       ACPI_GET16(target));
+			break;
+
+		case ACPI_EXD_UINT32:
+
+			acpi_os_printf("%20s : %8.8X\n", name,
+				       ACPI_GET32(target));
+			break;
+
+		case ACPI_EXD_UINT64:
+
+			acpi_os_printf("%20s : %8.8X%8.8X\n", "Value",
+				       ACPI_FORMAT_UINT64(ACPI_GET64(target)));
+			break;
+
+		case ACPI_EXD_POINTER:
+		case ACPI_EXD_ADDRESS:
+
+			acpi_ex_out_pointer(name,
+					    *ACPI_CAST_PTR(void *, target));
+			break;
+
+		case ACPI_EXD_STRING:
+
+			acpi_ut_print_string(obj_desc->string.pointer,
+					     ACPI_UINT8_MAX);
+			acpi_os_printf("\n");
+			break;
+
+		case ACPI_EXD_BUFFER:
+
+			ACPI_DUMP_BUFFER(obj_desc->buffer.pointer,
+					 obj_desc->buffer.length);
+			break;
+
+		case ACPI_EXD_PACKAGE:
+
+			/* Dump the package contents */
+
+			acpi_os_printf("\nPackage Contents:\n");
+			acpi_ex_dump_package_obj(obj_desc, 0, 0);
+			break;
+
+		case ACPI_EXD_FIELD:
+
+			acpi_ex_dump_object(obj_desc,
+					    acpi_ex_dump_field_common);
+			break;
+
+		case ACPI_EXD_REFERENCE:
+
+			acpi_ex_out_string("Class Name",
+					   (char *)
+					   acpi_ut_get_reference_name
+					   (obj_desc));
+			acpi_ex_dump_reference_obj(obj_desc);
+			break;
+
+		default:
+			acpi_os_printf("**** Invalid table opcode [%X] ****\n",
+				       info->opcode);
+			return;
+		}
+
+		info++;
+		count--;
+	}
+}
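+
+/*
+ * Worked example (hypothetical object): for an Integer whose value is 0x20,
+ * the acpi_ex_dump_integer table drives two loop passes (INIT, then UINT64),
+ * producing a single line of the form
+ *
+ *     Value : 0000000000000020
+ */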
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_dump_operand
+ *
+ * PARAMETERS:  *obj_desc       - Pointer to entry to be dumped
+ *              Depth           - Current nesting depth
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Dump an operand object
+ *
+ ******************************************************************************/
+
+void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
+{
+	u32 length;
+	u32 index;
+
+	ACPI_FUNCTION_NAME(ex_dump_operand)
+
+	    if (!((ACPI_LV_EXEC & acpi_dbg_level)
+		  && (_COMPONENT & acpi_dbg_layer))) {
+		return;
+	}
+
+	if (!obj_desc) {
+
+		/* This could be a null element of a package */
+
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Null Object Descriptor\n"));
+		return;
+	}
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%p Namespace Node: ",
+				  obj_desc));
+		ACPI_DUMP_ENTRY(obj_desc, ACPI_LV_EXEC);
+		return;
+	}
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "%p is not a node or operand object: [%s]\n",
+				  obj_desc,
+				  acpi_ut_get_descriptor_name(obj_desc)));
+		ACPI_DUMP_BUFFER(obj_desc, sizeof(union acpi_operand_object));
+		return;
+	}
+
+	/* obj_desc is a valid object */
+
+	if (depth > 0) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%*s[%u] %p ",
+				  depth, " ", depth, obj_desc));
+	} else {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%p ", obj_desc));
+	}
+
+	/* Decode object type */
+
+	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+	case ACPI_TYPE_LOCAL_REFERENCE:
+
+		acpi_os_printf("Reference: [%s] ",
+			       acpi_ut_get_reference_name(obj_desc));
+
+		switch (obj_desc->reference.class) {
+		case ACPI_REFCLASS_DEBUG:
+
+			acpi_os_printf("\n");
+			break;
+
+		case ACPI_REFCLASS_INDEX:
+
+			acpi_os_printf("%p\n", obj_desc->reference.object);
+			break;
+
+		case ACPI_REFCLASS_TABLE:
+
+			acpi_os_printf("Table Index %X\n",
+				       obj_desc->reference.value);
+			break;
+
+		case ACPI_REFCLASS_REFOF:
+
+			acpi_os_printf("%p [%s]\n", obj_desc->reference.object,
+				       acpi_ut_get_type_name(((union
+							       acpi_operand_object
+							       *)
+							      obj_desc->
+							      reference.
+							      object)->common.
+							     type));
+			break;
+
+		case ACPI_REFCLASS_ARG:
+
+			acpi_os_printf("%X", obj_desc->reference.value);
+
+			if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
+				/* Value is an Integer */
+
+				acpi_os_printf(" value is [%8.8X%8.8x]",
+					       ACPI_FORMAT_UINT64(obj_desc->
+								  integer.
+								  value));
+			}
+
+			acpi_os_printf("\n");
+			break;
+
+		case ACPI_REFCLASS_LOCAL:
+
+			acpi_os_printf("%X", obj_desc->reference.value);
+
+			if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
+				/* Value is an Integer */
+
+				acpi_os_printf(" value is [%8.8X%8.8x]",
+					       ACPI_FORMAT_UINT64(obj_desc->
+								  integer.
+								  value));
+			}
+
+			acpi_os_printf("\n");
+			break;
+
+		case ACPI_REFCLASS_NAME:
+
+			acpi_os_printf("- [%4.4s]\n",
+				       obj_desc->reference.node->name.ascii);
+			break;
+
+		default:	/* Unknown reference class */
+
+			acpi_os_printf("%2.2X\n", obj_desc->reference.class);
+			break;
+		}
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		acpi_os_printf("Buffer length %.2X @ %p\n",
+			       obj_desc->buffer.length,
+			       obj_desc->buffer.pointer);
+
+		/* Debug only -- dump the buffer contents */
+
+		if (obj_desc->buffer.pointer) {
+			length = obj_desc->buffer.length;
+			if (length > 128) {
+				length = 128;
+			}
+
+			acpi_os_printf
+			    ("Buffer Contents: (displaying length 0x%.2X)\n",
+			     length);
+			ACPI_DUMP_BUFFER(obj_desc->buffer.pointer, length);
+		}
+		break;
+
+	case ACPI_TYPE_INTEGER:
+
+		acpi_os_printf("Integer %8.8X%8.8X\n",
+			       ACPI_FORMAT_UINT64(obj_desc->integer.value));
+		break;
+
+	case ACPI_TYPE_PACKAGE:
+
+		acpi_os_printf("Package [Len %X] ElementArray %p\n",
+			       obj_desc->package.count,
+			       obj_desc->package.elements);
+
+		/*
+		 * If elements exist, package element pointer is valid,
+		 * and debug_level exceeds 1, dump package's elements.
+		 */
+		if (obj_desc->package.count &&
+		    obj_desc->package.elements && acpi_dbg_level > 1) {
+			for (index = 0; index < obj_desc->package.count;
+			     index++) {
+				acpi_ex_dump_operand(obj_desc->package.
+						     elements[index],
+						     depth + 1);
+			}
+		}
+		break;
+
+	case ACPI_TYPE_REGION:
+
+		acpi_os_printf("Region %s (%X)",
+			       acpi_ut_get_region_name(obj_desc->region.
+						       space_id),
+			       obj_desc->region.space_id);
+
+		/*
+		 * If the address and length have not been evaluated,
+		 * don't print them.
+		 */
+		if (!(obj_desc->region.flags & AOPOBJ_DATA_VALID)) {
+			acpi_os_printf("\n");
+		} else {
+			acpi_os_printf(" base %8.8X%8.8X Length %X\n",
+				       ACPI_FORMAT_NATIVE_UINT(obj_desc->region.
+							       address),
+				       obj_desc->region.length);
+		}
+		break;
+
+	case ACPI_TYPE_STRING:
+
+		acpi_os_printf("String length %X @ %p ",
+			       obj_desc->string.length,
+			       obj_desc->string.pointer);
+
+		acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX);
+		acpi_os_printf("\n");
+		break;
+
+	case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+		acpi_os_printf("BankField\n");
+		break;
+
+	case ACPI_TYPE_LOCAL_REGION_FIELD:
+
+		acpi_os_printf
+		    ("RegionField: Bits=%X AccWidth=%X Lock=%X Update=%X at byte=%X bit=%X of below:\n",
+		     obj_desc->field.bit_length,
+		     obj_desc->field.access_byte_width,
+		     obj_desc->field.field_flags & AML_FIELD_LOCK_RULE_MASK,
+		     obj_desc->field.field_flags & AML_FIELD_UPDATE_RULE_MASK,
+		     obj_desc->field.base_byte_offset,
+		     obj_desc->field.start_field_bit_offset);
+
+		acpi_ex_dump_operand(obj_desc->field.region_obj, depth + 1);
+		break;
+
+	case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+		acpi_os_printf("IndexField\n");
+		break;
+
+	case ACPI_TYPE_BUFFER_FIELD:
+
+		acpi_os_printf("BufferField: %X bits at byte %X bit %X of\n",
+			       obj_desc->buffer_field.bit_length,
+			       obj_desc->buffer_field.base_byte_offset,
+			       obj_desc->buffer_field.start_field_bit_offset);
+
+		if (!obj_desc->buffer_field.buffer_obj) {
+			ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "*NULL*\n"));
+		} else
+		    if (ACPI_GET_OBJECT_TYPE(obj_desc->buffer_field.buffer_obj)
+			!= ACPI_TYPE_BUFFER) {
+			acpi_os_printf("*not a Buffer*\n");
+		} else {
+			acpi_ex_dump_operand(obj_desc->buffer_field.buffer_obj,
+					     depth + 1);
+		}
+		break;
+
+	case ACPI_TYPE_EVENT:
+
+		acpi_os_printf("Event\n");
+		break;
+
+	case ACPI_TYPE_METHOD:
+
+		acpi_os_printf("Method(%X) @ %p:%X\n",
+			       obj_desc->method.param_count,
+			       obj_desc->method.aml_start,
+			       obj_desc->method.aml_length);
+		break;
+
+	case ACPI_TYPE_MUTEX:
+
+		acpi_os_printf("Mutex\n");
+		break;
+
+	case ACPI_TYPE_DEVICE:
+
+		acpi_os_printf("Device\n");
+		break;
+
+	case ACPI_TYPE_POWER:
+
+		acpi_os_printf("Power\n");
+		break;
+
+	case ACPI_TYPE_PROCESSOR:
+
+		acpi_os_printf("Processor\n");
+		break;
+
+	case ACPI_TYPE_THERMAL:
+
+		acpi_os_printf("Thermal\n");
+		break;
+
+	default:
+		/* Unknown Type */
+
+		acpi_os_printf("Unknown Type %X\n",
+			       ACPI_GET_OBJECT_TYPE(obj_desc));
+		break;
+	}
+
+	return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_dump_operands
+ *
+ * PARAMETERS:  Operands            - A list of Operand objects
+ *              opcode_name         - AML opcode name
+ *              num_operands        - Operand count for this opcode
+ *
+ * DESCRIPTION: Dump the operands associated with the opcode
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_dump_operands(union acpi_operand_object **operands,
+		      const char *opcode_name, u32 num_operands)
+{
+	ACPI_FUNCTION_NAME(ex_dump_operands);
+
+	if (!opcode_name) {
+		opcode_name = "UNKNOWN";
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+			  "**** Start operand dump for opcode [%s], %d operands\n",
+			  opcode_name, num_operands));
+
+	if (num_operands == 0) {
+		num_operands = 1;
+	}
+
+	/* Dump the individual operands */
+
+	while (num_operands) {
+		acpi_ex_dump_operand(*operands, 0);
+		operands++;
+		num_operands--;
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+			  "**** End operand dump for [%s]\n", opcode_name));
+	return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_out* functions
+ *
+ * PARAMETERS:  Title               - Descriptive text
+ *              Value               - Value to be displayed
+ *
+ * DESCRIPTION: Object dump output formatting functions.  These functions
+ *              reduce the number of format strings required and keep them
+ *              all in one place for easy modification.
+ *
+ ******************************************************************************/
+
+static void acpi_ex_out_string(char *title, char *value)
+{
+	acpi_os_printf("%20s : %s\n", title, value);
+}
+
+static void acpi_ex_out_pointer(char *title, void *value)
+{
+	acpi_os_printf("%20s : %p\n", title, value);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_dump_namespace_node
+ *
+ * PARAMETERS:  Node                - Descriptor to dump
+ *              Flags               - Force display if TRUE
+ *
+ * DESCRIPTION: Dumps the members of the given Node
+ *
+ ******************************************************************************/
+
+void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
+{
+
+	ACPI_FUNCTION_ENTRY();
+
+	if (!flags) {
+		if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
+		      && (_COMPONENT & acpi_dbg_layer))) {
+			return;
+		}
+	}
+
+	acpi_os_printf("%20s : %4.4s\n", "Name", acpi_ut_get_node_name(node));
+	acpi_ex_out_string("Type", acpi_ut_get_type_name(node->type));
+	acpi_ex_out_pointer("Attached Object",
+			    acpi_ns_get_attached_object(node));
+	acpi_ex_out_pointer("Parent", acpi_ns_get_parent_node(node));
+
+	acpi_ex_dump_object(ACPI_CAST_PTR(union acpi_operand_object, node),
+			    acpi_ex_dump_node);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_dump_reference_obj
+ *
+ * PARAMETERS:  Object              - Descriptor to dump
+ *
+ * DESCRIPTION: Dumps a reference object
+ *
+ ******************************************************************************/
+
+static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
+{
+	struct acpi_buffer ret_buf;
+	acpi_status status;
+
+	ret_buf.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+
+	if (obj_desc->reference.class == ACPI_REFCLASS_NAME) {
+		acpi_os_printf(" %p ", obj_desc->reference.node);
+
+		status =
+		    acpi_ns_handle_to_pathname(obj_desc->reference.node,
+					       &ret_buf);
+		if (ACPI_FAILURE(status)) {
+			acpi_os_printf(" Could not convert name to pathname\n");
+		} else {
+			acpi_os_printf("%s\n", (char *)ret_buf.pointer);
+			ACPI_FREE(ret_buf.pointer);
+		}
+	} else if (obj_desc->reference.object) {
+		if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) ==
+		    ACPI_DESC_TYPE_OPERAND) {
+			acpi_os_printf(" Target: %p",
+				       obj_desc->reference.object);
+			if (obj_desc->reference.class == ACPI_REFCLASS_TABLE) {
+				acpi_os_printf(" Table Index: %X\n",
+					       obj_desc->reference.value);
+			} else {
+				acpi_os_printf(" Target: %p [%s]\n",
+					       obj_desc->reference.object,
+					       acpi_ut_get_type_name(((union
+								       acpi_operand_object
+								       *)
+								      obj_desc->
+								      reference.
+								      object)->
+								     common.
+								     type));
+			}
+		} else {
+			acpi_os_printf(" Target: %p\n",
+				       obj_desc->reference.object);
+		}
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_dump_package_obj
+ *
+ * PARAMETERS:  obj_desc            - Descriptor to dump
+ *              Level               - Indentation Level
+ *              Index               - Package index for this object
+ *
+ * DESCRIPTION: Dumps the elements of the package
+ *
+ ******************************************************************************/
+
+static void
+acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
+			 u32 level, u32 index)
+{
+	u32 i;
+
+	/* Indentation and index output */
+
+	if (level > 0) {
+		for (i = 0; i < level; i++) {
+			acpi_os_printf(" ");
+		}
+
+		acpi_os_printf("[%.2d] ", index);
+	}
+
+	acpi_os_printf("%p ", obj_desc);
+
+	/* Null package elements are allowed */
+
+	if (!obj_desc) {
+		acpi_os_printf("[Null Object]\n");
+		return;
+	}
+
+	/* Packages may only contain a few object types */
+
+	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+	case ACPI_TYPE_INTEGER:
+
+		acpi_os_printf("[Integer] = %8.8X%8.8X\n",
+			       ACPI_FORMAT_UINT64(obj_desc->integer.value));
+		break;
+
+	case ACPI_TYPE_STRING:
+
+		acpi_os_printf("[String] Value: ");
+		for (i = 0; i < obj_desc->string.length; i++) {
+			acpi_os_printf("%c", obj_desc->string.pointer[i]);
+		}
+		acpi_os_printf("\n");
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		acpi_os_printf("[Buffer] Length %.2X = ",
+			       obj_desc->buffer.length);
+		if (obj_desc->buffer.length) {
+			acpi_ut_dump_buffer(ACPI_CAST_PTR
+					    (u8, obj_desc->buffer.pointer),
+					    obj_desc->buffer.length,
+					    DB_DWORD_DISPLAY, _COMPONENT);
+		} else {
+			acpi_os_printf("\n");
+		}
+		break;
+
+	case ACPI_TYPE_PACKAGE:
+
+		acpi_os_printf("[Package] Contains %d Elements:\n",
+			       obj_desc->package.count);
+
+		for (i = 0; i < obj_desc->package.count; i++) {
+			acpi_ex_dump_package_obj(obj_desc->package.elements[i],
+						 level + 1, i);
+		}
+		break;
+
+	case ACPI_TYPE_LOCAL_REFERENCE:
+
+		acpi_os_printf("[Object Reference] Type [%s] %2.2X",
+			       acpi_ut_get_reference_name(obj_desc),
+			       obj_desc->reference.class);
+		acpi_ex_dump_reference_obj(obj_desc);
+		break;
+
+	default:
+
+		acpi_os_printf("[Unknown Type] %X\n",
+			       ACPI_GET_OBJECT_TYPE(obj_desc));
+		break;
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_dump_object_descriptor
+ *
+ * PARAMETERS:  obj_desc            - Descriptor to dump
+ *              Flags               - Force display if TRUE
+ *
+ * DESCRIPTION: Dumps the members of the given object descriptor.
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
+{
+	ACPI_FUNCTION_TRACE(ex_dump_object_descriptor);
+
+	if (!obj_desc) {
+		return_VOID;
+	}
+
+	if (!flags) {
+		if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
+		      && (_COMPONENT & acpi_dbg_layer))) {
+			return_VOID;
+		}
+	}
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) {
+		acpi_ex_dump_namespace_node((struct acpi_namespace_node *)
+					    obj_desc, flags);
+
+		acpi_os_printf("\nAttached Object (%p):\n",
+			       ((struct acpi_namespace_node *)obj_desc)->
+			       object);
+
+		acpi_ex_dump_object_descriptor(((struct acpi_namespace_node *)
+						obj_desc)->object, flags);
+		return_VOID;
+	}
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) {
+		acpi_os_printf
+		    ("ExDumpObjectDescriptor: %p is not an ACPI operand object: [%s]\n",
+		     obj_desc, acpi_ut_get_descriptor_name(obj_desc));
+		return_VOID;
+	}
+
+	if (obj_desc->common.type > ACPI_TYPE_NS_NODE_MAX) {
+		return_VOID;
+	}
+
+	/* Common Fields */
+
+	acpi_ex_dump_object(obj_desc, acpi_ex_dump_common);
+
+	/* Object-specific fields */
+
+	acpi_ex_dump_object(obj_desc, acpi_ex_dump_info[obj_desc->common.type]);
+	return_VOID;
+}
+
+#endif
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
new file mode 100644
index 0000000..a352d02
--- /dev/null
+++ b/drivers/acpi/acpica/exfield.c
@@ -0,0 +1,340 @@
+/******************************************************************************
+ *
+ * Module Name: exfield - ACPI AML (p-code) execution - field manipulation
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acdispat.h"
+#include "acinterp.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exfield")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_read_data_from_field
+ *
+ * PARAMETERS:  walk_state          - Current execution state
+ *              obj_desc            - The named field
+ *              ret_buffer_desc     - Where the return data object is stored
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Read from a named field.  Returns either an Integer or a
+ *              Buffer, depending on the size of the field.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
+			     union acpi_operand_object *obj_desc,
+			     union acpi_operand_object **ret_buffer_desc)
+{
+	acpi_status status;
+	union acpi_operand_object *buffer_desc;
+	acpi_size length;
+	void *buffer;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
+
+	/* Parameter validation */
+
+	if (!obj_desc) {
+		return_ACPI_STATUS(AE_AML_NO_OPERAND);
+	}
+	if (!ret_buffer_desc) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_BUFFER_FIELD) {
+		/*
+		 * If the buffer_field arguments have not been previously evaluated,
+		 * evaluate them now and save the results.
+		 */
+		if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
+			status = acpi_ds_get_buffer_field_arguments(obj_desc);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+		}
+	} else
+	    if ((ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REGION_FIELD)
+		&& (obj_desc->field.region_obj->region.space_id ==
+		    ACPI_ADR_SPACE_SMBUS)) {
+		/*
+		 * This is an SMBus read.  We must create a buffer to hold the data
+		 * and directly access the region handler.
+		 */
+		buffer_desc =
+		    acpi_ut_create_buffer_object(ACPI_SMBUS_BUFFER_SIZE);
+		if (!buffer_desc) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		/* Lock entire transaction if requested */
+
+		acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+		/*
+		 * Perform the read.
+		 * Note: SMBus protocol value is passed in upper 16-bits of Function
+		 */
+		status = acpi_ex_access_region(obj_desc, 0,
+					       ACPI_CAST_PTR(acpi_integer,
+							     buffer_desc->
+							     buffer.pointer),
+					       ACPI_READ | (obj_desc->field.
+							    attribute << 16));
+		acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+		goto exit;
+	}
+
+	/*
+	 * Allocate a buffer for the contents of the field.
+	 *
+	 * If the field is larger than the size of an acpi_integer, create
+	 * a BUFFER to hold it.  Otherwise, use an INTEGER.  This allows
+	 * the use of arithmetic operators on the returned value if the
+	 * field size is equal or smaller than an Integer.
+	 *
+	 * Note: Field.length is in bits.
+	 */
+	length =
+	    (acpi_size) ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
+	if (length > acpi_gbl_integer_byte_width) {
+
+		/* Field is too large for an Integer, create a Buffer instead */
+
+		buffer_desc = acpi_ut_create_buffer_object(length);
+		if (!buffer_desc) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+		buffer = buffer_desc->buffer.pointer;
+	} else {
+		/* Field will fit within an Integer (normal case) */
+
+		buffer_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+		if (!buffer_desc) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		length = acpi_gbl_integer_byte_width;
+		buffer_desc->integer.value = 0;
+		buffer = &buffer_desc->integer.value;
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+			  "FieldRead [TO]:   Obj %p, Type %X, Buf %p, ByteLen %X\n",
+			  obj_desc, ACPI_GET_OBJECT_TYPE(obj_desc), buffer,
+			  (u32) length));
+	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+			  "FieldRead [FROM]: BitLen %X, BitOff %X, ByteOff %X\n",
+			  obj_desc->common_field.bit_length,
+			  obj_desc->common_field.start_field_bit_offset,
+			  obj_desc->common_field.base_byte_offset));
+
+	/* Lock entire transaction if requested */
+
+	acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+	/* Read from the field */
+
+	status = acpi_ex_extract_from_field(obj_desc, buffer, (u32) length);
+	acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+
+      exit:
+	if (ACPI_FAILURE(status)) {
+		acpi_ut_remove_reference(buffer_desc);
+	} else {
+		*ret_buffer_desc = buffer_desc;
+	}
+
+	return_ACPI_STATUS(status);
+}
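+
+/*
+ * Sizing example (hypothetical field): a 40-bit field rounds up to 5 bytes
+ * and, with the usual 8-byte integer width, is returned as an Integer; a
+ * 96-bit field (12 bytes) would instead be returned in a Buffer, per the
+ * length test above.
+ */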
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_write_data_to_field
+ *
+ * PARAMETERS:  source_desc         - Contains data to write
+ *              obj_desc            - The named field
+ *              result_desc         - Where the return value is returned, if any
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Write to a named field
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
+			    union acpi_operand_object *obj_desc,
+			    union acpi_operand_object **result_desc)
+{
+	acpi_status status;
+	u32 length;
+	void *buffer;
+	union acpi_operand_object *buffer_desc;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);
+
+	/* Parameter validation */
+
+	if (!source_desc || !obj_desc) {
+		return_ACPI_STATUS(AE_AML_NO_OPERAND);
+	}
+
+	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_BUFFER_FIELD) {
+		/*
+		 * If the buffer_field arguments have not been previously evaluated,
+		 * evaluate them now and save the results.
+		 */
+		if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
+			status = acpi_ds_get_buffer_field_arguments(obj_desc);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+		}
+	} else
+	    if ((ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REGION_FIELD)
+		&& (obj_desc->field.region_obj->region.space_id ==
+		    ACPI_ADR_SPACE_SMBUS)) {
+		/*
+		 * This is an SMBus write.  We will bypass the entire field mechanism
+		 * and hand off the buffer directly to the handler.
+		 *
+		 * Source must be a buffer of sufficient size (ACPI_SMBUS_BUFFER_SIZE).
+		 */
+		if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_BUFFER) {
+			ACPI_ERROR((AE_INFO,
+				    "SMBus write requires Buffer, found type %s",
+				    acpi_ut_get_object_type_name(source_desc)));
+
+			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+		}
+
+		if (source_desc->buffer.length < ACPI_SMBUS_BUFFER_SIZE) {
+			ACPI_ERROR((AE_INFO,
+				    "SMBus write requires Buffer of length %X, found length %X",
+				    ACPI_SMBUS_BUFFER_SIZE,
+				    source_desc->buffer.length));
+
+			return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
+		}
+
+		buffer_desc =
+		    acpi_ut_create_buffer_object(ACPI_SMBUS_BUFFER_SIZE);
+		if (!buffer_desc) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		buffer = buffer_desc->buffer.pointer;
+		ACPI_MEMCPY(buffer, source_desc->buffer.pointer,
+			    ACPI_SMBUS_BUFFER_SIZE);
+
+		/* Lock entire transaction if requested */
+
+		acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+		/*
+		 * Perform the write (returns status and perhaps data in the
+		 * same buffer)
+		 * Note: SMBus protocol type is passed in upper 16-bits of Function.
+		 */
+		status = acpi_ex_access_region(obj_desc, 0,
+					       (acpi_integer *) buffer,
+					       ACPI_WRITE | (obj_desc->field.
+							     attribute << 16));
+		acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+
+		*result_desc = buffer_desc;
+		return_ACPI_STATUS(status);
+	}
+
+	/* Get a pointer to the data to be written */
+
+	switch (ACPI_GET_OBJECT_TYPE(source_desc)) {
+	case ACPI_TYPE_INTEGER:
+		buffer = &source_desc->integer.value;
+		length = sizeof(source_desc->integer.value);
+		break;
+
+	case ACPI_TYPE_BUFFER:
+		buffer = source_desc->buffer.pointer;
+		length = source_desc->buffer.length;
+		break;
+
+	case ACPI_TYPE_STRING:
+		buffer = source_desc->string.pointer;
+		length = source_desc->string.length;
+		break;
+
+	default:
+		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+			  "FieldWrite [FROM]: Obj %p (%s:%X), Buf %p, ByteLen %X\n",
+			  source_desc,
+			  acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE
+						(source_desc)),
+			  ACPI_GET_OBJECT_TYPE(source_desc), buffer, length));
+
+	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+			  "FieldWrite [TO]:   Obj %p (%s:%X), BitLen %X, BitOff %X, ByteOff %X\n",
+			  obj_desc,
+			  acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE(obj_desc)),
+			  ACPI_GET_OBJECT_TYPE(obj_desc),
+			  obj_desc->common_field.bit_length,
+			  obj_desc->common_field.start_field_bit_offset,
+			  obj_desc->common_field.base_byte_offset));
+
+	/* Lock entire transaction if requested */
+
+	acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+	/* Write to the field */
+
+	status = acpi_ex_insert_into_field(obj_desc, buffer, length);
+	acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
new file mode 100644
index 0000000..ef58ac4
--- /dev/null
+++ b/drivers/acpi/acpica/exfldio.c
@@ -0,0 +1,961 @@
+/******************************************************************************
+ *
+ * Module Name: exfldio - Aml Field I/O
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
+#include "acevents.h"
+#include "acdispat.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exfldio")
+
+/* Local prototypes */
+static acpi_status
+acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
+		       u32 field_datum_byte_offset,
+		       acpi_integer * value, u32 read_write);
+
+static u8
+acpi_ex_register_overflow(union acpi_operand_object *obj_desc,
+			  acpi_integer value);
+
+static acpi_status
+acpi_ex_setup_region(union acpi_operand_object *obj_desc,
+		     u32 field_datum_byte_offset);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_setup_region
+ *
+ * PARAMETERS:  obj_desc                - Field to be read or written
+ *              field_datum_byte_offset - Byte offset of this datum within the
+ *                                        parent field
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Common processing for acpi_ex_extract_from_field and
+ *              acpi_ex_insert_into_field. Initialize the Region if necessary and
+ *              validate the request.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_setup_region(union acpi_operand_object *obj_desc,
+		     u32 field_datum_byte_offset)
+{
+	acpi_status status = AE_OK;
+	union acpi_operand_object *rgn_desc;
+
+	ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset);
+
+	rgn_desc = obj_desc->common_field.region_obj;
+
+	/* We must have a valid region */
+
+	if (ACPI_GET_OBJECT_TYPE(rgn_desc) != ACPI_TYPE_REGION) {
+		ACPI_ERROR((AE_INFO, "Needed Region, found type %X (%s)",
+			    ACPI_GET_OBJECT_TYPE(rgn_desc),
+			    acpi_ut_get_object_type_name(rgn_desc)));
+
+		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+	}
+
+	/*
+	 * If the Region Address and Length have not been previously evaluated,
+	 * evaluate them now and save the results.
+	 */
+	if (!(rgn_desc->common.flags & AOPOBJ_DATA_VALID)) {
+		status = acpi_ds_get_region_arguments(rgn_desc);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/* Exit if Address/Length have been disallowed by the host OS */
+
+	if (rgn_desc->common.flags & AOPOBJ_INVALID) {
+		return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS);
+	}
+
+	/*
+	 * Exit now for SMBus address space, it has a non-linear address space
+	 * and the request cannot be directly validated
+	 */
+	if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS) {
+
+		/* SMBus has a non-linear address space */
+
+		return_ACPI_STATUS(AE_OK);
+	}
+#ifdef ACPI_UNDER_DEVELOPMENT
+	/*
+	 * If the Field access is any_acc, we can now compute the optimal
+	 * access (because we now know the length of the parent region)
+	 */
+	if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+#endif
+
+	/*
+	 * Validate the request.  The entire request from the byte offset for a
+	 * length of one field datum (access width) must fit within the region.
+	 * (Region length is specified in bytes)
+	 */
+	if (rgn_desc->region.length <
+	    (obj_desc->common_field.base_byte_offset +
+	     field_datum_byte_offset +
+	     obj_desc->common_field.access_byte_width)) {
+		if (acpi_gbl_enable_interpreter_slack) {
+			/*
+			 * Slack mode only:  We will go ahead and allow access to this
+			 * field if it is within the region length rounded up to the next
+			 * access width boundary. acpi_size cast for 64-bit compile.
+			 */
+			if (ACPI_ROUND_UP(rgn_desc->region.length,
+					  obj_desc->common_field.
+					  access_byte_width) >=
+			    ((acpi_size) obj_desc->common_field.
+			     base_byte_offset +
+			     obj_desc->common_field.access_byte_width +
+			     field_datum_byte_offset)) {
+				return_ACPI_STATUS(AE_OK);
+			}
+		}
+
+		if (rgn_desc->region.length <
+		    obj_desc->common_field.access_byte_width) {
+			/*
+			 * This is the case where the access_type (acc_word, etc.) is wider
+			 * than the region itself.  For example, a region of length one
+			 * byte, and a field with Dword access specified.
+			 */
+			ACPI_ERROR((AE_INFO,
+				    "Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)",
+				    acpi_ut_get_node_name(obj_desc->
+							  common_field.node),
+				    obj_desc->common_field.access_byte_width,
+				    acpi_ut_get_node_name(rgn_desc->region.
+							  node),
+				    rgn_desc->region.length));
+		}
+
+		/*
+		 * Offset rounded up to next multiple of field width
+		 * exceeds region length, indicate an error
+		 */
+		ACPI_ERROR((AE_INFO,
+			    "Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)",
+			    acpi_ut_get_node_name(obj_desc->common_field.node),
+			    obj_desc->common_field.base_byte_offset,
+			    field_datum_byte_offset,
+			    obj_desc->common_field.access_byte_width,
+			    acpi_ut_get_node_name(rgn_desc->region.node),
+			    rgn_desc->region.length));
+
+		return_ACPI_STATUS(AE_AML_REGION_LIMIT);
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
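+
+/*
+ * Bounds example (hypothetical field): a DWord-access field at base byte
+ * offset 0 in a 2-byte region fails the check above (0 + 0 + 4 > 2); in
+ * slack mode the region length rounds up to 4, so the access is allowed.
+ */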
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_access_region
+ *
+ * PARAMETERS:  obj_desc                - Field to be read
+ *              field_datum_byte_offset - Byte offset of this datum within the
+ *                                        parent field
+ *              Value                   - Where to store value (must be at least
+ *                                        the size of acpi_integer)
+ *              Function                - Read or Write flag plus other region-
+ *                                        dependent flags
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Read or Write a single field datum to an Operation Region.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_access_region(union acpi_operand_object *obj_desc,
+		      u32 field_datum_byte_offset,
+		      acpi_integer * value, u32 function)
+{
+	acpi_status status;
+	union acpi_operand_object *rgn_desc;
+	acpi_physical_address address;
+
+	ACPI_FUNCTION_TRACE(ex_access_region);
+
+	/*
+	 * Ensure that the region operands are fully evaluated and verify
+	 * the validity of the request
+	 */
+	status = acpi_ex_setup_region(obj_desc, field_datum_byte_offset);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * The physical address of this field datum is:
+	 *
+	 * 1) The base of the region, plus
+	 * 2) The base offset of the field, plus
+	 * 3) The current offset into the field
+	 */
+	rgn_desc = obj_desc->common_field.region_obj;
+	address = rgn_desc->region.address +
+	    obj_desc->common_field.base_byte_offset + field_datum_byte_offset;
+
+	if ((function & ACPI_IO_MASK) == ACPI_READ) {
+		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "[READ]"));
+	} else {
+		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "[WRITE]"));
+	}
+
+	ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
+			      " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
+			      acpi_ut_get_region_name(rgn_desc->region.
+						      space_id),
+			      rgn_desc->region.space_id,
+			      obj_desc->common_field.access_byte_width,
+			      obj_desc->common_field.base_byte_offset,
+			      field_datum_byte_offset, ACPI_CAST_PTR(void,
+								     address)));
+
+	/* Invoke the appropriate address_space/op_region handler */
+
+	status = acpi_ev_address_space_dispatch(rgn_desc, function,
+						address,
+						ACPI_MUL_8(obj_desc->
+							   common_field.
+							   access_byte_width),
+						value);
+
+	if (ACPI_FAILURE(status)) {
+		if (status == AE_NOT_IMPLEMENTED) {
+			ACPI_ERROR((AE_INFO,
+				    "Region %s(%X) not implemented",
+				    acpi_ut_get_region_name(rgn_desc->region.
+							    space_id),
+				    rgn_desc->region.space_id));
+		} else if (status == AE_NOT_EXIST) {
+			ACPI_ERROR((AE_INFO,
+				    "Region %s(%X) has no handler",
+				    acpi_ut_get_region_name(rgn_desc->region.
+							    space_id),
+				    rgn_desc->region.space_id));
+		}
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_register_overflow
+ *
+ * PARAMETERS:  obj_desc                - Register(Field) to be written
+ *              Value                   - Value to be stored
+ *
+ * RETURN:      TRUE if value overflows the field, FALSE otherwise
+ *
+ * DESCRIPTION: Check if a value is out of range of the field being written.
+ *              Used to check if the values written to Index and Bank registers
+ *              are out of range.  Normally, the value is simply truncated
+ *              to fit the field, but this case is most likely a serious
+ *              coding error in the ASL.
+ *
+ ******************************************************************************/
+
+static u8
+acpi_ex_register_overflow(union acpi_operand_object *obj_desc,
+			  acpi_integer value)
+{
+
+	if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) {
+		/*
+		 * The field is large enough to hold the maximum integer, so we can
+		 * never overflow it.
+		 */
+		return (FALSE);
+	}
+
+	if (value >= ((acpi_integer) 1 << obj_desc->common_field.bit_length)) {
+		/*
+		 * The Value is larger than the maximum value that can fit into
+		 * the register.
+		 */
+		return (TRUE);
+	}
+
+	/* The Value will fit into the field with no truncation */
+
+	return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_field_datum_io
+ *
+ * PARAMETERS:  obj_desc                - Field to be read
+ *              field_datum_byte_offset - Byte offset of this datum within the
+ *                                        parent field
+ *              Value                   - Where to store value (must be 64 bits)
+ *              read_write              - Read or Write flag
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Read or Write a single datum of a field.  The field_type is
+ *              demultiplexed here to handle the different types of fields
+ *              (buffer_field, region_field, index_field, bank_field)
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
+		       u32 field_datum_byte_offset,
+		       acpi_integer * value, u32 read_write)
+{
+	acpi_status status;
+	acpi_integer local_value;
+
+	ACPI_FUNCTION_TRACE_U32(ex_field_datum_io, field_datum_byte_offset);
+
+	if (read_write == ACPI_READ) {
+		if (!value) {
+			local_value = 0;
+
+			/* To support reads without saving return value */
+			value = &local_value;
+		}
+
+		/* Clear the entire return buffer first, [Very Important!] */
+
+		*value = 0;
+	}
+
+	/*
+	 * The four types of fields are:
+	 *
+	 * buffer_field - Read/write from/to a Buffer
+	 * region_field - Read/write from/to an Operation Region.
+	 * bank_field  - Write to a Bank Register, then read/write from/to an
+	 *               operation_region
+	 * index_field - Write to an Index Register, then read/write from/to a
+	 *               Data Register
+	 */
+	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+	case ACPI_TYPE_BUFFER_FIELD:
+		/*
+		 * If the buffer_field arguments have not been previously evaluated,
+		 * evaluate them now and save the results.
+		 */
+		if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
+			status = acpi_ds_get_buffer_field_arguments(obj_desc);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+		}
+
+		if (read_write == ACPI_READ) {
+			/*
+			 * Copy the data from the source buffer.
+			 * Length is the field width in bytes.
+			 */
+			ACPI_MEMCPY(value,
+				    (obj_desc->buffer_field.buffer_obj)->buffer.
+				    pointer +
+				    obj_desc->buffer_field.base_byte_offset +
+				    field_datum_byte_offset,
+				    obj_desc->common_field.access_byte_width);
+		} else {
+			/*
+			 * Copy the data to the target buffer.
+			 * Length is the field width in bytes.
+			 */
+			ACPI_MEMCPY((obj_desc->buffer_field.buffer_obj)->buffer.
+				    pointer +
+				    obj_desc->buffer_field.base_byte_offset +
+				    field_datum_byte_offset, value,
+				    obj_desc->common_field.access_byte_width);
+		}
+
+		status = AE_OK;
+		break;
+
+	case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+		/*
+		 * Ensure that the bank_value is not beyond the capacity of
+		 * the register
+		 */
+		if (acpi_ex_register_overflow(obj_desc->bank_field.bank_obj,
+					      (acpi_integer) obj_desc->
+					      bank_field.value)) {
+			return_ACPI_STATUS(AE_AML_REGISTER_LIMIT);
+		}
+
+		/*
+		 * For bank_fields, we must write the bank_value to the bank_register
+		 * (itself a region_field) before we can access the data.
+		 */
+		status =
+		    acpi_ex_insert_into_field(obj_desc->bank_field.bank_obj,
+					      &obj_desc->bank_field.value,
+					      sizeof(obj_desc->bank_field.
+						     value));
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		/*
+		 * Now that the Bank has been selected, fall through to the
+		 * region_field case and write the datum to the Operation Region
+		 */
+
+		/*lint -fallthrough */
+
+	case ACPI_TYPE_LOCAL_REGION_FIELD:
+		/*
+		 * For simple region_fields, we just directly access the owning
+		 * Operation Region.
+		 */
+		status =
+		    acpi_ex_access_region(obj_desc, field_datum_byte_offset,
+					  value, read_write);
+		break;
+
+	case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+		/*
+		 * Ensure that the index_value is not beyond the capacity of
+		 * the register
+		 */
+		if (acpi_ex_register_overflow(obj_desc->index_field.index_obj,
+					      (acpi_integer) obj_desc->
+					      index_field.value)) {
+			return_ACPI_STATUS(AE_AML_REGISTER_LIMIT);
+		}
+
+		/* Write the index value to the index_register (itself a region_field) */
+
+		field_datum_byte_offset += obj_desc->index_field.value;
+
+		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+				  "Write to Index Register: Value %8.8X\n",
+				  field_datum_byte_offset));
+
+		status =
+		    acpi_ex_insert_into_field(obj_desc->index_field.index_obj,
+					      &field_datum_byte_offset,
+					      sizeof(field_datum_byte_offset));
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		if (read_write == ACPI_READ) {
+
+			/* Read the datum from the data_register */
+
+			ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+					  "Read from Data Register\n"));
+
+			status =
+			    acpi_ex_extract_from_field(obj_desc->index_field.
+						       data_obj, value,
+						       sizeof(acpi_integer));
+		} else {
+			/* Write the datum to the data_register */
+
+			ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+					  "Write to Data Register: Value %8.8X%8.8X\n",
+					  ACPI_FORMAT_UINT64(*value)));
+
+			status =
+			    acpi_ex_insert_into_field(obj_desc->index_field.
+						      data_obj, value,
+						      sizeof(acpi_integer));
+		}
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %X",
+			    ACPI_GET_OBJECT_TYPE(obj_desc)));
+		status = AE_AML_INTERNAL;
+		break;
+	}
+
+	if (ACPI_SUCCESS(status)) {
+		if (read_write == ACPI_READ) {
+			ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+					  "Value Read %8.8X%8.8X, Width %d\n",
+					  ACPI_FORMAT_UINT64(*value),
+					  obj_desc->common_field.
+					  access_byte_width));
+		} else {
+			ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+					  "Value Written %8.8X%8.8X, Width %d\n",
+					  ACPI_FORMAT_UINT64(*value),
+					  obj_desc->common_field.
+					  access_byte_width));
+		}
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_write_with_update_rule
+ *
+ * PARAMETERS:  obj_desc                - Field to be written
+ *              Mask                    - bitmask within field datum
+ *              field_value             - Value to write
+ *              field_datum_byte_offset - Offset of datum within field
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Apply the field update rule to a field write
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
+			       acpi_integer mask,
+			       acpi_integer field_value,
+			       u32 field_datum_byte_offset)
+{
+	acpi_status status = AE_OK;
+	acpi_integer merged_value;
+	acpi_integer current_value;
+
+	ACPI_FUNCTION_TRACE_U32(ex_write_with_update_rule, mask);
+
+	/* Start with the new bits  */
+
+	merged_value = field_value;
+
+	/* If the mask is all ones, we don't need to worry about the update rule */
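+	/*
+	 * Illustrative example (Preserve rule, 8-bit access width): with
+	 * Mask 0x3C and a pre-masked FieldValue of 0x14, the merged result
+	 * is (CurrentValue & ~0x3C) | 0x14, so bit positions outside the
+	 * field keep their previous contents.
+	 */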
+
+	if (mask != ACPI_INTEGER_MAX) {
+
+		/* Decode the update rule */
+
+		switch (obj_desc->common_field.
+			field_flags & AML_FIELD_UPDATE_RULE_MASK) {
+		case AML_FIELD_UPDATE_PRESERVE:
+			/*
+			 * Check if the update rule needs to be applied (not if the mask is
+			 * all ones). The left shift drops the bits we want to ignore.
+			 */
+			if ((~mask << (ACPI_MUL_8(sizeof(mask)) -
+				       ACPI_MUL_8(obj_desc->common_field.
+						  access_byte_width))) != 0) {
+				/*
+				 * Read the current contents of the byte/word/dword containing
+				 * the field, and merge with the new field value.
+				 */
+				status =
+				    acpi_ex_field_datum_io(obj_desc,
+							   field_datum_byte_offset,
+							   &current_value,
+							   ACPI_READ);
+				if (ACPI_FAILURE(status)) {
+					return_ACPI_STATUS(status);
+				}
+
+				merged_value |= (current_value & ~mask);
+			}
+			break;
+
+		case AML_FIELD_UPDATE_WRITE_AS_ONES:
+
+			/* Set positions outside the field to all ones */
+
+			merged_value |= ~mask;
+			break;
+
+		case AML_FIELD_UPDATE_WRITE_AS_ZEROS:
+
+			/* Set positions outside the field to all zeros */
+
+			merged_value &= mask;
+			break;
+
+		default:
+
+			ACPI_ERROR((AE_INFO,
+				    "Unknown UpdateRule value: %X",
+				    (obj_desc->common_field.
+				     field_flags &
+				     AML_FIELD_UPDATE_RULE_MASK)));
+			return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
+		}
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+			  "Mask %8.8X%8.8X, DatumOffset %X, Width %X, Value %8.8X%8.8X, MergedValue %8.8X%8.8X\n",
+			  ACPI_FORMAT_UINT64(mask),
+			  field_datum_byte_offset,
+			  obj_desc->common_field.access_byte_width,
+			  ACPI_FORMAT_UINT64(field_value),
+			  ACPI_FORMAT_UINT64(merged_value)));
+
+	/* Write the merged value */
+
+	status = acpi_ex_field_datum_io(obj_desc, field_datum_byte_offset,
+					&merged_value, ACPI_WRITE);
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_extract_from_field
+ *
+ * PARAMETERS:  obj_desc            - Field to be read
+ *              Buffer              - Where to store the field data
+ *              buffer_length       - Length of Buffer
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Retrieve the current value of the given field
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
+			   void *buffer, u32 buffer_length)
+{
+	acpi_status status;
+	acpi_integer raw_datum;
+	acpi_integer merged_datum;
+	u32 field_offset = 0;
+	u32 buffer_offset = 0;
+	u32 buffer_tail_bits;
+	u32 datum_count;
+	u32 field_datum_count;
+	u32 i;
+
+	ACPI_FUNCTION_TRACE(ex_extract_from_field);
+
+	/* Validate target buffer and clear it */
+
+	if (buffer_length <
+	    ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
+		ACPI_ERROR((AE_INFO,
+			    "Field size %X (bits) is too large for buffer (%X)",
+			    obj_desc->common_field.bit_length, buffer_length));
+
+		return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
+	}
+	ACPI_MEMSET(buffer, 0, buffer_length);
+
+	/* Compute the number of datums (access width data items) */
+
+	datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
+				       obj_desc->common_field.access_bit_width);
+	field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
+					     obj_desc->common_field.
+					     start_field_bit_offset,
+					     obj_desc->common_field.
+					     access_bit_width);
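+	/*
+	 * Note: field_datum_count can exceed datum_count by one when the field
+	 * straddles an access-width boundary. For example, a 16-bit field that
+	 * starts at bit offset 4 within a 16-bit access width spans two raw
+	 * datums but produces only one output datum.
+	 */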
+
+	/* Priming read from the field */
+
+	status =
+	    acpi_ex_field_datum_io(obj_desc, field_offset, &raw_datum,
+				   ACPI_READ);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+	merged_datum =
+	    raw_datum >> obj_desc->common_field.start_field_bit_offset;
+
+	/* Read the rest of the field */
+
+	for (i = 1; i < field_datum_count; i++) {
+
+		/* Get next input datum from the field */
+
+		field_offset += obj_desc->common_field.access_byte_width;
+		status = acpi_ex_field_datum_io(obj_desc, field_offset,
+						&raw_datum, ACPI_READ);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		/*
+		 * Merge with previous datum if necessary.
+		 *
+		 * Note: Before the shift, check if the shift value will be larger than
+		 * the integer size. If so, there is no need to perform the operation.
+		 * This avoids the differences in behavior between different compilers
+		 * concerning shift values larger than the target data width.
+		 */
+		if ((obj_desc->common_field.access_bit_width -
+		     obj_desc->common_field.start_field_bit_offset) <
+		    ACPI_INTEGER_BIT_SIZE) {
+			merged_datum |=
+			    raw_datum << (obj_desc->common_field.
+					  access_bit_width -
+					  obj_desc->common_field.
+					  start_field_bit_offset);
+		}
+
+		if (i == datum_count) {
+			break;
+		}
+
+		/* Write merged datum to target buffer */
+
+		ACPI_MEMCPY(((char *)buffer) + buffer_offset, &merged_datum,
+			    ACPI_MIN(obj_desc->common_field.access_byte_width,
+				     buffer_length - buffer_offset));
+
+		buffer_offset += obj_desc->common_field.access_byte_width;
+		merged_datum =
+		    raw_datum >> obj_desc->common_field.start_field_bit_offset;
+	}
+
+	/* Mask off any extra bits in the last datum */
+
+	buffer_tail_bits = obj_desc->common_field.bit_length %
+	    obj_desc->common_field.access_bit_width;
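+	/*
+	 * For example, a 12-bit field read with an 8-bit access width leaves
+	 * buffer_tail_bits = 4, so only the low 4 bits of the final datum
+	 * survive the mask below before the final copy to the buffer.
+	 */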
+	if (buffer_tail_bits) {
+		merged_datum &= ACPI_MASK_BITS_ABOVE(buffer_tail_bits);
+	}
+
+	/* Write the last datum to the buffer */
+
+	ACPI_MEMCPY(((char *)buffer) + buffer_offset, &merged_datum,
+		    ACPI_MIN(obj_desc->common_field.access_byte_width,
+			     buffer_length - buffer_offset));
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_insert_into_field
+ *
+ * PARAMETERS:  obj_desc            - Field to be written
+ *              Buffer              - Data to be written
+ *              buffer_length       - Length of Buffer
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Store the Buffer contents into the given field
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
+			  void *buffer, u32 buffer_length)
+{
+	acpi_status status;
+	acpi_integer mask;
+	acpi_integer width_mask;
+	acpi_integer merged_datum;
+	acpi_integer raw_datum = 0;
+	u32 field_offset = 0;
+	u32 buffer_offset = 0;
+	u32 buffer_tail_bits;
+	u32 datum_count;
+	u32 field_datum_count;
+	u32 i;
+	u32 required_length;
+	void *new_buffer;
+
+	ACPI_FUNCTION_TRACE(ex_insert_into_field);
+
+	/* Validate input buffer */
+
+	new_buffer = NULL;
+	required_length =
+	    ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length);
+	/*
+	 * We must have a buffer that is at least as long as the field
+	 * we are writing to.  This is because individual fields are
+	 * indivisible and partial writes are not supported -- as per
+	 * the ACPI specification.
+	 */
+	if (buffer_length < required_length) {
+
+		/* We need to create a new buffer */
+
+		new_buffer = ACPI_ALLOCATE_ZEROED(required_length);
+		if (!new_buffer) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		/*
+		 * Copy the original data to the new buffer, starting
+		 * at Byte zero.  All unused (upper) bytes of the
+		 * buffer will be 0.
+		 */
+		ACPI_MEMCPY((char *)new_buffer, (char *)buffer, buffer_length);
+		buffer = new_buffer;
+		buffer_length = required_length;
+	}
+
+	/*
+	 * Create the bitmasks used for bit insertion.
+	 * Note: This if/else is used to bypass compiler differences with the
+	 * shift operator
+	 */
+	if (obj_desc->common_field.access_bit_width == ACPI_INTEGER_BIT_SIZE) {
+		width_mask = ACPI_INTEGER_MAX;
+	} else {
+		width_mask =
+		    ACPI_MASK_BITS_ABOVE(obj_desc->common_field.
+					 access_bit_width);
+	}
+
+	mask = width_mask &
+	    ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset);
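+
+	/*
+	 * At this point Mask selects the bit positions of the first raw datum
+	 * that belong to the field. For example, with an 8-bit access width
+	 * and a start_field_bit_offset of 3, width_mask is 0xFF and Mask is
+	 * 0xF8.
+	 */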
+
+	/* Compute the number of datums (access width data items) */
+
+	datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
+				       obj_desc->common_field.access_bit_width);
+
+	field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
+					     obj_desc->common_field.
+					     start_field_bit_offset,
+					     obj_desc->common_field.
+					     access_bit_width);
+
+	/* Get initial Datum from the input buffer */
+
+	ACPI_MEMCPY(&raw_datum, buffer,
+		    ACPI_MIN(obj_desc->common_field.access_byte_width,
+			     buffer_length - buffer_offset));
+
+	merged_datum =
+	    raw_datum << obj_desc->common_field.start_field_bit_offset;
+
+	/* Write the entire field */
+
+	for (i = 1; i < field_datum_count; i++) {
+
+		/* Write merged datum to the target field */
+
+		merged_datum &= mask;
+		status = acpi_ex_write_with_update_rule(obj_desc, mask,
+							merged_datum,
+							field_offset);
+		if (ACPI_FAILURE(status)) {
+			goto exit;
+		}
+
+		field_offset += obj_desc->common_field.access_byte_width;
+
+		/*
+		 * Start new output datum by merging with previous input datum
+		 * if necessary.
+		 *
+		 * Note: Before the shift, check if the shift value will be larger than
+		 * the integer size. If so, there is no need to perform the operation.
+		 * This avoids the differences in behavior between different compilers
+		 * concerning shift values larger than the target data width.
+		 */
+		if ((obj_desc->common_field.access_bit_width -
+		     obj_desc->common_field.start_field_bit_offset) <
+		    ACPI_INTEGER_BIT_SIZE) {
+			merged_datum =
+			    raw_datum >> (obj_desc->common_field.
+					  access_bit_width -
+					  obj_desc->common_field.
+					  start_field_bit_offset);
+		} else {
+			merged_datum = 0;
+		}
+
+		mask = width_mask;
+
+		if (i == datum_count) {
+			break;
+		}
+
+		/* Get the next input datum from the buffer */
+
+		buffer_offset += obj_desc->common_field.access_byte_width;
+		ACPI_MEMCPY(&raw_datum, ((char *)buffer) + buffer_offset,
+			    ACPI_MIN(obj_desc->common_field.access_byte_width,
+				     buffer_length - buffer_offset));
+		merged_datum |=
+		    raw_datum << obj_desc->common_field.start_field_bit_offset;
+	}
+
+	/* Mask off any extra bits in the last datum */
+
+	buffer_tail_bits = (obj_desc->common_field.bit_length +
+			    obj_desc->common_field.start_field_bit_offset) %
+	    obj_desc->common_field.access_bit_width;
+	if (buffer_tail_bits) {
+		mask &= ACPI_MASK_BITS_ABOVE(buffer_tail_bits);
+	}
+
+	/* Write the last datum to the field */
+
+	merged_datum &= mask;
+	status = acpi_ex_write_with_update_rule(obj_desc,
+						mask, merged_datum,
+						field_offset);
+
+      exit:
+	/* Free temporary buffer if we used one */
+
+	if (new_buffer) {
+		ACPI_FREE(new_buffer);
+	}
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
new file mode 100644
index 0000000..6b0747ac
--- /dev/null
+++ b/drivers/acpi/acpica/exmisc.c
@@ -0,0 +1,726 @@
+
+/******************************************************************************
+ *
+ * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
+#include "amlresrc.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exmisc")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_get_object_reference
+ *
+ * PARAMETERS:  obj_desc            - Create a reference to this object
+ *              return_desc         - Where to store the reference
+ *              walk_state          - Current state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Obtain and return a "reference" to the target object
+ *              Common code for the ref_of_op and the cond_ref_of_op.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
+			     union acpi_operand_object **return_desc,
+			     struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object *reference_obj;
+	union acpi_operand_object *referenced_obj;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_get_object_reference, obj_desc);
+
+	*return_desc = NULL;
+
+	switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
+	case ACPI_DESC_TYPE_OPERAND:
+
+		if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_LOCAL_REFERENCE) {
+			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+		}
+
+		/*
+		 * Must be a reference to a Local or Arg (or the Debug object)
+		 */
+		switch (obj_desc->reference.class) {
+		case ACPI_REFCLASS_LOCAL:
+		case ACPI_REFCLASS_ARG:
+		case ACPI_REFCLASS_DEBUG:
+
+			/* The referenced object is the pseudo-node for the local/arg */
+
+			referenced_obj = obj_desc->reference.object;
+			break;
+
+		default:
+
+			ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
+				    obj_desc->reference.class));
+			return_ACPI_STATUS(AE_AML_INTERNAL);
+		}
+		break;
+
+	case ACPI_DESC_TYPE_NAMED:
+
+		/*
+		 * A named reference that has already been resolved to a Node
+		 */
+		referenced_obj = obj_desc;
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Invalid descriptor type %X",
+			    ACPI_GET_DESCRIPTOR_TYPE(obj_desc)));
+		return_ACPI_STATUS(AE_TYPE);
+	}
+
+	/* Create a new reference object */
+
+	reference_obj =
+	    acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE);
+	if (!reference_obj) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	reference_obj->reference.class = ACPI_REFCLASS_REFOF;
+	reference_obj->reference.object = referenced_obj;
+	*return_desc = reference_obj;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+			  "Object %p Type [%s], returning Reference %p\n",
+			  obj_desc, acpi_ut_get_object_type_name(obj_desc),
+			  *return_desc));
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_concat_template
+ *
+ * PARAMETERS:  Operand0            - First source object
+ *              Operand1            - Second source object
+ *              actual_return_desc  - Where to place the return object
+ *              walk_state          - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Concatenate two resource templates
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_concat_template(union acpi_operand_object *operand0,
+			union acpi_operand_object *operand1,
+			union acpi_operand_object **actual_return_desc,
+			struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	union acpi_operand_object *return_desc;
+	u8 *new_buf;
+	u8 *end_tag;
+	acpi_size length0;
+	acpi_size length1;
+	acpi_size new_length;
+
+	ACPI_FUNCTION_TRACE(ex_concat_template);
+
+	/*
+	 * Find the end_tag descriptor in each resource template.
+	 * Note1: returned pointers point TO the end_tag, not past it.
+	 * Note2: zero-length buffers are allowed; treated like one end_tag
+	 */
+
+	/* Get the length of the first resource template */
+
+	status = acpi_ut_get_resource_end_tag(operand0, &end_tag);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	length0 = ACPI_PTR_DIFF(end_tag, operand0->buffer.pointer);
+
+	/* Get the length of the second resource template */
+
+	status = acpi_ut_get_resource_end_tag(operand1, &end_tag);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	length1 = ACPI_PTR_DIFF(end_tag, operand1->buffer.pointer);
+
+	/* Combine both lengths, minimum size will be 2 for end_tag */
+
+	new_length = length0 + length1 + sizeof(struct aml_resource_end_tag);
+
+	/* Create a new buffer object for the result (with one end_tag) */
+
+	return_desc = acpi_ut_create_buffer_object(new_length);
+	if (!return_desc) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/*
+	 * Copy the templates to the new buffer, 0 first, then 1 follows. One
+	 * end_tag descriptor is copied from Operand1.
+	 */
+	new_buf = return_desc->buffer.pointer;
+	ACPI_MEMCPY(new_buf, operand0->buffer.pointer, length0);
+	ACPI_MEMCPY(new_buf + length0, operand1->buffer.pointer, length1);
+
+	/* Insert end_tag and set the checksum to zero, meaning "ignore checksum" */
+
+	new_buf[new_length - 1] = 0;
+	new_buf[new_length - 2] = ACPI_RESOURCE_NAME_END_TAG | 1;
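+	/*
+	 * Note: ACPI_RESOURCE_NAME_END_TAG | 1 forms the small-resource EndTag
+	 * descriptor byte (length field = 1); the byte that follows it is the
+	 * one-byte checksum, set to zero above, which means the checksum
+	 * should be ignored.
+	 */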
+
+	/* Return the completed resource template */
+
+	*actual_return_desc = return_desc;
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_do_concatenate
+ *
+ * PARAMETERS:  Operand0            - First source object
+ *              Operand1            - Second source object
+ *              actual_return_desc  - Where to place the return object
+ *              walk_state          - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Concatenate two objects OF THE SAME TYPE.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_do_concatenate(union acpi_operand_object *operand0,
+		       union acpi_operand_object *operand1,
+		       union acpi_operand_object **actual_return_desc,
+		       struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object *local_operand1 = operand1;
+	union acpi_operand_object *return_desc;
+	char *new_buf;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ex_do_concatenate);
+
+	/*
+	 * Convert the second operand if necessary.  The first operand
+	 * determines the type of the second operand, (See the Data Types
+	 * section of the ACPI specification.)  Both object types are
+	 * guaranteed to be either Integer/String/Buffer by the operand
+	 * resolution mechanism.
+	 */
+	switch (ACPI_GET_OBJECT_TYPE(operand0)) {
+	case ACPI_TYPE_INTEGER:
+		status =
+		    acpi_ex_convert_to_integer(operand1, &local_operand1, 16);
+		break;
+
+	case ACPI_TYPE_STRING:
+		status = acpi_ex_convert_to_string(operand1, &local_operand1,
+						   ACPI_IMPLICIT_CONVERT_HEX);
+		break;
+
+	case ACPI_TYPE_BUFFER:
+		status = acpi_ex_convert_to_buffer(operand1, &local_operand1);
+		break;
+
+	default:
+		ACPI_ERROR((AE_INFO, "Invalid object type: %X",
+			    ACPI_GET_OBJECT_TYPE(operand0)));
+		status = AE_AML_INTERNAL;
+	}
+
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	/*
+	 * Both operands are now known to be the same object type
+	 * (Both are Integer, String, or Buffer), and we can now perform the
+	 * concatenation.
+	 */
+
+	/*
+	 * There are three cases to handle:
+	 *
+	 * 1) Two Integers concatenated to produce a new Buffer
+	 * 2) Two Strings concatenated to produce a new String
+	 * 3) Two Buffers concatenated to produce a new Buffer
+	 */
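+	/*
+	 * For example (with 64-bit integers), Concatenate (0x1234, 0xABCD)
+	 * produces a 16-byte Buffer containing both integers stored LSB-first,
+	 * while Concatenate ("AB", "CD") produces the String "ABCD".
+	 */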
+	switch (ACPI_GET_OBJECT_TYPE(operand0)) {
+	case ACPI_TYPE_INTEGER:
+
+		/* Result of two Integers is a Buffer */
+		/* Need enough buffer space for two integers */
+
+		return_desc = acpi_ut_create_buffer_object((acpi_size)
+							   ACPI_MUL_2
+							   (acpi_gbl_integer_byte_width));
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		new_buf = (char *)return_desc->buffer.pointer;
+
+		/* Copy the first integer, LSB first */
+
+		ACPI_MEMCPY(new_buf, &operand0->integer.value,
+			    acpi_gbl_integer_byte_width);
+
+		/* Copy the second integer (LSB first) after the first */
+
+		ACPI_MEMCPY(new_buf + acpi_gbl_integer_byte_width,
+			    &local_operand1->integer.value,
+			    acpi_gbl_integer_byte_width);
+		break;
+
+	case ACPI_TYPE_STRING:
+
+		/* Result of two Strings is a String */
+
+		return_desc = acpi_ut_create_string_object(((acpi_size)
+							    operand0->string.
+							    length +
+							    local_operand1->
+							    string.length));
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		new_buf = return_desc->string.pointer;
+
+		/* Concatenate the strings */
+
+		ACPI_STRCPY(new_buf, operand0->string.pointer);
+		ACPI_STRCPY(new_buf + operand0->string.length,
+			    local_operand1->string.pointer);
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		/* Result of two Buffers is a Buffer */
+
+		return_desc = acpi_ut_create_buffer_object(((acpi_size)
+							    operand0->buffer.
+							    length +
+							    local_operand1->
+							    buffer.length));
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		new_buf = (char *)return_desc->buffer.pointer;
+
+		/* Concatenate the buffers */
+
+		ACPI_MEMCPY(new_buf, operand0->buffer.pointer,
+			    operand0->buffer.length);
+		ACPI_MEMCPY(new_buf + operand0->buffer.length,
+			    local_operand1->buffer.pointer,
+			    local_operand1->buffer.length);
+		break;
+
+	default:
+
+		/* Invalid object type, should not happen here */
+
+		ACPI_ERROR((AE_INFO, "Invalid object type: %X",
+			    ACPI_GET_OBJECT_TYPE(operand0)));
+		status = AE_AML_INTERNAL;
+		goto cleanup;
+	}
+
+	*actual_return_desc = return_desc;
+
+      cleanup:
+	if (local_operand1 != operand1) {
+		acpi_ut_remove_reference(local_operand1);
+	}
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_do_math_op
+ *
+ * PARAMETERS:  Opcode              - AML opcode
+ *              Integer0            - Integer operand #0
+ *              Integer1            - Integer operand #1
+ *
+ * RETURN:      Integer result of the operation
+ *
+ * DESCRIPTION: Execute a math AML opcode. The purpose of having all of the
+ *              math functions here is to prevent a lot of pointer dereferencing
+ *              to obtain the operands.
+ *
+ ******************************************************************************/
+
+acpi_integer
+acpi_ex_do_math_op(u16 opcode, acpi_integer integer0, acpi_integer integer1)
+{
+
+	ACPI_FUNCTION_ENTRY();
+
+	switch (opcode) {
+	case AML_ADD_OP:	/* Add (Integer0, Integer1, Result) */
+
+		return (integer0 + integer1);
+
+	case AML_BIT_AND_OP:	/* And (Integer0, Integer1, Result) */
+
+		return (integer0 & integer1);
+
+	case AML_BIT_NAND_OP:	/* NAnd (Integer0, Integer1, Result) */
+
+		return (~(integer0 & integer1));
+
+	case AML_BIT_OR_OP:	/* Or (Integer0, Integer1, Result) */
+
+		return (integer0 | integer1);
+
+	case AML_BIT_NOR_OP:	/* NOr (Integer0, Integer1, Result) */
+
+		return (~(integer0 | integer1));
+
+	case AML_BIT_XOR_OP:	/* XOr (Integer0, Integer1, Result) */
+
+		return (integer0 ^ integer1);
+
+	case AML_MULTIPLY_OP:	/* Multiply (Integer0, Integer1, Result) */
+
+		return (integer0 * integer1);
+
+	case AML_SHIFT_LEFT_OP:	/* shift_left (Operand, shift_count, Result) */
+
+		/*
+		 * We need to check if the shift count is greater than or equal to the
+		 * integer bit width, since the behavior of such a shift is not
+		 * well-defined in the C language.
+		 */
+		if (integer1 >= acpi_gbl_integer_bit_width) {
+			return (0);
+		}
+		return (integer0 << integer1);
+
+	case AML_SHIFT_RIGHT_OP:	/* shift_right (Operand, shift_count, Result) */
+
+		/*
+		 * We need to check if the shift count is greater than or equal to the
+		 * integer bit width, since the behavior of such a shift is not
+		 * well-defined in the C language.
+		 */
+		if (integer1 >= acpi_gbl_integer_bit_width) {
+			return (0);
+		}
+		return (integer0 >> integer1);
+
+	case AML_SUBTRACT_OP:	/* Subtract (Integer0, Integer1, Result) */
+
+		return (integer0 - integer1);
+
+	default:
+
+		return (0);
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_do_logical_numeric_op
+ *
+ * PARAMETERS:  Opcode              - AML opcode
+ *              Integer0            - Integer operand #0
+ *              Integer1            - Integer operand #1
+ *              logical_result      - TRUE/FALSE result of the operation
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute a logical "Numeric" AML opcode. For these Numeric
+ *              operators (LAnd and LOr), both operands must be integers.
+ *
+ *              Note: cleanest machine code seems to be produced by the code
+ *              below, rather than using statements of the form:
+ *                  Result = (Integer0 && Integer1);
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_do_logical_numeric_op(u16 opcode,
+			      acpi_integer integer0,
+			      acpi_integer integer1, u8 * logical_result)
+{
+	acpi_status status = AE_OK;
+	u8 local_result = FALSE;
+
+	ACPI_FUNCTION_TRACE(ex_do_logical_numeric_op);
+
+	switch (opcode) {
+	case AML_LAND_OP:	/* LAnd (Integer0, Integer1) */
+
+		if (integer0 && integer1) {
+			local_result = TRUE;
+		}
+		break;
+
+	case AML_LOR_OP:	/* LOr (Integer0, Integer1) */
+
+		if (integer0 || integer1) {
+			local_result = TRUE;
+		}
+		break;
+
+	default:
+		status = AE_AML_INTERNAL;
+		break;
+	}
+
+	/* Return the logical result and status */
+
+	*logical_result = local_result;
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_do_logical_op
+ *
+ * PARAMETERS:  Opcode              - AML opcode
+ *              Operand0            - operand #0
+ *              Operand1            - operand #1
+ *              logical_result      - TRUE/FALSE result of the operation
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute a logical AML opcode. The purpose of having all of the
+ *              functions here is to prevent a lot of pointer dereferencing
+ *              to obtain the operands and to simplify the generation of the
+ *              logical value. For the Numeric operators (LAnd and LOr), both
+ *              operands must be integers. For the other logical operators,
+ *              operands can be any combination of Integer/String/Buffer. The
+ *              first operand determines the type to which the second operand
+ *              will be converted.
+ *
+ *              Note: cleanest machine code seems to be produced by the code
+ *              below, rather than using statements of the form:
+ *                  Result = (Operand0 == Operand1);
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_do_logical_op(u16 opcode,
+		      union acpi_operand_object *operand0,
+		      union acpi_operand_object *operand1, u8 * logical_result)
+{
+	union acpi_operand_object *local_operand1 = operand1;
+	acpi_integer integer0;
+	acpi_integer integer1;
+	u32 length0;
+	u32 length1;
+	acpi_status status = AE_OK;
+	u8 local_result = FALSE;
+	int compare;
+
+	ACPI_FUNCTION_TRACE(ex_do_logical_op);
+
+	/*
+	 * Convert the second operand if necessary.  The first operand
+	 * determines the type of the second operand, (See the Data Types
+	 * section of the ACPI 3.0+ specification.)  Both object types are
+	 * guaranteed to be either Integer/String/Buffer by the operand
+	 * resolution mechanism.
+	 */
+	switch (ACPI_GET_OBJECT_TYPE(operand0)) {
+	case ACPI_TYPE_INTEGER:
+		status =
+		    acpi_ex_convert_to_integer(operand1, &local_operand1, 16);
+		break;
+
+	case ACPI_TYPE_STRING:
+		status = acpi_ex_convert_to_string(operand1, &local_operand1,
+						   ACPI_IMPLICIT_CONVERT_HEX);
+		break;
+
+	case ACPI_TYPE_BUFFER:
+		status = acpi_ex_convert_to_buffer(operand1, &local_operand1);
+		break;
+
+	default:
+		status = AE_AML_INTERNAL;
+		break;
+	}
+
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	/*
+	 * Two cases: 1) Both Integers, 2) Both Strings or Buffers
+	 */
+	if (ACPI_GET_OBJECT_TYPE(operand0) == ACPI_TYPE_INTEGER) {
+		/*
+		 * 1) Both operands are of type integer
+		 *    Note: local_operand1 may have changed above
+		 */
+		integer0 = operand0->integer.value;
+		integer1 = local_operand1->integer.value;
+
+		switch (opcode) {
+		case AML_LEQUAL_OP:	/* LEqual (Operand0, Operand1) */
+
+			if (integer0 == integer1) {
+				local_result = TRUE;
+			}
+			break;
+
+		case AML_LGREATER_OP:	/* LGreater (Operand0, Operand1) */
+
+			if (integer0 > integer1) {
+				local_result = TRUE;
+			}
+			break;
+
+		case AML_LLESS_OP:	/* LLess (Operand0, Operand1) */
+
+			if (integer0 < integer1) {
+				local_result = TRUE;
+			}
+			break;
+
+		default:
+			status = AE_AML_INTERNAL;
+			break;
+		}
+	} else {
+		/*
+		 * 2) Both operands are Strings or both are Buffers
+		 *    Note: Code below takes advantage of common Buffer/String
+		 *          object fields. local_operand1 may have changed above. Use
+		 *          memcmp to handle nulls in buffers.
+		 */
+		length0 = operand0->buffer.length;
+		length1 = local_operand1->buffer.length;
+
+		/* Lexicographic compare: compare the data bytes */
+
+		compare = ACPI_MEMCMP(operand0->buffer.pointer,
+				      local_operand1->buffer.pointer,
+				      (length0 > length1) ? length1 : length0);
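+
+		/*
+		 * Example: "ABC" is LGreater than "AB" (bytes match to the
+		 * shorter length, then the longer operand wins), and "AB" is
+		 * LLess than "AC" because 'B' < 'C'.
+		 */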
+
+		switch (opcode) {
+		case AML_LEQUAL_OP:	/* LEqual (Operand0, Operand1) */
+
+			/* Length and all bytes must be equal */
+
+			if ((length0 == length1) && (compare == 0)) {
+
+				/* Length and all bytes match ==> TRUE */
+
+				local_result = TRUE;
+			}
+			break;
+
+		case AML_LGREATER_OP:	/* LGreater (Operand0, Operand1) */
+
+			if (compare > 0) {
+				local_result = TRUE;
+				goto cleanup;	/* TRUE */
+			}
+			if (compare < 0) {
+				goto cleanup;	/* FALSE */
+			}
+
+			/* Bytes match (to shortest length), compare lengths */
+
+			if (length0 > length1) {
+				local_result = TRUE;
+			}
+			break;
+
+		case AML_LLESS_OP:	/* LLess (Operand0, Operand1) */
+
+			if (compare > 0) {
+				goto cleanup;	/* FALSE */
+			}
+			if (compare < 0) {
+				local_result = TRUE;
+				goto cleanup;	/* TRUE */
+			}
+
+			/* Bytes match (to shortest length), compare lengths */
+
+			if (length0 < length1) {
+				local_result = TRUE;
+			}
+			break;
+
+		default:
+			status = AE_AML_INTERNAL;
+			break;
+		}
+	}
+
+      cleanup:
+
+	/* New object was created if implicit conversion performed - delete */
+
+	if (local_operand1 != operand1) {
+		acpi_ut_remove_reference(local_operand1);
+	}
+
+	/* Return the logical result and status */
+
+	*logical_result = local_result;
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
new file mode 100644
index 0000000..d301c1f
--- /dev/null
+++ b/drivers/acpi/acpica/exmutex.c
@@ -0,0 +1,474 @@
+
+/******************************************************************************
+ *
+ * Module Name: exmutex - ASL Mutex Acquire/Release functions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "acevents.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exmutex")
+
+/* Local prototypes */
+static void
+acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
+		   struct acpi_thread_state *thread);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_unlink_mutex
+ *
+ * PARAMETERS:  obj_desc            - The mutex to be unlinked
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Remove a mutex from the "AcquiredMutex" list
+ *
+ ******************************************************************************/
+
+void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
+{
+	struct acpi_thread_state *thread = obj_desc->mutex.owner_thread;
+
+	if (!thread) {
+		return;
+	}
+
+	/* Doubly linked list */
+
+	if (obj_desc->mutex.next) {
+		(obj_desc->mutex.next)->mutex.prev = obj_desc->mutex.prev;
+	}
+
+	if (obj_desc->mutex.prev) {
+		(obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next;
+	} else {
+		thread->acquired_mutex_list = obj_desc->mutex.next;
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_link_mutex
+ *
+ * PARAMETERS:  obj_desc        - The mutex to be linked
+ *              Thread          - Current executing thread object
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Add a mutex to the "AcquiredMutex" list for this walk
+ *
+ ******************************************************************************/
+
+static void
+acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
+		   struct acpi_thread_state *thread)
+{
+	union acpi_operand_object *list_head;
+
+	list_head = thread->acquired_mutex_list;
+
+	/* This object will be the first object in the list */
+
+	obj_desc->mutex.prev = NULL;
+	obj_desc->mutex.next = list_head;
+
+	/* Update old first object to point back to this object */
+
+	if (list_head) {
+		list_head->mutex.prev = obj_desc;
+	}
+
+	/* Update list head */
+
+	thread->acquired_mutex_list = obj_desc;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_acquire_mutex_object
+ *
+ * PARAMETERS:  Timeout             - Timeout in milliseconds
+ *              obj_desc            - Mutex object
+ *              thread_id           - Current thread ID
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Acquire an AML mutex, low-level interface. Provides a common
+ *              path that supports multiple acquires by the same thread.
+ *
+ * MUTEX:       Interpreter must be locked
+ *
+ * NOTE: This interface is called from three places:
+ * 1) From acpi_ex_acquire_mutex, via an AML Acquire() operator
+ * 2) From acpi_ex_acquire_global_lock when an AML Field access requires the
+ *    global lock
+ * 3) From the external interface, acpi_acquire_global_lock
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_acquire_mutex_object(u16 timeout,
+			     union acpi_operand_object *obj_desc,
+			     acpi_thread_id thread_id)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex_object, obj_desc);
+
+	if (!obj_desc) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Support for multiple acquires by the owning thread */
+
+	if (obj_desc->mutex.thread_id == thread_id) {
+		/*
+		 * The mutex is already owned by this thread, just increment the
+		 * acquisition depth
+		 */
+		obj_desc->mutex.acquisition_depth++;
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Acquire the mutex, wait if necessary. Special case for Global Lock */
+
+	if (obj_desc == acpi_gbl_global_lock_mutex) {
+		status = acpi_ev_acquire_global_lock(timeout);
+	} else {
+		status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
+						   timeout);
+	}
+
+	if (ACPI_FAILURE(status)) {
+
+		/* Includes failure from a timeout on time_desc */
+
+		return_ACPI_STATUS(status);
+	}
+
+	/* Acquired the mutex: update mutex object */
+
+	obj_desc->mutex.thread_id = thread_id;
+	obj_desc->mutex.acquisition_depth = 1;
+	obj_desc->mutex.original_sync_level = 0;
+	obj_desc->mutex.owner_thread = NULL;	/* Used only for AML Acquire() */
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_acquire_mutex
+ *
+ * PARAMETERS:  time_desc           - Timeout integer
+ *              obj_desc            - Mutex object
+ *              walk_state          - Current method execution state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Acquire an AML mutex
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
+		      union acpi_operand_object *obj_desc,
+		      struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex, obj_desc);
+
+	if (!obj_desc) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Must have a valid thread ID */
+
+	if (!walk_state->thread) {
+		ACPI_ERROR((AE_INFO,
+			    "Cannot acquire Mutex [%4.4s], null thread info",
+			    acpi_ut_get_node_name(obj_desc->mutex.node)));
+		return_ACPI_STATUS(AE_AML_INTERNAL);
+	}
+
+	/*
+	 * Current sync level must be less than or equal to the sync level of the
+	 * mutex. This mechanism provides some deadlock prevention
+	 */
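+	/*
+	 * For example, while holding a SyncLevel 8 mutex (current level 8),
+	 * a method may acquire another mutex of SyncLevel 8 or higher, but
+	 * not one of SyncLevel 4.
+	 */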
+	if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
+		ACPI_ERROR((AE_INFO,
+			    "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%d)",
+			    acpi_ut_get_node_name(obj_desc->mutex.node),
+			    walk_state->thread->current_sync_level));
+		return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
+	}
+
+	status = acpi_ex_acquire_mutex_object((u16) time_desc->integer.value,
+					      obj_desc,
+					      walk_state->thread->thread_id);
+	if (ACPI_SUCCESS(status) && obj_desc->mutex.acquisition_depth == 1) {
+
+		/* Save Thread object, original/current sync levels */
+
+		obj_desc->mutex.owner_thread = walk_state->thread;
+		obj_desc->mutex.original_sync_level =
+		    walk_state->thread->current_sync_level;
+		walk_state->thread->current_sync_level =
+		    obj_desc->mutex.sync_level;
+
+		/* Link the mutex to the current thread for force-unlock at method exit */
+
+		acpi_ex_link_mutex(obj_desc, walk_state->thread);
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_release_mutex_object
+ *
+ * PARAMETERS:  obj_desc            - The object descriptor for this op
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Release a previously acquired Mutex, low level interface.
+ *              Provides a common path that supports multiple releases (after
+ *              previous multiple acquires) by the same thread.
+ *
+ * MUTEX:       Interpreter must be locked
+ *
+ * NOTE: This interface is called from three places:
+ * 1) From acpi_ex_release_mutex, via an AML Release() operator
+ * 2) From acpi_ex_release_global_lock when an AML Field access requires the
+ *    global lock
+ * 3) From the external interface, acpi_release_global_lock
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(ex_release_mutex_object);
+
+	if (obj_desc->mutex.acquisition_depth == 0) {
+		return (AE_NOT_ACQUIRED);
+	}
+
+	/* Match multiple Acquires with multiple Releases */
+
+	obj_desc->mutex.acquisition_depth--;
+	if (obj_desc->mutex.acquisition_depth != 0) {
+
+		/* Just decrement the depth and return */
+
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	if (obj_desc->mutex.owner_thread) {
+
+		/* Unlink the mutex from the owner's list */
+
+		acpi_ex_unlink_mutex(obj_desc);
+		obj_desc->mutex.owner_thread = NULL;
+	}
+
+	/* Release the mutex, special case for Global Lock */
+
+	if (obj_desc == acpi_gbl_global_lock_mutex) {
+		status = acpi_ev_release_global_lock();
+	} else {
+		acpi_os_release_mutex(obj_desc->mutex.os_mutex);
+	}
+
+	/* Clear mutex info */
+
+	obj_desc->mutex.thread_id = NULL;
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_release_mutex
+ *
+ * PARAMETERS:  obj_desc            - The object descriptor for this op
+ *              walk_state          - Current method execution state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Release a previously acquired Mutex.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
+		      struct acpi_walk_state *walk_state)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(ex_release_mutex);
+
+	if (!obj_desc) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* The mutex must have been previously acquired in order to release it */
+
+	if (!obj_desc->mutex.owner_thread) {
+		ACPI_ERROR((AE_INFO,
+			    "Cannot release Mutex [%4.4s], not acquired",
+			    acpi_ut_get_node_name(obj_desc->mutex.node)));
+		return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
+	}
+
+	/* Must have a valid thread ID (checked before it is dereferenced below) */
+
+	if (!walk_state->thread) {
+		ACPI_ERROR((AE_INFO,
+			    "Cannot release Mutex [%4.4s], null thread info",
+			    acpi_ut_get_node_name(obj_desc->mutex.node)));
+		return_ACPI_STATUS(AE_AML_INTERNAL);
+	}
+
+	/*
+	 * The Mutex is owned, but this thread must be the owner.
+	 * Special case for the Global Lock: any thread can release it.
+	 */
+	if ((obj_desc->mutex.owner_thread->thread_id !=
+	     walk_state->thread->thread_id)
+	    && (obj_desc != acpi_gbl_global_lock_mutex)) {
+		ACPI_ERROR((AE_INFO,
+			    "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
+			    (unsigned long)walk_state->thread->thread_id,
+			    acpi_ut_get_node_name(obj_desc->mutex.node),
+			    (unsigned long)obj_desc->mutex.owner_thread->
+			    thread_id));
+		return_ACPI_STATUS(AE_AML_NOT_OWNER);
+	}
+
+	/*
+	 * The sync level of the mutex must be less than or equal to the current
+	 * sync level
+	 */
+	if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
+		ACPI_ERROR((AE_INFO,
+			    "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %d current %d",
+			    acpi_ut_get_node_name(obj_desc->mutex.node),
+			    obj_desc->mutex.sync_level,
+			    walk_state->thread->current_sync_level));
+		return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
+	}
+
+	status = acpi_ex_release_mutex_object(obj_desc);
+
+	if (obj_desc->mutex.acquisition_depth == 0) {
+
+		/* Restore the original sync_level */
+
+		walk_state->thread->current_sync_level =
+		    obj_desc->mutex.original_sync_level;
+	}
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_release_all_mutexes
+ *
+ * PARAMETERS:  Thread          - Current executing thread object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Release all mutexes held by this thread
+ *
+ * NOTE: This function is called as the thread is exiting the interpreter.
+ * Mutexes are not released when an individual control method is exited, but
+ * only when the parent thread actually exits the interpreter. This allows one
+ * method to acquire a mutex, and a different method to release it, as long as
+ * this is performed underneath a single parent control method.
+ *
+ ******************************************************************************/
+
+void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
+{
+	union acpi_operand_object *next = thread->acquired_mutex_list;
+	union acpi_operand_object *obj_desc;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/* Traverse the list of owned mutexes, releasing each one */
+
+	while (next) {
+		obj_desc = next;
+		next = obj_desc->mutex.next;
+
+		obj_desc->mutex.prev = NULL;
+		obj_desc->mutex.next = NULL;
+		obj_desc->mutex.acquisition_depth = 0;
+
+		/* Release the mutex, special case for Global Lock */
+
+		if (obj_desc == acpi_gbl_global_lock_mutex) {
+
+			/* Ignore errors */
+
+			(void)acpi_ev_release_global_lock();
+		} else {
+			acpi_os_release_mutex(obj_desc->mutex.os_mutex);
+		}
+
+		/* Mark mutex unowned */
+
+		obj_desc->mutex.owner_thread = NULL;
+		obj_desc->mutex.thread_id = NULL;
+
+		/* Update Thread sync_level (Last mutex is the important one) */
+
+		thread->current_sync_level =
+		    obj_desc->mutex.original_sync_level;
+	}
+}
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
new file mode 100644
index 0000000..ffdae12
--- /dev/null
+++ b/drivers/acpi/acpica/exnames.c
@@ -0,0 +1,436 @@
+
+/******************************************************************************
+ *
+ * Module Name: exnames - interpreter/scanner name load/execute
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exnames")
+
+/* Local prototypes */
+static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs);
+
+static acpi_status
+acpi_ex_name_segment(u8 ** in_aml_address, char *name_string);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_allocate_name_string
+ *
+ * PARAMETERS:  prefix_count        - Count of parent levels. Special cases:
+ *                                    (-1)==root,  0==none
+ *              num_name_segs       - count of 4-character name segments
+ *
+ * RETURN:      A pointer to the allocated string segment.  This segment must
+ *              be deleted by the caller.
+ *
+ * DESCRIPTION: Allocate a buffer for a name string. Ensure allocated name
+ *              string is long enough, and set up prefix if any.
+ *
+ ******************************************************************************/
+
+static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
+{
+	char *temp_ptr;
+	char *name_string;
+	u32 size_needed;
+
+	ACPI_FUNCTION_TRACE(ex_allocate_name_string);
+
+	/*
+	 * Allow room for all \ and ^ prefixes, all segments and a multi_name_prefix.
+	 * Also, one byte for the null terminator.
+	 * This may actually be somewhat longer than needed.
+	 */
+	if (prefix_count == ACPI_UINT32_MAX) {
+
+		/* Special case for root */
+
+		size_needed = 1 + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1;
+	} else {
+		size_needed =
+		    prefix_count + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1;
+	}
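+
+	/*
+	 * Size illustration (added comment): a path with two carat prefixes
+	 * and two name segments ("^^PCI0.ISA_") needs 2 + (4 * 2) + 2 + 1 = 13
+	 * bytes: the prefixes, the segments, the dual/multi prefix bytes and
+	 * the null terminator.
+	 */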
+
+	/*
+	 * Allocate a buffer for the name.
+	 * This buffer must be deleted by the caller!
+	 */
+	name_string = ACPI_ALLOCATE(size_needed);
+	if (!name_string) {
+		ACPI_ERROR((AE_INFO,
+			    "Could not allocate size %d", size_needed));
+		return_PTR(NULL);
+	}
+
+	temp_ptr = name_string;
+
+	/* Set up Root or Parent prefixes if needed */
+
+	if (prefix_count == ACPI_UINT32_MAX) {
+		*temp_ptr++ = AML_ROOT_PREFIX;
+	} else {
+		while (prefix_count--) {
+			*temp_ptr++ = AML_PARENT_PREFIX;
+		}
+	}
+
+	/* Set up Dual or Multi prefixes if needed */
+
+	if (num_name_segs > 2) {
+
+		/* Set up multi prefixes   */
+
+		*temp_ptr++ = AML_MULTI_NAME_PREFIX_OP;
+		*temp_ptr++ = (char)num_name_segs;
+	} else if (2 == num_name_segs) {
+
+		/* Set up dual prefixes */
+
+		*temp_ptr++ = AML_DUAL_NAME_PREFIX;
+	}
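+
+	/*
+	 * Encoding illustration (added comment): the path \_SB_.PCI0.ISA_ is
+	 * emitted as RootPrefix ('\'), MultiNamePrefixOp, a segment count of
+	 * 3, then the segments "_SB_", "PCI0" and "ISA_".
+	 */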
+
+	/*
+	 * Terminate string following prefixes. acpi_ex_name_segment() will
+	 * append the segment(s)
+	 */
+	*temp_ptr = 0;
+
+	return_PTR(name_string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_name_segment
+ *
+ * PARAMETERS:  in_aml_address  - Pointer to the name in the AML code
+ *              name_string     - Where to return the name. The name is appended
+ *                                to any existing string to form a namepath
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Extract an ACPI name (4 bytes) from the AML byte stream
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
+{
+	char *aml_address = (void *)*in_aml_address;
+	acpi_status status = AE_OK;
+	u32 index;
+	char char_buf[5];
+
+	ACPI_FUNCTION_TRACE(ex_name_segment);
+
+	/*
+	 * If the first character is a digit, then we know that we aren't looking at a
+	 * valid name segment
+	 */
+	char_buf[0] = *aml_address;
+
+	if ('0' <= char_buf[0] && char_buf[0] <= '9') {
+		ACPI_ERROR((AE_INFO, "Invalid leading digit: %c", char_buf[0]));
+		return_ACPI_STATUS(AE_CTRL_PENDING);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Bytes from stream:\n"));
+
+	for (index = 0; (index < ACPI_NAME_SIZE)
+	     && (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) {
+		char_buf[index] = *aml_address++;
+		ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index]));
+	}
+
+	/* Valid name segment  */
+
+	if (index == 4) {
+
+		/* Found 4 valid characters */
+
+		char_buf[4] = '\0';
+
+		if (name_string) {
+			ACPI_STRCAT(name_string, char_buf);
+			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+					  "Appended to - %s\n", name_string));
+		} else {
+			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+					  "No Name string - %s\n", char_buf));
+		}
+	} else if (index == 0) {
+		/*
+		 * First character was not a valid name character,
+		 * so we are looking at something other than a name.
+		 */
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "Leading character is not alpha: %02Xh (not a name)\n",
+				  char_buf[0]));
+		status = AE_CTRL_PENDING;
+	} else {
+		/*
+		 * Segment started with one or more valid characters, but fewer than
+		 * the required 4
+		 */
+		status = AE_AML_BAD_NAME;
+		ACPI_ERROR((AE_INFO,
+			    "Bad character %02x in name, at %p",
+			    *aml_address, aml_address));
+	}
+
+	*in_aml_address = ACPI_CAST_PTR(u8, aml_address);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_get_name_string
+ *
+ * PARAMETERS:  data_type           - Object type to be associated with this
+ *                                    name
+ *              in_aml_address      - Pointer to the namestring in the AML code
+ *              out_name_string     - Where the namestring is returned
+ *              out_name_length     - Length of the returned string
+ *
+ * RETURN:      Status, namestring and length
+ *
+ * DESCRIPTION: Extract a full namepath from the AML byte stream,
+ *              including any prefixes.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_get_name_string(acpi_object_type data_type,
+			u8 * in_aml_address,
+			char **out_name_string, u32 * out_name_length)
+{
+	acpi_status status = AE_OK;
+	u8 *aml_address = in_aml_address;
+	char *name_string = NULL;
+	u32 num_segments;
+	u32 prefix_count = 0;
+	u8 has_prefix = FALSE;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_get_name_string, aml_address);
+
+	if (ACPI_TYPE_LOCAL_REGION_FIELD == data_type ||
+	    ACPI_TYPE_LOCAL_BANK_FIELD == data_type ||
+	    ACPI_TYPE_LOCAL_INDEX_FIELD == data_type) {
+
+		/* Disallow prefixes for types associated with field_unit names */
+
+		name_string = acpi_ex_allocate_name_string(0, 1);
+		if (!name_string) {
+			status = AE_NO_MEMORY;
+		} else {
+			status =
+			    acpi_ex_name_segment(&aml_address, name_string);
+		}
+	} else {
+		/*
+		 * data_type is not a field name.
+		 * Examine first character of name for root or parent prefix operators
+		 */
+		switch (*aml_address) {
+		case AML_ROOT_PREFIX:
+
+			ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+					  "RootPrefix(\\) at %p\n",
+					  aml_address));
+
+			/*
+			 * Remember that we have a root_prefix --
+			 * see comment in acpi_ex_allocate_name_string()
+			 */
+			aml_address++;
+			prefix_count = ACPI_UINT32_MAX;
+			has_prefix = TRUE;
+			break;
+
+		case AML_PARENT_PREFIX:
+
+			/* Increment past possibly multiple parent prefixes */
+
+			do {
+				ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+						  "ParentPrefix (^) at %p\n",
+						  aml_address));
+
+				aml_address++;
+				prefix_count++;
+
+			} while (*aml_address == AML_PARENT_PREFIX);
+
+			has_prefix = TRUE;
+			break;
+
+		default:
+
+			/* Not a prefix character */
+
+			break;
+		}
+
+		/* Examine first character of name for name segment prefix operator */
+
+		switch (*aml_address) {
+		case AML_DUAL_NAME_PREFIX:
+
+			ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+					  "DualNamePrefix at %p\n",
+					  aml_address));
+
+			aml_address++;
+			name_string =
+			    acpi_ex_allocate_name_string(prefix_count, 2);
+			if (!name_string) {
+				status = AE_NO_MEMORY;
+				break;
+			}
+
+			/* Indicate that we processed a prefix */
+
+			has_prefix = TRUE;
+
+			status =
+			    acpi_ex_name_segment(&aml_address, name_string);
+			if (ACPI_SUCCESS(status)) {
+				status =
+				    acpi_ex_name_segment(&aml_address,
+							 name_string);
+			}
+			break;
+
+		case AML_MULTI_NAME_PREFIX_OP:
+
+			ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+					  "MultiNamePrefix at %p\n",
+					  aml_address));
+
+			/* Fetch count of segments remaining in name path */
+
+			aml_address++;
+			num_segments = *aml_address;
+
+			name_string =
+			    acpi_ex_allocate_name_string(prefix_count,
+							 num_segments);
+			if (!name_string) {
+				status = AE_NO_MEMORY;
+				break;
+			}
+
+			/* Indicate that we processed a prefix */
+
+			aml_address++;
+			has_prefix = TRUE;
+
+			while (num_segments &&
+			       (status =
+				acpi_ex_name_segment(&aml_address,
+						     name_string)) == AE_OK) {
+				num_segments--;
+			}
+
+			break;
+
+		case 0:
+
+			/* null_name valid as of 8-12-98 ASL/AML Grammar Update */
+
+			if (prefix_count == ACPI_UINT32_MAX) {
+				ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+						  "NameSeg is \"\\\" followed by NULL\n"));
+			}
+
+			/* Consume the NULL byte */
+
+			aml_address++;
+			name_string =
+			    acpi_ex_allocate_name_string(prefix_count, 0);
+			if (!name_string) {
+				status = AE_NO_MEMORY;
+				break;
+			}
+
+			break;
+
+		default:
+
+			/* Name segment string */
+
+			name_string =
+			    acpi_ex_allocate_name_string(prefix_count, 1);
+			if (!name_string) {
+				status = AE_NO_MEMORY;
+				break;
+			}
+
+			status =
+			    acpi_ex_name_segment(&aml_address, name_string);
+			break;
+		}
+	}
+
+	if (AE_CTRL_PENDING == status && has_prefix) {
+
+		/* Ran out of segments after processing a prefix */
+
+		ACPI_ERROR((AE_INFO, "Malformed Name at %p", name_string));
+		status = AE_AML_BAD_NAME;
+	}
+
+	if (ACPI_FAILURE(status)) {
+		if (name_string) {
+			ACPI_FREE(name_string);
+		}
+		return_ACPI_STATUS(status);
+	}
+
+	*out_name_string = name_string;
+	*out_name_length = (u32) (aml_address - in_aml_address);
+
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
new file mode 100644
index 0000000..b530480
--- /dev/null
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -0,0 +1,1050 @@
+
+/******************************************************************************
+ *
+ * Module Name: exoparg1 - AML execution - opcodes with 1 argument
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "amlcode.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exoparg1")
+
+/*!
+ * Naming convention for AML interpreter execution routines.
+ *
+ * The routines that begin execution of AML opcodes are named with a common
+ * convention based upon the number of arguments, the number of target operands,
+ * and whether or not a value is returned:
+ *
+ *      AcpiExOpcode_xA_yT_zR
+ *
+ * Where:
+ *
+ * xA - ARGUMENTS:    The number of arguments (input operands) that are
+ *                    required for this opcode type (0 through 6 args).
+ * yT - TARGETS:      The number of targets (output operands) that are required
+ *                    for this opcode type (0, 1, or 2 targets).
+ * zR - RETURN VALUE: Indicates whether this opcode type returns a value
+ *                    as the function return (0 or 1).
+ *
+ * The AcpiExOpcode* functions are called via the Dispatcher component with
+ * fully resolved operands.
+!*/
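+
+/*
+ * Illustration (added comment): Timer() takes no arguments, has no target and
+ * returns a value, so it is dispatched to acpi_ex_opcode_0A_0T_1R() below,
+ * while Release (mutex_object), with one argument and no target or return
+ * value, is handled by acpi_ex_opcode_1A_0T_0R().
+ */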
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_opcode_0A_0T_1R
+ *
+ * PARAMETERS:  walk_state          - Current state (contains AML opcode)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute operator with no operands, one return value
+ *
+ ******************************************************************************/
+acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
+{
+	acpi_status status = AE_OK;
+	union acpi_operand_object *return_desc = NULL;
+
+	ACPI_FUNCTION_TRACE_STR(ex_opcode_0A_0T_1R,
+				acpi_ps_get_opcode_name(walk_state->opcode));
+
+	/* Examine the AML opcode */
+
+	switch (walk_state->opcode) {
+	case AML_TIMER_OP:	/*  Timer () */
+
+		/* Create a return object of type Integer */
+
+		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+		return_desc->integer.value = acpi_os_get_timer();
+		break;
+
+	default:		/*  Unknown opcode  */
+
+		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+			    walk_state->opcode));
+		status = AE_AML_BAD_OPCODE;
+		break;
+	}
+
+      cleanup:
+
+	/* Delete return object on error */
+
+	if ((ACPI_FAILURE(status)) || walk_state->result_obj) {
+		acpi_ut_remove_reference(return_desc);
+		walk_state->result_obj = NULL;
+	} else {
+		/* Save the return value */
+
+		walk_state->result_obj = return_desc;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_opcode_1A_0T_0R
+ *
+ * PARAMETERS:  walk_state          - Current state (contains AML opcode)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute Type 1 monadic operator with numeric operand on
+ *              object stack
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object **operand = &walk_state->operands[0];
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_0T_0R,
+				acpi_ps_get_opcode_name(walk_state->opcode));
+
+	/* Examine the AML opcode */
+
+	switch (walk_state->opcode) {
+	case AML_RELEASE_OP:	/*  Release (mutex_object) */
+
+		status = acpi_ex_release_mutex(operand[0], walk_state);
+		break;
+
+	case AML_RESET_OP:	/*  Reset (event_object) */
+
+		status = acpi_ex_system_reset_event(operand[0]);
+		break;
+
+	case AML_SIGNAL_OP:	/*  Signal (event_object) */
+
+		status = acpi_ex_system_signal_event(operand[0]);
+		break;
+
+	case AML_SLEEP_OP:	/*  Sleep (msec_time) */
+
+		status = acpi_ex_system_do_suspend(operand[0]->integer.value);
+		break;
+
+	case AML_STALL_OP:	/*  Stall (usec_time) */
+
+		status =
+		    acpi_ex_system_do_stall((u32) operand[0]->integer.value);
+		break;
+
+	case AML_UNLOAD_OP:	/*  Unload (Handle) */
+
+		status = acpi_ex_unload_table(operand[0]);
+		break;
+
+	default:		/*  Unknown opcode  */
+
+		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+			    walk_state->opcode));
+		status = AE_AML_BAD_OPCODE;
+		break;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_opcode_1A_1T_0R
+ *
+ * PARAMETERS:  walk_state          - Current state (contains AML opcode)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute opcode with one argument, one target, and no
+ *              return value.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
+{
+	acpi_status status = AE_OK;
+	union acpi_operand_object **operand = &walk_state->operands[0];
+
+	ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_1T_0R,
+				acpi_ps_get_opcode_name(walk_state->opcode));
+
+	/* Examine the AML opcode */
+
+	switch (walk_state->opcode) {
+	case AML_LOAD_OP:
+
+		status = acpi_ex_load_op(operand[0], operand[1], walk_state);
+		break;
+
+	default:		/* Unknown opcode */
+
+		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+			    walk_state->opcode));
+		status = AE_AML_BAD_OPCODE;
+		goto cleanup;
+	}
+
+      cleanup:
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_opcode_1A_1T_1R
+ *
+ * PARAMETERS:  walk_state          - Current state (contains AML opcode)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute opcode with one argument, one target, and a
+ *              return value.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
+{
+	acpi_status status = AE_OK;
+	union acpi_operand_object **operand = &walk_state->operands[0];
+	union acpi_operand_object *return_desc = NULL;
+	union acpi_operand_object *return_desc2 = NULL;
+	u32 temp32;
+	u32 i;
+	acpi_integer power_of_ten;
+	acpi_integer digit;
+
+	ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_1T_1R,
+				acpi_ps_get_opcode_name(walk_state->opcode));
+
+	/* Examine the AML opcode */
+
+	switch (walk_state->opcode) {
+	case AML_BIT_NOT_OP:
+	case AML_FIND_SET_LEFT_BIT_OP:
+	case AML_FIND_SET_RIGHT_BIT_OP:
+	case AML_FROM_BCD_OP:
+	case AML_TO_BCD_OP:
+	case AML_COND_REF_OF_OP:
+
+		/* Create a return object of type Integer for these opcodes */
+
+		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		switch (walk_state->opcode) {
+		case AML_BIT_NOT_OP:	/* Not (Operand, Result)  */
+
+			return_desc->integer.value = ~operand[0]->integer.value;
+			break;
+
+		case AML_FIND_SET_LEFT_BIT_OP:	/* find_set_left_bit (Operand, Result) */
+
+			return_desc->integer.value = operand[0]->integer.value;
+
+			/*
+			 * The ACPI specification describes the Integer type as a little
+			 * endian unsigned value, so this boundary condition is valid.
+			 */
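+			/*
+			 * Example (added): an operand of 1 yields 1, an operand
+			 * with only the top bit set yields 64, and zero yields 0.
+			 */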
+			for (temp32 = 0; return_desc->integer.value &&
+			     temp32 < ACPI_INTEGER_BIT_SIZE; ++temp32) {
+				return_desc->integer.value >>= 1;
+			}
+
+			return_desc->integer.value = temp32;
+			break;
+
+		case AML_FIND_SET_RIGHT_BIT_OP:	/* find_set_right_bit (Operand, Result) */
+
+			return_desc->integer.value = operand[0]->integer.value;
+
+			/*
+			 * The ACPI specification describes the Integer type as a little
+			 * endian unsigned value, so this boundary condition is valid.
+			 */
+			for (temp32 = 0; return_desc->integer.value &&
+			     temp32 < ACPI_INTEGER_BIT_SIZE; ++temp32) {
+				return_desc->integer.value <<= 1;
+			}
+
+			/*
+			 * The bit position is one-based, so subtract from 33
+			 * (32-bit integers) or 65 (64-bit integers)
+			 */
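+			/*
+			 * Example (added): for operand 8 (1000 binary) the loop
+			 * above shifts the value to zero after 61 iterations, so
+			 * the result is 65 - 61 = 4, the one-based position of
+			 * the lowest set bit.
+			 */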
+
+			return_desc->integer.value =
+			    temp32 ==
+			    0 ? 0 : (ACPI_INTEGER_BIT_SIZE + 1) - temp32;
+			break;
+
+		case AML_FROM_BCD_OP:	/* from_bcd (BCDValue, Result) */
+
+			/*
+			 * The 64-bit ACPI integer can hold 16 4-bit BCD characters
+			 * (if table is 32-bit, integer can hold 8 BCD characters)
+			 * Convert each 4-bit BCD value
+			 */
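+			/*
+			 * Example (added): BCD input 0x1234 converts to binary
+			 * 1234, i.e. (4 * 1) + (3 * 10) + (2 * 100) + (1 * 1000).
+			 */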
+			power_of_ten = 1;
+			return_desc->integer.value = 0;
+			digit = operand[0]->integer.value;
+
+			/* Convert each BCD digit (each is one nybble wide) */
+
+			for (i = 0;
+			     (i < acpi_gbl_integer_nybble_width) && (digit > 0);
+			     i++) {
+
+				/* Get the least significant 4-bit BCD digit */
+
+				temp32 = ((u32) digit) & 0xF;
+
+				/* Check the range of the digit */
+
+				if (temp32 > 9) {
+					ACPI_ERROR((AE_INFO,
+						    "BCD digit too large (not decimal): 0x%X",
+						    temp32));
+
+					status = AE_AML_NUMERIC_OVERFLOW;
+					goto cleanup;
+				}
+
+				/* Sum the digit into the result with the current power of 10 */
+
+				return_desc->integer.value +=
+				    (((acpi_integer) temp32) * power_of_ten);
+
+				/* Shift to next BCD digit */
+
+				digit >>= 4;
+
+				/* Next power of 10 */
+
+				power_of_ten *= 10;
+			}
+			break;
+
+		case AML_TO_BCD_OP:	/* to_bcd (Operand, Result) */
+
+			return_desc->integer.value = 0;
+			digit = operand[0]->integer.value;
+
+			/* Each BCD digit is one nybble wide */
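+			/*
+			 * Example (added): binary 1234 converts to BCD 0x1234;
+			 * each successive remainder of division by 10 fills the
+			 * next 4-bit field.
+			 */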
+
+			for (i = 0;
+			     (i < acpi_gbl_integer_nybble_width) && (digit > 0);
+			     i++) {
+				(void)acpi_ut_short_divide(digit, 10, &digit,
+							   &temp32);
+
+				/*
+				 * Insert the BCD digit that resides in the
+				 * remainder from above
+				 */
+				return_desc->integer.value |=
+				    (((acpi_integer) temp32) << ACPI_MUL_4(i));
+			}
+
+			/* Overflow if there is any data left in Digit */
+
+			if (digit > 0) {
+				ACPI_ERROR((AE_INFO,
+					    "Integer too large to convert to BCD: %8.8X%8.8X",
+					    ACPI_FORMAT_UINT64(operand[0]->
+							       integer.value)));
+				status = AE_AML_NUMERIC_OVERFLOW;
+				goto cleanup;
+			}
+			break;
+
+		case AML_COND_REF_OF_OP:	/* cond_ref_of (source_object, Result) */
+
+			/*
+			 * This op is a little strange because the internal return value is
+			 * different than the return value stored in the result descriptor
+			 * (There are really two return values)
+			 */
+			if ((struct acpi_namespace_node *)operand[0] ==
+			    acpi_gbl_root_node) {
+				/*
+				 * This means that the object does not exist in the namespace,
+				 * return FALSE
+				 */
+				return_desc->integer.value = 0;
+				goto cleanup;
+			}
+
+			/* Get the object reference, store it, and remove our reference */
+
+			status = acpi_ex_get_object_reference(operand[0],
+							      &return_desc2,
+							      walk_state);
+			if (ACPI_FAILURE(status)) {
+				goto cleanup;
+			}
+
+			status =
+			    acpi_ex_store(return_desc2, operand[1], walk_state);
+			acpi_ut_remove_reference(return_desc2);
+
+			/* The object exists in the namespace, return TRUE */
+
+			return_desc->integer.value = ACPI_INTEGER_MAX;
+			goto cleanup;
+
+		default:
+			/* No other opcodes get here */
+			break;
+		}
+		break;
+
+	case AML_STORE_OP:	/* Store (Source, Target) */
+
+		/*
+		 * A store operand is typically a number, string, buffer or lvalue
+		 * Be careful about deleting the source object,
+		 * since the object itself may have been stored.
+		 */
+		status = acpi_ex_store(operand[0], operand[1], walk_state);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		/* It is possible that the Store already produced a return object */
+
+		if (!walk_state->result_obj) {
+			/*
+			 * Normally, we would remove a reference on the Operand[0]
+			 * parameter; But since it is being used as the internal return
+			 * object (meaning we would normally increment it), the two
+			 * cancel out, and we simply don't do anything.
+			 */
+			walk_state->result_obj = operand[0];
+			walk_state->operands[0] = NULL;	/* Prevent deletion */
+		}
+		return_ACPI_STATUS(status);
+
+		/*
+		 * ACPI 2.0 Opcodes
+		 */
+	case AML_COPY_OP:	/* Copy (Source, Target) */
+
+		status =
+		    acpi_ut_copy_iobject_to_iobject(operand[0], &return_desc,
+						    walk_state);
+		break;
+
+	case AML_TO_DECSTRING_OP:	/* to_decimal_string (Data, Result) */
+
+		status = acpi_ex_convert_to_string(operand[0], &return_desc,
+						   ACPI_EXPLICIT_CONVERT_DECIMAL);
+		if (return_desc == operand[0]) {
+
+			/* No conversion performed, add ref to handle return value */
+			acpi_ut_add_reference(return_desc);
+		}
+		break;
+
+	case AML_TO_HEXSTRING_OP:	/* to_hex_string (Data, Result) */
+
+		status = acpi_ex_convert_to_string(operand[0], &return_desc,
+						   ACPI_EXPLICIT_CONVERT_HEX);
+		if (return_desc == operand[0]) {
+
+			/* No conversion performed, add ref to handle return value */
+			acpi_ut_add_reference(return_desc);
+		}
+		break;
+
+	case AML_TO_BUFFER_OP:	/* to_buffer (Data, Result) */
+
+		status = acpi_ex_convert_to_buffer(operand[0], &return_desc);
+		if (return_desc == operand[0]) {
+
+			/* No conversion performed, add ref to handle return value */
+			acpi_ut_add_reference(return_desc);
+		}
+		break;
+
+	case AML_TO_INTEGER_OP:	/* to_integer (Data, Result) */
+
+		status = acpi_ex_convert_to_integer(operand[0], &return_desc,
+						    ACPI_ANY_BASE);
+		if (return_desc == operand[0]) {
+
+			/* No conversion performed, add ref to handle return value */
+			acpi_ut_add_reference(return_desc);
+		}
+		break;
+
+	case AML_SHIFT_LEFT_BIT_OP:	/* shift_left_bit (Source, bit_num) */
+	case AML_SHIFT_RIGHT_BIT_OP:	/* shift_right_bit (Source, bit_num) */
+
+		/* These are two obsolete opcodes */
+
+		ACPI_ERROR((AE_INFO,
+			    "%s is obsolete and not implemented",
+			    acpi_ps_get_opcode_name(walk_state->opcode)));
+		status = AE_SUPPORT;
+		goto cleanup;
+
+	default:		/* Unknown opcode */
+
+		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+			    walk_state->opcode));
+		status = AE_AML_BAD_OPCODE;
+		goto cleanup;
+	}
+
+	if (ACPI_SUCCESS(status)) {
+
+		/* Store the return value computed above into the target object */
+
+		status = acpi_ex_store(return_desc, operand[1], walk_state);
+	}
+
+      cleanup:
+
+	/* Delete return object on error */
+
+	if (ACPI_FAILURE(status)) {
+		acpi_ut_remove_reference(return_desc);
+	}
+
+	/* Save return object on success */
+
+	else if (!walk_state->result_obj) {
+		walk_state->result_obj = return_desc;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_opcode_1A_0T_1R
+ *
+ * PARAMETERS:  walk_state          - Current state (contains AML opcode)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute opcode with one argument, no target, and a return value
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object **operand = &walk_state->operands[0];
+	union acpi_operand_object *temp_desc;
+	union acpi_operand_object *return_desc = NULL;
+	acpi_status status = AE_OK;
+	u32 type;
+	acpi_integer value;
+
+	ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_0T_1R,
+				acpi_ps_get_opcode_name(walk_state->opcode));
+
+	/* Examine the AML opcode */
+
+	switch (walk_state->opcode) {
+	case AML_LNOT_OP:	/* LNot (Operand) */
+
+		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		/*
+		 * Set result to ONES (TRUE) if Value == 0.  Note:
+		 * return_desc->Integer.Value is initially == 0 (FALSE) from above.
+		 */
+		if (!operand[0]->integer.value) {
+			return_desc->integer.value = ACPI_INTEGER_MAX;
+		}
+		break;
+
+	case AML_DECREMENT_OP:	/* Decrement (Operand)  */
+	case AML_INCREMENT_OP:	/* Increment (Operand)  */
+
+		/*
+		 * Create a new integer.  Can't just get the base integer and
+		 * increment it because it may be an Arg or Field.
+		 */
+		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		/*
+		 * Since we are expecting a Reference operand, it can be either a
+		 * NS Node or an internal object.
+		 */
+		temp_desc = operand[0];
+		if (ACPI_GET_DESCRIPTOR_TYPE(temp_desc) ==
+		    ACPI_DESC_TYPE_OPERAND) {
+
+			/* Internal reference object - prevent deletion */
+
+			acpi_ut_add_reference(temp_desc);
+		}
+
+		/*
+		 * Convert the Reference operand to an Integer (This removes a
+		 * reference on the Operand[0] object)
+		 *
+		 * NOTE:  We use LNOT_OP here in order to force resolution of the
+		 * reference operand to an actual integer.
+		 */
+		status =
+		    acpi_ex_resolve_operands(AML_LNOT_OP, &temp_desc,
+					     walk_state);
+		if (ACPI_FAILURE(status)) {
+			ACPI_EXCEPTION((AE_INFO, status,
+					"While resolving operands for [%s]",
+					acpi_ps_get_opcode_name(walk_state->
+								opcode)));
+
+			goto cleanup;
+		}
+
+		/*
+		 * temp_desc is now guaranteed to be an Integer object --
+		 * Perform the actual increment or decrement
+		 */
+		if (walk_state->opcode == AML_INCREMENT_OP) {
+			return_desc->integer.value =
+			    temp_desc->integer.value + 1;
+		} else {
+			return_desc->integer.value =
+			    temp_desc->integer.value - 1;
+		}
+
+		/* Finished with this Integer object */
+
+		acpi_ut_remove_reference(temp_desc);
+
+		/*
+		 * Store the result back (indirectly) through the original
+		 * Reference object
+		 */
+		status = acpi_ex_store(return_desc, operand[0], walk_state);
+		break;
+
+	case AML_TYPE_OP:	/* object_type (source_object) */
+
+		/*
+		 * Note: The operand is not resolved at this point because we want to
+		 * get the associated object, not its value.  For example, we don't
+		 * want to resolve a field_unit to its value, we want the actual
+		 * field_unit object.
+		 */
+
+		/* Get the type of the base object */
+
+		status =
+		    acpi_ex_resolve_multiple(walk_state, operand[0], &type,
+					     NULL);
+		if (ACPI_FAILURE(status)) {
+			goto cleanup;
+		}
+
+		/* Allocate a descriptor to hold the type. */
+
+		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		return_desc->integer.value = type;
+		break;
+
+	case AML_SIZE_OF_OP:	/* size_of (source_object) */
+
+		/*
+		 * Note: The operand is not resolved at this point because we want to
+		 * get the associated object, not its value.
+		 */
+
+		/* Get the base object */
+
+		status = acpi_ex_resolve_multiple(walk_state,
+						  operand[0], &type,
+						  &temp_desc);
+		if (ACPI_FAILURE(status)) {
+			goto cleanup;
+		}
+
+		/*
+		 * The type of the base object must be integer, buffer, string, or
+		 * package.  All others are not supported.
+		 *
+		 * NOTE: Integer is not specifically supported by the ACPI spec,
+		 * but is supported implicitly via implicit operand conversion.
+		 * Rather than bother with conversion, we just use the byte width
+		 * global (4 or 8 bytes).
+		 */
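+		/*
+		 * Example (added note): acpi_gbl_integer_byte_width is 8 when the
+		 * DSDT revision is 2 or greater (64-bit integers), 4 otherwise, so
+		 * SizeOf on an Integer yields 8 or 4 accordingly.
+		 */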
+		switch (type) {
+		case ACPI_TYPE_INTEGER:
+			value = acpi_gbl_integer_byte_width;
+			break;
+
+		case ACPI_TYPE_STRING:
+			value = temp_desc->string.length;
+			break;
+
+		case ACPI_TYPE_BUFFER:
+
+			/* Buffer arguments may not be evaluated at this point */
+
+			status = acpi_ds_get_buffer_arguments(temp_desc);
+			value = temp_desc->buffer.length;
+			break;
+
+		case ACPI_TYPE_PACKAGE:
+
+			/* Package arguments may not be evaluated at this point */
+
+			status = acpi_ds_get_package_arguments(temp_desc);
+			value = temp_desc->package.count;
+			break;
+
+		default:
+			ACPI_ERROR((AE_INFO,
+				    "Operand must be Buffer/Integer/String/Package - found type %s",
+				    acpi_ut_get_type_name(type)));
+			status = AE_AML_OPERAND_TYPE;
+			goto cleanup;
+		}
+
+		if (ACPI_FAILURE(status)) {
+			goto cleanup;
+		}
+
+		/*
+		 * Now that we have the size of the object, create a result
+		 * object to hold the value
+		 */
+		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		return_desc->integer.value = value;
+		break;
+
+	case AML_REF_OF_OP:	/* ref_of (source_object) */
+
+		status =
+		    acpi_ex_get_object_reference(operand[0], &return_desc,
+						 walk_state);
+		if (ACPI_FAILURE(status)) {
+			goto cleanup;
+		}
+		break;
+
+	case AML_DEREF_OF_OP:	/* deref_of (obj_reference | String) */
+
+		/* Check for a method local or argument, or standalone String */
+
+		if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) ==
+		    ACPI_DESC_TYPE_NAMED) {
+			temp_desc =
+			    acpi_ns_get_attached_object((struct
+							 acpi_namespace_node *)
+							operand[0]);
+			if (temp_desc
+			    &&
+			    ((ACPI_GET_OBJECT_TYPE(temp_desc) ==
+			      ACPI_TYPE_STRING)
+			     || (ACPI_GET_OBJECT_TYPE(temp_desc) ==
+				 ACPI_TYPE_LOCAL_REFERENCE))) {
+				operand[0] = temp_desc;
+				acpi_ut_add_reference(temp_desc);
+			} else {
+				status = AE_AML_OPERAND_TYPE;
+				goto cleanup;
+			}
+		} else {
+			switch (ACPI_GET_OBJECT_TYPE(operand[0])) {
+			case ACPI_TYPE_LOCAL_REFERENCE:
+				/*
+				 * This is a deref_of (local_x | arg_x)
+				 *
+				 * Must resolve/dereference the local/arg reference first
+				 */
+				switch (operand[0]->reference.class) {
+				case ACPI_REFCLASS_LOCAL:
+				case ACPI_REFCLASS_ARG:
+
+					/* Set Operand[0] to the value of the local/arg */
+
+					status =
+					    acpi_ds_method_data_get_value
+					    (operand[0]->reference.class,
+					     operand[0]->reference.value,
+					     walk_state, &temp_desc);
+					if (ACPI_FAILURE(status)) {
+						goto cleanup;
+					}
+
+					/*
+					 * Delete our reference to the input object and
+					 * point to the object just retrieved
+					 */
+					acpi_ut_remove_reference(operand[0]);
+					operand[0] = temp_desc;
+					break;
+
+				case ACPI_REFCLASS_REFOF:
+
+					/* Get the object to which the reference refers */
+
+					temp_desc =
+					    operand[0]->reference.object;
+					acpi_ut_remove_reference(operand[0]);
+					operand[0] = temp_desc;
+					break;
+
+				default:
+
+					/* Must be an Index op - handled below */
+					break;
+				}
+				break;
+
+			case ACPI_TYPE_STRING:
+				break;
+
+			default:
+				status = AE_AML_OPERAND_TYPE;
+				goto cleanup;
+			}
+		}
+
+		if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) !=
+		    ACPI_DESC_TYPE_NAMED) {
+			if (ACPI_GET_OBJECT_TYPE(operand[0]) ==
+			    ACPI_TYPE_STRING) {
+				/*
+				 * This is a deref_of (String). The string is a reference
+				 * to a named ACPI object.
+				 *
+				 * 1) Find the owning Node
+				 * 2) Dereference the node to an actual object. Could be a
+				 *    Field, so we need to resolve the node to a value.
+				 */
+				status =
+				    acpi_ns_get_node(walk_state->scope_info->
+						     scope.node,
+						     operand[0]->string.pointer,
+						     ACPI_NS_SEARCH_PARENT,
+						     ACPI_CAST_INDIRECT_PTR
+						     (struct
+						      acpi_namespace_node,
+						      &return_desc));
+				if (ACPI_FAILURE(status)) {
+					goto cleanup;
+				}
+
+				status =
+				    acpi_ex_resolve_node_to_value
+				    (ACPI_CAST_INDIRECT_PTR
+				     (struct acpi_namespace_node, &return_desc),
+				     walk_state);
+				goto cleanup;
+			}
+		}
+
+		/* Operand[0] may have changed from the code above */
+
+		if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) ==
+		    ACPI_DESC_TYPE_NAMED) {
+			/*
+			 * This is a deref_of (object_reference)
+			 * Get the actual object from the Node (This is the dereference).
+			 * This case may only happen when a local_x or arg_x is
+			 * dereferenced above.
+			 */
+			return_desc = acpi_ns_get_attached_object((struct
+								   acpi_namespace_node
+								   *)
+								  operand[0]);
+			acpi_ut_add_reference(return_desc);
+		} else {
+			/*
+			 * This must be a reference object produced by either the
+			 * Index() or ref_of() operator
+			 */
+			switch (operand[0]->reference.class) {
+			case ACPI_REFCLASS_INDEX:
+
+				/*
+				 * The target type for the Index operator must be
+				 * either a Buffer or a Package
+				 */
+				switch (operand[0]->reference.target_type) {
+				case ACPI_TYPE_BUFFER_FIELD:
+
+					temp_desc =
+					    operand[0]->reference.object;
+
+					/*
+					 * Create a new object that contains one element of the
+					 * buffer -- the element pointed to by the index.
+					 *
+					 * NOTE: index into a buffer is NOT a pointer to a
+					 * sub-buffer of the main buffer, it is only a pointer to a
+					 * single element (byte) of the buffer!
+					 */
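+					/*
+					 * Example (added): DerefOf (Index (BUF0, 2))
+					 * evaluates to the single byte BUF0[2],
+					 * returned here as an Integer.
+					 */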
+					return_desc =
+					    acpi_ut_create_internal_object
+					    (ACPI_TYPE_INTEGER);
+					if (!return_desc) {
+						status = AE_NO_MEMORY;
+						goto cleanup;
+					}
+
+					/*
+					 * Since we are returning the value of the buffer at the
+					 * indexed location, we don't need to add an additional
+					 * reference to the buffer itself.
+					 */
+					return_desc->integer.value =
+					    temp_desc->buffer.
+					    pointer[operand[0]->reference.
+						    value];
+					break;
+
+				case ACPI_TYPE_PACKAGE:
+
+					/*
+					 * Return the referenced element of the package.  We must
+					 * add another reference to the referenced object, however.
+					 */
+					return_desc =
+					    *(operand[0]->reference.where);
+					if (return_desc) {
+						acpi_ut_add_reference
+						    (return_desc);
+					}
+					break;
+
+				default:
+
+					ACPI_ERROR((AE_INFO,
+						    "Unknown Index TargetType %X in reference object %p",
+						    operand[0]->reference.
+						    target_type, operand[0]));
+					status = AE_AML_OPERAND_TYPE;
+					goto cleanup;
+				}
+				break;
+
+			case ACPI_REFCLASS_REFOF:
+
+				return_desc = operand[0]->reference.object;
+
+				if (ACPI_GET_DESCRIPTOR_TYPE(return_desc) ==
+				    ACPI_DESC_TYPE_NAMED) {
+					return_desc =
+					    acpi_ns_get_attached_object((struct
+									 acpi_namespace_node
+									 *)
+									return_desc);
+				}
+
+				/* Add another reference to the object! */
+
+				acpi_ut_add_reference(return_desc);
+				break;
+
+			default:
+				ACPI_ERROR((AE_INFO,
+					    "Unknown class in reference(%p) - %2.2X",
+					    operand[0],
+					    operand[0]->reference.class));
+
+				status = AE_TYPE;
+				goto cleanup;
+			}
+		}
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+			    walk_state->opcode));
+		status = AE_AML_BAD_OPCODE;
+		goto cleanup;
+	}
+
+      cleanup:
+
+	/* Delete return object on error */
+
+	if (ACPI_FAILURE(status)) {
+		acpi_ut_remove_reference(return_desc);
+	}
+
+	/* Save return object on success */
+
+	else {
+		walk_state->result_obj = return_desc;
+	}
+
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
new file mode 100644
index 0000000..0b4f513
--- /dev/null
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -0,0 +1,605 @@
+/******************************************************************************
+ *
+ * Module Name: exoparg2 - AML execution - opcodes with 2 arguments
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "acinterp.h"
+#include "acevents.h"
+#include "amlcode.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exoparg2")
+
+/*!
+ * Naming convention for AML interpreter execution routines.
+ *
+ * The routines that begin execution of AML opcodes are named with a common
+ * convention based upon the number of arguments, the number of target operands,
+ * and whether or not a value is returned:
+ *
+ *      AcpiExOpcode_xA_yT_zR
+ *
+ * Where:
+ *
+ * xA - ARGUMENTS:    The number of arguments (input operands) that are
+ *                    required for this opcode type (1 through 6 args).
+ * yT - TARGETS:      The number of targets (output operands) that are required
+ *                    for this opcode type (0, 1, or 2 targets).
+ * zR - RETURN VALUE: Indicates whether this opcode type returns a value
+ *                    as the function return (0 or 1).
+ *
+ * The AcpiExOpcode* functions are called via the Dispatcher component with
+ * fully resolved operands.
+!*/
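+
+/*
+ * Illustration (added comment): Notify (Device, Value) has two arguments, no
+ * target and no return value, so it is dispatched to acpi_ex_opcode_2A_0T_0R()
+ * below; Divide, with its two result targets and implicit quotient return, is
+ * handled by acpi_ex_opcode_2A_2T_1R().
+ */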
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_opcode_2A_0T_0R
+ *
+ * PARAMETERS:  walk_state          - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute opcode with two arguments, no target, and no return
+ *              value.
+ *
+ * ALLOCATION:  Deletes both operands
+ *
+ ******************************************************************************/
+acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object **operand = &walk_state->operands[0];
+	struct acpi_namespace_node *node;
+	u32 value;
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_0T_0R,
+				acpi_ps_get_opcode_name(walk_state->opcode));
+
+	/* Examine the opcode */
+
+	switch (walk_state->opcode) {
+	case AML_NOTIFY_OP:	/* Notify (notify_object, notify_value) */
+
+		/* The first operand is a namespace node */
+
+		node = (struct acpi_namespace_node *)operand[0];
+
+		/* Second value is the notify value */
+
+		value = (u32) operand[1]->integer.value;
+
+		/* Are notifies allowed on this object? */
+
+		if (!acpi_ev_is_notify_object(node)) {
+			ACPI_ERROR((AE_INFO,
+				    "Unexpected notify object type [%s]",
+				    acpi_ut_get_type_name(node->type)));
+
+			status = AE_AML_OPERAND_TYPE;
+			break;
+		}
+#ifdef ACPI_GPE_NOTIFY_CHECK
+		/*
+		 * GPE method wake/notify check.  Here, we want to ensure that we
+		 * don't receive any "DeviceWake" Notifies from a GPE _Lxx or _Exx
+		 * GPE method during system runtime.  If we do, the GPE is marked
+		 * as "wake-only" and disabled.
+		 *
+		 * 1) Is the Notify() value == device_wake?
+		 * 2) Is this a GPE deferred method?  (An _Lxx or _Exx method)
+		 * 3) Did the original GPE happen at system runtime?
+		 *    (versus during wake)
+		 *
+		 * If all three cases are true, this is a wake-only GPE that should
+		 * be disabled at runtime.
+		 */
+		if (value == 2) {	/* device_wake */
+			status =
+			    acpi_ev_check_for_wake_only_gpe(walk_state->
+							    gpe_event_info);
+			if (ACPI_FAILURE(status)) {
+
+				/* AE_WAKE_ONLY_GPE is the only expected error; ignore this notify */
+
+				return_ACPI_STATUS(AE_OK);
+			}
+		}
+#endif
+
+		/*
+		 * Dispatch the notify to the appropriate handler
+		 * NOTE: the request is queued for execution after this method
+		 * completes.  The notify handlers are NOT invoked synchronously
+		 * from this thread -- because handlers may in turn run other
+		 * control methods.
+		 */
+		status = acpi_ev_queue_notify_request(node, value);
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+			    walk_state->opcode));
+		status = AE_AML_BAD_OPCODE;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_opcode_2A_2T_1R
+ *
+ * PARAMETERS:  walk_state          - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute a dyadic operator (2 operands) with 2 output targets
+ *              and one implicit return value.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object **operand = &walk_state->operands[0];
+	union acpi_operand_object *return_desc1 = NULL;
+	union acpi_operand_object *return_desc2 = NULL;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_2T_1R,
+				acpi_ps_get_opcode_name(walk_state->opcode));
+
+	/* Execute the opcode */
+
+	switch (walk_state->opcode) {
+	case AML_DIVIDE_OP:
+
+		/* Divide (Dividend, Divisor, remainder_result quotient_result) */
+
+		return_desc1 =
+		    acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+		if (!return_desc1) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		return_desc2 =
+		    acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+		if (!return_desc2) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		/* Quotient to return_desc1, remainder to return_desc2 */
+
+		status = acpi_ut_divide(operand[0]->integer.value,
+					operand[1]->integer.value,
+					&return_desc1->integer.value,
+					&return_desc2->integer.value);
+		if (ACPI_FAILURE(status)) {
+			goto cleanup;
+		}
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+			    walk_state->opcode));
+		status = AE_AML_BAD_OPCODE;
+		goto cleanup;
+	}
+
+	/* Store the results to the target reference operands */
+
+	status = acpi_ex_store(return_desc2, operand[2], walk_state);
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	status = acpi_ex_store(return_desc1, operand[3], walk_state);
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+      cleanup:
+	/*
+	 * Since the remainder is not returned indirectly, remove a reference to
+	 * it. Only the quotient is returned indirectly.
+	 */
+	acpi_ut_remove_reference(return_desc2);
+
+	if (ACPI_FAILURE(status)) {
+
+		/* Delete the return object */
+
+		acpi_ut_remove_reference(return_desc1);
+	}
+
+	/* Save return object (the quotient) on success */
+
+	else {
+		walk_state->result_obj = return_desc1;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_opcode_2A_1T_1R
+ *
+ * PARAMETERS:  walk_state          - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute opcode with two arguments, one target, and a return
+ *              value.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object **operand = &walk_state->operands[0];
+	union acpi_operand_object *return_desc = NULL;
+	acpi_integer index;
+	acpi_status status = AE_OK;
+	acpi_size length;
+
+	ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_1T_1R,
+				acpi_ps_get_opcode_name(walk_state->opcode));
+
+	/* Execute the opcode */
+
+	if (walk_state->op_info->flags & AML_MATH) {
+
+		/* All simple math opcodes (add, etc.) */
+
+		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		return_desc->integer.value =
+		    acpi_ex_do_math_op(walk_state->opcode,
+				       operand[0]->integer.value,
+				       operand[1]->integer.value);
+		goto store_result_to_target;
+	}
+
+	switch (walk_state->opcode) {
+	case AML_MOD_OP:	/* Mod (Dividend, Divisor, remainder_result) (ACPI 2.0) */
+
+		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		/* return_desc will contain the remainder */
+
+		status = acpi_ut_divide(operand[0]->integer.value,
+					operand[1]->integer.value,
+					NULL, &return_desc->integer.value);
+		break;
+
+	case AML_CONCAT_OP:	/* Concatenate (Data1, Data2, Result) */
+
+		status = acpi_ex_do_concatenate(operand[0], operand[1],
+						&return_desc, walk_state);
+		break;
+
+	case AML_TO_STRING_OP:	/* to_string (Buffer, Length, Result) (ACPI 2.0) */
+
+		/*
+		 * Input object is guaranteed to be a buffer at this point (it may have
+		 * been converted.)  Copy the raw buffer data to a new object of
+		 * type String.
+		 */
+
+		/*
+		 * Get the length of the new string. It is the smallest of:
+		 * 1) Length of the input buffer
+		 * 2) Max length as specified in the to_string operator
+		 * 3) Length of input buffer up to a zero byte (null terminator)
+		 *
+		 * NOTE: A length of zero is ok, and will create a zero-length, null
+		 *       terminated string.
+		 */
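+		/*
+		 * Example (added): for the buffer { 0x41, 0x42, 0x00, 0x43 } and a
+		 * maximum length of 8, the computed length is 2 and the result is
+		 * the string "AB".
+		 */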
+		length = 0;
+		while ((length < operand[0]->buffer.length) &&
+		       (length < operand[1]->integer.value) &&
+		       (operand[0]->buffer.pointer[length])) {
+			length++;
+		}
+
+		/* Allocate a new string object */
+
+		return_desc = acpi_ut_create_string_object(length);
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		/*
+		 * Copy the raw buffer data with no transform.
+		 * (NULL terminated already)
+		 */
+		ACPI_MEMCPY(return_desc->string.pointer,
+			    operand[0]->buffer.pointer, length);
+		break;
+
+	case AML_CONCAT_RES_OP:
+
+		/* concatenate_res_template (Buffer, Buffer, Result) (ACPI 2.0) */
+
+		status = acpi_ex_concat_template(operand[0], operand[1],
+						 &return_desc, walk_state);
+		break;
+
+	case AML_INDEX_OP:	/* Index (Source Index Result) */
+
+		/* Create the internal return object */
+
+		return_desc =
+		    acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE);
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		/* Initialize the Index reference object */
+
+		index = operand[1]->integer.value;
+		return_desc->reference.value = (u32) index;
+		return_desc->reference.class = ACPI_REFCLASS_INDEX;
+
+		/*
+		 * At this point, the Source operand is a String, Buffer, or Package.
+		 * Verify that the index is within range.
+		 */
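+		/*
+		 * Example (added): Index (STR0, 3) on a five-character string is
+		 * valid; Index (STR0, 5) fails below with AE_AML_STRING_LIMIT.
+		 */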
+		switch (ACPI_GET_OBJECT_TYPE(operand[0])) {
+		case ACPI_TYPE_STRING:
+
+			if (index >= operand[0]->string.length) {
+				status = AE_AML_STRING_LIMIT;
+			}
+
+			return_desc->reference.target_type =
+			    ACPI_TYPE_BUFFER_FIELD;
+			break;
+
+		case ACPI_TYPE_BUFFER:
+
+			if (index >= operand[0]->buffer.length) {
+				status = AE_AML_BUFFER_LIMIT;
+			}
+
+			return_desc->reference.target_type =
+			    ACPI_TYPE_BUFFER_FIELD;
+			break;
+
+		case ACPI_TYPE_PACKAGE:
+
+			if (index >= operand[0]->package.count) {
+				status = AE_AML_PACKAGE_LIMIT;
+			}
+
+			return_desc->reference.target_type = ACPI_TYPE_PACKAGE;
+			return_desc->reference.where =
+			    &operand[0]->package.elements[index];
+			break;
+
+		default:
+
+			status = AE_AML_INTERNAL;
+			goto cleanup;
+		}
+
+		/* Failure means that the Index was beyond the end of the object */
+
+		if (ACPI_FAILURE(status)) {
+			ACPI_EXCEPTION((AE_INFO, status,
+					"Index (%X%8.8X) is beyond end of object",
+					ACPI_FORMAT_UINT64(index)));
+			goto cleanup;
+		}
+
+		/*
+		 * Save the target object and add a reference to it for the life
+		 * of the index
+		 */
+		return_desc->reference.object = operand[0];
+		acpi_ut_add_reference(operand[0]);
+
+		/* Store the reference to the Target */
+
+		status = acpi_ex_store(return_desc, operand[2], walk_state);
+
+		/* Return the reference */
+
+		walk_state->result_obj = return_desc;
+		goto cleanup;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+			    walk_state->opcode));
+		status = AE_AML_BAD_OPCODE;
+		break;
+	}
+
+      store_result_to_target:
+
+	if (ACPI_SUCCESS(status)) {
+		/*
+		 * Store the result of the operation (which is now in return_desc) into
+		 * the Target descriptor.
+		 */
+		status = acpi_ex_store(return_desc, operand[2], walk_state);
+		if (ACPI_FAILURE(status)) {
+			goto cleanup;
+		}
+
+		if (!walk_state->result_obj) {
+			walk_state->result_obj = return_desc;
+		}
+	}
+
+      cleanup:
+
+	/* Delete return object on error */
+
+	if (ACPI_FAILURE(status)) {
+		acpi_ut_remove_reference(return_desc);
+		walk_state->result_obj = NULL;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_opcode_2A_0T_1R
+ *
+ * PARAMETERS:  walk_state          - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute opcode with 2 arguments, no target, and a return value
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object **operand = &walk_state->operands[0];
+	union acpi_operand_object *return_desc = NULL;
+	acpi_status status = AE_OK;
+	u8 logical_result = FALSE;
+
+	ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_0T_1R,
+				acpi_ps_get_opcode_name(walk_state->opcode));
+
+	/* Create the internal return object */
+
+	return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+	if (!return_desc) {
+		status = AE_NO_MEMORY;
+		goto cleanup;
+	}
+
+	/* Execute the Opcode */
+
+	if (walk_state->op_info->flags & AML_LOGICAL_NUMERIC) {
+
+		/* logical_op (Operand0, Operand1) */
+
+		status = acpi_ex_do_logical_numeric_op(walk_state->opcode,
+						       operand[0]->integer.
+						       value,
+						       operand[1]->integer.
+						       value, &logical_result);
+		goto store_logical_result;
+	} else if (walk_state->op_info->flags & AML_LOGICAL) {
+
+		/* logical_op (Operand0, Operand1) */
+
+		status = acpi_ex_do_logical_op(walk_state->opcode, operand[0],
+					       operand[1], &logical_result);
+		goto store_logical_result;
+	}
+
+	switch (walk_state->opcode) {
+	case AML_ACQUIRE_OP:	/* Acquire (mutex_object, Timeout) */
+
+		status =
+		    acpi_ex_acquire_mutex(operand[1], operand[0], walk_state);
+		if (status == AE_TIME) {
+			logical_result = TRUE;	/* TRUE = Acquire timed out */
+			status = AE_OK;
+		}
+		break;
+
+	case AML_WAIT_OP:	/* Wait (event_object, Timeout) */
+
+		status = acpi_ex_system_wait_event(operand[1], operand[0]);
+		if (status == AE_TIME) {
+			logical_result = TRUE;	/* TRUE, Wait timed out */
+			status = AE_OK;
+		}
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+			    walk_state->opcode));
+		status = AE_AML_BAD_OPCODE;
+		goto cleanup;
+	}
+
+      store_logical_result:
+	/*
+	 * Set the return value according to logical_result: logical TRUE is
+	 * all ones (ACPI_INTEGER_MAX); the default is FALSE (zero).
+	 */
+	if (logical_result) {
+		return_desc->integer.value = ACPI_INTEGER_MAX;
+	}
+
+      cleanup:
+
+	/* Delete return object on error */
+
+	if (ACPI_FAILURE(status)) {
+		acpi_ut_remove_reference(return_desc);
+	}
+
+	/* Save return object on success */
+
+	else {
+		walk_state->result_obj = return_desc;
+	}
+
+	return_ACPI_STATUS(status);
+}
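The TRUE/FALSE encoding used above reduces to "all ones" versus zero. The helper below is a hypothetical illustration, not ACPICA API; note that for Acquire and Wait, TRUE means the operation timed out.

/* Hypothetical sketch: AML logical results are encoded as Ones
 * (ACPI_INTEGER_MAX) for TRUE and 0 for FALSE, which is exactly what the
 * function above stores into return_desc->integer.value. */
static unsigned long long encode_logical_result(int logical_result)
{
	return logical_result ? ~0ULL : 0ULL;
}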
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
new file mode 100644
index 0000000..c6520bb
--- /dev/null
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -0,0 +1,273 @@
+
+/******************************************************************************
+ *
+ * Module Name: exoparg3 - AML execution - opcodes with 3 arguments
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "acparser.h"
+#include "amlcode.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exoparg3")
+
+/*!
+ * Naming convention for AML interpreter execution routines.
+ *
+ * The routines that begin execution of AML opcodes are named with a common
+ * convention based upon the number of arguments, the number of target operands,
+ * and whether or not a value is returned:
+ *
+ *      AcpiExOpcode_xA_yT_zR
+ *
+ * Where:
+ *
+ * xA - ARGUMENTS:    The number of arguments (input operands) that are
+ *                    required for this opcode type (1 through 6 args).
+ * yT - TARGETS:      The number of targets (output operands) that are required
+ *                    for this opcode type (0, 1, or 2 targets).
+ * zR - RETURN VALUE: Indicates whether this opcode type returns a value
+ *                    as the function return (0 or 1).
+ *
+ * The AcpiExOpcode* functions are called via the Dispatcher component with
+ * fully resolved operands.
+!*/
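For orientation, the routines added in this patch series instantiate the convention as follows (examples drawn from these files):

/*
 *   acpi_ex_opcode_3A_0T_0R - 3 arguments, no target, no return value (Fatal)
 *   acpi_ex_opcode_3A_1T_1R - 3 arguments, 1 target,  1 return value  (Mid)
 *   acpi_ex_opcode_6A_0T_1R - 6 arguments, no target, 1 return value  (Match, LoadTable)
 */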
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_opcode_3A_0T_0R
+ *
+ * PARAMETERS:  walk_state          - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute Triadic operator (3 operands)
+ *
+ ******************************************************************************/
+acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object **operand = &walk_state->operands[0];
+	struct acpi_signal_fatal_info *fatal;
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE_STR(ex_opcode_3A_0T_0R,
+				acpi_ps_get_opcode_name(walk_state->opcode));
+
+	switch (walk_state->opcode) {
+	case AML_FATAL_OP:	/* Fatal (fatal_type fatal_code fatal_arg) */
+
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "FatalOp: Type %X Code %X Arg %X <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n",
+				  (u32) operand[0]->integer.value,
+				  (u32) operand[1]->integer.value,
+				  (u32) operand[2]->integer.value));
+
+		fatal = ACPI_ALLOCATE(sizeof(struct acpi_signal_fatal_info));
+		if (fatal) {
+			fatal->type = (u32) operand[0]->integer.value;
+			fatal->code = (u32) operand[1]->integer.value;
+			fatal->argument = (u32) operand[2]->integer.value;
+		}
+
+		/* Always signal the OS! */
+
+		status = acpi_os_signal(ACPI_SIGNAL_FATAL, fatal);
+
+		/* Might return while OS is shutting down, just continue */
+
+		ACPI_FREE(fatal);
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+			    walk_state->opcode));
+		status = AE_AML_BAD_OPCODE;
+		goto cleanup;
+	}
+
+      cleanup:
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_opcode_3A_1T_1R
+ *
+ * PARAMETERS:  walk_state          - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute Triadic operator (3 operands)
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object **operand = &walk_state->operands[0];
+	union acpi_operand_object *return_desc = NULL;
+	char *buffer = NULL;
+	acpi_status status = AE_OK;
+	acpi_integer index;
+	acpi_size length;
+
+	ACPI_FUNCTION_TRACE_STR(ex_opcode_3A_1T_1R,
+				acpi_ps_get_opcode_name(walk_state->opcode));
+
+	switch (walk_state->opcode) {
+	case AML_MID_OP:	/* Mid (Source[0], Index[1], Length[2], Result[3]) */
+
+		/*
+		 * Create the return object.  The Source operand is guaranteed to be
+		 * either a String or a Buffer, so just use its type.
+		 */
+		return_desc =
+		    acpi_ut_create_internal_object(ACPI_GET_OBJECT_TYPE
+						   (operand[0]));
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		/* Get the Integer values from the objects */
+
+		index = operand[1]->integer.value;
+		length = (acpi_size) operand[2]->integer.value;
+
+		/*
+		 * If the index is beyond the length of the String/Buffer, or if the
+		 * requested length is zero, return a zero-length String/Buffer
+		 */
+		if (index >= operand[0]->string.length) {
+			length = 0;
+		}
+
+		/* Truncate request if larger than the actual String/Buffer */
+
+		else if ((index + length) > operand[0]->string.length) {
+			length = (acpi_size) operand[0]->string.length -
+			    (acpi_size) index;
+		}
+
+		/* Strings always get a new buffer allocated (for the terminator); Buffers only when length > 0 */
+
+		switch (ACPI_GET_OBJECT_TYPE(operand[0])) {
+		case ACPI_TYPE_STRING:
+
+			/* Always allocate a new buffer for the String */
+
+			buffer = ACPI_ALLOCATE_ZEROED((acpi_size) length + 1);
+			if (!buffer) {
+				status = AE_NO_MEMORY;
+				goto cleanup;
+			}
+			break;
+
+		case ACPI_TYPE_BUFFER:
+
+			/* If the requested length is zero, don't allocate a buffer */
+
+			if (length > 0) {
+
+				/* Allocate a new buffer for the Buffer */
+
+				buffer = ACPI_ALLOCATE_ZEROED(length);
+				if (!buffer) {
+					status = AE_NO_MEMORY;
+					goto cleanup;
+				}
+			}
+			break;
+
+		default:	/* Should not happen */
+
+			status = AE_AML_OPERAND_TYPE;
+			goto cleanup;
+		}
+
+		if (buffer) {
+
+			/* We have a buffer, copy the portion requested */
+
+			ACPI_MEMCPY(buffer, operand[0]->string.pointer + index,
+				    length);
+		}
+
+		/* Set the length of the new String/Buffer */
+
+		return_desc->string.pointer = buffer;
+		return_desc->string.length = (u32) length;
+
+		/* Mark buffer initialized */
+
+		return_desc->buffer.flags |= AOPOBJ_DATA_VALID;
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+			    walk_state->opcode));
+		status = AE_AML_BAD_OPCODE;
+		goto cleanup;
+	}
+
+	/* Store the result in the target */
+
+	status = acpi_ex_store(return_desc, operand[3], walk_state);
+
+      cleanup:
+
+	/* Delete return object on error */
+
+	if (ACPI_FAILURE(status) || walk_state->result_obj) {
+		acpi_ut_remove_reference(return_desc);
+		walk_state->result_obj = NULL;
+	}
+
+	/* Set the return object and exit */
+
+	else {
+		walk_state->result_obj = return_desc;
+	}
+	return_ACPI_STATUS(status);
+}
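The index/length clamping performed by Mid above reduces to a small rule; the sketch below is plain illustrative C, not ACPICA code. For example, Mid("ACPI", 2, 10) yields "PI": index 2 is in range, and the length is truncated from 10 to 4 - 2 = 2.

/* Illustrative only: how Mid clamps the requested (index, length)
 * against the actual String/Buffer length, per the code above. */
static unsigned long mid_clamped_length(unsigned long source_length,
					unsigned long index,
					unsigned long length)
{
	if (index >= source_length)
		return 0;			/* start past the end: empty result */
	if (index + length > source_length)
		return source_length - index;	/* truncate to what remains */
	return length;
}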
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
new file mode 100644
index 0000000..ae43f76
--- /dev/null
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -0,0 +1,341 @@
+
+/******************************************************************************
+ *
+ * Module Name: exoparg6 - AML execution - opcodes with 6 arguments
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "acparser.h"
+#include "amlcode.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exoparg6")
+
+/*!
+ * Naming convention for AML interpreter execution routines.
+ *
+ * The routines that begin execution of AML opcodes are named with a common
+ * convention based upon the number of arguments, the number of target operands,
+ * and whether or not a value is returned:
+ *
+ *      AcpiExOpcode_xA_yT_zR
+ *
+ * Where:
+ *
+ * xA - ARGUMENTS:    The number of arguments (input operands) that are
+ *                    required for this opcode type (1 through 6 args).
+ * yT - TARGETS:      The number of targets (output operands) that are required
+ *                    for this opcode type (0, 1, or 2 targets).
+ * zR - RETURN VALUE: Indicates whether this opcode type returns a value
+ *                    as the function return (0 or 1).
+ *
+ * The AcpiExOpcode* functions are called via the Dispatcher component with
+ * fully resolved operands.
+!*/
+/* Local prototypes */
+static u8
+acpi_ex_do_match(u32 match_op,
+		 union acpi_operand_object *package_obj,
+		 union acpi_operand_object *match_obj);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_do_match
+ *
+ * PARAMETERS:  match_op        - The AML match operand
+ *              package_obj     - Object from the target package
+ *              match_obj       - Object to be matched
+ *
+ * RETURN:      TRUE if the match is successful, FALSE otherwise
+ *
+ * DESCRIPTION: Implements the low-level match for the ASL Match operator.
+ *              Package elements will be implicitly converted to the type of
+ *              the match object (Integer/Buffer/String).
+ *
+ ******************************************************************************/
+
+static u8
+acpi_ex_do_match(u32 match_op,
+		 union acpi_operand_object *package_obj,
+		 union acpi_operand_object *match_obj)
+{
+	u8 logical_result = TRUE;
+	acpi_status status;
+
+	/*
+	 * Note: Since the package_obj/match_obj ordering is opposite to that of
+	 * the standard logical operators, we have to reverse them when we call
+	 * do_logical_op in order to make the implicit conversion rules work
+	 * correctly. However, this means we have to flip the entire equation
+	 * also. A bit ugly perhaps, but overall, better than fussing the
+	 * parameters around at runtime, over and over again.
+	 *
+	 * Below, P[i] refers to the package element, M refers to the Match object.
+	 */
+	switch (match_op) {
+	case MATCH_MTR:
+
+		/* Always true */
+
+		break;
+
+	case MATCH_MEQ:
+
+		/*
+		 * True if equal: (P[i] == M)
+		 * Change to:     (M == P[i])
+		 */
+		status =
+		    acpi_ex_do_logical_op(AML_LEQUAL_OP, match_obj, package_obj,
+					  &logical_result);
+		if (ACPI_FAILURE(status)) {
+			return (FALSE);
+		}
+		break;
+
+	case MATCH_MLE:
+
+		/*
+		 * True if less than or equal: (P[i] <= M) (P[i] not_greater than M)
+		 * Change to:                  (M >= P[i]) (M not_less than P[i])
+		 */
+		status =
+		    acpi_ex_do_logical_op(AML_LLESS_OP, match_obj, package_obj,
+					  &logical_result);
+		if (ACPI_FAILURE(status)) {
+			return (FALSE);
+		}
+		logical_result = (u8) ! logical_result;
+		break;
+
+	case MATCH_MLT:
+
+		/*
+		 * True if less than: (P[i] < M)
+		 * Change to:         (M > P[i])
+		 */
+		status =
+		    acpi_ex_do_logical_op(AML_LGREATER_OP, match_obj,
+					  package_obj, &logical_result);
+		if (ACPI_FAILURE(status)) {
+			return (FALSE);
+		}
+		break;
+
+	case MATCH_MGE:
+
+		/*
+		 * True if greater than or equal: (P[i] >= M) (P[i] not_less than M)
+		 * Change to:                     (M <= P[i]) (M not_greater than P[i])
+		 */
+		status =
+		    acpi_ex_do_logical_op(AML_LGREATER_OP, match_obj,
+					  package_obj, &logical_result);
+		if (ACPI_FAILURE(status)) {
+			return (FALSE);
+		}
+		logical_result = (u8) ! logical_result;
+		break;
+
+	case MATCH_MGT:
+
+		/*
+		 * True if greater than: (P[i] > M)
+		 * Change to:            (M < P[i])
+		 */
+		status =
+		    acpi_ex_do_logical_op(AML_LLESS_OP, match_obj, package_obj,
+					  &logical_result);
+		if (ACPI_FAILURE(status)) {
+			return (FALSE);
+		}
+		break;
+
+	default:
+
+		/* Undefined */
+
+		return (FALSE);
+	}
+
+	return logical_result;
+}
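Because acpi_ex_do_logical_op applies implicit conversion based on its first operand, the code above passes the match object first and compensates by flipping the operator, negating the result for MLE and MGE. Spelled out, with P the package element and M the match object:

/*
 *   MTR:  always TRUE
 *   MEQ:  P == M   evaluated as    (M == P)
 *   MLE:  P <= M   evaluated as   !(M <  P)
 *   MLT:  P <  M   evaluated as    (M >  P)
 *   MGE:  P >= M   evaluated as   !(M >  P)
 *   MGT:  P >  M   evaluated as    (M <  P)
 */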
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_opcode_6A_0T_1R
+ *
+ * PARAMETERS:  walk_state          - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute opcode with 6 arguments, no target, and a return value
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
+{
+	union acpi_operand_object **operand = &walk_state->operands[0];
+	union acpi_operand_object *return_desc = NULL;
+	acpi_status status = AE_OK;
+	acpi_integer index;
+	union acpi_operand_object *this_element;
+
+	ACPI_FUNCTION_TRACE_STR(ex_opcode_6A_0T_1R,
+				acpi_ps_get_opcode_name(walk_state->opcode));
+
+	switch (walk_state->opcode) {
+	case AML_MATCH_OP:
+		/*
+		 * Match (search_pkg[0], match_op1[1], match_obj1[2],
+		 *                      match_op2[3], match_obj2[4], start_index[5])
+		 */
+
+		/* Validate both Match Term Operators (MTR, MEQ, etc.) */
+
+		if ((operand[1]->integer.value > MAX_MATCH_OPERATOR) ||
+		    (operand[3]->integer.value > MAX_MATCH_OPERATOR)) {
+			ACPI_ERROR((AE_INFO, "Match operator out of range"));
+			status = AE_AML_OPERAND_VALUE;
+			goto cleanup;
+		}
+
+		/* Get the package start_index, validate against the package length */
+
+		index = operand[5]->integer.value;
+		if (index >= operand[0]->package.count) {
+			ACPI_ERROR((AE_INFO,
+				    "Index (%X%8.8X) beyond package end (%X)",
+				    ACPI_FORMAT_UINT64(index),
+				    operand[0]->package.count));
+			status = AE_AML_PACKAGE_LIMIT;
+			goto cleanup;
+		}
+
+		/* Create an integer for the return value */
+
+		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+
+		}
+
+		/* Default return value if no match found */
+
+		return_desc->integer.value = ACPI_INTEGER_MAX;
+
+		/*
+		 * Examine each element until a match is found. Both match conditions
+		 * must be satisfied for a match to occur. Within the loop,
+		 * "continue" signifies that the current element does not match
+		 * and the next should be examined.
+		 *
+		 * Upon finding a match, the loop will terminate via "break" at
+		 * the bottom.  If it terminates "normally", the return value will
+		 * still be ACPI_INTEGER_MAX (Ones), its initial value, indicating
+		 * that no match was found.
+		 */
+		for (; index < operand[0]->package.count; index++) {
+
+			/* Get the current package element */
+
+			this_element = operand[0]->package.elements[index];
+
+			/* Treat any uninitialized (NULL) elements as non-matching */
+
+			if (!this_element) {
+				continue;
+			}
+
+			/*
+			 * Both match conditions must be satisfied. Execution of a continue
+			 * (proceed to next iteration of enclosing for loop) signifies a
+			 * non-match.
+			 */
+			if (!acpi_ex_do_match((u32) operand[1]->integer.value,
+					      this_element, operand[2])) {
+				continue;
+			}
+
+			if (!acpi_ex_do_match((u32) operand[3]->integer.value,
+					      this_element, operand[4])) {
+				continue;
+			}
+
+			/* Match found: Index is the return value */
+
+			return_desc->integer.value = index;
+			break;
+		}
+		break;
+
+	case AML_LOAD_TABLE_OP:
+
+		status = acpi_ex_load_table_op(walk_state, &return_desc);
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+			    walk_state->opcode));
+		status = AE_AML_BAD_OPCODE;
+		goto cleanup;
+	}
+
+      cleanup:
+
+	/* Delete return object on error */
+
+	if (ACPI_FAILURE(status)) {
+		acpi_ut_remove_reference(return_desc);
+	}
+
+	/* Save return object on success */
+
+	else {
+		walk_state->result_obj = return_desc;
+	}
+
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
new file mode 100644
index 0000000..a226f74
--- /dev/null
+++ b/drivers/acpi/acpica/exprep.c
@@ -0,0 +1,590 @@
+
+/******************************************************************************
+ *
+ * Module Name: exprep - ACPI AML (p-code) execution - field prep utilities
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exprep")
+
+/* Local prototypes */
+static u32
+acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
+			    u8 field_flags, u32 * return_byte_alignment);
+
+#ifdef ACPI_UNDER_DEVELOPMENT
+
+static u32
+acpi_ex_generate_access(u32 field_bit_offset,
+			u32 field_bit_length, u32 region_length);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_generate_access
+ *
+ * PARAMETERS:  field_bit_offset    - Start of field within parent region/buffer
+ *              field_bit_length    - Length of field in bits
+ *              region_length       - Length of parent in bytes
+ *
+ * RETURN:      Field granularity (8, 16, 32 or 64) and
+ *              byte_alignment (1, 2, 4, or 8)
+ *
+ * DESCRIPTION: Generate an optimal access width for fields defined with the
+ *              any_acc keyword.
+ *
+ * NOTE: Need to have the region_length in order to check for boundary
+ *       conditions (end-of-region).  However, the region_length comes from a
+ *       deferred operation.  Therefore, to complete this implementation, the
+ *       generation of this access width must be deferred until the region
+ *       length has been evaluated.
+ *
+ ******************************************************************************/
+
+static u32
+acpi_ex_generate_access(u32 field_bit_offset,
+			u32 field_bit_length, u32 region_length)
+{
+	u32 field_byte_length;
+	u32 field_byte_offset;
+	u32 field_byte_end_offset;
+	u32 access_byte_width;
+	u32 field_start_offset;
+	u32 field_end_offset;
+	u32 minimum_access_width = 0xFFFFFFFF;
+	u32 minimum_accesses = 0xFFFFFFFF;
+	u32 accesses;
+
+	ACPI_FUNCTION_TRACE(ex_generate_access);
+
+	/* Round Field start offset and length to "minimal" byte boundaries */
+
+	field_byte_offset = ACPI_DIV_8(ACPI_ROUND_DOWN(field_bit_offset, 8));
+	field_byte_end_offset = ACPI_DIV_8(ACPI_ROUND_UP(field_bit_length +
+							 field_bit_offset, 8));
+	field_byte_length = field_byte_end_offset - field_byte_offset;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+			  "Bit length %d, Bit offset %d\n",
+			  field_bit_length, field_bit_offset));
+
+	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+			  "Byte Length %d, Byte Offset %d, End Offset %d\n",
+			  field_byte_length, field_byte_offset,
+			  field_byte_end_offset));
+
+	/*
+	 * Iterative search for the maximum access width that is both aligned
+	 * and does not go beyond the end of the region
+	 *
+	 * Start at byte_acc and work upwards to qword_acc max. (1,2,4,8 bytes)
+	 */
+	for (access_byte_width = 1; access_byte_width <= 8;
+	     access_byte_width <<= 1) {
+		/*
+		 * 1) Round end offset up to next access boundary and make sure that
+		 *    this does not go beyond the end of the parent region.
+		 * 2) When the Access width is greater than the field_byte_length, we
+		 *    are done. (This does not optimize for the perfectly aligned
+		 *    case yet).
+		 */
+		if (ACPI_ROUND_UP(field_byte_end_offset, access_byte_width) <=
+		    region_length) {
+			field_start_offset =
+			    ACPI_ROUND_DOWN(field_byte_offset,
+					    access_byte_width) /
+			    access_byte_width;
+
+			field_end_offset =
+			    ACPI_ROUND_UP((field_byte_length +
+					   field_byte_offset),
+					  access_byte_width) /
+			    access_byte_width;
+
+			accesses = field_end_offset - field_start_offset;
+
+			ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+					  "AccessWidth %d end is within region\n",
+					  access_byte_width));
+
+			ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+					  "Field Start %d, Field End %d -- requires %d accesses\n",
+					  field_start_offset, field_end_offset,
+					  accesses));
+
+			/* Single access is optimal */
+
+			if (accesses <= 1) {
+				ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+						  "Entire field can be accessed with one operation of size %d\n",
+						  access_byte_width));
+				return_VALUE(access_byte_width);
+			}
+
+			/*
+			 * Fits in the region, but requires more than one read/write.
+			 * Try the next wider access on the next iteration.
+			 */
+			if (accesses < minimum_accesses) {
+				minimum_accesses = accesses;
+				minimum_access_width = access_byte_width;
+			}
+		} else {
+			ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+					  "AccessWidth %d end is NOT within region\n",
+					  access_byte_width));
+			if (access_byte_width == 1) {
+				ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+						  "Field goes beyond end-of-region!\n"));
+
+				/* Field does not fit in the region at all */
+
+				return_VALUE(0);
+			}
+
+			/*
+			 * This width goes beyond the end-of-region, back off to
+			 * previous access
+			 */
+			ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+					  "Backing off to previous optimal access width of %d\n",
+					  minimum_access_width));
+			return_VALUE(minimum_access_width);
+		}
+	}
+
+	/*
+	 * Could not read/write field with one operation,
+	 * just use max access width
+	 */
+	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+			  "Cannot access field in one operation, using width 8\n"));
+	return_VALUE(8);
+}
+#endif				/* ACPI_UNDER_DEVELOPMENT */
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_decode_field_access
+ *
+ * PARAMETERS:  obj_desc            - Field object
+ *              field_flags         - Encoded field flags (contains access bits)
+ *              return_byte_alignment - Where the byte alignment is returned
+ *
+ * RETURN:      Field granularity (8, 16, 32 or 64) and
+ *              byte_alignment (1, 2, 4, or 8)
+ *
+ * DESCRIPTION: Decode the access_type bits of a field definition.
+ *
+ ******************************************************************************/
+
+static u32
+acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
+			    u8 field_flags, u32 * return_byte_alignment)
+{
+	u32 access;
+	u32 byte_alignment;
+	u32 bit_length;
+
+	ACPI_FUNCTION_TRACE(ex_decode_field_access);
+
+	access = (field_flags & AML_FIELD_ACCESS_TYPE_MASK);
+
+	switch (access) {
+	case AML_FIELD_ACCESS_ANY:
+
+#ifdef ACPI_UNDER_DEVELOPMENT
+		byte_alignment =
+		    acpi_ex_generate_access(obj_desc->common_field.
+					    start_field_bit_offset,
+					    obj_desc->common_field.bit_length,
+					    0xFFFFFFFF
+					    /* Temp until we pass region_length as parameter */
+		    );
+		bit_length = byte_alignment * 8;
+#endif
+
+		byte_alignment = 1;
+		bit_length = 8;
+		break;
+
+	case AML_FIELD_ACCESS_BYTE:
+	case AML_FIELD_ACCESS_BUFFER:	/* ACPI 2.0 (SMBus Buffer) */
+		byte_alignment = 1;
+		bit_length = 8;
+		break;
+
+	case AML_FIELD_ACCESS_WORD:
+		byte_alignment = 2;
+		bit_length = 16;
+		break;
+
+	case AML_FIELD_ACCESS_DWORD:
+		byte_alignment = 4;
+		bit_length = 32;
+		break;
+
+	case AML_FIELD_ACCESS_QWORD:	/* ACPI 2.0 */
+		byte_alignment = 8;
+		bit_length = 64;
+		break;
+
+	default:
+		/* Invalid field access type */
+
+		ACPI_ERROR((AE_INFO, "Unknown field access type %X", access));
+		return_UINT32(0);
+	}
+
+	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_BUFFER_FIELD) {
+		/*
+		 * buffer_field access can be on any byte boundary, so the
+		 * byte_alignment is always 1 byte -- regardless of any byte_alignment
+		 * implied by the field access type.
+		 */
+		byte_alignment = 1;
+	}
+
+	*return_byte_alignment = byte_alignment;
+	return_UINT32(bit_length);
+}
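The decode above amounts to a small lookup table; summarized from the switch statement (no new behavior implied):

/*
 *   AnyAcc, ByteAcc, BufferAcc -> bit_length  8, byte_alignment 1
 *   WordAcc                    -> bit_length 16, byte_alignment 2
 *   DWordAcc                   -> bit_length 32, byte_alignment 4
 *   QWordAcc                   -> bit_length 64, byte_alignment 8
 *
 * Buffer fields then force byte_alignment back to 1, since they may start
 * on any byte boundary.
 */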
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_prep_common_field_object
+ *
+ * PARAMETERS:  obj_desc            - The field object
+ *              field_flags         - Access, lock_rule, and update_rule.
+ *                                    The format of a field_flag is described
+ *                                    in the ACPI specification
+ *              field_attribute     - Special attributes (not used)
+ *              field_bit_position  - Field start position
+ *              field_bit_length    - Field length in number of bits
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Initialize the areas of the field object that are common
+ *              to the various types of fields.  Note: This is very "sensitive"
+ *              code because we are solving the general case for field
+ *              alignment.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
+				 u8 field_flags,
+				 u8 field_attribute,
+				 u32 field_bit_position, u32 field_bit_length)
+{
+	u32 access_bit_width;
+	u32 byte_alignment;
+	u32 nearest_byte_address;
+
+	ACPI_FUNCTION_TRACE(ex_prep_common_field_object);
+
+	/*
+	 * Note: the structure being initialized is the
+	 * ACPI_COMMON_FIELD_INFO;  No structure fields outside of the common
+	 * area are initialized by this procedure.
+	 */
+	obj_desc->common_field.field_flags = field_flags;
+	obj_desc->common_field.attribute = field_attribute;
+	obj_desc->common_field.bit_length = field_bit_length;
+
+	/*
+	 * Decode the access type so we can compute offsets.  The access type gives
+	 * two pieces of information - the width of each field access and the
+	 * necessary byte_alignment (address granularity) of the access.
+	 *
+	 * For any_acc, the access_bit_width is the largest width that is both
+	 * necessary and possible in an attempt to access the whole field in one
+	 * I/O operation.  However, for any_acc, the byte_alignment is always one
+	 * byte.
+	 *
+	 * For all Buffer Fields, the byte_alignment is always one byte.
+	 *
+	 * For all other access types (Byte, Word, Dword, Qword), the Bitwidth is
+	 * the same (equivalent) as the byte_alignment.
+	 */
+	access_bit_width = acpi_ex_decode_field_access(obj_desc, field_flags,
+						       &byte_alignment);
+	if (!access_bit_width) {
+		return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
+	}
+
+	/* Setup width (access granularity) fields */
+
+	obj_desc->common_field.access_byte_width = (u8)
+	    ACPI_DIV_8(access_bit_width);	/* 1,  2,  4,  8 */
+
+	obj_desc->common_field.access_bit_width = (u8) access_bit_width;
+
+	/*
+	 * base_byte_offset is the address of the start of the field within the
+	 * region.  It is the byte address of the first *datum* (field-width data
+	 * unit) of the field. (i.e., the first datum that contains at least the
+	 * first *bit* of the field.)
+	 *
+	 * Note: byte_alignment is always either equal to the access_bit_width or 8
+	 * (Byte access), and it defines the addressing granularity of the parent
+	 * region or buffer.
+	 */
+	nearest_byte_address =
+	    ACPI_ROUND_BITS_DOWN_TO_BYTES(field_bit_position);
+	obj_desc->common_field.base_byte_offset = (u32)
+	    ACPI_ROUND_DOWN(nearest_byte_address, byte_alignment);
+
+	/*
+	 * start_field_bit_offset is the offset of the first bit of the field within
+	 * a field datum.
+	 */
+	obj_desc->common_field.start_field_bit_offset = (u8)
+	    (field_bit_position -
+	     ACPI_MUL_8(obj_desc->common_field.base_byte_offset));
+
+	/*
+	 * Does the entire field fit within a single field access element? (datum)
+	 * (i.e., without crossing a datum boundary)
+	 */
+	if ((obj_desc->common_field.start_field_bit_offset +
+	     field_bit_length) <= (u16) access_bit_width) {
+		obj_desc->common.flags |= AOPOBJ_SINGLE_DATUM;
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
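A worked example of the offset arithmetic above, assuming a WordAcc field declared at bit position 35 with a length of 6 bits (values chosen purely for illustration):

/*
 * WordAcc => access_bit_width = 16, byte_alignment = 2
 *
 *   nearest_byte_address   = 35 / 8                = 4
 *   base_byte_offset       = ACPI_ROUND_DOWN(4, 2) = 4
 *   start_field_bit_offset = 35 - (4 * 8)          = 3
 *
 * Since 3 + 6 <= 16, the field fits within one access datum and
 * AOPOBJ_SINGLE_DATUM is set.
 */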
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_prep_field_value
+ *
+ * PARAMETERS:  Info    - Contains all field creation info
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Construct a union acpi_operand_object of type def_field and
+ *              connect it to the parent Node.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
+{
+	union acpi_operand_object *obj_desc;
+	union acpi_operand_object *second_desc = NULL;
+	u32 type;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ex_prep_field_value);
+
+	/* Parameter validation */
+
+	if (info->field_type != ACPI_TYPE_LOCAL_INDEX_FIELD) {
+		if (!info->region_node) {
+			ACPI_ERROR((AE_INFO, "Null RegionNode"));
+			return_ACPI_STATUS(AE_AML_NO_OPERAND);
+		}
+
+		type = acpi_ns_get_type(info->region_node);
+		if (type != ACPI_TYPE_REGION) {
+			ACPI_ERROR((AE_INFO,
+				    "Needed Region, found type %X (%s)",
+				    type, acpi_ut_get_type_name(type)));
+
+			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+		}
+	}
+
+	/* Allocate a new field object */
+
+	obj_desc = acpi_ut_create_internal_object(info->field_type);
+	if (!obj_desc) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Initialize areas of the object that are common to all fields */
+
+	obj_desc->common_field.node = info->field_node;
+	status = acpi_ex_prep_common_field_object(obj_desc, info->field_flags,
+						  info->attribute,
+						  info->field_bit_position,
+						  info->field_bit_length);
+	if (ACPI_FAILURE(status)) {
+		acpi_ut_delete_object_desc(obj_desc);
+		return_ACPI_STATUS(status);
+	}
+
+	/* Initialize areas of the object that are specific to the field type */
+
+	switch (info->field_type) {
+	case ACPI_TYPE_LOCAL_REGION_FIELD:
+
+		obj_desc->field.region_obj =
+		    acpi_ns_get_attached_object(info->region_node);
+
+		/* An additional reference for the container */
+
+		acpi_ut_add_reference(obj_desc->field.region_obj);
+
+		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+				  "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
+				  obj_desc->field.start_field_bit_offset,
+				  obj_desc->field.base_byte_offset,
+				  obj_desc->field.access_byte_width,
+				  obj_desc->field.region_obj));
+		break;
+
+	case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+		obj_desc->bank_field.value = info->bank_value;
+		obj_desc->bank_field.region_obj =
+		    acpi_ns_get_attached_object(info->region_node);
+		obj_desc->bank_field.bank_obj =
+		    acpi_ns_get_attached_object(info->register_node);
+
+		/* An additional reference for the attached objects */
+
+		acpi_ut_add_reference(obj_desc->bank_field.region_obj);
+		acpi_ut_add_reference(obj_desc->bank_field.bank_obj);
+
+		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+				  "Bank Field: BitOff %X, Off %X, Gran %X, Region %p, BankReg %p\n",
+				  obj_desc->bank_field.start_field_bit_offset,
+				  obj_desc->bank_field.base_byte_offset,
+				  obj_desc->field.access_byte_width,
+				  obj_desc->bank_field.region_obj,
+				  obj_desc->bank_field.bank_obj));
+
+		/*
+		 * Remember location in AML stream of the field unit
+		 * opcode and operands -- since the bank_value
+		 * operands must be evaluated.
+		 */
+		second_desc = obj_desc->common.next_object;
+		second_desc->extra.aml_start =
+		    ACPI_CAST_PTR(union acpi_parse_object,
+				  info->data_register_node)->named.data;
+		second_desc->extra.aml_length =
+		    ACPI_CAST_PTR(union acpi_parse_object,
+				  info->data_register_node)->named.length;
+
+		break;
+
+	case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+		/* Get the Index and Data registers */
+
+		obj_desc->index_field.index_obj =
+		    acpi_ns_get_attached_object(info->register_node);
+		obj_desc->index_field.data_obj =
+		    acpi_ns_get_attached_object(info->data_register_node);
+
+		if (!obj_desc->index_field.data_obj
+		    || !obj_desc->index_field.index_obj) {
+			ACPI_ERROR((AE_INFO,
+				    "Null Index Object during field prep"));
+			acpi_ut_delete_object_desc(obj_desc);
+			return_ACPI_STATUS(AE_AML_INTERNAL);
+		}
+
+		/* An additional reference for the attached objects */
+
+		acpi_ut_add_reference(obj_desc->index_field.data_obj);
+		acpi_ut_add_reference(obj_desc->index_field.index_obj);
+
+		/*
+		 * April 2006: Changed to match MS behavior
+		 *
+		 * The value written to the Index register is the byte offset of the
+		 * target field in units of the granularity of the index_field
+		 *
+		 * Previously, the value was calculated as an index in terms of the
+		 * width of the Data register, as below:
+		 *
+		 *      obj_desc->index_field.Value = (u32)
+		 *          (Info->field_bit_position / ACPI_MUL_8 (
+		 *              obj_desc->Field.access_byte_width));
+		 *
+		 * February 2006: Tried value as a byte offset:
+		 *      obj_desc->index_field.Value = (u32)
+		 *          ACPI_DIV_8 (Info->field_bit_position);
+		 */
+		obj_desc->index_field.value =
+		    (u32) ACPI_ROUND_DOWN(ACPI_DIV_8(info->field_bit_position),
+					  obj_desc->index_field.
+					  access_byte_width);
+
+		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+				  "IndexField: BitOff %X, Off %X, Value %X, Gran %X, Index %p, Data %p\n",
+				  obj_desc->index_field.start_field_bit_offset,
+				  obj_desc->index_field.base_byte_offset,
+				  obj_desc->index_field.value,
+				  obj_desc->field.access_byte_width,
+				  obj_desc->index_field.index_obj,
+				  obj_desc->index_field.data_obj));
+		break;
+
+	default:
+		/* No other types should get here */
+		break;
+	}
+
+	/*
+	 * Store the constructed descriptor (obj_desc) into the parent Node,
+	 * preserving the current type of that named_obj.
+	 */
+	status = acpi_ns_attach_object(info->field_node, obj_desc,
+				       acpi_ns_get_type(info->field_node));
+
+	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+			  "Set NamedObj %p [%4.4s], ObjDesc %p\n",
+			  info->field_node,
+			  acpi_ut_get_node_name(info->field_node), obj_desc));
+
+	/* Remove local reference to the object */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
new file mode 100644
index 0000000..76ec8ff
--- /dev/null
+++ b/drivers/acpi/acpica/exregion.c
@@ -0,0 +1,499 @@
+
+/******************************************************************************
+ *
+ * Module Name: exregion - ACPI default op_region (address space) handlers
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exregion")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_system_memory_space_handler
+ *
+ * PARAMETERS:  Function            - Read or Write operation
+ *              Address             - Where in the space to read or write
+ *              bit_width           - Field width in bits (8, 16, 32, or 64)
+ *              Value               - Pointer to in or out value
+ *              handler_context     - Pointer to Handler's context
+ *              region_context      - Pointer to context specific to the
+ *                                    accessed region
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Handler for the System Memory address space (Op Region)
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ex_system_memory_space_handler(u32 function,
+				    acpi_physical_address address,
+				    u32 bit_width,
+				    acpi_integer * value,
+				    void *handler_context, void *region_context)
+{
+	acpi_status status = AE_OK;
+	void *logical_addr_ptr = NULL;
+	struct acpi_mem_space_context *mem_info = region_context;
+	u32 length;
+	acpi_size window_size;
+#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
+	u32 remainder;
+#endif
+
+	ACPI_FUNCTION_TRACE(ex_system_memory_space_handler);
+
+	/* Validate and translate the bit width */
+
+	switch (bit_width) {
+	case 8:
+		length = 1;
+		break;
+
+	case 16:
+		length = 2;
+		break;
+
+	case 32:
+		length = 4;
+		break;
+
+	case 64:
+		length = 8;
+		break;
+
+	default:
+		ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %d",
+			    bit_width));
+		return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
+	}
+
+#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
+	/*
+	 * Hardware does not support non-aligned data transfers, so we must
+	 * verify the request.
+	 */
+	(void)acpi_ut_short_divide((acpi_integer) address, length, NULL,
+				   &remainder);
+	if (remainder != 0) {
+		return_ACPI_STATUS(AE_AML_ALIGNMENT);
+	}
+#endif
+
+	/*
+	 * Does the request fit into the cached memory mapping?
+	 * Is 1) Address below the current mapping? OR
+	 *    2) Address beyond the current mapping?
+	 */
+	if ((address < mem_info->mapped_physical_address) ||
+	    (((acpi_integer) address + length) > ((acpi_integer)
+						  mem_info->
+						  mapped_physical_address +
+						  mem_info->mapped_length))) {
+		/*
+		 * The request cannot be resolved by the current memory mapping;
+		 * Delete the existing mapping and create a new one.
+		 */
+		if (mem_info->mapped_length) {
+
+			/* Valid mapping, delete it */
+
+			acpi_os_unmap_memory(mem_info->mapped_logical_address,
+					     mem_info->mapped_length);
+		}
+
+		/*
+		 * Don't attempt to map memory beyond the end of the region, and
+		 * constrain the maximum mapping size to something reasonable.
+		 */
+		window_size = (acpi_size)
+		    ((mem_info->address + mem_info->length) - address);
+
+		if (window_size > ACPI_SYSMEM_REGION_WINDOW_SIZE) {
+			window_size = ACPI_SYSMEM_REGION_WINDOW_SIZE;
+		}
+
+		/* Create a new mapping starting at the address given */
+
+		mem_info->mapped_logical_address =
+			acpi_os_map_memory((acpi_physical_address) address, window_size);
+		if (!mem_info->mapped_logical_address) {
+			ACPI_ERROR((AE_INFO,
+				    "Could not map memory at %8.8X%8.8X, size %X",
+				    ACPI_FORMAT_NATIVE_UINT(address),
+				    (u32) window_size));
+			mem_info->mapped_length = 0;
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		/* Save the physical address and mapping size */
+
+		mem_info->mapped_physical_address = address;
+		mem_info->mapped_length = window_size;
+	}
+
+	/*
+	 * Generate a logical pointer corresponding to the address we want to
+	 * access
+	 */
+	logical_addr_ptr = mem_info->mapped_logical_address +
+	    ((acpi_integer) address -
+	     (acpi_integer) mem_info->mapped_physical_address);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			  "System-Memory (width %d) R/W %d Address=%8.8X%8.8X\n",
+			  bit_width, function,
+			  ACPI_FORMAT_NATIVE_UINT(address)));
+
+	/*
+	 * Perform the memory read or write
+	 *
+	 * Note: For machines that do not support non-aligned transfers, the target
+	 * address was checked for alignment above.  We do not attempt to break the
+	 * transfer up into smaller (byte-size) chunks because the AML specifically
+	 * asked for a transfer width that the hardware may require.
+	 */
+	switch (function) {
+	case ACPI_READ:
+
+		*value = 0;
+		switch (bit_width) {
+		case 8:
+			*value = (acpi_integer) ACPI_GET8(logical_addr_ptr);
+			break;
+
+		case 16:
+			*value = (acpi_integer) ACPI_GET16(logical_addr_ptr);
+			break;
+
+		case 32:
+			*value = (acpi_integer) ACPI_GET32(logical_addr_ptr);
+			break;
+
+		case 64:
+			*value = (acpi_integer) ACPI_GET64(logical_addr_ptr);
+			break;
+
+		default:
+			/* bit_width was already validated */
+			break;
+		}
+		break;
+
+	case ACPI_WRITE:
+
+		switch (bit_width) {
+		case 8:
+			ACPI_SET8(logical_addr_ptr) = (u8) * value;
+			break;
+
+		case 16:
+			ACPI_SET16(logical_addr_ptr) = (u16) * value;
+			break;
+
+		case 32:
+			ACPI_SET32(logical_addr_ptr) = (u32) * value;
+			break;
+
+		case 64:
+			ACPI_SET64(logical_addr_ptr) = (u64) * value;
+			break;
+
+		default:
+			/* bit_width was already validated */
+			break;
+		}
+		break;
+
+	default:
+		status = AE_BAD_PARAMETER;
+		break;
+	}
+
+	return_ACPI_STATUS(status);
+}
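The remapping decision above boils down to a window test: a cached mapping is reused only if the whole request falls inside it. The helper below is an illustrative sketch, not ACPICA API; a new window starts at the requested address, never extends past the end of the region, and is capped at ACPI_SYSMEM_REGION_WINDOW_SIZE.

/* Illustrative only: the cached-mapping check made above. The handler
 * remaps when this test fails for the requested (address, length). */
static int request_fits_window(unsigned long long address, unsigned length,
			       unsigned long long window_start,
			       unsigned long window_length)
{
	return (address >= window_start) &&
	       (address + length <= window_start + window_length);
}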
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_system_io_space_handler
+ *
+ * PARAMETERS:  Function            - Read or Write operation
+ *              Address             - Where in the space to read or write
+ *              bit_width           - Field width in bits (8, 16, or 32)
+ *              Value               - Pointer to in or out value
+ *              handler_context     - Pointer to Handler's context
+ *              region_context      - Pointer to context specific to the
+ *                                    accessed region
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Handler for the System IO address space (Op Region)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_system_io_space_handler(u32 function,
+				acpi_physical_address address,
+				u32 bit_width,
+				acpi_integer * value,
+				void *handler_context, void *region_context)
+{
+	acpi_status status = AE_OK;
+	u32 value32;
+
+	ACPI_FUNCTION_TRACE(ex_system_io_space_handler);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			  "System-IO (width %d) R/W %d Address=%8.8X%8.8X\n",
+			  bit_width, function,
+			  ACPI_FORMAT_NATIVE_UINT(address)));
+
+	/* Decode the function parameter */
+
+	switch (function) {
+	case ACPI_READ:
+
+		status = acpi_os_read_port((acpi_io_address) address,
+					   &value32, bit_width);
+		*value = value32;
+		break;
+
+	case ACPI_WRITE:
+
+		status = acpi_os_write_port((acpi_io_address) address,
+					    (u32) * value, bit_width);
+		break;
+
+	default:
+		status = AE_BAD_PARAMETER;
+		break;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_pci_config_space_handler
+ *
+ * PARAMETERS:  Function            - Read or Write operation
+ *              Address             - Where in the space to read or write
+ *              bit_width           - Field width in bits (8, 16, or 32)
+ *              Value               - Pointer to in or out value
+ *              handler_context     - Pointer to Handler's context
+ *              region_context      - Pointer to context specific to the
+ *                                    accessed region
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Handler for the PCI Config address space (Op Region)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_pci_config_space_handler(u32 function,
+				 acpi_physical_address address,
+				 u32 bit_width,
+				 acpi_integer * value,
+				 void *handler_context, void *region_context)
+{
+	acpi_status status = AE_OK;
+	struct acpi_pci_id *pci_id;
+	u16 pci_register;
+	u32 value32;
+
+	ACPI_FUNCTION_TRACE(ex_pci_config_space_handler);
+
+	/*
+	 *  The arguments to acpi_os(Read|Write)pci_configuration are:
+	 *
+	 *  pci_segment is the PCI segment group number, range 0-65535
+	 *  pci_bus     is the PCI bus number range 0-255
+	 *  pci_device  is the PCI device number range 0-31
+	 *  pci_function is the PCI device function number
+	 *  pci_register is the Config space register range 0-255 bytes
+	 *
+	 *  Value - input value for write, output value for read
+	 *
+	 */
+	pci_id = (struct acpi_pci_id *)region_context;
+	pci_register = (u16) (u32) address;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			  "Pci-Config %d (%d) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n",
+			  function, bit_width, pci_id->segment, pci_id->bus,
+			  pci_id->device, pci_id->function, pci_register));
+
+	switch (function) {
+	case ACPI_READ:
+
+		status = acpi_os_read_pci_configuration(pci_id, pci_register,
+							&value32, bit_width);
+		*value = value32;
+		break;
+
+	case ACPI_WRITE:
+
+		status = acpi_os_write_pci_configuration(pci_id, pci_register,
+							 *value, bit_width);
+		break;
+
+	default:
+
+		status = AE_BAD_PARAMETER;
+		break;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_cmos_space_handler
+ *
+ * PARAMETERS:  Function            - Read or Write operation
+ *              Address             - Where in the space to read or write
+ *              bit_width           - Field width in bits (8, 16, or 32)
+ *              Value               - Pointer to in or out value
+ *              handler_context     - Pointer to Handler's context
+ *              region_context      - Pointer to context specific to the
+ *                                    accessed region
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Handler for the CMOS address space (Op Region)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_cmos_space_handler(u32 function,
+			   acpi_physical_address address,
+			   u32 bit_width,
+			   acpi_integer * value,
+			   void *handler_context, void *region_context)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(ex_cmos_space_handler);
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_pci_bar_space_handler
+ *
+ * PARAMETERS:  Function            - Read or Write operation
+ *              Address             - Where in the space to read or write
+ *              bit_width           - Field width in bits (8, 16, or 32)
+ *              Value               - Pointer to in or out value
+ *              handler_context     - Pointer to Handler's context
+ *              region_context      - Pointer to context specific to the
+ *                                    accessed region
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Handler for the PCI bar_target address space (Op Region)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_pci_bar_space_handler(u32 function,
+			      acpi_physical_address address,
+			      u32 bit_width,
+			      acpi_integer * value,
+			      void *handler_context, void *region_context)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(ex_pci_bar_space_handler);
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_data_table_space_handler
+ *
+ * PARAMETERS:  Function            - Read or Write operation
+ *              Address             - Where in the space to read or write
+ *              bit_width           - Field width in bits (8, 16, or 32)
+ *              Value               - Pointer to in or out value
+ *              handler_context     - Pointer to Handler's context
+ *              region_context      - Pointer to context specific to the
+ *                                    accessed region
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Handler for the Data Table address space (Op Region)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_data_table_space_handler(u32 function,
+				 acpi_physical_address address,
+				 u32 bit_width,
+				 acpi_integer * value,
+				 void *handler_context, void *region_context)
+{
+	ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
+
+	/* Perform the memory read or write */
+
+	switch (function) {
+	case ACPI_READ:
+
+		ACPI_MEMCPY(ACPI_CAST_PTR(char, value),
+			    ACPI_PHYSADDR_TO_PTR(address),
+			    ACPI_DIV_8(bit_width));
+		break;
+
+	case ACPI_WRITE:
+	default:
+
+		return_ACPI_STATUS(AE_SUPPORT);
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
new file mode 100644
index 0000000..a063a74
--- /dev/null
+++ b/drivers/acpi/acpica/exresnte.c
@@ -0,0 +1,278 @@
+
+/******************************************************************************
+ *
+ * Module Name: exresnte - AML Interpreter object resolution
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exresnte")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_resolve_node_to_value
+ *
+ * PARAMETERS:  object_ptr      - Pointer to a location that contains
+ *                                a pointer to a NS node, and will receive a
+ *                                pointer to the resolved object.
+ *              walk_state      - Current state.  Valid only if executing AML
+ *                                code.  NULL if simply resolving an object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Resolve a Namespace node to a valued object
+ *
+ * Note: for some of the data types, the pointer attached to the Node
+ * can be either a pointer to an actual internal object or a pointer into the
+ * AML stream itself.  These types are currently:
+ *
+ *      ACPI_TYPE_INTEGER
+ *      ACPI_TYPE_STRING
+ *      ACPI_TYPE_BUFFER
+ *      ACPI_TYPE_MUTEX
+ *      ACPI_TYPE_PACKAGE
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
+			      struct acpi_walk_state *walk_state)
+{
+	acpi_status status = AE_OK;
+	union acpi_operand_object *source_desc;
+	union acpi_operand_object *obj_desc = NULL;
+	struct acpi_namespace_node *node;
+	acpi_object_type entry_type;
+
+	ACPI_FUNCTION_TRACE(ex_resolve_node_to_value);
+
+	/*
+	 * The stack pointer points to a struct acpi_namespace_node (Node).  Get the
+	 * object that is attached to the Node.
+	 */
+	node = *object_ptr;
+	source_desc = acpi_ns_get_attached_object(node);
+	entry_type = acpi_ns_get_type((acpi_handle) node);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Entry=%p SourceDesc=%p [%s]\n",
+			  node, source_desc,
+			  acpi_ut_get_type_name(entry_type)));
+
+	if ((entry_type == ACPI_TYPE_LOCAL_ALIAS) ||
+	    (entry_type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) {
+
+		/* There is always exactly one level of indirection */
+
+		node = ACPI_CAST_PTR(struct acpi_namespace_node, node->object);
+		source_desc = acpi_ns_get_attached_object(node);
+		entry_type = acpi_ns_get_type((acpi_handle) node);
+		*object_ptr = node;
+	}
+
+	/*
+	 * Several object types require no further processing:
+	 * 1) Device/Thermal objects don't have a "real" subobject, return the Node
+	 * 2) Method locals and arguments have a pseudo-Node
+	 * 3) 10/2007: Added method type to assist with Package construction.
+	 */
+	if ((entry_type == ACPI_TYPE_DEVICE) ||
+	    (entry_type == ACPI_TYPE_THERMAL) ||
+	    (entry_type == ACPI_TYPE_METHOD) ||
+	    (node->flags & (ANOBJ_METHOD_ARG | ANOBJ_METHOD_LOCAL))) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	if (!source_desc) {
+		ACPI_ERROR((AE_INFO, "No object attached to node %p", node));
+		return_ACPI_STATUS(AE_AML_NO_OPERAND);
+	}
+
+	/*
+	 * Action is based on the type of the Node, which indicates the type
+	 * of the attached object or pointer
+	 */
+	switch (entry_type) {
+	case ACPI_TYPE_PACKAGE:
+
+		if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_PACKAGE) {
+			ACPI_ERROR((AE_INFO, "Object not a Package, type %s",
+				    acpi_ut_get_object_type_name(source_desc)));
+			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+		}
+
+		status = acpi_ds_get_package_arguments(source_desc);
+		if (ACPI_SUCCESS(status)) {
+
+			/* Return an additional reference to the object */
+
+			obj_desc = source_desc;
+			acpi_ut_add_reference(obj_desc);
+		}
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_BUFFER) {
+			ACPI_ERROR((AE_INFO, "Object not a Buffer, type %s",
+				    acpi_ut_get_object_type_name(source_desc)));
+			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+		}
+
+		status = acpi_ds_get_buffer_arguments(source_desc);
+		if (ACPI_SUCCESS(status)) {
+
+			/* Return an additional reference to the object */
+
+			obj_desc = source_desc;
+			acpi_ut_add_reference(obj_desc);
+		}
+		break;
+
+	case ACPI_TYPE_STRING:
+
+		if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_STRING) {
+			ACPI_ERROR((AE_INFO, "Object not a String, type %s",
+				    acpi_ut_get_object_type_name(source_desc)));
+			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+		}
+
+		/* Return an additional reference to the object */
+
+		obj_desc = source_desc;
+		acpi_ut_add_reference(obj_desc);
+		break;
+
+	case ACPI_TYPE_INTEGER:
+
+		if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_INTEGER) {
+			ACPI_ERROR((AE_INFO, "Object not an Integer, type %s",
+				    acpi_ut_get_object_type_name(source_desc)));
+			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+		}
+
+		/* Return an additional reference to the object */
+
+		obj_desc = source_desc;
+		acpi_ut_add_reference(obj_desc);
+		break;
+
+	case ACPI_TYPE_BUFFER_FIELD:
+	case ACPI_TYPE_LOCAL_REGION_FIELD:
+	case ACPI_TYPE_LOCAL_BANK_FIELD:
+	case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "FieldRead Node=%p SourceDesc=%p Type=%X\n",
+				  node, source_desc, entry_type));
+
+		status =
+		    acpi_ex_read_data_from_field(walk_state, source_desc,
+						 &obj_desc);
+		break;
+
+		/* For these objects, just return the object attached to the Node */
+
+	case ACPI_TYPE_MUTEX:
+	case ACPI_TYPE_POWER:
+	case ACPI_TYPE_PROCESSOR:
+	case ACPI_TYPE_EVENT:
+	case ACPI_TYPE_REGION:
+
+		/* Return an additional reference to the object */
+
+		obj_desc = source_desc;
+		acpi_ut_add_reference(obj_desc);
+		break;
+
+		/* TYPE_ANY is untyped, and thus there is no object associated with it */
+
+	case ACPI_TYPE_ANY:
+
+		ACPI_ERROR((AE_INFO,
+			    "Untyped entry %p, no attached object!", node));
+
+		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);	/* Cannot be AE_TYPE */
+
+	case ACPI_TYPE_LOCAL_REFERENCE:
+
+		switch (source_desc->reference.class) {
+		case ACPI_REFCLASS_TABLE:	/* This is a ddb_handle */
+		case ACPI_REFCLASS_REFOF:
+		case ACPI_REFCLASS_INDEX:
+
+			/* Return an additional reference to the object */
+
+			obj_desc = source_desc;
+			acpi_ut_add_reference(obj_desc);
+			break;
+
+		default:
+			/* No named references are allowed here */
+
+			ACPI_ERROR((AE_INFO,
+				    "Unsupported Reference type %X",
+				    source_desc->reference.class));
+
+			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+		}
+		break;
+
+	default:
+
+		/* Default case is for unknown types */
+
+		ACPI_ERROR((AE_INFO,
+			    "Node %p - Unknown object type %X",
+			    node, entry_type));
+
+		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+
+	}			/* switch (entry_type) */
+
+	/* Return the object descriptor */
+
+	*object_ptr = (void *)obj_desc;
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
new file mode 100644
index 0000000..f6105a6
--- /dev/null
+++ b/drivers/acpi/acpica/exresolv.c
@@ -0,0 +1,551 @@
+
+/******************************************************************************
+ *
+ * Module Name: exresolv - AML Interpreter object resolution
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exresolv")
+
+/* Local prototypes */
+static acpi_status
+acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
+				struct acpi_walk_state *walk_state);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_resolve_to_value
+ *
+ * PARAMETERS:  **stack_ptr         - Points to entry on obj_stack, which can
+ *                                    be either an (union acpi_operand_object *)
+ *                                    or an acpi_handle.
+ *              walk_state          - Current method state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Convert Reference objects to values
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_resolve_to_value(union acpi_operand_object **stack_ptr,
+			 struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_resolve_to_value, stack_ptr);
+
+	if (!stack_ptr || !*stack_ptr) {
+		ACPI_ERROR((AE_INFO, "Internal - null pointer"));
+		return_ACPI_STATUS(AE_AML_NO_OPERAND);
+	}
+
+	/*
+	 * The entity pointed to by the stack_ptr can be either
+	 * 1) A valid union acpi_operand_object, or
+	 * 2) A struct acpi_namespace_node (named_obj)
+	 */
+	if (ACPI_GET_DESCRIPTOR_TYPE(*stack_ptr) == ACPI_DESC_TYPE_OPERAND) {
+		status = acpi_ex_resolve_object_to_value(stack_ptr, walk_state);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		if (!*stack_ptr) {
+			ACPI_ERROR((AE_INFO, "Internal - null pointer"));
+			return_ACPI_STATUS(AE_AML_NO_OPERAND);
+		}
+	}
+
+	/*
+	 * Object on the stack may have changed if acpi_ex_resolve_object_to_value()
+	 * was called (i.e., we can't use an _else_ here.)
+	 */
+	if (ACPI_GET_DESCRIPTOR_TYPE(*stack_ptr) == ACPI_DESC_TYPE_NAMED) {
+		status =
+		    acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
+						  (struct acpi_namespace_node,
+						   stack_ptr), walk_state);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Resolved object %p\n", *stack_ptr));
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_resolve_object_to_value
+ *
+ * PARAMETERS:  stack_ptr       - Pointer to an internal object
+ *              walk_state      - Current method state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Retrieve the value from an internal object. The Reference type
+ *              uses the associated AML opcode to determine the value.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
+				struct acpi_walk_state *walk_state)
+{
+	acpi_status status = AE_OK;
+	union acpi_operand_object *stack_desc;
+	union acpi_operand_object *obj_desc = NULL;
+	u8 ref_type;
+
+	ACPI_FUNCTION_TRACE(ex_resolve_object_to_value);
+
+	stack_desc = *stack_ptr;
+
+	/* This is a union acpi_operand_object    */
+
+	switch (ACPI_GET_OBJECT_TYPE(stack_desc)) {
+	case ACPI_TYPE_LOCAL_REFERENCE:
+
+		ref_type = stack_desc->reference.class;
+
+		switch (ref_type) {
+		case ACPI_REFCLASS_LOCAL:
+		case ACPI_REFCLASS_ARG:
+
+			/*
+			 * Get the local from the method's state info
+			 * Note: this increments the local's object reference count
+			 */
+			status = acpi_ds_method_data_get_value(ref_type,
+							       stack_desc->
+							       reference.value,
+							       walk_state,
+							       &obj_desc);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+
+			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+					  "[Arg/Local %X] ValueObj is %p\n",
+					  stack_desc->reference.value,
+					  obj_desc));
+
+			/*
+			 * Now we can delete the original Reference Object and
+			 * replace it with the resolved value
+			 */
+			acpi_ut_remove_reference(stack_desc);
+			*stack_ptr = obj_desc;
+			break;
+
+		case ACPI_REFCLASS_INDEX:
+
+			switch (stack_desc->reference.target_type) {
+			case ACPI_TYPE_BUFFER_FIELD:
+
+				/* Just return - do not dereference */
+				break;
+
+			case ACPI_TYPE_PACKAGE:
+
+				/* If method call or copy_object - do not dereference */
+
+				if ((walk_state->opcode ==
+				     AML_INT_METHODCALL_OP)
+				    || (walk_state->opcode == AML_COPY_OP)) {
+					break;
+				}
+
+				/* Otherwise, dereference the package_index to a package element */
+
+				obj_desc = *stack_desc->reference.where;
+				if (obj_desc) {
+					/*
+					 * Valid object descriptor, copy pointer to return value
+					 * (i.e., dereference the package index)
+					 * Delete the ref object, increment the returned object
+					 */
+					acpi_ut_remove_reference(stack_desc);
+					acpi_ut_add_reference(obj_desc);
+					*stack_ptr = obj_desc;
+				} else {
+					/*
+					 * A NULL object descriptor means an uninitialized element of
+					 * the package, can't dereference it
+					 */
+					ACPI_ERROR((AE_INFO,
+						    "Attempt to dereference an Index to NULL package element Idx=%p",
+						    stack_desc));
+					status = AE_AML_UNINITIALIZED_ELEMENT;
+				}
+				break;
+
+			default:
+
+				/* Invalid reference object */
+
+				ACPI_ERROR((AE_INFO,
+					    "Unknown TargetType %X in Index/Reference object %p",
+					    stack_desc->reference.target_type,
+					    stack_desc));
+				status = AE_AML_INTERNAL;
+				break;
+			}
+			break;
+
+		case ACPI_REFCLASS_REFOF:
+		case ACPI_REFCLASS_DEBUG:
+		case ACPI_REFCLASS_TABLE:
+
+			/* Just leave the object as-is, do not dereference */
+
+			break;
+
+		case ACPI_REFCLASS_NAME:	/* Reference to a named object */
+
+			/* Dereference the name */
+
+			if ((stack_desc->reference.node->type ==
+			     ACPI_TYPE_DEVICE)
+			    || (stack_desc->reference.node->type ==
+				ACPI_TYPE_THERMAL)) {
+
+				/* These node types do not have 'real' subobjects */
+
+				*stack_ptr = (void *)stack_desc->reference.node;
+			} else {
+				/* Get the object pointed to by the namespace node */
+
+				*stack_ptr =
+				    (stack_desc->reference.node)->object;
+				acpi_ut_add_reference(*stack_ptr);
+			}
+
+			acpi_ut_remove_reference(stack_desc);
+			break;
+
+		default:
+
+			ACPI_ERROR((AE_INFO,
+				    "Unknown Reference type %X in %p", ref_type,
+				    stack_desc));
+			status = AE_AML_INTERNAL;
+			break;
+		}
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		status = acpi_ds_get_buffer_arguments(stack_desc);
+		break;
+
+	case ACPI_TYPE_PACKAGE:
+
+		status = acpi_ds_get_package_arguments(stack_desc);
+		break;
+
+	case ACPI_TYPE_BUFFER_FIELD:
+	case ACPI_TYPE_LOCAL_REGION_FIELD:
+	case ACPI_TYPE_LOCAL_BANK_FIELD:
+	case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "FieldRead SourceDesc=%p Type=%X\n",
+				  stack_desc,
+				  ACPI_GET_OBJECT_TYPE(stack_desc)));
+
+		status =
+		    acpi_ex_read_data_from_field(walk_state, stack_desc,
+						 &obj_desc);
+
+		/* Remove a reference to the original operand, then override */
+
+		acpi_ut_remove_reference(*stack_ptr);
+		*stack_ptr = (void *)obj_desc;
+		break;
+
+	default:
+		break;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_resolve_multiple
+ *
+ * PARAMETERS:  walk_state          - Current state (contains AML opcode)
+ *              Operand             - Starting point for resolution
+ *              return_type         - Where the object type is returned
+ *              return_desc         - Where the resolved object is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Return the base object and type.  Traverse a reference list if
+ *              necessary to get to the base object.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
+			 union acpi_operand_object *operand,
+			 acpi_object_type * return_type,
+			 union acpi_operand_object **return_desc)
+{
+	union acpi_operand_object *obj_desc = (void *)operand;
+	struct acpi_namespace_node *node;
+	acpi_object_type type;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_ex_resolve_multiple);
+
+	/* Operand can be either a namespace node or an operand descriptor */
+
+	switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
+	case ACPI_DESC_TYPE_OPERAND:
+		type = obj_desc->common.type;
+		break;
+
+	case ACPI_DESC_TYPE_NAMED:
+		type = ((struct acpi_namespace_node *)obj_desc)->type;
+		obj_desc =
+		    acpi_ns_get_attached_object((struct acpi_namespace_node *)
+						obj_desc);
+
+		/* If we had an Alias node, use the attached object for type info */
+
+		if (type == ACPI_TYPE_LOCAL_ALIAS) {
+			type = ((struct acpi_namespace_node *)obj_desc)->type;
+			obj_desc =
+			    acpi_ns_get_attached_object((struct
+							 acpi_namespace_node *)
+							obj_desc);
+		}
+		break;
+
+	default:
+		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+	}
+
+	/* If type is anything other than a reference, we are done */
+
+	if (type != ACPI_TYPE_LOCAL_REFERENCE) {
+		goto exit;
+	}
+
+	/*
+	 * For reference objects created via the ref_of, Index, or Load/load_table
+	 * operators, we need to get to the base object (as per the ACPI
+	 * specification of the object_type and size_of operators). This means
+	 * traversing the list of possibly many nested references.
+	 */
+	while (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REFERENCE) {
+		switch (obj_desc->reference.class) {
+		case ACPI_REFCLASS_REFOF:
+		case ACPI_REFCLASS_NAME:
+
+			/* Dereference the reference pointer */
+
+			if (obj_desc->reference.class == ACPI_REFCLASS_REFOF) {
+				node = obj_desc->reference.object;
+			} else {	/* AML_INT_NAMEPATH_OP */
+
+				node = obj_desc->reference.node;
+			}
+
+			/* All "References" point to a NS node */
+
+			if (ACPI_GET_DESCRIPTOR_TYPE(node) !=
+			    ACPI_DESC_TYPE_NAMED) {
+				ACPI_ERROR((AE_INFO, "Not a NS node %p [%s]",
+					    node,
+					    acpi_ut_get_descriptor_name(node)));
+				return_ACPI_STATUS(AE_AML_INTERNAL);
+			}
+
+			/* Get the attached object */
+
+			obj_desc = acpi_ns_get_attached_object(node);
+			if (!obj_desc) {
+
+				/* No object, use the NS node type */
+
+				type = acpi_ns_get_type(node);
+				goto exit;
+			}
+
+			/* Check for circular references */
+
+			if (obj_desc == operand) {
+				return_ACPI_STATUS(AE_AML_CIRCULAR_REFERENCE);
+			}
+			break;
+
+		case ACPI_REFCLASS_INDEX:
+
+			/* Get the type of this reference (index into another object) */
+
+			type = obj_desc->reference.target_type;
+			if (type != ACPI_TYPE_PACKAGE) {
+				goto exit;
+			}
+
+			/*
+			 * The main object is a package, we want to get the type
+			 * of the individual package element that is referenced by
+			 * the index.
+			 *
+			 * This could of course in turn be another reference object.
+			 */
+			obj_desc = *(obj_desc->reference.where);
+			if (!obj_desc) {
+
+				/* NULL package elements are allowed */
+
+				type = 0;	/* Uninitialized */
+				goto exit;
+			}
+			break;
+
+		case ACPI_REFCLASS_TABLE:
+
+			type = ACPI_TYPE_DDB_HANDLE;
+			goto exit;
+
+		case ACPI_REFCLASS_LOCAL:
+		case ACPI_REFCLASS_ARG:
+
+			if (return_desc) {
+				status =
+				    acpi_ds_method_data_get_value(obj_desc->
+								  reference.
+								  class,
+								  obj_desc->
+								  reference.
+								  value,
+								  walk_state,
+								  &obj_desc);
+				if (ACPI_FAILURE(status)) {
+					return_ACPI_STATUS(status);
+				}
+				acpi_ut_remove_reference(obj_desc);
+			} else {
+				status =
+				    acpi_ds_method_data_get_node(obj_desc->
+								 reference.
+								 class,
+								 obj_desc->
+								 reference.
+								 value,
+								 walk_state,
+								 &node);
+				if (ACPI_FAILURE(status)) {
+					return_ACPI_STATUS(status);
+				}
+
+				obj_desc = acpi_ns_get_attached_object(node);
+				if (!obj_desc) {
+					type = ACPI_TYPE_ANY;
+					goto exit;
+				}
+			}
+			break;
+
+		case ACPI_REFCLASS_DEBUG:
+
+			/* The Debug Object is of type "DebugObject" */
+
+			type = ACPI_TYPE_DEBUG_OBJECT;
+			goto exit;
+
+		default:
+
+			ACPI_ERROR((AE_INFO,
+				    "Unknown Reference Class %2.2X",
+				    obj_desc->reference.class));
+			return_ACPI_STATUS(AE_AML_INTERNAL);
+		}
+	}
+
+	/*
+	 * Now we are guaranteed to have an object that has not been created
+	 * via the ref_of or Index operators.
+	 */
+	type = ACPI_GET_OBJECT_TYPE(obj_desc);
+
+      exit:
+	/* Convert internal types to external types */
+
+	switch (type) {
+	case ACPI_TYPE_LOCAL_REGION_FIELD:
+	case ACPI_TYPE_LOCAL_BANK_FIELD:
+	case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+		type = ACPI_TYPE_FIELD_UNIT;
+		break;
+
+	case ACPI_TYPE_LOCAL_SCOPE:
+
+		/* Per ACPI Specification, Scope is untyped */
+
+		type = ACPI_TYPE_ANY;
+		break;
+
+	default:
+		/* No change to Type required */
+		break;
+	}
+
+	*return_type = type;
+	if (return_desc) {
+		*return_desc = obj_desc;
+	}
+	return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
new file mode 100644
index 0000000..3c38027
--- /dev/null
+++ b/drivers/acpi/acpica/exresop.c
@@ -0,0 +1,701 @@
+
+/******************************************************************************
+ *
+ * Module Name: exresop - AML Interpreter operand/object resolution
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "amlcode.h"
+#include "acparser.h"
+#include "acinterp.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exresop")
+
+/* Local prototypes */
+static acpi_status
+acpi_ex_check_object_type(acpi_object_type type_needed,
+			  acpi_object_type this_type, void *object);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_check_object_type
+ *
+ * PARAMETERS:  type_needed         - Object type needed
+ *              this_type           - Actual object type
+ *              Object              - Object pointer
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Check required type against actual type
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_check_object_type(acpi_object_type type_needed,
+			  acpi_object_type this_type, void *object)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	if (type_needed == ACPI_TYPE_ANY) {
+
+		/* All types OK, so we don't perform any typechecks */
+
+		return (AE_OK);
+	}
+
+	if (type_needed == ACPI_TYPE_LOCAL_REFERENCE) {
+		/*
+		 * Allow the AML "Constant" opcodes (Zero, One, etc.) to be reference
+		 * objects and thus allow them to be targets.  (As per the ACPI
+		 * specification, a store to a constant is a noop.)
+		 */
+		if ((this_type == ACPI_TYPE_INTEGER) &&
+		    (((union acpi_operand_object *)object)->common.
+		     flags & AOPOBJ_AML_CONSTANT)) {
+			return (AE_OK);
+		}
+	}
+
+	if (type_needed != this_type) {
+		ACPI_ERROR((AE_INFO,
+			    "Needed type [%s], found [%s] %p",
+			    acpi_ut_get_type_name(type_needed),
+			    acpi_ut_get_type_name(this_type), object));
+
+		return (AE_AML_OPERAND_TYPE);
+	}
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_resolve_operands
+ *
+ * PARAMETERS:  Opcode              - Opcode being interpreted
+ *              stack_ptr           - Pointer to the operand stack to be
+ *                                    resolved
+ *              walk_state          - Current state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Convert multiple input operands to the types required by the
+ *              target operator.
+ *
+ *      Each 5-bit group in arg_types represents one required
+ *      operand and indicates the required Type. The corresponding operand
+ *      will be converted to the required type if possible, otherwise we
+ *      abort with an exception.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_resolve_operands(u16 opcode,
+			 union acpi_operand_object ** stack_ptr,
+			 struct acpi_walk_state * walk_state)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status = AE_OK;
+	u8 object_type;
+	u32 arg_types;
+	const struct acpi_opcode_info *op_info;
+	u32 this_arg_type;
+	acpi_object_type type_needed;
+	u16 target_op = 0;
+
+	ACPI_FUNCTION_TRACE_U32(ex_resolve_operands, opcode);
+
+	op_info = acpi_ps_get_opcode_info(opcode);
+	if (op_info->class == AML_CLASS_UNKNOWN) {
+		return_ACPI_STATUS(AE_AML_BAD_OPCODE);
+	}
+
+	arg_types = op_info->runtime_args;
+	if (arg_types == ARGI_INVALID_OPCODE) {
+		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", opcode));
+
+		return_ACPI_STATUS(AE_AML_INTERNAL);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+			  "Opcode %X [%s] RequiredOperandTypes=%8.8X\n",
+			  opcode, op_info->name, arg_types));
+
+	/*
+	 * Normal exit is with (arg_types == 0) at end of argument list.
+	 * Function will return an exception from within the loop upon
+	 * finding an entry which is not (or cannot be converted
+	 * to) the required type; if stack underflows; or upon
+	 * finding a NULL stack entry (which should not happen).
+	 */
+	while (GET_CURRENT_ARG_TYPE(arg_types)) {
+		if (!stack_ptr || !*stack_ptr) {
+			ACPI_ERROR((AE_INFO, "Null stack entry at %p",
+				    stack_ptr));
+
+			return_ACPI_STATUS(AE_AML_INTERNAL);
+		}
+
+		/* Extract useful items */
+
+		obj_desc = *stack_ptr;
+
+		/* Decode the descriptor type */
+
+		switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
+		case ACPI_DESC_TYPE_NAMED:
+
+			/* Namespace Node */
+
+			object_type =
+			    ((struct acpi_namespace_node *)obj_desc)->type;
+
+			/*
+			 * Resolve an alias object. The construction of these objects
+			 * guarantees that there is only one level of alias indirection;
+			 * thus, the attached object is always the aliased namespace node
+			 */
+			if (object_type == ACPI_TYPE_LOCAL_ALIAS) {
+				obj_desc =
+				    acpi_ns_get_attached_object((struct
+								 acpi_namespace_node
+								 *)obj_desc);
+				*stack_ptr = obj_desc;
+				object_type =
+				    ((struct acpi_namespace_node *)obj_desc)->
+				    type;
+			}
+			break;
+
+		case ACPI_DESC_TYPE_OPERAND:
+
+			/* ACPI internal object */
+
+			object_type = ACPI_GET_OBJECT_TYPE(obj_desc);
+
+			/* Check for bad acpi_object_type */
+
+			if (!acpi_ut_valid_object_type(object_type)) {
+				ACPI_ERROR((AE_INFO,
+					    "Bad operand object type [%X]",
+					    object_type));
+
+				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+			}
+
+			if (object_type == (u8) ACPI_TYPE_LOCAL_REFERENCE) {
+
+				/* Validate the Reference */
+
+				switch (obj_desc->reference.class) {
+				case ACPI_REFCLASS_DEBUG:
+
+					target_op = AML_DEBUG_OP;
+
+					/*lint -fallthrough */
+
+				case ACPI_REFCLASS_ARG:
+				case ACPI_REFCLASS_LOCAL:
+				case ACPI_REFCLASS_INDEX:
+				case ACPI_REFCLASS_REFOF:
+				case ACPI_REFCLASS_TABLE:	/* ddb_handle from LOAD_OP or LOAD_TABLE_OP */
+				case ACPI_REFCLASS_NAME:	/* Reference to a named object */
+
+					ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+							  "Operand is a Reference, Class [%s] %2.2X\n",
+							  acpi_ut_get_reference_name
+							  (obj_desc),
+							  obj_desc->reference.
+							  class));
+					break;
+
+				default:
+
+					ACPI_ERROR((AE_INFO,
+						    "Unknown Reference Class %2.2X in %p",
+						    obj_desc->reference.class,
+						    obj_desc));
+
+					return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+				}
+			}
+			break;
+
+		default:
+
+			/* Invalid descriptor */
+
+			ACPI_ERROR((AE_INFO, "Invalid descriptor %p [%s]",
+				    obj_desc,
+				    acpi_ut_get_descriptor_name(obj_desc)));
+
+			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+		}
+
+		/* Get one argument type, point to the next */
+
+		this_arg_type = GET_CURRENT_ARG_TYPE(arg_types);
+		INCREMENT_ARG_LIST(arg_types);
+
+		/*
+		 * Handle cases where the object does not need to be
+		 * resolved to a value
+		 */
+		switch (this_arg_type) {
+		case ARGI_REF_OR_STRING:	/* Can be a String or Reference */
+
+			if ((ACPI_GET_DESCRIPTOR_TYPE(obj_desc) ==
+			     ACPI_DESC_TYPE_OPERAND)
+			    && (ACPI_GET_OBJECT_TYPE(obj_desc) ==
+				ACPI_TYPE_STRING)) {
+				/*
+				 * String found - the string references a named object and
+				 * must be resolved to a node
+				 */
+				goto next_operand;
+			}
+
+			/*
+			 * Else not a string - fall through to the normal Reference
+			 * case below
+			 */
+			/*lint -fallthrough */
+
+		case ARGI_REFERENCE:	/* References: */
+		case ARGI_INTEGER_REF:
+		case ARGI_OBJECT_REF:
+		case ARGI_DEVICE_REF:
+		case ARGI_TARGETREF:	/* Allows implicit conversion rules before store */
+		case ARGI_FIXED_TARGET:	/* No implicit conversion before store to target */
+		case ARGI_SIMPLE_TARGET:	/* Name, Local, or Arg - no implicit conversion  */
+
+			/*
+			 * Need an operand of type ACPI_TYPE_LOCAL_REFERENCE
+			 * A Namespace Node is OK as-is
+			 */
+			if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) ==
+			    ACPI_DESC_TYPE_NAMED) {
+				goto next_operand;
+			}
+
+			status =
+			    acpi_ex_check_object_type(ACPI_TYPE_LOCAL_REFERENCE,
+						      object_type, obj_desc);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+			goto next_operand;
+
+		case ARGI_DATAREFOBJ:	/* Store operator only */
+
+			/*
+			 * We don't want to resolve index_op reference objects during
+			 * a store because this would be an implicit de_ref_of operation.
+			 * Instead, we just want to store the reference object.
+			 * -- All others must be resolved below.
+			 */
+			if ((opcode == AML_STORE_OP) &&
+			    (ACPI_GET_OBJECT_TYPE(*stack_ptr) ==
+			     ACPI_TYPE_LOCAL_REFERENCE)
+			    && ((*stack_ptr)->reference.class == ACPI_REFCLASS_INDEX)) {
+				goto next_operand;
+			}
+			break;
+
+		default:
+			/* All cases covered above */
+			break;
+		}
+
+		/*
+		 * Resolve this object to a value
+		 */
+		status = acpi_ex_resolve_to_value(stack_ptr, walk_state);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		/* Get the resolved object */
+
+		obj_desc = *stack_ptr;
+
+		/*
+		 * Check the resulting object (value) type
+		 */
+		switch (this_arg_type) {
+			/*
+			 * For the simple cases, only one type of resolved object
+			 * is allowed
+			 */
+		case ARGI_MUTEX:
+
+			/* Need an operand of type ACPI_TYPE_MUTEX */
+
+			type_needed = ACPI_TYPE_MUTEX;
+			break;
+
+		case ARGI_EVENT:
+
+			/* Need an operand of type ACPI_TYPE_EVENT */
+
+			type_needed = ACPI_TYPE_EVENT;
+			break;
+
+		case ARGI_PACKAGE:	/* Package */
+
+			/* Need an operand of type ACPI_TYPE_PACKAGE */
+
+			type_needed = ACPI_TYPE_PACKAGE;
+			break;
+
+		case ARGI_ANYTYPE:
+
+			/* Any operand type will do */
+
+			type_needed = ACPI_TYPE_ANY;
+			break;
+
+		case ARGI_DDBHANDLE:
+
+			/* Need an operand of type ACPI_TYPE_DDB_HANDLE */
+
+			type_needed = ACPI_TYPE_LOCAL_REFERENCE;
+			break;
+
+			/*
+			 * The more complex cases allow multiple resolved object types
+			 */
+		case ARGI_INTEGER:
+
+			/*
+			 * Need an operand of type ACPI_TYPE_INTEGER,
+			 * But we can implicitly convert from a STRING or BUFFER
+			 * Aka - "Implicit Source Operand Conversion"
+			 */
+			status =
+			    acpi_ex_convert_to_integer(obj_desc, stack_ptr, 16);
+			if (ACPI_FAILURE(status)) {
+				if (status == AE_TYPE) {
+					ACPI_ERROR((AE_INFO,
+						    "Needed [Integer/String/Buffer], found [%s] %p",
+						    acpi_ut_get_object_type_name
+						    (obj_desc), obj_desc));
+
+					return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+				}
+
+				return_ACPI_STATUS(status);
+			}
+
+			if (obj_desc != *stack_ptr) {
+				acpi_ut_remove_reference(obj_desc);
+			}
+			goto next_operand;
+
+		case ARGI_BUFFER:
+
+			/*
+			 * Need an operand of type ACPI_TYPE_BUFFER,
+			 * But we can implicitly convert from a STRING or INTEGER
+			 * Aka - "Implicit Source Operand Conversion"
+			 */
+			status = acpi_ex_convert_to_buffer(obj_desc, stack_ptr);
+			if (ACPI_FAILURE(status)) {
+				if (status == AE_TYPE) {
+					ACPI_ERROR((AE_INFO,
+						    "Needed [Integer/String/Buffer], found [%s] %p",
+						    acpi_ut_get_object_type_name
+						    (obj_desc), obj_desc));
+
+					return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+				}
+
+				return_ACPI_STATUS(status);
+			}
+
+			if (obj_desc != *stack_ptr) {
+				acpi_ut_remove_reference(obj_desc);
+			}
+			goto next_operand;
+
+		case ARGI_STRING:
+
+			/*
+			 * Need an operand of type ACPI_TYPE_STRING,
+			 * But we can implicitly convert from a BUFFER or INTEGER
+			 * Aka - "Implicit Source Operand Conversion"
+			 */
+			status = acpi_ex_convert_to_string(obj_desc, stack_ptr,
+							   ACPI_IMPLICIT_CONVERT_HEX);
+			if (ACPI_FAILURE(status)) {
+				if (status == AE_TYPE) {
+					ACPI_ERROR((AE_INFO,
+						    "Needed [Integer/String/Buffer], found [%s] %p",
+						    acpi_ut_get_object_type_name
+						    (obj_desc), obj_desc));
+
+					return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+				}
+
+				return_ACPI_STATUS(status);
+			}
+
+			if (obj_desc != *stack_ptr) {
+				acpi_ut_remove_reference(obj_desc);
+			}
+			goto next_operand;
+
+		case ARGI_COMPUTEDATA:
+
+			/* Need an operand of type INTEGER, STRING or BUFFER */
+
+			switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+			case ACPI_TYPE_INTEGER:
+			case ACPI_TYPE_STRING:
+			case ACPI_TYPE_BUFFER:
+
+				/* Valid operand */
+				break;
+
+			default:
+				ACPI_ERROR((AE_INFO,
+					    "Needed [Integer/String/Buffer], found [%s] %p",
+					    acpi_ut_get_object_type_name
+					    (obj_desc), obj_desc));
+
+				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+			}
+			goto next_operand;
+
+		case ARGI_BUFFER_OR_STRING:
+
+			/* Need an operand of type STRING or BUFFER */
+
+			switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+			case ACPI_TYPE_STRING:
+			case ACPI_TYPE_BUFFER:
+
+				/* Valid operand */
+				break;
+
+			case ACPI_TYPE_INTEGER:
+
+				/* Highest priority conversion is to type Buffer */
+
+				status =
+				    acpi_ex_convert_to_buffer(obj_desc,
+							      stack_ptr);
+				if (ACPI_FAILURE(status)) {
+					return_ACPI_STATUS(status);
+				}
+
+				if (obj_desc != *stack_ptr) {
+					acpi_ut_remove_reference(obj_desc);
+				}
+				break;
+
+			default:
+				ACPI_ERROR((AE_INFO,
+					    "Needed [Integer/String/Buffer], found [%s] %p",
+					    acpi_ut_get_object_type_name
+					    (obj_desc), obj_desc));
+
+				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+			}
+			goto next_operand;
+
+		case ARGI_DATAOBJECT:
+			/*
+			 * ARGI_DATAOBJECT is only used by the size_of operator.
+			 * Need a buffer, string, package, or ref_of reference.
+			 *
+			 * The only reference allowed here is a direct reference to
+			 * a namespace node.
+			 */
+			switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+			case ACPI_TYPE_PACKAGE:
+			case ACPI_TYPE_STRING:
+			case ACPI_TYPE_BUFFER:
+			case ACPI_TYPE_LOCAL_REFERENCE:
+
+				/* Valid operand */
+				break;
+
+			default:
+				ACPI_ERROR((AE_INFO,
+					    "Needed [Buffer/String/Package/Reference], found [%s] %p",
+					    acpi_ut_get_object_type_name
+					    (obj_desc), obj_desc));
+
+				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+			}
+			goto next_operand;
+
+		case ARGI_COMPLEXOBJ:
+
+			/* Need a buffer or package or (ACPI 2.0) String */
+
+			switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+			case ACPI_TYPE_PACKAGE:
+			case ACPI_TYPE_STRING:
+			case ACPI_TYPE_BUFFER:
+
+				/* Valid operand */
+				break;
+
+			default:
+				ACPI_ERROR((AE_INFO,
+					    "Needed [Buffer/String/Package], found [%s] %p",
+					    acpi_ut_get_object_type_name
+					    (obj_desc), obj_desc));
+
+				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+			}
+			goto next_operand;
+
+		case ARGI_REGION_OR_BUFFER:	/* Used by Load() only */
+
+			/* Need an operand of type REGION or a BUFFER (which could be a resolved region field) */
+
+			switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+			case ACPI_TYPE_BUFFER:
+			case ACPI_TYPE_REGION:
+
+				/* Valid operand */
+				break;
+
+			default:
+				ACPI_ERROR((AE_INFO,
+					    "Needed [Region/Buffer], found [%s] %p",
+					    acpi_ut_get_object_type_name
+					    (obj_desc), obj_desc));
+
+				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+			}
+			goto next_operand;
+
+		case ARGI_DATAREFOBJ:
+
+			/* Used by the Store() operator only */
+
+			switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+			case ACPI_TYPE_INTEGER:
+			case ACPI_TYPE_PACKAGE:
+			case ACPI_TYPE_STRING:
+			case ACPI_TYPE_BUFFER:
+			case ACPI_TYPE_BUFFER_FIELD:
+			case ACPI_TYPE_LOCAL_REFERENCE:
+			case ACPI_TYPE_LOCAL_REGION_FIELD:
+			case ACPI_TYPE_LOCAL_BANK_FIELD:
+			case ACPI_TYPE_LOCAL_INDEX_FIELD:
+			case ACPI_TYPE_DDB_HANDLE:
+
+				/* Valid operand */
+				break;
+
+			default:
+
+				if (acpi_gbl_enable_interpreter_slack) {
+					/*
+					 * Enable original behavior of Store(), allowing any and all
+					 * objects as the source operand.  The ACPI spec does not
+					 * allow this, however.
+					 */
+					break;
+				}
+
+				if (target_op == AML_DEBUG_OP) {
+
+					/* Allow store of any object to the Debug object */
+
+					break;
+				}
+
+				ACPI_ERROR((AE_INFO,
+					    "Needed [Integer/Buffer/String/Package/Ref/Ddb], found [%s] %p",
+					    acpi_ut_get_object_type_name
+					    (obj_desc), obj_desc));
+
+				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+			}
+			goto next_operand;
+
+		default:
+
+			/* Unknown type */
+
+			ACPI_ERROR((AE_INFO,
+				    "Internal - Unknown ARGI (required operand) type %X",
+				    this_arg_type));
+
+			return_ACPI_STATUS(AE_BAD_PARAMETER);
+		}
+
+		/*
+		 * Make sure that the original object was resolved to the
+		 * required object type (Simple cases only).
+		 */
+		status = acpi_ex_check_object_type(type_needed,
+						   ACPI_GET_OBJECT_TYPE
+						   (*stack_ptr), *stack_ptr);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+	      next_operand:
+		/*
+		 * If more operands needed, decrement stack_ptr to point
+		 * to next operand on stack
+		 */
+		if (GET_CURRENT_ARG_TYPE(arg_types)) {
+			stack_ptr--;
+		}
+	}
+
+	ACPI_DUMP_OPERANDS(walk_state->operands,
+			   acpi_ps_get_opcode_name(opcode),
+			   walk_state->num_operands);
+
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
new file mode 100644
index 0000000..e35e9b4
--- /dev/null
+++ b/drivers/acpi/acpica/exstore.c
@@ -0,0 +1,716 @@
+
+/******************************************************************************
+ *
+ * Module Name: exstore - AML Interpreter object store support
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "amlcode.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exstore")
+
+/* Local prototypes */
+static void
+acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
+			u32 level, u32 index);
+
+static acpi_status
+acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
+			      union acpi_operand_object *dest_desc,
+			      struct acpi_walk_state *walk_state);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_do_debug_object
+ *
+ * PARAMETERS:  source_desc         - Value to be stored
+ *              Level               - Indentation level (used for packages)
+ *              Index               - Current package element, zero if not pkg
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Handles stores to the Debug Object.
+ *
+ ******************************************************************************/
+
+static void
+acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
+			u32 level, u32 index)
+{
+	u32 i;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
+
+	/* Print line header as long as we are not in the middle of an object display */
+
+	if (!((level > 0) && index == 0)) {
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s",
+				      level, " "));
+	}
+
+	/* Display index for package output only */
+
+	if (index > 0) {
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+				      "(%.2u) ", index - 1));
+	}
+
+	if (!source_desc) {
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[Null Object]\n"));
+		return_VOID;
+	}
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s ",
+				      acpi_ut_get_object_type_name
+				      (source_desc)));
+
+		if (!acpi_ut_valid_internal_object(source_desc)) {
+			ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+					      "%p, Invalid Internal Object!\n",
+					      source_desc));
+			return_VOID;
+		}
+	} else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
+		   ACPI_DESC_TYPE_NAMED) {
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s: %p\n",
+				      acpi_ut_get_type_name(((struct
+							      acpi_namespace_node
+							      *)source_desc)->
+							    type),
+				      source_desc));
+		return_VOID;
+	} else {
+		return_VOID;
+	}
+
+	/* source_desc is of type ACPI_DESC_TYPE_OPERAND */
+
+	switch (ACPI_GET_OBJECT_TYPE(source_desc)) {
+	case ACPI_TYPE_INTEGER:
+
+		/* Output correct integer width */
+
+		if (acpi_gbl_integer_byte_width == 4) {
+			ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%8.8X\n",
+					      (u32) source_desc->integer.
+					      value));
+		} else {
+			ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+					      "0x%8.8X%8.8X\n",
+					      ACPI_FORMAT_UINT64(source_desc->
+								 integer.
+								 value)));
+		}
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X]\n",
+				      (u32) source_desc->buffer.length));
+		ACPI_DUMP_BUFFER(source_desc->buffer.pointer,
+				 (source_desc->buffer.length <
+				  256) ? source_desc->buffer.length : 256);
+		break;
+
+	case ACPI_TYPE_STRING:
+
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X] \"%s\"\n",
+				      source_desc->string.length,
+				      source_desc->string.pointer));
+		break;
+
+	case ACPI_TYPE_PACKAGE:
+
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+				      "[Contains 0x%.2X Elements]\n",
+				      source_desc->package.count));
+
+		/* Output the entire contents of the package */
+
+		for (i = 0; i < source_desc->package.count; i++) {
+			acpi_ex_do_debug_object(source_desc->package.
+						elements[i], level + 4, i + 1);
+		}
+		break;
+
+	case ACPI_TYPE_LOCAL_REFERENCE:
+
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s] ",
+				      acpi_ut_get_reference_name(source_desc)));
+
+		/* Decode the reference */
+
+		switch (source_desc->reference.class) {
+		case ACPI_REFCLASS_INDEX:
+
+			ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%X\n",
+					      source_desc->reference.value));
+			break;
+
+		case ACPI_REFCLASS_TABLE:
+
+			ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+					      "Table Index 0x%X\n",
+					      source_desc->reference.value));
+			break;
+
+		default:
+			break;
+		}
+
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "  "));
+
+		/* Check for valid node first, then valid object */
+
+		if (source_desc->reference.node) {
+			if (ACPI_GET_DESCRIPTOR_TYPE
+			    (source_desc->reference.node) !=
+			    ACPI_DESC_TYPE_NAMED) {
+				ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+						      " %p - Not a valid namespace node\n",
+						      source_desc->reference.
+						      node));
+			} else {
+				ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+						      "Node %p [%4.4s] ",
+						      source_desc->reference.
+						      node,
+						      (source_desc->reference.
+						       node)->name.ascii));
+
+				switch ((source_desc->reference.node)->type) {
+
+					/* These types have no attached object */
+
+				case ACPI_TYPE_DEVICE:
+					acpi_os_printf("Device\n");
+					break;
+
+				case ACPI_TYPE_THERMAL:
+					acpi_os_printf("Thermal Zone\n");
+					break;
+
+				default:
+					acpi_ex_do_debug_object((source_desc->
+								 reference.
+								 node)->object,
+								level + 4, 0);
+					break;
+				}
+			}
+		} else if (source_desc->reference.object) {
+			if (ACPI_GET_DESCRIPTOR_TYPE
+			    (source_desc->reference.object) ==
+			    ACPI_DESC_TYPE_NAMED) {
+				acpi_ex_do_debug_object(((struct
+							  acpi_namespace_node *)
+							 source_desc->reference.
+							 object)->object,
+							level + 4, 0);
+			} else {
+				acpi_ex_do_debug_object(source_desc->reference.
+							object, level + 4, 0);
+			}
+		}
+		break;
+
+	default:
+
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p\n",
+				      source_desc));
+		break;
+	}
+
+	ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n"));
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_store
+ *
+ * PARAMETERS:  *source_desc        - Value to be stored
+ *              *dest_desc          - Where to store it.  Must be an NS node
+ *                                    or a union acpi_operand_object of type
+ *                                    Reference;
+ *              walk_state          - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Store the value described by source_desc into the location
+ *              described by dest_desc. Called by various interpreter
+ *              functions to store the result of an operation into
+ *              the destination operand -- not just simply the actual "Store"
+ *              ASL operator.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_store(union acpi_operand_object *source_desc,
+	      union acpi_operand_object *dest_desc,
+	      struct acpi_walk_state *walk_state)
+{
+	acpi_status status = AE_OK;
+	union acpi_operand_object *ref_desc = dest_desc;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_store, dest_desc);
+
+	/* Validate parameters */
+
+	if (!source_desc || !dest_desc) {
+		ACPI_ERROR((AE_INFO, "Null parameter"));
+		return_ACPI_STATUS(AE_AML_NO_OPERAND);
+	}
+
+	/* dest_desc can be either a namespace node or an ACPI object */
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(dest_desc) == ACPI_DESC_TYPE_NAMED) {
+		/*
+		 * Dest is a namespace node,
+		 * Storing an object into a Named node.
+		 */
+		status = acpi_ex_store_object_to_node(source_desc,
+						      (struct
+						       acpi_namespace_node *)
+						      dest_desc, walk_state,
+						      ACPI_IMPLICIT_CONVERSION);
+
+		return_ACPI_STATUS(status);
+	}
+
+	/* Destination object must be a Reference or a Constant object */
+
+	switch (ACPI_GET_OBJECT_TYPE(dest_desc)) {
+	case ACPI_TYPE_LOCAL_REFERENCE:
+		break;
+
+	case ACPI_TYPE_INTEGER:
+
+		/* Allow stores to Constants -- a Noop as per ACPI spec */
+
+		if (dest_desc->common.flags & AOPOBJ_AML_CONSTANT) {
+			return_ACPI_STATUS(AE_OK);
+		}
+
+		/*lint -fallthrough */
+
+	default:
+
+		/* Destination is not a Reference object */
+
+		ACPI_ERROR((AE_INFO,
+			    "Target is not a Reference or Constant object - %s [%p]",
+			    acpi_ut_get_object_type_name(dest_desc),
+			    dest_desc));
+
+		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+	}
+
+	/*
+	 * Examine the Reference class. These cases are handled:
+	 *
+	 * 1) Store to Name (Change the object associated with a name)
+	 * 2) Store to an indexed area of a Buffer or Package
+	 * 3) Store to a Method Local or Arg
+	 * 4) Store to the debug object
+	 */
+	switch (ref_desc->reference.class) {
+	case ACPI_REFCLASS_REFOF:
+
+		/* Storing an object into a Name "container" */
+
+		status = acpi_ex_store_object_to_node(source_desc,
+						      ref_desc->reference.
+						      object, walk_state,
+						      ACPI_IMPLICIT_CONVERSION);
+		break;
+
+	case ACPI_REFCLASS_INDEX:
+
+		/* Storing to an Index (pointer into a package or buffer) */
+
+		status =
+		    acpi_ex_store_object_to_index(source_desc, ref_desc,
+						  walk_state);
+		break;
+
+	case ACPI_REFCLASS_LOCAL:
+	case ACPI_REFCLASS_ARG:
+
+		/* Store to a method local/arg  */
+
+		status =
+		    acpi_ds_store_object_to_local(ref_desc->reference.class,
+						  ref_desc->reference.value,
+						  source_desc, walk_state);
+		break;
+
+	case ACPI_REFCLASS_DEBUG:
+
+		/*
+		 * Storing to the Debug object causes the value stored to be
+		 * displayed and otherwise has no effect -- see ACPI Specification
+		 */
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "**** Write to Debug Object: Object %p %s ****:\n\n",
+				  source_desc,
+				  acpi_ut_get_object_type_name(source_desc)));
+
+		acpi_ex_do_debug_object(source_desc, 0, 0);
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
+			    ref_desc->reference.class));
+		ACPI_DUMP_ENTRY(ref_desc, ACPI_LV_INFO);
+
+		status = AE_AML_INTERNAL;
+		break;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_store_object_to_index
+ *
+ * PARAMETERS:  *source_desc            - Value to be stored
+ *              *index_desc             - Reference to the Buffer or Package element that receives the value
+ *              walk_state              - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Store the object to indexed Buffer or Package element
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
+			      union acpi_operand_object *index_desc,
+			      struct acpi_walk_state *walk_state)
+{
+	acpi_status status = AE_OK;
+	union acpi_operand_object *obj_desc;
+	union acpi_operand_object *new_desc;
+	u8 value = 0;
+	u32 i;
+
+	ACPI_FUNCTION_TRACE(ex_store_object_to_index);
+
+	/*
+	 * Destination must be a reference pointer, and
+	 * must point to either a buffer or a package
+	 */
+	switch (index_desc->reference.target_type) {
+	case ACPI_TYPE_PACKAGE:
+		/*
+		 * Storing to a package element. Copy the object and replace
+		 * any existing object with the new object. No implicit
+		 * conversion is performed.
+		 *
+		 * The object at *(index_desc->Reference.Where) is the
+		 * element within the package that is to be modified.
+		 * The parent package object is at index_desc->Reference.Object
+		 */
+		obj_desc = *(index_desc->reference.where);
+
+		if (ACPI_GET_OBJECT_TYPE(source_desc) ==
+		    ACPI_TYPE_LOCAL_REFERENCE
+		    && source_desc->reference.class == ACPI_REFCLASS_TABLE) {
+
+			/* This is a DDBHandle, just add a reference to it */
+
+			acpi_ut_add_reference(source_desc);
+			new_desc = source_desc;
+		} else {
+			/* Normal object, copy it */
+
+			status =
+			    acpi_ut_copy_iobject_to_iobject(source_desc,
+							    &new_desc,
+							    walk_state);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+		}
+
+		if (obj_desc) {
+
+			/* Decrement reference count by the ref count of the parent package */
+
+			for (i = 0; i < ((union acpi_operand_object *)
+					 index_desc->reference.object)->common.
+			     reference_count; i++) {
+				acpi_ut_remove_reference(obj_desc);
+			}
+		}
+
+		*(index_desc->reference.where) = new_desc;
+
+		/* Increment the new object's ref count by (parent package ref count - 1) */
+
+		for (i = 1; i < ((union acpi_operand_object *)
+				 index_desc->reference.object)->common.
+		     reference_count; i++) {
+			acpi_ut_add_reference(new_desc);
+		}
+
+		break;
+
+	case ACPI_TYPE_BUFFER_FIELD:
+
+		/*
+		 * Store into a Buffer or String (not actually a real buffer_field)
+		 * at a location defined by an Index.
+		 *
+		 * The first 8-bit element of the source object is written to the
+		 * 8-bit Buffer location defined by the Index destination object,
+		 * according to the ACPI 2.0 specification.
+		 */
+
+		/*
+		 * Make sure the target is a Buffer or String. An error should
+		 * not happen here, since the reference_object was constructed
+		 * by the INDEX_OP code.
+		 */
+		obj_desc = index_desc->reference.object;
+		if ((ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_BUFFER) &&
+		    (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_STRING)) {
+			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+		}
+
+		/*
+		 * The assignment of the individual elements will be slightly
+		 * different for each source type.
+		 */
+		switch (ACPI_GET_OBJECT_TYPE(source_desc)) {
+		case ACPI_TYPE_INTEGER:
+
+			/* Use the least-significant byte of the integer */
+
+			value = (u8) (source_desc->integer.value);
+			break;
+
+		case ACPI_TYPE_BUFFER:
+		case ACPI_TYPE_STRING:
+
+			/* Note: Takes advantage of common string/buffer fields */
+
+			value = source_desc->buffer.pointer[0];
+			break;
+
+		default:
+
+			/* All other types are invalid */
+
+			ACPI_ERROR((AE_INFO,
+				    "Source must be Integer/Buffer/String type, not %s",
+				    acpi_ut_get_object_type_name(source_desc)));
+			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+		}
+
+		/* Store the source value into the target buffer byte */
+
+		obj_desc->buffer.pointer[index_desc->reference.value] = value;
+		break;
+
+	default:
+		ACPI_ERROR((AE_INFO, "Target is not a Package or BufferField"));
+		status = AE_AML_OPERAND_TYPE;
+		break;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_store_object_to_node
+ *
+ * PARAMETERS:  source_desc             - Value to be stored
+ *              Node                    - Named object to receive the value
+ *              walk_state              - Current walk state
+ *              implicit_conversion     - Perform implicit conversion (yes/no)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Store the object to the named object.
+ *
+ *              The assignment of an object to a named object is handled here.
+ *              The value passed in will replace the current value (if any).
+ *
+ *              When storing into an object the data is converted to the
+ *              target object type then stored in the object.  This means
+ *              that the target object type (for an initialized target) will
+ *              not be changed by a store operation.
+ *
+ *              Assumes parameters are already validated.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
+			     struct acpi_namespace_node *node,
+			     struct acpi_walk_state *walk_state,
+			     u8 implicit_conversion)
+{
+	acpi_status status = AE_OK;
+	union acpi_operand_object *target_desc;
+	union acpi_operand_object *new_desc;
+	acpi_object_type target_type;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_node, source_desc);
+
+	/* Get current type of the node, and object attached to Node */
+
+	target_type = acpi_ns_get_type(node);
+	target_desc = acpi_ns_get_attached_object(node);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Storing %p(%s) into node %p(%s)\n",
+			  source_desc,
+			  acpi_ut_get_object_type_name(source_desc), node,
+			  acpi_ut_get_type_name(target_type)));
+
+	/*
+	 * Resolve the source object to an actual value
+	 * (If it is a reference object)
+	 */
+	status = acpi_ex_resolve_object(&source_desc, target_type, walk_state);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* If no implicit conversion, drop into the default case below */
+
+	if ((!implicit_conversion) ||
+	    ((walk_state->opcode == AML_COPY_OP) &&
+	     (target_type != ACPI_TYPE_LOCAL_REGION_FIELD) &&
+	     (target_type != ACPI_TYPE_LOCAL_BANK_FIELD) &&
+	     (target_type != ACPI_TYPE_LOCAL_INDEX_FIELD))) {
+		/*
+		 * Force execution of default (no implicit conversion). Note:
+		 * copy_object does not perform an implicit conversion, as per the ACPI
+		 * spec -- except in case of region/bank/index fields -- because these
+		 * objects must retain their original type permanently.
+		 */
+		target_type = ACPI_TYPE_ANY;
+	}
+
+	/* Do the actual store operation */
+
+	switch (target_type) {
+	case ACPI_TYPE_BUFFER_FIELD:
+	case ACPI_TYPE_LOCAL_REGION_FIELD:
+	case ACPI_TYPE_LOCAL_BANK_FIELD:
+	case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+		/* For fields, copy the source data to the target field. */
+
+		status = acpi_ex_write_data_to_field(source_desc, target_desc,
+						     &walk_state->result_obj);
+		break;
+
+	case ACPI_TYPE_INTEGER:
+	case ACPI_TYPE_STRING:
+	case ACPI_TYPE_BUFFER:
+
+		/*
+		 * These target types are all of type Integer/String/Buffer, and
+		 * therefore support implicit conversion before the store.
+		 *
+		 * Copy and/or convert the source object to a new target object
+		 */
+		status =
+		    acpi_ex_store_object_to_object(source_desc, target_desc,
+						   &new_desc, walk_state);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		if (new_desc != target_desc) {
+			/*
+			 * Store new_desc as the new value of the Name, and set
+			 * the Name's type to that of the value being stored in it.
+			 * source_desc reference count is incremented by attach_object.
+			 *
+			 * Note: This may change the type of the node if an explicit store
+			 * has been performed such that the node/object type has been
+			 * changed.
+			 */
+			status =
+			    acpi_ns_attach_object(node, new_desc,
+						  new_desc->common.type);
+
+			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+					  "Store %s into %s via Convert/Attach\n",
+					  acpi_ut_get_object_type_name
+					  (source_desc),
+					  acpi_ut_get_object_type_name
+					  (new_desc)));
+		}
+		break;
+
+	default:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "Storing %s (%p) directly into node (%p) with no implicit conversion\n",
+				  acpi_ut_get_object_type_name(source_desc),
+				  source_desc, node));
+
+		/* No conversions for all other types.  Just attach the source object */
+
+		status = acpi_ns_attach_object(node, source_desc,
+					       ACPI_GET_OBJECT_TYPE
+					       (source_desc));
+		break;
+	}
+
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
new file mode 100644
index 0000000..145d153
--- /dev/null
+++ b/drivers/acpi/acpica/exstoren.c
@@ -0,0 +1,304 @@
+
+/******************************************************************************
+ *
+ * Module Name: exstoren - AML Interpreter object store support,
+ *                        Store to Node (namespace object)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exstoren")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_resolve_object
+ *
+ * PARAMETERS:  source_desc_ptr     - Pointer to the source object
+ *              target_type         - Current type of the target
+ *              walk_state          - Current walk state
+ *
+ * RETURN:      Status, resolved object in source_desc_ptr.
+ *
+ * DESCRIPTION: Resolve an object.  If the object is a reference, dereference
+ *              it and return the actual object in the source_desc_ptr.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
+		       acpi_object_type target_type,
+		       struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object *source_desc = *source_desc_ptr;
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(ex_resolve_object);
+
+	/* Ensure we have a Target that can be stored to */
+
+	switch (target_type) {
+	case ACPI_TYPE_BUFFER_FIELD:
+	case ACPI_TYPE_LOCAL_REGION_FIELD:
+	case ACPI_TYPE_LOCAL_BANK_FIELD:
+	case ACPI_TYPE_LOCAL_INDEX_FIELD:
+		/*
+		 * These cases all require only Integers or values that
+		 * can be converted to Integers (Strings or Buffers)
+		 */
+
+	case ACPI_TYPE_INTEGER:
+	case ACPI_TYPE_STRING:
+	case ACPI_TYPE_BUFFER:
+
+		/*
+		 * Stores into a Field/Region or into an Integer/Buffer/String
+		 * are all essentially the same.  This case handles the
+		 * "interchangeable" types Integer, String, and Buffer.
+		 */
+		if (ACPI_GET_OBJECT_TYPE(source_desc) ==
+		    ACPI_TYPE_LOCAL_REFERENCE) {
+
+			/* Resolve a reference object first */
+
+			status =
+			    acpi_ex_resolve_to_value(source_desc_ptr,
+						     walk_state);
+			if (ACPI_FAILURE(status)) {
+				break;
+			}
+		}
+
+		/* For copy_object, no further validation necessary */
+
+		if (walk_state->opcode == AML_COPY_OP) {
+			break;
+		}
+
+		/* Must have an Integer, Buffer, or String */
+
+		if ((ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_INTEGER) &&
+		    (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_BUFFER) &&
+		    (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_STRING) &&
+		    !((ACPI_GET_OBJECT_TYPE(source_desc) ==
+		       ACPI_TYPE_LOCAL_REFERENCE)
+		      && (source_desc->reference.class ==
+			  ACPI_REFCLASS_TABLE))) {
+
+			/* Resolution succeeded, but this is still not a valid source type */
+
+			ACPI_ERROR((AE_INFO,
+				    "Cannot assign type %s to %s (must be type Int/Str/Buf)",
+				    acpi_ut_get_object_type_name(source_desc),
+				    acpi_ut_get_type_name(target_type)));
+			status = AE_AML_OPERAND_TYPE;
+		}
+		break;
+
+	case ACPI_TYPE_LOCAL_ALIAS:
+	case ACPI_TYPE_LOCAL_METHOD_ALIAS:
+
+		/*
+		 * All aliases should have been resolved earlier, during the
+		 * operand resolution phase.
+		 */
+		ACPI_ERROR((AE_INFO, "Store into an unresolved Alias object"));
+		status = AE_AML_INTERNAL;
+		break;
+
+	case ACPI_TYPE_PACKAGE:
+	default:
+
+		/*
+		 * All types other than Aliases and the various Fields come here,
+		 * including the untyped case - ACPI_TYPE_ANY.
+		 */
+		break;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_store_object_to_object
+ *
+ * PARAMETERS:  source_desc         - Object to store
+ *              dest_desc           - Object to receive a copy of the source
+ *              new_desc            - New object if dest_desc is obsoleted
+ *              walk_state          - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: "Store" an object to another object.  This may include
+ *              converting the source type to the target type (implicit
+ *              conversion), and a copy of the value of the source to
+ *              the target.
+ *
+ *              The Assignment of an object to another (not named) object
+ *              is handled here.
+ *              The source passed in will replace the current value (if any).
+ *
+ *              When storing into an object the data is converted to the
+ *              target object type then stored in the object.  This means
+ *              that the target object type (for an initialized target) will
+ *              not be changed by a store operation.
+ *
+ *              This module allows destination types of Number, String,
+ *              Buffer, and Package.
+ *
+ *              Assumes parameters are already validated.  NOTE: source_desc
+ *              resolution (from a reference object) must be performed by
+ *              the caller if necessary.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_store_object_to_object(union acpi_operand_object *source_desc,
+			       union acpi_operand_object *dest_desc,
+			       union acpi_operand_object **new_desc,
+			       struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object *actual_src_desc;
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_object, source_desc);
+
+	actual_src_desc = source_desc;
+	if (!dest_desc) {
+		/*
+		 * There is no destination object (An uninitialized node or
+		 * package element), so we can simply copy the source object
+		 * creating a new destination object
+		 */
+		status =
+		    acpi_ut_copy_iobject_to_iobject(actual_src_desc, new_desc,
+						    walk_state);
+		return_ACPI_STATUS(status);
+	}
+
+	if (ACPI_GET_OBJECT_TYPE(source_desc) !=
+	    ACPI_GET_OBJECT_TYPE(dest_desc)) {
+		/*
+		 * The source type does not match the type of the destination.
+		 * Perform the "implicit conversion" of the source to the current type
+		 * of the target as per the ACPI specification.
+		 *
+		 * If no conversion performed, actual_src_desc = source_desc.
+		 * Otherwise, actual_src_desc is a temporary object to hold the
+		 * converted object.
+		 */
+		status =
+		    acpi_ex_convert_to_target_type(ACPI_GET_OBJECT_TYPE
+						   (dest_desc), source_desc,
+						   &actual_src_desc,
+						   walk_state);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		if (source_desc == actual_src_desc) {
+			/*
+			 * No conversion was performed. Return the source_desc as the
+			 * new object.
+			 */
+			*new_desc = source_desc;
+			return_ACPI_STATUS(AE_OK);
+		}
+	}
+
+	/*
+	 * We now have two objects of identical types, and we can perform a
+	 * copy of the *value* of the source object.
+	 */
+	switch (ACPI_GET_OBJECT_TYPE(dest_desc)) {
+	case ACPI_TYPE_INTEGER:
+
+		dest_desc->integer.value = actual_src_desc->integer.value;
+
+		/* Truncate value if we are executing from a 32-bit ACPI table */
+
+		acpi_ex_truncate_for32bit_table(dest_desc);
+		break;
+
+	case ACPI_TYPE_STRING:
+
+		status =
+		    acpi_ex_store_string_to_string(actual_src_desc, dest_desc);
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		status =
+		    acpi_ex_store_buffer_to_buffer(actual_src_desc, dest_desc);
+		break;
+
+	case ACPI_TYPE_PACKAGE:
+
+		status =
+		    acpi_ut_copy_iobject_to_iobject(actual_src_desc, &dest_desc,
+						    walk_state);
+		break;
+
+	default:
+		/*
+		 * All other types come here.
+		 */
+		ACPI_WARNING((AE_INFO, "Store into type %s not implemented",
+			      acpi_ut_get_object_type_name(dest_desc)));
+
+		status = AE_NOT_IMPLEMENTED;
+		break;
+	}
+
+	if (actual_src_desc != source_desc) {
+
+		/* Delete the intermediate (temporary) source object */
+
+		acpi_ut_remove_reference(actual_src_desc);
+	}
+
+	*new_desc = dest_desc;
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
new file mode 100644
index 0000000..67340cc
--- /dev/null
+++ b/drivers/acpi/acpica/exstorob.c
@@ -0,0 +1,209 @@
+
+/******************************************************************************
+ *
+ * Module Name: exstorob - AML Interpreter object store support, store to object
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exstorob")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_store_buffer_to_buffer
+ *
+ * PARAMETERS:  source_desc         - Source object to copy
+ *              target_desc         - Destination object of the copy
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Copy a buffer object to another buffer object.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
+			       union acpi_operand_object *target_desc)
+{
+	u32 length;
+	u8 *buffer;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_store_buffer_to_buffer, source_desc);
+
+	/* We know that source_desc is a buffer by now */
+
+	buffer = ACPI_CAST_PTR(u8, source_desc->buffer.pointer);
+	length = source_desc->buffer.length;
+
+	/*
+	 * If target is a buffer of length zero or is a static buffer,
+	 * allocate a new buffer of the proper length
+	 */
+	if ((target_desc->buffer.length == 0) ||
+	    (target_desc->common.flags & AOPOBJ_STATIC_POINTER)) {
+		target_desc->buffer.pointer = ACPI_ALLOCATE(length);
+		if (!target_desc->buffer.pointer) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		target_desc->buffer.length = length;
+	}
+
+	/* Copy source buffer to target buffer */
+
+	if (length <= target_desc->buffer.length) {
+
+		/* Clear existing buffer and copy in the new one */
+
+		ACPI_MEMSET(target_desc->buffer.pointer, 0,
+			    target_desc->buffer.length);
+		ACPI_MEMCPY(target_desc->buffer.pointer, buffer, length);
+
+#ifdef ACPI_OBSOLETE_BEHAVIOR
+		/*
+		 * NOTE: ACPI versions up to 3.0 specified that the buffer must be
+		 * truncated if the string is smaller than the buffer.  However, "other"
+		 * implementations of ACPI never did this and thus became the defacto
+		 * standard. ACPI 3.0_a changes this behavior such that the buffer
+		 * is no longer truncated.
+		 */
+
+		/*
+		 * OBSOLETE BEHAVIOR:
+		 * If the original source was a string, we must truncate the buffer,
+		 * according to the ACPI spec.  Integer-to-Buffer and Buffer-to-Buffer
+		 * copy must not truncate the original buffer.
+		 */
+		if (original_src_type == ACPI_TYPE_STRING) {
+
+			/* Set the new length of the target */
+
+			target_desc->buffer.length = length;
+		}
+#endif
+	} else {
+		/* Truncate the source, copy only what will fit */
+
+		ACPI_MEMCPY(target_desc->buffer.pointer, buffer,
+			    target_desc->buffer.length);
+
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "Truncating source buffer from %X to %X\n",
+				  length, target_desc->buffer.length));
+	}
+
+	/* Copy flags */
+
+	target_desc->buffer.flags = source_desc->buffer.flags;
+	target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
+	return_ACPI_STATUS(AE_OK);
+}
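
As an illustration only (not part of this patch), the fit-or-truncate rule above in isolation; copy_with_truncate is a hypothetical standalone helper, and the reallocation path taken for zero-length or static target buffers is omitted:

	/* Illustrative sketch only -- mirrors the copy rule, not kernel code */
	#include <string.h>

	static void copy_with_truncate(unsigned char *dst, unsigned int dst_len,
				       const unsigned char *src, unsigned int src_len)
	{
		if (src_len <= dst_len) {
			/* Source fits: clear the whole target, then copy it in */
			memset(dst, 0, dst_len);
			memcpy(dst, src, src_len);
		} else {
			/* Source too large: copy only what fits (truncate) */
			memcpy(dst, src, dst_len);
		}
	}
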
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_store_string_to_string
+ *
+ * PARAMETERS:  source_desc         - Source object to copy
+ *              target_desc         - Destination object of the copy
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Copy a String object to another String object
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
+			       union acpi_operand_object *target_desc)
+{
+	u32 length;
+	u8 *buffer;
+
+	ACPI_FUNCTION_TRACE_PTR(ex_store_string_to_string, source_desc);
+
+	/* We know that source_desc is a string by now */
+
+	buffer = ACPI_CAST_PTR(u8, source_desc->string.pointer);
+	length = source_desc->string.length;
+
+	/*
+	 * Replace existing string value if it will fit and the string
+	 * pointer is not a static pointer (part of an ACPI table)
+	 */
+	if ((length < target_desc->string.length) &&
+	    (!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) {
+		/*
+		 * String will fit in existing non-static buffer.
+		 * Clear old string and copy in the new one
+		 */
+		ACPI_MEMSET(target_desc->string.pointer, 0,
+			    (acpi_size) target_desc->string.length + 1);
+		ACPI_MEMCPY(target_desc->string.pointer, buffer, length);
+	} else {
+		/*
+		 * Free the current buffer, then allocate a new buffer
+		 * large enough to hold the value
+		 */
+		if (target_desc->string.pointer &&
+		    (!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) {
+
+			/* Only free if not a pointer into the DSDT */
+
+			ACPI_FREE(target_desc->string.pointer);
+		}
+
+		target_desc->string.pointer = ACPI_ALLOCATE_ZEROED((acpi_size)
+								   length + 1);
+		if (!target_desc->string.pointer) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
+		ACPI_MEMCPY(target_desc->string.pointer, buffer, length);
+	}
+
+	/* Set the new target length */
+
+	target_desc->string.length = length;
+	return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
new file mode 100644
index 0000000..3d00b93
--- /dev/null
+++ b/drivers/acpi/acpica/exsystem.c
@@ -0,0 +1,303 @@
+
+/******************************************************************************
+ *
+ * Module Name: exsystem - Interface to OS services
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exsystem")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_system_wait_semaphore
+ *
+ * PARAMETERS:  Semaphore       - Semaphore to wait on
+ *              Timeout         - Max time to wait
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Implements a semaphore wait with a check to see if the
+ *              semaphore is available immediately.  If it is not, the
+ *              interpreter is released before waiting.
+ *
+ ******************************************************************************/
+acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);
+
+	status = acpi_os_wait_semaphore(semaphore, 1, ACPI_DO_NOT_WAIT);
+	if (ACPI_SUCCESS(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	if (status == AE_TIME) {
+
+		/* We must wait, so unlock the interpreter */
+
+		acpi_ex_relinquish_interpreter();
+
+		status = acpi_os_wait_semaphore(semaphore, 1, timeout);
+
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "*** Thread awake after blocking, %s\n",
+				  acpi_format_exception(status)));
+
+		/* Reacquire the interpreter */
+
+		acpi_ex_reacquire_interpreter();
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_system_wait_mutex
+ *
+ * PARAMETERS:  Mutex           - Mutex to wait on
+ *              Timeout         - Max time to wait
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Implements a mutex wait with a check to see if the
+ *              mutex is available immediately.  If it is not, the
+ *              interpreter is released before waiting.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ex_system_wait_mutex);
+
+	status = acpi_os_acquire_mutex(mutex, ACPI_DO_NOT_WAIT);
+	if (ACPI_SUCCESS(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	if (status == AE_TIME) {
+
+		/* We must wait, so unlock the interpreter */
+
+		acpi_ex_relinquish_interpreter();
+
+		status = acpi_os_acquire_mutex(mutex, timeout);
+
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "*** Thread awake after blocking, %s\n",
+				  acpi_format_exception(status)));
+
+		/* Reacquire the interpreter */
+
+		acpi_ex_reacquire_interpreter();
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_system_do_stall
+ *
+ * PARAMETERS:  how_long        - The amount of time to stall,
+ *                                in microseconds
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Suspend running thread for specified amount of time.
+ *              Note: ACPI specification requires that Stall() does not
+ *              relinquish the processor, and delays longer than 100 usec
+ *              should use Sleep() instead.  We allow stalls up to 255 usec
+ *              for compatibility with other interpreters and existing BIOSs.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_system_do_stall(u32 how_long)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_ENTRY();
+
+	if (how_long > 255) {	/* 255 microseconds */
+		/*
+		 * Longer than 255 usec, this is an error
+		 *
+		 * (ACPI specifies 100 usec as max, but this gives some slack in
+		 * order to support existing BIOSs)
+		 */
+		ACPI_ERROR((AE_INFO, "Time parameter is too large (%d)",
+			    how_long));
+		status = AE_AML_OPERAND_VALUE;
+	} else {
+		acpi_os_stall(how_long);
+	}
+
+	return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_system_do_suspend
+ *
+ * PARAMETERS:  how_long        - The amount of time to suspend,
+ *                                in milliseconds
+ *
+ * RETURN:      Status (always AE_OK)
+ *
+ * DESCRIPTION: Suspend running thread for specified amount of time.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_system_do_suspend(acpi_integer how_long)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	/* Since this thread will sleep, we must release the interpreter */
+
+	acpi_ex_relinquish_interpreter();
+
+	acpi_os_sleep(how_long);
+
+	/* And now we must get the interpreter again */
+
+	acpi_ex_reacquire_interpreter();
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_system_signal_event
+ *
+ * PARAMETERS:  obj_desc        - The object descriptor for this op
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Provides an access point to perform synchronization operations
+ *              within the AML.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_system_signal_event(union acpi_operand_object * obj_desc)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(ex_system_signal_event);
+
+	if (obj_desc) {
+		status =
+		    acpi_os_signal_semaphore(obj_desc->event.os_semaphore, 1);
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_system_wait_event
+ *
+ * PARAMETERS:  time_desc       - The 'time to delay' object descriptor
+ *              obj_desc        - The object descriptor for this op
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Provides an access point to perform synchronization operations
+ *              within the AML.  This operation is a request to wait for an
+ *              event.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_system_wait_event(union acpi_operand_object *time_desc,
+			  union acpi_operand_object *obj_desc)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(ex_system_wait_event);
+
+	if (obj_desc) {
+		status =
+		    acpi_ex_system_wait_semaphore(obj_desc->event.os_semaphore,
+						  (u16) time_desc->integer.
+						  value);
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_system_reset_event
+ *
+ * PARAMETERS:  obj_desc        - The object descriptor for this op
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Reset an event to a known state.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc)
+{
+	acpi_status status = AE_OK;
+	acpi_semaphore temp_semaphore;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/*
+	 * We are going to simply delete the existing semaphore and
+	 * create a new one!
+	 */
+	status =
+	    acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0, &temp_semaphore);
+	if (ACPI_SUCCESS(status)) {
+		(void)acpi_os_delete_semaphore(obj_desc->event.os_semaphore);
+		obj_desc->event.os_semaphore = temp_semaphore;
+	}
+
+	return (status);
+}
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
new file mode 100644
index 0000000..32b85d6
--- /dev/null
+++ b/drivers/acpi/acpica/exutils.c
@@ -0,0 +1,421 @@
+
+/******************************************************************************
+ *
+ * Module Name: exutils - interpreter/scanner utilities
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+/*
+ * DEFINE_AML_GLOBALS is tested in amlcode.h
+ * to determine whether certain global names should be "defined" or only
+ * "declared" in the current compilation.  This enhances maintainability
+ * by enabling a single header file to embody all knowledge of the names
+ * in question.
+ *
+ * Exactly one module of any executable should #define DEFINE_GLOBALS
+ * before #including the header files which use this convention.  The
+ * names in question will be defined and initialized in that module,
+ * and declared as extern in all other modules which #include those
+ * header files.
+ */
+
+#define DEFINE_AML_GLOBALS
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlcode.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exutils")
+
+/* Local prototypes */
+static u32 acpi_ex_digits_needed(acpi_integer value, u32 base);
+
+#ifndef ACPI_NO_METHOD_EXECUTION
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_enter_interpreter
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Enter the interpreter execution region. Failure to enter
+ *              the interpreter region is a fatal system error. Used in
+ *              conjunction with exit_interpreter.
+ *
+ ******************************************************************************/
+
+void acpi_ex_enter_interpreter(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ex_enter_interpreter);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
+	if (ACPI_FAILURE(status)) {
+		ACPI_ERROR((AE_INFO,
+			    "Could not acquire AML Interpreter mutex"));
+	}
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_reacquire_interpreter
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Reacquire the interpreter execution region from within the
+ *              interpreter code. Failure to enter the interpreter region is a
+ *              fatal system error. Used in conjunction with
+ *              relinquish_interpreter
+ *
+ ******************************************************************************/
+
+void acpi_ex_reacquire_interpreter(void)
+{
+	ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);
+
+	/*
+	 * If the global serialized flag is set, do not acquire the interpreter,
+	 * since it was not actually released by acpi_ex_relinquish_interpreter.
+	 * This forces the interpreter to be single threaded.
+	 */
+	if (!acpi_gbl_all_methods_serialized) {
+		acpi_ex_enter_interpreter();
+	}
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_exit_interpreter
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Exit the interpreter execution region. This is the top level
+ *              routine used to exit the interpreter when all processing has
+ *              been completed.
+ *
+ ******************************************************************************/
+
+void acpi_ex_exit_interpreter(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ex_exit_interpreter);
+
+	status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
+	if (ACPI_FAILURE(status)) {
+		ACPI_ERROR((AE_INFO,
+			    "Could not release AML Interpreter mutex"));
+	}
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_relinquish_interpreter
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Exit the interpreter execution region, from within the
+ *              interpreter - before attempting an operation that will possibly
+ *              block the running thread.
+ *
+ * Cases where the interpreter is unlocked internally
+ *      1) Method to be blocked on a Sleep() AML opcode
+ *      2) Method to be blocked on an Acquire() AML opcode
+ *      3) Method to be blocked on a Wait() AML opcode
+ *      4) Method to be blocked to acquire the global lock
+ *      5) Method to be blocked waiting to execute a serialized control method
+ *          that is currently executing
+ *      6) About to invoke a user-installed opregion handler
+ *
+ ******************************************************************************/
+
+void acpi_ex_relinquish_interpreter(void)
+{
+	ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);
+
+	/*
+	 * If the global serialized flag is set, do not release the interpreter.
+	 * This forces the interpreter to be single threaded.
+	 */
+	if (!acpi_gbl_all_methods_serialized) {
+		acpi_ex_exit_interpreter();
+	}
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_truncate_for32bit_table
+ *
+ * PARAMETERS:  obj_desc        - Object to be truncated
+ *
+ * RETURN:      none
+ *
+ * DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
+ *              32-bit, as determined by the revision of the DSDT.
+ *
+ ******************************************************************************/
+
+void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc)
+{
+
+	ACPI_FUNCTION_ENTRY();
+
+	/*
+	 * Object must be a valid number and we must be executing a control
+	 * method. The descriptor may instead be an NS node in the case of
+	 * AML_INT_NAMEPATH_OP, which is also rejected here.
+	 */
+	if ((!obj_desc) ||
+	    (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) ||
+	    (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER)) {
+		return;
+	}
+
+	if (acpi_gbl_integer_byte_width == 4) {
+		/*
+		 * We are running a method that exists in a 32-bit ACPI table.
+		 * Truncate the value to 32 bits by zeroing out the upper 32 bits.
+		 */
+		obj_desc->integer.value &= (acpi_integer) ACPI_UINT32_MAX;
+	}
+}
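
As an illustration only (not part of this patch), the effect of the truncation above when acpi_gbl_integer_byte_width == 4; the constant is an arbitrary example value:

	unsigned long long value = 0x1122334455667788ULL;

	value &= 0xFFFFFFFFULL;		/* 32-bit table: value is now 0x55667788 */

With 64-bit integers (DSDT revision 2 or later) the value is left unchanged.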
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_acquire_global_lock
+ *
+ * PARAMETERS:  field_flags           - Flags with Lock rule:
+ *                                      always_lock or never_lock
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Obtain the ACPI hardware Global Lock, only if the field
+ *              flags specify that it is to be obtained before field access.
+ *
+ ******************************************************************************/
+
+void acpi_ex_acquire_global_lock(u32 field_flags)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ex_acquire_global_lock);
+
+	/* Only use the lock if the always_lock bit is set */
+
+	if (!(field_flags & AML_FIELD_LOCK_RULE_MASK)) {
+		return_VOID;
+	}
+
+	/* Attempt to get the global lock, wait forever */
+
+	status = acpi_ex_acquire_mutex_object(ACPI_WAIT_FOREVER,
+					      acpi_gbl_global_lock_mutex,
+					      acpi_os_get_thread_id());
+
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"Could not acquire Global Lock"));
+	}
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_release_global_lock
+ *
+ * PARAMETERS:  field_flags           - Flags with Lock rule:
+ *                                      always_lock or never_lock
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Release the ACPI hardware Global Lock
+ *
+ ******************************************************************************/
+
+void acpi_ex_release_global_lock(u32 field_flags)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ex_release_global_lock);
+
+	/* Only use the lock if the always_lock bit is set */
+
+	if (!(field_flags & AML_FIELD_LOCK_RULE_MASK)) {
+		return_VOID;
+	}
+
+	/* Release the global lock */
+
+	status = acpi_ex_release_mutex_object(acpi_gbl_global_lock_mutex);
+	if (ACPI_FAILURE(status)) {
+
+		/* Report the error, but there isn't much else we can do */
+
+		ACPI_EXCEPTION((AE_INFO, status,
+				"Could not release Global Lock"));
+	}
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_digits_needed
+ *
+ * PARAMETERS:  Value           - Value to be represented
+ *              Base            - Base of representation
+ *
+ * RETURN:      The number of digits.
+ *
+ * DESCRIPTION: Calculate the number of digits needed to represent the Value
+ *              in the given Base (Radix)
+ *
+ ******************************************************************************/
+
+static u32 acpi_ex_digits_needed(acpi_integer value, u32 base)
+{
+	u32 num_digits;
+	acpi_integer current_value;
+
+	ACPI_FUNCTION_TRACE(ex_digits_needed);
+
+	/* acpi_integer is unsigned, so we don't worry about a '-' prefix */
+
+	if (value == 0) {
+		return_UINT32(1);
+	}
+
+	current_value = value;
+	num_digits = 0;
+
+	/* Count the digits in the requested base */
+
+	while (current_value) {
+		(void)acpi_ut_short_divide(current_value, base, &current_value,
+					   NULL);
+		num_digits++;
+	}
+
+	return_UINT32(num_digits);
+}
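
As an illustration only (not part of this patch), the same digit count expressed with plain integer division; digits_needed here is a hypothetical standalone equivalent of the routine above:

	static unsigned int digits_needed(unsigned long long value, unsigned int base)
	{
		unsigned int num_digits = 1;	/* value == 0 still needs one digit */

		while (value >= base) {
			value /= base;
			num_digits++;
		}
		return num_digits;		/* e.g. digits_needed(40000, 10) == 5 */
	}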
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_eisa_id_to_string
+ *
+ * PARAMETERS:  numeric_id      - EISA ID to be converted
+ *              out_string      - Where to put the converted string (8 bytes)
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Convert a numeric EISA ID to string representation
+ *
+ ******************************************************************************/
+
+void acpi_ex_eisa_id_to_string(u32 numeric_id, char *out_string)
+{
+	u32 eisa_id;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/* Swap ID to big-endian to get contiguous bits */
+
+	eisa_id = acpi_ut_dword_byte_swap(numeric_id);
+
+	out_string[0] = (char)('@' + (((unsigned long)eisa_id >> 26) & 0x1f));
+	out_string[1] = (char)('@' + ((eisa_id >> 21) & 0x1f));
+	out_string[2] = (char)('@' + ((eisa_id >> 16) & 0x1f));
+	out_string[3] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 12);
+	out_string[4] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 8);
+	out_string[5] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 4);
+	out_string[6] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 0);
+	out_string[7] = 0;
+}
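
A worked example, for illustration only (not part of this patch); the input is the well-known compressed EISA ID integer for PNP0A03:

	char id[8];	/* 7 characters plus the terminating NUL */

	/*
	 * _HID value 0x030AD041: after the byte swap, eisa_id == 0x41D00A03.
	 * Bits 30:26 = 0x10 -> 'P', bits 25:21 = 0x0E -> 'N',
	 * bits 20:16 = 0x10 -> 'P', and the low 16 bits 0x0A03 become "0A03".
	 */
	acpi_ex_eisa_id_to_string(0x030AD041, id);	/* id is now "PNP0A03" */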
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_unsigned_integer_to_string
+ *
+ * PARAMETERS:  Value           - Value to be converted
+ *              out_string      - Where to put the converted string; must be
+ *                                large enough for the decimal result
+ *
+ * RETURN:      None (converted string is returned in out_string)
+ *
+ * DESCRIPTION: Convert a number to string representation. Assumes string
+ *              buffer is large enough to hold the string.
+ *
+ ******************************************************************************/
+
+void acpi_ex_unsigned_integer_to_string(acpi_integer value, char *out_string)
+{
+	u32 count;
+	u32 digits_needed;
+	u32 remainder;
+
+	ACPI_FUNCTION_ENTRY();
+
+	digits_needed = acpi_ex_digits_needed(value, 10);
+	out_string[digits_needed] = 0;
+
+	for (count = digits_needed; count > 0; count--) {
+		(void)acpi_ut_short_divide(value, 10, &value, &remainder);
+		out_string[count - 1] = (char)('0' + remainder);
+	}
+}
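
Example usage, for illustration only (not part of this patch):

	char buf[21];	/* enough for any 64-bit value in decimal, plus NUL */

	/*
	 * Five digits are never needed for 1234: the loop fills the string from
	 * the least-significant digit backwards, so buf[3] = '4', buf[2] = '3',
	 * buf[1] = '2', buf[0] = '1', and buf[4] is the terminating NUL.
	 */
	acpi_ex_unsigned_integer_to_string(1234, buf);	/* buf == "1234" */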
+
+#endif
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
new file mode 100644
index 0000000..a9d4fea
--- /dev/null
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -0,0 +1,185 @@
+
+/******************************************************************************
+ *
+ * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT          ACPI_HARDWARE
+ACPI_MODULE_NAME("hwacpi")
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_set_mode
+ *
+ * PARAMETERS:  Mode            - SYS_MODE_ACPI or SYS_MODE_LEGACY
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Transitions the system into the requested mode.
+ *
+ ******************************************************************************/
+acpi_status acpi_hw_set_mode(u32 mode)
+{
+
+	acpi_status status;
+	u32 retry;
+
+	ACPI_FUNCTION_TRACE(hw_set_mode);
+
+	/*
+	 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
+	 * system does not support mode transition.
+	 */
+	if (!acpi_gbl_FADT.smi_command) {
+		ACPI_ERROR((AE_INFO,
+			    "No SMI_CMD in FADT, mode transition failed"));
+		return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+	}
+
+	/*
+	 * ACPI 2.0 clarified the meaning of ACPI_ENABLE and ACPI_DISABLE
+	 * in FADT: If it is zero, enabling or disabling is not supported.
+	 * Since some old systems may have used zero here even though transitions
+	 * are supported, require both values to be zero before concluding that
+	 * mode transitions are unsupported.
+	 */
+	if (!acpi_gbl_FADT.acpi_enable && !acpi_gbl_FADT.acpi_disable) {
+		ACPI_ERROR((AE_INFO,
+			    "No ACPI mode transition supported in this system (enable/disable both zero)"));
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	switch (mode) {
+	case ACPI_SYS_MODE_ACPI:
+
+		/* BIOS should have disabled ALL fixed and GP events */
+
+		status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+					    (u32) acpi_gbl_FADT.acpi_enable, 8);
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "Attempting to enable ACPI mode\n"));
+		break;
+
+	case ACPI_SYS_MODE_LEGACY:
+
+		/*
+		 * BIOS should clear all fixed status bits and restore fixed event
+		 * enable bits to default
+		 */
+		status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+					    (u32) acpi_gbl_FADT.acpi_disable,
+					    8);
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "Attempting to enable Legacy (non-ACPI) mode\n"));
+		break;
+
+	default:
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"Could not write ACPI mode change"));
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Some hardware takes a LONG time to switch modes. Give them 3 sec to
+	 * do so, but allow faster systems to proceed more quickly.
+	 */
+	retry = 3000;
+	while (retry) {
+		if (acpi_hw_get_mode() == mode) {
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+					  "Mode %X successfully enabled\n",
+					  mode));
+			return_ACPI_STATUS(AE_OK);
+		}
+		acpi_os_stall(1000);
+		retry--;
+	}
+
+	ACPI_ERROR((AE_INFO, "Hardware did not change modes"));
+	return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_get_mode
+ *
+ * PARAMETERS:  none
+ *
+ * RETURN:      SYS_MODE_ACPI or SYS_MODE_LEGACY
+ *
+ * DESCRIPTION: Return current operating state of system.  Determined by
+ *              querying the SCI_EN bit.
+ *
+ ******************************************************************************/
+
+u32 acpi_hw_get_mode(void)
+{
+	acpi_status status;
+	u32 value;
+
+	ACPI_FUNCTION_TRACE(hw_get_mode);
+
+	/*
+	 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
+	 * system does not support mode transition.
+	 */
+	if (!acpi_gbl_FADT.smi_command) {
+		return_UINT32(ACPI_SYS_MODE_ACPI);
+	}
+
+	status = acpi_get_register(ACPI_BITREG_SCI_ENABLE, &value);
+	if (ACPI_FAILURE(status)) {
+		return_UINT32(ACPI_SYS_MODE_LEGACY);
+	}
+
+	if (value) {
+		return_UINT32(ACPI_SYS_MODE_ACPI);
+	} else {
+		return_UINT32(ACPI_SYS_MODE_LEGACY);
+	}
+}
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
new file mode 100644
index 0000000..2013b66
--- /dev/null
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -0,0 +1,469 @@
+
+/******************************************************************************
+ *
+ * Module Name: hwgpe - Low level GPE enable/disable/clear functions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+
+#define _COMPONENT          ACPI_HARDWARE
+ACPI_MODULE_NAME("hwgpe")
+
+/* Local prototypes */
+static acpi_status
+acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+				struct acpi_gpe_block_info *gpe_block,
+				void *context);
+
+/******************************************************************************
+ *
+ * FUNCTION:	acpi_hw_low_disable_gpe
+ *
+ * PARAMETERS:	gpe_event_info	    - Info block for the GPE to be disabled
+ *
+ * RETURN:	Status
+ *
+ * DESCRIPTION: Disable a single GPE in the enable register.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+{
+	struct acpi_gpe_register_info *gpe_register_info;
+	acpi_status status;
+	u32 enable_mask;
+
+	/* Get the info block for the entire GPE register */
+
+	gpe_register_info = gpe_event_info->register_info;
+	if (!gpe_register_info) {
+		return (AE_NOT_EXIST);
+	}
+
+	/* Get current value of the enable register that contains this GPE */
+
+	status = acpi_read(&enable_mask, &gpe_register_info->enable_address);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* Clear just the bit that corresponds to this GPE */
+
+	ACPI_CLEAR_BIT(enable_mask,
+		       ((u32) 1 <<
+			(gpe_event_info->gpe_number -
+			 gpe_register_info->base_gpe_number)));
+
+	/* Write the updated enable mask */
+
+	status = acpi_write(enable_mask, &gpe_register_info->enable_address);
+	return (status);
+}
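+
+/*
+ * Worked example for the mask computation above (values assumed for
+ * illustration): in a GPE block whose base_gpe_number is 0x10, disabling
+ * GPE 0x14 clears bit (0x14 - 0x10) = 4, i.e. enable_mask &= ~0x10, before
+ * the updated mask is written back to the enable register.
+ */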
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_write_gpe_enable_reg
+ *
+ * PARAMETERS:  gpe_event_info      - Info block for the GPE to be enabled
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Write a GPE enable register.  Note: The bit for this GPE must
+ *              already be cleared or set in the parent register
+ *              enable_for_run mask.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info)
+{
+	struct acpi_gpe_register_info *gpe_register_info;
+	acpi_status status;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/* Get the info block for the entire GPE register */
+
+	gpe_register_info = gpe_event_info->register_info;
+	if (!gpe_register_info) {
+		return (AE_NOT_EXIST);
+	}
+
+	/* Write the entire GPE (runtime) enable register */
+
+	status = acpi_write(gpe_register_info->enable_for_run,
+			    &gpe_register_info->enable_address);
+
+	return (status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_clear_gpe
+ *
+ * PARAMETERS:  gpe_event_info      - Info block for the GPE to be cleared
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Clear the status bit for a single GPE.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
+{
+	acpi_status status;
+	u8 register_bit;
+
+	ACPI_FUNCTION_ENTRY();
+
+	register_bit = (u8)
+	    (1 <<
+	     (gpe_event_info->gpe_number -
+	      gpe_event_info->register_info->base_gpe_number));
+
+	/*
+	 * Write a one to the appropriate bit in the status register to
+	 * clear this GPE.
+	 */
+	status = acpi_write(register_bit,
+			    &gpe_event_info->register_info->status_address);
+
+	return (status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_get_gpe_status
+ *
+ * PARAMETERS:  gpe_event_info      - Info block for the GPE to be queried
+ *              event_status        - Where the GPE status is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Return the status of a single GPE.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
+		       acpi_event_status * event_status)
+{
+	u32 in_byte;
+	u8 register_bit;
+	struct acpi_gpe_register_info *gpe_register_info;
+	acpi_status status;
+	acpi_event_status local_event_status = 0;
+
+	ACPI_FUNCTION_ENTRY();
+
+	if (!event_status) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	/* Get the info block for the entire GPE register */
+
+	gpe_register_info = gpe_event_info->register_info;
+
+	/* Get the register bitmask for this GPE */
+
+	register_bit = (u8)
+	    (1 <<
+	     (gpe_event_info->gpe_number -
+	      gpe_event_info->register_info->base_gpe_number));
+
+	/* GPE currently enabled? (enabled for runtime?) */
+
+	if (register_bit & gpe_register_info->enable_for_run) {
+		local_event_status |= ACPI_EVENT_FLAG_ENABLED;
+	}
+
+	/* GPE enabled for wake? */
+
+	if (register_bit & gpe_register_info->enable_for_wake) {
+		local_event_status |= ACPI_EVENT_FLAG_WAKE_ENABLED;
+	}
+
+	/* GPE currently active (status bit == 1)? */
+
+	status = acpi_read(&in_byte, &gpe_register_info->status_address);
+	if (ACPI_FAILURE(status)) {
+		goto unlock_and_exit;
+	}
+
+	if (register_bit & in_byte) {
+		local_event_status |= ACPI_EVENT_FLAG_SET;
+	}
+
+	/* Set return value */
+
+	(*event_status) = local_event_status;
+
+      unlock_and_exit:
+	return (status);
+}
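+
+/*
+ * Illustrative caller-side use (not part of this file): the returned
+ * acpi_event_status is a bitmask, so individual flags can be tested
+ * directly; handle_pending_gpe() is a hypothetical handler.
+ *
+ *	acpi_event_status evt;
+ *
+ *	status = acpi_hw_get_gpe_status(gpe_event_info, &evt);
+ *	if (ACPI_SUCCESS(status) && (evt & ACPI_EVENT_FLAG_SET))
+ *		handle_pending_gpe(gpe_event_info);
+ *
+ * ACPI_EVENT_FLAG_ENABLED and ACPI_EVENT_FLAG_WAKE_ENABLED can be tested
+ * the same way.
+ */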
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_disable_gpe_block
+ *
+ * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
+ *              gpe_block           - Gpe Block info
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Disable all GPEs within a single GPE block
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+			  struct acpi_gpe_block_info *gpe_block, void *context)
+{
+	u32 i;
+	acpi_status status;
+
+	/* Examine each GPE Register within the block */
+
+	for (i = 0; i < gpe_block->register_count; i++) {
+
+		/* Disable all GPEs in this register */
+
+		status =
+		    acpi_write(0x00,
+			       &gpe_block->register_info[i].enable_address);
+		if (ACPI_FAILURE(status)) {
+			return (status);
+		}
+	}
+
+	return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_clear_gpe_block
+ *
+ * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
+ *              gpe_block           - Gpe Block info
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Clear status bits for all GPEs within a single GPE block
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+			struct acpi_gpe_block_info *gpe_block, void *context)
+{
+	u32 i;
+	acpi_status status;
+
+	/* Examine each GPE Register within the block */
+
+	for (i = 0; i < gpe_block->register_count; i++) {
+
+		/* Clear status on all GPEs in this register */
+
+		status =
+		    acpi_write(0xFF,
+			       &gpe_block->register_info[i].status_address);
+		if (ACPI_FAILURE(status)) {
+			return (status);
+		}
+	}
+
+	return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_enable_runtime_gpe_block
+ *
+ * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
+ *              gpe_block           - Gpe Block info
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Enable all "runtime" GPEs within a single GPE block. Includes
+ *              combination wake/run GPEs.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+				 struct acpi_gpe_block_info *gpe_block, void *context)
+{
+	u32 i;
+	acpi_status status;
+
+	/* NOTE: assumes that all GPEs are currently disabled */
+
+	/* Examine each GPE Register within the block */
+
+	for (i = 0; i < gpe_block->register_count; i++) {
+		if (!gpe_block->register_info[i].enable_for_run) {
+			continue;
+		}
+
+		/* Enable all "runtime" GPEs in this register */
+
+		status = acpi_write(gpe_block->register_info[i].enable_for_run,
+				    &gpe_block->register_info[i].
+				    enable_address);
+		if (ACPI_FAILURE(status)) {
+			return (status);
+		}
+	}
+
+	return (AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_enable_wakeup_gpe_block
+ *
+ * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
+ *              gpe_block           - Gpe Block info
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Enable all "wake" GPEs within a single GPE block. Includes
+ *              combination wake/run GPEs.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+				struct acpi_gpe_block_info *gpe_block,
+				void *context)
+{
+	u32 i;
+	acpi_status status;
+
+	/* Examine each GPE Register within the block */
+
+	for (i = 0; i < gpe_block->register_count; i++) {
+		if (!gpe_block->register_info[i].enable_for_wake) {
+			continue;
+		}
+
+		/* Enable all "wake" GPEs in this register */
+
+		status = acpi_write(gpe_block->register_info[i].enable_for_wake,
+				    &gpe_block->register_info[i].
+				    enable_address);
+		if (ACPI_FAILURE(status)) {
+			return (status);
+		}
+	}
+
+	return (AE_OK);
+}
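+
+/*
+ * The acpi_hw_*_all_* entry points below are thin wrappers: each passes the
+ * matching per-block handler above to acpi_ev_walk_gpe_list(), which invokes
+ * it once for every GPE block on every GPE interrupt level.
+ */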
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_disable_all_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_disable_all_gpes(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(hw_disable_all_gpes);
+
+	status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);
+	status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL);
+	return_ACPI_STATUS(status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_enable_all_runtime_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_enable_all_runtime_gpes(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(hw_enable_all_runtime_gpes);
+
+	status = acpi_ev_walk_gpe_list(acpi_hw_enable_runtime_gpe_block, NULL);
+	return_ACPI_STATUS(status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_enable_all_wakeup_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Enable all "wakeup" GPEs, in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_enable_all_wakeup_gpes(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(hw_enable_all_wakeup_gpes);
+
+	status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block, NULL);
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
new file mode 100644
index 0000000..4dc43b0
--- /dev/null
+++ b/drivers/acpi/acpica/hwregs.c
@@ -0,0 +1,353 @@
+
+/*******************************************************************************
+ *
+ * Module Name: hwregs - Read/write access functions for the various ACPI
+ *                       control and status registers.
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acevents.h"
+
+#define _COMPONENT          ACPI_HARDWARE
+ACPI_MODULE_NAME("hwregs")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_clear_acpi_status
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Clears all fixed and general purpose status bits
+ *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
+ *
+ ******************************************************************************/
+acpi_status acpi_hw_clear_acpi_status(void)
+{
+	acpi_status status;
+	acpi_cpu_flags lock_flags = 0;
+
+	ACPI_FUNCTION_TRACE(hw_clear_acpi_status);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n",
+			  ACPI_BITMASK_ALL_FIXED_STATUS,
+			  (u16) acpi_gbl_FADT.xpm1a_event_block.address));
+
+	lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+
+	status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
+					ACPI_BITMASK_ALL_FIXED_STATUS);
+	if (ACPI_FAILURE(status)) {
+		goto unlock_and_exit;
+	}
+
+	/* Clear the fixed events */
+
+	if (acpi_gbl_FADT.xpm1b_event_block.address) {
+		status = acpi_write(ACPI_BITMASK_ALL_FIXED_STATUS,
+				    &acpi_gbl_FADT.xpm1b_event_block);
+		if (ACPI_FAILURE(status)) {
+			goto unlock_and_exit;
+		}
+	}
+
+	/* Clear the GPE Bits in all GPE registers in all GPE blocks */
+
+	status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL);
+
+      unlock_and_exit:
+	acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_get_bit_register_info
+ *
+ * PARAMETERS:  register_id         - Index of ACPI Register to access
+ *
+ * RETURN:      The bit_register_info struct for the register, or NULL if
+ *              the register_id is invalid
+ *
+ * DESCRIPTION: Map register_id into a bit_register_info structure (parent
+ *              register, bit position, and access mask).
+ *
+ ******************************************************************************/
+
+struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	if (register_id > ACPI_BITREG_MAX) {
+		ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: %X",
+			    register_id));
+		return (NULL);
+	}
+
+	return (&acpi_gbl_bit_register_info[register_id]);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_register_read
+ *
+ * PARAMETERS:  register_id         - ACPI Register ID
+ *              return_value        - Where the register value is returned
+ *
+ * RETURN:      Status and the value read.
+ *
+ * DESCRIPTION: Read from the specified ACPI register
+ *
+ ******************************************************************************/
+acpi_status
+acpi_hw_register_read(u32 register_id, u32 * return_value)
+{
+	u32 value1 = 0;
+	u32 value2 = 0;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(hw_register_read);
+
+	switch (register_id) {
+	case ACPI_REGISTER_PM1_STATUS:	/* 16-bit access */
+
+		status = acpi_read(&value1, &acpi_gbl_FADT.xpm1a_event_block);
+		if (ACPI_FAILURE(status)) {
+			goto exit;
+		}
+
+		/* PM1B is optional */
+
+		status = acpi_read(&value2, &acpi_gbl_FADT.xpm1b_event_block);
+		value1 |= value2;
+		break;
+
+	case ACPI_REGISTER_PM1_ENABLE:	/* 16-bit access */
+
+		status = acpi_read(&value1, &acpi_gbl_xpm1a_enable);
+		if (ACPI_FAILURE(status)) {
+			goto exit;
+		}
+
+		/* PM1B is optional */
+
+		status = acpi_read(&value2, &acpi_gbl_xpm1b_enable);
+		value1 |= value2;
+		break;
+
+	case ACPI_REGISTER_PM1_CONTROL:	/* 16-bit access */
+
+		status = acpi_read(&value1, &acpi_gbl_FADT.xpm1a_control_block);
+		if (ACPI_FAILURE(status)) {
+			goto exit;
+		}
+
+		status = acpi_read(&value2, &acpi_gbl_FADT.xpm1b_control_block);
+		value1 |= value2;
+		break;
+
+	case ACPI_REGISTER_PM2_CONTROL:	/* 8-bit access */
+
+		status = acpi_read(&value1, &acpi_gbl_FADT.xpm2_control_block);
+		break;
+
+	case ACPI_REGISTER_PM_TIMER:	/* 32-bit access */
+
+		status = acpi_read(&value1, &acpi_gbl_FADT.xpm_timer_block);
+		break;
+
+	case ACPI_REGISTER_SMI_COMMAND_BLOCK:	/* 8-bit access */
+
+		status =
+		    acpi_os_read_port(acpi_gbl_FADT.smi_command, &value1, 8);
+		break;
+
+	default:
+		ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id));
+		status = AE_BAD_PARAMETER;
+		break;
+	}
+
+      exit:
+
+	if (ACPI_SUCCESS(status)) {
+		*return_value = value1;
+	}
+
+	return_ACPI_STATUS(status);
+}
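+
+/*
+ * Worked example for the PM1 A/B combining above (register values assumed):
+ * if PM1A status reads 0x0001 (TMR_STS) and the optional PM1B status reads
+ * 0x8000 (WAK_STS), the logical OR returns 0x8001, so the caller sees both
+ * events regardless of which block reported them.
+ */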
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_register_write
+ *
+ * PARAMETERS:  register_id         - ACPI Register ID
+ *              Value               - The value to write
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Write to the specified ACPI register
+ *
+ * NOTE: In accordance with the ACPI specification, this function automatically
+ * preserves the value of the following bits, meaning that these bits cannot be
+ * changed via this interface:
+ *
+ * PM1_CONTROL[0] = SCI_EN
+ * PM1_CONTROL[9]
+ * PM1_STATUS[11]
+ *
+ * ACPI References:
+ * 1) Hardware Ignored Bits: When software writes to a register with ignored
+ *      bit fields, it preserves the ignored bit fields
+ * 2) SCI_EN: OSPM always preserves this bit position
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_register_write(u32 register_id, u32 value)
+{
+	acpi_status status;
+	u32 read_value;
+
+	ACPI_FUNCTION_TRACE(hw_register_write);
+
+	switch (register_id) {
+	case ACPI_REGISTER_PM1_STATUS:	/* 16-bit access */
+
+		/* Perform a read first to preserve certain bits (per ACPI spec) */
+
+		status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS,
+					       &read_value);
+		if (ACPI_FAILURE(status)) {
+			goto exit;
+		}
+
+		/* Insert the bits to be preserved */
+
+		ACPI_INSERT_BITS(value, ACPI_PM1_STATUS_PRESERVED_BITS,
+				 read_value);
+
+		/* Now we can write the data */
+
+		status = acpi_write(value, &acpi_gbl_FADT.xpm1a_event_block);
+		if (ACPI_FAILURE(status)) {
+			goto exit;
+		}
+
+		/* PM1B is optional */
+
+		status = acpi_write(value, &acpi_gbl_FADT.xpm1b_event_block);
+		break;
+
+	case ACPI_REGISTER_PM1_ENABLE:	/* 16-bit access */
+
+		status = acpi_write(value, &acpi_gbl_xpm1a_enable);
+		if (ACPI_FAILURE(status)) {
+			goto exit;
+		}
+
+		/* PM1B is optional */
+
+		status = acpi_write(value, &acpi_gbl_xpm1b_enable);
+		break;
+
+	case ACPI_REGISTER_PM1_CONTROL:	/* 16-bit access */
+
+		/*
+		 * Perform a read first to preserve certain bits (per ACPI spec)
+		 */
+		status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
+					       &read_value);
+		if (ACPI_FAILURE(status)) {
+			goto exit;
+		}
+
+		/* Insert the bits to be preserved */
+
+		ACPI_INSERT_BITS(value, ACPI_PM1_CONTROL_PRESERVED_BITS,
+				 read_value);
+
+		/* Now we can write the data */
+
+		status = acpi_write(value, &acpi_gbl_FADT.xpm1a_control_block);
+		if (ACPI_FAILURE(status)) {
+			goto exit;
+		}
+
+		status = acpi_write(value, &acpi_gbl_FADT.xpm1b_control_block);
+		break;
+
+	case ACPI_REGISTER_PM1A_CONTROL:	/* 16-bit access */
+
+		status = acpi_write(value, &acpi_gbl_FADT.xpm1a_control_block);
+		break;
+
+	case ACPI_REGISTER_PM1B_CONTROL:	/* 16-bit access */
+
+		status = acpi_write(value, &acpi_gbl_FADT.xpm1b_control_block);
+		break;
+
+	case ACPI_REGISTER_PM2_CONTROL:	/* 8-bit access */
+
+		status = acpi_write(value, &acpi_gbl_FADT.xpm2_control_block);
+		break;
+
+	case ACPI_REGISTER_PM_TIMER:	/* 32-bit access */
+
+		status = acpi_write(value, &acpi_gbl_FADT.xpm_timer_block);
+		break;
+
+	case ACPI_REGISTER_SMI_COMMAND_BLOCK:	/* 8-bit access */
+
+		/* SMI_CMD is currently always in IO space */
+
+		status =
+		    acpi_os_write_port(acpi_gbl_FADT.smi_command, value, 8);
+		break;
+
+	default:
+		status = AE_BAD_PARAMETER;
+		break;
+	}
+
+      exit:
+	return_ACPI_STATUS(status);
+}
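+
+/*
+ * Illustration of the preserve-on-write path above (values assumed): asking
+ * to write 0x2000 (SLP_EN) to PM1_CONTROL while the register currently reads
+ * 0x0001 (SCI_EN set) makes ACPI_INSERT_BITS() carry the preserved SCI_EN
+ * bit into the outgoing value, so 0x2001 reaches the hardware and ACPI mode
+ * is not accidentally disabled.
+ */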
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
new file mode 100644
index 0000000..a2af2a4
--- /dev/null
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -0,0 +1,629 @@
+
+/******************************************************************************
+ *
+ * Name: hwsleep.c - ACPI Hardware Sleep/Wake Interface
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "actables.h"
+
+#define _COMPONENT          ACPI_HARDWARE
+ACPI_MODULE_NAME("hwsleep")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_set_firmware_waking_vector
+ *
+ * PARAMETERS:  physical_address    - 32-bit physical address of ACPI real mode
+ *                                    entry point.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS
+ *
+ ******************************************************************************/
+acpi_status
+acpi_set_firmware_waking_vector(u32 physical_address)
+{
+	ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
+
+
+	/*
+	 * According to the ACPI specification 2.0c and later, the 64-bit
+	 * waking vector should be cleared and the 32-bit waking vector should
+	 * be used, unless we want the wake-up code to be called by the BIOS in
+	 * Protected Mode.  Some systems (for example HP dv5-1004nr) are known
+	 * to fail to resume if the 64-bit vector is used.
+	 */
+
+	/* Set the 32-bit vector */
+
+	acpi_gbl_FACS->firmware_waking_vector = physical_address;
+
+	/* Clear the 64-bit vector if it exists */
+
+	if ((acpi_gbl_FACS->length > 32) && (acpi_gbl_FACS->version >= 1)) {
+		acpi_gbl_FACS->xfirmware_waking_vector = 0;
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_set_firmware_waking_vector64
+ *
+ * PARAMETERS:  physical_address    - 64-bit physical address of ACPI protected
+ *                                    mode entry point.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Sets the 64-bit X_firmware_waking_vector field of the FACS, if
+ *              it exists in the table.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_set_firmware_waking_vector64(u64 physical_address)
+{
+	ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64);
+
+
+	/* Determine if the 64-bit vector actually exists */
+
+	if ((acpi_gbl_FACS->length <= 32) || (acpi_gbl_FACS->version < 1)) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	/* Clear 32-bit vector, set the 64-bit X_ vector */
+
+	acpi_gbl_FACS->firmware_waking_vector = 0;
+	acpi_gbl_FACS->xfirmware_waking_vector = physical_address;
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_enter_sleep_state_prep
+ *
+ * PARAMETERS:  sleep_state         - Which sleep state to enter
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Prepare to enter a system sleep state (see ACPI 2.0 spec p 231)
+ *              This function must execute with interrupts enabled.
+ *              We break sleeping into 2 stages so that OSPM can handle
+ *              various OS-specific tasks between the two steps.
+ *
+ ******************************************************************************/
+acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
+{
+	acpi_status status;
+	struct acpi_object_list arg_list;
+	union acpi_object arg;
+
+	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);
+
+	/*
+	 * _PSW methods could be run here to enable wake-on keyboard, LAN, etc.
+	 */
+	status = acpi_get_sleep_type_data(sleep_state,
+					  &acpi_gbl_sleep_type_a,
+					  &acpi_gbl_sleep_type_b);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Setup parameter object */
+
+	arg_list.count = 1;
+	arg_list.pointer = &arg;
+
+	arg.type = ACPI_TYPE_INTEGER;
+	arg.integer.value = sleep_state;
+
+	/* Run the _PTS method */
+
+	status = acpi_evaluate_object(NULL, METHOD_NAME__PTS, &arg_list, NULL);
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Setup the argument to _SST */
+
+	switch (sleep_state) {
+	case ACPI_STATE_S0:
+		arg.integer.value = ACPI_SST_WORKING;
+		break;
+
+	case ACPI_STATE_S1:
+	case ACPI_STATE_S2:
+	case ACPI_STATE_S3:
+		arg.integer.value = ACPI_SST_SLEEPING;
+		break;
+
+	case ACPI_STATE_S4:
+		arg.integer.value = ACPI_SST_SLEEP_CONTEXT;
+		break;
+
+	default:
+		arg.integer.value = ACPI_SST_INDICATOR_OFF;	/* Default is off */
+		break;
+	}
+
+	/*
+	 * Set the system indicators to show the desired sleep state.
+	 * _SST is an optional method (return no error if not found)
+	 */
+	status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"While executing method _SST"));
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_enter_sleep_state
+ *
+ * PARAMETERS:  sleep_state         - Which sleep state to enter
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231)
+ *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
+ *
+ ******************************************************************************/
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
+{
+	u32 PM1Acontrol;
+	u32 PM1Bcontrol;
+	struct acpi_bit_register_info *sleep_type_reg_info;
+	struct acpi_bit_register_info *sleep_enable_reg_info;
+	u32 in_value;
+	struct acpi_object_list arg_list;
+	union acpi_object arg;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);
+
+	if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
+	    (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
+		ACPI_ERROR((AE_INFO, "Sleep values out of range: A=%X B=%X",
+			    acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
+		return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
+	}
+
+	sleep_type_reg_info =
+	    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE_A);
+	sleep_enable_reg_info =
+	    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_ENABLE);
+
+	/* Clear wake status */
+
+	status = acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Clear all fixed and general purpose status bits */
+
+	status = acpi_hw_clear_acpi_status();
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * 1) Disable/Clear all GPEs
+	 * 2) Enable all wakeup GPEs
+	 */
+	status = acpi_hw_disable_all_gpes();
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+	acpi_gbl_system_awake_and_running = FALSE;
+
+	status = acpi_hw_enable_all_wakeup_gpes();
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Execute the _GTS method */
+
+	arg_list.count = 1;
+	arg_list.pointer = &arg;
+	arg.type = ACPI_TYPE_INTEGER;
+	arg.integer.value = sleep_state;
+
+	status = acpi_evaluate_object(NULL, METHOD_NAME__GTS, &arg_list, NULL);
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Get current value of PM1A control */
+
+	status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, &PM1Acontrol);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+			  "Entering sleep state [S%d]\n", sleep_state));
+
+	/* Clear SLP_EN and SLP_TYP fields */
+
+	PM1Acontrol &= ~(sleep_type_reg_info->access_bit_mask |
+			 sleep_enable_reg_info->access_bit_mask);
+	PM1Bcontrol = PM1Acontrol;
+
+	/* Insert SLP_TYP bits */
+
+	PM1Acontrol |=
+	    (acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position);
+	PM1Bcontrol |=
+	    (acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position);
+
+	/*
+	 * We split the writes of SLP_TYP and SLP_EN to work around
+	 * poorly implemented hardware.
+	 */
+
+	/* Write #1: fill in SLP_TYP data */
+
+	status = acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
+					PM1Acontrol);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_hw_register_write(ACPI_REGISTER_PM1B_CONTROL,
+					PM1Bcontrol);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Insert SLP_ENABLE bit */
+
+	PM1Acontrol |= sleep_enable_reg_info->access_bit_mask;
+	PM1Bcontrol |= sleep_enable_reg_info->access_bit_mask;
+
+	/* Write #2: SLP_TYP + SLP_EN */
+
+	ACPI_FLUSH_CPU_CACHE();
+
+	status = acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
+					PM1Acontrol);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_hw_register_write(ACPI_REGISTER_PM1B_CONTROL,
+					PM1Bcontrol);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	if (sleep_state > ACPI_STATE_S3) {
+		/*
+		 * We wanted to sleep > S3, but it didn't happen (by virtue of the
+		 * fact that we are still executing!)
+		 *
+		 * Wait ten seconds, then try again. This is to get S4/S5 to work on
+		 * all machines.
+		 *
+		 * We wait so long to allow chipsets that poll this reg very slowly to
+		 * still read the right value. Ideally, this block would go
+		 * away entirely.
+		 */
+		acpi_os_stall(10000000);
+
+		status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
+						sleep_enable_reg_info->
+						access_bit_mask);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/* Wait until we enter sleep state */
+
+	do {
+		status = acpi_get_register_unlocked(ACPI_BITREG_WAKE_STATUS,
+						    &in_value);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		/* Spin until we wake */
+
+	} while (!in_value);
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
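+
+/*
+ * Worked example for the two-stage write in acpi_enter_sleep_state() (the
+ * SLP_TYP value 5 is an assumption; the real value is board-specific and
+ * comes from the _Sx package): with sleep_type_a = 5, write #1 sets the
+ * SLP_TYP field (bits 12:10) to 5 << 10 = 0x1400 with SLP_EN still clear;
+ * write #2 then adds SLP_EN (bit 13, 0x2000), producing 0x3400, and the
+ * hardware starts the transition only on that second write.
+ */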
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_enter_sleep_state_s4bios
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Perform an S4BIOS request.
+ *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
+ *
+ ******************************************************************************/
+acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
+{
+	u32 in_value;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
+
+	status = acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_hw_clear_acpi_status();
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * 1) Disable/Clear all GPEs
+	 * 2) Enable all wakeup GPEs
+	 */
+	status = acpi_hw_disable_all_gpes();
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+	acpi_gbl_system_awake_and_running = FALSE;
+
+	status = acpi_hw_enable_all_wakeup_gpes();
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	ACPI_FLUSH_CPU_CACHE();
+
+	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+				    (u32) acpi_gbl_FADT.S4bios_request, 8);
+
+	do {
+		acpi_os_stall(1000);
+		status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	} while (!in_value);
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_leave_sleep_state_prep
+ *
+ * PARAMETERS:  sleep_state         - Which sleep state we are exiting
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Perform the first stage of OS-independent ACPI cleanup after a
+ *              sleep.
+ *              Called with interrupts DISABLED.
+ *
+ ******************************************************************************/
+acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
+{
+	struct acpi_object_list arg_list;
+	union acpi_object arg;
+	acpi_status status;
+	struct acpi_bit_register_info *sleep_type_reg_info;
+	struct acpi_bit_register_info *sleep_enable_reg_info;
+	u32 PM1Acontrol;
+	u32 PM1Bcontrol;
+
+	ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
+
+	/*
+	 * Set SLP_TYPE and SLP_EN to state S0.
+	 * This is unclear from the ACPI Spec, but it is required
+	 * by some machines.
+	 */
+	status = acpi_get_sleep_type_data(ACPI_STATE_S0,
+					  &acpi_gbl_sleep_type_a,
+					  &acpi_gbl_sleep_type_b);
+	if (ACPI_SUCCESS(status)) {
+		sleep_type_reg_info =
+		    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE_A);
+		sleep_enable_reg_info =
+		    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_ENABLE);
+
+		/* Get current value of PM1A control */
+
+		status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
+					       &PM1Acontrol);
+		if (ACPI_SUCCESS(status)) {
+
+			/* Clear SLP_EN and SLP_TYP fields */
+
+			PM1Acontrol &= ~(sleep_type_reg_info->access_bit_mask |
+					 sleep_enable_reg_info->
+					 access_bit_mask);
+			PM1Bcontrol = PM1Acontrol;
+
+			/* Insert SLP_TYP bits */
+
+			PM1Acontrol |=
+			    (acpi_gbl_sleep_type_a << sleep_type_reg_info->
+			     bit_position);
+			PM1Bcontrol |=
+			    (acpi_gbl_sleep_type_b << sleep_type_reg_info->
+			     bit_position);
+
+			/* Just ignore any errors */
+
+			(void)acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
+						     PM1Acontrol);
+			(void)acpi_hw_register_write(ACPI_REGISTER_PM1B_CONTROL,
+						     PM1Bcontrol);
+		}
+	}
+
+	/* Execute the _BFS method */
+
+	arg_list.count = 1;
+	arg_list.pointer = &arg;
+	arg.type = ACPI_TYPE_INTEGER;
+	arg.integer.value = sleep_state;
+
+	status = acpi_evaluate_object(NULL, METHOD_NAME__BFS, &arg_list, NULL);
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		ACPI_EXCEPTION((AE_INFO, status, "During Method _BFS"));
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_leave_sleep_state
+ *
+ * PARAMETERS:  sleep_state         - Which sleep state we just exited
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Perform OS-independent ACPI cleanup after a sleep
+ *              Called with interrupts ENABLED.
+ *
+ ******************************************************************************/
+acpi_status acpi_leave_sleep_state(u8 sleep_state)
+{
+	struct acpi_object_list arg_list;
+	union acpi_object arg;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
+
+	/* Ensure enter_sleep_state_prep -> enter_sleep_state ordering */
+
+	acpi_gbl_sleep_type_a = ACPI_SLEEP_TYPE_INVALID;
+
+	/* Setup parameter object */
+
+	arg_list.count = 1;
+	arg_list.pointer = &arg;
+	arg.type = ACPI_TYPE_INTEGER;
+
+	/* Ignore any errors from these methods */
+
+	arg.integer.value = ACPI_SST_WAKING;
+	status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		ACPI_EXCEPTION((AE_INFO, status, "During Method _SST"));
+	}
+
+	/*
+	 * GPEs must be enabled before _WAK is called as GPEs
+	 * might get fired there
+	 *
+	 * Restore the GPEs:
+	 * 1) Disable/Clear all GPEs
+	 * 2) Enable all runtime GPEs
+	 */
+	status = acpi_hw_disable_all_gpes();
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+	status = acpi_hw_enable_all_runtime_gpes();
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	arg.integer.value = sleep_state;
+	status = acpi_evaluate_object(NULL, METHOD_NAME__WAK, &arg_list, NULL);
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		ACPI_EXCEPTION((AE_INFO, status, "During Method _WAK"));
+	}
+	/* TBD: _WAK "sometimes" returns stuff - do we want to look at it? */
+
+	/*
+	 * Some BIOSes assume that WAK_STS will be cleared on resume and use
+	 * it to determine whether the system is rebooting or resuming. Clear
+	 * it for compatibility.
+	 */
+	acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
+
+	acpi_gbl_system_awake_and_running = TRUE;
+
+	/* Enable power button */
+
+	(void)
+	    acpi_set_register(acpi_gbl_fixed_event_info
+			      [ACPI_EVENT_POWER_BUTTON].enable_register_id, 1);
+
+	(void)
+	    acpi_set_register(acpi_gbl_fixed_event_info
+			      [ACPI_EVENT_POWER_BUTTON].status_register_id, 1);
+
+	arg.integer.value = ACPI_SST_WORKING;
+	status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		ACPI_EXCEPTION((AE_INFO, status, "During Method _SST"));
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state)
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
new file mode 100644
index 0000000..b7f522c
--- /dev/null
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -0,0 +1,188 @@
+
+/******************************************************************************
+ *
+ * Name: hwtimer.c - ACPI Power Management Timer Interface
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT          ACPI_HARDWARE
+ACPI_MODULE_NAME("hwtimer")
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_get_timer_resolution
+ *
+ * PARAMETERS:  Resolution          - Where the resolution is returned
+ *
+ * RETURN:      Status and timer resolution
+ *
+ * DESCRIPTION: Obtains resolution of the ACPI PM Timer (24 or 32 bits).
+ *
+ ******************************************************************************/
+acpi_status acpi_get_timer_resolution(u32 * resolution)
+{
+	ACPI_FUNCTION_TRACE(acpi_get_timer_resolution);
+
+	if (!resolution) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
+		*resolution = 24;
+	} else {
+		*resolution = 32;
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_timer_resolution)
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_get_timer
+ *
+ * PARAMETERS:  Ticks               - Where the timer value is returned
+ *
+ * RETURN:      Status and current timer value (ticks)
+ *
+ * DESCRIPTION: Obtains current value of ACPI PM Timer (in ticks).
+ *
+ ******************************************************************************/
+acpi_status acpi_get_timer(u32 * ticks)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_get_timer);
+
+	if (!ticks) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status =
+	    acpi_hw_low_level_read(32, ticks, &acpi_gbl_FADT.xpm_timer_block);
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_timer)
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_get_timer_duration
+ *
+ * PARAMETERS:  start_ticks         - Starting timestamp
+ *              end_ticks           - End timestamp
+ *              time_elapsed        - Where the elapsed time is returned
+ *
+ * RETURN:      Status and time_elapsed
+ *
+ * DESCRIPTION: Computes the time elapsed (in microseconds) between two
+ *              PM Timer time stamps, taking into account the possibility of
+ *              rollovers, the timer resolution, and timer frequency.
+ *
+ *              The PM Timer's clock ticks at roughly 3.6 times per
+ *              _microsecond_, and its clock continues through Cx state
+ *              transitions (unlike many CPU timestamp counters) -- making it
+ *              a versatile and accurate timer.
+ *
+ *              Note that this function accommodates only a single timer
+ *              rollover.  Thus for 24-bit timers, this function should only
+ *              be used for calculating durations less than ~4.6 seconds
+ *              (~20 minutes for 32-bit timers) -- calculations below:
+ *
+ *              2**24 Ticks / 3,600,000 Ticks/Sec = 4.66 sec
+ *              2**32 Ticks / 3,600,000 Ticks/Sec = 1193 sec or 19.88 minutes
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
+{
+	acpi_status status;
+	u32 delta_ticks;
+	acpi_integer quotient;
+
+	ACPI_FUNCTION_TRACE(acpi_get_timer_duration);
+
+	if (!time_elapsed) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/*
+	 * Compute Tick Delta:
+	 * Handle (max one) timer rollovers on 24-bit versus 32-bit timers.
+	 */
+	if (start_ticks < end_ticks) {
+		delta_ticks = end_ticks - start_ticks;
+	} else if (start_ticks > end_ticks) {
+		if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
+
+			/* 24-bit Timer */
+
+			delta_ticks =
+			    (((0x00FFFFFF - start_ticks) +
+			      end_ticks) & 0x00FFFFFF);
+		} else {
+			/* 32-bit Timer */
+
+			delta_ticks = (0xFFFFFFFF - start_ticks) + end_ticks;
+		}
+	} else {		/* start_ticks == end_ticks */
+
+		*time_elapsed = 0;
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/*
+	 * Compute Duration (Requires a 64-bit multiply and divide):
+	 *
+	 * time_elapsed = (delta_ticks * 1000000) / PM_TIMER_FREQUENCY;
+	 */
+	status = acpi_ut_short_divide(((u64) delta_ticks) * 1000000,
+				      PM_TIMER_FREQUENCY, &quotient, NULL);
+
+	*time_elapsed = (u32) quotient;
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_timer_duration)
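+
+/*
+ * Worked example (tick values assumed): with start_ticks = 0x00FFFF00 and
+ * end_ticks = 0x00000200 on a 24-bit timer, the rollover branch above gives
+ * delta_ticks = (0x00FFFFFF - 0x00FFFF00) + 0x200 = 0x2FF = 767 ticks, and
+ * 767 * 1000000 / 3579545 (PM_TIMER_FREQUENCY) is roughly 214 microseconds.
+ */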
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
new file mode 100644
index 0000000..ae597c0
--- /dev/null
+++ b/drivers/acpi/acpica/hwxface.c
@@ -0,0 +1,593 @@
+
+/******************************************************************************
+ *
+ * Module Name: hwxface - Public ACPICA hardware interfaces
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_HARDWARE
+ACPI_MODULE_NAME("hwxface")
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_reset
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Write the reset value to the reset register in memory or IO
+ *              space. Note: a reset register in PCI config space is not
+ *              supported here and must be handled separately.
+ *
+ ******************************************************************************/
+acpi_status acpi_reset(void)
+{
+	struct acpi_generic_address *reset_reg;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_reset);
+
+	reset_reg = &acpi_gbl_FADT.reset_register;
+
+	/* Check if the reset register is supported */
+
+	if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) ||
+	    !reset_reg->address) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	/* Write the reset value to the reset register */
+
+	status = acpi_write(acpi_gbl_FADT.reset_value, reset_reg);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_reset)
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_read
+ *
+ * PARAMETERS:  Value               - Where the value is returned
+ *              Reg                 - GAS register structure
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Read from either memory or IO space.
+ *
+ ******************************************************************************/
+acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg)
+{
+	u32 width;
+	u64 address;
+	acpi_status status;
+
+	ACPI_FUNCTION_NAME(acpi_read);
+
+	/*
+	 * Must have a valid pointer to a GAS structure, and
+	 * a non-zero address within. However, don't return an error
+	 * because the PM1A/B code must not fail if B isn't present.
+	 */
+	if (!reg) {
+		return (AE_OK);
+	}
+
+	/* Get a local copy of the address. Handles possible alignment issues */
+
+	ACPI_MOVE_64_TO_64(&address, &reg->address);
+	if (!address) {
+		return (AE_OK);
+	}
+
+	/* Supported widths are 8/16/32 */
+
+	width = reg->bit_width;
+	if ((width != 8) && (width != 16) && (width != 32)) {
+		return (AE_SUPPORT);
+	}
+
+	/* Initialize entire 32-bit return value to zero */
+
+	*value = 0;
+
+	/*
+	 * Two address spaces supported: Memory or IO.
+	 * PCI_Config is not supported here because the GAS struct is insufficient
+	 */
+	switch (reg->space_id) {
+	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+
+		status = acpi_os_read_memory((acpi_physical_address) address,
+					     value, width);
+		break;
+
+	case ACPI_ADR_SPACE_SYSTEM_IO:
+
+		status =
+		    acpi_os_read_port((acpi_io_address) address, value, width);
+		break;
+
+	default:
+		ACPI_ERROR((AE_INFO,
+			    "Unsupported address space: %X", reg->space_id));
+		return (AE_BAD_PARAMETER);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_IO,
+			  "Read:  %8.8X width %2d from %8.8X%8.8X (%s)\n",
+			  *value, width, ACPI_FORMAT_UINT64(address),
+			  acpi_ut_get_region_name(reg->space_id)));
+
+	return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_read)
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_write
+ *
+ * PARAMETERS:  Value               - To be written
+ *              Reg                 - GAS register structure
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Write to either memory or IO space.
+ *
+ ******************************************************************************/
+acpi_status acpi_write(u32 value, struct acpi_generic_address *reg)
+{
+	u32 width;
+	u64 address;
+	acpi_status status;
+
+	ACPI_FUNCTION_NAME(acpi_write);
+
+	/*
+	 * Must have a valid pointer to a GAS structure, and
+	 * a non-zero address within. However, don't return an error
+	 * because the PM1A/B code must not fail if B isn't present.
+	 */
+	if (!reg) {
+		return (AE_OK);
+	}
+
+	/* Get a local copy of the address. Handles possible alignment issues */
+
+	ACPI_MOVE_64_TO_64(&address, &reg->address);
+	if (!address) {
+		return (AE_OK);
+	}
+
+	/* Supported widths are 8/16/32 */
+
+	width = reg->bit_width;
+	if ((width != 8) && (width != 16) && (width != 32)) {
+		return (AE_SUPPORT);
+	}
+
+	/*
+	 * Two address spaces supported: Memory or IO.
+	 * PCI_Config is not supported here because the GAS struct is insufficient
+	 */
+	switch (reg->space_id) {
+	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+
+		status = acpi_os_write_memory((acpi_physical_address) address,
+					      value, width);
+		break;
+
+	case ACPI_ADR_SPACE_SYSTEM_IO:
+
+		status = acpi_os_write_port((acpi_io_address) address, value,
+					    width);
+		break;
+
+	default:
+		ACPI_ERROR((AE_INFO,
+			    "Unsupported address space: %X", reg->space_id));
+		return (AE_BAD_PARAMETER);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_IO,
+			  "Wrote: %8.8X width %2d   to %8.8X%8.8X (%s)\n",
+			  value, width, ACPI_FORMAT_UINT64(address),
+			  acpi_ut_get_region_name(reg->space_id)));
+
+	return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_write)
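+
+/*
+ * Usage sketch (illustration only): both acpi_read() and acpi_write() take a
+ * GAS structure straight from the FADT, so resetting the machine reduces to
+ * acpi_write(acpi_gbl_FADT.reset_value, &acpi_gbl_FADT.reset_register),
+ * which is exactly what acpi_reset() above does after checking the
+ * ACPI_FADT_RESET_REGISTER flag.
+ */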
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_register_unlocked
+ *
+ * PARAMETERS:  register_id     - ID of ACPI bit_register to access
+ *              return_value    - Value that was read from the register
+ *
+ * RETURN:      Status and the value read from specified Register. Value
+ *              returned is normalized to bit0 (is shifted all the way right)
+ *
+ * DESCRIPTION: ACPI bit_register read function. Does not acquire the HW lock.
+ *
+ ******************************************************************************/
+acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value)
+{
+	u32 register_value = 0;
+	struct acpi_bit_register_info *bit_reg_info;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_get_register_unlocked);
+
+	/* Get the info structure corresponding to the requested ACPI Register */
+
+	bit_reg_info = acpi_hw_get_bit_register_info(register_id);
+	if (!bit_reg_info) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Read from the register */
+
+	status = acpi_hw_register_read(bit_reg_info->parent_register,
+				       &register_value);
+
+	if (ACPI_SUCCESS(status)) {
+
+		/* Normalize the value that was read */
+
+		register_value =
+		    ((register_value & bit_reg_info->access_bit_mask)
+		     >> bit_reg_info->bit_position);
+
+		*return_value = register_value;
+
+		ACPI_DEBUG_PRINT((ACPI_DB_IO, "Read value %8.8X register %X\n",
+				  register_value,
+				  bit_reg_info->parent_register));
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_register_unlocked)
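+
+/*
+ * Normalization example (register contents assumed): for
+ * ACPI_BITREG_WAKE_STATUS the parent register is PM1_STATUS and the bit sits
+ * at position 15 (mask 0x8000). If PM1_STATUS reads 0x8001, the shift above
+ * yields *return_value = 1; if it reads 0x0001, the result is 0.
+ */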
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_register
+ *
+ * PARAMETERS:  register_id     - ID of ACPI bit_register to access
+ *              return_value    - Value that was read from the register
+ *
+ * RETURN:      Status and the value read from specified Register. Value
+ *              returned is normalized to bit0 (is shifted all the way right)
+ *
+ * DESCRIPTION: ACPI bit_register read function.
+ *
+ ******************************************************************************/
+acpi_status acpi_get_register(u32 register_id, u32 *return_value)
+{
+	acpi_status status;
+	acpi_cpu_flags flags;
+
+	flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+	status = acpi_get_register_unlocked(register_id, return_value);
+	acpi_os_release_lock(acpi_gbl_hardware_lock, flags);
+
+	return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_register)
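
[Editor's note: a minimal usage sketch, not part of the patch, assuming a valid bit-register ID is passed in; as documented above, the value comes back normalized to bit 0.]

static void example_read_bit_register(u32 register_id)
{
	u32 value;

	/* Takes and releases the hardware lock internally */

	if (ACPI_SUCCESS(acpi_get_register(register_id, &value)))
		acpi_os_printf("BitRegister %X = %X\n", register_id, value);
}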
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_set_register
+ *
+ * PARAMETERS:  register_id     - ID of ACPI bit_register to access
+ *              Value           - (only used on write) value to write to the
+ *                                Register, NOT pre-normalized to the bit pos
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: ACPI Bit Register write function.
+ *
+ ******************************************************************************/
+acpi_status acpi_set_register(u32 register_id, u32 value)
+{
+	u32 register_value = 0;
+	struct acpi_bit_register_info *bit_reg_info;
+	acpi_status status;
+	acpi_cpu_flags lock_flags;
+
+	ACPI_FUNCTION_TRACE_U32(acpi_set_register, register_id);
+
+	/* Get the info structure corresponding to the requested ACPI Register */
+
+	bit_reg_info = acpi_hw_get_bit_register_info(register_id);
+	if (!bit_reg_info) {
+		ACPI_ERROR((AE_INFO, "Bad ACPI HW RegisterId: %X",
+			    register_id));
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+
+	/* Always do a register read first so we can insert the new bits  */
+
+	status = acpi_hw_register_read(bit_reg_info->parent_register,
+				       &register_value);
+	if (ACPI_FAILURE(status)) {
+		goto unlock_and_exit;
+	}
+
+	/*
+	 * Decode the Register ID
+	 * Register ID = [Register block ID] | [bit ID]
+	 *
+	 * Check the bit ID to locate the Register offset, and use the Mask
+	 * to isolate the bits of interest for the read-modify-write.
+	 */
+	switch (bit_reg_info->parent_register) {
+	case ACPI_REGISTER_PM1_STATUS:
+
+		/*
+		 * Status Registers are different from the rest: they are cleared by
+		 * writing a 1, and writing a 0 has no effect. So the only relevant
+		 * information is the single bit we're interested in; all other bits
+		 * should be written as 0 so they are left unchanged.
+		 */
+		value = ACPI_REGISTER_PREPARE_BITS(value,
+						   bit_reg_info->bit_position,
+						   bit_reg_info->
+						   access_bit_mask);
+		if (value) {
+			status =
+			    acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
+						   (u16) value);
+			register_value = 0;
+		}
+		break;
+
+	case ACPI_REGISTER_PM1_ENABLE:
+
+		ACPI_REGISTER_INSERT_VALUE(register_value,
+					   bit_reg_info->bit_position,
+					   bit_reg_info->access_bit_mask,
+					   value);
+
+		status = acpi_hw_register_write(ACPI_REGISTER_PM1_ENABLE,
+						(u16) register_value);
+		break;
+
+	case ACPI_REGISTER_PM1_CONTROL:
+
+		/*
+		 * Write the PM1 Control register.
+		 * Note that at this level, the fact that there are actually TWO
+		 * registers (A and B - and B may not exist) is abstracted.
+		 */
+		ACPI_DEBUG_PRINT((ACPI_DB_IO, "PM1 control: Read %X\n",
+				  register_value));
+
+		ACPI_REGISTER_INSERT_VALUE(register_value,
+					   bit_reg_info->bit_position,
+					   bit_reg_info->access_bit_mask,
+					   value);
+
+		status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
+						(u16) register_value);
+		break;
+
+	case ACPI_REGISTER_PM2_CONTROL:
+
+		status = acpi_hw_register_read(ACPI_REGISTER_PM2_CONTROL,
+					       &register_value);
+		if (ACPI_FAILURE(status)) {
+			goto unlock_and_exit;
+		}
+
+		ACPI_DEBUG_PRINT((ACPI_DB_IO,
+				  "PM2 control: Read %X from %8.8X%8.8X\n",
+				  register_value,
+				  ACPI_FORMAT_UINT64(acpi_gbl_FADT.
+						     xpm2_control_block.
+						     address)));
+
+		ACPI_REGISTER_INSERT_VALUE(register_value,
+					   bit_reg_info->bit_position,
+					   bit_reg_info->access_bit_mask,
+					   value);
+
+		ACPI_DEBUG_PRINT((ACPI_DB_IO,
+				  "About to write %4.4X to %8.8X%8.8X\n",
+				  register_value,
+				  ACPI_FORMAT_UINT64(acpi_gbl_FADT.
+						     xpm2_control_block.
+						     address)));
+
+		status = acpi_hw_register_write(ACPI_REGISTER_PM2_CONTROL,
+						(u8) (register_value));
+		break;
+
+	default:
+		break;
+	}
+
+      unlock_and_exit:
+
+	acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+
+	/* Normalize the value that was read */
+
+	ACPI_DEBUG_EXEC(register_value =
+			((register_value & bit_reg_info->access_bit_mask) >>
+			 bit_reg_info->bit_position));
+
+	ACPI_DEBUG_PRINT((ACPI_DB_IO,
+			  "Set bits: %8.8X actual %8.8X register %X\n", value,
+			  register_value, bit_reg_info->parent_register));
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_register)
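
[Editor's note: a sketch, not part of the patch, of the extract/insert arithmetic that ACPI_REGISTER_PREPARE_BITS and ACPI_REGISTER_INSERT_VALUE perform above, written out as plain C; the helper names are hypothetical.]

/* Normalize a masked field down to bit 0 (as done after the register read) */
static u32 example_extract_bits(u32 reg_value, u32 mask, u32 bit_position)
{
	return (reg_value & mask) >> bit_position;
}

/* Merge a caller-supplied value into a register image under a mask */
static u32 example_insert_bits(u32 reg_value, u32 mask, u32 bit_position,
			       u32 value)
{
	return (reg_value & ~mask) | ((value << bit_position) & mask);
}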
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_sleep_type_data
+ *
+ * PARAMETERS:  sleep_state         - Numeric sleep state
+ *              *sleep_type_a        - Where SLP_TYPa is returned
+ *              *sleep_type_b        - Where SLP_TYPb is returned
+ *
+ * RETURN:      Status - ACPI status
+ *
+ * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested sleep
+ *              state.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
+{
+	acpi_status status = AE_OK;
+	struct acpi_evaluate_info *info;
+
+	ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data);
+
+	/* Validate parameters */
+
+	if ((sleep_state > ACPI_S_STATES_MAX) || !sleep_type_a || !sleep_type_b) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Allocate the evaluation information block */
+
+	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+	if (!info) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	info->pathname =
+	    ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]);
+
+	/* Evaluate the namespace object containing the values for this state */
+
+	status = acpi_ns_evaluate(info);
+	if (ACPI_FAILURE(status)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "%s while evaluating SleepState [%s]\n",
+				  acpi_format_exception(status),
+				  info->pathname));
+
+		goto cleanup;
+	}
+
+	/* Must have a return object */
+
+	if (!info->return_object) {
+		ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]",
+			    info->pathname));
+		status = AE_NOT_EXIST;
+	}
+
+	/* It must be of type Package */
+
+	else if (ACPI_GET_OBJECT_TYPE(info->return_object) != ACPI_TYPE_PACKAGE) {
+		ACPI_ERROR((AE_INFO,
+			    "Sleep State return object is not a Package"));
+		status = AE_AML_OPERAND_TYPE;
+	}
+
+	/*
+	 * The package must have at least two elements. NOTE (March 2005): This
+	 * goes against the current ACPI spec which defines this object as a
+	 * package with one encoded DWORD element. However, existing practice
+	 * by BIOS vendors seems to be to have 2 or more elements, at least
+	 * one per sleep type (A/B).
+	 */
+	else if (info->return_object->package.count < 2) {
+		ACPI_ERROR((AE_INFO,
+			    "Sleep State return package does not have at least two elements"));
+		status = AE_AML_NO_OPERAND;
+	}
+
+	/* The first two elements must both be of type Integer */
+
+	else if ((ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[0])
+		  != ACPI_TYPE_INTEGER) ||
+		 (ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[1])
+		  != ACPI_TYPE_INTEGER)) {
+		ACPI_ERROR((AE_INFO,
+			    "Sleep State return package elements are not both Integers (%s, %s)",
+			    acpi_ut_get_object_type_name(info->return_object->
+							 package.elements[0]),
+			    acpi_ut_get_object_type_name(info->return_object->
+							 package.elements[1])));
+		status = AE_AML_OPERAND_TYPE;
+	} else {
+		/* Valid _Sx_ package size, type, and value */
+
+		*sleep_type_a = (u8)
+		    (info->return_object->package.elements[0])->integer.value;
+		*sleep_type_b = (u8)
+		    (info->return_object->package.elements[1])->integer.value;
+	}
+
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"While evaluating SleepState [%s], bad Sleep object %p type %s",
+				info->pathname, info->return_object,
+				acpi_ut_get_object_type_name(info->
+							     return_object)));
+	}
+
+	acpi_ut_remove_reference(info->return_object);
+
+      cleanup:
+	ACPI_FREE(info);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_sleep_type_data)
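
[Editor's note: an illustrative call, not part of the patch: obtaining the SLP_TYP values for sleep state 3 (S3) and printing them; error handling is reduced to the success check.]

static void example_print_s3_sleep_types(void)
{
	u8 type_a, type_b;

	if (ACPI_SUCCESS(acpi_get_sleep_type_data(3, &type_a, &type_b)))
		acpi_os_printf("S3: SLP_TYPa=%X SLP_TYPb=%X\n",
			       type_a, type_b);
}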
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
new file mode 100644
index 0000000..88303eb
--- /dev/null
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -0,0 +1,676 @@
+/*******************************************************************************
+ *
+ * Module Name: nsaccess - Top-level functions for accessing ACPI namespace
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "amlcode.h"
+#include "acnamesp.h"
+#include "acdispat.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsaccess")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_root_initialize
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Allocate and initialize the default root named objects
+ *
+ * MUTEX:       Locks namespace for entire execution
+ *
+ ******************************************************************************/
+acpi_status acpi_ns_root_initialize(void)
+{
+	acpi_status status;
+	const struct acpi_predefined_names *init_val = NULL;
+	struct acpi_namespace_node *new_node;
+	union acpi_operand_object *obj_desc;
+	acpi_string val = NULL;
+
+	ACPI_FUNCTION_TRACE(ns_root_initialize);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * The global root ptr is initially NULL, so a non-NULL value indicates
+	 * that acpi_ns_root_initialize() has already been called; just return.
+	 */
+	if (acpi_gbl_root_node) {
+		status = AE_OK;
+		goto unlock_and_exit;
+	}
+
+	/*
+	 * Tell the rest of the subsystem that the root is initialized
+	 * (This is OK because the namespace is locked)
+	 */
+	acpi_gbl_root_node = &acpi_gbl_root_node_struct;
+
+	/* Enter the pre-defined names in the name table */
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			  "Entering predefined entries into namespace\n"));
+
+	for (init_val = acpi_gbl_pre_defined_names; init_val->name; init_val++) {
+
+		/* _OSI is optional for now, will be permanent later */
+
+		if (!ACPI_STRCMP(init_val->name, "_OSI")
+		    && !acpi_gbl_create_osi_method) {
+			continue;
+		}
+
+		status = acpi_ns_lookup(NULL, init_val->name, init_val->type,
+					ACPI_IMODE_LOAD_PASS2,
+					ACPI_NS_NO_UPSEARCH, NULL, &new_node);
+
+		if (ACPI_FAILURE(status) || (!new_node)) {	/* Must be on same line for code converter */
+			ACPI_EXCEPTION((AE_INFO, status,
+					"Could not create predefined name %s",
+					init_val->name));
+		}
+
+		/*
+		 * Name entered successfully.
+		 * If entry in pre_defined_names[] specifies an
+		 * initial value, create the initial value.
+		 */
+		if (init_val->val) {
+			status = acpi_os_predefined_override(init_val, &val);
+			if (ACPI_FAILURE(status)) {
+				ACPI_ERROR((AE_INFO,
+					    "Could not override predefined %s",
+					    init_val->name));
+			}
+
+			if (!val) {
+				val = init_val->val;
+			}
+
+			/*
+			 * Entry requests an initial value, allocate a
+			 * descriptor for it.
+			 */
+			obj_desc =
+			    acpi_ut_create_internal_object(init_val->type);
+			if (!obj_desc) {
+				status = AE_NO_MEMORY;
+				goto unlock_and_exit;
+			}
+
+			/*
+			 * Convert value string from table entry to
+			 * internal representation. Only types actually
+			 * used for initial values are implemented here.
+			 */
+			switch (init_val->type) {
+			case ACPI_TYPE_METHOD:
+				obj_desc->method.param_count =
+				    (u8) ACPI_TO_INTEGER(val);
+				obj_desc->common.flags |= AOPOBJ_DATA_VALID;
+
+#if defined (ACPI_ASL_COMPILER)
+
+				/* Save the parameter count for the iASL compiler */
+
+				new_node->value = obj_desc->method.param_count;
+#else
+				/* Mark this as a very SPECIAL method */
+
+				obj_desc->method.method_flags =
+				    AML_METHOD_INTERNAL_ONLY;
+				obj_desc->method.implementation =
+				    acpi_ut_osi_implementation;
+#endif
+				break;
+
+			case ACPI_TYPE_INTEGER:
+
+				obj_desc->integer.value = ACPI_TO_INTEGER(val);
+				break;
+
+			case ACPI_TYPE_STRING:
+
+				/*
+				 * Build an object around the static string
+				 */
+				obj_desc->string.length =
+				    (u32) ACPI_STRLEN(val);
+				obj_desc->string.pointer = val;
+				obj_desc->common.flags |= AOPOBJ_STATIC_POINTER;
+				break;
+
+			case ACPI_TYPE_MUTEX:
+
+				obj_desc->mutex.node = new_node;
+				obj_desc->mutex.sync_level =
+				    (u8) (ACPI_TO_INTEGER(val) - 1);
+
+				/* Create a mutex */
+
+				status =
+				    acpi_os_create_mutex(&obj_desc->mutex.
+							 os_mutex);
+				if (ACPI_FAILURE(status)) {
+					acpi_ut_remove_reference(obj_desc);
+					goto unlock_and_exit;
+				}
+
+				/* Special case for ACPI Global Lock */
+
+				if (ACPI_STRCMP(init_val->name, "_GL_") == 0) {
+					acpi_gbl_global_lock_mutex = obj_desc;
+
+					/* Create additional counting semaphore for global lock */
+
+					status =
+					    acpi_os_create_semaphore(1, 0,
+								     &acpi_gbl_global_lock_semaphore);
+					if (ACPI_FAILURE(status)) {
+						acpi_ut_remove_reference
+						    (obj_desc);
+						goto unlock_and_exit;
+					}
+				}
+				break;
+
+			default:
+
+				ACPI_ERROR((AE_INFO,
+					    "Unsupported initial type value %X",
+					    init_val->type));
+				acpi_ut_remove_reference(obj_desc);
+				obj_desc = NULL;
+				continue;
+			}
+
+			/* Store pointer to value descriptor in the Node */
+
+			status = acpi_ns_attach_object(new_node, obj_desc,
+						       ACPI_GET_OBJECT_TYPE
+						       (obj_desc));
+
+			/* Remove local reference to the object */
+
+			acpi_ut_remove_reference(obj_desc);
+		}
+	}
+
+      unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+	/* Save a handle to "_GPE", it is always present */
+
+	if (ACPI_SUCCESS(status)) {
+		status = acpi_ns_get_node(NULL, "\\_GPE", ACPI_NS_NO_UPSEARCH,
+					  &acpi_gbl_fadt_gpe_device);
+	}
+
+	return_ACPI_STATUS(status);
+}
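
[Editor's note: for orientation only, not part of the patch. An entry of acpi_gbl_pre_defined_names, as consumed by the loop above, has roughly this shape; the field names follow the accesses made on init_val, and the specific name and value shown are hypothetical.]

static const struct acpi_predefined_names example_predefined_entry = {
	.name = "_REV",			/* hypothetical predefined name */
	.type = ACPI_TYPE_INTEGER,	/* selects the switch arm above */
	.val = (char *)2,		/* recovered via ACPI_TO_INTEGER(val) */
};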
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_lookup
+ *
+ * PARAMETERS:  scope_info      - Current scope info block
+ *              Pathname        - Search pathname, in internal format
+ *                                (as represented in the AML stream)
+ *              Type            - Type associated with name
+ *              interpreter_mode - IMODE_LOAD_PASS2 => add name if not found
+ *              Flags           - Flags describing the search restrictions
+ *              walk_state      - Current state of the walk
+ *              return_node     - Where the Node is placed (if found
+ *                                or created successfully)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Find or enter the passed name in the name space.
+ *              Log an error if name not found in Exec mode.
+ *
+ * MUTEX:       Assumes namespace is locked.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_lookup(union acpi_generic_state *scope_info,
+	       char *pathname,
+	       acpi_object_type type,
+	       acpi_interpreter_mode interpreter_mode,
+	       u32 flags,
+	       struct acpi_walk_state *walk_state,
+	       struct acpi_namespace_node **return_node)
+{
+	acpi_status status;
+	char *path = pathname;
+	struct acpi_namespace_node *prefix_node;
+	struct acpi_namespace_node *current_node = NULL;
+	struct acpi_namespace_node *this_node = NULL;
+	u32 num_segments;
+	u32 num_carats;
+	acpi_name simple_name;
+	acpi_object_type type_to_check_for;
+	acpi_object_type this_search_type;
+	u32 search_parent_flag = ACPI_NS_SEARCH_PARENT;
+	u32 local_flags;
+
+	ACPI_FUNCTION_TRACE(ns_lookup);
+
+	if (!return_node) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	local_flags = flags & ~(ACPI_NS_ERROR_IF_FOUND | ACPI_NS_SEARCH_PARENT);
+	*return_node = ACPI_ENTRY_NOT_FOUND;
+	acpi_gbl_ns_lookup_count++;
+
+	if (!acpi_gbl_root_node) {
+		return_ACPI_STATUS(AE_NO_NAMESPACE);
+	}
+
+	/*
+	 * Get the prefix scope.
+	 * A null scope means use the root scope
+	 */
+	if ((!scope_info) || (!scope_info->scope.node)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+				  "Null scope prefix, using root node (%p)\n",
+				  acpi_gbl_root_node));
+
+		prefix_node = acpi_gbl_root_node;
+	} else {
+		prefix_node = scope_info->scope.node;
+		if (ACPI_GET_DESCRIPTOR_TYPE(prefix_node) !=
+		    ACPI_DESC_TYPE_NAMED) {
+			ACPI_ERROR((AE_INFO, "%p is not a namespace node [%s]",
+				    prefix_node,
+				    acpi_ut_get_descriptor_name(prefix_node)));
+			return_ACPI_STATUS(AE_AML_INTERNAL);
+		}
+
+		if (!(flags & ACPI_NS_PREFIX_IS_SCOPE)) {
+			/*
+			 * This node might not be an actual "scope" node (such as a
+			 * Device, Method, etc.); it could be a Package or other object node.
+			 * Back up the tree to find the containing scope node.
+			 */
+			while (!acpi_ns_opens_scope(prefix_node->type) &&
+			       prefix_node->type != ACPI_TYPE_ANY) {
+				prefix_node =
+				    acpi_ns_get_parent_node(prefix_node);
+			}
+		}
+	}
+
+	/* Save type   TBD: may be no longer necessary */
+
+	type_to_check_for = type;
+
+	/*
+	 * Begin examination of the actual pathname
+	 */
+	if (!pathname) {
+
+		/* A Null name_path is allowed and refers to the root */
+
+		num_segments = 0;
+		this_node = acpi_gbl_root_node;
+		path = "";
+
+		ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+				  "Null Pathname (Zero segments), Flags=%X\n",
+				  flags));
+	} else {
+		/*
+		 * Name pointer is valid (and must be in internal name format)
+		 *
+		 * Check for scope prefixes:
+		 *
+		 * As represented in the AML stream, a namepath consists of an
+		 * optional scope prefix followed by a name segment part.
+		 *
+		 * If present, the scope prefix is either a Root Prefix (in
+		 * which case the name is fully qualified), or one or more
+		 * Parent Prefixes (in which case the name's scope is relative
+		 * to the current scope).
+		 */
+		if (*path == (u8) AML_ROOT_PREFIX) {
+
+			/* Pathname is fully qualified, start from the root */
+
+			this_node = acpi_gbl_root_node;
+			search_parent_flag = ACPI_NS_NO_UPSEARCH;
+
+			/* Point to name segment part */
+
+			path++;
+
+			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+					  "Path is absolute from root [%p]\n",
+					  this_node));
+		} else {
+			/* Pathname is relative to current scope, start there */
+
+			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+					  "Searching relative to prefix scope [%4.4s] (%p)\n",
+					  acpi_ut_get_node_name(prefix_node),
+					  prefix_node));
+
+			/*
+			 * Handle multiple Parent Prefixes (carat) by just getting
+			 * the parent node for each prefix instance.
+			 */
+			this_node = prefix_node;
+			num_carats = 0;
+			while (*path == (u8) AML_PARENT_PREFIX) {
+
+				/* Name is fully qualified, no search rules apply */
+
+				search_parent_flag = ACPI_NS_NO_UPSEARCH;
+				/*
+				 * Point past this prefix to the name segment
+				 * part or the next Parent Prefix
+				 */
+				path++;
+
+				/* Backup to the parent node */
+
+				num_carats++;
+				this_node = acpi_ns_get_parent_node(this_node);
+				if (!this_node) {
+
+					/* Current scope has no parent scope */
+
+					ACPI_ERROR((AE_INFO,
+						    "ACPI path has too many parent prefixes (^) - reached beyond root node"));
+					return_ACPI_STATUS(AE_NOT_FOUND);
+				}
+			}
+
+			if (search_parent_flag == ACPI_NS_NO_UPSEARCH) {
+				ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+						  "Search scope is [%4.4s], path has %d carat(s)\n",
+						  acpi_ut_get_node_name
+						  (this_node), num_carats));
+			}
+		}
+
+		/*
+		 * Determine the number of ACPI name segments in this pathname.
+		 *
+		 * The segment part consists of either:
+		 *  - A Null name segment (0)
+		 *  - A dual_name_prefix followed by two 4-byte name segments
+		 *  - A multi_name_prefix followed by a byte indicating the
+		 *      number of segments and the segments themselves.
+		 *  - A single 4-byte name segment
+		 *
+		 * Examine the name prefix opcode, if any, to determine the number of
+		 * segments.
+		 */
+		switch (*path) {
+		case 0:
+			/*
+			 * Null name after root or parent prefixes. We already
+			 * have the correct target node and there are no name segments.
+			 */
+			num_segments = 0;
+			type = this_node->type;
+
+			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+					  "Prefix-only Pathname (Zero name segments), Flags=%X\n",
+					  flags));
+			break;
+
+		case AML_DUAL_NAME_PREFIX:
+
+			/* More than one name_seg, search rules do not apply */
+
+			search_parent_flag = ACPI_NS_NO_UPSEARCH;
+
+			/* Two segments, point to first name segment */
+
+			num_segments = 2;
+			path++;
+
+			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+					  "Dual Pathname (2 segments, Flags=%X)\n",
+					  flags));
+			break;
+
+		case AML_MULTI_NAME_PREFIX_OP:
+
+			/* More than one name_seg, search rules do not apply */
+
+			search_parent_flag = ACPI_NS_NO_UPSEARCH;
+
+			/* Extract segment count, point to first name segment */
+
+			path++;
+			num_segments = (u32)(u8)*path;
+			path++;
+
+			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+					  "Multi Pathname (%d Segments, Flags=%X)\n",
+					  num_segments, flags));
+			break;
+
+		default:
+			/*
+			 * Not a Null name, no Dual or Multi prefix, hence there is
+			 * only one name segment and Pathname is already pointing to it.
+			 */
+			num_segments = 1;
+
+			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+					  "Simple Pathname (1 segment, Flags=%X)\n",
+					  flags));
+			break;
+		}
+
+		ACPI_DEBUG_EXEC(acpi_ns_print_pathname(num_segments, path));
+	}
+
+	/*
+	 * Search namespace for each segment of the name. Loop through and
+	 * verify (or add to the namespace) each name segment.
+	 *
+	 * The object type is significant only at the last name
+	 * segment. (We don't care about the types along the path, only
+	 * the type of the final target object.)
+	 */
+	this_search_type = ACPI_TYPE_ANY;
+	current_node = this_node;
+	while (num_segments && current_node) {
+		num_segments--;
+		if (!num_segments) {
+			/*
+			 * This is the last segment, enable typechecking
+			 */
+			this_search_type = type;
+
+			/*
+			 * Only allow automatic parent search (search rules) if the caller
+			 * requested it AND we have a single, non-fully-qualified name_seg
+			 */
+			if ((search_parent_flag != ACPI_NS_NO_UPSEARCH) &&
+			    (flags & ACPI_NS_SEARCH_PARENT)) {
+				local_flags |= ACPI_NS_SEARCH_PARENT;
+			}
+
+			/* Set error flag according to caller */
+
+			if (flags & ACPI_NS_ERROR_IF_FOUND) {
+				local_flags |= ACPI_NS_ERROR_IF_FOUND;
+			}
+		}
+
+		/* Extract one ACPI name from the front of the pathname */
+
+		ACPI_MOVE_32_TO_32(&simple_name, path);
+
+		/* Try to find the single (4 character) ACPI name */
+
+		status =
+		    acpi_ns_search_and_enter(simple_name, walk_state,
+					     current_node, interpreter_mode,
+					     this_search_type, local_flags,
+					     &this_node);
+		if (ACPI_FAILURE(status)) {
+			if (status == AE_NOT_FOUND) {
+
+				/* Name not found in ACPI namespace */
+
+				ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+						  "Name [%4.4s] not found in scope [%4.4s] %p\n",
+						  (char *)&simple_name,
+						  (char *)&current_node->name,
+						  current_node));
+			}
+
+			*return_node = this_node;
+			return_ACPI_STATUS(status);
+		}
+
+		/* More segments to follow? */
+
+		if (num_segments > 0) {
+			/*
+			 * If we have an alias to an object that opens a scope (such as a
+			 * device or processor), we need to dereference the alias here so that
+			 * we can access any children of the original node (via the remaining
+			 * segments).
+			 */
+			if (this_node->type == ACPI_TYPE_LOCAL_ALIAS) {
+				if (!this_node->object) {
+					return_ACPI_STATUS(AE_NOT_EXIST);
+				}
+
+				if (acpi_ns_opens_scope
+				    (((struct acpi_namespace_node *)this_node->
+				      object)->type)) {
+					this_node =
+					    (struct acpi_namespace_node *)
+					    this_node->object;
+				}
+			}
+		}
+
+		/* Special handling for the last segment (num_segments == 0) */
+
+		else {
+			/*
+			 * Sanity typecheck of the target object:
+			 *
+			 * If 1) This is the last segment (num_segments == 0)
+			 *    2) And we are looking for a specific type
+			 *       (Not checking for TYPE_ANY)
+			 *    3) Which is not an alias
+			 *    4) Which is not a local type (TYPE_SCOPE)
+			 *    5) And the type of target object is known (not TYPE_ANY)
+			 *    6) And target object does not match what we are looking for
+			 *
+			 * Then we have a type mismatch. Just warn and ignore it.
+			 */
+			if ((type_to_check_for != ACPI_TYPE_ANY) &&
+			    (type_to_check_for != ACPI_TYPE_LOCAL_ALIAS) &&
+			    (type_to_check_for != ACPI_TYPE_LOCAL_METHOD_ALIAS)
+			    && (type_to_check_for != ACPI_TYPE_LOCAL_SCOPE)
+			    && (this_node->type != ACPI_TYPE_ANY)
+			    && (this_node->type != type_to_check_for)) {
+
+				/* Complain about a type mismatch */
+
+				ACPI_WARNING((AE_INFO,
+					      "NsLookup: Type mismatch on %4.4s (%s), searching for (%s)",
+					      ACPI_CAST_PTR(char, &simple_name),
+					      acpi_ut_get_type_name(this_node->
+								    type),
+					      acpi_ut_get_type_name
+					      (type_to_check_for)));
+			}
+
+			/*
+			 * If this is the last name segment and we are not looking for a
+			 * specific type, but the type of found object is known, use that type
+			 * to (later) see if it opens a scope.
+			 */
+			if (type == ACPI_TYPE_ANY) {
+				type = this_node->type;
+			}
+		}
+
+		/* Point to next name segment and make this node current */
+
+		path += ACPI_NAME_SIZE;
+		current_node = this_node;
+	}
+
+	/*
+	 * Always check if we need to open a new scope
+	 */
+	if (!(flags & ACPI_NS_DONT_OPEN_SCOPE) && (walk_state)) {
+		/*
+		 * If entry is a type which opens a scope, push the new scope on the
+		 * scope stack.
+		 */
+		if (acpi_ns_opens_scope(type)) {
+			status =
+			    acpi_ds_scope_stack_push(this_node, type,
+						     walk_state);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+		}
+	}
+
+	*return_node = this_node;
+	return_ACPI_STATUS(AE_OK);
+}
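
[Editor's note: a condensed sketch, not part of the patch, of the prefix and segment-count decode described in the comments above, using the same AML prefix constants; it only counts segments and ignores the namespace search itself.]

static u32 example_count_name_segments(char *path)
{
	if (*path == (u8) AML_ROOT_PREFIX) {
		path++;				/* Absolute path: start at the root */
	} else {
		while (*path == (u8) AML_PARENT_PREFIX)
			path++;			/* Each '^' backs up one scope */
	}

	switch (*path) {
	case 0:
		return 0;			/* Prefix-only (Null name) path */
	case AML_DUAL_NAME_PREFIX:
		return 2;			/* Two 4-byte name segments follow */
	case AML_MULTI_NAME_PREFIX_OP:
		return (u32) (u8) path[1];	/* Segment count byte follows */
	default:
		return 1;			/* Single 4-byte name segment */
	}
}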
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
new file mode 100644
index 0000000..f976d84
--- /dev/null
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -0,0 +1,497 @@
+/*******************************************************************************
+ *
+ * Module Name: nsalloc - Namespace allocation and deletion utilities
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsalloc")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_create_node
+ *
+ * PARAMETERS:  Name            - Name of the new node (4 char ACPI name)
+ *
+ * RETURN:      New namespace node (Null on failure)
+ *
+ * DESCRIPTION: Create a namespace node
+ *
+ ******************************************************************************/
+struct acpi_namespace_node *acpi_ns_create_node(u32 name)
+{
+	struct acpi_namespace_node *node;
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+	u32 temp;
+#endif
+
+	ACPI_FUNCTION_TRACE(ns_create_node);
+
+	node = acpi_os_acquire_object(acpi_gbl_namespace_cache);
+	if (!node) {
+		return_PTR(NULL);
+	}
+
+	ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_allocated++);
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+	temp =
+	    acpi_gbl_ns_node_list->total_allocated -
+	    acpi_gbl_ns_node_list->total_freed;
+	if (temp > acpi_gbl_ns_node_list->max_occupied) {
+		acpi_gbl_ns_node_list->max_occupied = temp;
+	}
+#endif
+
+	node->name.integer = name;
+	ACPI_SET_DESCRIPTOR_TYPE(node, ACPI_DESC_TYPE_NAMED);
+	return_PTR(node);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_delete_node
+ *
+ * PARAMETERS:  Node            - Node to be deleted
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Delete a namespace node
+ *
+ ******************************************************************************/
+
+void acpi_ns_delete_node(struct acpi_namespace_node *node)
+{
+	struct acpi_namespace_node *parent_node;
+	struct acpi_namespace_node *prev_node;
+	struct acpi_namespace_node *next_node;
+
+	ACPI_FUNCTION_TRACE_PTR(ns_delete_node, node);
+
+	parent_node = acpi_ns_get_parent_node(node);
+
+	prev_node = NULL;
+	next_node = parent_node->child;
+
+	/* Find the node that is the previous peer in the parent's child list */
+
+	while (next_node != node) {
+		prev_node = next_node;
+		next_node = prev_node->peer;
+	}
+
+	if (prev_node) {
+
+		/* Node is not first child, unlink it */
+
+		prev_node->peer = next_node->peer;
+		if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
+			prev_node->flags |= ANOBJ_END_OF_PEER_LIST;
+		}
+	} else {
+		/* Node is first child (has no previous peer) */
+
+		if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
+
+			/* No peers at all */
+
+			parent_node->child = NULL;
+		} else {	/* Link peer list to parent */
+
+			parent_node->child = next_node->peer;
+		}
+	}
+
+	ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_freed++);
+
+	/*
+	 * Detach an object if there is one, then delete the node
+	 */
+	acpi_ns_detach_object(node);
+	(void)acpi_os_release_object(acpi_gbl_namespace_cache, node);
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_install_node
+ *
+ * PARAMETERS:  walk_state      - Current state of the walk
+ *              parent_node     - The parent of the new Node
+ *              Node            - The new Node to install
+ *              Type            - ACPI object type of the new Node
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Initialize a new namespace node and install it amongst
+ *              its peers.
+ *
+ *              Note: Current namespace lookup is linear search. This appears
+ *              to be sufficient as namespace searches consume only a small
+ *              fraction of the execution time of the ACPI subsystem.
+ *
+ ******************************************************************************/
+
+void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namespace_node *parent_node,	/* Parent */
+			  struct acpi_namespace_node *node,	/* New Child */
+			  acpi_object_type type)
+{
+	acpi_owner_id owner_id = 0;
+	struct acpi_namespace_node *child_node;
+
+	ACPI_FUNCTION_TRACE(ns_install_node);
+
+	/*
+	 * Get the owner ID from the Walk state
+	 * The owner ID is used to track table deletion and
+	 * deletion of objects created by methods
+	 */
+	if (walk_state) {
+		owner_id = walk_state->owner_id;
+	}
+
+	/* Link the new entry into the parent and existing children */
+
+	child_node = parent_node->child;
+	if (!child_node) {
+		parent_node->child = node;
+		node->flags |= ANOBJ_END_OF_PEER_LIST;
+		node->peer = parent_node;
+	} else {
+		while (!(child_node->flags & ANOBJ_END_OF_PEER_LIST)) {
+			child_node = child_node->peer;
+		}
+
+		child_node->peer = node;
+
+		/* Clear end-of-list flag */
+
+		child_node->flags &= ~ANOBJ_END_OF_PEER_LIST;
+		node->flags |= ANOBJ_END_OF_PEER_LIST;
+		node->peer = parent_node;
+	}
+
+	/* Init the new entry */
+
+	node->owner_id = owner_id;
+	node->type = (u8) type;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+			  "%4.4s (%s) [Node %p Owner %X] added to %4.4s (%s) [Node %p]\n",
+			  acpi_ut_get_node_name(node),
+			  acpi_ut_get_type_name(node->type), node, owner_id,
+			  acpi_ut_get_node_name(parent_node),
+			  acpi_ut_get_type_name(parent_node->type),
+			  parent_node));
+
+	return_VOID;
+}
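
[Editor's note: a sketch, not part of the patch, of traversing the child list that acpi_ns_install_node() builds: Parent->Child points at the first child, children are chained through Peer, and the last child's Peer points back at the parent and carries ANOBJ_END_OF_PEER_LIST.]

static void example_walk_children(struct acpi_namespace_node *parent_node)
{
	struct acpi_namespace_node *child = parent_node->child;

	while (child) {
		acpi_os_printf("%4.4s\n", acpi_ut_get_node_name(child));

		if (child->flags & ANOBJ_END_OF_PEER_LIST)
			break;		/* Peer now points back to the parent */

		child = child->peer;
	}
}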
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_delete_children
+ *
+ * PARAMETERS:  parent_node     - Delete this object's children
+ *
+ * RETURN:      None.
+ *
+ * DESCRIPTION: Delete all children of the parent object. In other words,
+ *              deletes a "scope".
+ *
+ ******************************************************************************/
+
+void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
+{
+	struct acpi_namespace_node *child_node;
+	struct acpi_namespace_node *next_node;
+	u8 flags;
+
+	ACPI_FUNCTION_TRACE_PTR(ns_delete_children, parent_node);
+
+	if (!parent_node) {
+		return_VOID;
+	}
+
+	/* If no children, all done! */
+
+	child_node = parent_node->child;
+	if (!child_node) {
+		return_VOID;
+	}
+
+	/*
+	 * Deallocate all children at this level
+	 */
+	do {
+
+		/* Get the things we need */
+
+		next_node = child_node->peer;
+		flags = child_node->flags;
+
+		/* Grandchildren should have all been deleted already */
+
+		if (child_node->child) {
+			ACPI_ERROR((AE_INFO, "Found a grandchild! P=%p C=%p",
+				    parent_node, child_node));
+		}
+
+		/* Now we can free this child object */
+
+		ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_freed++);
+
+		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+				  "Object %p, Remaining %X\n", child_node,
+				  acpi_gbl_current_node_count));
+
+		/*
+		 * Detach an object if there is one, then free the child node
+		 */
+		acpi_ns_detach_object(child_node);
+
+		/* Now we can delete the node */
+
+		(void)acpi_os_release_object(acpi_gbl_namespace_cache,
+					     child_node);
+
+		/* And move on to the next child in the list */
+
+		child_node = next_node;
+
+	} while (!(flags & ANOBJ_END_OF_PEER_LIST));
+
+	/* Clear the parent's child pointer */
+
+	parent_node->child = NULL;
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_delete_namespace_subtree
+ *
+ * PARAMETERS:  parent_node     - Root of the subtree to be deleted
+ *
+ * RETURN:      None.
+ *
+ * DESCRIPTION: Delete a subtree of the namespace.  This includes all objects
+ *              stored within the subtree.
+ *
+ ******************************************************************************/
+
+void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
+{
+	struct acpi_namespace_node *child_node = NULL;
+	u32 level = 1;
+
+	ACPI_FUNCTION_TRACE(ns_delete_namespace_subtree);
+
+	if (!parent_node) {
+		return_VOID;
+	}
+
+	/*
+	 * Traverse the tree of objects until we bubble back up
+	 * to where we started.
+	 */
+	while (level > 0) {
+
+		/* Get the next node in this scope (NULL if none) */
+
+		child_node =
+		    acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
+					  child_node);
+		if (child_node) {
+
+			/* Found a child node - detach any attached object */
+
+			acpi_ns_detach_object(child_node);
+
+			/* Check if this node has any children */
+
+			if (acpi_ns_get_next_node
+			    (ACPI_TYPE_ANY, child_node, NULL)) {
+				/*
+				 * There is at least one child of this node,
+				 * visit the node
+				 */
+				level++;
+				parent_node = child_node;
+				child_node = NULL;
+			}
+		} else {
+			/*
+			 * No more children of this parent node.
+			 * Move up to the grandparent.
+			 */
+			level--;
+
+			/*
+			 * Now delete all of the children of this parent
+			 * all at the same time.
+			 */
+			acpi_ns_delete_children(parent_node);
+
+			/* New "last child" is this parent node */
+
+			child_node = parent_node;
+
+			/* Move up the tree to the grandparent */
+
+			parent_node = acpi_ns_get_parent_node(parent_node);
+		}
+	}
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_delete_namespace_by_owner
+ *
+ * PARAMETERS:  owner_id    - All nodes with this owner will be deleted
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Delete entries within the namespace that are owned by a
+ *              specific ID.  Used to delete entire ACPI tables.  All
+ *              reference counts are updated.
+ *
+ * MUTEX:       Locks namespace during deletion walk.
+ *
+ ******************************************************************************/
+
+void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)
+{
+	struct acpi_namespace_node *child_node;
+	struct acpi_namespace_node *deletion_node;
+	struct acpi_namespace_node *parent_node;
+	u32 level;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE_U32(ns_delete_namespace_by_owner, owner_id);
+
+	if (owner_id == 0) {
+		return_VOID;
+	}
+
+	/* Lock namespace for possible update */
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return_VOID;
+	}
+
+	deletion_node = NULL;
+	parent_node = acpi_gbl_root_node;
+	child_node = NULL;
+	level = 1;
+
+	/*
+	 * Traverse the tree of nodes until we bubble back up
+	 * to where we started.
+	 */
+	while (level > 0) {
+		/*
+		 * Get the next child of this parent node. When child_node is NULL,
+		 * the first child of the parent is returned
+		 */
+		child_node =
+		    acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
+					  child_node);
+
+		if (deletion_node) {
+			acpi_ns_delete_children(deletion_node);
+			acpi_ns_delete_node(deletion_node);
+			deletion_node = NULL;
+		}
+
+		if (child_node) {
+			if (child_node->owner_id == owner_id) {
+
+				/* Found a matching child node - detach any attached object */
+
+				acpi_ns_detach_object(child_node);
+			}
+
+			/* Check if this node has any children */
+
+			if (acpi_ns_get_next_node
+			    (ACPI_TYPE_ANY, child_node, NULL)) {
+				/*
+				 * There is at least one child of this node,
+				 * visit the node
+				 */
+				level++;
+				parent_node = child_node;
+				child_node = NULL;
+			} else if (child_node->owner_id == owner_id) {
+				deletion_node = child_node;
+			}
+		} else {
+			/*
+			 * No more children of this parent node.
+			 * Move up to the grandparent.
+			 */
+			level--;
+			if (level != 0) {
+				if (parent_node->owner_id == owner_id) {
+					deletion_node = parent_node;
+				}
+			}
+
+			/* New "last child" is this parent node */
+
+			child_node = parent_node;
+
+			/* Move up the tree to the grandparent */
+
+			parent_node = acpi_ns_get_parent_node(parent_node);
+		}
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_VOID;
+}
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
new file mode 100644
index 0000000..0da33c8
--- /dev/null
+++ b/drivers/acpi/acpica/nsdump.c
@@ -0,0 +1,709 @@
+/******************************************************************************
+ *
+ * Module Name: nsdump - table dumping routines for debug
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsdump")
+
+/* Local prototypes */
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+void acpi_ns_dump_root_devices(void);
+
+static acpi_status
+acpi_ns_dump_one_device(acpi_handle obj_handle,
+			u32 level, void *context, void **return_value);
+#endif
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_print_pathname
+ *
+ * PARAMETERS:  num_segments        - Number of ACPI name segments
+ *              Pathname            - The compressed (internal) path
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print an object's full namespace pathname
+ *
+ ******************************************************************************/
+
+void acpi_ns_print_pathname(u32 num_segments, char *pathname)
+{
+	u32 i;
+
+	ACPI_FUNCTION_NAME(ns_print_pathname);
+
+	if (!(acpi_dbg_level & ACPI_LV_NAMES)
+	    || !(acpi_dbg_layer & ACPI_NAMESPACE)) {
+		return;
+	}
+
+	/* Print the entire name */
+
+	ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "["));
+
+	while (num_segments) {
+		for (i = 0; i < 4; i++) {
+			ACPI_IS_PRINT(pathname[i]) ?
+			    acpi_os_printf("%c", pathname[i]) :
+			    acpi_os_printf("?");
+		}
+
+		pathname += ACPI_NAME_SIZE;
+		num_segments--;
+		if (num_segments) {
+			acpi_os_printf(".");
+		}
+	}
+
+	acpi_os_printf("]\n");
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_dump_pathname
+ *
+ * PARAMETERS:  Handle              - Object
+ *              Msg                 - Prefix message
+ *              Level               - Desired debug level
+ *              Component           - Caller's component ID
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print an object's full namespace pathname
+ *              Manages allocation/freeing of a pathname buffer
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
+{
+
+	ACPI_FUNCTION_TRACE(ns_dump_pathname);
+
+	/* Do this only if the requested debug level and component are enabled */
+
+	if (!(acpi_dbg_level & level) || !(acpi_dbg_layer & component)) {
+		return_VOID;
+	}
+
+	/* Convert handle to a full pathname and print it (with supplied message) */
+
+	acpi_ns_print_node_pathname(handle, msg);
+	acpi_os_printf("\n");
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_dump_one_object
+ *
+ * PARAMETERS:  obj_handle          - Node to be dumped
+ *              Level               - Nesting level of the handle
+ *              Context             - Passed into walk_namespace
+ *              return_value        - Not used
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Dump a single Node
+ *              This procedure is a user_function called by acpi_ns_walk_namespace.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_dump_one_object(acpi_handle obj_handle,
+			u32 level, void *context, void **return_value)
+{
+	struct acpi_walk_info *info = (struct acpi_walk_info *)context;
+	struct acpi_namespace_node *this_node;
+	union acpi_operand_object *obj_desc = NULL;
+	acpi_object_type obj_type;
+	acpi_object_type type;
+	u32 bytes_to_dump;
+	u32 dbg_level;
+	u32 i;
+
+	ACPI_FUNCTION_NAME(ns_dump_one_object);
+
+	/* Is output enabled? */
+
+	if (!(acpi_dbg_level & info->debug_level)) {
+		return (AE_OK);
+	}
+
+	if (!obj_handle) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Null object handle\n"));
+		return (AE_OK);
+	}
+
+	this_node = acpi_ns_map_handle_to_node(obj_handle);
+	type = this_node->type;
+
+	/* Check if the owner matches */
+
+	if ((info->owner_id != ACPI_OWNER_ID_MAX) &&
+	    (info->owner_id != this_node->owner_id)) {
+		return (AE_OK);
+	}
+
+	if (!(info->display_type & ACPI_DISPLAY_SHORT)) {
+
+		/* Indent the object according to the level */
+
+		acpi_os_printf("%2d%*s", (u32) level - 1, (int)level * 2, " ");
+
+		/* Check the node type and name */
+
+		if (type > ACPI_TYPE_LOCAL_MAX) {
+			ACPI_WARNING((AE_INFO, "Invalid ACPI Object Type %08X",
+				      type));
+		}
+
+		if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
+			this_node->name.integer =
+			    acpi_ut_repair_name(this_node->name.ascii);
+
+			ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X",
+				      this_node->name.integer));
+		}
+
+		acpi_os_printf("%4.4s", acpi_ut_get_node_name(this_node));
+	}
+
+	/*
+	 * Now we can print out the pertinent information
+	 */
+	acpi_os_printf(" %-12s %p %2.2X ",
+		       acpi_ut_get_type_name(type), this_node,
+		       this_node->owner_id);
+
+	dbg_level = acpi_dbg_level;
+	acpi_dbg_level = 0;
+	obj_desc = acpi_ns_get_attached_object(this_node);
+	acpi_dbg_level = dbg_level;
+
+	/* Temp nodes are those nodes created by a control method */
+
+	if (this_node->flags & ANOBJ_TEMPORARY) {
+		acpi_os_printf("(T) ");
+	}
+
+	switch (info->display_type & ACPI_DISPLAY_MASK) {
+	case ACPI_DISPLAY_SUMMARY:
+
+		if (!obj_desc) {
+
+			/* No attached object, we are done */
+
+			acpi_os_printf("\n");
+			return (AE_OK);
+		}
+
+		switch (type) {
+		case ACPI_TYPE_PROCESSOR:
+
+			acpi_os_printf("ID %X Len %.4X Addr %p\n",
+				       obj_desc->processor.proc_id,
+				       obj_desc->processor.length,
+				       ACPI_CAST_PTR(void,
+						     obj_desc->processor.
+						     address));
+			break;
+
+		case ACPI_TYPE_DEVICE:
+
+			acpi_os_printf("Notify Object: %p\n", obj_desc);
+			break;
+
+		case ACPI_TYPE_METHOD:
+
+			acpi_os_printf("Args %X Len %.4X Aml %p\n",
+				       (u32) obj_desc->method.param_count,
+				       obj_desc->method.aml_length,
+				       obj_desc->method.aml_start);
+			break;
+
+		case ACPI_TYPE_INTEGER:
+
+			acpi_os_printf("= %8.8X%8.8X\n",
+				       ACPI_FORMAT_UINT64(obj_desc->integer.
+							  value));
+			break;
+
+		case ACPI_TYPE_PACKAGE:
+
+			if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+				acpi_os_printf("Elements %.2X\n",
+					       obj_desc->package.count);
+			} else {
+				acpi_os_printf("[Length not yet evaluated]\n");
+			}
+			break;
+
+		case ACPI_TYPE_BUFFER:
+
+			if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+				acpi_os_printf("Len %.2X",
+					       obj_desc->buffer.length);
+
+				/* Dump some of the buffer */
+
+				if (obj_desc->buffer.length > 0) {
+					acpi_os_printf(" =");
+					for (i = 0;
+					     (i < obj_desc->buffer.length
+					      && i < 12); i++) {
+						acpi_os_printf(" %.2hX",
+							       obj_desc->buffer.
+							       pointer[i]);
+					}
+				}
+				acpi_os_printf("\n");
+			} else {
+				acpi_os_printf("[Length not yet evaluated]\n");
+			}
+			break;
+
+		case ACPI_TYPE_STRING:
+
+			acpi_os_printf("Len %.2X ", obj_desc->string.length);
+			acpi_ut_print_string(obj_desc->string.pointer, 32);
+			acpi_os_printf("\n");
+			break;
+
+		case ACPI_TYPE_REGION:
+
+			acpi_os_printf("[%s]",
+				       acpi_ut_get_region_name(obj_desc->region.
+							       space_id));
+			if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
+				acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
+					       ACPI_FORMAT_NATIVE_UINT
+					       (obj_desc->region.address),
+					       obj_desc->region.length);
+			} else {
+				acpi_os_printf
+				    (" [Address/Length not yet evaluated]\n");
+			}
+			break;
+
+		case ACPI_TYPE_LOCAL_REFERENCE:
+
+			acpi_os_printf("[%s]\n",
+				       acpi_ut_get_reference_name(obj_desc));
+			break;
+
+		case ACPI_TYPE_BUFFER_FIELD:
+
+			if (obj_desc->buffer_field.buffer_obj &&
+			    obj_desc->buffer_field.buffer_obj->buffer.node) {
+				acpi_os_printf("Buf [%4.4s]",
+					       acpi_ut_get_node_name(obj_desc->
+								     buffer_field.
+								     buffer_obj->
+								     buffer.
+								     node));
+			}
+			break;
+
+		case ACPI_TYPE_LOCAL_REGION_FIELD:
+
+			acpi_os_printf("Rgn [%4.4s]",
+				       acpi_ut_get_node_name(obj_desc->
+							     common_field.
+							     region_obj->region.
+							     node));
+			break;
+
+		case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+			acpi_os_printf("Rgn [%4.4s] Bnk [%4.4s]",
+				       acpi_ut_get_node_name(obj_desc->
+							     common_field.
+							     region_obj->region.
+							     node),
+				       acpi_ut_get_node_name(obj_desc->
+							     bank_field.
+							     bank_obj->
+							     common_field.
+							     node));
+			break;
+
+		case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+			acpi_os_printf("Idx [%4.4s] Dat [%4.4s]",
+				       acpi_ut_get_node_name(obj_desc->
+							     index_field.
+							     index_obj->
+							     common_field.node),
+				       acpi_ut_get_node_name(obj_desc->
+							     index_field.
+							     data_obj->
+							     common_field.
+							     node));
+			break;
+
+		case ACPI_TYPE_LOCAL_ALIAS:
+		case ACPI_TYPE_LOCAL_METHOD_ALIAS:
+
+			acpi_os_printf("Target %4.4s (%p)\n",
+				       acpi_ut_get_node_name(obj_desc),
+				       obj_desc);
+			break;
+
+		default:
+
+			acpi_os_printf("Object %p\n", obj_desc);
+			break;
+		}
+
+		/* Common field handling */
+
+		switch (type) {
+		case ACPI_TYPE_BUFFER_FIELD:
+		case ACPI_TYPE_LOCAL_REGION_FIELD:
+		case ACPI_TYPE_LOCAL_BANK_FIELD:
+		case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+			acpi_os_printf(" Off %.3X Len %.2X Acc %.2hd\n",
+				       (obj_desc->common_field.
+					base_byte_offset * 8)
+				       +
+				       obj_desc->common_field.
+				       start_field_bit_offset,
+				       obj_desc->common_field.bit_length,
+				       obj_desc->common_field.
+				       access_byte_width);
+			break;
+
+		default:
+			break;
+		}
+		break;
+
+	case ACPI_DISPLAY_OBJECTS:
+
+		acpi_os_printf("O:%p", obj_desc);
+		if (!obj_desc) {
+
+			/* No attached object, we are done */
+
+			acpi_os_printf("\n");
+			return (AE_OK);
+		}
+
+		acpi_os_printf("(R%d)", obj_desc->common.reference_count);
+
+		switch (type) {
+		case ACPI_TYPE_METHOD:
+
+			/* Name is a Method and its AML offset/length are set */
+
+			acpi_os_printf(" M:%p-%X\n", obj_desc->method.aml_start,
+				       obj_desc->method.aml_length);
+			break;
+
+		case ACPI_TYPE_INTEGER:
+
+			acpi_os_printf(" I:%8.8X%8.8X\n",
+				       ACPI_FORMAT_UINT64(obj_desc->integer.
+							  value));
+			break;
+
+		case ACPI_TYPE_STRING:
+
+			acpi_os_printf(" S:%p-%X\n", obj_desc->string.pointer,
+				       obj_desc->string.length);
+			break;
+
+		case ACPI_TYPE_BUFFER:
+
+			acpi_os_printf(" B:%p-%X\n", obj_desc->buffer.pointer,
+				       obj_desc->buffer.length);
+			break;
+
+		default:
+
+			acpi_os_printf("\n");
+			break;
+		}
+		break;
+
+	default:
+		acpi_os_printf("\n");
+		break;
+	}
+
+	/* If debug turned off, done */
+
+	if (!(acpi_dbg_level & ACPI_LV_VALUES)) {
+		return (AE_OK);
+	}
+
+	/* If there is an attached object, display it */
+
+	dbg_level = acpi_dbg_level;
+	acpi_dbg_level = 0;
+	obj_desc = acpi_ns_get_attached_object(this_node);
+	acpi_dbg_level = dbg_level;
+
+	/* Dump attached objects */
+
+	while (obj_desc) {
+		obj_type = ACPI_TYPE_INVALID;
+		acpi_os_printf("Attached Object %p: ", obj_desc);
+
+		/* Decode the type of attached object and dump the contents */
+
+		switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
+		case ACPI_DESC_TYPE_NAMED:
+
+			acpi_os_printf("(Ptr to Node)\n");
+			bytes_to_dump = sizeof(struct acpi_namespace_node);
+			ACPI_DUMP_BUFFER(obj_desc, bytes_to_dump);
+			break;
+
+		case ACPI_DESC_TYPE_OPERAND:
+
+			obj_type = ACPI_GET_OBJECT_TYPE(obj_desc);
+
+			if (obj_type > ACPI_TYPE_LOCAL_MAX) {
+				acpi_os_printf
+				    ("(Pointer to ACPI Object type %.2X [UNKNOWN])\n",
+				     obj_type);
+				bytes_to_dump = 32;
+			} else {
+				acpi_os_printf
+				    ("(Pointer to ACPI Object type %.2X [%s])\n",
+				     obj_type, acpi_ut_get_type_name(obj_type));
+				bytes_to_dump =
+				    sizeof(union acpi_operand_object);
+			}
+
+			ACPI_DUMP_BUFFER(obj_desc, bytes_to_dump);
+			break;
+
+		default:
+
+			break;
+		}
+
+		/* If value is NOT an internal object, we are done */
+
+		if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) !=
+		    ACPI_DESC_TYPE_OPERAND) {
+			goto cleanup;
+		}
+
+		/*
+		 * Valid object, get the pointer to next level, if any
+		 */
+		switch (obj_type) {
+		case ACPI_TYPE_BUFFER:
+		case ACPI_TYPE_STRING:
+			/*
+			 * NOTE: takes advantage of common fields between string/buffer
+			 */
+			bytes_to_dump = obj_desc->string.length;
+			obj_desc = (void *)obj_desc->string.pointer;
+			acpi_os_printf("(Buffer/String pointer %p length %X)\n",
+				       obj_desc, bytes_to_dump);
+			ACPI_DUMP_BUFFER(obj_desc, bytes_to_dump);
+			goto cleanup;
+
+		case ACPI_TYPE_BUFFER_FIELD:
+			obj_desc =
+			    (union acpi_operand_object *)obj_desc->buffer_field.
+			    buffer_obj;
+			break;
+
+		case ACPI_TYPE_PACKAGE:
+			obj_desc = (void *)obj_desc->package.elements;
+			break;
+
+		case ACPI_TYPE_METHOD:
+			obj_desc = (void *)obj_desc->method.aml_start;
+			break;
+
+		case ACPI_TYPE_LOCAL_REGION_FIELD:
+			obj_desc = (void *)obj_desc->field.region_obj;
+			break;
+
+		case ACPI_TYPE_LOCAL_BANK_FIELD:
+			obj_desc = (void *)obj_desc->bank_field.region_obj;
+			break;
+
+		case ACPI_TYPE_LOCAL_INDEX_FIELD:
+			obj_desc = (void *)obj_desc->index_field.index_obj;
+			break;
+
+		default:
+			goto cleanup;
+		}
+
+		obj_type = ACPI_TYPE_INVALID;	/* Terminate loop after next pass */
+	}
+
+      cleanup:
+	acpi_os_printf("\n");
+	return (AE_OK);
+}
+
+#ifdef ACPI_FUTURE_USAGE
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_dump_objects
+ *
+ * PARAMETERS:  Type                - Object type to be dumped
+ *              display_type        - 0 or ACPI_DISPLAY_SUMMARY
+ *              max_depth           - Maximum depth of dump. Use ACPI_UINT32_MAX
+ *                                    for an effectively unlimited depth.
+ *              owner_id            - Dump only objects owned by this ID.  Use
+ *                                    ACPI_OWNER_ID_MAX to match all owners.
+ *              start_handle        - Where in namespace to start/end search
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Dump typed objects within the loaded namespace.
+ *              Uses acpi_ns_walk_namespace in conjunction with acpi_ns_dump_one_object.
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_dump_objects(acpi_object_type type,
+		     u8 display_type,
+		     u32 max_depth,
+		     acpi_owner_id owner_id, acpi_handle start_handle)
+{
+	struct acpi_walk_info info;
+
+	ACPI_FUNCTION_ENTRY();
+
+	info.debug_level = ACPI_LV_TABLES;
+	info.owner_id = owner_id;
+	info.display_type = display_type;
+
+	(void)acpi_ns_walk_namespace(type, start_handle, max_depth,
+				     ACPI_NS_WALK_NO_UNLOCK |
+				     ACPI_NS_WALK_TEMP_NODES,
+				     acpi_ns_dump_one_object, (void *)&info,
+				     NULL);
+}
+#endif				/* ACPI_FUTURE_USAGE */
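+
+/*
+ * Illustrative usage sketch (not part of the ACPICA sources): a debugger
+ * front-end could print a one-line summary of every node in the loaded
+ * namespace, regardless of owner, with a call such as:
+ *
+ *     acpi_ns_dump_objects(ACPI_TYPE_ANY, ACPI_DISPLAY_SUMMARY,
+ *                          ACPI_UINT32_MAX, ACPI_OWNER_ID_MAX,
+ *                          acpi_gbl_root_node);
+ *
+ * acpi_ns_dump_tables() below makes the same call with ACPI_DISPLAY_OBJECTS
+ * so that the attached operand objects are decoded as well.
+ */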
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_dump_entry
+ *
+ * PARAMETERS:  Handle              - Node to be dumped
+ *              debug_level         - Output level
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Dump a single Node
+ *
+ ******************************************************************************/
+
+void acpi_ns_dump_entry(acpi_handle handle, u32 debug_level)
+{
+	struct acpi_walk_info info;
+
+	ACPI_FUNCTION_ENTRY();
+
+	info.debug_level = debug_level;
+	info.owner_id = ACPI_OWNER_ID_MAX;
+	info.display_type = ACPI_DISPLAY_SUMMARY;
+
+	(void)acpi_ns_dump_one_object(handle, 1, &info, NULL);
+}
+
+#ifdef ACPI_ASL_COMPILER
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_dump_tables
+ *
+ * PARAMETERS:  search_base         - Root of subtree to be dumped, or
+ *                                    NS_ALL to dump the entire namespace
+ *              max_depth           - Maximum depth of dump.  Use INT_MAX
+ *                                    for an effectively unlimited depth.
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Dump the name space, or a portion of it.
+ *
+ ******************************************************************************/
+
+void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth)
+{
+	acpi_handle search_handle = search_base;
+
+	ACPI_FUNCTION_TRACE(ns_dump_tables);
+
+	if (!acpi_gbl_root_node) {
+		/*
+		 * If the name space has not been initialized,
+		 * there is nothing to dump.
+		 */
+		ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
+				  "namespace not initialized!\n"));
+		return_VOID;
+	}
+
+	if (ACPI_NS_ALL == search_base) {
+
+		/* Entire namespace */
+
+		search_handle = acpi_gbl_root_node;
+		ACPI_DEBUG_PRINT((ACPI_DB_TABLES, "\\\n"));
+	}
+
+	acpi_ns_dump_objects(ACPI_TYPE_ANY, ACPI_DISPLAY_OBJECTS, max_depth,
+			     ACPI_OWNER_ID_MAX, search_handle);
+	return_VOID;
+}
+#endif				/* ACPI_ASL_COMPILER */
+#endif				/* defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) */
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
new file mode 100644
index 0000000..41994fe
--- /dev/null
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -0,0 +1,141 @@
+/******************************************************************************
+ *
+ * Module Name: nsdumpdv - device namespace dump routines for debug
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+/* TBD: This entire module is apparently obsolete and should be removed */
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsdumpdv")
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+#include "acnamesp.h"
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_dump_one_device
+ *
+ * PARAMETERS:  Handle              - Node to be dumped
+ *              Level               - Nesting level of the handle
+ *              Context             - Passed into walk_namespace
+ *              return_value        - Not used
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Dump a single Node that represents a device
+ *              This procedure is a user_function called by acpi_ns_walk_namespace.
+ *
+ ******************************************************************************/
+static acpi_status
+acpi_ns_dump_one_device(acpi_handle obj_handle,
+			u32 level, void *context, void **return_value)
+{
+	struct acpi_buffer buffer;
+	struct acpi_device_info *info;
+	acpi_status status;
+	u32 i;
+
+	ACPI_FUNCTION_NAME(ns_dump_one_device);
+
+	status =
+	    acpi_ns_dump_one_object(obj_handle, level, context, return_value);
+
+	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+	status = acpi_get_object_info(obj_handle, &buffer);
+	if (ACPI_SUCCESS(status)) {
+		info = buffer.pointer;
+		for (i = 0; i < level; i++) {
+			ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES, " "));
+		}
+
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES,
+				      "    HID: %s, ADR: %8.8X%8.8X, Status: %X\n",
+				      info->hardware_id.value,
+				      ACPI_FORMAT_UINT64(info->address),
+				      info->current_status));
+		ACPI_FREE(info);
+	}
+
+	return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_dump_root_devices
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Dump all objects of type "device"
+ *
+ ******************************************************************************/
+
+void acpi_ns_dump_root_devices(void)
+{
+	acpi_handle sys_bus_handle;
+	acpi_status status;
+
+	ACPI_FUNCTION_NAME(ns_dump_root_devices);
+
+	/* Only dump the table if tracing is enabled */
+
+	if (!(ACPI_LV_TABLES & acpi_dbg_level)) {
+		return;
+	}
+
+	status = acpi_get_handle(NULL, ACPI_NS_SYSTEM_BUS, &sys_bus_handle);
+	if (ACPI_FAILURE(status)) {
+		return;
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
+			  "Display of all devices in the namespace:\n"));
+
+	status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, sys_bus_handle,
+					ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
+					acpi_ns_dump_one_device, NULL, NULL);
+}
+
+#endif
+#endif
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
new file mode 100644
index 0000000..0f3d5f9
--- /dev/null
+++ b/drivers/acpi/acpica/nseval.c
@@ -0,0 +1,278 @@
+/*******************************************************************************
+ *
+ * Module Name: nseval - Object evaluation, includes control method execution
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "acinterp.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nseval")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_evaluate
+ *
+ * PARAMETERS:  Info            - Evaluation info block, contains:
+ *                  prefix_node     - Prefix or Method/Object Node to execute
+ *                  Pathname        - Name of method to execute, If NULL, the
+ *                                    Node is the object to execute
+ *                  Parameters      - List of parameters to pass to the method,
+ *                                    terminated by NULL. Params itself may be
+ *                                    NULL if no parameters are being passed.
+ *                  return_object   - Where to put method's return value (if
+ *                                    any). If NULL, no value is returned.
+ *                  parameter_type  - Type of Parameter list
+ *                  Flags           - ACPI_IGNORE_RETURN_VALUE to delete
+ *                                    the return value
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute a control method or return the current value of an
+ *              ACPI namespace object.
+ *
+ * MUTEX:       Locks interpreter
+ *
+ ******************************************************************************/
+acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+
+	ACPI_FUNCTION_TRACE(ns_evaluate);
+
+	if (!info) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Initialize the return value to an invalid object */
+
+	info->return_object = NULL;
+	info->param_count = 0;
+
+	/*
+	 * Get the actual namespace node for the target object. Handles these cases:
+	 *
+	 * 1) Null node, Pathname (absolute path)
+	 * 2) Node, Pathname (path relative to Node)
+	 * 3) Node, Null Pathname
+	 */
+	status = acpi_ns_get_node(info->prefix_node, info->pathname,
+				  ACPI_NS_NO_UPSEARCH, &info->resolved_node);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * For a method alias, we must grab the actual method node so that proper
+	 * scoping context will be established before execution.
+	 */
+	if (acpi_ns_get_type(info->resolved_node) ==
+	    ACPI_TYPE_LOCAL_METHOD_ALIAS) {
+		info->resolved_node =
+		    ACPI_CAST_PTR(struct acpi_namespace_node,
+				  info->resolved_node->object);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "%s [%p] Value %p\n", info->pathname,
+			  info->resolved_node,
+			  acpi_ns_get_attached_object(info->resolved_node)));
+
+	node = info->resolved_node;
+
+	/*
+	 * Two major cases here:
+	 *
+	 * 1) The object is a control method -- execute it
+	 * 2) The object is not a method -- just return its current value
+	 */
+	if (acpi_ns_get_type(info->resolved_node) == ACPI_TYPE_METHOD) {
+		/*
+		 * 1) Object is a control method - execute it
+		 */
+
+		/* Verify that there is a method object associated with this node */
+
+		info->obj_desc =
+		    acpi_ns_get_attached_object(info->resolved_node);
+		if (!info->obj_desc) {
+			ACPI_ERROR((AE_INFO,
+				    "Control method has no attached sub-object"));
+			return_ACPI_STATUS(AE_NULL_OBJECT);
+		}
+
+		/* Count the number of arguments being passed to the method */
+
+		if (info->parameters) {
+			while (info->parameters[info->param_count]) {
+				if (info->param_count > ACPI_METHOD_MAX_ARG) {
+					return_ACPI_STATUS(AE_LIMIT);
+				}
+				info->param_count++;
+			}
+		}
+
+		ACPI_DUMP_PATHNAME(info->resolved_node, "Execute Method:",
+				   ACPI_LV_INFO, _COMPONENT);
+
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "Method at AML address %p Length %X\n",
+				  info->obj_desc->method.aml_start + 1,
+				  info->obj_desc->method.aml_length - 1));
+
+		/*
+		 * Any namespace deletion must acquire both the namespace and
+		 * interpreter locks to ensure that no thread is using the portion of
+		 * the namespace that is being deleted.
+		 *
+		 * Execute the method via the interpreter. The interpreter is locked
+		 * here before calling into the AML parser
+		 */
+		acpi_ex_enter_interpreter();
+		status = acpi_ps_execute_method(info);
+		acpi_ex_exit_interpreter();
+	} else {
+		/*
+		 * 2) Object is not a method, return its current value
+		 *
+		 * Disallow certain object types. For these, "evaluation" is undefined.
+		 */
+		switch (info->resolved_node->type) {
+		case ACPI_TYPE_DEVICE:
+		case ACPI_TYPE_EVENT:
+		case ACPI_TYPE_MUTEX:
+		case ACPI_TYPE_REGION:
+		case ACPI_TYPE_THERMAL:
+		case ACPI_TYPE_LOCAL_SCOPE:
+
+			ACPI_ERROR((AE_INFO,
+				    "[%4.4s] Evaluation of object type [%s] is not supported",
+				    info->resolved_node->name.ascii,
+				    acpi_ut_get_type_name(info->resolved_node->
+							  type)));
+
+			return_ACPI_STATUS(AE_TYPE);
+
+		default:
+			break;
+		}
+
+		/*
+		 * Objects require additional resolution steps (e.g., the Node may be
+		 * a field that must be read, etc.) -- we can't just grab the object
+		 * out of the node.
+		 *
+		 * Use resolve_node_to_value() to get the associated value.
+		 *
+		 * NOTE: we can get away with passing in NULL for a walk state because
+		 * resolved_node is guaranteed to not be a reference to either a method
+		 * local or a method argument (because this interface is never called
+		 * from a running method.)
+		 *
+		 * Even though we do not directly invoke the interpreter for object
+		 * resolution, we must lock it because we could access an opregion.
+		 * The opregion access code assumes that the interpreter is locked.
+		 */
+		acpi_ex_enter_interpreter();
+
+		/* Function has a strange interface */
+
+		status =
+		    acpi_ex_resolve_node_to_value(&info->resolved_node, NULL);
+		acpi_ex_exit_interpreter();
+
+		/*
+		 * If acpi_ex_resolve_node_to_value() succeeded, the return value was placed
+		 * in resolved_node.
+		 */
+		if (ACPI_SUCCESS(status)) {
+			status = AE_CTRL_RETURN_VALUE;
+			info->return_object =
+			    ACPI_CAST_PTR(union acpi_operand_object,
+					  info->resolved_node);
+
+			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+					  "Returning object %p [%s]\n",
+					  info->return_object,
+					  acpi_ut_get_object_type_name(info->
+								       return_object)));
+		}
+	}
+
+	/*
+	 * Check input argument count against the ASL-defined count for a method.
+	 * Also check predefined names: argument count and return value against
+	 * the ACPI specification. Some incorrect return value types are repaired.
+	 */
+	(void)acpi_ns_check_predefined_names(node, info->param_count,
+		status, &info->return_object);
+
+	/* Check if there is a return value that must be dealt with */
+
+	if (status == AE_CTRL_RETURN_VALUE) {
+
+		/* If caller does not want the return value, delete it */
+
+		if (info->flags & ACPI_IGNORE_RETURN_VALUE) {
+			acpi_ut_remove_reference(info->return_object);
+			info->return_object = NULL;
+		}
+
+		/* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
+
+		status = AE_OK;
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+			  "*** Completed evaluation of object %s ***\n",
+			  info->pathname));
+
+	/*
+	 * Namespace was unlocked by the handling acpi_ns* function, so we
+	 * just return
+	 */
+	return_ACPI_STATUS(status);
+}
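+
+/*
+ * Illustrative usage sketch (not part of the ACPICA sources): callers build
+ * an acpi_evaluate_info block and hand it to acpi_ns_evaluate(). Evaluating
+ * a method such as _STA on a device node, discarding any return value,
+ * might look like:
+ *
+ *     info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ *     info->prefix_node = device_node;
+ *     info->pathname = METHOD_NAME__STA;
+ *     info->parameters = NULL;
+ *     info->flags = ACPI_IGNORE_RETURN_VALUE;
+ *     status = acpi_ns_evaluate(info);
+ *     ACPI_FREE(info);
+ *
+ * acpi_ns_init_one_device() in nsinit.c uses this pattern to run _INI.
+ */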
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
new file mode 100644
index 0000000..13501cb
--- /dev/null
+++ b/drivers/acpi/acpica/nsinit.c
@@ -0,0 +1,593 @@
+/******************************************************************************
+ *
+ * Module Name: nsinit - namespace initialization
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include <linux/nmi.h>
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsinit")
+
+/* Local prototypes */
+static acpi_status
+acpi_ns_init_one_object(acpi_handle obj_handle,
+			u32 level, void *context, void **return_value);
+
+static acpi_status
+acpi_ns_init_one_device(acpi_handle obj_handle,
+			u32 nesting_level, void *context, void **return_value);
+
+static acpi_status
+acpi_ns_find_ini_methods(acpi_handle obj_handle,
+			 u32 nesting_level, void *context, void **return_value);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_initialize_objects
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Walk the entire namespace and perform any necessary
+ *              initialization on the objects found therein
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ns_initialize_objects(void)
+{
+	acpi_status status;
+	struct acpi_init_walk_info info;
+
+	ACPI_FUNCTION_TRACE(ns_initialize_objects);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "**** Starting initialization of namespace objects ****\n"));
+	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+			      "Completing Region/Field/Buffer/Package initialization:"));
+
+	/* Set all init info to zero */
+
+	ACPI_MEMSET(&info, 0, sizeof(struct acpi_init_walk_info));
+
+	/* Walk entire namespace from the supplied root */
+
+	status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+				     ACPI_UINT32_MAX, acpi_ns_init_one_object,
+				     &info, NULL);
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
+	}
+
+	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+			      "\nInitialized %hd/%hd Regions %hd/%hd Fields %hd/%hd Buffers %hd/%hd Packages (%hd nodes)\n",
+			      info.op_region_init, info.op_region_count,
+			      info.field_init, info.field_count,
+			      info.buffer_init, info.buffer_count,
+			      info.package_init, info.package_count,
+			      info.object_count));
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "%hd Control Methods found\n", info.method_count));
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "%hd Op Regions found\n", info.op_region_count));
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_initialize_devices
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      acpi_status
+ *
+ * DESCRIPTION: Walk the entire namespace and initialize all ACPI devices.
+ *              This means running _INI on all present devices.
+ *
+ *              Note: We install the PCI config space handler on region access,
+ *              not here.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ns_initialize_devices(void)
+{
+	acpi_status status;
+	struct acpi_device_walk_info info;
+
+	ACPI_FUNCTION_TRACE(ns_initialize_devices);
+
+	/* Init counters */
+
+	info.device_count = 0;
+	info.num_STA = 0;
+	info.num_INI = 0;
+
+	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+			      "Initializing Device/Processor/Thermal objects by executing _INI methods:"));
+
+	/* Tree analysis: find all subtrees that contain _INI methods */
+
+	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+					ACPI_UINT32_MAX, FALSE,
+					acpi_ns_find_ini_methods, &info, NULL);
+	if (ACPI_FAILURE(status)) {
+		goto error_exit;
+	}
+
+	/* Allocate the evaluation information block */
+
+	info.evaluate_info =
+	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+	if (!info.evaluate_info) {
+		status = AE_NO_MEMORY;
+		goto error_exit;
+	}
+
+	/* Walk namespace to execute all _INIs on present devices */
+
+	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+					ACPI_UINT32_MAX, FALSE,
+					acpi_ns_init_one_device, &info, NULL);
+
+	ACPI_FREE(info.evaluate_info);
+	if (ACPI_FAILURE(status)) {
+		goto error_exit;
+	}
+
+	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+			      "\nExecuted %hd _INI methods requiring %hd _STA executions (examined %hd objects)\n",
+			      info.num_INI, info.num_STA, info.device_count));
+
+	return_ACPI_STATUS(status);
+
+      error_exit:
+	ACPI_EXCEPTION((AE_INFO, status, "During device initialization"));
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_init_one_object
+ *
+ * PARAMETERS:  obj_handle      - Node
+ *              Level           - Current nesting level
+ *              Context         - Points to a init info struct
+ *              return_value    - Not used
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Callback from acpi_walk_namespace. Invoked for every object
+ *              within the namespace.
+ *
+ *              Currently, the only objects that require initialization are:
+ *              1) Methods
+ *              2) Op Regions
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_init_one_object(acpi_handle obj_handle,
+			u32 level, void *context, void **return_value)
+{
+	acpi_object_type type;
+	acpi_status status = AE_OK;
+	struct acpi_init_walk_info *info =
+	    (struct acpi_init_walk_info *)context;
+	struct acpi_namespace_node *node =
+	    (struct acpi_namespace_node *)obj_handle;
+	union acpi_operand_object *obj_desc;
+
+	ACPI_FUNCTION_NAME(ns_init_one_object);
+
+	info->object_count++;
+
+	/* And even then, we are only interested in a few object types */
+
+	type = acpi_ns_get_type(obj_handle);
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (!obj_desc) {
+		return (AE_OK);
+	}
+
+	/* Increment counters for object types we are looking for */
+
+	switch (type) {
+	case ACPI_TYPE_REGION:
+		info->op_region_count++;
+		break;
+
+	case ACPI_TYPE_BUFFER_FIELD:
+		info->field_count++;
+		break;
+
+	case ACPI_TYPE_LOCAL_BANK_FIELD:
+		info->field_count++;
+		break;
+
+	case ACPI_TYPE_BUFFER:
+		info->buffer_count++;
+		break;
+
+	case ACPI_TYPE_PACKAGE:
+		info->package_count++;
+		break;
+
+	default:
+
+		/* No init required, just exit now */
+		return (AE_OK);
+	}
+
+	/*
+	 * If the object is already initialized, nothing else to do
+	 */
+	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+		return (AE_OK);
+	}
+
+	/*
+	 * Must lock the interpreter before executing AML code
+	 */
+	acpi_ex_enter_interpreter();
+
+	/*
+	 * Each of these types can contain executable AML code within the
+	 * declaration.
+	 */
+	switch (type) {
+	case ACPI_TYPE_REGION:
+
+		info->op_region_init++;
+		status = acpi_ds_get_region_arguments(obj_desc);
+		break;
+
+	case ACPI_TYPE_BUFFER_FIELD:
+
+		info->field_init++;
+		status = acpi_ds_get_buffer_field_arguments(obj_desc);
+		break;
+
+	case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+		info->field_init++;
+		status = acpi_ds_get_bank_field_arguments(obj_desc);
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		info->buffer_init++;
+		status = acpi_ds_get_buffer_arguments(obj_desc);
+		break;
+
+	case ACPI_TYPE_PACKAGE:
+
+		info->package_init++;
+		status = acpi_ds_get_package_arguments(obj_desc);
+		break;
+
+	default:
+		/* No other types can get here */
+		break;
+	}
+
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"Could not execute arguments for [%4.4s] (%s)",
+				acpi_ut_get_node_name(node),
+				acpi_ut_get_type_name(type)));
+	}
+
+	/*
+	 * Print a dot for each object unless we are going to print the entire
+	 * pathname
+	 */
+	if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
+	}
+
+	/*
+	 * We ignore errors from above, and always return OK, since we don't want
+	 * to abort the walk on any single error.
+	 */
+	acpi_ex_exit_interpreter();
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_find_ini_methods
+ *
+ * PARAMETERS:  acpi_walk_callback
+ *
+ * RETURN:      acpi_status
+ *
+ * DESCRIPTION: Called during namespace walk. Finds objects named _INI under
+ *              device/processor/thermal objects, and marks the entire subtree
+ *              with a SUBTREE_HAS_INI flag. This flag is used during the
+ *              subsequent device initialization walk to avoid entire subtrees
+ *              that do not contain an _INI.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_find_ini_methods(acpi_handle obj_handle,
+			 u32 nesting_level, void *context, void **return_value)
+{
+	struct acpi_device_walk_info *info =
+	    ACPI_CAST_PTR(struct acpi_device_walk_info, context);
+	struct acpi_namespace_node *node;
+	struct acpi_namespace_node *parent_node;
+
+	/* Keep count of device/processor/thermal objects */
+
+	node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
+	if ((node->type == ACPI_TYPE_DEVICE) ||
+	    (node->type == ACPI_TYPE_PROCESSOR) ||
+	    (node->type == ACPI_TYPE_THERMAL)) {
+		info->device_count++;
+		return (AE_OK);
+	}
+
+	/* We are only looking for methods named _INI */
+
+	if (!ACPI_COMPARE_NAME(node->name.ascii, METHOD_NAME__INI)) {
+		return (AE_OK);
+	}
+
+	/*
+	 * The only _INI methods that we care about are those that are
+	 * present under Device, Processor, and Thermal objects.
+	 */
+	parent_node = acpi_ns_get_parent_node(node);
+	switch (parent_node->type) {
+	case ACPI_TYPE_DEVICE:
+	case ACPI_TYPE_PROCESSOR:
+	case ACPI_TYPE_THERMAL:
+
+		/* Mark parent and bubble up the INI present flag to the root */
+
+		while (parent_node) {
+			parent_node->flags |= ANOBJ_SUBTREE_HAS_INI;
+			parent_node = acpi_ns_get_parent_node(parent_node);
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return (AE_OK);
+}
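+
+/*
+ * Illustrative example (not part of the ACPICA sources): for a namespace
+ * containing \_SB_.PCI0._INI, this pass finds the _INI method, sees that
+ * its parent PCI0 is a Device, and sets ANOBJ_SUBTREE_HAS_INI on PCI0,
+ * \_SB_ and the root node. The device-initialization walk below can then
+ * return AE_CTRL_DEPTH for any Device/Processor/Thermal subtree that is
+ * not flagged, skipping that entire branch without running _STA or _INI.
+ */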
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_init_one_device
+ *
+ * PARAMETERS:  acpi_walk_callback
+ *
+ * RETURN:      acpi_status
+ *
+ * DESCRIPTION: This is called once per device soon after ACPI is enabled
+ *              to initialize each device. It determines if the device is
+ *              present, and if so, calls _INI.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_init_one_device(acpi_handle obj_handle,
+			u32 nesting_level, void *context, void **return_value)
+{
+	struct acpi_device_walk_info *walk_info =
+	    ACPI_CAST_PTR(struct acpi_device_walk_info, context);
+	struct acpi_evaluate_info *info = walk_info->evaluate_info;
+	u32 flags;
+	acpi_status status;
+	struct acpi_namespace_node *device_node;
+
+	ACPI_FUNCTION_TRACE(ns_init_one_device);
+
+	/* We are interested in Devices, Processors and thermal_zones only */
+
+	device_node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
+	if ((device_node->type != ACPI_TYPE_DEVICE) &&
+	    (device_node->type != ACPI_TYPE_PROCESSOR) &&
+	    (device_node->type != ACPI_TYPE_THERMAL)) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/*
+	 * Because of an earlier namespace analysis, all subtrees that contain an
+	 * _INI method are tagged.
+	 *
+	 * If this device subtree does not contain any _INI methods, we
+	 * can exit now and stop traversing this entire subtree.
+	 */
+	if (!(device_node->flags & ANOBJ_SUBTREE_HAS_INI)) {
+		return_ACPI_STATUS(AE_CTRL_DEPTH);
+	}
+
+	/*
+	 * Run _STA to determine if this device is present and functioning. We
+	 * must know this information for two important reasons (from ACPI spec):
+	 *
+	 * 1) We can only run _INI if the device is present.
+	 * 2) We must abort the device tree walk on this subtree if the device is
+	 *    not present and is not functional (we will not examine the children)
+	 *
+	 * The _STA method is not required to be present under the device; we
+	 * assume the device is present if _STA does not exist.
+	 */
+	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+			(ACPI_TYPE_METHOD, device_node, METHOD_NAME__STA));
+
+	status = acpi_ut_execute_STA(device_node, &flags);
+	if (ACPI_FAILURE(status)) {
+
+		/* Ignore error and move on to next device */
+
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/*
+	 * Flags == -1 means that _STA was not found. In this case, we assume that
+	 * the device is both present and functional.
+	 *
+	 * From the ACPI spec, description of _STA:
+	 *
+	 * "If a device object (including the processor object) does not have an
+	 * _STA object, then OSPM assumes that all of the above bits are set (in
+	 * other words, the device is present, ..., and functioning)"
+	 */
+	if (flags != ACPI_UINT32_MAX) {
+		walk_info->num_STA++;
+	}
+
+	/*
+	 * Examine the PRESENT and FUNCTIONING status bits
+	 *
+	 * Note: ACPI spec does not seem to specify behavior for the present but
+	 * not functioning case, so we assume functioning if present.
+	 */
+	if (!(flags & ACPI_STA_DEVICE_PRESENT)) {
+
+		/* Device is not present, we must examine the Functioning bit */
+
+		if (flags & ACPI_STA_DEVICE_FUNCTIONING) {
+			/*
+			 * Device is not present but is "functioning". In this case,
+			 * we will not run _INI, but we continue to examine the children
+			 * of this device.
+			 *
+			 * From the ACPI spec, description of _STA: (Note - no mention
+			 * of whether to run _INI or not on the device in question)
+			 *
+			 * "_STA may return bit 0 clear (not present) with bit 3 set
+			 * (device is functional). This case is used to indicate a valid
+			 * device for which no device driver should be loaded (for example,
+			 * a bridge device.) Children of this device may be present and
+			 * valid. OSPM should continue enumeration below a device whose
+			 * _STA returns this bit combination"
+			 */
+			return_ACPI_STATUS(AE_OK);
+		} else {
+			/*
+			 * Device is not present and is not functioning. We must abort the
+			 * walk of this subtree immediately -- don't look at the children
+			 * of such a device.
+			 *
+			 * From the ACPI spec, description of _INI:
+			 *
+			 * "If the _STA method indicates that the device is not present,
+			 * OSPM will not run the _INI and will not examine the children
+			 * of the device for _INI methods"
+			 */
+			return_ACPI_STATUS(AE_CTRL_DEPTH);
+		}
+	}
+
+	/*
+	 * The device is present or is assumed present if no _STA exists.
+	 * Run the _INI if it exists (not required to exist)
+	 *
+	 * Note: We know there is an _INI within this subtree, but it may not be
+	 * under this particular device, it may be lower in the branch.
+	 */
+	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+			(ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI));
+
+	info->prefix_node = device_node;
+	info->pathname = METHOD_NAME__INI;
+	info->parameters = NULL;
+	info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+	/*
+	 * Some hardware relies on this being executed as atomically
+	 * as possible (without an NMI being received in the middle of
+	 * this) - so disable NMIs and initialize the device:
+	 */
+	acpi_nmi_disable();
+	status = acpi_ns_evaluate(info);
+	acpi_nmi_enable();
+
+	if (ACPI_SUCCESS(status)) {
+		walk_info->num_INI++;
+
+		if ((acpi_dbg_level <= ACPI_LV_ALL_EXCEPTIONS) &&
+		    (!(acpi_dbg_level & ACPI_LV_INFO))) {
+			ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
+		}
+	}
+#ifdef ACPI_DEBUG_OUTPUT
+	else if (status != AE_NOT_FOUND) {
+
+		/* Ignore error and move on to next device */
+
+		char *scope_name =
+		    acpi_ns_get_external_pathname(info->resolved_node);
+
+		ACPI_EXCEPTION((AE_INFO, status, "during %s._INI execution",
+				scope_name));
+		ACPI_FREE(scope_name);
+	}
+#endif
+
+	/* Ignore errors from above */
+
+	status = AE_OK;
+
+	/*
+	 * The _INI method has been run if present; call the Global Initialization
+	 * Handler for this device.
+	 */
+	if (acpi_gbl_init_handler) {
+		status =
+		    acpi_gbl_init_handler(device_node, ACPI_INIT_DEVICE_INI);
+	}
+
+	return_ACPI_STATUS(status);
+}
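+
+/*
+ * Summary of the _STA handling above (informational, not part of the ACPICA
+ * sources). ACPI_STA_DEVICE_PRESENT is bit 0 and ACPI_STA_DEVICE_FUNCTIONING
+ * is bit 3 of the _STA return value:
+ *
+ *     Present  Functioning  Action
+ *     -------  -----------  ------------------------------------------
+ *        1         x        run _INI (if any), then visit the children
+ *        0         1        skip _INI, but still visit the children
+ *        0         0        return AE_CTRL_DEPTH and prune the subtree
+ *     (no _STA method)      treat as present and functioning
+ */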
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
new file mode 100644
index 0000000..a0ba9e1
--- /dev/null
+++ b/drivers/acpi/acpica/nsload.c
@@ -0,0 +1,315 @@
+/******************************************************************************
+ *
+ * Module Name: nsload - namespace loading/expanding/contracting procedures
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acdispat.h"
+#include "actables.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsload")
+
+/* Local prototypes */
+#ifdef ACPI_FUTURE_IMPLEMENTATION
+acpi_status acpi_ns_unload_namespace(acpi_handle handle);
+
+static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle);
+#endif
+
+#ifndef ACPI_NO_METHOD_EXECUTION
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_load_table
+ *
+ * PARAMETERS:  table_index     - Index for table to be loaded
+ *              Node            - Owning NS node
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Load one ACPI table into the namespace
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ns_load_table);
+
+	/*
+	 * Parse the table and load the namespace with all named
+	 * objects found within.  Control methods are NOT parsed
+	 * at this time.  In fact, the control methods cannot be
+	 * parsed until the entire namespace is loaded, because
+	 * if a control method makes a forward reference (call)
+	 * to another control method, we can't continue parsing
+	 * because we don't know how many arguments to parse next!
+	 */
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* If table already loaded into namespace, just return */
+
+	if (acpi_tb_is_table_loaded(table_index)) {
+		status = AE_ALREADY_EXISTS;
+		goto unlock;
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			  "**** Loading table into namespace ****\n"));
+
+	status = acpi_tb_allocate_owner_id(table_index);
+	if (ACPI_FAILURE(status)) {
+		goto unlock;
+	}
+
+	status = acpi_ns_parse_table(table_index, node);
+	if (ACPI_SUCCESS(status)) {
+		acpi_tb_set_table_loaded_flag(table_index, TRUE);
+	} else {
+		(void)acpi_tb_release_owner_id(table_index);
+	}
+
+      unlock:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Now we can parse the control methods.  We always parse
+	 * them here for a sanity check, and if configured for
+	 * just-in-time parsing, we delete the control method
+	 * parse trees.
+	 */
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			  "**** Begin Table Method Parsing and Object Initialization ****\n"));
+
+	status = acpi_ds_initialize_objects(table_index, node);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			  "**** Completed Table Method Parsing and Object Initialization ****\n"));
+
+	return_ACPI_STATUS(status);
+}
+
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_load_namespace
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Load the namespace from whatever is pointed to by the DSDT.
+ *              (DSDT points to either the BIOS or a buffer.)
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ns_load_namespace(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_load_name_space);
+
+	/* There must be at least a DSDT installed */
+
+	if (acpi_gbl_DSDT == NULL) {
+		ACPI_ERROR((AE_INFO, "DSDT is not in memory"));
+		return_ACPI_STATUS(AE_NO_ACPI_TABLES);
+	}
+
+	/*
+	 * Load the namespace.  The DSDT is required,
+	 * but the SSDT and PSDT tables are optional.
+	 */
+	status = acpi_ns_load_table_by_type(ACPI_TABLE_ID_DSDT);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Ignore exceptions from these */
+
+	(void)acpi_ns_load_table_by_type(ACPI_TABLE_ID_SSDT);
+	(void)acpi_ns_load_table_by_type(ACPI_TABLE_ID_PSDT);
+
+	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+			      "ACPI Namespace successfully loaded at root %p\n",
+			      acpi_gbl_root_node));
+
+	return_ACPI_STATUS(status);
+}
+#endif
+
+#ifdef ACPI_FUTURE_IMPLEMENTATION
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_delete_subtree
+ *
+ * PARAMETERS:  start_handle        - Handle in namespace where search begins
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Walks the namespace starting at the given handle and deletes
+ *              all objects, entries, and scopes in the entire subtree.
+ *
+ *              Namespace/Interpreter should be locked or the subsystem should
+ *              be in shutdown before this routine is called.
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
+{
+	acpi_status status;
+	acpi_handle child_handle;
+	acpi_handle parent_handle;
+	acpi_handle next_child_handle;
+	acpi_handle dummy;
+	u32 level;
+
+	ACPI_FUNCTION_TRACE(ns_delete_subtree);
+
+	parent_handle = start_handle;
+	child_handle = NULL;
+	level = 1;
+
+	/*
+	 * Traverse the tree of objects until we bubble back up
+	 * to where we started.
+	 */
+	while (level > 0) {
+
+		/* Attempt to get the next object in this scope */
+
+		status = acpi_get_next_object(ACPI_TYPE_ANY, parent_handle,
+					      child_handle, &next_child_handle);
+
+		child_handle = next_child_handle;
+
+		/* Did we get a new object? */
+
+		if (ACPI_SUCCESS(status)) {
+
+			/* Check if this object has any children */
+
+			if (ACPI_SUCCESS
+			    (acpi_get_next_object
+			     (ACPI_TYPE_ANY, child_handle, NULL, &dummy))) {
+				/*
+				 * There is at least one child of this object,
+				 * visit the object
+				 */
+				level++;
+				parent_handle = child_handle;
+				child_handle = NULL;
+			}
+		} else {
+			/*
+			 * No more children in this object, go back up to
+			 * the object's parent
+			 */
+			level--;
+
+			/* Delete all children now */
+
+			acpi_ns_delete_children(child_handle);
+
+			child_handle = parent_handle;
+			status = acpi_get_parent(parent_handle, &parent_handle);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+		}
+	}
+
+	/* Now delete the starting object, and we are done */
+
+	acpi_ns_delete_node(child_handle);
+
+	return_ACPI_STATUS(AE_OK);
+}
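+
+/*
+ * Note (informational, not part of the ACPICA sources): the loop above is a
+ * non-recursive depth-first traversal. It only descends into a child that
+ * itself has children, deletes a scope's children once that scope has been
+ * exhausted, and finally removes the starting node itself with
+ * acpi_ns_delete_node(). Avoiding recursion keeps stack usage bounded even
+ * for deeply nested namespaces.
+ */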
+
+/*******************************************************************************
+ *
+ *  FUNCTION:       acpi_ns_unload_namespace
+ *
+ *  PARAMETERS:     Handle          - Root of namespace subtree to be deleted
+ *
+ *  RETURN:         Status
+ *
+ *  DESCRIPTION:    Shrinks the namespace, typically in response to an undocking
+ *                  event.  Deletes an entire subtree starting from (and
+ *                  including) the given handle.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ns_unload_namespace(acpi_handle handle)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ns_unload_name_space);
+
+	/* Parameter validation */
+
+	if (!acpi_gbl_root_node) {
+		return_ACPI_STATUS(AE_NO_NAMESPACE);
+	}
+
+	if (!handle) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* This function does the real work */
+
+	status = acpi_ns_delete_subtree(handle);
+
+	return_ACPI_STATUS(status);
+}
+#endif
+#endif
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
new file mode 100644
index 0000000..ae3dc10
--- /dev/null
+++ b/drivers/acpi/acpica/nsnames.c
@@ -0,0 +1,265 @@
+/*******************************************************************************
+ *
+ * Module Name: nsnames - Name manipulation and search
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "amlcode.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsnames")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_build_external_path
+ *
+ * PARAMETERS:  Node            - NS node whose pathname is needed
+ *              Size            - Size of the pathname
+ *              *name_buffer    - Where to return the pathname
+ *
+ * RETURN:      Status
+ *              Places the pathname into the name_buffer, in external format
+ *              (name segments separated by path separators)
+ *
+ * DESCRIPTION: Generate a full pathname
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ns_build_external_path(struct acpi_namespace_node *node,
+			    acpi_size size, char *name_buffer)
+{
+	acpi_size index;
+	struct acpi_namespace_node *parent_node;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/* Special case for root */
+
+	index = size - 1;
+	if (index < ACPI_NAME_SIZE) {
+		name_buffer[0] = AML_ROOT_PREFIX;
+		name_buffer[1] = 0;
+		return (AE_OK);
+	}
+
+	/* Store terminator byte, then build name backwards */
+
+	parent_node = node;
+	name_buffer[index] = 0;
+
+	while ((index > ACPI_NAME_SIZE) && (parent_node != acpi_gbl_root_node)) {
+		index -= ACPI_NAME_SIZE;
+
+		/* Put the name into the buffer */
+
+		ACPI_MOVE_32_TO_32((name_buffer + index), &parent_node->name);
+		parent_node = acpi_ns_get_parent_node(parent_node);
+
+		/* Prefix name with the path separator */
+
+		index--;
+		name_buffer[index] = ACPI_PATH_SEPARATOR;
+	}
+
+	/* Overwrite final separator with the root prefix character */
+
+	name_buffer[index] = AML_ROOT_PREFIX;
+
+	if (index != 0) {
+		ACPI_ERROR((AE_INFO,
+			    "Could not construct external pathname; index=%X, size=%X, Path=%s",
+			    (u32) index, (u32) size, &name_buffer[size]));
+
+		return (AE_BAD_PARAMETER);
+	}
+
+	return (AE_OK);
+}
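+
+/*
+ * Worked example (informational, not part of the ACPICA sources): for a node
+ * three levels below the root, say \_SB_.PCI0.LPCB, Size is 16. The routine
+ * stores the null terminator at index 15 and fills the buffer backwards:
+ * "LPCB" at index 11, '.' at index 10, "PCI0" at index 6, '.' at index 5,
+ * "_SB_" at index 1, and finally the root prefix '\' at index 0, producing
+ * "\_SB_.PCI0.LPCB".
+ */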
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_get_external_pathname
+ *
+ * PARAMETERS:  Node            - Namespace node whose pathname is needed
+ *
+ * RETURN:      Pointer to storage containing the fully qualified name of
+ *              the node, in external format (name segments separated by path
+ *              separators.)
+ *
+ * DESCRIPTION: Used for debug printing in acpi_ns_search_table().
+ *
+ ******************************************************************************/
+
+char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
+{
+	acpi_status status;
+	char *name_buffer;
+	acpi_size size;
+
+	ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node);
+
+	/* Calculate required buffer size based on depth below root */
+
+	size = acpi_ns_get_pathname_length(node);
+	if (!size) {
+		return_PTR(NULL);
+	}
+
+	/* Allocate a buffer to be returned to caller */
+
+	name_buffer = ACPI_ALLOCATE_ZEROED(size);
+	if (!name_buffer) {
+		ACPI_ERROR((AE_INFO, "Allocation failure"));
+		return_PTR(NULL);
+	}
+
+	/* Build the path in the allocated buffer */
+
+	status = acpi_ns_build_external_path(node, size, name_buffer);
+	if (ACPI_FAILURE(status)) {
+		ACPI_FREE(name_buffer);
+		return_PTR(NULL);
+	}
+
+	return_PTR(name_buffer);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_get_pathname_length
+ *
+ * PARAMETERS:  Node        - Namespace node
+ *
+ * RETURN:      Length of path, including prefix
+ *
+ * DESCRIPTION: Get the length of the pathname string for this node
+ *
+ ******************************************************************************/
+
+acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
+{
+	acpi_size size;
+	struct acpi_namespace_node *next_node;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/*
+	 * Compute length of pathname as 5 * number of name segments.
+	 * Go back up the parent tree to the root
+	 */
+	size = 0;
+	next_node = node;
+
+	while (next_node && (next_node != acpi_gbl_root_node)) {
+		if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) {
+			ACPI_ERROR((AE_INFO,
+				    "Invalid Namespace Node (%p) while traversing namespace",
+				    next_node));
+			return 0;
+		}
+		size += ACPI_PATH_SEGMENT_LENGTH;
+		next_node = acpi_ns_get_parent_node(next_node);
+	}
+
+	if (!size) {
+		size = 1;	/* Root node case */
+	}
+
+	return (size + 1);	/* +1 for null string terminator */
+}
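+
+/*
+ * Worked example (informational, not part of the ACPICA sources): each level
+ * below the root contributes ACPI_PATH_SEGMENT_LENGTH (5) bytes, one path
+ * separator plus a four-character name segment. The \_SB_.PCI0.LPCB node
+ * used in the example above is three levels deep, so it needs
+ * 3 * 5 + 1 = 16 bytes (15 characters plus the null terminator); the root
+ * node itself degenerates to 2 bytes, "\" plus the terminator.
+ */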
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_handle_to_pathname
+ *
+ * PARAMETERS:  target_handle           - Handle of named object whose name is
+ *                                        to be found
+ *              Buffer                  - Where the pathname is returned
+ *
+ * RETURN:      Status, Buffer is filled with pathname if status is AE_OK
+ *
+ * DESCRIPTION: Build and return a full namespace pathname
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_handle_to_pathname(acpi_handle target_handle,
+			   struct acpi_buffer * buffer)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+	acpi_size required_size;
+
+	ACPI_FUNCTION_TRACE_PTR(ns_handle_to_pathname, target_handle);
+
+	node = acpi_ns_map_handle_to_node(target_handle);
+	if (!node) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Determine size required for the caller buffer */
+
+	required_size = acpi_ns_get_pathname_length(node);
+	if (!required_size) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Validate/Allocate/Clear caller buffer */
+
+	status = acpi_ut_initialize_buffer(buffer, required_size);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Build the path in the caller buffer */
+
+	status =
+	    acpi_ns_build_external_path(node, required_size, buffer->pointer);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%X]\n",
+			  (char *)buffer->pointer, (u32) required_size));
+	return_ACPI_STATUS(AE_OK);
+}
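+
+/*
+ * Illustrative usage sketch (not part of the ACPICA sources): callers
+ * typically let the utility layer size and allocate the buffer for them,
+ * then free it when done:
+ *
+ *     struct acpi_buffer buffer = { ACPI_ALLOCATE_LOCAL_BUFFER, NULL };
+ *
+ *     status = acpi_ns_handle_to_pathname(handle, &buffer);
+ *     if (ACPI_SUCCESS(status)) {
+ *             ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Path: %s\n",
+ *                               (char *)buffer.pointer));
+ *             ACPI_FREE(buffer.pointer);
+ *     }
+ */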
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
new file mode 100644
index 0000000..08a97a5
--- /dev/null
+++ b/drivers/acpi/acpica/nsobject.c
@@ -0,0 +1,441 @@
+/*******************************************************************************
+ *
+ * Module Name: nsobject - Utilities for objects attached to namespace
+ *                         table entries
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsobject")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_attach_object
+ *
+ * PARAMETERS:  Node                - Parent Node
+ *              Object              - Object to be attached
+ *              Type                - Type of object, or ACPI_TYPE_ANY if not
+ *                                    known
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Record the given object as the value associated with the
+ *              name whose acpi_handle is passed.  If Object is NULL
+ *              and Type is ACPI_TYPE_ANY, set the name as having no value.
+ *              Note: A future revision may require that the Node->Flags
+ *              field be passed as a parameter.
+ *
+ * MUTEX:       Assumes namespace is locked
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ns_attach_object(struct acpi_namespace_node *node,
+		      union acpi_operand_object *object, acpi_object_type type)
+{
+	union acpi_operand_object *obj_desc;
+	union acpi_operand_object *last_obj_desc;
+	acpi_object_type object_type = ACPI_TYPE_ANY;
+
+	ACPI_FUNCTION_TRACE(ns_attach_object);
+
+	/*
+	 * Parameter validation
+	 */
+	if (!node) {
+
+		/* Invalid handle */
+
+		ACPI_ERROR((AE_INFO, "Null NamedObj handle"));
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	if (!object && (ACPI_TYPE_ANY != type)) {
+
+		/* Null object */
+
+		ACPI_ERROR((AE_INFO,
+			    "Null object, but type not ACPI_TYPE_ANY"));
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
+
+		/* Not a name handle */
+
+		ACPI_ERROR((AE_INFO, "Invalid handle %p [%s]",
+			    node, acpi_ut_get_descriptor_name(node)));
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Check if this object is already attached */
+
+	if (node->object == object) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "Obj %p already installed in NameObj %p\n",
+				  object, node));
+
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* If null object, we will just install it */
+
+	if (!object) {
+		obj_desc = NULL;
+		object_type = ACPI_TYPE_ANY;
+	}
+
+	/*
+	 * If the source object is a namespace Node with an attached object,
+	 * we will use that (attached) object
+	 */
+	else if ((ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED) &&
+		 ((struct acpi_namespace_node *)object)->object) {
+		/*
+		 * Value passed is a name handle and that name has a
+		 * non-null value.  Use that name's value and type.
+		 */
+		obj_desc = ((struct acpi_namespace_node *)object)->object;
+		object_type = ((struct acpi_namespace_node *)object)->type;
+	}
+
+	/*
+	 * Otherwise, we will use the parameter object, but we must type
+	 * it first
+	 */
+	else {
+		obj_desc = (union acpi_operand_object *)object;
+
+		/* Use the given type */
+
+		object_type = type;
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Installing %p into Node %p [%4.4s]\n",
+			  obj_desc, node, acpi_ut_get_node_name(node)));
+
+	/* Detach an existing attached object if present */
+
+	if (node->object) {
+		acpi_ns_detach_object(node);
+	}
+
+	if (obj_desc) {
+		/*
+		 * Must increment the new value's reference count
+		 * (if it is an internal object)
+		 */
+		acpi_ut_add_reference(obj_desc);
+
+		/*
+		 * Handle objects with multiple descriptors - walk
+		 * to the end of the descriptor list
+		 */
+		last_obj_desc = obj_desc;
+		while (last_obj_desc->common.next_object) {
+			last_obj_desc = last_obj_desc->common.next_object;
+		}
+
+		/* Install the object at the front of the object list */
+
+		last_obj_desc->common.next_object = node->object;
+	}
+
+	node->type = (u8) object_type;
+	node->object = obj_desc;
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_detach_object
+ *
+ * PARAMETERS:  Node           - A Namespace node whose object will be detached
+ *
+ * RETURN:      None.
+ *
+ * DESCRIPTION: Detach/delete an object associated with a namespace node.
+ *              If the object is an allocated object, it is freed.
+ *              Otherwise, the field is simply cleared.
+ *
+ ******************************************************************************/
+
+void acpi_ns_detach_object(struct acpi_namespace_node *node)
+{
+	union acpi_operand_object *obj_desc;
+
+	ACPI_FUNCTION_TRACE(ns_detach_object);
+
+	obj_desc = node->object;
+
+	if (!obj_desc ||
+	    (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA)) {
+		return_VOID;
+	}
+
+	/* Clear the entry in all cases */
+
+	node->object = NULL;
+	if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_OPERAND) {
+		node->object = obj_desc->common.next_object;
+		if (node->object &&
+		    (ACPI_GET_OBJECT_TYPE(node->object) !=
+		     ACPI_TYPE_LOCAL_DATA)) {
+			node->object = node->object->common.next_object;
+		}
+	}
+
+	/* Reset the node type to untyped */
+
+	node->type = ACPI_TYPE_ANY;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Node %p [%4.4s] Object %p\n",
+			  node, acpi_ut_get_node_name(node), obj_desc));
+
+	/* Remove one reference on the object (and all subobjects) */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_get_attached_object
+ *
+ * PARAMETERS:  Node             - Namespace node
+ *
+ * RETURN:      Current value of the object field from the Node whose
+ *              handle is passed
+ *
+ * DESCRIPTION: Obtain the object attached to a namespace node.
+ *
+ ******************************************************************************/
+
+union acpi_operand_object *acpi_ns_get_attached_object(struct
+						       acpi_namespace_node
+						       *node)
+{
+	ACPI_FUNCTION_TRACE_PTR(ns_get_attached_object, node);
+
+	if (!node) {
+		ACPI_WARNING((AE_INFO, "Null Node ptr"));
+		return_PTR(NULL);
+	}
+
+	if (!node->object ||
+	    ((ACPI_GET_DESCRIPTOR_TYPE(node->object) != ACPI_DESC_TYPE_OPERAND)
+	     && (ACPI_GET_DESCRIPTOR_TYPE(node->object) !=
+		 ACPI_DESC_TYPE_NAMED))
+	    || (ACPI_GET_OBJECT_TYPE(node->object) == ACPI_TYPE_LOCAL_DATA)) {
+		return_PTR(NULL);
+	}
+
+	return_PTR(node->object);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_get_secondary_object
+ *
+ * PARAMETERS:  obj_desc         - Operand object of interest
+ *
+ * RETURN:      The secondary (next attached) operand object, or NULL if
+ *              no secondary object is present
+ *
+ * DESCRIPTION: Obtain a secondary object associated with an operand object.
+ *
+ ******************************************************************************/
+
+union acpi_operand_object *acpi_ns_get_secondary_object(union
+							acpi_operand_object
+							*obj_desc)
+{
+	ACPI_FUNCTION_TRACE_PTR(ns_get_secondary_object, obj_desc);
+
+	if ((!obj_desc) ||
+	    (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA) ||
+	    (!obj_desc->common.next_object) ||
+	    (ACPI_GET_OBJECT_TYPE(obj_desc->common.next_object) ==
+	     ACPI_TYPE_LOCAL_DATA)) {
+		return_PTR(NULL);
+	}
+
+	return_PTR(obj_desc->common.next_object);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_attach_data
+ *
+ * PARAMETERS:  Node            - Namespace node
+ *              Handler         - Handler to be associated with the data
+ *              Data            - Data to be attached
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Low-level attach data.  Create and attach a Data object.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_attach_data(struct acpi_namespace_node *node,
+		    acpi_object_handler handler, void *data)
+{
+	union acpi_operand_object *prev_obj_desc;
+	union acpi_operand_object *obj_desc;
+	union acpi_operand_object *data_desc;
+
+	/* We only allow one attachment per handler */
+
+	prev_obj_desc = NULL;
+	obj_desc = node->object;
+	while (obj_desc) {
+		if ((ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA) &&
+		    (obj_desc->data.handler == handler)) {
+			return (AE_ALREADY_EXISTS);
+		}
+
+		prev_obj_desc = obj_desc;
+		obj_desc = obj_desc->common.next_object;
+	}
+
+	/* Create an internal object for the data */
+
+	data_desc = acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_DATA);
+	if (!data_desc) {
+		return (AE_NO_MEMORY);
+	}
+
+	data_desc->data.handler = handler;
+	data_desc->data.pointer = data;
+
+	/* Install the data object */
+
+	if (prev_obj_desc) {
+		prev_obj_desc->common.next_object = data_desc;
+	} else {
+		node->object = data_desc;
+	}
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_detach_data
+ *
+ * PARAMETERS:  Node            - Namespace node
+ *              Handler         - Handler associated with the data
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Low-level detach data.  Delete the data node, but the caller
+ *              is responsible for the actual data.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_detach_data(struct acpi_namespace_node * node,
+		    acpi_object_handler handler)
+{
+	union acpi_operand_object *obj_desc;
+	union acpi_operand_object *prev_obj_desc;
+
+	prev_obj_desc = NULL;
+	obj_desc = node->object;
+	while (obj_desc) {
+		if ((ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA) &&
+		    (obj_desc->data.handler == handler)) {
+			if (prev_obj_desc) {
+				prev_obj_desc->common.next_object =
+				    obj_desc->common.next_object;
+			} else {
+				node->object = obj_desc->common.next_object;
+			}
+
+			acpi_ut_remove_reference(obj_desc);
+			return (AE_OK);
+		}
+
+		prev_obj_desc = obj_desc;
+		obj_desc = obj_desc->common.next_object;
+	}
+
+	return (AE_NOT_FOUND);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_get_attached_data
+ *
+ * PARAMETERS:  Node            - Namespace node
+ *              Handler         - Handler associated with the data
+ *              Data            - Where the data is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Low level interface to obtain data previously associated with
+ *              a namespace node.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_get_attached_data(struct acpi_namespace_node * node,
+			  acpi_object_handler handler, void **data)
+{
+	union acpi_operand_object *obj_desc;
+
+	obj_desc = node->object;
+	while (obj_desc) {
+		if ((ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA) &&
+		    (obj_desc->data.handler == handler)) {
+			*data = obj_desc->data.pointer;
+			return (AE_OK);
+		}
+
+		obj_desc = obj_desc->common.next_object;
+	}
+
+	return (AE_NOT_FOUND);
+}
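
Everything hangs off Node->Object as one singly linked list: the primary
operand object (if any) at the front, plus ACPI_TYPE_LOCAL_DATA descriptors
that each pair a handler with an opaque pointer, the handler serving as the
lookup key. A hedged sketch of driver-side use through the exported wrappers
acpi_attach_data()/acpi_get_data(), which take the namespace lock and then
call the low-level routines in this file; the context structure and function
names below are hypothetical:

	#include <linux/slab.h>
	#include <acpi/acpi.h>

	struct example_ctx {
		int power_state;
	};

	/* The handler doubles as the key used to find this attachment again */
	static void example_data_handler(acpi_handle handle, void *data)
	{
	}

	static int example_bind(acpi_handle handle)
	{
		struct example_ctx *ctx;
		void *data;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		if (ACPI_FAILURE(acpi_attach_data(handle, example_data_handler, ctx))) {
			kfree(ctx);
			return -ENODEV;
		}

		/* Later: recover the context by the (node, handler) pair */
		if (ACPI_SUCCESS(acpi_get_data(handle, example_data_handler, &data)))
			ctx = data;

		return 0;
	}
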
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
new file mode 100644
index 0000000..b9e8d00
--- /dev/null
+++ b/drivers/acpi/acpica/nsparse.c
@@ -0,0 +1,204 @@
+/******************************************************************************
+ *
+ * Module Name: nsparse - namespace interface to AML parser
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acparser.h"
+#include "acdispat.h"
+#include "actables.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsparse")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_one_complete_parse
+ *
+ * PARAMETERS:  pass_number             - 1 or 2
+ *              table_index             - Index of the table to be parsed
+ *              start_node              - Where to enter the table into the
+ *                                        namespace
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Perform one complete parse of an ACPI/AML table.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ns_one_complete_parse(u32 pass_number,
+			   u32 table_index,
+			   struct acpi_namespace_node *start_node)
+{
+	union acpi_parse_object *parse_root;
+	acpi_status status;
+	u32 aml_length;
+	u8 *aml_start;
+	struct acpi_walk_state *walk_state;
+	struct acpi_table_header *table;
+	acpi_owner_id owner_id;
+
+	ACPI_FUNCTION_TRACE(ns_one_complete_parse);
+
+	status = acpi_tb_get_owner_id(table_index, &owner_id);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Create and init a Root Node */
+
+	parse_root = acpi_ps_create_scope_op();
+	if (!parse_root) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Create and initialize a new walk state */
+
+	walk_state = acpi_ds_create_walk_state(owner_id, NULL, NULL, NULL);
+	if (!walk_state) {
+		acpi_ps_free_op(parse_root);
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	status = acpi_get_table_by_index(table_index, &table);
+	if (ACPI_FAILURE(status)) {
+		acpi_ds_delete_walk_state(walk_state);
+		acpi_ps_free_op(parse_root);
+		return_ACPI_STATUS(status);
+	}
+
+	/* Table must consist of at least a complete header */
+
+	if (table->length < sizeof(struct acpi_table_header)) {
+		status = AE_BAD_HEADER;
+	} else {
+		aml_start = (u8 *) table + sizeof(struct acpi_table_header);
+		aml_length = table->length - sizeof(struct acpi_table_header);
+		status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
+					       aml_start, aml_length, NULL,
+					       (u8) pass_number);
+	}
+
+	if (ACPI_FAILURE(status)) {
+		acpi_ds_delete_walk_state(walk_state);
+		goto cleanup;
+	}
+
+	/* start_node is the default location to load the table */
+
+	if (start_node && start_node != acpi_gbl_root_node) {
+		status =
+		    acpi_ds_scope_stack_push(start_node, ACPI_TYPE_METHOD,
+					     walk_state);
+		if (ACPI_FAILURE(status)) {
+			acpi_ds_delete_walk_state(walk_state);
+			goto cleanup;
+		}
+	}
+
+	/* Parse the AML */
+
+	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %d parse\n",
+			  (unsigned)pass_number));
+	status = acpi_ps_parse_aml(walk_state);
+
+      cleanup:
+	acpi_ps_delete_parse_tree(parse_root);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_parse_table
+ *
+ * PARAMETERS:  table_index     - Index of the ACPI table to be parsed
+ *              start_node      - Where to enter the table into the namespace
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Parse AML within an ACPI table and return a tree of ops
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ns_parse_table);
+
+	/*
+	 * AML Parse, pass 1
+	 *
+	 * In this pass, we load most of the namespace.  Control methods
+	 * are not parsed until later.  A parse tree is not created.  Instead,
+	 * each Parser Op subtree is deleted when it is finished.  This saves
+	 * a great deal of memory, and allows a small cache of parse objects
+	 * to service the entire parse.  The second pass of the parse then
+	 * performs another complete parse of the AML.
+	 */
+	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n"));
+	status =
+	    acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, table_index,
+				       start_node);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * AML Parse, pass 2
+	 *
+	 * In this pass, we resolve forward references and other things
+	 * that could not be completed during the first pass.
+	 * Another complete parse of the AML is performed, but the
+	 * overhead of this is compensated for by the fact that the
+	 * parse objects are all cached.
+	 */
+	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n"));
+	status =
+	    acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, table_index,
+				       start_node);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	return_ACPI_STATUS(status);
+}
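
The second pass exists because AML within a table can refer to names that are
only created later in the same table; those forward references cannot be
resolved until pass 1 has populated the namespace, so pass 2 walks the entire
byte stream again with every name now present. A minimal sketch of the calling
pattern (locking and owner-ID bookkeeping omitted; using the root node as the
start node is an assumption):

	status = acpi_ns_parse_table(table_index, acpi_gbl_root_node);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Could not parse AML table"));
		return_ACPI_STATUS(status);
	}
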
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
new file mode 100644
index 0000000..4527032
--- /dev/null
+++ b/drivers/acpi/acpica/nspredef.c
@@ -0,0 +1,1065 @@
+/******************************************************************************
+ *
+ * Module Name: nspredef - Validation of ACPI predefined methods and objects
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acpredef.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nspredef")
+
+/*******************************************************************************
+ *
+ * This module validates predefined ACPI objects that appear in the namespace,
+ * at the time they are evaluated (via acpi_evaluate_object). The purpose of this
+ * validation is to detect problems with BIOS-exposed predefined ACPI objects
+ * before the results are returned to the ACPI-related drivers.
+ *
+ * There are several areas that are validated:
+ *
+ *  1) The number of input arguments as defined by the method/object in the
+ *      ASL is validated against the ACPI specification.
+ *  2) The type of the return object (if any) is validated against the ACPI
+ *      specification.
+ *  3) For returned package objects, the count of package elements is
+ *      validated, as well as the type of each package element. Nested
+ *      packages are supported.
+ *
+ * For any problems found, a warning message is issued.
+ *
+ ******************************************************************************/
+/* Local prototypes */
+static acpi_status
+acpi_ns_check_package(char *pathname,
+		      union acpi_operand_object **return_object_ptr,
+		      const union acpi_predefined_info *predefined);
+
+static acpi_status
+acpi_ns_check_package_elements(char *pathname,
+			       union acpi_operand_object **elements,
+			       u8 type1, u32 count1, u8 type2, u32 count2);
+
+static acpi_status
+acpi_ns_check_object_type(char *pathname,
+			  union acpi_operand_object **return_object_ptr,
+			  u32 expected_btypes, u32 package_index);
+
+static acpi_status
+acpi_ns_check_reference(char *pathname,
+			union acpi_operand_object *return_object);
+
+static acpi_status
+acpi_ns_repair_object(u32 expected_btypes,
+		      u32 package_index,
+		      union acpi_operand_object **return_object_ptr);
+
+/*
+ * Names for the types that can be returned by the predefined objects.
+ * Used for warning messages. Must be in the same order as the ACPI_RTYPEs
+ */
+static const char *acpi_rtype_names[] = {
+	"/Integer",
+	"/String",
+	"/Buffer",
+	"/Package",
+	"/Reference",
+};
+
+#define ACPI_NOT_PACKAGE    ACPI_UINT32_MAX
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_check_predefined_names
+ *
+ * PARAMETERS:  Node            - Namespace node for the method/object
+ *              user_param_count - Number of arguments passed by the caller
+ *              return_status   - Status from the earlier method evaluation
+ *              return_object_ptr - Pointer to the object returned from the
+ *                                evaluation of a method or object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Check an ACPI name for a match in the predefined name list.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
+			       u32 user_param_count,
+			       acpi_status return_status,
+			       union acpi_operand_object **return_object_ptr)
+{
+	union acpi_operand_object *return_object = *return_object_ptr;
+	acpi_status status = AE_OK;
+	const union acpi_predefined_info *predefined;
+	char *pathname;
+
+	/* Match the name for this method/object against the predefined list */
+
+	predefined = acpi_ns_check_for_predefined_name(node);
+
+	/* Get the full pathname to the object, for use in error messages */
+
+	pathname = acpi_ns_get_external_pathname(node);
+	if (!pathname) {
+
+		/*
+		 * Could not get the pathname, cannot perform validation.
+		 * (Also avoids dereferencing Predefined, which may be NULL
+		 * for names that are not in the predefined table.)
+		 */
+		return (AE_OK);
+	}
+
+	/*
+	 * Check that the parameter count for this method matches the ASL
+	 * definition. For predefined names, ensure that both the caller and
+	 * the method itself are in accordance with the ACPI specification.
+	 */
+	acpi_ns_check_parameter_count(pathname, node, user_param_count,
+				      predefined);
+
+	/* If not a predefined name, we cannot validate the return object */
+
+	if (!predefined) {
+		goto exit;
+	}
+
+	/* If the method failed, we cannot validate the return object */
+
+	if ((return_status != AE_OK) && (return_status != AE_CTRL_RETURN_VALUE)) {
+		goto exit;
+	}
+
+	/*
+	 * Only validate the return value on the first successful evaluation of
+	 * the method. This ensures that any warnings will only be emitted during
+	 * the very first evaluation of the method/object.
+	 */
+	if (node->flags & ANOBJ_EVALUATED) {
+		goto exit;
+	}
+
+	/* Mark the node as having been successfully evaluated */
+
+	node->flags |= ANOBJ_EVALUATED;
+
+	/*
+	 * If there is no return value, check if we require a return value for
+	 * this predefined name. Either one return value is expected, or none,
+	 * for both methods and other objects.
+	 *
+	 * Exit now if there is no return object. Warning if one was expected.
+	 */
+	if (!return_object) {
+		if ((predefined->info.expected_btypes) &&
+		    (!(predefined->info.expected_btypes & ACPI_RTYPE_NONE))) {
+			ACPI_ERROR((AE_INFO,
+				    "%s: Missing expected return value",
+				    pathname));
+
+			status = AE_AML_NO_RETURN_VALUE;
+		}
+		goto exit;
+	}
+
+	/*
+	 * We have a return value, but if one wasn't expected, just exit, this is
+	 * not a problem
+	 *
+	 * For example, if the "Implicit Return" feature is enabled, methods will
+	 * always return a value
+	 */
+	if (!predefined->info.expected_btypes) {
+		goto exit;
+	}
+
+	/*
+	 * Check that the type of the return object is what is expected for
+	 * this predefined name
+	 */
+	status = acpi_ns_check_object_type(pathname, return_object_ptr,
+					   predefined->info.expected_btypes,
+					   ACPI_NOT_PACKAGE);
+	if (ACPI_FAILURE(status)) {
+		goto exit;
+	}
+
+	/* For returned Package objects, check the type of all sub-objects */
+
+	if (ACPI_GET_OBJECT_TYPE(return_object) == ACPI_TYPE_PACKAGE) {
+		status =
+		    acpi_ns_check_package(pathname, return_object_ptr,
+					  predefined);
+	}
+
+      exit:
+	ACPI_FREE(pathname);
+
+	return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_check_parameter_count
+ *
+ * PARAMETERS:  Pathname        - Full pathname to the node (for error msgs)
+ *              Node            - Namespace node for the method/object
+ *              user_param_count - Number of args passed in by the caller
+ *              Predefined      - Pointer to entry in predefined name table
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Check that the declared (in ASL/AML) parameter count for a
+ *              predefined name is what is expected (i.e., what is defined in
+ *              the ACPI specification for this predefined name.)
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_check_parameter_count(char *pathname,
+			      struct acpi_namespace_node *node,
+			      u32 user_param_count,
+			      const union acpi_predefined_info *predefined)
+{
+	u32 param_count;
+	u32 required_params_current;
+	u32 required_params_old;
+
+	/* Methods have 0-7 parameters. All other types have zero. */
+
+	param_count = 0;
+	if (node->type == ACPI_TYPE_METHOD) {
+		param_count = node->object->method.param_count;
+	}
+
+	/* Argument count check for non-predefined methods/objects */
+
+	if (!predefined) {
+		/*
+		 * Warning if too few or too many arguments have been passed by the
+		 * caller. An incorrect number of arguments may not cause the method
+		 * to fail. However, the method will fail if there are too few
+		 * arguments and the method attempts to use one of the missing ones.
+		 */
+		if (user_param_count < param_count) {
+			ACPI_WARNING((AE_INFO,
+				      "%s: Insufficient arguments - needs %d, found %d",
+				      pathname, param_count, user_param_count));
+		} else if (user_param_count > param_count) {
+			ACPI_WARNING((AE_INFO,
+				      "%s: Excess arguments - needs %d, found %d",
+				      pathname, param_count, user_param_count));
+		}
+		return;
+	}
+
+	/* Allow two different legal argument counts (_SCP, etc.) */
+
+	required_params_current = predefined->info.param_count & 0x0F;
+	required_params_old = predefined->info.param_count >> 4;
+
+	if (user_param_count != ACPI_UINT32_MAX) {
+
+		/* Validate the user-supplied parameter count */
+
+		if ((user_param_count != required_params_current) &&
+		    (user_param_count != required_params_old)) {
+			ACPI_WARNING((AE_INFO,
+				      "%s: Parameter count mismatch - caller passed %d, ACPI requires %d",
+				      pathname, user_param_count,
+				      required_params_current));
+		}
+	}
+
+	/*
+	 * Only validate the argument count on the first successful evaluation of
+	 * the method. This ensures that any warnings will only be emitted during
+	 * the very first evaluation of the method/object.
+	 */
+	if (node->flags & ANOBJ_EVALUATED) {
+		return;
+	}
+
+	/*
+	 * Check that the ASL-defined parameter count is what is expected for
+	 * this predefined name.
+	 */
+	if ((param_count != required_params_current) &&
+	    (param_count != required_params_old)) {
+		ACPI_WARNING((AE_INFO,
+			      "%s: Parameter count mismatch - ASL declared %d, ACPI requires %d",
+			      pathname, param_count, required_params_current));
+	}
+}
+
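/*
 * Worked illustration of the encoding used above (the table value here is
 * hypothetical): a predefined name that legally takes either 1 argument
 * (current specification) or 3 arguments (an older revision) stores its
 * param_count byte as 0x31, so that
 *
 *     required_params_current = 0x31 & 0x0F = 1
 *     required_params_old     = 0x31 >> 4   = 3
 */
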
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_check_for_predefined_name
+ *
+ * PARAMETERS:  Node            - Namespace node for the method/object
+ *
+ * RETURN:      Pointer to entry in predefined table. NULL indicates not found.
+ *
+ * DESCRIPTION: Check an object name against the predefined object list.
+ *
+ ******************************************************************************/
+
+const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
+								    acpi_namespace_node
+								    *node)
+{
+	const union acpi_predefined_info *this_name;
+
+	/* Quick check for a predefined name, first character must be underscore */
+
+	if (node->name.ascii[0] != '_') {
+		return (NULL);
+	}
+
+	/* Search info table for a predefined method/object name */
+
+	this_name = predefined_names;
+	while (this_name->info.name[0]) {
+		if (ACPI_COMPARE_NAME(node->name.ascii, this_name->info.name)) {
+
+			/* Return pointer to this table entry */
+
+			return (this_name);
+		}
+
+		/*
+		 * Skip next entry in the table if this name returns a Package
+		 * (next entry contains the package info)
+		 */
+		if (this_name->info.expected_btypes & ACPI_RTYPE_PACKAGE) {
+			this_name++;
+		}
+
+		this_name++;
+	}
+
+	return (NULL);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_check_package
+ *
+ * PARAMETERS:  Pathname        - Full pathname to the node (for error msgs)
+ *              return_object_ptr - Pointer to the object returned from the
+ *                                evaluation of a method or object
+ *              Predefined      - Pointer to entry in predefined name table
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Check a returned package object for the correct count and
+ *              correct type of all sub-objects.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_check_package(char *pathname,
+		      union acpi_operand_object **return_object_ptr,
+		      const union acpi_predefined_info *predefined)
+{
+	union acpi_operand_object *return_object = *return_object_ptr;
+	const union acpi_predefined_info *package;
+	union acpi_operand_object *sub_package;
+	union acpi_operand_object **elements;
+	union acpi_operand_object **sub_elements;
+	acpi_status status;
+	u32 expected_count;
+	u32 count;
+	u32 i;
+	u32 j;
+
+	ACPI_FUNCTION_NAME(ns_check_package);
+
+	/* The package info for this name is in the next table entry */
+
+	package = predefined + 1;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+			  "%s Validating return Package of Type %X, Count %X\n",
+			  pathname, package->ret_info.type,
+			  return_object->package.count));
+
+	/* Extract package count and elements array */
+
+	elements = return_object->package.elements;
+	count = return_object->package.count;
+
+	/* The package must have at least one element, else invalid */
+
+	if (!count) {
+		ACPI_WARNING((AE_INFO,
+			      "%s: Return Package has no elements (empty)",
+			      pathname));
+
+		return (AE_AML_OPERAND_VALUE);
+	}
+
+	/*
+	 * Decode the type of the expected package contents
+	 *
+	 * PTYPE1 packages contain no subpackages
+	 * PTYPE2 packages contain sub-packages
+	 */
+	switch (package->ret_info.type) {
+	case ACPI_PTYPE1_FIXED:
+
+		/*
+		 * The package count is fixed and there are no sub-packages
+		 *
+		 * If package is too small, exit.
+		 * If package is larger than expected, issue warning but continue
+		 */
+		expected_count =
+		    package->ret_info.count1 + package->ret_info.count2;
+		if (count < expected_count) {
+			goto package_too_small;
+		} else if (count > expected_count) {
+			ACPI_WARNING((AE_INFO,
+				      "%s: Return Package is larger than needed - "
+				      "found %u, expected %u", pathname, count,
+				      expected_count));
+		}
+
+		/* Validate all elements of the returned package */
+
+		status = acpi_ns_check_package_elements(pathname, elements,
+							package->ret_info.
+							object_type1,
+							package->ret_info.
+							count1,
+							package->ret_info.
+							object_type2,
+							package->ret_info.
+							count2);
+		if (ACPI_FAILURE(status)) {
+			return (status);
+		}
+		break;
+
+	case ACPI_PTYPE1_VAR:
+
+		/*
+		 * The package count is variable, there are no sub-packages, and all
+		 * elements must be of the same type
+		 */
+		for (i = 0; i < count; i++) {
+			status = acpi_ns_check_object_type(pathname, elements,
+							   package->ret_info.
+							   object_type1, i);
+			if (ACPI_FAILURE(status)) {
+				return (status);
+			}
+			elements++;
+		}
+		break;
+
+	case ACPI_PTYPE1_OPTION:
+
+		/*
+		 * The package count is variable, there are no sub-packages. There are
+		 * a fixed number of required elements, and a variable number of
+		 * optional elements.
+		 *
+		 * Check if package is at least as large as the minimum required
+		 */
+		expected_count = package->ret_info3.count;
+		if (count < expected_count) {
+			goto package_too_small;
+		}
+
+		/* Variable number of sub-objects */
+
+		for (i = 0; i < count; i++) {
+			if (i < package->ret_info3.count) {
+
+				/* These are the required package elements (0, 1, or 2) */
+
+				status =
+				    acpi_ns_check_object_type(pathname,
+							      elements,
+							      package->
+							      ret_info3.
+							      object_type[i],
+							      i);
+				if (ACPI_FAILURE(status)) {
+					return (status);
+				}
+			} else {
+				/* These are the optional package elements */
+
+				status =
+				    acpi_ns_check_object_type(pathname,
+							      elements,
+							      package->
+							      ret_info3.
+							      tail_object_type,
+							      i);
+				if (ACPI_FAILURE(status)) {
+					return (status);
+				}
+			}
+			elements++;
+		}
+		break;
+
+	case ACPI_PTYPE2_PKG_COUNT:
+
+		/* First element is the (Integer) count of sub-packages to follow */
+
+		status = acpi_ns_check_object_type(pathname, elements,
+						   ACPI_RTYPE_INTEGER, 0);
+		if (ACPI_FAILURE(status)) {
+			return (status);
+		}
+
+		/*
+		 * Count cannot be larger than the parent package length, but allow it
+		 * to be smaller. The >= accounts for the Integer above.
+		 */
+		expected_count = (u32) (*elements)->integer.value;
+		if (expected_count >= count) {
+			goto package_too_small;
+		}
+
+		count = expected_count;
+		elements++;
+
+		/* Now we can walk the sub-packages */
+
+		/*lint -fallthrough */
+
+	case ACPI_PTYPE2:
+	case ACPI_PTYPE2_FIXED:
+	case ACPI_PTYPE2_MIN:
+	case ACPI_PTYPE2_COUNT:
+
+		/*
+		 * These types all return a single package that consists of a variable
+		 * number of sub-packages
+		 */
+		for (i = 0; i < count; i++) {
+			sub_package = *elements;
+			sub_elements = sub_package->package.elements;
+
+			/* Each sub-object must be of type Package */
+
+			status =
+			    acpi_ns_check_object_type(pathname, &sub_package,
+						      ACPI_RTYPE_PACKAGE, i);
+			if (ACPI_FAILURE(status)) {
+				return (status);
+			}
+
+			/* Examine the different types of sub-packages */
+
+			switch (package->ret_info.type) {
+			case ACPI_PTYPE2:
+			case ACPI_PTYPE2_PKG_COUNT:
+
+				/* Each subpackage has a fixed number of elements */
+
+				expected_count =
+				    package->ret_info.count1 +
+				    package->ret_info.count2;
+				if (sub_package->package.count !=
+				    expected_count) {
+					count = sub_package->package.count;
+					goto package_too_small;
+				}
+
+				status =
+				    acpi_ns_check_package_elements(pathname,
+								   sub_elements,
+								   package->
+								   ret_info.
+								   object_type1,
+								   package->
+								   ret_info.
+								   count1,
+								   package->
+								   ret_info.
+								   object_type2,
+								   package->
+								   ret_info.
+								   count2);
+				if (ACPI_FAILURE(status)) {
+					return (status);
+				}
+				break;
+
+			case ACPI_PTYPE2_FIXED:
+
+				/* Each sub-package has a fixed length */
+
+				expected_count = package->ret_info2.count;
+				if (sub_package->package.count < expected_count) {
+					count = sub_package->package.count;
+					goto package_too_small;
+				}
+
+				/* Check the type of each sub-package element */
+
+				for (j = 0; j < expected_count; j++) {
+					status =
+					    acpi_ns_check_object_type(pathname,
+						&sub_elements[j],
+						package->ret_info2.object_type[j], j);
+					if (ACPI_FAILURE(status)) {
+						return (status);
+					}
+				}
+				break;
+
+			case ACPI_PTYPE2_MIN:
+
+				/* Each sub-package has a variable but minimum length */
+
+				expected_count = package->ret_info.count1;
+				if (sub_package->package.count < expected_count) {
+					count = sub_package->package.count;
+					goto package_too_small;
+				}
+
+				/* Check the type of each sub-package element */
+
+				status =
+				    acpi_ns_check_package_elements(pathname,
+								   sub_elements,
+								   package->
+								   ret_info.
+								   object_type1,
+								   sub_package->
+								   package.
+								   count, 0, 0);
+				if (ACPI_FAILURE(status)) {
+					return (status);
+				}
+				break;
+
+			case ACPI_PTYPE2_COUNT:
+
+				/* First element is the (Integer) count of elements to follow */
+
+				status =
+				    acpi_ns_check_object_type(pathname,
+							      sub_elements,
+							      ACPI_RTYPE_INTEGER,
+							      0);
+				if (ACPI_FAILURE(status)) {
+					return (status);
+				}
+
+				/* Make sure package is large enough for the Count */
+
+				expected_count =
+				    (u32) (*sub_elements)->integer.value;
+				if (sub_package->package.count < expected_count) {
+					count = sub_package->package.count;
+					goto package_too_small;
+				}
+
+				/* Check the type of each sub-package element */
+
+				status =
+				    acpi_ns_check_package_elements(pathname,
+								   (sub_elements
+								    + 1),
+								   package->
+								   ret_info.
+								   object_type1,
+								   (expected_count
+								    - 1), 0, 0);
+				if (ACPI_FAILURE(status)) {
+					return (status);
+				}
+				break;
+
+			default:
+				break;
+			}
+
+			elements++;
+		}
+		break;
+
+	default:
+
+		/* Should not get here if predefined info table is correct */
+
+		ACPI_WARNING((AE_INFO,
+			      "%s: Invalid internal return type in table entry: %X",
+			      pathname, package->ret_info.type));
+
+		return (AE_AML_INTERNAL);
+	}
+
+	return (AE_OK);
+
+      package_too_small:
+
+	/* Error exit for the case with an incorrect package count */
+
+	ACPI_WARNING((AE_INFO, "%s: Return Package is too small - "
+		      "found %u, expected %u", pathname, count,
+		      expected_count));
+
+	return (AE_AML_OPERAND_VALUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_check_package_elements
+ *
+ * PARAMETERS:  Pathname        - Full pathname to the node (for error msgs)
+ *              Elements        - Pointer to the package elements array
+ *              Type1           - Object type for first group
+ *              Count1          - Count for first group
+ *              Type2           - Object type for second group
+ *              Count2          - Count for second group
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Check that all elements of a package are of the correct object
+ *              type. Supports up to two groups of different object types.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_check_package_elements(char *pathname,
+			       union acpi_operand_object **elements,
+			       u8 type1, u32 count1, u8 type2, u32 count2)
+{
+	union acpi_operand_object **this_element = elements;
+	acpi_status status;
+	u32 i;
+
+	/*
+	 * Up to two groups of package elements are supported by the data
+	 * structure. All elements in each group must be of the same type.
+	 * The second group can have a count of zero.
+	 */
+	for (i = 0; i < count1; i++) {
+		status = acpi_ns_check_object_type(pathname, this_element,
+						   type1, i);
+		if (ACPI_FAILURE(status)) {
+			return (status);
+		}
+		this_element++;
+	}
+
+	for (i = 0; i < count2; i++) {
+		status = acpi_ns_check_object_type(pathname, this_element,
+						   type2, (i + count1));
+		if (ACPI_FAILURE(status)) {
+			return (status);
+		}
+		this_element++;
+	}
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_check_object_type
+ *
+ * PARAMETERS:  Pathname        - Full pathname to the node (for error msgs)
+ *              return_object_ptr - Pointer to the object returned from the
+ *                                evaluation of a method or object
+ *              expected_btypes - Bitmap of expected return type(s)
+ *              package_index   - Index of object within parent package (if
+ *                                applicable - ACPI_NOT_PACKAGE otherwise)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Check the type of the return object against the expected object
+ *              type(s). Use of Btype allows multiple expected object types.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_check_object_type(char *pathname,
+			  union acpi_operand_object **return_object_ptr,
+			  u32 expected_btypes, u32 package_index)
+{
+	union acpi_operand_object *return_object = *return_object_ptr;
+	acpi_status status = AE_OK;
+	u32 return_btype;
+	char type_buffer[48];	/* Room for 5 types */
+	u32 this_rtype;
+	u32 i;
+	u32 j;
+
+	/*
+	 * If we get a NULL return_object here, it is a NULL package element,
+	 * and this is always an error.
+	 */
+	if (!return_object) {
+		goto type_error_exit;
+	}
+
+	/* A Namespace node should not get here, but make sure */
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(return_object) == ACPI_DESC_TYPE_NAMED) {
+		ACPI_WARNING((AE_INFO,
+			      "%s: Invalid return type - Found a Namespace node [%4.4s] type %s",
+			      pathname, return_object->node.name.ascii,
+			      acpi_ut_get_type_name(return_object->node.type)));
+		return (AE_AML_OPERAND_TYPE);
+	}
+
+	/*
+	 * Convert the object type (ACPI_TYPE_xxx) to a bitmapped object type.
+	 * The bitmapped type allows multiple possible return types.
+	 *
+	 * Note, the cases below must handle all of the possible types returned
+	 * from all of the predefined names (including elements of returned
+	 * packages)
+	 */
+	switch (ACPI_GET_OBJECT_TYPE(return_object)) {
+	case ACPI_TYPE_INTEGER:
+		return_btype = ACPI_RTYPE_INTEGER;
+		break;
+
+	case ACPI_TYPE_BUFFER:
+		return_btype = ACPI_RTYPE_BUFFER;
+		break;
+
+	case ACPI_TYPE_STRING:
+		return_btype = ACPI_RTYPE_STRING;
+		break;
+
+	case ACPI_TYPE_PACKAGE:
+		return_btype = ACPI_RTYPE_PACKAGE;
+		break;
+
+	case ACPI_TYPE_LOCAL_REFERENCE:
+		return_btype = ACPI_RTYPE_REFERENCE;
+		break;
+
+	default:
+		/* Not one of the supported objects, must be incorrect */
+
+		goto type_error_exit;
+	}
+
+	/* Is the object one of the expected types? */
+
+	if (!(return_btype & expected_btypes)) {
+
+		/* Type mismatch -- attempt repair of the returned object */
+
+		status = acpi_ns_repair_object(expected_btypes, package_index,
+					       return_object_ptr);
+		if (ACPI_SUCCESS(status)) {
+			return (status);
+		}
+		goto type_error_exit;
+	}
+
+	/* For reference objects, check that the reference type is correct */
+
+	if (ACPI_GET_OBJECT_TYPE(return_object) == ACPI_TYPE_LOCAL_REFERENCE) {
+		status = acpi_ns_check_reference(pathname, return_object);
+	}
+
+	return (status);
+
+      type_error_exit:
+
+	/* Create a string with all expected types for this predefined object */
+
+	j = 1;
+	type_buffer[0] = 0;
+	this_rtype = ACPI_RTYPE_INTEGER;
+
+	for (i = 0; i < ACPI_NUM_RTYPES; i++) {
+
+		/* If one of the expected types, concatenate the name of this type */
+
+		if (expected_btypes & this_rtype) {
+			ACPI_STRCAT(type_buffer, &acpi_rtype_names[i][j]);
+			j = 0;	/* Use name separator from now on */
+		}
+		this_rtype <<= 1;	/* Next Rtype */
+	}
+
+	if (package_index == ACPI_NOT_PACKAGE) {
+		ACPI_WARNING((AE_INFO,
+			      "%s: Return type mismatch - found %s, expected %s",
+			      pathname,
+			      acpi_ut_get_object_type_name(return_object),
+			      type_buffer));
+	} else {
+		ACPI_WARNING((AE_INFO,
+			      "%s: Return Package type mismatch at index %u - "
+			      "found %s, expected %s", pathname, package_index,
+			      acpi_ut_get_object_type_name(return_object),
+			      type_buffer));
+	}
+
+	return (AE_AML_OPERAND_TYPE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_check_reference
+ *
+ * PARAMETERS:  Pathname        - Full pathname to the node (for error msgs)
+ *              return_object   - Object returned from the evaluation of a
+ *                                method or object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Check a returned reference object for the correct reference
+ *              type. The only reference type that can be returned from a
+ *              predefined method is a named reference. All others are invalid.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_check_reference(char *pathname,
+			union acpi_operand_object *return_object)
+{
+
+	/*
+	 * Check the reference object for the correct reference type (opcode).
+	 * The only type of reference that can be converted to a union acpi_object is
+	 * a reference to a named object (reference class: NAME)
+	 */
+	if (return_object->reference.class == ACPI_REFCLASS_NAME) {
+		return (AE_OK);
+	}
+
+	ACPI_WARNING((AE_INFO,
+		      "%s: Return type mismatch - unexpected reference object type [%s] %2.2X",
+		      pathname, acpi_ut_get_reference_name(return_object),
+		      return_object->reference.class));
+
+	return (AE_AML_OPERAND_TYPE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_repair_object
+ *
+ * PARAMETERS:  expected_btypes - Bitmap of the expected return type(s)
+ *              package_index   - Used to determine if target is in a package
+ *              return_object_ptr - Pointer to the object returned from the
+ *                                evaluation of a method or object
+ *
+ * RETURN:      Status. AE_OK if repair was successful.
+ *
+ * DESCRIPTION: Attempt to repair/convert a return object of a type that was
+ *              not expected.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_repair_object(u32 expected_btypes,
+		      u32 package_index,
+		      union acpi_operand_object **return_object_ptr)
+{
+	union acpi_operand_object *return_object = *return_object_ptr;
+	union acpi_operand_object *new_object;
+	acpi_size length;
+
+	switch (ACPI_GET_OBJECT_TYPE(return_object)) {
+	case ACPI_TYPE_BUFFER:
+
+		if (!(expected_btypes & ACPI_RTYPE_STRING)) {
+			return (AE_AML_OPERAND_TYPE);
+		}
+
+		/*
+		 * Have a Buffer, expected a String, convert. Use a to_string
+		 * conversion, no transform performed on the buffer data. The best
+		 * example of this is the _BIF method, where the string data from
+		 * the battery is often (incorrectly) returned as buffer object(s).
+		 */
+		length = 0;
+		while ((length < return_object->buffer.length) &&
+		       (return_object->buffer.pointer[length])) {
+			length++;
+		}
+
+		/* Allocate a new string object */
+
+		new_object = acpi_ut_create_string_object(length);
+		if (!new_object) {
+			return (AE_NO_MEMORY);
+		}
+
+		/*
+		 * Copy the raw buffer data with no transform. String is already NULL
+		 * terminated at Length+1.
+		 */
+		ACPI_MEMCPY(new_object->string.pointer,
+			    return_object->buffer.pointer, length);
+
+		/* Install the new return object */
+
+		acpi_ut_remove_reference(return_object);
+		*return_object_ptr = new_object;
+
+		/*
+		 * If the object is a package element, we need to:
+		 * 1. Decrement the reference count of the original object; it was
+		 *    incremented when building the package
+		 * 2. Increment the reference count of the new object, it will be
+		 *    decremented when releasing the package
+		 */
+		if (package_index != ACPI_NOT_PACKAGE) {
+			acpi_ut_remove_reference(return_object);
+			acpi_ut_add_reference(new_object);
+		}
+		return (AE_OK);
+
+	default:
+		break;
+	}
+
+	return (AE_AML_OPERAND_TYPE);
+}
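
Caller-side view of what this module buys: the checks run inside the
evaluation path, so by the time a driver unpacks a predefined method's result,
an incorrect element count has already been warned about and the one supported
repair (a Buffer returned where a String is required, the _BIF case cited in
acpi_ns_repair_object()) has already been applied. A hedged sketch
(battery_handle is hypothetical; per the ACPI specification, _BIF returns a
13-element package whose element 9 is the model number string):

	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *bif, *model;
	acpi_status status;

	status = acpi_evaluate_object(battery_handle, "_BIF", NULL, &output);
	if (ACPI_FAILURE(status))
		return;

	bif = output.pointer;
	if (bif->type == ACPI_TYPE_PACKAGE && bif->package.count >= 13) {
		model = &bif->package.elements[9];
		if (model->type == ACPI_TYPE_STRING)
			printk(KERN_DEBUG "battery model: %s\n",
			       model->string.pointer);
	}
	kfree(output.pointer);
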
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
new file mode 100644
index 0000000..6fea13f
--- /dev/null
+++ b/drivers/acpi/acpica/nssearch.c
@@ -0,0 +1,415 @@
+/*******************************************************************************
+ *
+ * Module Name: nssearch - Namespace search
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nssearch")
+
+/* Local prototypes */
+static acpi_status
+acpi_ns_search_parent_tree(u32 target_name,
+			   struct acpi_namespace_node *node,
+			   acpi_object_type type,
+			   struct acpi_namespace_node **return_node);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_search_one_scope
+ *
+ * PARAMETERS:  target_name     - Ascii ACPI name to search for
+ *              parent_node     - Starting node where search will begin
+ *              Type            - Object type to match
+ *              return_node     - Where the matched Named obj is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Search a single level of the namespace. Performs a
+ *              simple search of the specified level, and does not add
+ *              entries or search parents.
+ *
+ *      Named object lists are built (and subsequently dumped) in the
+ *      order in which the names are encountered during the namespace load;
+ *
+ *      All namespace searching is linear in this implementation, but
+ *      could be easily modified to support any improved search
+ *      algorithm. However, the linear search was chosen for simplicity
+ *      and because the trees are small and the other interpreter
+ *      execution overhead is relatively high.
+ *
+ *      Note: CPU execution analysis has shown that the AML interpreter spends
+ *      a very small percentage of its time searching the namespace. Therefore,
+ *      the linear search seems to be sufficient, as there would seem to be
+ *      little value in improving the search.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_search_one_scope(u32 target_name,
+			 struct acpi_namespace_node *parent_node,
+			 acpi_object_type type,
+			 struct acpi_namespace_node **return_node)
+{
+	struct acpi_namespace_node *node;
+
+	ACPI_FUNCTION_TRACE(ns_search_one_scope);
+
+#ifdef ACPI_DEBUG_OUTPUT
+	if (ACPI_LV_NAMES & acpi_dbg_level) {
+		char *scope_name;
+
+		scope_name = acpi_ns_get_external_pathname(parent_node);
+		if (scope_name) {
+			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+					  "Searching %s (%p) For [%4.4s] (%s)\n",
+					  scope_name, parent_node,
+					  ACPI_CAST_PTR(char, &target_name),
+					  acpi_ut_get_type_name(type)));
+
+			ACPI_FREE(scope_name);
+		}
+	}
+#endif
+
+	/*
+	 * Search for name at this namespace level, which is to say that we
+	 * must search for the name among the children of this object
+	 */
+	node = parent_node->child;
+	while (node) {
+
+		/* Check for match against the name */
+
+		if (node->name.integer == target_name) {
+
+			/* Resolve a control method alias if any */
+
+			if (acpi_ns_get_type(node) ==
+			    ACPI_TYPE_LOCAL_METHOD_ALIAS) {
+				node =
+				    ACPI_CAST_PTR(struct acpi_namespace_node,
+						  node->object);
+			}
+
+			/* Found matching entry */
+
+			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+					  "Name [%4.4s] (%s) %p found in scope [%4.4s] %p\n",
+					  ACPI_CAST_PTR(char, &target_name),
+					  acpi_ut_get_type_name(node->type),
+					  node,
+					  acpi_ut_get_node_name(parent_node),
+					  parent_node));
+
+			*return_node = node;
+			return_ACPI_STATUS(AE_OK);
+		}
+
+		/*
+		 * The last entry in the list points back to the parent,
+		 * so a flag is used to indicate the end-of-list
+		 */
+		if (node->flags & ANOBJ_END_OF_PEER_LIST) {
+
+			/* Searched entire list, we are done */
+
+			break;
+		}
+
+		/* Didn't match name, move on to the next peer object */
+
+		node = node->peer;
+	}
+
+	/* Searched entire namespace level, not found */
+
+	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+			  "Name [%4.4s] (%s) not found in search in scope [%4.4s] %p first child %p\n",
+			  ACPI_CAST_PTR(char, &target_name),
+			  acpi_ut_get_type_name(type),
+			  acpi_ut_get_node_name(parent_node), parent_node,
+			  parent_node->child));
+
+	return_ACPI_STATUS(AE_NOT_FOUND);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_search_parent_tree
+ *
+ * PARAMETERS:  target_name     - Ascii ACPI name to search for
+ *              Node            - Starting node where search will begin
+ *              Type            - Object type to match
+ *              return_node     - Where the matched Node is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Called when a name has not been found in the current namespace
+ *              level. Before adding it or giving up, ACPI scope rules require
+ *              searching enclosing scopes in cases identified by acpi_ns_local().
+ *
+ *              "A name is located by finding the matching name in the current
+ *              name space, and then in the parent name space. If the parent
+ *              name space does not contain the name, the search continues
+ *              recursively until either the name is found or the name space
+ *              does not have a parent (the root of the name space). This
+ *              indicates that the name is not found" (From ACPI Specification,
+ *              section 5.3)
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_search_parent_tree(u32 target_name,
+			   struct acpi_namespace_node *node,
+			   acpi_object_type type,
+			   struct acpi_namespace_node **return_node)
+{
+	acpi_status status;
+	struct acpi_namespace_node *parent_node;
+
+	ACPI_FUNCTION_TRACE(ns_search_parent_tree);
+
+	parent_node = acpi_ns_get_parent_node(node);
+
+	/*
+	 * If there is no parent (i.e., we are at the root) or type is "local",
+	 * we won't be searching the parent tree.
+	 */
+	if (!parent_node) {
+		ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "[%4.4s] has no parent\n",
+				  ACPI_CAST_PTR(char, &target_name)));
+		return_ACPI_STATUS(AE_NOT_FOUND);
+	}
+
+	if (acpi_ns_local(type)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+				  "[%4.4s] type [%s] must be local to this scope (no parent search)\n",
+				  ACPI_CAST_PTR(char, &target_name),
+				  acpi_ut_get_type_name(type)));
+		return_ACPI_STATUS(AE_NOT_FOUND);
+	}
+
+	/* Search the parent tree */
+
+	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+			  "Searching parent [%4.4s] for [%4.4s]\n",
+			  acpi_ut_get_node_name(parent_node),
+			  ACPI_CAST_PTR(char, &target_name)));
+
+	/*
+	 * Search parents until target is found or we have backed up to the root
+	 */
+	while (parent_node) {
+		/*
+		 * Search parent scope. Use TYPE_ANY because we don't care about the
+		 * object type at this point, we only care about the existence of
+		 * the actual name we are searching for. Typechecking comes later.
+		 */
+		status =
+		    acpi_ns_search_one_scope(target_name, parent_node,
+					     ACPI_TYPE_ANY, return_node);
+		if (ACPI_SUCCESS(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		/* Not found here, go up another level (until we reach the root) */
+
+		parent_node = acpi_ns_get_parent_node(parent_node);
+	}
+
+	/* Not found in parent tree */
+
+	return_ACPI_STATUS(AE_NOT_FOUND);
+}
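+
+/*
+ * Illustration (hypothetical names, not taken from any real table): if a
+ * lookup of "FOO_" relative to \_SB.PCI0 fails in that scope, the upsearch
+ * above examines \_SB and then the root \, returning the first match or
+ * AE_NOT_FOUND once the root has been searched. Types flagged as local by
+ * acpi_ns_local() never trigger this parent search.
+ */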
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_search_and_enter
+ *
+ * PARAMETERS:  target_name         - Ascii ACPI name to search for (4 chars)
+ *              walk_state          - Current state of the walk
+ *              Node                - Starting node where search will begin
+ *              interpreter_mode    - Add names only in ACPI_IMODE_LOAD_PASS_x.
+ *                                    Otherwise, search only.
+ *              Type                - Object type to match
+ *              Flags               - Flags describing the search restrictions
+ *              return_node         - Where the Node is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Search for a name segment in a single namespace level,
+ *              optionally adding it if it is not found. If the passed
+ *              Type is not Any and the type previously stored in the
+ *              entry was Any (i.e. unknown), update the stored type.
+ *
+ *              In ACPI_IMODE_EXECUTE, search only.
+ *              In other modes, search and add if not found.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_search_and_enter(u32 target_name,
+			 struct acpi_walk_state *walk_state,
+			 struct acpi_namespace_node *node,
+			 acpi_interpreter_mode interpreter_mode,
+			 acpi_object_type type,
+			 u32 flags, struct acpi_namespace_node **return_node)
+{
+	acpi_status status;
+	struct acpi_namespace_node *new_node;
+
+	ACPI_FUNCTION_TRACE(ns_search_and_enter);
+
+	/* Parameter validation */
+
+	if (!node || !target_name || !return_node) {
+		ACPI_ERROR((AE_INFO,
+			    "Null parameter: Node %p Name %X ReturnNode %p",
+			    node, target_name, return_node));
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/*
+	 * Name must consist of valid ACPI characters. We will repair the name if
+	 * necessary because we don't want to abort because of this, but we want
+	 * all namespace names to be printable. A warning message is appropriate.
+	 *
+	 * This issue came up because there are in fact machines that exhibit
+	 * this problem, and we want to be able to enable ACPI support for them,
+	 * even though there are a few bad names.
+	 */
+	if (!acpi_ut_valid_acpi_name(target_name)) {
+		target_name =
+		    acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name));
+
+		/* Report warning only if in strict mode or debug mode */
+
+		if (!acpi_gbl_enable_interpreter_slack) {
+			ACPI_WARNING((AE_INFO,
+				      "Found bad character(s) in name, repaired: [%4.4s]\n",
+				      ACPI_CAST_PTR(char, &target_name)));
+		} else {
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+					  "Found bad character(s) in name, repaired: [%4.4s]\n",
+					  ACPI_CAST_PTR(char, &target_name)));
+		}
+	}
+
+	/* Try to find the name in the namespace level specified by the caller */
+
+	*return_node = ACPI_ENTRY_NOT_FOUND;
+	status = acpi_ns_search_one_scope(target_name, node, type, return_node);
+	if (status != AE_NOT_FOUND) {
+		/*
+		 * If we found it AND the request specifies that a find is an error,
+		 * return the error
+		 */
+		if ((status == AE_OK) && (flags & ACPI_NS_ERROR_IF_FOUND)) {
+			status = AE_ALREADY_EXISTS;
+		}
+
+		/* Either found it or there was an error: finished either way */
+
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * The name was not found. If we are NOT performing the first pass
+	 * (name entry) of loading the namespace, search the parent tree (all the
+	 * way to the root if necessary.) We don't want to perform the parent
+	 * search when the namespace is actually being loaded. We want to perform
+	 * the search when namespace references are being resolved (load pass 2)
+	 * and during the execution phase.
+	 */
+	if ((interpreter_mode != ACPI_IMODE_LOAD_PASS1) &&
+	    (flags & ACPI_NS_SEARCH_PARENT)) {
+		/*
+		 * Not found at this level - search parent tree according to the
+		 * ACPI specification
+		 */
+		status =
+		    acpi_ns_search_parent_tree(target_name, node, type,
+					       return_node);
+		if (ACPI_SUCCESS(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/* In execute mode, just search, never add names. Exit now */
+
+	if (interpreter_mode == ACPI_IMODE_EXECUTE) {
+		ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+				  "%4.4s Not found in %p [Not adding]\n",
+				  ACPI_CAST_PTR(char, &target_name), node));
+
+		return_ACPI_STATUS(AE_NOT_FOUND);
+	}
+
+	/* Create the new named object */
+
+	new_node = acpi_ns_create_node(target_name);
+	if (!new_node) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+#ifdef ACPI_ASL_COMPILER
+	/*
+	 * Node is an object defined by an External() statement
+	 */
+	if (flags & ACPI_NS_EXTERNAL) {
+		new_node->flags |= ANOBJ_IS_EXTERNAL;
+	}
+#endif
+
+	if (flags & ACPI_NS_TEMPORARY) {
+		new_node->flags |= ANOBJ_TEMPORARY;
+	}
+
+	/* Install the new object into the parent's list of children */
+
+	acpi_ns_install_node(walk_state, node, new_node, type);
+	*return_node = new_node;
+	return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
new file mode 100644
index 0000000..3e1149b
--- /dev/null
+++ b/drivers/acpi/acpica/nsutils.c
@@ -0,0 +1,997 @@
+/******************************************************************************
+ *
+ * Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
+ *                        parents and siblings and Scope manipulation
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "amlcode.h"
+#include "actables.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsutils")
+
+/* Local prototypes */
+static u8 acpi_ns_valid_path_separator(char sep);
+
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search);
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_report_error
+ *
+ * PARAMETERS:  module_name         - Caller's module name (for error output)
+ *              line_number         - Caller's line number (for error output)
+ *              internal_name       - Name or path of the namespace node
+ *              lookup_status       - Exception code from NS lookup
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print error message with full pathname
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_report_error(const char *module_name,
+		     u32 line_number,
+		     const char *internal_name, acpi_status lookup_status)
+{
+	acpi_status status;
+	u32 bad_name;
+	char *name = NULL;
+
+	acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
+
+	if (lookup_status == AE_BAD_CHARACTER) {
+
+		/* There is a non-ascii character in the name */
+
+		ACPI_MOVE_32_TO_32(&bad_name, internal_name);
+		acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name);
+	} else {
+		/* Convert path to external format */
+
+		status = acpi_ns_externalize_name(ACPI_UINT32_MAX,
+						  internal_name, NULL, &name);
+
+		/* Print target name */
+
+		if (ACPI_SUCCESS(status)) {
+			acpi_os_printf("[%s]", name);
+		} else {
+			acpi_os_printf("[COULD NOT EXTERNALIZE NAME]");
+		}
+
+		if (name) {
+			ACPI_FREE(name);
+		}
+	}
+
+	acpi_os_printf(" Namespace lookup failure, %s\n",
+		       acpi_format_exception(lookup_status));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_report_method_error
+ *
+ * PARAMETERS:  module_name         - Caller's module name (for error output)
+ *              line_number         - Caller's line number (for error output)
+ *              Message             - Error message to use on failure
+ *              prefix_node         - Prefix relative to the path
+ *              Path                - Path to the node (optional)
+ *              method_status       - Execution status
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print error message with full pathname
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_report_method_error(const char *module_name,
+			    u32 line_number,
+			    const char *message,
+			    struct acpi_namespace_node *prefix_node,
+			    const char *path, acpi_status method_status)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node = prefix_node;
+
+	acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
+
+	if (path) {
+		status =
+		    acpi_ns_get_node(prefix_node, path, ACPI_NS_NO_UPSEARCH,
+				     &node);
+		if (ACPI_FAILURE(status)) {
+			acpi_os_printf("[Could not get node by pathname]");
+		}
+	}
+
+	acpi_ns_print_node_pathname(node, message);
+	acpi_os_printf(", %s\n", acpi_format_exception(method_status));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_print_node_pathname
+ *
+ * PARAMETERS:  Node            - Object
+ *              Message         - Prefix message
+ *
+ * DESCRIPTION: Print an object's full namespace pathname
+ *              Manages allocation/freeing of a pathname buffer
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
+			    const char *message)
+{
+	struct acpi_buffer buffer;
+	acpi_status status;
+
+	if (!node) {
+		acpi_os_printf("[NULL NAME]");
+		return;
+	}
+
+	/* Convert handle to full pathname and print it (with supplied message) */
+
+	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+
+	status = acpi_ns_handle_to_pathname(node, &buffer);
+	if (ACPI_SUCCESS(status)) {
+		if (message) {
+			acpi_os_printf("%s ", message);
+		}
+
+		acpi_os_printf("[%s] (Node %p)", (char *)buffer.pointer, node);
+		ACPI_FREE(buffer.pointer);
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_valid_root_prefix
+ *
+ * PARAMETERS:  Prefix          - Character to be checked
+ *
+ * RETURN:      TRUE if a valid prefix
+ *
+ * DESCRIPTION: Check if a character is a valid ACPI Root prefix
+ *
+ ******************************************************************************/
+
+u8 acpi_ns_valid_root_prefix(char prefix)
+{
+
+	return ((u8) (prefix == '\\'));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_valid_path_separator
+ *
+ * PARAMETERS:  Sep         - Character to be checked
+ *
+ * RETURN:      TRUE if a valid path separator
+ *
+ * DESCRIPTION: Check if a character is a valid ACPI path separator
+ *
+ ******************************************************************************/
+
+static u8 acpi_ns_valid_path_separator(char sep)
+{
+
+	return ((u8) (sep == '.'));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_get_type
+ *
+ * PARAMETERS:  Node        - Parent Node to be examined
+ *
+ * RETURN:      Type field from Node whose handle is passed
+ *
+ * DESCRIPTION: Return the type of a Namespace node
+ *
+ ******************************************************************************/
+
+acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
+{
+	ACPI_FUNCTION_TRACE(ns_get_type);
+
+	if (!node) {
+		ACPI_WARNING((AE_INFO, "Null Node parameter"));
+		return_UINT32(ACPI_TYPE_ANY);
+	}
+
+	return_UINT32((acpi_object_type) node->type);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_local
+ *
+ * PARAMETERS:  Type        - A namespace object type
+ *
+ * RETURN:      LOCAL if names must be found locally in objects of the
+ *              passed type, 0 if enclosing scopes should be searched
+ *
+ * DESCRIPTION: Returns scope rule for the given object type.
+ *
+ ******************************************************************************/
+
+u32 acpi_ns_local(acpi_object_type type)
+{
+	ACPI_FUNCTION_TRACE(ns_local);
+
+	if (!acpi_ut_valid_object_type(type)) {
+
+		/* Type code out of range  */
+
+		ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
+		return_UINT32(ACPI_NS_NORMAL);
+	}
+
+	return_UINT32((u32) acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_get_internal_name_length
+ *
+ * PARAMETERS:  Info            - Info struct initialized with the
+ *                                external name pointer.
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Calculate the length of the internal (AML) namestring
+ *              corresponding to the external (ASL) namestring.
+ *
+ ******************************************************************************/
+
+void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
+{
+	const char *next_external_char;
+	u32 i;
+
+	ACPI_FUNCTION_ENTRY();
+
+	next_external_char = info->external_name;
+	info->num_carats = 0;
+	info->num_segments = 0;
+	info->fully_qualified = FALSE;
+
+	/*
+	 * For the internal name, the required length is 4 bytes per segment, plus
+	 * 1 each for root_prefix, multi_name_prefix_op, segment count, trailing null
+	 * (which is not really needed, but there's no harm in putting it there)
+	 *
+	 * strlen() + 1 covers the first name_seg, which has no path separator
+	 */
+	if (acpi_ns_valid_root_prefix(*next_external_char)) {
+		info->fully_qualified = TRUE;
+		next_external_char++;
+
+		/* Skip redundant root_prefix, like \\_SB.PCI0.SBRG.EC0 */
+
+		while (acpi_ns_valid_root_prefix(*next_external_char)) {
+			next_external_char++;
+		}
+	} else {
+		/*
+		 * Handle Carat prefixes
+		 */
+		while (*next_external_char == '^') {
+			info->num_carats++;
+			next_external_char++;
+		}
+	}
+
+	/*
+	 * Determine the number of ACPI name "segments" by counting the number of
+	 * path separators within the string. Start with one segment since the
+	 * segment count is [(# separators) + 1], and zero separators is ok.
+	 */
+	if (*next_external_char) {
+		info->num_segments = 1;
+		for (i = 0; next_external_char[i]; i++) {
+			if (acpi_ns_valid_path_separator(next_external_char[i])) {
+				info->num_segments++;
+			}
+		}
+	}
+
+	info->length = (ACPI_NAME_SIZE * info->num_segments) +
+	    4 + info->num_carats;
+
+	info->next_external_char = next_external_char;
+}
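+
+/*
+ * Worked example (illustration only): for the external name
+ * "\_SB.PCI0.SBRG.EC0" there are 4 segments and no carats, so
+ *
+ *      Length = (ACPI_NAME_SIZE * 4) + 4 = 20 bytes
+ *
+ * which covers the root prefix, the multi-name prefix, the segment count,
+ * the four 4-byte name segments, and the trailing null.
+ */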
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_build_internal_name
+ *
+ * PARAMETERS:  Info            - Info struct fully initialized
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Construct the internal (AML) namestring
+ *              corresponding to the external (ASL) namestring.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
+{
+	u32 num_segments = info->num_segments;
+	char *internal_name = info->internal_name;
+	const char *external_name = info->next_external_char;
+	char *result = NULL;
+	u32 i;
+
+	ACPI_FUNCTION_TRACE(ns_build_internal_name);
+
+	/* Setup the correct prefixes, counts, and pointers */
+
+	if (info->fully_qualified) {
+		internal_name[0] = '\\';
+
+		if (num_segments <= 1) {
+			result = &internal_name[1];
+		} else if (num_segments == 2) {
+			internal_name[1] = AML_DUAL_NAME_PREFIX;
+			result = &internal_name[2];
+		} else {
+			internal_name[1] = AML_MULTI_NAME_PREFIX_OP;
+			internal_name[2] = (char)num_segments;
+			result = &internal_name[3];
+		}
+	} else {
+		/*
+		 * Not fully qualified.
+		 * Handle Carats first, then append the name segments
+		 */
+		i = 0;
+		if (info->num_carats) {
+			for (i = 0; i < info->num_carats; i++) {
+				internal_name[i] = '^';
+			}
+		}
+
+		if (num_segments <= 1) {
+			result = &internal_name[i];
+		} else if (num_segments == 2) {
+			internal_name[i] = AML_DUAL_NAME_PREFIX;
+			result = &internal_name[(acpi_size) i + 1];
+		} else {
+			internal_name[i] = AML_MULTI_NAME_PREFIX_OP;
+			internal_name[(acpi_size) i + 1] = (char)num_segments;
+			result = &internal_name[(acpi_size) i + 2];
+		}
+	}
+
+	/* Build the name (minus path separators) */
+
+	for (; num_segments; num_segments--) {
+		for (i = 0; i < ACPI_NAME_SIZE; i++) {
+			if (acpi_ns_valid_path_separator(*external_name) ||
+			    (*external_name == 0)) {
+
+				/* Pad the segment with underscore(s) if segment is short */
+
+				result[i] = '_';
+			} else {
+				/* Convert the character to uppercase and save it */
+
+				result[i] =
+				    (char)ACPI_TOUPPER((int)*external_name);
+				external_name++;
+			}
+		}
+
+		/* Now we must have a path separator, or the pathname is bad */
+
+		if (!acpi_ns_valid_path_separator(*external_name) &&
+		    (*external_name != 0)) {
+			return_ACPI_STATUS(AE_BAD_PARAMETER);
+		}
+
+		/* Move on to the next segment */
+
+		external_name++;
+		result += ACPI_NAME_SIZE;
+	}
+
+	/* Terminate the string */
+
+	*result = 0;
+
+	if (info->fully_qualified) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "Returning [%p] (abs) \"\\%s\"\n",
+				  internal_name, internal_name));
+	} else {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Returning [%p] (rel) \"%s\"\n",
+				  internal_name, internal_name));
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_internalize_name
+ *
+ * PARAMETERS:  *external_name          - External representation of name
+ *              **Converted Name        - Where to return the resulting
+ *                                        internal representation of the name
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Convert an external representation (e.g. "\_PR_.CPU0")
+ *              to internal form (e.g. 5c 2f 02 5f 50 52 5f 43 50 55 30)
+ *
+ *******************************************************************************/
+
+acpi_status
+acpi_ns_internalize_name(const char *external_name, char **converted_name)
+{
+	char *internal_name;
+	struct acpi_namestring_info info;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ns_internalize_name);
+
+	if ((!external_name) || (*external_name == 0) || (!converted_name)) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Get the length of the new internal name */
+
+	info.external_name = external_name;
+	acpi_ns_get_internal_name_length(&info);
+
+	/* We need a buffer to store the internal name */
+
+	internal_name = ACPI_ALLOCATE_ZEROED(info.length);
+	if (!internal_name) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Build the name */
+
+	info.internal_name = internal_name;
+	status = acpi_ns_build_internal_name(&info);
+	if (ACPI_FAILURE(status)) {
+		ACPI_FREE(internal_name);
+		return_ACPI_STATUS(status);
+	}
+
+	*converted_name = internal_name;
+	return_ACPI_STATUS(AE_OK);
+}
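+
+/*
+ * Decoding the example from the function header above: the external name
+ * "\_PR_.CPU0" corresponds to the internal byte sequence
+ *
+ *      5c            Root prefix '\'
+ *      2f 02         Multi-name prefix and segment count (2)
+ *      5f 50 52 5f   "_PR_"
+ *      43 50 55 30   "CPU0"
+ *
+ * (For exactly two segments, acpi_ns_build_internal_name emits the shorter
+ * dual-name prefix AML_DUAL_NAME_PREFIX instead; both encodings name the
+ * same two-segment path.)
+ */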
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_externalize_name
+ *
+ * PARAMETERS:  internal_name_length - Length of the internal name below
+ *              internal_name       - Internal representation of name
+ *              converted_name_length - Where the length is returned
+ *              converted_name      - Where the resulting external name
+ *                                    is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Convert internal name (e.g. 5c 2f 02 5f 50 52 5f 43 50 55 30)
+ *              to its external (printable) form (e.g. "\_PR_.CPU0")
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_externalize_name(u32 internal_name_length,
+			 const char *internal_name,
+			 u32 * converted_name_length, char **converted_name)
+{
+	u32 names_index = 0;
+	u32 num_segments = 0;
+	u32 required_length;
+	u32 prefix_length = 0;
+	u32 i = 0;
+	u32 j = 0;
+
+	ACPI_FUNCTION_TRACE(ns_externalize_name);
+
+	if (!internal_name_length || !internal_name || !converted_name) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/*
+	 * Check for a prefix (one '\' | one or more '^').
+	 */
+	switch (internal_name[0]) {
+	case '\\':
+		prefix_length = 1;
+		break;
+
+	case '^':
+		for (i = 0; i < internal_name_length; i++) {
+			if (internal_name[i] == '^') {
+				prefix_length = i + 1;
+			} else {
+				break;
+			}
+		}
+
+		if (i == internal_name_length) {
+			prefix_length = i;
+		}
+
+		break;
+
+	default:
+		break;
+	}
+
+	/*
+	 * Check for object names.  Note that there could be 0-255 of these
+	 * 4-byte elements.
+	 */
+	if (prefix_length < internal_name_length) {
+		switch (internal_name[prefix_length]) {
+		case AML_MULTI_NAME_PREFIX_OP:
+
+			/* <count> 4-byte names */
+
+			names_index = prefix_length + 2;
+			num_segments = (u8)
+			    internal_name[(acpi_size) prefix_length + 1];
+			break;
+
+		case AML_DUAL_NAME_PREFIX:
+
+			/* Two 4-byte names */
+
+			names_index = prefix_length + 1;
+			num_segments = 2;
+			break;
+
+		case 0:
+
+			/* null_name */
+
+			names_index = 0;
+			num_segments = 0;
+			break;
+
+		default:
+
+			/* one 4-byte name */
+
+			names_index = prefix_length;
+			num_segments = 1;
+			break;
+		}
+	}
+
+	/*
+	 * Calculate the length of converted_name, which equals the length
+	 * of the prefix, length of all object names, length of any required
+	 * punctuation ('.') between object names, plus the NULL terminator.
+	 */
+	required_length = prefix_length + (4 * num_segments) +
+	    ((num_segments > 0) ? (num_segments - 1) : 0) + 1;
+
+	/*
+	 * Check to see if we're still in bounds.  If not, there's a problem
+	 * with internal_name (invalid format).
+	 */
+	if (required_length > internal_name_length) {
+		ACPI_ERROR((AE_INFO, "Invalid internal name"));
+		return_ACPI_STATUS(AE_BAD_PATHNAME);
+	}
+
+	/*
+	 * Build converted_name
+	 */
+	*converted_name = ACPI_ALLOCATE_ZEROED(required_length);
+	if (!(*converted_name)) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	j = 0;
+
+	for (i = 0; i < prefix_length; i++) {
+		(*converted_name)[j++] = internal_name[i];
+	}
+
+	if (num_segments > 0) {
+		for (i = 0; i < num_segments; i++) {
+			if (i > 0) {
+				(*converted_name)[j++] = '.';
+			}
+
+			(*converted_name)[j++] = internal_name[names_index++];
+			(*converted_name)[j++] = internal_name[names_index++];
+			(*converted_name)[j++] = internal_name[names_index++];
+			(*converted_name)[j++] = internal_name[names_index++];
+		}
+	}
+
+	if (converted_name_length) {
+		*converted_name_length = (u32) required_length;
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
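+
+/*
+ * Worked example (illustration only): externalizing the internal name
+ * 5c 2f 02 5f 50 52 5f 43 50 55 30 ('\' + multi-name prefix + 2 segments)
+ * gives prefix_length = 1 and num_segments = 2, so
+ *
+ *      required_length = 1 + (4 * 2) + 1 + 1 = 11
+ *
+ * which is exactly strlen("\_PR_.CPU0") plus the terminating null.
+ */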
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_map_handle_to_node
+ *
+ * PARAMETERS:  Handle          - Handle to be converted to an Node
+ *
+ * RETURN:      A Name table entry pointer
+ *
+ * DESCRIPTION: Convert a namespace handle to a real Node
+ *
+ * Note: Real integer handles would allow for more verification
+ *       and keep all pointers within this subsystem - however this introduces
+ *       more (and perhaps unnecessary) overhead.
+ *
+ ******************************************************************************/
+
+struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle)
+{
+
+	ACPI_FUNCTION_ENTRY();
+
+	/*
+	 * Simple implementation
+	 */
+	if ((!handle) || (handle == ACPI_ROOT_OBJECT)) {
+		return (acpi_gbl_root_node);
+	}
+
+	/* We can at least attempt to verify the handle */
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(handle) != ACPI_DESC_TYPE_NAMED) {
+		return (NULL);
+	}
+
+	return (ACPI_CAST_PTR(struct acpi_namespace_node, handle));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_convert_entry_to_handle
+ *
+ * PARAMETERS:  Node          - Node to be converted to a Handle
+ *
+ * RETURN:      A user handle
+ *
+ * DESCRIPTION: Convert a real Node to a namespace handle
+ *
+ ******************************************************************************/
+
+acpi_handle acpi_ns_convert_entry_to_handle(struct acpi_namespace_node *node)
+{
+
+	/*
+	 * Simple implementation for now;
+	 */
+	return ((acpi_handle) node);
+
+/* Example future implementation ---------------------
+
+	if (!Node)
+	{
+		return (NULL);
+	}
+
+	if (Node == acpi_gbl_root_node)
+	{
+		return (ACPI_ROOT_OBJECT);
+	}
+
+	return ((acpi_handle) Node);
+------------------------------------------------------*/
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_terminate
+ *
+ * PARAMETERS:  none
+ *
+ * RETURN:      none
+ *
+ * DESCRIPTION: Free memory allocated for the namespace and ACPI table storage.
+ *
+ ******************************************************************************/
+
+void acpi_ns_terminate(void)
+{
+	union acpi_operand_object *obj_desc;
+
+	ACPI_FUNCTION_TRACE(ns_terminate);
+
+	/*
+	 * 1) Free the entire namespace -- all nodes and objects
+	 *
+	 * Delete all object descriptors attached to namespace nodes
+	 */
+	acpi_ns_delete_namespace_subtree(acpi_gbl_root_node);
+
+	/* Detach any objects attached to the root */
+
+	obj_desc = acpi_ns_get_attached_object(acpi_gbl_root_node);
+	if (obj_desc) {
+		acpi_ns_detach_object(acpi_gbl_root_node);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n"));
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_opens_scope
+ *
+ * PARAMETERS:  Type        - A valid namespace type
+ *
+ * RETURN:      NEWSCOPE if the passed type "opens a name scope" according
+ *              to the ACPI specification, else 0
+ *
+ ******************************************************************************/
+
+u32 acpi_ns_opens_scope(acpi_object_type type)
+{
+	ACPI_FUNCTION_TRACE_STR(ns_opens_scope, acpi_ut_get_type_name(type));
+
+	if (!acpi_ut_valid_object_type(type)) {
+
+		/* type code out of range  */
+
+		ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
+		return_UINT32(ACPI_NS_NORMAL);
+	}
+
+	return_UINT32(((u32) acpi_gbl_ns_properties[type]) & ACPI_NS_NEWSCOPE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_get_node
+ *
+ * PARAMETERS:  *Pathname   - Name to be found, in external (ASL) format. The
+ *                            \ (backslash) and ^ (carat) prefixes, and the
+ *                            . (period) to separate segments are supported.
+ *              prefix_node  - Root of subtree to be searched, or NS_ALL for the
+ *                            root of the name space.  If Name is fully
+ *                            qualified (first character is '\'), the passed
+ *                            value of Scope will not be accessed.
+ *                            of Scope will not be accessed.
+ *              Flags       - Used to indicate whether to perform upsearch or
+ *                            not.
+ *              return_node - Where the Node is returned
+ *
+ * DESCRIPTION: Look up a name relative to a given scope and return the
+ *              corresponding Node.  NOTE: Scope can be null.
+ *
+ * MUTEX:       Locks namespace
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
+		 const char *pathname,
+		 u32 flags, struct acpi_namespace_node **return_node)
+{
+	union acpi_generic_state scope_info;
+	acpi_status status;
+	char *internal_path;
+
+	ACPI_FUNCTION_TRACE_PTR(ns_get_node, pathname);
+
+	if (!pathname) {
+		*return_node = prefix_node;
+		if (!prefix_node) {
+			*return_node = acpi_gbl_root_node;
+		}
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Convert path to internal representation */
+
+	status = acpi_ns_internalize_name(pathname, &internal_path);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Must lock namespace during lookup */
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	/* Setup lookup scope (search starting point) */
+
+	scope_info.scope.node = prefix_node;
+
+	/* Lookup the name in the namespace */
+
+	status = acpi_ns_lookup(&scope_info, internal_path, ACPI_TYPE_ANY,
+				ACPI_IMODE_EXECUTE,
+				(flags | ACPI_NS_DONT_OPEN_SCOPE), NULL,
+				return_node);
+	if (ACPI_FAILURE(status)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s, %s\n",
+				  pathname, acpi_format_exception(status)));
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+      cleanup:
+	ACPI_FREE(internal_path);
+	return_ACPI_STATUS(status);
+}
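+
+/*
+ * Usage sketch (illustrative only; "parent" and the "_CRS" child are
+ * hypothetical): resolve a relative pathname below a known node without
+ * searching parent scopes:
+ *
+ *      struct acpi_namespace_node *crs_node;
+ *      acpi_status status;
+ *
+ *      status = acpi_ns_get_node(parent, "_CRS", ACPI_NS_NO_UPSEARCH,
+ *                                &crs_node);
+ */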
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_get_parent_node
+ *
+ * PARAMETERS:  Node       - Current table entry
+ *
+ * RETURN:      Parent entry of the given entry
+ *
+ * DESCRIPTION: Obtain the parent entry for a given entry in the namespace.
+ *
+ ******************************************************************************/
+
+struct acpi_namespace_node *acpi_ns_get_parent_node(struct acpi_namespace_node
+						    *node)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	if (!node) {
+		return (NULL);
+	}
+
+	/*
+	 * Walk to the end of this peer list. The last entry is marked with a flag
+	 * and the peer pointer is really a pointer back to the parent. This saves
+	 * putting a parent back pointer in each and every named object!
+	 */
+	while (!(node->flags & ANOBJ_END_OF_PEER_LIST)) {
+		node = node->peer;
+	}
+
+	return (node->peer);
+}
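+
+/*
+ * Illustration of the peer-list layout walked above (this is the existing
+ * node linkage, not a new structure):
+ *
+ *      parent
+ *        |
+ *      child1 --peer--> child2 --peer--> child3 [ANOBJ_END_OF_PEER_LIST]
+ *                                          |
+ *                                          peer points back to parent
+ *
+ * Finding the parent of any child therefore costs one walk to the end of
+ * its peer list instead of a back pointer in every named object.
+ */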
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_get_next_valid_node
+ *
+ * PARAMETERS:  Node       - Current table entry
+ *
+ * RETURN:      Next valid Node in the linked node list. NULL if no more valid
+ *              nodes.
+ *
+ * DESCRIPTION: Find the next valid node within a name table.
+ *              Useful for implementing NULL-end-of-list loops.
+ *
+ ******************************************************************************/
+
+struct acpi_namespace_node *acpi_ns_get_next_valid_node(struct
+							acpi_namespace_node
+							*node)
+{
+
+	/* If we are at the end of this peer list, return NULL */
+
+	if (node->flags & ANOBJ_END_OF_PEER_LIST) {
+		return NULL;
+	}
+
+	/* Otherwise just return the next peer */
+
+	return (node->peer);
+}
+
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_find_parent_name
+ *
+ * PARAMETERS:  *child_node            - Named Obj whose name is to be found
+ *
+ * RETURN:      The ACPI name
+ *
+ * DESCRIPTION: Search for the given obj in its parent scope and return the
+ *              name segment, or "????" if the parent name can't be found
+ *              (which "should not happen").
+ *
+ ******************************************************************************/
+
+acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node * child_node)
+{
+	struct acpi_namespace_node *parent_node;
+
+	ACPI_FUNCTION_TRACE(ns_find_parent_name);
+
+	if (child_node) {
+
+		/* Valid entry.  Get the parent Node */
+
+		parent_node = acpi_ns_get_parent_node(child_node);
+		if (parent_node) {
+			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+					  "Parent of %p [%4.4s] is %p [%4.4s]\n",
+					  child_node,
+					  acpi_ut_get_node_name(child_node),
+					  parent_node,
+					  acpi_ut_get_node_name(parent_node)));
+
+			if (parent_node->name.integer) {
+				return_VALUE((acpi_name) parent_node->name.
+					     integer);
+			}
+		}
+
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "Unable to find parent of %p (%4.4s)\n",
+				  child_node,
+				  acpi_ut_get_node_name(child_node)));
+	}
+
+	return_VALUE(ACPI_UNKNOWN_NAME);
+}
+#endif
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
new file mode 100644
index 0000000..200895f
--- /dev/null
+++ b/drivers/acpi/acpica/nswalk.c
@@ -0,0 +1,296 @@
+/******************************************************************************
+ *
+ * Module Name: nswalk - Functions for walking the ACPI namespace
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nswalk")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_get_next_node
+ *
+ * PARAMETERS:  Type                - Type of node to be searched for
+ *              parent_node         - Parent node whose children we are
+ *                                    getting
+ *              child_node          - Previous child that was found.
+ *                                    The NEXT child will be returned
+ *
+ * RETURN:      struct acpi_namespace_node - Pointer to the NEXT child or NULL if
+ *                                    none is found.
+ *
+ * DESCRIPTION: Return the next peer node within the namespace. If child_node
+ *              is valid, the search starts after it. Otherwise, the first
+ *              child of parent_node is returned.
+ *
+ ******************************************************************************/
+struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, struct acpi_namespace_node
+						  *parent_node, struct acpi_namespace_node
+						  *child_node)
+{
+	struct acpi_namespace_node *next_node = NULL;
+
+	ACPI_FUNCTION_ENTRY();
+
+	if (!child_node) {
+
+		/* It's really the parent's _scope_ that we want */
+
+		next_node = parent_node->child;
+	}
+
+	else {
+		/* Start search at the NEXT node */
+
+		next_node = acpi_ns_get_next_valid_node(child_node);
+	}
+
+	/* If any type is OK, we are done */
+
+	if (type == ACPI_TYPE_ANY) {
+
+		/* next_node is NULL if we are at the end-of-list */
+
+		return (next_node);
+	}
+
+	/* Must search for the node -- but within this scope only */
+
+	while (next_node) {
+
+		/* If type matches, we are done */
+
+		if (next_node->type == type) {
+			return (next_node);
+		}
+
+		/* Otherwise, move on to the next node */
+
+		next_node = acpi_ns_get_next_valid_node(next_node);
+	}
+
+	/* Not found */
+
+	return (NULL);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_walk_namespace
+ *
+ * PARAMETERS:  Type                - acpi_object_type to search for
+ *              start_node          - Handle in namespace where search begins
+ *              max_depth           - Depth to which search is to reach
+ *              Flags               - Whether to unlock the NS before invoking
+ *                                    the callback routine
+ *              user_function       - Called when an object of "Type" is found
+ *              Context             - Passed to user function
+ *              return_value        - Where the value returned by the
+ *                                    user_function is placed if the walk
+ *                                    terminated early. Otherwise, NULL.
+ *
+ * RETURNS:     Status
+ *
+ * DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
+ *              starting (and ending) at the node specified by start_node.
+ *              The user_function is called whenever a node that matches
+ *              the type parameter is found.  If the user function returns
+ *              a non-zero value, the search is terminated immediately and this
+ *              value is returned to the caller.
+ *
+ *              The point of this procedure is to provide a generic namespace
+ *              walk routine that can be called from multiple places to
+ *              provide multiple services;  the User Function can be tailored
+ *              to each task, whether it is a print function, a compare
+ *              function, etc.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_walk_namespace(acpi_object_type type,
+		       acpi_handle start_node,
+		       u32 max_depth,
+		       u32 flags,
+		       acpi_walk_callback user_function,
+		       void *context, void **return_value)
+{
+	acpi_status status;
+	acpi_status mutex_status;
+	struct acpi_namespace_node *child_node;
+	struct acpi_namespace_node *parent_node;
+	acpi_object_type child_type;
+	u32 level;
+
+	ACPI_FUNCTION_TRACE(ns_walk_namespace);
+
+	/* Special case for the namespace Root Node */
+
+	if (start_node == ACPI_ROOT_OBJECT) {
+		start_node = acpi_gbl_root_node;
+	}
+
+	/* Null child means "get first node" */
+
+	parent_node = start_node;
+	child_node = NULL;
+	child_type = ACPI_TYPE_ANY;
+	level = 1;
+
+	/*
+	 * Traverse the tree of nodes until we bubble back up to where we
+	 * started. When Level is zero, the loop is done because we have
+	 * bubbled up to (and passed) the original parent handle (start_node)
+	 */
+	while (level > 0) {
+
+		/* Get the next node in this scope.  Null if not found */
+
+		status = AE_OK;
+		child_node =
+		    acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
+					  child_node);
+		if (child_node) {
+
+			/* Found next child, get the type if we are not searching for ANY */
+
+			if (type != ACPI_TYPE_ANY) {
+				child_type = child_node->type;
+			}
+
+			/*
+			 * Ignore all temporary namespace nodes (created during control
+			 * method execution) unless told otherwise. These temporary nodes
+			 * can cause a race condition because they can be deleted during the
+			 * execution of the user function (if the namespace is unlocked before
+			 * invocation of the user function.) Only the debugger namespace dump
+			 * will examine the temporary nodes.
+			 */
+			if ((child_node->flags & ANOBJ_TEMPORARY) &&
+			    !(flags & ACPI_NS_WALK_TEMP_NODES)) {
+				status = AE_CTRL_DEPTH;
+			}
+
+			/* Type must match requested type */
+
+			else if (child_type == type) {
+				/*
+				 * Found a matching node, invoke the user callback function.
+				 * Unlock the namespace if flag is set.
+				 */
+				if (flags & ACPI_NS_WALK_UNLOCK) {
+					mutex_status =
+					    acpi_ut_release_mutex
+					    (ACPI_MTX_NAMESPACE);
+					if (ACPI_FAILURE(mutex_status)) {
+						return_ACPI_STATUS
+						    (mutex_status);
+					}
+				}
+
+				status =
+				    user_function(child_node, level, context,
+						  return_value);
+
+				if (flags & ACPI_NS_WALK_UNLOCK) {
+					mutex_status =
+					    acpi_ut_acquire_mutex
+					    (ACPI_MTX_NAMESPACE);
+					if (ACPI_FAILURE(mutex_status)) {
+						return_ACPI_STATUS
+						    (mutex_status);
+					}
+				}
+
+				switch (status) {
+				case AE_OK:
+				case AE_CTRL_DEPTH:
+
+					/* Just keep going */
+					break;
+
+				case AE_CTRL_TERMINATE:
+
+					/* Exit now, with OK status */
+
+					return_ACPI_STATUS(AE_OK);
+
+				default:
+
+					/* All others are valid exceptions */
+
+					return_ACPI_STATUS(status);
+				}
+			}
+
+			/*
+			 * Depth first search: Attempt to go down another level in the
+			 * namespace if we are allowed to.  Don't go any further if we have
+			 * reached the caller specified maximum depth or if the user
+			 * function has specified that the maximum depth has been reached.
+			 */
+			if ((level < max_depth) && (status != AE_CTRL_DEPTH)) {
+				if (acpi_ns_get_next_node
+				    (ACPI_TYPE_ANY, child_node, NULL)) {
+
+					/* There is at least one child of this node, visit it */
+
+					level++;
+					parent_node = child_node;
+					child_node = NULL;
+				}
+			}
+		} else {
+			/*
+			 * No more children of this node (acpi_ns_get_next_node failed), go
+			 * back upwards in the namespace tree to the node's parent.
+			 */
+			level--;
+			child_node = parent_node;
+			parent_node = acpi_ns_get_parent_node(parent_node);
+		}
+	}
+
+	/* Complete walk, not terminated by user function */
+
+	return_ACPI_STATUS(AE_OK);
+}
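+
+/*
+ * Usage sketch (illustrative only; the callback and its arguments are
+ * hypothetical, and locking is the caller's responsibility): a caller
+ * provides an acpi_walk_callback and steers the walk through the status it
+ * returns - AE_OK continues, AE_CTRL_DEPTH skips the current subtree, and
+ * AE_CTRL_TERMINATE ends the walk early.
+ *
+ *      static acpi_status count_devices(acpi_handle obj, u32 level,
+ *                                       void *context, void **return_value)
+ *      {
+ *              (*(u32 *)context)++;
+ *              return (AE_OK);
+ *      }
+ *
+ *      u32 count = 0;
+ *      (void)acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ *                                   ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
+ *                                   count_devices, &count, NULL);
+ */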
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
new file mode 100644
index 0000000..22a7171
--- /dev/null
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -0,0 +1,812 @@
+/*******************************************************************************
+ *
+ * Module Name: nsxfeval - Public interfaces to the ACPI subsystem
+ *                         ACPI Object evaluation interfaces
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsxfeval")
+
+/* Local prototypes */
+static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
+
+#ifdef ACPI_FUTURE_USAGE
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_evaluate_object_typed
+ *
+ * PARAMETERS:  Handle              - Object handle (optional)
+ *              Pathname            - Object pathname (optional)
+ *              external_params     - List of parameters to pass to method,
+ *                                    terminated by NULL.  May be NULL
+ *                                    if no parameters are being passed.
+ *              return_buffer       - Where to put method's return value (if
+ *                                    any).  If NULL, no value is returned.
+ *              return_type         - Expected type of return object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Find and evaluate the given object, passing the given
+ *              parameters if necessary.  One of "Handle" or "Pathname" must
+ *              be valid (non-null)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_evaluate_object_typed(acpi_handle handle,
+			   acpi_string pathname,
+			   struct acpi_object_list *external_params,
+			   struct acpi_buffer *return_buffer,
+			   acpi_object_type return_type)
+{
+	acpi_status status;
+	u8 must_free = FALSE;
+
+	ACPI_FUNCTION_TRACE(acpi_evaluate_object_typed);
+
+	/* Return buffer must be valid */
+
+	if (!return_buffer) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	if (return_buffer->length == ACPI_ALLOCATE_BUFFER) {
+		must_free = TRUE;
+	}
+
+	/* Evaluate the object */
+
+	status =
+	    acpi_evaluate_object(handle, pathname, external_params,
+				 return_buffer);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Type ANY means "don't care" */
+
+	if (return_type == ACPI_TYPE_ANY) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	if (return_buffer->length == 0) {
+
+		/* Error because caller specifically asked for a return value */
+
+		ACPI_ERROR((AE_INFO, "No return value"));
+		return_ACPI_STATUS(AE_NULL_OBJECT);
+	}
+
+	/* Examine the object type returned from evaluate_object */
+
+	if (((union acpi_object *)return_buffer->pointer)->type == return_type) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Return object type does not match requested type */
+
+	ACPI_ERROR((AE_INFO,
+		    "Incorrect return type [%s] requested [%s]",
+		    acpi_ut_get_type_name(((union acpi_object *)return_buffer->
+					   pointer)->type),
+		    acpi_ut_get_type_name(return_type)));
+
+	if (must_free) {
+
+		/* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */
+
+		ACPI_FREE(return_buffer->pointer);
+		return_buffer->pointer = NULL;
+	}
+
+	return_buffer->length = 0;
+	return_ACPI_STATUS(AE_TYPE);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
+#endif				/*  ACPI_FUTURE_USAGE  */
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_evaluate_object
+ *
+ * PARAMETERS:  Handle              - Object handle (optional)
+ *              Pathname            - Object pathname (optional)
+ *              external_params     - List of parameters to pass to method,
+ *                                    terminated by NULL.  May be NULL
+ *                                    if no parameters are being passed.
+ *              return_buffer       - Where to put method's return value (if
+ *                                    any).  If NULL, no value is returned.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Find and evaluate the given object, passing the given
+ *              parameters if necessary.  One of "Handle" or "Pathname" must
+ *              be valid (non-null)
+ *
+ ******************************************************************************/
+acpi_status
+acpi_evaluate_object(acpi_handle handle,
+		     acpi_string pathname,
+		     struct acpi_object_list *external_params,
+		     struct acpi_buffer *return_buffer)
+{
+	acpi_status status;
+	struct acpi_evaluate_info *info;
+	acpi_size buffer_space_needed;
+	u32 i;
+
+	ACPI_FUNCTION_TRACE(acpi_evaluate_object);
+
+	/* Allocate and initialize the evaluation information block */
+
+	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+	if (!info) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	info->pathname = pathname;
+
+	/* Convert and validate the device handle */
+
+	info->prefix_node = acpi_ns_map_handle_to_node(handle);
+	if (!info->prefix_node) {
+		status = AE_BAD_PARAMETER;
+		goto cleanup;
+	}
+
+	/*
+	 * If there are parameters to be passed to a control method, the external
+	 * objects must all be converted to internal objects
+	 */
+	if (external_params && external_params->count) {
+		/*
+		 * Allocate a new parameter block for the internal objects
+		 * Add 1 to count to allow for null terminated internal list
+		 */
+		info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size)
+							 external_params->
+							 count +
+							 1) * sizeof(void *));
+		if (!info->parameters) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		/* Convert each external object in the list to an internal object */
+
+		for (i = 0; i < external_params->count; i++) {
+			status =
+			    acpi_ut_copy_eobject_to_iobject(&external_params->
+							    pointer[i],
+							    &info->
+							    parameters[i]);
+			if (ACPI_FAILURE(status)) {
+				goto cleanup;
+			}
+		}
+		info->parameters[external_params->count] = NULL;
+	}
+
+	/*
+	 * Three major cases:
+	 * 1) Fully qualified pathname
+	 * 2) No handle, not fully qualified pathname (error)
+	 * 3) Valid handle
+	 */
+	if ((pathname) && (acpi_ns_valid_root_prefix(pathname[0]))) {
+
+		/* The path is fully qualified, just evaluate by name */
+
+		info->prefix_node = NULL;
+		status = acpi_ns_evaluate(info);
+	} else if (!handle) {
+		/*
+		 * A handle is optional iff a fully qualified pathname is specified.
+		 * Since we've already handled fully qualified names above, this is
+		 * an error
+		 */
+		if (!pathname) {
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+					  "Both Handle and Pathname are NULL"));
+		} else {
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+					  "Null Handle with relative pathname [%s]",
+					  pathname));
+		}
+
+		status = AE_BAD_PARAMETER;
+	} else {
+		/* We have a namespace node and a possible relative path */
+
+		status = acpi_ns_evaluate(info);
+	}
+
+	/*
+	 * If we are expecting a return value, and all went well above,
+	 * copy the return value to an external object.
+	 */
+	if (return_buffer) {
+		if (!info->return_object) {
+			return_buffer->length = 0;
+		} else {
+			if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) ==
+			    ACPI_DESC_TYPE_NAMED) {
+				/*
+				 * If we received a NS Node as a return object, this means that
+				 * the object we are evaluating has nothing interesting to
+				 * return (such as a mutex, etc.)  We return an error because
+				 * these types are essentially unsupported by this interface.
+				 * We don't check up front because this makes it easier to add
+				 * support for various types at a later date if necessary.
+				 */
+				status = AE_TYPE;
+				info->return_object = NULL;	/* No need to delete a NS Node */
+				return_buffer->length = 0;
+			}
+
+			if (ACPI_SUCCESS(status)) {
+
+				/* Dereference Index and ref_of references */
+
+				acpi_ns_resolve_references(info);
+
+				/* Get the size of the returned object */
+
+				status =
+				    acpi_ut_get_object_size(info->return_object,
+							    &buffer_space_needed);
+				if (ACPI_SUCCESS(status)) {
+
+					/* Validate/Allocate/Clear caller buffer */
+
+					status =
+					    acpi_ut_initialize_buffer
+					    (return_buffer,
+					     buffer_space_needed);
+					if (ACPI_FAILURE(status)) {
+						/*
+						 * Caller's buffer is too small or a new one can't
+						 * be allocated
+						 */
+						ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+								  "Needed buffer size %X, %s\n",
+								  (u32)
+								  buffer_space_needed,
+								  acpi_format_exception
+								  (status)));
+					} else {
+						/* We have enough space for the object, build it */
+
+						status =
+						    acpi_ut_copy_iobject_to_eobject
+						    (info->return_object,
+						     return_buffer);
+					}
+				}
+			}
+		}
+	}
+
+	if (info->return_object) {
+		/*
+		 * Delete the internal return object. NOTE: Interpreter must be
+		 * locked to avoid race condition.
+		 */
+		acpi_ex_enter_interpreter();
+
+		/* Remove one reference on the return object (should delete it) */
+
+		acpi_ut_remove_reference(info->return_object);
+		acpi_ex_exit_interpreter();
+	}
+
+      cleanup:
+
+	/* Free the input parameter list (if we created one) */
+
+	if (info->parameters) {
+
+		/* Free the allocated parameter block */
+
+		acpi_ut_delete_internal_object_list(info->parameters);
+	}
+
+	ACPI_FREE(info);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_evaluate_object)
+
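For reference, a minimal usage sketch of the interface above: it evaluates a hypothetical control method "_TST" under a device handle, passes one integer argument, and lets ACPICA allocate the return buffer via ACPI_ALLOCATE_BUFFER, which the caller must then free. The method name, the helper name, and the assumption that the method returns an Integer are illustrative only.

#include <acpi/acpi.h>

static acpi_status evaluate_tst(acpi_handle device)
{
	union acpi_object arg;			/* One integer argument */
	struct acpi_object_list args = { 1, &arg };
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *result;
	acpi_status status;

	arg.type = ACPI_TYPE_INTEGER;
	arg.integer.value = 1;

	/* "_TST" is a hypothetical method name, used only for illustration */
	status = acpi_evaluate_object(device, "_TST", &args, &output);
	if (ACPI_FAILURE(status))
		return status;

	result = output.pointer;
	if (result && result->type == ACPI_TYPE_INTEGER)
		status = AE_OK;		/* result->integer.value is the data */
	else
		status = AE_TYPE;

	/* With ACPI_ALLOCATE_BUFFER, the caller owns and must free the buffer */
	ACPI_FREE(output.pointer);
	return status;
}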
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_resolve_references
+ *
+ * PARAMETERS:  Info                    - Evaluation info block
+ *
+ * RETURN:      Info->return_object is replaced with the dereferenced object
+ *
+ * DESCRIPTION: Dereference certain reference objects. Called before an
+ *              internal return object is converted to an external union acpi_object.
+ *
+ * Performs an automatic dereference of Index and ref_of reference objects.
+ * These reference objects are not supported by the union acpi_object, so this is a
+ * last resort effort to return something useful. Also, provides compatibility
+ * with other ACPI implementations.
+ *
+ * NOTE: does not handle references within returned package objects or nested
+ * references, but this support could be added later if found to be necessary.
+ *
+ ******************************************************************************/
+static void acpi_ns_resolve_references(struct acpi_evaluate_info *info)
+{
+	union acpi_operand_object *obj_desc = NULL;
+	struct acpi_namespace_node *node;
+
+	/* We are interested in reference objects only */
+
+	if (ACPI_GET_OBJECT_TYPE(info->return_object) !=
+	    ACPI_TYPE_LOCAL_REFERENCE) {
+		return;
+	}
+
+	/*
+	 * Two types of references are supported - those created by Index and
+	 * ref_of operators. A name reference (AML_NAMEPATH_OP) can be converted
+	 * to a union acpi_object, so it is not dereferenced here. A ddb_handle
+	 * (AML_LOAD_OP) cannot be dereferenced, nor can it be converted to
+	 * a union acpi_object.
+	 */
+	switch (info->return_object->reference.class) {
+	case ACPI_REFCLASS_INDEX:
+
+		obj_desc = *(info->return_object->reference.where);
+		break;
+
+	case ACPI_REFCLASS_REFOF:
+
+		node = info->return_object->reference.object;
+		if (node) {
+			obj_desc = node->object;
+		}
+		break;
+
+	default:
+		return;
+	}
+
+	/* Replace the existing reference object */
+
+	if (obj_desc) {
+		acpi_ut_add_reference(obj_desc);
+		acpi_ut_remove_reference(info->return_object);
+		info->return_object = obj_desc;
+	}
+
+	return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_walk_namespace
+ *
+ * PARAMETERS:  Type                - acpi_object_type to search for
+ *              start_object        - Handle in namespace where search begins
+ *              max_depth           - Depth to which search is to reach
+ *              user_function       - Called when an object of "Type" is found
+ *              Context             - Passed to user function
+ *              return_value        - Location where return value of
+ *                                    user_function is put if terminated early
+ *
+ * RETURN:      Return value from the user_function if terminated early.
+ *              Otherwise, returns NULL.
+ *
+ * DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
+ *              starting (and ending) at the object specified by start_handle.
+ *              The user_function is called whenever an object that matches
+ *              the type parameter is found.  If the user function returns
+ *              a non-zero value, the search is terminated immediately and this
+ *              value is returned to the caller.
+ *
+ *              The point of this procedure is to provide a generic namespace
+ *              walk routine that can be called from multiple places to
+ *              provide multiple services;  the User Function can be tailored
+ *              to each task, whether it is a print function, a compare
+ *              function, etc.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_walk_namespace(acpi_object_type type,
+		    acpi_handle start_object,
+		    u32 max_depth,
+		    acpi_walk_callback user_function,
+		    void *context, void **return_value)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_walk_namespace);
+
+	/* Parameter validation */
+
+	if ((type > ACPI_TYPE_LOCAL_MAX) || (!max_depth) || (!user_function)) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/*
+	 * Lock the namespace around the walk.
+	 * The namespace will be unlocked/relocked around each call
+	 * to the user function, since the user function
+	 * must be allowed to make ACPI calls itself.
+	 */
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_ns_walk_namespace(type, start_object, max_depth,
+					ACPI_NS_WALK_UNLOCK,
+					user_function, context, return_value);
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_walk_namespace)
+
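As an illustration of the walk interface documented above, the following sketch counts all Device objects in the namespace. The callback matches the acpi_walk_callback signature; the function names are made up for the example.

#include <acpi/acpi.h>

static acpi_status count_device(acpi_handle handle, u32 nesting_level,
				void *context, void **return_value)
{
	u32 *count = context;

	(*count)++;
	return AE_OK;		/* AE_OK continues the walk */
}

static u32 count_all_devices(void)
{
	u32 count = 0;

	/* Walk every Device object, starting at the namespace root */
	(void)acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				  ACPI_UINT32_MAX, count_device,
				  &count, NULL);
	return count;
}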
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_get_device_callback
+ *
+ * PARAMETERS:  Callback from acpi_get_device
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Callback from acpi_walk_namespace. Filters out devices that
+ *              are not present; if the caller specified a HID, also filters
+ *              on that HID (and the _CID list).
+ *
+ ******************************************************************************/
+static acpi_status
+acpi_ns_get_device_callback(acpi_handle obj_handle,
+			    u32 nesting_level,
+			    void *context, void **return_value)
+{
+	struct acpi_get_devices_info *info = context;
+	acpi_status status;
+	struct acpi_namespace_node *node;
+	u32 flags;
+	struct acpica_device_id hid;
+	struct acpi_compatible_id_list *cid;
+	u32 i;
+	int found;
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	node = acpi_ns_map_handle_to_node(obj_handle);
+	status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	if (!node) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	/* Run _STA to determine if device is present */
+
+	status = acpi_ut_execute_STA(node, &flags);
+	if (ACPI_FAILURE(status)) {
+		return (AE_CTRL_DEPTH);
+	}
+
+	if (!(flags & ACPI_STA_DEVICE_PRESENT) &&
+	    !(flags & ACPI_STA_DEVICE_FUNCTIONING)) {
+		/*
+		 * The device is neither present nor functional, so do not
+		 * examine its children (AE_CTRL_DEPTH skips the subtree).
+		 * See the ACPI spec description of _STA for more information.
+		 */
+		return (AE_CTRL_DEPTH);
+	}
+
+	/* Filter based on device HID & CID */
+
+	if (info->hid != NULL) {
+		status = acpi_ut_execute_HID(node, &hid);
+		if (status == AE_NOT_FOUND) {
+			return (AE_OK);
+		} else if (ACPI_FAILURE(status)) {
+			return (AE_CTRL_DEPTH);
+		}
+
+		if (ACPI_STRNCMP(hid.value, info->hid, sizeof(hid.value)) != 0) {
+
+			/* Get the list of Compatible IDs */
+
+			status = acpi_ut_execute_CID(node, &cid);
+			if (status == AE_NOT_FOUND) {
+				return (AE_OK);
+			} else if (ACPI_FAILURE(status)) {
+				return (AE_CTRL_DEPTH);
+			}
+
+			/* Walk the CID list */
+
+			found = 0;
+			for (i = 0; i < cid->count; i++) {
+				if (ACPI_STRNCMP(cid->id[i].value, info->hid,
+						 sizeof(struct
+							acpi_compatible_id)) ==
+				    0) {
+					found = 1;
+					break;
+				}
+			}
+			ACPI_FREE(cid);
+			if (!found)
+				return (AE_OK);
+		}
+	}
+
+	status = info->user_function(obj_handle, nesting_level, info->context,
+				     return_value);
+	return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_devices
+ *
+ * PARAMETERS:  HID                 - HID to search for. Can be NULL.
+ *              user_function       - Called when a matching object is found
+ *              Context             - Passed to user function
+ *              return_value        - Location where return value of
+ *                                    user_function is put if terminated early
+ *
+ * RETURN:      Return value from the user_function if terminated early.
+ *              Otherwise, returns NULL.
+ *
+ * DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
+ *              starting (and ending) at the namespace root.
+ *              The user_function is called whenever an object of type
+ *              Device is found.  If the user function returns
+ *              a non-zero value, the search is terminated immediately and this
+ *              value is returned to the caller.
+ *
+ *              This is a wrapper for walk_namespace, but the callback performs
+ *              additional filtering. Please see acpi_ns_get_device_callback.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_get_devices(const char *HID,
+		 acpi_walk_callback user_function,
+		 void *context, void **return_value)
+{
+	acpi_status status;
+	struct acpi_get_devices_info info;
+
+	ACPI_FUNCTION_TRACE(acpi_get_devices);
+
+	/* Parameter validation */
+
+	if (!user_function) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/*
+	 * We're going to call their callback from OUR callback, so we need
+	 * to know what it is, and their context parameter.
+	 */
+	info.hid = HID;
+	info.context = context;
+	info.user_function = user_function;
+
+	/*
+	 * Lock the namespace around the walk.
+	 * The namespace will be unlocked/relocked around each call
+	 * to the user function, since the user function
+	 * must be allowed to make ACPI calls itself.
+	 */
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+					ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
+					acpi_ns_get_device_callback, &info,
+					return_value);
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_devices)
+
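A short sketch of the filtered walk described above, counting control method batteries; "PNP0C0A" is the standard battery _HID, and the helper names are illustrative.

#include <acpi/acpi.h>

static acpi_status found_battery(acpi_handle handle, u32 nesting_level,
				 void *context, void **return_value)
{
	int *hits = context;

	(*hits)++;
	return AE_OK;
}

static int count_batteries(void)
{
	int hits = 0;

	/* "PNP0C0A" is the _HID of a control method battery */
	(void)acpi_get_devices("PNP0C0A", found_battery, &hits, NULL);
	return hits;
}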
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_attach_data
+ *
+ * PARAMETERS:  obj_handle          - Namespace node
+ *              Handler             - Handler for this attachment
+ *              Data                - Pointer to data to be attached
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Attach arbitrary data and handler to a namespace node.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_attach_data(acpi_handle obj_handle,
+		 acpi_object_handler handler, void *data)
+{
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	/* Parameter validation */
+
+	if (!obj_handle || !handler || !data) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* Convert and validate the handle */
+
+	node = acpi_ns_map_handle_to_node(obj_handle);
+	if (!node) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	status = acpi_ns_attach_data(node, handler, data);
+
+      unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_attach_data)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_detach_data
+ *
+ * PARAMETERS:  obj_handle          - Namespace node handle
+ *              Handler             - Handler used in call to acpi_attach_data
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove data that was previously attached to a node.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
+{
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	/* Parameter validation */
+
+	if (!obj_handle || !handler) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* Convert and validate the handle */
+
+	node = acpi_ns_map_handle_to_node(obj_handle);
+	if (!node) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	status = acpi_ns_detach_data(node, handler);
+
+      unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_detach_data)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_data
+ *
+ * PARAMETERS:  obj_handle          - Namespace node
+ *              Handler             - Handler used in call to attach_data
+ *              Data                - Where the data is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Retrieve data that was previously attached to a namespace node.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
+{
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	/* Parameter validation */
+
+	if (!obj_handle || !handler || !data) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* Convert and validate the handle */
+
+	node = acpi_ns_map_handle_to_node(obj_handle);
+	if (!node) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	status = acpi_ns_get_attached_data(node, handler, data);
+
+      unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_data)
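The attach/get/detach trio above is typically used together, as in this sketch; the structure, handler, and helper names are hypothetical, and the handler body is left empty since its cleanup policy is driver-specific.

#include <acpi/acpi.h>

struct my_node_info {
	int flags;
};

/* Called by ACPICA if the namespace node goes away with data still attached */
static void my_info_handler(acpi_handle handle, void *data)
{
	/* A real driver would release 'data' here */
}

static acpi_status tag_node(acpi_handle handle, struct my_node_info *info)
{
	struct my_node_info *stored;
	acpi_status status;

	status = acpi_attach_data(handle, my_info_handler, info);
	if (ACPI_FAILURE(status))
		return status;

	/* Retrieval must use the same handler that attached the data */
	status = acpi_get_data(handle, my_info_handler, (void **)&stored);
	if (ACPI_FAILURE(status))
		return status;

	/* ... use 'stored' ... */

	return acpi_detach_data(handle, my_info_handler);
}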
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
new file mode 100644
index 0000000..9589fea
--- /dev/null
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -0,0 +1,360 @@
+/******************************************************************************
+ *
+ * Module Name: nsxfname - Public interfaces to the ACPI subsystem
+ *                         ACPI Namespace oriented interfaces
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsxfname")
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_get_handle
+ *
+ * PARAMETERS:  Parent          - Object to search under (search scope).
+ *              Pathname        - Pointer to an asciiz string containing the
+ *                                name
+ *              ret_handle      - Where the return handle is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This routine will search for a caller-specified name in the
+ *              namespace.  The caller can restrict the search region by
+ *              specifying a non-NULL parent.  The parent value is itself a
+ *              namespace handle.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_handle(acpi_handle parent,
+		acpi_string pathname, acpi_handle * ret_handle)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node = NULL;
+	struct acpi_namespace_node *prefix_node = NULL;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/* Parameter Validation */
+
+	if (!ret_handle || !pathname) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	/* Convert a parent handle to a prefix node */
+
+	if (parent) {
+		prefix_node = acpi_ns_map_handle_to_node(parent);
+		if (!prefix_node) {
+			return (AE_BAD_PARAMETER);
+		}
+	}
+
+	/*
+	 * Valid cases are:
+	 * 1) Fully qualified pathname
+	 * 2) Parent + Relative pathname
+	 *
+	 * Error for <null Parent + relative path>
+	 */
+	if (acpi_ns_valid_root_prefix(pathname[0])) {
+
+		/* Pathname is fully qualified (starts with '\') */
+
+		/* Special case for root-only, since we can't search for it */
+
+		if (!ACPI_STRCMP(pathname, ACPI_NS_ROOT_PATH)) {
+			*ret_handle =
+			    acpi_ns_convert_entry_to_handle(acpi_gbl_root_node);
+			return (AE_OK);
+		}
+	} else if (!prefix_node) {
+
+		/* Relative path with null prefix is disallowed */
+
+		return (AE_BAD_PARAMETER);
+	}
+
+	/* Find the Node and convert to a handle */
+
+	status =
+	    acpi_ns_get_node(prefix_node, pathname, ACPI_NS_NO_UPSEARCH, &node);
+	if (ACPI_SUCCESS(status)) {
+		*ret_handle = acpi_ns_convert_entry_to_handle(node);
+	}
+
+	return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_handle)
+
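A brief sketch of both lookup forms described above: an absolute path (NULL parent allowed) and a relative path searched under a caller-supplied handle. The chosen paths ("\_SB" and "_CRS") are only examples and need not exist under every handle.

#include <acpi/acpi.h>

static acpi_status lookup_examples(acpi_handle device)
{
	acpi_handle sb_handle;
	acpi_handle crs_handle;
	acpi_status status;

	/* Absolute path: Parent may be NULL for a fully qualified name */
	status = acpi_get_handle(NULL, "\\_SB", &sb_handle);
	if (ACPI_FAILURE(status))
		return status;

	/* Relative path: search for _CRS directly under 'device' */
	status = acpi_get_handle(device, "_CRS", &crs_handle);
	return status;
}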
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_get_name
+ *
+ * PARAMETERS:  Handle          - Handle to be converted to a pathname
+ *              name_type       - Full pathname or single segment
+ *              Buffer          - Buffer for returned path
+ *
+ * RETURN:      Status.  The Buffer is filled with a string containing the
+ *              fully qualified name.
+ *
+ * DESCRIPTION: This routine returns the fully qualified name associated with
+ *              the Handle parameter.  This and acpi_get_handle are
+ *              complementary functions.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+
+	/* Parameter validation */
+
+	if (name_type > ACPI_NAME_TYPE_MAX) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_validate_buffer(buffer);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	if (name_type == ACPI_FULL_PATHNAME) {
+
+		/* Get the full pathname (From the namespace root) */
+
+		status = acpi_ns_handle_to_pathname(handle, buffer);
+		return (status);
+	}
+
+	/*
+	 * Wants the single segment ACPI name.
+	 * Validate handle and convert to a namespace Node
+	 */
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	node = acpi_ns_map_handle_to_node(handle);
+	if (!node) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/* Validate/Allocate/Clear caller buffer */
+
+	status = acpi_ut_initialize_buffer(buffer, ACPI_PATH_SEGMENT_LENGTH);
+	if (ACPI_FAILURE(status)) {
+		goto unlock_and_exit;
+	}
+
+	/* Just copy the ACPI name from the Node and zero terminate it */
+
+	ACPI_STRNCPY(buffer->pointer, acpi_ut_get_node_name(node),
+		     ACPI_NAME_SIZE);
+	((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
+	status = AE_OK;
+
+      unlock_and_exit:
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_name)
+
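A minimal sketch of retrieving and printing the full pathname for a handle, assuming the ACPI_ALLOCATE_BUFFER semantics described above; the function name is illustrative.

#include <linux/kernel.h>
#include <acpi/acpi.h>

static void print_full_path(acpi_handle handle)
{
	struct acpi_buffer path = { ACPI_ALLOCATE_BUFFER, NULL };

	/* ACPI_FULL_PATHNAME builds the path from the namespace root */
	if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &path))) {
		printk(KERN_DEBUG "ACPI path: %s\n", (char *)path.pointer);
		ACPI_FREE(path.pointer);
	}
}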
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_get_object_info
+ *
+ * PARAMETERS:  Handle          - Object Handle
+ *              Buffer          - Where the info is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Returns information about an object as gleaned from the
+ *              namespace node and possibly by running several standard
+ *              control methods (such as in the case of a device).
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+	struct acpi_device_info *info;
+	struct acpi_device_info *return_info;
+	struct acpi_compatible_id_list *cid_list = NULL;
+	acpi_size size;
+
+	/* Parameter validation */
+
+	if (!handle || !buffer) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_validate_buffer(buffer);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_device_info));
+	if (!info) {
+		return (AE_NO_MEMORY);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	node = acpi_ns_map_handle_to_node(handle);
+	if (!node) {
+		(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+		status = AE_BAD_PARAMETER;
+		goto cleanup;
+	}
+
+	/* Init return structure */
+
+	size = sizeof(struct acpi_device_info);
+
+	info->type = node->type;
+	info->name = node->name.integer;
+	info->valid = 0;
+
+	if (node->type == ACPI_TYPE_METHOD) {
+		info->param_count = node->object->method.param_count;
+	}
+
+	status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	/* If not a device, we are all done */
+
+	if (info->type == ACPI_TYPE_DEVICE) {
+		/*
+		 * Get extra info for ACPI Device objects only:
+		 * Run the Device _HID, _UID, _CID, _STA, _ADR and _sx_d methods.
+		 *
+		 * Note: none of these methods are required, so they may or may
+		 * not be present for this device.  The Info->Valid bitfield is used
+		 * to indicate which methods were found and ran successfully.
+		 */
+
+		/* Execute the Device._HID method */
+
+		status = acpi_ut_execute_HID(node, &info->hardware_id);
+		if (ACPI_SUCCESS(status)) {
+			info->valid |= ACPI_VALID_HID;
+		}
+
+		/* Execute the Device._UID method */
+
+		status = acpi_ut_execute_UID(node, &info->unique_id);
+		if (ACPI_SUCCESS(status)) {
+			info->valid |= ACPI_VALID_UID;
+		}
+
+		/* Execute the Device._CID method */
+
+		status = acpi_ut_execute_CID(node, &cid_list);
+		if (ACPI_SUCCESS(status)) {
+			size += cid_list->size;
+			info->valid |= ACPI_VALID_CID;
+		}
+
+		/* Execute the Device._STA method */
+
+		status = acpi_ut_execute_STA(node, &info->current_status);
+		if (ACPI_SUCCESS(status)) {
+			info->valid |= ACPI_VALID_STA;
+		}
+
+		/* Execute the Device._ADR method */
+
+		status = acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, node,
+							 &info->address);
+		if (ACPI_SUCCESS(status)) {
+			info->valid |= ACPI_VALID_ADR;
+		}
+
+		/* Execute the Device._sx_d methods */
+
+		status = acpi_ut_execute_sxds(node, info->highest_dstates);
+		if (ACPI_SUCCESS(status)) {
+			info->valid |= ACPI_VALID_SXDS;
+		}
+	}
+
+	/* Validate/Allocate/Clear caller buffer */
+
+	status = acpi_ut_initialize_buffer(buffer, size);
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	/* Populate the return buffer */
+
+	return_info = buffer->pointer;
+	ACPI_MEMCPY(return_info, info, sizeof(struct acpi_device_info));
+
+	if (cid_list) {
+		ACPI_MEMCPY(&return_info->compatibility_id, cid_list,
+			    cid_list->size);
+	}
+
+      cleanup:
+	ACPI_FREE(info);
+	if (cid_list) {
+		ACPI_FREE(cid_list);
+	}
+	return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_object_info)
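A sketch of consuming the buffer-based interface above; it assumes the acpi_device_info field names shown in this file (hardware_id, current_status, valid) and only reads fields whose ACPI_VALID_* bit is set.

#include <linux/kernel.h>
#include <acpi/acpi.h>

static acpi_status show_device_info(acpi_handle handle)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_device_info *info;
	acpi_status status;

	status = acpi_get_object_info(handle, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	info = buffer.pointer;

	/* Each field is meaningful only if its ACPI_VALID_* bit is set */
	if (info->valid & ACPI_VALID_HID)
		printk(KERN_DEBUG "HID: %s\n", info->hardware_id.value);
	if (info->valid & ACPI_VALID_STA)
		printk(KERN_DEBUG "_STA: 0x%08x\n", info->current_status);

	ACPI_FREE(buffer.pointer);
	return AE_OK;
}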
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
new file mode 100644
index 0000000..1c7efc1
--- /dev/null
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -0,0 +1,287 @@
+/*******************************************************************************
+ *
+ * Module Name: nsxfobj - Public interfaces to the ACPI subsystem
+ *                         ACPI Object oriented interfaces
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsxfobj")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_id
+ *
+ * PARAMETERS:  Handle          - Handle of object whose id is desired
+ *              ret_id          - Where the id will be placed
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This routine returns the owner id associated with a handle
+ *
+ ******************************************************************************/
+acpi_status acpi_get_id(acpi_handle handle, acpi_owner_id * ret_id)
+{
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	/* Parameter Validation */
+
+	if (!ret_id) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* Convert and validate the handle */
+
+	node = acpi_ns_map_handle_to_node(handle);
+	if (!node) {
+		(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+		return (AE_BAD_PARAMETER);
+	}
+
+	*ret_id = node->owner_id;
+
+	status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_id)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_type
+ *
+ * PARAMETERS:  Handle          - Handle of object whose type is desired
+ *              ret_type        - Where the type will be placed
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This routine returns the type associated with a particular handle
+ *
+ ******************************************************************************/
+acpi_status acpi_get_type(acpi_handle handle, acpi_object_type * ret_type)
+{
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	/* Parameter Validation */
+
+	if (!ret_type) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	/*
+	 * Special case for the predefined Root Node
+	 * (return type ANY)
+	 */
+	if (handle == ACPI_ROOT_OBJECT) {
+		*ret_type = ACPI_TYPE_ANY;
+		return (AE_OK);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* Convert and validate the handle */
+
+	node = acpi_ns_map_handle_to_node(handle);
+	if (!node) {
+		(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+		return (AE_BAD_PARAMETER);
+	}
+
+	*ret_type = node->type;
+
+	status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_type)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_parent
+ *
+ * PARAMETERS:  Handle          - Handle of object whose parent is desired
+ *              ret_handle      - Where the parent handle will be placed
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Returns a handle to the parent of the object represented by
+ *              Handle.
+ *
+ ******************************************************************************/
+acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
+{
+	struct acpi_namespace_node *node;
+	acpi_status status;
+
+	if (!ret_handle) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	/* Special case for the predefined Root Node (no parent) */
+
+	if (handle == ACPI_ROOT_OBJECT) {
+		return (AE_NULL_ENTRY);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* Convert and validate the handle */
+
+	node = acpi_ns_map_handle_to_node(handle);
+	if (!node) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/* Get the parent entry */
+
+	*ret_handle =
+	    acpi_ns_convert_entry_to_handle(acpi_ns_get_parent_node(node));
+
+	/* Return exception if parent is null */
+
+	if (!acpi_ns_get_parent_node(node)) {
+		status = AE_NULL_ENTRY;
+	}
+
+      unlock_and_exit:
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_parent)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_next_object
+ *
+ * PARAMETERS:  Type            - Type of object to be searched for
+ *              Parent          - Parent object whose children we are getting
+ *              last_child      - Previous child that was found.
+ *                                The NEXT child will be returned
+ *              ret_handle      - Where handle to the next object is placed
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Return the next object of the given Type within the namespace.
+ *              If last_child is valid, Parent is ignored and the peer
+ *              following last_child is returned.  Otherwise, the first child
+ *              of Parent is returned.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_next_object(acpi_object_type type,
+		     acpi_handle parent,
+		     acpi_handle child, acpi_handle * ret_handle)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+	struct acpi_namespace_node *parent_node = NULL;
+	struct acpi_namespace_node *child_node = NULL;
+
+	/* Parameter validation */
+
+	if (type > ACPI_TYPE_EXTERNAL_MAX) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* If null handle, use the parent */
+
+	if (!child) {
+
+		/* Start search at the beginning of the specified scope */
+
+		parent_node = acpi_ns_map_handle_to_node(parent);
+		if (!parent_node) {
+			status = AE_BAD_PARAMETER;
+			goto unlock_and_exit;
+		}
+	} else {
+		/* Non-null handle, ignore the parent */
+		/* Convert and validate the handle */
+
+		child_node = acpi_ns_map_handle_to_node(child);
+		if (!child_node) {
+			status = AE_BAD_PARAMETER;
+			goto unlock_and_exit;
+		}
+	}
+
+	/* Internal function does the real work */
+
+	node = acpi_ns_get_next_node(type, parent_node, child_node);
+	if (!node) {
+		status = AE_NOT_FOUND;
+		goto unlock_and_exit;
+	}
+
+	if (ret_handle) {
+		*ret_handle = acpi_ns_convert_entry_to_handle(node);
+	}
+
+      unlock_and_exit:
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_next_object)
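A small sketch of the usual enumeration pattern built on this interface: start with a NULL child to get the first child of Parent, then feed each returned handle back in to advance to the next peer. Names are illustrative.

#include <acpi/acpi.h>

/* Visit every immediate child object of 'parent', regardless of type */
static void for_each_child(acpi_handle parent)
{
	acpi_handle child = NULL;

	while (ACPI_SUCCESS(acpi_get_next_object(ACPI_TYPE_ANY, parent,
						 child, &child))) {
		acpi_object_type type;

		if (ACPI_SUCCESS(acpi_get_type(child, &type))) {
			/* ... act on 'child' based on 'type' ... */
		}
	}
}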
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
new file mode 100644
index 0000000..b161f35
--- /dev/null
+++ b/drivers/acpi/acpica/psargs.c
@@ -0,0 +1,752 @@
+/******************************************************************************
+ *
+ * Module Name: psargs - Parse AML opcode arguments
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "amlcode.h"
+#include "acnamesp.h"
+#include "acdispat.h"
+
+#define _COMPONENT          ACPI_PARSER
+ACPI_MODULE_NAME("psargs")
+
+/* Local prototypes */
+static u32
+acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state);
+
+static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
+						       *parser_state);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_next_package_length
+ *
+ * PARAMETERS:  parser_state        - Current parser state object
+ *
+ * RETURN:      Decoded package length. On completion, the AML pointer points
+ *              past the length byte or bytes.
+ *
+ * DESCRIPTION: Decode and return a package length field.
+ *              Note: Largest package length is 28 bits, from ACPI specification
+ *
+ ******************************************************************************/
+
+static u32
+acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
+{
+	u8 *aml = parser_state->aml;
+	u32 package_length = 0;
+	u32 byte_count;
+	u8 byte_zero_mask = 0x3F;	/* Default [0:5] */
+
+	ACPI_FUNCTION_TRACE(ps_get_next_package_length);
+
+	/*
+	 * Byte 0 bits [6:7] contain the number of additional bytes
+	 * used to encode the package length, either 0, 1, 2, or 3
+	 */
+	byte_count = (aml[0] >> 6);
+	parser_state->aml += ((acpi_size) byte_count + 1);
+
+	/* Get bytes 3, 2, 1 as needed */
+
+	while (byte_count) {
+		/*
+		 * Final bit positions for the package length bytes:
+		 *      Byte3->[20:27]
+		 *      Byte2->[12:19]
+		 *      Byte1->[04:11]
+		 *      Byte0->[00:03]
+		 */
+		package_length |= (aml[byte_count] << ((byte_count << 3) - 4));
+
+		byte_zero_mask = 0x0F;	/* Use bits [0:3] of byte 0 */
+		byte_count--;
+	}
+
+	/* Byte 0 is a special case, either bits [0:3] or [0:5] are used */
+
+	package_length |= (aml[0] & byte_zero_mask);
+	return_UINT32(package_length);
+}
+
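A worked example may help here. The encoding uses byte 0 bits [6:7] as a count of additional length bytes; for instance, the two bytes { 0x48, 0x12 } decode to 0x128. The following stand-alone, user-space sketch mirrors the algorithm above and is for illustration only.

#include <stdio.h>

/*
 * Decode an AML PkgLength field: byte_count = 0x48 >> 6 = 1,
 * length = (0x12 << 4) | (0x48 & 0x0F) = 0x128.
 */
static unsigned int decode_pkg_length(const unsigned char *aml)
{
	unsigned int byte_count = aml[0] >> 6;	/* 0..3 additional bytes */
	unsigned int length = 0;
	unsigned char byte_zero_mask = 0x3F;	/* Bits [0:5] if no extra bytes */
	unsigned int i;

	for (i = byte_count; i > 0; i--) {
		length |= (unsigned int)aml[i] << ((i << 3) - 4);
		byte_zero_mask = 0x0F;		/* Only bits [0:3] of byte 0 */
	}
	return length | (aml[0] & byte_zero_mask);
}

int main(void)
{
	unsigned char pkg[] = { 0x48, 0x12 };

	printf("PkgLength = 0x%X\n", decode_pkg_length(pkg));	/* Prints 0x128 */
	return 0;
}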
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_next_package_end
+ *
+ * PARAMETERS:  parser_state        - Current parser state object
+ *
+ * RETURN:      Pointer to end-of-package +1
+ *
+ * DESCRIPTION: Get next package length and return a pointer past the end of
+ *              the package.  Consumes the package length field.
+ *
+ ******************************************************************************/
+
+u8 *acpi_ps_get_next_package_end(struct acpi_parse_state *parser_state)
+{
+	u8 *start = parser_state->aml;
+	u32 package_length;
+
+	ACPI_FUNCTION_TRACE(ps_get_next_package_end);
+
+	/* Function below updates parser_state->Aml */
+
+	package_length = acpi_ps_get_next_package_length(parser_state);
+
+	return_PTR(start + package_length);	/* end of package */
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_next_namestring
+ *
+ * PARAMETERS:  parser_state        - Current parser state object
+ *
+ * RETURN:      Pointer to the start of the name string (pointer points into
+ *              the AML).
+ *
+ * DESCRIPTION: Get next raw namestring within the AML stream.  Handles all name
+ *              prefix characters.  Set parser state to point past the string.
+ *              (Name is consumed from the AML.)
+ *
+ ******************************************************************************/
+
+char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
+{
+	u8 *start = parser_state->aml;
+	u8 *end = parser_state->aml;
+
+	ACPI_FUNCTION_TRACE(ps_get_next_namestring);
+
+	/* Point past any namestring prefix characters (backslash or caret) */
+
+	while (acpi_ps_is_prefix_char(*end)) {
+		end++;
+	}
+
+	/* Decode the path prefix character */
+
+	switch (*end) {
+	case 0:
+
+		/* null_name */
+
+		if (end == start) {
+			start = NULL;
+		}
+		end++;
+		break;
+
+	case AML_DUAL_NAME_PREFIX:
+
+		/* Two name segments */
+
+		end += 1 + (2 * ACPI_NAME_SIZE);
+		break;
+
+	case AML_MULTI_NAME_PREFIX_OP:
+
+		/* Multiple name segments, 4 chars each, count in next byte */
+
+		end += 2 + (*(end + 1) * ACPI_NAME_SIZE);
+		break;
+
+	default:
+
+		/* Single name segment */
+
+		end += ACPI_NAME_SIZE;
+		break;
+	}
+
+	parser_state->aml = end;
+	return_PTR((char *)start);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_next_namepath
+ *
+ * PARAMETERS:  parser_state        - Current parser state object
+ *              Arg                 - Where the namepath will be stored
+ *              arg_count           - If the namepath points to a control method
+ *                                    the method's argument count is returned here.
+ *              possible_method_call - Whether the namepath can possibly be the
+ *                                    start of a method call
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get next name (if method call, return # of required args).
+ *              Names are looked up in the internal namespace to determine
+ *              if the name represents a control method.  If a method
+ *              is found, the number of arguments to the method is returned.
+ *              This information is critical for parsing to continue correctly.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
+			  struct acpi_parse_state *parser_state,
+			  union acpi_parse_object *arg, u8 possible_method_call)
+{
+	acpi_status status;
+	char *path;
+	union acpi_parse_object *name_op;
+	union acpi_operand_object *method_desc;
+	struct acpi_namespace_node *node;
+	u8 *start = parser_state->aml;
+
+	ACPI_FUNCTION_TRACE(ps_get_next_namepath);
+
+	path = acpi_ps_get_next_namestring(parser_state);
+	acpi_ps_init_op(arg, AML_INT_NAMEPATH_OP);
+
+	/* Null path case is allowed, just exit */
+
+	if (!path) {
+		arg->common.value.name = path;
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/*
+	 * Lookup the name in the internal namespace, starting with the current
+	 * scope. We don't want to add anything new to the namespace here,
+	 * however, so we use MODE_EXECUTE.
+	 * Allow searching of the parent tree, but don't open a new scope -
+	 * we just want to look up the object (must be mode EXECUTE to perform
+	 * the upsearch)
+	 */
+	status = acpi_ns_lookup(walk_state->scope_info, path,
+				ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
+				ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE,
+				NULL, &node);
+
+	/*
+	 * If this name is a control method invocation, we must
+	 * setup the method call
+	 */
+	if (ACPI_SUCCESS(status) &&
+	    possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
+		if (walk_state->opcode == AML_UNLOAD_OP) {
+			/*
+			 * acpi_ps_get_next_namestring has increased the AML pointer,
+			 * so we need to restore the saved AML pointer for method call.
+			 */
+			walk_state->parser_state.aml = start;
+			walk_state->arg_count = 1;
+			acpi_ps_init_op(arg, AML_INT_METHODCALL_OP);
+			return_ACPI_STATUS(AE_OK);
+		}
+
+		/* This name is actually a control method invocation */
+
+		method_desc = acpi_ns_get_attached_object(node);
+		ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+				  "Control Method - %p Desc %p Path=%p\n", node,
+				  method_desc, path));
+
+		name_op = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+		if (!name_op) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		/* Change Arg into a METHOD CALL and attach name to it */
+
+		acpi_ps_init_op(arg, AML_INT_METHODCALL_OP);
+		name_op->common.value.name = path;
+
+		/* Point METHODCALL/NAME to the METHOD Node */
+
+		name_op->common.node = node;
+		acpi_ps_append_arg(arg, name_op);
+
+		if (!method_desc) {
+			ACPI_ERROR((AE_INFO,
+				    "Control Method %p has no attached object",
+				    node));
+			return_ACPI_STATUS(AE_AML_INTERNAL);
+		}
+
+		ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+				  "Control Method - %p Args %X\n",
+				  node, method_desc->method.param_count));
+
+		/* Get the number of arguments to expect */
+
+		walk_state->arg_count = method_desc->method.param_count;
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/*
+	 * Special handling if the name was not found during the lookup -
+	 * some not_found cases are allowed
+	 */
+	if (status == AE_NOT_FOUND) {
+
+		/* 1) not_found is ok during load pass 1/2 (allow forward references) */
+
+		if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) !=
+		    ACPI_PARSE_EXECUTE) {
+			status = AE_OK;
+		}
+
+		/* 2) not_found during a cond_ref_of(x) is ok by definition */
+
+		else if (walk_state->op->common.aml_opcode ==
+			 AML_COND_REF_OF_OP) {
+			status = AE_OK;
+		}
+
+		/*
+		 * 3) not_found while building a Package is ok at this point, we
+		 * may flag as an error later if slack mode is not enabled.
+		 * (Some ASL code depends on allowing this behavior)
+		 */
+		else if ((arg->common.parent) &&
+			 ((arg->common.parent->common.aml_opcode ==
+			   AML_PACKAGE_OP)
+			  || (arg->common.parent->common.aml_opcode ==
+			      AML_VAR_PACKAGE_OP))) {
+			status = AE_OK;
+		}
+	}
+
+	/* Final exception check (may have been changed from code above) */
+
+	if (ACPI_FAILURE(status)) {
+		ACPI_ERROR_NAMESPACE(path, status);
+
+		if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
+		    ACPI_PARSE_EXECUTE) {
+
+			/* Report a control method execution error */
+
+			status = acpi_ds_method_error(status, walk_state);
+		}
+	}
+
+	/* Save the namepath */
+
+	arg->common.value.name = path;
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_next_simple_arg
+ *
+ * PARAMETERS:  parser_state        - Current parser state object
+ *              arg_type            - The argument type (AML_*_ARG)
+ *              Arg                 - Where the argument is returned
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Get the next simple argument (constant, string, or namestring)
+ *
+ ******************************************************************************/
+
+void
+acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
+			    u32 arg_type, union acpi_parse_object *arg)
+{
+	u32 length;
+	u16 opcode;
+	u8 *aml = parser_state->aml;
+
+	ACPI_FUNCTION_TRACE_U32(ps_get_next_simple_arg, arg_type);
+
+	switch (arg_type) {
+	case ARGP_BYTEDATA:
+
+		/* Get 1 byte from the AML stream */
+
+		opcode = AML_BYTE_OP;
+		arg->common.value.integer = (acpi_integer) * aml;
+		length = 1;
+		break;
+
+	case ARGP_WORDDATA:
+
+		/* Get 2 bytes from the AML stream */
+
+		opcode = AML_WORD_OP;
+		ACPI_MOVE_16_TO_64(&arg->common.value.integer, aml);
+		length = 2;
+		break;
+
+	case ARGP_DWORDDATA:
+
+		/* Get 4 bytes from the AML stream */
+
+		opcode = AML_DWORD_OP;
+		ACPI_MOVE_32_TO_64(&arg->common.value.integer, aml);
+		length = 4;
+		break;
+
+	case ARGP_QWORDDATA:
+
+		/* Get 8 bytes from the AML stream */
+
+		opcode = AML_QWORD_OP;
+		ACPI_MOVE_64_TO_64(&arg->common.value.integer, aml);
+		length = 8;
+		break;
+
+	case ARGP_CHARLIST:
+
+		/* Get a pointer to the string, point past the string */
+
+		opcode = AML_STRING_OP;
+		arg->common.value.string = ACPI_CAST_PTR(char, aml);
+
+		/* Find the null terminator */
+
+		length = 0;
+		while (aml[length]) {
+			length++;
+		}
+		length++;
+		break;
+
+	case ARGP_NAME:
+	case ARGP_NAMESTRING:
+
+		acpi_ps_init_op(arg, AML_INT_NAMEPATH_OP);
+		arg->common.value.name =
+		    acpi_ps_get_next_namestring(parser_state);
+		return_VOID;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Invalid ArgType %X", arg_type));
+		return_VOID;
+	}
+
+	acpi_ps_init_op(arg, opcode);
+	parser_state->aml += length;
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_next_field
+ *
+ * PARAMETERS:  parser_state        - Current parser state object
+ *
+ * RETURN:      A newly allocated FIELD op
+ *
+ * DESCRIPTION: Get next field (named_field, reserved_field, or access_field)
+ *
+ ******************************************************************************/
+
+static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
+						       *parser_state)
+{
+	u32 aml_offset = (u32)
+	    ACPI_PTR_DIFF(parser_state->aml,
+			  parser_state->aml_start);
+	union acpi_parse_object *field;
+	u16 opcode;
+	u32 name;
+
+	ACPI_FUNCTION_TRACE(ps_get_next_field);
+
+	/* Determine field type */
+
+	switch (ACPI_GET8(parser_state->aml)) {
+	default:
+
+		opcode = AML_INT_NAMEDFIELD_OP;
+		break;
+
+	case 0x00:
+
+		opcode = AML_INT_RESERVEDFIELD_OP;
+		parser_state->aml++;
+		break;
+
+	case 0x01:
+
+		opcode = AML_INT_ACCESSFIELD_OP;
+		parser_state->aml++;
+		break;
+	}
+
+	/* Allocate a new field op */
+
+	field = acpi_ps_alloc_op(opcode);
+	if (!field) {
+		return_PTR(NULL);
+	}
+
+	field->common.aml_offset = aml_offset;
+
+	/* Decode the field type */
+
+	switch (opcode) {
+	case AML_INT_NAMEDFIELD_OP:
+
+		/* Get the 4-character name */
+
+		ACPI_MOVE_32_TO_32(&name, parser_state->aml);
+		acpi_ps_set_name(field, name);
+		parser_state->aml += ACPI_NAME_SIZE;
+
+		/* Get the length which is encoded as a package length */
+
+		field->common.value.size =
+		    acpi_ps_get_next_package_length(parser_state);
+		break;
+
+	case AML_INT_RESERVEDFIELD_OP:
+
+		/* Get the length which is encoded as a package length */
+
+		field->common.value.size =
+		    acpi_ps_get_next_package_length(parser_state);
+		break;
+
+	case AML_INT_ACCESSFIELD_OP:
+
+		/*
+		 * Get access_type and access_attrib and merge them into the field Op.
+		 * access_type is the first operand, access_attribute is the second.
+		 */
+		field->common.value.integer =
+		    (((u32) ACPI_GET8(parser_state->aml) << 8));
+		parser_state->aml++;
+		field->common.value.integer |= ACPI_GET8(parser_state->aml);
+		parser_state->aml++;
+		break;
+
+	default:
+
+		/* Opcode was set in previous switch */
+		break;
+	}
+
+	return_PTR(field);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_next_arg
+ *
+ * PARAMETERS:  walk_state          - Current state
+ *              parser_state        - Current parser state object
+ *              arg_type            - The argument type (AML_*_ARG)
+ *              return_arg          - Where the next arg is returned
+ *
+ * RETURN:      Status, and an op object containing the next argument.
+ *
+ * DESCRIPTION: Get next argument (including complex list arguments that require
+ *              pushing the parser stack)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
+		     struct acpi_parse_state *parser_state,
+		     u32 arg_type, union acpi_parse_object **return_arg)
+{
+	union acpi_parse_object *arg = NULL;
+	union acpi_parse_object *prev = NULL;
+	union acpi_parse_object *field;
+	u32 subop;
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE_PTR(ps_get_next_arg, parser_state);
+
+	switch (arg_type) {
+	case ARGP_BYTEDATA:
+	case ARGP_WORDDATA:
+	case ARGP_DWORDDATA:
+	case ARGP_CHARLIST:
+	case ARGP_NAME:
+	case ARGP_NAMESTRING:
+
+		/* Constants, strings, and namestrings are all the same size */
+
+		arg = acpi_ps_alloc_op(AML_BYTE_OP);
+		if (!arg) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+		acpi_ps_get_next_simple_arg(parser_state, arg_type, arg);
+		break;
+
+	case ARGP_PKGLENGTH:
+
+		/* Package length, nothing returned */
+
+		parser_state->pkg_end =
+		    acpi_ps_get_next_package_end(parser_state);
+		break;
+
+	case ARGP_FIELDLIST:
+
+		if (parser_state->aml < parser_state->pkg_end) {
+
+			/* Non-empty list */
+
+			while (parser_state->aml < parser_state->pkg_end) {
+				field = acpi_ps_get_next_field(parser_state);
+				if (!field) {
+					return_ACPI_STATUS(AE_NO_MEMORY);
+				}
+
+				if (prev) {
+					prev->common.next = field;
+				} else {
+					arg = field;
+				}
+				prev = field;
+			}
+
+			/* Skip to End of byte data */
+
+			parser_state->aml = parser_state->pkg_end;
+		}
+		break;
+
+	case ARGP_BYTELIST:
+
+		if (parser_state->aml < parser_state->pkg_end) {
+
+			/* Non-empty list */
+
+			arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
+			if (!arg) {
+				return_ACPI_STATUS(AE_NO_MEMORY);
+			}
+
+			/* Fill in bytelist data */
+
+			arg->common.value.size = (u32)
+			    ACPI_PTR_DIFF(parser_state->pkg_end,
+					  parser_state->aml);
+			arg->named.data = parser_state->aml;
+
+			/* Skip to End of byte data */
+
+			parser_state->aml = parser_state->pkg_end;
+		}
+		break;
+
+	case ARGP_TARGET:
+	case ARGP_SUPERNAME:
+	case ARGP_SIMPLENAME:
+
+		subop = acpi_ps_peek_opcode(parser_state);
+		if (subop == 0 ||
+		    acpi_ps_is_leading_char(subop) ||
+		    acpi_ps_is_prefix_char(subop)) {
+
+			/* null_name or name_string */
+
+			arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+			if (!arg) {
+				return_ACPI_STATUS(AE_NO_MEMORY);
+			}
+
+			/* To support super_name arg of Unload */
+
+			if (walk_state->opcode == AML_UNLOAD_OP) {
+				status =
+				    acpi_ps_get_next_namepath(walk_state,
+							      parser_state, arg,
+							      1);
+
+				/*
+				 * If the super_name arg of Unload is a method call,
+				 * we have restored the AML pointer, just free this Arg
+				 */
+				if (arg->common.aml_opcode ==
+				    AML_INT_METHODCALL_OP) {
+					acpi_ps_free_op(arg);
+					arg = NULL;
+				}
+			} else {
+				status =
+				    acpi_ps_get_next_namepath(walk_state,
+							      parser_state, arg,
+							      0);
+			}
+		} else {
+			/* Single complex argument, nothing returned */
+
+			walk_state->arg_count = 1;
+		}
+		break;
+
+	case ARGP_DATAOBJ:
+	case ARGP_TERMARG:
+
+		/* Single complex argument, nothing returned */
+
+		walk_state->arg_count = 1;
+		break;
+
+	case ARGP_DATAOBJLIST:
+	case ARGP_TERMLIST:
+	case ARGP_OBJLIST:
+
+		if (parser_state->aml < parser_state->pkg_end) {
+
+			/* Non-empty list of variable arguments, nothing returned */
+
+			walk_state->arg_count = ACPI_VAR_ARGS;
+		}
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Invalid ArgType: %X", arg_type));
+		status = AE_AML_OPERAND_TYPE;
+		break;
+	}
+
+	*return_arg = arg;
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
new file mode 100644
index 0000000..c5f6ce1
--- /dev/null
+++ b/drivers/acpi/acpica/psloop.c
@@ -0,0 +1,1088 @@
+/******************************************************************************
+ *
+ * Module Name: psloop - Main AML parse loop
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+/*
+ * Parse the AML and build an operation tree, as most interpreters (such as
+ * Perl) do. Parsing is done by hand rather than with a YACC-generated parser
+ * to tightly constrain stack and dynamic memory usage. Parsing is kept
+ * flexible and the code fairly compact by parsing based on a list of AML
+ * opcode templates in aml_op_info[].
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "acdispat.h"
+#include "amlcode.h"
+
+#define _COMPONENT          ACPI_PARSER
+ACPI_MODULE_NAME("psloop")
+
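+/*
+ * Depth of named-object nesting (incremented in acpi_ps_build_named_op,
+ * decremented in acpi_ps_parse_loop when a named Op completes)
+ */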
+static u32 acpi_gbl_depth = 0;
+
+/* Local prototypes */
+
+static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state);
+
+static acpi_status
+acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
+		       u8 * aml_op_start,
+		       union acpi_parse_object *unnamed_op,
+		       union acpi_parse_object **op);
+
+static acpi_status
+acpi_ps_create_op(struct acpi_walk_state *walk_state,
+		  u8 * aml_op_start, union acpi_parse_object **new_op);
+
+static acpi_status
+acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
+		      u8 * aml_op_start, union acpi_parse_object *op);
+
+static acpi_status
+acpi_ps_complete_op(struct acpi_walk_state *walk_state,
+		    union acpi_parse_object **op, acpi_status status);
+
+static acpi_status
+acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
+			  union acpi_parse_object *op, acpi_status status);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_aml_opcode
+ *
+ * PARAMETERS:  walk_state          - Current state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Extract the next AML opcode from the input stream.
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
+{
+
+	ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state);
+
+	walk_state->aml_offset =
+	    (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
+				walk_state->parser_state.aml_start);
+	walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state));
+
+	/*
+	 * First cut to determine what we have found:
+	 * 1) A valid AML opcode
+	 * 2) A name string
+	 * 3) An unknown/invalid opcode
+	 */
+	walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
+
+	switch (walk_state->op_info->class) {
+	case AML_CLASS_ASCII:
+	case AML_CLASS_PREFIX:
+		/*
+		 * Starts with a valid prefix or ASCII char, this is a name
+		 * string. Convert the bare name string to a namepath.
+		 */
+		walk_state->opcode = AML_INT_NAMEPATH_OP;
+		walk_state->arg_types = ARGP_NAMESTRING;
+		break;
+
+	case AML_CLASS_UNKNOWN:
+
+		/* The opcode is unrecognized. Just skip unknown opcodes */
+
+		ACPI_ERROR((AE_INFO,
+			    "Found unknown opcode %X at AML address %p offset %X, ignoring",
+			    walk_state->opcode, walk_state->parser_state.aml,
+			    walk_state->aml_offset));
+
+		ACPI_DUMP_BUFFER(walk_state->parser_state.aml, 128);
+
+		/* Assume one-byte bad opcode */
+
+		walk_state->parser_state.aml++;
+		return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+
+	default:
+
+		/* Found opcode info, this is a normal opcode */
+
+		walk_state->parser_state.aml +=
+		    acpi_ps_get_opcode_size(walk_state->opcode);
+		walk_state->arg_types = walk_state->op_info->parse_args;
+		break;
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_build_named_op
+ *
+ * PARAMETERS:  walk_state          - Current state
+ *              aml_op_start        - Begin of named Op in AML
+ *              unnamed_op          - Early Op (not a named Op)
+ *              Op                  - Returned Op
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Parse a named Op
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
+		       u8 * aml_op_start,
+		       union acpi_parse_object *unnamed_op,
+		       union acpi_parse_object **op)
+{
+	acpi_status status = AE_OK;
+	union acpi_parse_object *arg = NULL;
+
+	ACPI_FUNCTION_TRACE_PTR(ps_build_named_op, walk_state);
+
+	unnamed_op->common.value.arg = NULL;
+	unnamed_op->common.arg_list_length = 0;
+	unnamed_op->common.aml_opcode = walk_state->opcode;
+
+	/*
+	 * Get and append arguments until we find the node that contains
+	 * the name (the type ARGP_NAME).
+	 */
+	while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) &&
+	       (GET_CURRENT_ARG_TYPE(walk_state->arg_types) != ARGP_NAME)) {
+		status =
+		    acpi_ps_get_next_arg(walk_state,
+					 &(walk_state->parser_state),
+					 GET_CURRENT_ARG_TYPE(walk_state->
+							      arg_types), &arg);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		acpi_ps_append_arg(unnamed_op, arg);
+		INCREMENT_ARG_LIST(walk_state->arg_types);
+	}
+
+	/*
+	 * Make sure that we found a NAME and didn't run out of arguments
+	 */
+	if (!GET_CURRENT_ARG_TYPE(walk_state->arg_types)) {
+		return_ACPI_STATUS(AE_AML_NO_OPERAND);
+	}
+
+	/* We know that this arg is a name, move to next arg */
+
+	INCREMENT_ARG_LIST(walk_state->arg_types);
+
+	/*
+	 * Find the object. This will either insert the object into
+	 * the namespace or simply look it up
+	 */
+	walk_state->op = NULL;
+
+	status = walk_state->descending_callback(walk_state, op);
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status, "During name lookup/catalog"));
+		return_ACPI_STATUS(status);
+	}
+
+	if (!*op) {
+		return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+	}
+
+	status = acpi_ps_next_parse_state(walk_state, *op, status);
+	if (ACPI_FAILURE(status)) {
+		if (status == AE_CTRL_PENDING) {
+			return_ACPI_STATUS(AE_CTRL_PARSE_PENDING);
+		}
+		return_ACPI_STATUS(status);
+	}
+
+	acpi_ps_append_arg(*op, unnamed_op->common.value.arg);
+	acpi_gbl_depth++;
+
+	if ((*op)->common.aml_opcode == AML_REGION_OP ||
+	    (*op)->common.aml_opcode == AML_DATA_REGION_OP) {
+		/*
+		 * Defer final parsing of an operation_region body, because we don't
+		 * have enough info in the first pass to parse it correctly (i.e.,
+		 * there may be method calls within the term_arg elements of the body.)
+		 *
+		 * However, we must continue parsing because the opregion is not a
+		 * standalone package -- we don't know where the end is at this point.
+		 *
+		 * (Length is unknown until parsing of the body is complete)
+		 */
+		(*op)->named.data = aml_op_start;
+		(*op)->named.length = 0;
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_create_op
+ *
+ * PARAMETERS:  walk_state          - Current state
+ *              aml_op_start        - Op start in AML
+ *              new_op              - Returned Op
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get Op from AML
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ps_create_op(struct acpi_walk_state *walk_state,
+		  u8 * aml_op_start, union acpi_parse_object **new_op)
+{
+	acpi_status status = AE_OK;
+	union acpi_parse_object *op;
+	union acpi_parse_object *named_op = NULL;
+	union acpi_parse_object *parent_scope;
+	u8 argument_count;
+	const struct acpi_opcode_info *op_info;
+
+	ACPI_FUNCTION_TRACE_PTR(ps_create_op, walk_state);
+
+	status = acpi_ps_get_aml_opcode(walk_state);
+	if (status == AE_CTRL_PARSE_CONTINUE) {
+		return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+	}
+
+	/* Create Op structure and append to parent's argument list */
+
+	walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
+	op = acpi_ps_alloc_op(walk_state->opcode);
+	if (!op) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	if (walk_state->op_info->flags & AML_NAMED) {
+		status =
+		    acpi_ps_build_named_op(walk_state, aml_op_start, op,
+					   &named_op);
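+
+		/* The unnamed Op was only a temporary container; free it now */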
+		acpi_ps_free_op(op);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		*new_op = named_op;
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Not a named opcode, just allocate Op and append to parent */
+
+	if (walk_state->op_info->flags & AML_CREATE) {
+		/*
+		 * Backup to beginning of create_xXXfield declaration
+		 * body_length is unknown until we parse the body
+		 */
+		op->named.data = aml_op_start;
+		op->named.length = 0;
+	}
+
+	if (walk_state->opcode == AML_BANK_FIELD_OP) {
+		/*
+		 * Backup to beginning of bank_field declaration
+		 * body_length is unknown until we parse the body
+		 */
+		op->named.data = aml_op_start;
+		op->named.length = 0;
+	}
+
+	parent_scope = acpi_ps_get_parent_scope(&(walk_state->parser_state));
+	acpi_ps_append_arg(parent_scope, op);
+
+	if (parent_scope) {
+		op_info =
+		    acpi_ps_get_opcode_info(parent_scope->common.aml_opcode);
+		if (op_info->flags & AML_HAS_TARGET) {
+			argument_count =
+			    acpi_ps_get_argument_count(op_info->type);
+			if (parent_scope->common.arg_list_length >
+			    argument_count) {
+				op->common.flags |= ACPI_PARSEOP_TARGET;
+			}
+		} else if (parent_scope->common.aml_opcode == AML_INCREMENT_OP) {
+			op->common.flags |= ACPI_PARSEOP_TARGET;
+		}
+	}
+
+	if (walk_state->descending_callback != NULL) {
+		/*
+		 * Find the object. This will either insert the object into
+		 * the namespace or simply look it up
+		 */
+		walk_state->op = *new_op = op;
+
+		status = walk_state->descending_callback(walk_state, &op);
+		status = acpi_ps_next_parse_state(walk_state, op, status);
+		if (status == AE_CTRL_PENDING) {
+			status = AE_CTRL_PARSE_PENDING;
+		}
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_arguments
+ *
+ * PARAMETERS:  walk_state          - Current state
+ *              aml_op_start        - Op start in AML
+ *              Op                  - Current Op
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get arguments for passed Op.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
+		      u8 * aml_op_start, union acpi_parse_object *op)
+{
+	acpi_status status = AE_OK;
+	union acpi_parse_object *arg = NULL;
+
+	ACPI_FUNCTION_TRACE_PTR(ps_get_arguments, walk_state);
+
+	switch (op->common.aml_opcode) {
+	case AML_BYTE_OP:	/* AML_BYTEDATA_ARG */
+	case AML_WORD_OP:	/* AML_WORDDATA_ARG */
+	case AML_DWORD_OP:	/* AML_DWORDATA_ARG */
+	case AML_QWORD_OP:	/* AML_QWORDATA_ARG */
+	case AML_STRING_OP:	/* AML_ASCIICHARLIST_ARG */
+
+		/* Fill in constant or string argument directly */
+
+		acpi_ps_get_next_simple_arg(&(walk_state->parser_state),
+					    GET_CURRENT_ARG_TYPE(walk_state->
+								 arg_types),
+					    op);
+		break;
+
+	case AML_INT_NAMEPATH_OP:	/* AML_NAMESTRING_ARG */
+
+		status =
+		    acpi_ps_get_next_namepath(walk_state,
+					      &(walk_state->parser_state), op,
+					      1);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		walk_state->arg_types = 0;
+		break;
+
+	default:
+		/*
+		 * Op is not a constant or string, append each argument to the Op
+		 */
+		while (GET_CURRENT_ARG_TYPE(walk_state->arg_types)
+		       && !walk_state->arg_count) {
+			walk_state->aml_offset =
+			    (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
+						walk_state->parser_state.
+						aml_start);
+
+			status =
+			    acpi_ps_get_next_arg(walk_state,
+						 &(walk_state->parser_state),
+						 GET_CURRENT_ARG_TYPE
+						 (walk_state->arg_types), &arg);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+
+			if (arg) {
+				arg->common.aml_offset = walk_state->aml_offset;
+				acpi_ps_append_arg(op, arg);
+			}
+
+			INCREMENT_ARG_LIST(walk_state->arg_types);
+		}
+
+		/* Special processing for certain opcodes */
+
+		/* TBD (remove): Temporary mechanism to disable this code if needed */
+
+#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE
+
+		if ((walk_state->pass_number <= ACPI_IMODE_LOAD_PASS1) &&
+		    ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) == 0)) {
+			/*
+			 * We want to skip If/Else/While constructs during Pass1 because we
+			 * want to actually conditionally execute the code during Pass2.
+			 *
+			 * Except for disassembly, where we always want to walk the
+			 * If/Else/While packages
+			 */
+			switch (op->common.aml_opcode) {
+			case AML_IF_OP:
+			case AML_ELSE_OP:
+			case AML_WHILE_OP:
+
+				ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+						  "Pass1: Skipping an If/Else/While body\n"));
+
+				/* Skip body of if/else/while in pass 1 */
+
+				walk_state->parser_state.aml =
+				    walk_state->parser_state.pkg_end;
+				walk_state->arg_count = 0;
+				break;
+
+			default:
+				break;
+			}
+		}
+#endif
+
+		switch (op->common.aml_opcode) {
+		case AML_METHOD_OP:
+			/*
+			 * Skip parsing of control method because we don't have enough
+			 * info in the first pass to parse it correctly.
+			 *
+			 * Save the length and address of the body
+			 */
+			op->named.data = walk_state->parser_state.aml;
+			op->named.length = (u32)
+			    (walk_state->parser_state.pkg_end -
+			     walk_state->parser_state.aml);
+
+			/* Skip body of method */
+
+			walk_state->parser_state.aml =
+			    walk_state->parser_state.pkg_end;
+			walk_state->arg_count = 0;
+			break;
+
+		case AML_BUFFER_OP:
+		case AML_PACKAGE_OP:
+		case AML_VAR_PACKAGE_OP:
+
+			if ((op->common.parent) &&
+			    (op->common.parent->common.aml_opcode ==
+			     AML_NAME_OP)
+			    && (walk_state->pass_number <=
+				ACPI_IMODE_LOAD_PASS2)) {
+				/*
+				 * Skip parsing of Buffers and Packages because we don't have
+				 * enough info in the first pass to parse them correctly.
+				 */
+				op->named.data = aml_op_start;
+				op->named.length = (u32)
+				    (walk_state->parser_state.pkg_end -
+				     aml_op_start);
+
+				/* Skip body */
+
+				walk_state->parser_state.aml =
+				    walk_state->parser_state.pkg_end;
+				walk_state->arg_count = 0;
+			}
+			break;
+
+		case AML_WHILE_OP:
+
+			if (walk_state->control_state) {
+				walk_state->control_state->control.package_end =
+				    walk_state->parser_state.pkg_end;
+			}
+			break;
+
+		default:
+
+			/* No action for all other opcodes */
+			break;
+		}
+
+		break;
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_complete_op
+ *
+ * PARAMETERS:  walk_state          - Current state
+ *              Op                  - Returned Op
+ *              Status              - Parse status before complete Op
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Complete Op
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ps_complete_op(struct acpi_walk_state *walk_state,
+		    union acpi_parse_object **op, acpi_status status)
+{
+	acpi_status status2;
+
+	ACPI_FUNCTION_TRACE_PTR(ps_complete_op, walk_state);
+
+	/*
+	 * Finished one argument of the containing scope
+	 */
+	walk_state->parser_state.scope->parse_scope.arg_count--;
+
+	/* Close this Op (will result in parse subtree deletion) */
+
+	status2 = acpi_ps_complete_this_op(walk_state, *op);
+	if (ACPI_FAILURE(status2)) {
+		return_ACPI_STATUS(status2);
+	}
+
+	*op = NULL;
+
+	switch (status) {
+	case AE_OK:
+		break;
+
+	case AE_CTRL_TRANSFER:
+
+		/* We are about to transfer to a called method */
+
+		walk_state->prev_op = NULL;
+		walk_state->prev_arg_types = walk_state->arg_types;
+		return_ACPI_STATUS(status);
+
+	case AE_CTRL_END:
+
+		acpi_ps_pop_scope(&(walk_state->parser_state), op,
+				  &walk_state->arg_types,
+				  &walk_state->arg_count);
+
+		if (*op) {
+			walk_state->op = *op;
+			walk_state->op_info =
+			    acpi_ps_get_opcode_info((*op)->common.aml_opcode);
+			walk_state->opcode = (*op)->common.aml_opcode;
+
+			status = walk_state->ascending_callback(walk_state);
+			status =
+			    acpi_ps_next_parse_state(walk_state, *op, status);
+
+			status2 = acpi_ps_complete_this_op(walk_state, *op);
+			if (ACPI_FAILURE(status2)) {
+				return_ACPI_STATUS(status2);
+			}
+		}
+
+		status = AE_OK;
+		break;
+
+	case AE_CTRL_BREAK:
+	case AE_CTRL_CONTINUE:
+
+		/* Pop off scopes until we find the While */
+
+		while (!(*op) || ((*op)->common.aml_opcode != AML_WHILE_OP)) {
+			acpi_ps_pop_scope(&(walk_state->parser_state), op,
+					  &walk_state->arg_types,
+					  &walk_state->arg_count);
+		}
+
+		/* Close this iteration of the While loop */
+
+		walk_state->op = *op;
+		walk_state->op_info =
+		    acpi_ps_get_opcode_info((*op)->common.aml_opcode);
+		walk_state->opcode = (*op)->common.aml_opcode;
+
+		status = walk_state->ascending_callback(walk_state);
+		status = acpi_ps_next_parse_state(walk_state, *op, status);
+
+		status2 = acpi_ps_complete_this_op(walk_state, *op);
+		if (ACPI_FAILURE(status2)) {
+			return_ACPI_STATUS(status2);
+		}
+
+		status = AE_OK;
+		break;
+
+	case AE_CTRL_TERMINATE:
+
+		/* Clean up */
+		do {
+			if (*op) {
+				status2 =
+				    acpi_ps_complete_this_op(walk_state, *op);
+				if (ACPI_FAILURE(status2)) {
+					return_ACPI_STATUS(status2);
+				}
+
+				acpi_ut_delete_generic_state
+				    (acpi_ut_pop_generic_state
+				     (&walk_state->control_state));
+			}
+
+			acpi_ps_pop_scope(&(walk_state->parser_state), op,
+					  &walk_state->arg_types,
+					  &walk_state->arg_count);
+
+		} while (*op);
+
+		return_ACPI_STATUS(AE_OK);
+
+	default:		/* All other non-AE_OK status */
+
+		do {
+			if (*op) {
+				status2 =
+				    acpi_ps_complete_this_op(walk_state, *op);
+				if (ACPI_FAILURE(status2)) {
+					return_ACPI_STATUS(status2);
+				}
+			}
+
+			acpi_ps_pop_scope(&(walk_state->parser_state), op,
+					  &walk_state->arg_types,
+					  &walk_state->arg_count);
+
+		} while (*op);
+
+#if 0
+		/*
+		 * TBD: Cleanup parse ops on error
+		 */
+		if (*op == NULL) {
+			acpi_ps_pop_scope(parser_state, op,
+					  &walk_state->arg_types,
+					  &walk_state->arg_count);
+		}
+#endif
+		walk_state->prev_op = NULL;
+		walk_state->prev_arg_types = walk_state->arg_types;
+		return_ACPI_STATUS(status);
+	}
+
+	/* This scope complete? */
+
+	if (acpi_ps_has_completed_scope(&(walk_state->parser_state))) {
+		acpi_ps_pop_scope(&(walk_state->parser_state), op,
+				  &walk_state->arg_types,
+				  &walk_state->arg_count);
+		ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Popped scope, Op=%p\n", *op));
+	} else {
+		*op = NULL;
+	}
+
+	ACPI_PREEMPTION_POINT();
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_complete_final_op
+ *
+ * PARAMETERS:  walk_state          - Current state
+ *              Op                  - Current Op
+ *              Status              - Current parse status before complete last
+ *                                    Op
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Complete last Op.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
+			  union acpi_parse_object *op, acpi_status status)
+{
+	acpi_status status2;
+
+	ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
+
+	/*
+	 * Complete the last Op (if not completed), and clear the scope stack.
+	 * It is easily possible to end an AML "package" with an unbounded number
+	 * of open scopes (such as when several ASL blocks are closed with
+	 * sequential closing braces). We want to terminate each one cleanly.
+	 */
+	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "AML package complete at Op %p\n",
+			  op));
+	do {
+		if (op) {
+			if (walk_state->ascending_callback != NULL) {
+				walk_state->op = op;
+				walk_state->op_info =
+				    acpi_ps_get_opcode_info(op->common.
+							    aml_opcode);
+				walk_state->opcode = op->common.aml_opcode;
+
+				status =
+				    walk_state->ascending_callback(walk_state);
+				status =
+				    acpi_ps_next_parse_state(walk_state, op,
+							     status);
+				if (status == AE_CTRL_PENDING) {
+					status =
+					    acpi_ps_complete_op(walk_state, &op,
+								AE_OK);
+					if (ACPI_FAILURE(status)) {
+						return_ACPI_STATUS(status);
+					}
+				}
+
+				if (status == AE_CTRL_TERMINATE) {
+					status = AE_OK;
+
+					/* Clean up */
+					do {
+						if (op) {
+							status2 =
+							    acpi_ps_complete_this_op
+							    (walk_state, op);
+							if (ACPI_FAILURE
+							    (status2)) {
+								return_ACPI_STATUS
+								    (status2);
+							}
+						}
+
+						acpi_ps_pop_scope(&
+								  (walk_state->
+								   parser_state),
+								  &op,
+								  &walk_state->
+								  arg_types,
+								  &walk_state->
+								  arg_count);
+
+					} while (op);
+
+					return_ACPI_STATUS(status);
+				}
+
+				else if (ACPI_FAILURE(status)) {
+
+					/* First error is most important */
+
+					(void)
+					    acpi_ps_complete_this_op(walk_state,
+								     op);
+					return_ACPI_STATUS(status);
+				}
+			}
+
+			status2 = acpi_ps_complete_this_op(walk_state, op);
+			if (ACPI_FAILURE(status2)) {
+				return_ACPI_STATUS(status2);
+			}
+		}
+
+		acpi_ps_pop_scope(&(walk_state->parser_state), &op,
+				  &walk_state->arg_types,
+				  &walk_state->arg_count);
+
+	} while (op);
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_parse_loop
+ *
+ * PARAMETERS:  walk_state          - Current state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Parse AML (pointed to by the current parser state) and return
+ *              a tree of ops.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
+{
+	acpi_status status = AE_OK;
+	union acpi_parse_object *op = NULL;	/* current op */
+	struct acpi_parse_state *parser_state;
+	u8 *aml_op_start = NULL;
+
+	ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state);
+
+	if (walk_state->descending_callback == NULL) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	parser_state = &walk_state->parser_state;
+	walk_state->arg_types = 0;
+
+#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
+
+	if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) {
+
+		/* We are restarting a preempted control method */
+
+		if (acpi_ps_has_completed_scope(parser_state)) {
+			/*
+			 * We must check if a predicate to an IF or WHILE statement
+			 * was just completed
+			 */
+			if ((parser_state->scope->parse_scope.op) &&
+			    ((parser_state->scope->parse_scope.op->common.
+			      aml_opcode == AML_IF_OP)
+			     || (parser_state->scope->parse_scope.op->common.
+				 aml_opcode == AML_WHILE_OP))
+			    && (walk_state->control_state)
+			    && (walk_state->control_state->common.state ==
+				ACPI_CONTROL_PREDICATE_EXECUTING)) {
+				/*
+				 * A predicate was just completed, get the value of the
+				 * predicate and branch based on that value
+				 */
+				walk_state->op = NULL;
+				status =
+				    acpi_ds_get_predicate_value(walk_state,
+								ACPI_TO_POINTER
+								(TRUE));
+				if (ACPI_FAILURE(status)
+				    && ((status & AE_CODE_MASK) !=
+					AE_CODE_CONTROL)) {
+					if (status == AE_AML_NO_RETURN_VALUE) {
+						ACPI_EXCEPTION((AE_INFO, status,
+								"Invoked method did not return a value"));
+
+					}
+
+					ACPI_EXCEPTION((AE_INFO, status,
+							"GetPredicate Failed"));
+					return_ACPI_STATUS(status);
+				}
+
+				status =
+				    acpi_ps_next_parse_state(walk_state, op,
+							     status);
+			}
+
+			acpi_ps_pop_scope(parser_state, &op,
+					  &walk_state->arg_types,
+					  &walk_state->arg_count);
+			ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+					  "Popped scope, Op=%p\n", op));
+		} else if (walk_state->prev_op) {
+
+			/* We were in the middle of an op */
+
+			op = walk_state->prev_op;
+			walk_state->arg_types = walk_state->prev_arg_types;
+		}
+	}
+#endif
+
+	/* Iterative parsing loop, while there is more AML to process: */
+
+	while ((parser_state->aml < parser_state->aml_end) || (op)) {
+		aml_op_start = parser_state->aml;
+		if (!op) {
+			status =
+			    acpi_ps_create_op(walk_state, aml_op_start, &op);
+			if (ACPI_FAILURE(status)) {
+				if (status == AE_CTRL_PARSE_CONTINUE) {
+					continue;
+				}
+
+				if (status == AE_CTRL_PARSE_PENDING) {
+					status = AE_OK;
+				}
+
+				status =
+				    acpi_ps_complete_op(walk_state, &op,
+							status);
+				if (ACPI_FAILURE(status)) {
+					return_ACPI_STATUS(status);
+				}
+
+				continue;
+			}
+
+			op->common.aml_offset = walk_state->aml_offset;
+
+			if (walk_state->op_info) {
+				ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+						  "Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n",
+						  (u32) op->common.aml_opcode,
+						  walk_state->op_info->name, op,
+						  parser_state->aml,
+						  op->common.aml_offset));
+			}
+		}
+
+		/*
+		 * Start arg_count at zero because we don't know if there are
+		 * any args yet
+		 */
+		walk_state->arg_count = 0;
+
+		/* Are there any arguments that must be processed? */
+
+		if (walk_state->arg_types) {
+
+			/* Get arguments */
+
+			status =
+			    acpi_ps_get_arguments(walk_state, aml_op_start, op);
+			if (ACPI_FAILURE(status)) {
+				status =
+				    acpi_ps_complete_op(walk_state, &op,
+							status);
+				if (ACPI_FAILURE(status)) {
+					return_ACPI_STATUS(status);
+				}
+
+				continue;
+			}
+		}
+
+		/* Check for arguments that need to be processed */
+
+		if (walk_state->arg_count) {
+			/*
+			 * There are arguments (complex ones); push the Op and
+			 * prepare for its arguments
+			 */
+			status = acpi_ps_push_scope(parser_state, op,
+						    walk_state->arg_types,
+						    walk_state->arg_count);
+			if (ACPI_FAILURE(status)) {
+				status =
+				    acpi_ps_complete_op(walk_state, &op,
+							status);
+				if (ACPI_FAILURE(status)) {
+					return_ACPI_STATUS(status);
+				}
+
+				continue;
+			}
+
+			op = NULL;
+			continue;
+		}
+
+		/*
+		 * All arguments have been processed -- Op is complete,
+		 * prepare for next
+		 */
+		walk_state->op_info =
+		    acpi_ps_get_opcode_info(op->common.aml_opcode);
+		if (walk_state->op_info->flags & AML_NAMED) {
+			if (acpi_gbl_depth) {
+				acpi_gbl_depth--;
+			}
+
+			if (op->common.aml_opcode == AML_REGION_OP ||
+			    op->common.aml_opcode == AML_DATA_REGION_OP) {
+				/*
+				 * Skip parsing of control method or opregion body,
+				 * because we don't have enough info in the first pass
+				 * to parse them correctly.
+				 *
+				 * Completed parsing an op_region declaration; we now
+				 * know the length.
+				 */
+				op->named.length =
+				    (u32) (parser_state->aml - op->named.data);
+			}
+		}
+
+		if (walk_state->op_info->flags & AML_CREATE) {
+			/*
+			 * Backup to beginning of create_xXXfield declaration (1 for
+			 * Opcode)
+			 *
+			 * body_length is unknown until we parse the body
+			 */
+			op->named.length =
+			    (u32) (parser_state->aml - op->named.data);
+		}
+
+		if (op->common.aml_opcode == AML_BANK_FIELD_OP) {
+			/*
+			 * Backup to beginning of bank_field declaration
+			 *
+			 * body_length is unknown until we parse the body
+			 */
+			op->named.length =
+			    (u32) (parser_state->aml - op->named.data);
+		}
+
+		/* This op complete, notify the dispatcher */
+
+		if (walk_state->ascending_callback != NULL) {
+			walk_state->op = op;
+			walk_state->opcode = op->common.aml_opcode;
+
+			status = walk_state->ascending_callback(walk_state);
+			status =
+			    acpi_ps_next_parse_state(walk_state, op, status);
+			if (status == AE_CTRL_PENDING) {
+				status = AE_OK;
+			}
+		}
+
+		status = acpi_ps_complete_op(walk_state, &op, status);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+	}			/* while parser_state->Aml */
+
+	status = acpi_ps_complete_final_op(walk_state, op, status);
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
new file mode 100644
index 0000000..3bc3a60
--- /dev/null
+++ b/drivers/acpi/acpica/psopcode.c
@@ -0,0 +1,810 @@
+/******************************************************************************
+ *
+ * Module Name: psopcode - Parser/Interpreter opcode information table
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "acopcode.h"
+#include "amlcode.h"
+
+#define _COMPONENT          ACPI_PARSER
+ACPI_MODULE_NAME("psopcode")
+
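+/*
+ * Operand counts indexed by opcode Type value (presumably the AML_TYPE_*
+ * values; see the acpi_ps_get_argument_count usage in psloop.c)
+ */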
+static const u8 acpi_gbl_argument_count[] =
+    { 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };
+
+/*******************************************************************************
+ *
+ * NAME:        acpi_gbl_aml_op_info
+ *
+ * DESCRIPTION: Opcode table. Each entry contains <opcode, type, name, operands>.
+ *              The name is a simple ASCII string; the operand specifier is an
+ *              ASCII string with one letter per operand. The letter specifies
+ *              the operand type.
+ *
+ ******************************************************************************/
+
+/*
+ * Summary of opcode types/flags
+ *
+
+ Opcodes that have associated namespace objects (AML_NSOBJECT flag)
+
+	AML_SCOPE_OP
+	AML_DEVICE_OP
+	AML_THERMAL_ZONE_OP
+	AML_METHOD_OP
+	AML_POWER_RES_OP
+	AML_PROCESSOR_OP
+	AML_FIELD_OP
+	AML_INDEX_FIELD_OP
+	AML_BANK_FIELD_OP
+	AML_NAME_OP
+	AML_ALIAS_OP
+	AML_MUTEX_OP
+	AML_EVENT_OP
+	AML_REGION_OP
+	AML_CREATE_FIELD_OP
+	AML_CREATE_BIT_FIELD_OP
+	AML_CREATE_BYTE_FIELD_OP
+	AML_CREATE_WORD_FIELD_OP
+	AML_CREATE_DWORD_FIELD_OP
+	AML_CREATE_QWORD_FIELD_OP
+	AML_INT_NAMEDFIELD_OP
+	AML_INT_METHODCALL_OP
+	AML_INT_NAMEPATH_OP
+
+  Opcodes that are "namespace" opcodes (AML_NSOPCODE flag)
+
+	AML_SCOPE_OP
+	AML_DEVICE_OP
+	AML_THERMAL_ZONE_OP
+	AML_METHOD_OP
+	AML_POWER_RES_OP
+	AML_PROCESSOR_OP
+	AML_FIELD_OP
+	AML_INDEX_FIELD_OP
+	AML_BANK_FIELD_OP
+	AML_NAME_OP
+	AML_ALIAS_OP
+	AML_MUTEX_OP
+	AML_EVENT_OP
+	AML_REGION_OP
+	AML_INT_NAMEDFIELD_OP
+
+  Opcodes that have an associated namespace node (AML_NSNODE flag)
+
+	AML_SCOPE_OP
+	AML_DEVICE_OP
+	AML_THERMAL_ZONE_OP
+	AML_METHOD_OP
+	AML_POWER_RES_OP
+	AML_PROCESSOR_OP
+	AML_NAME_OP
+	AML_ALIAS_OP
+	AML_MUTEX_OP
+	AML_EVENT_OP
+	AML_REGION_OP
+	AML_CREATE_FIELD_OP
+	AML_CREATE_BIT_FIELD_OP
+	AML_CREATE_BYTE_FIELD_OP
+	AML_CREATE_WORD_FIELD_OP
+	AML_CREATE_DWORD_FIELD_OP
+	AML_CREATE_QWORD_FIELD_OP
+	AML_INT_NAMEDFIELD_OP
+	AML_INT_METHODCALL_OP
+	AML_INT_NAMEPATH_OP
+
+  Opcodes that define named ACPI objects (AML_NAMED flag)
+
+	AML_SCOPE_OP
+	AML_DEVICE_OP
+	AML_THERMAL_ZONE_OP
+	AML_METHOD_OP
+	AML_POWER_RES_OP
+	AML_PROCESSOR_OP
+	AML_NAME_OP
+	AML_ALIAS_OP
+	AML_MUTEX_OP
+	AML_EVENT_OP
+	AML_REGION_OP
+	AML_INT_NAMEDFIELD_OP
+
+  Opcodes that contain executable AML as part of the definition that
+  must be deferred until needed
+
+	AML_METHOD_OP
+	AML_VAR_PACKAGE_OP
+	AML_CREATE_FIELD_OP
+	AML_CREATE_BIT_FIELD_OP
+	AML_CREATE_BYTE_FIELD_OP
+	AML_CREATE_WORD_FIELD_OP
+	AML_CREATE_DWORD_FIELD_OP
+	AML_CREATE_QWORD_FIELD_OP
+	AML_REGION_OP
+	AML_BUFFER_OP
+
+  Field opcodes
+
+	AML_CREATE_FIELD_OP
+	AML_FIELD_OP
+	AML_INDEX_FIELD_OP
+	AML_BANK_FIELD_OP
+
+  Field "Create" opcodes
+
+	AML_CREATE_FIELD_OP
+	AML_CREATE_BIT_FIELD_OP
+	AML_CREATE_BYTE_FIELD_OP
+	AML_CREATE_WORD_FIELD_OP
+	AML_CREATE_DWORD_FIELD_OP
+	AML_CREATE_QWORD_FIELD_OP
+
+ ******************************************************************************/
+
+/*
+ * Master Opcode information table.  A summary of everything we know about each
+ * opcode, all in one place.
+ */
+const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
+/*! [Begin] no source code translation */
+/* Index           Name                 Parser Args               Interpreter Args                ObjectType                    Class                      Type                  Flags */
+
+/* 00 */ ACPI_OP("Zero", ARGP_ZERO_OP, ARGI_ZERO_OP, ACPI_TYPE_INTEGER,
+		 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
+/* 01 */ ACPI_OP("One", ARGP_ONE_OP, ARGI_ONE_OP, ACPI_TYPE_INTEGER,
+		 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
+/* 02 */ ACPI_OP("Alias", ARGP_ALIAS_OP, ARGI_ALIAS_OP,
+		 ACPI_TYPE_LOCAL_ALIAS, AML_CLASS_NAMED_OBJECT,
+		 AML_TYPE_NAMED_SIMPLE,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+		 AML_NSNODE | AML_NAMED),
+/* 03 */ ACPI_OP("Name", ARGP_NAME_OP, ARGI_NAME_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+		 AML_NSNODE | AML_NAMED),
+/* 04 */ ACPI_OP("ByteConst", ARGP_BYTE_OP, ARGI_BYTE_OP,
+		 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
+		 AML_TYPE_LITERAL, AML_CONSTANT),
+/* 05 */ ACPI_OP("WordConst", ARGP_WORD_OP, ARGI_WORD_OP,
+		 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
+		 AML_TYPE_LITERAL, AML_CONSTANT),
+/* 06 */ ACPI_OP("DwordConst", ARGP_DWORD_OP, ARGI_DWORD_OP,
+		 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
+		 AML_TYPE_LITERAL, AML_CONSTANT),
+/* 07 */ ACPI_OP("String", ARGP_STRING_OP, ARGI_STRING_OP,
+		 ACPI_TYPE_STRING, AML_CLASS_ARGUMENT,
+		 AML_TYPE_LITERAL, AML_CONSTANT),
+/* 08 */ ACPI_OP("Scope", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
+		 ACPI_TYPE_LOCAL_SCOPE, AML_CLASS_NAMED_OBJECT,
+		 AML_TYPE_NAMED_NO_OBJ,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+		 AML_NSNODE | AML_NAMED),
+/* 09 */ ACPI_OP("Buffer", ARGP_BUFFER_OP, ARGI_BUFFER_OP,
+		 ACPI_TYPE_BUFFER, AML_CLASS_CREATE,
+		 AML_TYPE_CREATE_OBJECT,
+		 AML_HAS_ARGS | AML_DEFER | AML_CONSTANT),
+/* 0A */ ACPI_OP("Package", ARGP_PACKAGE_OP, ARGI_PACKAGE_OP,
+		 ACPI_TYPE_PACKAGE, AML_CLASS_CREATE,
+		 AML_TYPE_CREATE_OBJECT,
+		 AML_HAS_ARGS | AML_DEFER | AML_CONSTANT),
+/* 0B */ ACPI_OP("Method", ARGP_METHOD_OP, ARGI_METHOD_OP,
+		 ACPI_TYPE_METHOD, AML_CLASS_NAMED_OBJECT,
+		 AML_TYPE_NAMED_COMPLEX,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+		 AML_NSNODE | AML_NAMED | AML_DEFER),
+/* 0C */ ACPI_OP("Local0", ARGP_LOCAL0, ARGI_LOCAL0,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_LOCAL_VARIABLE, 0),
+/* 0D */ ACPI_OP("Local1", ARGP_LOCAL1, ARGI_LOCAL1,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_LOCAL_VARIABLE, 0),
+/* 0E */ ACPI_OP("Local2", ARGP_LOCAL2, ARGI_LOCAL2,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_LOCAL_VARIABLE, 0),
+/* 0F */ ACPI_OP("Local3", ARGP_LOCAL3, ARGI_LOCAL3,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_LOCAL_VARIABLE, 0),
+/* 10 */ ACPI_OP("Local4", ARGP_LOCAL4, ARGI_LOCAL4,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_LOCAL_VARIABLE, 0),
+/* 11 */ ACPI_OP("Local5", ARGP_LOCAL5, ARGI_LOCAL5,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_LOCAL_VARIABLE, 0),
+/* 12 */ ACPI_OP("Local6", ARGP_LOCAL6, ARGI_LOCAL6,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_LOCAL_VARIABLE, 0),
+/* 13 */ ACPI_OP("Local7", ARGP_LOCAL7, ARGI_LOCAL7,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_LOCAL_VARIABLE, 0),
+/* 14 */ ACPI_OP("Arg0", ARGP_ARG0, ARGI_ARG0,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_METHOD_ARGUMENT, 0),
+/* 15 */ ACPI_OP("Arg1", ARGP_ARG1, ARGI_ARG1,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_METHOD_ARGUMENT, 0),
+/* 16 */ ACPI_OP("Arg2", ARGP_ARG2, ARGI_ARG2,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_METHOD_ARGUMENT, 0),
+/* 17 */ ACPI_OP("Arg3", ARGP_ARG3, ARGI_ARG3,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_METHOD_ARGUMENT, 0),
+/* 18 */ ACPI_OP("Arg4", ARGP_ARG4, ARGI_ARG4,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_METHOD_ARGUMENT, 0),
+/* 19 */ ACPI_OP("Arg5", ARGP_ARG5, ARGI_ARG5,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_METHOD_ARGUMENT, 0),
+/* 1A */ ACPI_OP("Arg6", ARGP_ARG6, ARGI_ARG6,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_METHOD_ARGUMENT, 0),
+/* 1B */ ACPI_OP("Store", ARGP_STORE_OP, ARGI_STORE_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+		 AML_FLAGS_EXEC_1A_1T_1R),
+/* 1C */ ACPI_OP("RefOf", ARGP_REF_OF_OP, ARGI_REF_OF_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
+		 AML_FLAGS_EXEC_1A_0T_1R),
+/* 1D */ ACPI_OP("Add", ARGP_ADD_OP, ARGI_ADD_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 1E */ ACPI_OP("Concatenate", ARGP_CONCAT_OP, ARGI_CONCAT_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_2A_1T_1R,
+		 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
+/* 1F */ ACPI_OP("Subtract", ARGP_SUBTRACT_OP, ARGI_SUBTRACT_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_2A_1T_1R,
+		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 20 */ ACPI_OP("Increment", ARGP_INCREMENT_OP, ARGI_INCREMENT_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_1A_0T_1R,
+		 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
+/* 21 */ ACPI_OP("Decrement", ARGP_DECREMENT_OP, ARGI_DECREMENT_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_1A_0T_1R,
+		 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
+/* 22 */ ACPI_OP("Multiply", ARGP_MULTIPLY_OP, ARGI_MULTIPLY_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_2A_1T_1R,
+		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 23 */ ACPI_OP("Divide", ARGP_DIVIDE_OP, ARGI_DIVIDE_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_2A_2T_1R,
+		 AML_FLAGS_EXEC_2A_2T_1R | AML_CONSTANT),
+/* 24 */ ACPI_OP("ShiftLeft", ARGP_SHIFT_LEFT_OP, ARGI_SHIFT_LEFT_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_2A_1T_1R,
+		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 25 */ ACPI_OP("ShiftRight", ARGP_SHIFT_RIGHT_OP, ARGI_SHIFT_RIGHT_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_2A_1T_1R,
+		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 26 */ ACPI_OP("And", ARGP_BIT_AND_OP, ARGI_BIT_AND_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 27 */ ACPI_OP("NAnd", ARGP_BIT_NAND_OP, ARGI_BIT_NAND_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_2A_1T_1R,
+		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 28 */ ACPI_OP("Or", ARGP_BIT_OR_OP, ARGI_BIT_OR_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 29 */ ACPI_OP("NOr", ARGP_BIT_NOR_OP, ARGI_BIT_NOR_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 2A */ ACPI_OP("XOr", ARGP_BIT_XOR_OP, ARGI_BIT_XOR_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
+/* 2B */ ACPI_OP("Not", ARGP_BIT_NOT_OP, ARGI_BIT_NOT_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 2C */ ACPI_OP("FindSetLeftBit", ARGP_FIND_SET_LEFT_BIT_OP,
+		 ARGI_FIND_SET_LEFT_BIT_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 2D */ ACPI_OP("FindSetRightBit", ARGP_FIND_SET_RIGHT_BIT_OP,
+		 ARGI_FIND_SET_RIGHT_BIT_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 2E */ ACPI_OP("DerefOf", ARGP_DEREF_OF_OP, ARGI_DEREF_OF_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R),
+/* 2F */ ACPI_OP("Notify", ARGP_NOTIFY_OP, ARGI_NOTIFY_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_2A_0T_0R, AML_FLAGS_EXEC_2A_0T_0R),
+/* 30 */ ACPI_OP("SizeOf", ARGP_SIZE_OF_OP, ARGI_SIZE_OF_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_1A_0T_1R,
+		 AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
+/* 31 */ ACPI_OP("Index", ARGP_INDEX_OP, ARGI_INDEX_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+		 AML_FLAGS_EXEC_2A_1T_1R),
+/* 32 */ ACPI_OP("Match", ARGP_MATCH_OP, ARGI_MATCH_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_6A_0T_1R,
+		 AML_FLAGS_EXEC_6A_0T_1R | AML_CONSTANT),
+/* 33 */ ACPI_OP("CreateDWordField", ARGP_CREATE_DWORD_FIELD_OP,
+		 ARGI_CREATE_DWORD_FIELD_OP,
+		 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
+		 AML_TYPE_CREATE_FIELD,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+		 AML_DEFER | AML_CREATE),
+/* 34 */ ACPI_OP("CreateWordField", ARGP_CREATE_WORD_FIELD_OP,
+		 ARGI_CREATE_WORD_FIELD_OP,
+		 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
+		 AML_TYPE_CREATE_FIELD,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+		 AML_DEFER | AML_CREATE),
+/* 35 */ ACPI_OP("CreateByteField", ARGP_CREATE_BYTE_FIELD_OP,
+		 ARGI_CREATE_BYTE_FIELD_OP,
+		 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
+		 AML_TYPE_CREATE_FIELD,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+		 AML_DEFER | AML_CREATE),
+/* 36 */ ACPI_OP("CreateBitField", ARGP_CREATE_BIT_FIELD_OP,
+		 ARGI_CREATE_BIT_FIELD_OP,
+		 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
+		 AML_TYPE_CREATE_FIELD,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+		 AML_DEFER | AML_CREATE),
+/* 37 */ ACPI_OP("ObjectType", ARGP_TYPE_OP, ARGI_TYPE_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_1A_0T_1R,
+		 AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
+/* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
+		 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
+/* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
+		 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
+/* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
+		 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
+/* 3B */ ACPI_OP("LEqual", ARGP_LEQUAL_OP, ARGI_LEQUAL_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_2A_0T_1R,
+		 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
+/* 3C */ ACPI_OP("LGreater", ARGP_LGREATER_OP, ARGI_LGREATER_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_2A_0T_1R,
+		 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
+/* 3D */ ACPI_OP("LLess", ARGP_LLESS_OP, ARGI_LLESS_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
+		 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
+/* 3E */ ACPI_OP("If", ARGP_IF_OP, ARGI_IF_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
+/* 3F */ ACPI_OP("Else", ARGP_ELSE_OP, ARGI_ELSE_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
+/* 40 */ ACPI_OP("While", ARGP_WHILE_OP, ARGI_WHILE_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
+/* 41 */ ACPI_OP("Noop", ARGP_NOOP_OP, ARGI_NOOP_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
+/* 42 */ ACPI_OP("Return", ARGP_RETURN_OP, ARGI_RETURN_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_CONTROL,
+		 AML_TYPE_CONTROL, AML_HAS_ARGS),
+/* 43 */ ACPI_OP("Break", ARGP_BREAK_OP, ARGI_BREAK_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
+/* 44 */ ACPI_OP("BreakPoint", ARGP_BREAK_POINT_OP, ARGI_BREAK_POINT_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
+/* 45 */ ACPI_OP("Ones", ARGP_ONES_OP, ARGI_ONES_OP, ACPI_TYPE_INTEGER,
+		 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
+
+/* Prefixed opcodes (Two-byte opcodes with a prefix op) */
+
+/* 46 */ ACPI_OP("Mutex", ARGP_MUTEX_OP, ARGI_MUTEX_OP, ACPI_TYPE_MUTEX,
+		 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+		 AML_NSNODE | AML_NAMED),
+/* 47 */ ACPI_OP("Event", ARGP_EVENT_OP, ARGI_EVENT_OP, ACPI_TYPE_EVENT,
+		 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
+		 AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED),
+/* 48 */ ACPI_OP("CondRefOf", ARGP_COND_REF_OF_OP, ARGI_COND_REF_OF_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R),
+/* 49 */ ACPI_OP("CreateField", ARGP_CREATE_FIELD_OP,
+		 ARGI_CREATE_FIELD_OP, ACPI_TYPE_BUFFER_FIELD,
+		 AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+		 AML_DEFER | AML_FIELD | AML_CREATE),
+/* 4A */ ACPI_OP("Load", ARGP_LOAD_OP, ARGI_LOAD_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_0R,
+		 AML_FLAGS_EXEC_1A_1T_0R),
+/* 4B */ ACPI_OP("Stall", ARGP_STALL_OP, ARGI_STALL_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
+		 AML_FLAGS_EXEC_1A_0T_0R),
+/* 4C */ ACPI_OP("Sleep", ARGP_SLEEP_OP, ARGI_SLEEP_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
+		 AML_FLAGS_EXEC_1A_0T_0R),
+/* 4D */ ACPI_OP("Acquire", ARGP_ACQUIRE_OP, ARGI_ACQUIRE_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R),
+/* 4E */ ACPI_OP("Signal", ARGP_SIGNAL_OP, ARGI_SIGNAL_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
+/* 4F */ ACPI_OP("Wait", ARGP_WAIT_OP, ARGI_WAIT_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
+		 AML_FLAGS_EXEC_2A_0T_1R),
+/* 50 */ ACPI_OP("Reset", ARGP_RESET_OP, ARGI_RESET_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
+		 AML_FLAGS_EXEC_1A_0T_0R),
+/* 51 */ ACPI_OP("Release", ARGP_RELEASE_OP, ARGI_RELEASE_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
+/* 52 */ ACPI_OP("FromBCD", ARGP_FROM_BCD_OP, ARGI_FROM_BCD_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_1A_1T_1R,
+		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 53 */ ACPI_OP("ToBCD", ARGP_TO_BCD_OP, ARGI_TO_BCD_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 54 */ ACPI_OP("Unload", ARGP_UNLOAD_OP, ARGI_UNLOAD_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
+/* 55 */ ACPI_OP("Revision", ARGP_REVISION_OP, ARGI_REVISION_OP,
+		 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
+		 AML_TYPE_CONSTANT, 0),
+/* 56 */ ACPI_OP("Debug", ARGP_DEBUG_OP, ARGI_DEBUG_OP,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_CONSTANT, 0),
+/* 57 */ ACPI_OP("Fatal", ARGP_FATAL_OP, ARGI_FATAL_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_0T_0R,
+		 AML_FLAGS_EXEC_3A_0T_0R),
+/* 58 */ ACPI_OP("OperationRegion", ARGP_REGION_OP, ARGI_REGION_OP,
+		 ACPI_TYPE_REGION, AML_CLASS_NAMED_OBJECT,
+		 AML_TYPE_NAMED_COMPLEX,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+		 AML_NSNODE | AML_NAMED | AML_DEFER),
+/* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
+/* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP,
+		 ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT,
+		 AML_TYPE_NAMED_NO_OBJ,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+		 AML_NSNODE | AML_NAMED),
+/* 5B */ ACPI_OP("Processor", ARGP_PROCESSOR_OP, ARGI_PROCESSOR_OP,
+		 ACPI_TYPE_PROCESSOR, AML_CLASS_NAMED_OBJECT,
+		 AML_TYPE_NAMED_SIMPLE,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+		 AML_NSNODE | AML_NAMED),
+/* 5C */ ACPI_OP("PowerResource", ARGP_POWER_RES_OP, ARGI_POWER_RES_OP,
+		 ACPI_TYPE_POWER, AML_CLASS_NAMED_OBJECT,
+		 AML_TYPE_NAMED_SIMPLE,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+		 AML_NSNODE | AML_NAMED),
+/* 5D */ ACPI_OP("ThermalZone", ARGP_THERMAL_ZONE_OP,
+		 ARGI_THERMAL_ZONE_OP, ACPI_TYPE_THERMAL,
+		 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+		 AML_NSNODE | AML_NAMED),
+/* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
+		 AML_TYPE_NAMED_FIELD,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
+/* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP,
+		 ACPI_TYPE_LOCAL_BANK_FIELD, AML_CLASS_NAMED_OBJECT,
+		 AML_TYPE_NAMED_FIELD,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD |
+		 AML_DEFER),
+
+/* Internal opcodes that map to invalid AML opcodes */
+
+/* 60 */ ACPI_OP("LNotEqual", ARGP_LNOTEQUAL_OP, ARGI_LNOTEQUAL_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
+		 AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT),
+/* 61 */ ACPI_OP("LLessEqual", ARGP_LLESSEQUAL_OP, ARGI_LLESSEQUAL_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
+		 AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT),
+/* 62 */ ACPI_OP("LGreaterEqual", ARGP_LGREATEREQUAL_OP,
+		 ARGI_LGREATEREQUAL_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_INTERNAL, AML_TYPE_BOGUS,
+		 AML_HAS_ARGS | AML_CONSTANT),
+/* 63 */ ACPI_OP("-NamePath-", ARGP_NAMEPATH_OP, ARGI_NAMEPATH_OP,
+		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
+		 AML_TYPE_LITERAL, AML_NSOBJECT | AML_NSNODE),
+/* 64 */ ACPI_OP("-MethodCall-", ARGP_METHODCALL_OP, ARGI_METHODCALL_OP,
+		 ACPI_TYPE_METHOD, AML_CLASS_METHOD_CALL,
+		 AML_TYPE_METHOD_CALL,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE),
+/* 65 */ ACPI_OP("-ByteList-", ARGP_BYTELIST_OP, ARGI_BYTELIST_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_ARGUMENT,
+		 AML_TYPE_LITERAL, 0),
+/* 66 */ ACPI_OP("-ReservedField-", ARGP_RESERVEDFIELD_OP,
+		 ARGI_RESERVEDFIELD_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
+/* 67 */ ACPI_OP("-NamedField-", ARGP_NAMEDFIELD_OP, ARGI_NAMEDFIELD_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
+		 AML_TYPE_BOGUS,
+		 AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED),
+/* 68 */ ACPI_OP("-AccessField-", ARGP_ACCESSFIELD_OP,
+		 ARGI_ACCESSFIELD_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
+/* 69 */ ACPI_OP("-StaticString", ARGP_STATICSTRING_OP,
+		 ARGI_STATICSTRING_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
+/* 6A */ ACPI_OP("-Return Value-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
+		 AML_CLASS_RETURN_VALUE, AML_TYPE_RETURN,
+		 AML_HAS_ARGS | AML_HAS_RETVAL),
+/* 6B */ ACPI_OP("-UNKNOWN_OP-", ARG_NONE, ARG_NONE, ACPI_TYPE_INVALID,
+		 AML_CLASS_UNKNOWN, AML_TYPE_BOGUS, AML_HAS_ARGS),
+/* 6C */ ACPI_OP("-ASCII_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
+		 AML_CLASS_ASCII, AML_TYPE_BOGUS, AML_HAS_ARGS),
+/* 6D */ ACPI_OP("-PREFIX_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
+		 AML_CLASS_PREFIX, AML_TYPE_BOGUS, AML_HAS_ARGS),
+
+/* ACPI 2.0 opcodes */
+
+/* 6E */ ACPI_OP("QwordConst", ARGP_QWORD_OP, ARGI_QWORD_OP,
+		 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
+		 AML_TYPE_LITERAL, AML_CONSTANT),
+	/* 6F */ ACPI_OP("Package", /* Var */ ARGP_VAR_PACKAGE_OP,
+			 ARGI_VAR_PACKAGE_OP, ACPI_TYPE_PACKAGE,
+			 AML_CLASS_CREATE, AML_TYPE_CREATE_OBJECT,
+			 AML_HAS_ARGS | AML_DEFER),
+/* 70 */ ACPI_OP("ConcatenateResTemplate", ARGP_CONCAT_RES_OP,
+		 ARGI_CONCAT_RES_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+		 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
+/* 71 */ ACPI_OP("Mod", ARGP_MOD_OP, ARGI_MOD_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
+		 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
+/* 72 */ ACPI_OP("CreateQWordField", ARGP_CREATE_QWORD_FIELD_OP,
+		 ARGI_CREATE_QWORD_FIELD_OP,
+		 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
+		 AML_TYPE_CREATE_FIELD,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
+		 AML_DEFER | AML_CREATE),
+/* 73 */ ACPI_OP("ToBuffer", ARGP_TO_BUFFER_OP, ARGI_TO_BUFFER_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_1A_1T_1R,
+		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 74 */ ACPI_OP("ToDecimalString", ARGP_TO_DEC_STR_OP,
+		 ARGI_TO_DEC_STR_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
+		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 75 */ ACPI_OP("ToHexString", ARGP_TO_HEX_STR_OP, ARGI_TO_HEX_STR_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_1A_1T_1R,
+		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 76 */ ACPI_OP("ToInteger", ARGP_TO_INTEGER_OP, ARGI_TO_INTEGER_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_1A_1T_1R,
+		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
+/* 77 */ ACPI_OP("ToString", ARGP_TO_STRING_OP, ARGI_TO_STRING_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_2A_1T_1R,
+		 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
+/* 78 */ ACPI_OP("CopyObject", ARGP_COPY_OP, ARGI_COPY_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R),
+/* 79 */ ACPI_OP("Mid", ARGP_MID_OP, ARGI_MID_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_1T_1R,
+		 AML_FLAGS_EXEC_3A_1T_1R | AML_CONSTANT),
+/* 7A */ ACPI_OP("Continue", ARGP_CONTINUE_OP, ARGI_CONTINUE_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
+/* 7B */ ACPI_OP("LoadTable", ARGP_LOAD_TABLE_OP, ARGI_LOAD_TABLE_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
+		 AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R),
+/* 7C */ ACPI_OP("DataTableRegion", ARGP_DATA_REGION_OP,
+		 ARGI_DATA_REGION_OP, ACPI_TYPE_REGION,
+		 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+		 AML_NSNODE | AML_NAMED | AML_DEFER),
+/* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
+		 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
+		 AML_TYPE_NAMED_NO_OBJ,
+		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE),
+
+/* ACPI 3.0 opcodes */
+
+/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
+		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
+		 AML_FLAGS_EXEC_0A_0T_1R)
+
+/*! [End] no source code translation !*/
+};
+
+/*
+ * This table is directly indexed by the opcodes, and returns an
+ * index into the table above
+ */
+static const u8 acpi_gbl_short_op_index[256] = {
+/*              0     1     2     3     4     5     6     7  */
+/*              8     9     A     B     C     D     E     F  */
+/* 0x00 */ 0x00, 0x01, _UNK, _UNK, _UNK, _UNK, 0x02, _UNK,
+/* 0x08 */ 0x03, _UNK, 0x04, 0x05, 0x06, 0x07, 0x6E, _UNK,
+/* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, _UNK, _UNK, _UNK,
+/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
+/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
+/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
+/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
+/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
+/* 0x58 */ _ASC, _ASC, _ASC, _UNK, _PFX, _UNK, _PFX, _ASC,
+/* 0x60 */ 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
+/* 0x68 */ 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, _UNK,
+/* 0x70 */ 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22,
+/* 0x78 */ 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a,
+/* 0x80 */ 0x2b, 0x2c, 0x2d, 0x2e, 0x70, 0x71, 0x2f, 0x30,
+/* 0x88 */ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x72,
+/* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x73, 0x74,
+/* 0x98 */ 0x75, 0x76, _UNK, _UNK, 0x77, 0x78, 0x79, 0x7A,
+/* 0xA0 */ 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x60, 0x61,
+/* 0xA8 */ 0x62, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xB0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xB8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xC0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xC8 */ _UNK, _UNK, _UNK, _UNK, 0x44, _UNK, _UNK, _UNK,
+/* 0xD0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xD8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xE0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xE8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xF0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xF8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x45,
+};
+
+/*
+ * This table is indexed by the second opcode of the extended opcode
+ * pair.  It returns an index into the opcode table (acpi_gbl_aml_op_info)
+ */
+static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
+/*              0     1     2     3     4     5     6     7  */
+/*              8     9     A     B     C     D     E     F  */
+/* 0x00 */ _UNK, 0x46, 0x47, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x08 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x10 */ _UNK, _UNK, 0x48, 0x49, _UNK, _UNK, _UNK, _UNK,
+/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x7B,
+/* 0x20 */ 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51,
+/* 0x28 */ 0x52, 0x53, 0x54, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x30 */ 0x55, 0x56, 0x57, 0x7e, _UNK, _UNK, _UNK, _UNK,
+/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x40 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x48 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x50 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x58 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x60 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x68 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x70 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x78 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x80 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+/* 0x88 */ 0x7C,
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_opcode_info
+ *
+ * PARAMETERS:  Opcode              - The AML opcode
+ *
+ * RETURN:      A pointer to the info about the opcode.
+ *
+ * DESCRIPTION: Find AML opcode description based on the opcode.
+ *              NOTE: This procedure must ALWAYS return a valid pointer!
+ *
+ ******************************************************************************/
+
+const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
+{
+	ACPI_FUNCTION_NAME(ps_get_opcode_info);
+
+	/*
+	 * Detect normal 8-bit opcode or extended 16-bit opcode
+	 */
+	if (!(opcode & 0xFF00)) {
+
+		/* Simple (8-bit) opcode: 0-255, can't index beyond table  */
+
+		return (&acpi_gbl_aml_op_info
+			[acpi_gbl_short_op_index[(u8) opcode]]);
+	}
+
+	if (((opcode & 0xFF00) == AML_EXTENDED_OPCODE) &&
+	    (((u8) opcode) <= MAX_EXTENDED_OPCODE)) {
+
+		/* Valid extended (16-bit) opcode */
+
+		return (&acpi_gbl_aml_op_info
+			[acpi_gbl_long_op_index[(u8) opcode]]);
+	}
+
+	/* Unknown AML opcode */
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+			  "Unknown AML opcode [%4.4X]\n", opcode));
+
+	return (&acpi_gbl_aml_op_info[_UNK]);
+}
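+
+/*
+ * Editor's note - illustrative sketch only, not part of this patch:
+ * how a caller resolves an opcode through the tables above. A one-byte
+ * opcode indexes acpi_gbl_short_op_index directly; a two-byte opcode
+ * carries the 0x5B extended prefix in its high byte and goes through
+ * acpi_gbl_long_op_index. The opcode values are arbitrary examples.
+ */
+static void example_opcode_lookup(void)
+{
+	const struct acpi_opcode_info *info;
+
+	info = acpi_ps_get_opcode_info(0x70);	/* simple one-byte opcode */
+	info = acpi_ps_get_opcode_info(0x5B33);	/* extended (0x5B-prefixed) opcode */
+	info = acpi_ps_get_opcode_info(0x5BFF);	/* out of range, yields the _UNK entry */
+	(void)info;
+}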
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_opcode_name
+ *
+ * PARAMETERS:  Opcode              - The AML opcode
+ *
+ * RETURN:      A pointer to the name of the opcode (ASCII String)
+ *              Note: Never returns NULL.
+ *
+ * DESCRIPTION: Translate an opcode into a human-readable string
+ *
+ ******************************************************************************/
+
+char *acpi_ps_get_opcode_name(u16 opcode)
+{
+#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
+
+	const struct acpi_opcode_info *op;
+
+	op = acpi_ps_get_opcode_info(opcode);
+
+	/* Always guaranteed to return a valid pointer */
+
+	return (op->name);
+
+#else
+	return ("OpcodeName unavailable");
+
+#endif
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_argument_count
+ *
+ * PARAMETERS:  op_type             - Type associated with the AML opcode
+ *
+ * RETURN:      Argument count
+ *
+ * DESCRIPTION: Obtain the number of expected arguments for an AML opcode
+ *
+ ******************************************************************************/
+
+u8 acpi_ps_get_argument_count(u32 op_type)
+{
+
+	if (op_type <= AML_TYPE_EXEC_6A_0T_1R) {
+		return (acpi_gbl_argument_count[op_type]);
+	}
+
+	return (0);
+}
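+
+/*
+ * Editor's note - illustrative sketch only, not part of this patch:
+ * the argument count is normally looked up via the opcode info entry;
+ * the "type" field name is an assumption based on how the table above
+ * is consumed elsewhere in the interpreter.
+ */
+static u8 example_argument_count(u16 aml_opcode)
+{
+	const struct acpi_opcode_info *op_info;
+
+	op_info = acpi_ps_get_opcode_info(aml_opcode);
+	return (acpi_ps_get_argument_count(op_info->type));
+}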
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
new file mode 100644
index 0000000..70838e9
--- /dev/null
+++ b/drivers/acpi/acpica/psparse.c
@@ -0,0 +1,701 @@
+/******************************************************************************
+ *
+ * Module Name: psparse - Parser top level AML parse routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+/*
+ * Parse the AML and build an operation tree as most interpreters,
+ * like Perl, do.  Parsing is done by hand rather than with a YACC
+ * generated parser to tightly constrain stack and dynamic memory
+ * usage.  At the same time, parsing is kept flexible and the code
+ * fairly compact by parsing based on a list of AML opcode
+ * templates in aml_op_info[]
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "acdispat.h"
+#include "amlcode.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+
+#define _COMPONENT          ACPI_PARSER
+ACPI_MODULE_NAME("psparse")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_opcode_size
+ *
+ * PARAMETERS:  Opcode          - An AML opcode
+ *
+ * RETURN:      Size of the opcode, in bytes (1 or 2)
+ *
+ * DESCRIPTION: Get the size of the current opcode.
+ *
+ ******************************************************************************/
+u32 acpi_ps_get_opcode_size(u32 opcode)
+{
+
+	/* Extended (2-byte) opcode if > 255 */
+
+	if (opcode > 0x00FF) {
+		return (2);
+	}
+
+	/* Otherwise, just a single byte opcode */
+
+	return (1);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_peek_opcode
+ *
+ * PARAMETERS:  parser_state        - A parser state object
+ *
+ * RETURN:      Next AML opcode
+ *
+ * DESCRIPTION: Get next AML opcode (without incrementing AML pointer)
+ *
+ ******************************************************************************/
+
+u16 acpi_ps_peek_opcode(struct acpi_parse_state * parser_state)
+{
+	u8 *aml;
+	u16 opcode;
+
+	aml = parser_state->aml;
+	opcode = (u16) ACPI_GET8(aml);
+
+	if (opcode == AML_EXTENDED_OP_PREFIX) {
+
+		/* Extended opcode, get the second opcode byte */
+
+		aml++;
+		opcode = (u16) ((opcode << 8) | ACPI_GET8(aml));
+	}
+
+	return (opcode);
+}
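+
+/*
+ * Editor's note - illustrative sketch only, not part of this patch:
+ * peeking a two-byte opcode. The first byte is the extended prefix
+ * (0x5B); the second byte is an arbitrary example. The peek does not
+ * advance the AML pointer held in the parser state.
+ */
+static u16 example_peek_extended(void)
+{
+	u8 aml_bytes[2] = { 0x5B, 0x33 };
+	struct acpi_parse_state parser_state;
+
+	parser_state.aml = aml_bytes;
+	return (acpi_ps_peek_opcode(&parser_state));	/* returns 0x5B33 */
+}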
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_complete_this_op
+ *
+ * PARAMETERS:  walk_state      - Current State
+ *              Op              - Op to complete
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Perform any cleanup at the completion of an Op.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
+			 union acpi_parse_object * op)
+{
+	union acpi_parse_object *prev;
+	union acpi_parse_object *next;
+	const struct acpi_opcode_info *parent_info;
+	union acpi_parse_object *replacement_op = NULL;
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE_PTR(ps_complete_this_op, op);
+
+	/* Check for null Op, can happen if AML code is corrupt */
+
+	if (!op) {
+		return_ACPI_STATUS(AE_OK);	/* OK for now */
+	}
+
+	/* Delete this op and the subtree below it if asked to */
+
+	if (((walk_state->parse_flags & ACPI_PARSE_TREE_MASK) !=
+	     ACPI_PARSE_DELETE_TREE)
+	    || (walk_state->op_info->class == AML_CLASS_ARGUMENT)) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Make sure that we only delete this subtree */
+
+	if (op->common.parent) {
+		prev = op->common.parent->common.value.arg;
+		if (!prev) {
+
+			/* Nothing more to do */
+
+			goto cleanup;
+		}
+
+		/*
+		 * Check if we need to replace the operator and its subtree
+		 * with a return value op (placeholder op)
+		 */
+		parent_info =
+		    acpi_ps_get_opcode_info(op->common.parent->common.
+					    aml_opcode);
+
+		switch (parent_info->class) {
+		case AML_CLASS_CONTROL:
+			break;
+
+		case AML_CLASS_CREATE:
+
+			/*
+			 * These opcodes contain term_arg operands. The current
+			 * op must be replaced by a placeholder return op
+			 */
+			replacement_op =
+			    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
+			if (!replacement_op) {
+				status = AE_NO_MEMORY;
+			}
+			break;
+
+		case AML_CLASS_NAMED_OBJECT:
+
+			/*
+			 * These opcodes contain term_arg operands. The current
+			 * op must be replaced by a placeholder return op
+			 */
+			if ((op->common.parent->common.aml_opcode ==
+			     AML_REGION_OP)
+			    || (op->common.parent->common.aml_opcode ==
+				AML_DATA_REGION_OP)
+			    || (op->common.parent->common.aml_opcode ==
+				AML_BUFFER_OP)
+			    || (op->common.parent->common.aml_opcode ==
+				AML_PACKAGE_OP)
+			    || (op->common.parent->common.aml_opcode ==
+				AML_BANK_FIELD_OP)
+			    || (op->common.parent->common.aml_opcode ==
+				AML_VAR_PACKAGE_OP)) {
+				replacement_op =
+				    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
+				if (!replacement_op) {
+					status = AE_NO_MEMORY;
+				}
+			} else
+			    if ((op->common.parent->common.aml_opcode ==
+				 AML_NAME_OP)
+				&& (walk_state->pass_number <=
+				    ACPI_IMODE_LOAD_PASS2)) {
+				if ((op->common.aml_opcode == AML_BUFFER_OP)
+				    || (op->common.aml_opcode == AML_PACKAGE_OP)
+				    || (op->common.aml_opcode ==
+					AML_VAR_PACKAGE_OP)) {
+					replacement_op =
+					    acpi_ps_alloc_op(op->common.
+							     aml_opcode);
+					if (!replacement_op) {
+						status = AE_NO_MEMORY;
+					} else {
+						replacement_op->named.data =
+						    op->named.data;
+						replacement_op->named.length =
+						    op->named.length;
+					}
+				}
+			}
+			break;
+
+		default:
+
+			replacement_op =
+			    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
+			if (!replacement_op) {
+				status = AE_NO_MEMORY;
+			}
+		}
+
+		/* We must unlink this op from the parent tree */
+
+		if (prev == op) {
+
+			/* This op is the first in the list */
+
+			if (replacement_op) {
+				replacement_op->common.parent =
+				    op->common.parent;
+				replacement_op->common.value.arg = NULL;
+				replacement_op->common.node = op->common.node;
+				op->common.parent->common.value.arg =
+				    replacement_op;
+				replacement_op->common.next = op->common.next;
+			} else {
+				op->common.parent->common.value.arg =
+				    op->common.next;
+			}
+		}
+
+		/* Search the parent list */
+
+		else
+			while (prev) {
+
+				/* Traverse all siblings in the parent's argument list */
+
+				next = prev->common.next;
+				if (next == op) {
+					if (replacement_op) {
+						replacement_op->common.parent =
+						    op->common.parent;
+						replacement_op->common.value.
+						    arg = NULL;
+						replacement_op->common.node =
+						    op->common.node;
+						prev->common.next =
+						    replacement_op;
+						replacement_op->common.next =
+						    op->common.next;
+						next = NULL;
+					} else {
+						prev->common.next =
+						    op->common.next;
+						next = NULL;
+					}
+				}
+				prev = next;
+			}
+	}
+
+      cleanup:
+
+	/* Now we can actually delete the subtree rooted at Op */
+
+	acpi_ps_delete_parse_tree(op);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_next_parse_state
+ *
+ * PARAMETERS:  walk_state          - Current state
+ *              Op                  - Current parse op
+ *              callback_status     - Status from previous operation
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Update the parser state based upon the return exception from
+ *              the parser callback.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
+			 union acpi_parse_object *op,
+			 acpi_status callback_status)
+{
+	struct acpi_parse_state *parser_state = &walk_state->parser_state;
+	acpi_status status = AE_CTRL_PENDING;
+
+	ACPI_FUNCTION_TRACE_PTR(ps_next_parse_state, op);
+
+	switch (callback_status) {
+	case AE_CTRL_TERMINATE:
+		/*
+		 * A control method was terminated via a RETURN statement.
+		 * The walk of this method is complete.
+		 */
+		parser_state->aml = parser_state->aml_end;
+		status = AE_CTRL_TERMINATE;
+		break;
+
+	case AE_CTRL_BREAK:
+
+		parser_state->aml = walk_state->aml_last_while;
+		walk_state->control_state->common.value = FALSE;
+		status = AE_CTRL_BREAK;
+		break;
+
+	case AE_CTRL_CONTINUE:
+
+		parser_state->aml = walk_state->aml_last_while;
+		status = AE_CTRL_CONTINUE;
+		break;
+
+	case AE_CTRL_PENDING:
+
+		parser_state->aml = walk_state->aml_last_while;
+		break;
+
+#if 0
+	case AE_CTRL_SKIP:
+
+		parser_state->aml = parser_state->scope->parse_scope.pkg_end;
+		status = AE_OK;
+		break;
+#endif
+
+	case AE_CTRL_TRUE:
+		/*
+		 * Predicate of an IF was true, and we are at the matching ELSE.
+		 * Just close out this package
+		 */
+		parser_state->aml = acpi_ps_get_next_package_end(parser_state);
+		status = AE_CTRL_PENDING;
+		break;
+
+	case AE_CTRL_FALSE:
+		/*
+		 * Either an IF/WHILE Predicate was false or we encountered a BREAK
+		 * opcode.  In both cases, we do not execute the rest of the
+		 * package;  We simply close out the parent (finishing the walk of
+		 * this branch of the tree) and continue execution at the parent
+		 * level.
+		 */
+		parser_state->aml = parser_state->scope->parse_scope.pkg_end;
+
+		/* In the case of a BREAK, just force a predicate (if any) to FALSE */
+
+		walk_state->control_state->common.value = FALSE;
+		status = AE_CTRL_END;
+		break;
+
+	case AE_CTRL_TRANSFER:
+
+		/* A method call (invocation) -- transfer control */
+
+		status = AE_CTRL_TRANSFER;
+		walk_state->prev_op = op;
+		walk_state->method_call_op = op;
+		walk_state->method_call_node =
+		    (op->common.value.arg)->common.node;
+
+		/* Will return value (if any) be used by the caller? */
+
+		walk_state->return_used =
+		    acpi_ds_is_result_used(op, walk_state);
+		break;
+
+	default:
+
+		status = callback_status;
+		if ((callback_status & AE_CODE_MASK) == AE_CODE_CONTROL) {
+			status = AE_OK;
+		}
+		break;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_parse_aml
+ *
+ * PARAMETERS:  walk_state      - Current state
+ *
+ *
+ *
+ * DESCRIPTION: Parse raw AML and return a tree of ops
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	struct acpi_thread_state *thread;
+	struct acpi_thread_state *prev_walk_list = acpi_gbl_current_walk_list;
+	struct acpi_walk_state *previous_walk_state;
+
+	ACPI_FUNCTION_TRACE(ps_parse_aml);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+			  "Entered with WalkState=%p Aml=%p size=%X\n",
+			  walk_state, walk_state->parser_state.aml,
+			  walk_state->parser_state.aml_size));
+
+	if (!walk_state->parser_state.aml) {
+		return_ACPI_STATUS(AE_NULL_OBJECT);
+	}
+
+	/* Create and initialize a new thread state */
+
+	thread = acpi_ut_create_thread_state();
+	if (!thread) {
+		if (walk_state->method_desc) {
+
+			/* Executing a control method - additional cleanup */
+
+			acpi_ds_terminate_control_method(
+				walk_state->method_desc, walk_state);
+		}
+
+		acpi_ds_delete_walk_state(walk_state);
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	walk_state->thread = thread;
+
+	/*
+	 * If executing a method, the starting sync_level is this method's
+	 * sync_level
+	 */
+	if (walk_state->method_desc) {
+		walk_state->thread->current_sync_level =
+		    walk_state->method_desc->method.sync_level;
+	}
+
+	acpi_ds_push_walk_state(walk_state, thread);
+
+	/*
+	 * This global allows the AML debugger to get a handle to the currently
+	 * executing control method.
+	 */
+	acpi_gbl_current_walk_list = thread;
+
+	/*
+	 * Execute the walk loop as long as there is a valid Walk State.  This
+	 * handles nested control method invocations without recursion.
+	 */
+	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "State=%p\n", walk_state));
+
+	status = AE_OK;
+	while (walk_state) {
+		if (ACPI_SUCCESS(status)) {
+			/*
+			 * The parse_loop executes AML until the method terminates
+			 * or calls another method.
+			 */
+			status = acpi_ps_parse_loop(walk_state);
+		}
+
+		ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+				  "Completed one call to walk loop, %s State=%p\n",
+				  acpi_format_exception(status), walk_state));
+
+		if (status == AE_CTRL_TRANSFER) {
+			/*
+			 * A method call was detected.
+			 * Transfer control to the called control method
+			 */
+			status =
+			    acpi_ds_call_control_method(thread, walk_state,
+							NULL);
+			if (ACPI_FAILURE(status)) {
+				status =
+				    acpi_ds_method_error(status, walk_state);
+			}
+
+			/*
+			 * If the transfer to the new method call worked, a new walk
+			 * state was created -- get it
+			 */
+			walk_state = acpi_ds_get_current_walk_state(thread);
+			continue;
+		} else if (status == AE_CTRL_TERMINATE) {
+			status = AE_OK;
+		} else if ((status != AE_OK) && (walk_state->method_desc)) {
+
+			/* Either the method parse or actual execution failed */
+
+			ACPI_ERROR_METHOD("Method parse/execution failed",
+					  walk_state->method_node, NULL,
+					  status);
+
+			/* Check for possible multi-thread reentrancy problem */
+
+			if ((status == AE_ALREADY_EXISTS) &&
+			    (!walk_state->method_desc->method.mutex)) {
+				ACPI_INFO((AE_INFO,
+					   "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
+					   walk_state->method_node->name.
+					   ascii));
+
+				/*
+				 * Method tried to create an object twice. The probable cause is
+				 * that the method cannot handle reentrancy.
+				 *
+				 * The method is marked not_serialized, but it tried to create
+				 * a named object, causing the second thread entrance to fail.
+				 * Workaround this problem by marking the method permanently
+				 * as Serialized.
+				 */
+				walk_state->method_desc->method.method_flags |=
+				    AML_METHOD_SERIALIZED;
+				walk_state->method_desc->method.sync_level = 0;
+			}
+		}
+
+		/* We are done with this walk, move on to the parent if any */
+
+		walk_state = acpi_ds_pop_walk_state(thread);
+
+		/* Reset the current scope to the beginning of scope stack */
+
+		acpi_ds_scope_stack_clear(walk_state);
+
+		/*
+		 * If we just returned from the execution of a control method or if we
+		 * encountered an error during the method parse phase, there's lots of
+		 * cleanup to do
+		 */
+		if (((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
+		     ACPI_PARSE_EXECUTE) || (ACPI_FAILURE(status))) {
+			acpi_ds_terminate_control_method(walk_state->
+							 method_desc,
+							 walk_state);
+		}
+
+		/* Delete this walk state and all linked control states */
+
+		acpi_ps_cleanup_scope(&walk_state->parser_state);
+		previous_walk_state = walk_state;
+
+		ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+				  "ReturnValue=%p, ImplicitValue=%p State=%p\n",
+				  walk_state->return_desc,
+				  walk_state->implicit_return_obj, walk_state));
+
+		/* Check if we have restarted a preempted walk */
+
+		walk_state = acpi_ds_get_current_walk_state(thread);
+		if (walk_state) {
+			if (ACPI_SUCCESS(status)) {
+				/*
+				 * There is another walk state, restart it.
+				 * If the method return value is not used by the parent,
+				 * the object is deleted.
+				 */
+				if (!previous_walk_state->return_desc) {
+					/*
+					 * In slack mode execution, if there is no return value
+					 * we should implicitly return zero (0) as a default value.
+					 */
+					if (acpi_gbl_enable_interpreter_slack &&
+					    !previous_walk_state->
+					    implicit_return_obj) {
+						previous_walk_state->
+						    implicit_return_obj =
+						    acpi_ut_create_internal_object
+						    (ACPI_TYPE_INTEGER);
+						if (!previous_walk_state->
+						    implicit_return_obj) {
+							return_ACPI_STATUS
+							    (AE_NO_MEMORY);
+						}
+
+						previous_walk_state->
+						    implicit_return_obj->
+						    integer.value = 0;
+					}
+
+					/* Restart the calling control method */
+
+					status =
+					    acpi_ds_restart_control_method
+					    (walk_state,
+					     previous_walk_state->
+					     implicit_return_obj);
+				} else {
+					/*
+					 * We have a valid return value, delete any implicit
+					 * return value.
+					 */
+					acpi_ds_clear_implicit_return
+					    (previous_walk_state);
+
+					status =
+					    acpi_ds_restart_control_method
+					    (walk_state,
+					     previous_walk_state->return_desc);
+				}
+				if (ACPI_SUCCESS(status)) {
+					walk_state->walk_type |=
+					    ACPI_WALK_METHOD_RESTART;
+				}
+			} else {
+				/* On error, delete any return object or implicit return */
+
+				acpi_ut_remove_reference(previous_walk_state->
+							 return_desc);
+				acpi_ds_clear_implicit_return
+				    (previous_walk_state);
+			}
+		}
+
+		/*
+		 * Just completed a 1st-level method, save the final internal return
+		 * value (if any)
+		 */
+		else if (previous_walk_state->caller_return_desc) {
+			if (previous_walk_state->implicit_return_obj) {
+				*(previous_walk_state->caller_return_desc) =
+				    previous_walk_state->implicit_return_obj;
+			} else {
+				/* NULL if no return value */
+
+				*(previous_walk_state->caller_return_desc) =
+				    previous_walk_state->return_desc;
+			}
+		} else {
+			if (previous_walk_state->return_desc) {
+
+				/* Caller doesn't want it, must delete it */
+
+				acpi_ut_remove_reference(previous_walk_state->
+							 return_desc);
+			}
+			if (previous_walk_state->implicit_return_obj) {
+
+				/* Caller doesn't want it, must delete it */
+
+				acpi_ut_remove_reference(previous_walk_state->
+							 implicit_return_obj);
+			}
+		}
+
+		acpi_ds_delete_walk_state(previous_walk_state);
+	}
+
+	/* Normal exit */
+
+	acpi_ex_release_all_mutexes(thread);
+	acpi_ut_delete_generic_state(ACPI_CAST_PTR
+				     (union acpi_generic_state, thread));
+	acpi_gbl_current_walk_list = prev_walk_list;
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
new file mode 100644
index 0000000..2feca5c
--- /dev/null
+++ b/drivers/acpi/acpica/psscope.c
@@ -0,0 +1,265 @@
+/******************************************************************************
+ *
+ * Module Name: psscope - Parser scope stack management routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+
+#define _COMPONENT          ACPI_PARSER
+ACPI_MODULE_NAME("psscope")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_parent_scope
+ *
+ * PARAMETERS:  parser_state        - Current parser state object
+ *
+ * RETURN:      Pointer to an Op object
+ *
+ * DESCRIPTION: Get parent of current op being parsed
+ *
+ ******************************************************************************/
+union acpi_parse_object *acpi_ps_get_parent_scope(struct acpi_parse_state
+						  *parser_state)
+{
+
+	return (parser_state->scope->parse_scope.op);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_has_completed_scope
+ *
+ * PARAMETERS:  parser_state        - Current parser state object
+ *
+ * RETURN:      Boolean, TRUE = scope completed.
+ *
+ * DESCRIPTION: Is parsing of current argument complete?  Determined by
+ *              1) AML pointer is at or beyond the end of the scope
+ *              2) The scope argument count has reached zero.
+ *
+ ******************************************************************************/
+
+u8 acpi_ps_has_completed_scope(struct acpi_parse_state * parser_state)
+{
+
+	return ((u8)
+		((parser_state->aml >= parser_state->scope->parse_scope.arg_end
+		  || !parser_state->scope->parse_scope.arg_count)));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_init_scope
+ *
+ * PARAMETERS:  parser_state        - Current parser state object
+ *              Root                - the Root Node of this new scope
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Allocate and init a new scope object
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_init_scope(struct acpi_parse_state * parser_state,
+		   union acpi_parse_object * root_op)
+{
+	union acpi_generic_state *scope;
+
+	ACPI_FUNCTION_TRACE_PTR(ps_init_scope, root_op);
+
+	scope = acpi_ut_create_generic_state();
+	if (!scope) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_RPSCOPE;
+	scope->parse_scope.op = root_op;
+	scope->parse_scope.arg_count = ACPI_VAR_ARGS;
+	scope->parse_scope.arg_end = parser_state->aml_end;
+	scope->parse_scope.pkg_end = parser_state->aml_end;
+
+	parser_state->scope = scope;
+	parser_state->start_op = root_op;
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_push_scope
+ *
+ * PARAMETERS:  parser_state        - Current parser state object
+ *              Op                  - Current op to be pushed
+ *              remaining_args      - List of args remaining
+ *              arg_count           - Fixed or variable number of args
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Push current op to begin parsing its argument
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_push_scope(struct acpi_parse_state *parser_state,
+		   union acpi_parse_object *op,
+		   u32 remaining_args, u32 arg_count)
+{
+	union acpi_generic_state *scope;
+
+	ACPI_FUNCTION_TRACE_PTR(ps_push_scope, op);
+
+	scope = acpi_ut_create_generic_state();
+	if (!scope) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_PSCOPE;
+	scope->parse_scope.op = op;
+	scope->parse_scope.arg_list = remaining_args;
+	scope->parse_scope.arg_count = arg_count;
+	scope->parse_scope.pkg_end = parser_state->pkg_end;
+
+	/* Push onto scope stack */
+
+	acpi_ut_push_generic_state(&parser_state->scope, scope);
+
+	if (arg_count == ACPI_VAR_ARGS) {
+
+		/* Multiple arguments */
+
+		scope->parse_scope.arg_end = parser_state->pkg_end;
+	} else {
+		/* Single argument */
+
+		scope->parse_scope.arg_end = ACPI_TO_POINTER(ACPI_MAX_PTR);
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_pop_scope
+ *
+ * PARAMETERS:  parser_state        - Current parser state object
+ *              Op                  - Where the popped op is returned
+ *              arg_list            - Where the popped "next argument" is
+ *                                    returned
+ *              arg_count           - Count of objects in arg_list
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Return to parsing a previous op
+ *
+ ******************************************************************************/
+
+void
+acpi_ps_pop_scope(struct acpi_parse_state *parser_state,
+		  union acpi_parse_object **op, u32 * arg_list, u32 * arg_count)
+{
+	union acpi_generic_state *scope = parser_state->scope;
+
+	ACPI_FUNCTION_TRACE(ps_pop_scope);
+
+	/* Only pop the scope if there is in fact a next scope */
+
+	if (scope->common.next) {
+		scope = acpi_ut_pop_generic_state(&parser_state->scope);
+
+		/* Return to parsing previous op */
+
+		*op = scope->parse_scope.op;
+		*arg_list = scope->parse_scope.arg_list;
+		*arg_count = scope->parse_scope.arg_count;
+		parser_state->pkg_end = scope->parse_scope.pkg_end;
+
+		/* All done with this scope state structure */
+
+		acpi_ut_delete_generic_state(scope);
+	} else {
+		/* Empty parse stack, prepare to fetch next opcode */
+
+		*op = NULL;
+		*arg_list = 0;
+		*arg_count = 0;
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+			  "Popped Op %p Args %X\n", *op, *arg_count));
+	return_VOID;
+}
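+
+/*
+ * Editor's note - illustrative sketch only, not part of this patch:
+ * typical pairing of the push/pop calls around parsing an op's
+ * argument list. The zero argument values are placeholders.
+ */
+static void example_scope_nesting(struct acpi_parse_state *parser_state,
+				  union acpi_parse_object *op)
+{
+	u32 arg_list = 0;
+	u32 arg_count = 0;
+
+	if (ACPI_FAILURE(acpi_ps_push_scope(parser_state, op,
+					    arg_list, arg_count))) {
+		return;
+	}
+
+	/* ... parse the op's arguments here ... */
+
+	acpi_ps_pop_scope(parser_state, &op, &arg_list, &arg_count);
+}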
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_cleanup_scope
+ *
+ * PARAMETERS:  parser_state        - Current parser state object
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Delete any scope state objects remaining on the parser
+ *              scope stack
+ *
+ ******************************************************************************/
+
+void acpi_ps_cleanup_scope(struct acpi_parse_state *parser_state)
+{
+	union acpi_generic_state *scope;
+
+	ACPI_FUNCTION_TRACE_PTR(ps_cleanup_scope, parser_state);
+
+	if (!parser_state) {
+		return_VOID;
+	}
+
+	/* Delete anything on the scope stack */
+
+	while (parser_state->scope) {
+		scope = acpi_ut_pop_generic_state(&parser_state->scope);
+		acpi_ut_delete_generic_state(scope);
+	}
+
+	return_VOID;
+}
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
new file mode 100644
index 0000000..4d33891
--- /dev/null
+++ b/drivers/acpi/acpica/pstree.c
@@ -0,0 +1,312 @@
+/******************************************************************************
+ *
+ * Module Name: pstree - Parser op tree manipulation/traversal/search
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "amlcode.h"
+
+#define _COMPONENT          ACPI_PARSER
+ACPI_MODULE_NAME("pstree")
+
+/* Local prototypes */
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op);
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_arg
+ *
+ * PARAMETERS:  Op              - Get an argument for this op
+ *              Argn            - Nth argument to get
+ *
+ * RETURN:      The argument (as an Op object). NULL if argument does not exist
+ *
+ * DESCRIPTION: Get the specified op's argument.
+ *
+ ******************************************************************************/
+
+union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
+{
+	union acpi_parse_object *arg = NULL;
+	const struct acpi_opcode_info *op_info;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/* Get the info structure for this opcode */
+
+	op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
+	if (op_info->class == AML_CLASS_UNKNOWN) {
+
+		/* Invalid opcode or ASCII character */
+
+		return (NULL);
+	}
+
+	/* Check if this opcode requires argument sub-objects */
+
+	if (!(op_info->flags & AML_HAS_ARGS)) {
+
+		/* Has no linked argument objects */
+
+		return (NULL);
+	}
+
+	/* Get the requested argument object */
+
+	arg = op->common.value.arg;
+	while (arg && argn) {
+		argn--;
+		arg = arg->common.next;
+	}
+
+	return (arg);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_append_arg
+ *
+ * PARAMETERS:  Op              - Append an argument to this Op.
+ *              Arg             - Argument Op to append
+ *
+ * RETURN:      None.
+ *
+ * DESCRIPTION: Append an argument to an op's argument list (a NULL arg is OK)
+ *
+ ******************************************************************************/
+
+void
+acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
+{
+	union acpi_parse_object *prev_arg;
+	const struct acpi_opcode_info *op_info;
+
+	ACPI_FUNCTION_ENTRY();
+
+	if (!op) {
+		return;
+	}
+
+	/* Get the info structure for this opcode */
+
+	op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
+	if (op_info->class == AML_CLASS_UNKNOWN) {
+
+		/* Invalid opcode */
+
+		ACPI_ERROR((AE_INFO, "Invalid AML Opcode: 0x%2.2X",
+			    op->common.aml_opcode));
+		return;
+	}
+
+	/* Check if this opcode requires argument sub-objects */
+
+	if (!(op_info->flags & AML_HAS_ARGS)) {
+
+		/* Has no linked argument objects */
+
+		return;
+	}
+
+	/* Append the argument to the linked argument list */
+
+	if (op->common.value.arg) {
+
+		/* Append to existing argument list */
+
+		prev_arg = op->common.value.arg;
+		while (prev_arg->common.next) {
+			prev_arg = prev_arg->common.next;
+		}
+		prev_arg->common.next = arg;
+	} else {
+		/* No argument list, this will be the first argument */
+
+		op->common.value.arg = arg;
+	}
+
+	/* Set the parent in this arg and any args linked after it */
+
+	while (arg) {
+		arg->common.parent = op;
+		arg = arg->common.next;
+
+		op->common.arg_list_length++;
+	}
+}
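+
+/*
+ * Editor's note - illustrative sketch only, not part of this patch:
+ * building a two-node parse fragment with the allocation helpers from
+ * the companion psutils.c/pswalk.c files. The opcodes are arbitrary
+ * examples (the parent just needs AML_HAS_ARGS), and the sketch
+ * assumes the parser object caches have been initialized.
+ */
+static void example_build_fragment(void)
+{
+	union acpi_parse_object *parent;
+	union acpi_parse_object *arg;
+
+	parent = acpi_ps_alloc_op(AML_PACKAGE_OP);
+	if (!parent) {
+		return;
+	}
+
+	arg = acpi_ps_alloc_op(AML_BYTE_OP);
+	if (!arg) {
+		acpi_ps_free_op(parent);
+		return;
+	}
+
+	acpi_ps_append_arg(parent, arg);
+
+	/* acpi_ps_get_arg(parent, 0) now returns "arg" */
+
+	acpi_ps_delete_parse_tree(parent);	/* frees both ops */
+}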
+
+#ifdef ACPI_FUTURE_USAGE
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_depth_next
+ *
+ * PARAMETERS:  Origin          - Root of subtree to search
+ *              Op              - Last (previous) Op that was found
+ *
+ * RETURN:      Next Op found in the search.
+ *
+ * DESCRIPTION: Get next op in tree (walking the tree in depth-first order)
+ *              Return NULL when reaching "origin" or when walking up from root
+ *
+ ******************************************************************************/
+
+union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin,
+						union acpi_parse_object *op)
+{
+	union acpi_parse_object *next = NULL;
+	union acpi_parse_object *parent;
+	union acpi_parse_object *arg;
+
+	ACPI_FUNCTION_ENTRY();
+
+	if (!op) {
+		return (NULL);
+	}
+
+	/* Look for an argument or child */
+
+	next = acpi_ps_get_arg(op, 0);
+	if (next) {
+		return (next);
+	}
+
+	/* Look for a sibling */
+
+	next = op->common.next;
+	if (next) {
+		return (next);
+	}
+
+	/* Look for a sibling of parent */
+
+	parent = op->common.parent;
+
+	while (parent) {
+		arg = acpi_ps_get_arg(parent, 0);
+		while (arg && (arg != origin) && (arg != op)) {
+			arg = arg->common.next;
+		}
+
+		if (arg == origin) {
+
+			/* Reached parent of origin, end search */
+
+			return (NULL);
+		}
+
+		if (parent->common.next) {
+
+			/* Found sibling of parent */
+
+			return (parent->common.next);
+		}
+
+		op = parent;
+		parent = parent->common.parent;
+	}
+
+	return (next);
+}
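+
+/*
+ * Editor's note - illustrative sketch only, not part of this patch:
+ * a depth-first visit of a parse tree using the helper above, assuming
+ * "root" is the topmost op of its tree (no siblings of its own).
+ */
+static void example_walk_tree(union acpi_parse_object *root)
+{
+	union acpi_parse_object *op = root;
+
+	while (op) {
+		/* ... visit "op" here ... */
+
+		op = acpi_ps_get_depth_next(root, op);
+	}
+}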
+
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_child
+ *
+ * PARAMETERS:  Op              - Get the child of this Op
+ *
+ * RETURN:      Child Op, Null if none is found.
+ *
+ * DESCRIPTION: Get op's children or NULL if none
+ *
+ ******************************************************************************/
+
+union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op)
+{
+	union acpi_parse_object *child = NULL;
+
+	ACPI_FUNCTION_ENTRY();
+
+	switch (op->common.aml_opcode) {
+	case AML_SCOPE_OP:
+	case AML_ELSE_OP:
+	case AML_DEVICE_OP:
+	case AML_THERMAL_ZONE_OP:
+	case AML_INT_METHODCALL_OP:
+
+		child = acpi_ps_get_arg(op, 0);
+		break;
+
+	case AML_BUFFER_OP:
+	case AML_PACKAGE_OP:
+	case AML_METHOD_OP:
+	case AML_IF_OP:
+	case AML_WHILE_OP:
+	case AML_FIELD_OP:
+
+		child = acpi_ps_get_arg(op, 1);
+		break;
+
+	case AML_POWER_RES_OP:
+	case AML_INDEX_FIELD_OP:
+
+		child = acpi_ps_get_arg(op, 2);
+		break;
+
+	case AML_PROCESSOR_OP:
+	case AML_BANK_FIELD_OP:
+
+		child = acpi_ps_get_arg(op, 3);
+		break;
+
+	default:
+		/* All others have no children */
+		break;
+	}
+
+	return (child);
+}
+#endif
+#endif				/*  ACPI_FUTURE_USAGE  */
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
new file mode 100644
index 0000000..e636e07
--- /dev/null
+++ b/drivers/acpi/acpica/psutils.c
@@ -0,0 +1,244 @@
+/******************************************************************************
+ *
+ * Module Name: psutils - Parser miscellaneous utilities (Parser only)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "amlcode.h"
+
+#define _COMPONENT          ACPI_PARSER
+ACPI_MODULE_NAME("psutils")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_create_scope_op
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      A new Scope object, null on failure
+ *
+ * DESCRIPTION: Create a Scope and associated namepath op with the root name
+ *
+ ******************************************************************************/
+union acpi_parse_object *acpi_ps_create_scope_op(void)
+{
+	union acpi_parse_object *scope_op;
+
+	scope_op = acpi_ps_alloc_op(AML_SCOPE_OP);
+	if (!scope_op) {
+		return (NULL);
+	}
+
+	scope_op->named.name = ACPI_ROOT_NAME;
+	return (scope_op);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_init_op
+ *
+ * PARAMETERS:  Op              - A newly allocated Op object
+ *              Opcode          - Opcode to store in the Op
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Initialize a parse (Op) object
+ *
+ ******************************************************************************/
+
+void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	op->common.descriptor_type = ACPI_DESC_TYPE_PARSER;
+	op->common.aml_opcode = opcode;
+
+	ACPI_DISASM_ONLY_MEMBERS(ACPI_STRNCPY(op->common.aml_op_name,
+					      (acpi_ps_get_opcode_info
+					       (opcode))->name,
+					      sizeof(op->common.aml_op_name)));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_alloc_op
+ *
+ * PARAMETERS:  Opcode          - Opcode that will be stored in the new Op
+ *
+ * RETURN:      Pointer to the new Op, null on failure
+ *
+ * DESCRIPTION: Allocate an acpi_op, choose op type (and thus size) based on
+ *              opcode.  A cache of opcodes is available for the pure
+ *              GENERIC_OP, since this is by far the most commonly used.
+ *
+ ******************************************************************************/
+
+union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
+{
+	union acpi_parse_object *op;
+	const struct acpi_opcode_info *op_info;
+	u8 flags = ACPI_PARSEOP_GENERIC;
+
+	ACPI_FUNCTION_ENTRY();
+
+	op_info = acpi_ps_get_opcode_info(opcode);
+
+	/* Determine type of parse_op required */
+
+	if (op_info->flags & AML_DEFER) {
+		flags = ACPI_PARSEOP_DEFERRED;
+	} else if (op_info->flags & AML_NAMED) {
+		flags = ACPI_PARSEOP_NAMED;
+	} else if (opcode == AML_INT_BYTELIST_OP) {
+		flags = ACPI_PARSEOP_BYTELIST;
+	}
+
+	/* Allocate the minimum required size object */
+
+	if (flags == ACPI_PARSEOP_GENERIC) {
+
+		/* The generic op (default) is by far the most common (16 to 1) */
+
+		op = acpi_os_acquire_object(acpi_gbl_ps_node_cache);
+	} else {
+		/* Extended parseop */
+
+		op = acpi_os_acquire_object(acpi_gbl_ps_node_ext_cache);
+	}
+
+	/* Initialize the Op */
+
+	if (op) {
+		acpi_ps_init_op(op, opcode);
+		op->common.flags = flags;
+	}
+
+	return (op);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_free_op
+ *
+ * PARAMETERS:  Op              - Op to be freed
+ *
+ * RETURN:      None.
+ *
+ * DESCRIPTION: Free an Op object.  Either put it on the GENERIC_OP cache list
+ *              or actually free it.
+ *
+ ******************************************************************************/
+
+void acpi_ps_free_op(union acpi_parse_object *op)
+{
+	ACPI_FUNCTION_NAME(ps_free_op);
+
+	if (op->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Free retval op: %p\n",
+				  op));
+	}
+
+	if (op->common.flags & ACPI_PARSEOP_GENERIC) {
+		(void)acpi_os_release_object(acpi_gbl_ps_node_cache, op);
+	} else {
+		(void)acpi_os_release_object(acpi_gbl_ps_node_ext_cache, op);
+	}
+}
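+
+/*
+ * Editor's note - illustrative sketch only, not part of this patch:
+ * an allocate/free pair. The op is returned to whichever cache it came
+ * from, based on the ACPI_PARSEOP_* flag set at allocation time. The
+ * opcode is an arbitrary example.
+ */
+static void example_alloc_and_free(void)
+{
+	union acpi_parse_object *op;
+
+	op = acpi_ps_alloc_op(AML_IF_OP);
+	if (!op) {
+		return;
+	}
+
+	acpi_ps_free_op(op);
+}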
+
+/*******************************************************************************
+ *
+ * FUNCTION:    Utility functions
+ *
+ * DESCRIPTION: Low level character and object functions
+ *
+ ******************************************************************************/
+
+/*
+ * Is "c" a namestring lead character?
+ */
+u8 acpi_ps_is_leading_char(u32 c)
+{
+	return ((u8) (c == '_' || (c >= 'A' && c <= 'Z')));
+}
+
+/*
+ * Is "c" a namestring prefix character?
+ */
+u8 acpi_ps_is_prefix_char(u32 c)
+{
+	return ((u8) (c == '\\' || c == '^'));
+}
+
+/*
+ * Get op's name (4-byte name segment) or 0 if unnamed
+ */
+#ifdef ACPI_FUTURE_USAGE
+u32 acpi_ps_get_name(union acpi_parse_object * op)
+{
+
+	/* The "generic" object has no name associated with it */
+
+	if (op->common.flags & ACPI_PARSEOP_GENERIC) {
+		return (0);
+	}
+
+	/* Only the "Extended" parse objects have a name */
+
+	return (op->named.name);
+}
+#endif				/*  ACPI_FUTURE_USAGE  */
+
+/*
+ * Set op's name
+ */
+void acpi_ps_set_name(union acpi_parse_object *op, u32 name)
+{
+
+	/* The "generic" object has no name associated with it */
+
+	if (op->common.flags & ACPI_PARSEOP_GENERIC) {
+		return;
+	}
+
+	op->named.name = name;
+}
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
new file mode 100644
index 0000000..78b8b79
--- /dev/null
+++ b/drivers/acpi/acpica/pswalk.c
@@ -0,0 +1,110 @@
+/******************************************************************************
+ *
+ * Module Name: pswalk - Parser routines to walk parsed op tree(s)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+
+#define _COMPONENT          ACPI_PARSER
+ACPI_MODULE_NAME("pswalk")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_delete_parse_tree
+ *
+ * PARAMETERS:  subtree_root        - Root of tree (or subtree) to delete
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Delete a portion of or an entire parse tree.
+ *
+ ******************************************************************************/
+void acpi_ps_delete_parse_tree(union acpi_parse_object *subtree_root)
+{
+	union acpi_parse_object *op = subtree_root;
+	union acpi_parse_object *next = NULL;
+	union acpi_parse_object *parent = NULL;
+
+	ACPI_FUNCTION_TRACE_PTR(ps_delete_parse_tree, subtree_root);
+
+	/* Visit all nodes in the subtree */
+
+	while (op) {
+
+		/* Check if we are not ascending */
+
+		if (op != parent) {
+
+			/* Look for an argument or child of the current op */
+
+			next = acpi_ps_get_arg(op, 0);
+			if (next) {
+
+				/* Still going downward in tree (Op is not completed yet) */
+
+				op = next;
+				continue;
+			}
+		}
+
+		/* No more children, this Op is complete. */
+
+		next = op->common.next;
+		parent = op->common.parent;
+
+		acpi_ps_free_op(op);
+
+		/* If we are back to the starting point, the walk is complete. */
+
+		if (op == subtree_root) {
+			return_VOID;
+		}
+		if (next) {
+			op = next;
+		} else {
+			op = parent;
+		}
+	}
+
+	return_VOID;
+}
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
new file mode 100644
index 0000000..ff06032
--- /dev/null
+++ b/drivers/acpi/acpica/psxface.c
@@ -0,0 +1,385 @@
+/******************************************************************************
+ *
+ * Module Name: psxface - Parser external interfaces
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "amlcode.h"
+
+#define _COMPONENT          ACPI_PARSER
+ACPI_MODULE_NAME("psxface")
+
+/* Local Prototypes */
+static void acpi_ps_start_trace(struct acpi_evaluate_info *info);
+
+static void acpi_ps_stop_trace(struct acpi_evaluate_info *info);
+
+static void
+acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_debug_trace
+ *
+ * PARAMETERS:  method_name     - Valid ACPI name string
+ *              debug_level     - Optional level mask. 0 to use default
+ *              debug_layer     - Optional layer mask. 0 to use default
+ *              Flags           - Bit 0: one-shot (1) or persistent (0) trace
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: External interface to enable debug tracing during control
+ *              method execution
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
+{
+	acpi_status status;
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* TBDs: Validate name, allow full path or just nameseg */
+
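+	/*
+	 * An ACPI name segment is exactly four characters, so the name is
+	 * stored as a single u32 for fast comparison at method execution time.
+	 */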
+	acpi_gbl_trace_method_name = *ACPI_CAST_PTR(u32, name);
+	acpi_gbl_trace_flags = flags;
+
+	if (debug_level) {
+		acpi_gbl_trace_dbg_level = debug_level;
+	}
+	if (debug_layer) {
+		acpi_gbl_trace_dbg_layer = debug_layer;
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return (AE_OK);
+}
+
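+/*
+ * Example: a one-shot trace of a (hypothetical) control method named "_BST",
+ * using the default debug level and layer:
+ *
+ *     (void)acpi_debug_trace("_BST", 0, 0, 1);
+ */
+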
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_start_trace
+ *
+ * PARAMETERS:  Info        - Method info struct
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Start control method execution trace
+ *
+ ******************************************************************************/
+
+static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_ENTRY();
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return;
+	}
+
+	if ((!acpi_gbl_trace_method_name) ||
+	    (acpi_gbl_trace_method_name != info->resolved_node->name.integer)) {
+		goto exit;
+	}
+
+	acpi_gbl_original_dbg_level = acpi_dbg_level;
+	acpi_gbl_original_dbg_layer = acpi_dbg_layer;
+
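+	/* Enable full debug output, then apply any user-specified masks */
+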
+	acpi_dbg_level = 0x00FFFFFF;
+	acpi_dbg_layer = ACPI_UINT32_MAX;
+
+	if (acpi_gbl_trace_dbg_level) {
+		acpi_dbg_level = acpi_gbl_trace_dbg_level;
+	}
+	if (acpi_gbl_trace_dbg_layer) {
+		acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
+	}
+
+      exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_stop_trace
+ *
+ * PARAMETERS:  Info        - Method info struct
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Stop control method execution trace
+ *
+ ******************************************************************************/
+
+static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_ENTRY();
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return;
+	}
+
+	if ((!acpi_gbl_trace_method_name) ||
+	    (acpi_gbl_trace_method_name != info->resolved_node->name.integer)) {
+		goto exit;
+	}
+
+	/* Disable further tracing if type is one-shot */
+
+	if (acpi_gbl_trace_flags & 1) {
+		acpi_gbl_trace_method_name = 0;
+		acpi_gbl_trace_dbg_level = 0;
+		acpi_gbl_trace_dbg_layer = 0;
+	}
+
+	acpi_dbg_level = acpi_gbl_original_dbg_level;
+	acpi_dbg_layer = acpi_gbl_original_dbg_layer;
+
+      exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_execute_method
+ *
+ * PARAMETERS:  Info            - Method info block, contains:
+ *                  Node            - Method Node to execute
+ *                  obj_desc        - Method object
+ *                  Parameters      - List of parameters to pass to the method,
+ *                                    terminated by NULL. Params itself may be
+ *                                    NULL if no parameters are being passed.
+ *                  return_object   - Where to put method's return value (if
+ *                                    any). If NULL, no value is returned.
+ *                  parameter_type  - Type of Parameter list
+ *                  pass_number     - Parse or execute pass
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Execute a control method
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
+{
+	acpi_status status;
+	union acpi_parse_object *op;
+	struct acpi_walk_state *walk_state;
+
+	ACPI_FUNCTION_TRACE(ps_execute_method);
+
+	/* Validate the Info and method Node */
+
+	if (!info || !info->resolved_node) {
+		return_ACPI_STATUS(AE_NULL_ENTRY);
+	}
+
+	/* Init for new method, wait on concurrency semaphore */
+
+	status =
+	    acpi_ds_begin_method_execution(info->resolved_node, info->obj_desc,
+					   NULL);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * The caller "owns" the parameters, so give each one an extra reference
+	 */
+	acpi_ps_update_parameter_list(info, REF_INCREMENT);
+
+	/* Begin tracing if requested */
+
+	acpi_ps_start_trace(info);
+
+	/*
+	 * Execute the method. The AML is parsed and executed in a single pass.
+	 */
+	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+			  "**** Begin Method Parse/Execute [%4.4s] **** Node=%p Obj=%p\n",
+			  info->resolved_node->name.ascii, info->resolved_node,
+			  info->obj_desc));
+
+	/* Create and init a Root Node */
+
+	op = acpi_ps_create_scope_op();
+	if (!op) {
+		status = AE_NO_MEMORY;
+		goto cleanup;
+	}
+
+	/* Create and initialize a new walk state */
+
+	info->pass_number = ACPI_IMODE_EXECUTE;
+	walk_state =
+	    acpi_ds_create_walk_state(info->obj_desc->method.owner_id, NULL,
+				      NULL, NULL);
+	if (!walk_state) {
+		status = AE_NO_MEMORY;
+		goto cleanup;
+	}
+
+	status = acpi_ds_init_aml_walk(walk_state, op, info->resolved_node,
+				       info->obj_desc->method.aml_start,
+				       info->obj_desc->method.aml_length, info,
+				       info->pass_number);
+	if (ACPI_FAILURE(status)) {
+		acpi_ds_delete_walk_state(walk_state);
+		goto cleanup;
+	}
+
+	/* Invoke an internal method if necessary */
+
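+	/* (Such methods, e.g. _OSI, are implemented in C within ACPICA) */
+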
+	if (info->obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
+		status = info->obj_desc->method.implementation(walk_state);
+		info->return_object = walk_state->return_desc;
+
+		/* Cleanup states */
+
+		acpi_ds_scope_stack_clear(walk_state);
+		acpi_ps_cleanup_scope(&walk_state->parser_state);
+		acpi_ds_terminate_control_method(walk_state->method_desc,
+						 walk_state);
+		acpi_ds_delete_walk_state(walk_state);
+		goto cleanup;
+	}
+
+	/*
+	 * Start method evaluation with an implicit return of zero.
+	 * This is done for Windows compatibility.
+	 */
+	if (acpi_gbl_enable_interpreter_slack) {
+		walk_state->implicit_return_obj =
+		    acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+		if (!walk_state->implicit_return_obj) {
+			status = AE_NO_MEMORY;
+			acpi_ds_delete_walk_state(walk_state);
+			goto cleanup;
+		}
+
+		walk_state->implicit_return_obj->integer.value = 0;
+	}
+
+	/* Parse the AML */
+
+	status = acpi_ps_parse_aml(walk_state);
+
+	/* walk_state was deleted by parse_aml */
+
+      cleanup:
+	acpi_ps_delete_parse_tree(op);
+
+	/* End optional tracing */
+
+	acpi_ps_stop_trace(info);
+
+	/* Take away the extra reference that we gave the parameters above */
+
+	acpi_ps_update_parameter_list(info, REF_DECREMENT);
+
+	/* Exit now if error above */
+
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * If the method has returned an object, signal this to the caller with
+	 * a control exception code
+	 */
+	if (info->return_object) {
+		ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Method returned ObjDesc=%p\n",
+				  info->return_object));
+		ACPI_DUMP_STACK_ENTRY(info->return_object);
+
+		status = AE_CTRL_RETURN_VALUE;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_update_parameter_list
+ *
+ * PARAMETERS:  Info            - See struct acpi_evaluate_info
+ *                                (Used: parameter_type and Parameters)
+ *              Action          - Add or Remove reference
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Update reference count on all method parameter objects
+ *
+ ******************************************************************************/
+
+static void
+acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action)
+{
+	u32 i;
+
+	if (info->parameters) {
+
+		/* Update reference count for each parameter */
+
+		for (i = 0; info->parameters[i]; i++) {
+
+			/* Ignore errors, just do them all */
+
+			(void)acpi_ut_update_object_reference(info->
+							      parameters[i],
+							      action);
+		}
+	}
+}
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
new file mode 100644
index 0000000..1e437bf
--- /dev/null
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -0,0 +1,381 @@
+/*******************************************************************************
+ *
+ * Module Name: rsaddr - Address resource descriptors (16/32/64)
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+
+#define _COMPONENT          ACPI_RESOURCES
+ACPI_MODULE_NAME("rsaddr")
+
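+/*
+ * Each table below drives ACPICA's generic resource conversion engine:
+ * the INITGET entry names the internal resource type and its size, the
+ * INITSET entry names the corresponding raw AML descriptor, and the
+ * remaining entries describe how individual fields and flag bits are
+ * moved between the two representations.
+ */
+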
+/*******************************************************************************
+ *
+ * acpi_rs_convert_address16 - All WORD (16-bit) address resources
+ *
+ ******************************************************************************/
+struct acpi_rsconvert_info acpi_rs_convert_address16[5] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_ADDRESS16,
+	 ACPI_RS_SIZE(struct acpi_resource_address16),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_address16)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_ADDRESS16,
+	 sizeof(struct aml_resource_address16),
+	 0},
+
+	/* Resource Type, General Flags, and Type-Specific Flags */
+
+	{ACPI_RSC_ADDRESS, 0, 0, 0},
+
+	/*
+	 * These fields are contiguous in both the source and destination:
+	 * Address Granularity
+	 * Address Range Minimum
+	 * Address Range Maximum
+	 * Address Translation Offset
+	 * Address Length
+	 */
+	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.address16.granularity),
+	 AML_OFFSET(address16.granularity),
+	 5},
+
+	/* Optional resource_source (Index and String) */
+
+	{ACPI_RSC_SOURCE, ACPI_RS_OFFSET(data.address16.resource_source),
+	 0,
+	 sizeof(struct aml_resource_address16)}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_address32 - All DWORD (32-bit) address resources
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_address32[5] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_ADDRESS32,
+	 ACPI_RS_SIZE(struct acpi_resource_address32),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_address32)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_ADDRESS32,
+	 sizeof(struct aml_resource_address32),
+	 0},
+
+	/* Resource Type, General Flags, and Type-Specific Flags */
+
+	{ACPI_RSC_ADDRESS, 0, 0, 0},
+
+	/*
+	 * These fields are contiguous in both the source and destination:
+	 * Address Granularity
+	 * Address Range Minimum
+	 * Address Range Maximum
+	 * Address Translation Offset
+	 * Address Length
+	 */
+	{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.address32.granularity),
+	 AML_OFFSET(address32.granularity),
+	 5},
+
+	/* Optional resource_source (Index and String) */
+
+	{ACPI_RSC_SOURCE, ACPI_RS_OFFSET(data.address32.resource_source),
+	 0,
+	 sizeof(struct aml_resource_address32)}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_address64 - All QWORD (64-bit) address resources
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_address64[5] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_ADDRESS64,
+	 ACPI_RS_SIZE(struct acpi_resource_address64),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_address64)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_ADDRESS64,
+	 sizeof(struct aml_resource_address64),
+	 0},
+
+	/* Resource Type, General Flags, and Type-Specific Flags */
+
+	{ACPI_RSC_ADDRESS, 0, 0, 0},
+
+	/*
+	 * These fields are contiguous in both the source and destination:
+	 * Address Granularity
+	 * Address Range Minimum
+	 * Address Range Maximum
+	 * Address Translation Offset
+	 * Address Length
+	 */
+	{ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.address64.granularity),
+	 AML_OFFSET(address64.granularity),
+	 5},
+
+	/* Optional resource_source (Index and String) */
+
+	{ACPI_RSC_SOURCE, ACPI_RS_OFFSET(data.address64.resource_source),
+	 0,
+	 sizeof(struct aml_resource_address64)}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_ext_address64 - All Extended (64-bit) address resources
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_ext_address64[5] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64,
+	 ACPI_RS_SIZE(struct acpi_resource_extended_address64),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_ext_address64)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64,
+	 sizeof(struct aml_resource_extended_address64),
+	 0},
+
+	/* Resource Type, General Flags, and Type-Specific Flags */
+
+	{ACPI_RSC_ADDRESS, 0, 0, 0},
+
+	/* Revision ID */
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.ext_address64.revision_iD),
+	 AML_OFFSET(ext_address64.revision_iD),
+	 1},
+	/*
+	 * These fields are contiguous in both the source and destination:
+	 * Address Granularity
+	 * Address Range Minimum
+	 * Address Range Maximum
+	 * Address Translation Offset
+	 * Address Length
+	 * Type-Specific Attribute
+	 */
+	{ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.ext_address64.granularity),
+	 AML_OFFSET(ext_address64.granularity),
+	 6}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_general_flags - Flags common to all address descriptors
+ *
+ ******************************************************************************/
+
+static struct acpi_rsconvert_info acpi_rs_convert_general_flags[6] = {
+	{ACPI_RSC_FLAGINIT, 0, AML_OFFSET(address.flags),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_general_flags)},
+
+	/* Resource Type (Memory, Io, bus_number, etc.) */
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.address.resource_type),
+	 AML_OFFSET(address.resource_type),
+	 1},
+
+	/* General Flags - Consume, Decode, min_fixed, max_fixed */
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.producer_consumer),
+	 AML_OFFSET(address.flags),
+	 0},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.decode),
+	 AML_OFFSET(address.flags),
+	 1},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.min_address_fixed),
+	 AML_OFFSET(address.flags),
+	 2},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.max_address_fixed),
+	 AML_OFFSET(address.flags),
+	 3}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_mem_flags - Flags common to Memory address descriptors
+ *
+ ******************************************************************************/
+
+static struct acpi_rsconvert_info acpi_rs_convert_mem_flags[5] = {
+	{ACPI_RSC_FLAGINIT, 0, AML_OFFSET(address.specific_flags),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_mem_flags)},
+
+	/* Memory-specific flags */
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.info.mem.write_protect),
+	 AML_OFFSET(address.specific_flags),
+	 0},
+
+	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.address.info.mem.caching),
+	 AML_OFFSET(address.specific_flags),
+	 1},
+
+	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.address.info.mem.range_type),
+	 AML_OFFSET(address.specific_flags),
+	 3},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.info.mem.translation),
+	 AML_OFFSET(address.specific_flags),
+	 5}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_io_flags - Flags common to I/O address descriptors
+ *
+ ******************************************************************************/
+
+static struct acpi_rsconvert_info acpi_rs_convert_io_flags[4] = {
+	{ACPI_RSC_FLAGINIT, 0, AML_OFFSET(address.specific_flags),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_io_flags)},
+
+	/* I/O-specific flags */
+
+	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.address.info.io.range_type),
+	 AML_OFFSET(address.specific_flags),
+	 0},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.info.io.translation),
+	 AML_OFFSET(address.specific_flags),
+	 4},
+
+	{ACPI_RSC_1BITFLAG,
+	 ACPI_RS_OFFSET(data.address.info.io.translation_type),
+	 AML_OFFSET(address.specific_flags),
+	 5}
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_get_address_common
+ *
+ * PARAMETERS:  Resource            - Pointer to the internal resource struct
+ *              Aml                 - Pointer to the AML resource descriptor
+ *
+ * RETURN:      TRUE if the resource_type field is OK, FALSE otherwise
+ *
+ * DESCRIPTION: Convert common flag fields from a raw AML resource descriptor
+ *              to an internal resource descriptor
+ *
+ ******************************************************************************/
+
+u8
+acpi_rs_get_address_common(struct acpi_resource *resource,
+			   union aml_resource *aml)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	/* Validate the Resource Type */
+
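+	/* 0 = Memory, 1 = I/O, 2 = Bus number; 0xC0-0xFF = vendor defined */
+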
+	if ((aml->address.resource_type > 2)
+	    && (aml->address.resource_type < 0xC0)) {
+		return (FALSE);
+	}
+
+	/* Get the Resource Type and General Flags */
+
+	(void)acpi_rs_convert_aml_to_resource(resource, aml,
+					      acpi_rs_convert_general_flags);
+
+	/* Get the Type-Specific Flags (Memory and I/O descriptors only) */
+
+	if (resource->data.address.resource_type == ACPI_MEMORY_RANGE) {
+		(void)acpi_rs_convert_aml_to_resource(resource, aml,
+						      acpi_rs_convert_mem_flags);
+	} else if (resource->data.address.resource_type == ACPI_IO_RANGE) {
+		(void)acpi_rs_convert_aml_to_resource(resource, aml,
+						      acpi_rs_convert_io_flags);
+	} else {
+		/* Generic resource type, just grab the type_specific byte */
+
+		resource->data.address.info.type_specific =
+		    aml->address.specific_flags;
+	}
+
+	return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_set_address_common
+ *
+ * PARAMETERS:  Aml                 - Pointer to the AML resource descriptor
+ *              Resource            - Pointer to the internal resource struct
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Convert common flag fields from a resource descriptor to an
+ *              AML descriptor
+ *
+ ******************************************************************************/
+
+void
+acpi_rs_set_address_common(union aml_resource *aml,
+			   struct acpi_resource *resource)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	/* Set the Resource Type and General Flags */
+
+	(void)acpi_rs_convert_resource_to_aml(resource, aml,
+					      acpi_rs_convert_general_flags);
+
+	/* Set the Type-Specific Flags (Memory and I/O descriptors only) */
+
+	if (resource->data.address.resource_type == ACPI_MEMORY_RANGE) {
+		(void)acpi_rs_convert_resource_to_aml(resource, aml,
+						      acpi_rs_convert_mem_flags);
+	} else if (resource->data.address.resource_type == ACPI_IO_RANGE) {
+		(void)acpi_rs_convert_resource_to_aml(resource, aml,
+						      acpi_rs_convert_io_flags);
+	} else {
+		/* Generic resource type, just copy the type_specific byte */
+
+		aml->address.specific_flags =
+		    resource->data.address.info.type_specific;
+	}
+}
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
new file mode 100644
index 0000000..52865ee
--- /dev/null
+++ b/drivers/acpi/acpica/rscalc.c
@@ -0,0 +1,618 @@
+/*******************************************************************************
+ *
+ * Module Name: rscalc - Calculate stream and list lengths
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_RESOURCES
+ACPI_MODULE_NAME("rscalc")
+
+/* Local prototypes */
+static u8 acpi_rs_count_set_bits(u16 bit_field);
+
+static acpi_rs_length
+acpi_rs_struct_option_length(struct acpi_resource_source *resource_source);
+
+static u32
+acpi_rs_stream_option_length(u32 resource_length, u32 minimum_aml_resource_length);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_count_set_bits
+ *
+ * PARAMETERS:  bit_field       - Field in which to count bits
+ *
+ * RETURN:      Number of bits set within the field
+ *
+ * DESCRIPTION: Count the number of bits set in a resource field. Used for
+ *              (Short descriptor) interrupt and DMA lists.
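+ *              One loop iteration per set bit ("x &= x - 1" clears the
+ *              lowest set bit on each pass).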
+ *
+ ******************************************************************************/
+
+static u8 acpi_rs_count_set_bits(u16 bit_field)
+{
+	u8 bits_set;
+
+	ACPI_FUNCTION_ENTRY();
+
+	for (bits_set = 0; bit_field; bits_set++) {
+
+		/* Zero the least significant bit that is set */
+
+		bit_field &= (u16) (bit_field - 1);
+	}
+
+	return bits_set;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_struct_option_length
+ *
+ * PARAMETERS:  resource_source     - Pointer to optional descriptor field
+ *
+ * RETURN:      Length of the optional resource_source (0 if not present)
+ *
+ * DESCRIPTION: Common code to handle optional resource_source_index and
+ *              resource_source fields in some Large descriptors. Used during
+ *              list-to-stream conversion
+ *
+ ******************************************************************************/
+
+static acpi_rs_length
+acpi_rs_struct_option_length(struct acpi_resource_source *resource_source)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	/*
+	 * If the resource_source string is valid, return the size of the string
+	 * (string_length includes the NULL terminator) plus the size of the
+	 * resource_source_index (1).
+	 */
+	if (resource_source->string_ptr) {
+		return ((acpi_rs_length) (resource_source->string_length + 1));
+	}
+
+	return (0);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_stream_option_length
+ *
+ * PARAMETERS:  resource_length     - Length from the resource header
+ *              minimum_aml_resource_length - Minimum length of this resource
+ *                                    before any optional fields, including
+ *                                    the header size
+ *
+ * RETURN:      Length of optional string (0 if no string present)
+ *
+ * DESCRIPTION: Common code to handle optional resource_source_index and
+ *              resource_source fields in some Large descriptors. Used during
+ *              stream-to-list conversion
+ *
+ ******************************************************************************/
+
+static u32
+acpi_rs_stream_option_length(u32 resource_length,
+			     u32 minimum_aml_resource_length)
+{
+	u32 string_length = 0;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/*
+	 * The resource_source_index and resource_source are optional elements of some
+	 * Large-type resource descriptors.
+	 */
+
+	/*
+	 * If the length of the actual resource descriptor is greater than the ACPI
+	 * spec-defined minimum length, it means that a resource_source_index exists
+	 * and is followed by a (required) null terminated string. The string length
+	 * (including the null terminator) is the resource length minus the minimum
+	 * length, minus one byte for the resource_source_index itself.
+	 */
+	if (resource_length > minimum_aml_resource_length) {
+
+		/* Compute the length of the optional string */
+
+		string_length =
+		    resource_length - minimum_aml_resource_length - 1;
+	}
+
+	/*
+	 * Round the length up to a multiple of the native word in order to
+	 * guarantee that the entire resource descriptor is native word aligned
+	 */
+	return ((u32) ACPI_ROUND_UP_TO_NATIVE_WORD(string_length));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_get_aml_length
+ *
+ * PARAMETERS:  Resource            - Pointer to the resource linked list
+ *              size_needed         - Where the required size is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Takes a linked list of internal resource descriptors and
+ *              calculates the size buffer needed to hold the corresponding
+ *              external resource byte stream.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
+{
+	acpi_size aml_size_needed = 0;
+	acpi_rs_length total_size;
+
+	ACPI_FUNCTION_TRACE(rs_get_aml_length);
+
+	/* Traverse entire list of internal resource descriptors */
+
+	while (resource) {
+
+		/* Validate the descriptor type */
+
+		if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
+			return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE);
+		}
+
+		/* Get the base size of the (external stream) resource descriptor */
+
+		total_size = acpi_gbl_aml_resource_sizes[resource->type];
+
+		/*
+		 * Augment the base size for descriptors with optional and/or
+		 * variable-length fields
+		 */
+		switch (resource->type) {
+		case ACPI_RESOURCE_TYPE_IRQ:
+
+			/* Length can be 3 or 2 */
+
+			if (resource->data.irq.descriptor_length == 2) {
+				total_size--;
+			}
+			break;
+
+		case ACPI_RESOURCE_TYPE_START_DEPENDENT:
+
+			/* Length can be 1 or 0 */
+
+			if (resource->data.irq.descriptor_length == 0) {
+				total_size--;
+			}
+			break;
+
+		case ACPI_RESOURCE_TYPE_VENDOR:
+			/*
+			 * Vendor Defined Resource:
+			 * For a Vendor Specific resource, if the Length is between 1 and 7
+			 * it will be created as a Small Resource data type, otherwise it
+			 * is a Large Resource data type.
+			 */
+			if (resource->data.vendor.byte_length > 7) {
+
+				/* Base size of a Large resource descriptor */
+
+				total_size =
+				    sizeof(struct aml_resource_large_header);
+			}
+
+			/* Add the size of the vendor-specific data */
+
+			total_size = (acpi_rs_length)
+			    (total_size + resource->data.vendor.byte_length);
+			break;
+
+		case ACPI_RESOURCE_TYPE_END_TAG:
+			/*
+			 * End Tag:
+			 * We are done -- return the accumulated total size.
+			 */
+			*size_needed = aml_size_needed + total_size;
+
+			/* Normal exit */
+
+			return_ACPI_STATUS(AE_OK);
+
+		case ACPI_RESOURCE_TYPE_ADDRESS16:
+			/*
+			 * 16-Bit Address Resource:
+			 * Add the size of the optional resource_source info
+			 */
+			total_size = (acpi_rs_length)
+			    (total_size +
+			     acpi_rs_struct_option_length(&resource->data.
+							  address16.
+							  resource_source));
+			break;
+
+		case ACPI_RESOURCE_TYPE_ADDRESS32:
+			/*
+			 * 32-Bit Address Resource:
+			 * Add the size of the optional resource_source info
+			 */
+			total_size = (acpi_rs_length)
+			    (total_size +
+			     acpi_rs_struct_option_length(&resource->data.
+							  address32.
+							  resource_source));
+			break;
+
+		case ACPI_RESOURCE_TYPE_ADDRESS64:
+			/*
+			 * 64-Bit Address Resource:
+			 * Add the size of the optional resource_source info
+			 */
+			total_size = (acpi_rs_length)
+			    (total_size +
+			     acpi_rs_struct_option_length(&resource->data.
+							  address64.
+							  resource_source));
+			break;
+
+		case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+			/*
+			 * Extended IRQ Resource:
+			 * Add the size of each additional optional interrupt beyond the
+			 * required 1 (4 bytes for each u32 interrupt number)
+			 */
+			total_size = (acpi_rs_length)
+			    (total_size +
+			     ((resource->data.extended_irq.interrupt_count -
+			       1) * 4) +
+			     /* Add the size of the optional resource_source info */
+			     acpi_rs_struct_option_length(&resource->data.
+							  extended_irq.
+							  resource_source));
+			break;
+
+		default:
+			break;
+		}
+
+		/* Update the total */
+
+		aml_size_needed += total_size;
+
+		/* Point to the next object */
+
+		resource =
+		    ACPI_ADD_PTR(struct acpi_resource, resource,
+				 resource->length);
+	}
+
+	/* Did not find an end_tag resource descriptor */
+
+	return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_get_list_length
+ *
+ * PARAMETERS:  aml_buffer          - Pointer to the resource byte stream
+ *              aml_buffer_length   - Size of aml_buffer
+ *              size_needed         - Where the size needed is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Takes an external resource byte stream and calculates the size
+ *              buffer needed to hold the corresponding internal resource
+ *              descriptor linked list.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_list_length(u8 * aml_buffer,
+			u32 aml_buffer_length, acpi_size * size_needed)
+{
+	acpi_status status;
+	u8 *end_aml;
+	u8 *buffer;
+	u32 buffer_size;
+	u16 temp16;
+	u16 resource_length;
+	u32 extra_struct_bytes;
+	u8 resource_index;
+	u8 minimum_aml_resource_length;
+
+	ACPI_FUNCTION_TRACE(rs_get_list_length);
+
+	*size_needed = 0;
+	end_aml = aml_buffer + aml_buffer_length;
+
+	/* Walk the list of AML resource descriptors */
+
+	while (aml_buffer < end_aml) {
+
+		/* Validate the Resource Type and Resource Length */
+
+		status = acpi_ut_validate_resource(aml_buffer, &resource_index);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		/* Get the resource length and base (minimum) AML size */
+
+		resource_length = acpi_ut_get_resource_length(aml_buffer);
+		minimum_aml_resource_length =
+		    acpi_gbl_resource_aml_sizes[resource_index];
+
+		/*
+		 * Augment the size for descriptors with optional
+		 * and/or variable length fields
+		 */
+		extra_struct_bytes = 0;
+		buffer =
+		    aml_buffer + acpi_ut_get_resource_header_length(aml_buffer);
+
+		switch (acpi_ut_get_resource_type(aml_buffer)) {
+		case ACPI_RESOURCE_NAME_IRQ:
+			/*
+			 * IRQ Resource:
+			 * Get the number of bits set in the 16-bit IRQ mask
+			 */
+			ACPI_MOVE_16_TO_16(&temp16, buffer);
+			extra_struct_bytes = acpi_rs_count_set_bits(temp16);
+			break;
+
+		case ACPI_RESOURCE_NAME_DMA:
+			/*
+			 * DMA Resource:
+			 * Get the number of bits set in the 8-bit DMA mask
+			 */
+			extra_struct_bytes = acpi_rs_count_set_bits(*buffer);
+			break;
+
+		case ACPI_RESOURCE_NAME_VENDOR_SMALL:
+		case ACPI_RESOURCE_NAME_VENDOR_LARGE:
+			/*
+			 * Vendor Resource:
+			 * Get the number of vendor data bytes
+			 */
+			extra_struct_bytes = resource_length;
+			break;
+
+		case ACPI_RESOURCE_NAME_END_TAG:
+			/*
+			 * End Tag:
+			 * This is the normal exit, add size of end_tag
+			 */
+			*size_needed += ACPI_RS_SIZE_MIN;
+			return_ACPI_STATUS(AE_OK);
+
+		case ACPI_RESOURCE_NAME_ADDRESS32:
+		case ACPI_RESOURCE_NAME_ADDRESS16:
+		case ACPI_RESOURCE_NAME_ADDRESS64:
+			/*
+			 * Address Resource:
+			 * Add the size of the optional resource_source
+			 */
+			extra_struct_bytes =
+			    acpi_rs_stream_option_length(resource_length,
+							 minimum_aml_resource_length);
+			break;
+
+		case ACPI_RESOURCE_NAME_EXTENDED_IRQ:
+			/*
+			 * Extended IRQ Resource:
+			 * Using the interrupt_table_length, add 4 bytes for each additional
+			 * interrupt. Note: at least one interrupt is required and is
+			 * included in the minimum descriptor size (reason for the -1)
+			 */
+			extra_struct_bytes = (buffer[1] - 1) * sizeof(u32);
+
+			/* Add the size of the optional resource_source */
+
+			extra_struct_bytes +=
+			    acpi_rs_stream_option_length(resource_length -
+							 extra_struct_bytes,
+							 minimum_aml_resource_length);
+			break;
+
+		default:
+			break;
+		}
+
+		/*
+		 * Update the required buffer size for the internal descriptor structs
+		 *
+		 * Important: Round the size up for the appropriate alignment. This
+		 * is a requirement on IA64.
+		 */
+		buffer_size = acpi_gbl_resource_struct_sizes[resource_index] +
+		    extra_struct_bytes;
+		buffer_size = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
+
+		*size_needed += buffer_size;
+
+		ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
+				  "Type %.2X, AmlLength %.2X InternalLength %.2X\n",
+				  acpi_ut_get_resource_type(aml_buffer),
+				  acpi_ut_get_descriptor_length(aml_buffer),
+				  buffer_size));
+
+		/*
+		 * Point to the next resource within the AML stream using the length
+		 * contained in the resource descriptor header
+		 */
+		aml_buffer += acpi_ut_get_descriptor_length(aml_buffer);
+	}
+
+	/* Did not find an end_tag resource descriptor */
+
+	return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_get_pci_routing_table_length
+ *
+ * PARAMETERS:  package_object          - Pointer to the package object
+ *              buffer_size_needed      - Where the size of the buffer needed
+ *                                        to hold the parsed data is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Given a package representing a PCI routing table, this
+ *              calculates the size of the corresponding linked list of
+ *              descriptions.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
+				     acpi_size * buffer_size_needed)
+{
+	u32 number_of_elements;
+	acpi_size temp_size_needed = 0;
+	union acpi_operand_object **top_object_list;
+	u32 index;
+	union acpi_operand_object *package_element;
+	union acpi_operand_object **sub_object_list;
+	u8 name_found;
+	u32 table_index;
+
+	ACPI_FUNCTION_TRACE(rs_get_pci_routing_table_length);
+
+	number_of_elements = package_object->package.count;
+
+	/*
+	 * Calculate the size of the return buffer.
+	 * The base size is the number of elements * the sizes of the
+	 * structures.  Additional space for the strings is added below.
+	 * The minus four is to subtract the size of the u8 Source[4]
+	 * member because it is added below.
+	 *
+	 * But each PRT_ENTRY structure has a pointer to a string and
+	 * the size of that string must be found.
+	 */
+	top_object_list = package_object->package.elements;
+
+	for (index = 0; index < number_of_elements; index++) {
+
+		/* Dereference the sub-package */
+
+		package_element = *top_object_list;
+
+		/*
+		 * The sub_object_list will now point to an array of the
+		 * four IRQ elements: Address, Pin, Source and source_index
+		 */
+		sub_object_list = package_element->package.elements;
+
+		/* Scan the irq_table_elements for the Source Name String */
+
+		name_found = FALSE;
+
+		for (table_index = 0; table_index < 4 && !name_found;
+		     table_index++) {
+			if (*sub_object_list &&	/* Null object allowed */
+			    ((ACPI_TYPE_STRING ==
+			      ACPI_GET_OBJECT_TYPE(*sub_object_list)) ||
+			     ((ACPI_TYPE_LOCAL_REFERENCE ==
+			       ACPI_GET_OBJECT_TYPE(*sub_object_list)) &&
+			      ((*sub_object_list)->reference.class ==
+			       ACPI_REFCLASS_NAME)))) {
+				name_found = TRUE;
+			} else {
+				/* Look at the next element */
+
+				sub_object_list++;
+			}
+		}
+
+		temp_size_needed += (sizeof(struct acpi_pci_routing_table) - 4);
+
+		/* Was a String type found? */
+
+		if (name_found) {
+			if (ACPI_GET_OBJECT_TYPE(*sub_object_list) ==
+			    ACPI_TYPE_STRING) {
+				/*
+				 * The String.Length field does not include the
+				 * terminating NULL, so add 1
+				 */
+				temp_size_needed += ((acpi_size)
+						     (*sub_object_list)->string.
+						     length + 1);
+			} else {
+				temp_size_needed +=
+				    acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
+				if (!temp_size_needed) {
+					return_ACPI_STATUS(AE_BAD_PARAMETER);
+				}
+			}
+		} else {
+			/*
+			 * If no name was found, then this is a NULL, which is
+			 * translated as a u32 zero.
+			 */
+			temp_size_needed += sizeof(u32);
+		}
+
+		/* Round up the size since each element must be aligned */
+
+		temp_size_needed = ACPI_ROUND_UP_TO_64BIT(temp_size_needed);
+
+		/* Point to the next union acpi_operand_object */
+
+		top_object_list++;
+	}
+
+	/*
+	 * Add an extra element to the end of the list, essentially a
+	 * NULL terminator
+	 */
+	*buffer_size_needed =
+	    temp_size_needed + sizeof(struct acpi_pci_routing_table);
+	return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
new file mode 100644
index 0000000..61566b1
--- /dev/null
+++ b/drivers/acpi/acpica/rscreate.c
@@ -0,0 +1,468 @@
+/*******************************************************************************
+ *
+ * Module Name: rscreate - Create resource lists/tables
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_RESOURCES
+ACPI_MODULE_NAME("rscreate")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_create_resource_list
+ *
+ * PARAMETERS:  aml_buffer          - Pointer to the resource byte stream
+ *              output_buffer       - Pointer to the user's buffer
+ *
+ * RETURN:      Status: AE_OK if okay, else a valid acpi_status code
+ *              If output_buffer is not large enough, output_buffer_length
+ *              indicates how large output_buffer should be, else it
+ *              indicates how many u8 elements of output_buffer are valid.
+ *
+ * DESCRIPTION: Takes the byte stream returned from a _CRS, _PRS control method
+ *              execution and parses the stream to create a linked list
+ *              of device resources.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
+			     struct acpi_buffer *output_buffer)
+{
+
+	acpi_status status;
+	u8 *aml_start;
+	acpi_size list_size_needed = 0;
+	u32 aml_buffer_length;
+	void *resource;
+
+	ACPI_FUNCTION_TRACE(rs_create_resource_list);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlBuffer = %p\n", aml_buffer));
+
+	/* Params already validated, so we don't re-validate here */
+
+	aml_buffer_length = aml_buffer->buffer.length;
+	aml_start = aml_buffer->buffer.pointer;
+
+	/*
+	 * Pass the aml_buffer into a module that can calculate
+	 * the buffer size needed for the linked list
+	 */
+	status = acpi_rs_get_list_length(aml_start, aml_buffer_length,
+					 &list_size_needed);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Status=%X ListSizeNeeded=%X\n",
+			  status, (u32) list_size_needed));
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Validate/Allocate/Clear caller buffer */
+
+	status = acpi_ut_initialize_buffer(output_buffer, list_size_needed);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Do the conversion */
+
+	resource = output_buffer->pointer;
+	status = acpi_ut_walk_aml_resources(aml_start, aml_buffer_length,
+					    acpi_rs_convert_aml_to_resources,
+					    &resource);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
+			  output_buffer->pointer, (u32) output_buffer->length));
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_create_pci_routing_table
+ *
+ * PARAMETERS:  package_object          - Pointer to a union acpi_operand_object
+ *                                        package
+ *              output_buffer           - Pointer to the user's buffer
+ *
+ * RETURN:      Status  AE_OK if okay, else a valid acpi_status code.
+ *              If the output_buffer is too small, the error will be
+ *              AE_BUFFER_OVERFLOW and output_buffer->Length will point
+ *              to the size buffer needed.
+ *
+ * DESCRIPTION: Takes the union acpi_operand_object package and creates a
+ *              linked list of PCI interrupt descriptions
+ *
+ * NOTE: It is the caller's responsibility to ensure that the start of the
+ * output buffer is aligned properly (if necessary).
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
+				 struct acpi_buffer *output_buffer)
+{
+	u8 *buffer;
+	union acpi_operand_object **top_object_list;
+	union acpi_operand_object **sub_object_list;
+	union acpi_operand_object *obj_desc;
+	acpi_size buffer_size_needed = 0;
+	u32 number_of_elements;
+	u32 index;
+	struct acpi_pci_routing_table *user_prt;
+	struct acpi_namespace_node *node;
+	acpi_status status;
+	struct acpi_buffer path_buffer;
+
+	ACPI_FUNCTION_TRACE(rs_create_pci_routing_table);
+
+	/* Params already validated, so we don't re-validate here */
+
+	/* Get the required buffer length */
+
+	status = acpi_rs_get_pci_routing_table_length(package_object,
+						      &buffer_size_needed);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "BufferSizeNeeded = %X\n",
+			  (u32) buffer_size_needed));
+
+	/* Validate/Allocate/Clear caller buffer */
+
+	status = acpi_ut_initialize_buffer(output_buffer, buffer_size_needed);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Loop through the ACPI_INTERNAL_OBJECTS - Each object should be a
+	 * package that in turn contains an acpi_integer Address, a u8 Pin,
+	 * a Name, and a u8 source_index.
+	 */
+	top_object_list = package_object->package.elements;
+	number_of_elements = package_object->package.count;
+	buffer = output_buffer->pointer;
+	user_prt = ACPI_CAST_PTR(struct acpi_pci_routing_table, buffer);
+
+	for (index = 0; index < number_of_elements; index++) {
+		int source_name_index = 2;
+		int source_index_index = 3;
+
+		/*
+		 * Point user_prt past this current structure
+		 *
+		 * NOTE: On the first iteration, user_prt->Length will
+		 * be zero because we cleared the return buffer earlier
+		 */
+		buffer += user_prt->length;
+		user_prt = ACPI_CAST_PTR(struct acpi_pci_routing_table, buffer);
+
+		/*
+		 * Fill in the Length field with the information we have at this point.
+		 * The minus four is to subtract the size of the u8 Source[4] member
+		 * because it is added below.
+		 */
+		user_prt->length = (sizeof(struct acpi_pci_routing_table) - 4);
+
+		/* Each element of the top-level package must also be a package */
+
+		if (ACPI_GET_OBJECT_TYPE(*top_object_list) != ACPI_TYPE_PACKAGE) {
+			ACPI_ERROR((AE_INFO,
+				    "(PRT[%X]) Need sub-package, found %s",
+				    index,
+				    acpi_ut_get_object_type_name
+				    (*top_object_list)));
+			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+		}
+
+		/* Each sub-package must be of length 4 */
+
+		if ((*top_object_list)->package.count != 4) {
+			ACPI_ERROR((AE_INFO,
+				    "(PRT[%X]) Need package of length 4, found length %d",
+				    index, (*top_object_list)->package.count));
+			return_ACPI_STATUS(AE_AML_PACKAGE_LIMIT);
+		}
+
+		/*
+		 * Dereference the sub-package.
+		 * The sub_object_list will now point to an array of the four IRQ
+		 * elements: [Address, Pin, Source, source_index]
+		 */
+		sub_object_list = (*top_object_list)->package.elements;
+
+		/* 1) First subobject: Dereference the PRT.Address */
+
+		obj_desc = sub_object_list[0];
+		if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) {
+			ACPI_ERROR((AE_INFO,
+				    "(PRT[%X].Address) Need Integer, found %s",
+				    index,
+				    acpi_ut_get_object_type_name(obj_desc)));
+			return_ACPI_STATUS(AE_BAD_DATA);
+		}
+
+		user_prt->address = obj_desc->integer.value;
+
+		/* 2) Second subobject: Dereference the PRT.Pin */
+
+		obj_desc = sub_object_list[1];
+		if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) {
+			ACPI_ERROR((AE_INFO,
+				    "(PRT[%X].Pin) Need Integer, found %s",
+				    index,
+				    acpi_ut_get_object_type_name(obj_desc)));
+			return_ACPI_STATUS(AE_BAD_DATA);
+		}
+
+		/*
+		 * If BIOS erroneously reversed the _PRT source_name and source_index,
+		 * then reverse them back.
+		 */
+		if (ACPI_GET_OBJECT_TYPE(sub_object_list[3]) !=
+		    ACPI_TYPE_INTEGER) {
+			if (acpi_gbl_enable_interpreter_slack) {
+				source_name_index = 3;
+				source_index_index = 2;
+				printk(KERN_WARNING
+				       "ACPI: Handling Garbled _PRT entry\n");
+			} else {
+				ACPI_ERROR((AE_INFO,
+					    "(PRT[%X].source_index) Need Integer, found %s",
+					    index,
+					    acpi_ut_get_object_type_name
+					    (sub_object_list[3])));
+				return_ACPI_STATUS(AE_BAD_DATA);
+			}
+		}
+
+		user_prt->pin = (u32) obj_desc->integer.value;
+
+		/*
+		 * If the BIOS has erroneously reversed the _PRT source_name (index 2)
+		 * and the source_index (index 3), fix it. _PRT is important enough to
+		 * work around this BIOS error. This also provides compatibility with
+		 * other ACPI implementations.
+		 */
+		obj_desc = sub_object_list[3];
+		if (!obj_desc
+		    || (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER)) {
+			sub_object_list[3] = sub_object_list[2];
+			sub_object_list[2] = obj_desc;
+
+			ACPI_WARNING((AE_INFO,
+				      "(PRT[%X].Source) SourceName and SourceIndex are reversed, fixed",
+				      index));
+		}
+
+		/*
+		 * 3) Third subobject: Dereference the PRT.source_name
+		 * The name may be unresolved (slack mode), so allow a null object
+		 */
+		obj_desc = sub_object_list[source_name_index];
+		if (obj_desc) {
+			switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+			case ACPI_TYPE_LOCAL_REFERENCE:
+
+				if (obj_desc->reference.class !=
+				    ACPI_REFCLASS_NAME) {
+					ACPI_ERROR((AE_INFO,
+						    "(PRT[%X].Source) Need name, found Reference Class %X",
+						    index,
+						    obj_desc->reference.class));
+					return_ACPI_STATUS(AE_BAD_DATA);
+				}
+
+				node = obj_desc->reference.node;
+
+				/* Use *remaining* length of the buffer as max for pathname */
+
+				path_buffer.length = output_buffer->length -
+				    (u32) ((u8 *) user_prt->source -
+					   (u8 *) output_buffer->pointer);
+				path_buffer.pointer = user_prt->source;
+
+				status =
+				    acpi_ns_handle_to_pathname((acpi_handle)
+							       node,
+							       &path_buffer);
+
+				/* +1 to include null terminator */
+
+				user_prt->length +=
+				    (u32) ACPI_STRLEN(user_prt->source) + 1;
+				break;
+
+			case ACPI_TYPE_STRING:
+
+				ACPI_STRCPY(user_prt->source,
+					    obj_desc->string.pointer);
+
+				/*
+				 * Add to the Length field the length of the string
+				 * (add 1 for terminator)
+				 */
+				user_prt->length += obj_desc->string.length + 1;
+				break;
+
+			case ACPI_TYPE_INTEGER:
+				/*
+				 * If this is a number, the Source Name is NULL. Since the
+				 * entire buffer was zeroed out, we can leave this alone.
+				 *
+				 * Add to the Length field the length of the u32 NULL
+				 */
+				user_prt->length += sizeof(u32);
+				break;
+
+			default:
+
+				ACPI_ERROR((AE_INFO,
+					    "(PRT[%X].Source) Need Ref/String/Integer, found %s",
+					    index,
+					    acpi_ut_get_object_type_name
+					    (obj_desc)));
+				return_ACPI_STATUS(AE_BAD_DATA);
+			}
+		}
+
+		/* Now align the current length */
+
+		user_prt->length =
+		    (u32) ACPI_ROUND_UP_TO_64BIT(user_prt->length);
+
+		/* 4) Fourth subobject: Dereference the PRT.source_index */
+
+		obj_desc = sub_object_list[source_index_index];
+		if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) {
+			ACPI_ERROR((AE_INFO,
+				    "(PRT[%X].SourceIndex) Need Integer, found %s",
+				    index,
+				    acpi_ut_get_object_type_name(obj_desc)));
+			return_ACPI_STATUS(AE_BAD_DATA);
+		}
+
+		user_prt->source_index = (u32) obj_desc->integer.value;
+
+		/* Point to the next union acpi_operand_object in the top level package */
+
+		top_object_list++;
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
+			  output_buffer->pointer, (u32) output_buffer->length));
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_create_aml_resources
+ *
+ * PARAMETERS:  linked_list_buffer      - Pointer to the resource linked list
+ *              output_buffer           - Pointer to the user's buffer
+ *
+ * RETURN:      Status  AE_OK if okay, else a valid acpi_status code.
+ *              If the output_buffer is too small, the error will be
+ *              AE_BUFFER_OVERFLOW and output_buffer->Length will point
+ *              to the size of the buffer needed.
+ *
+ * DESCRIPTION: Takes the linked list of device resources and
+ *              creates a bytestream to be used as input for the
+ *              _SRS control method.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
+			     struct acpi_buffer *output_buffer)
+{
+	acpi_status status;
+	acpi_size aml_size_needed = 0;
+
+	ACPI_FUNCTION_TRACE(rs_create_aml_resources);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "LinkedListBuffer = %p\n",
+			  linked_list_buffer));
+
+	/*
+	 * Params already validated, so we don't re-validate here
+	 *
+	 * Pass the linked_list_buffer into a module that calculates
+	 * the buffer size needed for the byte stream.
+	 */
+	status = acpi_rs_get_aml_length(linked_list_buffer, &aml_size_needed);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n",
+			  (u32) aml_size_needed,
+			  acpi_format_exception(status)));
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Validate/Allocate/Clear caller buffer */
+
+	status = acpi_ut_initialize_buffer(output_buffer, aml_size_needed);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Do the conversion */
+
+	status =
+	    acpi_rs_convert_resources_to_aml(linked_list_buffer,
+					     aml_size_needed,
+					     output_buffer->pointer);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
+			  output_buffer->pointer, (u32) output_buffer->length));
+	return_ACPI_STATUS(AE_OK);
+}
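
acpi_rs_create_aml_resources above follows a two-pass pattern: first compute the byte-stream size with acpi_rs_get_aml_length, then size and validate the caller's buffer, then run the actual conversion. A minimal standalone sketch of the same idiom, using purely hypothetical names, might look like this:

#include <stddef.h>
#include <string.h>

/* Hypothetical caller-supplied buffer, loosely modeled on struct acpi_buffer */
struct out_buffer {
	size_t length;
	void *pointer;
};

static int serialize(const char *src, struct out_buffer *out)
{
	size_t needed = strlen(src) + 1;	/* pass 1: compute required size */

	if (!out->pointer || out->length < needed) {
		out->length = needed;		/* report the size actually needed */
		return -1;			/* caller can retry with a larger buffer */
	}

	memcpy(out->pointer, src, needed);	/* pass 2: do the conversion */
	out->length = needed;
	return 0;
}
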
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
new file mode 100644
index 0000000..3f0ca5a
--- /dev/null
+++ b/drivers/acpi/acpica/rsdump.c
@@ -0,0 +1,771 @@
+/*******************************************************************************
+ *
+ * Module Name: rsdump - Functions to display the resource structures.
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+
+#define _COMPONENT          ACPI_RESOURCES
+ACPI_MODULE_NAME("rsdump")
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+/* Local prototypes */
+static void acpi_rs_out_string(char *title, char *value);
+
+static void acpi_rs_out_integer8(char *title, u8 value);
+
+static void acpi_rs_out_integer16(char *title, u16 value);
+
+static void acpi_rs_out_integer32(char *title, u32 value);
+
+static void acpi_rs_out_integer64(char *title, u64 value);
+
+static void acpi_rs_out_title(char *title);
+
+static void acpi_rs_dump_byte_list(u16 length, u8 * data);
+
+static void acpi_rs_dump_dword_list(u8 length, u32 * data);
+
+static void acpi_rs_dump_short_byte_list(u8 length, u8 * data);
+
+static void
+acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source);
+
+static void acpi_rs_dump_address_common(union acpi_resource_data *resource);
+
+static void
+acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table);
+
+#define ACPI_RSD_OFFSET(f)          (u8) ACPI_OFFSET (union acpi_resource_data,f)
+#define ACPI_PRT_OFFSET(f)          (u8) ACPI_OFFSET (struct acpi_pci_routing_table,f)
+#define ACPI_RSD_TABLE_SIZE(name)   (sizeof(name) / sizeof (struct acpi_rsdump_info))
+
+/*******************************************************************************
+ *
+ * Resource Descriptor info tables
+ *
+ * Note: The first table entry must be a Title or Literal and must contain
+ * the table length (number of table entries)
+ *
+ ******************************************************************************/
+
+struct acpi_rsdump_info acpi_rs_dump_irq[7] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_irq), "IRQ", NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.descriptor_length),
+	 "Descriptor Length", NULL},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.triggering), "Triggering",
+	 acpi_gbl_he_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity",
+	 acpi_gbl_ll_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.sharable), "Sharing",
+	 acpi_gbl_shr_decode},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.interrupt_count),
+	 "Interrupt Count", NULL},
+	{ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(irq.interrupts[0]),
+	 "Interrupt List", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_dma[6] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_dma), "DMA", NULL},
+	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.type), "Speed",
+	 acpi_gbl_typ_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(dma.bus_master), "Mastering",
+	 acpi_gbl_bm_decode},
+	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.transfer), "Transfer Type",
+	 acpi_gbl_siz_decode},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(dma.channel_count), "Channel Count",
+	 NULL},
+	{ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(dma.channels[0]), "Channel List",
+	 NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_start_dpf[4] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_start_dpf),
+	 "Start-Dependent-Functions", NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(start_dpf.descriptor_length),
+	 "Descriptor Length", NULL},
+	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.compatibility_priority),
+	 "Compatibility Priority", acpi_gbl_config_decode},
+	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.performance_robustness),
+	 "Performance/Robustness", acpi_gbl_config_decode}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_end_dpf[1] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_dpf),
+	 "End-Dependent-Functions", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_io[6] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io), "I/O", NULL},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(io.io_decode), "Address Decoding",
+	 acpi_gbl_io_decode},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.minimum), "Address Minimum", NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.maximum), "Address Maximum", NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.alignment), "Alignment", NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.address_length), "Address Length",
+	 NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_fixed_io[3] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_io),
+	 "Fixed I/O", NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_io.address), "Address", NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_io.address_length),
+	 "Address Length", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_vendor[3] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_vendor),
+	 "Vendor Specific", NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(vendor.byte_length), "Length", NULL},
+	{ACPI_RSD_LONGLIST, ACPI_RSD_OFFSET(vendor.byte_data[0]), "Vendor Data",
+	 NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_end_tag[1] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_tag), "EndTag",
+	 NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_memory24[6] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory24),
+	 "24-Bit Memory Range", NULL},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory24.write_protect),
+	 "Write Protect", acpi_gbl_rw_decode},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.minimum), "Address Minimum",
+	 NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.maximum), "Address Maximum",
+	 NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.alignment), "Alignment",
+	 NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.address_length),
+	 "Address Length", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_memory32[6] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory32),
+	 "32-Bit Memory Range", NULL},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory32.write_protect),
+	 "Write Protect", acpi_gbl_rw_decode},
+	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.minimum), "Address Minimum",
+	 NULL},
+	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.maximum), "Address Maximum",
+	 NULL},
+	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.alignment), "Alignment",
+	 NULL},
+	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.address_length),
+	 "Address Length", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_fixed_memory32[4] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_memory32),
+	 "32-Bit Fixed Memory Range", NULL},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(fixed_memory32.write_protect),
+	 "Write Protect", acpi_gbl_rw_decode},
+	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address), "Address",
+	 NULL},
+	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address_length),
+	 "Address Length", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_address16[8] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16),
+	 "16-Bit WORD Address Space", NULL},
+	{ACPI_RSD_ADDRESS, 0, NULL, NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.granularity), "Granularity",
+	 NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.minimum), "Address Minimum",
+	 NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.maximum), "Address Maximum",
+	 NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.translation_offset),
+	 "Translation Offset", NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address_length),
+	 "Address Length", NULL},
+	{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_address32[8] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32),
+	 "32-Bit DWORD Address Space", NULL},
+	{ACPI_RSD_ADDRESS, 0, NULL, NULL},
+	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.granularity), "Granularity",
+	 NULL},
+	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.minimum), "Address Minimum",
+	 NULL},
+	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.maximum), "Address Maximum",
+	 NULL},
+	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.translation_offset),
+	 "Translation Offset", NULL},
+	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address_length),
+	 "Address Length", NULL},
+	{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_address64[8] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64),
+	 "64-Bit QWORD Address Space", NULL},
+	{ACPI_RSD_ADDRESS, 0, NULL, NULL},
+	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.granularity), "Granularity",
+	 NULL},
+	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.minimum), "Address Minimum",
+	 NULL},
+	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.maximum), "Address Maximum",
+	 NULL},
+	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.translation_offset),
+	 "Translation Offset", NULL},
+	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address_length),
+	 "Address Length", NULL},
+	{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_ext_address64[8] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64),
+	 "64-Bit Extended Address Space", NULL},
+	{ACPI_RSD_ADDRESS, 0, NULL, NULL},
+	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.granularity),
+	 "Granularity", NULL},
+	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.minimum),
+	 "Address Minimum", NULL},
+	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.maximum),
+	 "Address Maximum", NULL},
+	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.translation_offset),
+	 "Translation Offset", NULL},
+	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address_length),
+	 "Address Length", NULL},
+	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific),
+	 "Type-Specific Attribute", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_ext_irq[8] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_irq),
+	 "Extended IRQ", NULL},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.producer_consumer),
+	 "Type", acpi_gbl_consume_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.triggering),
+	 "Triggering", acpi_gbl_he_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.polarity), "Polarity",
+	 acpi_gbl_ll_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.sharable), "Sharing",
+	 acpi_gbl_shr_decode},
+	{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(extended_irq.resource_source), NULL,
+	 NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(extended_irq.interrupt_count),
+	 "Interrupt Count", NULL},
+	{ACPI_RSD_DWORDLIST, ACPI_RSD_OFFSET(extended_irq.interrupts[0]),
+	 "Interrupt List", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_generic_reg),
+	 "Generic Register", NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.space_id), "Space ID",
+	 NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_width), "Bit Width",
+	 NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_offset), "Bit Offset",
+	 NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.access_size),
+	 "Access Size", NULL},
+	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
+};
+
+/*
+ * Tables used for common address descriptor flag fields
+ */
+static struct acpi_rsdump_info acpi_rs_dump_general_flags[5] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_general_flags), NULL,
+	 NULL},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.producer_consumer),
+	 "Consumer/Producer", acpi_gbl_consume_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.decode), "Address Decode",
+	 acpi_gbl_dec_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.min_address_fixed),
+	 "Min Relocatability", acpi_gbl_min_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.max_address_fixed),
+	 "Max Relocatability", acpi_gbl_max_decode}
+};
+
+static struct acpi_rsdump_info acpi_rs_dump_memory_flags[5] = {
+	{ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory_flags),
+	 "Resource Type", (void *)"Memory Range"},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.write_protect),
+	 "Write Protect", acpi_gbl_rw_decode},
+	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.caching),
+	 "Caching", acpi_gbl_mem_decode},
+	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.range_type),
+	 "Range Type", acpi_gbl_mtp_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.translation),
+	 "Translation", acpi_gbl_ttp_decode}
+};
+
+static struct acpi_rsdump_info acpi_rs_dump_io_flags[4] = {
+	{ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io_flags),
+	 "Resource Type", (void *)"I/O Range"},
+	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.io.range_type),
+	 "Range Type", acpi_gbl_rng_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation),
+	 "Translation", acpi_gbl_ttp_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation_type),
+	 "Translation Type", acpi_gbl_trs_decode}
+};
+
+/*
+ * Table used to dump _PRT contents
+ */
+static struct acpi_rsdump_info acpi_rs_dump_prt[5] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_prt), NULL, NULL},
+	{ACPI_RSD_UINT64, ACPI_PRT_OFFSET(address), "Address", NULL},
+	{ACPI_RSD_UINT32, ACPI_PRT_OFFSET(pin), "Pin", NULL},
+	{ACPI_RSD_STRING, ACPI_PRT_OFFSET(source[0]), "Source", NULL},
+	{ACPI_RSD_UINT32, ACPI_PRT_OFFSET(source_index), "Source Index", NULL}
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_dump_descriptor
+ *
+ * PARAMETERS:  Resource            - Pointer to the resource descriptor data
+ *              Table               - Info table describing the descriptor fields
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Dump the fields of a resource descriptor, driven by the
+ *              entries of the supplied dump info table.
+ *
+ ******************************************************************************/
+
+static void
+acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
+{
+	u8 *target = NULL;
+	u8 *previous_target;
+	char *name;
+	u8 count;
+
+	/* First table entry must contain the table length (# of table entries) */
+
+	count = table->offset;
+
+	while (count) {
+		previous_target = target;
+		target = ACPI_ADD_PTR(u8, resource, table->offset);
+		name = table->name;
+
+		switch (table->opcode) {
+		case ACPI_RSD_TITLE:
+			/*
+			 * Optional resource title
+			 */
+			if (table->name) {
+				acpi_os_printf("%s Resource\n", name);
+			}
+			break;
+
+			/* Strings */
+
+		case ACPI_RSD_LITERAL:
+			acpi_rs_out_string(name,
+					   ACPI_CAST_PTR(char, table->pointer));
+			break;
+
+		case ACPI_RSD_STRING:
+			acpi_rs_out_string(name, ACPI_CAST_PTR(char, target));
+			break;
+
+			/* Data items, 8/16/32/64 bit */
+
+		case ACPI_RSD_UINT8:
+			acpi_rs_out_integer8(name, ACPI_GET8(target));
+			break;
+
+		case ACPI_RSD_UINT16:
+			acpi_rs_out_integer16(name, ACPI_GET16(target));
+			break;
+
+		case ACPI_RSD_UINT32:
+			acpi_rs_out_integer32(name, ACPI_GET32(target));
+			break;
+
+		case ACPI_RSD_UINT64:
+			acpi_rs_out_integer64(name, ACPI_GET64(target));
+			break;
+
+			/* Flags: 1-bit and 2-bit flags supported */
+
+		case ACPI_RSD_1BITFLAG:
+			acpi_rs_out_string(name, ACPI_CAST_PTR(char,
+							       table->
+							       pointer[*target &
+								       0x01]));
+			break;
+
+		case ACPI_RSD_2BITFLAG:
+			acpi_rs_out_string(name, ACPI_CAST_PTR(char,
+							       table->
+							       pointer[*target &
+								       0x03]));
+			break;
+
+		case ACPI_RSD_SHORTLIST:
+			/*
+			 * Short byte list (single line output) for DMA and IRQ resources
+			 * Note: The list length is obtained from the previous table entry
+			 */
+			if (previous_target) {
+				acpi_rs_out_title(name);
+				acpi_rs_dump_short_byte_list(*previous_target,
+							     target);
+			}
+			break;
+
+		case ACPI_RSD_LONGLIST:
+			/*
+			 * Long byte list for Vendor resource data
+			 * Note: The list length is obtained from the previous table entry
+			 */
+			if (previous_target) {
+				acpi_rs_dump_byte_list(ACPI_GET16
+						       (previous_target),
+						       target);
+			}
+			break;
+
+		case ACPI_RSD_DWORDLIST:
+			/*
+			 * Dword list for Extended Interrupt resources
+			 * Note: The list length is obtained from the previous table entry
+			 */
+			if (previous_target) {
+				acpi_rs_dump_dword_list(*previous_target,
+							ACPI_CAST_PTR(u32,
+								      target));
+			}
+			break;
+
+		case ACPI_RSD_ADDRESS:
+			/*
+			 * Common flags for all Address resources
+			 */
+			acpi_rs_dump_address_common(ACPI_CAST_PTR
+						    (union acpi_resource_data,
+						     target));
+			break;
+
+		case ACPI_RSD_SOURCE:
+			/*
+			 * Optional resource_source for Address resources
+			 */
+			acpi_rs_dump_resource_source(ACPI_CAST_PTR(struct
+								   acpi_resource_source,
+								   target));
+			break;
+
+		default:
+			acpi_os_printf("**** Invalid table opcode [%X] ****\n",
+				       table->opcode);
+			return;
+		}
+
+		table++;
+		count--;
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_dump_resource_source
+ *
+ * PARAMETERS:  resource_source     - Pointer to a Resource Source struct
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Common routine for dumping the optional resource_source and the
+ *              corresponding resource_source_index.
+ *
+ ******************************************************************************/
+
+static void
+acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	if (resource_source->index == 0xFF) {
+		return;
+	}
+
+	acpi_rs_out_integer8("Resource Source Index", resource_source->index);
+
+	acpi_rs_out_string("Resource Source",
+			   resource_source->string_ptr ?
+			   resource_source->string_ptr : "[Not Specified]");
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_dump_address_common
+ *
+ * PARAMETERS:  Resource        - Pointer to an internal resource descriptor
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Dump the fields that are common to all Address resource
+ *              descriptors
+ *
+ ******************************************************************************/
+
+static void acpi_rs_dump_address_common(union acpi_resource_data *resource)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	/* Decode the type-specific flags */
+
+	switch (resource->address.resource_type) {
+	case ACPI_MEMORY_RANGE:
+
+		acpi_rs_dump_descriptor(resource, acpi_rs_dump_memory_flags);
+		break;
+
+	case ACPI_IO_RANGE:
+
+		acpi_rs_dump_descriptor(resource, acpi_rs_dump_io_flags);
+		break;
+
+	case ACPI_BUS_NUMBER_RANGE:
+
+		acpi_rs_out_string("Resource Type", "Bus Number Range");
+		break;
+
+	default:
+
+		acpi_rs_out_integer8("Resource Type",
+				     (u8) resource->address.resource_type);
+		break;
+	}
+
+	/* Decode the general flags */
+
+	acpi_rs_dump_descriptor(resource, acpi_rs_dump_general_flags);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_dump_resource_list
+ *
+ * PARAMETERS:  resource_list       - Pointer to a resource descriptor list
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Dispatches the structure to the correct dump routine.
+ *
+ ******************************************************************************/
+
+void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
+{
+	u32 count = 0;
+	u32 type;
+
+	ACPI_FUNCTION_ENTRY();
+
+	if (!(acpi_dbg_level & ACPI_LV_RESOURCES)
+	    || !(_COMPONENT & acpi_dbg_layer)) {
+		return;
+	}
+
+	/* Walk list and dump all resource descriptors (END_TAG terminates) */
+
+	do {
+		acpi_os_printf("\n[%02X] ", count);
+		count++;
+
+		/* Validate Type before dispatch */
+
+		type = resource_list->type;
+		if (type > ACPI_RESOURCE_TYPE_MAX) {
+			acpi_os_printf
+			    ("Invalid descriptor type (%X) in resource list\n",
+			     resource_list->type);
+			return;
+		}
+
+		/* Dump the resource descriptor */
+
+		acpi_rs_dump_descriptor(&resource_list->data,
+					acpi_gbl_dump_resource_dispatch[type]);
+
+		/* Point to the next resource structure */
+
+		resource_list =
+		    ACPI_ADD_PTR(struct acpi_resource, resource_list,
+				 resource_list->length);
+
+		/* Exit when END_TAG descriptor is reached */
+
+	} while (type != ACPI_RESOURCE_TYPE_END_TAG);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_dump_irq_list
+ *
+ * PARAMETERS:  route_table     - Pointer to the routing table to dump.
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print IRQ routing table
+ *
+ ******************************************************************************/
+
+void acpi_rs_dump_irq_list(u8 * route_table)
+{
+	struct acpi_pci_routing_table *prt_element;
+	u8 count;
+
+	ACPI_FUNCTION_ENTRY();
+
+	if (!(acpi_dbg_level & ACPI_LV_RESOURCES)
+	    || !(_COMPONENT & acpi_dbg_layer)) {
+		return;
+	}
+
+	prt_element = ACPI_CAST_PTR(struct acpi_pci_routing_table, route_table);
+
+	/* Dump all table elements; exit on a zero-length element */
+
+	for (count = 0; prt_element->length; count++) {
+		acpi_os_printf("\n[%02X] PCI IRQ Routing Table Package\n",
+			       count);
+		acpi_rs_dump_descriptor(prt_element, acpi_rs_dump_prt);
+
+		prt_element = ACPI_ADD_PTR(struct acpi_pci_routing_table,
+					   prt_element, prt_element->length);
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_out*
+ *
+ * PARAMETERS:  Title       - Name of the resource field
+ *              Value       - Value of the resource field
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Miscellaneous helper functions to consistently format the
+ *              output of the resource dump routines
+ *
+ ******************************************************************************/
+
+static void acpi_rs_out_string(char *title, char *value)
+{
+	acpi_os_printf("%27s : %s", title, value);
+	if (!*value) {
+		acpi_os_printf("[NULL NAMESTRING]");
+	}
+	acpi_os_printf("\n");
+}
+
+static void acpi_rs_out_integer8(char *title, u8 value)
+{
+	acpi_os_printf("%27s : %2.2X\n", title, value);
+}
+
+static void acpi_rs_out_integer16(char *title, u16 value)
+{
+	acpi_os_printf("%27s : %4.4X\n", title, value);
+}
+
+static void acpi_rs_out_integer32(char *title, u32 value)
+{
+	acpi_os_printf("%27s : %8.8X\n", title, value);
+}
+
+static void acpi_rs_out_integer64(char *title, u64 value)
+{
+	acpi_os_printf("%27s : %8.8X%8.8X\n", title, ACPI_FORMAT_UINT64(value));
+}
+
+static void acpi_rs_out_title(char *title)
+{
+	acpi_os_printf("%27s : ", title);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_dump*List
+ *
+ * PARAMETERS:  Length      - Number of elements in the list
+ *              Data        - Start of the list
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Miscellaneous functions to dump lists of raw data
+ *
+ ******************************************************************************/
+
+static void acpi_rs_dump_byte_list(u16 length, u8 * data)
+{
+	u8 i;
+
+	for (i = 0; i < length; i++) {
+		acpi_os_printf("%25s%2.2X : %2.2X\n", "Byte", i, data[i]);
+	}
+}
+
+static void acpi_rs_dump_short_byte_list(u8 length, u8 * data)
+{
+	u8 i;
+
+	for (i = 0; i < length; i++) {
+		acpi_os_printf("%X ", data[i]);
+	}
+	acpi_os_printf("\n");
+}
+
+static void acpi_rs_dump_dword_list(u8 length, u32 * data)
+{
+	u8 i;
+
+	for (i = 0; i < length; i++) {
+		acpi_os_printf("%25s%2.2X : %8.8X\n", "Dword", i, data[i]);
+	}
+}
+
+#endif
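
The dump code above is table driven: each acpi_rsdump_info entry pairs an opcode with a byte offset into the resource structure and an optional title, and acpi_rs_dump_descriptor walks the table formatting one field per entry. A standalone sketch of the same pattern follows; the types and names are hypothetical and the example compiles on its own.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum dump_op { DUMP_TITLE, DUMP_U8, DUMP_U16 };

/* Hypothetical analogue of struct acpi_rsdump_info */
struct dump_info {
	enum dump_op op;
	size_t offset;
	const char *name;
};

/* Hypothetical analogue of an internal IRQ resource */
struct demo_irq {
	uint8_t triggering;
	uint8_t count;
};

static const struct dump_info demo_table[] = {
	{ DUMP_TITLE, 0, "IRQ" },
	{ DUMP_U8, offsetof(struct demo_irq, triggering), "Triggering" },
	{ DUMP_U8, offsetof(struct demo_irq, count), "Interrupt Count" },
};

static void dump_descriptor(const void *res, const struct dump_info *t, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		const uint8_t *field = (const uint8_t *)res + t[i].offset;

		switch (t[i].op) {
		case DUMP_TITLE:
			printf("%s Resource\n", t[i].name);
			break;
		case DUMP_U8:
			printf("%27s : %2.2X\n", t[i].name, *field);
			break;
		case DUMP_U16:
			printf("%27s : %4.4X\n", t[i].name,
			       *(const uint16_t *)(const void *)field);
			break;
		}
	}
}

int main(void)
{
	struct demo_irq irq = { .triggering = 1, .count = 2 };

	dump_descriptor(&irq, demo_table,
			sizeof(demo_table) / sizeof(demo_table[0]));
	return 0;
}
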
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
new file mode 100644
index 0000000..77b25fd
--- /dev/null
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -0,0 +1,206 @@
+/*******************************************************************************
+ *
+ * Module Name: rsinfo - Dispatch and Info tables
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+
+#define _COMPONENT          ACPI_RESOURCES
+ACPI_MODULE_NAME("rsinfo")
+
+/*
+ * Resource dispatch and information tables. Any new resource types (either
+ * Large or Small) must be reflected in each of these tables, so they are here
+ * in one place.
+ *
+ * The tables for Large descriptors are indexed by bits 6:0 of the AML
+ * descriptor type byte. The tables for Small descriptors are indexed by
+ * bits 6:3 of the descriptor byte. The tables for internal resource
+ * descriptors are indexed by the acpi_resource_type field.
+ */
+/* Dispatch table for resource-to-AML (Set Resource) conversion functions */
+struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[] = {
+	acpi_rs_set_irq,	/* 0x00, ACPI_RESOURCE_TYPE_IRQ */
+	acpi_rs_convert_dma,	/* 0x01, ACPI_RESOURCE_TYPE_DMA */
+	acpi_rs_set_start_dpf,	/* 0x02, ACPI_RESOURCE_TYPE_START_DEPENDENT */
+	acpi_rs_convert_end_dpf,	/* 0x03, ACPI_RESOURCE_TYPE_END_DEPENDENT */
+	acpi_rs_convert_io,	/* 0x04, ACPI_RESOURCE_TYPE_IO */
+	acpi_rs_convert_fixed_io,	/* 0x05, ACPI_RESOURCE_TYPE_FIXED_IO */
+	acpi_rs_set_vendor,	/* 0x06, ACPI_RESOURCE_TYPE_VENDOR */
+	acpi_rs_convert_end_tag,	/* 0x07, ACPI_RESOURCE_TYPE_END_TAG */
+	acpi_rs_convert_memory24,	/* 0x08, ACPI_RESOURCE_TYPE_MEMORY24 */
+	acpi_rs_convert_memory32,	/* 0x09, ACPI_RESOURCE_TYPE_MEMORY32 */
+	acpi_rs_convert_fixed_memory32,	/* 0x0A, ACPI_RESOURCE_TYPE_FIXED_MEMORY32 */
+	acpi_rs_convert_address16,	/* 0x0B, ACPI_RESOURCE_TYPE_ADDRESS16 */
+	acpi_rs_convert_address32,	/* 0x0C, ACPI_RESOURCE_TYPE_ADDRESS32 */
+	acpi_rs_convert_address64,	/* 0x0D, ACPI_RESOURCE_TYPE_ADDRESS64 */
+	acpi_rs_convert_ext_address64,	/* 0x0E, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
+	acpi_rs_convert_ext_irq,	/* 0x0F, ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
+	acpi_rs_convert_generic_reg	/* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+};
+
+/* Dispatch tables for AML-to-resource (Get Resource) conversion functions */
+
+struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
+	/* Small descriptors */
+
+	NULL,			/* 0x00, Reserved */
+	NULL,			/* 0x01, Reserved */
+	NULL,			/* 0x02, Reserved */
+	NULL,			/* 0x03, Reserved */
+	acpi_rs_get_irq,	/* 0x04, ACPI_RESOURCE_NAME_IRQ */
+	acpi_rs_convert_dma,	/* 0x05, ACPI_RESOURCE_NAME_DMA */
+	acpi_rs_get_start_dpf,	/* 0x06, ACPI_RESOURCE_NAME_START_DEPENDENT */
+	acpi_rs_convert_end_dpf,	/* 0x07, ACPI_RESOURCE_NAME_END_DEPENDENT */
+	acpi_rs_convert_io,	/* 0x08, ACPI_RESOURCE_NAME_IO */
+	acpi_rs_convert_fixed_io,	/* 0x09, ACPI_RESOURCE_NAME_FIXED_IO */
+	NULL,			/* 0x0A, Reserved */
+	NULL,			/* 0x0B, Reserved */
+	NULL,			/* 0x0C, Reserved */
+	NULL,			/* 0x0D, Reserved */
+	acpi_rs_get_vendor_small,	/* 0x0E, ACPI_RESOURCE_NAME_VENDOR_SMALL */
+	acpi_rs_convert_end_tag,	/* 0x0F, ACPI_RESOURCE_NAME_END_TAG */
+
+	/* Large descriptors */
+
+	NULL,			/* 0x00, Reserved */
+	acpi_rs_convert_memory24,	/* 0x01, ACPI_RESOURCE_NAME_MEMORY24 */
+	acpi_rs_convert_generic_reg,	/* 0x02, ACPI_RESOURCE_NAME_GENERIC_REGISTER */
+	NULL,			/* 0x03, Reserved */
+	acpi_rs_get_vendor_large,	/* 0x04, ACPI_RESOURCE_NAME_VENDOR_LARGE */
+	acpi_rs_convert_memory32,	/* 0x05, ACPI_RESOURCE_NAME_MEMORY32 */
+	acpi_rs_convert_fixed_memory32,	/* 0x06, ACPI_RESOURCE_NAME_FIXED_MEMORY32 */
+	acpi_rs_convert_address32,	/* 0x07, ACPI_RESOURCE_NAME_ADDRESS32 */
+	acpi_rs_convert_address16,	/* 0x08, ACPI_RESOURCE_NAME_ADDRESS16 */
+	acpi_rs_convert_ext_irq,	/* 0x09, ACPI_RESOURCE_NAME_EXTENDED_IRQ */
+	acpi_rs_convert_address64,	/* 0x0A, ACPI_RESOURCE_NAME_ADDRESS64 */
+	acpi_rs_convert_ext_address64	/* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
+};
+
+#ifdef ACPI_FUTURE_USAGE
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+/* Dispatch table for resource dump functions */
+
+struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
+	acpi_rs_dump_irq,	/* ACPI_RESOURCE_TYPE_IRQ */
+	acpi_rs_dump_dma,	/* ACPI_RESOURCE_TYPE_DMA */
+	acpi_rs_dump_start_dpf,	/* ACPI_RESOURCE_TYPE_START_DEPENDENT */
+	acpi_rs_dump_end_dpf,	/* ACPI_RESOURCE_TYPE_END_DEPENDENT */
+	acpi_rs_dump_io,	/* ACPI_RESOURCE_TYPE_IO */
+	acpi_rs_dump_fixed_io,	/* ACPI_RESOURCE_TYPE_FIXED_IO */
+	acpi_rs_dump_vendor,	/* ACPI_RESOURCE_TYPE_VENDOR */
+	acpi_rs_dump_end_tag,	/* ACPI_RESOURCE_TYPE_END_TAG */
+	acpi_rs_dump_memory24,	/* ACPI_RESOURCE_TYPE_MEMORY24 */
+	acpi_rs_dump_memory32,	/* ACPI_RESOURCE_TYPE_MEMORY32 */
+	acpi_rs_dump_fixed_memory32,	/* ACPI_RESOURCE_TYPE_FIXED_MEMORY32 */
+	acpi_rs_dump_address16,	/* ACPI_RESOURCE_TYPE_ADDRESS16 */
+	acpi_rs_dump_address32,	/* ACPI_RESOURCE_TYPE_ADDRESS32 */
+	acpi_rs_dump_address64,	/* ACPI_RESOURCE_TYPE_ADDRESS64 */
+	acpi_rs_dump_ext_address64,	/* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
+	acpi_rs_dump_ext_irq,	/* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
+	acpi_rs_dump_generic_reg,	/* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+};
+#endif
+
+#endif				/* ACPI_FUTURE_USAGE */
+/*
+ * Base sizes for external AML resource descriptors, indexed by internal type.
+ * Includes size of the descriptor header (1 byte for small descriptors,
+ * 3 bytes for large descriptors)
+ */
+const u8 acpi_gbl_aml_resource_sizes[] = {
+	sizeof(struct aml_resource_irq),	/* ACPI_RESOURCE_TYPE_IRQ (optional Byte 3 always created) */
+	sizeof(struct aml_resource_dma),	/* ACPI_RESOURCE_TYPE_DMA */
+	sizeof(struct aml_resource_start_dependent),	/* ACPI_RESOURCE_TYPE_START_DEPENDENT (optional Byte 1 always created) */
+	sizeof(struct aml_resource_end_dependent),	/* ACPI_RESOURCE_TYPE_END_DEPENDENT */
+	sizeof(struct aml_resource_io),	/* ACPI_RESOURCE_TYPE_IO */
+	sizeof(struct aml_resource_fixed_io),	/* ACPI_RESOURCE_TYPE_FIXED_IO */
+	sizeof(struct aml_resource_vendor_small),	/* ACPI_RESOURCE_TYPE_VENDOR */
+	sizeof(struct aml_resource_end_tag),	/* ACPI_RESOURCE_TYPE_END_TAG */
+	sizeof(struct aml_resource_memory24),	/* ACPI_RESOURCE_TYPE_MEMORY24 */
+	sizeof(struct aml_resource_memory32),	/* ACPI_RESOURCE_TYPE_MEMORY32 */
+	sizeof(struct aml_resource_fixed_memory32),	/* ACPI_RESOURCE_TYPE_FIXED_MEMORY32 */
+	sizeof(struct aml_resource_address16),	/* ACPI_RESOURCE_TYPE_ADDRESS16 */
+	sizeof(struct aml_resource_address32),	/* ACPI_RESOURCE_TYPE_ADDRESS32 */
+	sizeof(struct aml_resource_address64),	/* ACPI_RESOURCE_TYPE_ADDRESS64 */
+	sizeof(struct aml_resource_extended_address64),	/*ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
+	sizeof(struct aml_resource_extended_irq),	/* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
+	sizeof(struct aml_resource_generic_register)	/* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+};
+
+const u8 acpi_gbl_resource_struct_sizes[] = {
+	/* Small descriptors */
+
+	0,
+	0,
+	0,
+	0,
+	ACPI_RS_SIZE(struct acpi_resource_irq),
+	ACPI_RS_SIZE(struct acpi_resource_dma),
+	ACPI_RS_SIZE(struct acpi_resource_start_dependent),
+	ACPI_RS_SIZE_MIN,
+	ACPI_RS_SIZE(struct acpi_resource_io),
+	ACPI_RS_SIZE(struct acpi_resource_fixed_io),
+	0,
+	0,
+	0,
+	0,
+	ACPI_RS_SIZE(struct acpi_resource_vendor),
+	ACPI_RS_SIZE_MIN,
+
+	/* Large descriptors */
+
+	0,
+	ACPI_RS_SIZE(struct acpi_resource_memory24),
+	ACPI_RS_SIZE(struct acpi_resource_generic_register),
+	0,
+	ACPI_RS_SIZE(struct acpi_resource_vendor),
+	ACPI_RS_SIZE(struct acpi_resource_memory32),
+	ACPI_RS_SIZE(struct acpi_resource_fixed_memory32),
+	ACPI_RS_SIZE(struct acpi_resource_address32),
+	ACPI_RS_SIZE(struct acpi_resource_address16),
+	ACPI_RS_SIZE(struct acpi_resource_extended_irq),
+	ACPI_RS_SIZE(struct acpi_resource_address64),
+	ACPI_RS_SIZE(struct acpi_resource_extended_address64)
+};
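
As the comment above notes, the Get-Resource dispatch table is indexed by the AML descriptor tag: small descriptors by bits 6:3 of the tag byte, large descriptors (bit 7 set) by bits 6:0, offset past the sixteen small-descriptor slots. A standalone sketch of that index derivation, with hypothetical constant and function names:

#include <stdint.h>

#define AML_LARGE_DESCRIPTOR	0x80	/* bit 7 distinguishes large from small */
#define SMALL_TABLE_ENTRIES	16	/* small handlers occupy slots 0..15 */

static unsigned int dispatch_index(uint8_t tag)
{
	if (tag & AML_LARGE_DESCRIPTOR)
		return SMALL_TABLE_ENTRIES + (tag & 0x7F);	/* large: bits 6:0 */

	return (tag >> 3) & 0x0F;				/* small: bits 6:3 */
}
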
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
new file mode 100644
index 0000000..35a49aa
--- /dev/null
+++ b/drivers/acpi/acpica/rsio.c
@@ -0,0 +1,290 @@
+/*******************************************************************************
+ *
+ * Module Name: rsio - IO and DMA resource descriptors
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+
+#define _COMPONENT          ACPI_RESOURCES
+ACPI_MODULE_NAME("rsio")
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_io
+ *
+ ******************************************************************************/
+struct acpi_rsconvert_info acpi_rs_convert_io[5] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_IO,
+	 ACPI_RS_SIZE(struct acpi_resource_io),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_io)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_IO,
+	 sizeof(struct aml_resource_io),
+	 0},
+
+	/* Decode flag */
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.io.io_decode),
+	 AML_OFFSET(io.flags),
+	 0},
+	/*
+	 * These fields are contiguous in both the source and destination:
+	 * Address Alignment
+	 * Length
+	 * Minimum Base Address
+	 * Maximum Base Address
+	 */
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.io.alignment),
+	 AML_OFFSET(io.alignment),
+	 2},
+
+	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.io.minimum),
+	 AML_OFFSET(io.minimum),
+	 2}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_fixed_io
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_fixed_io[4] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_IO,
+	 ACPI_RS_SIZE(struct acpi_resource_fixed_io),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_io)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_IO,
+	 sizeof(struct aml_resource_fixed_io),
+	 0},
+	/*
+	 * These fields are contiguous in both the source and destination:
+	 * Base Address
+	 * Length
+	 */
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_io.address_length),
+	 AML_OFFSET(fixed_io.address_length),
+	 1},
+
+	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_io.address),
+	 AML_OFFSET(fixed_io.address),
+	 1}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_generic_reg
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_generic_reg[4] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GENERIC_REGISTER,
+	 ACPI_RS_SIZE(struct acpi_resource_generic_register),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_generic_reg)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_GENERIC_REGISTER,
+	 sizeof(struct aml_resource_generic_register),
+	 0},
+	/*
+	 * These fields are contiguous in both the source and destination:
+	 * Address Space ID
+	 * Register Bit Width
+	 * Register Bit Offset
+	 * Access Size
+	 */
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.generic_reg.space_id),
+	 AML_OFFSET(generic_reg.address_space_id),
+	 4},
+
+	/* Get the Register Address */
+
+	{ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.generic_reg.address),
+	 AML_OFFSET(generic_reg.address),
+	 1}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_end_dpf
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_end_dpf[2] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_END_DEPENDENT,
+	 ACPI_RS_SIZE_MIN,
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_end_dpf)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_END_DEPENDENT,
+	 sizeof(struct aml_resource_end_dependent),
+	 0}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_end_tag
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_end_tag[2] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_END_TAG,
+	 ACPI_RS_SIZE_MIN,
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_end_tag)},
+
+	/*
+	 * Note: The checksum field is set to zero, meaning that the resource
+	 * data is treated as if the checksum operation succeeded.
+	 * (ACPI Spec 1.0b Section 6.4.2.8)
+	 */
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_END_TAG,
+	 sizeof(struct aml_resource_end_tag),
+	 0}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_get_start_dpf
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_get_start_dpf[6] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_START_DEPENDENT,
+	 ACPI_RS_SIZE(struct acpi_resource_start_dependent),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_get_start_dpf)},
+
+	/* Defaults for Compatibility and Performance priorities */
+
+	{ACPI_RSC_SET8, ACPI_RS_OFFSET(data.start_dpf.compatibility_priority),
+	 ACPI_ACCEPTABLE_CONFIGURATION,
+	 2},
+
+	/* Get the descriptor length (0 or 1 for Start Dpf descriptor) */
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.start_dpf.descriptor_length),
+	 AML_OFFSET(start_dpf.descriptor_type),
+	 0},
+
+	/* All done if there is no flag byte present in the descriptor */
+
+	{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 1},
+
+	/* Flag byte is present, get the flags */
+
+	{ACPI_RSC_2BITFLAG,
+	 ACPI_RS_OFFSET(data.start_dpf.compatibility_priority),
+	 AML_OFFSET(start_dpf.flags),
+	 0},
+
+	{ACPI_RSC_2BITFLAG,
+	 ACPI_RS_OFFSET(data.start_dpf.performance_robustness),
+	 AML_OFFSET(start_dpf.flags),
+	 2}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_set_start_dpf
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_set_start_dpf[10] = {
+	/* Start with a default descriptor of length 1 */
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_START_DEPENDENT,
+	 sizeof(struct aml_resource_start_dependent),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_set_start_dpf)},
+
+	/* Set the default flag values */
+
+	{ACPI_RSC_2BITFLAG,
+	 ACPI_RS_OFFSET(data.start_dpf.compatibility_priority),
+	 AML_OFFSET(start_dpf.flags),
+	 0},
+
+	{ACPI_RSC_2BITFLAG,
+	 ACPI_RS_OFFSET(data.start_dpf.performance_robustness),
+	 AML_OFFSET(start_dpf.flags),
+	 2},
+	/*
+	 * All done if the output descriptor length is required to be 1
+	 * (i.e., optimization to 0 bytes cannot be attempted)
+	 */
+	{ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
+	 ACPI_RS_OFFSET(data.start_dpf.descriptor_length),
+	 1},
+
+	/* Set length to 0 bytes (no flags byte) */
+
+	{ACPI_RSC_LENGTH, 0, 0,
+	 sizeof(struct aml_resource_start_dependent_noprio)},
+
+	/*
+	 * All done if the output descriptor length is required to be 0.
+	 *
+	 * TBD: Perhaps we should check for error if input flags are not
+	 * compatible with a 0-byte descriptor.
+	 */
+	{ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
+	 ACPI_RS_OFFSET(data.start_dpf.descriptor_length),
+	 0},
+
+	/* Reset length to 1 byte (descriptor with flags byte) */
+
+	{ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_start_dependent)},
+
+	/*
+	 * All done if flags byte is necessary -- if either priority value
+	 * is not ACPI_ACCEPTABLE_CONFIGURATION
+	 */
+	{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE,
+	 ACPI_RS_OFFSET(data.start_dpf.compatibility_priority),
+	 ACPI_ACCEPTABLE_CONFIGURATION},
+
+	{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE,
+	 ACPI_RS_OFFSET(data.start_dpf.performance_robustness),
+	 ACPI_ACCEPTABLE_CONFIGURATION},
+
+	/* Flag byte is not necessary */
+
+	{ACPI_RSC_LENGTH, 0, 0,
+	 sizeof(struct aml_resource_start_dependent_noprio)}
+};
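
The conversion tables above are interpreted opcode by opcode: the 1BITFLAG/2BITFLAG entries extract a small field at a given bit position within a flags byte, and the MOVE entries copy a run of contiguous fields between the AML image and the internal structure. A minimal sketch of those two primitives in isolation, with hypothetical helper names:

#include <stdint.h>
#include <string.h>

/* Extract a 1-bit or 2-bit field at bit position 'pos' of a flags byte */
static uint8_t get_1bit_flag(uint8_t flags, unsigned int pos)
{
	return (flags >> pos) & 0x01;
}

static uint8_t get_2bit_flag(uint8_t flags, unsigned int pos)
{
	return (flags >> pos) & 0x03;
}

/* Copy 'count' contiguous 16-bit fields, e.g. I/O Minimum then Maximum */
static void move16(uint16_t *dst, const uint16_t *src, unsigned int count)
{
	memcpy(dst, src, count * sizeof(*dst));
}
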
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
new file mode 100644
index 0000000..2e02569
--- /dev/null
+++ b/drivers/acpi/acpica/rsirq.c
@@ -0,0 +1,266 @@
+/*******************************************************************************
+ *
+ * Module Name: rsirq - IRQ resource descriptors
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+
+#define _COMPONENT          ACPI_RESOURCES
+ACPI_MODULE_NAME("rsirq")
+
+/*******************************************************************************
+ *
+ * acpi_rs_get_irq
+ *
+ ******************************************************************************/
+struct acpi_rsconvert_info acpi_rs_get_irq[8] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_IRQ,
+	 ACPI_RS_SIZE(struct acpi_resource_irq),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_get_irq)},
+
+	/* Get the IRQ mask (bytes 1:2) */
+
+	{ACPI_RSC_BITMASK16, ACPI_RS_OFFSET(data.irq.interrupts[0]),
+	 AML_OFFSET(irq.irq_mask),
+	 ACPI_RS_OFFSET(data.irq.interrupt_count)},
+
+	/* Set default flags (others are zero) */
+
+	{ACPI_RSC_SET8, ACPI_RS_OFFSET(data.irq.triggering),
+	 ACPI_EDGE_SENSITIVE,
+	 1},
+
+	/* Get the descriptor length (2 or 3 for IRQ descriptor) */
+
+	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.irq.descriptor_length),
+	 AML_OFFSET(irq.descriptor_type),
+	 0},
+
+	/* All done if no flag byte present in descriptor */
+
+	{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 3},
+
+	/* Get flags: Triggering[0], Polarity[3], Sharing[4] */
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering),
+	 AML_OFFSET(irq.flags),
+	 0},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.polarity),
+	 AML_OFFSET(irq.flags),
+	 3},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable),
+	 AML_OFFSET(irq.flags),
+	 4}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_set_irq
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_set_irq[13] = {
+	/* Start with a default descriptor of length 3 */
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_IRQ,
+	 sizeof(struct aml_resource_irq),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_set_irq)},
+
+	/* Convert interrupt list to 16-bit IRQ bitmask */
+
+	{ACPI_RSC_BITMASK16, ACPI_RS_OFFSET(data.irq.interrupts[0]),
+	 AML_OFFSET(irq.irq_mask),
+	 ACPI_RS_OFFSET(data.irq.interrupt_count)},
+
+	/* Set the flags byte */
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering),
+	 AML_OFFSET(irq.flags),
+	 0},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.polarity),
+	 AML_OFFSET(irq.flags),
+	 3},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable),
+	 AML_OFFSET(irq.flags),
+	 4},
+
+	/*
+	 * All done if the output descriptor length is required to be 3
+	 * (i.e., optimization to 2 bytes cannot be attempted)
+	 */
+	{ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
+	 ACPI_RS_OFFSET(data.irq.descriptor_length),
+	 3},
+
+	/* Set length to 2 bytes (no flags byte) */
+
+	{ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq_noflags)},
+
+	/*
+	 * All done if the output descriptor length is required to be 2.
+	 *
+	 * TBD: Perhaps we should check for error if input flags are not
+	 * compatible with a 2-byte descriptor.
+	 */
+	{ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
+	 ACPI_RS_OFFSET(data.irq.descriptor_length),
+	 2},
+
+	/* Reset length to 3 bytes (descriptor with flags byte) */
+
+	{ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq)},
+
+	/*
+	 * Check if the flags byte is necessary. Not needed if the flags are:
+	 * ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH, ACPI_EXCLUSIVE
+	 */
+	{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE,
+	 ACPI_RS_OFFSET(data.irq.triggering),
+	 ACPI_EDGE_SENSITIVE},
+
+	{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE,
+	 ACPI_RS_OFFSET(data.irq.polarity),
+	 ACPI_ACTIVE_HIGH},
+
+	{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE,
+	 ACPI_RS_OFFSET(data.irq.sharable),
+	 ACPI_EXCLUSIVE},
+
+	/* We can optimize to a 2-byte irq_no_flags() descriptor */
+
+	{ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq_noflags)}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_ext_irq
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_ext_irq[9] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_EXTENDED_IRQ,
+	 ACPI_RS_SIZE(struct acpi_resource_extended_irq),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_ext_irq)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_EXTENDED_IRQ,
+	 sizeof(struct aml_resource_extended_irq),
+	 0},
+
+	/* Flag bits */
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.producer_consumer),
+	 AML_OFFSET(extended_irq.flags),
+	 0},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.triggering),
+	 AML_OFFSET(extended_irq.flags),
+	 1},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.polarity),
+	 AML_OFFSET(extended_irq.flags),
+	 2},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.sharable),
+	 AML_OFFSET(extended_irq.flags),
+	 3},
+
+	/* IRQ Table length (Byte4) */
+
+	{ACPI_RSC_COUNT, ACPI_RS_OFFSET(data.extended_irq.interrupt_count),
+	 AML_OFFSET(extended_irq.interrupt_count),
+	 sizeof(u32)}
+	,
+
+	/* Copy every IRQ in the table, each is 32 bits */
+
+	{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.extended_irq.interrupts[0]),
+	 AML_OFFSET(extended_irq.interrupts[0]),
+	 0}
+	,
+
+	/* Optional resource_source (Index and String) */
+
+	{ACPI_RSC_SOURCEX, ACPI_RS_OFFSET(data.extended_irq.resource_source),
+	 ACPI_RS_OFFSET(data.extended_irq.interrupts[0]),
+	 sizeof(struct aml_resource_extended_irq)}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_dma
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_dma[6] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_DMA,
+	 ACPI_RS_SIZE(struct acpi_resource_dma),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_dma)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_DMA,
+	 sizeof(struct aml_resource_dma),
+	 0},
+
+	/* Flags: transfer preference, bus mastering, channel speed */
+
+	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.dma.transfer),
+	 AML_OFFSET(dma.flags),
+	 0},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.dma.bus_master),
+	 AML_OFFSET(dma.flags),
+	 2},
+
+	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.dma.type),
+	 AML_OFFSET(dma.flags),
+	 5},
+
+	/* DMA channel mask bits */
+
+	{ACPI_RSC_BITMASK, ACPI_RS_OFFSET(data.dma.channels[0]),
+	 AML_OFFSET(dma.dma_channel_mask),
+	 ACPI_RS_OFFSET(data.dma.channel_count)}
+};
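
The BITMASK16 opcode used by the IRQ tables above converts between the 16-bit IRQ mask stored in the AML descriptor and the explicit interrupt list kept in the internal resource. A standalone sketch of that conversion in both directions, with hypothetical names:

#include <stdint.h>

/* Expand mask bits into a list of IRQ numbers; returns the count */
static unsigned int irq_mask_to_list(uint16_t mask, uint8_t list[16])
{
	unsigned int irq, count = 0;

	for (irq = 0; irq < 16; irq++) {
		if (mask & (1u << irq))
			list[count++] = (uint8_t)irq;
	}
	return count;
}

/* Compress a list of IRQ numbers back into the 16-bit mask */
static uint16_t irq_list_to_mask(const uint8_t *list, unsigned int count)
{
	uint16_t mask = 0;
	unsigned int i;

	for (i = 0; i < count; i++)
		mask |= (uint16_t)(1u << list[i]);

	return mask;
}
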
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
new file mode 100644
index 0000000..1b1dbc6
--- /dev/null
+++ b/drivers/acpi/acpica/rslist.c
@@ -0,0 +1,203 @@
+/*******************************************************************************
+ *
+ * Module Name: rslist - Linked list utilities
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+
+#define _COMPONENT          ACPI_RESOURCES
+ACPI_MODULE_NAME("rslist")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_convert_aml_to_resources
+ *
+ * PARAMETERS:  acpi_walk_aml_callback
+ *              resource_ptr            - Pointer to the buffer that will
+ *                                        contain the output structures
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Convert an AML resource to an internal representation of the
+ *              resource that is aligned and easier to access.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_rs_convert_aml_to_resources(u8 * aml,
+				 u32 length,
+				 u32 offset, u8 resource_index, void **context)
+{
+	struct acpi_resource **resource_ptr =
+	    ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context);
+	struct acpi_resource *resource;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources);
+
+	/*
+	 * Check that the input buffer and all subsequent pointers into it
+	 * are aligned on a native word boundary. Most important on IA64
+	 */
+	resource = *resource_ptr;
+	if (ACPI_IS_MISALIGNED(resource)) {
+		ACPI_WARNING((AE_INFO,
+			      "Misaligned resource pointer %p", resource));
+	}
+
+	/* Convert the AML byte stream resource to a local resource struct */
+
+	status =
+	    acpi_rs_convert_aml_to_resource(resource,
+					    ACPI_CAST_PTR(union aml_resource,
+							  aml),
+					    acpi_gbl_get_resource_dispatch
+					    [resource_index]);
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"Could not convert AML resource (Type %X)",
+				*aml));
+		return_ACPI_STATUS(status);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
+			  "Type %.2X, AmlLength %.2X InternalLength %.2X\n",
+			  acpi_ut_get_resource_type(aml), length,
+			  resource->length));
+
+	/* Point to the next structure in the output buffer */
+
+	*resource_ptr = ACPI_ADD_PTR(void, resource, resource->length);
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_convert_resources_to_aml
+ *
+ * PARAMETERS:  Resource            - Pointer to the resource linked list
+ *              aml_size_needed     - Calculated size of the byte stream
+ *                                    needed from calling acpi_rs_get_aml_length()
+ *                                    The size of the output_buffer is
+ *                                    guaranteed to be >= aml_size_needed
+ *              output_buffer       - Pointer to the buffer that will
+ *                                    contain the byte stream
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Takes the resource linked list and parses it, creating a
+ *              byte stream of resources in the caller's output buffer
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
+				 acpi_size aml_size_needed, u8 * output_buffer)
+{
+	u8 *aml = output_buffer;
+	u8 *end_aml = output_buffer + aml_size_needed;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml);
+
+	/* Walk the resource descriptor list, convert each descriptor */
+
+	while (aml < end_aml) {
+
+		/* Validate the (internal) Resource Type */
+
+		if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
+			ACPI_ERROR((AE_INFO,
+				    "Invalid descriptor type (%X) in resource list",
+				    resource->type));
+			return_ACPI_STATUS(AE_BAD_DATA);
+		}
+
+		/* Perform the conversion */
+
+		status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union
+										 aml_resource,
+										 aml),
+							 acpi_gbl_set_resource_dispatch
+							 [resource->type]);
+		if (ACPI_FAILURE(status)) {
+			ACPI_EXCEPTION((AE_INFO, status,
+					"Could not convert resource (type %X) to AML",
+					resource->type));
+			return_ACPI_STATUS(status);
+		}
+
+		/* Perform final sanity check on the new AML resource descriptor */
+
+		status =
+		    acpi_ut_validate_resource(ACPI_CAST_PTR
+					      (union aml_resource, aml), NULL);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		/* Check for end-of-list, normal exit */
+
+		if (resource->type == ACPI_RESOURCE_TYPE_END_TAG) {
+
+			/* An End Tag indicates the end of the input Resource Template */
+
+			return_ACPI_STATUS(AE_OK);
+		}
+
+		/*
+		 * Extract the total length of the new descriptor and set the
+		 * Aml to point to the next (output) resource descriptor
+		 */
+		aml += acpi_ut_get_descriptor_length(aml);
+
+		/* Point to the next input resource descriptor */
+
+		resource =
+		    ACPI_ADD_PTR(struct acpi_resource, resource,
+				 resource->length);
+	}
+
+	/* Completed buffer, but did not find an end_tag resource descriptor */
+
+	return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+}
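The loop above walks two variable-length record streams in parallel: the internal list advances by resource->length, the AML output advances by the encoded descriptor length, and an END_TAG terminates the template. The following standalone sketch shows only the record-walk pattern on the internal side, using simplified stand-in types rather than the ACPICA structures.

```c
/*
 * Sketch of walking a list of variable-length records terminated by an
 * END_TAG entry, as the converter above does for the internal resource
 * list. The types and constants are illustrative, not ACPICA's.
 */
#include <stdint.h>
#include <stdio.h>

#define RES_TYPE_IRQ     0u
#define RES_TYPE_END_TAG 1u

struct res_header {
	uint32_t type;
	uint32_t length;	/* total length of this record, in bytes */
};

static void walk_resource_list(const uint8_t *list)
{
	const struct res_header *res = (const struct res_header *)list;

	for (;;) {
		if (res->type == RES_TYPE_END_TAG) {
			printf("end of resource template\n");
			return;			/* normal exit */
		}

		printf("descriptor type %u, length %u\n",
		       (unsigned)res->type, (unsigned)res->length);

		/* Advance to the next record using the embedded length */
		res = (const struct res_header *)((const uint8_t *)res +
						  res->length);
	}
}

int main(void)
{
	struct res_header recs[2] = {
		{ RES_TYPE_IRQ,     sizeof(struct res_header) },
		{ RES_TYPE_END_TAG, sizeof(struct res_header) },
	};

	walk_resource_list((const uint8_t *)recs);
	return 0;
}
```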
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
new file mode 100644
index 0000000..ddc76ce
--- /dev/null
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -0,0 +1,236 @@
+/*******************************************************************************
+ *
+ * Module Name: rsmemory - Memory resource descriptors
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+
+#define _COMPONENT          ACPI_RESOURCES
+ACPI_MODULE_NAME("rsmemory")
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_memory24
+ *
+ ******************************************************************************/
+struct acpi_rsconvert_info acpi_rs_convert_memory24[4] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_MEMORY24,
+	 ACPI_RS_SIZE(struct acpi_resource_memory24),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_memory24)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_MEMORY24,
+	 sizeof(struct aml_resource_memory24),
+	 0},
+
+	/* Read/Write bit */
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.memory24.write_protect),
+	 AML_OFFSET(memory24.flags),
+	 0},
+	/*
+	 * These fields are contiguous in both the source and destination:
+	 * Minimum Base Address
+	 * Maximum Base Address
+	 * Address Base Alignment
+	 * Range Length
+	 */
+	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.memory24.minimum),
+	 AML_OFFSET(memory24.minimum),
+	 4}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_memory32
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_memory32[4] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_MEMORY32,
+	 ACPI_RS_SIZE(struct acpi_resource_memory32),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_memory32)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_MEMORY32,
+	 sizeof(struct aml_resource_memory32),
+	 0},
+
+	/* Read/Write bit */
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.memory32.write_protect),
+	 AML_OFFSET(memory32.flags),
+	 0},
+	/*
+	 * These fields are contiguous in both the source and destination:
+	 * Minimum Base Address
+	 * Maximum Base Address
+	 * Address Base Alignment
+	 * Range Length
+	 */
+	{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.memory32.minimum),
+	 AML_OFFSET(memory32.minimum),
+	 4}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_fixed_memory32
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_fixed_memory32[4] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_MEMORY32,
+	 ACPI_RS_SIZE(struct acpi_resource_fixed_memory32),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_memory32)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_MEMORY32,
+	 sizeof(struct aml_resource_fixed_memory32),
+	 0},
+
+	/* Read/Write bit */
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.fixed_memory32.write_protect),
+	 AML_OFFSET(fixed_memory32.flags),
+	 0},
+	/*
+	 * These fields are contiguous in both the source and destination:
+	 * Base Address
+	 * Range Length
+	 */
+	{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.fixed_memory32.address),
+	 AML_OFFSET(fixed_memory32.address),
+	 2}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_get_vendor_small
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_get_vendor_small[3] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_VENDOR,
+	 ACPI_RS_SIZE(struct acpi_resource_vendor),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_get_vendor_small)},
+
+	/* Length of the vendor data (byte count) */
+
+	{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
+	 0,
+	 sizeof(u8)},
+
+	/* Vendor data */
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
+	 sizeof(struct aml_resource_small_header),
+	 0}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_get_vendor_large
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_get_vendor_large[3] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_VENDOR,
+	 ACPI_RS_SIZE(struct acpi_resource_vendor),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_get_vendor_large)},
+
+	/* Length of the vendor data (byte count) */
+
+	{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
+	 0,
+	 sizeof(u8)},
+
+	/* Vendor data */
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
+	 sizeof(struct aml_resource_large_header),
+	 0}
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_set_vendor
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_set_vendor[7] = {
+	/* Default is a small vendor descriptor */
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_VENDOR_SMALL,
+	 sizeof(struct aml_resource_small_header),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_set_vendor)},
+
+	/* Get the length and copy the data */
+
+	{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
+	 0,
+	 0},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
+	 sizeof(struct aml_resource_small_header),
+	 0},
+
+	/*
+	 * All done if the Vendor byte length is 7 or less, meaning that it will
+	 * fit within a small descriptor
+	 */
+	{ACPI_RSC_EXIT_LE, 0, 0, 7},
+
+	/* Must create a large vendor descriptor */
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_VENDOR_LARGE,
+	 sizeof(struct aml_resource_large_header),
+	 0},
+
+	{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
+	 0,
+	 0},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
+	 sizeof(struct aml_resource_large_header),
+	 0}
+};
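The ACPI_RSC_EXIT_LE entry in acpi_rs_set_vendor is what selects between the two encodings: a small descriptor's length field is only three bits wide, so at most 7 bytes of vendor data fit in the small form, and anything longer is re-emitted with a large header. A hedged sketch of that size decision follows; the 1-byte small and 3-byte large header sizes match the small/large encoding handled in rsutils.c, but the helper itself is illustrative and not an ACPICA function.

```c
/*
 * Illustrative helper for the small-vs-large vendor descriptor choice.
 * Not an ACPICA function; it only demonstrates the 7-byte cutoff that
 * the ACPI_RSC_EXIT_LE table entry encodes.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SMALL_HEADER_SIZE 1	/* type and 3-bit length share one byte */
#define LARGE_HEADER_SIZE 3	/* type byte plus 16-bit length */
#define SMALL_MAX_PAYLOAD 7	/* largest value a 3-bit length can hold */

static size_t vendor_descriptor_size(uint16_t byte_length)
{
	if (byte_length <= SMALL_MAX_PAYLOAD)
		return SMALL_HEADER_SIZE + byte_length;	/* small form */

	return LARGE_HEADER_SIZE + byte_length;		/* large form */
}

int main(void)
{
	printf("5 vendor bytes  -> %zu-byte descriptor (small)\n",
	       vendor_descriptor_size(5));
	printf("32 vendor bytes -> %zu-byte descriptor (large)\n",
	       vendor_descriptor_size(32));
	return 0;
}
```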
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
new file mode 100644
index 0000000..5bc49a5
--- /dev/null
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -0,0 +1,561 @@
+/*******************************************************************************
+ *
+ * Module Name: rsmisc - Miscellaneous resource descriptors
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+
+#define _COMPONENT          ACPI_RESOURCES
+ACPI_MODULE_NAME("rsmisc")
+#define INIT_RESOURCE_TYPE(i)       i->resource_offset
+#define INIT_RESOURCE_LENGTH(i)     i->aml_offset
+#define INIT_TABLE_LENGTH(i)        i->value
+#define COMPARE_OPCODE(i)           i->resource_offset
+#define COMPARE_TARGET(i)           i->aml_offset
+#define COMPARE_VALUE(i)            i->value
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_convert_aml_to_resource
+ *
+ * PARAMETERS:  Resource            - Where the internal descriptor is returned
+ *              Aml                 - Pointer to the source AML descriptor
+ *              Info                - Pointer to appropriate conversion table
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Convert an external AML resource descriptor to the corresponding
+ *              internal resource descriptor
+ *
+ ******************************************************************************/
+acpi_status
+acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
+				union aml_resource *aml,
+				struct acpi_rsconvert_info *info)
+{
+	acpi_rs_length aml_resource_length;
+	void *source;
+	void *destination;
+	char *target;
+	u8 count;
+	u8 flags_mode = FALSE;
+	u16 item_count = 0;
+	u16 temp16 = 0;
+
+	ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource);
+
+	if (((acpi_size) resource) & 0x3) {
+
+		/* Each internal resource struct is expected to be 32-bit aligned */
+
+		ACPI_WARNING((AE_INFO,
+			      "Misaligned resource pointer (get): %p Type %2.2X Len %X",
+			      resource, resource->type, resource->length));
+	}
+
+	/* Extract the resource Length field (does not include header length) */
+
+	aml_resource_length = acpi_ut_get_resource_length(aml);
+
+	/*
+	 * First table entry must be ACPI_RSC_INITxxx and must contain the
+	 * table length (# of table entries)
+	 */
+	count = INIT_TABLE_LENGTH(info);
+
+	while (count) {
+		/*
+		 * Source is the external AML byte stream buffer,
+		 * destination is the internal resource descriptor
+		 */
+		source = ACPI_ADD_PTR(void, aml, info->aml_offset);
+		destination =
+		    ACPI_ADD_PTR(void, resource, info->resource_offset);
+
+		switch (info->opcode) {
+		case ACPI_RSC_INITGET:
+			/*
+			 * Get the resource type and the initial (minimum) length
+			 */
+			ACPI_MEMSET(resource, 0, INIT_RESOURCE_LENGTH(info));
+			resource->type = INIT_RESOURCE_TYPE(info);
+			resource->length = INIT_RESOURCE_LENGTH(info);
+			break;
+
+		case ACPI_RSC_INITSET:
+			break;
+
+		case ACPI_RSC_FLAGINIT:
+
+			flags_mode = TRUE;
+			break;
+
+		case ACPI_RSC_1BITFLAG:
+			/*
+			 * Mask and shift the flag bit
+			 */
+			ACPI_SET8(destination) = (u8)
+			    ((ACPI_GET8(source) >> info->value) & 0x01);
+			break;
+
+		case ACPI_RSC_2BITFLAG:
+			/*
+			 * Mask and shift the flag bits
+			 */
+			ACPI_SET8(destination) = (u8)
+			    ((ACPI_GET8(source) >> info->value) & 0x03);
+			break;
+
+		case ACPI_RSC_COUNT:
+
+			item_count = ACPI_GET8(source);
+			ACPI_SET8(destination) = (u8) item_count;
+
+			resource->length = resource->length +
+			    (info->value * (item_count - 1));
+			break;
+
+		case ACPI_RSC_COUNT16:
+
+			item_count = aml_resource_length;
+			ACPI_SET16(destination) = item_count;
+
+			resource->length = resource->length +
+			    (info->value * (item_count - 1));
+			break;
+
+		case ACPI_RSC_LENGTH:
+
+			resource->length = resource->length + info->value;
+			break;
+
+		case ACPI_RSC_MOVE8:
+		case ACPI_RSC_MOVE16:
+		case ACPI_RSC_MOVE32:
+		case ACPI_RSC_MOVE64:
+			/*
+			 * Raw data move. Use the Info value field unless item_count has
+			 * been previously initialized via a COUNT opcode
+			 */
+			if (info->value) {
+				item_count = info->value;
+			}
+			acpi_rs_move_data(destination, source, item_count,
+					  info->opcode);
+			break;
+
+		case ACPI_RSC_SET8:
+
+			ACPI_MEMSET(destination, info->aml_offset, info->value);
+			break;
+
+		case ACPI_RSC_DATA8:
+
+			target = ACPI_ADD_PTR(char, resource, info->value);
+			ACPI_MEMCPY(destination, source, ACPI_GET16(target));
+			break;
+
+		case ACPI_RSC_ADDRESS:
+			/*
+			 * Common handler for address descriptor flags
+			 */
+			if (!acpi_rs_get_address_common(resource, aml)) {
+				return_ACPI_STATUS
+				    (AE_AML_INVALID_RESOURCE_TYPE);
+			}
+			break;
+
+		case ACPI_RSC_SOURCE:
+			/*
+			 * Optional resource_source (Index and String)
+			 */
+			resource->length +=
+			    acpi_rs_get_resource_source(aml_resource_length,
+							info->value,
+							destination, aml, NULL);
+			break;
+
+		case ACPI_RSC_SOURCEX:
+			/*
+			 * Optional resource_source (Index and String). This is the more
+			 * complicated case used by the Interrupt() macro
+			 */
+			target =
+			    ACPI_ADD_PTR(char, resource,
+					 info->aml_offset + (item_count * 4));
+
+			resource->length +=
+			    acpi_rs_get_resource_source(aml_resource_length,
+							(acpi_rs_length) (((item_count - 1) * sizeof(u32)) + info->value), destination, aml, target);
+			break;
+
+		case ACPI_RSC_BITMASK:
+			/*
+			 * 8-bit encoded bitmask (DMA macro)
+			 */
+			item_count =
+			    acpi_rs_decode_bitmask(ACPI_GET8(source),
+						   destination);
+			if (item_count) {
+				resource->length += (item_count - 1);
+			}
+
+			target = ACPI_ADD_PTR(char, resource, info->value);
+			ACPI_SET8(target) = (u8) item_count;
+			break;
+
+		case ACPI_RSC_BITMASK16:
+			/*
+			 * 16-bit encoded bitmask (IRQ macro)
+			 */
+			ACPI_MOVE_16_TO_16(&temp16, source);
+
+			item_count =
+			    acpi_rs_decode_bitmask(temp16, destination);
+			if (item_count) {
+				resource->length += (item_count - 1);
+			}
+
+			target = ACPI_ADD_PTR(char, resource, info->value);
+			ACPI_SET8(target) = (u8) item_count;
+			break;
+
+		case ACPI_RSC_EXIT_NE:
+			/*
+			 * Control - Exit conversion if not equal
+			 */
+			switch (info->resource_offset) {
+			case ACPI_RSC_COMPARE_AML_LENGTH:
+				if (aml_resource_length != info->value) {
+					goto exit;
+				}
+				break;
+
+			case ACPI_RSC_COMPARE_VALUE:
+				if (ACPI_GET8(source) != info->value) {
+					goto exit;
+				}
+				break;
+
+			default:
+
+				ACPI_ERROR((AE_INFO,
+					    "Invalid conversion sub-opcode"));
+				return_ACPI_STATUS(AE_BAD_PARAMETER);
+			}
+			break;
+
+		default:
+
+			ACPI_ERROR((AE_INFO, "Invalid conversion opcode"));
+			return_ACPI_STATUS(AE_BAD_PARAMETER);
+		}
+
+		count--;
+		info++;
+	}
+
+      exit:
+	if (!flags_mode) {
+
+		/* Round the resource struct length up to the next boundary (32 or 64) */
+
+		resource->length =
+		    (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(resource->length);
+	}
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_convert_resource_to_aml
+ *
+ * PARAMETERS:  Resource            - Pointer to the resource descriptor
+ *              Aml                 - Where the AML descriptor is returned
+ *              Info                - Pointer to appropriate conversion table
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Convert an internal resource descriptor to the corresponding
+ *              external AML resource descriptor.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
+				union aml_resource *aml,
+				struct acpi_rsconvert_info *info)
+{
+	void *source = NULL;
+	void *destination;
+	acpi_rsdesc_size aml_length = 0;
+	u8 count;
+	u16 temp16 = 0;
+	u16 item_count = 0;
+
+	ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml);
+
+	/*
+	 * First table entry must be ACPI_RSC_INITxxx and must contain the
+	 * table length (# of table entries)
+	 */
+	count = INIT_TABLE_LENGTH(info);
+
+	while (count) {
+		/*
+		 * Source is the internal resource descriptor,
+		 * destination is the external AML byte stream buffer
+		 */
+		source = ACPI_ADD_PTR(void, resource, info->resource_offset);
+		destination = ACPI_ADD_PTR(void, aml, info->aml_offset);
+
+		switch (info->opcode) {
+		case ACPI_RSC_INITSET:
+
+			ACPI_MEMSET(aml, 0, INIT_RESOURCE_LENGTH(info));
+			aml_length = INIT_RESOURCE_LENGTH(info);
+			acpi_rs_set_resource_header(INIT_RESOURCE_TYPE(info),
+						    aml_length, aml);
+			break;
+
+		case ACPI_RSC_INITGET:
+			break;
+
+		case ACPI_RSC_FLAGINIT:
+			/*
+			 * Clear the flag byte
+			 */
+			ACPI_SET8(destination) = 0;
+			break;
+
+		case ACPI_RSC_1BITFLAG:
+			/*
+			 * Mask and shift the flag bit
+			 */
+			ACPI_SET8(destination) |= (u8)
+			    ((ACPI_GET8(source) & 0x01) << info->value);
+			break;
+
+		case ACPI_RSC_2BITFLAG:
+			/*
+			 * Mask and shift the flag bits
+			 */
+			ACPI_SET8(destination) |= (u8)
+			    ((ACPI_GET8(source) & 0x03) << info->value);
+			break;
+
+		case ACPI_RSC_COUNT:
+
+			item_count = ACPI_GET8(source);
+			ACPI_SET8(destination) = (u8) item_count;
+
+			aml_length =
+			    (u16) (aml_length +
+				   (info->value * (item_count - 1)));
+			break;
+
+		case ACPI_RSC_COUNT16:
+
+			item_count = ACPI_GET16(source);
+			aml_length = (u16) (aml_length + item_count);
+			acpi_rs_set_resource_length(aml_length, aml);
+			break;
+
+		case ACPI_RSC_LENGTH:
+
+			acpi_rs_set_resource_length(info->value, aml);
+			break;
+
+		case ACPI_RSC_MOVE8:
+		case ACPI_RSC_MOVE16:
+		case ACPI_RSC_MOVE32:
+		case ACPI_RSC_MOVE64:
+
+			if (info->value) {
+				item_count = info->value;
+			}
+			acpi_rs_move_data(destination, source, item_count,
+					  info->opcode);
+			break;
+
+		case ACPI_RSC_ADDRESS:
+
+			/* Set the Resource Type, General Flags, and Type-Specific Flags */
+
+			acpi_rs_set_address_common(aml, resource);
+			break;
+
+		case ACPI_RSC_SOURCEX:
+			/*
+			 * Optional resource_source (Index and String). This is the more
+			 * complicated case used by the Interrupt() macro
+			 */
+			aml_length =
+			    acpi_rs_set_resource_source(aml, (acpi_rs_length)
+							aml_length, source);
+			acpi_rs_set_resource_length(aml_length, aml);
+			break;
+
+		case ACPI_RSC_SOURCE:
+			/*
+			 * Optional resource_source (Index and String)
+			 */
+			aml_length =
+			    acpi_rs_set_resource_source(aml, info->value,
+							source);
+			acpi_rs_set_resource_length(aml_length, aml);
+			break;
+
+		case ACPI_RSC_BITMASK:
+			/*
+			 * 8-bit encoded bitmask (DMA macro)
+			 */
+			ACPI_SET8(destination) = (u8)
+			    acpi_rs_encode_bitmask(source,
+						   *ACPI_ADD_PTR(u8, resource,
+								 info->value));
+			break;
+
+		case ACPI_RSC_BITMASK16:
+			/*
+			 * 16-bit encoded bitmask (IRQ macro)
+			 */
+			temp16 = acpi_rs_encode_bitmask(source,
+							*ACPI_ADD_PTR(u8,
+								      resource,
+								      info->
+								      value));
+			ACPI_MOVE_16_TO_16(destination, &temp16);
+			break;
+
+		case ACPI_RSC_EXIT_LE:
+			/*
+			 * Control - Exit conversion if less than or equal
+			 */
+			if (item_count <= info->value) {
+				goto exit;
+			}
+			break;
+
+		case ACPI_RSC_EXIT_NE:
+			/*
+			 * Control - Exit conversion if not equal
+			 */
+			switch (COMPARE_OPCODE(info)) {
+			case ACPI_RSC_COMPARE_VALUE:
+
+				if (*ACPI_ADD_PTR(u8, resource,
+						  COMPARE_TARGET(info)) !=
+				    COMPARE_VALUE(info)) {
+					goto exit;
+				}
+				break;
+
+			default:
+
+				ACPI_ERROR((AE_INFO,
+					    "Invalid conversion sub-opcode"));
+				return_ACPI_STATUS(AE_BAD_PARAMETER);
+			}
+			break;
+
+		case ACPI_RSC_EXIT_EQ:
+			/*
+			 * Control - Exit conversion if equal
+			 */
+			if (*ACPI_ADD_PTR(u8, resource,
+					  COMPARE_TARGET(info)) ==
+			    COMPARE_VALUE(info)) {
+				goto exit;
+			}
+			break;
+
+		default:
+
+			ACPI_ERROR((AE_INFO, "Invalid conversion opcode"));
+			return_ACPI_STATUS(AE_BAD_PARAMETER);
+		}
+
+		count--;
+		info++;
+	}
+
+      exit:
+	return_ACPI_STATUS(AE_OK);
+}
+
+#if 0
+/* Previous resource validations */
+
+if (aml->ext_address64.revision_iD != AML_RESOURCE_EXTENDED_ADDRESS_REVISION) {
+	return_ACPI_STATUS(AE_SUPPORT);
+}
+
+if (resource->data.start_dpf.performance_robustness >= 3) {
+	return_ACPI_STATUS(AE_AML_BAD_RESOURCE_VALUE);
+}
+
+if (((aml->irq.flags & 0x09) == 0x00) || ((aml->irq.flags & 0x09) == 0x09)) {
+	/*
+	 * Only [active_high, edge_sensitive] or [active_low, level_sensitive]
+	 * polarity/trigger interrupts are allowed (ACPI spec, section
+	 * "IRQ Format"), so 0x00 and 0x09 are illegal.
+	 */
+	ACPI_ERROR((AE_INFO,
+		    "Invalid interrupt polarity/trigger in resource list, %X",
+		    aml->irq.flags));
+	return_ACPI_STATUS(AE_BAD_DATA);
+}
+
+resource->data.extended_irq.interrupt_count = temp8;
+if (temp8 < 1) {
+
+	/* Must have at least one IRQ */
+
+	return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
+}
+
+if (resource->data.dma.transfer == 0x03) {
+	ACPI_ERROR((AE_INFO, "Invalid DMA.Transfer preference (3)"));
+	return_ACPI_STATUS(AE_BAD_DATA);
+}
+#endif
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
new file mode 100644
index 0000000..bc03d59
--- /dev/null
+++ b/drivers/acpi/acpica/rsutils.c
@@ -0,0 +1,727 @@
+/*******************************************************************************
+ *
+ * Module Name: rsutils - Utilities for the resource manager
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acresrc.h"
+
+#define _COMPONENT          ACPI_RESOURCES
+ACPI_MODULE_NAME("rsutils")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_decode_bitmask
+ *
+ * PARAMETERS:  Mask            - Bitmask to decode
+ *              List            - Where the converted list is returned
+ *
+ * RETURN:      Count of bits set (length of list)
+ *
+ * DESCRIPTION: Convert a bit mask into a list of values
+ *
+ ******************************************************************************/
+u8 acpi_rs_decode_bitmask(u16 mask, u8 * list)
+{
+	u8 i;
+	u8 bit_count;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/* Decode the mask bits */
+
+	for (i = 0, bit_count = 0; mask; i++) {
+		if (mask & 0x0001) {
+			list[bit_count] = i;
+			bit_count++;
+		}
+
+		mask >>= 1;
+	}
+
+	return (bit_count);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_encode_bitmask
+ *
+ * PARAMETERS:  List            - List of values to encode
+ *              Count           - Length of list
+ *
+ * RETURN:      Encoded bitmask
+ *
+ * DESCRIPTION: Convert a list of values to an encoded bitmask
+ *
+ ******************************************************************************/
+
+u16 acpi_rs_encode_bitmask(u8 * list, u8 count)
+{
+	u32 i;
+	u16 mask;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/* Encode the list into a single bitmask */
+
+	for (i = 0, mask = 0; i < count; i++) {
+		mask |= (0x1 << list[i]);
+	}
+
+	return mask;
+}
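A standalone sketch of the round trip these two helpers implement, for example on a DMA channel mask: decoding 0x0005 yields the channel list {0, 2}, and encoding that list reproduces the mask. The helper names below are illustrative, not the ACPICA symbols.

```c
/*
 * Sketch of the bitmask <-> list round trip, mirroring the decode and
 * encode helpers above in spirit. Names are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t decode_bitmask(uint16_t mask, uint8_t *list)
{
	uint8_t i, count = 0;

	for (i = 0; mask; i++, mask >>= 1) {
		if (mask & 0x0001)
			list[count++] = i;	/* record this bit position */
	}

	return count;
}

static uint16_t encode_bitmask(const uint8_t *list, uint8_t count)
{
	uint16_t mask = 0;
	uint8_t i;

	for (i = 0; i < count; i++)
		mask |= (uint16_t)(1u << list[i]);

	return mask;
}

int main(void)
{
	uint8_t channels[16];
	uint8_t count = decode_bitmask(0x0005, channels);	/* DMA 0 and 2 */

	printf("count=%u channels=%u,%u mask=0x%04x\n",
	       count, channels[0], channels[1],
	       encode_bitmask(channels, count));
	return 0;
}
```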
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_move_data
+ *
+ * PARAMETERS:  Destination         - Pointer to the destination descriptor
+ *              Source              - Pointer to the source descriptor
+ *              item_count          - How many items to move
+ *              move_type           - Byte width
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Move multiple data items from one descriptor to another. Handles
+ *              alignment issues and endian issues if necessary, as configured
+ *              via the ACPI_MOVE_* macros. (This is why a memcpy is not used)
+ *
+ ******************************************************************************/
+
+void
+acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
+{
+	u32 i;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/* One move per item */
+
+	for (i = 0; i < item_count; i++) {
+		switch (move_type) {
+			/*
+			 * For the 8-bit case, we can perform the move all at once
+			 * since there are no alignment or endian issues
+			 */
+		case ACPI_RSC_MOVE8:
+			ACPI_MEMCPY(destination, source, item_count);
+			return;
+
+			/*
+			 * 16-, 32-, and 64-bit cases must use the move macros that perform
+			 * endian conversion and/or accommodate hardware that cannot perform
+			 * misaligned memory transfers
+			 */
+		case ACPI_RSC_MOVE16:
+			ACPI_MOVE_16_TO_16(&ACPI_CAST_PTR(u16, destination)[i],
+					   &ACPI_CAST_PTR(u16, source)[i]);
+			break;
+
+		case ACPI_RSC_MOVE32:
+			ACPI_MOVE_32_TO_32(&ACPI_CAST_PTR(u32, destination)[i],
+					   &ACPI_CAST_PTR(u32, source)[i]);
+			break;
+
+		case ACPI_RSC_MOVE64:
+			ACPI_MOVE_64_TO_64(&ACPI_CAST_PTR(u64, destination)[i],
+					   &ACPI_CAST_PTR(u64, source)[i]);
+			break;
+
+		default:
+			return;
+		}
+	}
+}
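The per-item moves above exist because the AML byte stream may be misaligned and is stored in a fixed byte order, while the internal structures are native. The following sketch shows the same idea for 32-bit items, assembling each value from individual bytes; the helper names are illustrative and not part of ACPICA.

```c
/*
 * Sketch of an endian- and alignment-safe per-item move for 32-bit
 * values, analogous in spirit to the ACPI_RSC_MOVE32 handling above.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Read one little-endian u32 from an arbitrarily aligned byte stream */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void move_u32_items(uint32_t *dst, const uint8_t *src, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++)
		dst[i] = get_le32(src + (i * 4));	/* one move per item */
}

int main(void)
{
	/* Two interrupt numbers, 25 and 33, as raw little-endian bytes */
	const uint8_t raw[8] = { 0x19, 0, 0, 0, 0x21, 0, 0, 0 };
	uint32_t irqs[2];

	move_u32_items(irqs, raw, 2);
	printf("irq0=%u irq1=%u\n", irqs[0], irqs[1]);
	return 0;
}
```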
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_set_resource_length
+ *
+ * PARAMETERS:  total_length        - Length of the AML descriptor, including
+ *                                    the header and length fields.
+ *              Aml                 - Pointer to the raw AML descriptor
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Set the resource_length field of an AML
+ *              resource descriptor, both Large and Small descriptors are
+ *              supported automatically. Note: Descriptor Type field must
+ *              be valid.
+ *
+ ******************************************************************************/
+
+void
+acpi_rs_set_resource_length(acpi_rsdesc_size total_length,
+			    union aml_resource *aml)
+{
+	acpi_rs_length resource_length;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/* Length is the total descriptor length minus the header length */
+
+	resource_length = (acpi_rs_length)
+	    (total_length - acpi_ut_get_resource_header_length(aml));
+
+	/* Length is stored differently for large and small descriptors */
+
+	if (aml->small_header.descriptor_type & ACPI_RESOURCE_NAME_LARGE) {
+
+		/* Large descriptor -- bytes 1-2 contain the 16-bit length */
+
+		ACPI_MOVE_16_TO_16(&aml->large_header.resource_length,
+				   &resource_length);
+	} else {
+		/* Small descriptor -- bits 2:0 of byte 0 contain the length */
+
+		aml->small_header.descriptor_type = (u8)
+
+		    /* Clear any existing length, preserving descriptor type bits */
+		    ((aml->small_header.
+		      descriptor_type & ~ACPI_RESOURCE_NAME_SMALL_LENGTH_MASK)
+
+		     | resource_length);
+	}
+}
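To make the two encodings concrete: a large descriptor stores a 16-bit little-endian length in bytes 1-2 after the type byte, while a small descriptor packs its length into bits 2:0 of the type byte itself. The sketch below operates on raw byte buffers; the example type bytes are only illustrative, and ACPICA itself uses ACPI_MOVE_16_TO_16 for the 16-bit store to stay endian- and alignment-safe.

```c
/*
 * Sketch of the small/large resource length encoding handled above.
 * Raw bytes are written directly; the type-byte values in main() are
 * illustrative examples, not a statement about specific descriptors.
 */
#include <stdint.h>
#include <stdio.h>

#define NAME_LARGE_BIT    0x80	/* bit 7 set marks a large descriptor */
#define SMALL_LENGTH_MASK 0x07	/* bits 2:0 of the small type byte */

static void set_resource_length(uint8_t *desc, uint16_t resource_length)
{
	if (desc[0] & NAME_LARGE_BIT) {
		/* Large: 16-bit length, little-endian, in bytes 1-2 */
		desc[1] = (uint8_t)(resource_length & 0xFF);
		desc[2] = (uint8_t)(resource_length >> 8);
	} else {
		/* Small: clear the old length bits, merge in the new ones */
		desc[0] = (uint8_t)((desc[0] & ~SMALL_LENGTH_MASK) |
				    (resource_length & SMALL_LENGTH_MASK));
	}
}

int main(void)
{
	uint8_t small_desc[2] = { 0x22, 0 };		/* small-form type byte */
	uint8_t large_desc[4] = { 0x86, 0, 0, 0 };	/* large-form type byte */

	set_resource_length(small_desc, 3);
	set_resource_length(large_desc, 9);

	printf("small type byte: 0x%02x\n", (unsigned)small_desc[0]);
	printf("large length:    %u\n",
	       (unsigned)(large_desc[1] | (large_desc[2] << 8)));
	return 0;
}
```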
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_set_resource_header
+ *
+ * PARAMETERS:  descriptor_type     - Byte to be inserted as the type
+ *              total_length        - Length of the AML descriptor, including
+ *                                    the header and length fields.
+ *              Aml                 - Pointer to the raw AML descriptor
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Set the descriptor_type and resource_length fields of an AML
+ *              resource descriptor, both Large and Small descriptors are
+ *              supported automatically
+ *
+ ******************************************************************************/
+
+void
+acpi_rs_set_resource_header(u8 descriptor_type,
+			    acpi_rsdesc_size total_length,
+			    union aml_resource *aml)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	/* Set the Resource Type */
+
+	aml->small_header.descriptor_type = descriptor_type;
+
+	/* Set the Resource Length */
+
+	acpi_rs_set_resource_length(total_length, aml);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_strcpy
+ *
+ * PARAMETERS:  Destination         - Pointer to the destination string
+ *              Source              - Pointer to the source string
+ *
+ * RETURN:      String length, including NULL terminator
+ *
+ * DESCRIPTION: Local string copy that returns the string length, saving a
+ *              strcpy followed by a strlen.
+ *
+ ******************************************************************************/
+
+static u16 acpi_rs_strcpy(char *destination, char *source)
+{
+	u16 i;
+
+	ACPI_FUNCTION_ENTRY();
+
+	for (i = 0; source[i]; i++) {
+		destination[i] = source[i];
+	}
+
+	destination[i] = 0;
+
+	/* Return string length including the NULL terminator */
+
+	return ((u16) (i + 1));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_get_resource_source
+ *
+ * PARAMETERS:  resource_length     - Length field of the descriptor
+ *              minimum_length      - Minimum length of the descriptor (minus
+ *                                    any optional fields)
+ *              resource_source     - Where the resource_source is returned
+ *              Aml                 - Pointer to the raw AML descriptor
+ *              string_ptr          - (optional) where to store the actual
+ *                                    resource_source string
+ *
+ * RETURN:      Length of the string plus NULL terminator, rounded up to native
+ *              word boundary
+ *
+ * DESCRIPTION: Copy the optional resource_source data from a raw AML descriptor
+ *              to an internal resource descriptor
+ *
+ ******************************************************************************/
+
+acpi_rs_length
+acpi_rs_get_resource_source(acpi_rs_length resource_length,
+			    acpi_rs_length minimum_length,
+			    struct acpi_resource_source * resource_source,
+			    union aml_resource * aml, char *string_ptr)
+{
+	acpi_rsdesc_size total_length;
+	u8 *aml_resource_source;
+
+	ACPI_FUNCTION_ENTRY();
+
+	total_length =
+	    resource_length + sizeof(struct aml_resource_large_header);
+	aml_resource_source = ACPI_ADD_PTR(u8, aml, minimum_length);
+
+	/*
+	 * resource_source is present if the length of the descriptor is longer than
+	 * the minimum length.
+	 *
+	 * Note: Some resource descriptors will have an additional null, so
+	 * we add 1 to the minimum length.
+	 */
+	if (total_length > (acpi_rsdesc_size) (minimum_length + 1)) {
+
+		/* Get the resource_source_index */
+
+		resource_source->index = aml_resource_source[0];
+
+		resource_source->string_ptr = string_ptr;
+		if (!string_ptr) {
+			/*
+			 * String destination pointer is not specified; Set the String
+			 * pointer to the end of the current resource_source structure.
+			 */
+			resource_source->string_ptr =
+			    ACPI_ADD_PTR(char, resource_source,
+					 sizeof(struct acpi_resource_source));
+		}
+
+		/*
+		 * In order for the Resource length to be a multiple of the native
+		 * word, calculate the length of the string (+1 for NULL terminator)
+		 * and expand to the next word multiple.
+		 *
+		 * Zero the entire area of the buffer.
+		 */
+		total_length = (u32)
+		ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) + 1;
+		total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
+
+		ACPI_MEMSET(resource_source->string_ptr, 0, total_length);
+
+		/* Copy the resource_source string to the destination */
+
+		resource_source->string_length =
+		    acpi_rs_strcpy(resource_source->string_ptr,
+				   ACPI_CAST_PTR(char,
+						 &aml_resource_source[1]));
+
+		return ((acpi_rs_length) total_length);
+	}
+
+	/* resource_source is not present */
+
+	resource_source->index = 0;
+	resource_source->string_length = 0;
+	resource_source->string_ptr = NULL;
+	return (0);
+}
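For reference, the optional resource_source is laid out as a one-byte index followed by a NUL-terminated string immediately after the fixed part of the descriptor, and it is treated as present only when the descriptor is longer than that fixed part. The sketch below re-implements just that parse with simplified types; the offsets and the sample source string are hypothetical.

```c
/*
 * Simplified parse of an optional resource_source field: an index byte
 * plus a NUL-terminated string appended after the fixed-size part of a
 * descriptor. Types, offsets, and the sample string are illustrative.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct parsed_source {
	uint8_t index;
	char name[32];
};

/* Returns 1 if a resource_source was present, 0 otherwise */
static int parse_resource_source(const uint8_t *desc, size_t total_length,
				 size_t minimum_length,
				 struct parsed_source *out)
{
	if (total_length <= minimum_length + 1)
		return 0;			/* optional field absent */

	out->index = desc[minimum_length];	/* resource_source_index */
	snprintf(out->name, sizeof(out->name), "%s",
		 (const char *)&desc[minimum_length + 1]);
	return 1;
}

int main(void)
{
	uint8_t desc[32] = { 0 };
	struct parsed_source src;

	/* Pretend bytes 0-4 are the fixed part; index and string follow */
	desc[5] = 0;				/* resource_source_index */
	strcpy((char *)&desc[6], "\\_SB.LNKA");

	if (parse_resource_source(desc, 6 + strlen("\\_SB.LNKA") + 1, 5, &src))
		printf("index=%u source=%s\n", src.index, src.name);
	return 0;
}
```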
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_set_resource_source
+ *
+ * PARAMETERS:  Aml                 - Pointer to the raw AML descriptor
+ *              minimum_length      - Minimum length of the descriptor (minus
+ *                                    any optional fields)
+ *              resource_source     - Internal resource_source
+ *
+ * RETURN:      Total length of the AML descriptor
+ *
+ * DESCRIPTION: Convert an optional resource_source from internal format to a
+ *              raw AML resource descriptor
+ *
+ ******************************************************************************/
+
+acpi_rsdesc_size
+acpi_rs_set_resource_source(union aml_resource * aml,
+			    acpi_rs_length minimum_length,
+			    struct acpi_resource_source * resource_source)
+{
+	u8 *aml_resource_source;
+	acpi_rsdesc_size descriptor_length;
+
+	ACPI_FUNCTION_ENTRY();
+
+	descriptor_length = minimum_length;
+
+	/* Non-zero string length indicates presence of a resource_source */
+
+	if (resource_source->string_length) {
+
+		/* Point to the end of the AML descriptor */
+
+		aml_resource_source = ACPI_ADD_PTR(u8, aml, minimum_length);
+
+		/* Copy the resource_source_index */
+
+		aml_resource_source[0] = (u8) resource_source->index;
+
+		/* Copy the resource_source string */
+
+		ACPI_STRCPY(ACPI_CAST_PTR(char, &aml_resource_source[1]),
+			    resource_source->string_ptr);
+
+		/*
+		 * Add the length of the string (+ 1 for null terminator) to the
+		 * final descriptor length
+		 */
+		descriptor_length +=
+		    ((acpi_rsdesc_size) resource_source->string_length + 1);
+	}
+
+	/* Return the new total length of the AML descriptor */
+
+	return (descriptor_length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_get_prt_method_data
+ *
+ * PARAMETERS:  Node            - Device node
+ *              ret_buffer      - Pointer to a buffer structure for the
+ *                                results
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to get the _PRT value of an object
+ *              contained in an object specified by the handle passed in
+ *
+ *              If the function fails an appropriate status will be returned
+ *              and the contents of the caller's buffer are undefined.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_prt_method_data(struct acpi_namespace_node * node,
+			    struct acpi_buffer * ret_buffer)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(rs_get_prt_method_data);
+
+	/* Parameters guaranteed valid by caller */
+
+	/* Execute the method, no parameters */
+
+	status = acpi_ut_evaluate_object(node, METHOD_NAME__PRT,
+					 ACPI_BTYPE_PACKAGE, &obj_desc);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Create a PCI routing table from the package object that comes
+	 * back from the _PRT method execution.
+	 */
+	status = acpi_rs_create_pci_routing_table(obj_desc, ret_buffer);
+
+	/* On exit, we must delete the object returned by evaluate_object */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_get_crs_method_data
+ *
+ * PARAMETERS:  Node            - Device node
+ *              ret_buffer      - Pointer to a buffer structure for the
+ *                                results
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to get the _CRS value of an object
+ *              contained in an object specified by the handle passed in
+ *
+ *              If the function fails an appropriate status will be returned
+ *              and the contents of the caller's buffer are undefined.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
+			    struct acpi_buffer *ret_buffer)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(rs_get_crs_method_data);
+
+	/* Parameters guaranteed valid by caller */
+
+	/* Execute the method, no parameters */
+
+	status = acpi_ut_evaluate_object(node, METHOD_NAME__CRS,
+					 ACPI_BTYPE_BUFFER, &obj_desc);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Make the call to create a resource linked list from the
+	 * byte stream buffer that comes back from the _CRS method
+	 * execution.
+	 */
+	status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
+
+	/* On exit, we must delete the object returned by evaluate_object */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_get_prs_method_data
+ *
+ * PARAMETERS:  Node            - Device node
+ *              ret_buffer      - Pointer to a buffer structure for the
+ *                                results
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to get the _PRS value of an object
+ *              contained in an object specified by the handle passed in
+ *
+ *              If the function fails an appropriate status will be returned
+ *              and the contents of the caller's buffer are undefined.
+ *
+ ******************************************************************************/
+
+#ifdef ACPI_FUTURE_USAGE
+acpi_status
+acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
+			    struct acpi_buffer *ret_buffer)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(rs_get_prs_method_data);
+
+	/* Parameters guaranteed valid by caller */
+
+	/* Execute the method, no parameters */
+
+	status = acpi_ut_evaluate_object(node, METHOD_NAME__PRS,
+					 ACPI_BTYPE_BUFFER, &obj_desc);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Make the call to create a resource linked list from the
+	 * byte stream buffer that comes back from the _PRS method
+	 * execution.
+	 */
+	status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
+
+	/* On exit, we must delete the object returned by evaluate_object */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
+#endif				/*  ACPI_FUTURE_USAGE  */
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_get_method_data
+ *
+ * PARAMETERS:  Handle          - Handle to the containing object
+ *              Path            - Path to method, relative to Handle
+ *              ret_buffer      - Pointer to a buffer structure for the
+ *                                results
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to get the _CRS or _PRS value of an
+ *              object contained in an object specified by the handle passed in
+ *
+ *              If the function fails an appropriate status will be returned
+ *              and the contents of the caller's buffer are undefined.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_method_data(acpi_handle handle,
+			char *path, struct acpi_buffer *ret_buffer)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(rs_get_method_data);
+
+	/* Parameters guaranteed valid by caller */
+
+	/* Execute the method, no parameters */
+
+	status =
+	    acpi_ut_evaluate_object(handle, path, ACPI_BTYPE_BUFFER, &obj_desc);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Make the call to create a resource linked list from the
+	 * byte stream buffer that comes back from the method
+	 * execution.
+	 */
+	status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
+
+	/* On exit, we must delete the object returned by evaluate_object */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_set_srs_method_data
+ *
+ * PARAMETERS:  Node            - Device node
+ *              in_buffer       - Pointer to a buffer structure of the
+ *                                parameter
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to set the _SRS of an object contained
+ *              in an object specified by the handle passed in
+ *
+ *              If the function fails an appropriate status will be returned
+ *              and the contents of the caller's buffer are undefined.
+ *
+ * Note: Parameters guaranteed valid by caller
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
+			    struct acpi_buffer *in_buffer)
+{
+	struct acpi_evaluate_info *info;
+	union acpi_operand_object *args[2];
+	acpi_status status;
+	struct acpi_buffer buffer;
+
+	ACPI_FUNCTION_TRACE(rs_set_srs_method_data);
+
+	/* Allocate and initialize the evaluation information block */
+
+	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+	if (!info) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	info->prefix_node = node;
+	info->pathname = METHOD_NAME__SRS;
+	info->parameters = args;
+	info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+	/*
+	 * The in_buffer parameter will point to a linked list of
+	 * resource parameters. It needs to be formatted into a
+	 * byte stream to be sent in as an input parameter to _SRS
+	 *
+	 * Convert the linked list into a byte stream
+	 */
+	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+	status = acpi_rs_create_aml_resources(in_buffer->pointer, &buffer);
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	/* Create and initialize the method parameter object */
+
+	args[0] = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
+	if (!args[0]) {
+		/*
+		 * Must free the buffer allocated above (otherwise it is freed
+		 * later)
+		 */
+		ACPI_FREE(buffer.pointer);
+		status = AE_NO_MEMORY;
+		goto cleanup;
+	}
+
+	args[0]->buffer.length = (u32) buffer.length;
+	args[0]->buffer.pointer = buffer.pointer;
+	args[0]->common.flags = AOPOBJ_DATA_VALID;
+	args[1] = NULL;
+
+	/* Execute the method, no return value is expected */
+
+	status = acpi_ns_evaluate(info);
+
+	/* Clean up and return the status from acpi_ns_evaluate */
+
+	acpi_ut_remove_reference(args[0]);
+
+      cleanup:
+	ACPI_FREE(info);
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
new file mode 100644
index 0000000..69a2aa5
--- /dev/null
+++ b/drivers/acpi/acpica/rsxface.c
@@ -0,0 +1,571 @@
+/*******************************************************************************
+ *
+ * Module Name: rsxface - Public interfaces to the resource manager
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_RESOURCES
+ACPI_MODULE_NAME("rsxface")
+
+/* Local macros for 16/32-bit to 64-bit conversion */
+#define ACPI_COPY_FIELD(out, in, field)  ((out)->field = (in)->field)
+#define ACPI_COPY_ADDRESS(out, in)                      \
+	ACPI_COPY_FIELD(out, in, resource_type);             \
+	ACPI_COPY_FIELD(out, in, producer_consumer);         \
+	ACPI_COPY_FIELD(out, in, decode);                    \
+	ACPI_COPY_FIELD(out, in, min_address_fixed);         \
+	ACPI_COPY_FIELD(out, in, max_address_fixed);         \
+	ACPI_COPY_FIELD(out, in, info);                      \
+	ACPI_COPY_FIELD(out, in, granularity);               \
+	ACPI_COPY_FIELD(out, in, minimum);                   \
+	ACPI_COPY_FIELD(out, in, maximum);                   \
+	ACPI_COPY_FIELD(out, in, translation_offset);        \
+	ACPI_COPY_FIELD(out, in, address_length);            \
+	ACPI_COPY_FIELD(out, in, resource_source);
+/* Local prototypes */
+static acpi_status
+acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context);
+
+static acpi_status
+acpi_rs_validate_parameters(acpi_handle device_handle,
+			    struct acpi_buffer *buffer,
+			    struct acpi_namespace_node **return_node);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_validate_parameters
+ *
+ * PARAMETERS:  device_handle   - Handle to a device
+ *              Buffer          - Pointer to a data buffer
+ *              return_node     - Pointer to where the device node is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Common parameter validation for resource interfaces
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_rs_validate_parameters(acpi_handle device_handle,
+			    struct acpi_buffer *buffer,
+			    struct acpi_namespace_node **return_node)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+
+	ACPI_FUNCTION_TRACE(rs_validate_parameters);
+
+	/*
+	 * Must have a valid handle to an ACPI device
+	 */
+	if (!device_handle) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	node = acpi_ns_map_handle_to_node(device_handle);
+	if (!node) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	if (node->type != ACPI_TYPE_DEVICE) {
+		return_ACPI_STATUS(AE_TYPE);
+	}
+
+	/*
+	 * Validate the user buffer object
+	 *
+	 * If there is a non-zero buffer length, we also need a valid pointer
+	 * in the buffer. If the buffer length is zero, we will be returning
+	 * the required buffer size later, so keep going.
+	 */
+	status = acpi_ut_validate_buffer(buffer);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	*return_node = node;
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_irq_routing_table
+ *
+ * PARAMETERS:  device_handle   - Handle to the Bus device we are querying
+ *              ret_buffer      - Pointer to a buffer to receive the
+ *                                current resources for the device
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to get the IRQ routing table for a
+ *              specific bus. The caller must first acquire a handle for the
+ *              desired bus. The routing table is placed in the buffer pointed
+ *              to by the ret_buffer parameter.
+ *
+ *              If the function fails an appropriate status will be returned
+ *              and the value of ret_buffer is undefined.
+ *
+ *              This function attempts to execute the _PRT method contained in
+ *              the object indicated by the passed device_handle.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_get_irq_routing_table(acpi_handle device_handle,
+			   struct acpi_buffer *ret_buffer)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+
+	ACPI_FUNCTION_TRACE(acpi_get_irq_routing_table);
+
+	/* Validate parameters then dispatch to internal routine */
+
+	status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_rs_get_prt_method_data(node, ret_buffer);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_irq_routing_table)
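
For reference, a minimal caller-side sketch (illustrative only, not part of this patch): the dump_prt() wrapper, the 'bridge' handle and the printk output are hypothetical, while the ACPI_ALLOCATE_BUFFER / kfree() pattern and the zero-length-terminated list of struct acpi_pci_routing_table entries are the standard way _PRT output is consumed; the usual <acpi/acpi.h> kernel context is assumed.

static acpi_status dump_prt(acpi_handle bridge)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_pci_routing_table *entry;
	acpi_status status;

	status = acpi_get_irq_routing_table(bridge, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	/* _PRT entries are variable length; a zero length ends the list */
	for (entry = buffer.pointer; entry->length;
	     entry = ACPI_ADD_PTR(struct acpi_pci_routing_table, entry,
				  entry->length))
		printk(KERN_DEBUG "PRT: dev 0x%04x pin %u\n",
		       (u32)((entry->address >> 16) & 0xffff), entry->pin);

	kfree(buffer.pointer);
	return AE_OK;
}
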
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_current_resources
+ *
+ * PARAMETERS:  device_handle   - Handle to the device object for the
+ *                                device we are querying
+ *              ret_buffer      - Pointer to a buffer to receive the
+ *                                current resources for the device
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to get the current resources for a
+ *              specific device. The caller must first acquire a handle for
+ *              the desired device. The resource data is placed in the buffer
+ *              pointed to by the ret_buffer variable parameter.
+ *
+ *              If the function fails an appropriate status will be returned
+ *              and the value of ret_buffer is undefined.
+ *
+ *              This function attempts to execute the _CRS method contained in
+ *              the object indicated by the passed device_handle.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_current_resources(acpi_handle device_handle,
+			   struct acpi_buffer *ret_buffer)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+
+	ACPI_FUNCTION_TRACE(acpi_get_current_resources);
+
+	/* Validate parameters then dispatch to internal routine */
+
+	status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_rs_get_crs_method_data(node, ret_buffer);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_current_resources)
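
For reference, a hedged sketch of the usual calling pattern for acpi_get_current_resources() (the get_crs() wrapper and the 'device' handle are hypothetical): ACPI_ALLOCATE_BUFFER lets ACPICA size and allocate the output, which the caller then frees; alternatively, per the acpi_rs_validate_parameters() comment above, a zero-length buffer may be passed first so that only the required size is returned.

static acpi_status get_crs(acpi_handle device)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	status = acpi_get_current_resources(device, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	/*
	 * buffer.pointer now holds a list of struct acpi_resource entries,
	 * terminated by an ACPI_RESOURCE_TYPE_END_TAG descriptor.
	 */
	kfree(buffer.pointer);
	return AE_OK;
}
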
+#ifdef ACPI_FUTURE_USAGE
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_possible_resources
+ *
+ * PARAMETERS:  device_handle   - Handle to the device object for the
+ *                                device we are querying
+ *              ret_buffer      - Pointer to a buffer to receive the
+ *                                resources for the device
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to get a list of the possible resources
+ *              for a specific device. The caller must first acquire a handle
+ *              for the desired device. The resource data is placed in the
+ *              buffer pointed to by the ret_buffer variable.
+ *
+ *              If the function fails an appropriate status will be returned
+ *              and the value of ret_buffer is undefined.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_possible_resources(acpi_handle device_handle,
+			    struct acpi_buffer *ret_buffer)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+
+	ACPI_FUNCTION_TRACE(acpi_get_possible_resources);
+
+	/* Validate parameters then dispatch to internal routine */
+
+	status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_rs_get_prs_method_data(node, ret_buffer);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_possible_resources)
+#endif				/*  ACPI_FUTURE_USAGE  */
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_set_current_resources
+ *
+ * PARAMETERS:  device_handle   - Handle to the device object for the
+ *                                device we are setting resources
+ *              in_buffer       - Pointer to a buffer containing the
+ *                                resources to be set for the device
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to set the current resources for a
+ *              specific device. The caller must first acquire a handle for
+ *              the desired device. The resource data is passed to the routine
+ *              in the buffer pointed to by the in_buffer parameter.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_set_current_resources(acpi_handle device_handle,
+			   struct acpi_buffer *in_buffer)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+
+	ACPI_FUNCTION_TRACE(acpi_set_current_resources);
+
+	/* Validate the buffer, don't allow zero length */
+
+	if ((!in_buffer) || (!in_buffer->pointer) || (!in_buffer->length)) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Validate parameters then dispatch to internal routine */
+
+	status = acpi_rs_validate_parameters(device_handle, in_buffer, &node);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_rs_set_srs_method_data(node, in_buffer);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_current_resources)
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_resource_to_address64
+ *
+ * PARAMETERS:  Resource        - Pointer to a resource
+ *              Out             - Pointer to the user's return buffer
+ *                                (a struct acpi_resource_address64)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: If the resource is an address16, address32, or address64,
+ *              copy it to the address64 return buffer. This saves the
+ *              caller from having to duplicate code for different-sized
+ *              addresses.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_resource_to_address64(struct acpi_resource *resource,
+			   struct acpi_resource_address64 *out)
+{
+	struct acpi_resource_address16 *address16;
+	struct acpi_resource_address32 *address32;
+
+	if (!resource || !out) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	/* Convert 16 or 32 address descriptor to 64 */
+
+	switch (resource->type) {
+	case ACPI_RESOURCE_TYPE_ADDRESS16:
+
+		address16 = (struct acpi_resource_address16 *)&resource->data;
+		ACPI_COPY_ADDRESS(out, address16);
+		break;
+
+	case ACPI_RESOURCE_TYPE_ADDRESS32:
+
+		address32 = (struct acpi_resource_address32 *)&resource->data;
+		ACPI_COPY_ADDRESS(out, address32);
+		break;
+
+	case ACPI_RESOURCE_TYPE_ADDRESS64:
+
+		/* Simple copy for 64 bit source */
+
+		ACPI_MEMCPY(out, &resource->data,
+			    sizeof(struct acpi_resource_address64));
+		break;
+
+	default:
+		return (AE_BAD_PARAMETER);
+	}
+
+	return (AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_resource_to_address64)
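
A hedged sketch of the typical consumer (the callback name and the memory-window accounting are hypothetical): acpi_resource_to_address64() is normally called from a resource-walk callback so that Address16, Address32 and Address64 descriptors can be handled with a single code path.

static acpi_status count_memory_window(struct acpi_resource *resource,
				       void *context)
{
	struct acpi_resource_address64 addr;
	u64 *total = context;

	/* Non-address descriptors return AE_BAD_PARAMETER; just skip them */
	if (ACPI_FAILURE(acpi_resource_to_address64(resource, &addr)))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE)
		*total += addr.address_length;

	return AE_OK;
}
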
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_vendor_resource
+ *
+ * PARAMETERS:  device_handle   - Handle for the parent device object
+ *              Name            - Method name for the parent resource
+ *                                (METHOD_NAME__CRS or METHOD_NAME__PRS)
+ *              Uuid            - Pointer to the UUID to be matched.
+ *                                Includes both subtype and 16-byte UUID
+ *              ret_buffer      - Where the vendor resource is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Walk a resource template for the specified device to find a
+ *              vendor-defined resource that matches the supplied UUID and
+ *              UUID subtype. Returns a struct acpi_resource of type Vendor.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_vendor_resource(acpi_handle device_handle,
+			 char *name,
+			 struct acpi_vendor_uuid * uuid,
+			 struct acpi_buffer * ret_buffer)
+{
+	struct acpi_vendor_walk_info info;
+	acpi_status status;
+
+	/* Other parameters are validated by acpi_walk_resources */
+
+	if (!uuid || !ret_buffer) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	info.uuid = uuid;
+	info.buffer = ret_buffer;
+	info.status = AE_NOT_EXIST;
+
+	/* Walk the _CRS or _PRS resource list for this device */
+
+	status =
+	    acpi_walk_resources(device_handle, name,
+				acpi_rs_match_vendor_resource, &info);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	return (info.status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_vendor_resource)
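
A hedged usage sketch (device handle, wrapper name and UUID bytes are hypothetical): the caller fills a struct acpi_vendor_uuid with the subtype and 16-byte UUID to match, and on success receives a single Vendor-typed struct acpi_resource in the supplied buffer.

static acpi_status find_vendor_descriptor(acpi_handle device)
{
	struct acpi_vendor_uuid uuid = { .subtype = 0 };	/* plus 16 bytes in uuid.data */
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	status = acpi_get_vendor_resource(device, METHOD_NAME__CRS,
					  &uuid, &buffer);
	if (ACPI_SUCCESS(status))
		kfree(buffer.pointer);	/* one Vendor resource was returned */

	return status;
}
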
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_match_vendor_resource
+ *
+ * PARAMETERS:  acpi_walk_resource_callback
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Match a vendor resource via the ACPI 3.0 UUID
+ *
+ ******************************************************************************/
+static acpi_status
+acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
+{
+	struct acpi_vendor_walk_info *info = context;
+	struct acpi_resource_vendor_typed *vendor;
+	struct acpi_buffer *buffer;
+	acpi_status status;
+
+	/* Ignore all descriptors except Vendor */
+
+	if (resource->type != ACPI_RESOURCE_TYPE_VENDOR) {
+		return (AE_OK);
+	}
+
+	vendor = &resource->data.vendor_typed;
+
+	/*
+	 * For a valid match, these conditions must hold:
+	 *
+	 * 1) Length of descriptor data must be at least as long as a UUID struct
+	 * 2) The UUID subtypes must match
+	 * 3) The UUID data must match
+	 */
+	if ((vendor->byte_length < (ACPI_UUID_LENGTH + 1)) ||
+	    (vendor->uuid_subtype != info->uuid->subtype) ||
+	    (ACPI_MEMCMP(vendor->uuid, info->uuid->data, ACPI_UUID_LENGTH))) {
+		return (AE_OK);
+	}
+
+	/* Validate/Allocate/Clear caller buffer */
+
+	buffer = info->buffer;
+	status = acpi_ut_initialize_buffer(buffer, resource->length);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* Found the correct resource, copy and return it */
+
+	ACPI_MEMCPY(buffer->pointer, resource, resource->length);
+	buffer->length = resource->length;
+
+	/* Found the desired descriptor, terminate resource walk */
+
+	info->status = AE_OK;
+	return (AE_CTRL_TERMINATE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_walk_resources
+ *
+ * PARAMETERS:  device_handle   - Handle to the device object for the
+ *                                device we are querying
+ *              Name            - Method name of the resources we want
+ *                                (METHOD_NAME__CRS or METHOD_NAME__PRS)
+ *              user_function   - Called for each resource
+ *              Context         - Passed to user_function
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Retrieves the current or possible resource list for the
+ *              specified device. The user_function is called once for
+ *              each resource in the list.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_walk_resources(acpi_handle device_handle,
+		    char *name,
+		    acpi_walk_resource_callback user_function, void *context)
+{
+	acpi_status status;
+	struct acpi_buffer buffer;
+	struct acpi_resource *resource;
+	struct acpi_resource *resource_end;
+
+	ACPI_FUNCTION_TRACE(acpi_walk_resources);
+
+	/* Parameter validation */
+
+	if (!device_handle || !user_function || !name ||
+	    (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
+	     !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS))) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Get the _CRS or _PRS resource list */
+
+	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+	status = acpi_rs_get_method_data(device_handle, name, &buffer);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Buffer now contains the resource list */
+
+	resource = ACPI_CAST_PTR(struct acpi_resource, buffer.pointer);
+	resource_end =
+	    ACPI_ADD_PTR(struct acpi_resource, buffer.pointer, buffer.length);
+
+	/* Walk the resource list until the end_tag is found (or buffer end) */
+
+	while (resource < resource_end) {
+
+		/* Sanity check the resource */
+
+		if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
+			status = AE_AML_INVALID_RESOURCE_TYPE;
+			break;
+		}
+
+		/* Invoke the user function, abort on any error returned */
+
+		status = user_function(resource, context);
+		if (ACPI_FAILURE(status)) {
+			if (status == AE_CTRL_TERMINATE) {
+
+				/* This is an OK termination by the user function */
+
+				status = AE_OK;
+			}
+			break;
+		}
+
+		/* end_tag indicates end-of-list */
+
+		if (resource->type == ACPI_RESOURCE_TYPE_END_TAG) {
+			break;
+		}
+
+		/* Get the next resource descriptor */
+
+		resource =
+		    ACPI_ADD_PTR(struct acpi_resource, resource,
+				 resource->length);
+	}
+
+	ACPI_FREE(buffer.pointer);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_walk_resources)
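
A hedged end-to-end sketch of acpi_walk_resources() (callback, wrapper and counter are hypothetical): the callback is invoked once per descriptor in _CRS and may return AE_CTRL_TERMINATE to stop the walk early, as the loop above shows.

static acpi_status count_irqs(struct acpi_resource *resource, void *context)
{
	u32 *count = context;

	if (resource->type == ACPI_RESOURCE_TYPE_IRQ ||
	    resource->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ)
		(*count)++;

	return AE_OK;		/* keep walking */
}

static acpi_status count_device_irqs(acpi_handle device, u32 *irq_count)
{
	*irq_count = 0;
	return acpi_walk_resources(device, METHOD_NAME__CRS,
				   count_irqs, irq_count);
}
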
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
new file mode 100644
index 0000000..3636e4f
--- /dev/null
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -0,0 +1,610 @@
+/******************************************************************************
+ *
+ * Module Name: tbfadt   - FADT table utilities
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "actables.h"
+
+#define _COMPONENT          ACPI_TABLES
+ACPI_MODULE_NAME("tbfadt")
+
+/* Local prototypes */
+static inline void
+acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
+			     u8 space_id, u8 byte_width, u64 address);
+
+static void acpi_tb_convert_fadt(void);
+
+static void acpi_tb_validate_fadt(void);
+
+/* Table for conversion of FADT to common internal format and FADT validation */
+
+typedef struct acpi_fadt_info {
+	char *name;
+	u8 address64;
+	u8 address32;
+	u8 length;
+	u8 default_length;
+	u8 type;
+
+} acpi_fadt_info;
+
+#define ACPI_FADT_REQUIRED          1
+#define ACPI_FADT_SEPARATE_LENGTH   2
+
+static struct acpi_fadt_info fadt_info_table[] = {
+	{"Pm1aEventBlock",
+	 ACPI_FADT_OFFSET(xpm1a_event_block),
+	 ACPI_FADT_OFFSET(pm1a_event_block),
+	 ACPI_FADT_OFFSET(pm1_event_length),
+	 ACPI_PM1_REGISTER_WIDTH * 2,	/* Enable + Status register */
+	 ACPI_FADT_REQUIRED},
+
+	{"Pm1bEventBlock",
+	 ACPI_FADT_OFFSET(xpm1b_event_block),
+	 ACPI_FADT_OFFSET(pm1b_event_block),
+	 ACPI_FADT_OFFSET(pm1_event_length),
+	 ACPI_PM1_REGISTER_WIDTH * 2,	/* Enable + Status register */
+	 0},
+
+	{"Pm1aControlBlock",
+	 ACPI_FADT_OFFSET(xpm1a_control_block),
+	 ACPI_FADT_OFFSET(pm1a_control_block),
+	 ACPI_FADT_OFFSET(pm1_control_length),
+	 ACPI_PM1_REGISTER_WIDTH,
+	 ACPI_FADT_REQUIRED},
+
+	{"Pm1bControlBlock",
+	 ACPI_FADT_OFFSET(xpm1b_control_block),
+	 ACPI_FADT_OFFSET(pm1b_control_block),
+	 ACPI_FADT_OFFSET(pm1_control_length),
+	 ACPI_PM1_REGISTER_WIDTH,
+	 0},
+
+	{"Pm2ControlBlock",
+	 ACPI_FADT_OFFSET(xpm2_control_block),
+	 ACPI_FADT_OFFSET(pm2_control_block),
+	 ACPI_FADT_OFFSET(pm2_control_length),
+	 ACPI_PM2_REGISTER_WIDTH,
+	 ACPI_FADT_SEPARATE_LENGTH},
+
+	{"PmTimerBlock",
+	 ACPI_FADT_OFFSET(xpm_timer_block),
+	 ACPI_FADT_OFFSET(pm_timer_block),
+	 ACPI_FADT_OFFSET(pm_timer_length),
+	 ACPI_PM_TIMER_WIDTH,
+	 ACPI_FADT_REQUIRED},
+
+	{"Gpe0Block",
+	 ACPI_FADT_OFFSET(xgpe0_block),
+	 ACPI_FADT_OFFSET(gpe0_block),
+	 ACPI_FADT_OFFSET(gpe0_block_length),
+	 0,
+	 ACPI_FADT_SEPARATE_LENGTH},
+
+	{"Gpe1Block",
+	 ACPI_FADT_OFFSET(xgpe1_block),
+	 ACPI_FADT_OFFSET(gpe1_block),
+	 ACPI_FADT_OFFSET(gpe1_block_length),
+	 0,
+	 ACPI_FADT_SEPARATE_LENGTH}
+};
+
+#define ACPI_FADT_INFO_ENTRIES        (sizeof (fadt_info_table) / sizeof (struct acpi_fadt_info))
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_init_generic_address
+ *
+ * PARAMETERS:  generic_address     - GAS struct to be initialized
+ *              space_id            - ACPI Space ID for this register
+ *              byte_width          - Width of this register, in bytes
+ *              Address             - Address of the register
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Initialize a Generic Address Structure (GAS)
+ *              See the ACPI specification for a full description and
+ *              definition of this structure.
+ *
+ ******************************************************************************/
+
+static inline void
+acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
+			     u8 space_id, u8 byte_width, u64 address)
+{
+
+	/*
+	 * The 64-bit Address field is non-aligned in the byte packed
+	 * GAS struct.
+	 */
+	ACPI_MOVE_64_TO_64(&generic_address->address, &address);
+
+	/* All other fields are byte-wide */
+
+	generic_address->space_id = space_id;
+	generic_address->bit_width = (u8)ACPI_MUL_8(byte_width);
+	generic_address->bit_offset = 0;
+	generic_address->access_width = 0;	/* Access width ANY */
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_parse_fadt
+ *
+ * PARAMETERS:  table_index         - Index for the FADT
+ *              Flags               - Table origin flags
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Initialize the FADT, DSDT and FACS tables
+ *              (FADT contains the addresses of the DSDT and FACS)
+ *
+ ******************************************************************************/
+
+void acpi_tb_parse_fadt(u32 table_index, u8 flags)
+{
+	u32 length;
+	struct acpi_table_header *table;
+
+	/*
+	 * The FADT has multiple versions with different lengths,
+	 * and it contains pointers to both the DSDT and FACS tables.
+	 *
+	 * Get a local copy of the FADT and convert it to a common format
+	 * Map entire FADT, assumed to be smaller than one page.
+	 */
+	length = acpi_gbl_root_table_list.tables[table_index].length;
+
+	table =
+	    acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index].
+			       address, length);
+	if (!table) {
+		return;
+	}
+
+	/*
+	 * Validate the FADT checksum before we copy the table. Ignore
+	 * checksum error as we want to try to get the DSDT and FACS.
+	 */
+	(void)acpi_tb_verify_checksum(table, length);
+
+	/* Obtain a local copy of the FADT in common ACPI 2.0+ format */
+
+	acpi_tb_create_local_fadt(table, length);
+
+	/* All done with the real FADT, unmap it */
+
+	acpi_os_unmap_memory(table, length);
+
+	/* Obtain the DSDT and FACS tables via their addresses within the FADT */
+
+	acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
+			      flags, ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
+
+	acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xfacs,
+			      flags, ACPI_SIG_FACS, ACPI_TABLE_INDEX_FACS);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_create_local_fadt
+ *
+ * PARAMETERS:  Table               - Pointer to BIOS FADT
+ *              Length              - Length of the table
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Get a local copy of the FADT and convert it to a common format.
+ *              Performs validation on some important FADT fields.
+ *
+ * NOTE:        We create a local copy of the FADT regardless of the version.
+ *
+ ******************************************************************************/
+
+void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
+{
+
+	/*
+	 * Check if the FADT is larger than the largest table that we expect
+	 * (the ACPI 2.0/3.0 version). If so, truncate the table, and issue
+	 * a warning.
+	 */
+	if (length > sizeof(struct acpi_table_fadt)) {
+		ACPI_WARNING((AE_INFO,
+			      "FADT (revision %u) is longer than ACPI 2.0 version, "
+			      "truncating length 0x%X to 0x%zX",
+			      table->revision, (unsigned)length,
+			      sizeof(struct acpi_table_fadt)));
+	}
+
+	/* Clear the entire local FADT */
+
+	ACPI_MEMSET(&acpi_gbl_FADT, 0, sizeof(struct acpi_table_fadt));
+
+	/* Copy the original FADT, up to sizeof (struct acpi_table_fadt) */
+
+	ACPI_MEMCPY(&acpi_gbl_FADT, table,
+		    ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
+
+	/*
+	 * 1) Convert the local copy of the FADT to the common internal format
+	 * 2) Validate some of the important values within the FADT
+	 */
+	acpi_tb_convert_fadt();
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_convert_fadt
+ *
+ * PARAMETERS:  None, uses acpi_gbl_FADT
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Converts all versions of the FADT to a common internal format.
+ *              Expand all 32-bit addresses to 64-bit.
+ *
+ * NOTE:        acpi_gbl_FADT must be of size (struct acpi_table_fadt),
+ *              and must contain a copy of the actual FADT.
+ *
+ * ACPICA will use the "X" fields of the FADT for all addresses.
+ *
+ * "X" fields are optional extensions to the original V1.0 fields. Even if
+ * they are present in the structure, they may be left unused by setting
+ * them to zero. Therefore, we must selectively expand V1.0 fields if the
+ * corresponding X field is zero.
+ *
+ * For ACPI 1.0 FADTs, all address fields are expanded to the corresponding
+ * "X" fields.
+ *
+ * For ACPI 2.0 FADTs, any "X" fields that are NULL are filled in by
+ * expanding the corresponding ACPI 1.0 field.
+ *
+ ******************************************************************************/
+
+static void acpi_tb_convert_fadt(void)
+{
+	u8 pm1_register_bit_width;
+	u8 pm1_register_byte_width;
+	struct acpi_generic_address *target64;
+	u32 i;
+
+	/* Update the local FADT table header length */
+
+	acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
+
+	/*
+	 * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
+	 * Later code will always use the X 64-bit field. Also, check for an
+	 * address mismatch between the 32-bit and 64-bit address fields
+	 * (FIRMWARE_CTRL/X_FIRMWARE_CTRL, DSDT/X_DSDT) which would indicate
+	 * the presence of two FACS or two DSDT tables.
+	 */
+	if (!acpi_gbl_FADT.Xfacs) {
+		acpi_gbl_FADT.Xfacs = (u64) acpi_gbl_FADT.facs;
+	} else if (acpi_gbl_FADT.facs &&
+		   (acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) {
+		ACPI_WARNING((AE_INFO,
+		    "32/64 FACS address mismatch in FADT - two FACS tables!"));
+	}
+
+	if (!acpi_gbl_FADT.Xdsdt) {
+		acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
+	} else if (acpi_gbl_FADT.dsdt &&
+		   (acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) {
+		ACPI_WARNING((AE_INFO,
+		    "32/64 DSDT address mismatch in FADT - two DSDT tables!"));
+	}
+
+	/*
+	 * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which
+	 * should be zero are indeed zero. This works around BIOSes that
+	 * inadvertently place values in these fields.
+	 *
+	 * The ACPI 1.0 reserved fields that will be zeroed are the bytes located at
+	 * offsets 45, 55, and 95, and the word located at offsets 109-110.
+	 */
+	if (acpi_gbl_FADT.header.revision < FADT2_REVISION_ID) {
+		acpi_gbl_FADT.preferred_profile = 0;
+		acpi_gbl_FADT.pstate_control = 0;
+		acpi_gbl_FADT.cst_control = 0;
+		acpi_gbl_FADT.boot_flags = 0;
+	}
+
+	/*
+	 * Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X"
+	 * generic address structures as necessary. Later code will always use
+	 * the 64-bit address structures.
+	 */
+	for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
+		target64 =
+		    ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT,
+				 fadt_info_table[i].address64);
+
+		/* Expand only if the 64-bit X target is null */
+
+		if (!target64->address) {
+
+			/* The space_id is always I/O for the 32-bit legacy address fields */
+
+			acpi_tb_init_generic_address(target64,
+						     ACPI_ADR_SPACE_SYSTEM_IO,
+						     *ACPI_ADD_PTR(u8,
+								   &acpi_gbl_FADT,
+								   fadt_info_table
+								   [i].length),
+						     (u64) * ACPI_ADD_PTR(u32,
+									  &acpi_gbl_FADT,
+									  fadt_info_table
+									  [i].
+									  address32));
+		}
+	}
+
+	/* Validate FADT values now, before we make any changes */
+
+	acpi_tb_validate_fadt();
+
+	/*
+	 * Optionally check all register lengths against the default values and
+	 * update them if they are incorrect.
+	 */
+	if (acpi_gbl_use_default_register_widths) {
+		for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
+			target64 =
+			    ACPI_ADD_PTR(struct acpi_generic_address,
+					 &acpi_gbl_FADT,
+					 fadt_info_table[i].address64);
+
+			/*
+			 * If the register is valid (Address != 0) and default_length > 0
+			 * (i.e., not a GPE register), check the width against the default.
+			 */
+			if ((target64->address) &&
+			    (fadt_info_table[i].default_length > 0) &&
+			    (fadt_info_table[i].default_length !=
+			     target64->bit_width)) {
+				ACPI_WARNING((AE_INFO,
+					      "Invalid length for %s: %d, using default %d",
+					      fadt_info_table[i].name,
+					      target64->bit_width,
+					      fadt_info_table[i].
+					      default_length));
+
+				/* Incorrect size, set width to the default */
+
+				target64->bit_width =
+				    fadt_info_table[i].default_length;
+			}
+		}
+	}
+
+	/*
+	 * Get the length of the individual PM1 registers (enable and status).
+	 * Each register is defined to be (event block length / 2).
+	 */
+	pm1_register_bit_width =
+	    (u8)ACPI_DIV_2(acpi_gbl_FADT.xpm1a_event_block.bit_width);
+	pm1_register_byte_width = (u8)ACPI_DIV_8(pm1_register_bit_width);
+
+	/*
+	 * Adjust the lengths of the PM1 Event Blocks so that they can be used to
+	 * access the PM1 status register(s). Use (width / 2)
+	 */
+	acpi_gbl_FADT.xpm1a_event_block.bit_width = pm1_register_bit_width;
+	acpi_gbl_FADT.xpm1b_event_block.bit_width = pm1_register_bit_width;
+
+	/*
+	 * Calculate separate GAS structs for the PM1 Enable registers.
+	 * These addresses do not appear (directly) in the FADT, so it is
+	 * useful to calculate them once, here.
+	 *
+	 * The PM event blocks are split into two register blocks: the PM Status
+	 * Register block comes first, followed immediately by the PM Enable
+	 * Register block. Each is of length (xpm1x_event_block.bit_width/2).
+	 *
+	 * On various systems the v2 fields (and particularly the bit widths)
+	 * cannot be relied upon, though. Hence resort to using the v1 length
+	 * here (and warn about the inconsistency).
+	 */
+	if (acpi_gbl_FADT.xpm1a_event_block.bit_width
+	    != acpi_gbl_FADT.pm1_event_length * 8)
+		printk(KERN_WARNING "FADT: "
+		       "X_PM1a_EVT_BLK.bit_width (%u) does not match"
+		       " PM1_EVT_LEN (%u)\n",
+		       acpi_gbl_FADT.xpm1a_event_block.bit_width,
+		       acpi_gbl_FADT.pm1_event_length);
+
+	/* The PM1A register block is required */
+
+	acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable,
+				     acpi_gbl_FADT.xpm1a_event_block.space_id,
+				     pm1_register_byte_width,
+				     (acpi_gbl_FADT.xpm1a_event_block.address +
+				      pm1_register_byte_width));
+	/* Don't forget to copy space_id of the GAS */
+	acpi_gbl_xpm1a_enable.space_id =
+	    acpi_gbl_FADT.xpm1a_event_block.space_id;
+
+	/* The PM1B register block is optional, ignore if not present */
+
+	if (acpi_gbl_FADT.xpm1b_event_block.address) {
+		if (acpi_gbl_FADT.xpm1b_event_block.bit_width
+		    != acpi_gbl_FADT.pm1_event_length * 8)
+			printk(KERN_WARNING "FADT: "
+			       "X_PM1b_EVT_BLK.bit_width (%u) does not match"
+			       " PM1_EVT_LEN (%u)\n",
+			       acpi_gbl_FADT.xpm1b_event_block.bit_width,
+			       acpi_gbl_FADT.pm1_event_length);
+		acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
+					     acpi_gbl_FADT.xpm1b_event_block.space_id,
+					     pm1_register_byte_width,
+					     (acpi_gbl_FADT.xpm1b_event_block.
+					      address + pm1_register_byte_width));
+		/* Don't forget to copy space_id of the GAS */
+		acpi_gbl_xpm1b_enable.space_id =
+		    acpi_gbl_FADT.xpm1b_event_block.space_id;
+
+	}
+}
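
A worked illustration of the conversion above, with hypothetical values not taken from any real FADT: an ACPI 1.0-only FADT with pm1a_event_block = 0x400 and pm1_event_length = 4 is first expanded into the X field roughly as follows, after which the PM1 adjustment halves the width and derives the enable-register GAS.

/* Hypothetical result of the v1 -> X expansion for pm1a_event_block */
struct acpi_generic_address xpm1a_example = {
	.space_id   = ACPI_ADR_SPACE_SYSTEM_IO,
	.bit_width  = 32,	/* pm1_event_length (4 bytes) * 8 */
	.bit_offset = 0,
	.address    = 0x400,
};

/*
 * acpi_tb_convert_fadt() then sets xpm1a_event_block.bit_width to 16
 * (status register only) and initializes acpi_gbl_xpm1a_enable at
 * 0x400 + 2 = 0x402 with a width of 16 bits.
 */
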
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_validate_fadt
+ *
+ * PARAMETERS:  None, uses acpi_gbl_FADT
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Validate various important fields within the FADT. If a problem
+ *              is found, issue a message, but no status is returned.
+ *              Used by both the table manager and the disassembler.
+ *
+ * Possible additional checks:
+ * (acpi_gbl_FADT.pm1_event_length >= 4)
+ * (acpi_gbl_FADT.pm1_control_length >= 2)
+ * (acpi_gbl_FADT.pm_timer_length >= 4)
+ * Gpe block lengths must be multiple of 2
+ *
+ ******************************************************************************/
+
+static void acpi_tb_validate_fadt(void)
+{
+	char *name;
+	u32 *address32;
+	struct acpi_generic_address *address64;
+	u8 length;
+	u32 i;
+
+	/*
+	 * Check for FACS and DSDT address mismatches. An address mismatch between
+	 * the 32-bit and 64-bit address fields (FIRMWARE_CTRL/X_FIRMWARE_CTRL and
+	 * DSDT/X_DSDT) would indicate the presence of two FACS or two DSDT tables.
+	 */
+	if (acpi_gbl_FADT.facs &&
+	    (acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) {
+		ACPI_WARNING((AE_INFO,
+			      "32/64X FACS address mismatch in FADT - "
+			      "two FACS tables! %8.8X/%8.8X%8.8X",
+			      acpi_gbl_FADT.facs,
+			      ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs)));
+	}
+
+	if (acpi_gbl_FADT.dsdt &&
+	    (acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) {
+		ACPI_WARNING((AE_INFO,
+			      "32/64X DSDT address mismatch in FADT - "
+			      "two DSDT tables! %8.8X/%8.8X%8.8X",
+			      acpi_gbl_FADT.dsdt,
+			      ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt)));
+	}
+
+	/* Examine all of the 64-bit extended address fields (X fields) */
+
+	for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
+		/*
+		 * Generate pointers to the 32-bit and 64-bit addresses, get the
+		 * register length (width), and the register name
+		 */
+		address64 = ACPI_ADD_PTR(struct acpi_generic_address,
+					 &acpi_gbl_FADT,
+					 fadt_info_table[i].address64);
+		address32 =
+		    ACPI_ADD_PTR(u32, &acpi_gbl_FADT,
+				 fadt_info_table[i].address32);
+		length =
+		    *ACPI_ADD_PTR(u8, &acpi_gbl_FADT,
+				  fadt_info_table[i].length);
+		name = fadt_info_table[i].name;
+
+		/*
+		 * For each extended field, check for length mismatch between the
+		 * legacy length field and the corresponding 64-bit X length field.
+		 */
+		if (address64 && (address64->bit_width != ACPI_MUL_8(length))) {
+			ACPI_WARNING((AE_INFO,
+				      "32/64X length mismatch in %s: %d/%d",
+				      name, ACPI_MUL_8(length),
+				      address64->bit_width));
+		}
+
+		if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) {
+			/*
+			 * Field is required (Pm1a_event, Pm1a_control, pm_timer).
+			 * Both the address and length must be non-zero.
+			 */
+			if (!address64->address || !length) {
+				ACPI_ERROR((AE_INFO,
+					    "Required field %s has zero address and/or length: %8.8X%8.8X/%X",
+					    name,
+					    ACPI_FORMAT_UINT64(address64->
+							       address),
+					    length));
+			}
+		} else if (fadt_info_table[i].type & ACPI_FADT_SEPARATE_LENGTH) {
+			/*
+			 * Field is optional (PM2Control, GPE0, GPE1) AND has its own
+			 * length field. If present, both the address and length must be valid.
+			 */
+			if ((address64->address && !length)
+			    || (!address64->address && length)) {
+				ACPI_WARNING((AE_INFO,
+					      "Optional field %s has zero address or length: %8.8X%8.8X/%X",
+					      name,
+					      ACPI_FORMAT_UINT64(address64->
+								 address),
+					      length));
+			}
+		}
+
+		/* If both 32- and 64-bit addresses are valid (non-zero), they must match */
+
+		if (address64->address && *address32 &&
+		    (address64->address != (u64) * address32)) {
+			ACPI_ERROR((AE_INFO,
+				    "32/64X address mismatch in %s: %8.8X/%8.8X%8.8X, using 64X",
+				    name, *address32,
+				    ACPI_FORMAT_UINT64(address64->address)));
+		}
+	}
+}
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
new file mode 100644
index 0000000..1054dfd
--- /dev/null
+++ b/drivers/acpi/acpica/tbfind.c
@@ -0,0 +1,140 @@
+/******************************************************************************
+ *
+ * Module Name: tbfind   - find table
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "actables.h"
+
+#define _COMPONENT          ACPI_TABLES
+ACPI_MODULE_NAME("tbfind")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_find_table
+ *
+ * PARAMETERS:  Signature           - String with ACPI table signature
+ *              oem_id              - String with the table OEM ID
+ *              oem_table_id        - String with the OEM Table ID
+ *              table_index         - Where the table index is returned
+ *
+ * RETURN:      Status and table index
+ *
+ * DESCRIPTION: Find an ACPI table (in the RSDT/XSDT) that matches the
+ *              Signature, OEM ID and OEM Table ID. Returns an index that can
+ *              be used to get the table header or entire table.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_tb_find_table(char *signature,
+		   char *oem_id, char *oem_table_id, u32 *table_index)
+{
+	u32 i;
+	acpi_status status;
+	struct acpi_table_header header;
+
+	ACPI_FUNCTION_TRACE(tb_find_table);
+
+	/* Normalize the input strings */
+
+	ACPI_MEMSET(&header, 0, sizeof(struct acpi_table_header));
+	ACPI_STRNCPY(header.signature, signature, ACPI_NAME_SIZE);
+	ACPI_STRNCPY(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
+	ACPI_STRNCPY(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
+
+	/* Search for the table */
+
+	for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+		if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
+				header.signature, ACPI_NAME_SIZE)) {
+
+			/* Not the requested table */
+
+			continue;
+		}
+
+		/* Table with matching signature has been found */
+
+		if (!acpi_gbl_root_table_list.tables[i].pointer) {
+
+			/* Table is not currently mapped, map it */
+
+			status =
+			    acpi_tb_verify_table(&acpi_gbl_root_table_list.
+						 tables[i]);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+
+			if (!acpi_gbl_root_table_list.tables[i].pointer) {
+				continue;
+			}
+		}
+
+		/* Check for table match on all IDs */
+
+		if (!ACPI_MEMCMP
+		    (acpi_gbl_root_table_list.tables[i].pointer->signature,
+		     header.signature, ACPI_NAME_SIZE) && (!oem_id[0]
+							   ||
+							   !ACPI_MEMCMP
+							   (acpi_gbl_root_table_list.
+							    tables[i].pointer->
+							    oem_id,
+							    header.oem_id,
+							    ACPI_OEM_ID_SIZE))
+		    && (!oem_table_id[0]
+			|| !ACPI_MEMCMP(acpi_gbl_root_table_list.tables[i].
+					pointer->oem_table_id,
+					header.oem_table_id,
+					ACPI_OEM_TABLE_ID_SIZE))) {
+			*table_index = i;
+
+			ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
+					  "Found table [%4.4s]\n",
+					  header.signature));
+			return_ACPI_STATUS(AE_OK);
+		}
+	}
+
+	return_ACPI_STATUS(AE_NOT_FOUND);
+}
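
acpi_tb_find_table() is internal to ACPICA rather than an exported driver interface; a hedged illustration of a lookup (wrapper name hypothetical), where empty OEM strings act as wildcards per the matching logic above:

static acpi_status example_lookup_ssdt(u32 *table_index)
{
	char sig[] = "SSDT";
	char wildcard[] = "";	/* empty OEM IDs match any table */

	return acpi_tb_find_table(sig, wildcard, wildcard, table_index);
}
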
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
new file mode 100644
index 0000000..37374b2
--- /dev/null
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -0,0 +1,574 @@
+/******************************************************************************
+ *
+ * Module Name: tbinstal - ACPI table installation and removal
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "actables.h"
+
+#define _COMPONENT          ACPI_TABLES
+ACPI_MODULE_NAME("tbinstal")
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_verify_table
+ *
+ * PARAMETERS:  table_desc          - Descriptor for the table to verify
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to map (if necessary) and verify the table
+ *
+ *****************************************************************************/
+acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(tb_verify_table);
+
+	/* Map the table if necessary */
+
+	if (!table_desc->pointer) {
+		if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) ==
+		    ACPI_TABLE_ORIGIN_MAPPED) {
+			table_desc->pointer =
+			    acpi_os_map_memory(table_desc->address,
+					       table_desc->length);
+		}
+		if (!table_desc->pointer) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+	}
+
+	/* FACS is the odd table, has no standard ACPI header and no checksum */
+
+	if (!ACPI_COMPARE_NAME(&table_desc->signature, ACPI_SIG_FACS)) {
+
+		/* Always calculate checksum, ignore bad checksum if requested */
+
+		status =
+		    acpi_tb_verify_checksum(table_desc->pointer,
+					    table_desc->length);
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_add_table
+ *
+ * PARAMETERS:  table_desc          - Table descriptor
+ *              table_index         - Where the table index is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to add an ACPI table to the global
+ *              root table list (checking first whether it is already present)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
+{
+	u32 i;
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(tb_add_table);
+
+	if (!table_desc->pointer) {
+		status = acpi_tb_verify_table(table_desc);
+		if (ACPI_FAILURE(status) || !table_desc->pointer) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/*
+	 * Originally, we checked the table signature for "SSDT" or "PSDT" here.
+	 * Next, we added support for OEMx tables, signature "OEM".
+	 * Valid tables were encountered with a null signature, so we've just
+	 * given up on validating the signature, since it seems to be a waste
+	 * of code. The original code was removed (05/2008).
+	 */
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+	/* Check if table is already registered */
+
+	for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+		if (!acpi_gbl_root_table_list.tables[i].pointer) {
+			status =
+			    acpi_tb_verify_table(&acpi_gbl_root_table_list.
+						 tables[i]);
+			if (ACPI_FAILURE(status)
+			    || !acpi_gbl_root_table_list.tables[i].pointer) {
+				continue;
+			}
+		}
+
+		/*
+		 * Check for a table match on the entire table length,
+		 * not just the header.
+		 */
+		if (table_desc->length !=
+		    acpi_gbl_root_table_list.tables[i].length) {
+			continue;
+		}
+
+		if (ACPI_MEMCMP(table_desc->pointer,
+				acpi_gbl_root_table_list.tables[i].pointer,
+				acpi_gbl_root_table_list.tables[i].length)) {
+			continue;
+		}
+
+		/*
+		 * Note: the current mechanism does not unregister a table if it is
+		 * dynamically unloaded. The related namespace entries are deleted,
+		 * but the table remains in the root table list.
+		 *
+		 * The assumption here is that the number of different tables that
+		 * will be loaded is actually small, and there is minimal overhead
+		 * in just keeping the table in case it is needed again.
+		 *
+		 * If this assumption changes in the future (perhaps on large
+		 * machines with many table load/unload operations), tables will
+		 * need to be unregistered when they are unloaded, and slots in the
+		 * root table list should be reused when empty.
+		 */
+
+		/*
+		 * Table is already registered.
+		 * We can delete the table that was passed as a parameter.
+		 */
+		acpi_tb_delete_table(table_desc);
+		*table_index = i;
+
+		if (acpi_gbl_root_table_list.tables[i].
+		    flags & ACPI_TABLE_IS_LOADED) {
+
+			/* Table is still loaded, this is an error */
+
+			status = AE_ALREADY_EXISTS;
+			goto release;
+		} else {
+			/* Table was unloaded, allow it to be reloaded */
+
+			table_desc->pointer =
+			    acpi_gbl_root_table_list.tables[i].pointer;
+			table_desc->address =
+			    acpi_gbl_root_table_list.tables[i].address;
+			status = AE_OK;
+			goto print_header;
+		}
+	}
+
+	/* Add the table to the global root table list */
+
+	status = acpi_tb_store_table(table_desc->address, table_desc->pointer,
+				     table_desc->length, table_desc->flags,
+				     table_index);
+	if (ACPI_FAILURE(status)) {
+		goto release;
+	}
+
+      print_header:
+	acpi_tb_print_table_header(table_desc->address, table_desc->pointer);
+
+      release:
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_resize_root_table_list
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Expand the size of global table array
+ *
+ ******************************************************************************/
+
+acpi_status acpi_tb_resize_root_table_list(void)
+{
+	struct acpi_table_desc *tables;
+
+	ACPI_FUNCTION_TRACE(tb_resize_root_table_list);
+
+	/* allow_resize flag is a parameter to acpi_initialize_tables */
+
+	if (!(acpi_gbl_root_table_list.flags & ACPI_ROOT_ALLOW_RESIZE)) {
+		ACPI_ERROR((AE_INFO,
+			    "Resize of Root Table Array is not allowed"));
+		return_ACPI_STATUS(AE_SUPPORT);
+	}
+
+	/* Increase the Table Array size */
+
+	tables = ACPI_ALLOCATE_ZEROED(((acpi_size) acpi_gbl_root_table_list.
+				       size + ACPI_ROOT_TABLE_SIZE_INCREMENT)
+				      * sizeof(struct acpi_table_desc));
+	if (!tables) {
+		ACPI_ERROR((AE_INFO,
+			    "Could not allocate new root table array"));
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Copy and free the previous table array */
+
+	if (acpi_gbl_root_table_list.tables) {
+		ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
+			    (acpi_size) acpi_gbl_root_table_list.size *
+			    sizeof(struct acpi_table_desc));
+
+		if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
+			ACPI_FREE(acpi_gbl_root_table_list.tables);
+		}
+	}
+
+	acpi_gbl_root_table_list.tables = tables;
+	acpi_gbl_root_table_list.size += ACPI_ROOT_TABLE_SIZE_INCREMENT;
+	acpi_gbl_root_table_list.flags |= (u8) ACPI_ROOT_ORIGIN_ALLOCATED;
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_store_table
+ *
+ * PARAMETERS:  Address             - Table address
+ *              Table               - Table header
+ *              Length              - Table length
+ *              Flags               - Table origin flags
+ *
+ * RETURN:      Status and table index.
+ *
+ * DESCRIPTION: Add an ACPI table to the global table list
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_tb_store_table(acpi_physical_address address,
+		    struct acpi_table_header *table,
+		    u32 length, u8 flags, u32 *table_index)
+{
+	acpi_status status = AE_OK;
+
+	/* Ensure that there is room for the table in the Root Table List */
+
+	if (acpi_gbl_root_table_list.count >= acpi_gbl_root_table_list.size) {
+		status = acpi_tb_resize_root_table_list();
+		if (ACPI_FAILURE(status)) {
+			return (status);
+		}
+	}
+
+	/* Initialize added table */
+
+	acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
+	    address = address;
+	acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
+	    pointer = table;
+	acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].length =
+	    length;
+	acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
+	    owner_id = 0;
+	acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].flags =
+	    flags;
+
+	ACPI_MOVE_32_TO_32(&
+			   (acpi_gbl_root_table_list.
+			    tables[acpi_gbl_root_table_list.count].signature),
+			   table->signature);
+
+	*table_index = acpi_gbl_root_table_list.count;
+	acpi_gbl_root_table_list.count++;
+	return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_delete_table
+ *
+ * PARAMETERS:  table_index         - Table index
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Delete one internal ACPI table
+ *
+ ******************************************************************************/
+
+void acpi_tb_delete_table(struct acpi_table_desc *table_desc)
+{
+	/* Table must be mapped or allocated */
+	if (!table_desc->pointer) {
+		return;
+	}
+	switch (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) {
+	case ACPI_TABLE_ORIGIN_MAPPED:
+		acpi_os_unmap_memory(table_desc->pointer, table_desc->length);
+		break;
+	case ACPI_TABLE_ORIGIN_ALLOCATED:
+		ACPI_FREE(table_desc->pointer);
+		break;
+	default:;
+	}
+
+	table_desc->pointer = NULL;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_terminate
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Delete all internal ACPI tables
+ *
+ ******************************************************************************/
+
+void acpi_tb_terminate(void)
+{
+	u32 i;
+
+	ACPI_FUNCTION_TRACE(tb_terminate);
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+	/* Delete the individual tables */
+
+	for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+		acpi_tb_delete_table(&acpi_gbl_root_table_list.tables[i]);
+	}
+
+	/*
+	 * Delete the root table array if allocated locally. Array cannot be
+	 * mapped, so we don't need to check for that flag.
+	 */
+	if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
+		ACPI_FREE(acpi_gbl_root_table_list.tables);
+	}
+
+	acpi_gbl_root_table_list.tables = NULL;
+	acpi_gbl_root_table_list.flags = 0;
+	acpi_gbl_root_table_list.count = 0;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_delete_namespace_by_owner
+ *
+ * PARAMETERS:  table_index         - Table index
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Delete all namespace objects created when this table was loaded.
+ *
+ ******************************************************************************/
+
+void acpi_tb_delete_namespace_by_owner(u32 table_index)
+{
+	acpi_owner_id owner_id;
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+	if (table_index < acpi_gbl_root_table_list.count) {
+		owner_id =
+		    acpi_gbl_root_table_list.tables[table_index].owner_id;
+	} else {
+		(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+		return;
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+	acpi_ns_delete_namespace_by_owner(owner_id);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_allocate_owner_id
+ *
+ * PARAMETERS:  table_index         - Table index
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Allocates owner_id in table_desc
+ *
+ ******************************************************************************/
+
+acpi_status acpi_tb_allocate_owner_id(u32 table_index)
+{
+	acpi_status status = AE_BAD_PARAMETER;
+
+	ACPI_FUNCTION_TRACE(tb_allocate_owner_id);
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+	if (table_index < acpi_gbl_root_table_list.count) {
+		status = acpi_ut_allocate_owner_id
+		    (&(acpi_gbl_root_table_list.tables[table_index].owner_id));
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_release_owner_id
+ *
+ * PARAMETERS:  table_index         - Table index
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Releases owner_id in table_desc
+ *
+ ******************************************************************************/
+
+acpi_status acpi_tb_release_owner_id(u32 table_index)
+{
+	acpi_status status = AE_BAD_PARAMETER;
+
+	ACPI_FUNCTION_TRACE(tb_release_owner_id);
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+	if (table_index < acpi_gbl_root_table_list.count) {
+		acpi_ut_release_owner_id(&
+					 (acpi_gbl_root_table_list.
+					  tables[table_index].owner_id));
+		status = AE_OK;
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_get_owner_id
+ *
+ * PARAMETERS:  table_index         - Table index
+ *              owner_id            - Where the table owner_id is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Returns owner_id for the ACPI table
+ *
+ ******************************************************************************/
+
+acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id)
+{
+	acpi_status status = AE_BAD_PARAMETER;
+
+	ACPI_FUNCTION_TRACE(tb_get_owner_id);
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+	if (table_index < acpi_gbl_root_table_list.count) {
+		*owner_id =
+		    acpi_gbl_root_table_list.tables[table_index].owner_id;
+		status = AE_OK;
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_is_table_loaded
+ *
+ * PARAMETERS:  table_index         - Table index
+ *
+ * RETURN:      Table Loaded Flag
+ *
+ ******************************************************************************/
+
+u8 acpi_tb_is_table_loaded(u32 table_index)
+{
+	u8 is_loaded = FALSE;
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+	if (table_index < acpi_gbl_root_table_list.count) {
+		is_loaded = (u8)
+		    (acpi_gbl_root_table_list.tables[table_index].
+		     flags & ACPI_TABLE_IS_LOADED);
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+	return (is_loaded);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_set_table_loaded_flag
+ *
+ * PARAMETERS:  table_index         - Table index
+ *              is_loaded           - TRUE if table is loaded, FALSE otherwise
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Sets the table loaded flag to either TRUE or FALSE.
+ *
+ ******************************************************************************/
+
+void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded)
+{
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+	if (table_index < acpi_gbl_root_table_list.count) {
+		if (is_loaded) {
+			acpi_gbl_root_table_list.tables[table_index].flags |=
+			    ACPI_TABLE_IS_LOADED;
+		} else {
+			acpi_gbl_root_table_list.tables[table_index].flags &=
+			    ~ACPI_TABLE_IS_LOADED;
+		}
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+}
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
new file mode 100644
index 0000000..9684cc8
--- /dev/null
+++ b/drivers/acpi/acpica/tbutils.c
@@ -0,0 +1,583 @@
+/******************************************************************************
+ *
+ * Module Name: tbutils   - table utilities
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "actables.h"
+
+#define _COMPONENT          ACPI_TABLES
+ACPI_MODULE_NAME("tbutils")
+
+/* Local prototypes */
+static acpi_physical_address
+acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_check_xsdt
+ *
+ * PARAMETERS:  address                    - Pointer to the XSDT
+ *
+ * RETURN:      status
+ *		AE_OK - XSDT is okay
+ *		AE_NO_MEMORY - can't map XSDT
+ *		AE_INVALID_TABLE_LENGTH - invalid table length
+ *		AE_NULL_ENTRY - XSDT has NULL entry
+ *
+ * DESCRIPTION: Validate the XSDT (map it, check its length, and reject any
+ *              NULL entries)
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_tb_check_xsdt(acpi_physical_address address)
+{
+	struct acpi_table_header *table;
+	u32 length;
+	u64 xsdt_entry_address;
+	u8 *table_entry;
+	u32 table_count;
+	int i;
+
+	table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
+	if (!table)
+		return AE_NO_MEMORY;
+
+	length = table->length;
+	acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+	if (length < sizeof(struct acpi_table_header))
+		return AE_INVALID_TABLE_LENGTH;
+
+	table = acpi_os_map_memory(address, length);
+	if (!table)
+		return AE_NO_MEMORY;
+
+	/* Calculate the number of tables described in XSDT */
+	table_count =
+		(u32) ((table->length -
+		sizeof(struct acpi_table_header)) / sizeof(u64));
+	table_entry =
+		ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
+	for (i = 0; i < table_count; i++) {
+		ACPI_MOVE_64_TO_64(&xsdt_entry_address, table_entry);
+		if (!xsdt_entry_address) {
+			/* XSDT has NULL entry */
+			break;
+		}
+		table_entry += sizeof(u64);
+	}
+	acpi_os_unmap_memory(table, length);
+
+	if (i < table_count)
+		return AE_NULL_ENTRY;
+	else
+		return AE_OK;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_initialize_facs
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a permanent mapping for the FACS and save it in a global
+ *              for accessing the Global Lock and Firmware Waking Vector
+ *
+ ******************************************************************************/
+
+acpi_status acpi_tb_initialize_facs(void)
+{
+	acpi_status status;
+
+	status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
+					 ACPI_CAST_INDIRECT_PTR(struct
+								acpi_table_header,
+								&acpi_gbl_FACS));
+	return status;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_tables_loaded
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      TRUE if required ACPI tables are loaded
+ *
+ * DESCRIPTION: Determine if the minimum required ACPI tables are present
+ *              (FADT, FACS, DSDT)
+ *
+ ******************************************************************************/
+
+u8 acpi_tb_tables_loaded(void)
+{
+
+	if (acpi_gbl_root_table_list.count >= 3) {
+		return (TRUE);
+	}
+
+	return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_print_table_header
+ *
+ * PARAMETERS:  Address             - Table physical address
+ *              Header              - Table header
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print an ACPI table header. Special cases for FACS and RSDP.
+ *
+ ******************************************************************************/
+
+void
+acpi_tb_print_table_header(acpi_physical_address address,
+			   struct acpi_table_header *header)
+{
+
+	if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
+
+		/* FACS only has signature and length fields of common table header */
+
+		ACPI_INFO((AE_INFO, "%4.4s %08lX, %04X",
+			   header->signature, (unsigned long)address,
+			   header->length));
+	} else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
+
+		/* RSDP has no common fields */
+
+		ACPI_INFO((AE_INFO, "RSDP %08lX, %04X (r%d %6.6s)",
+			   (unsigned long)address,
+			   (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
+			    revision >
+			    0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
+					       header)->length : 20,
+			   ACPI_CAST_PTR(struct acpi_table_rsdp,
+					 header)->revision,
+			   ACPI_CAST_PTR(struct acpi_table_rsdp,
+					 header)->oem_id));
+	} else {
+		/* Standard ACPI table with full common header */
+
+		ACPI_INFO((AE_INFO,
+			   "%4.4s %08lX, %04X (r%d %6.6s %8.8s %8X %4.4s %8X)",
+			   header->signature, (unsigned long)address,
+			   header->length, header->revision, header->oem_id,
+			   header->oem_table_id, header->oem_revision,
+			   header->asl_compiler_id,
+			   header->asl_compiler_revision));
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_verify_checksum
+ *
+ * PARAMETERS:  Table               - ACPI table to verify
+ *              Length              - Length of entire table
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Verifies that the table checksums to zero. Optionally returns
+ *              exception on bad checksum.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
+{
+	u8 checksum;
+
+	/* Compute the checksum on the table */
+
+	checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length);
+
+	/* Checksum ok? (should be zero) */
+
+	if (checksum) {
+		ACPI_WARNING((AE_INFO,
+			      "Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X",
+			      table->signature, table->checksum,
+			      (u8) (table->checksum - checksum)));
+
+#if (ACPI_CHECKSUM_ABORT)
+
+		return (AE_BAD_CHECKSUM);
+#endif
+	}
+
+	return (AE_OK);
+}
+
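The checksum convention relied on above is that a well-formed ACPI table carries a checksum byte chosen so that every byte of the table, including the checksum byte itself, sums to zero modulo 256. As a minimal sketch (not part of the patch, and assuming only the acpi_tb_checksum helper that follows), host code could apply the same test that acpi_tb_verify_checksum performs:

/* Hypothetical helper: nonzero when the whole table sums to zero mod 256 */
static int table_checksum_is_consistent(struct acpi_table_header *hdr)
{
	/* acpi_tb_checksum() sums every byte, including hdr->checksum */
	return acpi_tb_checksum((u8 *)hdr, hdr->length) == 0;
}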
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_checksum
+ *
+ * PARAMETERS:  Buffer          - Pointer to memory region to be checked
+ *              Length          - Length of this memory region
+ *
+ * RETURN:      Checksum (u8)
+ *
+ * DESCRIPTION: Calculates circular checksum of memory region.
+ *
+ ******************************************************************************/
+
+u8 acpi_tb_checksum(u8 *buffer, u32 length)
+{
+	u8 sum = 0;
+	u8 *end = buffer + length;
+
+	while (buffer < end) {
+		sum = (u8) (sum + *(buffer++));
+	}
+
+	return sum;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_install_table
+ *
+ * PARAMETERS:  Address                 - Physical address of DSDT or FACS
+ *              Flags                   - Flags
+ *              Signature               - Table signature, NULL if no need to
+ *                                        match
+ *              table_index             - Index into root table array
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Install an ACPI table into the global data structure.
+ *
+ ******************************************************************************/
+
+void
+acpi_tb_install_table(acpi_physical_address address,
+		      u8 flags, char *signature, u32 table_index)
+{
+	struct acpi_table_header *table;
+
+	if (!address) {
+		ACPI_ERROR((AE_INFO,
+			    "Null physical address for ACPI table [%s]",
+			    signature));
+		return;
+	}
+
+	/* Map just the table header */
+
+	table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
+	if (!table) {
+		return;
+	}
+
+	/* If a particular signature is expected, signature must match */
+
+	if (signature && !ACPI_COMPARE_NAME(table->signature, signature)) {
+		ACPI_ERROR((AE_INFO,
+			    "Invalid signature 0x%X for ACPI table [%s]",
+			    *ACPI_CAST_PTR(u32, table->signature), signature));
+		goto unmap_and_exit;
+	}
+
+	/* Initialize the table entry */
+
+	acpi_gbl_root_table_list.tables[table_index].address = address;
+	acpi_gbl_root_table_list.tables[table_index].length = table->length;
+	acpi_gbl_root_table_list.tables[table_index].flags = flags;
+
+	ACPI_MOVE_32_TO_32(&
+			   (acpi_gbl_root_table_list.tables[table_index].
+			    signature), table->signature);
+
+	acpi_tb_print_table_header(address, table);
+
+	if (table_index == ACPI_TABLE_INDEX_DSDT) {
+
+		/* Global integer width is based upon revision of the DSDT */
+
+		acpi_ut_set_integer_width(table->revision);
+	}
+
+      unmap_and_exit:
+	acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_get_root_table_entry
+ *
+ * PARAMETERS:  table_entry         - Pointer to the RSDT/XSDT table entry
+ *              table_entry_size    - sizeof(u32) or sizeof(u64) (RSDT or XSDT)
+ *
+ * RETURN:      Physical address extracted from the root table
+ *
+ * DESCRIPTION: Get one root table entry. Handles 32-bit and 64-bit cases on
+ *              both 32-bit and 64-bit platforms
+ *
+ * NOTE:        acpi_physical_address is 32-bit on 32-bit platforms, 64-bit on
+ *              64-bit platforms.
+ *
+ ******************************************************************************/
+
+static acpi_physical_address
+acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
+{
+	u64 address64;
+
+	/*
+	 * Get the table physical address (32-bit for RSDT, 64-bit for XSDT):
+	 * Note: Addresses are 32-bit aligned (not 64) in both RSDT and XSDT
+	 */
+	if (table_entry_size == sizeof(u32)) {
+		/*
+		 * 32-bit platform, RSDT: Return 32-bit table entry
+		 * 64-bit platform, RSDT: Expand 32-bit to 64-bit and return
+		 */
+		return ((acpi_physical_address)
+			(*ACPI_CAST_PTR(u32, table_entry)));
+	} else {
+		/*
+		 * 32-bit platform, XSDT: Truncate 64-bit to 32-bit and return
+		 * 64-bit platform, XSDT: Move (unaligned) 64-bit to local, return 64-bit
+		 */
+		ACPI_MOVE_64_TO_64(&address64, table_entry);
+
+#if ACPI_MACHINE_WIDTH == 32
+		if (address64 > ACPI_UINT32_MAX) {
+
+			/* Will truncate 64-bit address to 32 bits, issue warning */
+
+			ACPI_WARNING((AE_INFO,
+				      "64-bit Physical Address in XSDT is too large (%8.8X%8.8X), truncating",
+				      ACPI_FORMAT_UINT64(address64)));
+		}
+#endif
+		return ((acpi_physical_address) (address64));
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_parse_root_table
+ *
+ * PARAMETERS:  Rsdp                    - Pointer to the RSDP
+ *              Flags                   - Flags
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to parse the Root System Description
+ *              Table (RSDT or XSDT)
+ *
+ * NOTE:        Tables are mapped (not copied) for efficiency. The FACS must
+ *              be mapped and cannot be copied because it contains the actual
+ *              memory location of the ACPI Global Lock.
+ *
+ ******************************************************************************/
+
+acpi_status __init
+acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags)
+{
+	struct acpi_table_rsdp *rsdp;
+	u32 table_entry_size;
+	u32 i;
+	u32 table_count;
+	struct acpi_table_header *table;
+	acpi_physical_address address;
+	acpi_physical_address uninitialized_var(rsdt_address);
+	u32 length;
+	u8 *table_entry;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(tb_parse_root_table);
+
+	/*
+	 * Map the entire RSDP and extract the address of the RSDT or XSDT
+	 */
+	rsdp = acpi_os_map_memory(rsdp_address, sizeof(struct acpi_table_rsdp));
+	if (!rsdp) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	acpi_tb_print_table_header(rsdp_address,
+				   ACPI_CAST_PTR(struct acpi_table_header,
+						 rsdp));
+
+	/* Differentiate between RSDT and XSDT root tables */
+
+	if (rsdp->revision > 1 && rsdp->xsdt_physical_address
+			&& !acpi_rsdt_forced) {
+		/*
+		 * Root table is an XSDT (64-bit physical addresses). We must use the
+		 * XSDT if the revision is > 1 and the XSDT pointer is present, as per
+		 * the ACPI specification.
+		 */
+		address = (acpi_physical_address) rsdp->xsdt_physical_address;
+		table_entry_size = sizeof(u64);
+		rsdt_address = (acpi_physical_address)
+					rsdp->rsdt_physical_address;
+	} else {
+		/* Root table is an RSDT (32-bit physical addresses) */
+
+		address = (acpi_physical_address) rsdp->rsdt_physical_address;
+		table_entry_size = sizeof(u32);
+	}
+
+	/*
+	 * It is not possible to map more than one entry in some environments,
+	 * so unmap the RSDP here before mapping other tables
+	 */
+	acpi_os_unmap_memory(rsdp, sizeof(struct acpi_table_rsdp));
+
+	if (table_entry_size == sizeof(u64)) {
+		if (acpi_tb_check_xsdt(address) == AE_NULL_ENTRY) {
+			/* XSDT has NULL entry, RSDT is used */
+			address = rsdt_address;
+			table_entry_size = sizeof(u32);
+			ACPI_WARNING((AE_INFO, "BIOS XSDT has NULL entry, "
+					"using RSDT"));
+		}
+	}
+	/* Map the RSDT/XSDT table header to get the full table length */
+
+	table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
+	if (!table) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	acpi_tb_print_table_header(address, table);
+
+	/* Get the length of the full table, verify length and map entire table */
+
+	length = table->length;
+	acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+
+	if (length < sizeof(struct acpi_table_header)) {
+		ACPI_ERROR((AE_INFO, "Invalid length 0x%X in RSDT/XSDT",
+			    length));
+		return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
+	}
+
+	table = acpi_os_map_memory(address, length);
+	if (!table) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Validate the root table checksum */
+
+	status = acpi_tb_verify_checksum(table, length);
+	if (ACPI_FAILURE(status)) {
+		acpi_os_unmap_memory(table, length);
+		return_ACPI_STATUS(status);
+	}
+
+	/* Calculate the number of tables described in the root table */
+
+	table_count =
+	    (u32) ((table->length -
+		    sizeof(struct acpi_table_header)) / table_entry_size);
+
+	/*
+	 * First two entries in the table array are reserved for the DSDT and FACS,
+	 * which are not actually present in the RSDT/XSDT - they come from the FADT
+	 */
+	table_entry =
+	    ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
+	acpi_gbl_root_table_list.count = 2;
+
+	/*
+	 * Initialize the root table array from the RSDT/XSDT
+	 */
+	for (i = 0; i < table_count; i++) {
+		if (acpi_gbl_root_table_list.count >=
+		    acpi_gbl_root_table_list.size) {
+
+			/* There is no more room in the root table array, attempt resize */
+
+			status = acpi_tb_resize_root_table_list();
+			if (ACPI_FAILURE(status)) {
+				ACPI_WARNING((AE_INFO,
+					      "Truncating %u table entries!",
+					      (unsigned)
+					      (acpi_gbl_root_table_list.size -
+					       acpi_gbl_root_table_list.
+					       count)));
+				break;
+			}
+		}
+
+		/* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */
+
+		acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
+		    address =
+		    acpi_tb_get_root_table_entry(table_entry, table_entry_size);
+
+		table_entry += table_entry_size;
+		acpi_gbl_root_table_list.count++;
+	}
+
+	/*
+	 * It is not possible to map more than one entry in some environments,
+	 * so unmap the root table here before mapping other tables
+	 */
+	acpi_os_unmap_memory(table, length);
+
+	/*
+	 * Complete the initialization of the root table array by examining
+	 * the header of each table
+	 */
+	for (i = 2; i < acpi_gbl_root_table_list.count; i++) {
+		acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].
+				      address, flags, NULL, i);
+
+		/* Special case for FADT - get the DSDT and FACS */
+
+		if (ACPI_COMPARE_NAME
+		    (&acpi_gbl_root_table_list.tables[i].signature,
+		     ACPI_SIG_FADT)) {
+			acpi_tb_parse_fadt(i, flags);
+		}
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
new file mode 100644
index 0000000..c3e841f
--- /dev/null
+++ b/drivers/acpi/acpica/tbxface.c
@@ -0,0 +1,735 @@
+/******************************************************************************
+ *
+ * Module Name: tbxface - Public interfaces to the ACPI subsystem
+ *                         ACPI table oriented interfaces
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "actables.h"
+
+#define _COMPONENT          ACPI_TABLES
+ACPI_MODULE_NAME("tbxface")
+
+/* Local prototypes */
+static acpi_status acpi_tb_load_namespace(void);
+
+static int no_auto_ssdt;
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_allocate_root_table
+ *
+ * PARAMETERS:  initial_table_count - Size of initial_table_array, in number of
+ *                                    struct acpi_table_desc structures
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Allocate a root table array. Used by i_aSL compiler and
+ *              acpi_initialize_tables.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_allocate_root_table(u32 initial_table_count)
+{
+
+	acpi_gbl_root_table_list.size = initial_table_count;
+	acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE;
+
+	return (acpi_tb_resize_root_table_list());
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_initialize_tables
+ *
+ * PARAMETERS:  initial_table_array - Pointer to an array of pre-allocated
+ *                                    struct acpi_table_desc structures. If NULL, the
+ *                                    array is dynamically allocated.
+ *              initial_table_count - Size of initial_table_array, in number of
+ *                                    struct acpi_table_desc structures
+ *              allow_resize        - Flag to tell Table Manager if resize of
+ *                                    pre-allocated array is allowed. Ignored
+ *                                    if initial_table_array is NULL.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Initialize the table manager, get the RSDP and RSDT/XSDT.
+ *
+ * NOTE:        Allows static allocation of the initial table array in order
+ *              to avoid the use of dynamic memory in confined environments
+ *              such as the kernel boot sequence where it may not be available.
+ *
+ *              If the host OS memory managers are initialized, use NULL for
+ *              initial_table_array, and the table will be dynamically allocated.
+ *
+ ******************************************************************************/
+
+acpi_status __init
+acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
+		       u32 initial_table_count, u8 allow_resize)
+{
+	acpi_physical_address rsdp_address;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_initialize_tables);
+
+	/*
+	 * Set up the Root Table Array
+	 * Allocate the table array if requested
+	 */
+	if (!initial_table_array) {
+		status = acpi_allocate_root_table(initial_table_count);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	} else {
+		/* Root Table Array has been statically allocated by the host */
+
+		ACPI_MEMSET(initial_table_array, 0,
+			    (acpi_size) initial_table_count *
+			    sizeof(struct acpi_table_desc));
+
+		acpi_gbl_root_table_list.tables = initial_table_array;
+		acpi_gbl_root_table_list.size = initial_table_count;
+		acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN;
+		if (allow_resize) {
+			acpi_gbl_root_table_list.flags |=
+			    ACPI_ROOT_ALLOW_RESIZE;
+		}
+	}
+
+	/* Get the address of the RSDP */
+
+	rsdp_address = acpi_os_get_root_pointer();
+	if (!rsdp_address) {
+		return_ACPI_STATUS(AE_NOT_FOUND);
+	}
+
+	/*
+	 * Get the root table (RSDT or XSDT) and extract all entries to the local
+	 * Root Table Array. This array contains the information of the RSDT/XSDT
+	 * in a common, more usable format.
+	 */
+	status =
+	    acpi_tb_parse_root_table(rsdp_address, ACPI_TABLE_ORIGIN_MAPPED);
+	return_ACPI_STATUS(status);
+}
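As a usage illustration only (the host-side names and the size constant are hypothetical, not part of this patch): a host that runs before its allocator is available can hand ACPICA a static scratch array here, then migrate the root table list to dynamic storage later with acpi_reallocate_root_table:

/* Hypothetical host-side constant; the real size is chosen by the host */
#define HOST_MAX_EARLY_TABLES 128

static struct acpi_table_desc early_tables[HOST_MAX_EARLY_TABLES];

void host_acpi_early_init(void)
{
	acpi_status status;

	/* Allocator not up yet: use the static array, allow later resize */
	status = acpi_initialize_tables(early_tables,
					HOST_MAX_EARLY_TABLES, TRUE);
	if (ACPI_FAILURE(status))
		printk(KERN_ERR "ACPI: early table init failed\n");
}

void host_acpi_late_init(void)
{
	/* Dynamic memory now available: copy the list into allocated storage */
	acpi_reallocate_root_table();
}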
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_reallocate_root_table
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Reallocate Root Table List into dynamic memory. Copies the
+ *              root list from the previously provided scratch area. Should
+ *              be called once dynamic memory allocation is available in the
+ *              kernel
+ *
+ ******************************************************************************/
+acpi_status acpi_reallocate_root_table(void)
+{
+	struct acpi_table_desc *tables;
+	acpi_size new_size;
+
+	ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
+
+	/*
+	 * Only reallocate the root table if the host provided a static buffer
+	 * for the table array in the call to acpi_initialize_tables.
+	 */
+	if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
+		return_ACPI_STATUS(AE_SUPPORT);
+	}
+
+	new_size = ((acpi_size) acpi_gbl_root_table_list.count +
+		    ACPI_ROOT_TABLE_SIZE_INCREMENT) *
+	    sizeof(struct acpi_table_desc);
+
+	/* Create new array and copy the old array */
+
+	tables = ACPI_ALLOCATE_ZEROED(new_size);
+	if (!tables) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, new_size);
+
+	acpi_gbl_root_table_list.size = acpi_gbl_root_table_list.count;
+	acpi_gbl_root_table_list.tables = tables;
+	acpi_gbl_root_table_list.flags =
+	    ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE;
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_load_table
+ *
+ * PARAMETERS:  table_ptr       - pointer to a buffer containing the entire
+ *                                table to be loaded
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to load a table from the caller's
+ *              buffer. The buffer must contain an entire ACPI Table including
+ *              a valid header. The header fields will be verified, and if it
+ *              is determined that the table is invalid, the call will fail.
+ *
+ ******************************************************************************/
+acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
+{
+	acpi_status status;
+	u32 table_index;
+	struct acpi_table_desc table_desc;
+
+	if (!table_ptr)
+		return AE_BAD_PARAMETER;
+
+	ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
+	table_desc.pointer = table_ptr;
+	table_desc.length = table_ptr->length;
+	table_desc.flags = ACPI_TABLE_ORIGIN_UNKNOWN;
+
+	/*
+	 * Install the new table into the local data structures
+	 */
+	status = acpi_tb_add_table(&table_desc, &table_index);
+	if (ACPI_FAILURE(status)) {
+		return status;
+	}
+	status = acpi_ns_load_table(table_index, acpi_gbl_root_node);
+	return status;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_load_table)
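A minimal caller sketch (the ssdt_image buffer is hypothetical): the buffer must already hold a complete table, header included, in memory the kernel can read directly:

/* Hypothetical buffer holding a complete SSDT image */
extern u8 ssdt_image[];

static acpi_status host_load_extra_ssdt(void)
{
	struct acpi_table_header *table;

	table = ACPI_CAST_PTR(struct acpi_table_header, ssdt_image);

	/* Installs the table and loads its definition block into the namespace */
	return acpi_load_table(table);
}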
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_get_table_header
+ *
+ * PARAMETERS:  Signature           - ACPI signature of needed table
+ *              Instance            - Which instance (for SSDTs)
+ *              out_table_header    - The pointer to the table header to fill
+ *
+ * RETURN:      Status and a copy of the table header
+ *
+ * DESCRIPTION: Finds an ACPI table header and copies it into the
+ *              caller-supplied out_table_header structure.
+ *
+ * NOTE:        Any mapping made to read the header is released internally;
+ *              the caller does not need to unmap anything.
+ *
+ *****************************************************************************/
+acpi_status
+acpi_get_table_header(char *signature,
+		      u32 instance, struct acpi_table_header *out_table_header)
+{
+	u32 i;
+	u32 j;
+	struct acpi_table_header *header;
+
+	/* Parameter validation */
+
+	if (!signature || !out_table_header) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	/*
+	 * Walk the root table list
+	 */
+	for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
+		if (!ACPI_COMPARE_NAME
+		    (&(acpi_gbl_root_table_list.tables[i].signature),
+		     signature)) {
+			continue;
+		}
+
+		if (++j < instance) {
+			continue;
+		}
+
+		if (!acpi_gbl_root_table_list.tables[i].pointer) {
+			if ((acpi_gbl_root_table_list.tables[i].
+			     flags & ACPI_TABLE_ORIGIN_MASK) ==
+			    ACPI_TABLE_ORIGIN_MAPPED) {
+				header =
+				    acpi_os_map_memory(acpi_gbl_root_table_list.
+						       tables[i].address,
+						       sizeof(struct
+							      acpi_table_header));
+				if (!header) {
+					return AE_NO_MEMORY;
+				}
+				ACPI_MEMCPY(out_table_header, header,
+					    sizeof(struct acpi_table_header));
+				acpi_os_unmap_memory(header,
+						     sizeof(struct
+							    acpi_table_header));
+			} else {
+				return AE_NOT_FOUND;
+			}
+		} else {
+			ACPI_MEMCPY(out_table_header,
+				    acpi_gbl_root_table_list.tables[i].pointer,
+				    sizeof(struct acpi_table_header));
+		}
+		return (AE_OK);
+	}
+
+	return (AE_NOT_FOUND);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_table_header)
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_unload_table_id
+ *
+ * PARAMETERS:  id            - Owner ID of the table to be removed.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This routine is used to force the unload of a table (by id)
+ *
+ ******************************************************************************/
+acpi_status acpi_unload_table_id(acpi_owner_id id)
+{
+	int i;
+	acpi_status status = AE_NOT_EXIST;
+
+	ACPI_FUNCTION_TRACE(acpi_unload_table_id);
+
+	/* Find table in the global table list */
+	for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+		if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
+			continue;
+		}
+		/*
+		 * Delete all namespace objects owned by this table. Note that these
+		 * objects can appear anywhere in the namespace by virtue of the AML
+		 * "Scope" operator. Thus, we need to track ownership by an ID, not
+		 * simply a position within the hierarchy
+		 */
+		acpi_tb_delete_namespace_by_owner(i);
+		status = acpi_tb_release_owner_id(i);
+		acpi_tb_set_table_loaded_flag(i, FALSE);
+		break;
+	}
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_table
+ *
+ * PARAMETERS:  Signature           - ACPI signature of needed table
+ *              Instance            - Which instance (for SSDTs)
+ *              out_table           - Where the pointer to the table is returned
+ *
+ * RETURN:      Status and pointer to table
+ *
+ * DESCRIPTION: Finds and verifies an ACPI table.
+ *
+ *****************************************************************************/
+acpi_status
+acpi_get_table(char *signature,
+	       u32 instance, struct acpi_table_header **out_table)
+{
+	u32 i;
+	u32 j;
+	acpi_status status;
+
+	/* Parameter validation */
+
+	if (!signature || !out_table) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	/*
+	 * Walk the root table list
+	 */
+	for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
+		if (!ACPI_COMPARE_NAME
+		    (&(acpi_gbl_root_table_list.tables[i].signature),
+		     signature)) {
+			continue;
+		}
+
+		if (++j < instance) {
+			continue;
+		}
+
+		status =
+		    acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]);
+		if (ACPI_SUCCESS(status)) {
+			*out_table = acpi_gbl_root_table_list.tables[i].pointer;
+		}
+
+		if (!acpi_gbl_permanent_mmap) {
+			acpi_gbl_root_table_list.tables[i].pointer = NULL;
+		}
+
+		return (status);
+	}
+
+	return (AE_NOT_FOUND);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_table)
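For reference, a minimal caller sketch (assuming only the ACPI_SIG_MADT signature constant from the ACPICA headers); Instance counts from 1 and only matters for tables that may appear more than once, such as SSDTs:

static void host_show_madt(void)
{
	struct acpi_table_header *madt;
	acpi_status status;

	/* Fetch the first (and normally only) MADT instance */
	status = acpi_get_table(ACPI_SIG_MADT, 1, &madt);
	if (ACPI_FAILURE(status))
		return;

	/* madt now points at the verified, mapped table */
	printk(KERN_INFO "ACPI: %4.4s length %u\n",
	       madt->signature, madt->length);
}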
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_table_by_index
+ *
+ * PARAMETERS:  table_index         - Table index
+ *              Table               - Where the pointer to the table is returned
+ *
+ * RETURN:      Status and pointer to the table
+ *
+ * DESCRIPTION: Obtain a table by an index into the global table list.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_get_table_by_index);
+
+	/* Parameter validation */
+
+	if (!table) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+	/* Validate index */
+
+	if (table_index >= acpi_gbl_root_table_list.count) {
+		(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	if (!acpi_gbl_root_table_list.tables[table_index].pointer) {
+
+		/* Table is not mapped, map it */
+
+		status =
+		    acpi_tb_verify_table(&acpi_gbl_root_table_list.
+					 tables[table_index]);
+		if (ACPI_FAILURE(status)) {
+			(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	*table = acpi_gbl_root_table_list.tables[table_index].pointer;
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+	return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_load_namespace
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Load the namespace from the DSDT and all SSDTs/PSDTs found in
+ *              the RSDT/XSDT.
+ *
+ ******************************************************************************/
+static acpi_status acpi_tb_load_namespace(void)
+{
+	acpi_status status;
+	struct acpi_table_header *table;
+	u32 i;
+
+	ACPI_FUNCTION_TRACE(tb_load_namespace);
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+	/*
+	 * Load the namespace. The DSDT is required, but any SSDT and PSDT tables
+	 * are optional.
+	 */
+	if (!acpi_gbl_root_table_list.count ||
+	    !ACPI_COMPARE_NAME(&
+			       (acpi_gbl_root_table_list.
+				tables[ACPI_TABLE_INDEX_DSDT].signature),
+			       ACPI_SIG_DSDT)
+	    ||
+	    ACPI_FAILURE(acpi_tb_verify_table
+			 (&acpi_gbl_root_table_list.
+			  tables[ACPI_TABLE_INDEX_DSDT]))) {
+		status = AE_NO_ACPI_TABLES;
+		goto unlock_and_exit;
+	}
+
+	/*
+	 * Find DSDT table
+	 */
+	status =
+	    acpi_os_table_override(acpi_gbl_root_table_list.
+				   tables[ACPI_TABLE_INDEX_DSDT].pointer,
+				   &table);
+	if (ACPI_SUCCESS(status) && table) {
+		/*
+		 * DSDT table has been found
+		 */
+		acpi_tb_delete_table(&acpi_gbl_root_table_list.
+				     tables[ACPI_TABLE_INDEX_DSDT]);
+		acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer =
+		    table;
+		acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].length =
+		    table->length;
+		acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].flags =
+		    ACPI_TABLE_ORIGIN_UNKNOWN;
+
+		ACPI_INFO((AE_INFO, "Table DSDT replaced by host OS"));
+		acpi_tb_print_table_header(0, table);
+
+		if (no_auto_ssdt == 0) {
+			printk(KERN_WARNING "ACPI: DSDT override uses original SSDTs unless \"acpi_no_auto_ssdt\"\n");
+		}
+	}
+
+	status =
+	    acpi_tb_verify_table(&acpi_gbl_root_table_list.
+				 tables[ACPI_TABLE_INDEX_DSDT]);
+	if (ACPI_FAILURE(status)) {
+
+		/* A valid DSDT is required */
+
+		status = AE_NO_ACPI_TABLES;
+		goto unlock_and_exit;
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+
+	/*
+	 * Load and parse tables.
+	 */
+	status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Load any SSDT or PSDT tables. Note: Loop leaves tables locked
+	 */
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+	for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+		if ((!ACPI_COMPARE_NAME
+		     (&(acpi_gbl_root_table_list.tables[i].signature),
+		      ACPI_SIG_SSDT)
+		     &&
+		     !ACPI_COMPARE_NAME(&
+					(acpi_gbl_root_table_list.tables[i].
+					 signature), ACPI_SIG_PSDT))
+		    ||
+		    ACPI_FAILURE(acpi_tb_verify_table
+				 (&acpi_gbl_root_table_list.tables[i]))) {
+			continue;
+		}
+
+		if (no_auto_ssdt) {
+			printk(KERN_WARNING "ACPI: SSDT ignored due to \"acpi_no_auto_ssdt\"\n");
+			continue;
+		}
+
+		/* Ignore errors while loading tables, get as many as possible */
+
+		(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+		(void)acpi_ns_load_table(i, acpi_gbl_root_node);
+		(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n"));
+
+      unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_load_tables
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
+ *
+ ******************************************************************************/
+
+acpi_status acpi_load_tables(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_load_tables);
+
+	/*
+	 * Load the namespace from the tables
+	 */
+	status = acpi_tb_load_namespace();
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"While loading namespace from ACPI tables"));
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_load_tables)
+
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_table_handler
+ *
+ * PARAMETERS:  Handler         - Table event handler
+ *              Context         - Value passed to the handler on each event
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install table event handler
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_table_handler(acpi_tbl_handler handler, void *context)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_install_table_handler);
+
+	if (!handler) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Don't allow more than one handler */
+
+	if (acpi_gbl_table_handler) {
+		status = AE_ALREADY_EXISTS;
+		goto cleanup;
+	}
+
+	/* Install the handler */
+
+	acpi_gbl_table_handler = handler;
+	acpi_gbl_table_handler_context = context;
+
+      cleanup:
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_table_handler)
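As a sketch of a client (the handler name is hypothetical; the acpi_tbl_handler prototype and the ACPI_TABLE_EVENT_LOAD event code are assumed from the ACPICA headers), the single installed handler is invoked on each table event:

/* Hypothetical handler: log every table load event */
static acpi_status host_table_event(u32 event, void *table, void *context)
{
	if (event == ACPI_TABLE_EVENT_LOAD)
		printk(KERN_INFO "ACPI: table loaded at %p\n", table);

	return AE_OK;
}

static void host_register_table_handler(void)
{
	/* Only one handler may be installed at a time */
	(void)acpi_install_table_handler(host_table_event, NULL);
}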
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_remove_table_handler
+ *
+ * PARAMETERS:  Handler         - Table event handler that was installed
+ *                                previously.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove table event handler
+ *
+ ******************************************************************************/
+acpi_status acpi_remove_table_handler(acpi_tbl_handler handler)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_remove_table_handler);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Make sure that the installed handler is the same */
+
+	if (!handler || handler != acpi_gbl_table_handler) {
+		status = AE_BAD_PARAMETER;
+		goto cleanup;
+	}
+
+	/* Remove the handler */
+
+	acpi_gbl_table_handler = NULL;
+
+      cleanup:
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_table_handler)
+
+
+static int __init acpi_no_auto_ssdt_setup(char *s)
+{
+	printk(KERN_NOTICE "ACPI: SSDT auto-load disabled\n");
+
+	no_auto_ssdt = 1;
+
+	return 1;
+}
+
+__setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
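With this hook in place, passing acpi_no_auto_ssdt on the kernel command line sets no_auto_ssdt before table loading runs, so acpi_tb_load_namespace() skips every SSDT/PSDT and only the DSDT (plus any host-provided override) populates the namespace.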
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
new file mode 100644
index 0000000..b7fc8dd
--- /dev/null
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -0,0 +1,274 @@
+/******************************************************************************
+ *
+ * Module Name: tbxfroot - Find the root ACPI table (RSDT)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "actables.h"
+
+#define _COMPONENT          ACPI_TABLES
+ACPI_MODULE_NAME("tbxfroot")
+
+/* Local prototypes */
+static u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length);
+
+static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_validate_rsdp
+ *
+ * PARAMETERS:  Rsdp                - Pointer to unvalidated RSDP
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Validate the RSDP (ptr)
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	/*
+	 * The signature and checksum must both be correct
+	 *
+	 * Note: Sometimes there exists more than one RSDP in memory; the valid
+	 * RSDP has a valid checksum, all others have an invalid checksum.
+	 */
+	if (ACPI_STRNCMP((char *)rsdp, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)
+	    != 0) {
+
+		/* Nope, BAD Signature */
+
+		return (AE_BAD_SIGNATURE);
+	}
+
+	/* Check the standard checksum */
+
+	if (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) {
+		return (AE_BAD_CHECKSUM);
+	}
+
+	/* Check extended checksum if table version >= 2 */
+
+	if ((rsdp->revision >= 2) &&
+	    (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0)) {
+		return (AE_BAD_CHECKSUM);
+	}
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_find_root_pointer
+ *
+ * PARAMETERS:  table_address           - Where the table pointer is returned
+ *
+ * RETURN:      Status, RSDP physical address
+ *
+ * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor
+ *              pointer structure. If it is found, return its physical address
+ *              in *table_address.
+ *
+ * NOTE1:       The RSDP must be either in the first 1_k of the Extended
+ *              BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
+ *              Only a 32-bit physical address is necessary.
+ *
+ * NOTE2:       This function is always available, regardless of the
+ *              initialization state of the rest of ACPI.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_find_root_pointer(acpi_size *table_address)
+{
+	u8 *table_ptr;
+	u8 *mem_rover;
+	u32 physical_address;
+
+	ACPI_FUNCTION_TRACE(acpi_find_root_pointer);
+
+	/* 1a) Get the location of the Extended BIOS Data Area (EBDA) */
+
+	table_ptr = acpi_os_map_memory((acpi_physical_address)
+				       ACPI_EBDA_PTR_LOCATION,
+				       ACPI_EBDA_PTR_LENGTH);
+	if (!table_ptr) {
+		ACPI_ERROR((AE_INFO,
+			    "Could not map memory at %8.8X for length %X",
+			    ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH));
+
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	ACPI_MOVE_16_TO_32(&physical_address, table_ptr);
+
+	/* Convert segment part to physical address */
+
+	physical_address <<= 4;
+	acpi_os_unmap_memory(table_ptr, ACPI_EBDA_PTR_LENGTH);
+
+	/* EBDA present? */
+
+	if (physical_address > 0x400) {
+		/*
+		 * 1b) Search EBDA paragraphs (EBDA is required to be a
+		 *     minimum of 1_k length)
+		 */
+		table_ptr = acpi_os_map_memory((acpi_physical_address)
+					       physical_address,
+					       ACPI_EBDA_WINDOW_SIZE);
+		if (!table_ptr) {
+			ACPI_ERROR((AE_INFO,
+				    "Could not map memory at %8.8X for length %X",
+				    physical_address, ACPI_EBDA_WINDOW_SIZE));
+
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+
+		mem_rover =
+		    acpi_tb_scan_memory_for_rsdp(table_ptr,
+						 ACPI_EBDA_WINDOW_SIZE);
+		acpi_os_unmap_memory(table_ptr, ACPI_EBDA_WINDOW_SIZE);
+
+		if (mem_rover) {
+
+			/* Return the physical address */
+
+			physical_address +=
+			    (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
+
+			*table_address = physical_address;
+			return_ACPI_STATUS(AE_OK);
+		}
+	}
+
+	/*
+	 * 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh
+	 */
+	table_ptr = acpi_os_map_memory((acpi_physical_address)
+				       ACPI_HI_RSDP_WINDOW_BASE,
+				       ACPI_HI_RSDP_WINDOW_SIZE);
+
+	if (!table_ptr) {
+		ACPI_ERROR((AE_INFO,
+			    "Could not map memory at %8.8X for length %X",
+			    ACPI_HI_RSDP_WINDOW_BASE,
+			    ACPI_HI_RSDP_WINDOW_SIZE));
+
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	mem_rover =
+	    acpi_tb_scan_memory_for_rsdp(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
+	acpi_os_unmap_memory(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
+
+	if (mem_rover) {
+
+		/* Return the physical address */
+
+		physical_address = (u32)
+		    (ACPI_HI_RSDP_WINDOW_BASE +
+		     ACPI_PTR_DIFF(mem_rover, table_ptr));
+
+		*table_address = physical_address;
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* A valid RSDP was not found */
+
+	ACPI_ERROR((AE_INFO, "A valid RSDP was not found"));
+	return_ACPI_STATUS(AE_NOT_FOUND);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_scan_memory_for_rsdp
+ *
+ * PARAMETERS:  start_address       - Starting pointer for search
+ *              Length              - Maximum length to search
+ *
+ * RETURN:      Pointer to the RSDP if found, otherwise NULL.
+ *
+ * DESCRIPTION: Search a block of memory for the RSDP signature
+ *
+ ******************************************************************************/
+static u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length)
+{
+	acpi_status status;
+	u8 *mem_rover;
+	u8 *end_address;
+
+	ACPI_FUNCTION_TRACE(tb_scan_memory_for_rsdp);
+
+	end_address = start_address + length;
+
+	/* Search from given start address for the requested length */
+
+	for (mem_rover = start_address; mem_rover < end_address;
+	     mem_rover += ACPI_RSDP_SCAN_STEP) {
+
+		/* The RSDP signature and checksum must both be correct */
+
+		status =
+		    acpi_tb_validate_rsdp(ACPI_CAST_PTR
+					  (struct acpi_table_rsdp, mem_rover));
+		if (ACPI_SUCCESS(status)) {
+
+			/* Sig and checksum valid, we have found a real RSDP */
+
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+					  "RSDP located at physical address %p\n",
+					  mem_rover));
+			return_PTR(mem_rover);
+		}
+
+		/* No sig match or bad checksum, keep searching */
+	}
+
+	/* Searched entire block, no RSDP was found */
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			  "Searched entire block from %p, valid RSDP was not found\n",
+			  start_address));
+	return_PTR(NULL);
+}
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
new file mode 100644
index 0000000..7580f6b
--- /dev/null
+++ b/drivers/acpi/acpica/utalloc.c
@@ -0,0 +1,383 @@
+/******************************************************************************
+ *
+ * Module Name: utalloc - local memory allocation routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acdebug.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utalloc")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_create_caches
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create all local caches
+ *
+ ******************************************************************************/
+acpi_status acpi_ut_create_caches(void)
+{
+	acpi_status status;
+
+	/* Object Caches, for frequently used objects */
+
+	status =
+	    acpi_os_create_cache("Acpi-Namespace",
+				 sizeof(struct acpi_namespace_node),
+				 ACPI_MAX_NAMESPACE_CACHE_DEPTH,
+				 &acpi_gbl_namespace_cache);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	status =
+	    acpi_os_create_cache("Acpi-State", sizeof(union acpi_generic_state),
+				 ACPI_MAX_STATE_CACHE_DEPTH,
+				 &acpi_gbl_state_cache);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	status =
+	    acpi_os_create_cache("Acpi-Parse",
+				 sizeof(struct acpi_parse_obj_common),
+				 ACPI_MAX_PARSE_CACHE_DEPTH,
+				 &acpi_gbl_ps_node_cache);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	status =
+	    acpi_os_create_cache("Acpi-ParseExt",
+				 sizeof(struct acpi_parse_obj_named),
+				 ACPI_MAX_EXTPARSE_CACHE_DEPTH,
+				 &acpi_gbl_ps_node_ext_cache);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	status =
+	    acpi_os_create_cache("Acpi-Operand",
+				 sizeof(union acpi_operand_object),
+				 ACPI_MAX_OBJECT_CACHE_DEPTH,
+				 &acpi_gbl_operand_cache);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+
+	/* Memory allocation lists */
+
+	status = acpi_ut_create_list("Acpi-Global", 0, &acpi_gbl_global_list);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	status =
+	    acpi_ut_create_list("Acpi-Namespace",
+				sizeof(struct acpi_namespace_node),
+				&acpi_gbl_ns_node_list);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+#endif
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_delete_caches
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Purge and delete all local caches
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_delete_caches(void)
+{
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+	char buffer[7];
+
+	if (acpi_gbl_display_final_mem_stats) {
+		ACPI_STRCPY(buffer, "MEMORY");
+		(void)acpi_db_display_statistics(buffer);
+	}
+#endif
+
+	(void)acpi_os_delete_cache(acpi_gbl_namespace_cache);
+	acpi_gbl_namespace_cache = NULL;
+
+	(void)acpi_os_delete_cache(acpi_gbl_state_cache);
+	acpi_gbl_state_cache = NULL;
+
+	(void)acpi_os_delete_cache(acpi_gbl_operand_cache);
+	acpi_gbl_operand_cache = NULL;
+
+	(void)acpi_os_delete_cache(acpi_gbl_ps_node_cache);
+	acpi_gbl_ps_node_cache = NULL;
+
+	(void)acpi_os_delete_cache(acpi_gbl_ps_node_ext_cache);
+	acpi_gbl_ps_node_ext_cache = NULL;
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+
+	/* Debug only - display leftover memory allocation, if any */
+
+	acpi_ut_dump_allocations(ACPI_UINT32_MAX, NULL);
+
+	/* Free memory lists */
+
+	ACPI_FREE(acpi_gbl_global_list);
+	acpi_gbl_global_list = NULL;
+
+	ACPI_FREE(acpi_gbl_ns_node_list);
+	acpi_gbl_ns_node_list = NULL;
+#endif
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_validate_buffer
+ *
+ * PARAMETERS:  Buffer              - Buffer descriptor to be validated
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Perform parameter validation checks on an struct acpi_buffer
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_validate_buffer(struct acpi_buffer * buffer)
+{
+
+	/* Obviously, the structure pointer must be valid */
+
+	if (!buffer) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	/* Special semantics for the length */
+
+	if ((buffer->length == ACPI_NO_BUFFER) ||
+	    (buffer->length == ACPI_ALLOCATE_BUFFER) ||
+	    (buffer->length == ACPI_ALLOCATE_LOCAL_BUFFER)) {
+		return (AE_OK);
+	}
+
+	/* Length is valid, the buffer pointer must be also */
+
+	if (!buffer->pointer) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_initialize_buffer
+ *
+ * PARAMETERS:  Buffer              - Buffer to be validated
+ *              required_length     - Length needed
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Validate that the buffer is of the required length or
+ *              allocate a new buffer. Returned buffer is always zeroed.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
+			  acpi_size required_length)
+{
+	acpi_size input_buffer_length;
+
+	/* Parameter validation */
+
+	if (!buffer || !required_length) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	/*
+	 * Buffer->Length is used as both an input and output parameter. Get the
+	 * input actual length and set the output required buffer length.
+	 */
+	input_buffer_length = buffer->length;
+	buffer->length = required_length;
+
+	/*
+	 * The input buffer length contains the actual buffer length, or the type
+	 * of buffer to be allocated by this routine.
+	 */
+	switch (input_buffer_length) {
+	case ACPI_NO_BUFFER:
+
+		/* Return the exception (and the required buffer length) */
+
+		return (AE_BUFFER_OVERFLOW);
+
+	case ACPI_ALLOCATE_BUFFER:
+
+		/* Allocate a new buffer */
+
+		buffer->pointer = acpi_os_allocate(required_length);
+		break;
+
+	case ACPI_ALLOCATE_LOCAL_BUFFER:
+
+		/* Allocate a new buffer with local interface to allow tracking */
+
+		buffer->pointer = ACPI_ALLOCATE(required_length);
+		break;
+
+	default:
+
+		/* Existing buffer: Validate the size of the buffer */
+
+		if (input_buffer_length < required_length) {
+			return (AE_BUFFER_OVERFLOW);
+		}
+		break;
+	}
+
+	/* Validate allocation from above or input buffer pointer */
+
+	if (!buffer->pointer) {
+		return (AE_NO_MEMORY);
+	}
+
+	/* Have a valid buffer, clear it */
+
+	ACPI_MEMSET(buffer->pointer, 0, required_length);
+	return (AE_OK);
+}
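+
+/*
+ * Illustrative sketch (hypothetical, compiled out by the undefined
+ * ACPI_EXAMPLE_SKETCH guard): a caller either supplies a fixed-size
+ * buffer or asks this routine to allocate one by passing
+ * ACPI_ALLOCATE_BUFFER as the input length. The function and variable
+ * names below are made up for illustration.
+ */
+#ifdef ACPI_EXAMPLE_SKETCH
+static acpi_status example_fill_buffer(acpi_size required_size)
+{
+	struct acpi_buffer ret_buffer;
+	acpi_status status;
+
+	/* Ask acpi_ut_initialize_buffer to allocate on our behalf */
+
+	ret_buffer.length = ACPI_ALLOCATE_BUFFER;
+	ret_buffer.pointer = NULL;
+
+	status = acpi_ut_initialize_buffer(&ret_buffer, required_size);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* ret_buffer.pointer now holds required_size zeroed bytes */
+
+	/* ... fill the buffer ... */
+
+	acpi_os_free(ret_buffer.pointer);
+	return (AE_OK);
+}
+#endif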
+
+#ifdef NOT_USED_BY_LINUX
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_allocate
+ *
+ * PARAMETERS:  Size                - Size of the allocation
+ *              Component           - Component type of caller
+ *              Module              - Source file name of caller
+ *              Line                - Line number of caller
+ *
+ * RETURN:      Address of the allocated memory on success, NULL on failure.
+ *
+ * DESCRIPTION: Subsystem equivalent of malloc.
+ *
+ ******************************************************************************/
+
+void *acpi_ut_allocate(acpi_size size,
+		       u32 component, const char *module, u32 line)
+{
+	void *allocation;
+
+	ACPI_FUNCTION_TRACE_U32(ut_allocate, size);
+
+	/* Check for an inadvertent size of zero bytes */
+
+	if (!size) {
+		ACPI_WARNING((module, line,
+			      "Attempt to allocate zero bytes, allocating 1 byte"));
+		size = 1;
+	}
+
+	allocation = acpi_os_allocate(size);
+	if (!allocation) {
+
+		/* Report allocation error */
+
+		ACPI_WARNING((module, line,
+			      "Could not allocate size %X", (u32) size));
+
+		return_PTR(NULL);
+	}
+
+	return_PTR(allocation);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_allocate_zeroed
+ *
+ * PARAMETERS:  Size                - Size of the allocation
+ *              Component           - Component type of caller
+ *              Module              - Source file name of caller
+ *              Line                - Line number of caller
+ *
+ * RETURN:      Address of the allocated memory on success, NULL on failure.
+ *
+ * DESCRIPTION: Subsystem equivalent of calloc. Allocate and zero memory.
+ *
+ ******************************************************************************/
+
+void *acpi_ut_allocate_zeroed(acpi_size size,
+			      u32 component, const char *module, u32 line)
+{
+	void *allocation;
+
+	ACPI_FUNCTION_ENTRY();
+
+	allocation = acpi_ut_allocate(size, component, module, line);
+	if (allocation) {
+
+		/* Clear the memory block */
+
+		ACPI_MEMSET(allocation, 0, size);
+	}
+
+	return (allocation);
+}
+#endif
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
new file mode 100644
index 0000000..b0dcfd3
--- /dev/null
+++ b/drivers/acpi/acpica/utcopy.c
@@ -0,0 +1,970 @@
+/******************************************************************************
+ *
+ * Module Name: utcopy - Internal to external object translation utilities
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utcopy")
+
+/* Local prototypes */
+static acpi_status
+acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
+				union acpi_object *external_object,
+				u8 * data_space, acpi_size * buffer_space_used);
+
+static acpi_status
+acpi_ut_copy_ielement_to_ielement(u8 object_type,
+				  union acpi_operand_object *source_object,
+				  union acpi_generic_state *state,
+				  void *context);
+
+static acpi_status
+acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
+				  u8 * buffer, acpi_size * space_used);
+
+static acpi_status
+acpi_ut_copy_esimple_to_isimple(union acpi_object *user_obj,
+				union acpi_operand_object **return_obj);
+
+static acpi_status
+acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
+				  union acpi_operand_object **internal_object);
+
+static acpi_status
+acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
+			   union acpi_operand_object *dest_desc);
+
+static acpi_status
+acpi_ut_copy_ielement_to_eelement(u8 object_type,
+				  union acpi_operand_object *source_object,
+				  union acpi_generic_state *state,
+				  void *context);
+
+static acpi_status
+acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
+				  union acpi_operand_object *dest_obj,
+				  struct acpi_walk_state *walk_state);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_copy_isimple_to_esimple
+ *
+ * PARAMETERS:  internal_object     - Source object to be copied
+ *              external_object     - Where to return the copied object
+ *              data_space          - Where object data is returned (such as
+ *                                    buffer and string data)
+ *              buffer_space_used   - Length of data_space that was used
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to copy a simple internal object to
+ *              an external object.
+ *
+ *              The data_space buffer is assumed to have sufficient space for
+ *              the object.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
+				union acpi_object *external_object,
+				u8 * data_space, acpi_size * buffer_space_used)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(ut_copy_isimple_to_esimple);
+
+	*buffer_space_used = 0;
+
+	/*
+	 * Check for NULL object case (could be an uninitialized
+	 * package element)
+	 */
+	if (!internal_object) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Always clear the external object */
+
+	ACPI_MEMSET(external_object, 0, sizeof(union acpi_object));
+
+	/*
+	 * In general, the external object will be the same type as
+	 * the internal object
+	 */
+	external_object->type = ACPI_GET_OBJECT_TYPE(internal_object);
+
+	/* However, only a limited number of external types are supported */
+
+	switch (ACPI_GET_OBJECT_TYPE(internal_object)) {
+	case ACPI_TYPE_STRING:
+
+		external_object->string.pointer = (char *)data_space;
+		external_object->string.length = internal_object->string.length;
+		*buffer_space_used = ACPI_ROUND_UP_TO_NATIVE_WORD((acpi_size)
+								  internal_object->
+								  string.
+								  length + 1);
+
+		ACPI_MEMCPY((void *)data_space,
+			    (void *)internal_object->string.pointer,
+			    (acpi_size) internal_object->string.length + 1);
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		external_object->buffer.pointer = data_space;
+		external_object->buffer.length = internal_object->buffer.length;
+		*buffer_space_used =
+		    ACPI_ROUND_UP_TO_NATIVE_WORD(internal_object->string.
+						 length);
+
+		ACPI_MEMCPY((void *)data_space,
+			    (void *)internal_object->buffer.pointer,
+			    internal_object->buffer.length);
+		break;
+
+	case ACPI_TYPE_INTEGER:
+
+		external_object->integer.value = internal_object->integer.value;
+		break;
+
+	case ACPI_TYPE_LOCAL_REFERENCE:
+
+		/* This is an object reference. */
+
+		switch (internal_object->reference.class) {
+		case ACPI_REFCLASS_NAME:
+
+			/*
+			 * For namepath, return the object handle ("reference")
+			 * We are referring to the namespace node
+			 */
+			external_object->reference.handle =
+			    internal_object->reference.node;
+			external_object->reference.actual_type =
+			    acpi_ns_get_type(internal_object->reference.node);
+			break;
+
+		default:
+
+			/* All other reference types are unsupported */
+
+			return_ACPI_STATUS(AE_TYPE);
+		}
+		break;
+
+	case ACPI_TYPE_PROCESSOR:
+
+		external_object->processor.proc_id =
+		    internal_object->processor.proc_id;
+		external_object->processor.pblk_address =
+		    internal_object->processor.address;
+		external_object->processor.pblk_length =
+		    internal_object->processor.length;
+		break;
+
+	case ACPI_TYPE_POWER:
+
+		external_object->power_resource.system_level =
+		    internal_object->power_resource.system_level;
+
+		external_object->power_resource.resource_order =
+		    internal_object->power_resource.resource_order;
+		break;
+
+	default:
+		/*
+		 * There is no corresponding external object type
+		 */
+		ACPI_ERROR((AE_INFO,
+			    "Unsupported object type, cannot convert to external object: %s",
+			    acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE
+						  (internal_object))));
+
+		return_ACPI_STATUS(AE_SUPPORT);
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_copy_ielement_to_eelement
+ *
+ * PARAMETERS:  acpi_pkg_callback
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Copy one package element to another package element
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_copy_ielement_to_eelement(u8 object_type,
+				  union acpi_operand_object *source_object,
+				  union acpi_generic_state *state,
+				  void *context)
+{
+	acpi_status status = AE_OK;
+	struct acpi_pkg_info *info = (struct acpi_pkg_info *)context;
+	acpi_size object_space;
+	u32 this_index;
+	union acpi_object *target_object;
+
+	ACPI_FUNCTION_ENTRY();
+
+	this_index = state->pkg.index;
+	target_object = (union acpi_object *)
+	    &((union acpi_object *)(state->pkg.dest_object))->package.
+	    elements[this_index];
+
+	switch (object_type) {
+	case ACPI_COPY_TYPE_SIMPLE:
+
+		/*
+		 * This is a simple or null object
+		 */
+		status = acpi_ut_copy_isimple_to_esimple(source_object,
+							 target_object,
+							 info->free_space,
+							 &object_space);
+		if (ACPI_FAILURE(status)) {
+			return (status);
+		}
+		break;
+
+	case ACPI_COPY_TYPE_PACKAGE:
+
+		/*
+		 * Build the package object
+		 */
+		target_object->type = ACPI_TYPE_PACKAGE;
+		target_object->package.count = source_object->package.count;
+		target_object->package.elements =
+		    ACPI_CAST_PTR(union acpi_object, info->free_space);
+
+		/*
+		 * Pass the new package object back to the package walk routine
+		 */
+		state->pkg.this_target_obj = target_object;
+
+		/*
+		 * Save space for the array of objects (Package elements)
+		 * update the buffer length counter
+		 */
+		object_space = ACPI_ROUND_UP_TO_NATIVE_WORD((acpi_size)
+							    target_object->
+							    package.count *
+							    sizeof(union
+								   acpi_object));
+		break;
+
+	default:
+		return (AE_BAD_PARAMETER);
+	}
+
+	info->free_space += object_space;
+	info->length += object_space;
+	return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_copy_ipackage_to_epackage
+ *
+ * PARAMETERS:  internal_object     - Pointer to the object we are returning
+ *              Buffer              - Where the object is returned
+ *              space_used          - Where the object length is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to place a package object in a user
+ *              buffer.  A package object by definition contains other objects.
+ *
+ *              The buffer is assumed to have sufficient space for the object.
+ *              The caller must have verified the buffer length needed using the
+ *              acpi_ut_get_object_size function before calling this function.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
+				  u8 * buffer, acpi_size * space_used)
+{
+	union acpi_object *external_object;
+	acpi_status status;
+	struct acpi_pkg_info info;
+
+	ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_epackage);
+
+	/*
+	 * First package at head of the buffer
+	 */
+	external_object = ACPI_CAST_PTR(union acpi_object, buffer);
+
+	/*
+	 * Free space begins right after the first package
+	 */
+	info.length = ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
+	info.free_space =
+	    buffer + ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
+	info.object_space = 0;
+	info.num_packages = 1;
+
+	external_object->type = ACPI_GET_OBJECT_TYPE(internal_object);
+	external_object->package.count = internal_object->package.count;
+	external_object->package.elements = ACPI_CAST_PTR(union acpi_object,
+							  info.free_space);
+
+	/*
+	 * Leave room for an array of ACPI_OBJECTS in the buffer
+	 * and move the free space past it
+	 */
+	info.length += (acpi_size) external_object->package.count *
+	    ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
+	info.free_space += external_object->package.count *
+	    ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
+
+	status = acpi_ut_walk_package_tree(internal_object, external_object,
+					   acpi_ut_copy_ielement_to_eelement,
+					   &info);
+
+	*space_used = info.length;
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_copy_iobject_to_eobject
+ *
+ * PARAMETERS:  internal_object     - The internal object to be converted
+ *              buffer_ptr          - Where the object is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to build an API object to be returned to
+ *              the caller.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *internal_object,
+				struct acpi_buffer *ret_buffer)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ut_copy_iobject_to_eobject);
+
+	if (ACPI_GET_OBJECT_TYPE(internal_object) == ACPI_TYPE_PACKAGE) {
+		/*
+		 * Package object:  Copy all subobjects (including
+		 * nested packages)
+		 */
+		status = acpi_ut_copy_ipackage_to_epackage(internal_object,
+							   ret_buffer->pointer,
+							   &ret_buffer->length);
+	} else {
+		/*
+		 * Build a simple object (no nested objects)
+		 */
+		status = acpi_ut_copy_isimple_to_esimple(internal_object,
+							 ACPI_CAST_PTR(union
+								       acpi_object,
+								       ret_buffer->
+								       pointer),
+							 ACPI_ADD_PTR(u8,
+								      ret_buffer->
+								      pointer,
+								      ACPI_ROUND_UP_TO_NATIVE_WORD
+								      (sizeof
+								       (union
+									acpi_object))),
+							 &ret_buffer->length);
+		/*
+		 * The simple-object copy above does not include the size of
+		 * the union acpi_object itself in the length, so add it here
+		 */
+		ret_buffer->length += sizeof(union acpi_object);
+	}
+
+	return_ACPI_STATUS(status);
+}
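+
+/*
+ * Illustrative sketch (hypothetical, compiled out by the undefined
+ * ACPI_EXAMPLE_SKETCH guard): callers normally size the output buffer
+ * with acpi_ut_get_object_size, prepare it with acpi_ut_initialize_buffer,
+ * and only then convert the internal object. The wrapper name is made up.
+ */
+#ifdef ACPI_EXAMPLE_SKETCH
+static acpi_status
+example_return_external_object(union acpi_operand_object *obj_desc,
+			       struct acpi_buffer *ret_buffer)
+{
+	acpi_size buffer_size;
+	acpi_status status;
+
+	/* First pass: compute the external (flattened) size of the object */
+
+	status = acpi_ut_get_object_size(obj_desc, &buffer_size);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* Validate or allocate the caller's buffer, then convert */
+
+	status = acpi_ut_initialize_buffer(ret_buffer, buffer_size);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	return (acpi_ut_copy_iobject_to_eobject(obj_desc, ret_buffer));
+}
+#endif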
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_copy_esimple_to_isimple
+ *
+ * PARAMETERS:  external_object     - The external object to be converted
+ *              ret_internal_object - Where the internal object is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function copies an external object to an internal one.
+ *              NOTE: Pointers can be copied, we don't need to copy data.
+ *              (The pointers have to be valid in our address space no matter
+ *              what we do with them!)
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
+				union acpi_operand_object **ret_internal_object)
+{
+	union acpi_operand_object *internal_object;
+
+	ACPI_FUNCTION_TRACE(ut_copy_esimple_to_isimple);
+
+	/*
+	 * Simple types supported are: String, Buffer, Integer
+	 */
+	switch (external_object->type) {
+	case ACPI_TYPE_STRING:
+	case ACPI_TYPE_BUFFER:
+	case ACPI_TYPE_INTEGER:
+	case ACPI_TYPE_LOCAL_REFERENCE:
+
+		internal_object = acpi_ut_create_internal_object((u8)
+								 external_object->
+								 type);
+		if (!internal_object) {
+			return_ACPI_STATUS(AE_NO_MEMORY);
+		}
+		break;
+
+	case ACPI_TYPE_ANY:	/* This is the case for a NULL object */
+
+		*ret_internal_object = NULL;
+		return_ACPI_STATUS(AE_OK);
+
+	default:
+		/* All other types are not supported */
+
+		ACPI_ERROR((AE_INFO,
+			    "Unsupported object type, cannot convert to internal object: %s",
+			    acpi_ut_get_type_name(external_object->type)));
+
+		return_ACPI_STATUS(AE_SUPPORT);
+	}
+
+	/* Must COPY string and buffer contents */
+
+	switch (external_object->type) {
+	case ACPI_TYPE_STRING:
+
+		internal_object->string.pointer =
+		    ACPI_ALLOCATE_ZEROED((acpi_size) external_object->string.
+					 length + 1);
+		if (!internal_object->string.pointer) {
+			goto error_exit;
+		}
+
+		ACPI_MEMCPY(internal_object->string.pointer,
+			    external_object->string.pointer,
+			    external_object->string.length);
+
+		internal_object->string.length = external_object->string.length;
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		internal_object->buffer.pointer =
+		    ACPI_ALLOCATE_ZEROED(external_object->buffer.length);
+		if (!internal_object->buffer.pointer) {
+			goto error_exit;
+		}
+
+		ACPI_MEMCPY(internal_object->buffer.pointer,
+			    external_object->buffer.pointer,
+			    external_object->buffer.length);
+
+		internal_object->buffer.length = external_object->buffer.length;
+
+		/* Mark buffer data valid */
+
+		internal_object->buffer.flags |= AOPOBJ_DATA_VALID;
+		break;
+
+	case ACPI_TYPE_INTEGER:
+
+		internal_object->integer.value = external_object->integer.value;
+		break;
+
+	case ACPI_TYPE_LOCAL_REFERENCE:
+
+		/* TBD: should validate incoming handle */
+
+		internal_object->reference.class = ACPI_REFCLASS_NAME;
+		internal_object->reference.node =
+		    external_object->reference.handle;
+		break;
+
+	default:
+		/* Other types can't get here */
+		break;
+	}
+
+	*ret_internal_object = internal_object;
+	return_ACPI_STATUS(AE_OK);
+
+      error_exit:
+	acpi_ut_remove_reference(internal_object);
+	return_ACPI_STATUS(AE_NO_MEMORY);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_copy_epackage_to_ipackage
+ *
+ * PARAMETERS:  external_object     - The external object to be converted
+ *              internal_object     - Where the internal object is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Copy an external package object to an internal package.
+ *              Handles nested packages.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
+				  union acpi_operand_object **internal_object)
+{
+	acpi_status status = AE_OK;
+	union acpi_operand_object *package_object;
+	union acpi_operand_object **package_elements;
+	u32 i;
+
+	ACPI_FUNCTION_TRACE(ut_copy_epackage_to_ipackage);
+
+	/* Create the package object */
+
+	package_object =
+	    acpi_ut_create_package_object(external_object->package.count);
+	if (!package_object) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	package_elements = package_object->package.elements;
+
+	/*
+	 * Recursive implementation. Probably ok, since nested external packages
+	 * as parameters should be very rare.
+	 */
+	for (i = 0; i < external_object->package.count; i++) {
+		status =
+		    acpi_ut_copy_eobject_to_iobject(&external_object->package.
+						    elements[i],
+						    &package_elements[i]);
+		if (ACPI_FAILURE(status)) {
+
+			/* Truncate package and delete it */
+
+			package_object->package.count = i;
+			package_elements[i] = NULL;
+			acpi_ut_remove_reference(package_object);
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/* Mark package data valid */
+
+	package_object->package.flags |= AOPOBJ_DATA_VALID;
+
+	*internal_object = package_object;
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_copy_eobject_to_iobject
+ *
+ * PARAMETERS:  external_object     - The external object to be converted
+ *              internal_object     - Where the internal object is returned
+ *
+ * RETURN:      Status              - the status of the call
+ *
+ * DESCRIPTION: Converts an external object to an internal object.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_copy_eobject_to_iobject(union acpi_object *external_object,
+				union acpi_operand_object **internal_object)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ut_copy_eobject_to_iobject);
+
+	if (external_object->type == ACPI_TYPE_PACKAGE) {
+		status =
+		    acpi_ut_copy_epackage_to_ipackage(external_object,
+						      internal_object);
+	} else {
+		/*
+		 * Build a simple object (no nested objects)
+		 */
+		status =
+		    acpi_ut_copy_esimple_to_isimple(external_object,
+						    internal_object);
+	}
+
+	return_ACPI_STATUS(status);
+}
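+
+/*
+ * Illustrative sketch (hypothetical, compiled out by the undefined
+ * ACPI_EXAMPLE_SKETCH guard): converting a caller-supplied external
+ * integer argument into an internal operand object, then dropping the
+ * reference when done. The function name and value are made up.
+ */
+#ifdef ACPI_EXAMPLE_SKETCH
+static acpi_status example_wrap_integer_argument(void)
+{
+	union acpi_object external_arg;
+	union acpi_operand_object *internal_obj;
+	acpi_status status;
+
+	external_arg.type = ACPI_TYPE_INTEGER;
+	external_arg.integer.value = 0x10;
+
+	status = acpi_ut_copy_eobject_to_iobject(&external_arg, &internal_obj);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	/* ... pass internal_obj to the interpreter as a method argument ... */
+
+	acpi_ut_remove_reference(internal_obj);
+	return (AE_OK);
+}
+#endif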
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_copy_simple_object
+ *
+ * PARAMETERS:  source_desc         - The internal object to be copied
+ *              dest_desc           - New target object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Simple copy of one internal object to another.  Reference count
+ *              of the destination object is preserved.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
+			   union acpi_operand_object *dest_desc)
+{
+	u16 reference_count;
+	union acpi_operand_object *next_object;
+
+	/* Save fields from destination that we don't want to overwrite */
+
+	reference_count = dest_desc->common.reference_count;
+	next_object = dest_desc->common.next_object;
+
+	/* Copy the entire source object over the destination object */
+
+	ACPI_MEMCPY((char *)dest_desc, (char *)source_desc,
+		    sizeof(union acpi_operand_object));
+
+	/* Restore the saved fields */
+
+	dest_desc->common.reference_count = reference_count;
+	dest_desc->common.next_object = next_object;
+
+	/* New object is not static, regardless of source */
+
+	dest_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
+
+	/* Handle the objects with extra data */
+
+	switch (ACPI_GET_OBJECT_TYPE(dest_desc)) {
+	case ACPI_TYPE_BUFFER:
+		/*
+		 * Allocate and copy the actual buffer if and only if:
+		 * 1) There is a valid buffer pointer
+		 * 2) The buffer has a length > 0
+		 */
+		if ((source_desc->buffer.pointer) &&
+		    (source_desc->buffer.length)) {
+			dest_desc->buffer.pointer =
+			    ACPI_ALLOCATE(source_desc->buffer.length);
+			if (!dest_desc->buffer.pointer) {
+				return (AE_NO_MEMORY);
+			}
+
+			/* Copy the actual buffer data */
+
+			ACPI_MEMCPY(dest_desc->buffer.pointer,
+				    source_desc->buffer.pointer,
+				    source_desc->buffer.length);
+		}
+		break;
+
+	case ACPI_TYPE_STRING:
+		/*
+		 * Allocate and copy the actual string if and only if:
+		 * 1) There is a valid string pointer
+		 * (Pointer to a NULL string is allowed)
+		 */
+		if (source_desc->string.pointer) {
+			dest_desc->string.pointer =
+			    ACPI_ALLOCATE((acpi_size) source_desc->string.
+					  length + 1);
+			if (!dest_desc->string.pointer) {
+				return (AE_NO_MEMORY);
+			}
+
+			/* Copy the actual string data */
+
+			ACPI_MEMCPY(dest_desc->string.pointer,
+				    source_desc->string.pointer,
+				    (acpi_size) source_desc->string.length + 1);
+		}
+		break;
+
+	case ACPI_TYPE_LOCAL_REFERENCE:
+		/*
+		 * We copied the reference object, so we now must add a reference
+		 * to the object pointed to by the reference
+		 *
+		 * DDBHandle reference (from Load/load_table) is a special reference,
+		 * it does not have a Reference.Object, so does not need to
+		 * increase the reference count
+		 */
+		if (source_desc->reference.class == ACPI_REFCLASS_TABLE) {
+			break;
+		}
+
+		acpi_ut_add_reference(source_desc->reference.object);
+		break;
+
+	case ACPI_TYPE_REGION:
+		/*
+		 * We copied the Region Handler, so we now must add a reference
+		 */
+		if (dest_desc->region.handler) {
+			acpi_ut_add_reference(dest_desc->region.handler);
+		}
+		break;
+
+	default:
+		/* Nothing to do for other simple objects */
+		break;
+	}
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_copy_ielement_to_ielement
+ *
+ * PARAMETERS:  acpi_pkg_callback
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Copy one package element to another package element
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_copy_ielement_to_ielement(u8 object_type,
+				  union acpi_operand_object *source_object,
+				  union acpi_generic_state *state,
+				  void *context)
+{
+	acpi_status status = AE_OK;
+	u32 this_index;
+	union acpi_operand_object **this_target_ptr;
+	union acpi_operand_object *target_object;
+
+	ACPI_FUNCTION_ENTRY();
+
+	this_index = state->pkg.index;
+	this_target_ptr = (union acpi_operand_object **)
+	    &state->pkg.dest_object->package.elements[this_index];
+
+	switch (object_type) {
+	case ACPI_COPY_TYPE_SIMPLE:
+
+		/* A null source object indicates a (legal) null package element */
+
+		if (source_object) {
+			/*
+			 * This is a simple object, just copy it
+			 */
+			target_object =
+			    acpi_ut_create_internal_object(ACPI_GET_OBJECT_TYPE
+							   (source_object));
+			if (!target_object) {
+				return (AE_NO_MEMORY);
+			}
+
+			status =
+			    acpi_ut_copy_simple_object(source_object,
+						       target_object);
+			if (ACPI_FAILURE(status)) {
+				goto error_exit;
+			}
+
+			*this_target_ptr = target_object;
+		} else {
+			/* Pass through a null element */
+
+			*this_target_ptr = NULL;
+		}
+		break;
+
+	case ACPI_COPY_TYPE_PACKAGE:
+
+		/*
+		 * This object is a package - go down another nesting level
+		 * Create and build the package object
+		 */
+		target_object =
+		    acpi_ut_create_package_object(source_object->package.count);
+		if (!target_object) {
+			return (AE_NO_MEMORY);
+		}
+
+		target_object->common.flags = source_object->common.flags;
+
+		/* Pass the new package object back to the package walk routine */
+
+		state->pkg.this_target_obj = target_object;
+
+		/* Store the object pointer in the parent package object */
+
+		*this_target_ptr = target_object;
+		break;
+
+	default:
+		return (AE_BAD_PARAMETER);
+	}
+
+	return (status);
+
+      error_exit:
+	acpi_ut_remove_reference(target_object);
+	return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_copy_ipackage_to_ipackage
+ *
+ * PARAMETERS:  *source_obj     - Pointer to the source package object
+ *              *dest_obj       - Where the internal object is returned
+ *
+ * RETURN:      Status          - the status of the call
+ *
+ * DESCRIPTION: This function is called to copy an internal package object
+ *              into another internal package object.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
+				  union acpi_operand_object *dest_obj,
+				  struct acpi_walk_state *walk_state)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_ipackage);
+
+	dest_obj->common.type = ACPI_GET_OBJECT_TYPE(source_obj);
+	dest_obj->common.flags = source_obj->common.flags;
+	dest_obj->package.count = source_obj->package.count;
+
+	/*
+	 * Create the object array and walk the source package tree
+	 */
+	dest_obj->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
+							   source_obj->package.
+							   count +
+							   1) * sizeof(void *));
+	if (!dest_obj->package.elements) {
+		ACPI_ERROR((AE_INFO, "Package allocation failure"));
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/*
+	 * Copy the package element-by-element by walking the package "tree".
+	 * This handles nested packages of arbitrary depth.
+	 */
+	status = acpi_ut_walk_package_tree(source_obj, dest_obj,
+					   acpi_ut_copy_ielement_to_ielement,
+					   walk_state);
+	if (ACPI_FAILURE(status)) {
+
+		/* On failure, delete the destination package object */
+
+		acpi_ut_remove_reference(dest_obj);
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_copy_iobject_to_iobject
+ *
+ * PARAMETERS:  walk_state          - Current walk state
+ *              source_desc         - The internal object to be copied
+ *              dest_desc           - Where the copied object is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Copy an internal object to a new internal object
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
+				union acpi_operand_object **dest_desc,
+				struct acpi_walk_state *walk_state)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(ut_copy_iobject_to_iobject);
+
+	/* Create the top level object */
+
+	*dest_desc =
+	    acpi_ut_create_internal_object(ACPI_GET_OBJECT_TYPE(source_desc));
+	if (!*dest_desc) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Copy the object and possible subobjects */
+
+	if (ACPI_GET_OBJECT_TYPE(source_desc) == ACPI_TYPE_PACKAGE) {
+		status =
+		    acpi_ut_copy_ipackage_to_ipackage(source_desc, *dest_desc,
+						      walk_state);
+	} else {
+		status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
+	}
+
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
new file mode 100644
index 0000000..38821f5
--- /dev/null
+++ b/drivers/acpi/acpica/utdebug.c
@@ -0,0 +1,651 @@
+/******************************************************************************
+ *
+ * Module Name: utdebug - Debug print routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utdebug")
+#ifdef ACPI_DEBUG_OUTPUT
+static acpi_thread_id acpi_gbl_prev_thread_id;
+static char *acpi_gbl_fn_entry_str = "----Entry";
+static char *acpi_gbl_fn_exit_str = "----Exit-";
+
+/* Local prototypes */
+
+static const char *acpi_ut_trim_function_name(const char *function_name);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_init_stack_ptr_trace
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Save the current CPU stack pointer at subsystem startup
+ *
+ ******************************************************************************/
+
+void acpi_ut_init_stack_ptr_trace(void)
+{
+	acpi_size current_sp;
+
+	acpi_gbl_entry_stack_pointer = &current_sp;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_track_stack_ptr
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Save the current CPU stack pointer
+ *
+ ******************************************************************************/
+
+void acpi_ut_track_stack_ptr(void)
+{
+	acpi_size current_sp;
+
+	if (&current_sp < acpi_gbl_lowest_stack_pointer) {
+		acpi_gbl_lowest_stack_pointer = &current_sp;
+	}
+
+	if (acpi_gbl_nesting_level > acpi_gbl_deepest_nesting) {
+		acpi_gbl_deepest_nesting = acpi_gbl_nesting_level;
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_trim_function_name
+ *
+ * PARAMETERS:  function_name       - Ascii string containing a procedure name
+ *
+ * RETURN:      Updated pointer to the function name
+ *
+ * DESCRIPTION: Remove the "Acpi" prefix from the function name, if present.
+ *              This allows compiler macros such as __func__ to be used
+ *              with no change to the debug output.
+ *
+ ******************************************************************************/
+
+static const char *acpi_ut_trim_function_name(const char *function_name)
+{
+
+	/* All Function names are longer than 4 chars, check is safe */
+
+	if (*(ACPI_CAST_PTR(u32, function_name)) == ACPI_PREFIX_MIXED) {
+
+		/* This is the case where the original source has not been modified */
+
+		return (function_name + 4);
+	}
+
+	if (*(ACPI_CAST_PTR(u32, function_name)) == ACPI_PREFIX_LOWER) {
+
+		/* This is the case where the source has been 'linuxized' */
+
+		return (function_name + 5);
+	}
+
+	return (function_name);
+}
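+
+/*
+ * Illustrative behaviour (hypothetical example, compiled out by the
+ * undefined ACPI_EXAMPLE_SKETCH guard):
+ *
+ *   "AcpiUtTrace"   -> "UtTrace"       (original mixed-case source, skip 4)
+ *   "acpi_ut_trace" -> "ut_trace"      (linuxized source, skip 5)
+ *   "SomeOtherName" -> "SomeOtherName" (no recognized prefix, unchanged)
+ */
+#ifdef ACPI_EXAMPLE_SKETCH
+static void example_trim(void)
+{
+	/* Prints "ut_trim_function_name" */
+
+	acpi_os_printf("%s\n",
+		       acpi_ut_trim_function_name
+		       ("acpi_ut_trim_function_name"));
+}
+#endif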
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_debug_print
+ *
+ * PARAMETERS:  requested_debug_level - Requested debug print level
+ *              line_number         - Caller's line number (for error output)
+ *              function_name       - Caller's procedure name
+ *              module_name         - Caller's module name
+ *              component_id        - Caller's component ID
+ *              Format              - Printf format field
+ *              ...                 - Optional printf arguments
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print a message with a prefix consisting of the module name,
+ *              line number, and component ID.
+ *
+ ******************************************************************************/
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_debug_print(u32 requested_debug_level,
+		 u32 line_number,
+		 const char *function_name,
+		 const char *module_name,
+		 u32 component_id, const char *format, ...)
+{
+	acpi_thread_id thread_id;
+	va_list args;
+
+	/*
+	 * Stay silent if the debug level or component ID is disabled
+	 */
+	if (!(requested_debug_level & acpi_dbg_level) ||
+	    !(component_id & acpi_dbg_layer)) {
+		return;
+	}
+
+	/*
+	 * Thread tracking and context switch notification
+	 */
+	thread_id = acpi_os_get_thread_id();
+	if (thread_id != acpi_gbl_prev_thread_id) {
+		if (ACPI_LV_THREADS & acpi_dbg_level) {
+			acpi_os_printf
+			    ("\n**** Context Switch from TID %lX to TID %lX ****\n\n",
+			     (unsigned long)acpi_gbl_prev_thread_id,
+			     (unsigned long)thread_id);
+		}
+
+		acpi_gbl_prev_thread_id = thread_id;
+	}
+
+	/*
+	 * Display the module name, current line number, thread ID (if requested),
+	 * current procedure nesting level, and the current procedure name
+	 */
+	acpi_os_printf("%8s-%04ld ", module_name, line_number);
+
+	if (ACPI_LV_THREADS & acpi_dbg_level) {
+		acpi_os_printf("[%04lX] ", (unsigned long)thread_id);
+	}
+
+	acpi_os_printf("[%02ld] %-22.22s: ",
+		       acpi_gbl_nesting_level,
+		       acpi_ut_trim_function_name(function_name));
+
+	va_start(args, format);
+	acpi_os_vprintf(format, args);
+	va_end(args);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_debug_print)
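+
+/*
+ * Illustrative sketch (hypothetical, compiled out by the undefined
+ * ACPI_EXAMPLE_SKETCH guard): ACPICA code does not normally call
+ * acpi_debug_print() directly. The ACPI_DEBUG_PRINT macro supplies the
+ * line number, function name, module name and component ID, which is
+ * why the format arguments are wrapped in a second set of parentheses.
+ */
+#ifdef ACPI_EXAMPLE_SKETCH
+static void example_debug_output(u32 value)
+{
+	ACPI_FUNCTION_NAME(example_debug_output);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Example value is %8.8X\n", value));
+}
+#endif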
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_debug_print_raw
+ *
+ * PARAMETERS:  requested_debug_level - Requested debug print level
+ *              line_number         - Caller's line number
+ *              function_name       - Caller's procedure name
+ *              module_name         - Caller's module name
+ *              component_id        - Caller's component ID
+ *              Format              - Printf format field
+ *              ...                 - Optional printf arguments
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print message with no headers.  Has same interface as
+ *              debug_print so that the same macros can be used.
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_debug_print_raw(u32 requested_debug_level,
+		     u32 line_number,
+		     const char *function_name,
+		     const char *module_name,
+		     u32 component_id, const char *format, ...)
+{
+	va_list args;
+
+	if (!(requested_debug_level & acpi_dbg_level) ||
+	    !(component_id & acpi_dbg_layer)) {
+		return;
+	}
+
+	va_start(args, format);
+	acpi_os_vprintf(format, args);
+	va_end(args);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_debug_print_raw)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_trace
+ *
+ * PARAMETERS:  line_number         - Caller's line number
+ *              function_name       - Caller's procedure name
+ *              module_name         - Caller's module name
+ *              component_id        - Caller's component ID
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Function entry trace.  Prints only if TRACE_FUNCTIONS bit is
+ *              set in debug_level
+ *
+ ******************************************************************************/
+void
+acpi_ut_trace(u32 line_number,
+	      const char *function_name,
+	      const char *module_name, u32 component_id)
+{
+
+	acpi_gbl_nesting_level++;
+	acpi_ut_track_stack_ptr();
+
+	acpi_debug_print(ACPI_LV_FUNCTIONS,
+			 line_number, function_name, module_name, component_id,
+			 "%s\n", acpi_gbl_fn_entry_str);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_ut_trace)
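+
+/*
+ * Illustrative sketch (hypothetical, compiled out by the undefined
+ * ACPI_EXAMPLE_SKETCH guard): entry/exit tracing is normally generated
+ * by the ACPI_FUNCTION_TRACE and return_ACPI_STATUS macros, which expand
+ * to calls to acpi_ut_trace() and acpi_ut_status_exit() when
+ * ACPI_DEBUG_OUTPUT is enabled.
+ */
+#ifdef ACPI_EXAMPLE_SKETCH
+static acpi_status example_traced_function(void)
+{
+	ACPI_FUNCTION_TRACE(example_traced_function);
+
+	/* ... function body ... */
+
+	return_ACPI_STATUS(AE_OK);
+}
+#endif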
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_trace_ptr
+ *
+ * PARAMETERS:  line_number         - Caller's line number
+ *              function_name       - Caller's procedure name
+ *              module_name         - Caller's module name
+ *              component_id        - Caller's component ID
+ *              Pointer             - Pointer to display
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Function entry trace.  Prints only if TRACE_FUNCTIONS bit is
+ *              set in debug_level
+ *
+ ******************************************************************************/
+void
+acpi_ut_trace_ptr(u32 line_number,
+		  const char *function_name,
+		  const char *module_name, u32 component_id, void *pointer)
+{
+	acpi_gbl_nesting_level++;
+	acpi_ut_track_stack_ptr();
+
+	acpi_debug_print(ACPI_LV_FUNCTIONS,
+			 line_number, function_name, module_name, component_id,
+			 "%s %p\n", acpi_gbl_fn_entry_str, pointer);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_trace_str
+ *
+ * PARAMETERS:  line_number         - Caller's line number
+ *              function_name       - Caller's procedure name
+ *              module_name         - Caller's module name
+ *              component_id        - Caller's component ID
+ *              String              - Additional string to display
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Function entry trace.  Prints only if TRACE_FUNCTIONS bit is
+ *              set in debug_level
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_trace_str(u32 line_number,
+		  const char *function_name,
+		  const char *module_name, u32 component_id, char *string)
+{
+
+	acpi_gbl_nesting_level++;
+	acpi_ut_track_stack_ptr();
+
+	acpi_debug_print(ACPI_LV_FUNCTIONS,
+			 line_number, function_name, module_name, component_id,
+			 "%s %s\n", acpi_gbl_fn_entry_str, string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_trace_u32
+ *
+ * PARAMETERS:  line_number         - Caller's line number
+ *              function_name       - Caller's procedure name
+ *              module_name         - Caller's module name
+ *              component_id        - Caller's component ID
+ *              Integer             - Integer to display
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Function entry trace.  Prints only if TRACE_FUNCTIONS bit is
+ *              set in debug_level
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_trace_u32(u32 line_number,
+		  const char *function_name,
+		  const char *module_name, u32 component_id, u32 integer)
+{
+
+	acpi_gbl_nesting_level++;
+	acpi_ut_track_stack_ptr();
+
+	acpi_debug_print(ACPI_LV_FUNCTIONS,
+			 line_number, function_name, module_name, component_id,
+			 "%s %08X\n", acpi_gbl_fn_entry_str, integer);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_exit
+ *
+ * PARAMETERS:  line_number         - Caller's line number
+ *              function_name       - Caller's procedure name
+ *              module_name         - Caller's module name
+ *              component_id        - Caller's component ID
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Function exit trace.  Prints only if TRACE_FUNCTIONS bit is
+ *              set in debug_level
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_exit(u32 line_number,
+	     const char *function_name,
+	     const char *module_name, u32 component_id)
+{
+
+	acpi_debug_print(ACPI_LV_FUNCTIONS,
+			 line_number, function_name, module_name, component_id,
+			 "%s\n", acpi_gbl_fn_exit_str);
+
+	acpi_gbl_nesting_level--;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_ut_exit)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_status_exit
+ *
+ * PARAMETERS:  line_number         - Caller's line number
+ *              function_name       - Caller's procedure name
+ *              module_name         - Caller's module name
+ *              component_id        - Caller's component ID
+ *              Status              - Exit status code
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Function exit trace.  Prints only if TRACE_FUNCTIONS bit is
+ *              set in debug_level. Prints exit status also.
+ *
+ ******************************************************************************/
+void
+acpi_ut_status_exit(u32 line_number,
+		    const char *function_name,
+		    const char *module_name,
+		    u32 component_id, acpi_status status)
+{
+
+	if (ACPI_SUCCESS(status)) {
+		acpi_debug_print(ACPI_LV_FUNCTIONS,
+				 line_number, function_name, module_name,
+				 component_id, "%s %s\n", acpi_gbl_fn_exit_str,
+				 acpi_format_exception(status));
+	} else {
+		acpi_debug_print(ACPI_LV_FUNCTIONS,
+				 line_number, function_name, module_name,
+				 component_id, "%s ****Exception****: %s\n",
+				 acpi_gbl_fn_exit_str,
+				 acpi_format_exception(status));
+	}
+
+	acpi_gbl_nesting_level--;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_ut_status_exit)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_value_exit
+ *
+ * PARAMETERS:  line_number         - Caller's line number
+ *              function_name       - Caller's procedure name
+ *              module_name         - Caller's module name
+ *              component_id        - Caller's component ID
+ *              Value               - Value to be printed with exit msg
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Function exit trace.  Prints only if TRACE_FUNCTIONS bit is
+ *              set in debug_level. Prints exit value also.
+ *
+ ******************************************************************************/
+void
+acpi_ut_value_exit(u32 line_number,
+		   const char *function_name,
+		   const char *module_name,
+		   u32 component_id, acpi_integer value)
+{
+
+	acpi_debug_print(ACPI_LV_FUNCTIONS,
+			 line_number, function_name, module_name, component_id,
+			 "%s %8.8X%8.8X\n", acpi_gbl_fn_exit_str,
+			 ACPI_FORMAT_UINT64(value));
+
+	acpi_gbl_nesting_level--;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_ut_value_exit)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_ptr_exit
+ *
+ * PARAMETERS:  line_number         - Caller's line number
+ *              function_name       - Caller's procedure name
+ *              module_name         - Caller's module name
+ *              component_id        - Caller's component ID
+ *              Ptr                 - Pointer to display
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Function exit trace.  Prints only if TRACE_FUNCTIONS bit is
+ *              set in debug_level. Prints exit value also.
+ *
+ ******************************************************************************/
+void
+acpi_ut_ptr_exit(u32 line_number,
+		 const char *function_name,
+		 const char *module_name, u32 component_id, u8 *ptr)
+{
+
+	acpi_debug_print(ACPI_LV_FUNCTIONS,
+			 line_number, function_name, module_name, component_id,
+			 "%s %p\n", acpi_gbl_fn_exit_str, ptr);
+
+	acpi_gbl_nesting_level--;
+}
+
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_dump_buffer2
+ *
+ * PARAMETERS:  Buffer              - Buffer to dump
+ *              Count               - Amount to dump, in bytes
+ *              Display             - BYTE, WORD, DWORD, or QWORD display
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Generic dump buffer in both hex and ascii.
+ *
+ ******************************************************************************/
+
+void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
+{
+	u32 i = 0;
+	u32 j;
+	u32 temp32;
+	u8 buf_char;
+
+	if (!buffer) {
+		acpi_os_printf("Null Buffer Pointer in DumpBuffer!\n");
+		return;
+	}
+
+	if ((count < 4) || (count & 0x01)) {
+		display = DB_BYTE_DISPLAY;
+	}
+
+	/* Nasty little dump buffer routine! */
+
+	while (i < count) {
+
+		/* Print current offset */
+
+		acpi_os_printf("%6.4X: ", i);
+
+		/* Print 16 hex chars */
+
+		for (j = 0; j < 16;) {
+			if (i + j >= count) {
+
+				/* Dump fill spaces */
+
+				acpi_os_printf("%*s", ((display * 2) + 1), " ");
+				j += display;
+				continue;
+			}
+
+			switch (display) {
+			case DB_BYTE_DISPLAY:
+			default:	/* Default is BYTE display */
+
+				acpi_os_printf("%02X ",
+					       buffer[(acpi_size) i + j]);
+				break;
+
+			case DB_WORD_DISPLAY:
+
+				ACPI_MOVE_16_TO_32(&temp32,
+						   &buffer[(acpi_size) i + j]);
+				acpi_os_printf("%04X ", temp32);
+				break;
+
+			case DB_DWORD_DISPLAY:
+
+				ACPI_MOVE_32_TO_32(&temp32,
+						   &buffer[(acpi_size) i + j]);
+				acpi_os_printf("%08X ", temp32);
+				break;
+
+			case DB_QWORD_DISPLAY:
+
+				ACPI_MOVE_32_TO_32(&temp32,
+						   &buffer[(acpi_size) i + j]);
+				acpi_os_printf("%08X", temp32);
+
+				ACPI_MOVE_32_TO_32(&temp32,
+						   &buffer[(acpi_size) i + j +
+							   4]);
+				acpi_os_printf("%08X ", temp32);
+				break;
+			}
+
+			j += display;
+		}
+
+		/*
+		 * Print the ASCII equivalent characters but watch out for the bad
+		 * unprintable ones (printable chars are 0x20 through 0x7E)
+		 */
+		acpi_os_printf(" ");
+		for (j = 0; j < 16; j++) {
+			if (i + j >= count) {
+				acpi_os_printf("\n");
+				return;
+			}
+
+			buf_char = buffer[(acpi_size) i + j];
+			if (ACPI_IS_PRINT(buf_char)) {
+				acpi_os_printf("%c", buf_char);
+			} else {
+				acpi_os_printf(".");
+			}
+		}
+
+		/* Done with that line. */
+
+		acpi_os_printf("\n");
+		i += 16;
+	}
+
+	return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_dump_buffer
+ *
+ * PARAMETERS:  Buffer              - Buffer to dump
+ *              Count               - Amount to dump, in bytes
+ *              Display             - BYTE, WORD, DWORD, or QWORD display
+ *              component_id        - Caller's component ID
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Generic dump buffer in both hex and ascii.
+ *
+ ******************************************************************************/
+
+void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
+{
+
+	/* Only dump the buffer if tracing is enabled */
+
+	if (!((ACPI_LV_TABLES & acpi_dbg_level) &&
+	      (component_id & acpi_dbg_layer))) {
+		return;
+	}
+
+	acpi_ut_dump_buffer2(buffer, count, display);
+}
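+
+/*
+ * Illustrative sketch (hypothetical, compiled out by the undefined
+ * ACPI_EXAMPLE_SKETCH guard): dumping a structure as hex + ASCII, one
+ * byte per column. Output is produced only when ACPI_LV_TABLES is set
+ * in acpi_dbg_level and this component is enabled in acpi_dbg_layer.
+ */
+#ifdef ACPI_EXAMPLE_SKETCH
+static void example_dump_table_header(struct acpi_table_header *table)
+{
+	acpi_ut_dump_buffer(ACPI_CAST_PTR(u8, table),
+			    sizeof(struct acpi_table_header),
+			    DB_BYTE_DISPLAY, _COMPONENT);
+}
+#endif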
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
new file mode 100644
index 0000000..a0be9e3
--- /dev/null
+++ b/drivers/acpi/acpica/utdelete.c
@@ -0,0 +1,677 @@
+/*******************************************************************************
+ *
+ * Module Name: utdelete - object deletion and reference count utilities
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "acnamesp.h"
+#include "acevents.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utdelete")
+
+/* Local prototypes */
+static void acpi_ut_delete_internal_obj(union acpi_operand_object *object);
+
+static void
+acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_delete_internal_obj
+ *
+ * PARAMETERS:  Object         - Object to be deleted
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Low level object deletion, after reference counts have been
+ *              updated (All reference counts, including sub-objects!)
+ *
+ ******************************************************************************/
+
+static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
+{
+	void *obj_pointer = NULL;
+	union acpi_operand_object *handler_desc;
+	union acpi_operand_object *second_desc;
+	union acpi_operand_object *next_desc;
+
+	ACPI_FUNCTION_TRACE_PTR(ut_delete_internal_obj, object);
+
+	if (!object) {
+		return_VOID;
+	}
+
+	/*
+	 * Must delete or free any pointers within the object that are not
+	 * actual ACPI objects (for example, a raw buffer pointer).
+	 */
+	switch (ACPI_GET_OBJECT_TYPE(object)) {
+	case ACPI_TYPE_STRING:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+				  "**** String %p, ptr %p\n", object,
+				  object->string.pointer));
+
+		/* Free the actual string buffer */
+
+		if (!(object->common.flags & AOPOBJ_STATIC_POINTER)) {
+
+			/* But only if it is NOT a pointer into an ACPI table */
+
+			obj_pointer = object->string.pointer;
+		}
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+				  "**** Buffer %p, ptr %p\n", object,
+				  object->buffer.pointer));
+
+		/* Free the actual buffer */
+
+		if (!(object->common.flags & AOPOBJ_STATIC_POINTER)) {
+
+			/* But only if it is NOT a pointer into an ACPI table */
+
+			obj_pointer = object->buffer.pointer;
+		}
+		break;
+
+	case ACPI_TYPE_PACKAGE:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+				  " **** Package of count %X\n",
+				  object->package.count));
+
+		/*
+		 * Elements of the package are not handled here, they are deleted
+		 * separately
+		 */
+
+		/* Free the (variable length) element pointer array */
+
+		obj_pointer = object->package.elements;
+		break;
+
+		/*
+		 * These objects have a possible list of notify handlers.
+		 * Device object also may have a GPE block.
+		 */
+	case ACPI_TYPE_DEVICE:
+
+		if (object->device.gpe_block) {
+			(void)acpi_ev_delete_gpe_block(object->device.
+						       gpe_block);
+		}
+
+		/*lint -fallthrough */
+
+	case ACPI_TYPE_PROCESSOR:
+	case ACPI_TYPE_THERMAL:
+
+		/* Walk the notify handler list for this object */
+
+		handler_desc = object->common_notify.handler;
+		while (handler_desc) {
+			next_desc = handler_desc->address_space.next;
+			acpi_ut_remove_reference(handler_desc);
+			handler_desc = next_desc;
+		}
+		break;
+
+	case ACPI_TYPE_MUTEX:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+				  "***** Mutex %p, OS Mutex %p\n",
+				  object, object->mutex.os_mutex));
+
+		if (object == acpi_gbl_global_lock_mutex) {
+
+			/* Global Lock has extra semaphore */
+
+			(void)
+			    acpi_os_delete_semaphore
+			    (acpi_gbl_global_lock_semaphore);
+			acpi_gbl_global_lock_semaphore = NULL;
+
+			acpi_os_delete_mutex(object->mutex.os_mutex);
+			acpi_gbl_global_lock_mutex = NULL;
+		} else {
+			acpi_ex_unlink_mutex(object);
+			acpi_os_delete_mutex(object->mutex.os_mutex);
+		}
+		break;
+
+	case ACPI_TYPE_EVENT:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+				  "***** Event %p, OS Semaphore %p\n",
+				  object, object->event.os_semaphore));
+
+		(void)acpi_os_delete_semaphore(object->event.os_semaphore);
+		object->event.os_semaphore = NULL;
+		break;
+
+	case ACPI_TYPE_METHOD:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+				  "***** Method %p\n", object));
+
+		/* Delete the method mutex if it exists */
+
+		if (object->method.mutex) {
+			acpi_os_delete_mutex(object->method.mutex->mutex.
+					     os_mutex);
+			acpi_ut_delete_object_desc(object->method.mutex);
+			object->method.mutex = NULL;
+		}
+		break;
+
+	case ACPI_TYPE_REGION:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+				  "***** Region %p\n", object));
+
+		second_desc = acpi_ns_get_secondary_object(object);
+		if (second_desc) {
+			/*
+			 * Free the region_context if and only if the handler is one of the
+			 * default handlers -- and therefore, we created the context object
+			 * locally, it was not created by an external caller.
+			 */
+			handler_desc = object->region.handler;
+			if (handler_desc) {
+				if (handler_desc->address_space.handler_flags &
+				    ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) {
+
+					/* Deactivate region and free region context */
+
+					if (handler_desc->address_space.setup) {
+						(void)handler_desc->
+						    address_space.setup(object,
+									ACPI_REGION_DEACTIVATE,
+									handler_desc->
+									address_space.
+									context,
+									&second_desc->
+									extra.
+									region_context);
+					}
+				}
+
+				acpi_ut_remove_reference(handler_desc);
+			}
+
+			/* Now we can free the Extra object */
+
+			acpi_ut_delete_object_desc(second_desc);
+		}
+		break;
+
+	case ACPI_TYPE_BUFFER_FIELD:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+				  "***** Buffer Field %p\n", object));
+
+		second_desc = acpi_ns_get_secondary_object(object);
+		if (second_desc) {
+			acpi_ut_delete_object_desc(second_desc);
+		}
+		break;
+
+	case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+				  "***** Bank Field %p\n", object));
+
+		second_desc = acpi_ns_get_secondary_object(object);
+		if (second_desc) {
+			acpi_ut_delete_object_desc(second_desc);
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	/* Free any allocated memory (pointer within the object) found above */
+
+	if (obj_pointer) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+				  "Deleting Object Subptr %p\n", obj_pointer));
+		ACPI_FREE(obj_pointer);
+	}
+
+	/* Now the object can be safely deleted */
+
+	ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Deleting Object %p [%s]\n",
+			  object, acpi_ut_get_object_type_name(object)));
+
+	acpi_ut_delete_object_desc(object);
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_delete_internal_object_list
+ *
+ * PARAMETERS:  obj_list        - Pointer to the list to be deleted
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: This function deletes an internal object list, including both
+ *              simple objects and package objects
+ *
+ ******************************************************************************/
+
+void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
+{
+	union acpi_operand_object **internal_obj;
+
+	ACPI_FUNCTION_TRACE(ut_delete_internal_object_list);
+
+	/* Walk the null-terminated internal list */
+
+	for (internal_obj = obj_list; *internal_obj; internal_obj++) {
+		acpi_ut_remove_reference(*internal_obj);
+	}
+
+	/* Free the combined parameter pointer list and object array */
+
+	ACPI_FREE(obj_list);
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_update_ref_count
+ *
+ * PARAMETERS:  Object          - Object whose ref count is to be updated
+ *              Action          - What to do
+ *
+ * RETURN:      None. The new reference count is stored within the object
+ *
+ * DESCRIPTION: Modify the reference count of an internal ACPI object
+ *
+ ******************************************************************************/
+
+static void
+acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
+{
+	u16 count;
+	u16 new_count;
+
+	ACPI_FUNCTION_NAME(ut_update_ref_count);
+
+	if (!object) {
+		return;
+	}
+
+	count = object->common.reference_count;
+	new_count = count;
+
+	/*
+	 * Perform the reference count action (increment, decrement, force delete)
+	 */
+	switch (action) {
+	case REF_INCREMENT:
+
+		new_count++;
+		object->common.reference_count = new_count;
+
+		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+				  "Obj %p Refs=%X, [Incremented]\n",
+				  object, new_count));
+		break;
+
+	case REF_DECREMENT:
+
+		if (count < 1) {
+			ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+					  "Obj %p Refs=%X, can't decrement! (Set to 0)\n",
+					  object, new_count));
+
+			new_count = 0;
+		} else {
+			new_count--;
+
+			ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+					  "Obj %p Refs=%X, [Decremented]\n",
+					  object, new_count));
+		}
+
+		if (ACPI_GET_OBJECT_TYPE(object) == ACPI_TYPE_METHOD) {
+			ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+					  "Method Obj %p Refs=%X, [Decremented]\n",
+					  object, new_count));
+		}
+
+		object->common.reference_count = new_count;
+		if (new_count == 0) {
+			acpi_ut_delete_internal_obj(object);
+		}
+		break;
+
+	case REF_FORCE_DELETE:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+				  "Obj %p Refs=%X, Force delete! (Set to 0)\n",
+				  object, count));
+
+		new_count = 0;
+		object->common.reference_count = new_count;
+		acpi_ut_delete_internal_obj(object);
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Unknown action (%X)", action));
+		break;
+	}
+
+	/*
+	 * Sanity check the reference count, for debug purposes only.
+	 * (A deleted object will have a huge reference count)
+	 */
+	if (count > ACPI_MAX_REFERENCE_COUNT) {
+		ACPI_WARNING((AE_INFO,
+			      "Large Reference Count (%X) in object %p", count,
+			      object));
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_update_object_reference
+ *
+ * PARAMETERS:  Object              - Increment ref count for this object
+ *                                    and all sub-objects
+ *              Action              - Either REF_INCREMENT or REF_DECREMENT or
+ *                                    REF_FORCE_DELETE
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Increment or decrement the object reference count
+ *
+ * Object references are incremented when:
+ * 1) An object is attached to a Node (namespace object)
+ * 2) An object is copied (all subobjects must be incremented)
+ *
+ * Object references are decremented when:
+ * 1) An object is detached from a Node
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
+{
+	acpi_status status = AE_OK;
+	union acpi_generic_state *state_list = NULL;
+	union acpi_operand_object *next_object = NULL;
+	union acpi_generic_state *state;
+	u32 i;
+
+	ACPI_FUNCTION_TRACE_PTR(ut_update_object_reference, object);
+
+	while (object) {
+
+		/* Make sure that this isn't a namespace handle */
+
+		if (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED) {
+			ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+					  "Object %p is NS handle\n", object));
+			return_ACPI_STATUS(AE_OK);
+		}
+
+		/*
+		 * All sub-objects must have their reference count incremented also.
+		 * Different object types have different subobjects.
+		 */
+		switch (ACPI_GET_OBJECT_TYPE(object)) {
+		case ACPI_TYPE_DEVICE:
+		case ACPI_TYPE_PROCESSOR:
+		case ACPI_TYPE_POWER:
+		case ACPI_TYPE_THERMAL:
+
+			/* Update the notify objects for these types (if present) */
+
+			acpi_ut_update_ref_count(object->common_notify.
+						 system_notify, action);
+			acpi_ut_update_ref_count(object->common_notify.
+						 device_notify, action);
+			break;
+
+		case ACPI_TYPE_PACKAGE:
+			/*
+			 * We must update all the sub-objects of the package,
+			 * each of which may have its own sub-objects.
+			 */
+			for (i = 0; i < object->package.count; i++) {
+				/*
+				 * Push each element onto the stack for later processing.
+				 * Note: There can be null elements within the package,
+				 * these are simply ignored
+				 */
+				status =
+				    acpi_ut_create_update_state_and_push
+				    (object->package.elements[i], action,
+				     &state_list);
+				if (ACPI_FAILURE(status)) {
+					goto error_exit;
+				}
+			}
+			break;
+
+		case ACPI_TYPE_BUFFER_FIELD:
+
+			next_object = object->buffer_field.buffer_obj;
+			break;
+
+		case ACPI_TYPE_LOCAL_REGION_FIELD:
+
+			next_object = object->field.region_obj;
+			break;
+
+		case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+			next_object = object->bank_field.bank_obj;
+			status =
+			    acpi_ut_create_update_state_and_push(object->
+								 bank_field.
+								 region_obj,
+								 action,
+								 &state_list);
+			if (ACPI_FAILURE(status)) {
+				goto error_exit;
+			}
+			break;
+
+		case ACPI_TYPE_LOCAL_INDEX_FIELD:
+
+			next_object = object->index_field.index_obj;
+			status =
+			    acpi_ut_create_update_state_and_push(object->
+								 index_field.
+								 data_obj,
+								 action,
+								 &state_list);
+			if (ACPI_FAILURE(status)) {
+				goto error_exit;
+			}
+			break;
+
+		case ACPI_TYPE_LOCAL_REFERENCE:
+			/*
+			 * The target of an Index (a package, string, or buffer) or a named
+			 * reference must track changes to the ref count of the index or
+			 * target object.
+			 */
+			if ((object->reference.class == ACPI_REFCLASS_INDEX) ||
+			    (object->reference.class == ACPI_REFCLASS_NAME)) {
+				next_object = object->reference.object;
+			}
+			break;
+
+		case ACPI_TYPE_REGION:
+		default:
+			break;	/* No subobjects for all other types */
+		}
+
+		/*
+		 * Now we can update the count in the main object. This can only
+		 * happen after we update the sub-objects in case this causes the
+		 * main object to be deleted.
+		 */
+		acpi_ut_update_ref_count(object, action);
+		object = NULL;
+
+		/* Move on to the next object to be updated */
+
+		if (next_object) {
+			object = next_object;
+			next_object = NULL;
+		} else if (state_list) {
+			state = acpi_ut_pop_generic_state(&state_list);
+			object = state->update.object;
+			acpi_ut_delete_generic_state(state);
+		}
+	}
+
+	return_ACPI_STATUS(AE_OK);
+
+      error_exit:
+
+	ACPI_EXCEPTION((AE_INFO, status,
+			"Could not update object reference count"));
+
+	/* Free any stacked Update State objects */
+
+	while (state_list) {
+		state = acpi_ut_pop_generic_state(&state_list);
+		acpi_ut_delete_generic_state(state);
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_add_reference
+ *
+ * PARAMETERS:  Object          - Object whose reference count is to be
+ *                                incremented
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Add one reference to an ACPI object
+ *
+ ******************************************************************************/
+
+void acpi_ut_add_reference(union acpi_operand_object *object)
+{
+
+	ACPI_FUNCTION_TRACE_PTR(ut_add_reference, object);
+
+	/* Ensure that we have a valid object */
+
+	if (!acpi_ut_valid_internal_object(object)) {
+		return_VOID;
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+			  "Obj %p Current Refs=%X [To Be Incremented]\n",
+			  object, object->common.reference_count));
+
+	/* Increment the reference count */
+
+	(void)acpi_ut_update_object_reference(object, REF_INCREMENT);
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_remove_reference
+ *
+ * PARAMETERS:  Object         - Object whose ref count will be decremented
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Decrement the reference count of an ACPI internal object
+ *
+ ******************************************************************************/
+
+void acpi_ut_remove_reference(union acpi_operand_object *object)
+{
+
+	ACPI_FUNCTION_TRACE_PTR(ut_remove_reference, object);
+
+	/*
+	 * Allow a NULL pointer to be passed in; just ignore it. This saves
+	 * each caller from having to check. Also, ignore NS nodes.
+	 */
+	if (!object ||
+	    (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED)) {
+		return_VOID;
+	}
+
+	/* Ensure that we have a valid object */
+
+	if (!acpi_ut_valid_internal_object(object)) {
+		return_VOID;
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+			  "Obj %p Current Refs=%X [To Be Decremented]\n",
+			  object, object->common.reference_count));
+
+	/*
+	 * Decrement the reference count, and only actually delete the object
+	 * if the reference count becomes 0. (Must also decrement the ref count
+	 * of all subobjects!)
+	 */
+	(void)acpi_ut_update_object_reference(object, REF_DECREMENT);
+	return_VOID;
+}
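A minimal usage sketch of the reference-counting interface added above (illustrative only; the helper function and the value stored below are assumptions, not part of this change). An operand object starts life with a reference count of 1; every additional owner takes a reference with acpi_ut_add_reference and releases it with acpi_ut_remove_reference, and the object is deleted automatically when the count reaches zero:

/* Illustrative sketch: typical ownership handling for an operand object */

static acpi_status example_publish_integer(union acpi_operand_object **out_obj)
{
	union acpi_operand_object *obj_desc;

	/* Freshly created objects carry a reference count of 1 */

	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
	if (!obj_desc) {
		return (AE_NO_MEMORY);
	}

	obj_desc->integer.value = 42;

	/* The new owner takes its own reference... */

	acpi_ut_add_reference(obj_desc);
	*out_obj = obj_desc;

	/* ...so the creator can drop its reference without deleting the object */

	acpi_ut_remove_reference(obj_desc);
	return (AE_OK);
}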
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
new file mode 100644
index 0000000..da9450bc
--- /dev/null
+++ b/drivers/acpi/acpica/uteval.c
@@ -0,0 +1,752 @@
+/******************************************************************************
+ *
+ * Module Name: uteval - Object evaluation
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("uteval")
+
+/* Local prototypes */
+static void
+acpi_ut_copy_id_string(char *destination, char *source, acpi_size max_length);
+
+static acpi_status
+acpi_ut_translate_one_cid(union acpi_operand_object *obj_desc,
+			  struct acpi_compatible_id *one_cid);
+
+/*
+ * Strings supported by the _OSI predefined (internal) method.
+ */
+static char *acpi_interfaces_supported[] = {
+	/* Operating System Vendor Strings */
+
+	"Windows 2000",		/* Windows 2000 */
+	"Windows 2001",		/* Windows XP */
+	"Windows 2001 SP1",	/* Windows XP SP1 */
+	"Windows 2001 SP2",	/* Windows XP SP2 */
+	"Windows 2001.1",	/* Windows Server 2003 */
+	"Windows 2001.1 SP1",	/* Windows Server 2003 SP1 - Added 03/2006 */
+	"Windows 2006",		/* Windows Vista - Added 03/2006 */
+
+	/* Feature Group Strings */
+
+	"Extended Address Space Descriptor"
+	    /*
+	     * All "optional" feature group strings (features that are implemented
+	     * by the host) should be implemented in the host version of
+	     * acpi_os_validate_interface and should not be added here.
+	     */
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_osi_implementation
+ *
+ * PARAMETERS:  walk_state          - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Implementation of the _OSI predefined control method
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	union acpi_operand_object *string_desc;
+	union acpi_operand_object *return_desc;
+	u32 i;
+
+	ACPI_FUNCTION_TRACE(ut_osi_implementation);
+
+	/* Validate the string input argument */
+
+	string_desc = walk_state->arguments[0].object;
+	if (!string_desc || (string_desc->common.type != ACPI_TYPE_STRING)) {
+		return_ACPI_STATUS(AE_TYPE);
+	}
+
+	/* Create a return object */
+
+	return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+	if (!return_desc) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Default return value is SUPPORTED */
+
+	return_desc->integer.value = ACPI_UINT32_MAX;
+	walk_state->return_desc = return_desc;
+
+	/* Compare input string to static table of supported interfaces */
+
+	for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_interfaces_supported); i++) {
+		if (!ACPI_STRCMP
+		    (string_desc->string.pointer,
+		     acpi_interfaces_supported[i])) {
+
+			/* The interface is supported */
+
+			return_ACPI_STATUS(AE_OK);
+		}
+	}
+
+	/*
+	 * Did not match the string in the static table, call the host OSL to
+	 * check for a match with one of the optional strings (such as
+	 * "Module Device", "3.0 Thermal Model", etc.)
+	 */
+	status = acpi_os_validate_interface(string_desc->string.pointer);
+	if (ACPI_SUCCESS(status)) {
+
+		/* The interface is supported */
+
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* The interface is not supported */
+
+	return_desc->integer.value = 0;
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_osi_invalidate
+ *
+ * PARAMETERS:  Interface           - Interface string to invalidate
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Invalidate a string in the pre-defined _OSI string list
+ *
+ ******************************************************************************/
+
+acpi_status acpi_osi_invalidate(char *interface)
+{
+	int i;
+
+	for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_interfaces_supported); i++) {
+		if (!ACPI_STRCMP(interface, acpi_interfaces_supported[i])) {
+			*acpi_interfaces_supported[i] = '\0';
+			return AE_OK;
+		}
+	}
+	return AE_NOT_FOUND;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_evaluate_object
+ *
+ * PARAMETERS:  prefix_node         - Starting node
+ *              Path                - Path to object from starting node
+ *              expected_return_btypes - Bitmap of allowed return types
+ *              return_desc         - Where a return value is stored
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Evaluates a namespace object and verifies the type of the
+ *              return object.  Common code that simplifies accessing objects
+ *              that have required return objects of fixed types.
+ *
+ *              NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
+			char *path,
+			u32 expected_return_btypes,
+			union acpi_operand_object **return_desc)
+{
+	struct acpi_evaluate_info *info;
+	acpi_status status;
+	u32 return_btype;
+
+	ACPI_FUNCTION_TRACE(ut_evaluate_object);
+
+	/* Allocate the evaluation information block */
+
+	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+	if (!info) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	info->prefix_node = prefix_node;
+	info->pathname = path;
+
+	/* Evaluate the object/method */
+
+	status = acpi_ns_evaluate(info);
+	if (ACPI_FAILURE(status)) {
+		if (status == AE_NOT_FOUND) {
+			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+					  "[%4.4s.%s] was not found\n",
+					  acpi_ut_get_node_name(prefix_node),
+					  path));
+		} else {
+			ACPI_ERROR_METHOD("Method execution failed",
+					  prefix_node, path, status);
+		}
+
+		goto cleanup;
+	}
+
+	/* Did we get a return object? */
+
+	if (!info->return_object) {
+		if (expected_return_btypes) {
+			ACPI_ERROR_METHOD("No object was returned from",
+					  prefix_node, path, AE_NOT_EXIST);
+
+			status = AE_NOT_EXIST;
+		}
+
+		goto cleanup;
+	}
+
+	/* Map the return object type to the bitmapped type */
+
+	switch (ACPI_GET_OBJECT_TYPE(info->return_object)) {
+	case ACPI_TYPE_INTEGER:
+		return_btype = ACPI_BTYPE_INTEGER;
+		break;
+
+	case ACPI_TYPE_BUFFER:
+		return_btype = ACPI_BTYPE_BUFFER;
+		break;
+
+	case ACPI_TYPE_STRING:
+		return_btype = ACPI_BTYPE_STRING;
+		break;
+
+	case ACPI_TYPE_PACKAGE:
+		return_btype = ACPI_BTYPE_PACKAGE;
+		break;
+
+	default:
+		return_btype = 0;
+		break;
+	}
+
+	if ((acpi_gbl_enable_interpreter_slack) && (!expected_return_btypes)) {
+		/*
+		 * We received a return object, but one was not expected.  This can
+		 * happen frequently if the "implicit return" feature is enabled.
+		 * Just delete the return object and return AE_OK.
+		 */
+		acpi_ut_remove_reference(info->return_object);
+		goto cleanup;
+	}
+
+	/* Is the return object one of the expected types? */
+
+	if (!(expected_return_btypes & return_btype)) {
+		ACPI_ERROR_METHOD("Return object type is incorrect",
+				  prefix_node, path, AE_TYPE);
+
+		ACPI_ERROR((AE_INFO,
+			    "Type returned from %s was incorrect: %s, expected Btypes: %X",
+			    path,
+			    acpi_ut_get_object_type_name(info->return_object),
+			    expected_return_btypes));
+
+		/* On error exit, we must delete the return object */
+
+		acpi_ut_remove_reference(info->return_object);
+		status = AE_TYPE;
+		goto cleanup;
+	}
+
+	/* Object type is OK, return it */
+
+	*return_desc = info->return_object;
+
+      cleanup:
+	ACPI_FREE(info);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_evaluate_numeric_object
+ *
+ * PARAMETERS:  object_name         - Object name to be evaluated
+ *              device_node         - Node for the device
+ *              Address             - Where the value is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Evaluates a numeric namespace object for a selected device
+ *              and stores result in *Address.
+ *
+ *              NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_evaluate_numeric_object(char *object_name,
+				struct acpi_namespace_node *device_node,
+				acpi_integer * address)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ut_evaluate_numeric_object);
+
+	status = acpi_ut_evaluate_object(device_node, object_name,
+					 ACPI_BTYPE_INTEGER, &obj_desc);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Get the returned Integer */
+
+	*address = obj_desc->integer.value;
+
+	/* On exit, we must delete the return object */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_copy_id_string
+ *
+ * PARAMETERS:  Destination         - Where to copy the string
+ *              Source              - Source string
+ *              max_length          - Length of the destination buffer
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Copies an ID string for the _HID, _CID, and _UID methods.
+ *              Performs removal of a leading asterisk if present -- workaround
+ *              for a known issue on a bunch of machines.
+ *
+ ******************************************************************************/
+
+static void
+acpi_ut_copy_id_string(char *destination, char *source, acpi_size max_length)
+{
+
+	/*
+	 * Workaround for ID strings that have a leading asterisk. This construct
+	 * is not allowed by the ACPI specification  (ID strings must be
+	 * alphanumeric), but enough existing machines have this embedded in their
+	 * ID strings that the following code is useful.
+	 */
+	if (*source == '*') {
+		source++;
+	}
+
+	/* Do the actual copy */
+
+	ACPI_STRNCPY(destination, source, max_length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_execute_HID
+ *
+ * PARAMETERS:  device_node         - Node for the device
+ *              Hid                 - Where the HID is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Executes the _HID control method that returns the hardware
+ *              ID of the device.
+ *
+ *              NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
+		    struct acpica_device_id *hid)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ut_execute_HID);
+
+	status = acpi_ut_evaluate_object(device_node, METHOD_NAME__HID,
+					 ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
+					 &obj_desc);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
+		/* Convert the Numeric HID to string */
+
+		acpi_ex_eisa_id_to_string((u32) obj_desc->integer.value,
+					  hid->value);
+	} else {
+		/* Copy the String HID from the returned object */
+
+		acpi_ut_copy_id_string(hid->value, obj_desc->string.pointer,
+				       sizeof(hid->value));
+	}
+
+	/* On exit, we must delete the return object */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_translate_one_cid
+ *
+ * PARAMETERS:  obj_desc            - _CID object, must be integer or string
+ *              one_cid             - Where the CID string is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Return a numeric or string _CID value as a string.
+ *              (Compatible ID)
+ *
+ *              NOTE:  Assumes a maximum _CID string length of
+ *                     ACPI_MAX_CID_LENGTH.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_translate_one_cid(union acpi_operand_object *obj_desc,
+			  struct acpi_compatible_id *one_cid)
+{
+
+	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
+	case ACPI_TYPE_INTEGER:
+
+		/* Convert the Numeric CID to string */
+
+		acpi_ex_eisa_id_to_string((u32) obj_desc->integer.value,
+					  one_cid->value);
+		return (AE_OK);
+
+	case ACPI_TYPE_STRING:
+
+		if (obj_desc->string.length > ACPI_MAX_CID_LENGTH) {
+			return (AE_AML_STRING_LIMIT);
+		}
+
+		/* Copy the String CID from the returned object */
+
+		acpi_ut_copy_id_string(one_cid->value, obj_desc->string.pointer,
+				       ACPI_MAX_CID_LENGTH);
+		return (AE_OK);
+
+	default:
+
+		return (AE_TYPE);
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_execute_CID
+ *
+ * PARAMETERS:  device_node         - Node for the device
+ *              return_cid_list     - Where the CID list is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Executes the _CID control method that returns one or more
+ *              compatible hardware IDs for the device.
+ *
+ *              NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_CID(struct acpi_namespace_node * device_node,
+		    struct acpi_compatible_id_list ** return_cid_list)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+	u32 count;
+	u32 size;
+	struct acpi_compatible_id_list *cid_list;
+	u32 i;
+
+	ACPI_FUNCTION_TRACE(ut_execute_CID);
+
+	/* Evaluate the _CID method for this device */
+
+	status = acpi_ut_evaluate_object(device_node, METHOD_NAME__CID,
+					 ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING
+					 | ACPI_BTYPE_PACKAGE, &obj_desc);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Get the number of _CIDs returned */
+
+	count = 1;
+	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
+		count = obj_desc->package.count;
+	}
+
+	/* Allocate a worst-case buffer for the _CIDs */
+
+	size = (((count - 1) * sizeof(struct acpi_compatible_id)) +
+		sizeof(struct acpi_compatible_id_list));
+
+	cid_list = ACPI_ALLOCATE_ZEROED((acpi_size) size);
+	if (!cid_list) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Init CID list */
+
+	cid_list->count = count;
+	cid_list->size = size;
+
+	/*
+	 *  A _CID can return either a single compatible ID or a package of
+	 *  compatible IDs.  Each compatible ID can be one of the following:
+	 *  1) Integer (32 bit compressed EISA ID) or
+	 *  2) String (PCI ID format, e.g. "PCI\VEN_vvvv&DEV_dddd&SUBSYS_ssssssss")
+	 */
+
+	/* The _CID object can be either a single CID or a package (list) of CIDs */
+
+	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
+
+		/* Translate each package element */
+
+		for (i = 0; i < count; i++) {
+			status =
+			    acpi_ut_translate_one_cid(obj_desc->package.
+						      elements[i],
+						      &cid_list->id[i]);
+			if (ACPI_FAILURE(status)) {
+				break;
+			}
+		}
+	} else {
+		/* Only one CID, translate to a string */
+
+		status = acpi_ut_translate_one_cid(obj_desc, cid_list->id);
+	}
+
+	/* Cleanup on error */
+
+	if (ACPI_FAILURE(status)) {
+		ACPI_FREE(cid_list);
+	} else {
+		*return_cid_list = cid_list;
+	}
+
+	/* On exit, we must delete the _CID return object */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_execute_UID
+ *
+ * PARAMETERS:  device_node         - Node for the device
+ *              Uid                 - Where the UID is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Executes the _UID control method that returns the unique
+ *              ID of the device.
+ *
+ *              NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
+		    struct acpica_device_id *uid)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ut_execute_UID);
+
+	status = acpi_ut_evaluate_object(device_node, METHOD_NAME__UID,
+					 ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
+					 &obj_desc);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
+		/* Convert the Numeric UID to string */
+
+		acpi_ex_unsigned_integer_to_string(obj_desc->integer.value,
+						   uid->value);
+	} else {
+		/* Copy the String UID from the returned object */
+
+		acpi_ut_copy_id_string(uid->value, obj_desc->string.pointer,
+				       sizeof(uid->value));
+	}
+
+	/* On exit, we must delete the return object */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_execute_STA
+ *
+ * PARAMETERS:  device_node         - Node for the device
+ *              Flags               - Where the status flags are returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Executes _STA for selected device and stores results in
+ *              *Flags.
+ *
+ *              NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_STA(struct acpi_namespace_node *device_node, u32 * flags)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ut_execute_STA);
+
+	status = acpi_ut_evaluate_object(device_node, METHOD_NAME__STA,
+					 ACPI_BTYPE_INTEGER, &obj_desc);
+	if (ACPI_FAILURE(status)) {
+		if (AE_NOT_FOUND == status) {
+			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+					  "_STA on %4.4s was not found, assuming device is present\n",
+					  acpi_ut_get_node_name(device_node)));
+
+			*flags = ACPI_UINT32_MAX;
+			status = AE_OK;
+		}
+
+		return_ACPI_STATUS(status);
+	}
+
+	/* Extract the status flags */
+
+	*flags = (u32) obj_desc->integer.value;
+
+	/* On exit, we must delete the return object */
+
+	acpi_ut_remove_reference(obj_desc);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_execute_Sxds
+ *
+ * PARAMETERS:  device_node         - Node for the device
+ *              Highest             - Where the highest supported D-states
+ *                                    are returned (array of four bytes)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Executes _S1D through _S4D for the selected device and
+ *              stores the highest D-state value for each in *Highest.
+ *
+ *              NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_sxds(struct acpi_namespace_node *device_node, u8 * highest)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+	u32 i;
+
+	ACPI_FUNCTION_TRACE(ut_execute_sxds);
+
+	for (i = 0; i < 4; i++) {
+		highest[i] = 0xFF;
+		status = acpi_ut_evaluate_object(device_node,
+						 ACPI_CAST_PTR(char,
+							       acpi_gbl_highest_dstate_names
+							       [i]),
+						 ACPI_BTYPE_INTEGER, &obj_desc);
+		if (ACPI_FAILURE(status)) {
+			if (status != AE_NOT_FOUND) {
+				ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+						  "%s on Device %4.4s, %s\n",
+						  ACPI_CAST_PTR(char,
+								acpi_gbl_highest_dstate_names
+								[i]),
+						  acpi_ut_get_node_name
+						  (device_node),
+						  acpi_format_exception
+						  (status)));
+
+				return_ACPI_STATUS(status);
+			}
+		} else {
+			/* Extract the Dstate value */
+
+			highest[i] = (u8) obj_desc->integer.value;
+
+			/* Delete the return object */
+
+			acpi_ut_remove_reference(obj_desc);
+		}
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
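A rough sketch of how the evaluation helpers above are intended to be called (illustrative only; the wrapper function and the use of _BBN are assumptions, not part of this change). acpi_ut_evaluate_numeric_object wraps acpi_ut_evaluate_object for the common case of a method that must return a single Integer, and it removes the reference on the return object internally, so the caller only sees the value:

/* Illustrative sketch: read a numeric method (here _BBN) under a device node */

static acpi_status example_get_bus_number(struct acpi_namespace_node *device_node,
					  acpi_integer *bus_number)
{
	acpi_status status;

	/* AE_NOT_FOUND is returned if the method does not exist under the node */

	status = acpi_ut_evaluate_numeric_object(METHOD_NAME__BBN,
						 device_node, bus_number);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* No cleanup needed; the helper already deleted the return object */

	return (AE_OK);
}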
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
new file mode 100644
index 0000000..a3ab9d9
--- /dev/null
+++ b/drivers/acpi/acpica/utglobal.c
@@ -0,0 +1,823 @@
+/******************************************************************************
+ *
+ * Module Name: utglobal - Global variables for the ACPI subsystem
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#define DEFINE_ACPI_GLOBALS
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utglobal")
+
+/*******************************************************************************
+ *
+ * Static global variable initialization.
+ *
+ ******************************************************************************/
+/*
+ * We want the debug switches statically initialized so they
+ * are already set when the debugger is entered.
+ */
+/* Debug switch - level and trace mask */
+u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT;
+
+/* Debug switch - layer (component) mask */
+
+u32 acpi_dbg_layer = 0;
+u32 acpi_gbl_nesting_level = 0;
+
+/* Debugger globals */
+
+u8 acpi_gbl_db_terminate_threads = FALSE;
+u8 acpi_gbl_abort_method = FALSE;
+u8 acpi_gbl_method_executing = FALSE;
+
+/* System flags */
+
+u32 acpi_gbl_startup_flags = 0;
+
+/* System starts uninitialized */
+
+u8 acpi_gbl_shutdown = TRUE;
+
+const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = {
+	"\\_S0_",
+	"\\_S1_",
+	"\\_S2_",
+	"\\_S3_",
+	"\\_S4_",
+	"\\_S5_"
+};
+
+const char *acpi_gbl_highest_dstate_names[4] = {
+	"_S1D",
+	"_S2D",
+	"_S3D",
+	"_S4D"
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_format_exception
+ *
+ * PARAMETERS:  Status       - The acpi_status code to be formatted
+ *
+ * RETURN:      A string containing the exception text. A valid pointer is
+ *              always returned.
+ *
+ * DESCRIPTION: This function translates an ACPI exception into an ASCII string.
+ *              It is here instead of utxface.c so it is always present.
+ *
+ ******************************************************************************/
+
+const char *acpi_format_exception(acpi_status status)
+{
+	const char *exception = NULL;
+
+	ACPI_FUNCTION_ENTRY();
+
+	exception = acpi_ut_validate_exception(status);
+	if (!exception) {
+
+		/* Exception code was not recognized */
+
+		ACPI_ERROR((AE_INFO,
+			    "Unknown exception code: 0x%8.8X", status));
+
+		exception = "UNKNOWN_STATUS_CODE";
+		dump_stack();
+	}
+
+	return (ACPI_CAST_PTR(const char, exception));
+}
+
+ACPI_EXPORT_SYMBOL(acpi_format_exception)
+
+/*******************************************************************************
+ *
+ * Namespace globals
+ *
+ ******************************************************************************/
+/*
+ * Predefined ACPI Names (Built-in to the Interpreter)
+ *
+ * NOTES:
+ * 1) _SB_ is defined to be a device to allow \_SB_._INI to be run
+ *    during the initialization sequence.
+ * 2) _TZ_ is defined to be a thermal zone in order to allow ASL code to
+ *    perform a Notify() operation on it.
+ */
+const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
+	{"_GPE", ACPI_TYPE_LOCAL_SCOPE, NULL},
+	{"_PR_", ACPI_TYPE_LOCAL_SCOPE, NULL},
+	{"_SB_", ACPI_TYPE_DEVICE, NULL},
+	{"_SI_", ACPI_TYPE_LOCAL_SCOPE, NULL},
+	{"_TZ_", ACPI_TYPE_THERMAL, NULL},
+	{"_REV", ACPI_TYPE_INTEGER, (char *)ACPI_CA_SUPPORT_LEVEL},
+	{"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME},
+	{"_GL_", ACPI_TYPE_MUTEX, (char *)1},
+
+#if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)
+	{"_OSI", ACPI_TYPE_METHOD, (char *)1},
+#endif
+
+	/* Table terminator */
+
+	{NULL, ACPI_TYPE_ANY, NULL}
+};
+
+/*
+ * Properties of the ACPI Object Types, both internal and external.
+ * The table is indexed by values of acpi_object_type
+ */
+const u8 acpi_gbl_ns_properties[] = {
+	ACPI_NS_NORMAL,		/* 00 Any              */
+	ACPI_NS_NORMAL,		/* 01 Number           */
+	ACPI_NS_NORMAL,		/* 02 String           */
+	ACPI_NS_NORMAL,		/* 03 Buffer           */
+	ACPI_NS_NORMAL,		/* 04 Package          */
+	ACPI_NS_NORMAL,		/* 05 field_unit       */
+	ACPI_NS_NEWSCOPE,	/* 06 Device           */
+	ACPI_NS_NORMAL,		/* 07 Event            */
+	ACPI_NS_NEWSCOPE,	/* 08 Method           */
+	ACPI_NS_NORMAL,		/* 09 Mutex            */
+	ACPI_NS_NORMAL,		/* 10 Region           */
+	ACPI_NS_NEWSCOPE,	/* 11 Power            */
+	ACPI_NS_NEWSCOPE,	/* 12 Processor        */
+	ACPI_NS_NEWSCOPE,	/* 13 Thermal          */
+	ACPI_NS_NORMAL,		/* 14 buffer_field     */
+	ACPI_NS_NORMAL,		/* 15 ddb_handle       */
+	ACPI_NS_NORMAL,		/* 16 Debug Object     */
+	ACPI_NS_NORMAL,		/* 17 def_field        */
+	ACPI_NS_NORMAL,		/* 18 bank_field       */
+	ACPI_NS_NORMAL,		/* 19 index_field      */
+	ACPI_NS_NORMAL,		/* 20 Reference        */
+	ACPI_NS_NORMAL,		/* 21 Alias            */
+	ACPI_NS_NORMAL,		/* 22 method_alias     */
+	ACPI_NS_NORMAL,		/* 23 Notify           */
+	ACPI_NS_NORMAL,		/* 24 Address Handler  */
+	ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL,	/* 25 Resource Desc    */
+	ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL,	/* 26 Resource Field   */
+	ACPI_NS_NEWSCOPE,	/* 27 Scope            */
+	ACPI_NS_NORMAL,		/* 28 Extra            */
+	ACPI_NS_NORMAL,		/* 29 Data             */
+	ACPI_NS_NORMAL		/* 30 Invalid          */
+};
+
+/* Hex to ASCII conversion table */
+
+static const char acpi_gbl_hex_to_ascii[] = {
+	'0', '1', '2', '3', '4', '5', '6', '7',
+	'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_hex_to_ascii_char
+ *
+ * PARAMETERS:  Integer             - Contains the hex digit
+ *              Position            - bit position of the digit within the
+ *                                    integer (multiple of 4)
+ *
+ * RETURN:      The converted Ascii character
+ *
+ * DESCRIPTION: Convert a hex digit to an Ascii character
+ *
+ ******************************************************************************/
+
+char acpi_ut_hex_to_ascii_char(acpi_integer integer, u32 position)
+{
+
+	return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
+}
+
+/******************************************************************************
+ *
+ * Event and Hardware globals
+ *
+ ******************************************************************************/
+
+struct acpi_bit_register_info acpi_gbl_bit_register_info[ACPI_NUM_BITREG] = {
+	/* Name                                     Parent Register             Register Bit Position                   Register Bit Mask       */
+
+	/* ACPI_BITREG_TIMER_STATUS         */ {ACPI_REGISTER_PM1_STATUS,
+						ACPI_BITPOSITION_TIMER_STATUS,
+						ACPI_BITMASK_TIMER_STATUS},
+	/* ACPI_BITREG_BUS_MASTER_STATUS    */ {ACPI_REGISTER_PM1_STATUS,
+						ACPI_BITPOSITION_BUS_MASTER_STATUS,
+						ACPI_BITMASK_BUS_MASTER_STATUS},
+	/* ACPI_BITREG_GLOBAL_LOCK_STATUS   */ {ACPI_REGISTER_PM1_STATUS,
+						ACPI_BITPOSITION_GLOBAL_LOCK_STATUS,
+						ACPI_BITMASK_GLOBAL_LOCK_STATUS},
+	/* ACPI_BITREG_POWER_BUTTON_STATUS  */ {ACPI_REGISTER_PM1_STATUS,
+						ACPI_BITPOSITION_POWER_BUTTON_STATUS,
+						ACPI_BITMASK_POWER_BUTTON_STATUS},
+	/* ACPI_BITREG_SLEEP_BUTTON_STATUS  */ {ACPI_REGISTER_PM1_STATUS,
+						ACPI_BITPOSITION_SLEEP_BUTTON_STATUS,
+						ACPI_BITMASK_SLEEP_BUTTON_STATUS},
+	/* ACPI_BITREG_RT_CLOCK_STATUS      */ {ACPI_REGISTER_PM1_STATUS,
+						ACPI_BITPOSITION_RT_CLOCK_STATUS,
+						ACPI_BITMASK_RT_CLOCK_STATUS},
+	/* ACPI_BITREG_WAKE_STATUS          */ {ACPI_REGISTER_PM1_STATUS,
+						ACPI_BITPOSITION_WAKE_STATUS,
+						ACPI_BITMASK_WAKE_STATUS},
+	/* ACPI_BITREG_PCIEXP_WAKE_STATUS   */ {ACPI_REGISTER_PM1_STATUS,
+						ACPI_BITPOSITION_PCIEXP_WAKE_STATUS,
+						ACPI_BITMASK_PCIEXP_WAKE_STATUS},
+
+	/* ACPI_BITREG_TIMER_ENABLE         */ {ACPI_REGISTER_PM1_ENABLE,
+						ACPI_BITPOSITION_TIMER_ENABLE,
+						ACPI_BITMASK_TIMER_ENABLE},
+	/* ACPI_BITREG_GLOBAL_LOCK_ENABLE   */ {ACPI_REGISTER_PM1_ENABLE,
+						ACPI_BITPOSITION_GLOBAL_LOCK_ENABLE,
+						ACPI_BITMASK_GLOBAL_LOCK_ENABLE},
+	/* ACPI_BITREG_POWER_BUTTON_ENABLE  */ {ACPI_REGISTER_PM1_ENABLE,
+						ACPI_BITPOSITION_POWER_BUTTON_ENABLE,
+						ACPI_BITMASK_POWER_BUTTON_ENABLE},
+	/* ACPI_BITREG_SLEEP_BUTTON_ENABLE  */ {ACPI_REGISTER_PM1_ENABLE,
+						ACPI_BITPOSITION_SLEEP_BUTTON_ENABLE,
+						ACPI_BITMASK_SLEEP_BUTTON_ENABLE},
+	/* ACPI_BITREG_RT_CLOCK_ENABLE      */ {ACPI_REGISTER_PM1_ENABLE,
+						ACPI_BITPOSITION_RT_CLOCK_ENABLE,
+						ACPI_BITMASK_RT_CLOCK_ENABLE},
+	/* ACPI_BITREG_PCIEXP_WAKE_DISABLE  */ {ACPI_REGISTER_PM1_ENABLE,
+						ACPI_BITPOSITION_PCIEXP_WAKE_DISABLE,
+						ACPI_BITMASK_PCIEXP_WAKE_DISABLE},
+
+	/* ACPI_BITREG_SCI_ENABLE           */ {ACPI_REGISTER_PM1_CONTROL,
+						ACPI_BITPOSITION_SCI_ENABLE,
+						ACPI_BITMASK_SCI_ENABLE},
+	/* ACPI_BITREG_BUS_MASTER_RLD       */ {ACPI_REGISTER_PM1_CONTROL,
+						ACPI_BITPOSITION_BUS_MASTER_RLD,
+						ACPI_BITMASK_BUS_MASTER_RLD},
+	/* ACPI_BITREG_GLOBAL_LOCK_RELEASE  */ {ACPI_REGISTER_PM1_CONTROL,
+						ACPI_BITPOSITION_GLOBAL_LOCK_RELEASE,
+						ACPI_BITMASK_GLOBAL_LOCK_RELEASE},
+	/* ACPI_BITREG_SLEEP_TYPE_A         */ {ACPI_REGISTER_PM1_CONTROL,
+						ACPI_BITPOSITION_SLEEP_TYPE_X,
+						ACPI_BITMASK_SLEEP_TYPE_X},
+	/* ACPI_BITREG_SLEEP_TYPE_B         */ {ACPI_REGISTER_PM1_CONTROL,
+						ACPI_BITPOSITION_SLEEP_TYPE_X,
+						ACPI_BITMASK_SLEEP_TYPE_X},
+	/* ACPI_BITREG_SLEEP_ENABLE         */ {ACPI_REGISTER_PM1_CONTROL,
+						ACPI_BITPOSITION_SLEEP_ENABLE,
+						ACPI_BITMASK_SLEEP_ENABLE},
+
+	/* ACPI_BITREG_ARB_DIS              */ {ACPI_REGISTER_PM2_CONTROL,
+						ACPI_BITPOSITION_ARB_DISABLE,
+						ACPI_BITMASK_ARB_DISABLE}
+};
+
+struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] = {
+	/* ACPI_EVENT_PMTIMER       */ {ACPI_BITREG_TIMER_STATUS,
+					ACPI_BITREG_TIMER_ENABLE,
+					ACPI_BITMASK_TIMER_STATUS,
+					ACPI_BITMASK_TIMER_ENABLE},
+	/* ACPI_EVENT_GLOBAL        */ {ACPI_BITREG_GLOBAL_LOCK_STATUS,
+					ACPI_BITREG_GLOBAL_LOCK_ENABLE,
+					ACPI_BITMASK_GLOBAL_LOCK_STATUS,
+					ACPI_BITMASK_GLOBAL_LOCK_ENABLE},
+	/* ACPI_EVENT_POWER_BUTTON  */ {ACPI_BITREG_POWER_BUTTON_STATUS,
+					ACPI_BITREG_POWER_BUTTON_ENABLE,
+					ACPI_BITMASK_POWER_BUTTON_STATUS,
+					ACPI_BITMASK_POWER_BUTTON_ENABLE},
+	/* ACPI_EVENT_SLEEP_BUTTON  */ {ACPI_BITREG_SLEEP_BUTTON_STATUS,
+					ACPI_BITREG_SLEEP_BUTTON_ENABLE,
+					ACPI_BITMASK_SLEEP_BUTTON_STATUS,
+					ACPI_BITMASK_SLEEP_BUTTON_ENABLE},
+	/* ACPI_EVENT_RTC           */ {ACPI_BITREG_RT_CLOCK_STATUS,
+					ACPI_BITREG_RT_CLOCK_ENABLE,
+					ACPI_BITMASK_RT_CLOCK_STATUS,
+					ACPI_BITMASK_RT_CLOCK_ENABLE},
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_region_name
+ *
+ * PARAMETERS:  space_id            - ID of the address space (region type)
+ *
+ * RETURN:      Pointer to a region name string
+ *
+ * DESCRIPTION: Translate a Space ID into a name string (Debug only)
+ *
+ ******************************************************************************/
+
+/* Region type decoding */
+
+const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
+	"SystemMemory",
+	"SystemIO",
+	"PCI_Config",
+	"EmbeddedControl",
+	"SMBus",
+	"SystemCMOS",
+	"PCIBARTarget",
+	"DataTable"
+};
+
+char *acpi_ut_get_region_name(u8 space_id)
+{
+
+	if (space_id >= ACPI_USER_REGION_BEGIN) {
+		return ("UserDefinedRegion");
+	} else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
+		return ("InvalidSpaceId");
+	}
+
+	return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id]));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_event_name
+ *
+ * PARAMETERS:  event_id            - Fixed event ID
+ *
+ * RETURN:      Pointer to an event name string
+ *
+ * DESCRIPTION: Translate an Event ID into a name string (Debug only)
+ *
+ ******************************************************************************/
+
+/* Event type decoding */
+
+static const char *acpi_gbl_event_types[ACPI_NUM_FIXED_EVENTS] = {
+	"PM_Timer",
+	"GlobalLock",
+	"PowerButton",
+	"SleepButton",
+	"RealTimeClock",
+};
+
+char *acpi_ut_get_event_name(u32 event_id)
+{
+
+	if (event_id > ACPI_EVENT_MAX) {
+		return ("InvalidEventID");
+	}
+
+	return (ACPI_CAST_PTR(char, acpi_gbl_event_types[event_id]));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_type_name
+ *
+ * PARAMETERS:  Type                - An ACPI object type
+ *
+ * RETURN:      Pointer to a type name string
+ *
+ * DESCRIPTION: Translate a Type ID into a name string (Debug only)
+ *
+ ******************************************************************************/
+
+/*
+ * Elements of acpi_gbl_ns_type_names below must match
+ * one-to-one with values of acpi_object_type
+ *
+ * The type ACPI_TYPE_ANY (Untyped) is used as a "don't care" when searching;
+ * when stored in a table it really means that we have thus far seen no
+ * evidence to indicate what type is actually going to be stored for this entry.
+ */
+static const char acpi_gbl_bad_type[] = "UNDEFINED";
+
+/* Printable names of the ACPI object types */
+
+static const char *acpi_gbl_ns_type_names[] = {
+	/* 00 */ "Untyped",
+	/* 01 */ "Integer",
+	/* 02 */ "String",
+	/* 03 */ "Buffer",
+	/* 04 */ "Package",
+	/* 05 */ "FieldUnit",
+	/* 06 */ "Device",
+	/* 07 */ "Event",
+	/* 08 */ "Method",
+	/* 09 */ "Mutex",
+	/* 10 */ "Region",
+	/* 11 */ "Power",
+	/* 12 */ "Processor",
+	/* 13 */ "Thermal",
+	/* 14 */ "BufferField",
+	/* 15 */ "DdbHandle",
+	/* 16 */ "DebugObject",
+	/* 17 */ "RegionField",
+	/* 18 */ "BankField",
+	/* 19 */ "IndexField",
+	/* 20 */ "Reference",
+	/* 21 */ "Alias",
+	/* 22 */ "MethodAlias",
+	/* 23 */ "Notify",
+	/* 24 */ "AddrHandler",
+	/* 25 */ "ResourceDesc",
+	/* 26 */ "ResourceFld",
+	/* 27 */ "Scope",
+	/* 28 */ "Extra",
+	/* 29 */ "Data",
+	/* 30 */ "Invalid"
+};
+
+char *acpi_ut_get_type_name(acpi_object_type type)
+{
+
+	if (type > ACPI_TYPE_INVALID) {
+		return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
+	}
+
+	return (ACPI_CAST_PTR(char, acpi_gbl_ns_type_names[type]));
+}
+
+char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
+{
+
+	if (!obj_desc) {
+		return ("[NULL Object Descriptor]");
+	}
+
+	return (acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE(obj_desc)));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_node_name
+ *
+ * PARAMETERS:  Object               - A namespace node
+ *
+ * RETURN:      Pointer to a string
+ *
+ * DESCRIPTION: Validate the node and return the node's ACPI name.
+ *
+ ******************************************************************************/
+
+char *acpi_ut_get_node_name(void *object)
+{
+	struct acpi_namespace_node *node = (struct acpi_namespace_node *)object;
+
+	/* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */
+
+	if (!object) {
+		return ("NULL");
+	}
+
+	/* Check for Root node */
+
+	if ((object == ACPI_ROOT_OBJECT) || (object == acpi_gbl_root_node)) {
+		return ("\"\\\" ");
+	}
+
+	/* Descriptor must be a namespace node */
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
+		return ("####");
+	}
+
+	/* Name must be a valid ACPI name */
+
+	if (!acpi_ut_valid_acpi_name(node->name.integer)) {
+		node->name.integer = acpi_ut_repair_name(node->name.ascii);
+	}
+
+	/* Return the name */
+
+	return (node->name.ascii);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_descriptor_name
+ *
+ * PARAMETERS:  Object               - An ACPI object
+ *
+ * RETURN:      Pointer to a string
+ *
+ * DESCRIPTION: Validate the object and return its descriptor type name
+ *
+ ******************************************************************************/
+
+/* Printable names of object descriptor types */
+
+static const char *acpi_gbl_desc_type_names[] = {
+	/* 00 */ "Invalid",
+	/* 01 */ "Cached",
+	/* 02 */ "State-Generic",
+	/* 03 */ "State-Update",
+	/* 04 */ "State-Package",
+	/* 05 */ "State-Control",
+	/* 06 */ "State-RootParseScope",
+	/* 07 */ "State-ParseScope",
+	/* 08 */ "State-WalkScope",
+	/* 09 */ "State-Result",
+	/* 10 */ "State-Notify",
+	/* 11 */ "State-Thread",
+	/* 12 */ "Walk",
+	/* 13 */ "Parser",
+	/* 14 */ "Operand",
+	/* 15 */ "Node"
+};
+
+char *acpi_ut_get_descriptor_name(void *object)
+{
+
+	if (!object) {
+		return ("NULL OBJECT");
+	}
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(object) > ACPI_DESC_TYPE_MAX) {
+		return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
+	}
+
+	return (ACPI_CAST_PTR(char,
+			      acpi_gbl_desc_type_names[ACPI_GET_DESCRIPTOR_TYPE
+						       (object)]));
+
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_reference_name
+ *
+ * PARAMETERS:  Object               - An ACPI reference object
+ *
+ * RETURN:      Pointer to a string
+ *
+ * DESCRIPTION: Decode a reference object sub-type to a string.
+ *
+ ******************************************************************************/
+
+/* Printable names of reference object sub-types */
+
+static const char *acpi_gbl_ref_class_names[] = {
+	/* 00 */ "Local",
+	/* 01 */ "Argument",
+	/* 02 */ "RefOf",
+	/* 03 */ "Index",
+	/* 04 */ "DdbHandle",
+	/* 05 */ "Named Object",
+	/* 06 */ "Debug"
+};
+
+const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
+{
+	if (!object)
+		return "NULL Object";
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND)
+		return "Not an Operand object";
+
+	if (object->common.type != ACPI_TYPE_LOCAL_REFERENCE)
+		return "Not a Reference object";
+
+	if (object->reference.class > ACPI_REFCLASS_MAX)
+		return "Unknown Reference class";
+
+	return acpi_gbl_ref_class_names[object->reference.class];
+}
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+/*
+ * Strings and procedures used for debug only
+ */
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_mutex_name
+ *
+ * PARAMETERS:  mutex_id        - The predefined ID for this mutex.
+ *
+ * RETURN:      String containing the name of the mutex. Always returns a valid
+ *              pointer.
+ *
+ * DESCRIPTION: Translate a mutex ID into a name string (Debug only)
+ *
+ ******************************************************************************/
+
+char *acpi_ut_get_mutex_name(u32 mutex_id)
+{
+
+	if (mutex_id > ACPI_MAX_MUTEX) {
+		return ("Invalid Mutex ID");
+	}
+
+	return (acpi_gbl_mutex_names[mutex_id]);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_notify_name
+ *
+ * PARAMETERS:  notify_value    - Value from the Notify() request
+ *
+ * RETURN:      String corresponding to the Notify Value.
+ *
+ * DESCRIPTION: Translate a Notify Value to a notify namestring.
+ *
+ ******************************************************************************/
+
+/* Names for Notify() values, used for debug output */
+
+static const char *acpi_gbl_notify_value_names[] = {
+	"Bus Check",
+	"Device Check",
+	"Device Wake",
+	"Eject Request",
+	"Device Check Light",
+	"Frequency Mismatch",
+	"Bus Mode Mismatch",
+	"Power Fault",
+	"Capabilities Check",
+	"Device PLD Check",
+	"Reserved",
+	"System Locality Update"
+};
+
+const char *acpi_ut_get_notify_name(u32 notify_value)
+{
+
+	if (notify_value <= ACPI_NOTIFY_MAX) {
+		return (acpi_gbl_notify_value_names[notify_value]);
+	} else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
+		return ("Reserved");
+	} else {		/* Greater or equal to 0x80 */
+
+		return ("**Device Specific**");
+	}
+}
+#endif
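+
+/*
+ * Example (illustrative sketch of the decoder above; values follow the
+ * acpi_gbl_notify_value_names table and the 0x80 device-specific cutoff):
+ *
+ *     acpi_ut_get_notify_name(0x02);   // "Device Wake"
+ *     acpi_ut_get_notify_name(0x7F);   // "Reserved", assuming
+ *                                      // ACPI_MAX_SYS_NOTIFY is 0x7F
+ *     acpi_ut_get_notify_name(0x81);   // "**Device Specific**"
+ */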
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_valid_object_type
+ *
+ * PARAMETERS:  Type            - Object type to be validated
+ *
+ * RETURN:      TRUE if valid object type, FALSE otherwise
+ *
+ * DESCRIPTION: Validate an object type
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_object_type(acpi_object_type type)
+{
+
+	if (type > ACPI_TYPE_LOCAL_MAX) {
+
+		/* Note: Assumes all TYPEs are contiguous (external/local) */
+
+		return (FALSE);
+	}
+
+	return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_init_globals
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Init library globals.  All globals that require specific
+ *              initialization should be initialized here!
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_init_globals(void)
+{
+	acpi_status status;
+	u32 i;
+
+	ACPI_FUNCTION_TRACE(ut_init_globals);
+
+	/* Create all memory caches */
+
+	status = acpi_ut_create_caches();
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Mutex locked flags */
+
+	for (i = 0; i < ACPI_NUM_MUTEX; i++) {
+		acpi_gbl_mutex_info[i].mutex = NULL;
+		acpi_gbl_mutex_info[i].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+		acpi_gbl_mutex_info[i].use_count = 0;
+	}
+
+	for (i = 0; i < ACPI_NUM_OWNERID_MASKS; i++) {
+		acpi_gbl_owner_id_mask[i] = 0;
+	}
+	acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;	/* Last ID is never valid */
+
+	/* GPE support */
+
+	acpi_gbl_gpe_xrupt_list_head = NULL;
+	acpi_gbl_gpe_fadt_blocks[0] = NULL;
+	acpi_gbl_gpe_fadt_blocks[1] = NULL;
+	acpi_current_gpe_count = 0;
+
+	/* Global handlers */
+
+	acpi_gbl_system_notify.handler = NULL;
+	acpi_gbl_device_notify.handler = NULL;
+	acpi_gbl_exception_handler = NULL;
+	acpi_gbl_init_handler = NULL;
+	acpi_gbl_table_handler = NULL;
+
+	/* Global Lock support */
+
+	acpi_gbl_global_lock_semaphore = NULL;
+	acpi_gbl_global_lock_mutex = NULL;
+	acpi_gbl_global_lock_acquired = FALSE;
+	acpi_gbl_global_lock_handle = 0;
+	acpi_gbl_global_lock_present = FALSE;
+
+	/* Miscellaneous variables */
+
+	acpi_gbl_cm_single_step = FALSE;
+	acpi_gbl_db_terminate_threads = FALSE;
+	acpi_gbl_shutdown = FALSE;
+	acpi_gbl_ns_lookup_count = 0;
+	acpi_gbl_ps_find_count = 0;
+	acpi_gbl_acpi_hardware_present = TRUE;
+	acpi_gbl_last_owner_id_index = 0;
+	acpi_gbl_next_owner_id_offset = 0;
+	acpi_gbl_trace_method_name = 0;
+	acpi_gbl_trace_dbg_level = 0;
+	acpi_gbl_trace_dbg_layer = 0;
+	acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
+	acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
+
+	/* Hardware oriented */
+
+	acpi_gbl_events_initialized = FALSE;
+	acpi_gbl_system_awake_and_running = TRUE;
+
+	/* Namespace */
+
+	acpi_gbl_root_node = NULL;
+	acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME;
+	acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED;
+	acpi_gbl_root_node_struct.type = ACPI_TYPE_DEVICE;
+	acpi_gbl_root_node_struct.child = NULL;
+	acpi_gbl_root_node_struct.peer = NULL;
+	acpi_gbl_root_node_struct.object = NULL;
+	acpi_gbl_root_node_struct.flags = ANOBJ_END_OF_PEER_LIST;
+
+#ifdef ACPI_DEBUG_OUTPUT
+	acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);
+#endif
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+	acpi_gbl_display_final_mem_stats = FALSE;
+#endif
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
+ACPI_EXPORT_SYMBOL(acpi_dbg_level)
+ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
+ACPI_EXPORT_SYMBOL(acpi_current_gpe_count)
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
new file mode 100644
index 0000000..a54ca84
--- /dev/null
+++ b/drivers/acpi/acpica/utinit.c
@@ -0,0 +1,152 @@
+/******************************************************************************
+ *
+ * Module Name: utinit - Common ACPI subsystem initialization
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acevents.h"
+#include "actables.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utinit")
+
+/* Local prototypes */
+static void acpi_ut_terminate(void);
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_terminate
+ *
+ * PARAMETERS:  none
+ *
+ * RETURN:      none
+ *
+ * DESCRIPTION: Free global memory
+ *
+ ******************************************************************************/
+
+static void acpi_ut_terminate(void)
+{
+	struct acpi_gpe_block_info *gpe_block;
+	struct acpi_gpe_block_info *next_gpe_block;
+	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+	struct acpi_gpe_xrupt_info *next_gpe_xrupt_info;
+
+	ACPI_FUNCTION_TRACE(ut_terminate);
+
+	/* Free global GPE blocks and related info structures */
+
+	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+	while (gpe_xrupt_info) {
+		gpe_block = gpe_xrupt_info->gpe_block_list_head;
+		while (gpe_block) {
+			next_gpe_block = gpe_block->next;
+			ACPI_FREE(gpe_block->event_info);
+			ACPI_FREE(gpe_block->register_info);
+			ACPI_FREE(gpe_block);
+
+			gpe_block = next_gpe_block;
+		}
+		next_gpe_xrupt_info = gpe_xrupt_info->next;
+		ACPI_FREE(gpe_xrupt_info);
+		gpe_xrupt_info = next_gpe_xrupt_info;
+	}
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_subsystem_shutdown
+ *
+ * PARAMETERS:  none
+ *
+ * RETURN:      none
+ *
+ * DESCRIPTION: Shut down the various subsystems. Do not delete the mutex
+ *              objects here, because the AML debugger may still be running.
+ *
+ ******************************************************************************/
+
+void acpi_ut_subsystem_shutdown(void)
+{
+
+	ACPI_FUNCTION_TRACE(ut_subsystem_shutdown);
+
+	/* Just exit if the subsystem has already been shut down */
+
+	if (acpi_gbl_shutdown) {
+		ACPI_ERROR((AE_INFO, "ACPI Subsystem is already terminated"));
+		return_VOID;
+	}
+
+	/* Subsystem appears active, go ahead and shut it down */
+
+	acpi_gbl_shutdown = TRUE;
+	acpi_gbl_startup_flags = 0;
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Shutting down ACPI Subsystem\n"));
+
+#ifndef ACPI_ASL_COMPILER
+
+	/* Close the acpi_event Handling */
+
+	acpi_ev_terminate();
+#endif
+
+	/* Close the Namespace */
+
+	acpi_ns_terminate();
+
+	/* Delete the ACPI tables */
+
+	acpi_tb_terminate();
+
+	/* Close the globals */
+
+	acpi_ut_terminate();
+
+	/* Purge the local caches */
+
+	(void)acpi_ut_delete_caches();
+	return_VOID;
+}
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
new file mode 100644
index 0000000..c9f682d
--- /dev/null
+++ b/drivers/acpi/acpica/utmath.c
@@ -0,0 +1,312 @@
+/*******************************************************************************
+ *
+ * Module Name: utmath - Integer math support routines
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utmath")
+
+/*
+ * Support for double-precision integer divide.  This code is included here
+ * in order to support kernel environments where the double-precision math
+ * library is not available.
+ */
+#ifndef ACPI_USE_NATIVE_DIVIDE
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_short_divide
+ *
+ * PARAMETERS:  Dividend            - 64-bit dividend
+ *              Divisor             - 32-bit divisor
+ *              out_quotient        - Pointer to where the quotient is returned
+ *              out_remainder       - Pointer to where the remainder is returned
+ *
+ * RETURN:      Status (Checks for divide-by-zero)
+ *
+ * DESCRIPTION: Perform a short (maximum 64 bits divided by 32 bits)
+ *              divide and modulo.  The result is a 64-bit quotient and a
+ *              32-bit remainder.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ut_short_divide(acpi_integer dividend,
+		     u32 divisor,
+		     acpi_integer * out_quotient, u32 * out_remainder)
+{
+	union uint64_overlay dividend_ovl;
+	union uint64_overlay quotient;
+	u32 remainder32;
+
+	ACPI_FUNCTION_TRACE(ut_short_divide);
+
+	/* Always check for a zero divisor */
+
+	if (divisor == 0) {
+		ACPI_ERROR((AE_INFO, "Divide by zero"));
+		return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
+	}
+
+	dividend_ovl.full = dividend;
+
+	/*
+	 * The quotient is 64 bits, the remainder is always 32 bits,
+	 * and is generated by the second divide.
+	 */
+	ACPI_DIV_64_BY_32(0, dividend_ovl.part.hi, divisor,
+			  quotient.part.hi, remainder32);
+	ACPI_DIV_64_BY_32(remainder32, dividend_ovl.part.lo, divisor,
+			  quotient.part.lo, remainder32);
+
+	/* Return only what was requested */
+
+	if (out_quotient) {
+		*out_quotient = quotient.full;
+	}
+	if (out_remainder) {
+		*out_remainder = remainder32;
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_divide
+ *
+ * PARAMETERS:  in_dividend         - Dividend
+ *              in_divisor          - Divisor
+ *              out_quotient        - Pointer to where the quotient is returned
+ *              out_remainder       - Pointer to where the remainder is returned
+ *
+ * RETURN:      Status (Checks for divide-by-zero)
+ *
+ * DESCRIPTION: Perform a divide and modulo.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_divide(acpi_integer in_dividend,
+	       acpi_integer in_divisor,
+	       acpi_integer * out_quotient, acpi_integer * out_remainder)
+{
+	union uint64_overlay dividend;
+	union uint64_overlay divisor;
+	union uint64_overlay quotient;
+	union uint64_overlay remainder;
+	union uint64_overlay normalized_dividend;
+	union uint64_overlay normalized_divisor;
+	u32 partial1;
+	union uint64_overlay partial2;
+	union uint64_overlay partial3;
+
+	ACPI_FUNCTION_TRACE(ut_divide);
+
+	/* Always check for a zero divisor */
+
+	if (in_divisor == 0) {
+		ACPI_ERROR((AE_INFO, "Divide by zero"));
+		return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
+	}
+
+	divisor.full = in_divisor;
+	dividend.full = in_dividend;
+	if (divisor.part.hi == 0) {
+		/*
+		 * 1) Simplest case: the divisor fits in 32 bits, so we can
+		 * just do two divides
+		 */
+		remainder.part.hi = 0;
+
+		/*
+		 * The quotient is 64 bits, the remainder is always 32 bits,
+		 * and is generated by the second divide.
+		 */
+		ACPI_DIV_64_BY_32(0, dividend.part.hi, divisor.part.lo,
+				  quotient.part.hi, partial1);
+		ACPI_DIV_64_BY_32(partial1, dividend.part.lo, divisor.part.lo,
+				  quotient.part.lo, remainder.part.lo);
+	}
+
+	else {
+		/*
+		 * 2) The general case where the divisor is a full 64 bits
+		 * is more difficult
+		 */
+		quotient.part.hi = 0;
+		normalized_dividend = dividend;
+		normalized_divisor = divisor;
+
+		/* Normalize the operands (shift until the divisor fits in 32 bits) */
+
+		do {
+			ACPI_SHIFT_RIGHT_64(normalized_divisor.part.hi,
+					    normalized_divisor.part.lo);
+			ACPI_SHIFT_RIGHT_64(normalized_dividend.part.hi,
+					    normalized_dividend.part.lo);
+
+		} while (normalized_divisor.part.hi != 0);
+
+		/* Partial divide */
+
+		ACPI_DIV_64_BY_32(normalized_dividend.part.hi,
+				  normalized_dividend.part.lo,
+				  normalized_divisor.part.lo,
+				  quotient.part.lo, partial1);
+
+		/*
+		 * The quotient is always 32 bits, and simply requires adjustment.
+		 * The 64-bit remainder must be generated.
+		 */
+		partial1 = quotient.part.lo * divisor.part.hi;
+		partial2.full =
+		    (acpi_integer) quotient.part.lo * divisor.part.lo;
+		partial3.full = (acpi_integer) partial2.part.hi + partial1;
+
+		remainder.part.hi = partial3.part.lo;
+		remainder.part.lo = partial2.part.lo;
+
+		if (partial3.part.hi == 0) {
+			if (partial3.part.lo >= dividend.part.hi) {
+				if (partial3.part.lo == dividend.part.hi) {
+					if (partial2.part.lo > dividend.part.lo) {
+						quotient.part.lo--;
+						remainder.full -= divisor.full;
+					}
+				} else {
+					quotient.part.lo--;
+					remainder.full -= divisor.full;
+				}
+			}
+
+			remainder.full = remainder.full - dividend.full;
+			remainder.part.hi = (u32) - ((s32) remainder.part.hi);
+			remainder.part.lo = (u32) - ((s32) remainder.part.lo);
+
+			if (remainder.part.lo) {
+				remainder.part.hi--;
+			}
+		}
+	}
+
+	/* Return only what was requested */
+
+	if (out_quotient) {
+		*out_quotient = quotient.full;
+	}
+	if (out_remainder) {
+		*out_remainder = remainder.full;
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+#else
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_short_divide, acpi_ut_divide
+ *
+ * PARAMETERS:  See function headers above
+ *
+ * DESCRIPTION: Native versions of the ut_divide functions. Use these if either
+ *              1) The target is a 64-bit platform and therefore 64-bit
+ *                 integer math is supported directly by the machine.
+ *              2) The target is a 32-bit or 16-bit platform, and the
+ *                 double-precision integer math library is available to
+ *                 perform the divide.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ut_short_divide(acpi_integer in_dividend,
+		     u32 divisor,
+		     acpi_integer * out_quotient, u32 * out_remainder)
+{
+
+	ACPI_FUNCTION_TRACE(ut_short_divide);
+
+	/* Always check for a zero divisor */
+
+	if (divisor == 0) {
+		ACPI_ERROR((AE_INFO, "Divide by zero"));
+		return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
+	}
+
+	/* Return only what was requested */
+
+	if (out_quotient) {
+		*out_quotient = in_dividend / divisor;
+	}
+	if (out_remainder) {
+		*out_remainder = (u32) (in_dividend % divisor);
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+acpi_status
+acpi_ut_divide(acpi_integer in_dividend,
+	       acpi_integer in_divisor,
+	       acpi_integer * out_quotient, acpi_integer * out_remainder)
+{
+	ACPI_FUNCTION_TRACE(ut_divide);
+
+	/* Always check for a zero divisor */
+
+	if (in_divisor == 0) {
+		ACPI_ERROR((AE_INFO, "Divide by zero"));
+		return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
+	}
+
+	/* Return only what was requested */
+
+	if (out_quotient) {
+		*out_quotient = in_dividend / in_divisor;
+	}
+	if (out_remainder) {
+		*out_remainder = in_dividend % in_divisor;
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+#endif
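+
+/*
+ * Usage sketch (illustrative only; the library and native variants above
+ * behave the same way for this input):
+ *
+ *     acpi_integer quotient;
+ *     u32 remainder;
+ *     acpi_status status;
+ *
+ *     status = acpi_ut_short_divide(0x200000001, 10, &quotient, &remainder);
+ *     // On success: quotient == 0x33333333, remainder == 3.  Either output
+ *     // pointer may be NULL if the caller does not need that result.
+ */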
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
new file mode 100644
index 0000000..c1f7f4e
--- /dev/null
+++ b/drivers/acpi/acpica/utmisc.c
@@ -0,0 +1,1093 @@
+/*******************************************************************************
+ *
+ * Module Name: utmisc - common utility procedures
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <linux/module.h>
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utmisc")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_validate_exception
+ *
+ * PARAMETERS:  Status       - The acpi_status code to be formatted
+ *
+ * RETURN:      A string containing the exception text. NULL if exception is
+ *              not valid.
+ *
+ * DESCRIPTION: This function validates and translates an ACPI exception into
+ *              an ASCII string.
+ *
+ ******************************************************************************/
+const char *acpi_ut_validate_exception(acpi_status status)
+{
+	u32 sub_status;
+	const char *exception = NULL;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/*
+	 * Status is composed of two parts, a "type" and an actual code
+	 */
+	sub_status = (status & ~AE_CODE_MASK);
+
+	switch (status & AE_CODE_MASK) {
+	case AE_CODE_ENVIRONMENTAL:
+
+		if (sub_status <= AE_CODE_ENV_MAX) {
+			exception = acpi_gbl_exception_names_env[sub_status];
+		}
+		break;
+
+	case AE_CODE_PROGRAMMER:
+
+		if (sub_status <= AE_CODE_PGM_MAX) {
+			exception = acpi_gbl_exception_names_pgm[sub_status];
+		}
+		break;
+
+	case AE_CODE_ACPI_TABLES:
+
+		if (sub_status <= AE_CODE_TBL_MAX) {
+			exception = acpi_gbl_exception_names_tbl[sub_status];
+		}
+		break;
+
+	case AE_CODE_AML:
+
+		if (sub_status <= AE_CODE_AML_MAX) {
+			exception = acpi_gbl_exception_names_aml[sub_status];
+		}
+		break;
+
+	case AE_CODE_CONTROL:
+
+		if (sub_status <= AE_CODE_CTRL_MAX) {
+			exception = acpi_gbl_exception_names_ctrl[sub_status];
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return (ACPI_CAST_PTR(const char, exception));
+}
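+
+/*
+ * Typical usage sketch (illustrative): the caller falls back to a default
+ * string when the status code is not recognized; acpi_format_exception is
+ * expected to do something similar.
+ *
+ *     const char *name = acpi_ut_validate_exception(status);
+ *     if (!name) {
+ *         name = "UNKNOWN_STATUS_CODE";
+ *     }
+ */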
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_is_aml_table
+ *
+ * PARAMETERS:  Table               - An ACPI table
+ *
+ * RETURN:      TRUE if table contains executable AML; FALSE otherwise
+ *
+ * DESCRIPTION: Check ACPI Signature for a table that contains AML code.
+ *              Currently, these are DSDT, SSDT, and PSDT. All other table
+ *              types are data tables that do not contain AML code.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
+{
+
+	/* These are the only tables that contain executable AML */
+
+	if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) ||
+	    ACPI_COMPARE_NAME(table->signature, ACPI_SIG_PSDT) ||
+	    ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
+		return (TRUE);
+	}
+
+	return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_allocate_owner_id
+ *
+ * PARAMETERS:  owner_id        - Where the new owner ID is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Allocate a table or method owner ID. The owner ID is used to
+ *              track objects created by the table or method, to be deleted
+ *              when the method exits or the table is unloaded.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
+{
+	u32 i;
+	u32 j;
+	u32 k;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ut_allocate_owner_id);
+
+	/* Guard against multiple allocations of ID to the same location */
+
+	if (*owner_id) {
+		ACPI_ERROR((AE_INFO, "Owner ID [%2.2X] already exists",
+			    *owner_id));
+		return_ACPI_STATUS(AE_ALREADY_EXISTS);
+	}
+
+	/* Mutex for the global ID mask */
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Find a free owner ID, cycling through all possible IDs on repeated
+	 * allocations. The loop runs (ACPI_NUM_OWNERID_MASKS + 1) times because
+	 * the first index may have to be scanned twice.
+	 */
+	for (i = 0, j = acpi_gbl_last_owner_id_index;
+	     i < (ACPI_NUM_OWNERID_MASKS + 1); i++, j++) {
+		if (j >= ACPI_NUM_OWNERID_MASKS) {
+			j = 0;	/* Wraparound to start of mask array */
+		}
+
+		for (k = acpi_gbl_next_owner_id_offset; k < 32; k++) {
+			if (acpi_gbl_owner_id_mask[j] == ACPI_UINT32_MAX) {
+
+				/* There are no free IDs in this mask */
+
+				break;
+			}
+
+			if (!(acpi_gbl_owner_id_mask[j] & (1 << k))) {
+				/*
+				 * Found a free ID. The actual ID is the bit index plus one,
+				 * making zero an invalid Owner ID. Save this as the last ID
+				 * allocated and update the global ID mask.
+				 */
+				acpi_gbl_owner_id_mask[j] |= (1 << k);
+
+				acpi_gbl_last_owner_id_index = (u8) j;
+				acpi_gbl_next_owner_id_offset = (u8) (k + 1);
+
+				/*
+				 * Construct encoded ID from the index and bit position
+				 *
+				 * Note: Last [j].k (bit 255) is never used and is marked
+				 * permanently allocated (prevents +1 overflow)
+				 */
+				*owner_id =
+				    (acpi_owner_id) ((k + 1) + ACPI_MUL_32(j));
+
+				ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
+						  "Allocated OwnerId: %2.2X\n",
+						  (unsigned int)*owner_id));
+				goto exit;
+			}
+		}
+
+		acpi_gbl_next_owner_id_offset = 0;
+	}
+
+	/*
+	 * All owner_ids have been allocated. This typically should
+	 * not happen since the IDs are reused after deallocation. The IDs are
+	 * allocated upon table load (one per table) and method execution, and
+	 * they are released when a table is unloaded or a method completes
+	 * execution.
+	 *
+	 * If this error happens, there may be very deep nesting of invoked control
+	 * methods, or there may be a bug where the IDs are not released.
+	 */
+	status = AE_OWNER_ID_LIMIT;
+	ACPI_ERROR((AE_INFO,
+		    "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT"));
+
+      exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_release_owner_id
+ *
+ * PARAMETERS:  owner_id_ptr        - Pointer to a previously allocated owner_id
+ *
+ * RETURN:      None. No error is returned because we are either exiting a
+ *              control method or unloading a table. Either way, we would
+ *              ignore any error anyway.
+ *
+ * DESCRIPTION: Release a table or method owner ID.  Valid IDs are 1 - 255
+ *
+ ******************************************************************************/
+
+void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
+{
+	acpi_owner_id owner_id = *owner_id_ptr;
+	acpi_status status;
+	u32 index;
+	u32 bit;
+
+	ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id);
+
+	/* Always clear the input owner_id (zero is an invalid ID) */
+
+	*owner_id_ptr = 0;
+
+	/* Zero is not a valid owner_id */
+
+	if (owner_id == 0) {
+		ACPI_ERROR((AE_INFO, "Invalid OwnerId: %2.2X", owner_id));
+		return_VOID;
+	}
+
+	/* Mutex for the global ID mask */
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
+	if (ACPI_FAILURE(status)) {
+		return_VOID;
+	}
+
+	/* Normalize the ID to zero */
+
+	owner_id--;
+
+	/* Decode ID to index/offset pair */
+
+	index = ACPI_DIV_32(owner_id);
+	bit = 1 << ACPI_MOD_32(owner_id);
+
+	/* Free the owner ID only if it is valid */
+
+	if (acpi_gbl_owner_id_mask[index] & bit) {
+		acpi_gbl_owner_id_mask[index] ^= bit;
+	} else {
+		ACPI_ERROR((AE_INFO,
+			    "Release of non-allocated OwnerId: %2.2X",
+			    owner_id + 1));
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
+	return_VOID;
+}
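+
+/*
+ * Encoding sketch (illustrative): an owner ID is (bit index + 1) plus 32 for
+ * each preceding mask word, so ID 0 is never handed out.  For example, with
+ * mask index j == 1 and bit position k == 0:
+ *
+ *     id    = (k + 1) + ACPI_MUL_32(j);     // == 33 (0x21)
+ *     index = ACPI_DIV_32(id - 1);          // == 1, back to mask word j
+ *     bit   = 1 << ACPI_MOD_32(id - 1);     // == bit 0 of that word
+ */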
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_strupr (strupr)
+ *
+ * PARAMETERS:  src_string      - The source string to convert
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Convert string to uppercase
+ *
+ * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
+ *
+ ******************************************************************************/
+
+void acpi_ut_strupr(char *src_string)
+{
+	char *string;
+
+	ACPI_FUNCTION_ENTRY();
+
+	if (!src_string) {
+		return;
+	}
+
+	/* Walk entire string, uppercasing the letters */
+
+	for (string = src_string; *string; string++) {
+		*string = (char)ACPI_TOUPPER(*string);
+	}
+
+	return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_print_string
+ *
+ * PARAMETERS:  String          - Null terminated ASCII string
+ *              max_length      - Maximum output length
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Dump an ASCII string with support for ACPI-defined escape
+ *              sequences.
+ *
+ ******************************************************************************/
+
+void acpi_ut_print_string(char *string, u8 max_length)
+{
+	u32 i;
+
+	if (!string) {
+		acpi_os_printf("<\"NULL STRING PTR\">");
+		return;
+	}
+
+	acpi_os_printf("\"");
+	for (i = 0; string[i] && (i < max_length); i++) {
+
+		/* Escape sequences */
+
+		switch (string[i]) {
+		case 0x07:
+			acpi_os_printf("\\a");	/* BELL */
+			break;
+
+		case 0x08:
+			acpi_os_printf("\\b");	/* BACKSPACE */
+			break;
+
+		case 0x0C:
+			acpi_os_printf("\\f");	/* FORMFEED */
+			break;
+
+		case 0x0A:
+			acpi_os_printf("\\n");	/* LINEFEED */
+			break;
+
+		case 0x0D:
+			acpi_os_printf("\\r");	/* CARRIAGE RETURN */
+			break;
+
+		case 0x09:
+			acpi_os_printf("\\t");	/* HORIZONTAL TAB */
+			break;
+
+		case 0x0B:
+			acpi_os_printf("\\v");	/* VERTICAL TAB */
+			break;
+
+		case '\'':	/* Single Quote */
+		case '\"':	/* Double Quote */
+		case '\\':	/* Backslash */
+			acpi_os_printf("\\%c", (int)string[i]);
+			break;
+
+		default:
+
+			/* Check for printable character or hex escape */
+
+			if (ACPI_IS_PRINT(string[i])) {
+				/* This is a normal character */
+
+				acpi_os_printf("%c", (int)string[i]);
+			} else {
+				/* All others will be Hex escapes */
+
+				acpi_os_printf("\\x%2.2X", (s32) string[i]);
+			}
+			break;
+		}
+	}
+	acpi_os_printf("\"");
+
+	if (i == max_length && string[i]) {
+		acpi_os_printf("...");
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_dword_byte_swap
+ *
+ * PARAMETERS:  Value           - Value to be converted
+ *
+ * RETURN:      u32 integer with bytes swapped
+ *
+ * DESCRIPTION: Convert a 32-bit value to big-endian (swap the bytes)
+ *
+ ******************************************************************************/
+
+u32 acpi_ut_dword_byte_swap(u32 value)
+{
+	union {
+		u32 value;
+		u8 bytes[4];
+	} out;
+	union {
+		u32 value;
+		u8 bytes[4];
+	} in;
+
+	ACPI_FUNCTION_ENTRY();
+
+	in.value = value;
+
+	out.bytes[0] = in.bytes[3];
+	out.bytes[1] = in.bytes[2];
+	out.bytes[2] = in.bytes[1];
+	out.bytes[3] = in.bytes[0];
+
+	return (out.value);
+}
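+
+/*
+ * Example (illustrative): the swap reverses the byte order within the dword,
+ * e.g. acpi_ut_dword_byte_swap(0x12345678) returns 0x78563412.
+ */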
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_set_integer_width
+ *
+ * PARAMETERS:  Revision            - From the DSDT header
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Set the global integer bit width based upon the revision
+ *              of the DSDT.  For Revision 1 and 0, Integers are 32 bits.
+ *              For Revision 2 and above, Integers are 64 bits.  Yes, this
+ *              makes a difference.
+ *
+ ******************************************************************************/
+
+void acpi_ut_set_integer_width(u8 revision)
+{
+
+	if (revision < 2) {
+
+		/* 32-bit case */
+
+		acpi_gbl_integer_bit_width = 32;
+		acpi_gbl_integer_nybble_width = 8;
+		acpi_gbl_integer_byte_width = 4;
+	} else {
+		/* 64-bit case (ACPI 2.0+) */
+
+		acpi_gbl_integer_bit_width = 64;
+		acpi_gbl_integer_nybble_width = 16;
+		acpi_gbl_integer_byte_width = 8;
+	}
+}
+
+#ifdef ACPI_DEBUG_OUTPUT
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_display_init_pathname
+ *
+ * PARAMETERS:  Type                - Object type of the node
+ *              obj_handle          - Handle whose pathname will be displayed
+ *              Path                - Additional path string to be appended.
+ *                                      (NULL if no extra path)
+ *
+ * RETURN:      acpi_status
+ *
+ * DESCRIPTION: Display full pathname of an object, DEBUG ONLY
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_display_init_pathname(u8 type,
+			      struct acpi_namespace_node *obj_handle,
+			      char *path)
+{
+	acpi_status status;
+	struct acpi_buffer buffer;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/* Only print the path if the appropriate debug level is enabled */
+
+	if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
+		return;
+	}
+
+	/* Get the full pathname to the node */
+
+	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+	status = acpi_ns_handle_to_pathname(obj_handle, &buffer);
+	if (ACPI_FAILURE(status)) {
+		return;
+	}
+
+	/* Print what we're doing */
+
+	switch (type) {
+	case ACPI_TYPE_METHOD:
+		acpi_os_printf("Executing  ");
+		break;
+
+	default:
+		acpi_os_printf("Initializing ");
+		break;
+	}
+
+	/* Print the object type and pathname */
+
+	acpi_os_printf("%-12s %s",
+		       acpi_ut_get_type_name(type), (char *)buffer.pointer);
+
+	/* Extra path is used to append names like _STA, _INI, etc. */
+
+	if (path) {
+		acpi_os_printf(".%s", path);
+	}
+	acpi_os_printf("\n");
+
+	ACPI_FREE(buffer.pointer);
+}
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_valid_acpi_char
+ *
+ * PARAMETERS:  Char            - The character to be examined
+ *              Position        - Byte position (0-3)
+ *
+ * RETURN:      TRUE if the character is valid, FALSE otherwise
+ *
+ * DESCRIPTION: Check for a valid ACPI character. Must be one of:
+ *              1) Upper case alpha
+ *              2) numeric
+ *              3) underscore
+ *
+ *              We allow a '!' as the last character because of the ASF! table
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_acpi_char(char character, u32 position)
+{
+
+	if (!((character >= 'A' && character <= 'Z') ||
+	      (character >= '0' && character <= '9') || (character == '_'))) {
+
+		/* Allow a '!' in the last position */
+
+		if (character == '!' && position == 3) {
+			return (TRUE);
+		}
+
+		return (FALSE);
+	}
+
+	return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_valid_acpi_name
+ *
+ * PARAMETERS:  Name            - The name to be examined
+ *
+ * RETURN:      TRUE if the name is valid, FALSE otherwise
+ *
+ * DESCRIPTION: Check for a valid ACPI name.  Each character must be one of:
+ *              1) Upper case alpha
+ *              2) numeric
+ *              3) underscore
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_acpi_name(u32 name)
+{
+	u32 i;
+
+	ACPI_FUNCTION_ENTRY();
+
+	for (i = 0; i < ACPI_NAME_SIZE; i++) {
+		if (!acpi_ut_valid_acpi_char
+		    ((ACPI_CAST_PTR(char, &name))[i], i)) {
+			return (FALSE);
+		}
+	}
+
+	return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_repair_name
+ *
+ * PARAMETERS:  Name            - The ACPI name to be repaired
+ *
+ * RETURN:      Repaired version of the name
+ *
+ * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and
+ *              return the new name.
+ *
+ ******************************************************************************/
+
+acpi_name acpi_ut_repair_name(char *name)
+{
+	u32 i;
+	char new_name[ACPI_NAME_SIZE];
+
+	for (i = 0; i < ACPI_NAME_SIZE; i++) {
+		new_name[i] = name[i];
+
+		/*
+		 * Replace a bad character with something printable, yet technically
+		 * still invalid. This prevents any collisions with existing "good"
+		 * names in the namespace.
+		 */
+		if (!acpi_ut_valid_acpi_char(name[i], i)) {
+			new_name[i] = '*';
+		}
+	}
+
+	return (*(u32 *) new_name);
+}
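+
+/*
+ * Example (illustrative): "_SB_" and "GPE0" pass acpi_ut_valid_acpi_name()
+ * unchanged, while a name such as "ab-1" fails validation and would be
+ * repaired by acpi_ut_repair_name() to "***1" (only the digit survives).
+ */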
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_strtoul64
+ *
+ * PARAMETERS:  String          - Null terminated string
+ *              Base            - Radix of the string: 16 or ACPI_ANY_BASE;
+ *                                ACPI_ANY_BASE means the conversion is done
+ *                                on behalf of the to_integer operator
+ *              ret_integer     - Where the converted integer is returned
+ *
+ * RETURN:      Status and Converted value
+ *
+ * DESCRIPTION: Convert a string into an unsigned value. Performs either a
+ *              32-bit or 64-bit conversion, depending on the current mode
+ *              of the interpreter.
+ *              NOTE: Octal strings are not supported, as they are not needed.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer)
+{
+	u32 this_digit = 0;
+	acpi_integer return_value = 0;
+	acpi_integer quotient;
+	acpi_integer dividend;
+	u32 to_integer_op = (base == ACPI_ANY_BASE);
+	u32 mode32 = (acpi_gbl_integer_byte_width == 4);
+	u8 valid_digits = 0;
+	u8 sign_of0x = 0;
+	u8 term = 0;
+
+	ACPI_FUNCTION_TRACE_STR(ut_strtoul64, string);
+
+	switch (base) {
+	case ACPI_ANY_BASE:
+	case 16:
+		break;
+
+	default:
+		/* Invalid Base */
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	if (!string) {
+		goto error_exit;
+	}
+
+	/* Skip over any white space in the buffer */
+
+	while ((*string) && (ACPI_IS_SPACE(*string) || *string == '\t')) {
+		string++;
+	}
+
+	if (to_integer_op) {
+		/*
+		 * Base equal to ACPI_ANY_BASE means 'to_integer operation case'.
+		 * We need to determine if it is decimal or hexadecimal.
+		 */
+		if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
+			sign_of0x = 1;
+			base = 16;
+
+			/* Skip over the leading '0x' */
+			string += 2;
+		} else {
+			base = 10;
+		}
+	}
+
+	/* Any string left? Check that '0x' is not followed by white space. */
+
+	if (!(*string) || ACPI_IS_SPACE(*string) || *string == '\t') {
+		if (to_integer_op) {
+			goto error_exit;
+		} else {
+			goto all_done;
+		}
+	}
+
+	/*
+	 * Perform a 32-bit or 64-bit conversion, depending upon the current
+	 * execution mode of the interpreter
+	 */
+	dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
+
+	/* Main loop: convert the string to a 32- or 64-bit integer */
+
+	while (*string) {
+		if (ACPI_IS_DIGIT(*string)) {
+
+			/* Convert ASCII 0-9 to Decimal value */
+
+			this_digit = ((u8) * string) - '0';
+		} else if (base == 10) {
+
+			/* Digit is out of range; possible in to_integer case only */
+
+			term = 1;
+		} else {
+			this_digit = (u8) ACPI_TOUPPER(*string);
+			if (ACPI_IS_XDIGIT((char)this_digit)) {
+
+				/* Convert ASCII Hex char to value */
+
+				this_digit = this_digit - 'A' + 10;
+			} else {
+				term = 1;
+			}
+		}
+
+		if (term) {
+			if (to_integer_op) {
+				goto error_exit;
+			} else {
+				break;
+			}
+		} else if ((valid_digits == 0) && (this_digit == 0)
+			   && !sign_of0x) {
+
+			/* Skip zeros */
+			string++;
+			continue;
+		}
+
+		valid_digits++;
+
+		if (sign_of0x && ((valid_digits > 16)
+				  || ((valid_digits > 8) && mode32))) {
+			/*
+			 * This is the to_integer operation case: the 0x-prefixed
+			 * constant has more hex digits than the current integer
+			 * width can hold, so the conversion would overflow.
+			 * (See the ACPI spec for string-to-integer conversion.)
+			 */
+			goto error_exit;
+		}
+
+		/* Check for overflow before folding this digit into the result */
+
+		(void)
+		    acpi_ut_short_divide((dividend - (acpi_integer) this_digit),
+					 base, &quotient, NULL);
+
+		if (return_value > quotient) {
+			if (to_integer_op) {
+				goto error_exit;
+			} else {
+				break;
+			}
+		}
+
+		return_value *= base;
+		return_value += this_digit;
+		string++;
+	}
+
+	/* All done, normal exit */
+
+      all_done:
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
+			  ACPI_FORMAT_UINT64(return_value)));
+
+	*ret_integer = return_value;
+	return_ACPI_STATUS(AE_OK);
+
+      error_exit:
+	/* Base was set/validated above */
+
+	if (base == 10) {
+		return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
+	} else {
+		return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
+	}
+}
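+
+/*
+ * Conversion sketch (illustrative, based on the rules implemented above):
+ *
+ *     acpi_integer value;
+ *
+ *     acpi_ut_strtoul64("0x1A", ACPI_ANY_BASE, &value);  // value == 0x1A
+ *     acpi_ut_strtoul64("26", ACPI_ANY_BASE, &value);    // value == 26 (decimal)
+ *     acpi_ut_strtoul64("26", 16, &value);               // value == 0x26
+ *     acpi_ut_strtoul64("0x", ACPI_ANY_BASE, &value);    // AE_BAD_HEX_CONSTANT
+ */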
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_create_update_state_and_push
+ *
+ * PARAMETERS:  Object          - Object to be added to the new state
+ *              Action          - Increment/Decrement
+ *              state_list      - List the state will be added to
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a new state and push it
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
+				     u16 action,
+				     union acpi_generic_state **state_list)
+{
+	union acpi_generic_state *state;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/* Ignore null objects; these are expected */
+
+	if (!object) {
+		return (AE_OK);
+	}
+
+	state = acpi_ut_create_update_state(object, action);
+	if (!state) {
+		return (AE_NO_MEMORY);
+	}
+
+	acpi_ut_push_generic_state(state_list, state);
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_walk_package_tree
+ *
+ * PARAMETERS:  source_object       - The package to walk
+ *              target_object       - Target object (if package is being copied)
+ *              walk_callback       - Called once for each package element
+ *              Context             - Passed to the callback function
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Walk through a package
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
+			  void *target_object,
+			  acpi_pkg_callback walk_callback, void *context)
+{
+	acpi_status status = AE_OK;
+	union acpi_generic_state *state_list = NULL;
+	union acpi_generic_state *state;
+	u32 this_index;
+	union acpi_operand_object *this_source_obj;
+
+	ACPI_FUNCTION_TRACE(ut_walk_package_tree);
+
+	state = acpi_ut_create_pkg_state(source_object, target_object, 0);
+	if (!state) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	while (state) {
+
+		/* Get one element of the package */
+
+		this_index = state->pkg.index;
+		this_source_obj = (union acpi_operand_object *)
+		    state->pkg.source_object->package.elements[this_index];
+
+		/*
+		 * Check for:
+		 * 1) An uninitialized package element.  It is completely
+		 *    legal to declare a package and leave it uninitialized
+		 * 2) Not an internal object - can be a namespace node instead
+		 * 3) Any type other than a package.  Packages are handled by the
+		 *    else case below.
+		 */
+		if ((!this_source_obj) ||
+		    (ACPI_GET_DESCRIPTOR_TYPE(this_source_obj) !=
+		     ACPI_DESC_TYPE_OPERAND)
+		    || (ACPI_GET_OBJECT_TYPE(this_source_obj) !=
+			ACPI_TYPE_PACKAGE)) {
+			status =
+			    walk_callback(ACPI_COPY_TYPE_SIMPLE,
+					  this_source_obj, state, context);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+
+			state->pkg.index++;
+			while (state->pkg.index >=
+			       state->pkg.source_object->package.count) {
+				/*
+				 * We've handled all of the objects at this level. This means
+				 * that we have just completed a package.  That package may
+				 * have contained one or more packages itself.
+				 *
+				 * Delete this state and pop the previous state (package).
+				 */
+				acpi_ut_delete_generic_state(state);
+				state = acpi_ut_pop_generic_state(&state_list);
+
+				/* Finished when there are no more states */
+
+				if (!state) {
+					/*
+					 * We have handled all of the objects in the
+					 * top-level package, so we are done; exit.
+					 */
+					return_ACPI_STATUS(AE_OK);
+				}
+
+				/*
+				 * Go back up a level and move the index past the just
+				 * completed package object.
+				 */
+				state->pkg.index++;
+			}
+		} else {
+			/* This is a subobject of type package */
+
+			status =
+			    walk_callback(ACPI_COPY_TYPE_PACKAGE,
+					  this_source_obj, state, context);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+
+			/*
+			 * Push the current state and create a new one.
+			 * The callback above returned a new target package object.
+			 */
+			acpi_ut_push_generic_state(&state_list, state);
+			state = acpi_ut_create_pkg_state(this_source_obj,
+							 state->pkg.
+							 this_target_obj, 0);
+			if (!state) {
+
+				/* Free any stacked Update State objects */
+
+				while (state_list) {
+					state =
+					    acpi_ut_pop_generic_state
+					    (&state_list);
+					acpi_ut_delete_generic_state(state);
+				}
+				return_ACPI_STATUS(AE_NO_MEMORY);
+			}
+		}
+	}
+
+	/* We should never get here */
+
+	return_ACPI_STATUS(AE_AML_INTERNAL);
+}
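+
+/*
+ * Callback sketch (illustrative; the parameter list follows the two
+ * walk_callback() invocations above):
+ *
+ *     static acpi_status my_pkg_callback(u8 object_type,
+ *                                        union acpi_operand_object *source,
+ *                                        union acpi_generic_state *state,
+ *                                        void *context)
+ *     {
+ *         // object_type is ACPI_COPY_TYPE_SIMPLE for leaf elements
+ *         // (including uninitialized ones, where source may be NULL) and
+ *         // ACPI_COPY_TYPE_PACKAGE when a nested package is entered.
+ *         return (AE_OK);
+ *     }
+ *
+ *     status = acpi_ut_walk_package_tree(pkg_obj, NULL, my_pkg_callback, NULL);
+ */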
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_error, acpi_exception, acpi_warning, acpi_info
+ *
+ * PARAMETERS:  module_name         - Caller's module name (for error output)
+ *              line_number         - Caller's line number (for error output)
+ *              Format              - Printf format string + additional args
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print message with module/line/version info
+ *
+ ******************************************************************************/
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_error(const char *module_name, u32 line_number, const char *format, ...)
+{
+	va_list args;
+
+	acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
+
+	va_start(args, format);
+	acpi_os_vprintf(format, args);
+	acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+	va_end(args);
+}
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_exception(const char *module_name,
+	       u32 line_number, acpi_status status, const char *format, ...)
+{
+	va_list args;
+
+	acpi_os_printf("ACPI Exception (%s-%04d): %s, ", module_name,
+		       line_number, acpi_format_exception(status));
+
+	va_start(args, format);
+	acpi_os_vprintf(format, args);
+	acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+	va_end(args);
+}
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_warning(const char *module_name, u32 line_number, const char *format, ...)
+{
+	va_list args;
+
+	acpi_os_printf("ACPI Warning (%s-%04d): ", module_name, line_number);
+
+	va_start(args, format);
+	acpi_os_vprintf(format, args);
+	acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+	va_end(args);
+}
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_info(const char *module_name, u32 line_number, const char *format, ...)
+{
+	va_list args;
+
+	/*
+	 * The module_name, line_number, and ACPICA version are omitted;
+	 * they are not needed for info output
+	 */
+	acpi_os_printf("ACPI: ");
+
+	va_start(args, format);
+	acpi_os_vprintf(format, args);
+	acpi_os_printf("\n");
+	va_end(args);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_error)
+ACPI_EXPORT_SYMBOL(acpi_exception)
+ACPI_EXPORT_SYMBOL(acpi_warning)
+ACPI_EXPORT_SYMBOL(acpi_info)
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
new file mode 100644
index 0000000..14eb52c
--- /dev/null
+++ b/drivers/acpi/acpica/utmutex.c
@@ -0,0 +1,342 @@
+/*******************************************************************************
+ *
+ * Module Name: utmutex - local mutex support
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utmutex")
+
+/* Local prototypes */
+static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id);
+
+static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_mutex_initialize
+ *
+ * PARAMETERS:  None.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create the system mutex objects.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_mutex_initialize(void)
+{
+	u32 i;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ut_mutex_initialize);
+
+	/*
+	 * Create each of the predefined mutex objects
+	 */
+	for (i = 0; i < ACPI_NUM_MUTEX; i++) {
+		status = acpi_ut_create_mutex(i);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/* Create the spinlocks for use at interrupt level */
+
+	spin_lock_init(acpi_gbl_gpe_lock);
+	spin_lock_init(acpi_gbl_hardware_lock);
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_mutex_terminate
+ *
+ * PARAMETERS:  None.
+ *
+ * RETURN:      None.
+ *
+ * DESCRIPTION: Delete all of the system mutex objects.
+ *
+ ******************************************************************************/
+
+void acpi_ut_mutex_terminate(void)
+{
+	u32 i;
+
+	ACPI_FUNCTION_TRACE(ut_mutex_terminate);
+
+	/*
+	 * Delete each predefined mutex object
+	 */
+	for (i = 0; i < ACPI_NUM_MUTEX; i++) {
+		(void)acpi_ut_delete_mutex(i);
+	}
+
+	/* Delete the spinlocks */
+
+	acpi_os_delete_lock(acpi_gbl_gpe_lock);
+	acpi_os_delete_lock(acpi_gbl_hardware_lock);
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_create_mutex
+ *
+ * PARAMETERS:  mutex_id        - ID of the mutex to be created
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a mutex object.
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE_U32(ut_create_mutex, mutex_id);
+
+	if (mutex_id > ACPI_MAX_MUTEX) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	if (!acpi_gbl_mutex_info[mutex_id].mutex) {
+		status =
+		    acpi_os_create_mutex(&acpi_gbl_mutex_info[mutex_id].mutex);
+		acpi_gbl_mutex_info[mutex_id].thread_id =
+		    ACPI_MUTEX_NOT_ACQUIRED;
+		acpi_gbl_mutex_info[mutex_id].use_count = 0;
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_delete_mutex
+ *
+ * PARAMETERS:  mutex_id        - ID of the mutex to be deleted
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Delete a mutex object.
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
+{
+
+	ACPI_FUNCTION_TRACE_U32(ut_delete_mutex, mutex_id);
+
+	if (mutex_id > ACPI_MAX_MUTEX) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	acpi_os_delete_mutex(acpi_gbl_mutex_info[mutex_id].mutex);
+
+	acpi_gbl_mutex_info[mutex_id].mutex = NULL;
+	acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_acquire_mutex
+ *
+ * PARAMETERS:  mutex_id        - ID of the mutex to be acquired
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Acquire a mutex object.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
+{
+	acpi_status status;
+	acpi_thread_id this_thread_id;
+
+	ACPI_FUNCTION_NAME(ut_acquire_mutex);
+
+	if (mutex_id > ACPI_MAX_MUTEX) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	this_thread_id = acpi_os_get_thread_id();
+
+#ifdef ACPI_MUTEX_DEBUG
+	{
+		u32 i;
+		/*
+		 * Mutex debug code, for internal debugging only.
+		 *
+		 * Deadlock prevention.  Check if this thread owns any mutexes of value
+		 * greater than or equal to this one.  If so, the thread has violated
+		 * the mutex ordering rule.  This indicates a coding error somewhere in
+		 * the ACPI subsystem code.
+		 */
+		for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
+			if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
+				if (i == mutex_id) {
+					ACPI_ERROR((AE_INFO,
+						    "Mutex [%s] already acquired by this thread [%X]",
+						    acpi_ut_get_mutex_name
+						    (mutex_id),
+						    this_thread_id));
+
+					return (AE_ALREADY_ACQUIRED);
+				}
+
+				ACPI_ERROR((AE_INFO,
+					    "Invalid acquire order: Thread %X owns [%s], wants [%s]",
+					    this_thread_id,
+					    acpi_ut_get_mutex_name(i),
+					    acpi_ut_get_mutex_name(mutex_id)));
+
+				return (AE_ACQUIRE_DEADLOCK);
+			}
+		}
+	}
+#endif
+
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+			  "Thread %lX attempting to acquire Mutex [%s]\n",
+			  (unsigned long)this_thread_id,
+			  acpi_ut_get_mutex_name(mutex_id)));
+
+	status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
+				       ACPI_WAIT_FOREVER);
+	if (ACPI_SUCCESS(status)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+				  "Thread %lX acquired Mutex [%s]\n",
+				  (unsigned long)this_thread_id,
+				  acpi_ut_get_mutex_name(mutex_id)));
+
+		acpi_gbl_mutex_info[mutex_id].use_count++;
+		acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
+	} else {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"Thread %lX could not acquire Mutex [%X]",
+				(unsigned long)this_thread_id, mutex_id));
+	}
+
+	return (status);
+}
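The debug check above enforces a strict ordering rule: a thread may acquire predefined mutexes only in ascending ID order, and must release them in the reverse order. A minimal sketch of a compliant caller, assuming ACPI_MTX_INTERPRETER carries a lower ID than ACPI_MTX_NAMESPACE as in the ACPICA headers of this era (the function name is hypothetical):

	/* Illustrative only: honor the ascending-ID acquire order */
	acpi_status example_locked_operation(void)
	{
		acpi_status status;

		status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);	/* lower ID first */
		if (ACPI_FAILURE(status)) {
			return (status);
		}

		status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);	/* higher ID second */
		if (ACPI_FAILURE(status)) {
			(void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
			return (status);
		}

		/* ... critical section ... */

		(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);	/* reverse order on release */
		(void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
		return (AE_OK);
	}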
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_release_mutex
+ *
+ * PARAMETERS:  mutex_id        - ID of the mutex to be released
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Release a mutex object.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
+{
+	acpi_thread_id this_thread_id;
+
+	ACPI_FUNCTION_NAME(ut_release_mutex);
+
+	this_thread_id = acpi_os_get_thread_id();
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+			  "Thread %lX releasing Mutex [%s]\n",
+			  (unsigned long)this_thread_id,
+			  acpi_ut_get_mutex_name(mutex_id)));
+
+	if (mutex_id > ACPI_MAX_MUTEX) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	/*
+	 * Mutex must be acquired in order to release it!
+	 */
+	if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
+		ACPI_ERROR((AE_INFO,
+			    "Mutex [%X] is not acquired, cannot release",
+			    mutex_id));
+
+		return (AE_NOT_ACQUIRED);
+	}
+#ifdef ACPI_MUTEX_DEBUG
+	{
+		u32 i;
+		/*
+		 * Mutex debug code, for internal debugging only.
+		 *
+		 * Deadlock prevention.  Check if this thread owns any mutexes of value
+		 * greater than this one.  If so, the thread has violated the mutex
+		 * ordering rule.  This indicates a coding error somewhere in
+		 * the ACPI subsystem code.
+		 */
+		for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
+			if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
+				if (i == mutex_id) {
+					continue;
+				}
+
+				ACPI_ERROR((AE_INFO,
+					    "Invalid release order: owns [%s], releasing [%s]",
+					    acpi_ut_get_mutex_name(i),
+					    acpi_ut_get_mutex_name(mutex_id)));
+
+				return (AE_RELEASE_DEADLOCK);
+			}
+		}
+	}
+#endif
+
+	/* Mark unlocked FIRST */
+
+	acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+
+	acpi_os_release_mutex(acpi_gbl_mutex_info[mutex_id].mutex);
+	return (AE_OK);
+}
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
new file mode 100644
index 0000000..fd5ea75
--- /dev/null
+++ b/drivers/acpi/acpica/utobject.c
@@ -0,0 +1,677 @@
+/******************************************************************************
+ *
+ * Module Name: utobject - ACPI object create/delete/size/cache routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utobject")
+
+/* Local prototypes */
+static acpi_status
+acpi_ut_get_simple_object_size(union acpi_operand_object *obj,
+			       acpi_size * obj_length);
+
+static acpi_status
+acpi_ut_get_package_object_size(union acpi_operand_object *obj,
+				acpi_size * obj_length);
+
+static acpi_status
+acpi_ut_get_element_length(u8 object_type,
+			   union acpi_operand_object *source_object,
+			   union acpi_generic_state *state, void *context);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_create_internal_object_dbg
+ *
+ * PARAMETERS:  module_name         - Source file name of caller
+ *              line_number         - Line number of caller
+ *              component_id        - Component type of caller
+ *              Type                - ACPI Type of the new object
+ *
+ * RETURN:      A new internal object, null on failure
+ *
+ * DESCRIPTION: Create and initialize a new internal object.
+ *
+ * NOTE:        We always allocate the worst-case object descriptor because
+ *              these objects are cached, and we want them to be
+ *              one-size-satisfies-any-request.  This in itself may not be
+ *              the most memory efficient, but the efficiency of the object
+ *              cache should more than make up for this!
+ *
+ ******************************************************************************/
+
+union acpi_operand_object *acpi_ut_create_internal_object_dbg(const char
+							      *module_name,
+							      u32 line_number,
+							      u32 component_id,
+							      acpi_object_type
+							      type)
+{
+	union acpi_operand_object *object;
+	union acpi_operand_object *second_object;
+
+	ACPI_FUNCTION_TRACE_STR(ut_create_internal_object_dbg,
+				acpi_ut_get_type_name(type));
+
+	/* Allocate the raw object descriptor */
+
+	object =
+	    acpi_ut_allocate_object_desc_dbg(module_name, line_number,
+					     component_id);
+	if (!object) {
+		return_PTR(NULL);
+	}
+
+	switch (type) {
+	case ACPI_TYPE_REGION:
+	case ACPI_TYPE_BUFFER_FIELD:
+	case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+		/* These types require a secondary object */
+
+		second_object = acpi_ut_allocate_object_desc_dbg(module_name,
+								 line_number,
+								 component_id);
+		if (!second_object) {
+			acpi_ut_delete_object_desc(object);
+			return_PTR(NULL);
+		}
+
+		second_object->common.type = ACPI_TYPE_LOCAL_EXTRA;
+		second_object->common.reference_count = 1;
+
+		/* Link the second object to the first */
+
+		object->common.next_object = second_object;
+		break;
+
+	default:
+		/* All others have no secondary object */
+		break;
+	}
+
+	/* Save the object type in the object descriptor */
+
+	object->common.type = (u8) type;
+
+	/* Init the reference count */
+
+	object->common.reference_count = 1;
+
+	/* Any per-type initialization should go here */
+
+	return_PTR(object);
+}
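In normal use this routine is reached through the acpi_ut_create_internal_object() wrapper macro, which supplies the module name, line number, and component ID automatically. A hedged fragment showing the wrapper and the secondary object created for the region case (the surrounding control flow is illustrative):

	/* Illustrative only: the wrapper macro fills in the debug parameters */
	union acpi_operand_object *obj;

	obj = acpi_ut_create_internal_object(ACPI_TYPE_REGION);
	if (obj) {
		/* For regions, common.next_object points at the ACPI_TYPE_LOCAL_EXTRA secondary */

		acpi_ut_remove_reference(obj);	/* drops the initial reference count of 1 */
	}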
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_create_package_object
+ *
+ * PARAMETERS:  Count               - Number of package elements
+ *
+ * RETURN:      Pointer to a new Package object, null on failure
+ *
+ * DESCRIPTION: Create a fully initialized package object
+ *
+ ******************************************************************************/
+
+union acpi_operand_object *acpi_ut_create_package_object(u32 count)
+{
+	union acpi_operand_object *package_desc;
+	union acpi_operand_object **package_elements;
+
+	ACPI_FUNCTION_TRACE_U32(ut_create_package_object, count);
+
+	/* Create a new Package object */
+
+	package_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
+	if (!package_desc) {
+		return_PTR(NULL);
+	}
+
+	/*
+	 * Create the element array. Count+1 allows the array to be null
+	 * terminated.
+	 */
+	package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size) count +
+						 1) * sizeof(void *));
+	if (!package_elements) {
+		acpi_ut_remove_reference(package_desc);
+		return_PTR(NULL);
+	}
+
+	package_desc->package.count = count;
+	package_desc->package.elements = package_elements;
+	return_PTR(package_desc);
+}
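Because the element array is allocated with Count+1 entries and zeroed, the slot past the last element is always NULL. A hedged sketch of a caller filling the package (error handling omitted; the helper name is hypothetical):

	/* Illustrative only: build a two-element package of integer objects */
	union acpi_operand_object *example_build_package(void)
	{
		union acpi_operand_object *pkg;
		u32 i;

		pkg = acpi_ut_create_package_object(2);
		if (!pkg) {
			return (NULL);
		}

		for (i = 0; i < pkg->package.count; i++) {
			pkg->package.elements[i] =
			    acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
		}

		/* elements[2] remains NULL, terminating the array */
		return (pkg);
	}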
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_create_buffer_object
+ *
+ * PARAMETERS:  buffer_size            - Size of buffer to be created
+ *
+ * RETURN:      Pointer to a new Buffer object, null on failure
+ *
+ * DESCRIPTION: Create a fully initialized buffer object
+ *
+ ******************************************************************************/
+
+union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size)
+{
+	union acpi_operand_object *buffer_desc;
+	u8 *buffer = NULL;
+
+	ACPI_FUNCTION_TRACE_U32(ut_create_buffer_object, buffer_size);
+
+	/* Create a new Buffer object */
+
+	buffer_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
+	if (!buffer_desc) {
+		return_PTR(NULL);
+	}
+
+	/* Create an actual buffer only if size > 0 */
+
+	if (buffer_size > 0) {
+
+		/* Allocate the actual buffer */
+
+		buffer = ACPI_ALLOCATE_ZEROED(buffer_size);
+		if (!buffer) {
+			ACPI_ERROR((AE_INFO, "Could not allocate size %X",
+				    (u32) buffer_size));
+			acpi_ut_remove_reference(buffer_desc);
+			return_PTR(NULL);
+		}
+	}
+
+	/* Complete buffer object initialization */
+
+	buffer_desc->buffer.flags |= AOPOBJ_DATA_VALID;
+	buffer_desc->buffer.pointer = buffer;
+	buffer_desc->buffer.length = (u32) buffer_size;
+
+	/* Return the new buffer descriptor */
+
+	return_PTR(buffer_desc);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_create_string_object
+ *
+ * PARAMETERS:  string_size         - Size of string to be created. Does not
+ *                                    include the NULL terminator; it is added
+ *                                    automatically.
+ *
+ * RETURN:      Pointer to a new String object
+ *
+ * DESCRIPTION: Create a fully initialized string object
+ *
+ ******************************************************************************/
+
+union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
+{
+	union acpi_operand_object *string_desc;
+	char *string;
+
+	ACPI_FUNCTION_TRACE_U32(ut_create_string_object, string_size);
+
+	/* Create a new String object */
+
+	string_desc = acpi_ut_create_internal_object(ACPI_TYPE_STRING);
+	if (!string_desc) {
+		return_PTR(NULL);
+	}
+
+	/*
+	 * Allocate the actual string buffer -- (Size + 1) for NULL terminator.
+	 * NOTE: Zero-length strings are NULL terminated
+	 */
+	string = ACPI_ALLOCATE_ZEROED(string_size + 1);
+	if (!string) {
+		ACPI_ERROR((AE_INFO, "Could not allocate size %X",
+			    (u32) string_size));
+		acpi_ut_remove_reference(string_desc);
+		return_PTR(NULL);
+	}
+
+	/* Complete string object initialization */
+
+	string_desc->string.pointer = string;
+	string_desc->string.length = (u32) string_size;
+
+	/* Return the new string descriptor */
+
+	return_PTR(string_desc);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_valid_internal_object
+ *
+ * PARAMETERS:  Object              - Object to be validated
+ *
+ * RETURN:      TRUE if object is valid, FALSE otherwise
+ *
+ * DESCRIPTION: Validate a pointer to be a union acpi_operand_object
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_internal_object(void *object)
+{
+
+	ACPI_FUNCTION_NAME(ut_valid_internal_object);
+
+	/* Check for a null pointer */
+
+	if (!object) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "**** Null Object Ptr\n"));
+		return (FALSE);
+	}
+
+	/* Check the descriptor type field */
+
+	switch (ACPI_GET_DESCRIPTOR_TYPE(object)) {
+	case ACPI_DESC_TYPE_OPERAND:
+
+		/* The object appears to be a valid union acpi_operand_object    */
+
+		return (TRUE);
+
+	default:
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "%p is not an ACPI operand obj [%s]\n",
+				  object, acpi_ut_get_descriptor_name(object)));
+		break;
+	}
+
+	return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_allocate_object_desc_dbg
+ *
+ * PARAMETERS:  module_name         - Caller's module name (for error output)
+ *              line_number         - Caller's line number (for error output)
+ *              component_id        - Caller's component ID (for error output)
+ *
+ * RETURN:      Pointer to newly allocated object descriptor.  Null on error
+ *
+ * DESCRIPTION: Allocate a new object descriptor.  Gracefully handle
+ *              error conditions.
+ *
+ ******************************************************************************/
+
+void *acpi_ut_allocate_object_desc_dbg(const char *module_name,
+				       u32 line_number, u32 component_id)
+{
+	union acpi_operand_object *object;
+
+	ACPI_FUNCTION_TRACE(ut_allocate_object_desc_dbg);
+
+	object = acpi_os_acquire_object(acpi_gbl_operand_cache);
+	if (!object) {
+		ACPI_ERROR((module_name, line_number,
+			    "Could not allocate an object descriptor"));
+
+		return_PTR(NULL);
+	}
+
+	/* Mark the descriptor type */
+
+	memset(object, 0, sizeof(union acpi_operand_object));
+	ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_OPERAND);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p Size %X\n",
+			  object, (u32) sizeof(union acpi_operand_object)));
+
+	return_PTR(object);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_delete_object_desc
+ *
+ * PARAMETERS:  Object          - An Acpi internal object to be deleted
+ *
+ * RETURN:      None.
+ *
+ * DESCRIPTION: Free an ACPI object descriptor or add it to the object cache
+ *
+ ******************************************************************************/
+
+void acpi_ut_delete_object_desc(union acpi_operand_object *object)
+{
+	ACPI_FUNCTION_TRACE_PTR(ut_delete_object_desc, object);
+
+	/* Object must be a union acpi_operand_object    */
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) {
+		ACPI_ERROR((AE_INFO,
+			    "%p is not an ACPI Operand object [%s]", object,
+			    acpi_ut_get_descriptor_name(object)));
+		return_VOID;
+	}
+
+	(void)acpi_os_release_object(acpi_gbl_operand_cache, object);
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_simple_object_size
+ *
+ * PARAMETERS:  internal_object    - An ACPI operand object
+ *              obj_length         - Where the length is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to determine the space required to
+ *              contain a simple object for return to an external user.
+ *
+ *              The length includes the object structure plus any additional
+ *              needed space.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
+			       acpi_size * obj_length)
+{
+	acpi_size length;
+	acpi_size size;
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object);
+
+	/*
+	 * Handle a null object (Could be an uninitialized package
+	 * element -- which is legal)
+	 */
+	if (!internal_object) {
+		*obj_length = sizeof(union acpi_object);
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Start with the length of the Acpi object */
+
+	length = sizeof(union acpi_object);
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(internal_object) == ACPI_DESC_TYPE_NAMED) {
+
+		/* Object is a named object (reference), just return the length */
+
+		*obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * The final length depends on the object type
+	 * Strings and Buffers are packed right up against the parent object and
+	 * must be accessed bytewise or there may be alignment problems on
+	 * certain processors
+	 */
+	switch (ACPI_GET_OBJECT_TYPE(internal_object)) {
+	case ACPI_TYPE_STRING:
+
+		length += (acpi_size) internal_object->string.length + 1;
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		length += (acpi_size) internal_object->buffer.length;
+		break;
+
+	case ACPI_TYPE_INTEGER:
+	case ACPI_TYPE_PROCESSOR:
+	case ACPI_TYPE_POWER:
+
+		/* No extra data for these types */
+
+		break;
+
+	case ACPI_TYPE_LOCAL_REFERENCE:
+
+		switch (internal_object->reference.class) {
+		case ACPI_REFCLASS_NAME:
+
+			/*
+			 * Get the actual length of the full pathname to this object.
+			 * The reference will be converted to the pathname to the object
+			 */
+			size =
+			    acpi_ns_get_pathname_length(internal_object->
+							reference.node);
+			if (!size) {
+				return_ACPI_STATUS(AE_BAD_PARAMETER);
+			}
+
+			length += ACPI_ROUND_UP_TO_NATIVE_WORD(size);
+			break;
+
+		default:
+
+			/*
+			 * No other reference opcodes are supported.
+			 * Notably, Locals and Args are not supported, but this may be
+			 * required eventually.
+			 */
+			ACPI_ERROR((AE_INFO,
+				    "Cannot convert to external object - "
+				    "unsupported Reference Class [%s] %X in object %p",
+				    acpi_ut_get_reference_name(internal_object),
+				    internal_object->reference.class,
+				    internal_object));
+			status = AE_TYPE;
+			break;
+		}
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Cannot convert to external object - "
+			    "unsupported type [%s] %X in object %p",
+			    acpi_ut_get_object_type_name(internal_object),
+			    ACPI_GET_OBJECT_TYPE(internal_object),
+			    internal_object));
+		status = AE_TYPE;
+		break;
+	}
+
+	/*
+	 * Account for the space required by the object rounded up to the next
+	 * multiple of the machine word size.  This keeps each object aligned
+	 * on a machine word boundary. (preventing alignment faults on some
+	 * machines.)
+	 */
+	*obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
+	return_ACPI_STATUS(status);
+}
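A worked example of the final rounding step, with hypothetical sizes (sizeof(union acpi_object) varies by platform; an 8-byte native word is assumed):

	/*
	 * Assume sizeof(union acpi_object) == 32 and an 8-byte machine word:
	 *
	 *   String of 6 chars:   32 + 6 + 1 = 39  -> rounded up to 40
	 *   Buffer of 16 bytes:  32 + 16    = 48  -> already word-aligned
	 *   Integer:             32         = 32  -> no extra data needed
	 */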
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_element_length
+ *
+ * PARAMETERS:  acpi_pkg_callback
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get the length of one package element.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_get_element_length(u8 object_type,
+			   union acpi_operand_object *source_object,
+			   union acpi_generic_state *state, void *context)
+{
+	acpi_status status = AE_OK;
+	struct acpi_pkg_info *info = (struct acpi_pkg_info *)context;
+	acpi_size object_space;
+
+	switch (object_type) {
+	case ACPI_COPY_TYPE_SIMPLE:
+
+		/*
+		 * Simple object - just get the size (Null object/entry is handled
+		 * here also) and sum it into the running package length
+		 */
+		status =
+		    acpi_ut_get_simple_object_size(source_object,
+						   &object_space);
+		if (ACPI_FAILURE(status)) {
+			return (status);
+		}
+
+		info->length += object_space;
+		break;
+
+	case ACPI_COPY_TYPE_PACKAGE:
+
+		/* Package object - nothing much to do here, let the walk handle it */
+
+		info->num_packages++;
+		state->pkg.this_target_obj = NULL;
+		break;
+
+	default:
+
+		/* No other types allowed */
+
+		return (AE_BAD_PARAMETER);
+	}
+
+	return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_package_object_size
+ *
+ * PARAMETERS:  internal_object     - An ACPI internal object
+ *              obj_length          - Where the length is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to determine the space required to
+ *              contain a package object for return to an external user.
+ *
+ *              This is moderately complex since a package contains other
+ *              objects including packages.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_get_package_object_size(union acpi_operand_object *internal_object,
+				acpi_size * obj_length)
+{
+	acpi_status status;
+	struct acpi_pkg_info info;
+
+	ACPI_FUNCTION_TRACE_PTR(ut_get_package_object_size, internal_object);
+
+	info.length = 0;
+	info.object_space = 0;
+	info.num_packages = 1;
+
+	status = acpi_ut_walk_package_tree(internal_object, NULL,
+					   acpi_ut_get_element_length, &info);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * We have handled all of the objects in all levels of the package.
+	 * Just add the length of the package objects themselves.
+	 * Round up to the next machine word.
+	 */
+	info.length += ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)) *
+	    (acpi_size) info.num_packages;
+
+	/* Return the total package length */
+
+	*obj_length = info.length;
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_object_size
+ *
+ * PARAMETERS:  internal_object     - An ACPI internal object
+ *              obj_length          - Where the length will be returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to determine the space required to
+ *              contain an object for return to an API user.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_get_object_size(union acpi_operand_object *internal_object,
+			acpi_size * obj_length)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_ENTRY();
+
+	if ((ACPI_GET_DESCRIPTOR_TYPE(internal_object) ==
+	     ACPI_DESC_TYPE_OPERAND)
+	    && (ACPI_GET_OBJECT_TYPE(internal_object) == ACPI_TYPE_PACKAGE)) {
+		status =
+		    acpi_ut_get_package_object_size(internal_object,
+						    obj_length);
+	} else {
+		status =
+		    acpi_ut_get_simple_object_size(internal_object, obj_length);
+	}
+
+	return (status);
+}
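A hedged sketch of the sizing pattern this routine supports, e.g. when preparing an output buffer before converting an internal object to an external acpi_object (the function name is hypothetical; error handling is kept minimal):

	/* Illustrative only: size first, then allocate the external buffer */
	static acpi_status example_prepare_buffer(union acpi_operand_object *obj,
						  struct acpi_buffer *ret_buffer)
	{
		acpi_status status;
		acpi_size length;

		status = acpi_ut_get_object_size(obj, &length);
		if (ACPI_FAILURE(status)) {
			return (status);
		}

		ret_buffer->pointer = ACPI_ALLOCATE_ZEROED(length);
		if (!ret_buffer->pointer) {
			return (AE_NO_MEMORY);
		}

		ret_buffer->length = length;
		return (AE_OK);
	}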
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
new file mode 100644
index 0000000..91b7c00
--- /dev/null
+++ b/drivers/acpi/acpica/utresrc.c
@@ -0,0 +1,616 @@
+/*******************************************************************************
+ *
+ * Module Name: utresrc - Resource management utilities
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "amlresrc.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utresrc")
+#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
+/*
+ * Strings used to decode resource descriptors.
+ * Used by both the disassembler and the debugger resource dump routines
+ */
+const char *acpi_gbl_bm_decode[] = {
+	"NotBusMaster",
+	"BusMaster"
+};
+
+const char *acpi_gbl_config_decode[] = {
+	"0 - Good Configuration",
+	"1 - Acceptable Configuration",
+	"2 - Suboptimal Configuration",
+	"3 - ***Invalid Configuration***",
+};
+
+const char *acpi_gbl_consume_decode[] = {
+	"ResourceProducer",
+	"ResourceConsumer"
+};
+
+const char *acpi_gbl_dec_decode[] = {
+	"PosDecode",
+	"SubDecode"
+};
+
+const char *acpi_gbl_he_decode[] = {
+	"Level",
+	"Edge"
+};
+
+const char *acpi_gbl_io_decode[] = {
+	"Decode10",
+	"Decode16"
+};
+
+const char *acpi_gbl_ll_decode[] = {
+	"ActiveHigh",
+	"ActiveLow"
+};
+
+const char *acpi_gbl_max_decode[] = {
+	"MaxNotFixed",
+	"MaxFixed"
+};
+
+const char *acpi_gbl_mem_decode[] = {
+	"NonCacheable",
+	"Cacheable",
+	"WriteCombining",
+	"Prefetchable"
+};
+
+const char *acpi_gbl_min_decode[] = {
+	"MinNotFixed",
+	"MinFixed"
+};
+
+const char *acpi_gbl_mtp_decode[] = {
+	"AddressRangeMemory",
+	"AddressRangeReserved",
+	"AddressRangeACPI",
+	"AddressRangeNVS"
+};
+
+const char *acpi_gbl_rng_decode[] = {
+	"InvalidRanges",
+	"NonISAOnlyRanges",
+	"ISAOnlyRanges",
+	"EntireRange"
+};
+
+const char *acpi_gbl_rw_decode[] = {
+	"ReadOnly",
+	"ReadWrite"
+};
+
+const char *acpi_gbl_shr_decode[] = {
+	"Exclusive",
+	"Shared"
+};
+
+const char *acpi_gbl_siz_decode[] = {
+	"Transfer8",
+	"Transfer8_16",
+	"Transfer16",
+	"InvalidSize"
+};
+
+const char *acpi_gbl_trs_decode[] = {
+	"DenseTranslation",
+	"SparseTranslation"
+};
+
+const char *acpi_gbl_ttp_decode[] = {
+	"TypeStatic",
+	"TypeTranslation"
+};
+
+const char *acpi_gbl_typ_decode[] = {
+	"Compatibility",
+	"TypeA",
+	"TypeB",
+	"TypeF"
+};
+
+#endif
+
+/*
+ * Base sizes of the raw AML resource descriptors, indexed by resource type.
+ * Zero indicates a reserved (and therefore invalid) resource type.
+ */
+const u8 acpi_gbl_resource_aml_sizes[] = {
+	/* Small descriptors */
+
+	0,
+	0,
+	0,
+	0,
+	ACPI_AML_SIZE_SMALL(struct aml_resource_irq),
+	ACPI_AML_SIZE_SMALL(struct aml_resource_dma),
+	ACPI_AML_SIZE_SMALL(struct aml_resource_start_dependent),
+	ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent),
+	ACPI_AML_SIZE_SMALL(struct aml_resource_io),
+	ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io),
+	0,
+	0,
+	0,
+	0,
+	ACPI_AML_SIZE_SMALL(struct aml_resource_vendor_small),
+	ACPI_AML_SIZE_SMALL(struct aml_resource_end_tag),
+
+	/* Large descriptors */
+
+	0,
+	ACPI_AML_SIZE_LARGE(struct aml_resource_memory24),
+	ACPI_AML_SIZE_LARGE(struct aml_resource_generic_register),
+	0,
+	ACPI_AML_SIZE_LARGE(struct aml_resource_vendor_large),
+	ACPI_AML_SIZE_LARGE(struct aml_resource_memory32),
+	ACPI_AML_SIZE_LARGE(struct aml_resource_fixed_memory32),
+	ACPI_AML_SIZE_LARGE(struct aml_resource_address32),
+	ACPI_AML_SIZE_LARGE(struct aml_resource_address16),
+	ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq),
+	ACPI_AML_SIZE_LARGE(struct aml_resource_address64),
+	ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64)
+};
+
+/*
+ * Resource types, used to validate the resource length field.
+ * The length of fixed-length types must match exactly, variable
+ * lengths must meet the minimum required length, etc.
+ * Zero indicates a reserved (and therefore invalid) resource type.
+ */
+static const u8 acpi_gbl_resource_types[] = {
+	/* Small descriptors */
+
+	0,
+	0,
+	0,
+	0,
+	ACPI_SMALL_VARIABLE_LENGTH,
+	ACPI_FIXED_LENGTH,
+	ACPI_SMALL_VARIABLE_LENGTH,
+	ACPI_FIXED_LENGTH,
+	ACPI_FIXED_LENGTH,
+	ACPI_FIXED_LENGTH,
+	0,
+	0,
+	0,
+	0,
+	ACPI_VARIABLE_LENGTH,
+	ACPI_FIXED_LENGTH,
+
+	/* Large descriptors */
+
+	0,
+	ACPI_FIXED_LENGTH,
+	ACPI_FIXED_LENGTH,
+	0,
+	ACPI_VARIABLE_LENGTH,
+	ACPI_FIXED_LENGTH,
+	ACPI_FIXED_LENGTH,
+	ACPI_VARIABLE_LENGTH,
+	ACPI_VARIABLE_LENGTH,
+	ACPI_VARIABLE_LENGTH,
+	ACPI_VARIABLE_LENGTH,
+	ACPI_FIXED_LENGTH
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_walk_aml_resources
+ *
+ * PARAMETERS:  Aml             - Pointer to the raw AML resource template
+ *              aml_length      - Length of the entire template
+ *              user_function   - Called once for each descriptor found. If
+ *                                NULL, a pointer to the end_tag is returned
+ *              Context         - Passed to user_function
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Walk a raw AML resource list(buffer). User function called
+ *              once for each resource found.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_walk_aml_resources(u8 * aml,
+			   acpi_size aml_length,
+			   acpi_walk_aml_callback user_function, void **context)
+{
+	acpi_status status;
+	u8 *end_aml;
+	u8 resource_index;
+	u32 length;
+	u32 offset = 0;
+
+	ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
+
+	/* The absolute minimum resource template is one end_tag descriptor */
+
+	if (aml_length < sizeof(struct aml_resource_end_tag)) {
+		return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+	}
+
+	/* Point to the end of the resource template buffer */
+
+	end_aml = aml + aml_length;
+
+	/* Walk the byte list, abort on any invalid descriptor type or length */
+
+	while (aml < end_aml) {
+
+		/* Validate the Resource Type and Resource Length */
+
+		status = acpi_ut_validate_resource(aml, &resource_index);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+
+		/* Get the length of this descriptor */
+
+		length = acpi_ut_get_descriptor_length(aml);
+
+		/* Invoke the user function */
+
+		if (user_function) {
+			status =
+			    user_function(aml, length, offset, resource_index,
+					  context);
+			if (ACPI_FAILURE(status)) {
+				return (status);
+			}
+		}
+
+		/* An end_tag descriptor terminates this resource template */
+
+		if (acpi_ut_get_resource_type(aml) ==
+		    ACPI_RESOURCE_NAME_END_TAG) {
+			/*
+			 * There must be at least one more byte in the buffer for
+			 * the 2nd byte of the end_tag
+			 */
+			if ((aml + 1) >= end_aml) {
+				return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+			}
+
+			/* Return the pointer to the end_tag if requested */
+
+			if (!user_function) {
+				*context = aml;
+			}
+
+			/* Normal exit */
+
+			return_ACPI_STATUS(AE_OK);
+		}
+
+		aml += length;
+		offset += length;
+	}
+
+	/* Did not find an end_tag descriptor */
+
+	return (AE_AML_NO_RESOURCE_END_TAG);
+}
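The user_function must match the callback shape used by the call above (aml, length, offset, resource_index, context). A hedged sketch of a callback that simply counts descriptors (the names and the context cast are illustrative):

	/* Illustrative only: count every descriptor in a resource template */
	static acpi_status
	example_count_descriptors(u8 *aml, u32 length, u32 offset,
				  u8 resource_index, void **context)
	{
		u32 *count = ACPI_CAST_PTR(u32, context);

		(*count)++;
		return (AE_OK);
	}

	/* In a caller: pass the address of a u32 as the context */
	u32 count = 0;
	status = acpi_ut_walk_aml_resources(template_buffer, template_length,
					    example_count_descriptors,
					    ACPI_CAST_PTR(void *, &count));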
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_validate_resource
+ *
+ * PARAMETERS:  Aml             - Pointer to the raw AML resource descriptor
+ *              return_index    - Where the resource index is returned. NULL
+ *                                if the index is not required.
+ *
+ * RETURN:      Status, and optionally the Index into the global resource tables
+ *
+ * DESCRIPTION: Validate an AML resource descriptor by checking the Resource
+ *              Type and Resource Length. Returns an index into the global
+ *              resource information/dispatch tables for later use.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
+{
+	u8 resource_type;
+	u8 resource_index;
+	acpi_rs_length resource_length;
+	acpi_rs_length minimum_resource_length;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/*
+	 * 1) Validate the resource_type field (Byte 0)
+	 */
+	resource_type = ACPI_GET8(aml);
+
+	/*
+	 * Byte 0 contains the descriptor name (Resource Type)
+	 * Examine the large/small bit in the resource header
+	 */
+	if (resource_type & ACPI_RESOURCE_NAME_LARGE) {
+
+		/* Verify the large resource type (name) against the max */
+
+		if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) {
+			return (AE_AML_INVALID_RESOURCE_TYPE);
+		}
+
+		/*
+		 * Large Resource Type -- bits 6:0 contain the name
+		 * Translate range 0x80-0x8B to index range 0x10-0x1B
+		 */
+		resource_index = (u8) (resource_type - 0x70);
+	} else {
+		/*
+		 * Small Resource Type -- bits 6:3 contain the name
+		 * Shift range to index range 0x00-0x0F
+		 */
+		resource_index = (u8)
+		    ((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3);
+	}
+
+	/* Check validity of the resource type, zero indicates name is invalid */
+
+	if (!acpi_gbl_resource_types[resource_index]) {
+		return (AE_AML_INVALID_RESOURCE_TYPE);
+	}
+
+	/*
+	 * 2) Validate the resource_length field. This ensures that the length
+	 *    is at least reasonable, and guarantees that it is non-zero.
+	 */
+	resource_length = acpi_ut_get_resource_length(aml);
+	minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index];
+
+	/* Validate based upon the type of resource - fixed length or variable */
+
+	switch (acpi_gbl_resource_types[resource_index]) {
+	case ACPI_FIXED_LENGTH:
+
+		/* Fixed length resource, length must match exactly */
+
+		if (resource_length != minimum_resource_length) {
+			return (AE_AML_BAD_RESOURCE_LENGTH);
+		}
+		break;
+
+	case ACPI_VARIABLE_LENGTH:
+
+		/* Variable length resource, length must be at least the minimum */
+
+		if (resource_length < minimum_resource_length) {
+			return (AE_AML_BAD_RESOURCE_LENGTH);
+		}
+		break;
+
+	case ACPI_SMALL_VARIABLE_LENGTH:
+
+		/* Small variable length resource, length can be (Min) or (Min-1) */
+
+		if ((resource_length > minimum_resource_length) ||
+		    (resource_length < (minimum_resource_length - 1))) {
+			return (AE_AML_BAD_RESOURCE_LENGTH);
+		}
+		break;
+
+	default:
+
+		/* Shouldn't happen (because of validation earlier), but be sure */
+
+		return (AE_AML_INVALID_RESOURCE_TYPE);
+	}
+
+	/* Optionally return the resource table index */
+
+	if (return_index) {
+		*return_index = resource_index;
+	}
+
+	return (AE_OK);
+}
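Two worked examples of the index translation above, using descriptor tag values from the ACPI specification (illustrative only):

	/*
	 * Small IRQ descriptor, byte 0 = 0x22 (name 0x04, length 2):
	 *   (0x22 & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3 = 0x04
	 *   -> index 4: aml_resource_irq in the size/type tables above
	 *
	 * Large 32-bit fixed memory descriptor, byte 0 = 0x86:
	 *   0x86 - 0x70 = 0x16
	 *   -> index 22: aml_resource_fixed_memory32 in the tables above
	 */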
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_resource_type
+ *
+ * PARAMETERS:  Aml             - Pointer to the raw AML resource descriptor
+ *
+ * RETURN:      The Resource Type with no extraneous bits (except the
+ *              Large/Small descriptor bit -- this is left alone)
+ *
+ * DESCRIPTION: Extract the Resource Type/Name from the first byte of
+ *              a resource descriptor.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_get_resource_type(void *aml)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	/*
+	 * Byte 0 contains the descriptor name (Resource Type)
+	 * Examine the large/small bit in the resource header
+	 */
+	if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
+
+		/* Large Resource Type -- bits 6:0 contain the name */
+
+		return (ACPI_GET8(aml));
+	} else {
+		/* Small Resource Type -- bits 6:3 contain the name */
+
+		return ((u8) (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_SMALL_MASK));
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_resource_length
+ *
+ * PARAMETERS:  Aml             - Pointer to the raw AML resource descriptor
+ *
+ * RETURN:      Byte Length
+ *
+ * DESCRIPTION: Get the "Resource Length" of a raw AML descriptor. By
+ *              definition, this does not include the size of the descriptor
+ *              header or the length field itself.
+ *
+ ******************************************************************************/
+
+u16 acpi_ut_get_resource_length(void *aml)
+{
+	acpi_rs_length resource_length;
+
+	ACPI_FUNCTION_ENTRY();
+
+	/*
+	 * Byte 0 contains the descriptor name (Resource Type)
+	 * Examine the large/small bit in the resource header
+	 */
+	if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
+
+		/* Large Resource type -- bytes 1-2 contain the 16-bit length */
+
+		ACPI_MOVE_16_TO_16(&resource_length, ACPI_ADD_PTR(u8, aml, 1));
+
+	} else {
+		/* Small Resource type -- bits 2:0 of byte 0 contain the length */
+
+		resource_length = (u16) (ACPI_GET8(aml) &
+					 ACPI_RESOURCE_NAME_SMALL_LENGTH_MASK);
+	}
+
+	return (resource_length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_resource_header_length
+ *
+ * PARAMETERS:  Aml             - Pointer to the raw AML resource descriptor
+ *
+ * RETURN:      Length of the AML header (depends on large/small descriptor)
+ *
+ * DESCRIPTION: Get the length of the header for this resource.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_get_resource_header_length(void *aml)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	/* Examine the large/small bit in the resource header */
+
+	if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
+		return (sizeof(struct aml_resource_large_header));
+	} else {
+		return (sizeof(struct aml_resource_small_header));
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_descriptor_length
+ *
+ * PARAMETERS:  Aml             - Pointer to the raw AML resource descriptor
+ *
+ * RETURN:      Byte length
+ *
+ * DESCRIPTION: Get the total byte length of a raw AML descriptor, including the
+ *              length of the descriptor header and the length field itself.
+ *              Used to walk descriptor lists.
+ *
+ ******************************************************************************/
+
+u32 acpi_ut_get_descriptor_length(void *aml)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	/*
+	 * Get the Resource Length (does not include header length) and add
+	 * the header length (depends on if this is a small or large resource)
+	 */
+	return (acpi_ut_get_resource_length(aml) +
+		acpi_ut_get_resource_header_length(aml));
+}
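A worked example of the total-length arithmetic, with byte values from the ACPI specification (illustrative only):

	/*
	 * end_tag, byte 0 = 0x79:
	 *   resource length 1 + 1-byte small header = 2 bytes total
	 *
	 * fixed_memory32, byte 0 = 0x86, bytes 1-2 hold a length of 9:
	 *   resource length 9 + 3-byte large header = 12 bytes total
	 */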
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_resource_end_tag
+ *
+ * PARAMETERS:  obj_desc        - The resource template buffer object
+ *              end_tag         - Where the pointer to the end_tag is returned
+ *
+ * RETURN:      Status, pointer to the end tag
+ *
+ * DESCRIPTION: Find the end_tag resource descriptor in an AML resource template
+ *              Note: allows a buffer length of zero.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_get_resource_end_tag(union acpi_operand_object * obj_desc,
+			     u8 ** end_tag)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ut_get_resource_end_tag);
+
+	/* Allow a buffer length of zero */
+
+	if (!obj_desc->buffer.length) {
+		*end_tag = obj_desc->buffer.pointer;
+		return_ACPI_STATUS(AE_OK);
+	}
+
+	/* Validate the template and get a pointer to the end_tag */
+
+	status = acpi_ut_walk_aml_resources(obj_desc->buffer.pointer,
+					    obj_desc->buffer.length, NULL,
+					    (void **)end_tag);
+
+	return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
new file mode 100644
index 0000000..0440c95
--- /dev/null
+++ b/drivers/acpi/acpica/utstate.c
@@ -0,0 +1,347 @@
+/*******************************************************************************
+ *
+ * Module Name: utstate - state object support procedures
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utstate")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_create_pkg_state_and_push
+ *
+ * PARAMETERS:  Object          - Object to be added to the new state
+ *              Action          - Increment/Decrement
+ *              state_list      - List the state will be added to
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a new state and push it
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ut_create_pkg_state_and_push(void *internal_object,
+				  void *external_object,
+				  u16 index,
+				  union acpi_generic_state **state_list)
+{
+	union acpi_generic_state *state;
+
+	ACPI_FUNCTION_ENTRY();
+
+	state =
+	    acpi_ut_create_pkg_state(internal_object, external_object, index);
+	if (!state) {
+		return (AE_NO_MEMORY);
+	}
+
+	acpi_ut_push_generic_state(state_list, state);
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_push_generic_state
+ *
+ * PARAMETERS:  list_head           - Head of the state stack
+ *              State               - State object to push
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Push a state object onto a state stack
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_push_generic_state(union acpi_generic_state **list_head,
+			   union acpi_generic_state *state)
+{
+	ACPI_FUNCTION_TRACE(ut_push_generic_state);
+
+	/* Push the state object onto the front of the list (stack) */
+
+	state->common.next = *list_head;
+	*list_head = state;
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_pop_generic_state
+ *
+ * PARAMETERS:  list_head           - Head of the state stack
+ *
+ * RETURN:      The popped state object
+ *
+ * DESCRIPTION: Pop a state object from a state stack
+ *
+ ******************************************************************************/
+
+union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
+						    **list_head)
+{
+	union acpi_generic_state *state;
+
+	ACPI_FUNCTION_TRACE(ut_pop_generic_state);
+
+	/* Remove the state object at the head of the list (stack) */
+
+	state = *list_head;
+	if (state) {
+
+		/* Update the list head */
+
+		*list_head = state->common.next;
+	}
+
+	return_PTR(state);
+}
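A minimal sketch of the LIFO discipline these two routines provide, using the create/delete helpers defined elsewhere in this file (the surrounding control flow is illustrative):

	/* Illustrative only: push states onto a local stack, then unwind */
	union acpi_generic_state *stack = NULL;
	union acpi_generic_state *state;

	state = acpi_ut_create_generic_state();
	if (state) {
		acpi_ut_push_generic_state(&stack, state);
	}

	/* ... later, pop in reverse order and return each state to the cache ... */

	while ((state = acpi_ut_pop_generic_state(&stack)) != NULL) {
		acpi_ut_delete_generic_state(state);
	}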
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_create_generic_state
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      The new state object. NULL on failure.
+ *
+ * DESCRIPTION: Create a generic state object.  Attempt to obtain one from
+ *              the global state cache. If none is available, create a new one.
+ *
+ ******************************************************************************/
+
+union acpi_generic_state *acpi_ut_create_generic_state(void)
+{
+	union acpi_generic_state *state;
+
+	ACPI_FUNCTION_ENTRY();
+
+	state = acpi_os_acquire_object(acpi_gbl_state_cache);
+	if (state) {
+
+		/* Initialize */
+		memset(state, 0, sizeof(union acpi_generic_state));
+		state->common.descriptor_type = ACPI_DESC_TYPE_STATE;
+	}
+
+	return (state);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_create_thread_state
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      New Thread State. NULL on failure
+ *
+ * DESCRIPTION: Create a "Thread State" - a flavor of the generic state used
+ *              to track per-thread info during method execution
+ *
+ ******************************************************************************/
+
+struct acpi_thread_state *acpi_ut_create_thread_state(void)
+{
+	union acpi_generic_state *state;
+
+	ACPI_FUNCTION_TRACE(ut_create_thread_state);
+
+	/* Create the generic state object */
+
+	state = acpi_ut_create_generic_state();
+	if (!state) {
+		return_PTR(NULL);
+	}
+
+	/* Init fields specific to the update struct */
+
+	state->common.descriptor_type = ACPI_DESC_TYPE_STATE_THREAD;
+	state->thread.thread_id = acpi_os_get_thread_id();
+
+	/* Check for invalid thread ID - zero is very bad, it will break things */
+
+	if (!state->thread.thread_id) {
+		ACPI_ERROR((AE_INFO, "Invalid zero ID from AcpiOsGetThreadId"));
+		state->thread.thread_id = (acpi_thread_id) 1;
+	}
+
+	return_PTR((struct acpi_thread_state *)state);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_create_update_state
+ *
+ * PARAMETERS:  Object          - Initial Object to be installed in the state
+ *              Action          - Update action to be performed
+ *
+ * RETURN:      New state object, null on failure
+ *
+ * DESCRIPTION: Create an "Update State" - a flavor of the generic state used
+ *              to update reference counts and delete complex objects such
+ *              as packages.
+ *
+ ******************************************************************************/
+
+union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
+						      *object, u16 action)
+{
+	union acpi_generic_state *state;
+
+	ACPI_FUNCTION_TRACE_PTR(ut_create_update_state, object);
+
+	/* Create the generic state object */
+
+	state = acpi_ut_create_generic_state();
+	if (!state) {
+		return_PTR(NULL);
+	}
+
+	/* Init fields specific to the update struct */
+
+	state->common.descriptor_type = ACPI_DESC_TYPE_STATE_UPDATE;
+	state->update.object = object;
+	state->update.value = action;
+
+	return_PTR(state);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_create_pkg_state
+ *
+ * PARAMETERS:  Object          - Initial Object to be installed in the state
+ *              Action          - Update action to be performed
+ *
+ * RETURN:      New state object, null on failure
+ *
+ * DESCRIPTION: Create a "Package State"
+ *
+ ******************************************************************************/
+
+union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
+						   void *external_object,
+						   u16 index)
+{
+	union acpi_generic_state *state;
+
+	ACPI_FUNCTION_TRACE_PTR(ut_create_pkg_state, internal_object);
+
+	/* Create the generic state object */
+
+	state = acpi_ut_create_generic_state();
+	if (!state) {
+		return_PTR(NULL);
+	}
+
+	/* Init fields specific to the update struct */
+
+	state->common.descriptor_type = ACPI_DESC_TYPE_STATE_PACKAGE;
+	state->pkg.source_object = (union acpi_operand_object *)internal_object;
+	state->pkg.dest_object = external_object;
+	state->pkg.index = index;
+	state->pkg.num_packages = 1;
+
+	return_PTR(state);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_create_control_state
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      New state object, null on failure
+ *
+ * DESCRIPTION: Create a "Control State" - a flavor of the generic state used
+ *              to support nested IF/WHILE constructs in the AML.
+ *
+ ******************************************************************************/
+
+union acpi_generic_state *acpi_ut_create_control_state(void)
+{
+	union acpi_generic_state *state;
+
+	ACPI_FUNCTION_TRACE(ut_create_control_state);
+
+	/* Create the generic state object */
+
+	state = acpi_ut_create_generic_state();
+	if (!state) {
+		return_PTR(NULL);
+	}
+
+	/* Init fields specific to the control struct */
+
+	state->common.descriptor_type = ACPI_DESC_TYPE_STATE_CONTROL;
+	state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING;
+
+	return_PTR(state);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_delete_generic_state
+ *
+ * PARAMETERS:  State               - The state object to be deleted
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Release a state object to the state cache. NULL state objects
+ *              are ignored.
+ *
+ ******************************************************************************/
+
+void acpi_ut_delete_generic_state(union acpi_generic_state *state)
+{
+	ACPI_FUNCTION_TRACE(ut_delete_generic_state);
+
+	/* Ignore null state */
+
+	if (state) {
+		(void)acpi_os_release_object(acpi_gbl_state_cache, state);
+	}
+	return_VOID;
+}
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
new file mode 100644
index 0000000..078a2272
--- /dev/null
+++ b/drivers/acpi/acpica/utxface.c
@@ -0,0 +1,512 @@
+/******************************************************************************
+ *
+ * Module Name: utxface - External interfaces for "global" ACPI functions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+#include "acdebug.h"
+#include "actables.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utxface")
+
+#ifndef ACPI_ASL_COMPILER
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_initialize_subsystem
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Initializes all global variables.  This is the first function
+ *              called, so any early initialization belongs here.
+ *
+ ******************************************************************************/
+acpi_status __init acpi_initialize_subsystem(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_initialize_subsystem);
+
+	acpi_gbl_startup_flags = ACPI_SUBSYSTEM_INITIALIZE;
+	ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace());
+
+	/* Initialize the OS-Dependent layer */
+
+	status = acpi_os_initialize();
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status, "During OSL initialization"));
+		return_ACPI_STATUS(status);
+	}
+
+	/* Initialize all globals used by the subsystem */
+
+	status = acpi_ut_init_globals();
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"During initialization of globals"));
+		return_ACPI_STATUS(status);
+	}
+
+	/* Create the default mutex objects */
+
+	status = acpi_ut_mutex_initialize();
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"During Global Mutex creation"));
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Initialize the namespace manager and
+	 * the root of the namespace tree
+	 */
+	status = acpi_ns_root_initialize();
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"During Namespace initialization"));
+		return_ACPI_STATUS(status);
+	}
+
+	/* If configured, initialize the AML debugger */
+
+	ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_enable_subsystem
+ *
+ * PARAMETERS:  Flags           - Init/enable Options
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Completes the subsystem initialization including hardware.
+ *              Puts system into ACPI mode if it isn't already.
+ *
+ ******************************************************************************/
+acpi_status acpi_enable_subsystem(u32 flags)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
+
+	/* Enable ACPI mode */
+
+	if (!(flags & ACPI_NO_ACPI_ENABLE)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "[Init] Going into ACPI mode\n"));
+
+		acpi_gbl_original_mode = acpi_hw_get_mode();
+
+		status = acpi_enable();
+		if (ACPI_FAILURE(status)) {
+			ACPI_WARNING((AE_INFO, "AcpiEnable failed"));
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/*
+	 * Obtain a permanent mapping for the FACS. This is required for the
+	 * Global Lock and the Firmware Waking Vector
+	 */
+	status = acpi_tb_initialize_facs();
+	if (ACPI_FAILURE(status)) {
+		ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Install the default op_region handlers. These are installed unless
+	 * other handlers have already been installed via the
+	 * install_address_space_handler interface.
+	 */
+	if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "[Init] Installing default address space handlers\n"));
+
+		status = acpi_ev_install_region_handlers();
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/*
+	 * Initialize ACPI Event handling (Fixed and General Purpose)
+	 *
+	 * Note1: We must have the hardware and events initialized before we can
+	 * execute any control methods safely. Any control method can require
+	 * ACPI hardware support, so the hardware must be fully initialized before
+	 * any method execution!
+	 *
+	 * Note2: Fixed events are initialized and enabled here. GPEs are
+	 * initialized, but cannot be enabled until after the hardware is
+	 * completely initialized (SCI and global_lock activated)
+	 */
+	if (!(flags & ACPI_NO_EVENT_INIT)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "[Init] Initializing ACPI events\n"));
+
+		status = acpi_ev_initialize_events();
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/*
+	 * Install the SCI handler and Global Lock handler. This completes the
+	 * hardware initialization.
+	 */
+	if (!(flags & ACPI_NO_HANDLER_INIT)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "[Init] Installing SCI/GL handlers\n"));
+
+		status = acpi_ev_install_xrupt_handlers();
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_initialize_objects
+ *
+ * PARAMETERS:  Flags           - Init/enable Options
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Completes namespace initialization by initializing device
+ *              objects and executing AML code for Regions, buffers, etc.
+ *
+ ******************************************************************************/
+acpi_status acpi_initialize_objects(u32 flags)
+{
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_TRACE(acpi_initialize_objects);
+
+	/*
+	 * Run all _REG methods
+	 *
+	 * Note: Any objects accessed by the _REG methods will be automatically
+	 * initialized, even if they contain executable AML (see the call to
+	 * acpi_ns_initialize_objects below).
+	 */
+	if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "[Init] Executing _REG OpRegion methods\n"));
+
+		status = acpi_ev_initialize_op_regions();
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/*
+	 * Initialize the objects that remain uninitialized. This runs the
+	 * executable AML that may be part of the declaration of these objects:
+	 * operation_regions, buffer_fields, Buffers, and Packages.
+	 */
+	if (!(flags & ACPI_NO_OBJECT_INIT)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "[Init] Completing Initialization of ACPI Objects\n"));
+
+		status = acpi_ns_initialize_objects();
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/*
+	 * Initialize all device objects in the namespace. This runs the device
+	 * _STA and _INI methods.
+	 */
+	if (!(flags & ACPI_NO_DEVICE_INIT)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "[Init] Initializing ACPI Devices\n"));
+
+		status = acpi_ns_initialize_devices();
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
+
+	/*
+	 * Complete the GPE initialization for the GPE blocks defined in the FADT
+	 * (GPE block 0 and 1).
+	 *
+	 * Note1: This is where the _PRW methods are executed for the GPEs. These
+	 * methods can only be executed after the SCI and Global Lock handlers are
+	 * installed and initialized.
+	 *
+	 * Note2: Currently, there seems to be no need to run the _REG methods
+	 * before execution of the _PRW methods and enabling of the GPEs.
+	 */
+	if (!(flags & ACPI_NO_EVENT_INIT)) {
+		status = acpi_ev_install_fadt_gpes();
+		if (ACPI_FAILURE(status))
+			return (status);
+	}
+
+	/*
+	 * Empty the caches (delete the cached objects) on the assumption that
+	 * the table load filled them up more than they will be at runtime --
+	 * thus wasting non-paged memory.
+	 */
+	status = acpi_purge_cached_objects();
+
+	acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK;
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
+
+#endif
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_terminate
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Shutdown the ACPI subsystem.  Release all resources.
+ *
+ ******************************************************************************/
+acpi_status acpi_terminate(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_terminate);
+
+	/* Terminate the AML Debugger if present */
+
+	ACPI_DEBUGGER_EXEC(acpi_gbl_db_terminate_threads = TRUE);
+
+	/* Shutdown and free all resources */
+
+	acpi_ut_subsystem_shutdown();
+
+	/* Free the mutex objects */
+
+	acpi_ut_mutex_terminate();
+
+#ifdef ACPI_DEBUGGER
+
+	/* Shut down the debugger */
+
+	acpi_db_terminate();
+#endif
+
+	/* Now we can shutdown the OS-dependent layer */
+
+	status = acpi_os_terminate();
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_terminate)
+#ifndef ACPI_ASL_COMPILER
+#ifdef ACPI_FUTURE_USAGE
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_subsystem_status
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status of the ACPI subsystem
+ *
+ * DESCRIPTION: Other drivers that use the ACPI subsystem should call this
+ *              before making any other calls, to ensure the subsystem
+ *              initialized successfully.
+ *
+ ******************************************************************************/
+acpi_status acpi_subsystem_status(void)
+{
+
+	if (acpi_gbl_startup_flags & ACPI_INITIALIZED_OK) {
+		return (AE_OK);
+	} else {
+		return (AE_ERROR);
+	}
+}
+
+ACPI_EXPORT_SYMBOL(acpi_subsystem_status)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_system_info
+ *
+ * PARAMETERS:  out_buffer      - A buffer to receive the resources for the
+ *                                device
+ *
+ * RETURN:      Status          - the status of the call
+ *
+ * DESCRIPTION: This function is called to get information about the current
+ *              state of the ACPI subsystem.  It will return system information
+ *              in the out_buffer.
+ *
+ *              If the function fails an appropriate status will be returned
+ *              and the value of out_buffer is undefined.
+ *
+ ******************************************************************************/
+acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
+{
+	struct acpi_system_info *info_ptr;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_get_system_info);
+
+	/* Parameter validation */
+
+	status = acpi_ut_validate_buffer(out_buffer);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Validate/Allocate/Clear caller buffer */
+
+	status =
+	    acpi_ut_initialize_buffer(out_buffer,
+				      sizeof(struct acpi_system_info));
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/*
+	 * Populate the return buffer
+	 */
+	info_ptr = (struct acpi_system_info *)out_buffer->pointer;
+
+	info_ptr->acpi_ca_version = ACPI_CA_VERSION;
+
+	/* System flags (ACPI capabilities) */
+
+	info_ptr->flags = ACPI_SYS_MODE_ACPI;
+
+	/* Timer resolution - 24 or 32 bits  */
+
+	if (acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) {
+		info_ptr->timer_resolution = 24;
+	} else {
+		info_ptr->timer_resolution = 32;
+	}
+
+	/* Clear the reserved fields */
+
+	info_ptr->reserved1 = 0;
+	info_ptr->reserved2 = 0;
+
+	/* Current debug levels */
+
+	info_ptr->debug_layer = acpi_dbg_layer;
+	info_ptr->debug_level = acpi_dbg_level;
+
+	return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_system_info)
+
+/*****************************************************************************
+ *
+ * FUNCTION:    acpi_install_initialization_handler
+ *
+ * PARAMETERS:  Handler             - Callback procedure
+ *              Function            - Not (currently) used, see below
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install an initialization handler
+ *
+ * TBD: When a second function is added, must save the Function also.
+ *
+ ****************************************************************************/
+acpi_status
+acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
+{
+
+	if (!handler) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	if (acpi_gbl_init_handler) {
+		return (AE_ALREADY_EXISTS);
+	}
+
+	acpi_gbl_init_handler = handler;
+	return AE_OK;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler)
+#endif				/*  ACPI_FUTURE_USAGE  */
+/*****************************************************************************
+ *
+ * FUNCTION:    acpi_purge_cached_objects
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Empty all caches (delete the cached objects)
+ *
+ ****************************************************************************/
+acpi_status acpi_purge_cached_objects(void)
+{
+	ACPI_FUNCTION_TRACE(acpi_purge_cached_objects);
+
+	(void)acpi_os_purge_cache(acpi_gbl_state_cache);
+	(void)acpi_os_purge_cache(acpi_gbl_operand_cache);
+	(void)acpi_os_purge_cache(acpi_gbl_ps_node_cache);
+	(void)acpi_os_purge_cache(acpi_gbl_ps_node_ext_cache);
+	return_ACPI_STATUS(AE_OK);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_purge_cached_objects)
+#endif
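
The utxface.c interfaces added above are the OS-facing entry points for bringing the ACPICA core up and down. As a rough orientation only, the sketch below shows the order an OS embedder is expected to call them in; it is illustrative, not the Linux boot path itself. Table loading (acpi_initialize_tables()/acpi_load_tables(), provided by the table manager rather than this file) sits between subsystem and hardware initialization, and passing 0 for the flags simply leaves all of the ACPI_NO_* skip options clear, i.e. requests the full initialization path shown in the code.

#include <acpi/acpi.h>

static acpi_status example_acpica_bring_up(void)
{
	acpi_status status;

	status = acpi_initialize_subsystem();	/* globals, mutexes, namespace root */
	if (ACPI_FAILURE(status))
		return status;

	/*
	 * Table loading (acpi_initialize_tables()/acpi_load_tables(),
	 * part of the table manager, not this file) happens here.
	 */

	status = acpi_enable_subsystem(0);	/* ACPI mode, FACS, events, SCI/GL handlers */
	if (ACPI_FAILURE(status))
		return status;

	status = acpi_initialize_objects(0);	/* _REG, object init, _STA/_INI, FADT GPEs */
	if (ACPI_FAILURE(status))
		return status;

	return AE_OK;	/* acpi_terminate() is the symmetric tear-down */
}
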
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 1423b0c..65132f9 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -471,7 +471,7 @@
 
 static int acpi_battery_update(struct acpi_battery *battery)
 {
-	int result;
+	int result, old_present = acpi_battery_present(battery);
 	result = acpi_battery_get_status(battery);
 	if (result)
 		return result;
@@ -482,7 +482,8 @@
 		return 0;
 	}
 #endif
-	if (!battery->update_time) {
+	if (!battery->update_time ||
+	    old_present != acpi_battery_present(battery)) {
 		result = acpi_battery_get_info(battery);
 		if (result)
 			return result;
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
index 307963b..332fe4b 100644
--- a/drivers/acpi/cm_sbs.c
+++ b/drivers/acpi/cm_sbs.c
@@ -27,9 +27,6 @@
 #include <linux/seq_file.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
-#include <acpi/acmacros.h>
-#include <acpi/actypes.h>
-#include <acpi/acutils.h>
 
 ACPI_MODULE_NAME("cm_sbs");
 #define ACPI_AC_CLASS		"ac_adapter"
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c
index c483968..20223cb 100644
--- a/drivers/acpi/debug.c
+++ b/drivers/acpi/debug.c
@@ -9,7 +9,6 @@
 #include <linux/moduleparam.h>
 #include <asm/uaccess.h>
 #include <acpi/acpi_drivers.h>
-#include <acpi/acglobal.h>
 
 #define _COMPONENT		ACPI_SYSTEM_COMPONENT
 ACPI_MODULE_NAME("debug");
diff --git a/drivers/acpi/dispatcher/Makefile b/drivers/acpi/dispatcher/Makefile
deleted file mode 100644
index eb7e602..0000000
--- a/drivers/acpi/dispatcher/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for all Linux ACPI interpreter subdirectories
-#
-
-obj-y := dsfield.o   dsmthdat.o  dsopcode.o  dswexec.o  dswscope.o \
-	 dsmethod.o  dsobject.o  dsutils.o   dswload.o  dswstate.o \
-	 dsinit.o
-
-EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/dispatcher/dsfield.c b/drivers/acpi/dispatcher/dsfield.c
deleted file mode 100644
index f988a5e..0000000
--- a/drivers/acpi/dispatcher/dsfield.c
+++ /dev/null
@@ -1,649 +0,0 @@
-/******************************************************************************
- *
- * Module Name: dsfield - Dispatcher field routines
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/amlcode.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acparser.h>
-
-#define _COMPONENT          ACPI_DISPATCHER
-ACPI_MODULE_NAME("dsfield")
-
-/* Local prototypes */
-static acpi_status
-acpi_ds_get_field_names(struct acpi_create_field_info *info,
-			struct acpi_walk_state *walk_state,
-			union acpi_parse_object *arg);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_create_buffer_field
- *
- * PARAMETERS:  Op                  - Current parse op (create_xXField)
- *              walk_state          - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute the create_field operators:
- *              create_bit_field_op,
- *              create_byte_field_op,
- *              create_word_field_op,
- *              create_dword_field_op,
- *              create_qword_field_op,
- *              create_field_op     (all of which define a field in a buffer)
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_create_buffer_field(union acpi_parse_object *op,
-			    struct acpi_walk_state *walk_state)
-{
-	union acpi_parse_object *arg;
-	struct acpi_namespace_node *node;
-	acpi_status status;
-	union acpi_operand_object *obj_desc;
-	union acpi_operand_object *second_desc = NULL;
-	u32 flags;
-
-	ACPI_FUNCTION_TRACE(ds_create_buffer_field);
-
-	/*
-	 * Get the name_string argument (name of the new buffer_field)
-	 */
-	if (op->common.aml_opcode == AML_CREATE_FIELD_OP) {
-
-		/* For create_field, name is the 4th argument */
-
-		arg = acpi_ps_get_arg(op, 3);
-	} else {
-		/* For all other create_xXXField operators, name is the 3rd argument */
-
-		arg = acpi_ps_get_arg(op, 2);
-	}
-
-	if (!arg) {
-		return_ACPI_STATUS(AE_AML_NO_OPERAND);
-	}
-
-	if (walk_state->deferred_node) {
-		node = walk_state->deferred_node;
-		status = AE_OK;
-	} else {
-		/* Execute flag should always be set when this function is entered */
-
-		if (!(walk_state->parse_flags & ACPI_PARSE_EXECUTE)) {
-			return_ACPI_STATUS(AE_AML_INTERNAL);
-		}
-
-		/* Creating new namespace node, should not already exist */
-
-		flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
-		    ACPI_NS_ERROR_IF_FOUND;
-
-		/* Mark node temporary if we are executing a method */
-
-		if (walk_state->method_node) {
-			flags |= ACPI_NS_TEMPORARY;
-		}
-
-		/* Enter the name_string into the namespace */
-
-		status =
-		    acpi_ns_lookup(walk_state->scope_info,
-				   arg->common.value.string, ACPI_TYPE_ANY,
-				   ACPI_IMODE_LOAD_PASS1, flags, walk_state,
-				   &node);
-		if (ACPI_FAILURE(status)) {
-			ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/*
-	 * We could put the returned object (Node) on the object stack for later,
-	 * but for now, we will put it in the "op" object that the parser uses,
-	 * so we can get it again at the end of this scope.
-	 */
-	op->common.node = node;
-
-	/*
-	 * If there is no object attached to the node, this node was just created
-	 * and we need to create the field object. Otherwise, this was a lookup
-	 * of an existing node and we don't want to create the field object again.
-	 */
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (obj_desc) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/*
-	 * The Field definition is not fully parsed at this time.
-	 * (We must save the address of the AML for the buffer and index operands)
-	 */
-
-	/* Create the buffer field object */
-
-	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER_FIELD);
-	if (!obj_desc) {
-		status = AE_NO_MEMORY;
-		goto cleanup;
-	}
-
-	/*
-	 * Remember location in AML stream of the field unit opcode and operands --
-	 * since the buffer and index operands must be evaluated.
-	 */
-	second_desc = obj_desc->common.next_object;
-	second_desc->extra.aml_start = op->named.data;
-	second_desc->extra.aml_length = op->named.length;
-	obj_desc->buffer_field.node = node;
-
-	/* Attach constructed field descriptors to parent node */
-
-	status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_BUFFER_FIELD);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-      cleanup:
-
-	/* Remove local reference to the object */
-
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_get_field_names
- *
- * PARAMETERS:  Info            - create_field info structure
- *  `           walk_state      - Current method state
- *              Arg             - First parser arg for the field name list
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Process all named fields in a field declaration.  Names are
- *              entered into the namespace.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ds_get_field_names(struct acpi_create_field_info *info,
-			struct acpi_walk_state *walk_state,
-			union acpi_parse_object *arg)
-{
-	acpi_status status;
-	acpi_integer position;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
-
-	/* First field starts at bit zero */
-
-	info->field_bit_position = 0;
-
-	/* Process all elements in the field list (of parse nodes) */
-
-	while (arg) {
-		/*
-		 * Three types of field elements are handled:
-		 * 1) Offset - specifies a bit offset
-		 * 2) access_as - changes the access mode
-		 * 3) Name - Enters a new named field into the namespace
-		 */
-		switch (arg->common.aml_opcode) {
-		case AML_INT_RESERVEDFIELD_OP:
-
-			position = (acpi_integer) info->field_bit_position
-			    + (acpi_integer) arg->common.value.size;
-
-			if (position > ACPI_UINT32_MAX) {
-				ACPI_ERROR((AE_INFO,
-					    "Bit offset within field too large (> 0xFFFFFFFF)"));
-				return_ACPI_STATUS(AE_SUPPORT);
-			}
-
-			info->field_bit_position = (u32) position;
-			break;
-
-		case AML_INT_ACCESSFIELD_OP:
-
-			/*
-			 * Get a new access_type and access_attribute -- to be used for all
-			 * field units that follow, until field end or another access_as
-			 * keyword.
-			 *
-			 * In field_flags, preserve the flag bits other than the
-			 * ACCESS_TYPE bits
-			 */
-			info->field_flags = (u8)
-			    ((info->
-			      field_flags & ~(AML_FIELD_ACCESS_TYPE_MASK)) |
-			     ((u8) ((u32) arg->common.value.integer >> 8)));
-
-			info->attribute = (u8) (arg->common.value.integer);
-			break;
-
-		case AML_INT_NAMEDFIELD_OP:
-
-			/* Lookup the name, it should already exist */
-
-			status = acpi_ns_lookup(walk_state->scope_info,
-						(char *)&arg->named.name,
-						info->field_type,
-						ACPI_IMODE_EXECUTE,
-						ACPI_NS_DONT_OPEN_SCOPE,
-						walk_state, &info->field_node);
-			if (ACPI_FAILURE(status)) {
-				ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
-						     status);
-				return_ACPI_STATUS(status);
-			} else {
-				arg->common.node = info->field_node;
-				info->field_bit_length = arg->common.value.size;
-
-				/*
-				 * If there is no object attached to the node, this node was
-				 * just created and we need to create the field object.
-				 * Otherwise, this was a lookup of an existing node and we
-				 * don't want to create the field object again.
-				 */
-				if (!acpi_ns_get_attached_object
-				    (info->field_node)) {
-					status = acpi_ex_prep_field_value(info);
-					if (ACPI_FAILURE(status)) {
-						return_ACPI_STATUS(status);
-					}
-				}
-			}
-
-			/* Keep track of bit position for the next field */
-
-			position = (acpi_integer) info->field_bit_position
-			    + (acpi_integer) arg->common.value.size;
-
-			if (position > ACPI_UINT32_MAX) {
-				ACPI_ERROR((AE_INFO,
-					    "Field [%4.4s] bit offset too large (> 0xFFFFFFFF)",
-					    ACPI_CAST_PTR(char,
-							  &info->field_node->
-							  name)));
-				return_ACPI_STATUS(AE_SUPPORT);
-			}
-
-			info->field_bit_position += info->field_bit_length;
-			break;
-
-		default:
-
-			ACPI_ERROR((AE_INFO,
-				    "Invalid opcode in field list: %X",
-				    arg->common.aml_opcode));
-			return_ACPI_STATUS(AE_AML_BAD_OPCODE);
-		}
-
-		arg = arg->common.next;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_create_field
- *
- * PARAMETERS:  Op              - Op containing the Field definition and args
- *              region_node     - Object for the containing Operation Region
- *  `           walk_state      - Current method state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a new field in the specified operation region
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_create_field(union acpi_parse_object *op,
-		     struct acpi_namespace_node *region_node,
-		     struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	union acpi_parse_object *arg;
-	struct acpi_create_field_info info;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_create_field, op);
-
-	/* First arg is the name of the parent op_region (must already exist) */
-
-	arg = op->common.value.arg;
-	if (!region_node) {
-		status =
-		    acpi_ns_lookup(walk_state->scope_info,
-				   arg->common.value.name, ACPI_TYPE_REGION,
-				   ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
-				   walk_state, &region_node);
-		if (ACPI_FAILURE(status)) {
-			ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/* Second arg is the field flags */
-
-	arg = arg->common.next;
-	info.field_flags = (u8) arg->common.value.integer;
-	info.attribute = 0;
-
-	/* Each remaining arg is a Named Field */
-
-	info.field_type = ACPI_TYPE_LOCAL_REGION_FIELD;
-	info.region_node = region_node;
-
-	status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_init_field_objects
- *
- * PARAMETERS:  Op              - Op containing the Field definition and args
- *  `           walk_state      - Current method state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: For each "Field Unit" name in the argument list that is
- *              part of the field declaration, enter the name into the
- *              namespace.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_init_field_objects(union acpi_parse_object *op,
-			   struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	union acpi_parse_object *arg = NULL;
-	struct acpi_namespace_node *node;
-	u8 type = 0;
-	u32 flags;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_init_field_objects, op);
-
-	/* Execute flag should always be set when this function is entered */
-
-	if (!(walk_state->parse_flags & ACPI_PARSE_EXECUTE)) {
-		if (walk_state->parse_flags & ACPI_PARSE_DEFERRED_OP) {
-
-			/* bank_field Op is deferred, just return OK */
-
-			return_ACPI_STATUS(AE_OK);
-		}
-
-		return_ACPI_STATUS(AE_AML_INTERNAL);
-	}
-
-	/*
-	 * Get the field_list argument for this opcode. This is the start of the
-	 * list of field elements.
-	 */
-	switch (walk_state->opcode) {
-	case AML_FIELD_OP:
-		arg = acpi_ps_get_arg(op, 2);
-		type = ACPI_TYPE_LOCAL_REGION_FIELD;
-		break;
-
-	case AML_BANK_FIELD_OP:
-		arg = acpi_ps_get_arg(op, 4);
-		type = ACPI_TYPE_LOCAL_BANK_FIELD;
-		break;
-
-	case AML_INDEX_FIELD_OP:
-		arg = acpi_ps_get_arg(op, 3);
-		type = ACPI_TYPE_LOCAL_INDEX_FIELD;
-		break;
-
-	default:
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Creating new namespace node(s), should not already exist */
-
-	flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
-	    ACPI_NS_ERROR_IF_FOUND;
-
-	/* Mark node(s) temporary if we are executing a method */
-
-	if (walk_state->method_node) {
-		flags |= ACPI_NS_TEMPORARY;
-	}
-
-	/*
-	 * Walk the list of entries in the field_list
-	 * Note: field_list can be of zero length. In this case, Arg will be NULL.
-	 */
-	while (arg) {
-		/*
-		 * Ignore OFFSET and ACCESSAS terms here; we are only interested in the
-		 * field names in order to enter them into the namespace.
-		 */
-		if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
-			status = acpi_ns_lookup(walk_state->scope_info,
-						(char *)&arg->named.name, type,
-						ACPI_IMODE_LOAD_PASS1, flags,
-						walk_state, &node);
-			if (ACPI_FAILURE(status)) {
-				ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
-						     status);
-				if (status != AE_ALREADY_EXISTS) {
-					return_ACPI_STATUS(status);
-				}
-
-				/* Name already exists, just ignore this error */
-
-				status = AE_OK;
-			}
-
-			arg->common.node = node;
-		}
-
-		/* Get the next field element in the list */
-
-		arg = arg->common.next;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_create_bank_field
- *
- * PARAMETERS:  Op              - Op containing the Field definition and args
- *              region_node     - Object for the containing Operation Region
- *              walk_state      - Current method state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a new bank field in the specified operation region
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_create_bank_field(union acpi_parse_object *op,
-			  struct acpi_namespace_node *region_node,
-			  struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	union acpi_parse_object *arg;
-	struct acpi_create_field_info info;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_create_bank_field, op);
-
-	/* First arg is the name of the parent op_region (must already exist) */
-
-	arg = op->common.value.arg;
-	if (!region_node) {
-		status =
-		    acpi_ns_lookup(walk_state->scope_info,
-				   arg->common.value.name, ACPI_TYPE_REGION,
-				   ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
-				   walk_state, &region_node);
-		if (ACPI_FAILURE(status)) {
-			ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/* Second arg is the Bank Register (Field) (must already exist) */
-
-	arg = arg->common.next;
-	status =
-	    acpi_ns_lookup(walk_state->scope_info, arg->common.value.string,
-			   ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
-			   ACPI_NS_SEARCH_PARENT, walk_state,
-			   &info.register_node);
-	if (ACPI_FAILURE(status)) {
-		ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * Third arg is the bank_value
-	 * This arg is a term_arg, not a constant
-	 * It will be evaluated later, by acpi_ds_eval_bank_field_operands
-	 */
-	arg = arg->common.next;
-
-	/* Fourth arg is the field flags */
-
-	arg = arg->common.next;
-	info.field_flags = (u8) arg->common.value.integer;
-
-	/* Each remaining arg is a Named Field */
-
-	info.field_type = ACPI_TYPE_LOCAL_BANK_FIELD;
-	info.region_node = region_node;
-
-	/*
-	 * Use Info.data_register_node to store bank_field Op
-	 * It's safe because data_register_node will never be used when create bank field
-	 * We store aml_start and aml_length in the bank_field Op for late evaluation
-	 * Used in acpi_ex_prep_field_value(Info)
-	 *
-	 * TBD: Or, should we add a field in struct acpi_create_field_info, like "void *ParentOp"?
-	 */
-	info.data_register_node = (struct acpi_namespace_node *)op;
-
-	status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_create_index_field
- *
- * PARAMETERS:  Op              - Op containing the Field definition and args
- *              region_node     - Object for the containing Operation Region
- *  `           walk_state      - Current method state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a new index field in the specified operation region
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_create_index_field(union acpi_parse_object *op,
-			   struct acpi_namespace_node *region_node,
-			   struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	union acpi_parse_object *arg;
-	struct acpi_create_field_info info;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_create_index_field, op);
-
-	/* First arg is the name of the Index register (must already exist) */
-
-	arg = op->common.value.arg;
-	status =
-	    acpi_ns_lookup(walk_state->scope_info, arg->common.value.string,
-			   ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
-			   ACPI_NS_SEARCH_PARENT, walk_state,
-			   &info.register_node);
-	if (ACPI_FAILURE(status)) {
-		ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
-		return_ACPI_STATUS(status);
-	}
-
-	/* Second arg is the data register (must already exist) */
-
-	arg = arg->common.next;
-	status =
-	    acpi_ns_lookup(walk_state->scope_info, arg->common.value.string,
-			   ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
-			   ACPI_NS_SEARCH_PARENT, walk_state,
-			   &info.data_register_node);
-	if (ACPI_FAILURE(status)) {
-		ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
-		return_ACPI_STATUS(status);
-	}
-
-	/* Next arg is the field flags */
-
-	arg = arg->common.next;
-	info.field_flags = (u8) arg->common.value.integer;
-
-	/* Each remaining arg is a Named Field */
-
-	info.field_type = ACPI_TYPE_LOCAL_INDEX_FIELD;
-	info.region_node = region_node;
-
-	status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
-
-	return_ACPI_STATUS(status);
-}
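
One detail worth noting in the removed acpi_ds_get_field_names() above: field bit offsets are accumulated in the wider acpi_integer type and checked against ACPI_UINT32_MAX before being stored back into the 32-bit field_bit_position, so a malformed field list cannot silently wrap. A standalone sketch of that overflow-guard pattern, using plain fixed-width kernel types instead of the ACPICA ones:

#include <linux/types.h>

/*
 * Returns 0 on success, -1 if the accumulated bit offset would not fit
 * in 32 bits (the AE_SUPPORT case in the removed code above).
 */
static int advance_bit_position(u32 *bit_position, u32 element_bits)
{
	u64 next = (u64)*bit_position + (u64)element_bits;

	if (next > 0xFFFFFFFFull)
		return -1;	/* offset too large for a 32-bit field position */

	*bit_position = (u32)next;
	return 0;
}
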
diff --git a/drivers/acpi/dispatcher/dsinit.c b/drivers/acpi/dispatcher/dsinit.c
deleted file mode 100644
index 949f7c7..0000000
--- a/drivers/acpi/dispatcher/dsinit.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/******************************************************************************
- *
- * Module Name: dsinit - Object initialization namespace walk
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acdispat.h>
-#include <acpi/acnamesp.h>
-#include <acpi/actables.h>
-
-#define _COMPONENT          ACPI_DISPATCHER
-ACPI_MODULE_NAME("dsinit")
-
-/* Local prototypes */
-static acpi_status
-acpi_ds_init_one_object(acpi_handle obj_handle,
-			u32 level, void *context, void **return_value);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_init_one_object
- *
- * PARAMETERS:  obj_handle      - Node for the object
- *              Level           - Current nesting level
- *              Context         - Points to a init info struct
- *              return_value    - Not used
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Callback from acpi_walk_namespace. Invoked for every object
- *              within the namespace.
- *
- *              Currently, the only objects that require initialization are:
- *              1) Methods
- *              2) Operation Regions
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ds_init_one_object(acpi_handle obj_handle,
-			u32 level, void *context, void **return_value)
-{
-	struct acpi_init_walk_info *info =
-	    (struct acpi_init_walk_info *)context;
-	struct acpi_namespace_node *node =
-	    (struct acpi_namespace_node *)obj_handle;
-	acpi_object_type type;
-	acpi_status status;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/*
-	 * We are only interested in NS nodes owned by the table that
-	 * was just loaded
-	 */
-	if (node->owner_id != info->owner_id) {
-		return (AE_OK);
-	}
-
-	info->object_count++;
-
-	/* And even then, we are only interested in a few object types */
-
-	type = acpi_ns_get_type(obj_handle);
-
-	switch (type) {
-	case ACPI_TYPE_REGION:
-
-		status = acpi_ds_initialize_region(obj_handle);
-		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"During Region initialization %p [%4.4s]",
-					obj_handle,
-					acpi_ut_get_node_name(obj_handle)));
-		}
-
-		info->op_region_count++;
-		break;
-
-	case ACPI_TYPE_METHOD:
-
-		info->method_count++;
-		break;
-
-	case ACPI_TYPE_DEVICE:
-
-		info->device_count++;
-		break;
-
-	default:
-		break;
-	}
-
-	/*
-	 * We ignore errors from above, and always return OK, since
-	 * we don't want to abort the walk on a single error.
-	 */
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_initialize_objects
- *
- * PARAMETERS:  table_desc      - Descriptor for parent ACPI table
- *              start_node      - Root of subtree to be initialized.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Walk the namespace starting at "StartNode" and perform any
- *              necessary initialization on the objects found therein
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_initialize_objects(u32 table_index,
-			   struct acpi_namespace_node * start_node)
-{
-	acpi_status status;
-	struct acpi_init_walk_info info;
-	struct acpi_table_header *table;
-	acpi_owner_id owner_id;
-
-	ACPI_FUNCTION_TRACE(ds_initialize_objects);
-
-	status = acpi_tb_get_owner_id(table_index, &owner_id);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "**** Starting initialization of namespace objects ****\n"));
-	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:"));
-
-	info.method_count = 0;
-	info.op_region_count = 0;
-	info.object_count = 0;
-	info.device_count = 0;
-	info.table_index = table_index;
-	info.owner_id = owner_id;
-
-	/* Walk entire namespace from the supplied root */
-
-	status = acpi_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
-				     acpi_ds_init_one_object, &info, NULL);
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
-	}
-
-	status = acpi_get_table_by_index(table_index, &table);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
-			      "\nTable [%4.4s](id %4.4X) - %hd Objects with %hd Devices %hd Methods %hd Regions\n",
-			      table->signature, owner_id, info.object_count,
-			      info.device_count, info.method_count,
-			      info.op_region_count));
-
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "%hd Methods, %hd Regions\n", info.method_count,
-			  info.op_region_count));
-
-	return_ACPI_STATUS(AE_OK);
-}
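
The removed acpi_ds_initialize_objects() is essentially a thin wrapper around acpi_walk_namespace(): a callback visits every node, skips nodes not owned by the newly loaded table, and tallies regions, methods and devices. The same walk interface is available to any ACPI code; below is a minimal, illustrative sketch that counts Device objects under the namespace root, using the same six-argument signature the removed code calls.

#include <acpi/acpi.h>

static acpi_status count_one_device(acpi_handle handle, u32 level,
				    void *context, void **return_value)
{
	u32 *count = context;

	(*count)++;
	return AE_OK;		/* never abort the walk for a single node */
}

static u32 count_devices(void)
{
	u32 count = 0;

	(void)acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				  ACPI_UINT32_MAX, count_one_device,
				  &count, NULL);
	return count;
}
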
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
deleted file mode 100644
index 279a5a6..0000000
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ /dev/null
@@ -1,623 +0,0 @@
-/******************************************************************************
- *
- * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/amlcode.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acdisasm.h>
-
-#define _COMPONENT          ACPI_DISPATCHER
-ACPI_MODULE_NAME("dsmethod")
-
-/* Local prototypes */
-static acpi_status
-acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_method_error
- *
- * PARAMETERS:  Status          - Execution status
- *              walk_state      - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Called on method error. Invoke the global exception handler if
- *              present, dump the method data if the disassembler is configured
- *
- *              Note: Allows the exception handler to change the status code
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
-{
-	ACPI_FUNCTION_ENTRY();
-
-	/* Ignore AE_OK and control exception codes */
-
-	if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
-		return (status);
-	}
-
-	/* Invoke the global exception handler */
-
-	if (acpi_gbl_exception_handler) {
-
-		/* Exit the interpreter, allow handler to execute methods */
-
-		acpi_ex_exit_interpreter();
-
-		/*
-		 * Handler can map the exception code to anything it wants, including
-		 * AE_OK, in which case the executing method will not be aborted.
-		 */
-		status = acpi_gbl_exception_handler(status,
-						    walk_state->method_node ?
-						    walk_state->method_node->
-						    name.integer : 0,
-						    walk_state->opcode,
-						    walk_state->aml_offset,
-						    NULL);
-		acpi_ex_enter_interpreter();
-	}
-
-	acpi_ds_clear_implicit_return(walk_state);
-
-#ifdef ACPI_DISASSEMBLER
-	if (ACPI_FAILURE(status)) {
-
-		/* Display method locals/args if disassembler is present */
-
-		acpi_dm_dump_method_info(status, walk_state, walk_state->op);
-	}
-#endif
-
-	return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_create_method_mutex
- *
- * PARAMETERS:  obj_desc            - The method object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a mutex object for a serialized control method
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
-{
-	union acpi_operand_object *mutex_desc;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ds_create_method_mutex);
-
-	/* Create the new mutex object */
-
-	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
-	if (!mutex_desc) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Create the actual OS Mutex */
-
-	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	mutex_desc->mutex.sync_level = method_desc->method.sync_level;
-	method_desc->method.mutex = mutex_desc;
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_begin_method_execution
- *
- * PARAMETERS:  method_node         - Node of the method
- *              obj_desc            - The method object
- *              walk_state          - current state, NULL if not yet executing
- *                                    a method.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Prepare a method for execution.  Parses the method if necessary,
- *              increments the thread count, and waits at the method semaphore
- *              for clearance to execute.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
-			       union acpi_operand_object *obj_desc,
-			       struct acpi_walk_state *walk_state)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);
-
-	if (!method_node) {
-		return_ACPI_STATUS(AE_NULL_ENTRY);
-	}
-
-	/* Prevent wraparound of thread count */
-
-	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
-		ACPI_ERROR((AE_INFO,
-			    "Method reached maximum reentrancy limit (255)"));
-		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
-	}
-
-	/*
-	 * If this method is serialized, we need to acquire the method mutex.
-	 */
-	if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) {
-		/*
-		 * Create a mutex for the method if it is defined to be Serialized
-		 * and a mutex has not already been created. We defer the mutex creation
-		 * until a method is actually executed, to minimize the object count
-		 */
-		if (!obj_desc->method.mutex) {
-			status = acpi_ds_create_method_mutex(obj_desc);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-		}
-
-		/*
-		 * The current_sync_level (per-thread) must be less than or equal to
-		 * the sync level of the method. This mechanism provides some
-		 * deadlock prevention
-		 *
-		 * Top-level method invocation has no walk state at this point
-		 */
-		if (walk_state &&
-		    (walk_state->thread->current_sync_level >
-		     obj_desc->method.mutex->mutex.sync_level)) {
-			ACPI_ERROR((AE_INFO,
-				    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)",
-				    acpi_ut_get_node_name(method_node),
-				    walk_state->thread->current_sync_level));
-
-			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
-		}
-
-		/*
-		 * Obtain the method mutex if necessary. Do not acquire mutex for a
-		 * recursive call.
-		 */
-		if (!walk_state ||
-		    !obj_desc->method.mutex->mutex.thread_id ||
-		    (walk_state->thread->thread_id !=
-		     obj_desc->method.mutex->mutex.thread_id)) {
-			/*
-			 * Acquire the method mutex. This releases the interpreter if we
-			 * block (and reacquires it before it returns)
-			 */
-			status =
-			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
-						      mutex.os_mutex,
-						      ACPI_WAIT_FOREVER);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-
-			/* Update the mutex and walk info and save the original sync_level */
-
-			if (walk_state) {
-				obj_desc->method.mutex->mutex.
-				    original_sync_level =
-				    walk_state->thread->current_sync_level;
-
-				obj_desc->method.mutex->mutex.thread_id =
-				    walk_state->thread->thread_id;
-				walk_state->thread->current_sync_level =
-				    obj_desc->method.sync_level;
-			} else {
-				obj_desc->method.mutex->mutex.
-				    original_sync_level =
-				    obj_desc->method.mutex->mutex.sync_level;
-			}
-		}
-
-		/* Always increase acquisition depth */
-
-		obj_desc->method.mutex->mutex.acquisition_depth++;
-	}
-
-	/*
-	 * Allocate an Owner ID for this method, only if this is the first thread
-	 * to begin concurrent execution. We only need one owner_id, even if the
-	 * method is invoked recursively.
-	 */
-	if (!obj_desc->method.owner_id) {
-		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
-		if (ACPI_FAILURE(status)) {
-			goto cleanup;
-		}
-	}
-
-	/*
-	 * Increment the method parse tree thread count since it has been
-	 * reentered one more time (even if it is the same thread)
-	 */
-	obj_desc->method.thread_count++;
-	return_ACPI_STATUS(status);
-
-      cleanup:
-	/* On error, must release the method mutex (if present) */
-
-	if (obj_desc->method.mutex) {
-		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
-	}
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_call_control_method
- *
- * PARAMETERS:  Thread              - Info for this thread
- *              this_walk_state     - Current walk state
- *              Op                  - Current Op to be walked
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Transfer execution to a called control method
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_call_control_method(struct acpi_thread_state *thread,
-			    struct acpi_walk_state *this_walk_state,
-			    union acpi_parse_object *op)
-{
-	acpi_status status;
-	struct acpi_namespace_node *method_node;
-	struct acpi_walk_state *next_walk_state = NULL;
-	union acpi_operand_object *obj_desc;
-	struct acpi_evaluate_info *info;
-	u32 i;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "Calling method %p, currentstate=%p\n",
-			  this_walk_state->prev_op, this_walk_state));
-
-	/*
-	 * Get the namespace entry for the control method we are about to call
-	 */
-	method_node = this_walk_state->method_call_node;
-	if (!method_node) {
-		return_ACPI_STATUS(AE_NULL_ENTRY);
-	}
-
-	obj_desc = acpi_ns_get_attached_object(method_node);
-	if (!obj_desc) {
-		return_ACPI_STATUS(AE_NULL_OBJECT);
-	}
-
-	/* Init for new method, possibly wait on method mutex */
-
-	status = acpi_ds_begin_method_execution(method_node, obj_desc,
-						this_walk_state);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Begin method parse/execution. Create a new walk state */
-
-	next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
-						    NULL, obj_desc, thread);
-	if (!next_walk_state) {
-		status = AE_NO_MEMORY;
-		goto cleanup;
-	}
-
-	/*
-	 * The resolved arguments were put on the previous walk state's operand
-	 * stack. Operands on the previous walk state stack always
-	 * start at index 0. Also, null terminate the list of arguments
-	 */
-	this_walk_state->operands[this_walk_state->num_operands] = NULL;
-
-	/*
-	 * Allocate and initialize the evaluation information block
-	 * TBD: this is somewhat inefficient, should change interface to
-	 * ds_init_aml_walk. For now, keeps this struct off the CPU stack
-	 */
-	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
-	if (!info) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	info->parameters = &this_walk_state->operands[0];
-
-	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
-				       obj_desc->method.aml_start,
-				       obj_desc->method.aml_length, info,
-				       ACPI_IMODE_EXECUTE);
-
-	ACPI_FREE(info);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	/*
-	 * Delete the operands on the previous walkstate operand stack
-	 * (they were copied to new objects)
-	 */
-	for (i = 0; i < obj_desc->method.param_count; i++) {
-		acpi_ut_remove_reference(this_walk_state->operands[i]);
-		this_walk_state->operands[i] = NULL;
-	}
-
-	/* Clear the operand stack */
-
-	this_walk_state->num_operands = 0;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
-			  method_node->name.ascii, next_walk_state));
-
-	/* Invoke an internal method if necessary */
-
-	if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
-		status = obj_desc->method.implementation(next_walk_state);
-	}
-
-	return_ACPI_STATUS(status);
-
-      cleanup:
-
-	/* On error, we must terminate the method properly */
-
-	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
-	if (next_walk_state) {
-		acpi_ds_delete_walk_state(next_walk_state);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_restart_control_method
- *
- * PARAMETERS:  walk_state          - State for preempted method (caller)
- *              return_desc         - Return value from the called method
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Restart a method that was preempted by another (nested) method
- *              invocation.  Handle the return value (if any) from the callee.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
-			       union acpi_operand_object *return_desc)
-{
-	acpi_status status;
-	int same_as_implicit_return;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
-			  acpi_ut_get_node_name(walk_state->method_node),
-			  walk_state->method_call_op, return_desc));
-
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "    ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
-			  walk_state->return_used,
-			  walk_state->results, walk_state));
-
-	/* Did the called method return a value? */
-
-	if (return_desc) {
-
-		/* Is the implicit return object the same as the return desc? */
-
-		same_as_implicit_return =
-		    (walk_state->implicit_return_obj == return_desc);
-
-		/* Are we actually going to use the return value? */
-
-		if (walk_state->return_used) {
-
-			/* Save the return value from the previous method */
-
-			status = acpi_ds_result_push(return_desc, walk_state);
-			if (ACPI_FAILURE(status)) {
-				acpi_ut_remove_reference(return_desc);
-				return_ACPI_STATUS(status);
-			}
-
-			/*
-			 * Save as THIS method's return value in case it is returned
-			 * immediately to yet another method
-			 */
-			walk_state->return_desc = return_desc;
-		}
-
-		/*
-		 * The following code is the optional support for the so-called
-		 * "implicit return". Some AML code assumes that the last value of the
-		 * method is "implicitly" returned to the caller, in the absence of an
-		 * explicit return value.
-		 *
-		 * Just save the last result of the method as the return value.
-		 *
-		 * NOTE: this is optional because the ASL language does not actually
-		 * support this behavior.
-		 */
-		else if (!acpi_ds_do_implicit_return
-			 (return_desc, walk_state, FALSE)
-			 || same_as_implicit_return) {
-			/*
-			 * Delete the return value if it will not be used by the
-			 * calling method or remove one reference if the explicit return
-			 * is the same as the implicit return value.
-			 */
-			acpi_ut_remove_reference(return_desc);
-		}
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_terminate_control_method
- *
- * PARAMETERS:  method_desc         - Method object
- *              walk_state          - State associated with the method
- *
- * RETURN:      None
- *
- * DESCRIPTION: Terminate a control method.  Delete everything that the method
- *              created, delete all locals and arguments, and delete the parse
- *              tree if requested.
- *
- * MUTEX:       Interpreter is locked
- *
- ******************************************************************************/
-
-void
-acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
-				 struct acpi_walk_state *walk_state)
-{
-
-	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);
-
-	/* method_desc is required, walk_state is optional */
-
-	if (!method_desc) {
-		return_VOID;
-	}
-
-	if (walk_state) {
-
-		/* Delete all arguments and locals */
-
-		acpi_ds_method_data_delete_all(walk_state);
-
-		/*
-		 * If method is serialized, release the mutex and restore the
-		 * current sync level for this thread
-		 */
-		if (method_desc->method.mutex) {
-
-			/* Acquisition Depth handles recursive calls */
-
-			method_desc->method.mutex->mutex.acquisition_depth--;
-			if (!method_desc->method.mutex->mutex.acquisition_depth) {
-				walk_state->thread->current_sync_level =
-				    method_desc->method.mutex->mutex.
-				    original_sync_level;
-
-				acpi_os_release_mutex(method_desc->method.
-						      mutex->mutex.os_mutex);
-				method_desc->method.mutex->mutex.thread_id = NULL;
-			}
-		}
-
-		/*
-		 * Delete any namespace objects created anywhere within
-		 * the namespace by the execution of this method
-		 */
-		acpi_ns_delete_namespace_by_owner(method_desc->method.owner_id);
-	}
-
-	/* Decrement the thread count on the method */
-
-	if (method_desc->method.thread_count) {
-		method_desc->method.thread_count--;
-	} else {
-		ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
-	}
-
-	/* Are there any other threads currently executing this method? */
-
-	if (method_desc->method.thread_count) {
-		/*
-		 * Additional threads. Do not release the owner_id in this case,
-		 * we immediately reuse it for the next thread executing this method
-		 */
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "*** Completed execution of one thread, %d threads remaining\n",
-				  method_desc->method.thread_count));
-	} else {
-		/* This is the only executing thread for this method */
-
-		/*
-		 * Support to dynamically change a method from not_serialized to
-		 * Serialized if it appears that the method is incorrectly written and
-		 * does not support multiple thread execution. The best example of this
-		 * is if such a method creates namespace objects and blocks. A second
-		 * thread will fail with an AE_ALREADY_EXISTS exception
-		 *
-		 * This code is here because we must wait until the last thread exits
-		 * before creating the synchronization semaphore.
-		 */
-		if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED)
-		    && (!method_desc->method.mutex)) {
-			(void)acpi_ds_create_method_mutex(method_desc);
-		}
-
-		/* No more threads, we can free the owner_id */
-
-		acpi_ut_release_owner_id(&method_desc->method.owner_id);
-	}
-
-	return_VOID;
-}
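
A small illustrative sketch of the acquisition-depth idea used above for serialized methods follows. It is not the ACPICA implementation (which also records the owning thread id and restores the sync level); it is a single-threaded model showing why the OS mutex is only released when the outermost invocation exits.

#include <pthread.h>
#include <stdio.h>

struct method_mutex {
	pthread_mutex_t os_mutex;
	int acquisition_depth;	/* how many nested calls hold the mutex */
};

static void method_enter(struct method_mutex *m)
{
	if (m->acquisition_depth++ == 0)
		pthread_mutex_lock(&m->os_mutex);	/* only the outermost call locks */
}

static void method_exit(struct method_mutex *m)
{
	if (--m->acquisition_depth == 0)
		pthread_mutex_unlock(&m->os_mutex);	/* released when depth returns to 0 */
}

int main(void)
{
	struct method_mutex m = { PTHREAD_MUTEX_INITIALIZER, 0 };

	method_enter(&m);	/* outer invocation of the serialized method */
	method_enter(&m);	/* recursive invocation: no second lock, no deadlock */
	method_exit(&m);	/* depth 1: mutex still held */
	method_exit(&m);	/* depth 0: mutex released */
	printf("final depth: %d\n", m.acquisition_depth);
	return 0;
}
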
diff --git a/drivers/acpi/dispatcher/dsmthdat.c b/drivers/acpi/dispatcher/dsmthdat.c
deleted file mode 100644
index d03f81b..0000000
--- a/drivers/acpi/dispatcher/dsmthdat.c
+++ /dev/null
@@ -1,717 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: dsmthdat - control method arguments and local variables
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acdispat.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acinterp.h>
-
-#define _COMPONENT          ACPI_DISPATCHER
-ACPI_MODULE_NAME("dsmthdat")
-
-/* Local prototypes */
-static void
-acpi_ds_method_data_delete_value(u8 type,
-				 u32 index, struct acpi_walk_state *walk_state);
-
-static acpi_status
-acpi_ds_method_data_set_value(u8 type,
-			      u32 index,
-			      union acpi_operand_object *object,
-			      struct acpi_walk_state *walk_state);
-
-#ifdef ACPI_OBSOLETE_FUNCTIONS
-acpi_object_type
-acpi_ds_method_data_get_type(u16 opcode,
-			     u32 index, struct acpi_walk_state *walk_state);
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_method_data_init
- *
- * PARAMETERS:  walk_state          - Current walk state object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Initialize the data structures that hold the method's arguments
- *              and locals.  The data struct is an array of namespace nodes for
- *              each - this allows ref_of and de_ref_of to work properly for these
- *              special data types.
- *
- * NOTES:       walk_state fields are initialized to zero by the
- *              ACPI_ALLOCATE_ZEROED().
- *
- *              A pseudo-Namespace Node is assigned to each argument and local
- *              so that ref_of() can return a pointer to the Node.
- *
- ******************************************************************************/
-
-void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
-{
-	u32 i;
-
-	ACPI_FUNCTION_TRACE(ds_method_data_init);
-
-	/* Init the method arguments */
-
-	for (i = 0; i < ACPI_METHOD_NUM_ARGS; i++) {
-		ACPI_MOVE_32_TO_32(&walk_state->arguments[i].name,
-				   NAMEOF_ARG_NTE);
-		walk_state->arguments[i].name.integer |= (i << 24);
-		walk_state->arguments[i].descriptor_type = ACPI_DESC_TYPE_NAMED;
-		walk_state->arguments[i].type = ACPI_TYPE_ANY;
-		walk_state->arguments[i].flags =
-		    ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_ARG;
-	}
-
-	/* Init the method locals */
-
-	for (i = 0; i < ACPI_METHOD_NUM_LOCALS; i++) {
-		ACPI_MOVE_32_TO_32(&walk_state->local_variables[i].name,
-				   NAMEOF_LOCAL_NTE);
-
-		walk_state->local_variables[i].name.integer |= (i << 24);
-		walk_state->local_variables[i].descriptor_type =
-		    ACPI_DESC_TYPE_NAMED;
-		walk_state->local_variables[i].type = ACPI_TYPE_ANY;
-		walk_state->local_variables[i].flags =
-		    ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_LOCAL;
-	}
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_method_data_delete_all
- *
- * PARAMETERS:  walk_state          - Current walk state object
- *
- * RETURN:      None
- *
- * DESCRIPTION: Delete method locals and arguments.  Arguments are only
- *              deleted if this method was called from another method.
- *
- ******************************************************************************/
-
-void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
-{
-	u32 index;
-
-	ACPI_FUNCTION_TRACE(ds_method_data_delete_all);
-
-	/* Detach the locals */
-
-	for (index = 0; index < ACPI_METHOD_NUM_LOCALS; index++) {
-		if (walk_state->local_variables[index].object) {
-			ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Deleting Local%d=%p\n",
-					  index,
-					  walk_state->local_variables[index].
-					  object));
-
-			/* Detach object (if present) and remove a reference */
-
-			acpi_ns_detach_object(&walk_state->
-					      local_variables[index]);
-		}
-	}
-
-	/* Detach the arguments */
-
-	for (index = 0; index < ACPI_METHOD_NUM_ARGS; index++) {
-		if (walk_state->arguments[index].object) {
-			ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Deleting Arg%d=%p\n",
-					  index,
-					  walk_state->arguments[index].object));
-
-			/* Detach object (if present) and remove a reference */
-
-			acpi_ns_detach_object(&walk_state->arguments[index]);
-		}
-	}
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_method_data_init_args
- *
- * PARAMETERS:  *Params         - Pointer to a parameter list for the method
- *              max_param_count - The arg count for this method
- *              walk_state      - Current walk state object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Initialize arguments for a method.  The parameter list is a list
- *              of ACPI operand objects, either null terminated or with a
- *              length defined by max_param_count.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_method_data_init_args(union acpi_operand_object **params,
-			      u32 max_param_count,
-			      struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	u32 index = 0;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_method_data_init_args, params);
-
-	if (!params) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "No param list passed to method\n"));
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Copy passed parameters into the new method stack frame */
-
-	while ((index < ACPI_METHOD_NUM_ARGS) &&
-	       (index < max_param_count) && params[index]) {
-		/*
-		 * A valid parameter.
-		 * Store the argument in the method/walk descriptor.
-		 * Do not copy the arg in order to implement call by reference
-		 */
-		status = acpi_ds_method_data_set_value(ACPI_REFCLASS_ARG, index,
-						       params[index],
-						       walk_state);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		index++;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%d args passed to method\n", index));
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_method_data_get_node
- *
- * PARAMETERS:  Type                - Either ACPI_REFCLASS_LOCAL or
- *                                    ACPI_REFCLASS_ARG
- *              Index               - Which Local or Arg to get the Node for
- *              walk_state          - Current walk state object
- *              Node                - Where the node is returned.
- *
- * RETURN:      Status and node
- *
- * DESCRIPTION: Get the Node associated with a local or arg.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_method_data_get_node(u8 type,
-			     u32 index,
-			     struct acpi_walk_state *walk_state,
-			     struct acpi_namespace_node **node)
-{
-	ACPI_FUNCTION_TRACE(ds_method_data_get_node);
-
-	/*
-	 * Method Locals and Arguments are supported
-	 */
-	switch (type) {
-	case ACPI_REFCLASS_LOCAL:
-
-		if (index > ACPI_METHOD_MAX_LOCAL) {
-			ACPI_ERROR((AE_INFO,
-				    "Local index %d is invalid (max %d)",
-				    index, ACPI_METHOD_MAX_LOCAL));
-			return_ACPI_STATUS(AE_AML_INVALID_INDEX);
-		}
-
-		/* Return a pointer to the pseudo-node */
-
-		*node = &walk_state->local_variables[index];
-		break;
-
-	case ACPI_REFCLASS_ARG:
-
-		if (index > ACPI_METHOD_MAX_ARG) {
-			ACPI_ERROR((AE_INFO,
-				    "Arg index %d is invalid (max %d)",
-				    index, ACPI_METHOD_MAX_ARG));
-			return_ACPI_STATUS(AE_AML_INVALID_INDEX);
-		}
-
-		/* Return a pointer to the pseudo-node */
-
-		*node = &walk_state->arguments[index];
-		break;
-
-	default:
-		ACPI_ERROR((AE_INFO, "Type %d is invalid", type));
-		return_ACPI_STATUS(AE_TYPE);
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_method_data_set_value
- *
- * PARAMETERS:  Type                - Either ACPI_REFCLASS_LOCAL or
- *                                    ACPI_REFCLASS_ARG
- *              Index               - Which Local or Arg to get
- *              Object              - Object to be inserted into the stack entry
- *              walk_state          - Current walk state object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Insert an object onto the method stack at entry Opcode:Index.
- *              Note: There is no "implicit conversion" for locals.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ds_method_data_set_value(u8 type,
-			      u32 index,
-			      union acpi_operand_object *object,
-			      struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node;
-
-	ACPI_FUNCTION_TRACE(ds_method_data_set_value);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-			  "NewObj %p Type %2.2X, Refs=%d [%s]\n", object,
-			  type, object->common.reference_count,
-			  acpi_ut_get_type_name(object->common.type)));
-
-	/* Get the namespace node for the arg/local */
-
-	status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * Increment ref count so object can't be deleted while installed.
-	 * NOTE: We do not copy the object in order to preserve the call by
-	 * reference semantics of ACPI Control Method invocation.
-	 * (See ACPI Specification 2.0_c)
-	 */
-	acpi_ut_add_reference(object);
-
-	/* Install the object */
-
-	node->object = object;
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_method_data_get_value
- *
- * PARAMETERS:  Type                - Either ACPI_REFCLASS_LOCAL or
- *                                    ACPI_REFCLASS_ARG
- *              Index               - Which local_var or argument to get
- *              walk_state          - Current walk state object
- *              dest_desc           - Where Arg or Local value is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Retrieve value of selected Arg or Local for this method
- *              Used only in acpi_ex_resolve_to_value().
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_method_data_get_value(u8 type,
-			      u32 index,
-			      struct acpi_walk_state *walk_state,
-			      union acpi_operand_object **dest_desc)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node;
-	union acpi_operand_object *object;
-
-	ACPI_FUNCTION_TRACE(ds_method_data_get_value);
-
-	/* Validate the object descriptor */
-
-	if (!dest_desc) {
-		ACPI_ERROR((AE_INFO, "Null object descriptor pointer"));
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Get the namespace node for the arg/local */
-
-	status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Get the object from the node */
-
-	object = node->object;
-
-	/* Examine the returned object, it must be valid. */
-
-	if (!object) {
-		/*
-		 * Index points to uninitialized object.
-		 * This means that either 1) The expected argument was
-		 * not passed to the method, or 2) A local variable
-		 * was referenced by the method (via the ASL)
-		 * before it was initialized.  Either case is an error.
-		 */
-
-		/* If slack enabled, init the local_x/arg_x to an Integer of value zero */
-
-		if (acpi_gbl_enable_interpreter_slack) {
-			object =
-			    acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-			if (!object) {
-				return_ACPI_STATUS(AE_NO_MEMORY);
-			}
-
-			object->integer.value = 0;
-			node->object = object;
-		}
-
-		/* Otherwise, return the error */
-
-		else
-			switch (type) {
-			case ACPI_REFCLASS_ARG:
-
-				ACPI_ERROR((AE_INFO,
-					    "Uninitialized Arg[%d] at node %p",
-					    index, node));
-
-				return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
-
-			case ACPI_REFCLASS_LOCAL:
-
-				ACPI_ERROR((AE_INFO,
-					    "Uninitialized Local[%d] at node %p",
-					    index, node));
-
-				return_ACPI_STATUS(AE_AML_UNINITIALIZED_LOCAL);
-
-			default:
-
-				ACPI_ERROR((AE_INFO,
-					    "Not an Arg/Local opcode: %X",
-					    type));
-				return_ACPI_STATUS(AE_AML_INTERNAL);
-			}
-	}
-
-	/*
-	 * The Index points to an initialized and valid object.
-	 * Return an additional reference to the object
-	 */
-	*dest_desc = object;
-	acpi_ut_add_reference(object);
-
-	return_ACPI_STATUS(AE_OK);
-}
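
The slack-mode branch above can be condensed into a tiny standalone sketch. The types below are invented; the point is only that a read of an uninitialized Local/Arg either fails or, with interpreter slack enabled, lazily installs an Integer of value zero in the slot.

#include <stdio.h>

#define ENABLE_SLACK 1

struct slot {
	long *object;		/* NULL means the Local/Arg was never initialized */
};

static int get_value(struct slot *s, int slack, long *scratch, long **out)
{
	if (!s->object) {
		if (!slack)
			return -1;	/* strict: AE_AML_UNINITIALIZED_* */
		*scratch = 0;		/* slack: materialize an Integer 0 */
		s->object = scratch;
	}
	*out = s->object;
	return 0;
}

int main(void)
{
	struct slot local0 = { 0 };
	long zero, *val;
	int rc;

	rc = get_value(&local0, 0, &zero, &val);
	printf("strict read: rc=%d\n", rc);

	rc = get_value(&local0, ENABLE_SLACK, &zero, &val);
	printf("slack read:  rc=%d value=%ld\n", rc, *val);
	return 0;
}
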
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_method_data_delete_value
- *
- * PARAMETERS:  Type                - Either ACPI_REFCLASS_LOCAL or
- *                                    ACPI_REFCLASS_ARG
- *              Index               - Which local_var or argument to delete
- *              walk_state          - Current walk state object
- *
- * RETURN:      None
- *
- * DESCRIPTION: Delete the entry at Opcode:Index.  Insert
- *              a null into the stack slot after the object is deleted.
- *
- ******************************************************************************/
-
-static void
-acpi_ds_method_data_delete_value(u8 type,
-				 u32 index, struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node;
-	union acpi_operand_object *object;
-
-	ACPI_FUNCTION_TRACE(ds_method_data_delete_value);
-
-	/* Get the namespace node for the arg/local */
-
-	status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
-	if (ACPI_FAILURE(status)) {
-		return_VOID;
-	}
-
-	/* Get the associated object */
-
-	object = acpi_ns_get_attached_object(node);
-
-	/*
-	 * Undefine the Arg or Local by setting its descriptor
-	 * pointer to NULL. Locals/Args can contain both
-	 * ACPI_OPERAND_OBJECTS and ACPI_NAMESPACE_NODEs
-	 */
-	node->object = NULL;
-
-	if ((object) &&
-	    (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_OPERAND)) {
-		/*
-		 * There is a valid object.
-		 * Decrement the reference count by one to balance the
-		 * increment when the object was stored.
-		 */
-		acpi_ut_remove_reference(object);
-	}
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_store_object_to_local
- *
- * PARAMETERS:  Type                - Either ACPI_REFCLASS_LOCAL or
- *                                    ACPI_REFCLASS_ARG
- *              Index               - Which Local or Arg to set
- *              obj_desc            - Value to be stored
- *              walk_state          - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Store a value in an Arg or Local.  The obj_desc is installed
- *              as the new value for the Arg or Local and the reference count
- *              for obj_desc is incremented.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_store_object_to_local(u8 type,
-			      u32 index,
-			      union acpi_operand_object *obj_desc,
-			      struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node;
-	union acpi_operand_object *current_obj_desc;
-	union acpi_operand_object *new_obj_desc;
-
-	ACPI_FUNCTION_TRACE(ds_store_object_to_local);
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Type=%2.2X Index=%d Obj=%p\n",
-			  type, index, obj_desc));
-
-	/* Parameter validation */
-
-	if (!obj_desc) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Get the namespace node for the arg/local */
-
-	status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	current_obj_desc = acpi_ns_get_attached_object(node);
-	if (current_obj_desc == obj_desc) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p already installed!\n",
-				  obj_desc));
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * If the reference count on the object is more than one, we must
-	 * take a copy of the object before we store.  A reference count
-	 * of exactly 1 means that the object was just created during the
-	 * evaluation of an expression, and we can safely use it since it
-	 * is not used anywhere else.
-	 */
-	new_obj_desc = obj_desc;
-	if (obj_desc->common.reference_count > 1) {
-		status =
-		    acpi_ut_copy_iobject_to_iobject(obj_desc, &new_obj_desc,
-						    walk_state);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/*
-	 * If there is an object already in this slot, we either
-	 * have to delete it, or if this is an argument and there
-	 * is an object reference stored there, we have to do
-	 * an indirect store!
-	 */
-	if (current_obj_desc) {
-		/*
-		 * Check for an indirect store if an argument
-		 * contains an object reference (stored as a Node).
-		 * We don't allow this automatic dereferencing for
-		 * locals, since a store to a local should overwrite
-		 * anything there, including an object reference.
-		 *
-		 * If both Arg0 and Local0 contain ref_of (Local4):
-		 *
-		 * Store (1, Arg0)             - Causes indirect store to local4
-		 * Store (1, Local0)           - Stores 1 in local0, overwriting
-		 *                                  the reference to local4
-		 * Store (1, de_refof (Local0)) - Causes indirect store to local4
-		 *
-		 * Weird, but true.
-		 */
-		if (type == ACPI_REFCLASS_ARG) {
-			/*
-			 * If we have a valid reference object that came from ref_of(),
-			 * do the indirect store
-			 */
-			if ((ACPI_GET_DESCRIPTOR_TYPE(current_obj_desc) ==
-			     ACPI_DESC_TYPE_OPERAND)
-			    && (current_obj_desc->common.type ==
-				ACPI_TYPE_LOCAL_REFERENCE)
-			    && (current_obj_desc->reference.class ==
-				ACPI_REFCLASS_REFOF)) {
-				ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-						  "Arg (%p) is an ObjRef(Node), storing in node %p\n",
-						  new_obj_desc,
-						  current_obj_desc));
-
-				/*
-				 * Store this object to the Node (perform the indirect store)
-				 * NOTE: No implicit conversion is performed, as per the ACPI
-				 * specification rules on storing to Locals/Args.
-				 */
-				status =
-				    acpi_ex_store_object_to_node(new_obj_desc,
-								 current_obj_desc->
-								 reference.
-								 object,
-								 walk_state,
-								 ACPI_NO_IMPLICIT_CONVERSION);
-
-				/* Remove local reference if we copied the object above */
-
-				if (new_obj_desc != obj_desc) {
-					acpi_ut_remove_reference(new_obj_desc);
-				}
-				return_ACPI_STATUS(status);
-			}
-		}
-
-		/* Delete the existing object before storing the new one */
-
-		acpi_ds_method_data_delete_value(type, index, walk_state);
-	}
-
-	/*
-	 * Install the Obj descriptor (*new_obj_desc) into
-	 * the descriptor for the Arg or Local.
-	 * (increments the object reference count by one)
-	 */
-	status =
-	    acpi_ds_method_data_set_value(type, index, new_obj_desc,
-					  walk_state);
-
-	/* Remove local reference if we copied the object above */
-
-	if (new_obj_desc != obj_desc) {
-		acpi_ut_remove_reference(new_obj_desc);
-	}
-
-	return_ACPI_STATUS(status);
-}
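
A compact sketch of the copy-before-store rule explained above, with invented types: an object whose reference count is greater than one may be shared elsewhere, so a private copy is installed instead, while a count of exactly one marks a fresh temporary that can be stored directly.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct obj {
	int refs;
	long value;
};

static struct obj *obj_copy(const struct obj *src)
{
	struct obj *dup = malloc(sizeof(*dup));

	if (dup) {
		*dup = *src;
		dup->refs = 1;		/* the copy starts with one reference */
	}
	return dup;
}

/* Install "src" (or a private copy of it) into a Local/Arg slot */
static struct obj *store_to_slot(struct obj **slot, struct obj *src)
{
	struct obj *new_obj = src;

	if (src->refs > 1) {
		/* shared with someone else: store a copy instead */
		new_obj = obj_copy(src);
		if (!new_obj)
			return NULL;
	}
	new_obj->refs++;		/* the slot now holds a reference */
	*slot = new_obj;
	return new_obj;
}

int main(void)
{
	struct obj shared = { 2, 42 };	/* already referenced elsewhere */
	struct obj *local0 = NULL;

	store_to_slot(&local0, &shared);
	printf("stored a copy: %s\n", local0 == &shared ? "no" : "yes");
	if (local0 && local0 != &shared)
		free(local0);
	return 0;
}
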
-
-#ifdef ACPI_OBSOLETE_FUNCTIONS
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_method_data_get_type
- *
- * PARAMETERS:  Opcode              - Either AML_LOCAL_OP or AML_ARG_OP
- *              Index               - Which Local or Arg whose type to get
- *              walk_state          - Current walk state object
- *
- * RETURN:      Data type of current value of the selected Arg or Local
- *
- * DESCRIPTION: Get the type of the object stored in the Local or Arg
- *
- ******************************************************************************/
-
-acpi_object_type
-acpi_ds_method_data_get_type(u16 opcode,
-			     u32 index, struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node;
-	union acpi_operand_object *object;
-
-	ACPI_FUNCTION_TRACE(ds_method_data_get_type);
-
-	/* Get the namespace node for the arg/local */
-
-	status = acpi_ds_method_data_get_node(opcode, index, walk_state, &node);
-	if (ACPI_FAILURE(status)) {
-		return_VALUE((ACPI_TYPE_NOT_FOUND));
-	}
-
-	/* Get the object */
-
-	object = acpi_ns_get_attached_object(node);
-	if (!object) {
-
-		/* Uninitialized local/arg, return TYPE_ANY */
-
-		return_VALUE(ACPI_TYPE_ANY);
-	}
-
-	/* Get the object type */
-
-	return_VALUE(ACPI_GET_OBJECT_TYPE(object));
-}
-#endif
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c
deleted file mode 100644
index 4f08e59..0000000
--- a/drivers/acpi/dispatcher/dsobject.c
+++ /dev/null
@@ -1,812 +0,0 @@
-/******************************************************************************
- *
- * Module Name: dsobject - Dispatcher object management routines
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/amlcode.h>
-#include <acpi/acdispat.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acinterp.h>
-
-#define _COMPONENT          ACPI_DISPATCHER
-ACPI_MODULE_NAME("dsobject")
-
-/* Local prototypes */
-static acpi_status
-acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
-			      union acpi_parse_object *op,
-			      union acpi_operand_object **obj_desc_ptr);
-
-#ifndef ACPI_NO_METHOD_EXECUTION
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_build_internal_object
- *
- * PARAMETERS:  walk_state      - Current walk state
- *              Op              - Parser object to be translated
- *              obj_desc_ptr    - Where the ACPI internal object is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Translate a parser Op object to the equivalent namespace object
- *              Simple objects are any objects other than a package object!
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
-			      union acpi_parse_object *op,
-			      union acpi_operand_object **obj_desc_ptr)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ds_build_internal_object);
-
-	*obj_desc_ptr = NULL;
-	if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
-		/*
-		 * This is a named object reference. If this name was
-		 * previously looked up in the namespace, it was stored in this op.
-		 * Otherwise, go ahead and look it up now
-		 */
-		if (!op->common.node) {
-			status = acpi_ns_lookup(walk_state->scope_info,
-						op->common.value.string,
-						ACPI_TYPE_ANY,
-						ACPI_IMODE_EXECUTE,
-						ACPI_NS_SEARCH_PARENT |
-						ACPI_NS_DONT_OPEN_SCOPE, NULL,
-						ACPI_CAST_INDIRECT_PTR(struct
-								       acpi_namespace_node,
-								       &(op->
-									 common.
-									 node)));
-			if (ACPI_FAILURE(status)) {
-
-				/* Check if we are resolving a named reference within a package */
-
-				if ((status == AE_NOT_FOUND)
-				    && (acpi_gbl_enable_interpreter_slack)
-				    &&
-				    ((op->common.parent->common.aml_opcode ==
-				      AML_PACKAGE_OP)
-				     || (op->common.parent->common.aml_opcode ==
-					 AML_VAR_PACKAGE_OP))) {
-					/*
-					 * We didn't find the target and we are populating elements
-					 * of a package - ignore if slack enabled. Some ASL code
-					 * contains dangling invalid references in packages and
-					 * expects that no exception will be issued. Leave the
-					 * element as a null element. It cannot be used, but it
-					 * can be overwritten by subsequent ASL code - this is
-					 * typically the case.
-					 */
-					ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-							  "Ignoring unresolved reference in package [%4.4s]\n",
-							  walk_state->
-							  scope_info->scope.
-							  node->name.ascii));
-
-					return_ACPI_STATUS(AE_OK);
-				} else {
-					ACPI_ERROR_NAMESPACE(op->common.value.
-							     string, status);
-				}
-
-				return_ACPI_STATUS(status);
-			}
-		}
-
-		/* Special object resolution for elements of a package */
-
-		if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
-		    (op->common.parent->common.aml_opcode ==
-		     AML_VAR_PACKAGE_OP)) {
-			/*
-			 * Attempt to resolve the node to a value before we insert it into
-			 * the package. If this is a reference to a common data type,
-			 * resolve it immediately. According to the ACPI spec, package
-			 * elements can only be "data objects" or method references.
-			 * Attempt to resolve to an Integer, Buffer, String or Package.
-			 * If that fails, return the named reference (for things like Devices,
-			 * Methods, etc.) Buffer Fields and Fields will resolve to simple
-			 * objects (int/buf/str/pkg).
-			 *
-			 * NOTE: References to things like Devices, Methods, Mutexes, etc.
-			 * will remain as named references. This behavior is not described
-			 * in the ACPI spec, but it appears to be an oversight.
-			 */
-			obj_desc =
-			    ACPI_CAST_PTR(union acpi_operand_object,
-					  op->common.node);
-
-			status =
-			    acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
-							  (struct
-							   acpi_namespace_node,
-							   &obj_desc),
-							  walk_state);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-
-			switch (op->common.node->type) {
-				/*
-				 * For these types, we need the actual node, not the subobject.
-				 * However, the subobject did not get an extra reference count above.
-				 *
-				 * TBD: should ex_resolve_node_to_value be changed to fix this?
-				 */
-			case ACPI_TYPE_DEVICE:
-			case ACPI_TYPE_THERMAL:
-
-				acpi_ut_add_reference(op->common.node->object);
-
-				/*lint -fallthrough */
-				/*
-				 * For these types, we need the actual node, not the subobject.
-				 * The subobject got an extra reference count in ex_resolve_node_to_value.
-				 */
-			case ACPI_TYPE_MUTEX:
-			case ACPI_TYPE_METHOD:
-			case ACPI_TYPE_POWER:
-			case ACPI_TYPE_PROCESSOR:
-			case ACPI_TYPE_EVENT:
-			case ACPI_TYPE_REGION:
-
-				/* We will create a reference object for these types below */
-				break;
-
-			default:
-				/*
-				 * All other types - the node was resolved to an actual
-				 * object, we are done.
-				 */
-				goto exit;
-			}
-		}
-	}
-
-	/* Create and init a new internal ACPI object */
-
-	obj_desc = acpi_ut_create_internal_object((acpi_ps_get_opcode_info
-						   (op->common.aml_opcode))->
-						  object_type);
-	if (!obj_desc) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	status =
-	    acpi_ds_init_object_from_op(walk_state, op, op->common.aml_opcode,
-					&obj_desc);
-	if (ACPI_FAILURE(status)) {
-		acpi_ut_remove_reference(obj_desc);
-		return_ACPI_STATUS(status);
-	}
-
-      exit:
-	*obj_desc_ptr = obj_desc;
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_build_internal_buffer_obj
- *
- * PARAMETERS:  walk_state      - Current walk state
- *              Op              - Parser object to be translated
- *              buffer_length   - Length of the buffer
- *              obj_desc_ptr    - Where the ACPI internal object is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Translate a parser Op buffer object to the equivalent
- *              namespace object
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
-				  union acpi_parse_object *op,
-				  u32 buffer_length,
-				  union acpi_operand_object **obj_desc_ptr)
-{
-	union acpi_parse_object *arg;
-	union acpi_operand_object *obj_desc;
-	union acpi_parse_object *byte_list;
-	u32 byte_list_length = 0;
-
-	ACPI_FUNCTION_TRACE(ds_build_internal_buffer_obj);
-
-	/*
-	 * If we are evaluating a Named buffer object "Name (xxxx, Buffer)",
-	 * the buffer object already exists (from the NS node); otherwise it must
-	 * be created.
-	 */
-	obj_desc = *obj_desc_ptr;
-	if (!obj_desc) {
-
-		/* Create a new buffer object */
-
-		obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
-		*obj_desc_ptr = obj_desc;
-		if (!obj_desc) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-	}
-
-	/*
-	 * Second arg is the buffer data (optional). The byte_list can be either
-	 * individual bytes or a string initializer.  In either case, a
-	 * byte_list appears in the AML.
-	 */
-	arg = op->common.value.arg;	/* skip first arg */
-
-	byte_list = arg->named.next;
-	if (byte_list) {
-		if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) {
-			ACPI_ERROR((AE_INFO,
-				    "Expecting bytelist, got AML opcode %X in op %p",
-				    byte_list->common.aml_opcode, byte_list));
-
-			acpi_ut_remove_reference(obj_desc);
-			return (AE_TYPE);
-		}
-
-		byte_list_length = (u32) byte_list->common.value.integer;
-	}
-
-	/*
-	 * The buffer length (number of bytes) will be the larger of:
-	 * 1) The specified buffer length and
-	 * 2) The length of the initializer byte list
-	 */
-	obj_desc->buffer.length = buffer_length;
-	if (byte_list_length > buffer_length) {
-		obj_desc->buffer.length = byte_list_length;
-	}
-
-	/* Allocate the buffer */
-
-	if (obj_desc->buffer.length == 0) {
-		obj_desc->buffer.pointer = NULL;
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Buffer defined with zero length in AML, creating\n"));
-	} else {
-		obj_desc->buffer.pointer =
-		    ACPI_ALLOCATE_ZEROED(obj_desc->buffer.length);
-		if (!obj_desc->buffer.pointer) {
-			acpi_ut_delete_object_desc(obj_desc);
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		/* Initialize buffer from the byte_list (if present) */
-
-		if (byte_list) {
-			ACPI_MEMCPY(obj_desc->buffer.pointer,
-				    byte_list->named.data, byte_list_length);
-		}
-	}
-
-	obj_desc->buffer.flags |= AOPOBJ_DATA_VALID;
-	op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
-	return_ACPI_STATUS(AE_OK);
-}
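
The buffer sizing rule above is simply max(declared length, initializer length), with zero fill beyond the initializer bytes. A throwaway sketch, not ACPICA code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Size the buffer as max(declared length, initializer length), zero filled */
static unsigned char *make_buffer(size_t declared_len,
				  const unsigned char *init, size_t init_len,
				  size_t *out_len)
{
	size_t len = declared_len > init_len ? declared_len : init_len;
	unsigned char *buf = calloc(len ? len : 1, 1);

	if (buf && init)
		memcpy(buf, init, init_len);
	*out_len = len;
	return buf;
}

int main(void)
{
	const unsigned char bytes[] = { 0x11, 0x22, 0x33 };
	size_t len;
	unsigned char *buf = make_buffer(8, bytes, sizeof(bytes), &len);

	if (!buf)
		return 1;
	/* len=8, bytes 3..7 stay zero filled */
	printf("len=%zu buf[2]=0x%02x buf[7]=0x%02x\n", len, buf[2], buf[7]);
	free(buf);
	return 0;
}
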
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_build_internal_package_obj
- *
- * PARAMETERS:  walk_state      - Current walk state
- *              Op              - Parser object to be translated
- *              element_count   - Number of elements in the package - this is
- *                                the num_elements argument to Package()
- *              obj_desc_ptr    - Where the ACPI internal object is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Translate a parser Op package object to the equivalent
- *              namespace object
- *
- * NOTE: The number of elements in the package will always be the num_elements
- * count, regardless of the number of elements in the package list. If
- * num_elements is smaller, only that many package list elements are used.
- * If num_elements is larger, the Package object is padded out with
- * objects of type Uninitialized (as per ACPI spec.)
- *
- * Even though the ASL compilers do not allow num_elements to be smaller
- * than the Package list length (for the fixed length package opcode), some
- * BIOS code modifies the AML on the fly to adjust the num_elements, and
- * this code compensates for that. This also provides compatibility with
- * other AML interpreters.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
-				   union acpi_parse_object *op,
-				   u32 element_count,
-				   union acpi_operand_object **obj_desc_ptr)
-{
-	union acpi_parse_object *arg;
-	union acpi_parse_object *parent;
-	union acpi_operand_object *obj_desc = NULL;
-	acpi_status status = AE_OK;
-	unsigned i;
-	u16 index;
-	u16 reference_count;
-
-	ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
-
-	/* Find the parent of a possibly nested package */
-
-	parent = op->common.parent;
-	while ((parent->common.aml_opcode == AML_PACKAGE_OP) ||
-	       (parent->common.aml_opcode == AML_VAR_PACKAGE_OP)) {
-		parent = parent->common.parent;
-	}
-
-	/*
-	 * If we are evaluating a Named package object "Name (xxxx, Package)",
-	 * the package object already exists, otherwise it must be created.
-	 */
-	obj_desc = *obj_desc_ptr;
-	if (!obj_desc) {
-		obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
-		*obj_desc_ptr = obj_desc;
-		if (!obj_desc) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		obj_desc->package.node = parent->common.node;
-	}
-
-	/*
-	 * Allocate the element array (array of pointers to the individual
-	 * objects) based on the num_elements parameter. Add an extra pointer slot
-	 * so that the list is always null terminated.
-	 */
-	obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
-							   element_count +
-							   1) * sizeof(void *));
-
-	if (!obj_desc->package.elements) {
-		acpi_ut_delete_object_desc(obj_desc);
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	obj_desc->package.count = element_count;
-
-	/*
-	 * Initialize the elements of the package, up to the num_elements count.
-	 * Package is automatically padded with uninitialized (NULL) elements
-	 * if num_elements is greater than the package list length. Likewise,
-	 * Package is truncated if num_elements is less than the list length.
-	 */
-	arg = op->common.value.arg;
-	arg = arg->common.next;
-	for (i = 0; arg && (i < element_count); i++) {
-		if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
-			if (arg->common.node->type == ACPI_TYPE_METHOD) {
-				/*
-				 * A method reference "looks" to the parser to be a method
-				 * invocation, so we special case it here
-				 */
-				arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
-				status =
-				    acpi_ds_build_internal_object(walk_state,
-								  arg,
-								  &obj_desc->
-								  package.
-								  elements[i]);
-			} else {
-				/* This package element is already built, just get it */
-
-				obj_desc->package.elements[i] =
-				    ACPI_CAST_PTR(union acpi_operand_object,
-						  arg->common.node);
-			}
-		} else {
-			status = acpi_ds_build_internal_object(walk_state, arg,
-							       &obj_desc->
-							       package.
-							       elements[i]);
-		}
-
-		if (*obj_desc_ptr) {
-
-			/* Existing package, get existing reference count */
-
-			reference_count =
-			    (*obj_desc_ptr)->common.reference_count;
-			if (reference_count > 1) {
-
-				/* Make new element ref count match original ref count */
-
-				for (index = 0; index < (reference_count - 1);
-				     index++) {
-					acpi_ut_add_reference((obj_desc->
-							       package.
-							       elements[i]));
-				}
-			}
-		}
-
-		arg = arg->common.next;
-	}
-
-	/* Check for match between num_elements and actual length of package_list */
-
-	if (arg) {
-		/*
-		 * num_elements was exhausted, but there are remaining elements in the
-		 * package_list.
-		 *
-		 * Note: technically, this is an error, from ACPI spec: "It is an error
-		 * for NumElements to be less than the number of elements in the
-		 * PackageList". However, for now, we just print an error message and
-		 * no exception is returned.
-		 */
-		while (arg) {
-
-			/* Find out how many elements there really are */
-
-			i++;
-			arg = arg->common.next;
-		}
-
-		ACPI_WARNING((AE_INFO,
-			    "Package List length (%X) larger than NumElements count (%X), truncated\n",
-			    i, element_count));
-	} else if (i < element_count) {
-		/*
-		 * Arg list (elements) was exhausted, but we did not reach num_elements count.
-		 * Note: this is not an error, the package is padded out with NULLs.
-		 */
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "Package List length (%X) smaller than NumElements count (%X), padded with null elements\n",
-				  i, element_count));
-	}
-
-	obj_desc->package.flags |= AOPOBJ_DATA_VALID;
-	op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
-	return_ACPI_STATUS(status);
-}
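
The NumElements behaviour described in the function header can be shown with a short standalone sketch (invented helper, not the ACPICA code): the element array always has NumElements slots plus a terminating NULL, a shorter initializer list leaves trailing NULL ("Uninitialized") elements, and a longer one is truncated.

#include <stdio.h>
#include <stdlib.h>

/* Element array always has num_elements slots plus a terminating NULL */
static void **build_package(unsigned num_elements,
			    void *const *init, unsigned init_count)
{
	void **elements = calloc((size_t)num_elements + 1, sizeof(*elements));
	unsigned i;

	if (!elements)
		return NULL;
	for (i = 0; i < num_elements && i < init_count; i++)
		elements[i] = init[i];	/* extra initializers are dropped */
	return elements;		/* short lists leave trailing NULLs */
}

int main(void)
{
	int a = 1, b = 2;
	void *init[] = { &a, &b };
	void **pkg = build_package(4, init, 2);

	if (!pkg)
		return 1;
	/* elements 2 and 3 are the "Uninitialized" padding */
	printf("pkg[1]=%p pkg[2]=%p pkg[3]=%p\n", pkg[1], pkg[2], pkg[3]);
	free(pkg);
	return 0;
}
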
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_create_node
- *
- * PARAMETERS:  walk_state      - Current walk state
- *              Node            - NS Node to be initialized
- *              Op              - Parser object to be translated
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create the object to be associated with a namespace node
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_create_node(struct acpi_walk_state *walk_state,
-		    struct acpi_namespace_node *node,
-		    union acpi_parse_object *op)
-{
-	acpi_status status;
-	union acpi_operand_object *obj_desc;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_create_node, op);
-
-	/*
-	 * Because of the execution pass through the non-control-method
-	 * parts of the table, we can arrive here twice.  Only init
-	 * the named object node the first time through
-	 */
-	if (acpi_ns_get_attached_object(node)) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	if (!op->common.value.arg) {
-
-		/* No arguments, there is nothing to do */
-
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Build an internal object for the argument(s) */
-
-	status = acpi_ds_build_internal_object(walk_state, op->common.value.arg,
-					       &obj_desc);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Re-type the object according to its argument */
-
-	node->type = ACPI_GET_OBJECT_TYPE(obj_desc);
-
-	/* Attach obj to node */
-
-	status = acpi_ns_attach_object(node, obj_desc, node->type);
-
-	/* Remove local reference to the object */
-
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
-
-#endif				/* ACPI_NO_METHOD_EXECUTION */
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_init_object_from_op
- *
- * PARAMETERS:  walk_state      - Current walk state
- *              Op              - Parser op used to init the internal object
- *              Opcode          - AML opcode associated with the object
- *              ret_obj_desc    - Namespace object to be initialized
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Initialize a namespace object from a parser Op and its
- *              associated arguments.  The namespace object is a more compact
- *              representation of the Op and its arguments.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
-			    union acpi_parse_object *op,
-			    u16 opcode,
-			    union acpi_operand_object **ret_obj_desc)
-{
-	const struct acpi_opcode_info *op_info;
-	union acpi_operand_object *obj_desc;
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ds_init_object_from_op);
-
-	obj_desc = *ret_obj_desc;
-	op_info = acpi_ps_get_opcode_info(opcode);
-	if (op_info->class == AML_CLASS_UNKNOWN) {
-
-		/* Unknown opcode */
-
-		return_ACPI_STATUS(AE_TYPE);
-	}
-
-	/* Perform per-object initialization */
-
-	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-	case ACPI_TYPE_BUFFER:
-
-		/*
-		 * Defer evaluation of Buffer term_arg operand
-		 */
-		obj_desc->buffer.node =
-		    ACPI_CAST_PTR(struct acpi_namespace_node,
-				  walk_state->operands[0]);
-		obj_desc->buffer.aml_start = op->named.data;
-		obj_desc->buffer.aml_length = op->named.length;
-		break;
-
-	case ACPI_TYPE_PACKAGE:
-
-		/*
-		 * Defer evaluation of Package term_arg operand
-		 */
-		obj_desc->package.node =
-		    ACPI_CAST_PTR(struct acpi_namespace_node,
-				  walk_state->operands[0]);
-		obj_desc->package.aml_start = op->named.data;
-		obj_desc->package.aml_length = op->named.length;
-		break;
-
-	case ACPI_TYPE_INTEGER:
-
-		switch (op_info->type) {
-		case AML_TYPE_CONSTANT:
-			/*
-			 * Resolve AML Constants here - AND ONLY HERE!
-			 * All constants are integers.
-			 * We mark the integer with a flag that indicates that it started
-			 * life as a constant -- so that stores to constants will perform
-			 * as expected (noop). zero_op is used as a placeholder for optional
-			 * target operands.
-			 */
-			obj_desc->common.flags = AOPOBJ_AML_CONSTANT;
-
-			switch (opcode) {
-			case AML_ZERO_OP:
-
-				obj_desc->integer.value = 0;
-				break;
-
-			case AML_ONE_OP:
-
-				obj_desc->integer.value = 1;
-				break;
-
-			case AML_ONES_OP:
-
-				obj_desc->integer.value = ACPI_INTEGER_MAX;
-
-				/* Truncate value if we are executing from a 32-bit ACPI table */
-
-#ifndef ACPI_NO_METHOD_EXECUTION
-				acpi_ex_truncate_for32bit_table(obj_desc);
-#endif
-				break;
-
-			case AML_REVISION_OP:
-
-				obj_desc->integer.value = ACPI_CA_VERSION;
-				break;
-
-			default:
-
-				ACPI_ERROR((AE_INFO,
-					    "Unknown constant opcode %X",
-					    opcode));
-				status = AE_AML_OPERAND_TYPE;
-				break;
-			}
-			break;
-
-		case AML_TYPE_LITERAL:
-
-			obj_desc->integer.value = op->common.value.integer;
-#ifndef ACPI_NO_METHOD_EXECUTION
-			acpi_ex_truncate_for32bit_table(obj_desc);
-#endif
-			break;
-
-		default:
-			ACPI_ERROR((AE_INFO, "Unknown Integer type %X",
-				    op_info->type));
-			status = AE_AML_OPERAND_TYPE;
-			break;
-		}
-		break;
-
-	case ACPI_TYPE_STRING:
-
-		obj_desc->string.pointer = op->common.value.string;
-		obj_desc->string.length =
-		    (u32) ACPI_STRLEN(op->common.value.string);
-
-		/*
-		 * The string is contained in the ACPI table, don't ever try
-		 * to delete it
-		 */
-		obj_desc->common.flags |= AOPOBJ_STATIC_POINTER;
-		break;
-
-	case ACPI_TYPE_METHOD:
-		break;
-
-	case ACPI_TYPE_LOCAL_REFERENCE:
-
-		switch (op_info->type) {
-		case AML_TYPE_LOCAL_VARIABLE:
-
-			/* Local ID (0-7) is (AML opcode - base AML_LOCAL_OP) */
-
-			obj_desc->reference.value = opcode - AML_LOCAL_OP;
-			obj_desc->reference.class = ACPI_REFCLASS_LOCAL;
-
-#ifndef ACPI_NO_METHOD_EXECUTION
-			status =
-			    acpi_ds_method_data_get_node(ACPI_REFCLASS_LOCAL,
-							 obj_desc->reference.
-							 value, walk_state,
-							 ACPI_CAST_INDIRECT_PTR
-							 (struct
-							  acpi_namespace_node,
-							  &obj_desc->reference.
-							  object));
-#endif
-			break;
-
-		case AML_TYPE_METHOD_ARGUMENT:
-
-			/* Arg ID (0-6) is (AML opcode - base AML_ARG_OP) */
-
-			obj_desc->reference.value = opcode - AML_ARG_OP;
-			obj_desc->reference.class = ACPI_REFCLASS_ARG;
-
-#ifndef ACPI_NO_METHOD_EXECUTION
-			status = acpi_ds_method_data_get_node(ACPI_REFCLASS_ARG,
-							      obj_desc->
-							      reference.value,
-							      walk_state,
-							      ACPI_CAST_INDIRECT_PTR
-							      (struct
-							       acpi_namespace_node,
-							       &obj_desc->
-							       reference.
-							       object));
-#endif
-			break;
-
-		default:	/* Object name or Debug object */
-
-			switch (op->common.aml_opcode) {
-			case AML_INT_NAMEPATH_OP:
-
-				/* Node was saved in Op */
-
-				obj_desc->reference.node = op->common.node;
-				obj_desc->reference.object =
-				    op->common.node->object;
-				obj_desc->reference.class = ACPI_REFCLASS_NAME;
-				break;
-
-			case AML_DEBUG_OP:
-
-				obj_desc->reference.class = ACPI_REFCLASS_DEBUG;
-				break;
-
-			default:
-
-				ACPI_ERROR((AE_INFO,
-					    "Unimplemented reference type for AML opcode: %4.4X",
-					    opcode));
-				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-			}
-			break;
-		}
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Unimplemented data type: %X",
-			    ACPI_GET_OBJECT_TYPE(obj_desc)));
-
-		status = AE_AML_OPERAND_TYPE;
-		break;
-	}
-
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
deleted file mode 100644
index 69fae59..0000000
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ /dev/null
@@ -1,1429 +0,0 @@
-/******************************************************************************
- *
- * Module Name: dsopcode - Dispatcher Op Region support and handling of
- *                         "control" opcodes
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/amlcode.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acevents.h>
-#include <acpi/actables.h>
-
-#define _COMPONENT          ACPI_DISPATCHER
-ACPI_MODULE_NAME("dsopcode")
-
-/* Local prototypes */
-static acpi_status
-acpi_ds_execute_arguments(struct acpi_namespace_node *node,
-			  struct acpi_namespace_node *scope_node,
-			  u32 aml_length, u8 * aml_start);
-
-static acpi_status
-acpi_ds_init_buffer_field(u16 aml_opcode,
-			  union acpi_operand_object *obj_desc,
-			  union acpi_operand_object *buffer_desc,
-			  union acpi_operand_object *offset_desc,
-			  union acpi_operand_object *length_desc,
-			  union acpi_operand_object *result_desc);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_execute_arguments
- *
- * PARAMETERS:  Node                - Object NS node
- *              scope_node          - Parent NS node
- *              aml_length          - Length of executable AML
- *              aml_start           - Pointer to the AML
- *
- * RETURN:      Status.
- *
- * DESCRIPTION: Late (deferred) execution of region or field arguments
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ds_execute_arguments(struct acpi_namespace_node *node,
-			  struct acpi_namespace_node *scope_node,
-			  u32 aml_length, u8 * aml_start)
-{
-	acpi_status status;
-	union acpi_parse_object *op;
-	struct acpi_walk_state *walk_state;
-
-	ACPI_FUNCTION_TRACE(ds_execute_arguments);
-
-	/*
-	 * Allocate a new parser op to be the root of the parsed tree
-	 */
-	op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
-	if (!op) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Save the Node for use in acpi_ps_parse_aml */
-
-	op->common.node = scope_node;
-
-	/* Create and initialize a new parser state */
-
-	walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
-	if (!walk_state) {
-		status = AE_NO_MEMORY;
-		goto cleanup;
-	}
-
-	status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
-				       aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
-	if (ACPI_FAILURE(status)) {
-		acpi_ds_delete_walk_state(walk_state);
-		goto cleanup;
-	}
-
-	/* Mark this parse as a deferred opcode */
-
-	walk_state->parse_flags = ACPI_PARSE_DEFERRED_OP;
-	walk_state->deferred_node = node;
-
-	/* Pass1: Parse the entire declaration */
-
-	status = acpi_ps_parse_aml(walk_state);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	/* Get and init the Op created above */
-
-	op->common.node = node;
-	acpi_ps_delete_parse_tree(op);
-
-	/* Evaluate the deferred arguments */
-
-	op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
-	if (!op) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	op->common.node = scope_node;
-
-	/* Create and initialize a new parser state */
-
-	walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
-	if (!walk_state) {
-		status = AE_NO_MEMORY;
-		goto cleanup;
-	}
-
-	/* Execute the opcode and arguments */
-
-	status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
-				       aml_length, NULL, ACPI_IMODE_EXECUTE);
-	if (ACPI_FAILURE(status)) {
-		acpi_ds_delete_walk_state(walk_state);
-		goto cleanup;
-	}
-
-	/* Mark this execution as a deferred opcode */
-
-	walk_state->deferred_node = node;
-	status = acpi_ps_parse_aml(walk_state);
-
-      cleanup:
-	acpi_ps_delete_parse_tree(op);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_get_buffer_field_arguments
- *
- * PARAMETERS:  obj_desc        - A valid buffer_field object
- *
- * RETURN:      Status.
- *
- * DESCRIPTION: Get buffer_field Buffer and Index. This implements the late
- *              evaluation of these field attributes.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
-{
-	union acpi_operand_object *extra_desc;
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_field_arguments, obj_desc);
-
-	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Get the AML pointer (method object) and buffer_field node */
-
-	extra_desc = acpi_ns_get_secondary_object(obj_desc);
-	node = obj_desc->buffer_field.node;
-
-	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
-			(ACPI_TYPE_BUFFER_FIELD, node, NULL));
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n",
-			  acpi_ut_get_node_name(node)));
-
-	/* Execute the AML code for the term_arg arguments */
-
-	status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
-					   extra_desc->extra.aml_length,
-					   extra_desc->extra.aml_start);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_get_bank_field_arguments
- *
- * PARAMETERS:  obj_desc        - A valid bank_field object
- *
- * RETURN:      Status.
- *
- * DESCRIPTION: Get bank_field bank_value. This implements the late
- *              evaluation of these field attributes.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
-{
-	union acpi_operand_object *extra_desc;
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_get_bank_field_arguments, obj_desc);
-
-	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Get the AML pointer (method object) and bank_field node */
-
-	extra_desc = acpi_ns_get_secondary_object(obj_desc);
-	node = obj_desc->bank_field.node;
-
-	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
-			(ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL));
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BankField Arg Init\n",
-			  acpi_ut_get_node_name(node)));
-
-	/* Execute the AML code for the term_arg arguments */
-
-	status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
-					   extra_desc->extra.aml_length,
-					   extra_desc->extra.aml_start);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_get_buffer_arguments
- *
- * PARAMETERS:  obj_desc        - A valid Buffer object
- *
- * RETURN:      Status.
- *
- * DESCRIPTION: Get Buffer length and initializer byte list.  This implements
- *              the late evaluation of these attributes.
- *
- ******************************************************************************/
-
-acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
-{
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_arguments, obj_desc);
-
-	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Get the Buffer node */
-
-	node = obj_desc->buffer.node;
-	if (!node) {
-		ACPI_ERROR((AE_INFO,
-			    "No pointer back to NS node in buffer obj %p",
-			    obj_desc));
-		return_ACPI_STATUS(AE_AML_INTERNAL);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Buffer Arg Init\n"));
-
-	/* Execute the AML code for the term_arg arguments */
-
-	status = acpi_ds_execute_arguments(node, node,
-					   obj_desc->buffer.aml_length,
-					   obj_desc->buffer.aml_start);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_get_package_arguments
- *
- * PARAMETERS:  obj_desc        - A valid Package object
- *
- * RETURN:      Status.
- *
- * DESCRIPTION: Get Package length and initializer element list.  This implements
- *              the late evaluation of these attributes.
- *
- ******************************************************************************/
-
-acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
-{
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_get_package_arguments, obj_desc);
-
-	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Get the Package node */
-
-	node = obj_desc->package.node;
-	if (!node) {
-		ACPI_ERROR((AE_INFO,
-			    "No pointer back to NS node in package %p",
-			    obj_desc));
-		return_ACPI_STATUS(AE_AML_INTERNAL);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Package Arg Init\n"));
-
-	/* Execute the AML code for the term_arg arguments */
-
-	status = acpi_ds_execute_arguments(node, node,
-					   obj_desc->package.aml_length,
-					   obj_desc->package.aml_start);
-	return_ACPI_STATUS(status);
-}
-
-/*****************************************************************************
- *
- * FUNCTION:    acpi_ds_get_region_arguments
- *
- * PARAMETERS:  obj_desc        - A valid region object
- *
- * RETURN:      Status.
- *
- * DESCRIPTION: Get region address and length.  This implements the late
- *              evaluation of these region attributes.
- *
- ****************************************************************************/
-
-acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
-{
-	struct acpi_namespace_node *node;
-	acpi_status status;
-	union acpi_operand_object *extra_desc;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_get_region_arguments, obj_desc);
-
-	if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	extra_desc = acpi_ns_get_secondary_object(obj_desc);
-	if (!extra_desc) {
-		return_ACPI_STATUS(AE_NOT_EXIST);
-	}
-
-	/* Get the Region node */
-
-	node = obj_desc->region.node;
-
-	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
-			(ACPI_TYPE_REGION, node, NULL));
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n",
-			  acpi_ut_get_node_name(node),
-			  extra_desc->extra.aml_start));
-
-	/* Execute the argument AML */
-
-	status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
-					   extra_desc->extra.aml_length,
-					   extra_desc->extra.aml_start);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Validate the region address/length via the host OS */
-
-	status = acpi_os_validate_address(obj_desc->region.space_id,
-					  obj_desc->region.address,
-					  (acpi_size) obj_desc->region.length,
-					  acpi_ut_get_node_name(node));
-
-	if (ACPI_FAILURE(status)) {
-		/*
-		 * Invalid address/length. We will emit an error message and mark
-		 * the region as invalid, so that it will cause an additional error if
-		 * it is ever used. Then return AE_OK.
-		 */
-		ACPI_EXCEPTION((AE_INFO, status,
-				"During address validation of OpRegion [%4.4s]",
-				node->name.ascii));
-		obj_desc->common.flags |= AOPOBJ_INVALID;
-		status = AE_OK;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_initialize_region
- *
- * PARAMETERS:  obj_handle      - Region namespace node
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Front end to ev_initialize_region
- *
- ******************************************************************************/
-
-acpi_status acpi_ds_initialize_region(acpi_handle obj_handle)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-
-	obj_desc = acpi_ns_get_attached_object(obj_handle);
-
-	/* Namespace is NOT locked */
-
-	status = acpi_ev_initialize_region(obj_desc, FALSE);
-	return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_init_buffer_field
- *
- * PARAMETERS:  aml_opcode      - create_xxx_field
- *              obj_desc        - buffer_field object
- *              buffer_desc     - Host Buffer
- *              offset_desc     - Offset into buffer
- *              length_desc     - Length of field (CREATE_FIELD_OP only)
- *              result_desc     - Where to store the result
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Perform actual initialization of a buffer field
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ds_init_buffer_field(u16 aml_opcode,
-			  union acpi_operand_object *obj_desc,
-			  union acpi_operand_object *buffer_desc,
-			  union acpi_operand_object *offset_desc,
-			  union acpi_operand_object *length_desc,
-			  union acpi_operand_object *result_desc)
-{
-	u32 offset;
-	u32 bit_offset;
-	u32 bit_count;
-	u8 field_flags;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_init_buffer_field, obj_desc);
-
-	/* Host object must be a Buffer */
-
-	if (ACPI_GET_OBJECT_TYPE(buffer_desc) != ACPI_TYPE_BUFFER) {
-		ACPI_ERROR((AE_INFO,
-			    "Target of Create Field is not a Buffer object - %s",
-			    acpi_ut_get_object_type_name(buffer_desc)));
-
-		status = AE_AML_OPERAND_TYPE;
-		goto cleanup;
-	}
-
-	/*
-	 * The last parameter to all of these opcodes (result_desc) started
-	 * out as a name_string, and should therefore now be a NS node
-	 * after resolution in acpi_ex_resolve_operands().
-	 */
-	if (ACPI_GET_DESCRIPTOR_TYPE(result_desc) != ACPI_DESC_TYPE_NAMED) {
-		ACPI_ERROR((AE_INFO,
-			    "(%s) destination not a NS Node [%s]",
-			    acpi_ps_get_opcode_name(aml_opcode),
-			    acpi_ut_get_descriptor_name(result_desc)));
-
-		status = AE_AML_OPERAND_TYPE;
-		goto cleanup;
-	}
-
-	offset = (u32) offset_desc->integer.value;
-
-	/*
-	 * Setup the Bit offsets and counts, according to the opcode
-	 */
-	switch (aml_opcode) {
-	case AML_CREATE_FIELD_OP:
-
-		/* Offset is in bits, count is in bits */
-
-		field_flags = AML_FIELD_ACCESS_BYTE;
-		bit_offset = offset;
-		bit_count = (u32) length_desc->integer.value;
-
-		/* Must have a valid (>0) bit count */
-
-		if (bit_count == 0) {
-			ACPI_ERROR((AE_INFO,
-				    "Attempt to CreateField of length zero"));
-			status = AE_AML_OPERAND_VALUE;
-			goto cleanup;
-		}
-		break;
-
-	case AML_CREATE_BIT_FIELD_OP:
-
-		/* Offset is in bits, Field is one bit */
-
-		bit_offset = offset;
-		bit_count = 1;
-		field_flags = AML_FIELD_ACCESS_BYTE;
-		break;
-
-	case AML_CREATE_BYTE_FIELD_OP:
-
-		/* Offset is in bytes, field is one byte */
-
-		bit_offset = 8 * offset;
-		bit_count = 8;
-		field_flags = AML_FIELD_ACCESS_BYTE;
-		break;
-
-	case AML_CREATE_WORD_FIELD_OP:
-
-		/* Offset is in bytes, field is one word */
-
-		bit_offset = 8 * offset;
-		bit_count = 16;
-		field_flags = AML_FIELD_ACCESS_WORD;
-		break;
-
-	case AML_CREATE_DWORD_FIELD_OP:
-
-		/* Offset is in bytes, field is one dword */
-
-		bit_offset = 8 * offset;
-		bit_count = 32;
-		field_flags = AML_FIELD_ACCESS_DWORD;
-		break;
-
-	case AML_CREATE_QWORD_FIELD_OP:
-
-		/* Offset is in bytes, field is one qword */
-
-		bit_offset = 8 * offset;
-		bit_count = 64;
-		field_flags = AML_FIELD_ACCESS_QWORD;
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO,
-			    "Unknown field creation opcode %02x", aml_opcode));
-		status = AE_AML_BAD_OPCODE;
-		goto cleanup;
-	}
-
-	/* Entire field must fit within the current length of the buffer */
-
-	if ((bit_offset + bit_count) > (8 * (u32) buffer_desc->buffer.length)) {
-		ACPI_ERROR((AE_INFO,
-			    "Field [%4.4s] at %d exceeds Buffer [%4.4s] size %d (bits)",
-			    acpi_ut_get_node_name(result_desc),
-			    bit_offset + bit_count,
-			    acpi_ut_get_node_name(buffer_desc->buffer.node),
-			    8 * (u32) buffer_desc->buffer.length));
-		status = AE_AML_BUFFER_LIMIT;
-		goto cleanup;
-	}
-
-	/*
-	 * Initialize areas of the field object that are common to all fields
-	 * For field_flags, use LOCK_RULE = 0 (NO_LOCK),
-	 * UPDATE_RULE = 0 (UPDATE_PRESERVE)
-	 */
-	status = acpi_ex_prep_common_field_object(obj_desc, field_flags, 0,
-						  bit_offset, bit_count);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	obj_desc->buffer_field.buffer_obj = buffer_desc;
-
-	/* Reference count for buffer_desc inherits obj_desc count */
-
-	buffer_desc->common.reference_count = (u16)
-	    (buffer_desc->common.reference_count +
-	     obj_desc->common.reference_count);
-
-      cleanup:
-
-	/* Always delete the operands */
-
-	acpi_ut_remove_reference(offset_desc);
-	acpi_ut_remove_reference(buffer_desc);
-
-	if (aml_opcode == AML_CREATE_FIELD_OP) {
-		acpi_ut_remove_reference(length_desc);
-	}
-
-	/* On failure, delete the result descriptor */
-
-	if (ACPI_FAILURE(status)) {
-		acpi_ut_remove_reference(result_desc);	/* Result descriptor */
-	} else {
-		/* Now the address and length are valid for this buffer_field */
-
-		obj_desc->buffer_field.flags |= AOPOBJ_DATA_VALID;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_eval_buffer_field_operands
- *
- * PARAMETERS:  walk_state      - Current walk
- *              Op              - A valid buffer_field Op object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Get buffer_field Buffer and Index
- *              Called from acpi_ds_exec_end_op during buffer_field parse tree walk
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
-				   union acpi_parse_object *op)
-{
-	acpi_status status;
-	union acpi_operand_object *obj_desc;
-	struct acpi_namespace_node *node;
-	union acpi_parse_object *next_op;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_eval_buffer_field_operands, op);
-
-	/*
-	 * This is where we evaluate the address and length fields of the
-	 * create_xxx_field declaration
-	 */
-	node = op->common.node;
-
-	/* next_op points to the op that holds the Buffer */
-
-	next_op = op->common.value.arg;
-
-	/* Evaluate/create the address and length operands */
-
-	status = acpi_ds_create_operands(walk_state, next_op);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (!obj_desc) {
-		return_ACPI_STATUS(AE_NOT_EXIST);
-	}
-
-	/* Resolve the operands */
-
-	status = acpi_ex_resolve_operands(op->common.aml_opcode,
-					  ACPI_WALK_OPERANDS, walk_state);
-	if (ACPI_FAILURE(status)) {
-		ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)",
-			    acpi_ps_get_opcode_name(op->common.aml_opcode),
-			    status));
-
-		return_ACPI_STATUS(status);
-	}
-
-	/* Initialize the Buffer Field */
-
-	if (op->common.aml_opcode == AML_CREATE_FIELD_OP) {
-
-		/* NOTE: Slightly different operands for this opcode */
-
-		status =
-		    acpi_ds_init_buffer_field(op->common.aml_opcode, obj_desc,
-					      walk_state->operands[0],
-					      walk_state->operands[1],
-					      walk_state->operands[2],
-					      walk_state->operands[3]);
-	} else {
-		/* All other, create_xxx_field opcodes */
-
-		status =
-		    acpi_ds_init_buffer_field(op->common.aml_opcode, obj_desc,
-					      walk_state->operands[0],
-					      walk_state->operands[1], NULL,
-					      walk_state->operands[2]);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_eval_region_operands
- *
- * PARAMETERS:  walk_state      - Current walk
- *              Op              - A valid region Op object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Get region address and length
- *              Called from acpi_ds_exec_end_op during op_region parse tree walk
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
-			     union acpi_parse_object *op)
-{
-	acpi_status status;
-	union acpi_operand_object *obj_desc;
-	union acpi_operand_object *operand_desc;
-	struct acpi_namespace_node *node;
-	union acpi_parse_object *next_op;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_eval_region_operands, op);
-
-	/*
-	 * This is where we evaluate the address and length fields of the
-	 * op_region declaration
-	 */
-	node = op->common.node;
-
-	/* next_op points to the op that holds the space_iD */
-
-	next_op = op->common.value.arg;
-
-	/* next_op points to address op */
-
-	next_op = next_op->common.next;
-
-	/* Evaluate/create the address and length operands */
-
-	status = acpi_ds_create_operands(walk_state, next_op);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Resolve the length and address operands to numbers */
-
-	status = acpi_ex_resolve_operands(op->common.aml_opcode,
-					  ACPI_WALK_OPERANDS, walk_state);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (!obj_desc) {
-		return_ACPI_STATUS(AE_NOT_EXIST);
-	}
-
-	/*
-	 * Get the length operand and save it
-	 * (at Top of stack)
-	 */
-	operand_desc = walk_state->operands[walk_state->num_operands - 1];
-
-	obj_desc->region.length = (u32) operand_desc->integer.value;
-	acpi_ut_remove_reference(operand_desc);
-
-	/*
-	 * Get the address and save it
-	 * (at top of stack - 1)
-	 */
-	operand_desc = walk_state->operands[walk_state->num_operands - 2];
-
-	obj_desc->region.address = (acpi_physical_address)
-	    operand_desc->integer.value;
-	acpi_ut_remove_reference(operand_desc);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
-			  obj_desc,
-			  ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
-			  obj_desc->region.length));
-
-	/* Now the address and length are valid for this opregion */
-
-	obj_desc->region.flags |= AOPOBJ_DATA_VALID;
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_eval_table_region_operands
- *
- * PARAMETERS:  walk_state      - Current walk
- *              Op              - A valid region Op object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Get region address and length
- *              Called from acpi_ds_exec_end_op during data_table_region parse tree walk
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
-				   union acpi_parse_object *op)
-{
-	acpi_status status;
-	union acpi_operand_object *obj_desc;
-	union acpi_operand_object **operand;
-	struct acpi_namespace_node *node;
-	union acpi_parse_object *next_op;
-	u32 table_index;
-	struct acpi_table_header *table;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op);
-
-	/*
-	 * This is where we evaluate the signature_string and oem_iDString
-	 * and oem_table_iDString of the data_table_region declaration
-	 */
-	node = op->common.node;
-
-	/* next_op points to signature_string op */
-
-	next_op = op->common.value.arg;
-
-	/*
-	 * Evaluate/create the signature_string and oem_iDString
-	 * and oem_table_iDString operands
-	 */
-	status = acpi_ds_create_operands(walk_state, next_op);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * Resolve the signature_string and oem_iDString
-	 * and oem_table_iDString operands
-	 */
-	status = acpi_ex_resolve_operands(op->common.aml_opcode,
-					  ACPI_WALK_OPERANDS, walk_state);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	operand = &walk_state->operands[0];
-
-	/* Find the ACPI table */
-
-	status = acpi_tb_find_table(operand[0]->string.pointer,
-				    operand[1]->string.pointer,
-				    operand[2]->string.pointer, &table_index);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	acpi_ut_remove_reference(operand[0]);
-	acpi_ut_remove_reference(operand[1]);
-	acpi_ut_remove_reference(operand[2]);
-
-	status = acpi_get_table_by_index(table_index, &table);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (!obj_desc) {
-		return_ACPI_STATUS(AE_NOT_EXIST);
-	}
-
-	obj_desc->region.address =
-	    (acpi_physical_address) ACPI_TO_INTEGER(table);
-	obj_desc->region.length = table->length;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
-			  obj_desc,
-			  ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
-			  obj_desc->region.length));
-
-	/* Now the address and length are valid for this opregion */
-
-	obj_desc->region.flags |= AOPOBJ_DATA_VALID;
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_eval_data_object_operands
- *
- * PARAMETERS:  walk_state      - Current walk
- *              Op              - A valid data_object Op object
- *              obj_desc        - data_object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Get the operands and complete the following data object types:
- *              Buffer, Package.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
-				  union acpi_parse_object *op,
-				  union acpi_operand_object *obj_desc)
-{
-	acpi_status status;
-	union acpi_operand_object *arg_desc;
-	u32 length;
-
-	ACPI_FUNCTION_TRACE(ds_eval_data_object_operands);
-
-	/* The first operand (for all of these data objects) is the length */
-
-	/*
-	 * Set proper index into operand stack for acpi_ds_obj_stack_push
-	 * invoked inside acpi_ds_create_operand.
-	 */
-	walk_state->operand_index = walk_state->num_operands;
-
-	status = acpi_ds_create_operand(walk_state, op->common.value.arg, 1);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_ex_resolve_operands(walk_state->opcode,
-					  &(walk_state->
-					    operands[walk_state->num_operands -
-						     1]), walk_state);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Extract length operand */
-
-	arg_desc = walk_state->operands[walk_state->num_operands - 1];
-	length = (u32) arg_desc->integer.value;
-
-	/* Cleanup for length operand */
-
-	status = acpi_ds_obj_stack_pop(1, walk_state);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	acpi_ut_remove_reference(arg_desc);
-
-	/*
-	 * Create the actual data object
-	 */
-	switch (op->common.aml_opcode) {
-	case AML_BUFFER_OP:
-
-		status =
-		    acpi_ds_build_internal_buffer_obj(walk_state, op, length,
-						      &obj_desc);
-		break;
-
-	case AML_PACKAGE_OP:
-	case AML_VAR_PACKAGE_OP:
-
-		status =
-		    acpi_ds_build_internal_package_obj(walk_state, op, length,
-						       &obj_desc);
-		break;
-
-	default:
-		return_ACPI_STATUS(AE_AML_BAD_OPCODE);
-	}
-
-	if (ACPI_SUCCESS(status)) {
-		/*
-		 * Return the object in the walk_state, unless the parent is a package -
-		 * in this case, the return object will be stored in the parse tree
-		 * for the package.
-		 */
-		if ((!op->common.parent) ||
-		    ((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) &&
-		     (op->common.parent->common.aml_opcode !=
-		      AML_VAR_PACKAGE_OP)
-		     && (op->common.parent->common.aml_opcode != AML_NAME_OP))) {
-			walk_state->result_obj = obj_desc;
-		}
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_eval_bank_field_operands
- *
- * PARAMETERS:  walk_state      - Current walk
- *              Op              - A valid bank_field Op object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Get bank_field bank_value
- *              Called from acpi_ds_exec_end_op during bank_field parse tree walk
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_eval_bank_field_operands(struct acpi_walk_state *walk_state,
-				 union acpi_parse_object *op)
-{
-	acpi_status status;
-	union acpi_operand_object *obj_desc;
-	union acpi_operand_object *operand_desc;
-	struct acpi_namespace_node *node;
-	union acpi_parse_object *next_op;
-	union acpi_parse_object *arg;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_eval_bank_field_operands, op);
-
-	/*
-	 * This is where we evaluate the bank_value field of the
-	 * bank_field declaration
-	 */
-
-	/* next_op points to the op that holds the Region */
-
-	next_op = op->common.value.arg;
-
-	/* next_op points to the op that holds the Bank Register */
-
-	next_op = next_op->common.next;
-
-	/* next_op points to the op that holds the Bank Value */
-
-	next_op = next_op->common.next;
-
-	/*
-	 * Set proper index into operand stack for acpi_ds_obj_stack_push
-	 * invoked inside acpi_ds_create_operand.
-	 *
-	 * We use walk_state->Operands[0] to store the evaluated bank_value
-	 */
-	walk_state->operand_index = 0;
-
-	status = acpi_ds_create_operand(walk_state, next_op, 0);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_ex_resolve_to_value(&walk_state->operands[0], walk_state);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS,
-			   acpi_ps_get_opcode_name(op->common.aml_opcode), 1);
-	/*
-	 * Get the bank_value operand and save it
-	 * (at Top of stack)
-	 */
-	operand_desc = walk_state->operands[0];
-
-	/* Arg points to the start Bank Field */
-
-	arg = acpi_ps_get_arg(op, 4);
-	while (arg) {
-
-		/* Ignore OFFSET and ACCESSAS terms here */
-
-		if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
-			node = arg->common.node;
-
-			obj_desc = acpi_ns_get_attached_object(node);
-			if (!obj_desc) {
-				return_ACPI_STATUS(AE_NOT_EXIST);
-			}
-
-			obj_desc->bank_field.value =
-			    (u32) operand_desc->integer.value;
-		}
-
-		/* Move to next field in the list */
-
-		arg = arg->common.next;
-	}
-
-	acpi_ut_remove_reference(operand_desc);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_exec_begin_control_op
- *
- * PARAMETERS:  walk_state      - Current walk state
- *              Op              - The control Op
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Handles all control ops encountered during control method
- *              execution.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
-			      union acpi_parse_object *op)
-{
-	acpi_status status = AE_OK;
-	union acpi_generic_state *control_state;
-
-	ACPI_FUNCTION_NAME(ds_exec_begin_control_op);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p Opcode=%2.2X State=%p\n", op,
-			  op->common.aml_opcode, walk_state));
-
-	switch (op->common.aml_opcode) {
-	case AML_IF_OP:
-	case AML_WHILE_OP:
-
-		/*
-		 * IF/WHILE: Create a new control state to manage these
-		 * constructs. We need to manage these as a stack, in order
-		 * to handle nesting.
-		 */
-		control_state = acpi_ut_create_control_state();
-		if (!control_state) {
-			status = AE_NO_MEMORY;
-			break;
-		}
-		/*
-		 * Save a pointer to the predicate for multiple executions
-		 * of a loop
-		 */
-		control_state->control.aml_predicate_start =
-		    walk_state->parser_state.aml - 1;
-		control_state->control.package_end =
-		    walk_state->parser_state.pkg_end;
-		control_state->control.opcode = op->common.aml_opcode;
-
-		/* Push the control state on this walk's control stack */
-
-		acpi_ut_push_generic_state(&walk_state->control_state,
-					   control_state);
-		break;
-
-	case AML_ELSE_OP:
-
-		/* Predicate is in the state object */
-		/* If predicate is true, the IF was executed, ignore ELSE part */
-
-		if (walk_state->last_predicate) {
-			status = AE_CTRL_TRUE;
-		}
-
-		break;
-
-	case AML_RETURN_OP:
-
-		break;
-
-	default:
-		break;
-	}
-
-	return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_exec_end_control_op
- *
- * PARAMETERS:  walk_state      - Current walk state
- *              Op              - The control Op
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Handles all control ops encountered during control method
- *              execution.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
-			    union acpi_parse_object * op)
-{
-	acpi_status status = AE_OK;
-	union acpi_generic_state *control_state;
-
-	ACPI_FUNCTION_NAME(ds_exec_end_control_op);
-
-	switch (op->common.aml_opcode) {
-	case AML_IF_OP:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[IF_OP] Op=%p\n", op));
-
-		/*
-		 * Save the result of the predicate in case there is an
-		 * ELSE to come
-		 */
-		walk_state->last_predicate =
-		    (u8) walk_state->control_state->common.value;
-
-		/*
-		 * Pop the control state that was created at the start
-		 * of the IF and free it
-		 */
-		control_state =
-		    acpi_ut_pop_generic_state(&walk_state->control_state);
-		acpi_ut_delete_generic_state(control_state);
-		break;
-
-	case AML_ELSE_OP:
-
-		break;
-
-	case AML_WHILE_OP:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op));
-
-		if (walk_state->control_state->common.value) {
-
-			/* Predicate was true, go back and evaluate it again! */
-
-			status = AE_CTRL_PENDING;
-		}
-
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "[WHILE_OP] termination! Op=%p\n", op));
-
-		/* Pop this control state and free it */
-
-		control_state =
-		    acpi_ut_pop_generic_state(&walk_state->control_state);
-
-		walk_state->aml_last_while =
-		    control_state->control.aml_predicate_start;
-		acpi_ut_delete_generic_state(control_state);
-		break;
-
-	case AML_RETURN_OP:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "[RETURN_OP] Op=%p Arg=%p\n", op,
-				  op->common.value.arg));
-
-		/*
-		 * One optional operand -- the return value
-		 * It can be either an immediate operand or a result that
-		 * has been bubbled up the tree
-		 */
-		if (op->common.value.arg) {
-
-			/* Since we have a real Return(), delete any implicit return */
-
-			acpi_ds_clear_implicit_return(walk_state);
-
-			/* Return statement has an immediate operand */
-
-			status =
-			    acpi_ds_create_operands(walk_state,
-						    op->common.value.arg);
-			if (ACPI_FAILURE(status)) {
-				return (status);
-			}
-
-			/*
-			 * If value being returned is a Reference (such as
-			 * an arg or local), resolve it now because it may
-			 * cease to exist at the end of the method.
-			 */
-			status =
-			    acpi_ex_resolve_to_value(&walk_state->operands[0],
-						     walk_state);
-			if (ACPI_FAILURE(status)) {
-				return (status);
-			}
-
-			/*
-			 * Get the return value and save as the last result
-			 * value.  This is the only place where walk_state->return_desc
-			 * is set to anything other than zero!
-			 */
-			walk_state->return_desc = walk_state->operands[0];
-		} else if (walk_state->result_count) {
-
-			/* Since we have a real Return(), delete any implicit return */
-
-			acpi_ds_clear_implicit_return(walk_state);
-
-			/*
-			 * The return value has come from a previous calculation.
-			 *
-			 * If value being returned is a Reference (such as
-			 * an arg or local), resolve it now because it may
-			 * cease to exist at the end of the method.
-			 *
-			 * Allow references created by the Index operator to return unchanged.
-			 */
-			if ((ACPI_GET_DESCRIPTOR_TYPE
-			     (walk_state->results->results.obj_desc[0]) ==
-			     ACPI_DESC_TYPE_OPERAND)
-			    &&
-			    (ACPI_GET_OBJECT_TYPE
-			     (walk_state->results->results.obj_desc[0]) ==
-			     ACPI_TYPE_LOCAL_REFERENCE)
-			    && ((walk_state->results->results.obj_desc[0])->
-				reference.class != ACPI_REFCLASS_INDEX)) {
-				status =
-				    acpi_ex_resolve_to_value(&walk_state->
-							     results->results.
-							     obj_desc[0],
-							     walk_state);
-				if (ACPI_FAILURE(status)) {
-					return (status);
-				}
-			}
-
-			walk_state->return_desc =
-			    walk_state->results->results.obj_desc[0];
-		} else {
-			/* No return operand */
-
-			if (walk_state->num_operands) {
-				acpi_ut_remove_reference(walk_state->
-							 operands[0]);
-			}
-
-			walk_state->operands[0] = NULL;
-			walk_state->num_operands = 0;
-			walk_state->return_desc = NULL;
-		}
-
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "Completed RETURN_OP State=%p, RetVal=%p\n",
-				  walk_state, walk_state->return_desc));
-
-		/* End the control method execution right now */
-
-		status = AE_CTRL_TERMINATE;
-		break;
-
-	case AML_NOOP_OP:
-
-		/* Just do nothing! */
-		break;
-
-	case AML_BREAK_POINT_OP:
-
-		/* Call up to the OS service layer to handle this */
-
-		status =
-		    acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
-				   "Executed AML Breakpoint opcode");
-
-		/* If and when it returns, all done. */
-
-		break;
-
-	case AML_BREAK_OP:
-	case AML_CONTINUE_OP:	/* ACPI 2.0 */
-
-		/* Pop and delete control states until we find a while */
-
-		while (walk_state->control_state &&
-		       (walk_state->control_state->control.opcode !=
-			AML_WHILE_OP)) {
-			control_state =
-			    acpi_ut_pop_generic_state(&walk_state->
-						      control_state);
-			acpi_ut_delete_generic_state(control_state);
-		}
-
-		/* No while found? */
-
-		if (!walk_state->control_state) {
-			return (AE_AML_NO_WHILE);
-		}
-
-		/* Was: walk_state->aml_last_while = walk_state->control_state->Control.aml_predicate_start; */
-
-		walk_state->aml_last_while =
-		    walk_state->control_state->control.package_end;
-
-		/* Return status depending on opcode */
-
-		if (op->common.aml_opcode == AML_BREAK_OP) {
-			status = AE_CTRL_BREAK;
-		} else {
-			status = AE_CTRL_CONTINUE;
-		}
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Unknown control opcode=%X Op=%p",
-			    op->common.aml_opcode, op));
-
-		status = AE_AML_BAD_OPCODE;
-		break;
-	}
-
-	return (status);
-}
diff --git a/drivers/acpi/dispatcher/dsutils.c b/drivers/acpi/dispatcher/dsutils.c
deleted file mode 100644
index b398982..0000000
--- a/drivers/acpi/dispatcher/dsutils.c
+++ /dev/null
@@ -1,868 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: dsutils - Dispatcher utilities
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/amlcode.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acdebug.h>
-
-#define _COMPONENT          ACPI_DISPATCHER
-ACPI_MODULE_NAME("dsutils")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_clear_implicit_return
- *
- * PARAMETERS:  walk_state          - Current State
- *
- * RETURN:      None.
- *
- * DESCRIPTION: Clear and remove a reference on an implicit return value.  Used
- *              to delete "stale" return values (if enabled, the return value
- *              from every operator is saved at least momentarily, in case the
- *              parent method exits.)
- *
- ******************************************************************************/
-void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state)
-{
-	ACPI_FUNCTION_NAME(ds_clear_implicit_return);
-
-	/*
-	 * Slack must be enabled for this feature
-	 */
-	if (!acpi_gbl_enable_interpreter_slack) {
-		return;
-	}
-
-	if (walk_state->implicit_return_obj) {
-		/*
-		 * Delete any "stale" implicit return. However, in
-		 * complex statements, the implicit return value can be
-		 * bubbled up several levels.
-		 */
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "Removing reference on stale implicit return obj %p\n",
-				  walk_state->implicit_return_obj));
-
-		acpi_ut_remove_reference(walk_state->implicit_return_obj);
-		walk_state->implicit_return_obj = NULL;
-	}
-}
-
-#ifndef ACPI_NO_METHOD_EXECUTION
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_do_implicit_return
- *
- * PARAMETERS:  return_desc         - The return value
- *              walk_state          - Current State
- *              add_reference       - True if a reference should be added to the
- *                                    return object
- *
- * RETURN:      TRUE if implicit return enabled, FALSE otherwise
- *
- * DESCRIPTION: Implements the optional "implicit return".  We save the result
- *              of every ASL operator and control method invocation in case the
- *              parent method exits.  Before storing a new return value, we
- *              delete the previous return value.
- *
- ******************************************************************************/
-
-u8
-acpi_ds_do_implicit_return(union acpi_operand_object *return_desc,
-			   struct acpi_walk_state *walk_state, u8 add_reference)
-{
-	ACPI_FUNCTION_NAME(ds_do_implicit_return);
-
-	/*
-	 * Slack must be enabled for this feature, and we must
-	 * have a valid return object
-	 */
-	if ((!acpi_gbl_enable_interpreter_slack) || (!return_desc)) {
-		return (FALSE);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "Result %p will be implicitly returned; Prev=%p\n",
-			  return_desc, walk_state->implicit_return_obj));
-
-	/*
-	 * Delete any "stale" implicit return value first. However, in
-	 * complex statements, the implicit return value can be
-	 * bubbled up several levels, so we don't clear the value if it
-	 * is the same as the return_desc.
-	 */
-	if (walk_state->implicit_return_obj) {
-		if (walk_state->implicit_return_obj == return_desc) {
-			return (TRUE);
-		}
-		acpi_ds_clear_implicit_return(walk_state);
-	}
-
-	/* Save the implicit return value, add a reference if requested */
-
-	walk_state->implicit_return_obj = return_desc;
-	if (add_reference) {
-		acpi_ut_add_reference(return_desc);
-	}
-
-	return (TRUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_is_result_used
- *
- * PARAMETERS:  Op                  - Current Op
- *              walk_state          - Current State
- *
- * RETURN:      TRUE if result is used, FALSE otherwise
- *
- * DESCRIPTION: Check if a result object will be used by the parent
- *
- ******************************************************************************/
-
-u8
-acpi_ds_is_result_used(union acpi_parse_object * op,
-		       struct acpi_walk_state * walk_state)
-{
-	const struct acpi_opcode_info *parent_info;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_is_result_used, op);
-
-	/* Must have both an Op and a Result Object */
-
-	if (!op) {
-		ACPI_ERROR((AE_INFO, "Null Op"));
-		return_UINT8(TRUE);
-	}
-
-	/*
-	 * We know that this operator is not a
-	 * Return() operator (would not come here.) The following code is the
-	 * optional support for a so-called "implicit return". Some AML code
-	 * assumes that the last value of the method is "implicitly" returned
-	 * to the caller. Just save the last result as the return value.
-	 * NOTE: this is optional because the ASL language does not actually
-	 * support this behavior.
-	 */
-	(void)acpi_ds_do_implicit_return(walk_state->result_obj, walk_state,
-					 TRUE);
-
-	/*
-	 * Now determine if the parent will use the result
-	 *
-	 * If there is no parent, or the parent is a scope_op, we are executing
-	 * at the method level. An executing method typically has no parent,
-	 * since each method is parsed separately.  A method invoked externally
-	 * via execute_control_method has a scope_op as the parent.
-	 */
-	if ((!op->common.parent) ||
-	    (op->common.parent->common.aml_opcode == AML_SCOPE_OP)) {
-
-		/* No parent, the return value cannot possibly be used */
-
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "At Method level, result of [%s] not used\n",
-				  acpi_ps_get_opcode_name(op->common.
-							  aml_opcode)));
-		return_UINT8(FALSE);
-	}
-
-	/* Get info on the parent. The root_op is AML_SCOPE */
-
-	parent_info =
-	    acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode);
-	if (parent_info->class == AML_CLASS_UNKNOWN) {
-		ACPI_ERROR((AE_INFO, "Unknown parent opcode Op=%p", op));
-		return_UINT8(FALSE);
-	}
-
-	/*
-	 * Decide what to do with the result based on the parent.  If
-	 * the parent opcode will not use the result, delete the object.
-	 * Otherwise leave it as is, it will be deleted when it is used
-	 * as an operand later.
-	 */
-	switch (parent_info->class) {
-	case AML_CLASS_CONTROL:
-
-		switch (op->common.parent->common.aml_opcode) {
-		case AML_RETURN_OP:
-
-			/* Never delete the return value associated with a return opcode */
-
-			goto result_used;
-
-		case AML_IF_OP:
-		case AML_WHILE_OP:
-
-			/*
-			 * If we are executing the predicate AND this is the predicate op,
-			 * we will use the return value
-			 */
-			if ((walk_state->control_state->common.state ==
-			     ACPI_CONTROL_PREDICATE_EXECUTING)
-			    && (walk_state->control_state->control.
-				predicate_op == op)) {
-				goto result_used;
-			}
-			break;
-
-		default:
-			/* Ignore other control opcodes */
-			break;
-		}
-
-		/* The general control opcode returns no result */
-
-		goto result_not_used;
-
-	case AML_CLASS_CREATE:
-
-		/*
-		 * These opcodes allow term_arg(s) as operands and therefore
-		 * the operands can be method calls.  The result is used.
-		 */
-		goto result_used;
-
-	case AML_CLASS_NAMED_OBJECT:
-
-		if ((op->common.parent->common.aml_opcode == AML_REGION_OP) ||
-		    (op->common.parent->common.aml_opcode == AML_DATA_REGION_OP)
-		    || (op->common.parent->common.aml_opcode == AML_PACKAGE_OP)
-		    || (op->common.parent->common.aml_opcode ==
-			AML_VAR_PACKAGE_OP)
-		    || (op->common.parent->common.aml_opcode == AML_BUFFER_OP)
-		    || (op->common.parent->common.aml_opcode ==
-			AML_INT_EVAL_SUBTREE_OP)
-		    || (op->common.parent->common.aml_opcode ==
-			AML_BANK_FIELD_OP)) {
-			/*
-			 * These opcodes allow term_arg(s) as operands and therefore
-			 * the operands can be method calls.  The result is used.
-			 */
-			goto result_used;
-		}
-
-		goto result_not_used;
-
-	default:
-
-		/*
-		 * In all other cases, the parent will actually use the return
-		 * object, so keep it.
-		 */
-		goto result_used;
-	}
-
-      result_used:
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "Result of [%s] used by Parent [%s] Op=%p\n",
-			  acpi_ps_get_opcode_name(op->common.aml_opcode),
-			  acpi_ps_get_opcode_name(op->common.parent->common.
-						  aml_opcode), op));
-
-	return_UINT8(TRUE);
-
-      result_not_used:
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "Result of [%s] not used by Parent [%s] Op=%p\n",
-			  acpi_ps_get_opcode_name(op->common.aml_opcode),
-			  acpi_ps_get_opcode_name(op->common.parent->common.
-						  aml_opcode), op));
-
-	return_UINT8(FALSE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_delete_result_if_not_used
- *
- * PARAMETERS:  Op              - Current parse Op
- *              result_obj      - Result of the operation
- *              walk_state      - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Used after interpretation of an opcode.  If there is an internal
- *              result descriptor, check if the parent opcode will actually use
- *              this result.  If not, delete the result now so that it will
- *              not become orphaned.
- *
- ******************************************************************************/
-
-void
-acpi_ds_delete_result_if_not_used(union acpi_parse_object *op,
-				  union acpi_operand_object *result_obj,
-				  struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_delete_result_if_not_used, result_obj);
-
-	if (!op) {
-		ACPI_ERROR((AE_INFO, "Null Op"));
-		return_VOID;
-	}
-
-	if (!result_obj) {
-		return_VOID;
-	}
-
-	if (!acpi_ds_is_result_used(op, walk_state)) {
-
-		/* Must pop the result stack (obj_desc should be equal to result_obj) */
-
-		status = acpi_ds_result_pop(&obj_desc, walk_state);
-		if (ACPI_SUCCESS(status)) {
-			acpi_ut_remove_reference(result_obj);
-		}
-	}
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_resolve_operands
- *
- * PARAMETERS:  walk_state          - Current walk state with operands on stack
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Resolve all operands to their values.  Used to prepare
- *              arguments to a control method invocation (a call from one
- *              method to another.)
- *
- ******************************************************************************/
-
-acpi_status acpi_ds_resolve_operands(struct acpi_walk_state *walk_state)
-{
-	u32 i;
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_resolve_operands, walk_state);
-
-	/*
-	 * Attempt to resolve each of the valid operands
-	 * Method arguments are passed by reference, not by value.  This means
-	 * that the actual objects are passed, not copies of the objects.
-	 */
-	for (i = 0; i < walk_state->num_operands; i++) {
-		status =
-		    acpi_ex_resolve_to_value(&walk_state->operands[i],
-					     walk_state);
-		if (ACPI_FAILURE(status)) {
-			break;
-		}
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_clear_operands
- *
- * PARAMETERS:  walk_state          - Current walk state with operands on stack
- *
- * RETURN:      None
- *
- * DESCRIPTION: Clear all operands on the current walk state operand stack.
- *
- ******************************************************************************/
-
-void acpi_ds_clear_operands(struct acpi_walk_state *walk_state)
-{
-	u32 i;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_clear_operands, walk_state);
-
-	/* Remove a reference on each operand on the stack */
-
-	for (i = 0; i < walk_state->num_operands; i++) {
-		/*
-		 * Remove a reference to all operands, including both
-		 * "Arguments" and "Targets".
-		 */
-		acpi_ut_remove_reference(walk_state->operands[i]);
-		walk_state->operands[i] = NULL;
-	}
-
-	walk_state->num_operands = 0;
-	return_VOID;
-}
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_create_operand
- *
- * PARAMETERS:  walk_state      - Current walk state
- *              Arg             - Parse object for the argument
- *              arg_index       - Which argument (zero based)
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Translate a parse tree object that is an argument to an AML
- *              opcode to the equivalent interpreter object.  This may include
- *              looking up a name or entering a new name into the internal
- *              namespace.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_create_operand(struct acpi_walk_state *walk_state,
-		       union acpi_parse_object *arg, u32 arg_index)
-{
-	acpi_status status = AE_OK;
-	char *name_string;
-	u32 name_length;
-	union acpi_operand_object *obj_desc;
-	union acpi_parse_object *parent_op;
-	u16 opcode;
-	acpi_interpreter_mode interpreter_mode;
-	const struct acpi_opcode_info *op_info;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_create_operand, arg);
-
-	/* A valid name must be looked up in the namespace */
-
-	if ((arg->common.aml_opcode == AML_INT_NAMEPATH_OP) &&
-	    (arg->common.value.string) &&
-	    !(arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Getting a name: Arg=%p\n",
-				  arg));
-
-		/* Get the entire name string from the AML stream */
-
-		status =
-		    acpi_ex_get_name_string(ACPI_TYPE_ANY,
-					    arg->common.value.buffer,
-					    &name_string, &name_length);
-
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		/* All prefixes have been handled, and the name is in name_string */
-
-		/*
-		 * Special handling for buffer_field declarations. This is a deferred
-		 * opcode that unfortunately defines the field name as the last
-		 * parameter instead of the first.  We get here when we are performing
-		 * the deferred execution, so the actual name of the field is already
-		 * in the namespace.  We don't want to attempt to look it up again
-		 * because we may be executing in a different scope than where the
-		 * actual opcode exists.
-		 */
-		if ((walk_state->deferred_node) &&
-		    (walk_state->deferred_node->type == ACPI_TYPE_BUFFER_FIELD)
-		    && (arg_index ==
-			(u32) ((walk_state->opcode ==
-				AML_CREATE_FIELD_OP) ? 3 : 2))) {
-			obj_desc =
-			    ACPI_CAST_PTR(union acpi_operand_object,
-					  walk_state->deferred_node);
-			status = AE_OK;
-		} else {	/* All other opcodes */
-
-			/*
-			 * Differentiate between a namespace "create" operation
-			 * versus a "lookup" operation (IMODE_LOAD_PASS2 vs.
-			 * IMODE_EXECUTE) in order to support the creation of
-			 * namespace objects during the execution of control methods.
-			 */
-			parent_op = arg->common.parent;
-			op_info =
-			    acpi_ps_get_opcode_info(parent_op->common.
-						    aml_opcode);
-			if ((op_info->flags & AML_NSNODE)
-			    && (parent_op->common.aml_opcode !=
-				AML_INT_METHODCALL_OP)
-			    && (parent_op->common.aml_opcode != AML_REGION_OP)
-			    && (parent_op->common.aml_opcode !=
-				AML_INT_NAMEPATH_OP)) {
-
-				/* Enter name into namespace if not found */
-
-				interpreter_mode = ACPI_IMODE_LOAD_PASS2;
-			} else {
-				/* Return a failure if name not found */
-
-				interpreter_mode = ACPI_IMODE_EXECUTE;
-			}
-
-			status =
-			    acpi_ns_lookup(walk_state->scope_info, name_string,
-					   ACPI_TYPE_ANY, interpreter_mode,
-					   ACPI_NS_SEARCH_PARENT |
-					   ACPI_NS_DONT_OPEN_SCOPE, walk_state,
-					   ACPI_CAST_INDIRECT_PTR(struct
-								  acpi_namespace_node,
-								  &obj_desc));
-			/*
-			 * The only case where we pass through (ignore) a NOT_FOUND
-			 * error is for the cond_ref_of opcode.
-			 */
-			if (status == AE_NOT_FOUND) {
-				if (parent_op->common.aml_opcode ==
-				    AML_COND_REF_OF_OP) {
-					/*
-					 * For the Conditional Reference op, it's OK if
-					 * the name is not found;  We just need a way to
-					 * indicate this to the interpreter, set the
-					 * object to the root
-					 */
-					obj_desc = ACPI_CAST_PTR(union
-								 acpi_operand_object,
-								 acpi_gbl_root_node);
-					status = AE_OK;
-				} else {
-					/*
-					 * We just plain didn't find it -- which is a
-					 * very serious error at this point
-					 */
-					status = AE_AML_NAME_NOT_FOUND;
-				}
-			}
-
-			if (ACPI_FAILURE(status)) {
-				ACPI_ERROR_NAMESPACE(name_string, status);
-			}
-		}
-
-		/* Free the namestring created above */
-
-		ACPI_FREE(name_string);
-
-		/* Check status from the lookup */
-
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		/* Put the resulting object onto the current object stack */
-
-		status = acpi_ds_obj_stack_push(obj_desc, walk_state);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-		ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object
-				   (obj_desc, walk_state));
-	} else {
-		/* Check for null name case */
-
-		if ((arg->common.aml_opcode == AML_INT_NAMEPATH_OP) &&
-		    !(arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
-			/*
-			 * If the name is null, this means that this is an
-			 * optional result parameter that was not specified
-			 * in the original ASL.  Create a Zero Constant for a
-			 * placeholder.  (Store to a constant is a Noop.)
-			 */
-			opcode = AML_ZERO_OP;	/* Has no arguments! */
-
-			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-					  "Null namepath: Arg=%p\n", arg));
-		} else {
-			opcode = arg->common.aml_opcode;
-		}
-
-		/* Get the object type of the argument */
-
-		op_info = acpi_ps_get_opcode_info(opcode);
-		if (op_info->object_type == ACPI_TYPE_INVALID) {
-			return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
-		}
-
-		if ((op_info->flags & AML_HAS_RETVAL)
-		    || (arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
-			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-					  "Argument previously created, already stacked\n"));
-
-			ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object
-					   (walk_state->
-					    operands[walk_state->num_operands -
-						     1], walk_state));
-
-			/*
-			 * Use value that was already previously returned
-			 * by the evaluation of this argument
-			 */
-			status = acpi_ds_result_pop(&obj_desc, walk_state);
-			if (ACPI_FAILURE(status)) {
-				/*
-				 * Only error is underflow, and this indicates
-				 * a missing or null operand!
-				 */
-				ACPI_EXCEPTION((AE_INFO, status,
-						"Missing or null operand"));
-				return_ACPI_STATUS(status);
-			}
-		} else {
-			/* Create an ACPI_INTERNAL_OBJECT for the argument */
-
-			obj_desc =
-			    acpi_ut_create_internal_object(op_info->
-							   object_type);
-			if (!obj_desc) {
-				return_ACPI_STATUS(AE_NO_MEMORY);
-			}
-
-			/* Initialize the new object */
-
-			status =
-			    acpi_ds_init_object_from_op(walk_state, arg, opcode,
-							&obj_desc);
-			if (ACPI_FAILURE(status)) {
-				acpi_ut_delete_object_desc(obj_desc);
-				return_ACPI_STATUS(status);
-			}
-		}
-
-		/* Put the operand object on the object stack */
-
-		status = acpi_ds_obj_stack_push(obj_desc, walk_state);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object
-				   (obj_desc, walk_state));
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_create_operands
- *
- * PARAMETERS:  walk_state          - Current state
- *              first_arg           - First argument of a parser argument tree
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Convert an operator's arguments from a parse tree format to
- *              namespace objects and place those argument objects on the object
- *              stack in preparation for evaluation by the interpreter.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_create_operands(struct acpi_walk_state *walk_state,
-			union acpi_parse_object *first_arg)
-{
-	acpi_status status = AE_OK;
-	union acpi_parse_object *arg;
-	union acpi_parse_object *arguments[ACPI_OBJ_NUM_OPERANDS];
-	u32 arg_count = 0;
-	u32 index = walk_state->num_operands;
-	u32 i;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg);
-
-	/* Get all arguments in the list */
-
-	arg = first_arg;
-	while (arg) {
-		if (index >= ACPI_OBJ_NUM_OPERANDS) {
-			return_ACPI_STATUS(AE_BAD_DATA);
-		}
-
-		arguments[index] = arg;
-		walk_state->operands[index] = NULL;
-
-		/* Move on to next argument, if any */
-
-		arg = arg->common.next;
-		arg_count++;
-		index++;
-	}
-
-	index--;
-
-	/* This is the appropriate order in which to get objects from the result stack */
-
-	for (i = 0; i < arg_count; i++) {
-		arg = arguments[index];
-
-		/* Force the filling of the operand stack in inverse order */
-
-		walk_state->operand_index = (u8) index;
-
-		status = acpi_ds_create_operand(walk_state, arg, index);
-		if (ACPI_FAILURE(status)) {
-			goto cleanup;
-		}
-
-		index--;
-
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "Arg #%d (%p) done, Arg1=%p\n", index, arg,
-				  first_arg));
-	}
-
-	return_ACPI_STATUS(status);
-
-      cleanup:
-	/*
-	 * We must undo everything done above; meaning that we must
-	 * pop everything off of the operand stack and delete those
-	 * objects
-	 */
-	acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
-
-	ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %d", index));
-	return_ACPI_STATUS(status);
-}
-
-/*****************************************************************************
- *
- * FUNCTION:    acpi_ds_evaluate_name_path
- *
- * PARAMETERS:  walk_state      - Current state of the parse tree walk,
- *                                the opcode of current operation should be
- *                                AML_INT_NAMEPATH_OP
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Translate the -name_path- parse tree object to the equivalent
- *              interpreter object, convert it to a value if needed, duplicate
- *              it if needed, and push it onto the current result stack.
- *
- ****************************************************************************/
-
-acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state)
-{
-	acpi_status status = AE_OK;
-	union acpi_parse_object *op = walk_state->op;
-	union acpi_operand_object **operand = &walk_state->operands[0];
-	union acpi_operand_object *new_obj_desc;
-	u8 type;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_evaluate_name_path, walk_state);
-
-	if (!op->common.parent) {
-
-		/* This happens after certain exception processing */
-
-		goto exit;
-	}
-
-	if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
-	    (op->common.parent->common.aml_opcode == AML_VAR_PACKAGE_OP) ||
-	    (op->common.parent->common.aml_opcode == AML_REF_OF_OP)) {
-
-		/* TBD: Should we specify this feature as a bit of op_info->Flags of these opcodes? */
-
-		goto exit;
-	}
-
-	status = acpi_ds_create_operand(walk_state, op, 0);
-	if (ACPI_FAILURE(status)) {
-		goto exit;
-	}
-
-	if (op->common.flags & ACPI_PARSEOP_TARGET) {
-		new_obj_desc = *operand;
-		goto push_result;
-	}
-
-	type = ACPI_GET_OBJECT_TYPE(*operand);
-
-	status = acpi_ex_resolve_to_value(operand, walk_state);
-	if (ACPI_FAILURE(status)) {
-		goto exit;
-	}
-
-	if (type == ACPI_TYPE_INTEGER) {
-
-		/* It was incremented by acpi_ex_resolve_to_value */
-
-		acpi_ut_remove_reference(*operand);
-
-		status =
-		    acpi_ut_copy_iobject_to_iobject(*operand, &new_obj_desc,
-						    walk_state);
-		if (ACPI_FAILURE(status)) {
-			goto exit;
-		}
-	} else {
-		/*
-		 * The object was either newly created or is
-		 * a Namespace node - don't decrement it.
-		 */
-		new_obj_desc = *operand;
-	}
-
-	/* Cleanup for name-path operand */
-
-	status = acpi_ds_obj_stack_pop(1, walk_state);
-	if (ACPI_FAILURE(status)) {
-		walk_state->result_obj = new_obj_desc;
-		goto exit;
-	}
-
-      push_result:
-
-	walk_state->result_obj = new_obj_desc;
-
-	status = acpi_ds_result_push(walk_state->result_obj, walk_state);
-	if (ACPI_SUCCESS(status)) {
-
-		/* Force the result to be taken from the result stack */
-
-		op->common.flags |= ACPI_PARSEOP_IN_STACK;
-	}
-
-      exit:
-
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/dispatcher/dswexec.c
deleted file mode 100644
index 396fe12..0000000
--- a/drivers/acpi/dispatcher/dswexec.c
+++ /dev/null
@@ -1,745 +0,0 @@
-/******************************************************************************
- *
- * Module Name: dswexec - Dispatcher method execution callbacks;
- *                        dispatch to interpreter.
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/amlcode.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acdebug.h>
-
-#define _COMPONENT          ACPI_DISPATCHER
-ACPI_MODULE_NAME("dswexec")
-
-/*
- * Dispatch table for opcode classes
- */
-static ACPI_EXECUTE_OP acpi_gbl_op_type_dispatch[] = {
-	acpi_ex_opcode_0A_0T_1R,
-	acpi_ex_opcode_1A_0T_0R,
-	acpi_ex_opcode_1A_0T_1R,
-	acpi_ex_opcode_1A_1T_0R,
-	acpi_ex_opcode_1A_1T_1R,
-	acpi_ex_opcode_2A_0T_0R,
-	acpi_ex_opcode_2A_0T_1R,
-	acpi_ex_opcode_2A_1T_1R,
-	acpi_ex_opcode_2A_2T_1R,
-	acpi_ex_opcode_3A_0T_0R,
-	acpi_ex_opcode_3A_1T_1R,
-	acpi_ex_opcode_6A_0T_1R
-};
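
The table above maps each opcode "type" (encoded from the argument/target/result counts, e.g. 2A_1T_1R) to its interpreter handler, and acpi_ds_exec_end_op later indexes it with the opcode type. A minimal standalone sketch of the same dispatch-table idiom -- hypothetical names, not ACPICA code -- looks like this:

#include <stdio.h>

struct walk;				/* opaque execution state */

typedef int (*execute_op)(struct walk *w);

enum op_type { OP_TYPE_0A_0T_1R, OP_TYPE_1A_0T_0R, OP_TYPE_MAX };

static int exec_0a_0t_1r(struct walk *w) { (void)w; return 0; }
static int exec_1a_0t_0r(struct walk *w) { (void)w; return 0; }

/* One handler per opcode "type"; dispatch is a single table lookup */
static const execute_op op_type_dispatch[OP_TYPE_MAX] = {
	[OP_TYPE_0A_0T_1R] = exec_0a_0t_1r,
	[OP_TYPE_1A_0T_0R] = exec_1a_0t_0r,
};

static int dispatch(enum op_type type, struct walk *w)
{
	if (type >= OP_TYPE_MAX || !op_type_dispatch[type])
		return -1;		/* unknown or unimplemented type */
	return op_type_dispatch[type](w);
}

int main(void)
{
	printf("status = %d\n", dispatch(OP_TYPE_0A_0T_1R, NULL));
	return 0;
}
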
-
-/*****************************************************************************
- *
- * FUNCTION:    acpi_ds_get_predicate_value
- *
- * PARAMETERS:  walk_state      - Current state of the parse tree walk
- *              result_obj      - if non-zero, pop result from result stack
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Get the result of a predicate evaluation
- *
- ****************************************************************************/
-
-acpi_status
-acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
-			    union acpi_operand_object *result_obj)
-{
-	acpi_status status = AE_OK;
-	union acpi_operand_object *obj_desc;
-	union acpi_operand_object *local_obj_desc = NULL;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_get_predicate_value, walk_state);
-
-	walk_state->control_state->common.state = 0;
-
-	if (result_obj) {
-		status = acpi_ds_result_pop(&obj_desc, walk_state);
-		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"Could not get result from predicate evaluation"));
-
-			return_ACPI_STATUS(status);
-		}
-	} else {
-		status = acpi_ds_create_operand(walk_state, walk_state->op, 0);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		status =
-		    acpi_ex_resolve_to_value(&walk_state->operands[0],
-					     walk_state);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		obj_desc = walk_state->operands[0];
-	}
-
-	if (!obj_desc) {
-		ACPI_ERROR((AE_INFO,
-			    "No predicate ObjDesc=%p State=%p",
-			    obj_desc, walk_state));
-
-		return_ACPI_STATUS(AE_AML_NO_OPERAND);
-	}
-
-	/*
-	 * Result of predicate evaluation must be an Integer
-	 * object. Implicitly convert the argument if necessary.
-	 */
-	status = acpi_ex_convert_to_integer(obj_desc, &local_obj_desc, 16);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	if (ACPI_GET_OBJECT_TYPE(local_obj_desc) != ACPI_TYPE_INTEGER) {
-		ACPI_ERROR((AE_INFO,
-			    "Bad predicate (not an integer) ObjDesc=%p State=%p Type=%X",
-			    obj_desc, walk_state,
-			    ACPI_GET_OBJECT_TYPE(obj_desc)));
-
-		status = AE_AML_OPERAND_TYPE;
-		goto cleanup;
-	}
-
-	/* Truncate the predicate to 32-bits if necessary */
-
-	acpi_ex_truncate_for32bit_table(local_obj_desc);
-
-	/*
-	 * Save the result of the predicate evaluation on
-	 * the control stack
-	 */
-	if (local_obj_desc->integer.value) {
-		walk_state->control_state->common.value = TRUE;
-	} else {
-		/*
-		 * Predicate is FALSE, we will just toss the
-		 * rest of the package
-		 */
-		walk_state->control_state->common.value = FALSE;
-		status = AE_CTRL_FALSE;
-	}
-
-	/* Predicate can be used for an implicit return value */
-
-	(void)acpi_ds_do_implicit_return(local_obj_desc, walk_state, TRUE);
-
-      cleanup:
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%p\n",
-			  walk_state->control_state->common.value,
-			  walk_state->op));
-
-	/* Break to debugger to display result */
-
-	ACPI_DEBUGGER_EXEC(acpi_db_display_result_object
-			   (local_obj_desc, walk_state));
-
-	/*
-	 * Delete the predicate result object (we know that
-	 * we don't need it anymore)
-	 */
-	if (local_obj_desc != obj_desc) {
-		acpi_ut_remove_reference(local_obj_desc);
-	}
-	acpi_ut_remove_reference(obj_desc);
-
-	walk_state->control_state->common.state = ACPI_CONTROL_NORMAL;
-	return_ACPI_STATUS(status);
-}
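
As the function above shows, a predicate result must be an integer, is truncated to 32 bits when the AML table only supports 32-bit integers, and any non-zero value counts as TRUE. A standalone sketch of just that decision -- hypothetical, not ACPICA code -- is:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/*
 * Reduce a predicate value to TRUE/FALSE, truncating to 32 bits first
 * when the AML table only supports 32-bit integers.
 */
static bool predicate_is_true(uint64_t value, bool table_is_32bit)
{
	if (table_is_32bit)
		value &= 0xFFFFFFFF;	/* drop the upper 32 bits */

	return value != 0;
}

int main(void)
{
	/* Non-zero only in the upper 32 bits: TRUE for a 64-bit table,
	 * FALSE once truncated for a 32-bit table. */
	uint64_t v = 0x100000000ULL;

	printf("64-bit table: %d\n", predicate_is_true(v, false));
	printf("32-bit table: %d\n", predicate_is_true(v, true));
	return 0;
}
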
-
-/*****************************************************************************
- *
- * FUNCTION:    acpi_ds_exec_begin_op
- *
- * PARAMETERS:  walk_state      - Current state of the parse tree walk
- *              out_op          - Where to return op if a new one is created
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Descending callback used during the execution of control
- *              methods.  This is where most operators and operands are
- *              dispatched to the interpreter.
- *
- ****************************************************************************/
-
-acpi_status
-acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
-		      union acpi_parse_object **out_op)
-{
-	union acpi_parse_object *op;
-	acpi_status status = AE_OK;
-	u32 opcode_class;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_exec_begin_op, walk_state);
-
-	op = walk_state->op;
-	if (!op) {
-		status = acpi_ds_load2_begin_op(walk_state, out_op);
-		if (ACPI_FAILURE(status)) {
-			goto error_exit;
-		}
-
-		op = *out_op;
-		walk_state->op = op;
-		walk_state->opcode = op->common.aml_opcode;
-		walk_state->op_info =
-		    acpi_ps_get_opcode_info(op->common.aml_opcode);
-
-		if (acpi_ns_opens_scope(walk_state->op_info->object_type)) {
-			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-					  "(%s) Popping scope for Op %p\n",
-					  acpi_ut_get_type_name(walk_state->
-								op_info->
-								object_type),
-					  op));
-
-			status = acpi_ds_scope_stack_pop(walk_state);
-			if (ACPI_FAILURE(status)) {
-				goto error_exit;
-			}
-		}
-	}
-
-	if (op == walk_state->origin) {
-		if (out_op) {
-			*out_op = op;
-		}
-
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/*
-	 * If the previous opcode was a conditional, this opcode
-	 * must be the beginning of the associated predicate.
-	 * Save this knowledge in the current scope descriptor
-	 */
-	if ((walk_state->control_state) &&
-	    (walk_state->control_state->common.state ==
-	     ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Exec predicate Op=%p State=%p\n", op,
-				  walk_state));
-
-		walk_state->control_state->common.state =
-		    ACPI_CONTROL_PREDICATE_EXECUTING;
-
-		/* Save start of predicate */
-
-		walk_state->control_state->control.predicate_op = op;
-	}
-
-	opcode_class = walk_state->op_info->class;
-
-	/* We want to send namepaths to the load code */
-
-	if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
-		opcode_class = AML_CLASS_NAMED_OBJECT;
-	}
-
-	/*
-	 * Handle the opcode based upon the opcode type
-	 */
-	switch (opcode_class) {
-	case AML_CLASS_CONTROL:
-
-		status = acpi_ds_exec_begin_control_op(walk_state, op);
-		break;
-
-	case AML_CLASS_NAMED_OBJECT:
-
-		if (walk_state->walk_type & ACPI_WALK_METHOD) {
-			/*
-			 * Found a named object declaration during method execution;
-			 * we must enter this object into the namespace.  The created
-			 * object is temporary and will be deleted upon completion of
-			 * the execution of this method.
-			 */
-			status = acpi_ds_load2_begin_op(walk_state, NULL);
-		}
-
-		break;
-
-	case AML_CLASS_EXECUTE:
-	case AML_CLASS_CREATE:
-
-		break;
-
-	default:
-		break;
-	}
-
-	/* Nothing to do here during method execution */
-
-	return_ACPI_STATUS(status);
-
-      error_exit:
-	status = acpi_ds_method_error(status, walk_state);
-	return_ACPI_STATUS(status);
-}
-
-/*****************************************************************************
- *
- * FUNCTION:    acpi_ds_exec_end_op
- *
- * PARAMETERS:  walk_state      - Current state of the parse tree walk
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Ascending callback used during the execution of control
- *              methods.  The only thing we really need to do here is to
- *              notice the beginning of IF, ELSE, and WHILE blocks.
- *
- ****************************************************************************/
-
-acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
-{
-	union acpi_parse_object *op;
-	acpi_status status = AE_OK;
-	u32 op_type;
-	u32 op_class;
-	union acpi_parse_object *next_op;
-	union acpi_parse_object *first_arg;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_exec_end_op, walk_state);
-
-	op = walk_state->op;
-	op_type = walk_state->op_info->type;
-	op_class = walk_state->op_info->class;
-
-	if (op_class == AML_CLASS_UNKNOWN) {
-		ACPI_ERROR((AE_INFO, "Unknown opcode %X",
-			    op->common.aml_opcode));
-		return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
-	}
-
-	first_arg = op->common.value.arg;
-
-	/* Init the walk state */
-
-	walk_state->num_operands = 0;
-	walk_state->operand_index = 0;
-	walk_state->return_desc = NULL;
-	walk_state->result_obj = NULL;
-
-	/* Call debugger for single step support (DEBUG build only) */
-
-	ACPI_DEBUGGER_EXEC(status =
-			   acpi_db_single_step(walk_state, op, op_class));
-	ACPI_DEBUGGER_EXEC(if (ACPI_FAILURE(status)) {
-			   return_ACPI_STATUS(status);}
-	) ;
-
-	/* Decode the Opcode Class */
-
-	switch (op_class) {
-	case AML_CLASS_ARGUMENT:	/* Constants, literals, etc. */
-
-		if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
-			status = acpi_ds_evaluate_name_path(walk_state);
-			if (ACPI_FAILURE(status)) {
-				goto cleanup;
-			}
-		}
-		break;
-
-	case AML_CLASS_EXECUTE:	/* Most operators with arguments */
-
-		/* Build resolved operand stack */
-
-		status = acpi_ds_create_operands(walk_state, first_arg);
-		if (ACPI_FAILURE(status)) {
-			goto cleanup;
-		}
-
-		/*
-		 * All opcodes require operand resolution, with the only exceptions
-		 * being the object_type and size_of operators.
-		 */
-		if (!(walk_state->op_info->flags & AML_NO_OPERAND_RESOLVE)) {
-
-			/* Resolve all operands */
-
-			status = acpi_ex_resolve_operands(walk_state->opcode,
-							  &(walk_state->
-							    operands
-							    [walk_state->
-							     num_operands - 1]),
-							  walk_state);
-		}
-
-		if (ACPI_SUCCESS(status)) {
-			/*
-			 * Dispatch the request to the appropriate interpreter handler
-			 * routine.  There is one routine per opcode "type" based upon the
-			 * number of opcode arguments and return type.
-			 */
-			status =
-			    acpi_gbl_op_type_dispatch[op_type] (walk_state);
-		} else {
-			/*
-			 * Treat constructs of the form "Store(LocalX,LocalX)" as noops when the
-			 * Local is uninitialized.
-			 */
-			if ((status == AE_AML_UNINITIALIZED_LOCAL) &&
-			    (walk_state->opcode == AML_STORE_OP) &&
-			    (walk_state->operands[0]->common.type ==
-			     ACPI_TYPE_LOCAL_REFERENCE)
-			    && (walk_state->operands[1]->common.type ==
-				ACPI_TYPE_LOCAL_REFERENCE)
-			    && (walk_state->operands[0]->reference.class ==
-				walk_state->operands[1]->reference.class)
-			    && (walk_state->operands[0]->reference.value ==
-				walk_state->operands[1]->reference.value)) {
-				status = AE_OK;
-			} else {
-				ACPI_EXCEPTION((AE_INFO, status,
-						"While resolving operands for [%s]",
-						acpi_ps_get_opcode_name
-						(walk_state->opcode)));
-			}
-		}
-
-		/* Always delete the argument objects and clear the operand stack */
-
-		acpi_ds_clear_operands(walk_state);
-
-		/*
-		 * If a result object was returned from above, push it on the
-		 * current result stack
-		 */
-		if (ACPI_SUCCESS(status) && walk_state->result_obj) {
-			status =
-			    acpi_ds_result_push(walk_state->result_obj,
-						walk_state);
-		}
-		break;
-
-	default:
-
-		switch (op_type) {
-		case AML_TYPE_CONTROL:	/* Type 1 opcode, IF/ELSE/WHILE/NOOP */
-
-			/* 1 Operand, 0 external_result, 0 internal_result */
-
-			status = acpi_ds_exec_end_control_op(walk_state, op);
-
-			break;
-
-		case AML_TYPE_METHOD_CALL:
-
-			/*
-			 * If the method is referenced from within a package
-			 * declaration, it is not an invocation of the method, just
-			 * a reference to it.
-			 */
-			if ((op->asl.parent) &&
-			    ((op->asl.parent->asl.aml_opcode == AML_PACKAGE_OP)
-			     || (op->asl.parent->asl.aml_opcode ==
-				 AML_VAR_PACKAGE_OP))) {
-				ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-						  "Method Reference in a Package, Op=%p\n",
-						  op));
-
-				op->common.node =
-				    (struct acpi_namespace_node *)op->asl.value.
-				    arg->asl.node;
-				acpi_ut_add_reference(op->asl.value.arg->asl.
-						      node->object);
-				return_ACPI_STATUS(AE_OK);
-			}
-
-			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-					  "Method invocation, Op=%p\n", op));
-
-			/*
-			 * (AML_METHODCALL) Op->Asl.Value.Arg->Asl.Node contains
-			 * the method Node pointer
-			 */
-			/* next_op points to the op that holds the method name */
-
-			next_op = first_arg;
-
-			/* next_op points to first argument op */
-
-			next_op = next_op->common.next;
-
-			/*
-			 * Get the method's arguments and put them on the operand stack
-			 */
-			status = acpi_ds_create_operands(walk_state, next_op);
-			if (ACPI_FAILURE(status)) {
-				break;
-			}
-
-			/*
-			 * Since the operands will be passed to another control method,
-			 * we must resolve all local references here (Local variables,
-			 * arguments to *this* method, etc.)
-			 */
-			status = acpi_ds_resolve_operands(walk_state);
-			if (ACPI_FAILURE(status)) {
-
-				/* On error, clear all resolved operands */
-
-				acpi_ds_clear_operands(walk_state);
-				break;
-			}
-
-			/*
-			 * Tell the walk loop to preempt this running method and
-			 * execute the new method
-			 */
-			status = AE_CTRL_TRANSFER;
-
-			/*
-			 * Return now; we don't want to disturb anything,
-			 * especially the operand count!
-			 */
-			return_ACPI_STATUS(status);
-
-		case AML_TYPE_CREATE_FIELD:
-
-			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-					  "Executing CreateField Buffer/Index Op=%p\n",
-					  op));
-
-			status = acpi_ds_load2_end_op(walk_state);
-			if (ACPI_FAILURE(status)) {
-				break;
-			}
-
-			status =
-			    acpi_ds_eval_buffer_field_operands(walk_state, op);
-			break;
-
-		case AML_TYPE_CREATE_OBJECT:
-
-			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-					  "Executing CreateObject (Buffer/Package) Op=%p\n",
-					  op));
-
-			switch (op->common.parent->common.aml_opcode) {
-			case AML_NAME_OP:
-
-				/*
-				 * Put the Node on the object stack (Contains the ACPI Name
-				 * of this object)
-				 */
-				walk_state->operands[0] =
-				    (void *)op->common.parent->common.node;
-				walk_state->num_operands = 1;
-
-				status = acpi_ds_create_node(walk_state,
-							     op->common.parent->
-							     common.node,
-							     op->common.parent);
-				if (ACPI_FAILURE(status)) {
-					break;
-				}
-
-				/* Fall through */
-				/*lint -fallthrough */
-
-			case AML_INT_EVAL_SUBTREE_OP:
-
-				status =
-				    acpi_ds_eval_data_object_operands
-				    (walk_state, op,
-				     acpi_ns_get_attached_object(op->common.
-								 parent->common.
-								 node));
-				break;
-
-			default:
-
-				status =
-				    acpi_ds_eval_data_object_operands
-				    (walk_state, op, NULL);
-				break;
-			}
-
-			/*
-			 * If a result object was returned from above, push it on the
-			 * current result stack
-			 */
-			if (walk_state->result_obj) {
-				status =
-				    acpi_ds_result_push(walk_state->result_obj,
-							walk_state);
-			}
-			break;
-
-		case AML_TYPE_NAMED_FIELD:
-		case AML_TYPE_NAMED_COMPLEX:
-		case AML_TYPE_NAMED_SIMPLE:
-		case AML_TYPE_NAMED_NO_OBJ:
-
-			status = acpi_ds_load2_end_op(walk_state);
-			if (ACPI_FAILURE(status)) {
-				break;
-			}
-
-			if (op->common.aml_opcode == AML_REGION_OP) {
-				ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-						  "Executing OpRegion Address/Length Op=%p\n",
-						  op));
-
-				status =
-				    acpi_ds_eval_region_operands(walk_state,
-								 op);
-				if (ACPI_FAILURE(status)) {
-					break;
-				}
-			} else if (op->common.aml_opcode == AML_DATA_REGION_OP) {
-				ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-						  "Executing DataTableRegion Strings Op=%p\n",
-						  op));
-
-				status =
-				    acpi_ds_eval_table_region_operands
-				    (walk_state, op);
-				if (ACPI_FAILURE(status)) {
-					break;
-				}
-			} else if (op->common.aml_opcode == AML_BANK_FIELD_OP) {
-				ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-						  "Executing BankField Op=%p\n",
-						  op));
-
-				status =
-				    acpi_ds_eval_bank_field_operands(walk_state,
-								     op);
-				if (ACPI_FAILURE(status)) {
-					break;
-				}
-			}
-			break;
-
-		case AML_TYPE_UNDEFINED:
-
-			ACPI_ERROR((AE_INFO,
-				    "Undefined opcode type Op=%p", op));
-			return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
-
-		case AML_TYPE_BOGUS:
-
-			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-					  "Internal opcode=%X type Op=%p\n",
-					  walk_state->opcode, op));
-			break;
-
-		default:
-
-			ACPI_ERROR((AE_INFO,
-				    "Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p",
-				    op_class, op_type, op->common.aml_opcode,
-				    op));
-
-			status = AE_NOT_IMPLEMENTED;
-			break;
-		}
-	}
-
-	/*
-	 * ACPI 2.0 support for 64-bit integers: Truncate numeric
-	 * result value if we are executing from a 32-bit ACPI table
-	 */
-	acpi_ex_truncate_for32bit_table(walk_state->result_obj);
-
-	/*
-	 * Check if we just completed the evaluation of a
-	 * conditional predicate
-	 */
-	if ((ACPI_SUCCESS(status)) &&
-	    (walk_state->control_state) &&
-	    (walk_state->control_state->common.state ==
-	     ACPI_CONTROL_PREDICATE_EXECUTING) &&
-	    (walk_state->control_state->control.predicate_op == op)) {
-		status =
-		    acpi_ds_get_predicate_value(walk_state,
-						walk_state->result_obj);
-		walk_state->result_obj = NULL;
-	}
-
-      cleanup:
-
-	if (walk_state->result_obj) {
-
-		/* Break to debugger to display result */
-
-		ACPI_DEBUGGER_EXEC(acpi_db_display_result_object
-				   (walk_state->result_obj, walk_state));
-
-		/*
-		 * Delete the result op if and only if:
-		 * Parent will not use the result -- such as any
-		 * non-nested type2 op in a method (parent will be method)
-		 */
-		acpi_ds_delete_result_if_not_used(op, walk_state->result_obj,
-						  walk_state);
-	}
-#ifdef _UNDER_DEVELOPMENT
-
-	if (walk_state->parser_state.aml == walk_state->parser_state.aml_end) {
-		acpi_db_method_end(walk_state);
-	}
-#endif
-
-	/* Invoke exception handler on error */
-
-	if (ACPI_FAILURE(status)) {
-		status = acpi_ds_method_error(status, walk_state);
-	}
-
-	/* Always clear the object stack */
-
-	walk_state->num_operands = 0;
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/dispatcher/dswload.c b/drivers/acpi/dispatcher/dswload.c
deleted file mode 100644
index dff7a3e..0000000
--- a/drivers/acpi/dispatcher/dswload.c
+++ /dev/null
@@ -1,1202 +0,0 @@
-/******************************************************************************
- *
- * Module Name: dswload - Dispatcher namespace load callbacks
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/amlcode.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acevents.h>
-
-#ifdef ACPI_ASL_COMPILER
-#include <acpi/acdisasm.h>
-#endif
-
-#define _COMPONENT          ACPI_DISPATCHER
-ACPI_MODULE_NAME("dswload")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_init_callbacks
- *
- * PARAMETERS:  walk_state      - Current state of the parse tree walk
- *              pass_number     - 1, 2, or 3
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Init walk state callbacks
- *
- ******************************************************************************/
-acpi_status
-acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number)
-{
-
-	switch (pass_number) {
-	case 1:
-		walk_state->parse_flags = ACPI_PARSE_LOAD_PASS1 |
-		    ACPI_PARSE_DELETE_TREE;
-		walk_state->descending_callback = acpi_ds_load1_begin_op;
-		walk_state->ascending_callback = acpi_ds_load1_end_op;
-		break;
-
-	case 2:
-		walk_state->parse_flags = ACPI_PARSE_LOAD_PASS1 |
-		    ACPI_PARSE_DELETE_TREE;
-		walk_state->descending_callback = acpi_ds_load2_begin_op;
-		walk_state->ascending_callback = acpi_ds_load2_end_op;
-		break;
-
-	case 3:
-#ifndef ACPI_NO_METHOD_EXECUTION
-		walk_state->parse_flags |= ACPI_PARSE_EXECUTE |
-		    ACPI_PARSE_DELETE_TREE;
-		walk_state->descending_callback = acpi_ds_exec_begin_op;
-		walk_state->ascending_callback = acpi_ds_exec_end_op;
-#endif
-		break;
-
-	default:
-		return (AE_BAD_PARAMETER);
-	}
-
-	return (AE_OK);
-}
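
acpi_ds_init_callbacks simply selects one descending/ascending callback pair per pass: namespace load pass 1, load pass 2, or execution. The following standalone sketch -- hypothetical types and names, not ACPICA code -- shows the same "pick a callback pair by pass number" idiom and how a walker would invoke the pair around a node (pass 2 omitted for brevity):

#include <stdio.h>

struct walk_state;			/* opaque walk context */

typedef int (*walk_callback)(struct walk_state *ws);

struct walk_callbacks {
	walk_callback descending;	/* called on the way down the tree */
	walk_callback ascending;	/* called on the way back up */
};

static int load1_begin(struct walk_state *ws) { (void)ws; puts("load1 begin"); return 0; }
static int load1_end(struct walk_state *ws)   { (void)ws; puts("load1 end");   return 0; }
static int exec_begin(struct walk_state *ws)  { (void)ws; puts("exec begin");  return 0; }
static int exec_end(struct walk_state *ws)    { (void)ws; puts("exec end");    return 0; }

/* Select the callback pair for a given pass; returns 0 on success */
static int init_callbacks(struct walk_callbacks *cb, unsigned int pass)
{
	switch (pass) {
	case 1:
		cb->descending = load1_begin;
		cb->ascending = load1_end;
		return 0;
	case 3:
		cb->descending = exec_begin;
		cb->ascending = exec_end;
		return 0;
	default:
		return -1;		/* bad parameter */
	}
}

int main(void)
{
	struct walk_callbacks cb;

	if (init_callbacks(&cb, 3) == 0) {
		cb.descending(NULL);	/* visit a node on the way down */
		cb.ascending(NULL);	/* and again on the way back up */
	}
	return 0;
}
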
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_load1_begin_op
- *
- * PARAMETERS:  walk_state      - Current state of the parse tree walk
- *              out_op          - Where to return op if a new one is created
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Descending callback used during the loading of ACPI tables.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
-		       union acpi_parse_object ** out_op)
-{
-	union acpi_parse_object *op;
-	struct acpi_namespace_node *node;
-	acpi_status status;
-	acpi_object_type object_type;
-	char *path;
-	u32 flags;
-
-	ACPI_FUNCTION_TRACE(ds_load1_begin_op);
-
-	op = walk_state->op;
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
-			  walk_state));
-
-	/* We are only interested in opcodes that have an associated name */
-
-	if (op) {
-		if (!(walk_state->op_info->flags & AML_NAMED)) {
-			*out_op = op;
-			return_ACPI_STATUS(AE_OK);
-		}
-
-		/* Check if this object has already been installed in the namespace */
-
-		if (op->common.node) {
-			*out_op = op;
-			return_ACPI_STATUS(AE_OK);
-		}
-	}
-
-	path = acpi_ps_get_next_namestring(&walk_state->parser_state);
-
-	/* Map the raw opcode into an internal object type */
-
-	object_type = walk_state->op_info->object_type;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "State=%p Op=%p [%s]\n", walk_state, op,
-			  acpi_ut_get_type_name(object_type)));
-
-	switch (walk_state->opcode) {
-	case AML_SCOPE_OP:
-
-		/*
-		 * The target name of the Scope() operator must exist at this point so
-		 * that we can actually open the scope to enter new names underneath it.
-		 * Allow search-to-root for single namesegs.
-		 */
-		status =
-		    acpi_ns_lookup(walk_state->scope_info, path, object_type,
-				   ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
-				   walk_state, &(node));
-#ifdef ACPI_ASL_COMPILER
-		if (status == AE_NOT_FOUND) {
-			/*
-			 * Table disassembly:
-			 * Target of Scope() not found. Generate an External for it, and
-			 * insert the name into the namespace.
-			 */
-			acpi_dm_add_to_external_list(path, ACPI_TYPE_DEVICE, 0);
-			status =
-			    acpi_ns_lookup(walk_state->scope_info, path,
-					   object_type, ACPI_IMODE_LOAD_PASS1,
-					   ACPI_NS_SEARCH_PARENT, walk_state,
-					   &node);
-		}
-#endif
-		if (ACPI_FAILURE(status)) {
-			ACPI_ERROR_NAMESPACE(path, status);
-			return_ACPI_STATUS(status);
-		}
-
-		/*
-		 * Check to make sure that the target is
-		 * one of the opcodes that actually opens a scope
-		 */
-		switch (node->type) {
-		case ACPI_TYPE_ANY:
-		case ACPI_TYPE_LOCAL_SCOPE:	/* Scope  */
-		case ACPI_TYPE_DEVICE:
-		case ACPI_TYPE_POWER:
-		case ACPI_TYPE_PROCESSOR:
-		case ACPI_TYPE_THERMAL:
-
-			/* These are acceptable types */
-			break;
-
-		case ACPI_TYPE_INTEGER:
-		case ACPI_TYPE_STRING:
-		case ACPI_TYPE_BUFFER:
-
-			/*
-			 * These types we will allow, but we will change the type. This
-			 * enables some existing code of the form:
-			 *
-			 *  Name (DEB, 0)
-			 *  Scope (DEB) { ... }
-			 *
-			 * Note: silently change the type here. On the second pass, we will report
-			 * a warning
-			 */
-			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-					  "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)\n",
-					  path,
-					  acpi_ut_get_type_name(node->type)));
-
-			node->type = ACPI_TYPE_ANY;
-			walk_state->scope_info->common.value = ACPI_TYPE_ANY;
-			break;
-
-		default:
-
-			/* All other types are an error */
-
-			ACPI_ERROR((AE_INFO,
-				    "Invalid type (%s) for target of Scope operator [%4.4s] (Cannot override)",
-				    acpi_ut_get_type_name(node->type), path));
-
-			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-		}
-		break;
-
-	default:
-		/*
-		 * For all other named opcodes, we will enter the name into
-		 * the namespace.
-		 *
-		 * Setup the search flags.
-		 * Since we are entering a name into the namespace, we do not want to
-		 * enable the search-to-root upsearch.
-		 *
-		 * There are only two conditions where it is acceptable that the name
-		 * already exists:
-		 *    1) the Scope() operator can reopen a scoping object that was
-		 *       previously defined (Scope, Method, Device, etc.)
-		 *    2) Whenever we are parsing a deferred opcode (op_region, Buffer,
-		 *       buffer_field, or Package), the name of the object is already
-		 *       in the namespace.
-		 */
-		if (walk_state->deferred_node) {
-
-			/* This name is already in the namespace, get the node */
-
-			node = walk_state->deferred_node;
-			status = AE_OK;
-			break;
-		}
-
-		/*
-		 * If we are executing a method, do not create any namespace objects
-		 * during the load phase, only during execution.
-		 */
-		if (walk_state->method_node) {
-			node = NULL;
-			status = AE_OK;
-			break;
-		}
-
-		flags = ACPI_NS_NO_UPSEARCH;
-		if ((walk_state->opcode != AML_SCOPE_OP) &&
-		    (!(walk_state->parse_flags & ACPI_PARSE_DEFERRED_OP))) {
-			flags |= ACPI_NS_ERROR_IF_FOUND;
-			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-					  "[%s] Cannot already exist\n",
-					  acpi_ut_get_type_name(object_type)));
-		} else {
-			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-					  "[%s] Both Find or Create allowed\n",
-					  acpi_ut_get_type_name(object_type)));
-		}
-
-		/*
-		 * Enter the named type into the internal namespace. We enter the name
-		 * as we go downward in the parse tree. Any necessary subobjects that
-		 * involve arguments to the opcode must be created as we go back up the
-		 * parse tree later.
-		 */
-		status =
-		    acpi_ns_lookup(walk_state->scope_info, path, object_type,
-				   ACPI_IMODE_LOAD_PASS1, flags, walk_state,
-				   &node);
-		if (ACPI_FAILURE(status)) {
-			if (status == AE_ALREADY_EXISTS) {
-
-				/* The name already exists in this scope */
-
-				if (node->flags & ANOBJ_IS_EXTERNAL) {
-					/*
-					 * Allow one create on an object or segment that was
-					 * previously declared External
-					 */
-					node->flags &= ~ANOBJ_IS_EXTERNAL;
-					node->type = (u8) object_type;
-
-					/* Just retyped a node, probably will need to open a scope */
-
-					if (acpi_ns_opens_scope(object_type)) {
-						status =
-						    acpi_ds_scope_stack_push
-						    (node, object_type,
-						     walk_state);
-						if (ACPI_FAILURE(status)) {
-							return_ACPI_STATUS
-							    (status);
-						}
-					}
-
-					status = AE_OK;
-				}
-			}
-
-			if (ACPI_FAILURE(status)) {
-				ACPI_ERROR_NAMESPACE(path, status);
-				return_ACPI_STATUS(status);
-			}
-		}
-		break;
-	}
-
-	/* Common exit */
-
-	if (!op) {
-
-		/* Create a new op */
-
-		op = acpi_ps_alloc_op(walk_state->opcode);
-		if (!op) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-	}
-
-	/* Initialize the op */
-
-#if (defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY))
-	op->named.path = ACPI_CAST_PTR(u8, path);
-#endif
-
-	if (node) {
-		/*
-		 * Put the Node in the "op" object that the parser uses, so we
-		 * can get it again quickly when this scope is closed
-		 */
-		op->common.node = node;
-		op->named.name = node->name.integer;
-	}
-
-	acpi_ps_append_arg(acpi_ps_get_parent_scope(&walk_state->parser_state),
-			   op);
-	*out_op = op;
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_load1_end_op
- *
- * PARAMETERS:  walk_state      - Current state of the parse tree walk
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Ascending callback used during the loading of the namespace,
- *              both control methods and everything else.
- *
- ******************************************************************************/
-
-acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
-{
-	union acpi_parse_object *op;
-	acpi_object_type object_type;
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ds_load1_end_op);
-
-	op = walk_state->op;
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
-			  walk_state));
-
-	/* We are only interested in opcodes that have an associated name */
-
-	if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Get the object type to determine if we should pop the scope */
-
-	object_type = walk_state->op_info->object_type;
-
-#ifndef ACPI_NO_METHOD_EXECUTION
-	if (walk_state->op_info->flags & AML_FIELD) {
-		/*
-		 * If we are executing a method, do not create any namespace objects
-		 * during the load phase, only during execution.
-		 */
-		if (!walk_state->method_node) {
-			if (walk_state->opcode == AML_FIELD_OP ||
-			    walk_state->opcode == AML_BANK_FIELD_OP ||
-			    walk_state->opcode == AML_INDEX_FIELD_OP) {
-				status =
-				    acpi_ds_init_field_objects(op, walk_state);
-			}
-		}
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * If we are executing a method, do not create any namespace objects
-	 * during the load phase, only during execution.
-	 */
-	if (!walk_state->method_node) {
-		if (op->common.aml_opcode == AML_REGION_OP) {
-			status =
-			    acpi_ex_create_region(op->named.data,
-						  op->named.length,
-						  (acpi_adr_space_type) ((op->
-									  common.
-									  value.
-									  arg)->
-									 common.
-									 value.
-									 integer),
-						  walk_state);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-		} else if (op->common.aml_opcode == AML_DATA_REGION_OP) {
-			status =
-			    acpi_ex_create_region(op->named.data,
-						  op->named.length,
-						  REGION_DATA_TABLE,
-						  walk_state);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-		}
-	}
-#endif
-
-	if (op->common.aml_opcode == AML_NAME_OP) {
-
-		/* For Name opcode, get the object type from the argument */
-
-		if (op->common.value.arg) {
-			object_type = (acpi_ps_get_opcode_info((op->common.
-								value.arg)->
-							       common.
-							       aml_opcode))->
-			    object_type;
-
-			/* Set node type if we have a namespace node */
-
-			if (op->common.node) {
-				op->common.node->type = (u8) object_type;
-			}
-		}
-	}
-
-	/*
-	 * If we are executing a method, do not create any namespace objects
-	 * during the load phase, only during execution.
-	 */
-	if (!walk_state->method_node) {
-		if (op->common.aml_opcode == AML_METHOD_OP) {
-			/*
-			 * method_op pkg_length name_string method_flags term_list
-			 *
-			 * Note: We must create the method node/object pair as soon as we
-			 * see the method declaration. This allows later pass1 parsing
-			 * of invocations of the method (need to know the number of
-			 * arguments.)
-			 */
-			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-					  "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
-					  walk_state, op, op->named.node));
-
-			if (!acpi_ns_get_attached_object(op->named.node)) {
-				walk_state->operands[0] =
-				    ACPI_CAST_PTR(void, op->named.node);
-				walk_state->num_operands = 1;
-
-				status =
-				    acpi_ds_create_operands(walk_state,
-							    op->common.value.
-							    arg);
-				if (ACPI_SUCCESS(status)) {
-					status =
-					    acpi_ex_create_method(op->named.
-								  data,
-								  op->named.
-								  length,
-								  walk_state);
-				}
-
-				walk_state->operands[0] = NULL;
-				walk_state->num_operands = 0;
-
-				if (ACPI_FAILURE(status)) {
-					return_ACPI_STATUS(status);
-				}
-			}
-		}
-	}
-
-	/* Pop the scope stack (only if loading a table) */
-
-	if (!walk_state->method_node && acpi_ns_opens_scope(object_type)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "(%s): Popping scope for Op %p\n",
-				  acpi_ut_get_type_name(object_type), op));
-
-		status = acpi_ds_scope_stack_pop(walk_state);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_load2_begin_op
- *
- * PARAMETERS:  walk_state      - Current state of the parse tree walk
- *              out_op          - Where to return op if a new one is created
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Descending callback used during the loading of ACPI tables.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
-		       union acpi_parse_object **out_op)
-{
-	union acpi_parse_object *op;
-	struct acpi_namespace_node *node;
-	acpi_status status;
-	acpi_object_type object_type;
-	char *buffer_ptr;
-	u32 flags;
-
-	ACPI_FUNCTION_TRACE(ds_load2_begin_op);
-
-	op = walk_state->op;
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
-			  walk_state));
-
-	if (op) {
-		if ((walk_state->control_state) &&
-		    (walk_state->control_state->common.state ==
-		     ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
-
-			/* We are executing a while loop outside of a method */
-
-			status = acpi_ds_exec_begin_op(walk_state, out_op);
-			return_ACPI_STATUS(status);
-		}
-
-		/* We only care about Namespace opcodes here */
-
-		if ((!(walk_state->op_info->flags & AML_NSOPCODE) &&
-		     (walk_state->opcode != AML_INT_NAMEPATH_OP)) ||
-		    (!(walk_state->op_info->flags & AML_NAMED))) {
-#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE
-			if ((walk_state->op_info->class == AML_CLASS_EXECUTE) ||
-			    (walk_state->op_info->class == AML_CLASS_CONTROL)) {
-				ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-						  "Begin/EXEC: %s (fl %8.8X)\n",
-						  walk_state->op_info->name,
-						  walk_state->op_info->flags));
-
-				/* Executing a type1 or type2 opcode outside of a method */
-
-				status =
-				    acpi_ds_exec_begin_op(walk_state, out_op);
-				return_ACPI_STATUS(status);
-			}
-#endif
-			return_ACPI_STATUS(AE_OK);
-		}
-
-		/* Get the name we are going to enter or lookup in the namespace */
-
-		if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
-
-			/* For Namepath op, get the path string */
-
-			buffer_ptr = op->common.value.string;
-			if (!buffer_ptr) {
-
-				/* No name, just exit */
-
-				return_ACPI_STATUS(AE_OK);
-			}
-		} else {
-			/* Get name from the op */
-
-			buffer_ptr = ACPI_CAST_PTR(char, &op->named.name);
-		}
-	} else {
-		/* Get the namestring from the raw AML */
-
-		buffer_ptr =
-		    acpi_ps_get_next_namestring(&walk_state->parser_state);
-	}
-
-	/* Map the opcode into an internal object type */
-
-	object_type = walk_state->op_info->object_type;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "State=%p Op=%p Type=%X\n", walk_state, op,
-			  object_type));
-
-	switch (walk_state->opcode) {
-	case AML_FIELD_OP:
-	case AML_BANK_FIELD_OP:
-	case AML_INDEX_FIELD_OP:
-
-		node = NULL;
-		status = AE_OK;
-		break;
-
-	case AML_INT_NAMEPATH_OP:
-		/*
-		 * The name_path is an object reference to an existing object.
-		 * Don't enter the name into the namespace, but look it up
-		 * for use later.
-		 */
-		status =
-		    acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
-				   object_type, ACPI_IMODE_EXECUTE,
-				   ACPI_NS_SEARCH_PARENT, walk_state, &(node));
-		break;
-
-	case AML_SCOPE_OP:
-		/*
-		 * The Path is an object reference to an existing object.
-		 * Don't enter the name into the namespace, but look it up
-		 * for use later.
-		 */
-		status =
-		    acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
-				   object_type, ACPI_IMODE_EXECUTE,
-				   ACPI_NS_SEARCH_PARENT, walk_state, &(node));
-		if (ACPI_FAILURE(status)) {
-#ifdef ACPI_ASL_COMPILER
-			if (status == AE_NOT_FOUND) {
-				status = AE_OK;
-			} else {
-				ACPI_ERROR_NAMESPACE(buffer_ptr, status);
-			}
-#else
-			ACPI_ERROR_NAMESPACE(buffer_ptr, status);
-#endif
-			return_ACPI_STATUS(status);
-		}
-
-		/*
-		 * We must check to make sure that the target is
-		 * one of the opcodes that actually opens a scope
-		 */
-		switch (node->type) {
-		case ACPI_TYPE_ANY:
-		case ACPI_TYPE_LOCAL_SCOPE:	/* Scope */
-		case ACPI_TYPE_DEVICE:
-		case ACPI_TYPE_POWER:
-		case ACPI_TYPE_PROCESSOR:
-		case ACPI_TYPE_THERMAL:
-
-			/* These are acceptable types */
-			break;
-
-		case ACPI_TYPE_INTEGER:
-		case ACPI_TYPE_STRING:
-		case ACPI_TYPE_BUFFER:
-
-			/*
-			 * These types we will allow, but we will change the type. This
-			 * enables some existing code of the form:
-			 *
-			 *  Name (DEB, 0)
-			 *  Scope (DEB) { ... }
-			 */
-			ACPI_WARNING((AE_INFO,
-				      "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)",
-				      buffer_ptr,
-				      acpi_ut_get_type_name(node->type)));
-
-			node->type = ACPI_TYPE_ANY;
-			walk_state->scope_info->common.value = ACPI_TYPE_ANY;
-			break;
-
-		default:
-
-			/* All other types are an error */
-
-			ACPI_ERROR((AE_INFO,
-				    "Invalid type (%s) for target of Scope operator [%4.4s]",
-				    acpi_ut_get_type_name(node->type),
-				    buffer_ptr));
-
-			return (AE_AML_OPERAND_TYPE);
-		}
-		break;
-
-	default:
-
-		/* All other opcodes */
-
-		if (op && op->common.node) {
-
-			/* This op/node was previously entered into the namespace */
-
-			node = op->common.node;
-
-			if (acpi_ns_opens_scope(object_type)) {
-				status =
-				    acpi_ds_scope_stack_push(node, object_type,
-							     walk_state);
-				if (ACPI_FAILURE(status)) {
-					return_ACPI_STATUS(status);
-				}
-			}
-
-			return_ACPI_STATUS(AE_OK);
-		}
-
-		/*
-		 * Enter the named type into the internal namespace. We enter the name
-		 * as we go downward in the parse tree. Any necessary subobjects that
-		 * involve arguments to the opcode must be created as we go back up the
-		 * parse tree later.
-		 *
-		 * Note: Name may already exist if we are executing a deferred opcode.
-		 */
-		if (walk_state->deferred_node) {
-
-			/* This name is already in the namespace, get the node */
-
-			node = walk_state->deferred_node;
-			status = AE_OK;
-			break;
-		}
-
-		flags = ACPI_NS_NO_UPSEARCH;
-		if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
-
-			/* Execution mode, node cannot already exist, node is temporary */
-
-			flags |= (ACPI_NS_ERROR_IF_FOUND | ACPI_NS_TEMPORARY);
-		}
-
-		/* Add new entry or lookup existing entry */
-
-		status =
-		    acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
-				   object_type, ACPI_IMODE_LOAD_PASS2, flags,
-				   walk_state, &node);
-
-		if (ACPI_SUCCESS(status) && (flags & ACPI_NS_TEMPORARY)) {
-			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-					  "***New Node [%4.4s] %p is temporary\n",
-					  acpi_ut_get_node_name(node), node));
-		}
-		break;
-	}
-
-	if (ACPI_FAILURE(status)) {
-		ACPI_ERROR_NAMESPACE(buffer_ptr, status);
-		return_ACPI_STATUS(status);
-	}
-
-	if (!op) {
-
-		/* Create a new op */
-
-		op = acpi_ps_alloc_op(walk_state->opcode);
-		if (!op) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		/* Initialize the new op */
-
-		if (node) {
-			op->named.name = node->name.integer;
-		}
-		*out_op = op;
-	}
-
-	/*
-	 * Put the Node in the "op" object that the parser uses, so we
-	 * can get it again quickly when this scope is closed
-	 */
-	op->common.node = node;
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_load2_end_op
- *
- * PARAMETERS:  walk_state      - Current state of the parse tree walk
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Ascending callback used during the loading of the namespace,
- *              both control methods and everything else.
- *
- ******************************************************************************/
-
-acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
-{
-	union acpi_parse_object *op;
-	acpi_status status = AE_OK;
-	acpi_object_type object_type;
-	struct acpi_namespace_node *node;
-	union acpi_parse_object *arg;
-	struct acpi_namespace_node *new_node;
-#ifndef ACPI_NO_METHOD_EXECUTION
-	u32 i;
-	u8 region_space;
-#endif
-
-	ACPI_FUNCTION_TRACE(ds_load2_end_op);
-
-	op = walk_state->op;
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Opcode [%s] Op %p State %p\n",
-			  walk_state->op_info->name, op, walk_state));
-
-	/* Check if opcode had an associated namespace object */
-
-	if (!(walk_state->op_info->flags & AML_NSOBJECT)) {
-#ifndef ACPI_NO_METHOD_EXECUTION
-#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE
-		/* No namespace object. Executable opcode? */
-
-		if ((walk_state->op_info->class == AML_CLASS_EXECUTE) ||
-		    (walk_state->op_info->class == AML_CLASS_CONTROL)) {
-			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-					  "End/EXEC:   %s (fl %8.8X)\n",
-					  walk_state->op_info->name,
-					  walk_state->op_info->flags));
-
-			/* Executing a type1 or type2 opcode outside of a method */
-
-			status = acpi_ds_exec_end_op(walk_state);
-			return_ACPI_STATUS(status);
-		}
-#endif
-#endif
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	if (op->common.aml_opcode == AML_SCOPE_OP) {
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "Ending scope Op=%p State=%p\n", op,
-				  walk_state));
-	}
-
-	object_type = walk_state->op_info->object_type;
-
-	/*
-	 * Get the Node/name from the earlier lookup
-	 * (It was saved in the *op structure)
-	 */
-	node = op->common.node;
-
-	/*
-	 * Put the Node on the object stack (Contains the ACPI Name of
-	 * this object)
-	 */
-	walk_state->operands[0] = (void *)node;
-	walk_state->num_operands = 1;
-
-	/* Pop the scope stack */
-
-	if (acpi_ns_opens_scope(object_type) &&
-	    (op->common.aml_opcode != AML_INT_METHODCALL_OP)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "(%s) Popping scope for Op %p\n",
-				  acpi_ut_get_type_name(object_type), op));
-
-		status = acpi_ds_scope_stack_pop(walk_state);
-		if (ACPI_FAILURE(status)) {
-			goto cleanup;
-		}
-	}
-
-	/*
-	 * Named operations are as follows:
-	 *
-	 * AML_ALIAS
-	 * AML_BANKFIELD
-	 * AML_CREATEBITFIELD
-	 * AML_CREATEBYTEFIELD
-	 * AML_CREATEDWORDFIELD
-	 * AML_CREATEFIELD
-	 * AML_CREATEQWORDFIELD
-	 * AML_CREATEWORDFIELD
-	 * AML_DATA_REGION
-	 * AML_DEVICE
-	 * AML_EVENT
-	 * AML_FIELD
-	 * AML_INDEXFIELD
-	 * AML_METHOD
-	 * AML_METHODCALL
-	 * AML_MUTEX
-	 * AML_NAME
-	 * AML_NAMEDFIELD
-	 * AML_OPREGION
-	 * AML_POWERRES
-	 * AML_PROCESSOR
-	 * AML_SCOPE
-	 * AML_THERMALZONE
-	 */
-
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "Create-Load [%s] State=%p Op=%p NamedObj=%p\n",
-			  acpi_ps_get_opcode_name(op->common.aml_opcode),
-			  walk_state, op, node));
-
-	/* Decode the opcode */
-
-	arg = op->common.value.arg;
-
-	switch (walk_state->op_info->type) {
-#ifndef ACPI_NO_METHOD_EXECUTION
-
-	case AML_TYPE_CREATE_FIELD:
-		/*
-		 * Create the field object, but the field buffer and index must
-		 * be evaluated later during the execution phase
-		 */
-		status = acpi_ds_create_buffer_field(op, walk_state);
-		break;
-
-	case AML_TYPE_NAMED_FIELD:
-		/*
-		 * If we are executing a method, initialize the field
-		 */
-		if (walk_state->method_node) {
-			status = acpi_ds_init_field_objects(op, walk_state);
-		}
-
-		switch (op->common.aml_opcode) {
-		case AML_INDEX_FIELD_OP:
-
-			status =
-			    acpi_ds_create_index_field(op,
-						       (acpi_handle) arg->
-						       common.node, walk_state);
-			break;
-
-		case AML_BANK_FIELD_OP:
-
-			status =
-			    acpi_ds_create_bank_field(op, arg->common.node,
-						      walk_state);
-			break;
-
-		case AML_FIELD_OP:
-
-			status =
-			    acpi_ds_create_field(op, arg->common.node,
-						 walk_state);
-			break;
-
-		default:
-			/* All NAMED_FIELD opcodes must be handled above */
-			break;
-		}
-		break;
-
-	case AML_TYPE_NAMED_SIMPLE:
-
-		status = acpi_ds_create_operands(walk_state, arg);
-		if (ACPI_FAILURE(status)) {
-			goto cleanup;
-		}
-
-		switch (op->common.aml_opcode) {
-		case AML_PROCESSOR_OP:
-
-			status = acpi_ex_create_processor(walk_state);
-			break;
-
-		case AML_POWER_RES_OP:
-
-			status = acpi_ex_create_power_resource(walk_state);
-			break;
-
-		case AML_MUTEX_OP:
-
-			status = acpi_ex_create_mutex(walk_state);
-			break;
-
-		case AML_EVENT_OP:
-
-			status = acpi_ex_create_event(walk_state);
-			break;
-
-		case AML_ALIAS_OP:
-
-			status = acpi_ex_create_alias(walk_state);
-			break;
-
-		default:
-			/* Unknown opcode */
-
-			status = AE_OK;
-			goto cleanup;
-		}
-
-		/* Delete operands */
-
-		for (i = 1; i < walk_state->num_operands; i++) {
-			acpi_ut_remove_reference(walk_state->operands[i]);
-			walk_state->operands[i] = NULL;
-		}
-
-		break;
-#endif				/* ACPI_NO_METHOD_EXECUTION */
-
-	case AML_TYPE_NAMED_COMPLEX:
-
-		switch (op->common.aml_opcode) {
-#ifndef ACPI_NO_METHOD_EXECUTION
-		case AML_REGION_OP:
-		case AML_DATA_REGION_OP:
-
-			if (op->common.aml_opcode == AML_REGION_OP) {
-				region_space = (acpi_adr_space_type)
-				    ((op->common.value.arg)->common.value.
-				     integer);
-			} else {
-				region_space = REGION_DATA_TABLE;
-			}
-
-			/*
-			 * If we are executing a method, initialize the region
-			 */
-			if (walk_state->method_node) {
-				status =
-				    acpi_ex_create_region(op->named.data,
-							  op->named.length,
-							  region_space,
-							  walk_state);
-				if (ACPI_FAILURE(status)) {
-					return (status);
-				}
-			}
-
-			/*
-			 * The op_region is not fully parsed at this time. The only valid
-			 * argument is the space_id. (We must save the address of the
-			 * AML of the address and length operands)
-			 */
-
-			/*
-			 * If we have a valid region, initialize it
-			 * Namespace is NOT locked at this point.
-			 */
-			status =
-			    acpi_ev_initialize_region
-			    (acpi_ns_get_attached_object(node), FALSE);
-			if (ACPI_FAILURE(status)) {
-				/*
-				 *  If AE_NOT_EXIST is returned, it is not fatal
-				 *  because many regions get created before a handler
-				 *  is installed for said region.
-				 */
-				if (AE_NOT_EXIST == status) {
-					status = AE_OK;
-				}
-			}
-			break;
-
-		case AML_NAME_OP:
-
-			status = acpi_ds_create_node(walk_state, node, op);
-			break;
-
-		case AML_METHOD_OP:
-			/*
-			 * method_op pkg_length name_string method_flags term_list
-			 *
-			 * Note: We must create the method node/object pair as soon as we
-			 * see the method declaration. This allows later pass1 parsing
-			 * of invocations of the method (need to know the number of
-			 * arguments.)
-			 */
-			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-					  "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
-					  walk_state, op, op->named.node));
-
-			if (!acpi_ns_get_attached_object(op->named.node)) {
-				walk_state->operands[0] =
-				    ACPI_CAST_PTR(void, op->named.node);
-				walk_state->num_operands = 1;
-
-				status =
-				    acpi_ds_create_operands(walk_state,
-							    op->common.value.
-							    arg);
-				if (ACPI_SUCCESS(status)) {
-					status =
-					    acpi_ex_create_method(op->named.
-								  data,
-								  op->named.
-								  length,
-								  walk_state);
-				}
-				walk_state->operands[0] = NULL;
-				walk_state->num_operands = 0;
-
-				if (ACPI_FAILURE(status)) {
-					return_ACPI_STATUS(status);
-				}
-			}
-			break;
-
-#endif				/* ACPI_NO_METHOD_EXECUTION */
-
-		default:
-			/* All NAMED_COMPLEX opcodes must be handled above */
-			break;
-		}
-		break;
-
-	case AML_CLASS_INTERNAL:
-
-		/* case AML_INT_NAMEPATH_OP: */
-		break;
-
-	case AML_CLASS_METHOD_CALL:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "RESOLVING-MethodCall: State=%p Op=%p NamedObj=%p\n",
-				  walk_state, op, node));
-
-		/*
-		 * Lookup the method name and save the Node
-		 */
-		status =
-		    acpi_ns_lookup(walk_state->scope_info,
-				   arg->common.value.string, ACPI_TYPE_ANY,
-				   ACPI_IMODE_LOAD_PASS2,
-				   ACPI_NS_SEARCH_PARENT |
-				   ACPI_NS_DONT_OPEN_SCOPE, walk_state,
-				   &(new_node));
-		if (ACPI_SUCCESS(status)) {
-			/*
-			 * Make sure that what we found is indeed a method.
-			 * We deliberately did not restrict the lookup to methods,
-			 * so that we could see whether the name resolves at all.
-			 */
-			if (new_node->type != ACPI_TYPE_METHOD) {
-				status = AE_AML_OPERAND_TYPE;
-			}
-
-			/* We could put the returned object (Node) on the object stack for
-			 * later, but for now, we will put it in the "op" object that the
-			 * parser uses, so we can get it again at the end of this scope
-			 */
-			op->common.node = new_node;
-		} else {
-			ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
-		}
-		break;
-
-	default:
-		break;
-	}
-
-      cleanup:
-
-	/* Remove the Node pushed at the very beginning */
-
-	walk_state->operands[0] = NULL;
-	walk_state->num_operands = 0;
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/dispatcher/dswscope.c b/drivers/acpi/dispatcher/dswscope.c
deleted file mode 100644
index 9e60732..0000000
--- a/drivers/acpi/dispatcher/dswscope.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/******************************************************************************
- *
- * Module Name: dswscope - Scope stack manipulation
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acdispat.h>
-
-#define _COMPONENT          ACPI_DISPATCHER
-ACPI_MODULE_NAME("dswscope")
-
-/****************************************************************************
- *
- * FUNCTION:    acpi_ds_scope_stack_clear
- *
- * PARAMETERS:  walk_state      - Current state
- *
- * RETURN:      None
- *
- * DESCRIPTION: Pop (and free) everything on the scope stack except the
- *              root scope object (which remains at the stack top.)
- *
- ***************************************************************************/
-void acpi_ds_scope_stack_clear(struct acpi_walk_state *walk_state)
-{
-	union acpi_generic_state *scope_info;
-
-	ACPI_FUNCTION_NAME(ds_scope_stack_clear);
-
-	while (walk_state->scope_info) {
-
-		/* Pop a scope off the stack */
-
-		scope_info = walk_state->scope_info;
-		walk_state->scope_info = scope_info->scope.next;
-
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Popped object type (%s)\n",
-				  acpi_ut_get_type_name(scope_info->common.
-							value)));
-		acpi_ut_delete_generic_state(scope_info);
-	}
-}
-
-/****************************************************************************
- *
- * FUNCTION:    acpi_ds_scope_stack_push
- *
- * PARAMETERS:  Node            - Name to be made current
- *              Type            - Type of frame being pushed
- *              walk_state      - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Push the current scope on the scope stack, and make the
- *              passed Node current.
- *
- ***************************************************************************/
-
-acpi_status
-acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
-			 acpi_object_type type,
-			 struct acpi_walk_state *walk_state)
-{
-	union acpi_generic_state *scope_info;
-	union acpi_generic_state *old_scope_info;
-
-	ACPI_FUNCTION_TRACE(ds_scope_stack_push);
-
-	if (!node) {
-
-		/* Invalid scope   */
-
-		ACPI_ERROR((AE_INFO, "Null scope parameter"));
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Make sure object type is valid */
-
-	if (!acpi_ut_valid_object_type(type)) {
-		ACPI_WARNING((AE_INFO, "Invalid object type: 0x%X", type));
-	}
-
-	/* Allocate a new scope object */
-
-	scope_info = acpi_ut_create_generic_state();
-	if (!scope_info) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Init new scope object */
-
-	scope_info->common.descriptor_type = ACPI_DESC_TYPE_STATE_WSCOPE;
-	scope_info->scope.node = node;
-	scope_info->common.value = (u16) type;
-
-	walk_state->scope_depth++;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-			  "[%.2d] Pushed scope ",
-			  (u32) walk_state->scope_depth));
-
-	old_scope_info = walk_state->scope_info;
-	if (old_scope_info) {
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC,
-				      "[%4.4s] (%s)",
-				      acpi_ut_get_node_name(old_scope_info->
-							    scope.node),
-				      acpi_ut_get_type_name(old_scope_info->
-							    common.value)));
-	} else {
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "[\\___] (%s)", "ROOT"));
-	}
-
-	ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC,
-			      ", New scope -> [%4.4s] (%s)\n",
-			      acpi_ut_get_node_name(scope_info->scope.node),
-			      acpi_ut_get_type_name(scope_info->common.value)));
-
-	/* Push new scope object onto stack */
-
-	acpi_ut_push_generic_state(&walk_state->scope_info, scope_info);
-	return_ACPI_STATUS(AE_OK);
-}
-
-/****************************************************************************
- *
- * FUNCTION:    acpi_ds_scope_stack_pop
- *
- * PARAMETERS:  walk_state      - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Pop the scope stack once.
- *
- ***************************************************************************/
-
-acpi_status acpi_ds_scope_stack_pop(struct acpi_walk_state *walk_state)
-{
-	union acpi_generic_state *scope_info;
-	union acpi_generic_state *new_scope_info;
-
-	ACPI_FUNCTION_TRACE(ds_scope_stack_pop);
-
-	/*
-	 * Pop scope info object off the stack.
-	 */
-	scope_info = acpi_ut_pop_generic_state(&walk_state->scope_info);
-	if (!scope_info) {
-		return_ACPI_STATUS(AE_STACK_UNDERFLOW);
-	}
-
-	walk_state->scope_depth--;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-			  "[%.2d] Popped scope [%4.4s] (%s), New scope -> ",
-			  (u32) walk_state->scope_depth,
-			  acpi_ut_get_node_name(scope_info->scope.node),
-			  acpi_ut_get_type_name(scope_info->common.value)));
-
-	new_scope_info = walk_state->scope_info;
-	if (new_scope_info) {
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC,
-				      "[%4.4s] (%s)\n",
-				      acpi_ut_get_node_name(new_scope_info->
-							    scope.node),
-				      acpi_ut_get_type_name(new_scope_info->
-							    common.value)));
-	} else {
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "[\\___] (ROOT)\n"));
-	}
-
-	acpi_ut_delete_generic_state(scope_info);
-	return_ACPI_STATUS(AE_OK);
-}
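
The deleted dswscope.c maintains the per-walk scope stack as a singly linked LIFO: walk_state->scope_info always points at the current scope, a push links the new scope in front of the old one, and a pop frees the head and exposes the parent. A standalone sketch of that structure, using local types rather than the ACPICA generic state objects:

#include <stdio.h>
#include <stdlib.h>

struct scope {
	const char *name;
	struct scope *next;
};

static struct scope *scope_stack;	/* analogous to walk_state->scope_info */

static int scope_push(const char *name)
{
	struct scope *s = malloc(sizeof(*s));

	if (!s)
		return -1;
	s->name = name;
	s->next = scope_stack;	/* old current scope becomes the parent */
	scope_stack = s;
	return 0;
}

static void scope_pop(void)
{
	struct scope *s = scope_stack;

	if (!s)
		return;		/* underflow, analogous to AE_STACK_UNDERFLOW */
	scope_stack = s->next;
	free(s);
}

int main(void)
{
	scope_push("\\");	/* root scope stays at the bottom */
	scope_push("_SB_");
	scope_push("PCI0");
	printf("current scope: %s\n", scope_stack->name);	/* PCI0 */
	scope_pop();
	printf("current scope: %s\n", scope_stack->name);	/* _SB_ */
	return 0;
}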
diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/dispatcher/dswstate.c
deleted file mode 100644
index b00d4af..0000000
--- a/drivers/acpi/dispatcher/dswstate.c
+++ /dev/null
@@ -1,752 +0,0 @@
-/******************************************************************************
- *
- * Module Name: dswstate - Dispatcher parse tree walk management routines
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/acdispat.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_DISPATCHER
-ACPI_MODULE_NAME("dswstate")
-
-  /* Local prototypes */
-static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *ws);
-static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *ws);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_result_pop
- *
- * PARAMETERS:  Object              - Where to return the popped object
- *              walk_state          - Current Walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Pop an object off the top of this walk's result stack
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_result_pop(union acpi_operand_object **object,
-		   struct acpi_walk_state *walk_state)
-{
-	u32 index;
-	union acpi_generic_state *state;
-	acpi_status status;
-
-	ACPI_FUNCTION_NAME(ds_result_pop);
-
-	state = walk_state->results;
-
-	/* Incorrect state of result stack */
-
-	if (state && !walk_state->result_count) {
-		ACPI_ERROR((AE_INFO, "No results on result stack"));
-		return (AE_AML_INTERNAL);
-	}
-
-	if (!state && walk_state->result_count) {
-		ACPI_ERROR((AE_INFO, "No result state for result stack"));
-		return (AE_AML_INTERNAL);
-	}
-
-	/* Empty result stack */
-
-	if (!state) {
-		ACPI_ERROR((AE_INFO, "Result stack is empty! State=%p",
-			    walk_state));
-		return (AE_AML_NO_RETURN_VALUE);
-	}
-
-	/* Return object of the top element and clean that top element result stack */
-
-	walk_state->result_count--;
-	index = walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
-
-	*object = state->results.obj_desc[index];
-	if (!*object) {
-		ACPI_ERROR((AE_INFO,
-			    "No result objects on result stack, State=%p",
-			    walk_state));
-		return (AE_AML_NO_RETURN_VALUE);
-	}
-
-	state->results.obj_desc[index] = NULL;
-	if (index == 0) {
-		status = acpi_ds_result_stack_pop(walk_state);
-		if (ACPI_FAILURE(status)) {
-			return (status);
-		}
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-			  "Obj=%p [%s] Index=%X State=%p Num=%X\n", *object,
-			  acpi_ut_get_object_type_name(*object),
-			  index, walk_state, walk_state->result_count));
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_result_push
- *
- * PARAMETERS:  Object              - Where to return the popped object
- *              walk_state          - Current Walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Push an object onto the current result stack
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_result_push(union acpi_operand_object * object,
-		    struct acpi_walk_state * walk_state)
-{
-	union acpi_generic_state *state;
-	acpi_status status;
-	u32 index;
-
-	ACPI_FUNCTION_NAME(ds_result_push);
-
-	if (walk_state->result_count > walk_state->result_size) {
-		ACPI_ERROR((AE_INFO, "Result stack is full"));
-		return (AE_AML_INTERNAL);
-	} else if (walk_state->result_count == walk_state->result_size) {
-
-		/* Extend the result stack */
-
-		status = acpi_ds_result_stack_push(walk_state);
-		if (ACPI_FAILURE(status)) {
-			ACPI_ERROR((AE_INFO,
-				    "Failed to extend the result stack"));
-			return (status);
-		}
-	}
-
-	if (!(walk_state->result_count < walk_state->result_size)) {
-		ACPI_ERROR((AE_INFO, "No free elements in result stack"));
-		return (AE_AML_INTERNAL);
-	}
-
-	state = walk_state->results;
-	if (!state) {
-		ACPI_ERROR((AE_INFO, "No result stack frame during push"));
-		return (AE_AML_INTERNAL);
-	}
-
-	if (!object) {
-		ACPI_ERROR((AE_INFO,
-			    "Null Object! Obj=%p State=%p Num=%X",
-			    object, walk_state, walk_state->result_count));
-		return (AE_BAD_PARAMETER);
-	}
-
-	/* Assign the address of object to the top free element of result stack */
-
-	index = walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
-	state->results.obj_desc[index] = object;
-	walk_state->result_count++;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] State=%p Num=%X Cur=%X\n",
-			  object,
-			  acpi_ut_get_object_type_name((union
-							acpi_operand_object *)
-						       object), walk_state,
-			  walk_state->result_count,
-			  walk_state->current_result));
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_result_stack_push
- *
- * PARAMETERS:  walk_state          - Current Walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Push an object onto the walk_state result stack
- *
- ******************************************************************************/
-
-static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *walk_state)
-{
-	union acpi_generic_state *state;
-
-	ACPI_FUNCTION_NAME(ds_result_stack_push);
-
-	/* Check for stack overflow */
-
-	if (((u32) walk_state->result_size + ACPI_RESULTS_FRAME_OBJ_NUM) >
-	    ACPI_RESULTS_OBJ_NUM_MAX) {
-		ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%X",
-			    walk_state, walk_state->result_size));
-		return (AE_STACK_OVERFLOW);
-	}
-
-	state = acpi_ut_create_generic_state();
-	if (!state) {
-		return (AE_NO_MEMORY);
-	}
-
-	state->common.descriptor_type = ACPI_DESC_TYPE_STATE_RESULT;
-	acpi_ut_push_generic_state(&walk_state->results, state);
-
-	/* Increase the length of the result stack by the length of frame */
-
-	walk_state->result_size += ACPI_RESULTS_FRAME_OBJ_NUM;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Results=%p State=%p\n",
-			  state, walk_state));
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_result_stack_pop
- *
- * PARAMETERS:  walk_state          - Current Walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Pop an object off of the walk_state result stack
- *
- ******************************************************************************/
-
-static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state)
-{
-	union acpi_generic_state *state;
-
-	ACPI_FUNCTION_NAME(ds_result_stack_pop);
-
-	/* Check for stack underflow */
-
-	if (walk_state->results == NULL) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Result stack underflow - State=%p\n",
-				  walk_state));
-		return (AE_AML_NO_OPERAND);
-	}
-
-	if (walk_state->result_size < ACPI_RESULTS_FRAME_OBJ_NUM) {
-		ACPI_ERROR((AE_INFO, "Insufficient result stack size"));
-		return (AE_AML_INTERNAL);
-	}
-
-	state = acpi_ut_pop_generic_state(&walk_state->results);
-	acpi_ut_delete_generic_state(state);
-
-	/* Decrease the length of result stack by the length of frame */
-
-	walk_state->result_size -= ACPI_RESULTS_FRAME_OBJ_NUM;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-			  "Result=%p RemainingResults=%X State=%p\n",
-			  state, walk_state->result_count, walk_state));
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_obj_stack_push
- *
- * PARAMETERS:  Object              - Object to push
- *              walk_state          - Current Walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Push an object onto this walk's object/operand stack
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
-{
-	ACPI_FUNCTION_NAME(ds_obj_stack_push);
-
-	/* Check for stack overflow */
-
-	if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) {
-		ACPI_ERROR((AE_INFO,
-			    "Object stack overflow! Obj=%p State=%p #Ops=%X",
-			    object, walk_state, walk_state->num_operands));
-		return (AE_STACK_OVERFLOW);
-	}
-
-	/* Put the object onto the stack */
-
-	walk_state->operands[walk_state->operand_index] = object;
-	walk_state->num_operands++;
-
-	/* For the usual order of filling the operand stack */
-
-	walk_state->operand_index++;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] State=%p #Ops=%X\n",
-			  object,
-			  acpi_ut_get_object_type_name((union
-							acpi_operand_object *)
-						       object), walk_state,
-			  walk_state->num_operands));
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_obj_stack_pop
- *
- * PARAMETERS:  pop_count           - Number of objects/entries to pop
- *              walk_state          - Current Walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Pop this walk's object stack.  Objects on the stack are NOT
- *              deleted by this routine.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
-{
-	u32 i;
-
-	ACPI_FUNCTION_NAME(ds_obj_stack_pop);
-
-	for (i = 0; i < pop_count; i++) {
-
-		/* Check for stack underflow */
-
-		if (walk_state->num_operands == 0) {
-			ACPI_ERROR((AE_INFO,
-				    "Object stack underflow! Count=%X State=%p #Ops=%X",
-				    pop_count, walk_state,
-				    walk_state->num_operands));
-			return (AE_STACK_UNDERFLOW);
-		}
-
-		/* Just set the stack entry to null */
-
-		walk_state->num_operands--;
-		walk_state->operands[walk_state->num_operands] = NULL;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%X\n",
-			  pop_count, walk_state, walk_state->num_operands));
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_obj_stack_pop_and_delete
- *
- * PARAMETERS:  pop_count           - Number of objects/entries to pop
- *              walk_state          - Current Walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Pop this walk's object stack and delete each object that is
- *              popped off.
- *
- ******************************************************************************/
-
-void
-acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
-				 struct acpi_walk_state *walk_state)
-{
-	s32 i;
-	union acpi_operand_object *obj_desc;
-
-	ACPI_FUNCTION_NAME(ds_obj_stack_pop_and_delete);
-
-	if (pop_count == 0) {
-		return;
-	}
-
-	for (i = (s32) pop_count - 1; i >= 0; i--) {
-		if (walk_state->num_operands == 0) {
-			return;
-		}
-
-		/* Pop the stack and delete an object if present in this stack entry */
-
-		walk_state->num_operands--;
-		obj_desc = walk_state->operands[i];
-		if (obj_desc) {
-			acpi_ut_remove_reference(walk_state->operands[i]);
-			walk_state->operands[i] = NULL;
-		}
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%X\n",
-			  pop_count, walk_state, walk_state->num_operands));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_get_current_walk_state
- *
- * PARAMETERS:  Thread          - Get current active state for this Thread
- *
- * RETURN:      Pointer to the current walk state
- *
- * DESCRIPTION: Get the walk state that is at the head of the list (the "current"
- *              walk state.)
- *
- ******************************************************************************/
-
-struct acpi_walk_state *acpi_ds_get_current_walk_state(struct acpi_thread_state
-						       *thread)
-{
-	ACPI_FUNCTION_NAME(ds_get_current_walk_state);
-
-	if (!thread) {
-		return (NULL);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Current WalkState %p\n",
-			  thread->walk_state_list));
-
-	return (thread->walk_state_list);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_push_walk_state
- *
- * PARAMETERS:  walk_state      - State to push
- *              Thread          - Thread state object
- *
- * RETURN:      None
- *
- * DESCRIPTION: Place the Thread state at the head of the state list
- *
- ******************************************************************************/
-
-void
-acpi_ds_push_walk_state(struct acpi_walk_state *walk_state,
-			struct acpi_thread_state *thread)
-{
-	ACPI_FUNCTION_TRACE(ds_push_walk_state);
-
-	walk_state->next = thread->walk_state_list;
-	thread->walk_state_list = walk_state;
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_pop_walk_state
- *
- * PARAMETERS:  Thread      - Current thread state
- *
- * RETURN:      A walk_state object popped from the thread's stack
- *
- * DESCRIPTION: Remove and return the walkstate object that is at the head of
- *              the walk stack for the given walk list.  NULL indicates that
- *              the list is empty.
- *
- ******************************************************************************/
-
-struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
-{
-	struct acpi_walk_state *walk_state;
-
-	ACPI_FUNCTION_TRACE(ds_pop_walk_state);
-
-	walk_state = thread->walk_state_list;
-
-	if (walk_state) {
-
-		/* Next walk state becomes the current walk state */
-
-		thread->walk_state_list = walk_state->next;
-
-		/*
-		 * Don't clear the NEXT field, this serves as an indicator
-		 * that there is a parent WALK STATE
-		 * Do Not: walk_state->Next = NULL;
-		 */
-	}
-
-	return_PTR(walk_state);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_create_walk_state
- *
- * PARAMETERS:  owner_id        - ID for object creation
- *              Origin          - Starting point for this walk
- *              method_desc     - Method object
- *              Thread          - Current thread state
- *
- * RETURN:      Pointer to the new walk state.
- *
- * DESCRIPTION: Allocate and initialize a new walk state.  The current walk
- *              state is set to this new state.
- *
- ******************************************************************************/
-
-struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object
-						  *origin, union acpi_operand_object
-						  *method_desc, struct acpi_thread_state
-						  *thread)
-{
-	struct acpi_walk_state *walk_state;
-
-	ACPI_FUNCTION_TRACE(ds_create_walk_state);
-
-	walk_state = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_walk_state));
-	if (!walk_state) {
-		return_PTR(NULL);
-	}
-
-	walk_state->descriptor_type = ACPI_DESC_TYPE_WALK;
-	walk_state->method_desc = method_desc;
-	walk_state->owner_id = owner_id;
-	walk_state->origin = origin;
-	walk_state->thread = thread;
-
-	walk_state->parser_state.start_op = origin;
-
-	/* Init the method args/local */
-
-#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
-	acpi_ds_method_data_init(walk_state);
-#endif
-
-	/* Put the new state at the head of the walk list */
-
-	if (thread) {
-		acpi_ds_push_walk_state(walk_state, thread);
-	}
-
-	return_PTR(walk_state);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_init_aml_walk
- *
- * PARAMETERS:  walk_state      - New state to be initialized
- *              Op              - Current parse op
- *              method_node     - Control method NS node, if any
- *              aml_start       - Start of AML
- *              aml_length      - Length of AML
- *              Info            - Method info block (params, etc.)
- *              pass_number     - 1, 2, or 3
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Initialize a walk state for a pass 1 or 2 parse tree walk
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
-		      union acpi_parse_object *op,
-		      struct acpi_namespace_node *method_node,
-		      u8 * aml_start,
-		      u32 aml_length,
-		      struct acpi_evaluate_info *info, u8 pass_number)
-{
-	acpi_status status;
-	struct acpi_parse_state *parser_state = &walk_state->parser_state;
-	union acpi_parse_object *extra_op;
-
-	ACPI_FUNCTION_TRACE(ds_init_aml_walk);
-
-	walk_state->parser_state.aml =
-	    walk_state->parser_state.aml_start = aml_start;
-	walk_state->parser_state.aml_end =
-	    walk_state->parser_state.pkg_end = aml_start + aml_length;
-
-	/* The next_op of the next_walk will be the beginning of the method */
-
-	walk_state->next_op = NULL;
-	walk_state->pass_number = pass_number;
-
-	if (info) {
-		walk_state->params = info->parameters;
-		walk_state->caller_return_desc = &info->return_object;
-	}
-
-	status = acpi_ps_init_scope(&walk_state->parser_state, op);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	if (method_node) {
-		walk_state->parser_state.start_node = method_node;
-		walk_state->walk_type = ACPI_WALK_METHOD;
-		walk_state->method_node = method_node;
-		walk_state->method_desc =
-		    acpi_ns_get_attached_object(method_node);
-
-		/* Push start scope on scope stack and make it current  */
-
-		status =
-		    acpi_ds_scope_stack_push(method_node, ACPI_TYPE_METHOD,
-					     walk_state);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		/* Init the method arguments */
-
-		status = acpi_ds_method_data_init_args(walk_state->params,
-						       ACPI_METHOD_NUM_ARGS,
-						       walk_state);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	} else {
-		/*
-		 * Setup the current scope.
-		 * Find a Named Op that has a namespace node associated with it.
-		 * search upwards from this Op.  Current scope is the first
-		 * Op with a namespace node.
-		 */
-		extra_op = parser_state->start_op;
-		while (extra_op && !extra_op->common.node) {
-			extra_op = extra_op->common.parent;
-		}
-
-		if (!extra_op) {
-			parser_state->start_node = NULL;
-		} else {
-			parser_state->start_node = extra_op->common.node;
-		}
-
-		if (parser_state->start_node) {
-
-			/* Push start scope on scope stack and make it current  */
-
-			status =
-			    acpi_ds_scope_stack_push(parser_state->start_node,
-						     parser_state->start_node->
-						     type, walk_state);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-		}
-	}
-
-	status = acpi_ds_init_callbacks(walk_state, pass_number);
-	return_ACPI_STATUS(status);
-}
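
When no method node is supplied, acpi_ds_init_aml_walk() above falls back to walking up the parse tree from the start Op until it finds an Op with an attached namespace node, which then becomes the starting scope. A standalone sketch of that walk, with simplified local types:

#include <stdio.h>

struct parse_op {
	const char *name;
	struct parse_op *parent;
	void *node;		/* attached namespace node, if any */
};

static void *find_start_node(struct parse_op *op)
{
	/* Search upwards; current scope is the first Op with a namespace node */
	while (op && !op->node)
		op = op->parent;
	return op ? op->node : NULL;
}

int main(void)
{
	int scope_node = 1;
	struct parse_op root = { "Scope", NULL, &scope_node };
	struct parse_op leaf = { "Name", &root, NULL };

	printf("start node %sfound\n", find_start_node(&leaf) ? "" : "not ");
	return 0;
}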
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_delete_walk_state
- *
- * PARAMETERS:  walk_state      - State to delete
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Delete a walk state including all internal data structures
- *
- ******************************************************************************/
-
-void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
-{
-	union acpi_generic_state *state;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_delete_walk_state, walk_state);
-
-	if (!walk_state) {
-		return;
-	}
-
-	if (walk_state->descriptor_type != ACPI_DESC_TYPE_WALK) {
-		ACPI_ERROR((AE_INFO, "%p is not a valid walk state",
-			    walk_state));
-		return;
-	}
-
-	/* There should not be any open scopes */
-
-	if (walk_state->parser_state.scope) {
-		ACPI_ERROR((AE_INFO, "%p walk still has a scope list",
-			    walk_state));
-		acpi_ps_cleanup_scope(&walk_state->parser_state);
-	}
-
-	/* Always must free any linked control states */
-
-	while (walk_state->control_state) {
-		state = walk_state->control_state;
-		walk_state->control_state = state->common.next;
-
-		acpi_ut_delete_generic_state(state);
-	}
-
-	/* Always must free any linked parse states */
-
-	while (walk_state->scope_info) {
-		state = walk_state->scope_info;
-		walk_state->scope_info = state->common.next;
-
-		acpi_ut_delete_generic_state(state);
-	}
-
-	/* Always must free any stacked result states */
-
-	while (walk_state->results) {
-		state = walk_state->results;
-		walk_state->results = state->common.next;
-
-		acpi_ut_delete_generic_state(state);
-	}
-
-	ACPI_FREE(walk_state);
-	return_VOID;
-}
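
The result stack managed by the deleted dswstate.c grows in fixed-size frames: the slot for the next object is result_count modulo the frame size, and a fresh frame is pushed only when the current one is full. A standalone illustration of that indexing; the frame size below is a stand-in for ACPI_RESULTS_FRAME_OBJ_NUM, not necessarily its real value:

#include <stdio.h>

#define FRAME_OBJ_NUM 8		/* stand-in for ACPI_RESULTS_FRAME_OBJ_NUM */

int main(void)
{
	unsigned int result_count;

	for (result_count = 0; result_count < 20; result_count++) {
		unsigned int index = result_count % FRAME_OBJ_NUM;

		if (index == 0)
			printf("push a new frame before storing object %u\n",
			       result_count);
		printf("object %u -> frame slot %u\n", result_count, index);
	}
	return 0;
}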
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 30f3ef2..a2b82c9 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -42,7 +42,6 @@
 #include <asm/io.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
-#include <acpi/actypes.h>
 
 #define ACPI_EC_CLASS			"embedded_controller"
 #define ACPI_EC_DEVICE_NAME		"Embedded Controller"
@@ -121,31 +120,6 @@
 	spinlock_t curr_lock;
 } *boot_ec, *first_ec;
 
-/* 
- * Some Asus system have exchanged ECDT data/command IO addresses.
- */
-static int print_ecdt_error(const struct dmi_system_id *id)
-{
-	printk(KERN_NOTICE PREFIX "%s detected - "
-		"ECDT has exchanged control/data I/O address\n",
-		id->ident);
-	return 0;
-}
-
-static struct dmi_system_id __cpuinitdata ec_dmi_table[] = {
-	{
-	print_ecdt_error, "Asus L4R", {
-	DMI_MATCH(DMI_BIOS_VERSION, "1008.006"),
-	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),
-	DMI_MATCH(DMI_BOARD_NAME, "L4R") }, NULL},
-	{
-	print_ecdt_error, "Asus M6R", {
-	DMI_MATCH(DMI_BIOS_VERSION, "0207"),
-	DMI_MATCH(DMI_PRODUCT_NAME, "M6R"),
-	DMI_MATCH(DMI_BOARD_NAME, "M6R") }, NULL},
-	{},
-};
-
 /* --------------------------------------------------------------------------
                              Transaction Management
    -------------------------------------------------------------------------- */
@@ -370,7 +344,7 @@
  * Note: samsung nv5000 doesn't work with ec burst mode.
  * http://bugzilla.kernel.org/show_bug.cgi?id=4980
  */
-int acpi_ec_burst_enable(struct acpi_ec *ec)
+static int acpi_ec_burst_enable(struct acpi_ec *ec)
 {
 	u8 d;
 	struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
@@ -380,7 +354,7 @@
 	return acpi_ec_transaction(ec, &t, 0);
 }
 
-int acpi_ec_burst_disable(struct acpi_ec *ec)
+static int acpi_ec_burst_disable(struct acpi_ec *ec)
 {
 	struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
 				.wdata = NULL, .rdata = NULL,
@@ -756,10 +730,15 @@
 acpi_ec_register_query_methods(acpi_handle handle, u32 level,
 			       void *context, void **return_value)
 {
-	struct acpi_namespace_node *node = handle;
+	char node_name[5];
+	struct acpi_buffer buffer = { sizeof(node_name), node_name };
 	struct acpi_ec *ec = context;
 	int value = 0;
-	if (sscanf(node->name.ascii, "_Q%x", &value) == 1) {
+	acpi_status status;
+
+	status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
+
+	if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1) {
 		acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
 	}
 	return AE_OK;
@@ -978,8 +957,8 @@
 
 int __init acpi_ec_ecdt_probe(void)
 {
-	int ret;
 	acpi_status status;
+	struct acpi_ec *saved_ec = NULL;
 	struct acpi_table_ecdt *ecdt_ptr;
 
 	boot_ec = make_acpi_ec();
@@ -994,42 +973,55 @@
 		pr_info(PREFIX "EC description table is found, configuring boot EC\n");
 		boot_ec->command_addr = ecdt_ptr->control.address;
 		boot_ec->data_addr = ecdt_ptr->data.address;
-		if (dmi_check_system(ec_dmi_table)) {
-			/*
-			 * If the board falls into ec_dmi_table, it means
-			 * that ECDT table gives the incorrect command/status
-			 * & data I/O address. Just fix it.
-			 */
-			boot_ec->data_addr = ecdt_ptr->control.address;
-			boot_ec->command_addr = ecdt_ptr->data.address;
-		}
 		boot_ec->gpe = ecdt_ptr->gpe;
 		boot_ec->handle = ACPI_ROOT_OBJECT;
 		acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
+		/* Don't trust ECDT, which comes from ASUSTek */
+		if (!dmi_name_in_vendors("ASUS"))
+			goto install;
+		saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL);
+		if (!saved_ec)
+			return -ENOMEM;
+		memcpy(saved_ec, boot_ec, sizeof(*saved_ec));
+	/* fall through */
+	}
+	/* This workaround is needed only on some broken machines,
+	 * which require early EC, but fail to provide ECDT */
+	printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
+	status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
+					boot_ec, NULL);
+	/* Check that acpi_get_devices actually found something */
+	if (ACPI_FAILURE(status) || !boot_ec->handle)
+		goto error;
+	if (saved_ec) {
+		/* try to find good ECDT from ASUSTek */
+		if (saved_ec->command_addr != boot_ec->command_addr ||
+		    saved_ec->data_addr != boot_ec->data_addr ||
+		    saved_ec->gpe != boot_ec->gpe ||
+		    saved_ec->handle != boot_ec->handle)
+			pr_info(PREFIX "ASUSTek keeps feeding us with broken "
+			"ECDT tables, which are very hard to work around. "
+			"Trying to use DSDT EC info instead. Please send "
+			"output of acpidump to linux-acpi@vger.kernel.org\n");
+		kfree(saved_ec);
+		saved_ec = NULL;
 	} else {
-		/* This workaround is needed only on some broken machines,
-		 * which require early EC, but fail to provide ECDT */
-		acpi_handle x;
-		printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
-		status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
-						boot_ec, NULL);
-		/* Check that acpi_get_devices actually find something */
-		if (ACPI_FAILURE(status) || !boot_ec->handle)
-			goto error;
 		/* We really need to limit this workaround, the only ASUS,
-		 * which needs it, has fake EC._INI method, so use it as flag.
-		 * Keep boot_ec struct as it will be needed soon.
-		 */
-		if (ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", &x)))
+		* which needs it, has fake EC._INI method, so use it as flag.
+		* Keep boot_ec struct as it will be needed soon.
+		*/
+		acpi_handle dummy;
+		if (!dmi_name_in_vendors("ASUS") ||
+		    ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI",
+							&dummy)))
 			return -ENODEV;
 	}
-
-	ret = ec_install_handlers(boot_ec);
-	if (!ret) {
+install:
+	if (!ec_install_handlers(boot_ec)) {
 		first_ec = boot_ec;
 		return 0;
 	}
-      error:
+error:
 	kfree(boot_ec);
 	boot_ec = NULL;
 	return -ENODEV;
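
The reworked acpi_ec_register_query_methods() above derives the query value from the node name returned by acpi_get_name() instead of reaching into namespace internals. A standalone check of the sscanf() pattern it relies on, where the query value follows the "_Q" prefix in hex:

#include <stdio.h>

int main(void)
{
	const char *node_name = "_Q1A";	/* example EC query method name */
	unsigned int value = 0;

	if (sscanf(node_name, "_Q%x", &value) == 1)
		printf("%s -> query value 0x%02X\n", node_name, value);	/* 0x1A */
	return 0;
}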
diff --git a/drivers/acpi/events/Makefile b/drivers/acpi/events/Makefile
deleted file mode 100644
index d29f2ee..0000000
--- a/drivers/acpi/events/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for all Linux ACPI interpreter subdirectories
-#
-
-obj-y := evevent.o  evregion.o  evsci.o    evxfevnt.o \
-	 evmisc.o   evrgnini.o  evxface.o  evxfregn.o \
-	 evgpe.o    evgpeblk.o
-
-EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c
deleted file mode 100644
index c56c5c6..0000000
--- a/drivers/acpi/events/evevent.c
+++ /dev/null
@@ -1,312 +0,0 @@
-/******************************************************************************
- *
- * Module Name: evevent - Fixed Event handling and dispatch
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acevents.h>
-
-#define _COMPONENT          ACPI_EVENTS
-ACPI_MODULE_NAME("evevent")
-
-/* Local prototypes */
-static acpi_status acpi_ev_fixed_event_initialize(void);
-
-static u32 acpi_ev_fixed_event_dispatch(u32 event);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_initialize_events
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Initialize global data structures for ACPI events (Fixed, GPE)
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_initialize_events(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_initialize_events);
-
-	/*
-	 * Initialize the Fixed and General Purpose Events. This is done prior to
-	 * enabling SCIs to prevent interrupts from occurring before the handlers are
-	 * installed.
-	 */
-	status = acpi_ev_fixed_event_initialize();
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Unable to initialize fixed events"));
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_ev_gpe_initialize();
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Unable to initialize general purpose events"));
-		return_ACPI_STATUS(status);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_install_fadt_gpes
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Completes initialization of the FADT-defined GPE blocks
- *              (0 and 1). This causes the _PRW methods to be run, so the HW
- *              must be fully initialized at this point, including global lock
- *              support.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_install_fadt_gpes(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_install_fadt_gpes);
-
-	/* Namespace must be locked */
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	/* FADT GPE Block 0 */
-
-	(void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
-					   acpi_gbl_gpe_fadt_blocks[0]);
-
-	/* FADT GPE Block 1 */
-
-	(void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
-					   acpi_gbl_gpe_fadt_blocks[1]);
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_install_xrupt_handlers
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Install interrupt handlers for the SCI and Global Lock
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_install_xrupt_handlers(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers);
-
-	/* Install the SCI handler */
-
-	status = acpi_ev_install_sci_handler();
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Unable to install System Control Interrupt handler"));
-		return_ACPI_STATUS(status);
-	}
-
-	/* Install the handler for the Global Lock */
-
-	status = acpi_ev_init_global_lock_handler();
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Unable to initialize Global Lock handler"));
-		return_ACPI_STATUS(status);
-	}
-
-	acpi_gbl_events_initialized = TRUE;
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_fixed_event_initialize
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Install the fixed event handlers and enable the fixed events.
- *
- ******************************************************************************/
-
-static acpi_status acpi_ev_fixed_event_initialize(void)
-{
-	u32 i;
-	acpi_status status;
-
-	/*
-	 * Initialize the structure that keeps track of fixed event handlers
-	 * and enable the fixed events.
-	 */
-	for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
-		acpi_gbl_fixed_event_handlers[i].handler = NULL;
-		acpi_gbl_fixed_event_handlers[i].context = NULL;
-
-		/* Enable the fixed event */
-
-		if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) {
-			status =
-			    acpi_set_register(acpi_gbl_fixed_event_info[i].
-					      enable_register_id, 0);
-			if (ACPI_FAILURE(status)) {
-				return (status);
-			}
-		}
-	}
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_fixed_event_detect
- *
- * PARAMETERS:  None
- *
- * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
- *
- * DESCRIPTION: Checks the PM status register for active fixed events
- *
- ******************************************************************************/
-
-u32 acpi_ev_fixed_event_detect(void)
-{
-	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
-	u32 fixed_status;
-	u32 fixed_enable;
-	u32 i;
-
-	ACPI_FUNCTION_NAME(ev_fixed_event_detect);
-
-	/*
-	 * Read the fixed feature status and enable registers, as all the cases
-	 * depend on their values.  Ignore errors here.
-	 */
-	(void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
-	(void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
-			  "Fixed Event Block: Enable %08X Status %08X\n",
-			  fixed_enable, fixed_status));
-
-	/*
-	 * Check for all possible Fixed Events and dispatch those that are active
-	 */
-	for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
-
-		/* Both the status and enable bits must be on for this event */
-
-		if ((fixed_status & acpi_gbl_fixed_event_info[i].
-		     status_bit_mask)
-		    && (fixed_enable & acpi_gbl_fixed_event_info[i].
-			enable_bit_mask)) {
-
-			/* Found an active (signalled) event */
-			acpi_os_fixed_event_count(i);
-			int_status |= acpi_ev_fixed_event_dispatch(i);
-		}
-	}
-
-	return (int_status);
-}
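
acpi_ev_fixed_event_detect() above dispatches an event only when its bit is set in both the PM1 status and PM1 enable snapshots. A standalone sketch of that test, with example register values rather than reads from hardware:

#include <stdio.h>

#define NUM_FIXED_EVENTS 4	/* stand-in for ACPI_NUM_FIXED_EVENTS */

int main(void)
{
	unsigned int fixed_status = 0x05;	/* example register snapshots */
	unsigned int fixed_enable = 0x06;
	unsigned int i;

	for (i = 0; i < NUM_FIXED_EVENTS; i++) {
		unsigned int mask = 1u << i;

		/* Both the status and enable bits must be on for this event */
		if ((fixed_status & mask) && (fixed_enable & mask))
			printf("dispatch fixed event %u\n", i);	/* only bit 2 */
	}
	return 0;
}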
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_fixed_event_dispatch
- *
- * PARAMETERS:  Event               - Event type
- *
- * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
- *
- * DESCRIPTION: Clears the status bit for the requested event, calls the
- *              handler that previously registered for the event.
- *
- ******************************************************************************/
-
-static u32 acpi_ev_fixed_event_dispatch(u32 event)
-{
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* Clear the status bit */
-
-	(void)acpi_set_register(acpi_gbl_fixed_event_info[event].
-				status_register_id, 1);
-
-	/*
-	 * Make sure we've got a handler.  If not, report an error.
-	 * The event is disabled to prevent further interrupts.
-	 */
-	if (NULL == acpi_gbl_fixed_event_handlers[event].handler) {
-		(void)acpi_set_register(acpi_gbl_fixed_event_info[event].
-					enable_register_id, 0);
-
-		ACPI_ERROR((AE_INFO,
-			    "No installed handler for fixed event [%08X]",
-			    event));
-
-		return (ACPI_INTERRUPT_NOT_HANDLED);
-	}
-
-	/* Invoke the Fixed Event handler */
-
-	return ((acpi_gbl_fixed_event_handlers[event].
-		 handler) (acpi_gbl_fixed_event_handlers[event].context));
-}
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
deleted file mode 100644
index f45c74f..0000000
--- a/drivers/acpi/events/evgpe.c
+++ /dev/null
@@ -1,725 +0,0 @@
-/******************************************************************************
- *
- * Module Name: evgpe - General Purpose Event handling and dispatch
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acevents.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_EVENTS
-ACPI_MODULE_NAME("evgpe")
-
-/* Local prototypes */
-static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_set_gpe_type
- *
- * PARAMETERS:  gpe_event_info          - GPE to set
- *              Type                    - New type
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Sets the new type for the GPE (wake, run, or wake/run)
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_set_gpe_type);
-
-	/* Validate type and update register enable masks */
-
-	switch (type) {
-	case ACPI_GPE_TYPE_WAKE:
-	case ACPI_GPE_TYPE_RUNTIME:
-	case ACPI_GPE_TYPE_WAKE_RUN:
-		break;
-
-	default:
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Disable the GPE if currently enabled */
-
-	status = acpi_ev_disable_gpe(gpe_event_info);
-
-	/* Type was validated above */
-
-	gpe_event_info->flags &= ~ACPI_GPE_TYPE_MASK;	/* Clear type bits */
-	gpe_event_info->flags |= type;	/* Insert type */
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_update_gpe_enable_masks
- *
- * PARAMETERS:  gpe_event_info          - GPE to update
- *              Type                    - What to do: ACPI_GPE_DISABLE or
- *                                        ACPI_GPE_ENABLE
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Updates GPE register enable masks based on the GPE type
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
-				u8 type)
-{
-	struct acpi_gpe_register_info *gpe_register_info;
-	u8 register_bit;
-
-	ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);
-
-	gpe_register_info = gpe_event_info->register_info;
-	if (!gpe_register_info) {
-		return_ACPI_STATUS(AE_NOT_EXIST);
-	}
-	register_bit = (u8)
-	    (1 <<
-	     (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
-
-	/* 1) Disable case.  Simply clear all enable bits */
-
-	if (type == ACPI_GPE_DISABLE) {
-		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
-			       register_bit);
-		ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* 2) Enable case.  Set/Clear the appropriate enable bits */
-
-	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
-	case ACPI_GPE_TYPE_WAKE:
-		ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
-		ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
-		break;
-
-	case ACPI_GPE_TYPE_RUNTIME:
-		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
-			       register_bit);
-		ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
-		break;
-
-	case ACPI_GPE_TYPE_WAKE_RUN:
-		ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
-		ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
-		break;
-
-	default:
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
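
acpi_ev_update_gpe_enable_masks() above locates a GPE's bit within its 8-bit register as the offset from the register's base GPE number. A standalone worked example of that computation:

#include <stdio.h>

int main(void)
{
	unsigned int gpe_number = 0x16;
	unsigned int base_gpe_number = 0x10;	/* this register covers GPE 0x10-0x17 */
	unsigned char register_bit = 1u << (gpe_number - base_gpe_number);

	printf("GPE 0x%02X -> bit mask 0x%02X\n", gpe_number, register_bit);	/* 0x40 */
	return 0;
}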
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_enable_gpe
- *
- * PARAMETERS:  gpe_event_info          - GPE to enable
- *              write_to_hardware       - Enable now, or just mark data structs
- *                                        (WAKE GPEs should be deferred)
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Enable a GPE based on the GPE type
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
-		   u8 write_to_hardware)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_enable_gpe);
-
-	/* Make sure HW enable masks are updated */
-
-	status =
-	    acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_ENABLE);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Mark wake-enabled or HW enable, or both */
-
-	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
-	case ACPI_GPE_TYPE_WAKE:
-
-		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
-		break;
-
-	case ACPI_GPE_TYPE_WAKE_RUN:
-
-		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
-
-		/*lint -fallthrough */
-
-	case ACPI_GPE_TYPE_RUNTIME:
-
-		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
-
-		if (write_to_hardware) {
-
-			/* Clear the GPE (of stale events), then enable it */
-
-			status = acpi_hw_clear_gpe(gpe_event_info);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-
-			/* Enable the requested runtime GPE */
-
-			status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
-		}
-		break;
-
-	default:
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_disable_gpe
- *
- * PARAMETERS:  gpe_event_info          - GPE to disable
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Disable a GPE based on the GPE type
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_disable_gpe);
-
-	/* Make sure HW enable masks are updated */
-
-	status =
-	    acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_DISABLE);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Clear the appropriate enabled flags for this GPE */
-
-	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
-	case ACPI_GPE_TYPE_WAKE:
-		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
-		break;
-
-	case ACPI_GPE_TYPE_WAKE_RUN:
-		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
-
-		/* fallthrough */
-
-	case ACPI_GPE_TYPE_RUNTIME:
-
-		/* Disable the requested runtime GPE */
-
-		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
-		break;
-
-	default:
-		break;
-	}
-
-	/*
-	 * Even if we don't know the GPE type, make sure that we always
-	 * disable it. low_disable_gpe will just clear the enable bit for this
-	 * GPE and write it. It will not write out the current GPE enable mask,
-	 * since this may inadvertently enable GPEs too early, if a rogue GPE has
-	 * come in during ACPICA initialization - possibly as a result of AML or
-	 * other code that has enabled the GPE.
-	 */
-	status = acpi_hw_low_disable_gpe(gpe_event_info);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_get_gpe_event_info
- *
- * PARAMETERS:  gpe_device          - Device node.  NULL for GPE0/GPE1
- *              gpe_number          - Raw GPE number
- *
- * RETURN:      A GPE event_info struct. NULL if not a valid GPE
- *
- * DESCRIPTION: Returns the event_info struct associated with this GPE.
- *              Validates the gpe_block and the gpe_number
- *
- *              Should be called only when the GPE lists are semaphore locked
- *              and not subject to change.
- *
- ******************************************************************************/
-
-struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
-						       u32 gpe_number)
-{
-	union acpi_operand_object *obj_desc;
-	struct acpi_gpe_block_info *gpe_block;
-	u32 i;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* A NULL gpe_block means use the FADT-defined GPE block(s) */
-
-	if (!gpe_device) {
-
-		/* Examine GPE Block 0 and 1 (These blocks are permanent) */
-
-		for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
-			gpe_block = acpi_gbl_gpe_fadt_blocks[i];
-			if (gpe_block) {
-				if ((gpe_number >= gpe_block->block_base_number)
-				    && (gpe_number <
-					gpe_block->block_base_number +
-					(gpe_block->register_count * 8))) {
-					return (&gpe_block->
-						event_info[gpe_number -
-							   gpe_block->
-							   block_base_number]);
-				}
-			}
-		}
-
-		/* The gpe_number was not in the range of either FADT GPE block */
-
-		return (NULL);
-	}
-
-	/* A Non-NULL gpe_device means this is a GPE Block Device */
-
-	obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
-					       gpe_device);
-	if (!obj_desc || !obj_desc->device.gpe_block) {
-		return (NULL);
-	}
-
-	gpe_block = obj_desc->device.gpe_block;
-
-	if ((gpe_number >= gpe_block->block_base_number) &&
-	    (gpe_number <
-	     gpe_block->block_base_number + (gpe_block->register_count * 8))) {
-		return (&gpe_block->
-			event_info[gpe_number - gpe_block->block_base_number]);
-	}
-
-	return (NULL);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_gpe_detect
- *
- * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
- *                                    Can have multiple GPE blocks attached.
- *
- * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
- *
- * DESCRIPTION: Detect if any GP events have occurred.  This function is
- *              executed at interrupt level.
- *
- ******************************************************************************/
-
-u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
-{
-	acpi_status status;
-	struct acpi_gpe_block_info *gpe_block;
-	struct acpi_gpe_register_info *gpe_register_info;
-	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
-	u8 enabled_status_byte;
-	u32 status_reg;
-	u32 enable_reg;
-	acpi_cpu_flags flags;
-	u32 i;
-	u32 j;
-
-	ACPI_FUNCTION_NAME(ev_gpe_detect);
-
-	/* Check for the case where there are no GPEs */
-
-	if (!gpe_xrupt_list) {
-		return (int_status);
-	}
-
-	/*
-	 * We need to obtain the GPE lock for both the data structs and registers
-	 * Note: Not necessary to obtain the hardware lock, since the GPE registers
-	 * are owned by the gpe_lock.
-	 */
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	/* Examine all GPE blocks attached to this interrupt level */
-
-	gpe_block = gpe_xrupt_list->gpe_block_list_head;
-	while (gpe_block) {
-		/*
-		 * Read all of the 8-bit GPE status and enable registers
-		 * in this GPE block, saving all of them.
-		 * Find all currently active GP events.
-		 */
-		for (i = 0; i < gpe_block->register_count; i++) {
-
-			/* Get the next status/enable pair */
-
-			gpe_register_info = &gpe_block->register_info[i];
-
-			/* Read the Status Register */
-
-			status =
-			    acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH,
-						   &status_reg,
-						   &gpe_register_info->
-						   status_address);
-			if (ACPI_FAILURE(status)) {
-				goto unlock_and_exit;
-			}
-
-			/* Read the Enable Register */
-
-			status =
-			    acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH,
-						   &enable_reg,
-						   &gpe_register_info->
-						   enable_address);
-			if (ACPI_FAILURE(status)) {
-				goto unlock_and_exit;
-			}
-
-			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
-					  "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n",
-					  gpe_register_info->base_gpe_number,
-					  status_reg, enable_reg));
-
-			/* Check if there is anything active at all in this register */
-
-			enabled_status_byte = (u8) (status_reg & enable_reg);
-			if (!enabled_status_byte) {
-
-				/* No active GPEs in this register, move on */
-
-				continue;
-			}
-
-			/* Now look at the individual GPEs in this byte register */
-
-			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
-
-				/* Examine one GPE bit */
-
-				if (enabled_status_byte & (1 << j)) {
-					/*
-					 * Found an active GPE. Dispatch the event to a handler
-					 * or method.
-					 */
-					int_status |=
-					    acpi_ev_gpe_dispatch(&gpe_block->
-						event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
-				}
-			}
-		}
-
-		gpe_block = gpe_block->next;
-	}
-
-      unlock_and_exit:
-
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	return (int_status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_asynch_execute_gpe_method
- *
- * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
- *
- * RETURN:      None
- *
- * DESCRIPTION: Perform the actual execution of a GPE control method. This
- *              function is called from an invocation of acpi_os_execute and
- *              therefore does NOT execute at interrupt level - so that
- *              the control method itself is not executed in the context of
- *              an interrupt handler.
- *
- ******************************************************************************/
-static void acpi_ev_asynch_enable_gpe(void *context);
-
-static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
-{
-	struct acpi_gpe_event_info *gpe_event_info = (void *)context;
-	acpi_status status;
-	struct acpi_gpe_event_info local_gpe_event_info;
-	struct acpi_evaluate_info *info;
-
-	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_VOID;
-	}
-
-	/* Must revalidate the gpe_number/gpe_block */
-
-	if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
-		status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-		return_VOID;
-	}
-
-	/* Set the GPE flags for return to enabled state */
-
-	(void)acpi_ev_enable_gpe(gpe_event_info, FALSE);
-
-	/*
-	 * Take a snapshot of the GPE info for this level - we copy the
-	 * info to prevent a race condition with remove_handler/remove_block.
-	 */
-	ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info,
-		    sizeof(struct acpi_gpe_event_info));
-
-	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_VOID;
-	}
-
-	/*
-	 * Must check for control method type dispatch one more
-	 * time to avoid race with ev_gpe_install_handler
-	 */
-	if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
-	    ACPI_GPE_DISPATCH_METHOD) {
-
-		/* Allocate the evaluation information block */
-
-		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
-		if (!info) {
-			status = AE_NO_MEMORY;
-		} else {
-			/*
-			 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
-			 * control method that corresponds to this GPE
-			 */
-			info->prefix_node =
-			    local_gpe_event_info.dispatch.method_node;
-			info->flags = ACPI_IGNORE_RETURN_VALUE;
-
-			status = acpi_ns_evaluate(info);
-			ACPI_FREE(info);
-		}
-
-		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"while evaluating GPE method [%4.4s]",
-					acpi_ut_get_node_name
-					(local_gpe_event_info.dispatch.
-					 method_node)));
-		}
-	}
-	/* Defer enabling of GPE until all notify handlers are done */
-	acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
-				gpe_event_info);
-	return_VOID;
-}
-
-static void acpi_ev_asynch_enable_gpe(void *context)
-{
-	struct acpi_gpe_event_info *gpe_event_info = context;
-	acpi_status status;
-	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
-	    ACPI_GPE_LEVEL_TRIGGERED) {
-		/*
-		 * The GPE is level-triggered, so clear the GPE status bit after
-		 * handling the event.
-		 */
-		status = acpi_hw_clear_gpe(gpe_event_info);
-		if (ACPI_FAILURE(status)) {
-			return_VOID;
-		}
-	}
-
-	/* Enable this GPE */
-	(void)acpi_hw_write_gpe_enable_reg(gpe_event_info);
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_gpe_dispatch
- *
- * PARAMETERS:  gpe_event_info  - Info for this GPE
- *              gpe_number      - Number relative to the parent GPE block
- *
- * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
- *
- * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
- *              or method (e.g. _Lxx/_Exx) handler.
- *
- *              This function executes at interrupt level.
- *
- ******************************************************************************/
-
-u32
-acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
-
-	acpi_os_gpe_count(gpe_number);
-
-	/*
-	 * If edge-triggered, clear the GPE status bit now.  Note that
-	 * level-triggered events are cleared after the GPE is serviced.
-	 */
-	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
-	    ACPI_GPE_EDGE_TRIGGERED) {
-		status = acpi_hw_clear_gpe(gpe_event_info);
-		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"Unable to clear GPE[%2X]",
-					gpe_number));
-			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
-		}
-	}
-
-	/*
-	 * Dispatch the GPE to either an installed handler, or the control method
-	 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
-	 * it and do not attempt to run the method. If there is neither a handler
-	 * nor a method, we disable this GPE to prevent further such pointless
-	 * events from firing.
-	 */
-	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
-	case ACPI_GPE_DISPATCH_HANDLER:
-
-		/*
-		 * Invoke the installed handler (at interrupt level)
-		 * Ignore return status for now.  TBD: leave GPE disabled on error?
-		 */
-		(void)gpe_event_info->dispatch.handler->address(gpe_event_info->
-								dispatch.
-								handler->
-								context);
-
-		/* It is now safe to clear level-triggered events. */
-
-		if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
-		    ACPI_GPE_LEVEL_TRIGGERED) {
-			status = acpi_hw_clear_gpe(gpe_event_info);
-			if (ACPI_FAILURE(status)) {
-				ACPI_EXCEPTION((AE_INFO, status,
-						"Unable to clear GPE[%2X]",
-						gpe_number));
-				return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
-			}
-		}
-		break;
-
-	case ACPI_GPE_DISPATCH_METHOD:
-
-		/*
-		 * Disable the GPE, so it doesn't keep firing before the method has a
-		 * chance to run (it runs asynchronously with interrupts enabled).
-		 */
-		status = acpi_ev_disable_gpe(gpe_event_info);
-		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"Unable to disable GPE[%2X]",
-					gpe_number));
-			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
-		}
-
-		/*
-		 * Execute the method associated with the GPE
-		 * NOTE: Level-triggered GPEs are cleared after the method completes.
-		 */
-		status = acpi_os_execute(OSL_GPE_HANDLER,
-					 acpi_ev_asynch_execute_gpe_method,
-					 gpe_event_info);
-		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"Unable to queue handler for GPE[%2X] - event disabled",
-					gpe_number));
-		}
-		break;
-
-	default:
-
-		/* No handler or method to run! */
-
-		ACPI_ERROR((AE_INFO,
-			    "No handler or method for GPE[%2X], disabling event",
-			    gpe_number));
-
-		/*
-		 * Disable the GPE. The GPE will remain disabled until the ACPI
-		 * Core Subsystem is restarted, or a handler is installed.
-		 */
-		status = acpi_ev_disable_gpe(gpe_event_info);
-		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"Unable to disable GPE[%2X]",
-					gpe_number));
-			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
-		}
-		break;
-	}
-
-	return_UINT32(ACPI_INTERRUPT_HANDLED);
-}
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c
deleted file mode 100644
index 73c058e..0000000
--- a/drivers/acpi/events/evgpeblk.c
+++ /dev/null
@@ -1,1215 +0,0 @@
-/******************************************************************************
- *
- * Module Name: evgpeblk - GPE block creation and initialization.
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acevents.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_EVENTS
-ACPI_MODULE_NAME("evgpeblk")
-
-/* Local prototypes */
-static acpi_status
-acpi_ev_save_method_info(acpi_handle obj_handle,
-			 u32 level, void *obj_desc, void **return_value);
-
-static acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
-			  u32 level, void *info, void **return_value);
-
-static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
-							       interrupt_number);
-
-static acpi_status
-acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
-
-static acpi_status
-acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
-			  u32 interrupt_number);
-
-static acpi_status
-acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_valid_gpe_event
- *
- * PARAMETERS:  gpe_event_info              - Info for this GPE
- *
- * RETURN:      TRUE if the gpe_event is valid
- *
- * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
- *              Should be called only when the GPE lists are semaphore locked
- *              and not subject to change.
- *
- ******************************************************************************/
-
-u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
-{
-	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
-	struct acpi_gpe_block_info *gpe_block;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* No need for spin lock since we are not changing any list elements */
-
-	/* Walk the GPE interrupt levels */
-
-	gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
-	while (gpe_xrupt_block) {
-		gpe_block = gpe_xrupt_block->gpe_block_list_head;
-
-		/* Walk the GPE blocks on this interrupt level */
-
-		while (gpe_block) {
-			if ((&gpe_block->event_info[0] <= gpe_event_info) &&
-			    (&gpe_block->
-			     event_info[((acpi_size) gpe_block->
-					 register_count) * 8] >
-			     gpe_event_info)) {
-				return (TRUE);
-			}
-
-			gpe_block = gpe_block->next;
-		}
-
-		gpe_xrupt_block = gpe_xrupt_block->next;
-	}
-
-	return (FALSE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_walk_gpe_list
- *
- * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Walk the GPE lists.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback)
-{
-	struct acpi_gpe_block_info *gpe_block;
-	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
-	acpi_status status = AE_OK;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	/* Walk the interrupt level descriptor list */
-
-	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
-	while (gpe_xrupt_info) {
-
-		/* Walk all Gpe Blocks attached to this interrupt level */
-
-		gpe_block = gpe_xrupt_info->gpe_block_list_head;
-		while (gpe_block) {
-
-			/* One callback per GPE block */
-
-			status = gpe_walk_callback(gpe_xrupt_info, gpe_block);
-			if (ACPI_FAILURE(status)) {
-				goto unlock_and_exit;
-			}
-
-			gpe_block = gpe_block->next;
-		}
-
-		gpe_xrupt_info = gpe_xrupt_info->next;
-	}
-
-      unlock_and_exit:
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_delete_gpe_handlers
- *
- * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
- *              gpe_block           - Gpe Block info
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
- *              Used only prior to termination.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
-			    struct acpi_gpe_block_info *gpe_block)
-{
-	struct acpi_gpe_event_info *gpe_event_info;
-	u32 i;
-	u32 j;
-
-	ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
-
-	/* Examine each GPE Register within the block */
-
-	for (i = 0; i < gpe_block->register_count; i++) {
-
-		/* Now look at the individual GPEs in this byte register */
-
-		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
-			gpe_event_info =
-			    &gpe_block->
-			    event_info[((acpi_size) i *
-					ACPI_GPE_REGISTER_WIDTH) + j];
-
-			if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-			    ACPI_GPE_DISPATCH_HANDLER) {
-				ACPI_FREE(gpe_event_info->dispatch.handler);
-				gpe_event_info->dispatch.handler = NULL;
-				gpe_event_info->flags &=
-				    ~ACPI_GPE_DISPATCH_MASK;
-			}
-		}
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_save_method_info
- *
- * PARAMETERS:  Callback from walk_namespace
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
- *              control method under the _GPE portion of the namespace.
- *              Extract the name and GPE type from the object, saving this
- *              information for quick lookup during GPE dispatch
- *
- *              The name of each GPE control method is of the form:
- *              "_Lxx" or "_Exx"
- *              Where:
- *                  L      - means that the GPE is level triggered
- *                  E      - means that the GPE is edge triggered
- *                  xx     - is the GPE number [in HEX]
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_save_method_info(acpi_handle obj_handle,
-			 u32 level, void *obj_desc, void **return_value)
-{
-	struct acpi_gpe_block_info *gpe_block = (void *)obj_desc;
-	struct acpi_gpe_event_info *gpe_event_info;
-	u32 gpe_number;
-	char name[ACPI_NAME_SIZE + 1];
-	u8 type;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_save_method_info);
-
-	/*
-	 * _Lxx and _Exx GPE method support
-	 *
-	 * 1) Extract the name from the object and convert to a string
-	 */
-	ACPI_MOVE_32_TO_32(name,
-			   &((struct acpi_namespace_node *)obj_handle)->name.
-			   integer);
-	name[ACPI_NAME_SIZE] = 0;
-
-	/*
-	 * 2) Edge/Level determination is based on the 2nd character
-	 *    of the method name
-	 *
-	 * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
-	 * if a _PRW object is found that points to this GPE.
-	 */
-	switch (name[1]) {
-	case 'L':
-		type = ACPI_GPE_LEVEL_TRIGGERED;
-		break;
-
-	case 'E':
-		type = ACPI_GPE_EDGE_TRIGGERED;
-		break;
-
-	default:
-		/* Unknown method type, just ignore it! */
-
-		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
-				  "Ignoring unknown GPE method type: %s (name not of form _Lxx or _Exx)",
-				  name));
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Convert the last two characters of the name to the GPE Number */
-
-	gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
-	if (gpe_number == ACPI_UINT32_MAX) {
-
-		/* Conversion failed; invalid method, just ignore it */
-
-		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
-				  "Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)",
-				  name));
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Ensure that we have a valid GPE number for this GPE block */
-
-	if ((gpe_number < gpe_block->block_base_number) ||
-	    (gpe_number >=
-	     (gpe_block->block_base_number +
-	      (gpe_block->register_count * 8)))) {
-		/*
-		 * Not valid for this GPE block, just ignore it
-		 * However, it may be valid for a different GPE block, since GPE0 and GPE1
-		 * methods both appear under \_GPE.
-		 */
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/*
-	 * Now we can add this information to the gpe_event_info block
-	 * for use during dispatch of this GPE. Default type is RUNTIME, although
-	 * this may change when the _PRW methods are executed later.
-	 */
-	gpe_event_info =
-	    &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
-
-	gpe_event_info->flags = (u8)
-	    (type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME);
-
-	gpe_event_info->dispatch.method_node =
-	    (struct acpi_namespace_node *)obj_handle;
-
-	/* Update enable mask, but don't enable the HW GPE as of yet */
-
-	status = acpi_ev_enable_gpe(gpe_event_info, FALSE);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
-			  "Registered GPE method %s as GPE number 0x%.2X\n",
-			  name, gpe_number));
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_match_prw_and_gpe
- *
- * PARAMETERS:  Callback from walk_namespace
- *
- * RETURN:      Status. NOTE: We ignore errors so that the _PRW walk is
- *              not aborted on a single _PRW failure.
- *
- * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
- *              Device. Run the _PRW method. If present, extract the GPE
- *              number and mark the GPE as a WAKE GPE.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
-			  u32 level, void *info, void **return_value)
-{
-	struct acpi_gpe_walk_info *gpe_info = (void *)info;
-	struct acpi_namespace_node *gpe_device;
-	struct acpi_gpe_block_info *gpe_block;
-	struct acpi_namespace_node *target_gpe_device;
-	struct acpi_gpe_event_info *gpe_event_info;
-	union acpi_operand_object *pkg_desc;
-	union acpi_operand_object *obj_desc;
-	u32 gpe_number;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
-
-	/* Check for a _PRW method under this device */
-
-	status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
-					 ACPI_BTYPE_PACKAGE, &pkg_desc);
-	if (ACPI_FAILURE(status)) {
-
-		/* Ignore all errors from _PRW, we don't want to abort the subsystem */
-
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* The returned _PRW package must have at least two elements */
-
-	if (pkg_desc->package.count < 2) {
-		goto cleanup;
-	}
-
-	/* Extract pointers from the input context */
-
-	gpe_device = gpe_info->gpe_device;
-	gpe_block = gpe_info->gpe_block;
-
-	/*
-	 * The _PRW object must return a package; we are only interested
-	 * in the first element.
-	 */
-	obj_desc = pkg_desc->package.elements[0];
-
-	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
-
-		/* Use FADT-defined GPE device (from definition of _PRW) */
-
-		target_gpe_device = acpi_gbl_fadt_gpe_device;
-
-		/* Integer is the GPE number in the FADT described GPE blocks */
-
-		gpe_number = (u32) obj_desc->integer.value;
-	} else if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
-
-		/* Package contains a GPE reference and GPE number within a GPE block */
-
-		if ((obj_desc->package.count < 2) ||
-		    (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[0]) !=
-		     ACPI_TYPE_LOCAL_REFERENCE)
-		    || (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[1]) !=
-			ACPI_TYPE_INTEGER)) {
-			goto cleanup;
-		}
-
-		/* Get GPE block reference and decode */
-
-		target_gpe_device =
-		    obj_desc->package.elements[0]->reference.node;
-		gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
-	} else {
-		/* Unknown type, just ignore it */
-
-		goto cleanup;
-	}
-
-	/*
-	 * Is this GPE within this block?
-	 *
-	 * TRUE iff these conditions are true:
-	 *     1) The GPE devices match.
-	 *     2) The GPE index(number) is within the range of the Gpe Block
-	 *          associated with the GPE device.
-	 */
-	if ((gpe_device == target_gpe_device) &&
-	    (gpe_number >= gpe_block->block_base_number) &&
-	    (gpe_number <
-	     gpe_block->block_base_number + (gpe_block->register_count * 8))) {
-		gpe_event_info =
-		    &gpe_block->event_info[gpe_number -
-					   gpe_block->block_base_number];
-
-		/* Mark GPE for WAKE-ONLY but WAKE_DISABLED */
-
-		gpe_event_info->flags &=
-		    ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED);
-
-		status =
-		    acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
-		if (ACPI_FAILURE(status)) {
-			goto cleanup;
-		}
-		status =
-		    acpi_ev_update_gpe_enable_masks(gpe_event_info,
-						    ACPI_GPE_DISABLE);
-	}
-
-      cleanup:
-	acpi_ut_remove_reference(pkg_desc);
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_get_gpe_xrupt_block
- *
- * PARAMETERS:  interrupt_number     - Interrupt for a GPE block
- *
- * RETURN:      A GPE interrupt block
- *
- * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
- *              block per unique interrupt level used for GPEs.
- *              Should be called only when the GPE lists are semaphore locked
- *              and not subject to change.
- *
- ******************************************************************************/
-
-static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
-							       interrupt_number)
-{
-	struct acpi_gpe_xrupt_info *next_gpe_xrupt;
-	struct acpi_gpe_xrupt_info *gpe_xrupt;
-	acpi_status status;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
-
-	/* No need for lock since we are not changing any list elements here */
-
-	next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
-	while (next_gpe_xrupt) {
-		if (next_gpe_xrupt->interrupt_number == interrupt_number) {
-			return_PTR(next_gpe_xrupt);
-		}
-
-		next_gpe_xrupt = next_gpe_xrupt->next;
-	}
-
-	/* Not found, must allocate a new xrupt descriptor */
-
-	gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
-	if (!gpe_xrupt) {
-		return_PTR(NULL);
-	}
-
-	gpe_xrupt->interrupt_number = interrupt_number;
-
-	/* Install new interrupt descriptor with spin lock */
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-	if (acpi_gbl_gpe_xrupt_list_head) {
-		next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
-		while (next_gpe_xrupt->next) {
-			next_gpe_xrupt = next_gpe_xrupt->next;
-		}
-
-		next_gpe_xrupt->next = gpe_xrupt;
-		gpe_xrupt->previous = next_gpe_xrupt;
-	} else {
-		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
-	}
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
-	/* Install new interrupt handler if not SCI_INT */
-
-	if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
-		status = acpi_os_install_interrupt_handler(interrupt_number,
-							   acpi_ev_gpe_xrupt_handler,
-							   gpe_xrupt);
-		if (ACPI_FAILURE(status)) {
-			ACPI_ERROR((AE_INFO,
-				    "Could not install GPE interrupt handler at level 0x%X",
-				    interrupt_number));
-			return_PTR(NULL);
-		}
-	}
-
-	return_PTR(gpe_xrupt);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_delete_gpe_xrupt
- *
- * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
- *              interrupt handler if not the SCI interrupt.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
-{
-	acpi_status status;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
-
-	/* We never want to remove the SCI interrupt handler */
-
-	if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
-		gpe_xrupt->gpe_block_list_head = NULL;
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Disable this interrupt */
-
-	status =
-	    acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
-					     acpi_ev_gpe_xrupt_handler);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Unlink the interrupt block with lock */
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-	if (gpe_xrupt->previous) {
-		gpe_xrupt->previous->next = gpe_xrupt->next;
-	} else {
-		/* No previous, update list head */
-
-		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
-	}
-
-	if (gpe_xrupt->next) {
-		gpe_xrupt->next->previous = gpe_xrupt->previous;
-	}
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
-	/* Free the block */
-
-	ACPI_FREE(gpe_xrupt);
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_install_gpe_block
- *
- * PARAMETERS:  gpe_block       - New GPE block
- *              interrupt_number - Xrupt to be associated with this GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Install new GPE block with mutex support
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
-			  u32 interrupt_number)
-{
-	struct acpi_gpe_block_info *next_gpe_block;
-	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
-	acpi_status status;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(ev_install_gpe_block);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
-	if (!gpe_xrupt_block) {
-		status = AE_NO_MEMORY;
-		goto unlock_and_exit;
-	}
-
-	/* Install the new block at the end of the list with lock */
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-	if (gpe_xrupt_block->gpe_block_list_head) {
-		next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
-		while (next_gpe_block->next) {
-			next_gpe_block = next_gpe_block->next;
-		}
-
-		next_gpe_block->next = gpe_block;
-		gpe_block->previous = next_gpe_block;
-	} else {
-		gpe_xrupt_block->gpe_block_list_head = gpe_block;
-	}
-
-	gpe_block->xrupt_block = gpe_xrupt_block;
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
-      unlock_and_exit:
-	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_delete_gpe_block
- *
- * PARAMETERS:  gpe_block       - Existing GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove a GPE block
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
-{
-	acpi_status status;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(ev_delete_gpe_block);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Disable all GPEs in this block */
-
-	status = acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block);
-
-	if (!gpe_block->previous && !gpe_block->next) {
-
-		/* This is the last gpe_block on this interrupt */
-
-		status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
-		if (ACPI_FAILURE(status)) {
-			goto unlock_and_exit;
-		}
-	} else {
-		/* Remove the block on this interrupt with lock */
-
-		flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-		if (gpe_block->previous) {
-			gpe_block->previous->next = gpe_block->next;
-		} else {
-			gpe_block->xrupt_block->gpe_block_list_head =
-			    gpe_block->next;
-		}
-
-		if (gpe_block->next) {
-			gpe_block->next->previous = gpe_block->previous;
-		}
-		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	}
-
-	/* Free the gpe_block */
-
-	ACPI_FREE(gpe_block->register_info);
-	ACPI_FREE(gpe_block->event_info);
-	ACPI_FREE(gpe_block);
-
-      unlock_and_exit:
-	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_create_gpe_info_blocks
- *
- * PARAMETERS:  gpe_block   - New GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
-{
-	struct acpi_gpe_register_info *gpe_register_info = NULL;
-	struct acpi_gpe_event_info *gpe_event_info = NULL;
-	struct acpi_gpe_event_info *this_event;
-	struct acpi_gpe_register_info *this_register;
-	u32 i;
-	u32 j;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);
-
-	/* Allocate the GPE register information block */
-
-	gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
-						 register_count *
-						 sizeof(struct
-							acpi_gpe_register_info));
-	if (!gpe_register_info) {
-		ACPI_ERROR((AE_INFO,
-			    "Could not allocate the GpeRegisterInfo table"));
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/*
-	 * Allocate the GPE event_info block. There are eight distinct GPEs
-	 * per register. Initialization to zeros is sufficient.
-	 */
-	gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->
-					       register_count *
-					       ACPI_GPE_REGISTER_WIDTH) *
-					      sizeof(struct
-						     acpi_gpe_event_info));
-	if (!gpe_event_info) {
-		ACPI_ERROR((AE_INFO,
-			    "Could not allocate the GpeEventInfo table"));
-		status = AE_NO_MEMORY;
-		goto error_exit;
-	}
-
-	/* Save the new Info arrays in the GPE block */
-
-	gpe_block->register_info = gpe_register_info;
-	gpe_block->event_info = gpe_event_info;
-
-	/*
-	 * Initialize the GPE Register and Event structures. A goal of these
-	 * tables is to hide the fact that there are two separate GPE register sets
-	 * in a given GPE hardware block: the status registers occupy the first half,
-	 * and the enable registers occupy the second half.
-	 */
-	this_register = gpe_register_info;
-	this_event = gpe_event_info;
-
-	for (i = 0; i < gpe_block->register_count; i++) {
-
-		/* Init the register_info for this GPE register (8 GPEs) */
-
-		this_register->base_gpe_number =
-		    (u8) (gpe_block->block_base_number +
-			  (i * ACPI_GPE_REGISTER_WIDTH));
-
-		this_register->status_address.address =
-		    gpe_block->block_address.address + i;
-
-		this_register->enable_address.address =
-		    gpe_block->block_address.address + i +
-		    gpe_block->register_count;
-
-		this_register->status_address.space_id =
-		    gpe_block->block_address.space_id;
-		this_register->enable_address.space_id =
-		    gpe_block->block_address.space_id;
-		this_register->status_address.bit_width =
-		    ACPI_GPE_REGISTER_WIDTH;
-		this_register->enable_address.bit_width =
-		    ACPI_GPE_REGISTER_WIDTH;
-		this_register->status_address.bit_offset =
-		    ACPI_GPE_REGISTER_WIDTH;
-		this_register->enable_address.bit_offset =
-		    ACPI_GPE_REGISTER_WIDTH;
-
-		/* Init the event_info for each GPE within this register */
-
-		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
-			this_event->gpe_number =
-			    (u8) (this_register->base_gpe_number + j);
-			this_event->register_info = this_register;
-			this_event++;
-		}
-
-		/* Disable all GPEs within this register */
-
-		status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0x00,
-						 &this_register->
-						 enable_address);
-		if (ACPI_FAILURE(status)) {
-			goto error_exit;
-		}
-
-		/* Clear any pending GPE events within this register */
-
-		status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0xFF,
-						 &this_register->
-						 status_address);
-		if (ACPI_FAILURE(status)) {
-			goto error_exit;
-		}
-
-		this_register++;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-
-      error_exit:
-	if (gpe_register_info) {
-		ACPI_FREE(gpe_register_info);
-	}
-	if (gpe_event_info) {
-		ACPI_FREE(gpe_event_info);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_create_gpe_block
- *
- * PARAMETERS:  gpe_device          - Handle to the parent GPE block
- *              gpe_block_address   - Address and space_id
- *              register_count      - Number of GPE register pairs in the block
- *              gpe_block_base_number - Starting GPE number for the block
- *              interrupt_number    - H/W interrupt for the block
- *              return_gpe_block    - Where the new block descriptor is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
- *              the block are disabled at exit.
- *              Note: Assumes namespace is locked.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
-			 struct acpi_generic_address *gpe_block_address,
-			 u32 register_count,
-			 u8 gpe_block_base_number,
-			 u32 interrupt_number,
-			 struct acpi_gpe_block_info **return_gpe_block)
-{
-	acpi_status status;
-	struct acpi_gpe_block_info *gpe_block;
-
-	ACPI_FUNCTION_TRACE(ev_create_gpe_block);
-
-	if (!register_count) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Allocate a new GPE block */
-
-	gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
-	if (!gpe_block) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Initialize the new GPE block */
-
-	gpe_block->node = gpe_device;
-	gpe_block->register_count = register_count;
-	gpe_block->block_base_number = gpe_block_base_number;
-
-	ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
-		    sizeof(struct acpi_generic_address));
-
-	/*
-	 * Create the register_info and event_info sub-structures
-	 * Note: disables and clears all GPEs in the block
-	 */
-	status = acpi_ev_create_gpe_info_blocks(gpe_block);
-	if (ACPI_FAILURE(status)) {
-		ACPI_FREE(gpe_block);
-		return_ACPI_STATUS(status);
-	}
-
-	/* Install the new block in the global lists */
-
-	status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
-	if (ACPI_FAILURE(status)) {
-		ACPI_FREE(gpe_block);
-		return_ACPI_STATUS(status);
-	}
-
-	/* Find all GPE methods (_Lxx, _Exx) for this block */
-
-	status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
-					ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
-					acpi_ev_save_method_info, gpe_block,
-					NULL);
-
-	/* Return the new block */
-
-	if (return_gpe_block) {
-		(*return_gpe_block) = gpe_block;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
-			  "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
-			  (u32) gpe_block->block_base_number,
-			  (u32) (gpe_block->block_base_number +
-				 ((gpe_block->register_count *
-				   ACPI_GPE_REGISTER_WIDTH) - 1)),
-			  gpe_device->name.ascii, gpe_block->register_count,
-			  interrupt_number));
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_initialize_gpe_block
- *
- * PARAMETERS:  gpe_device          - Handle to the parent GPE block
- *              gpe_block           - Gpe Block info
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Initialize and enable a GPE block. First find and run any
- *              _PRW methods associated with the block, then enable the
- *              appropriate GPEs.
- *              Note: Assumes namespace is locked.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
-			     struct acpi_gpe_block_info *gpe_block)
-{
-	acpi_status status;
-	struct acpi_gpe_event_info *gpe_event_info;
-	struct acpi_gpe_walk_info gpe_info;
-	u32 wake_gpe_count;
-	u32 gpe_enabled_count;
-	u32 i;
-	u32 j;
-
-	ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
-
-	/* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
-
-	if (!gpe_block) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/*
-	 * Runtime option: Should wake GPEs be enabled at runtime?  The default
-	 * is no, they should only be enabled just as the machine goes to sleep.
-	 */
-	if (acpi_gbl_leave_wake_gpes_disabled) {
-		/*
-		 * Differentiate runtime vs wake GPEs, via the _PRW control methods.
-		 * Each GPE that has one or more _PRWs that reference it is by
-		 * definition a wake GPE and will not be enabled while the machine
-		 * is running.
-		 */
-		gpe_info.gpe_block = gpe_block;
-		gpe_info.gpe_device = gpe_device;
-
-		status =
-		    acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
-					   ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
-					   acpi_ev_match_prw_and_gpe, &gpe_info,
-					   NULL);
-	}
-
-	/*
-	 * Enable all GPEs in this block that have these attributes:
-	 * 1) are "runtime" or "run/wake" GPEs, and
-	 * 2) have a corresponding _Lxx or _Exx method
-	 *
-	 * Any other GPEs within this block must be enabled via the acpi_enable_gpe()
-	 * external interface.
-	 */
-	wake_gpe_count = 0;
-	gpe_enabled_count = 0;
-
-	for (i = 0; i < gpe_block->register_count; i++) {
-		for (j = 0; j < 8; j++) {
-
-			/* Get the info block for this particular GPE */
-
-			gpe_event_info =
-			    &gpe_block->
-			    event_info[((acpi_size) i *
-					ACPI_GPE_REGISTER_WIDTH) + j];
-
-			if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-			     ACPI_GPE_DISPATCH_METHOD)
-			    && (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) {
-				gpe_enabled_count++;
-			}
-
-			if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) {
-				wake_gpe_count++;
-			}
-		}
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
-			  "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
-			  wake_gpe_count, gpe_enabled_count));
-
-	/* Enable all valid runtime GPEs found above */
-
-	status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block);
-	if (ACPI_FAILURE(status)) {
-		ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p",
-			    gpe_block));
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_gpe_initialize
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Initialize the GPE data structures
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_gpe_initialize(void)
-{
-	u32 register_count0 = 0;
-	u32 register_count1 = 0;
-	u32 gpe_number_max = 0;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_gpe_initialize);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * Initialize the GPE Block(s) defined in the FADT
-	 *
-	 * Why the GPE register block lengths are divided by 2:  From the ACPI Spec,
-	 * section "General-Purpose Event Registers", we have:
-	 *
-	 * "Each register block contains two registers of equal length
-	 *  GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
-	 *  GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN.
-	 *  The length of the GPE1_STS and GPE1_EN registers is equal to
-	 *  half the GPE1_LEN. If a generic register block is not supported
-	 *  then its respective block pointer and block length values in the
-	 *  FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
-	 *  to be the same size."
-	 */
-
-	/*
-	 * Determine the maximum GPE number for this machine.
-	 *
-	 * Note: both GPE0 and GPE1 are optional, and either can exist without
-	 * the other.
-	 *
-	 * If EITHER the register length OR the block address is zero, then that
-	 * particular block is not supported.
-	 */
-	if (acpi_gbl_FADT.gpe0_block_length &&
-	    acpi_gbl_FADT.xgpe0_block.address) {
-
-		/* GPE block 0 exists (has both length and address > 0) */
-
-		register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
-
-		gpe_number_max =
-		    (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
-
-		/* Install GPE Block 0 */
-
-		status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
-						  &acpi_gbl_FADT.xgpe0_block,
-						  register_count0, 0,
-						  acpi_gbl_FADT.sci_interrupt,
-						  &acpi_gbl_gpe_fadt_blocks[0]);
-
-		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"Could not create GPE Block 0"));
-		}
-	}
-
-	if (acpi_gbl_FADT.gpe1_block_length &&
-	    acpi_gbl_FADT.xgpe1_block.address) {
-
-		/* GPE block 1 exists (has both length and address > 0) */
-
-		register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
-
-		/* Check for GPE0/GPE1 overlap (if both banks exist) */
-
-		if ((register_count0) &&
-		    (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
-			ACPI_ERROR((AE_INFO,
-				    "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1",
-				    gpe_number_max, acpi_gbl_FADT.gpe1_base,
-				    acpi_gbl_FADT.gpe1_base +
-				    ((register_count1 *
-				      ACPI_GPE_REGISTER_WIDTH) - 1)));
-
-			/* Ignore GPE1 block by setting the register count to zero */
-
-			register_count1 = 0;
-		} else {
-			/* Install GPE Block 1 */
-
-			status =
-			    acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
-						     &acpi_gbl_FADT.xgpe1_block,
-						     register_count1,
-						     acpi_gbl_FADT.gpe1_base,
-						     acpi_gbl_FADT.
-						     sci_interrupt,
-						     &acpi_gbl_gpe_fadt_blocks
-						     [1]);
-
-			if (ACPI_FAILURE(status)) {
-				ACPI_EXCEPTION((AE_INFO, status,
-						"Could not create GPE Block 1"));
-			}
-
-			/*
-			 * GPE0 and GPE1 do not have to be contiguous in the GPE number
-			 * space. However, GPE0 always starts at GPE number zero.
-			 */
-			gpe_number_max = acpi_gbl_FADT.gpe1_base +
-			    ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
-		}
-	}
-
-	/* Exit if there are no GPE registers */
-
-	if ((register_count0 + register_count1) == 0) {
-
-		/* GPEs are not required by ACPI; this is OK */
-
-		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
-				  "There are no GPE blocks defined in the FADT\n"));
-		status = AE_OK;
-		goto cleanup;
-	}
-
-	/* Check for Max GPE number out-of-range */
-
-	if (gpe_number_max > ACPI_GPE_MAX) {
-		ACPI_ERROR((AE_INFO,
-			    "Maximum GPE number from FADT is too large: 0x%X",
-			    gpe_number_max));
-		status = AE_BAD_VALUE;
-		goto cleanup;
-	}
-
-      cleanup:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(AE_OK);
-}
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c
deleted file mode 100644
index 1d5670b..0000000
--- a/drivers/acpi/events/evmisc.c
+++ /dev/null
@@ -1,631 +0,0 @@
-/******************************************************************************
- *
- * Module Name: evmisc - Miscellaneous event manager support functions
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acevents.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acinterp.h>
-
-#define _COMPONENT          ACPI_EVENTS
-ACPI_MODULE_NAME("evmisc")
-
-/* Pointer to FACS needed for the Global Lock */
-static struct acpi_table_facs *facs = NULL;
-
-/* Local prototypes */
-
-static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
-
-static u32 acpi_ev_global_lock_handler(void *context);
-
-static acpi_status acpi_ev_remove_global_lock_handler(void);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_is_notify_object
- *
- * PARAMETERS:  Node            - Node to check
- *
- * RETURN:      TRUE if notifies allowed on this object
- *
- * DESCRIPTION: Check the type of node for an object that supports notifies.
- *
- *              TBD: This could be replaced by a flag bit in the node.
- *
- ******************************************************************************/
-
-u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node)
-{
-	switch (node->type) {
-	case ACPI_TYPE_DEVICE:
-	case ACPI_TYPE_PROCESSOR:
-	case ACPI_TYPE_THERMAL:
-		/*
-		 * These are the ONLY objects that can receive ACPI notifications
-		 */
-		return (TRUE);
-
-	default:
-		return (FALSE);
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_queue_notify_request
- *
- * PARAMETERS:  Node            - NS node for the notified object
- *              notify_value    - Value from the Notify() request
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Queue a device notification event for dispatch to a
- *              previously installed handler.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
-			     u32 notify_value)
-{
-	union acpi_operand_object *obj_desc;
-	union acpi_operand_object *handler_obj = NULL;
-	union acpi_generic_state *notify_info;
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_NAME(ev_queue_notify_request);
-
-	/*
-	 * For value 3 (Ejection Request), some device method may need to be run.
-	 * For value 2 (Device Wake) if _PRW exists, the _PS0 method may need
-	 *   to be run.
-	 * For value 0x80 (Status Change) on the power button or sleep button,
-	 *   initiate soft-off or sleep operation?
-	 */
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "Dispatching Notify on [%4.4s] Node %p Value 0x%2.2X (%s)\n",
-			  acpi_ut_get_node_name(node), node, notify_value,
-			  acpi_ut_get_notify_name(notify_value)));
-
-	/* Get the notify object attached to the NS Node */
-
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (obj_desc) {
-
-		/* We have the notify object; get the right handler */
-
-		switch (node->type) {
-
-			/* Notify allowed only on these types */
-
-		case ACPI_TYPE_DEVICE:
-		case ACPI_TYPE_THERMAL:
-		case ACPI_TYPE_PROCESSOR:
-
-			if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
-				handler_obj =
-				    obj_desc->common_notify.system_notify;
-			} else {
-				handler_obj =
-				    obj_desc->common_notify.device_notify;
-			}
-			break;
-
-		default:
-			/* All other types are not supported */
-			return (AE_TYPE);
-		}
-	}
-
-	/*
-	 * If there is any handler to run, schedule the dispatcher.
-	 * Check for:
-	 * 1) Global system notify handler
-	 * 2) Global device notify handler
-	 * 3) Per-device notify handler
-	 */
-	if ((acpi_gbl_system_notify.handler
-	     && (notify_value <= ACPI_MAX_SYS_NOTIFY))
-	    || (acpi_gbl_device_notify.handler
-		&& (notify_value > ACPI_MAX_SYS_NOTIFY)) || handler_obj) {
-		notify_info = acpi_ut_create_generic_state();
-		if (!notify_info) {
-			return (AE_NO_MEMORY);
-		}
-
-		if (!handler_obj) {
-			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-					  "Executing system notify handler for Notify (%4.4s, %X) node %p\n",
-					  acpi_ut_get_node_name(node),
-					  notify_value, node));
-		}
-
-		notify_info->common.descriptor_type =
-		    ACPI_DESC_TYPE_STATE_NOTIFY;
-		notify_info->notify.node = node;
-		notify_info->notify.value = (u16) notify_value;
-		notify_info->notify.handler_obj = handler_obj;
-
-		status =
-		    acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
-				    notify_info);
-		if (ACPI_FAILURE(status)) {
-			acpi_ut_delete_generic_state(notify_info);
-		}
-	} else {
-		/*
-		 * There is no notify handler (per-device or system) for this device.
-		 */
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "No notify handler for Notify (%4.4s, %X) node %p\n",
-				  acpi_ut_get_node_name(node), notify_value,
-				  node));
-	}
-
-	return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_notify_dispatch
- *
- * PARAMETERS:  Context         - To be passed to the notify handler
- *
- * RETURN:      None.
- *
- * DESCRIPTION: Dispatch a device notification event to a previously
- *              installed handler.
- *
- ******************************************************************************/
-
-static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
-{
-	union acpi_generic_state *notify_info =
-	    (union acpi_generic_state *)context;
-	acpi_notify_handler global_handler = NULL;
-	void *global_context = NULL;
-	union acpi_operand_object *handler_obj;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/*
-	 * We will invoke a global notify handler if installed.
-	 * This is done _before_ we invoke the per-device handler attached
-	 * to the device.
-	 */
-	if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) {
-
-		/* Global system notification handler */
-
-		if (acpi_gbl_system_notify.handler) {
-			global_handler = acpi_gbl_system_notify.handler;
-			global_context = acpi_gbl_system_notify.context;
-		}
-	} else {
-		/* Global driver notification handler */
-
-		if (acpi_gbl_device_notify.handler) {
-			global_handler = acpi_gbl_device_notify.handler;
-			global_context = acpi_gbl_device_notify.context;
-		}
-	}
-
-	/* Invoke the system handler first, if present */
-
-	if (global_handler) {
-		global_handler(notify_info->notify.node,
-			       notify_info->notify.value, global_context);
-	}
-
-	/* Now invoke the per-device handler, if present */
-
-	handler_obj = notify_info->notify.handler_obj;
-	if (handler_obj) {
-		handler_obj->notify.handler(notify_info->notify.node,
-					    notify_info->notify.value,
-					    handler_obj->notify.context);
-	}
-
-	/* All done with the info object */
-
-	acpi_ut_delete_generic_state(notify_info);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_global_lock_handler
- *
- * PARAMETERS:  Context         - From thread interface, not used
- *
- * RETURN:      ACPI_INTERRUPT_HANDLED
- *
- * DESCRIPTION: Invoked directly from the SCI handler when a global lock
- *              release interrupt occurs. Attempt to acquire the global lock,
- *              if successful, signal the thread waiting for the lock.
- *
- * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
- * this is not possible for some reason, a separate thread will have to be
- * scheduled to do this.
- *
- ******************************************************************************/
-
-static u32 acpi_ev_global_lock_handler(void *context)
-{
-	u8 acquired = FALSE;
-
-	/*
-	 * Attempt to get the lock.
-	 *
-	 * If we don't get it now, it will be marked pending and we will
-	 * take another interrupt when it becomes free.
-	 */
-	ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
-	if (acquired) {
-
-		/* Got the lock, now wake all threads waiting for it */
-
-		acpi_gbl_global_lock_acquired = TRUE;
-		/* Send a unit to the semaphore */
-
-		if (ACPI_FAILURE
-		    (acpi_os_signal_semaphore
-		     (acpi_gbl_global_lock_semaphore, 1))) {
-			ACPI_ERROR((AE_INFO,
-				    "Could not signal Global Lock semaphore"));
-		}
-	}
-
-	return (ACPI_INTERRUPT_HANDLED);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_init_global_lock_handler
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Install a handler for the global lock release event
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_init_global_lock_handler(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
-
-	status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
-					 ACPI_CAST_INDIRECT_PTR(struct
-								acpi_table_header,
-								&facs));
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	acpi_gbl_global_lock_present = TRUE;
-	status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
-						  acpi_ev_global_lock_handler,
-						  NULL);
-
-	/*
-	 * If the global lock does not exist on this platform, the attempt
-	 * to enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
-	 * Map to AE_OK, but mark global lock as not present.
-	 * Any attempt to actually use the global lock will be flagged
-	 * with an error.
-	 */
-	if (status == AE_NO_HARDWARE_RESPONSE) {
-		ACPI_ERROR((AE_INFO,
-			    "No response from Global Lock hardware, disabling lock"));
-
-		acpi_gbl_global_lock_present = FALSE;
-		status = AE_OK;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_remove_global_lock_handler
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove the handler for the Global Lock
- *
- ******************************************************************************/
-
-static acpi_status acpi_ev_remove_global_lock_handler(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
-
-	acpi_gbl_global_lock_present = FALSE;
-	status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
-						 acpi_ev_global_lock_handler);
-
-	return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_ev_acquire_global_lock
- *
- * PARAMETERS:  Timeout         - Max time to wait for the lock, in millisec.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Attempt to gain ownership of the Global Lock.
- *
- * MUTEX:       Interpreter must be locked
- *
- * Note: The original implementation allowed multiple threads to "acquire" the
- * Global Lock, and the OS would hold the lock until the last thread had
- * released it. However, this could potentially starve the BIOS out of the
- * lock, especially in the case where there is a tight handshake between the
- * Embedded Controller driver and the BIOS. Therefore, this implementation
- * allows only one thread to acquire the HW Global Lock at a time, and makes
- * the global lock appear as a standard mutex on the OS side.
- *
- *****************************************************************************/
-static acpi_thread_id acpi_ev_global_lock_thread_id;
-static int acpi_ev_global_lock_acquired;
-
-acpi_status acpi_ev_acquire_global_lock(u16 timeout)
-{
-	acpi_status status = AE_OK;
-	u8 acquired = FALSE;
-
-	ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
-
-	/*
-	 * Only one thread can acquire the GL at a time; the global_lock_mutex
-	 * enforces this. This interface releases the interpreter if we must wait.
-	 */
-	status = acpi_ex_system_wait_mutex(
-			acpi_gbl_global_lock_mutex->mutex.os_mutex, 0);
-	if (status == AE_TIME) {
-		if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) {
-			acpi_ev_global_lock_acquired++;
-			return AE_OK;
-		}
-	}
-
-	if (ACPI_FAILURE(status)) {
-		status = acpi_ex_system_wait_mutex(
-				acpi_gbl_global_lock_mutex->mutex.os_mutex,
-				timeout);
-	}
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	acpi_ev_global_lock_thread_id = acpi_os_get_thread_id();
-	acpi_ev_global_lock_acquired++;
-
-	/*
-	 * Update the global lock handle and check for wraparound. The handle is
-	 * only used for the external global lock interfaces, but it is updated
-	 * here to properly handle the case where a single thread may acquire the
-	 * lock via both the AML and the acpi_acquire_global_lock interfaces. The
-	 * handle is therefore updated on the first acquire from a given thread
-	 * regardless of where the acquisition request originated.
-	 */
-	acpi_gbl_global_lock_handle++;
-	if (acpi_gbl_global_lock_handle == 0) {
-		acpi_gbl_global_lock_handle = 1;
-	}
-
-	/*
-	 * Make sure that a global lock actually exists. If not, just treat
-	 * the lock as a standard mutex.
-	 */
-	if (!acpi_gbl_global_lock_present) {
-		acpi_gbl_global_lock_acquired = TRUE;
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Attempt to acquire the actual hardware lock */
-
-	ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
-	if (acquired) {
-
-		/* We got the lock */
-
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Acquired hardware Global Lock\n"));
-
-		acpi_gbl_global_lock_acquired = TRUE;
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/*
-	 * Did not get the lock. The pending bit was set above, and we must now
-	 * wait until we get the global lock released interrupt.
-	 */
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));
-
-	/*
-	 * Wait for handshake with the global lock interrupt handler.
-	 * This interface releases the interpreter if we must wait.
-	 */
-	status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
-					       ACPI_WAIT_FOREVER);
-
-	return_ACPI_STATUS(status);
-}
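
The thread-id/depth bookkeeping above is, in effect, a recursive mutex built on top of a non-recursive one. A user-space sketch of the same idea (pthread names are purely illustrative; like the code above, it relies on the owner field only ever being written by the thread that currently holds the mutex):

#include <pthread.h>

static pthread_mutex_t gl_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t gl_owner;
static int gl_depth;

static void gl_lock(void)
{
	if (pthread_mutex_trylock(&gl_mutex) != 0) {
		/* Contended: if we already own it, just nest one level deeper */
		if (gl_depth && pthread_equal(gl_owner, pthread_self())) {
			gl_depth++;
			return;
		}
		pthread_mutex_lock(&gl_mutex);
	}
	gl_owner = pthread_self();
	gl_depth = 1;
}

static void gl_unlock(void)
{
	if (--gl_depth == 0)
		pthread_mutex_unlock(&gl_mutex);
}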
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_release_global_lock
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Releases ownership of the Global Lock.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_release_global_lock(void)
-{
-	u8 pending = FALSE;
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ev_release_global_lock);
-
-	/* Lock must be already acquired */
-
-	if (!acpi_gbl_global_lock_acquired) {
-		ACPI_WARNING((AE_INFO,
-			      "Cannot release the ACPI Global Lock, it has not been acquired"));
-		return_ACPI_STATUS(AE_NOT_ACQUIRED);
-	}
-
-	acpi_ev_global_lock_acquired--;
-	if (acpi_ev_global_lock_acquired > 0) {
-		return AE_OK;
-	}
-
-	if (acpi_gbl_global_lock_present) {
-
-		/* Allow any thread to release the lock */
-
-		ACPI_RELEASE_GLOBAL_LOCK(facs, pending);
-
-		/*
-		 * If the pending bit was set, we must write GBL_RLS to the control
-		 * register
-		 */
-		if (pending) {
-			status =
-			    acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE,
-					      1);
-		}
-
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Released hardware Global Lock\n"));
-	}
-
-	acpi_gbl_global_lock_acquired = FALSE;
-
-	/* Release the local GL mutex */
-	acpi_ev_global_lock_thread_id = NULL;
-	acpi_ev_global_lock_acquired = 0;
-	acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
-	return_ACPI_STATUS(status);
-}
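
The release side is symmetric: ACPI_RELEASE_GLOBAL_LOCK() clears both bits and reports whether the pending bit was set, which is what drives the GBL_RLS write above. A self-contained sketch with the same illustrative names as before:

#include <stdbool.h>
#include <stdint.h>

#define GL_PENDING 0x01
#define GL_OWNED   0x02

static bool release_global_lock(volatile uint32_t *lock)
{
	uint32_t old, new;

	do {
		old = *lock;
		new = old & ~(GL_OWNED | GL_PENDING);
	} while (!__sync_bool_compare_and_swap(lock, old, new));

	/* If someone was pending, the caller must write GBL_RLS so the
	 * firmware learns that the lock has been released. */
	return old & GL_PENDING;
}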
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_ev_terminate
- *
- * PARAMETERS:  none
- *
- * RETURN:      none
- *
- * DESCRIPTION: Disable events and free memory allocated for table storage.
- *
- ******************************************************************************/
-
-void acpi_ev_terminate(void)
-{
-	u32 i;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_terminate);
-
-	if (acpi_gbl_events_initialized) {
-		/*
-		 * Disable all event-related functionality.
-		 * In all cases, on error, print a message but obviously we don't abort.
-		 */
-
-		/* Disable all fixed events */
-
-		for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
-			status = acpi_disable_event(i, 0);
-			if (ACPI_FAILURE(status)) {
-				ACPI_ERROR((AE_INFO,
-					    "Could not disable fixed event %d",
-					    (u32) i));
-			}
-		}
-
-		/* Disable all GPEs in all GPE blocks */
-
-		status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block);
-
-		/* Remove SCI handler */
-
-		status = acpi_ev_remove_sci_handler();
-		if (ACPI_FAILURE(status)) {
-			ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
-		}
-
-		status = acpi_ev_remove_global_lock_handler();
-		if (ACPI_FAILURE(status)) {
-			ACPI_ERROR((AE_INFO,
-				    "Could not remove Global Lock handler"));
-		}
-	}
-
-	/* Deallocate all handler objects installed within GPE info structs */
-
-	status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers);
-
-	/* Return to original mode if necessary */
-
-	if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) {
-		status = acpi_disable();
-		if (ACPI_FAILURE(status)) {
-			ACPI_WARNING((AE_INFO, "AcpiDisable failed"));
-		}
-	}
-	return_VOID;
-}
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c
deleted file mode 100644
index 236fbd1..0000000
--- a/drivers/acpi/events/evregion.c
+++ /dev/null
@@ -1,1074 +0,0 @@
-/******************************************************************************
- *
- * Module Name: evregion - ACPI address_space (op_region) handler dispatch
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acevents.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acinterp.h>
-
-#define _COMPONENT          ACPI_EVENTS
-ACPI_MODULE_NAME("evregion")
-#define ACPI_NUM_DEFAULT_SPACES     4
-static u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPACES] = {
-	ACPI_ADR_SPACE_SYSTEM_MEMORY,
-	ACPI_ADR_SPACE_SYSTEM_IO,
-	ACPI_ADR_SPACE_PCI_CONFIG,
-	ACPI_ADR_SPACE_DATA_TABLE
-};
-
-/* Local prototypes */
-
-static acpi_status
-acpi_ev_reg_run(acpi_handle obj_handle,
-		u32 level, void *context, void **return_value);
-
-static acpi_status
-acpi_ev_install_handler(acpi_handle obj_handle,
-			u32 level, void *context, void **return_value);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_install_region_handlers
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Installs the core subsystem default address space handlers.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_install_region_handlers(void)
-{
-	acpi_status status;
-	u32 i;
-
-	ACPI_FUNCTION_TRACE(ev_install_region_handlers);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * All address spaces (PCI Config, EC, SMBus) are scope dependent
-	 * and registration must occur for a specific device.
-	 *
-	 * In the case of the system memory and IO address spaces there is currently
-	 * no device associated with the address space.  For these we use the root.
-	 *
-	 * We install the default PCI config space handler at the root so
-	 * that this space is immediately available even though we have
-	 * not enumerated all the PCI Root Buses yet.  This is to conform
-	 * to the ACPI specification, which states that the PCI config
-	 * space must always be available -- even though we are nowhere
-	 * near ready to find the PCI root buses at this point.
-	 *
-	 * NOTE: We ignore AE_ALREADY_EXISTS because this means that a handler
-	 * has already been installed (via acpi_install_address_space_handler).
-	 * Similar for AE_SAME_HANDLER.
-	 */
-	for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
-		status = acpi_ev_install_space_handler(acpi_gbl_root_node,
-						       acpi_gbl_default_address_spaces
-						       [i],
-						       ACPI_DEFAULT_HANDLER,
-						       NULL, NULL);
-		switch (status) {
-		case AE_OK:
-		case AE_SAME_HANDLER:
-		case AE_ALREADY_EXISTS:
-
-			/* These exceptions are all OK */
-
-			status = AE_OK;
-			break;
-
-		default:
-
-			goto unlock_and_exit;
-		}
-	}
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_initialize_op_regions
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute _REG methods for all Operation Regions that have
- *              an installed default region handler.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_initialize_op_regions(void)
-{
-	acpi_status status;
-	u32 i;
-
-	ACPI_FUNCTION_TRACE(ev_initialize_op_regions);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * Run the _REG methods for op_regions in each default address space
-	 */
-	for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
-
-		/* TBD: Make sure handler is the DEFAULT handler, otherwise
-		 * _REG will have already been run.
-		 */
-		status = acpi_ev_execute_reg_methods(acpi_gbl_root_node,
-						     acpi_gbl_default_address_spaces
-						     [i]);
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_execute_reg_method
- *
- * PARAMETERS:  region_obj          - Region object
- *              Function            - Passed to _REG: On (1) or Off (0)
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute _REG method for a region
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
-{
-	struct acpi_evaluate_info *info;
-	union acpi_operand_object *args[3];
-	union acpi_operand_object *region_obj2;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_execute_reg_method);
-
-	region_obj2 = acpi_ns_get_secondary_object(region_obj);
-	if (!region_obj2) {
-		return_ACPI_STATUS(AE_NOT_EXIST);
-	}
-
-	if (region_obj2->extra.method_REG == NULL) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Allocate and initialize the evaluation information block */
-
-	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
-	if (!info) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	info->prefix_node = region_obj2->extra.method_REG;
-	info->pathname = NULL;
-	info->parameters = args;
-	info->flags = ACPI_IGNORE_RETURN_VALUE;
-
-	/*
-	 * The _REG method has two arguments:
-	 *
-	 * Arg0 - Integer:
-	 *  Operation region space ID. Same value as region_obj->Region.space_id
-	 *
-	 * Arg1 - Integer:
-	 *  connection status: 1 for connecting the handler, 0 for disconnecting
-	 *  the handler (Passed as a parameter)
-	 */
-	args[0] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-	if (!args[0]) {
-		status = AE_NO_MEMORY;
-		goto cleanup1;
-	}
-
-	args[1] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-	if (!args[1]) {
-		status = AE_NO_MEMORY;
-		goto cleanup2;
-	}
-
-	/* Setup the parameter objects */
-
-	args[0]->integer.value = region_obj->region.space_id;
-	args[1]->integer.value = function;
-	args[2] = NULL;
-
-	/* Execute the method, no return value */
-
-	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
-			(ACPI_TYPE_METHOD, info->prefix_node, NULL));
-
-	status = acpi_ns_evaluate(info);
-	acpi_ut_remove_reference(args[1]);
-
-      cleanup2:
-	acpi_ut_remove_reference(args[0]);
-
-      cleanup1:
-	ACPI_FREE(info);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_address_space_dispatch
- *
- * PARAMETERS:  region_obj          - Internal region object
- *              Function            - Read or Write operation
- *              Address             - Where in the space to read or write
- *              bit_width           - Field width in bits (8, 16, 32, or 64)
- *              Value               - Pointer to in or out value, must be
- *                                    full 64-bit acpi_integer
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Dispatch an address space or operation region access to
- *              a previously installed handler.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
-			       u32 function,
-			       acpi_physical_address address,
-			       u32 bit_width, acpi_integer * value)
-{
-	acpi_status status;
-	acpi_adr_space_handler handler;
-	acpi_adr_space_setup region_setup;
-	union acpi_operand_object *handler_desc;
-	union acpi_operand_object *region_obj2;
-	void *region_context = NULL;
-
-	ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
-
-	region_obj2 = acpi_ns_get_secondary_object(region_obj);
-	if (!region_obj2) {
-		return_ACPI_STATUS(AE_NOT_EXIST);
-	}
-
-	/* Ensure that there is a handler associated with this region */
-
-	handler_desc = region_obj->region.handler;
-	if (!handler_desc) {
-		ACPI_ERROR((AE_INFO,
-			    "No handler for Region [%4.4s] (%p) [%s]",
-			    acpi_ut_get_node_name(region_obj->region.node),
-			    region_obj,
-			    acpi_ut_get_region_name(region_obj->region.
-						    space_id)));
-
-		return_ACPI_STATUS(AE_NOT_EXIST);
-	}
-
-	/*
-	 * It may be the case that the region has never been initialized.
-	 * Some types of regions require special init code
-	 */
-	if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
-		/*
-		 * This region has not been initialized yet, do it
-		 */
-		region_setup = handler_desc->address_space.setup;
-		if (!region_setup) {
-
-			/* No initialization routine, exit with error */
-
-			ACPI_ERROR((AE_INFO,
-				    "No init routine for region(%p) [%s]",
-				    region_obj,
-				    acpi_ut_get_region_name(region_obj->region.
-							    space_id)));
-			return_ACPI_STATUS(AE_NOT_EXIST);
-		}
-
-		/*
-		 * We must exit the interpreter because the region
-		 * setup will potentially execute control methods
-		 * (e.g., _REG method for this region)
-		 */
-		acpi_ex_exit_interpreter();
-
-		status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
-				      handler_desc->address_space.context,
-				      &region_context);
-
-		/* Re-enter the interpreter */
-
-		acpi_ex_enter_interpreter();
-
-		/* Check for failure of the Region Setup */
-
-		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"During region initialization: [%s]",
-					acpi_ut_get_region_name(region_obj->
-								region.
-								space_id)));
-			return_ACPI_STATUS(status);
-		}
-
-		/*
-		 * Region initialization may have been completed by region_setup
-		 */
-		if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
-			region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE;
-
-			if (region_obj2->extra.region_context) {
-
-				/* The handler for this region was already installed */
-
-				ACPI_FREE(region_context);
-			} else {
-				/*
-				 * Save the returned context for use in all accesses to
-				 * this particular region
-				 */
-				region_obj2->extra.region_context =
-				    region_context;
-			}
-		}
-	}
-
-	/* We have everything we need, we can invoke the address space handler */
-
-	handler = handler_desc->address_space.handler;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-			  "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
-			  &region_obj->region.handler->address_space, handler,
-			  ACPI_FORMAT_NATIVE_UINT(address),
-			  acpi_ut_get_region_name(region_obj->region.
-						  space_id)));
-
-	if (!(handler_desc->address_space.handler_flags &
-	      ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
-		/*
-		 * For handlers other than the default (supplied) handlers, we must
-		 * exit the interpreter because the handler *might* block -- we don't
-		 * know what it will do, so we can't hold the lock on the interpreter.
-		 */
-		acpi_ex_exit_interpreter();
-	}
-
-	/* Call the handler */
-
-	status = handler(function, address, bit_width, value,
-			 handler_desc->address_space.context,
-			 region_obj2->extra.region_context);
-
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
-				acpi_ut_get_region_name(region_obj->region.
-							space_id)));
-	}
-
-	if (!(handler_desc->address_space.handler_flags &
-	      ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
-		/*
-		 * We just returned from a non-default handler, we must re-enter the
-		 * interpreter
-		 */
-		acpi_ex_enter_interpreter();
-	}
-
-	return_ACPI_STATUS(status);
-}
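
For reference, the handler invoked above follows the acpi_adr_space_handler convention visible in the call: function code, address, access width, a pointer to a full 64-bit value, plus the two context pointers. A skeletal handler of that shape, with the actual device access left as a stub (the body is hypothetical):

static acpi_status my_space_handler(u32 function,
				    acpi_physical_address address,
				    u32 bit_width, acpi_integer *value,
				    void *handler_context, void *region_context)
{
	/* 'value' always points at a full 64-bit integer, whatever bit_width is */
	if (function == ACPI_READ)
		*value = 0;	/* read bit_width bits from the device at 'address' */

	/* else ACPI_WRITE: push *value out to the device at 'address' */

	return AE_OK;
}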
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_detach_region
- *
- * PARAMETERS:  region_obj          - Region Object
- *              acpi_ns_is_locked   - Namespace Region Already Locked?
- *
- * RETURN:      None
- *
 * DESCRIPTION: Break the association between the handler and the region;
 *              this is a two-way association.
- *
- ******************************************************************************/
-
-void
-acpi_ev_detach_region(union acpi_operand_object *region_obj,
-		      u8 acpi_ns_is_locked)
-{
-	union acpi_operand_object *handler_obj;
-	union acpi_operand_object *obj_desc;
-	union acpi_operand_object **last_obj_ptr;
-	acpi_adr_space_setup region_setup;
-	void **region_context;
-	union acpi_operand_object *region_obj2;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_detach_region);
-
-	region_obj2 = acpi_ns_get_secondary_object(region_obj);
-	if (!region_obj2) {
-		return_VOID;
-	}
-	region_context = &region_obj2->extra.region_context;
-
-	/* Get the address handler from the region object */
-
-	handler_obj = region_obj->region.handler;
-	if (!handler_obj) {
-
-		/* This region has no handler, all done */
-
-		return_VOID;
-	}
-
-	/* Find this region in the handler's list */
-
-	obj_desc = handler_obj->address_space.region_list;
-	last_obj_ptr = &handler_obj->address_space.region_list;
-
-	while (obj_desc) {
-
-		/* Is this the correct Region? */
-
-		if (obj_desc == region_obj) {
-			ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-					  "Removing Region %p from address handler %p\n",
-					  region_obj, handler_obj));
-
-			/* This is it, remove it from the handler's list */
-
-			*last_obj_ptr = obj_desc->region.next;
-			obj_desc->region.next = NULL;	/* Must clear field */
-
-			if (acpi_ns_is_locked) {
-				status =
-				    acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-				if (ACPI_FAILURE(status)) {
-					return_VOID;
-				}
-			}
-
-			/* Now stop region accesses by executing the _REG method */
-
-			status = acpi_ev_execute_reg_method(region_obj, 0);
-			if (ACPI_FAILURE(status)) {
-				ACPI_EXCEPTION((AE_INFO, status,
-						"from region _REG, [%s]",
-						acpi_ut_get_region_name
-						(region_obj->region.space_id)));
-			}
-
-			if (acpi_ns_is_locked) {
-				status =
-				    acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-				if (ACPI_FAILURE(status)) {
-					return_VOID;
-				}
-			}
-
-			/*
-			 * If the region has been activated, call the setup handler
-			 * with the deactivate notification
-			 */
-			if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) {
-				region_setup = handler_obj->address_space.setup;
-				status =
-				    region_setup(region_obj,
-						 ACPI_REGION_DEACTIVATE,
-						 handler_obj->address_space.
-						 context, region_context);
-
-				/* Init routine may fail, just ignore errors */
-
-				if (ACPI_FAILURE(status)) {
-					ACPI_EXCEPTION((AE_INFO, status,
-							"from region handler - deactivate, [%s]",
-							acpi_ut_get_region_name
-							(region_obj->region.
-							 space_id)));
-				}
-
-				region_obj->region.flags &=
-				    ~(AOPOBJ_SETUP_COMPLETE);
-			}
-
-			/*
-			 * Remove handler reference in the region
-			 *
-			 * NOTE: this doesn't mean that the region goes away, the region
-			 * is just inaccessible as indicated to the _REG method
-			 *
-			 * If the region is on the handler's list, this must be the
-			 * region's handler
-			 */
-			region_obj->region.handler = NULL;
-			acpi_ut_remove_reference(handler_obj);
-
-			return_VOID;
-		}
-
-		/* Walk the linked list of handlers */
-
-		last_obj_ptr = &obj_desc->region.next;
-		obj_desc = obj_desc->region.next;
-	}
-
-	/* If we get here, the region was not in the handler's region list */
-
-	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-			  "Cannot remove region %p from address handler %p\n",
-			  region_obj, handler_obj));
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_attach_region
- *
- * PARAMETERS:  handler_obj         - Handler Object
- *              region_obj          - Region Object
- *              acpi_ns_is_locked   - Namespace Region Already Locked?
- *
- * RETURN:      None
- *
 * DESCRIPTION: Create the association between the handler and the region;
 *              this is a two-way association.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_attach_region(union acpi_operand_object *handler_obj,
-		      union acpi_operand_object *region_obj,
-		      u8 acpi_ns_is_locked)
-{
-
-	ACPI_FUNCTION_TRACE(ev_attach_region);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-			  "Adding Region [%4.4s] %p to address handler %p [%s]\n",
-			  acpi_ut_get_node_name(region_obj->region.node),
-			  region_obj, handler_obj,
-			  acpi_ut_get_region_name(region_obj->region.
-						  space_id)));
-
-	/* Link this region to the front of the handler's list */
-
-	region_obj->region.next = handler_obj->address_space.region_list;
-	handler_obj->address_space.region_list = region_obj;
-
-	/* Install the region's handler */
-
-	if (region_obj->region.handler) {
-		return_ACPI_STATUS(AE_ALREADY_EXISTS);
-	}
-
-	region_obj->region.handler = handler_obj;
-	acpi_ut_add_reference(handler_obj);
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_install_handler
- *
- * PARAMETERS:  walk_namespace callback
- *
- * DESCRIPTION: This routine installs an address handler into objects that are
- *              of type Region or Device.
- *
- *              If the Object is a Device, and the device has a handler of
- *              the same type then the search is terminated in that branch.
- *
- *              This is because the existing handler is closer in proximity
- *              to any more regions than the one we are trying to install.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_install_handler(acpi_handle obj_handle,
-			u32 level, void *context, void **return_value)
-{
-	union acpi_operand_object *handler_obj;
-	union acpi_operand_object *next_handler_obj;
-	union acpi_operand_object *obj_desc;
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	ACPI_FUNCTION_NAME(ev_install_handler);
-
-	handler_obj = (union acpi_operand_object *)context;
-
-	/* Parameter validation */
-
-	if (!handler_obj) {
-		return (AE_OK);
-	}
-
-	/* Convert and validate the device handle */
-
-	node = acpi_ns_map_handle_to_node(obj_handle);
-	if (!node) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/*
-	 * We only care about regions and objects
-	 * that are allowed to have address space handlers
-	 */
-	if ((node->type != ACPI_TYPE_DEVICE) &&
-	    (node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
-		return (AE_OK);
-	}
-
-	/* Check for an existing internal object */
-
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (!obj_desc) {
-
-		/* No object, just exit */
-
-		return (AE_OK);
-	}
-
-	/* Devices are handled differently than regions */
-
-	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_DEVICE) {
-
-		/* Check if this Device already has a handler for this address space */
-
-		next_handler_obj = obj_desc->device.handler;
-		while (next_handler_obj) {
-
-			/* Found a handler, is it for the same address space? */
-
-			if (next_handler_obj->address_space.space_id ==
-			    handler_obj->address_space.space_id) {
-				ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-						  "Found handler for region [%s] in device %p(%p) handler %p\n",
-						  acpi_ut_get_region_name
-						  (handler_obj->address_space.
-						   space_id), obj_desc,
-						  next_handler_obj,
-						  handler_obj));
-
-				/*
-				 * Since the object we found it on was a device, then it
-				 * means that someone has already installed a handler for
-				 * the branch of the namespace from this device on.  Just
-				 * bail out telling the walk routine to not traverse this
-				 * branch.  This preserves the scoping rule for handlers.
-				 */
-				return (AE_CTRL_DEPTH);
-			}
-
-			/* Walk the linked list of handlers attached to this device */
-
-			next_handler_obj = next_handler_obj->address_space.next;
-		}
-
-		/*
-		 * As long as the device didn't have a handler for this
-		 * space we don't care about it.  We just ignore it and
-		 * proceed.
-		 */
-		return (AE_OK);
-	}
-
-	/* Object is a Region */
-
-	if (obj_desc->region.space_id != handler_obj->address_space.space_id) {
-		/*
-		 * This region is for a different address space
-		 * -- just ignore it
-		 */
-		return (AE_OK);
-	}
-
-	/*
-	 * Now we have a region and it is for the handler's address
-	 * space type.
-	 *
-	 * First disconnect region for any previous handler (if any)
-	 */
-	acpi_ev_detach_region(obj_desc, FALSE);
-
-	/* Connect the region to the new handler */
-
-	status = acpi_ev_attach_region(handler_obj, obj_desc, FALSE);
-	return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_install_space_handler
- *
- * PARAMETERS:  Node            - Namespace node for the device
- *              space_id        - The address space ID
- *              Handler         - Address of the handler
- *              Setup           - Address of the setup function
- *              Context         - Value passed to the handler on each access
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Install a handler for all op_regions of a given space_id.
- *              Assumes namespace is locked
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_install_space_handler(struct acpi_namespace_node * node,
-			      acpi_adr_space_type space_id,
-			      acpi_adr_space_handler handler,
-			      acpi_adr_space_setup setup, void *context)
-{
-	union acpi_operand_object *obj_desc;
-	union acpi_operand_object *handler_obj;
-	acpi_status status;
-	acpi_object_type type;
-	u8 flags = 0;
-
-	ACPI_FUNCTION_TRACE(ev_install_space_handler);
-
-	/*
-	 * This registration is valid for only the types below
-	 * and the root.  This is where the default handlers
-	 * get placed.
-	 */
-	if ((node->type != ACPI_TYPE_DEVICE) &&
-	    (node->type != ACPI_TYPE_PROCESSOR) &&
-	    (node->type != ACPI_TYPE_THERMAL) && (node != acpi_gbl_root_node)) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	if (handler == ACPI_DEFAULT_HANDLER) {
-		flags = ACPI_ADDR_HANDLER_DEFAULT_INSTALLED;
-
-		switch (space_id) {
-		case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-			handler = acpi_ex_system_memory_space_handler;
-			setup = acpi_ev_system_memory_region_setup;
-			break;
-
-		case ACPI_ADR_SPACE_SYSTEM_IO:
-			handler = acpi_ex_system_io_space_handler;
-			setup = acpi_ev_io_space_region_setup;
-			break;
-
-		case ACPI_ADR_SPACE_PCI_CONFIG:
-			handler = acpi_ex_pci_config_space_handler;
-			setup = acpi_ev_pci_config_region_setup;
-			break;
-
-		case ACPI_ADR_SPACE_CMOS:
-			handler = acpi_ex_cmos_space_handler;
-			setup = acpi_ev_cmos_region_setup;
-			break;
-
-		case ACPI_ADR_SPACE_PCI_BAR_TARGET:
-			handler = acpi_ex_pci_bar_space_handler;
-			setup = acpi_ev_pci_bar_region_setup;
-			break;
-
-		case ACPI_ADR_SPACE_DATA_TABLE:
-			handler = acpi_ex_data_table_space_handler;
-			setup = NULL;
-			break;
-
-		default:
-			status = AE_BAD_PARAMETER;
-			goto unlock_and_exit;
-		}
-	}
-
-	/* If the caller hasn't specified a setup routine, use the default */
-
-	if (!setup) {
-		setup = acpi_ev_default_region_setup;
-	}
-
-	/* Check for an existing internal object */
-
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (obj_desc) {
-		/*
-		 * The attached device object already exists.
-		 * Make sure the handler is not already installed.
-		 */
-		handler_obj = obj_desc->device.handler;
-
-		/* Walk the handler list for this device */
-
-		while (handler_obj) {
-
-			/* Same space_id indicates a handler already installed */
-
-			if (handler_obj->address_space.space_id == space_id) {
-				if (handler_obj->address_space.handler ==
-				    handler) {
-					/*
-					 * It is (relatively) OK to attempt to install the SAME
-					 * handler twice. This can easily happen
-					 * with PCI_Config space.
-					 */
-					status = AE_SAME_HANDLER;
-					goto unlock_and_exit;
-				} else {
-					/* A handler is already installed */
-
-					status = AE_ALREADY_EXISTS;
-				}
-				goto unlock_and_exit;
-			}
-
-			/* Walk the linked list of handlers */
-
-			handler_obj = handler_obj->address_space.next;
-		}
-	} else {
-		ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-				  "Creating object on Device %p while installing handler\n",
-				  node));
-
-		/* obj_desc does not exist, create one */
-
-		if (node->type == ACPI_TYPE_ANY) {
-			type = ACPI_TYPE_DEVICE;
-		} else {
-			type = node->type;
-		}
-
-		obj_desc = acpi_ut_create_internal_object(type);
-		if (!obj_desc) {
-			status = AE_NO_MEMORY;
-			goto unlock_and_exit;
-		}
-
-		/* Init new descriptor */
-
-		obj_desc->common.type = (u8) type;
-
-		/* Attach the new object to the Node */
-
-		status = acpi_ns_attach_object(node, obj_desc, type);
-
-		/* Remove local reference to the object */
-
-		acpi_ut_remove_reference(obj_desc);
-
-		if (ACPI_FAILURE(status)) {
-			goto unlock_and_exit;
-		}
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-			  "Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n",
-			  acpi_ut_get_region_name(space_id), space_id,
-			  acpi_ut_get_node_name(node), node, obj_desc));
-
-	/*
-	 * Install the handler
-	 *
-	 * At this point there is no existing handler.
-	 * Just allocate the object for the handler and link it
-	 * into the list.
-	 */
-	handler_obj =
-	    acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_ADDRESS_HANDLER);
-	if (!handler_obj) {
-		status = AE_NO_MEMORY;
-		goto unlock_and_exit;
-	}
-
-	/* Init handler obj */
-
-	handler_obj->address_space.space_id = (u8) space_id;
-	handler_obj->address_space.handler_flags = flags;
-	handler_obj->address_space.region_list = NULL;
-	handler_obj->address_space.node = node;
-	handler_obj->address_space.handler = handler;
-	handler_obj->address_space.context = context;
-	handler_obj->address_space.setup = setup;
-
-	/* Install at head of Device.address_space list */
-
-	handler_obj->address_space.next = obj_desc->device.handler;
-
-	/*
-	 * The Device object is the first reference on the handler_obj.
-	 * Each region that uses the handler adds a reference.
-	 */
-	obj_desc->device.handler = handler_obj;
-
-	/*
-	 * Walk the namespace finding all of the regions this
-	 * handler will manage.
-	 *
-	 * Start at the device and search the branch toward
-	 * the leaf nodes until either the leaf is encountered or
-	 * a device is detected that has an address handler of the
-	 * same type.
-	 *
-	 * In either case, back up and search down the remainder
-	 * of the branch
-	 */
-	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
-					ACPI_NS_WALK_UNLOCK,
-					acpi_ev_install_handler, handler_obj,
-					NULL);
-
-      unlock_and_exit:
-	return_ACPI_STATUS(status);
-}
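
Drivers normally reach this path through the public acpi_install_address_space_handler() wrapper rather than calling it directly. A hedged usage sketch (the handler is the one sketched earlier; the space ID choice and error policy are illustrative, not taken from this patch):

acpi_status register_my_space_handler(acpi_handle device)
{
	acpi_status status;

	status = acpi_install_address_space_handler(device,
						    ACPI_ADR_SPACE_EC,
						    my_space_handler,
						    NULL,	/* default region setup */
						    NULL);	/* no handler context */
	if (status == AE_SAME_HANDLER)
		status = AE_OK;	/* this handler was already installed */

	return status;
}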
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_execute_reg_methods
- *
- * PARAMETERS:  Node            - Namespace node for the device
- *              space_id        - The address space ID
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Run all _REG methods for the input Space ID;
- *              Note: assumes namespace is locked, or system init time.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
-			    acpi_adr_space_type space_id)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
-
-	/*
-	 * Run all _REG methods for all Operation Regions for this
-	 * space ID.  This is a separate walk in order to handle any
-	 * interdependencies between regions and _REG methods.  (i.e. handlers
-	 * must be installed for all regions of this Space ID before we
-	 * can run any _REG methods)
-	 */
-	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
-					ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
-					&space_id, NULL);
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_reg_run
- *
- * PARAMETERS:  walk_namespace callback
- *
 * DESCRIPTION: Run _REG method for region objects of the requested space_id
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_reg_run(acpi_handle obj_handle,
-		u32 level, void *context, void **return_value)
-{
-	union acpi_operand_object *obj_desc;
-	struct acpi_namespace_node *node;
-	acpi_adr_space_type space_id;
-	acpi_status status;
-
-	space_id = *ACPI_CAST_PTR(acpi_adr_space_type, context);
-
-	/* Convert and validate the device handle */
-
-	node = acpi_ns_map_handle_to_node(obj_handle);
-	if (!node) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/*
-	 * We only care about regions and objects
-	 * that are allowed to have address space handlers
-	 */
-	if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
-		return (AE_OK);
-	}
-
-	/* Check for an existing internal object */
-
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (!obj_desc) {
-
-		/* No object, just exit */
-
-		return (AE_OK);
-	}
-
-	/* Object is a Region */
-
-	if (obj_desc->region.space_id != space_id) {
-		/*
-		 * This region is for a different address space
-		 * -- just ignore it
-		 */
-		return (AE_OK);
-	}
-
-	status = acpi_ev_execute_reg_method(obj_desc, 1);
-	return (status);
-}
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
deleted file mode 100644
index 6b94b38..0000000
--- a/drivers/acpi/events/evrgnini.c
+++ /dev/null
@@ -1,688 +0,0 @@
-/******************************************************************************
- *
 * Module Name: evrgnini - ACPI address_space (op_region) init
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acevents.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_EVENTS
-ACPI_MODULE_NAME("evrgnini")
-
-/* Local prototypes */
-static u8 acpi_ev_match_pci_root_bridge(char *id);
-
-static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_system_memory_region_setup
- *
- * PARAMETERS:  Handle              - Region we are interested in
- *              Function            - Start or stop
- *              handler_context     - Address space handler context
- *              region_context      - Region specific context
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Setup a system_memory operation region
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_system_memory_region_setup(acpi_handle handle,
-				   u32 function,
-				   void *handler_context, void **region_context)
-{
-	union acpi_operand_object *region_desc =
-	    (union acpi_operand_object *)handle;
-	struct acpi_mem_space_context *local_region_context;
-
-	ACPI_FUNCTION_TRACE(ev_system_memory_region_setup);
-
-	if (function == ACPI_REGION_DEACTIVATE) {
-		if (*region_context) {
-			local_region_context =
-			    (struct acpi_mem_space_context *)*region_context;
-
-			/* Delete a cached mapping if present */
-
-			if (local_region_context->mapped_length) {
-				acpi_os_unmap_memory(local_region_context->
-						     mapped_logical_address,
-						     local_region_context->
-						     mapped_length);
-			}
-			ACPI_FREE(local_region_context);
-			*region_context = NULL;
-		}
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Create a new context */
-
-	local_region_context =
-	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_mem_space_context));
-	if (!(local_region_context)) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Save the region length and address for use in the handler */
-
-	local_region_context->length = region_desc->region.length;
-	local_region_context->address = region_desc->region.address;
-
-	*region_context = local_region_context;
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_io_space_region_setup
- *
- * PARAMETERS:  Handle              - Region we are interested in
- *              Function            - Start or stop
- *              handler_context     - Address space handler context
- *              region_context      - Region specific context
- *
- * RETURN:      Status
- *
 * DESCRIPTION: Setup an IO operation region
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_io_space_region_setup(acpi_handle handle,
-			      u32 function,
-			      void *handler_context, void **region_context)
-{
-	ACPI_FUNCTION_TRACE(ev_io_space_region_setup);
-
-	if (function == ACPI_REGION_DEACTIVATE) {
-		*region_context = NULL;
-	} else {
-		*region_context = handler_context;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_pci_config_region_setup
- *
- * PARAMETERS:  Handle              - Region we are interested in
- *              Function            - Start or stop
- *              handler_context     - Address space handler context
- *              region_context      - Region specific context
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Setup a PCI_Config operation region
- *
- * MUTEX:       Assumes namespace is not locked
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_pci_config_region_setup(acpi_handle handle,
-				u32 function,
-				void *handler_context, void **region_context)
-{
-	acpi_status status = AE_OK;
-	acpi_integer pci_value;
-	struct acpi_pci_id *pci_id = *region_context;
-	union acpi_operand_object *handler_obj;
-	struct acpi_namespace_node *parent_node;
-	struct acpi_namespace_node *pci_root_node;
-	struct acpi_namespace_node *pci_device_node;
-	union acpi_operand_object *region_obj =
-	    (union acpi_operand_object *)handle;
-
-	ACPI_FUNCTION_TRACE(ev_pci_config_region_setup);
-
-	handler_obj = region_obj->region.handler;
-	if (!handler_obj) {
-		/*
-		 * No installed handler. This shouldn't happen because the dispatch
-		 * routine checks before we get here, but we check again just in case.
-		 */
-		ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-				  "Attempting to init a region %p, with no handler\n",
-				  region_obj));
-		return_ACPI_STATUS(AE_NOT_EXIST);
-	}
-
-	*region_context = NULL;
-	if (function == ACPI_REGION_DEACTIVATE) {
-		if (pci_id) {
-			ACPI_FREE(pci_id);
-		}
-		return_ACPI_STATUS(status);
-	}
-
-	parent_node = acpi_ns_get_parent_node(region_obj->region.node);
-
-	/*
-	 * Get the _SEG and _BBN values from the device upon which the handler
-	 * is installed.
-	 *
-	 * We need to get the _SEG and _BBN objects relative to the PCI BUS device.
-	 * This is the device the handler has been registered to handle.
-	 */
-
-	/*
-	 * If the address_space.Node is still pointing to the root, we need
-	 * to scan upward for a PCI Root bridge and re-associate the op_region
-	 * handlers with that device.
-	 */
-	if (handler_obj->address_space.node == acpi_gbl_root_node) {
-
-		/* Start search from the parent object */
-
-		pci_root_node = parent_node;
-		while (pci_root_node != acpi_gbl_root_node) {
-
-			/* Get the _HID/_CID in order to detect a root_bridge */
-
-			if (acpi_ev_is_pci_root_bridge(pci_root_node)) {
-
-				/* Install a handler for this PCI root bridge */
-
-				status =
-				    acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
-				if (ACPI_FAILURE(status)) {
-					if (status == AE_SAME_HANDLER) {
-						/*
-						 * It is OK if the handler is already installed on the root
-						 * bridge.  Still need to return a context object for the
-						 * new PCI_Config operation region, however.
-						 */
-						status = AE_OK;
-					} else {
-						ACPI_EXCEPTION((AE_INFO, status,
-								"Could not install PciConfig handler for Root Bridge %4.4s",
-								acpi_ut_get_node_name
-								(pci_root_node)));
-					}
-				}
-				break;
-			}
-
-			pci_root_node = acpi_ns_get_parent_node(pci_root_node);
-		}
-
-		/* PCI root bridge not found, use namespace root node */
-	} else {
-		pci_root_node = handler_obj->address_space.node;
-	}
-
-	/*
-	 * If this region is now initialized, we are done.
-	 * (install_address_space_handler could have initialized it)
-	 */
-	if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Region is still not initialized. Create a new context */
-
-	pci_id = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pci_id));
-	if (!pci_id) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/*
-	 * For PCI_Config space access, we need the segment, bus,
-	 * device and function numbers.  Acquire them here.
-	 *
-	 * Find the parent device object. (This allows the operation region to be
-	 * within a subscope under the device, such as a control method.)
-	 */
-	pci_device_node = region_obj->region.node;
-	while (pci_device_node && (pci_device_node->type != ACPI_TYPE_DEVICE)) {
-		pci_device_node = acpi_ns_get_parent_node(pci_device_node);
-	}
-
-	if (!pci_device_node) {
-		ACPI_FREE(pci_id);
-		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-	}
-
-	/*
-	 * Get the PCI device and function numbers from the _ADR object
-	 * contained in the parent's scope.
-	 */
-	status =
-	    acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, pci_device_node,
-					    &pci_value);
-
-	/*
-	 * The default is zero, and since the allocation above zeroed
-	 * the data, just do nothing on failure.
-	 */
-	if (ACPI_SUCCESS(status)) {
-		pci_id->device = ACPI_HIWORD(ACPI_LODWORD(pci_value));
-		pci_id->function = ACPI_LOWORD(ACPI_LODWORD(pci_value));
-	}
-
-	/* The PCI segment number comes from the _SEG method */
-
-	status =
-	    acpi_ut_evaluate_numeric_object(METHOD_NAME__SEG, pci_root_node,
-					    &pci_value);
-	if (ACPI_SUCCESS(status)) {
-		pci_id->segment = ACPI_LOWORD(pci_value);
-	}
-
-	/* The PCI bus number comes from the _BBN method */
-
-	status =
-	    acpi_ut_evaluate_numeric_object(METHOD_NAME__BBN, pci_root_node,
-					    &pci_value);
-	if (ACPI_SUCCESS(status)) {
-		pci_id->bus = ACPI_LOWORD(pci_value);
-	}
-
-	/* Complete this device's pci_id */
-
-	acpi_os_derive_pci_id(pci_root_node, region_obj->region.node, &pci_id);
-
-	*region_context = pci_id;
-	return_ACPI_STATUS(AE_OK);
-}
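
The _ADR value evaluated above packs the PCI device number into the high word and the function number into the low word of the low dword, which is what the ACPI_HIWORD/ACPI_LOWORD extraction expresses. A small stand-alone illustration of the same decode (names hypothetical):

#include <stdint.h>

static void decode_pci_adr(uint64_t adr, uint16_t *device, uint16_t *function)
{
	*device   = (uint16_t)((adr >> 16) & 0xFFFF);	/* ACPI_HIWORD(ACPI_LODWORD(adr)) */
	*function = (uint16_t)(adr & 0xFFFF);		/* ACPI_LOWORD(ACPI_LODWORD(adr)) */
}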
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_match_pci_root_bridge
- *
- * PARAMETERS:  Id              - The HID/CID in string format
- *
- * RETURN:      TRUE if the Id is a match for a PCI/PCI-Express Root Bridge
- *
- * DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID.
- *
- ******************************************************************************/
-
-static u8 acpi_ev_match_pci_root_bridge(char *id)
-{
-
-	/*
-	 * Check if this is a PCI root.
-	 * ACPI 3.0+: check for a PCI Express root also.
-	 */
-	if (!(ACPI_STRNCMP(id,
-			   PCI_ROOT_HID_STRING,
-			   sizeof(PCI_ROOT_HID_STRING))) ||
-	    !(ACPI_STRNCMP(id,
-			   PCI_EXPRESS_ROOT_HID_STRING,
-			   sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
-		return (TRUE);
-	}
-
-	return (FALSE);
-}
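
To the best of my knowledge, PCI_ROOT_HID_STRING and PCI_EXPRESS_ROOT_HID_STRING expand to "PNP0A03" (PCI root bridge) and "PNP0A08" (PCI Express root bridge). A stand-alone equivalent of the test above, with those IDs treated as an assumption:

#include <string.h>

static int is_pci_root_id(const char *id)
{
	return strncmp(id, "PNP0A03", 8) == 0 ||	/* assumed PCI_ROOT_HID_STRING */
	       strncmp(id, "PNP0A08", 8) == 0;		/* assumed PCI_EXPRESS_ROOT_HID_STRING */
}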
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_is_pci_root_bridge
- *
- * PARAMETERS:  Node            - Device node being examined
- *
- * RETURN:      TRUE if device is a PCI/PCI-Express Root Bridge
- *
- * DESCRIPTION: Determine if the input device represents a PCI Root Bridge by
- *              examining the _HID and _CID for the device.
- *
- ******************************************************************************/
-
-static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
-{
-	acpi_status status;
-	struct acpica_device_id hid;
-	struct acpi_compatible_id_list *cid;
-	u32 i;
-
-	/*
-	 * Get the _HID and check for a PCI Root Bridge
-	 */
-	status = acpi_ut_execute_HID(node, &hid);
-	if (ACPI_FAILURE(status)) {
-		return (FALSE);
-	}
-
-	if (acpi_ev_match_pci_root_bridge(hid.value)) {
-		return (TRUE);
-	}
-
-	/*
-	 * The _HID did not match.
-	 * Get the _CID and check for a PCI Root Bridge
-	 */
-	status = acpi_ut_execute_CID(node, &cid);
-	if (ACPI_FAILURE(status)) {
-		return (FALSE);
-	}
-
-	/* Check all _CIDs in the returned list */
-
-	for (i = 0; i < cid->count; i++) {
-		if (acpi_ev_match_pci_root_bridge(cid->id[i].value)) {
-			ACPI_FREE(cid);
-			return (TRUE);
-		}
-	}
-
-	ACPI_FREE(cid);
-	return (FALSE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_pci_bar_region_setup
- *
- * PARAMETERS:  Handle              - Region we are interested in
- *              Function            - Start or stop
- *              handler_context     - Address space handler context
- *              region_context      - Region specific context
- *
- * RETURN:      Status
- *
 * DESCRIPTION: Setup a pci_bar operation region
- *
- * MUTEX:       Assumes namespace is not locked
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_pci_bar_region_setup(acpi_handle handle,
-			     u32 function,
-			     void *handler_context, void **region_context)
-{
-	ACPI_FUNCTION_TRACE(ev_pci_bar_region_setup);
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_cmos_region_setup
- *
- * PARAMETERS:  Handle              - Region we are interested in
- *              Function            - Start or stop
- *              handler_context     - Address space handler context
- *              region_context      - Region specific context
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Setup a CMOS operation region
- *
- * MUTEX:       Assumes namespace is not locked
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_cmos_region_setup(acpi_handle handle,
-			  u32 function,
-			  void *handler_context, void **region_context)
-{
-	ACPI_FUNCTION_TRACE(ev_cmos_region_setup);
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_default_region_setup
- *
- * PARAMETERS:  Handle              - Region we are interested in
- *              Function            - Start or stop
- *              handler_context     - Address space handler context
- *              region_context      - Region specific context
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Default region initialization
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_default_region_setup(acpi_handle handle,
-			     u32 function,
-			     void *handler_context, void **region_context)
-{
-	ACPI_FUNCTION_TRACE(ev_default_region_setup);
-
-	if (function == ACPI_REGION_DEACTIVATE) {
-		*region_context = NULL;
-	} else {
-		*region_context = handler_context;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_initialize_region
- *
- * PARAMETERS:  region_obj      - Region we are initializing
- *              acpi_ns_locked  - Is namespace locked?
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Initializes the region, finds any _REG methods and saves them
- *              for execution at a later time
- *
- *              Get the appropriate address space handler for a newly
- *              created region.
- *
- *              This also performs address space specific initialization.  For
- *              example, PCI regions must have an _ADR object that contains
- *              a PCI address in the scope of the definition.  This address is
- *              required to perform an access to PCI config space.
- *
- * MUTEX:       Interpreter should be unlocked, because we may run the _REG
- *              method for this region.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_initialize_region(union acpi_operand_object *region_obj,
-			  u8 acpi_ns_locked)
-{
-	union acpi_operand_object *handler_obj;
-	union acpi_operand_object *obj_desc;
-	acpi_adr_space_type space_id;
-	struct acpi_namespace_node *node;
-	acpi_status status;
-	struct acpi_namespace_node *method_node;
-	acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG;
-	union acpi_operand_object *region_obj2;
-
-	ACPI_FUNCTION_TRACE_U32(ev_initialize_region, acpi_ns_locked);
-
-	if (!region_obj) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	if (region_obj->common.flags & AOPOBJ_OBJECT_INITIALIZED) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	region_obj2 = acpi_ns_get_secondary_object(region_obj);
-	if (!region_obj2) {
-		return_ACPI_STATUS(AE_NOT_EXIST);
-	}
-
-	node = acpi_ns_get_parent_node(region_obj->region.node);
-	space_id = region_obj->region.space_id;
-
-	/* Setup defaults */
-
-	region_obj->region.handler = NULL;
-	region_obj2->extra.method_REG = NULL;
-	region_obj->common.flags &= ~(AOPOBJ_SETUP_COMPLETE);
-	region_obj->common.flags |= AOPOBJ_OBJECT_INITIALIZED;
-
-	/* Find any "_REG" method associated with this region definition */
-
-	status =
-	    acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD,
-				     &method_node);
-	if (ACPI_SUCCESS(status)) {
-		/*
-		 * The _REG method is optional and there can be only one per region
-		 * definition.  This will be executed when the handler is attached
-		 * or removed
-		 */
-		region_obj2->extra.method_REG = method_node;
-	}
-
-	/*
-	 * The following loop depends upon the root Node having no parent
-	 * i.e., acpi_gbl_root_node->parent_entry being set to NULL
-	 */
-	while (node) {
-
-		/* Check to see if a handler exists */
-
-		handler_obj = NULL;
-		obj_desc = acpi_ns_get_attached_object(node);
-		if (obj_desc) {
-
-			/* Can only be a handler if the object exists */
-
-			switch (node->type) {
-			case ACPI_TYPE_DEVICE:
-
-				handler_obj = obj_desc->device.handler;
-				break;
-
-			case ACPI_TYPE_PROCESSOR:
-
-				handler_obj = obj_desc->processor.handler;
-				break;
-
-			case ACPI_TYPE_THERMAL:
-
-				handler_obj = obj_desc->thermal_zone.handler;
-				break;
-
-			default:
-				/* Ignore other objects */
-				break;
-			}
-
-			while (handler_obj) {
-
-				/* Is this handler of the correct type? */
-
-				if (handler_obj->address_space.space_id ==
-				    space_id) {
-
-					/* Found correct handler */
-
-					ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-							  "Found handler %p for region %p in obj %p\n",
-							  handler_obj,
-							  region_obj,
-							  obj_desc));
-
-					status =
-					    acpi_ev_attach_region(handler_obj,
-								  region_obj,
-								  acpi_ns_locked);
-
-					/*
-					 * Tell all users that this region is usable by running the _REG
-					 * method
-					 */
-					if (acpi_ns_locked) {
-						status =
-						    acpi_ut_release_mutex
-						    (ACPI_MTX_NAMESPACE);
-						if (ACPI_FAILURE(status)) {
-							return_ACPI_STATUS
-							    (status);
-						}
-					}
-
-					status =
-					    acpi_ev_execute_reg_method
-					    (region_obj, 1);
-
-					if (acpi_ns_locked) {
-						status =
-						    acpi_ut_acquire_mutex
-						    (ACPI_MTX_NAMESPACE);
-						if (ACPI_FAILURE(status)) {
-							return_ACPI_STATUS
-							    (status);
-						}
-					}
-
-					return_ACPI_STATUS(AE_OK);
-				}
-
-				/* Try next handler in the list */
-
-				handler_obj = handler_obj->address_space.next;
-			}
-		}
-
-		/*
-		 * This node does not have the handler we need;
-		 * Pop up one level
-		 */
-		node = acpi_ns_get_parent_node(node);
-	}
-
-	/* If we get here, there is no handler for this region */
-
-	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-			  "No handler for RegionType %s(%X) (RegionObj %p)\n",
-			  acpi_ut_get_region_name(space_id), space_id,
-			  region_obj));
-
-	return_ACPI_STATUS(AE_NOT_EXIST);
-}
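
For reference, the two setup callbacks deleted above have the shape that acpi_install_address_space_handler() (removed from evxfregn.c later in this diff) expects for its Setup argument. Below is a minimal sketch of such a callback, modelled directly on acpi_ev_default_region_setup(); the my_ prefixed name is hypothetical.

#include <acpi/acpi.h>

/* Hypothetical region setup callback, mirroring acpi_ev_default_region_setup()
 * above: ACPI_REGION_DEACTIVATE clears the per-region context, any other
 * function code (activation) simply passes the handler context through. */
static acpi_status my_region_setup(acpi_handle handle, u32 function,
				   void *handler_context, void **region_context)
{
	if (function == ACPI_REGION_DEACTIVATE)
		*region_context = NULL;
	else
		*region_context = handler_context;

	return AE_OK;
}
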
diff --git a/drivers/acpi/events/evsci.c b/drivers/acpi/events/evsci.c
deleted file mode 100644
index 2a8b778..0000000
--- a/drivers/acpi/events/evsci.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: evsci - System Control Interrupt configuration and
- *                      legacy to ACPI mode state transition functions
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acevents.h>
-
-#define _COMPONENT          ACPI_EVENTS
-ACPI_MODULE_NAME("evsci")
-
-/* Local prototypes */
-static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_sci_xrupt_handler
- *
- * PARAMETERS:  Context   - Calling Context
- *
- * RETURN:      Status code indicating whether the interrupt was handled.
- *
- * DESCRIPTION: Interrupt handler that will figure out what function or
- *              control method to call to deal with a SCI.
- *
- ******************************************************************************/
-
-static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
-{
-	struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
-	u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;
-
-	ACPI_FUNCTION_TRACE(ev_sci_xrupt_handler);
-
-	/*
-	 * We are guaranteed by the ACPI CA initialization/shutdown code that
-	 * if this interrupt handler is installed, ACPI is enabled.
-	 */
-
-	/*
-	 * Fixed Events:
-	 * Check for and dispatch any Fixed Events that have occurred
-	 */
-	interrupt_handled |= acpi_ev_fixed_event_detect();
-
-	/*
-	 * General Purpose Events:
-	 * Check for and dispatch any GPEs that have occurred
-	 */
-	interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
-
-	return_UINT32(interrupt_handled);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_gpe_xrupt_handler
- *
- * PARAMETERS:  Context   - Calling Context
- *
- * RETURN:      Status code indicating whether the interrupt was handled.
- *
- * DESCRIPTION: Handler for GPE Block Device interrupts
- *
- ******************************************************************************/
-
-u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
-{
-	struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
-	u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;
-
-	ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler);
-
-	/*
-	 * We are guaranteed by the ACPI CA initialization/shutdown code that
-	 * if this interrupt handler is installed, ACPI is enabled.
-	 */
-
-	/*
-	 * GPEs:
-	 * Check for and dispatch any GPEs that have occurred
-	 */
-	interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
-
-	return_UINT32(interrupt_handled);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_ev_install_sci_handler
- *
- * PARAMETERS:  none
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Installs SCI handler.
- *
- ******************************************************************************/
-
-u32 acpi_ev_install_sci_handler(void)
-{
-	u32 status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ev_install_sci_handler);
-
-	status =
-	    acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
-					      acpi_ev_sci_xrupt_handler,
-					      acpi_gbl_gpe_xrupt_list_head);
-	return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_ev_remove_sci_handler
- *
- * PARAMETERS:  none
- *
- * RETURN:      AE_OK if handler uninstalled OK, AE_ERROR if handler was not
- *              installed to begin with
- *
- * DESCRIPTION: Remove the SCI interrupt handler.  No further SCIs will be
- *              taken.
- *
- * Note:  It doesn't seem important to disable all events or set the event
- *        enable registers to their original values.  The OS should disable
- *        the SCI interrupt level when the handler is removed, so no more
- *        events will come in.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_remove_sci_handler(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_remove_sci_handler);
-
-	/* Just let the OS remove the handler and disable the level */
-
-	status =
-	    acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
-					     acpi_ev_sci_xrupt_handler);
-
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c
deleted file mode 100644
index 94a6efe..0000000
--- a/drivers/acpi/events/evxface.c
+++ /dev/null
@@ -1,820 +0,0 @@
-/******************************************************************************
- *
- * Module Name: evxface - External interfaces for ACPI events
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acevents.h>
-#include <acpi/acinterp.h>
-
-#define _COMPONENT          ACPI_EVENTS
-ACPI_MODULE_NAME("evxface")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_install_exception_handler
- *
- * PARAMETERS:  Handler         - Pointer to the handler function for the
- *                                event
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Saves the pointer to the handler function
- *
- ******************************************************************************/
-#ifdef ACPI_FUTURE_USAGE
-acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_install_exception_handler);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Don't allow two handlers. */
-
-	if (acpi_gbl_exception_handler) {
-		status = AE_ALREADY_EXISTS;
-		goto cleanup;
-	}
-
-	/* Install the handler */
-
-	acpi_gbl_exception_handler = handler;
-
-      cleanup:
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
-#endif				/*  ACPI_FUTURE_USAGE  */
-/*******************************************************************************
- *
- * FUNCTION:    acpi_install_fixed_event_handler
- *
- * PARAMETERS:  Event           - Event type to enable.
- *              Handler         - Pointer to the handler function for the
- *                                event
- *              Context         - Value passed to the handler on each GPE
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Saves the pointer to the handler function and then enables the
- *              event.
- *
- ******************************************************************************/
-acpi_status
-acpi_install_fixed_event_handler(u32 event,
-				 acpi_event_handler handler, void *context)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler);
-
-	/* Parameter validation */
-
-	if (event > ACPI_EVENT_MAX) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Don't allow two handlers. */
-
-	if (NULL != acpi_gbl_fixed_event_handlers[event].handler) {
-		status = AE_ALREADY_EXISTS;
-		goto cleanup;
-	}
-
-	/* Install the handler before enabling the event */
-
-	acpi_gbl_fixed_event_handlers[event].handler = handler;
-	acpi_gbl_fixed_event_handlers[event].context = context;
-
-	status = acpi_clear_event(event);
-	if (ACPI_SUCCESS(status))
-		status = acpi_enable_event(event, 0);
-	if (ACPI_FAILURE(status)) {
-		ACPI_WARNING((AE_INFO, "Could not enable fixed event %X",
-			      event));
-
-		/* Remove the handler */
-
-		acpi_gbl_fixed_event_handlers[event].handler = NULL;
-		acpi_gbl_fixed_event_handlers[event].context = NULL;
-	} else {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "Enabled fixed event %X, Handler=%p\n", event,
-				  handler));
-	}
-
-      cleanup:
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_remove_fixed_event_handler
- *
- * PARAMETERS:  Event           - Event type to disable.
- *              Handler         - Address of the handler
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Disables the event and unregisters the event handler.
- *
- ******************************************************************************/
-acpi_status
-acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler);
-
-	/* Parameter validation */
-
-	if (event > ACPI_EVENT_MAX) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Disable the event before removing the handler */
-
-	status = acpi_disable_event(event, 0);
-
-	/* Always Remove the handler */
-
-	acpi_gbl_fixed_event_handlers[event].handler = NULL;
-	acpi_gbl_fixed_event_handlers[event].context = NULL;
-
-	if (ACPI_FAILURE(status)) {
-		ACPI_WARNING((AE_INFO,
-			      "Could not write to fixed event enable register %X",
-			      event));
-	} else {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
-				  event));
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_install_notify_handler
- *
- * PARAMETERS:  Device          - The device for which notifies will be handled
- *              handler_type    - The type of handler:
- *                                  ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
- *                                  ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
- *                                  ACPI_ALL_NOTIFY:  both system and device
- *              Handler         - Address of the handler
- *              Context         - Value passed to the handler on each GPE
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Install a handler for notifies on an ACPI device
- *
- ******************************************************************************/
-acpi_status
-acpi_install_notify_handler(acpi_handle device,
-			    u32 handler_type,
-			    acpi_notify_handler handler, void *context)
-{
-	union acpi_operand_object *obj_desc;
-	union acpi_operand_object *notify_obj;
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_install_notify_handler);
-
-	/* Parameter validation */
-
-	if ((!device) ||
-	    (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Convert and validate the device handle */
-
-	node = acpi_ns_map_handle_to_node(device);
-	if (!node) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/*
-	 * Root Object:
-	 * Registering a notify handler on the root object indicates that the
-	 * caller wishes to receive notifications for all objects.  Note that
-	 * only one <external> global handler can be registered (per notify type).
-	 */
-	if (device == ACPI_ROOT_OBJECT) {
-
-		/* Make sure the handler is not already installed */
-
-		if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
-		     acpi_gbl_system_notify.handler) ||
-		    ((handler_type & ACPI_DEVICE_NOTIFY) &&
-		     acpi_gbl_device_notify.handler)) {
-			status = AE_ALREADY_EXISTS;
-			goto unlock_and_exit;
-		}
-
-		if (handler_type & ACPI_SYSTEM_NOTIFY) {
-			acpi_gbl_system_notify.node = node;
-			acpi_gbl_system_notify.handler = handler;
-			acpi_gbl_system_notify.context = context;
-		}
-
-		if (handler_type & ACPI_DEVICE_NOTIFY) {
-			acpi_gbl_device_notify.node = node;
-			acpi_gbl_device_notify.handler = handler;
-			acpi_gbl_device_notify.context = context;
-		}
-
-		/* Global notify handler installed */
-	}
-
-	/*
-	 * All Other Objects:
-	 * Caller will only receive notifications specific to the target object.
-	 * Note that only certain object types can receive notifications.
-	 */
-	else {
-		/* Notifies allowed on this object? */
-
-		if (!acpi_ev_is_notify_object(node)) {
-			status = AE_TYPE;
-			goto unlock_and_exit;
-		}
-
-		/* Check for an existing internal object */
-
-		obj_desc = acpi_ns_get_attached_object(node);
-		if (obj_desc) {
-
-			/* Object exists - make sure there's no handler */
-
-			if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
-			     obj_desc->common_notify.system_notify) ||
-			    ((handler_type & ACPI_DEVICE_NOTIFY) &&
-			     obj_desc->common_notify.device_notify)) {
-				status = AE_ALREADY_EXISTS;
-				goto unlock_and_exit;
-			}
-		} else {
-			/* Create a new object */
-
-			obj_desc = acpi_ut_create_internal_object(node->type);
-			if (!obj_desc) {
-				status = AE_NO_MEMORY;
-				goto unlock_and_exit;
-			}
-
-			/* Attach new object to the Node */
-
-			status =
-			    acpi_ns_attach_object(device, obj_desc, node->type);
-
-			/* Remove local reference to the object */
-
-			acpi_ut_remove_reference(obj_desc);
-			if (ACPI_FAILURE(status)) {
-				goto unlock_and_exit;
-			}
-		}
-
-		/* Install the handler */
-
-		notify_obj =
-		    acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_NOTIFY);
-		if (!notify_obj) {
-			status = AE_NO_MEMORY;
-			goto unlock_and_exit;
-		}
-
-		notify_obj->notify.node = node;
-		notify_obj->notify.handler = handler;
-		notify_obj->notify.context = context;
-
-		if (handler_type & ACPI_SYSTEM_NOTIFY) {
-			obj_desc->common_notify.system_notify = notify_obj;
-		}
-
-		if (handler_type & ACPI_DEVICE_NOTIFY) {
-			obj_desc->common_notify.device_notify = notify_obj;
-		}
-
-		if (handler_type == ACPI_ALL_NOTIFY) {
-
-			/* Extra ref if installed in both */
-
-			acpi_ut_add_reference(notify_obj);
-		}
-	}
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_notify_handler)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_remove_notify_handler
- *
- * PARAMETERS:  Device          - The device for which notifies will be handled
- *              handler_type    - The type of handler:
- *                                  ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
- *                                  ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
- *                                  ACPI_ALL_NOTIFY:  both system and device
- *              Handler         - Address of the handler
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove a handler for notifies on an ACPI device
- *
- ******************************************************************************/
-acpi_status
-acpi_remove_notify_handler(acpi_handle device,
-			   u32 handler_type, acpi_notify_handler handler)
-{
-	union acpi_operand_object *notify_obj;
-	union acpi_operand_object *obj_desc;
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_remove_notify_handler);
-
-	/* Parameter validation */
-
-	if ((!device) ||
-	    (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
-		status = AE_BAD_PARAMETER;
-		goto exit;
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		goto exit;
-	}
-
-	/* Convert and validate the device handle */
-
-	node = acpi_ns_map_handle_to_node(device);
-	if (!node) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/* Root Object */
-
-	if (device == ACPI_ROOT_OBJECT) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "Removing notify handler for namespace root object\n"));
-
-		if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
-		     !acpi_gbl_system_notify.handler) ||
-		    ((handler_type & ACPI_DEVICE_NOTIFY) &&
-		     !acpi_gbl_device_notify.handler)) {
-			status = AE_NOT_EXIST;
-			goto unlock_and_exit;
-		}
-
-		/* Make sure all deferred tasks are completed */
-
-		(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-		acpi_os_wait_events_complete(NULL);
-		status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-		if (ACPI_FAILURE(status)) {
-			goto exit;
-		}
-
-		if (handler_type & ACPI_SYSTEM_NOTIFY) {
-			acpi_gbl_system_notify.node = NULL;
-			acpi_gbl_system_notify.handler = NULL;
-			acpi_gbl_system_notify.context = NULL;
-		}
-
-		if (handler_type & ACPI_DEVICE_NOTIFY) {
-			acpi_gbl_device_notify.node = NULL;
-			acpi_gbl_device_notify.handler = NULL;
-			acpi_gbl_device_notify.context = NULL;
-		}
-	}
-
-	/* All Other Objects */
-
-	else {
-		/* Notifies allowed on this object? */
-
-		if (!acpi_ev_is_notify_object(node)) {
-			status = AE_TYPE;
-			goto unlock_and_exit;
-		}
-
-		/* Check for an existing internal object */
-
-		obj_desc = acpi_ns_get_attached_object(node);
-		if (!obj_desc) {
-			status = AE_NOT_EXIST;
-			goto unlock_and_exit;
-		}
-
-		/* Object exists - make sure there's an existing handler */
-
-		if (handler_type & ACPI_SYSTEM_NOTIFY) {
-			notify_obj = obj_desc->common_notify.system_notify;
-			if (!notify_obj) {
-				status = AE_NOT_EXIST;
-				goto unlock_and_exit;
-			}
-
-			if (notify_obj->notify.handler != handler) {
-				status = AE_BAD_PARAMETER;
-				goto unlock_and_exit;
-			}
-			/* Make sure all deferred tasks are completed */
-
-			(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-			acpi_os_wait_events_complete(NULL);
-			status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-			if (ACPI_FAILURE(status)) {
-				goto exit;
-			}
-
-			/* Remove the handler */
-			obj_desc->common_notify.system_notify = NULL;
-			acpi_ut_remove_reference(notify_obj);
-		}
-
-		if (handler_type & ACPI_DEVICE_NOTIFY) {
-			notify_obj = obj_desc->common_notify.device_notify;
-			if (!notify_obj) {
-				status = AE_NOT_EXIST;
-				goto unlock_and_exit;
-			}
-
-			if (notify_obj->notify.handler != handler) {
-				status = AE_BAD_PARAMETER;
-				goto unlock_and_exit;
-			}
-			/* Make sure all deferred tasks are completed */
-
-			(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-			acpi_os_wait_events_complete(NULL);
-			status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-			if (ACPI_FAILURE(status)) {
-				goto exit;
-			}
-
-			/* Remove the handler */
-			obj_desc->common_notify.device_notify = NULL;
-			acpi_ut_remove_reference(notify_obj);
-		}
-	}
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-      exit:
-	if (ACPI_FAILURE(status))
-		ACPI_EXCEPTION((AE_INFO, status, "Removing notify handler"));
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_install_gpe_handler
- *
- * PARAMETERS:  gpe_device      - Namespace node for the GPE (NULL for FADT
- *                                defined GPEs)
- *              gpe_number      - The GPE number within the GPE block
- *              Type            - Whether this GPE should be treated as an
- *                                edge- or level-triggered interrupt.
- *              Address         - Address of the handler
- *              Context         - Value passed to the handler on each GPE
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Install a handler for a General Purpose Event.
- *
- ******************************************************************************/
-acpi_status
-acpi_install_gpe_handler(acpi_handle gpe_device,
-			 u32 gpe_number,
-			 u32 type, acpi_event_handler address, void *context)
-{
-	struct acpi_gpe_event_info *gpe_event_info;
-	struct acpi_handler_info *handler;
-	acpi_status status;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);
-
-	/* Parameter validation */
-
-	if ((!address) || (type > ACPI_GPE_XRUPT_TYPE_MASK)) {
-		status = AE_BAD_PARAMETER;
-		goto exit;
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		goto exit;
-	}
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (!gpe_event_info) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/* Make sure that there isn't a handler there already */
-
-	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-	    ACPI_GPE_DISPATCH_HANDLER) {
-		status = AE_ALREADY_EXISTS;
-		goto unlock_and_exit;
-	}
-
-	/* Allocate and init handler object */
-
-	handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info));
-	if (!handler) {
-		status = AE_NO_MEMORY;
-		goto unlock_and_exit;
-	}
-
-	handler->address = address;
-	handler->context = context;
-	handler->method_node = gpe_event_info->dispatch.method_node;
-
-	/* Disable the GPE before installing the handler */
-
-	status = acpi_ev_disable_gpe(gpe_event_info);
-	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
-	}
-
-	/* Install the handler */
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-	gpe_event_info->dispatch.handler = handler;
-
-	/* Set up dispatch flags to indicate handler (vs. method) */
-
-	gpe_event_info->flags &= ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);	/* Clear bits */
-	gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER);
-
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-      exit:
-	if (ACPI_FAILURE(status))
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Installing GPE handler failed"));
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_remove_gpe_handler
- *
- * PARAMETERS:  gpe_device      - Namespace node for the GPE (NULL for FADT
- *                                defined GPEs)
- *              gpe_number      - The event to remove a handler
- *              Address         - Address of the handler
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove a handler for a General Purpose acpi_event.
- *
- ******************************************************************************/
-acpi_status
-acpi_remove_gpe_handler(acpi_handle gpe_device,
-			u32 gpe_number, acpi_event_handler address)
-{
-	struct acpi_gpe_event_info *gpe_event_info;
-	struct acpi_handler_info *handler;
-	acpi_status status;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(acpi_remove_gpe_handler);
-
-	/* Parameter validation */
-
-	if (!address) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (!gpe_event_info) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/* Make sure that a handler is indeed installed */
-
-	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
-	    ACPI_GPE_DISPATCH_HANDLER) {
-		status = AE_NOT_EXIST;
-		goto unlock_and_exit;
-	}
-
-	/* Make sure that the installed handler is the same */
-
-	if (gpe_event_info->dispatch.handler->address != address) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/* Disable the GPE before removing the handler */
-
-	status = acpi_ev_disable_gpe(gpe_event_info);
-	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
-	}
-
-	/* Make sure all deferred tasks are completed */
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-	acpi_os_wait_events_complete(NULL);
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Remove the handler */
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-	handler = gpe_event_info->dispatch.handler;
-
-	/* Restore Method node (if any), set dispatch flags */
-
-	gpe_event_info->dispatch.method_node = handler->method_node;
-	gpe_event_info->flags &= ~ACPI_GPE_DISPATCH_MASK;	/* Clear bits */
-	if (handler->method_node) {
-		gpe_event_info->flags |= ACPI_GPE_DISPATCH_METHOD;
-	}
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
-	/* Now we can free the handler object */
-
-	ACPI_FREE(handler);
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_acquire_global_lock
- *
- * PARAMETERS:  Timeout         - How long the caller is willing to wait
- *              Handle          - Where the handle to the lock is returned
- *                                (if acquired)
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Acquire the ACPI Global Lock
- *
- * Note: Allows callers with the same thread ID to acquire the global lock
- * multiple times. In other words, externally, the behavior of the global lock
- * is identical to an AML mutex. On the first acquire, a new handle is
- * returned. On any subsequent calls to acquire by the same thread, the same
- * handle is returned.
- *
- ******************************************************************************/
-acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
-{
-	acpi_status status;
-
-	if (!handle) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/* Must lock interpreter to prevent race conditions */
-
-	acpi_ex_enter_interpreter();
-
-	status = acpi_ex_acquire_mutex_object(timeout,
-					      acpi_gbl_global_lock_mutex,
-					      acpi_os_get_thread_id());
-
-	if (ACPI_SUCCESS(status)) {
-
-		/* Return the global lock handle (updated in acpi_ev_acquire_global_lock) */
-
-		*handle = acpi_gbl_global_lock_handle;
-	}
-
-	acpi_ex_exit_interpreter();
-	return (status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_acquire_global_lock)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_release_global_lock
- *
- * PARAMETERS:  Handle      - Returned from acpi_acquire_global_lock
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Release the ACPI Global Lock. The handle must be valid.
- *
- ******************************************************************************/
-acpi_status acpi_release_global_lock(u32 handle)
-{
-	acpi_status status;
-
-	if (!handle || (handle != acpi_gbl_global_lock_handle)) {
-		return (AE_NOT_ACQUIRED);
-	}
-
-	status = acpi_ex_release_mutex_object(acpi_gbl_global_lock_mutex);
-	return (status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_release_global_lock)
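
For reference, a minimal usage sketch of the notify interfaces deleted above. The my_ prefixed names are hypothetical, and the acpi_notify_handler prototype (device handle, notify value, caller context) is assumed from ACPICA convention; the install/remove signatures match the declarations removed in this file.

#include <acpi/acpi.h>

/* Hypothetical notify handler; 0x00-0x7f are system notifies,
 * 0x80-0xff are device notifies, per the handler_type comments above. */
static void my_notify(acpi_handle device, u32 value, void *context)
{
	/* React to the notify value for this device */
}

static acpi_status my_register_notify(acpi_handle device)
{
	/* ACPI_DEVICE_NOTIFY selects the 0x80-0xff range documented above */
	return acpi_install_notify_handler(device, ACPI_DEVICE_NOTIFY,
					   my_notify, NULL);
}

static void my_unregister_notify(acpi_handle device)
{
	acpi_remove_notify_handler(device, ACPI_DEVICE_NOTIFY, my_notify);
}
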
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c
deleted file mode 100644
index 41554f7..0000000
--- a/drivers/acpi/events/evxfevnt.c
+++ /dev/null
@@ -1,719 +0,0 @@
-/******************************************************************************
- *
- * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acevents.h>
-#include <acpi/acnamesp.h>
-#include <acpi/actables.h>
-
-#define _COMPONENT          ACPI_EVENTS
-ACPI_MODULE_NAME("evxfevnt")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_enable
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Transfers the system into ACPI mode.
- *
- ******************************************************************************/
-acpi_status acpi_enable(void)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(acpi_enable);
-
-	/* ACPI tables must be present */
-
-	if (!acpi_tb_tables_loaded()) {
-		return_ACPI_STATUS(AE_NO_ACPI_TABLES);
-	}
-
-	/* Check current mode */
-
-	if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
-				  "System is already in ACPI mode\n"));
-	} else {
-		/* Transition to ACPI mode */
-
-		status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
-		if (ACPI_FAILURE(status)) {
-			ACPI_ERROR((AE_INFO,
-				    "Could not transition to ACPI mode"));
-			return_ACPI_STATUS(status);
-		}
-
-		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
-				  "Transition to ACPI mode successful\n"));
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_enable)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_disable
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Transfers the system into LEGACY (non-ACPI) mode.
- *
- ******************************************************************************/
-acpi_status acpi_disable(void)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(acpi_disable);
-
-	if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
-				  "System is already in legacy (non-ACPI) mode\n"));
-	} else {
-		/* Transition to LEGACY mode */
-
-		status = acpi_hw_set_mode(ACPI_SYS_MODE_LEGACY);
-
-		if (ACPI_FAILURE(status)) {
-			ACPI_ERROR((AE_INFO,
-				    "Could not exit ACPI mode to legacy mode"));
-			return_ACPI_STATUS(status);
-		}
-
-		ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI mode disabled\n"));
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_disable)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_enable_event
- *
- * PARAMETERS:  Event           - The fixed event to be enabled
- *              Flags           - Reserved
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Enable an ACPI event (fixed)
- *
- ******************************************************************************/
-acpi_status acpi_enable_event(u32 event, u32 flags)
-{
-	acpi_status status = AE_OK;
-	u32 value;
-
-	ACPI_FUNCTION_TRACE(acpi_enable_event);
-
-	/* Decode the Fixed Event */
-
-	if (event > ACPI_EVENT_MAX) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/*
-	 * Enable the requested fixed event (by writing a one to the
-	 * enable register bit)
-	 */
-	status =
-	    acpi_set_register(acpi_gbl_fixed_event_info[event].
-			      enable_register_id, 1);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Make sure that the hardware responded */
-
-	status =
-	    acpi_get_register(acpi_gbl_fixed_event_info[event].
-			      enable_register_id, &value);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	if (value != 1) {
-		ACPI_ERROR((AE_INFO,
-			    "Could not enable %s event",
-			    acpi_ut_get_event_name(event)));
-		return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_enable_event)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_set_gpe_type
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device
- *              gpe_number      - GPE level within the GPE block
- *              Type            - New GPE type
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Set the type of an individual GPE
- *
- ******************************************************************************/
-acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type)
-{
-	acpi_status status = AE_OK;
-	struct acpi_gpe_event_info *gpe_event_info;
-
-	ACPI_FUNCTION_TRACE(acpi_set_gpe_type);
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (!gpe_event_info) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	if ((gpe_event_info->flags & ACPI_GPE_TYPE_MASK) == type) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Set the new type (will disable GPE if currently enabled) */
-
-	status = acpi_ev_set_gpe_type(gpe_event_info, type);
-
-      unlock_and_exit:
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_set_gpe_type)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_enable_gpe
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device
- *              gpe_number      - GPE level within the GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Enable an ACPI event (general purpose)
- *
- ******************************************************************************/
-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
-{
-	acpi_status status = AE_OK;
-	acpi_cpu_flags flags;
-	struct acpi_gpe_event_info *gpe_event_info;
-
-	ACPI_FUNCTION_TRACE(acpi_enable_gpe);
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (!gpe_event_info) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/* Perform the enable */
-
-	status = acpi_ev_enable_gpe(gpe_event_info, TRUE);
-
-      unlock_and_exit:
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_disable_gpe
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device
- *              gpe_number      - GPE level within the GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Disable an ACPI event (general purpose)
- *
- ******************************************************************************/
-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
-{
-	acpi_status status = AE_OK;
-	acpi_cpu_flags flags;
-	struct acpi_gpe_event_info *gpe_event_info;
-
-	ACPI_FUNCTION_TRACE(acpi_disable_gpe);
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (!gpe_event_info) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	status = acpi_ev_disable_gpe(gpe_event_info);
-
-unlock_and_exit:
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_disable_event
- *
- * PARAMETERS:  Event           - The fixed event to be disabled
- *              Flags           - Reserved
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Disable an ACPI event (fixed)
- *
- ******************************************************************************/
-acpi_status acpi_disable_event(u32 event, u32 flags)
-{
-	acpi_status status = AE_OK;
-	u32 value;
-
-	ACPI_FUNCTION_TRACE(acpi_disable_event);
-
-	/* Decode the Fixed Event */
-
-	if (event > ACPI_EVENT_MAX) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/*
-	 * Disable the requested fixed event (by writing a zero to the
-	 * enable register bit)
-	 */
-	status =
-	    acpi_set_register(acpi_gbl_fixed_event_info[event].
-			      enable_register_id, 0);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status =
-	    acpi_get_register(acpi_gbl_fixed_event_info[event].
-			      enable_register_id, &value);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	if (value != 0) {
-		ACPI_ERROR((AE_INFO,
-			    "Could not disable %s events",
-			    acpi_ut_get_event_name(event)));
-		return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_disable_event)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_clear_event
- *
- * PARAMETERS:  Event           - The fixed event to be cleared
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Clear an ACPI event (fixed)
- *
- ******************************************************************************/
-acpi_status acpi_clear_event(u32 event)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(acpi_clear_event);
-
-	/* Decode the Fixed Event */
-
-	if (event > ACPI_EVENT_MAX) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/*
-	 * Clear the requested fixed event (By writing a one to the
-	 * status register bit)
-	 */
-	status =
-	    acpi_set_register(acpi_gbl_fixed_event_info[event].
-			      status_register_id, 1);
-
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_clear_event)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_clear_gpe
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device
- *              gpe_number      - GPE level within the GPE block
- *              Flags           - Called from an ISR or not
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Clear an ACPI event (general purpose)
- *
- ******************************************************************************/
-acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
-{
-	acpi_status status = AE_OK;
-	struct acpi_gpe_event_info *gpe_event_info;
-
-	ACPI_FUNCTION_TRACE(acpi_clear_gpe);
-
-	/* Use semaphore lock if not executing at interrupt level */
-
-	if (flags & ACPI_NOT_ISR) {
-		status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (!gpe_event_info) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	status = acpi_hw_clear_gpe(gpe_event_info);
-
-      unlock_and_exit:
-	if (flags & ACPI_NOT_ISR) {
-		(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-	}
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_event_status
- *
- * PARAMETERS:  Event           - The fixed event
- *              event_status    - Where the current status of the event will
- *                                be returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Obtains and returns the current status of the event
- *
- ******************************************************************************/
-acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
-{
-	acpi_status status = AE_OK;
-	u32 value;
-
-	ACPI_FUNCTION_TRACE(acpi_get_event_status);
-
-	if (!event_status) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Decode the Fixed Event */
-
-	if (event > ACPI_EVENT_MAX) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Get the status of the requested fixed event */
-
-	status =
-	    acpi_get_register(acpi_gbl_fixed_event_info[event].
-			      enable_register_id, &value);
-	if (ACPI_FAILURE(status))
-		return_ACPI_STATUS(status);
-
-	*event_status = value;
-
-	status =
-	    acpi_get_register(acpi_gbl_fixed_event_info[event].
-			      status_register_id, &value);
-	if (ACPI_FAILURE(status))
-		return_ACPI_STATUS(status);
-
-	if (value)
-		*event_status |= ACPI_EVENT_FLAG_SET;
-
-	if (acpi_gbl_fixed_event_handlers[event].handler)
-		*event_status |= ACPI_EVENT_FLAG_HANDLE;
-
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_event_status)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_gpe_status
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device
- *              gpe_number      - GPE level within the GPE block
- *              Flags           - Called from an ISR or not
- *              event_status    - Where the current status of the event will
- *                                be returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Get status of an event (general purpose)
- *
- ******************************************************************************/
-acpi_status
-acpi_get_gpe_status(acpi_handle gpe_device,
-		    u32 gpe_number, u32 flags, acpi_event_status * event_status)
-{
-	acpi_status status = AE_OK;
-	struct acpi_gpe_event_info *gpe_event_info;
-
-	ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
-
-	/* Use semaphore lock if not executing at interrupt level */
-
-	if (flags & ACPI_NOT_ISR) {
-		status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (!gpe_event_info) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/* Obtain status on the requested GPE number */
-
-	status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
-
-	if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
-		*event_status |= ACPI_EVENT_FLAG_HANDLE;
-
-      unlock_and_exit:
-	if (flags & ACPI_NOT_ISR) {
-		(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-	}
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
-/*******************************************************************************
- *
- * FUNCTION:    acpi_install_gpe_block
- *
- * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
- *              gpe_block_address   - Address and space_id
- *              register_count      - Number of GPE register pairs in the block
- *              interrupt_number    - H/W interrupt for the block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create and Install a block of GPE registers
- *
- ******************************************************************************/
-acpi_status
-acpi_install_gpe_block(acpi_handle gpe_device,
-		       struct acpi_generic_address *gpe_block_address,
-		       u32 register_count, u32 interrupt_number)
-{
-	acpi_status status;
-	union acpi_operand_object *obj_desc;
-	struct acpi_namespace_node *node;
-	struct acpi_gpe_block_info *gpe_block;
-
-	ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
-
-	if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	node = acpi_ns_map_handle_to_node(gpe_device);
-	if (!node) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/*
-	 * For user-installed GPE Block Devices, the gpe_block_base_number
-	 * is always zero
-	 */
-	status =
-	    acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0,
-				     interrupt_number, &gpe_block);
-	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
-	}
-
-	/* Run the _PRW methods and enable the GPEs */
-
-	status = acpi_ev_initialize_gpe_block(node, gpe_block);
-	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
-	}
-
-	/* Get the device_object attached to the node */
-
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (!obj_desc) {
-
-		/* No object, create a new one */
-
-		obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
-		if (!obj_desc) {
-			status = AE_NO_MEMORY;
-			goto unlock_and_exit;
-		}
-
-		status =
-		    acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
-
-		/* Remove local reference to the object */
-
-		acpi_ut_remove_reference(obj_desc);
-
-		if (ACPI_FAILURE(status)) {
-			goto unlock_and_exit;
-		}
-	}
-
-	/* Install the GPE block in the device_object */
-
-	obj_desc->device.gpe_block = gpe_block;
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_remove_gpe_block
- *
- * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove a previously installed block of GPE registers
- *
- ******************************************************************************/
-acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-	struct acpi_namespace_node *node;
-
-	ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
-
-	if (!gpe_device) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	node = acpi_ns_map_handle_to_node(gpe_device);
-	if (!node) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/* Get the device_object attached to the node */
-
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (!obj_desc || !obj_desc->device.gpe_block) {
-		return_ACPI_STATUS(AE_NULL_OBJECT);
-	}
-
-	/* Delete the GPE block (but not the device_object) */
-
-	status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
-	if (ACPI_SUCCESS(status)) {
-		obj_desc->device.gpe_block = NULL;
-	}
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
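
For reference, a minimal sketch of driving the fixed-event interfaces deleted above: enable, poll and clear one fixed event. The my_ prefixed function is hypothetical, and ACPI_EVENT_POWER_BUTTON is assumed to be one of the standard fixed-event indices; the call signatures match the definitions removed in this file.

#include <acpi/acpi.h>

static acpi_status my_poll_power_button(void)
{
	acpi_event_status event_status;
	acpi_status status;

	/* Second argument (Flags) is reserved in acpi_enable_event() */
	status = acpi_enable_event(ACPI_EVENT_POWER_BUTTON, 0);
	if (ACPI_FAILURE(status))
		return status;

	status = acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &event_status);
	if (ACPI_FAILURE(status))
		return status;

	/* ACPI_EVENT_FLAG_SET means the status bit is latched; clear it */
	if (event_status & ACPI_EVENT_FLAG_SET)
		status = acpi_clear_event(ACPI_EVENT_POWER_BUTTON);

	return status;
}
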
diff --git a/drivers/acpi/events/evxfregn.c b/drivers/acpi/events/evxfregn.c
deleted file mode 100644
index e875080..0000000
--- a/drivers/acpi/events/evxfregn.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/******************************************************************************
- *
- * Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
- *                         Address Spaces.
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acevents.h>
-
-#define _COMPONENT          ACPI_EVENTS
-ACPI_MODULE_NAME("evxfregn")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_install_address_space_handler
- *
- * PARAMETERS:  Device          - Handle for the device
- *              space_id        - The address space ID
- *              Handler         - Address of the handler
- *              Setup           - Address of the setup function
- *              Context         - Value passed to the handler on each access
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Install a handler for all op_regions of a given space_id.
- *
- ******************************************************************************/
-acpi_status
-acpi_install_address_space_handler(acpi_handle device,
-				   acpi_adr_space_type space_id,
-				   acpi_adr_space_handler handler,
-				   acpi_adr_space_setup setup, void *context)
-{
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_install_address_space_handler);
-
-	/* Parameter validation */
-
-	if (!device) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Convert and validate the device handle */
-
-	node = acpi_ns_map_handle_to_node(device);
-	if (!node) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/* Install the handler for all Regions for this Space ID */
-
-	status =
-	    acpi_ev_install_space_handler(node, space_id, handler, setup,
-					  context);
-	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
-	}
-
-	/* Run all _REG methods for this address space */
-
-	status = acpi_ev_execute_reg_methods(node, space_id);
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler)
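Editor's note: for reference, this is roughly how a driver consumes the interface being moved here — define an operation-region handler and install it for a space ID below its device handle, after which the code above runs the matching _REG methods. This is only a hedged sketch: my_region_handler and my_driver_attach are hypothetical, the callback signature is an assumption based on the acpi_adr_space_handler type of this ACPICA generation, and ACPI_ADR_SPACE_EC is just an example space ID.

	#include <acpi/acpi.h>

	/* Hypothetical operation-region handler (assumed callback signature). */
	static acpi_status my_region_handler(u32 function,
					     acpi_physical_address address,
					     u32 bit_width, acpi_integer *value,
					     void *handler_context,
					     void *region_context)
	{
		if (function == ACPI_READ) {
			*value = 0;	/* fetch the value from the device here */
		} else {
			/* ACPI_WRITE: push *value out to the device here */
		}

		return AE_OK;
	}

	/* Install the handler for every op_region with this space ID under
	 * 'device'; passing NULL for Setup selects the default region setup. */
	static acpi_status my_driver_attach(acpi_handle device, void *driver_data)
	{
		return acpi_install_address_space_handler(device,
							  ACPI_ADR_SPACE_EC,
							  &my_region_handler,
							  NULL, driver_data);
	}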
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_remove_address_space_handler
- *
- * PARAMETERS:  Device          - Handle for the device
- *              space_id        - The address space ID
- *              Handler         - Address of the handler
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove a previously installed handler.
- *
- ******************************************************************************/
-acpi_status
-acpi_remove_address_space_handler(acpi_handle device,
-				  acpi_adr_space_type space_id,
-				  acpi_adr_space_handler handler)
-{
-	union acpi_operand_object *obj_desc;
-	union acpi_operand_object *handler_obj;
-	union acpi_operand_object *region_obj;
-	union acpi_operand_object **last_obj_ptr;
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_remove_address_space_handler);
-
-	/* Parameter validation */
-
-	if (!device) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Convert and validate the device handle */
-
-	node = acpi_ns_map_handle_to_node(device);
-	if (!node ||
-	    ((node->type != ACPI_TYPE_DEVICE) &&
-	     (node->type != ACPI_TYPE_PROCESSOR) &&
-	     (node->type != ACPI_TYPE_THERMAL) &&
-	     (node != acpi_gbl_root_node))) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/* Make sure the internal object exists */
-
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (!obj_desc) {
-		status = AE_NOT_EXIST;
-		goto unlock_and_exit;
-	}
-
-	/* Find the address handler the user requested */
-
-	handler_obj = obj_desc->device.handler;
-	last_obj_ptr = &obj_desc->device.handler;
-	while (handler_obj) {
-
-		/* We have a handler, see if user requested this one */
-
-		if (handler_obj->address_space.space_id == space_id) {
-
-			/* Handler must be the same as the installed handler */
-
-			if (handler_obj->address_space.handler != handler) {
-				status = AE_BAD_PARAMETER;
-				goto unlock_and_exit;
-			}
-
-			/* Matched space_id, first dereference this in the Regions */
-
-			ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-					  "Removing address handler %p(%p) for region %s on Device %p(%p)\n",
-					  handler_obj, handler,
-					  acpi_ut_get_region_name(space_id),
-					  node, obj_desc));
-
-			region_obj = handler_obj->address_space.region_list;
-
-			/* Walk the handler's region list */
-
-			while (region_obj) {
-				/*
-				 * First disassociate the handler from the region.
-				 *
-				 * NOTE: this doesn't mean that the region goes away.
-				 * The region is just inaccessible, as indicated to
-				 * the _REG method.
-				 */
-				acpi_ev_detach_region(region_obj, TRUE);
-
-				/*
-				 * Walk the list: Just grab the head because the
-				 * detach_region removed the previous head.
-				 */
-				region_obj =
-				    handler_obj->address_space.region_list;
-
-			}
-
-			/* Remove this Handler object from the list */
-
-			*last_obj_ptr = handler_obj->address_space.next;
-
-			/* Now we can delete the handler object */
-
-			acpi_ut_remove_reference(handler_obj);
-			goto unlock_and_exit;
-		}
-
-		/* Walk the linked list of handlers */
-
-		last_obj_ptr = &handler_obj->address_space.next;
-		handler_obj = handler_obj->address_space.next;
-	}
-
-	/* The handler does not exist */
-
-	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-			  "Unable to remove address handler %p for %s(%X), DevNode %p, obj %p\n",
-			  handler, acpi_ut_get_region_name(space_id), space_id,
-			  node, obj_desc));
-
-	status = AE_NOT_EXIST;
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
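Editor's note: the matching teardown path, continuing the hypothetical sketch above. Removal must pass the same handler pointer that was installed; otherwise the removed code returns AE_BAD_PARAMETER, or AE_NOT_EXIST if no handler is installed for that space ID.

	#include <linux/kernel.h>
	#include <acpi/acpi.h>

	/* Tear-down counterpart to the install sketch above (hypothetical). */
	static void my_driver_detach(acpi_handle device)
	{
		acpi_status status;

		status = acpi_remove_address_space_handler(device,
							   ACPI_ADR_SPACE_EC,
							   &my_region_handler);
		if (ACPI_FAILURE(status))
			printk(KERN_WARNING "my_driver: handler removal failed\n");
	}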
diff --git a/drivers/acpi/executer/Makefile b/drivers/acpi/executer/Makefile
deleted file mode 100644
index e09998a..0000000
--- a/drivers/acpi/executer/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-#
-# Makefile for all Linux ACPI interpreter subdirectories
-#
-
-obj-y := exconfig.o  exfield.o  exnames.o   exoparg6.o  exresolv.o  exstorob.o\
-	 exconvrt.o  exfldio.o  exoparg1.o  exprep.o    exresop.o   exsystem.o\
-	 excreate.o  exmisc.o   exoparg2.o  exregion.o  exstore.o   exutils.o \
-	 exdump.o    exmutex.o  exoparg3.o  exresnte.o  exstoren.o
-
-EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
deleted file mode 100644
index 74da6fa..0000000
--- a/drivers/acpi/executer/exconfig.c
+++ /dev/null
@@ -1,535 +0,0 @@
-/******************************************************************************
- *
- * Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/acnamesp.h>
-#include <acpi/actables.h>
-#include <acpi/acdispat.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exconfig")
-
-/* Local prototypes */
-static acpi_status
-acpi_ex_add_table(u32 table_index,
-		  struct acpi_namespace_node *parent_node,
-		  union acpi_operand_object **ddb_handle);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_add_table
- *
- * PARAMETERS:  Table               - Pointer to raw table
- *              parent_node         - Where to load the table (scope)
- *              ddb_handle          - Where to return the table handle.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Common function to Install and Load an ACPI table with a
- *              returned table handle.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ex_add_table(u32 table_index,
-		  struct acpi_namespace_node *parent_node,
-		  union acpi_operand_object **ddb_handle)
-{
-	acpi_status status;
-	union acpi_operand_object *obj_desc;
-
-	ACPI_FUNCTION_TRACE(ex_add_table);
-
-	/* Create an object to be the table handle */
-
-	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE);
-	if (!obj_desc) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Init the table handle */
-
-	obj_desc->reference.class = ACPI_REFCLASS_TABLE;
-	*ddb_handle = obj_desc;
-
-	/* Install the new table into the local data structures */
-
-	obj_desc->reference.value = table_index;
-
-	/* Add the table to the namespace */
-
-	status = acpi_ns_load_table(table_index, parent_node);
-	if (ACPI_FAILURE(status)) {
-		acpi_ut_remove_reference(obj_desc);
-		*ddb_handle = NULL;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_load_table_op
- *
- * PARAMETERS:  walk_state          - Current state with operands
- *              return_desc         - Where to store the return object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Load an ACPI table from the RSDT/XSDT
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
-		      union acpi_operand_object **return_desc)
-{
-	acpi_status status;
-	union acpi_operand_object **operand = &walk_state->operands[0];
-	struct acpi_namespace_node *parent_node;
-	struct acpi_namespace_node *start_node;
-	struct acpi_namespace_node *parameter_node = NULL;
-	union acpi_operand_object *ddb_handle;
-	struct acpi_table_header *table;
-	u32 table_index;
-
-	ACPI_FUNCTION_TRACE(ex_load_table_op);
-
-	/* Validate lengths for the signature_string, oem_id_string, and oem_table_id */
-
-	if ((operand[0]->string.length > ACPI_NAME_SIZE) ||
-	    (operand[1]->string.length > ACPI_OEM_ID_SIZE) ||
-	    (operand[2]->string.length > ACPI_OEM_TABLE_ID_SIZE)) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Find the ACPI table in the RSDT/XSDT */
-
-	status = acpi_tb_find_table(operand[0]->string.pointer,
-				    operand[1]->string.pointer,
-				    operand[2]->string.pointer, &table_index);
-	if (ACPI_FAILURE(status)) {
-		if (status != AE_NOT_FOUND) {
-			return_ACPI_STATUS(status);
-		}
-
-		/* Table not found, return an Integer=0 and AE_OK */
-
-		ddb_handle = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-		if (!ddb_handle) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		ddb_handle->integer.value = 0;
-		*return_desc = ddb_handle;
-
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Default nodes */
-
-	start_node = walk_state->scope_info->scope.node;
-	parent_node = acpi_gbl_root_node;
-
-	/* root_path (optional parameter) */
-
-	if (operand[3]->string.length > 0) {
-		/*
-		 * Find the node referenced by the root_path_string. This is the
-		 * location within the namespace where the table will be loaded.
-		 */
-		status =
-		    acpi_ns_get_node(start_node, operand[3]->string.pointer,
-				     ACPI_NS_SEARCH_PARENT, &parent_node);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/* parameter_path (optional parameter) */
-
-	if (operand[4]->string.length > 0) {
-		if ((operand[4]->string.pointer[0] != '\\') &&
-		    (operand[4]->string.pointer[0] != '^')) {
-			/*
-			 * Path is not absolute, so it will be relative to the node
-			 * referenced by the root_path_string (or the NS root if omitted)
-			 */
-			start_node = parent_node;
-		}
-
-		/* Find the node referenced by the parameter_path_string */
-
-		status =
-		    acpi_ns_get_node(start_node, operand[4]->string.pointer,
-				     ACPI_NS_SEARCH_PARENT, &parameter_node);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/* Load the table into the namespace */
-
-	status = acpi_ex_add_table(table_index, parent_node, &ddb_handle);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Parameter Data (optional) */
-
-	if (parameter_node) {
-
-		/* Store the parameter data into the optional parameter object */
-
-		status = acpi_ex_store(operand[5],
-				       ACPI_CAST_PTR(union acpi_operand_object,
-						     parameter_node),
-				       walk_state);
-		if (ACPI_FAILURE(status)) {
-			(void)acpi_ex_unload_table(ddb_handle);
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	status = acpi_get_table_by_index(table_index, &table);
-	if (ACPI_SUCCESS(status)) {
-		ACPI_INFO((AE_INFO,
-			   "Dynamic OEM Table Load - [%.4s] OemId [%.6s] OemTableId [%.8s]",
-			   table->signature, table->oem_id,
-			   table->oem_table_id));
-	}
-
-	/* Invoke table handler if present */
-
-	if (acpi_gbl_table_handler) {
-		(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table,
-					     acpi_gbl_table_handler_context);
-	}
-
-	*return_desc = ddb_handle;
-	return_ACPI_STATUS(status);
-}
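Editor's note: acpi_gbl_table_handler, invoked above with ACPI_TABLE_EVENT_LOAD, is the global callback an OS can register to observe dynamic table loads and unloads. A hedged sketch of such a callback follows; my_table_handler is hypothetical and acpi_install_table_handler() is assumed to be the registration entry point with this signature.

	#include <acpi/acpi.h>

	/* Hypothetical notification callback for dynamic Load/LoadTable and
	 * Unload events; 'table' points at the struct acpi_table_header. */
	static acpi_status my_table_handler(u32 event, void *table, void *context)
	{
		struct acpi_table_header *header = table;

		if (event == ACPI_TABLE_EVENT_LOAD)
			ACPI_INFO((AE_INFO, "Table [%.4s] dynamically loaded",
				   header->signature));

		return AE_OK;
	}

	/* Registration, typically done once during init (assumed API). */
	static acpi_status my_register_table_handler(void)
	{
		return acpi_install_table_handler(my_table_handler, NULL);
	}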
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_load_op
- *
- * PARAMETERS:  obj_desc        - Region or Buffer/Field where the table will be
- *                                obtained
- *              Target          - Where a handle to the table will be stored
- *              walk_state      - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Load an ACPI table from a field or operation region
- *
- * NOTE: Region Fields (Field, bank_field, index_fields) are resolved to buffer
- *       objects before this code is reached.
- *
- *       If source is an operation region, it must refer to system_memory, as
- *       per the ACPI specification.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_load_op(union acpi_operand_object *obj_desc,
-		union acpi_operand_object *target,
-		struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object *ddb_handle;
-	struct acpi_table_header *table;
-	struct acpi_table_desc table_desc;
-	u32 table_index;
-	acpi_status status;
-	u32 length;
-
-	ACPI_FUNCTION_TRACE(ex_load_op);
-
-	ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
-
-	/* Source Object can be either an op_region or a Buffer/Field */
-
-	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-	case ACPI_TYPE_REGION:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Load table from Region %p\n", obj_desc));
-
-		/* Region must be system_memory (from ACPI spec) */
-
-		if (obj_desc->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
-			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-		}
-
-		/*
-		 * If the Region Address and Length have not been previously evaluated,
-		 * evaluate them now and save the results.
-		 */
-		if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
-			status = acpi_ds_get_region_arguments(obj_desc);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-		}
-
-		/*
-		 * Map the table header and get the actual table length. The region
-		 * length is not guaranteed to be the same as the table length.
-		 */
-		table = acpi_os_map_memory(obj_desc->region.address,
-					   sizeof(struct acpi_table_header));
-		if (!table) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		length = table->length;
-		acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
-
-		/* Must have at least an ACPI table header */
-
-		if (length < sizeof(struct acpi_table_header)) {
-			return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
-		}
-
-		/*
-		 * The memory region is not guaranteed to remain stable and we must
-		 * copy the table to a local buffer. For example, the memory region
-		 * is corrupted after suspend on some machines. Dynamically loaded
-		 * tables are usually small, so this overhead is minimal.
-		 */
-
-		/* Allocate a buffer for the table */
-
-		table_desc.pointer = ACPI_ALLOCATE(length);
-		if (!table_desc.pointer) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		/* Map the entire table and copy it */
-
-		table = acpi_os_map_memory(obj_desc->region.address, length);
-		if (!table) {
-			ACPI_FREE(table_desc.pointer);
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		ACPI_MEMCPY(table_desc.pointer, table, length);
-		acpi_os_unmap_memory(table, length);
-
-		table_desc.address = obj_desc->region.address;
-		break;
-
-	case ACPI_TYPE_BUFFER:	/* Buffer or resolved region_field */
-
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Load table from Buffer or Field %p\n",
-				  obj_desc));
-
-		/* Must have at least an ACPI table header */
-
-		if (obj_desc->buffer.length < sizeof(struct acpi_table_header)) {
-			return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
-		}
-
-		/* Get the actual table length from the table header */
-
-		table =
-		    ACPI_CAST_PTR(struct acpi_table_header,
-				  obj_desc->buffer.pointer);
-		length = table->length;
-
-		/* Table cannot extend beyond the buffer */
-
-		if (length > obj_desc->buffer.length) {
-			return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
-		}
-		if (length < sizeof(struct acpi_table_header)) {
-			return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
-		}
-
-		/*
-		 * Copy the table from the buffer because the buffer could be modified
-		 * or even deleted in the future
-		 */
-		table_desc.pointer = ACPI_ALLOCATE(length);
-		if (!table_desc.pointer) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		ACPI_MEMCPY(table_desc.pointer, table, length);
-		table_desc.address = ACPI_TO_INTEGER(table_desc.pointer);
-		break;
-
-	default:
-		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-	}
-
-	/* Validate table checksum (will not get validated in tb_add_table) */
-
-	status = acpi_tb_verify_checksum(table_desc.pointer, length);
-	if (ACPI_FAILURE(status)) {
-		ACPI_FREE(table_desc.pointer);
-		return_ACPI_STATUS(status);
-	}
-
-	/* Complete the table descriptor */
-
-	table_desc.length = length;
-	table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED;
-
-	/* Install the new table into the local data structures */
-
-	status = acpi_tb_add_table(&table_desc, &table_index);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	/*
-	 * Add the table to the namespace.
-	 *
-	 * Note: Load the table objects relative to the root of the namespace.
-	 * This appears to go against the ACPI specification, but we do it for
-	 * compatibility with other ACPI implementations.
-	 */
-	status =
-	    acpi_ex_add_table(table_index, acpi_gbl_root_node, &ddb_handle);
-	if (ACPI_FAILURE(status)) {
-
-		/* On error, table_ptr was deallocated above */
-
-		return_ACPI_STATUS(status);
-	}
-
-	/* Store the ddb_handle into the Target operand */
-
-	status = acpi_ex_store(ddb_handle, target, walk_state);
-	if (ACPI_FAILURE(status)) {
-		(void)acpi_ex_unload_table(ddb_handle);
-
-		/* table_ptr was deallocated above */
-
-		acpi_ut_remove_reference(ddb_handle);
-		return_ACPI_STATUS(status);
-	}
-
-	/* Invoke table handler if present */
-
-	if (acpi_gbl_table_handler) {
-		(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD,
-					     table_desc.pointer,
-					     acpi_gbl_table_handler_context);
-	}
-
-      cleanup:
-	if (ACPI_FAILURE(status)) {
-
-		/* Delete allocated table buffer */
-
-		acpi_tb_delete_table(&table_desc);
-	}
-	return_ACPI_STATUS(status);
-}
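Editor's note: the Region branch above deliberately maps the source twice — once for just the header, to learn the real table length, and once for the whole table, which is then copied into an allocated buffer because the memory region may not stay stable. The helper below condenses that pattern; copy_table_from_memory is hypothetical and omits the checksum and namespace steps that follow in the real code.

	#include <acpi/acpi.h>

	/* Hypothetical helper: map the header, read the true length, then map
	 * and copy the whole table into a buffer owned by the caller. */
	static acpi_status copy_table_from_memory(acpi_physical_address address,
						  struct acpi_table_header **out)
	{
		struct acpi_table_header *mapped;
		u32 length;

		/* Stage 1: map only the header to discover the full length */
		mapped = acpi_os_map_memory(address,
					    sizeof(struct acpi_table_header));
		if (!mapped)
			return AE_NO_MEMORY;
		length = mapped->length;
		acpi_os_unmap_memory(mapped, sizeof(struct acpi_table_header));

		if (length < sizeof(struct acpi_table_header))
			return AE_INVALID_TABLE_LENGTH;

		/* Stage 2: copy the whole table into stable local memory */
		*out = ACPI_ALLOCATE(length);
		if (!*out)
			return AE_NO_MEMORY;

		mapped = acpi_os_map_memory(address, length);
		if (!mapped) {
			ACPI_FREE(*out);
			return AE_NO_MEMORY;
		}
		ACPI_MEMCPY(*out, mapped, length);
		acpi_os_unmap_memory(mapped, length);
		return AE_OK;
	}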
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_unload_table
- *
- * PARAMETERS:  ddb_handle          - Handle to a previously loaded table
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Unload an ACPI table
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
-{
-	acpi_status status = AE_OK;
-	union acpi_operand_object *table_desc = ddb_handle;
-	u32 table_index;
-	struct acpi_table_header *table;
-
-	ACPI_FUNCTION_TRACE(ex_unload_table);
-
-	/*
-	 * Validate the handle. Although it is partially validated when
-	 * acpi_ex_reconfiguration() calls acpi_ex_resolve_operands(), the
-	 * handle is validated more completely here.
-	 */
-	if ((!ddb_handle) ||
-	    (ACPI_GET_DESCRIPTOR_TYPE(ddb_handle) != ACPI_DESC_TYPE_OPERAND) ||
-	    (ACPI_GET_OBJECT_TYPE(ddb_handle) != ACPI_TYPE_LOCAL_REFERENCE)) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Get the table index from the ddb_handle */
-
-	table_index = table_desc->reference.value;
-
-	/* Invoke table handler if present */
-
-	if (acpi_gbl_table_handler) {
-		status = acpi_get_table_by_index(table_index, &table);
-		if (ACPI_SUCCESS(status)) {
-			(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
-						     table,
-						     acpi_gbl_table_handler_context);
-		}
-	}
-
-	/*
-	 * Delete the entire namespace under this table Node
-	 * (Offset contains the table_id)
-	 */
-	acpi_tb_delete_namespace_by_owner(table_index);
-	(void)acpi_tb_release_owner_id(table_index);
-
-	acpi_tb_set_table_loaded_flag(table_index, FALSE);
-
-	/* Table unloaded, remove a reference to the ddb_handle object */
-
-	acpi_ut_remove_reference(ddb_handle);
-	return_ACPI_STATUS(AE_OK);
-}
diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/executer/exconvrt.c
deleted file mode 100644
index 1d1f35a..0000000
--- a/drivers/acpi/executer/exconvrt.c
+++ /dev/null
@@ -1,691 +0,0 @@
-/******************************************************************************
- *
- * Module Name: exconvrt - Object conversion routines
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exconvrt")
-
-/* Local prototypes */
-static u32
-acpi_ex_convert_to_ascii(acpi_integer integer,
-			 u16 base, u8 * string, u8 max_length);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_convert_to_integer
- *
- * PARAMETERS:  obj_desc        - Object to be converted. Must be an
- *                                Integer, Buffer, or String
- *              result_desc     - Where the new Integer object is returned
- *              Flags           - Used for string conversion
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Convert an ACPI Object to an integer.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
-			   union acpi_operand_object **result_desc, u32 flags)
-{
-	union acpi_operand_object *return_desc;
-	u8 *pointer;
-	acpi_integer result;
-	u32 i;
-	u32 count;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_convert_to_integer, obj_desc);
-
-	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-	case ACPI_TYPE_INTEGER:
-
-		/* No conversion necessary */
-
-		*result_desc = obj_desc;
-		return_ACPI_STATUS(AE_OK);
-
-	case ACPI_TYPE_BUFFER:
-	case ACPI_TYPE_STRING:
-
-		/* Note: Takes advantage of common buffer/string fields */
-
-		pointer = obj_desc->buffer.pointer;
-		count = obj_desc->buffer.length;
-		break;
-
-	default:
-		return_ACPI_STATUS(AE_TYPE);
-	}
-
-	/*
-	 * Convert the buffer/string to an integer. Note that both buffers and
-	 * strings are treated as raw data - we don't convert ascii to hex for
-	 * strings.
-	 *
-	 * There are two terminating conditions for the loop:
-	 * 1) The size of an integer has been reached, or
-	 * 2) The end of the buffer or string has been reached
-	 */
-	result = 0;
-
-	/* String conversion is different than Buffer conversion */
-
-	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-	case ACPI_TYPE_STRING:
-
-		/*
-		 * Convert string to an integer - for most cases, the string must be
-		 * hexadecimal as per the ACPI specification. The only exception (as
-		 * of ACPI 3.0) is that the to_integer() operator allows both decimal
-		 * and hexadecimal strings (hex prefixed with "0x").
-		 */
-		status = acpi_ut_strtoul64((char *)pointer, flags, &result);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-		break;
-
-	case ACPI_TYPE_BUFFER:
-
-		/* Check for zero-length buffer */
-
-		if (!count) {
-			return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
-		}
-
-		/* Transfer no more than an integer's worth of data */
-
-		if (count > acpi_gbl_integer_byte_width) {
-			count = acpi_gbl_integer_byte_width;
-		}
-
-		/*
-		 * Convert buffer to an integer - we simply grab enough raw data
-		 * from the buffer to fill an integer
-		 */
-		for (i = 0; i < count; i++) {
-			/*
-			 * Get next byte and shift it into the Result.
-			 * Little endian is used, meaning that the first byte of the buffer
-			 * is the LSB of the integer
-			 */
-			result |= (((acpi_integer) pointer[i]) << (i * 8));
-		}
-		break;
-
-	default:
-
-		/* No other types can get here */
-		break;
-	}
-
-	/* Create a new integer */
-
-	return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-	if (!return_desc) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
-			  ACPI_FORMAT_UINT64(result)));
-
-	/* Save the Result */
-
-	return_desc->integer.value = result;
-	acpi_ex_truncate_for32bit_table(return_desc);
-	*result_desc = return_desc;
-	return_ACPI_STATUS(AE_OK);
-}
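Editor's note: the Buffer case above builds the integer one byte at a time, with the first buffer byte becoming the least significant byte. A standalone sketch of that little-endian fold, for illustration only (buffer_to_integer is hypothetical):

	#include <acpi/acpi.h>

	/* Fold up to acpi_gbl_integer_byte_width bytes into an integer,
	 * first buffer byte becoming the LSB, as the removed code does. */
	static acpi_integer buffer_to_integer(const u8 *buffer, u32 length)
	{
		acpi_integer result = 0;
		u32 count = length;
		u32 i;

		if (count > acpi_gbl_integer_byte_width)
			count = acpi_gbl_integer_byte_width;

		for (i = 0; i < count; i++)
			result |= ((acpi_integer) buffer[i]) << (i * 8);

		return result;
	}

	/* Example: {0x78, 0x56, 0x34, 0x12} yields 0x12345678. */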
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_convert_to_buffer
- *
- * PARAMETERS:  obj_desc        - Object to be converted. Must be an
- *                                Integer, Buffer, or String
- *              result_desc     - Where the new buffer object is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Convert an ACPI Object to a Buffer
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
-			  union acpi_operand_object **result_desc)
-{
-	union acpi_operand_object *return_desc;
-	u8 *new_buf;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_convert_to_buffer, obj_desc);
-
-	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-	case ACPI_TYPE_BUFFER:
-
-		/* No conversion necessary */
-
-		*result_desc = obj_desc;
-		return_ACPI_STATUS(AE_OK);
-
-	case ACPI_TYPE_INTEGER:
-
-		/*
-		 * Create a new Buffer object.
-		 * Need enough space for one integer
-		 */
-		return_desc =
-		    acpi_ut_create_buffer_object(acpi_gbl_integer_byte_width);
-		if (!return_desc) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		/* Copy the integer to the buffer, LSB first */
-
-		new_buf = return_desc->buffer.pointer;
-		ACPI_MEMCPY(new_buf,
-			    &obj_desc->integer.value,
-			    acpi_gbl_integer_byte_width);
-		break;
-
-	case ACPI_TYPE_STRING:
-
-		/*
-		 * Create a new Buffer object
-		 * Size will be the string length
-		 *
-		 * NOTE: Add one to the string length to include the null terminator.
-		 * The ACPI spec is unclear on this subject, but there is existing
-		 * ASL/AML code that depends on the null being transferred to the new
-		 * buffer.
-		 */
-		return_desc = acpi_ut_create_buffer_object((acpi_size)
-							   obj_desc->string.
-							   length + 1);
-		if (!return_desc) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		/* Copy the string to the buffer */
-
-		new_buf = return_desc->buffer.pointer;
-		ACPI_STRNCPY((char *)new_buf, (char *)obj_desc->string.pointer,
-			     obj_desc->string.length);
-		break;
-
-	default:
-		return_ACPI_STATUS(AE_TYPE);
-	}
-
-	/* Mark buffer initialized */
-
-	return_desc->common.flags |= AOPOBJ_DATA_VALID;
-	*result_desc = return_desc;
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_convert_to_ascii
- *
- * PARAMETERS:  Integer         - Value to be converted
- *              Base            - ACPI_STRING_DECIMAL or ACPI_STRING_HEX
- *              String          - Where the string is returned
- *              data_width      - Size of data item to be converted, in bytes
- *
- * RETURN:      Actual string length
- *
- * DESCRIPTION: Convert an ACPI Integer to a hex or decimal string
- *
- ******************************************************************************/
-
-static u32
-acpi_ex_convert_to_ascii(acpi_integer integer,
-			 u16 base, u8 * string, u8 data_width)
-{
-	acpi_integer digit;
-	u32 i;
-	u32 j;
-	u32 k = 0;
-	u32 hex_length;
-	u32 decimal_length;
-	u32 remainder;
-	u8 supress_zeros;
-
-	ACPI_FUNCTION_ENTRY();
-
-	switch (base) {
-	case 10:
-
-		/* Setup max length for the decimal number */
-
-		switch (data_width) {
-		case 1:
-			decimal_length = ACPI_MAX8_DECIMAL_DIGITS;
-			break;
-
-		case 4:
-			decimal_length = ACPI_MAX32_DECIMAL_DIGITS;
-			break;
-
-		case 8:
-		default:
-			decimal_length = ACPI_MAX64_DECIMAL_DIGITS;
-			break;
-		}
-
-		supress_zeros = TRUE;	/* No leading zeros */
-		remainder = 0;
-
-		for (i = decimal_length; i > 0; i--) {
-
-			/* Divide by nth factor of 10 */
-
-			digit = integer;
-			for (j = 0; j < i; j++) {
-				(void)acpi_ut_short_divide(digit, 10, &digit,
-							   &remainder);
-			}
-
-			/* Handle leading zeros */
-
-			if (remainder != 0) {
-				supress_zeros = FALSE;
-			}
-
-			if (!supress_zeros) {
-				string[k] = (u8) (ACPI_ASCII_ZERO + remainder);
-				k++;
-			}
-		}
-		break;
-
-	case 16:
-
-		/* hex_length: 2 ascii hex chars per data byte */
-
-		hex_length = ACPI_MUL_2(data_width);
-		for (i = 0, j = (hex_length - 1); i < hex_length; i++, j--) {
-
-			/* Get one hex digit, most significant digits first */
-
-			string[k] =
-			    (u8) acpi_ut_hex_to_ascii_char(integer,
-							   ACPI_MUL_4(j));
-			k++;
-		}
-		break;
-
-	default:
-		return (0);
-	}
-
-	/*
-	 * Since leading zeros are suppressed, we must check for the case where
-	 * the integer equals 0
-	 *
-	 * Finally, null terminate the string and return the length
-	 */
-	if (!k) {
-		string[0] = ACPI_ASCII_ZERO;
-		k = 1;
-	}
-
-	string[k] = 0;
-	return ((u32) k);
-}
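Editor's note: the hex branch of the conversion above emits two ASCII digits per source byte, most significant digit first, while the decimal branch suppresses leading zeros and always produces at least "0". The hypothetical helper below mirrors just the hex branch; the caller is assumed to provide a buffer of at least 2 * data_width + 1 bytes.

	#include <acpi/acpi.h>

	/* Hypothetical standalone version of the hex case above. */
	static u32 integer_to_hex_ascii(acpi_integer value, u8 data_width,
					u8 *string)
	{
		u32 hex_length = ACPI_MUL_2(data_width);	/* 2 chars/byte */
		u32 i, j, k = 0;

		for (i = 0, j = hex_length - 1; i < hex_length; i++, j--) {
			string[k] = (u8) acpi_ut_hex_to_ascii_char(value,
								   ACPI_MUL_4(j));
			k++;
		}

		string[k] = 0;		/* null terminate */
		return k;
	}

	/* Example: value 0x1A2B with data_width 2 produces "1A2B" (4 chars). */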
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_convert_to_string
- *
- * PARAMETERS:  obj_desc        - Object to be converted. Must be an
- *                                Integer, Buffer, or String
- *              result_desc     - Where the string object is returned
- *              Type            - String flags (base and conversion type)
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Convert an ACPI Object to a string
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
-			  union acpi_operand_object ** result_desc, u32 type)
-{
-	union acpi_operand_object *return_desc;
-	u8 *new_buf;
-	u32 i;
-	u32 string_length = 0;
-	u16 base = 16;
-	u8 separator = ',';
-
-	ACPI_FUNCTION_TRACE_PTR(ex_convert_to_string, obj_desc);
-
-	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-	case ACPI_TYPE_STRING:
-
-		/* No conversion necessary */
-
-		*result_desc = obj_desc;
-		return_ACPI_STATUS(AE_OK);
-
-	case ACPI_TYPE_INTEGER:
-
-		switch (type) {
-		case ACPI_EXPLICIT_CONVERT_DECIMAL:
-
-			/* Make room for maximum decimal number */
-
-			string_length = ACPI_MAX_DECIMAL_DIGITS;
-			base = 10;
-			break;
-
-		default:
-
-			/* Two hex string characters for each integer byte */
-
-			string_length = ACPI_MUL_2(acpi_gbl_integer_byte_width);
-			break;
-		}
-
-		/*
-		 * Create a new String
-		 * Need enough space for one ASCII integer (plus null terminator)
-		 */
-		return_desc =
-		    acpi_ut_create_string_object((acpi_size) string_length);
-		if (!return_desc) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		new_buf = return_desc->buffer.pointer;
-
-		/* Convert integer to string */
-
-		string_length =
-		    acpi_ex_convert_to_ascii(obj_desc->integer.value, base,
-					     new_buf,
-					     acpi_gbl_integer_byte_width);
-
-		/* Null terminate at the correct place */
-
-		return_desc->string.length = string_length;
-		new_buf[string_length] = 0;
-		break;
-
-	case ACPI_TYPE_BUFFER:
-
-		/* Setup string length, base, and separator */
-
-		switch (type) {
-		case ACPI_EXPLICIT_CONVERT_DECIMAL:	/* Used by to_decimal_string */
-			/*
-			 * From ACPI: "If Data is a buffer, it is converted to a string of
-			 * decimal values separated by commas."
-			 */
-			base = 10;
-
-			/*
-			 * Calculate the final string length. Individual string values
-			 * are variable length (include separator for each)
-			 */
-			for (i = 0; i < obj_desc->buffer.length; i++) {
-				if (obj_desc->buffer.pointer[i] >= 100) {
-					string_length += 4;
-				} else if (obj_desc->buffer.pointer[i] >= 10) {
-					string_length += 3;
-				} else {
-					string_length += 2;
-				}
-			}
-			break;
-
-		case ACPI_IMPLICIT_CONVERT_HEX:
-			/*
-			 * From the ACPI spec:
-			 * "The entire contents of the buffer are converted to a string of
-			 * two-character hexadecimal numbers, each separated by a space."
-			 */
-			separator = ' ';
-			string_length = (obj_desc->buffer.length * 3);
-			break;
-
-		case ACPI_EXPLICIT_CONVERT_HEX:	/* Used by to_hex_string */
-			/*
-			 * From ACPI: "If Data is a buffer, it is converted to a string of
-			 * hexadecimal values separated by commas."
-			 */
-			string_length = (obj_desc->buffer.length * 3);
-			break;
-
-		default:
-			return_ACPI_STATUS(AE_BAD_PARAMETER);
-		}
-
-		/*
-		 * Create a new string object and string buffer
-		 * (-1 because of extra separator included in string_length from above)
-		 * Allow creation of zero-length strings from zero-length buffers.
-		 */
-		if (string_length) {
-			string_length--;
-		}
-
-		return_desc = acpi_ut_create_string_object((acpi_size)
-							   string_length);
-		if (!return_desc) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		new_buf = return_desc->buffer.pointer;
-
-		/*
-		 * Convert buffer bytes to hex or decimal values
-		 * (separated by commas or spaces)
-		 */
-		for (i = 0; i < obj_desc->buffer.length; i++) {
-			new_buf += acpi_ex_convert_to_ascii((acpi_integer)
-							    obj_desc->buffer.
-							    pointer[i], base,
-							    new_buf, 1);
-			*new_buf++ = separator;	/* each separated by a comma or space */
-		}
-
-		/*
-		 * Null terminate the string
-		 * (overwrites final comma/space from above)
-		 */
-		if (obj_desc->buffer.length) {
-			new_buf--;
-		}
-		*new_buf = 0;
-		break;
-
-	default:
-		return_ACPI_STATUS(AE_TYPE);
-	}
-
-	*result_desc = return_desc;
-	return_ACPI_STATUS(AE_OK);
-}
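Editor's note: for the comma- and space-separated hex conversions above, the sizing rule is three output characters per source byte, with the final separator later overwritten by the null terminator. A small hedged sketch of that arithmetic (hex_string_length is hypothetical):

	#include <acpi/acpi.h>

	/* "AA,BB,CC"-style output needs 3 chars per byte, minus the trailing
	 * separator that the null terminator replaces; a zero-length buffer
	 * yields an empty string. */
	static acpi_size hex_string_length(u32 buffer_length)
	{
		acpi_size string_length = (acpi_size) buffer_length * 3;

		if (string_length)
			string_length--;	/* last separator becomes the null */

		return string_length;
	}

	/* Example: a 4-byte buffer -> "AA BB CC DD" -> 11 characters. */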
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_convert_to_target_type
- *
- * PARAMETERS:  destination_type    - Current type of the destination
- *              source_desc         - Source object to be converted.
- *              result_desc         - Where the converted object is returned
- *              walk_state          - Current method state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Implements "implicit conversion" rules for storing an object.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_convert_to_target_type(acpi_object_type destination_type,
-			       union acpi_operand_object *source_desc,
-			       union acpi_operand_object **result_desc,
-			       struct acpi_walk_state *walk_state)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ex_convert_to_target_type);
-
-	/* Default behavior */
-
-	*result_desc = source_desc;
-
-	/*
-	 * If required by the target,
-	 * perform implicit conversion on the source before we store it.
-	 */
-	switch (GET_CURRENT_ARG_TYPE(walk_state->op_info->runtime_args)) {
-	case ARGI_SIMPLE_TARGET:
-	case ARGI_FIXED_TARGET:
-	case ARGI_INTEGER_REF:	/* Handles Increment, Decrement cases */
-
-		switch (destination_type) {
-		case ACPI_TYPE_LOCAL_REGION_FIELD:
-			/*
-			 * Named field can always handle conversions
-			 */
-			break;
-
-		default:
-			/* No conversion allowed for these types */
-
-			if (destination_type !=
-			    ACPI_GET_OBJECT_TYPE(source_desc)) {
-				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-						  "Explicit operator, will store (%s) over existing type (%s)\n",
-						  acpi_ut_get_object_type_name
-						  (source_desc),
-						  acpi_ut_get_type_name
-						  (destination_type)));
-				status = AE_TYPE;
-			}
-		}
-		break;
-
-	case ARGI_TARGETREF:
-
-		switch (destination_type) {
-		case ACPI_TYPE_INTEGER:
-		case ACPI_TYPE_BUFFER_FIELD:
-		case ACPI_TYPE_LOCAL_BANK_FIELD:
-		case ACPI_TYPE_LOCAL_INDEX_FIELD:
-			/*
-			 * These types require an Integer operand. We can convert
-			 * a Buffer or a String to an Integer if necessary.
-			 */
-			status =
-			    acpi_ex_convert_to_integer(source_desc, result_desc,
-						       16);
-			break;
-
-		case ACPI_TYPE_STRING:
-			/*
-			 * The operand must be a String. We can convert an
-			 * Integer or Buffer if necessary
-			 */
-			status =
-			    acpi_ex_convert_to_string(source_desc, result_desc,
-						      ACPI_IMPLICIT_CONVERT_HEX);
-			break;
-
-		case ACPI_TYPE_BUFFER:
-			/*
-			 * The operand must be a Buffer. We can convert an
-			 * Integer or String if necessary
-			 */
-			status =
-			    acpi_ex_convert_to_buffer(source_desc, result_desc);
-			break;
-
-		default:
-			ACPI_ERROR((AE_INFO,
-				    "Bad destination type during conversion: %X",
-				    destination_type));
-			status = AE_AML_INTERNAL;
-			break;
-		}
-		break;
-
-	case ARGI_REFERENCE:
-		/*
-		 * create_xxxx_field cases - we are storing the field object into the name
-		 */
-		break;
-
-	default:
-		ACPI_ERROR((AE_INFO,
-			    "Unknown Target type ID 0x%X AmlOpcode %X DestType %s",
-			    GET_CURRENT_ARG_TYPE(walk_state->op_info->
-						 runtime_args),
-			    walk_state->opcode,
-			    acpi_ut_get_type_name(destination_type)));
-		status = AE_AML_INTERNAL;
-	}
-
-	/*
-	 * Source-to-Target conversion semantics:
-	 *
-	 * If conversion to the target type cannot be performed, then simply
-	 * overwrite the target with the new object and type.
-	 */
-	if (status == AE_TYPE) {
-		status = AE_OK;
-	}
-
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c
deleted file mode 100644
index ad09696..0000000
--- a/drivers/acpi/executer/excreate.c
+++ /dev/null
@@ -1,521 +0,0 @@
-/******************************************************************************
- *
- * Module Name: excreate - Named object creation
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("excreate")
-#ifndef ACPI_NO_METHOD_EXECUTION
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_create_alias
- *
- * PARAMETERS:  walk_state           - Current state, contains operands
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a new named alias
- *
- ******************************************************************************/
-acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
-{
-	struct acpi_namespace_node *target_node;
-	struct acpi_namespace_node *alias_node;
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ex_create_alias);
-
-	/* Get the source/alias operands (both namespace nodes) */
-
-	alias_node = (struct acpi_namespace_node *)walk_state->operands[0];
-	target_node = (struct acpi_namespace_node *)walk_state->operands[1];
-
-	if ((target_node->type == ACPI_TYPE_LOCAL_ALIAS) ||
-	    (target_node->type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) {
-		/*
-		 * Dereference an existing alias so that we don't create a chain
-		 * of aliases.  With this code, we guarantee that an alias is
-		 * always exactly one level of indirection away from the
-		 * actual aliased name.
-		 */
-		target_node =
-		    ACPI_CAST_PTR(struct acpi_namespace_node,
-				  target_node->object);
-	}
-
-	/*
-	 * For objects that can never change (i.e., the NS node will
-	 * permanently point to the same object), we can simply attach
-	 * the object to the new NS node.  For other objects (such as
-	 * Integers, buffers, etc.), we have to point the Alias node
-	 * to the original Node.
-	 */
-	switch (target_node->type) {
-
-		/* For these types, the sub-object can change dynamically via a Store */
-
-	case ACPI_TYPE_INTEGER:
-	case ACPI_TYPE_STRING:
-	case ACPI_TYPE_BUFFER:
-	case ACPI_TYPE_PACKAGE:
-	case ACPI_TYPE_BUFFER_FIELD:
-
-		/*
-		 * These types open a new scope, so we need the NS node in order to access
-		 * any children.
-		 */
-	case ACPI_TYPE_DEVICE:
-	case ACPI_TYPE_POWER:
-	case ACPI_TYPE_PROCESSOR:
-	case ACPI_TYPE_THERMAL:
-	case ACPI_TYPE_LOCAL_SCOPE:
-
-		/*
-		 * The new alias has the type ALIAS and points to the original
-		 * NS node, not the object itself.
-		 */
-		alias_node->type = ACPI_TYPE_LOCAL_ALIAS;
-		alias_node->object =
-		    ACPI_CAST_PTR(union acpi_operand_object, target_node);
-		break;
-
-	case ACPI_TYPE_METHOD:
-
-		/*
-		 * Control method aliases need to be differentiated
-		 */
-		alias_node->type = ACPI_TYPE_LOCAL_METHOD_ALIAS;
-		alias_node->object =
-		    ACPI_CAST_PTR(union acpi_operand_object, target_node);
-		break;
-
-	default:
-
-		/* Attach the original source object to the new Alias Node */
-
-		/*
-		 * The new alias assumes the type of the target, and it points
-		 * to the same object.  The reference count of the object has an
-		 * additional reference to prevent deletion out from under either the
-		 * target node or the alias Node
-		 */
-		status = acpi_ns_attach_object(alias_node,
-					       acpi_ns_get_attached_object
-					       (target_node),
-					       target_node->type);
-		break;
-	}
-
-	/* Since both operands are Nodes, we don't need to delete them */
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_create_event
- *
- * PARAMETERS:  walk_state          - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a new event object
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	union acpi_operand_object *obj_desc;
-
-	ACPI_FUNCTION_TRACE(ex_create_event);
-
-	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_EVENT);
-	if (!obj_desc) {
-		status = AE_NO_MEMORY;
-		goto cleanup;
-	}
-
-	/*
-	 * Create the actual OS semaphore, with zero initial units -- meaning
-	 * that the event is created in an unsignalled state
-	 */
-	status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0,
-					  &obj_desc->event.os_semaphore);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	/* Attach object to the Node */
-
-	status =
-	    acpi_ns_attach_object((struct acpi_namespace_node *)walk_state->
-				  operands[0], obj_desc, ACPI_TYPE_EVENT);
-
-      cleanup:
-	/*
-	 * Remove local reference to the object (on error, will cause deletion
-	 * of both object and semaphore if present.)
-	 */
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_create_mutex
- *
- * PARAMETERS:  walk_state          - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a new mutex object
- *
- *              Mutex (Name[0], sync_level[1])
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
-{
-	acpi_status status = AE_OK;
-	union acpi_operand_object *obj_desc;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_create_mutex, ACPI_WALK_OPERANDS);
-
-	/* Create the new mutex object */
-
-	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
-	if (!obj_desc) {
-		status = AE_NO_MEMORY;
-		goto cleanup;
-	}
-
-	/* Create the actual OS Mutex */
-
-	status = acpi_os_create_mutex(&obj_desc->mutex.os_mutex);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	/* Init object and attach to NS node */
-
-	obj_desc->mutex.sync_level =
-	    (u8) walk_state->operands[1]->integer.value;
-	obj_desc->mutex.node =
-	    (struct acpi_namespace_node *)walk_state->operands[0];
-
-	status =
-	    acpi_ns_attach_object(obj_desc->mutex.node, obj_desc,
-				  ACPI_TYPE_MUTEX);
-
-      cleanup:
-	/*
-	 * Remove local reference to the object (on error, will cause deletion
-	 * of both object and semaphore if present.)
-	 */
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_create_region
- *
- * PARAMETERS:  aml_start           - Pointer to the region declaration AML
- *              aml_length          - Max length of the declaration AML
- *              region_space        - space_iD for the region
- *              walk_state          - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a new operation region object
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_create_region(u8 * aml_start,
-		      u32 aml_length,
-		      u8 region_space, struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	union acpi_operand_object *obj_desc;
-	struct acpi_namespace_node *node;
-	union acpi_operand_object *region_obj2;
-
-	ACPI_FUNCTION_TRACE(ex_create_region);
-
-	/* Get the Namespace Node */
-
-	node = walk_state->op->common.node;
-
-	/*
-	 * If the region object is already attached to this node,
-	 * just return
-	 */
-	if (acpi_ns_get_attached_object(node)) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/*
-	 * Space ID must be one of the predefined IDs, or in the user-defined
-	 * range
-	 */
-	if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
-	    (region_space < ACPI_USER_REGION_BEGIN)) {
-		ACPI_ERROR((AE_INFO, "Invalid AddressSpace type %X",
-			    region_space));
-		return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (%X)\n",
-			  acpi_ut_get_region_name(region_space), region_space));
-
-	/* Create the region descriptor */
-
-	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_REGION);
-	if (!obj_desc) {
-		status = AE_NO_MEMORY;
-		goto cleanup;
-	}
-
-	/*
-	 * Remember location in AML stream of address & length
-	 * operands since they need to be evaluated at run time.
-	 */
-	region_obj2 = obj_desc->common.next_object;
-	region_obj2->extra.aml_start = aml_start;
-	region_obj2->extra.aml_length = aml_length;
-
-	/* Init the region from the operands */
-
-	obj_desc->region.space_id = region_space;
-	obj_desc->region.address = 0;
-	obj_desc->region.length = 0;
-	obj_desc->region.node = node;
-
-	/* Install the new region object in the parent Node */
-
-	status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_REGION);
-
-      cleanup:
-
-	/* Remove local reference to the object */
-
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_create_processor
- *
- * PARAMETERS:  walk_state          - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a new processor object and populate the fields
- *
- *              Processor (Name[0], cpu_iD[1], pblock_addr[2], pblock_length[3])
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_create_processor(struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object **operand = &walk_state->operands[0];
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_create_processor, walk_state);
-
-	/* Create the processor object */
-
-	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PROCESSOR);
-	if (!obj_desc) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Initialize the processor object from the operands */
-
-	obj_desc->processor.proc_id = (u8) operand[1]->integer.value;
-	obj_desc->processor.length = (u8) operand[3]->integer.value;
-	obj_desc->processor.address =
-	    (acpi_io_address) operand[2]->integer.value;
-
-	/* Install the processor object in the parent Node */
-
-	status = acpi_ns_attach_object((struct acpi_namespace_node *)operand[0],
-				       obj_desc, ACPI_TYPE_PROCESSOR);
-
-	/* Remove local reference to the object */
-
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_create_power_resource
- *
- * PARAMETERS:  walk_state          - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a new power_resource object and populate the fields
- *
- *              power_resource (Name[0], system_level[1], resource_order[2])
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_create_power_resource(struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object **operand = &walk_state->operands[0];
-	acpi_status status;
-	union acpi_operand_object *obj_desc;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_create_power_resource, walk_state);
-
-	/* Create the power resource object */
-
-	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_POWER);
-	if (!obj_desc) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Initialize the power object from the operands */
-
-	obj_desc->power_resource.system_level = (u8) operand[1]->integer.value;
-	obj_desc->power_resource.resource_order =
-	    (u16) operand[2]->integer.value;
-
-	/* Install the power resource object in the parent Node */
-
-	status = acpi_ns_attach_object((struct acpi_namespace_node *)operand[0],
-				       obj_desc, ACPI_TYPE_POWER);
-
-	/* Remove local reference to the object */
-
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_create_method
- *
- * PARAMETERS:  aml_start       - First byte of the method's AML
- *              aml_length      - AML byte count for this method
- *              walk_state      - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a new method object
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_create_method(u8 * aml_start,
-		      u32 aml_length, struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object **operand = &walk_state->operands[0];
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-	u8 method_flags;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_create_method, walk_state);
-
-	/* Create a new method object */
-
-	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
-	if (!obj_desc) {
-		status = AE_NO_MEMORY;
-		goto exit;
-	}
-
-	/* Save the method's AML pointer and length  */
-
-	obj_desc->method.aml_start = aml_start;
-	obj_desc->method.aml_length = aml_length;
-
-	/*
-	 * Disassemble the method flags. Split off the Arg Count
-	 * for efficiency
-	 */
-	method_flags = (u8) operand[1]->integer.value;
-
-	obj_desc->method.method_flags =
-	    (u8) (method_flags & ~AML_METHOD_ARG_COUNT);
-	obj_desc->method.param_count =
-	    (u8) (method_flags & AML_METHOD_ARG_COUNT);
-
-	/*
-	 * Get the sync_level. If method is serialized, a mutex will be
-	 * created for this method when it is parsed.
-	 */
-	if (method_flags & AML_METHOD_SERIALIZED) {
-		/*
-		 * ACPI 1.0: sync_level = 0
-		 * ACPI 2.0: sync_level = sync_level in method declaration
-		 */
-		obj_desc->method.sync_level = (u8)
-		    ((method_flags & AML_METHOD_SYNCH_LEVEL) >> 4);
-	}
-
-	/* Attach the new object to the method Node */
-
-	status = acpi_ns_attach_object((struct acpi_namespace_node *)operand[0],
-				       obj_desc, ACPI_TYPE_METHOD);
-
-	/* Remove local reference to the object */
-
-	acpi_ut_remove_reference(obj_desc);
-
-      exit:
-	/* Remove a reference to the operand */
-
-	acpi_ut_remove_reference(operand[1]);
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c
deleted file mode 100644
index d087a7d..0000000
--- a/drivers/acpi/executer/exdump.c
+++ /dev/null
@@ -1,1059 +0,0 @@
-/******************************************************************************
- *
- * Module Name: exdump - Interpreter debug output routines
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exdump")
-
-/*
- * The following routines are used for debug output only
- */
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-/* Local prototypes */
-static void acpi_ex_out_string(char *title, char *value);
-
-static void acpi_ex_out_pointer(char *title, void *value);
-
-static void
-acpi_ex_dump_object(union acpi_operand_object *obj_desc,
-		    struct acpi_exdump_info *info);
-
-static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc);
-
-static void
-acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
-			 u32 level, u32 index);
-
-/*******************************************************************************
- *
- * Object Descriptor info tables
- *
- * Note: The first table entry must be an INIT opcode and must contain
- * the table length (number of table entries)
- *
- ******************************************************************************/
-
-static struct acpi_exdump_info acpi_ex_dump_integer[2] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_integer), NULL},
-	{ACPI_EXD_UINT64, ACPI_EXD_OFFSET(integer.value), "Value"}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_string[4] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_string), NULL},
-	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(string.length), "Length"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(string.pointer), "Pointer"},
-	{ACPI_EXD_STRING, 0, NULL}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_buffer[5] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL},
-	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
-	{ACPI_EXD_BUFFER, 0, NULL}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_package[5] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_package), NULL},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(package.flags), "Flags"},
-	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Elements"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(package.elements), "Element List"},
-	{ACPI_EXD_PACKAGE, 0, NULL}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_device[4] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_device), NULL},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.handler), "Handler"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.system_notify),
-	 "System Notify"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.device_notify),
-	 "Device Notify"}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_event[2] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_event), NULL},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.os_semaphore), "OsSemaphore"}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_method[8] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "ParamCount"},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.mutex), "Mutex"},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.thread_count), "Thread Count"},
-	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(method.aml_length), "Aml Length"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.aml_start), "Aml Start"}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_mutex[5] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_mutex), NULL},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.sync_level), "Sync Level"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"},
-	{ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth),
-	 "Acquire Depth"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_region[7] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region), NULL},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.space_id), "Space Id"},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.flags), "Flags"},
-	{ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(region.address), "Address"},
-	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(region.length), "Length"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.handler), "Handler"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.next), "Next"}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_power[5] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_power), NULL},
-	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.system_level),
-	 "System Level"},
-	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.resource_order),
-	 "Resource Order"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.system_notify),
-	 "System Notify"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.device_notify),
-	 "Device Notify"}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_processor[7] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_processor), NULL},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.length), "Length"},
-	{ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.system_notify),
-	 "System Notify"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.device_notify),
-	 "Device Notify"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.handler), "Handler"}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_thermal[4] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_thermal), NULL},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.system_notify),
-	 "System Notify"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.device_notify),
-	 "Device Notify"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.handler), "Handler"}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_buffer_field[3] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer_field), NULL},
-	{ACPI_EXD_FIELD, 0, NULL},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer_field.buffer_obj),
-	 "Buffer Object"}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_region_field[3] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region_field), NULL},
-	{ACPI_EXD_FIELD, 0, NULL},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_bank_field[5] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_bank_field), NULL},
-	{ACPI_EXD_FIELD, 0, NULL},
-	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(bank_field.value), "Value"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(bank_field.region_obj),
-	 "Region Object"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(bank_field.bank_obj), "Bank Object"}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_index_field[5] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_index_field), NULL},
-	{ACPI_EXD_FIELD, 0, NULL},
-	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(index_field.value), "Value"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(index_field.index_obj),
-	 "Index Object"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(index_field.data_obj), "Data Object"}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_reference[8] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_reference), NULL},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.class), "Class"},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.target_type), "Target Type"},
-	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(reference.value), "Value"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.object), "Object Desc"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.node), "Node"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.where), "Where"},
-	{ACPI_EXD_REFERENCE, 0, NULL}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_address_handler[6] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_address_handler),
-	 NULL},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(address_space.space_id), "Space Id"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.next), "Next"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.region_list),
-	 "Region List"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.node), "Node"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.context), "Context"}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_notify[3] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_notify), NULL},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.node), "Node"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.context), "Context"}
-};
-
-/* Miscellaneous tables */
-
-static struct acpi_exdump_info acpi_ex_dump_common[4] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_common), NULL},
-	{ACPI_EXD_TYPE, 0, NULL},
-	{ACPI_EXD_UINT16, ACPI_EXD_OFFSET(common.reference_count),
-	 "Reference Count"},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common.flags), "Flags"}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_field_common[7] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_field_common), NULL},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common_field.field_flags),
-	 "Field Flags"},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common_field.access_byte_width),
-	 "Access Byte Width"},
-	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(common_field.bit_length),
-	 "Bit Length"},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common_field.start_field_bit_offset),
-	 "Field Bit Offset"},
-	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(common_field.base_byte_offset),
-	 "Base Byte Offset"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(common_field.node), "Parent Node"}
-};
-
-static struct acpi_exdump_info acpi_ex_dump_node[5] = {
-	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_node), NULL},
-	{ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(flags), "Flags"},
-	{ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(owner_id), "Owner Id"},
-	{ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(child), "Child List"},
-	{ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(peer), "Next Peer"}
-};
-
-/* Dispatch table, indexed by object type */
-
-static struct acpi_exdump_info *acpi_ex_dump_info[] = {
-	NULL,
-	acpi_ex_dump_integer,
-	acpi_ex_dump_string,
-	acpi_ex_dump_buffer,
-	acpi_ex_dump_package,
-	NULL,
-	acpi_ex_dump_device,
-	acpi_ex_dump_event,
-	acpi_ex_dump_method,
-	acpi_ex_dump_mutex,
-	acpi_ex_dump_region,
-	acpi_ex_dump_power,
-	acpi_ex_dump_processor,
-	acpi_ex_dump_thermal,
-	acpi_ex_dump_buffer_field,
-	NULL,
-	NULL,
-	acpi_ex_dump_region_field,
-	acpi_ex_dump_bank_field,
-	acpi_ex_dump_index_field,
-	acpi_ex_dump_reference,
-	NULL,
-	NULL,
-	acpi_ex_dump_notify,
-	acpi_ex_dump_address_handler,
-	NULL,
-	NULL,
-	NULL
-};
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_dump_object
- *
- * PARAMETERS:  obj_desc            - Descriptor to dump
- *              Info                - Info table corresponding to this object
- *                                    type
- *
- * RETURN:      None
- *
- * DESCRIPTION: Walk the info table for this object
- *
- ******************************************************************************/
-
-static void
-acpi_ex_dump_object(union acpi_operand_object *obj_desc,
-		    struct acpi_exdump_info *info)
-{
-	u8 *target;
-	char *name;
-	u8 count;
-
-	if (!info) {
-		acpi_os_printf
-		    ("ExDumpObject: Display not implemented for object type %s\n",
-		     acpi_ut_get_object_type_name(obj_desc));
-		return;
-	}
-
-	/* First table entry must contain the table length (# of table entries) */
-
-	count = info->offset;
-
-	while (count) {
-		target = ACPI_ADD_PTR(u8, obj_desc, info->offset);
-		name = info->name;
-
-		switch (info->opcode) {
-		case ACPI_EXD_INIT:
-			break;
-
-		case ACPI_EXD_TYPE:
-			acpi_ex_out_string("Type",
-					   acpi_ut_get_object_type_name
-					   (obj_desc));
-			break;
-
-		case ACPI_EXD_UINT8:
-
-			acpi_os_printf("%20s : %2.2X\n", name, *target);
-			break;
-
-		case ACPI_EXD_UINT16:
-
-			acpi_os_printf("%20s : %4.4X\n", name,
-				       ACPI_GET16(target));
-			break;
-
-		case ACPI_EXD_UINT32:
-
-			acpi_os_printf("%20s : %8.8X\n", name,
-				       ACPI_GET32(target));
-			break;
-
-		case ACPI_EXD_UINT64:
-
-			acpi_os_printf("%20s : %8.8X%8.8X\n", "Value",
-				       ACPI_FORMAT_UINT64(ACPI_GET64(target)));
-			break;
-
-		case ACPI_EXD_POINTER:
-		case ACPI_EXD_ADDRESS:
-
-			acpi_ex_out_pointer(name,
-					    *ACPI_CAST_PTR(void *, target));
-			break;
-
-		case ACPI_EXD_STRING:
-
-			acpi_ut_print_string(obj_desc->string.pointer,
-					     ACPI_UINT8_MAX);
-			acpi_os_printf("\n");
-			break;
-
-		case ACPI_EXD_BUFFER:
-
-			ACPI_DUMP_BUFFER(obj_desc->buffer.pointer,
-					 obj_desc->buffer.length);
-			break;
-
-		case ACPI_EXD_PACKAGE:
-
-			/* Dump the package contents */
-
-			acpi_os_printf("\nPackage Contents:\n");
-			acpi_ex_dump_package_obj(obj_desc, 0, 0);
-			break;
-
-		case ACPI_EXD_FIELD:
-
-			acpi_ex_dump_object(obj_desc,
-					    acpi_ex_dump_field_common);
-			break;
-
-		case ACPI_EXD_REFERENCE:
-
-			acpi_ex_out_string("Class Name",
-					   (char *)
-					   acpi_ut_get_reference_name
-					   (obj_desc));
-			acpi_ex_dump_reference_obj(obj_desc);
-			break;
-
-		default:
-			acpi_os_printf("**** Invalid table opcode [%X] ****\n",
-				       info->opcode);
-			return;
-		}
-
-		info++;
-		count--;
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_dump_operand
- *
- * PARAMETERS:  *obj_desc       - Pointer to entry to be dumped
- *              Depth           - Current nesting depth
- *
- * RETURN:      None
- *
- * DESCRIPTION: Dump an operand object
- *
- ******************************************************************************/
-
-void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
-{
-	u32 length;
-	u32 index;
-
-	ACPI_FUNCTION_NAME(ex_dump_operand)
-
-	    if (!((ACPI_LV_EXEC & acpi_dbg_level)
-		  && (_COMPONENT & acpi_dbg_layer))) {
-		return;
-	}
-
-	if (!obj_desc) {
-
-		/* This could be a null element of a package */
-
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Null Object Descriptor\n"));
-		return;
-	}
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%p Namespace Node: ",
-				  obj_desc));
-		ACPI_DUMP_ENTRY(obj_desc, ACPI_LV_EXEC);
-		return;
-	}
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "%p is not a node or operand object: [%s]\n",
-				  obj_desc,
-				  acpi_ut_get_descriptor_name(obj_desc)));
-		ACPI_DUMP_BUFFER(obj_desc, sizeof(union acpi_operand_object));
-		return;
-	}
-
-	/* obj_desc is a valid object */
-
-	if (depth > 0) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%*s[%u] %p ",
-				  depth, " ", depth, obj_desc));
-	} else {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%p ", obj_desc));
-	}
-
-	/* Decode object type */
-
-	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-	case ACPI_TYPE_LOCAL_REFERENCE:
-
-		acpi_os_printf("Reference: [%s] ",
-			       acpi_ut_get_reference_name(obj_desc));
-
-		switch (obj_desc->reference.class) {
-		case ACPI_REFCLASS_DEBUG:
-
-			acpi_os_printf("\n");
-			break;
-
-		case ACPI_REFCLASS_INDEX:
-
-			acpi_os_printf("%p\n", obj_desc->reference.object);
-			break;
-
-		case ACPI_REFCLASS_TABLE:
-
-			acpi_os_printf("Table Index %X\n",
-				       obj_desc->reference.value);
-			break;
-
-		case ACPI_REFCLASS_REFOF:
-
-			acpi_os_printf("%p [%s]\n", obj_desc->reference.object,
-				       acpi_ut_get_type_name(((union
-							       acpi_operand_object
-							       *)
-							      obj_desc->
-							      reference.
-							      object)->common.
-							     type));
-			break;
-
-		case ACPI_REFCLASS_ARG:
-
-			acpi_os_printf("%X", obj_desc->reference.value);
-
-			if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
-
-				/* Value is an Integer */
-
-				acpi_os_printf(" value is [%8.8X%8.8x]",
-					       ACPI_FORMAT_UINT64(obj_desc->
-								  integer.
-								  value));
-			}
-
-			acpi_os_printf("\n");
-			break;
-
-		case ACPI_REFCLASS_LOCAL:
-
-			acpi_os_printf("%X", obj_desc->reference.value);
-
-			if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
-
-				/* Value is an Integer */
-
-				acpi_os_printf(" value is [%8.8X%8.8x]",
-					       ACPI_FORMAT_UINT64(obj_desc->
-								  integer.
-								  value));
-			}
-
-			acpi_os_printf("\n");
-			break;
-
-		case ACPI_REFCLASS_NAME:
-
-			acpi_os_printf("- [%4.4s]\n",
-				       obj_desc->reference.node->name.ascii);
-			break;
-
-		default:	/* Unknown reference class */
-
-			acpi_os_printf("%2.2X\n", obj_desc->reference.class);
-			break;
-		}
-		break;
-
-	case ACPI_TYPE_BUFFER:
-
-		acpi_os_printf("Buffer length %.2X @ %p\n",
-			       obj_desc->buffer.length,
-			       obj_desc->buffer.pointer);
-
-		/* Debug only -- dump the buffer contents */
-
-		if (obj_desc->buffer.pointer) {
-			length = obj_desc->buffer.length;
-			if (length > 128) {
-				length = 128;
-			}
-
-			acpi_os_printf
-			    ("Buffer Contents: (displaying length 0x%.2X)\n",
-			     length);
-			ACPI_DUMP_BUFFER(obj_desc->buffer.pointer, length);
-		}
-		break;
-
-	case ACPI_TYPE_INTEGER:
-
-		acpi_os_printf("Integer %8.8X%8.8X\n",
-			       ACPI_FORMAT_UINT64(obj_desc->integer.value));
-		break;
-
-	case ACPI_TYPE_PACKAGE:
-
-		acpi_os_printf("Package [Len %X] ElementArray %p\n",
-			       obj_desc->package.count,
-			       obj_desc->package.elements);
-
-		/*
-		 * If elements exist, package element pointer is valid,
-		 * and debug_level exceeds 1, dump package's elements.
-		 */
-		if (obj_desc->package.count &&
-		    obj_desc->package.elements && acpi_dbg_level > 1) {
-			for (index = 0; index < obj_desc->package.count;
-			     index++) {
-				acpi_ex_dump_operand(obj_desc->package.
-						     elements[index],
-						     depth + 1);
-			}
-		}
-		break;
-
-	case ACPI_TYPE_REGION:
-
-		acpi_os_printf("Region %s (%X)",
-			       acpi_ut_get_region_name(obj_desc->region.
-						       space_id),
-			       obj_desc->region.space_id);
-
-		/*
-		 * If the address and length have not been evaluated,
-		 * don't print them.
-		 */
-		if (!(obj_desc->region.flags & AOPOBJ_DATA_VALID)) {
-			acpi_os_printf("\n");
-		} else {
-			acpi_os_printf(" base %8.8X%8.8X Length %X\n",
-				       ACPI_FORMAT_NATIVE_UINT(obj_desc->region.
-							       address),
-				       obj_desc->region.length);
-		}
-		break;
-
-	case ACPI_TYPE_STRING:
-
-		acpi_os_printf("String length %X @ %p ",
-			       obj_desc->string.length,
-			       obj_desc->string.pointer);
-
-		acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX);
-		acpi_os_printf("\n");
-		break;
-
-	case ACPI_TYPE_LOCAL_BANK_FIELD:
-
-		acpi_os_printf("BankField\n");
-		break;
-
-	case ACPI_TYPE_LOCAL_REGION_FIELD:
-
-		acpi_os_printf
-		    ("RegionField: Bits=%X AccWidth=%X Lock=%X Update=%X at byte=%X bit=%X of below:\n",
-		     obj_desc->field.bit_length,
-		     obj_desc->field.access_byte_width,
-		     obj_desc->field.field_flags & AML_FIELD_LOCK_RULE_MASK,
-		     obj_desc->field.field_flags & AML_FIELD_UPDATE_RULE_MASK,
-		     obj_desc->field.base_byte_offset,
-		     obj_desc->field.start_field_bit_offset);
-
-		acpi_ex_dump_operand(obj_desc->field.region_obj, depth + 1);
-		break;
-
-	case ACPI_TYPE_LOCAL_INDEX_FIELD:
-
-		acpi_os_printf("IndexField\n");
-		break;
-
-	case ACPI_TYPE_BUFFER_FIELD:
-
-		acpi_os_printf("BufferField: %X bits at byte %X bit %X of\n",
-			       obj_desc->buffer_field.bit_length,
-			       obj_desc->buffer_field.base_byte_offset,
-			       obj_desc->buffer_field.start_field_bit_offset);
-
-		if (!obj_desc->buffer_field.buffer_obj) {
-			ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "*NULL*\n"));
-		} else
-		    if (ACPI_GET_OBJECT_TYPE(obj_desc->buffer_field.buffer_obj)
-			!= ACPI_TYPE_BUFFER) {
-			acpi_os_printf("*not a Buffer*\n");
-		} else {
-			acpi_ex_dump_operand(obj_desc->buffer_field.buffer_obj,
-					     depth + 1);
-		}
-		break;
-
-	case ACPI_TYPE_EVENT:
-
-		acpi_os_printf("Event\n");
-		break;
-
-	case ACPI_TYPE_METHOD:
-
-		acpi_os_printf("Method(%X) @ %p:%X\n",
-			       obj_desc->method.param_count,
-			       obj_desc->method.aml_start,
-			       obj_desc->method.aml_length);
-		break;
-
-	case ACPI_TYPE_MUTEX:
-
-		acpi_os_printf("Mutex\n");
-		break;
-
-	case ACPI_TYPE_DEVICE:
-
-		acpi_os_printf("Device\n");
-		break;
-
-	case ACPI_TYPE_POWER:
-
-		acpi_os_printf("Power\n");
-		break;
-
-	case ACPI_TYPE_PROCESSOR:
-
-		acpi_os_printf("Processor\n");
-		break;
-
-	case ACPI_TYPE_THERMAL:
-
-		acpi_os_printf("Thermal\n");
-		break;
-
-	default:
-		/* Unknown Type */
-
-		acpi_os_printf("Unknown Type %X\n",
-			       ACPI_GET_OBJECT_TYPE(obj_desc));
-		break;
-	}
-
-	return;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_dump_operands
- *
- * PARAMETERS:  Operands            - A list of Operand objects
- *              opcode_name         - AML opcode name
- *              num_operands        - Operand count for this opcode
- *
- * DESCRIPTION: Dump the operands associated with the opcode
- *
- ******************************************************************************/
-
-void
-acpi_ex_dump_operands(union acpi_operand_object **operands,
-		      const char *opcode_name, u32 num_operands)
-{
-	ACPI_FUNCTION_NAME(ex_dump_operands);
-
-	if (!opcode_name) {
-		opcode_name = "UNKNOWN";
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-			  "**** Start operand dump for opcode [%s], %d operands\n",
-			  opcode_name, num_operands));
-
-	if (num_operands == 0) {
-		num_operands = 1;
-	}
-
-	/* Dump the individual operands */
-
-	while (num_operands) {
-		acpi_ex_dump_operand(*operands, 0);
-		operands++;
-		num_operands--;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-			  "**** End operand dump for [%s]\n", opcode_name));
-	return;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_out* functions
- *
- * PARAMETERS:  Title               - Descriptive text
- *              Value               - Value to be displayed
- *
- * DESCRIPTION: Object dump output formatting functions.  These functions
- *              reduce the number of format strings required and keep them
- *              all in one place for easy modification.
- *
- ******************************************************************************/
-
-static void acpi_ex_out_string(char *title, char *value)
-{
-	acpi_os_printf("%20s : %s\n", title, value);
-}
-
-static void acpi_ex_out_pointer(char *title, void *value)
-{
-	acpi_os_printf("%20s : %p\n", title, value);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_dump_namespace_node
- *
- * PARAMETERS:  Node                - Descriptor to dump
- *              Flags               - Force display if TRUE
- *
- * DESCRIPTION: Dumps the members of the given Node
- *
- ******************************************************************************/
-
-void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
-{
-
-	ACPI_FUNCTION_ENTRY();
-
-	if (!flags) {
-		if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
-		      && (_COMPONENT & acpi_dbg_layer))) {
-			return;
-		}
-	}
-
-	acpi_os_printf("%20s : %4.4s\n", "Name", acpi_ut_get_node_name(node));
-	acpi_ex_out_string("Type", acpi_ut_get_type_name(node->type));
-	acpi_ex_out_pointer("Attached Object",
-			    acpi_ns_get_attached_object(node));
-	acpi_ex_out_pointer("Parent", acpi_ns_get_parent_node(node));
-
-	acpi_ex_dump_object(ACPI_CAST_PTR(union acpi_operand_object, node),
-			    acpi_ex_dump_node);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_dump_reference_obj
- *
- * PARAMETERS:  Object              - Descriptor to dump
- *
- * DESCRIPTION: Dumps a reference object
- *
- ******************************************************************************/
-
-static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
-{
-	struct acpi_buffer ret_buf;
-	acpi_status status;
-
-	ret_buf.length = ACPI_ALLOCATE_LOCAL_BUFFER;
-
-	if (obj_desc->reference.class == ACPI_REFCLASS_NAME) {
-		acpi_os_printf(" %p ", obj_desc->reference.node);
-
-		status =
-		    acpi_ns_handle_to_pathname(obj_desc->reference.node,
-					       &ret_buf);
-		if (ACPI_FAILURE(status)) {
-			acpi_os_printf(" Could not convert name to pathname\n");
-		} else {
-			acpi_os_printf("%s\n", (char *)ret_buf.pointer);
-			ACPI_FREE(ret_buf.pointer);
-		}
-	} else if (obj_desc->reference.object) {
-		if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) ==
-		    ACPI_DESC_TYPE_OPERAND) {
-			acpi_os_printf(" Target: %p",
-				       obj_desc->reference.object);
-			if (obj_desc->reference.class == ACPI_REFCLASS_TABLE) {
-				acpi_os_printf(" Table Index: %X\n",
-					       obj_desc->reference.value);
-			} else {
-				acpi_os_printf(" Target: %p [%s]\n",
-					       obj_desc->reference.object,
-					       acpi_ut_get_type_name(((union
-								       acpi_operand_object
-								       *)
-								      obj_desc->
-								      reference.
-								      object)->
-								     common.
-								     type));
-			}
-		} else {
-			acpi_os_printf(" Target: %p\n",
-				       obj_desc->reference.object);
-		}
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_dump_package_obj
- *
- * PARAMETERS:  obj_desc            - Descriptor to dump
- *              Level               - Indentation Level
- *              Index               - Package index for this object
- *
- * DESCRIPTION: Dumps the elements of the package
- *
- ******************************************************************************/
-
-static void
-acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
-			 u32 level, u32 index)
-{
-	u32 i;
-
-	/* Indentation and index output */
-
-	if (level > 0) {
-		for (i = 0; i < level; i++) {
-			acpi_os_printf(" ");
-		}
-
-		acpi_os_printf("[%.2d] ", index);
-	}
-
-	acpi_os_printf("%p ", obj_desc);
-
-	/* Null package elements are allowed */
-
-	if (!obj_desc) {
-		acpi_os_printf("[Null Object]\n");
-		return;
-	}
-
-	/* Packages may only contain a few object types */
-
-	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-	case ACPI_TYPE_INTEGER:
-
-		acpi_os_printf("[Integer] = %8.8X%8.8X\n",
-			       ACPI_FORMAT_UINT64(obj_desc->integer.value));
-		break;
-
-	case ACPI_TYPE_STRING:
-
-		acpi_os_printf("[String] Value: ");
-		for (i = 0; i < obj_desc->string.length; i++) {
-			acpi_os_printf("%c", obj_desc->string.pointer[i]);
-		}
-		acpi_os_printf("\n");
-		break;
-
-	case ACPI_TYPE_BUFFER:
-
-		acpi_os_printf("[Buffer] Length %.2X = ",
-			       obj_desc->buffer.length);
-		if (obj_desc->buffer.length) {
-			acpi_ut_dump_buffer(ACPI_CAST_PTR
-					    (u8, obj_desc->buffer.pointer),
-					    obj_desc->buffer.length,
-					    DB_DWORD_DISPLAY, _COMPONENT);
-		} else {
-			acpi_os_printf("\n");
-		}
-		break;
-
-	case ACPI_TYPE_PACKAGE:
-
-		acpi_os_printf("[Package] Contains %d Elements:\n",
-			       obj_desc->package.count);
-
-		for (i = 0; i < obj_desc->package.count; i++) {
-			acpi_ex_dump_package_obj(obj_desc->package.elements[i],
-						 level + 1, i);
-		}
-		break;
-
-	case ACPI_TYPE_LOCAL_REFERENCE:
-
-		acpi_os_printf("[Object Reference] Type [%s] %2.2X",
-			       acpi_ut_get_reference_name(obj_desc),
-			       obj_desc->reference.class);
-		acpi_ex_dump_reference_obj(obj_desc);
-		break;
-
-	default:
-
-		acpi_os_printf("[Unknown Type] %X\n",
-			       ACPI_GET_OBJECT_TYPE(obj_desc));
-		break;
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_dump_object_descriptor
- *
- * PARAMETERS:  obj_desc            - Descriptor to dump
- *              Flags               - Force display if TRUE
- *
- * DESCRIPTION: Dumps the members of the object descriptor given.
- *
- ******************************************************************************/
-
-void
-acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
-{
-	ACPI_FUNCTION_TRACE(ex_dump_object_descriptor);
-
-	if (!obj_desc) {
-		return_VOID;
-	}
-
-	if (!flags) {
-		if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
-		      && (_COMPONENT & acpi_dbg_layer))) {
-			return_VOID;
-		}
-	}
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) {
-		acpi_ex_dump_namespace_node((struct acpi_namespace_node *)
-					    obj_desc, flags);
-
-		acpi_os_printf("\nAttached Object (%p):\n",
-			       ((struct acpi_namespace_node *)obj_desc)->
-			       object);
-
-		acpi_ex_dump_object_descriptor(((struct acpi_namespace_node *)
-						obj_desc)->object, flags);
-		return_VOID;
-	}
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) {
-		acpi_os_printf
-		    ("ExDumpObjectDescriptor: %p is not an ACPI operand object: [%s]\n",
-		     obj_desc, acpi_ut_get_descriptor_name(obj_desc));
-		return_VOID;
-	}
-
-	if (obj_desc->common.type > ACPI_TYPE_NS_NODE_MAX) {
-		return_VOID;
-	}
-
-	/* Common Fields */
-
-	acpi_ex_dump_object(obj_desc, acpi_ex_dump_common);
-
-	/* Object-specific fields */
-
-	acpi_ex_dump_object(obj_desc, acpi_ex_dump_info[obj_desc->common.type]);
-	return_VOID;
-}
-
-#endif
diff --git a/drivers/acpi/executer/exfield.c b/drivers/acpi/executer/exfield.c
deleted file mode 100644
index 3e440d8..0000000
--- a/drivers/acpi/executer/exfield.c
+++ /dev/null
@@ -1,339 +0,0 @@
-/******************************************************************************
- *
- * Module Name: exfield - ACPI AML (p-code) execution - field manipulation
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exfield")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_read_data_from_field
- *
- * PARAMETERS:  walk_state          - Current execution state
- *              obj_desc            - The named field
- *              ret_buffer_desc     - Where the return data object is stored
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Read from a named field.  Returns either an Integer or a
- *              Buffer, depending on the size of the field.
- *
- ******************************************************************************/
-acpi_status
-acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
-			     union acpi_operand_object *obj_desc,
-			     union acpi_operand_object **ret_buffer_desc)
-{
-	acpi_status status;
-	union acpi_operand_object *buffer_desc;
-	acpi_size length;
-	void *buffer;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
-
-	/* Parameter validation */
-
-	if (!obj_desc) {
-		return_ACPI_STATUS(AE_AML_NO_OPERAND);
-	}
-	if (!ret_buffer_desc) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_BUFFER_FIELD) {
-		/*
-		 * If the buffer_field arguments have not been previously evaluated,
-		 * evaluate them now and save the results.
-		 */
-		if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
-			status = acpi_ds_get_buffer_field_arguments(obj_desc);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-		}
-	} else
-	    if ((ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REGION_FIELD)
-		&& (obj_desc->field.region_obj->region.space_id ==
-		    ACPI_ADR_SPACE_SMBUS)) {
-		/*
-		 * This is an SMBus read.  We must create a buffer to hold the data
-		 * and directly access the region handler.
-		 */
-		buffer_desc =
-		    acpi_ut_create_buffer_object(ACPI_SMBUS_BUFFER_SIZE);
-		if (!buffer_desc) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		/* Lock entire transaction if requested */
-
-		acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
-
-		/*
-		 * Perform the read.
-	 * Note: SMBus protocol value is passed in upper 16-bits of Function
-		 */
-		status = acpi_ex_access_region(obj_desc, 0,
-					       ACPI_CAST_PTR(acpi_integer,
-							     buffer_desc->
-							     buffer.pointer),
-					       ACPI_READ | (obj_desc->field.
-							    attribute << 16));
-		acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
-		goto exit;
-	}
-
-	/*
-	 * Allocate a buffer for the contents of the field.
-	 *
-	 * If the field is larger than the size of an acpi_integer, create
-	 * a BUFFER to hold it.  Otherwise, use an INTEGER.  This allows
-	 * the use of arithmetic operators on the returned value if the
-	 * field size is equal to or smaller than an Integer.
-	 *
-	 * Note: Field.length is in bits.
-	 */
-	length =
-	    (acpi_size) ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
-	if (length > acpi_gbl_integer_byte_width) {
-
-		/* Field is too large for an Integer, create a Buffer instead */
-
-		buffer_desc = acpi_ut_create_buffer_object(length);
-		if (!buffer_desc) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-		buffer = buffer_desc->buffer.pointer;
-	} else {
-		/* Field will fit within an Integer (normal case) */
-
-		buffer_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-		if (!buffer_desc) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		length = acpi_gbl_integer_byte_width;
-		buffer_desc->integer.value = 0;
-		buffer = &buffer_desc->integer.value;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-			  "FieldRead [TO]:   Obj %p, Type %X, Buf %p, ByteLen %X\n",
-			  obj_desc, ACPI_GET_OBJECT_TYPE(obj_desc), buffer,
-			  (u32) length));
-	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-			  "FieldRead [FROM]: BitLen %X, BitOff %X, ByteOff %X\n",
-			  obj_desc->common_field.bit_length,
-			  obj_desc->common_field.start_field_bit_offset,
-			  obj_desc->common_field.base_byte_offset));
-
-	/* Lock entire transaction if requested */
-
-	acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
-
-	/* Read from the field */
-
-	status = acpi_ex_extract_from_field(obj_desc, buffer, (u32) length);
-	acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
-
-      exit:
-	if (ACPI_FAILURE(status)) {
-		acpi_ut_remove_reference(buffer_desc);
-	} else {
-		*ret_buffer_desc = buffer_desc;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_write_data_to_field
- *
- * PARAMETERS:  source_desc         - Contains data to write
- *              obj_desc            - The named field
- *              result_desc         - Where the return value is returned, if any
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Write to a named field
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
-			    union acpi_operand_object *obj_desc,
-			    union acpi_operand_object **result_desc)
-{
-	acpi_status status;
-	u32 length;
-	void *buffer;
-	union acpi_operand_object *buffer_desc;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);
-
-	/* Parameter validation */
-
-	if (!source_desc || !obj_desc) {
-		return_ACPI_STATUS(AE_AML_NO_OPERAND);
-	}
-
-	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_BUFFER_FIELD) {
-		/*
-		 * If the buffer_field arguments have not been previously evaluated,
-		 * evaluate them now and save the results.
-		 */
-		if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
-			status = acpi_ds_get_buffer_field_arguments(obj_desc);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-		}
-	} else
-	    if ((ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REGION_FIELD)
-		&& (obj_desc->field.region_obj->region.space_id ==
-		    ACPI_ADR_SPACE_SMBUS)) {
-		/*
-		 * This is an SMBus write.  We will bypass the entire field mechanism
-		 * and hand off the buffer directly to the handler.
-		 *
-		 * Source must be a buffer of sufficient size (ACPI_SMBUS_BUFFER_SIZE).
-		 */
-		if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_BUFFER) {
-			ACPI_ERROR((AE_INFO,
-				    "SMBus write requires Buffer, found type %s",
-				    acpi_ut_get_object_type_name(source_desc)));
-
-			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-		}
-
-		if (source_desc->buffer.length < ACPI_SMBUS_BUFFER_SIZE) {
-			ACPI_ERROR((AE_INFO,
-				    "SMBus write requires Buffer of length %X, found length %X",
-				    ACPI_SMBUS_BUFFER_SIZE,
-				    source_desc->buffer.length));
-
-			return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
-		}
-
-		buffer_desc =
-		    acpi_ut_create_buffer_object(ACPI_SMBUS_BUFFER_SIZE);
-		if (!buffer_desc) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		buffer = buffer_desc->buffer.pointer;
-		ACPI_MEMCPY(buffer, source_desc->buffer.pointer,
-			    ACPI_SMBUS_BUFFER_SIZE);
-
-		/* Lock entire transaction if requested */
-
-		acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
-
-		/*
-		 * Perform the write (returns status and perhaps data in the
-		 * same buffer)
-		 * Note: SMBus protocol type is passed in upper 16-bits of Function.
-		 */
-		status = acpi_ex_access_region(obj_desc, 0,
-					       (acpi_integer *) buffer,
-					       ACPI_WRITE | (obj_desc->field.
-							     attribute << 16));
-		acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
-
-		*result_desc = buffer_desc;
-		return_ACPI_STATUS(status);
-	}
-
-	/* Get a pointer to the data to be written */
-
-	switch (ACPI_GET_OBJECT_TYPE(source_desc)) {
-	case ACPI_TYPE_INTEGER:
-		buffer = &source_desc->integer.value;
-		length = sizeof(source_desc->integer.value);
-		break;
-
-	case ACPI_TYPE_BUFFER:
-		buffer = source_desc->buffer.pointer;
-		length = source_desc->buffer.length;
-		break;
-
-	case ACPI_TYPE_STRING:
-		buffer = source_desc->string.pointer;
-		length = source_desc->string.length;
-		break;
-
-	default:
-		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-			  "FieldWrite [FROM]: Obj %p (%s:%X), Buf %p, ByteLen %X\n",
-			  source_desc,
-			  acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE
-						(source_desc)),
-			  ACPI_GET_OBJECT_TYPE(source_desc), buffer, length));
-
-	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-			  "FieldWrite [TO]:   Obj %p (%s:%X), BitLen %X, BitOff %X, ByteOff %X\n",
-			  obj_desc,
-			  acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE(obj_desc)),
-			  ACPI_GET_OBJECT_TYPE(obj_desc),
-			  obj_desc->common_field.bit_length,
-			  obj_desc->common_field.start_field_bit_offset,
-			  obj_desc->common_field.base_byte_offset));
-
-	/* Lock entire transaction if requested */
-
-	acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
-
-	/* Write to the field */
-
-	status = acpi_ex_insert_into_field(obj_desc, buffer, length);
-	acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
-
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/executer/exfldio.c b/drivers/acpi/executer/exfldio.c
deleted file mode 100644
index 9ff9d1f..0000000
--- a/drivers/acpi/executer/exfldio.c
+++ /dev/null
@@ -1,957 +0,0 @@
-/******************************************************************************
- *
- * Module Name: exfldio - Aml Field I/O
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-#include <acpi/acevents.h>
-#include <acpi/acdispat.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exfldio")
-
-/* Local prototypes */
-static acpi_status
-acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
-		       u32 field_datum_byte_offset,
-		       acpi_integer * value, u32 read_write);
-
-static u8
-acpi_ex_register_overflow(union acpi_operand_object *obj_desc,
-			  acpi_integer value);
-
-static acpi_status
-acpi_ex_setup_region(union acpi_operand_object *obj_desc,
-		     u32 field_datum_byte_offset);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_setup_region
- *
- * PARAMETERS:  obj_desc                - Field to be read or written
- *              field_datum_byte_offset - Byte offset of this datum within the
- *                                        parent field
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Common processing for acpi_ex_extract_from_field and
- *              acpi_ex_insert_into_field. Initialize the Region if necessary and
- *              validate the request.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ex_setup_region(union acpi_operand_object *obj_desc,
-		     u32 field_datum_byte_offset)
-{
-	acpi_status status = AE_OK;
-	union acpi_operand_object *rgn_desc;
-
-	ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset);
-
-	rgn_desc = obj_desc->common_field.region_obj;
-
-	/* We must have a valid region */
-
-	if (ACPI_GET_OBJECT_TYPE(rgn_desc) != ACPI_TYPE_REGION) {
-		ACPI_ERROR((AE_INFO, "Needed Region, found type %X (%s)",
-			    ACPI_GET_OBJECT_TYPE(rgn_desc),
-			    acpi_ut_get_object_type_name(rgn_desc)));
-
-		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-	}
-
-	/*
-	 * If the Region Address and Length have not been previously evaluated,
-	 * evaluate them now and save the results.
-	 */
-	if (!(rgn_desc->common.flags & AOPOBJ_DATA_VALID)) {
-		status = acpi_ds_get_region_arguments(rgn_desc);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/* Exit if Address/Length have been disallowed by the host OS */
-
-	if (rgn_desc->common.flags & AOPOBJ_INVALID) {
-		return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS);
-	}
-
-	/*
-	 * Exit now for SMBus address space, it has a non-linear address space
-	 * and the request cannot be directly validated
-	 */
-	if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS) {
-
-		/* SMBus has a non-linear address space */
-
-		return_ACPI_STATUS(AE_OK);
-	}
-#ifdef ACPI_UNDER_DEVELOPMENT
-	/*
-	 * If the Field access is any_acc, we can now compute the optimal
-	 * access (because we know the length of the parent region)
-	 */
-	if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-#endif
-
-	/*
-	 * Validate the request.  The entire request from the byte offset for a
-	 * length of one field datum (access width) must fit within the region.
-	 * (Region length is specified in bytes)
-	 */
-	if (rgn_desc->region.length <
-	    (obj_desc->common_field.base_byte_offset +
-	     field_datum_byte_offset +
-	     obj_desc->common_field.access_byte_width)) {
-		if (acpi_gbl_enable_interpreter_slack) {
-			/*
-			 * Slack mode only:  We will go ahead and allow access to this
-			 * field if it is within the region length rounded up to the next
-			 * access width boundary. acpi_size cast for 64-bit compile.
-			 */
-			if (ACPI_ROUND_UP(rgn_desc->region.length,
-					  obj_desc->common_field.
-					  access_byte_width) >=
-			    ((acpi_size) obj_desc->common_field.
-			     base_byte_offset +
-			     obj_desc->common_field.access_byte_width +
-			     field_datum_byte_offset)) {
-				return_ACPI_STATUS(AE_OK);
-			}
-		}
-
-		if (rgn_desc->region.length <
-		    obj_desc->common_field.access_byte_width) {
-			/*
-			 * This is the case where the access_type (acc_word, etc.) is wider
-			 * than the region itself.  For example, a region of length one
-			 * byte, and a field with Dword access specified.
-			 */
-			ACPI_ERROR((AE_INFO,
-				    "Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)",
-				    acpi_ut_get_node_name(obj_desc->
-							  common_field.node),
-				    obj_desc->common_field.access_byte_width,
-				    acpi_ut_get_node_name(rgn_desc->region.
-							  node),
-				    rgn_desc->region.length));
-		}
-
-		/*
-		 * Offset rounded up to next multiple of field width
-		 * exceeds region length, indicate an error
-		 */
-		ACPI_ERROR((AE_INFO,
-			    "Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)",
-			    acpi_ut_get_node_name(obj_desc->common_field.node),
-			    obj_desc->common_field.base_byte_offset,
-			    field_datum_byte_offset,
-			    obj_desc->common_field.access_byte_width,
-			    acpi_ut_get_node_name(rgn_desc->region.node),
-			    rgn_desc->region.length));
-
-		return_ACPI_STATUS(AE_AML_REGION_LIMIT);
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_access_region
- *
- * PARAMETERS:  obj_desc                - Field to be read
- *              field_datum_byte_offset - Byte offset of this datum within the
- *                                        parent field
- *              Value                   - Where to store value (must be at
- *                                        least the size of acpi_integer)
- *              Function                - Read or Write flag plus other region-
- *                                        dependent flags
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Read or Write a single field datum to an Operation Region.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_access_region(union acpi_operand_object *obj_desc,
-		      u32 field_datum_byte_offset,
-		      acpi_integer * value, u32 function)
-{
-	acpi_status status;
-	union acpi_operand_object *rgn_desc;
-	acpi_physical_address address;
-
-	ACPI_FUNCTION_TRACE(ex_access_region);
-
-	/*
-	 * Ensure that the region operands are fully evaluated and verify
-	 * the validity of the request
-	 */
-	status = acpi_ex_setup_region(obj_desc, field_datum_byte_offset);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * The physical address of this field datum is:
-	 *
-	 * 1) The base of the region, plus
-	 * 2) The base offset of the field, plus
-	 * 3) The current offset into the field
-	 */
-	rgn_desc = obj_desc->common_field.region_obj;
-	address = rgn_desc->region.address +
-	    obj_desc->common_field.base_byte_offset + field_datum_byte_offset;
-
-	if ((function & ACPI_IO_MASK) == ACPI_READ) {
-		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "[READ]"));
-	} else {
-		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "[WRITE]"));
-	}
-
-	ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
-			      " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
-			      acpi_ut_get_region_name(rgn_desc->region.
-						      space_id),
-			      rgn_desc->region.space_id,
-			      obj_desc->common_field.access_byte_width,
-			      obj_desc->common_field.base_byte_offset,
-			      field_datum_byte_offset, ACPI_CAST_PTR(void,
-								     address)));
-
-	/* Invoke the appropriate address_space/op_region handler */
-
-	status = acpi_ev_address_space_dispatch(rgn_desc, function,
-						address,
-						ACPI_MUL_8(obj_desc->
-							   common_field.
-							   access_byte_width),
-						value);
-
-	if (ACPI_FAILURE(status)) {
-		if (status == AE_NOT_IMPLEMENTED) {
-			ACPI_ERROR((AE_INFO,
-				    "Region %s(%X) not implemented",
-				    acpi_ut_get_region_name(rgn_desc->region.
-							    space_id),
-				    rgn_desc->region.space_id));
-		} else if (status == AE_NOT_EXIST) {
-			ACPI_ERROR((AE_INFO,
-				    "Region %s(%X) has no handler",
-				    acpi_ut_get_region_name(rgn_desc->region.
-							    space_id),
-				    rgn_desc->region.space_id));
-		}
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_register_overflow
- *
- * PARAMETERS:  obj_desc                - Register(Field) to be written
- *              Value                   - Value to be stored
- *
- * RETURN:      TRUE if value overflows the field, FALSE otherwise
- *
- * DESCRIPTION: Check if a value is out of range of the field being written.
- *              Used to check if the values written to Index and Bank registers
- *              are out of range.  Normally, the value is simply truncated
- *              to fit the field, but this case is most likely a serious
- *              coding error in the ASL.
- *
- ******************************************************************************/
-
-static u8
-acpi_ex_register_overflow(union acpi_operand_object *obj_desc,
-			  acpi_integer value)
-{
-
-	if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) {
-		/*
-		 * The field is large enough to hold the maximum integer, so we can
-		 * never overflow it.
-		 */
-		return (FALSE);
-	}
-
-	if (value >= ((acpi_integer) 1 << obj_desc->common_field.bit_length)) {
-		/*
-		 * The Value is larger than the maximum value that can fit into
-		 * the register.
-		 */
-		return (TRUE);
-	}
-
-	/* The Value will fit into the field with no truncation */
-
-	return (FALSE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_field_datum_io
- *
- * PARAMETERS:  obj_desc                - Field to be read
- *              field_datum_byte_offset - Byte offset of this datum within the
- *                                        parent field
- *              Value                   - Where to store value (must be 64 bits)
- *              read_write              - Read or Write flag
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Read or Write a single datum of a field.  The field_type is
- *              demultiplexed here to handle the different types of fields
- *              (buffer_field, region_field, index_field, bank_field)
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
-		       u32 field_datum_byte_offset,
-		       acpi_integer * value, u32 read_write)
-{
-	acpi_status status;
-	acpi_integer local_value;
-
-	ACPI_FUNCTION_TRACE_U32(ex_field_datum_io, field_datum_byte_offset);
-
-	if (read_write == ACPI_READ) {
-		if (!value) {
-			local_value = 0;
-
-			/* To support reads without saving return value */
-			value = &local_value;
-		}
-
-		/* Clear the entire return buffer first, [Very Important!] */
-
-		*value = 0;
-	}
-
-	/*
-	 * The four types of fields are:
-	 *
-	 * buffer_field - Read/write from/to a Buffer
-	 * region_field - Read/write from/to an Operation Region.
-	 * bank_field  - Write to a Bank Register, then read/write from/to an
-	 *               operation_region
-	 * index_field - Write to an Index Register, then read/write from/to a
-	 *               Data Register
-	 */
-	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-	case ACPI_TYPE_BUFFER_FIELD:
-		/*
-		 * If the buffer_field arguments have not been previously evaluated,
-		 * evaluate them now and save the results.
-		 */
-		if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
-			status = acpi_ds_get_buffer_field_arguments(obj_desc);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-		}
-
-		if (read_write == ACPI_READ) {
-			/*
-			 * Copy the data from the source buffer.
-			 * Length is the field width in bytes.
-			 */
-			ACPI_MEMCPY(value,
-				    (obj_desc->buffer_field.buffer_obj)->buffer.
-				    pointer +
-				    obj_desc->buffer_field.base_byte_offset +
-				    field_datum_byte_offset,
-				    obj_desc->common_field.access_byte_width);
-		} else {
-			/*
-			 * Copy the data to the target buffer.
-			 * Length is the field width in bytes.
-			 */
-			ACPI_MEMCPY((obj_desc->buffer_field.buffer_obj)->buffer.
-				    pointer +
-				    obj_desc->buffer_field.base_byte_offset +
-				    field_datum_byte_offset, value,
-				    obj_desc->common_field.access_byte_width);
-		}
-
-		status = AE_OK;
-		break;
-
-	case ACPI_TYPE_LOCAL_BANK_FIELD:
-
-		/*
-		 * Ensure that the bank_value is not beyond the capacity of
-		 * the register
-		 */
-		if (acpi_ex_register_overflow(obj_desc->bank_field.bank_obj,
-					      (acpi_integer) obj_desc->
-					      bank_field.value)) {
-			return_ACPI_STATUS(AE_AML_REGISTER_LIMIT);
-		}
-
-		/*
-		 * For bank_fields, we must write the bank_value to the bank_register
-		 * (itself a region_field) before we can access the data.
-		 */
-		status =
-		    acpi_ex_insert_into_field(obj_desc->bank_field.bank_obj,
-					      &obj_desc->bank_field.value,
-					      sizeof(obj_desc->bank_field.
-						     value));
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		/*
-		 * Now that the Bank has been selected, fall through to the
-		 * region_field case and write the datum to the Operation Region
-		 */
-
-		/*lint -fallthrough */
-
-	case ACPI_TYPE_LOCAL_REGION_FIELD:
-		/*
-		 * For simple region_fields, we just directly access the owning
-		 * Operation Region.
-		 */
-		status =
-		    acpi_ex_access_region(obj_desc, field_datum_byte_offset,
-					  value, read_write);
-		break;
-
-	case ACPI_TYPE_LOCAL_INDEX_FIELD:
-
-		/*
-		 * Ensure that the index_value is not beyond the capacity of
-		 * the register
-		 */
-		if (acpi_ex_register_overflow(obj_desc->index_field.index_obj,
-					      (acpi_integer) obj_desc->
-					      index_field.value)) {
-			return_ACPI_STATUS(AE_AML_REGISTER_LIMIT);
-		}
-
-		/* Write the index value to the index_register (itself a region_field) */
-
-		field_datum_byte_offset += obj_desc->index_field.value;
-
-		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-				  "Write to Index Register: Value %8.8X\n",
-				  field_datum_byte_offset));
-
-		status =
-		    acpi_ex_insert_into_field(obj_desc->index_field.index_obj,
-					      &field_datum_byte_offset,
-					      sizeof(field_datum_byte_offset));
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-				  "I/O to Data Register: ValuePtr %p\n",
-				  value));
-
-		if (read_write == ACPI_READ) {
-
-			/* Read the datum from the data_register */
-
-			status =
-			    acpi_ex_extract_from_field(obj_desc->index_field.
-						       data_obj, value,
-						       sizeof(acpi_integer));
-		} else {
-			/* Write the datum to the data_register */
-
-			status =
-			    acpi_ex_insert_into_field(obj_desc->index_field.
-						      data_obj, value,
-						      sizeof(acpi_integer));
-		}
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %X",
-			    ACPI_GET_OBJECT_TYPE(obj_desc)));
-		status = AE_AML_INTERNAL;
-		break;
-	}
-
-	if (ACPI_SUCCESS(status)) {
-		if (read_write == ACPI_READ) {
-			ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-					  "Value Read %8.8X%8.8X, Width %d\n",
-					  ACPI_FORMAT_UINT64(*value),
-					  obj_desc->common_field.
-					  access_byte_width));
-		} else {
-			ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-					  "Value Written %8.8X%8.8X, Width %d\n",
-					  ACPI_FORMAT_UINT64(*value),
-					  obj_desc->common_field.
-					  access_byte_width));
-		}
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_write_with_update_rule
- *
- * PARAMETERS:  obj_desc                - Field to be written
- *              Mask                    - bitmask within field datum
- *              field_value             - Value to write
- *              field_datum_byte_offset - Offset of datum within field
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Apply the field update rule to a field write
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
-			       acpi_integer mask,
-			       acpi_integer field_value,
-			       u32 field_datum_byte_offset)
-{
-	acpi_status status = AE_OK;
-	acpi_integer merged_value;
-	acpi_integer current_value;
-
-	ACPI_FUNCTION_TRACE_U32(ex_write_with_update_rule, mask);
-
-	/* Start with the new bits  */
-
-	merged_value = field_value;
-
-	/* If the mask is all ones, we don't need to worry about the update rule */
-
-	if (mask != ACPI_INTEGER_MAX) {
-
-		/* Decode the update rule */
-
-		switch (obj_desc->common_field.
-			field_flags & AML_FIELD_UPDATE_RULE_MASK) {
-		case AML_FIELD_UPDATE_PRESERVE:
-			/*
-			 * Check if update rule needs to be applied (not if mask is all
-			 * ones). The left shift drops the bits we want to ignore.
-			 */
-			if ((~mask << (ACPI_MUL_8(sizeof(mask)) -
-				       ACPI_MUL_8(obj_desc->common_field.
-						  access_byte_width))) != 0) {
-				/*
-				 * Read the current contents of the byte/word/dword containing
-				 * the field, and merge with the new field value.
-				 */
-				status =
-				    acpi_ex_field_datum_io(obj_desc,
-							   field_datum_byte_offset,
-							   &current_value,
-							   ACPI_READ);
-				if (ACPI_FAILURE(status)) {
-					return_ACPI_STATUS(status);
-				}
-
-				merged_value |= (current_value & ~mask);
-			}
-			break;
-
-		case AML_FIELD_UPDATE_WRITE_AS_ONES:
-
-			/* Set positions outside the field to all ones */
-
-			merged_value |= ~mask;
-			break;
-
-		case AML_FIELD_UPDATE_WRITE_AS_ZEROS:
-
-			/* Set positions outside the field to all zeros */
-
-			merged_value &= mask;
-			break;
-
-		default:
-
-			ACPI_ERROR((AE_INFO,
-				    "Unknown UpdateRule value: %X",
-				    (obj_desc->common_field.
-				     field_flags &
-				     AML_FIELD_UPDATE_RULE_MASK)));
-			return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
-		}
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-			  "Mask %8.8X%8.8X, DatumOffset %X, Width %X, Value %8.8X%8.8X, MergedValue %8.8X%8.8X\n",
-			  ACPI_FORMAT_UINT64(mask),
-			  field_datum_byte_offset,
-			  obj_desc->common_field.access_byte_width,
-			  ACPI_FORMAT_UINT64(field_value),
-			  ACPI_FORMAT_UINT64(merged_value)));
-
-	/* Write the merged value */
-
-	status = acpi_ex_field_datum_io(obj_desc, field_datum_byte_offset,
-					&merged_value, ACPI_WRITE);
-
-	return_ACPI_STATUS(status);
-}
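
The Preserve case above is a read-modify-write: only the bits selected by the mask come from the new field value, and everything else is kept from the datum just read back. A minimal standalone sketch of that merge, using invented names and a fixed field position rather than the ACPICA field descriptors:

	#include <stdint.h>
	#include <stdio.h>

	/* Bits inside the mask come from new_bits, bits outside it are preserved. */
	static uint64_t preserve_merge(uint64_t current, uint64_t new_bits,
				       uint64_t mask)
	{
		return (new_bits & mask) | (current & ~mask);
	}

	int main(void)
	{
		uint64_t current = 0xABCD1234;	/* datum read back from the region */
		uint64_t mask = 0x000000F0;	/* field occupies bits 4..7 */
		uint64_t value = 0x5;		/* field value to be stored */

		printf("0x%llx\n",	/* prints 0xabcd1254 */
		       (unsigned long long)preserve_merge(current, value << 4, mask));
		return 0;
	}
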
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_extract_from_field
- *
- * PARAMETERS:  obj_desc            - Field to be read
- *              Buffer              - Where to store the field data
- *              buffer_length       - Length of Buffer
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Retrieve the current value of the given field
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
-			   void *buffer, u32 buffer_length)
-{
-	acpi_status status;
-	acpi_integer raw_datum;
-	acpi_integer merged_datum;
-	u32 field_offset = 0;
-	u32 buffer_offset = 0;
-	u32 buffer_tail_bits;
-	u32 datum_count;
-	u32 field_datum_count;
-	u32 i;
-
-	ACPI_FUNCTION_TRACE(ex_extract_from_field);
-
-	/* Validate target buffer and clear it */
-
-	if (buffer_length <
-	    ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
-		ACPI_ERROR((AE_INFO,
-			    "Field size %X (bits) is too large for buffer (%X)",
-			    obj_desc->common_field.bit_length, buffer_length));
-
-		return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
-	}
-	ACPI_MEMSET(buffer, 0, buffer_length);
-
-	/* Compute the number of datums (access width data items) */
-
-	datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
-				       obj_desc->common_field.access_bit_width);
-	field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
-					     obj_desc->common_field.
-					     start_field_bit_offset,
-					     obj_desc->common_field.
-					     access_bit_width);
-
-	/* Priming read from the field */
-
-	status =
-	    acpi_ex_field_datum_io(obj_desc, field_offset, &raw_datum,
-				   ACPI_READ);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-	merged_datum =
-	    raw_datum >> obj_desc->common_field.start_field_bit_offset;
-
-	/* Read the rest of the field */
-
-	for (i = 1; i < field_datum_count; i++) {
-
-		/* Get next input datum from the field */
-
-		field_offset += obj_desc->common_field.access_byte_width;
-		status = acpi_ex_field_datum_io(obj_desc, field_offset,
-						&raw_datum, ACPI_READ);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		/*
-		 * Merge with previous datum if necessary.
-		 *
-		 * Note: Before the shift, check if the shift value will be larger than
-		 * the integer size. If so, there is no need to perform the operation.
-		 * This avoids the differences in behavior between different compilers
-		 * concerning shift values larger than the target data width.
-		 */
-		if ((obj_desc->common_field.access_bit_width -
-		     obj_desc->common_field.start_field_bit_offset) <
-		    ACPI_INTEGER_BIT_SIZE) {
-			merged_datum |=
-			    raw_datum << (obj_desc->common_field.
-					  access_bit_width -
-					  obj_desc->common_field.
-					  start_field_bit_offset);
-		}
-
-		if (i == datum_count) {
-			break;
-		}
-
-		/* Write merged datum to target buffer */
-
-		ACPI_MEMCPY(((char *)buffer) + buffer_offset, &merged_datum,
-			    ACPI_MIN(obj_desc->common_field.access_byte_width,
-				     buffer_length - buffer_offset));
-
-		buffer_offset += obj_desc->common_field.access_byte_width;
-		merged_datum =
-		    raw_datum >> obj_desc->common_field.start_field_bit_offset;
-	}
-
-	/* Mask off any extra bits in the last datum */
-
-	buffer_tail_bits = obj_desc->common_field.bit_length %
-	    obj_desc->common_field.access_bit_width;
-	if (buffer_tail_bits) {
-		merged_datum &= ACPI_MASK_BITS_ABOVE(buffer_tail_bits);
-	}
-
-	/* Write the last datum to the buffer */
-
-	ACPI_MEMCPY(((char *)buffer) + buffer_offset, &merged_datum,
-		    ACPI_MIN(obj_desc->common_field.access_byte_width,
-			     buffer_length - buffer_offset));
-
-	return_ACPI_STATUS(AE_OK);
-}
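
When a field does not start on an access-width boundary, each output datum above is assembled from two neighbouring reads: the low bits shifted down from the first datum and the high bits shifted up from the next one. A byte-wide sketch of that merge, with simplified names chosen for illustration only:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Reassemble a field of bit_length bits starting at start_bit in datum0
	 * and continuing into datum1. Assumes an 8-bit access width and
	 * 0 < start_bit < 8, purely for illustration.
	 */
	static uint8_t extract_field(uint8_t datum0, uint8_t datum1,
				     unsigned int start_bit, unsigned int bit_length)
	{
		uint8_t merged = (uint8_t)((datum0 >> start_bit) |
					   (datum1 << (8 - start_bit)));

		return merged & (uint8_t)((1u << bit_length) - 1);	/* mask tail bits */
	}

	int main(void)
	{
		/* A 6-bit field starting at bit 5 spans two bytes of the region. */
		printf("0x%x\n", extract_field(0xA5, 0x3C, 5, 6));	/* prints 0x25 */
		return 0;
	}
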
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_insert_into_field
- *
- * PARAMETERS:  obj_desc            - Field to be written
- *              Buffer              - Data to be written
- *              buffer_length       - Length of Buffer
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Store the Buffer contents into the given field
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
-			  void *buffer, u32 buffer_length)
-{
-	acpi_status status;
-	acpi_integer mask;
-	acpi_integer width_mask;
-	acpi_integer merged_datum;
-	acpi_integer raw_datum = 0;
-	u32 field_offset = 0;
-	u32 buffer_offset = 0;
-	u32 buffer_tail_bits;
-	u32 datum_count;
-	u32 field_datum_count;
-	u32 i;
-	u32 required_length;
-	void *new_buffer;
-
-	ACPI_FUNCTION_TRACE(ex_insert_into_field);
-
-	/* Validate input buffer */
-
-	new_buffer = NULL;
-	required_length =
-	    ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length);
-	/*
-	 * We must have a buffer that is at least as long as the field
-	 * we are writing to.  This is because individual fields are
-	 * indivisible and partial writes are not supported -- as per
-	 * the ACPI specification.
-	 */
-	if (buffer_length < required_length) {
-
-		/* We need to create a new buffer */
-
-		new_buffer = ACPI_ALLOCATE_ZEROED(required_length);
-		if (!new_buffer) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		/*
-		 * Copy the original data to the new buffer, starting
-		 * at Byte zero.  All unused (upper) bytes of the
-		 * buffer will be 0.
-		 */
-		ACPI_MEMCPY((char *)new_buffer, (char *)buffer, buffer_length);
-		buffer = new_buffer;
-		buffer_length = required_length;
-	}
-
-	/*
-	 * Create the bitmasks used for bit insertion.
-	 * Note: This if/else is used to bypass compiler differences with the
-	 * shift operator
-	 */
-	if (obj_desc->common_field.access_bit_width == ACPI_INTEGER_BIT_SIZE) {
-		width_mask = ACPI_INTEGER_MAX;
-	} else {
-		width_mask =
-		    ACPI_MASK_BITS_ABOVE(obj_desc->common_field.
-					 access_bit_width);
-	}
-
-	mask = width_mask &
-	    ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset);
-
-	/* Compute the number of datums (access width data items) */
-
-	datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
-				       obj_desc->common_field.access_bit_width);
-
-	field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
-					     obj_desc->common_field.
-					     start_field_bit_offset,
-					     obj_desc->common_field.
-					     access_bit_width);
-
-	/* Get initial Datum from the input buffer */
-
-	ACPI_MEMCPY(&raw_datum, buffer,
-		    ACPI_MIN(obj_desc->common_field.access_byte_width,
-			     buffer_length - buffer_offset));
-
-	merged_datum =
-	    raw_datum << obj_desc->common_field.start_field_bit_offset;
-
-	/* Write the entire field */
-
-	for (i = 1; i < field_datum_count; i++) {
-
-		/* Write merged datum to the target field */
-
-		merged_datum &= mask;
-		status = acpi_ex_write_with_update_rule(obj_desc, mask,
-							merged_datum,
-							field_offset);
-		if (ACPI_FAILURE(status)) {
-			goto exit;
-		}
-
-		field_offset += obj_desc->common_field.access_byte_width;
-
-		/*
-		 * Start new output datum by merging with previous input datum
-		 * if necessary.
-		 *
-		 * Note: Before the shift, check if the shift value will be larger than
-		 * the integer size. If so, there is no need to perform the operation.
-		 * This avoids the differences in behavior between different compilers
-		 * concerning shift values larger than the target data width.
-		 */
-		if ((obj_desc->common_field.access_bit_width -
-		     obj_desc->common_field.start_field_bit_offset) <
-		    ACPI_INTEGER_BIT_SIZE) {
-			merged_datum =
-			    raw_datum >> (obj_desc->common_field.
-					  access_bit_width -
-					  obj_desc->common_field.
-					  start_field_bit_offset);
-		} else {
-			merged_datum = 0;
-		}
-
-		mask = width_mask;
-
-		if (i == datum_count) {
-			break;
-		}
-
-		/* Get the next input datum from the buffer */
-
-		buffer_offset += obj_desc->common_field.access_byte_width;
-		ACPI_MEMCPY(&raw_datum, ((char *)buffer) + buffer_offset,
-			    ACPI_MIN(obj_desc->common_field.access_byte_width,
-				     buffer_length - buffer_offset));
-		merged_datum |=
-		    raw_datum << obj_desc->common_field.start_field_bit_offset;
-	}
-
-	/* Mask off any extra bits in the last datum */
-
-	buffer_tail_bits = (obj_desc->common_field.bit_length +
-			    obj_desc->common_field.start_field_bit_offset) %
-	    obj_desc->common_field.access_bit_width;
-	if (buffer_tail_bits) {
-		mask &= ACPI_MASK_BITS_ABOVE(buffer_tail_bits);
-	}
-
-	/* Write the last datum to the field */
-
-	merged_datum &= mask;
-	status = acpi_ex_write_with_update_rule(obj_desc,
-						mask, merged_datum,
-						field_offset);
-
-      exit:
-	/* Free temporary buffer if we used one */
-
-	if (new_buffer) {
-		ACPI_FREE(new_buffer);
-	}
-	return_ACPI_STATUS(status);
-}
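
The datum_count/field_datum_count pair computed in both the extract and insert paths is a plain round-up division: the first counts access-width datums needed for the data itself, the second counts datums the field actually touches in the region, which can be one more when the field starts at a non-zero bit offset. A small sketch with placeholder names, not the ACPI_ROUND_UP_TO macro itself:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t round_up_to(uint32_t value, uint32_t boundary)
	{
		return (value + boundary - 1) / boundary;
	}

	int main(void)
	{
		uint32_t bit_length = 32;	/* field width in bits */
		uint32_t start_bit = 4;		/* offset within the first datum */
		uint32_t access_bits = 16;	/* access width in bits */

		printf("datum_count       = %u\n",
		       round_up_to(bit_length, access_bits));			/* 2 */
		printf("field_datum_count = %u\n",
		       round_up_to(bit_length + start_bit, access_bits));	/* 3 */
		return 0;
	}
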
diff --git a/drivers/acpi/executer/exmisc.c b/drivers/acpi/executer/exmisc.c
deleted file mode 100644
index efb1913..0000000
--- a/drivers/acpi/executer/exmisc.c
+++ /dev/null
@@ -1,725 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-#include <acpi/amlresrc.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exmisc")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_get_object_reference
- *
- * PARAMETERS:  obj_desc            - Create a reference to this object
- *              return_desc         - Where to store the reference
- *              walk_state          - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Obtain and return a "reference" to the target object
- *              Common code for the ref_of_op and the cond_ref_of_op.
- *
- ******************************************************************************/
-acpi_status
-acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
-			     union acpi_operand_object **return_desc,
-			     struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object *reference_obj;
-	union acpi_operand_object *referenced_obj;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_get_object_reference, obj_desc);
-
-	*return_desc = NULL;
-
-	switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
-	case ACPI_DESC_TYPE_OPERAND:
-
-		if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_LOCAL_REFERENCE) {
-			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-		}
-
-		/*
-		 * Must be a reference to a Local or Arg
-		 */
-		switch (obj_desc->reference.class) {
-		case ACPI_REFCLASS_LOCAL:
-		case ACPI_REFCLASS_ARG:
-		case ACPI_REFCLASS_DEBUG:
-
-			/* The referenced object is the pseudo-node for the local/arg */
-
-			referenced_obj = obj_desc->reference.object;
-			break;
-
-		default:
-
-			ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
-				    obj_desc->reference.class));
-			return_ACPI_STATUS(AE_AML_INTERNAL);
-		}
-		break;
-
-	case ACPI_DESC_TYPE_NAMED:
-
-		/*
-		 * A named reference that has already been resolved to a Node
-		 */
-		referenced_obj = obj_desc;
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Invalid descriptor type %X",
-			    ACPI_GET_DESCRIPTOR_TYPE(obj_desc)));
-		return_ACPI_STATUS(AE_TYPE);
-	}
-
-	/* Create a new reference object */
-
-	reference_obj =
-	    acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE);
-	if (!reference_obj) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	reference_obj->reference.class = ACPI_REFCLASS_REFOF;
-	reference_obj->reference.object = referenced_obj;
-	*return_desc = reference_obj;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-			  "Object %p Type [%s], returning Reference %p\n",
-			  obj_desc, acpi_ut_get_object_type_name(obj_desc),
-			  *return_desc));
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_concat_template
- *
- * PARAMETERS:  Operand0            - First source object
- *              Operand1            - Second source object
- *              actual_return_desc  - Where to place the return object
- *              walk_state          - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Concatenate two resource templates
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_concat_template(union acpi_operand_object *operand0,
-			union acpi_operand_object *operand1,
-			union acpi_operand_object **actual_return_desc,
-			struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	union acpi_operand_object *return_desc;
-	u8 *new_buf;
-	u8 *end_tag;
-	acpi_size length0;
-	acpi_size length1;
-	acpi_size new_length;
-
-	ACPI_FUNCTION_TRACE(ex_concat_template);
-
-	/*
-	 * Find the end_tag descriptor in each resource template.
-	 * Note1: returned pointers point TO the end_tag, not past it.
-	 * Note2: zero-length buffers are allowed; treated like one end_tag
-	 */
-
-	/* Get the length of the first resource template */
-
-	status = acpi_ut_get_resource_end_tag(operand0, &end_tag);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	length0 = ACPI_PTR_DIFF(end_tag, operand0->buffer.pointer);
-
-	/* Get the length of the second resource template */
-
-	status = acpi_ut_get_resource_end_tag(operand1, &end_tag);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	length1 = ACPI_PTR_DIFF(end_tag, operand1->buffer.pointer);
-
-	/* Combine both lengths, minimum size will be 2 for end_tag */
-
-	new_length = length0 + length1 + sizeof(struct aml_resource_end_tag);
-
-	/* Create a new buffer object for the result (with one end_tag) */
-
-	return_desc = acpi_ut_create_buffer_object(new_length);
-	if (!return_desc) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/*
-	 * Copy the templates to the new buffer, 0 first, then 1 follows. One
-	 * end_tag descriptor is copied from Operand1.
-	 */
-	new_buf = return_desc->buffer.pointer;
-	ACPI_MEMCPY(new_buf, operand0->buffer.pointer, length0);
-	ACPI_MEMCPY(new_buf + length0, operand1->buffer.pointer, length1);
-
-	/* Insert end_tag and set the checksum to zero (zero means "ignore checksum") */
-
-	new_buf[new_length - 1] = 0;
-	new_buf[new_length - 2] = ACPI_RESOURCE_NAME_END_TAG | 1;
-
-	/* Return the completed resource template */
-
-	*actual_return_desc = return_desc;
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_do_concatenate
- *
- * PARAMETERS:  Operand0            - First source object
- *              Operand1            - Second source object
- *              actual_return_desc  - Where to place the return object
- *              walk_state          - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Concatenate two objects OF THE SAME TYPE.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_do_concatenate(union acpi_operand_object *operand0,
-		       union acpi_operand_object *operand1,
-		       union acpi_operand_object **actual_return_desc,
-		       struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object *local_operand1 = operand1;
-	union acpi_operand_object *return_desc;
-	char *new_buf;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ex_do_concatenate);
-
-	/*
-	 * Convert the second operand if necessary.  The first operand
-	 * determines the type of the second operand. (See the Data Types
-	 * section of the ACPI specification.)  Both object types are
-	 * guaranteed to be either Integer/String/Buffer by the operand
-	 * resolution mechanism.
-	 */
-	switch (ACPI_GET_OBJECT_TYPE(operand0)) {
-	case ACPI_TYPE_INTEGER:
-		status =
-		    acpi_ex_convert_to_integer(operand1, &local_operand1, 16);
-		break;
-
-	case ACPI_TYPE_STRING:
-		status = acpi_ex_convert_to_string(operand1, &local_operand1,
-						   ACPI_IMPLICIT_CONVERT_HEX);
-		break;
-
-	case ACPI_TYPE_BUFFER:
-		status = acpi_ex_convert_to_buffer(operand1, &local_operand1);
-		break;
-
-	default:
-		ACPI_ERROR((AE_INFO, "Invalid object type: %X",
-			    ACPI_GET_OBJECT_TYPE(operand0)));
-		status = AE_AML_INTERNAL;
-	}
-
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	/*
-	 * Both operands are now known to be the same object type
-	 * (Both are Integer, String, or Buffer), and we can now perform the
-	 * concatenation.
-	 */
-
-	/*
-	 * There are three cases to handle:
-	 *
-	 * 1) Two Integers concatenated to produce a new Buffer
-	 * 2) Two Strings concatenated to produce a new String
-	 * 3) Two Buffers concatenated to produce a new Buffer
-	 */
-	switch (ACPI_GET_OBJECT_TYPE(operand0)) {
-	case ACPI_TYPE_INTEGER:
-
-		/* Result of two Integers is a Buffer */
-		/* Need enough buffer space for two integers */
-
-		return_desc = acpi_ut_create_buffer_object((acpi_size)
-							   ACPI_MUL_2
-							   (acpi_gbl_integer_byte_width));
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		new_buf = (char *)return_desc->buffer.pointer;
-
-		/* Copy the first integer, LSB first */
-
-		ACPI_MEMCPY(new_buf, &operand0->integer.value,
-			    acpi_gbl_integer_byte_width);
-
-		/* Copy the second integer (LSB first) after the first */
-
-		ACPI_MEMCPY(new_buf + acpi_gbl_integer_byte_width,
-			    &local_operand1->integer.value,
-			    acpi_gbl_integer_byte_width);
-		break;
-
-	case ACPI_TYPE_STRING:
-
-		/* Result of two Strings is a String */
-
-		return_desc = acpi_ut_create_string_object(((acpi_size)
-							    operand0->string.
-							    length +
-							    local_operand1->
-							    string.length));
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		new_buf = return_desc->string.pointer;
-
-		/* Concatenate the strings */
-
-		ACPI_STRCPY(new_buf, operand0->string.pointer);
-		ACPI_STRCPY(new_buf + operand0->string.length,
-			    local_operand1->string.pointer);
-		break;
-
-	case ACPI_TYPE_BUFFER:
-
-		/* Result of two Buffers is a Buffer */
-
-		return_desc = acpi_ut_create_buffer_object(((acpi_size)
-							    operand0->buffer.
-							    length +
-							    local_operand1->
-							    buffer.length));
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		new_buf = (char *)return_desc->buffer.pointer;
-
-		/* Concatenate the buffers */
-
-		ACPI_MEMCPY(new_buf, operand0->buffer.pointer,
-			    operand0->buffer.length);
-		ACPI_MEMCPY(new_buf + operand0->buffer.length,
-			    local_operand1->buffer.pointer,
-			    local_operand1->buffer.length);
-		break;
-
-	default:
-
-		/* Invalid object type, should not happen here */
-
-		ACPI_ERROR((AE_INFO, "Invalid object type: %X",
-			    ACPI_GET_OBJECT_TYPE(operand0)));
-		status = AE_AML_INTERNAL;
-		goto cleanup;
-	}
-
-	*actual_return_desc = return_desc;
-
-      cleanup:
-	if (local_operand1 != operand1) {
-		acpi_ut_remove_reference(local_operand1);
-	}
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_do_math_op
- *
- * PARAMETERS:  Opcode              - AML opcode
- *              Integer0            - Integer operand #0
- *              Integer1            - Integer operand #1
- *
- * RETURN:      Integer result of the operation
- *
- * DESCRIPTION: Execute a math AML opcode. The purpose of having all of the
- *              math functions here is to prevent a lot of pointer dereferencing
- *              to obtain the operands.
- *
- ******************************************************************************/
-
-acpi_integer
-acpi_ex_do_math_op(u16 opcode, acpi_integer integer0, acpi_integer integer1)
-{
-
-	ACPI_FUNCTION_ENTRY();
-
-	switch (opcode) {
-	case AML_ADD_OP:	/* Add (Integer0, Integer1, Result) */
-
-		return (integer0 + integer1);
-
-	case AML_BIT_AND_OP:	/* And (Integer0, Integer1, Result) */
-
-		return (integer0 & integer1);
-
-	case AML_BIT_NAND_OP:	/* NAnd (Integer0, Integer1, Result) */
-
-		return (~(integer0 & integer1));
-
-	case AML_BIT_OR_OP:	/* Or (Integer0, Integer1, Result) */
-
-		return (integer0 | integer1);
-
-	case AML_BIT_NOR_OP:	/* NOr (Integer0, Integer1, Result) */
-
-		return (~(integer0 | integer1));
-
-	case AML_BIT_XOR_OP:	/* XOr (Integer0, Integer1, Result) */
-
-		return (integer0 ^ integer1);
-
-	case AML_MULTIPLY_OP:	/* Multiply (Integer0, Integer1, Result) */
-
-		return (integer0 * integer1);
-
-	case AML_SHIFT_LEFT_OP:	/* shift_left (Operand, shift_count, Result) */
-
-		/*
-		 * We need to check if the shiftcount is larger than the integer bit
-		 * width since the behavior of this is not well-defined in the C language.
-		 */
-		if (integer1 >= acpi_gbl_integer_bit_width) {
-			return (0);
-		}
-		return (integer0 << integer1);
-
-	case AML_SHIFT_RIGHT_OP:	/* shift_right (Operand, shift_count, Result) */
-
-		/*
-		 * We need to check if the shiftcount is larger than the integer bit
-		 * width since the behavior of this is not well-defined in the C language.
-		 */
-		if (integer1 >= acpi_gbl_integer_bit_width) {
-			return (0);
-		}
-		return (integer0 >> integer1);
-
-	case AML_SUBTRACT_OP:	/* Subtract (Integer0, Integer1, Result) */
-
-		return (integer0 - integer1);
-
-	default:
-
-		return (0);
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_do_logical_numeric_op
- *
- * PARAMETERS:  Opcode              - AML opcode
- *              Integer0            - Integer operand #0
- *              Integer1            - Integer operand #1
- *              logical_result      - TRUE/FALSE result of the operation
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute a logical "Numeric" AML opcode. For these Numeric
- *              operators (LAnd and LOr), both operands must be integers.
- *
- *              Note: cleanest machine code seems to be produced by the code
- *              below, rather than using statements of the form:
- *                  Result = (Integer0 && Integer1);
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_do_logical_numeric_op(u16 opcode,
-			      acpi_integer integer0,
-			      acpi_integer integer1, u8 * logical_result)
-{
-	acpi_status status = AE_OK;
-	u8 local_result = FALSE;
-
-	ACPI_FUNCTION_TRACE(ex_do_logical_numeric_op);
-
-	switch (opcode) {
-	case AML_LAND_OP:	/* LAnd (Integer0, Integer1) */
-
-		if (integer0 && integer1) {
-			local_result = TRUE;
-		}
-		break;
-
-	case AML_LOR_OP:	/* LOr (Integer0, Integer1) */
-
-		if (integer0 || integer1) {
-			local_result = TRUE;
-		}
-		break;
-
-	default:
-		status = AE_AML_INTERNAL;
-		break;
-	}
-
-	/* Return the logical result and status */
-
-	*logical_result = local_result;
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_do_logical_op
- *
- * PARAMETERS:  Opcode              - AML opcode
- *              Operand0            - operand #0
- *              Operand1            - operand #1
- *              logical_result      - TRUE/FALSE result of the operation
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute a logical AML opcode. The purpose of having all of the
- *              functions here is to prevent a lot of pointer dereferencing
- *              to obtain the operands and to simplify the generation of the
- *              logical value. For the Numeric operators (LAnd and LOr), both
- *              operands must be integers. For the other logical operators,
- *              operands can be any combination of Integer/String/Buffer. The
- *              first operand determines the type to which the second operand
- *              will be converted.
- *
- *              Note: cleanest machine code seems to be produced by the code
- *              below, rather than using statements of the form:
- *                  Result = (Operand0 == Operand1);
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_do_logical_op(u16 opcode,
-		      union acpi_operand_object *operand0,
-		      union acpi_operand_object *operand1, u8 * logical_result)
-{
-	union acpi_operand_object *local_operand1 = operand1;
-	acpi_integer integer0;
-	acpi_integer integer1;
-	u32 length0;
-	u32 length1;
-	acpi_status status = AE_OK;
-	u8 local_result = FALSE;
-	int compare;
-
-	ACPI_FUNCTION_TRACE(ex_do_logical_op);
-
-	/*
-	 * Convert the second operand if necessary.  The first operand
-	 * determines the type of the second operand. (See the Data Types
-	 * section of the ACPI 3.0+ specification.)  Both object types are
-	 * guaranteed to be either Integer/String/Buffer by the operand
-	 * resolution mechanism.
-	 */
-	switch (ACPI_GET_OBJECT_TYPE(operand0)) {
-	case ACPI_TYPE_INTEGER:
-		status =
-		    acpi_ex_convert_to_integer(operand1, &local_operand1, 16);
-		break;
-
-	case ACPI_TYPE_STRING:
-		status = acpi_ex_convert_to_string(operand1, &local_operand1,
-						   ACPI_IMPLICIT_CONVERT_HEX);
-		break;
-
-	case ACPI_TYPE_BUFFER:
-		status = acpi_ex_convert_to_buffer(operand1, &local_operand1);
-		break;
-
-	default:
-		status = AE_AML_INTERNAL;
-		break;
-	}
-
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	/*
-	 * Two cases: 1) Both Integers, 2) Both Strings or Buffers
-	 */
-	if (ACPI_GET_OBJECT_TYPE(operand0) == ACPI_TYPE_INTEGER) {
-		/*
-		 * 1) Both operands are of type integer
-		 *    Note: local_operand1 may have changed above
-		 */
-		integer0 = operand0->integer.value;
-		integer1 = local_operand1->integer.value;
-
-		switch (opcode) {
-		case AML_LEQUAL_OP:	/* LEqual (Operand0, Operand1) */
-
-			if (integer0 == integer1) {
-				local_result = TRUE;
-			}
-			break;
-
-		case AML_LGREATER_OP:	/* LGreater (Operand0, Operand1) */
-
-			if (integer0 > integer1) {
-				local_result = TRUE;
-			}
-			break;
-
-		case AML_LLESS_OP:	/* LLess (Operand0, Operand1) */
-
-			if (integer0 < integer1) {
-				local_result = TRUE;
-			}
-			break;
-
-		default:
-			status = AE_AML_INTERNAL;
-			break;
-		}
-	} else {
-		/*
-		 * 2) Both operands are Strings or both are Buffers
-		 *    Note: Code below takes advantage of common Buffer/String
-		 *          object fields. local_operand1 may have changed above. Use
-		 *          memcmp to handle nulls in buffers.
-		 */
-		length0 = operand0->buffer.length;
-		length1 = local_operand1->buffer.length;
-
-		/* Lexicographic compare: compare the data bytes */
-
-		compare = ACPI_MEMCMP(operand0->buffer.pointer,
-				      local_operand1->buffer.pointer,
-				      (length0 > length1) ? length1 : length0);
-
-		switch (opcode) {
-		case AML_LEQUAL_OP:	/* LEqual (Operand0, Operand1) */
-
-			/* Length and all bytes must be equal */
-
-			if ((length0 == length1) && (compare == 0)) {
-
-				/* Length and all bytes match ==> TRUE */
-
-				local_result = TRUE;
-			}
-			break;
-
-		case AML_LGREATER_OP:	/* LGreater (Operand0, Operand1) */
-
-			if (compare > 0) {
-				local_result = TRUE;
-				goto cleanup;	/* TRUE */
-			}
-			if (compare < 0) {
-				goto cleanup;	/* FALSE */
-			}
-
-			/* Bytes match (to shortest length), compare lengths */
-
-			if (length0 > length1) {
-				local_result = TRUE;
-			}
-			break;
-
-		case AML_LLESS_OP:	/* LLess (Operand0, Operand1) */
-
-			if (compare > 0) {
-				goto cleanup;	/* FALSE */
-			}
-			if (compare < 0) {
-				local_result = TRUE;
-				goto cleanup;	/* TRUE */
-			}
-
-			/* Bytes match (to shortest length), compare lengths */
-
-			if (length0 < length1) {
-				local_result = TRUE;
-			}
-			break;
-
-		default:
-			status = AE_AML_INTERNAL;
-			break;
-		}
-	}
-
-      cleanup:
-
-	/* New object was created if implicit conversion performed - delete */
-
-	if (local_operand1 != operand1) {
-		acpi_ut_remove_reference(local_operand1);
-	}
-
-	/* Return the logical result and status */
-
-	*logical_result = local_result;
-	return_ACPI_STATUS(status);
-}
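
The shift-count guards in acpi_ex_do_math_op() exist because shifting by the operand's full bit width or more is undefined behaviour in C, so the interpreter returns zero explicitly instead. A standalone sketch of the guarded ShiftLeft case, using illustrative names only:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t aml_shift_left(uint64_t operand, uint64_t count,
				       uint32_t integer_bit_width)
	{
		if (count >= integer_bit_width)
			return 0;	/* defined result instead of C undefined behaviour */
		return operand << count;
	}

	int main(void)
	{
		printf("0x%llx\n", (unsigned long long)aml_shift_left(1, 3, 64));	/* 0x8 */
		printf("0x%llx\n", (unsigned long long)aml_shift_left(1, 64, 64));	/* 0x0 */
		return 0;
	}
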
diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/executer/exmutex.c
deleted file mode 100644
index a8bf3d7..0000000
--- a/drivers/acpi/executer/exmutex.c
+++ /dev/null
@@ -1,473 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: exmutex - ASL Mutex Acquire/Release functions
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/acevents.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exmutex")
-
-/* Local prototypes */
-static void
-acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
-		   struct acpi_thread_state *thread);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_unlink_mutex
- *
- * PARAMETERS:  obj_desc            - The mutex to be unlinked
- *
- * RETURN:      None
- *
- * DESCRIPTION: Remove a mutex from the "AcquiredMutex" list
- *
- ******************************************************************************/
-
-void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
-{
-	struct acpi_thread_state *thread = obj_desc->mutex.owner_thread;
-
-	if (!thread) {
-		return;
-	}
-
-	/* Doubly linked list */
-
-	if (obj_desc->mutex.next) {
-		(obj_desc->mutex.next)->mutex.prev = obj_desc->mutex.prev;
-	}
-
-	if (obj_desc->mutex.prev) {
-		(obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next;
-	} else {
-		thread->acquired_mutex_list = obj_desc->mutex.next;
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_link_mutex
- *
- * PARAMETERS:  obj_desc        - The mutex to be linked
- *              Thread          - Current executing thread object
- *
- * RETURN:      None
- *
- * DESCRIPTION: Add a mutex to the "AcquiredMutex" list for this walk
- *
- ******************************************************************************/
-
-static void
-acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
-		   struct acpi_thread_state *thread)
-{
-	union acpi_operand_object *list_head;
-
-	list_head = thread->acquired_mutex_list;
-
-	/* This object will be the first object in the list */
-
-	obj_desc->mutex.prev = NULL;
-	obj_desc->mutex.next = list_head;
-
-	/* Update old first object to point back to this object */
-
-	if (list_head) {
-		list_head->mutex.prev = obj_desc;
-	}
-
-	/* Update list head */
-
-	thread->acquired_mutex_list = obj_desc;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_acquire_mutex_object
- *
- * PARAMETERS:  time_desc           - Timeout in milliseconds
- *              obj_desc            - Mutex object
- *              Thread              - Current thread state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Acquire an AML mutex, low-level interface. Provides a common
- *              path that supports multiple acquires by the same thread.
- *
- * MUTEX:       Interpreter must be locked
- *
- * NOTE: This interface is called from three places:
- * 1) From acpi_ex_acquire_mutex, via an AML Acquire() operator
- * 2) From acpi_ex_acquire_global_lock when an AML Field access requires the
- *    global lock
- * 3) From the external interface, acpi_acquire_global_lock
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_acquire_mutex_object(u16 timeout,
-			     union acpi_operand_object *obj_desc,
-			     acpi_thread_id thread_id)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex_object, obj_desc);
-
-	if (!obj_desc) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Support for multiple acquires by the owning thread */
-
-	if (obj_desc->mutex.thread_id == thread_id) {
-		/*
-		 * The mutex is already owned by this thread, just increment the
-		 * acquisition depth
-		 */
-		obj_desc->mutex.acquisition_depth++;
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Acquire the mutex, wait if necessary. Special case for Global Lock */
-
-	if (obj_desc == acpi_gbl_global_lock_mutex) {
-		status = acpi_ev_acquire_global_lock(timeout);
-	} else {
-		status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
-						   timeout);
-	}
-
-	if (ACPI_FAILURE(status)) {
-
-		/* Includes failure from a timeout on time_desc */
-
-		return_ACPI_STATUS(status);
-	}
-
-	/* Acquired the mutex: update mutex object */
-
-	obj_desc->mutex.thread_id = thread_id;
-	obj_desc->mutex.acquisition_depth = 1;
-	obj_desc->mutex.original_sync_level = 0;
-	obj_desc->mutex.owner_thread = NULL;	/* Used only for AML Acquire() */
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_acquire_mutex
- *
- * PARAMETERS:  time_desc           - Timeout integer
- *              obj_desc            - Mutex object
- *              walk_state          - Current method execution state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Acquire an AML mutex
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
-		      union acpi_operand_object *obj_desc,
-		      struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex, obj_desc);
-
-	if (!obj_desc) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Must have a valid thread ID */
-
-	if (!walk_state->thread) {
-		ACPI_ERROR((AE_INFO,
-			    "Cannot acquire Mutex [%4.4s], null thread info",
-			    acpi_ut_get_node_name(obj_desc->mutex.node)));
-		return_ACPI_STATUS(AE_AML_INTERNAL);
-	}
-
-	/*
-	 * Current sync level must be less than or equal to the sync level of the
-	 * mutex. This mechanism provides some deadlock prevention
-	 */
-	if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
-		ACPI_ERROR((AE_INFO,
-			    "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%d)",
-			    acpi_ut_get_node_name(obj_desc->mutex.node),
-			    walk_state->thread->current_sync_level));
-		return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
-	}
-
-	status = acpi_ex_acquire_mutex_object((u16) time_desc->integer.value,
-					      obj_desc,
-					      walk_state->thread->thread_id);
-	if (ACPI_SUCCESS(status) && obj_desc->mutex.acquisition_depth == 1) {
-
-		/* Save Thread object, original/current sync levels */
-
-		obj_desc->mutex.owner_thread = walk_state->thread;
-		obj_desc->mutex.original_sync_level =
-		    walk_state->thread->current_sync_level;
-		walk_state->thread->current_sync_level =
-		    obj_desc->mutex.sync_level;
-
-		/* Link the mutex to the current thread for force-unlock at method exit */
-
-		acpi_ex_link_mutex(obj_desc, walk_state->thread);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_release_mutex_object
- *
- * PARAMETERS:  obj_desc            - The object descriptor for this op
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Release a previously acquired Mutex, low level interface.
- *              Provides a common path that supports multiple releases (after
- *              previous multiple acquires) by the same thread.
- *
- * MUTEX:       Interpreter must be locked
- *
- * NOTE: This interface is called from three places:
- * 1) From acpi_ex_release_mutex, via an AML Release() operator
- * 2) From acpi_ex_release_global_lock when an AML Field access requires the
- *    global lock
- * 3) From the external interface, acpi_release_global_lock
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ex_release_mutex_object);
-
-	if (obj_desc->mutex.acquisition_depth == 0) {
-		return (AE_NOT_ACQUIRED);
-	}
-
-	/* Match multiple Acquires with multiple Releases */
-
-	obj_desc->mutex.acquisition_depth--;
-	if (obj_desc->mutex.acquisition_depth != 0) {
-
-		/* Just decrement the depth and return */
-
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	if (obj_desc->mutex.owner_thread) {
-
-		/* Unlink the mutex from the owner's list */
-
-		acpi_ex_unlink_mutex(obj_desc);
-		obj_desc->mutex.owner_thread = NULL;
-	}
-
-	/* Release the mutex, special case for Global Lock */
-
-	if (obj_desc == acpi_gbl_global_lock_mutex) {
-		status = acpi_ev_release_global_lock();
-	} else {
-		acpi_os_release_mutex(obj_desc->mutex.os_mutex);
-	}
-
-	/* Clear mutex info */
-
-	obj_desc->mutex.thread_id = NULL;
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_release_mutex
- *
- * PARAMETERS:  obj_desc            - The object descriptor for this op
- *              walk_state          - Current method execution state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Release a previously acquired Mutex.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
-		      struct acpi_walk_state *walk_state)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ex_release_mutex);
-
-	if (!obj_desc) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* The mutex must have been previously acquired in order to release it */
-
-	if (!obj_desc->mutex.owner_thread) {
-		ACPI_ERROR((AE_INFO,
-			    "Cannot release Mutex [%4.4s], not acquired",
-			    acpi_ut_get_node_name(obj_desc->mutex.node)));
-		return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
-	}
-
-	/*
-	 * The Mutex is owned, but this thread must be the owner.
-	 * Special case for Global Lock, any thread can release
-	 */
-	if ((obj_desc->mutex.owner_thread->thread_id !=
-	     walk_state->thread->thread_id)
-	    && (obj_desc != acpi_gbl_global_lock_mutex)) {
-		ACPI_ERROR((AE_INFO,
-			    "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
-			    (unsigned long)walk_state->thread->thread_id,
-			    acpi_ut_get_node_name(obj_desc->mutex.node),
-			    (unsigned long)obj_desc->mutex.owner_thread->
-			    thread_id));
-		return_ACPI_STATUS(AE_AML_NOT_OWNER);
-	}
-
-	/* Must have a valid thread ID */
-
-	if (!walk_state->thread) {
-		ACPI_ERROR((AE_INFO,
-			    "Cannot release Mutex [%4.4s], null thread info",
-			    acpi_ut_get_node_name(obj_desc->mutex.node)));
-		return_ACPI_STATUS(AE_AML_INTERNAL);
-	}
-
-	/*
-	 * The sync level of the mutex must be less than or equal to the current
-	 * sync level
-	 */
-	if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
-		ACPI_ERROR((AE_INFO,
-			    "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %d current %d",
-			    acpi_ut_get_node_name(obj_desc->mutex.node),
-			    obj_desc->mutex.sync_level,
-			    walk_state->thread->current_sync_level));
-		return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
-	}
-
-	status = acpi_ex_release_mutex_object(obj_desc);
-
-	if (obj_desc->mutex.acquisition_depth == 0) {
-
-		/* Restore the original sync_level */
-
-		walk_state->thread->current_sync_level =
-		    obj_desc->mutex.original_sync_level;
-	}
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_release_all_mutexes
- *
- * PARAMETERS:  Thread          - Current executing thread object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Release all mutexes held by this thread
- *
- * NOTE: This function is called as the thread is exiting the interpreter.
- * Mutexes are not released when an individual control method is exited, but
- * only when the parent thread actually exits the interpreter. This allows one
- * method to acquire a mutex, and a different method to release it, as long as
- * this is performed underneath a single parent control method.
- *
- ******************************************************************************/
-
-void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
-{
-	union acpi_operand_object *next = thread->acquired_mutex_list;
-	union acpi_operand_object *obj_desc;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* Traverse the list of owned mutexes, releasing each one */
-
-	while (next) {
-		obj_desc = next;
-		next = obj_desc->mutex.next;
-
-		obj_desc->mutex.prev = NULL;
-		obj_desc->mutex.next = NULL;
-		obj_desc->mutex.acquisition_depth = 0;
-
-		/* Release the mutex, special case for Global Lock */
-
-		if (obj_desc == acpi_gbl_global_lock_mutex) {
-
-			/* Ignore errors */
-
-			(void)acpi_ev_release_global_lock();
-		} else {
-			acpi_os_release_mutex(obj_desc->mutex.os_mutex);
-		}
-
-		/* Mark mutex unowned */
-
-		obj_desc->mutex.owner_thread = NULL;
-		obj_desc->mutex.thread_id = NULL;
-
-		/* Update Thread sync_level (Last mutex is the important one) */
-
-		thread->current_sync_level =
-		    obj_desc->mutex.original_sync_level;
-	}
-}
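
The multiple-acquire support in the mutex code above reduces to owner-plus-depth bookkeeping: a re-acquire by the owning thread only bumps a counter, and only the matching final release actually frees the mutex. A toy model with invented names, omitting the OS mutex, timeouts and sync levels:

	#include <stdio.h>

	struct toy_mutex {
		unsigned long owner;	/* 0 means unowned */
		unsigned int depth;	/* nested acquires by the owner */
	};

	static int toy_acquire(struct toy_mutex *m, unsigned long thread)
	{
		if (m->owner == thread) {	/* already owned: just go deeper */
			m->depth++;
			return 0;
		}
		if (m->owner)
			return -1;		/* the real code would wait here */
		m->owner = thread;
		m->depth = 1;
		return 0;
	}

	static int toy_release(struct toy_mutex *m, unsigned long thread)
	{
		if (m->owner != thread || m->depth == 0)
			return -1;		/* not owner / not acquired */
		if (--m->depth == 0)
			m->owner = 0;		/* only the outermost release frees it */
		return 0;
	}

	int main(void)
	{
		struct toy_mutex m = { 0, 0 };

		toy_acquire(&m, 1);
		toy_acquire(&m, 1);		/* nested acquire: depth becomes 2 */
		toy_release(&m, 1);
		printf("depth after one release: %u\n", m.depth);	/* prints 1 */
		return 0;
	}
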
diff --git a/drivers/acpi/executer/exnames.c b/drivers/acpi/executer/exnames.c
deleted file mode 100644
index 817e67b..0000000
--- a/drivers/acpi/executer/exnames.c
+++ /dev/null
@@ -1,435 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: exnames - interpreter/scanner name load/execute
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exnames")
-
-/* Local prototypes */
-static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs);
-
-static acpi_status
-acpi_ex_name_segment(u8 ** in_aml_address, char *name_string);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_allocate_name_string
- *
- * PARAMETERS:  prefix_count        - Count of parent levels. Special cases:
- *                                    (-1)==root,  0==none
- *              num_name_segs       - count of 4-character name segments
- *
- * RETURN:      A pointer to the allocated string segment.  This segment must
- *              be deleted by the caller.
- *
- * DESCRIPTION: Allocate a buffer for a name string. Ensure allocated name
- *              string is long enough, and set up prefix if any.
- *
- ******************************************************************************/
-
-static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
-{
-	char *temp_ptr;
-	char *name_string;
-	u32 size_needed;
-
-	ACPI_FUNCTION_TRACE(ex_allocate_name_string);
-
-	/*
-	 * Allow room for all \ and ^ prefixes, all segments and a multi_name_prefix.
-	 * Also, one byte for the null terminator.
-	 * This may actually be somewhat longer than needed.
-	 */
-	if (prefix_count == ACPI_UINT32_MAX) {
-
-		/* Special case for root */
-
-		size_needed = 1 + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1;
-	} else {
-		size_needed =
-		    prefix_count + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1;
-	}
-
-	/*
-	 * Allocate a buffer for the name.
-	 * This buffer must be deleted by the caller!
-	 */
-	name_string = ACPI_ALLOCATE(size_needed);
-	if (!name_string) {
-		ACPI_ERROR((AE_INFO,
-			    "Could not allocate size %d", size_needed));
-		return_PTR(NULL);
-	}
-
-	temp_ptr = name_string;
-
-	/* Set up Root or Parent prefixes if needed */
-
-	if (prefix_count == ACPI_UINT32_MAX) {
-		*temp_ptr++ = AML_ROOT_PREFIX;
-	} else {
-		while (prefix_count--) {
-			*temp_ptr++ = AML_PARENT_PREFIX;
-		}
-	}
-
-	/* Set up Dual or Multi prefixes if needed */
-
-	if (num_name_segs > 2) {
-
-		/* Set up multi prefixes   */
-
-		*temp_ptr++ = AML_MULTI_NAME_PREFIX_OP;
-		*temp_ptr++ = (char)num_name_segs;
-	} else if (2 == num_name_segs) {
-
-		/* Set up dual prefixes */
-
-		*temp_ptr++ = AML_DUAL_NAME_PREFIX;
-	}
-
-	/*
-	 * Terminate string following prefixes. acpi_ex_name_segment() will
-	 * append the segment(s)
-	 */
-	*temp_ptr = 0;
-
-	return_PTR(name_string);
-}
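
A rough, standalone sketch of the sizing arithmetic above (not part of the deleted file); NAME_SEG_SIZE and ROOT_PREFIX are stand-ins for ACPI_NAME_SIZE and ACPI_UINT32_MAX:

#include <stdio.h>

#define NAME_SEG_SIZE 4u            /* stands in for ACPI_NAME_SIZE (4 bytes) */
#define ROOT_PREFIX   0xFFFFFFFFu   /* stands in for ACPI_UINT32_MAX */

/*
 * Worst-case buffer size: room for '\' or '^' prefixes, an optional
 * dual/multi-name prefix (2 bytes), the 4-byte segments, and a NUL.
 */
static unsigned int name_string_size(unsigned int prefix_count,
                                     unsigned int num_name_segs)
{
	if (prefix_count == ROOT_PREFIX)	/* special case: root ('\') */
		return 1 + (NAME_SEG_SIZE * num_name_segs) + 2 + 1;

	return prefix_count + (NAME_SEG_SIZE * num_name_segs) + 2 + 1;
}

int main(void)
{
	/* "\_SB_.PCI0": root prefix + dual-name prefix + two segments = 12 bytes */
	printf("%u\n", name_string_size(ROOT_PREFIX, 2));
	return 0;
}
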
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_name_segment
- *
- * PARAMETERS:  in_aml_address  - Pointer to the name in the AML code
- *              name_string     - Where to return the name. The name is appended
- *                                to any existing string to form a namepath
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Extract an ACPI name (4 bytes) from the AML byte stream
- *
- ******************************************************************************/
-
-static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
-{
-	char *aml_address = (void *)*in_aml_address;
-	acpi_status status = AE_OK;
-	u32 index;
-	char char_buf[5];
-
-	ACPI_FUNCTION_TRACE(ex_name_segment);
-
-	/*
-	 * If the first character is a digit, then we know that we aren't
-	 * looking at a valid name segment
-	 */
-	char_buf[0] = *aml_address;
-
-	if ('0' <= char_buf[0] && char_buf[0] <= '9') {
-		ACPI_ERROR((AE_INFO, "Invalid leading digit: %c", char_buf[0]));
-		return_ACPI_STATUS(AE_CTRL_PENDING);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Bytes from stream:\n"));
-
-	for (index = 0; (index < ACPI_NAME_SIZE)
-	     && (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) {
-		char_buf[index] = *aml_address++;
-		ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index]));
-	}
-
-	/* Valid name segment  */
-
-	if (index == 4) {
-
-		/* Found 4 valid characters */
-
-		char_buf[4] = '\0';
-
-		if (name_string) {
-			ACPI_STRCAT(name_string, char_buf);
-			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-					  "Appended to - %s\n", name_string));
-		} else {
-			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-					  "No Name string - %s\n", char_buf));
-		}
-	} else if (index == 0) {
-		/*
-		 * First character was not a valid name character,
-		 * so we are looking at something other than a name.
-		 */
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "Leading character is not alpha: %02Xh (not a name)\n",
-				  char_buf[0]));
-		status = AE_CTRL_PENDING;
-	} else {
-		/*
-		 * Segment started with one or more valid characters, but fewer than
-		 * the required 4
-		 */
-		status = AE_AML_BAD_NAME;
-		ACPI_ERROR((AE_INFO,
-			    "Bad character %02x in name, at %p",
-			    *aml_address, aml_address));
-	}
-
-	*in_aml_address = ACPI_CAST_PTR(u8, aml_address);
-	return_ACPI_STATUS(status);
-}
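
For illustration only, a minimal check of the NameSeg character rules that acpi_ut_valid_acpi_char() enforces for the loop above: uppercase letters, digits and underscore, with no leading digit. valid_name_seg() below is a hypothetical helper, not the kernel function:

#include <ctype.h>
#include <stdio.h>

/* An AML NameSeg is exactly four characters: 'A'-'Z' or '_' first,
 * then 'A'-'Z', '0'-'9' or '_'. */
static int valid_name_seg(const char seg[4])
{
	if (!(isupper((unsigned char)seg[0]) || seg[0] == '_'))
		return 0;

	for (int i = 1; i < 4; i++) {
		if (!(isupper((unsigned char)seg[i]) ||
		      isdigit((unsigned char)seg[i]) || seg[i] == '_'))
			return 0;
	}
	return 1;
}

int main(void)
{
	printf("%d %d\n", valid_name_seg("PCI0"), valid_name_seg("0BAD"));	/* 1 0 */
	return 0;
}
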
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_get_name_string
- *
- * PARAMETERS:  data_type           - Object type to be associated with this
- *                                    name
- *              in_aml_address      - Pointer to the namestring in the AML code
- *              out_name_string     - Where the namestring is returned
- *              out_name_length     - Length of the returned string
- *
- * RETURN:      Status, namestring and length
- *
- * DESCRIPTION: Extract a full namepath from the AML byte stream,
- *              including any prefixes.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_get_name_string(acpi_object_type data_type,
-			u8 * in_aml_address,
-			char **out_name_string, u32 * out_name_length)
-{
-	acpi_status status = AE_OK;
-	u8 *aml_address = in_aml_address;
-	char *name_string = NULL;
-	u32 num_segments;
-	u32 prefix_count = 0;
-	u8 has_prefix = FALSE;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_get_name_string, aml_address);
-
-	if (ACPI_TYPE_LOCAL_REGION_FIELD == data_type ||
-	    ACPI_TYPE_LOCAL_BANK_FIELD == data_type ||
-	    ACPI_TYPE_LOCAL_INDEX_FIELD == data_type) {
-
-		/* Disallow prefixes for types associated with field_unit names */
-
-		name_string = acpi_ex_allocate_name_string(0, 1);
-		if (!name_string) {
-			status = AE_NO_MEMORY;
-		} else {
-			status =
-			    acpi_ex_name_segment(&aml_address, name_string);
-		}
-	} else {
-		/*
-		 * data_type is not a field name.
-		 * Examine first character of name for root or parent prefix operators
-		 */
-		switch (*aml_address) {
-		case AML_ROOT_PREFIX:
-
-			ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
-					  "RootPrefix(\\) at %p\n",
-					  aml_address));
-
-			/*
-			 * Remember that we have a root_prefix --
-			 * see comment in acpi_ex_allocate_name_string()
-			 */
-			aml_address++;
-			prefix_count = ACPI_UINT32_MAX;
-			has_prefix = TRUE;
-			break;
-
-		case AML_PARENT_PREFIX:
-
-			/* Increment past possibly multiple parent prefixes */
-
-			do {
-				ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
-						  "ParentPrefix (^) at %p\n",
-						  aml_address));
-
-				aml_address++;
-				prefix_count++;
-
-			} while (*aml_address == AML_PARENT_PREFIX);
-
-			has_prefix = TRUE;
-			break;
-
-		default:
-
-			/* Not a prefix character */
-
-			break;
-		}
-
-		/* Examine first character of name for name segment prefix operator */
-
-		switch (*aml_address) {
-		case AML_DUAL_NAME_PREFIX:
-
-			ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
-					  "DualNamePrefix at %p\n",
-					  aml_address));
-
-			aml_address++;
-			name_string =
-			    acpi_ex_allocate_name_string(prefix_count, 2);
-			if (!name_string) {
-				status = AE_NO_MEMORY;
-				break;
-			}
-
-			/* Indicate that we processed a prefix */
-
-			has_prefix = TRUE;
-
-			status =
-			    acpi_ex_name_segment(&aml_address, name_string);
-			if (ACPI_SUCCESS(status)) {
-				status =
-				    acpi_ex_name_segment(&aml_address,
-							 name_string);
-			}
-			break;
-
-		case AML_MULTI_NAME_PREFIX_OP:
-
-			ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
-					  "MultiNamePrefix at %p\n",
-					  aml_address));
-
-			/* Fetch count of segments remaining in name path */
-
-			aml_address++;
-			num_segments = *aml_address;
-
-			name_string =
-			    acpi_ex_allocate_name_string(prefix_count,
-							 num_segments);
-			if (!name_string) {
-				status = AE_NO_MEMORY;
-				break;
-			}
-
-			/* Indicate that we processed a prefix */
-
-			aml_address++;
-			has_prefix = TRUE;
-
-			while (num_segments &&
-			       (status =
-				acpi_ex_name_segment(&aml_address,
-						     name_string)) == AE_OK) {
-				num_segments--;
-			}
-
-			break;
-
-		case 0:
-
-			/* null_name valid as of 8-12-98 ASL/AML Grammar Update */
-
-			if (prefix_count == ACPI_UINT32_MAX) {
-				ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-						  "NameSeg is \"\\\" followed by NULL\n"));
-			}
-
-			/* Consume the NULL byte */
-
-			aml_address++;
-			name_string =
-			    acpi_ex_allocate_name_string(prefix_count, 0);
-			if (!name_string) {
-				status = AE_NO_MEMORY;
-				break;
-			}
-
-			break;
-
-		default:
-
-			/* Name segment string */
-
-			name_string =
-			    acpi_ex_allocate_name_string(prefix_count, 1);
-			if (!name_string) {
-				status = AE_NO_MEMORY;
-				break;
-			}
-
-			status =
-			    acpi_ex_name_segment(&aml_address, name_string);
-			break;
-		}
-	}
-
-	if (AE_CTRL_PENDING == status && has_prefix) {
-
-		/* Ran out of segments after processing a prefix */
-
-		ACPI_ERROR((AE_INFO, "Malformed Name at %p", name_string));
-		status = AE_AML_BAD_NAME;
-	}
-
-	if (ACPI_FAILURE(status)) {
-		if (name_string) {
-			ACPI_FREE(name_string);
-		}
-		return_ACPI_STATUS(status);
-	}
-
-	*out_name_string = name_string;
-	*out_name_length = (u32) (aml_address - in_aml_address);
-
-	return_ACPI_STATUS(status);
-}
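
A small standalone sketch of how such a namepath is laid out in the AML byte stream, using the prefix byte values handled above (root 0x5C '\', parent 0x5E '^', dual-name 0x2E; a multi-name prefix 0x2F would instead carry a segment count byte):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/*
	 * "^^PCI0.ISA_" encoded as an AML namestring: two parent prefixes,
	 * a dual-name prefix, then two 4-byte name segments.
	 */
	const unsigned char aml[] = {
		0x5E, 0x5E, 0x2E,
		'P', 'C', 'I', '0',
		'I', 'S', 'A', '_',
	};
	size_t i;

	for (i = 0; i < sizeof(aml); i++)
		printf("%02X ", aml[i]);
	printf("\n");
	return 0;
}
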
diff --git a/drivers/acpi/executer/exoparg1.c b/drivers/acpi/executer/exoparg1.c
deleted file mode 100644
index f622f9e..0000000
--- a/drivers/acpi/executer/exoparg1.c
+++ /dev/null
@@ -1,1049 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: exoparg1 - AML execution - opcodes with 1 argument
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exoparg1")
-
-/*!
- * Naming convention for AML interpreter execution routines.
- *
- * The routines that begin execution of AML opcodes are named with a common
- * convention based upon the number of arguments, the number of target operands,
- * and whether or not a value is returned:
- *
- *      AcpiExOpcode_xA_yT_zR
- *
- * Where:
- *
- * xA - ARGUMENTS:    The number of arguments (input operands) that are
- *                    required for this opcode type (0 through 6 args).
- * yT - TARGETS:      The number of targets (output operands) that are required
- *                    for this opcode type (0, 1, or 2 targets).
- * zR - RETURN VALUE: Indicates whether this opcode type returns a value
- *                    as the function return (0 or 1).
- *
- * The AcpiExOpcode* functions are called via the Dispatcher component with
- * fully resolved operands.
-!*/
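
Assuming the convention described above, a tiny standalone sketch that decodes an "xA_yT_zR" suffix into its argument/target/return counts (illustrative only; the kernel performs no such parsing):

#include <stdio.h>

struct op_signature {
	unsigned int args;		/* xA: input operands */
	unsigned int targets;		/* yT: output operands */
	unsigned int returns_value;	/* zR: 1 if a value is returned */
};

static struct op_signature decode_suffix(const char *s)
{
	struct op_signature sig;

	/* e.g. "1A_1T_1R" -> 1 argument, 1 target, returns a value */
	sig.args = (unsigned int)(s[0] - '0');
	sig.targets = (unsigned int)(s[3] - '0');
	sig.returns_value = (unsigned int)(s[6] - '0');
	return sig;
}

int main(void)
{
	struct op_signature sig = decode_suffix("1A_1T_1R");

	printf("%u args, %u targets, returns=%u\n",
	       sig.args, sig.targets, sig.returns_value);
	return 0;
}
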
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_opcode_0A_0T_1R
- *
- * PARAMETERS:  walk_state          - Current state (contains AML opcode)
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute operator with no operands, one return value
- *
- ******************************************************************************/
-acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
-{
-	acpi_status status = AE_OK;
-	union acpi_operand_object *return_desc = NULL;
-
-	ACPI_FUNCTION_TRACE_STR(ex_opcode_0A_0T_1R,
-				acpi_ps_get_opcode_name(walk_state->opcode));
-
-	/* Examine the AML opcode */
-
-	switch (walk_state->opcode) {
-	case AML_TIMER_OP:	/*  Timer () */
-
-		/* Create a return object of type Integer */
-
-		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-		return_desc->integer.value = acpi_os_get_timer();
-		break;
-
-	default:		/*  Unknown opcode  */
-
-		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
-			    walk_state->opcode));
-		status = AE_AML_BAD_OPCODE;
-		break;
-	}
-
-      cleanup:
-
-	/* Delete return object on error */
-
-	if ((ACPI_FAILURE(status)) || walk_state->result_obj) {
-		acpi_ut_remove_reference(return_desc);
-		walk_state->result_obj = NULL;
-	} else {
-		/* Save the return value */
-
-		walk_state->result_obj = return_desc;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_opcode_1A_0T_0R
- *
- * PARAMETERS:  walk_state          - Current state (contains AML opcode)
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute Type 1 monadic operator with numeric operand on
- *              object stack
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object **operand = &walk_state->operands[0];
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_0T_0R,
-				acpi_ps_get_opcode_name(walk_state->opcode));
-
-	/* Examine the AML opcode */
-
-	switch (walk_state->opcode) {
-	case AML_RELEASE_OP:	/*  Release (mutex_object) */
-
-		status = acpi_ex_release_mutex(operand[0], walk_state);
-		break;
-
-	case AML_RESET_OP:	/*  Reset (event_object) */
-
-		status = acpi_ex_system_reset_event(operand[0]);
-		break;
-
-	case AML_SIGNAL_OP:	/*  Signal (event_object) */
-
-		status = acpi_ex_system_signal_event(operand[0]);
-		break;
-
-	case AML_SLEEP_OP:	/*  Sleep (msec_time) */
-
-		status = acpi_ex_system_do_suspend(operand[0]->integer.value);
-		break;
-
-	case AML_STALL_OP:	/*  Stall (usec_time) */
-
-		status =
-		    acpi_ex_system_do_stall((u32) operand[0]->integer.value);
-		break;
-
-	case AML_UNLOAD_OP:	/*  Unload (Handle) */
-
-		status = acpi_ex_unload_table(operand[0]);
-		break;
-
-	default:		/*  Unknown opcode  */
-
-		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
-			    walk_state->opcode));
-		status = AE_AML_BAD_OPCODE;
-		break;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_opcode_1A_1T_0R
- *
- * PARAMETERS:  walk_state          - Current state (contains AML opcode)
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute opcode with one argument, one target, and no
- *              return value.
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
-{
-	acpi_status status = AE_OK;
-	union acpi_operand_object **operand = &walk_state->operands[0];
-
-	ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_1T_0R,
-				acpi_ps_get_opcode_name(walk_state->opcode));
-
-	/* Examine the AML opcode */
-
-	switch (walk_state->opcode) {
-	case AML_LOAD_OP:
-
-		status = acpi_ex_load_op(operand[0], operand[1], walk_state);
-		break;
-
-	default:		/* Unknown opcode */
-
-		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
-			    walk_state->opcode));
-		status = AE_AML_BAD_OPCODE;
-		goto cleanup;
-	}
-
-      cleanup:
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_opcode_1A_1T_1R
- *
- * PARAMETERS:  walk_state          - Current state (contains AML opcode)
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute opcode with one argument, one target, and a
- *              return value.
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
-{
-	acpi_status status = AE_OK;
-	union acpi_operand_object **operand = &walk_state->operands[0];
-	union acpi_operand_object *return_desc = NULL;
-	union acpi_operand_object *return_desc2 = NULL;
-	u32 temp32;
-	u32 i;
-	acpi_integer power_of_ten;
-	acpi_integer digit;
-
-	ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_1T_1R,
-				acpi_ps_get_opcode_name(walk_state->opcode));
-
-	/* Examine the AML opcode */
-
-	switch (walk_state->opcode) {
-	case AML_BIT_NOT_OP:
-	case AML_FIND_SET_LEFT_BIT_OP:
-	case AML_FIND_SET_RIGHT_BIT_OP:
-	case AML_FROM_BCD_OP:
-	case AML_TO_BCD_OP:
-	case AML_COND_REF_OF_OP:
-
-		/* Create a return object of type Integer for these opcodes */
-
-		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		switch (walk_state->opcode) {
-		case AML_BIT_NOT_OP:	/* Not (Operand, Result)  */
-
-			return_desc->integer.value = ~operand[0]->integer.value;
-			break;
-
-		case AML_FIND_SET_LEFT_BIT_OP:	/* find_set_left_bit (Operand, Result) */
-
-			return_desc->integer.value = operand[0]->integer.value;
-
-			/*
-			 * Acpi specification describes Integer type as a little
-			 * endian unsigned value, so this boundary condition is valid.
-			 */
-			for (temp32 = 0; return_desc->integer.value &&
-			     temp32 < ACPI_INTEGER_BIT_SIZE; ++temp32) {
-				return_desc->integer.value >>= 1;
-			}
-
-			return_desc->integer.value = temp32;
-			break;
-
-		case AML_FIND_SET_RIGHT_BIT_OP:	/* find_set_right_bit (Operand, Result) */
-
-			return_desc->integer.value = operand[0]->integer.value;
-
-			/*
-			 * The Acpi specification describes Integer type as a little
-			 * endian unsigned value, so this boundary condition is valid.
-			 */
-			for (temp32 = 0; return_desc->integer.value &&
-			     temp32 < ACPI_INTEGER_BIT_SIZE; ++temp32) {
-				return_desc->integer.value <<= 1;
-			}
-
-			/* Bit position is one-based, so subtract from ACPI_INTEGER_BIT_SIZE + 1 (33 or 65) */
-
-			return_desc->integer.value =
-			    temp32 ==
-			    0 ? 0 : (ACPI_INTEGER_BIT_SIZE + 1) - temp32;
-			break;
-
-		case AML_FROM_BCD_OP:	/* from_bcd (BCDValue, Result) */
-
-			/*
-			 * The 64-bit ACPI integer can hold 16 4-bit BCD characters
-			 * (if table is 32-bit, integer can hold 8 BCD characters)
-			 * Convert each 4-bit BCD value
-			 */
-			power_of_ten = 1;
-			return_desc->integer.value = 0;
-			digit = operand[0]->integer.value;
-
-			/* Convert each BCD digit (each is one nybble wide) */
-
-			for (i = 0;
-			     (i < acpi_gbl_integer_nybble_width) && (digit > 0);
-			     i++) {
-
-				/* Get the least significant 4-bit BCD digit */
-
-				temp32 = ((u32) digit) & 0xF;
-
-				/* Check the range of the digit */
-
-				if (temp32 > 9) {
-					ACPI_ERROR((AE_INFO,
-						    "BCD digit too large (not decimal): 0x%X",
-						    temp32));
-
-					status = AE_AML_NUMERIC_OVERFLOW;
-					goto cleanup;
-				}
-
-				/* Sum the digit into the result with the current power of 10 */
-
-				return_desc->integer.value +=
-				    (((acpi_integer) temp32) * power_of_ten);
-
-				/* Shift to next BCD digit */
-
-				digit >>= 4;
-
-				/* Next power of 10 */
-
-				power_of_ten *= 10;
-			}
-			break;
-
-		case AML_TO_BCD_OP:	/* to_bcd (Operand, Result) */
-
-			return_desc->integer.value = 0;
-			digit = operand[0]->integer.value;
-
-			/* Each BCD digit is one nybble wide */
-
-			for (i = 0;
-			     (i < acpi_gbl_integer_nybble_width) && (digit > 0);
-			     i++) {
-				(void)acpi_ut_short_divide(digit, 10, &digit,
-							   &temp32);
-
-				/*
-				 * Insert the BCD digit that resides in the
-				 * remainder from above
-				 */
-				return_desc->integer.value |=
-				    (((acpi_integer) temp32) << ACPI_MUL_4(i));
-			}
-
-			/* Overflow if there is any data left in Digit */
-
-			if (digit > 0) {
-				ACPI_ERROR((AE_INFO,
-					    "Integer too large to convert to BCD: %8.8X%8.8X",
-					    ACPI_FORMAT_UINT64(operand[0]->
-							       integer.value)));
-				status = AE_AML_NUMERIC_OVERFLOW;
-				goto cleanup;
-			}
-			break;
-
-		case AML_COND_REF_OF_OP:	/* cond_ref_of (source_object, Result) */
-
-			/*
-			 * This op is a little strange because the internal return value is
-			 * different than the return value stored in the result descriptor
-			 * (There are really two return values)
-			 */
-			if ((struct acpi_namespace_node *)operand[0] ==
-			    acpi_gbl_root_node) {
-				/*
-				 * This means that the object does not exist in the namespace,
-				 * return FALSE
-				 */
-				return_desc->integer.value = 0;
-				goto cleanup;
-			}
-
-			/* Get the object reference, store it, and remove our reference */
-
-			status = acpi_ex_get_object_reference(operand[0],
-							      &return_desc2,
-							      walk_state);
-			if (ACPI_FAILURE(status)) {
-				goto cleanup;
-			}
-
-			status =
-			    acpi_ex_store(return_desc2, operand[1], walk_state);
-			acpi_ut_remove_reference(return_desc2);
-
-			/* The object exists in the namespace, return TRUE */
-
-			return_desc->integer.value = ACPI_INTEGER_MAX;
-			goto cleanup;
-
-		default:
-			/* No other opcodes get here */
-			break;
-		}
-		break;
-
-	case AML_STORE_OP:	/* Store (Source, Target) */
-
-		/*
-		 * A store operand is typically a number, string, buffer or lvalue
-		 * Be careful about deleting the source object,
-		 * since the object itself may have been stored.
-		 */
-		status = acpi_ex_store(operand[0], operand[1], walk_state);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		/* It is possible that the Store already produced a return object */
-
-		if (!walk_state->result_obj) {
-			/*
-			 * Normally, we would remove a reference on the Operand[0]
-			 * parameter; But since it is being used as the internal return
-			 * object (meaning we would normally increment it), the two
-			 * cancel out, and we simply don't do anything.
-			 */
-			walk_state->result_obj = operand[0];
-			walk_state->operands[0] = NULL;	/* Prevent deletion */
-		}
-		return_ACPI_STATUS(status);
-
-		/*
-		 * ACPI 2.0 Opcodes
-		 */
-	case AML_COPY_OP:	/* Copy (Source, Target) */
-
-		status =
-		    acpi_ut_copy_iobject_to_iobject(operand[0], &return_desc,
-						    walk_state);
-		break;
-
-	case AML_TO_DECSTRING_OP:	/* to_decimal_string (Data, Result) */
-
-		status = acpi_ex_convert_to_string(operand[0], &return_desc,
-						   ACPI_EXPLICIT_CONVERT_DECIMAL);
-		if (return_desc == operand[0]) {
-
-			/* No conversion performed, add ref to handle return value */
-			acpi_ut_add_reference(return_desc);
-		}
-		break;
-
-	case AML_TO_HEXSTRING_OP:	/* to_hex_string (Data, Result) */
-
-		status = acpi_ex_convert_to_string(operand[0], &return_desc,
-						   ACPI_EXPLICIT_CONVERT_HEX);
-		if (return_desc == operand[0]) {
-
-			/* No conversion performed, add ref to handle return value */
-			acpi_ut_add_reference(return_desc);
-		}
-		break;
-
-	case AML_TO_BUFFER_OP:	/* to_buffer (Data, Result) */
-
-		status = acpi_ex_convert_to_buffer(operand[0], &return_desc);
-		if (return_desc == operand[0]) {
-
-			/* No conversion performed, add ref to handle return value */
-			acpi_ut_add_reference(return_desc);
-		}
-		break;
-
-	case AML_TO_INTEGER_OP:	/* to_integer (Data, Result) */
-
-		status = acpi_ex_convert_to_integer(operand[0], &return_desc,
-						    ACPI_ANY_BASE);
-		if (return_desc == operand[0]) {
-
-			/* No conversion performed, add ref to handle return value */
-			acpi_ut_add_reference(return_desc);
-		}
-		break;
-
-	case AML_SHIFT_LEFT_BIT_OP:	/* shift_left_bit (Source, bit_num) */
-	case AML_SHIFT_RIGHT_BIT_OP:	/* shift_right_bit (Source, bit_num) */
-
-		/* These are two obsolete opcodes */
-
-		ACPI_ERROR((AE_INFO,
-			    "%s is obsolete and not implemented",
-			    acpi_ps_get_opcode_name(walk_state->opcode)));
-		status = AE_SUPPORT;
-		goto cleanup;
-
-	default:		/* Unknown opcode */
-
-		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
-			    walk_state->opcode));
-		status = AE_AML_BAD_OPCODE;
-		goto cleanup;
-	}
-
-	if (ACPI_SUCCESS(status)) {
-
-		/* Store the return value computed above into the target object */
-
-		status = acpi_ex_store(return_desc, operand[1], walk_state);
-	}
-
-      cleanup:
-
-	/* Delete return object on error */
-
-	if (ACPI_FAILURE(status)) {
-		acpi_ut_remove_reference(return_desc);
-	}
-
-	/* Save return object on success */
-
-	else if (!walk_state->result_obj) {
-		walk_state->result_obj = return_desc;
-	}
-
-	return_ACPI_STATUS(status);
-}
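
The from_bcd and to_bcd cases above convert one 4-bit BCD digit (nybble) per loop iteration. A minimal standalone sketch of the same arithmetic, using uint64_t in place of acpi_integer and omitting the digit-range and nybble-width checks the interpreter performs:

#include <stdint.h>
#include <stdio.h>

static uint64_t from_bcd(uint64_t bcd)
{
	uint64_t result = 0;
	uint64_t power_of_ten = 1;

	while (bcd) {
		result += (bcd & 0xF) * power_of_ten;	/* low nybble = next decimal digit */
		bcd >>= 4;
		power_of_ten *= 10;
	}
	return result;
}

static uint64_t to_bcd(uint64_t value)
{
	uint64_t result = 0;
	unsigned int shift = 0;

	while (value) {
		result |= (value % 10) << shift;	/* one decimal digit per nybble */
		value /= 10;
		shift += 4;
	}
	return result;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)from_bcd(0x1234));	/* 1234 */
	printf("0x%llx\n", (unsigned long long)to_bcd(1234));	/* 0x1234 */
	return 0;
}
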
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_opcode_1A_0T_1R
- *
- * PARAMETERS:  walk_state          - Current state (contains AML opcode)
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute opcode with one argument, no target, and a return value
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object **operand = &walk_state->operands[0];
-	union acpi_operand_object *temp_desc;
-	union acpi_operand_object *return_desc = NULL;
-	acpi_status status = AE_OK;
-	u32 type;
-	acpi_integer value;
-
-	ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_0T_1R,
-				acpi_ps_get_opcode_name(walk_state->opcode));
-
-	/* Examine the AML opcode */
-
-	switch (walk_state->opcode) {
-	case AML_LNOT_OP:	/* LNot (Operand) */
-
-		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		/*
-		 * Set result to ONES (TRUE) if Value == 0.  Note:
-		 * return_desc->Integer.Value is initially == 0 (FALSE) from above.
-		 */
-		if (!operand[0]->integer.value) {
-			return_desc->integer.value = ACPI_INTEGER_MAX;
-		}
-		break;
-
-	case AML_DECREMENT_OP:	/* Decrement (Operand)  */
-	case AML_INCREMENT_OP:	/* Increment (Operand)  */
-
-		/*
-		 * Create a new integer.  Can't just get the base integer and
-		 * increment it because it may be an Arg or Field.
-		 */
-		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		/*
-		 * Since we are expecting a Reference operand, it can be either a
-		 * NS Node or an internal object.
-		 */
-		temp_desc = operand[0];
-		if (ACPI_GET_DESCRIPTOR_TYPE(temp_desc) ==
-		    ACPI_DESC_TYPE_OPERAND) {
-
-			/* Internal reference object - prevent deletion */
-
-			acpi_ut_add_reference(temp_desc);
-		}
-
-		/*
-		 * Convert the Reference operand to an Integer (This removes a
-		 * reference on the Operand[0] object)
-		 *
-		 * NOTE:  We use LNOT_OP here in order to force resolution of the
-		 * reference operand to an actual integer.
-		 */
-		status =
-		    acpi_ex_resolve_operands(AML_LNOT_OP, &temp_desc,
-					     walk_state);
-		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"While resolving operands for [%s]",
-					acpi_ps_get_opcode_name(walk_state->
-								opcode)));
-
-			goto cleanup;
-		}
-
-		/*
-		 * temp_desc is now guaranteed to be an Integer object --
-		 * Perform the actual increment or decrement
-		 */
-		if (walk_state->opcode == AML_INCREMENT_OP) {
-			return_desc->integer.value =
-			    temp_desc->integer.value + 1;
-		} else {
-			return_desc->integer.value =
-			    temp_desc->integer.value - 1;
-		}
-
-		/* Finished with this Integer object */
-
-		acpi_ut_remove_reference(temp_desc);
-
-		/*
-		 * Store the result back (indirectly) through the original
-		 * Reference object
-		 */
-		status = acpi_ex_store(return_desc, operand[0], walk_state);
-		break;
-
-	case AML_TYPE_OP:	/* object_type (source_object) */
-
-		/*
-		 * Note: The operand is not resolved at this point because we want to
-		 * get the associated object, not its value.  For example, we don't
-		 * want to resolve a field_unit to its value, we want the actual
-		 * field_unit object.
-		 */
-
-		/* Get the type of the base object */
-
-		status =
-		    acpi_ex_resolve_multiple(walk_state, operand[0], &type,
-					     NULL);
-		if (ACPI_FAILURE(status)) {
-			goto cleanup;
-		}
-
-		/* Allocate a descriptor to hold the type. */
-
-		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		return_desc->integer.value = type;
-		break;
-
-	case AML_SIZE_OF_OP:	/* size_of (source_object) */
-
-		/*
-		 * Note: The operand is not resolved at this point because we want to
-		 * get the associated object, not its value.
-		 */
-
-		/* Get the base object */
-
-		status = acpi_ex_resolve_multiple(walk_state,
-						  operand[0], &type,
-						  &temp_desc);
-		if (ACPI_FAILURE(status)) {
-			goto cleanup;
-		}
-
-		/*
-		 * The type of the base object must be integer, buffer, string, or
-		 * package.  All others are not supported.
-		 *
-		 * NOTE: Integer is not specifically supported by the ACPI spec,
-		 * but is supported implicitly via implicit operand conversion.
-		 * Rather than bother with conversion, we just use the byte width
-		 * global (4 or 8 bytes).
-		 */
-		switch (type) {
-		case ACPI_TYPE_INTEGER:
-			value = acpi_gbl_integer_byte_width;
-			break;
-
-		case ACPI_TYPE_STRING:
-			value = temp_desc->string.length;
-			break;
-
-		case ACPI_TYPE_BUFFER:
-
-			/* Buffer arguments may not be evaluated at this point */
-
-			status = acpi_ds_get_buffer_arguments(temp_desc);
-			value = temp_desc->buffer.length;
-			break;
-
-		case ACPI_TYPE_PACKAGE:
-
-			/* Package arguments may not be evaluated at this point */
-
-			status = acpi_ds_get_package_arguments(temp_desc);
-			value = temp_desc->package.count;
-			break;
-
-		default:
-			ACPI_ERROR((AE_INFO,
-				    "Operand must be Buffer/Integer/String/Package - found type %s",
-				    acpi_ut_get_type_name(type)));
-			status = AE_AML_OPERAND_TYPE;
-			goto cleanup;
-		}
-
-		if (ACPI_FAILURE(status)) {
-			goto cleanup;
-		}
-
-		/*
-		 * Now that we have the size of the object, create a result
-		 * object to hold the value
-		 */
-		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		return_desc->integer.value = value;
-		break;
-
-	case AML_REF_OF_OP:	/* ref_of (source_object) */
-
-		status =
-		    acpi_ex_get_object_reference(operand[0], &return_desc,
-						 walk_state);
-		if (ACPI_FAILURE(status)) {
-			goto cleanup;
-		}
-		break;
-
-	case AML_DEREF_OF_OP:	/* deref_of (obj_reference | String) */
-
-		/* Check for a method local or argument, or standalone String */
-
-		if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) ==
-		    ACPI_DESC_TYPE_NAMED) {
-			temp_desc =
-			    acpi_ns_get_attached_object((struct
-							 acpi_namespace_node *)
-							operand[0]);
-			if (temp_desc
-			    &&
-			    ((ACPI_GET_OBJECT_TYPE(temp_desc) ==
-			      ACPI_TYPE_STRING)
-			     || (ACPI_GET_OBJECT_TYPE(temp_desc) ==
-				 ACPI_TYPE_LOCAL_REFERENCE))) {
-				operand[0] = temp_desc;
-				acpi_ut_add_reference(temp_desc);
-			} else {
-				status = AE_AML_OPERAND_TYPE;
-				goto cleanup;
-			}
-		} else {
-			switch (ACPI_GET_OBJECT_TYPE(operand[0])) {
-			case ACPI_TYPE_LOCAL_REFERENCE:
-				/*
-				 * This is a deref_of (local_x | arg_x)
-				 *
-				 * Must resolve/dereference the local/arg reference first
-				 */
-				switch (operand[0]->reference.class) {
-				case ACPI_REFCLASS_LOCAL:
-				case ACPI_REFCLASS_ARG:
-
-					/* Set Operand[0] to the value of the local/arg */
-
-					status =
-					    acpi_ds_method_data_get_value
-					    (operand[0]->reference.class,
-					     operand[0]->reference.value,
-					     walk_state, &temp_desc);
-					if (ACPI_FAILURE(status)) {
-						goto cleanup;
-					}
-
-					/*
-					 * Delete our reference to the input object and
-					 * point to the object just retrieved
-					 */
-					acpi_ut_remove_reference(operand[0]);
-					operand[0] = temp_desc;
-					break;
-
-				case ACPI_REFCLASS_REFOF:
-
-					/* Get the object to which the reference refers */
-
-					temp_desc =
-					    operand[0]->reference.object;
-					acpi_ut_remove_reference(operand[0]);
-					operand[0] = temp_desc;
-					break;
-
-				default:
-
-					/* Must be an Index op - handled below */
-					break;
-				}
-				break;
-
-			case ACPI_TYPE_STRING:
-				break;
-
-			default:
-				status = AE_AML_OPERAND_TYPE;
-				goto cleanup;
-			}
-		}
-
-		if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) !=
-		    ACPI_DESC_TYPE_NAMED) {
-			if (ACPI_GET_OBJECT_TYPE(operand[0]) ==
-			    ACPI_TYPE_STRING) {
-				/*
-				 * This is a deref_of (String). The string is a reference
-				 * to a named ACPI object.
-				 *
-				 * 1) Find the owning Node
-				 * 2) Dereference the node to an actual object. Could be a
-				 *    Field, so we need to resolve the node to a value.
-				 */
-				status =
-				    acpi_ns_get_node(walk_state->scope_info->
-						     scope.node,
-						     operand[0]->string.pointer,
-						     ACPI_NS_SEARCH_PARENT,
-						     ACPI_CAST_INDIRECT_PTR
-						     (struct
-						      acpi_namespace_node,
-						      &return_desc));
-				if (ACPI_FAILURE(status)) {
-					goto cleanup;
-				}
-
-				status =
-				    acpi_ex_resolve_node_to_value
-				    (ACPI_CAST_INDIRECT_PTR
-				     (struct acpi_namespace_node, &return_desc),
-				     walk_state);
-				goto cleanup;
-			}
-		}
-
-		/* Operand[0] may have changed from the code above */
-
-		if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) ==
-		    ACPI_DESC_TYPE_NAMED) {
-			/*
-			 * This is a deref_of (object_reference)
-			 * Get the actual object from the Node (This is the dereference).
-			 * This case may only happen when a local_x or arg_x is
-			 * dereferenced above.
-			 */
-			return_desc = acpi_ns_get_attached_object((struct
-								   acpi_namespace_node
-								   *)
-								  operand[0]);
-			acpi_ut_add_reference(return_desc);
-		} else {
-			/*
-			 * This must be a reference object produced by either the
-			 * Index() or ref_of() operator
-			 */
-			switch (operand[0]->reference.class) {
-			case ACPI_REFCLASS_INDEX:
-
-				/*
-				 * The target type for the Index operator must be
-				 * either a Buffer or a Package
-				 */
-				switch (operand[0]->reference.target_type) {
-				case ACPI_TYPE_BUFFER_FIELD:
-
-					temp_desc =
-					    operand[0]->reference.object;
-
-					/*
-					 * Create a new object that contains one element of the
-					 * buffer -- the element pointed to by the index.
-					 *
-					 * NOTE: index into a buffer is NOT a pointer to a
-					 * sub-buffer of the main buffer, it is only a pointer to a
-					 * single element (byte) of the buffer!
-					 */
-					return_desc =
-					    acpi_ut_create_internal_object
-					    (ACPI_TYPE_INTEGER);
-					if (!return_desc) {
-						status = AE_NO_MEMORY;
-						goto cleanup;
-					}
-
-					/*
-					 * Since we are returning the value of the buffer at the
-					 * indexed location, we don't need to add an additional
-					 * reference to the buffer itself.
-					 */
-					return_desc->integer.value =
-					    temp_desc->buffer.
-					    pointer[operand[0]->reference.
-						    value];
-					break;
-
-				case ACPI_TYPE_PACKAGE:
-
-					/*
-					 * Return the referenced element of the package.  We must
-					 * add another reference to the referenced object, however.
-					 */
-					return_desc =
-					    *(operand[0]->reference.where);
-					if (return_desc) {
-						acpi_ut_add_reference
-						    (return_desc);
-					}
-					break;
-
-				default:
-
-					ACPI_ERROR((AE_INFO,
-						    "Unknown Index TargetType %X in reference object %p",
-						    operand[0]->reference.
-						    target_type, operand[0]));
-					status = AE_AML_OPERAND_TYPE;
-					goto cleanup;
-				}
-				break;
-
-			case ACPI_REFCLASS_REFOF:
-
-				return_desc = operand[0]->reference.object;
-
-				if (ACPI_GET_DESCRIPTOR_TYPE(return_desc) ==
-				    ACPI_DESC_TYPE_NAMED) {
-					return_desc =
-					    acpi_ns_get_attached_object((struct
-									 acpi_namespace_node
-									 *)
-									return_desc);
-				}
-
-				/* Add another reference to the object! */
-
-				acpi_ut_add_reference(return_desc);
-				break;
-
-			default:
-				ACPI_ERROR((AE_INFO,
-					    "Unknown class in reference(%p) - %2.2X",
-					    operand[0],
-					    operand[0]->reference.class));
-
-				status = AE_TYPE;
-				goto cleanup;
-			}
-		}
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
-			    walk_state->opcode));
-		status = AE_AML_BAD_OPCODE;
-		goto cleanup;
-	}
-
-      cleanup:
-
-	/* Delete return object on error */
-
-	if (ACPI_FAILURE(status)) {
-		acpi_ut_remove_reference(return_desc);
-	}
-
-	/* Save return object on success */
-
-	else {
-		walk_state->result_obj = return_desc;
-	}
-
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/executer/exoparg2.c b/drivers/acpi/executer/exoparg2.c
deleted file mode 100644
index 368def5..0000000
--- a/drivers/acpi/executer/exoparg2.c
+++ /dev/null
@@ -1,604 +0,0 @@
-/******************************************************************************
- *
- * Module Name: exoparg2 - AML execution - opcodes with 2 arguments
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/acinterp.h>
-#include <acpi/acevents.h>
-#include <acpi/amlcode.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exoparg2")
-
-/*!
- * Naming convention for AML interpreter execution routines.
- *
- * The routines that begin execution of AML opcodes are named with a common
- * convention based upon the number of arguments, the number of target operands,
- * and whether or not a value is returned:
- *
- *      AcpiExOpcode_xA_yT_zR
- *
- * Where:
- *
- * xA - ARGUMENTS:    The number of arguments (input operands) that are
- *                    required for this opcode type (1 through 6 args).
- * yT - TARGETS:      The number of targets (output operands) that are required
- *                    for this opcode type (0, 1, or 2 targets).
- * zR - RETURN VALUE: Indicates whether this opcode type returns a value
- *                    as the function return (0 or 1).
- *
- * The AcpiExOpcode* functions are called via the Dispatcher component with
- * fully resolved operands.
-!*/
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_opcode_2A_0T_0R
- *
- * PARAMETERS:  walk_state          - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute opcode with two arguments, no target, and no return
- *              value.
- *
- * ALLOCATION:  Deletes both operands
- *
- ******************************************************************************/
-acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object **operand = &walk_state->operands[0];
-	struct acpi_namespace_node *node;
-	u32 value;
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_0T_0R,
-				acpi_ps_get_opcode_name(walk_state->opcode));
-
-	/* Examine the opcode */
-
-	switch (walk_state->opcode) {
-	case AML_NOTIFY_OP:	/* Notify (notify_object, notify_value) */
-
-		/* The first operand is a namespace node */
-
-		node = (struct acpi_namespace_node *)operand[0];
-
-		/* Second value is the notify value */
-
-		value = (u32) operand[1]->integer.value;
-
-		/* Are notifies allowed on this object? */
-
-		if (!acpi_ev_is_notify_object(node)) {
-			ACPI_ERROR((AE_INFO,
-				    "Unexpected notify object type [%s]",
-				    acpi_ut_get_type_name(node->type)));
-
-			status = AE_AML_OPERAND_TYPE;
-			break;
-		}
-#ifdef ACPI_GPE_NOTIFY_CHECK
-		/*
-		 * GPE method wake/notify check.  Here, we want to ensure that we
-		 * don't receive any "DeviceWake" Notifies from a GPE _Lxx or _Exx
-		 * GPE method during system runtime.  If we do, the GPE is marked
-		 * as "wake-only" and disabled.
-		 *
-		 * 1) Is the Notify() value == device_wake?
-		 * 2) Is this a GPE deferred method?  (An _Lxx or _Exx method)
-		 * 3) Did the original GPE happen at system runtime?
-		 *    (versus during wake)
-		 *
-		 * If all three cases are true, this is a wake-only GPE that should
-		 * be disabled at runtime.
-		 */
-		if (value == 2) {	/* device_wake */
-			status =
-			    acpi_ev_check_for_wake_only_gpe(walk_state->
-							    gpe_event_info);
-			if (ACPI_FAILURE(status)) {
-
-				/* AE_WAKE_ONLY_GPE is the only possible error; it means ignore this notify */
-
-				return_ACPI_STATUS(AE_OK);
-			}
-		}
-#endif
-
-		/*
-		 * Dispatch the notify to the appropriate handler
-		 * NOTE: the request is queued for execution after this method
-		 * completes.  The notify handlers are NOT invoked synchronously
-		 * from this thread -- because handlers may in turn run other
-		 * control methods.
-		 */
-		status = acpi_ev_queue_notify_request(node, value);
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
-			    walk_state->opcode));
-		status = AE_AML_BAD_OPCODE;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_opcode_2A_2T_1R
- *
- * PARAMETERS:  walk_state          - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute a dyadic operator (2 operands) with 2 output targets
- *              and one implicit return value.
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object **operand = &walk_state->operands[0];
-	union acpi_operand_object *return_desc1 = NULL;
-	union acpi_operand_object *return_desc2 = NULL;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_2T_1R,
-				acpi_ps_get_opcode_name(walk_state->opcode));
-
-	/* Execute the opcode */
-
-	switch (walk_state->opcode) {
-	case AML_DIVIDE_OP:
-
-		/* Divide (Dividend, Divisor, remainder_result quotient_result) */
-
-		return_desc1 =
-		    acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-		if (!return_desc1) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		return_desc2 =
-		    acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-		if (!return_desc2) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		/* Quotient to return_desc1, remainder to return_desc2 */
-
-		status = acpi_ut_divide(operand[0]->integer.value,
-					operand[1]->integer.value,
-					&return_desc1->integer.value,
-					&return_desc2->integer.value);
-		if (ACPI_FAILURE(status)) {
-			goto cleanup;
-		}
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
-			    walk_state->opcode));
-		status = AE_AML_BAD_OPCODE;
-		goto cleanup;
-	}
-
-	/* Store the results to the target reference operands */
-
-	status = acpi_ex_store(return_desc2, operand[2], walk_state);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	status = acpi_ex_store(return_desc1, operand[3], walk_state);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-      cleanup:
-	/*
-	 * The remainder is not used as the implicit return value, so remove
-	 * our reference to it here. Only the quotient is returned (via result_obj).
-	 */
-	acpi_ut_remove_reference(return_desc2);
-
-	if (ACPI_FAILURE(status)) {
-
-		/* Delete the return object */
-
-		acpi_ut_remove_reference(return_desc1);
-	}
-
-	/* Save the return object (the quotient) on success */
-
-	else {
-		walk_state->result_obj = return_desc1;
-	}
-
-	return_ACPI_STATUS(status);
-}
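
A standalone sketch of the Divide semantics above: one operation yields both quotient and remainder, each written through its own output, much as the two acpi_ex_store() calls write the two target operands (aml_divide() is a hypothetical helper, not the kernel's acpi_ut_divide()):

#include <stdint.h>
#include <stdio.h>

static int aml_divide(uint64_t dividend, uint64_t divisor,
                      uint64_t *quotient, uint64_t *remainder)
{
	if (divisor == 0)
		return -1;	/* the interpreter reports AE_AML_DIVIDE_BY_ZERO */

	*quotient = dividend / divisor;
	*remainder = dividend % divisor;
	return 0;
}

int main(void)
{
	uint64_t q, r;

	if (aml_divide(17, 5, &q, &r) == 0)
		printf("quotient=%llu remainder=%llu\n",
		       (unsigned long long)q, (unsigned long long)r);
	return 0;
}
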
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_opcode_2A_1T_1R
- *
- * PARAMETERS:  walk_state          - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute opcode with two arguments, one target, and a return
- *              value.
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object **operand = &walk_state->operands[0];
-	union acpi_operand_object *return_desc = NULL;
-	acpi_integer index;
-	acpi_status status = AE_OK;
-	acpi_size length;
-
-	ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_1T_1R,
-				acpi_ps_get_opcode_name(walk_state->opcode));
-
-	/* Execute the opcode */
-
-	if (walk_state->op_info->flags & AML_MATH) {
-
-		/* All simple math opcodes (add, etc.) */
-
-		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		return_desc->integer.value =
-		    acpi_ex_do_math_op(walk_state->opcode,
-				       operand[0]->integer.value,
-				       operand[1]->integer.value);
-		goto store_result_to_target;
-	}
-
-	switch (walk_state->opcode) {
-	case AML_MOD_OP:	/* Mod (Dividend, Divisor, remainder_result) (ACPI 2.0) */
-
-		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		/* return_desc will contain the remainder */
-
-		status = acpi_ut_divide(operand[0]->integer.value,
-					operand[1]->integer.value,
-					NULL, &return_desc->integer.value);
-		break;
-
-	case AML_CONCAT_OP:	/* Concatenate (Data1, Data2, Result) */
-
-		status = acpi_ex_do_concatenate(operand[0], operand[1],
-						&return_desc, walk_state);
-		break;
-
-	case AML_TO_STRING_OP:	/* to_string (Buffer, Length, Result) (ACPI 2.0) */
-
-		/*
-		 * Input object is guaranteed to be a buffer at this point (it may have
-		 * been converted.)  Copy the raw buffer data to a new object of
-		 * type String.
-		 */
-
-		/*
-		 * Get the length of the new string. It is the smallest of:
-		 * 1) Length of the input buffer
-		 * 2) Max length as specified in the to_string operator
-		 * 3) Length of input buffer up to a zero byte (null terminator)
-		 *
-		 * NOTE: A length of zero is ok, and will create a zero-length, null
-		 *       terminated string.
-		 */
-		length = 0;
-		while ((length < operand[0]->buffer.length) &&
-		       (length < operand[1]->integer.value) &&
-		       (operand[0]->buffer.pointer[length])) {
-			length++;
-		}
-
-		/* Allocate a new string object */
-
-		return_desc = acpi_ut_create_string_object(length);
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		/*
-		 * Copy the raw buffer data with no transform.
-		 * (NULL terminated already)
-		 */
-		ACPI_MEMCPY(return_desc->string.pointer,
-			    operand[0]->buffer.pointer, length);
-		break;
-
-	case AML_CONCAT_RES_OP:
-
-		/* concatenate_res_template (Buffer, Buffer, Result) (ACPI 2.0) */
-
-		status = acpi_ex_concat_template(operand[0], operand[1],
-						 &return_desc, walk_state);
-		break;
-
-	case AML_INDEX_OP:	/* Index (Source Index Result) */
-
-		/* Create the internal return object */
-
-		return_desc =
-		    acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE);
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		/* Initialize the Index reference object */
-
-		index = operand[1]->integer.value;
-		return_desc->reference.value = (u32) index;
-		return_desc->reference.class = ACPI_REFCLASS_INDEX;
-
-		/*
-		 * At this point, the Source operand is a String, Buffer, or Package.
-		 * Verify that the index is within range.
-		 */
-		switch (ACPI_GET_OBJECT_TYPE(operand[0])) {
-		case ACPI_TYPE_STRING:
-
-			if (index >= operand[0]->string.length) {
-				status = AE_AML_STRING_LIMIT;
-			}
-
-			return_desc->reference.target_type =
-			    ACPI_TYPE_BUFFER_FIELD;
-			break;
-
-		case ACPI_TYPE_BUFFER:
-
-			if (index >= operand[0]->buffer.length) {
-				status = AE_AML_BUFFER_LIMIT;
-			}
-
-			return_desc->reference.target_type =
-			    ACPI_TYPE_BUFFER_FIELD;
-			break;
-
-		case ACPI_TYPE_PACKAGE:
-
-			if (index >= operand[0]->package.count) {
-				status = AE_AML_PACKAGE_LIMIT;
-			}
-
-			return_desc->reference.target_type = ACPI_TYPE_PACKAGE;
-			return_desc->reference.where =
-			    &operand[0]->package.elements[index];
-			break;
-
-		default:
-
-			status = AE_AML_INTERNAL;
-			goto cleanup;
-		}
-
-		/* Failure means that the Index was beyond the end of the object */
-
-		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"Index (%X%8.8X) is beyond end of object",
-					ACPI_FORMAT_UINT64(index)));
-			goto cleanup;
-		}
-
-		/*
-		 * Save the target object and add a reference to it for the life
-		 * of the index
-		 */
-		return_desc->reference.object = operand[0];
-		acpi_ut_add_reference(operand[0]);
-
-		/* Store the reference to the Target */
-
-		status = acpi_ex_store(return_desc, operand[2], walk_state);
-
-		/* Return the reference */
-
-		walk_state->result_obj = return_desc;
-		goto cleanup;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
-			    walk_state->opcode));
-		status = AE_AML_BAD_OPCODE;
-		break;
-	}
-
-      store_result_to_target:
-
-	if (ACPI_SUCCESS(status)) {
-		/*
-		 * Store the result of the operation (which is now in return_desc) into
-		 * the Target descriptor.
-		 */
-		status = acpi_ex_store(return_desc, operand[2], walk_state);
-		if (ACPI_FAILURE(status)) {
-			goto cleanup;
-		}
-
-		if (!walk_state->result_obj) {
-			walk_state->result_obj = return_desc;
-		}
-	}
-
-      cleanup:
-
-	/* Delete return object on error */
-
-	if (ACPI_FAILURE(status)) {
-		acpi_ut_remove_reference(return_desc);
-		walk_state->result_obj = NULL;
-	}
-
-	return_ACPI_STATUS(status);
-}
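
The to_string case above clamps the result length to the smallest of three limits. A standalone sketch of just that length rule (illustrative, not the kernel code):

#include <stddef.h>
#include <stdio.h>

/* Result length is the smallest of: the buffer length, the caller's
 * maximum, and the offset of the first zero byte in the buffer. */
static size_t to_string_length(const unsigned char *buf, size_t buf_len,
                               size_t max_len)
{
	size_t length = 0;

	while (length < buf_len && length < max_len && buf[length])
		length++;
	return length;
}

int main(void)
{
	const unsigned char data[] = { 'P', 'C', 'I', 0, 'X' };

	printf("%zu\n", to_string_length(data, sizeof(data), 10));	/* 3 */
	return 0;
}
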
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_opcode_2A_0T_1R
- *
- * PARAMETERS:  walk_state          - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute opcode with 2 arguments, no target, and a return value
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object **operand = &walk_state->operands[0];
-	union acpi_operand_object *return_desc = NULL;
-	acpi_status status = AE_OK;
-	u8 logical_result = FALSE;
-
-	ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_0T_1R,
-				acpi_ps_get_opcode_name(walk_state->opcode));
-
-	/* Create the internal return object */
-
-	return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-	if (!return_desc) {
-		status = AE_NO_MEMORY;
-		goto cleanup;
-	}
-
-	/* Execute the Opcode */
-
-	if (walk_state->op_info->flags & AML_LOGICAL_NUMERIC) {
-
-		/* logical_op (Operand0, Operand1) */
-
-		status = acpi_ex_do_logical_numeric_op(walk_state->opcode,
-						       operand[0]->integer.
-						       value,
-						       operand[1]->integer.
-						       value, &logical_result);
-		goto store_logical_result;
-	} else if (walk_state->op_info->flags & AML_LOGICAL) {
-
-		/* logical_op (Operand0, Operand1) */
-
-		status = acpi_ex_do_logical_op(walk_state->opcode, operand[0],
-					       operand[1], &logical_result);
-		goto store_logical_result;
-	}
-
-	switch (walk_state->opcode) {
-	case AML_ACQUIRE_OP:	/* Acquire (mutex_object, Timeout) */
-
-		status =
-		    acpi_ex_acquire_mutex(operand[1], operand[0], walk_state);
-		if (status == AE_TIME) {
-			logical_result = TRUE;	/* TRUE = Acquire timed out */
-			status = AE_OK;
-		}
-		break;
-
-	case AML_WAIT_OP:	/* Wait (event_object, Timeout) */
-
-		status = acpi_ex_system_wait_event(operand[1], operand[0]);
-		if (status == AE_TIME) {
-			logical_result = TRUE;	/* TRUE, Wait timed out */
-			status = AE_OK;
-		}
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
-			    walk_state->opcode));
-		status = AE_AML_BAD_OPCODE;
-		goto cleanup;
-	}
-
-      store_logical_result:
-	/*
-	 * Set the return value according to logical_result: logical TRUE is
-	 * all ones (ACPI_INTEGER_MAX); the default is FALSE (zero)
-	 */
-	if (logical_result) {
-		return_desc->integer.value = ACPI_INTEGER_MAX;
-	}
-
-      cleanup:
-
-	/* Delete return object on error */
-
-	if (ACPI_FAILURE(status)) {
-		acpi_ut_remove_reference(return_desc);
-	}
-
-	/* Save return object on success */
-
-	else {
-		walk_state->result_obj = return_desc;
-	}
-
-	return_ACPI_STATUS(status);
-}
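
A standalone sketch of the AML_LOGICAL_NUMERIC path above: a numeric logical operator produces the integer Ones for TRUE and zero for FALSE (do_logical_numeric() is a hypothetical stand-in for acpi_ex_do_logical_numeric_op()):

#include <stdint.h>
#include <stdio.h>

#define AML_ONES UINT64_MAX	/* stands in for ACPI_INTEGER_MAX */

enum logical_numeric_op { OP_LAND, OP_LOR };

static uint64_t do_logical_numeric(enum logical_numeric_op op,
                                   uint64_t op0, uint64_t op1)
{
	int result = (op == OP_LAND) ? (op0 && op1) : (op0 || op1);

	return result ? AML_ONES : 0;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)do_logical_numeric(OP_LAND, 3, 0));	/* 0x0 */
	printf("0x%llx\n", (unsigned long long)do_logical_numeric(OP_LOR, 3, 0));	/* all ones */
	return 0;
}
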
diff --git a/drivers/acpi/executer/exoparg3.c b/drivers/acpi/executer/exoparg3.c
deleted file mode 100644
index 9cb4197..0000000
--- a/drivers/acpi/executer/exoparg3.c
+++ /dev/null
@@ -1,272 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: exoparg3 - AML execution - opcodes with 3 arguments
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/acparser.h>
-#include <acpi/amlcode.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exoparg3")
-
-/*!
- * Naming convention for AML interpreter execution routines.
- *
- * The routines that begin execution of AML opcodes are named with a common
- * convention based upon the number of arguments, the number of target operands,
- * and whether or not a value is returned:
- *
- *      AcpiExOpcode_xA_yT_zR
- *
- * Where:
- *
- * xA - ARGUMENTS:    The number of arguments (input operands) that are
- *                    required for this opcode type (1 through 6 args).
- * yT - TARGETS:      The number of targets (output operands) that are required
- *                    for this opcode type (0, 1, or 2 targets).
- * zR - RETURN VALUE: Indicates whether this opcode type returns a value
- *                    as the function return (0 or 1).
- *
- * The AcpiExOpcode* functions are called via the Dispatcher component with
- * fully resolved operands.
-!*/
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_opcode_3A_0T_0R
- *
- * PARAMETERS:  walk_state          - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute Triadic operator (3 operands)
- *
- ******************************************************************************/
-acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object **operand = &walk_state->operands[0];
-	struct acpi_signal_fatal_info *fatal;
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE_STR(ex_opcode_3A_0T_0R,
-				acpi_ps_get_opcode_name(walk_state->opcode));
-
-	switch (walk_state->opcode) {
-	case AML_FATAL_OP:	/* Fatal (fatal_type fatal_code fatal_arg) */
-
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "FatalOp: Type %X Code %X Arg %X <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n",
-				  (u32) operand[0]->integer.value,
-				  (u32) operand[1]->integer.value,
-				  (u32) operand[2]->integer.value));
-
-		fatal = ACPI_ALLOCATE(sizeof(struct acpi_signal_fatal_info));
-		if (fatal) {
-			fatal->type = (u32) operand[0]->integer.value;
-			fatal->code = (u32) operand[1]->integer.value;
-			fatal->argument = (u32) operand[2]->integer.value;
-		}
-
-		/* Always signal the OS! */
-
-		status = acpi_os_signal(ACPI_SIGNAL_FATAL, fatal);
-
-		/* Might return while OS is shutting down, just continue */
-
-		ACPI_FREE(fatal);
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
-			    walk_state->opcode));
-		status = AE_AML_BAD_OPCODE;
-		goto cleanup;
-	}
-
-      cleanup:
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_opcode_3A_1T_1R
- *
- * PARAMETERS:  walk_state          - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute Triadic operator (3 operands)
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object **operand = &walk_state->operands[0];
-	union acpi_operand_object *return_desc = NULL;
-	char *buffer = NULL;
-	acpi_status status = AE_OK;
-	acpi_integer index;
-	acpi_size length;
-
-	ACPI_FUNCTION_TRACE_STR(ex_opcode_3A_1T_1R,
-				acpi_ps_get_opcode_name(walk_state->opcode));
-
-	switch (walk_state->opcode) {
-	case AML_MID_OP:	/* Mid (Source[0], Index[1], Length[2], Result[3]) */
-
-		/*
-		 * Create the return object.  The Source operand is guaranteed to be
-		 * either a String or a Buffer, so just use its type.
-		 */
-		return_desc =
-		    acpi_ut_create_internal_object(ACPI_GET_OBJECT_TYPE
-						   (operand[0]));
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		/* Get the Integer values from the objects */
-
-		index = operand[1]->integer.value;
-		length = (acpi_size) operand[2]->integer.value;
-
-		/*
-		 * If the index is beyond the length of the String/Buffer, or if the
-		 * requested length is zero, return a zero-length String/Buffer
-		 */
-		if (index >= operand[0]->string.length) {
-			length = 0;
-		}
-
-		/* Truncate request if larger than the actual String/Buffer */
-
-		else if ((index + length) > operand[0]->string.length) {
-			length = (acpi_size) operand[0]->string.length -
-			    (acpi_size) index;
-		}
-
-		/* Strings always have a sub-pointer, not so for buffers */
-
-		switch (ACPI_GET_OBJECT_TYPE(operand[0])) {
-		case ACPI_TYPE_STRING:
-
-			/* Always allocate a new buffer for the String */
-
-			buffer = ACPI_ALLOCATE_ZEROED((acpi_size) length + 1);
-			if (!buffer) {
-				status = AE_NO_MEMORY;
-				goto cleanup;
-			}
-			break;
-
-		case ACPI_TYPE_BUFFER:
-
-			/* If the requested length is zero, don't allocate a buffer */
-
-			if (length > 0) {
-
-				/* Allocate a new buffer for the Buffer */
-
-				buffer = ACPI_ALLOCATE_ZEROED(length);
-				if (!buffer) {
-					status = AE_NO_MEMORY;
-					goto cleanup;
-				}
-			}
-			break;
-
-		default:	/* Should not happen */
-
-			status = AE_AML_OPERAND_TYPE;
-			goto cleanup;
-		}
-
-		if (buffer) {
-
-			/* We have a buffer, copy the portion requested */
-
-			ACPI_MEMCPY(buffer, operand[0]->string.pointer + index,
-				    length);
-		}
-
-		/* Set the length of the new String/Buffer */
-
-		return_desc->string.pointer = buffer;
-		return_desc->string.length = (u32) length;
-
-		/* Mark buffer initialized */
-
-		return_desc->buffer.flags |= AOPOBJ_DATA_VALID;
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
-			    walk_state->opcode));
-		status = AE_AML_BAD_OPCODE;
-		goto cleanup;
-	}
-
-	/* Store the result in the target */
-
-	status = acpi_ex_store(return_desc, operand[3], walk_state);
-
-      cleanup:
-
-	/* Delete return object on error */
-
-	if (ACPI_FAILURE(status) || walk_state->result_obj) {
-		acpi_ut_remove_reference(return_desc);
-		walk_state->result_obj = NULL;
-	}
-
-	/* Set the return object and exit */
-
-	else {
-		walk_state->result_obj = return_desc;
-	}
-	return_ACPI_STATUS(status);
-}
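
The Mid() implementation removed above clamps the requested substring so it never reads past the source String/Buffer: an out-of-range index yields a zero-length result, and an over-long request is truncated to what remains. A standalone sketch of just that clamping rule (sketch_mid_clamp is a hypothetical helper, not part of the patch):

#include <stddef.h>

/* Return the number of bytes Mid() may actually copy, given the source
 * length, the requested start index, and the requested length. */
static size_t sketch_mid_clamp(size_t src_len, size_t index, size_t length)
{
	if (index >= src_len)
		return 0;                /* start beyond end: empty result */
	if (index + length > src_len)
		return src_len - index;  /* truncate to what remains */
	return length;                   /* request fits as-is */
}

For example, sketch_mid_clamp(5, 2, 10) returns 3, matching the truncation branch above.
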
diff --git a/drivers/acpi/executer/exoparg6.c b/drivers/acpi/executer/exoparg6.c
deleted file mode 100644
index 67d4873..0000000
--- a/drivers/acpi/executer/exoparg6.c
+++ /dev/null
@@ -1,340 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: exoparg6 - AML execution - opcodes with 6 arguments
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/acparser.h>
-#include <acpi/amlcode.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exoparg6")
-
-/*!
- * Naming convention for AML interpreter execution routines.
- *
- * The routines that begin execution of AML opcodes are named with a common
- * convention based upon the number of arguments, the number of target operands,
- * and whether or not a value is returned:
- *
- *      AcpiExOpcode_xA_yT_zR
- *
- * Where:
- *
- * xA - ARGUMENTS:    The number of arguments (input operands) that are
- *                    required for this opcode type (1 through 6 args).
- * yT - TARGETS:      The number of targets (output operands) that are required
- *                    for this opcode type (0, 1, or 2 targets).
- * zR - RETURN VALUE: Indicates whether this opcode type returns a value
- *                    as the function return (0 or 1).
- *
- * The AcpiExOpcode* functions are called via the Dispatcher component with
- * fully resolved operands.
-!*/
-/* Local prototypes */
-static u8
-acpi_ex_do_match(u32 match_op,
-		 union acpi_operand_object *package_obj,
-		 union acpi_operand_object *match_obj);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_do_match
- *
- * PARAMETERS:  match_op        - The AML match operand
- *              package_obj     - Object from the target package
- *              match_obj       - Object to be matched
- *
- * RETURN:      TRUE if the match is successful, FALSE otherwise
- *
- * DESCRIPTION: Implements the low-level match for the ASL Match operator.
- *              Package elements will be implicitly converted to the type of
- *              the match object (Integer/Buffer/String).
- *
- ******************************************************************************/
-
-static u8
-acpi_ex_do_match(u32 match_op,
-		 union acpi_operand_object *package_obj,
-		 union acpi_operand_object *match_obj)
-{
-	u8 logical_result = TRUE;
-	acpi_status status;
-
-	/*
-	 * Note: Since the package_obj/match_obj ordering is opposite to that of
-	 * the standard logical operators, we have to reverse them when we call
-	 * do_logical_op in order to make the implicit conversion rules work
-	 * correctly. However, this means we have to flip the entire equation
-	 * also. A bit ugly perhaps, but overall, better than fussing the
-	 * parameters around at runtime, over and over again.
-	 *
-	 * Below, P[i] refers to the package element, M refers to the Match object.
-	 */
-	switch (match_op) {
-	case MATCH_MTR:
-
-		/* Always true */
-
-		break;
-
-	case MATCH_MEQ:
-
-		/*
-		 * True if equal: (P[i] == M)
-		 * Change to:     (M == P[i])
-		 */
-		status =
-		    acpi_ex_do_logical_op(AML_LEQUAL_OP, match_obj, package_obj,
-					  &logical_result);
-		if (ACPI_FAILURE(status)) {
-			return (FALSE);
-		}
-		break;
-
-	case MATCH_MLE:
-
-		/*
-		 * True if less than or equal: (P[i] <= M) (P[i] not_greater than M)
-		 * Change to:                  (M >= P[i]) (M not_less than P[i])
-		 */
-		status =
-		    acpi_ex_do_logical_op(AML_LLESS_OP, match_obj, package_obj,
-					  &logical_result);
-		if (ACPI_FAILURE(status)) {
-			return (FALSE);
-		}
-		logical_result = (u8) ! logical_result;
-		break;
-
-	case MATCH_MLT:
-
-		/*
-		 * True if less than: (P[i] < M)
-		 * Change to:         (M > P[i])
-		 */
-		status =
-		    acpi_ex_do_logical_op(AML_LGREATER_OP, match_obj,
-					  package_obj, &logical_result);
-		if (ACPI_FAILURE(status)) {
-			return (FALSE);
-		}
-		break;
-
-	case MATCH_MGE:
-
-		/*
-		 * True if greater than or equal: (P[i] >= M) (P[i] not_less than M)
-		 * Change to:                     (M <= P[i]) (M not_greater than P[i])
-		 */
-		status =
-		    acpi_ex_do_logical_op(AML_LGREATER_OP, match_obj,
-					  package_obj, &logical_result);
-		if (ACPI_FAILURE(status)) {
-			return (FALSE);
-		}
-		logical_result = (u8) ! logical_result;
-		break;
-
-	case MATCH_MGT:
-
-		/*
-		 * True if greater than: (P[i] > M)
-		 * Change to:            (M < P[i])
-		 */
-		status =
-		    acpi_ex_do_logical_op(AML_LLESS_OP, match_obj, package_obj,
-					  &logical_result);
-		if (ACPI_FAILURE(status)) {
-			return (FALSE);
-		}
-		break;
-
-	default:
-
-		/* Undefined */
-
-		return (FALSE);
-	}
-
-	return logical_result;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_opcode_6A_0T_1R
- *
- * PARAMETERS:  walk_state          - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute opcode with 6 arguments, no target, and a return value
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
-{
-	union acpi_operand_object **operand = &walk_state->operands[0];
-	union acpi_operand_object *return_desc = NULL;
-	acpi_status status = AE_OK;
-	acpi_integer index;
-	union acpi_operand_object *this_element;
-
-	ACPI_FUNCTION_TRACE_STR(ex_opcode_6A_0T_1R,
-				acpi_ps_get_opcode_name(walk_state->opcode));
-
-	switch (walk_state->opcode) {
-	case AML_MATCH_OP:
-		/*
-		 * Match (search_pkg[0], match_op1[1], match_obj1[2],
-		 *                      match_op2[3], match_obj2[4], start_index[5])
-		 */
-
-		/* Validate both Match Term Operators (MTR, MEQ, etc.) */
-
-		if ((operand[1]->integer.value > MAX_MATCH_OPERATOR) ||
-		    (operand[3]->integer.value > MAX_MATCH_OPERATOR)) {
-			ACPI_ERROR((AE_INFO, "Match operator out of range"));
-			status = AE_AML_OPERAND_VALUE;
-			goto cleanup;
-		}
-
-		/* Get the package start_index, validate against the package length */
-
-		index = operand[5]->integer.value;
-		if (index >= operand[0]->package.count) {
-			ACPI_ERROR((AE_INFO,
-				    "Index (%X%8.8X) beyond package end (%X)",
-				    ACPI_FORMAT_UINT64(index),
-				    operand[0]->package.count));
-			status = AE_AML_PACKAGE_LIMIT;
-			goto cleanup;
-		}
-
-		/* Create an integer for the return value */
-
-		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-
-		}
-
-		/* Default return value if no match found */
-
-		return_desc->integer.value = ACPI_INTEGER_MAX;
-
-		/*
-		 * Examine each element until a match is found. Both match conditions
-		 * must be satisfied for a match to occur. Within the loop,
-		 * "continue" signifies that the current element does not match
-		 * and the next should be examined.
-		 *
-		 * Upon finding a match, the loop will terminate via "break" at
-		 * the bottom. If it terminates "normally" (without a match),
-		 * return_desc->integer.value retains its initial value of
-		 * ACPI_INTEGER_MAX (Ones), indicating that no match was found.
-		 */
-		for (; index < operand[0]->package.count; index++) {
-
-			/* Get the current package element */
-
-			this_element = operand[0]->package.elements[index];
-
-			/* Treat any uninitialized (NULL) elements as non-matching */
-
-			if (!this_element) {
-				continue;
-			}
-
-			/*
-			 * Both match conditions must be satisfied. Execution of a continue
-			 * (proceed to next iteration of enclosing for loop) signifies a
-			 * non-match.
-			 */
-			if (!acpi_ex_do_match((u32) operand[1]->integer.value,
-					      this_element, operand[2])) {
-				continue;
-			}
-
-			if (!acpi_ex_do_match((u32) operand[3]->integer.value,
-					      this_element, operand[4])) {
-				continue;
-			}
-
-			/* Match found: Index is the return value */
-
-			return_desc->integer.value = index;
-			break;
-		}
-		break;
-
-	case AML_LOAD_TABLE_OP:
-
-		status = acpi_ex_load_table_op(walk_state, &return_desc);
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
-			    walk_state->opcode));
-		status = AE_AML_BAD_OPCODE;
-		goto cleanup;
-	}
-
-      cleanup:
-
-	/* Delete return object on error */
-
-	if (ACPI_FAILURE(status)) {
-		acpi_ut_remove_reference(return_desc);
-	}
-
-	/* Save return object on success */
-
-	else {
-		walk_state->result_obj = return_desc;
-	}
-
-	return_ACPI_STATUS(status);
-}
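
The Match() loop removed above returns the index of the first package element, at or after start_index, that satisfies both match conditions, and returns Ones when no element matches (uninitialized NULL elements are simply skipped). Reduced to plain integers and abstract predicates -- the real code defers every comparison to acpi_ex_do_logical_op with implicit conversion -- it looks roughly like this sketch:

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

typedef bool (*sketch_match_fn)(uint64_t element, uint64_t match_obj);

/* Return the index of the first element >= start satisfying both
 * conditions, or SIZE_MAX (analogous to Ones) when nothing matches. */
static size_t sketch_match(const uint64_t *pkg, size_t count, size_t start,
			   sketch_match_fn op1, uint64_t obj1,
			   sketch_match_fn op2, uint64_t obj2)
{
	for (size_t i = start; i < count; i++) {
		if (op1(pkg[i], obj1) && op2(pkg[i], obj2))
			return i;        /* both conditions satisfied */
	}
	return SIZE_MAX;                 /* no match found */
}
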
diff --git a/drivers/acpi/executer/exprep.c b/drivers/acpi/executer/exprep.c
deleted file mode 100644
index 5d438c3..0000000
--- a/drivers/acpi/executer/exprep.c
+++ /dev/null
@@ -1,589 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: exprep - ACPI AML (p-code) execution - field prep utilities
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exprep")
-
-/* Local prototypes */
-static u32
-acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
-			    u8 field_flags, u32 * return_byte_alignment);
-
-#ifdef ACPI_UNDER_DEVELOPMENT
-
-static u32
-acpi_ex_generate_access(u32 field_bit_offset,
-			u32 field_bit_length, u32 region_length);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_generate_access
- *
- * PARAMETERS:  field_bit_offset    - Start of field within parent region/buffer
- *              field_bit_length    - Length of field in bits
- *              region_length       - Length of parent in bytes
- *
- * RETURN:      Field granularity (8, 16, 32 or 64) and
- *              byte_alignment (1, 2, 3, or 4)
- *
- * DESCRIPTION: Generate an optimal access width for fields defined with the
- *              any_acc keyword.
- *
- * NOTE: Need to have the region_length in order to check for boundary
- *       conditions (end-of-region).  However, the region_length is a deferred
- *       operation.  Therefore, to complete this implementation, the generation
- *       of this access width must be deferred until the region length has
- *       been evaluated.
- *
- ******************************************************************************/
-
-static u32
-acpi_ex_generate_access(u32 field_bit_offset,
-			u32 field_bit_length, u32 region_length)
-{
-	u32 field_byte_length;
-	u32 field_byte_offset;
-	u32 field_byte_end_offset;
-	u32 access_byte_width;
-	u32 field_start_offset;
-	u32 field_end_offset;
-	u32 minimum_access_width = 0xFFFFFFFF;
-	u32 minimum_accesses = 0xFFFFFFFF;
-	u32 accesses;
-
-	ACPI_FUNCTION_TRACE(ex_generate_access);
-
-	/* Round Field start offset and length to "minimal" byte boundaries */
-
-	field_byte_offset = ACPI_DIV_8(ACPI_ROUND_DOWN(field_bit_offset, 8));
-	field_byte_end_offset = ACPI_DIV_8(ACPI_ROUND_UP(field_bit_length +
-							 field_bit_offset, 8));
-	field_byte_length = field_byte_end_offset - field_byte_offset;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-			  "Bit length %d, Bit offset %d\n",
-			  field_bit_length, field_bit_offset));
-
-	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-			  "Byte Length %d, Byte Offset %d, End Offset %d\n",
-			  field_byte_length, field_byte_offset,
-			  field_byte_end_offset));
-
-	/*
-	 * Iterative search for the maximum access width that is both aligned
-	 * and does not go beyond the end of the region
-	 *
-	 * Start at byte_acc and work upwards to qword_acc max. (1,2,4,8 bytes)
-	 */
-	for (access_byte_width = 1; access_byte_width <= 8;
-	     access_byte_width <<= 1) {
-		/*
-		 * 1) Round end offset up to next access boundary and make sure that
-		 *    this does not go beyond the end of the parent region.
-		 * 2) When the Access width is greater than the field_byte_length, we
-		 *    are done. (This does not optimize for the perfectly aligned
-		 *    case yet).
-		 */
-		if (ACPI_ROUND_UP(field_byte_end_offset, access_byte_width) <=
-		    region_length) {
-			field_start_offset =
-			    ACPI_ROUND_DOWN(field_byte_offset,
-					    access_byte_width) /
-			    access_byte_width;
-
-			field_end_offset =
-			    ACPI_ROUND_UP((field_byte_length +
-					   field_byte_offset),
-					  access_byte_width) /
-			    access_byte_width;
-
-			accesses = field_end_offset - field_start_offset;
-
-			ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-					  "AccessWidth %d end is within region\n",
-					  access_byte_width));
-
-			ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-					  "Field Start %d, Field End %d -- requires %d accesses\n",
-					  field_start_offset, field_end_offset,
-					  accesses));
-
-			/* Single access is optimal */
-
-			if (accesses <= 1) {
-				ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-						  "Entire field can be accessed with one operation of size %d\n",
-						  access_byte_width));
-				return_VALUE(access_byte_width);
-			}
-
-			/*
-			 * Fits in the region, but requires more than one read/write.
-			 * try the next wider access on next iteration
-			 */
-			if (accesses < minimum_accesses) {
-				minimum_accesses = accesses;
-				minimum_access_width = access_byte_width;
-			}
-		} else {
-			ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-					  "AccessWidth %d end is NOT within region\n",
-					  access_byte_width));
-			if (access_byte_width == 1) {
-				ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-						  "Field goes beyond end-of-region!\n"));
-
-				/* Field does not fit in the region at all */
-
-				return_VALUE(0);
-			}
-
-			/*
-			 * This width goes beyond the end-of-region, back off to
-			 * previous access
-			 */
-			ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-					  "Backing off to previous optimal access width of %d\n",
-					  minimum_access_width));
-			return_VALUE(minimum_access_width);
-		}
-	}
-
-	/*
-	 * Could not read/write field with one operation,
-	 * just use max access width
-	 */
-	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-			  "Cannot access field in one operation, using width 8\n"));
-	return_VALUE(8);
-}
-#endif				/* ACPI_UNDER_DEVELOPMENT */
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_decode_field_access
- *
- * PARAMETERS:  obj_desc            - Field object
- *              field_flags         - Encoded fieldflags (contains access bits)
- *              return_byte_alignment - Where the byte alignment is returned
- *
- * RETURN:      Field granularity (8, 16, 32 or 64) and
- *              byte_alignment (1, 2, 3, or 4)
- *
- * DESCRIPTION: Decode the access_type bits of a field definition.
- *
- ******************************************************************************/
-
-static u32
-acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
-			    u8 field_flags, u32 * return_byte_alignment)
-{
-	u32 access;
-	u32 byte_alignment;
-	u32 bit_length;
-
-	ACPI_FUNCTION_TRACE(ex_decode_field_access);
-
-	access = (field_flags & AML_FIELD_ACCESS_TYPE_MASK);
-
-	switch (access) {
-	case AML_FIELD_ACCESS_ANY:
-
-#ifdef ACPI_UNDER_DEVELOPMENT
-		byte_alignment =
-		    acpi_ex_generate_access(obj_desc->common_field.
-					    start_field_bit_offset,
-					    obj_desc->common_field.bit_length,
-					    0xFFFFFFFF
-					    /* Temp until we pass region_length as parameter */
-		    );
-		bit_length = byte_alignment * 8;
-#endif
-
-		byte_alignment = 1;
-		bit_length = 8;
-		break;
-
-	case AML_FIELD_ACCESS_BYTE:
-	case AML_FIELD_ACCESS_BUFFER:	/* ACPI 2.0 (SMBus Buffer) */
-		byte_alignment = 1;
-		bit_length = 8;
-		break;
-
-	case AML_FIELD_ACCESS_WORD:
-		byte_alignment = 2;
-		bit_length = 16;
-		break;
-
-	case AML_FIELD_ACCESS_DWORD:
-		byte_alignment = 4;
-		bit_length = 32;
-		break;
-
-	case AML_FIELD_ACCESS_QWORD:	/* ACPI 2.0 */
-		byte_alignment = 8;
-		bit_length = 64;
-		break;
-
-	default:
-		/* Invalid field access type */
-
-		ACPI_ERROR((AE_INFO, "Unknown field access type %X", access));
-		return_UINT32(0);
-	}
-
-	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_BUFFER_FIELD) {
-		/*
-		 * buffer_field access can be on any byte boundary, so the
-		 * byte_alignment is always 1 byte -- regardless of any byte_alignment
-		 * implied by the field access type.
-		 */
-		byte_alignment = 1;
-	}
-
-	*return_byte_alignment = byte_alignment;
-	return_UINT32(bit_length);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_prep_common_field_object
- *
- * PARAMETERS:  obj_desc            - The field object
- *              field_flags         - Access, lock_rule, and update_rule.
- *                                    The format of a field_flag is described
- *                                    in the ACPI specification
- *              field_attribute     - Special attributes (not used)
- *              field_bit_position  - Field start position
- *              field_bit_length    - Field length in number of bits
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Initialize the areas of the field object that are common
- *              to the various types of fields.  Note: This is very "sensitive"
- *              code because we are solving the general case for field
- *              alignment.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
-				 u8 field_flags,
-				 u8 field_attribute,
-				 u32 field_bit_position, u32 field_bit_length)
-{
-	u32 access_bit_width;
-	u32 byte_alignment;
-	u32 nearest_byte_address;
-
-	ACPI_FUNCTION_TRACE(ex_prep_common_field_object);
-
-	/*
-	 * Note: the structure being initialized is the
-	 * ACPI_COMMON_FIELD_INFO;  No structure fields outside of the common
-	 * area are initialized by this procedure.
-	 */
-	obj_desc->common_field.field_flags = field_flags;
-	obj_desc->common_field.attribute = field_attribute;
-	obj_desc->common_field.bit_length = field_bit_length;
-
-	/*
-	 * Decode the access type so we can compute offsets.  The access type gives
-	 * two pieces of information - the width of each field access and the
-	 * necessary byte_alignment (address granularity) of the access.
-	 *
-	 * For any_acc, the access_bit_width is the largest width that is both
-	 * necessary and possible in an attempt to access the whole field in one
-	 * I/O operation.  However, for any_acc, the byte_alignment is always one
-	 * byte.
-	 *
-	 * For all Buffer Fields, the byte_alignment is always one byte.
-	 *
-	 * For all other access types (Byte, Word, Dword, Qword), the Bitwidth is
-	 * the same (equivalent) as the byte_alignment.
-	 */
-	access_bit_width = acpi_ex_decode_field_access(obj_desc, field_flags,
-						       &byte_alignment);
-	if (!access_bit_width) {
-		return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
-	}
-
-	/* Setup width (access granularity) fields */
-
-	obj_desc->common_field.access_byte_width = (u8)
-	    ACPI_DIV_8(access_bit_width);	/* 1,  2,  4,  8 */
-
-	obj_desc->common_field.access_bit_width = (u8) access_bit_width;
-
-	/*
-	 * base_byte_offset is the address of the start of the field within the
-	 * region.  It is the byte address of the first *datum* (field-width data
-	 * unit) of the field. (i.e., the first datum that contains at least the
-	 * first *bit* of the field.)
-	 *
-	 * Note: byte_alignment is always either equal to the access_bit_width or 8
-	 * (Byte access), and it defines the addressing granularity of the parent
-	 * region or buffer.
-	 */
-	nearest_byte_address =
-	    ACPI_ROUND_BITS_DOWN_TO_BYTES(field_bit_position);
-	obj_desc->common_field.base_byte_offset = (u32)
-	    ACPI_ROUND_DOWN(nearest_byte_address, byte_alignment);
-
-	/*
-	 * start_field_bit_offset is the offset of the first bit of the field within
-	 * a field datum.
-	 */
-	obj_desc->common_field.start_field_bit_offset = (u8)
-	    (field_bit_position -
-	     ACPI_MUL_8(obj_desc->common_field.base_byte_offset));
-
-	/*
-	 * Does the entire field fit within a single field access element? (datum)
-	 * (i.e., without crossing a datum boundary)
-	 */
-	if ((obj_desc->common_field.start_field_bit_offset +
-	     field_bit_length) <= (u16) access_bit_width) {
-		obj_desc->common.flags |= AOPOBJ_SINGLE_DATUM;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_prep_field_value
- *
- * PARAMETERS:  Info    - Contains all field creation info
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Construct an union acpi_operand_object of type def_field and
- *              connect it to the parent Node.
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
-{
-	union acpi_operand_object *obj_desc;
-	union acpi_operand_object *second_desc = NULL;
-	u32 type;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ex_prep_field_value);
-
-	/* Parameter validation */
-
-	if (info->field_type != ACPI_TYPE_LOCAL_INDEX_FIELD) {
-		if (!info->region_node) {
-			ACPI_ERROR((AE_INFO, "Null RegionNode"));
-			return_ACPI_STATUS(AE_AML_NO_OPERAND);
-		}
-
-		type = acpi_ns_get_type(info->region_node);
-		if (type != ACPI_TYPE_REGION) {
-			ACPI_ERROR((AE_INFO,
-				    "Needed Region, found type %X (%s)",
-				    type, acpi_ut_get_type_name(type)));
-
-			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-		}
-	}
-
-	/* Allocate a new field object */
-
-	obj_desc = acpi_ut_create_internal_object(info->field_type);
-	if (!obj_desc) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Initialize areas of the object that are common to all fields */
-
-	obj_desc->common_field.node = info->field_node;
-	status = acpi_ex_prep_common_field_object(obj_desc, info->field_flags,
-						  info->attribute,
-						  info->field_bit_position,
-						  info->field_bit_length);
-	if (ACPI_FAILURE(status)) {
-		acpi_ut_delete_object_desc(obj_desc);
-		return_ACPI_STATUS(status);
-	}
-
-	/* Initialize areas of the object that are specific to the field type */
-
-	switch (info->field_type) {
-	case ACPI_TYPE_LOCAL_REGION_FIELD:
-
-		obj_desc->field.region_obj =
-		    acpi_ns_get_attached_object(info->region_node);
-
-		/* An additional reference for the container */
-
-		acpi_ut_add_reference(obj_desc->field.region_obj);
-
-		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-				  "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
-				  obj_desc->field.start_field_bit_offset,
-				  obj_desc->field.base_byte_offset,
-				  obj_desc->field.access_byte_width,
-				  obj_desc->field.region_obj));
-		break;
-
-	case ACPI_TYPE_LOCAL_BANK_FIELD:
-
-		obj_desc->bank_field.value = info->bank_value;
-		obj_desc->bank_field.region_obj =
-		    acpi_ns_get_attached_object(info->region_node);
-		obj_desc->bank_field.bank_obj =
-		    acpi_ns_get_attached_object(info->register_node);
-
-		/* An additional reference for the attached objects */
-
-		acpi_ut_add_reference(obj_desc->bank_field.region_obj);
-		acpi_ut_add_reference(obj_desc->bank_field.bank_obj);
-
-		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-				  "Bank Field: BitOff %X, Off %X, Gran %X, Region %p, BankReg %p\n",
-				  obj_desc->bank_field.start_field_bit_offset,
-				  obj_desc->bank_field.base_byte_offset,
-				  obj_desc->field.access_byte_width,
-				  obj_desc->bank_field.region_obj,
-				  obj_desc->bank_field.bank_obj));
-
-		/*
-		 * Remember location in AML stream of the field unit
-		 * opcode and operands -- since the bank_value
-		 * operands must be evaluated.
-		 */
-		second_desc = obj_desc->common.next_object;
-		second_desc->extra.aml_start =
-		    ACPI_CAST_PTR(union acpi_parse_object,
-				  info->data_register_node)->named.data;
-		second_desc->extra.aml_length =
-		    ACPI_CAST_PTR(union acpi_parse_object,
-				  info->data_register_node)->named.length;
-
-		break;
-
-	case ACPI_TYPE_LOCAL_INDEX_FIELD:
-
-		/* Get the Index and Data registers */
-
-		obj_desc->index_field.index_obj =
-		    acpi_ns_get_attached_object(info->register_node);
-		obj_desc->index_field.data_obj =
-		    acpi_ns_get_attached_object(info->data_register_node);
-
-		if (!obj_desc->index_field.data_obj
-		    || !obj_desc->index_field.index_obj) {
-			ACPI_ERROR((AE_INFO,
-				    "Null Index Object during field prep"));
-			acpi_ut_delete_object_desc(obj_desc);
-			return_ACPI_STATUS(AE_AML_INTERNAL);
-		}
-
-		/* An additional reference for the attached objects */
-
-		acpi_ut_add_reference(obj_desc->index_field.data_obj);
-		acpi_ut_add_reference(obj_desc->index_field.index_obj);
-
-		/*
-		 * April 2006: Changed to match MS behavior
-		 *
-		 * The value written to the Index register is the byte offset of the
-		 * target field in units of the granularity of the index_field
-		 *
-		 * Previously, the value was calculated as an index in terms of the
-		 * width of the Data register, as below:
-		 *
-		 *      obj_desc->index_field.Value = (u32)
-		 *          (Info->field_bit_position / ACPI_MUL_8 (
-		 *              obj_desc->Field.access_byte_width));
-		 *
-		 * February 2006: Tried value as a byte offset:
-		 *      obj_desc->index_field.Value = (u32)
-		 *          ACPI_DIV_8 (Info->field_bit_position);
-		 */
-		obj_desc->index_field.value =
-		    (u32) ACPI_ROUND_DOWN(ACPI_DIV_8(info->field_bit_position),
-					  obj_desc->index_field.
-					  access_byte_width);
-
-		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-				  "IndexField: BitOff %X, Off %X, Value %X, Gran %X, Index %p, Data %p\n",
-				  obj_desc->index_field.start_field_bit_offset,
-				  obj_desc->index_field.base_byte_offset,
-				  obj_desc->index_field.value,
-				  obj_desc->field.access_byte_width,
-				  obj_desc->index_field.index_obj,
-				  obj_desc->index_field.data_obj));
-		break;
-
-	default:
-		/* No other types should get here */
-		break;
-	}
-
-	/*
-	 * Store the constructed descriptor (obj_desc) into the parent Node,
-	 * preserving the current type of that named_obj.
-	 */
-	status = acpi_ns_attach_object(info->field_node, obj_desc,
-				       acpi_ns_get_type(info->field_node));
-
-	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
-			  "Set NamedObj %p [%4.4s], ObjDesc %p\n",
-			  info->field_node,
-			  acpi_ut_get_node_name(info->field_node), obj_desc));
-
-	/* Remove local reference to the object */
-
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
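
The offset math in acpi_ex_prep_common_field_object above rounds the field's starting bit position down to the byte alignment of the access type, records the remaining bit offset within that first datum, and flags fields that fit entirely in one access element. The same arithmetic in an isolated sketch (sketch_field and sketch_prep_field are hypothetical names; the real structure is the common-field area of union acpi_operand_object):

#include <stdint.h>
#include <stdbool.h>

struct sketch_field {
	uint32_t base_byte_offset;       /* byte address of the first datum */
	uint8_t  start_field_bit_offset; /* bit offset of the field in that datum */
	bool     single_datum;           /* field fits in one access element */
};

/* byte_alignment must be nonzero (1, 2, 4, or 8); access_bit_width is the
 * width of one field datum (8, 16, 32, or 64 bits). */
static void sketch_prep_field(struct sketch_field *f, uint32_t bit_position,
			      uint32_t bit_length, uint32_t byte_alignment,
			      uint32_t access_bit_width)
{
	uint32_t nearest_byte = bit_position / 8;

	/* Round down to the addressing granularity of the parent region/buffer */
	f->base_byte_offset = nearest_byte - (nearest_byte % byte_alignment);

	/* Bit offset of the field relative to that base datum */
	f->start_field_bit_offset =
	    (uint8_t)(bit_position - 8 * f->base_byte_offset);

	/* Does the whole field fit inside a single datum? */
	f->single_datum =
	    (f->start_field_bit_offset + bit_length) <= access_bit_width;
}
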
diff --git a/drivers/acpi/executer/exregion.c b/drivers/acpi/executer/exregion.c
deleted file mode 100644
index 7a41c40..0000000
--- a/drivers/acpi/executer/exregion.c
+++ /dev/null
@@ -1,498 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: exregion - ACPI default op_region (address space) handlers
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exregion")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_system_memory_space_handler
- *
- * PARAMETERS:  Function            - Read or Write operation
- *              Address             - Where in the space to read or write
- *              bit_width           - Field width in bits (8, 16, 32, or 64)
- *              Value               - Pointer to in or out value
- *              handler_context     - Pointer to Handler's context
- *              region_context      - Pointer to context specific to the
- *                                    accessed region
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Handler for the System Memory address space (Op Region)
- *
- ******************************************************************************/
-acpi_status
-acpi_ex_system_memory_space_handler(u32 function,
-				    acpi_physical_address address,
-				    u32 bit_width,
-				    acpi_integer * value,
-				    void *handler_context, void *region_context)
-{
-	acpi_status status = AE_OK;
-	void *logical_addr_ptr = NULL;
-	struct acpi_mem_space_context *mem_info = region_context;
-	u32 length;
-	acpi_size window_size;
-#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
-	u32 remainder;
-#endif
-
-	ACPI_FUNCTION_TRACE(ex_system_memory_space_handler);
-
-	/* Validate and translate the bit width */
-
-	switch (bit_width) {
-	case 8:
-		length = 1;
-		break;
-
-	case 16:
-		length = 2;
-		break;
-
-	case 32:
-		length = 4;
-		break;
-
-	case 64:
-		length = 8;
-		break;
-
-	default:
-		ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %d",
-			    bit_width));
-		return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
-	}
-
-#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
-	/*
-	 * Hardware does not support non-aligned data transfers, we must verify
-	 * the request.
-	 */
-	(void)acpi_ut_short_divide((acpi_integer) address, length, NULL,
-				   &remainder);
-	if (remainder != 0) {
-		return_ACPI_STATUS(AE_AML_ALIGNMENT);
-	}
-#endif
-
-	/*
-	 * Does the request fit into the cached memory mapping?
-	 * Is 1) Address below the current mapping? OR
-	 *    2) Address beyond the current mapping?
-	 */
-	if ((address < mem_info->mapped_physical_address) ||
-	    (((acpi_integer) address + length) > ((acpi_integer)
-						  mem_info->
-						  mapped_physical_address +
-						  mem_info->mapped_length))) {
-		/*
-		 * The request cannot be resolved by the current memory mapping;
-		 * Delete the existing mapping and create a new one.
-		 */
-		if (mem_info->mapped_length) {
-
-			/* Valid mapping, delete it */
-
-			acpi_os_unmap_memory(mem_info->mapped_logical_address,
-					     mem_info->mapped_length);
-		}
-
-		/*
-		 * Don't attempt to map memory beyond the end of the region, and
-		 * constrain the maximum mapping size to something reasonable.
-		 */
-		window_size = (acpi_size)
-		    ((mem_info->address + mem_info->length) - address);
-
-		if (window_size > ACPI_SYSMEM_REGION_WINDOW_SIZE) {
-			window_size = ACPI_SYSMEM_REGION_WINDOW_SIZE;
-		}
-
-		/* Create a new mapping starting at the address given */
-
-		mem_info->mapped_logical_address =
-			acpi_os_map_memory((acpi_physical_address) address, window_size);
-		if (!mem_info->mapped_logical_address) {
-			ACPI_ERROR((AE_INFO,
-				    "Could not map memory at %8.8X%8.8X, size %X",
-				    ACPI_FORMAT_NATIVE_UINT(address),
-				    (u32) window_size));
-			mem_info->mapped_length = 0;
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		/* Save the physical address and mapping size */
-
-		mem_info->mapped_physical_address = address;
-		mem_info->mapped_length = window_size;
-	}
-
-	/*
-	 * Generate a logical pointer corresponding to the address we want to
-	 * access
-	 */
-	logical_addr_ptr = mem_info->mapped_logical_address +
-	    ((acpi_integer) address -
-	     (acpi_integer) mem_info->mapped_physical_address);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "System-Memory (width %d) R/W %d Address=%8.8X%8.8X\n",
-			  bit_width, function,
-			  ACPI_FORMAT_NATIVE_UINT(address)));
-
-	/*
-	 * Perform the memory read or write
-	 *
-	 * Note: For machines that do not support non-aligned transfers, the target
-	 * address was checked for alignment above.  We do not attempt to break the
-	 * transfer up into smaller (byte-size) chunks because the AML specifically
-	 * asked for a transfer width that the hardware may require.
-	 */
-	switch (function) {
-	case ACPI_READ:
-
-		*value = 0;
-		switch (bit_width) {
-		case 8:
-			*value = (acpi_integer) ACPI_GET8(logical_addr_ptr);
-			break;
-
-		case 16:
-			*value = (acpi_integer) ACPI_GET16(logical_addr_ptr);
-			break;
-
-		case 32:
-			*value = (acpi_integer) ACPI_GET32(logical_addr_ptr);
-			break;
-
-		case 64:
-			*value = (acpi_integer) ACPI_GET64(logical_addr_ptr);
-			break;
-
-		default:
-			/* bit_width was already validated */
-			break;
-		}
-		break;
-
-	case ACPI_WRITE:
-
-		switch (bit_width) {
-		case 8:
-			ACPI_SET8(logical_addr_ptr) = (u8) * value;
-			break;
-
-		case 16:
-			ACPI_SET16(logical_addr_ptr) = (u16) * value;
-			break;
-
-		case 32:
-			ACPI_SET32(logical_addr_ptr) = (u32) * value;
-			break;
-
-		case 64:
-			ACPI_SET64(logical_addr_ptr) = (u64) * value;
-			break;
-
-		default:
-			/* bit_width was already validated */
-			break;
-		}
-		break;
-
-	default:
-		status = AE_BAD_PARAMETER;
-		break;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_system_io_space_handler
- *
- * PARAMETERS:  Function            - Read or Write operation
- *              Address             - Where in the space to read or write
- *              bit_width           - Field width in bits (8, 16, or 32)
- *              Value               - Pointer to in or out value
- *              handler_context     - Pointer to Handler's context
- *              region_context      - Pointer to context specific to the
- *                                    accessed region
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Handler for the System IO address space (Op Region)
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_system_io_space_handler(u32 function,
-				acpi_physical_address address,
-				u32 bit_width,
-				acpi_integer * value,
-				void *handler_context, void *region_context)
-{
-	acpi_status status = AE_OK;
-	u32 value32;
-
-	ACPI_FUNCTION_TRACE(ex_system_io_space_handler);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "System-IO (width %d) R/W %d Address=%8.8X%8.8X\n",
-			  bit_width, function,
-			  ACPI_FORMAT_NATIVE_UINT(address)));
-
-	/* Decode the function parameter */
-
-	switch (function) {
-	case ACPI_READ:
-
-		status = acpi_os_read_port((acpi_io_address) address,
-					   &value32, bit_width);
-		*value = value32;
-		break;
-
-	case ACPI_WRITE:
-
-		status = acpi_os_write_port((acpi_io_address) address,
-					    (u32) * value, bit_width);
-		break;
-
-	default:
-		status = AE_BAD_PARAMETER;
-		break;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_pci_config_space_handler
- *
- * PARAMETERS:  Function            - Read or Write operation
- *              Address             - Where in the space to read or write
- *              bit_width           - Field width in bits (8, 16, or 32)
- *              Value               - Pointer to in or out value
- *              handler_context     - Pointer to Handler's context
- *              region_context      - Pointer to context specific to the
- *                                    accessed region
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Handler for the PCI Config address space (Op Region)
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_pci_config_space_handler(u32 function,
-				 acpi_physical_address address,
-				 u32 bit_width,
-				 acpi_integer * value,
-				 void *handler_context, void *region_context)
-{
-	acpi_status status = AE_OK;
-	struct acpi_pci_id *pci_id;
-	u16 pci_register;
-	u32 value32;
-
-	ACPI_FUNCTION_TRACE(ex_pci_config_space_handler);
-
-	/*
-	 *  The arguments to acpi_os(Read|Write)pci_configuration are:
-	 *
-	 *  pci_segment is the PCI bus segment range 0-31
-	 *  pci_bus     is the PCI bus number range 0-255
-	 *  pci_device  is the PCI device number range 0-31
-	 *  pci_function is the PCI device function number
-	 *  pci_register is the Config space register range 0-255 bytes
-	 *
-	 *  Value - input value for write, output address for read
-	 *
-	 */
-	pci_id = (struct acpi_pci_id *)region_context;
-	pci_register = (u16) (u32) address;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "Pci-Config %d (%d) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n",
-			  function, bit_width, pci_id->segment, pci_id->bus,
-			  pci_id->device, pci_id->function, pci_register));
-
-	switch (function) {
-	case ACPI_READ:
-
-		status = acpi_os_read_pci_configuration(pci_id, pci_register,
-							&value32, bit_width);
-		*value = value32;
-		break;
-
-	case ACPI_WRITE:
-
-		status = acpi_os_write_pci_configuration(pci_id, pci_register,
-							 *value, bit_width);
-		break;
-
-	default:
-
-		status = AE_BAD_PARAMETER;
-		break;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_cmos_space_handler
- *
- * PARAMETERS:  Function            - Read or Write operation
- *              Address             - Where in the space to read or write
- *              bit_width           - Field width in bits (8, 16, or 32)
- *              Value               - Pointer to in or out value
- *              handler_context     - Pointer to Handler's context
- *              region_context      - Pointer to context specific to the
- *                                    accessed region
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Handler for the CMOS address space (Op Region)
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_cmos_space_handler(u32 function,
-			   acpi_physical_address address,
-			   u32 bit_width,
-			   acpi_integer * value,
-			   void *handler_context, void *region_context)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ex_cmos_space_handler);
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_pci_bar_space_handler
- *
- * PARAMETERS:  Function            - Read or Write operation
- *              Address             - Where in the space to read or write
- *              bit_width           - Field width in bits (8, 16, or 32)
- *              Value               - Pointer to in or out value
- *              handler_context     - Pointer to Handler's context
- *              region_context      - Pointer to context specific to the
- *                                    accessed region
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Handler for the PCI bar_target address space (Op Region)
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_pci_bar_space_handler(u32 function,
-			      acpi_physical_address address,
-			      u32 bit_width,
-			      acpi_integer * value,
-			      void *handler_context, void *region_context)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ex_pci_bar_space_handler);
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_data_table_space_handler
- *
- * PARAMETERS:  Function            - Read or Write operation
- *              Address             - Where in the space to read or write
- *              bit_width           - Field width in bits (8, 16, or 32)
- *              Value               - Pointer to in or out value
- *              handler_context     - Pointer to Handler's context
- *              region_context      - Pointer to context specific to the
- *                                    accessed region
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Handler for the Data Table address space (Op Region)
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_data_table_space_handler(u32 function,
-				 acpi_physical_address address,
-				 u32 bit_width,
-				 acpi_integer * value,
-				 void *handler_context, void *region_context)
-{
-	ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
-
-	/* Perform the memory read or write */
-
-	switch (function) {
-	case ACPI_READ:
-
-		ACPI_MEMCPY(ACPI_CAST_PTR(char, value),
-			    ACPI_PHYSADDR_TO_PTR(address),
-			    ACPI_DIV_8(bit_width));
-		break;
-
-	case ACPI_WRITE:
-	default:
-
-		return_ACPI_STATUS(AE_SUPPORT);
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
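
The SystemMemory handler removed above keeps a single cached memory mapping and remaps only when a request falls outside it, with the new window capped at ACPI_SYSMEM_REGION_WINDOW_SIZE. The window test itself reduces to a range-containment check; a sketch with a hypothetical stand-in for struct acpi_mem_space_context:

#include <stdint.h>
#include <stdbool.h>

struct sketch_mem_ctx {
	uint64_t mapped_physical_address; /* start of the current window */
	uint64_t mapped_length;           /* size of the current window */
};

/* True when the whole [address, address + length) range lies inside the
 * currently mapped window, so no remap is needed. */
static bool sketch_request_in_window(const struct sketch_mem_ctx *ctx,
				     uint64_t address, uint64_t length)
{
	if (!ctx->mapped_length)
		return false;             /* no valid mapping yet */

	return address >= ctx->mapped_physical_address &&
	       address + length <=
		   ctx->mapped_physical_address + ctx->mapped_length;
}
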
diff --git a/drivers/acpi/executer/exresnte.c b/drivers/acpi/executer/exresnte.c
deleted file mode 100644
index 423ad36..0000000
--- a/drivers/acpi/executer/exresnte.c
+++ /dev/null
@@ -1,277 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: exresnte - AML Interpreter object resolution
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exresnte")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_resolve_node_to_value
- *
- * PARAMETERS:  object_ptr      - Pointer to a location that contains
- *                                a pointer to a NS node, and will receive a
- *                                pointer to the resolved object.
- *              walk_state      - Current state.  Valid only if executing AML
- *                                code.  NULL if simply resolving an object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Resolve a Namespace node to a valued object
- *
- * Note: for some of the data types, the pointer attached to the Node
- * can be either a pointer to an actual internal object or a pointer into the
- * AML stream itself.  These types are currently:
- *
- *      ACPI_TYPE_INTEGER
- *      ACPI_TYPE_STRING
- *      ACPI_TYPE_BUFFER
- *      ACPI_TYPE_MUTEX
- *      ACPI_TYPE_PACKAGE
- *
- ******************************************************************************/
-acpi_status
-acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
-			      struct acpi_walk_state *walk_state)
-{
-	acpi_status status = AE_OK;
-	union acpi_operand_object *source_desc;
-	union acpi_operand_object *obj_desc = NULL;
-	struct acpi_namespace_node *node;
-	acpi_object_type entry_type;
-
-	ACPI_FUNCTION_TRACE(ex_resolve_node_to_value);
-
-	/*
-	 * The stack pointer points to a struct acpi_namespace_node (Node).  Get the
-	 * object that is attached to the Node.
-	 */
-	node = *object_ptr;
-	source_desc = acpi_ns_get_attached_object(node);
-	entry_type = acpi_ns_get_type((acpi_handle) node);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Entry=%p SourceDesc=%p [%s]\n",
-			  node, source_desc,
-			  acpi_ut_get_type_name(entry_type)));
-
-	if ((entry_type == ACPI_TYPE_LOCAL_ALIAS) ||
-	    (entry_type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) {
-
-		/* There is always exactly one level of indirection */
-
-		node = ACPI_CAST_PTR(struct acpi_namespace_node, node->object);
-		source_desc = acpi_ns_get_attached_object(node);
-		entry_type = acpi_ns_get_type((acpi_handle) node);
-		*object_ptr = node;
-	}
-
-	/*
-	 * Several object types require no further processing:
-	 * 1) Device/Thermal objects don't have a "real" subobject, return the Node
-	 * 2) Method locals and arguments have a pseudo-Node
-	 * 3) 10/2007: Added method type to assist with Package construction.
-	 */
-	if ((entry_type == ACPI_TYPE_DEVICE) ||
-	    (entry_type == ACPI_TYPE_THERMAL) ||
-	    (entry_type == ACPI_TYPE_METHOD) ||
-	    (node->flags & (ANOBJ_METHOD_ARG | ANOBJ_METHOD_LOCAL))) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	if (!source_desc) {
-		ACPI_ERROR((AE_INFO, "No object attached to node %p", node));
-		return_ACPI_STATUS(AE_AML_NO_OPERAND);
-	}
-
-	/*
-	 * Action is based on the type of the Node, which indicates the type
-	 * of the attached object or pointer
-	 */
-	switch (entry_type) {
-	case ACPI_TYPE_PACKAGE:
-
-		if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_PACKAGE) {
-			ACPI_ERROR((AE_INFO, "Object not a Package, type %s",
-				    acpi_ut_get_object_type_name(source_desc)));
-			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-		}
-
-		status = acpi_ds_get_package_arguments(source_desc);
-		if (ACPI_SUCCESS(status)) {
-
-			/* Return an additional reference to the object */
-
-			obj_desc = source_desc;
-			acpi_ut_add_reference(obj_desc);
-		}
-		break;
-
-	case ACPI_TYPE_BUFFER:
-
-		if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_BUFFER) {
-			ACPI_ERROR((AE_INFO, "Object not a Buffer, type %s",
-				    acpi_ut_get_object_type_name(source_desc)));
-			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-		}
-
-		status = acpi_ds_get_buffer_arguments(source_desc);
-		if (ACPI_SUCCESS(status)) {
-
-			/* Return an additional reference to the object */
-
-			obj_desc = source_desc;
-			acpi_ut_add_reference(obj_desc);
-		}
-		break;
-
-	case ACPI_TYPE_STRING:
-
-		if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_STRING) {
-			ACPI_ERROR((AE_INFO, "Object not a String, type %s",
-				    acpi_ut_get_object_type_name(source_desc)));
-			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-		}
-
-		/* Return an additional reference to the object */
-
-		obj_desc = source_desc;
-		acpi_ut_add_reference(obj_desc);
-		break;
-
-	case ACPI_TYPE_INTEGER:
-
-		if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_INTEGER) {
-			ACPI_ERROR((AE_INFO, "Object not a Integer, type %s",
-				    acpi_ut_get_object_type_name(source_desc)));
-			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-		}
-
-		/* Return an additional reference to the object */
-
-		obj_desc = source_desc;
-		acpi_ut_add_reference(obj_desc);
-		break;
-
-	case ACPI_TYPE_BUFFER_FIELD:
-	case ACPI_TYPE_LOCAL_REGION_FIELD:
-	case ACPI_TYPE_LOCAL_BANK_FIELD:
-	case ACPI_TYPE_LOCAL_INDEX_FIELD:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "FieldRead Node=%p SourceDesc=%p Type=%X\n",
-				  node, source_desc, entry_type));
-
-		status =
-		    acpi_ex_read_data_from_field(walk_state, source_desc,
-						 &obj_desc);
-		break;
-
-		/* For these objects, just return the object attached to the Node */
-
-	case ACPI_TYPE_MUTEX:
-	case ACPI_TYPE_POWER:
-	case ACPI_TYPE_PROCESSOR:
-	case ACPI_TYPE_EVENT:
-	case ACPI_TYPE_REGION:
-
-		/* Return an additional reference to the object */
-
-		obj_desc = source_desc;
-		acpi_ut_add_reference(obj_desc);
-		break;
-
-		/* TYPE_ANY is untyped, and thus there is no object associated with it */
-
-	case ACPI_TYPE_ANY:
-
-		ACPI_ERROR((AE_INFO,
-			    "Untyped entry %p, no attached object!", node));
-
-		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);	/* Cannot be AE_TYPE */
-
-	case ACPI_TYPE_LOCAL_REFERENCE:
-
-		switch (source_desc->reference.class) {
-		case ACPI_REFCLASS_TABLE:	/* This is a ddb_handle */
-		case ACPI_REFCLASS_REFOF:
-		case ACPI_REFCLASS_INDEX:
-
-			/* Return an additional reference to the object */
-
-			obj_desc = source_desc;
-			acpi_ut_add_reference(obj_desc);
-			break;
-
-		default:
-			/* No named references are allowed here */
-
-			ACPI_ERROR((AE_INFO,
-				    "Unsupported Reference type %X",
-				    source_desc->reference.class));
-
-			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-		}
-		break;
-
-	default:
-
-		/* Default case is for unknown types */
-
-		ACPI_ERROR((AE_INFO,
-			    "Node %p - Unknown object type %X",
-			    node, entry_type));
-
-		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-
-	}			/* switch (entry_type) */
-
-	/* Return the object descriptor */
-
-	*object_ptr = (void *)obj_desc;
-	return_ACPI_STATUS(status);
-}
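The Package, Buffer, String and Integer cases above all follow the same ownership rule: the attached object is handed back with an extra reference, so the node keeps its own count while the caller receives one to release later. A standalone sketch of that rule, using simplified stand-in types rather than the real ACPICA structures:

/*
 * Illustrative sketch only -- mock types, not the ACPICA object layout.
 * It shows the ownership convention used above: resolving a namespace
 * node to its value returns the attached object plus a new reference,
 * so the node's copy and the caller's copy are counted separately.
 */
#include <stdio.h>

struct mock_object {
	const char *type_name;
	int ref_count;
};

struct mock_node {
	const char *name;
	struct mock_object *attached;
};

static void mock_add_reference(struct mock_object *obj)
{
	obj->ref_count++;	/* analogous to acpi_ut_add_reference() */
}

/* Resolve a node to its value: return the attached object with a new reference */
static struct mock_object *mock_resolve_node(struct mock_node *node)
{
	struct mock_object *obj = node->attached;

	if (!obj)
		return NULL;	/* the real code reports AE_AML_NO_OPERAND here */

	mock_add_reference(obj);
	return obj;
}

int main(void)
{
	struct mock_object pkg = { "Package", 1 };	/* one reference held by the node */
	struct mock_node node = { "PKG0", &pkg };
	struct mock_object *resolved = mock_resolve_node(&node);

	printf("%s now has %d references\n", resolved->type_name, pkg.ref_count);
	return 0;
}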
diff --git a/drivers/acpi/executer/exresolv.c b/drivers/acpi/executer/exresolv.c
deleted file mode 100644
index 89571b9..0000000
--- a/drivers/acpi/executer/exresolv.c
+++ /dev/null
@@ -1,550 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: exresolv - AML Interpreter object resolution
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/amlcode.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exresolv")
-
-/* Local prototypes */
-static acpi_status
-acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
-				struct acpi_walk_state *walk_state);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_resolve_to_value
- *
- * PARAMETERS:  **stack_ptr         - Points to entry on obj_stack, which can
- *                                    be either an (union acpi_operand_object *)
- *                                    or an acpi_handle.
- *              walk_state          - Current method state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Convert Reference objects to values
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_resolve_to_value(union acpi_operand_object **stack_ptr,
-			 struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_resolve_to_value, stack_ptr);
-
-	if (!stack_ptr || !*stack_ptr) {
-		ACPI_ERROR((AE_INFO, "Internal - null pointer"));
-		return_ACPI_STATUS(AE_AML_NO_OPERAND);
-	}
-
-	/*
-	 * The entity pointed to by the stack_ptr can be either
-	 * 1) A valid union acpi_operand_object, or
-	 * 2) A struct acpi_namespace_node (named_obj)
-	 */
-	if (ACPI_GET_DESCRIPTOR_TYPE(*stack_ptr) == ACPI_DESC_TYPE_OPERAND) {
-		status = acpi_ex_resolve_object_to_value(stack_ptr, walk_state);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		if (!*stack_ptr) {
-			ACPI_ERROR((AE_INFO, "Internal - null pointer"));
-			return_ACPI_STATUS(AE_AML_NO_OPERAND);
-		}
-	}
-
-	/*
-	 * Object on the stack may have changed if acpi_ex_resolve_object_to_value()
-	 * was called (i.e., we can't use an _else_ here.)
-	 */
-	if (ACPI_GET_DESCRIPTOR_TYPE(*stack_ptr) == ACPI_DESC_TYPE_NAMED) {
-		status =
-		    acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
-						  (struct acpi_namespace_node,
-						   stack_ptr), walk_state);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Resolved object %p\n", *stack_ptr));
-	return_ACPI_STATUS(AE_OK);
-}
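The two independent if-tests above are deliberate: resolving an operand descriptor can itself leave a namespace node on the stack, so the named-node pass must run as a second check rather than sit in an else branch. A minimal standalone sketch of that ordering, with made-up descriptor types in place of the ACPICA ones:

/*
 * Sketch of the two-pass resolution pattern (illustrative types only).
 * The first pass may replace an operand with a named entry, so the
 * second test is intentionally not an 'else'.
 */
#include <stdio.h>

enum desc_type { DESC_OPERAND, DESC_NAMED };

struct stack_entry {
	enum desc_type type;
	int value;
};

static void resolve_operand(struct stack_entry *e)
{
	/* pretend the operand was a reference to a named object */
	e->type = DESC_NAMED;
}

static void resolve_named(struct stack_entry *e)
{
	e->type = DESC_OPERAND;	/* replaced by the node's value */
	e->value = 42;
}

int main(void)
{
	struct stack_entry entry = { DESC_OPERAND, 0 };

	if (entry.type == DESC_OPERAND)
		resolve_operand(&entry);

	/* Not 'else': the first pass may have produced a named entry */
	if (entry.type == DESC_NAMED)
		resolve_named(&entry);

	printf("resolved value: %d\n", entry.value);
	return 0;
}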
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_resolve_object_to_value
- *
- * PARAMETERS:  stack_ptr       - Pointer to an internal object
- *              walk_state      - Current method state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Retrieve the value from an internal object. The Reference type
- *              uses the associated AML opcode to determine the value.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
-				struct acpi_walk_state *walk_state)
-{
-	acpi_status status = AE_OK;
-	union acpi_operand_object *stack_desc;
-	union acpi_operand_object *obj_desc = NULL;
-	u8 ref_type;
-
-	ACPI_FUNCTION_TRACE(ex_resolve_object_to_value);
-
-	stack_desc = *stack_ptr;
-
-	/* This is a union acpi_operand_object */
-
-	switch (ACPI_GET_OBJECT_TYPE(stack_desc)) {
-	case ACPI_TYPE_LOCAL_REFERENCE:
-
-		ref_type = stack_desc->reference.class;
-
-		switch (ref_type) {
-		case ACPI_REFCLASS_LOCAL:
-		case ACPI_REFCLASS_ARG:
-
-			/*
-			 * Get the local from the method's state info
-			 * Note: this increments the local's object reference count
-			 */
-			status = acpi_ds_method_data_get_value(ref_type,
-							       stack_desc->
-							       reference.value,
-							       walk_state,
-							       &obj_desc);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-
-			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-					  "[Arg/Local %X] ValueObj is %p\n",
-					  stack_desc->reference.value,
-					  obj_desc));
-
-			/*
-			 * Now we can delete the original Reference Object and
-			 * replace it with the resolved value
-			 */
-			acpi_ut_remove_reference(stack_desc);
-			*stack_ptr = obj_desc;
-			break;
-
-		case ACPI_REFCLASS_INDEX:
-
-			switch (stack_desc->reference.target_type) {
-			case ACPI_TYPE_BUFFER_FIELD:
-
-				/* Just return - do not dereference */
-				break;
-
-			case ACPI_TYPE_PACKAGE:
-
-				/* If method call or copy_object - do not dereference */
-
-				if ((walk_state->opcode ==
-				     AML_INT_METHODCALL_OP)
-				    || (walk_state->opcode == AML_COPY_OP)) {
-					break;
-				}
-
-				/* Otherwise, dereference the package_index to a package element */
-
-				obj_desc = *stack_desc->reference.where;
-				if (obj_desc) {
-					/*
-					 * Valid object descriptor, copy pointer to return value
-					 * (i.e., dereference the package index)
-					 * Delete the ref object, increment the returned object
-					 */
-					acpi_ut_remove_reference(stack_desc);
-					acpi_ut_add_reference(obj_desc);
-					*stack_ptr = obj_desc;
-				} else {
-					/*
-					 * A NULL object descriptor means an uninitialized element of
-					 * the package, can't dereference it
-					 */
-					ACPI_ERROR((AE_INFO,
-						    "Attempt to dereference an Index to NULL package element Idx=%p",
-						    stack_desc));
-					status = AE_AML_UNINITIALIZED_ELEMENT;
-				}
-				break;
-
-			default:
-
-				/* Invalid reference object */
-
-				ACPI_ERROR((AE_INFO,
-					    "Unknown TargetType %X in Index/Reference object %p",
-					    stack_desc->reference.target_type,
-					    stack_desc));
-				status = AE_AML_INTERNAL;
-				break;
-			}
-			break;
-
-		case ACPI_REFCLASS_REFOF:
-		case ACPI_REFCLASS_DEBUG:
-		case ACPI_REFCLASS_TABLE:
-
-			/* Just leave the object as-is, do not dereference */
-
-			break;
-
-		case ACPI_REFCLASS_NAME:	/* Reference to a named object */
-
-			/* Dereference the name */
-
-			if ((stack_desc->reference.node->type ==
-			     ACPI_TYPE_DEVICE)
-			    || (stack_desc->reference.node->type ==
-				ACPI_TYPE_THERMAL)) {
-
-				/* These node types do not have 'real' subobjects */
-
-				*stack_ptr = (void *)stack_desc->reference.node;
-			} else {
-				/* Get the object pointed to by the namespace node */
-
-				*stack_ptr =
-				    (stack_desc->reference.node)->object;
-				acpi_ut_add_reference(*stack_ptr);
-			}
-
-			acpi_ut_remove_reference(stack_desc);
-			break;
-
-		default:
-
-			ACPI_ERROR((AE_INFO,
-				    "Unknown Reference type %X in %p", ref_type,
-				    stack_desc));
-			status = AE_AML_INTERNAL;
-			break;
-		}
-		break;
-
-	case ACPI_TYPE_BUFFER:
-
-		status = acpi_ds_get_buffer_arguments(stack_desc);
-		break;
-
-	case ACPI_TYPE_PACKAGE:
-
-		status = acpi_ds_get_package_arguments(stack_desc);
-		break;
-
-	case ACPI_TYPE_BUFFER_FIELD:
-	case ACPI_TYPE_LOCAL_REGION_FIELD:
-	case ACPI_TYPE_LOCAL_BANK_FIELD:
-	case ACPI_TYPE_LOCAL_INDEX_FIELD:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "FieldRead SourceDesc=%p Type=%X\n",
-				  stack_desc,
-				  ACPI_GET_OBJECT_TYPE(stack_desc)));
-
-		status =
-		    acpi_ex_read_data_from_field(walk_state, stack_desc,
-						 &obj_desc);
-
-		/* Remove a reference to the original operand, then override */
-
-		acpi_ut_remove_reference(*stack_ptr);
-		*stack_ptr = (void *)obj_desc;
-		break;
-
-	default:
-		break;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_resolve_multiple
- *
- * PARAMETERS:  walk_state          - Current state (contains AML opcode)
- *              Operand             - Starting point for resolution
- *              return_type         - Where the object type is returned
- *              return_desc         - Where the resolved object is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Return the base object and type.  Traverse a reference list if
- *              necessary to get to the base object.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
-			 union acpi_operand_object *operand,
-			 acpi_object_type * return_type,
-			 union acpi_operand_object **return_desc)
-{
-	union acpi_operand_object *obj_desc = (void *)operand;
-	struct acpi_namespace_node *node;
-	acpi_object_type type;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_ex_resolve_multiple);
-
-	/* Operand can be either a namespace node or an operand descriptor */
-
-	switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
-	case ACPI_DESC_TYPE_OPERAND:
-		type = obj_desc->common.type;
-		break;
-
-	case ACPI_DESC_TYPE_NAMED:
-		type = ((struct acpi_namespace_node *)obj_desc)->type;
-		obj_desc =
-		    acpi_ns_get_attached_object((struct acpi_namespace_node *)
-						obj_desc);
-
-		/* If we had an Alias node, use the attached object for type info */
-
-		if (type == ACPI_TYPE_LOCAL_ALIAS) {
-			type = ((struct acpi_namespace_node *)obj_desc)->type;
-			obj_desc =
-			    acpi_ns_get_attached_object((struct
-							 acpi_namespace_node *)
-							obj_desc);
-		}
-		break;
-
-	default:
-		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-	}
-
-	/* If type is anything other than a reference, we are done */
-
-	if (type != ACPI_TYPE_LOCAL_REFERENCE) {
-		goto exit;
-	}
-
-	/*
-	 * For reference objects created via the ref_of, Index, or Load/load_table
-	 * operators, we need to get to the base object (as per the ACPI
-	 * specification of the object_type and size_of operators). This means
-	 * traversing the list of possibly many nested references.
-	 */
-	while (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REFERENCE) {
-		switch (obj_desc->reference.class) {
-		case ACPI_REFCLASS_REFOF:
-		case ACPI_REFCLASS_NAME:
-
-			/* Dereference the reference pointer */
-
-			if (obj_desc->reference.class == ACPI_REFCLASS_REFOF) {
-				node = obj_desc->reference.object;
-			} else {	/* AML_INT_NAMEPATH_OP */
-
-				node = obj_desc->reference.node;
-			}
-
-			/* All "References" point to a NS node */
-
-			if (ACPI_GET_DESCRIPTOR_TYPE(node) !=
-			    ACPI_DESC_TYPE_NAMED) {
-				ACPI_ERROR((AE_INFO, "Not a NS node %p [%s]",
-					    node,
-					    acpi_ut_get_descriptor_name(node)));
-				return_ACPI_STATUS(AE_AML_INTERNAL);
-			}
-
-			/* Get the attached object */
-
-			obj_desc = acpi_ns_get_attached_object(node);
-			if (!obj_desc) {
-
-				/* No object, use the NS node type */
-
-				type = acpi_ns_get_type(node);
-				goto exit;
-			}
-
-			/* Check for circular references */
-
-			if (obj_desc == operand) {
-				return_ACPI_STATUS(AE_AML_CIRCULAR_REFERENCE);
-			}
-			break;
-
-		case ACPI_REFCLASS_INDEX:
-
-			/* Get the type of this reference (index into another object) */
-
-			type = obj_desc->reference.target_type;
-			if (type != ACPI_TYPE_PACKAGE) {
-				goto exit;
-			}
-
-			/*
-			 * The main object is a package, we want to get the type
-			 * of the individual package element that is referenced by
-			 * the index.
-			 *
-			 * This could of course in turn be another reference object.
-			 */
-			obj_desc = *(obj_desc->reference.where);
-			if (!obj_desc) {
-
-				/* NULL package elements are allowed */
-
-				type = 0;	/* Uninitialized */
-				goto exit;
-			}
-			break;
-
-		case ACPI_REFCLASS_TABLE:
-
-			type = ACPI_TYPE_DDB_HANDLE;
-			goto exit;
-
-		case ACPI_REFCLASS_LOCAL:
-		case ACPI_REFCLASS_ARG:
-
-			if (return_desc) {
-				status =
-				    acpi_ds_method_data_get_value(obj_desc->
-								  reference.
-								  class,
-								  obj_desc->
-								  reference.
-								  value,
-								  walk_state,
-								  &obj_desc);
-				if (ACPI_FAILURE(status)) {
-					return_ACPI_STATUS(status);
-				}
-				acpi_ut_remove_reference(obj_desc);
-			} else {
-				status =
-				    acpi_ds_method_data_get_node(obj_desc->
-								 reference.
-								 class,
-								 obj_desc->
-								 reference.
-								 value,
-								 walk_state,
-								 &node);
-				if (ACPI_FAILURE(status)) {
-					return_ACPI_STATUS(status);
-				}
-
-				obj_desc = acpi_ns_get_attached_object(node);
-				if (!obj_desc) {
-					type = ACPI_TYPE_ANY;
-					goto exit;
-				}
-			}
-			break;
-
-		case ACPI_REFCLASS_DEBUG:
-
-			/* The Debug Object is of type "DebugObject" */
-
-			type = ACPI_TYPE_DEBUG_OBJECT;
-			goto exit;
-
-		default:
-
-			ACPI_ERROR((AE_INFO,
-				    "Unknown Reference Class %2.2X",
-				    obj_desc->reference.class));
-			return_ACPI_STATUS(AE_AML_INTERNAL);
-		}
-	}
-
-	/*
-	 * Now we are guaranteed to have an object that has not been created
-	 * via the ref_of or Index operators.
-	 */
-	type = ACPI_GET_OBJECT_TYPE(obj_desc);
-
-      exit:
-	/* Convert internal types to external types */
-
-	switch (type) {
-	case ACPI_TYPE_LOCAL_REGION_FIELD:
-	case ACPI_TYPE_LOCAL_BANK_FIELD:
-	case ACPI_TYPE_LOCAL_INDEX_FIELD:
-
-		type = ACPI_TYPE_FIELD_UNIT;
-		break;
-
-	case ACPI_TYPE_LOCAL_SCOPE:
-
-		/* Per ACPI Specification, Scope is untyped */
-
-		type = ACPI_TYPE_ANY;
-		break;
-
-	default:
-		/* No change to Type required */
-		break;
-	}
-
-	*return_type = type;
-	if (return_desc) {
-		*return_desc = obj_desc;
-	}
-	return_ACPI_STATUS(AE_OK);
-}
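Because ObjectType and SizeOf are defined over the base object, acpi_ex_resolve_multiple() walks nested references until it reaches a non-reference, watching for cycles along the way. A simplified standalone sketch of that traversal (stand-in types only; the real code also handles the Index, Arg/Local, Table and Debug reference classes):

/*
 * Sketch of the reference-chasing loop: follow a chain of references to
 * the base object, failing on dangling or circular chains.
 */
#include <stdio.h>

enum obj_type { TYPE_REFERENCE, TYPE_INTEGER, TYPE_BUFFER };

struct obj {
	enum obj_type type;
	struct obj *target;	/* valid only for TYPE_REFERENCE */
};

static int resolve_base_type(struct obj *start, enum obj_type *out)
{
	struct obj *cur = start;

	while (cur->type == TYPE_REFERENCE) {
		cur = cur->target;
		if (!cur)
			return -1;	/* dangling reference */
		if (cur == start)
			return -2;	/* circular reference */
	}
	*out = cur->type;
	return 0;
}

int main(void)
{
	struct obj base = { TYPE_BUFFER, NULL };
	struct obj ref2 = { TYPE_REFERENCE, &base };
	struct obj ref1 = { TYPE_REFERENCE, &ref2 };
	enum obj_type t;

	if (resolve_base_type(&ref1, &t) == 0)
		printf("base type: %d\n", t);	/* the Buffer type, two hops away */
	return 0;
}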
diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/executer/exresop.c
deleted file mode 100644
index 0bb8259..0000000
--- a/drivers/acpi/executer/exresop.c
+++ /dev/null
@@ -1,700 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: exresop - AML Interpreter operand/object resolution
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/amlcode.h>
-#include <acpi/acparser.h>
-#include <acpi/acinterp.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exresop")
-
-/* Local prototypes */
-static acpi_status
-acpi_ex_check_object_type(acpi_object_type type_needed,
-			  acpi_object_type this_type, void *object);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_check_object_type
- *
- * PARAMETERS:  type_needed         - Object type needed
- *              this_type           - Actual object type
- *              Object              - Object pointer
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Check required type against actual type
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ex_check_object_type(acpi_object_type type_needed,
-			  acpi_object_type this_type, void *object)
-{
-	ACPI_FUNCTION_ENTRY();
-
-	if (type_needed == ACPI_TYPE_ANY) {
-
-		/* All types OK, so we don't perform any typechecks */
-
-		return (AE_OK);
-	}
-
-	if (type_needed == ACPI_TYPE_LOCAL_REFERENCE) {
-		/*
-		 * Allow the AML "Constant" opcodes (Zero, One, etc.) to be reference
-		 * objects and thus allow them to be targets.  (As per the ACPI
-		 * specification, a store to a constant is a noop.)
-		 */
-		if ((this_type == ACPI_TYPE_INTEGER) &&
-		    (((union acpi_operand_object *)object)->common.
-		     flags & AOPOBJ_AML_CONSTANT)) {
-			return (AE_OK);
-		}
-	}
-
-	if (type_needed != this_type) {
-		ACPI_ERROR((AE_INFO,
-			    "Needed type [%s], found [%s] %p",
-			    acpi_ut_get_type_name(type_needed),
-			    acpi_ut_get_type_name(this_type), object));
-
-		return (AE_AML_OPERAND_TYPE);
-	}
-
-	return (AE_OK);
-}
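The type check above has two escape hatches: ACPI_TYPE_ANY matches everything, and an AML constant integer is accepted where a reference is required because a store to a constant is defined as a no-op. A small standalone sketch of that policy, with simplified flags and type names rather than the ACPICA definitions:

/*
 * Sketch of the operand type-check policy: ANY always passes, and an
 * AML constant integer may stand in for a reference target.
 */
#include <stdbool.h>
#include <stdio.h>

enum type { T_ANY, T_INTEGER, T_REFERENCE };

struct operand {
	enum type type;
	bool is_aml_constant;
};

static bool type_ok(enum type needed, const struct operand *op)
{
	if (needed == T_ANY)
		return true;
	if (needed == T_REFERENCE && op->type == T_INTEGER && op->is_aml_constant)
		return true;	/* Zero/One/Ones used as a store target */
	return needed == op->type;
}

int main(void)
{
	struct operand zero = { T_INTEGER, true };

	printf("constant as reference target: %s\n",
	       type_ok(T_REFERENCE, &zero) ? "accepted" : "rejected");
	return 0;
}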
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_resolve_operands
- *
- * PARAMETERS:  Opcode              - Opcode being interpreted
- *              stack_ptr           - Pointer to the operand stack to be
- *                                    resolved
- *              walk_state          - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Convert multiple input operands to the types required by the
- *              target operator.
- *
- *      Each 5-bit group in arg_types represents one required
- *      operand and indicates the required Type. The corresponding operand
- *      will be converted to the required type if possible, otherwise we
- *      abort with an exception.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_resolve_operands(u16 opcode,
-			 union acpi_operand_object ** stack_ptr,
-			 struct acpi_walk_state * walk_state)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status = AE_OK;
-	u8 object_type;
-	u32 arg_types;
-	const struct acpi_opcode_info *op_info;
-	u32 this_arg_type;
-	acpi_object_type type_needed;
-	u16 target_op = 0;
-
-	ACPI_FUNCTION_TRACE_U32(ex_resolve_operands, opcode);
-
-	op_info = acpi_ps_get_opcode_info(opcode);
-	if (op_info->class == AML_CLASS_UNKNOWN) {
-		return_ACPI_STATUS(AE_AML_BAD_OPCODE);
-	}
-
-	arg_types = op_info->runtime_args;
-	if (arg_types == ARGI_INVALID_OPCODE) {
-		ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", opcode));
-
-		return_ACPI_STATUS(AE_AML_INTERNAL);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-			  "Opcode %X [%s] RequiredOperandTypes=%8.8X\n",
-			  opcode, op_info->name, arg_types));
-
-	/*
-	 * Normal exit is with (arg_types == 0) at end of argument list.
-	 * Function will return an exception from within the loop upon
-	 * finding an entry which is not (or cannot be converted
-	 * to) the required type; if stack underflows; or upon
-	 * finding a NULL stack entry (which should not happen).
-	 */
-	while (GET_CURRENT_ARG_TYPE(arg_types)) {
-		if (!stack_ptr || !*stack_ptr) {
-			ACPI_ERROR((AE_INFO, "Null stack entry at %p",
-				    stack_ptr));
-
-			return_ACPI_STATUS(AE_AML_INTERNAL);
-		}
-
-		/* Extract useful items */
-
-		obj_desc = *stack_ptr;
-
-		/* Decode the descriptor type */
-
-		switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
-		case ACPI_DESC_TYPE_NAMED:
-
-			/* Namespace Node */
-
-			object_type =
-			    ((struct acpi_namespace_node *)obj_desc)->type;
-
-			/*
-			 * Resolve an alias object. The construction of these objects
-			 * guarantees that there is only one level of alias indirection;
-			 * thus, the attached object is always the aliased namespace node
-			 */
-			if (object_type == ACPI_TYPE_LOCAL_ALIAS) {
-				obj_desc =
-				    acpi_ns_get_attached_object((struct
-								 acpi_namespace_node
-								 *)obj_desc);
-				*stack_ptr = obj_desc;
-				object_type =
-				    ((struct acpi_namespace_node *)obj_desc)->
-				    type;
-			}
-			break;
-
-		case ACPI_DESC_TYPE_OPERAND:
-
-			/* ACPI internal object */
-
-			object_type = ACPI_GET_OBJECT_TYPE(obj_desc);
-
-			/* Check for bad acpi_object_type */
-
-			if (!acpi_ut_valid_object_type(object_type)) {
-				ACPI_ERROR((AE_INFO,
-					    "Bad operand object type [%X]",
-					    object_type));
-
-				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-			}
-
-			if (object_type == (u8) ACPI_TYPE_LOCAL_REFERENCE) {
-
-				/* Validate the Reference */
-
-				switch (obj_desc->reference.class) {
-				case ACPI_REFCLASS_DEBUG:
-
-					target_op = AML_DEBUG_OP;
-
-					/*lint -fallthrough */
-
-				case ACPI_REFCLASS_ARG:
-				case ACPI_REFCLASS_LOCAL:
-				case ACPI_REFCLASS_INDEX:
-				case ACPI_REFCLASS_REFOF:
-				case ACPI_REFCLASS_TABLE:	/* ddb_handle from LOAD_OP or LOAD_TABLE_OP */
-				case ACPI_REFCLASS_NAME:	/* Reference to a named object */
-
-					ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-							  "Operand is a Reference, Class [%s] %2.2X\n",
-							  acpi_ut_get_reference_name
-							  (obj_desc),
-							  obj_desc->reference.
-							  class));
-					break;
-
-				default:
-
-					ACPI_ERROR((AE_INFO,
-						    "Unknown Reference Class %2.2X in %p",
-						    obj_desc->reference.class,
-						    obj_desc));
-
-					return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-				}
-			}
-			break;
-
-		default:
-
-			/* Invalid descriptor */
-
-			ACPI_ERROR((AE_INFO, "Invalid descriptor %p [%s]",
-				    obj_desc,
-				    acpi_ut_get_descriptor_name(obj_desc)));
-
-			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-		}
-
-		/* Get one argument type, point to the next */
-
-		this_arg_type = GET_CURRENT_ARG_TYPE(arg_types);
-		INCREMENT_ARG_LIST(arg_types);
-
-		/*
-		 * Handle cases where the object does not need to be
-		 * resolved to a value
-		 */
-		switch (this_arg_type) {
-		case ARGI_REF_OR_STRING:	/* Can be a String or Reference */
-
-			if ((ACPI_GET_DESCRIPTOR_TYPE(obj_desc) ==
-			     ACPI_DESC_TYPE_OPERAND)
-			    && (ACPI_GET_OBJECT_TYPE(obj_desc) ==
-				ACPI_TYPE_STRING)) {
-				/*
-				 * String found - the string references a named object and
-				 * must be resolved to a node
-				 */
-				goto next_operand;
-			}
-
-			/*
-			 * Else not a string - fall through to the normal Reference
-			 * case below
-			 */
-			/*lint -fallthrough */
-
-		case ARGI_REFERENCE:	/* References: */
-		case ARGI_INTEGER_REF:
-		case ARGI_OBJECT_REF:
-		case ARGI_DEVICE_REF:
-		case ARGI_TARGETREF:	/* Allows implicit conversion rules before store */
-		case ARGI_FIXED_TARGET:	/* No implicit conversion before store to target */
-		case ARGI_SIMPLE_TARGET:	/* Name, Local, or Arg - no implicit conversion  */
-
-			/*
-			 * Need an operand of type ACPI_TYPE_LOCAL_REFERENCE
-			 * A Namespace Node is OK as-is
-			 */
-			if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) ==
-			    ACPI_DESC_TYPE_NAMED) {
-				goto next_operand;
-			}
-
-			status =
-			    acpi_ex_check_object_type(ACPI_TYPE_LOCAL_REFERENCE,
-						      object_type, obj_desc);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-			goto next_operand;
-
-		case ARGI_DATAREFOBJ:	/* Store operator only */
-
-			/*
-			 * We don't want to resolve index_op reference objects during
-			 * a store because this would be an implicit de_ref_of operation.
-			 * Instead, we just want to store the reference object.
-			 * -- All others must be resolved below.
-			 */
-			if ((opcode == AML_STORE_OP) &&
-			    (ACPI_GET_OBJECT_TYPE(*stack_ptr) ==
-			     ACPI_TYPE_LOCAL_REFERENCE)
-			    && ((*stack_ptr)->reference.class == ACPI_REFCLASS_INDEX)) {
-				goto next_operand;
-			}
-			break;
-
-		default:
-			/* All cases covered above */
-			break;
-		}
-
-		/*
-		 * Resolve this object to a value
-		 */
-		status = acpi_ex_resolve_to_value(stack_ptr, walk_state);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		/* Get the resolved object */
-
-		obj_desc = *stack_ptr;
-
-		/*
-		 * Check the resulting object (value) type
-		 */
-		switch (this_arg_type) {
-			/*
-			 * For the simple cases, only one type of resolved object
-			 * is allowed
-			 */
-		case ARGI_MUTEX:
-
-			/* Need an operand of type ACPI_TYPE_MUTEX */
-
-			type_needed = ACPI_TYPE_MUTEX;
-			break;
-
-		case ARGI_EVENT:
-
-			/* Need an operand of type ACPI_TYPE_EVENT */
-
-			type_needed = ACPI_TYPE_EVENT;
-			break;
-
-		case ARGI_PACKAGE:	/* Package */
-
-			/* Need an operand of type ACPI_TYPE_PACKAGE */
-
-			type_needed = ACPI_TYPE_PACKAGE;
-			break;
-
-		case ARGI_ANYTYPE:
-
-			/* Any operand type will do */
-
-			type_needed = ACPI_TYPE_ANY;
-			break;
-
-		case ARGI_DDBHANDLE:
-
-			/* Need an operand of type ACPI_TYPE_DDB_HANDLE */
-
-			type_needed = ACPI_TYPE_LOCAL_REFERENCE;
-			break;
-
-			/*
-			 * The more complex cases allow multiple resolved object types
-			 */
-		case ARGI_INTEGER:
-
-			/*
-			 * Need an operand of type ACPI_TYPE_INTEGER,
-			 * But we can implicitly convert from a STRING or BUFFER
-			 * Aka - "Implicit Source Operand Conversion"
-			 */
-			status =
-			    acpi_ex_convert_to_integer(obj_desc, stack_ptr, 16);
-			if (ACPI_FAILURE(status)) {
-				if (status == AE_TYPE) {
-					ACPI_ERROR((AE_INFO,
-						    "Needed [Integer/String/Buffer], found [%s] %p",
-						    acpi_ut_get_object_type_name
-						    (obj_desc), obj_desc));
-
-					return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-				}
-
-				return_ACPI_STATUS(status);
-			}
-
-			if (obj_desc != *stack_ptr) {
-				acpi_ut_remove_reference(obj_desc);
-			}
-			goto next_operand;
-
-		case ARGI_BUFFER:
-
-			/*
-			 * Need an operand of type ACPI_TYPE_BUFFER,
-			 * But we can implicitly convert from a STRING or INTEGER
-			 * Aka - "Implicit Source Operand Conversion"
-			 */
-			status = acpi_ex_convert_to_buffer(obj_desc, stack_ptr);
-			if (ACPI_FAILURE(status)) {
-				if (status == AE_TYPE) {
-					ACPI_ERROR((AE_INFO,
-						    "Needed [Integer/String/Buffer], found [%s] %p",
-						    acpi_ut_get_object_type_name
-						    (obj_desc), obj_desc));
-
-					return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-				}
-
-				return_ACPI_STATUS(status);
-			}
-
-			if (obj_desc != *stack_ptr) {
-				acpi_ut_remove_reference(obj_desc);
-			}
-			goto next_operand;
-
-		case ARGI_STRING:
-
-			/*
-			 * Need an operand of type ACPI_TYPE_STRING,
-			 * But we can implicitly convert from a BUFFER or INTEGER
-			 * Aka - "Implicit Source Operand Conversion"
-			 */
-			status = acpi_ex_convert_to_string(obj_desc, stack_ptr,
-							   ACPI_IMPLICIT_CONVERT_HEX);
-			if (ACPI_FAILURE(status)) {
-				if (status == AE_TYPE) {
-					ACPI_ERROR((AE_INFO,
-						    "Needed [Integer/String/Buffer], found [%s] %p",
-						    acpi_ut_get_object_type_name
-						    (obj_desc), obj_desc));
-
-					return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-				}
-
-				return_ACPI_STATUS(status);
-			}
-
-			if (obj_desc != *stack_ptr) {
-				acpi_ut_remove_reference(obj_desc);
-			}
-			goto next_operand;
-
-		case ARGI_COMPUTEDATA:
-
-			/* Need an operand of type INTEGER, STRING or BUFFER */
-
-			switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-			case ACPI_TYPE_INTEGER:
-			case ACPI_TYPE_STRING:
-			case ACPI_TYPE_BUFFER:
-
-				/* Valid operand */
-				break;
-
-			default:
-				ACPI_ERROR((AE_INFO,
-					    "Needed [Integer/String/Buffer], found [%s] %p",
-					    acpi_ut_get_object_type_name
-					    (obj_desc), obj_desc));
-
-				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-			}
-			goto next_operand;
-
-		case ARGI_BUFFER_OR_STRING:
-
-			/* Need an operand of type STRING or BUFFER */
-
-			switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-			case ACPI_TYPE_STRING:
-			case ACPI_TYPE_BUFFER:
-
-				/* Valid operand */
-				break;
-
-			case ACPI_TYPE_INTEGER:
-
-				/* Highest priority conversion is to type Buffer */
-
-				status =
-				    acpi_ex_convert_to_buffer(obj_desc,
-							      stack_ptr);
-				if (ACPI_FAILURE(status)) {
-					return_ACPI_STATUS(status);
-				}
-
-				if (obj_desc != *stack_ptr) {
-					acpi_ut_remove_reference(obj_desc);
-				}
-				break;
-
-			default:
-				ACPI_ERROR((AE_INFO,
-					    "Needed [Integer/String/Buffer], found [%s] %p",
-					    acpi_ut_get_object_type_name
-					    (obj_desc), obj_desc));
-
-				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-			}
-			goto next_operand;
-
-		case ARGI_DATAOBJECT:
-			/*
-			 * ARGI_DATAOBJECT is only used by the size_of operator.
-			 * Need a buffer, string, package, or ref_of reference.
-			 *
-			 * The only reference allowed here is a direct reference to
-			 * a namespace node.
-			 */
-			switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-			case ACPI_TYPE_PACKAGE:
-			case ACPI_TYPE_STRING:
-			case ACPI_TYPE_BUFFER:
-			case ACPI_TYPE_LOCAL_REFERENCE:
-
-				/* Valid operand */
-				break;
-
-			default:
-				ACPI_ERROR((AE_INFO,
-					    "Needed [Buffer/String/Package/Reference], found [%s] %p",
-					    acpi_ut_get_object_type_name
-					    (obj_desc), obj_desc));
-
-				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-			}
-			goto next_operand;
-
-		case ARGI_COMPLEXOBJ:
-
-			/* Need a buffer or package or (ACPI 2.0) String */
-
-			switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-			case ACPI_TYPE_PACKAGE:
-			case ACPI_TYPE_STRING:
-			case ACPI_TYPE_BUFFER:
-
-				/* Valid operand */
-				break;
-
-			default:
-				ACPI_ERROR((AE_INFO,
-					    "Needed [Buffer/String/Package], found [%s] %p",
-					    acpi_ut_get_object_type_name
-					    (obj_desc), obj_desc));
-
-				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-			}
-			goto next_operand;
-
-		case ARGI_REGION_OR_BUFFER:	/* Used by Load() only */
-
-			/* Need an operand of type REGION or a BUFFER (which could be a resolved region field) */
-
-			switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-			case ACPI_TYPE_BUFFER:
-			case ACPI_TYPE_REGION:
-
-				/* Valid operand */
-				break;
-
-			default:
-				ACPI_ERROR((AE_INFO,
-					    "Needed [Region/Buffer], found [%s] %p",
-					    acpi_ut_get_object_type_name
-					    (obj_desc), obj_desc));
-
-				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-			}
-			goto next_operand;
-
-		case ARGI_DATAREFOBJ:
-
-			/* Used by the Store() operator only */
-
-			switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-			case ACPI_TYPE_INTEGER:
-			case ACPI_TYPE_PACKAGE:
-			case ACPI_TYPE_STRING:
-			case ACPI_TYPE_BUFFER:
-			case ACPI_TYPE_BUFFER_FIELD:
-			case ACPI_TYPE_LOCAL_REFERENCE:
-			case ACPI_TYPE_LOCAL_REGION_FIELD:
-			case ACPI_TYPE_LOCAL_BANK_FIELD:
-			case ACPI_TYPE_LOCAL_INDEX_FIELD:
-			case ACPI_TYPE_DDB_HANDLE:
-
-				/* Valid operand */
-				break;
-
-			default:
-
-				if (acpi_gbl_enable_interpreter_slack) {
-					/*
-					 * Enable original behavior of Store(), allowing any and all
-					 * objects as the source operand.  The ACPI spec does not
-					 * allow this, however.
-					 */
-					break;
-				}
-
-				if (target_op == AML_DEBUG_OP) {
-
-					/* Allow store of any object to the Debug object */
-
-					break;
-				}
-
-				ACPI_ERROR((AE_INFO,
-					    "Needed [Integer/Buffer/String/Package/Ref/Ddb], found [%s] %p",
-					    acpi_ut_get_object_type_name
-					    (obj_desc), obj_desc));
-
-				return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-			}
-			goto next_operand;
-
-		default:
-
-			/* Unknown type */
-
-			ACPI_ERROR((AE_INFO,
-				    "Internal - Unknown ARGI (required operand) type %X",
-				    this_arg_type));
-
-			return_ACPI_STATUS(AE_BAD_PARAMETER);
-		}
-
-		/*
-		 * Make sure that the original object was resolved to the
-		 * required object type (Simple cases only).
-		 */
-		status = acpi_ex_check_object_type(type_needed,
-						   ACPI_GET_OBJECT_TYPE
-						   (*stack_ptr), *stack_ptr);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-	      next_operand:
-		/*
-		 * If more operands needed, decrement stack_ptr to point
-		 * to next operand on stack
-		 */
-		if (GET_CURRENT_ARG_TYPE(arg_types)) {
-			stack_ptr--;
-		}
-	}
-
-	ACPI_DUMP_OPERANDS(walk_state->operands,
-			   acpi_ps_get_opcode_name(opcode),
-			   walk_state->num_operands);
-
-	return_ACPI_STATUS(status);
-}
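The arg_types word consumed by the loop above packs one 5-bit required-operand type per operand, taken from the low bits first. A standalone sketch of that decoding (the macro names and type values here are illustrative, not the real ARGI_* definitions):

/*
 * Sketch of the 5-bit packed operand-type list: each required operand
 * contributes one 5-bit group, consumed from the least significant end.
 */
#include <stdio.h>

#define ARG_TYPE_WIDTH	5
#define ARG_TYPE_MASK	0x1F

#define GET_CURRENT_ARG(types)	((types) & ARG_TYPE_MASK)
#define NEXT_ARG_LIST(types)	((types) >> ARG_TYPE_WIDTH)

int main(void)
{
	/* three packed operand types: 0x03, 0x0A, 0x11 (made-up values) */
	unsigned int arg_types = 0x03 | (0x0A << 5) | (0x11 << 10);

	while (GET_CURRENT_ARG(arg_types)) {
		printf("required operand type: 0x%02X\n",
		       GET_CURRENT_ARG(arg_types));
		arg_types = NEXT_ARG_LIST(arg_types);
	}
	return 0;
}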
diff --git a/drivers/acpi/executer/exstore.c b/drivers/acpi/executer/exstore.c
deleted file mode 100644
index 3318df4..0000000
--- a/drivers/acpi/executer/exstore.c
+++ /dev/null
@@ -1,715 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: exstore - AML Interpreter object store support
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exstore")
-
-/* Local prototypes */
-static void
-acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
-			u32 level, u32 index);
-
-static acpi_status
-acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
-			      union acpi_operand_object *dest_desc,
-			      struct acpi_walk_state *walk_state);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_do_debug_object
- *
- * PARAMETERS:  source_desc         - Value to be stored
- *              Level               - Indentation level (used for packages)
- *              Index               - Current package element, zero if not pkg
- *
- * RETURN:      None
- *
- * DESCRIPTION: Handles stores to the Debug Object.
- *
- ******************************************************************************/
-
-static void
-acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
-			u32 level, u32 index)
-{
-	u32 i;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
-
-	/* Print line header as long as we are not in the middle of an object display */
-
-	if (!((level > 0) && index == 0)) {
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s",
-				      level, " "));
-	}
-
-	/* Display index for package output only */
-
-	if (index > 0) {
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
-				      "(%.2u) ", index - 1));
-	}
-
-	if (!source_desc) {
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[Null Object]\n"));
-		return_VOID;
-	}
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s ",
-				      acpi_ut_get_object_type_name
-				      (source_desc)));
-
-		if (!acpi_ut_valid_internal_object(source_desc)) {
-			ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
-					      "%p, Invalid Internal Object!\n",
-					      source_desc));
-			return_VOID;
-		}
-	} else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
-		   ACPI_DESC_TYPE_NAMED) {
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s: %p\n",
-				      acpi_ut_get_type_name(((struct
-							      acpi_namespace_node
-							      *)source_desc)->
-							    type),
-				      source_desc));
-		return_VOID;
-	} else {
-		return_VOID;
-	}
-
-	/* source_desc is of type ACPI_DESC_TYPE_OPERAND */
-
-	switch (ACPI_GET_OBJECT_TYPE(source_desc)) {
-	case ACPI_TYPE_INTEGER:
-
-		/* Output correct integer width */
-
-		if (acpi_gbl_integer_byte_width == 4) {
-			ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%8.8X\n",
-					      (u32) source_desc->integer.
-					      value));
-		} else {
-			ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
-					      "0x%8.8X%8.8X\n",
-					      ACPI_FORMAT_UINT64(source_desc->
-								 integer.
-								 value)));
-		}
-		break;
-
-	case ACPI_TYPE_BUFFER:
-
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X]\n",
-				      (u32) source_desc->buffer.length));
-		ACPI_DUMP_BUFFER(source_desc->buffer.pointer,
-				 (source_desc->buffer.length <
-				  256) ? source_desc->buffer.length : 256);
-		break;
-
-	case ACPI_TYPE_STRING:
-
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X] \"%s\"\n",
-				      source_desc->string.length,
-				      source_desc->string.pointer));
-		break;
-
-	case ACPI_TYPE_PACKAGE:
-
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
-				      "[Contains 0x%.2X Elements]\n",
-				      source_desc->package.count));
-
-		/* Output the entire contents of the package */
-
-		for (i = 0; i < source_desc->package.count; i++) {
-			acpi_ex_do_debug_object(source_desc->package.
-						elements[i], level + 4, i + 1);
-		}
-		break;
-
-	case ACPI_TYPE_LOCAL_REFERENCE:
-
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s] ",
-				      acpi_ut_get_reference_name(source_desc)));
-
-		/* Decode the reference */
-
-		switch (source_desc->reference.class) {
-		case ACPI_REFCLASS_INDEX:
-
-			ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%X\n",
-					      source_desc->reference.value));
-			break;
-
-		case ACPI_REFCLASS_TABLE:
-
-			ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
-					      "Table Index 0x%X\n",
-					      source_desc->reference.value));
-			break;
-
-		default:
-			break;
-		}
-
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "  "));
-
-		/* Check for valid node first, then valid object */
-
-		if (source_desc->reference.node) {
-			if (ACPI_GET_DESCRIPTOR_TYPE
-			    (source_desc->reference.node) !=
-			    ACPI_DESC_TYPE_NAMED) {
-				ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
-						      " %p - Not a valid namespace node\n",
-						      source_desc->reference.
-						      node));
-			} else {
-				ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
-						      "Node %p [%4.4s] ",
-						      source_desc->reference.
-						      node,
-						      (source_desc->reference.
-						       node)->name.ascii));
-
-				switch ((source_desc->reference.node)->type) {
-
-					/* These types have no attached object */
-
-				case ACPI_TYPE_DEVICE:
-					acpi_os_printf("Device\n");
-					break;
-
-				case ACPI_TYPE_THERMAL:
-					acpi_os_printf("Thermal Zone\n");
-					break;
-
-				default:
-					acpi_ex_do_debug_object((source_desc->
-								 reference.
-								 node)->object,
-								level + 4, 0);
-					break;
-				}
-			}
-		} else if (source_desc->reference.object) {
-			if (ACPI_GET_DESCRIPTOR_TYPE
-			    (source_desc->reference.object) ==
-			    ACPI_DESC_TYPE_NAMED) {
-				acpi_ex_do_debug_object(((struct
-							  acpi_namespace_node *)
-							 source_desc->reference.
-							 object)->object,
-							level + 4, 0);
-			} else {
-				acpi_ex_do_debug_object(source_desc->reference.
-							object, level + 4, 0);
-			}
-		}
-		break;
-
-	default:
-
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p\n",
-				      source_desc));
-		break;
-	}
-
-	ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n"));
-	return_VOID;
-}
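The Level and Index parameters drive the layout of the debug output above: the line header is skipped while an enclosing object is mid-display, and package elements get an element tag at increasing indentation. A standalone sketch of that convention, using plain printf in place of the ACPI debug macros:

/*
 * Sketch of the Level/Index formatting convention used for Debug Object
 * output: suppress the header in the middle of an object display, and
 * tag package elements with their element number at a deeper indent.
 */
#include <stdio.h>

static void debug_print(const char *text, int level, int index)
{
	/* Print the line header unless we are mid-object (level > 0, index == 0) */
	if (!(level > 0 && index == 0))
		printf("[Debug] %*s", level, " ");

	/* Element tag for package output only */
	if (index > 0)
		printf("(%.2u) ", index - 1);

	printf("%s\n", text);
}

int main(void)
{
	int i;
	const char *elements[] = { "0x1", "0x2", "0x3" };

	debug_print("[Contains 3 Elements]", 0, 0);
	for (i = 0; i < 3; i++)
		debug_print(elements[i], 4, i + 1);
	return 0;
}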
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_store
- *
- * PARAMETERS:  *source_desc        - Value to be stored
- *              *dest_desc          - Where to store it.  Must be an NS node
- *                                    or a union acpi_operand_object of type
- *                                    Reference;
- *              walk_state          - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Store the value described by source_desc into the location
- *              described by dest_desc. Called by various interpreter
- *              functions to store the result of an operation into
- *              the destination operand -- not just simply the actual "Store"
- *              ASL operator.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_store(union acpi_operand_object *source_desc,
-	      union acpi_operand_object *dest_desc,
-	      struct acpi_walk_state *walk_state)
-{
-	acpi_status status = AE_OK;
-	union acpi_operand_object *ref_desc = dest_desc;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_store, dest_desc);
-
-	/* Validate parameters */
-
-	if (!source_desc || !dest_desc) {
-		ACPI_ERROR((AE_INFO, "Null parameter"));
-		return_ACPI_STATUS(AE_AML_NO_OPERAND);
-	}
-
-	/* dest_desc can be either a namespace node or an ACPI object */
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(dest_desc) == ACPI_DESC_TYPE_NAMED) {
-		/*
-		 * Dest is a namespace node,
-		 * Storing an object into a Named node.
-		 */
-		status = acpi_ex_store_object_to_node(source_desc,
-						      (struct
-						       acpi_namespace_node *)
-						      dest_desc, walk_state,
-						      ACPI_IMPLICIT_CONVERSION);
-
-		return_ACPI_STATUS(status);
-	}
-
-	/* Destination object must be a Reference or a Constant object */
-
-	switch (ACPI_GET_OBJECT_TYPE(dest_desc)) {
-	case ACPI_TYPE_LOCAL_REFERENCE:
-		break;
-
-	case ACPI_TYPE_INTEGER:
-
-		/* Allow stores to Constants -- a Noop as per ACPI spec */
-
-		if (dest_desc->common.flags & AOPOBJ_AML_CONSTANT) {
-			return_ACPI_STATUS(AE_OK);
-		}
-
-		/*lint -fallthrough */
-
-	default:
-
-		/* Destination is not a Reference object */
-
-		ACPI_ERROR((AE_INFO,
-			    "Target is not a Reference or Constant object - %s [%p]",
-			    acpi_ut_get_object_type_name(dest_desc),
-			    dest_desc));
-
-		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-	}
-
-	/*
-	 * Examine the Reference class. These cases are handled:
-	 *
-	 * 1) Store to Name (Change the object associated with a name)
-	 * 2) Store to an indexed area of a Buffer or Package
-	 * 3) Store to a Method Local or Arg
-	 * 4) Store to the debug object
-	 */
-	switch (ref_desc->reference.class) {
-	case ACPI_REFCLASS_REFOF:
-
-		/* Storing an object into a Name "container" */
-
-		status = acpi_ex_store_object_to_node(source_desc,
-						      ref_desc->reference.
-						      object, walk_state,
-						      ACPI_IMPLICIT_CONVERSION);
-		break;
-
-	case ACPI_REFCLASS_INDEX:
-
-		/* Storing to an Index (pointer into a package or buffer) */
-
-		status =
-		    acpi_ex_store_object_to_index(source_desc, ref_desc,
-						  walk_state);
-		break;
-
-	case ACPI_REFCLASS_LOCAL:
-	case ACPI_REFCLASS_ARG:
-
-		/* Store to a method local/arg  */
-
-		status =
-		    acpi_ds_store_object_to_local(ref_desc->reference.class,
-						  ref_desc->reference.value,
-						  source_desc, walk_state);
-		break;
-
-	case ACPI_REFCLASS_DEBUG:
-
-		/*
-		 * Storing to the Debug object causes the value stored to be
-		 * displayed and otherwise has no effect -- see ACPI Specification
-		 */
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "**** Write to Debug Object: Object %p %s ****:\n\n",
-				  source_desc,
-				  acpi_ut_get_object_type_name(source_desc)));
-
-		acpi_ex_do_debug_object(source_desc, 0, 0);
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
-			    ref_desc->reference.class));
-		ACPI_DUMP_ENTRY(ref_desc, ACPI_LV_INFO);
-
-		status = AE_AML_INTERNAL;
-		break;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_store_object_to_index
- *
- * PARAMETERS:  *source_desc            - Value to be stored
- *              *dest_desc              - Named object to receive the value
- *              walk_state              - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Store the object to indexed Buffer or Package element
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
-			      union acpi_operand_object *index_desc,
-			      struct acpi_walk_state *walk_state)
-{
-	acpi_status status = AE_OK;
-	union acpi_operand_object *obj_desc;
-	union acpi_operand_object *new_desc;
-	u8 value = 0;
-	u32 i;
-
-	ACPI_FUNCTION_TRACE(ex_store_object_to_index);
-
-	/*
-	 * Destination must be a reference pointer, and
-	 * must point to either a buffer or a package
-	 */
-	switch (index_desc->reference.target_type) {
-	case ACPI_TYPE_PACKAGE:
-		/*
-		 * Storing to a package element. Copy the object and replace
-		 * any existing object with the new object. No implicit
-		 * conversion is performed.
-		 *
-		 * The object at *(index_desc->Reference.Where) is the
-		 * element within the package that is to be modified.
-		 * The parent package object is at index_desc->Reference.Object
-		 */
-		obj_desc = *(index_desc->reference.where);
-
-		if (ACPI_GET_OBJECT_TYPE(source_desc) ==
-		    ACPI_TYPE_LOCAL_REFERENCE
-		    && source_desc->reference.class == ACPI_REFCLASS_TABLE) {
-
-			/* This is a DDBHandle, just add a reference to it */
-
-			acpi_ut_add_reference(source_desc);
-			new_desc = source_desc;
-		} else {
-			/* Normal object, copy it */
-
-			status =
-			    acpi_ut_copy_iobject_to_iobject(source_desc,
-							    &new_desc,
-							    walk_state);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-		}
-
-		if (obj_desc) {
-
-			/* Decrement reference count by the ref count of the parent package */
-
-			for (i = 0; i < ((union acpi_operand_object *)
-					 index_desc->reference.object)->common.
-			     reference_count; i++) {
-				acpi_ut_remove_reference(obj_desc);
-			}
-		}
-
-		*(index_desc->reference.where) = new_desc;
-
-		/* Increment ref count by the ref count of the parent package-1 */
-
-		for (i = 1; i < ((union acpi_operand_object *)
-				 index_desc->reference.object)->common.
-		     reference_count; i++) {
-			acpi_ut_add_reference(new_desc);
-		}
-
-		break;
-
-	case ACPI_TYPE_BUFFER_FIELD:
-
-		/*
-		 * Store into a Buffer or String (not actually a real buffer_field)
-		 * at a location defined by an Index.
-		 *
-		 * The first 8-bit element of the source object is written to the
-		 * 8-bit Buffer location defined by the Index destination object,
-		 * according to the ACPI 2.0 specification.
-		 */
-
-		/*
-		 * Make sure the target is a Buffer or String. An error should
-		 * not happen here, since the reference_object was constructed
-		 * by the INDEX_OP code.
-		 */
-		obj_desc = index_desc->reference.object;
-		if ((ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_BUFFER) &&
-		    (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_STRING)) {
-			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-		}
-
-		/*
-		 * The assignment of the individual elements will be slightly
-		 * different for each source type.
-		 */
-		switch (ACPI_GET_OBJECT_TYPE(source_desc)) {
-		case ACPI_TYPE_INTEGER:
-
-			/* Use the least-significant byte of the integer */
-
-			value = (u8) (source_desc->integer.value);
-			break;
-
-		case ACPI_TYPE_BUFFER:
-		case ACPI_TYPE_STRING:
-
-			/* Note: Takes advantage of common string/buffer fields */
-
-			value = source_desc->buffer.pointer[0];
-			break;
-
-		default:
-
-			/* All other types are invalid */
-
-			ACPI_ERROR((AE_INFO,
-				    "Source must be Integer/Buffer/String type, not %s",
-				    acpi_ut_get_object_type_name(source_desc)));
-			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-		}
-
-		/* Store the source value into the target buffer byte */
-
-		obj_desc->buffer.pointer[index_desc->reference.value] = value;
-		break;
-
-	default:
-		ACPI_ERROR((AE_INFO, "Target is not a Package or BufferField"));
-		status = AE_AML_OPERAND_TYPE;
-		break;
-	}
-
-	return_ACPI_STATUS(status);
-}
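The package-element case above keeps reference counts consistent with the parent package: the old element is released once per reference held on the parent, and the new element, which already carries one reference from the copy, gains parent-count minus one more. A standalone sketch of that bookkeeping with simplified counters (not the ACPICA object layout):

/*
 * Sketch of the reference-count adjustment when a package element is
 * replaced: the old element drops to zero extra references from this
 * package, and the new element ends up matching the parent's count.
 */
#include <stdio.h>

struct mock_obj {
	const char *name;
	int refs;
};

static void replace_element(struct mock_obj *parent_pkg,
			    struct mock_obj **slot, struct mock_obj *new_elem)
{
	int i;

	if (*slot)
		for (i = 0; i < parent_pkg->refs; i++)
			(*slot)->refs--;	/* drop the old element once per parent ref */

	*slot = new_elem;
	for (i = 1; i < parent_pkg->refs; i++)
		new_elem->refs++;		/* new element already arrived with one ref */
}

int main(void)
{
	struct mock_obj pkg  = { "Package", 2 };	/* parent referenced twice */
	struct mock_obj oldv = { "OldElem", 2 };
	struct mock_obj newv = { "NewElem", 1 };	/* one ref from the copy */
	struct mock_obj *slot = &oldv;

	replace_element(&pkg, &slot, &newv);
	printf("old=%d new=%d\n", oldv.refs, newv.refs);	/* old=0 new=2 */
	return 0;
}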
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_store_object_to_node
- *
- * PARAMETERS:  source_desc             - Value to be stored
- *              Node                    - Named object to receive the value
- *              walk_state              - Current walk state
- *              implicit_conversion     - Perform implicit conversion (yes/no)
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Store the object to the named object.
- *
- *              The Assignment of an object to a named object is handled here
- *              The value passed in will replace the current value (if any)
- *              with the input value.
- *
- *              When storing into an object the data is converted to the
- *              target object type then stored in the object.  This means
- *              that the target object type (for an initialized target) will
- *              not be changed by a store operation.
- *
- *              Assumes parameters are already validated.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
-			     struct acpi_namespace_node *node,
-			     struct acpi_walk_state *walk_state,
-			     u8 implicit_conversion)
-{
-	acpi_status status = AE_OK;
-	union acpi_operand_object *target_desc;
-	union acpi_operand_object *new_desc;
-	acpi_object_type target_type;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_node, source_desc);
-
-	/* Get current type of the node, and object attached to Node */
-
-	target_type = acpi_ns_get_type(node);
-	target_desc = acpi_ns_get_attached_object(node);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Storing %p(%s) into node %p(%s)\n",
-			  source_desc,
-			  acpi_ut_get_object_type_name(source_desc), node,
-			  acpi_ut_get_type_name(target_type)));
-
-	/*
-	 * Resolve the source object to an actual value
-	 * (If it is a reference object)
-	 */
-	status = acpi_ex_resolve_object(&source_desc, target_type, walk_state);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* If no implicit conversion, drop into the default case below */
-
-	if ((!implicit_conversion) ||
-	    ((walk_state->opcode == AML_COPY_OP) &&
-	     (target_type != ACPI_TYPE_LOCAL_REGION_FIELD) &&
-	     (target_type != ACPI_TYPE_LOCAL_BANK_FIELD) &&
-	     (target_type != ACPI_TYPE_LOCAL_INDEX_FIELD))) {
-		/*
-		 * Force execution of default (no implicit conversion). Note:
-		 * copy_object does not perform an implicit conversion, as per the ACPI
-		 * spec -- except in case of region/bank/index fields -- because these
-		 * objects must retain their original type permanently.
-		 */
-		target_type = ACPI_TYPE_ANY;
-	}
-
-	/* Do the actual store operation */
-
-	switch (target_type) {
-	case ACPI_TYPE_BUFFER_FIELD:
-	case ACPI_TYPE_LOCAL_REGION_FIELD:
-	case ACPI_TYPE_LOCAL_BANK_FIELD:
-	case ACPI_TYPE_LOCAL_INDEX_FIELD:
-
-		/* For fields, copy the source data to the target field. */
-
-		status = acpi_ex_write_data_to_field(source_desc, target_desc,
-						     &walk_state->result_obj);
-		break;
-
-	case ACPI_TYPE_INTEGER:
-	case ACPI_TYPE_STRING:
-	case ACPI_TYPE_BUFFER:
-
-		/*
-		 * These target types are all of type Integer/String/Buffer, and
-		 * therefore support implicit conversion before the store.
-		 *
-		 * Copy and/or convert the source object to a new target object
-		 */
-		status =
-		    acpi_ex_store_object_to_object(source_desc, target_desc,
-						   &new_desc, walk_state);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		if (new_desc != target_desc) {
-			/*
-			 * Store new_desc as the new value of the Name, and set
-			 * the Name's type to that of the value being stored in it.
-			 * source_desc reference count is incremented by attach_object.
-			 *
-			 * Note: This may change the type of the node if an explicit store
-			 * has been performed such that the node/object type has been
-			 * changed.
-			 */
-			status =
-			    acpi_ns_attach_object(node, new_desc,
-						  new_desc->common.type);
-
-			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-					  "Store %s into %s via Convert/Attach\n",
-					  acpi_ut_get_object_type_name
-					  (source_desc),
-					  acpi_ut_get_object_type_name
-					  (new_desc)));
-		}
-		break;
-
-	default:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Storing %s (%p) directly into node (%p) with no implicit conversion\n",
-				  acpi_ut_get_object_type_name(source_desc),
-				  source_desc, node));
-
-		/* No conversions for all other types.  Just attach the source object */
-
-		status = acpi_ns_attach_object(node, source_desc,
-					       ACPI_GET_OBJECT_TYPE
-					       (source_desc));
-		break;
-	}
-
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/executer/exstoren.c b/drivers/acpi/executer/exstoren.c
deleted file mode 100644
index eef61a0..0000000
--- a/drivers/acpi/executer/exstoren.c
+++ /dev/null
@@ -1,303 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: exstoren - AML Interpreter object store support,
- *                        Store to Node (namespace object)
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exstoren")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_resolve_object
- *
- * PARAMETERS:  source_desc_ptr     - Pointer to the source object
- *              target_type         - Current type of the target
- *              walk_state          - Current walk state
- *
- * RETURN:      Status, resolved object in source_desc_ptr.
- *
- * DESCRIPTION: Resolve an object.  If the object is a reference, dereference
- *              it and return the actual object in the source_desc_ptr.
- *
- ******************************************************************************/
-acpi_status
-acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
-		       acpi_object_type target_type,
-		       struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object *source_desc = *source_desc_ptr;
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ex_resolve_object);
-
-	/* Ensure we have a Target that can be stored to */
-
-	switch (target_type) {
-	case ACPI_TYPE_BUFFER_FIELD:
-	case ACPI_TYPE_LOCAL_REGION_FIELD:
-	case ACPI_TYPE_LOCAL_BANK_FIELD:
-	case ACPI_TYPE_LOCAL_INDEX_FIELD:
-		/*
-		 * These cases all require only Integers or values that
-		 * can be converted to Integers (Strings or Buffers)
-		 */
-
-	case ACPI_TYPE_INTEGER:
-	case ACPI_TYPE_STRING:
-	case ACPI_TYPE_BUFFER:
-
-		/*
-		 * Stores into a Field/Region or into an Integer/Buffer/String
-		 * are all essentially the same.  This case handles the
-		 * "interchangeable" types Integer, String, and Buffer.
-		 */
-		if (ACPI_GET_OBJECT_TYPE(source_desc) ==
-		    ACPI_TYPE_LOCAL_REFERENCE) {
-
-			/* Resolve a reference object first */
-
-			status =
-			    acpi_ex_resolve_to_value(source_desc_ptr,
-						     walk_state);
-			if (ACPI_FAILURE(status)) {
-				break;
-			}
-		}
-
-		/* For copy_object, no further validation necessary */
-
-		if (walk_state->opcode == AML_COPY_OP) {
-			break;
-		}
-
-		/* Must have an Integer, Buffer, or String */
-
-		if ((ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_INTEGER) &&
-		    (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_BUFFER) &&
-		    (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_STRING) &&
-		    !((ACPI_GET_OBJECT_TYPE(source_desc) ==
-		       ACPI_TYPE_LOCAL_REFERENCE)
-		      && (source_desc->reference.class ==
-			  ACPI_REFCLASS_TABLE))) {
-
-			/* Conversion successful but still not a valid type */
-
-			ACPI_ERROR((AE_INFO,
-				    "Cannot assign type %s to %s (must be type Int/Str/Buf)",
-				    acpi_ut_get_object_type_name(source_desc),
-				    acpi_ut_get_type_name(target_type)));
-			status = AE_AML_OPERAND_TYPE;
-		}
-		break;
-
-	case ACPI_TYPE_LOCAL_ALIAS:
-	case ACPI_TYPE_LOCAL_METHOD_ALIAS:
-
-		/*
-		 * All aliases should have been resolved earlier, during the
-		 * operand resolution phase.
-		 */
-		ACPI_ERROR((AE_INFO, "Store into an unresolved Alias object"));
-		status = AE_AML_INTERNAL;
-		break;
-
-	case ACPI_TYPE_PACKAGE:
-	default:
-
-		/*
-		 * All types other than Alias and the various Fields come here,
-		 * including the untyped case - ACPI_TYPE_ANY.
-		 */
-		break;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_store_object_to_object
- *
- * PARAMETERS:  source_desc         - Object to store
- *              dest_desc           - Object to receive a copy of the source
- *              new_desc            - New object if dest_desc is obsoleted
- *              walk_state          - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: "Store" an object to another object.  This may include
- *              converting the source type to the target type (implicit
- *              conversion), and a copy of the value of the source to
- *              the target.
- *
- *              The Assignment of an object to another (not named) object
- *              is handled here.
- *              The Source passed in will replace the current value (if any)
- *              with the input value.
- *
- *              When storing into an object the data is converted to the
- *              target object type then stored in the object.  This means
- *              that the target object type (for an initialized target) will
- *              not be changed by a store operation.
- *
- *              This module allows destination types of Number, String,
- *              Buffer, and Package.
- *
- *              Assumes parameters are already validated.  NOTE: source_desc
- *              resolution (from a reference object) must be performed by
- *              the caller if necessary.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_store_object_to_object(union acpi_operand_object *source_desc,
-			       union acpi_operand_object *dest_desc,
-			       union acpi_operand_object **new_desc,
-			       struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object *actual_src_desc;
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_object, source_desc);
-
-	actual_src_desc = source_desc;
-	if (!dest_desc) {
-		/*
-		 * There is no destination object (an uninitialized node or
-		 * package element), so we can simply copy the source object,
-		 * creating a new destination object
-		 */
-		status =
-		    acpi_ut_copy_iobject_to_iobject(actual_src_desc, new_desc,
-						    walk_state);
-		return_ACPI_STATUS(status);
-	}
-
-	if (ACPI_GET_OBJECT_TYPE(source_desc) !=
-	    ACPI_GET_OBJECT_TYPE(dest_desc)) {
-		/*
-		 * The source type does not match the type of the destination.
-		 * Perform the "implicit conversion" of the source to the current type
-		 * of the target as per the ACPI specification.
-		 *
-		 * If no conversion performed, actual_src_desc = source_desc.
-		 * Otherwise, actual_src_desc is a temporary object to hold the
-		 * converted object.
-		 */
-		status =
-		    acpi_ex_convert_to_target_type(ACPI_GET_OBJECT_TYPE
-						   (dest_desc), source_desc,
-						   &actual_src_desc,
-						   walk_state);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		if (source_desc == actual_src_desc) {
-			/*
-			 * No conversion was performed. Return the source_desc as the
-			 * new object.
-			 */
-			*new_desc = source_desc;
-			return_ACPI_STATUS(AE_OK);
-		}
-	}
-
-	/*
-	 * We now have two objects of identical types, and we can perform a
-	 * copy of the *value* of the source object.
-	 */
-	switch (ACPI_GET_OBJECT_TYPE(dest_desc)) {
-	case ACPI_TYPE_INTEGER:
-
-		dest_desc->integer.value = actual_src_desc->integer.value;
-
-		/* Truncate value if we are executing from a 32-bit ACPI table */
-
-		acpi_ex_truncate_for32bit_table(dest_desc);
-		break;
-
-	case ACPI_TYPE_STRING:
-
-		status =
-		    acpi_ex_store_string_to_string(actual_src_desc, dest_desc);
-		break;
-
-	case ACPI_TYPE_BUFFER:
-
-		status =
-		    acpi_ex_store_buffer_to_buffer(actual_src_desc, dest_desc);
-		break;
-
-	case ACPI_TYPE_PACKAGE:
-
-		status =
-		    acpi_ut_copy_iobject_to_iobject(actual_src_desc, &dest_desc,
-						    walk_state);
-		break;
-
-	default:
-		/*
-		 * All other types come here.
-		 */
-		ACPI_WARNING((AE_INFO, "Store into type %s not implemented",
-			      acpi_ut_get_object_type_name(dest_desc)));
-
-		status = AE_NOT_IMPLEMENTED;
-		break;
-	}
-
-	if (actual_src_desc != source_desc) {
-
-		/* Delete the intermediate (temporary) source object */
-
-		acpi_ut_remove_reference(actual_src_desc);
-	}
-
-	*new_desc = dest_desc;
-	return_ACPI_STATUS(status);
-}
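[Editor's note] The header comments removed above describe the core rule of acpi_ex_store_object_to_object(): when the source and destination types differ, the source is first converted to the destination's type, so an initialized destination never changes type. A toy model of that rule is sketched below; the types and the hexadecimal interpretation of the String-to-Integer conversion are simplifying assumptions, not the full ACPI conversion rules.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical miniature model of the "implicit conversion on store" rule. */
enum obj_type { OBJ_INTEGER, OBJ_STRING };

struct obj {
	enum obj_type type;
	uint64_t integer;	/* valid when type == OBJ_INTEGER */
	char str[32];		/* valid when type == OBJ_STRING  */
};

/* Convert src into the type of dest, then copy the value; dest keeps its
 * original type, as the removed header comments describe. */
static void store_with_implicit_conversion(const struct obj *src, struct obj *dest)
{
	if (dest->type == OBJ_INTEGER) {
		/* String sources are read as hexadecimal here (assumption). */
		dest->integer = (src->type == OBJ_INTEGER)
		    ? src->integer
		    : strtoull(src->str, NULL, 16);
	} else {
		if (src->type == OBJ_STRING)
			snprintf(dest->str, sizeof(dest->str), "%s", src->str);
		else
			snprintf(dest->str, sizeof(dest->str), "%" PRIX64, src->integer);
	}
}

int main(void)
{
	struct obj dest = { .type = OBJ_INTEGER, .integer = 0 };
	struct obj src = { .type = OBJ_STRING };

	strcpy(src.str, "1A");
	store_with_implicit_conversion(&src, &dest);
	printf("dest stays an Integer: 0x%" PRIX64 "\n", dest.integer);	/* 0x1A */
	return 0;
}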
diff --git a/drivers/acpi/executer/exstorob.c b/drivers/acpi/executer/exstorob.c
deleted file mode 100644
index 9a75ff0..0000000
--- a/drivers/acpi/executer/exstorob.c
+++ /dev/null
@@ -1,208 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: exstorob - AML Interpreter object store support, store to object
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exstorob")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_store_buffer_to_buffer
- *
- * PARAMETERS:  source_desc         - Source object to copy
- *              target_desc         - Destination object of the copy
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Copy a buffer object to another buffer object.
- *
- ******************************************************************************/
-acpi_status
-acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
-			       union acpi_operand_object *target_desc)
-{
-	u32 length;
-	u8 *buffer;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_store_buffer_to_buffer, source_desc);
-
-	/* We know that source_desc is a buffer by now */
-
-	buffer = ACPI_CAST_PTR(u8, source_desc->buffer.pointer);
-	length = source_desc->buffer.length;
-
-	/*
-	 * If target is a buffer of length zero or is a static buffer,
-	 * allocate a new buffer of the proper length
-	 */
-	if ((target_desc->buffer.length == 0) ||
-	    (target_desc->common.flags & AOPOBJ_STATIC_POINTER)) {
-		target_desc->buffer.pointer = ACPI_ALLOCATE(length);
-		if (!target_desc->buffer.pointer) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		target_desc->buffer.length = length;
-	}
-
-	/* Copy source buffer to target buffer */
-
-	if (length <= target_desc->buffer.length) {
-
-		/* Clear existing buffer and copy in the new one */
-
-		ACPI_MEMSET(target_desc->buffer.pointer, 0,
-			    target_desc->buffer.length);
-		ACPI_MEMCPY(target_desc->buffer.pointer, buffer, length);
-
-#ifdef ACPI_OBSOLETE_BEHAVIOR
-		/*
-		 * NOTE: ACPI versions up to 3.0 specified that the buffer must be
-		 * truncated if the string is smaller than the buffer.  However, "other"
-		 * implementations of ACPI never did this and thus became the de facto
-		 * standard. ACPI 3.0_a changes this behavior such that the buffer
-		 * is no longer truncated.
-		 */
-
-		/*
-		 * OBSOLETE BEHAVIOR:
-		 * If the original source was a string, we must truncate the buffer,
-		 * according to the ACPI spec.  Integer-to-Buffer and Buffer-to-Buffer
-		 * copy must not truncate the original buffer.
-		 */
-		if (original_src_type == ACPI_TYPE_STRING) {
-
-			/* Set the new length of the target */
-
-			target_desc->buffer.length = length;
-		}
-#endif
-	} else {
-		/* Truncate the source, copy only what will fit */
-
-		ACPI_MEMCPY(target_desc->buffer.pointer, buffer,
-			    target_desc->buffer.length);
-
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "Truncating source buffer from %X to %X\n",
-				  length, target_desc->buffer.length));
-	}
-
-	/* Copy flags */
-
-	target_desc->buffer.flags = source_desc->buffer.flags;
-	target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_store_string_to_string
- *
- * PARAMETERS:  source_desc         - Source object to copy
- *              target_desc         - Destination object of the copy
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Copy a String object to another String object
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
-			       union acpi_operand_object *target_desc)
-{
-	u32 length;
-	u8 *buffer;
-
-	ACPI_FUNCTION_TRACE_PTR(ex_store_string_to_string, source_desc);
-
-	/* We know that source_desc is a string by now */
-
-	buffer = ACPI_CAST_PTR(u8, source_desc->string.pointer);
-	length = source_desc->string.length;
-
-	/*
-	 * Replace existing string value if it will fit and the string
-	 * pointer is not a static pointer (part of an ACPI table)
-	 */
-	if ((length < target_desc->string.length) &&
-	    (!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) {
-		/*
-		 * String will fit in existing non-static buffer.
-		 * Clear old string and copy in the new one
-		 */
-		ACPI_MEMSET(target_desc->string.pointer, 0,
-			    (acpi_size) target_desc->string.length + 1);
-		ACPI_MEMCPY(target_desc->string.pointer, buffer, length);
-	} else {
-		/*
-		 * Free the current buffer, then allocate a new buffer
-		 * large enough to hold the value
-		 */
-		if (target_desc->string.pointer &&
-		    (!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) {
-
-			/* Only free if not a pointer into the DSDT */
-
-			ACPI_FREE(target_desc->string.pointer);
-		}
-
-		target_desc->string.pointer = ACPI_ALLOCATE_ZEROED((acpi_size)
-								   length + 1);
-		if (!target_desc->string.pointer) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
-		ACPI_MEMCPY(target_desc->string.pointer, buffer, length);
-	}
-
-	/* Set the new target length */
-
-	target_desc->string.length = length;
-	return_ACPI_STATUS(AE_OK);
-}
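[Editor's note] The string-to-string store removed above follows a reuse-or-reallocate policy: keep the existing destination buffer when the new value fits and the pointer is owned (not a static pointer into an ACPI table), otherwise allocate a fresh buffer. A simplified sketch of that policy, with illustrative names and the same "length tracks the value, allocation is length + 1" assumption:

#include <stdlib.h>
#include <string.h>

struct mini_string {
	char *pointer;
	size_t length;		/* length of the current value, not counting the NUL */
	int is_static;		/* nonzero if pointer aims into an ACPI table image */
};

static int store_string(struct mini_string *target, const char *src, size_t src_len)
{
	if (src_len < target->length && !target->is_static) {
		/* Fits in the existing, owned buffer: clear and copy in place. */
		memset(target->pointer, 0, target->length + 1);
		memcpy(target->pointer, src, src_len);
	} else {
		char *buf = calloc(src_len + 1, 1);

		if (!buf)
			return -1;			/* AE_NO_MEMORY analogue */
		memcpy(buf, src, src_len);
		if (!target->is_static)
			free(target->pointer);		/* only free owned memory */
		target->pointer = buf;
		target->is_static = 0;
	}
	target->length = src_len;
	return 0;
}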
diff --git a/drivers/acpi/executer/exsystem.c b/drivers/acpi/executer/exsystem.c
deleted file mode 100644
index 68990f1..0000000
--- a/drivers/acpi/executer/exsystem.c
+++ /dev/null
@@ -1,302 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: exsystem - Interface to OS services
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exsystem")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_system_wait_semaphore
- *
- * PARAMETERS:  Semaphore       - Semaphore to wait on
- *              Timeout         - Max time to wait
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Implements a semaphore wait with a check to see if the
- *              semaphore is available immediately.  If it is not, the
- *              interpreter is released before waiting.
- *
- ******************************************************************************/
-acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);
-
-	status = acpi_os_wait_semaphore(semaphore, 1, ACPI_DO_NOT_WAIT);
-	if (ACPI_SUCCESS(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	if (status == AE_TIME) {
-
-		/* We must wait, so unlock the interpreter */
-
-		acpi_ex_relinquish_interpreter();
-
-		status = acpi_os_wait_semaphore(semaphore, 1, timeout);
-
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "*** Thread awake after blocking, %s\n",
-				  acpi_format_exception(status)));
-
-		/* Reacquire the interpreter */
-
-		acpi_ex_reacquire_interpreter();
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_system_wait_mutex
- *
- * PARAMETERS:  Mutex           - Mutex to wait on
- *              Timeout         - Max time to wait
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Implements a mutex wait with a check to see if the
- *              mutex is available immediately.  If it is not, the
- *              interpreter is released before waiting.
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ex_system_wait_mutex);
-
-	status = acpi_os_acquire_mutex(mutex, ACPI_DO_NOT_WAIT);
-	if (ACPI_SUCCESS(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	if (status == AE_TIME) {
-
-		/* We must wait, so unlock the interpreter */
-
-		acpi_ex_relinquish_interpreter();
-
-		status = acpi_os_acquire_mutex(mutex, timeout);
-
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "*** Thread awake after blocking, %s\n",
-				  acpi_format_exception(status)));
-
-		/* Reacquire the interpreter */
-
-		acpi_ex_reacquire_interpreter();
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_system_do_stall
- *
- * PARAMETERS:  how_long        - The amount of time to stall,
- *                                in microseconds
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Suspend running thread for specified amount of time.
- *              Note: ACPI specification requires that Stall() does not
- *              relinquish the processor, and delays longer than 100 usec
- *              should use Sleep() instead.  We allow stalls up to 255 usec
- *              for compatibility with other interpreters and existing BIOSs.
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_system_do_stall(u32 how_long)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_ENTRY();
-
-	if (how_long > 255) {	/* 255 microseconds */
-		/*
-		 * Longer than 255 usec, this is an error
-		 *
-		 * (ACPI specifies 100 usec as max, but this gives some slack in
-		 * order to support existing BIOSs)
-		 */
-		ACPI_ERROR((AE_INFO, "Time parameter is too large (%d)",
-			    how_long));
-		status = AE_AML_OPERAND_VALUE;
-	} else {
-		acpi_os_stall(how_long);
-	}
-
-	return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_system_do_suspend
- *
- * PARAMETERS:  how_long        - The amount of time to suspend,
- *                                in milliseconds
- *
- * RETURN:      Status (always AE_OK)
- *
- * DESCRIPTION: Suspend running thread for specified amount of time.
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_system_do_suspend(acpi_integer how_long)
-{
-	ACPI_FUNCTION_ENTRY();
-
-	/* Since this thread will sleep, we must release the interpreter */
-
-	acpi_ex_relinquish_interpreter();
-
-	acpi_os_sleep(how_long);
-
-	/* And now we must get the interpreter again */
-
-	acpi_ex_reacquire_interpreter();
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_system_signal_event
- *
- * PARAMETERS:  obj_desc        - The object descriptor for this op
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Provides an access point to perform synchronization operations
- *              within the AML.
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_system_signal_event(union acpi_operand_object * obj_desc)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ex_system_signal_event);
-
-	if (obj_desc) {
-		status =
-		    acpi_os_signal_semaphore(obj_desc->event.os_semaphore, 1);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_system_wait_event
- *
- * PARAMETERS:  time_desc       - The 'time to delay' object descriptor
- *              obj_desc        - The object descriptor for this op
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Provides an access point to perform synchronization operations
- *              within the AML.  This operation is a request to wait for an
- *              event.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_system_wait_event(union acpi_operand_object *time_desc,
-			  union acpi_operand_object *obj_desc)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ex_system_wait_event);
-
-	if (obj_desc) {
-		status =
-		    acpi_ex_system_wait_semaphore(obj_desc->event.os_semaphore,
-						  (u16) time_desc->integer.
-						  value);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_system_reset_event
- *
- * PARAMETERS:  obj_desc        - The object descriptor for this op
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Reset an event to a known state.
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc)
-{
-	acpi_status status = AE_OK;
-	acpi_semaphore temp_semaphore;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/*
-	 * We are going to simply delete the existing semaphore and
-	 * create a new one!
-	 */
-	status =
-	    acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0, &temp_semaphore);
-	if (ACPI_SUCCESS(status)) {
-		(void)acpi_os_delete_semaphore(obj_desc->event.os_semaphore);
-		obj_desc->event.os_semaphore = temp_semaphore;
-	}
-
-	return (status);
-}
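[Editor's note] Both wait helpers removed above share one pattern: try to take the synchronization object without blocking, and only if that fails, release the big interpreter lock, block with a timeout, then retake the lock. The sketch below recasts that pattern with POSIX primitives; the names are illustrative, the caller is assumed to hold interpreter_lock on entry, and this is not the ACPICA OSL API.

#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <time.h>

static pthread_mutex_t interpreter_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller holds interpreter_lock. Returns 0 on success, -1 on timeout/error. */
static int wait_sem_releasing_interpreter(sem_t *sem, unsigned int timeout_ms)
{
	struct timespec ts;
	int ret;

	if (sem_trywait(sem) == 0)
		return 0;			/* available immediately */
	if (errno != EAGAIN)
		return -1;

	pthread_mutex_unlock(&interpreter_lock);	/* relinquish interpreter */

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}
	ret = sem_timedwait(sem, &ts);			/* may block */

	pthread_mutex_lock(&interpreter_lock);		/* reacquire interpreter */
	return ret;
}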
diff --git a/drivers/acpi/executer/exutils.c b/drivers/acpi/executer/exutils.c
deleted file mode 100644
index 86c0388..0000000
--- a/drivers/acpi/executer/exutils.c
+++ /dev/null
@@ -1,420 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: exutils - interpreter/scanner utilities
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-/*
- * DEFINE_AML_GLOBALS is tested in amlcode.h
- * to determine whether certain global names should be "defined" or only
- * "declared" in the current compilation.  This enhances maintainability
- * by enabling a single header file to embody all knowledge of the names
- * in question.
- *
- * Exactly one module of any executable should #define DEFINE_AML_GLOBALS
- * before #including the header files which use this convention.  The
- * names in question will be defined and initialized in that module,
- * and declared as extern in all other modules which #include those
- * header files.
- */
-
-#define DEFINE_AML_GLOBALS
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
-
-#define _COMPONENT          ACPI_EXECUTER
-ACPI_MODULE_NAME("exutils")
-
-/* Local prototypes */
-static u32 acpi_ex_digits_needed(acpi_integer value, u32 base);
-
-#ifndef ACPI_NO_METHOD_EXECUTION
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_enter_interpreter
- *
- * PARAMETERS:  None
- *
- * RETURN:      None
- *
- * DESCRIPTION: Enter the interpreter execution region. Failure to enter
- *              the interpreter region is a fatal system error. Used in
- *              conjunction with exit_interpreter.
- *
- ******************************************************************************/
-
-void acpi_ex_enter_interpreter(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ex_enter_interpreter);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
-	if (ACPI_FAILURE(status)) {
-		ACPI_ERROR((AE_INFO,
-			    "Could not acquire AML Interpreter mutex"));
-	}
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_reacquire_interpreter
- *
- * PARAMETERS:  None
- *
- * RETURN:      None
- *
- * DESCRIPTION: Reacquire the interpreter execution region from within the
- *              interpreter code. Failure to enter the interpreter region is a
- *              fatal system error. Used in conjunction with
- *              relinquish_interpreter
- *
- ******************************************************************************/
-
-void acpi_ex_reacquire_interpreter(void)
-{
-	ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);
-
-	/*
-	 * If the global serialized flag is set, do not release the interpreter,
-	 * since it was not actually released by acpi_ex_relinquish_interpreter.
-	 * This forces the interpreter to be single threaded.
-	 */
-	if (!acpi_gbl_all_methods_serialized) {
-		acpi_ex_enter_interpreter();
-	}
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_exit_interpreter
- *
- * PARAMETERS:  None
- *
- * RETURN:      None
- *
- * DESCRIPTION: Exit the interpreter execution region. This is the top level
- *              routine used to exit the interpreter when all processing has
- *              been completed.
- *
- ******************************************************************************/
-
-void acpi_ex_exit_interpreter(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ex_exit_interpreter);
-
-	status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
-	if (ACPI_FAILURE(status)) {
-		ACPI_ERROR((AE_INFO,
-			    "Could not release AML Interpreter mutex"));
-	}
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_relinquish_interpreter
- *
- * PARAMETERS:  None
- *
- * RETURN:      None
- *
- * DESCRIPTION: Exit the interpreter execution region, from within the
- *              interpreter - before attempting an operation that will possibly
- *              block the running thread.
- *
- * Cases where the interpreter is unlocked internally
- *      1) Method to be blocked on a Sleep() AML opcode
- *      2) Method to be blocked on an Acquire() AML opcode
- *      3) Method to be blocked on a Wait() AML opcode
- *      4) Method to be blocked to acquire the global lock
- *      5) Method to be blocked waiting to execute a serialized control method
- *          that is currently executing
- *      6) About to invoke a user-installed opregion handler
- *
- ******************************************************************************/
-
-void acpi_ex_relinquish_interpreter(void)
-{
-	ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);
-
-	/*
-	 * If the global serialized flag is set, do not release the interpreter.
-	 * This forces the interpreter to be single threaded.
-	 */
-	if (!acpi_gbl_all_methods_serialized) {
-		acpi_ex_exit_interpreter();
-	}
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_truncate_for32bit_table
- *
- * PARAMETERS:  obj_desc        - Object to be truncated
- *
- * RETURN:      none
- *
- * DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
- *              32-bit, as determined by the revision of the DSDT.
- *
- ******************************************************************************/
-
-void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc)
-{
-
-	ACPI_FUNCTION_ENTRY();
-
-	/*
-	 * Object must be a valid number and we must be executing
-	 * a control method. NS node could be there for AML_INT_NAMEPATH_OP.
-	 */
-	if ((!obj_desc) ||
-	    (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) ||
-	    (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER)) {
-		return;
-	}
-
-	if (acpi_gbl_integer_byte_width == 4) {
-		/*
-		 * We are running a method that exists in a 32-bit ACPI table.
-		 * Truncate the value to 32 bits by zeroing out the upper 32-bit field
-		 */
-		obj_desc->integer.value &= (acpi_integer) ACPI_UINT32_MAX;
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_acquire_global_lock
- *
- * PARAMETERS:  field_flags           - Flags with Lock rule:
- *                                      always_lock or never_lock
- *
- * RETURN:      None
- *
- * DESCRIPTION: Obtain the ACPI hardware Global Lock, only if the field
- *              flags specify that it is to be obtained before field access.
- *
- ******************************************************************************/
-
-void acpi_ex_acquire_global_lock(u32 field_flags)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ex_acquire_global_lock);
-
-	/* Only use the lock if the always_lock bit is set */
-
-	if (!(field_flags & AML_FIELD_LOCK_RULE_MASK)) {
-		return_VOID;
-	}
-
-	/* Attempt to get the global lock, wait forever */
-
-	status = acpi_ex_acquire_mutex_object(ACPI_WAIT_FOREVER,
-					      acpi_gbl_global_lock_mutex,
-					      acpi_os_get_thread_id());
-
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Could not acquire Global Lock"));
-	}
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_release_global_lock
- *
- * PARAMETERS:  field_flags           - Flags with Lock rule:
- *                                      always_lock or never_lock
- *
- * RETURN:      None
- *
- * DESCRIPTION: Release the ACPI hardware Global Lock
- *
- ******************************************************************************/
-
-void acpi_ex_release_global_lock(u32 field_flags)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ex_release_global_lock);
-
-	/* Only use the lock if the always_lock bit is set */
-
-	if (!(field_flags & AML_FIELD_LOCK_RULE_MASK)) {
-		return_VOID;
-	}
-
-	/* Release the global lock */
-
-	status = acpi_ex_release_mutex_object(acpi_gbl_global_lock_mutex);
-	if (ACPI_FAILURE(status)) {
-
-		/* Report the error, but there isn't much else we can do */
-
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Could not release Global Lock"));
-	}
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_digits_needed
- *
- * PARAMETERS:  Value           - Value to be represented
- *              Base            - Base of representation
- *
- * RETURN:      The number of digits.
- *
- * DESCRIPTION: Calculate the number of digits needed to represent the Value
- *              in the given Base (Radix)
- *
- ******************************************************************************/
-
-static u32 acpi_ex_digits_needed(acpi_integer value, u32 base)
-{
-	u32 num_digits;
-	acpi_integer current_value;
-
-	ACPI_FUNCTION_TRACE(ex_digits_needed);
-
-	/* acpi_integer is unsigned, so we don't worry about a '-' prefix */
-
-	if (value == 0) {
-		return_UINT32(1);
-	}
-
-	current_value = value;
-	num_digits = 0;
-
-	/* Count the digits in the requested base */
-
-	while (current_value) {
-		(void)acpi_ut_short_divide(current_value, base, &current_value,
-					   NULL);
-		num_digits++;
-	}
-
-	return_UINT32(num_digits);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_eisa_id_to_string
- *
- * PARAMETERS:  numeric_id      - EISA ID to be converted
- *              out_string      - Where to put the converted string (8 bytes)
- *
- * RETURN:      None
- *
- * DESCRIPTION: Convert a numeric EISA ID to string representation
- *
- ******************************************************************************/
-
-void acpi_ex_eisa_id_to_string(u32 numeric_id, char *out_string)
-{
-	u32 eisa_id;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* Swap ID to big-endian to get contiguous bits */
-
-	eisa_id = acpi_ut_dword_byte_swap(numeric_id);
-
-	out_string[0] = (char)('@' + (((unsigned long)eisa_id >> 26) & 0x1f));
-	out_string[1] = (char)('@' + ((eisa_id >> 21) & 0x1f));
-	out_string[2] = (char)('@' + ((eisa_id >> 16) & 0x1f));
-	out_string[3] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 12);
-	out_string[4] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 8);
-	out_string[5] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 4);
-	out_string[6] = acpi_ut_hex_to_ascii_char((acpi_integer) eisa_id, 0);
-	out_string[7] = 0;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_unsigned_integer_to_string
- *
- * PARAMETERS:  Value           - Value to be converted
- *              out_string      - Where to put the converted string
- *                                (buffer assumed large enough)
- *
- * RETURN:      None (converted string returned in out_string)
- *
- * DESCRIPTION: Convert a number to string representation. Assumes string
- *              buffer is large enough to hold the string.
- *
- ******************************************************************************/
-
-void acpi_ex_unsigned_integer_to_string(acpi_integer value, char *out_string)
-{
-	u32 count;
-	u32 digits_needed;
-	u32 remainder;
-
-	ACPI_FUNCTION_ENTRY();
-
-	digits_needed = acpi_ex_digits_needed(value, 10);
-	out_string[digits_needed] = 0;
-
-	for (count = digits_needed; count > 0; count--) {
-		(void)acpi_ut_short_divide(value, 10, &value, &remainder);
-		out_string[count - 1] = (char)('0' + remainder);
-	}
-}
-
-#endif
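[Editor's note] The EISA ID decode removed above is compact enough to restate on its own: after byte-swapping the ID to big-endian, the top three 5-bit fields encode letters (with 'A' == 1) and the low 16 bits are four hex digits. The sketch below is a standalone version; the sample input is assumed to be the conventional little-endian encoding of EisaId("PNP0C0A") and serves purely as a worked example.

#include <stdint.h>
#include <stdio.h>

/* out must have room for 8 bytes: seven characters plus the NUL. */
static void eisa_id_to_string(uint32_t id_le, char out[8])
{
	static const char hex[] = "0123456789ABCDEF";
	uint32_t id = ((id_le & 0x000000FFu) << 24) |
		      ((id_le & 0x0000FF00u) << 8) |
		      ((id_le & 0x00FF0000u) >> 8) |
		      ((id_le & 0xFF000000u) >> 24);	/* swap to big-endian */

	out[0] = (char)('@' + ((id >> 26) & 0x1F));
	out[1] = (char)('@' + ((id >> 21) & 0x1F));
	out[2] = (char)('@' + ((id >> 16) & 0x1F));
	out[3] = hex[(id >> 12) & 0xF];
	out[4] = hex[(id >> 8) & 0xF];
	out[5] = hex[(id >> 4) & 0xF];
	out[6] = hex[id & 0xF];
	out[7] = '\0';
}

int main(void)
{
	char buf[8];

	eisa_id_to_string(0x0A0CD041u, buf);
	printf("%s\n", buf);	/* prints PNP0C0A */
	return 0;
}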
diff --git a/drivers/acpi/hardware/Makefile b/drivers/acpi/hardware/Makefile
deleted file mode 100644
index 438ad373..0000000
--- a/drivers/acpi/hardware/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for all Linux ACPI interpreter subdirectories
-#
-
-obj-y := hwacpi.o  hwgpe.o  hwregs.o  hwsleep.o
-
-obj-$(ACPI_FUTURE_USAGE) += hwtimer.o
-
-EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/hardware/hwacpi.c b/drivers/acpi/hardware/hwacpi.c
deleted file mode 100644
index 816894e..0000000
--- a/drivers/acpi/hardware/hwacpi.c
+++ /dev/null
@@ -1,184 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-
-#define _COMPONENT          ACPI_HARDWARE
-ACPI_MODULE_NAME("hwacpi")
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_hw_set_mode
- *
- * PARAMETERS:  Mode            - SYS_MODE_ACPI or SYS_MODE_LEGACY
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Transitions the system into the requested mode.
- *
- ******************************************************************************/
-acpi_status acpi_hw_set_mode(u32 mode)
-{
-
-	acpi_status status;
-	u32 retry;
-
-	ACPI_FUNCTION_TRACE(hw_set_mode);
-
-	/*
-	 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
-	 * system does not support mode transition.
-	 */
-	if (!acpi_gbl_FADT.smi_command) {
-		ACPI_ERROR((AE_INFO,
-			    "No SMI_CMD in FADT, mode transition failed"));
-		return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
-	}
-
-	/*
-	 * ACPI 2.0 clarified the meaning of ACPI_ENABLE and ACPI_DISABLE
-	 * in FADT: If it is zero, enabling or disabling is not supported.
-	 * Since older systems may have used zero for one of these values even
-	 * though transitions work, we require both values to be zero before
-	 * concluding that mode transitions are not supported.
-	 */
-	if (!acpi_gbl_FADT.acpi_enable && !acpi_gbl_FADT.acpi_disable) {
-		ACPI_ERROR((AE_INFO,
-			    "No ACPI mode transition supported in this system (enable/disable both zero)"));
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	switch (mode) {
-	case ACPI_SYS_MODE_ACPI:
-
-		/* BIOS should have disabled ALL fixed and GP events */
-
-		status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
-					    (u32) acpi_gbl_FADT.acpi_enable, 8);
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "Attempting to enable ACPI mode\n"));
-		break;
-
-	case ACPI_SYS_MODE_LEGACY:
-
-		/*
-		 * BIOS should clear all fixed status bits and restore fixed event
-		 * enable bits to default
-		 */
-		status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
-					    (u32) acpi_gbl_FADT.acpi_disable,
-					    8);
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "Attempting to enable Legacy (non-ACPI) mode\n"));
-		break;
-
-	default:
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Could not write ACPI mode change"));
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * Some hardware takes a LONG time to switch modes. Give them 3 sec to
-	 * do so, but allow faster systems to proceed more quickly.
-	 */
-	retry = 3000;
-	while (retry) {
-		if (acpi_hw_get_mode() == mode) {
-			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-					  "Mode %X successfully enabled\n",
-					  mode));
-			return_ACPI_STATUS(AE_OK);
-		}
-		acpi_os_stall(1000);
-		retry--;
-	}
-
-	ACPI_ERROR((AE_INFO, "Hardware did not change modes"));
-	return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_hw_get_mode
- *
- * PARAMETERS:  none
- *
- * RETURN:      SYS_MODE_ACPI or SYS_MODE_LEGACY
- *
- * DESCRIPTION: Return current operating state of system.  Determined by
- *              querying the SCI_EN bit.
- *
- ******************************************************************************/
-
-u32 acpi_hw_get_mode(void)
-{
-	acpi_status status;
-	u32 value;
-
-	ACPI_FUNCTION_TRACE(hw_get_mode);
-
-	/*
-	 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
-	 * system does not support mode transition.
-	 */
-	if (!acpi_gbl_FADT.smi_command) {
-		return_UINT32(ACPI_SYS_MODE_ACPI);
-	}
-
-	status = acpi_get_register(ACPI_BITREG_SCI_ENABLE, &value);
-	if (ACPI_FAILURE(status)) {
-		return_UINT32(ACPI_SYS_MODE_LEGACY);
-	}
-
-	if (value) {
-		return_UINT32(ACPI_SYS_MODE_ACPI);
-	} else {
-		return_UINT32(ACPI_SYS_MODE_LEGACY);
-	}
-}
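[Editor's note] The mode switch removed above relies on a simple poll-with-timeout idiom: write the enable/disable value to the SMI command port, then re-read the mode up to 3000 times with a 1 ms stall between reads, i.e. a 3 second budget. A generic sketch of that idiom follows; the callback types stand in for acpi_hw_get_mode() and acpi_os_stall() and are illustrative only.

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t (*read_mode_fn)(void);
typedef void (*delay_us_fn)(uint32_t microseconds);

static bool wait_for_mode(uint32_t wanted, read_mode_fn read_mode,
			  delay_us_fn delay_us)
{
	unsigned int retry;

	for (retry = 3000; retry > 0; retry--) {
		if (read_mode() == wanted)
			return true;		/* hardware completed the switch */
		delay_us(1000);			/* 1 ms between polls */
	}
	return false;				/* AE_NO_HARDWARE_RESPONSE analogue */
}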
diff --git a/drivers/acpi/hardware/hwgpe.c b/drivers/acpi/hardware/hwgpe.c
deleted file mode 100644
index 0b80db9..0000000
--- a/drivers/acpi/hardware/hwgpe.c
+++ /dev/null
@@ -1,477 +0,0 @@
-
-/******************************************************************************
- *
- * Module Name: hwgpe - Low level GPE enable/disable/clear functions
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acevents.h>
-
-#define _COMPONENT          ACPI_HARDWARE
-ACPI_MODULE_NAME("hwgpe")
-
-/* Local prototypes */
-static acpi_status
-acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
-				struct acpi_gpe_block_info *gpe_block);
-
-/******************************************************************************
- *
- * FUNCTION:	acpi_hw_low_disable_gpe
- *
- * PARAMETERS:	gpe_event_info	    - Info block for the GPE to be disabled
- *
- * RETURN:	Status
- *
- * DESCRIPTION: Disable a single GPE in the enable register.
- *
- ******************************************************************************/
-
-acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
-{
-	struct acpi_gpe_register_info *gpe_register_info;
-	acpi_status status;
-	u32 enable_mask;
-
-	/* Get the info block for the entire GPE register */
-
-	gpe_register_info = gpe_event_info->register_info;
-	if (!gpe_register_info) {
-		return (AE_NOT_EXIST);
-	}
-
-	/* Get current value of the enable register that contains this GPE */
-
-	status = acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH, &enable_mask,
-					&gpe_register_info->enable_address);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	/* Clear just the bit that corresponds to this GPE */
-
-	ACPI_CLEAR_BIT(enable_mask,
-		       ((u32) 1 <<
-			(gpe_event_info->gpe_number -
-			 gpe_register_info->base_gpe_number)));
-
-	/* Write the updated enable mask */
-
-	status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, enable_mask,
-					 &gpe_register_info->enable_address);
-
-	return (status);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_hw_write_gpe_enable_reg
- *
- * PARAMETERS:  gpe_event_info      - Info block for the GPE to be enabled
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Write a GPE enable register.  Note: The bit for this GPE must
- *              already be cleared or set in the parent register
- *              enable_for_run mask.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info)
-{
-	struct acpi_gpe_register_info *gpe_register_info;
-	acpi_status status;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* Get the info block for the entire GPE register */
-
-	gpe_register_info = gpe_event_info->register_info;
-	if (!gpe_register_info) {
-		return (AE_NOT_EXIST);
-	}
-
-	/* Write the entire GPE (runtime) enable register */
-
-	status = acpi_hw_low_level_write(8, gpe_register_info->enable_for_run,
-					 &gpe_register_info->enable_address);
-
-	return (status);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_hw_clear_gpe
- *
- * PARAMETERS:  gpe_event_info      - Info block for the GPE to be cleared
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Clear the status bit for a single GPE.
- *
- ******************************************************************************/
-
-acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
-{
-	acpi_status status;
-	u8 register_bit;
-
-	ACPI_FUNCTION_ENTRY();
-
-	register_bit = (u8)
-	    (1 <<
-	     (gpe_event_info->gpe_number -
-	      gpe_event_info->register_info->base_gpe_number));
-
-	/*
-	 * Write a one to the appropriate bit in the status register to
-	 * clear this GPE.
-	 */
-	status = acpi_hw_low_level_write(8, register_bit,
-					 &gpe_event_info->register_info->
-					 status_address);
-
-	return (status);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_hw_get_gpe_status
- *
- * PARAMETERS:  gpe_event_info      - Info block for the GPE to be queried
- *              event_status        - Where the GPE status is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Return the status of a single GPE.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
-		       acpi_event_status * event_status)
-{
-	u32 in_byte;
-	u8 register_bit;
-	struct acpi_gpe_register_info *gpe_register_info;
-	acpi_status status;
-	acpi_event_status local_event_status = 0;
-
-	ACPI_FUNCTION_ENTRY();
-
-	if (!event_status) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/* Get the info block for the entire GPE register */
-
-	gpe_register_info = gpe_event_info->register_info;
-
-	/* Get the register bitmask for this GPE */
-
-	register_bit = (u8)
-	    (1 <<
-	     (gpe_event_info->gpe_number -
-	      gpe_event_info->register_info->base_gpe_number));
-
-	/* GPE currently enabled? (enabled for runtime?) */
-
-	if (register_bit & gpe_register_info->enable_for_run) {
-		local_event_status |= ACPI_EVENT_FLAG_ENABLED;
-	}
-
-	/* GPE enabled for wake? */
-
-	if (register_bit & gpe_register_info->enable_for_wake) {
-		local_event_status |= ACPI_EVENT_FLAG_WAKE_ENABLED;
-	}
-
-	/* GPE currently active (status bit == 1)? */
-
-	status =
-	    acpi_hw_low_level_read(8, &in_byte,
-				   &gpe_register_info->status_address);
-	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
-	}
-
-	if (register_bit & in_byte) {
-		local_event_status |= ACPI_EVENT_FLAG_SET;
-	}
-
-	/* Set return value */
-
-	(*event_status) = local_event_status;
-
-      unlock_and_exit:
-	return (status);
-}
-
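
The status query above folds the per-register enable-for-run, enable-for-wake, and status bytes into one flags word. A hedged sketch with made-up flag values (the real ACPI_EVENT_FLAG_* constants come from the ACPICA headers):

#include <stdint.h>

/* Hypothetical flag values, for illustration only */
#define EVT_ENABLED		0x01
#define EVT_WAKE_ENABLED	0x02
#define EVT_SET			0x04

uint32_t gpe_status_flags(uint8_t bit, uint8_t enable_for_run,
			  uint8_t enable_for_wake, uint8_t status_byte)
{
	uint32_t flags = 0;

	if (bit & enable_for_run)
		flags |= EVT_ENABLED;		/* enabled for runtime */
	if (bit & enable_for_wake)
		flags |= EVT_WAKE_ENABLED;	/* enabled as a wake source */
	if (bit & status_byte)
		flags |= EVT_SET;		/* event is currently asserted */

	return flags;
}
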
-/******************************************************************************
- *
- * FUNCTION:    acpi_hw_disable_gpe_block
- *
- * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
- *              gpe_block           - Gpe Block info
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Disable all GPEs within a single GPE block
- *
- ******************************************************************************/
-
-acpi_status
-acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
-			  struct acpi_gpe_block_info * gpe_block)
-{
-	u32 i;
-	acpi_status status;
-
-	/* Examine each GPE Register within the block */
-
-	for (i = 0; i < gpe_block->register_count; i++) {
-
-		/* Disable all GPEs in this register */
-
-		status = acpi_hw_low_level_write(8, 0x00,
-						 &gpe_block->register_info[i].
-						 enable_address);
-		if (ACPI_FAILURE(status)) {
-			return (status);
-		}
-	}
-
-	return (AE_OK);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_hw_clear_gpe_block
- *
- * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
- *              gpe_block           - Gpe Block info
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Clear status bits for all GPEs within a single GPE block
- *
- ******************************************************************************/
-
-acpi_status
-acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
-			struct acpi_gpe_block_info * gpe_block)
-{
-	u32 i;
-	acpi_status status;
-
-	/* Examine each GPE Register within the block */
-
-	for (i = 0; i < gpe_block->register_count; i++) {
-
-		/* Clear status on all GPEs in this register */
-
-		status = acpi_hw_low_level_write(8, 0xFF,
-						 &gpe_block->register_info[i].
-						 status_address);
-		if (ACPI_FAILURE(status)) {
-			return (status);
-		}
-	}
-
-	return (AE_OK);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_hw_enable_runtime_gpe_block
- *
- * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
- *              gpe_block           - Gpe Block info
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Enable all "runtime" GPEs within a single GPE block. Includes
- *              combination wake/run GPEs.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
-				 struct acpi_gpe_block_info * gpe_block)
-{
-	u32 i;
-	acpi_status status;
-
-	/* NOTE: assumes that all GPEs are currently disabled */
-
-	/* Examine each GPE Register within the block */
-
-	for (i = 0; i < gpe_block->register_count; i++) {
-		if (!gpe_block->register_info[i].enable_for_run) {
-			continue;
-		}
-
-		/* Enable all "runtime" GPEs in this register */
-
-		status =
-		    acpi_hw_low_level_write(8,
-					    gpe_block->register_info[i].
-					    enable_for_run,
-					    &gpe_block->register_info[i].
-					    enable_address);
-		if (ACPI_FAILURE(status)) {
-			return (status);
-		}
-	}
-
-	return (AE_OK);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_hw_enable_wakeup_gpe_block
- *
- * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
- *              gpe_block           - Gpe Block info
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Enable all "wake" GPEs within a single GPE block. Includes
- *              combination wake/run GPEs.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
-				struct acpi_gpe_block_info *gpe_block)
-{
-	u32 i;
-	acpi_status status;
-
-	/* Examine each GPE Register within the block */
-
-	for (i = 0; i < gpe_block->register_count; i++) {
-		if (!gpe_block->register_info[i].enable_for_wake) {
-			continue;
-		}
-
-		/* Enable all "wake" GPEs in this register */
-
-		status = acpi_hw_low_level_write(8,
-						 gpe_block->register_info[i].
-						 enable_for_wake,
-						 &gpe_block->register_info[i].
-						 enable_address);
-		if (ACPI_FAILURE(status)) {
-			return (status);
-		}
-	}
-
-	return (AE_OK);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_hw_disable_all_gpes
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
- *
- ******************************************************************************/
-
-acpi_status acpi_hw_disable_all_gpes(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(hw_disable_all_gpes);
-
-	status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block);
-	status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block);
-	return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_hw_enable_all_runtime_gpes
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
- *
- ******************************************************************************/
-
-acpi_status acpi_hw_enable_all_runtime_gpes(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(hw_enable_all_runtime_gpes);
-
-	status = acpi_ev_walk_gpe_list(acpi_hw_enable_runtime_gpe_block);
-	return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_hw_enable_all_wakeup_gpes
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Enable all "wakeup" GPEs, in all GPE blocks
- *
- ******************************************************************************/
-
-acpi_status acpi_hw_enable_all_wakeup_gpes(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(hw_enable_all_wakeup_gpes);
-
-	status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block);
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/hardware/hwregs.c b/drivers/acpi/hardware/hwregs.c
deleted file mode 100644
index ddf792a..0000000
--- a/drivers/acpi/hardware/hwregs.c
+++ /dev/null
@@ -1,857 +0,0 @@
-
-/*******************************************************************************
- *
- * Module Name: hwregs - Read/write access functions for the various ACPI
- *                       control and status registers.
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acevents.h>
-
-#define _COMPONENT          ACPI_HARDWARE
-ACPI_MODULE_NAME("hwregs")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_hw_clear_acpi_status
- *
- * PARAMETERS:  None
- *
- * RETURN:      None
- *
- * DESCRIPTION: Clears all fixed and general purpose status bits
- *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
- *
- ******************************************************************************/
-acpi_status acpi_hw_clear_acpi_status(void)
-{
-	acpi_status status;
-	acpi_cpu_flags lock_flags = 0;
-
-	ACPI_FUNCTION_TRACE(hw_clear_acpi_status);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n",
-			  ACPI_BITMASK_ALL_FIXED_STATUS,
-			  (u16) acpi_gbl_FADT.xpm1a_event_block.address));
-
-	lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
-
-	status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
-					ACPI_BITMASK_ALL_FIXED_STATUS);
-	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
-	}
-
-	/* Clear the fixed events */
-
-	if (acpi_gbl_FADT.xpm1b_event_block.address) {
-		status =
-		    acpi_hw_low_level_write(16, ACPI_BITMASK_ALL_FIXED_STATUS,
-					    &acpi_gbl_FADT.xpm1b_event_block);
-		if (ACPI_FAILURE(status)) {
-			goto unlock_and_exit;
-		}
-	}
-
-	/* Clear the GPE Bits in all GPE registers in all GPE blocks */
-
-	status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block);
-
-      unlock_and_exit:
-	acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_sleep_type_data
- *
- * PARAMETERS:  sleep_state         - Numeric sleep state
- *              *sleep_type_a        - Where SLP_TYPa is returned
- *              *sleep_type_b        - Where SLP_TYPb is returned
- *
- * RETURN:      Status - ACPI status
- *
- * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested sleep
- *              state.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b)
-{
-	acpi_status status = AE_OK;
-	struct acpi_evaluate_info *info;
-
-	ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data);
-
-	/* Validate parameters */
-
-	if ((sleep_state > ACPI_S_STATES_MAX) || !sleep_type_a || !sleep_type_b) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Allocate the evaluation information block */
-
-	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
-	if (!info) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	info->pathname =
-	    ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]);
-
-	/* Evaluate the namespace object containing the values for this state */
-
-	status = acpi_ns_evaluate(info);
-	if (ACPI_FAILURE(status)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "%s while evaluating SleepState [%s]\n",
-				  acpi_format_exception(status),
-				  info->pathname));
-
-		goto cleanup;
-	}
-
-	/* Must have a return object */
-
-	if (!info->return_object) {
-		ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]",
-			    info->pathname));
-		status = AE_NOT_EXIST;
-	}
-
-	/* It must be of type Package */
-
-	else if (ACPI_GET_OBJECT_TYPE(info->return_object) != ACPI_TYPE_PACKAGE) {
-		ACPI_ERROR((AE_INFO,
-			    "Sleep State return object is not a Package"));
-		status = AE_AML_OPERAND_TYPE;
-	}
-
-	/*
-	 * The package must have at least two elements. NOTE (March 2005): This
-	 * goes against the current ACPI spec which defines this object as a
-	 * package with one encoded DWORD element. However, existing practice
-	 * by BIOS vendors seems to be to have 2 or more elements, at least
-	 * one per sleep type (A/B).
-	 */
-	else if (info->return_object->package.count < 2) {
-		ACPI_ERROR((AE_INFO,
-			    "Sleep State return package does not have at least two elements"));
-		status = AE_AML_NO_OPERAND;
-	}
-
-	/* The first two elements must both be of type Integer */
-
-	else if ((ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[0])
-		  != ACPI_TYPE_INTEGER) ||
-		 (ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[1])
-		  != ACPI_TYPE_INTEGER)) {
-		ACPI_ERROR((AE_INFO,
-			    "Sleep State return package elements are not both Integers (%s, %s)",
-			    acpi_ut_get_object_type_name(info->return_object->
-							 package.elements[0]),
-			    acpi_ut_get_object_type_name(info->return_object->
-							 package.elements[1])));
-		status = AE_AML_OPERAND_TYPE;
-	} else {
-		/* Valid _Sx_ package size, type, and value */
-
-		*sleep_type_a = (u8)
-		    (info->return_object->package.elements[0])->integer.value;
-		*sleep_type_b = (u8)
-		    (info->return_object->package.elements[1])->integer.value;
-	}
-
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"While evaluating SleepState [%s], bad Sleep object %p type %s",
-				info->pathname, info->return_object,
-				acpi_ut_get_object_type_name(info->
-							     return_object)));
-	}
-
-	acpi_ut_remove_reference(info->return_object);
-
-      cleanup:
-	ACPI_FREE(info);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_sleep_type_data)
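
The validation chain above is easier to follow outside the ACPICA object model: the returned object must be a Package with at least two Integer elements, and the first two become SLP_TYPa and SLP_TYPb. A sketch with invented stand-in types (the real code walks union acpi_operand_object):

#include <stdint.h>
#include <stddef.h>

enum obj_type { OBJ_INTEGER, OBJ_PACKAGE, OBJ_OTHER };

struct obj {
	enum obj_type type;
	uint64_t integer;		/* valid when type == OBJ_INTEGER */
	struct obj **elements;		/* valid when type == OBJ_PACKAGE */
	size_t count;
};

/* Mirrors the checks above: a Package of at least two Integer elements. */
int extract_sleep_types(const struct obj *ret, uint8_t *a, uint8_t *b)
{
	if (!ret || ret->type != OBJ_PACKAGE)
		return -1;
	if (ret->count < 2)	/* the spec says one DWORD, firmware ships two or more */
		return -1;
	if (ret->elements[0]->type != OBJ_INTEGER ||
	    ret->elements[1]->type != OBJ_INTEGER)
		return -1;

	*a = (uint8_t)ret->elements[0]->integer;	/* SLP_TYPa */
	*b = (uint8_t)ret->elements[1]->integer;	/* SLP_TYPb */
	return 0;
}
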
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_hw_get_bit_register_info
- *
- * PARAMETERS:  register_id         - Index of ACPI Register to access
- *
- * RETURN:      Pointer to the bit register info structure, or NULL on error
- *
- * DESCRIPTION: Map register_id into its bit register info, which holds the
- *              parent register, bit position, and access bitmask.
- *
- ******************************************************************************/
-struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
-{
-	ACPI_FUNCTION_ENTRY();
-
-	if (register_id > ACPI_BITREG_MAX) {
-		ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: %X",
-			    register_id));
-		return (NULL);
-	}
-
-	return (&acpi_gbl_bit_register_info[register_id]);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_register
- *
- * PARAMETERS:  register_id     - ID of ACPI bit_register to access
- *              return_value    - Value that was read from the register
- *
- * RETURN:      Status and the value read from specified Register. Value
- *              returned is normalized to bit0 (is shifted all the way right)
- *
- * DESCRIPTION: ACPI bit_register read function.
- *
- ******************************************************************************/
-
-acpi_status acpi_get_register_unlocked(u32 register_id, u32 * return_value)
-{
-	u32 register_value = 0;
-	struct acpi_bit_register_info *bit_reg_info;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_get_register);
-
-	/* Get the info structure corresponding to the requested ACPI Register */
-
-	bit_reg_info = acpi_hw_get_bit_register_info(register_id);
-	if (!bit_reg_info) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Read from the register */
-
-	status = acpi_hw_register_read(bit_reg_info->parent_register,
-				       &register_value);
-
-	if (ACPI_SUCCESS(status)) {
-
-		/* Normalize the value that was read */
-
-		register_value =
-		    ((register_value & bit_reg_info->access_bit_mask)
-		     >> bit_reg_info->bit_position);
-
-		*return_value = register_value;
-
-		ACPI_DEBUG_PRINT((ACPI_DB_IO, "Read value %8.8X register %X\n",
-				  register_value,
-				  bit_reg_info->parent_register));
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-acpi_status acpi_get_register(u32 register_id, u32 * return_value)
-{
-	acpi_status status;
-	acpi_cpu_flags flags;
-	flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
-	status = acpi_get_register_unlocked(register_id, return_value);
-	acpi_os_release_lock(acpi_gbl_hardware_lock, flags);
-	return status;
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_register)
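
Normalization in the read path is simply a mask followed by a shift down to bit 0. A minimal sketch (the field position and mask are example values, not a real PM1 layout):

#include <stdint.h>
#include <stdio.h>

/* Mask out the field, then shift it down to bit 0, as the read path above does. */
uint32_t normalize_field(uint32_t reg, uint32_t access_mask, unsigned int bit_pos)
{
	return (reg & access_mask) >> bit_pos;
}

int main(void)
{
	/* Example: a 2-bit field occupying bits 10-11 of a register image */
	printf("%u\n", (unsigned int)normalize_field(0x0C00, 0x0C00, 10));	/* prints 3 */
	return 0;
}
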
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_set_register
- *
- * PARAMETERS:  register_id     - ID of ACPI bit_register to access
- *              Value           - (only used on write) value to write to the
- *                                Register, NOT pre-normalized to the bit pos
- *
- * RETURN:      Status
- *
- * DESCRIPTION: ACPI Bit Register write function.
- *
- ******************************************************************************/
-acpi_status acpi_set_register(u32 register_id, u32 value)
-{
-	u32 register_value = 0;
-	struct acpi_bit_register_info *bit_reg_info;
-	acpi_status status;
-	acpi_cpu_flags lock_flags;
-
-	ACPI_FUNCTION_TRACE_U32(acpi_set_register, register_id);
-
-	/* Get the info structure corresponding to the requested ACPI Register */
-
-	bit_reg_info = acpi_hw_get_bit_register_info(register_id);
-	if (!bit_reg_info) {
-		ACPI_ERROR((AE_INFO, "Bad ACPI HW RegisterId: %X",
-			    register_id));
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
-
-	/* Always do a register read first so we can insert the new bits  */
-
-	status = acpi_hw_register_read(bit_reg_info->parent_register,
-				       &register_value);
-	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
-	}
-
-	/*
-	 * Decode the Register ID
-	 * Register ID = [Register block ID] | [bit ID]
-	 *
-	 * Use the bit ID to locate the register, and the access mask to
-	 * determine which bits to read-modify-write.
-	 */
-	switch (bit_reg_info->parent_register) {
-	case ACPI_REGISTER_PM1_STATUS:
-
-		/*
-		 * Status registers are different from the rest: bits are cleared
-		 * by writing a 1, and writing 0 has no effect. So the only relevant
-		 * information is the single bit we are interested in; all other
-		 * bits should be written as 0 so they are left unchanged.
-		 */
-		value = ACPI_REGISTER_PREPARE_BITS(value,
-						   bit_reg_info->bit_position,
-						   bit_reg_info->
-						   access_bit_mask);
-		if (value) {
-			status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
-							(u16) value);
-			register_value = 0;
-		}
-		break;
-
-	case ACPI_REGISTER_PM1_ENABLE:
-
-		ACPI_REGISTER_INSERT_VALUE(register_value,
-					   bit_reg_info->bit_position,
-					   bit_reg_info->access_bit_mask,
-					   value);
-
-		status = acpi_hw_register_write(ACPI_REGISTER_PM1_ENABLE,
-						(u16) register_value);
-		break;
-
-	case ACPI_REGISTER_PM1_CONTROL:
-
-		/*
-		 * Write the PM1 Control register.
-		 * Note that at this level, the fact that there are actually TWO
-		 * registers (A and B - and B may not exist) is abstracted.
-		 */
-		ACPI_DEBUG_PRINT((ACPI_DB_IO, "PM1 control: Read %X\n",
-				  register_value));
-
-		ACPI_REGISTER_INSERT_VALUE(register_value,
-					   bit_reg_info->bit_position,
-					   bit_reg_info->access_bit_mask,
-					   value);
-
-		status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
-						(u16) register_value);
-		break;
-
-	case ACPI_REGISTER_PM2_CONTROL:
-
-		status = acpi_hw_register_read(ACPI_REGISTER_PM2_CONTROL,
-					       &register_value);
-		if (ACPI_FAILURE(status)) {
-			goto unlock_and_exit;
-		}
-
-		ACPI_DEBUG_PRINT((ACPI_DB_IO,
-				  "PM2 control: Read %X from %8.8X%8.8X\n",
-				  register_value,
-				  ACPI_FORMAT_UINT64(acpi_gbl_FADT.
-						     xpm2_control_block.
-						     address)));
-
-		ACPI_REGISTER_INSERT_VALUE(register_value,
-					   bit_reg_info->bit_position,
-					   bit_reg_info->access_bit_mask,
-					   value);
-
-		ACPI_DEBUG_PRINT((ACPI_DB_IO,
-				  "About to write %4.4X to %8.8X%8.8X\n",
-				  register_value,
-				  ACPI_FORMAT_UINT64(acpi_gbl_FADT.
-						     xpm2_control_block.
-						     address)));
-
-		status = acpi_hw_register_write(ACPI_REGISTER_PM2_CONTROL,
-						(u8) (register_value));
-		break;
-
-	default:
-		break;
-	}
-
-      unlock_and_exit:
-
-	acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
-
-	/* Normalize the value that was read */
-
-	ACPI_DEBUG_EXEC(register_value =
-			((register_value & bit_reg_info->access_bit_mask) >>
-			 bit_reg_info->bit_position));
-
-	ACPI_DEBUG_PRINT((ACPI_DB_IO,
-			  "Set bits: %8.8X actual %8.8X register %X\n", value,
-			  register_value, bit_reg_info->parent_register));
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_set_register)
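
For the PM1_ENABLE and PM1_CONTROL cases the write path is the inverse operation: read the register, clear the field, OR in the shifted value, and write back; only PM1_STATUS differs because its bits are write-one-to-clear. A sketch of the insert step with illustrative parameters:

#include <stdint.h>

/* Read-modify-write field insert: bits outside access_mask keep the value
 * that was just read from hardware, the field itself gets the caller's value. */
uint32_t insert_field(uint32_t reg, uint32_t access_mask, unsigned int bit_pos,
		      uint32_t value)
{
	return (reg & ~access_mask) | ((value << bit_pos) & access_mask);
}
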
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_hw_register_read
- *
- * PARAMETERS:  register_id         - ACPI Register ID
- *              return_value        - Where the register value is returned
- *
- * RETURN:      Status and the value read.
- *
- * DESCRIPTION: Read from the specified ACPI register
- *
- ******************************************************************************/
-acpi_status
-acpi_hw_register_read(u32 register_id, u32 * return_value)
-{
-	u32 value1 = 0;
-	u32 value2 = 0;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(hw_register_read);
-
-	switch (register_id) {
-	case ACPI_REGISTER_PM1_STATUS:	/* 16-bit access */
-
-		status =
-		    acpi_hw_low_level_read(16, &value1,
-					   &acpi_gbl_FADT.xpm1a_event_block);
-		if (ACPI_FAILURE(status)) {
-			goto exit;
-		}
-
-		/* PM1B is optional */
-
-		status =
-		    acpi_hw_low_level_read(16, &value2,
-					   &acpi_gbl_FADT.xpm1b_event_block);
-		value1 |= value2;
-		break;
-
-	case ACPI_REGISTER_PM1_ENABLE:	/* 16-bit access */
-
-		status =
-		    acpi_hw_low_level_read(16, &value1, &acpi_gbl_xpm1a_enable);
-		if (ACPI_FAILURE(status)) {
-			goto exit;
-		}
-
-		/* PM1B is optional */
-
-		status =
-		    acpi_hw_low_level_read(16, &value2, &acpi_gbl_xpm1b_enable);
-		value1 |= value2;
-		break;
-
-	case ACPI_REGISTER_PM1_CONTROL:	/* 16-bit access */
-
-		status =
-		    acpi_hw_low_level_read(16, &value1,
-					   &acpi_gbl_FADT.xpm1a_control_block);
-		if (ACPI_FAILURE(status)) {
-			goto exit;
-		}
-
-		status =
-		    acpi_hw_low_level_read(16, &value2,
-					   &acpi_gbl_FADT.xpm1b_control_block);
-		value1 |= value2;
-		break;
-
-	case ACPI_REGISTER_PM2_CONTROL:	/* 8-bit access */
-
-		status =
-		    acpi_hw_low_level_read(8, &value1,
-					   &acpi_gbl_FADT.xpm2_control_block);
-		break;
-
-	case ACPI_REGISTER_PM_TIMER:	/* 32-bit access */
-
-		status =
-		    acpi_hw_low_level_read(32, &value1,
-					   &acpi_gbl_FADT.xpm_timer_block);
-		break;
-
-	case ACPI_REGISTER_SMI_COMMAND_BLOCK:	/* 8-bit access */
-
-		status =
-		    acpi_os_read_port(acpi_gbl_FADT.smi_command, &value1, 8);
-		break;
-
-	default:
-		ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id));
-		status = AE_BAD_PARAMETER;
-		break;
-	}
-
-      exit:
-
-	if (ACPI_SUCCESS(status)) {
-		*return_value = value1;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_hw_register_write
- *
- * PARAMETERS:  register_id         - ACPI Register ID
- *              Value               - The value to write
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Write to the specified ACPI register
- *
- * NOTE: In accordance with the ACPI specification, this function automatically
- * preserves the value of the following bits, meaning that these bits cannot be
- * changed via this interface:
- *
- * PM1_CONTROL[0] = SCI_EN
- * PM1_CONTROL[9]
- * PM1_STATUS[11]
- *
- * ACPI References:
- * 1) Hardware Ignored Bits: When software writes to a register with ignored
- *      bit fields, it preserves the ignored bit fields
- * 2) SCI_EN: OSPM always preserves this bit position
- *
- ******************************************************************************/
-
-acpi_status acpi_hw_register_write(u32 register_id, u32 value)
-{
-	acpi_status status;
-	u32 read_value;
-
-	ACPI_FUNCTION_TRACE(hw_register_write);
-
-	switch (register_id) {
-	case ACPI_REGISTER_PM1_STATUS:	/* 16-bit access */
-
-		/* Perform a read first to preserve certain bits (per ACPI spec) */
-
-		status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS,
-					       &read_value);
-		if (ACPI_FAILURE(status)) {
-			goto exit;
-		}
-
-		/* Insert the bits to be preserved */
-
-		ACPI_INSERT_BITS(value, ACPI_PM1_STATUS_PRESERVED_BITS,
-				 read_value);
-
-		/* Now we can write the data */
-
-		status =
-		    acpi_hw_low_level_write(16, value,
-					    &acpi_gbl_FADT.xpm1a_event_block);
-		if (ACPI_FAILURE(status)) {
-			goto exit;
-		}
-
-		/* PM1B is optional */
-
-		status =
-		    acpi_hw_low_level_write(16, value,
-					    &acpi_gbl_FADT.xpm1b_event_block);
-		break;
-
-	case ACPI_REGISTER_PM1_ENABLE:	/* 16-bit access */
-
-		status =
-		    acpi_hw_low_level_write(16, value, &acpi_gbl_xpm1a_enable);
-		if (ACPI_FAILURE(status)) {
-			goto exit;
-		}
-
-		/* PM1B is optional */
-
-		status =
-		    acpi_hw_low_level_write(16, value, &acpi_gbl_xpm1b_enable);
-		break;
-
-	case ACPI_REGISTER_PM1_CONTROL:	/* 16-bit access */
-
-		/*
-		 * Perform a read first to preserve certain bits (per ACPI spec)
-		 */
-		status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
-					       &read_value);
-		if (ACPI_FAILURE(status)) {
-			goto exit;
-		}
-
-		/* Insert the bits to be preserved */
-
-		ACPI_INSERT_BITS(value, ACPI_PM1_CONTROL_PRESERVED_BITS,
-				 read_value);
-
-		/* Now we can write the data */
-
-		status =
-		    acpi_hw_low_level_write(16, value,
-					    &acpi_gbl_FADT.xpm1a_control_block);
-		if (ACPI_FAILURE(status)) {
-			goto exit;
-		}
-
-		status =
-		    acpi_hw_low_level_write(16, value,
-					    &acpi_gbl_FADT.xpm1b_control_block);
-		break;
-
-	case ACPI_REGISTER_PM1A_CONTROL:	/* 16-bit access */
-
-		status =
-		    acpi_hw_low_level_write(16, value,
-					    &acpi_gbl_FADT.xpm1a_control_block);
-		break;
-
-	case ACPI_REGISTER_PM1B_CONTROL:	/* 16-bit access */
-
-		status =
-		    acpi_hw_low_level_write(16, value,
-					    &acpi_gbl_FADT.xpm1b_control_block);
-		break;
-
-	case ACPI_REGISTER_PM2_CONTROL:	/* 8-bit access */
-
-		status =
-		    acpi_hw_low_level_write(8, value,
-					    &acpi_gbl_FADT.xpm2_control_block);
-		break;
-
-	case ACPI_REGISTER_PM_TIMER:	/* 32-bit access */
-
-		status =
-		    acpi_hw_low_level_write(32, value,
-					    &acpi_gbl_FADT.xpm_timer_block);
-		break;
-
-	case ACPI_REGISTER_SMI_COMMAND_BLOCK:	/* 8-bit access */
-
-		/* SMI_CMD is currently always in IO space */
-
-		status =
-		    acpi_os_write_port(acpi_gbl_FADT.smi_command, value, 8);
-		break;
-
-	default:
-		status = AE_BAD_PARAMETER;
-		break;
-	}
-
-      exit:
-	return_ACPI_STATUS(status);
-}
-
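
The PM1 status and control cases above also re-read the register first so that bits the ACPI spec marks as ignored or OSPM-preserved (SCI_EN among them) survive the write. A sketch of that merge, assuming ACPI_INSERT_BITS-style semantics; the mask value here is made up rather than the real ACPI_PM1_*_PRESERVED_BITS:

#include <stdint.h>

#define EXAMPLE_PRESERVED_BITS	0x0200u		/* illustrative, not the real mask */

/* Preserved bits are taken from the value just read back from hardware,
 * everything else comes from the value the caller asked to write. */
uint32_t merge_preserved(uint32_t to_write, uint32_t just_read)
{
	return (to_write & ~EXAMPLE_PRESERVED_BITS) |
	       (just_read & EXAMPLE_PRESERVED_BITS);
}
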
-/******************************************************************************
- *
- * FUNCTION:    acpi_hw_low_level_read
- *
- * PARAMETERS:  Width               - 8, 16, or 32
- *              Value               - Where the value is returned
- *              Reg                 - GAS register structure
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Read from either memory or IO space.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg)
-{
-	u64 address;
-	acpi_status status;
-
-	ACPI_FUNCTION_NAME(hw_low_level_read);
-
-	/*
-	 * Must have a valid pointer to a GAS structure, and
-	 * a non-zero address within. However, don't return an error
-	 * because the PM1A/B code must not fail if B isn't present.
-	 */
-	if (!reg) {
-		return (AE_OK);
-	}
-
-	/* Get a local copy of the address. Handles possible alignment issues */
-
-	ACPI_MOVE_64_TO_64(&address, &reg->address);
-	if (!address) {
-		return (AE_OK);
-	}
-	*value = 0;
-
-	/*
-	 * Two address spaces supported: Memory or IO.
-	 * PCI_Config is not supported here because the GAS struct is insufficient
-	 */
-	switch (reg->space_id) {
-	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-
-		status = acpi_os_read_memory((acpi_physical_address) address,
-					     value, width);
-		break;
-
-	case ACPI_ADR_SPACE_SYSTEM_IO:
-
-		status =
-		    acpi_os_read_port((acpi_io_address) address, value, width);
-		break;
-
-	default:
-		ACPI_ERROR((AE_INFO,
-			    "Unsupported address space: %X", reg->space_id));
-		return (AE_BAD_PARAMETER);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_IO,
-			  "Read:  %8.8X width %2d from %8.8X%8.8X (%s)\n",
-			  *value, width, ACPI_FORMAT_UINT64(address),
-			  acpi_ut_get_region_name(reg->space_id)));
-
-	return (status);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_hw_low_level_write
- *
- * PARAMETERS:  Width               - 8, 16, or 32
- *              Value               - To be written
- *              Reg                 - GAS register structure
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Write to either memory or IO space.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg)
-{
-	u64 address;
-	acpi_status status;
-
-	ACPI_FUNCTION_NAME(hw_low_level_write);
-
-	/*
-	 * Must have a valid pointer to a GAS structure, and
-	 * a non-zero address within. However, don't return an error
-	 * because the PM1A/B code must not fail if B isn't present.
-	 */
-	if (!reg) {
-		return (AE_OK);
-	}
-
-	/* Get a local copy of the address. Handles possible alignment issues */
-
-	ACPI_MOVE_64_TO_64(&address, &reg->address);
-	if (!address) {
-		return (AE_OK);
-	}
-
-	/*
-	 * Two address spaces supported: Memory or IO.
-	 * PCI_Config is not supported here because the GAS struct is insufficient
-	 */
-	switch (reg->space_id) {
-	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-
-		status = acpi_os_write_memory((acpi_physical_address) address,
-					      value, width);
-		break;
-
-	case ACPI_ADR_SPACE_SYSTEM_IO:
-
-		status = acpi_os_write_port((acpi_io_address) address, value,
-					    width);
-		break;
-
-	default:
-		ACPI_ERROR((AE_INFO,
-			    "Unsupported address space: %X", reg->space_id));
-		return (AE_BAD_PARAMETER);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_IO,
-			  "Wrote: %8.8X width %2d   to %8.8X%8.8X (%s)\n",
-			  value, width, ACPI_FORMAT_UINT64(address),
-			  acpi_ut_get_region_name(reg->space_id)));
-
-	return (status);
-}
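
Both low-level accessors deliberately return success when the register block is absent, because PM1B is optional and the PM1A/B callers invoke them unconditionally. A small sketch of that convention with a stand-in register descriptor:

#include <stdint.h>
#include <stdbool.h>

struct reg_block {
	uint64_t address;	/* 0 means "not implemented on this platform" */
};

/* Absent blocks are silently skipped rather than treated as an error,
 * so accesses aimed at an optional PM1B block become harmless no-ops. */
bool reg_block_present(const struct reg_block *reg)
{
	return reg && reg->address != 0;
}
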
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
deleted file mode 100644
index 25dccdf..0000000
--- a/drivers/acpi/hardware/hwsleep.c
+++ /dev/null
@@ -1,643 +0,0 @@
-
-/******************************************************************************
- *
- * Name: hwsleep.c - ACPI Hardware Sleep/Wake Interface
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/actables.h>
-
-#define _COMPONENT          ACPI_HARDWARE
-ACPI_MODULE_NAME("hwsleep")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_set_firmware_waking_vector
- *
- * PARAMETERS:  physical_address    - Physical address of ACPI real mode
- *                                    entry point.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Access function for the firmware_waking_vector field in FACS
- *
- ******************************************************************************/
-acpi_status
-acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
-{
-	struct acpi_table_facs *facs;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
-
-	/* Get the FACS */
-
-	status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
-					 ACPI_CAST_INDIRECT_PTR(struct
-								acpi_table_header,
-								&facs));
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * According to the ACPI specification 2.0c and later, the 64-bit
-	 * waking vector should be cleared and the 32-bit waking vector should
-	 * be used, unless we want the wake-up code to be called by the BIOS in
-	 * Protected Mode.  Some systems (for example HP dv5-1004nr) are known
-	 * to fail to resume if the 64-bit vector is used.
-	 */
-	if (facs->version >= 1)
-		facs->xfirmware_waking_vector = 0;
-
-	facs->firmware_waking_vector = (u32)physical_address;
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
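
The version check above encodes a compatibility decision: on FACS version 1 and later the 64-bit vector is zeroed so firmware resumes through the 32-bit vector, which some machines require. A stand-alone sketch (the struct fields are illustrative, not the real FACS layout):

#include <stdint.h>

struct facs_image {
	uint8_t  version;
	uint32_t firmware_waking_vector;	/* 32-bit physical wake entry point */
	uint64_t xfirmware_waking_vector;	/* 64-bit entry point, deliberately unused */
};

void set_waking_vector(struct facs_image *facs, uint64_t phys_addr)
{
	if (facs->version >= 1)
		facs->xfirmware_waking_vector = 0;	/* force the 32-bit resume path */

	facs->firmware_waking_vector = (uint32_t)phys_addr;
}
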
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_firmware_waking_vector
- *
- * PARAMETERS:  *physical_address   - Where the contents of
- *                                    the firmware_waking_vector field of
- *                                    the FACS will be returned.
- *
- * RETURN:      Status, vector
- *
- * DESCRIPTION: Access function for the firmware_waking_vector field in FACS
- *
- ******************************************************************************/
-#ifdef ACPI_FUTURE_USAGE
-acpi_status
-acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
-{
-	struct acpi_table_facs *facs;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_get_firmware_waking_vector);
-
-	if (!physical_address) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Get the FACS */
-
-	status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
-					 ACPI_CAST_INDIRECT_PTR(struct
-								acpi_table_header,
-								&facs));
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Get the vector */
-	*physical_address = (acpi_physical_address)facs->firmware_waking_vector;
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_firmware_waking_vector)
-#endif
-/*******************************************************************************
- *
- * FUNCTION:    acpi_enter_sleep_state_prep
- *
- * PARAMETERS:  sleep_state         - Which sleep state to enter
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Prepare to enter a system sleep state (see ACPI 2.0 spec p 231)
- *              This function must execute with interrupts enabled.
- *              We break sleeping into 2 stages so that OSPM can handle
- *              various OS-specific tasks between the two steps.
- *
- ******************************************************************************/
-acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
-{
-	acpi_status status;
-	struct acpi_object_list arg_list;
-	union acpi_object arg;
-
-	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);
-
-	/*
-	 * _PSW methods could be run here to enable wake-on keyboard, LAN, etc.
-	 */
-	status = acpi_get_sleep_type_data(sleep_state,
-					  &acpi_gbl_sleep_type_a,
-					  &acpi_gbl_sleep_type_b);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Setup parameter object */
-
-	arg_list.count = 1;
-	arg_list.pointer = &arg;
-
-	arg.type = ACPI_TYPE_INTEGER;
-	arg.integer.value = sleep_state;
-
-	/* Run the _PTS method */
-
-	status = acpi_evaluate_object(NULL, METHOD_NAME__PTS, &arg_list, NULL);
-	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Setup the argument to _SST */
-
-	switch (sleep_state) {
-	case ACPI_STATE_S0:
-		arg.integer.value = ACPI_SST_WORKING;
-		break;
-
-	case ACPI_STATE_S1:
-	case ACPI_STATE_S2:
-	case ACPI_STATE_S3:
-		arg.integer.value = ACPI_SST_SLEEPING;
-		break;
-
-	case ACPI_STATE_S4:
-		arg.integer.value = ACPI_SST_SLEEP_CONTEXT;
-		break;
-
-	default:
-		arg.integer.value = ACPI_SST_INDICATOR_OFF;	/* Default is off */
-		break;
-	}
-
-	/*
-	 * Set the system indicators to show the desired sleep state.
-	 * _SST is an optional method (return no error if not found)
-	 */
-	status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
-	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"While executing method _SST"));
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_enter_sleep_state
- *
- * PARAMETERS:  sleep_state         - Which sleep state to enter
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231)
- *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
- *
- ******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
-{
-	u32 PM1Acontrol;
-	u32 PM1Bcontrol;
-	struct acpi_bit_register_info *sleep_type_reg_info;
-	struct acpi_bit_register_info *sleep_enable_reg_info;
-	u32 in_value;
-	struct acpi_object_list arg_list;
-	union acpi_object arg;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);
-
-	if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
-	    (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
-		ACPI_ERROR((AE_INFO, "Sleep values out of range: A=%X B=%X",
-			    acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
-		return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
-	}
-
-	sleep_type_reg_info =
-	    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE_A);
-	sleep_enable_reg_info =
-	    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_ENABLE);
-
-	/* Clear wake status */
-
-	status = acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Clear all fixed and general purpose status bits */
-
-	status = acpi_hw_clear_acpi_status();
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * 1) Disable/Clear all GPEs
-	 * 2) Enable all wakeup GPEs
-	 */
-	status = acpi_hw_disable_all_gpes();
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-	acpi_gbl_system_awake_and_running = FALSE;
-
-	status = acpi_hw_enable_all_wakeup_gpes();
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Execute the _GTS method */
-
-	arg_list.count = 1;
-	arg_list.pointer = &arg;
-	arg.type = ACPI_TYPE_INTEGER;
-	arg.integer.value = sleep_state;
-
-	status = acpi_evaluate_object(NULL, METHOD_NAME__GTS, &arg_list, NULL);
-	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Get current value of PM1A control */
-
-	status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, &PM1Acontrol);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
-			  "Entering sleep state [S%d]\n", sleep_state));
-
-	/* Clear SLP_EN and SLP_TYP fields */
-
-	PM1Acontrol &= ~(sleep_type_reg_info->access_bit_mask |
-			 sleep_enable_reg_info->access_bit_mask);
-	PM1Bcontrol = PM1Acontrol;
-
-	/* Insert SLP_TYP bits */
-
-	PM1Acontrol |=
-	    (acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position);
-	PM1Bcontrol |=
-	    (acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position);
-
-	/*
-	 * We split the writes of SLP_TYP and SLP_EN to work around
-	 * poorly implemented hardware.
-	 */
-
-	/* Write #1: fill in SLP_TYP data */
-
-	status = acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
-					PM1Acontrol);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_hw_register_write(ACPI_REGISTER_PM1B_CONTROL,
-					PM1Bcontrol);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Insert SLP_ENABLE bit */
-
-	PM1Acontrol |= sleep_enable_reg_info->access_bit_mask;
-	PM1Bcontrol |= sleep_enable_reg_info->access_bit_mask;
-
-	/* Write #2: SLP_TYP + SLP_EN */
-
-	ACPI_FLUSH_CPU_CACHE();
-
-	status = acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
-					PM1Acontrol);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_hw_register_write(ACPI_REGISTER_PM1B_CONTROL,
-					PM1Bcontrol);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	if (sleep_state > ACPI_STATE_S3) {
-		/*
-		 * We wanted to sleep > S3, but it didn't happen (by virtue of the
-		 * fact that we are still executing!)
-		 *
-		 * Wait ten seconds, then try again. This is to get S4/S5 to work on
-		 * all machines.
-		 *
-		 * We wait so long to allow chipsets that poll this reg very slowly to
-		 * still read the right value. Ideally, this block would go
-		 * away entirely.
-		 */
-		acpi_os_stall(10000000);
-
-		status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
-						sleep_enable_reg_info->
-						access_bit_mask);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/* Wait until we enter sleep state */
-
-	do {
-		status = acpi_get_register_unlocked(ACPI_BITREG_WAKE_STATUS,
-						    &in_value);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		/* Spin until we wake */
-
-	} while (!in_value);
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
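
The sleep entry above clears the SLP_TYP and SLP_EN fields, inserts the per-state type values, writes that once, and only then sets SLP_EN in a second write, working around hardware that mishandles a combined write. A compressed sketch of the field manipulation (bit positions follow the usual PM1 control layout, SLP_TYP at bits 10-12 and SLP_EN at bit 13, but treat them as assumptions here):

#include <stdint.h>
#include <stdio.h>

#define SLP_TYP_POS	10
#define SLP_TYP_MASK	(0x7u << SLP_TYP_POS)	/* bits 10-12 */
#define SLP_EN		(0x1u << 13)

int main(void)
{
	uint32_t pm1a = 0x0001;		/* pretend SCI_EN is already set */
	uint32_t slp_typ_a = 5;		/* value a firmware _S3_ package might supply */

	/* Write #1: sleep type only */
	pm1a &= ~(SLP_TYP_MASK | SLP_EN);
	pm1a |= slp_typ_a << SLP_TYP_POS;
	printf("write 1: %04X\n", (unsigned int)pm1a);	/* 1401 */

	/* Write #2: sleep type plus enable, which actually starts the transition */
	pm1a |= SLP_EN;
	printf("write 2: %04X\n", (unsigned int)pm1a);	/* 3401 */
	return 0;
}
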
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_enter_sleep_state_s4bios
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Perform an S4 BIOS request.
- *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
- *
- ******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
-{
-	u32 in_value;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
-
-	status = acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_hw_clear_acpi_status();
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * 1) Disable/Clear all GPEs
-	 * 2) Enable all wakeup GPEs
-	 */
-	status = acpi_hw_disable_all_gpes();
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-	acpi_gbl_system_awake_and_running = FALSE;
-
-	status = acpi_hw_enable_all_wakeup_gpes();
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	ACPI_FLUSH_CPU_CACHE();
-
-	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
-				    (u32) acpi_gbl_FADT.S4bios_request, 8);
-
-	do {
-		acpi_os_stall(1000);
-		status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	} while (!in_value);
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_leave_sleep_state_prep
- *
- * PARAMETERS:  sleep_state         - Which sleep state we are exiting
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Perform the first stage of OS-independent ACPI cleanup after a
- *              sleep.
- *              Called with interrupts DISABLED.
- *
- ******************************************************************************/
-acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
-{
-	struct acpi_object_list arg_list;
-	union acpi_object arg;
-	acpi_status status;
-	struct acpi_bit_register_info *sleep_type_reg_info;
-	struct acpi_bit_register_info *sleep_enable_reg_info;
-	u32 PM1Acontrol;
-	u32 PM1Bcontrol;
-
-	ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
-
-	/*
-	 * Set SLP_TYPE and SLP_EN to state S0.
-	 * This is unclear from the ACPI Spec, but it is required
-	 * by some machines.
-	 */
-	status = acpi_get_sleep_type_data(ACPI_STATE_S0,
-					  &acpi_gbl_sleep_type_a,
-					  &acpi_gbl_sleep_type_b);
-	if (ACPI_SUCCESS(status)) {
-		sleep_type_reg_info =
-		    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE_A);
-		sleep_enable_reg_info =
-		    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_ENABLE);
-
-		/* Get current value of PM1A control */
-
-		status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
-					       &PM1Acontrol);
-		if (ACPI_SUCCESS(status)) {
-
-			/* Clear SLP_EN and SLP_TYP fields */
-
-			PM1Acontrol &= ~(sleep_type_reg_info->access_bit_mask |
-					 sleep_enable_reg_info->
-					 access_bit_mask);
-			PM1Bcontrol = PM1Acontrol;
-
-			/* Insert SLP_TYP bits */
-
-			PM1Acontrol |=
-			    (acpi_gbl_sleep_type_a << sleep_type_reg_info->
-			     bit_position);
-			PM1Bcontrol |=
-			    (acpi_gbl_sleep_type_b << sleep_type_reg_info->
-			     bit_position);
-
-			/* Just ignore any errors */
-
-			(void)acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
-						     PM1Acontrol);
-			(void)acpi_hw_register_write(ACPI_REGISTER_PM1B_CONTROL,
-						     PM1Bcontrol);
-		}
-	}
-
-	/* Execute the _BFS method */
-
-	arg_list.count = 1;
-	arg_list.pointer = &arg;
-	arg.type = ACPI_TYPE_INTEGER;
-	arg.integer.value = sleep_state;
-
-	status = acpi_evaluate_object(NULL, METHOD_NAME__BFS, &arg_list, NULL);
-	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-		ACPI_EXCEPTION((AE_INFO, status, "During Method _BFS"));
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_leave_sleep_state
- *
- * PARAMETERS:  sleep_state         - Which sleep state we just exited
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Perform OS-independent ACPI cleanup after a sleep
- *              Called with interrupts ENABLED.
- *
- ******************************************************************************/
-acpi_status acpi_leave_sleep_state(u8 sleep_state)
-{
-	struct acpi_object_list arg_list;
-	union acpi_object arg;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
-
-	/* Ensure enter_sleep_state_prep -> enter_sleep_state ordering */
-
-	acpi_gbl_sleep_type_a = ACPI_SLEEP_TYPE_INVALID;
-
-	/* Setup parameter object */
-
-	arg_list.count = 1;
-	arg_list.pointer = &arg;
-	arg.type = ACPI_TYPE_INTEGER;
-
-	/* Ignore any errors from these methods */
-
-	arg.integer.value = ACPI_SST_WAKING;
-	status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
-	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-		ACPI_EXCEPTION((AE_INFO, status, "During Method _SST"));
-	}
-
-	/*
-	 * GPEs must be enabled before _WAK is called as GPEs
-	 * might get fired there
-	 *
-	 * Restore the GPEs:
-	 * 1) Disable/Clear all GPEs
-	 * 2) Enable all runtime GPEs
-	 */
-	status = acpi_hw_disable_all_gpes();
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-	status = acpi_hw_enable_all_runtime_gpes();
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	arg.integer.value = sleep_state;
-	status = acpi_evaluate_object(NULL, METHOD_NAME__WAK, &arg_list, NULL);
-	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-		ACPI_EXCEPTION((AE_INFO, status, "During Method _WAK"));
-	}
-	/* TBD: _WAK "sometimes" returns stuff - do we want to look at it? */
-
-	/*
-	 * Some BIOSes assume that WAK_STS will be cleared on resume and use
-	 * it to determine whether the system is rebooting or resuming. Clear
-	 * it for compatibility.
-	 */
-	acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
-
-	acpi_gbl_system_awake_and_running = TRUE;
-
-	/* Enable power button */
-
-	(void)
-	    acpi_set_register(acpi_gbl_fixed_event_info
-			      [ACPI_EVENT_POWER_BUTTON].enable_register_id, 1);
-
-	(void)
-	    acpi_set_register(acpi_gbl_fixed_event_info
-			      [ACPI_EVENT_POWER_BUTTON].status_register_id, 1);
-
-	arg.integer.value = ACPI_SST_WORKING;
-	status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
-	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-		ACPI_EXCEPTION((AE_INFO, status, "During Method _SST"));
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state)
diff --git a/drivers/acpi/hardware/hwtimer.c b/drivers/acpi/hardware/hwtimer.c
deleted file mode 100644
index b53d575..0000000
--- a/drivers/acpi/hardware/hwtimer.c
+++ /dev/null
@@ -1,187 +0,0 @@
-
-/******************************************************************************
- *
- * Name: hwtimer.c - ACPI Power Management Timer Interface
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-
-#define _COMPONENT          ACPI_HARDWARE
-ACPI_MODULE_NAME("hwtimer")
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_get_timer_resolution
- *
- * PARAMETERS:  Resolution          - Where the resolution is returned
- *
- * RETURN:      Status and timer resolution
- *
- * DESCRIPTION: Obtains resolution of the ACPI PM Timer (24 or 32 bits).
- *
- ******************************************************************************/
-acpi_status acpi_get_timer_resolution(u32 * resolution)
-{
-	ACPI_FUNCTION_TRACE(acpi_get_timer_resolution);
-
-	if (!resolution) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
-		*resolution = 24;
-	} else {
-		*resolution = 32;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_timer_resolution)
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_get_timer
- *
- * PARAMETERS:  Ticks               - Where the timer value is returned
- *
- * RETURN:      Status and current timer value (ticks)
- *
- * DESCRIPTION: Obtains current value of ACPI PM Timer (in ticks).
- *
- ******************************************************************************/
-acpi_status acpi_get_timer(u32 * ticks)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_get_timer);
-
-	if (!ticks) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status =
-	    acpi_hw_low_level_read(32, ticks, &acpi_gbl_FADT.xpm_timer_block);
-
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_timer)
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_get_timer_duration
- *
- * PARAMETERS:  start_ticks         - Starting timestamp
- *              end_ticks           - End timestamp
- *              time_elapsed        - Where the elapsed time is returned
- *
- * RETURN:      Status and time_elapsed
- *
- * DESCRIPTION: Computes the time elapsed (in microseconds) between two
- *              PM Timer time stamps, taking into account the possibility of
- *              rollovers, the timer resolution, and timer frequency.
- *
- *              The PM Timer's clock ticks at roughly 3.6 times per
- *              _microsecond_, and its clock continues through Cx state
- *              transitions (unlike many CPU timestamp counters) -- making it
- *              a versatile and accurate timer.
- *
- *              Note that this function accommodates only a single timer
- *              rollover.  Thus for 24-bit timers, this function should only
- *              be used for calculating durations less than ~4.6 seconds
- *              (~20 minutes for 32-bit timers) -- calculations below:
- *
- *              2**24 Ticks / 3,600,000 Ticks/Sec = 4.66 sec
- *              2**32 Ticks / 3,600,000 Ticks/Sec = 1193 sec or 19.88 minutes
- *
- ******************************************************************************/
-acpi_status
-acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
-{
-	acpi_status status;
-	u32 delta_ticks;
-	acpi_integer quotient;
-
-	ACPI_FUNCTION_TRACE(acpi_get_timer_duration);
-
-	if (!time_elapsed) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/*
-	 * Compute Tick Delta:
-	 * Handle (at most one) timer rollover on 24-bit versus 32-bit timers.
-	 */
-	if (start_ticks < end_ticks) {
-		delta_ticks = end_ticks - start_ticks;
-	} else if (start_ticks > end_ticks) {
-		if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
-
-			/* 24-bit Timer */
-
-			delta_ticks =
-			    (((0x00FFFFFF - start_ticks) +
-			      end_ticks) & 0x00FFFFFF);
-		} else {
-			/* 32-bit Timer */
-
-			delta_ticks = (0xFFFFFFFF - start_ticks) + end_ticks;
-		}
-	} else {		/* start_ticks == end_ticks */
-
-		*time_elapsed = 0;
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/*
-	 * Compute Duration (Requires a 64-bit multiply and divide):
-	 *
-	 * time_elapsed = (delta_ticks * 1000000) / PM_TIMER_FREQUENCY;
-	 */
-	status = acpi_ut_short_divide(((u64) delta_ticks) * 1000000,
-				      PM_TIMER_FREQUENCY, &quotient, NULL);
-
-	*time_elapsed = (u32) quotient;
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_timer_duration)
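
For context, the three exports from this file are normally used together to time a short interval. The sketch below is illustrative only: the function name pm_timer_measure and the printk text are not part of the deleted file, but the acpi_get_timer*() signatures are exactly the ones shown above.

#include <linux/kernel.h>
#include <acpi/acpi.h>

static void pm_timer_measure(void)
{
	u32 start, end, elapsed_us;

	if (ACPI_FAILURE(acpi_get_timer(&start)))	/* first timestamp, in PM timer ticks */
		return;

	/* ... the work being timed goes here ... */

	if (ACPI_FAILURE(acpi_get_timer(&end)))		/* second timestamp */
		return;

	/* Convert the tick delta (at most one rollover) to microseconds */
	if (ACPI_SUCCESS(acpi_get_timer_duration(start, end, &elapsed_us)))
		printk(KERN_DEBUG "PM timer: %u us elapsed\n", elapsed_us);
}
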
diff --git a/drivers/acpi/namespace/Makefile b/drivers/acpi/namespace/Makefile
deleted file mode 100644
index 371a2da..0000000
--- a/drivers/acpi/namespace/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Makefile for all Linux ACPI interpreter subdirectories
-#
-
-obj-y := nsaccess.o  nsload.o    nssearch.o  nsxfeval.o \
-	 nsalloc.o   nseval.o    nsnames.o   nsutils.o   nsxfname.o \
-	 nsdump.o    nsinit.o    nsobject.o  nswalk.o    nsxfobj.o  \
-	 nsparse.o   nspredef.o
-
-obj-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
-
-EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/namespace/nsaccess.c b/drivers/acpi/namespace/nsaccess.c
deleted file mode 100644
index c39a7f6..0000000
--- a/drivers/acpi/namespace/nsaccess.c
+++ /dev/null
@@ -1,674 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: nsaccess - Top-level functions for accessing ACPI namespace
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/amlcode.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acdispat.h>
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nsaccess")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_root_initialize
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Allocate and initialize the default root named objects
- *
- * MUTEX:       Locks namespace for entire execution
- *
- ******************************************************************************/
-acpi_status acpi_ns_root_initialize(void)
-{
-	acpi_status status;
-	const struct acpi_predefined_names *init_val = NULL;
-	struct acpi_namespace_node *new_node;
-	union acpi_operand_object *obj_desc;
-	acpi_string val = NULL;
-
-	ACPI_FUNCTION_TRACE(ns_root_initialize);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * The global root ptr is initially NULL, so a non-NULL value indicates
-	 * that acpi_ns_root_initialize() has already been called; just return.
-	 */
-	if (acpi_gbl_root_node) {
-		status = AE_OK;
-		goto unlock_and_exit;
-	}
-
-	/*
-	 * Tell the rest of the subsystem that the root is initialized
-	 * (This is OK because the namespace is locked)
-	 */
-	acpi_gbl_root_node = &acpi_gbl_root_node_struct;
-
-	/* Enter the pre-defined names in the name table */
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "Entering predefined entries into namespace\n"));
-
-	for (init_val = acpi_gbl_pre_defined_names; init_val->name; init_val++) {
-
-		/* _OSI is optional for now, will be permanent later */
-
-		if (!ACPI_STRCMP(init_val->name, "_OSI")
-		    && !acpi_gbl_create_osi_method) {
-			continue;
-		}
-
-		status = acpi_ns_lookup(NULL, init_val->name, init_val->type,
-					ACPI_IMODE_LOAD_PASS2,
-					ACPI_NS_NO_UPSEARCH, NULL, &new_node);
-
-		if (ACPI_FAILURE(status) || (!new_node)) {	/* Must be on same line for code converter */
-			ACPI_EXCEPTION((AE_INFO, status,
-					"Could not create predefined name %s",
-					init_val->name));
-		}
-
-		/*
-		 * Name entered successfully.
-		 * If entry in pre_defined_names[] specifies an
-		 * initial value, create the initial value.
-		 */
-		if (init_val->val) {
-			status = acpi_os_predefined_override(init_val, &val);
-			if (ACPI_FAILURE(status)) {
-				ACPI_ERROR((AE_INFO,
-					    "Could not override predefined %s",
-					    init_val->name));
-			}
-
-			if (!val) {
-				val = init_val->val;
-			}
-
-			/*
-			 * Entry requests an initial value, allocate a
-			 * descriptor for it.
-			 */
-			obj_desc =
-			    acpi_ut_create_internal_object(init_val->type);
-			if (!obj_desc) {
-				status = AE_NO_MEMORY;
-				goto unlock_and_exit;
-			}
-
-			/*
-			 * Convert value string from table entry to
-			 * internal representation. Only types actually
-			 * used for initial values are implemented here.
-			 */
-			switch (init_val->type) {
-			case ACPI_TYPE_METHOD:
-				obj_desc->method.param_count =
-				    (u8) ACPI_TO_INTEGER(val);
-				obj_desc->common.flags |= AOPOBJ_DATA_VALID;
-
-#if defined (ACPI_ASL_COMPILER)
-
-				/* Save the parameter count for the i_aSL compiler */
-
-				new_node->value = obj_desc->method.param_count;
-#else
-				/* Mark this as a very SPECIAL method */
-
-				obj_desc->method.method_flags =
-				    AML_METHOD_INTERNAL_ONLY;
-
-#ifndef ACPI_DUMP_APP
-				obj_desc->method.implementation =
-				    acpi_ut_osi_implementation;
-#endif
-#endif
-				break;
-
-			case ACPI_TYPE_INTEGER:
-
-				obj_desc->integer.value = ACPI_TO_INTEGER(val);
-				break;
-
-			case ACPI_TYPE_STRING:
-
-				/*
-				 * Build an object around the static string
-				 */
-				obj_desc->string.length =
-				    (u32) ACPI_STRLEN(val);
-				obj_desc->string.pointer = val;
-				obj_desc->common.flags |= AOPOBJ_STATIC_POINTER;
-				break;
-
-			case ACPI_TYPE_MUTEX:
-
-				obj_desc->mutex.node = new_node;
-				obj_desc->mutex.sync_level =
-				    (u8) (ACPI_TO_INTEGER(val) - 1);
-
-				/* Create a mutex */
-
-				status =
-				    acpi_os_create_mutex(&obj_desc->mutex.
-							 os_mutex);
-				if (ACPI_FAILURE(status)) {
-					acpi_ut_remove_reference(obj_desc);
-					goto unlock_and_exit;
-				}
-
-				/* Special case for ACPI Global Lock */
-
-				if (ACPI_STRCMP(init_val->name, "_GL_") == 0) {
-					acpi_gbl_global_lock_mutex = obj_desc;
-
-					/* Create additional counting semaphore for global lock */
-
-					status =
-					    acpi_os_create_semaphore(1, 0,
-								     &acpi_gbl_global_lock_semaphore);
-					if (ACPI_FAILURE(status)) {
-						acpi_ut_remove_reference
-						    (obj_desc);
-						goto unlock_and_exit;
-					}
-				}
-				break;
-
-			default:
-
-				ACPI_ERROR((AE_INFO,
-					    "Unsupported initial type value %X",
-					    init_val->type));
-				acpi_ut_remove_reference(obj_desc);
-				obj_desc = NULL;
-				continue;
-			}
-
-			/* Store pointer to value descriptor in the Node */
-
-			status = acpi_ns_attach_object(new_node, obj_desc,
-						       ACPI_GET_OBJECT_TYPE
-						       (obj_desc));
-
-			/* Remove local reference to the object */
-
-			acpi_ut_remove_reference(obj_desc);
-		}
-	}
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-
-	/* Save a handle to "_GPE", it is always present */
-
-	if (ACPI_SUCCESS(status)) {
-		status = acpi_ns_get_node(NULL, "\\_GPE", ACPI_NS_NO_UPSEARCH,
-					  &acpi_gbl_fadt_gpe_device);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_lookup
- *
- * PARAMETERS:  scope_info      - Current scope info block
- *              Pathname        - Search pathname, in internal format
- *                                (as represented in the AML stream)
- *              Type            - Type associated with name
- *              interpreter_mode - IMODE_LOAD_PASS2 => add name if not found
- *              Flags           - Flags describing the search restrictions
- *              walk_state      - Current state of the walk
- *              return_node     - Where the Node is placed (if found
- *                                or created successfully)
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Find or enter the passed name in the name space.
- *              Log an error if name not found in Exec mode.
- *
- * MUTEX:       Assumes namespace is locked.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_lookup(union acpi_generic_state *scope_info,
-	       char *pathname,
-	       acpi_object_type type,
-	       acpi_interpreter_mode interpreter_mode,
-	       u32 flags,
-	       struct acpi_walk_state *walk_state,
-	       struct acpi_namespace_node **return_node)
-{
-	acpi_status status;
-	char *path = pathname;
-	struct acpi_namespace_node *prefix_node;
-	struct acpi_namespace_node *current_node = NULL;
-	struct acpi_namespace_node *this_node = NULL;
-	u32 num_segments;
-	u32 num_carats;
-	acpi_name simple_name;
-	acpi_object_type type_to_check_for;
-	acpi_object_type this_search_type;
-	u32 search_parent_flag = ACPI_NS_SEARCH_PARENT;
-	u32 local_flags;
-
-	ACPI_FUNCTION_TRACE(ns_lookup);
-
-	if (!return_node) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	local_flags = flags & ~(ACPI_NS_ERROR_IF_FOUND | ACPI_NS_SEARCH_PARENT);
-	*return_node = ACPI_ENTRY_NOT_FOUND;
-	acpi_gbl_ns_lookup_count++;
-
-	if (!acpi_gbl_root_node) {
-		return_ACPI_STATUS(AE_NO_NAMESPACE);
-	}
-
-	/*
-	 * Get the prefix scope.
-	 * A null scope means use the root scope
-	 */
-	if ((!scope_info) || (!scope_info->scope.node)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-				  "Null scope prefix, using root node (%p)\n",
-				  acpi_gbl_root_node));
-
-		prefix_node = acpi_gbl_root_node;
-	} else {
-		prefix_node = scope_info->scope.node;
-		if (ACPI_GET_DESCRIPTOR_TYPE(prefix_node) !=
-		    ACPI_DESC_TYPE_NAMED) {
-			ACPI_ERROR((AE_INFO, "%p is not a namespace node [%s]",
-				    prefix_node,
-				    acpi_ut_get_descriptor_name(prefix_node)));
-			return_ACPI_STATUS(AE_AML_INTERNAL);
-		}
-
-		if (!(flags & ACPI_NS_PREFIX_IS_SCOPE)) {
-			/*
-			 * This node might not be an actual "scope" node (such as a
-			 * Device/Method, etc.). It could be a Package or other object node.
-			 * Back up the tree to find the containing scope node.
-			 */
-			while (!acpi_ns_opens_scope(prefix_node->type) &&
-			       prefix_node->type != ACPI_TYPE_ANY) {
-				prefix_node =
-				    acpi_ns_get_parent_node(prefix_node);
-			}
-		}
-	}
-
-	/* Save type   TBD: may no longer be necessary */
-
-	type_to_check_for = type;
-
-	/*
-	 * Begin examination of the actual pathname
-	 */
-	if (!pathname) {
-
-		/* A Null name_path is allowed and refers to the root */
-
-		num_segments = 0;
-		this_node = acpi_gbl_root_node;
-		path = "";
-
-		ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-				  "Null Pathname (Zero segments), Flags=%X\n",
-				  flags));
-	} else {
-		/*
-		 * Name pointer is valid (and must be in internal name format)
-		 *
-		 * Check for scope prefixes:
-		 *
-		 * As represented in the AML stream, a namepath consists of an
-		 * optional scope prefix followed by a name segment part.
-		 *
-		 * If present, the scope prefix is either a Root Prefix (in
-		 * which case the name is fully qualified), or one or more
-		 * Parent Prefixes (in which case the name's scope is relative
-		 * to the current scope).
-		 */
-		if (*path == (u8) AML_ROOT_PREFIX) {
-
-			/* Pathname is fully qualified, start from the root */
-
-			this_node = acpi_gbl_root_node;
-			search_parent_flag = ACPI_NS_NO_UPSEARCH;
-
-			/* Point to name segment part */
-
-			path++;
-
-			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-					  "Path is absolute from root [%p]\n",
-					  this_node));
-		} else {
-			/* Pathname is relative to current scope, start there */
-
-			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-					  "Searching relative to prefix scope [%4.4s] (%p)\n",
-					  acpi_ut_get_node_name(prefix_node),
-					  prefix_node));
-
-			/*
-			 * Handle multiple Parent Prefixes (carat) by just getting
-			 * the parent node for each prefix instance.
-			 */
-			this_node = prefix_node;
-			num_carats = 0;
-			while (*path == (u8) AML_PARENT_PREFIX) {
-
-				/* Name is fully qualified, no search rules apply */
-
-				search_parent_flag = ACPI_NS_NO_UPSEARCH;
-				/*
-				 * Point past this prefix to the name segment
-				 * part or the next Parent Prefix
-				 */
-				path++;
-
-				/* Backup to the parent node */
-
-				num_carats++;
-				this_node = acpi_ns_get_parent_node(this_node);
-				if (!this_node) {
-
-					/* Current scope has no parent scope */
-
-					ACPI_ERROR((AE_INFO,
-						    "ACPI path has too many parent prefixes (^) - reached beyond root node"));
-					return_ACPI_STATUS(AE_NOT_FOUND);
-				}
-			}
-
-			if (search_parent_flag == ACPI_NS_NO_UPSEARCH) {
-				ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-						  "Search scope is [%4.4s], path has %d carat(s)\n",
-						  acpi_ut_get_node_name
-						  (this_node), num_carats));
-			}
-		}
-
-		/*
-		 * Determine the number of ACPI name segments in this pathname.
-		 *
-		 * The segment part consists of either:
-		 *  - A Null name segment (0)
-		 *  - A dual_name_prefix followed by two 4-byte name segments
-		 *  - A multi_name_prefix followed by a byte indicating the
-		 *      number of segments and the segments themselves.
-		 *  - A single 4-byte name segment
-		 *
-		 * Examine the name prefix opcode, if any, to determine the number of
-		 * segments.
-		 */
-		switch (*path) {
-		case 0:
-			/*
-			 * Null name after a root or parent prefix. We already
-			 * have the correct target node and there are no name segments.
-			 */
-			num_segments = 0;
-			type = this_node->type;
-
-			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-					  "Prefix-only Pathname (Zero name segments), Flags=%X\n",
-					  flags));
-			break;
-
-		case AML_DUAL_NAME_PREFIX:
-
-			/* More than one name_seg, search rules do not apply */
-
-			search_parent_flag = ACPI_NS_NO_UPSEARCH;
-
-			/* Two segments, point to first name segment */
-
-			num_segments = 2;
-			path++;
-
-			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-					  "Dual Pathname (2 segments, Flags=%X)\n",
-					  flags));
-			break;
-
-		case AML_MULTI_NAME_PREFIX_OP:
-
-			/* More than one name_seg, search rules do not apply */
-
-			search_parent_flag = ACPI_NS_NO_UPSEARCH;
-
-			/* Extract segment count, point to first name segment */
-
-			path++;
-			num_segments = (u32) (u8) *path;
-			path++;
-
-			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-					  "Multi Pathname (%d Segments, Flags=%X)\n",
-					  num_segments, flags));
-			break;
-
-		default:
-			/*
-			 * Not a Null name, no Dual or Multi prefix, hence there is
-			 * only one name segment and Pathname is already pointing to it.
-			 */
-			num_segments = 1;
-
-			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-					  "Simple Pathname (1 segment, Flags=%X)\n",
-					  flags));
-			break;
-		}
-
-		ACPI_DEBUG_EXEC(acpi_ns_print_pathname(num_segments, path));
-	}
-
-	/*
-	 * Search namespace for each segment of the name.  Loop through and
-	 * verify (or add to the namespace) each name segment.
-	 *
-	 * The object type is significant only at the last name
-	 * segment.  (We don't care about the types along the path, only
-	 * the type of the final target object.)
-	 */
-	this_search_type = ACPI_TYPE_ANY;
-	current_node = this_node;
-	while (num_segments && current_node) {
-		num_segments--;
-		if (!num_segments) {
-			/*
-			 * This is the last segment, enable typechecking
-			 */
-			this_search_type = type;
-
-			/*
-			 * Only allow automatic parent search (search rules) if the caller
-			 * requested it AND we have a single, non-fully-qualified name_seg
-			 */
-			if ((search_parent_flag != ACPI_NS_NO_UPSEARCH) &&
-			    (flags & ACPI_NS_SEARCH_PARENT)) {
-				local_flags |= ACPI_NS_SEARCH_PARENT;
-			}
-
-			/* Set error flag according to caller */
-
-			if (flags & ACPI_NS_ERROR_IF_FOUND) {
-				local_flags |= ACPI_NS_ERROR_IF_FOUND;
-			}
-		}
-
-		/* Extract one ACPI name from the front of the pathname */
-
-		ACPI_MOVE_32_TO_32(&simple_name, path);
-
-		/* Try to find the single (4 character) ACPI name */
-
-		status =
-		    acpi_ns_search_and_enter(simple_name, walk_state,
-					     current_node, interpreter_mode,
-					     this_search_type, local_flags,
-					     &this_node);
-		if (ACPI_FAILURE(status)) {
-			if (status == AE_NOT_FOUND) {
-
-				/* Name not found in ACPI namespace */
-
-				ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-						  "Name [%4.4s] not found in scope [%4.4s] %p\n",
-						  (char *)&simple_name,
-						  (char *)&current_node->name,
-						  current_node));
-			}
-
-			*return_node = this_node;
-			return_ACPI_STATUS(status);
-		}
-
-		/* More segments to follow? */
-
-		if (num_segments > 0) {
-			/*
-			 * If we have an alias to an object that opens a scope (such as a
-			 * device or processor), we need to dereference the alias here so that
-			 * we can access any children of the original node (via the remaining
-			 * segments).
-			 */
-			if (this_node->type == ACPI_TYPE_LOCAL_ALIAS) {
-				if (acpi_ns_opens_scope
-				    (((struct acpi_namespace_node *)this_node->
-				      object)->type)) {
-					this_node =
-					    (struct acpi_namespace_node *)
-					    this_node->object;
-				}
-			}
-		}
-
-		/* Special handling for the last segment (num_segments == 0) */
-
-		else {
-			/*
-			 * Sanity typecheck of the target object:
-			 *
-			 * If 1) This is the last segment (num_segments == 0)
-			 *    2) And we are looking for a specific type
-			 *       (Not checking for TYPE_ANY)
-			 *    3) Which is not an alias
-			 *    4) Which is not a local type (TYPE_SCOPE)
-			 *    5) And the type of target object is known (not TYPE_ANY)
-			 *    6) And target object does not match what we are looking for
-			 *
-			 * Then we have a type mismatch. Just warn and ignore it.
-			 */
-			if ((type_to_check_for != ACPI_TYPE_ANY) &&
-			    (type_to_check_for != ACPI_TYPE_LOCAL_ALIAS) &&
-			    (type_to_check_for != ACPI_TYPE_LOCAL_METHOD_ALIAS)
-			    && (type_to_check_for != ACPI_TYPE_LOCAL_SCOPE)
-			    && (this_node->type != ACPI_TYPE_ANY)
-			    && (this_node->type != type_to_check_for)) {
-
-				/* Complain about a type mismatch */
-
-				ACPI_WARNING((AE_INFO,
-					      "NsLookup: Type mismatch on %4.4s (%s), searching for (%s)",
-					      ACPI_CAST_PTR(char, &simple_name),
-					      acpi_ut_get_type_name(this_node->
-								    type),
-					      acpi_ut_get_type_name
-					      (type_to_check_for)));
-			}
-
-			/*
-			 * If this is the last name segment and we are not looking for a
-			 * specific type, but the type of found object is known, use that type
-			 * to (later) see if it opens a scope.
-			 */
-			if (type == ACPI_TYPE_ANY) {
-				type = this_node->type;
-			}
-		}
-
-		/* Point to next name segment and make this node current */
-
-		path += ACPI_NAME_SIZE;
-		current_node = this_node;
-	}
-
-	/*
-	 * Always check if we need to open a new scope
-	 */
-	if (!(flags & ACPI_NS_DONT_OPEN_SCOPE) && (walk_state)) {
-		/*
-		 * If entry is a type which opens a scope, push the new scope on the
-		 * scope stack.
-		 */
-		if (acpi_ns_opens_scope(type)) {
-			status =
-			    acpi_ds_scope_stack_push(this_node, type,
-						     walk_state);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-		}
-	}
-
-	*return_node = this_node;
-	return_ACPI_STATUS(AE_OK);
-}
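
As a reading aid for the prefix handling in acpi_ns_lookup() above, here is roughly what an internal (AML-format) pathname looks like. The example paths and arrays are illustrative, not taken from the deleted file; the byte values follow the standard AML encodings (AML_ROOT_PREFIX = '\', AML_PARENT_PREFIX = '^', AML_DUAL_NAME_PREFIX = '.', AML_MULTI_NAME_PREFIX_OP = '/').

/* External form:  \_SB_.PCI0.ISA_   (fully qualified, three segments) */
static const char example_absolute_path[] = {
	'\\',			/* AML_ROOT_PREFIX: absolute, no upsearch */
	'/',			/* AML_MULTI_NAME_PREFIX_OP */
	3,			/* segment count byte read by acpi_ns_lookup() */
	'_', 'S', 'B', '_',	/* three ACPI_NAME_SIZE (4-byte) segments */
	'P', 'C', 'I', '0',
	'I', 'S', 'A', '_',
};

/* External form:  ^^FOO_.BAR_   (relative, two parent prefixes, two segments) */
static const char example_relative_path[] = {
	'^', '^',		/* two AML_PARENT_PREFIX carats */
	'.',			/* AML_DUAL_NAME_PREFIX: exactly two segments follow */
	'F', 'O', 'O', '_',
	'B', 'A', 'R', '_',
};
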
diff --git a/drivers/acpi/namespace/nsalloc.c b/drivers/acpi/namespace/nsalloc.c
deleted file mode 100644
index 3a1740a..0000000
--- a/drivers/acpi/namespace/nsalloc.c
+++ /dev/null
@@ -1,496 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: nsalloc - Namespace allocation and deletion utilities
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nsalloc")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_create_node
- *
- * PARAMETERS:  Name            - Name of the new node (4 char ACPI name)
- *
- * RETURN:      New namespace node (Null on failure)
- *
- * DESCRIPTION: Create a namespace node
- *
- ******************************************************************************/
-struct acpi_namespace_node *acpi_ns_create_node(u32 name)
-{
-	struct acpi_namespace_node *node;
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-	u32 temp;
-#endif
-
-	ACPI_FUNCTION_TRACE(ns_create_node);
-
-	node = acpi_os_acquire_object(acpi_gbl_namespace_cache);
-	if (!node) {
-		return_PTR(NULL);
-	}
-
-	ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_allocated++);
-
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-	temp =
-	    acpi_gbl_ns_node_list->total_allocated -
-	    acpi_gbl_ns_node_list->total_freed;
-	if (temp > acpi_gbl_ns_node_list->max_occupied) {
-		acpi_gbl_ns_node_list->max_occupied = temp;
-	}
-#endif
-
-	node->name.integer = name;
-	ACPI_SET_DESCRIPTOR_TYPE(node, ACPI_DESC_TYPE_NAMED);
-	return_PTR(node);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_delete_node
- *
- * PARAMETERS:  Node            - Node to be deleted
- *
- * RETURN:      None
- *
- * DESCRIPTION: Delete a namespace node
- *
- ******************************************************************************/
-
-void acpi_ns_delete_node(struct acpi_namespace_node *node)
-{
-	struct acpi_namespace_node *parent_node;
-	struct acpi_namespace_node *prev_node;
-	struct acpi_namespace_node *next_node;
-
-	ACPI_FUNCTION_TRACE_PTR(ns_delete_node, node);
-
-	parent_node = acpi_ns_get_parent_node(node);
-
-	prev_node = NULL;
-	next_node = parent_node->child;
-
-	/* Find the node that is the previous peer in the parent's child list */
-
-	while (next_node != node) {
-		prev_node = next_node;
-		next_node = prev_node->peer;
-	}
-
-	if (prev_node) {
-
-		/* Node is not first child, unlink it */
-
-		prev_node->peer = next_node->peer;
-		if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
-			prev_node->flags |= ANOBJ_END_OF_PEER_LIST;
-		}
-	} else {
-		/* Node is first child (has no previous peer) */
-
-		if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
-
-			/* No peers at all */
-
-			parent_node->child = NULL;
-		} else {	/* Link peer list to parent */
-
-			parent_node->child = next_node->peer;
-		}
-	}
-
-	ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_freed++);
-
-	/*
-	 * Detach an object if there is one, then delete the node
-	 */
-	acpi_ns_detach_object(node);
-	(void)acpi_os_release_object(acpi_gbl_namespace_cache, node);
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_install_node
- *
- * PARAMETERS:  walk_state      - Current state of the walk
- *              parent_node     - The parent of the new Node
- *              Node            - The new Node to install
- *              Type            - ACPI object type of the new Node
- *
- * RETURN:      None
- *
- * DESCRIPTION: Initialize a new namespace node and install it amongst
- *              its peers.
- *
- *              Note: Current namespace lookup is linear search. This appears
- *              to be sufficient as namespace searches consume only a small
- *              fraction of the execution time of the ACPI subsystem.
- *
- ******************************************************************************/
-
-void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namespace_node *parent_node,	/* Parent */
-			  struct acpi_namespace_node *node,	/* New Child */
-			  acpi_object_type type)
-{
-	acpi_owner_id owner_id = 0;
-	struct acpi_namespace_node *child_node;
-
-	ACPI_FUNCTION_TRACE(ns_install_node);
-
-	/*
-	 * Get the owner ID from the Walk state
-	 * The owner ID is used to track table deletion and
-	 * deletion of objects created by methods
-	 */
-	if (walk_state) {
-		owner_id = walk_state->owner_id;
-	}
-
-	/* Link the new entry into the parent and existing children */
-
-	child_node = parent_node->child;
-	if (!child_node) {
-		parent_node->child = node;
-		node->flags |= ANOBJ_END_OF_PEER_LIST;
-		node->peer = parent_node;
-	} else {
-		while (!(child_node->flags & ANOBJ_END_OF_PEER_LIST)) {
-			child_node = child_node->peer;
-		}
-
-		child_node->peer = node;
-
-		/* Clear end-of-list flag */
-
-		child_node->flags &= ~ANOBJ_END_OF_PEER_LIST;
-		node->flags |= ANOBJ_END_OF_PEER_LIST;
-		node->peer = parent_node;
-	}
-
-	/* Init the new entry */
-
-	node->owner_id = owner_id;
-	node->type = (u8) type;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-			  "%4.4s (%s) [Node %p Owner %X] added to %4.4s (%s) [Node %p]\n",
-			  acpi_ut_get_node_name(node),
-			  acpi_ut_get_type_name(node->type), node, owner_id,
-			  acpi_ut_get_node_name(parent_node),
-			  acpi_ut_get_type_name(parent_node->type),
-			  parent_node));
-
-	return_VOID;
-}
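
The linkage built by acpi_ns_install_node() above can be hard to see from the code alone; the diagram below is an editorial illustration of the resulting sibling list, not part of the deleted file.

/*
 * parent_node->Child --> [A] --Peer--> [B] --Peer--> [C] --Peer--> parent_node
 *
 * Each child's Peer points to the next sibling, except the last child ([C]):
 * its ANOBJ_END_OF_PEER_LIST flag is set and its Peer points back at the
 * parent, which is how acpi_ns_delete_children() knows where the list ends.
 */
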
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_delete_children
- *
- * PARAMETERS:  parent_node     - Delete this object's children
- *
- * RETURN:      None.
- *
- * DESCRIPTION: Delete all children of the parent object. In other words,
- *              deletes a "scope".
- *
- ******************************************************************************/
-
-void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
-{
-	struct acpi_namespace_node *child_node;
-	struct acpi_namespace_node *next_node;
-	u8 flags;
-
-	ACPI_FUNCTION_TRACE_PTR(ns_delete_children, parent_node);
-
-	if (!parent_node) {
-		return_VOID;
-	}
-
-	/* If no children, all done! */
-
-	child_node = parent_node->child;
-	if (!child_node) {
-		return_VOID;
-	}
-
-	/*
-	 * Deallocate all children at this level
-	 */
-	do {
-
-		/* Get the things we need */
-
-		next_node = child_node->peer;
-		flags = child_node->flags;
-
-		/* Grandchildren should have all been deleted already */
-
-		if (child_node->child) {
-			ACPI_ERROR((AE_INFO, "Found a grandchild! P=%p C=%p",
-				    parent_node, child_node));
-		}
-
-		/* Now we can free this child object */
-
-		ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_freed++);
-
-		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  "Object %p, Remaining %X\n", child_node,
-				  acpi_gbl_current_node_count));
-
-		/*
-		 * Detach an object if there is one, then free the child node
-		 */
-		acpi_ns_detach_object(child_node);
-
-		/* Now we can delete the node */
-
-		(void)acpi_os_release_object(acpi_gbl_namespace_cache,
-					     child_node);
-
-		/* And move on to the next child in the list */
-
-		child_node = next_node;
-
-	} while (!(flags & ANOBJ_END_OF_PEER_LIST));
-
-	/* Clear the parent's child pointer */
-
-	parent_node->child = NULL;
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_delete_namespace_subtree
- *
- * PARAMETERS:  parent_node     - Root of the subtree to be deleted
- *
- * RETURN:      None.
- *
- * DESCRIPTION: Delete a subtree of the namespace.  This includes all objects
- *              stored within the subtree.
- *
- ******************************************************************************/
-
-void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
-{
-	struct acpi_namespace_node *child_node = NULL;
-	u32 level = 1;
-
-	ACPI_FUNCTION_TRACE(ns_delete_namespace_subtree);
-
-	if (!parent_node) {
-		return_VOID;
-	}
-
-	/*
-	 * Traverse the tree of objects until we bubble back up
-	 * to where we started.
-	 */
-	while (level > 0) {
-
-		/* Get the next node in this scope (NULL if none) */
-
-		child_node =
-		    acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
-					  child_node);
-		if (child_node) {
-
-			/* Found a child node - detach any attached object */
-
-			acpi_ns_detach_object(child_node);
-
-			/* Check if this node has any children */
-
-			if (acpi_ns_get_next_node
-			    (ACPI_TYPE_ANY, child_node, NULL)) {
-				/*
-				 * There is at least one child of this node,
-				 * visit the node
-				 */
-				level++;
-				parent_node = child_node;
-				child_node = NULL;
-			}
-		} else {
-			/*
-			 * No more children of this parent node.
-			 * Move up to the grandparent.
-			 */
-			level--;
-
-			/*
-			 * Now delete all of the children of this parent
-			 * all at the same time.
-			 */
-			acpi_ns_delete_children(parent_node);
-
-			/* New "last child" is this parent node */
-
-			child_node = parent_node;
-
-			/* Move up the tree to the grandparent */
-
-			parent_node = acpi_ns_get_parent_node(parent_node);
-		}
-	}
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_delete_namespace_by_owner
- *
- * PARAMETERS:  owner_id    - All nodes with this owner will be deleted
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Delete entries within the namespace that are owned by a
- *              specific ID.  Used to delete entire ACPI tables.  All
- *              reference counts are updated.
- *
- * MUTEX:       Locks namespace during deletion walk.
- *
- ******************************************************************************/
-
-void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)
-{
-	struct acpi_namespace_node *child_node;
-	struct acpi_namespace_node *deletion_node;
-	struct acpi_namespace_node *parent_node;
-	u32 level;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_U32(ns_delete_namespace_by_owner, owner_id);
-
-	if (owner_id == 0) {
-		return_VOID;
-	}
-
-	/* Lock namespace for possible update */
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return_VOID;
-	}
-
-	deletion_node = NULL;
-	parent_node = acpi_gbl_root_node;
-	child_node = NULL;
-	level = 1;
-
-	/*
-	 * Traverse the tree of nodes until we bubble back up
-	 * to where we started.
-	 */
-	while (level > 0) {
-		/*
-		 * Get the next child of this parent node. When child_node is NULL,
-		 * the first child of the parent is returned
-		 */
-		child_node =
-		    acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
-					  child_node);
-
-		if (deletion_node) {
-			acpi_ns_delete_children(deletion_node);
-			acpi_ns_delete_node(deletion_node);
-			deletion_node = NULL;
-		}
-
-		if (child_node) {
-			if (child_node->owner_id == owner_id) {
-
-				/* Found a matching child node - detach any attached object */
-
-				acpi_ns_detach_object(child_node);
-			}
-
-			/* Check if this node has any children */
-
-			if (acpi_ns_get_next_node
-			    (ACPI_TYPE_ANY, child_node, NULL)) {
-				/*
-				 * There is at least one child of this node,
-				 * visit the node
-				 */
-				level++;
-				parent_node = child_node;
-				child_node = NULL;
-			} else if (child_node->owner_id == owner_id) {
-				deletion_node = child_node;
-			}
-		} else {
-			/*
-			 * No more children of this parent node.
-			 * Move up to the grandparent.
-			 */
-			level--;
-			if (level != 0) {
-				if (parent_node->owner_id == owner_id) {
-					deletion_node = parent_node;
-				}
-			}
-
-			/* New "last child" is this parent node */
-
-			child_node = parent_node;
-
-			/* Move up the tree to the grandparent */
-
-			parent_node = acpi_ns_get_parent_node(parent_node);
-		}
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_VOID;
-}
diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/namespace/nsdump.c
deleted file mode 100644
index cc0ae39..0000000
--- a/drivers/acpi/namespace/nsdump.c
+++ /dev/null
@@ -1,708 +0,0 @@
-/******************************************************************************
- *
- * Module Name: nsdump - table dumping routines for debug
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nsdump")
-
-/* Local prototypes */
-#ifdef ACPI_OBSOLETE_FUNCTIONS
-void acpi_ns_dump_root_devices(void);
-
-static acpi_status
-acpi_ns_dump_one_device(acpi_handle obj_handle,
-			u32 level, void *context, void **return_value);
-#endif
-
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_print_pathname
- *
- * PARAMETERS:  num_segments        - Number of ACPI name segments
- *              Pathname            - The compressed (internal) path
- *
- * RETURN:      None
- *
- * DESCRIPTION: Print an object's full namespace pathname
- *
- ******************************************************************************/
-
-void acpi_ns_print_pathname(u32 num_segments, char *pathname)
-{
-	u32 i;
-
-	ACPI_FUNCTION_NAME(ns_print_pathname);
-
-	if (!(acpi_dbg_level & ACPI_LV_NAMES)
-	    || !(acpi_dbg_layer & ACPI_NAMESPACE)) {
-		return;
-	}
-
-	/* Print the entire name */
-
-	ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "["));
-
-	while (num_segments) {
-		for (i = 0; i < 4; i++) {
-			ACPI_IS_PRINT(pathname[i]) ?
-			    acpi_os_printf("%c", pathname[i]) :
-			    acpi_os_printf("?");
-		}
-
-		pathname += ACPI_NAME_SIZE;
-		num_segments--;
-		if (num_segments) {
-			acpi_os_printf(".");
-		}
-	}
-
-	acpi_os_printf("]\n");
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_dump_pathname
- *
- * PARAMETERS:  Handle              - Object
- *              Msg                 - Prefix message
- *              Level               - Desired debug level
- *              Component           - Caller's component ID
- *
- * RETURN:      None
- *
- * DESCRIPTION: Print an object's full namespace pathname
- *              Manages allocation/freeing of a pathname buffer
- *
- ******************************************************************************/
-
-void
-acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
-{
-
-	ACPI_FUNCTION_TRACE(ns_dump_pathname);
-
-	/* Do this only if the requested debug level and component are enabled */
-
-	if (!(acpi_dbg_level & level) || !(acpi_dbg_layer & component)) {
-		return_VOID;
-	}
-
-	/* Convert handle to a full pathname and print it (with supplied message) */
-
-	acpi_ns_print_node_pathname(handle, msg);
-	acpi_os_printf("\n");
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_dump_one_object
- *
- * PARAMETERS:  obj_handle          - Node to be dumped
- *              Level               - Nesting level of the handle
- *              Context             - Passed into walk_namespace
- *              return_value        - Not used
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Dump a single Node
- *              This procedure is a user_function called by acpi_ns_walk_namespace.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_dump_one_object(acpi_handle obj_handle,
-			u32 level, void *context, void **return_value)
-{
-	struct acpi_walk_info *info = (struct acpi_walk_info *)context;
-	struct acpi_namespace_node *this_node;
-	union acpi_operand_object *obj_desc = NULL;
-	acpi_object_type obj_type;
-	acpi_object_type type;
-	u32 bytes_to_dump;
-	u32 dbg_level;
-	u32 i;
-
-	ACPI_FUNCTION_NAME(ns_dump_one_object);
-
-	/* Is output enabled? */
-
-	if (!(acpi_dbg_level & info->debug_level)) {
-		return (AE_OK);
-	}
-
-	if (!obj_handle) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Null object handle\n"));
-		return (AE_OK);
-	}
-
-	this_node = acpi_ns_map_handle_to_node(obj_handle);
-	type = this_node->type;
-
-	/* Check if the owner matches */
-
-	if ((info->owner_id != ACPI_OWNER_ID_MAX) &&
-	    (info->owner_id != this_node->owner_id)) {
-		return (AE_OK);
-	}
-
-	if (!(info->display_type & ACPI_DISPLAY_SHORT)) {
-
-		/* Indent the object according to the level */
-
-		acpi_os_printf("%2d%*s", (u32) level - 1, (int)level * 2, " ");
-
-		/* Check the node type and name */
-
-		if (type > ACPI_TYPE_LOCAL_MAX) {
-			ACPI_WARNING((AE_INFO, "Invalid ACPI Object Type %08X",
-				      type));
-		}
-
-		if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
-			this_node->name.integer =
-			    acpi_ut_repair_name(this_node->name.ascii);
-
-			ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X",
-				      this_node->name.integer));
-		}
-
-		acpi_os_printf("%4.4s", acpi_ut_get_node_name(this_node));
-	}
-
-	/*
-	 * Now we can print out the pertinent information
-	 */
-	acpi_os_printf(" %-12s %p %2.2X ",
-		       acpi_ut_get_type_name(type), this_node,
-		       this_node->owner_id);
-
-	dbg_level = acpi_dbg_level;
-	acpi_dbg_level = 0;
-	obj_desc = acpi_ns_get_attached_object(this_node);
-	acpi_dbg_level = dbg_level;
-
-	/* Temp nodes are those nodes created by a control method */
-
-	if (this_node->flags & ANOBJ_TEMPORARY) {
-		acpi_os_printf("(T) ");
-	}
-
-	switch (info->display_type & ACPI_DISPLAY_MASK) {
-	case ACPI_DISPLAY_SUMMARY:
-
-		if (!obj_desc) {
-
-			/* No attached object, we are done */
-
-			acpi_os_printf("\n");
-			return (AE_OK);
-		}
-
-		switch (type) {
-		case ACPI_TYPE_PROCESSOR:
-
-			acpi_os_printf("ID %X Len %.4X Addr %p\n",
-				       obj_desc->processor.proc_id,
-				       obj_desc->processor.length,
-				       ACPI_CAST_PTR(void,
-						     obj_desc->processor.
-						     address));
-			break;
-
-		case ACPI_TYPE_DEVICE:
-
-			acpi_os_printf("Notify Object: %p\n", obj_desc);
-			break;
-
-		case ACPI_TYPE_METHOD:
-
-			acpi_os_printf("Args %X Len %.4X Aml %p\n",
-				       (u32) obj_desc->method.param_count,
-				       obj_desc->method.aml_length,
-				       obj_desc->method.aml_start);
-			break;
-
-		case ACPI_TYPE_INTEGER:
-
-			acpi_os_printf("= %8.8X%8.8X\n",
-				       ACPI_FORMAT_UINT64(obj_desc->integer.
-							  value));
-			break;
-
-		case ACPI_TYPE_PACKAGE:
-
-			if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
-				acpi_os_printf("Elements %.2X\n",
-					       obj_desc->package.count);
-			} else {
-				acpi_os_printf("[Length not yet evaluated]\n");
-			}
-			break;
-
-		case ACPI_TYPE_BUFFER:
-
-			if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
-				acpi_os_printf("Len %.2X",
-					       obj_desc->buffer.length);
-
-				/* Dump some of the buffer */
-
-				if (obj_desc->buffer.length > 0) {
-					acpi_os_printf(" =");
-					for (i = 0;
-					     (i < obj_desc->buffer.length
-					      && i < 12); i++) {
-						acpi_os_printf(" %.2hX",
-							       obj_desc->buffer.
-							       pointer[i]);
-					}
-				}
-				acpi_os_printf("\n");
-			} else {
-				acpi_os_printf("[Length not yet evaluated]\n");
-			}
-			break;
-
-		case ACPI_TYPE_STRING:
-
-			acpi_os_printf("Len %.2X ", obj_desc->string.length);
-			acpi_ut_print_string(obj_desc->string.pointer, 32);
-			acpi_os_printf("\n");
-			break;
-
-		case ACPI_TYPE_REGION:
-
-			acpi_os_printf("[%s]",
-				       acpi_ut_get_region_name(obj_desc->region.
-							       space_id));
-			if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
-				acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
-					       ACPI_FORMAT_NATIVE_UINT
-					       (obj_desc->region.address),
-					       obj_desc->region.length);
-			} else {
-				acpi_os_printf
-				    (" [Address/Length not yet evaluated]\n");
-			}
-			break;
-
-		case ACPI_TYPE_LOCAL_REFERENCE:
-
-			acpi_os_printf("[%s]\n",
-				       acpi_ut_get_reference_name(obj_desc));
-			break;
-
-		case ACPI_TYPE_BUFFER_FIELD:
-
-			if (obj_desc->buffer_field.buffer_obj &&
-			    obj_desc->buffer_field.buffer_obj->buffer.node) {
-				acpi_os_printf("Buf [%4.4s]",
-					       acpi_ut_get_node_name(obj_desc->
-								     buffer_field.
-								     buffer_obj->
-								     buffer.
-								     node));
-			}
-			break;
-
-		case ACPI_TYPE_LOCAL_REGION_FIELD:
-
-			acpi_os_printf("Rgn [%4.4s]",
-				       acpi_ut_get_node_name(obj_desc->
-							     common_field.
-							     region_obj->region.
-							     node));
-			break;
-
-		case ACPI_TYPE_LOCAL_BANK_FIELD:
-
-			acpi_os_printf("Rgn [%4.4s] Bnk [%4.4s]",
-				       acpi_ut_get_node_name(obj_desc->
-							     common_field.
-							     region_obj->region.
-							     node),
-				       acpi_ut_get_node_name(obj_desc->
-							     bank_field.
-							     bank_obj->
-							     common_field.
-							     node));
-			break;
-
-		case ACPI_TYPE_LOCAL_INDEX_FIELD:
-
-			acpi_os_printf("Idx [%4.4s] Dat [%4.4s]",
-				       acpi_ut_get_node_name(obj_desc->
-							     index_field.
-							     index_obj->
-							     common_field.node),
-				       acpi_ut_get_node_name(obj_desc->
-							     index_field.
-							     data_obj->
-							     common_field.
-							     node));
-			break;
-
-		case ACPI_TYPE_LOCAL_ALIAS:
-		case ACPI_TYPE_LOCAL_METHOD_ALIAS:
-
-			acpi_os_printf("Target %4.4s (%p)\n",
-				       acpi_ut_get_node_name(obj_desc),
-				       obj_desc);
-			break;
-
-		default:
-
-			acpi_os_printf("Object %p\n", obj_desc);
-			break;
-		}
-
-		/* Common field handling */
-
-		switch (type) {
-		case ACPI_TYPE_BUFFER_FIELD:
-		case ACPI_TYPE_LOCAL_REGION_FIELD:
-		case ACPI_TYPE_LOCAL_BANK_FIELD:
-		case ACPI_TYPE_LOCAL_INDEX_FIELD:
-
-			acpi_os_printf(" Off %.3X Len %.2X Acc %.2hd\n",
-				       (obj_desc->common_field.
-					base_byte_offset * 8)
-				       +
-				       obj_desc->common_field.
-				       start_field_bit_offset,
-				       obj_desc->common_field.bit_length,
-				       obj_desc->common_field.
-				       access_byte_width);
-			break;
-
-		default:
-			break;
-		}
-		break;
-
-	case ACPI_DISPLAY_OBJECTS:
-
-		acpi_os_printf("O:%p", obj_desc);
-		if (!obj_desc) {
-
-			/* No attached object, we are done */
-
-			acpi_os_printf("\n");
-			return (AE_OK);
-		}
-
-		acpi_os_printf("(R%d)", obj_desc->common.reference_count);
-
-		switch (type) {
-		case ACPI_TYPE_METHOD:
-
-			/* Name is a Method and its AML offset/length are set */
-
-			acpi_os_printf(" M:%p-%X\n", obj_desc->method.aml_start,
-				       obj_desc->method.aml_length);
-			break;
-
-		case ACPI_TYPE_INTEGER:
-
-			acpi_os_printf(" I:%8.8X%8.8X\n",
-				       ACPI_FORMAT_UINT64(obj_desc->integer.
-							  value));
-			break;
-
-		case ACPI_TYPE_STRING:
-
-			acpi_os_printf(" S:%p-%X\n", obj_desc->string.pointer,
-				       obj_desc->string.length);
-			break;
-
-		case ACPI_TYPE_BUFFER:
-
-			acpi_os_printf(" B:%p-%X\n", obj_desc->buffer.pointer,
-				       obj_desc->buffer.length);
-			break;
-
-		default:
-
-			acpi_os_printf("\n");
-			break;
-		}
-		break;
-
-	default:
-		acpi_os_printf("\n");
-		break;
-	}
-
-	/* If debug turned off, done */
-
-	if (!(acpi_dbg_level & ACPI_LV_VALUES)) {
-		return (AE_OK);
-	}
-
-	/* If there is an attached object, display it */
-
-	dbg_level = acpi_dbg_level;
-	acpi_dbg_level = 0;
-	obj_desc = acpi_ns_get_attached_object(this_node);
-	acpi_dbg_level = dbg_level;
-
-	/* Dump attached objects */
-
-	while (obj_desc) {
-		obj_type = ACPI_TYPE_INVALID;
-		acpi_os_printf("Attached Object %p: ", obj_desc);
-
-		/* Decode the type of attached object and dump the contents */
-
-		switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
-		case ACPI_DESC_TYPE_NAMED:
-
-			acpi_os_printf("(Ptr to Node)\n");
-			bytes_to_dump = sizeof(struct acpi_namespace_node);
-			ACPI_DUMP_BUFFER(obj_desc, bytes_to_dump);
-			break;
-
-		case ACPI_DESC_TYPE_OPERAND:
-
-			obj_type = ACPI_GET_OBJECT_TYPE(obj_desc);
-
-			if (obj_type > ACPI_TYPE_LOCAL_MAX) {
-				acpi_os_printf
-				    ("(Pointer to ACPI Object type %.2X [UNKNOWN])\n",
-				     obj_type);
-				bytes_to_dump = 32;
-			} else {
-				acpi_os_printf
-				    ("(Pointer to ACPI Object type %.2X [%s])\n",
-				     obj_type, acpi_ut_get_type_name(obj_type));
-				bytes_to_dump =
-				    sizeof(union acpi_operand_object);
-			}
-
-			ACPI_DUMP_BUFFER(obj_desc, bytes_to_dump);
-			break;
-
-		default:
-
-			break;
-		}
-
-		/* If value is NOT an internal object, we are done */
-
-		if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) !=
-		    ACPI_DESC_TYPE_OPERAND) {
-			goto cleanup;
-		}
-
-		/*
-		 * Valid object, get the pointer to next level, if any
-		 */
-		switch (obj_type) {
-		case ACPI_TYPE_BUFFER:
-		case ACPI_TYPE_STRING:
-			/*
-			 * NOTE: takes advantage of common fields between string/buffer
-			 */
-			bytes_to_dump = obj_desc->string.length;
-			obj_desc = (void *)obj_desc->string.pointer;
-			acpi_os_printf("(Buffer/String pointer %p length %X)\n",
-				       obj_desc, bytes_to_dump);
-			ACPI_DUMP_BUFFER(obj_desc, bytes_to_dump);
-			goto cleanup;
-
-		case ACPI_TYPE_BUFFER_FIELD:
-			obj_desc =
-			    (union acpi_operand_object *)obj_desc->buffer_field.
-			    buffer_obj;
-			break;
-
-		case ACPI_TYPE_PACKAGE:
-			obj_desc = (void *)obj_desc->package.elements;
-			break;
-
-		case ACPI_TYPE_METHOD:
-			obj_desc = (void *)obj_desc->method.aml_start;
-			break;
-
-		case ACPI_TYPE_LOCAL_REGION_FIELD:
-			obj_desc = (void *)obj_desc->field.region_obj;
-			break;
-
-		case ACPI_TYPE_LOCAL_BANK_FIELD:
-			obj_desc = (void *)obj_desc->bank_field.region_obj;
-			break;
-
-		case ACPI_TYPE_LOCAL_INDEX_FIELD:
-			obj_desc = (void *)obj_desc->index_field.index_obj;
-			break;
-
-		default:
-			goto cleanup;
-		}
-
-		obj_type = ACPI_TYPE_INVALID;	/* Terminate loop after next pass */
-	}
-
-      cleanup:
-	acpi_os_printf("\n");
-	return (AE_OK);
-}
-
-#ifdef ACPI_FUTURE_USAGE
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_dump_objects
- *
- * PARAMETERS:  Type                - Object type to be dumped
- *              display_type        - 0 or ACPI_DISPLAY_SUMMARY
- *              max_depth           - Maximum depth of dump. Use ACPI_UINT32_MAX
- *                                    for an effectively unlimited depth.
- *              owner_id            - Dump only objects owned by this ID.  Use
- *                                    ACPI_OWNER_ID_MAX to match all owners.
- *              start_handle        - Where in namespace to start/end search
- *
- * RETURN:      None
- *
- * DESCRIPTION: Dump typed objects within the loaded namespace.
- *              Uses acpi_ns_walk_namespace in conjunction with acpi_ns_dump_one_object.
- *
- ******************************************************************************/
-
-void
-acpi_ns_dump_objects(acpi_object_type type,
-		     u8 display_type,
-		     u32 max_depth,
-		     acpi_owner_id owner_id, acpi_handle start_handle)
-{
-	struct acpi_walk_info info;
-
-	ACPI_FUNCTION_ENTRY();
-
-	info.debug_level = ACPI_LV_TABLES;
-	info.owner_id = owner_id;
-	info.display_type = display_type;
-
-	(void)acpi_ns_walk_namespace(type, start_handle, max_depth,
-				     ACPI_NS_WALK_NO_UNLOCK |
-				     ACPI_NS_WALK_TEMP_NODES,
-				     acpi_ns_dump_one_object, (void *)&info,
-				     NULL);
-}
-#endif				/* ACPI_FUTURE_USAGE */
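
A note on actually seeing this output: acpi_ns_dump_one_object() returns early unless the matching debug level is set, and the attached-object dump additionally requires ACPI_LV_VALUES. The snippet below is a minimal, illustrative way to enable it; the function name is made up, while acpi_dbg_level, acpi_dbg_layer and the ACPI_LV_*/ACPI_NAMESPACE masks are the standard ACPICA globals and constants.

#include <acpi/acpi.h>

static void enable_namespace_dump(void)
{
	/* ACPI_LV_TABLES matches info.debug_level set by acpi_ns_dump_objects() */
	acpi_dbg_level |= ACPI_LV_TABLES | ACPI_LV_VALUES;

	/* The ACPI_DEBUG_PRINT() calls are also gated on the component layer */
	acpi_dbg_layer |= ACPI_NAMESPACE;
}
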
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_dump_entry
- *
- * PARAMETERS:  Handle              - Node to be dumped
- *              debug_level         - Output level
- *
- * RETURN:      None
- *
- * DESCRIPTION: Dump a single Node
- *
- ******************************************************************************/
-
-void acpi_ns_dump_entry(acpi_handle handle, u32 debug_level)
-{
-	struct acpi_walk_info info;
-
-	ACPI_FUNCTION_ENTRY();
-
-	info.debug_level = debug_level;
-	info.owner_id = ACPI_OWNER_ID_MAX;
-	info.display_type = ACPI_DISPLAY_SUMMARY;
-
-	(void)acpi_ns_dump_one_object(handle, 1, &info, NULL);
-}
-
-#ifdef ACPI_ASL_COMPILER
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_dump_tables
- *
- * PARAMETERS:  search_base         - Root of subtree to be dumped, or
- *                                    NS_ALL to dump the entire namespace
- *              max_depth           - Maximum depth of dump.  Use INT_MAX
- *                                    for an effectively unlimited depth.
- *
- * RETURN:      None
- *
- * DESCRIPTION: Dump the name space, or a portion of it.
- *
- ******************************************************************************/
-
-void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth)
-{
-	acpi_handle search_handle = search_base;
-
-	ACPI_FUNCTION_TRACE(ns_dump_tables);
-
-	if (!acpi_gbl_root_node) {
-		/*
-		 * If the name space has not been initialized,
-		 * there is nothing to dump.
-		 */
-		ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
-				  "namespace not initialized!\n"));
-		return_VOID;
-	}
-
-	if (ACPI_NS_ALL == search_base) {
-
-		/* Entire namespace */
-
-		search_handle = acpi_gbl_root_node;
-		ACPI_DEBUG_PRINT((ACPI_DB_TABLES, "\\\n"));
-	}
-
-	acpi_ns_dump_objects(ACPI_TYPE_ANY, ACPI_DISPLAY_OBJECTS, max_depth,
-			     ACPI_OWNER_ID_MAX, search_handle);
-	return_VOID;
-}
-#endif				/* ACPI_ASL_COMPILER */
-#endif				/* defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) */
diff --git a/drivers/acpi/namespace/nsdumpdv.c b/drivers/acpi/namespace/nsdumpdv.c
deleted file mode 100644
index 428f50f..0000000
--- a/drivers/acpi/namespace/nsdumpdv.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/******************************************************************************
- *
- * Module Name: nsdumpdv - table dumping routines for debug
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-
-/* TBD: This entire module is apparently obsolete and should be removed */
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nsdumpdv")
-#ifdef ACPI_OBSOLETE_FUNCTIONS
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-#include <acpi/acnamesp.h>
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_dump_one_device
- *
- * PARAMETERS:  Handle              - Node to be dumped
- *              Level               - Nesting level of the handle
- *              Context             - Passed into walk_namespace
- *              return_value        - Not used
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Dump a single Node that represents a device
- *              This procedure is a user_function called by acpi_ns_walk_namespace.
- *
- ******************************************************************************/
-static acpi_status
-acpi_ns_dump_one_device(acpi_handle obj_handle,
-			u32 level, void *context, void **return_value)
-{
-	struct acpi_buffer buffer;
-	struct acpi_device_info *info;
-	acpi_status status;
-	u32 i;
-
-	ACPI_FUNCTION_NAME(ns_dump_one_device);
-
-	status =
-	    acpi_ns_dump_one_object(obj_handle, level, context, return_value);
-
-	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
-	status = acpi_get_object_info(obj_handle, &buffer);
-	if (ACPI_SUCCESS(status)) {
-		info = buffer.pointer;
-		for (i = 0; i < level; i++) {
-			ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES, " "));
-		}
-
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_TABLES,
-				      "    HID: %s, ADR: %8.8X%8.8X, Status: %X\n",
-				      info->hardware_id.value,
-				      ACPI_FORMAT_UINT64(info->address),
-				      info->current_status));
-		ACPI_FREE(info);
-	}
-
-	return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_dump_root_devices
- *
- * PARAMETERS:  None
- *
- * RETURN:      None
- *
- * DESCRIPTION: Dump all objects of type "device"
- *
- ******************************************************************************/
-
-void acpi_ns_dump_root_devices(void)
-{
-	acpi_handle sys_bus_handle;
-	acpi_status status;
-
-	ACPI_FUNCTION_NAME(ns_dump_root_devices);
-
-	/* Only dump the table if tracing is enabled */
-
-	if (!(ACPI_LV_TABLES & acpi_dbg_level)) {
-		return;
-	}
-
-	status = acpi_get_handle(NULL, ACPI_NS_SYSTEM_BUS, &sys_bus_handle);
-	if (ACPI_FAILURE(status)) {
-		return;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
-			  "Display of all devices in the namespace:\n"));
-
-	status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, sys_bus_handle,
-					ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
-					acpi_ns_dump_one_device, NULL, NULL);
-}
-
-#endif
-#endif
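Editorial aside (not part of the patch): the dump helpers above are thin wrappers around ACPICA's walk/callback pattern, which also drives the initialization code further down. The sketch below is hypothetical (count_one_device and count_devices are made-up names), but it mirrors the six-argument acpi_walk_namespace() call used in nsinit.c: the core invokes the callback once per matching node, and a callback may return AE_CTRL_DEPTH to prune the subtree it was called for.

static acpi_status count_one_device(acpi_handle handle, u32 level,
				    void *context, void **return_value)
{
	u32 *count = context;

	(*count)++;		/* one more Device node seen */
	return (AE_OK);		/* AE_CTRL_DEPTH here would skip this subtree */
}

static u32 count_devices(void)
{
	u32 count = 0;

	(void)acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				  ACPI_UINT32_MAX, count_one_device,
				  &count, NULL);
	return count;
}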
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c
deleted file mode 100644
index 4cdf03a..0000000
--- a/drivers/acpi/namespace/nseval.c
+++ /dev/null
@@ -1,319 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: nseval - Object evaluation, includes control method execution
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/acinterp.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nseval")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_evaluate
- *
- * PARAMETERS:  Info            - Evaluation info block, contains:
- *                  prefix_node     - Prefix or Method/Object Node to execute
- *                  Pathname        - Name of method to execute, If NULL, the
- *                                    Node is the object to execute
- *                  Parameters      - List of parameters to pass to the method,
- *                                    terminated by NULL. Params itself may be
- *                                    NULL if no parameters are being passed.
- *                  return_object   - Where to put method's return value (if
- *                                    any). If NULL, no value is returned.
- *                  parameter_type  - Type of Parameter list
- *                  Flags           - ACPI_IGNORE_RETURN_VALUE to delete return
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute a control method or return the current value of an
- *              ACPI namespace object.
- *
- * MUTEX:       Locks interpreter
- *
- ******************************************************************************/
-acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node;
-
-	ACPI_FUNCTION_TRACE(ns_evaluate);
-
-	if (!info) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Initialize the return value to an invalid object */
-
-	info->return_object = NULL;
-
-	/*
-	 * Get the actual namespace node for the target object. Handles these cases:
-	 *
-	 * 1) Null node, Pathname (absolute path)
-	 * 2) Node, Pathname (path relative to Node)
-	 * 3) Node, Null Pathname
-	 */
-	status = acpi_ns_get_node(info->prefix_node, info->pathname,
-				  ACPI_NS_NO_UPSEARCH, &info->resolved_node);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * For a method alias, we must grab the actual method node so that proper
-	 * scoping context will be established before execution.
-	 */
-	if (acpi_ns_get_type(info->resolved_node) ==
-	    ACPI_TYPE_LOCAL_METHOD_ALIAS) {
-		info->resolved_node =
-		    ACPI_CAST_PTR(struct acpi_namespace_node,
-				  info->resolved_node->object);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "%s [%p] Value %p\n", info->pathname,
-			  info->resolved_node,
-			  acpi_ns_get_attached_object(info->resolved_node)));
-
-	node = info->resolved_node;
-
-	/*
-	 * Two major cases here:
-	 *
-	 * 1) The object is a control method -- execute it
-	 * 2) The object is not a method -- just return its current value
-	 */
-	if (acpi_ns_get_type(info->resolved_node) == ACPI_TYPE_METHOD) {
-		/*
-		 * 1) Object is a control method - execute it
-		 */
-
-		/* Verify that there is a method object associated with this node */
-
-		info->obj_desc =
-		    acpi_ns_get_attached_object(info->resolved_node);
-		if (!info->obj_desc) {
-			ACPI_ERROR((AE_INFO,
-				    "Control method has no attached sub-object"));
-			return_ACPI_STATUS(AE_NULL_OBJECT);
-		}
-
-		/*
-		 * Calculate the number of arguments being passed to the method
-		 */
-
-		info->param_count = 0;
-		if (info->parameters) {
-			while (info->parameters[info->param_count])
-				info->param_count++;
-		}
-
-		/*
-		 * Warning if too few or too many arguments have been passed by the
-		 * caller. We don't want to abort here with an error because an
-		 * incorrect number of arguments may not cause the method to fail.
-		 * However, the method will fail if there are too few arguments passed
-		 * and the method attempts to use one of the missing ones.
-		 */
-
-		if (info->param_count < info->obj_desc->method.param_count) {
-			ACPI_WARNING((AE_INFO,
-				    "Insufficient arguments - "
-				    "method [%4.4s] needs %d, found %d",
-				    acpi_ut_get_node_name(info->resolved_node),
-				    info->obj_desc->method.param_count,
-				    info->param_count));
-		} else if (info->param_count >
-				info->obj_desc->method.param_count) {
-			ACPI_WARNING((AE_INFO,
-				      "Excess arguments - "
-				      "method [%4.4s] needs %d, found %d",
-				      acpi_ut_get_node_name(info->
-							    resolved_node),
-				      info->obj_desc->method.param_count,
-				      info->param_count));
-		}
-
-		ACPI_DUMP_PATHNAME(info->resolved_node, "Execute Method:",
-				   ACPI_LV_INFO, _COMPONENT);
-
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Method at AML address %p Length %X\n",
-				  info->obj_desc->method.aml_start + 1,
-				  info->obj_desc->method.aml_length - 1));
-
-		/*
-		 * Any namespace deletion must acquire both the namespace and
-		 * interpreter locks to ensure that no thread is using the portion of
-		 * the namespace that is being deleted.
-		 *
-		 * Execute the method via the interpreter. The interpreter is locked
-		 * here before calling into the AML parser
-		 */
-		acpi_ex_enter_interpreter();
-		status = acpi_ps_execute_method(info);
-		acpi_ex_exit_interpreter();
-	} else {
-		/*
-		 * 2) Object is not a method, return its current value
-		 *
-		 * Disallow certain object types. For these, "evaluation" is undefined.
-		 */
-		switch (info->resolved_node->type) {
-		case ACPI_TYPE_DEVICE:
-		case ACPI_TYPE_EVENT:
-		case ACPI_TYPE_MUTEX:
-		case ACPI_TYPE_REGION:
-		case ACPI_TYPE_THERMAL:
-		case ACPI_TYPE_LOCAL_SCOPE:
-
-			ACPI_ERROR((AE_INFO,
-				    "[%4.4s] Evaluation of object type [%s] is not supported",
-				    info->resolved_node->name.ascii,
-				    acpi_ut_get_type_name(info->resolved_node->
-							  type)));
-
-			return_ACPI_STATUS(AE_TYPE);
-
-		default:
-			break;
-		}
-
-		/*
-		 * Objects require additional resolution steps (e.g., the Node may be
-		 * a field that must be read, etc.) -- we can't just grab the object
-		 * out of the node.
-		 *
-		 * Use resolve_node_to_value() to get the associated value.
-		 *
-		 * NOTE: we can get away with passing in NULL for a walk state because
-		 * resolved_node is guaranteed to not be a reference to either a method
-		 * local or a method argument (because this interface is never called
-		 * from a running method.)
-		 *
-		 * Even though we do not directly invoke the interpreter for object
-		 * resolution, we must lock it because we could access an opregion.
-		 * The opregion access code assumes that the interpreter is locked.
-		 */
-		acpi_ex_enter_interpreter();
-
-		/* Function has a strange interface */
-
-		status =
-		    acpi_ex_resolve_node_to_value(&info->resolved_node, NULL);
-		acpi_ex_exit_interpreter();
-
-		/*
-		 * If acpi_ex_resolve_node_to_value() succeeded, the return value was placed
-		 * in resolved_node.
-		 */
-		if (ACPI_SUCCESS(status)) {
-			status = AE_CTRL_RETURN_VALUE;
-			info->return_object =
-			    ACPI_CAST_PTR(union acpi_operand_object,
-					  info->resolved_node);
-
-			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-					  "Returning object %p [%s]\n",
-					  info->return_object,
-					  acpi_ut_get_object_type_name(info->
-								       return_object)));
-		}
-	}
-
-	/* Validation of return values for ACPI-predefined methods and objects */
-
-	if ((status == AE_OK) || (status == AE_CTRL_RETURN_VALUE)) {
-		/*
-		 * If this is the first evaluation, check the return value. This
-		 * ensures that any warnings will only be emitted during the very
-		 * first evaluation of the object.
-		 */
-		if (!(node->flags & ANOBJ_EVALUATED)) {
-			/*
-			 * Check for a predefined ACPI name. If found, validate the
-			 * returned object.
-			 *
-			 * Note: Ignore return status for now, emit warnings if there are
-			 * problems with the returned object. May change later to abort
-			 * the method on invalid return object.
-			 */
-			(void)acpi_ns_check_predefined_names(node,
-							     info->
-							     return_object);
-		}
-
-		/* Mark the node as having been evaluated */
-
-		node->flags |= ANOBJ_EVALUATED;
-	}
-
-	/* Check if there is a return value that must be dealt with */
-
-	if (status == AE_CTRL_RETURN_VALUE) {
-
-		/* If caller does not want the return value, delete it */
-
-		if (info->flags & ACPI_IGNORE_RETURN_VALUE) {
-			acpi_ut_remove_reference(info->return_object);
-			info->return_object = NULL;
-		}
-
-		/* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
-
-		status = AE_OK;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-			  "*** Completed evaluation of object %s ***\n",
-			  info->pathname));
-
-	/*
-	 * Namespace was unlocked by the handling acpi_ns* function, so we
-	 * just return
-	 */
-	return_ACPI_STATUS(status);
-}
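Editorial aside (not part of the patch): a sketch of how an internal caller drives acpi_ns_evaluate(), modelled on the _INI invocation in nsinit.c below. run_ini() is a hypothetical name; the field assignments and the ACPI_IGNORE_RETURN_VALUE flag are the ones the deleted code itself uses. A caller that wants the result instead leaves the flag clear and reads the object from info->return_object.

static acpi_status run_ini(struct acpi_namespace_node *device_node)
{
	struct acpi_evaluate_info *info;
	acpi_status status;

	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info)
		return (AE_NO_MEMORY);

	info->prefix_node = device_node;	/* resolve _INI relative to the device */
	info->pathname = METHOD_NAME__INI;
	info->parameters = NULL;		/* _INI takes no arguments */
	info->flags = ACPI_IGNORE_RETURN_VALUE;	/* drop any return object */

	status = acpi_ns_evaluate(info);

	ACPI_FREE(info);
	return (status);
}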
diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/namespace/nsinit.c
deleted file mode 100644
index e4c5751..0000000
--- a/drivers/acpi/namespace/nsinit.c
+++ /dev/null
@@ -1,592 +0,0 @@
-/******************************************************************************
- *
- * Module Name: nsinit - namespace initialization
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-#include <linux/nmi.h>
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nsinit")
-
-/* Local prototypes */
-static acpi_status
-acpi_ns_init_one_object(acpi_handle obj_handle,
-			u32 level, void *context, void **return_value);
-
-static acpi_status
-acpi_ns_init_one_device(acpi_handle obj_handle,
-			u32 nesting_level, void *context, void **return_value);
-
-static acpi_status
-acpi_ns_find_ini_methods(acpi_handle obj_handle,
-			 u32 nesting_level, void *context, void **return_value);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_initialize_objects
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Walk the entire namespace and perform any necessary
- *              initialization on the objects found therein
- *
- ******************************************************************************/
-
-acpi_status acpi_ns_initialize_objects(void)
-{
-	acpi_status status;
-	struct acpi_init_walk_info info;
-
-	ACPI_FUNCTION_TRACE(ns_initialize_objects);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "**** Starting initialization of namespace objects ****\n"));
-	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
-			      "Completing Region/Field/Buffer/Package initialization:"));
-
-	/* Set all init info to zero */
-
-	ACPI_MEMSET(&info, 0, sizeof(struct acpi_init_walk_info));
-
-	/* Walk entire namespace from the supplied root */
-
-	status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
-				     ACPI_UINT32_MAX, acpi_ns_init_one_object,
-				     &info, NULL);
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
-	}
-
-	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
-			      "\nInitialized %hd/%hd Regions %hd/%hd Fields %hd/%hd Buffers %hd/%hd Packages (%hd nodes)\n",
-			      info.op_region_init, info.op_region_count,
-			      info.field_init, info.field_count,
-			      info.buffer_init, info.buffer_count,
-			      info.package_init, info.package_count,
-			      info.object_count));
-
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "%hd Control Methods found\n", info.method_count));
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "%hd Op Regions found\n", info.op_region_count));
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_initialize_devices
- *
- * PARAMETERS:  None
- *
- * RETURN:      acpi_status
- *
- * DESCRIPTION: Walk the entire namespace and initialize all ACPI devices.
- *              This means running _INI on all present devices.
- *
- *              Note: We install PCI config space handler on region access,
- *              not here.
- *
- ******************************************************************************/
-
-acpi_status acpi_ns_initialize_devices(void)
-{
-	acpi_status status;
-	struct acpi_device_walk_info info;
-
-	ACPI_FUNCTION_TRACE(ns_initialize_devices);
-
-	/* Init counters */
-
-	info.device_count = 0;
-	info.num_STA = 0;
-	info.num_INI = 0;
-
-	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
-			      "Initializing Device/Processor/Thermal objects by executing _INI methods:"));
-
-	/* Tree analysis: find all subtrees that contain _INI methods */
-
-	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
-					ACPI_UINT32_MAX, FALSE,
-					acpi_ns_find_ini_methods, &info, NULL);
-	if (ACPI_FAILURE(status)) {
-		goto error_exit;
-	}
-
-	/* Allocate the evaluation information block */
-
-	info.evaluate_info =
-	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
-	if (!info.evaluate_info) {
-		status = AE_NO_MEMORY;
-		goto error_exit;
-	}
-
-	/* Walk namespace to execute all _INIs on present devices */
-
-	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
-					ACPI_UINT32_MAX, FALSE,
-					acpi_ns_init_one_device, &info, NULL);
-
-	ACPI_FREE(info.evaluate_info);
-	if (ACPI_FAILURE(status)) {
-		goto error_exit;
-	}
-
-	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
-			      "\nExecuted %hd _INI methods requiring %hd _STA executions (examined %hd objects)\n",
-			      info.num_INI, info.num_STA, info.device_count));
-
-	return_ACPI_STATUS(status);
-
-      error_exit:
-	ACPI_EXCEPTION((AE_INFO, status, "During device initialization"));
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_init_one_object
- *
- * PARAMETERS:  obj_handle      - Node
- *              Level           - Current nesting level
- *              Context         - Points to a init info struct
- *              return_value    - Not used
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Callback from acpi_walk_namespace. Invoked for every object
- *              within the  namespace.
- *
- *              Currently, the only objects that require initialization are:
- *              1) Methods
- *              2) Op Regions
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_init_one_object(acpi_handle obj_handle,
-			u32 level, void *context, void **return_value)
-{
-	acpi_object_type type;
-	acpi_status status = AE_OK;
-	struct acpi_init_walk_info *info =
-	    (struct acpi_init_walk_info *)context;
-	struct acpi_namespace_node *node =
-	    (struct acpi_namespace_node *)obj_handle;
-	union acpi_operand_object *obj_desc;
-
-	ACPI_FUNCTION_NAME(ns_init_one_object);
-
-	info->object_count++;
-
-	/* And even then, we are only interested in a few object types */
-
-	type = acpi_ns_get_type(obj_handle);
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (!obj_desc) {
-		return (AE_OK);
-	}
-
-	/* Increment counters for object types we are looking for */
-
-	switch (type) {
-	case ACPI_TYPE_REGION:
-		info->op_region_count++;
-		break;
-
-	case ACPI_TYPE_BUFFER_FIELD:
-		info->field_count++;
-		break;
-
-	case ACPI_TYPE_LOCAL_BANK_FIELD:
-		info->field_count++;
-		break;
-
-	case ACPI_TYPE_BUFFER:
-		info->buffer_count++;
-		break;
-
-	case ACPI_TYPE_PACKAGE:
-		info->package_count++;
-		break;
-
-	default:
-
-		/* No init required, just exit now */
-		return (AE_OK);
-	}
-
-	/*
-	 * If the object is already initialized, nothing else to do
-	 */
-	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
-		return (AE_OK);
-	}
-
-	/*
-	 * Must lock the interpreter before executing AML code
-	 */
-	acpi_ex_enter_interpreter();
-
-	/*
-	 * Each of these types can contain executable AML code within the
-	 * declaration.
-	 */
-	switch (type) {
-	case ACPI_TYPE_REGION:
-
-		info->op_region_init++;
-		status = acpi_ds_get_region_arguments(obj_desc);
-		break;
-
-	case ACPI_TYPE_BUFFER_FIELD:
-
-		info->field_init++;
-		status = acpi_ds_get_buffer_field_arguments(obj_desc);
-		break;
-
-	case ACPI_TYPE_LOCAL_BANK_FIELD:
-
-		info->field_init++;
-		status = acpi_ds_get_bank_field_arguments(obj_desc);
-		break;
-
-	case ACPI_TYPE_BUFFER:
-
-		info->buffer_init++;
-		status = acpi_ds_get_buffer_arguments(obj_desc);
-		break;
-
-	case ACPI_TYPE_PACKAGE:
-
-		info->package_init++;
-		status = acpi_ds_get_package_arguments(obj_desc);
-		break;
-
-	default:
-		/* No other types can get here */
-		break;
-	}
-
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Could not execute arguments for [%4.4s] (%s)",
-				acpi_ut_get_node_name(node),
-				acpi_ut_get_type_name(type)));
-	}
-
-	/*
-	 * Print a dot for each object unless we are going to print the entire
-	 * pathname
-	 */
-	if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
-	}
-
-	/*
-	 * We ignore errors from above, and always return OK, since we don't want
-	 * to abort the walk on any single error.
-	 */
-	acpi_ex_exit_interpreter();
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_find_ini_methods
- *
- * PARAMETERS:  acpi_walk_callback
- *
- * RETURN:      acpi_status
- *
- * DESCRIPTION: Called during namespace walk. Finds objects named _INI under
- *              device/processor/thermal objects, and marks the entire subtree
- *              with a SUBTREE_HAS_INI flag. This flag is used during the
- *              subsequent device initialization walk to avoid entire subtrees
- *              that do not contain an _INI.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_find_ini_methods(acpi_handle obj_handle,
-			 u32 nesting_level, void *context, void **return_value)
-{
-	struct acpi_device_walk_info *info =
-	    ACPI_CAST_PTR(struct acpi_device_walk_info, context);
-	struct acpi_namespace_node *node;
-	struct acpi_namespace_node *parent_node;
-
-	/* Keep count of device/processor/thermal objects */
-
-	node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
-	if ((node->type == ACPI_TYPE_DEVICE) ||
-	    (node->type == ACPI_TYPE_PROCESSOR) ||
-	    (node->type == ACPI_TYPE_THERMAL)) {
-		info->device_count++;
-		return (AE_OK);
-	}
-
-	/* We are only looking for methods named _INI */
-
-	if (!ACPI_COMPARE_NAME(node->name.ascii, METHOD_NAME__INI)) {
-		return (AE_OK);
-	}
-
-	/*
-	 * The only _INI methods that we care about are those that are
-	 * present under Device, Processor, and Thermal objects.
-	 */
-	parent_node = acpi_ns_get_parent_node(node);
-	switch (parent_node->type) {
-	case ACPI_TYPE_DEVICE:
-	case ACPI_TYPE_PROCESSOR:
-	case ACPI_TYPE_THERMAL:
-
-		/* Mark parent and bubble up the INI present flag to the root */
-
-		while (parent_node) {
-			parent_node->flags |= ANOBJ_SUBTREE_HAS_INI;
-			parent_node = acpi_ns_get_parent_node(parent_node);
-		}
-		break;
-
-	default:
-		break;
-	}
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_init_one_device
- *
- * PARAMETERS:  acpi_walk_callback
- *
- * RETURN:      acpi_status
- *
- * DESCRIPTION: This is called once per device soon after ACPI is enabled
- *              to initialize each device. It determines if the device is
- *              present, and if so, calls _INI.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_init_one_device(acpi_handle obj_handle,
-			u32 nesting_level, void *context, void **return_value)
-{
-	struct acpi_device_walk_info *walk_info =
-	    ACPI_CAST_PTR(struct acpi_device_walk_info, context);
-	struct acpi_evaluate_info *info = walk_info->evaluate_info;
-	u32 flags;
-	acpi_status status;
-	struct acpi_namespace_node *device_node;
-
-	ACPI_FUNCTION_TRACE(ns_init_one_device);
-
-	/* We are interested in Devices, Processors and thermal_zones only */
-
-	device_node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
-	if ((device_node->type != ACPI_TYPE_DEVICE) &&
-	    (device_node->type != ACPI_TYPE_PROCESSOR) &&
-	    (device_node->type != ACPI_TYPE_THERMAL)) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/*
-	 * Because of an earlier namespace analysis, all subtrees that contain an
-	 * _INI method are tagged.
-	 *
-	 * If this device subtree does not contain any _INI methods, we
-	 * can exit now and stop traversing this entire subtree.
-	 */
-	if (!(device_node->flags & ANOBJ_SUBTREE_HAS_INI)) {
-		return_ACPI_STATUS(AE_CTRL_DEPTH);
-	}
-
-	/*
-	 * Run _STA to determine if this device is present and functioning. We
-	 * must know this information for two important reasons (from ACPI spec):
-	 *
-	 * 1) We can only run _INI if the device is present.
-	 * 2) We must abort the device tree walk on this subtree if the device is
-	 *    not present and is not functional (we will not examine the children)
-	 *
-	 * The _STA method is not required to be present under the device, we
-	 * assume the device is present if _STA does not exist.
-	 */
-	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
-			(ACPI_TYPE_METHOD, device_node, METHOD_NAME__STA));
-
-	status = acpi_ut_execute_STA(device_node, &flags);
-	if (ACPI_FAILURE(status)) {
-
-		/* Ignore error and move on to next device */
-
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/*
-	 * Flags == -1 means that _STA was not found. In this case, we assume that
-	 * the device is both present and functional.
-	 *
-	 * From the ACPI spec, description of _STA:
-	 *
-	 * "If a device object (including the processor object) does not have an
-	 * _STA object, then OSPM assumes that all of the above bits are set (in
-	 * other words, the device is present, ..., and functioning)"
-	 */
-	if (flags != ACPI_UINT32_MAX) {
-		walk_info->num_STA++;
-	}
-
-	/*
-	 * Examine the PRESENT and FUNCTIONING status bits
-	 *
-	 * Note: ACPI spec does not seem to specify behavior for the present but
-	 * not functioning case, so we assume functioning if present.
-	 */
-	if (!(flags & ACPI_STA_DEVICE_PRESENT)) {
-
-		/* Device is not present, we must examine the Functioning bit */
-
-		if (flags & ACPI_STA_DEVICE_FUNCTIONING) {
-			/*
-			 * Device is not present but is "functioning". In this case,
-			 * we will not run _INI, but we continue to examine the children
-			 * of this device.
-			 *
-			 * From the ACPI spec, description of _STA: (Note - no mention
-			 * of whether to run _INI or not on the device in question)
-			 *
-			 * "_STA may return bit 0 clear (not present) with bit 3 set
-			 * (device is functional). This case is used to indicate a valid
-			 * device for which no device driver should be loaded (for example,
-			 * a bridge device.) Children of this device may be present and
-			 * valid. OSPM should continue enumeration below a device whose
-			 * _STA returns this bit combination"
-			 */
-			return_ACPI_STATUS(AE_OK);
-		} else {
-			/*
-			 * Device is not present and is not functioning. We must abort the
-			 * walk of this subtree immediately -- don't look at the children
-			 * of such a device.
-			 *
-			 * From the ACPI spec, description of _INI:
-			 *
-			 * "If the _STA method indicates that the device is not present,
-			 * OSPM will not run the _INI and will not examine the children
-			 * of the device for _INI methods"
-			 */
-			return_ACPI_STATUS(AE_CTRL_DEPTH);
-		}
-	}
-
-	/*
-	 * The device is present or is assumed present if no _STA exists.
-	 * Run the _INI if it exists (not required to exist)
-	 *
-	 * Note: We know there is an _INI within this subtree, but it may not be
-	 * under this particular device, it may be lower in the branch.
-	 */
-	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
-			(ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI));
-
-	info->prefix_node = device_node;
-	info->pathname = METHOD_NAME__INI;
-	info->parameters = NULL;
-	info->flags = ACPI_IGNORE_RETURN_VALUE;
-
-	/*
-	 * Some hardware relies on this being executed as atomically
-	 * as possible (without an NMI being received in the middle of
-	 * this) - so disable NMIs and initialize the device:
-	 */
-	acpi_nmi_disable();
-	status = acpi_ns_evaluate(info);
-	acpi_nmi_enable();
-
-	if (ACPI_SUCCESS(status)) {
-		walk_info->num_INI++;
-
-		if ((acpi_dbg_level <= ACPI_LV_ALL_EXCEPTIONS) &&
-		    (!(acpi_dbg_level & ACPI_LV_INFO))) {
-			ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
-		}
-	}
-#ifdef ACPI_DEBUG_OUTPUT
-	else if (status != AE_NOT_FOUND) {
-
-		/* Ignore error and move on to next device */
-
-		char *scope_name =
-		    acpi_ns_get_external_pathname(info->resolved_node);
-
-		ACPI_EXCEPTION((AE_INFO, status, "during %s._INI execution",
-				scope_name));
-		ACPI_FREE(scope_name);
-	}
-#endif
-
-	/* Ignore errors from above */
-
-	status = AE_OK;
-
-	/*
-	 * The _INI method has been run if present; call the Global Initialization
-	 * Handler for this device.
-	 */
-	if (acpi_gbl_init_handler) {
-		status =
-		    acpi_gbl_init_handler(device_node, ACPI_INIT_DEVICE_INI);
-	}
-
-	return_ACPI_STATUS(status);
-}
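Editorial aside (not part of the patch): the _STA handling in acpi_ns_init_one_device() above reduces to three outcomes. The helper below is hypothetical (the names are invented), but the bit tests are the ones in the deleted code; note that when _STA is absent the core substitutes all-bits-set flags, so such a device falls into the first case.

enum ini_action {
	INI_RUN_AND_DESCEND,	/* present: run _INI, keep walking children */
	INI_SKIP_BUT_DESCEND,	/* not present but functioning (e.g. a bridge) */
	INI_PRUNE_SUBTREE	/* absent and non-functioning: return AE_CTRL_DEPTH */
};

static enum ini_action classify_sta(u32 sta_flags)
{
	if (sta_flags & ACPI_STA_DEVICE_PRESENT)
		return INI_RUN_AND_DESCEND;

	if (sta_flags & ACPI_STA_DEVICE_FUNCTIONING)
		return INI_SKIP_BUT_DESCEND;

	return INI_PRUNE_SUBTREE;
}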
diff --git a/drivers/acpi/namespace/nsload.c b/drivers/acpi/namespace/nsload.c
deleted file mode 100644
index a4a412b..0000000
--- a/drivers/acpi/namespace/nsload.c
+++ /dev/null
@@ -1,314 +0,0 @@
-/******************************************************************************
- *
- * Module Name: nsload - namespace loading/expanding/contracting procedures
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acdispat.h>
-#include <acpi/actables.h>
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nsload")
-
-/* Local prototypes */
-#ifdef ACPI_FUTURE_IMPLEMENTATION
-acpi_status acpi_ns_unload_namespace(acpi_handle handle);
-
-static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle);
-#endif
-
-#ifndef ACPI_NO_METHOD_EXECUTION
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_load_table
- *
- * PARAMETERS:  table_index     - Index for table to be loaded
- *              Node            - Owning NS node
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Load one ACPI table into the namespace
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ns_load_table);
-
-	/*
-	 * Parse the table and load the namespace with all named
-	 * objects found within.  Control methods are NOT parsed
-	 * at this time.  In fact, the control methods cannot be
-	 * parsed until the entire namespace is loaded, because
-	 * if a control method makes a forward reference (call)
-	 * to another control method, we can't continue parsing
-	 * because we don't know how many arguments to parse next!
-	 */
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* If table already loaded into namespace, just return */
-
-	if (acpi_tb_is_table_loaded(table_index)) {
-		status = AE_ALREADY_EXISTS;
-		goto unlock;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "**** Loading table into namespace ****\n"));
-
-	status = acpi_tb_allocate_owner_id(table_index);
-	if (ACPI_FAILURE(status)) {
-		goto unlock;
-	}
-
-	status = acpi_ns_parse_table(table_index, node);
-	if (ACPI_SUCCESS(status)) {
-		acpi_tb_set_table_loaded_flag(table_index, TRUE);
-	} else {
-		(void)acpi_tb_release_owner_id(table_index);
-	}
-
-      unlock:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * Now we can parse the control methods.  We always parse
-	 * them here for a sanity check, and if configured for
-	 * just-in-time parsing, we delete the control method
-	 * parse trees.
-	 */
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "**** Begin Table Method Parsing and Object Initialization ****\n"));
-
-	status = acpi_ds_initialize_objects(table_index, node);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "**** Completed Table Method Parsing and Object Initialization ****\n"));
-
-	return_ACPI_STATUS(status);
-}
-
-#ifdef ACPI_OBSOLETE_FUNCTIONS
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_load_namespace
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Load the name space from whatever is pointed to by DSDT.
- *              (DSDT points to either the BIOS or a buffer.)
- *
- ******************************************************************************/
-
-acpi_status acpi_ns_load_namespace(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_load_name_space);
-
-	/* There must be at least a DSDT installed */
-
-	if (acpi_gbl_DSDT == NULL) {
-		ACPI_ERROR((AE_INFO, "DSDT is not in memory"));
-		return_ACPI_STATUS(AE_NO_ACPI_TABLES);
-	}
-
-	/*
-	 * Load the namespace.  The DSDT is required,
-	 * but the SSDT and PSDT tables are optional.
-	 */
-	status = acpi_ns_load_table_by_type(ACPI_TABLE_ID_DSDT);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Ignore exceptions from these */
-
-	(void)acpi_ns_load_table_by_type(ACPI_TABLE_ID_SSDT);
-	(void)acpi_ns_load_table_by_type(ACPI_TABLE_ID_PSDT);
-
-	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
-			      "ACPI Namespace successfully loaded at root %p\n",
-			      acpi_gbl_root_node));
-
-	return_ACPI_STATUS(status);
-}
-#endif
-
-#ifdef ACPI_FUTURE_IMPLEMENTATION
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_delete_subtree
- *
- * PARAMETERS:  start_handle        - Handle in namespace where search begins
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Walks the namespace starting at the given handle and deletes
- *              all objects, entries, and scopes in the entire subtree.
- *
- *              Namespace/Interpreter should be locked or the subsystem should
- *              be in shutdown before this routine is called.
- *
- ******************************************************************************/
-
-static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
-{
-	acpi_status status;
-	acpi_handle child_handle;
-	acpi_handle parent_handle;
-	acpi_handle next_child_handle;
-	acpi_handle dummy;
-	u32 level;
-
-	ACPI_FUNCTION_TRACE(ns_delete_subtree);
-
-	parent_handle = start_handle;
-	child_handle = NULL;
-	level = 1;
-
-	/*
-	 * Traverse the tree of objects until we bubble back up
-	 * to where we started.
-	 */
-	while (level > 0) {
-
-		/* Attempt to get the next object in this scope */
-
-		status = acpi_get_next_object(ACPI_TYPE_ANY, parent_handle,
-					      child_handle, &next_child_handle);
-
-		child_handle = next_child_handle;
-
-		/* Did we get a new object? */
-
-		if (ACPI_SUCCESS(status)) {
-
-			/* Check if this object has any children */
-
-			if (ACPI_SUCCESS
-			    (acpi_get_next_object
-			     (ACPI_TYPE_ANY, child_handle, NULL, &dummy))) {
-				/*
-				 * There is at least one child of this object,
-				 * visit the object
-				 */
-				level++;
-				parent_handle = child_handle;
-				child_handle = NULL;
-			}
-		} else {
-			/*
-			 * No more children in this object, go back up to
-			 * the object's parent
-			 */
-			level--;
-
-			/* Delete all children now */
-
-			acpi_ns_delete_children(child_handle);
-
-			child_handle = parent_handle;
-			status = acpi_get_parent(parent_handle, &parent_handle);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-		}
-	}
-
-	/* Now delete the starting object, and we are done */
-
-	acpi_ns_delete_node(child_handle);
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- *  FUNCTION:       acpi_ns_unload_namespace
- *
- *  PARAMETERS:     Handle          - Root of namespace subtree to be deleted
- *
- *  RETURN:         Status
- *
- *  DESCRIPTION:    Shrinks the namespace, typically in response to an undocking
- *                  event.  Deletes an entire subtree starting from (and
- *                  including) the given handle.
- *
- ******************************************************************************/
-
-acpi_status acpi_ns_unload_namespace(acpi_handle handle)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ns_unload_name_space);
-
-	/* Parameter validation */
-
-	if (!acpi_gbl_root_node) {
-		return_ACPI_STATUS(AE_NO_NAMESPACE);
-	}
-
-	if (!handle) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* This function does the real work */
-
-	status = acpi_ns_delete_subtree(handle);
-
-	return_ACPI_STATUS(status);
-}
-#endif
-#endif
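Editorial aside (not part of the patch): the reason acpi_ns_load_table() defers control-method parsing, illustrated with a sketchy ASL fragment (the method names are invented):

/*
 *     Method (FOO) { BAR (1, 2) }      // invocation seen first
 *     Method (BAR, 2) { ... }          // argument count declared later
 *
 * While parsing FOO's body, the AML stream gives no argument count for
 * BAR, so the parser cannot tell how many of the following terms are
 * BAR's arguments.  Loading all named objects first (so BAR's argument
 * count becomes known) and parsing method bodies in a later pass, or
 * lazily at execution time, avoids the problem.
 */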
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c
deleted file mode 100644
index 42a39a7..0000000
--- a/drivers/acpi/namespace/nsnames.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: nsnames - Name manipulation and search
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/amlcode.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nsnames")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_build_external_path
- *
- * PARAMETERS:  Node            - NS node whose pathname is needed
- *              Size            - Size of the pathname
- *              *name_buffer    - Where to return the pathname
- *
- * RETURN:      Status
- *              Places the pathname into the name_buffer, in external format
- *              (name segments separated by path separators)
- *
- * DESCRIPTION: Generate a full pathname
- *
- ******************************************************************************/
-acpi_status
-acpi_ns_build_external_path(struct acpi_namespace_node *node,
-			    acpi_size size, char *name_buffer)
-{
-	acpi_size index;
-	struct acpi_namespace_node *parent_node;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* Special case for root */
-
-	index = size - 1;
-	if (index < ACPI_NAME_SIZE) {
-		name_buffer[0] = AML_ROOT_PREFIX;
-		name_buffer[1] = 0;
-		return (AE_OK);
-	}
-
-	/* Store terminator byte, then build name backwards */
-
-	parent_node = node;
-	name_buffer[index] = 0;
-
-	while ((index > ACPI_NAME_SIZE) && (parent_node != acpi_gbl_root_node)) {
-		index -= ACPI_NAME_SIZE;
-
-		/* Put the name into the buffer */
-
-		ACPI_MOVE_32_TO_32((name_buffer + index), &parent_node->name);
-		parent_node = acpi_ns_get_parent_node(parent_node);
-
-		/* Prefix name with the path separator */
-
-		index--;
-		name_buffer[index] = ACPI_PATH_SEPARATOR;
-	}
-
-	/* Overwrite final separator with the root prefix character */
-
-	name_buffer[index] = AML_ROOT_PREFIX;
-
-	if (index != 0) {
-		ACPI_ERROR((AE_INFO,
-			    "Could not construct external pathname; index=%X, size=%X, Path=%s",
-			    (u32) index, (u32) size, &name_buffer[size]));
-
-		return (AE_BAD_PARAMETER);
-	}
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_get_external_pathname
- *
- * PARAMETERS:  Node            - Namespace node whose pathname is needed
- *
- * RETURN:      Pointer to storage containing the fully qualified name of
- *              the node, In external format (name segments separated by path
- *              separators.)
- *
- * DESCRIPTION: Used for debug printing in acpi_ns_search_table().
- *
- ******************************************************************************/
-
-char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
-{
-	acpi_status status;
-	char *name_buffer;
-	acpi_size size;
-
-	ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node);
-
-	/* Calculate required buffer size based on depth below root */
-
-	size = acpi_ns_get_pathname_length(node);
-	if (!size) {
-		return_PTR(NULL);
-	}
-
-	/* Allocate a buffer to be returned to caller */
-
-	name_buffer = ACPI_ALLOCATE_ZEROED(size);
-	if (!name_buffer) {
-		ACPI_ERROR((AE_INFO, "Allocation failure"));
-		return_PTR(NULL);
-	}
-
-	/* Build the path in the allocated buffer */
-
-	status = acpi_ns_build_external_path(node, size, name_buffer);
-	if (ACPI_FAILURE(status)) {
-		ACPI_FREE(name_buffer);
-		return_PTR(NULL);
-	}
-
-	return_PTR(name_buffer);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_get_pathname_length
- *
- * PARAMETERS:  Node        - Namespace node
- *
- * RETURN:      Length of path, including prefix
- *
- * DESCRIPTION: Get the length of the pathname string for this node
- *
- ******************************************************************************/
-
-acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
-{
-	acpi_size size;
-	struct acpi_namespace_node *next_node;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/*
-	 * Compute length of pathname as 5 * number of name segments.
-	 * Go back up the parent tree to the root
-	 */
-	size = 0;
-	next_node = node;
-
-	while (next_node && (next_node != acpi_gbl_root_node)) {
-		if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) {
-			ACPI_ERROR((AE_INFO,
-				    "Invalid Namespace Node (%p) while traversing namespace",
-				    next_node));
-			return 0;
-		}
-		size += ACPI_PATH_SEGMENT_LENGTH;
-		next_node = acpi_ns_get_parent_node(next_node);
-	}
-
-	if (!size) {
-		size = 1;	/* Root node case */
-	}
-
-	return (size + 1);	/* +1 for null string terminator */
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_handle_to_pathname
- *
- * PARAMETERS:  target_handle           - Handle of named object whose name is
- *                                        to be found
- *              Buffer                  - Where the pathname is returned
- *
- * RETURN:      Status, Buffer is filled with pathname if status is AE_OK
- *
- * DESCRIPTION: Build and return a full namespace pathname
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_handle_to_pathname(acpi_handle target_handle,
-			   struct acpi_buffer * buffer)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node;
-	acpi_size required_size;
-
-	ACPI_FUNCTION_TRACE_PTR(ns_handle_to_pathname, target_handle);
-
-	node = acpi_ns_map_handle_to_node(target_handle);
-	if (!node) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Determine size required for the caller buffer */
-
-	required_size = acpi_ns_get_pathname_length(node);
-	if (!required_size) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Validate/Allocate/Clear caller buffer */
-
-	status = acpi_ut_initialize_buffer(buffer, required_size);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Build the path in the caller buffer */
-
-	status =
-	    acpi_ns_build_external_path(node, required_size, buffer->pointer);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%X]\n",
-			  (char *)buffer->pointer, (u32) required_size));
-	return_ACPI_STATUS(AE_OK);
-}
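Editorial aside (not part of the patch): a worked example of the sizing rule used by acpi_ns_get_pathname_length() and consumed by acpi_ns_build_external_path() above. The path shown is hypothetical; the arithmetic follows the "5 bytes per name segment" comment in the deleted code.

/*
 * Each segment costs ACPI_PATH_SEGMENT_LENGTH (5) bytes: a 4-character
 * ACPI name plus one separator or root-prefix character.  One extra
 * byte holds the terminator.
 *
 *     \_SB_.PCI0.LPCB   ->  3 segments  ->  3 * 5 + 1 = 16 bytes
 *     '\' '_SB_' '.' 'PCI0' '.' 'LPCB' plus the trailing NUL = 16 bytes
 *
 * acpi_ns_build_external_path() fills the buffer backwards: terminator
 * first, then each 4-byte name from deepest node to shallowest with a
 * '.' written before it, and finally the last separator slot is
 * overwritten with the root prefix '\'.  The root node alone is the
 * special case: 1 + 1 = 2 bytes, just "\" and the terminator.
 */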
diff --git a/drivers/acpi/namespace/nsobject.c b/drivers/acpi/namespace/nsobject.c
deleted file mode 100644
index 15fe09e..0000000
--- a/drivers/acpi/namespace/nsobject.c
+++ /dev/null
@@ -1,440 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: nsobject - Utilities for objects attached to namespace
- *                         table entries
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nsobject")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_attach_object
- *
- * PARAMETERS:  Node                - Parent Node
- *              Object              - Object to be attached
- *              Type                - Type of object, or ACPI_TYPE_ANY if not
- *                                    known
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Record the given object as the value associated with the
- *              name whose acpi_handle is passed.  If Object is NULL
- *              and Type is ACPI_TYPE_ANY, set the name as having no value.
- *              Note: Future may require that the Node->Flags field be passed
- *              as a parameter.
- *
- * MUTEX:       Assumes namespace is locked
- *
- ******************************************************************************/
-acpi_status
-acpi_ns_attach_object(struct acpi_namespace_node *node,
-		      union acpi_operand_object *object, acpi_object_type type)
-{
-	union acpi_operand_object *obj_desc;
-	union acpi_operand_object *last_obj_desc;
-	acpi_object_type object_type = ACPI_TYPE_ANY;
-
-	ACPI_FUNCTION_TRACE(ns_attach_object);
-
-	/*
-	 * Parameter validation
-	 */
-	if (!node) {
-
-		/* Invalid handle */
-
-		ACPI_ERROR((AE_INFO, "Null NamedObj handle"));
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	if (!object && (ACPI_TYPE_ANY != type)) {
-
-		/* Null object */
-
-		ACPI_ERROR((AE_INFO,
-			    "Null object, but type not ACPI_TYPE_ANY"));
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
-
-		/* Not a name handle */
-
-		ACPI_ERROR((AE_INFO, "Invalid handle %p [%s]",
-			    node, acpi_ut_get_descriptor_name(node)));
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Check if this object is already attached */
-
-	if (node->object == object) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Obj %p already installed in NameObj %p\n",
-				  object, node));
-
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* If null object, we will just install it */
-
-	if (!object) {
-		obj_desc = NULL;
-		object_type = ACPI_TYPE_ANY;
-	}
-
-	/*
-	 * If the source object is a namespace Node with an attached object,
-	 * we will use that (attached) object
-	 */
-	else if ((ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED) &&
-		 ((struct acpi_namespace_node *)object)->object) {
-		/*
-		 * Value passed is a name handle and that name has a
-		 * non-null value.  Use that name's value and type.
-		 */
-		obj_desc = ((struct acpi_namespace_node *)object)->object;
-		object_type = ((struct acpi_namespace_node *)object)->type;
-	}
-
-	/*
-	 * Otherwise, we will use the parameter object, but we must type
-	 * it first
-	 */
-	else {
-		obj_desc = (union acpi_operand_object *)object;
-
-		/* Use the given type */
-
-		object_type = type;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Installing %p into Node %p [%4.4s]\n",
-			  obj_desc, node, acpi_ut_get_node_name(node)));
-
-	/* Detach an existing attached object if present */
-
-	if (node->object) {
-		acpi_ns_detach_object(node);
-	}
-
-	if (obj_desc) {
-		/*
-		 * Must increment the new value's reference count
-		 * (if it is an internal object)
-		 */
-		acpi_ut_add_reference(obj_desc);
-
-		/*
-		 * Handle objects with multiple descriptors - walk
-		 * to the end of the descriptor list
-		 */
-		last_obj_desc = obj_desc;
-		while (last_obj_desc->common.next_object) {
-			last_obj_desc = last_obj_desc->common.next_object;
-		}
-
-		/* Install the object at the front of the object list */
-
-		last_obj_desc->common.next_object = node->object;
-	}
-
-	node->type = (u8) object_type;
-	node->object = obj_desc;
-
-	return_ACPI_STATUS(AE_OK);
-}
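
Editorial sketch: the splice above (walk the incoming object's descriptor chain to its tail, then hang the node's previous object list off that tail) is easiest to see with a plain singly linked list. The following standalone C model is illustrative only; the struct and function names are invented for this sketch and are not ACPICA code.

#include <stdio.h>
#include <stddef.h>

struct obj {                      /* stand-in for union acpi_operand_object */
	const char *name;
	struct obj *next;         /* stand-in for common.next_object */
};

/* Attach 'new_chain': its tail ends up pointing at the node's old list */
static struct obj *attach(struct obj *old_list, struct obj *new_chain)
{
	struct obj *tail = new_chain;

	while (tail->next)        /* walk to the end of the new descriptor chain */
		tail = tail->next;

	tail->next = old_list;    /* old objects follow the new ones */
	return new_chain;         /* new chain becomes the head (node->object) */
}

int main(void)
{
	struct obj data  = { "old-data",  NULL };
	struct obj value = { "new-value", NULL };
	struct obj *head = &data;

	head = attach(head, &value);
	for (struct obj *o = head; o; o = o->next)
		printf("%s\n", o->name);   /* prints new-value, then old-data */
	return 0;
}
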
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_detach_object
- *
- * PARAMETERS:  Node           - A Namespace node whose object will be detached
- *
- * RETURN:      None.
- *
- * DESCRIPTION: Detach/delete an object associated with a namespace node.
- *              If the object is an allocated object, it is freed.
- *              Otherwise, the field is simply cleared.
- *
- ******************************************************************************/
-
-void acpi_ns_detach_object(struct acpi_namespace_node *node)
-{
-	union acpi_operand_object *obj_desc;
-
-	ACPI_FUNCTION_TRACE(ns_detach_object);
-
-	obj_desc = node->object;
-
-	if (!obj_desc ||
-	    (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA)) {
-		return_VOID;
-	}
-
-	/* Clear the entry in all cases */
-
-	node->object = NULL;
-	if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_OPERAND) {
-		node->object = obj_desc->common.next_object;
-		if (node->object &&
-		    (ACPI_GET_OBJECT_TYPE(node->object) !=
-		     ACPI_TYPE_LOCAL_DATA)) {
-			node->object = node->object->common.next_object;
-		}
-	}
-
-	/* Reset the node type to untyped */
-
-	node->type = ACPI_TYPE_ANY;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Node %p [%4.4s] Object %p\n",
-			  node, acpi_ut_get_node_name(node), obj_desc));
-
-	/* Remove one reference on the object (and all subobjects) */
-
-	acpi_ut_remove_reference(obj_desc);
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_get_attached_object
- *
- * PARAMETERS:  Node             - Namespace node
- *
- * RETURN:      Current value of the object field from the Node whose
- *              handle is passed
- *
- * DESCRIPTION: Obtain the object attached to a namespace node.
- *
- ******************************************************************************/
-
-union acpi_operand_object *acpi_ns_get_attached_object(struct
-						       acpi_namespace_node
-						       *node)
-{
-	ACPI_FUNCTION_TRACE_PTR(ns_get_attached_object, node);
-
-	if (!node) {
-		ACPI_WARNING((AE_INFO, "Null Node ptr"));
-		return_PTR(NULL);
-	}
-
-	if (!node->object ||
-	    ((ACPI_GET_DESCRIPTOR_TYPE(node->object) != ACPI_DESC_TYPE_OPERAND)
-	     && (ACPI_GET_DESCRIPTOR_TYPE(node->object) !=
-		 ACPI_DESC_TYPE_NAMED))
-	    || (ACPI_GET_OBJECT_TYPE(node->object) == ACPI_TYPE_LOCAL_DATA)) {
-		return_PTR(NULL);
-	}
-
-	return_PTR(node->object);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_get_secondary_object
- *
- * PARAMETERS:  obj_desc         - Operand object whose secondary (attached)
- *                                 object is requested
- *
- * RETURN:      The secondary object, or NULL if none is attached
- *
- * DESCRIPTION: Obtain a secondary object associated with a namespace node.
- *
- ******************************************************************************/
-
-union acpi_operand_object *acpi_ns_get_secondary_object(union
-							acpi_operand_object
-							*obj_desc)
-{
-	ACPI_FUNCTION_TRACE_PTR(ns_get_secondary_object, obj_desc);
-
-	if ((!obj_desc) ||
-	    (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA) ||
-	    (!obj_desc->common.next_object) ||
-	    (ACPI_GET_OBJECT_TYPE(obj_desc->common.next_object) ==
-	     ACPI_TYPE_LOCAL_DATA)) {
-		return_PTR(NULL);
-	}
-
-	return_PTR(obj_desc->common.next_object);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_attach_data
- *
- * PARAMETERS:  Node            - Namespace node
- *              Handler         - Handler to be associated with the data
- *              Data            - Data to be attached
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Low-level attach data.  Create and attach a Data object.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_attach_data(struct acpi_namespace_node *node,
-		    acpi_object_handler handler, void *data)
-{
-	union acpi_operand_object *prev_obj_desc;
-	union acpi_operand_object *obj_desc;
-	union acpi_operand_object *data_desc;
-
-	/* We only allow one attachment per handler */
-
-	prev_obj_desc = NULL;
-	obj_desc = node->object;
-	while (obj_desc) {
-		if ((ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA) &&
-		    (obj_desc->data.handler == handler)) {
-			return (AE_ALREADY_EXISTS);
-		}
-
-		prev_obj_desc = obj_desc;
-		obj_desc = obj_desc->common.next_object;
-	}
-
-	/* Create an internal object for the data */
-
-	data_desc = acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_DATA);
-	if (!data_desc) {
-		return (AE_NO_MEMORY);
-	}
-
-	data_desc->data.handler = handler;
-	data_desc->data.pointer = data;
-
-	/* Install the data object */
-
-	if (prev_obj_desc) {
-		prev_obj_desc->common.next_object = data_desc;
-	} else {
-		node->object = data_desc;
-	}
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_detach_data
- *
- * PARAMETERS:  Node            - Namespace node
- *              Handler         - Handler associated with the data
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Low-level detach data.  Delete the data node, but the caller
- *              is responsible for the actual data.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_detach_data(struct acpi_namespace_node * node,
-		    acpi_object_handler handler)
-{
-	union acpi_operand_object *obj_desc;
-	union acpi_operand_object *prev_obj_desc;
-
-	prev_obj_desc = NULL;
-	obj_desc = node->object;
-	while (obj_desc) {
-		if ((ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA) &&
-		    (obj_desc->data.handler == handler)) {
-			if (prev_obj_desc) {
-				prev_obj_desc->common.next_object =
-				    obj_desc->common.next_object;
-			} else {
-				node->object = obj_desc->common.next_object;
-			}
-
-			acpi_ut_remove_reference(obj_desc);
-			return (AE_OK);
-		}
-
-		prev_obj_desc = obj_desc;
-		obj_desc = obj_desc->common.next_object;
-	}
-
-	return (AE_NOT_FOUND);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_get_attached_data
- *
- * PARAMETERS:  Node            - Namespace node
- *              Handler         - Handler associated with the data
- *              Data            - Where the data is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Low level interface to obtain data previously associated with
- *              a namespace node.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_get_attached_data(struct acpi_namespace_node * node,
-			  acpi_object_handler handler, void **data)
-{
-	union acpi_operand_object *obj_desc;
-
-	obj_desc = node->object;
-	while (obj_desc) {
-		if ((ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA) &&
-		    (obj_desc->data.handler == handler)) {
-			*data = obj_desc->data.pointer;
-			return (AE_OK);
-		}
-
-		obj_desc = obj_desc->common.next_object;
-	}
-
-	return (AE_NOT_FOUND);
-}
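
Editorial sketch of the three data-handler routines above (acpi_ns_attach_data / acpi_ns_detach_data / acpi_ns_get_attached_data): one attachment per handler, linear lookup, unlink on detach while the caller keeps ownership of the data itself. The standalone model below uses invented names and plain malloc/free in place of ACPICA's object cache.

#include <stdio.h>
#include <stdlib.h>

typedef void (*handler_t)(void *);

struct data_obj {                 /* stand-in for an ACPI_TYPE_LOCAL_DATA object */
	handler_t handler;
	void *pointer;
	struct data_obj *next;
};

/* One attachment per handler: fail if the handler is already present */
static int attach_data(struct data_obj **list, handler_t handler, void *data)
{
	struct data_obj **link = list;

	for (; *link; link = &(*link)->next)
		if ((*link)->handler == handler)
			return -1;                /* already exists */

	struct data_obj *d = malloc(sizeof(*d));
	if (!d)
		return -2;                        /* no memory */
	d->handler = handler;
	d->pointer = data;
	d->next = NULL;
	*link = d;                                /* install at end of list */
	return 0;
}

static int get_attached_data(struct data_obj *list, handler_t handler, void **out)
{
	for (; list; list = list->next)
		if (list->handler == handler) {
			*out = list->pointer;
			return 0;
		}
	return -1;                                /* not found */
}

static int detach_data(struct data_obj **list, handler_t handler)
{
	for (struct data_obj **link = list; *link; link = &(*link)->next)
		if ((*link)->handler == handler) {
			struct data_obj *gone = *link;
			*link = gone->next;       /* unlink; caller owns the data */
			free(gone);
			return 0;
		}
	return -1;                                /* not found */
}

static void my_handler(void *p) { (void)p; }

int main(void)
{
	struct data_obj *list = NULL;
	void *out;

	attach_data(&list, my_handler, "payload");
	if (get_attached_data(list, my_handler, &out) == 0)
		printf("found: %s\n", (char *)out);
	detach_data(&list, my_handler);
	return 0;
}
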
diff --git a/drivers/acpi/namespace/nsparse.c b/drivers/acpi/namespace/nsparse.c
deleted file mode 100644
index a82271a..0000000
--- a/drivers/acpi/namespace/nsparse.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/******************************************************************************
- *
- * Module Name: nsparse - namespace interface to AML parser
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acparser.h>
-#include <acpi/acdispat.h>
-#include <acpi/actables.h>
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nsparse")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_one_complete_parse
- *
- * PARAMETERS:  pass_number             - 1 or 2
- *              table_index             - Index of the table to be parsed
- *              start_node              - Where to enter the table into the namespace
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Perform one complete parse of an ACPI/AML table.
- *
- ******************************************************************************/
-acpi_status
-acpi_ns_one_complete_parse(u32 pass_number,
-			   u32 table_index,
-			   struct acpi_namespace_node *start_node)
-{
-	union acpi_parse_object *parse_root;
-	acpi_status status;
-	u32 aml_length;
-	u8 *aml_start;
-	struct acpi_walk_state *walk_state;
-	struct acpi_table_header *table;
-	acpi_owner_id owner_id;
-
-	ACPI_FUNCTION_TRACE(ns_one_complete_parse);
-
-	status = acpi_tb_get_owner_id(table_index, &owner_id);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Create and init a Root Node */
-
-	parse_root = acpi_ps_create_scope_op();
-	if (!parse_root) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Create and initialize a new walk state */
-
-	walk_state = acpi_ds_create_walk_state(owner_id, NULL, NULL, NULL);
-	if (!walk_state) {
-		acpi_ps_free_op(parse_root);
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	status = acpi_get_table_by_index(table_index, &table);
-	if (ACPI_FAILURE(status)) {
-		acpi_ds_delete_walk_state(walk_state);
-		acpi_ps_free_op(parse_root);
-		return_ACPI_STATUS(status);
-	}
-
-	/* Table must consist of at least a complete header */
-
-	if (table->length < sizeof(struct acpi_table_header)) {
-		status = AE_BAD_HEADER;
-	} else {
-		aml_start = (u8 *) table + sizeof(struct acpi_table_header);
-		aml_length = table->length - sizeof(struct acpi_table_header);
-		status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
-					       aml_start, aml_length, NULL,
-					       (u8) pass_number);
-	}
-
-	if (ACPI_FAILURE(status)) {
-		acpi_ds_delete_walk_state(walk_state);
-		goto cleanup;
-	}
-
-	/* start_node is the default location to load the table */
-
-	if (start_node && start_node != acpi_gbl_root_node) {
-		status =
-		    acpi_ds_scope_stack_push(start_node, ACPI_TYPE_METHOD,
-					     walk_state);
-		if (ACPI_FAILURE(status)) {
-			acpi_ds_delete_walk_state(walk_state);
-			goto cleanup;
-		}
-	}
-
-	/* Parse the AML */
-
-	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %d parse\n",
-			  (unsigned)pass_number));
-	status = acpi_ps_parse_aml(walk_state);
-
-      cleanup:
-	acpi_ps_delete_parse_tree(parse_root);
-	return_ACPI_STATUS(status);
-}
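
Editorial sketch of the header/AML split checked above: everything after the standard table header is the AML byte stream, so the start pointer and length fall out of simple pointer arithmetic, and a table shorter than its own header is rejected. The header struct below is a much-simplified stand-in (the real struct acpi_table_header carries more fields); all names are illustrative.

#include <stdint.h>
#include <stdio.h>

struct table_header {             /* simplified ACPI table header */
	char signature[4];
	uint32_t length;          /* length of the whole table, header included */
};

int main(void)
{
	uint8_t table[40] = { 'D', 'S', 'D', 'T' };   /* fake table buffer */
	struct table_header hdr = { {'D', 'S', 'D', 'T'}, sizeof(table) };

	if (hdr.length < sizeof(struct table_header)) {
		puts("bad header: table shorter than its own header");
		return 1;
	}

	uint8_t *aml_start  = table + sizeof(struct table_header);
	uint32_t aml_length = hdr.length - (uint32_t)sizeof(struct table_header);

	printf("AML starts at offset %zu, %u bytes\n",
	       (size_t)(aml_start - table), aml_length);
	return 0;
}
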
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_parse_table
- *
- * PARAMETERS:  table_index     - Index of the installed table to parse
- *              start_node      - Where to enter the table into the namespace
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Parse AML within an ACPI table and return a tree of ops
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ns_parse_table);
-
-	/*
-	 * AML Parse, pass 1
-	 *
-	 * In this pass, we load most of the namespace.  Control methods
-	 * are not parsed until later.  A parse tree is not created.  Instead,
-	 * each Parser Op subtree is deleted when it is finished.  This saves
-	 * a great deal of memory, and allows a small cache of parse objects
-	 * to service the entire parse.  The second pass of the parse then
-	 * performs another complete parse of the AML.
-	 */
-	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n"));
-	status =
-	    acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, table_index,
-				       start_node);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * AML Parse, pass 2
-	 *
-	 * In this pass, we resolve forward references and other things
-	 * that could not be completed during the first pass.
-	 * Another complete parse of the AML is performed, but the
-	 * overhead of this is compensated for by the fact that the
-	 * parse objects are all cached.
-	 */
-	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n"));
-	status =
-	    acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, table_index,
-				       start_node);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	return_ACPI_STATUS(status);
-}
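
Editorial sketch of the two-pass control flow described in the comments above: pass 1 loads names while discarding each parser subtree to bound memory use, and pass 2 re-parses the same AML to resolve forward references. The constants and stub function below are invented for this sketch, not ACPICA interfaces.

#include <stdio.h>

enum { LOAD_PASS1 = 1, LOAD_PASS2 = 2 };   /* stand-ins for ACPI_IMODE_LOAD_PASS1/2 */

/* Stub: a real implementation would walk the AML of the given table */
static int one_complete_parse(int pass_number, unsigned table_index)
{
	printf("pass %d over table %u\n", pass_number, table_index);
	return 0;
}

static int parse_table(unsigned table_index)
{
	int status;

	status = one_complete_parse(LOAD_PASS1, table_index);  /* load names, drop subtrees */
	if (status)
		return status;

	return one_complete_parse(LOAD_PASS2, table_index);    /* resolve forward references */
}

int main(void)
{
	return parse_table(0);
}
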
diff --git a/drivers/acpi/namespace/nspredef.c b/drivers/acpi/namespace/nspredef.c
deleted file mode 100644
index 0f17cf0..0000000
--- a/drivers/acpi/namespace/nspredef.c
+++ /dev/null
@@ -1,900 +0,0 @@
-/******************************************************************************
- *
- * Module Name: nspredef - Validation of ACPI predefined methods and objects
- *              $Revision: 1.1 $
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acpredef.h>
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nspredef")
-
-/*******************************************************************************
- *
- * This module validates predefined ACPI objects that appear in the namespace,
- * at the time they are evaluated (via acpi_evaluate_object). The purpose of this
- * validation is to detect problems with BIOS-exposed predefined ACPI objects
- * before the results are returned to the ACPI-related drivers.
- *
- * There are several areas that are validated:
- *
- *  1) The number of input arguments as defined by the method/object in the
- *      ASL is validated against the ACPI specification.
- *  2) The type of the return object (if any) is validated against the ACPI
- *      specification.
- *  3) For returned package objects, the count of package elements is
- *      validated, as well as the type of each package element. Nested
- *      packages are supported.
- *
- * For any problems found, a warning message is issued.
- *
- ******************************************************************************/
-/* Local prototypes */
-static acpi_status
-acpi_ns_check_package(char *pathname,
-		      union acpi_operand_object *return_object,
-		      const union acpi_predefined_info *predefined);
-
-static acpi_status
-acpi_ns_check_package_elements(char *pathname,
-			       union acpi_operand_object **elements,
-			       u8 type1, u32 count1, u8 type2, u32 count2);
-
-static acpi_status
-acpi_ns_check_object_type(char *pathname,
-			  union acpi_operand_object *return_object,
-			  u32 expected_btypes, u32 package_index);
-
-static acpi_status
-acpi_ns_check_reference(char *pathname,
-			union acpi_operand_object *return_object);
-
-/*
- * Names for the types that can be returned by the predefined objects.
- * Used for warning messages. Must be in the same order as the ACPI_RTYPEs
- */
-static const char *acpi_rtype_names[] = {
-	"/Integer",
-	"/String",
-	"/Buffer",
-	"/Package",
-	"/Reference",
-};
-
-#define ACPI_NOT_PACKAGE    ACPI_UINT32_MAX
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_check_predefined_names
- *
- * PARAMETERS:  Node            - Namespace node for the method/object
- *              return_object   - Object returned from the evaluation of this
- *                                method/object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Check an ACPI name for a match in the predefined name list.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
-			       union acpi_operand_object *return_object)
-{
-	acpi_status status = AE_OK;
-	const union acpi_predefined_info *predefined;
-	char *pathname;
-
-	/* Match the name for this method/object against the predefined list */
-
-	predefined = acpi_ns_check_for_predefined_name(node);
-	if (!predefined) {
-
-		/* Name was not one of the predefined names */
-
-		return (AE_OK);
-	}
-
-	/* Get the full pathname to the object, for use in error messages */
-
-	pathname = acpi_ns_get_external_pathname(node);
-	if (!pathname) {
-		pathname = ACPI_CAST_PTR(char, predefined->info.name);
-	}
-
-	/*
-	 * Check that the parameter count for this method is in accordance
-	 * with the ACPI specification.
-	 */
-	acpi_ns_check_parameter_count(pathname, node, predefined);
-
-	/*
-	 * If there is no return value, check if we require a return value for
-	 * this predefined name. Either one return value is expected, or none,
-	 * for both methods and other objects.
-	 *
-	 * Exit now if there is no return object. Warning if one was expected.
-	 */
-	if (!return_object) {
-		if ((predefined->info.expected_btypes) &&
-		    (!(predefined->info.expected_btypes & ACPI_RTYPE_NONE))) {
-			ACPI_ERROR((AE_INFO,
-				    "%s: Missing expected return value",
-				    pathname));
-
-			status = AE_AML_NO_RETURN_VALUE;
-		}
-		goto exit;
-	}
-
-	/*
-	 * We have a return value, but if one wasn't expected, just exit; this is
-	 * not a problem.
-	 *
-	 * For example, if "Implicit return value" is enabled, methods will
-	 * always return a value
-	 */
-	if (!predefined->info.expected_btypes) {
-		goto exit;
-	}
-
-	/*
-	 * Check that the type of the return object is what is expected for
-	 * this predefined name
-	 */
-	status = acpi_ns_check_object_type(pathname, return_object,
-					   predefined->info.expected_btypes,
-					   ACPI_NOT_PACKAGE);
-	if (ACPI_FAILURE(status)) {
-		goto exit;
-	}
-
-	/* For returned Package objects, check the type of all sub-objects */
-
-	if (ACPI_GET_OBJECT_TYPE(return_object) == ACPI_TYPE_PACKAGE) {
-		status =
-		    acpi_ns_check_package(pathname, return_object, predefined);
-	}
-
-      exit:
-	if (pathname) {
-		ACPI_FREE(pathname);
-	}
-
-	return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_check_parameter_count
- *
- * PARAMETERS:  Pathname        - Full pathname to the node (for error msgs)
- *              Node            - Namespace node for the method/object
- *              Predefined      - Pointer to entry in predefined name table
- *
- * RETURN:      None
- *
- * DESCRIPTION: Check that the declared (in ASL/AML) parameter count for a
- *              predefined name is what is expected (i.e., what is defined in
- *              the ACPI specification for this predefined name.)
- *
- ******************************************************************************/
-
-void
-acpi_ns_check_parameter_count(char *pathname,
-			      struct acpi_namespace_node *node,
-			      const union acpi_predefined_info *predefined)
-{
-	u32 param_count;
-	u32 required_params_current;
-	u32 required_params_old;
-
-	/*
-	 * Check that the ASL-defined parameter count is what is expected for
-	 * this predefined name.
-	 *
-	 * Methods have 0-7 parameters. All other types have zero.
-	 */
-	param_count = 0;
-	if (node->type == ACPI_TYPE_METHOD) {
-		param_count = node->object->method.param_count;
-	}
-
-	/* Validate parameter count - allow two different legal counts (_SCP) */
-
-	required_params_current = predefined->info.param_count & 0x0F;
-	required_params_old = predefined->info.param_count >> 4;
-
-	if ((param_count != required_params_current) &&
-	    (param_count != required_params_old)) {
-		ACPI_WARNING((AE_INFO,
-			      "%s: Parameter count mismatch - ASL declared %d, expected %d",
-			      pathname, param_count, required_params_current));
-	}
-}
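
Editorial sketch of the packed parameter-count decode used above: the low nibble holds the count required by the current ACPI specification and the high nibble an older, still-accepted count (the _SCP case mentioned in the comment). The packed value used in main() is illustrative.

#include <stdio.h>
#include <stdint.h>

/* Decode a packed parameter-count byte: low nibble = current, high nibble = old */
static int param_count_ok(uint8_t packed, unsigned declared)
{
	unsigned required_current = packed & 0x0F;
	unsigned required_old     = packed >> 4;

	return declared == required_current || declared == required_old;
}

int main(void)
{
	uint8_t scp_like = 0x13;   /* illustrative: 3 args currently, 1 arg historically */

	printf("1 arg:  %s\n", param_count_ok(scp_like, 1) ? "ok" : "mismatch");
	printf("3 args: %s\n", param_count_ok(scp_like, 3) ? "ok" : "mismatch");
	printf("2 args: %s\n", param_count_ok(scp_like, 2) ? "ok" : "mismatch");
	return 0;
}
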
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_check_for_predefined_name
- *
- * PARAMETERS:  Node            - Namespace node for the method/object
- *
- * RETURN:      Pointer to entry in predefined table. NULL indicates not found.
- *
- * DESCRIPTION: Check an object name against the predefined object list.
- *
- ******************************************************************************/
-
-const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
-								    acpi_namespace_node
-								    *node)
-{
-	const union acpi_predefined_info *this_name;
-
-	/* Quick check for a predefined name, first character must be underscore */
-
-	if (node->name.ascii[0] != '_') {
-		return (NULL);
-	}
-
-	/* Search info table for a predefined method/object name */
-
-	this_name = predefined_names;
-	while (this_name->info.name[0]) {
-		if (ACPI_COMPARE_NAME(node->name.ascii, this_name->info.name)) {
-
-			/* Return pointer to this table entry */
-
-			return (this_name);
-		}
-
-		/*
-		 * Skip next entry in the table if this name returns a Package
-		 * (next entry contains the package info)
-		 */
-		if (this_name->info.expected_btypes & ACPI_RTYPE_PACKAGE) {
-			this_name++;
-		}
-
-		this_name++;
-	}
-
-	return (NULL);
-}
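
Editorial sketch of the table walk above: an entry whose expected return types include Package is followed by a second slot describing the package layout, so the search must step over that extra slot when the name does not match. The entry names, flag bits, and two-slot layout below are a simplified model, not the real predefined_names table.

#include <stdio.h>
#include <string.h>

#define RTYPE_INTEGER  0x01
#define RTYPE_PACKAGE  0x08      /* illustrative bit values */

struct predef_entry {
	char name[5];            /* empty for a package-info slot or the terminator */
	unsigned expected_btypes;
};

/* Names returning a Package occupy two consecutive slots */
static const struct predef_entry table[] = {
	{ "_STA", RTYPE_INTEGER },
	{ "_PRT", RTYPE_PACKAGE },
	{ "",     0 },           /* package-info slot for _PRT (layout data in the real table) */
	{ "_ADR", RTYPE_INTEGER },
	{ "",     0 },           /* terminator */
};

static const struct predef_entry *find_predefined(const char *name)
{
	const struct predef_entry *e = table;

	if (name[0] != '_')      /* quick reject: predefined names start with '_' */
		return NULL;

	while (e->name[0]) {
		if (strncmp(e->name, name, 4) == 0)
			return e;
		if (e->expected_btypes & RTYPE_PACKAGE)
			e++;     /* skip the extra package-info slot */
		e++;
	}
	return NULL;
}

int main(void)
{
	printf("_ADR found: %s\n", find_predefined("_ADR") ? "yes" : "no");
	printf("_FOO found: %s\n", find_predefined("_FOO") ? "yes" : "no");
	return 0;
}
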
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_check_package
- *
- * PARAMETERS:  Pathname        - Full pathname to the node (for error msgs)
- *              return_object   - Object returned from the evaluation of a
- *                                method or object
- *              Predefined      - Pointer to entry in predefined name table
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Check a returned package object for the correct count and
- *              correct type of all sub-objects.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_check_package(char *pathname,
-		      union acpi_operand_object *return_object,
-		      const union acpi_predefined_info *predefined)
-{
-	const union acpi_predefined_info *package;
-	union acpi_operand_object *sub_package;
-	union acpi_operand_object **elements;
-	union acpi_operand_object **sub_elements;
-	acpi_status status;
-	u32 expected_count;
-	u32 count;
-	u32 i;
-	u32 j;
-
-	ACPI_FUNCTION_NAME(ns_check_package);
-
-	/* The package info for this name is in the next table entry */
-
-	package = predefined + 1;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-			  "%s Validating return Package of Type %X, Count %X\n",
-			  pathname, package->ret_info.type,
-			  return_object->package.count));
-
-	/* Extract package count and elements array */
-
-	elements = return_object->package.elements;
-	count = return_object->package.count;
-
-	/* The package must have at least one element, else invalid */
-
-	if (!count) {
-		ACPI_WARNING((AE_INFO,
-			      "%s: Return Package has no elements (empty)",
-			      pathname));
-
-		return (AE_AML_OPERAND_VALUE);
-	}
-
-	/*
-	 * Decode the type of the expected package contents
-	 *
-	 * PTYPE1 packages contain no subpackages
-	 * PTYPE2 packages contain sub-packages
-	 */
-	switch (package->ret_info.type) {
-	case ACPI_PTYPE1_FIXED:
-
-		/*
-		 * The package count is fixed and there are no sub-packages
-		 *
-		 * If package is too small, exit.
-		 * If package is larger than expected, issue warning but continue
-		 */
-		expected_count =
-		    package->ret_info.count1 + package->ret_info.count2;
-		if (count < expected_count) {
-			goto package_too_small;
-		} else if (count > expected_count) {
-			ACPI_WARNING((AE_INFO,
-				      "%s: Return Package is larger than needed - "
-				      "found %u, expected %u", pathname, count,
-				      expected_count));
-		}
-
-		/* Validate all elements of the returned package */
-
-		status = acpi_ns_check_package_elements(pathname, elements,
-							package->ret_info.
-							object_type1,
-							package->ret_info.
-							count1,
-							package->ret_info.
-							object_type2,
-							package->ret_info.
-							count2);
-		if (ACPI_FAILURE(status)) {
-			return (status);
-		}
-		break;
-
-	case ACPI_PTYPE1_VAR:
-
-		/*
-		 * The package count is variable, there are no sub-packages, and all
-		 * elements must be of the same type
-		 */
-		for (i = 0; i < count; i++) {
-			status = acpi_ns_check_object_type(pathname, *elements,
-							   package->ret_info.
-							   object_type1, i);
-			if (ACPI_FAILURE(status)) {
-				return (status);
-			}
-			elements++;
-		}
-		break;
-
-	case ACPI_PTYPE1_OPTION:
-
-		/*
-		 * The package count is variable, there are no sub-packages. There are
-		 * a fixed number of required elements, and a variable number of
-		 * optional elements.
-		 *
-		 * Check if package is at least as large as the minimum required
-		 */
-		expected_count = package->ret_info3.count;
-		if (count < expected_count) {
-			goto package_too_small;
-		}
-
-		/* Variable number of sub-objects */
-
-		for (i = 0; i < count; i++) {
-			if (i < package->ret_info3.count) {
-
-				/* These are the required package elements (0, 1, or 2) */
-
-				status =
-				    acpi_ns_check_object_type(pathname,
-							      *elements,
-							      package->
-							      ret_info3.
-							      object_type[i],
-							      i);
-				if (ACPI_FAILURE(status)) {
-					return (status);
-				}
-			} else {
-				/* These are the optional package elements */
-
-				status =
-				    acpi_ns_check_object_type(pathname,
-							      *elements,
-							      package->
-							      ret_info3.
-							      tail_object_type,
-							      i);
-				if (ACPI_FAILURE(status)) {
-					return (status);
-				}
-			}
-			elements++;
-		}
-		break;
-
-	case ACPI_PTYPE2_PKG_COUNT:
-
-		/* First element is the (Integer) count of sub-packages to follow */
-
-		status = acpi_ns_check_object_type(pathname, *elements,
-						   ACPI_RTYPE_INTEGER, 0);
-		if (ACPI_FAILURE(status)) {
-			return (status);
-		}
-
-		/*
-		 * Count cannot be larger than the parent package length, but allow it
-		 * to be smaller. The >= accounts for the Integer above.
-		 */
-		expected_count = (u32) (*elements)->integer.value;
-		if (expected_count >= count) {
-			goto package_too_small;
-		}
-
-		count = expected_count;
-		elements++;
-
-		/* Now we can walk the sub-packages */
-
-		/*lint -fallthrough */
-
-	case ACPI_PTYPE2:
-	case ACPI_PTYPE2_FIXED:
-	case ACPI_PTYPE2_MIN:
-	case ACPI_PTYPE2_COUNT:
-
-		/*
-		 * These types all return a single package that consists of a variable
-		 * number of sub-packages
-		 */
-		for (i = 0; i < count; i++) {
-			sub_package = *elements;
-			sub_elements = sub_package->package.elements;
-
-			/* Each sub-object must be of type Package */
-
-			status =
-			    acpi_ns_check_object_type(pathname, sub_package,
-						      ACPI_RTYPE_PACKAGE, i);
-			if (ACPI_FAILURE(status)) {
-				return (status);
-			}
-
-			/* Examine the different types of sub-packages */
-
-			switch (package->ret_info.type) {
-			case ACPI_PTYPE2:
-			case ACPI_PTYPE2_PKG_COUNT:
-
-				/* Each subpackage has a fixed number of elements */
-
-				expected_count =
-				    package->ret_info.count1 +
-				    package->ret_info.count2;
-				if (sub_package->package.count !=
-				    expected_count) {
-					count = sub_package->package.count;
-					goto package_too_small;
-				}
-
-				status =
-				    acpi_ns_check_package_elements(pathname,
-								   sub_elements,
-								   package->
-								   ret_info.
-								   object_type1,
-								   package->
-								   ret_info.
-								   count1,
-								   package->
-								   ret_info.
-								   object_type2,
-								   package->
-								   ret_info.
-								   count2);
-				if (ACPI_FAILURE(status)) {
-					return (status);
-				}
-				break;
-
-			case ACPI_PTYPE2_FIXED:
-
-				/* Each sub-package has a fixed length */
-
-				expected_count = package->ret_info2.count;
-				if (sub_package->package.count < expected_count) {
-					count = sub_package->package.count;
-					goto package_too_small;
-				}
-
-				/* Check the type of each sub-package element */
-
-				for (j = 0; j < expected_count; j++) {
-					status =
-					    acpi_ns_check_object_type(pathname,
-								      sub_elements
-								      [j],
-								      package->
-								      ret_info2.
-								      object_type
-								      [j], j);
-					if (ACPI_FAILURE(status)) {
-						return (status);
-					}
-				}
-				break;
-
-			case ACPI_PTYPE2_MIN:
-
-				/* Each sub-package has a variable but minimum length */
-
-				expected_count = package->ret_info.count1;
-				if (sub_package->package.count < expected_count) {
-					count = sub_package->package.count;
-					goto package_too_small;
-				}
-
-				/* Check the type of each sub-package element */
-
-				status =
-				    acpi_ns_check_package_elements(pathname,
-								   sub_elements,
-								   package->
-								   ret_info.
-								   object_type1,
-								   sub_package->
-								   package.
-								   count, 0, 0);
-				if (ACPI_FAILURE(status)) {
-					return (status);
-				}
-				break;
-
-			case ACPI_PTYPE2_COUNT:
-
-				/* First element is the (Integer) count of elements to follow */
-
-				status =
-				    acpi_ns_check_object_type(pathname,
-							      *sub_elements,
-							      ACPI_RTYPE_INTEGER,
-							      0);
-				if (ACPI_FAILURE(status)) {
-					return (status);
-				}
-
-				/* Make sure package is large enough for the Count */
-
-				expected_count =
-				    (u32) (*sub_elements)->integer.value;
-				if (sub_package->package.count < expected_count) {
-					count = sub_package->package.count;
-					goto package_too_small;
-				}
-
-				/* Check the type of each sub-package element */
-
-				status =
-				    acpi_ns_check_package_elements(pathname,
-								   (sub_elements
-								    + 1),
-								   package->
-								   ret_info.
-								   object_type1,
-								   (expected_count
-								    - 1), 0, 0);
-				if (ACPI_FAILURE(status)) {
-					return (status);
-				}
-				break;
-
-			default:
-				break;
-			}
-
-			elements++;
-		}
-		break;
-
-	default:
-
-		/* Should not get here if predefined info table is correct */
-
-		ACPI_WARNING((AE_INFO,
-			      "%s: Invalid internal return type in table entry: %X",
-			      pathname, package->ret_info.type));
-
-		return (AE_AML_INTERNAL);
-	}
-
-	return (AE_OK);
-
-      package_too_small:
-
-	/* Error exit for the case with an incorrect package count */
-
-	ACPI_WARNING((AE_INFO, "%s: Return Package is too small - "
-		      "found %u, expected %u", pathname, count,
-		      expected_count));
-
-	return (AE_AML_OPERAND_VALUE);
-}
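
Editorial sketch of the ACPI_PTYPE1_FIXED count policy above: an empty package is an error, a short package is an error, and an oversized package only draws a warning before validation continues. Status codes and the example pathname below are illustrative.

#include <stdio.h>

enum { OK = 0, ERR_OPERAND_VALUE = -1 };

/* Validate the element count of a fixed-length returned package */
static int check_fixed_package_count(const char *pathname,
				     unsigned count, unsigned expected_count)
{
	if (count == 0) {
		fprintf(stderr, "%s: Return Package has no elements (empty)\n", pathname);
		return ERR_OPERAND_VALUE;
	}
	if (count < expected_count) {
		fprintf(stderr, "%s: Return Package is too small - found %u, expected %u\n",
			pathname, count, expected_count);
		return ERR_OPERAND_VALUE;
	}
	if (count > expected_count)
		fprintf(stderr, "%s: Return Package is larger than needed - found %u, expected %u\n",
			pathname, count, expected_count);   /* warn, but continue */
	return OK;
}

int main(void)
{
	check_fixed_package_count("\\_SB.DEV0", 3, 4);   /* too small: error */
	check_fixed_package_count("\\_SB.DEV0", 5, 4);   /* oversized: warning only */
	return check_fixed_package_count("\\_SB.DEV0", 4, 4) == OK ? 0 : 1;
}
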
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_check_package_elements
- *
- * PARAMETERS:  Pathname        - Full pathname to the node (for error msgs)
- *              Elements        - Pointer to the package elements array
- *              Type1           - Object type for first group
- *              Count1          - Count for first group
- *              Type2           - Object type for second group
- *              Count2          - Count for second group
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Check that all elements of a package are of the correct object
- *              type. Supports up to two groups of different object types.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_check_package_elements(char *pathname,
-			       union acpi_operand_object **elements,
-			       u8 type1, u32 count1, u8 type2, u32 count2)
-{
-	union acpi_operand_object **this_element = elements;
-	acpi_status status;
-	u32 i;
-
-	/*
-	 * Up to two groups of package elements are supported by the data
-	 * structure. All elements in each group must be of the same type.
-	 * The second group can have a count of zero.
-	 */
-	for (i = 0; i < count1; i++) {
-		status = acpi_ns_check_object_type(pathname, *this_element,
-						   type1, i);
-		if (ACPI_FAILURE(status)) {
-			return (status);
-		}
-		this_element++;
-	}
-
-	for (i = 0; i < count2; i++) {
-		status = acpi_ns_check_object_type(pathname, *this_element,
-						   type2, (i + count1));
-		if (ACPI_FAILURE(status)) {
-			return (status);
-		}
-		this_element++;
-	}
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_check_object_type
- *
- * PARAMETERS:  Pathname        - Full pathname to the node (for error msgs)
- *              return_object   - Object returned from the execution of this
- *                                method/object
- *              expected_btypes - Bitmap of expected return type(s)
- *              package_index   - Index of object within parent package (if
- *                                applicable - ACPI_NOT_PACKAGE otherwise)
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Check the type of the return object against the expected object
- *              type(s). Use of Btype allows multiple expected object types.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_check_object_type(char *pathname,
-			  union acpi_operand_object *return_object,
-			  u32 expected_btypes, u32 package_index)
-{
-	acpi_status status = AE_OK;
-	u32 return_btype;
-	char type_buffer[48];	/* Room for 5 types */
-	u32 this_rtype;
-	u32 i;
-	u32 j;
-
-	/*
-	 * If we get a NULL return_object here, it is a NULL package element,
-	 * and this is always an error.
-	 */
-	if (!return_object) {
-		goto type_error_exit;
-	}
-
-	/* A Namespace node should not get here, but make sure */
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(return_object) == ACPI_DESC_TYPE_NAMED) {
-		ACPI_WARNING((AE_INFO,
-			      "%s: Invalid return type - Found a Namespace node [%4.4s] type %s",
-			      pathname, return_object->node.name.ascii,
-			      acpi_ut_get_type_name(return_object->node.type)));
-		return (AE_AML_OPERAND_TYPE);
-	}
-
-	/*
-	 * Convert the object type (ACPI_TYPE_xxx) to a bitmapped object type.
-	 * The bitmapped type allows multiple possible return types.
-	 *
-	 * Note, the cases below must handle all of the possible types returned
-	 * from all of the predefined names (including elements of returned
-	 * packages)
-	 */
-	switch (ACPI_GET_OBJECT_TYPE(return_object)) {
-	case ACPI_TYPE_INTEGER:
-		return_btype = ACPI_RTYPE_INTEGER;
-		break;
-
-	case ACPI_TYPE_BUFFER:
-		return_btype = ACPI_RTYPE_BUFFER;
-		break;
-
-	case ACPI_TYPE_STRING:
-		return_btype = ACPI_RTYPE_STRING;
-		break;
-
-	case ACPI_TYPE_PACKAGE:
-		return_btype = ACPI_RTYPE_PACKAGE;
-		break;
-
-	case ACPI_TYPE_LOCAL_REFERENCE:
-		return_btype = ACPI_RTYPE_REFERENCE;
-		break;
-
-	default:
-		/* Not one of the supported objects, must be incorrect */
-
-		goto type_error_exit;
-	}
-
-	/* Is the object one of the expected types? */
-
-	if (!(return_btype & expected_btypes)) {
-		goto type_error_exit;
-	}
-
-	/* For reference objects, check that the reference type is correct */
-
-	if (ACPI_GET_OBJECT_TYPE(return_object) == ACPI_TYPE_LOCAL_REFERENCE) {
-		status = acpi_ns_check_reference(pathname, return_object);
-	}
-
-	return (status);
-
-      type_error_exit:
-
-	/* Create a string with all expected types for this predefined object */
-
-	j = 1;
-	type_buffer[0] = 0;
-	this_rtype = ACPI_RTYPE_INTEGER;
-
-	for (i = 0; i < ACPI_NUM_RTYPES; i++) {
-
-		/* If one of the expected types, concatenate the name of this type */
-
-		if (expected_btypes & this_rtype) {
-			ACPI_STRCAT(type_buffer, &acpi_rtype_names[i][j]);
-			j = 0;	/* Use name separator from now on */
-		}
-		this_rtype <<= 1;	/* Next Rtype */
-	}
-
-	if (package_index == ACPI_NOT_PACKAGE) {
-		ACPI_WARNING((AE_INFO,
-			      "%s: Return type mismatch - found %s, expected %s",
-			      pathname,
-			      acpi_ut_get_object_type_name(return_object),
-			      type_buffer));
-	} else {
-		ACPI_WARNING((AE_INFO,
-			      "%s: Return Package type mismatch at index %u - "
-			      "found %s, expected %s", pathname, package_index,
-			      acpi_ut_get_object_type_name(return_object),
-			      type_buffer));
-	}
-
-	return (AE_AML_OPERAND_TYPE);
-}
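
Editorial sketch of the type-to-bitmap conversion above, which lets one predefined-name table entry accept several return types at once (for example Integer or Buffer). The enum and bit values below are chosen for the sketch and are not ACPICA's actual constants.

#include <stdio.h>

/* Concrete object types (subset, illustrative values) */
enum obj_type { TYPE_INTEGER, TYPE_STRING, TYPE_BUFFER, TYPE_PACKAGE, TYPE_REFERENCE, TYPE_OTHER };

/* Bitmapped return types, so one expectation can name several at once */
#define RTYPE_INTEGER   0x01
#define RTYPE_STRING    0x02
#define RTYPE_BUFFER    0x04
#define RTYPE_PACKAGE   0x08
#define RTYPE_REFERENCE 0x10

static unsigned to_btype(enum obj_type t)
{
	switch (t) {
	case TYPE_INTEGER:   return RTYPE_INTEGER;
	case TYPE_STRING:    return RTYPE_STRING;
	case TYPE_BUFFER:    return RTYPE_BUFFER;
	case TYPE_PACKAGE:   return RTYPE_PACKAGE;
	case TYPE_REFERENCE: return RTYPE_REFERENCE;
	default:             return 0;   /* not a type a predefined name may return */
	}
}

static int type_ok(enum obj_type returned, unsigned expected_btypes)
{
	unsigned b = to_btype(returned);
	return b && (b & expected_btypes);
}

int main(void)
{
	unsigned expects_int_or_buf = RTYPE_INTEGER | RTYPE_BUFFER;

	printf("Integer accepted: %d\n", type_ok(TYPE_INTEGER, expects_int_or_buf));
	printf("Package accepted: %d\n", type_ok(TYPE_PACKAGE, expects_int_or_buf));
	return 0;
}
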
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_check_reference
- *
- * PARAMETERS:  Pathname        - Full pathname to the node (for error msgs)
- *              return_object   - Object returned from the evaluation of a
- *                                method or object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Check a returned reference object for the correct reference
- *              type. The only reference type that can be returned from a
- *              predefined method is a named reference. All others are invalid.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_check_reference(char *pathname,
-			union acpi_operand_object *return_object)
-{
-
-	/*
-	 * Check the reference object for the correct reference type (opcode).
-	 * The only type of reference that can be converted to a union acpi_object is
-	 * a reference to a named object (reference class: NAME)
-	 */
-	if (return_object->reference.class == ACPI_REFCLASS_NAME) {
-		return (AE_OK);
-	}
-
-	ACPI_WARNING((AE_INFO,
-		      "%s: Return type mismatch - unexpected reference object type [%s] %2.2X",
-		      pathname, acpi_ut_get_reference_name(return_object),
-		      return_object->reference.class));
-
-	return (AE_AML_OPERAND_TYPE);
-}
diff --git a/drivers/acpi/namespace/nssearch.c b/drivers/acpi/namespace/nssearch.c
deleted file mode 100644
index a9a80bf..0000000
--- a/drivers/acpi/namespace/nssearch.c
+++ /dev/null
@@ -1,414 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: nssearch - Namespace search
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nssearch")
-
-/* Local prototypes */
-static acpi_status
-acpi_ns_search_parent_tree(u32 target_name,
-			   struct acpi_namespace_node *node,
-			   acpi_object_type type,
-			   struct acpi_namespace_node **return_node);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_search_one_scope
- *
- * PARAMETERS:  target_name     - Ascii ACPI name to search for
- *              parent_node     - Starting node where search will begin
- *              Type            - Object type to match
- *              return_node     - Where the matched Named obj is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Search a single level of the namespace. Performs a
- *              simple search of the specified level, and does not add
- *              entries or search parents.
- *
- *
- *      Named object lists are built (and subsequently dumped) in the
- *      order in which the names are encountered during the namespace load;
- *
- *      All namespace searching is linear in this implementation, but
- *      could be easily modified to support any improved search
- *      algorithm. However, the linear search was chosen for simplicity
- *      and because the trees are small and the other interpreter
- *      execution overhead is relatively high.
- *
- *      Note: CPU execution analysis has shown that the AML interpreter spends
- *      a very small percentage of its time searching the namespace. Therefore,
- *      the linear search seems to be sufficient; there is little value in
- *      improving it.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_search_one_scope(u32 target_name,
-			 struct acpi_namespace_node *parent_node,
-			 acpi_object_type type,
-			 struct acpi_namespace_node **return_node)
-{
-	struct acpi_namespace_node *node;
-
-	ACPI_FUNCTION_TRACE(ns_search_one_scope);
-
-#ifdef ACPI_DEBUG_OUTPUT
-	if (ACPI_LV_NAMES & acpi_dbg_level) {
-		char *scope_name;
-
-		scope_name = acpi_ns_get_external_pathname(parent_node);
-		if (scope_name) {
-			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-					  "Searching %s (%p) For [%4.4s] (%s)\n",
-					  scope_name, parent_node,
-					  ACPI_CAST_PTR(char, &target_name),
-					  acpi_ut_get_type_name(type)));
-
-			ACPI_FREE(scope_name);
-		}
-	}
-#endif
-
-	/*
-	 * Search for name at this namespace level, which is to say that we
-	 * must search for the name among the children of this object
-	 */
-	node = parent_node->child;
-	while (node) {
-
-		/* Check for match against the name */
-
-		if (node->name.integer == target_name) {
-
-			/* Resolve a control method alias if any */
-
-			if (acpi_ns_get_type(node) ==
-			    ACPI_TYPE_LOCAL_METHOD_ALIAS) {
-				node =
-				    ACPI_CAST_PTR(struct acpi_namespace_node,
-						  node->object);
-			}
-
-			/* Found matching entry */
-
-			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-					  "Name [%4.4s] (%s) %p found in scope [%4.4s] %p\n",
-					  ACPI_CAST_PTR(char, &target_name),
-					  acpi_ut_get_type_name(node->type),
-					  node,
-					  acpi_ut_get_node_name(parent_node),
-					  parent_node));
-
-			*return_node = node;
-			return_ACPI_STATUS(AE_OK);
-		}
-
-		/*
-		 * The last entry in the list points back to the parent,
-		 * so a flag is used to indicate the end-of-list
-		 */
-		if (node->flags & ANOBJ_END_OF_PEER_LIST) {
-
-			/* Searched entire list, we are done */
-
-			break;
-		}
-
-		/* Didn't match name, move on to the next peer object */
-
-		node = node->peer;
-	}
-
-	/* Searched entire namespace level, not found */
-
-	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-			  "Name [%4.4s] (%s) not found in search in scope [%4.4s] %p first child %p\n",
-			  ACPI_CAST_PTR(char, &target_name),
-			  acpi_ut_get_type_name(type),
-			  acpi_ut_get_node_name(parent_node), parent_node,
-			  parent_node->child));
-
-	return_ACPI_STATUS(AE_NOT_FOUND);
-}
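
Editorial sketch of the peer-list walk above: the last child's peer pointer leads back to the parent, so the loop stops on an end-of-list flag rather than on NULL. The struct layout and flag value below are simplified stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define END_OF_PEER_LIST 0x01    /* illustrative flag value */

struct ns_node {
	char name[5];
	uint8_t flags;
	struct ns_node *peer;    /* next sibling, or the parent on the last entry */
};

static struct ns_node *search_one_scope(struct ns_node *first_child, const char *target)
{
	for (struct ns_node *n = first_child; n; n = n->peer) {
		if (strncmp(n->name, target, 4) == 0)
			return n;
		if (n->flags & END_OF_PEER_LIST)
			break;   /* last peer points at the parent, not a sibling */
	}
	return NULL;             /* not found in this scope */
}

int main(void)
{
	struct ns_node parent = { "_SB_", 0, NULL };
	struct ns_node c      = { "DEV2", END_OF_PEER_LIST, &parent };
	struct ns_node b      = { "DEV1", 0, &c };
	struct ns_node a      = { "DEV0", 0, &b };

	printf("DEV1: %s\n", search_one_scope(&a, "DEV1") ? "found" : "not found");
	printf("DEV9: %s\n", search_one_scope(&a, "DEV9") ? "found" : "not found");
	return 0;
}
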
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_search_parent_tree
- *
- * PARAMETERS:  target_name     - Ascii ACPI name to search for
- *              Node            - Starting node where search will begin
- *              Type            - Object type to match
- *              return_node     - Where the matched Node is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Called when a name has not been found in the current namespace
- *              level. Before adding it or giving up, ACPI scope rules require
- *              searching enclosing scopes in cases identified by acpi_ns_local().
- *
- *              "A name is located by finding the matching name in the current
- *              name space, and then in the parent name space. If the parent
- *              name space does not contain the name, the search continues
- *              recursively until either the name is found or the name space
- *              does not have a parent (the root of the name space). This
- *              indicates that the name is not found" (From ACPI Specification,
- *              section 5.3)
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_search_parent_tree(u32 target_name,
-			   struct acpi_namespace_node *node,
-			   acpi_object_type type,
-			   struct acpi_namespace_node **return_node)
-{
-	acpi_status status;
-	struct acpi_namespace_node *parent_node;
-
-	ACPI_FUNCTION_TRACE(ns_search_parent_tree);
-
-	parent_node = acpi_ns_get_parent_node(node);
-
-	/*
-	 * If there is no parent (i.e., we are at the root) or type is "local",
-	 * we won't be searching the parent tree.
-	 */
-	if (!parent_node) {
-		ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "[%4.4s] has no parent\n",
-				  ACPI_CAST_PTR(char, &target_name)));
-		return_ACPI_STATUS(AE_NOT_FOUND);
-	}
-
-	if (acpi_ns_local(type)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-				  "[%4.4s] type [%s] must be local to this scope (no parent search)\n",
-				  ACPI_CAST_PTR(char, &target_name),
-				  acpi_ut_get_type_name(type)));
-		return_ACPI_STATUS(AE_NOT_FOUND);
-	}
-
-	/* Search the parent tree */
-
-	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-			  "Searching parent [%4.4s] for [%4.4s]\n",
-			  acpi_ut_get_node_name(parent_node),
-			  ACPI_CAST_PTR(char, &target_name)));
-
-	/*
-	 * Search parents until target is found or we have backed up to the root
-	 */
-	while (parent_node) {
-		/*
-		 * Search parent scope. Use TYPE_ANY because we don't care about the
-		 * object type at this point, we only care about the existence of
-		 * the actual name we are searching for. Typechecking comes later.
-		 */
-		status =
-		    acpi_ns_search_one_scope(target_name, parent_node,
-					     ACPI_TYPE_ANY, return_node);
-		if (ACPI_SUCCESS(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		/* Not found here, go up another level (until we reach the root) */
-
-		parent_node = acpi_ns_get_parent_node(parent_node);
-	}
-
-	/* Not found in parent tree */
-
-	return_ACPI_STATUS(AE_NOT_FOUND);
-}
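
Editorial sketch of the enclosing-scope rule quoted in the header above: look in each parent scope in turn until the name is found or the root (which has no parent) is reached. The scope structure below is a simplified model that ignores object types and the acpi_ns_local() exception.

#include <stdio.h>
#include <string.h>

struct scope {
	const char *name;
	const char *const *names;   /* NULL-terminated list of names in this scope */
	struct scope *parent;
};

static int scope_contains(const struct scope *s, const char *target)
{
	for (const char *const *n = s->names; *n; n++)
		if (strcmp(*n, target) == 0)
			return 1;
	return 0;
}

/* Search the starting scope's parents up to the root, per the ACPI scope rule */
static const struct scope *search_parent_tree(const struct scope *start, const char *target)
{
	for (const struct scope *s = start->parent; s; s = s->parent)
		if (scope_contains(s, target))
			return s;
	return NULL;   /* not found anywhere up the tree */
}

int main(void)
{
	static const char *const root_names[] = { "OSYS", NULL };
	static const char *const sb_names[]   = { "PCI0", NULL };
	static const char *const dev_names[]  = { "_STA", NULL };

	struct scope root = { "\\",          root_names, NULL };
	struct scope sb   = { "\\_SB",       sb_names,   &root };
	struct scope dev  = { "\\_SB.PCI0",  dev_names,  &sb };

	const struct scope *hit = search_parent_tree(&dev, "OSYS");
	printf("OSYS resolved in scope: %s\n", hit ? hit->name : "(not found)");
	return 0;
}
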
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_search_and_enter
- *
- * PARAMETERS:  target_name         - Ascii ACPI name to search for (4 chars)
- *              walk_state          - Current state of the walk
- *              Node                - Starting node where search will begin
- *              interpreter_mode    - Add names only in ACPI_IMODE_LOAD_PASS_x.
- *                                    Otherwise, search only.
- *              Type                - Object type to match
- *              Flags               - Flags describing the search restrictions
- *              return_node         - Where the Node is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Search for a name segment in a single namespace level,
- *              optionally adding it if it is not found. If the passed
- *              Type is not Any and the type previously stored in the
- *              entry was Any (i.e. unknown), update the stored type.
- *
- *              In ACPI_IMODE_EXECUTE, search only.
- *              In other modes, search and add if not found.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_search_and_enter(u32 target_name,
-			 struct acpi_walk_state *walk_state,
-			 struct acpi_namespace_node *node,
-			 acpi_interpreter_mode interpreter_mode,
-			 acpi_object_type type,
-			 u32 flags, struct acpi_namespace_node **return_node)
-{
-	acpi_status status;
-	struct acpi_namespace_node *new_node;
-
-	ACPI_FUNCTION_TRACE(ns_search_and_enter);
-
-	/* Parameter validation */
-
-	if (!node || !target_name || !return_node) {
-		ACPI_ERROR((AE_INFO,
-			    "Null parameter: Node %p Name %X ReturnNode %p",
-			    node, target_name, return_node));
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/*
-	 * Name must consist of valid ACPI characters. We will repair the name if
-	 * necessary because we don't want to abort because of this, but we want
-	 * all namespace names to be printable. A warning message is appropriate.
-	 *
-	 * This issue came up because there are in fact machines that exhibit
-	 * this problem, and we want to be able to enable ACPI support for them,
-	 * even though there are a few bad names.
-	 */
-	if (!acpi_ut_valid_acpi_name(target_name)) {
-		target_name =
-		    acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name));
-
-		/* Report warning only if in strict mode or debug mode */
-
-		if (!acpi_gbl_enable_interpreter_slack) {
-			ACPI_WARNING((AE_INFO,
-				      "Found bad character(s) in name, repaired: [%4.4s]\n",
-				      ACPI_CAST_PTR(char, &target_name)));
-		} else {
-			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-					  "Found bad character(s) in name, repaired: [%4.4s]\n",
-					  ACPI_CAST_PTR(char, &target_name)));
-		}
-	}
-
-	/* Try to find the name in the namespace level specified by the caller */
-
-	*return_node = ACPI_ENTRY_NOT_FOUND;
-	status = acpi_ns_search_one_scope(target_name, node, type, return_node);
-	if (status != AE_NOT_FOUND) {
-		/*
-		 * If we found it AND the request specifies that a find is an error,
-		 * return the error
-		 */
-		if ((status == AE_OK) && (flags & ACPI_NS_ERROR_IF_FOUND)) {
-			status = AE_ALREADY_EXISTS;
-		}
-
-		/* Either found it or there was an error: finished either way */
-
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * The name was not found. If we are NOT performing the first pass
-	 * (name entry) of loading the namespace, search the parent tree (all the
-	 * way to the root if necessary.) We don't want to perform the parent
-	 * search when the namespace is actually being loaded. We want to perform
-	 * the search when namespace references are being resolved (load pass 2)
-	 * and during the execution phase.
-	 */
-	if ((interpreter_mode != ACPI_IMODE_LOAD_PASS1) &&
-	    (flags & ACPI_NS_SEARCH_PARENT)) {
-		/*
-		 * Not found at this level - search parent tree according to the
-		 * ACPI specification
-		 */
-		status =
-		    acpi_ns_search_parent_tree(target_name, node, type,
-					       return_node);
-		if (ACPI_SUCCESS(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/* In execute mode, just search, never add names. Exit now */
-
-	if (interpreter_mode == ACPI_IMODE_EXECUTE) {
-		ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
-				  "%4.4s Not found in %p [Not adding]\n",
-				  ACPI_CAST_PTR(char, &target_name), node));
-
-		return_ACPI_STATUS(AE_NOT_FOUND);
-	}
-
-	/* Create the new named object */
-
-	new_node = acpi_ns_create_node(target_name);
-	if (!new_node) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-#ifdef ACPI_ASL_COMPILER
-	/*
-	 * Node is an object defined by an External() statement
-	 */
-	if (flags & ACPI_NS_EXTERNAL) {
-		new_node->flags |= ANOBJ_IS_EXTERNAL;
-	}
-#endif
-
-	if (flags & ACPI_NS_TEMPORARY) {
-		new_node->flags |= ANOBJ_TEMPORARY;
-	}
-
-	/* Install the new object into the parent's list of children */
-
-	acpi_ns_install_node(walk_state, node, new_node, type);
-	*return_node = new_node;
-	return_ACPI_STATUS(AE_OK);
-}
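
Editorial sketch of the name-repair idea above, using the usual ACPI NameSeg rules (lead character 'A'-'Z' or '_', remaining characters may also be digits). The replacement character is this sketch's own choice and is not asserted to match what acpi_ut_repair_name actually substitutes.

#include <stdio.h>

/* ACPI NameSeg rules: lead char 'A'-'Z' or '_', others may also be '0'-'9' */
static int valid_name_char(char c, int is_lead)
{
	if (c == '_' || (c >= 'A' && c <= 'Z'))
		return 1;
	return !is_lead && c >= '0' && c <= '9';
}

static int repair_name(char name[4])
{
	int repaired = 0;

	for (int i = 0; i < 4; i++)
		if (!valid_name_char(name[i], i == 0)) {
			name[i] = '_';   /* replacement char chosen for this sketch */
			repaired = 1;
		}
	return repaired;
}

int main(void)
{
	char bad[4] = { 'A', 'b', '\x7f', '3' };   /* lowercase letter and a control byte */

	if (repair_name(bad))
		printf("Found bad character(s) in name, repaired: [%.4s]\n", bad);
	return 0;
}
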
diff --git a/drivers/acpi/namespace/nsutils.c b/drivers/acpi/namespace/nsutils.c
deleted file mode 100644
index b0817e1..0000000
--- a/drivers/acpi/namespace/nsutils.c
+++ /dev/null
@@ -1,990 +0,0 @@
-/******************************************************************************
- *
- * Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
- *                        parents and siblings and Scope manipulation
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/amlcode.h>
-#include <acpi/actables.h>
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nsutils")
-
-/* Local prototypes */
-static u8 acpi_ns_valid_path_separator(char sep);
-
-#ifdef ACPI_OBSOLETE_FUNCTIONS
-acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search);
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_report_error
- *
- * PARAMETERS:  module_name         - Caller's module name (for error output)
- *              line_number         - Caller's line number (for error output)
- *              internal_name       - Name or path of the namespace node
- *              lookup_status       - Exception code from NS lookup
- *
- * RETURN:      None
- *
- * DESCRIPTION: Print warning message with full pathname
- *
- ******************************************************************************/
-
-void
-acpi_ns_report_error(const char *module_name,
-		     u32 line_number,
-		     const char *internal_name, acpi_status lookup_status)
-{
-	acpi_status status;
-	u32 bad_name;
-	char *name = NULL;
-
-	acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
-
-	if (lookup_status == AE_BAD_CHARACTER) {
-
-		/* There is a non-ascii character in the name */
-
-		ACPI_MOVE_32_TO_32(&bad_name, internal_name);
-		acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name);
-	} else {
-		/* Convert path to external format */
-
-		status = acpi_ns_externalize_name(ACPI_UINT32_MAX,
-						  internal_name, NULL, &name);
-
-		/* Print target name */
-
-		if (ACPI_SUCCESS(status)) {
-			acpi_os_printf("[%s]", name);
-		} else {
-			acpi_os_printf("[COULD NOT EXTERNALIZE NAME]");
-		}
-
-		if (name) {
-			ACPI_FREE(name);
-		}
-	}
-
-	acpi_os_printf(" Namespace lookup failure, %s\n",
-		       acpi_format_exception(lookup_status));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_report_method_error
- *
- * PARAMETERS:  module_name         - Caller's module name (for error output)
- *              line_number         - Caller's line number (for error output)
- *              Message             - Error message to use on failure
- *              prefix_node         - Prefix relative to the path
- *              Path                - Path to the node (optional)
- *              method_status       - Execution status
- *
- * RETURN:      None
- *
- * DESCRIPTION: Print warning message with full pathname
- *
- ******************************************************************************/
-
-void
-acpi_ns_report_method_error(const char *module_name,
-			    u32 line_number,
-			    const char *message,
-			    struct acpi_namespace_node *prefix_node,
-			    const char *path, acpi_status method_status)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node = prefix_node;
-
-	acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
-
-	if (path) {
-		status =
-		    acpi_ns_get_node(prefix_node, path, ACPI_NS_NO_UPSEARCH,
-				     &node);
-		if (ACPI_FAILURE(status)) {
-			acpi_os_printf("[Could not get node by pathname]");
-		}
-	}
-
-	acpi_ns_print_node_pathname(node, message);
-	acpi_os_printf(", %s\n", acpi_format_exception(method_status));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_print_node_pathname
- *
- * PARAMETERS:  Node            - Object
- *              Message         - Prefix message
- *
- * DESCRIPTION: Print an object's full namespace pathname
- *              Manages allocation/freeing of a pathname buffer
- *
- ******************************************************************************/
-
-void
-acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
-			    const char *message)
-{
-	struct acpi_buffer buffer;
-	acpi_status status;
-
-	if (!node) {
-		acpi_os_printf("[NULL NAME]");
-		return;
-	}
-
-	/* Convert handle to full pathname and print it (with supplied message) */
-
-	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
-
-	status = acpi_ns_handle_to_pathname(node, &buffer);
-	if (ACPI_SUCCESS(status)) {
-		if (message) {
-			acpi_os_printf("%s ", message);
-		}
-
-		acpi_os_printf("[%s] (Node %p)", (char *)buffer.pointer, node);
-		ACPI_FREE(buffer.pointer);
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_valid_root_prefix
- *
- * PARAMETERS:  Prefix          - Character to be checked
- *
- * RETURN:      TRUE if a valid prefix
- *
- * DESCRIPTION: Check if a character is a valid ACPI Root prefix
- *
- ******************************************************************************/
-
-u8 acpi_ns_valid_root_prefix(char prefix)
-{
-
-	return ((u8) (prefix == '\\'));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_valid_path_separator
- *
- * PARAMETERS:  Sep         - Character to be checked
- *
- * RETURN:      TRUE if a valid path separator
- *
- * DESCRIPTION: Check if a character is a valid ACPI path separator
- *
- ******************************************************************************/
-
-static u8 acpi_ns_valid_path_separator(char sep)
-{
-
-	return ((u8) (sep == '.'));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_get_type
- *
- * PARAMETERS:  Node        - Parent Node to be examined
- *
- * RETURN:      Type field from Node whose handle is passed
- *
- * DESCRIPTION: Return the type of a Namespace node
- *
- ******************************************************************************/
-
-acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
-{
-	ACPI_FUNCTION_TRACE(ns_get_type);
-
-	if (!node) {
-		ACPI_WARNING((AE_INFO, "Null Node parameter"));
-		return_UINT32(ACPI_TYPE_ANY);
-	}
-
-	return_UINT32((acpi_object_type) node->type);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_local
- *
- * PARAMETERS:  Type        - A namespace object type
- *
- * RETURN:      LOCAL if names must be found locally in objects of the
- *              passed type, 0 if enclosing scopes should be searched
- *
- * DESCRIPTION: Returns scope rule for the given object type.
- *
- ******************************************************************************/
-
-u32 acpi_ns_local(acpi_object_type type)
-{
-	ACPI_FUNCTION_TRACE(ns_local);
-
-	if (!acpi_ut_valid_object_type(type)) {
-
-		/* Type code out of range  */
-
-		ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
-		return_UINT32(ACPI_NS_NORMAL);
-	}
-
-	return_UINT32((u32) acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_get_internal_name_length
- *
- * PARAMETERS:  Info            - Info struct initialized with the
- *                                external name pointer.
- *
- * RETURN:      None
- *
- * DESCRIPTION: Calculate the length of the internal (AML) namestring
- *              corresponding to the external (ASL) namestring.
- *
- ******************************************************************************/
-
-void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
-{
-	const char *next_external_char;
-	u32 i;
-
-	ACPI_FUNCTION_ENTRY();
-
-	next_external_char = info->external_name;
-	info->num_carats = 0;
-	info->num_segments = 0;
-	info->fully_qualified = FALSE;
-
-	/*
-	 * For the internal name, the required length is 4 bytes per segment, plus
-	 * 1 each for root_prefix, multi_name_prefix_op, segment count, trailing null
-	 * (which is not really needed, but there's no harm in putting it there)
-	 *
-	 * strlen() + 1 covers the first name_seg, which has no path separator
-	 */
-	if (acpi_ns_valid_root_prefix(next_external_char[0])) {
-		info->fully_qualified = TRUE;
-		next_external_char++;
-	} else {
-		/*
-		 * Handle Carat prefixes
-		 */
-		while (*next_external_char == '^') {
-			info->num_carats++;
-			next_external_char++;
-		}
-	}
-
-	/*
-	 * Determine the number of ACPI name "segments" by counting the number of
-	 * path separators within the string. Start with one segment since the
-	 * segment count is [(# separators) + 1], and zero separators is ok.
-	 */
-	if (*next_external_char) {
-		info->num_segments = 1;
-		for (i = 0; next_external_char[i]; i++) {
-			if (acpi_ns_valid_path_separator(next_external_char[i])) {
-				info->num_segments++;
-			}
-		}
-	}
-
-	info->length = (ACPI_NAME_SIZE * info->num_segments) +
-	    4 + info->num_carats;
-
-	info->next_external_char = next_external_char;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_build_internal_name
- *
- * PARAMETERS:  Info            - Info struct fully initialized
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Construct the internal (AML) namestring
- *              corresponding to the external (ASL) namestring.
- *
- ******************************************************************************/
-
-acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
-{
-	u32 num_segments = info->num_segments;
-	char *internal_name = info->internal_name;
-	const char *external_name = info->next_external_char;
-	char *result = NULL;
-	u32 i;
-
-	ACPI_FUNCTION_TRACE(ns_build_internal_name);
-
-	/* Setup the correct prefixes, counts, and pointers */
-
-	if (info->fully_qualified) {
-		internal_name[0] = '\\';
-
-		if (num_segments <= 1) {
-			result = &internal_name[1];
-		} else if (num_segments == 2) {
-			internal_name[1] = AML_DUAL_NAME_PREFIX;
-			result = &internal_name[2];
-		} else {
-			internal_name[1] = AML_MULTI_NAME_PREFIX_OP;
-			internal_name[2] = (char)num_segments;
-			result = &internal_name[3];
-		}
-	} else {
-		/*
-		 * Not fully qualified.
-		 * Handle Carats first, then append the name segments
-		 */
-		i = 0;
-		if (info->num_carats) {
-			for (i = 0; i < info->num_carats; i++) {
-				internal_name[i] = '^';
-			}
-		}
-
-		if (num_segments <= 1) {
-			result = &internal_name[i];
-		} else if (num_segments == 2) {
-			internal_name[i] = AML_DUAL_NAME_PREFIX;
-			result = &internal_name[(acpi_size) i + 1];
-		} else {
-			internal_name[i] = AML_MULTI_NAME_PREFIX_OP;
-			internal_name[(acpi_size) i + 1] = (char)num_segments;
-			result = &internal_name[(acpi_size) i + 2];
-		}
-	}
-
-	/* Build the name (minus path separators) */
-
-	for (; num_segments; num_segments--) {
-		for (i = 0; i < ACPI_NAME_SIZE; i++) {
-			if (acpi_ns_valid_path_separator(*external_name) ||
-			    (*external_name == 0)) {
-
-				/* Pad the segment with underscore(s) if segment is short */
-
-				result[i] = '_';
-			} else {
-				/* Convert the character to uppercase and save it */
-
-				result[i] =
-				    (char)ACPI_TOUPPER((int)*external_name);
-				external_name++;
-			}
-		}
-
-		/* Now we must have a path separator, or the pathname is bad */
-
-		if (!acpi_ns_valid_path_separator(*external_name) &&
-		    (*external_name != 0)) {
-			return_ACPI_STATUS(AE_BAD_PARAMETER);
-		}
-
-		/* Move on to the next segment */
-
-		external_name++;
-		result += ACPI_NAME_SIZE;
-	}
-
-	/* Terminate the string */
-
-	*result = 0;
-
-	if (info->fully_qualified) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Returning [%p] (abs) \"\\%s\"\n",
-				  internal_name, internal_name));
-	} else {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Returning [%p] (rel) \"%s\"\n",
-				  internal_name, internal_name));
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_internalize_name
- *
- * PARAMETERS:  *external_name          - External representation of name
- *              **Converted Name        - Where to return the resulting
- *                                        internal representation of the name
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Convert an external representation (e.g. "\_PR_.CPU0")
- *              to internal form (e.g. 5c 2f 02 5f 50 52 5f 43 50 55 30)
- *
- *******************************************************************************/
-
-acpi_status
-acpi_ns_internalize_name(const char *external_name, char **converted_name)
-{
-	char *internal_name;
-	struct acpi_namestring_info info;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ns_internalize_name);
-
-	if ((!external_name) || (*external_name == 0) || (!converted_name)) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Get the length of the new internal name */
-
-	info.external_name = external_name;
-	acpi_ns_get_internal_name_length(&info);
-
-	/* We need a buffer to store the internal name */
-
-	internal_name = ACPI_ALLOCATE_ZEROED(info.length);
-	if (!internal_name) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Build the name */
-
-	info.internal_name = internal_name;
-	status = acpi_ns_build_internal_name(&info);
-	if (ACPI_FAILURE(status)) {
-		ACPI_FREE(internal_name);
-		return_ACPI_STATUS(status);
-	}
-
-	*converted_name = internal_name;
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_externalize_name
- *
- *              internal_name_length - Length of the internal name below
- *              internal_name       - Internal representation of name
- *              converted_name_length - Where the length is returned
- *              converted_name      - Where the resulting external name
- *                                    is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Convert internal name (e.g. 5c 2f 02 5f 50 52 5f 43 50 55 30)
- *              to its external (printable) form (e.g. "\_PR_.CPU0")
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_externalize_name(u32 internal_name_length,
-			 const char *internal_name,
-			 u32 * converted_name_length, char **converted_name)
-{
-	u32 names_index = 0;
-	u32 num_segments = 0;
-	u32 required_length;
-	u32 prefix_length = 0;
-	u32 i = 0;
-	u32 j = 0;
-
-	ACPI_FUNCTION_TRACE(ns_externalize_name);
-
-	if (!internal_name_length || !internal_name || !converted_name) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/*
-	 * Check for a prefix (one '\' | one or more '^').
-	 */
-	switch (internal_name[0]) {
-	case '\\':
-		prefix_length = 1;
-		break;
-
-	case '^':
-		for (i = 0; i < internal_name_length; i++) {
-			if (internal_name[i] == '^') {
-				prefix_length = i + 1;
-			} else {
-				break;
-			}
-		}
-
-		if (i == internal_name_length) {
-			prefix_length = i;
-		}
-
-		break;
-
-	default:
-		break;
-	}
-
-	/*
-	 * Check for object names.  Note that there could be 0-255 of these
-	 * 4-byte elements.
-	 */
-	if (prefix_length < internal_name_length) {
-		switch (internal_name[prefix_length]) {
-		case AML_MULTI_NAME_PREFIX_OP:
-
-			/* <count> 4-byte names */
-
-			names_index = prefix_length + 2;
-			num_segments = (u8)
-			    internal_name[(acpi_size) prefix_length + 1];
-			break;
-
-		case AML_DUAL_NAME_PREFIX:
-
-			/* Two 4-byte names */
-
-			names_index = prefix_length + 1;
-			num_segments = 2;
-			break;
-
-		case 0:
-
-			/* null_name */
-
-			names_index = 0;
-			num_segments = 0;
-			break;
-
-		default:
-
-			/* one 4-byte name */
-
-			names_index = prefix_length;
-			num_segments = 1;
-			break;
-		}
-	}
-
-	/*
-	 * Calculate the length of converted_name, which equals the length
-	 * of the prefix, length of all object names, length of any required
-	 * punctuation ('.') between object names, plus the NULL terminator.
-	 */
-	required_length = prefix_length + (4 * num_segments) +
-	    ((num_segments > 0) ? (num_segments - 1) : 0) + 1;
-
-	/*
-	 * Check to see if we're still in bounds.  If not, there's a problem
-	 * with internal_name (invalid format).
-	 */
-	if (required_length > internal_name_length) {
-		ACPI_ERROR((AE_INFO, "Invalid internal name"));
-		return_ACPI_STATUS(AE_BAD_PATHNAME);
-	}
-
-	/*
-	 * Build converted_name
-	 */
-	*converted_name = ACPI_ALLOCATE_ZEROED(required_length);
-	if (!(*converted_name)) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	j = 0;
-
-	for (i = 0; i < prefix_length; i++) {
-		(*converted_name)[j++] = internal_name[i];
-	}
-
-	if (num_segments > 0) {
-		for (i = 0; i < num_segments; i++) {
-			if (i > 0) {
-				(*converted_name)[j++] = '.';
-			}
-
-			(*converted_name)[j++] = internal_name[names_index++];
-			(*converted_name)[j++] = internal_name[names_index++];
-			(*converted_name)[j++] = internal_name[names_index++];
-			(*converted_name)[j++] = internal_name[names_index++];
-		}
-	}
-
-	if (converted_name_length) {
-		*converted_name_length = (u32) required_length;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_map_handle_to_node
- *
- * PARAMETERS:  Handle          - Handle to be converted to an Node
- *
- * RETURN:      A Name table entry pointer
- *
- * DESCRIPTION: Convert a namespace handle to a real Node
- *
- * Note: Real integer handles would allow for more verification
- *       and keep all pointers within this subsystem - however this introduces
- *       more (and perhaps unnecessary) overhead.
- *
- ******************************************************************************/
-
-struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle)
-{
-
-	ACPI_FUNCTION_ENTRY();
-
-	/*
-	 * Simple implementation
-	 */
-	if ((!handle) || (handle == ACPI_ROOT_OBJECT)) {
-		return (acpi_gbl_root_node);
-	}
-
-	/* We can at least attempt to verify the handle */
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(handle) != ACPI_DESC_TYPE_NAMED) {
-		return (NULL);
-	}
-
-	return (ACPI_CAST_PTR(struct acpi_namespace_node, handle));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_convert_entry_to_handle
- *
- * PARAMETERS:  Node          - Node to be converted to a Handle
- *
- * RETURN:      A user handle
- *
- * DESCRIPTION: Convert a real Node to a namespace handle
- *
- ******************************************************************************/
-
-acpi_handle acpi_ns_convert_entry_to_handle(struct acpi_namespace_node *node)
-{
-
-	/*
-	 * Simple implementation for now;
-	 */
-	return ((acpi_handle) node);
-
-/* Example future implementation ---------------------
-
-	if (!Node)
-	{
-		return (NULL);
-	}
-
-	if (Node == acpi_gbl_root_node)
-	{
-		return (ACPI_ROOT_OBJECT);
-	}
-
-	return ((acpi_handle) Node);
-------------------------------------------------------*/
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_terminate
- *
- * PARAMETERS:  none
- *
- * RETURN:      none
- *
- * DESCRIPTION: free memory allocated for namespace and ACPI table storage.
- *
- ******************************************************************************/
-
-void acpi_ns_terminate(void)
-{
-	union acpi_operand_object *obj_desc;
-
-	ACPI_FUNCTION_TRACE(ns_terminate);
-
-	/*
-	 * 1) Free the entire namespace -- all nodes and objects
-	 *
-	 * Delete all object descriptors attached to namespace nodes
-	 */
-	acpi_ns_delete_namespace_subtree(acpi_gbl_root_node);
-
-	/* Detach any objects attached to the root */
-
-	obj_desc = acpi_ns_get_attached_object(acpi_gbl_root_node);
-	if (obj_desc) {
-		acpi_ns_detach_object(acpi_gbl_root_node);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n"));
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_opens_scope
- *
- * PARAMETERS:  Type        - A valid namespace type
- *
- * RETURN:      NEWSCOPE if the passed type "opens a name scope" according
- *              to the ACPI specification, else 0
- *
- ******************************************************************************/
-
-u32 acpi_ns_opens_scope(acpi_object_type type)
-{
-	ACPI_FUNCTION_TRACE_STR(ns_opens_scope, acpi_ut_get_type_name(type));
-
-	if (!acpi_ut_valid_object_type(type)) {
-
-		/* type code out of range  */
-
-		ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
-		return_UINT32(ACPI_NS_NORMAL);
-	}
-
-	return_UINT32(((u32) acpi_gbl_ns_properties[type]) & ACPI_NS_NEWSCOPE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_get_node
- *
- * PARAMETERS:  *Pathname   - Name to be found, in external (ASL) format. The
- *                            \ (backslash) and ^ (carat) prefixes, and the
- *                            . (period) to separate segments are supported.
- *              prefix_node  - Root of subtree to be searched, or NS_ALL for the
- *                            root of the name space.  If Name is fully
- *                            qualified (first s8 is '\'), the passed value
- *                            of Scope will not be accessed.
- *              Flags       - Used to indicate whether to perform upsearch or
- *                            not.
- *              return_node - Where the Node is returned
- *
- * DESCRIPTION: Look up a name relative to a given scope and return the
- *              corresponding Node.  NOTE: Scope can be null.
- *
- * MUTEX:       Locks namespace
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
-		 const char *pathname,
-		 u32 flags, struct acpi_namespace_node **return_node)
-{
-	union acpi_generic_state scope_info;
-	acpi_status status;
-	char *internal_path;
-
-	ACPI_FUNCTION_TRACE_PTR(ns_get_node, pathname);
-
-	if (!pathname) {
-		*return_node = prefix_node;
-		if (!prefix_node) {
-			*return_node = acpi_gbl_root_node;
-		}
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Convert path to internal representation */
-
-	status = acpi_ns_internalize_name(pathname, &internal_path);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Must lock namespace during lookup */
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	/* Setup lookup scope (search starting point) */
-
-	scope_info.scope.node = prefix_node;
-
-	/* Lookup the name in the namespace */
-
-	status = acpi_ns_lookup(&scope_info, internal_path, ACPI_TYPE_ANY,
-				ACPI_IMODE_EXECUTE,
-				(flags | ACPI_NS_DONT_OPEN_SCOPE), NULL,
-				return_node);
-	if (ACPI_FAILURE(status)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s, %s\n",
-				  pathname, acpi_format_exception(status)));
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-
-      cleanup:
-	ACPI_FREE(internal_path);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_get_parent_node
- *
- * PARAMETERS:  Node       - Current table entry
- *
- * RETURN:      Parent entry of the given entry
- *
- * DESCRIPTION: Obtain the parent entry for a given entry in the namespace.
- *
- ******************************************************************************/
-
-struct acpi_namespace_node *acpi_ns_get_parent_node(struct acpi_namespace_node
-						    *node)
-{
-	ACPI_FUNCTION_ENTRY();
-
-	if (!node) {
-		return (NULL);
-	}
-
-	/*
-	 * Walk to the end of this peer list. The last entry is marked with a flag
-	 * and the peer pointer is really a pointer back to the parent. This saves
-	 * putting a parent back pointer in each and every named object!
-	 */
-	while (!(node->flags & ANOBJ_END_OF_PEER_LIST)) {
-		node = node->peer;
-	}
-
-	return (node->peer);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_get_next_valid_node
- *
- * PARAMETERS:  Node       - Current table entry
- *
- * RETURN:      Next valid Node in the linked node list. NULL if no more valid
- *              nodes.
- *
- * DESCRIPTION: Find the next valid node within a name table.
- *              Useful for implementing NULL-end-of-list loops.
- *
- ******************************************************************************/
-
-struct acpi_namespace_node *acpi_ns_get_next_valid_node(struct
-							acpi_namespace_node
-							*node)
-{
-
-	/* If we are at the end of this peer list, return NULL */
-
-	if (node->flags & ANOBJ_END_OF_PEER_LIST) {
-		return NULL;
-	}
-
-	/* Otherwise just return the next peer */
-
-	return (node->peer);
-}
-
-#ifdef ACPI_OBSOLETE_FUNCTIONS
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_find_parent_name
- *
- * PARAMETERS:  *child_node            - Named Obj whose name is to be found
- *
- * RETURN:      The ACPI name
- *
- * DESCRIPTION: Search for the given obj in its parent scope and return the
- *              name segment, or "????" if the parent name can't be found
- *              (which "should not happen").
- *
- ******************************************************************************/
-
-acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node * child_node)
-{
-	struct acpi_namespace_node *parent_node;
-
-	ACPI_FUNCTION_TRACE(ns_find_parent_name);
-
-	if (child_node) {
-
-		/* Valid entry.  Get the parent Node */
-
-		parent_node = acpi_ns_get_parent_node(child_node);
-		if (parent_node) {
-			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-					  "Parent of %p [%4.4s] is %p [%4.4s]\n",
-					  child_node,
-					  acpi_ut_get_node_name(child_node),
-					  parent_node,
-					  acpi_ut_get_node_name(parent_node)));
-
-			if (parent_node->name.integer) {
-				return_VALUE((acpi_name) parent_node->name.
-					     integer);
-			}
-		}
-
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Unable to find parent of %p (%4.4s)\n",
-				  child_node,
-				  acpi_ut_get_node_name(child_node)));
-	}
-
-	return_VALUE(ACPI_UNKNOWN_NAME);
-}
-#endif
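
nsutils.c above is mostly about converting between the external (ASL) form of a pathname such as "\_PR_.CPU0" and the internal (AML) form, where every segment is exactly four characters padded with '_' and a dual- or multi-name prefix byte is emitted when there is more than one segment. A stand-alone sketch of that forward conversion is below; the two prefix constants mirror the AML opcodes referenced above, but everything else (function name, buffer handling) is illustrative rather than ACPICA code.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define DUAL_NAME_PREFIX	0x2e	/* AML_DUAL_NAME_PREFIX */
#define MULTI_NAME_PREFIX	0x2f	/* AML_MULTI_NAME_PREFIX_OP */

/*
 * Convert an external (ASL) pathname such as "\_PR_.CPU0" into the internal
 * (AML) form used by the namespace code above. 'out' must be large enough
 * (4 bytes per segment plus a small header). Returns the number of bytes
 * written, or -1 if a segment is longer than 4 characters.
 */
static int internalize_name(const char *ext, unsigned char *out)
{
	int n = 0, segments = 1;
	const char *p;

	/* Root prefix ('\') or one or more parent prefixes ('^') */
	while (*ext == '\\' || *ext == '^')
		out[n++] = (unsigned char)*ext++;

	if (*ext == '\0')
		return n;

	/* Count the dot-separated segments */
	for (p = ext; *p; p++)
		if (*p == '.')
			segments++;

	if (segments == 2) {
		out[n++] = DUAL_NAME_PREFIX;
	} else if (segments > 2) {
		out[n++] = MULTI_NAME_PREFIX;
		out[n++] = (unsigned char)segments;
	}

	/* Emit each segment as exactly 4 characters, padded with '_' */
	while (segments--) {
		int i;

		for (i = 0; i < 4; i++) {
			if (*ext == '.' || *ext == '\0')
				out[n++] = '_';
			else
				out[n++] = (unsigned char)toupper((unsigned char)*ext++);
		}
		if (*ext != '.' && *ext != '\0')
			return -1;	/* segment longer than 4 characters */
		if (*ext == '.')
			ext++;
	}
	return n;
}

int main(void)
{
	unsigned char buf[64];
	int i, len = internalize_name("\\_PR_.CPU0", buf);

	for (i = 0; i < len; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}

Running the sketch on "\_PR_.CPU0" prints 5c 2e 5f 50 52 5f 43 50 55 30: the root prefix, the dual-name prefix for two segments, then "_PR_" and "CPU0".
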
diff --git a/drivers/acpi/namespace/nswalk.c b/drivers/acpi/namespace/nswalk.c
deleted file mode 100644
index 3c905ce..0000000
--- a/drivers/acpi/namespace/nswalk.c
+++ /dev/null
@@ -1,295 +0,0 @@
-/******************************************************************************
- *
- * Module Name: nswalk - Functions for walking the ACPI namespace
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nswalk")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_get_next_node
- *
- * PARAMETERS:  Type                - Type of node to be searched for
- *              parent_node         - Parent node whose children we are
- *                                    getting
- *              child_node          - Previous child that was found.
- *                                    The NEXT child will be returned
- *
- * RETURN:      struct acpi_namespace_node - Pointer to the NEXT child or NULL if
- *                                    none is found.
- *
- * DESCRIPTION: Return the next node within the parent's scope.  If
- *              child_node is valid, the node following it is returned.
- *              Otherwise, the first child of parent_node is returned.
- *
- ******************************************************************************/
-struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, struct acpi_namespace_node
-						  *parent_node, struct acpi_namespace_node
-						  *child_node)
-{
-	struct acpi_namespace_node *next_node = NULL;
-
-	ACPI_FUNCTION_ENTRY();
-
-	if (!child_node) {
-
-		/* It's really the parent's _scope_ that we want */
-
-		next_node = parent_node->child;
-	}
-
-	else {
-		/* Start search at the NEXT node */
-
-		next_node = acpi_ns_get_next_valid_node(child_node);
-	}
-
-	/* If any type is OK, we are done */
-
-	if (type == ACPI_TYPE_ANY) {
-
-		/* next_node is NULL if we are at the end-of-list */
-
-		return (next_node);
-	}
-
-	/* Must search for the node -- but within this scope only */
-
-	while (next_node) {
-
-		/* If type matches, we are done */
-
-		if (next_node->type == type) {
-			return (next_node);
-		}
-
-		/* Otherwise, move on to the next node */
-
-		next_node = acpi_ns_get_next_valid_node(next_node);
-	}
-
-	/* Not found */
-
-	return (NULL);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_walk_namespace
- *
- * PARAMETERS:  Type                - acpi_object_type to search for
- *              start_node          - Handle in namespace where search begins
- *              max_depth           - Depth to which search is to reach
- *              Flags               - Whether to unlock the NS before invoking
- *                                    the callback routine
- *              user_function       - Called when an object of "Type" is found
- *              Context             - Passed to user function
- *              return_value        - from the user_function if terminated early.
- *                                    Otherwise, returns NULL.
- * RETURNS:     Status
- *
- * DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
- *              starting (and ending) at the node specified by start_handle.
- *              The user_function is called whenever a node that matches
- *              the type parameter is found.  If the user function returns
- *              a non-zero value, the search is terminated immediately and this
- *              value is returned to the caller.
- *
- *              The point of this procedure is to provide a generic namespace
- *              walk routine that can be called from multiple places to
- *              provide multiple services;  the User Function can be tailored
- *              to each task, whether it is a print function, a compare
- *              function, etc.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_walk_namespace(acpi_object_type type,
-		       acpi_handle start_node,
-		       u32 max_depth,
-		       u32 flags,
-		       acpi_walk_callback user_function,
-		       void *context, void **return_value)
-{
-	acpi_status status;
-	acpi_status mutex_status;
-	struct acpi_namespace_node *child_node;
-	struct acpi_namespace_node *parent_node;
-	acpi_object_type child_type;
-	u32 level;
-
-	ACPI_FUNCTION_TRACE(ns_walk_namespace);
-
-	/* Special case for the namespace Root Node */
-
-	if (start_node == ACPI_ROOT_OBJECT) {
-		start_node = acpi_gbl_root_node;
-	}
-
-	/* Null child means "get first node" */
-
-	parent_node = start_node;
-	child_node = NULL;
-	child_type = ACPI_TYPE_ANY;
-	level = 1;
-
-	/*
-	 * Traverse the tree of nodes until we bubble back up to where we
-	 * started. When Level is zero, the loop is done because we have
-	 * bubbled up to (and passed) the original parent handle (start_entry)
-	 */
-	while (level > 0) {
-
-		/* Get the next node in this scope.  Null if not found */
-
-		status = AE_OK;
-		child_node =
-		    acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
-					  child_node);
-		if (child_node) {
-
-			/* Found next child, get the type if we are not searching for ANY */
-
-			if (type != ACPI_TYPE_ANY) {
-				child_type = child_node->type;
-			}
-
-			/*
-			 * Ignore all temporary namespace nodes (created during control
-			 * method execution) unless told otherwise. These temporary nodes
-			 * can cause a race condition because they can be deleted during the
-			 * execution of the user function (if the namespace is unlocked before
-			 * invocation of the user function.) Only the debugger namespace dump
-			 * will examine the temporary nodes.
-			 */
-			if ((child_node->flags & ANOBJ_TEMPORARY) &&
-			    !(flags & ACPI_NS_WALK_TEMP_NODES)) {
-				status = AE_CTRL_DEPTH;
-			}
-
-			/* Type must match requested type */
-
-			else if (child_type == type) {
-				/*
-				 * Found a matching node, invoke the user callback function.
-				 * Unlock the namespace if flag is set.
-				 */
-				if (flags & ACPI_NS_WALK_UNLOCK) {
-					mutex_status =
-					    acpi_ut_release_mutex
-					    (ACPI_MTX_NAMESPACE);
-					if (ACPI_FAILURE(mutex_status)) {
-						return_ACPI_STATUS
-						    (mutex_status);
-					}
-				}
-
-				status =
-				    user_function(child_node, level, context,
-						  return_value);
-
-				if (flags & ACPI_NS_WALK_UNLOCK) {
-					mutex_status =
-					    acpi_ut_acquire_mutex
-					    (ACPI_MTX_NAMESPACE);
-					if (ACPI_FAILURE(mutex_status)) {
-						return_ACPI_STATUS
-						    (mutex_status);
-					}
-				}
-
-				switch (status) {
-				case AE_OK:
-				case AE_CTRL_DEPTH:
-
-					/* Just keep going */
-					break;
-
-				case AE_CTRL_TERMINATE:
-
-					/* Exit now, with OK status */
-
-					return_ACPI_STATUS(AE_OK);
-
-				default:
-
-					/* All others are valid exceptions */
-
-					return_ACPI_STATUS(status);
-				}
-			}
-
-			/*
-			 * Depth first search: Attempt to go down another level in the
-			 * namespace if we are allowed to.  Don't go any further if we have
-			 * reached the caller specified maximum depth or if the user
-			 * function has specified that the maximum depth has been reached.
-			 */
-			if ((level < max_depth) && (status != AE_CTRL_DEPTH)) {
-				if (acpi_ns_get_next_node
-				    (ACPI_TYPE_ANY, child_node, NULL)) {
-
-					/* There is at least one child of this node, visit it */
-
-					level++;
-					parent_node = child_node;
-					child_node = NULL;
-				}
-			}
-		} else {
-			/*
-			 * No more children of this node (acpi_ns_get_next_node failed), go
-			 * back upwards in the namespace tree to the node's parent.
-			 */
-			level--;
-			child_node = parent_node;
-			parent_node = acpi_ns_get_parent_node(parent_node);
-		}
-	}
-
-	/* Complete walk, not terminated by user function */
-
-	return_ACPI_STATUS(AE_OK);
-}
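
acpi_ns_walk_namespace() above walks the tree iteratively: it keeps only the current parent, the last child visited and a depth counter, descends when the current node has children, and climbs back to the parent when a peer list is exhausted, invoking the user function on every matching node. A compact stand-alone sketch of the same control flow over a plain child/peer tree follows; the node type, the WALK_* codes and the callback are toy stand-ins for ACPICA's node, status and acpi_walk_callback types.

#include <stdio.h>

struct node {
	const char *name;
	struct node *parent;
	struct node *child;	/* first child */
	struct node *peer;	/* next sibling */
};

enum walk_status { WALK_OK, WALK_TERMINATE };

typedef enum walk_status (*walk_cb)(struct node *n, int level, void *ctx);

/*
 * Iterative depth-first walk in the same style as acpi_ns_walk_namespace():
 * no recursion, just (parent, last child, level). Returns WALK_TERMINATE if
 * the callback stopped the walk early, WALK_OK otherwise.
 */
static enum walk_status walk(struct node *start, int max_depth,
			     walk_cb cb, void *ctx)
{
	struct node *parent = start;
	struct node *child = NULL;
	int level = 1;

	while (level > 0) {
		/* Next node in this scope: first child, or the next peer */
		child = child ? child->peer : parent->child;

		if (child) {
			if (cb(child, level, ctx) == WALK_TERMINATE)
				return WALK_TERMINATE;

			if (level < max_depth && child->child) {
				/* Descend one level */
				level++;
				parent = child;
				child = NULL;
			}
		} else {
			/* Scope exhausted: climb back to the parent */
			level--;
			child = parent;
			parent = parent->parent;
		}
	}
	return WALK_OK;
}

static enum walk_status print_node(struct node *n, int level, void *ctx)
{
	(void)ctx;
	printf("%*s%s\n", level * 2, "", n->name);
	return WALK_OK;
}

int main(void)
{
	struct node root = { "\\",   NULL,  NULL, NULL };
	struct node sb   = { "_SB_", &root, NULL, NULL };
	struct node pci  = { "PCI0", &sb,   NULL, NULL };
	struct node isa  = { "ISA0", &sb,   NULL, NULL };

	root.child = &sb;
	sb.child = &pci;
	pci.peer = &isa;

	walk(&root, 8, print_node, NULL);
	return 0;
}

As in the real walk, returning the 'terminate' code from the callback stops the traversal immediately, and the start node itself is never passed to the callback.
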
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
deleted file mode 100644
index a085cc3..0000000
--- a/drivers/acpi/namespace/nsxfeval.c
+++ /dev/null
@@ -1,811 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: nsxfeval - Public interfaces to the ACPI subsystem
- *                         ACPI Object evaluation interfaces
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acinterp.h>
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nsxfeval")
-
-/* Local prototypes */
-static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
-
-#ifdef ACPI_FUTURE_USAGE
-/*******************************************************************************
- *
- * FUNCTION:    acpi_evaluate_object_typed
- *
- * PARAMETERS:  Handle              - Object handle (optional)
- *              Pathname            - Object pathname (optional)
- *              external_params     - List of parameters to pass to method,
- *                                    terminated by NULL.  May be NULL
- *                                    if no parameters are being passed.
- *              return_buffer       - Where to put method's return value (if
- *                                    any).  If NULL, no value is returned.
- *              return_type         - Expected type of return object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Find and evaluate the given object, passing the given
- *              parameters if necessary.  One of "Handle" or "Pathname" must
- *              be valid (non-null)
- *
- ******************************************************************************/
-
-acpi_status
-acpi_evaluate_object_typed(acpi_handle handle,
-			   acpi_string pathname,
-			   struct acpi_object_list *external_params,
-			   struct acpi_buffer *return_buffer,
-			   acpi_object_type return_type)
-{
-	acpi_status status;
-	u8 must_free = FALSE;
-
-	ACPI_FUNCTION_TRACE(acpi_evaluate_object_typed);
-
-	/* Return buffer must be valid */
-
-	if (!return_buffer) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	if (return_buffer->length == ACPI_ALLOCATE_BUFFER) {
-		must_free = TRUE;
-	}
-
-	/* Evaluate the object */
-
-	status =
-	    acpi_evaluate_object(handle, pathname, external_params,
-				 return_buffer);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Type ANY means "don't care" */
-
-	if (return_type == ACPI_TYPE_ANY) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	if (return_buffer->length == 0) {
-
-		/* Error because caller specifically asked for a return value */
-
-		ACPI_ERROR((AE_INFO, "No return value"));
-		return_ACPI_STATUS(AE_NULL_OBJECT);
-	}
-
-	/* Examine the object type returned from evaluate_object */
-
-	if (((union acpi_object *)return_buffer->pointer)->type == return_type) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Return object type does not match requested type */
-
-	ACPI_ERROR((AE_INFO,
-		    "Incorrect return type [%s] requested [%s]",
-		    acpi_ut_get_type_name(((union acpi_object *)return_buffer->
-					   pointer)->type),
-		    acpi_ut_get_type_name(return_type)));
-
-	if (must_free) {
-
-		/* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */
-
-		ACPI_FREE(return_buffer->pointer);
-		return_buffer->pointer = NULL;
-	}
-
-	return_buffer->length = 0;
-	return_ACPI_STATUS(AE_TYPE);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
-#endif				/*  ACPI_FUTURE_USAGE  */
-/*******************************************************************************
- *
- * FUNCTION:    acpi_evaluate_object
- *
- * PARAMETERS:  Handle              - Object handle (optional)
- *              Pathname            - Object pathname (optional)
- *              external_params     - List of parameters to pass to method,
- *                                    terminated by NULL.  May be NULL
- *                                    if no parameters are being passed.
- *              return_buffer       - Where to put method's return value (if
- *                                    any).  If NULL, no value is returned.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Find and evaluate the given object, passing the given
- *              parameters if necessary.  One of "Handle" or "Pathname" must
- *              be valid (non-null)
- *
- ******************************************************************************/
-acpi_status
-acpi_evaluate_object(acpi_handle handle,
-		     acpi_string pathname,
-		     struct acpi_object_list *external_params,
-		     struct acpi_buffer *return_buffer)
-{
-	acpi_status status;
-	struct acpi_evaluate_info *info;
-	acpi_size buffer_space_needed;
-	u32 i;
-
-	ACPI_FUNCTION_TRACE(acpi_evaluate_object);
-
-	/* Allocate and initialize the evaluation information block */
-
-	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
-	if (!info) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	info->pathname = pathname;
-
-	/* Convert and validate the device handle */
-
-	info->prefix_node = acpi_ns_map_handle_to_node(handle);
-	if (!info->prefix_node) {
-		status = AE_BAD_PARAMETER;
-		goto cleanup;
-	}
-
-	/*
-	 * If there are parameters to be passed to a control method, the external
-	 * objects must all be converted to internal objects
-	 */
-	if (external_params && external_params->count) {
-		/*
-		 * Allocate a new parameter block for the internal objects
-		 * Add 1 to count to allow for null terminated internal list
-		 */
-		info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size)
-							 external_params->
-							 count +
-							 1) * sizeof(void *));
-		if (!info->parameters) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		/* Convert each external object in the list to an internal object */
-
-		for (i = 0; i < external_params->count; i++) {
-			status =
-			    acpi_ut_copy_eobject_to_iobject(&external_params->
-							    pointer[i],
-							    &info->
-							    parameters[i]);
-			if (ACPI_FAILURE(status)) {
-				goto cleanup;
-			}
-		}
-		info->parameters[external_params->count] = NULL;
-	}
-
-	/*
-	 * Three major cases:
-	 * 1) Fully qualified pathname
-	 * 2) No handle, not fully qualified pathname (error)
-	 * 3) Valid handle
-	 */
-	if ((pathname) && (acpi_ns_valid_root_prefix(pathname[0]))) {
-
-		/* The path is fully qualified, just evaluate by name */
-
-		info->prefix_node = NULL;
-		status = acpi_ns_evaluate(info);
-	} else if (!handle) {
-		/*
-		 * A handle is optional iff a fully qualified pathname is specified.
-		 * Since we've already handled fully qualified names above, this is
-		 * an error
-		 */
-		if (!pathname) {
-			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-					  "Both Handle and Pathname are NULL"));
-		} else {
-			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-					  "Null Handle with relative pathname [%s]",
-					  pathname));
-		}
-
-		status = AE_BAD_PARAMETER;
-	} else {
-		/* We have a namespace node and a possible relative path */
-
-		status = acpi_ns_evaluate(info);
-	}
-
-	/*
-	 * If we are expecting a return value, and all went well above,
-	 * copy the return value to an external object.
-	 */
-	if (return_buffer) {
-		if (!info->return_object) {
-			return_buffer->length = 0;
-		} else {
-			if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) ==
-			    ACPI_DESC_TYPE_NAMED) {
-				/*
-				 * If we received a NS Node as a return object, this means that
-				 * the object we are evaluating has nothing interesting to
-				 * return (such as a mutex, etc.)  We return an error because
-				 * these types are essentially unsupported by this interface.
-				 * We don't check up front because this makes it easier to add
-				 * support for various types at a later date if necessary.
-				 */
-				status = AE_TYPE;
-				info->return_object = NULL;	/* No need to delete a NS Node */
-				return_buffer->length = 0;
-			}
-
-			if (ACPI_SUCCESS(status)) {
-
-				/* Dereference Index and ref_of references */
-
-				acpi_ns_resolve_references(info);
-
-				/* Get the size of the returned object */
-
-				status =
-				    acpi_ut_get_object_size(info->return_object,
-							    &buffer_space_needed);
-				if (ACPI_SUCCESS(status)) {
-
-					/* Validate/Allocate/Clear caller buffer */
-
-					status =
-					    acpi_ut_initialize_buffer
-					    (return_buffer,
-					     buffer_space_needed);
-					if (ACPI_FAILURE(status)) {
-						/*
-						 * Caller's buffer is too small or a new one can't
-						 * be allocated
-						 */
-						ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-								  "Needed buffer size %X, %s\n",
-								  (u32)
-								  buffer_space_needed,
-								  acpi_format_exception
-								  (status)));
-					} else {
-						/* We have enough space for the object, build it */
-
-						status =
-						    acpi_ut_copy_iobject_to_eobject
-						    (info->return_object,
-						     return_buffer);
-					}
-				}
-			}
-		}
-	}
-
-	if (info->return_object) {
-		/*
-		 * Delete the internal return object. NOTE: Interpreter must be
-		 * locked to avoid race condition.
-		 */
-		acpi_ex_enter_interpreter();
-
-		/* Remove one reference on the return object (should delete it) */
-
-		acpi_ut_remove_reference(info->return_object);
-		acpi_ex_exit_interpreter();
-	}
-
-      cleanup:
-
-	/* Free the input parameter list (if we created one) */
-
-	if (info->parameters) {
-
-		/* Free the allocated parameter block */
-
-		acpi_ut_delete_internal_object_list(info->parameters);
-	}
-
-	ACPI_FREE(info);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_evaluate_object)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_resolve_references
- *
- * PARAMETERS:  Info                    - Evaluation info block
- *
- * RETURN:      Info->return_object is replaced with the dereferenced object
- *
- * DESCRIPTION: Dereference certain reference objects. Called before an
- *              internal return object is converted to an external union acpi_object.
- *
- * Performs an automatic dereference of Index and ref_of reference objects.
- * These reference objects are not supported by the union acpi_object, so this is a
- * last resort effort to return something useful. Also, provides compatibility
- * with other ACPI implementations.
- *
- * NOTE: does not handle references within returned package objects or nested
- * references, but this support could be added later if found to be necessary.
- *
- ******************************************************************************/
-static void acpi_ns_resolve_references(struct acpi_evaluate_info *info)
-{
-	union acpi_operand_object *obj_desc = NULL;
-	struct acpi_namespace_node *node;
-
-	/* We are interested in reference objects only */
-
-	if (ACPI_GET_OBJECT_TYPE(info->return_object) !=
-	    ACPI_TYPE_LOCAL_REFERENCE) {
-		return;
-	}
-
-	/*
-	 * Two types of references are supported - those created by Index and
-	 * ref_of operators. A name reference (AML_NAMEPATH_OP) can be converted
-	 * to an union acpi_object, so it is not dereferenced here. A ddb_handle
-	 * (AML_LOAD_OP) cannot be dereferenced, nor can it be converted to
-	 * an union acpi_object.
-	 */
-	switch (info->return_object->reference.class) {
-	case ACPI_REFCLASS_INDEX:
-
-		obj_desc = *(info->return_object->reference.where);
-		break;
-
-	case ACPI_REFCLASS_REFOF:
-
-		node = info->return_object->reference.object;
-		if (node) {
-			obj_desc = node->object;
-		}
-		break;
-
-	default:
-		return;
-	}
-
-	/* Replace the existing reference object */
-
-	if (obj_desc) {
-		acpi_ut_add_reference(obj_desc);
-		acpi_ut_remove_reference(info->return_object);
-		info->return_object = obj_desc;
-	}
-
-	return;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_walk_namespace
- *
- * PARAMETERS:  Type                - acpi_object_type to search for
- *              start_object        - Handle in namespace where search begins
- *              max_depth           - Depth to which search is to reach
- *              user_function       - Called when an object of "Type" is found
- *              Context             - Passed to user function
- *              return_value        - Location where return value of
- *                                    user_function is put if terminated early
- *
- * RETURN:      Status
- *
- *
- * DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
- *              starting (and ending) at the object specified by start_handle.
- *              The user_function is called whenever an object that matches
- *              the type parameter is found.  If the user function returns
- *              a non-zero value, the search is terminated immediately and this
- *              value is returned to the caller.
- *
- *              The point of this procedure is to provide a generic namespace
- *              walk routine that can be called from multiple places to
- *              provide multiple services;  the User Function can be tailored
- *              to each task, whether it is a print function, a compare
- *              function, etc.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_walk_namespace(acpi_object_type type,
-		    acpi_handle start_object,
-		    u32 max_depth,
-		    acpi_walk_callback user_function,
-		    void *context, void **return_value)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_walk_namespace);
-
-	/* Parameter validation */
-
-	if ((type > ACPI_TYPE_LOCAL_MAX) || (!max_depth) || (!user_function)) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/*
-	 * Lock the namespace around the walk.
-	 * The namespace will be unlocked/locked around each call
-	 * to the user function - since this function
-	 * must be allowed to make Acpi calls itself.
-	 */
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_ns_walk_namespace(type, start_object, max_depth,
-					ACPI_NS_WALK_UNLOCK,
-					user_function, context, return_value);
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_walk_namespace)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_get_device_callback
- *
- * PARAMETERS:  Callback from acpi_get_device
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Callback from walk_namespace. Filters out devices that are
- *              not present and, if the caller specified a HID, filters
- *              based on that as well.
- *
- ******************************************************************************/
-static acpi_status
-acpi_ns_get_device_callback(acpi_handle obj_handle,
-			    u32 nesting_level,
-			    void *context, void **return_value)
-{
-	struct acpi_get_devices_info *info = context;
-	acpi_status status;
-	struct acpi_namespace_node *node;
-	u32 flags;
-	struct acpica_device_id hid;
-	struct acpi_compatible_id_list *cid;
-	u32 i;
-	int found;
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	node = acpi_ns_map_handle_to_node(obj_handle);
-	status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	if (!node) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/* Run _STA to determine if device is present */
-
-	status = acpi_ut_execute_STA(node, &flags);
-	if (ACPI_FAILURE(status)) {
-		return (AE_CTRL_DEPTH);
-	}
-
-	if (!(flags & ACPI_STA_DEVICE_PRESENT) &&
-	    !(flags & ACPI_STA_DEVICE_FUNCTIONING)) {
-		/*
-		 * Don't examine the children of the device unless the device
-		 * is either present or functioning. See ACPI spec,
-		 * description of _STA for more information.
-		 */
-		return (AE_CTRL_DEPTH);
-	}
-
-	/* Filter based on device HID & CID */
-
-	if (info->hid != NULL) {
-		status = acpi_ut_execute_HID(node, &hid);
-		if (status == AE_NOT_FOUND) {
-			return (AE_OK);
-		} else if (ACPI_FAILURE(status)) {
-			return (AE_CTRL_DEPTH);
-		}
-
-		if (ACPI_STRNCMP(hid.value, info->hid, sizeof(hid.value)) != 0) {
-
-			/* Get the list of Compatible IDs */
-
-			status = acpi_ut_execute_CID(node, &cid);
-			if (status == AE_NOT_FOUND) {
-				return (AE_OK);
-			} else if (ACPI_FAILURE(status)) {
-				return (AE_CTRL_DEPTH);
-			}
-
-			/* Walk the CID list */
-
-			found = 0;
-			for (i = 0; i < cid->count; i++) {
-				if (ACPI_STRNCMP(cid->id[i].value, info->hid,
-						 sizeof(struct
-							acpi_compatible_id)) ==
-				    0) {
-					found = 1;
-					break;
-				}
-			}
-			ACPI_FREE(cid);
-			if (!found)
-				return (AE_OK);
-		}
-	}
-
-	status = info->user_function(obj_handle, nesting_level, info->context,
-				     return_value);
-	return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_devices
- *
- * PARAMETERS:  HID                 - HID to search for. Can be NULL.
- *              user_function       - Called when a matching object is found
- *              Context             - Passed to user function
- *              return_value        - Location where return value of
- *                                    user_function is put if terminated early
- *
- * RETURN:      Status
- *
- *
- * DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
- *              starting (and ending) at the namespace root.
- *              The user_function is called whenever an object of type
- *              Device is found.  If the user function returns
- *              a non-zero value, the search is terminated immediately and this
- *              value is returned to the caller.
- *
- *              This is a wrapper for walk_namespace, but the callback performs
- *              additional filtering. Please see acpi_ns_get_device_callback.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_get_devices(const char *HID,
-		 acpi_walk_callback user_function,
-		 void *context, void **return_value)
-{
-	acpi_status status;
-	struct acpi_get_devices_info info;
-
-	ACPI_FUNCTION_TRACE(acpi_get_devices);
-
-	/* Parameter validation */
-
-	if (!user_function) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/*
-	 * We're going to call their callback from OUR callback, so we need
-	 * to know what it is, and their context parameter.
-	 */
-	info.hid = HID;
-	info.context = context;
-	info.user_function = user_function;
-
-	/*
-	 * Lock the namespace around the walk.
-	 * The namespace will be unlocked/locked around each call
-	 * to the user function - since this function
-	 * must be allowed to make Acpi calls itself.
-	 */
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
-					ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
-					acpi_ns_get_device_callback, &info,
-					return_value);
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_devices)
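
A caller drives the filtered walk above entirely through its callback; the
sketch below shows the usual pattern, assuming only the exported
acpi_get_devices() interface. The "PNP0C0A" id and the function names are
illustrative, not part of this file.

/* Sketch: count every device whose _HID/_CID matches the given id */
static acpi_status count_matching_cb(acpi_handle handle, u32 nesting_level,
				     void *context, void **return_value)
{
	u32 *count = context;

	(*count)++;
	return AE_OK;		/* a failure code here would stop the walk */
}

static u32 count_matching_devices(void)
{
	u32 count = 0;

	acpi_get_devices("PNP0C0A", count_matching_cb, &count, NULL);
	return count;
}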
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_attach_data
- *
- * PARAMETERS:  obj_handle          - Namespace node
- *              Handler             - Handler for this attachment
- *              Data                - Pointer to data to be attached
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Attach arbitrary data and handler to a namespace node.
- *
- ******************************************************************************/
-acpi_status
-acpi_attach_data(acpi_handle obj_handle,
-		 acpi_object_handler handler, void *data)
-{
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	/* Parameter validation */
-
-	if (!obj_handle || !handler || !data) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	/* Convert and validate the handle */
-
-	node = acpi_ns_map_handle_to_node(obj_handle);
-	if (!node) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	status = acpi_ns_attach_data(node, handler, data);
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return (status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_attach_data)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_detach_data
- *
- * PARAMETERS:  obj_handle          - Namespace node handle
- *              Handler             - Handler used in call to acpi_attach_data
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove data that was previously attached to a node.
- *
- ******************************************************************************/
-acpi_status
-acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
-{
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	/* Parameter validation */
-
-	if (!obj_handle || !handler) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	/* Convert and validate the handle */
-
-	node = acpi_ns_map_handle_to_node(obj_handle);
-	if (!node) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	status = acpi_ns_detach_data(node, handler);
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return (status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_detach_data)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_data
- *
- * PARAMETERS:  obj_handle          - Namespace node
- *              Handler             - Handler used in call to attach_data
- *              Data                - Where the data is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Retrieve data that was previously attached to a namespace node.
- *
- ******************************************************************************/
-acpi_status
-acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
-{
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	/* Parameter validation */
-
-	if (!obj_handle || !handler || !data) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	/* Convert and validate the handle */
-
-	node = acpi_ns_map_handle_to_node(obj_handle);
-	if (!node) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	status = acpi_ns_get_attached_data(node, handler, data);
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return (status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_data)
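
The three data interfaces above are normally used as an attach/lookup pair
plus cleanup, with the handler acting both as the lookup key and as the
callback run if the node disappears while data is still attached. A minimal
sketch, assuming that acpi_object_handler signature; the structure and
function names are illustrative only:

struct my_drv_data {
	int instance;
};

/* Key for attach/get/detach; also called if the node is deleted */
static void my_drv_data_handler(acpi_handle handle, void *data)
{
}

static acpi_status my_drv_bind(acpi_handle handle, struct my_drv_data *priv)
{
	return acpi_attach_data(handle, my_drv_data_handler, priv);
}

static struct my_drv_data *my_drv_lookup(acpi_handle handle)
{
	void *data = NULL;

	if (ACPI_FAILURE(acpi_get_data(handle, my_drv_data_handler, &data)))
		return NULL;
	return data;
}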
diff --git a/drivers/acpi/namespace/nsxfname.c b/drivers/acpi/namespace/nsxfname.c
deleted file mode 100644
index 5efa4e7..0000000
--- a/drivers/acpi/namespace/nsxfname.c
+++ /dev/null
@@ -1,359 +0,0 @@
-/******************************************************************************
- *
- * Module Name: nsxfname - Public interfaces to the ACPI subsystem
- *                         ACPI Namespace oriented interfaces
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nsxfname")
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_get_handle
- *
- * PARAMETERS:  Parent          - Object to search under (search scope).
- *              Pathname        - Pointer to an asciiz string containing the
- *                                name
- *              ret_handle      - Where the return handle is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This routine will search for a caller specified name in the
- *              name space.  The caller can restrict the search region by
- *              specifying a non NULL parent.  The parent value is itself a
- *              namespace handle.
- *
- ******************************************************************************/
-acpi_status
-acpi_get_handle(acpi_handle parent,
-		acpi_string pathname, acpi_handle * ret_handle)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node = NULL;
-	struct acpi_namespace_node *prefix_node = NULL;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* Parameter Validation */
-
-	if (!ret_handle || !pathname) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/* Convert a parent handle to a prefix node */
-
-	if (parent) {
-		prefix_node = acpi_ns_map_handle_to_node(parent);
-		if (!prefix_node) {
-			return (AE_BAD_PARAMETER);
-		}
-	}
-
-	/*
-	 * Valid cases are:
-	 * 1) Fully qualified pathname
-	 * 2) Parent + Relative pathname
-	 *
-	 * Error for <null Parent + relative path>
-	 */
-	if (acpi_ns_valid_root_prefix(pathname[0])) {
-
-		/* Pathname is fully qualified (starts with '\') */
-
-		/* Special case for root-only, since we can't search for it */
-
-		if (!ACPI_STRCMP(pathname, ACPI_NS_ROOT_PATH)) {
-			*ret_handle =
-			    acpi_ns_convert_entry_to_handle(acpi_gbl_root_node);
-			return (AE_OK);
-		}
-	} else if (!prefix_node) {
-
-		/* Relative path with null prefix is disallowed */
-
-		return (AE_BAD_PARAMETER);
-	}
-
-	/* Find the Node and convert to a handle */
-
-	status =
-	    acpi_ns_get_node(prefix_node, pathname, ACPI_NS_NO_UPSEARCH, &node);
-	if (ACPI_SUCCESS(status)) {
-		*ret_handle = acpi_ns_convert_entry_to_handle(node);
-	}
-
-	return (status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_handle)
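
In practice the two valid cases map to two call styles: a NULL parent with an
absolute path, or a parent handle with a relative path. A short sketch (the
"LID0" name is illustrative):

static acpi_status find_lid(acpi_handle *lid_handle)
{
	acpi_handle sb_handle;
	acpi_status status;

	/* Absolute path, so the Parent argument may be NULL */
	status = acpi_get_handle(NULL, "\\_SB", &sb_handle);
	if (ACPI_FAILURE(status))
		return status;

	/* Relative path, resolved under the parent handle */
	return acpi_get_handle(sb_handle, "LID0", lid_handle);
}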
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_get_name
- *
- * PARAMETERS:  Handle          - Handle to be converted to a pathname
- *              name_type       - Full pathname or single segment
- *              Buffer          - Buffer for returned path
- *
- * RETURN:      Status.  On success, Buffer contains the fully qualified Name.
- *
- * DESCRIPTION: This routine returns the fully qualified name associated with
- *              the Handle parameter.  This and acpi_get_handle are
- *              complementary functions.
- *
- ******************************************************************************/
-acpi_status
-acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node;
-
-	/* Parameter validation */
-
-	if (name_type > ACPI_NAME_TYPE_MAX) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_validate_buffer(buffer);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	if (name_type == ACPI_FULL_PATHNAME) {
-
-		/* Get the full pathname (From the namespace root) */
-
-		status = acpi_ns_handle_to_pathname(handle, buffer);
-		return (status);
-	}
-
-	/*
-	 * Wants the single segment ACPI name.
-	 * Validate handle and convert to a namespace Node
-	 */
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	node = acpi_ns_map_handle_to_node(handle);
-	if (!node) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/* Validate/Allocate/Clear caller buffer */
-
-	status = acpi_ut_initialize_buffer(buffer, ACPI_PATH_SEGMENT_LENGTH);
-	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
-	}
-
-	/* Just copy the ACPI name from the Node and zero terminate it */
-
-	ACPI_STRNCPY(buffer->pointer, acpi_ut_get_node_name(node),
-		     ACPI_NAME_SIZE);
-	((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
-	status = AE_OK;
-
-      unlock_and_exit:
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return (status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_name)
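
The Buffer argument follows the usual struct acpi_buffer convention; with
ACPI_ALLOCATE_BUFFER the routine allocates the string and the caller frees
it. A minimal sketch, assuming a valid handle:

static void print_acpi_path(acpi_handle handle)
{
	struct acpi_buffer path = { ACPI_ALLOCATE_BUFFER, NULL };

	if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &path))) {
		printk(KERN_DEBUG "object path: %s\n", (char *)path.pointer);
		kfree(path.pointer);
	}
}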
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_get_object_info
- *
- * PARAMETERS:  Handle          - Object Handle
- *              Buffer          - Where the info is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Returns information about an object as gleaned from the
- *              namespace node and possibly by running several standard
- *              control methods (Such as in the case of a device.)
- *
- ******************************************************************************/
-acpi_status
-acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node;
-	struct acpi_device_info *info;
-	struct acpi_device_info *return_info;
-	struct acpi_compatible_id_list *cid_list = NULL;
-	acpi_size size;
-
-	/* Parameter validation */
-
-	if (!handle || !buffer) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_validate_buffer(buffer);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_device_info));
-	if (!info) {
-		return (AE_NO_MEMORY);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	node = acpi_ns_map_handle_to_node(handle);
-	if (!node) {
-		(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-		status = AE_BAD_PARAMETER;
-		goto cleanup;
-	}
-
-	/* Init return structure */
-
-	size = sizeof(struct acpi_device_info);
-
-	info->type = node->type;
-	info->name = node->name.integer;
-	info->valid = 0;
-
-	if (node->type == ACPI_TYPE_METHOD) {
-		info->param_count = node->object->method.param_count;
-	}
-
-	status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	/* If not a device, we are all done */
-
-	if (info->type == ACPI_TYPE_DEVICE) {
-		/*
-		 * Get extra info for ACPI Devices objects only:
-		 * Run the Device _HID, _UID, _CID, _STA, _ADR and _sx_d methods.
-		 *
-		 * Note: none of these methods are required, so they may or may
-		 * not be present for this device.  The Info->Valid bitfield is used
-		 * to indicate which methods were found and ran successfully.
-		 */
-
-		/* Execute the Device._HID method */
-
-		status = acpi_ut_execute_HID(node, &info->hardware_id);
-		if (ACPI_SUCCESS(status)) {
-			info->valid |= ACPI_VALID_HID;
-		}
-
-		/* Execute the Device._UID method */
-
-		status = acpi_ut_execute_UID(node, &info->unique_id);
-		if (ACPI_SUCCESS(status)) {
-			info->valid |= ACPI_VALID_UID;
-		}
-
-		/* Execute the Device._CID method */
-
-		status = acpi_ut_execute_CID(node, &cid_list);
-		if (ACPI_SUCCESS(status)) {
-			size += cid_list->size;
-			info->valid |= ACPI_VALID_CID;
-		}
-
-		/* Execute the Device._STA method */
-
-		status = acpi_ut_execute_STA(node, &info->current_status);
-		if (ACPI_SUCCESS(status)) {
-			info->valid |= ACPI_VALID_STA;
-		}
-
-		/* Execute the Device._ADR method */
-
-		status = acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, node,
-							 &info->address);
-		if (ACPI_SUCCESS(status)) {
-			info->valid |= ACPI_VALID_ADR;
-		}
-
-		/* Execute the Device._sx_d methods */
-
-		status = acpi_ut_execute_sxds(node, info->highest_dstates);
-		if (ACPI_SUCCESS(status)) {
-			info->valid |= ACPI_VALID_SXDS;
-		}
-	}
-
-	/* Validate/Allocate/Clear caller buffer */
-
-	status = acpi_ut_initialize_buffer(buffer, size);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	/* Populate the return buffer */
-
-	return_info = buffer->pointer;
-	ACPI_MEMCPY(return_info, info, sizeof(struct acpi_device_info));
-
-	if (cid_list) {
-		ACPI_MEMCPY(&return_info->compatibility_id, cid_list,
-			    cid_list->size);
-	}
-
-      cleanup:
-	ACPI_FREE(info);
-	if (cid_list) {
-		ACPI_FREE(cid_list);
-	}
-	return (status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_object_info)
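
Since none of the helper methods are mandatory, callers must test Info->Valid
before trusting any individual field. A hedged sketch of the buffer-based
calling convention used here (field names follow the structure filled in
above):

static acpi_status dump_device_info(acpi_handle handle)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_device_info *info;
	acpi_status status;

	status = acpi_get_object_info(handle, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	info = buffer.pointer;
	if (info->valid & ACPI_VALID_HID)
		printk(KERN_DEBUG "HID: %s\n", info->hardware_id.value);
	if (info->valid & ACPI_VALID_STA)
		printk(KERN_DEBUG "_STA: 0x%08x\n", info->current_status);

	kfree(buffer.pointer);
	return AE_OK;
}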
diff --git a/drivers/acpi/namespace/nsxfobj.c b/drivers/acpi/namespace/nsxfobj.c
deleted file mode 100644
index 2b375ee..0000000
--- a/drivers/acpi/namespace/nsxfobj.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: nsxfobj - Public interfaces to the ACPI subsystem
- *                         ACPI Object oriented interfaces
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_NAMESPACE
-ACPI_MODULE_NAME("nsxfobj")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_id
- *
- * PARAMETERS:  Handle          - Handle of object whose id is desired
- *              ret_id          - Where the id will be placed
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This routine returns the owner id associated with a handle
- *
- ******************************************************************************/
-acpi_status acpi_get_id(acpi_handle handle, acpi_owner_id * ret_id)
-{
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	/* Parameter Validation */
-
-	if (!ret_id) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	/* Convert and validate the handle */
-
-	node = acpi_ns_map_handle_to_node(handle);
-	if (!node) {
-		(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-		return (AE_BAD_PARAMETER);
-	}
-
-	*ret_id = node->owner_id;
-
-	status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return (status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_id)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_type
- *
- * PARAMETERS:  Handle          - Handle of object whose type is desired
- *              ret_type        - Where the type will be placed
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This routine returns the type associated with a particular handle
- *
- ******************************************************************************/
-acpi_status acpi_get_type(acpi_handle handle, acpi_object_type * ret_type)
-{
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	/* Parameter Validation */
-
-	if (!ret_type) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/*
-	 * Special case for the predefined Root Node
-	 * (return type ANY)
-	 */
-	if (handle == ACPI_ROOT_OBJECT) {
-		*ret_type = ACPI_TYPE_ANY;
-		return (AE_OK);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	/* Convert and validate the handle */
-
-	node = acpi_ns_map_handle_to_node(handle);
-	if (!node) {
-		(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-		return (AE_BAD_PARAMETER);
-	}
-
-	*ret_type = node->type;
-
-	status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return (status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_type)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_parent
- *
- * PARAMETERS:  Handle          - Handle of object whose parent is desired
- *              ret_handle      - Where the parent handle will be placed
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Returns a handle to the parent of the object represented by
- *              Handle.
- *
- ******************************************************************************/
-acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
-{
-	struct acpi_namespace_node *node;
-	acpi_status status;
-
-	if (!ret_handle) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/* Special case for the predefined Root Node (no parent) */
-
-	if (handle == ACPI_ROOT_OBJECT) {
-		return (AE_NULL_ENTRY);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	/* Convert and validate the handle */
-
-	node = acpi_ns_map_handle_to_node(handle);
-	if (!node) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/* Get the parent entry */
-
-	*ret_handle =
-	    acpi_ns_convert_entry_to_handle(acpi_ns_get_parent_node(node));
-
-	/* Return exception if parent is null */
-
-	if (!acpi_ns_get_parent_node(node)) {
-		status = AE_NULL_ENTRY;
-	}
-
-      unlock_and_exit:
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return (status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_parent)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_next_object
- *
- * PARAMETERS:  Type            - Type of object to be searched for
- *              Parent          - Parent object whose children we are getting
- *              last_child      - Previous child that was found.
- *                                The NEXT child will be returned
- *              ret_handle      - Where handle to the next object is placed
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Return the next peer object within the namespace.  If last_child
- *              is valid, Parent is ignored.  Otherwise, the first child object
- *              within Parent is returned.
- *
- ******************************************************************************/
-acpi_status
-acpi_get_next_object(acpi_object_type type,
-		     acpi_handle parent,
-		     acpi_handle child, acpi_handle * ret_handle)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node;
-	struct acpi_namespace_node *parent_node = NULL;
-	struct acpi_namespace_node *child_node = NULL;
-
-	/* Parameter validation */
-
-	if (type > ACPI_TYPE_EXTERNAL_MAX) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	/* If null handle, use the parent */
-
-	if (!child) {
-
-		/* Start search at the beginning of the specified scope */
-
-		parent_node = acpi_ns_map_handle_to_node(parent);
-		if (!parent_node) {
-			status = AE_BAD_PARAMETER;
-			goto unlock_and_exit;
-		}
-	} else {
-		/* Non-null handle, ignore the parent */
-		/* Convert and validate the handle */
-
-		child_node = acpi_ns_map_handle_to_node(child);
-		if (!child_node) {
-			status = AE_BAD_PARAMETER;
-			goto unlock_and_exit;
-		}
-	}
-
-	/* Internal function does the real work */
-
-	node = acpi_ns_get_next_node(type, parent_node, child_node);
-	if (!node) {
-		status = AE_NOT_FOUND;
-		goto unlock_and_exit;
-	}
-
-	if (ret_handle) {
-		*ret_handle = acpi_ns_convert_entry_to_handle(node);
-	}
-
-      unlock_and_exit:
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return (status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_next_object)
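
acpi_get_next_object is an iterator: pass a NULL child to get the first
object under Parent, then feed each result back in until AE_NOT_FOUND ends
the loop. A minimal sketch ('parent' is assumed to be a valid handle):

static void for_each_child_device(acpi_handle parent)
{
	acpi_handle child = NULL;

	while (ACPI_SUCCESS(acpi_get_next_object(ACPI_TYPE_DEVICE, parent,
						 child, &child))) {
		/* ... examine 'child' here ... */
	}
}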
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 25ceae91..c5e292a 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -29,7 +29,6 @@
 #include <linux/errno.h>
 #include <linux/acpi.h>
 #include <acpi/acpi_bus.h>
-#include <acpi/acmacros.h>
 
 #define ACPI_NUMA	0x80000000
 #define _COMPONENT	ACPI_NUMA
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c811142..6729a49 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -726,7 +726,7 @@
 
 	dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
 	if (!dpc)
-		return_ACPI_STATUS(AE_NO_MEMORY);
+		return AE_NO_MEMORY;
 
 	dpc->function = function;
 	dpc->context = context;
@@ -747,7 +747,7 @@
 		status = AE_ERROR;
 		kfree(dpc);
 	}
-	return_ACPI_STATUS(status);
+	return status;
 }
 
 acpi_status acpi_os_execute(acpi_execute_type type,
diff --git a/drivers/acpi/parser/Makefile b/drivers/acpi/parser/Makefile
deleted file mode 100644
index db24ee0..0000000
--- a/drivers/acpi/parser/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for all Linux ACPI interpreter subdirectories
-#
-
-obj-y := psargs.o    psparse.o  psloop.o pstree.o   pswalk.o  \
-	 psopcode.o  psscope.o  psutils.o  psxface.o
-
-EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/parser/psargs.c b/drivers/acpi/parser/psargs.c
deleted file mode 100644
index d830b29..0000000
--- a/drivers/acpi/parser/psargs.c
+++ /dev/null
@@ -1,751 +0,0 @@
-/******************************************************************************
- *
- * Module Name: psargs - Parse AML opcode arguments
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/amlcode.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acdispat.h>
-
-#define _COMPONENT          ACPI_PARSER
-ACPI_MODULE_NAME("psargs")
-
-/* Local prototypes */
-static u32
-acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state);
-
-static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
-						       *parser_state);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_next_package_length
- *
- * PARAMETERS:  parser_state        - Current parser state object
- *
- * RETURN:      Decoded package length. On completion, the AML pointer points
- *              past the length byte or bytes.
- *
- * DESCRIPTION: Decode and return a package length field.
- *              Note: Largest package length is 28 bits, from ACPI specification
- *
- ******************************************************************************/
-
-static u32
-acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
-{
-	u8 *aml = parser_state->aml;
-	u32 package_length = 0;
-	u32 byte_count;
-	u8 byte_zero_mask = 0x3F;	/* Default [0:5] */
-
-	ACPI_FUNCTION_TRACE(ps_get_next_package_length);
-
-	/*
-	 * Byte 0 bits [6:7] contain the number of additional bytes
-	 * used to encode the package length, either 0,1,2, or 3
-	 */
-	byte_count = (aml[0] >> 6);
-	parser_state->aml += ((acpi_size) byte_count + 1);
-
-	/* Get bytes 3, 2, 1 as needed */
-
-	while (byte_count) {
-		/*
-		 * Final bit positions for the package length bytes:
-		 *      Byte3->[20:27]
-		 *      Byte2->[12:19]
-		 *      Byte1->[04:11]
-		 *      Byte0->[00:03]
-		 */
-		package_length |= (aml[byte_count] << ((byte_count << 3) - 4));
-
-		byte_zero_mask = 0x0F;	/* Use bits [0:3] of byte 0 */
-		byte_count--;
-	}
-
-	/* Byte 0 is a special case, either bits [0:3] or [0:5] are used */
-
-	package_length |= (aml[0] & byte_zero_mask);
-	return_UINT32(package_length);
-}
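
The same 1-4 byte PkgLength encoding can be decoded outside the interpreter.
A self-contained sketch that mirrors the logic above (byte 0 bits [6:7] give
the number of extra bytes; its low bits contribute either [0:5] or [0:3]):

#include <stdint.h>
#include <stddef.h>

/* Returns the decoded package length; *encoding_size receives the number
 * of bytes the length field itself occupied (1-4). */
static uint32_t decode_pkg_length(const uint8_t *aml, size_t *encoding_size)
{
	uint32_t byte_count = aml[0] >> 6;		/* 0-3 extra bytes */
	uint8_t byte_zero_mask = byte_count ? 0x0F : 0x3F;
	uint32_t length = 0;
	uint32_t i;

	/* Extra bytes land in bits [4:11], [12:19], [20:27] */
	for (i = byte_count; i > 0; i--)
		length |= (uint32_t)aml[i] << ((i << 3) - 4);

	length |= aml[0] & byte_zero_mask;
	*encoding_size = byte_count + 1;
	return length;
}

For example, the two-byte encoding { 0x48, 0x0C } decodes to 0x0C << 4 | 0x8,
i.e. 0xC8 (200 bytes), and consumes two bytes of AML.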
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_next_package_end
- *
- * PARAMETERS:  parser_state        - Current parser state object
- *
- * RETURN:      Pointer to end-of-package +1
- *
- * DESCRIPTION: Get next package length and return a pointer past the end of
- *              the package.  Consumes the package length field
- *
- ******************************************************************************/
-
-u8 *acpi_ps_get_next_package_end(struct acpi_parse_state *parser_state)
-{
-	u8 *start = parser_state->aml;
-	u32 package_length;
-
-	ACPI_FUNCTION_TRACE(ps_get_next_package_end);
-
-	/* Function below updates parser_state->Aml */
-
-	package_length = acpi_ps_get_next_package_length(parser_state);
-
-	return_PTR(start + package_length);	/* end of package */
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_next_namestring
- *
- * PARAMETERS:  parser_state        - Current parser state object
- *
- * RETURN:      Pointer to the start of the name string (pointer points into
- *              the AML).
- *
- * DESCRIPTION: Get next raw namestring within the AML stream.  Handles all name
- *              prefix characters.  Set parser state to point past the string.
- *              (Name is consumed from the AML.)
- *
- ******************************************************************************/
-
-char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
-{
-	u8 *start = parser_state->aml;
-	u8 *end = parser_state->aml;
-
-	ACPI_FUNCTION_TRACE(ps_get_next_namestring);
-
-	/* Point past any namestring prefix characters (backslash or caret) */
-
-	while (acpi_ps_is_prefix_char(*end)) {
-		end++;
-	}
-
-	/* Decode the path prefix character */
-
-	switch (*end) {
-	case 0:
-
-		/* null_name */
-
-		if (end == start) {
-			start = NULL;
-		}
-		end++;
-		break;
-
-	case AML_DUAL_NAME_PREFIX:
-
-		/* Two name segments */
-
-		end += 1 + (2 * ACPI_NAME_SIZE);
-		break;
-
-	case AML_MULTI_NAME_PREFIX_OP:
-
-		/* Multiple name segments, 4 chars each, count in next byte */
-
-		end += 2 + (*(end + 1) * ACPI_NAME_SIZE);
-		break;
-
-	default:
-
-		/* Single name segment */
-
-		end += ACPI_NAME_SIZE;
-		break;
-	}
-
-	parser_state->aml = end;
-	return_PTR((char *)start);
-}
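
The prefix handling above boils down to a small size calculation over the raw
AML bytes. A self-contained sketch (prefix and opcode values per the ACPI
spec: '\' root, '^' parent, 0x2E dual-name, 0x2F multi-name):

#include <stdint.h>
#include <stddef.h>

#define AML_DUAL_NAME_PREFIX	0x2E
#define AML_MULTI_NAME_PREFIX	0x2F
#define ACPI_NAME_SIZE		4

/* Number of AML bytes occupied by the namestring starting at 'aml' */
static size_t namestring_size(const uint8_t *aml)
{
	size_t size = 0;

	while (aml[size] == '\\' || aml[size] == '^')
		size++;			/* root/parent prefixes */

	switch (aml[size]) {
	case 0:				/* null name: a single 0x00 byte */
		return size + 1;
	case AML_DUAL_NAME_PREFIX:	/* two 4-char segments */
		return size + 1 + 2 * ACPI_NAME_SIZE;
	case AML_MULTI_NAME_PREFIX:	/* count byte, then N segments */
		return size + 2 + aml[size + 1] * ACPI_NAME_SIZE;
	default:			/* one 4-char segment */
		return size + ACPI_NAME_SIZE;
	}
}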
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_next_namepath
- *
- * PARAMETERS:  parser_state        - Current parser state object
- *              Arg                 - Where the namepath will be stored
- *              arg_count           - If the namepath points to a control method
- *                                    the method's argument count is returned here.
- *              possible_method_call - Whether the namepath can possibly be the
- *                                    start of a method call
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Get next name (if method call, return # of required args).
- *              Names are looked up in the internal namespace to determine
- *              if the name represents a control method.  If a method
- *              is found, the number of arguments to the method is returned.
- *              This information is critical for parsing to continue correctly.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
-			  struct acpi_parse_state *parser_state,
-			  union acpi_parse_object *arg, u8 possible_method_call)
-{
-	acpi_status status;
-	char *path;
-	union acpi_parse_object *name_op;
-	union acpi_operand_object *method_desc;
-	struct acpi_namespace_node *node;
-	u8 *start = parser_state->aml;
-
-	ACPI_FUNCTION_TRACE(ps_get_next_namepath);
-
-	path = acpi_ps_get_next_namestring(parser_state);
-	acpi_ps_init_op(arg, AML_INT_NAMEPATH_OP);
-
-	/* Null path case is allowed, just exit */
-
-	if (!path) {
-		arg->common.value.name = path;
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/*
-	 * Lookup the name in the internal namespace, starting with the current
-	 * scope. We don't want to add anything new to the namespace here,
-	 * however, so we use MODE_EXECUTE.
-	 * Allow searching of the parent tree, but don't open a new scope -
-	 * we just want to lookup the object (must be mode EXECUTE to perform
-	 * the upsearch)
-	 */
-	status = acpi_ns_lookup(walk_state->scope_info, path,
-				ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
-				ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE,
-				NULL, &node);
-
-	/*
-	 * If this name is a control method invocation, we must
-	 * setup the method call
-	 */
-	if (ACPI_SUCCESS(status) &&
-	    possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
-		if (walk_state->opcode == AML_UNLOAD_OP) {
-			/*
-			 * acpi_ps_get_next_namestring has increased the AML pointer,
-			 * so we need to restore the saved AML pointer for method call.
-			 */
-			walk_state->parser_state.aml = start;
-			walk_state->arg_count = 1;
-			acpi_ps_init_op(arg, AML_INT_METHODCALL_OP);
-			return_ACPI_STATUS(AE_OK);
-		}
-
-		/* This name is actually a control method invocation */
-
-		method_desc = acpi_ns_get_attached_object(node);
-		ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
-				  "Control Method - %p Desc %p Path=%p\n", node,
-				  method_desc, path));
-
-		name_op = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
-		if (!name_op) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		/* Change Arg into a METHOD CALL and attach name to it */
-
-		acpi_ps_init_op(arg, AML_INT_METHODCALL_OP);
-		name_op->common.value.name = path;
-
-		/* Point METHODCALL/NAME to the METHOD Node */
-
-		name_op->common.node = node;
-		acpi_ps_append_arg(arg, name_op);
-
-		if (!method_desc) {
-			ACPI_ERROR((AE_INFO,
-				    "Control Method %p has no attached object",
-				    node));
-			return_ACPI_STATUS(AE_AML_INTERNAL);
-		}
-
-		ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
-				  "Control Method - %p Args %X\n",
-				  node, method_desc->method.param_count));
-
-		/* Get the number of arguments to expect */
-
-		walk_state->arg_count = method_desc->method.param_count;
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/*
-	 * Special handling if the name was not found during the lookup -
-	 * some not_found cases are allowed
-	 */
-	if (status == AE_NOT_FOUND) {
-
-		/* 1) not_found is ok during load pass 1/2 (allow forward references) */
-
-		if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) !=
-		    ACPI_PARSE_EXECUTE) {
-			status = AE_OK;
-		}
-
-		/* 2) not_found during a cond_ref_of(x) is ok by definition */
-
-		else if (walk_state->op->common.aml_opcode ==
-			 AML_COND_REF_OF_OP) {
-			status = AE_OK;
-		}
-
-		/*
-		 * 3) not_found while building a Package is ok at this point, we
-		 * may flag as an error later if slack mode is not enabled.
-		 * (Some ASL code depends on allowing this behavior)
-		 */
-		else if ((arg->common.parent) &&
-			 ((arg->common.parent->common.aml_opcode ==
-			   AML_PACKAGE_OP)
-			  || (arg->common.parent->common.aml_opcode ==
-			      AML_VAR_PACKAGE_OP))) {
-			status = AE_OK;
-		}
-	}
-
-	/* Final exception check (may have been changed from code above) */
-
-	if (ACPI_FAILURE(status)) {
-		ACPI_ERROR_NAMESPACE(path, status);
-
-		if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
-		    ACPI_PARSE_EXECUTE) {
-
-			/* Report a control method execution error */
-
-			status = acpi_ds_method_error(status, walk_state);
-		}
-	}
-
-	/* Save the namepath */
-
-	arg->common.value.name = path;
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_next_simple_arg
- *
- * PARAMETERS:  parser_state        - Current parser state object
- *              arg_type            - The argument type (AML_*_ARG)
- *              Arg                 - Where the argument is returned
- *
- * RETURN:      None
- *
- * DESCRIPTION: Get the next simple argument (constant, string, or namestring)
- *
- ******************************************************************************/
-
-void
-acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
-			    u32 arg_type, union acpi_parse_object *arg)
-{
-	u32 length;
-	u16 opcode;
-	u8 *aml = parser_state->aml;
-
-	ACPI_FUNCTION_TRACE_U32(ps_get_next_simple_arg, arg_type);
-
-	switch (arg_type) {
-	case ARGP_BYTEDATA:
-
-		/* Get 1 byte from the AML stream */
-
-		opcode = AML_BYTE_OP;
-		arg->common.value.integer = (acpi_integer) * aml;
-		length = 1;
-		break;
-
-	case ARGP_WORDDATA:
-
-		/* Get 2 bytes from the AML stream */
-
-		opcode = AML_WORD_OP;
-		ACPI_MOVE_16_TO_64(&arg->common.value.integer, aml);
-		length = 2;
-		break;
-
-	case ARGP_DWORDDATA:
-
-		/* Get 4 bytes from the AML stream */
-
-		opcode = AML_DWORD_OP;
-		ACPI_MOVE_32_TO_64(&arg->common.value.integer, aml);
-		length = 4;
-		break;
-
-	case ARGP_QWORDDATA:
-
-		/* Get 8 bytes from the AML stream */
-
-		opcode = AML_QWORD_OP;
-		ACPI_MOVE_64_TO_64(&arg->common.value.integer, aml);
-		length = 8;
-		break;
-
-	case ARGP_CHARLIST:
-
-		/* Get a pointer to the string, point past the string */
-
-		opcode = AML_STRING_OP;
-		arg->common.value.string = ACPI_CAST_PTR(char, aml);
-
-		/* Find the null terminator */
-
-		length = 0;
-		while (aml[length]) {
-			length++;
-		}
-		length++;
-		break;
-
-	case ARGP_NAME:
-	case ARGP_NAMESTRING:
-
-		acpi_ps_init_op(arg, AML_INT_NAMEPATH_OP);
-		arg->common.value.name =
-		    acpi_ps_get_next_namestring(parser_state);
-		return_VOID;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Invalid ArgType %X", arg_type));
-		return_VOID;
-	}
-
-	acpi_ps_init_op(arg, opcode);
-	parser_state->aml += length;
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_next_field
- *
- * PARAMETERS:  parser_state        - Current parser state object
- *
- * RETURN:      A newly allocated FIELD op
- *
- * DESCRIPTION: Get next field (named_field, reserved_field, or access_field)
- *
- ******************************************************************************/
-
-static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
-						       *parser_state)
-{
-	u32 aml_offset = (u32)
-	    ACPI_PTR_DIFF(parser_state->aml,
-			  parser_state->aml_start);
-	union acpi_parse_object *field;
-	u16 opcode;
-	u32 name;
-
-	ACPI_FUNCTION_TRACE(ps_get_next_field);
-
-	/* Determine field type */
-
-	switch (ACPI_GET8(parser_state->aml)) {
-	default:
-
-		opcode = AML_INT_NAMEDFIELD_OP;
-		break;
-
-	case 0x00:
-
-		opcode = AML_INT_RESERVEDFIELD_OP;
-		parser_state->aml++;
-		break;
-
-	case 0x01:
-
-		opcode = AML_INT_ACCESSFIELD_OP;
-		parser_state->aml++;
-		break;
-	}
-
-	/* Allocate a new field op */
-
-	field = acpi_ps_alloc_op(opcode);
-	if (!field) {
-		return_PTR(NULL);
-	}
-
-	field->common.aml_offset = aml_offset;
-
-	/* Decode the field type */
-
-	switch (opcode) {
-	case AML_INT_NAMEDFIELD_OP:
-
-		/* Get the 4-character name */
-
-		ACPI_MOVE_32_TO_32(&name, parser_state->aml);
-		acpi_ps_set_name(field, name);
-		parser_state->aml += ACPI_NAME_SIZE;
-
-		/* Get the length which is encoded as a package length */
-
-		field->common.value.size =
-		    acpi_ps_get_next_package_length(parser_state);
-		break;
-
-	case AML_INT_RESERVEDFIELD_OP:
-
-		/* Get the length which is encoded as a package length */
-
-		field->common.value.size =
-		    acpi_ps_get_next_package_length(parser_state);
-		break;
-
-	case AML_INT_ACCESSFIELD_OP:
-
-		/*
-		 * Get access_type and access_attrib and merge into the field Op
-		 * access_type is first operand, access_attribute is second
-		 */
-		field->common.value.integer =
-		    (((u32) ACPI_GET8(parser_state->aml) << 8));
-		parser_state->aml++;
-		field->common.value.integer |= ACPI_GET8(parser_state->aml);
-		parser_state->aml++;
-		break;
-
-	default:
-
-		/* Opcode was set in previous switch */
-		break;
-	}
-
-	return_PTR(field);
-}
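
The dispatch above reduces to a test on the lead byte of each field entry:
0x00 introduces a reserved field, 0x01 an access field, and anything else a
named field. A trivial self-contained sketch (enum names are illustrative,
not the ACPICA opcodes):

#include <stdint.h>

enum field_kind {
	FIELD_NAMED,		/* 4-char name + package length */
	FIELD_RESERVED,		/* package length only */
	FIELD_ACCESS,		/* AccessType + AccessAttrib bytes */
};

static enum field_kind classify_field(uint8_t lead_byte)
{
	switch (lead_byte) {
	case 0x00:
		return FIELD_RESERVED;
	case 0x01:
		return FIELD_ACCESS;
	default:
		return FIELD_NAMED;
	}
}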
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_next_arg
- *
- * PARAMETERS:  walk_state          - Current state
- *              parser_state        - Current parser state object
- *              arg_type            - The argument type (AML_*_ARG)
- *              return_arg          - Where the next arg is returned
- *
- * RETURN:      Status, and an op object containing the next argument.
- *
- * DESCRIPTION: Get next argument (including complex list arguments that require
- *              pushing the parser stack)
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
-		     struct acpi_parse_state *parser_state,
-		     u32 arg_type, union acpi_parse_object **return_arg)
-{
-	union acpi_parse_object *arg = NULL;
-	union acpi_parse_object *prev = NULL;
-	union acpi_parse_object *field;
-	u32 subop;
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE_PTR(ps_get_next_arg, parser_state);
-
-	switch (arg_type) {
-	case ARGP_BYTEDATA:
-	case ARGP_WORDDATA:
-	case ARGP_DWORDDATA:
-	case ARGP_CHARLIST:
-	case ARGP_NAME:
-	case ARGP_NAMESTRING:
-
-		/* Constants, strings, and namestrings are all the same size */
-
-		arg = acpi_ps_alloc_op(AML_BYTE_OP);
-		if (!arg) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-		acpi_ps_get_next_simple_arg(parser_state, arg_type, arg);
-		break;
-
-	case ARGP_PKGLENGTH:
-
-		/* Package length, nothing returned */
-
-		parser_state->pkg_end =
-		    acpi_ps_get_next_package_end(parser_state);
-		break;
-
-	case ARGP_FIELDLIST:
-
-		if (parser_state->aml < parser_state->pkg_end) {
-
-			/* Non-empty list */
-
-			while (parser_state->aml < parser_state->pkg_end) {
-				field = acpi_ps_get_next_field(parser_state);
-				if (!field) {
-					return_ACPI_STATUS(AE_NO_MEMORY);
-				}
-
-				if (prev) {
-					prev->common.next = field;
-				} else {
-					arg = field;
-				}
-				prev = field;
-			}
-
-			/* Skip to End of byte data */
-
-			parser_state->aml = parser_state->pkg_end;
-		}
-		break;
-
-	case ARGP_BYTELIST:
-
-		if (parser_state->aml < parser_state->pkg_end) {
-
-			/* Non-empty list */
-
-			arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
-			if (!arg) {
-				return_ACPI_STATUS(AE_NO_MEMORY);
-			}
-
-			/* Fill in bytelist data */
-
-			arg->common.value.size = (u32)
-			    ACPI_PTR_DIFF(parser_state->pkg_end,
-					  parser_state->aml);
-			arg->named.data = parser_state->aml;
-
-			/* Skip to End of byte data */
-
-			parser_state->aml = parser_state->pkg_end;
-		}
-		break;
-
-	case ARGP_TARGET:
-	case ARGP_SUPERNAME:
-	case ARGP_SIMPLENAME:
-
-		subop = acpi_ps_peek_opcode(parser_state);
-		if (subop == 0 ||
-		    acpi_ps_is_leading_char(subop) ||
-		    acpi_ps_is_prefix_char(subop)) {
-
-			/* null_name or name_string */
-
-			arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
-			if (!arg) {
-				return_ACPI_STATUS(AE_NO_MEMORY);
-			}
-
-			/* To support super_name arg of Unload */
-
-			if (walk_state->opcode == AML_UNLOAD_OP) {
-				status =
-				    acpi_ps_get_next_namepath(walk_state,
-							      parser_state, arg,
-							      1);
-
-				/*
-				 * If the super_name arg of Unload is a method call,
-				 * we have restored the AML pointer, just free this Arg
-				 */
-				if (arg->common.aml_opcode ==
-				    AML_INT_METHODCALL_OP) {
-					acpi_ps_free_op(arg);
-					arg = NULL;
-				}
-			} else {
-				status =
-				    acpi_ps_get_next_namepath(walk_state,
-							      parser_state, arg,
-							      0);
-			}
-		} else {
-			/* Single complex argument, nothing returned */
-
-			walk_state->arg_count = 1;
-		}
-		break;
-
-	case ARGP_DATAOBJ:
-	case ARGP_TERMARG:
-
-		/* Single complex argument, nothing returned */
-
-		walk_state->arg_count = 1;
-		break;
-
-	case ARGP_DATAOBJLIST:
-	case ARGP_TERMLIST:
-	case ARGP_OBJLIST:
-
-		if (parser_state->aml < parser_state->pkg_end) {
-
-			/* Non-empty list of variable arguments, nothing returned */
-
-			walk_state->arg_count = ACPI_VAR_ARGS;
-		}
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Invalid ArgType: %X", arg_type));
-		status = AE_AML_OPERAND_TYPE;
-		break;
-	}
-
-	*return_arg = arg;
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/parser/psloop.c b/drivers/acpi/parser/psloop.c
deleted file mode 100644
index 4647039..0000000
--- a/drivers/acpi/parser/psloop.c
+++ /dev/null
@@ -1,1087 +0,0 @@
-/******************************************************************************
- *
- * Module Name: psloop - Main AML parse loop
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-/*
- * Parse the AML and build an operation tree as most interpreters (such as
- * Perl) do. Parsing is done by hand rather than with a YACC generated parser
- * to tightly constrain stack and dynamic memory usage. Parsing is kept
- * flexible and the code fairly compact by parsing based on a list of AML
- * opcode templates in aml_op_info[].
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/acdispat.h>
-#include <acpi/amlcode.h>
-
-#define _COMPONENT          ACPI_PARSER
-ACPI_MODULE_NAME("psloop")
-
-static u32 acpi_gbl_depth = 0;
-
-/* Local prototypes */
-
-static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state);
-
-static acpi_status
-acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
-		       u8 * aml_op_start,
-		       union acpi_parse_object *unnamed_op,
-		       union acpi_parse_object **op);
-
-static acpi_status
-acpi_ps_create_op(struct acpi_walk_state *walk_state,
-		  u8 * aml_op_start, union acpi_parse_object **new_op);
-
-static acpi_status
-acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
-		      u8 * aml_op_start, union acpi_parse_object *op);
-
-static acpi_status
-acpi_ps_complete_op(struct acpi_walk_state *walk_state,
-		    union acpi_parse_object **op, acpi_status status);
-
-static acpi_status
-acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
-			  union acpi_parse_object *op, acpi_status status);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_aml_opcode
- *
- * PARAMETERS:  walk_state          - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Extract the next AML opcode from the input stream.
- *
- ******************************************************************************/
-
-static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
-{
-
-	ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state);
-
-	walk_state->aml_offset =
-	    (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
-				walk_state->parser_state.aml_start);
-	walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state));
-
-	/*
-	 * First cut to determine what we have found:
-	 * 1) A valid AML opcode
-	 * 2) A name string
-	 * 3) An unknown/invalid opcode
-	 */
-	walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
-
-	switch (walk_state->op_info->class) {
-	case AML_CLASS_ASCII:
-	case AML_CLASS_PREFIX:
-		/*
-		 * Starts with a valid prefix or ASCII char, this is a name
-		 * string. Convert the bare name string to a namepath.
-		 */
-		walk_state->opcode = AML_INT_NAMEPATH_OP;
-		walk_state->arg_types = ARGP_NAMESTRING;
-		break;
-
-	case AML_CLASS_UNKNOWN:
-
-		/* The opcode is unrecognized. Just skip unknown opcodes */
-
-		ACPI_ERROR((AE_INFO,
-			    "Found unknown opcode %X at AML address %p offset %X, ignoring",
-			    walk_state->opcode, walk_state->parser_state.aml,
-			    walk_state->aml_offset));
-
-		ACPI_DUMP_BUFFER(walk_state->parser_state.aml, 128);
-
-		/* Assume one-byte bad opcode */
-
-		walk_state->parser_state.aml++;
-		return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
-
-	default:
-
-		/* Found opcode info, this is a normal opcode */
-
-		walk_state->parser_state.aml +=
-		    acpi_ps_get_opcode_size(walk_state->opcode);
-		walk_state->arg_types = walk_state->op_info->parse_args;
-		break;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_build_named_op
- *
- * PARAMETERS:  walk_state          - Current state
- *              aml_op_start        - Begin of named Op in AML
- *              unnamed_op          - Early Op (not a named Op)
- *              Op                  - Returned Op
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Parse a named Op
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
-		       u8 * aml_op_start,
-		       union acpi_parse_object *unnamed_op,
-		       union acpi_parse_object **op)
-{
-	acpi_status status = AE_OK;
-	union acpi_parse_object *arg = NULL;
-
-	ACPI_FUNCTION_TRACE_PTR(ps_build_named_op, walk_state);
-
-	unnamed_op->common.value.arg = NULL;
-	unnamed_op->common.arg_list_length = 0;
-	unnamed_op->common.aml_opcode = walk_state->opcode;
-
-	/*
-	 * Get and append arguments until we find the node that contains
-	 * the name (the type ARGP_NAME).
-	 */
-	while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) &&
-	       (GET_CURRENT_ARG_TYPE(walk_state->arg_types) != ARGP_NAME)) {
-		status =
-		    acpi_ps_get_next_arg(walk_state,
-					 &(walk_state->parser_state),
-					 GET_CURRENT_ARG_TYPE(walk_state->
-							      arg_types), &arg);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		acpi_ps_append_arg(unnamed_op, arg);
-		INCREMENT_ARG_LIST(walk_state->arg_types);
-	}
-
-	/*
-	 * Make sure that we found a NAME and didn't run out of arguments
-	 */
-	if (!GET_CURRENT_ARG_TYPE(walk_state->arg_types)) {
-		return_ACPI_STATUS(AE_AML_NO_OPERAND);
-	}
-
-	/* We know that this arg is a name, move to next arg */
-
-	INCREMENT_ARG_LIST(walk_state->arg_types);
-
-	/*
-	 * Find the object. This will either insert the object into
-	 * the namespace or simply look it up
-	 */
-	walk_state->op = NULL;
-
-	status = walk_state->descending_callback(walk_state, op);
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "During name lookup/catalog"));
-		return_ACPI_STATUS(status);
-	}
-
-	if (!*op) {
-		return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
-	}
-
-	status = acpi_ps_next_parse_state(walk_state, *op, status);
-	if (ACPI_FAILURE(status)) {
-		if (status == AE_CTRL_PENDING) {
-			return_ACPI_STATUS(AE_CTRL_PARSE_PENDING);
-		}
-		return_ACPI_STATUS(status);
-	}
-
-	acpi_ps_append_arg(*op, unnamed_op->common.value.arg);
-	acpi_gbl_depth++;
-
-	if ((*op)->common.aml_opcode == AML_REGION_OP ||
-	    (*op)->common.aml_opcode == AML_DATA_REGION_OP) {
-		/*
-		 * Defer final parsing of an operation_region body, because we don't
-		 * have enough info in the first pass to parse it correctly (i.e.,
-		 * there may be method calls within the term_arg elements of the body.)
-		 *
-		 * However, we must continue parsing because the opregion is not a
-		 * standalone package -- we don't know where the end is at this point.
-		 *
-		 * (Length is unknown until parse of the body complete)
-		 */
-		(*op)->named.data = aml_op_start;
-		(*op)->named.length = 0;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_create_op
- *
- * PARAMETERS:  walk_state          - Current state
- *              aml_op_start        - Op start in AML
- *              new_op              - Returned Op
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Get Op from AML
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ps_create_op(struct acpi_walk_state *walk_state,
-		  u8 * aml_op_start, union acpi_parse_object **new_op)
-{
-	acpi_status status = AE_OK;
-	union acpi_parse_object *op;
-	union acpi_parse_object *named_op = NULL;
-	union acpi_parse_object *parent_scope;
-	u8 argument_count;
-	const struct acpi_opcode_info *op_info;
-
-	ACPI_FUNCTION_TRACE_PTR(ps_create_op, walk_state);
-
-	status = acpi_ps_get_aml_opcode(walk_state);
-	if (status == AE_CTRL_PARSE_CONTINUE) {
-		return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
-	}
-
-	/* Create Op structure and append to parent's argument list */
-
-	walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
-	op = acpi_ps_alloc_op(walk_state->opcode);
-	if (!op) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	if (walk_state->op_info->flags & AML_NAMED) {
-		status =
-		    acpi_ps_build_named_op(walk_state, aml_op_start, op,
-					   &named_op);
-		acpi_ps_free_op(op);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		*new_op = named_op;
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Not a named opcode, just allocate Op and append to parent */
-
-	if (walk_state->op_info->flags & AML_CREATE) {
-		/*
-		 * Backup to beginning of create_xXXfield declaration
-		 * body_length is unknown until we parse the body
-		 */
-		op->named.data = aml_op_start;
-		op->named.length = 0;
-	}
-
-	if (walk_state->opcode == AML_BANK_FIELD_OP) {
-		/*
-		 * Backup to beginning of bank_field declaration
-		 * body_length is unknown until we parse the body
-		 */
-		op->named.data = aml_op_start;
-		op->named.length = 0;
-	}
-
-	parent_scope = acpi_ps_get_parent_scope(&(walk_state->parser_state));
-	acpi_ps_append_arg(parent_scope, op);
-
-	if (parent_scope) {
-		op_info =
-		    acpi_ps_get_opcode_info(parent_scope->common.aml_opcode);
-		if (op_info->flags & AML_HAS_TARGET) {
-			argument_count =
-			    acpi_ps_get_argument_count(op_info->type);
-			if (parent_scope->common.arg_list_length >
-			    argument_count) {
-				op->common.flags |= ACPI_PARSEOP_TARGET;
-			}
-		} else if (parent_scope->common.aml_opcode == AML_INCREMENT_OP) {
-			op->common.flags |= ACPI_PARSEOP_TARGET;
-		}
-	}
-
-	if (walk_state->descending_callback != NULL) {
-		/*
-		 * Find the object. This will either insert the object into
-		 * the namespace or simply look it up
-		 */
-		walk_state->op = *new_op = op;
-
-		status = walk_state->descending_callback(walk_state, &op);
-		status = acpi_ps_next_parse_state(walk_state, op, status);
-		if (status == AE_CTRL_PENDING) {
-			status = AE_CTRL_PARSE_PENDING;
-		}
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_arguments
- *
- * PARAMETERS:  walk_state          - Current state
- *              aml_op_start        - Op start in AML
- *              Op                  - Current Op
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Get arguments for passed Op.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
-		      u8 * aml_op_start, union acpi_parse_object *op)
-{
-	acpi_status status = AE_OK;
-	union acpi_parse_object *arg = NULL;
-
-	ACPI_FUNCTION_TRACE_PTR(ps_get_arguments, walk_state);
-
-	switch (op->common.aml_opcode) {
-	case AML_BYTE_OP:	/* AML_BYTEDATA_ARG */
-	case AML_WORD_OP:	/* AML_WORDDATA_ARG */
-	case AML_DWORD_OP:	/* AML_DWORDATA_ARG */
-	case AML_QWORD_OP:	/* AML_QWORDATA_ARG */
-	case AML_STRING_OP:	/* AML_ASCIICHARLIST_ARG */
-
-		/* Fill in constant or string argument directly */
-
-		acpi_ps_get_next_simple_arg(&(walk_state->parser_state),
-					    GET_CURRENT_ARG_TYPE(walk_state->
-								 arg_types),
-					    op);
-		break;
-
-	case AML_INT_NAMEPATH_OP:	/* AML_NAMESTRING_ARG */
-
-		status =
-		    acpi_ps_get_next_namepath(walk_state,
-					      &(walk_state->parser_state), op,
-					      1);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		walk_state->arg_types = 0;
-		break;
-
-	default:
-		/*
-		 * Op is not a constant or string, append each argument to the Op
-		 */
-		while (GET_CURRENT_ARG_TYPE(walk_state->arg_types)
-		       && !walk_state->arg_count) {
-			walk_state->aml_offset =
-			    (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
-						walk_state->parser_state.
-						aml_start);
-
-			status =
-			    acpi_ps_get_next_arg(walk_state,
-						 &(walk_state->parser_state),
-						 GET_CURRENT_ARG_TYPE
-						 (walk_state->arg_types), &arg);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-
-			if (arg) {
-				arg->common.aml_offset = walk_state->aml_offset;
-				acpi_ps_append_arg(op, arg);
-			}
-
-			INCREMENT_ARG_LIST(walk_state->arg_types);
-		}
-
-		/* Special processing for certain opcodes */
-
-		/* TBD (remove): Temporary mechanism to disable this code if needed */
-
-#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE
-
-		if ((walk_state->pass_number <= ACPI_IMODE_LOAD_PASS1) &&
-		    ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) == 0)) {
-			/*
-			 * We want to skip If/Else/While constructs during Pass1 because we
-			 * want to actually conditionally execute the code during Pass2.
-			 *
-			 * Except for disassembly, where we always want to walk the
-			 * If/Else/While packages
-			 */
-			switch (op->common.aml_opcode) {
-			case AML_IF_OP:
-			case AML_ELSE_OP:
-			case AML_WHILE_OP:
-
-				ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
-						  "Pass1: Skipping an If/Else/While body\n"));
-
-				/* Skip body of if/else/while in pass 1 */
-
-				walk_state->parser_state.aml =
-				    walk_state->parser_state.pkg_end;
-				walk_state->arg_count = 0;
-				break;
-
-			default:
-				break;
-			}
-		}
-#endif
-
-		switch (op->common.aml_opcode) {
-		case AML_METHOD_OP:
-			/*
-			 * Skip parsing of control method because we don't have enough
-			 * info in the first pass to parse it correctly.
-			 *
-			 * Save the length and address of the body
-			 */
-			op->named.data = walk_state->parser_state.aml;
-			op->named.length = (u32)
-			    (walk_state->parser_state.pkg_end -
-			     walk_state->parser_state.aml);
-
-			/* Skip body of method */
-
-			walk_state->parser_state.aml =
-			    walk_state->parser_state.pkg_end;
-			walk_state->arg_count = 0;
-			break;
-
-		case AML_BUFFER_OP:
-		case AML_PACKAGE_OP:
-		case AML_VAR_PACKAGE_OP:
-
-			if ((op->common.parent) &&
-			    (op->common.parent->common.aml_opcode ==
-			     AML_NAME_OP)
-			    && (walk_state->pass_number <=
-				ACPI_IMODE_LOAD_PASS2)) {
-				/*
-				 * Skip parsing of Buffers and Packages because we don't have
-				 * enough info in the first pass to parse them correctly.
-				 */
-				op->named.data = aml_op_start;
-				op->named.length = (u32)
-				    (walk_state->parser_state.pkg_end -
-				     aml_op_start);
-
-				/* Skip body */
-
-				walk_state->parser_state.aml =
-				    walk_state->parser_state.pkg_end;
-				walk_state->arg_count = 0;
-			}
-			break;
-
-		case AML_WHILE_OP:
-
-			if (walk_state->control_state) {
-				walk_state->control_state->control.package_end =
-				    walk_state->parser_state.pkg_end;
-			}
-			break;
-
-		default:
-
-			/* No action for all other opcodes */
-			break;
-		}
-
-		break;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
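The Method/Buffer/Package cases above all follow the same record-and-skip pattern: remember where the deferred body starts (named.data), note how long it is (named.length), and jump the parser straight to pkg_end so the body is only parsed later, when enough context exists. A minimal self-contained sketch of that pattern on a toy byte buffer; the struct, buffer contents, and offsets are illustrative stand-ins, not the real ACPICA types.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the named-op bookkeeping (op->named.data/length) */
struct deferred_body {
	uint8_t *data;      /* start of the deferred body in the AML stream */
	uint32_t length;    /* filled in once pkg_end is known */
};

int main(void)
{
	/* Toy "AML": one opcode byte followed by a body we do not parse yet */
	uint8_t aml[] = { 0x14, 0xAA, 0xBB, 0xCC, 0xDD, 0x00 };
	uint8_t *cursor = aml + 1;          /* parser position after the opcode */
	uint8_t *pkg_end = aml + 5;         /* end of the enclosing package */
	struct deferred_body body;

	/* First pass: remember the body and skip it (as for AML_METHOD_OP above) */
	body.data = cursor;
	body.length = (uint32_t)(pkg_end - cursor);
	cursor = pkg_end;                   /* parsing resumes after the body */

	printf("deferred %u byte(s) starting at offset %u\n",
	       body.length, (unsigned)(body.data - aml));
	return 0;
}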
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_complete_op
- *
- * PARAMETERS:  walk_state          - Current state
- *              Op                  - Returned Op
- *              Status              - Parse status before complete Op
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Complete Op
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ps_complete_op(struct acpi_walk_state *walk_state,
-		    union acpi_parse_object **op, acpi_status status)
-{
-	acpi_status status2;
-
-	ACPI_FUNCTION_TRACE_PTR(ps_complete_op, walk_state);
-
-	/*
-	 * Finished one argument of the containing scope
-	 */
-	walk_state->parser_state.scope->parse_scope.arg_count--;
-
-	/* Close this Op (will result in parse subtree deletion) */
-
-	status2 = acpi_ps_complete_this_op(walk_state, *op);
-	if (ACPI_FAILURE(status2)) {
-		return_ACPI_STATUS(status2);
-	}
-
-	*op = NULL;
-
-	switch (status) {
-	case AE_OK:
-		break;
-
-	case AE_CTRL_TRANSFER:
-
-		/* We are about to transfer to a called method */
-
-		walk_state->prev_op = NULL;
-		walk_state->prev_arg_types = walk_state->arg_types;
-		return_ACPI_STATUS(status);
-
-	case AE_CTRL_END:
-
-		acpi_ps_pop_scope(&(walk_state->parser_state), op,
-				  &walk_state->arg_types,
-				  &walk_state->arg_count);
-
-		if (*op) {
-			walk_state->op = *op;
-			walk_state->op_info =
-			    acpi_ps_get_opcode_info((*op)->common.aml_opcode);
-			walk_state->opcode = (*op)->common.aml_opcode;
-
-			status = walk_state->ascending_callback(walk_state);
-			status =
-			    acpi_ps_next_parse_state(walk_state, *op, status);
-
-			status2 = acpi_ps_complete_this_op(walk_state, *op);
-			if (ACPI_FAILURE(status2)) {
-				return_ACPI_STATUS(status2);
-			}
-		}
-
-		status = AE_OK;
-		break;
-
-	case AE_CTRL_BREAK:
-	case AE_CTRL_CONTINUE:
-
-		/* Pop off scopes until we find the While */
-
-		while (!(*op) || ((*op)->common.aml_opcode != AML_WHILE_OP)) {
-			acpi_ps_pop_scope(&(walk_state->parser_state), op,
-					  &walk_state->arg_types,
-					  &walk_state->arg_count);
-		}
-
-		/* Close this iteration of the While loop */
-
-		walk_state->op = *op;
-		walk_state->op_info =
-		    acpi_ps_get_opcode_info((*op)->common.aml_opcode);
-		walk_state->opcode = (*op)->common.aml_opcode;
-
-		status = walk_state->ascending_callback(walk_state);
-		status = acpi_ps_next_parse_state(walk_state, *op, status);
-
-		status2 = acpi_ps_complete_this_op(walk_state, *op);
-		if (ACPI_FAILURE(status2)) {
-			return_ACPI_STATUS(status2);
-		}
-
-		status = AE_OK;
-		break;
-
-	case AE_CTRL_TERMINATE:
-
-		/* Clean up */
-		do {
-			if (*op) {
-				status2 =
-				    acpi_ps_complete_this_op(walk_state, *op);
-				if (ACPI_FAILURE(status2)) {
-					return_ACPI_STATUS(status2);
-				}
-
-				acpi_ut_delete_generic_state
-				    (acpi_ut_pop_generic_state
-				     (&walk_state->control_state));
-			}
-
-			acpi_ps_pop_scope(&(walk_state->parser_state), op,
-					  &walk_state->arg_types,
-					  &walk_state->arg_count);
-
-		} while (*op);
-
-		return_ACPI_STATUS(AE_OK);
-
-	default:		/* All other non-AE_OK status */
-
-		do {
-			if (*op) {
-				status2 =
-				    acpi_ps_complete_this_op(walk_state, *op);
-				if (ACPI_FAILURE(status2)) {
-					return_ACPI_STATUS(status2);
-				}
-			}
-
-			acpi_ps_pop_scope(&(walk_state->parser_state), op,
-					  &walk_state->arg_types,
-					  &walk_state->arg_count);
-
-		} while (*op);
-
-#if 0
-		/*
-		 * TBD: Cleanup parse ops on error
-		 */
-		if (*op == NULL) {
-			acpi_ps_pop_scope(parser_state, op,
-					  &walk_state->arg_types,
-					  &walk_state->arg_count);
-		}
-#endif
-		walk_state->prev_op = NULL;
-		walk_state->prev_arg_types = walk_state->arg_types;
-		return_ACPI_STATUS(status);
-	}
-
-	/* This scope complete? */
-
-	if (acpi_ps_has_completed_scope(&(walk_state->parser_state))) {
-		acpi_ps_pop_scope(&(walk_state->parser_state), op,
-				  &walk_state->arg_types,
-				  &walk_state->arg_count);
-		ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Popped scope, Op=%p\n", *op));
-	} else {
-		*op = NULL;
-	}
-
-	ACPI_PREEMPTION_POINT();
-
-	return_ACPI_STATUS(AE_OK);
-}
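For AE_CTRL_BREAK and AE_CTRL_CONTINUE above, the parser unwinds its scope stack until the innermost While op is back on top, then closes that iteration. A self-contained sketch of just the unwinding step, using a plain array as a stand-in for the parser scope stack; the opcode values are made up for illustration.

#include <stdio.h>

#define TOY_WHILE_OP  0xA2   /* stand-in for AML_WHILE_OP */
#define TOY_IF_OP     0xA0
#define TOY_STORE_OP  0x70

int main(void)
{
	/* Innermost scope is at the top (end) of the array */
	int scope_stack[] = { TOY_WHILE_OP, TOY_IF_OP, TOY_STORE_OP };
	int top = 2;

	/* "Break": pop scopes until the enclosing While is on top */
	while (top >= 0 && scope_stack[top] != TOY_WHILE_OP)
		top--;                       /* the acpi_ps_pop_scope() step */

	if (top >= 0)
		printf("closing this iteration of the While at depth %d\n", top);
	else
		printf("no enclosing While found\n");
	return 0;
}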
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_complete_final_op
- *
- * PARAMETERS:  walk_state          - Current state
- *              Op                  - Current Op
- *              Status              - Current parse status before complete last
- *                                    Op
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Complete last Op.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
-			  union acpi_parse_object *op, acpi_status status)
-{
-	acpi_status status2;
-
-	ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
-
-	/*
-	 * Complete the last Op (if not completed), and clear the scope stack.
-	 * It is easily possible to end an AML "package" with an unbounded number
-	 * of open scopes (such as when several ASL blocks are closed with
-	 * sequential closing braces). We want to terminate each one cleanly.
-	 */
-	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "AML package complete at Op %p\n",
-			  op));
-	do {
-		if (op) {
-			if (walk_state->ascending_callback != NULL) {
-				walk_state->op = op;
-				walk_state->op_info =
-				    acpi_ps_get_opcode_info(op->common.
-							    aml_opcode);
-				walk_state->opcode = op->common.aml_opcode;
-
-				status =
-				    walk_state->ascending_callback(walk_state);
-				status =
-				    acpi_ps_next_parse_state(walk_state, op,
-							     status);
-				if (status == AE_CTRL_PENDING) {
-					status =
-					    acpi_ps_complete_op(walk_state, &op,
-								AE_OK);
-					if (ACPI_FAILURE(status)) {
-						return_ACPI_STATUS(status);
-					}
-				}
-
-				if (status == AE_CTRL_TERMINATE) {
-					status = AE_OK;
-
-					/* Clean up */
-					do {
-						if (op) {
-							status2 =
-							    acpi_ps_complete_this_op
-							    (walk_state, op);
-							if (ACPI_FAILURE
-							    (status2)) {
-								return_ACPI_STATUS
-								    (status2);
-							}
-						}
-
-						acpi_ps_pop_scope(&
-								  (walk_state->
-								   parser_state),
-								  &op,
-								  &walk_state->
-								  arg_types,
-								  &walk_state->
-								  arg_count);
-
-					} while (op);
-
-					return_ACPI_STATUS(status);
-				}
-
-				else if (ACPI_FAILURE(status)) {
-
-					/* First error is most important */
-
-					(void)
-					    acpi_ps_complete_this_op(walk_state,
-								     op);
-					return_ACPI_STATUS(status);
-				}
-			}
-
-			status2 = acpi_ps_complete_this_op(walk_state, op);
-			if (ACPI_FAILURE(status2)) {
-				return_ACPI_STATUS(status2);
-			}
-		}
-
-		acpi_ps_pop_scope(&(walk_state->parser_state), &op,
-				  &walk_state->arg_types,
-				  &walk_state->arg_count);
-
-	} while (op);
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_parse_loop
- *
- * PARAMETERS:  walk_state          - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Parse AML (pointed to by the current parser state) and return
- *              a tree of ops.
- *
- ******************************************************************************/
-
-acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
-{
-	acpi_status status = AE_OK;
-	union acpi_parse_object *op = NULL;	/* current op */
-	struct acpi_parse_state *parser_state;
-	u8 *aml_op_start = NULL;
-
-	ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state);
-
-	if (walk_state->descending_callback == NULL) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	parser_state = &walk_state->parser_state;
-	walk_state->arg_types = 0;
-
-#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
-
-	if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) {
-
-		/* We are restarting a preempted control method */
-
-		if (acpi_ps_has_completed_scope(parser_state)) {
-			/*
-			 * We must check if a predicate to an IF or WHILE statement
-			 * was just completed
-			 */
-			if ((parser_state->scope->parse_scope.op) &&
-			    ((parser_state->scope->parse_scope.op->common.
-			      aml_opcode == AML_IF_OP)
-			     || (parser_state->scope->parse_scope.op->common.
-				 aml_opcode == AML_WHILE_OP))
-			    && (walk_state->control_state)
-			    && (walk_state->control_state->common.state ==
-				ACPI_CONTROL_PREDICATE_EXECUTING)) {
-				/*
-				 * A predicate was just completed, get the value of the
-				 * predicate and branch based on that value
-				 */
-				walk_state->op = NULL;
-				status =
-				    acpi_ds_get_predicate_value(walk_state,
-								ACPI_TO_POINTER
-								(TRUE));
-				if (ACPI_FAILURE(status)
-				    && ((status & AE_CODE_MASK) !=
-					AE_CODE_CONTROL)) {
-					if (status == AE_AML_NO_RETURN_VALUE) {
-						ACPI_EXCEPTION((AE_INFO, status,
-								"Invoked method did not return a value"));
-
-					}
-
-					ACPI_EXCEPTION((AE_INFO, status,
-							"GetPredicate Failed"));
-					return_ACPI_STATUS(status);
-				}
-
-				status =
-				    acpi_ps_next_parse_state(walk_state, op,
-							     status);
-			}
-
-			acpi_ps_pop_scope(parser_state, &op,
-					  &walk_state->arg_types,
-					  &walk_state->arg_count);
-			ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
-					  "Popped scope, Op=%p\n", op));
-		} else if (walk_state->prev_op) {
-
-			/* We were in the middle of an op */
-
-			op = walk_state->prev_op;
-			walk_state->arg_types = walk_state->prev_arg_types;
-		}
-	}
-#endif
-
-	/* Iterative parsing loop, while there is more AML to process: */
-
-	while ((parser_state->aml < parser_state->aml_end) || (op)) {
-		aml_op_start = parser_state->aml;
-		if (!op) {
-			status =
-			    acpi_ps_create_op(walk_state, aml_op_start, &op);
-			if (ACPI_FAILURE(status)) {
-				if (status == AE_CTRL_PARSE_CONTINUE) {
-					continue;
-				}
-
-				if (status == AE_CTRL_PARSE_PENDING) {
-					status = AE_OK;
-				}
-
-				status =
-				    acpi_ps_complete_op(walk_state, &op,
-							status);
-				if (ACPI_FAILURE(status)) {
-					return_ACPI_STATUS(status);
-				}
-
-				continue;
-			}
-
-			op->common.aml_offset = walk_state->aml_offset;
-
-			if (walk_state->op_info) {
-				ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
-						  "Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n",
-						  (u32) op->common.aml_opcode,
-						  walk_state->op_info->name, op,
-						  parser_state->aml,
-						  op->common.aml_offset));
-			}
-		}
-
-		/*
-		 * Start arg_count at zero because we don't know if there are
-		 * any args yet
-		 */
-		walk_state->arg_count = 0;
-
-		/* Are there any arguments that must be processed? */
-
-		if (walk_state->arg_types) {
-
-			/* Get arguments */
-
-			status =
-			    acpi_ps_get_arguments(walk_state, aml_op_start, op);
-			if (ACPI_FAILURE(status)) {
-				status =
-				    acpi_ps_complete_op(walk_state, &op,
-							status);
-				if (ACPI_FAILURE(status)) {
-					return_ACPI_STATUS(status);
-				}
-
-				continue;
-			}
-		}
-
-		/* Check for arguments that need to be processed */
-
-		if (walk_state->arg_count) {
-			/*
-			 * There are arguments (complex ones), push Op and
-			 * prepare for argument
-			 */
-			status = acpi_ps_push_scope(parser_state, op,
-						    walk_state->arg_types,
-						    walk_state->arg_count);
-			if (ACPI_FAILURE(status)) {
-				status =
-				    acpi_ps_complete_op(walk_state, &op,
-							status);
-				if (ACPI_FAILURE(status)) {
-					return_ACPI_STATUS(status);
-				}
-
-				continue;
-			}
-
-			op = NULL;
-			continue;
-		}
-
-		/*
-		 * All arguments have been processed -- Op is complete,
-		 * prepare for next
-		 */
-		walk_state->op_info =
-		    acpi_ps_get_opcode_info(op->common.aml_opcode);
-		if (walk_state->op_info->flags & AML_NAMED) {
-			if (acpi_gbl_depth) {
-				acpi_gbl_depth--;
-			}
-
-			if (op->common.aml_opcode == AML_REGION_OP ||
-			    op->common.aml_opcode == AML_DATA_REGION_OP) {
-				/*
-				 * Skip parsing of control method or opregion body,
-				 * because we don't have enough info in the first pass
-				 * to parse them correctly.
-				 *
-				 * Completed parsing an op_region declaration, we now
-				 * know the length.
-				 */
-				op->named.length =
-				    (u32) (parser_state->aml - op->named.data);
-			}
-		}
-
-		if (walk_state->op_info->flags & AML_CREATE) {
-			/*
-			 * Backup to beginning of create_xXXfield declaration (1 for
-			 * Opcode)
-			 *
-			 * body_length is unknown until we parse the body
-			 */
-			op->named.length =
-			    (u32) (parser_state->aml - op->named.data);
-		}
-
-		if (op->common.aml_opcode == AML_BANK_FIELD_OP) {
-			/*
-			 * Backup to beginning of bank_field declaration
-			 *
-			 * body_length is unknown until we parse the body
-			 */
-			op->named.length =
-			    (u32) (parser_state->aml - op->named.data);
-		}
-
-		/* This op complete, notify the dispatcher */
-
-		if (walk_state->ascending_callback != NULL) {
-			walk_state->op = op;
-			walk_state->opcode = op->common.aml_opcode;
-
-			status = walk_state->ascending_callback(walk_state);
-			status =
-			    acpi_ps_next_parse_state(walk_state, op, status);
-			if (status == AE_CTRL_PENDING) {
-				status = AE_OK;
-			}
-		}
-
-		status = acpi_ps_complete_op(walk_state, &op, status);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-	}			/* while parser_state->Aml */
-
-	status = acpi_ps_complete_final_op(walk_state, op, status);
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/parser/psopcode.c b/drivers/acpi/parser/psopcode.c
deleted file mode 100644
index f425ab3..0000000
--- a/drivers/acpi/parser/psopcode.c
+++ /dev/null
@@ -1,809 +0,0 @@
-/******************************************************************************
- *
- * Module Name: psopcode - Parser/Interpreter opcode information table
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/acopcode.h>
-#include <acpi/amlcode.h>
-
-#define _COMPONENT          ACPI_PARSER
-ACPI_MODULE_NAME("psopcode")
-
-static const u8 acpi_gbl_argument_count[] =
-    { 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };
-
-/*******************************************************************************
- *
- * NAME:        acpi_gbl_aml_op_info
- *
- * DESCRIPTION: Opcode table. Each entry contains <opcode, type, name, operands>
- *              The name is a simple ascii string, the operand specifier is an
- *              ascii string with one letter per operand.  The letter specifies
- *              the operand type.
- *
- ******************************************************************************/
-
-/*
- * Summary of opcode types/flags
- *
-
- Opcodes that have associated namespace objects (AML_NSOBJECT flag)
-
-	AML_SCOPE_OP
-	AML_DEVICE_OP
-	AML_THERMAL_ZONE_OP
-	AML_METHOD_OP
-	AML_POWER_RES_OP
-	AML_PROCESSOR_OP
-	AML_FIELD_OP
-	AML_INDEX_FIELD_OP
-	AML_BANK_FIELD_OP
-	AML_NAME_OP
-	AML_ALIAS_OP
-	AML_MUTEX_OP
-	AML_EVENT_OP
-	AML_REGION_OP
-	AML_CREATE_FIELD_OP
-	AML_CREATE_BIT_FIELD_OP
-	AML_CREATE_BYTE_FIELD_OP
-	AML_CREATE_WORD_FIELD_OP
-	AML_CREATE_DWORD_FIELD_OP
-	AML_CREATE_QWORD_FIELD_OP
-	AML_INT_NAMEDFIELD_OP
-	AML_INT_METHODCALL_OP
-	AML_INT_NAMEPATH_OP
-
-  Opcodes that are "namespace" opcodes (AML_NSOPCODE flag)
-
-	AML_SCOPE_OP
-	AML_DEVICE_OP
-	AML_THERMAL_ZONE_OP
-	AML_METHOD_OP
-	AML_POWER_RES_OP
-	AML_PROCESSOR_OP
-	AML_FIELD_OP
-	AML_INDEX_FIELD_OP
-	AML_BANK_FIELD_OP
-	AML_NAME_OP
-	AML_ALIAS_OP
-	AML_MUTEX_OP
-	AML_EVENT_OP
-	AML_REGION_OP
-	AML_INT_NAMEDFIELD_OP
-
-  Opcodes that have an associated namespace node (AML_NSNODE flag)
-
-	AML_SCOPE_OP
-	AML_DEVICE_OP
-	AML_THERMAL_ZONE_OP
-	AML_METHOD_OP
-	AML_POWER_RES_OP
-	AML_PROCESSOR_OP
-	AML_NAME_OP
-	AML_ALIAS_OP
-	AML_MUTEX_OP
-	AML_EVENT_OP
-	AML_REGION_OP
-	AML_CREATE_FIELD_OP
-	AML_CREATE_BIT_FIELD_OP
-	AML_CREATE_BYTE_FIELD_OP
-	AML_CREATE_WORD_FIELD_OP
-	AML_CREATE_DWORD_FIELD_OP
-	AML_CREATE_QWORD_FIELD_OP
-	AML_INT_NAMEDFIELD_OP
-	AML_INT_METHODCALL_OP
-	AML_INT_NAMEPATH_OP
-
-  Opcodes that define named ACPI objects (AML_NAMED flag)
-
-	AML_SCOPE_OP
-	AML_DEVICE_OP
-	AML_THERMAL_ZONE_OP
-	AML_METHOD_OP
-	AML_POWER_RES_OP
-	AML_PROCESSOR_OP
-	AML_NAME_OP
-	AML_ALIAS_OP
-	AML_MUTEX_OP
-	AML_EVENT_OP
-	AML_REGION_OP
-	AML_INT_NAMEDFIELD_OP
-
-  Opcodes that contain executable AML as part of the definition that
-  must be deferred until needed
-
-	AML_METHOD_OP
-	AML_VAR_PACKAGE_OP
-	AML_CREATE_FIELD_OP
-	AML_CREATE_BIT_FIELD_OP
-	AML_CREATE_BYTE_FIELD_OP
-	AML_CREATE_WORD_FIELD_OP
-	AML_CREATE_DWORD_FIELD_OP
-	AML_CREATE_QWORD_FIELD_OP
-	AML_REGION_OP
-	AML_BUFFER_OP
-
-  Field opcodes
-
-	AML_CREATE_FIELD_OP
-	AML_FIELD_OP
-	AML_INDEX_FIELD_OP
-	AML_BANK_FIELD_OP
-
-  Field "Create" opcodes
-
-	AML_CREATE_FIELD_OP
-	AML_CREATE_BIT_FIELD_OP
-	AML_CREATE_BYTE_FIELD_OP
-	AML_CREATE_WORD_FIELD_OP
-	AML_CREATE_DWORD_FIELD_OP
-	AML_CREATE_QWORD_FIELD_OP
-
- ******************************************************************************/
-
-/*
- * Master Opcode information table.  A summary of everything we know about each
- * opcode, all in one place.
- */
-const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
-/*! [Begin] no source code translation */
-/* Index           Name                 Parser Args               Interpreter Args                ObjectType                    Class                      Type                  Flags */
-
-/* 00 */ ACPI_OP("Zero", ARGP_ZERO_OP, ARGI_ZERO_OP, ACPI_TYPE_INTEGER,
-		 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
-/* 01 */ ACPI_OP("One", ARGP_ONE_OP, ARGI_ONE_OP, ACPI_TYPE_INTEGER,
-		 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
-/* 02 */ ACPI_OP("Alias", ARGP_ALIAS_OP, ARGI_ALIAS_OP,
-		 ACPI_TYPE_LOCAL_ALIAS, AML_CLASS_NAMED_OBJECT,
-		 AML_TYPE_NAMED_SIMPLE,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
-		 AML_NSNODE | AML_NAMED),
-/* 03 */ ACPI_OP("Name", ARGP_NAME_OP, ARGI_NAME_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
-		 AML_NSNODE | AML_NAMED),
-/* 04 */ ACPI_OP("ByteConst", ARGP_BYTE_OP, ARGI_BYTE_OP,
-		 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
-		 AML_TYPE_LITERAL, AML_CONSTANT),
-/* 05 */ ACPI_OP("WordConst", ARGP_WORD_OP, ARGI_WORD_OP,
-		 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
-		 AML_TYPE_LITERAL, AML_CONSTANT),
-/* 06 */ ACPI_OP("DwordConst", ARGP_DWORD_OP, ARGI_DWORD_OP,
-		 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
-		 AML_TYPE_LITERAL, AML_CONSTANT),
-/* 07 */ ACPI_OP("String", ARGP_STRING_OP, ARGI_STRING_OP,
-		 ACPI_TYPE_STRING, AML_CLASS_ARGUMENT,
-		 AML_TYPE_LITERAL, AML_CONSTANT),
-/* 08 */ ACPI_OP("Scope", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
-		 ACPI_TYPE_LOCAL_SCOPE, AML_CLASS_NAMED_OBJECT,
-		 AML_TYPE_NAMED_NO_OBJ,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
-		 AML_NSNODE | AML_NAMED),
-/* 09 */ ACPI_OP("Buffer", ARGP_BUFFER_OP, ARGI_BUFFER_OP,
-		 ACPI_TYPE_BUFFER, AML_CLASS_CREATE,
-		 AML_TYPE_CREATE_OBJECT,
-		 AML_HAS_ARGS | AML_DEFER | AML_CONSTANT),
-/* 0A */ ACPI_OP("Package", ARGP_PACKAGE_OP, ARGI_PACKAGE_OP,
-		 ACPI_TYPE_PACKAGE, AML_CLASS_CREATE,
-		 AML_TYPE_CREATE_OBJECT,
-		 AML_HAS_ARGS | AML_DEFER | AML_CONSTANT),
-/* 0B */ ACPI_OP("Method", ARGP_METHOD_OP, ARGI_METHOD_OP,
-		 ACPI_TYPE_METHOD, AML_CLASS_NAMED_OBJECT,
-		 AML_TYPE_NAMED_COMPLEX,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
-		 AML_NSNODE | AML_NAMED | AML_DEFER),
-/* 0C */ ACPI_OP("Local0", ARGP_LOCAL0, ARGI_LOCAL0,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_LOCAL_VARIABLE, 0),
-/* 0D */ ACPI_OP("Local1", ARGP_LOCAL1, ARGI_LOCAL1,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_LOCAL_VARIABLE, 0),
-/* 0E */ ACPI_OP("Local2", ARGP_LOCAL2, ARGI_LOCAL2,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_LOCAL_VARIABLE, 0),
-/* 0F */ ACPI_OP("Local3", ARGP_LOCAL3, ARGI_LOCAL3,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_LOCAL_VARIABLE, 0),
-/* 10 */ ACPI_OP("Local4", ARGP_LOCAL4, ARGI_LOCAL4,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_LOCAL_VARIABLE, 0),
-/* 11 */ ACPI_OP("Local5", ARGP_LOCAL5, ARGI_LOCAL5,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_LOCAL_VARIABLE, 0),
-/* 12 */ ACPI_OP("Local6", ARGP_LOCAL6, ARGI_LOCAL6,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_LOCAL_VARIABLE, 0),
-/* 13 */ ACPI_OP("Local7", ARGP_LOCAL7, ARGI_LOCAL7,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_LOCAL_VARIABLE, 0),
-/* 14 */ ACPI_OP("Arg0", ARGP_ARG0, ARGI_ARG0,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_METHOD_ARGUMENT, 0),
-/* 15 */ ACPI_OP("Arg1", ARGP_ARG1, ARGI_ARG1,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_METHOD_ARGUMENT, 0),
-/* 16 */ ACPI_OP("Arg2", ARGP_ARG2, ARGI_ARG2,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_METHOD_ARGUMENT, 0),
-/* 17 */ ACPI_OP("Arg3", ARGP_ARG3, ARGI_ARG3,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_METHOD_ARGUMENT, 0),
-/* 18 */ ACPI_OP("Arg4", ARGP_ARG4, ARGI_ARG4,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_METHOD_ARGUMENT, 0),
-/* 19 */ ACPI_OP("Arg5", ARGP_ARG5, ARGI_ARG5,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_METHOD_ARGUMENT, 0),
-/* 1A */ ACPI_OP("Arg6", ARGP_ARG6, ARGI_ARG6,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_METHOD_ARGUMENT, 0),
-/* 1B */ ACPI_OP("Store", ARGP_STORE_OP, ARGI_STORE_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
-		 AML_FLAGS_EXEC_1A_1T_1R),
-/* 1C */ ACPI_OP("RefOf", ARGP_REF_OF_OP, ARGI_REF_OF_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
-		 AML_FLAGS_EXEC_1A_0T_1R),
-/* 1D */ ACPI_OP("Add", ARGP_ADD_OP, ARGI_ADD_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
-		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
-/* 1E */ ACPI_OP("Concatenate", ARGP_CONCAT_OP, ARGI_CONCAT_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_2A_1T_1R,
-		 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
-/* 1F */ ACPI_OP("Subtract", ARGP_SUBTRACT_OP, ARGI_SUBTRACT_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_2A_1T_1R,
-		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
-/* 20 */ ACPI_OP("Increment", ARGP_INCREMENT_OP, ARGI_INCREMENT_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_1A_0T_1R,
-		 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
-/* 21 */ ACPI_OP("Decrement", ARGP_DECREMENT_OP, ARGI_DECREMENT_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_1A_0T_1R,
-		 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
-/* 22 */ ACPI_OP("Multiply", ARGP_MULTIPLY_OP, ARGI_MULTIPLY_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_2A_1T_1R,
-		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
-/* 23 */ ACPI_OP("Divide", ARGP_DIVIDE_OP, ARGI_DIVIDE_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_2A_2T_1R,
-		 AML_FLAGS_EXEC_2A_2T_1R | AML_CONSTANT),
-/* 24 */ ACPI_OP("ShiftLeft", ARGP_SHIFT_LEFT_OP, ARGI_SHIFT_LEFT_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_2A_1T_1R,
-		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
-/* 25 */ ACPI_OP("ShiftRight", ARGP_SHIFT_RIGHT_OP, ARGI_SHIFT_RIGHT_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_2A_1T_1R,
-		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
-/* 26 */ ACPI_OP("And", ARGP_BIT_AND_OP, ARGI_BIT_AND_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
-		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
-/* 27 */ ACPI_OP("NAnd", ARGP_BIT_NAND_OP, ARGI_BIT_NAND_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_2A_1T_1R,
-		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
-/* 28 */ ACPI_OP("Or", ARGP_BIT_OR_OP, ARGI_BIT_OR_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
-		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
-/* 29 */ ACPI_OP("NOr", ARGP_BIT_NOR_OP, ARGI_BIT_NOR_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
-		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
-/* 2A */ ACPI_OP("XOr", ARGP_BIT_XOR_OP, ARGI_BIT_XOR_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
-		 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
-/* 2B */ ACPI_OP("Not", ARGP_BIT_NOT_OP, ARGI_BIT_NOT_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
-		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
-/* 2C */ ACPI_OP("FindSetLeftBit", ARGP_FIND_SET_LEFT_BIT_OP,
-		 ARGI_FIND_SET_LEFT_BIT_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
-		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
-/* 2D */ ACPI_OP("FindSetRightBit", ARGP_FIND_SET_RIGHT_BIT_OP,
-		 ARGI_FIND_SET_RIGHT_BIT_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
-		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
-/* 2E */ ACPI_OP("DerefOf", ARGP_DEREF_OF_OP, ARGI_DEREF_OF_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R),
-/* 2F */ ACPI_OP("Notify", ARGP_NOTIFY_OP, ARGI_NOTIFY_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_2A_0T_0R, AML_FLAGS_EXEC_2A_0T_0R),
-/* 30 */ ACPI_OP("SizeOf", ARGP_SIZE_OF_OP, ARGI_SIZE_OF_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_1A_0T_1R,
-		 AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
-/* 31 */ ACPI_OP("Index", ARGP_INDEX_OP, ARGI_INDEX_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
-		 AML_FLAGS_EXEC_2A_1T_1R),
-/* 32 */ ACPI_OP("Match", ARGP_MATCH_OP, ARGI_MATCH_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_6A_0T_1R,
-		 AML_FLAGS_EXEC_6A_0T_1R | AML_CONSTANT),
-/* 33 */ ACPI_OP("CreateDWordField", ARGP_CREATE_DWORD_FIELD_OP,
-		 ARGI_CREATE_DWORD_FIELD_OP,
-		 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
-		 AML_TYPE_CREATE_FIELD,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
-		 AML_DEFER | AML_CREATE),
-/* 34 */ ACPI_OP("CreateWordField", ARGP_CREATE_WORD_FIELD_OP,
-		 ARGI_CREATE_WORD_FIELD_OP,
-		 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
-		 AML_TYPE_CREATE_FIELD,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
-		 AML_DEFER | AML_CREATE),
-/* 35 */ ACPI_OP("CreateByteField", ARGP_CREATE_BYTE_FIELD_OP,
-		 ARGI_CREATE_BYTE_FIELD_OP,
-		 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
-		 AML_TYPE_CREATE_FIELD,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
-		 AML_DEFER | AML_CREATE),
-/* 36 */ ACPI_OP("CreateBitField", ARGP_CREATE_BIT_FIELD_OP,
-		 ARGI_CREATE_BIT_FIELD_OP,
-		 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
-		 AML_TYPE_CREATE_FIELD,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
-		 AML_DEFER | AML_CREATE),
-/* 37 */ ACPI_OP("ObjectType", ARGP_TYPE_OP, ARGI_TYPE_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_1A_0T_1R,
-		 AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
-/* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
-		 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
-/* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
-		 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
-/* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
-		 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
-/* 3B */ ACPI_OP("LEqual", ARGP_LEQUAL_OP, ARGI_LEQUAL_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_2A_0T_1R,
-		 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
-/* 3C */ ACPI_OP("LGreater", ARGP_LGREATER_OP, ARGI_LGREATER_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_2A_0T_1R,
-		 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
-/* 3D */ ACPI_OP("LLess", ARGP_LLESS_OP, ARGI_LLESS_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
-		 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
-/* 3E */ ACPI_OP("If", ARGP_IF_OP, ARGI_IF_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
-/* 3F */ ACPI_OP("Else", ARGP_ELSE_OP, ARGI_ELSE_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
-/* 40 */ ACPI_OP("While", ARGP_WHILE_OP, ARGI_WHILE_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
-/* 41 */ ACPI_OP("Noop", ARGP_NOOP_OP, ARGI_NOOP_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
-/* 42 */ ACPI_OP("Return", ARGP_RETURN_OP, ARGI_RETURN_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_CONTROL,
-		 AML_TYPE_CONTROL, AML_HAS_ARGS),
-/* 43 */ ACPI_OP("Break", ARGP_BREAK_OP, ARGI_BREAK_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
-/* 44 */ ACPI_OP("BreakPoint", ARGP_BREAK_POINT_OP, ARGI_BREAK_POINT_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
-/* 45 */ ACPI_OP("Ones", ARGP_ONES_OP, ARGI_ONES_OP, ACPI_TYPE_INTEGER,
-		 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
-
-/* Prefixed opcodes (Two-byte opcodes with a prefix op) */
-
-/* 46 */ ACPI_OP("Mutex", ARGP_MUTEX_OP, ARGI_MUTEX_OP, ACPI_TYPE_MUTEX,
-		 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
-		 AML_NSNODE | AML_NAMED),
-/* 47 */ ACPI_OP("Event", ARGP_EVENT_OP, ARGI_EVENT_OP, ACPI_TYPE_EVENT,
-		 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
-		 AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED),
-/* 48 */ ACPI_OP("CondRefOf", ARGP_COND_REF_OF_OP, ARGI_COND_REF_OF_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R),
-/* 49 */ ACPI_OP("CreateField", ARGP_CREATE_FIELD_OP,
-		 ARGI_CREATE_FIELD_OP, ACPI_TYPE_BUFFER_FIELD,
-		 AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
-		 AML_DEFER | AML_FIELD | AML_CREATE),
-/* 4A */ ACPI_OP("Load", ARGP_LOAD_OP, ARGI_LOAD_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_0R,
-		 AML_FLAGS_EXEC_1A_1T_0R),
-/* 4B */ ACPI_OP("Stall", ARGP_STALL_OP, ARGI_STALL_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
-		 AML_FLAGS_EXEC_1A_0T_0R),
-/* 4C */ ACPI_OP("Sleep", ARGP_SLEEP_OP, ARGI_SLEEP_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
-		 AML_FLAGS_EXEC_1A_0T_0R),
-/* 4D */ ACPI_OP("Acquire", ARGP_ACQUIRE_OP, ARGI_ACQUIRE_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R),
-/* 4E */ ACPI_OP("Signal", ARGP_SIGNAL_OP, ARGI_SIGNAL_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
-/* 4F */ ACPI_OP("Wait", ARGP_WAIT_OP, ARGI_WAIT_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
-		 AML_FLAGS_EXEC_2A_0T_1R),
-/* 50 */ ACPI_OP("Reset", ARGP_RESET_OP, ARGI_RESET_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
-		 AML_FLAGS_EXEC_1A_0T_0R),
-/* 51 */ ACPI_OP("Release", ARGP_RELEASE_OP, ARGI_RELEASE_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
-/* 52 */ ACPI_OP("FromBCD", ARGP_FROM_BCD_OP, ARGI_FROM_BCD_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_1A_1T_1R,
-		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
-/* 53 */ ACPI_OP("ToBCD", ARGP_TO_BCD_OP, ARGI_TO_BCD_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
-		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
-/* 54 */ ACPI_OP("Unload", ARGP_UNLOAD_OP, ARGI_UNLOAD_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
-/* 55 */ ACPI_OP("Revision", ARGP_REVISION_OP, ARGI_REVISION_OP,
-		 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
-		 AML_TYPE_CONSTANT, 0),
-/* 56 */ ACPI_OP("Debug", ARGP_DEBUG_OP, ARGI_DEBUG_OP,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_CONSTANT, 0),
-/* 57 */ ACPI_OP("Fatal", ARGP_FATAL_OP, ARGI_FATAL_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_0T_0R,
-		 AML_FLAGS_EXEC_3A_0T_0R),
-/* 58 */ ACPI_OP("OperationRegion", ARGP_REGION_OP, ARGI_REGION_OP,
-		 ACPI_TYPE_REGION, AML_CLASS_NAMED_OBJECT,
-		 AML_TYPE_NAMED_COMPLEX,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
-		 AML_NSNODE | AML_NAMED | AML_DEFER),
-/* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
-/* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP,
-		 ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT,
-		 AML_TYPE_NAMED_NO_OBJ,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
-		 AML_NSNODE | AML_NAMED),
-/* 5B */ ACPI_OP("Processor", ARGP_PROCESSOR_OP, ARGI_PROCESSOR_OP,
-		 ACPI_TYPE_PROCESSOR, AML_CLASS_NAMED_OBJECT,
-		 AML_TYPE_NAMED_SIMPLE,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
-		 AML_NSNODE | AML_NAMED),
-/* 5C */ ACPI_OP("PowerResource", ARGP_POWER_RES_OP, ARGI_POWER_RES_OP,
-		 ACPI_TYPE_POWER, AML_CLASS_NAMED_OBJECT,
-		 AML_TYPE_NAMED_SIMPLE,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
-		 AML_NSNODE | AML_NAMED),
-/* 5D */ ACPI_OP("ThermalZone", ARGP_THERMAL_ZONE_OP,
-		 ARGI_THERMAL_ZONE_OP, ACPI_TYPE_THERMAL,
-		 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
-		 AML_NSNODE | AML_NAMED),
-/* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
-		 AML_TYPE_NAMED_FIELD,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
-/* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP,
-		 ACPI_TYPE_LOCAL_BANK_FIELD, AML_CLASS_NAMED_OBJECT,
-		 AML_TYPE_NAMED_FIELD,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD |
-		 AML_DEFER),
-
-/* Internal opcodes that map to invalid AML opcodes */
-
-/* 60 */ ACPI_OP("LNotEqual", ARGP_LNOTEQUAL_OP, ARGI_LNOTEQUAL_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
-		 AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT),
-/* 61 */ ACPI_OP("LLessEqual", ARGP_LLESSEQUAL_OP, ARGI_LLESSEQUAL_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
-		 AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT),
-/* 62 */ ACPI_OP("LGreaterEqual", ARGP_LGREATEREQUAL_OP,
-		 ARGI_LGREATEREQUAL_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_INTERNAL, AML_TYPE_BOGUS,
-		 AML_HAS_ARGS | AML_CONSTANT),
-/* 63 */ ACPI_OP("-NamePath-", ARGP_NAMEPATH_OP, ARGI_NAMEPATH_OP,
-		 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
-		 AML_TYPE_LITERAL, AML_NSOBJECT | AML_NSNODE),
-/* 64 */ ACPI_OP("-MethodCall-", ARGP_METHODCALL_OP, ARGI_METHODCALL_OP,
-		 ACPI_TYPE_METHOD, AML_CLASS_METHOD_CALL,
-		 AML_TYPE_METHOD_CALL,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE),
-/* 65 */ ACPI_OP("-ByteList-", ARGP_BYTELIST_OP, ARGI_BYTELIST_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_ARGUMENT,
-		 AML_TYPE_LITERAL, 0),
-/* 66 */ ACPI_OP("-ReservedField-", ARGP_RESERVEDFIELD_OP,
-		 ARGI_RESERVEDFIELD_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
-/* 67 */ ACPI_OP("-NamedField-", ARGP_NAMEDFIELD_OP, ARGI_NAMEDFIELD_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
-		 AML_TYPE_BOGUS,
-		 AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED),
-/* 68 */ ACPI_OP("-AccessField-", ARGP_ACCESSFIELD_OP,
-		 ARGI_ACCESSFIELD_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
-/* 69 */ ACPI_OP("-StaticString", ARGP_STATICSTRING_OP,
-		 ARGI_STATICSTRING_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
-/* 6A */ ACPI_OP("-Return Value-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
-		 AML_CLASS_RETURN_VALUE, AML_TYPE_RETURN,
-		 AML_HAS_ARGS | AML_HAS_RETVAL),
-/* 6B */ ACPI_OP("-UNKNOWN_OP-", ARG_NONE, ARG_NONE, ACPI_TYPE_INVALID,
-		 AML_CLASS_UNKNOWN, AML_TYPE_BOGUS, AML_HAS_ARGS),
-/* 6C */ ACPI_OP("-ASCII_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
-		 AML_CLASS_ASCII, AML_TYPE_BOGUS, AML_HAS_ARGS),
-/* 6D */ ACPI_OP("-PREFIX_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
-		 AML_CLASS_PREFIX, AML_TYPE_BOGUS, AML_HAS_ARGS),
-
-/* ACPI 2.0 opcodes */
-
-/* 6E */ ACPI_OP("QwordConst", ARGP_QWORD_OP, ARGI_QWORD_OP,
-		 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
-		 AML_TYPE_LITERAL, AML_CONSTANT),
-	/* 6F */ ACPI_OP("Package", /* Var */ ARGP_VAR_PACKAGE_OP,
-			 ARGI_VAR_PACKAGE_OP, ACPI_TYPE_PACKAGE,
-			 AML_CLASS_CREATE, AML_TYPE_CREATE_OBJECT,
-			 AML_HAS_ARGS | AML_DEFER),
-/* 70 */ ACPI_OP("ConcatenateResTemplate", ARGP_CONCAT_RES_OP,
-		 ARGI_CONCAT_RES_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
-		 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
-/* 71 */ ACPI_OP("Mod", ARGP_MOD_OP, ARGI_MOD_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
-		 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
-/* 72 */ ACPI_OP("CreateQWordField", ARGP_CREATE_QWORD_FIELD_OP,
-		 ARGI_CREATE_QWORD_FIELD_OP,
-		 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
-		 AML_TYPE_CREATE_FIELD,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
-		 AML_DEFER | AML_CREATE),
-/* 73 */ ACPI_OP("ToBuffer", ARGP_TO_BUFFER_OP, ARGI_TO_BUFFER_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_1A_1T_1R,
-		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
-/* 74 */ ACPI_OP("ToDecimalString", ARGP_TO_DEC_STR_OP,
-		 ARGI_TO_DEC_STR_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
-		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
-/* 75 */ ACPI_OP("ToHexString", ARGP_TO_HEX_STR_OP, ARGI_TO_HEX_STR_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_1A_1T_1R,
-		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
-/* 76 */ ACPI_OP("ToInteger", ARGP_TO_INTEGER_OP, ARGI_TO_INTEGER_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_1A_1T_1R,
-		 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
-/* 77 */ ACPI_OP("ToString", ARGP_TO_STRING_OP, ARGI_TO_STRING_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_2A_1T_1R,
-		 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
-/* 78 */ ACPI_OP("CopyObject", ARGP_COPY_OP, ARGI_COPY_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R),
-/* 79 */ ACPI_OP("Mid", ARGP_MID_OP, ARGI_MID_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_1T_1R,
-		 AML_FLAGS_EXEC_3A_1T_1R | AML_CONSTANT),
-/* 7A */ ACPI_OP("Continue", ARGP_CONTINUE_OP, ARGI_CONTINUE_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
-/* 7B */ ACPI_OP("LoadTable", ARGP_LOAD_TABLE_OP, ARGI_LOAD_TABLE_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
-		 AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R),
-/* 7C */ ACPI_OP("DataTableRegion", ARGP_DATA_REGION_OP,
-		 ARGI_DATA_REGION_OP, ACPI_TYPE_REGION,
-		 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
-		 AML_NSNODE | AML_NAMED | AML_DEFER),
-/* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
-		 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
-		 AML_TYPE_NAMED_NO_OBJ,
-		 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE),
-
-/* ACPI 3.0 opcodes */
-
-/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
-		 AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
-		 AML_FLAGS_EXEC_0A_0T_1R)
-
-/*! [End] no source code translation !*/
-};
-
-/*
- * This table is directly indexed by the opcodes, and returns an
- * index into the table above
- */
-static const u8 acpi_gbl_short_op_index[256] = {
-/*              0     1     2     3     4     5     6     7  */
-/*              8     9     A     B     C     D     E     F  */
-/* 0x00 */ 0x00, 0x01, _UNK, _UNK, _UNK, _UNK, 0x02, _UNK,
-/* 0x08 */ 0x03, _UNK, 0x04, 0x05, 0x06, 0x07, 0x6E, _UNK,
-/* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, _UNK, _UNK, _UNK,
-/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
-/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
-/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
-/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
-/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
-/* 0x58 */ _ASC, _ASC, _ASC, _UNK, _PFX, _UNK, _PFX, _ASC,
-/* 0x60 */ 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
-/* 0x68 */ 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, _UNK,
-/* 0x70 */ 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22,
-/* 0x78 */ 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a,
-/* 0x80 */ 0x2b, 0x2c, 0x2d, 0x2e, 0x70, 0x71, 0x2f, 0x30,
-/* 0x88 */ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x72,
-/* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x73, 0x74,
-/* 0x98 */ 0x75, 0x76, _UNK, _UNK, 0x77, 0x78, 0x79, 0x7A,
-/* 0xA0 */ 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x60, 0x61,
-/* 0xA8 */ 0x62, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xB0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xB8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xC0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xC8 */ _UNK, _UNK, _UNK, _UNK, 0x44, _UNK, _UNK, _UNK,
-/* 0xD0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xD8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xE0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xE8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xF0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xF8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x45,
-};
-
-/*
- * This table is indexed by the second opcode of the extended opcode
- * pair.  It returns an index into the opcode table (acpi_gbl_aml_op_info)
- */
-static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
-/*              0     1     2     3     4     5     6     7  */
-/*              8     9     A     B     C     D     E     F  */
-/* 0x00 */ _UNK, 0x46, 0x47, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x08 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x10 */ _UNK, _UNK, 0x48, 0x49, _UNK, _UNK, _UNK, _UNK,
-/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x7B,
-/* 0x20 */ 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51,
-/* 0x28 */ 0x52, 0x53, 0x54, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x30 */ 0x55, 0x56, 0x57, 0x7e, _UNK, _UNK, _UNK, _UNK,
-/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x40 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x48 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x50 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x58 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x60 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x68 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x70 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x78 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x80 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
-/* 0x88 */ 0x7C,
-};
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_opcode_info
- *
- * PARAMETERS:  Opcode              - The AML opcode
- *
- * RETURN:      A pointer to the info about the opcode.
- *
- * DESCRIPTION: Find AML opcode description based on the opcode.
- *              NOTE: This procedure must ALWAYS return a valid pointer!
- *
- ******************************************************************************/
-
-const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
-{
-	ACPI_FUNCTION_NAME(ps_get_opcode_info);
-
-	/*
-	 * Detect normal 8-bit opcode or extended 16-bit opcode
-	 */
-	if (!(opcode & 0xFF00)) {
-
-		/* Simple (8-bit) opcode: 0-255, can't index beyond table  */
-
-		return (&acpi_gbl_aml_op_info
-			[acpi_gbl_short_op_index[(u8) opcode]]);
-	}
-
-	if (((opcode & 0xFF00) == AML_EXTENDED_OPCODE) &&
-	    (((u8) opcode) <= MAX_EXTENDED_OPCODE)) {
-
-		/* Valid extended (16-bit) opcode */
-
-		return (&acpi_gbl_aml_op_info
-			[acpi_gbl_long_op_index[(u8) opcode]]);
-	}
-
-	/* Unknown AML opcode */
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-			  "Unknown AML opcode [%4.4X]\n", opcode));
-
-	return (&acpi_gbl_aml_op_info[_UNK]);
-}
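As a worked example of the lookup above: the one-byte opcode 0x10 indexes acpi_gbl_short_op_index to 0x08, the "Scope" entry, while the extended pair 0x5B 0x80 becomes opcode 0x5B80 and its low byte indexes acpi_gbl_long_op_index to 0x58, the "OperationRegion" entry. A scaled-down, self-contained model of the same two-level scheme; the tables are trimmed to just those entries and index 0 stands in for the _UNK slot.

#include <stdio.h>
#include <stdint.h>

/* Trimmed stand-ins for acpi_gbl_aml_op_info and the two index tables;
 * index 0 serves as the "unknown opcode" slot in this toy version. */
static const char *toy_op_names[0x60] = {
	[0x00] = "-UNKNOWN_OP-",
	[0x08] = "Scope",
	[0x58] = "OperationRegion",
};
static const uint8_t toy_short_index[256] = { [0x10] = 0x08 };
static const uint8_t toy_long_index[256]  = { [0x80] = 0x58 };

static const char *toy_lookup(uint16_t opcode)
{
	if (!(opcode & 0xFF00))                        /* simple 1-byte opcode */
		return toy_op_names[toy_short_index[(uint8_t)opcode]];

	/* extended opcode: 0x5B prefix combined with the second byte */
	return toy_op_names[toy_long_index[(uint8_t)opcode]];
}

int main(void)
{
	printf("0x10   -> %s\n", toy_lookup(0x0010));  /* Scope */
	printf("0x5B80 -> %s\n", toy_lookup(0x5B80));  /* OperationRegion */
	return 0;
}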
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_opcode_name
- *
- * PARAMETERS:  Opcode              - The AML opcode
- *
- * RETURN:      A pointer to the name of the opcode (ASCII String)
- *              Note: Never returns NULL.
- *
- * DESCRIPTION: Translate an opcode into a human-readable string
- *
- ******************************************************************************/
-
-char *acpi_ps_get_opcode_name(u16 opcode)
-{
-#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
-
-	const struct acpi_opcode_info *op;
-
-	op = acpi_ps_get_opcode_info(opcode);
-
-	/* Always guaranteed to return a valid pointer */
-
-	return (op->name);
-
-#else
-	return ("OpcodeName unavailable");
-
-#endif
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_argument_count
- *
- * PARAMETERS:  op_type             - Type associated with the AML opcode
- *
- * RETURN:      Argument count
- *
- * DESCRIPTION: Obtain the number of expected arguments for an AML opcode
- *
- ******************************************************************************/
-
-u8 acpi_ps_get_argument_count(u32 op_type)
-{
-
-	if (op_type <= AML_TYPE_EXEC_6A_0T_1R) {
-		return (acpi_gbl_argument_count[op_type]);
-	}
-
-	return (0);
-}
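acpi_gbl_argument_count above is indexed by the opcode's AML_TYPE_EXEC_* value, so a 2A_1T_1R opcode such as Add reports 2 fixed arguments and the 6A_0T_1R type used by Match and LoadTable reports 6. A minimal sketch of the bounds-checked lookup; the enum below only mirrors the ordering implied by the count table, and the authoritative constants live in the ACPICA headers.

#include <stdio.h>
#include <stdint.h>

/* Illustrative mirror of the exec-type ordering implied by the count table */
enum toy_exec_type {
	TOY_EXEC_0A_0T_1R, TOY_EXEC_1A_0T_0R, TOY_EXEC_1A_0T_1R,
	TOY_EXEC_1A_1T_0R, TOY_EXEC_1A_1T_1R, TOY_EXEC_2A_0T_0R,
	TOY_EXEC_2A_0T_1R, TOY_EXEC_2A_1T_1R, TOY_EXEC_2A_2T_1R,
	TOY_EXEC_3A_0T_0R, TOY_EXEC_3A_1T_1R, TOY_EXEC_6A_0T_1R,
};

static const uint8_t toy_argument_count[] =
	{ 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };

static uint8_t toy_get_argument_count(uint32_t op_type)
{
	if (op_type <= TOY_EXEC_6A_0T_1R)      /* bounds check, as above */
		return toy_argument_count[op_type];
	return 0;                              /* named/control/other opcodes */
}

int main(void)
{
	printf("Add   (2A_1T_1R): %u args\n",
	       toy_get_argument_count(TOY_EXEC_2A_1T_1R));
	printf("Match (6A_0T_1R): %u args\n",
	       toy_get_argument_count(TOY_EXEC_6A_0T_1R));
	return 0;
}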
diff --git a/drivers/acpi/parser/psparse.c b/drivers/acpi/parser/psparse.c
deleted file mode 100644
index 68e932f..0000000
--- a/drivers/acpi/parser/psparse.c
+++ /dev/null
@@ -1,688 +0,0 @@
-/******************************************************************************
- *
- * Module Name: psparse - Parser top level AML parse routines
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-/*
- * Parse the AML and build an operation tree as most interpreters,
- * like Perl, do.  Parsing is done by hand rather than with a YACC
- * generated parser to tightly constrain stack and dynamic memory
- * usage.  At the same time, parsing is kept flexible and the code
- * fairly compact by parsing based on a list of AML opcode
- * templates in aml_op_info[]
- */
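The comment above captures the design choice: iterate with an explicit, bounded scope stack instead of recursing or using a generated parser, so native stack and heap use stay predictable however deeply the AML nests. A tiny self-contained illustration of that style on a toy nested-bracket input, with no ACPICA types involved.

#include <stdio.h>

#define TOY_MAX_DEPTH 32

int main(void)
{
	const char *aml = "(()(()))";        /* toy stand-in for nested AML scopes */
	char stack[TOY_MAX_DEPTH];           /* explicit scope stack, fixed size */
	int depth = 0, max_depth = 0;

	for (const char *p = aml; *p; p++) {
		if (*p == '(') {                 /* "push scope" on an opening op */
			if (depth == TOY_MAX_DEPTH)
				return 1;            /* bounded, unlike native recursion */
			stack[depth++] = *p;
			if (depth > max_depth)
				max_depth = depth;
		} else if (*p == ')') {          /* "complete op": pop the scope */
			if (depth == 0)
				return 1;            /* malformed input */
			depth--;
		}
	}

	printf("parsed %s, max scope depth %d, %s\n", aml, max_depth,
	       depth == 0 ? "all scopes closed" : "unbalanced");
	return 0;
}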
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/acdispat.h>
-#include <acpi/amlcode.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acinterp.h>
-
-#define _COMPONENT          ACPI_PARSER
-ACPI_MODULE_NAME("psparse")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_opcode_size
- *
- * PARAMETERS:  Opcode          - An AML opcode
- *
- * RETURN:      Size of the opcode, in bytes (1 or 2)
- *
- * DESCRIPTION: Get the size of the current opcode.
- *
- ******************************************************************************/
-u32 acpi_ps_get_opcode_size(u32 opcode)
-{
-
-	/* Extended (2-byte) opcode if > 255 */
-
-	if (opcode > 0x00FF) {
-		return (2);
-	}
-
-	/* Otherwise, just a single byte opcode */
-
-	return (1);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_peek_opcode
- *
- * PARAMETERS:  parser_state        - A parser state object
- *
- * RETURN:      Next AML opcode
- *
- * DESCRIPTION: Get next AML opcode (without incrementing AML pointer)
- *
- ******************************************************************************/
-
-u16 acpi_ps_peek_opcode(struct acpi_parse_state * parser_state)
-{
-	u8 *aml;
-	u16 opcode;
-
-	aml = parser_state->aml;
-	opcode = (u16) ACPI_GET8(aml);
-
-	if (opcode == AML_EXTENDED_OP_PREFIX) {
-
-		/* Extended opcode, get the second opcode byte */
-
-		aml++;
-		opcode = (u16) ((opcode << 8) | ACPI_GET8(aml));
-	}
-
-	return (opcode);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_complete_this_op
- *
- * PARAMETERS:  walk_state      - Current State
- *              Op              - Op to complete
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Perform any cleanup at the completion of an Op.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
-			 union acpi_parse_object * op)
-{
-	union acpi_parse_object *prev;
-	union acpi_parse_object *next;
-	const struct acpi_opcode_info *parent_info;
-	union acpi_parse_object *replacement_op = NULL;
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE_PTR(ps_complete_this_op, op);
-
-	/* Check for null Op, can happen if AML code is corrupt */
-
-	if (!op) {
-		return_ACPI_STATUS(AE_OK);	/* OK for now */
-	}
-
-	/* Delete this op and the subtree below it if asked to */
-
-	if (((walk_state->parse_flags & ACPI_PARSE_TREE_MASK) !=
-	     ACPI_PARSE_DELETE_TREE)
-	    || (walk_state->op_info->class == AML_CLASS_ARGUMENT)) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Make sure that we only delete this subtree */
-
-	if (op->common.parent) {
-		prev = op->common.parent->common.value.arg;
-		if (!prev) {
-
-			/* Nothing more to do */
-
-			goto cleanup;
-		}
-
-		/*
-		 * Check if we need to replace the operator and its subtree
-		 * with a return value op (placeholder op)
-		 */
-		parent_info =
-		    acpi_ps_get_opcode_info(op->common.parent->common.
-					    aml_opcode);
-
-		switch (parent_info->class) {
-		case AML_CLASS_CONTROL:
-			break;
-
-		case AML_CLASS_CREATE:
-
-			/*
-			 * These opcodes contain term_arg operands. The current
-			 * op must be replaced by a placeholder return op
-			 */
-			replacement_op =
-			    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
-			if (!replacement_op) {
-				status = AE_NO_MEMORY;
-			}
-			break;
-
-		case AML_CLASS_NAMED_OBJECT:
-
-			/*
-			 * These opcodes contain term_arg operands. The current
-			 * op must be replaced by a placeholder return op
-			 */
-			if ((op->common.parent->common.aml_opcode ==
-			     AML_REGION_OP)
-			    || (op->common.parent->common.aml_opcode ==
-				AML_DATA_REGION_OP)
-			    || (op->common.parent->common.aml_opcode ==
-				AML_BUFFER_OP)
-			    || (op->common.parent->common.aml_opcode ==
-				AML_PACKAGE_OP)
-			    || (op->common.parent->common.aml_opcode ==
-				AML_BANK_FIELD_OP)
-			    || (op->common.parent->common.aml_opcode ==
-				AML_VAR_PACKAGE_OP)) {
-				replacement_op =
-				    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
-				if (!replacement_op) {
-					status = AE_NO_MEMORY;
-				}
-			} else
-			    if ((op->common.parent->common.aml_opcode ==
-				 AML_NAME_OP)
-				&& (walk_state->pass_number <=
-				    ACPI_IMODE_LOAD_PASS2)) {
-				if ((op->common.aml_opcode == AML_BUFFER_OP)
-				    || (op->common.aml_opcode == AML_PACKAGE_OP)
-				    || (op->common.aml_opcode ==
-					AML_VAR_PACKAGE_OP)) {
-					replacement_op =
-					    acpi_ps_alloc_op(op->common.
-							     aml_opcode);
-					if (!replacement_op) {
-						status = AE_NO_MEMORY;
-					} else {
-						replacement_op->named.data =
-						    op->named.data;
-						replacement_op->named.length =
-						    op->named.length;
-					}
-				}
-			}
-			break;
-
-		default:
-
-			replacement_op =
-			    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
-			if (!replacement_op) {
-				status = AE_NO_MEMORY;
-			}
-		}
-
-		/* We must unlink this op from the parent tree */
-
-		if (prev == op) {
-
-			/* This op is the first in the list */
-
-			if (replacement_op) {
-				replacement_op->common.parent =
-				    op->common.parent;
-				replacement_op->common.value.arg = NULL;
-				replacement_op->common.node = op->common.node;
-				op->common.parent->common.value.arg =
-				    replacement_op;
-				replacement_op->common.next = op->common.next;
-			} else {
-				op->common.parent->common.value.arg =
-				    op->common.next;
-			}
-		}
-
-		/* Search the parent list */
-
-		else
-			while (prev) {
-
-				/* Traverse all siblings in the parent's argument list */
-
-				next = prev->common.next;
-				if (next == op) {
-					if (replacement_op) {
-						replacement_op->common.parent =
-						    op->common.parent;
-						replacement_op->common.value.
-						    arg = NULL;
-						replacement_op->common.node =
-						    op->common.node;
-						prev->common.next =
-						    replacement_op;
-						replacement_op->common.next =
-						    op->common.next;
-						next = NULL;
-					} else {
-						prev->common.next =
-						    op->common.next;
-						next = NULL;
-					}
-				}
-				prev = next;
-			}
-	}
-
-      cleanup:
-
-	/* Now we can actually delete the subtree rooted at Op */
-
-	acpi_ps_delete_parse_tree(op);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_next_parse_state
- *
- * PARAMETERS:  walk_state          - Current state
- *              Op                  - Current parse op
- *              callback_status     - Status from previous operation
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Update the parser state based upon the return exception from
- *              the parser callback.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
-			 union acpi_parse_object *op,
-			 acpi_status callback_status)
-{
-	struct acpi_parse_state *parser_state = &walk_state->parser_state;
-	acpi_status status = AE_CTRL_PENDING;
-
-	ACPI_FUNCTION_TRACE_PTR(ps_next_parse_state, op);
-
-	switch (callback_status) {
-	case AE_CTRL_TERMINATE:
-		/*
-		 * A control method was terminated via a RETURN statement.
-		 * The walk of this method is complete.
-		 */
-		parser_state->aml = parser_state->aml_end;
-		status = AE_CTRL_TERMINATE;
-		break;
-
-	case AE_CTRL_BREAK:
-
-		parser_state->aml = walk_state->aml_last_while;
-		walk_state->control_state->common.value = FALSE;
-		status = AE_CTRL_BREAK;
-		break;
-
-	case AE_CTRL_CONTINUE:
-
-		parser_state->aml = walk_state->aml_last_while;
-		status = AE_CTRL_CONTINUE;
-		break;
-
-	case AE_CTRL_PENDING:
-
-		parser_state->aml = walk_state->aml_last_while;
-		break;
-
-#if 0
-	case AE_CTRL_SKIP:
-
-		parser_state->aml = parser_state->scope->parse_scope.pkg_end;
-		status = AE_OK;
-		break;
-#endif
-
-	case AE_CTRL_TRUE:
-		/*
-		 * Predicate of an IF was true, and we are at the matching ELSE.
-		 * Just close out this package
-		 */
-		parser_state->aml = acpi_ps_get_next_package_end(parser_state);
-		status = AE_CTRL_PENDING;
-		break;
-
-	case AE_CTRL_FALSE:
-		/*
-		 * Either an IF/WHILE Predicate was false or we encountered a BREAK
-		 * opcode.  In both cases, we do not execute the rest of the
-		 * package;  We simply close out the parent (finishing the walk of
-		 * this branch of the tree) and continue execution at the parent
-		 * level.
-		 */
-		parser_state->aml = parser_state->scope->parse_scope.pkg_end;
-
-		/* In the case of a BREAK, just force a predicate (if any) to FALSE */
-
-		walk_state->control_state->common.value = FALSE;
-		status = AE_CTRL_END;
-		break;
-
-	case AE_CTRL_TRANSFER:
-
-		/* A method call (invocation) -- transfer control */
-
-		status = AE_CTRL_TRANSFER;
-		walk_state->prev_op = op;
-		walk_state->method_call_op = op;
-		walk_state->method_call_node =
-		    (op->common.value.arg)->common.node;
-
-		/* Will return value (if any) be used by the caller? */
-
-		walk_state->return_used =
-		    acpi_ds_is_result_used(op, walk_state);
-		break;
-
-	default:
-
-		status = callback_status;
-		if ((callback_status & AE_CODE_MASK) == AE_CODE_CONTROL) {
-			status = AE_OK;
-		}
-		break;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_parse_aml
- *
- * PARAMETERS:  walk_state      - Current state
- *
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Parse raw AML and return a tree of ops
- *
- ******************************************************************************/
-
-acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	struct acpi_thread_state *thread;
-	struct acpi_thread_state *prev_walk_list = acpi_gbl_current_walk_list;
-	struct acpi_walk_state *previous_walk_state;
-
-	ACPI_FUNCTION_TRACE(ps_parse_aml);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
-			  "Entered with WalkState=%p Aml=%p size=%X\n",
-			  walk_state, walk_state->parser_state.aml,
-			  walk_state->parser_state.aml_size));
-
-	/* Create and initialize a new thread state */
-
-	thread = acpi_ut_create_thread_state();
-	if (!thread) {
-		acpi_ds_delete_walk_state(walk_state);
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	walk_state->thread = thread;
-
-	/*
-	 * If executing a method, the starting sync_level is this method's
-	 * sync_level
-	 */
-	if (walk_state->method_desc) {
-		walk_state->thread->current_sync_level =
-		    walk_state->method_desc->method.sync_level;
-	}
-
-	acpi_ds_push_walk_state(walk_state, thread);
-
-	/*
-	 * This global allows the AML debugger to get a handle to the currently
-	 * executing control method.
-	 */
-	acpi_gbl_current_walk_list = thread;
-
-	/*
-	 * Execute the walk loop as long as there is a valid Walk State.  This
-	 * handles nested control method invocations without recursion.
-	 */
-	ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "State=%p\n", walk_state));
-
-	status = AE_OK;
-	while (walk_state) {
-		if (ACPI_SUCCESS(status)) {
-			/*
-			 * The parse_loop executes AML until the method terminates
-			 * or calls another method.
-			 */
-			status = acpi_ps_parse_loop(walk_state);
-		}
-
-		ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
-				  "Completed one call to walk loop, %s State=%p\n",
-				  acpi_format_exception(status), walk_state));
-
-		if (status == AE_CTRL_TRANSFER) {
-			/*
-			 * A method call was detected.
-			 * Transfer control to the called control method
-			 */
-			status =
-			    acpi_ds_call_control_method(thread, walk_state,
-							NULL);
-			if (ACPI_FAILURE(status)) {
-				status =
-				    acpi_ds_method_error(status, walk_state);
-			}
-
-			/*
-			 * If the transfer to the new method call worked, a new walk
-			 * state was created -- get it
-			 */
-			walk_state = acpi_ds_get_current_walk_state(thread);
-			continue;
-		} else if (status == AE_CTRL_TERMINATE) {
-			status = AE_OK;
-		} else if ((status != AE_OK) && (walk_state->method_desc)) {
-
-			/* Either the method parse or actual execution failed */
-
-			ACPI_ERROR_METHOD("Method parse/execution failed",
-					  walk_state->method_node, NULL,
-					  status);
-
-			/* Check for possible multi-thread reentrancy problem */
-
-			if ((status == AE_ALREADY_EXISTS) &&
-			    (!walk_state->method_desc->method.mutex)) {
-				ACPI_INFO((AE_INFO,
-					   "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
-					   walk_state->method_node->name.
-					   ascii));
-
-				/*
-				 * Method tried to create an object twice. The probable cause is
-				 * that the method cannot handle reentrancy.
-				 *
-				 * The method is marked not_serialized, but it tried to create
-				 * a named object, causing the second thread entrance to fail.
-				 * Workaround this problem by marking the method permanently
-				 * as Serialized.
-				 */
-				walk_state->method_desc->method.method_flags |=
-				    AML_METHOD_SERIALIZED;
-				walk_state->method_desc->method.sync_level = 0;
-			}
-		}
-
-		/* We are done with this walk, move on to the parent if any */
-
-		walk_state = acpi_ds_pop_walk_state(thread);
-
-		/* Reset the current scope to the beginning of scope stack */
-
-		acpi_ds_scope_stack_clear(walk_state);
-
-		/*
-		 * If we just returned from the execution of a control method or if we
-		 * encountered an error during the method parse phase, there's lots of
-		 * cleanup to do
-		 */
-		if (((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
-		     ACPI_PARSE_EXECUTE) || (ACPI_FAILURE(status))) {
-			acpi_ds_terminate_control_method(walk_state->
-							 method_desc,
-							 walk_state);
-		}
-
-		/* Delete this walk state and all linked control states */
-
-		acpi_ps_cleanup_scope(&walk_state->parser_state);
-		previous_walk_state = walk_state;
-
-		ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
-				  "ReturnValue=%p, ImplicitValue=%p State=%p\n",
-				  walk_state->return_desc,
-				  walk_state->implicit_return_obj, walk_state));
-
-		/* Check if we have restarted a preempted walk */
-
-		walk_state = acpi_ds_get_current_walk_state(thread);
-		if (walk_state) {
-			if (ACPI_SUCCESS(status)) {
-				/*
-				 * There is another walk state, restart it.
-				 * If the method return value is not used by the parent,
-				 * The object is deleted
-				 * the object is deleted.
-				if (!previous_walk_state->return_desc) {
-					/*
-					 * In slack mode execution, if there is no return value
-					 * we should implicitly return zero (0) as a default value.
-					 */
-					if (acpi_gbl_enable_interpreter_slack &&
-					    !previous_walk_state->
-					    implicit_return_obj) {
-						previous_walk_state->
-						    implicit_return_obj =
-						    acpi_ut_create_internal_object
-						    (ACPI_TYPE_INTEGER);
-						if (!previous_walk_state->
-						    implicit_return_obj) {
-							return_ACPI_STATUS
-							    (AE_NO_MEMORY);
-						}
-
-						previous_walk_state->
-						    implicit_return_obj->
-						    integer.value = 0;
-					}
-
-					/* Restart the calling control method */
-
-					status =
-					    acpi_ds_restart_control_method
-					    (walk_state,
-					     previous_walk_state->
-					     implicit_return_obj);
-				} else {
-					/*
-					 * We have a valid return value, delete any implicit
-					 * return value.
-					 */
-					acpi_ds_clear_implicit_return
-					    (previous_walk_state);
-
-					status =
-					    acpi_ds_restart_control_method
-					    (walk_state,
-					     previous_walk_state->return_desc);
-				}
-				if (ACPI_SUCCESS(status)) {
-					walk_state->walk_type |=
-					    ACPI_WALK_METHOD_RESTART;
-				}
-			} else {
-				/* On error, delete any return object or implicit return */
-
-				acpi_ut_remove_reference(previous_walk_state->
-							 return_desc);
-				acpi_ds_clear_implicit_return
-				    (previous_walk_state);
-			}
-		}
-
-		/*
-		 * Just completed a 1st-level method, save the final internal return
-		 * value (if any)
-		 */
-		else if (previous_walk_state->caller_return_desc) {
-			if (previous_walk_state->implicit_return_obj) {
-				*(previous_walk_state->caller_return_desc) =
-				    previous_walk_state->implicit_return_obj;
-			} else {
-				/* NULL if no return value */
-
-				*(previous_walk_state->caller_return_desc) =
-				    previous_walk_state->return_desc;
-			}
-		} else {
-			if (previous_walk_state->return_desc) {
-
-				/* Caller doesn't want it, must delete it */
-
-				acpi_ut_remove_reference(previous_walk_state->
-							 return_desc);
-			}
-			if (previous_walk_state->implicit_return_obj) {
-
-				/* Caller doesn't want it, must delete it */
-
-				acpi_ut_remove_reference(previous_walk_state->
-							 implicit_return_obj);
-			}
-		}
-
-		acpi_ds_delete_walk_state(previous_walk_state);
-	}
-
-	/* Normal exit */
-
-	acpi_ex_release_all_mutexes(thread);
-	acpi_ut_delete_generic_state(ACPI_CAST_PTR
-				     (union acpi_generic_state, thread));
-	acpi_gbl_current_walk_list = prev_walk_list;
-	return_ACPI_STATUS(status);
-}
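
The acpi_ps_parse_aml() loop deleted above handles nested control method calls by pushing a fresh walk state onto the thread's list and re-entering the same loop, never by recursing in C. A minimal sketch of that shape, using hypothetical stand-in types rather than the real acpi_walk_state/acpi_thread_state:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified stand-ins for acpi_walk_state/acpi_thread_state. */
struct walk_state {
	int depth;			/* nesting level of this "method" */
	int calls_left;			/* nested calls it still makes */
	struct walk_state *next;	/* link in the per-thread stack */
};

struct thread_state {
	struct walk_state *top;
};

static void push_walk(struct thread_state *t, struct walk_state *w)
{
	w->next = t->top;
	t->top = w;
}

static struct walk_state *pop_walk(struct thread_state *t)
{
	struct walk_state *w = t->top;

	if (w)
		t->top = w->next;
	return w;
}

/* A "call" pushes a new walk state and the loop restarts; a completed
 * walk is popped and its caller resumes, so deep nesting never grows
 * the C call stack. */
static void run_thread(struct thread_state *t)
{
	struct walk_state *w;

	while ((w = t->top) != NULL) {
		if (w->calls_left > 0) {
			struct walk_state *callee = calloc(1, sizeof(*callee));

			if (!callee)
				return;
			w->calls_left--;
			callee->depth = w->depth + 1;
			push_walk(t, callee);
			continue;
		}
		printf("method at depth %d completed\n", w->depth);
		free(pop_walk(t));
	}
}

int main(void)
{
	struct thread_state t = { NULL };
	struct walk_state *root = calloc(1, sizeof(*root));

	if (!root)
		return 1;
	root->calls_left = 2;
	push_walk(&t, root);
	run_thread(&t);
	return 0;
}

The real loop additionally restarts the caller with the callee's return value (or an implicit zero in slack mode) and tears each walk state down as it is popped.
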
diff --git a/drivers/acpi/parser/psscope.c b/drivers/acpi/parser/psscope.c
deleted file mode 100644
index ee50e67..0000000
--- a/drivers/acpi/parser/psscope.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/******************************************************************************
- *
- * Module Name: psscope - Parser scope stack management routines
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-
-#define _COMPONENT          ACPI_PARSER
-ACPI_MODULE_NAME("psscope")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_parent_scope
- *
- * PARAMETERS:  parser_state        - Current parser state object
- *
- * RETURN:      Pointer to an Op object
- *
- * DESCRIPTION: Get parent of current op being parsed
- *
- ******************************************************************************/
-union acpi_parse_object *acpi_ps_get_parent_scope(struct acpi_parse_state
-						  *parser_state)
-{
-
-	return (parser_state->scope->parse_scope.op);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_has_completed_scope
- *
- * PARAMETERS:  parser_state        - Current parser state object
- *
- * RETURN:      Boolean, TRUE = scope completed.
- *
- * DESCRIPTION: Is parsing of current argument complete?  Determined by
- *              1) AML pointer is at or beyond the end of the scope
- *              2) The scope argument count has reached zero.
- *
- ******************************************************************************/
-
-u8 acpi_ps_has_completed_scope(struct acpi_parse_state * parser_state)
-{
-
-	return ((u8)
-		((parser_state->aml >= parser_state->scope->parse_scope.arg_end
-		  || !parser_state->scope->parse_scope.arg_count)));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_init_scope
- *
- * PARAMETERS:  parser_state        - Current parser state object
- *              Root                - the Root Node of this new scope
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Allocate and init a new scope object
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ps_init_scope(struct acpi_parse_state * parser_state,
-		   union acpi_parse_object * root_op)
-{
-	union acpi_generic_state *scope;
-
-	ACPI_FUNCTION_TRACE_PTR(ps_init_scope, root_op);
-
-	scope = acpi_ut_create_generic_state();
-	if (!scope) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_RPSCOPE;
-	scope->parse_scope.op = root_op;
-	scope->parse_scope.arg_count = ACPI_VAR_ARGS;
-	scope->parse_scope.arg_end = parser_state->aml_end;
-	scope->parse_scope.pkg_end = parser_state->aml_end;
-
-	parser_state->scope = scope;
-	parser_state->start_op = root_op;
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_push_scope
- *
- * PARAMETERS:  parser_state        - Current parser state object
- *              Op                  - Current op to be pushed
- *              remaining_args      - List of args remaining
- *              arg_count           - Fixed or variable number of args
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Push current op to begin parsing its argument
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ps_push_scope(struct acpi_parse_state *parser_state,
-		   union acpi_parse_object *op,
-		   u32 remaining_args, u32 arg_count)
-{
-	union acpi_generic_state *scope;
-
-	ACPI_FUNCTION_TRACE_PTR(ps_push_scope, op);
-
-	scope = acpi_ut_create_generic_state();
-	if (!scope) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_PSCOPE;
-	scope->parse_scope.op = op;
-	scope->parse_scope.arg_list = remaining_args;
-	scope->parse_scope.arg_count = arg_count;
-	scope->parse_scope.pkg_end = parser_state->pkg_end;
-
-	/* Push onto scope stack */
-
-	acpi_ut_push_generic_state(&parser_state->scope, scope);
-
-	if (arg_count == ACPI_VAR_ARGS) {
-
-		/* Multiple arguments */
-
-		scope->parse_scope.arg_end = parser_state->pkg_end;
-	} else {
-		/* Single argument */
-
-		scope->parse_scope.arg_end = ACPI_TO_POINTER(ACPI_MAX_PTR);
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_pop_scope
- *
- * PARAMETERS:  parser_state        - Current parser state object
- *              Op                  - Where the popped op is returned
- *              arg_list            - Where the popped "next argument" is
- *                                    returned
- *              arg_count           - Count of objects in arg_list
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Return to parsing a previous op
- *
- ******************************************************************************/
-
-void
-acpi_ps_pop_scope(struct acpi_parse_state *parser_state,
-		  union acpi_parse_object **op, u32 * arg_list, u32 * arg_count)
-{
-	union acpi_generic_state *scope = parser_state->scope;
-
-	ACPI_FUNCTION_TRACE(ps_pop_scope);
-
-	/* Only pop the scope if there is in fact a next scope */
-
-	if (scope->common.next) {
-		scope = acpi_ut_pop_generic_state(&parser_state->scope);
-
-		/* Return to parsing previous op */
-
-		*op = scope->parse_scope.op;
-		*arg_list = scope->parse_scope.arg_list;
-		*arg_count = scope->parse_scope.arg_count;
-		parser_state->pkg_end = scope->parse_scope.pkg_end;
-
-		/* All done with this scope state structure */
-
-		acpi_ut_delete_generic_state(scope);
-	} else {
-		/* Empty parse stack, prepare to fetch next opcode */
-
-		*op = NULL;
-		*arg_list = 0;
-		*arg_count = 0;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
-			  "Popped Op %p Args %X\n", *op, *arg_count));
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_cleanup_scope
- *
- * PARAMETERS:  parser_state        - Current parser state object
- *
- * RETURN:      None
- *
- * DESCRIPTION: Destroy available list, remaining stack levels, and return
- *              root scope
- *
- ******************************************************************************/
-
-void acpi_ps_cleanup_scope(struct acpi_parse_state *parser_state)
-{
-	union acpi_generic_state *scope;
-
-	ACPI_FUNCTION_TRACE_PTR(ps_cleanup_scope, parser_state);
-
-	if (!parser_state) {
-		return_VOID;
-	}
-
-	/* Delete anything on the scope stack */
-
-	while (parser_state->scope) {
-		scope = acpi_ut_pop_generic_state(&parser_state->scope);
-		acpi_ut_delete_generic_state(scope);
-	}
-
-	return_VOID;
-}
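
The scope stack removed here records, per nesting level, which op is being parsed, how many arguments remain, and where the enclosing package ends; acpi_ps_has_completed_scope() folds the last two into a single test. A small, hedged sketch of that completion test with made-up types:

#include <stdio.h>

/* Assumed, simplified per-level parse scope state. */
struct parse_scope {
	unsigned int arg_count;		/* args still expected at this level */
	const unsigned char *arg_end;	/* end of this level's args in the AML */
};

/* Mirrors acpi_ps_has_completed_scope(): a level is done once the AML
 * cursor reaches arg_end or no fixed arguments remain. */
static int scope_completed(const struct parse_scope *s, const unsigned char *aml)
{
	return aml >= s->arg_end || s->arg_count == 0;
}

int main(void)
{
	unsigned char aml[16] = { 0 };
	struct parse_scope s = { .arg_count = 2, .arg_end = aml + 8 };

	printf("%d\n", scope_completed(&s, aml));	/* 0: still inside the scope */
	printf("%d\n", scope_completed(&s, aml + 8));	/* 1: cursor hit arg_end */
	return 0;
}

Fixed-argument ops rely on arg_count reaching zero instead, which is why acpi_ps_push_scope() sets arg_end to ACPI_MAX_PTR in that case.
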
diff --git a/drivers/acpi/parser/pstree.c b/drivers/acpi/parser/pstree.c
deleted file mode 100644
index 1dd355d..0000000
--- a/drivers/acpi/parser/pstree.c
+++ /dev/null
@@ -1,311 +0,0 @@
-/******************************************************************************
- *
- * Module Name: pstree - Parser op tree manipulation/traversal/search
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/amlcode.h>
-
-#define _COMPONENT          ACPI_PARSER
-ACPI_MODULE_NAME("pstree")
-
-/* Local prototypes */
-#ifdef ACPI_OBSOLETE_FUNCTIONS
-union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op);
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_arg
- *
- * PARAMETERS:  Op              - Get an argument for this op
- *              Argn            - Nth argument to get
- *
- * RETURN:      The argument (as an Op object). NULL if argument does not exist
- *
- * DESCRIPTION: Get the specified op's argument.
- *
- ******************************************************************************/
-
-union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
-{
-	union acpi_parse_object *arg = NULL;
-	const struct acpi_opcode_info *op_info;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* Get the info structure for this opcode */
-
-	op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
-	if (op_info->class == AML_CLASS_UNKNOWN) {
-
-		/* Invalid opcode or ASCII character */
-
-		return (NULL);
-	}
-
-	/* Check if this opcode requires argument sub-objects */
-
-	if (!(op_info->flags & AML_HAS_ARGS)) {
-
-		/* Has no linked argument objects */
-
-		return (NULL);
-	}
-
-	/* Get the requested argument object */
-
-	arg = op->common.value.arg;
-	while (arg && argn) {
-		argn--;
-		arg = arg->common.next;
-	}
-
-	return (arg);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_append_arg
- *
- * PARAMETERS:  Op              - Append an argument to this Op.
- *              Arg             - Argument Op to append
- *
- * RETURN:      None.
- *
- * DESCRIPTION: Append an argument to an op's argument list (a NULL arg is OK)
- *
- ******************************************************************************/
-
-void
-acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
-{
-	union acpi_parse_object *prev_arg;
-	const struct acpi_opcode_info *op_info;
-
-	ACPI_FUNCTION_ENTRY();
-
-	if (!op) {
-		return;
-	}
-
-	/* Get the info structure for this opcode */
-
-	op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
-	if (op_info->class == AML_CLASS_UNKNOWN) {
-
-		/* Invalid opcode */
-
-		ACPI_ERROR((AE_INFO, "Invalid AML Opcode: 0x%2.2X",
-			    op->common.aml_opcode));
-		return;
-	}
-
-	/* Check if this opcode requires argument sub-objects */
-
-	if (!(op_info->flags & AML_HAS_ARGS)) {
-
-		/* Has no linked argument objects */
-
-		return;
-	}
-
-	/* Append the argument to the linked argument list */
-
-	if (op->common.value.arg) {
-
-		/* Append to existing argument list */
-
-		prev_arg = op->common.value.arg;
-		while (prev_arg->common.next) {
-			prev_arg = prev_arg->common.next;
-		}
-		prev_arg->common.next = arg;
-	} else {
-		/* No argument list, this will be the first argument */
-
-		op->common.value.arg = arg;
-	}
-
-	/* Set the parent in this arg and any args linked after it */
-
-	while (arg) {
-		arg->common.parent = op;
-		arg = arg->common.next;
-
-		op->common.arg_list_length++;
-	}
-}
-
-#ifdef ACPI_FUTURE_USAGE
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_depth_next
- *
- * PARAMETERS:  Origin          - Root of subtree to search
- *              Op              - Last (previous) Op that was found
- *
- * RETURN:      Next Op found in the search.
- *
- * DESCRIPTION: Get next op in tree (walking the tree in depth-first order)
- *              Return NULL when reaching "origin" or when walking up from root
- *
- ******************************************************************************/
-
-union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin,
-						union acpi_parse_object *op)
-{
-	union acpi_parse_object *next = NULL;
-	union acpi_parse_object *parent;
-	union acpi_parse_object *arg;
-
-	ACPI_FUNCTION_ENTRY();
-
-	if (!op) {
-		return (NULL);
-	}
-
-	/* Look for an argument or child */
-
-	next = acpi_ps_get_arg(op, 0);
-	if (next) {
-		return (next);
-	}
-
-	/* Look for a sibling */
-
-	next = op->common.next;
-	if (next) {
-		return (next);
-	}
-
-	/* Look for a sibling of parent */
-
-	parent = op->common.parent;
-
-	while (parent) {
-		arg = acpi_ps_get_arg(parent, 0);
-		while (arg && (arg != origin) && (arg != op)) {
-			arg = arg->common.next;
-		}
-
-		if (arg == origin) {
-
-			/* Reached parent of origin, end search */
-
-			return (NULL);
-		}
-
-		if (parent->common.next) {
-
-			/* Found sibling of parent */
-
-			return (parent->common.next);
-		}
-
-		op = parent;
-		parent = parent->common.parent;
-	}
-
-	return (next);
-}
-
-#ifdef ACPI_OBSOLETE_FUNCTIONS
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_get_child
- *
- * PARAMETERS:  Op              - Get the child of this Op
- *
- * RETURN:      Child Op, Null if none is found.
- *
- * DESCRIPTION: Get op's children or NULL if none
- *
- ******************************************************************************/
-
-union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op)
-{
-	union acpi_parse_object *child = NULL;
-
-	ACPI_FUNCTION_ENTRY();
-
-	switch (op->common.aml_opcode) {
-	case AML_SCOPE_OP:
-	case AML_ELSE_OP:
-	case AML_DEVICE_OP:
-	case AML_THERMAL_ZONE_OP:
-	case AML_INT_METHODCALL_OP:
-
-		child = acpi_ps_get_arg(op, 0);
-		break;
-
-	case AML_BUFFER_OP:
-	case AML_PACKAGE_OP:
-	case AML_METHOD_OP:
-	case AML_IF_OP:
-	case AML_WHILE_OP:
-	case AML_FIELD_OP:
-
-		child = acpi_ps_get_arg(op, 1);
-		break;
-
-	case AML_POWER_RES_OP:
-	case AML_INDEX_FIELD_OP:
-
-		child = acpi_ps_get_arg(op, 2);
-		break;
-
-	case AML_PROCESSOR_OP:
-	case AML_BANK_FIELD_OP:
-
-		child = acpi_ps_get_arg(op, 3);
-		break;
-
-	default:
-		/* All others have no children */
-		break;
-	}
-
-	return (child);
-}
-#endif
-#endif				/*  ACPI_FUTURE_USAGE  */
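
acpi_ps_get_depth_next() above walks the op tree in depth-first order using only the child/sibling/parent links, with no recursion and no auxiliary stack. The same idea on a hypothetical node type:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical node carrying the same three links as a parse op. */
struct node {
	struct node *child;	/* first argument / child */
	struct node *sibling;	/* next node at the same level */
	struct node *parent;
	const char *name;
};

/* Depth-first successor: the child if any, else the sibling, else climb
 * parents (stopping below the root) until one has a sibling to visit. */
static struct node *depth_next(struct node *root, struct node *n)
{
	if (n->child)
		return n->child;
	if (n->sibling)
		return n->sibling;
	for (n = n->parent; n && n != root; n = n->parent)
		if (n->sibling)
			return n->sibling;
	return NULL;
}

int main(void)
{
	struct node root = { .name = "root" };
	struct node a = { .name = "a", .parent = &root };
	struct node b = { .name = "b", .parent = &a };
	struct node c = { .name = "c", .parent = &root };
	struct node *n;

	root.child = &a;
	a.child = &b;
	a.sibling = &c;

	for (n = &root; n; n = depth_next(&root, n))
		printf("%s\n", n->name);	/* root a b c */
	return 0;
}
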
diff --git a/drivers/acpi/parser/psutils.c b/drivers/acpi/parser/psutils.c
deleted file mode 100644
index 7cf1f65..0000000
--- a/drivers/acpi/parser/psutils.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/******************************************************************************
- *
- * Module Name: psutils - Parser miscellaneous utilities (Parser only)
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/amlcode.h>
-
-#define _COMPONENT          ACPI_PARSER
-ACPI_MODULE_NAME("psutils")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_create_scope_op
- *
- * PARAMETERS:  None
- *
- * RETURN:      A new Scope object, null on failure
- *
- * DESCRIPTION: Create a Scope and associated namepath op with the root name
- *
- ******************************************************************************/
-union acpi_parse_object *acpi_ps_create_scope_op(void)
-{
-	union acpi_parse_object *scope_op;
-
-	scope_op = acpi_ps_alloc_op(AML_SCOPE_OP);
-	if (!scope_op) {
-		return (NULL);
-	}
-
-	scope_op->named.name = ACPI_ROOT_NAME;
-	return (scope_op);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_init_op
- *
- * PARAMETERS:  Op              - A newly allocated Op object
- *              Opcode          - Opcode to store in the Op
- *
- * RETURN:      None
- *
- * DESCRIPTION: Initialize a parse (Op) object
- *
- ******************************************************************************/
-
-void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
-{
-	ACPI_FUNCTION_ENTRY();
-
-	op->common.descriptor_type = ACPI_DESC_TYPE_PARSER;
-	op->common.aml_opcode = opcode;
-
-	ACPI_DISASM_ONLY_MEMBERS(ACPI_STRNCPY(op->common.aml_op_name,
-					      (acpi_ps_get_opcode_info
-					       (opcode))->name,
-					      sizeof(op->common.aml_op_name)));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_alloc_op
- *
- * PARAMETERS:  Opcode          - Opcode that will be stored in the new Op
- *
- * RETURN:      Pointer to the new Op, null on failure
- *
- * DESCRIPTION: Allocate an acpi_op, choose op type (and thus size) based on
- *              opcode.  A cache of opcodes is available for the pure
- *              GENERIC_OP, since this is by far the most commonly used.
- *
- ******************************************************************************/
-
-union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
-{
-	union acpi_parse_object *op;
-	const struct acpi_opcode_info *op_info;
-	u8 flags = ACPI_PARSEOP_GENERIC;
-
-	ACPI_FUNCTION_ENTRY();
-
-	op_info = acpi_ps_get_opcode_info(opcode);
-
-	/* Determine type of parse_op required */
-
-	if (op_info->flags & AML_DEFER) {
-		flags = ACPI_PARSEOP_DEFERRED;
-	} else if (op_info->flags & AML_NAMED) {
-		flags = ACPI_PARSEOP_NAMED;
-	} else if (opcode == AML_INT_BYTELIST_OP) {
-		flags = ACPI_PARSEOP_BYTELIST;
-	}
-
-	/* Allocate the minimum required size object */
-
-	if (flags == ACPI_PARSEOP_GENERIC) {
-
-		/* The generic op (default) is by far the most common (16 to 1) */
-
-		op = acpi_os_acquire_object(acpi_gbl_ps_node_cache);
-	} else {
-		/* Extended parseop */
-
-		op = acpi_os_acquire_object(acpi_gbl_ps_node_ext_cache);
-	}
-
-	/* Initialize the Op */
-
-	if (op) {
-		acpi_ps_init_op(op, opcode);
-		op->common.flags = flags;
-	}
-
-	return (op);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_free_op
- *
- * PARAMETERS:  Op              - Op to be freed
- *
- * RETURN:      None.
- *
- * DESCRIPTION: Free an Op object.  Either put it on the GENERIC_OP cache list
- *              or actually free it.
- *
- ******************************************************************************/
-
-void acpi_ps_free_op(union acpi_parse_object *op)
-{
-	ACPI_FUNCTION_NAME(ps_free_op);
-
-	if (op->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Free retval op: %p\n",
-				  op));
-	}
-
-	if (op->common.flags & ACPI_PARSEOP_GENERIC) {
-		(void)acpi_os_release_object(acpi_gbl_ps_node_cache, op);
-	} else {
-		(void)acpi_os_release_object(acpi_gbl_ps_node_ext_cache, op);
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    Utility functions
- *
- * DESCRIPTION: Low level character and object functions
- *
- ******************************************************************************/
-
-/*
- * Is "c" a namestring lead character?
- */
-u8 acpi_ps_is_leading_char(u32 c)
-{
-	return ((u8) (c == '_' || (c >= 'A' && c <= 'Z')));
-}
-
-/*
- * Is "c" a namestring prefix character?
- */
-u8 acpi_ps_is_prefix_char(u32 c)
-{
-	return ((u8) (c == '\\' || c == '^'));
-}
-
-/*
- * Get op's name (4-byte name segment) or 0 if unnamed
- */
-#ifdef ACPI_FUTURE_USAGE
-u32 acpi_ps_get_name(union acpi_parse_object * op)
-{
-
-	/* The "generic" object has no name associated with it */
-
-	if (op->common.flags & ACPI_PARSEOP_GENERIC) {
-		return (0);
-	}
-
-	/* Only the "Extended" parse objects have a name */
-
-	return (op->named.name);
-}
-#endif				/*  ACPI_FUTURE_USAGE  */
-
-/*
- * Set op's name
- */
-void acpi_ps_set_name(union acpi_parse_object *op, u32 name)
-{
-
-	/* The "generic" object has no name associated with it */
-
-	if (op->common.flags & ACPI_PARSEOP_GENERIC) {
-		return;
-	}
-
-	op->named.name = name;
-}
diff --git a/drivers/acpi/parser/pswalk.c b/drivers/acpi/parser/pswalk.c
deleted file mode 100644
index 8b86ad5..0000000
--- a/drivers/acpi/parser/pswalk.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/******************************************************************************
- *
- * Module Name: pswalk - Parser routines to walk parsed op tree(s)
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-
-#define _COMPONENT          ACPI_PARSER
-ACPI_MODULE_NAME("pswalk")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_delete_parse_tree
- *
- * PARAMETERS:  subtree_root        - Root of tree (or subtree) to delete
- *
- * RETURN:      None
- *
- * DESCRIPTION: Delete a portion of or an entire parse tree.
- *
- ******************************************************************************/
-void acpi_ps_delete_parse_tree(union acpi_parse_object *subtree_root)
-{
-	union acpi_parse_object *op = subtree_root;
-	union acpi_parse_object *next = NULL;
-	union acpi_parse_object *parent = NULL;
-
-	ACPI_FUNCTION_TRACE_PTR(ps_delete_parse_tree, subtree_root);
-
-	/* Visit all nodes in the subtree */
-
-	while (op) {
-
-		/* Check if we are not ascending */
-
-		if (op != parent) {
-
-			/* Look for an argument or child of the current op */
-
-			next = acpi_ps_get_arg(op, 0);
-			if (next) {
-
-				/* Still going downward in tree (Op is not completed yet) */
-
-				op = next;
-				continue;
-			}
-		}
-
-		/* No more children, this Op is complete. */
-
-		next = op->common.next;
-		parent = op->common.parent;
-
-		acpi_ps_free_op(op);
-
-		/* If we are back to the starting point, the walk is complete. */
-
-		if (op == subtree_root) {
-			return_VOID;
-		}
-		if (next) {
-			op = next;
-		} else {
-			op = parent;
-		}
-	}
-
-	return_VOID;
-}
diff --git a/drivers/acpi/parser/psxface.c b/drivers/acpi/parser/psxface.c
deleted file mode 100644
index 270469a..0000000
--- a/drivers/acpi/parser/psxface.c
+++ /dev/null
@@ -1,351 +0,0 @@
-/******************************************************************************
- *
- * Module Name: psxface - Parser external interfaces
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acparser.h>
-#include <acpi/acdispat.h>
-#include <acpi/acinterp.h>
-
-#define _COMPONENT          ACPI_PARSER
-ACPI_MODULE_NAME("psxface")
-
-/* Local Prototypes */
-static void acpi_ps_start_trace(struct acpi_evaluate_info *info);
-
-static void acpi_ps_stop_trace(struct acpi_evaluate_info *info);
-
-static void
-acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_debug_trace
- *
- * PARAMETERS:  method_name     - Valid ACPI name string
- *              debug_level     - Optional level mask. 0 to use default
- *              debug_layer     - Optional layer mask. 0 to use default
- *              Flags           - bit 1: one shot(1) or persistent(0)
- *
- * RETURN:      Status
- *
- * DESCRIPTION: External interface to enable debug tracing during control
- *              method execution
- *
- ******************************************************************************/
-
-acpi_status
-acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
-{
-	acpi_status status;
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	/* TBDs: Validate name, allow full path or just nameseg */
-
-	acpi_gbl_trace_method_name = *ACPI_CAST_PTR(u32, name);
-	acpi_gbl_trace_flags = flags;
-
-	if (debug_level) {
-		acpi_gbl_trace_dbg_level = debug_level;
-	}
-	if (debug_layer) {
-		acpi_gbl_trace_dbg_layer = debug_layer;
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_start_trace
- *
- * PARAMETERS:  Info        - Method info struct
- *
- * RETURN:      None
- *
- * DESCRIPTION: Start control method execution trace
- *
- ******************************************************************************/
-
-static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_ENTRY();
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return;
-	}
-
-	if ((!acpi_gbl_trace_method_name) ||
-	    (acpi_gbl_trace_method_name != info->resolved_node->name.integer)) {
-		goto exit;
-	}
-
-	acpi_gbl_original_dbg_level = acpi_dbg_level;
-	acpi_gbl_original_dbg_layer = acpi_dbg_layer;
-
-	acpi_dbg_level = 0x00FFFFFF;
-	acpi_dbg_layer = ACPI_UINT32_MAX;
-
-	if (acpi_gbl_trace_dbg_level) {
-		acpi_dbg_level = acpi_gbl_trace_dbg_level;
-	}
-	if (acpi_gbl_trace_dbg_layer) {
-		acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
-	}
-
-      exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_stop_trace
- *
- * PARAMETERS:  Info        - Method info struct
- *
- * RETURN:      None
- *
- * DESCRIPTION: Stop control method execution trace
- *
- ******************************************************************************/
-
-static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_ENTRY();
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return;
-	}
-
-	if ((!acpi_gbl_trace_method_name) ||
-	    (acpi_gbl_trace_method_name != info->resolved_node->name.integer)) {
-		goto exit;
-	}
-
-	/* Disable further tracing if type is one-shot */
-
-	if (acpi_gbl_trace_flags & 1) {
-		acpi_gbl_trace_method_name = 0;
-		acpi_gbl_trace_dbg_level = 0;
-		acpi_gbl_trace_dbg_layer = 0;
-	}
-
-	acpi_dbg_level = acpi_gbl_original_dbg_level;
-	acpi_dbg_layer = acpi_gbl_original_dbg_layer;
-
-      exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_execute_method
- *
- * PARAMETERS:  Info            - Method info block, contains:
- *                  Node            - Method Node to execute
- *                  obj_desc        - Method object
- *                  Parameters      - List of parameters to pass to the method,
- *                                    terminated by NULL. Params itself may be
- *                                    NULL if no parameters are being passed.
- *                  return_object   - Where to put method's return value (if
- *                                    any). If NULL, no value is returned.
- *                  parameter_type  - Type of Parameter list
- *                  pass_number     - Parse or execute pass
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Execute a control method
- *
- ******************************************************************************/
-
-acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
-{
-	acpi_status status;
-	union acpi_parse_object *op;
-	struct acpi_walk_state *walk_state;
-
-	ACPI_FUNCTION_TRACE(ps_execute_method);
-
-	/* Validate the Info and method Node */
-
-	if (!info || !info->resolved_node) {
-		return_ACPI_STATUS(AE_NULL_ENTRY);
-	}
-
-	/* Init for new method, wait on concurrency semaphore */
-
-	status =
-	    acpi_ds_begin_method_execution(info->resolved_node, info->obj_desc,
-					   NULL);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * The caller "owns" the parameters, so give each one an extra reference
-	 */
-	acpi_ps_update_parameter_list(info, REF_INCREMENT);
-
-	/* Begin tracing if requested */
-
-	acpi_ps_start_trace(info);
-
-	/*
-	 * Execute the method. Performs parse simultaneously
-	 */
-	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
-			  "**** Begin Method Parse/Execute [%4.4s] **** Node=%p Obj=%p\n",
-			  info->resolved_node->name.ascii, info->resolved_node,
-			  info->obj_desc));
-
-	/* Create and init a Root Node */
-
-	op = acpi_ps_create_scope_op();
-	if (!op) {
-		status = AE_NO_MEMORY;
-		goto cleanup;
-	}
-
-	/* Create and initialize a new walk state */
-
-	info->pass_number = ACPI_IMODE_EXECUTE;
-	walk_state =
-	    acpi_ds_create_walk_state(info->obj_desc->method.owner_id, NULL,
-				      NULL, NULL);
-	if (!walk_state) {
-		status = AE_NO_MEMORY;
-		goto cleanup;
-	}
-
-	status = acpi_ds_init_aml_walk(walk_state, op, info->resolved_node,
-				       info->obj_desc->method.aml_start,
-				       info->obj_desc->method.aml_length, info,
-				       info->pass_number);
-	if (ACPI_FAILURE(status)) {
-		acpi_ds_delete_walk_state(walk_state);
-		goto cleanup;
-	}
-
-	/* Parse the AML */
-
-	status = acpi_ps_parse_aml(walk_state);
-
-	/* walk_state was deleted by parse_aml */
-
-      cleanup:
-	acpi_ps_delete_parse_tree(op);
-
-	/* End optional tracing */
-
-	acpi_ps_stop_trace(info);
-
-	/* Take away the extra reference that we gave the parameters above */
-
-	acpi_ps_update_parameter_list(info, REF_DECREMENT);
-
-	/* Exit now if error above */
-
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * If the method has returned an object, signal this to the caller with
-	 * a control exception code
-	 */
-	if (info->return_object) {
-		ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Method returned ObjDesc=%p\n",
-				  info->return_object));
-		ACPI_DUMP_STACK_ENTRY(info->return_object);
-
-		status = AE_CTRL_RETURN_VALUE;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_update_parameter_list
- *
- * PARAMETERS:  Info            - See struct acpi_evaluate_info
- *                                (Used: parameter_type and Parameters)
- *              Action          - Add or Remove reference
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Update reference count on all method parameter objects
- *
- ******************************************************************************/
-
-static void
-acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action)
-{
-	u32 i;
-
-	if (info->parameters) {
-
-		/* Update reference count for each parameter */
-
-		for (i = 0; info->parameters[i]; i++) {
-
-			/* Ignore errors, just do them all */
-
-			(void)acpi_ut_update_object_reference(info->
-							      parameters[i],
-							      action);
-		}
-	}
-}
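
acpi_ps_execute_method() above was the interpreter end of the public evaluation path; kernel code normally reaches it through acpi_evaluate_object(). A hedged usage sketch (kernel context assumed; the method name "MTHD" and its single integer argument are made up, only the calling pattern matters):

static acpi_status example_eval_method(acpi_handle handle)
{
	union acpi_object arg;
	struct acpi_object_list args = { 1, &arg };
	struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	arg.type = ACPI_TYPE_INTEGER;
	arg.integer.value = 1;

	status = acpi_evaluate_object(handle, "MTHD", &args, &result);
	if (ACPI_SUCCESS(status)) {
		/* result.pointer is a union acpi_object the caller must free */
		kfree(result.pointer);
	}
	return status;
}
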
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index 4b252ea..95650f8 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -99,7 +99,7 @@
 	 */
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "Device %s has PCI address %02x:%02x:%02x.%02x\n",
+			  "Device %s has PCI address %04x:%02x:%02x.%d\n",
 			  acpi_device_bid(device), id->segment, id->bus,
 			  id->device, id->function));
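
The new format string matches the usual PCI segment:bus:device.function notation (16-bit segment printed with four hex digits, decimal function number), so the debug output lines up with device names such as 0000:00:1f.3 used elsewhere in the kernel. For instance:

	/* segment 0, bus 0, device 0x1f, function 3 prints as "0000:00:1f.3" */
	printk(KERN_DEBUG "%04x:%02x:%02x.%d\n", 0, 0, 0x1f, 3);
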
 
@@ -111,12 +111,11 @@
 int acpi_pci_bind(struct acpi_device *device)
 {
 	int result = 0;
-	acpi_status status = AE_OK;
-	struct acpi_pci_data *data = NULL;
-	struct acpi_pci_data *pdata = NULL;
-	char *pathname = NULL;
-	struct acpi_buffer buffer = { 0, NULL };
-	acpi_handle handle = NULL;
+	acpi_status status;
+	struct acpi_pci_data *data;
+	struct acpi_pci_data *pdata;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	acpi_handle handle;
 	struct pci_dev *dev;
 	struct pci_bus *bus;
 
@@ -124,21 +123,18 @@
 	if (!device || !device->parent)
 		return -EINVAL;
 
-	pathname = kzalloc(ACPI_PATHNAME_MAX, GFP_KERNEL);
-	if (!pathname)
-		return -ENOMEM;
-	buffer.length = ACPI_PATHNAME_MAX;
-	buffer.pointer = pathname;
-
 	data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL);
-	if (!data) {
-		kfree(pathname);
+	if (!data)
 		return -ENOMEM;
+
+	status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
+	if (ACPI_FAILURE(status)) {
+		kfree(data);
+		return -ENODEV;
 	}
 
-	acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI device [%s]...\n",
-			  pathname));
+			  (char *)buffer.pointer));
 
 	/* 
 	 * Segment & Bus
@@ -166,7 +162,7 @@
 	data->id.device = device->pnp.bus_address >> 16;
 	data->id.function = device->pnp.bus_address & 0xFFFF;
 
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "...to %02x:%02x:%02x.%02x\n",
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "...to %04x:%02x:%02x.%d\n",
 			  data->id.segment, data->id.bus, data->id.device,
 			  data->id.function));
 
@@ -196,7 +192,7 @@
 	}
 	if (!data->dev) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "Device %02x:%02x:%02x.%02x not present in PCI namespace\n",
+				  "Device %04x:%02x:%02x.%d not present in PCI namespace\n",
 				  data->id.segment, data->id.bus,
 				  data->id.device, data->id.function));
 		result = -ENODEV;
@@ -204,7 +200,7 @@
 	}
 	if (!data->dev->bus) {
 		printk(KERN_ERR PREFIX
-			    "Device %02x:%02x:%02x.%02x has invalid 'bus' field\n",
+			    "Device %04x:%02x:%02x.%d has invalid 'bus' field\n",
 			    data->id.segment, data->id.bus,
 			    data->id.device, data->id.function);
 		result = -ENODEV;
@@ -219,7 +215,7 @@
 	 */
 	if (data->dev->subordinate) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "Device %02x:%02x:%02x.%02x is a PCI bridge\n",
+				  "Device %04x:%02x:%02x.%d is a PCI bridge\n",
 				  data->id.segment, data->id.bus,
 				  data->id.device, data->id.function));
 		data->bus = data->dev->subordinate;
@@ -262,7 +258,7 @@
 	}
 
       end:
-	kfree(pathname);
+	kfree(buffer.pointer);
 	if (result)
 		kfree(data);
 
@@ -272,25 +268,21 @@
 static int acpi_pci_unbind(struct acpi_device *device)
 {
 	int result = 0;
-	acpi_status status = AE_OK;
-	struct acpi_pci_data *data = NULL;
-	char *pathname = NULL;
-	struct acpi_buffer buffer = { 0, NULL };
+	acpi_status status;
+	struct acpi_pci_data *data;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 
 
 	if (!device || !device->parent)
 		return -EINVAL;
 
-	pathname = kzalloc(ACPI_PATHNAME_MAX, GFP_KERNEL);
-	if (!pathname)
-		return -ENOMEM;
+	status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
 
-	buffer.length = ACPI_PATHNAME_MAX;
-	buffer.pointer = pathname;
-	acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unbinding PCI device [%s]...\n",
-			  pathname));
-	kfree(pathname);
+			  (char *) buffer.pointer));
+	kfree(buffer.pointer);
 
 	status =
 	    acpi_get_data(device->handle, acpi_pci_data_handler,
@@ -322,50 +314,44 @@
 		   struct acpi_pci_id *id, struct pci_bus *bus)
 {
 	int result = 0;
-	acpi_status status = AE_OK;
+	acpi_status status;
 	struct acpi_pci_data *data = NULL;
-	char *pathname = NULL;
-	struct acpi_buffer buffer = { 0, NULL };
-
-	pathname = kzalloc(ACPI_PATHNAME_MAX, GFP_KERNEL);
-	if (!pathname)
-		return -ENOMEM;
-
-	buffer.length = ACPI_PATHNAME_MAX;
-	buffer.pointer = pathname;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 
 	if (!device || !id || !bus) {
-		kfree(pathname);
 		return -EINVAL;
 	}
 
 	data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL);
-	if (!data) {
-		kfree(pathname);
+	if (!data)
 		return -ENOMEM;
-	}
 
 	data->id = *id;
 	data->bus = bus;
 	device->ops.bind = acpi_pci_bind;
 	device->ops.unbind = acpi_pci_unbind;
 
-	acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
+	status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
+	if (ACPI_FAILURE(status)) {
+		kfree (data);
+		return -ENODEV;
+	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI root bridge [%s] to "
-			  "%02x:%02x\n", pathname, id->segment, id->bus));
+			"%04x:%02x\n", (char *)buffer.pointer,
+			id->segment, id->bus));
 
 	status = acpi_attach_data(device->handle, acpi_pci_data_handler, data);
 	if (ACPI_FAILURE(status)) {
 		ACPI_EXCEPTION((AE_INFO, status,
 				"Unable to attach ACPI-PCI context to device %s",
-				pathname));
+				(char *)buffer.pointer));
 		result = -ENODEV;
 		goto end;
 	}
 
       end:
-	kfree(pathname);
+	kfree(buffer.pointer);
 	if (result != 0)
 		kfree(data);
 
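
The pci_bind.c changes above drop the fixed ACPI_PATHNAME_MAX allocations and instead initialize the acpi_buffer with ACPI_ALLOCATE_BUFFER, so acpi_get_name() sizes and allocates the string and the caller frees buffer.pointer. Below is a minimal userspace sketch of that ownership convention only; the ex_* names are illustrative stand-ins, not the real ACPICA API.

/*
 * Standalone model of the "length = ACPI_ALLOCATE_BUFFER" convention used
 * above: the callee sizes and allocates the buffer, the caller frees
 * buffer.pointer.  Names here are illustrative, not ACPICA types.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EX_ALLOCATE_BUFFER ((size_t)-1)	/* "please allocate for me" marker */

struct ex_buffer {
	size_t length;		/* in: max size or EX_ALLOCATE_BUFFER; out: bytes used */
	void *pointer;		/* out: allocated by the callee when requested */
};

/* Callee: produce a pathname string, allocating storage on demand. */
static int ex_get_name(const char *path, struct ex_buffer *buf)
{
	size_t need = strlen(path) + 1;

	if (buf->length == EX_ALLOCATE_BUFFER) {
		buf->pointer = malloc(need);
		if (!buf->pointer)
			return -1;
	} else if (buf->length < need) {
		return -1;	/* caller-supplied buffer too small */
	}
	memcpy(buf->pointer, path, need);
	buf->length = need;
	return 0;
}

int main(void)
{
	struct ex_buffer buffer = { EX_ALLOCATE_BUFFER, NULL };

	if (ex_get_name("\\_SB_.PCI0", &buffer) == 0) {
		printf("Binding PCI device [%s]...\n", (char *)buffer.pointer);
		free(buffer.pointer);	/* caller owns the allocation */
	}
	return 0;
}
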
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index bf79d83..891bdf6 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -4,6 +4,8 @@
  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  *  Copyright (C) 2002       Dominik Brodowski <devel@brodo.de>
+ *  (c) Copyright 2008 Hewlett-Packard Development Company, L.P.
+ *	Bjorn Helgaas <bjorn.helgaas@hp.com>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -41,29 +43,36 @@
 #define _COMPONENT		ACPI_PCI_COMPONENT
 ACPI_MODULE_NAME("pci_irq");
 
-static struct acpi_prt_list acpi_prt;
+struct acpi_prt_entry {
+	struct list_head	list;
+	struct acpi_pci_id	id;
+	u8			pin;
+	acpi_handle		link;
+	u32			index;		/* GSI, or link _CRS index */
+};
+
+static LIST_HEAD(acpi_prt_list);
 static DEFINE_SPINLOCK(acpi_prt_lock);
 
+static inline char pin_name(int pin)
+{
+	return 'A' + pin - 1;
+}
+
 /* --------------------------------------------------------------------------
                          PCI IRQ Routing Table (PRT) Support
    -------------------------------------------------------------------------- */
 
-static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(int segment,
-							  int bus,
-							  int device, int pin)
+static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(struct pci_dev *dev,
+							  int pin)
 {
-	struct acpi_prt_entry *entry = NULL;
+	struct acpi_prt_entry *entry;
+	int segment = pci_domain_nr(dev->bus);
+	int bus = dev->bus->number;
+	int device = PCI_SLOT(dev->devfn);
 
-	if (!acpi_prt.count)
-		return NULL;
-
-	/*
-	 * Parse through all PRT entries looking for a match on the specified
-	 * PCI device's segment, bus, device, and pin (don't care about func).
-	 *
-	 */
 	spin_lock(&acpi_prt_lock);
-	list_for_each_entry(entry, &acpi_prt.entries, node) {
+	list_for_each_entry(entry, &acpi_prt_list, list) {
 		if ((segment == entry->id.segment)
 		    && (bus == entry->id.bus)
 		    && (device == entry->id.device)
@@ -72,7 +81,6 @@
 			return entry;
 		}
 	}
-
 	spin_unlock(&acpi_prt_lock);
 	return NULL;
 }
@@ -124,25 +132,27 @@
 	char			*actual_source;
 };
 
+#define PCI_INTX_PIN(c)		(c - 'A' + 1)
+
 /*
  * These systems have incorrect _PRT entries.  The BIOS claims the PCI
  * interrupt at the listed segment/bus/device/pin is connected to the first
  * link device, but it is actually connected to the second.
  */
 static struct prt_quirk prt_quirks[] = {
-	{ medion_md9580, 0, 0, 9, 'A',
+	{ medion_md9580, 0, 0, 9, PCI_INTX_PIN('A'),
 		"\\_SB_.PCI0.ISA_.LNKA",
 		"\\_SB_.PCI0.ISA_.LNKB"},
-	{ dell_optiplex, 0, 0, 0xd, 'A',
+	{ dell_optiplex, 0, 0, 0xd, PCI_INTX_PIN('A'),
 		"\\_SB_.LNKB",
 		"\\_SB_.LNKA"},
-	{ hp_t5710, 0, 0, 1, 'A',
+	{ hp_t5710, 0, 0, 1, PCI_INTX_PIN('A'),
 		"\\_SB_.PCI0.LNK1",
 		"\\_SB_.PCI0.LNK3"},
 };
 
-static void
-do_prt_fixups(struct acpi_prt_entry *entry, struct acpi_pci_routing_table *prt)
+static void do_prt_fixups(struct acpi_prt_entry *entry,
+			  struct acpi_pci_routing_table *prt)
 {
 	int i;
 	struct prt_quirk *quirk;
@@ -158,42 +168,43 @@
 		    entry->id.segment == quirk->segment &&
 		    entry->id.bus == quirk->bus &&
 		    entry->id.device == quirk->device &&
-		    entry->pin + 'A' == quirk->pin &&
+		    entry->pin == quirk->pin &&
 		    !strcmp(prt->source, quirk->source) &&
 		    strlen(prt->source) >= strlen(quirk->actual_source)) {
 			printk(KERN_WARNING PREFIX "firmware reports "
 				"%04x:%02x:%02x PCI INT %c connected to %s; "
 				"changing to %s\n",
 				entry->id.segment, entry->id.bus,
-				entry->id.device, 'A' + entry->pin,
+				entry->id.device, pin_name(entry->pin),
 				prt->source, quirk->actual_source);
 			strcpy(prt->source, quirk->actual_source);
 		}
 	}
 }
 
-static int
-acpi_pci_irq_add_entry(acpi_handle handle,
-		       int segment, int bus, struct acpi_pci_routing_table *prt)
+static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
+				  struct acpi_pci_routing_table *prt)
 {
-	struct acpi_prt_entry *entry = NULL;
-
-
-	if (!prt)
-		return -EINVAL;
+	struct acpi_prt_entry *entry;
 
 	entry = kzalloc(sizeof(struct acpi_prt_entry), GFP_KERNEL);
 	if (!entry)
 		return -ENOMEM;
 
+	/*
+	 * Note that the _PRT uses 0=INTA, 1=INTB, etc, while PCI uses
+	 * 1=INTA, 2=INTB, etc.  We use the PCI encoding throughout, so convert
+	 * it here.
+	 */
 	entry->id.segment = segment;
 	entry->id.bus = bus;
 	entry->id.device = (prt->address >> 16) & 0xFFFF;
-	entry->id.function = prt->address & 0xFFFF;
-	entry->pin = prt->pin;
+	entry->pin = prt->pin + 1;
 
 	do_prt_fixups(entry, prt);
 
+	entry->index = prt->source_index;
+
 	/*
 	 * Type 1: Dynamic
 	 * ---------------
@@ -207,10 +218,9 @@
 	 *       (e.g. exists somewhere 'below' this _PRT entry in the ACPI
 	 *       namespace).
 	 */
-	if (prt->source[0]) {
-		acpi_get_handle(handle, prt->source, &entry->link.handle);
-		entry->link.index = prt->source_index;
-	}
+	if (prt->source[0])
+		acpi_get_handle(handle, prt->source, &entry->link);
+
 	/*
 	 * Type 2: Static
 	 * --------------
@@ -218,84 +228,38 @@
 	 * the IRQ value, which is hardwired to specific interrupt inputs on
 	 * the interrupt controller.
 	 */
-	else
-		entry->link.index = prt->source_index;
 
 	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO,
-			      "      %02X:%02X:%02X[%c] -> %s[%d]\n",
+			      "      %04x:%02x:%02x[%c] -> %s[%d]\n",
 			      entry->id.segment, entry->id.bus,
-			      entry->id.device, ('A' + entry->pin), prt->source,
-			      entry->link.index));
+			      entry->id.device, pin_name(entry->pin),
+			      prt->source, entry->index));
 
 	spin_lock(&acpi_prt_lock);
-	list_add_tail(&entry->node, &acpi_prt.entries);
-	acpi_prt.count++;
+	list_add_tail(&entry->list, &acpi_prt_list);
 	spin_unlock(&acpi_prt_lock);
 
 	return 0;
 }
 
-static void
-acpi_pci_irq_del_entry(int segment, int bus, struct acpi_prt_entry *entry)
-{
-	if (segment == entry->id.segment && bus == entry->id.bus) {
-		acpi_prt.count--;
-		list_del(&entry->node);
-		kfree(entry);
-	}
-}
-
 int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
 {
-	acpi_status status = AE_OK;
-	char *pathname = NULL;
-	struct acpi_buffer buffer = { 0, NULL };
-	struct acpi_pci_routing_table *prt = NULL;
-	struct acpi_pci_routing_table *entry = NULL;
-	static int first_time = 1;
+	acpi_status status;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_pci_routing_table *entry;
 
-
-	pathname = kzalloc(ACPI_PATHNAME_MAX, GFP_KERNEL);
-	if (!pathname)
-		return -ENOMEM;
-
-	if (first_time) {
-		acpi_prt.count = 0;
-		INIT_LIST_HEAD(&acpi_prt.entries);
-		first_time = 0;
-	}
-
-	/* 
-	 * NOTE: We're given a 'handle' to the _PRT object's parent device
-	 *       (either a PCI root bridge or PCI-PCI bridge).
-	 */
-
-	buffer.length = ACPI_PATHNAME_MAX;
-	buffer.pointer = pathname;
-	acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+	/* 'handle' is the _PRT's parent (root bridge or PCI-PCI bridge) */
+	status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
 
 	printk(KERN_DEBUG "ACPI: PCI Interrupt Routing Table [%s._PRT]\n",
-	       pathname);
+	       (char *) buffer.pointer);
 
-	/* 
-	 * Evaluate this _PRT and add its entries to our global list (acpi_prt).
-	 */
+	kfree(buffer.pointer);
 
-	buffer.length = 0;
+	buffer.length = ACPI_ALLOCATE_BUFFER;
 	buffer.pointer = NULL;
-	kfree(pathname);
-	status = acpi_get_irq_routing_table(handle, &buffer);
-	if (status != AE_BUFFER_OVERFLOW) {
-		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRT [%s]",
-				acpi_format_exception(status)));
-		return -ENODEV;
-	}
-
-	prt = kzalloc(buffer.length, GFP_KERNEL);
-	if (!prt) {
-		return -ENOMEM;
-	}
-	buffer.pointer = prt;
 
 	status = acpi_get_irq_routing_table(handle, &buffer);
 	if (ACPI_FAILURE(status)) {
@@ -305,36 +269,30 @@
 		return -ENODEV;
 	}
 
-	entry = prt;
-
+	entry = buffer.pointer;
 	while (entry && (entry->length > 0)) {
 		acpi_pci_irq_add_entry(handle, segment, bus, entry);
 		entry = (struct acpi_pci_routing_table *)
 		    ((unsigned long)entry + entry->length);
 	}
 
-	kfree(prt);
-
+	kfree(buffer.pointer);
 	return 0;
 }
 
 void acpi_pci_irq_del_prt(int segment, int bus)
 {
-	struct list_head *node = NULL, *n = NULL;
-	struct acpi_prt_entry *entry = NULL;
-
-	if (!acpi_prt.count) {
-		return;
-	}
+	struct acpi_prt_entry *entry, *tmp;
 
 	printk(KERN_DEBUG
-	       "ACPI: Delete PCI Interrupt Routing Table for %x:%x\n", segment,
-	       bus);
+	       "ACPI: Delete PCI Interrupt Routing Table for %04x:%02x\n",
+	       segment, bus);
 	spin_lock(&acpi_prt_lock);
-	list_for_each_safe(node, n, &acpi_prt.entries) {
-		entry = list_entry(node, struct acpi_prt_entry, node);
-
-		acpi_pci_irq_del_entry(segment, bus, entry);
+	list_for_each_entry_safe(entry, tmp, &acpi_prt_list, list) {
+		if (segment == entry->id.segment && bus == entry->id.bus) {
+			list_del(&entry->list);
+			kfree(entry);
+		}
 	}
 	spin_unlock(&acpi_prt_lock);
 }
@@ -342,162 +300,26 @@
 /* --------------------------------------------------------------------------
                           PCI Interrupt Routing Support
    -------------------------------------------------------------------------- */
-typedef int (*irq_lookup_func) (struct acpi_prt_entry *, int *, int *, char **);
-
-static int
-acpi_pci_allocate_irq(struct acpi_prt_entry *entry,
-		      int *triggering, int *polarity, char **link)
+static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
 {
-	int irq;
+	struct acpi_prt_entry *entry;
+	struct pci_dev *bridge;
+	u8 bridge_pin, orig_pin = pin;
 
-
-	if (entry->link.handle) {
-		irq = acpi_pci_link_allocate_irq(entry->link.handle,
-						 entry->link.index, triggering,
-						 polarity, link);
-		if (irq < 0) {
-			printk(KERN_WARNING PREFIX
-				      "Invalid IRQ link routing entry\n");
-			return -1;
-		}
-	} else {
-		irq = entry->link.index;
-		*triggering = ACPI_LEVEL_SENSITIVE;
-		*polarity = ACPI_ACTIVE_LOW;
+	entry = acpi_pci_irq_find_prt_entry(dev, pin);
+	if (entry) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s[%c] _PRT entry\n",
+				  pci_name(dev), pin_name(pin)));
+		return entry;
 	}
 
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found IRQ %d\n", irq));
-	return irq;
-}
-
-static int
-acpi_pci_free_irq(struct acpi_prt_entry *entry,
-		  int *triggering, int *polarity, char **link)
-{
-	int irq;
-
-	if (entry->link.handle) {
-		irq = acpi_pci_link_free_irq(entry->link.handle);
-	} else {
-		irq = entry->link.index;
-	}
-	return irq;
-}
-
-#ifdef CONFIG_X86_IO_APIC
-extern int noioapicquirk;
-
-static int bridge_has_boot_interrupt_variant(struct pci_bus *bus)
-{
-	struct pci_bus *bus_it;
-
-	for (bus_it = bus ; bus_it ; bus_it = bus_it->parent) {
-		if (!bus_it->self)
-			return 0;
-
-		printk(KERN_INFO "vendor=%04x device=%04x\n", bus_it->self->vendor,
-				bus_it->self->device);
-
-		if (bus_it->self->irq_reroute_variant)
-			return bus_it->self->irq_reroute_variant;
-	}
-	return 0;
-}
-#endif /* CONFIG_X86_IO_APIC */
-
-/*
- * acpi_pci_irq_lookup
- * success: return IRQ >= 0
- * failure: return -1
- */
-static int
-acpi_pci_irq_lookup(struct pci_bus *bus,
-		    int device,
-		    int pin,
-		    int *triggering,
-		    int *polarity, char **link, irq_lookup_func func)
-{
-	struct acpi_prt_entry *entry = NULL;
-	int segment = pci_domain_nr(bus);
-	int bus_nr = bus->number;
-	int ret;
-
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "Searching for PRT entry for %02x:%02x:%02x[%c]\n",
-			  segment, bus_nr, device, ('A' + pin)));
-
-	entry = acpi_pci_irq_find_prt_entry(segment, bus_nr, device, pin);
-	if (!entry) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PRT entry not found\n"));
-		return -1;
-	}
-
-	ret = func(entry, triggering, polarity, link);
-
-#ifdef CONFIG_X86_IO_APIC
-	/*
-	 * Some chipsets (e.g. intel 6700PXH) generate a legacy INTx when the
-	 * IRQ entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel
-	 * does during interrupt handling). When this INTx generation cannot be
-	 * disabled, we reroute these interrupts to their legacy equivalent to
-	 * get rid of spurious interrupts.
-	 */
-        if (!noioapicquirk) {
-		switch (bridge_has_boot_interrupt_variant(bus)) {
-		case 0:
-			/* no rerouting necessary */
-			break;
-
-		case INTEL_IRQ_REROUTE_VARIANT:
-			/*
-			 * Remap according to INTx routing table in 6700PXH
-			 * specs, intel order number 302628-002, section
-			 * 2.15.2. Other chipsets (80332, ...) have the same
-			 * mapping and are handled here as well.
-			 */
-			printk(KERN_INFO "pci irq %d -> rerouted to legacy "
-					 "irq %d\n", ret, (ret % 4) + 16);
-			ret = (ret % 4) + 16;
-			break;
-
-		default:
-			printk(KERN_INFO "not rerouting irq %d to legacy irq: "
-					 "unknown mapping\n", ret);
-			break;
-		}
-	}
-#endif /* CONFIG_X86_IO_APIC */
-
-	return ret;
-}
-
-/*
- * acpi_pci_irq_derive
- * success: return IRQ >= 0
- * failure: return < 0
- */
-static int
-acpi_pci_irq_derive(struct pci_dev *dev,
-		    int pin,
-		    int *triggering,
-		    int *polarity, char **link, irq_lookup_func func)
-{
-	struct pci_dev *bridge = dev;
-	int irq = -1;
-	u8 bridge_pin = 0, orig_pin = pin;
-
-
-	if (!dev)
-		return -EINVAL;
-
 	/* 
 	 * Attempt to derive an IRQ for this device from a parent bridge's
 	 * PCI interrupt routing entry (eg. yenta bridge and add-in card bridge).
 	 */
-	while (irq < 0 && bridge->bus->self) {
-		pin = (pin + PCI_SLOT(bridge->devfn)) % 4;
-		bridge = bridge->bus->self;
+	bridge = dev->bus->self;
+	while (bridge) {
+		pin = (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
 
 		if ((bridge->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS) {
 			/* PC card has the same IRQ as its cardbridge */
@@ -506,50 +328,40 @@
 				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 						  "No interrupt pin configured for device %s\n",
 						  pci_name(bridge)));
-				return -1;
+				return NULL;
 			}
-			/* Pin is from 0 to 3 */
-			bridge_pin--;
 			pin = bridge_pin;
 		}
 
-		irq = acpi_pci_irq_lookup(bridge->bus, PCI_SLOT(bridge->devfn),
-					  pin, triggering, polarity,
-					  link, func);
+		entry = acpi_pci_irq_find_prt_entry(bridge, pin);
+		if (entry) {
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+					 "Derived GSI for %s INT %c from %s\n",
+					 pci_name(dev), pin_name(orig_pin),
+					 pci_name(bridge)));
+			return entry;
+		}
+
+		dev = bridge;
+		bridge = dev->bus->self;
 	}
 
-	if (irq < 0) {
-		dev_warn(&dev->dev, "can't derive routing for PCI INT %c\n",
-			 'A' + orig_pin);
-		return -1;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Derive IRQ %d for device %s from %s\n",
-			  irq, pci_name(dev), pci_name(bridge)));
-
-	return irq;
+	dev_warn(&dev->dev, "can't derive routing for PCI INT %c\n",
+		 pin_name(orig_pin));
+	return NULL;
 }
 
-/*
- * acpi_pci_irq_enable
- * success: return 0
- * failure: return < 0
- */
-
 int acpi_pci_irq_enable(struct pci_dev *dev)
 {
-	int irq = 0;
-	u8 pin = 0;
+	struct acpi_prt_entry *entry;
+	int gsi;
+	u8 pin;
 	int triggering = ACPI_LEVEL_SENSITIVE;
 	int polarity = ACPI_ACTIVE_LOW;
 	char *link = NULL;
 	char link_desc[16];
 	int rc;
 
-
-	if (!dev)
-		return -EINVAL;
-
 	pin = dev->pin;
 	if (!pin) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -557,31 +369,9 @@
 				  pci_name(dev)));
 		return 0;
 	}
-	pin--;
 
-	if (!dev->bus) {
-		dev_err(&dev->dev, "invalid (NULL) 'bus' field\n");
-		return -ENODEV;
-	}
-
-	/* 
-	 * First we check the PCI IRQ routing table (PRT) for an IRQ.  PRT
-	 * values override any BIOS-assigned IRQs set during boot.
-	 */
-	irq = acpi_pci_irq_lookup(dev->bus, PCI_SLOT(dev->devfn), pin,
-				  &triggering, &polarity, &link,
-				  acpi_pci_allocate_irq);
-
-	/*
-	 * If no PRT entry was found, we'll try to derive an IRQ from the
-	 * device's parent bridge.
-	 */
-	if (irq < 0)
-		irq = acpi_pci_irq_derive(dev, pin, &triggering,
-					  &polarity, &link,
-					  acpi_pci_allocate_irq);
-
-	if (irq < 0) {
+	entry = acpi_pci_irq_lookup(dev, pin);
+	if (!entry) {
 		/*
 		 * IDE legacy mode controller IRQs are magic. Why do compat
 		 * extensions always make such a nasty mess.
@@ -590,12 +380,24 @@
 				(dev->class & 0x05) == 0)
 			return 0;
 	}
+
+	if (entry) {
+		if (entry->link)
+			gsi = acpi_pci_link_allocate_irq(entry->link,
+							 entry->index,
+							 &triggering, &polarity,
+							 &link);
+		else
+			gsi = entry->index;
+	} else
+		gsi = -1;
+
 	/*
 	 * No IRQ known to the ACPI subsystem - maybe the BIOS / 
 	 * driver reported one, then use it. Exit in any case.
 	 */
-	if (irq < 0) {
-		dev_warn(&dev->dev, "PCI INT %c: no GSI", 'A' + pin);
+	if (gsi < 0) {
+		dev_warn(&dev->dev, "PCI INT %c: no GSI", pin_name(pin));
 		/* Interrupt Line values above 0xF are forbidden */
 		if (dev->irq > 0 && (dev->irq <= 0xF)) {
 			printk(" - using IRQ %d\n", dev->irq);
@@ -608,10 +410,10 @@
 		}
 	}
 
-	rc = acpi_register_gsi(irq, triggering, polarity);
+	rc = acpi_register_gsi(gsi, triggering, polarity);
 	if (rc < 0) {
 		dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n",
-			 'A' + pin);
+			 pin_name(pin));
 		return rc;
 	}
 	dev->irq = rc;
@@ -622,7 +424,7 @@
 		link_desc[0] = '\0';
 
 	dev_info(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
-		 'A' + pin, link_desc, irq,
+		 pin_name(pin), link_desc, gsi,
 		 (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
 		 (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
 
@@ -636,42 +438,28 @@
 
 void acpi_pci_irq_disable(struct pci_dev *dev)
 {
-	int gsi = 0;
-	u8 pin = 0;
-	int triggering = ACPI_LEVEL_SENSITIVE;
-	int polarity = ACPI_ACTIVE_LOW;
-
-
-	if (!dev || !dev->bus)
-		return;
+	struct acpi_prt_entry *entry;
+	int gsi;
+	u8 pin;
 
 	pin = dev->pin;
 	if (!pin)
 		return;
-	pin--;
 
-	/*
-	 * First we check the PCI IRQ routing table (PRT) for an IRQ.
-	 */
-	gsi = acpi_pci_irq_lookup(dev->bus, PCI_SLOT(dev->devfn), pin,
-				  &triggering, &polarity, NULL,
-				  acpi_pci_free_irq);
-	/*
-	 * If no PRT entry was found, we'll try to derive an IRQ from the
-	 * device's parent bridge.
-	 */
-	if (gsi < 0)
-		gsi = acpi_pci_irq_derive(dev, pin,
-					  &triggering, &polarity, NULL,
-					  acpi_pci_free_irq);
-	if (gsi < 0)
+	entry = acpi_pci_irq_lookup(dev, pin);
+	if (!entry)
 		return;
 
+	if (entry->link)
+		gsi = acpi_pci_link_free_irq(entry->link);
+	else
+		gsi = entry->index;
+
 	/*
 	 * TBD: It might be worth clearing dev->irq by magic constant
 	 * (e.g. PCI_UNDEFINED_IRQ).
 	 */
 
-	dev_info(&dev->dev, "PCI INT %c disabled\n", 'A' + pin);
+	dev_info(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
 	acpi_unregister_gsi(gsi);
 }
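
The pci_irq.c rework above keeps INTx pins in the PCI encoding (1=INTA .. 4=INTD), converts from the _PRT's 0-based encoding when entries are added, and swizzles the pin by slot number while walking up through bridges. The following standalone sketch covers just that arithmetic; the devfn value is hypothetical and chosen for illustration.

/*
 * Standalone sketch of the INTx pin conventions used above: _PRT encodes
 * 0=INTA..3=INTD, PCI config space uses 1=INTA..4=INTD, and a device behind
 * a PCI-PCI bridge has its pin "swizzled" by its slot number.
 */
#include <stdio.h>

#define EX_PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)	/* mirrors PCI_SLOT() */

static char pin_name(int pin)			/* PCI encoding: 1=INTA */
{
	return 'A' + pin - 1;
}

static int prt_to_pci_pin(int prt_pin)		/* _PRT encoding: 0=INTA */
{
	return prt_pin + 1;
}

/* Pin seen at the parent bridge for a device using 'pin' in slot devfn. */
static int swizzle(int pin, unsigned int devfn)
{
	return (((pin - 1) + EX_PCI_SLOT(devfn)) % 4) + 1;
}

int main(void)
{
	unsigned int devfn = 3 << 3;	/* hypothetical device in slot 3, function 0 */
	int pin = prt_to_pci_pin(0);	/* _PRT says INTA */

	/* INTA on a device in slot 3 appears as INTD at the upstream bridge. */
	printf("INT%c -> bridge INT%c\n", pin_name(pin),
	       pin_name(swizzle(pin, devfn)));
	return 0;
}
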
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index e52ad91..1c6e73c 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -796,10 +796,6 @@
 	struct list_head *node = NULL;
 	struct acpi_pci_link *link = NULL;
 
-
-	/* Make sure SCI is enabled again (Apple firmware bug?) */
-	acpi_set_register(ACPI_BITREG_SCI_ENABLE, 1);
-
 	list_for_each(node, &acpi_link.entries) {
 		link = list_entry(node, struct acpi_pci_link, node);
 		if (!link) {
@@ -912,7 +908,7 @@
 
 __setup("acpi_irq_nobalance", acpi_irq_nobalance_set);
 
-int __init acpi_irq_balance_set(char *str)
+static int __init acpi_irq_balance_set(char *str)
 {
 	acpi_irq_balance = 1;
 	return 1;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 642554b..5b38a02 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -31,6 +31,7 @@
 #include <linux/spinlock.h>
 #include <linux/pm.h>
 #include <linux/pci.h>
+#include <linux/pci-acpi.h>
 #include <linux/acpi.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
@@ -193,6 +194,7 @@
 	unsigned long long value = 0;
 	acpi_handle handle = NULL;
 	struct acpi_device *child;
+	u32 flags, base_flags;
 
 
 	if (!device)
@@ -210,6 +212,13 @@
 
 	device->ops.bind = acpi_pci_bind;
 
+	/*
+	 * All supported architectures that use ACPI have support for
+	 * PCI domains, so we indicate this in _OSC support capabilities.
+	 */
+	flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
+	pci_acpi_osc_support(device->handle, flags);
+
 	/* 
 	 * Segment
 	 * -------
@@ -335,6 +344,17 @@
 	list_for_each_entry(child, &device->children, node)
 		acpi_pci_bridge_scan(child);
 
+	/* Indicate support for various _OSC capabilities. */
+	if (pci_ext_cfg_avail(root->bus->self))
+		flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
+	if (pcie_aspm_enabled())
+		flags |= OSC_ACTIVE_STATE_PWR_SUPPORT |
+			OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
+	if (pci_msi_enabled())
+		flags |= OSC_MSI_SUPPORT;
+	if (flags != base_flags)
+		pci_acpi_osc_support(device->handle, flags);
+
       end:
 	if (result) {
 		if (!list_empty(&root->node))
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index bb7d50d..c926e7d 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -139,6 +139,8 @@
 {
 	acpi_status status = AE_OK;
 	unsigned long long sta = 0;
+	char node_name[5];
+	struct acpi_buffer buffer = { sizeof(node_name), node_name };
 
 
 	if (!handle || !state)
@@ -151,8 +153,10 @@
 	*state = (sta & 0x01)?ACPI_POWER_RESOURCE_STATE_ON:
 			      ACPI_POWER_RESOURCE_STATE_OFF;
 
+	acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
+
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] is %s\n",
-			  acpi_ut_get_node_name(handle),
+			  node_name,
 				*state ? "on" : "off"));
 
 	return 0;
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
new file mode 100644
index 0000000..428c911d
--- /dev/null
+++ b/drivers/acpi/proc.c
@@ -0,0 +1,531 @@
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/suspend.h>
+#include <linux/bcd.h>
+#include <asm/uaccess.h>
+
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#ifdef CONFIG_X86
+#include <linux/mc146818rtc.h>
+#endif
+
+#include "sleep.h"
+
+#define _COMPONENT		ACPI_SYSTEM_COMPONENT
+
+/*
+ * this file provides support for:
+ * /proc/acpi/sleep
+ * /proc/acpi/alarm
+ * /proc/acpi/wakeup
+ */
+
+ACPI_MODULE_NAME("sleep")
+#ifdef	CONFIG_ACPI_PROCFS
+static int acpi_system_sleep_seq_show(struct seq_file *seq, void *offset)
+{
+	int i;
+
+	for (i = 0; i <= ACPI_STATE_S5; i++) {
+		if (sleep_states[i]) {
+			seq_printf(seq, "S%d ", i);
+		}
+	}
+
+	seq_puts(seq, "\n");
+
+	return 0;
+}
+
+static int acpi_system_sleep_open_fs(struct inode *inode, struct file *file)
+{
+	return single_open(file, acpi_system_sleep_seq_show, PDE(inode)->data);
+}
+
+static ssize_t
+acpi_system_write_sleep(struct file *file,
+			const char __user * buffer, size_t count, loff_t * ppos)
+{
+	char str[12];
+	u32 state = 0;
+	int error = 0;
+
+	if (count > sizeof(str) - 1)
+		goto Done;
+	memset(str, 0, sizeof(str));
+	if (copy_from_user(str, buffer, count))
+		return -EFAULT;
+
+	/* Check for S4 bios request */
+	if (!strcmp(str, "4b")) {
+		error = acpi_suspend(4);
+		goto Done;
+	}
+	state = simple_strtoul(str, NULL, 0);
+#ifdef CONFIG_HIBERNATION
+	if (state == 4) {
+		error = hibernate();
+		goto Done;
+	}
+#endif
+	error = acpi_suspend(state);
+      Done:
+	return error ? error : count;
+}
+#endif				/* CONFIG_ACPI_PROCFS */
+
+#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || !defined(CONFIG_X86)
+/* use /sys/class/rtc/rtcX/wakealarm instead; it's not ACPI-specific */
+#else
+#define	HAVE_ACPI_LEGACY_ALARM
+#endif
+
+#ifdef	HAVE_ACPI_LEGACY_ALARM
+
+static u32 cmos_bcd_read(int offset, int rtc_control);
+
+static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
+{
+	u32 sec, min, hr;
+	u32 day, mo, yr, cent = 0;
+	u32 today = 0;
+	unsigned char rtc_control = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+
+	rtc_control = CMOS_READ(RTC_CONTROL);
+	sec = cmos_bcd_read(RTC_SECONDS_ALARM, rtc_control);
+	min = cmos_bcd_read(RTC_MINUTES_ALARM, rtc_control);
+	hr = cmos_bcd_read(RTC_HOURS_ALARM, rtc_control);
+
+	/* If we ever get an FACP with proper values... */
+	if (acpi_gbl_FADT.day_alarm) {
+		/* ACPI spec: only the low 6 bits are significant */
+		day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F;
+		if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+			day = bcd2bin(day);
+	} else
+		day = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
+	if (acpi_gbl_FADT.month_alarm)
+		mo = cmos_bcd_read(acpi_gbl_FADT.month_alarm, rtc_control);
+	else {
+		mo = cmos_bcd_read(RTC_MONTH, rtc_control);
+		today = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
+	}
+	if (acpi_gbl_FADT.century)
+		cent = cmos_bcd_read(acpi_gbl_FADT.century, rtc_control);
+
+	yr = cmos_bcd_read(RTC_YEAR, rtc_control);
+
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
+	/* we're trusting the FADT (see above) */
+	if (!acpi_gbl_FADT.century)
+		/* If we're not trusting the FADT, we should at least make it
+		 * right for _this_ century... ehm, what is _this_ century?
+		 *
+		 * TBD:
+		 *  ASAP: find piece of code in the kernel, e.g. star tracker driver,
+		 *        which we can trust to determine the century correctly. Atom
+		 *        watch driver would be nice, too...
+		 *
+		 *  if that has not happened, change for first release in 2050:
+		 *        if (yr<50)
+		 *                yr += 2100;
+		 *        else
+		 *                yr += 2000;   // current line of code
+		 *
+		 *  if that has not happened either, please do on 2099/12/31:23:59:59
+		 *        s/2000/2100
+		 *
+		 */
+		yr += 2000;
+	else
+		yr += cent * 100;
+
+	/*
+	 * Show correct dates for alarms up to a month into the future.
+	 * This solves issues for nearly all situations with the common
+	 * 30-day alarm clocks in PC hardware.
+	 */
+	if (day < today) {
+		if (mo < 12) {
+			mo += 1;
+		} else {
+			mo = 1;
+			yr += 1;
+		}
+	}
+
+	seq_printf(seq, "%4.4u-", yr);
+	(mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo);
+	(day > 31) ? seq_puts(seq, "** ") : seq_printf(seq, "%2.2u ", day);
+	(hr > 23) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", hr);
+	(min > 59) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", min);
+	(sec > 59) ? seq_puts(seq, "**\n") : seq_printf(seq, "%2.2u\n", sec);
+
+	return 0;
+}
+
+static int acpi_system_alarm_open_fs(struct inode *inode, struct file *file)
+{
+	return single_open(file, acpi_system_alarm_seq_show, PDE(inode)->data);
+}
+
+static int get_date_field(char **p, u32 * value)
+{
+	char *next = NULL;
+	char *string_end = NULL;
+	int result = -EINVAL;
+
+	/*
+	 * Try to find a delimiter, only to insert a null.  The end of the
+	 * string won't have one, but is still valid.
+	 */
+	if (*p == NULL)
+		return result;
+
+	next = strpbrk(*p, "- :");
+	if (next)
+		*next++ = '\0';
+
+	*value = simple_strtoul(*p, &string_end, 10);
+
+	/* Signal success if we got a good digit */
+	if (string_end != *p)
+		result = 0;
+
+	if (next)
+		*p = next;
+	else
+		*p = NULL;
+
+	return result;
+}
+
+/* Read a possibly BCD register, always return binary */
+static u32 cmos_bcd_read(int offset, int rtc_control)
+{
+	u32 val = CMOS_READ(offset);
+	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+		val = bcd2bin(val);
+	return val;
+}
+
+/* Write binary value into possibly BCD register */
+static void cmos_bcd_write(u32 val, int offset, int rtc_control)
+{
+	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+		val = bin2bcd(val);
+	CMOS_WRITE(val, offset);
+}
+
+static ssize_t
+acpi_system_write_alarm(struct file *file,
+			const char __user * buffer, size_t count, loff_t * ppos)
+{
+	int result = 0;
+	char alarm_string[30] = { '\0' };
+	char *p = alarm_string;
+	u32 sec, min, hr, day, mo, yr;
+	int adjust = 0;
+	unsigned char rtc_control = 0;
+
+	if (count > sizeof(alarm_string) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(alarm_string, buffer, count))
+		return -EFAULT;
+
+	alarm_string[count] = '\0';
+
+	/* check for time adjustment */
+	if (alarm_string[0] == '+') {
+		p++;
+		adjust = 1;
+	}
+
+	if ((result = get_date_field(&p, &yr)))
+		goto end;
+	if ((result = get_date_field(&p, &mo)))
+		goto end;
+	if ((result = get_date_field(&p, &day)))
+		goto end;
+	if ((result = get_date_field(&p, &hr)))
+		goto end;
+	if ((result = get_date_field(&p, &min)))
+		goto end;
+	if ((result = get_date_field(&p, &sec)))
+		goto end;
+
+	spin_lock_irq(&rtc_lock);
+
+	rtc_control = CMOS_READ(RTC_CONTROL);
+
+	if (adjust) {
+		yr += cmos_bcd_read(RTC_YEAR, rtc_control);
+		mo += cmos_bcd_read(RTC_MONTH, rtc_control);
+		day += cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
+		hr += cmos_bcd_read(RTC_HOURS, rtc_control);
+		min += cmos_bcd_read(RTC_MINUTES, rtc_control);
+		sec += cmos_bcd_read(RTC_SECONDS, rtc_control);
+	}
+
+	spin_unlock_irq(&rtc_lock);
+
+	if (sec > 59) {
+		min += sec/60;
+		sec = sec%60;
+	}
+	if (min > 59) {
+		hr += min/60;
+		min = min%60;
+	}
+	if (hr > 23) {
+		day += hr/24;
+		hr = hr%24;
+	}
+	if (day > 31) {
+		mo += day/32;
+		day = day%32;
+	}
+	if (mo > 12) {
+		yr += mo/13;
+		mo = mo%13;
+	}
+
+	spin_lock_irq(&rtc_lock);
+	/*
+	 * Disable alarm interrupt before setting alarm timer or else
+	 * when ACPI_EVENT_RTC is enabled, a spurious ACPI interrupt occurs
+	 */
+	rtc_control &= ~RTC_AIE;
+	CMOS_WRITE(rtc_control, RTC_CONTROL);
+	CMOS_READ(RTC_INTR_FLAGS);
+
+	/* write the fields the rtc knows about */
+	cmos_bcd_write(hr, RTC_HOURS_ALARM, rtc_control);
+	cmos_bcd_write(min, RTC_MINUTES_ALARM, rtc_control);
+	cmos_bcd_write(sec, RTC_SECONDS_ALARM, rtc_control);
+
+	/*
+	 * If the system supports an enhanced alarm it will have non-zero
+	 * offsets into the CMOS RAM here -- which for some reason are pointing
+	 * to the RTC area of memory.
+	 */
+	if (acpi_gbl_FADT.day_alarm)
+		cmos_bcd_write(day, acpi_gbl_FADT.day_alarm, rtc_control);
+	if (acpi_gbl_FADT.month_alarm)
+		cmos_bcd_write(mo, acpi_gbl_FADT.month_alarm, rtc_control);
+	if (acpi_gbl_FADT.century) {
+		if (adjust)
+			yr += cmos_bcd_read(acpi_gbl_FADT.century, rtc_control) * 100;
+		cmos_bcd_write(yr / 100, acpi_gbl_FADT.century, rtc_control);
+	}
+	/* enable the rtc alarm interrupt */
+	rtc_control |= RTC_AIE;
+	CMOS_WRITE(rtc_control, RTC_CONTROL);
+	CMOS_READ(RTC_INTR_FLAGS);
+
+	spin_unlock_irq(&rtc_lock);
+
+	acpi_clear_event(ACPI_EVENT_RTC);
+	acpi_enable_event(ACPI_EVENT_RTC, 0);
+
+	*ppos += count;
+
+	result = 0;
+      end:
+	return result ? result : count;
+}
+#endif				/* HAVE_ACPI_LEGACY_ALARM */
+
+extern struct list_head acpi_wakeup_device_list;
+extern spinlock_t acpi_device_lock;
+
+static int
+acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
+{
+	struct list_head *node, *next;
+
+	seq_printf(seq, "Device\tS-state\t  Status   Sysfs node\n");
+
+	spin_lock(&acpi_device_lock);
+	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
+		struct acpi_device *dev =
+		    container_of(node, struct acpi_device, wakeup_list);
+		struct device *ldev;
+
+		if (!dev->wakeup.flags.valid)
+			continue;
+		spin_unlock(&acpi_device_lock);
+
+		ldev = acpi_get_physical_device(dev->handle);
+		seq_printf(seq, "%s\t  S%d\t%c%-8s  ",
+			   dev->pnp.bus_id,
+			   (u32) dev->wakeup.sleep_state,
+			   dev->wakeup.flags.run_wake ? '*' : ' ',
+			   dev->wakeup.state.enabled ? "enabled" : "disabled");
+		if (ldev)
+			seq_printf(seq, "%s:%s",
+				   ldev->bus ? ldev->bus->name : "no-bus",
+				   dev_name(ldev));
+		seq_printf(seq, "\n");
+		put_device(ldev);
+
+		spin_lock(&acpi_device_lock);
+	}
+	spin_unlock(&acpi_device_lock);
+	return 0;
+}
+
+static void physical_device_enable_wakeup(struct acpi_device *adev)
+{
+	struct device *dev = acpi_get_physical_device(adev->handle);
+
+	if (dev && device_can_wakeup(dev))
+		device_set_wakeup_enable(dev, adev->wakeup.state.enabled);
+}
+
+static ssize_t
+acpi_system_write_wakeup_device(struct file *file,
+				const char __user * buffer,
+				size_t count, loff_t * ppos)
+{
+	struct list_head *node, *next;
+	char strbuf[5];
+	char str[5] = "";
+	int len = count;
+	struct acpi_device *found_dev = NULL;
+
+	if (len > 4)
+		len = 4;
+
+	if (copy_from_user(strbuf, buffer, len))
+		return -EFAULT;
+	strbuf[len] = '\0';
+	sscanf(strbuf, "%s", str);
+
+	spin_lock(&acpi_device_lock);
+	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
+		struct acpi_device *dev =
+		    container_of(node, struct acpi_device, wakeup_list);
+		if (!dev->wakeup.flags.valid)
+			continue;
+
+		if (!strncmp(dev->pnp.bus_id, str, 4)) {
+			dev->wakeup.state.enabled =
+			    dev->wakeup.state.enabled ? 0 : 1;
+			found_dev = dev;
+			break;
+		}
+	}
+	if (found_dev) {
+		physical_device_enable_wakeup(found_dev);
+		list_for_each_safe(node, next, &acpi_wakeup_device_list) {
+			struct acpi_device *dev = container_of(node,
+							       struct
+							       acpi_device,
+							       wakeup_list);
+
+			if ((dev != found_dev) &&
+			    (dev->wakeup.gpe_number ==
+			     found_dev->wakeup.gpe_number)
+			    && (dev->wakeup.gpe_device ==
+				found_dev->wakeup.gpe_device)) {
+				printk(KERN_WARNING
+				       "ACPI: '%s' and '%s' have the same GPE, "
+				       "can't disable/enable one separately\n",
+				       dev->pnp.bus_id, found_dev->pnp.bus_id);
+				dev->wakeup.state.enabled =
+				    found_dev->wakeup.state.enabled;
+				physical_device_enable_wakeup(dev);
+			}
+		}
+	}
+	spin_unlock(&acpi_device_lock);
+	return count;
+}
+
+static int
+acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file)
+{
+	return single_open(file, acpi_system_wakeup_device_seq_show,
+			   PDE(inode)->data);
+}
+
+static const struct file_operations acpi_system_wakeup_device_fops = {
+	.owner = THIS_MODULE,
+	.open = acpi_system_wakeup_device_open_fs,
+	.read = seq_read,
+	.write = acpi_system_write_wakeup_device,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+#ifdef	CONFIG_ACPI_PROCFS
+static const struct file_operations acpi_system_sleep_fops = {
+	.owner = THIS_MODULE,
+	.open = acpi_system_sleep_open_fs,
+	.read = seq_read,
+	.write = acpi_system_write_sleep,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+#endif				/* CONFIG_ACPI_PROCFS */
+
+#ifdef	HAVE_ACPI_LEGACY_ALARM
+static const struct file_operations acpi_system_alarm_fops = {
+	.owner = THIS_MODULE,
+	.open = acpi_system_alarm_open_fs,
+	.read = seq_read,
+	.write = acpi_system_write_alarm,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static u32 rtc_handler(void *context)
+{
+	acpi_clear_event(ACPI_EVENT_RTC);
+	acpi_disable_event(ACPI_EVENT_RTC, 0);
+
+	return ACPI_INTERRUPT_HANDLED;
+}
+#endif				/* HAVE_ACPI_LEGACY_ALARM */
+
+static int __init acpi_sleep_proc_init(void)
+{
+	if (acpi_disabled)
+		return 0;
+
+#ifdef	CONFIG_ACPI_PROCFS
+	/* 'sleep' [R/W] */
+	proc_create("sleep", S_IFREG | S_IRUGO | S_IWUSR,
+		    acpi_root_dir, &acpi_system_sleep_fops);
+#endif				/* CONFIG_ACPI_PROCFS */
+
+#ifdef	HAVE_ACPI_LEGACY_ALARM
+	/* 'alarm' [R/W] */
+	proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR,
+		    acpi_root_dir, &acpi_system_alarm_fops);
+
+	acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
+	/*
+	 * Disable the RTC event after installing RTC handler.
+	 * Only when RTC alarm is set will it be enabled.
+	 */
+	acpi_clear_event(ACPI_EVENT_RTC);
+	acpi_disable_event(ACPI_EVENT_RTC, 0);
+#endif				/* HAVE_ACPI_LEGACY_ALARM */
+
+	/* 'wakeup device' [R/W] */
+	proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
+		    acpi_root_dir, &acpi_system_wakeup_device_fops);
+
+	return 0;
+}
+
+late_initcall(acpi_sleep_proc_init);
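
proc.c's cmos_bcd_read()/cmos_bcd_write() helpers above translate between binary values and the BCD encoding the CMOS RTC may use (one decimal digit per nibble). The self-contained sketch below shows only the conversion itself; there is no CMOS access, and the example values are chosen for illustration.

/*
 * Standalone sketch of the BCD conversions behind cmos_bcd_read()/
 * cmos_bcd_write(): each decimal digit occupies one nibble, so 0x59 in
 * BCD means decimal 59.  No real CMOS/RTC access here.
 */
#include <assert.h>
#include <stdio.h>

static unsigned int ex_bcd2bin(unsigned int val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned int ex_bin2bcd(unsigned int val)
{
	return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
	/* e.g. an alarm-seconds register read back as BCD 0x59 */
	assert(ex_bcd2bin(0x59) == 59);
	assert(ex_bin2bcd(59) == 0x59);
	printf("0x59 (BCD) == %u, 59 == 0x%02x (BCD)\n",
	       ex_bcd2bin(0x59), ex_bin2bcd(59));
	return 0;
}
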
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c
index a6b662c..93f9114 100644
--- a/drivers/acpi/reboot.c
+++ b/drivers/acpi/reboot.c
@@ -42,7 +42,7 @@
 	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
 	case ACPI_ADR_SPACE_SYSTEM_IO:
 		printk(KERN_DEBUG "ACPI MEMORY or I/O RESET_REG.\n");
-		acpi_hw_low_level_write(8, reset_value, rr);
+		acpi_reset();
 		break;
 	}
 	/* Wait ten seconds */
diff --git a/drivers/acpi/resources/Makefile b/drivers/acpi/resources/Makefile
deleted file mode 100644
index 8de4f69..0000000
--- a/drivers/acpi/resources/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-#
-# Makefile for all Linux ACPI interpreter subdirectories
-#
-
-obj-y := rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \
-	 rscalc.o  rsirq.o  rsmemory.o  rsutils.o
-
-obj-$(ACPI_FUTURE_USAGE) += rsdump.o
-
-EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/resources/rsaddr.c b/drivers/acpi/resources/rsaddr.c
deleted file mode 100644
index 7f96332..0000000
--- a/drivers/acpi/resources/rsaddr.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: rsaddr - Address resource descriptors (16/32/64)
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acresrc.h>
-
-#define _COMPONENT          ACPI_RESOURCES
-ACPI_MODULE_NAME("rsaddr")
-
-/*******************************************************************************
- *
- * acpi_rs_convert_address16 - All WORD (16-bit) address resources
- *
- ******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_convert_address16[5] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_ADDRESS16,
-	 ACPI_RS_SIZE(struct acpi_resource_address16),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_address16)},
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_ADDRESS16,
-	 sizeof(struct aml_resource_address16),
-	 0},
-
-	/* Resource Type, General Flags, and Type-Specific Flags */
-
-	{ACPI_RSC_ADDRESS, 0, 0, 0},
-
-	/*
-	 * These fields are contiguous in both the source and destination:
-	 * Address Granularity
-	 * Address Range Minimum
-	 * Address Range Maximum
-	 * Address Translation Offset
-	 * Address Length
-	 */
-	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.address16.granularity),
-	 AML_OFFSET(address16.granularity),
-	 5},
-
-	/* Optional resource_source (Index and String) */
-
-	{ACPI_RSC_SOURCE, ACPI_RS_OFFSET(data.address16.resource_source),
-	 0,
-	 sizeof(struct aml_resource_address16)}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_address32 - All DWORD (32-bit) address resources
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_convert_address32[5] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_ADDRESS32,
-	 ACPI_RS_SIZE(struct acpi_resource_address32),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_address32)},
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_ADDRESS32,
-	 sizeof(struct aml_resource_address32),
-	 0},
-
-	/* Resource Type, General Flags, and Type-Specific Flags */
-
-	{ACPI_RSC_ADDRESS, 0, 0, 0},
-
-	/*
-	 * These fields are contiguous in both the source and destination:
-	 * Address Granularity
-	 * Address Range Minimum
-	 * Address Range Maximum
-	 * Address Translation Offset
-	 * Address Length
-	 */
-	{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.address32.granularity),
-	 AML_OFFSET(address32.granularity),
-	 5},
-
-	/* Optional resource_source (Index and String) */
-
-	{ACPI_RSC_SOURCE, ACPI_RS_OFFSET(data.address32.resource_source),
-	 0,
-	 sizeof(struct aml_resource_address32)}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_address64 - All QWORD (64-bit) address resources
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_convert_address64[5] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_ADDRESS64,
-	 ACPI_RS_SIZE(struct acpi_resource_address64),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_address64)},
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_ADDRESS64,
-	 sizeof(struct aml_resource_address64),
-	 0},
-
-	/* Resource Type, General Flags, and Type-Specific Flags */
-
-	{ACPI_RSC_ADDRESS, 0, 0, 0},
-
-	/*
-	 * These fields are contiguous in both the source and destination:
-	 * Address Granularity
-	 * Address Range Minimum
-	 * Address Range Maximum
-	 * Address Translation Offset
-	 * Address Length
-	 */
-	{ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.address64.granularity),
-	 AML_OFFSET(address64.granularity),
-	 5},
-
-	/* Optional resource_source (Index and String) */
-
-	{ACPI_RSC_SOURCE, ACPI_RS_OFFSET(data.address64.resource_source),
-	 0,
-	 sizeof(struct aml_resource_address64)}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_ext_address64 - All Extended (64-bit) address resources
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_convert_ext_address64[5] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64,
-	 ACPI_RS_SIZE(struct acpi_resource_extended_address64),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_ext_address64)},
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64,
-	 sizeof(struct aml_resource_extended_address64),
-	 0},
-
-	/* Resource Type, General Flags, and Type-Specific Flags */
-
-	{ACPI_RSC_ADDRESS, 0, 0, 0},
-
-	/* Revision ID */
-
-	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.ext_address64.revision_iD),
-	 AML_OFFSET(ext_address64.revision_iD),
-	 1},
-	/*
-	 * These fields are contiguous in both the source and destination:
-	 * Address Granularity
-	 * Address Range Minimum
-	 * Address Range Maximum
-	 * Address Translation Offset
-	 * Address Length
-	 * Type-Specific Attribute
-	 */
-	{ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.ext_address64.granularity),
-	 AML_OFFSET(ext_address64.granularity),
-	 6}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_general_flags - Flags common to all address descriptors
- *
- ******************************************************************************/
-
-static struct acpi_rsconvert_info acpi_rs_convert_general_flags[6] = {
-	{ACPI_RSC_FLAGINIT, 0, AML_OFFSET(address.flags),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_general_flags)},
-
-	/* Resource Type (Memory, Io, bus_number, etc.) */
-
-	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.address.resource_type),
-	 AML_OFFSET(address.resource_type),
-	 1},
-
-	/* General Flags - Consume, Decode, min_fixed, max_fixed */
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.producer_consumer),
-	 AML_OFFSET(address.flags),
-	 0},
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.decode),
-	 AML_OFFSET(address.flags),
-	 1},
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.min_address_fixed),
-	 AML_OFFSET(address.flags),
-	 2},
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.max_address_fixed),
-	 AML_OFFSET(address.flags),
-	 3}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_mem_flags - Flags common to Memory address descriptors
- *
- ******************************************************************************/
-
-static struct acpi_rsconvert_info acpi_rs_convert_mem_flags[5] = {
-	{ACPI_RSC_FLAGINIT, 0, AML_OFFSET(address.specific_flags),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_mem_flags)},
-
-	/* Memory-specific flags */
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.info.mem.write_protect),
-	 AML_OFFSET(address.specific_flags),
-	 0},
-
-	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.address.info.mem.caching),
-	 AML_OFFSET(address.specific_flags),
-	 1},
-
-	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.address.info.mem.range_type),
-	 AML_OFFSET(address.specific_flags),
-	 3},
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.info.mem.translation),
-	 AML_OFFSET(address.specific_flags),
-	 5}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_io_flags - Flags common to I/O address descriptors
- *
- ******************************************************************************/
-
-static struct acpi_rsconvert_info acpi_rs_convert_io_flags[4] = {
-	{ACPI_RSC_FLAGINIT, 0, AML_OFFSET(address.specific_flags),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_io_flags)},
-
-	/* I/O-specific flags */
-
-	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.address.info.io.range_type),
-	 AML_OFFSET(address.specific_flags),
-	 0},
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.info.io.translation),
-	 AML_OFFSET(address.specific_flags),
-	 4},
-
-	{ACPI_RSC_1BITFLAG,
-	 ACPI_RS_OFFSET(data.address.info.io.translation_type),
-	 AML_OFFSET(address.specific_flags),
-	 5}
-};
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_get_address_common
- *
- * PARAMETERS:  Resource            - Pointer to the internal resource struct
- *              Aml                 - Pointer to the AML resource descriptor
- *
- * RETURN:      TRUE if the resource_type field is OK, FALSE otherwise
- *
- * DESCRIPTION: Convert common flag fields from a raw AML resource descriptor
- *              to an internal resource descriptor
- *
- ******************************************************************************/
-
-u8
-acpi_rs_get_address_common(struct acpi_resource *resource,
-			   union aml_resource *aml)
-{
-	ACPI_FUNCTION_ENTRY();
-
-	/* Validate the Resource Type */
-
-	if ((aml->address.resource_type > 2)
-	    && (aml->address.resource_type < 0xC0)) {
-		return (FALSE);
-	}
-
-	/* Get the Resource Type and General Flags */
-
-	(void)acpi_rs_convert_aml_to_resource(resource, aml,
-					      acpi_rs_convert_general_flags);
-
-	/* Get the Type-Specific Flags (Memory and I/O descriptors only) */
-
-	if (resource->data.address.resource_type == ACPI_MEMORY_RANGE) {
-		(void)acpi_rs_convert_aml_to_resource(resource, aml,
-						      acpi_rs_convert_mem_flags);
-	} else if (resource->data.address.resource_type == ACPI_IO_RANGE) {
-		(void)acpi_rs_convert_aml_to_resource(resource, aml,
-						      acpi_rs_convert_io_flags);
-	} else {
-		/* Generic resource type, just grab the type_specific byte */
-
-		resource->data.address.info.type_specific =
-		    aml->address.specific_flags;
-	}
-
-	return (TRUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_set_address_common
- *
- * PARAMETERS:  Aml                 - Pointer to the AML resource descriptor
- *              Resource            - Pointer to the internal resource struct
- *
- * RETURN:      None
- *
- * DESCRIPTION: Convert common flag fields from a resource descriptor to an
- *              AML descriptor
- *
- ******************************************************************************/
-
-void
-acpi_rs_set_address_common(union aml_resource *aml,
-			   struct acpi_resource *resource)
-{
-	ACPI_FUNCTION_ENTRY();
-
-	/* Set the Resource Type and General Flags */
-
-	(void)acpi_rs_convert_resource_to_aml(resource, aml,
-					      acpi_rs_convert_general_flags);
-
-	/* Set the Type-Specific Flags (Memory and I/O descriptors only) */
-
-	if (resource->data.address.resource_type == ACPI_MEMORY_RANGE) {
-		(void)acpi_rs_convert_resource_to_aml(resource, aml,
-						      acpi_rs_convert_mem_flags);
-	} else if (resource->data.address.resource_type == ACPI_IO_RANGE) {
-		(void)acpi_rs_convert_resource_to_aml(resource, aml,
-						      acpi_rs_convert_io_flags);
-	} else {
-		/* Generic resource type, just copy the type_specific byte */
-
-		aml->address.specific_flags =
-		    resource->data.address.info.type_specific;
-	}
-}
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
deleted file mode 100644
index 8eaaecf..0000000
--- a/drivers/acpi/resources/rscalc.c
+++ /dev/null
@@ -1,617 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: rscalc - Calculate stream and list lengths
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acresrc.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_RESOURCES
-ACPI_MODULE_NAME("rscalc")
-
-/* Local prototypes */
-static u8 acpi_rs_count_set_bits(u16 bit_field);
-
-static acpi_rs_length
-acpi_rs_struct_option_length(struct acpi_resource_source *resource_source);
-
-static u32
-acpi_rs_stream_option_length(u32 resource_length, u32 minimum_total_length);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_count_set_bits
- *
- * PARAMETERS:  bit_field       - Field in which to count bits
- *
- * RETURN:      Number of bits set within the field
- *
- * DESCRIPTION: Count the number of bits set in a resource field. Used for
- *              (Short descriptor) interrupt and DMA lists.
- *
- ******************************************************************************/
-
-static u8 acpi_rs_count_set_bits(u16 bit_field)
-{
-	u8 bits_set;
-
-	ACPI_FUNCTION_ENTRY();
-
-	for (bits_set = 0; bit_field; bits_set++) {
-
-		/* Zero the least significant bit that is set */
-
-		bit_field &= (u16) (bit_field - 1);
-	}
-
-	return bits_set;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_struct_option_length
- *
- * PARAMETERS:  resource_source     - Pointer to optional descriptor field
- *
- * RETURN:      Length of the optional resource_source (0 if not present)
- *
- * DESCRIPTION: Common code to handle optional resource_source_index and
- *              resource_source fields in some Large descriptors. Used during
- *              list-to-stream conversion
- *
- ******************************************************************************/
-
-static acpi_rs_length
-acpi_rs_struct_option_length(struct acpi_resource_source *resource_source)
-{
-	ACPI_FUNCTION_ENTRY();
-
-	/*
-	 * If the resource_source string is valid, return the size of the string
-	 * (string_length includes the NULL terminator) plus the size of the
-	 * resource_source_index (1).
-	 */
-	if (resource_source->string_ptr) {
-		return ((acpi_rs_length) (resource_source->string_length + 1));
-	}
-
-	return (0);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_stream_option_length
- *
- * PARAMETERS:  resource_length     - Length from the resource header
- *              minimum_aml_resource_length - Minimum length of this resource,
- *                                    before any optional fields. Includes header size
- *
- * RETURN:      Length of optional string (0 if no string present)
- *
- * DESCRIPTION: Common code to handle optional resource_source_index and
- *              resource_source fields in some Large descriptors. Used during
- *              stream-to-list conversion
- *
- ******************************************************************************/
-
-static u32
-acpi_rs_stream_option_length(u32 resource_length,
-			     u32 minimum_aml_resource_length)
-{
-	u32 string_length = 0;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/*
-	 * The resource_source_index and resource_source are optional elements of some
-	 * Large-type resource descriptors.
-	 */
-
-	/*
-	 * If the length of the actual resource descriptor is greater than the ACPI
-	 * spec-defined minimum length, it means that a resource_source_index exists
-	 * and is followed by a (required) null terminated string. The string length
-	 * (including the null terminator) is the resource length minus the minimum
-	 * length, minus one byte for the resource_source_index itself.
-	 */
-	if (resource_length > minimum_aml_resource_length) {
-
-		/* Compute the length of the optional string */
-
-		string_length =
-		    resource_length - minimum_aml_resource_length - 1;
-	}
-
-	/*
-	 * Round the length up to a multiple of the native word in order to
-	 * guarantee that the entire resource descriptor is native word aligned
-	 */
-	return ((u32) ACPI_ROUND_UP_TO_NATIVE_WORD(string_length));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_get_aml_length
- *
- * PARAMETERS:  Resource            - Pointer to the resource linked list
- *              size_needed         - Where the required size is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Takes a linked list of internal resource descriptors and
- *              calculates the size buffer needed to hold the corresponding
- *              external resource byte stream.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
-{
-	acpi_size aml_size_needed = 0;
-	acpi_rs_length total_size;
-
-	ACPI_FUNCTION_TRACE(rs_get_aml_length);
-
-	/* Traverse entire list of internal resource descriptors */
-
-	while (resource) {
-
-		/* Validate the descriptor type */
-
-		if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
-			return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE);
-		}
-
-		/* Get the base size of the (external stream) resource descriptor */
-
-		total_size = acpi_gbl_aml_resource_sizes[resource->type];
-
-		/*
-		 * Augment the base size for descriptors with optional and/or
-		 * variable-length fields
-		 */
-		switch (resource->type) {
-		case ACPI_RESOURCE_TYPE_IRQ:
-
-			/* Length can be 3 or 2 */
-
-			if (resource->data.irq.descriptor_length == 2) {
-				total_size--;
-			}
-			break;
-
-		case ACPI_RESOURCE_TYPE_START_DEPENDENT:
-
-			/* Length can be 1 or 0 */
-
-			if (resource->data.irq.descriptor_length == 0) {
-				total_size--;
-			}
-			break;
-
-		case ACPI_RESOURCE_TYPE_VENDOR:
-			/*
-			 * Vendor Defined Resource:
-			 * For a Vendor Specific resource, if the Length is between 1 and 7
-			 * it will be created as a Small Resource data type, otherwise it
-			 * is a Large Resource data type.
-			 */
-			if (resource->data.vendor.byte_length > 7) {
-
-				/* Base size of a Large resource descriptor */
-
-				total_size =
-				    sizeof(struct aml_resource_large_header);
-			}
-
-			/* Add the size of the vendor-specific data */
-
-			total_size = (acpi_rs_length)
-			    (total_size + resource->data.vendor.byte_length);
-			break;
-
-		case ACPI_RESOURCE_TYPE_END_TAG:
-			/*
-			 * End Tag:
-			 * We are done -- return the accumulated total size.
-			 */
-			*size_needed = aml_size_needed + total_size;
-
-			/* Normal exit */
-
-			return_ACPI_STATUS(AE_OK);
-
-		case ACPI_RESOURCE_TYPE_ADDRESS16:
-			/*
-			 * 16-Bit Address Resource:
-			 * Add the size of the optional resource_source info
-			 */
-			total_size = (acpi_rs_length)
-			    (total_size +
-			     acpi_rs_struct_option_length(&resource->data.
-							  address16.
-							  resource_source));
-			break;
-
-		case ACPI_RESOURCE_TYPE_ADDRESS32:
-			/*
-			 * 32-Bit Address Resource:
-			 * Add the size of the optional resource_source info
-			 */
-			total_size = (acpi_rs_length)
-			    (total_size +
-			     acpi_rs_struct_option_length(&resource->data.
-							  address32.
-							  resource_source));
-			break;
-
-		case ACPI_RESOURCE_TYPE_ADDRESS64:
-			/*
-			 * 64-Bit Address Resource:
-			 * Add the size of the optional resource_source info
-			 */
-			total_size = (acpi_rs_length)
-			    (total_size +
-			     acpi_rs_struct_option_length(&resource->data.
-							  address64.
-							  resource_source));
-			break;
-
-		case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
-			/*
-			 * Extended IRQ Resource:
-			 * Add the size of each additional optional interrupt beyond the
-			 * required 1 (4 bytes for each u32 interrupt number)
-			 */
-			total_size = (acpi_rs_length)
-			    (total_size +
-			     ((resource->data.extended_irq.interrupt_count -
-			       1) * 4) +
-			     /* Add the size of the optional resource_source info */
-			     acpi_rs_struct_option_length(&resource->data.
-							  extended_irq.
-							  resource_source));
-			break;
-
-		default:
-			break;
-		}
-
-		/* Update the total */
-
-		aml_size_needed += total_size;
-
-		/* Point to the next object */
-
-		resource =
-		    ACPI_ADD_PTR(struct acpi_resource, resource,
-				 resource->length);
-	}
-
-	/* Did not find an end_tag resource descriptor */
-
-	return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_get_list_length
- *
- * PARAMETERS:  aml_buffer          - Pointer to the resource byte stream
- *              aml_buffer_length   - Size of aml_buffer
- *              size_needed         - Where the size needed is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Takes an external resource byte stream and calculates the size
- *              buffer needed to hold the corresponding internal resource
- *              descriptor linked list.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_rs_get_list_length(u8 * aml_buffer,
-			u32 aml_buffer_length, acpi_size * size_needed)
-{
-	acpi_status status;
-	u8 *end_aml;
-	u8 *buffer;
-	u32 buffer_size;
-	u16 temp16;
-	u16 resource_length;
-	u32 extra_struct_bytes;
-	u8 resource_index;
-	u8 minimum_aml_resource_length;
-
-	ACPI_FUNCTION_TRACE(rs_get_list_length);
-
-	*size_needed = 0;
-	end_aml = aml_buffer + aml_buffer_length;
-
-	/* Walk the list of AML resource descriptors */
-
-	while (aml_buffer < end_aml) {
-
-		/* Validate the Resource Type and Resource Length */
-
-		status = acpi_ut_validate_resource(aml_buffer, &resource_index);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		/* Get the resource length and base (minimum) AML size */
-
-		resource_length = acpi_ut_get_resource_length(aml_buffer);
-		minimum_aml_resource_length =
-		    acpi_gbl_resource_aml_sizes[resource_index];
-
-		/*
-		 * Augment the size for descriptors with optional
-		 * and/or variable length fields
-		 */
-		extra_struct_bytes = 0;
-		buffer =
-		    aml_buffer + acpi_ut_get_resource_header_length(aml_buffer);
-
-		switch (acpi_ut_get_resource_type(aml_buffer)) {
-		case ACPI_RESOURCE_NAME_IRQ:
-			/*
-			 * IRQ Resource:
-			 * Get the number of bits set in the 16-bit IRQ mask
-			 */
-			ACPI_MOVE_16_TO_16(&temp16, buffer);
-			extra_struct_bytes = acpi_rs_count_set_bits(temp16);
-			break;
-
-		case ACPI_RESOURCE_NAME_DMA:
-			/*
-			 * DMA Resource:
-			 * Get the number of bits set in the 8-bit DMA mask
-			 */
-			extra_struct_bytes = acpi_rs_count_set_bits(*buffer);
-			break;
-
-		case ACPI_RESOURCE_NAME_VENDOR_SMALL:
-		case ACPI_RESOURCE_NAME_VENDOR_LARGE:
-			/*
-			 * Vendor Resource:
-			 * Get the number of vendor data bytes
-			 */
-			extra_struct_bytes = resource_length;
-			break;
-
-		case ACPI_RESOURCE_NAME_END_TAG:
-			/*
-			 * End Tag:
-			 * This is the normal exit, add size of end_tag
-			 */
-			*size_needed += ACPI_RS_SIZE_MIN;
-			return_ACPI_STATUS(AE_OK);
-
-		case ACPI_RESOURCE_NAME_ADDRESS32:
-		case ACPI_RESOURCE_NAME_ADDRESS16:
-		case ACPI_RESOURCE_NAME_ADDRESS64:
-			/*
-			 * Address Resource:
-			 * Add the size of the optional resource_source
-			 */
-			extra_struct_bytes =
-			    acpi_rs_stream_option_length(resource_length,
-							 minimum_aml_resource_length);
-			break;
-
-		case ACPI_RESOURCE_NAME_EXTENDED_IRQ:
-			/*
-			 * Extended IRQ Resource:
-			 * Using the interrupt_table_length, add 4 bytes for each additional
-			 * interrupt. Note: at least one interrupt is required and is
-			 * included in the minimum descriptor size (reason for the -1)
-			 */
-			extra_struct_bytes = (buffer[1] - 1) * sizeof(u32);
-
-			/* Add the size of the optional resource_source */
-
-			extra_struct_bytes +=
-			    acpi_rs_stream_option_length(resource_length -
-							 extra_struct_bytes,
-							 minimum_aml_resource_length);
-			break;
-
-		default:
-			break;
-		}
-
-		/*
-		 * Update the required buffer size for the internal descriptor structs
-		 *
-		 * Important: Round the size up for the appropriate alignment. This
-		 * is a requirement on IA64.
-		 */
-		buffer_size = acpi_gbl_resource_struct_sizes[resource_index] +
-		    extra_struct_bytes;
-		buffer_size = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
-
-		*size_needed += buffer_size;
-
-		ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
-				  "Type %.2X, AmlLength %.2X InternalLength %.2X\n",
-				  acpi_ut_get_resource_type(aml_buffer),
-				  acpi_ut_get_descriptor_length(aml_buffer),
-				  buffer_size));
-
-		/*
-		 * Point to the next resource within the AML stream using the length
-		 * contained in the resource descriptor header
-		 */
-		aml_buffer += acpi_ut_get_descriptor_length(aml_buffer);
-	}
-
-	/* Did not find an end_tag resource descriptor */
-
-	return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_get_pci_routing_table_length
- *
- * PARAMETERS:  package_object          - Pointer to the package object
- *              buffer_size_needed      - u32 pointer of the size buffer
- *              buffer_size_needed      - Where the size of the buffer
- *                                        needed to return the parsed
- *                                        data is returned
- * RETURN:      Status
- *
- * DESCRIPTION: Given a package representing a PCI routing table, this
- *              calculates the size of the corresponding linked list of
- *              descriptions.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
-				     acpi_size * buffer_size_needed)
-{
-	u32 number_of_elements;
-	acpi_size temp_size_needed = 0;
-	union acpi_operand_object **top_object_list;
-	u32 index;
-	union acpi_operand_object *package_element;
-	union acpi_operand_object **sub_object_list;
-	u8 name_found;
-	u32 table_index;
-
-	ACPI_FUNCTION_TRACE(rs_get_pci_routing_table_length);
-
-	number_of_elements = package_object->package.count;
-
-	/*
-	 * Calculate the size of the return buffer.
-	 * The base size is the number of elements * the sizes of the
-	 * structures.  Additional space for the strings is added below.
-	 * The minus four is to subtract the size of the u8 Source[4]
-	 * member because it is added below.
-	 *
-	 * But each PRT_ENTRY structure has a pointer to a string and
-	 * the size of that string must be found.
-	 */
-	top_object_list = package_object->package.elements;
-
-	for (index = 0; index < number_of_elements; index++) {
-
-		/* Dereference the sub-package */
-
-		package_element = *top_object_list;
-
-		/*
-		 * The sub_object_list will now point to an array of the
-		 * four IRQ elements: Address, Pin, Source and source_index
-		 */
-		sub_object_list = package_element->package.elements;
-
-		/* Scan the irq_table_elements for the Source Name String */
-
-		name_found = FALSE;
-
-		for (table_index = 0; table_index < 4 && !name_found;
-		     table_index++) {
-			if (*sub_object_list &&	/* Null object allowed */
-			    ((ACPI_TYPE_STRING ==
-			      ACPI_GET_OBJECT_TYPE(*sub_object_list)) ||
-			     ((ACPI_TYPE_LOCAL_REFERENCE ==
-			       ACPI_GET_OBJECT_TYPE(*sub_object_list)) &&
-			      ((*sub_object_list)->reference.class ==
-			       ACPI_REFCLASS_NAME)))) {
-				name_found = TRUE;
-			} else {
-				/* Look at the next element */
-
-				sub_object_list++;
-			}
-		}
-
-		temp_size_needed += (sizeof(struct acpi_pci_routing_table) - 4);
-
-		/* Was a String type found? */
-
-		if (name_found) {
-			if (ACPI_GET_OBJECT_TYPE(*sub_object_list) ==
-			    ACPI_TYPE_STRING) {
-				/*
-				 * The String.Length field does not include the
-				 * terminating NULL, so add 1
-				 */
-				temp_size_needed += ((acpi_size)
-						     (*sub_object_list)->string.
-						     length + 1);
-			} else {
-				temp_size_needed +=
-				    acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
-				if (!temp_size_needed) {
-					return_ACPI_STATUS(AE_BAD_PARAMETER);
-				}
-			}
-		} else {
-			/*
-			 * If no name was found, then this is a NULL, which is
-			 * translated as a u32 zero.
-			 */
-			temp_size_needed += sizeof(u32);
-		}
-
-		/* Round up the size since each element must be aligned */
-
-		temp_size_needed = ACPI_ROUND_UP_TO_64BIT(temp_size_needed);
-
-		/* Point to the next union acpi_operand_object */
-
-		top_object_list++;
-	}
-
-	/*
-	 * Add an extra element to the end of the list, essentially a
-	 * NULL terminator
-	 */
-	*buffer_size_needed =
-	    temp_size_needed + sizeof(struct acpi_pci_routing_table);
-	return_ACPI_STATUS(AE_OK);
-}
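
Two details of the deleted rscalc.c are worth calling out. Sizing the IRQ and DMA descriptors relies on counting the bits set in their interrupt/channel masks, which acpi_rs_count_set_bits() does by clearing the lowest set bit on each pass; and every computed struct size is rounded up to the native word size so the internal descriptors stay aligned. The bit-counting loop, as a small standalone example (the main() is only for demonstration):

#include <stdio.h>
#include <stdint.h>

/* Count set bits by repeatedly clearing the lowest set bit,
 * as acpi_rs_count_set_bits() does for IRQ/DMA masks. */
static uint8_t count_set_bits(uint16_t mask)
{
	uint8_t bits_set;

	for (bits_set = 0; mask; bits_set++)
		mask &= (uint16_t)(mask - 1);	/* clear lowest set bit */

	return bits_set;
}

int main(void)
{
	/* IRQ mask with IRQs 3, 4 and 7 set -> prints 3 */
	printf("%u\n", count_set_bits(0x0098));
	return 0;
}
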
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c
deleted file mode 100644
index c0bbfa2..0000000
--- a/drivers/acpi/resources/rscreate.c
+++ /dev/null
@@ -1,467 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: rscreate - Create resource lists/tables
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acresrc.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_RESOURCES
-ACPI_MODULE_NAME("rscreate")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_create_resource_list
- *
- * PARAMETERS:  aml_buffer          - Pointer to the resource byte stream
- *              output_buffer       - Pointer to the user's buffer
- *
- * RETURN:      Status: AE_OK if okay, else a valid acpi_status code
- *              If output_buffer is not large enough, output_buffer->Length
- *              indicates how large output_buffer should be, else it
- *              indicates how many u8 elements of output_buffer are valid.
- *
- * DESCRIPTION: Takes the byte stream returned from a _CRS, _PRS control method
- *              execution and parses the stream to create a linked list
- *              of device resources.
- *
- ******************************************************************************/
-acpi_status
-acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
-			     struct acpi_buffer *output_buffer)
-{
-
-	acpi_status status;
-	u8 *aml_start;
-	acpi_size list_size_needed = 0;
-	u32 aml_buffer_length;
-	void *resource;
-
-	ACPI_FUNCTION_TRACE(rs_create_resource_list);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlBuffer = %p\n", aml_buffer));
-
-	/* Params already validated, so we don't re-validate here */
-
-	aml_buffer_length = aml_buffer->buffer.length;
-	aml_start = aml_buffer->buffer.pointer;
-
-	/*
-	 * Pass the aml_buffer into a module that can calculate
-	 * the buffer size needed for the linked list
-	 */
-	status = acpi_rs_get_list_length(aml_start, aml_buffer_length,
-					 &list_size_needed);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Status=%X ListSizeNeeded=%X\n",
-			  status, (u32) list_size_needed));
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Validate/Allocate/Clear caller buffer */
-
-	status = acpi_ut_initialize_buffer(output_buffer, list_size_needed);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Do the conversion */
-
-	resource = output_buffer->pointer;
-	status = acpi_ut_walk_aml_resources(aml_start, aml_buffer_length,
-					    acpi_rs_convert_aml_to_resources,
-					    &resource);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
-			  output_buffer->pointer, (u32) output_buffer->length));
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_create_pci_routing_table
- *
- * PARAMETERS:  package_object          - Pointer to an union acpi_operand_object
- *                                        package
- *              output_buffer           - Pointer to the user's buffer
- *
- * RETURN:      Status  AE_OK if okay, else a valid acpi_status code.
- *              If the output_buffer is too small, the error will be
- *              AE_BUFFER_OVERFLOW and output_buffer->Length will point
- *              to the size buffer needed.
- *
- * DESCRIPTION: Takes the union acpi_operand_object package and creates a
- *              linked list of PCI interrupt descriptions
- *
- * NOTE: It is the caller's responsibility to ensure that the start of the
- * output buffer is aligned properly (if necessary).
- *
- ******************************************************************************/
-
-acpi_status
-acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
-				 struct acpi_buffer *output_buffer)
-{
-	u8 *buffer;
-	union acpi_operand_object **top_object_list;
-	union acpi_operand_object **sub_object_list;
-	union acpi_operand_object *obj_desc;
-	acpi_size buffer_size_needed = 0;
-	u32 number_of_elements;
-	u32 index;
-	struct acpi_pci_routing_table *user_prt;
-	struct acpi_namespace_node *node;
-	acpi_status status;
-	struct acpi_buffer path_buffer;
-
-	ACPI_FUNCTION_TRACE(rs_create_pci_routing_table);
-
-	/* Params already validated, so we don't re-validate here */
-
-	/* Get the required buffer length */
-
-	status = acpi_rs_get_pci_routing_table_length(package_object,
-						      &buffer_size_needed);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "BufferSizeNeeded = %X\n",
-			  (u32) buffer_size_needed));
-
-	/* Validate/Allocate/Clear caller buffer */
-
-	status = acpi_ut_initialize_buffer(output_buffer, buffer_size_needed);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * Loop through the ACPI_INTERNAL_OBJECTS - Each object should be a
-	 * package that in turn contains an acpi_integer Address, a u8 Pin,
-	 * a Name, and a u8 source_index.
-	 */
-	top_object_list = package_object->package.elements;
-	number_of_elements = package_object->package.count;
-	buffer = output_buffer->pointer;
-	user_prt = ACPI_CAST_PTR(struct acpi_pci_routing_table, buffer);
-
-	for (index = 0; index < number_of_elements; index++) {
-		int source_name_index = 2;
-		int source_index_index = 3;
-
-		/*
-		 * Point user_prt past this current structure
-		 *
-		 * NOTE: On the first iteration, user_prt->Length will
-		 * be zero because we cleared the return buffer earlier
-		 */
-		buffer += user_prt->length;
-		user_prt = ACPI_CAST_PTR(struct acpi_pci_routing_table, buffer);
-
-		/*
-		 * Fill in the Length field with the information we have at this point.
-		 * The minus four is to subtract the size of the u8 Source[4] member
-		 * because it is added below.
-		 */
-		user_prt->length = (sizeof(struct acpi_pci_routing_table) - 4);
-
-		/* Each element of the top-level package must also be a package */
-
-		if (ACPI_GET_OBJECT_TYPE(*top_object_list) != ACPI_TYPE_PACKAGE) {
-			ACPI_ERROR((AE_INFO,
-				    "(PRT[%X]) Need sub-package, found %s",
-				    index,
-				    acpi_ut_get_object_type_name
-				    (*top_object_list)));
-			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
-		}
-
-		/* Each sub-package must be of length 4 */
-
-		if ((*top_object_list)->package.count != 4) {
-			ACPI_ERROR((AE_INFO,
-				    "(PRT[%X]) Need package of length 4, found length %d",
-				    index, (*top_object_list)->package.count));
-			return_ACPI_STATUS(AE_AML_PACKAGE_LIMIT);
-		}
-
-		/*
-		 * Dereference the sub-package.
-		 * The sub_object_list will now point to an array of the four IRQ
-		 * elements: [Address, Pin, Source, source_index]
-		 */
-		sub_object_list = (*top_object_list)->package.elements;
-
-		/* 1) First subobject: Dereference the PRT.Address */
-
-		obj_desc = sub_object_list[0];
-		if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) {
-			ACPI_ERROR((AE_INFO,
-				    "(PRT[%X].Address) Need Integer, found %s",
-				    index,
-				    acpi_ut_get_object_type_name(obj_desc)));
-			return_ACPI_STATUS(AE_BAD_DATA);
-		}
-
-		user_prt->address = obj_desc->integer.value;
-
-		/* 2) Second subobject: Dereference the PRT.Pin */
-
-		obj_desc = sub_object_list[1];
-		if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) {
-			ACPI_ERROR((AE_INFO,
-				    "(PRT[%X].Pin) Need Integer, found %s",
-				    index,
-				    acpi_ut_get_object_type_name(obj_desc)));
-			return_ACPI_STATUS(AE_BAD_DATA);
-		}
-
-		/*
-		 * If BIOS erroneously reversed the _PRT source_name and source_index,
-		 * then reverse them back.
-		 */
-		if (ACPI_GET_OBJECT_TYPE(sub_object_list[3]) !=
-		    ACPI_TYPE_INTEGER) {
-			if (acpi_gbl_enable_interpreter_slack) {
-				source_name_index = 3;
-				source_index_index = 2;
-				printk(KERN_WARNING
-				       "ACPI: Handling Garbled _PRT entry\n");
-			} else {
-				ACPI_ERROR((AE_INFO,
-					    "(PRT[%X].source_index) Need Integer, found %s",
-					    index,
-					    acpi_ut_get_object_type_name
-					    (sub_object_list[3])));
-				return_ACPI_STATUS(AE_BAD_DATA);
-			}
-		}
-
-		user_prt->pin = (u32) obj_desc->integer.value;
-
-		/*
-		 * If the BIOS has erroneously reversed the _PRT source_name (index 2)
-		 * and the source_index (index 3), fix it. _PRT is important enough to
-		 * work around this BIOS error. This also provides compatibility with
-		 * other ACPI implementations.
-		 */
-		obj_desc = sub_object_list[3];
-		if (!obj_desc
-		    || (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER)) {
-			sub_object_list[3] = sub_object_list[2];
-			sub_object_list[2] = obj_desc;
-
-			ACPI_WARNING((AE_INFO,
-				      "(PRT[%X].Source) SourceName and SourceIndex are reversed, fixed",
-				      index));
-		}
-
-		/*
-		 * 3) Third subobject: Dereference the PRT.source_name
-		 * The name may be unresolved (slack mode), so allow a null object
-		 */
-		obj_desc = sub_object_list[source_name_index];
-		if (obj_desc) {
-			switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-			case ACPI_TYPE_LOCAL_REFERENCE:
-
-				if (obj_desc->reference.class !=
-				    ACPI_REFCLASS_NAME) {
-					ACPI_ERROR((AE_INFO,
-						    "(PRT[%X].Source) Need name, found Reference Class %X",
-						    index,
-						    obj_desc->reference.class));
-					return_ACPI_STATUS(AE_BAD_DATA);
-				}
-
-				node = obj_desc->reference.node;
-
-				/* Use *remaining* length of the buffer as max for pathname */
-
-				path_buffer.length = output_buffer->length -
-				    (u32) ((u8 *) user_prt->source -
-					   (u8 *) output_buffer->pointer);
-				path_buffer.pointer = user_prt->source;
-
-				status =
-				    acpi_ns_handle_to_pathname((acpi_handle)
-							       node,
-							       &path_buffer);
-
-				/* +1 to include null terminator */
-
-				user_prt->length +=
-				    (u32) ACPI_STRLEN(user_prt->source) + 1;
-				break;
-
-			case ACPI_TYPE_STRING:
-
-				ACPI_STRCPY(user_prt->source,
-					    obj_desc->string.pointer);
-
-				/*
-				 * Add to the Length field the length of the string
-				 * (add 1 for terminator)
-				 */
-				user_prt->length += obj_desc->string.length + 1;
-				break;
-
-			case ACPI_TYPE_INTEGER:
-				/*
-				 * If this is a number, then the Source Name is NULL. Since the
-				 * entire buffer was zeroed out, we can leave this alone.
-				 *
-				 * Add to the Length field the length of the u32 NULL
-				 */
-				user_prt->length += sizeof(u32);
-				break;
-
-			default:
-
-				ACPI_ERROR((AE_INFO,
-					    "(PRT[%X].Source) Need Ref/String/Integer, found %s",
-					    index,
-					    acpi_ut_get_object_type_name
-					    (obj_desc)));
-				return_ACPI_STATUS(AE_BAD_DATA);
-			}
-		}
-
-		/* Now align the current length */
-
-		user_prt->length =
-		    (u32) ACPI_ROUND_UP_TO_64BIT(user_prt->length);
-
-		/* 4) Fourth subobject: Dereference the PRT.source_index */
-
-		obj_desc = sub_object_list[source_index_index];
-		if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) {
-			ACPI_ERROR((AE_INFO,
-				    "(PRT[%X].SourceIndex) Need Integer, found %s",
-				    index,
-				    acpi_ut_get_object_type_name(obj_desc)));
-			return_ACPI_STATUS(AE_BAD_DATA);
-		}
-
-		user_prt->source_index = (u32) obj_desc->integer.value;
-
-		/* Point to the next union acpi_operand_object in the top level package */
-
-		top_object_list++;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
-			  output_buffer->pointer, (u32) output_buffer->length));
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_create_aml_resources
- *
- * PARAMETERS:  linked_list_buffer      - Pointer to the resource linked list
- *              output_buffer           - Pointer to the user's buffer
- *
- * RETURN:      Status  AE_OK if okay, else a valid acpi_status code.
- *              If the output_buffer is too small, the error will be
- *              AE_BUFFER_OVERFLOW and output_buffer->Length will point
- *              to the size buffer needed.
- *
- * DESCRIPTION: Takes the linked list of device resources and
- *              creates a bytestream to be used as input for the
- *              _SRS control method.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
-			     struct acpi_buffer *output_buffer)
-{
-	acpi_status status;
-	acpi_size aml_size_needed = 0;
-
-	ACPI_FUNCTION_TRACE(rs_create_aml_resources);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "LinkedListBuffer = %p\n",
-			  linked_list_buffer));
-
-	/*
-	 * Params already validated, so we don't re-validate here
-	 *
-	 * Pass the linked_list_buffer into a module that calculates
-	 * the buffer size needed for the byte stream.
-	 */
-	status = acpi_rs_get_aml_length(linked_list_buffer, &aml_size_needed);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n",
-			  (u32) aml_size_needed,
-			  acpi_format_exception(status)));
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Validate/Allocate/Clear caller buffer */
-
-	status = acpi_ut_initialize_buffer(output_buffer, aml_size_needed);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Do the conversion */
-
-	status =
-	    acpi_rs_convert_resources_to_aml(linked_list_buffer,
-					     aml_size_needed,
-					     output_buffer->pointer);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
-			  output_buffer->pointer, (u32) output_buffer->length));
-	return_ACPI_STATUS(AE_OK);
-}
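
All three create routines in the deleted rscreate.c share the same two-pass shape: first compute the required output size (acpi_rs_get_list_length, acpi_rs_get_pci_routing_table_length or acpi_rs_get_aml_length), then let acpi_ut_initialize_buffer() validate or allocate the caller's buffer, and only then run the actual conversion. A generic sketch of that pattern follows; the names, status codes and calloc-based allocation are stand-ins, not the ACPICA API.

#include <stddef.h>
#include <stdlib.h>

/* Hypothetical status codes and buffer type standing in for acpi_status
 * and struct acpi_buffer. */
typedef int status_t;
#define OK		0
#define ERR_NO_MEMORY	(-1)

struct out_buffer {
	size_t length;
	void *pointer;
};

/* Two-pass create: size the result, prepare the buffer, then convert. */
status_t create_list(const void *input, size_t input_len,
		     size_t (*size_needed)(const void *, size_t),
		     status_t (*convert)(const void *, size_t, void *),
		     struct out_buffer *out)
{
	size_t needed = size_needed(input, input_len);	/* pass 1: sizing */

	out->pointer = calloc(1, needed);		/* prepare caller buffer */
	if (!out->pointer)
		return ERR_NO_MEMORY;
	out->length = needed;

	return convert(input, input_len, out->pointer);	/* pass 2: conversion */
}

Separating sizing from conversion is what lets callers either supply a preallocated buffer or have one allocated at exactly the required length.
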
diff --git a/drivers/acpi/resources/rsdump.c b/drivers/acpi/resources/rsdump.c
deleted file mode 100644
index 6bbbb7b..0000000
--- a/drivers/acpi/resources/rsdump.c
+++ /dev/null
@@ -1,770 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: rsdump - Functions to display the resource structures.
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acresrc.h>
-
-#define _COMPONENT          ACPI_RESOURCES
-ACPI_MODULE_NAME("rsdump")
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-/* Local prototypes */
-static void acpi_rs_out_string(char *title, char *value);
-
-static void acpi_rs_out_integer8(char *title, u8 value);
-
-static void acpi_rs_out_integer16(char *title, u16 value);
-
-static void acpi_rs_out_integer32(char *title, u32 value);
-
-static void acpi_rs_out_integer64(char *title, u64 value);
-
-static void acpi_rs_out_title(char *title);
-
-static void acpi_rs_dump_byte_list(u16 length, u8 * data);
-
-static void acpi_rs_dump_dword_list(u8 length, u32 * data);
-
-static void acpi_rs_dump_short_byte_list(u8 length, u8 * data);
-
-static void
-acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source);
-
-static void acpi_rs_dump_address_common(union acpi_resource_data *resource);
-
-static void
-acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table);
-
-#define ACPI_RSD_OFFSET(f)          (u8) ACPI_OFFSET (union acpi_resource_data,f)
-#define ACPI_PRT_OFFSET(f)          (u8) ACPI_OFFSET (struct acpi_pci_routing_table,f)
-#define ACPI_RSD_TABLE_SIZE(name)   (sizeof(name) / sizeof (struct acpi_rsdump_info))
-
-/*******************************************************************************
- *
- * Resource Descriptor info tables
- *
- * Note: The first table entry must be a Title or Literal and must contain
- * the table length (number of table entries)
- *
- ******************************************************************************/
-
-struct acpi_rsdump_info acpi_rs_dump_irq[7] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_irq), "IRQ", NULL},
-	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.descriptor_length),
-	 "Descriptor Length", NULL},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.triggering), "Triggering",
-	 acpi_gbl_he_decode},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity",
-	 acpi_gbl_ll_decode},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.sharable), "Sharing",
-	 acpi_gbl_shr_decode},
-	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.interrupt_count),
-	 "Interrupt Count", NULL},
-	{ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(irq.interrupts[0]),
-	 "Interrupt List", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_dma[6] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_dma), "DMA", NULL},
-	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.type), "Speed",
-	 acpi_gbl_typ_decode},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(dma.bus_master), "Mastering",
-	 acpi_gbl_bm_decode},
-	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.transfer), "Transfer Type",
-	 acpi_gbl_siz_decode},
-	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(dma.channel_count), "Channel Count",
-	 NULL},
-	{ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(dma.channels[0]), "Channel List",
-	 NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_start_dpf[4] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_start_dpf),
-	 "Start-Dependent-Functions", NULL},
-	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(start_dpf.descriptor_length),
-	 "Descriptor Length", NULL},
-	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.compatibility_priority),
-	 "Compatibility Priority", acpi_gbl_config_decode},
-	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.performance_robustness),
-	 "Performance/Robustness", acpi_gbl_config_decode}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_end_dpf[1] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_dpf),
-	 "End-Dependent-Functions", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_io[6] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io), "I/O", NULL},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(io.io_decode), "Address Decoding",
-	 acpi_gbl_io_decode},
-	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.minimum), "Address Minimum", NULL},
-	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.maximum), "Address Maximum", NULL},
-	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.alignment), "Alignment", NULL},
-	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.address_length), "Address Length",
-	 NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_fixed_io[3] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_io),
-	 "Fixed I/O", NULL},
-	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_io.address), "Address", NULL},
-	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_io.address_length),
-	 "Address Length", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_vendor[3] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_vendor),
-	 "Vendor Specific", NULL},
-	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(vendor.byte_length), "Length", NULL},
-	{ACPI_RSD_LONGLIST, ACPI_RSD_OFFSET(vendor.byte_data[0]), "Vendor Data",
-	 NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_end_tag[1] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_tag), "EndTag",
-	 NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_memory24[6] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory24),
-	 "24-Bit Memory Range", NULL},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory24.write_protect),
-	 "Write Protect", acpi_gbl_rw_decode},
-	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.minimum), "Address Minimum",
-	 NULL},
-	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.maximum), "Address Maximum",
-	 NULL},
-	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.alignment), "Alignment",
-	 NULL},
-	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.address_length),
-	 "Address Length", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_memory32[6] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory32),
-	 "32-Bit Memory Range", NULL},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory32.write_protect),
-	 "Write Protect", acpi_gbl_rw_decode},
-	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.minimum), "Address Minimum",
-	 NULL},
-	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.maximum), "Address Maximum",
-	 NULL},
-	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.alignment), "Alignment",
-	 NULL},
-	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.address_length),
-	 "Address Length", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_fixed_memory32[4] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_memory32),
-	 "32-Bit Fixed Memory Range", NULL},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(fixed_memory32.write_protect),
-	 "Write Protect", acpi_gbl_rw_decode},
-	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address), "Address",
-	 NULL},
-	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address_length),
-	 "Address Length", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_address16[8] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16),
-	 "16-Bit WORD Address Space", NULL},
-	{ACPI_RSD_ADDRESS, 0, NULL, NULL},
-	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.granularity), "Granularity",
-	 NULL},
-	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.minimum), "Address Minimum",
-	 NULL},
-	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.maximum), "Address Maximum",
-	 NULL},
-	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.translation_offset),
-	 "Translation Offset", NULL},
-	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address_length),
-	 "Address Length", NULL},
-	{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_address32[8] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32),
-	 "32-Bit DWORD Address Space", NULL},
-	{ACPI_RSD_ADDRESS, 0, NULL, NULL},
-	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.granularity), "Granularity",
-	 NULL},
-	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.minimum), "Address Minimum",
-	 NULL},
-	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.maximum), "Address Maximum",
-	 NULL},
-	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.translation_offset),
-	 "Translation Offset", NULL},
-	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address_length),
-	 "Address Length", NULL},
-	{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_address64[8] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64),
-	 "64-Bit QWORD Address Space", NULL},
-	{ACPI_RSD_ADDRESS, 0, NULL, NULL},
-	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.granularity), "Granularity",
-	 NULL},
-	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.minimum), "Address Minimum",
-	 NULL},
-	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.maximum), "Address Maximum",
-	 NULL},
-	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.translation_offset),
-	 "Translation Offset", NULL},
-	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address_length),
-	 "Address Length", NULL},
-	{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_ext_address64[8] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64),
-	 "64-Bit Extended Address Space", NULL},
-	{ACPI_RSD_ADDRESS, 0, NULL, NULL},
-	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.granularity),
-	 "Granularity", NULL},
-	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.minimum),
-	 "Address Minimum", NULL},
-	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.maximum),
-	 "Address Maximum", NULL},
-	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.translation_offset),
-	 "Translation Offset", NULL},
-	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address_length),
-	 "Address Length", NULL},
-	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific),
-	 "Type-Specific Attribute", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_ext_irq[8] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_irq),
-	 "Extended IRQ", NULL},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.producer_consumer),
-	 "Type", acpi_gbl_consume_decode},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.triggering),
-	 "Triggering", acpi_gbl_he_decode},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.polarity), "Polarity",
-	 acpi_gbl_ll_decode},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.sharable), "Sharing",
-	 acpi_gbl_shr_decode},
-	{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(extended_irq.resource_source), NULL,
-	 NULL},
-	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(extended_irq.interrupt_count),
-	 "Interrupt Count", NULL},
-	{ACPI_RSD_DWORDLIST, ACPI_RSD_OFFSET(extended_irq.interrupts[0]),
-	 "Interrupt List", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_generic_reg),
-	 "Generic Register", NULL},
-	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.space_id), "Space ID",
-	 NULL},
-	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_width), "Bit Width",
-	 NULL},
-	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_offset), "Bit Offset",
-	 NULL},
-	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.access_size),
-	 "Access Size", NULL},
-	{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
-};
-
-/*
- * Tables used for common address descriptor flag fields
- */
-static struct acpi_rsdump_info acpi_rs_dump_general_flags[5] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_general_flags), NULL,
-	 NULL},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.producer_consumer),
-	 "Consumer/Producer", acpi_gbl_consume_decode},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.decode), "Address Decode",
-	 acpi_gbl_dec_decode},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.min_address_fixed),
-	 "Min Relocatability", acpi_gbl_min_decode},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.max_address_fixed),
-	 "Max Relocatability", acpi_gbl_max_decode}
-};
-
-static struct acpi_rsdump_info acpi_rs_dump_memory_flags[5] = {
-	{ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory_flags),
-	 "Resource Type", (void *)"Memory Range"},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.write_protect),
-	 "Write Protect", acpi_gbl_rw_decode},
-	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.caching),
-	 "Caching", acpi_gbl_mem_decode},
-	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.range_type),
-	 "Range Type", acpi_gbl_mtp_decode},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.translation),
-	 "Translation", acpi_gbl_ttp_decode}
-};
-
-static struct acpi_rsdump_info acpi_rs_dump_io_flags[4] = {
-	{ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io_flags),
-	 "Resource Type", (void *)"I/O Range"},
-	{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.io.range_type),
-	 "Range Type", acpi_gbl_rng_decode},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation),
-	 "Translation", acpi_gbl_ttp_decode},
-	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation_type),
-	 "Translation Type", acpi_gbl_trs_decode}
-};
-
-/*
- * Table used to dump _PRT contents
- */
-static struct acpi_rsdump_info acpi_rs_dump_prt[5] = {
-	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_prt), NULL, NULL},
-	{ACPI_RSD_UINT64, ACPI_PRT_OFFSET(address), "Address", NULL},
-	{ACPI_RSD_UINT32, ACPI_PRT_OFFSET(pin), "Pin", NULL},
-	{ACPI_RSD_STRING, ACPI_PRT_OFFSET(source[0]), "Source", NULL},
-	{ACPI_RSD_UINT32, ACPI_PRT_OFFSET(source_index), "Source Index", NULL}
-};
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_dump_descriptor
- *
- * PARAMETERS:  Resource            - Pointer to the resource data
- *              Table               - Pointer to the dump info table
- *
- * RETURN:      None
- *
- * DESCRIPTION: Dump a resource descriptor based on a dump info table.
- *
- ******************************************************************************/
-
-static void
-acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
-{
-	u8 *target = NULL;
-	u8 *previous_target;
-	char *name;
-	u8 count;
-
-	/* First table entry must contain the table length (# of table entries) */
-
-	count = table->offset;
-
-	while (count) {
-		previous_target = target;
-		target = ACPI_ADD_PTR(u8, resource, table->offset);
-		name = table->name;
-
-		switch (table->opcode) {
-		case ACPI_RSD_TITLE:
-			/*
-			 * Optional resource title
-			 */
-			if (table->name) {
-				acpi_os_printf("%s Resource\n", name);
-			}
-			break;
-
-			/* Strings */
-
-		case ACPI_RSD_LITERAL:
-			acpi_rs_out_string(name,
-					   ACPI_CAST_PTR(char, table->pointer));
-			break;
-
-		case ACPI_RSD_STRING:
-			acpi_rs_out_string(name, ACPI_CAST_PTR(char, target));
-			break;
-
-			/* Data items, 8/16/32/64 bit */
-
-		case ACPI_RSD_UINT8:
-			acpi_rs_out_integer8(name, ACPI_GET8(target));
-			break;
-
-		case ACPI_RSD_UINT16:
-			acpi_rs_out_integer16(name, ACPI_GET16(target));
-			break;
-
-		case ACPI_RSD_UINT32:
-			acpi_rs_out_integer32(name, ACPI_GET32(target));
-			break;
-
-		case ACPI_RSD_UINT64:
-			acpi_rs_out_integer64(name, ACPI_GET64(target));
-			break;
-
-			/* Flags: 1-bit and 2-bit flags supported */
-
-		case ACPI_RSD_1BITFLAG:
-			acpi_rs_out_string(name, ACPI_CAST_PTR(char,
-							       table->
-							       pointer[*target &
-								       0x01]));
-			break;
-
-		case ACPI_RSD_2BITFLAG:
-			acpi_rs_out_string(name, ACPI_CAST_PTR(char,
-							       table->
-							       pointer[*target &
-								       0x03]));
-			break;
-
-		case ACPI_RSD_SHORTLIST:
-			/*
-			 * Short byte list (single line output) for DMA and IRQ resources
-			 * Note: The list length is obtained from the previous table entry
-			 */
-			if (previous_target) {
-				acpi_rs_out_title(name);
-				acpi_rs_dump_short_byte_list(*previous_target,
-							     target);
-			}
-			break;
-
-		case ACPI_RSD_LONGLIST:
-			/*
-			 * Long byte list for Vendor resource data
-			 * Note: The list length is obtained from the previous table entry
-			 */
-			if (previous_target) {
-				acpi_rs_dump_byte_list(ACPI_GET16
-						       (previous_target),
-						       target);
-			}
-			break;
-
-		case ACPI_RSD_DWORDLIST:
-			/*
-			 * Dword list for Extended Interrupt resources
-			 * Note: The list length is obtained from the previous table entry
-			 */
-			if (previous_target) {
-				acpi_rs_dump_dword_list(*previous_target,
-							ACPI_CAST_PTR(u32,
-								      target));
-			}
-			break;
-
-		case ACPI_RSD_ADDRESS:
-			/*
-			 * Common flags for all Address resources
-			 */
-			acpi_rs_dump_address_common(ACPI_CAST_PTR
-						    (union acpi_resource_data,
-						     target));
-			break;
-
-		case ACPI_RSD_SOURCE:
-			/*
-			 * Optional resource_source for Address resources
-			 */
-			acpi_rs_dump_resource_source(ACPI_CAST_PTR(struct
-								   acpi_resource_source,
-								   target));
-			break;
-
-		default:
-			acpi_os_printf("**** Invalid table opcode [%X] ****\n",
-				       table->opcode);
-			return;
-		}
-
-		table++;
-		count--;
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_dump_resource_source
- *
- * PARAMETERS:  resource_source     - Pointer to a Resource Source struct
- *
- * RETURN:      None
- *
- * DESCRIPTION: Common routine for dumping the optional resource_source and the
- *              corresponding resource_source_index.
- *
- ******************************************************************************/
-
-static void
-acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source)
-{
-	ACPI_FUNCTION_ENTRY();
-
-	if (resource_source->index == 0xFF) {
-		return;
-	}
-
-	acpi_rs_out_integer8("Resource Source Index", resource_source->index);
-
-	acpi_rs_out_string("Resource Source",
-			   resource_source->string_ptr ?
-			   resource_source->string_ptr : "[Not Specified]");
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_dump_address_common
- *
- * PARAMETERS:  Resource        - Pointer to an internal resource descriptor
- *
- * RETURN:      None
- *
- * DESCRIPTION: Dump the fields that are common to all Address resource
- *              descriptors
- *
- ******************************************************************************/
-
-static void acpi_rs_dump_address_common(union acpi_resource_data *resource)
-{
-	ACPI_FUNCTION_ENTRY();
-
-	/* Decode the type-specific flags */
-
-	switch (resource->address.resource_type) {
-	case ACPI_MEMORY_RANGE:
-
-		acpi_rs_dump_descriptor(resource, acpi_rs_dump_memory_flags);
-		break;
-
-	case ACPI_IO_RANGE:
-
-		acpi_rs_dump_descriptor(resource, acpi_rs_dump_io_flags);
-		break;
-
-	case ACPI_BUS_NUMBER_RANGE:
-
-		acpi_rs_out_string("Resource Type", "Bus Number Range");
-		break;
-
-	default:
-
-		acpi_rs_out_integer8("Resource Type",
-				     (u8) resource->address.resource_type);
-		break;
-	}
-
-	/* Decode the general flags */
-
-	acpi_rs_dump_descriptor(resource, acpi_rs_dump_general_flags);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_dump_resource_list
- *
- * PARAMETERS:  resource_list       - Pointer to a resource descriptor list
- *
- * RETURN:      None
- *
- * DESCRIPTION: Dispatches the structure to the correct dump routine.
- *
- ******************************************************************************/
-
-void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
-{
-	u32 count = 0;
-	u32 type;
-
-	ACPI_FUNCTION_ENTRY();
-
-	if (!(acpi_dbg_level & ACPI_LV_RESOURCES)
-	    || !(_COMPONENT & acpi_dbg_layer)) {
-		return;
-	}
-
-	/* Walk list and dump all resource descriptors (END_TAG terminates) */
-
-	do {
-		acpi_os_printf("\n[%02X] ", count);
-		count++;
-
-		/* Validate Type before dispatch */
-
-		type = resource_list->type;
-		if (type > ACPI_RESOURCE_TYPE_MAX) {
-			acpi_os_printf
-			    ("Invalid descriptor type (%X) in resource list\n",
-			     resource_list->type);
-			return;
-		}
-
-		/* Dump the resource descriptor */
-
-		acpi_rs_dump_descriptor(&resource_list->data,
-					acpi_gbl_dump_resource_dispatch[type]);
-
-		/* Point to the next resource structure */
-
-		resource_list =
-		    ACPI_ADD_PTR(struct acpi_resource, resource_list,
-				 resource_list->length);
-
-		/* Exit when END_TAG descriptor is reached */
-
-	} while (type != ACPI_RESOURCE_TYPE_END_TAG);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_dump_irq_list
- *
- * PARAMETERS:  route_table     - Pointer to the routing table to dump.
- *
- * RETURN:      None
- *
- * DESCRIPTION: Print IRQ routing table
- *
- ******************************************************************************/
-
-void acpi_rs_dump_irq_list(u8 * route_table)
-{
-	struct acpi_pci_routing_table *prt_element;
-	u8 count;
-
-	ACPI_FUNCTION_ENTRY();
-
-	if (!(acpi_dbg_level & ACPI_LV_RESOURCES)
-	    || !(_COMPONENT & acpi_dbg_layer)) {
-		return;
-	}
-
-	prt_element = ACPI_CAST_PTR(struct acpi_pci_routing_table, route_table);
-
-	/* Dump all table elements, Exit on zero length element */
-
-	for (count = 0; prt_element->length; count++) {
-		acpi_os_printf("\n[%02X] PCI IRQ Routing Table Package\n",
-			       count);
-		acpi_rs_dump_descriptor(prt_element, acpi_rs_dump_prt);
-
-		prt_element = ACPI_ADD_PTR(struct acpi_pci_routing_table,
-					   prt_element, prt_element->length);
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_out*
- *
- * PARAMETERS:  Title       - Name of the resource field
- *              Value       - Value of the resource field
- *
- * RETURN:      None
- *
- * DESCRIPTION: Miscellaneous helper functions to consistently format the
- *              output of the resource dump routines
- *
- ******************************************************************************/
-
-static void acpi_rs_out_string(char *title, char *value)
-{
-	acpi_os_printf("%27s : %s", title, value);
-	if (!*value) {
-		acpi_os_printf("[NULL NAMESTRING]");
-	}
-	acpi_os_printf("\n");
-}
-
-static void acpi_rs_out_integer8(char *title, u8 value)
-{
-	acpi_os_printf("%27s : %2.2X\n", title, value);
-}
-
-static void acpi_rs_out_integer16(char *title, u16 value)
-{
-	acpi_os_printf("%27s : %4.4X\n", title, value);
-}
-
-static void acpi_rs_out_integer32(char *title, u32 value)
-{
-	acpi_os_printf("%27s : %8.8X\n", title, value);
-}
-
-static void acpi_rs_out_integer64(char *title, u64 value)
-{
-	acpi_os_printf("%27s : %8.8X%8.8X\n", title, ACPI_FORMAT_UINT64(value));
-}
-
-static void acpi_rs_out_title(char *title)
-{
-	acpi_os_printf("%27s : ", title);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_dump*List
- *
- * PARAMETERS:  Length      - Number of elements in the list
- *              Data        - Start of the list
- *
- * RETURN:      None
- *
- * DESCRIPTION: Miscellaneous functions to dump lists of raw data
- *
- ******************************************************************************/
-
-static void acpi_rs_dump_byte_list(u16 length, u8 * data)
-{
-	u8 i;
-
-	for (i = 0; i < length; i++) {
-		acpi_os_printf("%25s%2.2X : %2.2X\n", "Byte", i, data[i]);
-	}
-}
-
-static void acpi_rs_dump_short_byte_list(u8 length, u8 * data)
-{
-	u8 i;
-
-	for (i = 0; i < length; i++) {
-		acpi_os_printf("%X ", data[i]);
-	}
-	acpi_os_printf("\n");
-}
-
-static void acpi_rs_dump_dword_list(u8 length, u32 * data)
-{
-	u8 i;
-
-	for (i = 0; i < length; i++) {
-		acpi_os_printf("%25s%2.2X : %8.8X\n", "Dword", i, data[i]);
-	}
-}
-
-#endif
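For readers following the rsdump.c removal above: the dump loop walks the packed struct acpi_resource list by adding each element's length to the current pointer and stops at the END_TAG element. The sketch below restates that traversal in plain C; rs_header, RS_TYPE_END_TAG and walk_resource_list are illustrative stand-ins chosen here, not ACPI CA symbols.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct acpi_resource; field names are illustrative. */
struct rs_header {
	uint32_t type;		/* resource type; END_TAG terminates the list */
	uint32_t length;	/* total length of this element, header included */
};

#define RS_TYPE_END_TAG	0x07	/* mirrors ACPI_RESOURCE_TYPE_END_TAG */
#define RS_TYPE_MAX	0x10	/* mirrors ACPI_RESOURCE_TYPE_MAX */

static void walk_resource_list(uint8_t *buffer)
{
	struct rs_header *res = (struct rs_header *)buffer;
	unsigned int count = 0;
	uint32_t type;

	do {
		type = res->type;
		if (type > RS_TYPE_MAX) {
			printf("invalid descriptor type %X at [%02X]\n",
			       (unsigned int)type, count);
			return;
		}
		printf("[%02X] type %02X, length %u\n", count++,
		       (unsigned int)type, (unsigned int)res->length);

		/* same effect as ACPI_ADD_PTR(struct acpi_resource, res, res->length) */
		res = (struct rs_header *)((uint8_t *)res + res->length);
	} while (type != RS_TYPE_END_TAG);
}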
diff --git a/drivers/acpi/resources/rsinfo.c b/drivers/acpi/resources/rsinfo.c
deleted file mode 100644
index 3f0a1fe..0000000
--- a/drivers/acpi/resources/rsinfo.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: rsinfo - Dispatch and Info tables
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acresrc.h>
-
-#define _COMPONENT          ACPI_RESOURCES
-ACPI_MODULE_NAME("rsinfo")
-
-/*
- * Resource dispatch and information tables. Any new resource types (either
- * Large or Small) must be reflected in each of these tables, so they are here
- * in one place.
- *
- * The tables for Large descriptors are indexed by bits 6:0 of the AML
- * descriptor type byte. The tables for Small descriptors are indexed by
- * bits 6:3 of the descriptor byte. The tables for internal resource
- * descriptors are indexed by the acpi_resource_type field.
- */
-/* Dispatch table for resource-to-AML (Set Resource) conversion functions */
-struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[] = {
-	acpi_rs_set_irq,	/* 0x00, ACPI_RESOURCE_TYPE_IRQ */
-	acpi_rs_convert_dma,	/* 0x01, ACPI_RESOURCE_TYPE_DMA */
-	acpi_rs_set_start_dpf,	/* 0x02, ACPI_RESOURCE_TYPE_START_DEPENDENT */
-	acpi_rs_convert_end_dpf,	/* 0x03, ACPI_RESOURCE_TYPE_END_DEPENDENT */
-	acpi_rs_convert_io,	/* 0x04, ACPI_RESOURCE_TYPE_IO */
-	acpi_rs_convert_fixed_io,	/* 0x05, ACPI_RESOURCE_TYPE_FIXED_IO */
-	acpi_rs_set_vendor,	/* 0x06, ACPI_RESOURCE_TYPE_VENDOR */
-	acpi_rs_convert_end_tag,	/* 0x07, ACPI_RESOURCE_TYPE_END_TAG */
-	acpi_rs_convert_memory24,	/* 0x08, ACPI_RESOURCE_TYPE_MEMORY24 */
-	acpi_rs_convert_memory32,	/* 0x09, ACPI_RESOURCE_TYPE_MEMORY32 */
-	acpi_rs_convert_fixed_memory32,	/* 0x0A, ACPI_RESOURCE_TYPE_FIXED_MEMORY32 */
-	acpi_rs_convert_address16,	/* 0x0B, ACPI_RESOURCE_TYPE_ADDRESS16 */
-	acpi_rs_convert_address32,	/* 0x0C, ACPI_RESOURCE_TYPE_ADDRESS32 */
-	acpi_rs_convert_address64,	/* 0x0D, ACPI_RESOURCE_TYPE_ADDRESS64 */
-	acpi_rs_convert_ext_address64,	/* 0x0E, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
-	acpi_rs_convert_ext_irq,	/* 0x0F, ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
-	acpi_rs_convert_generic_reg	/* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
-};
-
-/* Dispatch tables for AML-to-resource (Get Resource) conversion functions */
-
-struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
-	/* Small descriptors */
-
-	NULL,			/* 0x00, Reserved */
-	NULL,			/* 0x01, Reserved */
-	NULL,			/* 0x02, Reserved */
-	NULL,			/* 0x03, Reserved */
-	acpi_rs_get_irq,	/* 0x04, ACPI_RESOURCE_NAME_IRQ */
-	acpi_rs_convert_dma,	/* 0x05, ACPI_RESOURCE_NAME_DMA */
-	acpi_rs_get_start_dpf,	/* 0x06, ACPI_RESOURCE_NAME_START_DEPENDENT */
-	acpi_rs_convert_end_dpf,	/* 0x07, ACPI_RESOURCE_NAME_END_DEPENDENT */
-	acpi_rs_convert_io,	/* 0x08, ACPI_RESOURCE_NAME_IO */
-	acpi_rs_convert_fixed_io,	/* 0x09, ACPI_RESOURCE_NAME_FIXED_IO */
-	NULL,			/* 0x0A, Reserved */
-	NULL,			/* 0x0B, Reserved */
-	NULL,			/* 0x0C, Reserved */
-	NULL,			/* 0x0D, Reserved */
-	acpi_rs_get_vendor_small,	/* 0x0E, ACPI_RESOURCE_NAME_VENDOR_SMALL */
-	acpi_rs_convert_end_tag,	/* 0x0F, ACPI_RESOURCE_NAME_END_TAG */
-
-	/* Large descriptors */
-
-	NULL,			/* 0x00, Reserved */
-	acpi_rs_convert_memory24,	/* 0x01, ACPI_RESOURCE_NAME_MEMORY24 */
-	acpi_rs_convert_generic_reg,	/* 0x02, ACPI_RESOURCE_NAME_GENERIC_REGISTER */
-	NULL,			/* 0x03, Reserved */
-	acpi_rs_get_vendor_large,	/* 0x04, ACPI_RESOURCE_NAME_VENDOR_LARGE */
-	acpi_rs_convert_memory32,	/* 0x05, ACPI_RESOURCE_NAME_MEMORY32 */
-	acpi_rs_convert_fixed_memory32,	/* 0x06, ACPI_RESOURCE_NAME_FIXED_MEMORY32 */
-	acpi_rs_convert_address32,	/* 0x07, ACPI_RESOURCE_NAME_ADDRESS32 */
-	acpi_rs_convert_address16,	/* 0x08, ACPI_RESOURCE_NAME_ADDRESS16 */
-	acpi_rs_convert_ext_irq,	/* 0x09, ACPI_RESOURCE_NAME_EXTENDED_IRQ */
-	acpi_rs_convert_address64,	/* 0x0A, ACPI_RESOURCE_NAME_ADDRESS64 */
-	acpi_rs_convert_ext_address64	/* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
-};
-
-#ifdef ACPI_FUTURE_USAGE
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-
-/* Dispatch table for resource dump functions */
-
-struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
-	acpi_rs_dump_irq,	/* ACPI_RESOURCE_TYPE_IRQ */
-	acpi_rs_dump_dma,	/* ACPI_RESOURCE_TYPE_DMA */
-	acpi_rs_dump_start_dpf,	/* ACPI_RESOURCE_TYPE_START_DEPENDENT */
-	acpi_rs_dump_end_dpf,	/* ACPI_RESOURCE_TYPE_END_DEPENDENT */
-	acpi_rs_dump_io,	/* ACPI_RESOURCE_TYPE_IO */
-	acpi_rs_dump_fixed_io,	/* ACPI_RESOURCE_TYPE_FIXED_IO */
-	acpi_rs_dump_vendor,	/* ACPI_RESOURCE_TYPE_VENDOR */
-	acpi_rs_dump_end_tag,	/* ACPI_RESOURCE_TYPE_END_TAG */
-	acpi_rs_dump_memory24,	/* ACPI_RESOURCE_TYPE_MEMORY24 */
-	acpi_rs_dump_memory32,	/* ACPI_RESOURCE_TYPE_MEMORY32 */
-	acpi_rs_dump_fixed_memory32,	/* ACPI_RESOURCE_TYPE_FIXED_MEMORY32 */
-	acpi_rs_dump_address16,	/* ACPI_RESOURCE_TYPE_ADDRESS16 */
-	acpi_rs_dump_address32,	/* ACPI_RESOURCE_TYPE_ADDRESS32 */
-	acpi_rs_dump_address64,	/* ACPI_RESOURCE_TYPE_ADDRESS64 */
-	acpi_rs_dump_ext_address64,	/* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
-	acpi_rs_dump_ext_irq,	/* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
-	acpi_rs_dump_generic_reg,	/* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
-};
-#endif
-
-#endif				/* ACPI_FUTURE_USAGE */
-/*
- * Base sizes for external AML resource descriptors, indexed by internal type.
- * Includes size of the descriptor header (1 byte for small descriptors,
- * 3 bytes for large descriptors)
- */
-const u8 acpi_gbl_aml_resource_sizes[] = {
-	sizeof(struct aml_resource_irq),	/* ACPI_RESOURCE_TYPE_IRQ (optional Byte 3 always created) */
-	sizeof(struct aml_resource_dma),	/* ACPI_RESOURCE_TYPE_DMA */
-	sizeof(struct aml_resource_start_dependent),	/* ACPI_RESOURCE_TYPE_START_DEPENDENT (optional Byte 1 always created) */
-	sizeof(struct aml_resource_end_dependent),	/* ACPI_RESOURCE_TYPE_END_DEPENDENT */
-	sizeof(struct aml_resource_io),	/* ACPI_RESOURCE_TYPE_IO */
-	sizeof(struct aml_resource_fixed_io),	/* ACPI_RESOURCE_TYPE_FIXED_IO */
-	sizeof(struct aml_resource_vendor_small),	/* ACPI_RESOURCE_TYPE_VENDOR */
-	sizeof(struct aml_resource_end_tag),	/* ACPI_RESOURCE_TYPE_END_TAG */
-	sizeof(struct aml_resource_memory24),	/* ACPI_RESOURCE_TYPE_MEMORY24 */
-	sizeof(struct aml_resource_memory32),	/* ACPI_RESOURCE_TYPE_MEMORY32 */
-	sizeof(struct aml_resource_fixed_memory32),	/* ACPI_RESOURCE_TYPE_FIXED_MEMORY32 */
-	sizeof(struct aml_resource_address16),	/* ACPI_RESOURCE_TYPE_ADDRESS16 */
-	sizeof(struct aml_resource_address32),	/* ACPI_RESOURCE_TYPE_ADDRESS32 */
-	sizeof(struct aml_resource_address64),	/* ACPI_RESOURCE_TYPE_ADDRESS64 */
-	sizeof(struct aml_resource_extended_address64),	/* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
-	sizeof(struct aml_resource_extended_irq),	/* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
-	sizeof(struct aml_resource_generic_register)	/* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
-};
-
-const u8 acpi_gbl_resource_struct_sizes[] = {
-	/* Small descriptors */
-
-	0,
-	0,
-	0,
-	0,
-	ACPI_RS_SIZE(struct acpi_resource_irq),
-	ACPI_RS_SIZE(struct acpi_resource_dma),
-	ACPI_RS_SIZE(struct acpi_resource_start_dependent),
-	ACPI_RS_SIZE_MIN,
-	ACPI_RS_SIZE(struct acpi_resource_io),
-	ACPI_RS_SIZE(struct acpi_resource_fixed_io),
-	0,
-	0,
-	0,
-	0,
-	ACPI_RS_SIZE(struct acpi_resource_vendor),
-	ACPI_RS_SIZE_MIN,
-
-	/* Large descriptors */
-
-	0,
-	ACPI_RS_SIZE(struct acpi_resource_memory24),
-	ACPI_RS_SIZE(struct acpi_resource_generic_register),
-	0,
-	ACPI_RS_SIZE(struct acpi_resource_vendor),
-	ACPI_RS_SIZE(struct acpi_resource_memory32),
-	ACPI_RS_SIZE(struct acpi_resource_fixed_memory32),
-	ACPI_RS_SIZE(struct acpi_resource_address32),
-	ACPI_RS_SIZE(struct acpi_resource_address16),
-	ACPI_RS_SIZE(struct acpi_resource_extended_irq),
-	ACPI_RS_SIZE(struct acpi_resource_address64),
-	ACPI_RS_SIZE(struct acpi_resource_extended_address64)
-};
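As the rsinfo.c comment above notes, Small-descriptor entries are indexed by bits 6:3 of the AML type byte and Large-descriptor entries by bits 6:0, with the large entries following the 16 small slots in the combined get-resource table. The helper below only illustrates that indexing scheme; dispatch_index and AML_LARGE_DESCRIPTOR_BIT are names chosen here, not the ACPI CA implementation.

#include <stdint.h>

#define AML_LARGE_DESCRIPTOR_BIT	0x80	/* bit 7 set means a Large descriptor */

/*
 * Map an AML descriptor type byte to an index into a combined dispatch
 * table laid out as 16 Small entries followed by the Large entries.
 */
static unsigned int dispatch_index(uint8_t type_byte)
{
	if (type_byte & AML_LARGE_DESCRIPTOR_BIT) {
		/* Large descriptor: resource name is in bits 6:0 */
		return 16 + (type_byte & 0x7F);
	}

	/* Small descriptor: resource name is in bits 6:3 */
	return (type_byte >> 3) & 0x0F;
}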
diff --git a/drivers/acpi/resources/rsio.c b/drivers/acpi/resources/rsio.c
deleted file mode 100644
index b66d42e..0000000
--- a/drivers/acpi/resources/rsio.c
+++ /dev/null
@@ -1,289 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: rsio - IO and DMA resource descriptors
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acresrc.h>
-
-#define _COMPONENT          ACPI_RESOURCES
-ACPI_MODULE_NAME("rsio")
-
-/*******************************************************************************
- *
- * acpi_rs_convert_io
- *
- ******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_convert_io[5] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_IO,
-	 ACPI_RS_SIZE(struct acpi_resource_io),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_io)},
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_IO,
-	 sizeof(struct aml_resource_io),
-	 0},
-
-	/* Decode flag */
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.io.io_decode),
-	 AML_OFFSET(io.flags),
-	 0},
-	/*
-	 * These fields are contiguous in both the source and destination:
-	 * Address Alignment
-	 * Length
-	 * Minimum Base Address
-	 * Maximum Base Address
-	 */
-	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.io.alignment),
-	 AML_OFFSET(io.alignment),
-	 2},
-
-	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.io.minimum),
-	 AML_OFFSET(io.minimum),
-	 2}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_fixed_io
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_convert_fixed_io[4] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_IO,
-	 ACPI_RS_SIZE(struct acpi_resource_fixed_io),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_io)},
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_IO,
-	 sizeof(struct aml_resource_fixed_io),
-	 0},
-	/*
-	 * These fields are contiguous in both the source and destination:
-	 * Base Address
-	 * Length
-	 */
-	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_io.address_length),
-	 AML_OFFSET(fixed_io.address_length),
-	 1},
-
-	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_io.address),
-	 AML_OFFSET(fixed_io.address),
-	 1}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_generic_reg
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_convert_generic_reg[4] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GENERIC_REGISTER,
-	 ACPI_RS_SIZE(struct acpi_resource_generic_register),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_generic_reg)},
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_GENERIC_REGISTER,
-	 sizeof(struct aml_resource_generic_register),
-	 0},
-	/*
-	 * These fields are contiguous in both the source and destination:
-	 * Address Space ID
-	 * Register Bit Width
-	 * Register Bit Offset
-	 * Access Size
-	 */
-	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.generic_reg.space_id),
-	 AML_OFFSET(generic_reg.address_space_id),
-	 4},
-
-	/* Get the Register Address */
-
-	{ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.generic_reg.address),
-	 AML_OFFSET(generic_reg.address),
-	 1}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_end_dpf
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_convert_end_dpf[2] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_END_DEPENDENT,
-	 ACPI_RS_SIZE_MIN,
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_end_dpf)},
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_END_DEPENDENT,
-	 sizeof(struct aml_resource_end_dependent),
-	 0}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_end_tag
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_convert_end_tag[2] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_END_TAG,
-	 ACPI_RS_SIZE_MIN,
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_end_tag)},
-
-	/*
-	 * Note: The checksum field is set to zero, meaning that the resource
-	 * data is treated as if the checksum operation succeeded.
-	 * (ACPI Spec 1.0b Section 6.4.2.8)
-	 */
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_END_TAG,
-	 sizeof(struct aml_resource_end_tag),
-	 0}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_get_start_dpf
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_get_start_dpf[6] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_START_DEPENDENT,
-	 ACPI_RS_SIZE(struct acpi_resource_start_dependent),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_get_start_dpf)},
-
-	/* Defaults for Compatibility and Performance priorities */
-
-	{ACPI_RSC_SET8, ACPI_RS_OFFSET(data.start_dpf.compatibility_priority),
-	 ACPI_ACCEPTABLE_CONFIGURATION,
-	 2},
-
-	/* Get the descriptor length (0 or 1 for Start Dpf descriptor) */
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.start_dpf.descriptor_length),
-	 AML_OFFSET(start_dpf.descriptor_type),
-	 0},
-
-	/* All done if there is no flag byte present in the descriptor */
-
-	{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 1},
-
-	/* Flag byte is present, get the flags */
-
-	{ACPI_RSC_2BITFLAG,
-	 ACPI_RS_OFFSET(data.start_dpf.compatibility_priority),
-	 AML_OFFSET(start_dpf.flags),
-	 0},
-
-	{ACPI_RSC_2BITFLAG,
-	 ACPI_RS_OFFSET(data.start_dpf.performance_robustness),
-	 AML_OFFSET(start_dpf.flags),
-	 2}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_set_start_dpf
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_set_start_dpf[10] = {
-	/* Start with a default descriptor of length 1 */
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_START_DEPENDENT,
-	 sizeof(struct aml_resource_start_dependent),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_set_start_dpf)},
-
-	/* Set the default flag values */
-
-	{ACPI_RSC_2BITFLAG,
-	 ACPI_RS_OFFSET(data.start_dpf.compatibility_priority),
-	 AML_OFFSET(start_dpf.flags),
-	 0},
-
-	{ACPI_RSC_2BITFLAG,
-	 ACPI_RS_OFFSET(data.start_dpf.performance_robustness),
-	 AML_OFFSET(start_dpf.flags),
-	 2},
-	/*
-	 * All done if the output descriptor length is required to be 1
-	 * (i.e., optimization to 0 bytes cannot be attempted)
-	 */
-	{ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
-	 ACPI_RS_OFFSET(data.start_dpf.descriptor_length),
-	 1},
-
-	/* Set length to 0 bytes (no flags byte) */
-
-	{ACPI_RSC_LENGTH, 0, 0,
-	 sizeof(struct aml_resource_start_dependent_noprio)},
-
-	/*
-	 * All done if the output descriptor length is required to be 0.
-	 *
-	 * TBD: Perhaps we should check for error if input flags are not
-	 * compatible with a 0-byte descriptor.
-	 */
-	{ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
-	 ACPI_RS_OFFSET(data.start_dpf.descriptor_length),
-	 0},
-
-	/* Reset length to 1 byte (descriptor with flags byte) */
-
-	{ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_start_dependent)},
-
-	/*
-	 * All done if flags byte is necessary -- if either priority value
-	 * is not ACPI_ACCEPTABLE_CONFIGURATION
-	 */
-	{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE,
-	 ACPI_RS_OFFSET(data.start_dpf.compatibility_priority),
-	 ACPI_ACCEPTABLE_CONFIGURATION},
-
-	{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE,
-	 ACPI_RS_OFFSET(data.start_dpf.performance_robustness),
-	 ACPI_ACCEPTABLE_CONFIGURATION},
-
-	/* Flag byte is not necessary */
-
-	{ACPI_RSC_LENGTH, 0, 0,
-	 sizeof(struct aml_resource_start_dependent_noprio)}
-};
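The acpi_rs_set_start_dpf table above encodes a small decision sequence: keep the priority/flags byte if the caller forces a descriptor length of 1, drop it if the caller forces 0, and otherwise emit it only when either priority differs from the acceptable default. Below is a plain-C restatement of that ordering, a sketch only; the constant value and function name are assumptions rather than values taken from the ACPI CA headers.

#include <stddef.h>
#include <stdint.h>

#define ACCEPTABLE_CONFIGURATION	0	/* assumed default priority value */

/*
 * Return the AML size of a StartDependentFn descriptor: 1 byte (header only)
 * or 2 bytes (header plus priority/flags byte), following the same order of
 * checks as the conversion table.
 */
static size_t start_dpf_aml_size(uint8_t descriptor_length,
				 uint8_t compatibility, uint8_t performance)
{
	if (descriptor_length == 1)
		return 2;	/* flags byte explicitly requested */
	if (descriptor_length == 0)
		return 1;	/* flags byte explicitly suppressed */
	if (compatibility != ACCEPTABLE_CONFIGURATION ||
	    performance != ACCEPTABLE_CONFIGURATION)
		return 2;	/* non-default priorities need the flags byte */
	return 1;		/* both defaults, flags byte can be omitted */
}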
diff --git a/drivers/acpi/resources/rsirq.c b/drivers/acpi/resources/rsirq.c
deleted file mode 100644
index a8805ef..0000000
--- a/drivers/acpi/resources/rsirq.c
+++ /dev/null
@@ -1,265 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: rsirq - IRQ resource descriptors
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acresrc.h>
-
-#define _COMPONENT          ACPI_RESOURCES
-ACPI_MODULE_NAME("rsirq")
-
-/*******************************************************************************
- *
- * acpi_rs_get_irq
- *
- ******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_get_irq[8] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_IRQ,
-	 ACPI_RS_SIZE(struct acpi_resource_irq),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_get_irq)},
-
-	/* Get the IRQ mask (bytes 1:2) */
-
-	{ACPI_RSC_BITMASK16, ACPI_RS_OFFSET(data.irq.interrupts[0]),
-	 AML_OFFSET(irq.irq_mask),
-	 ACPI_RS_OFFSET(data.irq.interrupt_count)},
-
-	/* Set default flags (others are zero) */
-
-	{ACPI_RSC_SET8, ACPI_RS_OFFSET(data.irq.triggering),
-	 ACPI_EDGE_SENSITIVE,
-	 1},
-
-	/* Get the descriptor length (2 or 3 for IRQ descriptor) */
-
-	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.irq.descriptor_length),
-	 AML_OFFSET(irq.descriptor_type),
-	 0},
-
-	/* All done if no flag byte present in descriptor */
-
-	{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 3},
-
-	/* Get flags: Triggering[0], Polarity[3], Sharing[4] */
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering),
-	 AML_OFFSET(irq.flags),
-	 0},
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.polarity),
-	 AML_OFFSET(irq.flags),
-	 3},
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable),
-	 AML_OFFSET(irq.flags),
-	 4}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_set_irq
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_set_irq[13] = {
-	/* Start with a default descriptor of length 3 */
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_IRQ,
-	 sizeof(struct aml_resource_irq),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_set_irq)},
-
-	/* Convert interrupt list to 16-bit IRQ bitmask */
-
-	{ACPI_RSC_BITMASK16, ACPI_RS_OFFSET(data.irq.interrupts[0]),
-	 AML_OFFSET(irq.irq_mask),
-	 ACPI_RS_OFFSET(data.irq.interrupt_count)},
-
-	/* Set the flags byte */
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering),
-	 AML_OFFSET(irq.flags),
-	 0},
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.polarity),
-	 AML_OFFSET(irq.flags),
-	 3},
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable),
-	 AML_OFFSET(irq.flags),
-	 4},
-
-	/*
-	 * All done if the output descriptor length is required to be 3
-	 * (i.e., optimization to 2 bytes cannot be attempted)
-	 */
-	{ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
-	 ACPI_RS_OFFSET(data.irq.descriptor_length),
-	 3},
-
-	/* Set length to 2 bytes (no flags byte) */
-
-	{ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq_noflags)},
-
-	/*
-	 * All done if the output descriptor length is required to be 2.
-	 *
-	 * TBD: Perhaps we should check for error if input flags are not
-	 * compatible with a 2-byte descriptor.
-	 */
-	{ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
-	 ACPI_RS_OFFSET(data.irq.descriptor_length),
-	 2},
-
-	/* Reset length to 3 bytes (descriptor with flags byte) */
-
-	{ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq)},
-
-	/*
-	 * Check if the flags byte is necessary. Not needed if the flags are:
-	 * ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH, ACPI_EXCLUSIVE
-	 */
-	{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE,
-	 ACPI_RS_OFFSET(data.irq.triggering),
-	 ACPI_EDGE_SENSITIVE},
-
-	{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE,
-	 ACPI_RS_OFFSET(data.irq.polarity),
-	 ACPI_ACTIVE_HIGH},
-
-	{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE,
-	 ACPI_RS_OFFSET(data.irq.sharable),
-	 ACPI_EXCLUSIVE},
-
-	/* We can optimize to a 2-byte irq_no_flags() descriptor */
-
-	{ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq_noflags)}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_ext_irq
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_convert_ext_irq[9] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_EXTENDED_IRQ,
-	 ACPI_RS_SIZE(struct acpi_resource_extended_irq),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_ext_irq)},
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_EXTENDED_IRQ,
-	 sizeof(struct aml_resource_extended_irq),
-	 0},
-
-	/* Flag bits */
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.producer_consumer),
-	 AML_OFFSET(extended_irq.flags),
-	 0},
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.triggering),
-	 AML_OFFSET(extended_irq.flags),
-	 1},
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.polarity),
-	 AML_OFFSET(extended_irq.flags),
-	 2},
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.sharable),
-	 AML_OFFSET(extended_irq.flags),
-	 3},
-
-	/* IRQ Table length (Byte4) */
-
-	{ACPI_RSC_COUNT, ACPI_RS_OFFSET(data.extended_irq.interrupt_count),
-	 AML_OFFSET(extended_irq.interrupt_count),
-	 sizeof(u32)},
-
-	/* Copy every IRQ in the table, each is 32 bits */
-
-	{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.extended_irq.interrupts[0]),
-	 AML_OFFSET(extended_irq.interrupts[0]),
-	 0},
-
-	/* Optional resource_source (Index and String) */
-
-	{ACPI_RSC_SOURCEX, ACPI_RS_OFFSET(data.extended_irq.resource_source),
-	 ACPI_RS_OFFSET(data.extended_irq.interrupts[0]),
-	 sizeof(struct aml_resource_extended_irq)}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_dma
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_convert_dma[6] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_DMA,
-	 ACPI_RS_SIZE(struct acpi_resource_dma),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_dma)},
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_DMA,
-	 sizeof(struct aml_resource_dma),
-	 0},
-
-	/* Flags: transfer preference, bus mastering, channel speed */
-
-	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.dma.transfer),
-	 AML_OFFSET(dma.flags),
-	 0},
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.dma.bus_master),
-	 AML_OFFSET(dma.flags),
-	 2},
-
-	{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.dma.type),
-	 AML_OFFSET(dma.flags),
-	 5},
-
-	/* DMA channel mask bits */
-
-	{ACPI_RSC_BITMASK, ACPI_RS_OFFSET(data.dma.channels[0]),
-	 AML_OFFSET(dma.dma_channel_mask),
-	 ACPI_RS_OFFSET(data.dma.channel_count)}
-};
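Several entries in the rsirq.c tables above (ACPI_RSC_BITMASK16) convert between the 16-bit IRQ mask stored in the AML descriptor and the explicit interrupt list kept in the internal structure. The two helpers below sketch that conversion with illustrative names; in the real code the work is done by the bitmask helpers in rsutils.c.

#include <stdint.h>

/* Expand a 16-bit IRQ mask (one bit per IRQ 0-15) into an explicit list. */
static uint8_t decode_irq_mask(uint16_t mask, uint8_t list[16])
{
	uint8_t irq, count = 0;

	for (irq = 0; irq < 16; irq++) {
		if (mask & (1u << irq))
			list[count++] = irq;
	}
	return count;	/* number of interrupts present in the mask */
}

/* Pack an explicit interrupt list back into the 16-bit AML mask. */
static uint16_t encode_irq_mask(const uint8_t *list, uint8_t count)
{
	uint16_t mask = 0;
	uint8_t i;

	for (i = 0; i < count; i++)
		mask |= (uint16_t)(1u << list[i]);
	return mask;
}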
diff --git a/drivers/acpi/resources/rslist.c b/drivers/acpi/resources/rslist.c
deleted file mode 100644
index b78c7e7..0000000
--- a/drivers/acpi/resources/rslist.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: rslist - Linked list utilities
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acresrc.h>
-
-#define _COMPONENT          ACPI_RESOURCES
-ACPI_MODULE_NAME("rslist")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_convert_aml_to_resources
- *
- * PARAMETERS:  acpi_walk_aml_callback
- *              resource_ptr            - Pointer to the buffer that will
- *                                        contain the output structures
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Convert an AML resource to an internal representation of the
- *              resource that is aligned and easier to access.
- *
- ******************************************************************************/
-acpi_status
-acpi_rs_convert_aml_to_resources(u8 * aml,
-				 u32 length,
-				 u32 offset, u8 resource_index, void **context)
-{
-	struct acpi_resource **resource_ptr =
-	    ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context);
-	struct acpi_resource *resource;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources);
-
-	/*
-	 * Check that the input buffer and all subsequent pointers into it
-	 * are aligned on a native word boundary. Most important on IA64
-	 */
-	resource = *resource_ptr;
-	if (ACPI_IS_MISALIGNED(resource)) {
-		ACPI_WARNING((AE_INFO,
-			      "Misaligned resource pointer %p", resource));
-	}
-
-	/* Convert the AML byte stream resource to a local resource struct */
-
-	status =
-	    acpi_rs_convert_aml_to_resource(resource,
-					    ACPI_CAST_PTR(union aml_resource,
-							  aml),
-					    acpi_gbl_get_resource_dispatch
-					    [resource_index]);
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Could not convert AML resource (Type %X)",
-				*aml));
-		return_ACPI_STATUS(status);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
-			  "Type %.2X, AmlLength %.2X InternalLength %.2X\n",
-			  acpi_ut_get_resource_type(aml), length,
-			  resource->length));
-
-	/* Point to the next structure in the output buffer */
-
-	*resource_ptr = ACPI_ADD_PTR(void, resource, resource->length);
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_convert_resources_to_aml
- *
- * PARAMETERS:  Resource            - Pointer to the resource linked list
- *              aml_size_needed     - Calculated size of the byte stream
- *                                    needed from calling acpi_rs_get_aml_length()
- *                                    The size of the output_buffer is
- *                                    guaranteed to be >= aml_size_needed
- *              output_buffer       - Pointer to the buffer that will
- *                                    contain the byte stream
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Takes the resource linked list and parses it, creating a
- *              byte stream of resources in the caller's output buffer
- *
- ******************************************************************************/
-
-acpi_status
-acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
-				 acpi_size aml_size_needed, u8 * output_buffer)
-{
-	u8 *aml = output_buffer;
-	u8 *end_aml = output_buffer + aml_size_needed;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml);
-
-	/* Walk the resource descriptor list, convert each descriptor */
-
-	while (aml < end_aml) {
-
-		/* Validate the (internal) Resource Type */
-
-		if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
-			ACPI_ERROR((AE_INFO,
-				    "Invalid descriptor type (%X) in resource list",
-				    resource->type));
-			return_ACPI_STATUS(AE_BAD_DATA);
-		}
-
-		/* Perform the conversion */
-
-		status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union
-										 aml_resource,
-										 aml),
-							 acpi_gbl_set_resource_dispatch
-							 [resource->type]);
-		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"Could not convert resource (type %X) to AML",
-					resource->type));
-			return_ACPI_STATUS(status);
-		}
-
-		/* Perform final sanity check on the new AML resource descriptor */
-
-		status =
-		    acpi_ut_validate_resource(ACPI_CAST_PTR
-					      (union aml_resource, aml), NULL);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		/* Check for end-of-list, normal exit */
-
-		if (resource->type == ACPI_RESOURCE_TYPE_END_TAG) {
-
-			/* An End Tag indicates the end of the input Resource Template */
-
-			return_ACPI_STATUS(AE_OK);
-		}
-
-		/*
-		 * Extract the total length of the new descriptor and set the
-		 * Aml to point to the next (output) resource descriptor
-		 */
-		aml += acpi_ut_get_descriptor_length(aml);
-
-		/* Point to the next input resource descriptor */
-
-		resource =
-		    ACPI_ADD_PTR(struct acpi_resource, resource,
-				 resource->length);
-	}
-
-	/* Completed buffer, but did not find an end_tag resource descriptor */
-
-	return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
-}
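The misalignment warning in acpi_rs_convert_aml_to_resources above amounts to testing the low bits of the output pointer against the native word size. A minimal sketch follows; is_misaligned is a name chosen here, and using sizeof(void *) as the alignment is an assumption standing in for the ACPI_IS_MISALIGNED macro.

#include <stdint.h>

/* Non-zero if the pointer is not aligned on a native word boundary. */
static int is_misaligned(const void *ptr)
{
	return ((uintptr_t)ptr & (sizeof(void *) - 1)) != 0;
}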
diff --git a/drivers/acpi/resources/rsmemory.c b/drivers/acpi/resources/rsmemory.c
deleted file mode 100644
index 63b21ab..0000000
--- a/drivers/acpi/resources/rsmemory.c
+++ /dev/null
@@ -1,235 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: rsmem24 - Memory resource descriptors
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acresrc.h>
-
-#define _COMPONENT          ACPI_RESOURCES
-ACPI_MODULE_NAME("rsmemory")
-
-/*******************************************************************************
- *
- * acpi_rs_convert_memory24
- *
- ******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_convert_memory24[4] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_MEMORY24,
-	 ACPI_RS_SIZE(struct acpi_resource_memory24),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_memory24)},
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_MEMORY24,
-	 sizeof(struct aml_resource_memory24),
-	 0},
-
-	/* Read/Write bit */
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.memory24.write_protect),
-	 AML_OFFSET(memory24.flags),
-	 0},
-	/*
-	 * These fields are contiguous in both the source and destination:
-	 * Minimum Base Address
-	 * Maximum Base Address
-	 * Address Base Alignment
-	 * Range Length
-	 */
-	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.memory24.minimum),
-	 AML_OFFSET(memory24.minimum),
-	 4}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_memory32
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_convert_memory32[4] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_MEMORY32,
-	 ACPI_RS_SIZE(struct acpi_resource_memory32),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_memory32)},
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_MEMORY32,
-	 sizeof(struct aml_resource_memory32),
-	 0},
-
-	/* Read/Write bit */
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.memory32.write_protect),
-	 AML_OFFSET(memory32.flags),
-	 0},
-	/*
-	 * These fields are contiguous in both the source and destination:
-	 * Minimum Base Address
-	 * Maximum Base Address
-	 * Address Base Alignment
-	 * Range Length
-	 */
-	{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.memory32.minimum),
-	 AML_OFFSET(memory32.minimum),
-	 4}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_convert_fixed_memory32
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_convert_fixed_memory32[4] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_MEMORY32,
-	 ACPI_RS_SIZE(struct acpi_resource_fixed_memory32),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_memory32)},
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_MEMORY32,
-	 sizeof(struct aml_resource_fixed_memory32),
-	 0},
-
-	/* Read/Write bit */
-
-	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.fixed_memory32.write_protect),
-	 AML_OFFSET(fixed_memory32.flags),
-	 0},
-	/*
-	 * These fields are contiguous in both the source and destination:
-	 * Base Address
-	 * Range Length
-	 */
-	{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.fixed_memory32.address),
-	 AML_OFFSET(fixed_memory32.address),
-	 2}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_get_vendor_small
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_get_vendor_small[3] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_VENDOR,
-	 ACPI_RS_SIZE(struct acpi_resource_vendor),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_get_vendor_small)},
-
-	/* Length of the vendor data (byte count) */
-
-	{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
-	 0,
-	 sizeof(u8)},
-
-	/* Vendor data */
-
-	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
-	 sizeof(struct aml_resource_small_header),
-	 0}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_get_vendor_large
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_get_vendor_large[3] = {
-	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_VENDOR,
-	 ACPI_RS_SIZE(struct acpi_resource_vendor),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_get_vendor_large)},
-
-	/* Length of the vendor data (byte count) */
-
-	{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
-	 0,
-	 sizeof(u8)},
-
-	/* Vendor data */
-
-	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
-	 sizeof(struct aml_resource_large_header),
-	 0}
-};
-
-/*******************************************************************************
- *
- * acpi_rs_set_vendor
- *
- ******************************************************************************/
-
-struct acpi_rsconvert_info acpi_rs_set_vendor[7] = {
-	/* Default is a small vendor descriptor */
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_VENDOR_SMALL,
-	 sizeof(struct aml_resource_small_header),
-	 ACPI_RSC_TABLE_SIZE(acpi_rs_set_vendor)},
-
-	/* Get the length and copy the data */
-
-	{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
-	 0,
-	 0},
-
-	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
-	 sizeof(struct aml_resource_small_header),
-	 0},
-
-	/*
-	 * All done if the Vendor byte length is 7 or less, meaning that it will
-	 * fit within a small descriptor
-	 */
-	{ACPI_RSC_EXIT_LE, 0, 0, 7},
-
-	/* Must create a large vendor descriptor */
-
-	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_VENDOR_LARGE,
-	 sizeof(struct aml_resource_large_header),
-	 0},
-
-	{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
-	 0,
-	 0},
-
-	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
-	 sizeof(struct aml_resource_large_header),
-	 0}
-};
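The acpi_rs_set_vendor table above first tries a Small vendor descriptor and falls back to a Large one when the payload exceeds 7 bytes, because a Small header carries only a 3-bit length field. The arithmetic sketch below restates the resulting AML size, using the header sizes given in the rsinfo.c comment (1 byte for Small, 3 bytes for Large); the function name is illustrative.

#include <stddef.h>

/* AML size of a vendor-defined descriptor holding byte_length data bytes. */
static size_t vendor_descriptor_size(size_t byte_length)
{
	if (byte_length <= 7)
		return 1 + byte_length;	/* Small descriptor: 1-byte header */

	return 3 + byte_length;		/* Large descriptor: type byte + 16-bit length */
}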
diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/resources/rsmisc.c
deleted file mode 100644
index 96a6c03..0000000
--- a/drivers/acpi/resources/rsmisc.c
+++ /dev/null
@@ -1,560 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: rsmisc - Miscellaneous resource descriptors
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acresrc.h>
-
-#define _COMPONENT          ACPI_RESOURCES
-ACPI_MODULE_NAME("rsmisc")
-#define INIT_RESOURCE_TYPE(i)       i->resource_offset
-#define INIT_RESOURCE_LENGTH(i)     i->aml_offset
-#define INIT_TABLE_LENGTH(i)        i->value
-#define COMPARE_OPCODE(i)           i->resource_offset
-#define COMPARE_TARGET(i)           i->aml_offset
-#define COMPARE_VALUE(i)            i->value
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_convert_aml_to_resource
- *
- * PARAMETERS:  Resource            - Where the internal resource is returned
- *              Aml                 - Pointer to the AML resource descriptor
- *              Info                - Pointer to appropriate conversion table
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Convert an external AML resource descriptor to the corresponding
- *              internal resource descriptor
- *
- ******************************************************************************/
-acpi_status
-acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
-				union aml_resource *aml,
-				struct acpi_rsconvert_info *info)
-{
-	acpi_rs_length aml_resource_length;
-	void *source;
-	void *destination;
-	char *target;
-	u8 count;
-	u8 flags_mode = FALSE;
-	u16 item_count = 0;
-	u16 temp16 = 0;
-
-	ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource);
-
-	if (((acpi_size) resource) & 0x3) {
-
-		/* Each internal resource struct is expected to be 32-bit aligned */
-
-		ACPI_WARNING((AE_INFO,
-			      "Misaligned resource pointer (get): %p Type %2.2X Len %X",
-			      resource, resource->type, resource->length));
-	}
-
-	/* Extract the resource Length field (does not include header length) */
-
-	aml_resource_length = acpi_ut_get_resource_length(aml);
-
-	/*
-	 * First table entry must be ACPI_RSC_INITxxx and must contain the
-	 * table length (# of table entries)
-	 */
-	count = INIT_TABLE_LENGTH(info);
-
-	while (count) {
-		/*
-		 * Source is the external AML byte stream buffer,
-		 * destination is the internal resource descriptor
-		 */
-		source = ACPI_ADD_PTR(void, aml, info->aml_offset);
-		destination =
-		    ACPI_ADD_PTR(void, resource, info->resource_offset);
-
-		switch (info->opcode) {
-		case ACPI_RSC_INITGET:
-			/*
-			 * Get the resource type and the initial (minimum) length
-			 */
-			ACPI_MEMSET(resource, 0, INIT_RESOURCE_LENGTH(info));
-			resource->type = INIT_RESOURCE_TYPE(info);
-			resource->length = INIT_RESOURCE_LENGTH(info);
-			break;
-
-		case ACPI_RSC_INITSET:
-			break;
-
-		case ACPI_RSC_FLAGINIT:
-
-			flags_mode = TRUE;
-			break;
-
-		case ACPI_RSC_1BITFLAG:
-			/*
-			 * Mask and shift the flag bit
-			 */
-			ACPI_SET8(destination) = (u8)
-			    ((ACPI_GET8(source) >> info->value) & 0x01);
-			break;
-
-		case ACPI_RSC_2BITFLAG:
-			/*
-			 * Mask and shift the flag bits
-			 */
-			ACPI_SET8(destination) = (u8)
-			    ((ACPI_GET8(source) >> info->value) & 0x03);
-			break;
-
-		case ACPI_RSC_COUNT:
-
-			item_count = ACPI_GET8(source);
-			ACPI_SET8(destination) = (u8) item_count;
-
-			resource->length = resource->length +
-			    (info->value * (item_count - 1));
-			break;
-
-		case ACPI_RSC_COUNT16:
-
-			item_count = aml_resource_length;
-			ACPI_SET16(destination) = item_count;
-
-			resource->length = resource->length +
-			    (info->value * (item_count - 1));
-			break;
-
-		case ACPI_RSC_LENGTH:
-
-			resource->length = resource->length + info->value;
-			break;
-
-		case ACPI_RSC_MOVE8:
-		case ACPI_RSC_MOVE16:
-		case ACPI_RSC_MOVE32:
-		case ACPI_RSC_MOVE64:
-			/*
-			 * Raw data move. Use the Info value field unless item_count has
-			 * been previously initialized via a COUNT opcode
-			 */
-			if (info->value) {
-				item_count = info->value;
-			}
-			acpi_rs_move_data(destination, source, item_count,
-					  info->opcode);
-			break;
-
-		case ACPI_RSC_SET8:
-
-			ACPI_MEMSET(destination, info->aml_offset, info->value);
-			break;
-
-		case ACPI_RSC_DATA8:
-
-			target = ACPI_ADD_PTR(char, resource, info->value);
-			ACPI_MEMCPY(destination, source, ACPI_GET16(target));
-			break;
-
-		case ACPI_RSC_ADDRESS:
-			/*
-			 * Common handler for address descriptor flags
-			 */
-			if (!acpi_rs_get_address_common(resource, aml)) {
-				return_ACPI_STATUS
-				    (AE_AML_INVALID_RESOURCE_TYPE);
-			}
-			break;
-
-		case ACPI_RSC_SOURCE:
-			/*
-			 * Optional resource_source (Index and String)
-			 */
-			resource->length +=
-			    acpi_rs_get_resource_source(aml_resource_length,
-							info->value,
-							destination, aml, NULL);
-			break;
-
-		case ACPI_RSC_SOURCEX:
-			/*
-			 * Optional resource_source (Index and String). This is the more
-			 * complicated case used by the Interrupt() macro
-			 */
-			target =
-			    ACPI_ADD_PTR(char, resource,
-					 info->aml_offset + (item_count * 4));
-
-			resource->length +=
-			    acpi_rs_get_resource_source(aml_resource_length,
-							(acpi_rs_length) (((item_count - 1) * sizeof(u32)) + info->value), destination, aml, target);
-			break;
-
-		case ACPI_RSC_BITMASK:
-			/*
-			 * 8-bit encoded bitmask (DMA macro)
-			 */
-			item_count =
-			    acpi_rs_decode_bitmask(ACPI_GET8(source),
-						   destination);
-			if (item_count) {
-				resource->length += (item_count - 1);
-			}
-
-			target = ACPI_ADD_PTR(char, resource, info->value);
-			ACPI_SET8(target) = (u8) item_count;
-			break;
-
-		case ACPI_RSC_BITMASK16:
-			/*
-			 * 16-bit encoded bitmask (IRQ macro)
-			 */
-			ACPI_MOVE_16_TO_16(&temp16, source);
-
-			item_count =
-			    acpi_rs_decode_bitmask(temp16, destination);
-			if (item_count) {
-				resource->length += (item_count - 1);
-			}
-
-			target = ACPI_ADD_PTR(char, resource, info->value);
-			ACPI_SET8(target) = (u8) item_count;
-			break;
-
-		case ACPI_RSC_EXIT_NE:
-			/*
-			 * Control - Exit conversion if not equal
-			 */
-			switch (info->resource_offset) {
-			case ACPI_RSC_COMPARE_AML_LENGTH:
-				if (aml_resource_length != info->value) {
-					goto exit;
-				}
-				break;
-
-			case ACPI_RSC_COMPARE_VALUE:
-				if (ACPI_GET8(source) != info->value) {
-					goto exit;
-				}
-				break;
-
-			default:
-
-				ACPI_ERROR((AE_INFO,
-					    "Invalid conversion sub-opcode"));
-				return_ACPI_STATUS(AE_BAD_PARAMETER);
-			}
-			break;
-
-		default:
-
-			ACPI_ERROR((AE_INFO, "Invalid conversion opcode"));
-			return_ACPI_STATUS(AE_BAD_PARAMETER);
-		}
-
-		count--;
-		info++;
-	}
-
-      exit:
-	if (!flags_mode) {
-
-		/* Round the resource struct length up to the next boundary (32 or 64) */
-
-		resource->length =
-		    (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(resource->length);
-	}
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_convert_resource_to_aml
- *
- * PARAMETERS:  Resource            - Pointer to the resource descriptor
- *              Aml                 - Where the AML descriptor is returned
- *              Info                - Pointer to appropriate conversion table
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Convert an internal resource descriptor to the corresponding
- *              external AML resource descriptor.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
-				union aml_resource *aml,
-				struct acpi_rsconvert_info *info)
-{
-	void *source = NULL;
-	void *destination;
-	acpi_rsdesc_size aml_length = 0;
-	u8 count;
-	u16 temp16 = 0;
-	u16 item_count = 0;
-
-	ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml);
-
-	/*
-	 * First table entry must be ACPI_RSC_INITxxx and must contain the
-	 * table length (# of table entries)
-	 */
-	count = INIT_TABLE_LENGTH(info);
-
-	while (count) {
-		/*
-		 * Source is the internal resource descriptor,
-		 * destination is the external AML byte stream buffer
-		 */
-		source = ACPI_ADD_PTR(void, resource, info->resource_offset);
-		destination = ACPI_ADD_PTR(void, aml, info->aml_offset);
-
-		switch (info->opcode) {
-		case ACPI_RSC_INITSET:
-
-			ACPI_MEMSET(aml, 0, INIT_RESOURCE_LENGTH(info));
-			aml_length = INIT_RESOURCE_LENGTH(info);
-			acpi_rs_set_resource_header(INIT_RESOURCE_TYPE(info),
-						    aml_length, aml);
-			break;
-
-		case ACPI_RSC_INITGET:
-			break;
-
-		case ACPI_RSC_FLAGINIT:
-			/*
-			 * Clear the flag byte
-			 */
-			ACPI_SET8(destination) = 0;
-			break;
-
-		case ACPI_RSC_1BITFLAG:
-			/*
-			 * Mask and shift the flag bit
-			 */
-			ACPI_SET8(destination) |= (u8)
-			    ((ACPI_GET8(source) & 0x01) << info->value);
-			break;
-
-		case ACPI_RSC_2BITFLAG:
-			/*
-			 * Mask and shift the flag bits
-			 */
-			ACPI_SET8(destination) |= (u8)
-			    ((ACPI_GET8(source) & 0x03) << info->value);
-			break;
-
-		case ACPI_RSC_COUNT:
-
-			item_count = ACPI_GET8(source);
-			ACPI_SET8(destination) = (u8) item_count;
-
-			aml_length =
-			    (u16) (aml_length +
-				   (info->value * (item_count - 1)));
-			break;
-
-		case ACPI_RSC_COUNT16:
-
-			item_count = ACPI_GET16(source);
-			aml_length = (u16) (aml_length + item_count);
-			acpi_rs_set_resource_length(aml_length, aml);
-			break;
-
-		case ACPI_RSC_LENGTH:
-
-			acpi_rs_set_resource_length(info->value, aml);
-			break;
-
-		case ACPI_RSC_MOVE8:
-		case ACPI_RSC_MOVE16:
-		case ACPI_RSC_MOVE32:
-		case ACPI_RSC_MOVE64:
-
-			if (info->value) {
-				item_count = info->value;
-			}
-			acpi_rs_move_data(destination, source, item_count,
-					  info->opcode);
-			break;
-
-		case ACPI_RSC_ADDRESS:
-
-			/* Set the Resource Type, General Flags, and Type-Specific Flags */
-
-			acpi_rs_set_address_common(aml, resource);
-			break;
-
-		case ACPI_RSC_SOURCEX:
-			/*
-			 * Optional resource_source (Index and String). This is the more
-			 * complicated case used by the Interrupt() macro
-			 */
-			aml_length =
-			    acpi_rs_set_resource_source(aml, (acpi_rs_length)
-							aml_length, source);
-			acpi_rs_set_resource_length(aml_length, aml);
-			break;
-
-		case ACPI_RSC_SOURCE:
-			/*
-			 * Optional resource_source (Index and String)
-			 */
-			aml_length =
-			    acpi_rs_set_resource_source(aml, info->value,
-							source);
-			acpi_rs_set_resource_length(aml_length, aml);
-			break;
-
-		case ACPI_RSC_BITMASK:
-			/*
-			 * 8-bit encoded bitmask (DMA macro)
-			 */
-			ACPI_SET8(destination) = (u8)
-			    acpi_rs_encode_bitmask(source,
-						   *ACPI_ADD_PTR(u8, resource,
-								 info->value));
-			break;
-
-		case ACPI_RSC_BITMASK16:
-			/*
-			 * 16-bit encoded bitmask (IRQ macro)
-			 */
-			temp16 = acpi_rs_encode_bitmask(source,
-							*ACPI_ADD_PTR(u8,
-								      resource,
-								      info->
-								      value));
-			ACPI_MOVE_16_TO_16(destination, &temp16);
-			break;
-
-		case ACPI_RSC_EXIT_LE:
-			/*
-			 * Control - Exit conversion if less than or equal
-			 */
-			if (item_count <= info->value) {
-				goto exit;
-			}
-			break;
-
-		case ACPI_RSC_EXIT_NE:
-			/*
-			 * Control - Exit conversion if not equal
-			 */
-			switch (COMPARE_OPCODE(info)) {
-			case ACPI_RSC_COMPARE_VALUE:
-
-				if (*ACPI_ADD_PTR(u8, resource,
-						  COMPARE_TARGET(info)) !=
-				    COMPARE_VALUE(info)) {
-					goto exit;
-				}
-				break;
-
-			default:
-
-				ACPI_ERROR((AE_INFO,
-					    "Invalid conversion sub-opcode"));
-				return_ACPI_STATUS(AE_BAD_PARAMETER);
-			}
-			break;
-
-		case ACPI_RSC_EXIT_EQ:
-			/*
-			 * Control - Exit conversion if equal
-			 */
-			if (*ACPI_ADD_PTR(u8, resource,
-					  COMPARE_TARGET(info)) ==
-			    COMPARE_VALUE(info)) {
-				goto exit;
-			}
-			break;
-
-		default:
-
-			ACPI_ERROR((AE_INFO, "Invalid conversion opcode"));
-			return_ACPI_STATUS(AE_BAD_PARAMETER);
-		}
-
-		count--;
-		info++;
-	}
-
-      exit:
-	return_ACPI_STATUS(AE_OK);
-}
-
-#if 0
-/* Previous resource validations */
-
-if (aml->ext_address64.revision_iD != AML_RESOURCE_EXTENDED_ADDRESS_REVISION) {
-	return_ACPI_STATUS(AE_SUPPORT);
-}
-
-if (resource->data.start_dpf.performance_robustness >= 3) {
-	return_ACPI_STATUS(AE_AML_BAD_RESOURCE_VALUE);
-}
-
-if (((aml->irq.flags & 0x09) == 0x00) || ((aml->irq.flags & 0x09) == 0x09)) {
-	/*
-	 * Only [active_high, edge_sensitive] or [active_low, level_sensitive]
-	 * polarity/trigger interrupts are allowed (ACPI spec, section
-	 * "IRQ Format"), so 0x00 and 0x09 are illegal.
-	 */
-	ACPI_ERROR((AE_INFO,
-		    "Invalid interrupt polarity/trigger in resource list, %X",
-		    aml->irq.flags));
-	return_ACPI_STATUS(AE_BAD_DATA);
-}
-
-resource->data.extended_irq.interrupt_count = temp8;
-if (temp8 < 1) {
-
-	/* Must have at least one IRQ */
-
-	return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
-}
-
-if (resource->data.dma.transfer == 0x03) {
-	ACPI_ERROR((AE_INFO, "Invalid DMA.Transfer preference (3)"));
-	return_ACPI_STATUS(AE_BAD_DATA);
-}
-#endif
diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/resources/rsutils.c
deleted file mode 100644
index f7b3bcd..0000000
--- a/drivers/acpi/resources/rsutils.c
+++ /dev/null
@@ -1,726 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: rsutils - Utilities for the resource manager
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acresrc.h>
-
-#define _COMPONENT          ACPI_RESOURCES
-ACPI_MODULE_NAME("rsutils")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_decode_bitmask
- *
- * PARAMETERS:  Mask            - Bitmask to decode
- *              List            - Where the converted list is returned
- *
- * RETURN:      Count of bits set (length of list)
- *
- * DESCRIPTION: Convert a bit mask into a list of values
- *
- ******************************************************************************/
-u8 acpi_rs_decode_bitmask(u16 mask, u8 * list)
-{
-	u8 i;
-	u8 bit_count;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* Decode the mask bits */
-
-	for (i = 0, bit_count = 0; mask; i++) {
-		if (mask & 0x0001) {
-			list[bit_count] = i;
-			bit_count++;
-		}
-
-		mask >>= 1;
-	}
-
-	return (bit_count);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_encode_bitmask
- *
- * PARAMETERS:  List            - List of values to encode
- *              Count           - Length of list
- *
- * RETURN:      Encoded bitmask
- *
- * DESCRIPTION: Convert a list of values to an encoded bitmask
- *
- ******************************************************************************/
-
-u16 acpi_rs_encode_bitmask(u8 * list, u8 count)
-{
-	u32 i;
-	u16 mask;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* Encode the list into a single bitmask */
-
-	for (i = 0, mask = 0; i < count; i++) {
-		mask |= (0x1 << list[i]);
-	}
-
-	return mask;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_move_data
- *
- * PARAMETERS:  Destination         - Pointer to the destination descriptor
- *              Source              - Pointer to the source descriptor
- *              item_count          - How many items to move
- *              move_type           - Byte width
- *
- * RETURN:      None
- *
- * DESCRIPTION: Move multiple data items from one descriptor to another. Handles
- *              alignment issues and endian issues if necessary, as configured
- *              via the ACPI_MOVE_* macros. (This is why a memcpy is not used)
- *
- ******************************************************************************/
-
-void
-acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
-{
-	u32 i;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* One move per item */
-
-	for (i = 0; i < item_count; i++) {
-		switch (move_type) {
-			/*
-			 * For the 8-bit case, we can perform the move all at once
-			 * since there are no alignment or endian issues
-			 */
-		case ACPI_RSC_MOVE8:
-			ACPI_MEMCPY(destination, source, item_count);
-			return;
-
-			/*
-			 * 16-, 32-, and 64-bit cases must use the move macros that perform
-			 * endian conversion and/or accommodate hardware that cannot perform
-			 * misaligned memory transfers
-			 */
-		case ACPI_RSC_MOVE16:
-			ACPI_MOVE_16_TO_16(&ACPI_CAST_PTR(u16, destination)[i],
-					   &ACPI_CAST_PTR(u16, source)[i]);
-			break;
-
-		case ACPI_RSC_MOVE32:
-			ACPI_MOVE_32_TO_32(&ACPI_CAST_PTR(u32, destination)[i],
-					   &ACPI_CAST_PTR(u32, source)[i]);
-			break;
-
-		case ACPI_RSC_MOVE64:
-			ACPI_MOVE_64_TO_64(&ACPI_CAST_PTR(u64, destination)[i],
-					   &ACPI_CAST_PTR(u64, source)[i]);
-			break;
-
-		default:
-			return;
-		}
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_set_resource_length
- *
- * PARAMETERS:  total_length        - Length of the AML descriptor, including
- *                                    the header and length fields.
- *              Aml                 - Pointer to the raw AML descriptor
- *
- * RETURN:      None
- *
- * DESCRIPTION: Set the resource_length field of an AML
- *              resource descriptor, both Large and Small descriptors are
- *              supported automatically. Note: Descriptor Type field must
- *              be valid.
- *
- ******************************************************************************/
-
-void
-acpi_rs_set_resource_length(acpi_rsdesc_size total_length,
-			    union aml_resource *aml)
-{
-	acpi_rs_length resource_length;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* Length is the total descriptor length minus the header length */
-
-	resource_length = (acpi_rs_length)
-	    (total_length - acpi_ut_get_resource_header_length(aml));
-
-	/* Length is stored differently for large and small descriptors */
-
-	if (aml->small_header.descriptor_type & ACPI_RESOURCE_NAME_LARGE) {
-
-		/* Large descriptor -- bytes 1-2 contain the 16-bit length */
-
-		ACPI_MOVE_16_TO_16(&aml->large_header.resource_length,
-				   &resource_length);
-	} else {
-		/* Small descriptor -- bits 2:0 of byte 0 contain the length */
-
-		aml->small_header.descriptor_type = (u8)
-
-		    /* Clear any existing length, preserving descriptor type bits */
-		    ((aml->small_header.
-		      descriptor_type & ~ACPI_RESOURCE_NAME_SMALL_LENGTH_MASK)
-
-		     | resource_length);
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_set_resource_header
- *
- * PARAMETERS:  descriptor_type     - Byte to be inserted as the type
- *              total_length        - Length of the AML descriptor, including
- *                                    the header and length fields.
- *              Aml                 - Pointer to the raw AML descriptor
- *
- * RETURN:      None
- *
- * DESCRIPTION: Set the descriptor_type and resource_length fields of an AML
- *              resource descriptor, both Large and Small descriptors are
- *              supported automatically
- *
- ******************************************************************************/
-
-void
-acpi_rs_set_resource_header(u8 descriptor_type,
-			    acpi_rsdesc_size total_length,
-			    union aml_resource *aml)
-{
-	ACPI_FUNCTION_ENTRY();
-
-	/* Set the Resource Type */
-
-	aml->small_header.descriptor_type = descriptor_type;
-
-	/* Set the Resource Length */
-
-	acpi_rs_set_resource_length(total_length, aml);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_strcpy
- *
- * PARAMETERS:  Destination         - Pointer to the destination string
- *              Source              - Pointer to the source string
- *
- * RETURN:      String length, including NULL terminator
- *
- * DESCRIPTION: Local string copy that returns the string length, saving a
- *              strcpy followed by a strlen.
- *
- ******************************************************************************/
-
-static u16 acpi_rs_strcpy(char *destination, char *source)
-{
-	u16 i;
-
-	ACPI_FUNCTION_ENTRY();
-
-	for (i = 0; source[i]; i++) {
-		destination[i] = source[i];
-	}
-
-	destination[i] = 0;
-
-	/* Return string length including the NULL terminator */
-
-	return ((u16) (i + 1));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_get_resource_source
- *
- * PARAMETERS:  resource_length     - Length field of the descriptor
- *              minimum_length      - Minimum length of the descriptor (minus
- *                                    any optional fields)
- *              resource_source     - Where the resource_source is returned
- *              Aml                 - Pointer to the raw AML descriptor
- *              string_ptr          - (optional) where to store the actual
- *                                    resource_source string
- *
- * RETURN:      Length of the string plus NULL terminator, rounded up to native
- *              word boundary
- *
- * DESCRIPTION: Copy the optional resource_source data from a raw AML descriptor
- *              to an internal resource descriptor
- *
- ******************************************************************************/
-
-acpi_rs_length
-acpi_rs_get_resource_source(acpi_rs_length resource_length,
-			    acpi_rs_length minimum_length,
-			    struct acpi_resource_source * resource_source,
-			    union aml_resource * aml, char *string_ptr)
-{
-	acpi_rsdesc_size total_length;
-	u8 *aml_resource_source;
-
-	ACPI_FUNCTION_ENTRY();
-
-	total_length =
-	    resource_length + sizeof(struct aml_resource_large_header);
-	aml_resource_source = ACPI_ADD_PTR(u8, aml, minimum_length);
-
-	/*
-	 * resource_source is present if the length of the descriptor is longer than
-	 * the minimum length.
-	 *
-	 * Note: Some resource descriptors will have an additional null, so
-	 * we add 1 to the minimum length.
-	 */
-	if (total_length > (acpi_rsdesc_size) (minimum_length + 1)) {
-
-		/* Get the resource_source_index */
-
-		resource_source->index = aml_resource_source[0];
-
-		resource_source->string_ptr = string_ptr;
-		if (!string_ptr) {
-			/*
-			 * String destination pointer is not specified; Set the String
-			 * pointer to the end of the current resource_source structure.
-			 */
-			resource_source->string_ptr =
-			    ACPI_ADD_PTR(char, resource_source,
-					 sizeof(struct acpi_resource_source));
-		}
-
-		/*
-		 * In order for the Resource length to be a multiple of the native
-		 * word, calculate the length of the string (+1 for NULL terminator)
-		 * and expand to the next word multiple.
-		 *
-		 * Zero the entire area of the buffer.
-		 */
-		total_length = (u32)
-		ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) + 1;
-		total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
-
-		ACPI_MEMSET(resource_source->string_ptr, 0, total_length);
-
-		/* Copy the resource_source string to the destination */
-
-		resource_source->string_length =
-		    acpi_rs_strcpy(resource_source->string_ptr,
-				   ACPI_CAST_PTR(char,
-						 &aml_resource_source[1]));
-
-		return ((acpi_rs_length) total_length);
-	}
-
-	/* resource_source is not present */
-
-	resource_source->index = 0;
-	resource_source->string_length = 0;
-	resource_source->string_ptr = NULL;
-	return (0);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_set_resource_source
- *
- * PARAMETERS:  Aml                 - Pointer to the raw AML descriptor
- *              minimum_length      - Minimum length of the descriptor (minus
- *                                    any optional fields)
- *              resource_source     - Internal resource_source
-
- *
- * RETURN:      Total length of the AML descriptor
- *
- * DESCRIPTION: Convert an optional resource_source from internal format to a
- *              raw AML resource descriptor
- *
- ******************************************************************************/
-
-acpi_rsdesc_size
-acpi_rs_set_resource_source(union aml_resource * aml,
-			    acpi_rs_length minimum_length,
-			    struct acpi_resource_source * resource_source)
-{
-	u8 *aml_resource_source;
-	acpi_rsdesc_size descriptor_length;
-
-	ACPI_FUNCTION_ENTRY();
-
-	descriptor_length = minimum_length;
-
-	/* Non-zero string length indicates presence of a resource_source */
-
-	if (resource_source->string_length) {
-
-		/* Point to the end of the AML descriptor */
-
-		aml_resource_source = ACPI_ADD_PTR(u8, aml, minimum_length);
-
-		/* Copy the resource_source_index */
-
-		aml_resource_source[0] = (u8) resource_source->index;
-
-		/* Copy the resource_source string */
-
-		ACPI_STRCPY(ACPI_CAST_PTR(char, &aml_resource_source[1]),
-			    resource_source->string_ptr);
-
-		/*
-		 * Add the length of the string (+ 1 for null terminator) to the
-		 * final descriptor length
-		 */
-		descriptor_length +=
-		    ((acpi_rsdesc_size) resource_source->string_length + 1);
-	}
-
-	/* Return the new total length of the AML descriptor */
-
-	return (descriptor_length);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_get_prt_method_data
- *
- * PARAMETERS:  Node            - Device node
- *              ret_buffer      - Pointer to a buffer structure for the
- *                                results
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to get the _PRT value of an object
- *              contained in an object specified by the handle passed in
- *
- *              If the function fails an appropriate status will be returned
- *              and the contents of the callers buffer is undefined.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_rs_get_prt_method_data(struct acpi_namespace_node * node,
-			    struct acpi_buffer * ret_buffer)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(rs_get_prt_method_data);
-
-	/* Parameters guaranteed valid by caller */
-
-	/* Execute the method, no parameters */
-
-	status = acpi_ut_evaluate_object(node, METHOD_NAME__PRT,
-					 ACPI_BTYPE_PACKAGE, &obj_desc);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * Create a resource linked list from the byte stream buffer that comes
-	 * back from the _CRS method execution.
-	 */
-	status = acpi_rs_create_pci_routing_table(obj_desc, ret_buffer);
-
-	/* On exit, we must delete the object returned by evaluate_object */
-
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_get_crs_method_data
- *
- * PARAMETERS:  Node            - Device node
- *              ret_buffer      - Pointer to a buffer structure for the
- *                                results
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to get the _CRS value of an object
- *              contained in an object specified by the handle passed in
- *
- *              If the function fails an appropriate status will be returned
- *              and the contents of the callers buffer is undefined.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
-			    struct acpi_buffer *ret_buffer)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(rs_get_crs_method_data);
-
-	/* Parameters guaranteed valid by caller */
-
-	/* Execute the method, no parameters */
-
-	status = acpi_ut_evaluate_object(node, METHOD_NAME__CRS,
-					 ACPI_BTYPE_BUFFER, &obj_desc);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * Make the call to create a resource linked list from the
-	 * byte stream buffer that comes back from the _CRS method
-	 * execution.
-	 */
-	status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
-
-	/* On exit, we must delete the object returned by evaluate_object */
-
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_get_prs_method_data
- *
- * PARAMETERS:  Node            - Device node
- *              ret_buffer      - Pointer to a buffer structure for the
- *                                results
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to get the _PRS value of an object
- *              contained in an object specified by the handle passed in
- *
- *              If the function fails an appropriate status will be returned
- *              and the contents of the callers buffer is undefined.
- *
- ******************************************************************************/
-
-#ifdef ACPI_FUTURE_USAGE
-acpi_status
-acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
-			    struct acpi_buffer *ret_buffer)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(rs_get_prs_method_data);
-
-	/* Parameters guaranteed valid by caller */
-
-	/* Execute the method, no parameters */
-
-	status = acpi_ut_evaluate_object(node, METHOD_NAME__PRS,
-					 ACPI_BTYPE_BUFFER, &obj_desc);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * Make the call to create a resource linked list from the
-	 * byte stream buffer that comes back from the _CRS method
-	 * execution.
-	 */
-	status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
-
-	/* On exit, we must delete the object returned by evaluate_object */
-
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
-#endif				/*  ACPI_FUTURE_USAGE  */
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_get_method_data
- *
- * PARAMETERS:  Handle          - Handle to the containing object
- *              Path            - Path to method, relative to Handle
- *              ret_buffer      - Pointer to a buffer structure for the
- *                                results
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to get the _CRS or _PRS value of an
- *              object contained in an object specified by the handle passed in
- *
- *              If the function fails an appropriate status will be returned
- *              and the contents of the callers buffer is undefined.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_rs_get_method_data(acpi_handle handle,
-			char *path, struct acpi_buffer *ret_buffer)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(rs_get_method_data);
-
-	/* Parameters guaranteed valid by caller */
-
-	/* Execute the method, no parameters */
-
-	status =
-	    acpi_ut_evaluate_object(handle, path, ACPI_BTYPE_BUFFER, &obj_desc);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * Make the call to create a resource linked list from the
-	 * byte stream buffer that comes back from the method
-	 * execution.
-	 */
-	status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
-
-	/* On exit, we must delete the object returned by evaluate_object */
-
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_set_srs_method_data
- *
- * PARAMETERS:  Node            - Device node
- *              in_buffer       - Pointer to a buffer structure of the
- *                                parameter
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to set the _SRS of an object contained
- *              in an object specified by the handle passed in
- *
- *              If the function fails an appropriate status will be returned
- *              and the contents of the callers buffer is undefined.
- *
- * Note: Parameters guaranteed valid by caller
- *
- ******************************************************************************/
-
-acpi_status
-acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
-			    struct acpi_buffer *in_buffer)
-{
-	struct acpi_evaluate_info *info;
-	union acpi_operand_object *args[2];
-	acpi_status status;
-	struct acpi_buffer buffer;
-
-	ACPI_FUNCTION_TRACE(rs_set_srs_method_data);
-
-	/* Allocate and initialize the evaluation information block */
-
-	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
-	if (!info) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	info->prefix_node = node;
-	info->pathname = METHOD_NAME__SRS;
-	info->parameters = args;
-	info->flags = ACPI_IGNORE_RETURN_VALUE;
-
-	/*
-	 * The in_buffer parameter will point to a linked list of
-	 * resource parameters. It needs to be formatted into a
-	 * byte stream to be sent in as an input parameter to _SRS
-	 *
-	 * Convert the linked list into a byte stream
-	 */
-	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
-	status = acpi_rs_create_aml_resources(in_buffer->pointer, &buffer);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	/* Create and initialize the method parameter object */
-
-	args[0] = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
-	if (!args[0]) {
-		/*
-		 * Must free the buffer allocated above (otherwise it is freed
-		 * later)
-		 */
-		ACPI_FREE(buffer.pointer);
-		status = AE_NO_MEMORY;
-		goto cleanup;
-	}
-
-	args[0]->buffer.length = (u32) buffer.length;
-	args[0]->buffer.pointer = buffer.pointer;
-	args[0]->common.flags = AOPOBJ_DATA_VALID;
-	args[1] = NULL;
-
-	/* Execute the method, no return value is expected */
-
-	status = acpi_ns_evaluate(info);
-
-	/* Clean up and return the status from acpi_ns_evaluate */
-
-	acpi_ut_remove_reference(args[0]);
-
-      cleanup:
-	ACPI_FREE(info);
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/resources/rsxface.c b/drivers/acpi/resources/rsxface.c
deleted file mode 100644
index f59f4c4..0000000
--- a/drivers/acpi/resources/rsxface.c
+++ /dev/null
@@ -1,570 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: rsxface - Public interfaces to the resource manager
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acresrc.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_RESOURCES
-ACPI_MODULE_NAME("rsxface")
-
-/* Local macros for 16,32-bit to 64-bit conversion */
-#define ACPI_COPY_FIELD(out, in, field)  ((out)->field = (in)->field)
-#define ACPI_COPY_ADDRESS(out, in)                      \
-	ACPI_COPY_FIELD(out, in, resource_type);             \
-	ACPI_COPY_FIELD(out, in, producer_consumer);         \
-	ACPI_COPY_FIELD(out, in, decode);                    \
-	ACPI_COPY_FIELD(out, in, min_address_fixed);         \
-	ACPI_COPY_FIELD(out, in, max_address_fixed);         \
-	ACPI_COPY_FIELD(out, in, info);                      \
-	ACPI_COPY_FIELD(out, in, granularity);               \
-	ACPI_COPY_FIELD(out, in, minimum);                   \
-	ACPI_COPY_FIELD(out, in, maximum);                   \
-	ACPI_COPY_FIELD(out, in, translation_offset);        \
-	ACPI_COPY_FIELD(out, in, address_length);            \
-	ACPI_COPY_FIELD(out, in, resource_source);
-/* Local prototypes */
-static acpi_status
-acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context);
-
-static acpi_status
-acpi_rs_validate_parameters(acpi_handle device_handle,
-			    struct acpi_buffer *buffer,
-			    struct acpi_namespace_node **return_node);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_validate_parameters
- *
- * PARAMETERS:  device_handle   - Handle to a device
- *              Buffer          - Pointer to a data buffer
- *              return_node     - Pointer to where the device node is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Common parameter validation for resource interfaces
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_rs_validate_parameters(acpi_handle device_handle,
-			    struct acpi_buffer *buffer,
-			    struct acpi_namespace_node **return_node)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node;
-
-	ACPI_FUNCTION_TRACE(rs_validate_parameters);
-
-	/*
-	 * Must have a valid handle to an ACPI device
-	 */
-	if (!device_handle) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	node = acpi_ns_map_handle_to_node(device_handle);
-	if (!node) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	if (node->type != ACPI_TYPE_DEVICE) {
-		return_ACPI_STATUS(AE_TYPE);
-	}
-
-	/*
-	 * Validate the user buffer object
-	 *
-	 * if there is a non-zero buffer length we also need a valid pointer in
-	 * the buffer. If it's a zero buffer length, we'll be returning the
-	 * needed buffer size (later), so keep going.
-	 */
-	status = acpi_ut_validate_buffer(buffer);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	*return_node = node;
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_irq_routing_table
- *
- * PARAMETERS:  device_handle   - Handle to the Bus device we are querying
- *              ret_buffer      - Pointer to a buffer to receive the
- *                                current resources for the device
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to get the IRQ routing table for a
- *              specific bus. The caller must first acquire a handle for the
- *              desired bus. The routing table is placed in the buffer pointed
- *              to by the ret_buffer variable parameter.
- *
- *              If the function fails an appropriate status will be returned
- *              and the value of ret_buffer is undefined.
- *
- *              This function attempts to execute the _PRT method contained in
- *              the object indicated by the passed device_handle.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_get_irq_routing_table(acpi_handle device_handle,
-			   struct acpi_buffer *ret_buffer)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node;
-
-	ACPI_FUNCTION_TRACE(acpi_get_irq_routing_table);
-
-	/* Validate parameters then dispatch to internal routine */
-
-	status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_rs_get_prt_method_data(node, ret_buffer);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_irq_routing_table)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_current_resources
- *
- * PARAMETERS:  device_handle   - Handle to the device object for the
- *                                device we are querying
- *              ret_buffer      - Pointer to a buffer to receive the
- *                                current resources for the device
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to get the current resources for a
- *              specific device. The caller must first acquire a handle for
- *              the desired device. The resource data is placed in the buffer
- *              pointed to by the ret_buffer variable parameter.
- *
- *              If the function fails an appropriate status will be returned
- *              and the value of ret_buffer is undefined.
- *
- *              This function attempts to execute the _CRS method contained in
- *              the object indicated by the passed device_handle.
- *
- ******************************************************************************/
-acpi_status
-acpi_get_current_resources(acpi_handle device_handle,
-			   struct acpi_buffer *ret_buffer)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node;
-
-	ACPI_FUNCTION_TRACE(acpi_get_current_resources);
-
-	/* Validate parameters then dispatch to internal routine */
-
-	status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_rs_get_crs_method_data(node, ret_buffer);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_current_resources)
-#ifdef ACPI_FUTURE_USAGE
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_possible_resources
- *
- * PARAMETERS:  device_handle   - Handle to the device object for the
- *                                device we are querying
- *              ret_buffer      - Pointer to a buffer to receive the
- *                                resources for the device
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to get a list of the possible resources
- *              for a specific device. The caller must first acquire a handle
- *              for the desired device. The resource data is placed in the
- *              buffer pointed to by the ret_buffer variable.
- *
- *              If the function fails an appropriate status will be returned
- *              and the value of ret_buffer is undefined.
- *
- ******************************************************************************/
-acpi_status
-acpi_get_possible_resources(acpi_handle device_handle,
-			    struct acpi_buffer *ret_buffer)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node;
-
-	ACPI_FUNCTION_TRACE(acpi_get_possible_resources);
-
-	/* Validate parameters then dispatch to internal routine */
-
-	status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_rs_get_prs_method_data(node, ret_buffer);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_possible_resources)
-#endif				/*  ACPI_FUTURE_USAGE  */
-/*******************************************************************************
- *
- * FUNCTION:    acpi_set_current_resources
- *
- * PARAMETERS:  device_handle   - Handle to the device object for the
- *                                device we are setting resources
- *              in_buffer       - Pointer to a buffer containing the
- *                                resources to be set for the device
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to set the current resources for a
- *              specific device. The caller must first acquire a handle for
- *              the desired device. The resource data is passed to the routine
- *              the buffer pointed to by the in_buffer variable.
- *
- ******************************************************************************/
-acpi_status
-acpi_set_current_resources(acpi_handle device_handle,
-			   struct acpi_buffer *in_buffer)
-{
-	acpi_status status;
-	struct acpi_namespace_node *node;
-
-	ACPI_FUNCTION_TRACE(acpi_set_current_resources);
-
-	/* Validate the buffer, don't allow zero length */
-
-	if ((!in_buffer) || (!in_buffer->pointer) || (!in_buffer->length)) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Validate parameters then dispatch to internal routine */
-
-	status = acpi_rs_validate_parameters(device_handle, in_buffer, &node);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_rs_set_srs_method_data(node, in_buffer);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_set_current_resources)
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_resource_to_address64
- *
- * PARAMETERS:  Resource        - Pointer to a resource
- *              Out             - Pointer to the users's return buffer
- *                                (a struct acpi_resource_address64)
- *
- * RETURN:      Status
- *
- * DESCRIPTION: If the resource is an address16, address32, or address64,
- *              copy it to the address64 return buffer. This saves the
- *              caller from having to duplicate code for different-sized
- *              addresses.
- *
- ******************************************************************************/
-acpi_status
-acpi_resource_to_address64(struct acpi_resource *resource,
-			   struct acpi_resource_address64 *out)
-{
-	struct acpi_resource_address16 *address16;
-	struct acpi_resource_address32 *address32;
-
-	if (!resource || !out) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/* Convert 16 or 32 address descriptor to 64 */
-
-	switch (resource->type) {
-	case ACPI_RESOURCE_TYPE_ADDRESS16:
-
-		address16 = (struct acpi_resource_address16 *)&resource->data;
-		ACPI_COPY_ADDRESS(out, address16);
-		break;
-
-	case ACPI_RESOURCE_TYPE_ADDRESS32:
-
-		address32 = (struct acpi_resource_address32 *)&resource->data;
-		ACPI_COPY_ADDRESS(out, address32);
-		break;
-
-	case ACPI_RESOURCE_TYPE_ADDRESS64:
-
-		/* Simple copy for 64 bit source */
-
-		ACPI_MEMCPY(out, &resource->data,
-			    sizeof(struct acpi_resource_address64));
-		break;
-
-	default:
-		return (AE_BAD_PARAMETER);
-	}
-
-	return (AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_resource_to_address64)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_vendor_resource
- *
- * PARAMETERS:  device_handle   - Handle for the parent device object
- *              Name            - Method name for the parent resource
- *                                (METHOD_NAME__CRS or METHOD_NAME__PRS)
- *              Uuid            - Pointer to the UUID to be matched.
- *                                includes both subtype and 16-byte UUID
- *              ret_buffer      - Where the vendor resource is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Walk a resource template for the specified device to find a
- *              vendor-defined resource that matches the supplied UUID and
- *              UUID subtype. Returns a struct acpi_resource of type Vendor.
- *
- ******************************************************************************/
-acpi_status
-acpi_get_vendor_resource(acpi_handle device_handle,
-			 char *name,
-			 struct acpi_vendor_uuid * uuid,
-			 struct acpi_buffer * ret_buffer)
-{
-	struct acpi_vendor_walk_info info;
-	acpi_status status;
-
-	/* Other parameters are validated by acpi_walk_resources */
-
-	if (!uuid || !ret_buffer) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	info.uuid = uuid;
-	info.buffer = ret_buffer;
-	info.status = AE_NOT_EXIST;
-
-	/* Walk the _CRS or _PRS resource list for this device */
-
-	status =
-	    acpi_walk_resources(device_handle, name,
-				acpi_rs_match_vendor_resource, &info);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	return (info.status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_vendor_resource)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_rs_match_vendor_resource
- *
- * PARAMETERS:  acpi_walk_resource_callback
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Match a vendor resource via the ACPI 3.0 UUID
- *
- ******************************************************************************/
-static acpi_status
-acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
-{
-	struct acpi_vendor_walk_info *info = context;
-	struct acpi_resource_vendor_typed *vendor;
-	struct acpi_buffer *buffer;
-	acpi_status status;
-
-	/* Ignore all descriptors except Vendor */
-
-	if (resource->type != ACPI_RESOURCE_TYPE_VENDOR) {
-		return (AE_OK);
-	}
-
-	vendor = &resource->data.vendor_typed;
-
-	/*
-	 * For a valid match, these conditions must hold:
-	 *
-	 * 1) Length of descriptor data must be at least as long as a UUID struct
-	 * 2) The UUID subtypes must match
-	 * 3) The UUID data must match
-	 */
-	if ((vendor->byte_length < (ACPI_UUID_LENGTH + 1)) ||
-	    (vendor->uuid_subtype != info->uuid->subtype) ||
-	    (ACPI_MEMCMP(vendor->uuid, info->uuid->data, ACPI_UUID_LENGTH))) {
-		return (AE_OK);
-	}
-
-	/* Validate/Allocate/Clear caller buffer */
-
-	buffer = info->buffer;
-	status = acpi_ut_initialize_buffer(buffer, resource->length);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	/* Found the correct resource, copy and return it */
-
-	ACPI_MEMCPY(buffer->pointer, resource, resource->length);
-	buffer->length = resource->length;
-
-	/* Found the desired descriptor, terminate resource walk */
-
-	info->status = AE_OK;
-	return (AE_CTRL_TERMINATE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_walk_resources
- *
- * PARAMETERS:  device_handle   - Handle to the device object for the
- *                                device we are querying
- *              Name            - Method name of the resources we want
- *                                (METHOD_NAME__CRS or METHOD_NAME__PRS)
- *              user_function   - Called for each resource
- *              Context         - Passed to user_function
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Retrieves the current or possible resource list for the
- *              specified device. The user_function is called once for
- *              each resource in the list.
- *
- ******************************************************************************/
-acpi_status
-acpi_walk_resources(acpi_handle device_handle,
-		    char *name,
-		    acpi_walk_resource_callback user_function, void *context)
-{
-	acpi_status status;
-	struct acpi_buffer buffer;
-	struct acpi_resource *resource;
-	struct acpi_resource *resource_end;
-
-	ACPI_FUNCTION_TRACE(acpi_walk_resources);
-
-	/* Parameter validation */
-
-	if (!device_handle || !user_function || !name ||
-	    (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
-	     !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS))) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	/* Get the _CRS or _PRS resource list */
-
-	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
-	status = acpi_rs_get_method_data(device_handle, name, &buffer);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Buffer now contains the resource list */
-
-	resource = ACPI_CAST_PTR(struct acpi_resource, buffer.pointer);
-	resource_end =
-	    ACPI_ADD_PTR(struct acpi_resource, buffer.pointer, buffer.length);
-
-	/* Walk the resource list until the end_tag is found (or buffer end) */
-
-	while (resource < resource_end) {
-
-		/* Sanity check the resource */
-
-		if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
-			status = AE_AML_INVALID_RESOURCE_TYPE;
-			break;
-		}
-
-		/* Invoke the user function, abort on any error returned */
-
-		status = user_function(resource, context);
-		if (ACPI_FAILURE(status)) {
-			if (status == AE_CTRL_TERMINATE) {
-
-				/* This is an OK termination by the user function */
-
-				status = AE_OK;
-			}
-			break;
-		}
-
-		/* end_tag indicates end-of-list */
-
-		if (resource->type == ACPI_RESOURCE_TYPE_END_TAG) {
-			break;
-		}
-
-		/* Get the next resource descriptor */
-
-		resource =
-		    ACPI_ADD_PTR(struct acpi_resource, resource,
-				 resource->length);
-	}
-
-	ACPI_FREE(buffer.pointer);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_walk_resources)
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index e53e590..0619734 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -10,7 +10,6 @@
 
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
-#include <acpi/actypes.h>
 #include <linux/wait.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 39b7233..c54d7b6 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -10,7 +10,6 @@
 #include <linux/kthread.h>
 
 #include <acpi/acpi_drivers.h>
-#include <acpi/acinterp.h>	/* for acpi_ex_eisa_id_to_string() */
 
 #define _COMPONENT		ACPI_BUS_COMPONENT
 ACPI_MODULE_NAME("scan");
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
new file mode 100644
index 0000000..7e3c609
--- /dev/null
+++ b/drivers/acpi/sleep.c
@@ -0,0 +1,747 @@
+/*
+ * sleep.c - ACPI sleep support.
+ *
+ * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
+ * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com>
+ * Copyright (c) 2000-2003 Patrick Mochel
+ * Copyright (c) 2003 Open Source Development Lab
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/dmi.h>
+#include <linux/device.h>
+#include <linux/suspend.h>
+#include <linux/reboot.h>
+
+#include <asm/io.h>
+
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include "sleep.h"
+
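+/* Non-zero entries mark the ACPI S-states supported by the platform */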
+u8 sleep_states[ACPI_S_STATE_COUNT];
+
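+/*
+ * _TTS (Transition To State) tells the platform firmware which sleep state
+ * the system is about to enter (or S0 again on wake/abort).
+ */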
+static void acpi_sleep_tts_switch(u32 acpi_state)
+{
+	union acpi_object in_arg = { ACPI_TYPE_INTEGER };
+	struct acpi_object_list arg_list = { 1, &in_arg };
+	acpi_status status = AE_OK;
+
+	in_arg.integer.value = acpi_state;
+	status = acpi_evaluate_object(NULL, "\\_TTS", &arg_list, NULL);
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		/*
+		 * The OS can't evaluate the _TTS object correctly. A warning
+		 * message will be printed, but it won't break anything.
+		 */
+		printk(KERN_NOTICE "Failure in evaluating _TTS object\n");
+	}
+}
+
+static int tts_notify_reboot(struct notifier_block *this,
+			unsigned long code, void *x)
+{
+	acpi_sleep_tts_switch(ACPI_STATE_S5);
+	return NOTIFY_DONE;
+}
+
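+/* Notifier used to run _TTS(S5) on the reboot/power-off path */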
+static struct notifier_block tts_notifier = {
+	.notifier_call	= tts_notify_reboot,
+	.next		= NULL,
+	.priority	= 0,
+};
+
+static int acpi_sleep_prepare(u32 acpi_state)
+{
+#ifdef CONFIG_ACPI_SLEEP
+	/* do we have a wakeup address for S3? */
+	if (acpi_state == ACPI_STATE_S3) {
+		if (!acpi_wakeup_address) {
+			return -EFAULT;
+		}
+		acpi_set_firmware_waking_vector(
+				(acpi_physical_address)acpi_wakeup_address);
+
+	}
+	ACPI_FLUSH_CPU_CACHE();
+	acpi_enable_wakeup_device_prep(acpi_state);
+#endif
+	printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n",
+		acpi_state);
+	acpi_enter_sleep_state_prep(acpi_state);
+	return 0;
+}
+
+#ifdef CONFIG_ACPI_SLEEP
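+/* Target sleep state of the transition in progress (S0 when none) */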
+static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+/*
+ * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
+ * user to request that behavior by using the 'acpi_old_suspend_ordering'
+ * kernel command line option that causes the following variable to be set.
+ */
+static bool old_suspend_ordering;
+
+void __init acpi_old_suspend_ordering(void)
+{
+	old_suspend_ordering = true;
+}
+
+/*
+ * According to the ACPI specification the BIOS should make sure that ACPI is
+ * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states.  Still,
+ * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
+ * on such systems during resume.  Unfortunately that doesn't help in
+ * particularly pathological cases in which SCI_EN has to be set directly on
+ * resume, although the specification states very clearly that this flag is
+ * owned by the hardware.  The set_sci_en_on_resume variable will be set in such
+ * cases.
+ */
+static bool set_sci_en_on_resume;
+/*
+ * The ACPI specification wants us to save NVS memory regions during hibernation
+ * and to restore them during the subsequent resume.  However, it is not certain
+ * if this mechanism is going to work on all machines, so we allow the user to
+ * disable this mechanism using the 'acpi_sleep=s4_nonvs' kernel command line
+ * option.
+ */
+static bool s4_no_nvs;
+
+void __init acpi_s4_no_nvs(void)
+{
+	s4_no_nvs = true;
+}
+
+/**
+ *	acpi_pm_disable_gpes - Disable the GPEs.
+ */
+static int acpi_pm_disable_gpes(void)
+{
+	acpi_disable_all_gpes();
+	return 0;
+}
+
+/**
+ *	__acpi_pm_prepare - Prepare the platform to enter the target state.
+ *
+ *	If necessary, set the firmware waking vector and do arch-specific
+ *	nastiness to get the wakeup code to the waking vector.
+ */
+static int __acpi_pm_prepare(void)
+{
+	int error = acpi_sleep_prepare(acpi_target_sleep_state);
+
+	if (error)
+		acpi_target_sleep_state = ACPI_STATE_S0;
+	return error;
+}
+
+/**
+ *	acpi_pm_prepare - Prepare the platform to enter the target sleep
+ *		state and disable the GPEs.
+ */
+static int acpi_pm_prepare(void)
+{
+	int error = __acpi_pm_prepare();
+
+	if (!error)
+		acpi_disable_all_gpes();
+	return error;
+}
+
+/**
+ *	acpi_pm_finish - Instruct the platform to leave a sleep state.
+ *
+ *	This is called after we wake back up (or if entering the sleep state
+ *	failed).
+ */
+static void acpi_pm_finish(void)
+{
+	u32 acpi_state = acpi_target_sleep_state;
+
+	if (acpi_state == ACPI_STATE_S0)
+		return;
+
+	printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n",
+		acpi_state);
+	acpi_disable_wakeup_device(acpi_state);
+	acpi_leave_sleep_state(acpi_state);
+
+	/* reset firmware waking vector */
+	acpi_set_firmware_waking_vector((acpi_physical_address) 0);
+
+	acpi_target_sleep_state = ACPI_STATE_S0;
+}
+
+/**
+ *	acpi_pm_end - Finish up suspend sequence.
+ */
+static void acpi_pm_end(void)
+{
+	/*
+	 * This is necessary in case acpi_pm_finish() is not called during a
+	 * failing transition to a sleep state.
+	 */
+	acpi_target_sleep_state = ACPI_STATE_S0;
+	acpi_sleep_tts_switch(acpi_target_sleep_state);
+}
+#else /* !CONFIG_ACPI_SLEEP */
+#define acpi_target_sleep_state	ACPI_STATE_S0
+#endif /* CONFIG_ACPI_SLEEP */
+
+#ifdef CONFIG_SUSPEND
+extern void do_suspend_lowlevel(void);
+
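+/* Map the generic PM suspend states onto the corresponding ACPI S-states */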
+static u32 acpi_suspend_states[] = {
+	[PM_SUSPEND_ON] = ACPI_STATE_S0,
+	[PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
+	[PM_SUSPEND_MEM] = ACPI_STATE_S3,
+	[PM_SUSPEND_MAX] = ACPI_STATE_S5
+};
+
+/**
+ *	acpi_suspend_begin - Set the target system sleep state to the state
+ *		associated with given @pm_state, if supported.
+ */
+static int acpi_suspend_begin(suspend_state_t pm_state)
+{
+	u32 acpi_state = acpi_suspend_states[pm_state];
+	int error = 0;
+
+	if (sleep_states[acpi_state]) {
+		acpi_target_sleep_state = acpi_state;
+		acpi_sleep_tts_switch(acpi_target_sleep_state);
+	} else {
+		printk(KERN_ERR "ACPI does not support this state: %d\n",
+			pm_state);
+		error = -ENOSYS;
+	}
+	return error;
+}
+
+/**
+ *	acpi_suspend_enter - Actually enter a sleep state.
+ *	@pm_state: ignored
+ *
+ *	Flush caches and go to sleep. For STR we have to call arch-specific
+ *	assembly, which in turn calls acpi_enter_sleep_state().
+ *	It's unfortunate, but it works. Please fix if you're feeling frisky.
+ */
+static int acpi_suspend_enter(suspend_state_t pm_state)
+{
+	acpi_status status = AE_OK;
+	unsigned long flags = 0;
+	u32 acpi_state = acpi_target_sleep_state;
+
+	ACPI_FLUSH_CPU_CACHE();
+
+	/* Do arch specific saving of state. */
+	if (acpi_state == ACPI_STATE_S3) {
+		int error = acpi_save_state_mem();
+
+		if (error)
+			return error;
+	}
+
+	local_irq_save(flags);
+	acpi_enable_wakeup_device(acpi_state);
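+	/* S1 is entered directly; S3 goes through arch-specific assembly */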
+	switch (acpi_state) {
+	case ACPI_STATE_S1:
+		barrier();
+		status = acpi_enter_sleep_state(acpi_state);
+		break;
+
+	case ACPI_STATE_S3:
+		do_suspend_lowlevel();
+		break;
+	}
+
+	/* If ACPI is not enabled by the BIOS, we need to enable it here. */
+	if (set_sci_en_on_resume)
+		acpi_set_register(ACPI_BITREG_SCI_ENABLE, 1);
+	else
+		acpi_enable();
+
+	/* Reprogram control registers and execute _BFS */
+	acpi_leave_sleep_state_prep(acpi_state);
+
+	/* The ACPI 3.0 spec (p. 62) says that it is the responsibility
+	 * of the OSPM to clear the status bit [implying that the
+	 * POWER_BUTTON event should not reach userspace]
+	 */
+	if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3))
+		acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+
+	/*
+	 * Disable and clear GPE status before interrupts are enabled. Some GPEs
+	 * (such as wakeup GPEs) have no handler, so this avoids spurious GPE
+	 * firings. acpi_leave_sleep_state() will re-enable specific GPEs later.
+	 */
+	acpi_disable_all_gpes();
+
+	local_irq_restore(flags);
+	printk(KERN_DEBUG "Back to C!\n");
+
+	/* restore processor state */
+	if (acpi_state == ACPI_STATE_S3)
+		acpi_restore_state_mem();
+
+	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
+}
+
+static int acpi_suspend_state_valid(suspend_state_t pm_state)
+{
+	u32 acpi_state;
+
+	switch (pm_state) {
+	case PM_SUSPEND_ON:
+	case PM_SUSPEND_STANDBY:
+	case PM_SUSPEND_MEM:
+		acpi_state = acpi_suspend_states[pm_state];
+
+		return sleep_states[acpi_state];
+	default:
+		return 0;
+	}
+}
+
+static struct platform_suspend_ops acpi_suspend_ops = {
+	.valid = acpi_suspend_state_valid,
+	.begin = acpi_suspend_begin,
+	.prepare = acpi_pm_prepare,
+	.enter = acpi_suspend_enter,
+	.finish = acpi_pm_finish,
+	.end = acpi_pm_end,
+};
+
+/**
+ *	acpi_suspend_begin_old - Set the target system sleep state to the
+ *		state associated with given @pm_state, if supported, and
+ *		execute the _PTS control method.  This function is used if the
+ *		pre-ACPI 2.0 suspend ordering has been requested.
+ */
+static int acpi_suspend_begin_old(suspend_state_t pm_state)
+{
+	int error = acpi_suspend_begin(pm_state);
+
+	if (!error)
+		error = __acpi_pm_prepare();
+	return error;
+}
+
+/*
+ * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
+ * been requested.
+ */
+static struct platform_suspend_ops acpi_suspend_ops_old = {
+	.valid = acpi_suspend_state_valid,
+	.begin = acpi_suspend_begin_old,
+	.prepare = acpi_pm_disable_gpes,
+	.enter = acpi_suspend_enter,
+	.finish = acpi_pm_finish,
+	.end = acpi_pm_end,
+	.recover = acpi_pm_finish,
+};
+
+static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
+{
+	old_suspend_ordering = true;
+	return 0;
+}
+
+static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
+{
+	set_sci_en_on_resume = true;
+	return 0;
+}
+
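+/*
+ * Machines known to need the pre-ACPI 2.0 suspend ordering or the
+ * SCI_EN-on-resume workaround (see the callbacks above).
+ */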
+static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+	{
+	.callback = init_old_suspend_ordering,
+	.ident = "Abit KN9 (nForce4 variant)",
+	.matches = {
+		DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
+		DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
+		},
+	},
+	{
+	.callback = init_old_suspend_ordering,
+	.ident = "HP xw4600 Workstation",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Apple MacBook 1,1",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Apple MacMini 1,1",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
+		},
+	},
+	{},
+};
+#endif /* CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
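+/* FACS hardware signature, compared on resume to detect hardware changes */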
+static unsigned long s4_hardware_signature;
+static struct acpi_table_facs *facs;
+static bool nosigcheck;
+
+void __init acpi_no_s4_hw_signature(void)
+{
+	nosigcheck = true;
+}
+
+static int acpi_hibernation_begin(void)
+{
+	int error;
+
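+	/* Allocate buffers for saving the NVS region unless the user disabled it */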
+	error = s4_no_nvs ? 0 : hibernate_nvs_alloc();
+	if (!error) {
+		acpi_target_sleep_state = ACPI_STATE_S4;
+		acpi_sleep_tts_switch(acpi_target_sleep_state);
+	}
+
+	return error;
+}
+
+static int acpi_hibernation_pre_snapshot(void)
+{
+	int error = acpi_pm_prepare();
+
+	if (!error)
+		hibernate_nvs_save();
+
+	return error;
+}
+
+static int acpi_hibernation_enter(void)
+{
+	acpi_status status = AE_OK;
+	unsigned long flags = 0;
+
+	ACPI_FLUSH_CPU_CACHE();
+
+	local_irq_save(flags);
+	acpi_enable_wakeup_device(ACPI_STATE_S4);
+	/* This shouldn't return.  If it returns, we have a problem */
+	status = acpi_enter_sleep_state(ACPI_STATE_S4);
+	/* Reprogram control registers and execute _BFS */
+	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
+	local_irq_restore(flags);
+
+	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
+}
+
+static void acpi_hibernation_finish(void)
+{
+	hibernate_nvs_free();
+	acpi_pm_finish();
+}
+
+static void acpi_hibernation_leave(void)
+{
+	/*
+	 * If ACPI has not been enabled by the BIOS or by the boot kernel,
+	 * it needs to be enabled here.
+	 */
+	acpi_enable();
+	/* Reprogram control registers and execute _BFS */
+	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
+	/* Check the hardware signature */
+	if (facs && s4_hardware_signature != facs->hardware_signature) {
+		printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
+			"cannot resume!\n");
+		panic("ACPI S4 hardware signature mismatch");
+	}
+	/* Restore the NVS memory area */
+	hibernate_nvs_restore();
+}
+
+static void acpi_pm_enable_gpes(void)
+{
+	acpi_enable_all_runtime_gpes();
+}
+
+static struct platform_hibernation_ops acpi_hibernation_ops = {
+	.begin = acpi_hibernation_begin,
+	.end = acpi_pm_end,
+	.pre_snapshot = acpi_hibernation_pre_snapshot,
+	.finish = acpi_hibernation_finish,
+	.prepare = acpi_pm_prepare,
+	.enter = acpi_hibernation_enter,
+	.leave = acpi_hibernation_leave,
+	.pre_restore = acpi_pm_disable_gpes,
+	.restore_cleanup = acpi_pm_enable_gpes,
+};
+
+/**
+ *	acpi_hibernation_begin_old - Set the target system sleep state to
+ *		ACPI_STATE_S4 and execute the _PTS control method.  This
+ *		function is used if the pre-ACPI 2.0 suspend ordering has been
+ *		requested.
+ */
+static int acpi_hibernation_begin_old(void)
+{
+	int error;
+	/*
+	 * The _TTS object should always be evaluated before the _PTS object.
+	 * When old_suspend_ordering is true, the _PTS object is evaluated
+	 * by acpi_sleep_prepare().
+	 */
+	acpi_sleep_tts_switch(ACPI_STATE_S4);
+
+	error = acpi_sleep_prepare(ACPI_STATE_S4);
+
+	if (!error) {
+		if (!s4_no_nvs)
+			error = hibernate_nvs_alloc();
+		if (!error)
+			acpi_target_sleep_state = ACPI_STATE_S4;
+	}
+	return error;
+}
+
+static int acpi_hibernation_pre_snapshot_old(void)
+{
+	int error = acpi_pm_disable_gpes();
+
+	if (!error)
+		hibernate_nvs_save();
+
+	return error;
+}
+
+/*
+ * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
+ * been requested.
+ */
+static struct platform_hibernation_ops acpi_hibernation_ops_old = {
+	.begin = acpi_hibernation_begin_old,
+	.end = acpi_pm_end,
+	.pre_snapshot = acpi_hibernation_pre_snapshot_old,
+	.finish = acpi_hibernation_finish,
+	.prepare = acpi_pm_disable_gpes,
+	.enter = acpi_hibernation_enter,
+	.leave = acpi_hibernation_leave,
+	.pre_restore = acpi_pm_disable_gpes,
+	.restore_cleanup = acpi_pm_enable_gpes,
+	.recover = acpi_pm_finish,
+};
+#endif /* CONFIG_HIBERNATION */
+
+int acpi_suspend(u32 acpi_state)
+{
+	suspend_state_t states[] = {
+		[1] = PM_SUSPEND_STANDBY,
+		[3] = PM_SUSPEND_MEM,
+		[5] = PM_SUSPEND_MAX
+	};
+
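+	/*
+	 * S1, S3 and S5 map onto pm_suspend() targets above; S4 is routed
+	 * to hibernate() instead.
+	 */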
+	if (acpi_state < 6 && states[acpi_state])
+		return pm_suspend(states[acpi_state]);
+	if (acpi_state == 4)
+		return hibernate();
+	return -EINVAL;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ *	acpi_pm_device_sleep_state - return preferred power state of ACPI device
+ *		in the system sleep state given by %acpi_target_sleep_state
+ *	@dev: device to examine; its driver model wakeup flags control
+ *		whether it should be able to wake up the system
+ *	@d_min_p: used to store the upper limit of the allowed states range
+ *	Return value: preferred power state of the device on success, -ENODEV on
+ *		failure (i.e. if there's no 'struct acpi_device' for @dev)
+ *
+ *	Find the lowest power (highest number) ACPI device power state that
+ *	device @dev can be in while the system is in the sleep state represented
+ *	by %acpi_target_sleep_state.  If the device is allowed to wake up the
+ *	system (see device_may_wakeup()), that is taken into account.  If
+ *	@d_min_p is set,
+ *	the highest power (lowest number) device power state of @dev allowed
+ *	in this system sleep state is stored at the location pointed to by it.
+ *
+ *	The caller must ensure that @dev is valid before using this function.
+ *	Whether the device should be able to wake up the system is determined
+ *	from its driver model wakeup flags; there is no separate wake argument.
+ */
+int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
+{
+	acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
+	struct acpi_device *adev;
+	char acpi_method[] = "_SxD";
+	unsigned long long d_min, d_max;
+
+	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+		printk(KERN_DEBUG "ACPI handle has no context!\n");
+		return -ENODEV;
+	}
+
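+	/* Build "_SxD" for the target sleep state, e.g. "_S3D" for S3. */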
+	acpi_method[2] = '0' + acpi_target_sleep_state;
+	/*
+	 * If the sleep state is S0, we will return D3, but if the device has
+	 * _S0W, we will use the value from _S0W
+	 */
+	d_min = ACPI_STATE_D0;
+	d_max = ACPI_STATE_D3;
+
+	/*
+	 * If present, _SxD methods return the minimum D-state (highest power
+	 * state) we can use for the corresponding S-states.  Otherwise, the
+	 * minimum D-state is D0 (ACPI 3.x).
+	 *
+	 * NOTE: We rely on acpi_evaluate_integer() not clobbering the integer
+	 * provided -- that is our fault recovery; the return value is ignored.
+	 */
+	if (acpi_target_sleep_state > ACPI_STATE_S0)
+		acpi_evaluate_integer(handle, acpi_method, NULL, &d_min);
+
+	/*
+	 * If _PRW says we can wake up the system from the target sleep state,
+	 * the D-state returned by _SxD is sufficient for that (we assume a
+	 * wakeup-aware driver if wake is set).  Still, if _SxW exists
+	 * (ACPI 3.x), it should return the maximum (lowest power) D-state that
+	 * can wake the system.  _S0W may be valid, too.
+	 */
+	if (acpi_target_sleep_state == ACPI_STATE_S0 ||
+	    (device_may_wakeup(dev) && adev->wakeup.state.enabled &&
+	     adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
+		acpi_status status;
+
+		acpi_method[3] = 'W';
+		status = acpi_evaluate_integer(handle, acpi_method, NULL,
+						&d_max);
+		if (ACPI_FAILURE(status)) {
+			d_max = d_min;
+		} else if (d_max < d_min) {
+			/* Warn the user of the broken DSDT */
+			printk(KERN_WARNING "ACPI: Wrong value from %s\n",
+				acpi_method);
+			/* Sanitize it */
+			d_min = d_max;
+		}
+	}
+
+	if (d_min_p)
+		*d_min_p = d_min;
+	return d_max;
+}
+
+/**
+ *	acpi_pm_device_sleep_wake - enable or disable the system wake-up
+ *                                  capability of given device
+ *	@dev: device to handle
+ *	@enable: 'true' - enable, 'false' - disable the wake-up capability
+ */
+int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
+{
+	acpi_handle handle;
+	struct acpi_device *adev;
+
+	if (!device_may_wakeup(dev))
+		return -EINVAL;
+
+	handle = DEVICE_ACPI_HANDLE(dev);
+	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+		printk(KERN_DEBUG "ACPI handle has no context!\n");
+		return -ENODEV;
+	}
+
+	return enable ?
+		acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) :
+		acpi_disable_wakeup_device_power(adev);
+}
+#endif
+
+static void acpi_power_off_prepare(void)
+{
+	/* Prepare to power off the system */
+	acpi_sleep_prepare(ACPI_STATE_S5);
+	acpi_disable_all_gpes();
+}
+
+static void acpi_power_off(void)
+{
+	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
+	printk("%s called\n", __func__);
+	local_irq_disable();
+	acpi_enable_wakeup_device(ACPI_STATE_S5);
+	acpi_enter_sleep_state(ACPI_STATE_S5);
+}
+
+int __init acpi_sleep_init(void)
+{
+	acpi_status status;
+	u8 type_a, type_b;
+#ifdef CONFIG_SUSPEND
+	int i = 0;
+
+	dmi_check_system(acpisleep_dmi_table);
+#endif
+
+	if (acpi_disabled)
+		return 0;
+
+	sleep_states[ACPI_STATE_S0] = 1;
+	printk(KERN_INFO PREFIX "(supports S0");
+
+#ifdef CONFIG_SUSPEND
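+	/* Probe which of S1-S3 the platform firmware actually supports. */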
+	for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
+		status = acpi_get_sleep_type_data(i, &type_a, &type_b);
+		if (ACPI_SUCCESS(status)) {
+			sleep_states[i] = 1;
+			printk(" S%d", i);
+		}
+	}
+
+	suspend_set_ops(old_suspend_ordering ?
+		&acpi_suspend_ops_old : &acpi_suspend_ops);
+#endif
+
+#ifdef CONFIG_HIBERNATION
+	status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
+	if (ACPI_SUCCESS(status)) {
+		hibernation_set_ops(old_suspend_ordering ?
+			&acpi_hibernation_ops_old : &acpi_hibernation_ops);
+		sleep_states[ACPI_STATE_S4] = 1;
+		printk(" S4");
+		if (!nosigcheck) {
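+			/*
+			 * Remember the FACS hardware signature so that a
+			 * change of hardware can be detected when resuming
+			 * from hibernation (see acpi_hibernation_leave()).
+			 */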
+			acpi_get_table(ACPI_SIG_FACS, 1,
+				(struct acpi_table_header **)&facs);
+			if (facs)
+				s4_hardware_signature =
+					facs->hardware_signature;
+		}
+	}
+#endif
+	status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
+	if (ACPI_SUCCESS(status)) {
+		sleep_states[ACPI_STATE_S5] = 1;
+		printk(" S5");
+		pm_power_off_prepare = acpi_power_off_prepare;
+		pm_power_off = acpi_power_off;
+	}
+	printk(")\n");
+	/*
+	 * Register the tts_notifier to reboot notifier list so that the _TTS
+	 * object can also be evaluated when the system enters S5.
+	 */
+	register_reboot_notifier(&tts_notifier);
+	return 0;
+}
diff --git a/drivers/acpi/sleep/sleep.h b/drivers/acpi/sleep.h
similarity index 100%
rename from drivers/acpi/sleep/sleep.h
rename to drivers/acpi/sleep.h
diff --git a/drivers/acpi/sleep/Makefile b/drivers/acpi/sleep/Makefile
deleted file mode 100644
index f1fb888..0000000
--- a/drivers/acpi/sleep/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-y					:= wakeup.o
-obj-y					+= main.o
-obj-$(CONFIG_ACPI_SLEEP)		+= proc.o
-
-EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
deleted file mode 100644
index 28a691cc..0000000
--- a/drivers/acpi/sleep/main.c
+++ /dev/null
@@ -1,696 +0,0 @@
-/*
- * sleep.c - ACPI sleep support.
- *
- * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
- * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com>
- * Copyright (c) 2000-2003 Patrick Mochel
- * Copyright (c) 2003 Open Source Development Lab
- *
- * This file is released under the GPLv2.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/dmi.h>
-#include <linux/device.h>
-#include <linux/suspend.h>
-#include <linux/reboot.h>
-
-#include <asm/io.h>
-
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-#include "sleep.h"
-
-u8 sleep_states[ACPI_S_STATE_COUNT];
-
-static void acpi_sleep_tts_switch(u32 acpi_state)
-{
-	union acpi_object in_arg = { ACPI_TYPE_INTEGER };
-	struct acpi_object_list arg_list = { 1, &in_arg };
-	acpi_status status = AE_OK;
-
-	in_arg.integer.value = acpi_state;
-	status = acpi_evaluate_object(NULL, "\\_TTS", &arg_list, NULL);
-	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-		/*
-		 * OS can't evaluate the _TTS object correctly. Some warning
-		 * message will be printed. But it won't break anything.
-		 */
-		printk(KERN_NOTICE "Failure in evaluating _TTS object\n");
-	}
-}
-
-static int tts_notify_reboot(struct notifier_block *this,
-			unsigned long code, void *x)
-{
-	acpi_sleep_tts_switch(ACPI_STATE_S5);
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block tts_notifier = {
-	.notifier_call	= tts_notify_reboot,
-	.next		= NULL,
-	.priority	= 0,
-};
-
-static int acpi_sleep_prepare(u32 acpi_state)
-{
-#ifdef CONFIG_ACPI_SLEEP
-	/* do we have a wakeup address for S2 and S3? */
-	if (acpi_state == ACPI_STATE_S3) {
-		if (!acpi_wakeup_address) {
-			return -EFAULT;
-		}
-		acpi_set_firmware_waking_vector(
-				(acpi_physical_address)acpi_wakeup_address);
-
-	}
-	ACPI_FLUSH_CPU_CACHE();
-	acpi_enable_wakeup_device_prep(acpi_state);
-#endif
-	printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n",
-		acpi_state);
-	acpi_enter_sleep_state_prep(acpi_state);
-	return 0;
-}
-
-#ifdef CONFIG_ACPI_SLEEP
-static u32 acpi_target_sleep_state = ACPI_STATE_S0;
-/*
- * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
- * user to request that behavior by using the 'acpi_old_suspend_ordering'
- * kernel command line option that causes the following variable to be set.
- */
-static bool old_suspend_ordering;
-
-void __init acpi_old_suspend_ordering(void)
-{
-	old_suspend_ordering = true;
-}
-
-/*
- * According to the ACPI specification the BIOS should make sure that ACPI is
- * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states.  Still,
- * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
- * on such systems during resume.  Unfortunately that doesn't help in
- * particularly pathological cases in which SCI_EN has to be set directly on
- * resume, although the specification states very clearly that this flag is
- * owned by the hardware.  The set_sci_en_on_resume variable will be set in such
- * cases.
- */
-static bool set_sci_en_on_resume;
-
-/**
- *	acpi_pm_disable_gpes - Disable the GPEs.
- */
-static int acpi_pm_disable_gpes(void)
-{
-	acpi_hw_disable_all_gpes();
-	return 0;
-}
-
-/**
- *	__acpi_pm_prepare - Prepare the platform to enter the target state.
- *
- *	If necessary, set the firmware waking vector and do arch-specific
- *	nastiness to get the wakeup code to the waking vector.
- */
-static int __acpi_pm_prepare(void)
-{
-	int error = acpi_sleep_prepare(acpi_target_sleep_state);
-
-	if (error)
-		acpi_target_sleep_state = ACPI_STATE_S0;
-	return error;
-}
-
-/**
- *	acpi_pm_prepare - Prepare the platform to enter the target sleep
- *		state and disable the GPEs.
- */
-static int acpi_pm_prepare(void)
-{
-	int error = __acpi_pm_prepare();
-
-	if (!error)
-		acpi_hw_disable_all_gpes();
-	return error;
-}
-
-/**
- *	acpi_pm_finish - Instruct the platform to leave a sleep state.
- *
- *	This is called after we wake back up (or if entering the sleep state
- *	failed).
- */
-static void acpi_pm_finish(void)
-{
-	u32 acpi_state = acpi_target_sleep_state;
-
-	if (acpi_state == ACPI_STATE_S0)
-		return;
-
-	printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n",
-		acpi_state);
-	acpi_disable_wakeup_device(acpi_state);
-	acpi_leave_sleep_state(acpi_state);
-
-	/* reset firmware waking vector */
-	acpi_set_firmware_waking_vector((acpi_physical_address) 0);
-
-	acpi_target_sleep_state = ACPI_STATE_S0;
-}
-
-/**
- *	acpi_pm_end - Finish up suspend sequence.
- */
-static void acpi_pm_end(void)
-{
-	/*
-	 * This is necessary in case acpi_pm_finish() is not called during a
-	 * failing transition to a sleep state.
-	 */
-	acpi_target_sleep_state = ACPI_STATE_S0;
-	acpi_sleep_tts_switch(acpi_target_sleep_state);
-}
-#else /* !CONFIG_ACPI_SLEEP */
-#define acpi_target_sleep_state	ACPI_STATE_S0
-#endif /* CONFIG_ACPI_SLEEP */
-
-#ifdef CONFIG_SUSPEND
-extern void do_suspend_lowlevel(void);
-
-static u32 acpi_suspend_states[] = {
-	[PM_SUSPEND_ON] = ACPI_STATE_S0,
-	[PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
-	[PM_SUSPEND_MEM] = ACPI_STATE_S3,
-	[PM_SUSPEND_MAX] = ACPI_STATE_S5
-};
-
-/**
- *	acpi_suspend_begin - Set the target system sleep state to the state
- *		associated with given @pm_state, if supported.
- */
-static int acpi_suspend_begin(suspend_state_t pm_state)
-{
-	u32 acpi_state = acpi_suspend_states[pm_state];
-	int error = 0;
-
-	if (sleep_states[acpi_state]) {
-		acpi_target_sleep_state = acpi_state;
-		acpi_sleep_tts_switch(acpi_target_sleep_state);
-	} else {
-		printk(KERN_ERR "ACPI does not support this state: %d\n",
-			pm_state);
-		error = -ENOSYS;
-	}
-	return error;
-}
-
-/**
- *	acpi_suspend_enter - Actually enter a sleep state.
- *	@pm_state: ignored
- *
- *	Flush caches and go to sleep. For STR we have to call arch-specific
- *	assembly, which in turn call acpi_enter_sleep_state().
- *	It's unfortunate, but it works. Please fix if you're feeling frisky.
- */
-static int acpi_suspend_enter(suspend_state_t pm_state)
-{
-	acpi_status status = AE_OK;
-	unsigned long flags = 0;
-	u32 acpi_state = acpi_target_sleep_state;
-
-	ACPI_FLUSH_CPU_CACHE();
-
-	/* Do arch specific saving of state. */
-	if (acpi_state == ACPI_STATE_S3) {
-		int error = acpi_save_state_mem();
-
-		if (error)
-			return error;
-	}
-
-	local_irq_save(flags);
-	acpi_enable_wakeup_device(acpi_state);
-	switch (acpi_state) {
-	case ACPI_STATE_S1:
-		barrier();
-		status = acpi_enter_sleep_state(acpi_state);
-		break;
-
-	case ACPI_STATE_S3:
-		do_suspend_lowlevel();
-		break;
-	}
-
-	/* If ACPI is not enabled by the BIOS, we need to enable it here. */
-	if (set_sci_en_on_resume)
-		acpi_set_register(ACPI_BITREG_SCI_ENABLE, 1);
-	else
-		acpi_enable();
-
-	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(acpi_state);
-
-	/* ACPI 3.0 specs (P62) says that it's the responsibility
-	 * of the OSPM to clear the status bit [ implying that the
-	 * POWER_BUTTON event should not reach userspace ]
-	 */
-	if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3))
-		acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
-
-	/*
-	 * Disable and clear GPE status before interrupt is enabled. Some GPEs
-	 * (like wakeup GPE) haven't handler, this can avoid such GPE misfire.
-	 * acpi_leave_sleep_state will reenable specific GPEs later
-	 */
-	acpi_hw_disable_all_gpes();
-
-	local_irq_restore(flags);
-	printk(KERN_DEBUG "Back to C!\n");
-
-	/* restore processor state */
-	if (acpi_state == ACPI_STATE_S3)
-		acpi_restore_state_mem();
-
-	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
-}
-
-static int acpi_suspend_state_valid(suspend_state_t pm_state)
-{
-	u32 acpi_state;
-
-	switch (pm_state) {
-	case PM_SUSPEND_ON:
-	case PM_SUSPEND_STANDBY:
-	case PM_SUSPEND_MEM:
-		acpi_state = acpi_suspend_states[pm_state];
-
-		return sleep_states[acpi_state];
-	default:
-		return 0;
-	}
-}
-
-static struct platform_suspend_ops acpi_suspend_ops = {
-	.valid = acpi_suspend_state_valid,
-	.begin = acpi_suspend_begin,
-	.prepare = acpi_pm_prepare,
-	.enter = acpi_suspend_enter,
-	.finish = acpi_pm_finish,
-	.end = acpi_pm_end,
-};
-
-/**
- *	acpi_suspend_begin_old - Set the target system sleep state to the
- *		state associated with given @pm_state, if supported, and
- *		execute the _PTS control method.  This function is used if the
- *		pre-ACPI 2.0 suspend ordering has been requested.
- */
-static int acpi_suspend_begin_old(suspend_state_t pm_state)
-{
-	int error = acpi_suspend_begin(pm_state);
-
-	if (!error)
-		error = __acpi_pm_prepare();
-	return error;
-}
-
-/*
- * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
- * been requested.
- */
-static struct platform_suspend_ops acpi_suspend_ops_old = {
-	.valid = acpi_suspend_state_valid,
-	.begin = acpi_suspend_begin_old,
-	.prepare = acpi_pm_disable_gpes,
-	.enter = acpi_suspend_enter,
-	.finish = acpi_pm_finish,
-	.end = acpi_pm_end,
-	.recover = acpi_pm_finish,
-};
-
-static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
-{
-	old_suspend_ordering = true;
-	return 0;
-}
-
-static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
-{
-	set_sci_en_on_resume = true;
-	return 0;
-}
-
-static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
-	{
-	.callback = init_old_suspend_ordering,
-	.ident = "Abit KN9 (nForce4 variant)",
-	.matches = {
-		DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
-		DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
-		},
-	},
-	{
-	.callback = init_old_suspend_ordering,
-	.ident = "HP xw4600 Workstation",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-		DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
-		},
-	},
-	{
-	.callback = init_set_sci_en_on_resume,
-	.ident = "Apple MacBook 1,1",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
-		DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
-		},
-	},
-	{
-	.callback = init_set_sci_en_on_resume,
-	.ident = "Apple MacMini 1,1",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
-		DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
-		},
-	},
-	{},
-};
-#endif /* CONFIG_SUSPEND */
-
-#ifdef CONFIG_HIBERNATION
-static unsigned long s4_hardware_signature;
-static struct acpi_table_facs *facs;
-static bool nosigcheck;
-
-void __init acpi_no_s4_hw_signature(void)
-{
-	nosigcheck = true;
-}
-
-static int acpi_hibernation_begin(void)
-{
-	acpi_target_sleep_state = ACPI_STATE_S4;
-	acpi_sleep_tts_switch(acpi_target_sleep_state);
-	return 0;
-}
-
-static int acpi_hibernation_enter(void)
-{
-	acpi_status status = AE_OK;
-	unsigned long flags = 0;
-
-	ACPI_FLUSH_CPU_CACHE();
-
-	local_irq_save(flags);
-	acpi_enable_wakeup_device(ACPI_STATE_S4);
-	/* This shouldn't return.  If it returns, we have a problem */
-	status = acpi_enter_sleep_state(ACPI_STATE_S4);
-	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
-	local_irq_restore(flags);
-
-	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
-}
-
-static void acpi_hibernation_leave(void)
-{
-	/*
-	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
-	 * enable it here.
-	 */
-	acpi_enable();
-	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
-	/* Check the hardware signature */
-	if (facs && s4_hardware_signature != facs->hardware_signature) {
-		printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
-			"cannot resume!\n");
-		panic("ACPI S4 hardware signature mismatch");
-	}
-}
-
-static void acpi_pm_enable_gpes(void)
-{
-	acpi_hw_enable_all_runtime_gpes();
-}
-
-static struct platform_hibernation_ops acpi_hibernation_ops = {
-	.begin = acpi_hibernation_begin,
-	.end = acpi_pm_end,
-	.pre_snapshot = acpi_pm_prepare,
-	.finish = acpi_pm_finish,
-	.prepare = acpi_pm_prepare,
-	.enter = acpi_hibernation_enter,
-	.leave = acpi_hibernation_leave,
-	.pre_restore = acpi_pm_disable_gpes,
-	.restore_cleanup = acpi_pm_enable_gpes,
-};
-
-/**
- *	acpi_hibernation_begin_old - Set the target system sleep state to
- *		ACPI_STATE_S4 and execute the _PTS control method.  This
- *		function is used if the pre-ACPI 2.0 suspend ordering has been
- *		requested.
- */
-static int acpi_hibernation_begin_old(void)
-{
-	int error;
-	/*
-	 * The _TTS object should always be evaluated before the _PTS object.
-	 * When the old_suspended_ordering is true, the _PTS object is
-	 * evaluated in the acpi_sleep_prepare.
-	 */
-	acpi_sleep_tts_switch(ACPI_STATE_S4);
-
-	error = acpi_sleep_prepare(ACPI_STATE_S4);
-
-	if (!error)
-		acpi_target_sleep_state = ACPI_STATE_S4;
-	return error;
-}
-
-/*
- * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
- * been requested.
- */
-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
-	.begin = acpi_hibernation_begin_old,
-	.end = acpi_pm_end,
-	.pre_snapshot = acpi_pm_disable_gpes,
-	.finish = acpi_pm_finish,
-	.prepare = acpi_pm_disable_gpes,
-	.enter = acpi_hibernation_enter,
-	.leave = acpi_hibernation_leave,
-	.pre_restore = acpi_pm_disable_gpes,
-	.restore_cleanup = acpi_pm_enable_gpes,
-	.recover = acpi_pm_finish,
-};
-#endif /* CONFIG_HIBERNATION */
-
-int acpi_suspend(u32 acpi_state)
-{
-	suspend_state_t states[] = {
-		[1] = PM_SUSPEND_STANDBY,
-		[3] = PM_SUSPEND_MEM,
-		[5] = PM_SUSPEND_MAX
-	};
-
-	if (acpi_state < 6 && states[acpi_state])
-		return pm_suspend(states[acpi_state]);
-	if (acpi_state == 4)
-		return hibernate();
-	return -EINVAL;
-}
-
-#ifdef CONFIG_PM_SLEEP
-/**
- *	acpi_pm_device_sleep_state - return preferred power state of ACPI device
- *		in the system sleep state given by %acpi_target_sleep_state
- *	@dev: device to examine; its driver model wakeup flags control
- *		whether it should be able to wake up the system
- *	@d_min_p: used to store the upper limit of allowed states range
- *	Return value: preferred power state of the device on success, -ENODEV on
- *		failure (ie. if there's no 'struct acpi_device' for @dev)
- *
- *	Find the lowest power (highest number) ACPI device power state that
- *	device @dev can be in while the system is in the sleep state represented
- *	by %acpi_target_sleep_state.  If @wake is nonzero, the device should be
- *	able to wake up the system from this sleep state.  If @d_min_p is set,
- *	the highest power (lowest number) device power state of @dev allowed
- *	in this system sleep state is stored at the location pointed to by it.
- *
- *	The caller must ensure that @dev is valid before using this function.
- *	The caller is also responsible for figuring out if the device is
- *	supposed to be able to wake up the system and passing this information
- *	via @wake.
- */
-
-int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
-{
-	acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
-	struct acpi_device *adev;
-	char acpi_method[] = "_SxD";
-	unsigned long long d_min, d_max;
-
-	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
-		printk(KERN_DEBUG "ACPI handle has no context!\n");
-		return -ENODEV;
-	}
-
-	acpi_method[2] = '0' + acpi_target_sleep_state;
-	/*
-	 * If the sleep state is S0, we will return D3, but if the device has
-	 * _S0W, we will use the value from _S0W
-	 */
-	d_min = ACPI_STATE_D0;
-	d_max = ACPI_STATE_D3;
-
-	/*
-	 * If present, _SxD methods return the minimum D-state (highest power
-	 * state) we can use for the corresponding S-states.  Otherwise, the
-	 * minimum D-state is D0 (ACPI 3.x).
-	 *
-	 * NOTE: We rely on acpi_evaluate_integer() not clobbering the integer
-	 * provided -- that's our fault recovery, we ignore retval.
-	 */
-	if (acpi_target_sleep_state > ACPI_STATE_S0)
-		acpi_evaluate_integer(handle, acpi_method, NULL, &d_min);
-
-	/*
-	 * If _PRW says we can wake up the system from the target sleep state,
-	 * the D-state returned by _SxD is sufficient for that (we assume a
-	 * wakeup-aware driver if wake is set).  Still, if _SxW exists
-	 * (ACPI 3.x), it should return the maximum (lowest power) D-state that
-	 * can wake the system.  _S0W may be valid, too.
-	 */
-	if (acpi_target_sleep_state == ACPI_STATE_S0 ||
-	    (device_may_wakeup(dev) && adev->wakeup.state.enabled &&
-	     adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
-		acpi_status status;
-
-		acpi_method[3] = 'W';
-		status = acpi_evaluate_integer(handle, acpi_method, NULL,
-						&d_max);
-		if (ACPI_FAILURE(status)) {
-			d_max = d_min;
-		} else if (d_max < d_min) {
-			/* Warn the user of the broken DSDT */
-			printk(KERN_WARNING "ACPI: Wrong value from %s\n",
-				acpi_method);
-			/* Sanitize it */
-			d_min = d_max;
-		}
-	}
-
-	if (d_min_p)
-		*d_min_p = d_min;
-	return d_max;
-}
-
-/**
- *	acpi_pm_device_sleep_wake - enable or disable the system wake-up
- *                                  capability of given device
- *	@dev: device to handle
- *	@enable: 'true' - enable, 'false' - disable the wake-up capability
- */
-int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
-{
-	acpi_handle handle;
-	struct acpi_device *adev;
-
-	if (!device_may_wakeup(dev))
-		return -EINVAL;
-
-	handle = DEVICE_ACPI_HANDLE(dev);
-	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
-		printk(KERN_DEBUG "ACPI handle has no context!\n");
-		return -ENODEV;
-	}
-
-	return enable ?
-		acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) :
-		acpi_disable_wakeup_device_power(adev);
-}
-#endif
-
-static void acpi_power_off_prepare(void)
-{
-	/* Prepare to power off the system */
-	acpi_sleep_prepare(ACPI_STATE_S5);
-	acpi_hw_disable_all_gpes();
-}
-
-static void acpi_power_off(void)
-{
-	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
-	printk("%s called\n", __func__);
-	local_irq_disable();
-	acpi_enable_wakeup_device(ACPI_STATE_S5);
-	acpi_enter_sleep_state(ACPI_STATE_S5);
-}
-
-int __init acpi_sleep_init(void)
-{
-	acpi_status status;
-	u8 type_a, type_b;
-#ifdef CONFIG_SUSPEND
-	int i = 0;
-
-	dmi_check_system(acpisleep_dmi_table);
-#endif
-
-	if (acpi_disabled)
-		return 0;
-
-	sleep_states[ACPI_STATE_S0] = 1;
-	printk(KERN_INFO PREFIX "(supports S0");
-
-#ifdef CONFIG_SUSPEND
-	for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
-		status = acpi_get_sleep_type_data(i, &type_a, &type_b);
-		if (ACPI_SUCCESS(status)) {
-			sleep_states[i] = 1;
-			printk(" S%d", i);
-		}
-	}
-
-	suspend_set_ops(old_suspend_ordering ?
-		&acpi_suspend_ops_old : &acpi_suspend_ops);
-#endif
-
-#ifdef CONFIG_HIBERNATION
-	status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
-	if (ACPI_SUCCESS(status)) {
-		hibernation_set_ops(old_suspend_ordering ?
-			&acpi_hibernation_ops_old : &acpi_hibernation_ops);
-		sleep_states[ACPI_STATE_S4] = 1;
-		printk(" S4");
-		if (!nosigcheck) {
-			acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
-				(struct acpi_table_header **)&facs);
-			if (facs)
-				s4_hardware_signature =
-					facs->hardware_signature;
-		}
-	}
-#endif
-	status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
-	if (ACPI_SUCCESS(status)) {
-		sleep_states[ACPI_STATE_S5] = 1;
-		printk(" S5");
-		pm_power_off_prepare = acpi_power_off_prepare;
-		pm_power_off = acpi_power_off;
-	}
-	printk(")\n");
-	/*
-	 * Register the tts_notifier to reboot notifier list so that the _TTS
-	 * object can also be evaluated when the system enters S5.
-	 */
-	register_reboot_notifier(&tts_notifier);
-	return 0;
-}
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
deleted file mode 100644
index 4dbc227..0000000
--- a/drivers/acpi/sleep/proc.c
+++ /dev/null
@@ -1,526 +0,0 @@
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/suspend.h>
-#include <linux/bcd.h>
-#include <asm/uaccess.h>
-
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
-#ifdef CONFIG_X86
-#include <linux/mc146818rtc.h>
-#endif
-
-#include "sleep.h"
-
-#define _COMPONENT		ACPI_SYSTEM_COMPONENT
-
-/*
- * this file provides support for:
- * /proc/acpi/sleep
- * /proc/acpi/alarm
- * /proc/acpi/wakeup
- */
-
-ACPI_MODULE_NAME("sleep")
-#ifdef	CONFIG_ACPI_PROCFS
-static int acpi_system_sleep_seq_show(struct seq_file *seq, void *offset)
-{
-	int i;
-
-	ACPI_FUNCTION_TRACE("acpi_system_sleep_seq_show");
-
-	for (i = 0; i <= ACPI_STATE_S5; i++) {
-		if (sleep_states[i]) {
-			seq_printf(seq, "S%d ", i);
-		}
-	}
-
-	seq_puts(seq, "\n");
-
-	return 0;
-}
-
-static int acpi_system_sleep_open_fs(struct inode *inode, struct file *file)
-{
-	return single_open(file, acpi_system_sleep_seq_show, PDE(inode)->data);
-}
-
-static ssize_t
-acpi_system_write_sleep(struct file *file,
-			const char __user * buffer, size_t count, loff_t * ppos)
-{
-	char str[12];
-	u32 state = 0;
-	int error = 0;
-
-	if (count > sizeof(str) - 1)
-		goto Done;
-	memset(str, 0, sizeof(str));
-	if (copy_from_user(str, buffer, count))
-		return -EFAULT;
-
-	/* Check for S4 bios request */
-	if (!strcmp(str, "4b")) {
-		error = acpi_suspend(4);
-		goto Done;
-	}
-	state = simple_strtoul(str, NULL, 0);
-#ifdef CONFIG_HIBERNATION
-	if (state == 4) {
-		error = hibernate();
-		goto Done;
-	}
-#endif
-	error = acpi_suspend(state);
-      Done:
-	return error ? error : count;
-}
-#endif				/* CONFIG_ACPI_PROCFS */
-
-#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || !defined(CONFIG_X86)
-/* use /sys/class/rtc/rtcX/wakealarm instead; it's not ACPI-specific */
-#else
-#define	HAVE_ACPI_LEGACY_ALARM
-#endif
-
-#ifdef	HAVE_ACPI_LEGACY_ALARM
-
-static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
-{
-	u32 sec, min, hr;
-	u32 day, mo, yr, cent = 0;
-	unsigned char rtc_control = 0;
-	unsigned long flags;
-
-	ACPI_FUNCTION_TRACE("acpi_system_alarm_seq_show");
-
-	spin_lock_irqsave(&rtc_lock, flags);
-
-	sec = CMOS_READ(RTC_SECONDS_ALARM);
-	min = CMOS_READ(RTC_MINUTES_ALARM);
-	hr = CMOS_READ(RTC_HOURS_ALARM);
-	rtc_control = CMOS_READ(RTC_CONTROL);
-
-	/* If we ever get an FACP with proper values... */
-	if (acpi_gbl_FADT.day_alarm)
-		/* ACPI spec: only low 6 its should be cared */
-		day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F;
-	else
-		day = CMOS_READ(RTC_DAY_OF_MONTH);
-	if (acpi_gbl_FADT.month_alarm)
-		mo = CMOS_READ(acpi_gbl_FADT.month_alarm);
-	else
-		mo = CMOS_READ(RTC_MONTH);
-	if (acpi_gbl_FADT.century)
-		cent = CMOS_READ(acpi_gbl_FADT.century);
-
-	yr = CMOS_READ(RTC_YEAR);
-
-	spin_unlock_irqrestore(&rtc_lock, flags);
-
-	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-		sec = bcd2bin(sec);
-		min = bcd2bin(min);
-		hr = bcd2bin(hr);
-		day = bcd2bin(day);
-		mo = bcd2bin(mo);
-		yr = bcd2bin(yr);
-		cent = bcd2bin(cent);
-	}
-
-	/* we're trusting the FADT (see above) */
-	if (!acpi_gbl_FADT.century)
-		/* If we're not trusting the FADT, we should at least make it
-		 * right for _this_ century... ehm, what is _this_ century?
-		 *
-		 * TBD:
-		 *  ASAP: find piece of code in the kernel, e.g. star tracker driver,
-		 *        which we can trust to determine the century correctly. Atom
-		 *        watch driver would be nice, too...
-		 *
-		 *  if that has not happened, change for first release in 2050:
-		 *        if (yr<50)
-		 *                yr += 2100;
-		 *        else
-		 *                yr += 2000;   // current line of code
-		 *
-		 *  if that has not happened either, please do on 2099/12/31:23:59:59
-		 *        s/2000/2100
-		 *
-		 */
-		yr += 2000;
-	else
-		yr += cent * 100;
-
-	seq_printf(seq, "%4.4u-", yr);
-	(mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo);
-	(day > 31) ? seq_puts(seq, "** ") : seq_printf(seq, "%2.2u ", day);
-	(hr > 23) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", hr);
-	(min > 59) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", min);
-	(sec > 59) ? seq_puts(seq, "**\n") : seq_printf(seq, "%2.2u\n", sec);
-
-	return 0;
-}
-
-static int acpi_system_alarm_open_fs(struct inode *inode, struct file *file)
-{
-	return single_open(file, acpi_system_alarm_seq_show, PDE(inode)->data);
-}
-
-static int get_date_field(char **p, u32 * value)
-{
-	char *next = NULL;
-	char *string_end = NULL;
-	int result = -EINVAL;
-
-	/*
-	 * Try to find delimeter, only to insert null.  The end of the
-	 * string won't have one, but is still valid.
-	 */
-	if (*p == NULL)
-		return result;
-
-	next = strpbrk(*p, "- :");
-	if (next)
-		*next++ = '\0';
-
-	*value = simple_strtoul(*p, &string_end, 10);
-
-	/* Signal success if we got a good digit */
-	if (string_end != *p)
-		result = 0;
-
-	if (next)
-		*p = next;
-	else
-		*p = NULL;
-
-	return result;
-}
-
-/* Read a possibly BCD register, always return binary */
-static u32 cmos_bcd_read(int offset, int rtc_control)
-{
-	u32 val = CMOS_READ(offset);
-	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-		val = bcd2bin(val);
-	return val;
-}
-
-/* Write binary value into possibly BCD register */
-static void cmos_bcd_write(u32 val, int offset, int rtc_control)
-{
-	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-		val = bin2bcd(val);
-	CMOS_WRITE(val, offset);
-}
-
-static ssize_t
-acpi_system_write_alarm(struct file *file,
-			const char __user * buffer, size_t count, loff_t * ppos)
-{
-	int result = 0;
-	char alarm_string[30] = { '\0' };
-	char *p = alarm_string;
-	u32 sec, min, hr, day, mo, yr;
-	int adjust = 0;
-	unsigned char rtc_control = 0;
-
-	ACPI_FUNCTION_TRACE("acpi_system_write_alarm");
-
-	if (count > sizeof(alarm_string) - 1)
-		return_VALUE(-EINVAL);
-
-	if (copy_from_user(alarm_string, buffer, count))
-		return_VALUE(-EFAULT);
-
-	alarm_string[count] = '\0';
-
-	/* check for time adjustment */
-	if (alarm_string[0] == '+') {
-		p++;
-		adjust = 1;
-	}
-
-	if ((result = get_date_field(&p, &yr)))
-		goto end;
-	if ((result = get_date_field(&p, &mo)))
-		goto end;
-	if ((result = get_date_field(&p, &day)))
-		goto end;
-	if ((result = get_date_field(&p, &hr)))
-		goto end;
-	if ((result = get_date_field(&p, &min)))
-		goto end;
-	if ((result = get_date_field(&p, &sec)))
-		goto end;
-
-	spin_lock_irq(&rtc_lock);
-
-	rtc_control = CMOS_READ(RTC_CONTROL);
-
-	if (adjust) {
-		yr += cmos_bcd_read(RTC_YEAR, rtc_control);
-		mo += cmos_bcd_read(RTC_MONTH, rtc_control);
-		day += cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
-		hr += cmos_bcd_read(RTC_HOURS, rtc_control);
-		min += cmos_bcd_read(RTC_MINUTES, rtc_control);
-		sec += cmos_bcd_read(RTC_SECONDS, rtc_control);
-	}
-
-	spin_unlock_irq(&rtc_lock);
-
-	if (sec > 59) {
-		min += sec/60;
-		sec = sec%60;
-	}
-	if (min > 59) {
-		hr += min/60;
-		min = min%60;
-	}
-	if (hr > 23) {
-		day += hr/24;
-		hr = hr%24;
-	}
-	if (day > 31) {
-		mo += day/32;
-		day = day%32;
-	}
-	if (mo > 12) {
-		yr += mo/13;
-		mo = mo%13;
-	}
-
-	spin_lock_irq(&rtc_lock);
-	/*
-	 * Disable alarm interrupt before setting alarm timer or else
-	 * when ACPI_EVENT_RTC is enabled, a spurious ACPI interrupt occurs
-	 */
-	rtc_control &= ~RTC_AIE;
-	CMOS_WRITE(rtc_control, RTC_CONTROL);
-	CMOS_READ(RTC_INTR_FLAGS);
-
-	/* write the fields the rtc knows about */
-	cmos_bcd_write(hr, RTC_HOURS_ALARM, rtc_control);
-	cmos_bcd_write(min, RTC_MINUTES_ALARM, rtc_control);
-	cmos_bcd_write(sec, RTC_SECONDS_ALARM, rtc_control);
-
-	/*
-	 * If the system supports an enhanced alarm it will have non-zero
-	 * offsets into the CMOS RAM here -- which for some reason are pointing
-	 * to the RTC area of memory.
-	 */
-	if (acpi_gbl_FADT.day_alarm)
-		cmos_bcd_write(day, acpi_gbl_FADT.day_alarm, rtc_control);
-	if (acpi_gbl_FADT.month_alarm)
-		cmos_bcd_write(mo, acpi_gbl_FADT.month_alarm, rtc_control);
-	if (acpi_gbl_FADT.century) {
-		if (adjust)
-			yr += cmos_bcd_read(acpi_gbl_FADT.century, rtc_control) * 100;
-		cmos_bcd_write(yr / 100, acpi_gbl_FADT.century, rtc_control);
-	}
-	/* enable the rtc alarm interrupt */
-	rtc_control |= RTC_AIE;
-	CMOS_WRITE(rtc_control, RTC_CONTROL);
-	CMOS_READ(RTC_INTR_FLAGS);
-
-	spin_unlock_irq(&rtc_lock);
-
-	acpi_clear_event(ACPI_EVENT_RTC);
-	acpi_enable_event(ACPI_EVENT_RTC, 0);
-
-	*ppos += count;
-
-	result = 0;
-      end:
-	return_VALUE(result ? result : count);
-}
-#endif				/* HAVE_ACPI_LEGACY_ALARM */
-
-extern struct list_head acpi_wakeup_device_list;
-extern spinlock_t acpi_device_lock;
-
-static int
-acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
-{
-	struct list_head *node, *next;
-
-	seq_printf(seq, "Device\tS-state\t  Status   Sysfs node\n");
-
-	spin_lock(&acpi_device_lock);
-	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
-		struct acpi_device *dev =
-		    container_of(node, struct acpi_device, wakeup_list);
-		struct device *ldev;
-
-		if (!dev->wakeup.flags.valid)
-			continue;
-		spin_unlock(&acpi_device_lock);
-
-		ldev = acpi_get_physical_device(dev->handle);
-		seq_printf(seq, "%s\t  S%d\t%c%-8s  ",
-			   dev->pnp.bus_id,
-			   (u32) dev->wakeup.sleep_state,
-			   dev->wakeup.flags.run_wake ? '*' : ' ',
-			   dev->wakeup.state.enabled ? "enabled" : "disabled");
-		if (ldev)
-			seq_printf(seq, "%s:%s",
-				   ldev->bus ? ldev->bus->name : "no-bus",
-				   dev_name(ldev));
-		seq_printf(seq, "\n");
-		put_device(ldev);
-
-		spin_lock(&acpi_device_lock);
-	}
-	spin_unlock(&acpi_device_lock);
-	return 0;
-}
-
-static void physical_device_enable_wakeup(struct acpi_device *adev)
-{
-	struct device *dev = acpi_get_physical_device(adev->handle);
-
-	if (dev && device_can_wakeup(dev))
-		device_set_wakeup_enable(dev, adev->wakeup.state.enabled);
-}
-
-static ssize_t
-acpi_system_write_wakeup_device(struct file *file,
-				const char __user * buffer,
-				size_t count, loff_t * ppos)
-{
-	struct list_head *node, *next;
-	char strbuf[5];
-	char str[5] = "";
-	int len = count;
-	struct acpi_device *found_dev = NULL;
-
-	if (len > 4)
-		len = 4;
-
-	if (copy_from_user(strbuf, buffer, len))
-		return -EFAULT;
-	strbuf[len] = '\0';
-	sscanf(strbuf, "%s", str);
-
-	spin_lock(&acpi_device_lock);
-	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
-		struct acpi_device *dev =
-		    container_of(node, struct acpi_device, wakeup_list);
-		if (!dev->wakeup.flags.valid)
-			continue;
-
-		if (!strncmp(dev->pnp.bus_id, str, 4)) {
-			dev->wakeup.state.enabled =
-			    dev->wakeup.state.enabled ? 0 : 1;
-			found_dev = dev;
-			break;
-		}
-	}
-	if (found_dev) {
-		physical_device_enable_wakeup(found_dev);
-		list_for_each_safe(node, next, &acpi_wakeup_device_list) {
-			struct acpi_device *dev = container_of(node,
-							       struct
-							       acpi_device,
-							       wakeup_list);
-
-			if ((dev != found_dev) &&
-			    (dev->wakeup.gpe_number ==
-			     found_dev->wakeup.gpe_number)
-			    && (dev->wakeup.gpe_device ==
-				found_dev->wakeup.gpe_device)) {
-				printk(KERN_WARNING
-				       "ACPI: '%s' and '%s' have the same GPE, "
-				       "can't disable/enable one seperately\n",
-				       dev->pnp.bus_id, found_dev->pnp.bus_id);
-				dev->wakeup.state.enabled =
-				    found_dev->wakeup.state.enabled;
-				physical_device_enable_wakeup(dev);
-			}
-		}
-	}
-	spin_unlock(&acpi_device_lock);
-	return count;
-}
-
-static int
-acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file)
-{
-	return single_open(file, acpi_system_wakeup_device_seq_show,
-			   PDE(inode)->data);
-}
-
-static const struct file_operations acpi_system_wakeup_device_fops = {
-	.owner = THIS_MODULE,
-	.open = acpi_system_wakeup_device_open_fs,
-	.read = seq_read,
-	.write = acpi_system_write_wakeup_device,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-#ifdef	CONFIG_ACPI_PROCFS
-static const struct file_operations acpi_system_sleep_fops = {
-	.owner = THIS_MODULE,
-	.open = acpi_system_sleep_open_fs,
-	.read = seq_read,
-	.write = acpi_system_write_sleep,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-#endif				/* CONFIG_ACPI_PROCFS */
-
-#ifdef	HAVE_ACPI_LEGACY_ALARM
-static const struct file_operations acpi_system_alarm_fops = {
-	.owner = THIS_MODULE,
-	.open = acpi_system_alarm_open_fs,
-	.read = seq_read,
-	.write = acpi_system_write_alarm,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static u32 rtc_handler(void *context)
-{
-	acpi_clear_event(ACPI_EVENT_RTC);
-	acpi_disable_event(ACPI_EVENT_RTC, 0);
-
-	return ACPI_INTERRUPT_HANDLED;
-}
-#endif				/* HAVE_ACPI_LEGACY_ALARM */
-
-static int __init acpi_sleep_proc_init(void)
-{
-	if (acpi_disabled)
-		return 0;
-
-#ifdef	CONFIG_ACPI_PROCFS
-	/* 'sleep' [R/W] */
-	proc_create("sleep", S_IFREG | S_IRUGO | S_IWUSR,
-		    acpi_root_dir, &acpi_system_sleep_fops);
-#endif				/* CONFIG_ACPI_PROCFS */
-
-#ifdef	HAVE_ACPI_LEGACY_ALARM
-	/* 'alarm' [R/W] */
-	proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR,
-		    acpi_root_dir, &acpi_system_alarm_fops);
-
-	acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
-	/*
-	 * Disable the RTC event after installing RTC handler.
-	 * Only when RTC alarm is set will it be enabled.
-	 */
-	acpi_clear_event(ACPI_EVENT_RTC);
-	acpi_disable_event(ACPI_EVENT_RTC, 0);
-#endif				/* HAVE_ACPI_LEGACY_ALARM */
-
-	/* 'wakeup device' [R/W] */
-	proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
-		    acpi_root_dir, &acpi_system_wakeup_device_fops);
-
-	return 0;
-}
-
-late_initcall(acpi_sleep_proc_init);
diff --git a/drivers/acpi/sleep/wakeup.c b/drivers/acpi/sleep/wakeup.c
deleted file mode 100644
index dea4c23..0000000
--- a/drivers/acpi/sleep/wakeup.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * wakeup.c - support wakeup devices
- * Copyright (C) 2004 Li Shaohua <shaohua.li@intel.com>
- */
-
-#include <linux/init.h>
-#include <linux/acpi.h>
-#include <acpi/acpi_drivers.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <acpi/acevents.h>
-#include "sleep.h"
-
-#define _COMPONENT		ACPI_SYSTEM_COMPONENT
-ACPI_MODULE_NAME("wakeup_devices")
-
-extern struct list_head acpi_wakeup_device_list;
-extern spinlock_t acpi_device_lock;
-
-/**
- * acpi_enable_wakeup_device_prep - prepare wakeup devices
- *	@sleep_state:	ACPI state
- * Enable all wakup devices power if the devices' wakeup level
- * is higher than requested sleep level
- */
-
-void acpi_enable_wakeup_device_prep(u8 sleep_state)
-{
-	struct list_head *node, *next;
-
-	ACPI_FUNCTION_TRACE("acpi_enable_wakeup_device_prep");
-
-	spin_lock(&acpi_device_lock);
-	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
-		struct acpi_device *dev = container_of(node,
-						       struct acpi_device,
-						       wakeup_list);
-
-		if (!dev->wakeup.flags.valid ||
-		    !dev->wakeup.state.enabled ||
-		    (sleep_state > (u32) dev->wakeup.sleep_state))
-			continue;
-
-		spin_unlock(&acpi_device_lock);
-		acpi_enable_wakeup_device_power(dev, sleep_state);
-		spin_lock(&acpi_device_lock);
-	}
-	spin_unlock(&acpi_device_lock);
-}
-
-/**
- * acpi_enable_wakeup_device - enable wakeup devices
- *	@sleep_state:	ACPI state
- * Enable all wakup devices's GPE
- */
-void acpi_enable_wakeup_device(u8 sleep_state)
-{
-	struct list_head *node, *next;
-
-	/* 
-	 * Caution: this routine must be invoked when interrupt is disabled 
-	 * Refer ACPI2.0: P212
-	 */
-	ACPI_FUNCTION_TRACE("acpi_enable_wakeup_device");
-	spin_lock(&acpi_device_lock);
-	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
-		struct acpi_device *dev =
-			container_of(node, struct acpi_device, wakeup_list);
-
-		if (!dev->wakeup.flags.valid)
-			continue;
-
-		/* If users want to disable run-wake GPE,
-		 * we only disable it for wake and leave it for runtime
-		 */
-		if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
-		    || sleep_state > (u32) dev->wakeup.sleep_state) {
-			if (dev->wakeup.flags.run_wake) {
-				spin_unlock(&acpi_device_lock);
-				/* set_gpe_type will disable GPE, leave it like that */
-				acpi_set_gpe_type(dev->wakeup.gpe_device,
-						  dev->wakeup.gpe_number,
-						  ACPI_GPE_TYPE_RUNTIME);
-				spin_lock(&acpi_device_lock);
-			}
-			continue;
-		}
-		spin_unlock(&acpi_device_lock);
-		if (!dev->wakeup.flags.run_wake)
-			acpi_enable_gpe(dev->wakeup.gpe_device,
-					dev->wakeup.gpe_number);
-		spin_lock(&acpi_device_lock);
-	}
-	spin_unlock(&acpi_device_lock);
-}
-
-/**
- * acpi_disable_wakeup_device - disable devices' wakeup capability
- *	@sleep_state:	ACPI state
- * Disable all wakup devices's GPE and wakeup capability
- */
-void acpi_disable_wakeup_device(u8 sleep_state)
-{
-	struct list_head *node, *next;
-
-	ACPI_FUNCTION_TRACE("acpi_disable_wakeup_device");
-
-	spin_lock(&acpi_device_lock);
-	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
-		struct acpi_device *dev =
-			container_of(node, struct acpi_device, wakeup_list);
-
-		if (!dev->wakeup.flags.valid)
-			continue;
-
-		if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
-		    || sleep_state > (u32) dev->wakeup.sleep_state) {
-			if (dev->wakeup.flags.run_wake) {
-				spin_unlock(&acpi_device_lock);
-				acpi_set_gpe_type(dev->wakeup.gpe_device,
-						  dev->wakeup.gpe_number,
-						  ACPI_GPE_TYPE_WAKE_RUN);
-				/* Re-enable it, since set_gpe_type will disable it */
-				acpi_enable_gpe(dev->wakeup.gpe_device,
-						dev->wakeup.gpe_number);
-				spin_lock(&acpi_device_lock);
-			}
-			continue;
-		}
-
-		spin_unlock(&acpi_device_lock);
-		acpi_disable_wakeup_device_power(dev);
-		/* Never disable run-wake GPE */
-		if (!dev->wakeup.flags.run_wake) {
-			acpi_disable_gpe(dev->wakeup.gpe_device,
-					 dev->wakeup.gpe_number);
-			acpi_clear_gpe(dev->wakeup.gpe_device,
-				       dev->wakeup.gpe_number, ACPI_NOT_ISR);
-		}
-		spin_lock(&acpi_device_lock);
-	}
-	spin_unlock(&acpi_device_lock);
-}
-
-static int __init acpi_wakeup_device_init(void)
-{
-	struct list_head *node, *next;
-
-	if (acpi_disabled)
-		return 0;
-
-	spin_lock(&acpi_device_lock);
-	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
-		struct acpi_device *dev = container_of(node,
-						       struct acpi_device,
-						       wakeup_list);
-		/* In case user doesn't load button driver */
-		if (!dev->wakeup.flags.run_wake || dev->wakeup.state.enabled)
-			continue;
-		spin_unlock(&acpi_device_lock);
-		acpi_set_gpe_type(dev->wakeup.gpe_device,
-				  dev->wakeup.gpe_number,
-				  ACPI_GPE_TYPE_WAKE_RUN);
-		acpi_enable_gpe(dev->wakeup.gpe_device,
-				dev->wakeup.gpe_number);
-		dev->wakeup.state.enabled = 1;
-		spin_lock(&acpi_device_lock);
-	}
-	spin_unlock(&acpi_device_lock);
-	return 0;
-}
-
-late_initcall(acpi_wakeup_device_init);
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 6e4107f..391d035 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -192,65 +192,6 @@
 };
 static struct kobj_attribute *counter_attrs;
 
-static int count_num_gpes(void)
-{
-	int count = 0;
-	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
-	struct acpi_gpe_block_info *gpe_block;
-	acpi_cpu_flags flags;
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
-	while (gpe_xrupt_info) {
-		gpe_block = gpe_xrupt_info->gpe_block_list_head;
-		while (gpe_block) {
-			count += gpe_block->register_count *
-			    ACPI_GPE_REGISTER_WIDTH;
-			gpe_block = gpe_block->next;
-		}
-		gpe_xrupt_info = gpe_xrupt_info->next;
-	}
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
-	return count;
-}
-
-static int get_gpe_device(int index, acpi_handle *handle)
-{
-	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
-	struct acpi_gpe_block_info *gpe_block;
-	acpi_cpu_flags flags;
-	struct acpi_namespace_node *node;
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
-	while (gpe_xrupt_info) {
-		gpe_block = gpe_xrupt_info->gpe_block_list_head;
-		node = gpe_block->node;
-		while (gpe_block) {
-			index -= gpe_block->register_count *
-			    ACPI_GPE_REGISTER_WIDTH;
-			if (index < 0) {
-				acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-				/* return NULL if it's FADT GPE */
-				if (node->type != ACPI_TYPE_DEVICE)
-					*handle = NULL;
-				else
-					*handle = node;
-				return 0;
-			}
-			node = gpe_block->node;
-			gpe_block = gpe_block->next;
-		}
-		gpe_xrupt_info = gpe_xrupt_info->next;
-	}
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
-	return -ENODEV;
-}
-
 static void delete_gpe_attr_array(void)
 {
 	struct event_counter *tmp = all_counters;
@@ -309,7 +250,7 @@
 		goto end;
 
 	if (index < num_gpes) {
-		result = get_gpe_device(index, handle);
+		result = acpi_get_gpe_device(index, handle);
 		if (result) {
 			ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
 				"Invalid GPE 0x%x\n", index));
@@ -436,7 +377,7 @@
 	if (all_counters)
 		return;
 
-	num_gpes = count_num_gpes();
+	num_gpes = acpi_current_gpe_count;
 	num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
 
 	all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1),
diff --git a/drivers/acpi/tables/Makefile b/drivers/acpi/tables/Makefile
deleted file mode 100644
index 7385efa..0000000
--- a/drivers/acpi/tables/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for all Linux ACPI interpreter subdirectories
-#
-
-obj-y := tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
-
-EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c
deleted file mode 100644
index 2817158..0000000
--- a/drivers/acpi/tables/tbfadt.c
+++ /dev/null
@@ -1,474 +0,0 @@
-/******************************************************************************
- *
- * Module Name: tbfadt   - FADT table utilities
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/actables.h>
-
-#define _COMPONENT          ACPI_TABLES
-ACPI_MODULE_NAME("tbfadt")
-
-/* Local prototypes */
-static void inline
-acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
-			     u8 byte_width, u64 address);
-
-static void acpi_tb_convert_fadt(void);
-
-static void acpi_tb_validate_fadt(void);
-
-/* Table for conversion of FADT to common internal format and FADT validation */
-
-typedef struct acpi_fadt_info {
-	char *name;
-	u8 target;
-	u8 source;
-	u8 length;
-	u8 type;
-
-} acpi_fadt_info;
-
-#define ACPI_FADT_REQUIRED          1
-#define ACPI_FADT_SEPARATE_LENGTH   2
-
-static struct acpi_fadt_info fadt_info_table[] = {
-	{"Pm1aEventBlock", ACPI_FADT_OFFSET(xpm1a_event_block),
-	 ACPI_FADT_OFFSET(pm1a_event_block),
-	 ACPI_FADT_OFFSET(pm1_event_length), ACPI_FADT_REQUIRED},
-
-	{"Pm1bEventBlock", ACPI_FADT_OFFSET(xpm1b_event_block),
-	 ACPI_FADT_OFFSET(pm1b_event_block),
-	 ACPI_FADT_OFFSET(pm1_event_length), 0},
-
-	{"Pm1aControlBlock", ACPI_FADT_OFFSET(xpm1a_control_block),
-	 ACPI_FADT_OFFSET(pm1a_control_block),
-	 ACPI_FADT_OFFSET(pm1_control_length), ACPI_FADT_REQUIRED},
-
-	{"Pm1bControlBlock", ACPI_FADT_OFFSET(xpm1b_control_block),
-	 ACPI_FADT_OFFSET(pm1b_control_block),
-	 ACPI_FADT_OFFSET(pm1_control_length), 0},
-
-	{"Pm2ControlBlock", ACPI_FADT_OFFSET(xpm2_control_block),
-	 ACPI_FADT_OFFSET(pm2_control_block),
-	 ACPI_FADT_OFFSET(pm2_control_length), ACPI_FADT_SEPARATE_LENGTH},
-
-	{"PmTimerBlock", ACPI_FADT_OFFSET(xpm_timer_block),
-	 ACPI_FADT_OFFSET(pm_timer_block),
-	 ACPI_FADT_OFFSET(pm_timer_length), ACPI_FADT_REQUIRED},
-
-	{"Gpe0Block", ACPI_FADT_OFFSET(xgpe0_block),
-	 ACPI_FADT_OFFSET(gpe0_block),
-	 ACPI_FADT_OFFSET(gpe0_block_length), ACPI_FADT_SEPARATE_LENGTH},
-
-	{"Gpe1Block", ACPI_FADT_OFFSET(xgpe1_block),
-	 ACPI_FADT_OFFSET(gpe1_block),
-	 ACPI_FADT_OFFSET(gpe1_block_length), ACPI_FADT_SEPARATE_LENGTH}
-};
-
-#define ACPI_FADT_INFO_ENTRIES        (sizeof (fadt_info_table) / sizeof (struct acpi_fadt_info))
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_init_generic_address
- *
- * PARAMETERS:  generic_address     - GAS struct to be initialized
- *              byte_width          - Width of this register
- *              Address             - Address of the register
- *
- * RETURN:      None
- *
- * DESCRIPTION: Initialize a Generic Address Structure (GAS)
- *              See the ACPI specification for a full description and
- *              definition of this structure.
- *
- ******************************************************************************/
-
-static void inline
-acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
-			     u8 byte_width, u64 address)
-{
-
-	/*
-	 * The 64-bit Address field is non-aligned in the byte packed
-	 * GAS struct.
-	 */
-	ACPI_MOVE_64_TO_64(&generic_address->address, &address);
-
-	/* All other fields are byte-wide */
-
-	generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO;
-	generic_address->bit_width = byte_width << 3;
-	generic_address->bit_offset = 0;
-	generic_address->access_width = 0;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_parse_fadt
- *
- * PARAMETERS:  table_index         - Index for the FADT
- *              Flags               - Flags
- *
- * RETURN:      None
- *
- * DESCRIPTION: Initialize the FADT, DSDT and FACS tables
- *              (FADT contains the addresses of the DSDT and FACS)
- *
- ******************************************************************************/
-
-void acpi_tb_parse_fadt(u32 table_index, u8 flags)
-{
-	u32 length;
-	struct acpi_table_header *table;
-
-	/*
-	 * The FADT has multiple versions with different lengths,
-	 * and it contains pointers to both the DSDT and FACS tables.
-	 *
-	 * Get a local copy of the FADT and convert it to a common format
-	 * Map entire FADT, assumed to be smaller than one page.
-	 */
-	length = acpi_gbl_root_table_list.tables[table_index].length;
-
-	table =
-	    acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index].
-			       address, length);
-	if (!table) {
-		return;
-	}
-
-	/*
-	 * Validate the FADT checksum before we copy the table. Ignore
-	 * checksum error as we want to try to get the DSDT and FACS.
-	 */
-	(void)acpi_tb_verify_checksum(table, length);
-
-	/* Obtain a local copy of the FADT in common ACPI 2.0+ format */
-
-	acpi_tb_create_local_fadt(table, length);
-
-	/* All done with the real FADT, unmap it */
-
-	acpi_os_unmap_memory(table, length);
-
-	/* Obtain the DSDT and FACS tables via their addresses within the FADT */
-
-	acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
-			      flags, ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
-
-	acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xfacs,
-			      flags, ACPI_SIG_FACS, ACPI_TABLE_INDEX_FACS);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_create_local_fadt
- *
- * PARAMETERS:  Table               - Pointer to BIOS FADT
- *              Length              - Length of the table
- *
- * RETURN:      None
- *
- * DESCRIPTION: Get a local copy of the FADT and convert it to a common format.
- *              Performs validation on some important FADT fields.
- *
- * NOTE:        We create a local copy of the FADT regardless of the version.
- *
- ******************************************************************************/
-
-void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
-{
-
-	/*
-	 * Check if the FADT is larger than the largest table that we expect
-	 * (the ACPI 2.0/3.0 version). If so, truncate the table, and issue
-	 * a warning.
-	 */
-	if (length > sizeof(struct acpi_table_fadt)) {
-		ACPI_WARNING((AE_INFO,
-			      "FADT (revision %u) is longer than ACPI 2.0 version, truncating length 0x%X to 0x%zX",
-			      table->revision, (unsigned)length,
-			      sizeof(struct acpi_table_fadt)));
-	}
-
-	/* Clear the entire local FADT */
-
-	ACPI_MEMSET(&acpi_gbl_FADT, 0, sizeof(struct acpi_table_fadt));
-
-	/* Copy the original FADT, up to sizeof (struct acpi_table_fadt) */
-
-	ACPI_MEMCPY(&acpi_gbl_FADT, table,
-		    ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
-
-	/*
-	 * 1) Convert the local copy of the FADT to the common internal format
-	 * 2) Validate some of the important values within the FADT
-	 */
-	acpi_tb_convert_fadt();
-	acpi_tb_validate_fadt();
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_convert_fadt
- *
- * PARAMETERS:  None, uses acpi_gbl_FADT
- *
- * RETURN:      None
- *
- * DESCRIPTION: Converts all versions of the FADT to a common internal format.
- *              Expand all 32-bit addresses to 64-bit.
- *
- * NOTE:        acpi_gbl_FADT must be of size (struct acpi_table_fadt),
- *              and must contain a copy of the actual FADT.
- *
- * ACPICA will use the "X" fields of the FADT for all addresses.
- *
- * "X" fields are optional extensions to the original V1.0 fields. Even if
- * they are present in the structure, they can be optionally not used by
- * setting them to zero. Therefore, we must selectively expand V1.0 fields
- * if the corresponding X field is zero.
- *
- * For ACPI 1.0 FADTs, all address fields are expanded to the corresponding
- * "X" fields.
- *
- * For ACPI 2.0 FADTs, any "X" fields that are NULL are filled in by
- * expanding the corresponding ACPI 1.0 field.
- *
- ******************************************************************************/
-
-static void acpi_tb_convert_fadt(void)
-{
-	u8 pm1_register_length;
-	struct acpi_generic_address *target;
-	u32 i;
-
-	/* Update the local FADT table header length */
-
-	acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
-
-	/* Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary */
-
-	if (!acpi_gbl_FADT.Xfacs) {
-		acpi_gbl_FADT.Xfacs = (u64) acpi_gbl_FADT.facs;
-	}
-
-	if (!acpi_gbl_FADT.Xdsdt) {
-		acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
-	}
-
-	/*
-	 * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which
-	 * should be zero are indeed zero. This works around BIOSes that
-	 * inadvertently place values in these fields.
-	 *
-	 * The ACPI 1.0 reserved fields that will be zeroed are the bytes located at
-	 * offset 45, 55, 95, and the word located at offset 109, 110.
-	 */
-	if (acpi_gbl_FADT.header.revision < FADT2_REVISION_ID) {
-		acpi_gbl_FADT.preferred_profile = 0;
-		acpi_gbl_FADT.pstate_control = 0;
-		acpi_gbl_FADT.cst_control = 0;
-		acpi_gbl_FADT.boot_flags = 0;
-	}
-
-	/*
-	 * Expand the ACPI 1.0 32-bit V1.0 addresses to the ACPI 2.0 64-bit "X"
-	 * generic address structures as necessary.
-	 */
-	for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
-		target =
-		    ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT,
-				 fadt_info_table[i].target);
-
-		/* Expand only if the X target is null */
-
-		if (!target->address) {
-			acpi_tb_init_generic_address(target,
-						     *ACPI_ADD_PTR(u8,
-								   &acpi_gbl_FADT,
-								   fadt_info_table
-								   [i].length),
-						     (u64) * ACPI_ADD_PTR(u32,
-									  &acpi_gbl_FADT,
-									  fadt_info_table
-									  [i].
-									  source));
-		}
-	}
-
-	/*
-	 * Calculate separate GAS structs for the PM1 Enable registers.
-	 * These addresses do not appear (directly) in the FADT, so it is
-	 * useful to calculate them once, here.
-	 *
-	 * The PM event blocks are split into two register blocks, first is the
-	 * PM Status Register block, followed immediately by the PM Enable
-	 * Register block. Each is of length (xpm1x_event_block.bit_width/2).
-	 *
-	 * On various systems the v2 fields (and particularly the bit widths)
-	 * cannot be relied upon, though. Hence resort to using the v1 length
-	 * here (and warn about the inconsistency).
-	 */
-	if (acpi_gbl_FADT.xpm1a_event_block.bit_width
-	    != acpi_gbl_FADT.pm1_event_length * 8)
-		printk(KERN_WARNING "FADT: "
-		       "X_PM1a_EVT_BLK.bit_width (%u) does not match"
-		       " PM1_EVT_LEN (%u)\n",
-		       acpi_gbl_FADT.xpm1a_event_block.bit_width,
-		       acpi_gbl_FADT.pm1_event_length);
-	pm1_register_length = (u8) ACPI_DIV_2(acpi_gbl_FADT.pm1_event_length);
-
-	/* The PM1A register block is required */
-
-	acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable,
-				     pm1_register_length,
-				     (acpi_gbl_FADT.xpm1a_event_block.address +
-				      pm1_register_length));
-	/* Don't forget to copy space_id of the GAS */
-	acpi_gbl_xpm1a_enable.space_id =
-	    acpi_gbl_FADT.xpm1a_event_block.space_id;
-
-	/* The PM1B register block is optional, ignore if not present */
-
-	if (acpi_gbl_FADT.xpm1b_event_block.address) {
-		if (acpi_gbl_FADT.xpm1b_event_block.bit_width
-		    != acpi_gbl_FADT.pm1_event_length * 8)
-			printk(KERN_WARNING "FADT: "
-			       "X_PM1b_EVT_BLK.bit_width (%u) does not match"
-			       " PM1_EVT_LEN (%u)\n",
-			       acpi_gbl_FADT.xpm1b_event_block.bit_width,
-			       acpi_gbl_FADT.pm1_event_length);
-		acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
-					     pm1_register_length,
-					     (acpi_gbl_FADT.xpm1b_event_block.
-					      address + pm1_register_length));
-		/* Don't forget to copy space_id of the GAS */
-		acpi_gbl_xpm1b_enable.space_id =
-		    acpi_gbl_FADT.xpm1b_event_block.space_id;
-
-	}
-}
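
The ACPI_ADD_PTR arithmetic above can obscure the underlying rules, so the following is a minimal standalone sketch of the same two steps, using hypothetical values and simplified types (plain C, no ACPICA macros): a legacy 32-bit register block is promoted to a GAS only when the corresponding "X" field is zero, with the FADT byte length becoming a bit width, and the PM1 enable block address is the event block address plus half the event block length.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct acpi_generic_address; the real ACPICA
 * type is byte packed and accessed through macros. */
struct gas {
	uint8_t  space_id;	/* 1 = system I/O, as in the legacy fields */
	uint8_t  bit_width;
	uint8_t  bit_offset;
	uint8_t  access_width;
	uint64_t address;
};

/* Mirror of acpi_tb_init_generic_address(): the address space is system
 * I/O and the FADT byte length becomes a bit width. */
static void init_gas(struct gas *g, uint8_t byte_width, uint64_t address)
{
	g->space_id = 1;		/* ACPI_ADR_SPACE_SYSTEM_IO */
	g->bit_width = byte_width << 3;	/* bytes -> bits */
	g->bit_offset = 0;
	g->access_width = 0;
	g->address = address;
}

int main(void)
{
	/* Hypothetical values as a BIOS might supply them in a v1 FADT:
	 * PM1a event block at I/O port 0x400, 4 bytes long, X field unset. */
	uint32_t pm1a_event_block = 0x400;
	uint8_t pm1_event_length = 4;
	uint8_t pm1_register_length = pm1_event_length / 2;
	struct gas xpm1a_event_block = { 0 };	/* X field left zero by BIOS */
	struct gas xpm1a_enable;

	/* Expand only if the X target is null, as the loop above does */
	if (!xpm1a_event_block.address)
		init_gas(&xpm1a_event_block, pm1_event_length,
			 (uint64_t)pm1a_event_block);

	/* The PM1 enable registers occupy the second half of the event
	 * block, immediately after the status registers. */
	init_gas(&xpm1a_enable, pm1_register_length,
		 xpm1a_event_block.address + pm1_register_length);
	xpm1a_enable.space_id = xpm1a_event_block.space_id;

	printf("event block:  addr 0x%llx, width %u bits\n",
	       (unsigned long long)xpm1a_event_block.address,
	       (unsigned)xpm1a_event_block.bit_width);
	printf("enable block: addr 0x%llx, width %u bits\n",
	       (unsigned long long)xpm1a_enable.address,
	       (unsigned)xpm1a_enable.bit_width);
	return 0;
}
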
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_tb_validate_fadt
- *
- * PARAMETERS:  None, uses acpi_gbl_FADT
- *
- * RETURN:      None
- *
- * DESCRIPTION: Validate various important fields within the FADT. If a problem
- *              is found, issue a message, but no status is returned.
- *              Used by both the table manager and the disassembler.
- *
- * Possible additional checks:
- * (acpi_gbl_FADT.pm1_event_length >= 4)
- * (acpi_gbl_FADT.pm1_control_length >= 2)
- * (acpi_gbl_FADT.pm_timer_length >= 4)
- * Gpe block lengths must be multiple of 2
- *
- ******************************************************************************/
-
-static void acpi_tb_validate_fadt(void)
-{
-	u32 *address32;
-	struct acpi_generic_address *address64;
-	u8 length;
-	u32 i;
-
-	/* Examine all of the 64-bit extended address fields (X fields) */
-
-	for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
-
-		/* Generate pointers to the 32-bit and 64-bit addresses and get the length */
-
-		address64 =
-		    ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT,
-				 fadt_info_table[i].target);
-		address32 =
-		    ACPI_ADD_PTR(u32, &acpi_gbl_FADT,
-				 fadt_info_table[i].source);
-		length =
-		    *ACPI_ADD_PTR(u8, &acpi_gbl_FADT,
-				  fadt_info_table[i].length);
-
-		if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) {
-			/*
-			 * Field is required (Pm1a_event, Pm1a_control, pm_timer).
-			 * Both the address and length must be non-zero.
-			 */
-			if (!address64->address || !length) {
-				ACPI_ERROR((AE_INFO,
-					    "Required field \"%s\" has zero address and/or length: %8.8X%8.8X/%X",
-					    fadt_info_table[i].name,
-					    ACPI_FORMAT_UINT64(address64->
-							       address),
-					    length));
-			}
-		} else if (fadt_info_table[i].type & ACPI_FADT_SEPARATE_LENGTH) {
-			/*
-			 * Field is optional (PM2Control, GPE0, GPE1) AND has its own
-			 * length field. If present, both the address and length must be valid.
-			 */
-			if ((address64->address && !length)
-			    || (!address64->address && length)) {
-				ACPI_WARNING((AE_INFO,
-					      "Optional field \"%s\" has zero address or length: %8.8X%8.8X/%X",
-					      fadt_info_table[i].name,
-					      ACPI_FORMAT_UINT64(address64->
-								 address),
-					      length));
-			}
-		}
-
-		/* If both 32- and 64-bit addresses are valid (non-zero), they must match */
-
-		if (address64->address && *address32 &&
-		    (address64->address != (u64) * address32)) {
-			ACPI_ERROR((AE_INFO,
-				    "32/64X address mismatch in \"%s\": [%8.8X] [%8.8X%8.8X], using 64X",
-				    fadt_info_table[i].name, *address32,
-				    ACPI_FORMAT_UINT64(address64->address)));
-		}
-	}
-}
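
Stripped of the table walk, the two checks above reduce to simple predicates on an (address, length) pair; the sketch below restates them in plain C with made-up values, for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Required blocks (e.g. PM1a event): both address and length must be set.
 * Optional blocks with a separate length field (e.g. GPE1): either both
 * are set or both are zero. */
static int required_ok(uint64_t addr, uint8_t len)
{
	return addr != 0 && len != 0;
}

static int optional_ok(uint64_t addr, uint8_t len)
{
	return (addr != 0) == (len != 0);
}

int main(void)
{
	printf("%d %d\n", required_ok(0x400, 4), optional_ok(0, 0));	/* 1 1 */
	printf("%d %d\n", required_ok(0, 4), optional_ok(0x500, 0));	/* 0 0 */
	return 0;
}
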
diff --git a/drivers/acpi/tables/tbfind.c b/drivers/acpi/tables/tbfind.c
deleted file mode 100644
index 531584d..0000000
--- a/drivers/acpi/tables/tbfind.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/******************************************************************************
- *
- * Module Name: tbfind   - find table
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/actables.h>
-
-#define _COMPONENT          ACPI_TABLES
-ACPI_MODULE_NAME("tbfind")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_find_table
- *
- * PARAMETERS:  Signature           - String with ACPI table signature
- *              oem_id              - String with the table OEM ID
- *              oem_table_id        - String with the OEM Table ID
- *              table_index         - Where the table index is returned
- *
- * RETURN:      Status and table index
- *
- * DESCRIPTION: Find an ACPI table (in the RSDT/XSDT) that matches the
- *              Signature, OEM ID and OEM Table ID. Returns an index that can
- *              be used to get the table header or entire table.
- *
- ******************************************************************************/
-acpi_status
-acpi_tb_find_table(char *signature,
-		   char *oem_id, char *oem_table_id, u32 *table_index)
-{
-	u32 i;
-	acpi_status status;
-	struct acpi_table_header header;
-
-	ACPI_FUNCTION_TRACE(tb_find_table);
-
-	/* Normalize the input strings */
-
-	ACPI_MEMSET(&header, 0, sizeof(struct acpi_table_header));
-	ACPI_STRNCPY(header.signature, signature, ACPI_NAME_SIZE);
-	ACPI_STRNCPY(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
-	ACPI_STRNCPY(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
-
-	/* Search for the table */
-
-	for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
-		if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
-				header.signature, ACPI_NAME_SIZE)) {
-
-			/* Not the requested table */
-
-			continue;
-		}
-
-		/* Table with matching signature has been found */
-
-		if (!acpi_gbl_root_table_list.tables[i].pointer) {
-
-			/* Table is not currently mapped, map it */
-
-			status =
-			    acpi_tb_verify_table(&acpi_gbl_root_table_list.
-						 tables[i]);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-
-			if (!acpi_gbl_root_table_list.tables[i].pointer) {
-				continue;
-			}
-		}
-
-		/* Check for table match on all IDs */
-
-		if (!ACPI_MEMCMP
-		    (acpi_gbl_root_table_list.tables[i].pointer->signature,
-		     header.signature, ACPI_NAME_SIZE) && (!oem_id[0]
-							   ||
-							   !ACPI_MEMCMP
-							   (acpi_gbl_root_table_list.
-							    tables[i].pointer->
-							    oem_id,
-							    header.oem_id,
-							    ACPI_OEM_ID_SIZE))
-		    && (!oem_table_id[0]
-			|| !ACPI_MEMCMP(acpi_gbl_root_table_list.tables[i].
-					pointer->oem_table_id,
-					header.oem_table_id,
-					ACPI_OEM_TABLE_ID_SIZE))) {
-			*table_index = i;
-
-			ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
-					  "Found table [%4.4s]\n",
-					  header.signature));
-			return_ACPI_STATUS(AE_OK);
-		}
-	}
-
-	return_ACPI_STATUS(AE_NOT_FOUND);
-}
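
The nested ACPI_MEMCMP expression above implements a simple rule: the signature must always match, while an empty oem_id or oem_table_id from the caller acts as a wildcard. A standalone restatement in plain C follows; the sizes correspond to ACPI_NAME_SIZE, ACPI_OEM_ID_SIZE and ACPI_OEM_TABLE_ID_SIZE, and the function and data names are illustrative. (The real code first normalizes the caller's strings into a zeroed header with STRNCPY, which this sketch skips.)

#include <string.h>
#include <stdio.h>

#define NAME_SIZE          4
#define OEM_ID_SIZE        6
#define OEM_TABLE_ID_SIZE  8

/* Fixed-width, not NUL-terminated fields, hence memcmp with ACPI sizes */
static int table_matches(const char *sig, const char *oem_id,
			 const char *oem_table_id,
			 const char *t_sig, const char *t_oem_id,
			 const char *t_oem_table_id)
{
	if (memcmp(sig, t_sig, NAME_SIZE))
		return 0;
	if (oem_id[0] && memcmp(oem_id, t_oem_id, OEM_ID_SIZE))
		return 0;
	if (oem_table_id[0] &&
	    memcmp(oem_table_id, t_oem_table_id, OEM_TABLE_ID_SIZE))
		return 0;
	return 1;
}

int main(void)
{
	/* Wildcard OEM fields: only the signature is compared */
	printf("%d\n", table_matches("SSDT", "", "",
				     "SSDT", "INTEL ", "Template"));	/* 1 */
	return 0;
}
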
diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/tables/tbinstal.c
deleted file mode 100644
index 18747ce..0000000
--- a/drivers/acpi/tables/tbinstal.c
+++ /dev/null
@@ -1,573 +0,0 @@
-/******************************************************************************
- *
- * Module Name: tbinstal - ACPI table installation and removal
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/actables.h>
-
-#define _COMPONENT          ACPI_TABLES
-ACPI_MODULE_NAME("tbinstal")
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_tb_verify_table
- *
- * PARAMETERS:  table_desc          - table
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Map the table if necessary and verify its checksum
- *
- *****************************************************************************/
-acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(tb_verify_table);
-
-	/* Map the table if necessary */
-
-	if (!table_desc->pointer) {
-		if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) ==
-		    ACPI_TABLE_ORIGIN_MAPPED) {
-			table_desc->pointer =
-			    acpi_os_map_memory(table_desc->address,
-					       table_desc->length);
-		}
-		if (!table_desc->pointer) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-	}
-
-	/* FACS is the odd table, has no standard ACPI header and no checksum */
-
-	if (!ACPI_COMPARE_NAME(&table_desc->signature, ACPI_SIG_FACS)) {
-
-		/* Always calculate checksum, ignore bad checksum if requested */
-
-		status =
-		    acpi_tb_verify_checksum(table_desc->pointer,
-					    table_desc->length);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_add_table
- *
- * PARAMETERS:  table_desc          - Table descriptor
- *              table_index         - Where the table index is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to add the ACPI table
- *
- ******************************************************************************/
-
-acpi_status
-acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
-{
-	u32 i;
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(tb_add_table);
-
-	if (!table_desc->pointer) {
-		status = acpi_tb_verify_table(table_desc);
-		if (ACPI_FAILURE(status) || !table_desc->pointer) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/*
-	 * Originally, we checked the table signature for "SSDT" or "PSDT" here.
-	 * Next, we added support for OEMx tables, signature "OEM".
-	 * Valid tables were encountered with a null signature, so we've just
-	 * given up on validating the signature, since it seems to be a waste
-	 * of code. The original code was removed (05/2008).
-	 */
-
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
-	/* Check if table is already registered */
-
-	for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
-		if (!acpi_gbl_root_table_list.tables[i].pointer) {
-			status =
-			    acpi_tb_verify_table(&acpi_gbl_root_table_list.
-						 tables[i]);
-			if (ACPI_FAILURE(status)
-			    || !acpi_gbl_root_table_list.tables[i].pointer) {
-				continue;
-			}
-		}
-
-		/*
-		 * Check for a table match on the entire table length,
-		 * not just the header.
-		 */
-		if (table_desc->length !=
-		    acpi_gbl_root_table_list.tables[i].length) {
-			continue;
-		}
-
-		if (ACPI_MEMCMP(table_desc->pointer,
-				acpi_gbl_root_table_list.tables[i].pointer,
-				acpi_gbl_root_table_list.tables[i].length)) {
-			continue;
-		}
-
-		/*
-		 * Note: the current mechanism does not unregister a table if it is
-		 * dynamically unloaded. The related namespace entries are deleted,
-		 * but the table remains in the root table list.
-		 *
-		 * The assumption here is that the number of different tables that
-		 * will be loaded is actually small, and there is minimal overhead
-		 * in just keeping the table in case it is needed again.
-		 *
-		 * If this assumption changes in the future (perhaps on large
-		 * machines with many table load/unload operations), tables will
-		 * need to be unregistered when they are unloaded, and slots in the
-		 * root table list should be reused when empty.
-		 */
-
-		/*
-		 * Table is already registered.
-		 * We can delete the table that was passed as a parameter.
-		 */
-		acpi_tb_delete_table(table_desc);
-		*table_index = i;
-
-		if (acpi_gbl_root_table_list.tables[i].
-		    flags & ACPI_TABLE_IS_LOADED) {
-
-			/* Table is still loaded, this is an error */
-
-			status = AE_ALREADY_EXISTS;
-			goto release;
-		} else {
-			/* Table was unloaded, allow it to be reloaded */
-
-			table_desc->pointer =
-			    acpi_gbl_root_table_list.tables[i].pointer;
-			table_desc->address =
-			    acpi_gbl_root_table_list.tables[i].address;
-			status = AE_OK;
-			goto print_header;
-		}
-	}
-
-	/* Add the table to the global root table list */
-
-	status = acpi_tb_store_table(table_desc->address, table_desc->pointer,
-				     table_desc->length, table_desc->flags,
-				     table_index);
-	if (ACPI_FAILURE(status)) {
-		goto release;
-	}
-
-      print_header:
-	acpi_tb_print_table_header(table_desc->address, table_desc->pointer);
-
-      release:
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_resize_root_table_list
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Expand the size of global table array
- *
- ******************************************************************************/
-
-acpi_status acpi_tb_resize_root_table_list(void)
-{
-	struct acpi_table_desc *tables;
-
-	ACPI_FUNCTION_TRACE(tb_resize_root_table_list);
-
-	/* allow_resize flag is a parameter to acpi_initialize_tables */
-
-	if (!(acpi_gbl_root_table_list.flags & ACPI_ROOT_ALLOW_RESIZE)) {
-		ACPI_ERROR((AE_INFO,
-			    "Resize of Root Table Array is not allowed"));
-		return_ACPI_STATUS(AE_SUPPORT);
-	}
-
-	/* Increase the Table Array size */
-
-	tables = ACPI_ALLOCATE_ZEROED(((acpi_size) acpi_gbl_root_table_list.
-				       size + ACPI_ROOT_TABLE_SIZE_INCREMENT)
-				      * sizeof(struct acpi_table_desc));
-	if (!tables) {
-		ACPI_ERROR((AE_INFO,
-			    "Could not allocate new root table array"));
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Copy and free the previous table array */
-
-	if (acpi_gbl_root_table_list.tables) {
-		ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
-			    (acpi_size) acpi_gbl_root_table_list.size *
-			    sizeof(struct acpi_table_desc));
-
-		if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
-			ACPI_FREE(acpi_gbl_root_table_list.tables);
-		}
-	}
-
-	acpi_gbl_root_table_list.tables = tables;
-	acpi_gbl_root_table_list.size += ACPI_ROOT_TABLE_SIZE_INCREMENT;
-	acpi_gbl_root_table_list.flags |= (u8) ACPI_ROOT_ORIGIN_ALLOCATED;
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_store_table
- *
- * PARAMETERS:  Address             - Table address
- *              Table               - Table header
- *              Length              - Table length
- *              Flags               - Table origin flags
- *
- * RETURN:      Status and table index.
- *
- * DESCRIPTION: Add an ACPI table to the global table list
- *
- ******************************************************************************/
-
-acpi_status
-acpi_tb_store_table(acpi_physical_address address,
-		    struct acpi_table_header *table,
-		    u32 length, u8 flags, u32 *table_index)
-{
-	acpi_status status = AE_OK;
-
-	/* Ensure that there is room for the table in the Root Table List */
-
-	if (acpi_gbl_root_table_list.count >= acpi_gbl_root_table_list.size) {
-		status = acpi_tb_resize_root_table_list();
-		if (ACPI_FAILURE(status)) {
-			return (status);
-		}
-	}
-
-	/* Initialize added table */
-
-	acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
-	    address = address;
-	acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
-	    pointer = table;
-	acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].length =
-	    length;
-	acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
-	    owner_id = 0;
-	acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].flags =
-	    flags;
-
-	ACPI_MOVE_32_TO_32(&
-			   (acpi_gbl_root_table_list.
-			    tables[acpi_gbl_root_table_list.count].signature),
-			   table->signature);
-
-	*table_index = acpi_gbl_root_table_list.count;
-	acpi_gbl_root_table_list.count++;
-	return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_delete_table
- *
- * PARAMETERS:  table_index         - Table index
- *
- * RETURN:      None
- *
- * DESCRIPTION: Delete one internal ACPI table
- *
- ******************************************************************************/
-
-void acpi_tb_delete_table(struct acpi_table_desc *table_desc)
-{
-	/* Table must be mapped or allocated */
-	if (!table_desc->pointer) {
-		return;
-	}
-	switch (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) {
-	case ACPI_TABLE_ORIGIN_MAPPED:
-		acpi_os_unmap_memory(table_desc->pointer, table_desc->length);
-		break;
-	case ACPI_TABLE_ORIGIN_ALLOCATED:
-		ACPI_FREE(table_desc->pointer);
-		break;
-	default:;
-	}
-
-	table_desc->pointer = NULL;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_terminate
- *
- * PARAMETERS:  None
- *
- * RETURN:      None
- *
- * DESCRIPTION: Delete all internal ACPI tables
- *
- ******************************************************************************/
-
-void acpi_tb_terminate(void)
-{
-	u32 i;
-
-	ACPI_FUNCTION_TRACE(tb_terminate);
-
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
-	/* Delete the individual tables */
-
-	for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
-		acpi_tb_delete_table(&acpi_gbl_root_table_list.tables[i]);
-	}
-
-	/*
-	 * Delete the root table array if allocated locally. Array cannot be
-	 * mapped, so we don't need to check for that flag.
-	 */
-	if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
-		ACPI_FREE(acpi_gbl_root_table_list.tables);
-	}
-
-	acpi_gbl_root_table_list.tables = NULL;
-	acpi_gbl_root_table_list.flags = 0;
-	acpi_gbl_root_table_list.count = 0;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_delete_namespace_by_owner
- *
- * PARAMETERS:  table_index         - Table index
- *
- * RETURN:      None
- *
- * DESCRIPTION: Delete all namespace objects created when this table was loaded.
- *
- ******************************************************************************/
-
-void acpi_tb_delete_namespace_by_owner(u32 table_index)
-{
-	acpi_owner_id owner_id;
-
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-	if (table_index < acpi_gbl_root_table_list.count) {
-		owner_id =
-		    acpi_gbl_root_table_list.tables[table_index].owner_id;
-	} else {
-		(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-		return;
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-	acpi_ns_delete_namespace_by_owner(owner_id);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_allocate_owner_id
- *
- * PARAMETERS:  table_index         - Table index
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Allocates an owner_id for the table at the given index
- *
- ******************************************************************************/
-
-acpi_status acpi_tb_allocate_owner_id(u32 table_index)
-{
-	acpi_status status = AE_BAD_PARAMETER;
-
-	ACPI_FUNCTION_TRACE(tb_allocate_owner_id);
-
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-	if (table_index < acpi_gbl_root_table_list.count) {
-		status = acpi_ut_allocate_owner_id
-		    (&(acpi_gbl_root_table_list.tables[table_index].owner_id));
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_release_owner_id
- *
- * PARAMETERS:  table_index         - Table index
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Releases the owner_id of the table at the given index
- *
- ******************************************************************************/
-
-acpi_status acpi_tb_release_owner_id(u32 table_index)
-{
-	acpi_status status = AE_BAD_PARAMETER;
-
-	ACPI_FUNCTION_TRACE(tb_release_owner_id);
-
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-	if (table_index < acpi_gbl_root_table_list.count) {
-		acpi_ut_release_owner_id(&
-					 (acpi_gbl_root_table_list.
-					  tables[table_index].owner_id));
-		status = AE_OK;
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_get_owner_id
- *
- * PARAMETERS:  table_index         - Table index
- *              owner_id            - Where the table owner_id is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Returns the owner_id for the ACPI table
- *
- ******************************************************************************/
-
-acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id)
-{
-	acpi_status status = AE_BAD_PARAMETER;
-
-	ACPI_FUNCTION_TRACE(tb_get_owner_id);
-
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-	if (table_index < acpi_gbl_root_table_list.count) {
-		*owner_id =
-		    acpi_gbl_root_table_list.tables[table_index].owner_id;
-		status = AE_OK;
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_is_table_loaded
- *
- * PARAMETERS:  table_index         - Table index
- *
- * RETURN:      Table Loaded Flag
- *
- ******************************************************************************/
-
-u8 acpi_tb_is_table_loaded(u32 table_index)
-{
-	u8 is_loaded = FALSE;
-
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-	if (table_index < acpi_gbl_root_table_list.count) {
-		is_loaded = (u8)
-		    (acpi_gbl_root_table_list.tables[table_index].
-		     flags & ACPI_TABLE_IS_LOADED);
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-	return (is_loaded);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_set_table_loaded_flag
- *
- * PARAMETERS:  table_index         - Table index
- *              is_loaded           - TRUE if table is loaded, FALSE otherwise
- *
- * RETURN:      None
- *
- * DESCRIPTION: Sets the table loaded flag to either TRUE or FALSE.
- *
- ******************************************************************************/
-
-void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded)
-{
-
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-	if (table_index < acpi_gbl_root_table_list.count) {
-		if (is_loaded) {
-			acpi_gbl_root_table_list.tables[table_index].flags |=
-			    ACPI_TABLE_IS_LOADED;
-		} else {
-			acpi_gbl_root_table_list.tables[table_index].flags &=
-			    ~ACPI_TABLE_IS_LOADED;
-		}
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-}
diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/tables/tbutils.c
deleted file mode 100644
index 0cc92ef..0000000
--- a/drivers/acpi/tables/tbutils.c
+++ /dev/null
@@ -1,557 +0,0 @@
-/******************************************************************************
- *
- * Module Name: tbutils   - table utilities
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/actables.h>
-
-#define _COMPONENT          ACPI_TABLES
-ACPI_MODULE_NAME("tbutils")
-
-/* Local prototypes */
-static acpi_physical_address
-acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_check_xsdt
- *
- * PARAMETERS:  address                    - Pointer to the XSDT
- *
- * RETURN:      Status
- *              AE_OK                   - XSDT is okay
- *              AE_NO_MEMORY            - Cannot map the XSDT
- *              AE_INVALID_TABLE_LENGTH - Invalid table length
- *              AE_NULL_ENTRY           - XSDT contains a NULL entry
- *
- * DESCRIPTION: Validate the XSDT (length check and scan for NULL entries)
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_tb_check_xsdt(acpi_physical_address address)
-{
-	struct acpi_table_header *table;
-	u32 length;
-	u64 xsdt_entry_address;
-	u8 *table_entry;
-	u32 table_count;
-	int i;
-
-	table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
-	if (!table)
-		return AE_NO_MEMORY;
-
-	length = table->length;
-	acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
-	if (length < sizeof(struct acpi_table_header))
-		return AE_INVALID_TABLE_LENGTH;
-
-	table = acpi_os_map_memory(address, length);
-	if (!table)
-		return AE_NO_MEMORY;
-
-	/* Calculate the number of tables described in XSDT */
-	table_count =
-		(u32) ((table->length -
-		sizeof(struct acpi_table_header)) / sizeof(u64));
-	table_entry =
-		ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
-	for (i = 0; i < table_count; i++) {
-		ACPI_MOVE_64_TO_64(&xsdt_entry_address, table_entry);
-		if (!xsdt_entry_address) {
-			/* XSDT has NULL entry */
-			break;
-		}
-		table_entry += sizeof(u64);
-	}
-	acpi_os_unmap_memory(table, length);
-
-	if (i < table_count)
-		return AE_NULL_ENTRY;
-	else
-		return AE_OK;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_tables_loaded
- *
- * PARAMETERS:  None
- *
- * RETURN:      TRUE if required ACPI tables are loaded
- *
- * DESCRIPTION: Determine if the minimum required ACPI tables are present
- *              (FADT, FACS, DSDT)
- *
- ******************************************************************************/
-
-u8 acpi_tb_tables_loaded(void)
-{
-
-	if (acpi_gbl_root_table_list.count >= 3) {
-		return (TRUE);
-	}
-
-	return (FALSE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_print_table_header
- *
- * PARAMETERS:  Address             - Table physical address
- *              Header              - Table header
- *
- * RETURN:      None
- *
- * DESCRIPTION: Print an ACPI table header. Special cases for FACS and RSDP.
- *
- ******************************************************************************/
-
-void
-acpi_tb_print_table_header(acpi_physical_address address,
-			   struct acpi_table_header *header)
-{
-
-	if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
-
-		/* FACS only has signature and length fields of common table header */
-
-		ACPI_INFO((AE_INFO, "%4.4s %08lX, %04X",
-			   header->signature, (unsigned long)address,
-			   header->length));
-	} else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
-
-		/* RSDP has no common fields */
-
-		ACPI_INFO((AE_INFO, "RSDP %08lX, %04X (r%d %6.6s)",
-			   (unsigned long)address,
-			   (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
-			    revision >
-			    0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
-					       header)->length : 20,
-			   ACPI_CAST_PTR(struct acpi_table_rsdp,
-					 header)->revision,
-			   ACPI_CAST_PTR(struct acpi_table_rsdp,
-					 header)->oem_id));
-	} else {
-		/* Standard ACPI table with full common header */
-
-		ACPI_INFO((AE_INFO,
-			   "%4.4s %08lX, %04X (r%d %6.6s %8.8s %8X %4.4s %8X)",
-			   header->signature, (unsigned long)address,
-			   header->length, header->revision, header->oem_id,
-			   header->oem_table_id, header->oem_revision,
-			   header->asl_compiler_id,
-			   header->asl_compiler_revision));
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_verify_checksum
- *
- * PARAMETERS:  Table               - ACPI table to verify
- *              Length              - Length of entire table
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Verifies that the table checksums to zero. Optionally returns
- *              exception on bad checksum.
- *
- ******************************************************************************/
-
-acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
-{
-	u8 checksum;
-
-	/* Compute the checksum on the table */
-
-	checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length);
-
-	/* Checksum ok? (should be zero) */
-
-	if (checksum) {
-		ACPI_WARNING((AE_INFO,
-			      "Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X",
-			      table->signature, table->checksum,
-			      (u8) (table->checksum - checksum)));
-
-#if (ACPI_CHECKSUM_ABORT)
-
-		return (AE_BAD_CHECKSUM);
-#endif
-	}
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_checksum
- *
- * PARAMETERS:  Buffer          - Pointer to memory region to be checked
- *              Length          - Length of this memory region
- *
- * RETURN:      Checksum (u8)
- *
- * DESCRIPTION: Calculates the 8-bit (mod 256) checksum of a memory region.
- *
- ******************************************************************************/
-
-u8 acpi_tb_checksum(u8 *buffer, u32 length)
-{
-	u8 sum = 0;
-	u8 *end = buffer + length;
-
-	while (buffer < end) {
-		sum = (u8) (sum + *(buffer++));
-	}
-
-	return sum;
-}
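
For reference, the convention these two routines rely on: every byte of the table, including the Checksum field itself, is summed modulo 256, and a valid table sums to zero. A small self-contained illustration with toy data (not a real ACPI table layout):

#include <stdint.h>
#include <stdio.h>

/* Same mod-256 byte sum as acpi_tb_checksum() */
static uint8_t byte_sum(const uint8_t *buf, uint32_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum = (uint8_t)(sum + *buf++);
	return sum;
}

int main(void)
{
	/* Toy 8-byte "table": byte 4 plays the role of the checksum field */
	uint8_t table[8] = { 'D', 'E', 'M', 'O', 0, 1, 2, 3 };

	table[4] = (uint8_t)(0 - byte_sum(table, sizeof(table)));

	/* The whole table now sums to zero, which is exactly what
	 * acpi_tb_verify_checksum() checks for. */
	printf("sum = %u\n", byte_sum(table, sizeof(table)));	/* 0 */
	return 0;
}
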
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_install_table
- *
- * PARAMETERS:  Address                 - Physical address of DSDT or FACS
- *              Flags                   - Table origin flags
- *              Signature               - Table signature, NULL if no need to
- *                                        match
- *              table_index             - Index into root table array
- *
- * RETURN:      None
- *
- * DESCRIPTION: Install an ACPI table into the global data structure.
- *
- ******************************************************************************/
-
-void
-acpi_tb_install_table(acpi_physical_address address,
-		      u8 flags, char *signature, u32 table_index)
-{
-	struct acpi_table_header *table;
-
-	if (!address) {
-		ACPI_ERROR((AE_INFO,
-			    "Null physical address for ACPI table [%s]",
-			    signature));
-		return;
-	}
-
-	/* Map just the table header */
-
-	table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
-	if (!table) {
-		return;
-	}
-
-	/* If a particular signature is expected, signature must match */
-
-	if (signature && !ACPI_COMPARE_NAME(table->signature, signature)) {
-		ACPI_ERROR((AE_INFO,
-			    "Invalid signature 0x%X for ACPI table [%s]",
-			    *ACPI_CAST_PTR(u32, table->signature), signature));
-		goto unmap_and_exit;
-	}
-
-	/* Initialize the table entry */
-
-	acpi_gbl_root_table_list.tables[table_index].address = address;
-	acpi_gbl_root_table_list.tables[table_index].length = table->length;
-	acpi_gbl_root_table_list.tables[table_index].flags = flags;
-
-	ACPI_MOVE_32_TO_32(&
-			   (acpi_gbl_root_table_list.tables[table_index].
-			    signature), table->signature);
-
-	acpi_tb_print_table_header(address, table);
-
-	if (table_index == ACPI_TABLE_INDEX_DSDT) {
-
-		/* Global integer width is based upon revision of the DSDT */
-
-		acpi_ut_set_integer_width(table->revision);
-	}
-
-      unmap_and_exit:
-	acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_get_root_table_entry
- *
- * PARAMETERS:  table_entry         - Pointer to the RSDT/XSDT table entry
- *              table_entry_size    - sizeof 32 or 64 (RSDT or XSDT)
- *
- * RETURN:      Physical address extracted from the root table
- *
- * DESCRIPTION: Get one root table entry. Handles 32-bit and 64-bit cases on
- *              both 32-bit and 64-bit platforms
- *
- * NOTE:        acpi_physical_address is 32-bit on 32-bit platforms, 64-bit on
- *              64-bit platforms.
- *
- ******************************************************************************/
-
-static acpi_physical_address
-acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
-{
-	u64 address64;
-
-	/*
-	 * Get the table physical address (32-bit for RSDT, 64-bit for XSDT):
-	 * Note: Addresses are 32-bit aligned (not 64) in both RSDT and XSDT
-	 */
-	if (table_entry_size == sizeof(u32)) {
-		/*
-		 * 32-bit platform, RSDT: Return 32-bit table entry
-		 * 64-bit platform, RSDT: Expand 32-bit to 64-bit and return
-		 */
-		return ((acpi_physical_address)
-			(*ACPI_CAST_PTR(u32, table_entry)));
-	} else {
-		/*
-		 * 32-bit platform, XSDT: Truncate 64-bit to 32-bit and return
-		 * 64-bit platform, XSDT: Move (unaligned) 64-bit to local, return 64-bit
-		 */
-		ACPI_MOVE_64_TO_64(&address64, table_entry);
-
-#if ACPI_MACHINE_WIDTH == 32
-		if (address64 > ACPI_UINT32_MAX) {
-
-			/* Will truncate 64-bit address to 32 bits, issue warning */
-
-			ACPI_WARNING((AE_INFO,
-				      "64-bit Physical Address in XSDT is too large (%8.8X%8.8X), truncating",
-				      ACPI_FORMAT_UINT64(address64)));
-		}
-#endif
-		return ((acpi_physical_address) (address64));
-	}
-}
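
The same entry extraction can be sketched in plain C, assuming a little-endian host; the real ACPI_MOVE_64_TO_64 macro additionally handles byte order on big-endian builds, and the entry data below is made up. The point is that entries start right after the 36-byte common header and are therefore only 4-byte aligned, so 64-bit XSDT entries are copied out rather than dereferenced directly.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* entry_size is 4 for RSDT entries, 8 for XSDT entries */
static uint64_t root_entry(const uint8_t *entry, uint32_t entry_size)
{
	if (entry_size == sizeof(uint32_t)) {
		uint32_t addr32;

		memcpy(&addr32, entry, sizeof(addr32));
		return addr32;
	} else {
		uint64_t addr64;

		memcpy(&addr64, entry, sizeof(addr64));	/* unaligned-safe */
		return addr64;
	}
}

int main(void)
{
	/* Hypothetical XSDT payload: two 64-bit entries, little-endian */
	uint8_t entries[16] = {
		0x00, 0x10, 0xef, 0x7f, 0, 0, 0, 0,	/* 0x7fef1000 */
		0x00, 0x20, 0xef, 0x7f, 0, 0, 0, 0,	/* 0x7fef2000 */
	};
	uint32_t i, count = sizeof(entries) / sizeof(uint64_t);

	for (i = 0; i < count; i++)
		printf("entry %u: 0x%llx\n", i, (unsigned long long)
		       root_entry(entries + i * sizeof(uint64_t),
				  sizeof(uint64_t)));
	return 0;
}
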
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_parse_root_table
- *
- * PARAMETERS:  Rsdp                    - Pointer to the RSDP
- *              Flags                   - Table origin flags
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to parse the Root System Description
- *              Table (RSDT or XSDT)
- *
- * NOTE:        Tables are mapped (not copied) for efficiency. The FACS must
- *              be mapped and cannot be copied because it contains the actual
- *              memory location of the ACPI Global Lock.
- *
- ******************************************************************************/
-
-acpi_status __init
-acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags)
-{
-	struct acpi_table_rsdp *rsdp;
-	u32 table_entry_size;
-	u32 i;
-	u32 table_count;
-	struct acpi_table_header *table;
-	acpi_physical_address address;
-	acpi_physical_address uninitialized_var(rsdt_address);
-	u32 length;
-	u8 *table_entry;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(tb_parse_root_table);
-
-	/*
-	 * Map the entire RSDP and extract the address of the RSDT or XSDT
-	 */
-	rsdp = acpi_os_map_memory(rsdp_address, sizeof(struct acpi_table_rsdp));
-	if (!rsdp) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	acpi_tb_print_table_header(rsdp_address,
-				   ACPI_CAST_PTR(struct acpi_table_header,
-						 rsdp));
-
-	/* Differentiate between RSDT and XSDT root tables */
-
-	if (rsdp->revision > 1 && rsdp->xsdt_physical_address) {
-		/*
-		 * Root table is an XSDT (64-bit physical addresses). We must use the
-		 * XSDT if the revision is > 1 and the XSDT pointer is present, as per
-		 * the ACPI specification.
-		 */
-		address = (acpi_physical_address) rsdp->xsdt_physical_address;
-		table_entry_size = sizeof(u64);
-		rsdt_address = (acpi_physical_address)
-					rsdp->rsdt_physical_address;
-	} else {
-		/* Root table is an RSDT (32-bit physical addresses) */
-
-		address = (acpi_physical_address) rsdp->rsdt_physical_address;
-		table_entry_size = sizeof(u32);
-	}
-
-	/*
-	 * It is not possible to map more than one entry in some environments,
-	 * so unmap the RSDP here before mapping other tables
-	 */
-	acpi_os_unmap_memory(rsdp, sizeof(struct acpi_table_rsdp));
-
-	if (table_entry_size == sizeof(u64)) {
-		if (acpi_tb_check_xsdt(address) == AE_NULL_ENTRY) {
-			/* XSDT has NULL entry, RSDT is used */
-			address = rsdt_address;
-			table_entry_size = sizeof(u32);
-			ACPI_WARNING((AE_INFO, "BIOS XSDT has NULL entry, "
-					"using RSDT"));
-		}
-	}
-	/* Map the RSDT/XSDT table header to get the full table length */
-
-	table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
-	if (!table) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	acpi_tb_print_table_header(address, table);
-
-	/* Get the length of the full table, verify length and map entire table */
-
-	length = table->length;
-	acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
-
-	if (length < sizeof(struct acpi_table_header)) {
-		ACPI_ERROR((AE_INFO, "Invalid length 0x%X in RSDT/XSDT",
-			    length));
-		return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
-	}
-
-	table = acpi_os_map_memory(address, length);
-	if (!table) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Validate the root table checksum */
-
-	status = acpi_tb_verify_checksum(table, length);
-	if (ACPI_FAILURE(status)) {
-		acpi_os_unmap_memory(table, length);
-		return_ACPI_STATUS(status);
-	}
-
-	/* Calculate the number of tables described in the root table */
-
-	table_count =
-	    (u32) ((table->length -
-		    sizeof(struct acpi_table_header)) / table_entry_size);
-
-	/*
-	 * First two entries in the table array are reserved for the DSDT and FACS,
-	 * which are not actually present in the RSDT/XSDT - they come from the FADT
-	 */
-	table_entry =
-	    ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
-	acpi_gbl_root_table_list.count = 2;
-
-	/*
-	 * Initialize the root table array from the RSDT/XSDT
-	 */
-	for (i = 0; i < table_count; i++) {
-		if (acpi_gbl_root_table_list.count >=
-		    acpi_gbl_root_table_list.size) {
-
-			/* There is no more room in the root table array, attempt resize */
-
-			status = acpi_tb_resize_root_table_list();
-			if (ACPI_FAILURE(status)) {
-				ACPI_WARNING((AE_INFO,
-					      "Truncating %u table entries!",
-					      (unsigned)
-					      (acpi_gbl_root_table_list.size -
-					       acpi_gbl_root_table_list.
-					       count)));
-				break;
-			}
-		}
-
-		/* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */
-
-		acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
-		    address =
-		    acpi_tb_get_root_table_entry(table_entry, table_entry_size);
-
-		table_entry += table_entry_size;
-		acpi_gbl_root_table_list.count++;
-	}
-
-	/*
-	 * It is not possible to map more than one entry in some environments,
-	 * so unmap the root table here before mapping other tables
-	 */
-	acpi_os_unmap_memory(table, length);
-
-	/*
-	 * Complete the initialization of the root table array by examining
-	 * the header of each table
-	 */
-	for (i = 2; i < acpi_gbl_root_table_list.count; i++) {
-		acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].
-				      address, flags, NULL, i);
-
-		/* Special case for FADT - get the DSDT and FACS */
-
-		if (ACPI_COMPARE_NAME
-		    (&acpi_gbl_root_table_list.tables[i].signature,
-		     ACPI_SIG_FADT)) {
-			acpi_tb_parse_fadt(i, flags);
-		}
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c
deleted file mode 100644
index fd7770a..0000000
--- a/drivers/acpi/tables/tbxface.c
+++ /dev/null
@@ -1,734 +0,0 @@
-/******************************************************************************
- *
- * Module Name: tbxface - Public interfaces to the ACPI subsystem
- *                         ACPI table oriented interfaces
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/actables.h>
-
-#define _COMPONENT          ACPI_TABLES
-ACPI_MODULE_NAME("tbxface")
-
-/* Local prototypes */
-static acpi_status acpi_tb_load_namespace(void);
-
-static int no_auto_ssdt;
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_allocate_root_table
- *
- * PARAMETERS:  initial_table_count - Size of initial_table_array, in number of
- *                                    struct acpi_table_desc structures
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Allocate a root table array. Used by the iASL compiler and
- *              acpi_initialize_tables.
- *
- ******************************************************************************/
-
-acpi_status acpi_allocate_root_table(u32 initial_table_count)
-{
-
-	acpi_gbl_root_table_list.size = initial_table_count;
-	acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE;
-
-	return (acpi_tb_resize_root_table_list());
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_initialize_tables
- *
- * PARAMETERS:  initial_table_array - Pointer to an array of pre-allocated
- *                                    struct acpi_table_desc structures. If NULL, the
- *                                    array is dynamically allocated.
- *              initial_table_count - Size of initial_table_array, in number of
- *                                    struct acpi_table_desc structures
- *              allow_resize        - Flag to tell Table Manager if resize of
- *                                    pre-allocated array is allowed. Ignored
- *                                    if initial_table_array is NULL.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Initialize the table manager, get the RSDP and RSDT/XSDT.
- *
- * NOTE:        Allows static allocation of the initial table array in order
- *              to avoid the use of dynamic memory in confined environments
- *              such as the kernel boot sequence where it may not be available.
- *
- *              If the host OS memory managers are initialized, use NULL for
- *              initial_table_array, and the table will be dynamically allocated.
- *
- ******************************************************************************/
-
-acpi_status __init
-acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
-		       u32 initial_table_count, u8 allow_resize)
-{
-	acpi_physical_address rsdp_address;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_initialize_tables);
-
-	/*
-	 * Set up the Root Table Array
-	 * Allocate the table array if requested
-	 */
-	if (!initial_table_array) {
-		status = acpi_allocate_root_table(initial_table_count);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	} else {
-		/* Root Table Array has been statically allocated by the host */
-
-		ACPI_MEMSET(initial_table_array, 0,
-			    (acpi_size) initial_table_count *
-			    sizeof(struct acpi_table_desc));
-
-		acpi_gbl_root_table_list.tables = initial_table_array;
-		acpi_gbl_root_table_list.size = initial_table_count;
-		acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN;
-		if (allow_resize) {
-			acpi_gbl_root_table_list.flags |=
-			    ACPI_ROOT_ALLOW_RESIZE;
-		}
-	}
-
-	/* Get the address of the RSDP */
-
-	rsdp_address = acpi_os_get_root_pointer();
-	if (!rsdp_address) {
-		return_ACPI_STATUS(AE_NOT_FOUND);
-	}
-
-	/*
-	 * Get the root table (RSDT or XSDT) and extract all entries to the local
-	 * Root Table Array. This array contains the information of the RSDT/XSDT
-	 * in a common, more usable format.
-	 */
-	status =
-	    acpi_tb_parse_root_table(rsdp_address, ACPI_TABLE_ORIGIN_MAPPED);
-	return_ACPI_STATUS(status);
-}
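
Typical early-boot usage can be sketched as follows; the array size, symbol names and __initdata placement are illustrative assumptions, not taken from this patch. A static descriptor array is handed in before the allocators are up, with resizing disallowed, and acpi_reallocate_root_table() can later move the list into dynamic memory.

#include <linux/init.h>
#include <acpi/acpi.h>

#define MY_MAX_ACPI_TABLES	128	/* illustrative capacity */

/* Scratch root table array used until dynamic allocation is available */
static struct acpi_table_desc my_initial_tables[MY_MAX_ACPI_TABLES] __initdata;

static acpi_status __init my_early_acpi_table_init(void)
{
	/* allow_resize = 0: the static array must not be reallocated */
	return acpi_initialize_tables(my_initial_tables,
				      MY_MAX_ACPI_TABLES, 0);
}
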
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_reallocate_root_table
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Reallocate Root Table List into dynamic memory. Copies the
- *              root list from the previously provided scratch area. Should
- *              be called once dynamic memory allocation is available in the
- *              kernel
- *
- ******************************************************************************/
-acpi_status acpi_reallocate_root_table(void)
-{
-	struct acpi_table_desc *tables;
-	acpi_size new_size;
-
-	ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
-
-	/*
-	 * Only reallocate the root table if the host provided a static buffer
-	 * for the table array in the call to acpi_initialize_tables.
-	 */
-	if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
-		return_ACPI_STATUS(AE_SUPPORT);
-	}
-
-	new_size = ((acpi_size) acpi_gbl_root_table_list.count +
-		    ACPI_ROOT_TABLE_SIZE_INCREMENT) *
-	    sizeof(struct acpi_table_desc);
-
-	/* Create new array and copy the old array */
-
-	tables = ACPI_ALLOCATE_ZEROED(new_size);
-	if (!tables) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, new_size);
-
-	acpi_gbl_root_table_list.size = acpi_gbl_root_table_list.count;
-	acpi_gbl_root_table_list.tables = tables;
-	acpi_gbl_root_table_list.flags =
-	    ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE;
-
-	return_ACPI_STATUS(AE_OK);
-}
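For illustration only: a host OS would typically use these two interfaces as sketched below, carrying the root table list in a static scratch array through early boot and moving it to dynamic memory later. The array size and the __initdata placement are assumptions made for the sketch, not requirements of the interface.

	/* Sketch: early-boot table init with a static scratch array (size illustrative) */
	static struct acpi_table_desc initial_tables[32] __initdata;

	static int __init example_acpi_table_init(void)
	{
		acpi_status status;

		/* No dynamic memory yet, so hand ACPICA the static array */
		status = acpi_initialize_tables(initial_tables, 32, 0);
		if (ACPI_FAILURE(status))
			return -ENODEV;

		/* Once the allocator is up, the list can be moved to the heap */
		status = acpi_reallocate_root_table();
		return ACPI_FAILURE(status) ? -ENOMEM : 0;
	}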
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_load_table
- *
- * PARAMETERS:  table_ptr       - pointer to a buffer containing the entire
- *                                table to be loaded
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to load a table from the caller's
- *              buffer. The buffer must contain an entire ACPI Table including
- *              a valid header. The header fields will be verified, and if it
- *              is determined that the table is invalid, the call will fail.
- *
- ******************************************************************************/
-acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
-{
-	acpi_status status;
-	u32 table_index;
-	struct acpi_table_desc table_desc;
-
-	if (!table_ptr)
-		return AE_BAD_PARAMETER;
-
-	ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
-	table_desc.pointer = table_ptr;
-	table_desc.length = table_ptr->length;
-	table_desc.flags = ACPI_TABLE_ORIGIN_UNKNOWN;
-
-	/*
-	 * Install the new table into the local data structures
-	 */
-	status = acpi_tb_add_table(&table_desc, &table_index);
-	if (ACPI_FAILURE(status)) {
-		return status;
-	}
-	status = acpi_ns_load_table(table_index, acpi_gbl_root_node);
-	return status;
-}
-
-ACPI_EXPORT_SYMBOL(acpi_load_table)
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_get_table_header
- *
- * PARAMETERS:  Signature           - ACPI signature of needed table
- *              Instance            - Which instance (for SSDTs)
- *              out_table_header    - The pointer to the table header to fill
- *
- * RETURN:      Status and pointer to mapped table header
- *
- * DESCRIPTION: Finds an ACPI table header.
- *
- * NOTE:        Caller is responsible for unmapping the header with
- *              acpi_os_unmap_memory
- *
- *****************************************************************************/
-acpi_status
-acpi_get_table_header(char *signature,
-		      u32 instance, struct acpi_table_header *out_table_header)
-{
-	u32 i;
-	u32 j;
-	struct acpi_table_header *header;
-
-	/* Parameter validation */
-
-	if (!signature || !out_table_header) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/*
-	 * Walk the root table list
-	 */
-	for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
-		if (!ACPI_COMPARE_NAME
-		    (&(acpi_gbl_root_table_list.tables[i].signature),
-		     signature)) {
-			continue;
-		}
-
-		if (++j < instance) {
-			continue;
-		}
-
-		if (!acpi_gbl_root_table_list.tables[i].pointer) {
-			if ((acpi_gbl_root_table_list.tables[i].
-			     flags & ACPI_TABLE_ORIGIN_MASK) ==
-			    ACPI_TABLE_ORIGIN_MAPPED) {
-				header =
-				    acpi_os_map_memory(acpi_gbl_root_table_list.
-						       tables[i].address,
-						       sizeof(struct
-							      acpi_table_header));
-				if (!header) {
-					return AE_NO_MEMORY;
-				}
-				ACPI_MEMCPY(out_table_header, header,
-					    sizeof(struct acpi_table_header));
-				acpi_os_unmap_memory(header,
-						     sizeof(struct
-							    acpi_table_header));
-			} else {
-				return AE_NOT_FOUND;
-			}
-		} else {
-			ACPI_MEMCPY(out_table_header,
-				    acpi_gbl_root_table_list.tables[i].pointer,
-				    sizeof(struct acpi_table_header));
-		}
-		return (AE_OK);
-	}
-
-	return (AE_NOT_FOUND);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_table_header)
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_unload_table_id
- *
- * PARAMETERS:  id            - Owner ID of the table to be removed.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This routine is used to force the unload of a table (by id)
- *
- ******************************************************************************/
-acpi_status acpi_unload_table_id(acpi_owner_id id)
-{
-	int i;
-	acpi_status status = AE_NOT_EXIST;
-
-	ACPI_FUNCTION_TRACE(acpi_unload_table_id);
-
-	/* Find table in the global table list */
-	for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
-		if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
-			continue;
-		}
-		/*
-		 * Delete all namespace objects owned by this table. Note that these
-		 * objects can appear anywhere in the namespace by virtue of the AML
-		 * "Scope" operator. Thus, we need to track ownership by an ID, not
-		 * simply a position within the hierarchy
-		 */
-		acpi_tb_delete_namespace_by_owner(i);
-		status = acpi_tb_release_owner_id(i);
-		acpi_tb_set_table_loaded_flag(i, FALSE);
-		break;
-	}
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_table
- *
- * PARAMETERS:  Signature           - ACPI signature of needed table
- *              Instance            - Which instance (for SSDTs)
- *              out_table           - Where the pointer to the table is returned
- *
- * RETURN:      Status and pointer to table
- *
- * DESCRIPTION: Finds and verifies an ACPI table.
- *
- *****************************************************************************/
-acpi_status
-acpi_get_table(char *signature,
-	       u32 instance, struct acpi_table_header **out_table)
-{
-	u32 i;
-	u32 j;
-	acpi_status status;
-
-	/* Parameter validation */
-
-	if (!signature || !out_table) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/*
-	 * Walk the root table list
-	 */
-	for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
-		if (!ACPI_COMPARE_NAME
-		    (&(acpi_gbl_root_table_list.tables[i].signature),
-		     signature)) {
-			continue;
-		}
-
-		if (++j < instance) {
-			continue;
-		}
-
-		status =
-		    acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]);
-		if (ACPI_SUCCESS(status)) {
-			*out_table = acpi_gbl_root_table_list.tables[i].pointer;
-		}
-
-		if (!acpi_gbl_permanent_mmap) {
-			acpi_gbl_root_table_list.tables[i].pointer = NULL;
-		}
-
-		return (status);
-	}
-
-	return (AE_NOT_FOUND);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_table)
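A minimal caller of acpi_get_table, assuming the standard ACPI_SIG_* signature macros; instance 1 asks for the first table whose signature matches:

	struct acpi_table_header *fadt;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_FADT, 1, &fadt);
	if (ACPI_FAILURE(status))
		return status;	/* AE_NOT_FOUND if no table with that signature */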
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_table_by_index
- *
- * PARAMETERS:  table_index         - Table index
- *              Table               - Where the pointer to the table is returned
- *
- * RETURN:      Status and pointer to the table
- *
- * DESCRIPTION: Obtain a table by an index into the global table list.
- *
- ******************************************************************************/
-acpi_status
-acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_get_table_by_index);
-
-	/* Parameter validation */
-
-	if (!table) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
-	/* Validate index */
-
-	if (table_index >= acpi_gbl_root_table_list.count) {
-		(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	if (!acpi_gbl_root_table_list.tables[table_index].pointer) {
-
-		/* Table is not mapped, map it */
-
-		status =
-		    acpi_tb_verify_table(&acpi_gbl_root_table_list.
-					 tables[table_index]);
-		if (ACPI_FAILURE(status)) {
-			(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	*table = acpi_gbl_root_table_list.tables[table_index].pointer;
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-	return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_load_namespace
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Load the namespace from the DSDT and all SSDTs/PSDTs found in
- *              the RSDT/XSDT.
- *
- ******************************************************************************/
-static acpi_status acpi_tb_load_namespace(void)
-{
-	acpi_status status;
-	struct acpi_table_header *table;
-	u32 i;
-
-	ACPI_FUNCTION_TRACE(tb_load_namespace);
-
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
-	/*
-	 * Load the namespace. The DSDT is required, but any SSDT and PSDT tables
-	 * are optional.
-	 */
-	if (!acpi_gbl_root_table_list.count ||
-	    !ACPI_COMPARE_NAME(&
-			       (acpi_gbl_root_table_list.
-				tables[ACPI_TABLE_INDEX_DSDT].signature),
-			       ACPI_SIG_DSDT)
-	    ||
-	    ACPI_FAILURE(acpi_tb_verify_table
-			 (&acpi_gbl_root_table_list.
-			  tables[ACPI_TABLE_INDEX_DSDT]))) {
-		status = AE_NO_ACPI_TABLES;
-		goto unlock_and_exit;
-	}
-
-	/*
-	 * Find DSDT table
-	 */
-	status =
-	    acpi_os_table_override(acpi_gbl_root_table_list.
-				   tables[ACPI_TABLE_INDEX_DSDT].pointer,
-				   &table);
-	if (ACPI_SUCCESS(status) && table) {
-		/*
-		 * DSDT table has been found
-		 */
-		acpi_tb_delete_table(&acpi_gbl_root_table_list.
-				     tables[ACPI_TABLE_INDEX_DSDT]);
-		acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer =
-		    table;
-		acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].length =
-		    table->length;
-		acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].flags =
-		    ACPI_TABLE_ORIGIN_UNKNOWN;
-
-		ACPI_INFO((AE_INFO, "Table DSDT replaced by host OS"));
-		acpi_tb_print_table_header(0, table);
-
-		if (no_auto_ssdt == 0) {
-			printk(KERN_WARNING "ACPI: DSDT override uses original SSDTs unless \"acpi_no_auto_ssdt\"\n");
-		}
-	}
-
-	status =
-	    acpi_tb_verify_table(&acpi_gbl_root_table_list.
-				 tables[ACPI_TABLE_INDEX_DSDT]);
-	if (ACPI_FAILURE(status)) {
-
-		/* A valid DSDT is required */
-
-		status = AE_NO_ACPI_TABLES;
-		goto unlock_and_exit;
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-
-	/*
-	 * Load and parse tables.
-	 */
-	status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * Load any SSDT or PSDT tables. Note: Loop leaves tables locked
-	 */
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-	for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
-		if ((!ACPI_COMPARE_NAME
-		     (&(acpi_gbl_root_table_list.tables[i].signature),
-		      ACPI_SIG_SSDT)
-		     &&
-		     !ACPI_COMPARE_NAME(&
-					(acpi_gbl_root_table_list.tables[i].
-					 signature), ACPI_SIG_PSDT))
-		    ||
-		    ACPI_FAILURE(acpi_tb_verify_table
-				 (&acpi_gbl_root_table_list.tables[i]))) {
-			continue;
-		}
-
-		if (no_auto_ssdt) {
-			printk(KERN_WARNING "ACPI: SSDT ignored due to \"acpi_no_auto_ssdt\"\n");
-			continue;
-		}
-
-		/* Ignore errors while loading tables, get as many as possible */
-
-		(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-		(void)acpi_ns_load_table(i, acpi_gbl_root_node);
-		(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n"));
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_load_tables
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
- *
- ******************************************************************************/
-
-acpi_status acpi_load_tables(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_load_tables);
-
-	/*
-	 * Load the namespace from the tables
-	 */
-	status = acpi_tb_load_namespace();
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"While loading namespace from ACPI tables"));
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_load_tables)
-
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_install_table_handler
- *
- * PARAMETERS:  Handler         - Table event handler
- *              Context         - Value passed to the handler on each event
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Install table event handler
- *
- ******************************************************************************/
-acpi_status
-acpi_install_table_handler(acpi_tbl_handler handler, void *context)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_install_table_handler);
-
-	if (!handler) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Don't allow more than one handler */
-
-	if (acpi_gbl_table_handler) {
-		status = AE_ALREADY_EXISTS;
-		goto cleanup;
-	}
-
-	/* Install the handler */
-
-	acpi_gbl_table_handler = handler;
-	acpi_gbl_table_handler_context = context;
-
-      cleanup:
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_table_handler)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_remove_table_handler
- *
- * PARAMETERS:  Handler         - Table event handler that was installed
- *                                previously.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove table event handler
- *
- ******************************************************************************/
-acpi_status acpi_remove_table_handler(acpi_tbl_handler handler)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_remove_table_handler);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Make sure that the installed handler is the same */
-
-	if (!handler || handler != acpi_gbl_table_handler) {
-		status = AE_BAD_PARAMETER;
-		goto cleanup;
-	}
-
-	/* Remove the handler */
-
-	acpi_gbl_table_handler = NULL;
-
-      cleanup:
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_remove_table_handler)
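Hypothetical registration sketch; my_table_notify is a placeholder name, and its signature follows the acpi_tbl_handler typedef (event, table, context):

	static acpi_status my_table_notify(u32 event, void *table, void *context)
	{
		/* React to table load/unload events here */
		return AE_OK;
	}

	static acpi_status example_register(void)
	{
		acpi_status status;

		status = acpi_install_table_handler(my_table_notify, NULL);
		if (ACPI_FAILURE(status))
			return status;	/* AE_ALREADY_EXISTS if a handler is present */

		/* ... use the handler ... */

		return acpi_remove_table_handler(my_table_notify);
	}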
-
-
-static int __init acpi_no_auto_ssdt_setup(char *s)
-{
-	printk(KERN_NOTICE "ACPI: SSDT auto-load disabled\n");
-
-	no_auto_ssdt = 1;
-
-	return 1;
-}
-
-__setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
diff --git a/drivers/acpi/tables/tbxfroot.c b/drivers/acpi/tables/tbxfroot.c
deleted file mode 100644
index 2d157e0..0000000
--- a/drivers/acpi/tables/tbxfroot.c
+++ /dev/null
@@ -1,273 +0,0 @@
-/******************************************************************************
- *
- * Module Name: tbxfroot - Find the root ACPI table (RSDT)
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/actables.h>
-
-#define _COMPONENT          ACPI_TABLES
-ACPI_MODULE_NAME("tbxfroot")
-
-/* Local prototypes */
-static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length);
-
-static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_validate_rsdp
- *
- * PARAMETERS:  Rsdp                - Pointer to unvalidated RSDP
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Validate the RSDP (ptr)
- *
- ******************************************************************************/
-
-static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
-{
-	ACPI_FUNCTION_ENTRY();
-
-	/*
-	 * The signature and checksum must both be correct
-	 *
-	 * Note: Sometimes there exists more than one RSDP in memory; the valid
-	 * RSDP has a valid checksum, all others have an invalid checksum.
-	 */
-	if (ACPI_STRNCMP((char *)rsdp, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)
-	    != 0) {
-
-		/* Nope, BAD Signature */
-
-		return (AE_BAD_SIGNATURE);
-	}
-
-	/* Check the standard checksum */
-
-	if (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) {
-		return (AE_BAD_CHECKSUM);
-	}
-
-	/* Check extended checksum if table version >= 2 */
-
-	if ((rsdp->revision >= 2) &&
-	    (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0)) {
-		return (AE_BAD_CHECKSUM);
-	}
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_find_root_pointer
- *
- * PARAMETERS:  table_address           - Where the table pointer is returned
- *
- * RETURN:      Status, RSDP physical address
- *
- * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor
- *              pointer structure.  If it is found, set *RSDP to point to it.
- *
- * NOTE1:       The RSDP must be either in the first 1_k of the Extended
- *              BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
- *              Only a 32-bit physical address is necessary.
- *
- * NOTE2:       This function is always available, regardless of the
- *              initialization state of the rest of ACPI.
- *
- ******************************************************************************/
-
-acpi_status acpi_find_root_pointer(acpi_size *table_address)
-{
-	u8 *table_ptr;
-	u8 *mem_rover;
-	u32 physical_address;
-
-	ACPI_FUNCTION_TRACE(acpi_find_root_pointer);
-
-	/* 1a) Get the location of the Extended BIOS Data Area (EBDA) */
-
-	table_ptr = acpi_os_map_memory((acpi_physical_address)
-				       ACPI_EBDA_PTR_LOCATION,
-				       ACPI_EBDA_PTR_LENGTH);
-	if (!table_ptr) {
-		ACPI_ERROR((AE_INFO,
-			    "Could not map memory at %8.8X for length %X",
-			    ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH));
-
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	ACPI_MOVE_16_TO_32(&physical_address, table_ptr);
-
-	/* Convert segment part to physical address */
-
-	physical_address <<= 4;
-	acpi_os_unmap_memory(table_ptr, ACPI_EBDA_PTR_LENGTH);
-
-	/* EBDA present? */
-
-	if (physical_address > 0x400) {
-		/*
-		 * 1b) Search EBDA paragraphs (EBDA is required to be a
-		 *     minimum of 1_k length)
-		 */
-		table_ptr = acpi_os_map_memory((acpi_physical_address)
-					       physical_address,
-					       ACPI_EBDA_WINDOW_SIZE);
-		if (!table_ptr) {
-			ACPI_ERROR((AE_INFO,
-				    "Could not map memory at %8.8X for length %X",
-				    physical_address, ACPI_EBDA_WINDOW_SIZE));
-
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-
-		mem_rover =
-		    acpi_tb_scan_memory_for_rsdp(table_ptr,
-						 ACPI_EBDA_WINDOW_SIZE);
-		acpi_os_unmap_memory(table_ptr, ACPI_EBDA_WINDOW_SIZE);
-
-		if (mem_rover) {
-
-			/* Return the physical address */
-
-			physical_address +=
-			    (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
-
-			*table_address = physical_address;
-			return_ACPI_STATUS(AE_OK);
-		}
-	}
-
-	/*
-	 * 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh
-	 */
-	table_ptr = acpi_os_map_memory((acpi_physical_address)
-				       ACPI_HI_RSDP_WINDOW_BASE,
-				       ACPI_HI_RSDP_WINDOW_SIZE);
-
-	if (!table_ptr) {
-		ACPI_ERROR((AE_INFO,
-			    "Could not map memory at %8.8X for length %X",
-			    ACPI_HI_RSDP_WINDOW_BASE,
-			    ACPI_HI_RSDP_WINDOW_SIZE));
-
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	mem_rover =
-	    acpi_tb_scan_memory_for_rsdp(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
-	acpi_os_unmap_memory(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
-
-	if (mem_rover) {
-
-		/* Return the physical address */
-
-		physical_address = (u32)
-		    (ACPI_HI_RSDP_WINDOW_BASE +
-		     ACPI_PTR_DIFF(mem_rover, table_ptr));
-
-		*table_address = physical_address;
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* A valid RSDP was not found */
-
-	ACPI_ERROR((AE_INFO, "A valid RSDP was not found"));
-	return_ACPI_STATUS(AE_NOT_FOUND);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_scan_memory_for_rsdp
- *
- * PARAMETERS:  start_address       - Starting pointer for search
- *              Length              - Maximum length to search
- *
- * RETURN:      Pointer to the RSDP if found, otherwise NULL.
- *
- * DESCRIPTION: Search a block of memory for the RSDP signature
- *
- ******************************************************************************/
-static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
-{
-	acpi_status status;
-	u8 *mem_rover;
-	u8 *end_address;
-
-	ACPI_FUNCTION_TRACE(tb_scan_memory_for_rsdp);
-
-	end_address = start_address + length;
-
-	/* Search from given start address for the requested length */
-
-	for (mem_rover = start_address; mem_rover < end_address;
-	     mem_rover += ACPI_RSDP_SCAN_STEP) {
-
-		/* The RSDP signature and checksum must both be correct */
-
-		status =
-		    acpi_tb_validate_rsdp(ACPI_CAST_PTR
-					  (struct acpi_table_rsdp, mem_rover));
-		if (ACPI_SUCCESS(status)) {
-
-			/* Sig and checksum valid, we have found a real RSDP */
-
-			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-					  "RSDP located at physical address %p\n",
-					  mem_rover));
-			return_PTR(mem_rover);
-		}
-
-		/* No sig match or bad checksum, keep searching */
-	}
-
-	/* Searched entire block, no RSDP was found */
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "Searched entire block from %p, valid RSDP was not found\n",
-			  start_address));
-	return_PTR(NULL);
-}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 073ff09..99e6f1f 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -416,7 +416,8 @@
 	}
 
 	/* Passive (optional) */
-	if (flag & ACPI_TRIPS_PASSIVE) {
+	if (((flag & ACPI_TRIPS_PASSIVE) && tz->trips.passive.flags.valid) ||
+		(flag == ACPI_TRIPS_INIT)) {
 		valid = tz->trips.passive.flags.valid;
 		if (psv == -1) {
 			status = AE_SUPPORT;
@@ -462,8 +463,11 @@
 		memset(&devices, 0, sizeof(struct acpi_handle_list));
 		status = acpi_evaluate_reference(tz->device->handle, "_PSL",
 							NULL, &devices);
-		if (ACPI_FAILURE(status))
+		if (ACPI_FAILURE(status)) {
+			printk(KERN_WARNING PREFIX
+				"Invalid passive threshold\n");
 			tz->trips.passive.flags.valid = 0;
+		}
 		else
 			tz->trips.passive.flags.valid = 1;
 
@@ -487,7 +491,8 @@
 		if (act == -1)
 			break; /* disable all active trip points */
 
-		if (flag & ACPI_TRIPS_ACTIVE) {
+		if ((flag == ACPI_TRIPS_INIT) || ((flag & ACPI_TRIPS_ACTIVE) &&
+			tz->trips.active[i].flags.valid)) {
 			status = acpi_evaluate_integer(tz->device->handle,
 							name, NULL, &tmp);
 			if (ACPI_FAILURE(status)) {
@@ -521,8 +526,11 @@
 			memset(&devices, 0, sizeof(struct acpi_handle_list));
 			status = acpi_evaluate_reference(tz->device->handle,
 						name, NULL, &devices);
-			if (ACPI_FAILURE(status))
+			if (ACPI_FAILURE(status)) {
+				printk(KERN_WARNING PREFIX
+					"Invalid active%d threshold\n", i);
 				tz->trips.active[i].flags.valid = 0;
+			}
 			else
 				tz->trips.active[i].flags.valid = 1;
 
diff --git a/drivers/acpi/utilities/Makefile b/drivers/acpi/utilities/Makefile
deleted file mode 100644
index 88eff14..0000000
--- a/drivers/acpi/utilities/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for all Linux ACPI interpreter subdirectories
-#
-
-obj-y := utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
-		utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
-		utstate.o utmutex.o utobject.o utcache.o utresrc.o
-
-EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
deleted file mode 100644
index 241c535..0000000
--- a/drivers/acpi/utilities/utalloc.c
+++ /dev/null
@@ -1,382 +0,0 @@
-/******************************************************************************
- *
- * Module Name: utalloc - local memory allocation routines
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acdebug.h>
-
-#define _COMPONENT          ACPI_UTILITIES
-ACPI_MODULE_NAME("utalloc")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_create_caches
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create all local caches
- *
- ******************************************************************************/
-acpi_status acpi_ut_create_caches(void)
-{
-	acpi_status status;
-
-	/* Object Caches, for frequently used objects */
-
-	status =
-	    acpi_os_create_cache("Acpi-Namespace",
-				 sizeof(struct acpi_namespace_node),
-				 ACPI_MAX_NAMESPACE_CACHE_DEPTH,
-				 &acpi_gbl_namespace_cache);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	status =
-	    acpi_os_create_cache("Acpi-State", sizeof(union acpi_generic_state),
-				 ACPI_MAX_STATE_CACHE_DEPTH,
-				 &acpi_gbl_state_cache);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	status =
-	    acpi_os_create_cache("Acpi-Parse",
-				 sizeof(struct acpi_parse_obj_common),
-				 ACPI_MAX_PARSE_CACHE_DEPTH,
-				 &acpi_gbl_ps_node_cache);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	status =
-	    acpi_os_create_cache("Acpi-ParseExt",
-				 sizeof(struct acpi_parse_obj_named),
-				 ACPI_MAX_EXTPARSE_CACHE_DEPTH,
-				 &acpi_gbl_ps_node_ext_cache);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	status =
-	    acpi_os_create_cache("Acpi-Operand",
-				 sizeof(union acpi_operand_object),
-				 ACPI_MAX_OBJECT_CACHE_DEPTH,
-				 &acpi_gbl_operand_cache);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-
-	/* Memory allocation lists */
-
-	status = acpi_ut_create_list("Acpi-Global", 0, &acpi_gbl_global_list);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	status =
-	    acpi_ut_create_list("Acpi-Namespace",
-				sizeof(struct acpi_namespace_node),
-				&acpi_gbl_ns_node_list);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-#endif
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_delete_caches
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Purge and delete all local caches
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_delete_caches(void)
-{
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-	char buffer[7];
-
-	if (acpi_gbl_display_final_mem_stats) {
-		ACPI_STRCPY(buffer, "MEMORY");
-		(void)acpi_db_display_statistics(buffer);
-	}
-#endif
-
-	(void)acpi_os_delete_cache(acpi_gbl_namespace_cache);
-	acpi_gbl_namespace_cache = NULL;
-
-	(void)acpi_os_delete_cache(acpi_gbl_state_cache);
-	acpi_gbl_state_cache = NULL;
-
-	(void)acpi_os_delete_cache(acpi_gbl_operand_cache);
-	acpi_gbl_operand_cache = NULL;
-
-	(void)acpi_os_delete_cache(acpi_gbl_ps_node_cache);
-	acpi_gbl_ps_node_cache = NULL;
-
-	(void)acpi_os_delete_cache(acpi_gbl_ps_node_ext_cache);
-	acpi_gbl_ps_node_ext_cache = NULL;
-
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-
-	/* Debug only - display leftover memory allocation, if any */
-
-	acpi_ut_dump_allocations(ACPI_UINT32_MAX, NULL);
-
-	/* Free memory lists */
-
-	ACPI_FREE(acpi_gbl_global_list);
-	acpi_gbl_global_list = NULL;
-
-	ACPI_FREE(acpi_gbl_ns_node_list);
-	acpi_gbl_ns_node_list = NULL;
-#endif
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_validate_buffer
- *
- * PARAMETERS:  Buffer              - Buffer descriptor to be validated
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Perform parameter validation checks on an struct acpi_buffer
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_validate_buffer(struct acpi_buffer * buffer)
-{
-
-	/* Obviously, the structure pointer must be valid */
-
-	if (!buffer) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/* Special semantics for the length */
-
-	if ((buffer->length == ACPI_NO_BUFFER) ||
-	    (buffer->length == ACPI_ALLOCATE_BUFFER) ||
-	    (buffer->length == ACPI_ALLOCATE_LOCAL_BUFFER)) {
-		return (AE_OK);
-	}
-
-	/* Length is valid, the buffer pointer must be also */
-
-	if (!buffer->pointer) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_initialize_buffer
- *
- * PARAMETERS:  Buffer              - Buffer to be validated
- *              required_length     - Length needed
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Validate that the buffer is of the required length or
- *              allocate a new buffer. Returned buffer is always zeroed.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
-			  acpi_size required_length)
-{
-	acpi_size input_buffer_length;
-
-	/* Parameter validation */
-
-	if (!buffer || !required_length) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/*
-	 * Buffer->Length is used as both an input and output parameter. Get the
-	 * input actual length and set the output required buffer length.
-	 */
-	input_buffer_length = buffer->length;
-	buffer->length = required_length;
-
-	/*
-	 * The input buffer length contains the actual buffer length, or the type
-	 * of buffer to be allocated by this routine.
-	 */
-	switch (input_buffer_length) {
-	case ACPI_NO_BUFFER:
-
-		/* Return the exception (and the required buffer length) */
-
-		return (AE_BUFFER_OVERFLOW);
-
-	case ACPI_ALLOCATE_BUFFER:
-
-		/* Allocate a new buffer */
-
-		buffer->pointer = acpi_os_allocate(required_length);
-		break;
-
-	case ACPI_ALLOCATE_LOCAL_BUFFER:
-
-		/* Allocate a new buffer with local interface to allow tracking */
-
-		buffer->pointer = ACPI_ALLOCATE(required_length);
-		break;
-
-	default:
-
-		/* Existing buffer: Validate the size of the buffer */
-
-		if (input_buffer_length < required_length) {
-			return (AE_BUFFER_OVERFLOW);
-		}
-		break;
-	}
-
-	/* Validate allocation from above or input buffer pointer */
-
-	if (!buffer->pointer) {
-		return (AE_NO_MEMORY);
-	}
-
-	/* Have a valid buffer, clear it */
-
-	ACPI_MEMSET(buffer->pointer, 0, required_length);
-	return (AE_OK);
-}
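For illustration, the ACPI_ALLOCATE_BUFFER mode is most often seen from a caller such as acpi_evaluate_object(); dev_handle and the _STA method below are placeholders, and on Linux the returned buffer is freed with kfree() because acpi_os_allocate() maps to kmalloc():

	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	status = acpi_evaluate_object(dev_handle, "_STA", NULL, &buf);
	if (ACPI_SUCCESS(status)) {
		/* buf.length and buf.pointer were filled in via acpi_ut_initialize_buffer */
		kfree(buf.pointer);
	}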
-
-#ifdef NOT_USED_BY_LINUX
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_allocate
- *
- * PARAMETERS:  Size                - Size of the allocation
- *              Component           - Component type of caller
- *              Module              - Source file name of caller
- *              Line                - Line number of caller
- *
- * RETURN:      Address of the allocated memory on success, NULL on failure.
- *
- * DESCRIPTION: Subsystem equivalent of malloc.
- *
- ******************************************************************************/
-
-void *acpi_ut_allocate(acpi_size size,
-		       u32 component, const char *module, u32 line)
-{
-	void *allocation;
-
-	ACPI_FUNCTION_TRACE_U32(ut_allocate, size);
-
-	/* Check for an inadvertent size of zero bytes */
-
-	if (!size) {
-		ACPI_WARNING((module, line,
-			      "Attempt to allocate zero bytes, allocating 1 byte"));
-		size = 1;
-	}
-
-	allocation = acpi_os_allocate(size);
-	if (!allocation) {
-
-		/* Report allocation error */
-
-		ACPI_WARNING((module, line,
-			      "Could not allocate size %X", (u32) size));
-
-		return_PTR(NULL);
-	}
-
-	return_PTR(allocation);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_allocate_zeroed
- *
- * PARAMETERS:  Size                - Size of the allocation
- *              Component           - Component type of caller
- *              Module              - Source file name of caller
- *              Line                - Line number of caller
- *
- * RETURN:      Address of the allocated memory on success, NULL on failure.
- *
- * DESCRIPTION: Subsystem equivalent of calloc. Allocate and zero memory.
- *
- ******************************************************************************/
-
-void *acpi_ut_allocate_zeroed(acpi_size size,
-			      u32 component, const char *module, u32 line)
-{
-	void *allocation;
-
-	ACPI_FUNCTION_ENTRY();
-
-	allocation = acpi_ut_allocate(size, component, module, line);
-	if (allocation) {
-
-		/* Clear the memory block */
-
-		ACPI_MEMSET(allocation, 0, size);
-	}
-
-	return (allocation);
-}
-#endif
diff --git a/drivers/acpi/utilities/utcache.c b/drivers/acpi/utilities/utcache.c
deleted file mode 100644
index 245fa80..0000000
--- a/drivers/acpi/utilities/utcache.c
+++ /dev/null
@@ -1,314 +0,0 @@
-/******************************************************************************
- *
- * Module Name: utcache - local cache allocation routines
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-
-#define _COMPONENT          ACPI_UTILITIES
-ACPI_MODULE_NAME("utcache")
-#ifdef ACPI_USE_LOCAL_CACHE
-/*******************************************************************************
- *
- * FUNCTION:    acpi_os_create_cache
- *
- * PARAMETERS:  cache_name      - Ascii name for the cache
- *              object_size     - Size of each cached object
- *              max_depth       - Maximum depth of the cache (in objects)
- *              return_cache    - Where the new cache object is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a cache object
- *
- ******************************************************************************/
-acpi_status
-acpi_os_create_cache(char *cache_name,
-		     u16 object_size,
-		     u16 max_depth, struct acpi_memory_list ** return_cache)
-{
-	struct acpi_memory_list *cache;
-
-	ACPI_FUNCTION_ENTRY();
-
-	if (!cache_name || !return_cache || (object_size < 16)) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/* Create the cache object */
-
-	cache = acpi_os_allocate(sizeof(struct acpi_memory_list));
-	if (!cache) {
-		return (AE_NO_MEMORY);
-	}
-
-	/* Populate the cache object and return it */
-
-	ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
-	cache->link_offset = 8;
-	cache->list_name = cache_name;
-	cache->object_size = object_size;
-	cache->max_depth = max_depth;
-
-	*return_cache = cache;
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_os_purge_cache
- *
- * PARAMETERS:  Cache           - Handle to cache object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Free all objects within the requested cache.
- *
- ******************************************************************************/
-
-acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
-{
-	char *next;
-
-	ACPI_FUNCTION_ENTRY();
-
-	if (!cache) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/* Walk the list of objects in this cache */
-
-	while (cache->list_head) {
-
-		/* Delete and unlink one cached state object */
-
-		next = *(ACPI_CAST_INDIRECT_PTR(char,
-						&(((char *)cache->
-						   list_head)[cache->
-							      link_offset])));
-		ACPI_FREE(cache->list_head);
-
-		cache->list_head = next;
-		cache->current_depth--;
-	}
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_os_delete_cache
- *
- * PARAMETERS:  Cache           - Handle to cache object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Free all objects within the requested cache and delete the
- *              cache object.
- *
- ******************************************************************************/
-
-acpi_status acpi_os_delete_cache(struct acpi_memory_list * cache)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* Purge all objects in the cache */
-
-	status = acpi_os_purge_cache(cache);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	/* Now we can delete the cache object */
-
-	ACPI_FREE(cache);
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_os_release_object
- *
- * PARAMETERS:  Cache       - Handle to cache object
- *              Object      - The object to be released
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Release an object to the specified cache.  If cache is full,
- *              the object is deleted.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_os_release_object(struct acpi_memory_list * cache, void *object)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_ENTRY();
-
-	if (!cache || !object) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/* If cache is full, just free this object */
-
-	if (cache->current_depth >= cache->max_depth) {
-		ACPI_FREE(object);
-		ACPI_MEM_TRACKING(cache->total_freed++);
-	}
-
-	/* Otherwise put this object back into the cache */
-
-	else {
-		status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
-		if (ACPI_FAILURE(status)) {
-			return (status);
-		}
-
-		/* Mark the object as cached */
-
-		ACPI_MEMSET(object, 0xCA, cache->object_size);
-		ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_CACHED);
-
-		/* Put the object at the head of the cache list */
-
-		*(ACPI_CAST_INDIRECT_PTR(char,
-					 &(((char *)object)[cache->
-							    link_offset]))) =
-		    cache->list_head;
-		cache->list_head = object;
-		cache->current_depth++;
-
-		(void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
-	}
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_os_acquire_object
- *
- * PARAMETERS:  Cache           - Handle to cache object
- *
- * RETURN:      The acquired object. NULL on error
- *
- * DESCRIPTION: Get an object from the specified cache.  If cache is empty,
- *              the object is allocated.
- *
- ******************************************************************************/
-
-void *acpi_os_acquire_object(struct acpi_memory_list *cache)
-{
-	acpi_status status;
-	void *object;
-
-	ACPI_FUNCTION_NAME(os_acquire_object);
-
-	if (!cache) {
-		return (NULL);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
-	if (ACPI_FAILURE(status)) {
-		return (NULL);
-	}
-
-	ACPI_MEM_TRACKING(cache->requests++);
-
-	/* Check the cache first */
-
-	if (cache->list_head) {
-
-		/* There is an object available, use it */
-
-		object = cache->list_head;
-		cache->list_head = *(ACPI_CAST_INDIRECT_PTR(char,
-							    &(((char *)
-							       object)[cache->
-								       link_offset])));
-
-		cache->current_depth--;
-
-		ACPI_MEM_TRACKING(cache->hits++);
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Object %p from %s cache\n", object,
-				  cache->list_name));
-
-		status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
-		if (ACPI_FAILURE(status)) {
-			return (NULL);
-		}
-
-		/* Clear (zero) the previously used Object */
-
-		ACPI_MEMSET(object, 0, cache->object_size);
-	} else {
-		/* The cache is empty, create a new object */
-
-		ACPI_MEM_TRACKING(cache->total_allocated++);
-
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-		if ((cache->total_allocated - cache->total_freed) >
-		    cache->max_occupied) {
-			cache->max_occupied =
-			    cache->total_allocated - cache->total_freed;
-		}
-#endif
-
-		/* Avoid deadlock with ACPI_ALLOCATE_ZEROED */
-
-		status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
-		if (ACPI_FAILURE(status)) {
-			return (NULL);
-		}
-
-		object = ACPI_ALLOCATE_ZEROED(cache->object_size);
-		if (!object) {
-			return (NULL);
-		}
-	}
-
-	return (object);
-}
-#endif				/* ACPI_USE_LOCAL_CACHE */
diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/utilities/utcopy.c
deleted file mode 100644
index 5b2f7c2..0000000
--- a/drivers/acpi/utilities/utcopy.c
+++ /dev/null
@@ -1,969 +0,0 @@
-/******************************************************************************
- *
- * Module Name: utcopy - Internal to external object translation utilities
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-
-
-#define _COMPONENT          ACPI_UTILITIES
-ACPI_MODULE_NAME("utcopy")
-
-/* Local prototypes */
-static acpi_status
-acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
-				union acpi_object *external_object,
-				u8 * data_space, acpi_size * buffer_space_used);
-
-static acpi_status
-acpi_ut_copy_ielement_to_ielement(u8 object_type,
-				  union acpi_operand_object *source_object,
-				  union acpi_generic_state *state,
-				  void *context);
-
-static acpi_status
-acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
-				  u8 * buffer, acpi_size * space_used);
-
-static acpi_status
-acpi_ut_copy_esimple_to_isimple(union acpi_object *user_obj,
-				union acpi_operand_object **return_obj);
-
-static acpi_status
-acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
-				  union acpi_operand_object **internal_object);
-
-static acpi_status
-acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
-			   union acpi_operand_object *dest_desc);
-
-static acpi_status
-acpi_ut_copy_ielement_to_eelement(u8 object_type,
-				  union acpi_operand_object *source_object,
-				  union acpi_generic_state *state,
-				  void *context);
-
-static acpi_status
-acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
-				  union acpi_operand_object *dest_obj,
-				  struct acpi_walk_state *walk_state);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_copy_isimple_to_esimple
- *
- * PARAMETERS:  internal_object     - Source object to be copied
- *              external_object     - Where to return the copied object
- *              data_space          - Where object data is returned (such as
- *                                    buffer and string data)
- *              buffer_space_used   - Length of data_space that was used
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to copy a simple internal object to
- *              an external object.
- *
- *              The data_space buffer is assumed to have sufficient space for
- *              the object.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
-				union acpi_object *external_object,
-				u8 * data_space, acpi_size * buffer_space_used)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ut_copy_isimple_to_esimple);
-
-	*buffer_space_used = 0;
-
-	/*
-	 * Check for NULL object case (could be an uninitialized
-	 * package element)
-	 */
-	if (!internal_object) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Always clear the external object */
-
-	ACPI_MEMSET(external_object, 0, sizeof(union acpi_object));
-
-	/*
-	 * In general, the external object will be the same type as
-	 * the internal object
-	 */
-	external_object->type = ACPI_GET_OBJECT_TYPE(internal_object);
-
-	/* However, only a limited number of external types are supported */
-
-	switch (ACPI_GET_OBJECT_TYPE(internal_object)) {
-	case ACPI_TYPE_STRING:
-
-		external_object->string.pointer = (char *)data_space;
-		external_object->string.length = internal_object->string.length;
-		*buffer_space_used = ACPI_ROUND_UP_TO_NATIVE_WORD((acpi_size)
-								  internal_object->
-								  string.
-								  length + 1);
-
-		ACPI_MEMCPY((void *)data_space,
-			    (void *)internal_object->string.pointer,
-			    (acpi_size) internal_object->string.length + 1);
-		break;
-
-	case ACPI_TYPE_BUFFER:
-
-		external_object->buffer.pointer = data_space;
-		external_object->buffer.length = internal_object->buffer.length;
-		*buffer_space_used =
-		    ACPI_ROUND_UP_TO_NATIVE_WORD(internal_object->string.
-						 length);
-
-		ACPI_MEMCPY((void *)data_space,
-			    (void *)internal_object->buffer.pointer,
-			    internal_object->buffer.length);
-		break;
-
-	case ACPI_TYPE_INTEGER:
-
-		external_object->integer.value = internal_object->integer.value;
-		break;
-
-	case ACPI_TYPE_LOCAL_REFERENCE:
-
-		/* This is an object reference. */
-
-		switch (internal_object->reference.class) {
-		case ACPI_REFCLASS_NAME:
-
-			/*
-			 * For namepath, return the object handle ("reference")
-			 * We are referring to the namespace node
-			 */
-			external_object->reference.handle =
-			    internal_object->reference.node;
-			external_object->reference.actual_type =
-			    acpi_ns_get_type(internal_object->reference.node);
-			break;
-
-		default:
-
-			/* All other reference types are unsupported */
-
-			return_ACPI_STATUS(AE_TYPE);
-		}
-		break;
-
-	case ACPI_TYPE_PROCESSOR:
-
-		external_object->processor.proc_id =
-		    internal_object->processor.proc_id;
-		external_object->processor.pblk_address =
-		    internal_object->processor.address;
-		external_object->processor.pblk_length =
-		    internal_object->processor.length;
-		break;
-
-	case ACPI_TYPE_POWER:
-
-		external_object->power_resource.system_level =
-		    internal_object->power_resource.system_level;
-
-		external_object->power_resource.resource_order =
-		    internal_object->power_resource.resource_order;
-		break;
-
-	default:
-		/*
-		 * There is no corresponding external object type
-		 */
-		ACPI_ERROR((AE_INFO,
-			    "Unsupported object type, cannot convert to external object: %s",
-			    acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE
-						  (internal_object))));
-
-		return_ACPI_STATUS(AE_SUPPORT);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_copy_ielement_to_eelement
- *
- * PARAMETERS:  acpi_pkg_callback
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Copy one package element to another package element
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_copy_ielement_to_eelement(u8 object_type,
-				  union acpi_operand_object *source_object,
-				  union acpi_generic_state *state,
-				  void *context)
-{
-	acpi_status status = AE_OK;
-	struct acpi_pkg_info *info = (struct acpi_pkg_info *)context;
-	acpi_size object_space;
-	u32 this_index;
-	union acpi_object *target_object;
-
-	ACPI_FUNCTION_ENTRY();
-
-	this_index = state->pkg.index;
-	target_object = (union acpi_object *)
-	    &((union acpi_object *)(state->pkg.dest_object))->package.
-	    elements[this_index];
-
-	switch (object_type) {
-	case ACPI_COPY_TYPE_SIMPLE:
-
-		/*
-		 * This is a simple or null object
-		 */
-		status = acpi_ut_copy_isimple_to_esimple(source_object,
-							 target_object,
-							 info->free_space,
-							 &object_space);
-		if (ACPI_FAILURE(status)) {
-			return (status);
-		}
-		break;
-
-	case ACPI_COPY_TYPE_PACKAGE:
-
-		/*
-		 * Build the package object
-		 */
-		target_object->type = ACPI_TYPE_PACKAGE;
-		target_object->package.count = source_object->package.count;
-		target_object->package.elements =
-		    ACPI_CAST_PTR(union acpi_object, info->free_space);
-
-		/*
-		 * Pass the new package object back to the package walk routine
-		 */
-		state->pkg.this_target_obj = target_object;
-
-		/*
-		 * Save space for the array of objects (Package elements)
-		 * update the buffer length counter
-		 */
-		object_space = ACPI_ROUND_UP_TO_NATIVE_WORD((acpi_size)
-							    target_object->
-							    package.count *
-							    sizeof(union
-								   acpi_object));
-		break;
-
-	default:
-		return (AE_BAD_PARAMETER);
-	}
-
-	info->free_space += object_space;
-	info->length += object_space;
-	return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_copy_ipackage_to_epackage
- *
- * PARAMETERS:  internal_object     - Pointer to the object we are returning
- *              Buffer              - Where the object is returned
- *              space_used          - Where the object length is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to place a package object in a user
- *              buffer.  A package object by definition contains other objects.
- *
- *              The buffer is assumed to have sufficient space for the object.
- *              The caller must have verified the buffer length needed using the
- *              acpi_ut_get_object_size function before calling this function.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
-				  u8 * buffer, acpi_size * space_used)
-{
-	union acpi_object *external_object;
-	acpi_status status;
-	struct acpi_pkg_info info;
-
-	ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_epackage);
-
-	/*
-	 * First package at head of the buffer
-	 */
-	external_object = ACPI_CAST_PTR(union acpi_object, buffer);
-
-	/*
-	 * Free space begins right after the first package
-	 */
-	info.length = ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
-	info.free_space =
-	    buffer + ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
-	info.object_space = 0;
-	info.num_packages = 1;
-
-	external_object->type = ACPI_GET_OBJECT_TYPE(internal_object);
-	external_object->package.count = internal_object->package.count;
-	external_object->package.elements = ACPI_CAST_PTR(union acpi_object,
-							  info.free_space);
-
-	/*
-	 * Leave room for an array of ACPI_OBJECTS in the buffer
-	 * and move the free space past it
-	 */
-	info.length += (acpi_size) external_object->package.count *
-	    ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
-	info.free_space += external_object->package.count *
-	    ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
-
-	status = acpi_ut_walk_package_tree(internal_object, external_object,
-					   acpi_ut_copy_ielement_to_eelement,
-					   &info);
-
-	*space_used = info.length;
-	return_ACPI_STATUS(status);
-}
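
The routine above relies on a two-pass contract: the caller first measures the required space with acpi_ut_get_object_size(), then hands in a buffer guaranteed to be large enough. A minimal sketch of that measure-then-fill pattern in plain C; the demo_* names are hypothetical stand-ins for the size/copy pair, not ACPICA functions.

#include <stdlib.h>
#include <string.h>

/* First pass: compute exactly how much space the flat copy will need. */
static size_t demo_measure(const char *src)
{
	return strlen(src) + 1;
}

/* Second pass: allocate that much and fill it; the copy cannot overflow. */
static char *demo_export(const char *src, size_t *len_out)
{
	size_t len = demo_measure(src);
	char *dst = malloc(len);

	if (!dst)
		return NULL;

	memcpy(dst, src, len);		/* len came from the measure pass */
	*len_out = len;
	return dst;
}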
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_copy_iobject_to_eobject
- *
- * PARAMETERS:  internal_object     - The internal object to be converted
- *              buffer_ptr          - Where the object is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to build an API object to be returned to
- *              the caller.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *internal_object,
-				struct acpi_buffer *ret_buffer)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ut_copy_iobject_to_eobject);
-
-	if (ACPI_GET_OBJECT_TYPE(internal_object) == ACPI_TYPE_PACKAGE) {
-		/*
-		 * Package object:  Copy all subobjects (including
-		 * nested packages)
-		 */
-		status = acpi_ut_copy_ipackage_to_epackage(internal_object,
-							   ret_buffer->pointer,
-							   &ret_buffer->length);
-	} else {
-		/*
-		 * Build a simple object (no nested objects)
-		 */
-		status = acpi_ut_copy_isimple_to_esimple(internal_object,
-							 ACPI_CAST_PTR(union
-								       acpi_object,
-								       ret_buffer->
-								       pointer),
-							 ACPI_ADD_PTR(u8,
-								      ret_buffer->
-								      pointer,
-								      ACPI_ROUND_UP_TO_NATIVE_WORD
-								      (sizeof
-								       (union
-									acpi_object))),
-							 &ret_buffer->length);
-		/*
-		 * build simple does not include the object size in the length
-		 * so we add it in here
-		 */
-		ret_buffer->length += sizeof(union acpi_object);
-	}
-
-	return_ACPI_STATUS(status);
-}
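
For context, a typical consumer of the conversion above is a driver that asks ACPICA to allocate and fill the return buffer itself. A minimal sketch, assuming a handle to a device whose _STA method returns an integer; error handling is abbreviated, and driver code may use kfree() where ACPI_FREE is shown.

#include <acpi/acpi.h>

static acpi_status example_read_sta(acpi_handle handle, acpi_integer *value)
{
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;

	/* ACPICA sizes the buffer and copies the internal result into it */
	status = acpi_evaluate_object(handle, "_STA", NULL, &output);
	if (ACPI_FAILURE(status))
		return status;

	obj = output.pointer;
	if (obj && obj->type == ACPI_TYPE_INTEGER)
		*value = obj->integer.value;
	else
		status = AE_TYPE;

	ACPI_FREE(output.pointer);	/* the caller owns the returned buffer */
	return status;
}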
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_copy_esimple_to_isimple
- *
- * PARAMETERS:  external_object     - The external object to be converted
- *              ret_internal_object - Where the internal object is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function copies an external object to an internal one.
- *              NOTE: Pointers can be copied, we don't need to copy data.
- *              (The pointers have to be valid in our address space no matter
- *              what we do with them!)
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
-				union acpi_operand_object **ret_internal_object)
-{
-	union acpi_operand_object *internal_object;
-
-	ACPI_FUNCTION_TRACE(ut_copy_esimple_to_isimple);
-
-	/*
-	 * Simple types supported are: String, Buffer, Integer
-	 */
-	switch (external_object->type) {
-	case ACPI_TYPE_STRING:
-	case ACPI_TYPE_BUFFER:
-	case ACPI_TYPE_INTEGER:
-	case ACPI_TYPE_LOCAL_REFERENCE:
-
-		internal_object = acpi_ut_create_internal_object((u8)
-								 external_object->
-								 type);
-		if (!internal_object) {
-			return_ACPI_STATUS(AE_NO_MEMORY);
-		}
-		break;
-
-	case ACPI_TYPE_ANY:	/* This is the case for a NULL object */
-
-		*ret_internal_object = NULL;
-		return_ACPI_STATUS(AE_OK);
-
-	default:
-		/* All other types are not supported */
-
-		ACPI_ERROR((AE_INFO,
-			    "Unsupported object type, cannot convert to internal object: %s",
-			    acpi_ut_get_type_name(external_object->type)));
-
-		return_ACPI_STATUS(AE_SUPPORT);
-	}
-
-	/* Must COPY string and buffer contents */
-
-	switch (external_object->type) {
-	case ACPI_TYPE_STRING:
-
-		internal_object->string.pointer =
-		    ACPI_ALLOCATE_ZEROED((acpi_size) external_object->string.
-					 length + 1);
-		if (!internal_object->string.pointer) {
-			goto error_exit;
-		}
-
-		ACPI_MEMCPY(internal_object->string.pointer,
-			    external_object->string.pointer,
-			    external_object->string.length);
-
-		internal_object->string.length = external_object->string.length;
-		break;
-
-	case ACPI_TYPE_BUFFER:
-
-		internal_object->buffer.pointer =
-		    ACPI_ALLOCATE_ZEROED(external_object->buffer.length);
-		if (!internal_object->buffer.pointer) {
-			goto error_exit;
-		}
-
-		ACPI_MEMCPY(internal_object->buffer.pointer,
-			    external_object->buffer.pointer,
-			    external_object->buffer.length);
-
-		internal_object->buffer.length = external_object->buffer.length;
-
-		/* Mark buffer data valid */
-
-		internal_object->buffer.flags |= AOPOBJ_DATA_VALID;
-		break;
-
-	case ACPI_TYPE_INTEGER:
-
-		internal_object->integer.value = external_object->integer.value;
-		break;
-
-	case ACPI_TYPE_LOCAL_REFERENCE:
-
-		/* TBD: should validate incoming handle */
-
-		internal_object->reference.class = ACPI_REFCLASS_NAME;
-		internal_object->reference.node =
-		    external_object->reference.handle;
-		break;
-
-	default:
-		/* Other types can't get here */
-		break;
-	}
-
-	*ret_internal_object = internal_object;
-	return_ACPI_STATUS(AE_OK);
-
-      error_exit:
-	acpi_ut_remove_reference(internal_object);
-	return_ACPI_STATUS(AE_NO_MEMORY);
-}
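
The external objects consumed here usually originate as method arguments built by a caller of acpi_evaluate_object(). A minimal sketch of such an argument list; "DEMO" is a hypothetical method name used only for illustration.

#include <acpi/acpi.h>

static acpi_status example_call_with_args(acpi_handle handle)
{
	union acpi_object args[2];
	struct acpi_object_list arg_list = { 2, args };

	args[0].type = ACPI_TYPE_INTEGER;	/* simple type: copied by value */
	args[0].integer.value = 1;

	args[1].type = ACPI_TYPE_STRING;	/* string contents are duplicated */
	args[1].string.pointer = (char *)"example";
	args[1].string.length = sizeof("example") - 1;

	return acpi_evaluate_object(handle, "DEMO", &arg_list, NULL);
}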
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_copy_epackage_to_ipackage
- *
- * PARAMETERS:  external_object     - The external object to be converted
- *              internal_object     - Where the internal object is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Copy an external package object to an internal package.
- *              Handles nested packages.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
-				  union acpi_operand_object **internal_object)
-{
-	acpi_status status = AE_OK;
-	union acpi_operand_object *package_object;
-	union acpi_operand_object **package_elements;
-	u32 i;
-
-	ACPI_FUNCTION_TRACE(ut_copy_epackage_to_ipackage);
-
-	/* Create the package object */
-
-	package_object =
-	    acpi_ut_create_package_object(external_object->package.count);
-	if (!package_object) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	package_elements = package_object->package.elements;
-
-	/*
-	 * Recursive implementation. Probably ok, since nested external packages
-	 * as parameters should be very rare.
-	 */
-	for (i = 0; i < external_object->package.count; i++) {
-		status =
-		    acpi_ut_copy_eobject_to_iobject(&external_object->package.
-						    elements[i],
-						    &package_elements[i]);
-		if (ACPI_FAILURE(status)) {
-
-			/* Truncate package and delete it */
-
-			package_object->package.count = i;
-			package_elements[i] = NULL;
-			acpi_ut_remove_reference(package_object);
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/* Mark package data valid */
-
-	package_object->package.flags |= AOPOBJ_DATA_VALID;
-
-	*internal_object = package_object;
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_copy_eobject_to_iobject
- *
- * PARAMETERS:  external_object     - The external object to be converted
- *              internal_object     - Where the internal object is returned
- *
- * RETURN:      Status              - the status of the call
- *
- * DESCRIPTION: Converts an external object to an internal object.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_copy_eobject_to_iobject(union acpi_object *external_object,
-				union acpi_operand_object **internal_object)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ut_copy_eobject_to_iobject);
-
-	if (external_object->type == ACPI_TYPE_PACKAGE) {
-		status =
-		    acpi_ut_copy_epackage_to_ipackage(external_object,
-						      internal_object);
-	} else {
-		/*
-		 * Build a simple object (no nested objects)
-		 */
-		status =
-		    acpi_ut_copy_esimple_to_isimple(external_object,
-						    internal_object);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_copy_simple_object
- *
- * PARAMETERS:  source_desc         - The internal object to be copied
- *              dest_desc           - New target object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Simple copy of one internal object to another.  Reference count
- *              of the destination object is preserved.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
-			   union acpi_operand_object *dest_desc)
-{
-	u16 reference_count;
-	union acpi_operand_object *next_object;
-
-	/* Save fields from destination that we don't want to overwrite */
-
-	reference_count = dest_desc->common.reference_count;
-	next_object = dest_desc->common.next_object;
-
-	/* Copy the entire source object over the destination object */
-
-	ACPI_MEMCPY((char *)dest_desc, (char *)source_desc,
-		    sizeof(union acpi_operand_object));
-
-	/* Restore the saved fields */
-
-	dest_desc->common.reference_count = reference_count;
-	dest_desc->common.next_object = next_object;
-
-	/* New object is not static, regardless of source */
-
-	dest_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
-
-	/* Handle the objects with extra data */
-
-	switch (ACPI_GET_OBJECT_TYPE(dest_desc)) {
-	case ACPI_TYPE_BUFFER:
-		/*
-		 * Allocate and copy the actual buffer if and only if:
-		 * 1) There is a valid buffer pointer
-		 * 2) The buffer has a length > 0
-		 */
-		if ((source_desc->buffer.pointer) &&
-		    (source_desc->buffer.length)) {
-			dest_desc->buffer.pointer =
-			    ACPI_ALLOCATE(source_desc->buffer.length);
-			if (!dest_desc->buffer.pointer) {
-				return (AE_NO_MEMORY);
-			}
-
-			/* Copy the actual buffer data */
-
-			ACPI_MEMCPY(dest_desc->buffer.pointer,
-				    source_desc->buffer.pointer,
-				    source_desc->buffer.length);
-		}
-		break;
-
-	case ACPI_TYPE_STRING:
-		/*
-		 * Allocate and copy the actual string if and only if:
-		 * 1) There is a valid string pointer
-		 * (Pointer to a NULL string is allowed)
-		 */
-		if (source_desc->string.pointer) {
-			dest_desc->string.pointer =
-			    ACPI_ALLOCATE((acpi_size) source_desc->string.
-					  length + 1);
-			if (!dest_desc->string.pointer) {
-				return (AE_NO_MEMORY);
-			}
-
-			/* Copy the actual string data */
-
-			ACPI_MEMCPY(dest_desc->string.pointer,
-				    source_desc->string.pointer,
-				    (acpi_size) source_desc->string.length + 1);
-		}
-		break;
-
-	case ACPI_TYPE_LOCAL_REFERENCE:
-		/*
-		 * We copied the reference object, so we now must add a reference
-		 * to the object pointed to by the reference
-		 *
-		 * DDBHandle reference (from Load/load_table) is a special reference,
-		 * it does not have a Reference.Object, so does not need to
-		 * increase the reference count
-		 */
-		if (source_desc->reference.class == ACPI_REFCLASS_TABLE) {
-			break;
-		}
-
-		acpi_ut_add_reference(source_desc->reference.object);
-		break;
-
-	case ACPI_TYPE_REGION:
-		/*
-		 * We copied the Region Handler, so we now must add a reference
-		 */
-		if (dest_desc->region.handler) {
-			acpi_ut_add_reference(dest_desc->region.handler);
-		}
-		break;
-
-	default:
-		/* Nothing to do for other simple objects */
-		break;
-	}
-
-	return (AE_OK);
-}
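
The copy above follows a common C idiom: copy the whole struct, then re-allocate any owned pointers so source and destination never share storage. A generic sketch of the same pattern with hypothetical names.

#include <stdlib.h>
#include <string.h>

struct demo_obj {
	size_t len;
	char *data;		/* owned allocation, must not be shared */
};

static int demo_copy(const struct demo_obj *src, struct demo_obj *dst)
{
	*dst = *src;		/* shallow copy of all scalar fields */

	if (src->data && src->len) {
		dst->data = malloc(src->len);	/* give dst its own buffer */
		if (!dst->data)
			return -1;
		memcpy(dst->data, src->data, src->len);
	}
	return 0;
}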
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_copy_ielement_to_ielement
- *
- * PARAMETERS:  acpi_pkg_callback
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Copy one package element to another package element
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_copy_ielement_to_ielement(u8 object_type,
-				  union acpi_operand_object *source_object,
-				  union acpi_generic_state *state,
-				  void *context)
-{
-	acpi_status status = AE_OK;
-	u32 this_index;
-	union acpi_operand_object **this_target_ptr;
-	union acpi_operand_object *target_object;
-
-	ACPI_FUNCTION_ENTRY();
-
-	this_index = state->pkg.index;
-	this_target_ptr = (union acpi_operand_object **)
-	    &state->pkg.dest_object->package.elements[this_index];
-
-	switch (object_type) {
-	case ACPI_COPY_TYPE_SIMPLE:
-
-		/* A null source object indicates a (legal) null package element */
-
-		if (source_object) {
-			/*
-			 * This is a simple object, just copy it
-			 */
-			target_object =
-			    acpi_ut_create_internal_object(ACPI_GET_OBJECT_TYPE
-							   (source_object));
-			if (!target_object) {
-				return (AE_NO_MEMORY);
-			}
-
-			status =
-			    acpi_ut_copy_simple_object(source_object,
-						       target_object);
-			if (ACPI_FAILURE(status)) {
-				goto error_exit;
-			}
-
-			*this_target_ptr = target_object;
-		} else {
-			/* Pass through a null element */
-
-			*this_target_ptr = NULL;
-		}
-		break;
-
-	case ACPI_COPY_TYPE_PACKAGE:
-
-		/*
-		 * This object is a package - go down another nesting level
-		 * Create and build the package object
-		 */
-		target_object =
-		    acpi_ut_create_package_object(source_object->package.count);
-		if (!target_object) {
-			return (AE_NO_MEMORY);
-		}
-
-		target_object->common.flags = source_object->common.flags;
-
-		/* Pass the new package object back to the package walk routine */
-
-		state->pkg.this_target_obj = target_object;
-
-		/* Store the object pointer in the parent package object */
-
-		*this_target_ptr = target_object;
-		break;
-
-	default:
-		return (AE_BAD_PARAMETER);
-	}
-
-	return (status);
-
-      error_exit:
-	acpi_ut_remove_reference(target_object);
-	return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_copy_ipackage_to_ipackage
- *
- * PARAMETERS:  *source_obj     - Pointer to the source package object
- *              *dest_obj       - Where the internal object is returned
- *
- * RETURN:      Status          - the status of the call
- *
- * DESCRIPTION: This function is called to copy an internal package object
- *              into another internal package object.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
-				  union acpi_operand_object *dest_obj,
-				  struct acpi_walk_state *walk_state)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_ipackage);
-
-	dest_obj->common.type = ACPI_GET_OBJECT_TYPE(source_obj);
-	dest_obj->common.flags = source_obj->common.flags;
-	dest_obj->package.count = source_obj->package.count;
-
-	/*
-	 * Create the object array and walk the source package tree
-	 */
-	dest_obj->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
-							   source_obj->package.
-							   count +
-							   1) * sizeof(void *));
-	if (!dest_obj->package.elements) {
-		ACPI_ERROR((AE_INFO, "Package allocation failure"));
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/*
-	 * Copy the package element-by-element by walking the package "tree".
-	 * This handles nested packages of arbitrary depth.
-	 */
-	status = acpi_ut_walk_package_tree(source_obj, dest_obj,
-					   acpi_ut_copy_ielement_to_ielement,
-					   walk_state);
-	if (ACPI_FAILURE(status)) {
-
-		/* On failure, delete the destination package object */
-
-		acpi_ut_remove_reference(dest_obj);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_copy_iobject_to_iobject
- *
- * PARAMETERS:  walk_state          - Current walk state
- *              source_desc         - The internal object to be copied
- *              dest_desc           - Where the copied object is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Copy an internal object to a new internal object
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
-				union acpi_operand_object **dest_desc,
-				struct acpi_walk_state *walk_state)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(ut_copy_iobject_to_iobject);
-
-	/* Create the top level object */
-
-	*dest_desc =
-	    acpi_ut_create_internal_object(ACPI_GET_OBJECT_TYPE(source_desc));
-	if (!*dest_desc) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Copy the object and possible subobjects */
-
-	if (ACPI_GET_OBJECT_TYPE(source_desc) == ACPI_TYPE_PACKAGE) {
-		status =
-		    acpi_ut_copy_ipackage_to_ipackage(source_desc, *dest_desc,
-						      walk_state);
-	} else {
-		status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
-	}
-
-	return_ACPI_STATUS(status);
-}
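
Nested packages are copied by acpi_ut_walk_package_tree() using an explicit state stack rather than recursion. A generic sketch of that iteration style, reduced to a plain C tree with hypothetical names and a fixed-size stack.

#include <stdio.h>

struct demo_node {
	int value;
	struct demo_node *child[4];	/* unused slots are NULL */
};

static void demo_walk(struct demo_node *root)
{
	struct demo_node *stack[64];	/* explicit stack instead of recursion */
	int top = 0;
	int i;

	if (!root)
		return;

	stack[top++] = root;
	while (top > 0) {
		struct demo_node *node = stack[--top];

		printf("node %d\n", node->value);

		/* Push children for later processing, bounded by stack size */
		for (i = 0; i < 4; i++) {
			if (node->child[i] && top < 64)
				stack[top++] = node->child[i];
		}
	}
}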
diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/utilities/utdebug.c
deleted file mode 100644
index fd66ecb..0000000
--- a/drivers/acpi/utilities/utdebug.c
+++ /dev/null
@@ -1,654 +0,0 @@
-/******************************************************************************
- *
- * Module Name: utdebug - Debug print routines
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-
-#define _COMPONENT          ACPI_UTILITIES
-ACPI_MODULE_NAME("utdebug")
-#ifdef ACPI_DEBUG_OUTPUT
-static acpi_thread_id acpi_gbl_prev_thread_id;
-static char *acpi_gbl_fn_entry_str = "----Entry";
-static char *acpi_gbl_fn_exit_str = "----Exit-";
-
-/* Local prototypes */
-
-static const char *acpi_ut_trim_function_name(const char *function_name);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_init_stack_ptr_trace
- *
- * PARAMETERS:  None
- *
- * RETURN:      None
- *
- * DESCRIPTION: Save the current CPU stack pointer at subsystem startup
- *
- ******************************************************************************/
-
-void acpi_ut_init_stack_ptr_trace(void)
-{
-	acpi_size current_sp;
-
-	acpi_gbl_entry_stack_pointer = &current_sp;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_track_stack_ptr
- *
- * PARAMETERS:  None
- *
- * RETURN:      None
- *
- * DESCRIPTION: Save the current CPU stack pointer
- *
- ******************************************************************************/
-
-void acpi_ut_track_stack_ptr(void)
-{
-	acpi_size current_sp;
-
-	if (&current_sp < acpi_gbl_lowest_stack_pointer) {
-		acpi_gbl_lowest_stack_pointer = &current_sp;
-	}
-
-	if (acpi_gbl_nesting_level > acpi_gbl_deepest_nesting) {
-		acpi_gbl_deepest_nesting = acpi_gbl_nesting_level;
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_trim_function_name
- *
- * PARAMETERS:  function_name       - Ascii string containing a procedure name
- *
- * RETURN:      Updated pointer to the function name
- *
- * DESCRIPTION: Remove the "Acpi" prefix from the function name, if present.
- *              This allows compiler macros such as __func__ to be used
- *              with no change to the debug output.
- *
- ******************************************************************************/
-
-static const char *acpi_ut_trim_function_name(const char *function_name)
-{
-
-	/* All Function names are longer than 4 chars, check is safe */
-
-	if (*(ACPI_CAST_PTR(u32, function_name)) == ACPI_PREFIX_MIXED) {
-
-		/* This is the case where the original source has not been modified */
-
-		return (function_name + 4);
-	}
-
-	if (*(ACPI_CAST_PTR(u32, function_name)) == ACPI_PREFIX_LOWER) {
-
-		/* This is the case where the source has been 'linuxized' */
-
-		return (function_name + 5);
-	}
-
-	return (function_name);
-}
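
The trim above compares the first four characters of the name as a single 32-bit value. A standalone sketch of the same idea; the memcpy-based load and the packed "acpi" constant are illustrative assumptions, not the ACPICA macros.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Compare the first four characters of the name as one 32-bit value. */
static const char *demo_trim(const char *name)
{
	uint32_t head, prefix;

	memcpy(&head, name, sizeof(head));	/* assumes name has >= 4 chars */
	memcpy(&prefix, "acpi", sizeof(prefix));

	if (head == prefix)
		return name + 5;		/* skip "acpi_" */

	return name;
}

int main(void)
{
	printf("%s\n", demo_trim("acpi_ut_trace"));	/* prints: ut_trace */
	return 0;
}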
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_debug_print
- *
- * PARAMETERS:  requested_debug_level - Requested debug print level
- *              line_number         - Caller's line number (for error output)
- *              function_name       - Caller's procedure name
- *              module_name         - Caller's module name
- *              component_id        - Caller's component ID
- *              Format              - Printf format field
- *              ...                 - Optional printf arguments
- *
- * RETURN:      None
- *
- * DESCRIPTION: Print a debug message with a prefix consisting of the module
- *              name, line number, nesting level, and function name.
- *
- ******************************************************************************/
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_debug_print(u32 requested_debug_level,
-		    u32 line_number,
-		    const char *function_name,
-		    const char *module_name,
-		    u32 component_id, const char *format, ...)
-{
-	acpi_thread_id thread_id;
-	va_list args;
-
-	/*
-	 * Stay silent if the debug level or component ID is disabled
-	 */
-	if (!(requested_debug_level & acpi_dbg_level) ||
-	    !(component_id & acpi_dbg_layer)) {
-		return;
-	}
-
-	/*
-	 * Thread tracking and context switch notification
-	 */
-	thread_id = acpi_os_get_thread_id();
-	if (thread_id != acpi_gbl_prev_thread_id) {
-		if (ACPI_LV_THREADS & acpi_dbg_level) {
-			acpi_os_printf
-			    ("\n**** Context Switch from TID %lX to TID %lX ****\n\n",
-			     (unsigned long)acpi_gbl_prev_thread_id,
-			     (unsigned long)thread_id);
-		}
-
-		acpi_gbl_prev_thread_id = thread_id;
-	}
-
-	/*
-	 * Display the module name, current line number, thread ID (if requested),
-	 * current procedure nesting level, and the current procedure name
-	 */
-	acpi_os_printf("%8s-%04ld ", module_name, line_number);
-
-	if (ACPI_LV_THREADS & acpi_dbg_level) {
-		acpi_os_printf("[%04lX] ", (unsigned long)thread_id);
-	}
-
-	acpi_os_printf("[%02ld] %-22.22s: ",
-		       acpi_gbl_nesting_level,
-		       acpi_ut_trim_function_name(function_name));
-
-	va_start(args, format);
-	acpi_os_vprintf(format, args);
-	va_end(args);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_ut_debug_print)
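
Callers do not normally invoke acpi_ut_debug_print() directly; they go through the ACPI_DEBUG_PRINT macro, as the utdelete.c hunk further below shows. A minimal sketch of a component emitting output through this path; the module name is arbitrary and the macro compiles away unless ACPI_DEBUG_OUTPUT is defined.

#include <acpi/acpi.h>

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("example")

static void example_trace_value(u32 value)
{
	ACPI_FUNCTION_TRACE(example_trace_value);

	/*
	 * Emitted only when the ACPI_LV_INFO bit is set in acpi_dbg_level and
	 * the _COMPONENT bit is set in acpi_dbg_layer (for instance via the
	 * acpi.debug_level= and acpi.debug_layer= boot parameters).
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "value is %u\n", value));

	return_VOID;
}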
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_debug_print_raw
- *
- * PARAMETERS:  requested_debug_level - Requested debug print level
- *              line_number         - Caller's line number
- *              function_name       - Caller's procedure name
- *              module_name         - Caller's module name
- *              component_id        - Caller's component ID
- *              Format              - Printf format field
- *              ...                 - Optional printf arguments
- *
- * RETURN:      None
- *
- * DESCRIPTION: Print message with no headers.  Has same interface as
- *              debug_print so that the same macros can be used.
- *
- ******************************************************************************/
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_debug_print_raw(u32 requested_debug_level,
-			u32 line_number,
-			const char *function_name,
-			const char *module_name,
-			u32 component_id, const char *format, ...)
-{
-	va_list args;
-
-	if (!(requested_debug_level & acpi_dbg_level) ||
-	    !(component_id & acpi_dbg_layer)) {
-		return;
-	}
-
-	va_start(args, format);
-	acpi_os_vprintf(format, args);
-	va_end(args);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_ut_debug_print_raw)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_trace
- *
- * PARAMETERS:  line_number         - Caller's line number
- *              function_name       - Caller's procedure name
- *              module_name         - Caller's module name
- *              component_id        - Caller's component ID
- *
- * RETURN:      None
- *
- * DESCRIPTION: Function entry trace.  Prints only if TRACE_FUNCTIONS bit is
- *              set in debug_level
- *
- ******************************************************************************/
-void
-acpi_ut_trace(u32 line_number,
-	      const char *function_name,
-	      const char *module_name, u32 component_id)
-{
-
-	acpi_gbl_nesting_level++;
-	acpi_ut_track_stack_ptr();
-
-	acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
-			    line_number, function_name, module_name,
-			    component_id, "%s\n", acpi_gbl_fn_entry_str);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_ut_trace)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_trace_ptr
- *
- * PARAMETERS:  line_number         - Caller's line number
- *              function_name       - Caller's procedure name
- *              module_name         - Caller's module name
- *              component_id        - Caller's component ID
- *              Pointer             - Pointer to display
- *
- * RETURN:      None
- *
- * DESCRIPTION: Function entry trace.  Prints only if TRACE_FUNCTIONS bit is
- *              set in debug_level
- *
- ******************************************************************************/
-void
-acpi_ut_trace_ptr(u32 line_number,
-		  const char *function_name,
-		  const char *module_name, u32 component_id, void *pointer)
-{
-	acpi_gbl_nesting_level++;
-	acpi_ut_track_stack_ptr();
-
-	acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
-			    line_number, function_name, module_name,
-			    component_id, "%s %p\n", acpi_gbl_fn_entry_str,
-			    pointer);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_trace_str
- *
- * PARAMETERS:  line_number         - Caller's line number
- *              function_name       - Caller's procedure name
- *              module_name         - Caller's module name
- *              component_id        - Caller's component ID
- *              String              - Additional string to display
- *
- * RETURN:      None
- *
- * DESCRIPTION: Function entry trace.  Prints only if TRACE_FUNCTIONS bit is
- *              set in debug_level
- *
- ******************************************************************************/
-
-void
-acpi_ut_trace_str(u32 line_number,
-		  const char *function_name,
-		  const char *module_name, u32 component_id, char *string)
-{
-
-	acpi_gbl_nesting_level++;
-	acpi_ut_track_stack_ptr();
-
-	acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
-			    line_number, function_name, module_name,
-			    component_id, "%s %s\n", acpi_gbl_fn_entry_str,
-			    string);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_trace_u32
- *
- * PARAMETERS:  line_number         - Caller's line number
- *              function_name       - Caller's procedure name
- *              module_name         - Caller's module name
- *              component_id        - Caller's component ID
- *              Integer             - Integer to display
- *
- * RETURN:      None
- *
- * DESCRIPTION: Function entry trace.  Prints only if TRACE_FUNCTIONS bit is
- *              set in debug_level
- *
- ******************************************************************************/
-
-void
-acpi_ut_trace_u32(u32 line_number,
-		  const char *function_name,
-		  const char *module_name, u32 component_id, u32 integer)
-{
-
-	acpi_gbl_nesting_level++;
-	acpi_ut_track_stack_ptr();
-
-	acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
-			    line_number, function_name, module_name,
-			    component_id, "%s %08X\n", acpi_gbl_fn_entry_str,
-			    integer);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_exit
- *
- * PARAMETERS:  line_number         - Caller's line number
- *              function_name       - Caller's procedure name
- *              module_name         - Caller's module name
- *              component_id        - Caller's component ID
- *
- * RETURN:      None
- *
- * DESCRIPTION: Function exit trace.  Prints only if TRACE_FUNCTIONS bit is
- *              set in debug_level
- *
- ******************************************************************************/
-
-void
-acpi_ut_exit(u32 line_number,
-	     const char *function_name,
-	     const char *module_name, u32 component_id)
-{
-
-	acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
-			    line_number, function_name, module_name,
-			    component_id, "%s\n", acpi_gbl_fn_exit_str);
-
-	acpi_gbl_nesting_level--;
-}
-
-ACPI_EXPORT_SYMBOL(acpi_ut_exit)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_status_exit
- *
- * PARAMETERS:  line_number         - Caller's line number
- *              function_name       - Caller's procedure name
- *              module_name         - Caller's module name
- *              component_id        - Caller's component ID
- *              Status              - Exit status code
- *
- * RETURN:      None
- *
- * DESCRIPTION: Function exit trace.  Prints only if TRACE_FUNCTIONS bit is
- *              set in debug_level. Prints exit status also.
- *
- ******************************************************************************/
-void
-acpi_ut_status_exit(u32 line_number,
-		    const char *function_name,
-		    const char *module_name,
-		    u32 component_id, acpi_status status)
-{
-
-	if (ACPI_SUCCESS(status)) {
-		acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
-				    line_number, function_name, module_name,
-				    component_id, "%s %s\n",
-				    acpi_gbl_fn_exit_str,
-				    acpi_format_exception(status));
-	} else {
-		acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
-				    line_number, function_name, module_name,
-				    component_id, "%s ****Exception****: %s\n",
-				    acpi_gbl_fn_exit_str,
-				    acpi_format_exception(status));
-	}
-
-	acpi_gbl_nesting_level--;
-}
-
-ACPI_EXPORT_SYMBOL(acpi_ut_status_exit)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_value_exit
- *
- * PARAMETERS:  line_number         - Caller's line number
- *              function_name       - Caller's procedure name
- *              module_name         - Caller's module name
- *              component_id        - Caller's component ID
- *              Value               - Value to be printed with exit msg
- *
- * RETURN:      None
- *
- * DESCRIPTION: Function exit trace.  Prints only if TRACE_FUNCTIONS bit is
- *              set in debug_level. Prints exit value also.
- *
- ******************************************************************************/
-void
-acpi_ut_value_exit(u32 line_number,
-		   const char *function_name,
-		   const char *module_name,
-		   u32 component_id, acpi_integer value)
-{
-
-	acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
-			    line_number, function_name, module_name,
-			    component_id, "%s %8.8X%8.8X\n",
-			    acpi_gbl_fn_exit_str, ACPI_FORMAT_UINT64(value));
-
-	acpi_gbl_nesting_level--;
-}
-
-ACPI_EXPORT_SYMBOL(acpi_ut_value_exit)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_ptr_exit
- *
- * PARAMETERS:  line_number         - Caller's line number
- *              function_name       - Caller's procedure name
- *              module_name         - Caller's module name
- *              component_id        - Caller's component ID
- *              Ptr                 - Pointer to display
- *
- * RETURN:      None
- *
- * DESCRIPTION: Function exit trace.  Prints only if TRACE_FUNCTIONS bit is
- *              set in debug_level. Prints exit value also.
- *
- ******************************************************************************/
-void
-acpi_ut_ptr_exit(u32 line_number,
-		 const char *function_name,
-		 const char *module_name, u32 component_id, u8 *ptr)
-{
-
-	acpi_ut_debug_print(ACPI_LV_FUNCTIONS,
-			    line_number, function_name, module_name,
-			    component_id, "%s %p\n", acpi_gbl_fn_exit_str, ptr);
-
-	acpi_gbl_nesting_level--;
-}
-
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_dump_buffer2
- *
- * PARAMETERS:  Buffer              - Buffer to dump
- *              Count               - Amount to dump, in bytes
- *              Display             - BYTE, WORD, DWORD, or QWORD display
- *
- * RETURN:      None
- *
- * DESCRIPTION: Generic dump buffer in both hex and ascii.
- *
- ******************************************************************************/
-
-void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
-{
-	u32 i = 0;
-	u32 j;
-	u32 temp32;
-	u8 buf_char;
-
-	if (!buffer) {
-		acpi_os_printf("Null Buffer Pointer in DumpBuffer!\n");
-		return;
-	}
-
-	if ((count < 4) || (count & 0x01)) {
-		display = DB_BYTE_DISPLAY;
-	}
-
-	/* Nasty little dump buffer routine! */
-
-	while (i < count) {
-
-		/* Print current offset */
-
-		acpi_os_printf("%6.4X: ", i);
-
-		/* Print 16 hex chars */
-
-		for (j = 0; j < 16;) {
-			if (i + j >= count) {
-
-				/* Dump fill spaces */
-
-				acpi_os_printf("%*s", ((display * 2) + 1), " ");
-				j += display;
-				continue;
-			}
-
-			switch (display) {
-			case DB_BYTE_DISPLAY:
-			default:	/* Default is BYTE display */
-
-				acpi_os_printf("%02X ",
-					       buffer[(acpi_size) i + j]);
-				break;
-
-			case DB_WORD_DISPLAY:
-
-				ACPI_MOVE_16_TO_32(&temp32,
-						   &buffer[(acpi_size) i + j]);
-				acpi_os_printf("%04X ", temp32);
-				break;
-
-			case DB_DWORD_DISPLAY:
-
-				ACPI_MOVE_32_TO_32(&temp32,
-						   &buffer[(acpi_size) i + j]);
-				acpi_os_printf("%08X ", temp32);
-				break;
-
-			case DB_QWORD_DISPLAY:
-
-				ACPI_MOVE_32_TO_32(&temp32,
-						   &buffer[(acpi_size) i + j]);
-				acpi_os_printf("%08X", temp32);
-
-				ACPI_MOVE_32_TO_32(&temp32,
-						   &buffer[(acpi_size) i + j +
-							   4]);
-				acpi_os_printf("%08X ", temp32);
-				break;
-			}
-
-			j += display;
-		}
-
-		/*
-		 * Print the ASCII equivalent characters but watch out for the bad
-		 * unprintable ones (printable chars are 0x20 through 0x7E)
-		 */
-		acpi_os_printf(" ");
-		for (j = 0; j < 16; j++) {
-			if (i + j >= count) {
-				acpi_os_printf("\n");
-				return;
-			}
-
-			buf_char = buffer[(acpi_size) i + j];
-			if (ACPI_IS_PRINT(buf_char)) {
-				acpi_os_printf("%c", buf_char);
-			} else {
-				acpi_os_printf(".");
-			}
-		}
-
-		/* Done with that line. */
-
-		acpi_os_printf("\n");
-		i += 16;
-	}
-
-	return;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_dump_buffer
- *
- * PARAMETERS:  Buffer              - Buffer to dump
- *              Count               - Amount to dump, in bytes
- *              Display             - BYTE, WORD, DWORD, or QWORD display
- *              component_id        - Caller's component ID
- *
- * RETURN:      None
- *
- * DESCRIPTION: Generic dump buffer in both hex and ascii.
- *
- ******************************************************************************/
-
-void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
-{
-
-	/* Only dump the buffer if tracing is enabled */
-
-	if (!((ACPI_LV_TABLES & acpi_dbg_level) &&
-	      (component_id & acpi_dbg_layer))) {
-		return;
-	}
-
-	acpi_ut_dump_buffer2(buffer, count, display);
-}
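
For reference, the dump routine prints an offset column, up to sixteen hex values, and an ASCII column per line. A standalone byte-mode sketch of the same layout in plain C.

#include <stdio.h>
#include <ctype.h>

static void demo_hexdump(const unsigned char *buf, unsigned int count)
{
	unsigned int i, j;

	for (i = 0; i < count; i += 16) {
		printf("%6.4X: ", i);			/* current offset */

		for (j = 0; j < 16; j++) {		/* 16 hex bytes */
			if (i + j < count)
				printf("%02X ", buf[i + j]);
			else
				printf("   ");
		}

		printf(" ");
		for (j = 0; j < 16 && i + j < count; j++)	/* ASCII column */
			printf("%c", isprint(buf[i + j]) ? buf[i + j] : '.');

		printf("\n");
	}
}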
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
deleted file mode 100644
index d197c6b..0000000
--- a/drivers/acpi/utilities/utdelete.c
+++ /dev/null
@@ -1,676 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: utdelete - object deletion and reference count utilities
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acinterp.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acevents.h>
-
-#define _COMPONENT          ACPI_UTILITIES
-ACPI_MODULE_NAME("utdelete")
-
-/* Local prototypes */
-static void acpi_ut_delete_internal_obj(union acpi_operand_object *object);
-
-static void
-acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_delete_internal_obj
- *
- * PARAMETERS:  Object         - Object to be deleted
- *
- * RETURN:      None
- *
- * DESCRIPTION: Low level object deletion, after reference counts have been
- *              updated (All reference counts, including sub-objects!)
- *
- ******************************************************************************/
-
-static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
-{
-	void *obj_pointer = NULL;
-	union acpi_operand_object *handler_desc;
-	union acpi_operand_object *second_desc;
-	union acpi_operand_object *next_desc;
-
-	ACPI_FUNCTION_TRACE_PTR(ut_delete_internal_obj, object);
-
-	if (!object) {
-		return_VOID;
-	}
-
-	/*
-	 * Must delete or free any pointers within the object that are not
-	 * actual ACPI objects (for example, a raw buffer pointer).
-	 */
-	switch (ACPI_GET_OBJECT_TYPE(object)) {
-	case ACPI_TYPE_STRING:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  "**** String %p, ptr %p\n", object,
-				  object->string.pointer));
-
-		/* Free the actual string buffer */
-
-		if (!(object->common.flags & AOPOBJ_STATIC_POINTER)) {
-
-			/* But only if it is NOT a pointer into an ACPI table */
-
-			obj_pointer = object->string.pointer;
-		}
-		break;
-
-	case ACPI_TYPE_BUFFER:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  "**** Buffer %p, ptr %p\n", object,
-				  object->buffer.pointer));
-
-		/* Free the actual buffer */
-
-		if (!(object->common.flags & AOPOBJ_STATIC_POINTER)) {
-
-			/* But only if it is NOT a pointer into an ACPI table */
-
-			obj_pointer = object->buffer.pointer;
-		}
-		break;
-
-	case ACPI_TYPE_PACKAGE:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  " **** Package of count %X\n",
-				  object->package.count));
-
-		/*
-		 * Elements of the package are not handled here, they are deleted
-		 * separately
-		 */
-
-		/* Free the (variable length) element pointer array */
-
-		obj_pointer = object->package.elements;
-		break;
-
-		/*
-		 * These objects have a possible list of notify handlers.
-		 * Device object also may have a GPE block.
-		 */
-	case ACPI_TYPE_DEVICE:
-
-		if (object->device.gpe_block) {
-			(void)acpi_ev_delete_gpe_block(object->device.
-						       gpe_block);
-		}
-
-		/*lint -fallthrough */
-
-	case ACPI_TYPE_PROCESSOR:
-	case ACPI_TYPE_THERMAL:
-
-		/* Walk the notify handler list for this object */
-
-		handler_desc = object->common_notify.handler;
-		while (handler_desc) {
-			next_desc = handler_desc->address_space.next;
-			acpi_ut_remove_reference(handler_desc);
-			handler_desc = next_desc;
-		}
-		break;
-
-	case ACPI_TYPE_MUTEX:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  "***** Mutex %p, OS Mutex %p\n",
-				  object, object->mutex.os_mutex));
-
-		if (object == acpi_gbl_global_lock_mutex) {
-
-			/* Global Lock has extra semaphore */
-
-			(void)
-			    acpi_os_delete_semaphore
-			    (acpi_gbl_global_lock_semaphore);
-			acpi_gbl_global_lock_semaphore = NULL;
-
-			acpi_os_delete_mutex(object->mutex.os_mutex);
-			acpi_gbl_global_lock_mutex = NULL;
-		} else {
-			acpi_ex_unlink_mutex(object);
-			acpi_os_delete_mutex(object->mutex.os_mutex);
-		}
-		break;
-
-	case ACPI_TYPE_EVENT:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  "***** Event %p, OS Semaphore %p\n",
-				  object, object->event.os_semaphore));
-
-		(void)acpi_os_delete_semaphore(object->event.os_semaphore);
-		object->event.os_semaphore = NULL;
-		break;
-
-	case ACPI_TYPE_METHOD:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  "***** Method %p\n", object));
-
-		/* Delete the method mutex if it exists */
-
-		if (object->method.mutex) {
-			acpi_os_delete_mutex(object->method.mutex->mutex.
-					     os_mutex);
-			acpi_ut_delete_object_desc(object->method.mutex);
-			object->method.mutex = NULL;
-		}
-		break;
-
-	case ACPI_TYPE_REGION:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  "***** Region %p\n", object));
-
-		second_desc = acpi_ns_get_secondary_object(object);
-		if (second_desc) {
-			/*
-			 * Free the region_context if and only if the handler is one of the
-			 * default handlers -- and therefore, we created the context object
-			 * locally, it was not created by an external caller.
-			 */
-			handler_desc = object->region.handler;
-			if (handler_desc) {
-				if (handler_desc->address_space.handler_flags &
-				    ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) {
-
-					/* Deactivate region and free region context */
-
-					if (handler_desc->address_space.setup) {
-						(void)handler_desc->
-						    address_space.setup(object,
-									ACPI_REGION_DEACTIVATE,
-									handler_desc->
-									address_space.
-									context,
-									&second_desc->
-									extra.
-									region_context);
-					}
-				}
-
-				acpi_ut_remove_reference(handler_desc);
-			}
-
-			/* Now we can free the Extra object */
-
-			acpi_ut_delete_object_desc(second_desc);
-		}
-		break;
-
-	case ACPI_TYPE_BUFFER_FIELD:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  "***** Buffer Field %p\n", object));
-
-		second_desc = acpi_ns_get_secondary_object(object);
-		if (second_desc) {
-			acpi_ut_delete_object_desc(second_desc);
-		}
-		break;
-
-	case ACPI_TYPE_LOCAL_BANK_FIELD:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  "***** Bank Field %p\n", object));
-
-		second_desc = acpi_ns_get_secondary_object(object);
-		if (second_desc) {
-			acpi_ut_delete_object_desc(second_desc);
-		}
-		break;
-
-	default:
-		break;
-	}
-
-	/* Free any allocated memory (pointer within the object) found above */
-
-	if (obj_pointer) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  "Deleting Object Subptr %p\n", obj_pointer));
-		ACPI_FREE(obj_pointer);
-	}
-
-	/* Now the object can be safely deleted */
-
-	ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Deleting Object %p [%s]\n",
-			  object, acpi_ut_get_object_type_name(object)));
-
-	acpi_ut_delete_object_desc(object);
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_delete_internal_object_list
- *
- * PARAMETERS:  obj_list        - Pointer to the list to be deleted
- *
- * RETURN:      None
- *
- * DESCRIPTION: This function deletes an internal object list, including both
- *              simple objects and package objects
- *
- ******************************************************************************/
-
-void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
-{
-	union acpi_operand_object **internal_obj;
-
-	ACPI_FUNCTION_TRACE(ut_delete_internal_object_list);
-
-	/* Walk the null-terminated internal list */
-
-	for (internal_obj = obj_list; *internal_obj; internal_obj++) {
-		acpi_ut_remove_reference(*internal_obj);
-	}
-
-	/* Free the combined parameter pointer list and object array */
-
-	ACPI_FREE(obj_list);
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_update_ref_count
- *
- * PARAMETERS:  Object          - Object whose ref count is to be updated
- *              Action          - What to do
- *
- * RETURN:      None
- *
- * DESCRIPTION: Modify the reference count of the object. The new count is
- *              stored back into the object; nothing is returned.
- *
- ******************************************************************************/
-
-static void
-acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
-{
-	u16 count;
-	u16 new_count;
-
-	ACPI_FUNCTION_NAME(ut_update_ref_count);
-
-	if (!object) {
-		return;
-	}
-
-	count = object->common.reference_count;
-	new_count = count;
-
-	/*
-	 * Perform the reference count action (increment, decrement, force delete)
-	 */
-	switch (action) {
-	case REF_INCREMENT:
-
-		new_count++;
-		object->common.reference_count = new_count;
-
-		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  "Obj %p Refs=%X, [Incremented]\n",
-				  object, new_count));
-		break;
-
-	case REF_DECREMENT:
-
-		if (count < 1) {
-			ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-					  "Obj %p Refs=%X, can't decrement! (Set to 0)\n",
-					  object, new_count));
-
-			new_count = 0;
-		} else {
-			new_count--;
-
-			ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-					  "Obj %p Refs=%X, [Decremented]\n",
-					  object, new_count));
-		}
-
-		if (ACPI_GET_OBJECT_TYPE(object) == ACPI_TYPE_METHOD) {
-			ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-					  "Method Obj %p Refs=%X, [Decremented]\n",
-					  object, new_count));
-		}
-
-		object->common.reference_count = new_count;
-		if (new_count == 0) {
-			acpi_ut_delete_internal_obj(object);
-		}
-		break;
-
-	case REF_FORCE_DELETE:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  "Obj %p Refs=%X, Force delete! (Set to 0)\n",
-				  object, count));
-
-		new_count = 0;
-		object->common.reference_count = new_count;
-		acpi_ut_delete_internal_obj(object);
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Unknown action (%X)", action));
-		break;
-	}
-
-	/*
-	 * Sanity check the reference count, for debug purposes only.
-	 * (A deleted object will have a huge reference count)
-	 */
-	if (count > ACPI_MAX_REFERENCE_COUNT) {
-		ACPI_WARNING((AE_INFO,
-			      "Large Reference Count (%X) in object %p", count,
-			      object));
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_update_object_reference
- *
- * PARAMETERS:  Object              - Increment ref count for this object
- *                                    and all sub-objects
- *              Action              - Either REF_INCREMENT or REF_DECREMENT or
- *                                    REF_FORCE_DELETE
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Increment the object reference count
- *
- * Object references are incremented when:
- * 1) An object is attached to a Node (namespace object)
- * 2) An object is copied (all subobjects must be incremented)
- *
- * Object references are decremented when:
- * 1) An object is detached from an Node
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
-{
-	acpi_status status = AE_OK;
-	union acpi_generic_state *state_list = NULL;
-	union acpi_operand_object *next_object = NULL;
-	union acpi_generic_state *state;
-	u32 i;
-
-	ACPI_FUNCTION_TRACE_PTR(ut_update_object_reference, object);
-
-	while (object) {
-
-		/* Make sure that this isn't a namespace handle */
-
-		if (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED) {
-			ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-					  "Object %p is NS handle\n", object));
-			return_ACPI_STATUS(AE_OK);
-		}
-
-		/*
-		 * All sub-objects must have their reference count incremented also.
-		 * Different object types have different subobjects.
-		 */
-		switch (ACPI_GET_OBJECT_TYPE(object)) {
-		case ACPI_TYPE_DEVICE:
-		case ACPI_TYPE_PROCESSOR:
-		case ACPI_TYPE_POWER:
-		case ACPI_TYPE_THERMAL:
-
-			/* Update the notify objects for these types (if present) */
-
-			acpi_ut_update_ref_count(object->common_notify.
-						 system_notify, action);
-			acpi_ut_update_ref_count(object->common_notify.
-						 device_notify, action);
-			break;
-
-		case ACPI_TYPE_PACKAGE:
-			/*
-			 * We must update all the sub-objects of the package,
-			 * each of whom may have their own sub-objects.
-			 */
-			for (i = 0; i < object->package.count; i++) {
-				/*
-				 * Push each element onto the stack for later processing.
-				 * Note: There can be null elements within the package,
-				 * these are simply ignored
-				 */
-				status =
-				    acpi_ut_create_update_state_and_push
-				    (object->package.elements[i], action,
-				     &state_list);
-				if (ACPI_FAILURE(status)) {
-					goto error_exit;
-				}
-			}
-			break;
-
-		case ACPI_TYPE_BUFFER_FIELD:
-
-			next_object = object->buffer_field.buffer_obj;
-			break;
-
-		case ACPI_TYPE_LOCAL_REGION_FIELD:
-
-			next_object = object->field.region_obj;
-			break;
-
-		case ACPI_TYPE_LOCAL_BANK_FIELD:
-
-			next_object = object->bank_field.bank_obj;
-			status =
-			    acpi_ut_create_update_state_and_push(object->
-								 bank_field.
-								 region_obj,
-								 action,
-								 &state_list);
-			if (ACPI_FAILURE(status)) {
-				goto error_exit;
-			}
-			break;
-
-		case ACPI_TYPE_LOCAL_INDEX_FIELD:
-
-			next_object = object->index_field.index_obj;
-			status =
-			    acpi_ut_create_update_state_and_push(object->
-								 index_field.
-								 data_obj,
-								 action,
-								 &state_list);
-			if (ACPI_FAILURE(status)) {
-				goto error_exit;
-			}
-			break;
-
-		case ACPI_TYPE_LOCAL_REFERENCE:
-			/*
-			 * The target of an Index (a package, string, or buffer) or a named
-			 * reference must track changes to the ref count of the index or
-			 * target object.
-			 */
-			if ((object->reference.class == ACPI_REFCLASS_INDEX) ||
-			    (object->reference.class == ACPI_REFCLASS_NAME)) {
-				next_object = object->reference.object;
-			}
-			break;
-
-		case ACPI_TYPE_REGION:
-		default:
-			break;	/* No subobjects for all other types */
-		}
-
-		/*
-		 * Now we can update the count in the main object. This can only
-		 * happen after we update the sub-objects in case this causes the
-		 * main object to be deleted.
-		 */
-		acpi_ut_update_ref_count(object, action);
-		object = NULL;
-
-		/* Move on to the next object to be updated */
-
-		if (next_object) {
-			object = next_object;
-			next_object = NULL;
-		} else if (state_list) {
-			state = acpi_ut_pop_generic_state(&state_list);
-			object = state->update.object;
-			acpi_ut_delete_generic_state(state);
-		}
-	}
-
-	return_ACPI_STATUS(AE_OK);
-
-      error_exit:
-
-	ACPI_EXCEPTION((AE_INFO, status,
-			"Could not update object reference count"));
-
-	/* Free any stacked Update State objects */
-
-	while (state_list) {
-		state = acpi_ut_pop_generic_state(&state_list);
-		acpi_ut_delete_generic_state(state);
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_add_reference
- *
- * PARAMETERS:  Object          - Object whose reference count is to be
- *                                incremented
- *
- * RETURN:      None
- *
- * DESCRIPTION: Add one reference to an ACPI object
- *
- ******************************************************************************/
-
-void acpi_ut_add_reference(union acpi_operand_object *object)
-{
-
-	ACPI_FUNCTION_TRACE_PTR(ut_add_reference, object);
-
-	/* Ensure that we have a valid object */
-
-	if (!acpi_ut_valid_internal_object(object)) {
-		return_VOID;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-			  "Obj %p Current Refs=%X [To Be Incremented]\n",
-			  object, object->common.reference_count));
-
-	/* Increment the reference count */
-
-	(void)acpi_ut_update_object_reference(object, REF_INCREMENT);
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_remove_reference
- *
- * PARAMETERS:  Object         - Object whose ref count will be decremented
- *
- * RETURN:      None
- *
- * DESCRIPTION: Decrement the reference count of an ACPI internal object
- *
- ******************************************************************************/
-
-void acpi_ut_remove_reference(union acpi_operand_object *object)
-{
-
-	ACPI_FUNCTION_TRACE_PTR(ut_remove_reference, object);
-
-	/*
-	 * Allow a NULL pointer to be passed in, just ignore it. This saves
-	 * each caller from having to check. Also, ignore NS nodes.
-	 *
-	 */
-	if (!object ||
-	    (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED)) {
-		return_VOID;
-	}
-
-	/* Ensure that we have a valid object */
-
-	if (!acpi_ut_valid_internal_object(object)) {
-		return_VOID;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-			  "Obj %p Current Refs=%X [To Be Decremented]\n",
-			  object, object->common.reference_count));
-
-	/*
-	 * Decrement the reference count, and only actually delete the object
-	 * if the reference count becomes 0. (Must also decrement the ref count
-	 * of all subobjects!)
-	 */
-	(void)acpi_ut_update_object_reference(object, REF_DECREMENT);
-	return_VOID;
-}
diff --git a/drivers/acpi/utilities/uteval.c b/drivers/acpi/utilities/uteval.c
deleted file mode 100644
index 352747e..0000000
--- a/drivers/acpi/utilities/uteval.c
+++ /dev/null
@@ -1,751 +0,0 @@
-/******************************************************************************
- *
- * Module Name: uteval - Object evaluation
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acinterp.h>
-
-#define _COMPONENT          ACPI_UTILITIES
-ACPI_MODULE_NAME("uteval")
-
-/* Local prototypes */
-static void
-acpi_ut_copy_id_string(char *destination, char *source, acpi_size max_length);
-
-static acpi_status
-acpi_ut_translate_one_cid(union acpi_operand_object *obj_desc,
-			  struct acpi_compatible_id *one_cid);
-
-/*
- * Strings supported by the _OSI predefined (internal) method.
- */
-static char *acpi_interfaces_supported[] = {
-	/* Operating System Vendor Strings */
-
-	"Windows 2000",		/* Windows 2000 */
-	"Windows 2001",		/* Windows XP */
-	"Windows 2001 SP1",	/* Windows XP SP1 */
-	"Windows 2001 SP2",	/* Windows XP SP2 */
-	"Windows 2001.1",	/* Windows Server 2003 */
-	"Windows 2001.1 SP1",	/* Windows Server 2003 SP1 - Added 03/2006 */
-	"Windows 2006",		/* Windows Vista - Added 03/2006 */
-
-	/* Feature Group Strings */
-
-	"Extended Address Space Descriptor"
-	    /*
-	     * All "optional" feature group strings (features that are implemented
-	     * by the host) should be implemented in the host version of
-	     * acpi_os_validate_interface and should not be added here.
-	     */
-};
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_osi_implementation
- *
- * PARAMETERS:  walk_state          - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Implementation of the _OSI predefined control method
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	union acpi_operand_object *string_desc;
-	union acpi_operand_object *return_desc;
-	u32 i;
-
-	ACPI_FUNCTION_TRACE(ut_osi_implementation);
-
-	/* Validate the string input argument */
-
-	string_desc = walk_state->arguments[0].object;
-	if (!string_desc || (string_desc->common.type != ACPI_TYPE_STRING)) {
-		return_ACPI_STATUS(AE_TYPE);
-	}
-
-	/* Create a return object */
-
-	return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
-	if (!return_desc) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Default return value is SUPPORTED */
-
-	return_desc->integer.value = ACPI_UINT32_MAX;
-	walk_state->return_desc = return_desc;
-
-	/* Compare input string to static table of supported interfaces */
-
-	for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_interfaces_supported); i++) {
-		if (!ACPI_STRCMP
-		    (string_desc->string.pointer,
-		     acpi_interfaces_supported[i])) {
-
-			/* The interface is supported */
-
-			return_ACPI_STATUS(AE_CTRL_TERMINATE);
-		}
-	}
-
-	/*
-	 * Did not match the string in the static table, call the host OSL to
-	 * check for a match with one of the optional strings (such as
-	 * "Module Device", "3.0 Thermal Model", etc.)
-	 */
-	status = acpi_os_validate_interface(string_desc->string.pointer);
-	if (ACPI_SUCCESS(status)) {
-
-		/* The interface is supported */
-
-		return_ACPI_STATUS(AE_CTRL_TERMINATE);
-	}
-
-	/* The interface is not supported */
-
-	return_desc->integer.value = 0;
-	return_ACPI_STATUS(AE_CTRL_TERMINATE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_osi_invalidate
- *
- * PARAMETERS:  Interface           - Interface string to invalidate
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Invalidate a string in the predefined _OSI string list
- *
- ******************************************************************************/
-
-acpi_status acpi_osi_invalidate(char *interface)
-{
-	int i;
-
-	for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_interfaces_supported); i++) {
-		if (!ACPI_STRCMP(interface, acpi_interfaces_supported[i])) {
-			*acpi_interfaces_supported[i] = '\0';
-			return AE_OK;
-		}
-	}
-	return AE_NOT_FOUND;
-}
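A minimal usage sketch for acpi_osi_invalidate(): blanking one of the predefined strings makes _OSI stop reporting that interface as supported. The "Windows 2006" entry is taken from the table above; the surrounding caller context is illustrative only.

	/* Sketch: prevent _OSI from reporting Vista support. The string
	 * must match an entry in acpi_interfaces_supported exactly. */
	acpi_status status = acpi_osi_invalidate("Windows 2006");
	if (ACPI_FAILURE(status)) {
		/* AE_NOT_FOUND - the string is not in the predefined list */
	}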
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_evaluate_object
- *
- * PARAMETERS:  prefix_node         - Starting node
- *              Path                - Path to object from starting node
- *              expected_return_btypes - Bitmap of allowed return types
- *              return_desc         - Where a return value is stored
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Evaluates a namespace object and verifies the type of the
- *              return object.  Common code that simplifies accessing objects
- *              that have required return objects of fixed types.
- *
- *              NOTE: Internal function, no parameter validation
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
-			char *path,
-			u32 expected_return_btypes,
-			union acpi_operand_object **return_desc)
-{
-	struct acpi_evaluate_info *info;
-	acpi_status status;
-	u32 return_btype;
-
-	ACPI_FUNCTION_TRACE(ut_evaluate_object);
-
-	/* Allocate the evaluation information block */
-
-	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
-	if (!info) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	info->prefix_node = prefix_node;
-	info->pathname = path;
-
-	/* Evaluate the object/method */
-
-	status = acpi_ns_evaluate(info);
-	if (ACPI_FAILURE(status)) {
-		if (status == AE_NOT_FOUND) {
-			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-					  "[%4.4s.%s] was not found\n",
-					  acpi_ut_get_node_name(prefix_node),
-					  path));
-		} else {
-			ACPI_ERROR_METHOD("Method execution failed",
-					  prefix_node, path, status);
-		}
-
-		goto cleanup;
-	}
-
-	/* Did we get a return object? */
-
-	if (!info->return_object) {
-		if (expected_return_btypes) {
-			ACPI_ERROR_METHOD("No object was returned from",
-					  prefix_node, path, AE_NOT_EXIST);
-
-			status = AE_NOT_EXIST;
-		}
-
-		goto cleanup;
-	}
-
-	/* Map the return object type to the bitmapped type */
-
-	switch (ACPI_GET_OBJECT_TYPE(info->return_object)) {
-	case ACPI_TYPE_INTEGER:
-		return_btype = ACPI_BTYPE_INTEGER;
-		break;
-
-	case ACPI_TYPE_BUFFER:
-		return_btype = ACPI_BTYPE_BUFFER;
-		break;
-
-	case ACPI_TYPE_STRING:
-		return_btype = ACPI_BTYPE_STRING;
-		break;
-
-	case ACPI_TYPE_PACKAGE:
-		return_btype = ACPI_BTYPE_PACKAGE;
-		break;
-
-	default:
-		return_btype = 0;
-		break;
-	}
-
-	if ((acpi_gbl_enable_interpreter_slack) && (!expected_return_btypes)) {
-		/*
-		 * We received a return object, but one was not expected.  This can
-		 * happen frequently if the "implicit return" feature is enabled.
-		 * Just delete the return object and return AE_OK.
-		 */
-		acpi_ut_remove_reference(info->return_object);
-		goto cleanup;
-	}
-
-	/* Is the return object one of the expected types? */
-
-	if (!(expected_return_btypes & return_btype)) {
-		ACPI_ERROR_METHOD("Return object type is incorrect",
-				  prefix_node, path, AE_TYPE);
-
-		ACPI_ERROR((AE_INFO,
-			    "Type returned from %s was incorrect: %s, expected Btypes: %X",
-			    path,
-			    acpi_ut_get_object_type_name(info->return_object),
-			    expected_return_btypes));
-
-		/* On error exit, we must delete the return object */
-
-		acpi_ut_remove_reference(info->return_object);
-		status = AE_TYPE;
-		goto cleanup;
-	}
-
-	/* Object type is OK, return it */
-
-	*return_desc = info->return_object;
-
-      cleanup:
-	ACPI_FREE(info);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_evaluate_numeric_object
- *
- * PARAMETERS:  object_name         - Object name to be evaluated
- *              device_node         - Node for the device
- *              Address             - Where the value is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Evaluates a numeric namespace object for a selected device
- *              and stores result in *Address.
- *
- *              NOTE: Internal function, no parameter validation
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_evaluate_numeric_object(char *object_name,
-				struct acpi_namespace_node *device_node,
-				acpi_integer * address)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ut_evaluate_numeric_object);
-
-	status = acpi_ut_evaluate_object(device_node, object_name,
-					 ACPI_BTYPE_INTEGER, &obj_desc);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Get the returned Integer */
-
-	*address = obj_desc->integer.value;
-
-	/* On exit, we must delete the return object */
-
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
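As a hedged sketch of a typical caller, the wrapper above reduces fetching a single integer object (such as _ADR) to one call; device_node is assumed to be a valid namespace node obtained elsewhere.

	/* Sketch: evaluate the device's _ADR object and keep the integer */
	acpi_integer address;
	acpi_status status;

	status = acpi_ut_evaluate_numeric_object("_ADR", device_node, &address);
	if (ACPI_SUCCESS(status)) {
		/* address now holds the value returned by _ADR */
	}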
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_copy_id_string
- *
- * PARAMETERS:  Destination         - Where to copy the string
- *              Source              - Source string
- *              max_length          - Length of the destination buffer
- *
- * RETURN:      None
- *
- * DESCRIPTION: Copies an ID string for the _HID, _CID, and _UID methods.
- *              Performs removal of a leading asterisk if present -- workaround
- *              for a known issue on a bunch of machines.
- *
- ******************************************************************************/
-
-static void
-acpi_ut_copy_id_string(char *destination, char *source, acpi_size max_length)
-{
-
-	/*
-	 * Workaround for ID strings that have a leading asterisk. This construct
-	 * is not allowed by the ACPI specification  (ID strings must be
-	 * alphanumeric), but enough existing machines have this embedded in their
-	 * ID strings that the following code is useful.
-	 */
-	if (*source == '*') {
-		source++;
-	}
-
-	/* Do the actual copy */
-
-	ACPI_STRNCPY(destination, source, max_length);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_execute_HID
- *
- * PARAMETERS:  device_node         - Node for the device
- *              Hid                 - Where the HID is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Executes the _HID control method that returns the hardware
- *              ID of the device.
- *
- *              NOTE: Internal function, no parameter validation
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
-		    struct acpica_device_id *hid)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ut_execute_HID);
-
-	status = acpi_ut_evaluate_object(device_node, METHOD_NAME__HID,
-					 ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
-					 &obj_desc);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
-
-		/* Convert the Numeric HID to string */
-
-		acpi_ex_eisa_id_to_string((u32) obj_desc->integer.value,
-					  hid->value);
-	} else {
-		/* Copy the String HID from the returned object */
-
-		acpi_ut_copy_id_string(hid->value, obj_desc->string.pointer,
-				       sizeof(hid->value));
-	}
-
-	/* On exit, we must delete the return object */
-
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_translate_one_cid
- *
- * PARAMETERS:  obj_desc            - _CID object, must be integer or string
- *              one_cid             - Where the CID string is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Return a numeric or string _CID value as a string.
- *              (Compatible ID)
- *
- *              NOTE:  Assumes a maximum _CID string length of
- *                     ACPI_MAX_CID_LENGTH.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_translate_one_cid(union acpi_operand_object *obj_desc,
-			  struct acpi_compatible_id *one_cid)
-{
-
-	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
-	case ACPI_TYPE_INTEGER:
-
-		/* Convert the Numeric CID to string */
-
-		acpi_ex_eisa_id_to_string((u32) obj_desc->integer.value,
-					  one_cid->value);
-		return (AE_OK);
-
-	case ACPI_TYPE_STRING:
-
-		if (obj_desc->string.length > ACPI_MAX_CID_LENGTH) {
-			return (AE_AML_STRING_LIMIT);
-		}
-
-		/* Copy the String CID from the returned object */
-
-		acpi_ut_copy_id_string(one_cid->value, obj_desc->string.pointer,
-				       ACPI_MAX_CID_LENGTH);
-		return (AE_OK);
-
-	default:
-
-		return (AE_TYPE);
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_execute_CID
- *
- * PARAMETERS:  device_node         - Node for the device
- *              return_cid_list     - Where the CID list is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Executes the _CID control method that returns one or more
- *              compatible hardware IDs for the device.
- *
- *              NOTE: Internal function, no parameter validation
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_execute_CID(struct acpi_namespace_node * device_node,
-		    struct acpi_compatible_id_list ** return_cid_list)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-	u32 count;
-	u32 size;
-	struct acpi_compatible_id_list *cid_list;
-	u32 i;
-
-	ACPI_FUNCTION_TRACE(ut_execute_CID);
-
-	/* Evaluate the _CID method for this device */
-
-	status = acpi_ut_evaluate_object(device_node, METHOD_NAME__CID,
-					 ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING
-					 | ACPI_BTYPE_PACKAGE, &obj_desc);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Get the number of _CIDs returned */
-
-	count = 1;
-	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
-		count = obj_desc->package.count;
-	}
-
-	/* Allocate a worst-case buffer for the _CIDs */
-
-	size = (((count - 1) * sizeof(struct acpi_compatible_id)) +
-		sizeof(struct acpi_compatible_id_list));
-
-	cid_list = ACPI_ALLOCATE_ZEROED((acpi_size) size);
-	if (!cid_list) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Init CID list */
-
-	cid_list->count = count;
-	cid_list->size = size;
-
-	/*
-	 *  A _CID can return either a single compatible ID or a package of
-	 *  compatible IDs.  Each compatible ID can be one of the following:
-	 *  1) Integer (32 bit compressed EISA ID) or
-	 *  2) String (PCI ID format, e.g. "PCI\VEN_vvvv&DEV_dddd&SUBSYS_ssssssss")
-	 */
-
-	/* The _CID object can be either a single CID or a package (list) of CIDs */
-
-	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
-
-		/* Translate each package element */
-
-		for (i = 0; i < count; i++) {
-			status =
-			    acpi_ut_translate_one_cid(obj_desc->package.
-						      elements[i],
-						      &cid_list->id[i]);
-			if (ACPI_FAILURE(status)) {
-				break;
-			}
-		}
-	} else {
-		/* Only one CID, translate to a string */
-
-		status = acpi_ut_translate_one_cid(obj_desc, cid_list->id);
-	}
-
-	/* Cleanup on error */
-
-	if (ACPI_FAILURE(status)) {
-		ACPI_FREE(cid_list);
-	} else {
-		*return_cid_list = cid_list;
-	}
-
-	/* On exit, we must delete the _CID return object */
-
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
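A sketch of the caller's side of this contract: the CID list is allocated by acpi_ut_execute_CID() and must be released by the caller with ACPI_FREE() once the strings have been consumed (device_node is assumed valid).

	/* Sketch: walk the compatible IDs returned for a device */
	struct acpi_compatible_id_list *cid_list;
	acpi_status status;
	u32 i;

	status = acpi_ut_execute_CID(device_node, &cid_list);
	if (ACPI_SUCCESS(status)) {
		for (i = 0; i < cid_list->count; i++) {
			/* cid_list->id[i].value is a NUL-terminated CID string */
		}
		ACPI_FREE(cid_list);	/* Caller owns the list */
	}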
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_execute_UID
- *
- * PARAMETERS:  device_node         - Node for the device
- *              Uid                 - Where the UID is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Executes the _UID control method that returns the unique
- *              ID of the device.
- *
- *              NOTE: Internal function, no parameter validation
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
-		    struct acpica_device_id *uid)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ut_execute_UID);
-
-	status = acpi_ut_evaluate_object(device_node, METHOD_NAME__UID,
-					 ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
-					 &obj_desc);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
-
-		/* Convert the Numeric UID to string */
-
-		acpi_ex_unsigned_integer_to_string(obj_desc->integer.value,
-						   uid->value);
-	} else {
-		/* Copy the String UID from the returned object */
-
-		acpi_ut_copy_id_string(uid->value, obj_desc->string.pointer,
-				       sizeof(uid->value));
-	}
-
-	/* On exit, we must delete the return object */
-
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_execute_STA
- *
- * PARAMETERS:  device_node         - Node for the device
- *              Flags               - Where the status flags are returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Executes _STA for selected device and stores results in
- *              *Flags.
- *
- *              NOTE: Internal function, no parameter validation
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_execute_STA(struct acpi_namespace_node *device_node, u32 * flags)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ut_execute_STA);
-
-	status = acpi_ut_evaluate_object(device_node, METHOD_NAME__STA,
-					 ACPI_BTYPE_INTEGER, &obj_desc);
-	if (ACPI_FAILURE(status)) {
-		if (AE_NOT_FOUND == status) {
-			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-					  "_STA on %4.4s was not found, assuming device is present\n",
-					  acpi_ut_get_node_name(device_node)));
-
-			*flags = ACPI_UINT32_MAX;
-			status = AE_OK;
-		}
-
-		return_ACPI_STATUS(status);
-	}
-
-	/* Extract the status flags */
-
-	*flags = (u32) obj_desc->integer.value;
-
-	/* On exit, we must delete the return object */
-
-	acpi_ut_remove_reference(obj_desc);
-	return_ACPI_STATUS(status);
-}
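A short sketch of interpreting the returned flags: per the ACPI specification, bit 0 of _STA is the "device present" bit, and the all-ones default above means a missing _STA is treated as fully present.

	/* Sketch: check whether the device reports itself as present */
	u32 flags;
	acpi_status status;

	status = acpi_ut_execute_STA(device_node, &flags);
	if (ACPI_SUCCESS(status) && (flags & 0x01)) {
		/* Bit 0 set: device is present */
	}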
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_execute_Sxds
- *
- * PARAMETERS:  device_node         - Node for the device
- *              Highest             - Where the highest D-states (one per
- *                                    _S1D.._S4D) are returned, 4-byte array
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Executes the _SxD methods (_S1D through _S4D) for the
- *              selected device and stores the results in *Highest.
- *
- *              NOTE: Internal function, no parameter validation
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_execute_sxds(struct acpi_namespace_node *device_node, u8 * highest)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-	u32 i;
-
-	ACPI_FUNCTION_TRACE(ut_execute_sxds);
-
-	for (i = 0; i < 4; i++) {
-		highest[i] = 0xFF;
-		status = acpi_ut_evaluate_object(device_node,
-						 ACPI_CAST_PTR(char,
-							       acpi_gbl_highest_dstate_names
-							       [i]),
-						 ACPI_BTYPE_INTEGER, &obj_desc);
-		if (ACPI_FAILURE(status)) {
-			if (status != AE_NOT_FOUND) {
-				ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-						  "%s on Device %4.4s, %s\n",
-						  ACPI_CAST_PTR(char,
-								acpi_gbl_highest_dstate_names
-								[i]),
-						  acpi_ut_get_node_name
-						  (device_node),
-						  acpi_format_exception
-						  (status)));
-
-				return_ACPI_STATUS(status);
-			}
-		} else {
-			/* Extract the Dstate value */
-
-			highest[i] = (u8) obj_desc->integer.value;
-
-			/* Delete the return object */
-
-			acpi_ut_remove_reference(obj_desc);
-		}
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c
deleted file mode 100644
index 17ed5ac..0000000
--- a/drivers/acpi/utilities/utglobal.c
+++ /dev/null
@@ -1,819 +0,0 @@
-/******************************************************************************
- *
- * Module Name: utglobal - Global variables for the ACPI subsystem
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#define DEFINE_ACPI_GLOBALS
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-
-ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
-#define _COMPONENT          ACPI_UTILITIES
-    ACPI_MODULE_NAME("utglobal")
-
-/*******************************************************************************
- *
- * Static global variable initialization.
- *
- ******************************************************************************/
-/*
- * We want the debug switches statically initialized so they
- * are already set when the debugger is entered.
- */
-/* Debug switch - level and trace mask */
-u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT;
-
-/* Debug switch - layer (component) mask */
-
-u32 acpi_dbg_layer = 0;
-u32 acpi_gbl_nesting_level = 0;
-
-/* Debugger globals */
-
-u8 acpi_gbl_db_terminate_threads = FALSE;
-u8 acpi_gbl_abort_method = FALSE;
-u8 acpi_gbl_method_executing = FALSE;
-
-/* System flags */
-
-u32 acpi_gbl_startup_flags = 0;
-
-/* System starts uninitialized */
-
-u8 acpi_gbl_shutdown = TRUE;
-
-const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = {
-	"\\_S0_",
-	"\\_S1_",
-	"\\_S2_",
-	"\\_S3_",
-	"\\_S4_",
-	"\\_S5_"
-};
-
-const char *acpi_gbl_highest_dstate_names[4] = {
-	"_S1D",
-	"_S2D",
-	"_S3D",
-	"_S4D"
-};
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_format_exception
- *
- * PARAMETERS:  Status       - The acpi_status code to be formatted
- *
- * RETURN:      A string containing the exception text. A valid pointer is
- *              always returned.
- *
- * DESCRIPTION: This function translates an ACPI exception into an ASCII
- *              string. It is here instead of utxface.c so it is always present.
- *
- ******************************************************************************/
-
-const char *acpi_format_exception(acpi_status status)
-{
-	const char *exception = NULL;
-
-	ACPI_FUNCTION_ENTRY();
-
-	exception = acpi_ut_validate_exception(status);
-	if (!exception) {
-
-		/* Exception code was not recognized */
-
-		ACPI_ERROR((AE_INFO,
-			    "Unknown exception code: 0x%8.8X", status));
-
-		exception = "UNKNOWN_STATUS_CODE";
-		dump_stack();
-	}
-
-	return (ACPI_CAST_PTR(const char, exception));
-}
-
-ACPI_EXPORT_SYMBOL(acpi_format_exception)
-
-/*******************************************************************************
- *
- * Namespace globals
- *
- ******************************************************************************/
-/*
- * Predefined ACPI Names (Built-in to the Interpreter)
- *
- * NOTES:
- * 1) _SB_ is defined to be a device to allow \_SB_._INI to be run
- *    during the initialization sequence.
- * 2) _TZ_ is defined to be a thermal zone in order to allow ASL code to
- *    perform a Notify() operation on it.
- */
-const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
-	{"_GPE", ACPI_TYPE_LOCAL_SCOPE, NULL},
-	{"_PR_", ACPI_TYPE_LOCAL_SCOPE, NULL},
-	{"_SB_", ACPI_TYPE_DEVICE, NULL},
-	{"_SI_", ACPI_TYPE_LOCAL_SCOPE, NULL},
-	{"_TZ_", ACPI_TYPE_THERMAL, NULL},
-	{"_REV", ACPI_TYPE_INTEGER, (char *)ACPI_CA_SUPPORT_LEVEL},
-	{"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME},
-	{"_GL_", ACPI_TYPE_MUTEX, (char *)1},
-
-#if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)
-	{"_OSI", ACPI_TYPE_METHOD, (char *)1},
-#endif
-
-	/* Table terminator */
-
-	{NULL, ACPI_TYPE_ANY, NULL}
-};
-
-/*
- * Properties of the ACPI Object Types, both internal and external.
- * The table is indexed by values of acpi_object_type
- */
-const u8 acpi_gbl_ns_properties[] = {
-	ACPI_NS_NORMAL,		/* 00 Any              */
-	ACPI_NS_NORMAL,		/* 01 Number           */
-	ACPI_NS_NORMAL,		/* 02 String           */
-	ACPI_NS_NORMAL,		/* 03 Buffer           */
-	ACPI_NS_NORMAL,		/* 04 Package          */
-	ACPI_NS_NORMAL,		/* 05 field_unit       */
-	ACPI_NS_NEWSCOPE,	/* 06 Device           */
-	ACPI_NS_NORMAL,		/* 07 Event            */
-	ACPI_NS_NEWSCOPE,	/* 08 Method           */
-	ACPI_NS_NORMAL,		/* 09 Mutex            */
-	ACPI_NS_NORMAL,		/* 10 Region           */
-	ACPI_NS_NEWSCOPE,	/* 11 Power            */
-	ACPI_NS_NEWSCOPE,	/* 12 Processor        */
-	ACPI_NS_NEWSCOPE,	/* 13 Thermal          */
-	ACPI_NS_NORMAL,		/* 14 buffer_field     */
-	ACPI_NS_NORMAL,		/* 15 ddb_handle       */
-	ACPI_NS_NORMAL,		/* 16 Debug Object     */
-	ACPI_NS_NORMAL,		/* 17 def_field        */
-	ACPI_NS_NORMAL,		/* 18 bank_field       */
-	ACPI_NS_NORMAL,		/* 19 index_field      */
-	ACPI_NS_NORMAL,		/* 20 Reference        */
-	ACPI_NS_NORMAL,		/* 21 Alias            */
-	ACPI_NS_NORMAL,		/* 22 method_alias     */
-	ACPI_NS_NORMAL,		/* 23 Notify           */
-	ACPI_NS_NORMAL,		/* 24 Address Handler  */
-	ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL,	/* 25 Resource Desc    */
-	ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL,	/* 26 Resource Field   */
-	ACPI_NS_NEWSCOPE,	/* 27 Scope            */
-	ACPI_NS_NORMAL,		/* 28 Extra            */
-	ACPI_NS_NORMAL,		/* 29 Data             */
-	ACPI_NS_NORMAL		/* 30 Invalid          */
-};
-
-/* Hex to ASCII conversion table */
-
-static const char acpi_gbl_hex_to_ascii[] = {
-	'0', '1', '2', '3', '4', '5', '6', '7',
-	'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
-};
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_hex_to_ascii_char
- *
- * PARAMETERS:  Integer             - Contains the hex digit
- *              Position            - bit position of the digit within the
- *                                    integer (multiple of 4)
- *
- * RETURN:      The converted Ascii character
- *
- * DESCRIPTION: Convert a hex digit to an Ascii character
- *
- ******************************************************************************/
-
-char acpi_ut_hex_to_ascii_char(acpi_integer integer, u32 position)
-{
-
-	return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
-}
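A small sketch of how this helper is typically used: walking the nibbles of a value from the most significant position downward builds a fixed-width hex string (the buffer and loop are illustrative).

	/* Sketch: render a 32-bit value as eight hex characters */
	u32 value = 0x1234ABCD;
	char buffer[9];
	u32 i;

	for (i = 0; i < 8; i++) {
		/* Positions 28, 24, ..., 0 select successive nibbles */
		buffer[i] = acpi_ut_hex_to_ascii_char((acpi_integer) value,
						      28 - (i * 4));
	}
	buffer[8] = '\0';	/* buffer now contains "1234ABCD" */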
-
-/******************************************************************************
- *
- * Event and Hardware globals
- *
- ******************************************************************************/
-
-struct acpi_bit_register_info acpi_gbl_bit_register_info[ACPI_NUM_BITREG] = {
-	/* Name                                     Parent Register             Register Bit Position                   Register Bit Mask       */
-
-	/* ACPI_BITREG_TIMER_STATUS         */ {ACPI_REGISTER_PM1_STATUS,
-						ACPI_BITPOSITION_TIMER_STATUS,
-						ACPI_BITMASK_TIMER_STATUS},
-	/* ACPI_BITREG_BUS_MASTER_STATUS    */ {ACPI_REGISTER_PM1_STATUS,
-						ACPI_BITPOSITION_BUS_MASTER_STATUS,
-						ACPI_BITMASK_BUS_MASTER_STATUS},
-	/* ACPI_BITREG_GLOBAL_LOCK_STATUS   */ {ACPI_REGISTER_PM1_STATUS,
-						ACPI_BITPOSITION_GLOBAL_LOCK_STATUS,
-						ACPI_BITMASK_GLOBAL_LOCK_STATUS},
-	/* ACPI_BITREG_POWER_BUTTON_STATUS  */ {ACPI_REGISTER_PM1_STATUS,
-						ACPI_BITPOSITION_POWER_BUTTON_STATUS,
-						ACPI_BITMASK_POWER_BUTTON_STATUS},
-	/* ACPI_BITREG_SLEEP_BUTTON_STATUS  */ {ACPI_REGISTER_PM1_STATUS,
-						ACPI_BITPOSITION_SLEEP_BUTTON_STATUS,
-						ACPI_BITMASK_SLEEP_BUTTON_STATUS},
-	/* ACPI_BITREG_RT_CLOCK_STATUS      */ {ACPI_REGISTER_PM1_STATUS,
-						ACPI_BITPOSITION_RT_CLOCK_STATUS,
-						ACPI_BITMASK_RT_CLOCK_STATUS},
-	/* ACPI_BITREG_WAKE_STATUS          */ {ACPI_REGISTER_PM1_STATUS,
-						ACPI_BITPOSITION_WAKE_STATUS,
-						ACPI_BITMASK_WAKE_STATUS},
-	/* ACPI_BITREG_PCIEXP_WAKE_STATUS   */ {ACPI_REGISTER_PM1_STATUS,
-						ACPI_BITPOSITION_PCIEXP_WAKE_STATUS,
-						ACPI_BITMASK_PCIEXP_WAKE_STATUS},
-
-	/* ACPI_BITREG_TIMER_ENABLE         */ {ACPI_REGISTER_PM1_ENABLE,
-						ACPI_BITPOSITION_TIMER_ENABLE,
-						ACPI_BITMASK_TIMER_ENABLE},
-	/* ACPI_BITREG_GLOBAL_LOCK_ENABLE   */ {ACPI_REGISTER_PM1_ENABLE,
-						ACPI_BITPOSITION_GLOBAL_LOCK_ENABLE,
-						ACPI_BITMASK_GLOBAL_LOCK_ENABLE},
-	/* ACPI_BITREG_POWER_BUTTON_ENABLE  */ {ACPI_REGISTER_PM1_ENABLE,
-						ACPI_BITPOSITION_POWER_BUTTON_ENABLE,
-						ACPI_BITMASK_POWER_BUTTON_ENABLE},
-	/* ACPI_BITREG_SLEEP_BUTTON_ENABLE  */ {ACPI_REGISTER_PM1_ENABLE,
-						ACPI_BITPOSITION_SLEEP_BUTTON_ENABLE,
-						ACPI_BITMASK_SLEEP_BUTTON_ENABLE},
-	/* ACPI_BITREG_RT_CLOCK_ENABLE      */ {ACPI_REGISTER_PM1_ENABLE,
-						ACPI_BITPOSITION_RT_CLOCK_ENABLE,
-						ACPI_BITMASK_RT_CLOCK_ENABLE},
-	/* ACPI_BITREG_PCIEXP_WAKE_DISABLE  */ {ACPI_REGISTER_PM1_ENABLE,
-						ACPI_BITPOSITION_PCIEXP_WAKE_DISABLE,
-						ACPI_BITMASK_PCIEXP_WAKE_DISABLE},
-
-	/* ACPI_BITREG_SCI_ENABLE           */ {ACPI_REGISTER_PM1_CONTROL,
-						ACPI_BITPOSITION_SCI_ENABLE,
-						ACPI_BITMASK_SCI_ENABLE},
-	/* ACPI_BITREG_BUS_MASTER_RLD       */ {ACPI_REGISTER_PM1_CONTROL,
-						ACPI_BITPOSITION_BUS_MASTER_RLD,
-						ACPI_BITMASK_BUS_MASTER_RLD},
-	/* ACPI_BITREG_GLOBAL_LOCK_RELEASE  */ {ACPI_REGISTER_PM1_CONTROL,
-						ACPI_BITPOSITION_GLOBAL_LOCK_RELEASE,
-						ACPI_BITMASK_GLOBAL_LOCK_RELEASE},
-	/* ACPI_BITREG_SLEEP_TYPE_A         */ {ACPI_REGISTER_PM1_CONTROL,
-						ACPI_BITPOSITION_SLEEP_TYPE_X,
-						ACPI_BITMASK_SLEEP_TYPE_X},
-	/* ACPI_BITREG_SLEEP_TYPE_B         */ {ACPI_REGISTER_PM1_CONTROL,
-						ACPI_BITPOSITION_SLEEP_TYPE_X,
-						ACPI_BITMASK_SLEEP_TYPE_X},
-	/* ACPI_BITREG_SLEEP_ENABLE         */ {ACPI_REGISTER_PM1_CONTROL,
-						ACPI_BITPOSITION_SLEEP_ENABLE,
-						ACPI_BITMASK_SLEEP_ENABLE},
-
-	/* ACPI_BITREG_ARB_DIS              */ {ACPI_REGISTER_PM2_CONTROL,
-						ACPI_BITPOSITION_ARB_DISABLE,
-						ACPI_BITMASK_ARB_DISABLE}
-};
-
-struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] = {
-	/* ACPI_EVENT_PMTIMER       */ {ACPI_BITREG_TIMER_STATUS,
-					ACPI_BITREG_TIMER_ENABLE,
-					ACPI_BITMASK_TIMER_STATUS,
-					ACPI_BITMASK_TIMER_ENABLE},
-	/* ACPI_EVENT_GLOBAL        */ {ACPI_BITREG_GLOBAL_LOCK_STATUS,
-					ACPI_BITREG_GLOBAL_LOCK_ENABLE,
-					ACPI_BITMASK_GLOBAL_LOCK_STATUS,
-					ACPI_BITMASK_GLOBAL_LOCK_ENABLE},
-	/* ACPI_EVENT_POWER_BUTTON  */ {ACPI_BITREG_POWER_BUTTON_STATUS,
-					ACPI_BITREG_POWER_BUTTON_ENABLE,
-					ACPI_BITMASK_POWER_BUTTON_STATUS,
-					ACPI_BITMASK_POWER_BUTTON_ENABLE},
-	/* ACPI_EVENT_SLEEP_BUTTON  */ {ACPI_BITREG_SLEEP_BUTTON_STATUS,
-					ACPI_BITREG_SLEEP_BUTTON_ENABLE,
-					ACPI_BITMASK_SLEEP_BUTTON_STATUS,
-					ACPI_BITMASK_SLEEP_BUTTON_ENABLE},
-	/* ACPI_EVENT_RTC           */ {ACPI_BITREG_RT_CLOCK_STATUS,
-					ACPI_BITREG_RT_CLOCK_ENABLE,
-					ACPI_BITMASK_RT_CLOCK_STATUS,
-					ACPI_BITMASK_RT_CLOCK_ENABLE},
-};
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_region_name
- *
- * PARAMETERS:  space_id            - ID for the region
- *
- * RETURN:      Decoded region space_id name
- *
- * DESCRIPTION: Translate a Space ID into a name string (Debug only)
- *
- ******************************************************************************/
-
-/* Region type decoding */
-
-const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
-	"SystemMemory",
-	"SystemIO",
-	"PCI_Config",
-	"EmbeddedControl",
-	"SMBus",
-	"CMOS",
-	"PCIBARTarget",
-	"DataTable"
-};
-
-char *acpi_ut_get_region_name(u8 space_id)
-{
-
-	if (space_id >= ACPI_USER_REGION_BEGIN) {
-		return ("UserDefinedRegion");
-	} else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
-		return ("InvalidSpaceId");
-	}
-
-	return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id]));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_event_name
- *
- * PARAMETERS:  event_id            - Fixed event ID
- *
- * RETURN:      Decoded event ID name
- *
- * DESCRIPTION: Translate an Event ID into a name string (Debug only)
- *
- ******************************************************************************/
-
-/* Event type decoding */
-
-static const char *acpi_gbl_event_types[ACPI_NUM_FIXED_EVENTS] = {
-	"PM_Timer",
-	"GlobalLock",
-	"PowerButton",
-	"SleepButton",
-	"RealTimeClock",
-};
-
-char *acpi_ut_get_event_name(u32 event_id)
-{
-
-	if (event_id > ACPI_EVENT_MAX) {
-		return ("InvalidEventID");
-	}
-
-	return (ACPI_CAST_PTR(char, acpi_gbl_event_types[event_id]));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_type_name
- *
- * PARAMETERS:  type                - An ACPI object type
- *
- * RETURN:      Decoded ACPI object type name
- *
- * DESCRIPTION: Translate a Type ID into a name string (Debug only)
- *
- ******************************************************************************/
-
-/*
- * Elements of acpi_gbl_ns_type_names below must match
- * one-to-one with values of acpi_object_type
- *
- * The type ACPI_TYPE_ANY (Untyped) is used as a "don't care" when searching;
- * when stored in a table it really means that we have thus far seen no
- * evidence to indicate what type is actually going to be stored for this entry.
- */
-static const char acpi_gbl_bad_type[] = "UNDEFINED";
-
-/* Printable names of the ACPI object types */
-
-static const char *acpi_gbl_ns_type_names[] = {
-	/* 00 */ "Untyped",
-	/* 01 */ "Integer",
-	/* 02 */ "String",
-	/* 03 */ "Buffer",
-	/* 04 */ "Package",
-	/* 05 */ "FieldUnit",
-	/* 06 */ "Device",
-	/* 07 */ "Event",
-	/* 08 */ "Method",
-	/* 09 */ "Mutex",
-	/* 10 */ "Region",
-	/* 11 */ "Power",
-	/* 12 */ "Processor",
-	/* 13 */ "Thermal",
-	/* 14 */ "BufferField",
-	/* 15 */ "DdbHandle",
-	/* 16 */ "DebugObject",
-	/* 17 */ "RegionField",
-	/* 18 */ "BankField",
-	/* 19 */ "IndexField",
-	/* 20 */ "Reference",
-	/* 21 */ "Alias",
-	/* 22 */ "MethodAlias",
-	/* 23 */ "Notify",
-	/* 24 */ "AddrHandler",
-	/* 25 */ "ResourceDesc",
-	/* 26 */ "ResourceFld",
-	/* 27 */ "Scope",
-	/* 28 */ "Extra",
-	/* 29 */ "Data",
-	/* 30 */ "Invalid"
-};
-
-char *acpi_ut_get_type_name(acpi_object_type type)
-{
-
-	if (type > ACPI_TYPE_INVALID) {
-		return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
-	}
-
-	return (ACPI_CAST_PTR(char, acpi_gbl_ns_type_names[type]));
-}
-
-char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
-{
-
-	if (!obj_desc) {
-		return ("[NULL Object Descriptor]");
-	}
-
-	return (acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE(obj_desc)));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_node_name
- *
- * PARAMETERS:  Object               - A namespace node
- *
- * RETURN:      Pointer to a string
- *
- * DESCRIPTION: Validate the node and return the node's ACPI name.
- *
- ******************************************************************************/
-
-char *acpi_ut_get_node_name(void *object)
-{
-	struct acpi_namespace_node *node = (struct acpi_namespace_node *)object;
-
-	/* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */
-
-	if (!object) {
-		return ("NULL");
-	}
-
-	/* Check for Root node */
-
-	if ((object == ACPI_ROOT_OBJECT) || (object == acpi_gbl_root_node)) {
-		return ("\"\\\" ");
-	}
-
-	/* Descriptor must be a namespace node */
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
-		return ("####");
-	}
-
-	/* Name must be a valid ACPI name */
-
-	if (!acpi_ut_valid_acpi_name(node->name.integer)) {
-		node->name.integer = acpi_ut_repair_name(node->name.ascii);
-	}
-
-	/* Return the name */
-
-	return (node->name.ascii);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_descriptor_name
- *
- * PARAMETERS:  Object               - An ACPI object
- *
- * RETURN:      Pointer to a string
- *
- * DESCRIPTION: Validate object and return the descriptor type
- *
- ******************************************************************************/
-
-/* Printable names of object descriptor types */
-
-static const char *acpi_gbl_desc_type_names[] = {
-	/* 00 */ "Invalid",
-	/* 01 */ "Cached",
-	/* 02 */ "State-Generic",
-	/* 03 */ "State-Update",
-	/* 04 */ "State-Package",
-	/* 05 */ "State-Control",
-	/* 06 */ "State-RootParseScope",
-	/* 07 */ "State-ParseScope",
-	/* 08 */ "State-WalkScope",
-	/* 09 */ "State-Result",
-	/* 10 */ "State-Notify",
-	/* 11 */ "State-Thread",
-	/* 12 */ "Walk",
-	/* 13 */ "Parser",
-	/* 14 */ "Operand",
-	/* 15 */ "Node"
-};
-
-char *acpi_ut_get_descriptor_name(void *object)
-{
-
-	if (!object) {
-		return ("NULL OBJECT");
-	}
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(object) > ACPI_DESC_TYPE_MAX) {
-		return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
-	}
-
-	return (ACPI_CAST_PTR(char,
-			      acpi_gbl_desc_type_names[ACPI_GET_DESCRIPTOR_TYPE
-						       (object)]));
-
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_reference_name
- *
- * PARAMETERS:  Object               - An ACPI reference object
- *
- * RETURN:      Pointer to a string
- *
- * DESCRIPTION: Decode a reference object sub-type to a string.
- *
- ******************************************************************************/
-
-/* Printable names of reference object sub-types */
-
-static const char *acpi_gbl_ref_class_names[] = {
-	/* 00 */ "Local",
-	/* 01 */ "Argument",
-	/* 02 */ "RefOf",
-	/* 03 */ "Index",
-	/* 04 */ "DdbHandle",
-	/* 05 */ "Named Object",
-	/* 06 */ "Debug"
-};
-
-const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
-{
-	if (!object)
-		return "NULL Object";
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND)
-		return "Not an Operand object";
-
-	if (object->common.type != ACPI_TYPE_LOCAL_REFERENCE)
-		return "Not a Reference object";
-
-	if (object->reference.class > ACPI_REFCLASS_MAX)
-		return "Unknown Reference class";
-
-	return acpi_gbl_ref_class_names[object->reference.class];
-}
-
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-/*
- * Strings and procedures used for debug only
- */
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_mutex_name
- *
- * PARAMETERS:  mutex_id        - The predefined ID for this mutex.
- *
- * RETURN:      String containing the name of the mutex. Always returns a valid
- *              pointer.
- *
- * DESCRIPTION: Translate a mutex ID into a name string (Debug only)
- *
- ******************************************************************************/
-
-char *acpi_ut_get_mutex_name(u32 mutex_id)
-{
-
-	if (mutex_id > ACPI_MAX_MUTEX) {
-		return ("Invalid Mutex ID");
-	}
-
-	return (acpi_gbl_mutex_names[mutex_id]);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_notify_name
- *
- * PARAMETERS:  notify_value    - Value from the Notify() request
- *
- * RETURN:      String corresponding to the Notify Value.
- *
- * DESCRIPTION: Translate a Notify Value to a notify namestring.
- *
- ******************************************************************************/
-
-/* Names for Notify() values, used for debug output */
-
-static const char *acpi_gbl_notify_value_names[] = {
-	"Bus Check",
-	"Device Check",
-	"Device Wake",
-	"Eject Request",
-	"Device Check Light",
-	"Frequency Mismatch",
-	"Bus Mode Mismatch",
-	"Power Fault",
-	"Capabilities Check",
-	"Device PLD Check",
-	"Reserved",
-	"System Locality Update"
-};
-
-const char *acpi_ut_get_notify_name(u32 notify_value)
-{
-
-	if (notify_value <= ACPI_NOTIFY_MAX) {
-		return (acpi_gbl_notify_value_names[notify_value]);
-	} else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
-		return ("Reserved");
-	} else {		/* Greater or equal to 0x80 */
-
-		return ("**Device Specific**");
-	}
-}
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_valid_object_type
- *
- * PARAMETERS:  Type            - Object type to be validated
- *
- * RETURN:      TRUE if valid object type, FALSE otherwise
- *
- * DESCRIPTION: Validate an object type
- *
- ******************************************************************************/
-
-u8 acpi_ut_valid_object_type(acpi_object_type type)
-{
-
-	if (type > ACPI_TYPE_LOCAL_MAX) {
-
-		/* Note: Assumes all TYPEs are contiguous (external/local) */
-
-		return (FALSE);
-	}
-
-	return (TRUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_init_globals
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Init library globals.  All globals that require specific
- *              initialization should be initialized here!
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_init_globals(void)
-{
-	acpi_status status;
-	u32 i;
-
-	ACPI_FUNCTION_TRACE(ut_init_globals);
-
-	/* Create all memory caches */
-
-	status = acpi_ut_create_caches();
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Mutex locked flags */
-
-	for (i = 0; i < ACPI_NUM_MUTEX; i++) {
-		acpi_gbl_mutex_info[i].mutex = NULL;
-		acpi_gbl_mutex_info[i].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
-		acpi_gbl_mutex_info[i].use_count = 0;
-	}
-
-	for (i = 0; i < ACPI_NUM_OWNERID_MASKS; i++) {
-		acpi_gbl_owner_id_mask[i] = 0;
-	}
-	acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;	/* Last ID is never valid */
-
-	/* GPE support */
-
-	acpi_gbl_gpe_xrupt_list_head = NULL;
-	acpi_gbl_gpe_fadt_blocks[0] = NULL;
-	acpi_gbl_gpe_fadt_blocks[1] = NULL;
-
-	/* Global handlers */
-
-	acpi_gbl_system_notify.handler = NULL;
-	acpi_gbl_device_notify.handler = NULL;
-	acpi_gbl_exception_handler = NULL;
-	acpi_gbl_init_handler = NULL;
-	acpi_gbl_table_handler = NULL;
-
-	/* Global Lock support */
-
-	acpi_gbl_global_lock_semaphore = NULL;
-	acpi_gbl_global_lock_mutex = NULL;
-	acpi_gbl_global_lock_acquired = FALSE;
-	acpi_gbl_global_lock_handle = 0;
-
-	/* Miscellaneous variables */
-
-	acpi_gbl_cm_single_step = FALSE;
-	acpi_gbl_db_terminate_threads = FALSE;
-	acpi_gbl_shutdown = FALSE;
-	acpi_gbl_ns_lookup_count = 0;
-	acpi_gbl_ps_find_count = 0;
-	acpi_gbl_acpi_hardware_present = TRUE;
-	acpi_gbl_last_owner_id_index = 0;
-	acpi_gbl_next_owner_id_offset = 0;
-	acpi_gbl_trace_method_name = 0;
-	acpi_gbl_trace_dbg_level = 0;
-	acpi_gbl_trace_dbg_layer = 0;
-	acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
-	acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
-
-	/* Hardware oriented */
-
-	acpi_gbl_events_initialized = FALSE;
-	acpi_gbl_system_awake_and_running = TRUE;
-
-	/* Namespace */
-
-	acpi_gbl_root_node = NULL;
-	acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME;
-	acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED;
-	acpi_gbl_root_node_struct.type = ACPI_TYPE_DEVICE;
-	acpi_gbl_root_node_struct.child = NULL;
-	acpi_gbl_root_node_struct.peer = NULL;
-	acpi_gbl_root_node_struct.object = NULL;
-	acpi_gbl_root_node_struct.flags = ANOBJ_END_OF_PEER_LIST;
-
-#ifdef ACPI_DEBUG_OUTPUT
-	acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);
-#endif
-
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-	acpi_gbl_display_final_mem_stats = FALSE;
-#endif
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_dbg_level)
-ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
diff --git a/drivers/acpi/utilities/utinit.c b/drivers/acpi/utilities/utinit.c
deleted file mode 100644
index cae515f..0000000
--- a/drivers/acpi/utilities/utinit.c
+++ /dev/null
@@ -1,151 +0,0 @@
-/******************************************************************************
- *
- * Module Name: utinit - Common ACPI subsystem initialization
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acevents.h>
-#include <acpi/actables.h>
-
-#define _COMPONENT          ACPI_UTILITIES
-ACPI_MODULE_NAME("utinit")
-
-/* Local prototypes */
-static void acpi_ut_terminate(void);
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_ut_terminate
- *
- * PARAMETERS:  none
- *
- * RETURN:      none
- *
- * DESCRIPTION: Free global memory
- *
- ******************************************************************************/
-
-static void acpi_ut_terminate(void)
-{
-	struct acpi_gpe_block_info *gpe_block;
-	struct acpi_gpe_block_info *next_gpe_block;
-	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
-	struct acpi_gpe_xrupt_info *next_gpe_xrupt_info;
-
-	ACPI_FUNCTION_TRACE(ut_terminate);
-
-	/* Free global GPE blocks and related info structures */
-
-	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
-	while (gpe_xrupt_info) {
-		gpe_block = gpe_xrupt_info->gpe_block_list_head;
-		while (gpe_block) {
-			next_gpe_block = gpe_block->next;
-			ACPI_FREE(gpe_block->event_info);
-			ACPI_FREE(gpe_block->register_info);
-			ACPI_FREE(gpe_block);
-
-			gpe_block = next_gpe_block;
-		}
-		next_gpe_xrupt_info = gpe_xrupt_info->next;
-		ACPI_FREE(gpe_xrupt_info);
-		gpe_xrupt_info = next_gpe_xrupt_info;
-	}
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_subsystem_shutdown
- *
- * PARAMETERS:  none
- *
- * RETURN:      none
- *
- * DESCRIPTION: Shut down the various subsystems.  Don't delete the mutex
- *              objects here, because the AML debugger may still be running.
- *
- ******************************************************************************/
-
-void acpi_ut_subsystem_shutdown(void)
-{
-
-	ACPI_FUNCTION_TRACE(ut_subsystem_shutdown);
-
-	/* Just exit if subsystem is already shutdown */
-
-	if (acpi_gbl_shutdown) {
-		ACPI_ERROR((AE_INFO, "ACPI Subsystem is already terminated"));
-		return_VOID;
-	}
-
-	/* Subsystem appears active, go ahead and shut it down */
-
-	acpi_gbl_shutdown = TRUE;
-	acpi_gbl_startup_flags = 0;
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Shutting down ACPI Subsystem\n"));
-
-#ifndef ACPI_ASL_COMPILER
-
-	/* Close the acpi_event Handling */
-
-	acpi_ev_terminate();
-#endif
-
-	/* Close the Namespace */
-
-	acpi_ns_terminate();
-
-	/* Delete the ACPI tables */
-
-	acpi_tb_terminate();
-
-	/* Close the globals */
-
-	acpi_ut_terminate();
-
-	/* Purge the local caches */
-
-	(void)acpi_ut_delete_caches();
-	return_VOID;
-}
diff --git a/drivers/acpi/utilities/utmath.c b/drivers/acpi/utilities/utmath.c
deleted file mode 100644
index c927324..0000000
--- a/drivers/acpi/utilities/utmath.c
+++ /dev/null
@@ -1,311 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: utmath - Integer math support routines
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-
-#define _COMPONENT          ACPI_UTILITIES
-ACPI_MODULE_NAME("utmath")
-
-/*
- * Support for double-precision integer divide.  This code is included here
- * in order to support kernel environments where the double-precision math
- * library is not available.
- */
-#ifndef ACPI_USE_NATIVE_DIVIDE
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_short_divide
- *
- * PARAMETERS:  Dividend            - 64-bit dividend
- *              Divisor             - 32-bit divisor
- *              out_quotient        - Pointer to where the quotient is returned
- *              out_remainder       - Pointer to where the remainder is returned
- *
- * RETURN:      Status (Checks for divide-by-zero)
- *
- * DESCRIPTION: Perform a short (maximum 64 bits divided by 32 bits)
- *              divide and modulo.  The result is a 64-bit quotient and a
- *              32-bit remainder.
- *
- ******************************************************************************/
-acpi_status
-acpi_ut_short_divide(acpi_integer dividend,
-		     u32 divisor,
-		     acpi_integer * out_quotient, u32 * out_remainder)
-{
-	union uint64_overlay dividend_ovl;
-	union uint64_overlay quotient;
-	u32 remainder32;
-
-	ACPI_FUNCTION_TRACE(ut_short_divide);
-
-	/* Always check for a zero divisor */
-
-	if (divisor == 0) {
-		ACPI_ERROR((AE_INFO, "Divide by zero"));
-		return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
-	}
-
-	dividend_ovl.full = dividend;
-
-	/*
-	 * The quotient is 64 bits, the remainder is always 32 bits,
-	 * and is generated by the second divide.
-	 */
-	ACPI_DIV_64_BY_32(0, dividend_ovl.part.hi, divisor,
-			  quotient.part.hi, remainder32);
-	ACPI_DIV_64_BY_32(remainder32, dividend_ovl.part.lo, divisor,
-			  quotient.part.lo, remainder32);
-
-	/* Return only what was requested */
-
-	if (out_quotient) {
-		*out_quotient = quotient.full;
-	}
-	if (out_remainder) {
-		*out_remainder = remainder32;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_divide
- *
- * PARAMETERS:  in_dividend         - Dividend
- *              in_divisor          - Divisor
- *              out_quotient        - Pointer to where the quotient is returned
- *              out_remainder       - Pointer to where the remainder is returned
- *
- * RETURN:      Status (Checks for divide-by-zero)
- *
- * DESCRIPTION: Perform a divide and modulo.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_divide(acpi_integer in_dividend,
-	       acpi_integer in_divisor,
-	       acpi_integer * out_quotient, acpi_integer * out_remainder)
-{
-	union uint64_overlay dividend;
-	union uint64_overlay divisor;
-	union uint64_overlay quotient;
-	union uint64_overlay remainder;
-	union uint64_overlay normalized_dividend;
-	union uint64_overlay normalized_divisor;
-	u32 partial1;
-	union uint64_overlay partial2;
-	union uint64_overlay partial3;
-
-	ACPI_FUNCTION_TRACE(ut_divide);
-
-	/* Always check for a zero divisor */
-
-	if (in_divisor == 0) {
-		ACPI_ERROR((AE_INFO, "Divide by zero"));
-		return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
-	}
-
-	divisor.full = in_divisor;
-	dividend.full = in_dividend;
-	if (divisor.part.hi == 0) {
-		/*
-		 * 1) Simplest case is where the divisor is 32 bits, we can
-		 * just do two divides
-		 */
-		remainder.part.hi = 0;
-
-		/*
-		 * The quotient is 64 bits, the remainder is always 32 bits,
-		 * and is generated by the second divide.
-		 */
-		ACPI_DIV_64_BY_32(0, dividend.part.hi, divisor.part.lo,
-				  quotient.part.hi, partial1);
-		ACPI_DIV_64_BY_32(partial1, dividend.part.lo, divisor.part.lo,
-				  quotient.part.lo, remainder.part.lo);
-	}
-
-	else {
-		/*
-		 * 2) The general case where the divisor is a full 64 bits
-		 * is more difficult
-		 */
-		quotient.part.hi = 0;
-		normalized_dividend = dividend;
-		normalized_divisor = divisor;
-
-		/* Normalize the operands (shift right until the divisor fits in 32 bits) */
-
-		do {
-			ACPI_SHIFT_RIGHT_64(normalized_divisor.part.hi,
-					    normalized_divisor.part.lo);
-			ACPI_SHIFT_RIGHT_64(normalized_dividend.part.hi,
-					    normalized_dividend.part.lo);
-
-		} while (normalized_divisor.part.hi != 0);
-
-		/* Partial divide */
-
-		ACPI_DIV_64_BY_32(normalized_dividend.part.hi,
-				  normalized_dividend.part.lo,
-				  normalized_divisor.part.lo,
-				  quotient.part.lo, partial1);
-
-		/*
-		 * The quotient is always 32 bits, and simply requires adjustment.
-		 * The 64-bit remainder must be generated.
-		 */
-		partial1 = quotient.part.lo * divisor.part.hi;
-		partial2.full =
-		    (acpi_integer) quotient.part.lo * divisor.part.lo;
-		partial3.full = (acpi_integer) partial2.part.hi + partial1;
-
-		remainder.part.hi = partial3.part.lo;
-		remainder.part.lo = partial2.part.lo;
-
-		if (partial3.part.hi == 0) {
-			if (partial3.part.lo >= dividend.part.hi) {
-				if (partial3.part.lo == dividend.part.hi) {
-					if (partial2.part.lo > dividend.part.lo) {
-						quotient.part.lo--;
-						remainder.full -= divisor.full;
-					}
-				} else {
-					quotient.part.lo--;
-					remainder.full -= divisor.full;
-				}
-			}
-
-			remainder.full = remainder.full - dividend.full;
-			remainder.part.hi = (u32) - ((s32) remainder.part.hi);
-			remainder.part.lo = (u32) - ((s32) remainder.part.lo);
-
-			if (remainder.part.lo) {
-				remainder.part.hi--;
-			}
-		}
-	}
-
-	/* Return only what was requested */
-
-	if (out_quotient) {
-		*out_quotient = quotient.full;
-	}
-	if (out_remainder) {
-		*out_remainder = remainder.full;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-#else
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_short_divide, acpi_ut_divide
- *
- * PARAMETERS:  See function headers above
- *
- * DESCRIPTION: Native versions of the ut_divide functions. Use these if either
- *              1) The target is a 64-bit platform and therefore 64-bit
- *                 integer math is supported directly by the machine.
- *              2) The target is a 32-bit or 16-bit platform, and the
- *                 double-precision integer math library is available to
- *                 perform the divide.
- *
- ******************************************************************************/
-acpi_status
-acpi_ut_short_divide(acpi_integer in_dividend,
-		     u32 divisor,
-		     acpi_integer * out_quotient, u32 * out_remainder)
-{
-
-	ACPI_FUNCTION_TRACE(ut_short_divide);
-
-	/* Always check for a zero divisor */
-
-	if (divisor == 0) {
-		ACPI_ERROR((AE_INFO, "Divide by zero"));
-		return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
-	}
-
-	/* Return only what was requested */
-
-	if (out_quotient) {
-		*out_quotient = in_dividend / divisor;
-	}
-	if (out_remainder) {
-		*out_remainder = (u32) (in_dividend % divisor);
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-acpi_status
-acpi_ut_divide(acpi_integer in_dividend,
-	       acpi_integer in_divisor,
-	       acpi_integer * out_quotient, acpi_integer * out_remainder)
-{
-	ACPI_FUNCTION_TRACE(ut_divide);
-
-	/* Always check for a zero divisor */
-
-	if (in_divisor == 0) {
-		ACPI_ERROR((AE_INFO, "Divide by zero"));
-		return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
-	}
-
-	/* Return only what was requested */
-
-	if (out_quotient) {
-		*out_quotient = in_dividend / in_divisor;
-	}
-	if (out_remainder) {
-		*out_remainder = in_dividend % in_divisor;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-#endif
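
The non-native acpi_ut_short_divide above reduces a 64-by-32 division to two 32-bit-limb steps, feeding the remainder of the high-half divide into the low-half divide. A minimal sketch of that decomposition follows, written with plain C99 types and hypothetical names rather than the ACPI_DIV_64_BY_32 macro; as in the original, the caller is assumed to have rejected a zero divisor already.

#include <stdint.h>

static uint64_t short_divide_sketch(uint64_t dividend, uint32_t divisor,
				    uint32_t *remainder)
{
	uint32_t hi = (uint32_t)(dividend >> 32);
	uint32_t lo = (uint32_t)dividend;
	uint64_t partial, q_hi, q_lo;

	q_hi = hi / divisor;	/* high half of the 64-bit quotient */

	/* The remainder of the first divide becomes the upper half of the
	 * dividend for the second divide, mirroring the second
	 * ACPI_DIV_64_BY_32() invocation above. */
	partial = ((uint64_t)(hi % divisor) << 32) | lo;
	q_lo = partial / divisor;

	if (remainder)
		*remainder = (uint32_t)(partial % divisor);

	return (q_hi << 32) | q_lo;
}
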
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c
deleted file mode 100644
index 9089a15..0000000
--- a/drivers/acpi/utilities/utmisc.c
+++ /dev/null
@@ -1,1090 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: utmisc - common utility procedures
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <linux/module.h>
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_UTILITIES
-ACPI_MODULE_NAME("utmisc")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_validate_exception
- *
- * PARAMETERS:  Status       - The acpi_status code to be formatted
- *
- * RETURN:      A string containing the exception text. NULL if exception is
- *              not valid.
- *
- * DESCRIPTION: This function validates and translates an ACPI exception into
- *              an ASCII string.
- *
- ******************************************************************************/
-const char *acpi_ut_validate_exception(acpi_status status)
-{
-	u32 sub_status;
-	const char *exception = NULL;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/*
-	 * Status is composed of two parts, a "type" and an actual code
-	 */
-	sub_status = (status & ~AE_CODE_MASK);
-
-	switch (status & AE_CODE_MASK) {
-	case AE_CODE_ENVIRONMENTAL:
-
-		if (sub_status <= AE_CODE_ENV_MAX) {
-			exception = acpi_gbl_exception_names_env[sub_status];
-		}
-		break;
-
-	case AE_CODE_PROGRAMMER:
-
-		if (sub_status <= AE_CODE_PGM_MAX) {
-			exception = acpi_gbl_exception_names_pgm[sub_status];
-		}
-		break;
-
-	case AE_CODE_ACPI_TABLES:
-
-		if (sub_status <= AE_CODE_TBL_MAX) {
-			exception = acpi_gbl_exception_names_tbl[sub_status];
-		}
-		break;
-
-	case AE_CODE_AML:
-
-		if (sub_status <= AE_CODE_AML_MAX) {
-			exception = acpi_gbl_exception_names_aml[sub_status];
-		}
-		break;
-
-	case AE_CODE_CONTROL:
-
-		if (sub_status <= AE_CODE_CTRL_MAX) {
-			exception = acpi_gbl_exception_names_ctrl[sub_status];
-		}
-		break;
-
-	default:
-		break;
-	}
-
-	return (ACPI_CAST_PTR(const char, exception));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_is_aml_table
- *
- * PARAMETERS:  Table               - An ACPI table
- *
- * RETURN:      TRUE if table contains executable AML; FALSE otherwise
- *
- * DESCRIPTION: Check ACPI Signature for a table that contains AML code.
- *              Currently, these are DSDT,SSDT,PSDT. All other table types are
- *              data tables that do not contain AML code.
- *
- ******************************************************************************/
-
-u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
-{
-
-	/* These are the only tables that contain executable AML */
-
-	if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) ||
-	    ACPI_COMPARE_NAME(table->signature, ACPI_SIG_PSDT) ||
-	    ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
-		return (TRUE);
-	}
-
-	return (FALSE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_allocate_owner_id
- *
- * PARAMETERS:  owner_id        - Where the new owner ID is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Allocate a table or method owner ID. The owner ID is used to
- *              track objects created by the table or method, to be deleted
- *              when the method exits or the table is unloaded.
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
-{
-	u32 i;
-	u32 j;
-	u32 k;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ut_allocate_owner_id);
-
-	/* Guard against multiple allocations of ID to the same location */
-
-	if (*owner_id) {
-		ACPI_ERROR((AE_INFO, "Owner ID [%2.2X] already exists",
-			    *owner_id));
-		return_ACPI_STATUS(AE_ALREADY_EXISTS);
-	}
-
-	/* Mutex for the global ID mask */
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * Find a free owner ID, cycle through all possible IDs on repeated
-	 * allocations. (ACPI_NUM_OWNERID_MASKS + 1) because first index may have
-	 * to be scanned twice.
-	 */
-	for (i = 0, j = acpi_gbl_last_owner_id_index;
-	     i < (ACPI_NUM_OWNERID_MASKS + 1); i++, j++) {
-		if (j >= ACPI_NUM_OWNERID_MASKS) {
-			j = 0;	/* Wraparound to start of mask array */
-		}
-
-		for (k = acpi_gbl_next_owner_id_offset; k < 32; k++) {
-			if (acpi_gbl_owner_id_mask[j] == ACPI_UINT32_MAX) {
-
-				/* There are no free IDs in this mask */
-
-				break;
-			}
-
-			if (!(acpi_gbl_owner_id_mask[j] & (1 << k))) {
-				/*
-				 * Found a free ID. The actual ID is the bit index plus one,
-				 * making zero an invalid Owner ID. Save this as the last ID
-				 * allocated and update the global ID mask.
-				 */
-				acpi_gbl_owner_id_mask[j] |= (1 << k);
-
-				acpi_gbl_last_owner_id_index = (u8) j;
-				acpi_gbl_next_owner_id_offset = (u8) (k + 1);
-
-				/*
-				 * Construct encoded ID from the index and bit position
-				 *
-				 * Note: Last [j].k (bit 255) is never used and is marked
-				 * permanently allocated (prevents +1 overflow)
-				 */
-				*owner_id =
-				    (acpi_owner_id) ((k + 1) + ACPI_MUL_32(j));
-
-				ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
-						  "Allocated OwnerId: %2.2X\n",
-						  (unsigned int)*owner_id));
-				goto exit;
-			}
-		}
-
-		acpi_gbl_next_owner_id_offset = 0;
-	}
-
-	/*
-	 * All owner_ids have been allocated. This typically should
-	 * not happen since the IDs are reused after deallocation. The IDs are
-	 * allocated upon table load (one per table) and method execution, and
-	 * they are released when a table is unloaded or a method completes
-	 * execution.
-	 *
-	 * If this error happens, there may be very deep nesting of invoked control
-	 * methods, or there may be a bug where the IDs are not released.
-	 */
-	status = AE_OWNER_ID_LIMIT;
-	ACPI_ERROR((AE_INFO,
-		    "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT"));
-
-      exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_release_owner_id
- *
- * PARAMETERS:  owner_id_ptr        - Pointer to a previously allocated owner_id
- *
- * RETURN:      None. No error is returned because we are either exiting a
- *              control method or unloading a table. Either way, we would
- *              ignore any error anyway.
- *
- * DESCRIPTION: Release a table or method owner ID.  Valid IDs are 1 - 255
- *
- ******************************************************************************/
-
-void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
-{
-	acpi_owner_id owner_id = *owner_id_ptr;
-	acpi_status status;
-	u32 index;
-	u32 bit;
-
-	ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id);
-
-	/* Always clear the input owner_id (zero is an invalid ID) */
-
-	*owner_id_ptr = 0;
-
-	/* Zero is not a valid owner_id */
-
-	if (owner_id == 0) {
-		ACPI_ERROR((AE_INFO, "Invalid OwnerId: %2.2X", owner_id));
-		return_VOID;
-	}
-
-	/* Mutex for the global ID mask */
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
-	if (ACPI_FAILURE(status)) {
-		return_VOID;
-	}
-
-	/* Normalize the ID to zero */
-
-	owner_id--;
-
-	/* Decode ID to index/offset pair */
-
-	index = ACPI_DIV_32(owner_id);
-	bit = 1 << ACPI_MOD_32(owner_id);
-
-	/* Free the owner ID only if it is valid */
-
-	if (acpi_gbl_owner_id_mask[index] & bit) {
-		acpi_gbl_owner_id_mask[index] ^= bit;
-	} else {
-		ACPI_ERROR((AE_INFO,
-			    "Release of non-allocated OwnerId: %2.2X",
-			    owner_id + 1));
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_strupr (strupr)
- *
- * PARAMETERS:  src_string      - The source string to convert
- *
- * RETURN:      None
- *
- * DESCRIPTION: Convert string to uppercase
- *
- * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
- *
- ******************************************************************************/
-
-void acpi_ut_strupr(char *src_string)
-{
-	char *string;
-
-	ACPI_FUNCTION_ENTRY();
-
-	if (!src_string) {
-		return;
-	}
-
-	/* Walk entire string, uppercasing the letters */
-
-	for (string = src_string; *string; string++) {
-		*string = (char)ACPI_TOUPPER(*string);
-	}
-
-	return;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_print_string
- *
- * PARAMETERS:  String          - Null terminated ASCII string
- *              max_length      - Maximum output length
- *
- * RETURN:      None
- *
- * DESCRIPTION: Dump an ASCII string with support for ACPI-defined escape
- *              sequences.
- *
- ******************************************************************************/
-
-void acpi_ut_print_string(char *string, u8 max_length)
-{
-	u32 i;
-
-	if (!string) {
-		acpi_os_printf("<\"NULL STRING PTR\">");
-		return;
-	}
-
-	acpi_os_printf("\"");
-	for (i = 0; string[i] && (i < max_length); i++) {
-
-		/* Escape sequences */
-
-		switch (string[i]) {
-		case 0x07:
-			acpi_os_printf("\\a");	/* BELL */
-			break;
-
-		case 0x08:
-			acpi_os_printf("\\b");	/* BACKSPACE */
-			break;
-
-		case 0x0C:
-			acpi_os_printf("\\f");	/* FORMFEED */
-			break;
-
-		case 0x0A:
-			acpi_os_printf("\\n");	/* LINEFEED */
-			break;
-
-		case 0x0D:
-			acpi_os_printf("\\r");	/* CARRIAGE RETURN */
-			break;
-
-		case 0x09:
-			acpi_os_printf("\\t");	/* HORIZONTAL TAB */
-			break;
-
-		case 0x0B:
-			acpi_os_printf("\\v");	/* VERTICAL TAB */
-			break;
-
-		case '\'':	/* Single Quote */
-		case '\"':	/* Double Quote */
-		case '\\':	/* Backslash */
-			acpi_os_printf("\\%c", (int)string[i]);
-			break;
-
-		default:
-
-			/* Check for printable character or hex escape */
-
-			if (ACPI_IS_PRINT(string[i])) {
-				/* This is a normal character */
-
-				acpi_os_printf("%c", (int)string[i]);
-			} else {
-				/* All others will be Hex escapes */
-
-				acpi_os_printf("\\x%2.2X", (s32) string[i]);
-			}
-			break;
-		}
-	}
-	acpi_os_printf("\"");
-
-	if (i == max_length && string[i]) {
-		acpi_os_printf("...");
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_dword_byte_swap
- *
- * PARAMETERS:  Value           - Value to be converted
- *
- * RETURN:      u32 integer with bytes swapped
- *
- * DESCRIPTION: Convert a 32-bit value to big-endian (swap the bytes)
- *
- ******************************************************************************/
-
-u32 acpi_ut_dword_byte_swap(u32 value)
-{
-	union {
-		u32 value;
-		u8 bytes[4];
-	} out;
-	union {
-		u32 value;
-		u8 bytes[4];
-	} in;
-
-	ACPI_FUNCTION_ENTRY();
-
-	in.value = value;
-
-	out.bytes[0] = in.bytes[3];
-	out.bytes[1] = in.bytes[2];
-	out.bytes[2] = in.bytes[1];
-	out.bytes[3] = in.bytes[0];
-
-	return (out.value);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_set_integer_width
- *
- * PARAMETERS:  Revision            - From DSDT header
- *
- * RETURN:      None
- *
- * DESCRIPTION: Set the global integer bit width based upon the revision
- *              of the DSDT.  For Revision 1 and 0, Integers are 32 bits.
- *              For Revision 2 and above, Integers are 64 bits.  Yes, this
- *              makes a difference.
- *
- ******************************************************************************/
-
-void acpi_ut_set_integer_width(u8 revision)
-{
-
-	if (revision < 2) {
-
-		/* 32-bit case */
-
-		acpi_gbl_integer_bit_width = 32;
-		acpi_gbl_integer_nybble_width = 8;
-		acpi_gbl_integer_byte_width = 4;
-	} else {
-		/* 64-bit case (ACPI 2.0+) */
-
-		acpi_gbl_integer_bit_width = 64;
-		acpi_gbl_integer_nybble_width = 16;
-		acpi_gbl_integer_byte_width = 8;
-	}
-}
-
-#ifdef ACPI_DEBUG_OUTPUT
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_display_init_pathname
- *
- * PARAMETERS:  Type                - Object type of the node
- *              obj_handle          - Handle whose pathname will be displayed
- *              Path                - Additional path string to be appended.
- *                                      (NULL if no extra path)
- *
- * RETURN:      acpi_status
- *
- * DESCRIPTION: Display full pathname of an object, DEBUG ONLY
- *
- ******************************************************************************/
-
-void
-acpi_ut_display_init_pathname(u8 type,
-			      struct acpi_namespace_node *obj_handle,
-			      char *path)
-{
-	acpi_status status;
-	struct acpi_buffer buffer;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* Only print the path if the appropriate debug level is enabled */
-
-	if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
-		return;
-	}
-
-	/* Get the full pathname to the node */
-
-	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
-	status = acpi_ns_handle_to_pathname(obj_handle, &buffer);
-	if (ACPI_FAILURE(status)) {
-		return;
-	}
-
-	/* Print what we're doing */
-
-	switch (type) {
-	case ACPI_TYPE_METHOD:
-		acpi_os_printf("Executing  ");
-		break;
-
-	default:
-		acpi_os_printf("Initializing ");
-		break;
-	}
-
-	/* Print the object type and pathname */
-
-	acpi_os_printf("%-12s %s",
-		       acpi_ut_get_type_name(type), (char *)buffer.pointer);
-
-	/* Extra path is used to append names like _STA, _INI, etc. */
-
-	if (path) {
-		acpi_os_printf(".%s", path);
-	}
-	acpi_os_printf("\n");
-
-	ACPI_FREE(buffer.pointer);
-}
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_valid_acpi_char
- *
- * PARAMETERS:  Char            - The character to be examined
- *              Position        - Byte position (0-3)
- *
- * RETURN:      TRUE if the character is valid, FALSE otherwise
- *
- * DESCRIPTION: Check for a valid ACPI character. Must be one of:
- *              1) Upper case alpha
- *              2) numeric
- *              3) underscore
- *
- *              We allow a '!' as the last character because of the ASF! table
- *
- ******************************************************************************/
-
-u8 acpi_ut_valid_acpi_char(char character, u32 position)
-{
-
-	if (!((character >= 'A' && character <= 'Z') ||
-	      (character >= '0' && character <= '9') || (character == '_'))) {
-
-		/* Allow a '!' in the last position */
-
-		if (character == '!' && position == 3) {
-			return (TRUE);
-		}
-
-		return (FALSE);
-	}
-
-	return (TRUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_valid_acpi_name
- *
- * PARAMETERS:  Name            - The name to be examined
- *
- * RETURN:      TRUE if the name is valid, FALSE otherwise
- *
- * DESCRIPTION: Check for a valid ACPI name.  Each character must be one of:
- *              1) Upper case alpha
- *              2) numeric
- *              3) underscore
- *
- ******************************************************************************/
-
-u8 acpi_ut_valid_acpi_name(u32 name)
-{
-	u32 i;
-
-	ACPI_FUNCTION_ENTRY();
-
-	for (i = 0; i < ACPI_NAME_SIZE; i++) {
-		if (!acpi_ut_valid_acpi_char
-		    ((ACPI_CAST_PTR(char, &name))[i], i)) {
-			return (FALSE);
-		}
-	}
-
-	return (TRUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_repair_name
- *
- * PARAMETERS:  Name            - The ACPI name to be repaired
- *
- * RETURN:      Repaired version of the name
- *
- * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and
- *              return the new name.
- *
- ******************************************************************************/
-
-acpi_name acpi_ut_repair_name(char *name)
-{
-	u32 i;
-	char new_name[ACPI_NAME_SIZE];
-
-	for (i = 0; i < ACPI_NAME_SIZE; i++) {
-		new_name[i] = name[i];
-
-		/*
-		 * Replace a bad character with something printable, yet technically
-		 * still invalid. This prevents any collisions with existing "good"
-		 * names in the namespace.
-		 */
-		if (!acpi_ut_valid_acpi_char(name[i], i)) {
-			new_name[i] = '*';
-		}
-	}
-
-	return (*(u32 *) new_name);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_strtoul64
- *
- * PARAMETERS:  String          - Null terminated string
- *              Base            - Radix of the string: 16 or ACPI_ANY_BASE;
- *                                ACPI_ANY_BASE means 'on behalf of to_integer'
- *              ret_integer     - Where the converted integer is returned
- *
- * RETURN:      Status and Converted value
- *
- * DESCRIPTION: Convert a string into an unsigned value. Performs either a
- *              32-bit or 64-bit conversion, depending on the current mode
- *              of the interpreter.
- *              NOTE: Does not support Octal strings, not needed.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer)
-{
-	u32 this_digit = 0;
-	acpi_integer return_value = 0;
-	acpi_integer quotient;
-	acpi_integer dividend;
-	u32 to_integer_op = (base == ACPI_ANY_BASE);
-	u32 mode32 = (acpi_gbl_integer_byte_width == 4);
-	u8 valid_digits = 0;
-	u8 sign_of0x = 0;
-	u8 term = 0;
-
-	ACPI_FUNCTION_TRACE_STR(ut_strtoul64, string);
-
-	switch (base) {
-	case ACPI_ANY_BASE:
-	case 16:
-		break;
-
-	default:
-		/* Invalid Base */
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	if (!string) {
-		goto error_exit;
-	}
-
-	/* Skip over any white space in the buffer */
-
-	while ((*string) && (ACPI_IS_SPACE(*string) || *string == '\t')) {
-		string++;
-	}
-
-	if (to_integer_op) {
-		/*
-		 * Base equal to ACPI_ANY_BASE means 'to_integer operation case'.
-		 * We need to determine if it is decimal or hexadecimal.
-		 */
-		if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
-			sign_of0x = 1;
-			base = 16;
-
-			/* Skip over the leading '0x' */
-			string += 2;
-		} else {
-			base = 10;
-		}
-	}
-
-	/* Any string left? Check that '0x' is not followed by white space. */
-
-	if (!(*string) || ACPI_IS_SPACE(*string) || *string == '\t') {
-		if (to_integer_op) {
-			goto error_exit;
-		} else {
-			goto all_done;
-		}
-	}
-
-	/*
-	 * Perform a 32-bit or 64-bit conversion, depending upon the current
-	 * execution mode of the interpreter
-	 */
-	dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
-
-	/* Main loop: convert the string to a 32- or 64-bit integer */
-
-	while (*string) {
-		if (ACPI_IS_DIGIT(*string)) {
-
-			/* Convert ASCII 0-9 to Decimal value */
-
-			this_digit = ((u8) * string) - '0';
-		} else if (base == 10) {
-
-			/* Digit is out of range; possible in to_integer case only */
-
-			term = 1;
-		} else {
-			this_digit = (u8) ACPI_TOUPPER(*string);
-			if (ACPI_IS_XDIGIT((char)this_digit)) {
-
-				/* Convert ASCII Hex char to value */
-
-				this_digit = this_digit - 'A' + 10;
-			} else {
-				term = 1;
-			}
-		}
-
-		if (term) {
-			if (to_integer_op) {
-				goto error_exit;
-			} else {
-				break;
-			}
-		} else if ((valid_digits == 0) && (this_digit == 0)
-			   && !sign_of0x) {
-
-			/* Skip zeros */
-			string++;
-			continue;
-		}
-
-		valid_digits++;
-
-		if (sign_of0x && ((valid_digits > 16)
-				  || ((valid_digits > 8) && mode32))) {
-			/*
-			 * This is the to_integer operation case (explicit '0x'
-			 * prefix): more hex digits than the current integer
-			 * width can hold is an error.
-			 */
-			goto error_exit;
-		}
-
-		/* Divide the digit into the correct position */
-
-		(void)
-		    acpi_ut_short_divide((dividend - (acpi_integer) this_digit),
-					 base, &quotient, NULL);
-
-		if (return_value > quotient) {
-			if (to_integer_op) {
-				goto error_exit;
-			} else {
-				break;
-			}
-		}
-
-		return_value *= base;
-		return_value += this_digit;
-		string++;
-	}
-
-	/* All done, normal exit */
-
-      all_done:
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
-			  ACPI_FORMAT_UINT64(return_value)));
-
-	*ret_integer = return_value;
-	return_ACPI_STATUS(AE_OK);
-
-      error_exit:
-	/* Base was set/validated above */
-
-	if (base == 10) {
-		return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
-	} else {
-		return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_create_update_state_and_push
- *
- * PARAMETERS:  Object          - Object to be added to the new state
- *              Action          - Increment/Decrement
- *              state_list      - List the state will be added to
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a new state and push it
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
-				     u16 action,
-				     union acpi_generic_state **state_list)
-{
-	union acpi_generic_state *state;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* Ignore null objects; these are expected */
-
-	if (!object) {
-		return (AE_OK);
-	}
-
-	state = acpi_ut_create_update_state(object, action);
-	if (!state) {
-		return (AE_NO_MEMORY);
-	}
-
-	acpi_ut_push_generic_state(state_list, state);
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_walk_package_tree
- *
- * PARAMETERS:  source_object       - The package to walk
- *              target_object       - Target object (if package is being copied)
- *              walk_callback       - Called once for each package element
- *              Context             - Passed to the callback function
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Walk through a package
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
-			  void *target_object,
-			  acpi_pkg_callback walk_callback, void *context)
-{
-	acpi_status status = AE_OK;
-	union acpi_generic_state *state_list = NULL;
-	union acpi_generic_state *state;
-	u32 this_index;
-	union acpi_operand_object *this_source_obj;
-
-	ACPI_FUNCTION_TRACE(ut_walk_package_tree);
-
-	state = acpi_ut_create_pkg_state(source_object, target_object, 0);
-	if (!state) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	while (state) {
-
-		/* Get one element of the package */
-
-		this_index = state->pkg.index;
-		this_source_obj = (union acpi_operand_object *)
-		    state->pkg.source_object->package.elements[this_index];
-
-		/*
-		 * Check for:
-		 * 1) An uninitialized package element.  It is completely
-		 *    legal to declare a package and leave it uninitialized
-		 * 2) Not an internal object - can be a namespace node instead
-		 * 3) Any type other than a package.  Packages are handled in else
-		 *    case below.
-		 */
-		if ((!this_source_obj) ||
-		    (ACPI_GET_DESCRIPTOR_TYPE(this_source_obj) !=
-		     ACPI_DESC_TYPE_OPERAND)
-		    || (ACPI_GET_OBJECT_TYPE(this_source_obj) !=
-			ACPI_TYPE_PACKAGE)) {
-			status =
-			    walk_callback(ACPI_COPY_TYPE_SIMPLE,
-					  this_source_obj, state, context);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-
-			state->pkg.index++;
-			while (state->pkg.index >=
-			       state->pkg.source_object->package.count) {
-				/*
-				 * We've handled all of the objects at this level.  This means
-				 * that we have just completed a package.  That package may
-				 * have contained one or more packages itself.
-				 *
-				 * Delete this state and pop the previous state (package).
-				 */
-				acpi_ut_delete_generic_state(state);
-				state = acpi_ut_pop_generic_state(&state_list);
-
-				/* Finished when there are no more states */
-
-				if (!state) {
-					/*
-					 * We have handled all of the objects in the top-level
-					 * package, so the walk is complete; return to the
-					 * caller.
-					 */
-					return_ACPI_STATUS(AE_OK);
-				}
-
-				/*
-				 * Go back up a level and move the index past the just
-				 * completed package object.
-				 */
-				state->pkg.index++;
-			}
-		} else {
-			/* This is a subobject of type package */
-
-			status =
-			    walk_callback(ACPI_COPY_TYPE_PACKAGE,
-					  this_source_obj, state, context);
-			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
-			}
-
-			/*
-			 * Push the current state and create a new one
-			 * The callback above returned a new target package object.
-			 */
-			acpi_ut_push_generic_state(&state_list, state);
-			state = acpi_ut_create_pkg_state(this_source_obj,
-							 state->pkg.
-							 this_target_obj, 0);
-			if (!state) {
-
-				/* Free any stacked Update State objects */
-
-				while (state_list) {
-					state =
-					    acpi_ut_pop_generic_state
-					    (&state_list);
-					acpi_ut_delete_generic_state(state);
-				}
-				return_ACPI_STATUS(AE_NO_MEMORY);
-			}
-		}
-	}
-
-	/* We should never get here */
-
-	return_ACPI_STATUS(AE_AML_INTERNAL);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_error, acpi_ut_warning, acpi_ut_info
- *
- * PARAMETERS:  module_name         - Caller's module name (for error output)
- *              line_number         - Caller's line number (for error output)
- *              Format              - Printf format string + additional args
- *
- * RETURN:      None
- *
- * DESCRIPTION: Print message with module/line/version info
- *
- ******************************************************************************/
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_error(const char *module_name, u32 line_number, const char *format, ...)
-{
-	va_list args;
-
-	acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
-
-	va_start(args, format);
-	acpi_os_vprintf(format, args);
-	acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
-	va_end(args);
-}
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_exception(const char *module_name,
-		  u32 line_number, acpi_status status, const char *format, ...)
-{
-	va_list args;
-
-	acpi_os_printf("ACPI Exception (%s-%04d): %s, ", module_name,
-		       line_number, acpi_format_exception(status));
-
-	va_start(args, format);
-	acpi_os_vprintf(format, args);
-	acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
-	va_end(args);
-}
-
-EXPORT_SYMBOL(acpi_ut_exception);
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_warning(const char *module_name,
-		u32 line_number, const char *format, ...)
-{
-	va_list args;
-
-	acpi_os_printf("ACPI Warning (%s-%04d): ", module_name, line_number);
-
-	va_start(args, format);
-	acpi_os_vprintf(format, args);
-	acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
-	va_end(args);
-}
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_info(const char *module_name, u32 line_number, const char *format, ...)
-{
-	va_list args;
-
-	/*
-	 * Removed module_name, line_number, and acpica version, not needed
-	 * for info output
-	 */
-	acpi_os_printf("ACPI: ");
-
-	va_start(args, format);
-	acpi_os_vprintf(format, args);
-	acpi_os_printf("\n");
-	va_end(args);
-}
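
The owner-ID code above packs IDs into an array of 32-bit mask words: a set bit k in word j encodes ID 32*j + k + 1, with zero reserved as the invalid ID, and release reverses the mapping. A small sketch of that encode/release round trip, using hypothetical names in place of the acpi_gbl_owner_id_mask globals and the ACPI_DIV_32/ACPI_MOD_32 macros:

#include <stdint.h>

#define MASK_WORDS 8	/* hypothetical stand-in for ACPI_NUM_OWNERID_MASKS */

static uint32_t id_mask[MASK_WORDS];

/* Mark word j, bit k as used and return the encoded owner ID.
 * The +1 keeps zero reserved as "no owner ID". */
static unsigned int claim_owner_id(unsigned int j, unsigned int k)
{
	id_mask[j] |= 1u << k;
	return 32 * j + k + 1;
}

/* Reverse mapping used on release: normalize the ID to zero, then
 * split it back into a word index and a bit position. */
static void release_owner_id(unsigned int id)
{
	unsigned int index = (id - 1) / 32;		/* like ACPI_DIV_32() */
	uint32_t bit = 1u << ((id - 1) % 32);		/* like ACPI_MOD_32() */

	if (id_mask[index] & bit)
		id_mask[index] ^= bit;	/* clear only IDs that were allocated */
}
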
diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/utilities/utmutex.c
deleted file mode 100644
index 7331dde..0000000
--- a/drivers/acpi/utilities/utmutex.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: utmutex - local mutex support
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-
-#define _COMPONENT          ACPI_UTILITIES
-ACPI_MODULE_NAME("utmutex")
-
-/* Local prototypes */
-static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id);
-
-static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_mutex_initialize
- *
- * PARAMETERS:  None.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create the system mutex objects.
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_mutex_initialize(void)
-{
-	u32 i;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ut_mutex_initialize);
-
-	/*
-	 * Create each of the predefined mutex objects
-	 */
-	for (i = 0; i < ACPI_NUM_MUTEX; i++) {
-		status = acpi_ut_create_mutex(i);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/* Create the spinlocks for use at interrupt level */
-
-	spin_lock_init(acpi_gbl_gpe_lock);
-	spin_lock_init(acpi_gbl_hardware_lock);
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_mutex_terminate
- *
- * PARAMETERS:  None.
- *
- * RETURN:      None.
- *
- * DESCRIPTION: Delete all of the system mutex objects.
- *
- ******************************************************************************/
-
-void acpi_ut_mutex_terminate(void)
-{
-	u32 i;
-
-	ACPI_FUNCTION_TRACE(ut_mutex_terminate);
-
-	/*
-	 * Delete each predefined mutex object
-	 */
-	for (i = 0; i < ACPI_NUM_MUTEX; i++) {
-		(void)acpi_ut_delete_mutex(i);
-	}
-
-	/* Delete the spinlocks */
-
-	acpi_os_delete_lock(acpi_gbl_gpe_lock);
-	acpi_os_delete_lock(acpi_gbl_hardware_lock);
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_create_mutex
- *
- * PARAMETERS:  mutex_id        - ID of the mutex to be created
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a mutex object.
- *
- ******************************************************************************/
-
-static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE_U32(ut_create_mutex, mutex_id);
-
-	if (mutex_id > ACPI_MAX_MUTEX) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	if (!acpi_gbl_mutex_info[mutex_id].mutex) {
-		status =
-		    acpi_os_create_mutex(&acpi_gbl_mutex_info[mutex_id].mutex);
-		acpi_gbl_mutex_info[mutex_id].thread_id =
-		    ACPI_MUTEX_NOT_ACQUIRED;
-		acpi_gbl_mutex_info[mutex_id].use_count = 0;
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_delete_mutex
- *
- * PARAMETERS:  mutex_id        - ID of the mutex to be deleted
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Delete a mutex object.
- *
- ******************************************************************************/
-
-static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
-{
-
-	ACPI_FUNCTION_TRACE_U32(ut_delete_mutex, mutex_id);
-
-	if (mutex_id > ACPI_MAX_MUTEX) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	acpi_os_delete_mutex(acpi_gbl_mutex_info[mutex_id].mutex);
-
-	acpi_gbl_mutex_info[mutex_id].mutex = NULL;
-	acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_acquire_mutex
- *
- * PARAMETERS:  mutex_id        - ID of the mutex to be acquired
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Acquire a mutex object.
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
-{
-	acpi_status status;
-	acpi_thread_id this_thread_id;
-
-	ACPI_FUNCTION_NAME(ut_acquire_mutex);
-
-	if (mutex_id > ACPI_MAX_MUTEX) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	this_thread_id = acpi_os_get_thread_id();
-
-#ifdef ACPI_MUTEX_DEBUG
-	{
-		u32 i;
-		/*
-		 * Mutex debug code, for internal debugging only.
-		 *
-		 * Deadlock prevention.  Check if this thread owns any mutexes of value
-		 * greater than or equal to this one.  If so, the thread has violated
-		 * the mutex ordering rule.  This indicates a coding error somewhere in
-		 * the ACPI subsystem code.
-		 */
-		for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
-			if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
-				if (i == mutex_id) {
-					ACPI_ERROR((AE_INFO,
-						    "Mutex [%s] already acquired by this thread [%X]",
-						    acpi_ut_get_mutex_name
-						    (mutex_id),
-						    this_thread_id));
-
-					return (AE_ALREADY_ACQUIRED);
-				}
-
-				ACPI_ERROR((AE_INFO,
-					    "Invalid acquire order: Thread %X owns [%s], wants [%s]",
-					    this_thread_id,
-					    acpi_ut_get_mutex_name(i),
-					    acpi_ut_get_mutex_name(mutex_id)));
-
-				return (AE_ACQUIRE_DEADLOCK);
-			}
-		}
-	}
-#endif
-
-	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
-			  "Thread %lX attempting to acquire Mutex [%s]\n",
-			  (unsigned long)this_thread_id,
-			  acpi_ut_get_mutex_name(mutex_id)));
-
-	status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
-				       ACPI_WAIT_FOREVER);
-	if (ACPI_SUCCESS(status)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
-				  "Thread %lX acquired Mutex [%s]\n",
-				  (unsigned long)this_thread_id,
-				  acpi_ut_get_mutex_name(mutex_id)));
-
-		acpi_gbl_mutex_info[mutex_id].use_count++;
-		acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
-	} else {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Thread %lX could not acquire Mutex [%X]",
-				(unsigned long)this_thread_id, mutex_id));
-	}
-
-	return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_release_mutex
- *
- * PARAMETERS:  mutex_id        - ID of the mutex to be released
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Release a mutex object.
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
-{
-	acpi_thread_id this_thread_id;
-
-	ACPI_FUNCTION_NAME(ut_release_mutex);
-
-	this_thread_id = acpi_os_get_thread_id();
-	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
-			  "Thread %lX releasing Mutex [%s]\n",
-			  (unsigned long)this_thread_id,
-			  acpi_ut_get_mutex_name(mutex_id)));
-
-	if (mutex_id > ACPI_MAX_MUTEX) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	/*
-	 * Mutex must be acquired in order to release it!
-	 */
-	if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
-		ACPI_ERROR((AE_INFO,
-			    "Mutex [%X] is not acquired, cannot release",
-			    mutex_id));
-
-		return (AE_NOT_ACQUIRED);
-	}
-#ifdef ACPI_MUTEX_DEBUG
-	{
-		u32 i;
-		/*
-		 * Mutex debug code, for internal debugging only.
-		 *
-		 * Deadlock prevention.  Check if this thread owns any mutexes of value
-		 * greater than this one.  If so, the thread has violated the mutex
-		 * ordering rule.  This indicates a coding error somewhere in
-		 * the ACPI subsystem code.
-		 */
-		for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
-			if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
-				if (i == mutex_id) {
-					continue;
-				}
-
-				ACPI_ERROR((AE_INFO,
-					    "Invalid release order: owns [%s], releasing [%s]",
-					    acpi_ut_get_mutex_name(i),
-					    acpi_ut_get_mutex_name(mutex_id)));
-
-				return (AE_RELEASE_DEADLOCK);
-			}
-		}
-	}
-#endif
-
-	/* Mark unlocked FIRST */
-
-	acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
-
-	acpi_os_release_mutex(acpi_gbl_mutex_info[mutex_id].mutex);
-	return (AE_OK);
-}
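
The ACPI_MUTEX_DEBUG blocks above enforce one ordering rule: a thread may only acquire mutexes in increasing ID order, and must not already hold the mutex it is requesting. A stripped-down sketch of that check, with a hypothetical owner table standing in for acpi_gbl_mutex_info[].thread_id:

#include <stddef.h>

/* owner[i] holds the ID of the thread owning mutex i, or 0 when the
 * mutex is free (playing the role of ACPI_MUTEX_NOT_ACQUIRED). */
static int acquire_order_ok(const unsigned long *owner, size_t num_mutexes,
			    size_t wanted, unsigned long self)
{
	size_t i;

	for (i = wanted; i < num_mutexes; i++) {
		if (owner[i] == self)
			return 0;	/* holds 'wanted' or a higher-ordered mutex */
	}
	return 1;	/* every mutex this thread holds has a lower ID */
}
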
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
deleted file mode 100644
index c354e7a..0000000
--- a/drivers/acpi/utilities/utobject.c
+++ /dev/null
@@ -1,676 +0,0 @@
-/******************************************************************************
- *
- * Module Name: utobject - ACPI object create/delete/size/cache routines
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-
-#define _COMPONENT          ACPI_UTILITIES
-ACPI_MODULE_NAME("utobject")
-
-/* Local prototypes */
-static acpi_status
-acpi_ut_get_simple_object_size(union acpi_operand_object *obj,
-			       acpi_size * obj_length);
-
-static acpi_status
-acpi_ut_get_package_object_size(union acpi_operand_object *obj,
-				acpi_size * obj_length);
-
-static acpi_status
-acpi_ut_get_element_length(u8 object_type,
-			   union acpi_operand_object *source_object,
-			   union acpi_generic_state *state, void *context);
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_create_internal_object_dbg
- *
- * PARAMETERS:  module_name         - Source file name of caller
- *              line_number         - Line number of caller
- *              component_id        - Component type of caller
- *              Type                - ACPI Type of the new object
- *
- * RETURN:      A new internal object, null on failure
- *
- * DESCRIPTION: Create and initialize a new internal object.
- *
- * NOTE:        We always allocate the worst-case object descriptor because
- *              these objects are cached, and we want them to be
- *              one-size-satisfies-any-request.  This in itself may not be
- *              the most memory-efficient, but the efficiency of the object
- *              cache should more than make up for this!
- *
- ******************************************************************************/
-
-union acpi_operand_object *acpi_ut_create_internal_object_dbg(const char
-							      *module_name,
-							      u32 line_number,
-							      u32 component_id,
-							      acpi_object_type
-							      type)
-{
-	union acpi_operand_object *object;
-	union acpi_operand_object *second_object;
-
-	ACPI_FUNCTION_TRACE_STR(ut_create_internal_object_dbg,
-				acpi_ut_get_type_name(type));
-
-	/* Allocate the raw object descriptor */
-
-	object =
-	    acpi_ut_allocate_object_desc_dbg(module_name, line_number,
-					     component_id);
-	if (!object) {
-		return_PTR(NULL);
-	}
-
-	switch (type) {
-	case ACPI_TYPE_REGION:
-	case ACPI_TYPE_BUFFER_FIELD:
-	case ACPI_TYPE_LOCAL_BANK_FIELD:
-
-		/* These types require a secondary object */
-
-		second_object = acpi_ut_allocate_object_desc_dbg(module_name,
-								 line_number,
-								 component_id);
-		if (!second_object) {
-			acpi_ut_delete_object_desc(object);
-			return_PTR(NULL);
-		}
-
-		second_object->common.type = ACPI_TYPE_LOCAL_EXTRA;
-		second_object->common.reference_count = 1;
-
-		/* Link the second object to the first */
-
-		object->common.next_object = second_object;
-		break;
-
-	default:
-		/* All others have no secondary object */
-		break;
-	}
-
-	/* Save the object type in the object descriptor */
-
-	object->common.type = (u8) type;
-
-	/* Init the reference count */
-
-	object->common.reference_count = 1;
-
-	/* Any per-type initialization should go here */
-
-	return_PTR(object);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_create_package_object
- *
- * PARAMETERS:  Count               - Number of package elements
- *
- * RETURN:      Pointer to a new Package object, null on failure
- *
- * DESCRIPTION: Create a fully initialized package object
- *
- ******************************************************************************/
-
-union acpi_operand_object *acpi_ut_create_package_object(u32 count)
-{
-	union acpi_operand_object *package_desc;
-	union acpi_operand_object **package_elements;
-
-	ACPI_FUNCTION_TRACE_U32(ut_create_package_object, count);
-
-	/* Create a new Package object */
-
-	package_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
-	if (!package_desc) {
-		return_PTR(NULL);
-	}
-
-	/*
-	 * Create the element array. Count+1 allows the array to be null
-	 * terminated.
-	 */
-	package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size) count +
-						 1) * sizeof(void *));
-	if (!package_elements) {
-		acpi_ut_remove_reference(package_desc);
-		return_PTR(NULL);
-	}
-
-	package_desc->package.count = count;
-	package_desc->package.elements = package_elements;
-	return_PTR(package_desc);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_create_buffer_object
- *
- * PARAMETERS:  buffer_size            - Size of buffer to be created
- *
- * RETURN:      Pointer to a new Buffer object, null on failure
- *
- * DESCRIPTION: Create a fully initialized buffer object
- *
- ******************************************************************************/
-
-union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size)
-{
-	union acpi_operand_object *buffer_desc;
-	u8 *buffer = NULL;
-
-	ACPI_FUNCTION_TRACE_U32(ut_create_buffer_object, buffer_size);
-
-	/* Create a new Buffer object */
-
-	buffer_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
-	if (!buffer_desc) {
-		return_PTR(NULL);
-	}
-
-	/* Create an actual buffer only if size > 0 */
-
-	if (buffer_size > 0) {
-
-		/* Allocate the actual buffer */
-
-		buffer = ACPI_ALLOCATE_ZEROED(buffer_size);
-		if (!buffer) {
-			ACPI_ERROR((AE_INFO, "Could not allocate size %X",
-				    (u32) buffer_size));
-			acpi_ut_remove_reference(buffer_desc);
-			return_PTR(NULL);
-		}
-	}
-
-	/* Complete buffer object initialization */
-
-	buffer_desc->buffer.flags |= AOPOBJ_DATA_VALID;
-	buffer_desc->buffer.pointer = buffer;
-	buffer_desc->buffer.length = (u32) buffer_size;
-
-	/* Return the new buffer descriptor */
-
-	return_PTR(buffer_desc);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_create_string_object
- *
- * PARAMETERS:  string_size         - Size of string to be created. Does not
- *                                    include the NULL terminator; it is added
- *                                    automatically.
- *
- * RETURN:      Pointer to a new String object
- *
- * DESCRIPTION: Create a fully initialized string object
- *
- ******************************************************************************/
-
-union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
-{
-	union acpi_operand_object *string_desc;
-	char *string;
-
-	ACPI_FUNCTION_TRACE_U32(ut_create_string_object, string_size);
-
-	/* Create a new String object */
-
-	string_desc = acpi_ut_create_internal_object(ACPI_TYPE_STRING);
-	if (!string_desc) {
-		return_PTR(NULL);
-	}
-
-	/*
-	 * Allocate the actual string buffer -- (Size + 1) for NULL terminator.
-	 * NOTE: Zero-length strings are NULL terminated
-	 */
-	string = ACPI_ALLOCATE_ZEROED(string_size + 1);
-	if (!string) {
-		ACPI_ERROR((AE_INFO, "Could not allocate size %X",
-			    (u32) string_size));
-		acpi_ut_remove_reference(string_desc);
-		return_PTR(NULL);
-	}
-
-	/* Complete string object initialization */
-
-	string_desc->string.pointer = string;
-	string_desc->string.length = (u32) string_size;
-
-	/* Return the new string descriptor */
-
-	return_PTR(string_desc);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_valid_internal_object
- *
- * PARAMETERS:  Object              - Object to be validated
- *
- * RETURN:      TRUE if object is valid, FALSE otherwise
- *
- * DESCRIPTION: Validate a pointer to be a union acpi_operand_object
- *
- ******************************************************************************/
-
-u8 acpi_ut_valid_internal_object(void *object)
-{
-
-	ACPI_FUNCTION_NAME(ut_valid_internal_object);
-
-	/* Check for a null pointer */
-
-	if (!object) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "**** Null Object Ptr\n"));
-		return (FALSE);
-	}
-
-	/* Check the descriptor type field */
-
-	switch (ACPI_GET_DESCRIPTOR_TYPE(object)) {
-	case ACPI_DESC_TYPE_OPERAND:
-
-		/* The object appears to be a valid union acpi_operand_object    */
-
-		return (TRUE);
-
-	default:
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "%p is not not an ACPI operand obj [%s]\n",
-				  object, acpi_ut_get_descriptor_name(object)));
-		break;
-	}
-
-	return (FALSE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_allocate_object_desc_dbg
- *
- * PARAMETERS:  module_name         - Caller's module name (for error output)
- *              line_number         - Caller's line number (for error output)
- *              component_id        - Caller's component ID (for error output)
- *
- * RETURN:      Pointer to newly allocated object descriptor.  Null on error
- *
- * DESCRIPTION: Allocate a new object descriptor.  Gracefully handle
- *              error conditions.
- *
- ******************************************************************************/
-
-void *acpi_ut_allocate_object_desc_dbg(const char *module_name,
-				       u32 line_number, u32 component_id)
-{
-	union acpi_operand_object *object;
-
-	ACPI_FUNCTION_TRACE(ut_allocate_object_desc_dbg);
-
-	object = acpi_os_acquire_object(acpi_gbl_operand_cache);
-	if (!object) {
-		ACPI_ERROR((module_name, line_number,
-			    "Could not allocate an object descriptor"));
-
-		return_PTR(NULL);
-	}
-
-	/* Mark the descriptor type */
-
-	memset(object, 0, sizeof(union acpi_operand_object));
-	ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_OPERAND);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p Size %X\n",
-			  object, (u32) sizeof(union acpi_operand_object)));
-
-	return_PTR(object);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_delete_object_desc
- *
- * PARAMETERS:  Object          - An Acpi internal object to be deleted
- *
- * RETURN:      None.
- *
- * DESCRIPTION: Free an ACPI object descriptor or add it to the object cache
- *
- ******************************************************************************/
-
-void acpi_ut_delete_object_desc(union acpi_operand_object *object)
-{
-	ACPI_FUNCTION_TRACE_PTR(ut_delete_object_desc, object);
-
-	/* Object must be a union acpi_operand_object    */
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) {
-		ACPI_ERROR((AE_INFO,
-			    "%p is not an ACPI Operand object [%s]", object,
-			    acpi_ut_get_descriptor_name(object)));
-		return_VOID;
-	}
-
-	(void)acpi_os_release_object(acpi_gbl_operand_cache, object);
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_simple_object_size
- *
- * PARAMETERS:  internal_object    - An ACPI operand object
- *              obj_length         - Where the length is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to determine the space required to
- *              contain a simple object for return to an external user.
- *
- *              The length includes the object structure plus any additional
- *              needed space.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
-			       acpi_size * obj_length)
-{
-	acpi_size length;
-	acpi_size size;
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object);
-
-	/*
-	 * Handle a null object (Could be an uninitialized package
-	 * element -- which is legal)
-	 */
-	if (!internal_object) {
-		*obj_length = sizeof(union acpi_object);
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Start with the length of the Acpi object */
-
-	length = sizeof(union acpi_object);
-
-	if (ACPI_GET_DESCRIPTOR_TYPE(internal_object) == ACPI_DESC_TYPE_NAMED) {
-
-		/* Object is a named object (reference), just return the length */
-
-		*obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * The final length depends on the object type
-	 * Strings and Buffers are packed right up against the parent object and
-	 * must be accessed bytewise or there may be alignment problems on
-	 * certain processors
-	 */
-	switch (ACPI_GET_OBJECT_TYPE(internal_object)) {
-	case ACPI_TYPE_STRING:
-
-		length += (acpi_size) internal_object->string.length + 1;
-		break;
-
-	case ACPI_TYPE_BUFFER:
-
-		length += (acpi_size) internal_object->buffer.length;
-		break;
-
-	case ACPI_TYPE_INTEGER:
-	case ACPI_TYPE_PROCESSOR:
-	case ACPI_TYPE_POWER:
-
-		/* No extra data for these types */
-
-		break;
-
-	case ACPI_TYPE_LOCAL_REFERENCE:
-
-		switch (internal_object->reference.class) {
-		case ACPI_REFCLASS_NAME:
-
-			/*
-			 * Get the actual length of the full pathname to this object.
-			 * The reference will be converted to the pathname to the object
-			 */
-			size =
-			    acpi_ns_get_pathname_length(internal_object->
-							reference.node);
-			if (!size) {
-				return_ACPI_STATUS(AE_BAD_PARAMETER);
-			}
-
-			length += ACPI_ROUND_UP_TO_NATIVE_WORD(size);
-			break;
-
-		default:
-
-			/*
-			 * No other reference opcodes are supported.
-			 * Notably, Locals and Args are not supported, but this may be
-			 * required eventually.
-			 */
-			ACPI_ERROR((AE_INFO,
-				    "Cannot convert to external object - "
-				    "unsupported Reference Class [%s] %X in object %p",
-				    acpi_ut_get_reference_name(internal_object),
-				    internal_object->reference.class,
-				    internal_object));
-			status = AE_TYPE;
-			break;
-		}
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Cannot convert to external object - "
-			    "unsupported type [%s] %X in object %p",
-			    acpi_ut_get_object_type_name(internal_object),
-			    ACPI_GET_OBJECT_TYPE(internal_object),
-			    internal_object));
-		status = AE_TYPE;
-		break;
-	}
-
-	/*
-	 * Account for the space required by the object rounded up to the next
-	 * multiple of the machine word size.  This keeps each object aligned
-	 * on a machine word boundary. (preventing alignment faults on some
-	 * machines.)
-	 */
-	*obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_element_length
- *
- * PARAMETERS:  acpi_pkg_callback
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Get the length of one package element.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_get_element_length(u8 object_type,
-			   union acpi_operand_object *source_object,
-			   union acpi_generic_state *state, void *context)
-{
-	acpi_status status = AE_OK;
-	struct acpi_pkg_info *info = (struct acpi_pkg_info *)context;
-	acpi_size object_space;
-
-	switch (object_type) {
-	case ACPI_COPY_TYPE_SIMPLE:
-
-		/*
-		 * Simple object - just get the size (Null object/entry is handled
-		 * here also) and sum it into the running package length
-		 */
-		status =
-		    acpi_ut_get_simple_object_size(source_object,
-						   &object_space);
-		if (ACPI_FAILURE(status)) {
-			return (status);
-		}
-
-		info->length += object_space;
-		break;
-
-	case ACPI_COPY_TYPE_PACKAGE:
-
-		/* Package object - nothing much to do here, let the walk handle it */
-
-		info->num_packages++;
-		state->pkg.this_target_obj = NULL;
-		break;
-
-	default:
-
-		/* No other types allowed */
-
-		return (AE_BAD_PARAMETER);
-	}
-
-	return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_package_object_size
- *
- * PARAMETERS:  internal_object     - An ACPI internal object
- *              obj_length          - Where the length is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to determine the space required to
- *              contain a package object for return to an external user.
- *
- *              This is moderately complex since a package contains other
- *              objects including packages.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_get_package_object_size(union acpi_operand_object *internal_object,
-				acpi_size * obj_length)
-{
-	acpi_status status;
-	struct acpi_pkg_info info;
-
-	ACPI_FUNCTION_TRACE_PTR(ut_get_package_object_size, internal_object);
-
-	info.length = 0;
-	info.object_space = 0;
-	info.num_packages = 1;
-
-	status = acpi_ut_walk_package_tree(internal_object, NULL,
-					   acpi_ut_get_element_length, &info);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * We have handled all of the objects in all levels of the package.
-	 * just add the length of the package objects themselves.
-	 * Round up to the next machine word.
-	 */
-	info.length += ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)) *
-	    (acpi_size) info.num_packages;
-
-	/* Return the total package length */
-
-	*obj_length = info.length;
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_object_size
- *
- * PARAMETERS:  internal_object     - An ACPI internal object
- *              obj_length          - Where the length will be returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to determine the space required to
- *              contain an object for return to an API user.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_get_object_size(union acpi_operand_object *internal_object,
-			acpi_size * obj_length)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_ENTRY();
-
-	if ((ACPI_GET_DESCRIPTOR_TYPE(internal_object) ==
-	     ACPI_DESC_TYPE_OPERAND)
-	    && (ACPI_GET_OBJECT_TYPE(internal_object) == ACPI_TYPE_PACKAGE)) {
-		status =
-		    acpi_ut_get_package_object_size(internal_object,
-						    obj_length);
-	} else {
-		status =
-		    acpi_ut_get_simple_object_size(internal_object, obj_length);
-	}
-
-	return (status);
-}
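The sizing helpers above always start from sizeof(union acpi_object) and append any trailing string or buffer payload, rounded up to the native word size so packed objects stay aligned. A minimal illustrative sketch of the string case follows, under the assumption that ACPI_ROUND_UP_TO_NATIVE_WORD() behaves as in ACPICA's acmacros.h; example_external_string_size is a made-up name, not an ACPICA function:

#include <acpi/acpi.h>

/* Hypothetical helper mirroring the ACPI_TYPE_STRING branch above: base
 * union size, plus the string bytes and NUL terminator, rounded up so the
 * next packed object starts on a machine-word boundary. */
static acpi_size example_external_string_size(u32 string_length)
{
	acpi_size length = sizeof(union acpi_object);

	length += (acpi_size) string_length + 1;	/* data + NUL */
	return (ACPI_ROUND_UP_TO_NATIVE_WORD(length));
}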
diff --git a/drivers/acpi/utilities/utresrc.c b/drivers/acpi/utilities/utresrc.c
deleted file mode 100644
index c3e3e13..0000000
--- a/drivers/acpi/utilities/utresrc.c
+++ /dev/null
@@ -1,615 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: utresrc - Resource management utilities
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/amlresrc.h>
-
-#define _COMPONENT          ACPI_UTILITIES
-ACPI_MODULE_NAME("utresrc")
-#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
-/*
- * Strings used to decode resource descriptors.
- * Used by both the disassembler and the debugger resource dump routines
- */
-const char *acpi_gbl_bm_decode[] = {
-	"NotBusMaster",
-	"BusMaster"
-};
-
-const char *acpi_gbl_config_decode[] = {
-	"0 - Good Configuration",
-	"1 - Acceptable Configuration",
-	"2 - Suboptimal Configuration",
-	"3 - ***Invalid Configuration***",
-};
-
-const char *acpi_gbl_consume_decode[] = {
-	"ResourceProducer",
-	"ResourceConsumer"
-};
-
-const char *acpi_gbl_dec_decode[] = {
-	"PosDecode",
-	"SubDecode"
-};
-
-const char *acpi_gbl_he_decode[] = {
-	"Level",
-	"Edge"
-};
-
-const char *acpi_gbl_io_decode[] = {
-	"Decode10",
-	"Decode16"
-};
-
-const char *acpi_gbl_ll_decode[] = {
-	"ActiveHigh",
-	"ActiveLow"
-};
-
-const char *acpi_gbl_max_decode[] = {
-	"MaxNotFixed",
-	"MaxFixed"
-};
-
-const char *acpi_gbl_mem_decode[] = {
-	"NonCacheable",
-	"Cacheable",
-	"WriteCombining",
-	"Prefetchable"
-};
-
-const char *acpi_gbl_min_decode[] = {
-	"MinNotFixed",
-	"MinFixed"
-};
-
-const char *acpi_gbl_mtp_decode[] = {
-	"AddressRangeMemory",
-	"AddressRangeReserved",
-	"AddressRangeACPI",
-	"AddressRangeNVS"
-};
-
-const char *acpi_gbl_rng_decode[] = {
-	"InvalidRanges",
-	"NonISAOnlyRanges",
-	"ISAOnlyRanges",
-	"EntireRange"
-};
-
-const char *acpi_gbl_rw_decode[] = {
-	"ReadOnly",
-	"ReadWrite"
-};
-
-const char *acpi_gbl_shr_decode[] = {
-	"Exclusive",
-	"Shared"
-};
-
-const char *acpi_gbl_siz_decode[] = {
-	"Transfer8",
-	"Transfer8_16",
-	"Transfer16",
-	"InvalidSize"
-};
-
-const char *acpi_gbl_trs_decode[] = {
-	"DenseTranslation",
-	"SparseTranslation"
-};
-
-const char *acpi_gbl_ttp_decode[] = {
-	"TypeStatic",
-	"TypeTranslation"
-};
-
-const char *acpi_gbl_typ_decode[] = {
-	"Compatibility",
-	"TypeA",
-	"TypeB",
-	"TypeF"
-};
-
-#endif
-
-/*
- * Base sizes of the raw AML resource descriptors, indexed by resource type.
- * Zero indicates a reserved (and therefore invalid) resource type.
- */
-const u8 acpi_gbl_resource_aml_sizes[] = {
-	/* Small descriptors */
-
-	0,
-	0,
-	0,
-	0,
-	ACPI_AML_SIZE_SMALL(struct aml_resource_irq),
-	ACPI_AML_SIZE_SMALL(struct aml_resource_dma),
-	ACPI_AML_SIZE_SMALL(struct aml_resource_start_dependent),
-	ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent),
-	ACPI_AML_SIZE_SMALL(struct aml_resource_io),
-	ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io),
-	0,
-	0,
-	0,
-	0,
-	ACPI_AML_SIZE_SMALL(struct aml_resource_vendor_small),
-	ACPI_AML_SIZE_SMALL(struct aml_resource_end_tag),
-
-	/* Large descriptors */
-
-	0,
-	ACPI_AML_SIZE_LARGE(struct aml_resource_memory24),
-	ACPI_AML_SIZE_LARGE(struct aml_resource_generic_register),
-	0,
-	ACPI_AML_SIZE_LARGE(struct aml_resource_vendor_large),
-	ACPI_AML_SIZE_LARGE(struct aml_resource_memory32),
-	ACPI_AML_SIZE_LARGE(struct aml_resource_fixed_memory32),
-	ACPI_AML_SIZE_LARGE(struct aml_resource_address32),
-	ACPI_AML_SIZE_LARGE(struct aml_resource_address16),
-	ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq),
-	ACPI_AML_SIZE_LARGE(struct aml_resource_address64),
-	ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64)
-};
-
-/*
- * Resource types, used to validate the resource length field.
- * The length of fixed-length types must match exactly, variable
- * lengths must meet the minimum required length, etc.
- * Zero indicates a reserved (and therefore invalid) resource type.
- */
-static const u8 acpi_gbl_resource_types[] = {
-	/* Small descriptors */
-
-	0,
-	0,
-	0,
-	0,
-	ACPI_SMALL_VARIABLE_LENGTH,
-	ACPI_FIXED_LENGTH,
-	ACPI_SMALL_VARIABLE_LENGTH,
-	ACPI_FIXED_LENGTH,
-	ACPI_FIXED_LENGTH,
-	ACPI_FIXED_LENGTH,
-	0,
-	0,
-	0,
-	0,
-	ACPI_VARIABLE_LENGTH,
-	ACPI_FIXED_LENGTH,
-
-	/* Large descriptors */
-
-	0,
-	ACPI_FIXED_LENGTH,
-	ACPI_FIXED_LENGTH,
-	0,
-	ACPI_VARIABLE_LENGTH,
-	ACPI_FIXED_LENGTH,
-	ACPI_FIXED_LENGTH,
-	ACPI_VARIABLE_LENGTH,
-	ACPI_VARIABLE_LENGTH,
-	ACPI_VARIABLE_LENGTH,
-	ACPI_VARIABLE_LENGTH,
-	ACPI_FIXED_LENGTH
-};
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_walk_aml_resources
- *
- * PARAMETERS:  Aml             - Pointer to the raw AML resource template
- *              aml_length      - Length of the entire template
- *              user_function   - Called once for each descriptor found. If
- *                                NULL, a pointer to the end_tag is returned
- *              Context         - Passed to user_function
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Walk a raw AML resource list (buffer). User function called
- *              once for each resource found.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_walk_aml_resources(u8 * aml,
-			   acpi_size aml_length,
-			   acpi_walk_aml_callback user_function, void **context)
-{
-	acpi_status status;
-	u8 *end_aml;
-	u8 resource_index;
-	u32 length;
-	u32 offset = 0;
-
-	ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
-
-	/* The absolute minimum resource template is one end_tag descriptor */
-
-	if (aml_length < sizeof(struct aml_resource_end_tag)) {
-		return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
-	}
-
-	/* Point to the end of the resource template buffer */
-
-	end_aml = aml + aml_length;
-
-	/* Walk the byte list, abort on any invalid descriptor type or length */
-
-	while (aml < end_aml) {
-
-		/* Validate the Resource Type and Resource Length */
-
-		status = acpi_ut_validate_resource(aml, &resource_index);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-
-		/* Get the length of this descriptor */
-
-		length = acpi_ut_get_descriptor_length(aml);
-
-		/* Invoke the user function */
-
-		if (user_function) {
-			status =
-			    user_function(aml, length, offset, resource_index,
-					  context);
-			if (ACPI_FAILURE(status)) {
-				return (status);
-			}
-		}
-
-		/* An end_tag descriptor terminates this resource template */
-
-		if (acpi_ut_get_resource_type(aml) ==
-		    ACPI_RESOURCE_NAME_END_TAG) {
-			/*
-			 * There must be at least one more byte in the buffer for
-			 * the 2nd byte of the end_tag
-			 */
-			if ((aml + 1) >= end_aml) {
-				return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
-			}
-
-			/* Return the pointer to the end_tag if requested */
-
-			if (!user_function) {
-				*context = aml;
-			}
-
-			/* Normal exit */
-
-			return_ACPI_STATUS(AE_OK);
-		}
-
-		aml += length;
-		offset += length;
-	}
-
-	/* Did not find an end_tag descriptor */
-
-	return (AE_AML_NO_RESOURCE_END_TAG);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_validate_resource
- *
- * PARAMETERS:  Aml             - Pointer to the raw AML resource descriptor
- *              return_index    - Where the resource index is returned. NULL
- *                                if the index is not required.
- *
- * RETURN:      Status, and optionally the Index into the global resource tables
- *
- * DESCRIPTION: Validate an AML resource descriptor by checking the Resource
- *              Type and Resource Length. Returns an index into the global
- *              resource information/dispatch tables for later use.
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
-{
-	u8 resource_type;
-	u8 resource_index;
-	acpi_rs_length resource_length;
-	acpi_rs_length minimum_resource_length;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/*
-	 * 1) Validate the resource_type field (Byte 0)
-	 */
-	resource_type = ACPI_GET8(aml);
-
-	/*
-	 * Byte 0 contains the descriptor name (Resource Type)
-	 * Examine the large/small bit in the resource header
-	 */
-	if (resource_type & ACPI_RESOURCE_NAME_LARGE) {
-
-		/* Verify the large resource type (name) against the max */
-
-		if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) {
-			return (AE_AML_INVALID_RESOURCE_TYPE);
-		}
-
-		/*
-		 * Large Resource Type -- bits 6:0 contain the name
-		 * Translate range 0x80-0x8B to index range 0x10-0x1B
-		 */
-		resource_index = (u8) (resource_type - 0x70);
-	} else {
-		/*
-		 * Small Resource Type -- bits 6:3 contain the name
-		 * Shift range to index range 0x00-0x0F
-		 */
-		resource_index = (u8)
-		    ((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3);
-	}
-
-	/* Check validity of the resource type, zero indicates name is invalid */
-
-	if (!acpi_gbl_resource_types[resource_index]) {
-		return (AE_AML_INVALID_RESOURCE_TYPE);
-	}
-
-	/*
-	 * 2) Validate the resource_length field. This ensures that the length
-	 *    is at least reasonable, and guarantees that it is non-zero.
-	 */
-	resource_length = acpi_ut_get_resource_length(aml);
-	minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index];
-
-	/* Validate based upon the type of resource - fixed length or variable */
-
-	switch (acpi_gbl_resource_types[resource_index]) {
-	case ACPI_FIXED_LENGTH:
-
-		/* Fixed length resource, length must match exactly */
-
-		if (resource_length != minimum_resource_length) {
-			return (AE_AML_BAD_RESOURCE_LENGTH);
-		}
-		break;
-
-	case ACPI_VARIABLE_LENGTH:
-
-		/* Variable length resource, length must be at least the minimum */
-
-		if (resource_length < minimum_resource_length) {
-			return (AE_AML_BAD_RESOURCE_LENGTH);
-		}
-		break;
-
-	case ACPI_SMALL_VARIABLE_LENGTH:
-
-		/* Small variable length resource, length can be (Min) or (Min-1) */
-
-		if ((resource_length > minimum_resource_length) ||
-		    (resource_length < (minimum_resource_length - 1))) {
-			return (AE_AML_BAD_RESOURCE_LENGTH);
-		}
-		break;
-
-	default:
-
-		/* Shouldn't happen (because of validation earlier), but be sure */
-
-		return (AE_AML_INVALID_RESOURCE_TYPE);
-	}
-
-	/* Optionally return the resource table index */
-
-	if (return_index) {
-		*return_index = resource_index;
-	}
-
-	return (AE_OK);
-}
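As a concrete illustration of the index translation above (a worked example, not ACPICA code): a Memory32 descriptor has type byte 0x85; its large bit (0x80) is set, so the table index is 0x85 - 0x70 = 0x15 (decimal 21), which selects the aml_resource_memory32 entries in the size and type tables above. The same translation as a standalone sketch, assuming the ACPI_RESOURCE_NAME_* constants used in this file; example_resource_index is a made-up name:

static u8 example_resource_index(u8 resource_type)
{
	if (resource_type & ACPI_RESOURCE_NAME_LARGE) {
		/* Large names 0x80-0x8B map to indexes 0x10-0x1B */
		return ((u8) (resource_type - 0x70));
	}

	/* Small names live in bits 6:3, giving indexes 0x00-0x0F */
	return ((u8) ((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3));
}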
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_resource_type
- *
- * PARAMETERS:  Aml             - Pointer to the raw AML resource descriptor
- *
- * RETURN:      The Resource Type with no extraneous bits (except the
- *              Large/Small descriptor bit -- this is left alone)
- *
- * DESCRIPTION: Extract the Resource Type/Name from the first byte of
- *              a resource descriptor.
- *
- ******************************************************************************/
-
-u8 acpi_ut_get_resource_type(void *aml)
-{
-	ACPI_FUNCTION_ENTRY();
-
-	/*
-	 * Byte 0 contains the descriptor name (Resource Type)
-	 * Examine the large/small bit in the resource header
-	 */
-	if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
-
-		/* Large Resource Type -- bits 6:0 contain the name */
-
-		return (ACPI_GET8(aml));
-	} else {
-		/* Small Resource Type -- bits 6:3 contain the name */
-
-		return ((u8) (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_SMALL_MASK));
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_resource_length
- *
- * PARAMETERS:  Aml             - Pointer to the raw AML resource descriptor
- *
- * RETURN:      Byte Length
- *
- * DESCRIPTION: Get the "Resource Length" of a raw AML descriptor. By
- *              definition, this does not include the size of the descriptor
- *              header or the length field itself.
- *
- ******************************************************************************/
-
-u16 acpi_ut_get_resource_length(void *aml)
-{
-	acpi_rs_length resource_length;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/*
-	 * Byte 0 contains the descriptor name (Resource Type)
-	 * Examine the large/small bit in the resource header
-	 */
-	if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
-
-		/* Large Resource type -- bytes 1-2 contain the 16-bit length */
-
-		ACPI_MOVE_16_TO_16(&resource_length, ACPI_ADD_PTR(u8, aml, 1));
-
-	} else {
-		/* Small Resource type -- bits 2:0 of byte 0 contain the length */
-
-		resource_length = (u16) (ACPI_GET8(aml) &
-					 ACPI_RESOURCE_NAME_SMALL_LENGTH_MASK);
-	}
-
-	return (resource_length);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_resource_header_length
- *
- * PARAMETERS:  Aml             - Pointer to the raw AML resource descriptor
- *
- * RETURN:      Length of the AML header (depends on large/small descriptor)
- *
- * DESCRIPTION: Get the length of the header for this resource.
- *
- ******************************************************************************/
-
-u8 acpi_ut_get_resource_header_length(void *aml)
-{
-	ACPI_FUNCTION_ENTRY();
-
-	/* Examine the large/small bit in the resource header */
-
-	if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
-		return (sizeof(struct aml_resource_large_header));
-	} else {
-		return (sizeof(struct aml_resource_small_header));
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_descriptor_length
- *
- * PARAMETERS:  Aml             - Pointer to the raw AML resource descriptor
- *
- * RETURN:      Byte length
- *
- * DESCRIPTION: Get the total byte length of a raw AML descriptor, including the
- *              length of the descriptor header and the length field itself.
- *              Used to walk descriptor lists.
- *
- ******************************************************************************/
-
-u32 acpi_ut_get_descriptor_length(void *aml)
-{
-	ACPI_FUNCTION_ENTRY();
-
-	/*
-	 * Get the Resource Length (does not include header length) and add
-	 * the header length (depends on if this is a small or large resource)
-	 */
-	return (acpi_ut_get_resource_length(aml) +
-		acpi_ut_get_resource_header_length(aml));
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_get_resource_end_tag
- *
- * PARAMETERS:  obj_desc        - The resource template buffer object
- *              end_tag         - Where the pointer to the end_tag is returned
- *
- * RETURN:      Status, pointer to the end tag
- *
- * DESCRIPTION: Find the end_tag resource descriptor in an AML resource template
- *              Note: allows a buffer length of zero.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ut_get_resource_end_tag(union acpi_operand_object * obj_desc,
-			     u8 ** end_tag)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ut_get_resource_end_tag);
-
-	/* Allow a buffer length of zero */
-
-	if (!obj_desc->buffer.length) {
-		*end_tag = obj_desc->buffer.pointer;
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	/* Validate the template and get a pointer to the end_tag */
-
-	status = acpi_ut_walk_aml_resources(obj_desc->buffer.pointer,
-					    obj_desc->buffer.length, NULL,
-					    (void **)end_tag);
-
-	return_ACPI_STATUS(status);
-}
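Taken together, the helpers in this file implement a simple visitor over a raw resource template: validate each descriptor, compute its total length, hand it to a callback, and stop at the end_tag. A hedged usage sketch follows; the callback signature is inferred from how user_function is invoked in acpi_ut_walk_aml_resources() above, and count_descriptors/example_count_descriptors are made-up names:

static acpi_status count_descriptors(u8 *aml, u32 length, u32 offset,
				     u8 resource_index, void **context)
{
	u32 *count = *context;	/* the caller stored &counter here */

	(*count)++;
	return (AE_OK);
}

static acpi_status example_count_descriptors(u8 *aml_buffer,
					     acpi_size buffer_length)
{
	u32 counter = 0;
	void *ctx = &counter;

	return (acpi_ut_walk_aml_resources(aml_buffer, buffer_length,
					   count_descriptors, &ctx));
}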
diff --git a/drivers/acpi/utilities/utstate.c b/drivers/acpi/utilities/utstate.c
deleted file mode 100644
index 63a6d3d..0000000
--- a/drivers/acpi/utilities/utstate.c
+++ /dev/null
@@ -1,346 +0,0 @@
-/*******************************************************************************
- *
- * Module Name: utstate - state object support procedures
- *
- ******************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-
-#define _COMPONENT          ACPI_UTILITIES
-ACPI_MODULE_NAME("utstate")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_create_pkg_state_and_push
- *
- * PARAMETERS:  Object          - Object to be added to the new state
- *              Action          - Increment/Decrement
- *              state_list      - List the state will be added to
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a new state and push it
- *
- ******************************************************************************/
-acpi_status
-acpi_ut_create_pkg_state_and_push(void *internal_object,
-				  void *external_object,
-				  u16 index,
-				  union acpi_generic_state **state_list)
-{
-	union acpi_generic_state *state;
-
-	ACPI_FUNCTION_ENTRY();
-
-	state =
-	    acpi_ut_create_pkg_state(internal_object, external_object, index);
-	if (!state) {
-		return (AE_NO_MEMORY);
-	}
-
-	acpi_ut_push_generic_state(state_list, state);
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_push_generic_state
- *
- * PARAMETERS:  list_head           - Head of the state stack
- *              State               - State object to push
- *
- * RETURN:      None
- *
- * DESCRIPTION: Push a state object onto a state stack
- *
- ******************************************************************************/
-
-void
-acpi_ut_push_generic_state(union acpi_generic_state **list_head,
-			   union acpi_generic_state *state)
-{
-	ACPI_FUNCTION_TRACE(ut_push_generic_state);
-
-	/* Push the state object onto the front of the list (stack) */
-
-	state->common.next = *list_head;
-	*list_head = state;
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_pop_generic_state
- *
- * PARAMETERS:  list_head           - Head of the state stack
- *
- * RETURN:      The popped state object
- *
- * DESCRIPTION: Pop a state object from a state stack
- *
- ******************************************************************************/
-
-union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
-						    **list_head)
-{
-	union acpi_generic_state *state;
-
-	ACPI_FUNCTION_TRACE(ut_pop_generic_state);
-
-	/* Remove the state object at the head of the list (stack) */
-
-	state = *list_head;
-	if (state) {
-
-		/* Update the list head */
-
-		*list_head = state->common.next;
-	}
-
-	return_PTR(state);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_create_generic_state
- *
- * PARAMETERS:  None
- *
- * RETURN:      The new state object. NULL on failure.
- *
- * DESCRIPTION: Create a generic state object.  Attempt to obtain one from
- *              the global state cache; if none is available, create a new one.
- *
- ******************************************************************************/
-
-union acpi_generic_state *acpi_ut_create_generic_state(void)
-{
-	union acpi_generic_state *state;
-
-	ACPI_FUNCTION_ENTRY();
-
-	state = acpi_os_acquire_object(acpi_gbl_state_cache);
-	if (state) {
-
-		/* Initialize */
-		memset(state, 0, sizeof(union acpi_generic_state));
-		state->common.descriptor_type = ACPI_DESC_TYPE_STATE;
-	}
-
-	return (state);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_create_thread_state
- *
- * PARAMETERS:  None
- *
- * RETURN:      New Thread State. NULL on failure
- *
- * DESCRIPTION: Create a "Thread State" - a flavor of the generic state used
- *              to track per-thread info during method execution
- *
- ******************************************************************************/
-
-struct acpi_thread_state *acpi_ut_create_thread_state(void)
-{
-	union acpi_generic_state *state;
-
-	ACPI_FUNCTION_TRACE(ut_create_thread_state);
-
-	/* Create the generic state object */
-
-	state = acpi_ut_create_generic_state();
-	if (!state) {
-		return_PTR(NULL);
-	}
-
-	/* Init fields specific to the update struct */
-
-	state->common.descriptor_type = ACPI_DESC_TYPE_STATE_THREAD;
-	state->thread.thread_id = acpi_os_get_thread_id();
-
-	/* Check for invalid thread ID - zero is very bad, it will break things */
-
-	if (!state->thread.thread_id) {
-		ACPI_ERROR((AE_INFO, "Invalid zero ID from AcpiOsGetThreadId"));
-		state->thread.thread_id = (acpi_thread_id) 1;
-	}
-
-	return_PTR((struct acpi_thread_state *)state);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_create_update_state
- *
- * PARAMETERS:  Object          - Initial Object to be installed in the state
- *              Action          - Update action to be performed
- *
- * RETURN:      New state object, null on failure
- *
- * DESCRIPTION: Create an "Update State" - a flavor of the generic state used
- *              to update reference counts and delete complex objects such
- *              as packages.
- *
- ******************************************************************************/
-
-union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
-						      *object, u16 action)
-{
-	union acpi_generic_state *state;
-
-	ACPI_FUNCTION_TRACE_PTR(ut_create_update_state, object);
-
-	/* Create the generic state object */
-
-	state = acpi_ut_create_generic_state();
-	if (!state) {
-		return_PTR(NULL);
-	}
-
-	/* Init fields specific to the update struct */
-
-	state->common.descriptor_type = ACPI_DESC_TYPE_STATE_UPDATE;
-	state->update.object = object;
-	state->update.value = action;
-
-	return_PTR(state);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_create_pkg_state
- *
- * PARAMETERS:  Object          - Initial Object to be installed in the state
- *              Action          - Update action to be performed
- *
- * RETURN:      New state object, null on failure
- *
- * DESCRIPTION: Create a "Package State"
- *
- ******************************************************************************/
-
-union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
-						   void *external_object,
-						   u16 index)
-{
-	union acpi_generic_state *state;
-
-	ACPI_FUNCTION_TRACE_PTR(ut_create_pkg_state, internal_object);
-
-	/* Create the generic state object */
-
-	state = acpi_ut_create_generic_state();
-	if (!state) {
-		return_PTR(NULL);
-	}
-
-	/* Init fields specific to the update struct */
-
-	state->common.descriptor_type = ACPI_DESC_TYPE_STATE_PACKAGE;
-	state->pkg.source_object = (union acpi_operand_object *)internal_object;
-	state->pkg.dest_object = external_object;
-	state->pkg.index = index;
-	state->pkg.num_packages = 1;
-
-	return_PTR(state);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_create_control_state
- *
- * PARAMETERS:  None
- *
- * RETURN:      New state object, null on failure
- *
- * DESCRIPTION: Create a "Control State" - a flavor of the generic state used
- *              to support nested IF/WHILE constructs in the AML.
- *
- ******************************************************************************/
-
-union acpi_generic_state *acpi_ut_create_control_state(void)
-{
-	union acpi_generic_state *state;
-
-	ACPI_FUNCTION_TRACE(ut_create_control_state);
-
-	/* Create the generic state object */
-
-	state = acpi_ut_create_generic_state();
-	if (!state) {
-		return_PTR(NULL);
-	}
-
-	/* Init fields specific to the control struct */
-
-	state->common.descriptor_type = ACPI_DESC_TYPE_STATE_CONTROL;
-	state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING;
-
-	return_PTR(state);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_delete_generic_state
- *
- * PARAMETERS:  State               - The state object to be deleted
- *
- * RETURN:      None
- *
- * DESCRIPTION: Release a state object to the state cache. NULL state objects
- *              are ignored.
- *
- ******************************************************************************/
-
-void acpi_ut_delete_generic_state(union acpi_generic_state *state)
-{
-	ACPI_FUNCTION_TRACE(ut_delete_generic_state);
-
-	/* Ignore null state */
-
-	if (state) {
-		(void)acpi_os_release_object(acpi_gbl_state_cache, state);
-	}
-	return_VOID;
-}
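The push/pop helpers above treat the singly linked list of acpi_generic_state objects as a stack, and every state ultimately goes back to acpi_gbl_state_cache. A minimal LIFO usage sketch (illustrative only; example_state_stack is a made-up name):

static void example_state_stack(void)
{
	union acpi_generic_state *stack = NULL;
	union acpi_generic_state *state;

	state = acpi_ut_create_generic_state();
	if (!state)
		return;

	acpi_ut_push_generic_state(&stack, state);	/* stack: [state] */

	state = acpi_ut_pop_generic_state(&stack);	/* stack is empty again */
	acpi_ut_delete_generic_state(state);		/* back to the state cache */
}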
diff --git a/drivers/acpi/utilities/utxface.c b/drivers/acpi/utilities/utxface.c
deleted file mode 100644
index c198a4d..0000000
--- a/drivers/acpi/utilities/utxface.c
+++ /dev/null
@@ -1,500 +0,0 @@
-/******************************************************************************
- *
- * Module Name: utxface - External interfaces for "global" ACPI functions
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <acpi/acevents.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acdebug.h>
-
-#define _COMPONENT          ACPI_UTILITIES
-ACPI_MODULE_NAME("utxface")
-
-#ifndef ACPI_ASL_COMPILER
-/*******************************************************************************
- *
- * FUNCTION:    acpi_initialize_subsystem
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Initializes all global variables.  This is the first function
- *              called, so any early initialization belongs here.
- *
- ******************************************************************************/
-acpi_status __init acpi_initialize_subsystem(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_initialize_subsystem);
-
-	acpi_gbl_startup_flags = ACPI_SUBSYSTEM_INITIALIZE;
-	ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace());
-
-	/* Initialize the OS-Dependent layer */
-
-	status = acpi_os_initialize();
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "During OSL initialization"));
-		return_ACPI_STATUS(status);
-	}
-
-	/* Initialize all globals used by the subsystem */
-
-	status = acpi_ut_init_globals();
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"During initialization of globals"));
-		return_ACPI_STATUS(status);
-	}
-
-	/* Create the default mutex objects */
-
-	status = acpi_ut_mutex_initialize();
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"During Global Mutex creation"));
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * Initialize the namespace manager and
-	 * the root of the namespace tree
-	 */
-	status = acpi_ns_root_initialize();
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"During Namespace initialization"));
-		return_ACPI_STATUS(status);
-	}
-
-	/* If configured, initialize the AML debugger */
-
-	ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_enable_subsystem
- *
- * PARAMETERS:  Flags           - Init/enable Options
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Completes the subsystem initialization including hardware.
- *              Puts system into ACPI mode if it isn't already.
- *
- ******************************************************************************/
-acpi_status acpi_enable_subsystem(u32 flags)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
-
-	/* Enable ACPI mode */
-
-	if (!(flags & ACPI_NO_ACPI_ENABLE)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "[Init] Going into ACPI mode\n"));
-
-		acpi_gbl_original_mode = acpi_hw_get_mode();
-
-		status = acpi_enable();
-		if (ACPI_FAILURE(status)) {
-			ACPI_WARNING((AE_INFO, "AcpiEnable failed"));
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/*
-	 * Install the default op_region handlers. These are installed unless
-	 * other handlers have already been installed via the
-	 * install_address_space_handler interface.
-	 */
-	if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "[Init] Installing default address space handlers\n"));
-
-		status = acpi_ev_install_region_handlers();
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/*
-	 * Initialize ACPI Event handling (Fixed and General Purpose)
-	 *
-	 * Note1: We must have the hardware and events initialized before we can
-	 * execute any control methods safely. Any control method can require
-	 * ACPI hardware support, so the hardware must be fully initialized before
-	 * any method execution!
-	 *
-	 * Note2: Fixed events are initialized and enabled here. GPEs are
-	 * initialized, but cannot be enabled until after the hardware is
-	 * completely initialized (SCI and global_lock activated)
-	 */
-	if (!(flags & ACPI_NO_EVENT_INIT)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "[Init] Initializing ACPI events\n"));
-
-		status = acpi_ev_initialize_events();
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/*
-	 * Install the SCI handler and Global Lock handler. This completes the
-	 * hardware initialization.
-	 */
-	if (!(flags & ACPI_NO_HANDLER_INIT)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "[Init] Installing SCI/GL handlers\n"));
-
-		status = acpi_ev_install_xrupt_handlers();
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_initialize_objects
- *
- * PARAMETERS:  Flags           - Init/enable Options
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Completes namespace initialization by initializing device
- *              objects and executing AML code for Regions, buffers, etc.
- *
- ******************************************************************************/
-acpi_status acpi_initialize_objects(u32 flags)
-{
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE(acpi_initialize_objects);
-
-	/*
-	 * Run all _REG methods
-	 *
-	 * Note: Any objects accessed by the _REG methods will be automatically
-	 * initialized, even if they contain executable AML (see the call to
-	 * acpi_ns_initialize_objects below).
-	 */
-	if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "[Init] Executing _REG OpRegion methods\n"));
-
-		status = acpi_ev_initialize_op_regions();
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/*
-	 * Initialize the objects that remain uninitialized. This runs the
-	 * executable AML that may be part of the declaration of these objects:
-	 * operation_regions, buffer_fields, Buffers, and Packages.
-	 */
-	if (!(flags & ACPI_NO_OBJECT_INIT)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "[Init] Completing Initialization of ACPI Objects\n"));
-
-		status = acpi_ns_initialize_objects();
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/*
-	 * Initialize all device objects in the namespace. This runs the device
-	 * _STA and _INI methods.
-	 */
-	if (!(flags & ACPI_NO_DEVICE_INIT)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "[Init] Initializing ACPI Devices\n"));
-
-		status = acpi_ns_initialize_devices();
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/*
-	 * Complete the GPE initialization for the GPE blocks defined in the FADT
-	 * (GPE block 0 and 1).
-	 *
-	 * Note1: This is where the _PRW methods are executed for the GPEs. These
-	 * methods can only be executed after the SCI and Global Lock handlers are
-	 * installed and initialized.
-	 *
-	 * Note2: Currently, there seems to be no need to run the _REG methods
-	 * before execution of the _PRW methods and enabling of the GPEs.
-	 */
-	if (!(flags & ACPI_NO_EVENT_INIT)) {
-		status = acpi_ev_install_fadt_gpes();
-		if (ACPI_FAILURE(status))
-			return (status);
-	}
-
-	/*
-	 * Empty the caches (delete the cached objects) on the assumption that
-	 * the table load filled them up more than they will be at runtime --
-	 * thus wasting non-paged memory.
-	 */
-	status = acpi_purge_cached_objects();
-
-	acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK;
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
-
-#endif
-/*******************************************************************************
- *
- * FUNCTION:    acpi_terminate
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Shutdown the ACPI subsystem.  Release all resources.
- *
- ******************************************************************************/
-acpi_status acpi_terminate(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_terminate);
-
-	/* Terminate the AML Debugger if present */
-
-	ACPI_DEBUGGER_EXEC(acpi_gbl_db_terminate_threads = TRUE);
-
-	/* Shutdown and free all resources */
-
-	acpi_ut_subsystem_shutdown();
-
-	/* Free the mutex objects */
-
-	acpi_ut_mutex_terminate();
-
-#ifdef ACPI_DEBUGGER
-
-	/* Shut down the debugger */
-
-	acpi_db_terminate();
-#endif
-
-	/* Now we can shutdown the OS-dependent layer */
-
-	status = acpi_os_terminate();
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_terminate)
-#ifndef ACPI_ASL_COMPILER
-#ifdef ACPI_FUTURE_USAGE
-/*******************************************************************************
- *
- * FUNCTION:    acpi_subsystem_status
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status of the ACPI subsystem
- *
- * DESCRIPTION: Other drivers that use the ACPI subsystem should call this
- *              before making any other calls, to ensure the subsystem
- *              initialized successfully.
- *
- ******************************************************************************/
-acpi_status acpi_subsystem_status(void)
-{
-
-	if (acpi_gbl_startup_flags & ACPI_INITIALIZED_OK) {
-		return (AE_OK);
-	} else {
-		return (AE_ERROR);
-	}
-}
-
-ACPI_EXPORT_SYMBOL(acpi_subsystem_status)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_system_info
- *
- * PARAMETERS:  out_buffer      - A buffer to receive the resources for the
- *                                device
- *
- * RETURN:      Status          - the status of the call
- *
- * DESCRIPTION: This function is called to get information about the current
- *              state of the ACPI subsystem.  It will return system information
- *              in the out_buffer.
- *
- *              If the function fails an appropriate status will be returned
- *              and the value of out_buffer is undefined.
- *
- ******************************************************************************/
-acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
-{
-	struct acpi_system_info *info_ptr;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_get_system_info);
-
-	/* Parameter validation */
-
-	status = acpi_ut_validate_buffer(out_buffer);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/* Validate/Allocate/Clear caller buffer */
-
-	status =
-	    acpi_ut_initialize_buffer(out_buffer,
-				      sizeof(struct acpi_system_info));
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	/*
-	 * Populate the return buffer
-	 */
-	info_ptr = (struct acpi_system_info *)out_buffer->pointer;
-
-	info_ptr->acpi_ca_version = ACPI_CA_VERSION;
-
-	/* System flags (ACPI capabilities) */
-
-	info_ptr->flags = ACPI_SYS_MODE_ACPI;
-
-	/* Timer resolution - 24 or 32 bits  */
-
-	if (acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) {
-		info_ptr->timer_resolution = 24;
-	} else {
-		info_ptr->timer_resolution = 32;
-	}
-
-	/* Clear the reserved fields */
-
-	info_ptr->reserved1 = 0;
-	info_ptr->reserved2 = 0;
-
-	/* Current debug levels */
-
-	info_ptr->debug_layer = acpi_dbg_layer;
-	info_ptr->debug_level = acpi_dbg_level;
-
-	return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_system_info)
-
-/*****************************************************************************
- *
- * FUNCTION:    acpi_install_initialization_handler
- *
- * PARAMETERS:  Handler             - Callback procedure
- *              Function            - Not (currently) used, see below
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Install an initialization handler
- *
- * TBD: When a second function is added, must save the Function also.
- *
- ****************************************************************************/
-acpi_status
-acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
-{
-
-	if (!handler) {
-		return (AE_BAD_PARAMETER);
-	}
-
-	if (acpi_gbl_init_handler) {
-		return (AE_ALREADY_EXISTS);
-	}
-
-	acpi_gbl_init_handler = handler;
-	return AE_OK;
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler)
-#endif				/*  ACPI_FUTURE_USAGE  */
-/*****************************************************************************
- *
- * FUNCTION:    acpi_purge_cached_objects
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Empty all caches (delete the cached objects)
- *
- ****************************************************************************/
-acpi_status acpi_purge_cached_objects(void)
-{
-	ACPI_FUNCTION_TRACE(acpi_purge_cached_objects);
-
-	(void)acpi_os_purge_cache(acpi_gbl_state_cache);
-	(void)acpi_os_purge_cache(acpi_gbl_operand_cache);
-	(void)acpi_os_purge_cache(acpi_gbl_ps_node_cache);
-	(void)acpi_os_purge_cache(acpi_gbl_ps_node_ext_cache);
-	return_ACPI_STATUS(AE_OK);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_purge_cached_objects)
-#endif
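The external interfaces above are intended to be called by the host OS in a fixed order. A hedged sketch of that ordering (table loading, which lives outside this file, is elided; ACPI_FULL_INITIALIZATION is assumed to be the "skip nothing" flags value from actypes.h, and example_bring_up_acpica is a made-up name):

static acpi_status __init example_bring_up_acpica(void)
{
	acpi_status status;

	status = acpi_initialize_subsystem();	/* globals, OSL, mutexes, namespace root */
	if (ACPI_FAILURE(status))
		return (status);

	/* ... ACPI table loading would happen here in a real host ... */

	status = acpi_enable_subsystem(ACPI_FULL_INITIALIZATION);
	if (ACPI_FAILURE(status))
		return (status);

	return (acpi_initialize_objects(ACPI_FULL_INITIALIZATION));
}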
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index baa4419..f261737 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -36,6 +36,7 @@
 #include <linux/backlight.h>
 #include <linux/thermal.h>
 #include <linux/video_output.h>
+#include <linux/sort.h>
 #include <asm/uaccess.h>
 
 #include <acpi/acpi_bus.h>
@@ -481,6 +482,7 @@
 	int status = AE_OK;
 	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
 	struct acpi_object_list args = { 1, &arg0 };
+	int state;
 
 
 	arg0.integer.value = level;
@@ -489,6 +491,10 @@
 		status = acpi_evaluate_object(device->dev->handle, "_BCM",
 					      &args, NULL);
 	device->brightness->curr = level;
+	for (state = 2; state < device->brightness->count; state++)
+		if (level == device->brightness->levels[state])
+			device->backlight->props.brightness = state - 2;
+
 	return status;
 }
 
@@ -626,6 +632,16 @@
 }
 
 /*
+ * Simple comparison function used to sort backlight levels.
+ */
+
+static int
+acpi_video_cmp_level(const void *a, const void *b)
+{
+	return *(int *)a - *(int *)b;
+}
+
+/*
  *  Arg:	
  *  	device	: video output device (LCD, CRT, ..)
  *
@@ -676,6 +692,10 @@
 		count++;
 	}
 
+	/* don't sort the first two brightness levels */
+	sort(&br->levels[2], count - 2, sizeof(br->levels[2]),
+		acpi_video_cmp_level, NULL);
+
 	if (count < 2)
 		goto out_free_levels;
 
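The hunk above uses the kernel's generic sort helper from <linux/sort.h>; the comparison callback has the same shape as acpi_video_cmp_level(), and the first two levels are skipped because _BCL reports the full-power and battery levels there rather than ordinary brightness steps. A small self-contained sketch of the call pattern (cmp_int and example_sort_levels are made-up names):

#include <linux/sort.h>

static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;	/* ascending order */
}

static void example_sort_levels(int *levels, int count)
{
	/* A NULL swap_func makes sort() fall back to its generic byte swap */
	sort(levels, count, sizeof(*levels), cmp_int, NULL);
}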
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index f022eb6..50e3d2d 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -234,7 +234,7 @@
  * To force that backlight or display output switching is processed by vendor
  * specific acpi drivers or video.ko driver.
  */
-int __init acpi_backlight(char *str)
+static int __init acpi_backlight(char *str)
 {
 	if (str == NULL || *str == '\0')
 		return 1;
@@ -250,7 +250,7 @@
 }
 __setup("acpi_backlight=", acpi_backlight);
 
-int __init acpi_display_output(char *str)
+static int __init acpi_display_output(char *str)
 {
 	if (str == NULL || *str == '\0')
 		return 1;
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
new file mode 100644
index 0000000..2d34806
--- /dev/null
+++ b/drivers/acpi/wakeup.c
@@ -0,0 +1,167 @@
+/*
+ * wakeup.c - support wakeup devices
+ * Copyright (C) 2004 Li Shaohua <shaohua.li@intel.com>
+ */
+
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include "sleep.h"
+
+#define _COMPONENT		ACPI_SYSTEM_COMPONENT
+ACPI_MODULE_NAME("wakeup_devices")
+
+extern struct list_head acpi_wakeup_device_list;
+extern spinlock_t acpi_device_lock;
+
+/**
+ * acpi_enable_wakeup_device_prep - prepare wakeup devices
+ *	@sleep_state:	ACPI state
+ * Enable power for all wakeup devices whose wakeup level
+ * is higher than the requested sleep level
+ */
+
+void acpi_enable_wakeup_device_prep(u8 sleep_state)
+{
+	struct list_head *node, *next;
+
+	spin_lock(&acpi_device_lock);
+	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
+		struct acpi_device *dev = container_of(node,
+						       struct acpi_device,
+						       wakeup_list);
+
+		if (!dev->wakeup.flags.valid ||
+		    !dev->wakeup.state.enabled ||
+		    (sleep_state > (u32) dev->wakeup.sleep_state))
+			continue;
+
+		spin_unlock(&acpi_device_lock);
+		acpi_enable_wakeup_device_power(dev, sleep_state);
+		spin_lock(&acpi_device_lock);
+	}
+	spin_unlock(&acpi_device_lock);
+}
+
+/**
+ * acpi_enable_wakeup_device - enable wakeup devices
+ *	@sleep_state:	ACPI state
+ * Enable all wakeup devices' GPEs
+ */
+void acpi_enable_wakeup_device(u8 sleep_state)
+{
+	struct list_head *node, *next;
+
+	/* 
+	 * Caution: this routine must be invoked with interrupts disabled.
+	 * Refer to ACPI 2.0, P212.
+	 */
+	spin_lock(&acpi_device_lock);
+	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
+		struct acpi_device *dev =
+			container_of(node, struct acpi_device, wakeup_list);
+
+		if (!dev->wakeup.flags.valid)
+			continue;
+
+		/* If the user wants to disable a run-wake GPE,
+		 * only disable it for wake and leave it enabled for runtime
+		 */
+		if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
+		    || sleep_state > (u32) dev->wakeup.sleep_state) {
+			if (dev->wakeup.flags.run_wake) {
+				spin_unlock(&acpi_device_lock);
+				/* set_gpe_type will disable GPE, leave it like that */
+				acpi_set_gpe_type(dev->wakeup.gpe_device,
+						  dev->wakeup.gpe_number,
+						  ACPI_GPE_TYPE_RUNTIME);
+				spin_lock(&acpi_device_lock);
+			}
+			continue;
+		}
+		spin_unlock(&acpi_device_lock);
+		if (!dev->wakeup.flags.run_wake)
+			acpi_enable_gpe(dev->wakeup.gpe_device,
+					dev->wakeup.gpe_number);
+		spin_lock(&acpi_device_lock);
+	}
+	spin_unlock(&acpi_device_lock);
+}
+
+/**
+ * acpi_disable_wakeup_device - disable devices' wakeup capability
+ *	@sleep_state:	ACPI state
+ * Disable all wakeup devices' GPEs and wakeup capability
+ */
+void acpi_disable_wakeup_device(u8 sleep_state)
+{
+	struct list_head *node, *next;
+
+	spin_lock(&acpi_device_lock);
+	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
+		struct acpi_device *dev =
+			container_of(node, struct acpi_device, wakeup_list);
+
+		if (!dev->wakeup.flags.valid)
+			continue;
+
+		if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
+		    || sleep_state > (u32) dev->wakeup.sleep_state) {
+			if (dev->wakeup.flags.run_wake) {
+				spin_unlock(&acpi_device_lock);
+				acpi_set_gpe_type(dev->wakeup.gpe_device,
+						  dev->wakeup.gpe_number,
+						  ACPI_GPE_TYPE_WAKE_RUN);
+				/* Re-enable it, since set_gpe_type will disable it */
+				acpi_enable_gpe(dev->wakeup.gpe_device,
+						dev->wakeup.gpe_number);
+				spin_lock(&acpi_device_lock);
+			}
+			continue;
+		}
+
+		spin_unlock(&acpi_device_lock);
+		acpi_disable_wakeup_device_power(dev);
+		/* Never disable run-wake GPE */
+		if (!dev->wakeup.flags.run_wake) {
+			acpi_disable_gpe(dev->wakeup.gpe_device,
+					 dev->wakeup.gpe_number);
+			acpi_clear_gpe(dev->wakeup.gpe_device,
+				       dev->wakeup.gpe_number, ACPI_NOT_ISR);
+		}
+		spin_lock(&acpi_device_lock);
+	}
+	spin_unlock(&acpi_device_lock);
+}
+
+static int __init acpi_wakeup_device_init(void)
+{
+	struct list_head *node, *next;
+
+	if (acpi_disabled)
+		return 0;
+
+	spin_lock(&acpi_device_lock);
+	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
+		struct acpi_device *dev = container_of(node,
+						       struct acpi_device,
+						       wakeup_list);
+		/* In case user doesn't load button driver */
+		if (!dev->wakeup.flags.run_wake || dev->wakeup.state.enabled)
+			continue;
+		spin_unlock(&acpi_device_lock);
+		acpi_set_gpe_type(dev->wakeup.gpe_device,
+				  dev->wakeup.gpe_number,
+				  ACPI_GPE_TYPE_WAKE_RUN);
+		acpi_enable_gpe(dev->wakeup.gpe_device,
+				dev->wakeup.gpe_number);
+		dev->wakeup.state.enabled = 1;
+		spin_lock(&acpi_device_lock);
+	}
+	spin_unlock(&acpi_device_lock);
+	return 0;
+}
+
+late_initcall(acpi_wakeup_device_init);
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 6b94fb7..00c46e0 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -12,9 +12,10 @@
 #include <linux/device.h>
 #include <linux/string.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 #include <linux/amba/bus.h>
 
-#include <asm/io.h>
+#include <asm/irq.h>
 #include <asm/sizes.h>
 
 #define to_amba_device(d)	container_of(d, struct amba_device, dev)
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 1a7be96..503a908 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -698,6 +698,15 @@
 
 	  If unsure, say N.
 
+config PATA_OCTEON_CF
+	tristate "OCTEON Boot Bus Compact Flash support"
+	depends on CPU_CAVIUM_OCTEON
+	help
+	  This option enables a polled compact flash driver for use with
+	  compact flash cards attached to the OCTEON boot bus.
+
+	  If unsure, say N.
+
 config PATA_SCC
 	tristate "Toshiba's Cell Reference Set IDE support"
 	depends on PCI && PPC_CELLEB
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 674965f..7f1ecf9 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -69,6 +69,7 @@
 obj-$(CONFIG_PATA_SCC)		+= pata_scc.o
 obj-$(CONFIG_PATA_SCH)		+= pata_sch.o
 obj-$(CONFIG_PATA_BF54X)	+= pata_bf54x.o
+obj-$(CONFIG_PATA_OCTEON_CF)	+= pata_octeon_cf.o
 obj-$(CONFIG_PATA_PLATFORM)	+= pata_platform.o
 obj-$(CONFIG_PATA_OF_PLATFORM)	+= pata_of_platform.o
 obj-$(CONFIG_PATA_ICSIDE)	+= pata_icside.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 656448c..9603967 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -105,7 +105,7 @@
 	board_ahci_ign_iferr	= 2,
 	board_ahci_sb600	= 3,
 	board_ahci_mv		= 4,
-	board_ahci_sb700	= 5,
+	board_ahci_sb700	= 5, /* for SB700 and SB800 */
 	board_ahci_mcp65	= 6,
 	board_ahci_nopmp	= 7,
 
@@ -439,7 +439,7 @@
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
 	},
-	/* board_ahci_sb700 */
+	/* board_ahci_sb700, for SB700 and SB800 */
 	{
 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL),
 		.flags		= AHCI_FLAG_COMMON,
@@ -2446,6 +2446,8 @@
 		speed_s = "1.5";
 	else if (speed == 2)
 		speed_s = "3";
+	else if (speed == 3)
+		speed_s = "6";
 	else
 		speed_s = "?";
 
@@ -2610,6 +2612,10 @@
 	    (pdev->revision == 0xa1 || pdev->revision == 0xa2))
 		hpriv->flags |= AHCI_HFLAG_NO_MSI;
 
+	/* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
+	if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
+		hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
+
 	if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
 		pci_intx(pdev, 1);
 
@@ -2654,6 +2660,9 @@
 	host->iomap = pcim_iomap_table(pdev);
 	host->private_data = hpriv;
 
+	if (!(hpriv->cap & HOST_CAP_SSS))
+		host->flags |= ATA_HOST_PARALLEL_SCAN;
+
 	if (pi.flags & ATA_FLAG_EM)
 		ahci_reset_em(host);
 
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 5fdf167..887d8f4 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -154,11 +154,13 @@
 
 struct piix_host_priv {
 	const int *map;
+	u32 saved_iocfg;
 	void __iomem *sidpr;
 };
 
 static int piix_init_one(struct pci_dev *pdev,
 			 const struct pci_device_id *ent);
+static void piix_remove_one(struct pci_dev *pdev);
 static int piix_pata_prereset(struct ata_link *link, unsigned long deadline);
 static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev);
 static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
@@ -296,7 +298,7 @@
 	.name			= DRV_NAME,
 	.id_table		= piix_pci_tbl,
 	.probe			= piix_init_one,
-	.remove			= ata_pci_remove_one,
+	.remove			= piix_remove_one,
 #ifdef CONFIG_PM
 	.suspend		= piix_pci_device_suspend,
 	.resume			= piix_pci_device_resume,
@@ -308,7 +310,7 @@
 };
 
 static struct ata_port_operations piix_pata_ops = {
-	.inherits		= &ata_bmdma_port_ops,
+	.inherits		= &ata_bmdma32_port_ops,
 	.cable_detect		= ata_cable_40wire,
 	.set_piomode		= piix_set_piomode,
 	.set_dmamode		= piix_set_dmamode,
@@ -610,8 +612,9 @@
 static int ich_pata_cable_detect(struct ata_port *ap)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	struct piix_host_priv *hpriv = ap->host->private_data;
 	const struct ich_laptop *lap = &ich_laptop[0];
-	u8 tmp, mask;
+	u8 mask;
 
 	/* Check for specials - Acer Aspire 5602WLMi */
 	while (lap->device) {
@@ -625,8 +628,7 @@
 
 	/* check BIOS cable detect results */
 	mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
-	pci_read_config_byte(pdev, PIIX_IOCFG, &tmp);
-	if ((tmp & mask) == 0)
+	if ((hpriv->saved_iocfg & mask) == 0)
 		return ATA_CBL_PATA40;
 	return ATA_CBL_PATA80;
 }
@@ -1350,7 +1352,7 @@
 	return 0;
 }
 
-static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
+static void piix_iocfg_bit18_quirk(struct ata_host *host)
 {
 	static const struct dmi_system_id sysids[] = {
 		{
@@ -1367,7 +1369,8 @@
 
 		{ }	/* terminate list */
 	};
-	u32 iocfg;
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	struct piix_host_priv *hpriv = host->private_data;
 
 	if (!dmi_check_system(sysids))
 		return;
@@ -1376,12 +1379,11 @@
 	 * seem to use it to disable a channel.  Clear the bit on the
 	 * affected systems.
 	 */
-	pci_read_config_dword(pdev, PIIX_IOCFG, &iocfg);
-	if (iocfg & (1 << 18)) {
+	if (hpriv->saved_iocfg & (1 << 18)) {
 		dev_printk(KERN_INFO, &pdev->dev,
 			   "applying IOCFG bit18 quirk\n");
-		iocfg &= ~(1 << 18);
-		pci_write_config_dword(pdev, PIIX_IOCFG, iocfg);
+		pci_write_config_dword(pdev, PIIX_IOCFG,
+				       hpriv->saved_iocfg & ~(1 << 18));
 	}
 }
 
@@ -1430,6 +1432,17 @@
 	if (rc)
 		return rc;
 
+	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+
+	/* Save IOCFG; this will be used for cable detection, quirk
+	 * detection and restoration on detach.  This is necessary
+	 * because some ACPI implementations mess up cable related
+	 * bits on _STM.  Reported on kernel bz#11879.
+	 */
+	pci_read_config_dword(pdev, PIIX_IOCFG, &hpriv->saved_iocfg);
+
 	/* ICH6R may be driven by either ata_piix or ahci driver
 	 * regardless of BIOS configuration.  Make sure AHCI mode is
 	 * off.
@@ -1441,10 +1454,6 @@
 	}
 
 	/* SATA map init can change port_info, do it before prepping host */
-	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
-	if (!hpriv)
-		return -ENOMEM;
-
 	if (port_flags & ATA_FLAG_SATA)
 		hpriv->map = piix_init_sata_map(pdev, port_info,
 					piix_map_db_table[ent->driver_data]);
@@ -1463,7 +1472,7 @@
 	}
 
 	/* apply IOCFG bit18 quirk */
-	piix_iocfg_bit18_quirk(pdev);
+	piix_iocfg_bit18_quirk(host);
 
 	/* On ICH5, some BIOSen disable the interrupt using the
 	 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
@@ -1488,6 +1497,16 @@
 	return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht);
 }
 
+static void piix_remove_one(struct pci_dev *pdev)
+{
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	struct piix_host_priv *hpriv = host->private_data;
+
+	pci_write_config_dword(pdev, PIIX_IOCFG, hpriv->saved_iocfg);
+
+	ata_pci_remove_one(pdev);
+}
+
 static int __init piix_init(void)
 {
 	int rc;
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index ef02e488..6273d98 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -19,12 +19,6 @@
 #include "libata.h"
 
 #include <acpi/acpi_bus.h>
-#include <acpi/acnames.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acparser.h>
-#include <acpi/acexcep.h>
-#include <acpi/acmacros.h>
-#include <acpi/actypes.h>
 
 enum {
 	ATA_ACPI_FILTER_SETXFER	= 1 << 0,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index fecca42..88c2428 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -56,6 +56,7 @@
 #include <linux/workqueue.h>
 #include <linux/scatterlist.h>
 #include <linux/io.h>
+#include <linux/async.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_host.h>
@@ -1006,6 +1007,7 @@
 	static const char * const spd_str[] = {
 		"1.5 Gbps",
 		"3.0 Gbps",
+		"6.0 Gbps",
 	};
 
 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
@@ -1999,6 +2001,10 @@
 	   as the caller should know this */
 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
 		return 0;
+	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
+	if (ata_id_is_cfa(adev->id)
+	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
+		return 0;
 	/* PIO3 and higher it is mandatory */
 	if (adev->pio_mode > XFER_PIO_2)
 		return 1;
@@ -3023,33 +3029,33 @@
  */
 
 static const struct ata_timing ata_timing[] = {
-/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
-	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
-	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
-	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
-	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
-	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
-	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
-	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
+/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
+	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
+	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
+	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
+	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
+	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
+	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
+	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
 
-	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
-	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
-	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
+	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
+	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
+	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
 
-	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
-	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
-	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
-	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
-	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
+	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
+	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
+	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
+	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
+	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
 
-/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
-	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
-	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
-	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
-	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
-	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
-	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
-	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
+/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
+	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
+	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
+	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
+	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
+	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
+	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
+	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
 
 	{ 0xFF }
 };
@@ -3059,14 +3065,15 @@
 
 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
 {
-	q->setup   = EZ(t->setup   * 1000,  T);
-	q->act8b   = EZ(t->act8b   * 1000,  T);
-	q->rec8b   = EZ(t->rec8b   * 1000,  T);
-	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
-	q->active  = EZ(t->active  * 1000,  T);
-	q->recover = EZ(t->recover * 1000,  T);
-	q->cycle   = EZ(t->cycle   * 1000,  T);
-	q->udma    = EZ(t->udma    * 1000, UT);
+	q->setup	= EZ(t->setup      * 1000,  T);
+	q->act8b	= EZ(t->act8b      * 1000,  T);
+	q->rec8b	= EZ(t->rec8b      * 1000,  T);
+	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
+	q->active	= EZ(t->active     * 1000,  T);
+	q->recover	= EZ(t->recover    * 1000,  T);
+	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
+	q->cycle	= EZ(t->cycle      * 1000,  T);
+	q->udma		= EZ(t->udma       * 1000, UT);
 }
 
 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
@@ -3078,6 +3085,7 @@
 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
+	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
 }
@@ -4550,7 +4558,7 @@
 	struct scatterlist *sg = qc->sg;
 	int dir = qc->dma_dir;
 
-	WARN_ON(sg == NULL);
+	WARN_ON_ONCE(sg == NULL);
 
 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
 
@@ -4770,7 +4778,7 @@
 	struct ata_port *ap = qc->ap;
 	unsigned int tag;
 
-	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
+	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
 
 	qc->flags = 0;
 	tag = qc->tag;
@@ -4785,8 +4793,8 @@
 	struct ata_port *ap = qc->ap;
 	struct ata_link *link = qc->dev->link;
 
-	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
-	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
+	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
 
 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
 		ata_sg_clean(qc);
@@ -4872,7 +4880,7 @@
 		struct ata_device *dev = qc->dev;
 		struct ata_eh_info *ehi = &dev->link->eh_info;
 
-		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
+		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
 
 		if (unlikely(qc->err_mask))
 			qc->flags |= ATA_QCFLAG_FAILED;
@@ -4994,16 +5002,16 @@
 	 * check is skipped for old EH because it reuses active qc to
 	 * request ATAPI sense.
 	 */
-	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
+	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
 
 	if (ata_is_ncq(prot)) {
-		WARN_ON(link->sactive & (1 << qc->tag));
+		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
 
 		if (!link->sactive)
 			ap->nr_active_links++;
 		link->sactive |= 1 << qc->tag;
 	} else {
-		WARN_ON(link->sactive);
+		WARN_ON_ONCE(link->sactive);
 
 		ap->nr_active_links++;
 		link->active_tag = qc->tag;
@@ -5909,6 +5917,65 @@
 	host->ops = ops;
 }
 
+
+static void async_port_probe(void *data, async_cookie_t cookie)
+{
+	int rc;
+	struct ata_port *ap = data;
+
+	/*
+	 * If we're not allowed to scan this host in parallel,
+	 * we need to wait until all previous scans have completed
+	 * before going further.
+	 * Jeff Garzik says this is only within a controller, so we
+	 * don't need to wait for port 0, only for later ports.
+	 */
+	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
+		async_synchronize_cookie(cookie);
+
+	/* probe */
+	if (ap->ops->error_handler) {
+		struct ata_eh_info *ehi = &ap->link.eh_info;
+		unsigned long flags;
+
+		ata_port_probe(ap);
+
+		/* kick EH for boot probing */
+		spin_lock_irqsave(ap->lock, flags);
+
+		ehi->probe_mask |= ATA_ALL_DEVICES;
+		ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
+		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
+
+		ap->pflags &= ~ATA_PFLAG_INITIALIZING;
+		ap->pflags |= ATA_PFLAG_LOADING;
+		ata_port_schedule_eh(ap);
+
+		spin_unlock_irqrestore(ap->lock, flags);
+
+		/* wait for EH to finish */
+		ata_port_wait_eh(ap);
+	} else {
+		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
+		rc = ata_bus_probe(ap);
+		DPRINTK("ata%u: bus probe end\n", ap->print_id);
+
+		if (rc) {
+			/* FIXME: do something useful here?
+			 * Current libata behavior will
+			 * tear down everything when
+			 * the module is removed
+			 * or the h/w is unplugged.
+			 */
+		}
+	}
+
+	/* in order to keep device order, we need to synchronize at this point */
+	async_synchronize_cookie(cookie);
+
+	ata_scsi_scan_host(ap, 1);
+
+}
 /**
  *	ata_host_register - register initialized ATA host
  *	@host: ATA host to register
@@ -5988,52 +6055,9 @@
 	DPRINTK("probe begin\n");
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
-
-		/* probe */
-		if (ap->ops->error_handler) {
-			struct ata_eh_info *ehi = &ap->link.eh_info;
-			unsigned long flags;
-
-			ata_port_probe(ap);
-
-			/* kick EH for boot probing */
-			spin_lock_irqsave(ap->lock, flags);
-
-			ehi->probe_mask |= ATA_ALL_DEVICES;
-			ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
-			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
-
-			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
-			ap->pflags |= ATA_PFLAG_LOADING;
-			ata_port_schedule_eh(ap);
-
-			spin_unlock_irqrestore(ap->lock, flags);
-
-			/* wait for EH to finish */
-			ata_port_wait_eh(ap);
-		} else {
-			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
-			rc = ata_bus_probe(ap);
-			DPRINTK("ata%u: bus probe end\n", ap->print_id);
-
-			if (rc) {
-				/* FIXME: do something useful here?
-				 * Current libata behavior will
-				 * tear down everything when
-				 * the module is removed
-				 * or the h/w is unplugged.
-				 */
-			}
-		}
+		async_schedule(async_port_probe, ap);
 	}
-
-	/* probes are done, now scan each port's disk(s) */
-	DPRINTK("host probe begin\n");
-	for (i = 0; i < host->n_ports; i++) {
-		struct ata_port *ap = host->ports[i];
-
-		ata_scsi_scan_host(ap, 1);
-	}
+	DPRINTK("probe end\n");
 
 	return 0;
 }
@@ -6616,7 +6640,6 @@
 EXPORT_SYMBOL_GPL(ata_port_disable);
 EXPORT_SYMBOL_GPL(ata_ratelimit);
 EXPORT_SYMBOL_GPL(ata_wait_register);
-EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 9e92107..a1a6e629 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -423,9 +423,9 @@
  *	RETURNS:
  *	Zero on success, negative errno on error.
  */
-static int ata_get_identity(struct scsi_device *sdev, void __user *arg)
+static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev,
+			    void __user *arg)
 {
-	struct ata_port *ap = ata_shost_to_port(sdev->host);
 	struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
 	u16 __user *dst = arg;
 	char buf[40];
@@ -645,7 +645,8 @@
 	return rc;
 }
 
-int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
+int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
+		     int cmd, void __user *arg)
 {
 	int val = -EINVAL, rc = -EINVAL;
 
@@ -663,7 +664,7 @@
 		return 0;
 
 	case HDIO_GET_IDENTITY:
-		return ata_get_identity(scsidev, arg);
+		return ata_get_identity(ap, scsidev, arg);
 
 	case HDIO_DRIVE_CMD:
 		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
@@ -682,6 +683,14 @@
 
 	return rc;
 }
+EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl);
+
+int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
+{
+	return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host),
+				scsidev, cmd, arg);
+}
+EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
 
 /**
  *	ata_scsi_qc_new - acquire new ata_queued_cmd reference
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 9033d16..5a4aad1 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -66,6 +66,7 @@
 
 	.port_start		= ata_sff_port_start,
 };
+EXPORT_SYMBOL_GPL(ata_sff_port_ops);
 
 const struct ata_port_operations ata_bmdma_port_ops = {
 	.inherits		= &ata_sff_port_ops,
@@ -77,6 +78,14 @@
 	.bmdma_stop		= ata_bmdma_stop,
 	.bmdma_status		= ata_bmdma_status,
 };
+EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
+
+const struct ata_port_operations ata_bmdma32_port_ops = {
+	.inherits		= &ata_bmdma_port_ops,
+
+	.sff_data_xfer		= ata_sff_data_xfer32,
+};
+EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
 
 /**
  *	ata_fill_sg - Fill PCI IDE PRD table
@@ -166,8 +175,9 @@
 			blen = len & 0xffff;
 			ap->prd[pi].addr = cpu_to_le32(addr);
 			if (blen == 0) {
-			   /* Some PATA chipsets like the CS5530 can't
-			      cope with 0x0000 meaning 64K as the spec says */
+				/* Some PATA chipsets like the CS5530 can't
+				   cope with 0x0000 meaning 64K as the spec
+				   says */
 				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
 				blen = 0x8000;
 				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
@@ -200,6 +210,7 @@
 
 	ata_fill_sg(qc);
 }
+EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
 
 /**
  *	ata_sff_dumb_qc_prep - Prepare taskfile for submission
@@ -217,6 +228,7 @@
 
 	ata_fill_sg_dumb(qc);
 }
+EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
 
 /**
  *	ata_sff_check_status - Read device status reg & clear interrupt
@@ -233,6 +245,7 @@
 {
 	return ioread8(ap->ioaddr.status_addr);
 }
+EXPORT_SYMBOL_GPL(ata_sff_check_status);
 
 /**
  *	ata_sff_altstatus - Read device alternate status reg
@@ -275,7 +288,7 @@
 		status = ata_sff_altstatus(ap);
 		/* Not us: We are busy */
 		if (status & ATA_BUSY)
-		    	return status;
+			return status;
 	}
 	/* Clear INTRQ latch */
 	status = ap->ops->sff_check_status(ap);
@@ -319,6 +332,7 @@
 	ata_sff_sync(ap);
 	ndelay(400);
 }
+EXPORT_SYMBOL_GPL(ata_sff_pause);
 
 /**
  *	ata_sff_dma_pause	-	Pause before commencing DMA
@@ -327,7 +341,7 @@
  *	Perform I/O fencing and ensure sufficient cycle delays occur
  *	for the HDMA1:0 transition
  */
- 
+
 void ata_sff_dma_pause(struct ata_port *ap)
 {
 	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
@@ -341,6 +355,7 @@
 	   corruption. */
 	BUG();
 }
+EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
 
 /**
  *	ata_sff_busy_sleep - sleep until BSY clears, or timeout
@@ -396,6 +411,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
 
 static int ata_sff_check_ready(struct ata_link *link)
 {
@@ -422,6 +438,7 @@
 {
 	return ata_wait_ready(link, deadline, ata_sff_check_ready);
 }
+EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
 
 /**
  *	ata_sff_dev_select - Select device 0/1 on ATA bus
@@ -449,6 +466,7 @@
 	iowrite8(tmp, ap->ioaddr.device_addr);
 	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
 }
+EXPORT_SYMBOL_GPL(ata_sff_dev_select);
 
 /**
  *	ata_dev_select - Select device 0/1 on ATA bus
@@ -513,6 +531,7 @@
 
 	return tmp;
 }
+EXPORT_SYMBOL_GPL(ata_sff_irq_on);
 
 /**
  *	ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt.
@@ -534,6 +553,7 @@
 
 	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
 }
+EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
 
 /**
  *	ata_sff_tf_load - send taskfile registers to host controller
@@ -558,7 +578,7 @@
 	}
 
 	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
-		WARN_ON(!ioaddr->ctl_addr);
+		WARN_ON_ONCE(!ioaddr->ctl_addr);
 		iowrite8(tf->hob_feature, ioaddr->feature_addr);
 		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
 		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
@@ -593,6 +613,7 @@
 
 	ata_wait_idle(ap);
 }
+EXPORT_SYMBOL_GPL(ata_sff_tf_load);
 
 /**
  *	ata_sff_tf_read - input device's ATA taskfile shadow registers
@@ -630,9 +651,10 @@
 			iowrite8(tf->ctl, ioaddr->ctl_addr);
 			ap->last_ctl = tf->ctl;
 		} else
-			WARN_ON(1);
+			WARN_ON_ONCE(1);
 	}
 }
+EXPORT_SYMBOL_GPL(ata_sff_tf_read);
 
 /**
  *	ata_sff_exec_command - issue ATA command to host controller
@@ -652,6 +674,7 @@
 	iowrite8(tf->command, ap->ioaddr.command_addr);
 	ata_sff_pause(ap);
 }
+EXPORT_SYMBOL_GPL(ata_sff_exec_command);
 
 /**
  *	ata_tf_to_host - issue ATA taskfile to host controller
@@ -717,6 +740,53 @@
 
 	return words << 1;
 }
+EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
+
+/**
+ *	ata_sff_data_xfer32 - Transfer data by PIO
+ *	@dev: device to target
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@rw: read/write
+ *
+ *	Transfer data from/to the device data register by PIO using 32bit
+ *	I/O operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	Bytes consumed.
+ */
+
+unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
+			       unsigned int buflen, int rw)
+{
+	struct ata_port *ap = dev->link->ap;
+	void __iomem *data_addr = ap->ioaddr.data_addr;
+	unsigned int words = buflen >> 2;
+	int slop = buflen & 3;
+
+	/* Transfer multiple of 4 bytes */
+	if (rw == READ)
+		ioread32_rep(data_addr, buf, words);
+	else
+		iowrite32_rep(data_addr, buf, words);
+
+	if (unlikely(slop)) {
+		__le32 pad;
+		if (rw == READ) {
+			pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
+			memcpy(buf + buflen - slop, &pad, slop);
+		} else {
+			memcpy(&pad, buf + buflen - slop, slop);
+			iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
+		}
+		words++;
+	}
+	return words << 2;
+}
+EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
 
 /**
  *	ata_sff_data_xfer_noirq - Transfer data by PIO
@@ -746,6 +816,7 @@
 
 	return consumed;
 }
+EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);
 
 /**
  *	ata_pio_sector - Transfer a sector of data.
@@ -820,7 +891,7 @@
 		/* READ/WRITE MULTIPLE */
 		unsigned int nsect;
 
-		WARN_ON(qc->dev->multi_count == 0);
+		WARN_ON_ONCE(qc->dev->multi_count == 0);
 
 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
 			    qc->dev->multi_count);
@@ -847,7 +918,7 @@
 {
 	/* send SCSI cdb */
 	DPRINTK("send cdb\n");
-	WARN_ON(qc->dev->cdb_len < 12);
+	WARN_ON_ONCE(qc->dev->cdb_len < 12);
 
 	ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
 	ata_sff_sync(ap);
@@ -922,13 +993,15 @@
 		buf = kmap_atomic(page, KM_IRQ0);
 
 		/* do the actual data transfer */
-		consumed = ap->ops->sff_data_xfer(dev,  buf + offset, count, rw);
+		consumed = ap->ops->sff_data_xfer(dev,  buf + offset,
+								count, rw);
 
 		kunmap_atomic(buf, KM_IRQ0);
 		local_irq_restore(flags);
 	} else {
 		buf = page_address(page);
-		consumed = ap->ops->sff_data_xfer(dev,  buf + offset, count, rw);
+		consumed = ap->ops->sff_data_xfer(dev,  buf + offset,
+								count, rw);
 	}
 
 	bytes -= min(bytes, consumed);
@@ -940,9 +1013,12 @@
 		qc->cursg_ofs = 0;
 	}
 
-	/* consumed can be larger than count only for the last transfer */
-	WARN_ON(qc->cursg && count != consumed);
-
+	/*
+	 * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed);
+	 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
+	 * check correctly as it doesn't know if it is the last request being
+	 * made. Somebody should implement a proper sanity check.
+	 */
 	if (bytes)
 		goto next_sg;
 	return 0;
@@ -1013,18 +1089,19 @@
  *	RETURNS:
  *	1 if ok in workqueue, 0 otherwise.
  */
-static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
+static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
+						struct ata_queued_cmd *qc)
 {
 	if (qc->tf.flags & ATA_TFLAG_POLLING)
 		return 1;
 
 	if (ap->hsm_task_state == HSM_ST_FIRST) {
 		if (qc->tf.protocol == ATA_PROT_PIO &&
-		    (qc->tf.flags & ATA_TFLAG_WRITE))
+		   (qc->tf.flags & ATA_TFLAG_WRITE))
 		    return 1;
 
 		if (ata_is_atapi(qc->tf.protocol) &&
-		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+		   !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
 			return 1;
 	}
 
@@ -1098,13 +1175,13 @@
 	unsigned long flags = 0;
 	int poll_next;
 
-	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
 
 	/* Make sure ata_sff_qc_issue() does not throw things
 	 * like DMA polling into the workqueue. Notice that
 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
 	 */
-	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
+	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
 
 fsm_start:
 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
@@ -1313,7 +1390,7 @@
 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
 			ap->print_id, qc->dev->devno, status);
 
-		WARN_ON(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
+		WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
 
 		ap->hsm_task_state = HSM_ST_IDLE;
 
@@ -1338,6 +1415,7 @@
 
 	return poll_next;
 }
+EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
 
 void ata_pio_task(struct work_struct *work)
 {
@@ -1348,7 +1426,7 @@
 	int poll_next;
 
 fsm_start:
-	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
+	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
 
 	/*
 	 * This is purely heuristic.  This is a fast path.
@@ -1437,7 +1515,7 @@
 		break;
 
 	case ATA_PROT_DMA:
-		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
 
 		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
@@ -1489,7 +1567,7 @@
 		break;
 
 	case ATAPI_PROT_DMA:
-		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
 
 		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
@@ -1501,12 +1579,13 @@
 		break;
 
 	default:
-		WARN_ON(1);
+		WARN_ON_ONCE(1);
 		return AC_ERR_SYSTEM;
 	}
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
 
 /**
  *	ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
@@ -1526,6 +1605,7 @@
 	qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
 	return true;
 }
+EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
 
 /**
  *	ata_sff_host_intr - Handle host interrupt for given (port, task)
@@ -1623,6 +1703,7 @@
 #endif
 	return 0;	/* irq not handled */
 }
+EXPORT_SYMBOL_GPL(ata_sff_host_intr);
 
 /**
  *	ata_sff_interrupt - Default ATA host interrupt handler
@@ -1667,6 +1748,7 @@
 
 	return IRQ_RETVAL(handled);
 }
+EXPORT_SYMBOL_GPL(ata_sff_interrupt);
 
 /**
  *	ata_sff_freeze - Freeze SFF controller port
@@ -1695,6 +1777,7 @@
 
 	ap->ops->sff_irq_clear(ap);
 }
+EXPORT_SYMBOL_GPL(ata_sff_freeze);
 
 /**
  *	ata_sff_thaw - Thaw SFF controller port
@@ -1712,6 +1795,7 @@
 	ap->ops->sff_irq_clear(ap);
 	ap->ops->sff_irq_on(ap);
 }
+EXPORT_SYMBOL_GPL(ata_sff_thaw);
 
 /**
  *	ata_sff_prereset - prepare SFF link for reset
@@ -1753,6 +1837,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_sff_prereset);
 
 /**
  *	ata_devchk - PATA device presence detection
@@ -1865,6 +1950,7 @@
 
 	return class;
 }
+EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
 
 /**
  *	ata_sff_wait_after_reset - wait for devices to become ready after reset
@@ -1941,6 +2027,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
 
 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
 			     unsigned long deadline)
@@ -2013,6 +2100,7 @@
 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_sff_softreset);
 
 /**
  *	sata_sff_hardreset - reset host port via SATA phy reset
@@ -2045,6 +2133,7 @@
 	DPRINTK("EXIT, class=%u\n", *class);
 	return rc;
 }
+EXPORT_SYMBOL_GPL(sata_sff_hardreset);
 
 /**
  *	ata_sff_postreset - SFF postreset callback
@@ -2080,6 +2169,7 @@
 	if (ap->ioaddr.ctl_addr)
 		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
 }
+EXPORT_SYMBOL_GPL(ata_sff_postreset);
 
 /**
  *	ata_sff_error_handler - Stock error handler for BMDMA controller
@@ -2152,6 +2242,7 @@
 	ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
 		  ap->ops->postreset);
 }
+EXPORT_SYMBOL_GPL(ata_sff_error_handler);
 
 /**
  *	ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller
@@ -2174,6 +2265,7 @@
 
 	spin_unlock_irqrestore(ap->lock, flags);
 }
+EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
 
 /**
  *	ata_sff_port_start - Set port up for dma.
@@ -2194,6 +2286,7 @@
 		return ata_port_start(ap);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_sff_port_start);
 
 /**
  *	ata_sff_std_ports - initialize ioaddr with standard port offsets.
@@ -2219,6 +2312,7 @@
 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
 }
+EXPORT_SYMBOL_GPL(ata_sff_std_ports);
 
 unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
 				    unsigned long xfer_mask)
@@ -2230,6 +2324,7 @@
 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
 	return xfer_mask;
 }
+EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
 
 /**
  *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
@@ -2258,6 +2353,7 @@
 	/* issue r/w command */
 	ap->ops->sff_exec_command(ap, &qc->tf);
 }
+EXPORT_SYMBOL_GPL(ata_bmdma_setup);
 
 /**
  *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
@@ -2290,6 +2386,7 @@
 	 * unneccessarily delayed for MMIO
 	 */
 }
+EXPORT_SYMBOL_GPL(ata_bmdma_start);
 
 /**
  *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
@@ -2314,6 +2411,7 @@
 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
 	ata_sff_dma_pause(ap);
 }
+EXPORT_SYMBOL_GPL(ata_bmdma_stop);
 
 /**
  *	ata_bmdma_status - Read PCI IDE BMDMA status
@@ -2330,6 +2428,7 @@
 {
 	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
 }
+EXPORT_SYMBOL_GPL(ata_bmdma_status);
 
 /**
  *	ata_bus_reset - reset host port and associated ATA channel
@@ -2422,6 +2521,7 @@
 
 	DPRINTK("EXIT\n");
 }
+EXPORT_SYMBOL_GPL(ata_bus_reset);
 
 #ifdef CONFIG_PCI
 
@@ -2449,6 +2549,7 @@
 		return -EOPNOTSUPP;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
 
 /**
  *	ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
@@ -2501,11 +2602,12 @@
 			host->flags |= ATA_HOST_SIMPLEX;
 
 		ata_port_desc(ap, "bmdma 0x%llx",
-			(unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
+		    (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
 	}
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
 
 static int ata_resources_present(struct pci_dev *pdev, int port)
 {
@@ -2513,7 +2615,7 @@
 
 	/* Check the PCI resources for this channel are enabled */
 	port = port * 2;
-	for (i = 0; i < 2; i ++) {
+	for (i = 0; i < 2; i++) {
 		if (pci_resource_start(pdev, port + i) == 0 ||
 		    pci_resource_len(pdev, port + i) == 0)
 			return 0;
@@ -2598,6 +2700,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
 
 /**
  *	ata_pci_sff_prepare_host - helper to prepare native PCI ATA host
@@ -2615,7 +2718,7 @@
  *	0 on success, -errno otherwise.
  */
 int ata_pci_sff_prepare_host(struct pci_dev *pdev,
-			     const struct ata_port_info * const * ppi,
+			     const struct ata_port_info * const *ppi,
 			     struct ata_host **r_host)
 {
 	struct ata_host *host;
@@ -2645,17 +2748,18 @@
 	*r_host = host;
 	return 0;
 
- err_bmdma:
+err_bmdma:
 	/* This is necessary because PCI and iomap resources are
 	 * merged and releasing the top group won't release the
 	 * acquired resources if some of those have been acquired
 	 * before entering this function.
 	 */
 	pcim_iounmap_regions(pdev, 0xf);
- err_out:
+err_out:
 	devres_release_group(&pdev->dev, NULL);
 	return rc;
 }
+EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
 
 /**
  *	ata_pci_sff_activate_host - start SFF host, request IRQ and register it
@@ -2741,7 +2845,7 @@
 	}
 
 	rc = ata_host_register(host, sht);
- out:
+out:
 	if (rc == 0)
 		devres_remove_group(dev, NULL);
 	else
@@ -2749,6 +2853,7 @@
 
 	return rc;
 }
+EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
 
 /**
  *	ata_pci_sff_init_one - Initialize/register PCI IDE host controller
@@ -2776,7 +2881,7 @@
  *	Zero on success, negative on errno-based value on error.
  */
 int ata_pci_sff_init_one(struct pci_dev *pdev,
-			 const struct ata_port_info * const * ppi,
+			 const struct ata_port_info * const *ppi,
 			 struct scsi_host_template *sht, void *host_priv)
 {
 	struct device *dev = &pdev->dev;
@@ -2815,7 +2920,7 @@
 
 	pci_set_master(pdev);
 	rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
- out:
+out:
 	if (rc == 0)
 		devres_remove_group(&pdev->dev, NULL);
 	else
@@ -2823,54 +2928,7 @@
 
 	return rc;
 }
-
-#endif /* CONFIG_PCI */
-
-EXPORT_SYMBOL_GPL(ata_sff_port_ops);
-EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
-EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
-EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
-EXPORT_SYMBOL_GPL(ata_sff_dev_select);
-EXPORT_SYMBOL_GPL(ata_sff_check_status);
-EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
-EXPORT_SYMBOL_GPL(ata_sff_pause);
-EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
-EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
-EXPORT_SYMBOL_GPL(ata_sff_tf_load);
-EXPORT_SYMBOL_GPL(ata_sff_tf_read);
-EXPORT_SYMBOL_GPL(ata_sff_exec_command);
-EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
-EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);
-EXPORT_SYMBOL_GPL(ata_sff_irq_on);
-EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
-EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
-EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
-EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
-EXPORT_SYMBOL_GPL(ata_sff_host_intr);
-EXPORT_SYMBOL_GPL(ata_sff_interrupt);
-EXPORT_SYMBOL_GPL(ata_sff_freeze);
-EXPORT_SYMBOL_GPL(ata_sff_thaw);
-EXPORT_SYMBOL_GPL(ata_sff_prereset);
-EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
-EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
-EXPORT_SYMBOL_GPL(ata_sff_softreset);
-EXPORT_SYMBOL_GPL(sata_sff_hardreset);
-EXPORT_SYMBOL_GPL(ata_sff_postreset);
-EXPORT_SYMBOL_GPL(ata_sff_error_handler);
-EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
-EXPORT_SYMBOL_GPL(ata_sff_port_start);
-EXPORT_SYMBOL_GPL(ata_sff_std_ports);
-EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
-EXPORT_SYMBOL_GPL(ata_bmdma_setup);
-EXPORT_SYMBOL_GPL(ata_bmdma_start);
-EXPORT_SYMBOL_GPL(ata_bmdma_stop);
-EXPORT_SYMBOL_GPL(ata_bmdma_status);
-EXPORT_SYMBOL_GPL(ata_bus_reset);
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
-EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
-EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
-EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
-EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
 EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
+
 #endif /* CONFIG_PCI */
+
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index e2e332d..8b77a98 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -13,12 +13,6 @@
 #include <linux/device.h>
 #include <scsi/scsi_host.h>
 #include <acpi/acpi_bus.h>
-#include <acpi/acnames.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acparser.h>
-#include <acpi/acexcep.h>
-#include <acpi/acmacros.h>
-#include <acpi/actypes.h>
 
 #include <linux/libata.h>
 #include <linux/ata.h>
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 73c466e..eb99dbe 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -19,7 +19,9 @@
  *
  *  TODO/CHECK
  *	Cannot have ATAPI on both master & slave for rev < c2 (???) but
- *	otherwise should do atapi DMA.
+ *	otherwise should do atapi DMA (for now, on older revisions we do
+ *	PIO only for ATAPI)
+ *	Review Sunblade workaround.
  */
 
 #include <linux/kernel.h>
@@ -33,12 +35,14 @@
 #include <linux/dmi.h>
 
 #define DRV_NAME "pata_ali"
-#define DRV_VERSION "0.7.5"
+#define DRV_VERSION "0.7.8"
 
 static int ali_atapi_dma = 0;
 module_param_named(atapi_dma, ali_atapi_dma, int, 0644);
 MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)");
 
+static struct pci_dev *ali_isa_bridge;
+
 /*
  *	Cable special cases
  */
@@ -147,8 +151,7 @@
 
 	pci_read_config_byte(pdev, pio_fifo, &fifo);
 	fifo &= ~(0x0F << shift);
-	if (on)
-		fifo |= (on << shift);
+	fifo |= (on << shift);
 	pci_write_config_byte(pdev, pio_fifo, fifo);
 }
 
@@ -337,6 +340,23 @@
 	return 0;
 }
 
+static void ali_c2_c3_postreset(struct ata_link *link, unsigned int *classes)
+{
+	u8 r;
+	int port_bit = 4 << link->ap->port_no;
+
+	/* If our bridge is an ALI 1533 then do the extra work */
+	if (ali_isa_bridge) {
+		/* Tristate and re-enable the bus signals */
+		pci_read_config_byte(ali_isa_bridge, 0x58, &r);
+		r &= ~port_bit;
+		pci_write_config_byte(ali_isa_bridge, 0x58, r);
+		r |= port_bit;
+		pci_write_config_byte(ali_isa_bridge, 0x58, r);
+	}
+	ata_sff_postreset(link, classes);
+}
+
 static struct scsi_host_template ali_sht = {
 	ATA_BMDMA_SHT(DRV_NAME),
 };
@@ -349,10 +369,11 @@
 	.inherits	= &ata_sff_port_ops,
 	.cable_detect	= ata_cable_40wire,
 	.set_piomode	= ali_set_piomode,
+	.sff_data_xfer  = ata_sff_data_xfer32,
 };
 
 static const struct ata_port_operations ali_dma_base_ops = {
-	.inherits	= &ata_bmdma_port_ops,
+	.inherits	= &ata_bmdma32_port_ops,
 	.set_piomode	= ali_set_piomode,
 	.set_dmamode	= ali_set_dmamode,
 };
@@ -377,6 +398,17 @@
 	.check_atapi_dma = ali_check_atapi_dma,
 	.cable_detect	= ali_c2_cable_detect,
 	.dev_config	= ali_lock_sectors,
+	.postreset	= ali_c2_c3_postreset,
+};
+
+/*
+ *	Port operations for DMA capable ALi with cable detect
+ */
+static struct ata_port_operations ali_c4_port_ops = {
+	.inherits	= &ali_dma_base_ops,
+	.check_atapi_dma = ali_check_atapi_dma,
+	.cable_detect	= ali_c2_cable_detect,
+	.dev_config	= ali_lock_sectors,
 };
 
 /*
@@ -401,52 +433,49 @@
 static void ali_init_chipset(struct pci_dev *pdev)
 {
 	u8 tmp;
-	struct pci_dev *north, *isa_bridge;
+	struct pci_dev *north;
 
 	/*
 	 * The chipset revision selects the driver operations and
 	 * mode data.
 	 */
 
-	if (pdev->revision >= 0x20 && pdev->revision < 0xC2) {
-		/* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
+	if (pdev->revision <= 0x20) {
+		pci_read_config_byte(pdev, 0x53, &tmp);
+		tmp |= 0x03;
+		pci_write_config_byte(pdev, 0x53, tmp);
+	} else {
+		pci_read_config_byte(pdev, 0x4a, &tmp);
+		pci_write_config_byte(pdev, 0x4a, tmp | 0x20);
 		pci_read_config_byte(pdev, 0x4B, &tmp);
-		/* Clear CD-ROM DMA write bit */
-		tmp &= 0x7F;
-		pci_write_config_byte(pdev, 0x4B, tmp);
-	} else if (pdev->revision >= 0xC2) {
-		/* Enable cable detection logic */
-		pci_read_config_byte(pdev, 0x4B, &tmp);
-		pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
-	}
-	north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
-	isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
-
-	if (north && north->vendor == PCI_VENDOR_ID_AL && isa_bridge) {
-		/* Configure the ALi bridge logic. For non ALi rely on BIOS.
-		   Set the south bridge enable bit */
-		pci_read_config_byte(isa_bridge, 0x79, &tmp);
-		if (pdev->revision == 0xC2)
-			pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
-		else if (pdev->revision > 0xC2 && pdev->revision < 0xC5)
-			pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
-	}
-	if (pdev->revision >= 0x20) {
+		if (pdev->revision < 0xC2)
+			/* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
+			/* Clear CD-ROM DMA write bit */
+			tmp &= 0x7F;
+		/* Cable and UDMA */
+		pci_write_config_byte(pdev, 0x4B, tmp | 0x09);
 		/*
 		 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
 		 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
 		 * via 0x54/55.
 		 */
 		pci_read_config_byte(pdev, 0x53, &tmp);
-		if (pdev->revision <= 0x20)
-			tmp &= ~0x02;
 		if (pdev->revision >= 0xc7)
 			tmp |= 0x03;
 		else
 			tmp |= 0x01;	/* CD_ROM enable for DMA */
 		pci_write_config_byte(pdev, 0x53, tmp);
 	}
-	pci_dev_put(isa_bridge);
+	north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+	if (north && north->vendor == PCI_VENDOR_ID_AL && ali_isa_bridge) {
+		/* Configure the ALi bridge logic. For non ALi rely on BIOS.
+		   Set the south bridge enable bit */
+		pci_read_config_byte(ali_isa_bridge, 0x79, &tmp);
+		if (pdev->revision == 0xC2)
+			pci_write_config_byte(ali_isa_bridge, 0x79, tmp | 0x04);
+		else if (pdev->revision > 0xC2 && pdev->revision < 0xC5)
+			pci_write_config_byte(ali_isa_bridge, 0x79, tmp | 0x02);
+	}
 	pci_dev_put(north);
 	ata_pci_bmdma_clear_simplex(pdev);
 }
@@ -503,7 +532,7 @@
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
 		.udma_mask = ATA_UDMA5,
-		.port_ops = &ali_c2_port_ops
+		.port_ops = &ali_c4_port_ops
 	};
 	/* Revision 0xC5 is UDMA133 with LBA48 DMA */
 	static const struct ata_port_info info_c5 = {
@@ -516,7 +545,6 @@
 
 	const struct ata_port_info *ppi[] = { NULL, NULL };
 	u8 tmp;
-	struct pci_dev *isa_bridge;
 	int rc;
 
 	rc = pcim_enable_device(pdev);
@@ -543,14 +571,12 @@
 
 	ali_init_chipset(pdev);
 
-	isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
-	if (isa_bridge && pdev->revision >= 0x20 && pdev->revision < 0xC2) {
+	if (ali_isa_bridge && pdev->revision >= 0x20 && pdev->revision < 0xC2) {
 		/* Are we paired with a UDMA capable chip */
-		pci_read_config_byte(isa_bridge, 0x5E, &tmp);
+		pci_read_config_byte(ali_isa_bridge, 0x5E, &tmp);
 		if ((tmp & 0x1E) == 0x12)
 	        	ppi[0] = &info_20_udma;
 	}
-	pci_dev_put(isa_bridge);
 
 	return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL);
 }
@@ -590,13 +616,20 @@
 
 static int __init ali_init(void)
 {
-	return pci_register_driver(&ali_pci_driver);
+	int ret;
+	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
+
+	ret = pci_register_driver(&ali_pci_driver);
+	if (ret < 0)
+		pci_dev_put(ali_isa_bridge);
+	return ret;
 }
 
 
 static void __exit ali_exit(void)
 {
 	pci_unregister_driver(&ali_pci_driver);
+	pci_dev_put(ali_isa_bridge);
 }
 
 
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 0ec9c7d..63719ab9e 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -24,7 +24,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_amd"
-#define DRV_VERSION "0.3.10"
+#define DRV_VERSION "0.3.11"
 
 /**
  *	timing_setup		-	shared timing computation and load
@@ -345,7 +345,7 @@
 };
 
 static const struct ata_port_operations amd_base_port_ops = {
-	.inherits	= &ata_bmdma_port_ops,
+	.inherits	= &ata_bmdma32_port_ops,
 	.prereset	= amd_pre_reset,
 };
 
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 0e2cde8..506adde 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -32,21 +32,6 @@
 	ATIIXP_IDE_UDMA_MODE 	= 0x56
 };
 
-static int atiixp_pre_reset(struct ata_link *link, unsigned long deadline)
-{
-	struct ata_port *ap = link->ap;
-	static const struct pci_bits atiixp_enable_bits[] = {
-		{ 0x48, 1, 0x01, 0x00 },
-		{ 0x48, 1, 0x08, 0x00 }
-	};
-	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-
-	if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
-		return -ENOENT;
-
-	return ata_sff_prereset(link, deadline);
-}
-
 static int atiixp_cable_detect(struct ata_port *ap)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -229,10 +214,9 @@
 	.cable_detect	= atiixp_cable_detect,
 	.set_piomode	= atiixp_set_piomode,
 	.set_dmamode	= atiixp_set_dmamode,
-	.prereset	= atiixp_pre_reset,
 };
 
-static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	static const struct ata_port_info info = {
 		.flags = ATA_FLAG_SLAVE_POSS,
@@ -241,8 +225,18 @@
 		.udma_mask = 0x3F,
 		.port_ops = &atiixp_port_ops
 	};
-	const struct ata_port_info *ppi[] = { &info, NULL };
-	return ata_pci_sff_init_one(dev, ppi, &atiixp_sht, NULL);
+	static const struct pci_bits atiixp_enable_bits[] = {
+		{ 0x48, 1, 0x01, 0x00 },
+		{ 0x48, 1, 0x08, 0x00 }
+	};
+	const struct ata_port_info *ppi[] = { &info, &info };
+	int i;
+
+	for (i = 0; i < 2; i++)
+		if (!pci_test_config_bits(pdev, &atiixp_enable_bits[i]))
+			ppi[i] = &ata_dummy_port_info;
+
+	return ata_pci_sff_init_one(pdev, ppi, &atiixp_sht, NULL);
 }
 
 static const struct pci_device_id atiixp[] = {
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index e0c4f05..65c28e5 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -30,7 +30,7 @@
 #define DRV_VERSION	"0.6.2"
 
 struct hpt_clock {
-	u8	xfer_speed;
+	u8	xfer_mode;
 	u32	timing;
 };
 
@@ -189,28 +189,6 @@
 	return ata_bmdma_mode_filter(adev, mask);
 }
 
-/**
- *	hpt36x_find_mode	-	reset the hpt36x bus
- *	@ap: ATA port
- *	@speed: transfer mode
- *
- *	Return the 32bit register programming information for this channel
- *	that matches the speed provided.
- */
-
-static u32 hpt36x_find_mode(struct ata_port *ap, int speed)
-{
-	struct hpt_clock *clocks = ap->host->private_data;
-
-	while(clocks->xfer_speed) {
-		if (clocks->xfer_speed == speed)
-			return clocks->timing;
-		clocks++;
-	}
-	BUG();
-	return 0xffffffffU;	/* silence compiler warning */
-}
-
 static int hpt36x_cable_detect(struct ata_port *ap)
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -226,6 +204,49 @@
 	return ATA_CBL_PATA80;
 }
 
+static void hpt366_set_mode(struct ata_port *ap, struct ata_device *adev,
+			    u8 mode)
+{
+	struct hpt_clock *clocks = ap->host->private_data;
+	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	u32 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+	u32 addr2 = 0x51 + 4 * ap->port_no;
+	u32 mask, reg;
+	u8 fast;
+
+	/* Fast interrupt prediction disable, hold off interrupt disable */
+	pci_read_config_byte(pdev, addr2, &fast);
+	if (fast & 0x80) {
+		fast &= ~0x80;
+		pci_write_config_byte(pdev, addr2, fast);
+	}
+
+	/* determine timing mask and find matching clock entry */
+	if (mode < XFER_MW_DMA_0)
+		mask = 0xc1f8ffff;
+	else if (mode < XFER_UDMA_0)
+		mask = 0x303800ff;
+	else
+		mask = 0x30070000;
+
+	while (clocks->xfer_mode) {
+		if (clocks->xfer_mode == mode)
+			break;
+		clocks++;
+	}
+	if (!clocks->xfer_mode)
+		BUG();
+
+	/*
+	 * Combine new mode bits with old config bits and disable
+	 * on-chip PIO FIFO/buffer (and PIO MST mode as well) to avoid
+	 * problems handling I/O errors later.
+	 */
+	pci_read_config_dword(pdev, addr1, &reg);
+	reg = ((reg & ~mask) | (clocks->timing & mask)) & ~0xc0000000;
+	pci_write_config_dword(pdev, addr1, reg);
+}
+
 /**
  *	hpt366_set_piomode		-	PIO setup
  *	@ap: ATA interface
@@ -236,28 +257,7 @@
 
 static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev)
 {
-	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-	u32 addr1, addr2;
-	u32 reg;
-	u32 mode;
-	u8 fast;
-
-	addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
-	addr2 = 0x51 + 4 * ap->port_no;
-
-	/* Fast interrupt prediction disable, hold off interrupt disable */
-	pci_read_config_byte(pdev, addr2, &fast);
-	if (fast & 0x80) {
-		fast &= ~0x80;
-		pci_write_config_byte(pdev, addr2, fast);
-	}
-
-	pci_read_config_dword(pdev, addr1, &reg);
-	mode = hpt36x_find_mode(ap, adev->pio_mode);
-	mode &= ~0x8000000;	/* No FIFO in PIO */
-	mode &= ~0x30070000;	/* Leave config bits alone */
-	reg &= 0x30070000;	/* Strip timing bits */
-	pci_write_config_dword(pdev, addr1, reg | mode);
+	hpt366_set_mode(ap, adev, adev->pio_mode);
 }
 
 /**
@@ -271,28 +271,7 @@
 
 static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 {
-	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-	u32 addr1, addr2;
-	u32 reg;
-	u32 mode;
-	u8 fast;
-
-	addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
-	addr2 = 0x51 + 4 * ap->port_no;
-
-	/* Fast interrupt prediction disable, hold off interrupt disable */
-	pci_read_config_byte(pdev, addr2, &fast);
-	if (fast & 0x80) {
-		fast &= ~0x80;
-		pci_write_config_byte(pdev, addr2, fast);
-	}
-
-	pci_read_config_dword(pdev, addr1, &reg);
-	mode = hpt36x_find_mode(ap, adev->dma_mode);
-	mode |= 0x8000000;	/* FIFO in MWDMA or UDMA */
-	mode &= ~0xC0000000;	/* Leave config bits alone */
-	reg &= 0xC0000000;	/* Strip timing bits */
-	pci_write_config_dword(pdev, addr1, reg | mode);
+	hpt366_set_mode(ap, adev, adev->dma_mode);
 }
 
 static struct scsi_host_template hpt36x_sht = {
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index f11a320..f19cc64 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -23,7 +23,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt3x3"
-#define DRV_VERSION	"0.5.3"
+#define DRV_VERSION	"0.6.1"
 
 /**
  *	hpt3x3_set_piomode		-	PIO setup
@@ -80,14 +80,48 @@
 	r2 &= ~(0x11 << dn);	/* Clear MWDMA and UDMA bits */
 
 	if (adev->dma_mode >= XFER_UDMA_0)
-		r2 |= (0x10 << dn);	/* Ultra mode */
+		r2 |= (0x01 << dn);	/* Ultra mode */
 	else
-		r2 |= (0x01 << dn);	/* MWDMA */
+		r2 |= (0x10 << dn);	/* MWDMA */
 
 	pci_write_config_dword(pdev, 0x44, r1);
 	pci_write_config_dword(pdev, 0x48, r2);
 }
-#endif /* CONFIG_PATA_HPT3X3_DMA */
+
+/**
+ *	hpt3x3_freeze		-	DMA workaround
+ *	@ap: port to freeze
+ *
+ *	When freezing an HPT3x3 we must stop any pending DMA before
+ *	writing to the control register or the chip will hang
+ */
+
+static void hpt3x3_freeze(struct ata_port *ap)
+{
+	void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ ATA_DMA_START,
+			mmio + ATA_DMA_CMD);
+	ata_sff_dma_pause(ap);
+	ata_sff_freeze(ap);
+}
+
+/**
+ *	hpt3x3_bmdma_setup	-	DMA workaround
+ *	@qc: Queued command
+ *
+ *	When issuing BMDMA we must clean up the error/active bits in
+ *	software on this device
+ */
+
+static void hpt3x3_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	u8 r = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+	r |= ATA_DMA_INTR | ATA_DMA_ERR;
+	iowrite8(r, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+	return ata_bmdma_setup(qc);
+}
 
 /**
  *	hpt3x3_atapi_dma	-	ATAPI DMA check
@@ -101,18 +135,23 @@
 	return 1;
 }
 
+#endif /* CONFIG_PATA_HPT3X3_DMA */
+
 static struct scsi_host_template hpt3x3_sht = {
 	ATA_BMDMA_SHT(DRV_NAME),
 };
 
 static struct ata_port_operations hpt3x3_port_ops = {
 	.inherits	= &ata_bmdma_port_ops,
-	.check_atapi_dma= hpt3x3_atapi_dma,
 	.cable_detect	= ata_cable_40wire,
 	.set_piomode	= hpt3x3_set_piomode,
 #if defined(CONFIG_PATA_HPT3X3_DMA)
 	.set_dmamode	= hpt3x3_set_dmamode,
+	.bmdma_setup	= hpt3x3_bmdma_setup,
+	.check_atapi_dma= hpt3x3_atapi_dma,
+	.freeze		= hpt3x3_freeze,
 #endif
+
 };
 
 /**
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index f828a29..f1bb2f9 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -80,7 +80,7 @@
 
 
 #define DRV_NAME "pata_it821x"
-#define DRV_VERSION "0.4.0"
+#define DRV_VERSION "0.4.2"
 
 struct it821x_dev
 {
@@ -494,8 +494,6 @@
  *	special. In our case we need to lock the sector count to avoid
  *	blowing the brains out of the firmware with large LBA48 requests
  *
- *	FIXME: When FUA appears we need to block FUA too. And SMART and
- *	basically we need to filter commands for this chip.
  */
 
 static void it821x_dev_config(struct ata_device *adev)
@@ -890,6 +888,13 @@
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = 0x1f,
 		.mwdma_mask = 0x07,
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &it821x_rdc_port_ops
+	};
+	static const struct ata_port_info info_rdc_11 = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = 0x1f,
+		.mwdma_mask = 0x07,
 		/* No UDMA */
 		.port_ops = &it821x_rdc_port_ops
 	};
@@ -903,7 +908,11 @@
 		return rc;
 		
 	if (pdev->vendor == PCI_VENDOR_ID_RDC) {
-		ppi[0] = &info_rdc;
+		/* Deal with Vortex86SX */
+		if (pdev->revision == 0x11)
+			ppi[0] = &info_rdc_11;
+		else
+			ppi[0] = &info_rdc;
 	} else {
 		/* Force the card into bypass mode if so requested */
 		if (it8212_noraid) {
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 7c8faa48..aa576ca 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -35,7 +35,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_mpiix"
-#define DRV_VERSION "0.7.6"
+#define DRV_VERSION "0.7.7"
 
 enum {
 	IDETIM = 0x6C,		/* IDE control register */
@@ -146,6 +146,7 @@
 	.cable_detect	= ata_cable_40wire,
 	.set_piomode	= mpiix_set_piomode,
 	.prereset	= mpiix_pre_reset,
+	.sff_data_xfer	= ata_sff_data_xfer32,
 };
 
 static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
new file mode 100644
index 0000000..0fe4ef3
--- /dev/null
+++ b/drivers/ata/pata_octeon_cf.c
@@ -0,0 +1,965 @@
+/*
+ * Driver for the Octeon bootbus compact flash.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 - 2009 Cavium Networks
+ * Copyright (C) 2008 Wind River Systems
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/libata.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <scsi/scsi_host.h>
+
+#include <asm/octeon/octeon.h>
+
+/*
+ * The Octeon bootbus compact flash interface is connected in at least
+ * 3 different configurations on various evaluation boards:
+ *
+ * -- 8  bits no irq, no DMA
+ * -- 16 bits no irq, no DMA
+ * -- 16 bits True IDE mode with DMA, but no irq.
+ *
+ * In the last case the DMA engine can generate an interrupt when the
+ * transfer is complete.  For the first two cases only PIO is supported.
+ *
+ */
+
+#define DRV_NAME	"pata_octeon_cf"
+#define DRV_VERSION	"2.1"
+
+
+struct octeon_cf_port {
+	struct workqueue_struct *wq;
+	struct delayed_work delayed_finish;
+	struct ata_port *ap;
+	int dma_finished;
+};
+
+static struct scsi_host_template octeon_cf_sht = {
+	ATA_PIO_SHT(DRV_NAME),
+};
+
+/**
+ * Convert a nanosecond-based time to the setting used in the
+ * boot bus timing register, based on the timing multiplier.
+ */
+static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
+{
+	unsigned int val;
+
+	/*
+	 * Compute # of eclock periods to get desired duration in
+	 * nanoseconds.
+	 */
+	val = DIV_ROUND_UP(nsecs * (octeon_get_clock_rate() / 1000000),
+			  1000 * tim_mult);
+
+	return val;
+}
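+
+/*
+ * Worked example (illustrative only, assuming a 500 MHz eclock):
+ * with tim_mult = 2, a 60 ns interval maps to
+ * DIV_ROUND_UP(60 * 500, 1000 * 2) = 15 boot bus cycles.
+ */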
+
+static void octeon_cf_set_boot_reg_cfg(int cs)
+{
+	union cvmx_mio_boot_reg_cfgx reg_cfg;
+	reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
+	reg_cfg.s.dmack = 0;	/* Don't assert DMACK on access */
+	reg_cfg.s.tim_mult = 2;	/* Timing multiplier 2x */
+	reg_cfg.s.rd_dly = 0;	/* Sample on falling edge of BOOT_OE */
+	reg_cfg.s.sam = 0;	/* Don't combine write and output enable */
+	reg_cfg.s.we_ext = 0;	/* No write enable extension */
+	reg_cfg.s.oe_ext = 0;	/* No read enable extension */
+	reg_cfg.s.en = 1;	/* Enable this region */
+	reg_cfg.s.orbit = 0;	/* Don't combine with previous region */
+	reg_cfg.s.ale = 0;	/* Don't do address multiplexing */
+	cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), reg_cfg.u64);
+}
+
+/**
+ * Called after libata determines the needed PIO mode. This
+ * function programs the Octeon bootbus regions to support the
+ * timing requirements of the PIO mode.
+ *
+ * @ap:     ATA port information
+ * @dev:    ATA device
+ */
+static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
+{
+	struct octeon_cf_data *ocd = ap->dev->platform_data;
+	union cvmx_mio_boot_reg_timx reg_tim;
+	int cs = ocd->base_region;
+	int T;
+	struct ata_timing timing;
+
+	int use_iordy;
+	int trh;
+	int pause;
+	/* These names are timing parameters from the ATA spec */
+	int t1;
+	int t2;
+	int t2i;
+
+	T = (int)(2000000000000LL / octeon_get_clock_rate());
+
+	if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
+		BUG();
+
+	t1 = timing.setup;
+	if (t1)
+		t1--;
+	t2 = timing.active;
+	if (t2)
+		t2--;
+	t2i = timing.act8b;
+	if (t2i)
+		t2i--;
+
+	trh = ns_to_tim_reg(2, 20);
+	if (trh)
+		trh--;
+
+	pause = timing.cycle - timing.active - timing.setup - trh;
+	if (pause)
+		pause--;
+
+	octeon_cf_set_boot_reg_cfg(cs);
+	if (ocd->dma_engine >= 0)
+		/* True IDE mode, program both chip selects.  */
+		octeon_cf_set_boot_reg_cfg(cs + 1);
+
+
+	use_iordy = ata_pio_need_iordy(dev);
+
+	reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cs));
+	/* Disable page mode */
+	reg_tim.s.pagem = 0;
+	/* Enable dynamic timing */
+	reg_tim.s.waitm = use_iordy;
+	/* Pages are disabled */
+	reg_tim.s.pages = 0;
+	/* We don't use multiplexed address mode */
+	reg_tim.s.ale = 0;
+	/* Not used */
+	reg_tim.s.page = 0;
+	/* Time after IORDY to continue to assert the data */
+	reg_tim.s.wait = 0;
+	/* Time to wait to complete the cycle. */
+	reg_tim.s.pause = pause;
+	/* How long to hold after a write to de-assert CE. */
+	reg_tim.s.wr_hld = trh;
+	/* How long to wait after a read to de-assert CE. */
+	reg_tim.s.rd_hld = trh;
+	/* How long write enable is asserted */
+	reg_tim.s.we = t2;
+	/* How long read enable is asserted */
+	reg_tim.s.oe = t2;
+	/* Time after CE that read/write starts */
+	reg_tim.s.ce = ns_to_tim_reg(2, 5);
+	/* Time before CE that address is valid */
+	reg_tim.s.adr = 0;
+
+	/* Program the bootbus region timing for the data port chip select. */
+	cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs), reg_tim.u64);
+	if (ocd->dma_engine >= 0)
+		/* True IDE mode, program both chip selects.  */
+		cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs + 1), reg_tim.u64);
+}
+
+static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
+{
+	struct octeon_cf_data *ocd = dev->link->ap->dev->platform_data;
+	union cvmx_mio_boot_dma_timx dma_tim;
+	unsigned int oe_a;
+	unsigned int oe_n;
+	unsigned int dma_ackh;
+	unsigned int dma_arq;
+	unsigned int pause;
+	unsigned int T0, Tkr, Td;
+	unsigned int tim_mult;
+
+	const struct ata_timing *timing;
+
+	timing = ata_timing_find_mode(dev->dma_mode);
+	T0	= timing->cycle;
+	Td	= timing->active;
+	Tkr	= timing->recover;
+	dma_ackh = timing->dmack_hold;
+
+	dma_tim.u64 = 0;
+	/* dma_tim.s.tim_mult = 0 --> 4x */
+	tim_mult = 4;
+
+	/* not spec'ed, value in eclocks, not affected by tim_mult */
+	dma_arq = 8;
+	pause = 25 - dma_arq * 1000 /
+		(octeon_get_clock_rate() / 1000000); /* Tz */
+
+	oe_a = Td;
+	/* Tkr from cf spec, lengthened to meet T0 */
+	oe_n = max(T0 - oe_a, Tkr);
+
+	dma_tim.s.dmack_pi = 1;
+
+	dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
+	dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);
+
+	/*
+	 * This is tI; the CF spec says 0, but the Sony CF card requires
+	 * more, so we use 20 ns.
+	 */
+	dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);
+	dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);
+
+	dma_tim.s.dmarq = dma_arq;
+	dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);
+
+	dma_tim.s.rd_dly = 0;	/* Sample right on edge */
+
+	/*  writes only */
+	dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
+	dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);
+
+	pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
+		 ns_to_tim_reg(tim_mult, 60));
+	pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: "
+		 "%d, dmarq: %d, pause: %d\n",
+		 dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
+		 dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);
+
+	cvmx_write_csr(CVMX_MIO_BOOT_DMA_TIMX(ocd->dma_engine),
+		       dma_tim.u64);
+
+}
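+
+/*
+ * Worked example (illustrative only, assuming a 500 MHz eclock): the
+ * 8-eclock DMARQ allowance above is 8 * 1000 / 500 = 16 ns, so the Tz
+ * pause comes out to 25 - 16 = 9 ns before conversion by
+ * ns_to_tim_reg().
+ */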
+
+/**
+ * Handle an 8 bit I/O request.
+ *
+ * @dev:        Device to access
+ * @buffer:     Data buffer
+ * @buflen:     Length of the buffer.
+ * @rw:         True to write.
+ */
+static unsigned int octeon_cf_data_xfer8(struct ata_device *dev,
+					 unsigned char *buffer,
+					 unsigned int buflen,
+					 int rw)
+{
+	struct ata_port *ap		= dev->link->ap;
+	void __iomem *data_addr		= ap->ioaddr.data_addr;
+	unsigned long words;
+	int count;
+
+	words = buflen;
+	if (rw) {
+		count = 16;
+		while (words--) {
+			iowrite8(*buffer, data_addr);
+			buffer++;
+			/*
+			 * Every 16 writes do a read so the bootbus
+			 * FIFO doesn't fill up.
+			 */
+			if (--count == 0) {
+				ioread8(ap->ioaddr.altstatus_addr);
+				count = 16;
+			}
+		}
+	} else {
+		ioread8_rep(data_addr, buffer, words);
+	}
+	return buflen;
+}
+
+/**
+ * Handle a 16 bit I/O request.
+ *
+ * @dev:        Device to access
+ * @buffer:     Data buffer
+ * @buflen:     Length of the buffer.
+ * @rw:         True to write.
+ */
+static unsigned int octeon_cf_data_xfer16(struct ata_device *dev,
+					  unsigned char *buffer,
+					  unsigned int buflen,
+					  int rw)
+{
+	struct ata_port *ap		= dev->link->ap;
+	void __iomem *data_addr		= ap->ioaddr.data_addr;
+	unsigned long words;
+	int count;
+
+	words = buflen / 2;
+	if (rw) {
+		count = 16;
+		while (words--) {
+			iowrite16(*(uint16_t *)buffer, data_addr);
+			buffer += sizeof(uint16_t);
+			/*
+			 * Every 16 writes do a read so the bootbus
+			 * FIFO doesn't fill up.
+			 */
+			if (--count == 0) {
+				ioread8(ap->ioaddr.altstatus_addr);
+				count = 16;
+			}
+		}
+	} else {
+		while (words--) {
+			*(uint16_t *)buffer = ioread16(data_addr);
+			buffer += sizeof(uint16_t);
+		}
+	}
+	/* Transfer trailing 1 byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		__le16 align_buf[1] = { 0 };
+
+		if (rw == READ) {
+			align_buf[0] = cpu_to_le16(ioread16(data_addr));
+			memcpy(buffer, align_buf, 1);
+		} else {
+			memcpy(align_buf, buffer, 1);
+			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
+		}
+		words++;
+	}
+	return buflen;
+}
+
+/**
+ * Read the taskfile for 16bit non-True IDE only.
+ */
+static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	u16 blob;
+	/* The base of the registers is at ioaddr.data_addr. */
+	void __iomem *base = ap->ioaddr.data_addr;
+
+	blob = __raw_readw(base + 0xc);
+	tf->feature = blob >> 8;
+
+	blob = __raw_readw(base + 2);
+	tf->nsect = blob & 0xff;
+	tf->lbal = blob >> 8;
+
+	blob = __raw_readw(base + 4);
+	tf->lbam = blob & 0xff;
+	tf->lbah = blob >> 8;
+
+	blob = __raw_readw(base + 6);
+	tf->device = blob & 0xff;
+	tf->command = blob >> 8;
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		if (likely(ap->ioaddr.ctl_addr)) {
+			iowrite8(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr);
+
+			blob = __raw_readw(base + 0xc);
+			tf->hob_feature = blob >> 8;
+
+			blob = __raw_readw(base + 2);
+			tf->hob_nsect = blob & 0xff;
+			tf->hob_lbal = blob >> 8;
+
+			blob = __raw_readw(base + 4);
+			tf->hob_lbam = blob & 0xff;
+			tf->hob_lbah = blob >> 8;
+
+			iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
+			ap->last_ctl = tf->ctl;
+		} else {
+			WARN_ON(1);
+		}
+	}
+}
+
+static u8 octeon_cf_check_status16(struct ata_port *ap)
+{
+	u16 blob;
+	void __iomem *base = ap->ioaddr.data_addr;
+
+	blob = __raw_readw(base + 6);
+	return blob >> 8;
+}
+
+static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
+				 unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	void __iomem *base = ap->ioaddr.data_addr;
+	int rc;
+	u8 err;
+
+	DPRINTK("about to softreset\n");
+	__raw_writew(ap->ctl, base + 0xe);
+	udelay(20);
+	__raw_writew(ap->ctl | ATA_SRST, base + 0xe);
+	udelay(20);
+	__raw_writew(ap->ctl, base + 0xe);
+
+	rc = ata_sff_wait_after_reset(link, 1, deadline);
+	if (rc) {
+		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
+		return rc;
+	}
+
+	/* determine by signature whether we have ATA or ATAPI devices */
+	classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err);
+	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
+	return 0;
+}
+
+/**
+ * Load the taskfile for 16bit non-True IDE only.  The device_addr is
+ * not loaded; that is done as part of octeon_cf_exec_command16.
+ */
+static void octeon_cf_tf_load16(struct ata_port *ap,
+				const struct ata_taskfile *tf)
+{
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+	/* The base of the registers is at ioaddr.data_addr. */
+	void __iomem *base = ap->ioaddr.data_addr;
+
+	if (tf->ctl != ap->last_ctl) {
+		iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		__raw_writew(tf->hob_feature << 8, base + 0xc);
+		__raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2);
+		__raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4);
+		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+			tf->hob_feature,
+			tf->hob_nsect,
+			tf->hob_lbal,
+			tf->hob_lbam,
+			tf->hob_lbah);
+	}
+	if (is_addr) {
+		__raw_writew(tf->feature << 8, base + 0xc);
+		__raw_writew(tf->nsect | tf->lbal << 8, base + 2);
+		__raw_writew(tf->lbam | tf->lbah << 8, base + 4);
+		VPRINTK("feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+			tf->feature,
+			tf->nsect,
+			tf->lbal,
+			tf->lbam,
+			tf->lbah);
+	}
+	ata_wait_idle(ap);
+}
+
+
+static void octeon_cf_dev_select(struct ata_port *ap, unsigned int device)
+{
+	/* There is only one device, do nothing. */
+	return;
+}
+
+/*
+ * Issue ATA command to host controller.  The device_addr is also sent
+ * as it must be written in a combined write with the command.
+ */
+static void octeon_cf_exec_command16(struct ata_port *ap,
+				const struct ata_taskfile *tf)
+{
+	/* The base of the registers is at ioaddr.data_addr. */
+	void __iomem *base = ap->ioaddr.data_addr;
+	u16 blob;
+
+	if (tf->flags & ATA_TFLAG_DEVICE) {
+		VPRINTK("device 0x%X\n", tf->device);
+		blob = tf->device;
+	} else {
+		blob = 0;
+	}
+
+	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+	blob |= (tf->command << 8);
+	__raw_writew(blob, base + 6);
+
+
+	ata_wait_idle(ap);
+}
+
+static u8 octeon_cf_irq_on(struct ata_port *ap)
+{
+	return 0;
+}
+
+static void octeon_cf_irq_clear(struct ata_port *ap)
+{
+	return;
+}
+
+static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct octeon_cf_port *cf_port;
+
+	cf_port = (struct octeon_cf_port *)ap->private_data;
+	DPRINTK("ENTER\n");
+	/* issue r/w command */
+	qc->cursg = qc->sg;
+	cf_port->dma_finished = 0;
+	ap->ops->sff_exec_command(ap, &qc->tf);
+	DPRINTK("EXIT\n");
+}
+
+/**
+ * Start a DMA transfer that was already setup
+ *
+ * @qc:     Information about the DMA
+ */
+static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
+{
+	struct octeon_cf_data *ocd = qc->ap->dev->platform_data;
+	union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg;
+	union cvmx_mio_boot_dma_intx mio_boot_dma_int;
+	struct scatterlist *sg;
+
+	VPRINTK("%d scatterlists\n", qc->n_elem);
+
+	/* Get the scatter list entry we need to DMA into */
+	sg = qc->cursg;
+	BUG_ON(!sg);
+
+	/*
+	 * Clear the DMA complete status.
+	 */
+	mio_boot_dma_int.u64 = 0;
+	mio_boot_dma_int.s.done = 1;
+	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
+		       mio_boot_dma_int.u64);
+
+	/* Enable the interrupt.  */
+	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine),
+		       mio_boot_dma_int.u64);
+
+	/* Set the direction of the DMA */
+	mio_boot_dma_cfg.u64 = 0;
+	mio_boot_dma_cfg.s.en = 1;
+	mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0);
+
+	/*
+	 * Don't stop the DMA if the device deasserts DMARQ. Many
+	 * compact flashes deassert DMARQ for a short time between
+	 * sectors. Instead of stopping and restarting the DMA, we'll
+	 * let the hardware do it. If the DMA is really stopped early
+	 * due to an error condition, a later timeout will force us to
+	 * stop.
+	 */
+	mio_boot_dma_cfg.s.clr = 0;
+
+	/* Size is specified in 16-bit words, using minus-one notation */
+	mio_boot_dma_cfg.s.size = sg_dma_len(sg) / 2 - 1;
+
+	/* We need to swap the high and low bytes of every 16 bits */
+	mio_boot_dma_cfg.s.swap8 = 1;
+
+	mio_boot_dma_cfg.s.adr = sg_dma_address(sg);
+
+	VPRINTK("%s %d bytes address=%p\n",
+		(mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length,
+		(void *)(unsigned long)mio_boot_dma_cfg.s.adr);
+
+	cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine),
+		       mio_boot_dma_cfg.u64);
+}
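+
+/*
+ * Worked example (illustrative only): a 512-byte scatterlist entry
+ * gives sg_dma_len(sg) / 2 - 1 = 255, i.e. 256 16-bit words encoded
+ * in minus-one notation.
+ */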
+
+/**
+ *	Stop the DMA engine after a completed transfer and advance the
+ *	host state machine.
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
+					struct ata_queued_cmd *qc)
+{
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	struct octeon_cf_data *ocd = ap->dev->platform_data;
+	union cvmx_mio_boot_dma_cfgx dma_cfg;
+	union cvmx_mio_boot_dma_intx dma_int;
+	struct octeon_cf_port *cf_port;
+	u8 status;
+
+	VPRINTK("ata%u: protocol %d task_state %d\n",
+		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
+
+
+	if (ap->hsm_task_state != HSM_ST_LAST)
+		return 0;
+
+	cf_port = (struct octeon_cf_port *)ap->private_data;
+
+	dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
+	if (dma_cfg.s.size != 0xfffff) {
+		/* Error, the transfer was not complete.  */
+		qc->err_mask |= AC_ERR_HOST_BUS;
+		ap->hsm_task_state = HSM_ST_ERR;
+	}
+
+	/* Stop and clear the dma engine.  */
+	dma_cfg.u64 = 0;
+	dma_cfg.s.size = -1;
+	cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine), dma_cfg.u64);
+
+	/* Disable the interrupt.  */
+	dma_int.u64 = 0;
+	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine), dma_int.u64);
+
+	/* Clear the DMA complete status */
+	dma_int.s.done = 1;
+	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine), dma_int.u64);
+
+	status = ap->ops->sff_check_status(ap);
+
+	ata_sff_hsm_move(ap, qc, status, 0);
+
+	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA))
+		ata_ehi_push_desc(ehi, "DMA stat 0x%x", status);
+
+	return 1;
+}
+
+/*
+ * Check whether any queued commands have more DMAs; if so, start the
+ * next transfer, otherwise do end-of-transfer handling.
+ */
+static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	struct octeon_cf_port *cf_port;
+	int i;
+	unsigned int handled = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	DPRINTK("ENTER\n");
+	for (i = 0; i < host->n_ports; i++) {
+		u8 status;
+		struct ata_port *ap;
+		struct ata_queued_cmd *qc;
+		union cvmx_mio_boot_dma_intx dma_int;
+		union cvmx_mio_boot_dma_cfgx dma_cfg;
+		struct octeon_cf_data *ocd;
+
+		ap = host->ports[i];
+
+		if (!ap || (ap->flags & ATA_FLAG_DISABLED))
+			continue;
+
+		ocd = ap->dev->platform_data;
+		cf_port = (struct octeon_cf_port *)ap->private_data;
+		dma_int.u64 =
+			cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
+		dma_cfg.u64 =
+			cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
+
+		qc = ata_qc_from_tag(ap, ap->link.active_tag);
+
+		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
+		    (qc->flags & ATA_QCFLAG_ACTIVE)) {
+			if (dma_int.s.done && !dma_cfg.s.en) {
+				if (!sg_is_last(qc->cursg)) {
+					qc->cursg = sg_next(qc->cursg);
+					handled = 1;
+					octeon_cf_dma_start(qc);
+					continue;
+				} else {
+					cf_port->dma_finished = 1;
+				}
+			}
+			if (!cf_port->dma_finished)
+				continue;
+			status = ioread8(ap->ioaddr.altstatus_addr);
+			if (status & (ATA_BUSY | ATA_DRQ)) {
+				/*
+				 * We are busy, try to handle it
+				 * later.  This is the DMA finished
+				 * interrupt, and it could take a
+				 * little while for the card to be
+				 * ready for more commands.
+				 */
+				/* Clear DMA irq. */
+				dma_int.u64 = 0;
+				dma_int.s.done = 1;
+				cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
+					       dma_int.u64);
+
+				queue_delayed_work(cf_port->wq,
+						   &cf_port->delayed_finish, 1);
+				handled = 1;
+			} else {
+				handled |= octeon_cf_dma_finished(ap, qc);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
+	DPRINTK("EXIT\n");
+	return IRQ_RETVAL(handled);
+}
+
+static void octeon_cf_delayed_finish(struct work_struct *work)
+{
+	struct octeon_cf_port *cf_port = container_of(work,
+						      struct octeon_cf_port,
+						      delayed_finish.work);
+	struct ata_port *ap = cf_port->ap;
+	struct ata_host *host = ap->host;
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	u8 status;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	/*
+	 * If the port is not waiting for completion, it must have
+	 * handled it previously.  The hsm_task_state is
+	 * protected by host->lock.
+	 */
+	if (ap->hsm_task_state != HSM_ST_LAST || !cf_port->dma_finished)
+		goto out;
+
+	status = ioread8(ap->ioaddr.altstatus_addr);
+	if (status & (ATA_BUSY | ATA_DRQ)) {
+		/* Still busy, try again. */
+		queue_delayed_work(cf_port->wq,
+				   &cf_port->delayed_finish, 1);
+		goto out;
+	}
+	qc = ata_qc_from_tag(ap, ap->link.active_tag);
+	if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
+	    (qc->flags & ATA_QCFLAG_ACTIVE))
+		octeon_cf_dma_finished(ap, qc);
+out:
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void octeon_cf_dev_config(struct ata_device *dev)
+{
+	/*
+	 * A maximum of 2^20 - 1 16-bit transfers are possible with
+	 * the bootbus DMA, so throttle max_sectors to
+	 * (2^12 - 1 == 4095) to ensure this limit is never exceeded.
+	 */
+	dev->max_sectors = min(dev->max_sectors, 4095U);
+}
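+
+/*
+ * Worked example (illustrative only): 2^20 - 1 words is 2,097,150
+ * bytes, and 2,097,150 / 512 = 4095.99..., hence the 4095-sector cap
+ * above.
+ */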
+
+/*
+ * Trap if the driver tries to issue standard BMDMA commands; they are
+ * not supported.
+ */
+static void unreachable_qc(struct ata_queued_cmd *qc)
+{
+	BUG();
+}
+
+static u8 unreachable_port(struct ata_port *ap)
+{
+	BUG();
+}
+
+/*
+ * We don't do ATAPI DMA so return 0.
+ */
+static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	return 0;
+}
+
+static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_DMA:
+		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+
+		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
+		octeon_cf_dma_setup(qc);	    /* set up dma */
+		octeon_cf_dma_start(qc);	    /* initiate dma */
+		ap->hsm_task_state = HSM_ST_LAST;
+		break;
+
+	case ATAPI_PROT_DMA:
+		dev_err(ap->dev, "Error, ATAPI not supported\n");
+		BUG();
+
+	default:
+		return ata_sff_qc_issue(qc);
+	}
+
+	return 0;
+}
+
+static struct ata_port_operations octeon_cf_ops = {
+	.inherits		= &ata_sff_port_ops,
+	.check_atapi_dma	= octeon_cf_check_atapi_dma,
+	.qc_prep		= ata_noop_qc_prep,
+	.qc_issue		= octeon_cf_qc_issue,
+	.sff_dev_select		= octeon_cf_dev_select,
+	.sff_irq_on		= octeon_cf_irq_on,
+	.sff_irq_clear		= octeon_cf_irq_clear,
+	.bmdma_setup		= unreachable_qc,
+	.bmdma_start		= unreachable_qc,
+	.bmdma_stop		= unreachable_qc,
+	.bmdma_status		= unreachable_port,
+	.cable_detect		= ata_cable_40wire,
+	.set_piomode		= octeon_cf_set_piomode,
+	.set_dmamode		= octeon_cf_set_dmamode,
+	.dev_config		= octeon_cf_dev_config,
+};
+
+static int __devinit octeon_cf_probe(struct platform_device *pdev)
+{
+	struct resource *res_cs0, *res_cs1;
+
+	void __iomem *cs0;
+	void __iomem *cs1 = NULL;
+	struct ata_host *host;
+	struct ata_port *ap;
+	struct octeon_cf_data *ocd;
+	int irq = 0;
+	irq_handler_t irq_handler = NULL;
+	void __iomem *base;
+	struct octeon_cf_port *cf_port;
+
+	res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!res_cs0)
+		return -EINVAL;
+
+	ocd = pdev->dev.platform_data;
+
+	cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
+				   res_cs0->end - res_cs0->start + 1);
+
+	if (!cs0)
+		return -ENOMEM;
+
+	/* The availability of DMA determines whether this is True IDE mode */
+	if (ocd->dma_engine >= 0) {
+		res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		if (!res_cs1)
+			return -EINVAL;
+
+		cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
+					   res_cs1->end - res_cs1->start + 1);
+
+		if (!cs1)
+			return -ENOMEM;
+	}
+
+	cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL);
+	if (!cf_port)
+		return -ENOMEM;
+
+	/* allocate host */
+	host = ata_host_alloc(&pdev->dev, 1);
+	if (!host)
+		goto free_cf_port;
+
+	ap = host->ports[0];
+	ap->private_data = cf_port;
+	cf_port->ap = ap;
+	ap->ops = &octeon_cf_ops;
+	ap->pio_mask = 0x7f; /* Support PIO 0-6 */
+	ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY
+		  | ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
+
+	base = cs0 + ocd->base_region_bias;
+	if (!ocd->is16bit) {
+		ap->ioaddr.cmd_addr	= base;
+		ata_sff_std_ports(&ap->ioaddr);
+
+		ap->ioaddr.altstatus_addr = base + 0xe;
+		ap->ioaddr.ctl_addr	= base + 0xe;
+		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8;
+	} else if (cs1) {
+		/* Presence of cs1 indicates True IDE mode.  */
+		ap->ioaddr.cmd_addr	= base + (ATA_REG_CMD << 1) + 1;
+		ap->ioaddr.data_addr	= base + (ATA_REG_DATA << 1);
+		ap->ioaddr.error_addr	= base + (ATA_REG_ERR << 1) + 1;
+		ap->ioaddr.feature_addr	= base + (ATA_REG_FEATURE << 1) + 1;
+		ap->ioaddr.nsect_addr	= base + (ATA_REG_NSECT << 1) + 1;
+		ap->ioaddr.lbal_addr	= base + (ATA_REG_LBAL << 1) + 1;
+		ap->ioaddr.lbam_addr	= base + (ATA_REG_LBAM << 1) + 1;
+		ap->ioaddr.lbah_addr	= base + (ATA_REG_LBAH << 1) + 1;
+		ap->ioaddr.device_addr	= base + (ATA_REG_DEVICE << 1) + 1;
+		ap->ioaddr.status_addr	= base + (ATA_REG_STATUS << 1) + 1;
+		ap->ioaddr.command_addr	= base + (ATA_REG_CMD << 1) + 1;
+		ap->ioaddr.altstatus_addr = cs1 + (6 << 1) + 1;
+		ap->ioaddr.ctl_addr	= cs1 + (6 << 1) + 1;
+		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
+
+		ap->mwdma_mask	= 0x1f; /* Support MWDMA 0-4 */
+		irq = platform_get_irq(pdev, 0);
+		irq_handler = octeon_cf_interrupt;
+
+		/* True IDE mode needs delayed work to poll for not-busy.  */
+		cf_port->wq = create_singlethread_workqueue(DRV_NAME);
+		if (!cf_port->wq)
+			goto free_cf_port;
+		INIT_DELAYED_WORK(&cf_port->delayed_finish,
+				  octeon_cf_delayed_finish);
+
+	} else {
+		/* 16 bit but not True IDE */
+		octeon_cf_ops.sff_data_xfer	= octeon_cf_data_xfer16;
+		octeon_cf_ops.softreset		= octeon_cf_softreset16;
+		octeon_cf_ops.sff_check_status	= octeon_cf_check_status16;
+		octeon_cf_ops.sff_tf_read	= octeon_cf_tf_read16;
+		octeon_cf_ops.sff_tf_load	= octeon_cf_tf_load16;
+		octeon_cf_ops.sff_exec_command	= octeon_cf_exec_command16;
+
+		ap->ioaddr.data_addr	= base + ATA_REG_DATA;
+		ap->ioaddr.nsect_addr	= base + ATA_REG_NSECT;
+		ap->ioaddr.lbal_addr	= base + ATA_REG_LBAL;
+		ap->ioaddr.ctl_addr	= base + 0xe;
+		ap->ioaddr.altstatus_addr = base + 0xe;
+	}
+
+	ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
+
+
+	dev_info(&pdev->dev, "version " DRV_VERSION" %d bit%s.\n",
+		 (ocd->is16bit) ? 16 : 8,
+		 (cs1) ? ", True IDE" : "");
+
+
+	return ata_host_activate(host, irq, irq_handler, 0, &octeon_cf_sht);
+
+free_cf_port:
+	kfree(cf_port);
+	return -ENOMEM;
+}
+
+static struct platform_driver octeon_cf_driver = {
+	.probe		= octeon_cf_probe,
+	.driver		= {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init octeon_cf_init(void)
+{
+	return platform_driver_register(&octeon_cf_driver);
+}
+
+
+MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
+MODULE_DESCRIPTION("low-level driver for Cavium OCTEON Compact Flash PATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:" DRV_NAME);
+
+module_init(octeon_cf_init);
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 6afa07a..d8d743a 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -186,7 +186,7 @@
  *	A platform bus ATA device has been unplugged. Perform the needed
  *	cleanup. Also called on module unload for any active devices.
  */
-int __devexit __pata_platform_remove(struct device *dev)
+int __pata_platform_remove(struct device *dev)
 {
 	struct ata_host *host = dev_get_drvdata(dev);
 
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 83580a5..9e764e5 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -32,7 +32,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_sil680"
-#define DRV_VERSION "0.4.8"
+#define DRV_VERSION "0.4.9"
 
 #define SIL680_MMIO_BAR		5
 
@@ -195,7 +195,7 @@
 };
 
 static struct ata_port_operations sil680_port_ops = {
-	.inherits	= &ata_bmdma_port_ops,
+	.inherits	= &ata_bmdma32_port_ops,
 	.cable_detect	= sil680_cable_detect,
 	.set_piomode	= sil680_set_piomode,
 	.set_dmamode	= sil680_set_dmamode,
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 1a56db9..55bc88c 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1288,7 +1288,7 @@
 static int sata_fsl_probe(struct of_device *ofdev,
 			const struct of_device_id *match)
 {
-	int retval = 0;
+	int retval = -ENXIO;
 	void __iomem *hcr_base = NULL;
 	void __iomem *ssr_base = NULL;
 	void __iomem *csr_base = NULL;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index ccee930..2590c22 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -51,13 +51,6 @@
 	__le32	flags;
 };
 
-/*
- * Port multiplier
- */
-struct sil24_port_multiplier {
-	__le32	diag;
-	__le32	sactive;
-};
 
 enum {
 	SIL24_HOST_BAR		= 0,
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index c18935f..5c62da9 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -92,6 +92,8 @@
 	{ PCI_VDEVICE(VIA, 0x5372), vt6420 },
 	{ PCI_VDEVICE(VIA, 0x7372), vt6420 },
 	{ PCI_VDEVICE(VIA, 0x5287), vt8251 }, /* 2 sata chnls (Master/Slave) */
+	{ PCI_VDEVICE(VIA, 0x9000), vt8251 },
+	{ PCI_VDEVICE(VIA, 0x9040), vt8251 },
 
 	{ }	/* terminate list */
 };
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 088885e..e1c7611 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -64,7 +64,7 @@
 #include <linux/jiffies.h>
 #include "iphase.h"		  
 #include "suni.h"		  
-#define swap(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))  
+#define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
 
 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
 
@@ -1306,7 +1306,7 @@
           // get real pkt length  pwang_test
           trailer = (struct cpcs_trailer*)((u_char *)skb->data +
                                  skb->len - sizeof(*trailer));
-          length =  swap(trailer->length);
+	  length = swap_byte_order(trailer->length);
           if ((length > iadev->rx_buf_sz) || (length > 
                               (skb->len - sizeof(struct cpcs_trailer))))
           {
@@ -2995,7 +2995,7 @@
 		skb->len, PCI_DMA_TODEVICE);
 	wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
                                                   buf_desc_ptr->buf_start_lo;  
-	/* wr_ptr->bytes = swap(total_len);	didn't seem to affect ?? */  
+	/* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
 	wr_ptr->bytes = skb->len;  
 
         /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index d8e8c49..8f006f9 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -54,7 +54,7 @@
 	  such firmware, and do not wish to use an initrd.
 
 	  This single option controls the inclusion of firmware for
-	  every driver which uses request_firmare() and ships its
+	  every driver which uses request_firmware() and ships its
 	  firmware in the kernel source tree, to avoid a proliferation
 	  of 'Include firmware for xxx device' options.
 
diff --git a/drivers/base/base.h b/drivers/base/base.h
index b676f8f..0a5f055 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -63,32 +63,6 @@
 #define to_class(obj)	\
 	container_of(obj, struct class_private, class_subsys.kobj)
 
-/**
- * struct device_private - structure to hold the private to the driver core portions of the device structure.
- *
- * @klist_children - klist containing all children of this device
- * @knode_parent - node in sibling list
- * @knode_driver - node in driver list
- * @knode_bus - node in bus list
- * @device - pointer back to the struct class that this structure is
- * associated with.
- *
- * Nothing outside of the driver core should ever touch these fields.
- */
-struct device_private {
-	struct klist klist_children;
-	struct klist_node knode_parent;
-	struct klist_node knode_driver;
-	struct klist_node knode_bus;
-	struct device *device;
-};
-#define to_device_private_parent(obj)	\
-	container_of(obj, struct device_private, knode_parent)
-#define to_device_private_driver(obj)	\
-	container_of(obj, struct device_private, knode_driver)
-#define to_device_private_bus(obj)	\
-	container_of(obj, struct device_private, knode_bus)
-
 /* initialisation functions */
 extern int devices_init(void);
 extern int buses_init(void);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 0f0a504..83f32b8 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -253,14 +253,7 @@
 static struct device *next_device(struct klist_iter *i)
 {
 	struct klist_node *n = klist_next(i);
-	struct device *dev = NULL;
-	struct device_private *dev_prv;
-
-	if (n) {
-		dev_prv = to_device_private_bus(n);
-		dev = dev_prv->device;
-	}
-	return dev;
+	return n ? container_of(n, struct device, knode_bus) : NULL;
 }
 
 /**
@@ -293,7 +286,7 @@
 		return -EINVAL;
 
 	klist_iter_init_node(&bus->p->klist_devices, &i,
-			     (start ? &start->p->knode_bus : NULL));
+			     (start ? &start->knode_bus : NULL));
 	while ((dev = next_device(&i)) && !error)
 		error = fn(dev, data);
 	klist_iter_exit(&i);
@@ -327,7 +320,7 @@
 		return NULL;
 
 	klist_iter_init_node(&bus->p->klist_devices, &i,
-			     (start ? &start->p->knode_bus : NULL));
+			     (start ? &start->knode_bus : NULL));
 	while ((dev = next_device(&i)))
 		if (match(dev, data) && get_device(dev))
 			break;
@@ -514,8 +507,7 @@
 			ret = device_attach(dev);
 		WARN_ON(ret < 0);
 		if (ret >= 0)
-			klist_add_tail(&dev->p->knode_bus,
-				       &bus->p->klist_devices);
+			klist_add_tail(&dev->knode_bus, &bus->p->klist_devices);
 	}
 }
 
@@ -536,8 +528,8 @@
 		sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
 				  dev_name(dev));
 		device_remove_attrs(dev->bus, dev);
-		if (klist_node_attached(&dev->p->knode_bus))
-			klist_del(&dev->p->knode_bus);
+		if (klist_node_attached(&dev->knode_bus))
+			klist_del(&dev->knode_bus);
 
 		pr_debug("bus: '%s': remove device %s\n",
 			 dev->bus->name, dev_name(dev));
@@ -839,16 +831,14 @@
 
 static void klist_devices_get(struct klist_node *n)
 {
-	struct device_private *dev_prv = to_device_private_bus(n);
-	struct device *dev = dev_prv->device;
+	struct device *dev = container_of(n, struct device, knode_bus);
 
 	get_device(dev);
 }
 
 static void klist_devices_put(struct klist_node *n)
 {
-	struct device_private *dev_prv = to_device_private_bus(n);
-	struct device *dev = dev_prv->device;
+	struct device *dev = container_of(n, struct device, knode_bus);
 
 	put_device(dev);
 }
@@ -1003,20 +993,18 @@
 {
 	struct list_head *pos;
 	struct klist_node *n;
-	struct device_private *dev_prv;
 	struct device *b;
 
 	list_for_each(pos, list) {
 		n = container_of(pos, struct klist_node, n_node);
-		dev_prv = to_device_private_bus(n);
-		b = dev_prv->device;
+		b = container_of(n, struct device, knode_bus);
 		if (compare(a, b) <= 0) {
-			list_move_tail(&a->p->knode_bus.n_node,
-				       &b->p->knode_bus.n_node);
+			list_move_tail(&a->knode_bus.n_node,
+				       &b->knode_bus.n_node);
 			return;
 		}
 	}
-	list_move_tail(&a->p->knode_bus.n_node, list);
+	list_move_tail(&a->knode_bus.n_node, list);
 }
 
 void bus_sort_breadthfirst(struct bus_type *bus,
@@ -1026,7 +1014,6 @@
 	LIST_HEAD(sorted_devices);
 	struct list_head *pos, *tmp;
 	struct klist_node *n;
-	struct device_private *dev_prv;
 	struct device *dev;
 	struct klist *device_klist;
 
@@ -1035,8 +1022,7 @@
 	spin_lock(&device_klist->k_lock);
 	list_for_each_safe(pos, tmp, &device_klist->k_list) {
 		n = container_of(pos, struct klist_node, n_node);
-		dev_prv = to_device_private_bus(n);
-		dev = dev_prv->device;
+		dev = container_of(n, struct device, knode_bus);
 		device_insertion_sort_klist(dev, &sorted_devices, compare);
 	}
 	list_splice(&sorted_devices, &device_klist->k_list);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 61df508..8079afc 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -109,7 +109,6 @@
 static void device_release(struct kobject *kobj)
 {
 	struct device *dev = to_dev(kobj);
-	struct device_private *p = dev->p;
 
 	if (dev->release)
 		dev->release(dev);
@@ -121,7 +120,6 @@
 		WARN(1, KERN_ERR "Device '%s' does not have a release() "
 			"function, it is broken and must be fixed.\n",
 			dev_name(dev));
-	kfree(p);
 }
 
 static struct kobj_type device_ktype = {
@@ -509,16 +507,14 @@
 
 static void klist_children_get(struct klist_node *n)
 {
-	struct device_private *p = to_device_private_parent(n);
-	struct device *dev = p->device;
+	struct device *dev = container_of(n, struct device, knode_parent);
 
 	get_device(dev);
 }
 
 static void klist_children_put(struct klist_node *n)
 {
-	struct device_private *p = to_device_private_parent(n);
-	struct device *dev = p->device;
+	struct device *dev = container_of(n, struct device, knode_parent);
 
 	put_device(dev);
 }
@@ -540,15 +536,9 @@
  */
 void device_initialize(struct device *dev)
 {
-	dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
-	if (!dev->p) {
-		WARN_ON(1);
-		return;
-	}
-	dev->p->device = dev;
 	dev->kobj.kset = devices_kset;
 	kobject_init(&dev->kobj, &device_ktype);
-	klist_init(&dev->p->klist_children, klist_children_get,
+	klist_init(&dev->klist_children, klist_children_get,
 		   klist_children_put);
 	INIT_LIST_HEAD(&dev->dma_pools);
 	init_MUTEX(&dev->sem);
@@ -932,8 +922,7 @@
 	kobject_uevent(&dev->kobj, KOBJ_ADD);
 	bus_attach_device(dev);
 	if (parent)
-		klist_add_tail(&dev->p->knode_parent,
-			       &parent->p->klist_children);
+		klist_add_tail(&dev->knode_parent, &parent->klist_children);
 
 	if (dev->class) {
 		mutex_lock(&dev->class->p->class_mutex);
@@ -1047,7 +1036,7 @@
 	device_pm_remove(dev);
 	dpm_sysfs_remove(dev);
 	if (parent)
-		klist_del(&dev->p->knode_parent);
+		klist_del(&dev->knode_parent);
 	if (MAJOR(dev->devt)) {
 		device_remove_sys_dev_entry(dev);
 		device_remove_file(dev, &devt_attr);
@@ -1108,14 +1097,7 @@
 static struct device *next_device(struct klist_iter *i)
 {
 	struct klist_node *n = klist_next(i);
-	struct device *dev = NULL;
-	struct device_private *p;
-
-	if (n) {
-		p = to_device_private_parent(n);
-		dev = p->device;
-	}
-	return dev;
+	return n ? container_of(n, struct device, knode_parent) : NULL;
 }
 
 /**
@@ -1137,7 +1119,7 @@
 	struct device *child;
 	int error = 0;
 
-	klist_iter_init(&parent->p->klist_children, &i);
+	klist_iter_init(&parent->klist_children, &i);
 	while ((child = next_device(&i)) && !error)
 		error = fn(child, data);
 	klist_iter_exit(&i);
@@ -1168,7 +1150,7 @@
 	if (!parent)
 		return NULL;
 
-	klist_iter_init(&parent->p->klist_children, &i);
+	klist_iter_init(&parent->klist_children, &i);
 	while ((child = next_device(&i)))
 		if (match(child, data) && get_device(child))
 			break;
@@ -1582,10 +1564,9 @@
 	old_parent = dev->parent;
 	dev->parent = new_parent;
 	if (old_parent)
-		klist_remove(&dev->p->knode_parent);
+		klist_remove(&dev->knode_parent);
 	if (new_parent) {
-		klist_add_tail(&dev->p->knode_parent,
-			       &new_parent->p->klist_children);
+		klist_add_tail(&dev->knode_parent, &new_parent->klist_children);
 		set_dev_node(dev, dev_to_node(new_parent));
 	}
 
@@ -1597,11 +1578,11 @@
 		device_move_class_links(dev, new_parent, old_parent);
 		if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
 			if (new_parent)
-				klist_remove(&dev->p->knode_parent);
+				klist_remove(&dev->knode_parent);
 			dev->parent = old_parent;
 			if (old_parent) {
-				klist_add_tail(&dev->p->knode_parent,
-					       &old_parent->p->klist_children);
+				klist_add_tail(&dev->knode_parent,
+					       &old_parent->klist_children);
 				set_dev_node(dev, dev_to_node(old_parent));
 			}
 		}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 6fdaf76..315bed8 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -28,7 +28,7 @@
 
 static void driver_bound(struct device *dev)
 {
-	if (klist_node_attached(&dev->p->knode_driver)) {
+	if (klist_node_attached(&dev->knode_driver)) {
 		printk(KERN_WARNING "%s: device %s already bound\n",
 			__func__, kobject_name(&dev->kobj));
 		return;
@@ -41,7 +41,7 @@
 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
 					     BUS_NOTIFY_BOUND_DRIVER, dev);
 
-	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+	klist_add_tail(&dev->knode_driver, &dev->driver->p->klist_devices);
 }
 
 static int driver_sysfs_add(struct device *dev)
@@ -310,7 +310,7 @@
 			drv->remove(dev);
 		devres_release_all(dev);
 		dev->driver = NULL;
-		klist_remove(&dev->p->knode_driver);
+		klist_remove(&dev->knode_driver);
 	}
 }
 
@@ -340,7 +340,6 @@
  */
 void driver_detach(struct device_driver *drv)
 {
-	struct device_private *dev_prv;
 	struct device *dev;
 
 	for (;;) {
@@ -349,10 +348,8 @@
 			spin_unlock(&drv->p->klist_devices.k_lock);
 			break;
 		}
-		dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
-				     struct device_private,
-				     knode_driver.n_node);
-		dev = dev_prv->device;
+		dev = list_entry(drv->p->klist_devices.k_list.prev,
+				struct device, knode_driver.n_node);
 		get_device(dev);
 		spin_unlock(&drv->p->klist_devices.k_lock);
 
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index b76cc69..1e2bda7 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -19,14 +19,7 @@
 static struct device *next_device(struct klist_iter *i)
 {
 	struct klist_node *n = klist_next(i);
-	struct device *dev = NULL;
-	struct device_private *dev_prv;
-
-	if (n) {
-		dev_prv = to_device_private_driver(n);
-		dev = dev_prv->device;
-	}
-	return dev;
+	return n ? container_of(n, struct device, knode_driver) : NULL;
 }
 
 /**
@@ -49,7 +42,7 @@
 		return -EINVAL;
 
 	klist_iter_init_node(&drv->p->klist_devices, &i,
-			     start ? &start->p->knode_driver : NULL);
+			     start ? &start->knode_driver : NULL);
 	while ((dev = next_device(&i)) && !error)
 		error = fn(dev, data);
 	klist_iter_exit(&i);
@@ -83,7 +76,7 @@
 		return NULL;
 
 	klist_iter_init_node(&drv->p->klist_devices, &i,
-			     (start ? &start->p->knode_driver : NULL));
+			     (start ? &start->knode_driver : NULL));
 	while ((dev = next_device(&i)))
 		if (match(dev, data) && get_device(dev))
 			break;
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index a8bc1cb..a778fb5 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -28,6 +28,7 @@
 #include <linux/mm.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
+#include <linux/hardirq.h>
 #include <linux/topology.h>
 
 #define define_one_ro(_name) 		\
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 4b1d4ac..8df436f 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -156,7 +156,7 @@
 static volatile int fdc_nested;
 static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
  
-static DECLARE_WAIT_QUEUE_HEAD(motor_wait);
+static DECLARE_COMPLETION(motor_on_completion);
 
 static volatile int selected = -1;	/* currently selected drive */
 
@@ -184,8 +184,7 @@
 static unsigned char mfmdecode[128];
 
 /* floppy internal millisecond timer stuff */
-static volatile int ms_busy = -1;
-static DECLARE_WAIT_QUEUE_HEAD(ms_wait);
+static DECLARE_COMPLETION(ms_wait_completion);
 #define MS_TICKS ((amiga_eclock+50)/1000)
 
 /*
@@ -211,8 +210,7 @@
 
 static irqreturn_t ms_isr(int irq, void *dummy)
 {
-	ms_busy = -1;
-	wake_up(&ms_wait);
+	complete(&ms_wait_completion);
 	return IRQ_HANDLED;
 }
 
@@ -220,19 +218,17 @@
    A more generic routine would do a schedule a la timer.device */
 static void ms_delay(int ms)
 {
-	unsigned long flags;
 	int ticks;
+	static DEFINE_MUTEX(mutex);
+
 	if (ms > 0) {
-		local_irq_save(flags);
-		while (ms_busy == 0)
-			sleep_on(&ms_wait);
-		ms_busy = 0;
-		local_irq_restore(flags);
+		mutex_lock(&mutex);
 		ticks = MS_TICKS*ms-1;
 		ciaa.tblo=ticks%256;
 		ciaa.tbhi=ticks/256;
 		ciaa.crb=0x19; /*count eclock, force load, one-shoot, start */
-		sleep_on(&ms_wait);
+		wait_for_completion(&ms_wait_completion);
+		mutex_unlock(&mutex);
 	}
 }
 
@@ -254,8 +250,7 @@
 	printk("get_fdc: drive %d  fdc_busy %d  fdc_nested %d\n",drive,fdc_busy,fdc_nested);
 #endif
 	local_irq_save(flags);
-	while (!try_fdc(drive))
-		sleep_on(&fdc_wait);
+	wait_event(fdc_wait, try_fdc(drive));
 	fdc_busy = drive;
 	fdc_nested++;
 	local_irq_restore(flags);
@@ -330,7 +325,7 @@
 static void motor_on_callback(unsigned long nr)
 {
 	if (!(ciaa.pra & DSKRDY) || --on_attempts == 0) {
-		wake_up (&motor_wait);
+		complete_all(&motor_on_completion);
 	} else {
 		motor_on_timer.expires = jiffies + HZ/10;
 		add_timer(&motor_on_timer);
@@ -347,11 +342,12 @@
 		unit[nr].motor = 1;
 		fd_select(nr);
 
+		INIT_COMPLETION(motor_on_completion);
 		motor_on_timer.data = nr;
 		mod_timer(&motor_on_timer, jiffies + HZ/2);
 
 		on_attempts = 10;
-		sleep_on (&motor_wait);
+		wait_for_completion(&motor_on_completion);
 		fd_deselect(nr);
 	}
 
@@ -582,8 +578,7 @@
 {
 	drive&=3;
 	get_fdc(drive);
-	while (block_flag)
-		sleep_on(&wait_fd_block);
+	wait_event(wait_fd_block, !block_flag);
 	fd_select(drive);
 	/* setup adkcon bits correctly */
 	custom.adkcon = ADK_MSBSYNC;
@@ -598,8 +593,7 @@
 
 	block_flag = 1;
 
-	while (block_flag)
-		sleep_on (&wait_fd_block);
+	wait_event(wait_fd_block, !block_flag);
 
 	custom.dsklen = 0;
 	fd_deselect(drive);
@@ -616,8 +610,7 @@
 		rel_fdc();
 		return 0;
 	}
-	while (block_flag)
-		sleep_on(&wait_fd_block);
+	wait_event(wait_fd_block, !block_flag);
 	fd_select(drive);
 	/* clear adkcon bits */
 	custom.adkcon = ADK_PRECOMP1|ADK_PRECOMP0|ADK_WORDSYNC|ADK_MSBSYNC;
@@ -1294,8 +1287,7 @@
 			writepending = 0;
 			return 0;
 		}
-		while (block_flag == 2)
-			sleep_on (&wait_fd_block);
+		wait_event(wait_fd_block, block_flag != 2);
 	}
 	else {
 		local_irq_restore(flags);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 7bcc1d8..34f80fa 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -406,6 +406,7 @@
 	ret = sysfs_create_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
 	if (ret) {
 		printk(KERN_ERR "nbd: sysfs_create_file failed!");
+		lo->pid = 0;
 		return ret;
 	}
 
@@ -413,6 +414,7 @@
 		nbd_end_request(req);
 
 	sysfs_remove_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
+	lo->pid = 0;
 	return 0;
 }
 
@@ -648,6 +650,8 @@
 		set_capacity(lo->disk, lo->bytesize >> 9);
 		return 0;
 	case NBD_DO_IT:
+		if (lo->pid)
+			return -EBUSY;
 		if (!lo->file)
 			return -EINVAL;
 		thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 936466f..bccc42b 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -141,7 +141,7 @@
 
 	start_sector = req->sector * priv->blocking_factor;
 	sectors = req->nr_sectors * priv->blocking_factor;
-	dev_dbg(&dev->sbd.core, "%s:%u: %s %lu sectors starting at %lu\n",
+	dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
 		__func__, __LINE__, op, sectors, start_sector);
 
 	if (write) {
@@ -178,7 +178,7 @@
 					      LV1_STORAGE_ATA_HDDOUT, 0, 0, 0,
 					      0, &dev->tag);
 	if (res) {
-		dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%lx\n",
+		dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
 			__func__, __LINE__, res);
 		end_request(req, 0);
 		return 0;
@@ -238,11 +238,11 @@
 
 	if (tag != dev->tag)
 		dev_err(&dev->sbd.core,
-			"%s:%u: tag mismatch, got %lx, expected %lx\n",
+			"%s:%u: tag mismatch, got %llx, expected %llx\n",
 			__func__, __LINE__, tag, dev->tag);
 
 	if (res) {
-		dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%lx\n",
+		dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n",
 			__func__, __LINE__, res, status);
 		return IRQ_HANDLED;
 	}
@@ -269,7 +269,7 @@
 		op = read ? "read" : "write";
 	}
 	if (status) {
-		dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__,
+		dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
 			__LINE__, op, status);
 		error = -EIO;
 	} else {
@@ -297,7 +297,7 @@
 
 	res = ps3stor_send_command(dev, LV1_STORAGE_ATA_HDDOUT, 0, 0, 0, 0);
 	if (res) {
-		dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%lx\n",
+		dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
 			__func__, __LINE__, res);
 		return -EIO;
 	}
@@ -388,7 +388,7 @@
 				   sizeof(ata_cmnd), ata_cmnd.buffer,
 				   ata_cmnd.arglen);
 	if (res) {
-		dev_err(&dev->sbd.core, "%s:%u: identify disk failed 0x%lx\n",
+		dev_err(&dev->sbd.core, "%s:%u: identify disk failed 0x%llx\n",
 			__func__, __LINE__, res);
 		return -EIO;
 	}
@@ -426,7 +426,7 @@
 
 	if (dev->blk_size < 512) {
 		dev_err(&dev->sbd.core,
-			"%s:%u: cannot handle block size %lu\n", __func__,
+			"%s:%u: cannot handle block size %llu\n", __func__,
 			__LINE__, dev->blk_size);
 		return -EINVAL;
 	}
@@ -512,7 +512,7 @@
 		     dev->regions[dev->region_idx].size*priv->blocking_factor);
 
 	dev_info(&dev->sbd.core,
-		 "%s is a %s (%lu MiB total, %lu MiB for OtherOS)\n",
+		 "%s is a %s (%llu MiB total, %lu MiB for OtherOS)\n",
 		 gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
 		 get_capacity(gendisk) >> 11);
 
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 953c0b8..5861e33 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -153,7 +153,7 @@
 	pkt.vdisk_block_size = port->vdisk_block_size;
 	pkt.max_xfer_size = port->max_xfer_size;
 
-	viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%lu]\n",
+	viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
 	       pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);
 
 	return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
@@ -164,8 +164,8 @@
 	struct vdc_port *port = to_vdc_port(vio);
 	struct vio_disk_attr_info *pkt = arg;
 
-	viodbg(HS, "GOT ATTR stype[0x%x] ops[%lx] disk_size[%lu] disk_type[%x] "
-	       "xfer_mode[0x%x] blksz[%u] max_xfer[%lu]\n",
+	viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
+	       "xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
 	       pkt->tag.stype, pkt->operations,
 	       pkt->vdisk_size, pkt->vdisk_type,
 	       pkt->xfer_mode, pkt->vdisk_block_size,
@@ -753,7 +753,7 @@
 
 	err = -ENODEV;
 	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
-		printk(KERN_ERR PFX "Port id [%lu] too large.\n",
+		printk(KERN_ERR PFX "Port id [%llu] too large.\n",
 		       vdev->dev_no);
 		goto err_out_release_mdesc;
 	}
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 048d71d..12fb816 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -1579,7 +1579,7 @@
 	struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
 	unsigned long flags;
 	struct ub_lun *lun;
-	int lkr, rc;
+	int rc;
 
 	if (!sc->reset) {
 		printk(KERN_WARNING "%s: Running reset unrequested\n",
@@ -1597,10 +1597,11 @@
 	} else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
 		;
 	} else {
-		if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) {
+		rc = usb_lock_device_for_reset(sc->dev, sc->intf);
+		if (rc < 0) {
 			printk(KERN_NOTICE
 			    "%s: usb_lock_device_for_reset failed (%d)\n",
-			    sc->name, lkr);
+			    sc->name, rc);
 		} else {
 			rc = usb_reset_device(sc->dev);
 			if (rc < 0) {
@@ -1608,9 +1609,7 @@
 				    "usb_lock_device_for_reset failed (%d)\n",
 				    sc->name, rc);
 			}
-
-			if (lkr)
-				usb_unlock_device(sc->dev);
+			usb_unlock_device(sc->dev);
 		}
 	}
 
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 29e1dfa..381d686 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1206,6 +1206,7 @@
 	{ .compatible = "xlnx,opb-sysace-1.00.b", },
 	{ .compatible = "xlnx,opb-sysace-1.00.c", },
 	{ .compatible = "xlnx,xps-sysace-1.00.a", },
+	{ .compatible = "xlnx,sysace", },
 	{},
 };
 MODULE_DEVICE_TABLE(of, ace_of_match);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 35914b6..f5be808 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -616,6 +616,7 @@
 	default y
 	select HVC_DRIVER
 	select HVC_IRQ
+	select VIOPATH
 	help
 	  iSeries machines support a hypervisor virtual console.
 
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 4e0cfde..a58869e 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -1963,6 +1963,7 @@
 {
 	unsigned long flags;
 	struct serial_state * state;
+	int error;
 
 	if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_SERIAL))
 		return -ENODEV;
@@ -1975,8 +1976,11 @@
 	 *  We request SERDAT and SERPER only, because the serial registers are
 	 *  too spreaded over the custom register space
 	 */
-	if (!request_mem_region(CUSTOM_PHYSADDR+0x30, 4, "amiserial [Paula]"))
-		return -EBUSY;
+	if (!request_mem_region(CUSTOM_PHYSADDR+0x30, 4,
+				"amiserial [Paula]")) {
+		error = -EBUSY;
+		goto fail_put_tty_driver;
+	}
 
 	IRQ_ports = NULL;
 
@@ -1997,8 +2001,9 @@
 	serial_driver->flags = TTY_DRIVER_REAL_RAW;
 	tty_set_operations(serial_driver, &serial_ops);
 
-	if (tty_register_driver(serial_driver))
-		panic("Couldn't register serial driver\n");
+	error = tty_register_driver(serial_driver);
+	if (error)
+		goto fail_release_mem_region;
 
 	state = rs_table;
 	state->magic = SSTATE_MAGIC;
@@ -2024,8 +2029,14 @@
 	local_irq_save(flags);
 
 	/* set ISRs, and then disable the rx interrupts */
-	request_irq(IRQ_AMIGA_TBE, ser_tx_int, 0, "serial TX", state);
-	request_irq(IRQ_AMIGA_RBF, ser_rx_int, IRQF_DISABLED, "serial RX", state);
+	error = request_irq(IRQ_AMIGA_TBE, ser_tx_int, 0, "serial TX", state);
+	if (error)
+		goto fail_unregister;
+
+	error = request_irq(IRQ_AMIGA_RBF, ser_rx_int, IRQF_DISABLED,
+			    "serial RX", state);
+	if (error)
+		goto fail_free_irq;
 
 	/* turn off Rx and Tx interrupts */
 	custom.intena = IF_RBF | IF_TBE;
@@ -2045,6 +2056,16 @@
 	ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR);  /* inputs */
 
 	return 0;
+
+fail_free_irq:
+	free_irq(IRQ_AMIGA_TBE, state);
+fail_unregister:
+	tty_unregister_driver(serial_driver);
+fail_release_mem_region:
+	release_mem_region(CUSTOM_PHYSADDR+0x30, 4);
+fail_put_tty_driver:
+	put_tty_driver(serial_driver);
+	return error;
 }
 
 static __exit void rs_exit(void) 
@@ -2064,6 +2085,9 @@
 	  kfree(info);
 	}
 
+	free_irq(IRQ_AMIGA_TBE, rs_table);
+	free_irq(IRQ_AMIGA_RBF, rs_table);
+
 	release_mem_region(CUSTOM_PHYSADDR+0x30, 4);
 }
 
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 977dfb1..f6094ae 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -103,7 +103,7 @@
 bsr_len_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
-	return sprintf(buf, "%lu\n", bsr_dev->bsr_len);
+	return sprintf(buf, "%llu\n", bsr_dev->bsr_len);
 }
 
 static struct device_attribute bsr_dev_attrs[] = {
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 39ad820..af7c13c 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -769,7 +769,7 @@
 	/* Check status of board configured in system.  */
 
 	/*
-	 * I check to see if the epca_setup routine detected an user error. It
+	 * I check to see if the epca_setup routine detected a user error. It
 	 * might be better to put this in pc_init, but for the moment it goes
 	 * here.
 	 */
diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
index 91cdb35..0afc8b8 100644
--- a/drivers/char/hvc_beat.c
+++ b/drivers/char/hvc_beat.c
@@ -44,7 +44,7 @@
 	static unsigned char q[sizeof(unsigned long) * 2]
 		__attribute__((aligned(sizeof(unsigned long))));
 	static int qlen = 0;
-	unsigned long got;
+	u64 got;
 
 again:
 	if (qlen) {
@@ -63,7 +63,7 @@
 		}
 	}
 	if (beat_get_term_char(vtermno, &got,
-		((unsigned long *)q), ((unsigned long *)q) + 1) == 0) {
+		((u64 *)q), ((u64 *)q) + 1) == 0) {
 		qlen = got;
 		goto again;
 	}
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 5a8a4c2..94e7e3c 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -318,7 +318,6 @@
 	} /* else count == 0 */
 
 	tty->driver_data = hp;
-	tty->low_latency = 1; /* Makes flushes to ldisc synchronous. */
 
 	hp->tty = tty;
 
@@ -764,13 +763,11 @@
 			return ERR_PTR(err);
 	}
 
-	hp = kmalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size,
+	hp = kzalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size,
 			GFP_KERNEL);
 	if (!hp)
 		return ERR_PTR(-ENOMEM);
 
-	memset(hp, 0x00, sizeof(*hp));
-
 	hp->vtermno = vtermno;
 	hp->data = data;
 	hp->ops = ops;
@@ -876,8 +873,11 @@
 		goto stop_thread;
 	}
 
-	/* FIXME: This mb() seems completely random.  Remove it. */
-	mb();
+	/*
+	 * Make sure tty is fully registered before allowing it to be
+	 * found by hvc_console_device.
+	 */
+	smp_mb();
 	hvc_driver = drv;
 	return 0;
 
diff --git a/drivers/char/hvc_irq.c b/drivers/char/hvc_irq.c
index d09e568..2623e17 100644
--- a/drivers/char/hvc_irq.c
+++ b/drivers/char/hvc_irq.c
@@ -37,7 +37,7 @@
 
 void notifier_del_irq(struct hvc_struct *hp, int irq)
 {
-	if (!irq)
+	if (!hp->irq_requested)
 		return;
 	free_irq(irq, hp);
 	hp->irq_requested = 0;
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 5ea7d77..a534968 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -1,26 +1,30 @@
 /*
- * hvc_iucv.c - z/VM IUCV back-end for the Hypervisor Console (HVC)
+ * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
  *
- * This back-end for HVC provides terminal access via
+ * This HVC device driver provides terminal access using
  * z/VM IUCV communication paths.
  *
- * Copyright IBM Corp. 2008.
+ * Copyright IBM Corp. 2008
  *
  * Author(s):	Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  */
 #define KMSG_COMPONENT		"hvc_iucv"
+#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt
 
 #include <linux/types.h>
 #include <asm/ebcdic.h>
+#include <linux/delay.h>
+#include <linux/init.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/tty.h>
+#include <linux/wait.h>
 #include <net/iucv/iucv.h>
 
 #include "hvc_console.h"
 
 
-/* HVC backend for z/VM IUCV */
+/* General device driver settings */
 #define HVC_IUCV_MAGIC		0xc9e4c3e5
 #define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS
 #define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
@@ -33,14 +37,14 @@
 #define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */
 #define MSG_TYPE_DATA		0x10	/* Terminal data */
 
-#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))
 struct iucv_tty_msg {
 	u8	version;		/* Message version */
 	u8	type;			/* Message type */
-#define MSG_MAX_DATALEN		(~(u16)0)
+#define MSG_MAX_DATALEN		((u16)(~0))
 	u16	datalen;		/* Payload length */
 	u8	data[];			/* Payload buffer */
 } __attribute__((packed));
+#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))
 
 enum iucv_state_t {
 	IUCV_DISCONN	= 0,
@@ -54,19 +58,26 @@
 };
 
 struct hvc_iucv_private {
-	struct hvc_struct	*hvc; /* HVC console struct reference */
+	struct hvc_struct	*hvc;		/* HVC struct reference */
 	u8			srv_name[8];	/* IUCV service name (ebcdic) */
+	unsigned char		is_console;	/* Linux console usage flag */
 	enum iucv_state_t	iucv_state;	/* IUCV connection status */
 	enum tty_state_t	tty_state;	/* TTY status */
 	struct iucv_path	*path;		/* IUCV path pointer */
 	spinlock_t		lock;		/* hvc_iucv_private lock */
+#define SNDBUF_SIZE		(PAGE_SIZE)	/* must be < MSG_MAX_DATALEN */
+	void			*sndbuf;	/* send buffer		  */
+	size_t			sndbuf_len;	/* length of send buffer  */
+#define QUEUE_SNDBUF_DELAY	(HZ / 25)
+	struct delayed_work	sndbuf_work;	/* work: send iucv msg(s) */
+	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */
 	struct list_head	tty_outqueue;	/* outgoing IUCV messages */
 	struct list_head	tty_inqueue;	/* incoming IUCV messages */
 };
 
 struct iucv_tty_buffer {
 	struct list_head	list;	/* list pointer */
-	struct iucv_message	msg;	/* store an incoming IUCV message */
+	struct iucv_message	msg;	/* store an IUCV message */
 	size_t			offset;	/* data buffer offset */
 	struct iucv_tty_msg	*mbuf;	/* buffer to store input/output data */
 };
@@ -78,11 +89,12 @@
 static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
 
 
-/* Kernel module parameters */
-static unsigned long hvc_iucv_devices;
+/* Kernel module parameter: use one terminal device as default */
+static unsigned long hvc_iucv_devices = 1;
 
 /* Array of allocated hvc iucv tty lines... */
 static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
+#define IUCV_HVC_CON_IDX	(0)
 
 /* Kmem cache and mempool for iucv_tty_buffer elements */
 static struct kmem_cache *hvc_iucv_buffer_cache;
@@ -112,7 +124,7 @@
 }
 
 /**
- * alloc_tty_buffer() - Returns a new struct iucv_tty_buffer element.
+ * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
  * @size:	Size of the internal buffer used to store data.
  * @flags:	Memory allocation flags passed to mempool.
  *
@@ -120,7 +132,6 @@
  * allocates an internal data buffer with the specified size @size.
  * Note: The total message size arises from the internal buffer size and the
  *	 members of the iucv_tty_msg structure.
- *
  * The function returns NULL if memory allocation has failed.
  */
 static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
@@ -130,7 +141,7 @@
 	bufp = mempool_alloc(hvc_iucv_mempool, flags);
 	if (!bufp)
 		return NULL;
-	memset(bufp, 0, sizeof(struct iucv_tty_buffer));
+	memset(bufp, 0, sizeof(*bufp));
 
 	if (size > 0) {
 		bufp->msg.length = MSG_SIZE(size);
@@ -149,9 +160,6 @@
 /**
  * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
  * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
- *
- * The destroy_tty_buffer() function frees the internal data buffer and returns
- * the struct iucv_tty_buffer element back to the mempool for freeing.
  */
 static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
 {
@@ -161,11 +169,7 @@
 
 /**
  * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
- * @list:	List head pointer to a list containing struct iucv_tty_buffer
- *		elements.
- *
- * Calls destroy_tty_buffer() for each struct iucv_tty_buffer element in the
- * list @list.
+ * @list:	List containing struct iucv_tty_buffer elements.
  */
 static void destroy_tty_buffer_list(struct list_head *list)
 {
@@ -178,24 +182,24 @@
 }
 
 /**
- * hvc_iucv_write() - Receive IUCV message write data to HVC console buffer.
- * @priv:		Pointer to hvc_iucv_private structure.
- * @buf:		HVC console buffer for writing received terminal data.
- * @count:		HVC console buffer size.
+ * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
+ * @priv:		Pointer to struct hvc_iucv_private
+ * @buf:		HVC buffer for writing received terminal data.
+ * @count:		HVC buffer size.
  * @has_more_data:	Pointer to an int variable.
  *
  * The function picks up pending messages from the input queue and receives
  * the message data that is then written to the specified buffer @buf.
- * If the buffer size @count is less than the data message size, then the
+ * If the buffer size @count is less than the data message size, the
  * message is kept on the input queue and @has_more_data is set to 1.
- * If the message data has been entirely written, the message is removed from
+ * If all message data has been written, the message is removed from
  * the input queue.
  *
  * The function returns the number of bytes written to the terminal, zero if
  * there are no pending data messages available or if there is no established
  * IUCV path.
  * If the IUCV path has been severed, then -EPIPE is returned to cause a
- * hang up (that is issued by the HVC console layer).
+ * hang up (that is issued by the HVC layer).
  */
 static int hvc_iucv_write(struct hvc_iucv_private *priv,
 			  char *buf, int count, int *has_more_data)
@@ -204,12 +208,12 @@
 	int written;
 	int rc;
 
-	/* Immediately return if there is no IUCV connection */
+	/* immediately return if there is no IUCV connection */
 	if (priv->iucv_state == IUCV_DISCONN)
 		return 0;
 
-	/* If the IUCV path has been severed, return -EPIPE to inform the
-	 * hvc console layer to hang up the tty device. */
+	/* if the IUCV path has been severed, return -EPIPE to inform the
+	 * HVC layer to hang up the tty device. */
 	if (priv->iucv_state == IUCV_SEVERED)
 		return -EPIPE;
 
@@ -217,7 +221,7 @@
 	if (list_empty(&priv->tty_inqueue))
 		return 0;
 
-	/* receive a iucv message and flip data to the tty (ldisc) */
+	/* receive an iucv message and flip data to the tty (ldisc) */
 	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
 
 	written = 0;
@@ -260,7 +264,7 @@
 	case MSG_TYPE_WINSIZE:
 		if (rb->mbuf->datalen != sizeof(struct winsize))
 			break;
-		hvc_resize(priv->hvc, *((struct winsize *)rb->mbuf->data));
+		hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
 		break;
 
 	case MSG_TYPE_ERROR:	/* ignored ... */
@@ -284,10 +288,9 @@
  * @buf:	Pointer to a buffer to store data
  * @count:	Size of buffer available for writing
  *
- * The hvc_console thread calls this method to read characters from
- * the terminal backend. If an IUCV communication path has been established,
- * pending IUCV messages are received and data is copied into buffer @buf
- * up to @count bytes.
+ * The HVC thread calls this method to read characters from the back-end.
+ * If an IUCV communication path has been established, pending IUCV messages
+ * are received and data is copied into buffer @buf up to @count bytes.
  *
  * Locking:	The routine gets called under an irqsave() spinlock; and
  *		the routine locks the struct hvc_iucv_private->lock to call
@@ -318,66 +321,122 @@
 }
 
 /**
- * hvc_iucv_send() - Send an IUCV message containing terminal data.
+ * hvc_iucv_queue() - Buffer terminal data for sending.
  * @priv:	Pointer to struct hvc_iucv_private instance.
  * @buf:	Buffer containing data to send.
- * @size:	Size of buffer and amount of data to send.
+ * @count:	Size of buffer and amount of data to send.
  *
- * If an IUCV communication path is established, the function copies the buffer
- * data to a newly allocated struct iucv_tty_buffer element, sends the data and
- * puts the element to the outqueue.
+ * The function queues data for sending. To actually send the buffered data,
+ * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
+ * The function returns the number of data bytes that have been buffered.
  *
- * If there is no IUCV communication path established, the function returns 0.
- * If an existing IUCV communicaton path has been severed, the function returns
- * -EPIPE (can be passed to HVC layer to cause a tty hangup).
+ * If the device is not connected, data is ignored and the function returns
+ * @count.
+ * If the buffer is full, the function returns 0.
+ * If an existing IUCV communication path has been severed, -EPIPE is returned
+ * (that can be passed to HVC layer to cause a tty hangup).
  */
-static int hvc_iucv_send(struct hvc_iucv_private *priv, const char *buf,
-			 int count)
+static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
+			  int count)
+{
+	size_t len;
+
+	if (priv->iucv_state == IUCV_DISCONN)
+		return count;			/* ignore data */
+
+	if (priv->iucv_state == IUCV_SEVERED)
+		return -EPIPE;
+
+	len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
+	if (!len)
+		return 0;
+
+	memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
+	priv->sndbuf_len += len;
+
+	if (priv->iucv_state == IUCV_CONNECTED)
+		schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
+
+	return len;
+}
+
+/**
+ * hvc_iucv_send() - Send an IUCV message containing terminal data.
+ * @priv:	Pointer to struct hvc_iucv_private instance.
+ *
+ * If an IUCV communication path has been established, the buffered output data
+ * is sent via an IUCV message and the number of bytes sent is returned.
+ * Returns 0 if there is no buffered data, -EIO if there is no established
+ * IUCV path, or -EPIPE if an existing IUCV communication path was severed.
+ */
+static int hvc_iucv_send(struct hvc_iucv_private *priv)
 {
 	struct iucv_tty_buffer *sb;
-	int rc;
-	u16 len;
+	int rc, len;
 
 	if (priv->iucv_state == IUCV_SEVERED)
 		return -EPIPE;
 
 	if (priv->iucv_state == IUCV_DISCONN)
-		return 0;
+		return -EIO;
 
-	len = min_t(u16, MSG_MAX_DATALEN, count);
+	if (!priv->sndbuf_len)
+		return 0;
 
 	/* allocate internal buffer to store msg data and also compute total
 	 * message length */
-	sb = alloc_tty_buffer(len, GFP_ATOMIC);
+	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
 	if (!sb)
 		return -ENOMEM;
 
-	sb->mbuf->datalen = len;
-	memcpy(sb->mbuf->data, buf, len);
+	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
+	sb->mbuf->datalen = (u16) priv->sndbuf_len;
+	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);
 
 	list_add_tail(&sb->list, &priv->tty_outqueue);
 
 	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
 				 (void *) sb->mbuf, sb->msg.length);
 	if (rc) {
+		/* drop the message here; however we might want to handle
+		 * 0x03 (msg limit reached) by trying again... */
 		list_del(&sb->list);
 		destroy_tty_buffer(sb);
-		len = 0;
 	}
+	len = priv->sndbuf_len;
+	priv->sndbuf_len = 0;
 
 	return len;
 }
 
 /**
+ * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
+ * @work:	Work structure.
+ *
+ * This work queue function sends buffered output data over IUCV and,
+ * if not all buffered data could be sent, reschedules itself.
+ */
+static void hvc_iucv_sndbuf_work(struct work_struct *work)
+{
+	struct hvc_iucv_private *priv;
+
+	priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
+	if (!priv)
+		return;
+
+	spin_lock_bh(&priv->lock);
+	hvc_iucv_send(priv);
+	spin_unlock_bh(&priv->lock);
+}
+
+/**
  * hvc_iucv_put_chars() - HVC put_chars operation.
  * @vtermno:	HVC virtual terminal number.
  * @buf:	Pointer to an buffer to read data from
  * @count:	Size of buffer available for reading
  *
- * The hvc_console thread calls this method to write characters from
- * to the terminal backend.
- * The function calls hvc_iucv_send() under the lock of the
- * struct hvc_iucv_private instance that corresponds to the tty @vtermno.
+ * The HVC thread calls this method to write characters to the back-end.
+ * The function calls hvc_iucv_queue() to queue terminal data for sending.
  *
  * Locking:	The method gets called under an irqsave() spinlock; and
  *		locks struct hvc_iucv_private->lock.
@@ -385,7 +444,7 @@
 static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
 {
 	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
-	int sent;
+	int queued;
 
 	if (count <= 0)
 		return 0;
@@ -394,10 +453,10 @@
 		return -ENODEV;
 
 	spin_lock(&priv->lock);
-	sent = hvc_iucv_send(priv, buf, count);
+	queued = hvc_iucv_queue(priv, buf, count);
 	spin_unlock(&priv->lock);
 
-	return sent;
+	return queued;
 }
 
 /**
@@ -406,7 +465,7 @@
  * @id:	Additional data (originally passed to hvc_alloc): the index of an struct
  *	hvc_iucv_private instance.
  *
- * The function sets the tty state to TTY_OPEN for the struct hvc_iucv_private
+ * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
  * instance that is derived from @id. Always returns 0.
  *
  * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
@@ -427,12 +486,8 @@
 }
 
 /**
- * hvc_iucv_cleanup() - Clean up function if the tty portion is finally closed.
+ * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
  * @priv:	Pointer to the struct hvc_iucv_private instance.
- *
- * The functions severs the established IUCV communication path (if any), and
- * destroy struct iucv_tty_buffer elements from the in- and outqueue. Finally,
- * the functions resets the states to TTY_CLOSED and IUCV_DISCONN.
  */
 static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
 {
@@ -441,25 +496,62 @@
 
 	priv->tty_state = TTY_CLOSED;
 	priv->iucv_state = IUCV_DISCONN;
+
+	priv->sndbuf_len = 0;
 }
 
 /**
- * hvc_iucv_notifier_hangup() - HVC notifier for tty hangups.
- * @hp: Pointer to the HVC device (struct hvc_struct)
- * @id: Additional data (originally passed to hvc_alloc): the index of an struct
- *	hvc_iucv_private instance.
+ * tty_outqueue_empty() - Test if the tty outq is empty
+ * @priv:	Pointer to struct hvc_iucv_private instance.
+ */
+static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
+{
+	int rc;
+
+	spin_lock_bh(&priv->lock);
+	rc = list_empty(&priv->tty_outqueue);
+	spin_unlock_bh(&priv->lock);
+
+	return rc;
+}
+
+/**
+ * flush_sndbuf_sync() - Flush send buffer and wait for completion
+ * @priv:	Pointer to struct hvc_iucv_private instance.
  *
- * This routine notifies the HVC backend that a tty hangup (carrier loss,
+ * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
+ * to flush any buffered terminal output data and waits for completion.
+ */
+static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
+{
+	int sync_wait;
+
+	cancel_delayed_work_sync(&priv->sndbuf_work);
+
+	spin_lock_bh(&priv->lock);
+	hvc_iucv_send(priv);		/* force sending buffered data */
+	sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
+	spin_unlock_bh(&priv->lock);
+
+	if (sync_wait)
+		wait_event_timeout(priv->sndbuf_waitq,
+				   tty_outqueue_empty(priv), HZ);
+}
+
+/**
+ * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
+ * @hp:		Pointer to the HVC device (struct hvc_struct)
+ * @id:		Additional data (originally passed to hvc_alloc):
+ *		the index of a struct hvc_iucv_private instance.
+ *
+ * This routine notifies the HVC back-end that a tty hangup (carrier loss,
  * virtual or otherwise) has occured.
- *
- * The HVC backend for z/VM IUCV ignores virtual hangups (vhangup()), to keep
- * an existing IUCV communication path established.
+ * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
+ * to keep an existing IUCV communication path established.
  * (Background: vhangup() is called from user space (by getty or login) to
  *		disable writing to the tty by other applications).
- *
- * If the tty has been opened (e.g. getty) and an established IUCV path has been
- * severed (we caused the tty hangup in that case), then the functions invokes
- * hvc_iucv_cleanup() to clean up.
+ * If the tty has been opened and an established IUCV path has been severed
+ * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
  *
  * Locking:	struct hvc_iucv_private->lock
  */
@@ -471,12 +563,12 @@
 	if (!priv)
 		return;
 
+	flush_sndbuf_sync(priv);
+
 	spin_lock_bh(&priv->lock);
 	/* NOTE: If the hangup was scheduled by ourself (from the iucv
-	 *	 path_servered callback [IUCV_SEVERED]), then we have to
-	 *	 finally clean up the tty backend structure and set state to
-	 *	 TTY_CLOSED.
-	 *
+	 *	 path_servered callback [IUCV_SEVERED]), we have to clean up
+	 *	 our structure and to set state to TTY_CLOSED.
 	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
 	 *	 ignore this hangup and keep an established IUCV path open...
 	 *	 (...the reason is that we are not able to connect back to the
@@ -494,10 +586,9 @@
  * @id:		Additional data (originally passed to hvc_alloc):
  *		the index of an struct hvc_iucv_private instance.
  *
- * This routine notifies the HVC backend that the last tty device file
- * descriptor has been closed.
- * The function calls hvc_iucv_cleanup() to clean up the struct hvc_iucv_private
- * instance.
+ * This routine notifies the HVC back-end that the last tty device fd has been
+ * closed.  The function calls hvc_iucv_cleanup() to clean up the struct
+ * hvc_iucv_private instance.
  *
  * Locking:	struct hvc_iucv_private->lock
  */
@@ -510,6 +601,8 @@
 	if (!priv)
 		return;
 
+	flush_sndbuf_sync(priv);
+
 	spin_lock_bh(&priv->lock);
 	path = priv->path;		/* save reference to IUCV path */
 	priv->path = NULL;
@@ -527,20 +620,18 @@
 /**
  * hvc_iucv_path_pending() - IUCV handler to process a connection request.
  * @path:	Pending path (struct iucv_path)
- * @ipvmid:	Originator z/VM system identifier
+ * @ipvmid:	z/VM system identifier of originator
  * @ipuser:	User specified data for this path
  *		(AF_IUCV: port/service name and originator port)
  *
- * The function uses the @ipuser data to check to determine if the pending
- * path belongs to a terminal managed by this HVC backend.
- * If the check is successful, then an additional check is done to ensure
- * that a terminal cannot be accessed multiple times (only one connection
- * to a terminal is allowed). In that particular case, the pending path is
- * severed. If it is the first connection, the pending path is accepted and
- * associated to the struct hvc_iucv_private. The iucv state is updated to
- * reflect that a communication path has been established.
+ * The function uses the @ipuser data to determine if the pending path belongs
+ * to a terminal managed by this device driver.
+ * If the path belongs to this driver, ensure that the terminal is not accessed
+ * multiple times (only one connection to a terminal is allowed).
+ * If the terminal is not yet connected, the pending path is accepted and is
+ * associated to the appropriate struct hvc_iucv_private instance.
  *
- * Returns 0 if the path belongs to a terminal managed by the this HVC backend;
+ * Returns 0 if @path belongs to a terminal managed by this device driver;
  * otherwise returns -ENODEV in order to dispatch this path to other handlers.
  *
  * Locking:	struct hvc_iucv_private->lock
@@ -559,7 +650,6 @@
 			priv = hvc_iucv_table[i];
 			break;
 		}
-
 	if (!priv)
 		return -ENODEV;
 
@@ -588,6 +678,9 @@
 	priv->path = path;
 	priv->iucv_state = IUCV_CONNECTED;
 
+	/* flush buffered output data... */
+	schedule_delayed_work(&priv->sndbuf_work, 5);
+
 out_path_handled:
 	spin_unlock(&priv->lock);
 	return 0;
@@ -603,8 +696,7 @@
  * sets the iucv state to IUCV_SEVERED for the associated struct
  * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty
  * hangup (hvc_iucv_get_chars() / hvc_iucv_write()).
- *
- * If tty portion of the HVC is closed then clean up the outqueue in addition.
+ * If the tty portion of the HVC is closed, clean up the outqueue.
  *
  * Locking:	struct hvc_iucv_private->lock
  */
@@ -615,15 +707,25 @@
 	spin_lock(&priv->lock);
 	priv->iucv_state = IUCV_SEVERED;
 
-	/* NOTE: If the tty has not yet been opened by a getty program
-	 *	 (e.g. to see console messages), then cleanup the
-	 *	 hvc_iucv_private structure to allow re-connects.
+	/* If the tty has not yet been opened, clean up the hvc_iucv_private
+	 * structure to allow re-connects.
+	 * This is also done for our console device because console hangups
+	 * are handled specially and no notifier is called by HVC.
+	 * The tty session is active (TTY_OPENED) and ready for re-connects...
 	 *
-	 *	 If the tty has been opened, the get_chars() callback returns
-	 *	 -EPIPE to signal the hvc console layer to hang up the tty. */
+	 * If it has been opened, let get_chars() return -EPIPE to signal the
+	 * HVC layer to hang up the tty.
+	 * If so, we need to wake up the HVC thread to call get_chars()...
+	 */
 	priv->path = NULL;
 	if (priv->tty_state == TTY_CLOSED)
 		hvc_iucv_cleanup(priv);
+	else
+		if (priv->is_console) {
+			hvc_iucv_cleanup(priv);
+			priv->tty_state = TTY_OPENED;
+		} else
+			hvc_kick();
 	spin_unlock(&priv->lock);
 
 	/* finally sever path (outside of priv->lock due to lock ordering) */
@@ -636,9 +738,9 @@
  * @path:	Pending path (struct iucv_path)
  * @msg:	Pointer to the IUCV message
  *
- * The function stores an incoming message on the input queue for later
+ * The function puts an incoming message on the input queue for later
  * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
- * However, if the tty has not yet been opened, the message is rejected.
+ * If the tty has not yet been opened, the message is rejected.
  *
  * Locking:	struct hvc_iucv_private->lock
  */
@@ -648,6 +750,12 @@
 	struct hvc_iucv_private *priv = path->private;
 	struct iucv_tty_buffer *rb;
 
+	/* reject messages that exceed max size of iucv_tty_msg->datalen */
+	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
+		iucv_message_reject(path, msg);
+		return;
+	}
+
 	spin_lock(&priv->lock);
 
 	/* reject messages if tty has not yet been opened */
@@ -656,7 +764,7 @@
 		goto unlock_return;
 	}
 
-	/* allocate buffer an empty buffer element */
+	/* allocate tty buffer to save iucv msg only */
 	rb = alloc_tty_buffer(0, GFP_ATOMIC);
 	if (!rb) {
 		iucv_message_reject(path, msg);
@@ -666,7 +774,7 @@
 
 	list_add_tail(&rb->list, &priv->tty_inqueue);
 
-	hvc_kick();	/* wakup hvc console thread */
+	hvc_kick();	/* wake up hvc thread */
 
 unlock_return:
 	spin_unlock(&priv->lock);
@@ -677,10 +785,10 @@
  * @path:	Pending path (struct iucv_path)
  * @msg:	Pointer to the IUCV message
  *
- * The function is called upon completion of message delivery and the
- * message is removed from the outqueue. Additional delivery information
- * can be found in msg->audit: rejected messages (0x040000 (IPADRJCT)) and
- * purged messages (0x010000 (IPADPGNR)).
+ * The function is called upon completion of message delivery to remove the
+ * message from the outqueue. Additional delivery information can be found in
+ * msg->audit: rejected messages (0x040000 (IPADRJCT)), and
+ *	       purged messages	 (0x010000 (IPADPGNR)).
  *
  * Locking:	struct hvc_iucv_private->lock
  */
@@ -697,6 +805,7 @@
 			list_move(&ent->list, &list_remove);
 			break;
 		}
+	wake_up(&priv->sndbuf_waitq);
 	spin_unlock(&priv->lock);
 	destroy_tty_buffer_list(&list_remove);
 }
@@ -713,13 +822,14 @@
 
 /**
  * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
- * @id:	hvc_iucv_table index
+ * @id:			hvc_iucv_table index
+ * @is_console:		Flag if the instance is used as Linux console
  *
- * This function allocates a new hvc_iucv_private struct and put the
- * instance into hvc_iucv_table at index @id.
+ * This function allocates a new hvc_iucv_private structure and stores
+ * the instance in hvc_iucv_table at index @id.
  * Returns 0 on success; otherwise non-zero.
  */
-static int __init hvc_iucv_alloc(int id)
+static int __init hvc_iucv_alloc(int id, unsigned int is_console)
 {
 	struct hvc_iucv_private *priv;
 	char name[9];
@@ -732,18 +842,33 @@
 	spin_lock_init(&priv->lock);
 	INIT_LIST_HEAD(&priv->tty_outqueue);
 	INIT_LIST_HEAD(&priv->tty_inqueue);
+	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
+	init_waitqueue_head(&priv->sndbuf_waitq);
 
-	/* Finally allocate hvc */
-	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id,
-			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, PAGE_SIZE);
+	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!priv->sndbuf) {
+		kfree(priv);
+		return -ENOMEM;
+	}
+
+	/* set console flag */
+	priv->is_console = is_console;
+
+	/* finally allocate hvc */
+	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /*		  PAGE_SIZE */
+			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
 	if (IS_ERR(priv->hvc)) {
 		rc = PTR_ERR(priv->hvc);
+		free_page((unsigned long) priv->sndbuf);
 		kfree(priv);
 		return rc;
 	}
 
+	/* notify HVC thread instead of using polling */
+	priv->hvc->irq_requested = 1;
+
 	/* setup iucv related information */
-	snprintf(name, 9, "ihvc%-4d", id);
+	snprintf(name, 9, "lnxhvc%-2d", id);
 	memcpy(priv->srv_name, name, 8);
 	ASCEBC(priv->srv_name, 8);
 
@@ -752,15 +877,16 @@
 }
 
 /**
- * hvc_iucv_init() - Initialization of HVC backend for z/VM IUCV
+ * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
  */
 static int __init hvc_iucv_init(void)
 {
-	int rc, i;
+	int rc;
+	unsigned int i;
 
 	if (!MACHINE_IS_VM) {
-		pr_warning("The z/VM IUCV Hypervisor console cannot be "
-			   "used without z/VM.\n");
+		pr_info("The z/VM IUCV HVC device driver cannot "
+			   "be used without z/VM\n");
 		return -ENODEV;
 	}
 
@@ -774,26 +900,33 @@
 					   sizeof(struct iucv_tty_buffer),
 					   0, 0, NULL);
 	if (!hvc_iucv_buffer_cache) {
-		pr_err("Not enough memory for driver initialization "
-			"(rs=%d).\n", 1);
+		pr_err("Allocating memory failed with reason code=%d\n", 1);
 		return -ENOMEM;
 	}
 
 	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
 						    hvc_iucv_buffer_cache);
 	if (!hvc_iucv_mempool) {
-		pr_err("Not enough memory for driver initialization "
-			"(rs=%d).\n", 2);
+		pr_err("Allocating memory failed with reason code=%d\n", 2);
 		kmem_cache_destroy(hvc_iucv_buffer_cache);
 		return -ENOMEM;
 	}
 
+	/* register the first terminal device as console
+	 * (must be done before allocating hvc terminal devices) */
+	rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
+	if (rc) {
+		pr_err("Registering HVC terminal device as "
+		       "Linux console failed\n");
+		goto out_error_memory;
+	}
+
 	/* allocate hvc_iucv_private structs */
 	for (i = 0; i < hvc_iucv_devices; i++) {
-		rc = hvc_iucv_alloc(i);
+		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
 		if (rc) {
-			pr_err("Could not create new z/VM IUCV HVC backend "
-				"rc=%d.\n", rc);
+			pr_err("Creating a new HVC terminal device "
+				"failed with error code=%d\n", rc);
 			goto out_error_hvc;
 		}
 	}
@@ -801,7 +934,8 @@
 	/* register IUCV callback handler */
 	rc = iucv_register(&hvc_iucv_handler, 0);
 	if (rc) {
-		pr_err("Could not register iucv handler (rc=%d).\n", rc);
+		pr_err("Registering IUCV handlers failed with error code=%d\n",
+			rc);
 		goto out_error_iucv;
 	}
 
@@ -816,22 +950,13 @@
 				hvc_remove(hvc_iucv_table[i]->hvc);
 			kfree(hvc_iucv_table[i]);
 		}
+out_error_memory:
 	mempool_destroy(hvc_iucv_mempool);
 	kmem_cache_destroy(hvc_iucv_buffer_cache);
 	return rc;
 }
 
 /**
- * hvc_iucv_console_init() - Early console initialization
- */
-static	int __init hvc_iucv_console_init(void)
-{
-	if (!MACHINE_IS_VM || !hvc_iucv_devices)
-		return -ENODEV;
-	return hvc_instantiate(HVC_IUCV_MAGIC, 0, &hvc_iucv_ops);
-}
-
-/**
  * hvc_iucv_config() - Parsing of hvc_iucv=  kernel command line parameter
  * @val:	Parameter value (numeric)
  */
@@ -841,10 +966,5 @@
 }
 
 
-module_init(hvc_iucv_init);
-console_initcall(hvc_iucv_console_init);
+device_initcall(hvc_iucv_init);
 __setup("hvc_iucv=", hvc_iucv_config);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("HVC back-end for z/VM IUCV.");
-MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
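
Taken together, the hvc_iucv changes above replace one IUCV message per put_chars() call with a per-device send buffer: writes only copy into the buffer, a delayed work item flushes it, and the hangup/close paths flush synchronously and wait for outstanding messages. The skeleton below is a reduced sketch of that pattern only; all demo_* names, the buffer size and the transport hook are placeholders, not the driver's code:

	#include <linux/workqueue.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>
	#include <linux/string.h>
	#include <linux/kernel.h>

	#define DEMO_SNDBUF_SIZE	4096
	#define DEMO_SNDBUF_DELAY	(HZ / 25)

	struct demo_port {			/* fields assumed initialized elsewhere */
		spinlock_t		lock;
		char			sndbuf[DEMO_SNDBUF_SIZE];
		size_t			sndbuf_len;
		struct delayed_work	sndbuf_work;
		wait_queue_head_t	sndbuf_waitq;
		int			outq_empty;	/* stands in for list_empty(&tty_outqueue) */
	};

	/* caller holds p->lock, mirroring the put_chars() path:
	 * copy as much as fits and schedule the deferred flush */
	static int demo_queue(struct demo_port *p, const char *buf, int count)
	{
		size_t len = min_t(size_t, count, DEMO_SNDBUF_SIZE - p->sndbuf_len);

		if (!len)
			return 0;
		memcpy(p->sndbuf + p->sndbuf_len, buf, len);
		p->sndbuf_len += len;
		schedule_delayed_work(&p->sndbuf_work, DEMO_SNDBUF_DELAY);
		return len;
	}

	static void demo_sndbuf_work(struct work_struct *work)
	{
		struct demo_port *p = container_of(work, struct demo_port,
						   sndbuf_work.work);

		spin_lock_bh(&p->lock);
		/* ... hand p->sndbuf off to the transport here ... */
		p->sndbuf_len = 0;
		spin_unlock_bh(&p->lock);
	}

	/* hangup/close path: force out buffered data and wait for completion */
	static void demo_flush_sync(struct demo_port *p)
	{
		cancel_delayed_work_sync(&p->sndbuf_work);
		spin_lock_bh(&p->lock);
		/* ... send the remaining p->sndbuf_len bytes ... */
		spin_unlock_bh(&p->lock);
		wait_event_timeout(p->sndbuf_waitq, p->outq_empty, HZ);
	}
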
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 8859aea..9b3e09c 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -482,7 +482,7 @@
 	int i;
 
 	for (i = 0; i < SELFTEST_BUFFER_WORDS; i++)
-		dev_err(&np->op->dev, "Test buffer slot %d [0x%016lx]\n",
+		dev_err(&np->op->dev, "Test buffer slot %d [0x%016llx]\n",
 			i, np->test_buffer[i]);
 }
 
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
index 79b6f46..afbe456 100644
--- a/drivers/char/ps3flash.c
+++ b/drivers/char/ps3flash.c
@@ -44,7 +44,7 @@
 	u64 res = ps3stor_read_write_sectors(dev, lpar, start_sector, sectors,
 					     write);
 	if (res) {
-		dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__,
+		dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
 			__LINE__, write ? "write" : "read", res);
 		return -EIO;
 	}
@@ -59,7 +59,7 @@
 
 	max_sectors = dev->bounce_size / dev->blk_size;
 	if (sectors > max_sectors) {
-		dev_dbg(&dev->sbd.core, "%s:%u Limiting sectors to %lu\n",
+		dev_dbg(&dev->sbd.core, "%s:%u Limiting sectors to %llu\n",
 			__func__, __LINE__, max_sectors);
 		sectors = max_sectors;
 	}
@@ -144,7 +144,7 @@
 			goto fail;
 		}
 
-		n = min(remaining, sectors_read*dev->blk_size-offset);
+		n = min_t(u64, remaining, sectors_read*dev->blk_size-offset);
 		dev_dbg(&dev->sbd.core,
 			"%s:%u: copy %lu bytes from 0x%p to user 0x%p\n",
 			__func__, __LINE__, n, dev->bounce_buf+offset, buf);
@@ -225,7 +225,7 @@
 		if (end_read_sector >= start_read_sector) {
 			/* Merge head and tail */
 			dev_dbg(&dev->sbd.core,
-				"Merged head and tail: %lu sectors at %lu\n",
+				"Merged head and tail: %llu sectors at %llu\n",
 				chunk_sectors, start_write_sector);
 			res = ps3flash_read_sectors(dev, start_write_sector,
 						    chunk_sectors, 0);
@@ -235,7 +235,7 @@
 			if (head) {
 				/* Read head */
 				dev_dbg(&dev->sbd.core,
-					"head: %lu sectors at %lu\n", head,
+					"head: %llu sectors at %llu\n", head,
 					start_write_sector);
 				res = ps3flash_read_sectors(dev,
 							    start_write_sector,
@@ -247,7 +247,7 @@
 			    start_write_sector+chunk_sectors) {
 				/* Read tail */
 				dev_dbg(&dev->sbd.core,
-					"tail: %lu sectors at %lu\n", tail,
+					"tail: %llu sectors at %llu\n", tail,
 					start_read_sector);
 				sec_off = start_read_sector-start_write_sector;
 				res = ps3flash_read_sectors(dev,
@@ -258,7 +258,7 @@
 			}
 		}
 
-		n = min(remaining, dev->bounce_size-offset);
+		n = min_t(u64, remaining, dev->bounce_size-offset);
 		dev_dbg(&dev->sbd.core,
 			"%s:%u: copy %lu bytes from user 0x%p to 0x%p\n",
 			__func__, __LINE__, n, buf, dev->bounce_buf+offset);
@@ -299,11 +299,11 @@
 
 	if (tag != dev->tag)
 		dev_err(&dev->sbd.core,
-			"%s:%u: tag mismatch, got %lx, expected %lx\n",
+			"%s:%u: tag mismatch, got %llx, expected %llx\n",
 			__func__, __LINE__, tag, dev->tag);
 
 	if (res) {
-		dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%lx\n",
+		dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n",
 			__func__, __LINE__, res, status);
 	} else {
 		dev->lv1_status = status;
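
The bsr, n2-drv and ps3flash hunks address the same class of 32/64-bit fallout: fields that are now u64 need %llu/%llx conversion specifiers, and mixed-type comparisons go through min_t() so both operands share one type. A small illustrative sketch (the variables are made up, not the ps3flash fields):

	#include <linux/kernel.h>
	#include <linux/types.h>

	static void demo_report(u64 sectors, u64 max_sectors, size_t remaining,
				u64 blk_size, u64 offset)
	{
		u64 n;

		/* a u64 is printed with %llu/%llx rather than %lu/%lx */
		if (sectors > max_sectors)
			pr_debug("limiting sectors to %llu\n", max_sectors);

		/* min_t() avoids comparing size_t against u64 directly */
		n = min_t(u64, remaining, sectors * blk_size - offset);
		pr_debug("copying %llu bytes\n", n);
	}
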
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index 112a6ba..31038a0 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -32,7 +32,7 @@
 
 /* These are global because they are accessed in tty_io.c */
 #ifdef CONFIG_UNIX98_PTYS
-struct tty_driver *ptm_driver;
+static struct tty_driver *ptm_driver;
 static struct tty_driver *pts_driver;
 #endif
 
@@ -230,9 +230,7 @@
 /**
  *	pty_do_resize		-	resize event
  *	@tty: tty being resized
- *	@real_tty: real tty (not the same as tty if using a pty/tty pair)
- *	@rows: rows (character)
- *	@cols: cols (character)
+ *	@ws: window size being set.
  *
  *	Update the termios variables and send the neccessary signals to
  *	peform a terminal resize correctly
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 20d6efb..e0d0f8b 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -48,9 +48,10 @@
  *		CONFIG_HPET_EMULATE_RTC
  *	1.12a	Maciej W. Rozycki: Handle memory-mapped chips properly.
  *	1.12ac	Alan Cox: Allow read access to the day of week register
+ *	1.12b	David John: Remove calls to the BKL.
  */
 
-#define RTC_VERSION		"1.12ac"
+#define RTC_VERSION		"1.12b"
 
 /*
  *	Note that *all* calls to CMOS_READ and CMOS_WRITE are done with
@@ -73,7 +74,6 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
-#include <linux/smp_lock.h>
 #include <linux/sysctl.h>
 #include <linux/wait.h>
 #include <linux/bcd.h>
@@ -182,8 +182,8 @@
 
 /*
  * rtc_status is never changed by rtc_interrupt, and ioctl/open/close is
- * protected by the big kernel lock. However, ioctl can still disable the timer
- * in rtc_status and then with del_timer after the interrupt has read
+ * protected by the spin lock rtc_lock. However, ioctl can still disable the
+ * timer in rtc_status and then with del_timer after the interrupt has read
  * rtc_status but before mod_timer is called, which would then reenable the
  * timer (but you would need to have an awful timing before you'd trip on it)
  */
@@ -720,9 +720,7 @@
 static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	long ret;
-	lock_kernel();
 	ret = rtc_do_ioctl(cmd, arg, 0);
-	unlock_kernel();
 	return ret;
 }
 
@@ -731,12 +729,8 @@
  *	Also clear the previous interrupt data on an open, and clean
  *	up things on a close.
  */
-
-/* We use rtc_lock to protect against concurrent opens. So the BKL is not
- * needed here. Or anywhere else in this driver. */
 static int rtc_open(struct inode *inode, struct file *file)
 {
-	lock_kernel();
 	spin_lock_irq(&rtc_lock);
 
 	if (rtc_status & RTC_IS_OPEN)
@@ -746,12 +740,10 @@
 
 	rtc_irq_data = 0;
 	spin_unlock_irq(&rtc_lock);
-	unlock_kernel();
 	return 0;
 
 out_busy:
 	spin_unlock_irq(&rtc_lock);
-	unlock_kernel();
 	return -EBUSY;
 }
 
@@ -800,7 +792,6 @@
 }
 
 #ifdef RTC_IRQ
-/* Called without the kernel lock - fine */
 static unsigned int rtc_poll(struct file *file, poll_table *wait)
 {
 	unsigned long l;
diff --git a/drivers/char/ser_a2232.c b/drivers/char/ser_a2232.c
index 33872a2..33a2b53 100644
--- a/drivers/char/ser_a2232.c
+++ b/drivers/char/ser_a2232.c
@@ -718,6 +718,7 @@
 	u_char *from;
 	volatile u_char *to;
 	volatile struct a2232memory *mem;
+	int error, i;
 
 #ifdef CONFIG_SMP
 	return -ENODEV;	/* This driver is not SMP aware. Is there an SMP ZorroII-bus-machine? */
@@ -797,8 +798,15 @@
 	*/
 	if (a2232_init_drivers()) return -ENODEV; // maybe we should use a different -Exxx?
 
-	request_irq(IRQ_AMIGA_VERTB, a2232_vbl_inter, 0, "A2232 serial VBL", a2232_driver_ID);
-	return 0;
+	error = request_irq(IRQ_AMIGA_VERTB, a2232_vbl_inter, 0,
+			    "A2232 serial VBL", a2232_driver_ID);
+	if (error) {
+		for (i = 0; i < nr_a2232; i++)
+			zorro_release_device(zd_a2232[i]);
+		tty_unregister_driver(a2232_driver);
+		put_tty_driver(a2232_driver);
+	}
+	return error;
 }
 
 static void __exit a2232board_exit(void)
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 53544e2..f329f45 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -1,6 +1,4 @@
 /*
- * $Id: synclink_gt.c,v 4.50 2007/07/25 19:29:25 paulkf Exp $
- *
  * Device driver for Microgate SyncLink GT serial adapters.
  *
  * written by Paul Fulghum for Microgate Corporation
@@ -91,7 +89,6 @@
  * module identification
  */
 static char *driver_name     = "SyncLink GT";
-static char *driver_version  = "$Revision: 4.50 $";
 static char *tty_driver_name = "synclink_gt";
 static char *tty_dev_prefix  = "ttySLG";
 MODULE_LICENSE("GPL");
@@ -1309,7 +1306,7 @@
 	off_t	begin = 0;
 	struct slgt_info *info;
 
-	len += sprintf(page, "synclink_gt driver:%s\n", driver_version);
+	len += sprintf(page, "synclink_gt driver\n");
 
 	info = slgt_device_list;
 	while( info ) {
@@ -2441,7 +2438,7 @@
 	info->ri_chkcount = 0;
 	info->dsr_chkcount = 0;
 
-	slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR);
+	slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI);
 	get_signals(info);
 
 	if (info->netcount ||
@@ -3576,7 +3573,7 @@
 	struct slgt_info *info;
 	struct slgt_info *tmp;
 
-	printk("unload %s %s\n", driver_name, driver_version);
+	printk(KERN_INFO "unload %s\n", driver_name);
 
 	if (serial_driver) {
 		for (info=slgt_device_list ; info != NULL ; info=info->next_device)
@@ -3619,7 +3616,7 @@
 {
 	int rc;
 
- 	printk("%s %s\n", driver_name, driver_version);
+	printk(KERN_INFO "%s\n", driver_name);
 
 	serial_driver = alloc_tty_driver(MAX_DEVICES);
 	if (!serial_driver) {
@@ -3650,9 +3647,8 @@
 		goto error;
 	}
 
- 	printk("%s %s, tty major#%d\n",
-		driver_name, driver_version,
-		serial_driver->major);
+	printk(KERN_INFO "%s, tty major#%d\n",
+	       driver_name, serial_driver->major);
 
 	slgt_device_count = 0;
 	if ((rc = pci_register_driver(&pci_driver)) < 0) {
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index d41b9f6..33a9351 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -473,6 +473,12 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&sysrq_key_table_lock, flags);
+	/*
+	 * Raise the apparent loglevel to maximum so that the sysrq header
+	 * is shown to provide the user with positive feedback.  We do not
+	 * simply emit this at KERN_EMERG as that would change message
+	 * routing in the consumers of /proc/kmsg.
+	 */
 	orig_log_level = console_loglevel;
 	console_loglevel = 7;
 	printk(KERN_INFO "SysRq : ");
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
index 68f052b..ed306eb 100644
--- a/drivers/char/tpm/tpm_bios.c
+++ b/drivers/char/tpm/tpm_bios.c
@@ -23,8 +23,6 @@
 #include <linux/security.h>
 #include <linux/module.h>
 #include <acpi/acpi.h>
-#include <acpi/actypes.h>
-#include <acpi/actbl.h>
 #include "tpm.h"
 
 #define TCG_EVENT_NAME_LEN_MAX	255
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index ab18c1e..70efba2 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -273,12 +273,23 @@
 	}
 }
 
-static struct device_driver nsc_drv = {
-	.name = "tpm_nsc",
-	.bus = &platform_bus_type,
-	.owner = THIS_MODULE,
-	.suspend = tpm_pm_suspend,
-	.resume = tpm_pm_resume,
+static int tpm_nsc_suspend(struct platform_device *dev, pm_message_t msg)
+{
+	return tpm_pm_suspend(&dev->dev, msg);
+}
+
+static int tpm_nsc_resume(struct platform_device *dev)
+{
+	return tpm_pm_resume(&dev->dev);
+}
+
+static struct platform_driver nsc_drv = {
+	.suspend         = tpm_nsc_suspend,
+	.resume          = tpm_nsc_resume,
+	.driver          = {
+		.name    = "tpm_nsc",
+		.owner   = THIS_MODULE,
+	},
 };
 
 static int __init init_nsc(void)
@@ -297,7 +308,7 @@
 			return -ENODEV;
 	}
 
-	err = driver_register(&nsc_drv);
+	err = platform_driver_register(&nsc_drv);
 	if (err)
 		return err;
 
@@ -308,17 +319,15 @@
 	/* enable the DPM module */
 	tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01);
 
-	pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
+	pdev = platform_device_alloc("tpm_nscl0", -1);
 	if (!pdev) {
 		rc = -ENOMEM;
 		goto err_unreg_drv;
 	}
 
-	pdev->name = "tpm_nscl0";
-	pdev->id = -1;
 	pdev->num_resources = 0;
+	pdev->dev.driver = &nsc_drv.driver;
 	pdev->dev.release = tpm_nsc_remove;
-	pdev->dev.driver = &nsc_drv;
 
 	if ((rc = platform_device_register(pdev)) < 0)
 		goto err_free_dev;
@@ -377,7 +386,7 @@
 err_free_dev:
 	kfree(pdev);
 err_unreg_drv:
-	driver_unregister(&nsc_drv);
+	platform_driver_unregister(&nsc_drv);
 	return rc;
 }
 
@@ -390,7 +399,7 @@
 		pdev = NULL;
 	}
 
-	driver_unregister(&nsc_drv);
+	platform_driver_unregister(&nsc_drv);
 }
 
 module_init(init_nsc);
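
The tpm_nsc conversion above replaces a bare struct device_driver on platform_bus_type with a struct platform_driver whose suspend/resume callbacks forward to the TPM PM helpers, and obtains the device via platform_device_alloc() instead of an open-coded kzalloc(). The following is a generic sketch of that registration pattern under assumed demo_* names; it is not the tpm_nsc code:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int demo_suspend(struct platform_device *dev, pm_message_t msg)
	{
		return 0;	/* forward to the subsystem's PM helper here */
	}

	static int demo_resume(struct platform_device *dev)
	{
		return 0;
	}

	static struct platform_driver demo_drv = {
		.suspend = demo_suspend,
		.resume  = demo_resume,
		.driver  = {
			.name  = "demo_dev",
			.owner = THIS_MODULE,
		},
	};

	static struct platform_device *demo_pdev;

	static int __init demo_init(void)
	{
		int rc = platform_driver_register(&demo_drv);

		if (rc)
			return rc;

		demo_pdev = platform_device_alloc("demo_dev", -1);
		if (!demo_pdev) {
			rc = -ENOMEM;
			goto err_unreg_drv;
		}

		rc = platform_device_add(demo_pdev);
		if (rc)
			goto err_put_dev;
		return 0;

	err_put_dev:
		platform_device_put(demo_pdev);
	err_unreg_drv:
		platform_driver_unregister(&demo_drv);
		return rc;
	}
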
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index a408c8e..6f4c7d0 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -1057,7 +1057,7 @@
 	if (retval)
 		return retval;
 
-	ld = tty_ldisc_ref(tty);
+	ld = tty_ldisc_ref_wait(tty);
 	switch (arg) {
 	case TCIFLUSH:
 		if (ld && ld->ops->flush_buffer)
diff --git a/drivers/char/vme_scc.c b/drivers/char/vme_scc.c
index 0e8234b..994e1a5 100644
--- a/drivers/char/vme_scc.c
+++ b/drivers/char/vme_scc.c
@@ -198,6 +198,7 @@
 static int mvme147_scc_init(void)
 {
 	struct scc_port *port;
+	int error;
 
 	printk(KERN_INFO "SCC: MVME147 Serial Driver\n");
 	/* Init channel A */
@@ -207,14 +208,23 @@
 	port->datap = port->ctrlp + 1;
 	port->port_a = &scc_ports[0];
 	port->port_b = &scc_ports[1];
-	request_irq(MVME147_IRQ_SCCA_TX, scc_tx_int, IRQF_DISABLED,
+	error = request_irq(MVME147_IRQ_SCCA_TX, scc_tx_int, IRQF_DISABLED,
 		            "SCC-A TX", port);
-	request_irq(MVME147_IRQ_SCCA_STAT, scc_stat_int, IRQF_DISABLED,
+	if (error)
+		goto fail;
+	error = request_irq(MVME147_IRQ_SCCA_STAT, scc_stat_int, IRQF_DISABLED,
 		            "SCC-A status", port);
-	request_irq(MVME147_IRQ_SCCA_RX, scc_rx_int, IRQF_DISABLED,
+	if (error)
+		goto fail_free_a_tx;
+	error = request_irq(MVME147_IRQ_SCCA_RX, scc_rx_int, IRQF_DISABLED,
 		            "SCC-A RX", port);
-	request_irq(MVME147_IRQ_SCCA_SPCOND, scc_spcond_int, IRQF_DISABLED,
-		            "SCC-A special cond", port);
+	if (error)
+		goto fail_free_a_stat;
+	error = request_irq(MVME147_IRQ_SCCA_SPCOND, scc_spcond_int,
+			    IRQF_DISABLED, "SCC-A special cond", port);
+	if (error)
+		goto fail_free_a_rx;
+
 	{
 		SCC_ACCESS_INIT(port);
 
@@ -234,14 +244,23 @@
 	port->datap = port->ctrlp + 1;
 	port->port_a = &scc_ports[0];
 	port->port_b = &scc_ports[1];
-	request_irq(MVME147_IRQ_SCCB_TX, scc_tx_int, IRQF_DISABLED,
+	error = request_irq(MVME147_IRQ_SCCB_TX, scc_tx_int, IRQF_DISABLED,
 		            "SCC-B TX", port);
-	request_irq(MVME147_IRQ_SCCB_STAT, scc_stat_int, IRQF_DISABLED,
+	if (error)
+		goto fail_free_a_spcond;
+	error = request_irq(MVME147_IRQ_SCCB_STAT, scc_stat_int, IRQF_DISABLED,
 		            "SCC-B status", port);
-	request_irq(MVME147_IRQ_SCCB_RX, scc_rx_int, IRQF_DISABLED,
+	if (error)
+		goto fail_free_b_tx;
+	error = request_irq(MVME147_IRQ_SCCB_RX, scc_rx_int, IRQF_DISABLED,
 		            "SCC-B RX", port);
-	request_irq(MVME147_IRQ_SCCB_SPCOND, scc_spcond_int, IRQF_DISABLED,
-		            "SCC-B special cond", port);
+	if (error)
+		goto fail_free_b_stat;
+	error = request_irq(MVME147_IRQ_SCCB_SPCOND, scc_spcond_int,
+			    IRQF_DISABLED, "SCC-B special cond", port);
+	if (error)
+		goto fail_free_b_rx;
+
 	{
 		SCC_ACCESS_INIT(port);
 
@@ -257,6 +276,23 @@
 	scc_init_drivers();
 
 	return 0;
+
+fail_free_b_rx:
+	free_irq(MVME147_IRQ_SCCB_RX, port);
+fail_free_b_stat:
+	free_irq(MVME147_IRQ_SCCB_STAT, port);
+fail_free_b_tx:
+	free_irq(MVME147_IRQ_SCCB_TX, port);
+fail_free_a_spcond:
+	free_irq(MVME147_IRQ_SCCA_SPCOND, port);
+fail_free_a_rx:
+	free_irq(MVME147_IRQ_SCCA_RX, port);
+fail_free_a_stat:
+	free_irq(MVME147_IRQ_SCCA_STAT, port);
+fail_free_a_tx:
+	free_irq(MVME147_IRQ_SCCA_TX, port);
+fail:
+	return error;
 }
 #endif
 
@@ -265,6 +301,7 @@
 static int mvme162_scc_init(void)
 {
 	struct scc_port *port;
+	int error;
 
 	if (!(mvme16x_config & MVME16x_CONFIG_GOT_SCCA))
 		return (-ENODEV);
@@ -277,14 +314,23 @@
 	port->datap = port->ctrlp + 2;
 	port->port_a = &scc_ports[0];
 	port->port_b = &scc_ports[1];
-	request_irq(MVME162_IRQ_SCCA_TX, scc_tx_int, IRQF_DISABLED,
+	error = request_irq(MVME162_IRQ_SCCA_TX, scc_tx_int, IRQF_DISABLED,
 		            "SCC-A TX", port);
-	request_irq(MVME162_IRQ_SCCA_STAT, scc_stat_int, IRQF_DISABLED,
+	if (error)
+		goto fail;
+	error = request_irq(MVME162_IRQ_SCCA_STAT, scc_stat_int, IRQF_DISABLED,
 		            "SCC-A status", port);
-	request_irq(MVME162_IRQ_SCCA_RX, scc_rx_int, IRQF_DISABLED,
+	if (error)
+		goto fail_free_a_tx;
+	error = request_irq(MVME162_IRQ_SCCA_RX, scc_rx_int, IRQF_DISABLED,
 		            "SCC-A RX", port);
-	request_irq(MVME162_IRQ_SCCA_SPCOND, scc_spcond_int, IRQF_DISABLED,
-		            "SCC-A special cond", port);
+	if (error)
+		goto fail_free_a_stat;
+	error = request_irq(MVME162_IRQ_SCCA_SPCOND, scc_spcond_int,
+			    IRQF_DISABLED, "SCC-A special cond", port);
+	if (error)
+		goto fail_free_a_rx;
+
 	{
 		SCC_ACCESS_INIT(port);
 
@@ -304,14 +350,22 @@
 	port->datap = port->ctrlp + 2;
 	port->port_a = &scc_ports[0];
 	port->port_b = &scc_ports[1];
-	request_irq(MVME162_IRQ_SCCB_TX, scc_tx_int, IRQF_DISABLED,
+	error = request_irq(MVME162_IRQ_SCCB_TX, scc_tx_int, IRQF_DISABLED,
 		            "SCC-B TX", port);
-	request_irq(MVME162_IRQ_SCCB_STAT, scc_stat_int, IRQF_DISABLED,
+	if (error)
+		goto fail_free_a_spcond;
+	error = request_irq(MVME162_IRQ_SCCB_STAT, scc_stat_int, IRQF_DISABLED,
 		            "SCC-B status", port);
-	request_irq(MVME162_IRQ_SCCB_RX, scc_rx_int, IRQF_DISABLED,
+	if (error)
+		goto fail_free_b_tx;
+	error = request_irq(MVME162_IRQ_SCCB_RX, scc_rx_int, IRQF_DISABLED,
 		            "SCC-B RX", port);
-	request_irq(MVME162_IRQ_SCCB_SPCOND, scc_spcond_int, IRQF_DISABLED,
-		            "SCC-B special cond", port);
+	if (error)
+		goto fail_free_b_stat;
+	error = request_irq(MVME162_IRQ_SCCB_SPCOND, scc_spcond_int,
+			    IRQF_DISABLED, "SCC-B special cond", port);
+	if (error)
+		goto fail_free_b_rx;
 
 	{
 		SCC_ACCESS_INIT(port);	/* Either channel will do */
@@ -328,6 +382,23 @@
 	scc_init_drivers();
 
 	return 0;
+
+fail_free_b_rx:
+	free_irq(MVME162_IRQ_SCCB_RX, port);
+fail_free_b_stat:
+	free_irq(MVME162_IRQ_SCCB_STAT, port);
+fail_free_b_tx:
+	free_irq(MVME162_IRQ_SCCB_TX, port);
+fail_free_a_spcond:
+	free_irq(MVME162_IRQ_SCCA_SPCOND, port);
+fail_free_a_rx:
+	free_irq(MVME162_IRQ_SCCA_RX, port);
+fail_free_a_stat:
+	free_irq(MVME162_IRQ_SCCA_STAT, port);
+fail_free_a_tx:
+	free_irq(MVME162_IRQ_SCCA_TX, port);
+fail:
+	return error;
 }
 #endif
 
@@ -336,6 +407,7 @@
 static int bvme6000_scc_init(void)
 {
 	struct scc_port *port;
+	int error;
 
 	printk(KERN_INFO "SCC: BVME6000 Serial Driver\n");
 	/* Init channel A */
@@ -345,14 +417,23 @@
 	port->datap = port->ctrlp + 4;
 	port->port_a = &scc_ports[0];
 	port->port_b = &scc_ports[1];
-	request_irq(BVME_IRQ_SCCA_TX, scc_tx_int, IRQF_DISABLED,
+	error = request_irq(BVME_IRQ_SCCA_TX, scc_tx_int, IRQF_DISABLED,
 		            "SCC-A TX", port);
-	request_irq(BVME_IRQ_SCCA_STAT, scc_stat_int, IRQF_DISABLED,
+	if (error)
+		goto fail;
+	error = request_irq(BVME_IRQ_SCCA_STAT, scc_stat_int, IRQF_DISABLED,
 		            "SCC-A status", port);
-	request_irq(BVME_IRQ_SCCA_RX, scc_rx_int, IRQF_DISABLED,
+	if (error)
+		goto fail_free_a_tx;
+	error = request_irq(BVME_IRQ_SCCA_RX, scc_rx_int, IRQF_DISABLED,
 		            "SCC-A RX", port);
-	request_irq(BVME_IRQ_SCCA_SPCOND, scc_spcond_int, IRQF_DISABLED,
-		            "SCC-A special cond", port);
+	if (error)
+		goto fail_free_a_stat;
+	error = request_irq(BVME_IRQ_SCCA_SPCOND, scc_spcond_int,
+			    IRQF_DISABLED, "SCC-A special cond", port);
+	if (error)
+		goto fail_free_a_rx;
+
 	{
 		SCC_ACCESS_INIT(port);
 
@@ -372,14 +453,22 @@
 	port->datap = port->ctrlp + 4;
 	port->port_a = &scc_ports[0];
 	port->port_b = &scc_ports[1];
-	request_irq(BVME_IRQ_SCCB_TX, scc_tx_int, IRQF_DISABLED,
+	error = request_irq(BVME_IRQ_SCCB_TX, scc_tx_int, IRQF_DISABLED,
 		            "SCC-B TX", port);
-	request_irq(BVME_IRQ_SCCB_STAT, scc_stat_int, IRQF_DISABLED,
+	if (error)
+		goto fail_free_a_spcond;
+	error = request_irq(BVME_IRQ_SCCB_STAT, scc_stat_int, IRQF_DISABLED,
 		            "SCC-B status", port);
-	request_irq(BVME_IRQ_SCCB_RX, scc_rx_int, IRQF_DISABLED,
+	if (error)
+		goto fail_free_b_tx;
+	error = request_irq(BVME_IRQ_SCCB_RX, scc_rx_int, IRQF_DISABLED,
 		            "SCC-B RX", port);
-	request_irq(BVME_IRQ_SCCB_SPCOND, scc_spcond_int, IRQF_DISABLED,
-		            "SCC-B special cond", port);
+	if (error)
+		goto fail_free_b_stat;
+	error = request_irq(BVME_IRQ_SCCB_SPCOND, scc_spcond_int,
+			    IRQF_DISABLED, "SCC-B special cond", port);
+	if (error)
+		goto fail_free_b_rx;
 
 	{
 		SCC_ACCESS_INIT(port);	/* Either channel will do */
@@ -393,6 +482,23 @@
 	scc_init_drivers();
 
 	return 0;
+
+fail_free_b_rx:
+	free_irq(BVME_IRQ_SCCB_RX, port);
+fail_free_b_stat:
+	free_irq(BVME_IRQ_SCCB_STAT, port);
+fail_free_b_tx:
+	free_irq(BVME_IRQ_SCCB_TX, port);
+fail_free_a_spcond:
+	free_irq(BVME_IRQ_SCCA_SPCOND, port);
+fail_free_a_rx:
+	free_irq(BVME_IRQ_SCCA_RX, port);
+fail_free_a_stat:
+	free_irq(BVME_IRQ_SCCA_STAT, port);
+fail_free_a_tx:
+	free_irq(BVME_IRQ_SCCA_TX, port);
+fail:
+	return error;
 }
 #endif
 
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 8001421..7900bd6 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -969,8 +969,7 @@
  *	Takes the console sem and the called methods then take the tty
  *	termios_mutex and the tty ctrl_lock in that order.
  */
-
-int vt_resize(struct tty_struct *tty, struct winsize *ws)
+static int vt_resize(struct tty_struct *tty, struct winsize *ws)
 {
 	struct vc_data *vc = tty->driver_data;
 	int ret;
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 5f076ae..a8c8d9c 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -83,7 +83,7 @@
 	select CPU_FREQ_GOV_USERSPACE
 	help
 	  Use the CPUFreq governor 'userspace' as default. This allows
-	  you to set the CPU frequency manually or when an userspace 
+	  you to set the CPU frequency manually or when a userspace 
 	  program shall be able to set the CPU dynamically without having
 	  to enable the userspace governor manually.
 
@@ -138,7 +138,7 @@
 	tristate "'userspace' governor for userspace frequency scaling"
 	help
 	  Enable this cpufreq governor when you either want to set the
-	  CPU frequency manually or when an userspace program shall
+	  CPU frequency manually or when a userspace program shall
 	  be able to set the CPU dynamically, like on LART 
 	  <http://www.lartmaker.nl/>.
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 01dde80..b55cb67 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -584,12 +584,12 @@
 	return i;
 }
 
-static ssize_t show_cpus(cpumask_t mask, char *buf)
+static ssize_t show_cpus(const struct cpumask *mask, char *buf)
 {
 	ssize_t i = 0;
 	unsigned int cpu;
 
-	for_each_cpu_mask_nr(cpu, mask) {
+	for_each_cpu(cpu, mask) {
 		if (i)
 			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -606,7 +606,7 @@
  */
 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
 {
-	if (cpus_empty(policy->related_cpus))
+	if (cpumask_empty(policy->related_cpus))
 		return show_cpus(policy->cpus, buf);
 	return show_cpus(policy->related_cpus, buf);
 }
@@ -806,9 +806,20 @@
 		ret = -ENOMEM;
 		goto nomem_out;
 	}
+	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
+		kfree(policy);
+		ret = -ENOMEM;
+		goto nomem_out;
+	}
+	if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
+		free_cpumask_var(policy->cpus);
+		kfree(policy);
+		ret = -ENOMEM;
+		goto nomem_out;
+	}
 
 	policy->cpu = cpu;
-	policy->cpus = cpumask_of_cpu(cpu);
+	cpumask_copy(policy->cpus, cpumask_of(cpu));
 
 	/* Initially set CPU itself as the policy_cpu */
 	per_cpu(policy_cpu, cpu) = cpu;
@@ -843,7 +854,7 @@
 	}
 #endif
 
-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu(j, policy->cpus) {
 		if (cpu == j)
 			continue;
 
@@ -861,7 +872,7 @@
 				goto err_out_driver_exit;
 
 			spin_lock_irqsave(&cpufreq_driver_lock, flags);
-			managed_policy->cpus = policy->cpus;
+			cpumask_copy(managed_policy->cpus, policy->cpus);
 			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
 			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
@@ -916,14 +927,14 @@
 	}
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu(j, policy->cpus) {
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(policy_cpu, j) = policy->cpu;
 	}
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	/* symlink affected CPUs */
-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu(j, policy->cpus) {
 		if (j == cpu)
 			continue;
 		if (!cpu_online(j))
@@ -963,7 +974,7 @@
 
 err_out_unregister:
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu_mask_nr(j, policy->cpus)
+	for_each_cpu(j, policy->cpus)
 		per_cpu(cpufreq_cpu_data, j) = NULL;
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
@@ -1024,7 +1035,7 @@
 	 */
 	if (unlikely(cpu != data->cpu)) {
 		dprintk("removing link\n");
-		cpu_clear(cpu, data->cpus);
+		cpumask_clear_cpu(cpu, data->cpus);
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
 		cpufreq_cpu_put(data);
@@ -1045,8 +1056,8 @@
 	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
 	 * the sysfs links afterwards.
 	 */
-	if (unlikely(cpus_weight(data->cpus) > 1)) {
-		for_each_cpu_mask_nr(j, data->cpus) {
+	if (unlikely(cpumask_weight(data->cpus) > 1)) {
+		for_each_cpu(j, data->cpus) {
 			if (j == cpu)
 				continue;
 			per_cpu(cpufreq_cpu_data, j) = NULL;
@@ -1055,8 +1066,8 @@
 
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	if (unlikely(cpus_weight(data->cpus) > 1)) {
-		for_each_cpu_mask_nr(j, data->cpus) {
+	if (unlikely(cpumask_weight(data->cpus) > 1)) {
+		for_each_cpu(j, data->cpus) {
 			if (j == cpu)
 				continue;
 			dprintk("removing link for cpu %u\n", j);
@@ -1090,7 +1101,10 @@
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(data);
 
+	free_cpumask_var(data->related_cpus);
+	free_cpumask_var(data->cpus);
 	kfree(data);
+	per_cpu(cpufreq_cpu_data, cpu) = NULL;
 
 	cpufreq_debug_enable_ratelimit();
 	return 0;
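
The cpufreq changes convert policy->cpus and policy->related_cpus from embedded cpumask_t values to cpumask_var_t, which on large NR_CPUS configurations lives on the heap: each mask must be allocated with alloc_cpumask_var(), copied with cpumask_copy(), walked with for_each_cpu(), and released with free_cpumask_var(). A minimal sketch of that lifecycle, using an illustrative struct demo_policy:

	#include <linux/kernel.h>
	#include <linux/cpumask.h>
	#include <linux/slab.h>

	struct demo_policy {
		unsigned int	cpu;
		cpumask_var_t	cpus;
	};

	static struct demo_policy *demo_policy_alloc(unsigned int cpu)
	{
		struct demo_policy *p = kzalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return NULL;
		if (!alloc_cpumask_var(&p->cpus, GFP_KERNEL)) {
			kfree(p);
			return NULL;
		}
		p->cpu = cpu;
		cpumask_copy(p->cpus, cpumask_of(cpu));
		return p;
	}

	static void demo_policy_free(struct demo_policy *p)
	{
		unsigned int j;

		for_each_cpu(j, p->cpus)	/* replaces for_each_cpu_mask_nr() */
			pr_debug("cpu%u leaving policy\n", j);
		free_cpumask_var(p->cpus);
		kfree(p);
	}
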
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index e265783..0320962 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -498,7 +498,7 @@
 			return rc;
 		}
 
-		for_each_cpu_mask_nr(j, policy->cpus) {
+		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 2ab3c12..6a2b036 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -400,7 +400,7 @@
 	/* Get Absolute Load - in terms of freq */
 	max_load_freq = 0;
 
-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu(j, policy->cpus) {
 		struct cpu_dbs_info_s *j_dbs_info;
 		cputime64_t cur_wall_time, cur_idle_time;
 		unsigned int idle_time, wall_time;
@@ -568,7 +568,7 @@
 			return rc;
 		}
 
-		for_each_cpu_mask_nr(j, policy->cpus) {
+		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 8d7cf3f..f1df59f 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -15,12 +15,14 @@
 #include <linux/tick.h>
 
 #define BREAK_FUZZ	4	/* 4 us */
+#define PRED_HISTORY_PCT	50
 
 struct menu_device {
 	int		last_state_idx;
 
 	unsigned int	expected_us;
 	unsigned int	predicted_us;
+	unsigned int    current_predicted_us;
 	unsigned int	last_measured_us;
 	unsigned int	elapsed_us;
 };
@@ -47,6 +49,12 @@
 	data->expected_us =
 		(u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
 
+	/* Recalculate predicted_us based on prediction_history_pct */
+	data->predicted_us *= PRED_HISTORY_PCT;
+	data->predicted_us += (100 - PRED_HISTORY_PCT) *
+				data->current_predicted_us;
+	data->predicted_us /= 100;
+
 	/* find the deepest idle state that satisfies our constraints */
 	for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
 		struct cpuidle_state *s = &dev->states[i];
@@ -97,7 +105,7 @@
 		measured_us = -1;
 
 	/* Predict time until next break event */
-	data->predicted_us = max(measured_us, data->last_measured_us);
+	data->current_predicted_us = max(measured_us, data->last_measured_us);
 
 	if (last_idle_us + BREAK_FUZZ <
 	    data->expected_us - target->exit_latency) {
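
The menu governor change above stops overwriting predicted_us with the newest
estimate and instead blends old and new values with PRED_HISTORY_PCT, i.e. a
50/50 exponential moving average.  A worked example with illustrative numbers
(not taken from the patch), assuming a previous predicted_us of 800 and a new
current_predicted_us of 200:

	predicted_us = (800 * PRED_HISTORY_PCT + 200 * (100 - PRED_HISTORY_PCT)) / 100
	             = (40000 + 10000) / 100
	             = 500

so a single short sleep pulls the prediction only halfway toward the new
estimate instead of replacing it outright.
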
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index d883e1b..5543384 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -270,6 +270,6 @@
 	dca_sysfs_exit();
 }
 
-subsys_initcall(dca_init);
+arch_initcall(dca_init);
 module_exit(dca_exit);
 
diff --git a/drivers/dio/dio-sysfs.c b/drivers/dio/dio-sysfs.c
index f464630..ee1a3b5 100644
--- a/drivers/dio/dio-sysfs.c
+++ b/drivers/dio/dio-sysfs.c
@@ -58,20 +58,25 @@
 	struct dio_dev *d = to_dio_dev(dev);
 
 	return sprintf(buf, "0x%08lx 0x%08lx 0x%08lx\n",
-		       dio_resource_start(d), dio_resource_end(d),
+		       (unsigned long)dio_resource_start(d),
+		       (unsigned long)dio_resource_end(d),
 		       dio_resource_flags(d));
 }
 static DEVICE_ATTR(resource, S_IRUGO, dio_show_resource, NULL);
 
-void dio_create_sysfs_dev_files(struct dio_dev *d)
+int dio_create_sysfs_dev_files(struct dio_dev *d)
 {
 	struct device *dev = &d->dev;
+	int error;
 
 	/* current configuration's attributes */
-	device_create_file(dev, &dev_attr_id);
-	device_create_file(dev, &dev_attr_ipl);
-	device_create_file(dev, &dev_attr_secid);
-	device_create_file(dev, &dev_attr_name);
-	device_create_file(dev, &dev_attr_resource);
+	if ((error = device_create_file(dev, &dev_attr_id)) ||
+	    (error = device_create_file(dev, &dev_attr_ipl)) ||
+	    (error = device_create_file(dev, &dev_attr_secid)) ||
+	    (error = device_create_file(dev, &dev_attr_name)) ||
+	    (error = device_create_file(dev, &dev_attr_resource)))
+		return error;
+
+	return 0;
 }
 
diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c
index 07f274f..10c3c49 100644
--- a/drivers/dio/dio.c
+++ b/drivers/dio/dio.c
@@ -173,6 +173,7 @@
 	mm_segment_t fs;
 	int i;
 	struct dio_dev *dev;
+	int error;
 
 	if (!MACH_IS_HP300)
 		return 0;
@@ -182,7 +183,11 @@
 	/* Initialize the DIO bus */ 
 	INIT_LIST_HEAD(&dio_bus.devices);
 	strcpy(dio_bus.dev.bus_id, "dio");
-	device_register(&dio_bus.dev);
+	error = device_register(&dio_bus.dev);
+	if (error) {
+		pr_err("DIO: Error registering dio_bus\n");
+		return error;
+	}
 
 	/* Request all resources */
 	dio_bus.num_resources = (hp300_model == HP_320 ? 1 : 2);
@@ -252,8 +257,15 @@
 
 		if (scode >= DIOII_SCBASE)
 			iounmap(va);
-		device_register(&dev->dev);
-		dio_create_sysfs_dev_files(dev);
+		error = device_register(&dev->dev);
+		if (error) {
+			pr_err("DIO: Error registering device %s\n",
+			       dev->name);
+			continue;
+		}
+		error = dio_create_sysfs_dev_files(dev);
+		if (error)
+			dev_err(&dev->dev, "Error creating sysfs files\n");
         }
 	return 0;
 }
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 904e575..e34b064 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -33,7 +33,6 @@
 config INTEL_IOP_ADMA
 	tristate "Intel IOP ADMA support"
 	depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
-	select ASYNC_CORE
 	select DMA_ENGINE
 	help
 	  Enable support for the Intel(R) IOP Series RAID engines.
@@ -59,7 +58,6 @@
 config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
-	select ASYNC_CORE
 	select DMA_ENGINE
 	---help---
 	  Enable support for the Marvell XOR engine.
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 6579965..403dbe7 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -31,32 +31,18 @@
  *
  * LOCKING:
  *
- * The subsystem keeps two global lists, dma_device_list and dma_client_list.
- * Both of these are protected by a mutex, dma_list_mutex.
+ * The subsystem keeps a global list of dma_device structs; it is protected by
+ * a mutex, dma_list_mutex.
+ *
+ * A subsystem can get access to a channel by calling dmaengine_get() followed
+ * by dma_find_channel(), or, if it needs an exclusive channel, it can call
+ * dma_request_channel().  Once a channel is allocated, a reference is taken
+ * against its corresponding driver to prevent removal.
  *
  * Each device has a channels list, which runs unlocked but is never modified
  * once the device is registered, it's just setup by the driver.
  *
- * Each client is responsible for keeping track of the channels it uses.  See
- * the definition of dma_event_callback in dmaengine.h.
- *
- * Each device has a kref, which is initialized to 1 when the device is
- * registered. A kref_get is done for each device registered.  When the
- * device is released, the corresponding kref_put is done in the release
- * method. Every time one of the device's channels is allocated to a client,
- * a kref_get occurs.  When the channel is freed, the corresponding kref_put
- * happens. The device's release function does a completion, so
- * unregister_device does a remove event, device_unregister, a kref_put
- * for the first reference, then waits on the completion for all other
- * references to finish.
- *
- * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
- * with a kref and a per_cpu local_t.  A dma_chan_get is called when a client
- * signals that it wants to use a channel, and dma_chan_put is called when
- * a channel is removed or a client using it is unregistered.  A client can
- * take extra references per outstanding transaction, as is the case with
- * the NET DMA client.  The release function does a kref_put on the device.
- *	-ChrisL, DanW
+ * See Documentation/dmaengine.txt for more details
  */
 
 #include <linux/init.h>
@@ -70,54 +56,85 @@
 #include <linux/rcupdate.h>
 #include <linux/mutex.h>
 #include <linux/jiffies.h>
+#include <linux/rculist.h>
+#include <linux/idr.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
-static LIST_HEAD(dma_client_list);
+static long dmaengine_ref_count;
+static struct idr dma_idr;
 
 /* --- sysfs implementation --- */
 
+/**
+ * dev_to_dma_chan - convert a device pointer to its dma_chan
+ * @dev - device node
+ *
+ * Must be called under dma_list_mutex
+ */
+static struct dma_chan *dev_to_dma_chan(struct device *dev)
+{
+	struct dma_chan_dev *chan_dev;
+
+	chan_dev = container_of(dev, typeof(*chan_dev), device);
+	return chan_dev->chan;
+}
+
 static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct dma_chan *chan = to_dma_chan(dev);
+	struct dma_chan *chan;
 	unsigned long count = 0;
 	int i;
+	int err;
 
-	for_each_possible_cpu(i)
-		count += per_cpu_ptr(chan->local, i)->memcpy_count;
+	mutex_lock(&dma_list_mutex);
+	chan = dev_to_dma_chan(dev);
+	if (chan) {
+		for_each_possible_cpu(i)
+			count += per_cpu_ptr(chan->local, i)->memcpy_count;
+		err = sprintf(buf, "%lu\n", count);
+	} else
+		err = -ENODEV;
+	mutex_unlock(&dma_list_mutex);
 
-	return sprintf(buf, "%lu\n", count);
+	return err;
 }
 
 static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
 				      char *buf)
 {
-	struct dma_chan *chan = to_dma_chan(dev);
+	struct dma_chan *chan;
 	unsigned long count = 0;
 	int i;
+	int err;
 
-	for_each_possible_cpu(i)
-		count += per_cpu_ptr(chan->local, i)->bytes_transferred;
+	mutex_lock(&dma_list_mutex);
+	chan = dev_to_dma_chan(dev);
+	if (chan) {
+		for_each_possible_cpu(i)
+			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
+		err = sprintf(buf, "%lu\n", count);
+	} else
+		err = -ENODEV;
+	mutex_unlock(&dma_list_mutex);
 
-	return sprintf(buf, "%lu\n", count);
+	return err;
 }
 
 static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct dma_chan *chan = to_dma_chan(dev);
-	int in_use = 0;
+	struct dma_chan *chan;
+	int err;
 
-	if (unlikely(chan->slow_ref) &&
-		atomic_read(&chan->refcount.refcount) > 1)
-		in_use = 1;
-	else {
-		if (local_read(&(per_cpu_ptr(chan->local,
-			get_cpu())->refcount)) > 0)
-			in_use = 1;
-		put_cpu();
-	}
+	mutex_lock(&dma_list_mutex);
+	chan = dev_to_dma_chan(dev);
+	if (chan)
+		err = sprintf(buf, "%d\n", chan->client_count);
+	else
+		err = -ENODEV;
+	mutex_unlock(&dma_list_mutex);
 
-	return sprintf(buf, "%d\n", in_use);
+	return err;
 }
 
 static struct device_attribute dma_attrs[] = {
@@ -127,78 +144,112 @@
 	__ATTR_NULL
 };
 
-static void dma_async_device_cleanup(struct kref *kref);
-
-static void dma_dev_release(struct device *dev)
+static void chan_dev_release(struct device *dev)
 {
-	struct dma_chan *chan = to_dma_chan(dev);
-	kref_put(&chan->device->refcount, dma_async_device_cleanup);
+	struct dma_chan_dev *chan_dev;
+
+	chan_dev = container_of(dev, typeof(*chan_dev), device);
+	if (atomic_dec_and_test(chan_dev->idr_ref)) {
+		mutex_lock(&dma_list_mutex);
+		idr_remove(&dma_idr, chan_dev->dev_id);
+		mutex_unlock(&dma_list_mutex);
+		kfree(chan_dev->idr_ref);
+	}
+	kfree(chan_dev);
 }
 
 static struct class dma_devclass = {
 	.name		= "dma",
 	.dev_attrs	= dma_attrs,
-	.dev_release	= dma_dev_release,
+	.dev_release	= chan_dev_release,
 };
 
 /* --- client and device registration --- */
 
-#define dma_chan_satisfies_mask(chan, mask) \
-	__dma_chan_satisfies_mask((chan), &(mask))
+#define dma_device_satisfies_mask(device, mask) \
+	__dma_device_satisfies_mask((device), &(mask))
 static int
-__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
+__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
 {
 	dma_cap_mask_t has;
 
-	bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
+	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
 		DMA_TX_TYPE_END);
 	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
 }
 
-/**
- * dma_client_chan_alloc - try to allocate channels to a client
- * @client: &dma_client
- *
- * Called with dma_list_mutex held.
- */
-static void dma_client_chan_alloc(struct dma_client *client)
+static struct module *dma_chan_to_owner(struct dma_chan *chan)
 {
-	struct dma_device *device;
-	struct dma_chan *chan;
-	int desc;	/* allocated descriptor count */
-	enum dma_state_client ack;
+	return chan->device->dev->driver->owner;
+}
 
-	/* Find a channel */
-	list_for_each_entry(device, &dma_device_list, global_node) {
-		/* Does the client require a specific DMA controller? */
-		if (client->slave && client->slave->dma_dev
-				&& client->slave->dma_dev != device->dev)
-			continue;
+/**
+ * balance_ref_count - catch up the channel reference count
+ * @chan - channel to balance ->client_count versus dmaengine_ref_count
+ *
+ * balance_ref_count must be called under dma_list_mutex
+ */
+static void balance_ref_count(struct dma_chan *chan)
+{
+	struct module *owner = dma_chan_to_owner(chan);
 
-		list_for_each_entry(chan, &device->channels, device_node) {
-			if (!dma_chan_satisfies_mask(chan, client->cap_mask))
-				continue;
-
-			desc = chan->device->device_alloc_chan_resources(
-					chan, client);
-			if (desc >= 0) {
-				ack = client->event_callback(client,
-						chan,
-						DMA_RESOURCE_AVAILABLE);
-
-				/* we are done once this client rejects
-				 * an available resource
-				 */
-				if (ack == DMA_ACK) {
-					dma_chan_get(chan);
-					chan->client_count++;
-				} else if (ack == DMA_NAK)
-					return;
-			}
-		}
+	while (chan->client_count < dmaengine_ref_count) {
+		__module_get(owner);
+		chan->client_count++;
 	}
 }
 
+/**
+ * dma_chan_get - try to grab a dma channel's parent driver module
+ * @chan - channel to grab
+ *
+ * Must be called under dma_list_mutex
+ */
+static int dma_chan_get(struct dma_chan *chan)
+{
+	int err = -ENODEV;
+	struct module *owner = dma_chan_to_owner(chan);
+
+	if (chan->client_count) {
+		__module_get(owner);
+		err = 0;
+	} else if (try_module_get(owner))
+		err = 0;
+
+	if (err == 0)
+		chan->client_count++;
+
+	/* allocate upon first client reference */
+	if (chan->client_count == 1 && err == 0) {
+		int desc_cnt = chan->device->device_alloc_chan_resources(chan);
+
+		if (desc_cnt < 0) {
+			err = desc_cnt;
+			chan->client_count = 0;
+			module_put(owner);
+		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
+			balance_ref_count(chan);
+	}
+
+	return err;
+}
+
+/**
+ * dma_chan_put - drop a reference to a dma channel's parent driver module
+ * @chan - channel to release
+ *
+ * Must be called under dma_list_mutex
+ */
+static void dma_chan_put(struct dma_chan *chan)
+{
+	if (!chan->client_count)
+		return; /* this channel failed alloc_chan_resources */
+	chan->client_count--;
+	module_put(dma_chan_to_owner(chan));
+	if (chan->client_count == 0)
+		chan->device->device_free_chan_resources(chan);
+}
+
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 {
 	enum dma_status status;
@@ -218,138 +269,342 @@
 EXPORT_SYMBOL(dma_sync_wait);
 
 /**
- * dma_chan_cleanup - release a DMA channel's resources
- * @kref: kernel reference structure that contains the DMA channel device
+ * dma_cap_mask_all - enable iteration over all operation types
  */
-void dma_chan_cleanup(struct kref *kref)
-{
-	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
-	chan->device->device_free_chan_resources(chan);
-	kref_put(&chan->device->refcount, dma_async_device_cleanup);
-}
-EXPORT_SYMBOL(dma_chan_cleanup);
-
-static void dma_chan_free_rcu(struct rcu_head *rcu)
-{
-	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
-	int bias = 0x7FFFFFFF;
-	int i;
-	for_each_possible_cpu(i)
-		bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
-	atomic_sub(bias, &chan->refcount.refcount);
-	kref_put(&chan->refcount, dma_chan_cleanup);
-}
-
-static void dma_chan_release(struct dma_chan *chan)
-{
-	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
-	chan->slow_ref = 1;
-	call_rcu(&chan->rcu, dma_chan_free_rcu);
-}
+static dma_cap_mask_t dma_cap_mask_all;
 
 /**
- * dma_chans_notify_available - broadcast available channels to the clients
+ * dma_chan_tbl_ent - tracks channel allocations per core/operation
+ * @chan - associated channel for this entry
  */
-static void dma_clients_notify_available(void)
-{
-	struct dma_client *client;
-
-	mutex_lock(&dma_list_mutex);
-
-	list_for_each_entry(client, &dma_client_list, global_node)
-		dma_client_chan_alloc(client);
-
-	mutex_unlock(&dma_list_mutex);
-}
+struct dma_chan_tbl_ent {
+	struct dma_chan *chan;
+};
 
 /**
- * dma_chans_notify_available - tell the clients that a channel is going away
- * @chan: channel on its way out
+ * channel_table - percpu lookup table for memory-to-memory offload providers
  */
-static void dma_clients_notify_removed(struct dma_chan *chan)
+static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];
+
+static int __init dma_channel_table_init(void)
 {
-	struct dma_client *client;
-	enum dma_state_client ack;
+	enum dma_transaction_type cap;
+	int err = 0;
 
-	mutex_lock(&dma_list_mutex);
+	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
 
-	list_for_each_entry(client, &dma_client_list, global_node) {
-		ack = client->event_callback(client, chan,
-				DMA_RESOURCE_REMOVED);
+	/* 'interrupt', 'private', and 'slave' are channel capabilities,
+	 * but are not associated with an operation so they do not need
+	 * an entry in the channel_table
+	 */
+	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
+	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
+	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
 
-		/* client was holding resources for this channel so
-		 * free it
-		 */
-		if (ack == DMA_ACK) {
-			dma_chan_put(chan);
-			chan->client_count--;
+	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
+		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
+		if (!channel_table[cap]) {
+			err = -ENOMEM;
+			break;
 		}
 	}
 
-	mutex_unlock(&dma_list_mutex);
+	if (err) {
+		pr_err("dmaengine: initialization failure\n");
+		for_each_dma_cap_mask(cap, dma_cap_mask_all)
+			if (channel_table[cap])
+				free_percpu(channel_table[cap]);
+	}
+
+	return err;
 }
+arch_initcall(dma_channel_table_init);
 
 /**
- * dma_async_client_register - register a &dma_client
- * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
+ * dma_find_channel - find a channel to carry out the operation
+ * @tx_type: transaction type
  */
-void dma_async_client_register(struct dma_client *client)
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 {
-	/* validate client data */
-	BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
-		!client->slave);
+	struct dma_chan *chan;
+	int cpu;
 
-	mutex_lock(&dma_list_mutex);
-	list_add_tail(&client->global_node, &dma_client_list);
-	mutex_unlock(&dma_list_mutex);
+	WARN_ONCE(dmaengine_ref_count == 0,
+		  "client called %s without a reference", __func__);
+
+	cpu = get_cpu();
+	chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
+	put_cpu();
+
+	return chan;
 }
-EXPORT_SYMBOL(dma_async_client_register);
+EXPORT_SYMBOL(dma_find_channel);
 
 /**
- * dma_async_client_unregister - unregister a client and free the &dma_client
- * @client: &dma_client to free
- *
- * Force frees any allocated DMA channels, frees the &dma_client memory
+ * dma_issue_pending_all - flush all pending operations across all channels
  */
-void dma_async_client_unregister(struct dma_client *client)
+void dma_issue_pending_all(void)
 {
 	struct dma_device *device;
 	struct dma_chan *chan;
-	enum dma_state_client ack;
 
-	if (!client)
-		return;
+	WARN_ONCE(dmaengine_ref_count == 0,
+		  "client called %s without a reference", __func__);
 
-	mutex_lock(&dma_list_mutex);
-	/* free all channels the client is holding */
-	list_for_each_entry(device, &dma_device_list, global_node)
-		list_for_each_entry(chan, &device->channels, device_node) {
-			ack = client->event_callback(client, chan,
-				DMA_RESOURCE_REMOVED);
-
-			if (ack == DMA_ACK) {
-				dma_chan_put(chan);
-				chan->client_count--;
-			}
-		}
-
-	list_del(&client->global_node);
-	mutex_unlock(&dma_list_mutex);
+	rcu_read_lock();
+	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
+		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+			continue;
+		list_for_each_entry(chan, &device->channels, device_node)
+			if (chan->client_count)
+				device->device_issue_pending(chan);
+	}
+	rcu_read_unlock();
 }
-EXPORT_SYMBOL(dma_async_client_unregister);
+EXPORT_SYMBOL(dma_issue_pending_all);
 
 /**
- * dma_async_client_chan_request - send all available channels to the
- * client that satisfy the capability mask
- * @client - requester
+ * nth_chan - returns the nth channel of the given capability
+ * @cap: capability to match
+ * @n: nth channel desired
+ *
+ * Defaults to returning the channel with the desired capability and the
+ * lowest table_count when 'n' cannot be satisfied.  Must be called
+ * under dma_list_mutex.
  */
-void dma_async_client_chan_request(struct dma_client *client)
+static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
+{
+	struct dma_device *device;
+	struct dma_chan *chan;
+	struct dma_chan *ret = NULL;
+	struct dma_chan *min = NULL;
+
+	list_for_each_entry(device, &dma_device_list, global_node) {
+		if (!dma_has_cap(cap, device->cap_mask) ||
+		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
+			continue;
+		list_for_each_entry(chan, &device->channels, device_node) {
+			if (!chan->client_count)
+				continue;
+			if (!min)
+				min = chan;
+			else if (chan->table_count < min->table_count)
+				min = chan;
+
+			if (n-- == 0) {
+				ret = chan;
+				break; /* done */
+			}
+		}
+		if (ret)
+			break; /* done */
+	}
+
+	if (!ret)
+		ret = min;
+
+	if (ret)
+		ret->table_count++;
+
+	return ret;
+}
+
+/**
+ * dma_channel_rebalance - redistribute the available channels
+ *
+ * Optimize for cpu isolation (each cpu gets a dedicated channel for an
+ * operation type) in the SMP case,  and operation isolation (avoid
+ * multi-tasking channels) in the non-SMP case.  Must be called under
+ * dma_list_mutex.
+ */
+static void dma_channel_rebalance(void)
+{
+	struct dma_chan *chan;
+	struct dma_device *device;
+	int cpu;
+	int cap;
+	int n;
+
+	/* undo the last distribution */
+	for_each_dma_cap_mask(cap, dma_cap_mask_all)
+		for_each_possible_cpu(cpu)
+			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
+
+	list_for_each_entry(device, &dma_device_list, global_node) {
+		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+			continue;
+		list_for_each_entry(chan, &device->channels, device_node)
+			chan->table_count = 0;
+	}
+
+	/* don't populate the channel_table if no clients are available */
+	if (!dmaengine_ref_count)
+		return;
+
+	/* redistribute available channels */
+	n = 0;
+	for_each_dma_cap_mask(cap, dma_cap_mask_all)
+		for_each_online_cpu(cpu) {
+			if (num_possible_cpus() > 1)
+				chan = nth_chan(cap, n++);
+			else
+				chan = nth_chan(cap, -1);
+
+			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
+		}
+}
+
+static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
+					  dma_filter_fn fn, void *fn_param)
+{
+	struct dma_chan *chan;
+
+	if (!__dma_device_satisfies_mask(dev, mask)) {
+		pr_debug("%s: wrong capabilities\n", __func__);
+		return NULL;
+	}
+	/* devices with multiple channels need special handling as we need to
+	 * ensure that all channels are either private or public.
+	 */
+	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
+		list_for_each_entry(chan, &dev->channels, device_node) {
+			/* some channels are already publicly allocated */
+			if (chan->client_count)
+				return NULL;
+		}
+
+	list_for_each_entry(chan, &dev->channels, device_node) {
+		if (chan->client_count) {
+			pr_debug("%s: %s busy\n",
+				 __func__, dma_chan_name(chan));
+			continue;
+		}
+		if (fn && !fn(chan, fn_param)) {
+			pr_debug("%s: %s filter said false\n",
+				 __func__, dma_chan_name(chan));
+			continue;
+		}
+		return chan;
+	}
+
+	return NULL;
+}
+
+/**
+ * dma_request_channel - try to allocate an exclusive channel
+ * @mask: capabilities that the channel must satisfy
+ * @fn: optional callback used to filter the available channels
+ * @fn_param: opaque parameter to pass to dma_filter_fn
+ */
+struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
+{
+	struct dma_device *device, *_d;
+	struct dma_chan *chan = NULL;
+	int err;
+
+	/* Find a channel */
+	mutex_lock(&dma_list_mutex);
+	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+		chan = private_candidate(mask, device, fn, fn_param);
+		if (chan) {
+			/* Found a suitable channel, try to grab, prep, and
+			 * return it.  We first set DMA_PRIVATE to disable
+			 * balance_ref_count as this channel will not be
+			 * published in the general-purpose allocator
+			 */
+			dma_cap_set(DMA_PRIVATE, device->cap_mask);
+			err = dma_chan_get(chan);
+
+			if (err == -ENODEV) {
+				pr_debug("%s: %s module removed\n", __func__,
+					 dma_chan_name(chan));
+				list_del_rcu(&device->global_node);
+			} else if (err)
+				pr_err("dmaengine: failed to get %s: (%d)\n",
+				       dma_chan_name(chan), err);
+			else
+				break;
+			chan = NULL;
+		}
+	}
+	mutex_unlock(&dma_list_mutex);
+
+	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
+		 chan ? dma_chan_name(chan) : NULL);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(__dma_request_channel);
+
+void dma_release_channel(struct dma_chan *chan)
 {
 	mutex_lock(&dma_list_mutex);
-	dma_client_chan_alloc(client);
+	WARN_ONCE(chan->client_count != 1,
+		  "chan reference count %d != 1\n", chan->client_count);
+	dma_chan_put(chan);
 	mutex_unlock(&dma_list_mutex);
 }
-EXPORT_SYMBOL(dma_async_client_chan_request);
+EXPORT_SYMBOL_GPL(dma_release_channel);
+
+/**
+ * dmaengine_get - register interest in dma_channels
+ */
+void dmaengine_get(void)
+{
+	struct dma_device *device, *_d;
+	struct dma_chan *chan;
+	int err;
+
+	mutex_lock(&dma_list_mutex);
+	dmaengine_ref_count++;
+
+	/* try to grab channels */
+	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+			continue;
+		list_for_each_entry(chan, &device->channels, device_node) {
+			err = dma_chan_get(chan);
+			if (err == -ENODEV) {
+				/* module removed before we could use it */
+				list_del_rcu(&device->global_node);
+				break;
+			} else if (err)
+				pr_err("dmaengine: failed to get %s: (%d)\n",
+				       dma_chan_name(chan), err);
+		}
+	}
+
+	/* if this is the first reference and there were channels
+	 * waiting, we need to rebalance to get those channels
+	 * incorporated into the channel table
+	 */
+	if (dmaengine_ref_count == 1)
+		dma_channel_rebalance();
+	mutex_unlock(&dma_list_mutex);
+}
+EXPORT_SYMBOL(dmaengine_get);
+
+/**
+ * dmaengine_put - let dma drivers be removed when ref_count == 0
+ */
+void dmaengine_put(void)
+{
+	struct dma_device *device;
+	struct dma_chan *chan;
+
+	mutex_lock(&dma_list_mutex);
+	dmaengine_ref_count--;
+	BUG_ON(dmaengine_ref_count < 0);
+	/* drop channel references */
+	list_for_each_entry(device, &dma_device_list, global_node) {
+		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+			continue;
+		list_for_each_entry(chan, &device->channels, device_node)
+			dma_chan_put(chan);
+	}
+	mutex_unlock(&dma_list_mutex);
+}
+EXPORT_SYMBOL(dmaengine_put);
 
 /**
  * dma_async_device_register - registers DMA devices found
@@ -357,9 +612,9 @@
  */
 int dma_async_device_register(struct dma_device *device)
 {
-	static int id;
 	int chancnt = 0, rc;
 	struct dma_chan* chan;
+	atomic_t *idr_ref;
 
 	if (!device)
 		return -ENODEV;
@@ -386,57 +641,83 @@
 	BUG_ON(!device->device_issue_pending);
 	BUG_ON(!device->dev);
 
-	init_completion(&device->done);
-	kref_init(&device->refcount);
-
+	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
+	if (!idr_ref)
+		return -ENOMEM;
+	atomic_set(idr_ref, 0);
+ idr_retry:
+	if (!idr_pre_get(&dma_idr, GFP_KERNEL)) {
+		kfree(idr_ref);
+		return -ENOMEM;
+	}
 	mutex_lock(&dma_list_mutex);
-	device->dev_id = id++;
+	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
 	mutex_unlock(&dma_list_mutex);
+	if (rc == -EAGAIN)
+		goto idr_retry;
+	else if (rc != 0) {
+		kfree(idr_ref);
+		return rc;
+	}
 
 	/* represent channels in sysfs. Probably want devs too */
 	list_for_each_entry(chan, &device->channels, device_node) {
 		chan->local = alloc_percpu(typeof(*chan->local));
 		if (chan->local == NULL)
 			continue;
+		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+		if (chan->dev == NULL) {
+			free_percpu(chan->local);
+			continue;
+		}
 
 		chan->chan_id = chancnt++;
-		chan->dev.class = &dma_devclass;
-		chan->dev.parent = device->dev;
-		dev_set_name(&chan->dev, "dma%dchan%d",
+		chan->dev->device.class = &dma_devclass;
+		chan->dev->device.parent = device->dev;
+		chan->dev->chan = chan;
+		chan->dev->idr_ref = idr_ref;
+		chan->dev->dev_id = device->dev_id;
+		atomic_inc(idr_ref);
+		dev_set_name(&chan->dev->device, "dma%dchan%d",
 			     device->dev_id, chan->chan_id);
 
-		rc = device_register(&chan->dev);
+		rc = device_register(&chan->dev->device);
 		if (rc) {
-			chancnt--;
 			free_percpu(chan->local);
 			chan->local = NULL;
 			goto err_out;
 		}
-
-		/* One for the channel, one of the class device */
-		kref_get(&device->refcount);
-		kref_get(&device->refcount);
-		kref_init(&chan->refcount);
 		chan->client_count = 0;
-		chan->slow_ref = 0;
-		INIT_RCU_HEAD(&chan->rcu);
 	}
+	device->chancnt = chancnt;
 
 	mutex_lock(&dma_list_mutex);
-	list_add_tail(&device->global_node, &dma_device_list);
+	/* take references on public channels */
+	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
+		list_for_each_entry(chan, &device->channels, device_node) {
+			/* if clients are already waiting for channels we need
+			 * to take references on their behalf
+			 */
+			if (dma_chan_get(chan) == -ENODEV) {
+				/* note we can only get here for the first
+				 * channel as the remaining channels are
+				 * guaranteed to get a reference
+				 */
+				rc = -ENODEV;
+				mutex_unlock(&dma_list_mutex);
+				goto err_out;
+			}
+		}
+	list_add_tail_rcu(&device->global_node, &dma_device_list);
+	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
-	dma_clients_notify_available();
-
 	return 0;
 
 err_out:
 	list_for_each_entry(chan, &device->channels, device_node) {
 		if (chan->local == NULL)
 			continue;
-		kref_put(&device->refcount, dma_async_device_cleanup);
-		device_unregister(&chan->dev);
-		chancnt--;
+		mutex_lock(&dma_list_mutex);
+		chan->dev->chan = NULL;
+		mutex_unlock(&dma_list_mutex);
+		device_unregister(&chan->dev->device);
 		free_percpu(chan->local);
 	}
 	return rc;
@@ -444,37 +725,30 @@
 EXPORT_SYMBOL(dma_async_device_register);
 
 /**
- * dma_async_device_cleanup - function called when all references are released
- * @kref: kernel reference object
- */
-static void dma_async_device_cleanup(struct kref *kref)
-{
-	struct dma_device *device;
-
-	device = container_of(kref, struct dma_device, refcount);
-	complete(&device->done);
-}
-
-/**
- * dma_async_device_unregister - unregisters DMA devices
+ * dma_async_device_unregister - unregister a DMA device
  * @device: &dma_device
+ *
+ * This routine is called by dma driver exit routines; dmaengine holds module
+ * references to prevent it from being called while channels are in use.
  */
 void dma_async_device_unregister(struct dma_device *device)
 {
 	struct dma_chan *chan;
 
 	mutex_lock(&dma_list_mutex);
-	list_del(&device->global_node);
+	list_del_rcu(&device->global_node);
+	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
 	list_for_each_entry(chan, &device->channels, device_node) {
-		dma_clients_notify_removed(chan);
-		device_unregister(&chan->dev);
-		dma_chan_release(chan);
+		WARN_ONCE(chan->client_count,
+			  "%s called while %d clients hold a reference\n",
+			  __func__, chan->client_count);
+		mutex_lock(&dma_list_mutex);
+		chan->dev->chan = NULL;
+		mutex_unlock(&dma_list_mutex);
+		device_unregister(&chan->dev->device);
 	}
-
-	kref_put(&device->refcount, dma_async_device_cleanup);
-	wait_for_completion(&device->done);
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
 
@@ -626,10 +900,96 @@
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
+/* dma_wait_for_async_tx - spin wait for a transaction to complete
+ * @tx: in-flight transaction to wait on
+ *
+ * This routine assumes that tx was obtained from a call to async_memcpy,
+ * async_xor, async_memset, etc., which ensures that tx is "in-flight" (prepped
+ * and submitted).  Walking the parent chain is only meant to cover for DMA
+ * drivers that do not implement the DMA_INTERRUPT capability and may race with
+ * the driver's descriptor cleanup routine.
+ */
+enum dma_status
+dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+	enum dma_status status;
+	struct dma_async_tx_descriptor *iter;
+	struct dma_async_tx_descriptor *parent;
+
+	if (!tx)
+		return DMA_SUCCESS;
+
+	WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
+		  " %s\n", __func__, dma_chan_name(tx->chan));
+
+	/* poll through the dependency chain, return when tx is complete */
+	do {
+		iter = tx;
+
+		/* find the root of the unsubmitted dependency chain */
+		do {
+			parent = iter->parent;
+			if (!parent)
+				break;
+			else
+				iter = parent;
+		} while (parent);
+
+		/* there is a small window for ->parent == NULL and
+		 * ->cookie == -EBUSY
+		 */
+		while (iter->cookie == -EBUSY)
+			cpu_relax();
+
+		status = dma_sync_wait(iter->chan, iter->cookie);
+	} while (status == DMA_IN_PROGRESS || (iter != tx));
+
+	return status;
+}
+EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
+
+/* dma_run_dependencies - helper routine for dma drivers to process
+ *	(start) dependent operations on their target channel
+ * @tx: transaction with dependencies
+ */
+void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
+{
+	struct dma_async_tx_descriptor *dep = tx->next;
+	struct dma_async_tx_descriptor *dep_next;
+	struct dma_chan *chan;
+
+	if (!dep)
+		return;
+
+	chan = dep->chan;
+
+	/* keep submitting until a channel switch is detected;
+	 * in that case we will be called again as a result of
+	 * processing the interrupt from async_tx_channel_switch
+	 */
+	for (; dep; dep = dep_next) {
+		spin_lock_bh(&dep->lock);
+		dep->parent = NULL;
+		dep_next = dep->next;
+		if (dep_next && dep_next->chan == chan)
+			dep->next = NULL; /* ->next will be submitted */
+		else
+			dep_next = NULL; /* submit current dep and terminate */
+		spin_unlock_bh(&dep->lock);
+
+		dep->tx_submit(dep);
+	}
+
+	chan->device->device_issue_pending(chan);
+}
+EXPORT_SYMBOL_GPL(dma_run_dependencies);
+
 static int __init dma_bus_init(void)
 {
+	idr_init(&dma_idr);
 	mutex_init(&dma_list_mutex);
 	return class_register(&dma_devclass);
 }
-subsys_initcall(dma_bus_init);
+arch_initcall(dma_bus_init);
+
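
The rewritten locking comment at the top of dmaengine.c describes the new
client model: shared channels via dmaengine_get()/dma_find_channel()/
dma_issue_pending_all()/dmaengine_put(), and exclusive channels via
dma_request_channel()/dma_release_channel().  The sketch below is illustrative
only and not part of this patch; the example_* function names are invented:

	#include <linux/dmaengine.h>

	/* opportunistic use of a shared memcpy channel */
	static void example_public_client(void)
	{
		struct dma_chan *chan;

		dmaengine_get();			/* register interest in channels */
		chan = dma_find_channel(DMA_MEMCPY);	/* per-cpu lookup; may be NULL */
		if (chan) {
			/* ...prepare and submit descriptors on chan here... */
			dma_issue_pending_all();	/* flush pending work on public channels */
		}
		dmaengine_put();			/* drop the reference so drivers can unload */
	}

	/* exclusive channel via the filter interface */
	static struct dma_chan *example_private_client(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		/* NULL filter: accept the first unused channel with the
		 * capability; pair with dma_release_channel() when done
		 */
		return dma_request_channel(mask, NULL, NULL);
	}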
 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index ed9636b..3603f1e 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -35,7 +35,7 @@
 
 static unsigned int max_channels;
 module_param(max_channels, uint, S_IRUGO);
-MODULE_PARM_DESC(nr_channels,
+MODULE_PARM_DESC(max_channels,
 		"Maximum number of channels to use (default: all)");
 
 /*
@@ -71,7 +71,7 @@
 
 /*
  * These are protected by dma_list_mutex since they're only used by
- * the DMA client event callback
+ * the DMA filter function callback
  */
 static LIST_HEAD(dmatest_channels);
 static unsigned int nr_channels;
@@ -80,7 +80,7 @@
 {
 	if (test_channel[0] == '\0')
 		return true;
-	return strcmp(dev_name(&chan->dev), test_channel) == 0;
+	return strcmp(dma_chan_name(chan), test_channel) == 0;
 }
 
 static bool dmatest_match_device(struct dma_device *device)
@@ -215,7 +215,6 @@
 
 	smp_rmb();
 	chan = thread->chan;
-	dma_chan_get(chan);
 
 	while (!kthread_should_stop()) {
 		total_tests++;
@@ -293,7 +292,6 @@
 	}
 
 	ret = 0;
-	dma_chan_put(chan);
 	kfree(thread->dstbuf);
 err_dstbuf:
 	kfree(thread->srcbuf);
@@ -319,21 +317,16 @@
 	kfree(dtc);
 }
 
-static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
+static int dmatest_add_channel(struct dma_chan *chan)
 {
 	struct dmatest_chan	*dtc;
 	struct dmatest_thread	*thread;
 	unsigned int		i;
 
-	/* Have we already been told about this channel? */
-	list_for_each_entry(dtc, &dmatest_channels, node)
-		if (dtc->chan == chan)
-			return DMA_DUP;
-
 	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
 	if (!dtc) {
-		pr_warning("dmatest: No memory for %s\n", dev_name(&chan->dev));
-		return DMA_NAK;
+		pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
+		return -ENOMEM;
 	}
 
 	dtc->chan = chan;
@@ -343,16 +336,16 @@
 		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
 		if (!thread) {
 			pr_warning("dmatest: No memory for %s-test%u\n",
-				   dev_name(&chan->dev), i);
+				   dma_chan_name(chan), i);
 			break;
 		}
 		thread->chan = dtc->chan;
 		smp_wmb();
 		thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
-				dev_name(&chan->dev), i);
+				dma_chan_name(chan), i);
 		if (IS_ERR(thread->task)) {
 			pr_warning("dmatest: Failed to run thread %s-test%u\n",
-					dev_name(&chan->dev), i);
+					dma_chan_name(chan), i);
 			kfree(thread);
 			break;
 		}
@@ -362,86 +355,62 @@
 		list_add_tail(&thread->node, &dtc->threads);
 	}
 
-	pr_info("dmatest: Started %u threads using %s\n", i, dev_name(&chan->dev));
+	pr_info("dmatest: Started %u threads using %s\n", i, dma_chan_name(chan));
 
 	list_add_tail(&dtc->node, &dmatest_channels);
 	nr_channels++;
 
-	return DMA_ACK;
+	return 0;
 }
 
-static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan)
+static bool filter(struct dma_chan *chan, void *param)
 {
-	struct dmatest_chan	*dtc, *_dtc;
-
-	list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
-		if (dtc->chan == chan) {
-			list_del(&dtc->node);
-			dmatest_cleanup_channel(dtc);
-			pr_debug("dmatest: lost channel %s\n",
-					dev_name(&chan->dev));
-			return DMA_ACK;
-		}
-	}
-
-	return DMA_DUP;
+	if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device))
+		return false;
+	else
+		return true;
 }
 
-/*
- * Start testing threads as new channels are assigned to us, and kill
- * them when the channels go away.
- *
- * When we unregister the client, all channels are removed so this
- * will also take care of cleaning things up when the module is
- * unloaded.
- */
-static enum dma_state_client
-dmatest_event(struct dma_client *client, struct dma_chan *chan,
-		enum dma_state state)
-{
-	enum dma_state_client	ack = DMA_NAK;
-
-	switch (state) {
-	case DMA_RESOURCE_AVAILABLE:
-		if (!dmatest_match_channel(chan)
-				|| !dmatest_match_device(chan->device))
-			ack = DMA_DUP;
-		else if (max_channels && nr_channels >= max_channels)
-			ack = DMA_NAK;
-		else
-			ack = dmatest_add_channel(chan);
-		break;
-
-	case DMA_RESOURCE_REMOVED:
-		ack = dmatest_remove_channel(chan);
-		break;
-
-	default:
-		pr_info("dmatest: Unhandled event %u (%s)\n",
-				state, dev_name(&chan->dev));
-		break;
-	}
-
-	return ack;
-}
-
-static struct dma_client dmatest_client = {
-	.event_callback	= dmatest_event,
-};
-
 static int __init dmatest_init(void)
 {
-	dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask);
-	dma_async_client_register(&dmatest_client);
-	dma_async_client_chan_request(&dmatest_client);
+	dma_cap_mask_t mask;
+	struct dma_chan *chan;
+	int err = 0;
 
-	return 0;
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+	for (;;) {
+		chan = dma_request_channel(mask, filter, NULL);
+		if (chan) {
+			err = dmatest_add_channel(chan);
+			if (err) {
+				dma_release_channel(chan);
+				break; /* add_channel failed, punt */
+			}
+		} else
+			break; /* no more channels available */
+		if (max_channels && nr_channels >= max_channels)
+			break; /* we have all we need */
+	}
+
+	return err;
 }
-module_init(dmatest_init);
+/* when compiled in, wait for drivers to load first */
+late_initcall(dmatest_init);
 
 static void __exit dmatest_exit(void)
 {
-	dma_async_client_unregister(&dmatest_client);
+	struct dmatest_chan *dtc, *_dtc;
+
+	list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
+		list_del(&dtc->node);
+		dmatest_cleanup_channel(dtc);
+		pr_debug("dmatest: dropped channel %s\n",
+			 dma_chan_name(dtc->chan));
+		dma_release_channel(dtc->chan);
+	}
 }
 module_exit(dmatest_exit);
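
dmatest's new filter() above keys only off module parameters; the fn_param
argument of __dma_request_channel() lets a caller pass per-request state
instead.  A hypothetical filter along those lines, illustrative only and not
part of this patch:

	/* match only channels belonging to one specific DMA device */
	static bool example_filter(struct dma_chan *chan, void *param)
	{
		struct device *wanted = param;	/* caller-supplied device pointer */

		return chan->device->dev == wanted;
	}

	/* usage sketch: chan = dma_request_channel(mask, example_filter, some_dev); */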
 
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 0778d99..6b702cc 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -70,6 +70,15 @@
  * the controller, though.
  */
 
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+static struct device *chan2parent(struct dma_chan *chan)
+{
+	return chan->dev->device.parent;
+}
+
 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
 {
 	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
@@ -93,12 +102,12 @@
 			ret = desc;
 			break;
 		}
-		dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc);
+		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
 		i++;
 	}
 	spin_unlock_bh(&dwc->lock);
 
-	dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i);
+	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
 
 	return ret;
 }
@@ -108,10 +117,10 @@
 	struct dw_desc	*child;
 
 	list_for_each_entry(child, &desc->txd.tx_list, desc_node)
-		dma_sync_single_for_cpu(dwc->chan.dev.parent,
+		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
 				child->txd.phys, sizeof(child->lli),
 				DMA_TO_DEVICE);
-	dma_sync_single_for_cpu(dwc->chan.dev.parent,
+	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
 			desc->txd.phys, sizeof(desc->lli),
 			DMA_TO_DEVICE);
 }
@@ -129,11 +138,11 @@
 
 		spin_lock_bh(&dwc->lock);
 		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
-			dev_vdbg(&dwc->chan.dev,
+			dev_vdbg(chan2dev(&dwc->chan),
 					"moving child desc %p to freelist\n",
 					child);
 		list_splice_init(&desc->txd.tx_list, &dwc->free_list);
-		dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc);
+		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
 		list_add(&desc->desc_node, &dwc->free_list);
 		spin_unlock_bh(&dwc->lock);
 	}
@@ -163,9 +172,9 @@
 
 	/* ASSERT:  channel is idle */
 	if (dma_readl(dw, CH_EN) & dwc->mask) {
-		dev_err(&dwc->chan.dev,
+		dev_err(chan2dev(&dwc->chan),
 			"BUG: Attempted to start non-idle channel\n");
-		dev_err(&dwc->chan.dev,
+		dev_err(chan2dev(&dwc->chan),
 			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
 			channel_readl(dwc, SAR),
 			channel_readl(dwc, DAR),
@@ -193,7 +202,7 @@
 	void				*param;
 	struct dma_async_tx_descriptor	*txd = &desc->txd;
 
-	dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie);
+	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
 
 	dwc->completed = txd->cookie;
 	callback = txd->callback;
@@ -208,11 +217,11 @@
 	 * mapped before they were submitted...
 	 */
 	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
-		dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len,
-				DMA_FROM_DEVICE);
+		dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar,
+			       desc->len, DMA_FROM_DEVICE);
 	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
-		dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len,
-				DMA_TO_DEVICE);
+		dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar,
+			       desc->len, DMA_TO_DEVICE);
 
 	/*
 	 * The API requires that no submissions are done from a
@@ -228,7 +237,7 @@
 	LIST_HEAD(list);
 
 	if (dma_readl(dw, CH_EN) & dwc->mask) {
-		dev_err(&dwc->chan.dev,
+		dev_err(chan2dev(&dwc->chan),
 			"BUG: XFER bit set, but channel not idle!\n");
 
 		/* Try to continue after resetting the channel... */
@@ -273,7 +282,7 @@
 		return;
 	}
 
-	dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp);
+	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
 
 	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
 		if (desc->lli.llp == llp)
@@ -292,7 +301,7 @@
 		dwc_descriptor_complete(dwc, desc);
 	}
 
-	dev_err(&dwc->chan.dev,
+	dev_err(chan2dev(&dwc->chan),
 		"BUG: All descriptors done, but channel not idle!\n");
 
 	/* Try to continue after resetting the channel... */
@@ -308,7 +317,7 @@
 
 static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
 {
-	dev_printk(KERN_CRIT, &dwc->chan.dev,
+	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
 			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
 			lli->sar, lli->dar, lli->llp,
 			lli->ctlhi, lli->ctllo);
@@ -342,9 +351,9 @@
 	 * controller flagged an error instead of scribbling over
 	 * random memory locations.
 	 */
-	dev_printk(KERN_CRIT, &dwc->chan.dev,
+	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
 			"Bad descriptor submitted for DMA!\n");
-	dev_printk(KERN_CRIT, &dwc->chan.dev,
+	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
 			"  cookie: %d\n", bad_desc->txd.cookie);
 	dwc_dump_lli(dwc, &bad_desc->lli);
 	list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
@@ -442,12 +451,12 @@
 	 * for DMA. But this is hard to do in a race-free manner.
 	 */
 	if (list_empty(&dwc->active_list)) {
-		dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n",
+		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
 				desc->txd.cookie);
 		dwc_dostart(dwc, desc);
 		list_add_tail(&desc->desc_node, &dwc->active_list);
 	} else {
-		dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n",
+		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
 				desc->txd.cookie);
 
 		list_add_tail(&desc->desc_node, &dwc->queue);
@@ -472,11 +481,11 @@
 	unsigned int		dst_width;
 	u32			ctllo;
 
-	dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
+	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
 			dest, src, len, flags);
 
 	if (unlikely(!len)) {
-		dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n");
+		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
 		return NULL;
 	}
 
@@ -516,7 +525,7 @@
 			first = desc;
 		} else {
 			prev->lli.llp = desc->txd.phys;
-			dma_sync_single_for_device(chan->dev.parent,
+			dma_sync_single_for_device(chan2parent(chan),
 					prev->txd.phys, sizeof(prev->lli),
 					DMA_TO_DEVICE);
 			list_add_tail(&desc->desc_node,
@@ -531,7 +540,7 @@
 		prev->lli.ctllo |= DWC_CTLL_INT_EN;
 
 	prev->lli.llp = 0;
-	dma_sync_single_for_device(chan->dev.parent,
+	dma_sync_single_for_device(chan2parent(chan),
 			prev->txd.phys, sizeof(prev->lli),
 			DMA_TO_DEVICE);
 
@@ -562,15 +571,15 @@
 	struct scatterlist	*sg;
 	size_t			total_len = 0;
 
-	dev_vdbg(&chan->dev, "prep_dma_slave\n");
+	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
 
 	if (unlikely(!dws || !sg_len))
 		return NULL;
 
-	reg_width = dws->slave.reg_width;
+	reg_width = dws->reg_width;
 	prev = first = NULL;
 
-	sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction);
+	sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);
 
 	switch (direction) {
 	case DMA_TO_DEVICE:
@@ -579,7 +588,7 @@
 				| DWC_CTLL_DST_FIX
 				| DWC_CTLL_SRC_INC
 				| DWC_CTLL_FC_M2P);
-		reg = dws->slave.tx_reg;
+		reg = dws->tx_reg;
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc	*desc;
 			u32		len;
@@ -587,7 +596,7 @@
 
 			desc = dwc_desc_get(dwc);
 			if (!desc) {
-				dev_err(&chan->dev,
+				dev_err(chan2dev(chan),
 					"not enough descriptors available\n");
 				goto err_desc_get;
 			}
@@ -607,7 +616,7 @@
 				first = desc;
 			} else {
 				prev->lli.llp = desc->txd.phys;
-				dma_sync_single_for_device(chan->dev.parent,
+				dma_sync_single_for_device(chan2parent(chan),
 						prev->txd.phys,
 						sizeof(prev->lli),
 						DMA_TO_DEVICE);
@@ -625,7 +634,7 @@
 				| DWC_CTLL_SRC_FIX
 				| DWC_CTLL_FC_P2M);
 
-		reg = dws->slave.rx_reg;
+		reg = dws->rx_reg;
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc	*desc;
 			u32		len;
@@ -633,7 +642,7 @@
 
 			desc = dwc_desc_get(dwc);
 			if (!desc) {
-				dev_err(&chan->dev,
+				dev_err(chan2dev(chan),
 					"not enough descriptors available\n");
 				goto err_desc_get;
 			}
@@ -653,7 +662,7 @@
 				first = desc;
 			} else {
 				prev->lli.llp = desc->txd.phys;
-				dma_sync_single_for_device(chan->dev.parent,
+				dma_sync_single_for_device(chan2parent(chan),
 						prev->txd.phys,
 						sizeof(prev->lli),
 						DMA_TO_DEVICE);
@@ -673,7 +682,7 @@
 		prev->lli.ctllo |= DWC_CTLL_INT_EN;
 
 	prev->lli.llp = 0;
-	dma_sync_single_for_device(chan->dev.parent,
+	dma_sync_single_for_device(chan2parent(chan),
 			prev->txd.phys, sizeof(prev->lli),
 			DMA_TO_DEVICE);
 
@@ -758,29 +767,21 @@
 	spin_unlock_bh(&dwc->lock);
 }
 
-static int dwc_alloc_chan_resources(struct dma_chan *chan,
-		struct dma_client *client)
+static int dwc_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
 	struct dw_dma		*dw = to_dw_dma(chan->device);
 	struct dw_desc		*desc;
-	struct dma_slave	*slave;
 	struct dw_dma_slave	*dws;
 	int			i;
 	u32			cfghi;
 	u32			cfglo;
 
-	dev_vdbg(&chan->dev, "alloc_chan_resources\n");
-
-	/* Channels doing slave DMA can only handle one client. */
-	if (dwc->dws || client->slave) {
-		if (chan->client_count)
-			return -EBUSY;
-	}
+	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
 
 	/* ASSERT:  channel is idle */
 	if (dma_readl(dw, CH_EN) & dwc->mask) {
-		dev_dbg(&chan->dev, "DMA channel not idle?\n");
+		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
 		return -EIO;
 	}
 
@@ -789,23 +790,17 @@
 	cfghi = DWC_CFGH_FIFO_MODE;
 	cfglo = 0;
 
-	slave = client->slave;
-	if (slave) {
+	dws = dwc->dws;
+	if (dws) {
 		/*
 		 * We need controller-specific data to set up slave
 		 * transfers.
 		 */
-		BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev);
+		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
 
-		dws = container_of(slave, struct dw_dma_slave, slave);
-
-		dwc->dws = dws;
 		cfghi = dws->cfg_hi;
 		cfglo = dws->cfg_lo;
-	} else {
-		dwc->dws = NULL;
 	}
-
 	channel_writel(dwc, CFG_LO, cfglo);
 	channel_writel(dwc, CFG_HI, cfghi);
 
@@ -822,7 +817,7 @@
 
 		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
 		if (!desc) {
-			dev_info(&chan->dev,
+			dev_info(chan2dev(chan),
 				"only allocated %d descriptors\n", i);
 			spin_lock_bh(&dwc->lock);
 			break;
@@ -832,7 +827,7 @@
 		desc->txd.tx_submit = dwc_tx_submit;
 		desc->txd.flags = DMA_CTRL_ACK;
 		INIT_LIST_HEAD(&desc->txd.tx_list);
-		desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli,
+		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
 				sizeof(desc->lli), DMA_TO_DEVICE);
 		dwc_desc_put(dwc, desc);
 
@@ -847,7 +842,7 @@
 
 	spin_unlock_bh(&dwc->lock);
 
-	dev_dbg(&chan->dev,
+	dev_dbg(chan2dev(chan),
 		"alloc_chan_resources allocated %d descriptors\n", i);
 
 	return i;
@@ -860,7 +855,7 @@
 	struct dw_desc		*desc, *_desc;
 	LIST_HEAD(list);
 
-	dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n",
+	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
 			dwc->descs_allocated);
 
 	/* ASSERT:  channel is idle */
@@ -881,13 +876,13 @@
 	spin_unlock_bh(&dwc->lock);
 
 	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
-		dev_vdbg(&chan->dev, "  freeing descriptor %p\n", desc);
-		dma_unmap_single(chan->dev.parent, desc->txd.phys,
+		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
+		dma_unmap_single(chan2parent(chan), desc->txd.phys,
 				sizeof(desc->lli), DMA_TO_DEVICE);
 		kfree(desc);
 	}
 
-	dev_vdbg(&chan->dev, "free_chan_resources done\n");
+	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
 }
 
 /*----------------------------------------------------------------------*/
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 0b95dcc..ca70a21 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -366,8 +366,7 @@
  *
  * Return - The number of descriptors allocated.
  */
-static int fsl_dma_alloc_chan_resources(struct dma_chan *chan,
-					struct dma_client *client)
+static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
 
@@ -823,7 +822,7 @@
 	 */
 	WARN_ON(fdev->feature != new_fsl_chan->feature);
 
-	new_fsl_chan->dev = &new_fsl_chan->common.dev;
+	new_fsl_chan->dev = &new_fsl_chan->common.dev->device;
 	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
 			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
 
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
index 9b16a3a..4105d65 100644
--- a/drivers/dma/ioat.c
+++ b/drivers/dma/ioat.c
@@ -75,60 +75,10 @@
 module_param(ioat_dca_enabled, int, 0644);
 MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
 
-static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase)
-{
-	struct ioat_device *device = pci_get_drvdata(pdev);
-	u8 version;
-	int err = 0;
-
-	version = readb(iobase + IOAT_VER_OFFSET);
-	switch (version) {
-	case IOAT_VER_1_2:
-		device->dma = ioat_dma_probe(pdev, iobase);
-		if (device->dma && ioat_dca_enabled)
-			device->dca = ioat_dca_init(pdev, iobase);
-		break;
-	case IOAT_VER_2_0:
-		device->dma = ioat_dma_probe(pdev, iobase);
-		if (device->dma && ioat_dca_enabled)
-			device->dca = ioat2_dca_init(pdev, iobase);
-		break;
-	case IOAT_VER_3_0:
-		device->dma = ioat_dma_probe(pdev, iobase);
-		if (device->dma && ioat_dca_enabled)
-			device->dca = ioat3_dca_init(pdev, iobase);
-		break;
-	default:
-		err = -ENODEV;
-		break;
-	}
-	if (!device->dma)
-		err = -ENODEV;
-	return err;
-}
-
-static void ioat_shutdown_functionality(struct pci_dev *pdev)
-{
-	struct ioat_device *device = pci_get_drvdata(pdev);
-
-	dev_err(&pdev->dev, "Removing dma and dca services\n");
-	if (device->dca) {
-		unregister_dca_provider(device->dca);
-		free_dca_provider(device->dca);
-		device->dca = NULL;
-	}
-
-	if (device->dma) {
-		ioat_dma_remove(device->dma);
-		device->dma = NULL;
-	}
-}
-
 static struct pci_driver ioat_pci_driver = {
 	.name		= "ioatdma",
 	.id_table	= ioat_pci_tbl,
 	.probe		= ioat_probe,
-	.shutdown	= ioat_shutdown_functionality,
 	.remove		= __devexit_p(ioat_remove),
 };
 
@@ -179,7 +129,29 @@
 
 	pci_set_master(pdev);
 
-	err = ioat_setup_functionality(pdev, iobase);
+	switch (readb(iobase + IOAT_VER_OFFSET)) {
+	case IOAT_VER_1_2:
+		device->dma = ioat_dma_probe(pdev, iobase);
+		if (device->dma && ioat_dca_enabled)
+			device->dca = ioat_dca_init(pdev, iobase);
+		break;
+	case IOAT_VER_2_0:
+		device->dma = ioat_dma_probe(pdev, iobase);
+		if (device->dma && ioat_dca_enabled)
+			device->dca = ioat2_dca_init(pdev, iobase);
+		break;
+	case IOAT_VER_3_0:
+		device->dma = ioat_dma_probe(pdev, iobase);
+		if (device->dma && ioat_dca_enabled)
+			device->dca = ioat3_dca_init(pdev, iobase);
+		break;
+	default:
+		err = -ENODEV;
+		break;
+	}
+	if (!device->dma)
+		err = -ENODEV;
+
 	if (err)
 		goto err_version;
 
@@ -198,17 +170,21 @@
 	return err;
 }
 
-/*
- * It is unsafe to remove this module: if removed while a requested
- * dma is outstanding, esp. from tcp, it is possible to hang while
- * waiting for something that will never finish.  However, if you're
- * feeling lucky, this usually works just fine.
- */
 static void __devexit ioat_remove(struct pci_dev *pdev)
 {
 	struct ioat_device *device = pci_get_drvdata(pdev);
 
-	ioat_shutdown_functionality(pdev);
+	dev_err(&pdev->dev, "Removing dma and dca services\n");
+	if (device->dca) {
+		unregister_dca_provider(device->dca);
+		free_dca_provider(device->dca);
+		device->dca = NULL;
+	}
+
+	if (device->dma) {
+		ioat_dma_remove(device->dma);
+		device->dma = NULL;
+	}
 
 	kfree(device);
 }
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 6607fdd..b3759c4 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -734,8 +734,7 @@
  * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
  * @chan: the channel to be filled out
  */
-static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
-					 struct dma_client *client)
+static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 	struct ioat_desc_sw *desc;
@@ -1341,12 +1340,11 @@
  */
 #define IOAT_TEST_SIZE 2000
 
-DECLARE_COMPLETION(test_completion);
 static void ioat_dma_test_callback(void *dma_async_param)
 {
-	printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
-		dma_async_param);
-	complete(&test_completion);
+	struct completion *cmp = dma_async_param;
+
+	complete(cmp);
 }
 
 /**
@@ -1363,6 +1361,7 @@
 	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int err = 0;
+	struct completion cmp;
 
 	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
 	if (!src)
@@ -1381,7 +1380,7 @@
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
+	if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
 		dev_err(&device->pdev->dev,
 			"selftest cannot allocate chan resource\n");
 		err = -ENODEV;
@@ -1402,8 +1401,9 @@
 	}
 
 	async_tx_ack(tx);
+	init_completion(&cmp);
 	tx->callback = ioat_dma_test_callback;
-	tx->callback_param = (void *)0x8086;
+	tx->callback_param = &cmp;
 	cookie = tx->tx_submit(tx);
 	if (cookie < 0) {
 		dev_err(&device->pdev->dev,
@@ -1413,7 +1413,7 @@
 	}
 	device->common.device_issue_pending(dma_chan);
 
-	wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000));
+	wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
 	if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
 					!= DMA_SUCCESS) {
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 6be3172..ea5440d 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -24,7 +24,6 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/async_tx.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
@@ -116,7 +115,7 @@
 	}
 
 	/* run dependent operations */
-	async_tx_run_dependencies(&desc->async_tx);
+	dma_run_dependencies(&desc->async_tx);
 
 	return cookie;
 }
@@ -270,8 +269,6 @@
 			break;
 	}
 
-	BUG_ON(!seen_current);
-
 	if (cookie > 0) {
 		iop_chan->completed_cookie = cookie;
 		pr_debug("\tcompleted cookie %d\n", cookie);
@@ -471,8 +468,7 @@
  * greater than 2x the number slots needed to satisfy a device->max_xor
  * request.
  * */
-static int iop_adma_alloc_chan_resources(struct dma_chan *chan,
-					 struct dma_client *client)
+static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 {
 	char *hw_desc;
 	int idx;
@@ -866,7 +862,7 @@
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
+	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
 		err = -ENODEV;
 		goto out;
 	}
@@ -964,7 +960,7 @@
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
+	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
 		err = -ENODEV;
 		goto out;
 	}
@@ -1115,26 +1111,13 @@
 	struct iop_adma_device *device = platform_get_drvdata(dev);
 	struct dma_chan *chan, *_chan;
 	struct iop_adma_chan *iop_chan;
-	int i;
 	struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
 
 	dma_async_device_unregister(&device->common);
 
-	for (i = 0; i < 3; i++) {
-		unsigned int irq;
-		irq = platform_get_irq(dev, i);
-		free_irq(irq, device);
-	}
-
 	dma_free_coherent(&dev->dev, plat_data->pool_size,
 			device->dma_desc_pool_virt, device->dma_desc_pool);
 
-	do {
-		struct resource *res;
-		res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-		release_mem_region(res->start, res->end - res->start);
-	} while (0);
-
 	list_for_each_entry_safe(chan, _chan, &device->common.channels,
 				device_node) {
 		iop_chan = to_iop_adma_chan(chan);
@@ -1255,7 +1238,6 @@
 	spin_lock_init(&iop_chan->lock);
 	INIT_LIST_HEAD(&iop_chan->chain);
 	INIT_LIST_HEAD(&iop_chan->all_slots);
-	INIT_RCU_HEAD(&iop_chan->common.rcu);
 	iop_chan->common.device = dma_dev;
 	list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
 
@@ -1431,16 +1413,12 @@
 	return platform_driver_register(&iop_adma_driver);
 }
 
-/* it's currently unsafe to unload this module */
-#if 0
 static void __exit iop_adma_exit (void)
 {
 	platform_driver_unregister(&iop_adma_driver);
 	return;
 }
 module_exit(iop_adma_exit);
-#endif
-
 module_init(iop_adma_init);
 
 MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index bcda174..d35cbd1 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -18,7 +18,6 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/async_tx.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
@@ -340,7 +339,7 @@
 	}
 
 	/* run dependent operations */
-	async_tx_run_dependencies(&desc->async_tx);
+	dma_run_dependencies(&desc->async_tx);
 
 	return cookie;
 }
@@ -607,8 +606,7 @@
 }
 
 /* returns the number of allocated descriptors */
-static int mv_xor_alloc_chan_resources(struct dma_chan *chan,
-				       struct dma_client *client)
+static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 {
 	char *hw_desc;
 	int idx;
@@ -958,7 +956,7 @@
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
+	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
 		err = -ENODEV;
 		goto out;
 	}
@@ -1053,7 +1051,7 @@
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
+	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
 		err = -ENODEV;
 		goto out;
 	}
@@ -1221,7 +1219,6 @@
 	INIT_LIST_HEAD(&mv_chan->chain);
 	INIT_LIST_HEAD(&mv_chan->completed_slots);
 	INIT_LIST_HEAD(&mv_chan->all_slots);
-	INIT_RCU_HEAD(&mv_chan->common.rcu);
 	mv_chan->common.device = dma_dev;
 
 	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index 418c18f..6bd91a1 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -75,7 +75,7 @@
 	 * controller, block reads to the config rom accesses the host
 	 * memory, but quadlet read access the hardware bus info block
 	 * registers.  That's just crack, but it means we should make
-	 * sure the contents of bus info block in host memory mathces
+	 * sure the contents of bus info block in host memory matches
 	 * the version stored in the OHCI registers.
 	 */
 
@@ -189,6 +189,17 @@
 	63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
 };
 
+void
+fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
+{
+	int scheduled;
+
+	fw_card_get(card);
+	scheduled = schedule_delayed_work(&card->work, delay);
+	if (!scheduled)
+		fw_card_put(card);
+}
+
 static void
 fw_card_bm_work(struct work_struct *work)
 {
@@ -198,6 +209,8 @@
 	unsigned long flags;
 	int root_id, new_root_id, irm_id, gap_count, generation, grace, rcode;
 	bool do_reset = false;
+	bool root_device_is_running;
+	bool root_device_is_cmc;
 	__be32 lock_data[2];
 
 	spin_lock_irqsave(&card->lock, flags);
@@ -206,15 +219,16 @@
 
 	if (local_node == NULL) {
 		spin_unlock_irqrestore(&card->lock, flags);
-		return;
+		goto out_put_card;
 	}
 	fw_node_get(local_node);
 	fw_node_get(root_node);
 
 	generation = card->generation;
 	root_device = root_node->data;
-	if (root_device)
-		fw_device_get(root_device);
+	root_device_is_running = root_device &&
+			atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
+	root_device_is_cmc = root_device && root_device->cmc;
 	root_id = root_node->node_id;
 	grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
 
@@ -280,7 +294,7 @@
 		 * this task 100ms from now.
 		 */
 		spin_unlock_irqrestore(&card->lock, flags);
-		schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10));
+		fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 10));
 		goto out;
 	}
 
@@ -297,14 +311,14 @@
 		 * config rom.  In either case, pick another root.
 		 */
 		new_root_id = local_node->node_id;
-	} else if (atomic_read(&root_device->state) != FW_DEVICE_RUNNING) {
+	} else if (!root_device_is_running) {
 		/*
 		 * If we haven't probed this device yet, bail out now
 		 * and let's try again once that's done.
 		 */
 		spin_unlock_irqrestore(&card->lock, flags);
 		goto out;
-	} else if (root_device->cmc) {
+	} else if (root_device_is_cmc) {
 		/*
 		 * FIXME: I suppose we should set the cmstr bit in the
 		 * STATE_CLEAR register of this node, as described in
@@ -351,10 +365,10 @@
 		fw_core_initiate_bus_reset(card, 1);
 	}
  out:
-	if (root_device)
-		fw_device_put(root_device);
 	fw_node_put(root_node);
 	fw_node_put(local_node);
+ out_put_card:
+	fw_card_put(card);
 }
 
 static void
@@ -510,7 +524,6 @@
 	fw_card_put(card);
 	wait_for_completion(&card->done);
 
-	cancel_delayed_work_sync(&card->work);
 	WARN_ON(!list_empty(&card->transaction_list));
 	del_timer_sync(&card->flush_timer);
 }
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index 6b9be42..2af5a8d 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -159,7 +159,8 @@
 
 	/*
 	 * Take the card lock so we don't set this to NULL while a
-	 * FW_NODE_UPDATED callback is being handled.
+	 * FW_NODE_UPDATED callback is being handled or while the
+	 * bus manager work looks at this node.
 	 */
 	spin_lock_irqsave(&card->lock, flags);
 	device->node->data = NULL;
@@ -617,7 +618,7 @@
  */
 DECLARE_RWSEM(fw_device_rwsem);
 
-static DEFINE_IDR(fw_device_idr);
+DEFINE_IDR(fw_device_idr);
 int fw_cdev_major;
 
 struct fw_device *fw_device_get_by_devt(dev_t devt)
@@ -689,18 +690,19 @@
 			fw_notify("giving up on config rom for node id %x\n",
 				  device->node_id);
 			if (device->node == device->card->root_node)
-				schedule_delayed_work(&device->card->work, 0);
+				fw_schedule_bm_work(device->card, 0);
 			fw_device_release(&device->device);
 		}
 		return;
 	}
 
-	err = -ENOMEM;
+	device_initialize(&device->device);
 
 	fw_device_get(device);
 	down_write(&fw_device_rwsem);
-	if (idr_pre_get(&fw_device_idr, GFP_KERNEL))
-		err = idr_get_new(&fw_device_idr, device, &minor);
+	err = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
+	      idr_get_new(&fw_device_idr, device, &minor) :
+	      -ENOMEM;
 	up_write(&fw_device_rwsem);
 
 	if (err < 0)
@@ -758,7 +760,7 @@
 	 * pretty harmless.
 	 */
 	if (device->node == device->card->root_node)
-		schedule_delayed_work(&device->card->work, 0);
+		fw_schedule_bm_work(device->card, 0);
 
 	return;
 
@@ -892,7 +894,7 @@
 	fw_device_shutdown(work);
  out:
 	if (node_id == card->root_node->node_id)
-		schedule_delayed_work(&card->work, 0);
+		fw_schedule_bm_work(card, 0);
 }
 
 void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
@@ -911,13 +913,14 @@
 
 		/*
 		 * Do minimal intialization of the device here, the
-		 * rest will happen in fw_device_init().  We need the
-		 * card and node so we can read the config rom and we
-		 * need to do device_initialize() now so
-		 * device_for_each_child() in FW_NODE_UPDATED is
-		 * doesn't freak out.
+		 * rest will happen in fw_device_init().
+		 *
+		 * Attention:  A lot of things, even fw_device_get(),
+		 * cannot be done before fw_device_init() has finished!
+		 * You can basically just check device->state and
+		 * schedule work until then, but only while holding
+		 * card->lock.
 		 */
-		device_initialize(&device->device);
 		atomic_set(&device->state, FW_DEVICE_INITIALIZING);
 		device->card = fw_card_get(card);
 		device->node = fw_node_get(node);
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index 42305bb..df51732 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -21,6 +21,7 @@
 
 #include <linux/fs.h>
 #include <linux/cdev.h>
+#include <linux/idr.h>
 #include <linux/rwsem.h>
 #include <asm/atomic.h>
 
@@ -99,6 +100,7 @@
 void fw_device_cdev_remove(struct fw_device *device);
 
 extern struct rw_semaphore fw_device_rwsem;
+extern struct idr fw_device_idr;
 extern int fw_cdev_major;
 
 /*
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index e54403e..e88d506 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -670,17 +670,6 @@
 			&d, sizeof(d), complete_agent_reset_write_no_wait, t);
 }
 
-static void sbp2_set_generation(struct sbp2_logical_unit *lu, int generation)
-{
-	struct fw_card *card = fw_device(lu->tgt->unit->device.parent)->card;
-	unsigned long flags;
-
-	/* serialize with comparisons of lu->generation and card->generation */
-	spin_lock_irqsave(&card->lock, flags);
-	lu->generation = generation;
-	spin_unlock_irqrestore(&card->lock, flags);
-}
-
 static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
 {
 	/*
@@ -884,7 +873,7 @@
 		goto out;
 
 	generation    = device->generation;
-	smp_rmb();    /* node_id must not be older than generation */
+	smp_rmb();    /* node IDs must not be older than generation */
 	node_id       = device->node_id;
 	local_node_id = device->card->node_id;
 
@@ -908,7 +897,8 @@
 
 	tgt->node_id	  = node_id;
 	tgt->address_high = local_node_id << 16;
-	sbp2_set_generation(lu, generation);
+	smp_wmb();	  /* node IDs must not be older than generation */
+	lu->generation	  = generation;
 
 	lu->command_block_agent_address =
 		((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff)
@@ -1201,7 +1191,7 @@
 		goto out;
 
 	generation    = device->generation;
-	smp_rmb();    /* node_id must not be older than generation */
+	smp_rmb();    /* node IDs must not be older than generation */
 	node_id       = device->node_id;
 	local_node_id = device->card->node_id;
 
@@ -1228,7 +1218,8 @@
 
 	tgt->node_id      = node_id;
 	tgt->address_high = local_node_id << 16;
-	sbp2_set_generation(lu, generation);
+	smp_wmb();	  /* node IDs must not be older than generation */
+	lu->generation	  = generation;
 
 	fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
 		  tgt->bus_id, lu->lun, lu->retries);
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index 5e204713..c9be6e6 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -355,6 +355,9 @@
 {
 	fw_node_event(card, node, FW_NODE_DESTROYED);
 	fw_node_put(node);
+
+	/* Topology has changed - reset bus manager retry counter */
+	card->bm_retries = 0;
 }
 
 static void
@@ -374,6 +377,9 @@
 	}
 
 	fw_node_event(card, node, FW_NODE_CREATED);
+
+	/* Topology has changed - reset bus manager retry counter */
+	card->bm_retries = 0;
 }
 
 void fw_destroy_nodes(struct fw_card *card)
@@ -514,14 +520,6 @@
 
 	spin_lock_irqsave(&card->lock, flags);
 
-	/*
-	 * If the new topology has a different self_id_count the topology
-	 * changed, either nodes were added or removed. In that case we
-	 * reset the IRM reset counter.
-	 */
-	if (card->self_id_count != self_id_count)
-		card->bm_retries = 0;
-
 	card->node_id = node_id;
 	/*
 	 * Update node_id before generation to prevent anybody from using
@@ -530,7 +528,7 @@
 	smp_wmb();
 	card->generation = generation;
 	card->reset_jiffies = jiffies;
-	schedule_delayed_work(&card->work, 0);
+	fw_schedule_bm_work(card, 0);
 
 	local_node = build_tree(card, self_ids, self_id_count);
 
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 2884f87..699ac04 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -19,6 +19,7 @@
  */
 
 #include <linux/completion.h>
+#include <linux/idr.h>
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/module.h>
@@ -971,6 +972,7 @@
 {
 	unregister_chrdev(fw_cdev_major, "firewire");
 	bus_unregister(&fw_bus_type);
+	idr_destroy(&fw_device_idr);
 }
 
 module_init(fw_core_init);
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index 839466f0..c9ab12a 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -237,14 +237,6 @@
 	int link_speed;
 	int config_rom_generation;
 
-	/*
-	 * We need to store up to 4 self ID for a maximum of 63
-	 * devices plus 3 words for the topology map header.
-	 */
-	int self_id_count;
-	u32 topology_map[252 + 3];
-	u32 broadcast_channel;
-
 	spinlock_t lock; /* Take this lock when handling the lists in
 			  * this struct. */
 	struct fw_node *local_node;
@@ -262,6 +254,9 @@
 	struct delayed_work work;
 	int bm_retries;
 	int bm_generation;
+
+	u32 broadcast_channel;
+	u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
 };
 
 static inline struct fw_card *fw_card_get(struct fw_card *card)
@@ -278,6 +273,8 @@
 	kref_put(&card->kref, fw_card_release);
 }
 
+extern void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
+
 /*
  * The iso packet format allows for an immediate header/payload part
  * stored in 'header' immediately after the packet info plus an
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 50a071f..777fba4 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -238,11 +238,11 @@
 }
 
 /**
- * smi_request: generate SMI request
+ * dcdbas_smi_request: generate SMI request
  *
  * Called with smi_data_lock.
  */
-static int smi_request(struct smi_cmd *smi_cmd)
+int dcdbas_smi_request(struct smi_cmd *smi_cmd)
 {
 	cpumask_t old_mask;
 	int ret = 0;
@@ -309,14 +309,14 @@
 	switch (val) {
 	case 2:
 		/* Raw SMI */
-		ret = smi_request(smi_cmd);
+		ret = dcdbas_smi_request(smi_cmd);
 		if (!ret)
 			ret = count;
 		break;
 	case 1:
 		/* Calling Interface SMI */
 		smi_cmd->ebx = (u32) virt_to_phys(smi_cmd->command_buffer);
-		ret = smi_request(smi_cmd);
+		ret = dcdbas_smi_request(smi_cmd);
 		if (!ret)
 			ret = count;
 		break;
@@ -333,6 +333,7 @@
 	mutex_unlock(&smi_data_lock);
 	return ret;
 }
+EXPORT_SYMBOL(dcdbas_smi_request);
 
 /**
  * host_control_smi: generate host control SMI
diff --git a/drivers/firmware/dcdbas.h b/drivers/firmware/dcdbas.h
index 87bc341..ca3cb0a 100644
--- a/drivers/firmware/dcdbas.h
+++ b/drivers/firmware/dcdbas.h
@@ -101,5 +101,7 @@
 	} __attribute__ ((packed)) parameters;
 } __attribute__ ((packed));
 
+int dcdbas_smi_request(struct smi_cmd *smi_cmd);
+
 #endif /* _DCDBAS_H_ */
 
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index 13946eb..b4704e1 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -576,7 +576,7 @@
 {
 	int size = 0;
 	if (!pos)
-		size = sprintf(buffer, "%s\n", image_type);
+		size = scnprintf(buffer, count, "%s\n", image_type);
 	return size;
 }
 
@@ -648,7 +648,7 @@
 	int size = 0;
 	if (!pos) {
 		spin_lock(&rbu_data.lock);
-		size = sprintf(buffer, "%lu\n", rbu_data.packetsize);
+		size = scnprintf(buffer, count, "%lu\n", rbu_data.packetsize);
 		spin_unlock(&rbu_data.lock);
 	}
 	return size;
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 3bf8ee1..261b9aa 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -56,9 +56,9 @@
 	ssize_t (*show)(struct firmware_map_entry *entry, char *buf);
 };
 
-struct memmap_attribute memmap_start_attr = __ATTR_RO(start);
-struct memmap_attribute memmap_end_attr   = __ATTR_RO(end);
-struct memmap_attribute memmap_type_attr  = __ATTR_RO(type);
+static struct memmap_attribute memmap_start_attr = __ATTR_RO(start);
+static struct memmap_attribute memmap_end_attr   = __ATTR_RO(end);
+static struct memmap_attribute memmap_type_attr  = __ATTR_RO(type);
 
 /*
  * These are default attributes that are added for every memmap entry.
diff --git a/drivers/gpio/max7301.c b/drivers/gpio/max7301.c
index 8b24d78..3e7f4e0 100644
--- a/drivers/gpio/max7301.c
+++ b/drivers/gpio/max7301.c
@@ -217,8 +217,10 @@
 	int i, ret;
 
 	pdata = spi->dev.platform_data;
-	if (!pdata || !pdata->base)
-		return -ENODEV;
+	if (!pdata || !pdata->base) {
+		dev_dbg(&spi->dev, "incorrect or missing platform data\n");
+		return -EINVAL;
+	}
 
 	/*
 	 * bits_per_word cannot be configured in platform data
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c
index 55ae9a4..f786824 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/max732x.c
@@ -267,8 +267,10 @@
 	int ret, nr_port;
 
 	pdata = client->dev.platform_data;
-	if (pdata == NULL)
-		return -ENODEV;
+	if (pdata == NULL) {
+		dev_dbg(&client->dev, "no platform data\n");
+		return -EINVAL;
+	}
 
 	chip = kzalloc(sizeof(struct max732x_chip), GFP_KERNEL);
 	if (chip == NULL)
diff --git a/drivers/gpio/mcp23s08.c b/drivers/gpio/mcp23s08.c
index 89c1d22..f6fae0e 100644
--- a/drivers/gpio/mcp23s08.c
+++ b/drivers/gpio/mcp23s08.c
@@ -310,8 +310,10 @@
 	unsigned			base;
 
 	pdata = spi->dev.platform_data;
-	if (!pdata || !gpio_is_valid(pdata->base))
-		return -ENODEV;
+	if (!pdata || !gpio_is_valid(pdata->base)) {
+		dev_dbg(&spi->dev, "invalid or missing platform data\n");
+		return -EINVAL;
+	}
 
 	for (addr = 0; addr < 4; addr++) {
 		if (!pdata->chip[addr].is_present)
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 37f35388..8dc0164 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -202,8 +202,10 @@
 	int ret;
 
 	pdata = client->dev.platform_data;
-	if (pdata == NULL)
-		return -ENODEV;
+	if (pdata == NULL) {
+		dev_dbg(&client->dev, "no platform data\n");
+		return -EINVAL;
+	}
 
 	chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
 	if (chip == NULL)
diff --git a/drivers/gpio/pcf857x.c b/drivers/gpio/pcf857x.c
index 4bc2070..9525724 100644
--- a/drivers/gpio/pcf857x.c
+++ b/drivers/gpio/pcf857x.c
@@ -188,8 +188,10 @@
 	int				status;
 
 	pdata = client->dev.platform_data;
-	if (!pdata)
-		return -ENODEV;
+	if (!pdata) {
+		dev_dbg(&client->dev, "no platform data\n");
+		return -EINVAL;
+	}
 
 	/* Allocate, initialize, and register this gpio_chip. */
 	gpio = kzalloc(sizeof *gpio, GFP_KERNEL);
@@ -248,8 +250,10 @@
 		else
 			status = i2c_read_le16(client);
 
-	} else
-		status = -ENODEV;
+	} else {
+		dev_dbg(&client->dev, "unsupported number of gpios\n");
+		status = -EINVAL;
+	}
 
 	if (status < 0)
 		goto fail;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 53c8725..5b2cbb7 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -210,7 +210,7 @@
 	int ret;
 
 	WARN(!mutex_is_locked(&dev->mode_config.mutex),
-	     "%s called w/o mode_config lock\n", __FUNCTION__);
+	     "%s called w/o mode_config lock\n", __func__);
 again:
 	if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
 		DRM_ERROR("Ran out memory getting a mode number\n");
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index d8a982b..964c5eb 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -36,7 +36,7 @@
 /*
  * Detailed mode info for 800x600@60Hz
  */
-static struct drm_display_mode std_mode[] = {
+static struct drm_display_mode std_modes[] = {
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DEFAULT, 40000, 800, 840,
 		   968, 1056, 0, 600, 601, 605, 628, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
@@ -60,15 +60,18 @@
  * changes have occurred.
  *
  * FIXME: take into account monitor limits
+ *
+ * RETURNS:
+ * Number of modes found on @connector.
  */
-void drm_helper_probe_single_connector_modes(struct drm_connector *connector,
-					     uint32_t maxX, uint32_t maxY)
+int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+					    uint32_t maxX, uint32_t maxY)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode, *t;
 	struct drm_connector_helper_funcs *connector_funcs =
 		connector->helper_private;
-	int ret;
+	int count = 0;
 
 	DRM_DEBUG("%s\n", drm_get_connector_name(connector));
 	/* set all modes to the unverified state */
@@ -81,14 +84,14 @@
 		DRM_DEBUG("%s is disconnected\n",
 			  drm_get_connector_name(connector));
 		/* TODO set EDID to NULL */
-		return;
+		return 0;
 	}
 
-	ret = (*connector_funcs->get_modes)(connector);
+	count = (*connector_funcs->get_modes)(connector);
+	if (!count)
+		return 0;
 
-	if (ret) {
-		drm_mode_connector_list_update(connector);
-	}
+	drm_mode_connector_list_update(connector);
 
 	if (maxX && maxY)
 		drm_mode_validate_size(dev, &connector->modes, maxX,
@@ -102,25 +105,8 @@
 
 	drm_mode_prune_invalid(dev, &connector->modes, true);
 
-	if (list_empty(&connector->modes)) {
-		struct drm_display_mode *stdmode;
-
-		DRM_DEBUG("No valid modes on %s\n",
-			  drm_get_connector_name(connector));
-
-		/* Should we do this here ???
-		 * When no valid EDID modes are available we end up
-		 * here and bailed in the past, now we add a standard
-		 * 640x480@60Hz mode and carry on.
-		 */
-		stdmode = drm_mode_duplicate(dev, &std_mode[0]);
-		drm_mode_probed_add(connector, stdmode);
-		drm_mode_list_concat(&connector->probed_modes,
-				     &connector->modes);
-
-		DRM_DEBUG("Adding standard 640x480 @ 60Hz to %s\n",
-			  drm_get_connector_name(connector));
-	}
+	if (list_empty(&connector->modes))
+		return 0;
 
 	drm_mode_sort(&connector->modes);
 
@@ -131,20 +117,58 @@
 		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 		drm_mode_debug_printmodeline(mode);
 	}
+
+	return count;
 }
 EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
 
-void drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
+int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
 				      uint32_t maxY)
 {
 	struct drm_connector *connector;
+	int count = 0;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		drm_helper_probe_single_connector_modes(connector, maxX, maxY);
+		count += drm_helper_probe_single_connector_modes(connector,
+								 maxX, maxY);
 	}
+
+	return count;
 }
 EXPORT_SYMBOL(drm_helper_probe_connector_modes);
 
+static void drm_helper_add_std_modes(struct drm_device *dev,
+				     struct drm_connector *connector)
+{
+	struct drm_display_mode *mode, *t;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(std_modes); i++) {
+		struct drm_display_mode *stdmode;
+
+		/*
+		 * When no valid EDID modes are available we end up
+		 * here; in the past we bailed out, but now we add some
+		 * standard modes and move on.
+		 */
+		stdmode = drm_mode_duplicate(dev, &std_modes[i]);
+		drm_mode_probed_add(connector, stdmode);
+		drm_mode_list_concat(&connector->probed_modes,
+				     &connector->modes);
+
+		DRM_DEBUG("Adding mode %s to %s\n", stdmode->name,
+			  drm_get_connector_name(connector));
+	}
+	drm_mode_sort(&connector->modes);
+
+	DRM_DEBUG("Added std modes on %s\n", drm_get_connector_name(connector));
+	list_for_each_entry_safe(mode, t, &connector->modes, head) {
+		mode->vrefresh = drm_mode_vrefresh(mode);
+
+		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+		drm_mode_debug_printmodeline(mode);
+	}
+}
 
 /**
  * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
@@ -237,6 +261,8 @@
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		enabled[i] = drm_connector_enabled(connector, true);
+		DRM_DEBUG("connector %d enabled? %s\n", connector->base.id,
+			  enabled[i] ? "yes" : "no");
 		any_enabled |= enabled[i];
 		i++;
 	}
@@ -265,11 +291,17 @@
 			continue;
 		}
 
+		DRM_DEBUG("looking for preferred mode on connector %d\n",
+			  connector->base.id);
+
 		modes[i] = drm_has_preferred_mode(connector, width, height);
-		if (!modes[i]) {
+		/* No preferred modes, pick one off the list */
+		if (!modes[i] && !list_empty(&connector->modes)) {
 			list_for_each_entry(modes[i], &connector->modes, head)
 				break;
 		}
+		DRM_DEBUG("found mode %s\n", modes[i] ? modes[i]->name :
+			  "none");
 		i++;
 	}
 	return true;
@@ -369,6 +401,8 @@
 	int width, height;
 	int i, ret;
 
+	DRM_DEBUG("\n");
+
 	width = dev->mode_config.max_width;
 	height = dev->mode_config.max_height;
 
@@ -390,6 +424,8 @@
 	if (!ret)
 		DRM_ERROR("Unable to find initial modes\n");
 
+	DRM_DEBUG("picking CRTCs for %dx%d config\n", width, height);
+
 	drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
 
 	i = 0;
@@ -403,6 +439,8 @@
 		}
 
 		if (mode && crtc) {
+			DRM_DEBUG("desired mode %s set on crtc %d\n",
+				  mode->name, crtc->base.id);
 			crtc->desired_mode = mode;
 			connector->encoder->crtc = crtc;
 		} else
@@ -442,6 +480,7 @@
 	int saved_x, saved_y;
 	struct drm_encoder *encoder;
 	bool ret = true;
+	bool depth_changed, bpp_changed;
 
 	adjusted_mode = drm_mode_duplicate(dev, mode);
 
@@ -450,6 +489,15 @@
 	if (!crtc->enabled)
 		return true;
 
+	if (old_fb && crtc->fb) {
+		depth_changed = (old_fb->depth != crtc->fb->depth);
+		bpp_changed = (old_fb->bits_per_pixel !=
+			       crtc->fb->bits_per_pixel);
+	} else {
+		depth_changed = true;
+		bpp_changed = true;
+	}
+
 	saved_mode = crtc->mode;
 	saved_x = crtc->x;
 	saved_y = crtc->y;
@@ -462,7 +510,8 @@
 	crtc->y = y;
 
 	if (drm_mode_equal(&saved_mode, &crtc->mode)) {
-		if (saved_x != crtc->x || saved_y != crtc->y) {
+		if (saved_x != crtc->x || saved_y != crtc->y ||
+		    depth_changed || bpp_changed) {
 			crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y,
 						  old_fb);
 			goto done;
@@ -568,8 +617,8 @@
 	struct drm_encoder **save_encoders, *new_encoder;
 	struct drm_framebuffer *old_fb;
 	bool save_enabled;
-	bool changed = false;
-	bool flip_or_move = false;
+	bool mode_changed = false;
+	bool fb_changed = false;
 	struct drm_connector *connector;
 	int count = 0, ro, fail = 0;
 	struct drm_crtc_helper_funcs *crtc_funcs;
@@ -597,7 +646,10 @@
 	/* save previous config */
 	save_enabled = set->crtc->enabled;
 
-	/* this is meant to be num_connector not num_crtc */
+	/*
+	 * We do mode_config.num_connector here since we'll look at the
+	 * CRTC and encoder associated with each connector later.
+	 */
 	save_crtcs = kzalloc(dev->mode_config.num_connector *
 			     sizeof(struct drm_crtc *), GFP_KERNEL);
 	if (!save_crtcs)
@@ -613,21 +665,25 @@
 	/* We should be able to check here if the fb has the same properties
 	 * and then just flip_or_move it */
 	if (set->crtc->fb != set->fb) {
-		/* if we have no fb then its a change not a flip */
+		/* If we have no fb then treat it as a full mode set */
 		if (set->crtc->fb == NULL)
-			changed = true;
+			mode_changed = true;
+		else if ((set->fb->bits_per_pixel !=
+			 set->crtc->fb->bits_per_pixel) ||
+			 set->fb->depth != set->crtc->fb->depth)
+			fb_changed = true;
 		else
-			flip_or_move = true;
+			fb_changed = true;
 	}
 
 	if (set->x != set->crtc->x || set->y != set->crtc->y)
-		flip_or_move = true;
+		fb_changed = true;
 
 	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
 		DRM_DEBUG("modes are different\n");
 		drm_mode_debug_printmodeline(&set->crtc->mode);
 		drm_mode_debug_printmodeline(set->mode);
-		changed = true;
+		mode_changed = true;
 	}
 
 	/* a) traverse passed in connector list and get encoders for them */
@@ -650,7 +706,7 @@
 		}
 
 		if (new_encoder != connector->encoder) {
-			changed = true;
+			mode_changed = true;
 			connector->encoder = new_encoder;
 		}
 	}
@@ -677,16 +733,16 @@
 				new_crtc = set->crtc;
 		}
 		if (new_crtc != connector->encoder->crtc) {
-			changed = true;
+			mode_changed = true;
 			connector->encoder->crtc = new_crtc;
 		}
 	}
 
 	/* mode_set_base is not a required function */
-	if (flip_or_move && !crtc_funcs->mode_set_base)
-		changed = true;
+	if (fb_changed && !crtc_funcs->mode_set_base)
+		mode_changed = true;
 
-	if (changed) {
+	if (mode_changed) {
 		old_fb = set->crtc->fb;
 		set->crtc->fb = set->fb;
 		set->crtc->enabled = (set->mode != NULL);
@@ -705,7 +761,7 @@
 			set->crtc->desired_mode = set->mode;
 		}
 		drm_helper_disable_unused_functions(dev);
-	} else if (flip_or_move) {
+	} else if (fb_changed) {
 		old_fb = set->crtc->fb;
 		if (set->crtc->fb != set->fb)
 			set->crtc->fb = set->fb;
@@ -764,10 +820,31 @@
  */
 bool drm_helper_initial_config(struct drm_device *dev, bool can_grow)
 {
-	int ret = false;
+	struct drm_connector *connector;
+	int count = 0;
 
-	drm_helper_plugged_event(dev);
-	return ret;
+	count = drm_helper_probe_connector_modes(dev,
+						 dev->mode_config.max_width,
+						 dev->mode_config.max_height);
+
+	/*
+	 * None of the available connectors had any modes, so add some
+	 * and try to light them up anyway
+	 */
+	if (!count) {
+		DRM_ERROR("connectors have no modes, using standard modes\n");
+		list_for_each_entry(connector,
+				    &dev->mode_config.connector_list,
+				    head)
+			drm_helper_add_std_modes(dev, connector);
+	}
+
+	drm_setup_crtcs(dev);
+
+	/* alert the driver fb layer */
+	dev->mode_config.funcs->fb_changed(dev);
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_helper_initial_config);
 
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 724e505..477caa1 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -267,7 +267,8 @@
  */
 int drm_irq_uninstall(struct drm_device * dev)
 {
-	int irq_enabled;
+	unsigned long irqflags;
+	int irq_enabled, i;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 		return -EINVAL;
@@ -277,6 +278,16 @@
 	dev->irq_enabled = 0;
 	mutex_unlock(&dev->struct_mutex);
 
+	/*
+	 * Wake up any waiters so they don't hang.
+	 */
+	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+	for (i = 0; i < dev->num_crtcs; i++) {
+		DRM_WAKEUP(&dev->vbl_queue[i]);
+		dev->vblank_enabled[i] = 0;
+	}
+	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
 	if (!irq_enabled)
 		return -EINVAL;
 
@@ -652,8 +663,9 @@
 			  vblwait->request.sequence, crtc);
 		dev->last_vblank_wait[crtc] = vblwait->request.sequence;
 		DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
-			    ((drm_vblank_count(dev, crtc)
-			      - vblwait->request.sequence) <= (1 << 23)));
+			    (((drm_vblank_count(dev, crtc) -
+			       vblwait->request.sequence) <= (1 << 23)) ||
+			     !dev->irq_enabled));
 
 		if (ret != -EINTR) {
 			struct timeval now;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 62a4bf7..bbadf1c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -177,6 +177,14 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
+	master_priv->sarea = drm_getsarea(dev);
+	if (master_priv->sarea) {
+		master_priv->sarea_priv = (drm_i915_sarea_t *)
+			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
+	} else {
+		DRM_DEBUG("sarea not found assuming DRI2 userspace\n");
+	}
+
 	if (init->ring_size != 0) {
 		if (dev_priv->ring.ring_obj != NULL) {
 			i915_dma_cleanup(dev);
@@ -1152,6 +1160,8 @@
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		intel_modeset_cleanup(dev);
 
+		i915_gem_free_all_phys_object(dev);
+
 		mutex_lock(&dev->struct_mutex);
 		i915_gem_cleanup_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 563de18..e1351825 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -72,6 +72,18 @@
 #define WATCH_INACTIVE	0
 #define WATCH_PWRITE	0
 
+#define I915_GEM_PHYS_CURSOR_0 1
+#define I915_GEM_PHYS_CURSOR_1 2
+#define I915_GEM_PHYS_OVERLAY_REGS 3
+#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
+
+struct drm_i915_gem_phys_object {
+	int id;
+	struct page **page_list;
+	drm_dma_handle_t *handle;
+	struct drm_gem_object *cur_obj;
+};
+
 typedef struct _drm_i915_ring_buffer {
 	int tail_mask;
 	unsigned long Size;
@@ -358,6 +370,9 @@
 		uint32_t bit_6_swizzle_x;
 		/** Bit 6 swizzling required for Y tiling */
 		uint32_t bit_6_swizzle_y;
+
+		/* storage for physical objects */
+		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
 	} mm;
 } drm_i915_private_t;
 
@@ -436,6 +451,9 @@
 	/** User space pin count and filp owning the pin */
 	uint32_t user_pin_count;
 	struct drm_file *pin_filp;
+
+	/** for phy allocated objects */
+	struct drm_i915_gem_phys_object *phys_obj;
 };
 
 /**
@@ -598,6 +616,11 @@
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
+int i915_gem_attach_phys_object(struct drm_device *dev,
+				struct drm_gem_object *obj, int id);
+void i915_gem_detach_phys_object(struct drm_device *dev,
+				 struct drm_gem_object *obj);
+void i915_gem_free_all_phys_object(struct drm_device *dev);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 14afc23..96316fd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -55,6 +55,9 @@
 static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
 static int i915_gem_evict_something(struct drm_device *dev);
+static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+				struct drm_i915_gem_pwrite *args,
+				struct drm_file *file_priv);
 
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end)
@@ -386,8 +389,10 @@
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj_priv->tiling_mode == I915_TILING_NONE &&
-	    dev->gtt_total != 0)
+	if (obj_priv->phys_obj)
+		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
+	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
+		 dev->gtt_total != 0)
 		ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
 	else
 		ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
@@ -1445,7 +1450,7 @@
 
 	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
 	    (obj_priv->gtt_offset & (obj->size - 1))) {
-		WARN(1, "%s: object not 1M or size aligned\n", __FUNCTION__);
+		WARN(1, "%s: object not 1M or size aligned\n", __func__);
 		return;
 	}
 
@@ -1478,7 +1483,7 @@
 
 	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
 	    (obj_priv->gtt_offset & (obj->size - 1))) {
-		WARN(1, "%s: object not 1M or size aligned\n", __FUNCTION__);
+		WARN(1, "%s: object not 1M or size aligned\n", __func__);
 		return;
 	}
 
@@ -2858,6 +2863,9 @@
 	while (obj_priv->pin_count > 0)
 		i915_gem_object_unpin(obj);
 
+	if (obj_priv->phys_obj)
+		i915_gem_detach_phys_object(dev, obj);
+
 	i915_gem_object_unbind(obj);
 
 	list = &obj->map_list;
@@ -3293,3 +3301,180 @@
 
 	i915_gem_detect_bit_6_swizzle(dev);
 }
+
+/*
+ * Create a physically contiguous memory object for this object
+ * e.g. for cursor + overlay regs
+ */
+int i915_gem_init_phys_object(struct drm_device *dev,
+			      int id, int size)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_phys_object *phys_obj;
+	int ret;
+
+	if (dev_priv->mm.phys_objs[id - 1] || !size)
+		return 0;
+
+	phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+	if (!phys_obj)
+		return -ENOMEM;
+
+	phys_obj->id = id;
+
+	phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
+	if (!phys_obj->handle) {
+		ret = -ENOMEM;
+		goto kfree_obj;
+	}
+#ifdef CONFIG_X86
+	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
+#endif
+
+	dev_priv->mm.phys_objs[id - 1] = phys_obj;
+
+	return 0;
+kfree_obj:
+	drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+	return ret;
+}
+
+void i915_gem_free_phys_object(struct drm_device *dev, int id)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_phys_object *phys_obj;
+
+	if (!dev_priv->mm.phys_objs[id - 1])
+		return;
+
+	phys_obj = dev_priv->mm.phys_objs[id - 1];
+	if (phys_obj->cur_obj) {
+		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
+	}
+
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
+#endif
+	drm_pci_free(dev, phys_obj->handle);
+	kfree(phys_obj);
+	dev_priv->mm.phys_objs[id - 1] = NULL;
+}
+
+void i915_gem_free_all_phys_object(struct drm_device *dev)
+{
+	int i;
+
+	for (i = 0; i < I915_MAX_PHYS_OBJECT; i++)
+		i915_gem_free_phys_object(dev, i);
+}
+
+void i915_gem_detach_phys_object(struct drm_device *dev,
+				 struct drm_gem_object *obj)
+{
+	struct drm_i915_gem_object *obj_priv;
+	int i;
+	int ret;
+	int page_count;
+
+	obj_priv = obj->driver_private;
+	if (!obj_priv->phys_obj)
+		return;
+
+	ret = i915_gem_object_get_page_list(obj);
+	if (ret)
+		goto out;
+
+	page_count = obj->size / PAGE_SIZE;
+
+	for (i = 0; i < page_count; i++) {
+		char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+
+		memcpy(dst, src, PAGE_SIZE);
+		kunmap_atomic(dst, KM_USER0);
+	}
+	drm_clflush_pages(obj_priv->page_list, page_count);
+	drm_agp_chipset_flush(dev);
+out:
+	obj_priv->phys_obj->cur_obj = NULL;
+	obj_priv->phys_obj = NULL;
+}
+
+int
+i915_gem_attach_phys_object(struct drm_device *dev,
+			    struct drm_gem_object *obj, int id)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	int ret = 0;
+	int page_count;
+	int i;
+
+	if (id > I915_MAX_PHYS_OBJECT)
+		return -EINVAL;
+
+	obj_priv = obj->driver_private;
+
+	if (obj_priv->phys_obj) {
+		if (obj_priv->phys_obj->id == id)
+			return 0;
+		i915_gem_detach_phys_object(dev, obj);
+	}
+
+
+	/* create a new object */
+	if (!dev_priv->mm.phys_objs[id - 1]) {
+		ret = i915_gem_init_phys_object(dev, id,
+						obj->size);
+		if (ret) {
+			DRM_ERROR("failed to init phys object %d size: %d\n", id, obj->size);
+			goto out;
+		}
+	}
+
+	/* bind to the object */
+	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
+	obj_priv->phys_obj->cur_obj = obj;
+
+	ret = i915_gem_object_get_page_list(obj);
+	if (ret) {
+		DRM_ERROR("failed to get page list\n");
+		goto out;
+	}
+
+	page_count = obj->size / PAGE_SIZE;
+
+	for (i = 0; i < page_count; i++) {
+		char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+
+		memcpy(dst, src, PAGE_SIZE);
+		kunmap_atomic(src, KM_USER0);
+	}
+
+	return 0;
+out:
+	return ret;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+		     struct drm_i915_gem_pwrite *args,
+		     struct drm_file *file_priv)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	void *obj_addr;
+	int ret;
+	char __user *user_data;
+
+	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+
+	DRM_ERROR("obj_addr %p, %lld\n", obj_addr, args->size);
+	ret = copy_from_user(obj_addr, user_data, args->size);
+	if (ret)
+		return -EFAULT;
+
+	drm_agp_chipset_flush(dev);
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0cadafb..6290219 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -411,6 +411,12 @@
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
+	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+	u32 pipeconf;
+
+	pipeconf = I915_READ(pipeconf_reg);
+	if (!(pipeconf & PIPEACONF_ENABLE))
+		return -EINVAL;
 
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 	if (IS_I965G(dev))
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8ccb9c3..31c3732 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -401,6 +401,8 @@
 	I915_WRITE(dspstride, crtc->fb->pitch);
 
 	dspcntr = I915_READ(dspcntr_reg);
+	/* Mask out pixel format bits in case we change it */
+	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
 	switch (crtc->fb->bits_per_pixel) {
 	case 8:
 		dspcntr |= DISPPLANE_8BPP;
@@ -1014,21 +1016,25 @@
 
 	if (bo->size < width * height * 4) {
 		DRM_ERROR("buffer is to small\n");
-		drm_gem_object_unreference(bo);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto fail;
 	}
 
-	if (dev_priv->cursor_needs_physical) {
-		addr = dev->agp->base + obj_priv->gtt_offset;
-	} else {
+	/* we only need to pin inside GTT if cursor is non-phy */
+	if (!dev_priv->cursor_needs_physical) {
+		ret = i915_gem_object_pin(bo, PAGE_SIZE);
+		if (ret) {
+			DRM_ERROR("failed to pin cursor bo\n");
+			goto fail;
+		}
 		addr = obj_priv->gtt_offset;
-	}
-
-	ret = i915_gem_object_pin(bo, PAGE_SIZE);
-	if (ret) {
-		DRM_ERROR("failed to pin cursor bo\n");
-		drm_gem_object_unreference(bo);
-		return ret;
+	} else {
+		ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
+		if (ret) {
+			DRM_ERROR("failed to attach phys object\n");
+			goto fail;
+		}
+		addr = obj_priv->phys_obj->handle->busaddr;
 	}
 
 	temp = 0;
@@ -1041,14 +1047,25 @@
 	I915_WRITE(base, addr);
 
 	if (intel_crtc->cursor_bo) {
-		i915_gem_object_unpin(intel_crtc->cursor_bo);
+		if (dev_priv->cursor_needs_physical) {
+			if (intel_crtc->cursor_bo != bo)
+				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
+		} else
+			i915_gem_object_unpin(intel_crtc->cursor_bo);
+		mutex_lock(&dev->struct_mutex);
 		drm_gem_object_unreference(intel_crtc->cursor_bo);
+		mutex_unlock(&dev->struct_mutex);
 	}
 
 	intel_crtc->cursor_addr = addr;
 	intel_crtc->cursor_bo = bo;
 
 	return 0;
+fail:
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(bo);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
 }
 
 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index ccecfaf..2fafdcc 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -456,6 +456,13 @@
 		dev_priv->panel_fixed_mode =
 			drm_mode_duplicate(dev, dev_priv->vbt_mode);
 		mutex_unlock(&dev->mode_config.mutex);
+		if (dev_priv->panel_fixed_mode) {
+			dev_priv->panel_fixed_mode->type |=
+				DRM_MODE_TYPE_PREFERRED;
+			drm_mode_probed_add(connector,
+					    dev_priv->panel_fixed_mode);
+			goto out;
+		}
 	}
 
 	/*
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 03cb494..f0a0f72 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -102,7 +102,7 @@
 	struct usbhid_device *usbhid =
 		container_of(work, struct usbhid_device, reset_work);
 	struct hid_device *hid = usbhid->hid;
-	int rc_lock, rc = 0;
+	int rc = 0;
 
 	if (test_bit(HID_CLEAR_HALT, &usbhid->iofl)) {
 		dev_dbg(&usbhid->intf->dev, "clear halt\n");
@@ -113,11 +113,10 @@
 
 	else if (test_bit(HID_RESET_PENDING, &usbhid->iofl)) {
 		dev_dbg(&usbhid->intf->dev, "resetting device\n");
-		rc = rc_lock = usb_lock_device_for_reset(hid_to_usb_dev(hid), usbhid->intf);
-		if (rc_lock >= 0) {
+		rc = usb_lock_device_for_reset(hid_to_usb_dev(hid), usbhid->intf);
+		if (rc == 0) {
 			rc = usb_reset_device(hid_to_usb_dev(hid));
-			if (rc_lock)
-				usb_unlock_device(hid_to_usb_dev(hid));
+			usb_unlock_device(hid_to_usb_dev(hid));
 		}
 		clear_bit(HID_RESET_PENDING, &usbhid->iofl);
 	}
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 6a98f9f..d73eea3 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -874,12 +874,14 @@
 	INIT_LIST_HEAD(&hiddev->list);
 	spin_lock_init(&hiddev->list_lock);
 	mutex_init(&hiddev->existancelock);
+	hid->hiddev = hiddev;
 	hiddev->hid = hid;
 	hiddev->exist = 1;
 
 	retval = usb_register_dev(usbhid->intf, &hiddev_class);
 	if (retval) {
 		err_hid("Not able to get a minor for this device.");
+		hid->hiddev = NULL;
 		kfree(hiddev);
 		return -1;
 	} else {
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index c709e82..b84bf06 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -189,6 +189,16 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called adt7473.
 
+config SENSORS_ADT7475
+	tristate "Analog Devices ADT7475"
+	depends on I2C && EXPERIMENTAL
+	help
+	  If you say yes here you get support for the Analog Devices
+	  ADT7475 hardware monitoring chips.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called adt7475.
+
 config SENSORS_K8TEMP
 	tristate "AMD Athlon64/FX or Opteron temperature sensor"
 	depends on X86 && PCI && EXPERIMENTAL
@@ -284,11 +294,12 @@
 	  will be called f71805f.
 
 config SENSORS_F71882FG
-	tristate "Fintek F71882FG and F71883FG"
+	tristate "Fintek F71862FG, F71882FG and F8000"
 	depends on EXPERIMENTAL
 	help
 	  If you say yes here you get support for hardware monitoring
-	  features of the Fintek F71882FG and F71883FG Super-I/O chips.
+	  features of the Fintek F71882FG/F71883FG, F71862FG/F71863FG
+	  and F8000 Super-I/O chips.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called f71882fg.
@@ -304,9 +315,13 @@
 	  will be called f75375s.
 
 config SENSORS_FSCHER
-	tristate "FSC Hermes"
+	tristate "FSC Hermes (DEPRECATED)"
 	depends on X86 && I2C
 	help
+	  This driver is DEPRECATED, please use the new merged fschmd
+	  ("FSC Poseidon, Scylla, Hermes, Heimdall and Heracles") driver
+	  instead.
+
 	  If you say yes here you get support for Fujitsu Siemens
 	  Computers Hermes sensor chips.
 
@@ -314,9 +329,13 @@
 	  will be called fscher.
 
 config SENSORS_FSCPOS
-	tristate "FSC Poseidon"
+	tristate "FSC Poseidon (DEPRECATED)"
 	depends on X86 && I2C
 	help
+	  This driver is DEPRECATED, please use the new merged fschmd
+	  ("FSC Poseidon, Scylla, Hermes, Heimdall and Heracles") driver
+	  instead.
+
 	  If you say yes here you get support for Fujitsu Siemens
 	  Computers Poseidon sensor chips.
 
@@ -325,14 +344,15 @@
 
 config SENSORS_FSCHMD
 	tristate "FSC Poseidon, Scylla, Hermes, Heimdall and Heracles"
-	depends on X86 && I2C && EXPERIMENTAL
+	depends on X86 && I2C
 	help
 	  If you say yes here you get support for various Fujitsu Siemens
-	  Computers sensor chips.
+	  Computers sensor chips, including support for the integrated
+	  watchdog.
 
-	  This is a new merged driver for FSC sensor chips which is intended
-	  as a replacment for the fscpos, fscscy and fscher drivers and adds
-	  support for several other FCS sensor chips.
+	  This is a merged driver for FSC sensor chips replacing the fscpos,
+	  fscscy and fscher drivers and adding support for several other FSC
+	  sensor chips.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called fschmd.
@@ -399,7 +419,8 @@
 	select HWMON_VID
 	help
 	  If you say yes here you get support for ITE IT8705F, IT8712F,
-	  IT8716F, IT8718F and IT8726F sensor chips, and the SiS960 clone.
+	  IT8716F, IT8718F, IT8720F and IT8726F sensor chips, and the
+	  SiS960 clone.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called it87.
@@ -417,11 +438,12 @@
 	  will be called lm63.
 
 config SENSORS_LM70
-	tristate "National Semiconductor LM70"
+	tristate "National Semiconductor LM70 / Texas Instruments TMP121"
 	depends on SPI_MASTER && EXPERIMENTAL
 	help
 	  If you say yes here you get support for the National Semiconductor
-	  LM70 digital temperature sensor chip.
+	  LM70 and Texas Instruments TMP121/TMP123 digital temperature
+	  sensor chips.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called lm70.
@@ -548,6 +570,17 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called lm93.
 
+config SENSORS_LTC4245
+	tristate "Linear Technology LTC4245"
+	depends on I2C && EXPERIMENTAL
+	default n
+	help
+	  If you say yes here you get support for Linear Technology LTC4245
+	  Multiple Supply Hot Swap Controller I2C interface.
+
+	  This driver can also be built as a module. If so, the module will
+	  be called ltc4245.
+
 config SENSORS_MAX1111
 	tristate "Maxim MAX1111 Multichannel, Serial 8-bit ADC chip"
 	depends on SPI_MASTER
@@ -838,6 +871,8 @@
 config SENSORS_LIS3LV02D
 	tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer"
 	depends on ACPI && INPUT
+	select NEW_LEDS
+	select LEDS_CLASS
 	default n
 	help
 	  This driver provides support for the LIS3LV02Dx accelerometer. In
@@ -849,10 +884,16 @@
 	  /sys/devices/platform/lis3lv02d.
 
 	  This driver also provides an absolute input class device, allowing
-	  the laptop to act as a pinball machine-esque joystick.
+	  the laptop to act as a pinball machine-esque joystick. On HP laptops,
+	  if the LED infrastructure is activated, support for an LED indicating
+	  disk protection will be provided as hp:red:hddprotection.
 
-	  This driver can also be built as a module.  If so, the module
-	  will be called lis3lv02d.
+	  This driver can also be built as modules.  If so, the core module
+	  will be called lis3lv02d and a specific module for HP laptops will be
+	  called hp_accel.
+
+	  Say Y here if you have an applicable laptop and want to experience
+	  the awesome power of lis3lv02d.
 
 config SENSORS_APPLESMC
 	tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 58fc5be..2e80f37 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -28,6 +28,8 @@
 obj-$(CONFIG_SENSORS_ADT7462)	+= adt7462.o
 obj-$(CONFIG_SENSORS_ADT7470)	+= adt7470.o
 obj-$(CONFIG_SENSORS_ADT7473)	+= adt7473.o
+obj-$(CONFIG_SENSORS_ADT7475)	+= adt7475.o
+
 obj-$(CONFIG_SENSORS_APPLESMC)	+= applesmc.o
 obj-$(CONFIG_SENSORS_AMS)	+= ams/
 obj-$(CONFIG_SENSORS_ATXP1)	+= atxp1.o
@@ -49,7 +51,7 @@
 obj-$(CONFIG_SENSORS_IBMPEX)	+= ibmpex.o
 obj-$(CONFIG_SENSORS_IT87)	+= it87.o
 obj-$(CONFIG_SENSORS_K8TEMP)	+= k8temp.o
-obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o
+obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o hp_accel.o
 obj-$(CONFIG_SENSORS_LM63)	+= lm63.o
 obj-$(CONFIG_SENSORS_LM70)	+= lm70.o
 obj-$(CONFIG_SENSORS_LM75)	+= lm75.o
@@ -62,6 +64,7 @@
 obj-$(CONFIG_SENSORS_LM90)	+= lm90.o
 obj-$(CONFIG_SENSORS_LM92)	+= lm92.o
 obj-$(CONFIG_SENSORS_LM93)	+= lm93.o
+obj-$(CONFIG_SENSORS_LTC4245)	+= ltc4245.o
 obj-$(CONFIG_SENSORS_MAX1111)	+= max1111.o
 obj-$(CONFIG_SENSORS_MAX1619)	+= max1619.o
 obj-$(CONFIG_SENSORS_MAX6650)	+= max6650.o
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index 70bb854..e52b388 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -279,7 +279,7 @@
 		{ "OTES1 Fan",		36, 2, 60, 1, 0 },
 		{ NULL, 0, 0, 0, 0, 0 } }
 	},
-	{ 0x0011, "AT8 32X(ATI RD580-ULI M1575)", {
+	{ 0x0011, "AT8 32X", {
 		{ "CPU Core",		 0, 0, 10, 1, 0 },
 		{ "DDR",		 1, 0, 20, 1, 0 },
 		{ "DDR VTT",		 2, 0, 10, 1, 0 },
@@ -402,7 +402,7 @@
 		{ "AUX3 Fan",		36, 2, 60, 1, 0 },
 		{ NULL, 0, 0, 0, 0, 0 } }
 	},
-	{ 0x0016, "AW9D-MAX       (Intel i975-ICH7)", {
+	{ 0x0016, "AW9D-MAX", {
 		{ "CPU Core",		 0, 0, 10, 1, 0 },
 		{ "DDR2",		 1, 0, 20, 1, 0 },
 		{ "DDR2 VTT",		 2, 0, 10, 1, 0 },
@@ -482,7 +482,7 @@
 		{ "AUX3 Fan",		36, 2, 60, 1, 0 },
 		{ NULL, 0, 0, 0, 0, 0 } }
 	},
-	{ 0x0019, NULL /* Unknown, need DMI string */, {
+	{ 0x0019, "IN9 32X MAX", {
 		{ "CPU Core",		 7, 0, 10, 1, 0 },
 		{ "DDR2",		13, 0, 20, 1, 0 },
 		{ "DDR2 VTT",		14, 0, 10, 1, 0 },
@@ -509,7 +509,7 @@
 		{ "AUX3 FAN",		36, 2, 60, 1, 0 },
 		{ NULL, 0, 0, 0, 0, 0 } }
 	},
-	{ 0x001A, "IP35 Pro(Intel P35-ICH9R)", {
+	{ 0x001A, "IP35 Pro", {
 		{ "CPU Core",		 0, 0, 10, 1, 0 },
 		{ "DDR2",		 1, 0, 20, 1, 0 },
 		{ "DDR2 VTT",		 2, 0, 10, 1, 0 },
@@ -1128,6 +1128,7 @@
 {
 	const char *board_vendor, *board_name;
 	int i, err = (force) ? 1 : -ENODEV;
+	size_t sublen;
 
 	board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
 	if (!board_vendor || strcmp(board_vendor, "http://www.abit.com.tw/"))
@@ -1137,9 +1138,20 @@
 	if (!board_name)
 		return err;
 
+	/* At the moment, we don't care about the part of the vendor
+	 * DMI string contained in brackets. Truncate the string at
+	 * the first occurrence of a bracket. Trim any trailing space
+	 * from the substring.
+	 */
+	sublen = strcspn(board_name, "(");
+	while (sublen > 0 && board_name[sublen - 1] == ' ')
+		sublen--;
+
 	for (i = 0; abituguru3_motherboards[i].id; i++) {
 		const char *dmi_name = abituguru3_motherboards[i].dmi_name;
-		if (dmi_name && !strcmp(dmi_name, board_name))
+		if (!dmi_name || strlen(dmi_name) != sublen)
+			continue;
+		if (!strncasecmp(board_name, dmi_name, sublen))
 			break;
 	}
 
@@ -1153,7 +1165,7 @@
 
 static inline int abituguru3_dmi_detect(void)
 {
-	return -ENODEV;
+	return 1;
 }
 
 #endif /* CONFIG_DMI */
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
new file mode 100644
index 0000000..d39877a
--- /dev/null
+++ b/drivers/hwmon/adt7475.c
@@ -0,0 +1,1221 @@
+/*
+ * adt7475 - Thermal sensor driver for the ADT7475 chip and derivatives
+ * Copyright (C) 2007-2008, Advanced Micro Devices, Inc.
+ * Copyright (C) 2008 Jordan Crouse <jordan@cosmicpenguin.net>
+ * Copyright (C) 2008 Hans de Goede <hdegoede@redhat.com>
+
+ * Derived from the lm83 driver by Jean Delvare
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+
+/* Indexes for the sysfs hooks */
+
+#define INPUT		0
+#define MIN		1
+#define MAX		2
+#define CONTROL		3
+#define OFFSET		3
+#define AUTOMIN		4
+#define THERM		5
+#define HYSTERSIS	6
+
+/* These are unique identifiers for the sysfs functions - unlike the
+   numbers above, these are not also indexes into an array
+*/
+
+#define ALARM		9
+#define FAULT		10
+
+/* 7475 Common Registers */
+
+#define REG_VOLTAGE_BASE	0x21
+#define REG_TEMP_BASE		0x25
+#define REG_TACH_BASE		0x28
+#define REG_PWM_BASE		0x30
+#define REG_PWM_MAX_BASE	0x38
+
+#define REG_DEVID		0x3D
+#define REG_VENDID		0x3E
+
+#define REG_STATUS1		0x41
+#define REG_STATUS2		0x42
+
+#define REG_VOLTAGE_MIN_BASE	0x46
+#define REG_VOLTAGE_MAX_BASE	0x47
+
+#define REG_TEMP_MIN_BASE	0x4E
+#define REG_TEMP_MAX_BASE	0x4F
+
+#define REG_TACH_MIN_BASE	0x54
+
+#define REG_PWM_CONFIG_BASE	0x5C
+
+#define REG_TEMP_TRANGE_BASE	0x5F
+
+#define REG_PWM_MIN_BASE	0x64
+
+#define REG_TEMP_TMIN_BASE	0x67
+#define REG_TEMP_THERM_BASE	0x6A
+
+#define REG_REMOTE1_HYSTERSIS	0x6D
+#define REG_REMOTE2_HYSTERSIS	0x6E
+
+#define REG_TEMP_OFFSET_BASE	0x70
+
+#define REG_EXTEND1		0x76
+#define REG_EXTEND2		0x77
+#define REG_CONFIG5		0x7C
+
+#define CONFIG5_TWOSCOMP	0x01
+#define CONFIG5_TEMPOFFSET	0x02
+
+/* ADT7475 Settings */
+
+#define ADT7475_VOLTAGE_COUNT	2
+#define ADT7475_TEMP_COUNT	3
+#define ADT7475_TACH_COUNT	4
+#define ADT7475_PWM_COUNT	3
+
+/* Macro to read the registers */
+
+#define adt7475_read(reg) i2c_smbus_read_byte_data(client, (reg))
+
+/* Macros to easily index the registers */
+
+#define TACH_REG(idx) (REG_TACH_BASE + ((idx) * 2))
+#define TACH_MIN_REG(idx) (REG_TACH_MIN_BASE + ((idx) * 2))
+
+#define PWM_REG(idx) (REG_PWM_BASE + (idx))
+#define PWM_MAX_REG(idx) (REG_PWM_MAX_BASE + (idx))
+#define PWM_MIN_REG(idx) (REG_PWM_MIN_BASE + (idx))
+#define PWM_CONFIG_REG(idx) (REG_PWM_CONFIG_BASE + (idx))
+
+#define VOLTAGE_REG(idx) (REG_VOLTAGE_BASE + (idx))
+#define VOLTAGE_MIN_REG(idx) (REG_VOLTAGE_MIN_BASE + ((idx) * 2))
+#define VOLTAGE_MAX_REG(idx) (REG_VOLTAGE_MAX_BASE + ((idx) * 2))
+
+#define TEMP_REG(idx) (REG_TEMP_BASE + (idx))
+#define TEMP_MIN_REG(idx) (REG_TEMP_MIN_BASE + ((idx) * 2))
+#define TEMP_MAX_REG(idx) (REG_TEMP_MAX_BASE + ((idx) * 2))
+#define TEMP_TMIN_REG(idx) (REG_TEMP_TMIN_BASE + (idx))
+#define TEMP_THERM_REG(idx) (REG_TEMP_THERM_BASE + (idx))
+#define TEMP_OFFSET_REG(idx) (REG_TEMP_OFFSET_BASE + (idx))
+#define TEMP_TRANGE_REG(idx) (REG_TEMP_TRANGE_BASE + (idx))
+
+static unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END };
+
+I2C_CLIENT_INSMOD_1(adt7475);
+
+static const struct i2c_device_id adt7475_id[] = {
+	{ "adt7475", adt7475 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, adt7475_id);
+
+struct adt7475_data {
+	struct device *hwmon_dev;
+	struct mutex lock;
+
+	unsigned long measure_updated;
+	unsigned long limits_updated;
+	char valid;
+
+	u8 config5;
+	u16 alarms;
+	u16 voltage[3][3];
+	u16 temp[7][3];
+	u16 tach[2][4];
+	u8 pwm[4][3];
+	u8 range[3];
+	u8 pwmctl[3];
+	u8 pwmchan[3];
+};
+
+static struct i2c_driver adt7475_driver;
+static struct adt7475_data *adt7475_update_device(struct device *dev);
+static void adt7475_read_hystersis(struct i2c_client *client);
+static void adt7475_read_pwm(struct i2c_client *client, int index);
+
+/* Given a temp value, convert it to register value */
+
+static inline u16 temp2reg(struct adt7475_data *data, long val)
+{
+	u16 ret;
+
+	if (!(data->config5 & CONFIG5_TWOSCOMP)) {
+		val = SENSORS_LIMIT(val, -64000, 191000);
+		ret = (val + 64500) / 1000;
+	} else {
+		val = SENSORS_LIMIT(val, -128000, 127000);
+		if (val < -500)
+			ret = (256500 + val) / 1000;
+		else
+			ret = (val + 500) / 1000;
+	}
+
+	return ret << 2;
+}
+
+/* Given a register value, convert it to a real temp value */
+
+static inline int reg2temp(struct adt7475_data *data, u16 reg)
+{
+	if (data->config5 & CONFIG5_TWOSCOMP) {
+		if (reg >= 512)
+			return (reg - 1024) * 250;
+		else
+			return reg * 250;
+	} else
+		return (reg - 256) * 250;
+}
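+
+/* Worked example for the two conversions above (derived from the formulas
+ * in this file, not from the datasheet): in Offset-64 mode temp2reg(25000)
+ * is ((25000 + 64500) / 1000) << 2 = 356 and reg2temp(356) is
+ * (356 - 256) * 250 = 25000 millidegrees C; in twos-complement mode
+ * temp2reg(-10000) is ((256500 - 10000) / 1000) << 2 = 984 and
+ * reg2temp(984) is (984 - 1024) * 250 = -10000.  The two low-order bits
+ * carry the extended resolution that the measurement path reads from
+ * REG_EXTEND2.
+ */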
+
+static inline int tach2rpm(u16 tach)
+{
+	if (tach == 0 || tach == 0xFFFF)
+		return 0;
+
+	return (90000 * 60) / tach;
+}
+
+static inline u16 rpm2tach(unsigned long rpm)
+{
+	if (rpm == 0)
+		return 0;
+
+	return SENSORS_LIMIT((90000 * 60) / rpm, 1, 0xFFFF);
+}
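+
+/* Example (assuming, as the formula implies, that the tach registers count
+ * 90 kHz clock periods per revolution): a reading of 4096 gives
+ * (90000 * 60) / 4096 = 1318 RPM, and rpm2tach(1318) stores
+ * 5400000 / 1318 = 4097, so a round trip loses at most a tick to integer
+ * truncation.  Readings of 0 and 0xFFFF are reported as 0 RPM.
+ */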
+
+static inline int reg2vcc(u16 reg)
+{
+	return (4296 * reg) / 1000;
+}
+
+static inline int reg2vccp(u16 reg)
+{
+	return (2929 * reg) / 1000;
+}
+
+static inline u16 vcc2reg(long vcc)
+{
+	vcc = SENSORS_LIMIT(vcc, 0, 4396);
+	return (vcc * 1000) / 4296;
+}
+
+static inline u16 vccp2reg(long vcc)
+{
+	vcc = SENSORS_LIMIT(vcc, 0, 2998);
+	return (vcc * 1000) / 2929;
+}
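+
+/* Example of the voltage scaling (the 4296 and 2929 factors come straight
+ * from the helpers above; full scale is a 10-bit reading of 1023):
+ * reg2vcc(1023) = 4394 mV and reg2vccp(1023) = 2996 mV, consistent with
+ * the write clamps of 4396 and 2998.  A 3300 mV limit is stored as
+ * vcc2reg(3300) = 768 and reads back as reg2vcc(768) = 3299 mV, so a
+ * millivolt or two can be lost to integer truncation.
+ */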
+
+static u16 adt7475_read_word(struct i2c_client *client, int reg)
+{
+	u16 val;
+
+	val = i2c_smbus_read_byte_data(client, reg);
+	val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);
+
+	return val;
+}
+
+static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
+{
+	i2c_smbus_write_byte_data(client, reg + 1, val >> 8);
+	i2c_smbus_write_byte_data(client, reg, val & 0xFF);
+}
+
+/* Find the nearest value in a table - used for pwm frequency and
+   auto temp range */
+static int find_nearest(long val, const int *array, int size)
+{
+	int i;
+
+	if (val < array[0])
+		return 0;
+
+	if (val > array[size - 1])
+		return size - 1;
+
+	for (i = 0; i < size - 1; i++) {
+		int a, b;
+
+		if (val > array[i + 1])
+			continue;
+
+		a = val - array[i];
+		b = array[i + 1] - val;
+
+		return (a <= b) ? i : i + 1;
+	}
+
+	return 0;
+}
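+
+/* Usage example (pwmfreq_table is defined further down): find_nearest(30,
+ * pwmfreq_table, 8) walks { 11, 14, 22, 29, 35, 44, 58, 88 } and returns
+ * index 3, because 30 is closer to 29 than to 35; values below 11 return
+ * index 0 and values above 88 return index 7.
+ */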
+
+static ssize_t show_voltage(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct adt7475_data *data = adt7475_update_device(dev);
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	unsigned short val;
+
+	switch (sattr->nr) {
+	case ALARM:
+		return sprintf(buf, "%d\n",
+			       (data->alarms >> (sattr->index + 1)) & 1);
+	default:
+		val = data->voltage[sattr->nr][sattr->index];
+		return sprintf(buf, "%d\n",
+			       sattr->index ==
+			       0 ? reg2vccp(val) : reg2vcc(val));
+	}
+}
+
+static ssize_t set_voltage(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct adt7475_data *data = i2c_get_clientdata(client);
+	unsigned char reg;
+	long val;
+
+	if (strict_strtol(buf, 10, &val))
+		return -EINVAL;
+
+	mutex_lock(&data->lock);
+
+	data->voltage[sattr->nr][sattr->index] =
+		sattr->index ? vcc2reg(val) : vccp2reg(val);
+
+	if (sattr->nr == MIN)
+		reg = VOLTAGE_MIN_REG(sattr->index);
+	else
+		reg = VOLTAGE_MAX_REG(sattr->index);
+
+	i2c_smbus_write_byte_data(client, reg,
+				  data->voltage[sattr->nr][sattr->index] >> 2);
+	mutex_unlock(&data->lock);
+
+	return count;
+}
+
+static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct adt7475_data *data = adt7475_update_device(dev);
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	int out;
+
+	switch (sattr->nr) {
+	case HYSTERSIS:
+		mutex_lock(&data->lock);
+		out = data->temp[sattr->nr][sattr->index];
+		if (sattr->index != 1)
+			out = (out >> 4) & 0xF;
+		else
+			out = (out & 0xF);
+		/* Show the value as an absolute number tied to
+		 * THERM */
+		out = reg2temp(data, data->temp[THERM][sattr->index]) -
+			out * 1000;
+		mutex_unlock(&data->lock);
+		break;
+
+	case OFFSET:
+		/* Offset is always 2's complement, regardless of the
+		 * setting in CONFIG5 */
+		mutex_lock(&data->lock);
+		out = (s8)data->temp[sattr->nr][sattr->index];
+		if (data->config5 & CONFIG5_TEMPOFFSET)
+			out *= 1000;
+		else
+			out *= 500;
+		mutex_unlock(&data->lock);
+		break;
+
+	case ALARM:
+		out = (data->alarms >> (sattr->index + 4)) & 1;
+		break;
+
+	case FAULT:
+		/* Note - only for remote1 and remote2 */
+		out = data->alarms & (sattr->index ? 0x8000 : 0x4000);
+		out = out ? 0 : 1;
+		break;
+
+	default:
+		/* All other temp values are in the configured format */
+		out = reg2temp(data, data->temp[sattr->nr][sattr->index]);
+	}
+
+	return sprintf(buf, "%d\n", out);
+}
+
+static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct adt7475_data *data = i2c_get_clientdata(client);
+	unsigned char reg = 0;
+	u8 out;
+	int temp;
+	long val;
+
+	if (strict_strtol(buf, 10, &val))
+		return -EINVAL;
+
+	mutex_lock(&data->lock);
+
+	/* We need the config register in all cases for temp <-> reg conv. */
+	data->config5 = adt7475_read(REG_CONFIG5);
+
+	switch (sattr->nr) {
+	case OFFSET:
+		if (data->config5 & CONFIG5_TEMPOFFSET) {
+			val = SENSORS_LIMIT(val, -63000, 127000);
+			out = data->temp[OFFSET][sattr->index] = val / 1000;
+		} else {
+			val = SENSORS_LIMIT(val, -63000, 64000);
+			out = data->temp[OFFSET][sattr->index] = val / 500;
+		}
+		break;
+
+	case HYSTERSIS:
+		/* The value is given as an absolute temperature; turn it
+		   into an offset from THERM */
+
+		/* Read fresh THERM and HYSTERSIS values from the chip */
+		data->temp[THERM][sattr->index] =
+			adt7475_read(TEMP_THERM_REG(sattr->index)) << 2;
+		adt7475_read_hystersis(client);
+
+		temp = reg2temp(data, data->temp[THERM][sattr->index]);
+		val = SENSORS_LIMIT(val, temp - 15000, temp);
+		val = (temp - val) / 1000;
+
+		if (sattr->index != 1) {
+			data->temp[HYSTERSIS][sattr->index] &= 0xF0;
+			data->temp[HYSTERSIS][sattr->index] |= (val & 0xF) << 4;
+		} else {
+			data->temp[HYSTERSIS][sattr->index] &= 0x0F;
+			data->temp[HYSTERSIS][sattr->index] |= (val & 0xF);
+		}
+
+		out = data->temp[HYSTERSIS][sattr->index];
+		break;
+
+	default:
+		data->temp[sattr->nr][sattr->index] = temp2reg(data, val);
+
+		/* We maintain an extra 2 bits of precision for simplicity
+		 * - shift those back off before writing the value */
+		out = (u8) (data->temp[sattr->nr][sattr->index] >> 2);
+	}
+
+	switch (sattr->nr) {
+	case MIN:
+		reg = TEMP_MIN_REG(sattr->index);
+		break;
+	case MAX:
+		reg = TEMP_MAX_REG(sattr->index);
+		break;
+	case OFFSET:
+		reg = TEMP_OFFSET_REG(sattr->index);
+		break;
+	case AUTOMIN:
+		reg = TEMP_TMIN_REG(sattr->index);
+		break;
+	case THERM:
+		reg = TEMP_THERM_REG(sattr->index);
+		break;
+	case HYSTERSIS:
+		if (sattr->index != 2)
+			reg = REG_REMOTE1_HYSTERSIS;
+		else
+			reg = REG_REMOTE2_HYSTERSIS;
+
+		break;
+	}
+
+	i2c_smbus_write_byte_data(client, reg, out);
+
+	mutex_unlock(&data->lock);
+	return count;
+}
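+
+/* Example of the hysteresis encoding handled above: with temp1_crit at
+ * 80000, writing 76000 to temp1_crit_hyst is clamped to the
+ * [65000, 80000] window and stored as the nibble (80000 - 76000) / 1000
+ * = 4; reading it back reports 80000 - 4 * 1000 = 76000.  The temp1 and
+ * temp2 channels share REG_REMOTE1_HYSTERSIS (high and low nibble
+ * respectively), temp3 uses REG_REMOTE2_HYSTERSIS.
+ */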
+
+/* Table of autorange values - the user will write the value in millidegrees,
+   and we'll convert it */
+static const int autorange_table[] = {
+	2000, 2500, 3330, 4000, 5000, 6670, 8000,
+	10000, 13330, 16000, 20000, 26670, 32000, 40000,
+	53330, 80000
+};
+
+static ssize_t show_point2(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct adt7475_data *data = adt7475_update_device(dev);
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	int out, val;
+
+	mutex_lock(&data->lock);
+	out = (data->range[sattr->index] >> 4) & 0x0F;
+	val = reg2temp(data, data->temp[AUTOMIN][sattr->index]);
+	mutex_unlock(&data->lock);
+
+	return sprintf(buf, "%d\n", val + autorange_table[out]);
+}
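+
+/* Example of the auto point2 handling above and below: with
+ * temp1_auto_point1_temp at 60000 and the top nibble of the range register
+ * set to 4, temp1_auto_point2_temp reads 60000 + autorange_table[4] =
+ * 65000.  Writing 70000 stores find_nearest(10000, ...) = index 7 in that
+ * nibble, and since 10000 is an exact table entry it reads back as 70000.
+ */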
+
+static ssize_t set_point2(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct adt7475_data *data = i2c_get_clientdata(client);
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	int temp;
+	long val;
+
+	if (strict_strtol(buf, 10, &val))
+		return -EINVAL;
+
+	mutex_lock(&data->lock);
+
+	/* Get a fresh copy of the needed registers */
+	data->config5 = adt7475_read(REG_CONFIG5);
+	data->temp[AUTOMIN][sattr->index] =
+		adt7475_read(TEMP_TMIN_REG(sattr->index)) << 2;
+	data->range[sattr->index] =
+		adt7475_read(TEMP_TRANGE_REG(sattr->index));
+
+	/* The user will write an absolute value, so subtract the start point
+	   to figure the range */
+	temp = reg2temp(data, data->temp[AUTOMIN][sattr->index]);
+	val = SENSORS_LIMIT(val, temp + autorange_table[0],
+		temp + autorange_table[ARRAY_SIZE(autorange_table) - 1]);
+	val -= temp;
+
+	/* Find the nearest table entry to what the user wrote */
+	val = find_nearest(val, autorange_table, ARRAY_SIZE(autorange_table));
+
+	data->range[sattr->index] &= ~0xF0;
+	data->range[sattr->index] |= val << 4;
+
+	i2c_smbus_write_byte_data(client, TEMP_TRANGE_REG(sattr->index),
+				  data->range[sattr->index]);
+
+	mutex_unlock(&data->lock);
+	return count;
+}
+
+static ssize_t show_tach(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct adt7475_data *data = adt7475_update_device(dev);
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	int out;
+
+	if (sattr->nr == ALARM)
+		out = (data->alarms >> (sattr->index + 10)) & 1;
+	else
+		out = tach2rpm(data->tach[sattr->nr][sattr->index]);
+
+	return sprintf(buf, "%d\n", out);
+}
+
+static ssize_t set_tach(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct adt7475_data *data = i2c_get_clientdata(client);
+	unsigned long val;
+
+	if (strict_strtoul(buf, 10, &val))
+		return -EINVAL;
+
+	mutex_lock(&data->lock);
+
+	data->tach[MIN][sattr->index] = rpm2tach(val);
+
+	adt7475_write_word(client, TACH_MIN_REG(sattr->index),
+			   data->tach[MIN][sattr->index]);
+
+	mutex_unlock(&data->lock);
+	return count;
+}
+
+static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct adt7475_data *data = adt7475_update_device(dev);
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+
+	return sprintf(buf, "%d\n", data->pwm[sattr->nr][sattr->index]);
+}
+
+static ssize_t show_pwmchan(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct adt7475_data *data = adt7475_update_device(dev);
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+
+	return sprintf(buf, "%d\n", data->pwmchan[sattr->index]);
+}
+
+static ssize_t show_pwmctrl(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct adt7475_data *data = adt7475_update_device(dev);
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+
+	return sprintf(buf, "%d\n", data->pwmctl[sattr->index]);
+}
+
+static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct adt7475_data *data = i2c_get_clientdata(client);
+	unsigned char reg = 0;
+	long val;
+
+	if (strict_strtol(buf, 10, &val))
+		return -EINVAL;
+
+	mutex_lock(&data->lock);
+
+	switch (sattr->nr) {
+	case INPUT:
+		/* Get a fresh value for CONTROL */
+		data->pwm[CONTROL][sattr->index] =
+			adt7475_read(PWM_CONFIG_REG(sattr->index));
+
+		/* If we are not in manual mode, then we shouldn't allow
+		 * the user to set the pwm speed */
+		if (((data->pwm[CONTROL][sattr->index] >> 5) & 7) != 7) {
+			mutex_unlock(&data->lock);
+			return count;
+		}
+
+		reg = PWM_REG(sattr->index);
+		break;
+
+	case MIN:
+		reg = PWM_MIN_REG(sattr->index);
+		break;
+
+	case MAX:
+		reg = PWM_MAX_REG(sattr->index);
+		break;
+	}
+
+	data->pwm[sattr->nr][sattr->index] = SENSORS_LIMIT(val, 0, 0xFF);
+	i2c_smbus_write_byte_data(client, reg,
+				  data->pwm[sattr->nr][sattr->index]);
+
+	mutex_unlock(&data->lock);
+
+	return count;
+}
+
+/* Called by set_pwmctrl and set_pwmchan */
+
+static int hw_set_pwm(struct i2c_client *client, int index,
+		      unsigned int pwmctl, unsigned int pwmchan)
+{
+	struct adt7475_data *data = i2c_get_clientdata(client);
+	long val = 0;
+
+	switch (pwmctl) {
+	case 0:
+		val = 0x03;	/* Run at full speed */
+		break;
+	case 1:
+		val = 0x07;	/* Manual mode */
+		break;
+	case 2:
+		switch (pwmchan) {
+		case 1:
+			/* Remote1 controls PWM */
+			val = 0x00;
+			break;
+		case 2:
+			/* local controls PWM */
+			val = 0x01;
+			break;
+		case 4:
+			/* remote2 controls PWM */
+			val = 0x02;
+			break;
+		case 6:
+			/* local/remote2 control PWM */
+			val = 0x05;
+			break;
+		case 7:
+			/* All three control PWM */
+			val = 0x06;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	data->pwmctl[index] = pwmctl;
+	data->pwmchan[index] = pwmchan;
+
+	data->pwm[CONTROL][index] &= ~0xE0;
+	data->pwm[CONTROL][index] |= (val & 7) << 5;
+
+	i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index),
+				  data->pwm[CONTROL][index]);
+
+	return 0;
+}
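+
+/* Example of the mapping above: writing 2 to pwmN_enable while
+ * pwmN_auto_channel_temp is 6 programs bits 7:5 of the PWM config register
+ * to 0x05 (local + remote2 temperatures control the fan); pwmN_enable = 1
+ * selects manual mode (0x07) and 0 selects full speed (0x03).
+ */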
+
+static ssize_t set_pwmchan(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct adt7475_data *data = i2c_get_clientdata(client);
+	int r;
+	long val;
+
+	if (strict_strtol(buf, 10, &val))
+		return -EINVAL;
+
+	mutex_lock(&data->lock);
+	/* Read Modify Write PWM values */
+	adt7475_read_pwm(client, sattr->index);
+	r = hw_set_pwm(client, sattr->index, data->pwmctl[sattr->index], val);
+	if (r)
+		count = r;
+	mutex_unlock(&data->lock);
+
+	return count;
+}
+
+static ssize_t set_pwmctrl(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct adt7475_data *data = i2c_get_clientdata(client);
+	int r;
+	long val;
+
+	if (strict_strtol(buf, 10, &val))
+		return -EINVAL;
+
+	mutex_lock(&data->lock);
+	/* Read Modify Write PWM values */
+	adt7475_read_pwm(client, sattr->index);
+	r = hw_set_pwm(client, sattr->index, val, data->pwmchan[sattr->index]);
+	if (r)
+		count = r;
+	mutex_unlock(&data->lock);
+
+	return count;
+}
+
+/* List of frequencies for the PWM */
+static const int pwmfreq_table[] = {
+	11, 14, 22, 29, 35, 44, 58, 88
+};
+
+static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct adt7475_data *data = adt7475_update_device(dev);
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+
+	return sprintf(buf, "%d\n",
+		       pwmfreq_table[data->range[sattr->index] & 7]);
+}
+
+static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct adt7475_data *data = i2c_get_clientdata(client);
+	int out;
+	long val;
+
+	if (strict_strtol(buf, 10, &val))
+		return -EINVAL;
+
+	out = find_nearest(val, pwmfreq_table, ARRAY_SIZE(pwmfreq_table));
+
+	mutex_lock(&data->lock);
+
+	data->range[sattr->index] =
+		adt7475_read(TEMP_TRANGE_REG(sattr->index));
+	data->range[sattr->index] &= ~7;
+	data->range[sattr->index] |= out;
+
+	i2c_smbus_write_byte_data(client, TEMP_TRANGE_REG(sattr->index),
+				  data->range[sattr->index]);
+
+	mutex_unlock(&data->lock);
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, show_voltage, NULL, INPUT, 0);
+static SENSOR_DEVICE_ATTR_2(in1_max, S_IRUGO | S_IWUSR, show_voltage,
+			    set_voltage, MAX, 0);
+static SENSOR_DEVICE_ATTR_2(in1_min, S_IRUGO | S_IWUSR, show_voltage,
+			    set_voltage, MIN, 0);
+static SENSOR_DEVICE_ATTR_2(in1_alarm, S_IRUGO, show_voltage, NULL, ALARM, 0);
+static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, show_voltage, NULL, INPUT, 1);
+static SENSOR_DEVICE_ATTR_2(in2_max, S_IRUGO | S_IWUSR, show_voltage,
+			    set_voltage, MAX, 1);
+static SENSOR_DEVICE_ATTR_2(in2_min, S_IRUGO | S_IWUSR, show_voltage,
+			    set_voltage, MIN, 1);
+static SENSOR_DEVICE_ATTR_2(in2_alarm, S_IRUGO, show_voltage, NULL, ALARM, 1);
+static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, INPUT, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_alarm, S_IRUGO, show_temp, NULL, ALARM, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_fault, S_IRUGO, show_temp, NULL, FAULT, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+			    MAX, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
+			    MIN, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_offset, S_IRUGO | S_IWUSR, show_temp,
+			    set_temp, OFFSET, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_point1_temp, S_IRUGO | S_IWUSR,
+			    show_temp, set_temp, AUTOMIN, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_point2_temp, S_IRUGO | S_IWUSR,
+			    show_point2, set_point2, 0, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+			    THERM, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_crit_hyst, S_IRUGO | S_IWUSR, show_temp,
+			    set_temp, HYSTERSIS, 0);
+static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, INPUT, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_alarm, S_IRUGO, show_temp, NULL, ALARM, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+			    MAX, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
+			    MIN, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IRUGO | S_IWUSR, show_temp,
+			    set_temp, OFFSET, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_point1_temp, S_IRUGO | S_IWUSR,
+			    show_temp, set_temp, AUTOMIN, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_point2_temp, S_IRUGO | S_IWUSR,
+			    show_point2, set_point2, 0, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+			    THERM, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_crit_hyst, S_IRUGO | S_IWUSR, show_temp,
+			    set_temp, HYSTERSIS, 1);
+static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, INPUT, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_alarm, S_IRUGO, show_temp, NULL, ALARM, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_fault, S_IRUGO, show_temp, NULL, FAULT, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+			    MAX, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
+			    MIN, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_offset, S_IRUGO | S_IWUSR, show_temp,
+			    set_temp, OFFSET, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_point1_temp, S_IRUGO | S_IWUSR,
+			    show_temp, set_temp, AUTOMIN, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_point2_temp, S_IRUGO | S_IWUSR,
+			    show_point2, set_point2, 0, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+			    THERM, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_crit_hyst, S_IRUGO | S_IWUSR, show_temp,
+			    set_temp, HYSTERSIS, 2);
+static SENSOR_DEVICE_ATTR_2(fan1_input, S_IRUGO, show_tach, NULL, INPUT, 0);
+static SENSOR_DEVICE_ATTR_2(fan1_min, S_IRUGO | S_IWUSR, show_tach, set_tach,
+			    MIN, 0);
+static SENSOR_DEVICE_ATTR_2(fan1_alarm, S_IRUGO, show_tach, NULL, ALARM, 0);
+static SENSOR_DEVICE_ATTR_2(fan2_input, S_IRUGO, show_tach, NULL, INPUT, 1);
+static SENSOR_DEVICE_ATTR_2(fan2_min, S_IRUGO | S_IWUSR, show_tach, set_tach,
+			    MIN, 1);
+static SENSOR_DEVICE_ATTR_2(fan2_alarm, S_IRUGO, show_tach, NULL, ALARM, 1);
+static SENSOR_DEVICE_ATTR_2(fan3_input, S_IRUGO, show_tach, NULL, INPUT, 2);
+static SENSOR_DEVICE_ATTR_2(fan3_min, S_IRUGO | S_IWUSR, show_tach, set_tach,
+			    MIN, 2);
+static SENSOR_DEVICE_ATTR_2(fan3_alarm, S_IRUGO, show_tach, NULL, ALARM, 2);
+static SENSOR_DEVICE_ATTR_2(fan4_input, S_IRUGO, show_tach, NULL, INPUT, 3);
+static SENSOR_DEVICE_ATTR_2(fan4_min, S_IRUGO | S_IWUSR, show_tach, set_tach,
+			    MIN, 3);
+static SENSOR_DEVICE_ATTR_2(fan4_alarm, S_IRUGO, show_tach, NULL, ALARM, 3);
+static SENSOR_DEVICE_ATTR_2(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm, INPUT,
+			    0);
+static SENSOR_DEVICE_ATTR_2(pwm1_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
+			    set_pwmfreq, INPUT, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_enable, S_IRUGO | S_IWUSR, show_pwmctrl,
+			    set_pwmctrl, INPUT, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_channel_temp, S_IRUGO | S_IWUSR,
+			    show_pwmchan, set_pwmchan, INPUT, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
+			    set_pwm, MIN, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm,
+			    set_pwm, MAX, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2, S_IRUGO | S_IWUSR, show_pwm, set_pwm, INPUT,
+			    1);
+static SENSOR_DEVICE_ATTR_2(pwm2_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
+			    set_pwmfreq, INPUT, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_enable, S_IRUGO | S_IWUSR, show_pwmctrl,
+			    set_pwmctrl, INPUT, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_channel_temp, S_IRUGO | S_IWUSR,
+			    show_pwmchan, set_pwmchan, INPUT, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
+			    set_pwm, MIN, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm,
+			    set_pwm, MAX, 1);
+static SENSOR_DEVICE_ATTR_2(pwm3, S_IRUGO | S_IWUSR, show_pwm, set_pwm, INPUT,
+			    2);
+static SENSOR_DEVICE_ATTR_2(pwm3_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
+			    set_pwmfreq, INPUT, 2);
+static SENSOR_DEVICE_ATTR_2(pwm3_enable, S_IRUGO | S_IWUSR, show_pwmctrl,
+			    set_pwmctrl, INPUT, 2);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_channel_temp, S_IRUGO | S_IWUSR,
+			    show_pwmchan, set_pwmchan, INPUT, 2);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
+			    set_pwm, MIN, 2);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm,
+			    set_pwm, MAX, 2);
+
+static struct attribute *adt7475_attrs[] = {
+	&sensor_dev_attr_in1_input.dev_attr.attr,
+	&sensor_dev_attr_in1_max.dev_attr.attr,
+	&sensor_dev_attr_in1_min.dev_attr.attr,
+	&sensor_dev_attr_in1_alarm.dev_attr.attr,
+	&sensor_dev_attr_in2_input.dev_attr.attr,
+	&sensor_dev_attr_in2_max.dev_attr.attr,
+	&sensor_dev_attr_in2_min.dev_attr.attr,
+	&sensor_dev_attr_in2_alarm.dev_attr.attr,
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_alarm.dev_attr.attr,
+	&sensor_dev_attr_temp1_fault.dev_attr.attr,
+	&sensor_dev_attr_temp1_max.dev_attr.attr,
+	&sensor_dev_attr_temp1_min.dev_attr.attr,
+	&sensor_dev_attr_temp1_offset.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr,
+	&sensor_dev_attr_temp1_crit.dev_attr.attr,
+	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp2_input.dev_attr.attr,
+	&sensor_dev_attr_temp2_alarm.dev_attr.attr,
+	&sensor_dev_attr_temp2_max.dev_attr.attr,
+	&sensor_dev_attr_temp2_min.dev_attr.attr,
+	&sensor_dev_attr_temp2_offset.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_point1_temp.dev_attr.attr,
+	&sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr,
+	&sensor_dev_attr_temp2_crit.dev_attr.attr,
+	&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp3_input.dev_attr.attr,
+	&sensor_dev_attr_temp3_fault.dev_attr.attr,
+	&sensor_dev_attr_temp3_alarm.dev_attr.attr,
+	&sensor_dev_attr_temp3_max.dev_attr.attr,
+	&sensor_dev_attr_temp3_min.dev_attr.attr,
+	&sensor_dev_attr_temp3_offset.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_point1_temp.dev_attr.attr,
+	&sensor_dev_attr_temp3_auto_point2_temp.dev_attr.attr,
+	&sensor_dev_attr_temp3_crit.dev_attr.attr,
+	&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
+	&sensor_dev_attr_fan1_input.dev_attr.attr,
+	&sensor_dev_attr_fan1_min.dev_attr.attr,
+	&sensor_dev_attr_fan1_alarm.dev_attr.attr,
+	&sensor_dev_attr_fan2_input.dev_attr.attr,
+	&sensor_dev_attr_fan2_min.dev_attr.attr,
+	&sensor_dev_attr_fan2_alarm.dev_attr.attr,
+	&sensor_dev_attr_fan3_input.dev_attr.attr,
+	&sensor_dev_attr_fan3_min.dev_attr.attr,
+	&sensor_dev_attr_fan3_alarm.dev_attr.attr,
+	&sensor_dev_attr_fan4_input.dev_attr.attr,
+	&sensor_dev_attr_fan4_min.dev_attr.attr,
+	&sensor_dev_attr_fan4_alarm.dev_attr.attr,
+	&sensor_dev_attr_pwm1.dev_attr.attr,
+	&sensor_dev_attr_pwm1_freq.dev_attr.attr,
+	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_channel_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm2.dev_attr.attr,
+	&sensor_dev_attr_pwm2_freq.dev_attr.attr,
+	&sensor_dev_attr_pwm2_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_channel_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm3.dev_attr.attr,
+	&sensor_dev_attr_pwm3_freq.dev_attr.attr,
+	&sensor_dev_attr_pwm3_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_channel_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group adt7475_attr_group = { .attrs = adt7475_attrs };
+
+static int adt7475_detect(struct i2c_client *client, int kind,
+			  struct i2c_board_info *info)
+{
+	struct i2c_adapter *adapter = client->adapter;
+
+	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -ENODEV;
+
+	if (kind <= 0) {
+		if (adt7475_read(REG_VENDID) != 0x41 ||
+		    adt7475_read(REG_DEVID) != 0x75) {
+			dev_err(&adapter->dev,
+				"Couldn't detect a adt7475 part at 0x%02x\n",
+				(unsigned int)client->addr);
+			return -ENODEV;
+		}
+	}
+
+	strlcpy(info->type, adt7475_id[0].name, I2C_NAME_SIZE);
+
+	return 0;
+}
+
+static int adt7475_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct adt7475_data *data;
+	int i, ret = 0;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	mutex_init(&data->lock);
+	i2c_set_clientdata(client, data);
+
+	/* Call adt7475_read_pwm for all PWMs, as this will reprogram any
+	   PWMs which are disabled to manual mode with a 0% duty cycle */
+	for (i = 0; i < ADT7475_PWM_COUNT; i++)
+		adt7475_read_pwm(client, i);
+
+	ret = sysfs_create_group(&client->dev.kobj, &adt7475_attr_group);
+	if (ret)
+		goto efree;
+
+	data->hwmon_dev = hwmon_device_register(&client->dev);
+	if (IS_ERR(data->hwmon_dev)) {
+		ret = PTR_ERR(data->hwmon_dev);
+		goto eremove;
+	}
+
+	return 0;
+
+eremove:
+	sysfs_remove_group(&client->dev.kobj, &adt7475_attr_group);
+efree:
+	kfree(data);
+	return ret;
+}
+
+static int adt7475_remove(struct i2c_client *client)
+{
+	struct adt7475_data *data = i2c_get_clientdata(client);
+
+	hwmon_device_unregister(data->hwmon_dev);
+	sysfs_remove_group(&client->dev.kobj, &adt7475_attr_group);
+	kfree(data);
+
+	return 0;
+}
+
+static struct i2c_driver adt7475_driver = {
+	.class		= I2C_CLASS_HWMON,
+	.driver = {
+		.name	= "adt7475",
+	},
+	.probe		= adt7475_probe,
+	.remove		= adt7475_remove,
+	.id_table	= adt7475_id,
+	.detect		= adt7475_detect,
+	.address_data	= &addr_data,
+};
+
+static void adt7475_read_hystersis(struct i2c_client *client)
+{
+	struct adt7475_data *data = i2c_get_clientdata(client);
+
+	data->temp[HYSTERSIS][0] = (u16) adt7475_read(REG_REMOTE1_HYSTERSIS);
+	data->temp[HYSTERSIS][1] = data->temp[HYSTERSIS][0];
+	data->temp[HYSTERSIS][2] = (u16) adt7475_read(REG_REMOTE2_HYSTERSIS);
+}
+
+static void adt7475_read_pwm(struct i2c_client *client, int index)
+{
+	struct adt7475_data *data = i2c_get_clientdata(client);
+	unsigned int v;
+
+	data->pwm[CONTROL][index] = adt7475_read(PWM_CONFIG_REG(index));
+
+	/* Figure out the internal value for pwmctrl and pwmchan
+	   based on the current settings */
+	v = (data->pwm[CONTROL][index] >> 5) & 7;
+
+	if (v == 3)
+		data->pwmctl[index] = 0;
+	else if (v == 7)
+		data->pwmctl[index] = 1;
+	else if (v == 4) {
+		/* The fan is disabled - we don't want to
+		   support that, so change to manual mode and
+		   set the duty cycle to 0 instead
+		*/
+		data->pwm[INPUT][index] = 0;
+		data->pwm[CONTROL][index] &= ~0xE0;
+		data->pwm[CONTROL][index] |= (7 << 5);
+
+		i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index),
+					  data->pwm[INPUT][index]);
+
+		i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index),
+					  data->pwm[CONTROL][index]);
+
+		data->pwmctl[index] = 1;
+	} else {
+		data->pwmctl[index] = 2;
+
+		switch (v) {
+		case 0:
+			data->pwmchan[index] = 1;
+			break;
+		case 1:
+			data->pwmchan[index] = 2;
+			break;
+		case 2:
+			data->pwmchan[index] = 4;
+			break;
+		case 5:
+			data->pwmchan[index] = 6;
+			break;
+		case 6:
+			data->pwmchan[index] = 7;
+			break;
+		}
+	}
+}
+
+static struct adt7475_data *adt7475_update_device(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct adt7475_data *data = i2c_get_clientdata(client);
+	u8 ext;
+	int i;
+
+	mutex_lock(&data->lock);
+
+	/* Measurement values update every 2 seconds */
+	if (time_after(jiffies, data->measure_updated + HZ * 2) ||
+	    !data->valid) {
+		data->alarms = adt7475_read(REG_STATUS2) << 8;
+		data->alarms |= adt7475_read(REG_STATUS1);
+
+		ext = adt7475_read(REG_EXTEND1);
+		for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++)
+			data->voltage[INPUT][i] =
+				(adt7475_read(VOLTAGE_REG(i)) << 2) |
+				((ext >> ((i + 1) * 2)) & 3);
+
+		ext = adt7475_read(REG_EXTEND2);
+		for (i = 0; i < ADT7475_TEMP_COUNT; i++)
+			data->temp[INPUT][i] =
+				(adt7475_read(TEMP_REG(i)) << 2) |
+				((ext >> ((i + 1) * 2)) & 3);
+
+		for (i = 0; i < ADT7475_TACH_COUNT; i++)
+			data->tach[INPUT][i] =
+				adt7475_read_word(client, TACH_REG(i));
+
+		/* Updated by hw when in auto mode */
+		for (i = 0; i < ADT7475_PWM_COUNT; i++)
+			data->pwm[INPUT][i] = adt7475_read(PWM_REG(i));
+
+		data->measure_updated = jiffies;
+	}
+
+	/* Limits and settings should never change; update them every 60 seconds */
+	if (time_after(jiffies, data->limits_updated + HZ * 2) ||
+	    !data->valid) {
+		data->config5 = adt7475_read(REG_CONFIG5);
+
+		for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
+			/* Adjust values so they match the input precision */
+			data->voltage[MIN][i] =
+				adt7475_read(VOLTAGE_MIN_REG(i)) << 2;
+			data->voltage[MAX][i] =
+				adt7475_read(VOLTAGE_MAX_REG(i)) << 2;
+		}
+
+		for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
+			/* Adjust values so they match the input precision */
+			data->temp[MIN][i] =
+				adt7475_read(TEMP_MIN_REG(i)) << 2;
+			data->temp[MAX][i] =
+				adt7475_read(TEMP_MAX_REG(i)) << 2;
+			data->temp[AUTOMIN][i] =
+				adt7475_read(TEMP_TMIN_REG(i)) << 2;
+			data->temp[THERM][i] =
+				adt7475_read(TEMP_THERM_REG(i)) << 2;
+			data->temp[OFFSET][i] =
+				adt7475_read(TEMP_OFFSET_REG(i));
+		}
+		adt7475_read_hystersis(client);
+
+		for (i = 0; i < ADT7475_TACH_COUNT; i++)
+			data->tach[MIN][i] =
+				adt7475_read_word(client, TACH_MIN_REG(i));
+
+		for (i = 0; i < ADT7475_PWM_COUNT; i++) {
+			data->pwm[MAX][i] = adt7475_read(PWM_MAX_REG(i));
+			data->pwm[MIN][i] = adt7475_read(PWM_MIN_REG(i));
+			/* Set the channel and control information */
+			adt7475_read_pwm(client, i);
+		}
+
+		data->range[0] = adt7475_read(TEMP_TRANGE_REG(0));
+		data->range[1] = adt7475_read(TEMP_TRANGE_REG(1));
+		data->range[2] = adt7475_read(TEMP_TRANGE_REG(2));
+
+		data->limits_updated = jiffies;
+		data->valid = 1;
+	}
+
+	mutex_unlock(&data->lock);
+
+	return data;
+}
+
+static int __init sensors_adt7475_init(void)
+{
+	return i2c_add_driver(&adt7475_driver);
+}
+
+static void __exit sensors_adt7475_exit(void)
+{
+	i2c_del_driver(&adt7475_driver);
+}
+
+MODULE_AUTHOR("Advanced Micro Devices, Inc");
+MODULE_DESCRIPTION("adt7475 driver");
+MODULE_LICENSE("GPL");
+
+module_init(sensors_adt7475_init);
+module_exit(sensors_adt7475_exit);
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index dca47a5..e301862 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -590,6 +590,11 @@
 	}
 
 	ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length);
+	/* newer MacBooks report a single 10-bit big-endian value */
+	if (data_length == 10) {
+		left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2;
+		goto out;
+	}
 	left = buffer[2];
 	if (ret)
 		goto out;
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 8a45a2e..8acf829 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -53,7 +53,10 @@
 
 /* Insmod parameters */
 I2C_CLIENT_INSMOD_1(asb100);
-I2C_CLIENT_MODULE_PARM(force_subclients, "List of subclient addresses: "
+
+static unsigned short force_subclients[4];
+module_param_array(force_subclients, short, NULL, 0);
+MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
 	"{bus, clientaddr, subclientaddr1, subclientaddr2}");
 
 /* Voltage IN registers 0-6 */
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 27a5d39..3df202a 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -34,6 +34,7 @@
 #include <linux/hwmon-vid.h>
 #include <linux/err.h>
 #include <linux/mutex.h>
+#include <linux/acpi.h>
 #include <asm/io.h>
 
 /* ISA device, if found */
@@ -2361,6 +2362,10 @@
 	};
 	int err;
 
+	err = acpi_check_resource_conflict(&res);
+	if (err)
+		goto exit;
+
 	if (!(pdev = platform_device_alloc("dme1737", addr))) {
 		printk(KERN_ERR "dme1737: Failed to allocate device.\n");
 		err = -ENOMEM;
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 7a14a2d..89987657 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -39,6 +39,7 @@
 #include <linux/mutex.h>
 #include <linux/sysfs.h>
 #include <linux/ioport.h>
+#include <linux/acpi.h>
 #include <asm/io.h>
 
 static unsigned short force_id;
@@ -1455,6 +1456,10 @@
 	}
 
 	res.name = pdev->name;
+	err = acpi_check_resource_conflict(&res);
+	if (err)
+		goto exit_device_put;
+
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
 		printk(KERN_ERR DRVNAME ": Device resource addition failed "
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 67067e9..609caff 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -1,6 +1,6 @@
 /***************************************************************************
  *   Copyright (C) 2006 by Hans Edgington <hans@edgington.nl>              *
- *   Copyright (C) 2007 by Hans de Goede  <j.w.r.degoede@hhs.nl>           *
+ *   Copyright (C) 2007,2008 by Hans de Goede <hdegoede@redhat.com>        *
  *                                                                         *
  *   This program is free software; you can redistribute it and/or modify  *
  *   it under the terms of the GNU General Public License as published by  *
@@ -27,11 +27,12 @@
 #include <linux/hwmon-sysfs.h>
 #include <linux/err.h>
 #include <linux/mutex.h>
-#include <asm/io.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
 
 #define DRVNAME "f71882fg"
 
-#define SIO_F71882FG_LD_HWM	0x04	/* Hardware monitor logical device*/
+#define SIO_F71882FG_LD_HWM	0x04	/* Hardware monitor logical device */
 #define SIO_UNLOCK_KEY		0x87	/* Key to enable Super-I/O */
 #define SIO_LOCK_KEY		0xAA	/* Key to disable Super-I/O */
 
@@ -43,7 +44,9 @@
 #define SIO_REG_ADDR		0x60	/* Logical device address (2 bytes) */
 
 #define SIO_FINTEK_ID		0x1934	/* Manufacturers ID */
+#define SIO_F71862_ID		0x0601	/* Chipset ID */
 #define SIO_F71882_ID		0x0541	/* Chipset ID */
+#define SIO_F8000_ID		0x0581	/* Chipset ID */
 
 #define REGION_LENGTH		8
 #define ADDR_REG_OFFSET		5
@@ -51,25 +54,36 @@
 
 #define F71882FG_REG_PECI		0x0A
 
-#define F71882FG_REG_IN_STATUS		0x12
-#define F71882FG_REG_IN_BEEP		0x13
+#define F71882FG_REG_IN_STATUS		0x12 /* f71882fg only */
+#define F71882FG_REG_IN_BEEP		0x13 /* f71882fg only */
 #define F71882FG_REG_IN(nr)		(0x20  + (nr))
-#define F71882FG_REG_IN1_HIGH		0x32
+#define F71882FG_REG_IN1_HIGH		0x32 /* f71882fg only */
 
 #define F71882FG_REG_FAN(nr)		(0xA0 + (16 * (nr)))
+#define F71882FG_REG_FAN_TARGET(nr)	(0xA2 + (16 * (nr)))
+#define F71882FG_REG_FAN_FULL_SPEED(nr)	(0xA4 + (16 * (nr)))
 #define F71882FG_REG_FAN_STATUS		0x92
 #define F71882FG_REG_FAN_BEEP		0x93
 
-#define F71882FG_REG_TEMP(nr)		(0x72 + 2 * (nr))
-#define F71882FG_REG_TEMP_OVT(nr)	(0x82 + 2 * (nr))
-#define F71882FG_REG_TEMP_HIGH(nr)	(0x83 + 2 * (nr))
+#define F71882FG_REG_TEMP(nr)		(0x70 + 2 * (nr))
+#define F71882FG_REG_TEMP_OVT(nr)	(0x80 + 2 * (nr))
+#define F71882FG_REG_TEMP_HIGH(nr)	(0x81 + 2 * (nr))
 #define F71882FG_REG_TEMP_STATUS	0x62
 #define F71882FG_REG_TEMP_BEEP		0x63
-#define F71882FG_REG_TEMP_HYST1		0x6C
-#define F71882FG_REG_TEMP_HYST23	0x6D
+#define F71882FG_REG_TEMP_HYST(nr)	(0x6C + (nr))
 #define F71882FG_REG_TEMP_TYPE		0x6B
 #define F71882FG_REG_TEMP_DIODE_OPEN	0x6F
 
+#define F71882FG_REG_PWM(nr)		(0xA3 + (16 * (nr)))
+#define F71882FG_REG_PWM_TYPE		0x94
+#define F71882FG_REG_PWM_ENABLE		0x96
+
+#define F71882FG_REG_FAN_HYST(nr)	(0x98 + (nr))
+
+#define F71882FG_REG_POINT_PWM(pwm, point)	(0xAA + (point) + (16 * (pwm)))
+#define F71882FG_REG_POINT_TEMP(pwm, point)	(0xA6 + (point) + (16 * (pwm)))
+#define F71882FG_REG_POINT_MAPPING(nr)		(0xAF + 16 * (nr))
+
 #define	F71882FG_REG_START		0x01
 
 #define FAN_MIN_DETECT			366 /* Lowest detectable fanspeed */
@@ -78,7 +92,15 @@
 module_param(force_id, ushort, 0);
 MODULE_PARM_DESC(force_id, "Override the detected device ID");
 
-static struct platform_device *f71882fg_pdev = NULL;
+enum chips { f71862fg, f71882fg, f8000 };
+
+static const char *f71882fg_names[] = {
+	"f71862fg",
+	"f71882fg",
+	"f8000",
+};
+
+static struct platform_device *f71882fg_pdev;
 
 /* Super-I/O Function prototypes */
 static inline int superio_inb(int base, int reg);
@@ -87,8 +109,13 @@
 static inline void superio_select(int base, int ld);
 static inline void superio_exit(int base);
 
+struct f71882fg_sio_data {
+	enum chips type;
+};
+
 struct f71882fg_data {
 	unsigned short addr;
+	enum chips type;
 	struct device *hwmon_dev;
 
 	struct mutex update_lock;
@@ -102,19 +129,30 @@
 	u8	in_status;
 	u8	in_beep;
 	u16	fan[4];
+	u16	fan_target[4];
+	u16	fan_full_speed[4];
 	u8	fan_status;
 	u8	fan_beep;
-	u8	temp[3];
-	u8	temp_ovt[3];
-	u8	temp_high[3];
-	u8	temp_hyst[3];
-	u8	temp_type[3];
+	/* Note: all models have only 3 temperature channels, but on some
+	   they are addressed as 0-2 and on others as 1-3, so for coding
+	   convenience we reserve space for 4 channels */
+	u8	temp[4];
+	u8	temp_ovt[4];
+	u8	temp_high[4];
+	u8	temp_hyst[2]; /* 2 hysts stored per reg */
+	u8	temp_type[4];
 	u8	temp_status;
 	u8	temp_beep;
 	u8	temp_diode_open;
+	u8	pwm[4];
+	u8	pwm_enable;
+	u8	pwm_auto_point_hyst[2];
+	u8	pwm_auto_point_mapping[4];
+	u8	pwm_auto_point_pwm[4][5];
+	u8	pwm_auto_point_temp[4][4];
 };
 
-/* Sysfs in*/
+/* Sysfs in */
 static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
 	char *buf);
 static ssize_t show_in_max(struct device *dev, struct device_attribute
@@ -130,6 +168,10 @@
 /* Sysfs Fan */
 static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
 	char *buf);
+static ssize_t show_fan_full_speed(struct device *dev,
+	struct device_attribute *devattr, char *buf);
+static ssize_t store_fan_full_speed(struct device *dev,
+	struct device_attribute *devattr, const char *buf, size_t count);
 static ssize_t show_fan_beep(struct device *dev, struct device_attribute
 	*devattr, char *buf);
 static ssize_t store_fan_beep(struct device *dev, struct device_attribute
@@ -163,16 +205,41 @@
 	*devattr, char *buf);
 static ssize_t show_temp_fault(struct device *dev, struct device_attribute
 	*devattr, char *buf);
+/* PWM and Auto point control */
+static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
+	char *buf);
+static ssize_t store_pwm(struct device *dev, struct device_attribute *devattr,
+	const char *buf, size_t count);
+static ssize_t show_pwm_enable(struct device *dev,
+	struct device_attribute *devattr, char *buf);
+static ssize_t store_pwm_enable(struct device *dev,
+	struct device_attribute	*devattr, const char *buf, size_t count);
+static ssize_t show_pwm_interpolate(struct device *dev,
+	struct device_attribute *devattr, char *buf);
+static ssize_t store_pwm_interpolate(struct device *dev,
+	struct device_attribute *devattr, const char *buf, size_t count);
+static ssize_t show_pwm_auto_point_channel(struct device *dev,
+	struct device_attribute *devattr, char *buf);
+static ssize_t store_pwm_auto_point_channel(struct device *dev,
+	struct device_attribute *devattr, const char *buf, size_t count);
+static ssize_t show_pwm_auto_point_temp_hyst(struct device *dev,
+	struct device_attribute *devattr, char *buf);
+static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
+	struct device_attribute *devattr, const char *buf, size_t count);
+static ssize_t show_pwm_auto_point_pwm(struct device *dev,
+	struct device_attribute *devattr, char *buf);
+static ssize_t store_pwm_auto_point_pwm(struct device *dev,
+	struct device_attribute *devattr, const char *buf, size_t count);
+static ssize_t show_pwm_auto_point_temp(struct device *dev,
+	struct device_attribute *devattr, char *buf);
+static ssize_t store_pwm_auto_point_temp(struct device *dev,
+	struct device_attribute *devattr, const char *buf, size_t count);
 /* Sysfs misc */
 static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
 	char *buf);
 
 static int __devinit f71882fg_probe(struct platform_device * pdev);
-static int __devexit f71882fg_remove(struct platform_device *pdev);
-static int __init f71882fg_init(void);
-static int __init f71882fg_find(int sioaddr, unsigned short *address);
-static int __init f71882fg_device_add(unsigned short address);
-static void __exit f71882fg_exit(void);
+static int f71882fg_remove(struct platform_device *pdev);
 
 static struct platform_driver f71882fg_driver = {
 	.driver = {
@@ -183,86 +250,531 @@
 	.remove		= __devexit_p(f71882fg_remove),
 };
 
-static struct device_attribute f71882fg_dev_attr[] =
-{
-	__ATTR( name, S_IRUGO, show_name, NULL ),
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
+/* Temp and in attr common to both the f71862fg and f71882fg */
+static struct sensor_device_attribute_2 f718x2fg_in_temp_attr[] = {
+	SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
+	SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
+	SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
+	SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3),
+	SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4),
+	SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5),
+	SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6),
+	SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7),
+	SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8),
+	SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1),
+	SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
+		store_temp_max, 0, 1),
+	SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
+		store_temp_max_hyst, 0, 1),
+	/* Should really be temp1_max_alarm, but older versions did not handle
+	   the max and crit alarms separately and lm_sensors v2 depends on the
+	   presence of temp#_alarm files. The same goes for temp2/3 _alarm. */
+	SENSOR_ATTR_2(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 1),
+	SENSOR_ATTR_2(temp1_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+		store_temp_beep, 0, 1),
+	SENSOR_ATTR_2(temp1_crit, S_IRUGO|S_IWUSR, show_temp_crit,
+		store_temp_crit, 0, 1),
+	SENSOR_ATTR_2(temp1_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
+		0, 1),
+	SENSOR_ATTR_2(temp1_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
+	SENSOR_ATTR_2(temp1_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+		store_temp_beep, 0, 5),
+	SENSOR_ATTR_2(temp1_type, S_IRUGO, show_temp_type, NULL, 0, 1),
+	SENSOR_ATTR_2(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
+	SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 2),
+	SENSOR_ATTR_2(temp2_max, S_IRUGO|S_IWUSR, show_temp_max,
+		store_temp_max, 0, 2),
+	SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
+		store_temp_max_hyst, 0, 2),
+	/* Should be temp2_max_alarm, see temp1_alarm note */
+	SENSOR_ATTR_2(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 2),
+	SENSOR_ATTR_2(temp2_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+		store_temp_beep, 0, 2),
+	SENSOR_ATTR_2(temp2_crit, S_IRUGO|S_IWUSR, show_temp_crit,
+		store_temp_crit, 0, 2),
+	SENSOR_ATTR_2(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
+		0, 2),
+	SENSOR_ATTR_2(temp2_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 6),
+	SENSOR_ATTR_2(temp2_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+		store_temp_beep, 0, 6),
+	SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2),
+	SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
+	SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3),
+	SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
+		store_temp_max, 0, 3),
+	SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
+		store_temp_max_hyst, 0, 3),
+	/* Should be temp3_max_alarm, see temp1_alarm note */
+	SENSOR_ATTR_2(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 3),
+	SENSOR_ATTR_2(temp3_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+		store_temp_beep, 0, 3),
+	SENSOR_ATTR_2(temp3_crit, S_IRUGO|S_IWUSR, show_temp_crit,
+		store_temp_crit, 0, 3),
+	SENSOR_ATTR_2(temp3_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
+		0, 3),
+	SENSOR_ATTR_2(temp3_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 7),
+	SENSOR_ATTR_2(temp3_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+		store_temp_beep, 0, 7),
+	SENSOR_ATTR_2(temp3_type, S_IRUGO, show_temp_type, NULL, 0, 3),
+	SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 3),
 };
 
-static struct sensor_device_attribute f71882fg_in_temp_attr[] =
-{
-	SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0),
-	SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1),
-	SENSOR_ATTR(in1_max, S_IRUGO|S_IWUSR, show_in_max, store_in_max, 1),
-	SENSOR_ATTR(in1_beep, S_IRUGO|S_IWUSR, show_in_beep, store_in_beep, 1),
-	SENSOR_ATTR(in1_alarm, S_IRUGO, show_in_alarm, NULL, 1),
-	SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2),
-	SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3),
-	SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4),
-	SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5),
-	SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6),
-	SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7),
-	SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8),
-	SENSOR_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0),
-	SENSOR_ATTR(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
-		store_temp_max, 0),
-	SENSOR_ATTR(temp1_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
-		store_temp_max_hyst, 0),
-	SENSOR_ATTR(temp1_crit, S_IRUGO|S_IWUSR, show_temp_crit,
-		store_temp_crit, 0),
-	SENSOR_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 0),
-	SENSOR_ATTR(temp1_type, S_IRUGO, show_temp_type, NULL, 0),
-	SENSOR_ATTR(temp1_beep, S_IRUGO|S_IWUSR, show_temp_beep,
-		store_temp_beep, 0),
-	SENSOR_ATTR(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0),
-	SENSOR_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0),
-	SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1),
-	SENSOR_ATTR(temp2_max, S_IRUGO|S_IWUSR, show_temp_max,
-		store_temp_max, 1),
-	SENSOR_ATTR(temp2_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
-		store_temp_max_hyst, 1),
-	SENSOR_ATTR(temp2_crit, S_IRUGO|S_IWUSR, show_temp_crit,
-		store_temp_crit, 1),
-	SENSOR_ATTR(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 1),
-	SENSOR_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1),
-	SENSOR_ATTR(temp2_beep, S_IRUGO|S_IWUSR, show_temp_beep,
-		store_temp_beep, 1),
-	SENSOR_ATTR(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 1),
-	SENSOR_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1),
-	SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2),
-	SENSOR_ATTR(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
-		store_temp_max, 2),
-	SENSOR_ATTR(temp3_max_hyst, S_IRUGO|S_IWUSR, show_temp_max_hyst,
-		store_temp_max_hyst, 2),
-	SENSOR_ATTR(temp3_crit, S_IRUGO|S_IWUSR, show_temp_crit,
-		store_temp_crit, 2),
-	SENSOR_ATTR(temp3_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 2),
-	SENSOR_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2),
-	SENSOR_ATTR(temp3_beep, S_IRUGO|S_IWUSR, show_temp_beep,
-		store_temp_beep, 2),
-	SENSOR_ATTR(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 2),
-	SENSOR_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2)
+/* Temp and in attr found only on the f71882fg */
+static struct sensor_device_attribute_2 f71882fg_in_temp_attr[] = {
+	SENSOR_ATTR_2(in1_max, S_IRUGO|S_IWUSR, show_in_max, store_in_max,
+		0, 1),
+	SENSOR_ATTR_2(in1_beep, S_IRUGO|S_IWUSR, show_in_beep, store_in_beep,
+		0, 1),
+	SENSOR_ATTR_2(in1_alarm, S_IRUGO, show_in_alarm, NULL, 0, 1),
 };
 
-static struct sensor_device_attribute f71882fg_fan_attr[] =
-{
-	SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0),
-	SENSOR_ATTR(fan1_beep, S_IRUGO|S_IWUSR, show_fan_beep,
-		store_fan_beep, 0),
-	SENSOR_ATTR(fan1_alarm, S_IRUGO, show_fan_alarm, NULL, 0),
-	SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1),
-	SENSOR_ATTR(fan2_beep, S_IRUGO|S_IWUSR, show_fan_beep,
-		store_fan_beep, 1),
-	SENSOR_ATTR(fan2_alarm, S_IRUGO, show_fan_alarm, NULL, 1),
-	SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2),
-	SENSOR_ATTR(fan3_beep, S_IRUGO|S_IWUSR, show_fan_beep,
-		store_fan_beep, 2),
-	SENSOR_ATTR(fan3_alarm, S_IRUGO, show_fan_alarm, NULL, 2),
-	SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3),
-	SENSOR_ATTR(fan4_beep, S_IRUGO|S_IWUSR, show_fan_beep,
-		store_fan_beep, 3),
-	SENSOR_ATTR(fan4_alarm, S_IRUGO, show_fan_alarm, NULL, 3)
+/* Temp and in attr for the f8000
+   Note: on the f8000, temp_ovt (crit) is used as max, and temp_high (max)
+   is used as the hysteresis value to clear alarms
+ */
+static struct sensor_device_attribute_2 f8000_in_temp_attr[] = {
+	SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
+	SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
+	SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
+	SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0),
+	SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_crit,
+		store_temp_crit, 0, 0),
+	SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO|S_IWUSR, show_temp_max,
+		store_temp_max, 0, 0),
+	SENSOR_ATTR_2(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 4),
+	SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 1),
+	SENSOR_ATTR_2(temp2_max, S_IRUGO|S_IWUSR, show_temp_crit,
+		store_temp_crit, 0, 1),
+	SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO|S_IWUSR, show_temp_max,
+		store_temp_max, 0, 1),
+	SENSOR_ATTR_2(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
+	SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 1),
+	SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2),
+	SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_crit,
+		store_temp_crit, 0, 2),
+	SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO|S_IWUSR, show_temp_max,
+		store_temp_max, 0, 2),
+	SENSOR_ATTR_2(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 6),
 };
 
+/* Fan / PWM attr common to all models */
+static struct sensor_device_attribute_2 fxxxx_fan_attr[] = {
+	SENSOR_ATTR_2(fan1_input, S_IRUGO, show_fan, NULL, 0, 0),
+	SENSOR_ATTR_2(fan1_full_speed, S_IRUGO|S_IWUSR,
+		      show_fan_full_speed,
+		      store_fan_full_speed, 0, 0),
+	SENSOR_ATTR_2(fan1_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 0),
+	SENSOR_ATTR_2(fan2_input, S_IRUGO, show_fan, NULL, 0, 1),
+	SENSOR_ATTR_2(fan2_full_speed, S_IRUGO|S_IWUSR,
+		      show_fan_full_speed,
+		      store_fan_full_speed, 0, 1),
+	SENSOR_ATTR_2(fan2_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 1),
+	SENSOR_ATTR_2(fan3_input, S_IRUGO, show_fan, NULL, 0, 2),
+	SENSOR_ATTR_2(fan3_full_speed, S_IRUGO|S_IWUSR,
+		      show_fan_full_speed,
+		      store_fan_full_speed, 0, 2),
+	SENSOR_ATTR_2(fan3_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 2),
+
+	SENSOR_ATTR_2(pwm1, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 0),
+	SENSOR_ATTR_2(pwm1_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
+		      store_pwm_enable, 0, 0),
+	SENSOR_ATTR_2(pwm1_interpolate, S_IRUGO|S_IWUSR,
+		      show_pwm_interpolate, store_pwm_interpolate, 0, 0),
+	SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_channel,
+		      store_pwm_auto_point_channel, 0, 0),
+
+	SENSOR_ATTR_2(pwm2, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 1),
+	SENSOR_ATTR_2(pwm2_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
+		      store_pwm_enable, 0, 1),
+	SENSOR_ATTR_2(pwm2_interpolate, S_IRUGO|S_IWUSR,
+		      show_pwm_interpolate, store_pwm_interpolate, 0, 1),
+	SENSOR_ATTR_2(pwm2_auto_channels_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_channel,
+		      store_pwm_auto_point_channel, 0, 1),
+
+	SENSOR_ATTR_2(pwm3_interpolate, S_IRUGO|S_IWUSR,
+		      show_pwm_interpolate, store_pwm_interpolate, 0, 2),
+	SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_channel,
+		      store_pwm_auto_point_channel, 0, 2),
+};
+
+/* Fan / PWM attr for the f71862fg, which has fewer PWMs and fewer zones per
+   PWM than the f71882fg */
+static struct sensor_device_attribute_2 f71862fg_fan_attr[] = {
+	SENSOR_ATTR_2(fan1_beep, S_IRUGO|S_IWUSR, show_fan_beep,
+		store_fan_beep, 0, 0),
+	SENSOR_ATTR_2(fan2_beep, S_IRUGO|S_IWUSR, show_fan_beep,
+		store_fan_beep, 0, 1),
+	SENSOR_ATTR_2(fan3_beep, S_IRUGO|S_IWUSR, show_fan_beep,
+		store_fan_beep, 0, 2),
+
+	SENSOR_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      1, 0),
+	SENSOR_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      4, 0),
+	SENSOR_ATTR_2(pwm1_auto_point1_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      0, 0),
+	SENSOR_ATTR_2(pwm1_auto_point2_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      3, 0),
+	SENSOR_ATTR_2(pwm1_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp_hyst,
+		      store_pwm_auto_point_temp_hyst,
+		      0, 0),
+	SENSOR_ATTR_2(pwm1_auto_point2_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 3, 0),
+
+	SENSOR_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      1, 1),
+	SENSOR_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      4, 1),
+	SENSOR_ATTR_2(pwm2_auto_point1_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      0, 1),
+	SENSOR_ATTR_2(pwm2_auto_point2_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      3, 1),
+	SENSOR_ATTR_2(pwm2_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp_hyst,
+		      store_pwm_auto_point_temp_hyst,
+		      0, 1),
+	SENSOR_ATTR_2(pwm2_auto_point2_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 3, 1),
+
+	SENSOR_ATTR_2(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 2),
+	SENSOR_ATTR_2(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
+		      store_pwm_enable, 0, 2),
+	SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      1, 2),
+	SENSOR_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      4, 2),
+	SENSOR_ATTR_2(pwm3_auto_point1_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      0, 2),
+	SENSOR_ATTR_2(pwm3_auto_point2_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      3, 2),
+	SENSOR_ATTR_2(pwm3_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp_hyst,
+		      store_pwm_auto_point_temp_hyst,
+		      0, 2),
+	SENSOR_ATTR_2(pwm3_auto_point2_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 3, 2),
+};
+
+/* Fan / PWM attr for the f71882fg */
+static struct sensor_device_attribute_2 f71882fg_fan_attr[] = {
+	SENSOR_ATTR_2(fan1_beep, S_IRUGO|S_IWUSR, show_fan_beep,
+		store_fan_beep, 0, 0),
+	SENSOR_ATTR_2(fan2_beep, S_IRUGO|S_IWUSR, show_fan_beep,
+		store_fan_beep, 0, 1),
+	SENSOR_ATTR_2(fan3_beep, S_IRUGO|S_IWUSR, show_fan_beep,
+		store_fan_beep, 0, 2),
+	SENSOR_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 0, 3),
+	SENSOR_ATTR_2(fan4_full_speed, S_IRUGO|S_IWUSR,
+		      show_fan_full_speed,
+		      store_fan_full_speed, 0, 3),
+	SENSOR_ATTR_2(fan4_beep, S_IRUGO|S_IWUSR, show_fan_beep,
+		store_fan_beep, 0, 3),
+	SENSOR_ATTR_2(fan4_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 3),
+
+	SENSOR_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      0, 0),
+	SENSOR_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      1, 0),
+	SENSOR_ATTR_2(pwm1_auto_point3_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      2, 0),
+	SENSOR_ATTR_2(pwm1_auto_point4_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      3, 0),
+	SENSOR_ATTR_2(pwm1_auto_point5_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      4, 0),
+	SENSOR_ATTR_2(pwm1_auto_point1_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      0, 0),
+	SENSOR_ATTR_2(pwm1_auto_point2_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      1, 0),
+	SENSOR_ATTR_2(pwm1_auto_point3_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      2, 0),
+	SENSOR_ATTR_2(pwm1_auto_point4_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      3, 0),
+	SENSOR_ATTR_2(pwm1_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp_hyst,
+		      store_pwm_auto_point_temp_hyst,
+		      0, 0),
+	SENSOR_ATTR_2(pwm1_auto_point2_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 1, 0),
+	SENSOR_ATTR_2(pwm1_auto_point3_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 2, 0),
+	SENSOR_ATTR_2(pwm1_auto_point4_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 3, 0),
+
+	SENSOR_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      0, 1),
+	SENSOR_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      1, 1),
+	SENSOR_ATTR_2(pwm2_auto_point3_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      2, 1),
+	SENSOR_ATTR_2(pwm2_auto_point4_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      3, 1),
+	SENSOR_ATTR_2(pwm2_auto_point5_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      4, 1),
+	SENSOR_ATTR_2(pwm2_auto_point1_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      0, 1),
+	SENSOR_ATTR_2(pwm2_auto_point2_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      1, 1),
+	SENSOR_ATTR_2(pwm2_auto_point3_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      2, 1),
+	SENSOR_ATTR_2(pwm2_auto_point4_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      3, 1),
+	SENSOR_ATTR_2(pwm2_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp_hyst,
+		      store_pwm_auto_point_temp_hyst,
+		      0, 1),
+	SENSOR_ATTR_2(pwm2_auto_point2_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 1, 1),
+	SENSOR_ATTR_2(pwm2_auto_point3_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 2, 1),
+	SENSOR_ATTR_2(pwm2_auto_point4_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 3, 1),
+
+	SENSOR_ATTR_2(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 2),
+	SENSOR_ATTR_2(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
+		      store_pwm_enable, 0, 2),
+	SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      0, 2),
+	SENSOR_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      1, 2),
+	SENSOR_ATTR_2(pwm3_auto_point3_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      2, 2),
+	SENSOR_ATTR_2(pwm3_auto_point4_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      3, 2),
+	SENSOR_ATTR_2(pwm3_auto_point5_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      4, 2),
+	SENSOR_ATTR_2(pwm3_auto_point1_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      0, 2),
+	SENSOR_ATTR_2(pwm3_auto_point2_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      1, 2),
+	SENSOR_ATTR_2(pwm3_auto_point3_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      2, 2),
+	SENSOR_ATTR_2(pwm3_auto_point4_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      3, 2),
+	SENSOR_ATTR_2(pwm3_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp_hyst,
+		      store_pwm_auto_point_temp_hyst,
+		      0, 2),
+	SENSOR_ATTR_2(pwm3_auto_point2_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 1, 2),
+	SENSOR_ATTR_2(pwm3_auto_point3_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 2, 2),
+	SENSOR_ATTR_2(pwm3_auto_point4_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 3, 2),
+
+	SENSOR_ATTR_2(pwm4, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0, 3),
+	SENSOR_ATTR_2(pwm4_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
+		      store_pwm_enable, 0, 3),
+	SENSOR_ATTR_2(pwm4_interpolate, S_IRUGO|S_IWUSR,
+		      show_pwm_interpolate, store_pwm_interpolate, 0, 3),
+	SENSOR_ATTR_2(pwm4_auto_channels_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_channel,
+		      store_pwm_auto_point_channel, 0, 3),
+	SENSOR_ATTR_2(pwm4_auto_point1_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      0, 3),
+	SENSOR_ATTR_2(pwm4_auto_point2_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      1, 3),
+	SENSOR_ATTR_2(pwm4_auto_point3_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      2, 3),
+	SENSOR_ATTR_2(pwm4_auto_point4_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      3, 3),
+	SENSOR_ATTR_2(pwm4_auto_point5_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      4, 3),
+	SENSOR_ATTR_2(pwm4_auto_point1_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      0, 3),
+	SENSOR_ATTR_2(pwm4_auto_point2_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      1, 3),
+	SENSOR_ATTR_2(pwm4_auto_point3_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      2, 3),
+	SENSOR_ATTR_2(pwm4_auto_point4_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      3, 3),
+	SENSOR_ATTR_2(pwm4_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp_hyst,
+		      store_pwm_auto_point_temp_hyst,
+		      0, 3),
+	SENSOR_ATTR_2(pwm4_auto_point2_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 1, 3),
+	SENSOR_ATTR_2(pwm4_auto_point3_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 2, 3),
+	SENSOR_ATTR_2(pwm4_auto_point4_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 3, 3),
+};
+
+/* Fan / PWM attr for the f8000, zones mapped to temp instead of to pwm!
+   Also the register block at offset A0 maps to TEMP1 (so our temp2, as the
+   F8000 starts counting temps at 0), B0 maps to TEMP2 and C0 maps to TEMP0 */
+static struct sensor_device_attribute_2 f8000_fan_attr[] = {
+	SENSOR_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 0, 3),
+
+	SENSOR_ATTR_2(pwm3, S_IRUGO, show_pwm, NULL, 0, 2),
+
+	SENSOR_ATTR_2(temp1_auto_point1_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      0, 2),
+	SENSOR_ATTR_2(temp1_auto_point2_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      1, 2),
+	SENSOR_ATTR_2(temp1_auto_point3_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      2, 2),
+	SENSOR_ATTR_2(temp1_auto_point4_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      3, 2),
+	SENSOR_ATTR_2(temp1_auto_point5_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      4, 2),
+	SENSOR_ATTR_2(temp1_auto_point1_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      0, 2),
+	SENSOR_ATTR_2(temp1_auto_point2_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      1, 2),
+	SENSOR_ATTR_2(temp1_auto_point3_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      2, 2),
+	SENSOR_ATTR_2(temp1_auto_point4_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      3, 2),
+	SENSOR_ATTR_2(temp1_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp_hyst,
+		      store_pwm_auto_point_temp_hyst,
+		      0, 2),
+	SENSOR_ATTR_2(temp1_auto_point2_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 1, 2),
+	SENSOR_ATTR_2(temp1_auto_point3_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 2, 2),
+	SENSOR_ATTR_2(temp1_auto_point4_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 3, 2),
+
+	SENSOR_ATTR_2(temp2_auto_point1_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      0, 0),
+	SENSOR_ATTR_2(temp2_auto_point2_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      1, 0),
+	SENSOR_ATTR_2(temp2_auto_point3_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      2, 0),
+	SENSOR_ATTR_2(temp2_auto_point4_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      3, 0),
+	SENSOR_ATTR_2(temp2_auto_point5_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      4, 0),
+	SENSOR_ATTR_2(temp2_auto_point1_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      0, 0),
+	SENSOR_ATTR_2(temp2_auto_point2_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      1, 0),
+	SENSOR_ATTR_2(temp2_auto_point3_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      2, 0),
+	SENSOR_ATTR_2(temp2_auto_point4_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      3, 0),
+	SENSOR_ATTR_2(temp2_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp_hyst,
+		      store_pwm_auto_point_temp_hyst,
+		      0, 0),
+	SENSOR_ATTR_2(temp2_auto_point2_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 1, 0),
+	SENSOR_ATTR_2(temp2_auto_point3_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 2, 0),
+	SENSOR_ATTR_2(temp2_auto_point4_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 3, 0),
+
+	SENSOR_ATTR_2(temp3_auto_point1_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      0, 1),
+	SENSOR_ATTR_2(temp3_auto_point2_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      1, 1),
+	SENSOR_ATTR_2(temp3_auto_point3_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      2, 1),
+	SENSOR_ATTR_2(temp3_auto_point4_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      3, 1),
+	SENSOR_ATTR_2(temp3_auto_point5_pwm, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+		      4, 1),
+	SENSOR_ATTR_2(temp3_auto_point1_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      0, 1),
+	SENSOR_ATTR_2(temp3_auto_point2_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      1, 1),
+	SENSOR_ATTR_2(temp3_auto_point3_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      2, 1),
+	SENSOR_ATTR_2(temp3_auto_point4_temp, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+		      3, 1),
+	SENSOR_ATTR_2(temp3_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+		      show_pwm_auto_point_temp_hyst,
+		      store_pwm_auto_point_temp_hyst,
+		      0, 1),
+	SENSOR_ATTR_2(temp3_auto_point2_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 1, 1),
+	SENSOR_ATTR_2(temp3_auto_point3_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 2, 1),
+	SENSOR_ATTR_2(temp3_auto_point4_temp_hyst, S_IRUGO,
+		      show_pwm_auto_point_temp_hyst, NULL, 3, 1),
+};
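
All of the tables above lean on SENSOR_ATTR_2() storing two small integers per
attribute -- nr (here usually the auto point) and index (here the fan/pwm/temp
channel) -- so one show/store pair can serve every entry. A minimal sketch of
how a callback recovers them, assuming only <linux/hwmon-sysfs.h>:

	/* Illustrative callback only -- the driver's real callbacks follow the
	   same pattern; to_sensor_dev_attr_2() is from <linux/hwmon-sysfs.h>. */
	static ssize_t show_example(struct device *dev,
				    struct device_attribute *devattr, char *buf)
	{
		int point   = to_sensor_dev_attr_2(devattr)->nr;    /* 5th macro arg */
		int channel = to_sensor_dev_attr_2(devattr)->index; /* 6th macro arg */

		return sprintf(buf, "auto point %d of channel %d\n", point, channel);
	}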
 
 /* Super I/O functions */
 static inline int superio_inb(int base, int reg)
@@ -299,11 +811,16 @@
 	outb(SIO_LOCK_KEY, base);
 }
 
-static inline u16 fan_from_reg(u16 reg)
+static inline int fan_from_reg(u16 reg)
 {
 	return reg ? (1500000 / reg) : 0;
 }
 
+static inline u16 fan_to_reg(int fan)
+{
+	return fan ? (1500000 / fan) : 0;
+}
+
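
The tachometer registers hold a divider rather than an RPM value, so
fan_from_reg() and fan_to_reg() above are approximate inverses around the
constant 1500000; speeds that do not divide 1500000 evenly round slightly on
the way back. A quick standalone check of the round trip (userspace, for
illustration only):

	#include <stdio.h>

	static int fan_from_reg(unsigned short reg) { return reg ? 1500000 / reg : 0; }
	static unsigned short fan_to_reg(int fan) { return fan ? 1500000 / fan : 0; }

	int main(void)
	{
		unsigned short reg = fan_to_reg(3000);	/* 3000 RPM -> register 500 */
		printf("reg=%u, back to %d RPM\n", reg, fan_from_reg(reg));
		return 0;
	}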
 static u8 f71882fg_read8(struct f71882fg_data *data, u8 reg)
 {
 	u8 val;
@@ -332,52 +849,111 @@
 	outb(val, data->addr + DATA_REG_OFFSET);
 }
 
-static struct f71882fg_data *f71882fg_update_device(struct device * dev)
+static void f71882fg_write16(struct f71882fg_data *data, u8 reg, u16 val)
+{
+	outb(reg++, data->addr + ADDR_REG_OFFSET);
+	outb(val >> 8, data->addr + DATA_REG_OFFSET);
+	outb(reg, data->addr + ADDR_REG_OFFSET);
+	outb(val & 255, data->addr + DATA_REG_OFFSET);
+}
+
+static struct f71882fg_data *f71882fg_update_device(struct device *dev)
 {
 	struct f71882fg_data *data = dev_get_drvdata(dev);
-	int nr, reg, reg2;
+	int nr, reg = 0, reg2;
+	int nr_fans = (data->type == f71882fg) ? 4 : 3;
+	int nr_ins = (data->type == f8000) ? 3 : 9;
+	int temp_start = (data->type == f8000) ? 0 : 1;
 
 	mutex_lock(&data->update_lock);
 
 	/* Update once every 60 seconds */
 	if ( time_after(jiffies, data->last_limits + 60 * HZ ) ||
 			!data->valid) {
-		data->in1_max = f71882fg_read8(data, F71882FG_REG_IN1_HIGH);
-		data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP);
+		if (data->type == f71882fg) {
+			data->in1_max =
+				f71882fg_read8(data, F71882FG_REG_IN1_HIGH);
+			data->in_beep =
+				f71882fg_read8(data, F71882FG_REG_IN_BEEP);
+		}
 
 		/* Get High & boundary temps*/
-		for (nr = 0; nr < 3; nr++) {
+		for (nr = temp_start; nr < 3 + temp_start; nr++) {
 			data->temp_ovt[nr] = f71882fg_read8(data,
 						F71882FG_REG_TEMP_OVT(nr));
 			data->temp_high[nr] = f71882fg_read8(data,
 						F71882FG_REG_TEMP_HIGH(nr));
 		}
 
-		/* Have to hardcode hyst*/
-		data->temp_hyst[0] = f71882fg_read8(data,
-						F71882FG_REG_TEMP_HYST1) >> 4;
-		/* Hyst temps 2 & 3 stored in same register */
-		reg = f71882fg_read8(data, F71882FG_REG_TEMP_HYST23);
-		data->temp_hyst[1] = reg & 0x0F;
-		data->temp_hyst[2] = reg >> 4;
-
-		/* Have to hardcode type, because temp1 is special */
-		reg  = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
+		if (data->type != f8000) {
+			data->fan_beep = f71882fg_read8(data,
+						F71882FG_REG_FAN_BEEP);
+			data->temp_beep = f71882fg_read8(data,
+						F71882FG_REG_TEMP_BEEP);
+			data->temp_hyst[0] = f71882fg_read8(data,
+						F71882FG_REG_TEMP_HYST(0));
+			data->temp_hyst[1] = f71882fg_read8(data,
+						F71882FG_REG_TEMP_HYST(1));
+			/* Have to hardcode type, because temp1 is special */
+			reg  = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
+			data->temp_type[2] = (reg & 0x04) ? 2 : 4;
+			data->temp_type[3] = (reg & 0x08) ? 2 : 4;
+		}
 		reg2 = f71882fg_read8(data, F71882FG_REG_PECI);
 		if ((reg2 & 0x03) == 0x01)
-			data->temp_type[0] = 6 /* PECI */;
+			data->temp_type[1] = 6 /* PECI */;
 		else if ((reg2 & 0x03) == 0x02)
-			data->temp_type[0] = 5 /* AMDSI */;
+			data->temp_type[1] = 5 /* AMDSI */;
+		else if (data->type != f8000)
+			data->temp_type[1] = (reg & 0x02) ? 2 : 4;
 		else
-			data->temp_type[0] = (reg & 0x02) ? 2 : 4;
+			data->temp_type[1] = 2; /* F8000 only supports BJT */
 
-		data->temp_type[1] = (reg & 0x04) ? 2 : 4;
-		data->temp_type[2] = (reg & 0x08) ? 2 : 4;
+		data->pwm_enable = f71882fg_read8(data,
+						  F71882FG_REG_PWM_ENABLE);
+		data->pwm_auto_point_hyst[0] =
+			f71882fg_read8(data, F71882FG_REG_FAN_HYST(0));
+		data->pwm_auto_point_hyst[1] =
+			f71882fg_read8(data, F71882FG_REG_FAN_HYST(1));
 
-		data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP);
+		for (nr = 0; nr < nr_fans; nr++) {
+			data->pwm_auto_point_mapping[nr] =
+			    f71882fg_read8(data,
+					   F71882FG_REG_POINT_MAPPING(nr));
 
-		data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP);
-
+			if (data->type != f71862fg) {
+				int point;
+				for (point = 0; point < 5; point++) {
+					data->pwm_auto_point_pwm[nr][point] =
+						f71882fg_read8(data,
+							F71882FG_REG_POINT_PWM
+							(nr, point));
+				}
+				for (point = 0; point < 4; point++) {
+					data->pwm_auto_point_temp[nr][point] =
+						f71882fg_read8(data,
+							F71882FG_REG_POINT_TEMP
+							(nr, point));
+				}
+			} else {
+				data->pwm_auto_point_pwm[nr][1] =
+					f71882fg_read8(data,
+						F71882FG_REG_POINT_PWM
+						(nr, 1));
+				data->pwm_auto_point_pwm[nr][4] =
+					f71882fg_read8(data,
+						F71882FG_REG_POINT_PWM
+						(nr, 4));
+				data->pwm_auto_point_temp[nr][0] =
+					f71882fg_read8(data,
+						F71882FG_REG_POINT_TEMP
+						(nr, 0));
+				data->pwm_auto_point_temp[nr][3] =
+					f71882fg_read8(data,
+						F71882FG_REG_POINT_TEMP
+						(nr, 3));
+			}
+		}
 		data->last_limits = jiffies;
 	}
 
@@ -387,19 +963,32 @@
 						F71882FG_REG_TEMP_STATUS);
 		data->temp_diode_open = f71882fg_read8(data,
 						F71882FG_REG_TEMP_DIODE_OPEN);
-		for (nr = 0; nr < 3; nr++)
+		for (nr = temp_start; nr < 3 + temp_start; nr++)
 			data->temp[nr] = f71882fg_read8(data,
 						F71882FG_REG_TEMP(nr));
 
 		data->fan_status = f71882fg_read8(data,
 						F71882FG_REG_FAN_STATUS);
-		for (nr = 0; nr < 4; nr++)
+		for (nr = 0; nr < nr_fans; nr++) {
 			data->fan[nr] = f71882fg_read16(data,
 						F71882FG_REG_FAN(nr));
+			data->fan_target[nr] =
+			    f71882fg_read16(data, F71882FG_REG_FAN_TARGET(nr));
+			data->fan_full_speed[nr] =
+			    f71882fg_read16(data,
+					    F71882FG_REG_FAN_FULL_SPEED(nr));
+			data->pwm[nr] =
+			    f71882fg_read8(data, F71882FG_REG_PWM(nr));
+		}
 
-		data->in_status = f71882fg_read8(data,
+		/* The f8000 can monitor 1 more fan, but has no pwm for it */
+		if (data->type == f8000)
+			data->fan[3] = f71882fg_read16(data,
+						F71882FG_REG_FAN(3));
+		if (data->type == f71882fg)
+			data->in_status = f71882fg_read8(data,
 						F71882FG_REG_IN_STATUS);
-		for (nr = 0; nr < 9; nr++)
+		for (nr = 0; nr < nr_ins; nr++)
 			data->in[nr] = f71882fg_read8(data,
 						F71882FG_REG_IN(nr));
 
@@ -417,7 +1006,7 @@
 	char *buf)
 {
 	struct f71882fg_data *data = f71882fg_update_device(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
 	int speed = fan_from_reg(data->fan[nr]);
 
 	if (speed == FAN_MIN_DETECT)
@@ -426,11 +1015,39 @@
 	return sprintf(buf, "%d\n", speed);
 }
 
+static ssize_t show_fan_full_speed(struct device *dev,
+				   struct device_attribute *devattr, char *buf)
+{
+	struct f71882fg_data *data = f71882fg_update_device(dev);
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	int speed = fan_from_reg(data->fan_full_speed[nr]);
+	return sprintf(buf, "%d\n", speed);
+}
+
+static ssize_t store_fan_full_speed(struct device *dev,
+				    struct device_attribute *devattr,
+				    const char *buf, size_t count)
+{
+	struct f71882fg_data *data = dev_get_drvdata(dev);
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	long val = simple_strtol(buf, NULL, 10);
+
+	val = SENSORS_LIMIT(val, 23, 1500000);
+	val = fan_to_reg(val);
+
+	mutex_lock(&data->update_lock);
+	f71882fg_write16(data, F71882FG_REG_FAN_FULL_SPEED(nr), val);
+	data->fan_full_speed[nr] = val;
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
 static ssize_t show_fan_beep(struct device *dev, struct device_attribute
 	*devattr, char *buf)
 {
 	struct f71882fg_data *data = f71882fg_update_device(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
 
 	if (data->fan_beep & (1 << nr))
 		return sprintf(buf, "1\n");
@@ -442,10 +1059,11 @@
 	*devattr, const char *buf, size_t count)
 {
 	struct f71882fg_data *data = dev_get_drvdata(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
-	int val = simple_strtoul(buf, NULL, 10);
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	unsigned long val = simple_strtoul(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
+	data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP);
 	if (val)
 		data->fan_beep |= 1 << nr;
 	else
@@ -461,7 +1079,7 @@
 	*devattr, char *buf)
 {
 	struct f71882fg_data *data = f71882fg_update_device(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
 
 	if (data->fan_status & (1 << nr))
 		return sprintf(buf, "1\n");
@@ -473,7 +1091,7 @@
 	char *buf)
 {
 	struct f71882fg_data *data = f71882fg_update_device(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
 
 	return sprintf(buf, "%d\n", data->in[nr] * 8);
 }
@@ -490,10 +1108,8 @@
 	*devattr, const char *buf, size_t count)
 {
 	struct f71882fg_data *data = dev_get_drvdata(dev);
-	int val = simple_strtoul(buf, NULL, 10) / 8;
-
-	if (val > 255)
-		val = 255;
+	long val = simple_strtol(buf, NULL, 10) / 8;
+	val = SENSORS_LIMIT(val, 0, 255);
 
 	mutex_lock(&data->update_lock);
 	f71882fg_write8(data, F71882FG_REG_IN1_HIGH, val);
@@ -507,7 +1123,7 @@
 	*devattr, char *buf)
 {
 	struct f71882fg_data *data = f71882fg_update_device(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
 
 	if (data->in_beep & (1 << nr))
 		return sprintf(buf, "1\n");
@@ -519,10 +1135,11 @@
 	*devattr, const char *buf, size_t count)
 {
 	struct f71882fg_data *data = dev_get_drvdata(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
-	int val = simple_strtoul(buf, NULL, 10);
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	unsigned long val = simple_strtoul(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
+	data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP);
 	if (val)
 		data->in_beep |= 1 << nr;
 	else
@@ -538,7 +1155,7 @@
 	*devattr, char *buf)
 {
 	struct f71882fg_data *data = f71882fg_update_device(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
 
 	if (data->in_status & (1 << nr))
 		return sprintf(buf, "1\n");
@@ -550,7 +1167,7 @@
 	char *buf)
 {
 	struct f71882fg_data *data = f71882fg_update_device(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
 
 	return sprintf(buf, "%d\n", data->temp[nr] * 1000);
 }
@@ -559,7 +1176,7 @@
 	*devattr, char *buf)
 {
 	struct f71882fg_data *data = f71882fg_update_device(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
 
 	return sprintf(buf, "%d\n", data->temp_high[nr] * 1000);
 }
@@ -568,11 +1185,9 @@
 	*devattr, const char *buf, size_t count)
 {
 	struct f71882fg_data *data = dev_get_drvdata(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
-	int val = simple_strtoul(buf, NULL, 10) / 1000;
-
-	if (val > 255)
-		val = 255;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	long val = simple_strtol(buf, NULL, 10) / 1000;
+	val = SENSORS_LIMIT(val, 0, 255);
 
 	mutex_lock(&data->update_lock);
 	f71882fg_write8(data, F71882FG_REG_TEMP_HIGH(nr), val);
@@ -586,48 +1201,46 @@
 	*devattr, char *buf)
 {
 	struct f71882fg_data *data = f71882fg_update_device(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	int temp_max_hyst;
 
-	return sprintf(buf, "%d\n",
-		(data->temp_high[nr] - data->temp_hyst[nr]) * 1000);
+	mutex_lock(&data->update_lock);
+	if (nr & 1)
+		temp_max_hyst = data->temp_hyst[nr / 2] >> 4;
+	else
+		temp_max_hyst = data->temp_hyst[nr / 2] & 0x0f;
+	temp_max_hyst = (data->temp_high[nr] - temp_max_hyst) * 1000;
+	mutex_unlock(&data->update_lock);
+
+	return sprintf(buf, "%d\n", temp_max_hyst);
 }
 
 static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
 	*devattr, const char *buf, size_t count)
 {
 	struct f71882fg_data *data = dev_get_drvdata(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
-	int val = simple_strtoul(buf, NULL, 10) / 1000;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	long val = simple_strtol(buf, NULL, 10) / 1000;
 	ssize_t ret = count;
+	u8 reg;
 
 	mutex_lock(&data->update_lock);
 
 	/* convert abs to relative and check */
+	data->temp_high[nr] = f71882fg_read8(data, F71882FG_REG_TEMP_HIGH(nr));
+	val = SENSORS_LIMIT(val, data->temp_high[nr] - 15,
+			    data->temp_high[nr]);
 	val = data->temp_high[nr] - val;
-	if (val < 0 || val > 15) {
-		ret = -EINVAL;
-		goto store_temp_max_hyst_exit;
-	}
-
-	data->temp_hyst[nr] = val;
 
 	/* convert value to register contents */
-	switch (nr) {
-		case 0:
-			val = val << 4;
-			break;
-		case 1:
-			val = val | (data->temp_hyst[2] << 4);
-			break;
-		case 2:
-			val = data->temp_hyst[1] | (val << 4);
-			break;
-	}
+	reg = f71882fg_read8(data, F71882FG_REG_TEMP_HYST(nr / 2));
+	if (nr & 1)
+		reg = (reg & 0x0f) | (val << 4);
+	else
+		reg = (reg & 0xf0) | val;
+	f71882fg_write8(data, F71882FG_REG_TEMP_HYST(nr / 2), reg);
+	data->temp_hyst[nr / 2] = reg;
 
-	f71882fg_write8(data, nr ? F71882FG_REG_TEMP_HYST23 :
-		F71882FG_REG_TEMP_HYST1, val);
-
-store_temp_max_hyst_exit:
 	mutex_unlock(&data->update_lock);
 	return ret;
 }
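
Two temperature channels share each hysteresis register: the odd channel lives
in the high nibble and the even channel in the low nibble, which is what the
nr & 1 test and the nr / 2 register index above implement. A small sketch of
that packing, with hypothetical helper names not taken from the driver:

	/* hypothetical helpers, shown only to make the nibble packing explicit */
	static unsigned char hyst_read(unsigned char reg, int nr)
	{
		return (nr & 1) ? (reg >> 4) : (reg & 0x0f);
	}

	static unsigned char hyst_pack(unsigned char reg, int nr, unsigned char val)
	{
		if (nr & 1)
			return (reg & 0x0f) | (val << 4);	/* odd channel: high nibble */
		return (reg & 0xf0) | (val & 0x0f);		/* even channel: low nibble */
	}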
@@ -636,7 +1249,7 @@
 	*devattr, char *buf)
 {
 	struct f71882fg_data *data = f71882fg_update_device(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
 
 	return sprintf(buf, "%d\n", data->temp_ovt[nr] * 1000);
 }
@@ -645,11 +1258,9 @@
 	*devattr, const char *buf, size_t count)
 {
 	struct f71882fg_data *data = dev_get_drvdata(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
-	int val = simple_strtoul(buf, NULL, 10) / 1000;
-
-	if (val > 255)
-		val = 255;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	long val = simple_strtol(buf, NULL, 10) / 1000;
+	val = SENSORS_LIMIT(val, 0, 255);
 
 	mutex_lock(&data->update_lock);
 	f71882fg_write8(data, F71882FG_REG_TEMP_OVT(nr), val);
@@ -663,17 +1274,25 @@
 	*devattr, char *buf)
 {
 	struct f71882fg_data *data = f71882fg_update_device(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	int temp_crit_hyst;
 
-	return sprintf(buf, "%d\n",
-		(data->temp_ovt[nr] - data->temp_hyst[nr]) * 1000);
+	mutex_lock(&data->update_lock);
+	if (nr & 1)
+		temp_crit_hyst = data->temp_hyst[nr / 2] >> 4;
+	else
+		temp_crit_hyst = data->temp_hyst[nr / 2] & 0x0f;
+	temp_crit_hyst = (data->temp_ovt[nr] - temp_crit_hyst) * 1000;
+	mutex_unlock(&data->update_lock);
+
+	return sprintf(buf, "%d\n", temp_crit_hyst);
 }
 
 static ssize_t show_temp_type(struct device *dev, struct device_attribute
 	*devattr, char *buf)
 {
 	struct f71882fg_data *data = f71882fg_update_device(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
 
 	return sprintf(buf, "%d\n", data->temp_type[nr]);
 }
@@ -682,9 +1301,9 @@
 	*devattr, char *buf)
 {
 	struct f71882fg_data *data = f71882fg_update_device(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
 
-	if (data->temp_beep & (1 << (nr + 1)))
+	if (data->temp_beep & (1 << nr))
 		return sprintf(buf, "1\n");
 	else
 		return sprintf(buf, "0\n");
@@ -694,14 +1313,15 @@
 	*devattr, const char *buf, size_t count)
 {
 	struct f71882fg_data *data = dev_get_drvdata(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
-	int val = simple_strtoul(buf, NULL, 10);
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	unsigned long val = simple_strtoul(buf, NULL, 10);
 
 	mutex_lock(&data->update_lock);
+	data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP);
 	if (val)
-		data->temp_beep |= 1 << (nr + 1);
+		data->temp_beep |= 1 << nr;
 	else
-		data->temp_beep &= ~(1 << (nr + 1));
+		data->temp_beep &= ~(1 << nr);
 
 	f71882fg_write8(data, F71882FG_REG_TEMP_BEEP, data->temp_beep);
 	mutex_unlock(&data->update_lock);
@@ -713,9 +1333,9 @@
 	*devattr, char *buf)
 {
 	struct f71882fg_data *data = f71882fg_update_device(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
 
-	if (data->temp_status & (1 << (nr + 1)))
+	if (data->temp_status & (1 << nr))
 		return sprintf(buf, "1\n");
 	else
 		return sprintf(buf, "0\n");
@@ -725,113 +1345,528 @@
 	*devattr, char *buf)
 {
 	struct f71882fg_data *data = f71882fg_update_device(dev);
-	int nr = to_sensor_dev_attr(devattr)->index;
+	int nr = to_sensor_dev_attr_2(devattr)->index;
 
-	if (data->temp_diode_open & (1 << (nr + 1)))
+	if (data->temp_diode_open & (1 << nr))
 		return sprintf(buf, "1\n");
 	else
 		return sprintf(buf, "0\n");
 }
 
+static ssize_t show_pwm(struct device *dev,
+			struct device_attribute *devattr, char *buf)
+{
+	struct f71882fg_data *data = f71882fg_update_device(dev);
+	int val, nr = to_sensor_dev_attr_2(devattr)->index;
+	mutex_lock(&data->update_lock);
+	if (data->pwm_enable & (1 << (2 * nr)))
+		/* PWM mode */
+		val = data->pwm[nr];
+	else {
+		/* RPM mode */
+		val = 255 * fan_from_reg(data->fan_target[nr])
+			/ fan_from_reg(data->fan_full_speed[nr]);
+	}
+	mutex_unlock(&data->update_lock);
+	return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev,
+			 struct device_attribute *devattr, const char *buf,
+			 size_t count)
+{
+	struct f71882fg_data *data = dev_get_drvdata(dev);
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	long val = simple_strtol(buf, NULL, 10);
+	val = SENSORS_LIMIT(val, 0, 255);
+
+	mutex_lock(&data->update_lock);
+	data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
+	if ((data->type == f8000 && ((data->pwm_enable >> 2 * nr) & 3) != 2) ||
+	    (data->type != f8000 && !((data->pwm_enable >> 2 * nr) & 2))) {
+		count = -EROFS;
+		goto leave;
+	}
+	if (data->pwm_enable & (1 << (2 * nr))) {
+		/* PWM mode */
+		f71882fg_write8(data, F71882FG_REG_PWM(nr), val);
+		data->pwm[nr] = val;
+	} else {
+		/* RPM mode */
+		int target, full_speed;
+		full_speed = f71882fg_read16(data,
+					     F71882FG_REG_FAN_FULL_SPEED(nr));
+		target = fan_to_reg(val * fan_from_reg(full_speed) / 255);
+		f71882fg_write16(data, F71882FG_REG_FAN_TARGET(nr), target);
+		data->fan_target[nr] = target;
+		data->fan_full_speed[nr] = full_speed;
+	}
+leave:
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+static ssize_t show_pwm_enable(struct device *dev,
+			       struct device_attribute *devattr, char *buf)
+{
+	int result = 0;
+	struct f71882fg_data *data = f71882fg_update_device(dev);
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+
+	switch ((data->pwm_enable >> 2 * nr) & 3) {
+	case 0:
+	case 1:
+		result = 2; /* Normal auto mode */
+		break;
+	case 2:
+		result = 1; /* Manual mode */
+		break;
+	case 3:
+		if (data->type == f8000)
+			result = 3; /* Thermostat mode */
+		else
+			result = 1; /* Manual mode */
+		break;
+	}
+
+	return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t store_pwm_enable(struct device *dev, struct device_attribute
+				*devattr, const char *buf, size_t count)
+{
+	struct f71882fg_data *data = dev_get_drvdata(dev);
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	long val = simple_strtol(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
+	/* Special case for F8000 auto PWM mode / Thermostat mode */
+	if (data->type == f8000 && ((data->pwm_enable >> 2 * nr) & 1)) {
+		switch (val) {
+		case 2:
+			data->pwm_enable &= ~(2 << (2 * nr));
+			break;		/* Normal auto mode */
+		case 3:
+			data->pwm_enable |= 2 << (2 * nr);
+			break;		/* Thermostat mode */
+		default:
+			count = -EINVAL;
+			goto leave;
+		}
+	} else {
+		switch (val) {
+		case 1:
+			data->pwm_enable |= 2 << (2 * nr);
+			break;		/* Manual */
+		case 2:
+			data->pwm_enable &= ~(2 << (2 * nr));
+			break;		/* Normal auto mode */
+		default:
+			count = -EINVAL;
+			goto leave;
+		}
+	}
+	f71882fg_write8(data, F71882FG_REG_PWM_ENABLE, data->pwm_enable);
+leave:
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+static ssize_t show_pwm_auto_point_pwm(struct device *dev,
+				       struct device_attribute *devattr,
+				       char *buf)
+{
+	int result;
+	struct f71882fg_data *data = f71882fg_update_device(dev);
+	int pwm = to_sensor_dev_attr_2(devattr)->index;
+	int point = to_sensor_dev_attr_2(devattr)->nr;
+
+	mutex_lock(&data->update_lock);
+	if (data->pwm_enable & (1 << (2 * pwm))) {
+		/* PWM mode */
+		result = data->pwm_auto_point_pwm[pwm][point];
+	} else {
+		/* RPM mode */
+		result = 32 * 255 / (32 + data->pwm_auto_point_pwm[pwm][point]);
+	}
+	mutex_unlock(&data->update_lock);
+
+	return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t store_pwm_auto_point_pwm(struct device *dev,
+					struct device_attribute *devattr,
+					const char *buf, size_t count)
+{
+	struct f71882fg_data *data = dev_get_drvdata(dev);
+	int pwm = to_sensor_dev_attr_2(devattr)->index;
+	int point = to_sensor_dev_attr_2(devattr)->nr;
+	long val = simple_strtol(buf, NULL, 10);
+	val = SENSORS_LIMIT(val, 0, 255);
+
+	mutex_lock(&data->update_lock);
+	data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
+	if (data->pwm_enable & (1 << (2 * pwm))) {
+		/* PWM mode */
+	} else {
+		/* RPM mode */
+		if (val < 29)	/* Prevent negative numbers */
+			val = 255;
+		else
+			val = (255 - val) * 32 / val;
+	}
+	f71882fg_write8(data, F71882FG_REG_POINT_PWM(pwm, point), val);
+	data->pwm_auto_point_pwm[pwm][point] = val;
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
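
In RPM mode the auto-point register does not hold a duty cycle;
show_pwm_auto_point_pwm() and store_pwm_auto_point_pwm() above map it to and
from the usual 0-255 range with result = 32 * 255 / (32 + reg) and
reg = (255 - val) * 32 / val. A standalone numeric check of that pair
(userspace, for illustration only):

	#include <stdio.h>

	int main(void)
	{
		int val = 128;				/* requested auto-point value */
		int reg = (255 - val) * 32 / val;	/* store path, RPM mode */
		int back = 32 * 255 / (32 + reg);	/* show path, RPM mode */

		printf("val=%d -> reg=%d -> read back as %d\n", val, reg, back);
		return 0;
	}

The round trip is only approximate because both directions use integer
division, and values below 29 are clamped, as the store function above notes,
to keep the register value from going negative.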
+static ssize_t show_pwm_auto_point_temp_hyst(struct device *dev,
+					     struct device_attribute *devattr,
+					     char *buf)
+{
+	int result = 0;
+	struct f71882fg_data *data = f71882fg_update_device(dev);
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	int point = to_sensor_dev_attr_2(devattr)->nr;
+
+	mutex_lock(&data->update_lock);
+	if (nr & 1)
+		result = data->pwm_auto_point_hyst[nr / 2] >> 4;
+	else
+		result = data->pwm_auto_point_hyst[nr / 2] & 0x0f;
+	result = 1000 * (data->pwm_auto_point_temp[nr][point] - result);
+	mutex_unlock(&data->update_lock);
+
+	return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
+					      struct device_attribute *devattr,
+					      const char *buf, size_t count)
+{
+	struct f71882fg_data *data = dev_get_drvdata(dev);
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	int point = to_sensor_dev_attr_2(devattr)->nr;
+	long val = simple_strtol(buf, NULL, 10) / 1000;
+	u8 reg;
+
+	mutex_lock(&data->update_lock);
+	data->pwm_auto_point_temp[nr][point] =
+		f71882fg_read8(data, F71882FG_REG_POINT_TEMP(nr, point));
+	val = SENSORS_LIMIT(val, data->pwm_auto_point_temp[nr][point] - 15,
+				data->pwm_auto_point_temp[nr][point]);
+	val = data->pwm_auto_point_temp[nr][point] - val;
+
+	reg = f71882fg_read8(data, F71882FG_REG_FAN_HYST(nr / 2));
+	if (nr & 1)
+		reg = (reg & 0x0f) | (val << 4);
+	else
+		reg = (reg & 0xf0) | val;
+
+	f71882fg_write8(data, F71882FG_REG_FAN_HYST(nr / 2), reg);
+	data->pwm_auto_point_hyst[nr / 2] = reg;
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+static ssize_t show_pwm_interpolate(struct device *dev,
+				    struct device_attribute *devattr, char *buf)
+{
+	int result;
+	struct f71882fg_data *data = f71882fg_update_device(dev);
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+
+	result = (data->pwm_auto_point_mapping[nr] >> 4) & 1;
+
+	return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t store_pwm_interpolate(struct device *dev,
+				     struct device_attribute *devattr,
+				     const char *buf, size_t count)
+{
+	struct f71882fg_data *data = dev_get_drvdata(dev);
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	unsigned long val = simple_strtoul(buf, NULL, 10);
+
+	mutex_lock(&data->update_lock);
+	data->pwm_auto_point_mapping[nr] =
+		f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr));
+	if (val)
+		val = data->pwm_auto_point_mapping[nr] | (1 << 4);
+	else
+		val = data->pwm_auto_point_mapping[nr] & (~(1 << 4));
+	f71882fg_write8(data, F71882FG_REG_POINT_MAPPING(nr), val);
+	data->pwm_auto_point_mapping[nr] = val;
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+static ssize_t show_pwm_auto_point_channel(struct device *dev,
+					   struct device_attribute *devattr,
+					   char *buf)
+{
+	int result;
+	struct f71882fg_data *data = f71882fg_update_device(dev);
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	int temp_start = (data->type == f8000) ? 0 : 1;
+
+	result = 1 << ((data->pwm_auto_point_mapping[nr] & 3) - temp_start);
+
+	return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t store_pwm_auto_point_channel(struct device *dev,
+					    struct device_attribute *devattr,
+					    const char *buf, size_t count)
+{
+	struct f71882fg_data *data = dev_get_drvdata(dev);
+	int nr = to_sensor_dev_attr_2(devattr)->index;
+	int temp_start = (data->type == f8000) ? 0 : 1;
+	long val = simple_strtol(buf, NULL, 10);
+
+	switch (val) {
+	case 1:
+		val = 0;
+		break;
+	case 2:
+		val = 1;
+		break;
+	case 4:
+		val = 2;
+		break;
+	default:
+		return -EINVAL;
+	}
+	val += temp_start;
+	mutex_lock(&data->update_lock);
+	data->pwm_auto_point_mapping[nr] =
+		f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr));
+	val = (data->pwm_auto_point_mapping[nr] & 0xfc) | val;
+	f71882fg_write8(data, F71882FG_REG_POINT_MAPPING(nr), val);
+	data->pwm_auto_point_mapping[nr] = val;
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+static ssize_t show_pwm_auto_point_temp(struct device *dev,
+					struct device_attribute *devattr,
+					char *buf)
+{
+	int result;
+	struct f71882fg_data *data = f71882fg_update_device(dev);
+	int pwm = to_sensor_dev_attr_2(devattr)->index;
+	int point = to_sensor_dev_attr_2(devattr)->nr;
+
+	result = data->pwm_auto_point_temp[pwm][point];
+	return sprintf(buf, "%d\n", 1000 * result);
+}
+
+static ssize_t store_pwm_auto_point_temp(struct device *dev,
+					 struct device_attribute *devattr,
+					 const char *buf, size_t count)
+{
+	struct f71882fg_data *data = dev_get_drvdata(dev);
+	int pwm = to_sensor_dev_attr_2(devattr)->index;
+	int point = to_sensor_dev_attr_2(devattr)->nr;
+	long val = simple_strtol(buf, NULL, 10) / 1000;
+	val = SENSORS_LIMIT(val, 0, 255);
+
+	mutex_lock(&data->update_lock);
+	f71882fg_write8(data, F71882FG_REG_POINT_TEMP(pwm, point), val);
+	data->pwm_auto_point_temp[pwm][point] = val;
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
 static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
 	char *buf)
 {
-	return sprintf(buf, DRVNAME "\n");
+	struct f71882fg_data *data = dev_get_drvdata(dev);
+	return sprintf(buf, "%s\n", f71882fg_names[data->type]);
 }
 
+static int __devinit f71882fg_create_sysfs_files(struct platform_device *pdev,
+	struct sensor_device_attribute_2 *attr, int count)
+{
+	int err, i;
 
-static int __devinit f71882fg_probe(struct platform_device * pdev)
+	for (i = 0; i < count; i++) {
+		err = device_create_file(&pdev->dev, &attr[i].dev_attr);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static int __devinit f71882fg_probe(struct platform_device *pdev)
 {
 	struct f71882fg_data *data;
-	int err, i;
+	struct f71882fg_sio_data *sio_data = pdev->dev.platform_data;
+	int err, i, nr_fans = (sio_data->type == f71882fg) ? 4 : 3;
 	u8 start_reg;
 
-	if (!(data = kzalloc(sizeof(struct f71882fg_data), GFP_KERNEL)))
+	data = kzalloc(sizeof(struct f71882fg_data), GFP_KERNEL);
+	if (!data)
 		return -ENOMEM;
 
 	data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
+	data->type = sio_data->type;
 	mutex_init(&data->update_lock);
 	platform_set_drvdata(pdev, data);
 
+	start_reg = f71882fg_read8(data, F71882FG_REG_START);
+	if (start_reg & 0x04) {
+		dev_warn(&pdev->dev, "Hardware monitor is powered down\n");
+		err = -ENODEV;
+		goto exit_free;
+	}
+	if (!(start_reg & 0x03)) {
+		dev_warn(&pdev->dev, "Hardware monitoring not activated\n");
+		err = -ENODEV;
+		goto exit_free;
+	}
+
+	data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
+	/* If it is a 71862 and the fan / pwm part is enabled, sanity check
+	   the pwm settings */
+	if (data->type == f71862fg && (start_reg & 0x02)) {
+		if ((data->pwm_enable & 0x15) != 0x15) {
+			dev_err(&pdev->dev,
+				"Invalid (reserved) pwm settings: 0x%02x\n",
+				(unsigned int)data->pwm_enable);
+			err = -ENODEV;
+			goto exit_free;
+		}
+	}
+
 	/* Register sysfs interface files */
-	for (i = 0; i < ARRAY_SIZE(f71882fg_dev_attr); i++) {
-		err = device_create_file(&pdev->dev, &f71882fg_dev_attr[i]);
+	err = device_create_file(&pdev->dev, &dev_attr_name);
+	if (err)
+		goto exit_unregister_sysfs;
+
+	if (start_reg & 0x01) {
+		switch (data->type) {
+		case f71882fg:
+			err = f71882fg_create_sysfs_files(pdev,
+					f71882fg_in_temp_attr,
+					ARRAY_SIZE(f71882fg_in_temp_attr));
+			if (err)
+				goto exit_unregister_sysfs;
+			/* fall through! */
+		case f71862fg:
+			err = f71882fg_create_sysfs_files(pdev,
+					f718x2fg_in_temp_attr,
+					ARRAY_SIZE(f718x2fg_in_temp_attr));
+			break;
+		case f8000:
+			err = f71882fg_create_sysfs_files(pdev,
+					f8000_in_temp_attr,
+					ARRAY_SIZE(f8000_in_temp_attr));
+			break;
+		}
 		if (err)
 			goto exit_unregister_sysfs;
 	}
 
-	start_reg = f71882fg_read8(data, F71882FG_REG_START);
-	if (start_reg & 0x01) {
-		for (i = 0; i < ARRAY_SIZE(f71882fg_in_temp_attr); i++) {
-			err = device_create_file(&pdev->dev,
-					&f71882fg_in_temp_attr[i].dev_attr);
-			if (err)
-				goto exit_unregister_sysfs;
-		}
-	}
-
 	if (start_reg & 0x02) {
-		for (i = 0; i < ARRAY_SIZE(f71882fg_fan_attr); i++) {
-			err = device_create_file(&pdev->dev,
-					&f71882fg_fan_attr[i].dev_attr);
-			if (err)
-				goto exit_unregister_sysfs;
+		err = f71882fg_create_sysfs_files(pdev, fxxxx_fan_attr,
+					ARRAY_SIZE(fxxxx_fan_attr));
+		if (err)
+			goto exit_unregister_sysfs;
+
+		switch (data->type) {
+		case f71862fg:
+			err = f71882fg_create_sysfs_files(pdev,
+					f71862fg_fan_attr,
+					ARRAY_SIZE(f71862fg_fan_attr));
+			break;
+		case f71882fg:
+			err = f71882fg_create_sysfs_files(pdev,
+					f71882fg_fan_attr,
+					ARRAY_SIZE(f71882fg_fan_attr));
+			break;
+		case f8000:
+			err = f71882fg_create_sysfs_files(pdev,
+					f8000_fan_attr,
+					ARRAY_SIZE(f8000_fan_attr));
+			break;
 		}
+		if (err)
+			goto exit_unregister_sysfs;
+
+		for (i = 0; i < nr_fans; i++)
+			dev_info(&pdev->dev, "Fan: %d is in %s mode\n", i + 1,
+				 (data->pwm_enable & (1 << 2 * i)) ?
+				 "duty-cycle" : "RPM");
 	}
 
 	data->hwmon_dev = hwmon_device_register(&pdev->dev);
 	if (IS_ERR(data->hwmon_dev)) {
 		err = PTR_ERR(data->hwmon_dev);
+		data->hwmon_dev = NULL;
 		goto exit_unregister_sysfs;
 	}
 
 	return 0;
 
 exit_unregister_sysfs:
-	for (i = 0; i < ARRAY_SIZE(f71882fg_dev_attr); i++)
-		device_remove_file(&pdev->dev, &f71882fg_dev_attr[i]);
-
-	for (i = 0; i < ARRAY_SIZE(f71882fg_in_temp_attr); i++)
-		device_remove_file(&pdev->dev,
-					&f71882fg_in_temp_attr[i].dev_attr);
-
-	for (i = 0; i < ARRAY_SIZE(f71882fg_fan_attr); i++)
-		device_remove_file(&pdev->dev, &f71882fg_fan_attr[i].dev_attr);
-
+	f71882fg_remove(pdev); /* Will unregister the sysfs files for us */
+	return err; /* f71882fg_remove() also frees our data */
+exit_free:
 	kfree(data);
-
 	return err;
 }
 
-static int __devexit f71882fg_remove(struct platform_device *pdev)
+static int f71882fg_remove(struct platform_device *pdev)
 {
 	int i;
 	struct f71882fg_data *data = platform_get_drvdata(pdev);
 
 	platform_set_drvdata(pdev, NULL);
-	hwmon_device_unregister(data->hwmon_dev);
+	if (data->hwmon_dev)
+		hwmon_device_unregister(data->hwmon_dev);
 
-	for (i = 0; i < ARRAY_SIZE(f71882fg_dev_attr); i++)
-		device_remove_file(&pdev->dev, &f71882fg_dev_attr[i]);
+	/* Note we are not looping over all attr arrays we have, as the ones
+	   below are supersets of the ones skipped. */
+	device_remove_file(&pdev->dev, &dev_attr_name);
+
+	for (i = 0; i < ARRAY_SIZE(f718x2fg_in_temp_attr); i++)
+		device_remove_file(&pdev->dev,
+					&f718x2fg_in_temp_attr[i].dev_attr);
 
 	for (i = 0; i < ARRAY_SIZE(f71882fg_in_temp_attr); i++)
 		device_remove_file(&pdev->dev,
 					&f71882fg_in_temp_attr[i].dev_attr);
 
+	for (i = 0; i < ARRAY_SIZE(fxxxx_fan_attr); i++)
+		device_remove_file(&pdev->dev, &fxxxx_fan_attr[i].dev_attr);
+
 	for (i = 0; i < ARRAY_SIZE(f71882fg_fan_attr); i++)
 		device_remove_file(&pdev->dev, &f71882fg_fan_attr[i].dev_attr);
 
+	for (i = 0; i < ARRAY_SIZE(f8000_fan_attr); i++)
+		device_remove_file(&pdev->dev, &f8000_fan_attr[i].dev_attr);
+
 	kfree(data);
 
 	return 0;
 }
 
-static int __init f71882fg_find(int sioaddr, unsigned short *address)
+static int __init f71882fg_find(int sioaddr, unsigned short *address,
+	struct f71882fg_sio_data *sio_data)
 {
 	int err = -ENODEV;
 	u16 devid;
-	u8 start_reg;
-	struct f71882fg_data data;
 
 	superio_enter(sioaddr);
 
@@ -842,7 +1877,17 @@
 	}
 
 	devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID);
-	if (devid != SIO_F71882_ID) {
+	switch (devid) {
+	case SIO_F71862_ID:
+		sio_data->type = f71862fg;
+		break;
+	case SIO_F71882_ID:
+		sio_data->type = f71882fg;
+		break;
+	case SIO_F8000_ID:
+		sio_data->type = f8000;
+		break;
+	default:
 		printk(KERN_INFO DRVNAME ": Unsupported Fintek device\n");
 		goto exit;
 	}
@@ -861,24 +1906,17 @@
 	}
 	*address &= ~(REGION_LENGTH - 1);	/* Ignore 3 LSB */
 
-	data.addr = *address;
-	start_reg = f71882fg_read8(&data, F71882FG_REG_START);
-	if (!(start_reg & 0x03)) {
-		printk(KERN_WARNING DRVNAME
-			": Hardware monitoring not activated\n");
-		goto exit;
-	}
-
 	err = 0;
-	printk(KERN_INFO DRVNAME ": Found F71882FG chip at %#x, revision %d\n",
-		(unsigned int)*address,
+	printk(KERN_INFO DRVNAME ": Found %s chip at %#x, revision %d\n",
+		f71882fg_names[sio_data->type],	(unsigned int)*address,
 		(int)superio_inb(sioaddr, SIO_REG_DEVREV));
 exit:
 	superio_exit(sioaddr);
 	return err;
 }
 
-static int __init f71882fg_device_add(unsigned short address)
+static int __init f71882fg_device_add(unsigned short address,
+	const struct f71882fg_sio_data *sio_data)
 {
 	struct resource res = {
 		.start	= address,
@@ -892,12 +1930,23 @@
 		return -ENOMEM;
 
 	res.name = f71882fg_pdev->name;
+	err = acpi_check_resource_conflict(&res);
+	if (err)
+		return err;
+
 	err = platform_device_add_resources(f71882fg_pdev, &res, 1);
 	if (err) {
 		printk(KERN_ERR DRVNAME ": Device resource addition failed\n");
 		goto exit_device_put;
 	}
 
+	err = platform_device_add_data(f71882fg_pdev, sio_data,
+				       sizeof(struct f71882fg_sio_data));
+	if (err) {
+		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+		goto exit_device_put;
+	}
+
 	err = platform_device_add(f71882fg_pdev);
 	if (err) {
 		printk(KERN_ERR DRVNAME ": Device addition failed\n");
@@ -916,14 +1965,20 @@
 {
 	int err = -ENODEV;
 	unsigned short address;
+	struct f71882fg_sio_data sio_data;
 
-	if (f71882fg_find(0x2e, &address) && f71882fg_find(0x4e, &address))
+	memset(&sio_data, 0, sizeof(sio_data));
+
+	if (f71882fg_find(0x2e, &address, &sio_data) &&
+	    f71882fg_find(0x4e, &address, &sio_data))
 		goto exit;
 
-	if ((err = platform_driver_register(&f71882fg_driver)))
+	err = platform_driver_register(&f71882fg_driver);
+	if (err)
 		goto exit;
 
-	if ((err = f71882fg_device_add(address)))
+	err = f71882fg_device_add(address, &sio_data);
+	if (err)
 		goto exit_driver;
 
 	return 0;
@@ -941,7 +1996,7 @@
 }
 
 MODULE_DESCRIPTION("F71882FG Hardware Monitoring Driver");
-MODULE_AUTHOR("Hans Edgington (hans@edgington.nl)");
+MODULE_AUTHOR("Hans Edgington, Hans de Goede (hdegoede@redhat.com)");
 MODULE_LICENSE("GPL");
 
 module_init(f71882fg_init);
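
For the pwmN files themselves, when a fan is in RPM (speed-tracking) mode,
store_pwm() above converts the 0-255 value into a fan target register scaled
against the programmed full speed, and show_pwm() converts it back. A
standalone sketch using the same 1500000/reg encoding, with an assumed
3000 RPM full speed:

	#include <stdio.h>

	static int fan_from_reg(unsigned short reg) { return reg ? 1500000 / reg : 0; }
	static unsigned short fan_to_reg(int fan) { return fan ? 1500000 / fan : 0; }

	int main(void)
	{
		unsigned short full_speed = fan_to_reg(3000);	/* assumed full speed */
		int pwm = 128;					/* request roughly 50% */

		/* store_pwm(), RPM mode: target = pwm/255 of the full speed */
		unsigned short target = fan_to_reg(pwm * fan_from_reg(full_speed) / 255);

		/* show_pwm(), RPM mode: scale the target back to 0-255 */
		int shown = 255 * fan_from_reg(target) / fan_from_reg(full_speed);

		printf("target reg=%u, pwm reads back as %d\n", target, shown);
		return 0;
	}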
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 9671703..d07f4ef 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -1,6 +1,6 @@
 /* fschmd.c
  *
- * Copyright (C) 2007 Hans de Goede <j.w.r.degoede@hhs.nl>
+ * Copyright (C) 2007,2008 Hans de Goede <hdegoede@redhat.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -42,11 +42,20 @@
 #include <linux/mutex.h>
 #include <linux/sysfs.h>
 #include <linux/dmi.h>
+#include <linux/fs.h>
+#include <linux/watchdog.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/kref.h>
 
 /* Addresses to scan */
 static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
 
 /* Insmod parameters */
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+	__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 I2C_CLIENT_INSMOD_5(fscpos, fscher, fscscy, fschrc, fschmd);
 
 /*
@@ -63,19 +72,26 @@
 #define FSCHMD_REG_EVENT_STATE		0x04
 #define FSCHMD_REG_CONTROL		0x05
 
-#define FSCHMD_CONTROL_ALERT_LED_MASK	0x01
+#define FSCHMD_CONTROL_ALERT_LED	0x01
 
-/* watchdog (support to be implemented) */
+/* watchdog */
 #define FSCHMD_REG_WDOG_PRESET		0x28
 #define FSCHMD_REG_WDOG_STATE		0x23
 #define FSCHMD_REG_WDOG_CONTROL		0x21
 
+#define FSCHMD_WDOG_CONTROL_TRIGGER	0x10
+#define FSCHMD_WDOG_CONTROL_STARTED	0x10 /* the same as trigger */
+#define FSCHMD_WDOG_CONTROL_STOP	0x20
+#define FSCHMD_WDOG_CONTROL_RESOLUTION	0x40
+
+#define FSCHMD_WDOG_STATE_CARDRESET	0x02
+
 /* voltages, weird order is to keep the same order as the old drivers */
 static const u8 FSCHMD_REG_VOLT[3] = { 0x45, 0x42, 0x48 };
 
 /* minimum pwm at which the fan is driven (pwm can be increased depending on
    the temp. Notice that for the scy some fans share their minimum speed.
-   Also notice that with the scy the sensor order is different then with the
+   Also notice that with the scy the sensor order is different than with the
    other chips, this order was in the 2.4 driver and kept for consistency. */
 static const u8 FSCHMD_REG_FAN_MIN[5][6] = {
 	{ 0x55, 0x65 },					/* pos */
@@ -115,8 +131,8 @@
 static const int FSCHMD_NO_FAN_SENSORS[5] = { 3, 3, 6, 4, 5 };
 
 /* Fan status register bitmasks */
-#define FSCHMD_FAN_ALARM_MASK		0x04 /* called fault by FSC! */
-#define FSCHMD_FAN_NOT_PRESENT_MASK	0x08 /* not documented */
+#define FSCHMD_FAN_ALARM	0x04 /* called fault by FSC! */
+#define FSCHMD_FAN_NOT_PRESENT	0x08 /* not documented */
 
 
 /* actual temperature registers */
@@ -158,14 +174,11 @@
 static const int FSCHMD_NO_TEMP_SENSORS[5] = { 3, 3, 4, 3, 5 };
 
 /* temp status register bitmasks */
-#define FSCHMD_TEMP_WORKING_MASK	0x01
-#define FSCHMD_TEMP_ALERT_MASK		0x02
+#define FSCHMD_TEMP_WORKING	0x01
+#define FSCHMD_TEMP_ALERT	0x02
 /* there only really is an alarm if the sensor is working and alert == 1 */
 #define FSCHMD_TEMP_ALARM_MASK \
-	(FSCHMD_TEMP_WORKING_MASK | FSCHMD_TEMP_ALERT_MASK)
-
-/* our driver name */
-#define FSCHMD_NAME "fschmd"
+	(FSCHMD_TEMP_WORKING | FSCHMD_TEMP_ALERT)
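
Since the alert bit is meaningless while the sensor is not working, an alarm
should only be reported when both bits of FSCHMD_TEMP_ALARM_MASK are set; a
one-line sketch of the intended test (illustrative, with status standing in
for a temp_status[] byte):

	/* alarm only when the sensor is working *and* flags an alert */
	int alarm = (status & FSCHMD_TEMP_ALARM_MASK) == FSCHMD_TEMP_ALARM_MASK;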
 
 /*
  * Functions declarations
@@ -195,7 +208,7 @@
 static struct i2c_driver fschmd_driver = {
 	.class		= I2C_CLASS_HWMON,
 	.driver = {
-		.name	= FSCHMD_NAME,
+		.name	= "fschmd",
 	},
 	.probe		= fschmd_probe,
 	.remove		= fschmd_remove,
@@ -209,14 +222,26 @@
  */
 
 struct fschmd_data {
+	struct i2c_client *client;
 	struct device *hwmon_dev;
 	struct mutex update_lock;
+	struct mutex watchdog_lock;
+	struct list_head list; /* member of the watchdog_data_list */
+	struct kref kref;
+	struct miscdevice watchdog_miscdev;
 	int kind;
+	unsigned long watchdog_is_open;
+	char watchdog_expect_close;
+	char watchdog_name[10]; /* must be unique to avoid sysfs conflict */
 	char valid; /* zero until following fields are valid */
 	unsigned long last_updated; /* in jiffies */
 
 	/* register values */
+	u8 revision;            /* chip revision */
 	u8 global_control;	/* global control register */
+	u8 watchdog_control;    /* watchdog control register */
+	u8 watchdog_state;      /* watchdog status register */
+	u8 watchdog_preset;     /* watchdog counter preset on trigger val */
 	u8 volt[3];		/* 12, 5, battery voltage */
 	u8 temp_act[5];		/* temperature */
 	u8 temp_status[5];	/* status of sensor */
@@ -228,11 +253,28 @@
 };
 
 /* Global variables to hold information read from special DMI tables, which are
-   available on FSC machines with an fscher or later chip. */
+   available on FSC machines with an fscher or later chip. There is no need to
+   protect these with a lock, as they are only modified from our attach
+   function (which always gets called with the i2c-core lock held) and are
+   never accessed before the attach function is done with them. */
 static int dmi_mult[3] = { 490, 200, 100 };
 static int dmi_offset[3] = { 0, 0, 0 };
 static int dmi_vref = -1;
 
+/* Somewhat ugly :( global data pointer list with all fschmd devices, so that
+   we can find our device data; when using misc_register() there is no other
+   way to get to one's device data from the open fop. */
+static LIST_HEAD(watchdog_data_list);
+/* Note this lock not only protects list access, but also data.kref access */
+static DEFINE_MUTEX(watchdog_data_mutex);
+
+/* Release our data struct when we're detached from the i2c client *and* all
+   references to our watchdog device are released */
+static void fschmd_release_resources(struct kref *ref)
+{
+	struct fschmd_data *data = container_of(ref, struct fschmd_data, kref);
+	kfree(data);
+}
 
 /*
  * Sysfs attr show / store functions
@@ -300,7 +342,7 @@
 	struct fschmd_data *data = fschmd_update_device(dev);
 
 	/* bit 0 set means sensor working ok, so no fault! */
-	if (data->temp_status[index] & FSCHMD_TEMP_WORKING_MASK)
+	if (data->temp_status[index] & FSCHMD_TEMP_WORKING)
 		return sprintf(buf, "0\n");
 	else
 		return sprintf(buf, "1\n");
@@ -385,7 +427,7 @@
 	int index = to_sensor_dev_attr(devattr)->index;
 	struct fschmd_data *data = fschmd_update_device(dev);
 
-	if (data->fan_status[index] & FSCHMD_FAN_ALARM_MASK)
+	if (data->fan_status[index] & FSCHMD_FAN_ALARM)
 		return sprintf(buf, "1\n");
 	else
 		return sprintf(buf, "0\n");
@@ -397,7 +439,7 @@
 	int index = to_sensor_dev_attr(devattr)->index;
 	struct fschmd_data *data = fschmd_update_device(dev);
 
-	if (data->fan_status[index] & FSCHMD_FAN_NOT_PRESENT_MASK)
+	if (data->fan_status[index] & FSCHMD_FAN_NOT_PRESENT)
 		return sprintf(buf, "1\n");
 	else
 		return sprintf(buf, "0\n");
@@ -449,7 +491,7 @@
 {
 	struct fschmd_data *data = fschmd_update_device(dev);
 
-	if (data->global_control & FSCHMD_CONTROL_ALERT_LED_MASK)
+	if (data->global_control & FSCHMD_CONTROL_ALERT_LED)
 		return sprintf(buf, "1\n");
 	else
 		return sprintf(buf, "0\n");
@@ -467,9 +509,9 @@
 	reg = i2c_smbus_read_byte_data(to_i2c_client(dev), FSCHMD_REG_CONTROL);
 
 	if (v)
-		reg |= FSCHMD_CONTROL_ALERT_LED_MASK;
+		reg |= FSCHMD_CONTROL_ALERT_LED;
 	else
-		reg &= ~FSCHMD_CONTROL_ALERT_LED_MASK;
+		reg &= ~FSCHMD_CONTROL_ALERT_LED;
 
 	i2c_smbus_write_byte_data(to_i2c_client(dev), FSCHMD_REG_CONTROL, reg);
 
@@ -551,7 +593,265 @@
 
 
 /*
- * Real code
+ * Watchdog routines
+ */
+
+static int watchdog_set_timeout(struct fschmd_data *data, int timeout)
+{
+	int ret, resolution;
+	int kind = data->kind + 1; /* 0-x array index -> 1-x module param */
+
+	/* 2 second or 60 second resolution? */
+	if (timeout <= 510 || kind == fscpos || kind == fscscy)
+		resolution = 2;
+	else
+		resolution = 60;
+
+	if (timeout < resolution || timeout > (resolution * 255))
+		return -EINVAL;
+
+	mutex_lock(&data->watchdog_lock);
+	if (!data->client) {
+		ret = -ENODEV;
+		goto leave;
+	}
+
+	if (resolution == 2)
+		data->watchdog_control &= ~FSCHMD_WDOG_CONTROL_RESOLUTION;
+	else
+		data->watchdog_control |= FSCHMD_WDOG_CONTROL_RESOLUTION;
+
+	data->watchdog_preset = DIV_ROUND_UP(timeout, resolution);
+
+	/* Write new timeout value */
+	i2c_smbus_write_byte_data(data->client, FSCHMD_REG_WDOG_PRESET,
+		data->watchdog_preset);
+	/* Write new control register, do not trigger! */
+	i2c_smbus_write_byte_data(data->client, FSCHMD_REG_WDOG_CONTROL,
+		data->watchdog_control & ~FSCHMD_WDOG_CONTROL_TRIGGER);
+
+	ret = data->watchdog_preset * resolution;
+
+leave:
+	mutex_unlock(&data->watchdog_lock);
+	return ret;
+}
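
Illustration only (an editorial aside, not from the driver): the value returned by watchdog_set_timeout() above is the requested timeout rounded up to the chip's resolution, so callers can get back slightly more than they asked for. A minimal userspace-style sketch of the same arithmetic:

	#include <stdio.h>

	/* Mirrors the DIV_ROUND_UP() rounding done in watchdog_set_timeout(). */
	static int effective_timeout(int timeout, int resolution)
	{
		int preset = (timeout + resolution - 1) / resolution;

		return preset * resolution;
	}

	int main(void)
	{
		printf("%d\n", effective_timeout(90, 2));	/* 90  */
		printf("%d\n", effective_timeout(600, 60));	/* 600 */
		printf("%d\n", effective_timeout(511, 60));	/* 540: rounded up from 511 */
		return 0;
	}
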
+
+static int watchdog_get_timeout(struct fschmd_data *data)
+{
+	int timeout;
+
+	mutex_lock(&data->watchdog_lock);
+	if (data->watchdog_control & FSCHMD_WDOG_CONTROL_RESOLUTION)
+		timeout = data->watchdog_preset * 60;
+	else
+		timeout = data->watchdog_preset * 2;
+	mutex_unlock(&data->watchdog_lock);
+
+	return timeout;
+}
+
+static int watchdog_trigger(struct fschmd_data *data)
+{
+	int ret = 0;
+
+	mutex_lock(&data->watchdog_lock);
+	if (!data->client) {
+		ret = -ENODEV;
+		goto leave;
+	}
+
+	data->watchdog_control |= FSCHMD_WDOG_CONTROL_TRIGGER;
+	i2c_smbus_write_byte_data(data->client, FSCHMD_REG_WDOG_CONTROL,
+					data->watchdog_control);
+leave:
+	mutex_unlock(&data->watchdog_lock);
+	return ret;
+}
+
+static int watchdog_stop(struct fschmd_data *data)
+{
+	int ret = 0;
+
+	mutex_lock(&data->watchdog_lock);
+	if (!data->client) {
+		ret = -ENODEV;
+		goto leave;
+	}
+
+	data->watchdog_control &= ~FSCHMD_WDOG_CONTROL_STARTED;
+	/* Don't store the stop flag in our watchdog control register copy, as
+	   it's a write-only bit (read always returns 0) */
+	i2c_smbus_write_byte_data(data->client, FSCHMD_REG_WDOG_CONTROL,
+		data->watchdog_control | FSCHMD_WDOG_CONTROL_STOP);
+leave:
+	mutex_unlock(&data->watchdog_lock);
+	return ret;
+}
+
+static int watchdog_open(struct inode *inode, struct file *filp)
+{
+	struct fschmd_data *pos, *data = NULL;
+
+	/* We get called from drivers/char/misc.c with misc_mtx held, and we
+	   call misc_register() from fschmd_probe() with watchdog_data_mutex
+	   held. As misc_register() takes the misc_mtx lock, this is a possible
+	   deadlock, so we use mutex_trylock here. */
+	if (!mutex_trylock(&watchdog_data_mutex))
+		return -ERESTARTSYS;
+	list_for_each_entry(pos, &watchdog_data_list, list) {
+		if (pos->watchdog_miscdev.minor == iminor(inode)) {
+			data = pos;
+			break;
+		}
+	}
+	/* Note we will always have found data here, so we don't check for NULL */
+	kref_get(&data->kref);
+	mutex_unlock(&watchdog_data_mutex);
+
+	if (test_and_set_bit(0, &data->watchdog_is_open))
+		return -EBUSY;
+
+	/* Start the watchdog */
+	watchdog_trigger(data);
+	filp->private_data = data;
+
+	return nonseekable_open(inode, filp);
+}
+
+static int watchdog_release(struct inode *inode, struct file *filp)
+{
+	struct fschmd_data *data = filp->private_data;
+
+	if (data->watchdog_expect_close) {
+		watchdog_stop(data);
+		data->watchdog_expect_close = 0;
+	} else {
+		watchdog_trigger(data);
+		dev_crit(&data->client->dev,
+			"unexpected close, not stopping watchdog!\n");
+	}
+
+	clear_bit(0, &data->watchdog_is_open);
+
+	mutex_lock(&watchdog_data_mutex);
+	kref_put(&data->kref, fschmd_release_resources);
+	mutex_unlock(&watchdog_data_mutex);
+
+	return 0;
+}
+
+static ssize_t watchdog_write(struct file *filp, const char __user *buf,
+	size_t count, loff_t *offset)
+{
+	int ret;
+	struct fschmd_data *data = filp->private_data;
+
+	if (count) {
+		if (!nowayout) {
+			size_t i;
+
+			/* Clear it in case it was set with a previous write */
+			data->watchdog_expect_close = 0;
+
+			for (i = 0; i != count; i++) {
+				char c;
+				if (get_user(c, buf + i))
+					return -EFAULT;
+				if (c == 'V')
+					data->watchdog_expect_close = 1;
+			}
+		}
+		ret = watchdog_trigger(data);
+		if (ret < 0)
+			return ret;
+	}
+	return count;
+}
+
+static int watchdog_ioctl(struct inode *inode, struct file *filp,
+	unsigned int cmd, unsigned long arg)
+{
+	static struct watchdog_info ident = {
+		.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
+				WDIOF_CARDRESET,
+		.identity = "FSC watchdog"
+	};
+	int i, ret = 0;
+	struct fschmd_data *data = filp->private_data;
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		ident.firmware_version = data->revision;
+		if (!nowayout)
+			ident.options |= WDIOF_MAGICCLOSE;
+		if (copy_to_user((void __user *)arg, &ident, sizeof(ident)))
+			ret = -EFAULT;
+		break;
+
+	case WDIOC_GETSTATUS:
+		ret = put_user(0, (int __user *)arg);
+		break;
+
+	case WDIOC_GETBOOTSTATUS:
+		if (data->watchdog_state & FSCHMD_WDOG_STATE_CARDRESET)
+			ret = put_user(WDIOF_CARDRESET, (int __user *)arg);
+		else
+			ret = put_user(0, (int __user *)arg);
+		break;
+
+	case WDIOC_KEEPALIVE:
+		ret = watchdog_trigger(data);
+		break;
+
+	case WDIOC_GETTIMEOUT:
+		i = watchdog_get_timeout(data);
+		ret = put_user(i, (int __user *)arg);
+		break;
+
+	case WDIOC_SETTIMEOUT:
+		if (get_user(i, (int __user *)arg)) {
+			ret = -EFAULT;
+			break;
+		}
+		ret = watchdog_set_timeout(data, i);
+		if (ret > 0)
+			ret = put_user(ret, (int __user *)arg);
+		break;
+
+	case WDIOC_SETOPTIONS:
+		if (get_user(i, (int __user *)arg)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		if (i & WDIOS_DISABLECARD)
+			ret = watchdog_stop(data);
+		else if (i & WDIOS_ENABLECARD)
+			ret = watchdog_trigger(data);
+		else
+			ret = -EINVAL;
+
+		break;
+	default:
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
+static struct file_operations watchdog_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.open = watchdog_open,
+	.release = watchdog_release,
+	.write = watchdog_write,
+	.ioctl = watchdog_ioctl,
+};
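
For reference, the chardev registered through these fops speaks the standard Linux watchdog API; a minimal userspace sketch, assuming a /dev/watchdog node exists for the registered minor:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int timeout = 120;
		int fd = open("/dev/watchdog", O_WRONLY);

		if (fd < 0)
			return 1;
		ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* driver may round the value up */
		write(fd, "k", 1);			/* any write acts as a keepalive */
		write(fd, "V", 1);			/* magic character: expect a clean close */
		close(fd);				/* release stops the watchdog, unless nowayout */
		return 0;
	}
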
+
+
+/*
+ * Detect, register, unregister and update device functions
  */
 
 /* DMI decode routine to read voltage scaling factors from special DMI tables,
@@ -661,9 +961,9 @@
 			const struct i2c_device_id *id)
 {
 	struct fschmd_data *data;
-	u8 revision;
 	const char * const names[5] = { "Poseidon", "Hermes", "Scylla",
 					"Heracles", "Heimdall" };
+	const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
 	int i, err;
 	enum chips kind = id->driver_data;
 
@@ -673,6 +973,13 @@
 
 	i2c_set_clientdata(client, data);
 	mutex_init(&data->update_lock);
+	mutex_init(&data->watchdog_lock);
+	INIT_LIST_HEAD(&data->list);
+	kref_init(&data->kref);
+	/* Store client pointer in our data struct for watchdog usage
+	   (where the client is found through a data ptr instead of the
+	   other way around) */
+	data->client = client;
 
 	if (kind == fscpos) {
 		/* The Poseidon has hardwired temp limits, fill these
@@ -683,16 +990,27 @@
 	}
 
 	/* Read the special DMI table for fscher and newer chips */
-	if (kind == fscher || kind >= fschrc) {
+	if ((kind == fscher || kind >= fschrc) && dmi_vref == -1) {
 		dmi_walk(fschmd_dmi_decode);
 		if (dmi_vref == -1) {
-			printk(KERN_WARNING FSCHMD_NAME
-				": Couldn't get voltage scaling factors from "
+			dev_warn(&client->dev,
+				"Couldn't get voltage scaling factors from "
 				"BIOS DMI table, using builtin defaults\n");
 			dmi_vref = 33;
 		}
 	}
 
+	/* Read in some never changing registers */
+	data->revision = i2c_smbus_read_byte_data(client, FSCHMD_REG_REVISION);
+	data->global_control = i2c_smbus_read_byte_data(client,
+					FSCHMD_REG_CONTROL);
+	data->watchdog_control = i2c_smbus_read_byte_data(client,
+					FSCHMD_REG_WDOG_CONTROL);
+	data->watchdog_state = i2c_smbus_read_byte_data(client,
+					FSCHMD_REG_WDOG_STATE);
+	data->watchdog_preset = i2c_smbus_read_byte_data(client,
+					FSCHMD_REG_WDOG_PRESET);
+
 	/* i2c kind goes from 1-5, we want from 0-4 to address arrays */
 	data->kind = kind - 1;
 
@@ -735,9 +1053,43 @@
 		goto exit_detach;
 	}
 
-	revision = i2c_smbus_read_byte_data(client, FSCHMD_REG_REVISION);
-	printk(KERN_INFO FSCHMD_NAME ": Detected FSC %s chip, revision: %d\n",
-		names[data->kind], (int) revision);
+	/* We take the watchdog_data_mutex lock early so that watchdog_open()
+	   cannot run in the window where misc_register() has completed but
+	   we've not yet added our data to the watchdog_data_list (and set the
+	   default timeout) */
+	mutex_lock(&watchdog_data_mutex);
+	for (i = 0; i < ARRAY_SIZE(watchdog_minors); i++) {
+		/* Register our watchdog part */
+		snprintf(data->watchdog_name, sizeof(data->watchdog_name),
+			"watchdog%c", (i == 0) ? '\0' : ('0' + i));
+		data->watchdog_miscdev.name = data->watchdog_name;
+		data->watchdog_miscdev.fops = &watchdog_fops;
+		data->watchdog_miscdev.minor = watchdog_minors[i];
+		err = misc_register(&data->watchdog_miscdev);
+		if (err == -EBUSY)
+			continue;
+		if (err) {
+			data->watchdog_miscdev.minor = 0;
+			dev_err(&client->dev,
+				"Registering watchdog chardev: %d\n", err);
+			break;
+		}
+
+		list_add(&data->list, &watchdog_data_list);
+		watchdog_set_timeout(data, 60);
+		dev_info(&client->dev,
+			"Registered watchdog chardev major 10, minor: %d\n",
+			watchdog_minors[i]);
+		break;
+	}
+	if (i == ARRAY_SIZE(watchdog_minors)) {
+		data->watchdog_miscdev.minor = 0;
+		dev_warn(&client->dev, "Couldn't register watchdog chardev "
+			"(due to no free minor)\n");
+	}
+	mutex_unlock(&watchdog_data_mutex);
+
+	dev_info(&client->dev, "Detected FSC %s chip, revision: %d\n",
+		names[data->kind], (int) data->revision);
 
 	return 0;
 
@@ -751,6 +1103,24 @@
 	struct fschmd_data *data = i2c_get_clientdata(client);
 	int i;
 
+	/* Unregister the watchdog (if registered) */
+	if (data->watchdog_miscdev.minor) {
+		misc_deregister(&data->watchdog_miscdev);
+		if (data->watchdog_is_open) {
+			dev_warn(&client->dev,
+				"i2c client detached with watchdog open! "
+				"Stopping watchdog.\n");
+			watchdog_stop(data);
+		}
+		mutex_lock(&watchdog_data_mutex);
+		list_del(&data->list);
+		mutex_unlock(&watchdog_data_mutex);
+		/* Tell the watchdog code the client is gone */
+		mutex_lock(&data->watchdog_lock);
+		data->client = NULL;
+		mutex_unlock(&data->watchdog_lock);
+	}
+
 	/* Check if registered in case we're called from fschmd_detect
 	   to cleanup after an error */
 	if (data->hwmon_dev)
@@ -765,7 +1135,10 @@
 		device_remove_file(&client->dev,
 					&fschmd_fan_attr[i].dev_attr);
 
-	kfree(data);
+	mutex_lock(&watchdog_data_mutex);
+	kref_put(&data->kref, fschmd_release_resources);
+	mutex_unlock(&watchdog_data_mutex);
+
 	return 0;
 }
 
@@ -798,7 +1171,7 @@
 					data->temp_act[i] < data->temp_max[i])
 				i2c_smbus_write_byte_data(client,
 					FSCHMD_REG_TEMP_STATE[data->kind][i],
-					FSCHMD_TEMP_ALERT_MASK);
+					FSCHMD_TEMP_ALERT);
 		}
 
 		for (i = 0; i < FSCHMD_NO_FAN_SENSORS[data->kind]; i++) {
@@ -816,28 +1189,17 @@
 					FSCHMD_REG_FAN_MIN[data->kind][i]);
 
 			/* reset fan status if speed is back to > 0 */
-			if ((data->fan_status[i] & FSCHMD_FAN_ALARM_MASK) &&
+			if ((data->fan_status[i] & FSCHMD_FAN_ALARM) &&
 					data->fan_act[i])
 				i2c_smbus_write_byte_data(client,
 					FSCHMD_REG_FAN_STATE[data->kind][i],
-					FSCHMD_FAN_ALARM_MASK);
+					FSCHMD_FAN_ALARM);
 		}
 
 		for (i = 0; i < 3; i++)
 			data->volt[i] = i2c_smbus_read_byte_data(client,
 						FSCHMD_REG_VOLT[i]);
 
-		data->global_control = i2c_smbus_read_byte_data(client,
-						FSCHMD_REG_CONTROL);
-
-		/* To be implemented in the future
-		data->watchdog[0] = i2c_smbus_read_byte_data(client,
-						FSCHMD_REG_WDOG_PRESET);
-		data->watchdog[1] = i2c_smbus_read_byte_data(client,
-						FSCHMD_REG_WDOG_STATE);
-		data->watchdog[2] = i2c_smbus_read_byte_data(client,
-						FSCHMD_REG_WDOG_CONTROL); */
-
 		data->last_updated = jiffies;
 		data->valid = 1;
 	}
@@ -857,7 +1219,7 @@
 	i2c_del_driver(&fschmd_driver);
 }
 
-MODULE_AUTHOR("Hans de Goede <j.w.r.degoede@hhs.nl>");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
 MODULE_DESCRIPTION("FSC Poseidon, Hermes, Scylla, Heracles and "
 			"Heimdall driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
new file mode 100644
index 0000000..0370524
--- /dev/null
+++ b/drivers/hwmon/hp_accel.c
@@ -0,0 +1,334 @@
+/*
+ *  hp_accel.c - Interface between LIS3LV02DL driver and HP ACPI BIOS
+ *
+ *  Copyright (C) 2007-2008 Yan Burman
+ *  Copyright (C) 2008 Eric Piel
+ *  Copyright (C) 2008-2009 Pavel Machek
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/kthread.h>
+#include <linux/semaphore.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/freezer.h>
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <linux/leds.h>
+#include <acpi/acpi_drivers.h>
+#include <asm/atomic.h>
+#include "lis3lv02d.h"
+
+#define DRIVER_NAME     "lis3lv02d"
+#define ACPI_MDPS_CLASS "accelerometer"
+
+/* Delayed LEDs infrastructure ------------------------------------ */
+
+/* Special LED class that can defer work */
+struct delayed_led_classdev {
+	struct led_classdev led_classdev;
+	struct work_struct work;
+	enum led_brightness new_brightness;
+
+	unsigned int led;		/* For driver */
+	void (*set_brightness)(struct delayed_led_classdev *data, enum led_brightness value);
+};
+
+static inline void delayed_set_status_worker(struct work_struct *work)
+{
+	struct delayed_led_classdev *data =
+			container_of(work, struct delayed_led_classdev, work);
+
+	data->set_brightness(data, data->new_brightness);
+}
+
+static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
+			      enum led_brightness brightness)
+{
+	struct delayed_led_classdev *data = container_of(led_cdev,
+			     struct delayed_led_classdev, led_classdev);
+	data->new_brightness = brightness;
+	schedule_work(&data->work);
+}
+
+/* HP-specific accelerometer driver ------------------------------------ */
+
+/* For automatic insertion of the module */
+static struct acpi_device_id lis3lv02d_device_ids[] = {
+	{"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
+	{"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
+
+
+/**
+ * lis3lv02d_acpi_init - ACPI _INI method: initialize the device.
+ * @handle: the handle of the device
+ *
+ * Returns AE_OK on success.
+ */
+acpi_status lis3lv02d_acpi_init(acpi_handle handle)
+{
+	return acpi_evaluate_object(handle, METHOD_NAME__INI, NULL, NULL);
+}
+
+/**
+ * lis3lv02d_acpi_read - ACPI ALRD method: read a register
+ * @handle: the handle of the device
+ * @reg:    the register to read
+ * @ret:    result of the operation
+ *
+ * Returns AE_OK on success.
+ */
+acpi_status lis3lv02d_acpi_read(acpi_handle handle, int reg, u8 *ret)
+{
+	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+	struct acpi_object_list args = { 1, &arg0 };
+	unsigned long long lret;
+	acpi_status status;
+
+	arg0.integer.value = reg;
+
+	status = acpi_evaluate_integer(handle, "ALRD", &args, &lret);
+	*ret = lret;
+	return status;
+}
+
+/**
+ * lis3lv02d_acpi_write - ACPI ALWR method: write to a register
+ * @handle: the handle of the device
+ * @reg:    the register to write to
+ * @val:    the value to write
+ *
+ * Returns AE_OK on success.
+ */
+acpi_status lis3lv02d_acpi_write(acpi_handle handle, int reg, u8 val)
+{
+	unsigned long long ret; /* Not used when writing */
+	union acpi_object in_obj[2];
+	struct acpi_object_list args = { 2, in_obj };
+
+	in_obj[0].type          = ACPI_TYPE_INTEGER;
+	in_obj[0].integer.value = reg;
+	in_obj[1].type          = ACPI_TYPE_INTEGER;
+	in_obj[1].integer.value = val;
+
+	return acpi_evaluate_integer(handle, "ALWR", &args, &ret);
+}
+
+static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
+{
+	adev.ac = *((struct axis_conversion *)dmi->driver_data);
+	printk(KERN_INFO DRIVER_NAME ": hardware type %s found.\n", dmi->ident);
+
+	return 1;
+}
+
+/* Represents, for each axis seen by userspace, the corresponding hw axis (+1).
+ * If the value is negative, the opposite of the hw value is used. */
+static struct axis_conversion lis3lv02d_axis_normal = {1, 2, 3};
+static struct axis_conversion lis3lv02d_axis_y_inverted = {1, -2, 3};
+static struct axis_conversion lis3lv02d_axis_x_inverted = {-1, 2, 3};
+static struct axis_conversion lis3lv02d_axis_z_inverted = {1, 2, -3};
+static struct axis_conversion lis3lv02d_axis_xy_rotated_left = {-2, 1, 3};
+static struct axis_conversion lis3lv02d_axis_xy_swap_inverted = {-2, -1, 3};
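
As a sketch of the convention described above (the mapping itself is applied by lis3lv02d_get_axis() in lis3lv02d.c, which is not part of this hunk; the helper below is only an illustration of the index/sign rule):

	#include <stdio.h>

	/* Illustrative helper: a positive index selects hw axis (index - 1),
	 * a negative index selects the same axis with the sign flipped. */
	static int axis_value(signed char idx, const int hw[3])
	{
		return idx > 0 ? hw[idx - 1] : -hw[-idx - 1];
	}

	int main(void)
	{
		int hw[3] = { 10, 20, 30 };
		signed char rot_left[3] = { -2, 1, 3 };	/* lis3lv02d_axis_xy_rotated_left */

		/* prints -20 10 30 */
		printf("%d %d %d\n", axis_value(rot_left[0], hw),
		       axis_value(rot_left[1], hw), axis_value(rot_left[2], hw));
		return 0;
	}
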
+
+#define AXIS_DMI_MATCH(_ident, _name, _axis) {		\
+	.ident = _ident,				\
+	.callback = lis3lv02d_dmi_matched,		\
+	.matches = {					\
+		DMI_MATCH(DMI_PRODUCT_NAME, _name)	\
+	},						\
+	.driver_data = &lis3lv02d_axis_##_axis		\
+}
+static struct dmi_system_id lis3lv02d_dmi_ids[] = {
+	/* product names are truncated to match all variants of the same model */
+	AXIS_DMI_MATCH("NC64x0", "HP Compaq nc64", x_inverted),
+	AXIS_DMI_MATCH("NC84x0", "HP Compaq nc84", z_inverted),
+	AXIS_DMI_MATCH("NX9420", "HP Compaq nx9420", x_inverted),
+	AXIS_DMI_MATCH("NW9440", "HP Compaq nw9440", x_inverted),
+	AXIS_DMI_MATCH("NC2510", "HP Compaq 2510", y_inverted),
+	AXIS_DMI_MATCH("NC8510", "HP Compaq 8510", xy_swap_inverted),
+	AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left),
+	{ NULL, }
+/* Laptop models without axis info (yet):
+ * "NC651xx" "HP Compaq 651"
+ * "NC671xx" "HP Compaq 671"
+ * "NC6910" "HP Compaq 6910"
+ * HP Compaq 8710x Notebook PC / Mobile Workstation
+ * "NC2400" "HP Compaq nc2400"
+ * "NX74x0" "HP Compaq nx74"
+ * "NX6325" "HP Compaq nx6325"
+ * "NC4400" "HP Compaq nc4400"
+ */
+};
+
+static void hpled_set(struct delayed_led_classdev *led_cdev, enum led_brightness value)
+{
+	acpi_handle handle = adev.device->handle;
+	unsigned long long ret; /* Not used when writing */
+	union acpi_object in_obj[1];
+	struct acpi_object_list args = { 1, in_obj };
+
+	in_obj[0].type          = ACPI_TYPE_INTEGER;
+	in_obj[0].integer.value = !!value;
+
+	acpi_evaluate_integer(handle, "ALED", &args, &ret);
+}
+
+static struct delayed_led_classdev hpled_led = {
+	.led_classdev = {
+		.name			= "hp::hddprotect",
+		.default_trigger	= "none",
+		.brightness_set		= delayed_sysfs_set,
+		.flags                  = LED_CORE_SUSPENDRESUME,
+	},
+	.set_brightness = hpled_set,
+};
+
+static int lis3lv02d_add(struct acpi_device *device)
+{
+	u8 val;
+	int ret;
+
+	if (!device)
+		return -EINVAL;
+
+	adev.device = device;
+	adev.init = lis3lv02d_acpi_init;
+	adev.read = lis3lv02d_acpi_read;
+	adev.write = lis3lv02d_acpi_write;
+	strcpy(acpi_device_name(device), DRIVER_NAME);
+	strcpy(acpi_device_class(device), ACPI_MDPS_CLASS);
+	device->driver_data = &adev;
+
+	lis3lv02d_acpi_read(device->handle, WHO_AM_I, &val);
+	if ((val != LIS3LV02DL_ID) && (val != LIS302DL_ID)) {
+		printk(KERN_ERR DRIVER_NAME
+				": Accelerometer chip not LIS3LV02D{L,Q}\n");
+	}
+
+	/* If possible use a "standard" axes order */
+	if (dmi_check_system(lis3lv02d_dmi_ids) == 0) {
+		printk(KERN_INFO DRIVER_NAME ": laptop model unknown, "
+				 "using default axes configuration\n");
+		adev.ac = lis3lv02d_axis_normal;
+	}
+
+	INIT_WORK(&hpled_led.work, delayed_set_status_worker);
+	ret = led_classdev_register(NULL, &hpled_led.led_classdev);
+	if (ret)
+		return ret;
+
+	ret = lis3lv02d_init_device(&adev);
+	if (ret) {
+		flush_work(&hpled_led.work);
+		led_classdev_unregister(&hpled_led.led_classdev);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int lis3lv02d_remove(struct acpi_device *device, int type)
+{
+	if (!device)
+		return -EINVAL;
+
+	lis3lv02d_joystick_disable();
+	lis3lv02d_poweroff(device->handle);
+
+	flush_work(&hpled_led.work);
+	led_classdev_unregister(&hpled_led.led_classdev);
+
+	return lis3lv02d_remove_fs();
+}
+
+
+#ifdef CONFIG_PM
+static int lis3lv02d_suspend(struct acpi_device *device, pm_message_t state)
+{
+	/* make sure the device is off when we suspend */
+	lis3lv02d_poweroff(device->handle);
+	return 0;
+}
+
+static int lis3lv02d_resume(struct acpi_device *device)
+{
+	/* put back the device in the right state (ACPI might turn it on) */
+	mutex_lock(&adev.lock);
+	if (adev.usage > 0)
+		lis3lv02d_poweron(device->handle);
+	else
+		lis3lv02d_poweroff(device->handle);
+	mutex_unlock(&adev.lock);
+	return 0;
+}
+#else
+#define lis3lv02d_suspend NULL
+#define lis3lv02d_resume NULL
+#endif
+
+/* For the HP MDPS aka 3D Driveguard */
+static struct acpi_driver lis3lv02d_driver = {
+	.name  = DRIVER_NAME,
+	.class = ACPI_MDPS_CLASS,
+	.ids   = lis3lv02d_device_ids,
+	.ops = {
+		.add     = lis3lv02d_add,
+		.remove  = lis3lv02d_remove,
+		.suspend = lis3lv02d_suspend,
+		.resume  = lis3lv02d_resume,
+	}
+};
+
+static int __init lis3lv02d_init_module(void)
+{
+	int ret;
+
+	if (acpi_disabled)
+		return -ENODEV;
+
+	ret = acpi_bus_register_driver(&lis3lv02d_driver);
+	if (ret < 0)
+		return ret;
+
+	printk(KERN_INFO DRIVER_NAME " driver loaded.\n");
+
+	return 0;
+}
+
+static void __exit lis3lv02d_exit_module(void)
+{
+	acpi_bus_unregister_driver(&lis3lv02d_driver);
+}
+
+MODULE_DESCRIPTION("Glue between LIS3LV02Dx and HP ACPI BIOS and support for disk protection LED.");
+MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek");
+MODULE_LICENSE("GPL");
+
+module_init(lis3lv02d_init_module);
+module_exit(lis3lv02d_exit_module);
+
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index 2ede938..27d7f72 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -490,6 +490,13 @@
 	0
 };
 
+static struct pci_device_id i5k_amb_ids[] __devinitdata = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR) },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, i5k_amb_ids);
+
 static int __devinit i5k_amb_probe(struct platform_device *pdev)
 {
 	struct i5k_amb_data *data;
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index b74c957..95a99c5 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -14,6 +14,7 @@
               IT8712F  Super I/O chip w/LPC interface
               IT8716F  Super I/O chip w/LPC interface
               IT8718F  Super I/O chip w/LPC interface
+              IT8720F  Super I/O chip w/LPC interface
               IT8726F  Super I/O chip w/LPC interface
               Sis950   A clone of the IT8705F
 
@@ -48,11 +49,12 @@
 #include <linux/sysfs.h>
 #include <linux/string.h>
 #include <linux/dmi.h>
+#include <linux/acpi.h>
 #include <asm/io.h>
 
 #define DRVNAME "it87"
 
-enum chips { it87, it8712, it8716, it8718 };
+enum chips { it87, it8712, it8716, it8718, it8720 };
 
 static unsigned short force_id;
 module_param(force_id, ushort, 0);
@@ -64,7 +66,10 @@
 #define	DEV	0x07	/* Register: Logical device select */
 #define	VAL	0x2f	/* The value to read/write */
 #define PME	0x04	/* The device with the fan registers in it */
-#define GPIO	0x07	/* The device with the IT8718F VID value in it */
+
+/* The device with the IT8718F/IT8720F VID value in it */
+#define GPIO	0x07
+
 #define	DEVID	0x20	/* Register: Device ID */
 #define	DEVREV	0x22	/* Register: Device Revision */
 
@@ -113,6 +118,7 @@
 #define IT8705F_DEVID 0x8705
 #define IT8716F_DEVID 0x8716
 #define IT8718F_DEVID 0x8718
+#define IT8720F_DEVID 0x8720
 #define IT8726F_DEVID 0x8726
 #define IT87_ACT_REG  0x30
 #define IT87_BASE_REG 0x60
@@ -150,8 +156,8 @@
 #define IT87_REG_ALARM2        0x02
 #define IT87_REG_ALARM3        0x03
 
-/* The IT8718F has the VID value in a different register, in Super-I/O
-   configuration space. */
+/* The IT8718F and IT8720F have the VID value in a different register, in
+   Super-I/O configuration space. */
 #define IT87_REG_VID           0x0a
 /* The IT8705F and IT8712F earlier than revision 0x08 use register 0x0b
    for fan divisors. Later IT8712F revisions must use 16-bit tachometer
@@ -282,7 +288,8 @@
 	return (data->type == it87 && data->revision >= 0x03)
 	    || (data->type == it8712 && data->revision >= 0x08)
 	    || data->type == it8716
-	    || data->type == it8718;
+	    || data->type == it8718
+	    || data->type == it8720;
 }
 
 static int it87_probe(struct platform_device *pdev);
@@ -992,6 +999,9 @@
 	case IT8718F_DEVID:
 		sio_data->type = it8718;
 		break;
+	case IT8720F_DEVID:
+		sio_data->type = it8720;
+		break;
 	case 0xffff:	/* No device at all */
 		goto exit;
 	default:
@@ -1022,7 +1032,8 @@
 		int reg;
 
 		superio_select(GPIO);
-		if (chip_type == it8718)
+		if ((chip_type == it8718) ||
+		    (chip_type == it8720))
 			sio_data->vid_value = superio_inb(IT87_SIO_VID_REG);
 
 		reg = superio_inb(IT87_SIO_PINX2_REG);
@@ -1068,6 +1079,7 @@
 		"it8712",
 		"it8716",
 		"it8718",
+		"it8720",
 	};
 
 	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -1226,7 +1238,7 @@
 	}
 
 	if (data->type == it8712 || data->type == it8716
-	 || data->type == it8718) {
+	 || data->type == it8718 || data->type == it8720) {
 		data->vrm = vid_which_vrm();
 		/* VID reading from Super-I/O config space if available */
 		data->vid = sio_data->vid_value;
@@ -1374,7 +1386,7 @@
 			it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127);
 	}
 
-	/* Check if temperature channnels are reset manually or by some reason */
+	/* Check if temperature channels are reset manually or for some reason */
 	tmp = it87_read_value(data, IT87_REG_TEMP_ENABLE);
 	if ((tmp & 0x3f) == 0) {
 		/* Temp1,Temp3=thermistor; Temp2=thermal diode */
@@ -1513,7 +1525,8 @@
 
 		data->sensor = it87_read_value(data, IT87_REG_TEMP_ENABLE);
 		/* The 8705 does not have VID capability.
-		   The 8718 does not use IT87_REG_VID for the same purpose. */
+		   The 8718 and the 8720 don't use IT87_REG_VID for the
+		   same purpose. */
 		if (data->type == it8712 || data->type == it8716) {
 			data->vid = it87_read_value(data, IT87_REG_VID);
 			/* The older IT8712F revisions had only 5 VID pins,
@@ -1540,6 +1553,10 @@
 	};
 	int err;
 
+	err = acpi_check_resource_conflict(&res);
+	if (err)
+		goto exit;
+
 	pdev = platform_device_alloc(DRVNAME, address);
 	if (!pdev) {
 		err = -ENOMEM;
@@ -1608,7 +1625,7 @@
 
 MODULE_AUTHOR("Chris Gauthron, "
 	      "Jean Delvare <khali@linux-fr.org>");
-MODULE_DESCRIPTION("IT8705F/8712F/8716F/8718F/8726F, SiS950 driver");
+MODULE_DESCRIPTION("IT8705F/8712F/8716F/8718F/8720F/8726F, SiS950 driver");
 module_param(update_vbat, bool, 0);
 MODULE_PARM_DESC(update_vbat, "Update vbat if set else return powerup value");
 module_param(fix_pwm_polarity, bool, 0);
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index bd2bde0..1fe9951 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -31,6 +31,7 @@
 #include <linux/hwmon-sysfs.h>
 #include <linux/err.h>
 #include <linux/mutex.h>
+#include <asm/processor.h>
 
 #define TEMP_FROM_REG(val)	(((((val) >> 16) & 0xff) - 49) * 1000)
 #define REG_TEMP	0xe4
@@ -47,6 +48,8 @@
 	/* registers values */
 	u8 sensorsp;		/* sensor presence bits - SEL_CORE & SEL_PLACE */
 	u32 temp[2][2];		/* core, place */
+	u8 swap_core_select;    /* meaning of SEL_CORE is inverted */
+	u32 temp_offset;
 };
 
 static struct k8temp_data *k8temp_update_device(struct device *dev)
@@ -114,10 +117,15 @@
 	    to_sensor_dev_attr_2(devattr);
 	int core = attr->nr;
 	int place = attr->index;
+	int temp;
 	struct k8temp_data *data = k8temp_update_device(dev);
 
-	return sprintf(buf, "%d\n",
-		       TEMP_FROM_REG(data->temp[core][place]));
+	if (data->swap_core_select)
+		core = core ? 0 : 1;
+
+	temp = TEMP_FROM_REG(data->temp[core][place]) + data->temp_offset;
+
+	return sprintf(buf, "%d\n", temp);
 }
 
 /* core, place */
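
A worked example of the scaling in show_temp() above (illustration only, with a hypothetical register value): TEMP_FROM_REG() takes bits 23:16 of the raw register and reports millidegrees Celsius, to which the 21000 offset is added on the affected RevG desktop parts.

	#include <stdio.h>

	#define TEMP_FROM_REG(val)	(((((val) >> 16) & 0xff) - 49) * 1000)

	int main(void)
	{
		unsigned int reg = 0x004a0000;	/* example raw REG_TEMP value */
		int temp = TEMP_FROM_REG(reg);	/* (74 - 49) * 1000 = 25000 */

		printf("%d\n", temp);		/* 25000 -> 25.0 degrees C */
		printf("%d\n", temp + 21000);	/* 46000 with the RevG desktop offset */
		return 0;
	}
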
@@ -141,20 +149,49 @@
 	int err;
 	u8 scfg;
 	u32 temp;
+	u8 model, stepping;
 	struct k8temp_data *data;
-	u32 cpuid = cpuid_eax(1);
-
-	/* this feature should be available since SH-C0 core */
-	if ((cpuid == 0xf40) || (cpuid == 0xf50) || (cpuid == 0xf51)) {
-		err = -ENODEV;
-		goto exit;
-	}
 
 	if (!(data = kzalloc(sizeof(struct k8temp_data), GFP_KERNEL))) {
 		err = -ENOMEM;
 		goto exit;
 	}
 
+	model = boot_cpu_data.x86_model;
+	stepping = boot_cpu_data.x86_mask;
+
+	switch (boot_cpu_data.x86) {
+	case 0xf:
+		/* feature available since SH-C0, exclude older revisions */
+		if (((model == 4) && (stepping == 0)) ||
+		    ((model == 5) && (stepping <= 1))) {
+			err = -ENODEV;
+			goto exit_free;
+		}
+
+		/*
+		 * AMD NPT family 0fh, i.e. RevF and RevG:
+		 * meaning of SEL_CORE bit is inverted
+		 */
+		if (model >= 0x40) {
+			data->swap_core_select = 1;
+			dev_warn(&pdev->dev, "Temperature readouts might be "
+				 "wrong - check erratum #141\n");
+		}
+
+		if ((model >= 0x69) &&
+		    !(model == 0xc1 || model == 0x6c || model == 0x7c)) {
+			/*
+			 * RevG desktop CPUs (i.e. no socket S1G1 parts)
+			 * need additional offset, otherwise reported
+			 * temperature is below ambient temperature
+			 */
+			data->temp_offset = 21000;
+		}
+
+		break;
+	}
+
 	pci_read_config_byte(pdev, REG_TEMP, &scfg);
 	scfg &= ~(SEL_PLACE | SEL_CORE);		/* Select sensor 0, core0 */
 	pci_write_config_byte(pdev, REG_TEMP, scfg);
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index c002144..219d2d0 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -3,6 +3,7 @@
  *
  *  Copyright (C) 2007-2008 Yan Burman
  *  Copyright (C) 2008 Eric Piel
+ *  Copyright (C) 2008 Pavel Machek
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -39,7 +40,6 @@
 #include "lis3lv02d.h"
 
 #define DRIVER_NAME     "lis3lv02d"
-#define ACPI_MDPS_CLASS "accelerometer"
 
 /* joystick device poll interval in milliseconds */
 #define MDPS_POLL_INTERVAL 50
@@ -55,100 +55,17 @@
 /* Maximum value our axis may get for the input device (signed 12 bits) */
 #define MDPS_MAX_VAL 2048
 
-struct axis_conversion {
-	s8	x;
-	s8	y;
-	s8	z;
-};
+struct acpi_lis3lv02d adev;
+EXPORT_SYMBOL_GPL(adev);
 
-struct acpi_lis3lv02d {
-	struct acpi_device	*device;   /* The ACPI device */
-	struct input_dev	*idev;     /* input device */
-	struct task_struct	*kthread;  /* kthread for input */
-	struct mutex            lock;
-	struct platform_device	*pdev;     /* platform device */
-	atomic_t		count;     /* interrupt count after last read */
-	int			xcalib;    /* calibrated null value for x */
-	int			ycalib;    /* calibrated null value for y */
-	int			zcalib;    /* calibrated null value for z */
-	unsigned char		is_on;     /* whether the device is on or off */
-	unsigned char		usage;     /* usage counter */
-	struct axis_conversion	ac;        /* hw -> logical axis */
-};
-
-static struct acpi_lis3lv02d adev;
-
-static int lis3lv02d_remove_fs(void);
 static int lis3lv02d_add_fs(struct acpi_device *device);
 
-/* For automatic insertion of the module */
-static struct acpi_device_id lis3lv02d_device_ids[] = {
-	{"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
-	{"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
-
-/**
- * lis3lv02d_acpi_init - ACPI _INI method: initialize the device.
- * @handle: the handle of the device
- *
- * Returns AE_OK on success.
- */
-static inline acpi_status lis3lv02d_acpi_init(acpi_handle handle)
-{
-	return acpi_evaluate_object(handle, METHOD_NAME__INI, NULL, NULL);
-}
-
-/**
- * lis3lv02d_acpi_read - ACPI ALRD method: read a register
- * @handle: the handle of the device
- * @reg:    the register to read
- * @ret:    result of the operation
- *
- * Returns AE_OK on success.
- */
-static acpi_status lis3lv02d_acpi_read(acpi_handle handle, int reg, u8 *ret)
-{
-	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-	struct acpi_object_list args = { 1, &arg0 };
-	unsigned long long lret;
-	acpi_status status;
-
-	arg0.integer.value = reg;
-
-	status = acpi_evaluate_integer(handle, "ALRD", &args, &lret);
-	*ret = lret;
-	return status;
-}
-
-/**
- * lis3lv02d_acpi_write - ACPI ALWR method: write to a register
- * @handle: the handle of the device
- * @reg:    the register to write to
- * @val:    the value to write
- *
- * Returns AE_OK on success.
- */
-static acpi_status lis3lv02d_acpi_write(acpi_handle handle, int reg, u8 val)
-{
-	unsigned long long ret; /* Not used when writting */
-	union acpi_object in_obj[2];
-	struct acpi_object_list args = { 2, in_obj };
-
-	in_obj[0].type          = ACPI_TYPE_INTEGER;
-	in_obj[0].integer.value = reg;
-	in_obj[1].type          = ACPI_TYPE_INTEGER;
-	in_obj[1].integer.value = val;
-
-	return acpi_evaluate_integer(handle, "ALWR", &args, &ret);
-}
-
 static s16 lis3lv02d_read_16(acpi_handle handle, int reg)
 {
 	u8 lo, hi;
 
-	lis3lv02d_acpi_read(handle, reg, &lo);
-	lis3lv02d_acpi_read(handle, reg + 1, &hi);
+	adev.read(handle, reg, &lo);
+	adev.read(handle, reg + 1, &hi);
 	/* In "12 bit right justified" mode, bit 6, bit 7, bit 8 = bit 5 */
 	return (s16)((hi << 8) | lo);
 }
@@ -190,54 +107,31 @@
 	*z = lis3lv02d_get_axis(adev.ac.z, position);
 }
 
-static inline void lis3lv02d_poweroff(acpi_handle handle)
+void lis3lv02d_poweroff(acpi_handle handle)
 {
 	adev.is_on = 0;
 	/* disable X,Y,Z axis and power down */
-	lis3lv02d_acpi_write(handle, CTRL_REG1, 0x00);
+	adev.write(handle, CTRL_REG1, 0x00);
 }
+EXPORT_SYMBOL_GPL(lis3lv02d_poweroff);
 
-static void lis3lv02d_poweron(acpi_handle handle)
+void lis3lv02d_poweron(acpi_handle handle)
 {
 	u8 val;
 
 	adev.is_on = 1;
-	lis3lv02d_acpi_init(handle);
-	lis3lv02d_acpi_write(handle, FF_WU_CFG, 0);
+	adev.init(handle);
+	adev.write(handle, FF_WU_CFG, 0);
 	/*
 	 * BDU: LSB and MSB values are not updated until both have been read.
 	 *      So the value read will always be correct.
 	 * IEN: Interrupt for free-fall and DD, not for data-ready.
 	 */
-	lis3lv02d_acpi_read(handle, CTRL_REG2, &val);
+	adev.read(handle, CTRL_REG2, &val);
 	val |= CTRL2_BDU | CTRL2_IEN;
-	lis3lv02d_acpi_write(handle, CTRL_REG2, val);
+	adev.write(handle, CTRL_REG2, val);
 }
-
-#ifdef CONFIG_PM
-static int lis3lv02d_suspend(struct acpi_device *device, pm_message_t state)
-{
-	/* make sure the device is off when we suspend */
-	lis3lv02d_poweroff(device->handle);
-	return 0;
-}
-
-static int lis3lv02d_resume(struct acpi_device *device)
-{
-	/* put back the device in the right state (ACPI might turn it on) */
-	mutex_lock(&adev.lock);
-	if (adev.usage > 0)
-		lis3lv02d_poweron(device->handle);
-	else
-		lis3lv02d_poweroff(device->handle);
-	mutex_unlock(&adev.lock);
-	return 0;
-}
-#else
-#define lis3lv02d_suspend NULL
-#define lis3lv02d_resume NULL
-#endif
-
+EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
 
 /*
  * To be called before starting to use the device. It makes sure that the
@@ -315,7 +209,7 @@
 	lis3lv02d_get_xyz(adev.device->handle, &adev.xcalib, &adev.ycalib, &adev.zcalib);
 }
 
-static int lis3lv02d_joystick_enable(void)
+int lis3lv02d_joystick_enable(void)
 {
 	int err;
 
@@ -349,8 +243,9 @@
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(lis3lv02d_joystick_enable);
 
-static void lis3lv02d_joystick_disable(void)
+void lis3lv02d_joystick_disable(void)
 {
 	if (!adev.idev)
 		return;
@@ -358,13 +253,13 @@
 	input_unregister_device(adev.idev);
 	adev.idev = NULL;
 }
-
+EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
 
 /*
  * Initialise the accelerometer and the various subsystems.
  * Should be rather independant of the bus system.
  */
-static int lis3lv02d_init_device(struct acpi_lis3lv02d *dev)
+int lis3lv02d_init_device(struct acpi_lis3lv02d *dev)
 {
 	mutex_init(&dev->lock);
 	lis3lv02d_add_fs(dev->device);
@@ -376,93 +271,7 @@
 	lis3lv02d_decrease_use(dev);
 	return 0;
 }
-
-static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
-{
-	adev.ac = *((struct axis_conversion *)dmi->driver_data);
-	printk(KERN_INFO DRIVER_NAME ": hardware type %s found.\n", dmi->ident);
-
-	return 1;
-}
-
-/* Represents, for each axis seen by userspace, the corresponding hw axis (+1).
- * If the value is negative, the opposite of the hw value is used. */
-static struct axis_conversion lis3lv02d_axis_normal = {1, 2, 3};
-static struct axis_conversion lis3lv02d_axis_y_inverted = {1, -2, 3};
-static struct axis_conversion lis3lv02d_axis_x_inverted = {-1, 2, 3};
-static struct axis_conversion lis3lv02d_axis_z_inverted = {1, 2, -3};
-static struct axis_conversion lis3lv02d_axis_xy_rotated_left = {-2, 1, 3};
-static struct axis_conversion lis3lv02d_axis_xy_swap_inverted = {-2, -1, 3};
-
-#define AXIS_DMI_MATCH(_ident, _name, _axis) {		\
-	.ident = _ident,				\
-	.callback = lis3lv02d_dmi_matched,		\
-	.matches = {					\
-		DMI_MATCH(DMI_PRODUCT_NAME, _name)	\
-	},						\
-	.driver_data = &lis3lv02d_axis_##_axis		\
-}
-static struct dmi_system_id lis3lv02d_dmi_ids[] = {
-	/* product names are truncated to match all kinds of a same model */
-	AXIS_DMI_MATCH("NC64x0", "HP Compaq nc64", x_inverted),
-	AXIS_DMI_MATCH("NC84x0", "HP Compaq nc84", z_inverted),
-	AXIS_DMI_MATCH("NX9420", "HP Compaq nx9420", x_inverted),
-	AXIS_DMI_MATCH("NW9440", "HP Compaq nw9440", x_inverted),
-	AXIS_DMI_MATCH("NC2510", "HP Compaq 2510", y_inverted),
-	AXIS_DMI_MATCH("NC8510", "HP Compaq 8510", xy_swap_inverted),
-	AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left),
-	{ NULL, }
-/* Laptop models without axis info (yet):
- * "NC651xx" "HP Compaq 651"
- * "NC671xx" "HP Compaq 671"
- * "NC6910" "HP Compaq 6910"
- * HP Compaq 8710x Notebook PC / Mobile Workstation
- * "NC2400" "HP Compaq nc2400"
- * "NX74x0" "HP Compaq nx74"
- * "NX6325" "HP Compaq nx6325"
- * "NC4400" "HP Compaq nc4400"
- */
-};
-
-static int lis3lv02d_add(struct acpi_device *device)
-{
-	u8 val;
-
-	if (!device)
-		return -EINVAL;
-
-	adev.device = device;
-	strcpy(acpi_device_name(device), DRIVER_NAME);
-	strcpy(acpi_device_class(device), ACPI_MDPS_CLASS);
-	device->driver_data = &adev;
-
-	lis3lv02d_acpi_read(device->handle, WHO_AM_I, &val);
-	if ((val != LIS3LV02DL_ID) && (val != LIS302DL_ID)) {
-		printk(KERN_ERR DRIVER_NAME
-				": Accelerometer chip not LIS3LV02D{L,Q}\n");
-	}
-
-	/* If possible use a "standard" axes order */
-	if (dmi_check_system(lis3lv02d_dmi_ids) == 0) {
-		printk(KERN_INFO DRIVER_NAME ": laptop model unknown, "
-				 "using default axes configuration\n");
-		adev.ac = lis3lv02d_axis_normal;
-	}
-
-	return lis3lv02d_init_device(&adev);
-}
-
-static int lis3lv02d_remove(struct acpi_device *device, int type)
-{
-	if (!device)
-		return -EINVAL;
-
-	lis3lv02d_joystick_disable();
-	lis3lv02d_poweroff(device->handle);
-
-	return lis3lv02d_remove_fs();
-}
-
+EXPORT_SYMBOL_GPL(lis3lv02d_init_device);
 
 /* Sysfs stuff */
 static ssize_t lis3lv02d_position_show(struct device *dev,
@@ -501,7 +310,7 @@
 	int val;
 
 	lis3lv02d_increase_use(&adev);
-	lis3lv02d_acpi_read(adev.device->handle, CTRL_REG1, &ctrl);
+	adev.read(adev.device->handle, CTRL_REG1, &ctrl);
 	lis3lv02d_decrease_use(&adev);
 	val = (ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4;
 	return sprintf(buf, "%d\n", lis3lv02dl_df_val[val]);
@@ -523,6 +332,7 @@
 	.attrs = lis3lv02d_attributes
 };
 
+
 static int lis3lv02d_add_fs(struct acpi_device *device)
 {
 	adev.pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
@@ -532,50 +342,15 @@
 	return sysfs_create_group(&adev.pdev->dev.kobj, &lis3lv02d_attribute_group);
 }
 
-static int lis3lv02d_remove_fs(void)
+int lis3lv02d_remove_fs(void)
 {
 	sysfs_remove_group(&adev.pdev->dev.kobj, &lis3lv02d_attribute_group);
 	platform_device_unregister(adev.pdev);
 	return 0;
 }
-
-/* For the HP MDPS aka 3D Driveguard */
-static struct acpi_driver lis3lv02d_driver = {
-	.name  = DRIVER_NAME,
-	.class = ACPI_MDPS_CLASS,
-	.ids   = lis3lv02d_device_ids,
-	.ops = {
-		.add     = lis3lv02d_add,
-		.remove  = lis3lv02d_remove,
-		.suspend = lis3lv02d_suspend,
-		.resume  = lis3lv02d_resume,
-	}
-};
-
-static int __init lis3lv02d_init_module(void)
-{
-	int ret;
-
-	if (acpi_disabled)
-		return -ENODEV;
-
-	ret = acpi_bus_register_driver(&lis3lv02d_driver);
-	if (ret < 0)
-		return ret;
-
-	printk(KERN_INFO DRIVER_NAME " driver loaded.\n");
-
-	return 0;
-}
-
-static void __exit lis3lv02d_exit_module(void)
-{
-	acpi_bus_unregister_driver(&lis3lv02d_driver);
-}
+EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
 
 MODULE_DESCRIPTION("ST LIS3LV02Dx three-axis digital accelerometer driver");
 MODULE_AUTHOR("Yan Burman and Eric Piel");
 MODULE_LICENSE("GPL");
 
-module_init(lis3lv02d_init_module);
-module_exit(lis3lv02d_exit_module);
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
index 330cfc6..223f1c0 100644
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/hwmon/lis3lv02d.h
@@ -23,7 +23,7 @@
  * The actual chip is STMicroelectronics LIS3LV02DL or LIS3LV02DQ that seems to
  * be connected via SPI. There exists also several similar chips (such as LIS302DL or
  * LIS3L02DQ) but not in the HP laptops and they have slightly different registers.
- * They can also be connected via IÂË›C.
+ * They can also be connected via I²C.
  */
 
 #define LIS3LV02DL_ID	0x3A /* Also the LIS3LV02DQ */
@@ -147,3 +147,36 @@
 	DD_SRC_IA	= 0x40,
 };
 
+struct axis_conversion {
+	s8	x;
+	s8	y;
+	s8	z;
+};
+
+struct acpi_lis3lv02d {
+	struct acpi_device	*device;   /* The ACPI device */
+	acpi_status (*init) (acpi_handle handle);
+	acpi_status (*write) (acpi_handle handle, int reg, u8 val);
+	acpi_status (*read) (acpi_handle handle, int reg, u8 *ret);
+
+	struct input_dev	*idev;     /* input device */
+	struct task_struct	*kthread;  /* kthread for input */
+	struct mutex            lock;
+	struct platform_device	*pdev;     /* platform device */
+	atomic_t		count;     /* interrupt count after last read */
+	int			xcalib;    /* calibrated null value for x */
+	int			ycalib;    /* calibrated null value for y */
+	int			zcalib;    /* calibrated null value for z */
+	unsigned char		is_on;     /* whether the device is on or off */
+	unsigned char		usage;     /* usage counter */
+	struct axis_conversion	ac;        /* hw -> logical axis */
+};
+
+int lis3lv02d_init_device(struct acpi_lis3lv02d *dev);
+int lis3lv02d_joystick_enable(void);
+void lis3lv02d_joystick_disable(void);
+void lis3lv02d_poweroff(acpi_handle handle);
+void lis3lv02d_poweron(acpi_handle handle);
+int lis3lv02d_remove_fs(void);
+
+extern struct acpi_lis3lv02d adev;
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index d435f00..ae6204f 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -37,9 +37,13 @@
 
 #define DRVNAME		"lm70"
 
+#define LM70_CHIP_LM70		0	/* original NS LM70 */
+#define LM70_CHIP_TMP121	1	/* TI TMP121/TMP123 */
+
 struct lm70 {
 	struct device *hwmon_dev;
 	struct mutex lock;
+	unsigned int chip;
 };
 
 /* sysfs hook function */
@@ -47,7 +51,7 @@
 		struct device_attribute *attr, char *buf)
 {
 	struct spi_device *spi = to_spi_device(dev);
-	int status, val;
+	int status, val = 0;
 	u8 rxbuf[2];
 	s16 raw=0;
 	struct lm70 *p_lm70 = dev_get_drvdata(&spi->dev);
@@ -65,12 +69,12 @@
 		"spi_write_then_read failed with status %d\n", status);
 		goto out;
 	}
-	dev_dbg(dev, "rxbuf[1] : 0x%x rxbuf[0] : 0x%x\n", rxbuf[1], rxbuf[0]);
-
-	raw = (rxbuf[1] << 8) + rxbuf[0];
-	dev_dbg(dev, "raw=0x%x\n", raw);
+	raw = (rxbuf[0] << 8) + rxbuf[1];
+	dev_dbg(dev, "rxbuf[0] : 0x%02x rxbuf[1] : 0x%02x raw=0x%04x\n",
+		rxbuf[0], rxbuf[1], raw);
 
 	/*
+	 * LM70:
 	 * The "raw" temperature read into rxbuf[] is a 16-bit signed 2's
 	 * complement value. Only the MSB 11 bits (1 sign + 10 temperature
 	 * bits) are meaningful; the LSB 5 bits are to be discarded.
@@ -80,8 +84,21 @@
 	 * by 0.25. Also multiply by 1000 to represent in millidegrees
 	 * Celsius.
 	 * So it's equivalent to multiplying by 0.25 * 1000 = 250.
+	 *
+	 * TMP121/TMP123:
+	 * 13 bits of 2's complement data, discard LSB 3 bits,
+	 * resolution 0.0625 degrees celsius.
 	 */
-	val = ((int)raw/32) * 250;
+	switch (p_lm70->chip) {
+	case LM70_CHIP_LM70:
+		val = ((int)raw / 32) * 250;
+		break;
+
+	case LM70_CHIP_TMP121:
+		val = ((int)raw / 8) * 625 / 10;
+		break;
+	}
+
 	status = sprintf(buf, "%d\n", val); /* millidegrees Celsius */
 out:
 	mutex_unlock(&p_lm70->lock);
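
A quick cross-check of the two conversion branches above (editorial illustration): the same raw SPI word, 0x0c80, decodes to 25.0 degrees Celsius on both chips, since the LM70 keeps 11 bits at 0.25 degrees/LSB and the TMP121 keeps 13 bits at 0.0625 degrees/LSB.

	#include <stdio.h>

	int main(void)
	{
		short raw = 0x0c80;			/* MSB-first word as assembled above */
		int lm70   = ((int)raw / 32) * 250;	/* 25000 millidegrees C */
		int tmp121 = ((int)raw / 8) * 625 / 10;	/* 25000 millidegrees C */

		printf("%d %d\n", lm70, tmp121);
		return 0;
	}
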
@@ -93,27 +110,39 @@
 static ssize_t lm70_show_name(struct device *dev, struct device_attribute
 			      *devattr, char *buf)
 {
-	return sprintf(buf, "lm70\n");
+	struct lm70 *p_lm70 = dev_get_drvdata(dev);
+	int ret;
+
+	switch (p_lm70->chip) {
+	case LM70_CHIP_LM70:
+		ret = sprintf(buf, "lm70\n");
+		break;
+	case LM70_CHIP_TMP121:
+		ret = sprintf(buf, "tmp121\n");
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
 }
 
 static DEVICE_ATTR(name, S_IRUGO, lm70_show_name, NULL);
 
 /*----------------------------------------------------------------------*/
 
-static int __devinit lm70_probe(struct spi_device *spi)
+static int __devinit common_probe(struct spi_device *spi, int chip)
 {
 	struct lm70 *p_lm70;
 	int status;
 
-	/* signaling is SPI_MODE_0 on a 3-wire link (shared SI/SO) */
-	if ((spi->mode & (SPI_CPOL|SPI_CPHA)) || !(spi->mode & SPI_3WIRE))
-		return -EINVAL;
+	/* NOTE:  we assume 8-bit words, and convert to 16 bits manually */
 
 	p_lm70 = kzalloc(sizeof *p_lm70, GFP_KERNEL);
 	if (!p_lm70)
 		return -ENOMEM;
 
 	mutex_init(&p_lm70->lock);
+	p_lm70->chip = chip;
 
 	/* sysfs hook */
 	p_lm70->hwmon_dev = hwmon_device_register(&spi->dev);
@@ -141,6 +170,24 @@
 	return status;
 }
 
+static int __devinit lm70_probe(struct spi_device *spi)
+{
+	/* signaling is SPI_MODE_0 on a 3-wire link (shared SI/SO) */
+	if ((spi->mode & (SPI_CPOL | SPI_CPHA)) || !(spi->mode & SPI_3WIRE))
+		return -EINVAL;
+
+	return common_probe(spi, LM70_CHIP_LM70);
+}
+
+static int __devinit tmp121_probe(struct spi_device *spi)
+{
+	/* signaling is SPI_MODE_0 with only MISO connected */
+	if (spi->mode & (SPI_CPOL | SPI_CPHA))
+		return -EINVAL;
+
+	return common_probe(spi, LM70_CHIP_TMP121);
+}
+
 static int __devexit lm70_remove(struct spi_device *spi)
 {
 	struct lm70 *p_lm70 = dev_get_drvdata(&spi->dev);
@@ -154,6 +201,15 @@
 	return 0;
 }
 
+static struct spi_driver tmp121_driver = {
+	.driver = {
+		.name	= "tmp121",
+		.owner	= THIS_MODULE,
+	},
+	.probe	= tmp121_probe,
+	.remove	= __devexit_p(lm70_remove),
+};
+
 static struct spi_driver lm70_driver = {
 	.driver = {
 		.name	= "lm70",
@@ -165,17 +221,26 @@
 
 static int __init init_lm70(void)
 {
-	return spi_register_driver(&lm70_driver);
+	int ret = spi_register_driver(&lm70_driver);
+	if (ret)
+		return ret;
+
+	ret = spi_register_driver(&tmp121_driver);
+	if (ret)
+		spi_unregister_driver(&lm70_driver);
+
+	return ret;
 }
 
 static void __exit cleanup_lm70(void)
 {
 	spi_unregister_driver(&lm70_driver);
+	spi_unregister_driver(&tmp121_driver);
 }
 
 module_init(init_lm70);
 module_exit(cleanup_lm70);
 
 MODULE_AUTHOR("Kaiwan N Billimoria");
-MODULE_DESCRIPTION("National Semiconductor LM70 Linux driver");
+MODULE_DESCRIPTION("NS LM70 / TI TMP121/TMP123 Linux driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
new file mode 100644
index 0000000..034b2c5
--- /dev/null
+++ b/drivers/hwmon/ltc4245.c
@@ -0,0 +1,567 @@
+/*
+ * Driver for Linear Technology LTC4245 I2C Multiple Supply Hot Swap Controller
+ *
+ * Copyright (C) 2008 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This driver is based on the ds1621 and ina209 drivers.
+ *
+ * Datasheet:
+ * http://www.linear.com/pc/downloadDocument.do?navId=H0,C1,C1003,C1006,C1140,P19392,D13517
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+/* Valid addresses are 0x20 - 0x3f
+ *
+ * For now, we do not probe, since some of these addresses
+ * are known to be unfriendly to probing */
+static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
+
+/* Insmod parameters */
+I2C_CLIENT_INSMOD_1(ltc4245);
+
+/* Here are names of the chip's registers (a.k.a. commands) */
+enum ltc4245_cmd {
+	LTC4245_STATUS			= 0x00, /* readonly */
+	LTC4245_ALERT			= 0x01,
+	LTC4245_CONTROL			= 0x02,
+	LTC4245_ON			= 0x03,
+	LTC4245_FAULT1			= 0x04,
+	LTC4245_FAULT2			= 0x05,
+	LTC4245_GPIO			= 0x06,
+	LTC4245_ADCADR			= 0x07,
+
+	LTC4245_12VIN			= 0x10,
+	LTC4245_12VSENSE		= 0x11,
+	LTC4245_12VOUT			= 0x12,
+	LTC4245_5VIN			= 0x13,
+	LTC4245_5VSENSE			= 0x14,
+	LTC4245_5VOUT			= 0x15,
+	LTC4245_3VIN			= 0x16,
+	LTC4245_3VSENSE			= 0x17,
+	LTC4245_3VOUT			= 0x18,
+	LTC4245_VEEIN			= 0x19,
+	LTC4245_VEESENSE		= 0x1a,
+	LTC4245_VEEOUT			= 0x1b,
+	LTC4245_GPIOADC1		= 0x1c,
+	LTC4245_GPIOADC2		= 0x1d,
+	LTC4245_GPIOADC3		= 0x1e,
+};
+
+struct ltc4245_data {
+	struct device *hwmon_dev;
+
+	struct mutex update_lock;
+	bool valid;
+	unsigned long last_updated; /* in jiffies */
+
+	/* Control registers */
+	u8 cregs[0x08];
+
+	/* Voltage registers */
+	u8 vregs[0x0f];
+};
+
+static struct ltc4245_data *ltc4245_update_device(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct ltc4245_data *data = i2c_get_clientdata(client);
+	s32 val;
+	int i;
+
+	mutex_lock(&data->update_lock);
+
+	if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+
+		dev_dbg(&client->dev, "Starting ltc4245 update\n");
+
+		/* Read control registers -- 0x00 to 0x07 */
+		for (i = 0; i < ARRAY_SIZE(data->cregs); i++) {
+			val = i2c_smbus_read_byte_data(client, i);
+			if (unlikely(val < 0))
+				data->cregs[i] = 0;
+			else
+				data->cregs[i] = val;
+		}
+
+		/* Read voltage registers -- 0x10 to 0x1f */
+		for (i = 0; i < ARRAY_SIZE(data->vregs); i++) {
+			val = i2c_smbus_read_byte_data(client, i+0x10);
+			if (unlikely(val < 0))
+				data->vregs[i] = 0;
+			else
+				data->vregs[i] = val;
+		}
+
+		data->last_updated = jiffies;
+		data->valid = 1;
+	}
+
+	mutex_unlock(&data->update_lock);
+
+	return data;
+}
+
+/* Return the voltage from the given register in millivolts */
+static int ltc4245_get_voltage(struct device *dev, u8 reg)
+{
+	struct ltc4245_data *data = ltc4245_update_device(dev);
+	const u8 regval = data->vregs[reg - 0x10];
+	u32 voltage = 0;
+
+	switch (reg) {
+	case LTC4245_12VIN:
+	case LTC4245_12VOUT:
+		voltage = regval * 55;
+		break;
+	case LTC4245_5VIN:
+	case LTC4245_5VOUT:
+		voltage = regval * 22;
+		break;
+	case LTC4245_3VIN:
+	case LTC4245_3VOUT:
+		voltage = regval * 15;
+		break;
+	case LTC4245_VEEIN:
+	case LTC4245_VEEOUT:
+		voltage = regval * -55;
+		break;
+	case LTC4245_GPIOADC1:
+	case LTC4245_GPIOADC2:
+	case LTC4245_GPIOADC3:
+		voltage = regval * 10;
+		break;
+	default:
+		/* If we get here, the developer messed up */
+		WARN_ON_ONCE(1);
+		break;
+	}
+
+	return voltage;
+}
+
+/* Return the current in the given sense register in milliAmperes */
+static unsigned int ltc4245_get_current(struct device *dev, u8 reg)
+{
+	struct ltc4245_data *data = ltc4245_update_device(dev);
+	const u8 regval = data->vregs[reg - 0x10];
+	unsigned int voltage;
+	unsigned int curr;
+
+	/* The strange looking conversions that follow are fixed-point
+	 * math, since we cannot do floating point in the kernel.
+	 *
+	 * Step 1: convert sense register to microVolts
+	 * Step 2: convert voltage to milliAmperes
+	 *
+	 * If you play around with the V=IR equation, you come up with
+	 * the following: X uV / Y mOhm == Z mA
+	 *
+	 * For sense resistors that are not a whole number of milliOhms, we
+	 * multiply both the voltage and the resistance by 10 to shift the
+	 * decimal point, so the normal integer division operator can be used.
+	 */
+
+	switch (reg) {
+	case LTC4245_12VSENSE:
+		voltage = regval * 250; /* voltage in uV */
+		curr = voltage / 50; /* sense resistor 50 mOhm */
+		break;
+	case LTC4245_5VSENSE:
+		voltage = regval * 125; /* voltage in uV */
+		curr = (voltage * 10) / 35; /* sense resistor 3.5 mOhm */
+		break;
+	case LTC4245_3VSENSE:
+		voltage = regval * 125; /* voltage in uV */
+		curr = (voltage * 10) / 25; /* sense resistor 2.5 mOhm */
+		break;
+	case LTC4245_VEESENSE:
+		voltage = regval * 250; /* voltage in uV */
+		curr = voltage / 100; /* sense resistor 100 mOhm */
+		break;
+	default:
+		/* If we get here, the developer messed up */
+		WARN_ON_ONCE(1);
+		curr = 0;
+		break;
+	}
+
+	return curr;
+}
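
Worked numbers for the fixed-point math above (illustration only, with made-up register readings): a 5 V sense reading of 100 counts is 12500 uV across the 3.5 mOhm shunt, i.e. roughly 3.57 A. ltc4245_show_power() later multiplies such a milliampere figure by a millivolt output voltage, which directly yields microwatts.

	#include <stdio.h>

	int main(void)
	{
		unsigned int regval  = 100;			/* LTC4245_5VSENSE reading */
		unsigned int voltage = regval * 125;		/* 12500 uV across the shunt */
		unsigned int curr    = (voltage * 10) / 35;	/* 3.5 mOhm -> 3571 mA */
		unsigned int vout    = 230 * 22;		/* LTC4245_5VOUT of 230 -> 5060 mV */

		printf("%u mA, %u uW\n", curr, curr * vout);	/* mA * mV == uW */
		return 0;
	}
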
+
+static ssize_t ltc4245_show_voltage(struct device *dev,
+				    struct device_attribute *da,
+				    char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	const int voltage = ltc4245_get_voltage(dev, attr->index);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", voltage);
+}
+
+static ssize_t ltc4245_show_current(struct device *dev,
+				    struct device_attribute *da,
+				    char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	const unsigned int curr = ltc4245_get_current(dev, attr->index);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", curr);
+}
+
+static ssize_t ltc4245_show_power(struct device *dev,
+				  struct device_attribute *da,
+				  char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	const unsigned int curr = ltc4245_get_current(dev, attr->index);
+	const int output_voltage = ltc4245_get_voltage(dev, attr->index+1);
+
+	/* current in mA * voltage in mV == power in uW */
+	const unsigned int power = abs(output_voltage * curr);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", power);
+}
+
+static ssize_t ltc4245_show_alarm(struct device *dev,
+					  struct device_attribute *da,
+					  char *buf)
+{
+	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(da);
+	struct ltc4245_data *data = ltc4245_update_device(dev);
+	const u8 reg = data->cregs[attr->index];
+	const u32 mask = attr->nr;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", (reg & mask) ? 1 : 0);
+}
+
+/* These macros are used below in constructing device attribute objects
+ * for use with sysfs_create_group() to make a sysfs device file
+ * for each register.
+ */
+
+#define LTC4245_VOLTAGE(name, ltc4245_cmd_idx) \
+	static SENSOR_DEVICE_ATTR(name, S_IRUGO, \
+	ltc4245_show_voltage, NULL, ltc4245_cmd_idx)
+
+#define LTC4245_CURRENT(name, ltc4245_cmd_idx) \
+	static SENSOR_DEVICE_ATTR(name, S_IRUGO, \
+	ltc4245_show_current, NULL, ltc4245_cmd_idx)
+
+#define LTC4245_POWER(name, ltc4245_cmd_idx) \
+	static SENSOR_DEVICE_ATTR(name, S_IRUGO, \
+	ltc4245_show_power, NULL, ltc4245_cmd_idx)
+
+#define LTC4245_ALARM(name, mask, reg) \
+	static SENSOR_DEVICE_ATTR_2(name, S_IRUGO, \
+	ltc4245_show_alarm, NULL, (mask), reg)
+
+/* Construct a sensor_device_attribute structure for each register */
+
+/* Input voltages */
+LTC4245_VOLTAGE(in1_input,			LTC4245_12VIN);
+LTC4245_VOLTAGE(in2_input,			LTC4245_5VIN);
+LTC4245_VOLTAGE(in3_input,			LTC4245_3VIN);
+LTC4245_VOLTAGE(in4_input,			LTC4245_VEEIN);
+
+/* Input undervoltage alarms */
+LTC4245_ALARM(in1_min_alarm,	(1 << 0),	LTC4245_FAULT1);
+LTC4245_ALARM(in2_min_alarm,	(1 << 1),	LTC4245_FAULT1);
+LTC4245_ALARM(in3_min_alarm,	(1 << 2),	LTC4245_FAULT1);
+LTC4245_ALARM(in4_min_alarm,	(1 << 3),	LTC4245_FAULT1);
+
+/* Currents (via sense resistor) */
+LTC4245_CURRENT(curr1_input,			LTC4245_12VSENSE);
+LTC4245_CURRENT(curr2_input,			LTC4245_5VSENSE);
+LTC4245_CURRENT(curr3_input,			LTC4245_3VSENSE);
+LTC4245_CURRENT(curr4_input,			LTC4245_VEESENSE);
+
+/* Overcurrent alarms */
+LTC4245_ALARM(curr1_max_alarm,	(1 << 4),	LTC4245_FAULT1);
+LTC4245_ALARM(curr2_max_alarm,	(1 << 5),	LTC4245_FAULT1);
+LTC4245_ALARM(curr3_max_alarm,	(1 << 6),	LTC4245_FAULT1);
+LTC4245_ALARM(curr4_max_alarm,	(1 << 7),	LTC4245_FAULT1);
+
+/* Output voltages */
+LTC4245_VOLTAGE(in5_input,			LTC4245_12VOUT);
+LTC4245_VOLTAGE(in6_input,			LTC4245_5VOUT);
+LTC4245_VOLTAGE(in7_input,			LTC4245_3VOUT);
+LTC4245_VOLTAGE(in8_input,			LTC4245_VEEOUT);
+
+/* Power Bad alarms */
+LTC4245_ALARM(in5_min_alarm,	(1 << 0),	LTC4245_FAULT2);
+LTC4245_ALARM(in6_min_alarm,	(1 << 1),	LTC4245_FAULT2);
+LTC4245_ALARM(in7_min_alarm,	(1 << 2),	LTC4245_FAULT2);
+LTC4245_ALARM(in8_min_alarm,	(1 << 3),	LTC4245_FAULT2);
+
+/* GPIO voltages */
+LTC4245_VOLTAGE(in9_input,			LTC4245_GPIOADC1);
+LTC4245_VOLTAGE(in10_input,			LTC4245_GPIOADC2);
+LTC4245_VOLTAGE(in11_input,			LTC4245_GPIOADC3);
+
+/* Power Consumption (virtual) */
+LTC4245_POWER(power1_input,			LTC4245_12VSENSE);
+LTC4245_POWER(power2_input,			LTC4245_5VSENSE);
+LTC4245_POWER(power3_input,			LTC4245_3VSENSE);
+LTC4245_POWER(power4_input,			LTC4245_VEESENSE);
+
+/* Finally, construct an array of pointers to members of the above objects,
+ * as required for sysfs_create_group()
+ */
+static struct attribute *ltc4245_attributes[] = {
+	&sensor_dev_attr_in1_input.dev_attr.attr,
+	&sensor_dev_attr_in2_input.dev_attr.attr,
+	&sensor_dev_attr_in3_input.dev_attr.attr,
+	&sensor_dev_attr_in4_input.dev_attr.attr,
+
+	&sensor_dev_attr_in1_min_alarm.dev_attr.attr,
+	&sensor_dev_attr_in2_min_alarm.dev_attr.attr,
+	&sensor_dev_attr_in3_min_alarm.dev_attr.attr,
+	&sensor_dev_attr_in4_min_alarm.dev_attr.attr,
+
+	&sensor_dev_attr_curr1_input.dev_attr.attr,
+	&sensor_dev_attr_curr2_input.dev_attr.attr,
+	&sensor_dev_attr_curr3_input.dev_attr.attr,
+	&sensor_dev_attr_curr4_input.dev_attr.attr,
+
+	&sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
+	&sensor_dev_attr_curr2_max_alarm.dev_attr.attr,
+	&sensor_dev_attr_curr3_max_alarm.dev_attr.attr,
+	&sensor_dev_attr_curr4_max_alarm.dev_attr.attr,
+
+	&sensor_dev_attr_in5_input.dev_attr.attr,
+	&sensor_dev_attr_in6_input.dev_attr.attr,
+	&sensor_dev_attr_in7_input.dev_attr.attr,
+	&sensor_dev_attr_in8_input.dev_attr.attr,
+
+	&sensor_dev_attr_in5_min_alarm.dev_attr.attr,
+	&sensor_dev_attr_in6_min_alarm.dev_attr.attr,
+	&sensor_dev_attr_in7_min_alarm.dev_attr.attr,
+	&sensor_dev_attr_in8_min_alarm.dev_attr.attr,
+
+	&sensor_dev_attr_in9_input.dev_attr.attr,
+	&sensor_dev_attr_in10_input.dev_attr.attr,
+	&sensor_dev_attr_in11_input.dev_attr.attr,
+
+	&sensor_dev_attr_power1_input.dev_attr.attr,
+	&sensor_dev_attr_power2_input.dev_attr.attr,
+	&sensor_dev_attr_power3_input.dev_attr.attr,
+	&sensor_dev_attr_power4_input.dev_attr.attr,
+
+	NULL,
+};
+
+static const struct attribute_group ltc4245_group = {
+	.attrs = ltc4245_attributes,
+};
+
+static int ltc4245_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct ltc4245_data *data;
+	int ret;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		ret = -ENOMEM;
+		goto out_kzalloc;
+	}
+
+	i2c_set_clientdata(client, data);
+	mutex_init(&data->update_lock);
+
+	/* Initialize the LTC4245 chip */
+	/* TODO */
+
+	/* Register sysfs hooks */
+	ret = sysfs_create_group(&client->dev.kobj, &ltc4245_group);
+	if (ret)
+		goto out_sysfs_create_group;
+
+	data->hwmon_dev = hwmon_device_register(&client->dev);
+	if (IS_ERR(data->hwmon_dev)) {
+		ret = PTR_ERR(data->hwmon_dev);
+		goto out_hwmon_device_register;
+	}
+
+	return 0;
+
+out_hwmon_device_register:
+	sysfs_remove_group(&client->dev.kobj, &ltc4245_group);
+out_sysfs_create_group:
+	kfree(data);
+out_kzalloc:
+	return ret;
+}
+
+static int ltc4245_remove(struct i2c_client *client)
+{
+	struct ltc4245_data *data = i2c_get_clientdata(client);
+
+	hwmon_device_unregister(data->hwmon_dev);
+	sysfs_remove_group(&client->dev.kobj, &ltc4245_group);
+
+	kfree(data);
+
+	return 0;
+}
+
+/* Check that some bits in a control register appear at all possible
+ * locations without changing value
+ *
+ * @client: the i2c client to use
+ * @reg: the register to read
+ * @bits: the bits to check (0xff checks all bits,
+ *                           0x03 checks only the last two bits)
+ *
+ * return -ERRNO if the register read failed
+ * return -ENODEV if the register value doesn't stay constant at all
+ * possible addresses
+ *
+ * return 0 for success
+ */
+static int ltc4245_check_control_reg(struct i2c_client *client, u8 reg, u8 bits)
+{
+	int i;
+	s32 v, voff1, voff2;
+
+	/* Read register and check for error */
+	v = i2c_smbus_read_byte_data(client, reg);
+	if (v < 0)
+		return v;
+
+	v &= bits;
+
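+	/* walk the aliased register images: address bits b7-b5 are ignored,
+	 * so the register map repeats every 0x20 bytes (see ltc4245_detect) */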
+	for (i = 0x00; i < 0xff; i += 0x20) {
+
+		voff1 = i2c_smbus_read_byte_data(client, reg + i);
+		if (voff1 < 0)
+			return voff1;
+
+		voff2 = i2c_smbus_read_byte_data(client, reg + i + 0x08);
+		if (voff2 < 0)
+			return voff2;
+
+		voff1 &= bits;
+		voff2 &= bits;
+
+		if (v != voff1 || v != voff2)
+			return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int ltc4245_detect(struct i2c_client *client,
+			  int kind,
+			  struct i2c_board_info *info)
+{
+	struct i2c_adapter *adapter = client->adapter;
+
+	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -ENODEV;
+
+	if (kind < 0) {		/* probed detection - check the chip type */
+		s32 v;		/* 8 bits from the chip, or -ERRNO */
+
+		/* Chip registers 0x00-0x07 are control registers
+		 * Chip registers 0x10-0x1f are data registers
+		 *
+		 * Address bits b7-b5 are ignored. This makes the chip "repeat"
+		 * in steps of 0x20. Any control registers should appear with
+		 * the same values across all duplicated addresses.
+		 *
+		 * Register 0x02 bit b2 is reserved, expect 0
+		 * Register 0x07 bits b7 to b4 are reserved, expect 0
+		 *
+		 * Registers 0x01, 0x02 are control registers and should not
+		 * change on their own.
+		 *
+		 * Register 0x06 bits b6 and b7 are control bits, and should
+		 * not change on their own.
+		 *
+		 * Register 0x07 bits b3 to b0 are control bits, and should
+		 * not change on their own.
+		 */
+
+		/* read register 0x02 reserved bit, expect 0 */
+		v = i2c_smbus_read_byte_data(client, LTC4245_CONTROL);
+		if (v < 0 || (v & 0x04) != 0)
+			return -ENODEV;
+
+		/* read register 0x07 reserved bits, expect 0 */
+		v = i2c_smbus_read_byte_data(client, LTC4245_ADCADR);
+		if (v < 0 || (v & 0xf0) != 0)
+			return -ENODEV;
+
+		/* check that the alert register appears at all locations */
+		if (ltc4245_check_control_reg(client, LTC4245_ALERT, 0xff))
+			return -ENODEV;
+
+		/* check that the control register appears at all locations */
+		if (ltc4245_check_control_reg(client, LTC4245_CONTROL, 0xff))
+			return -ENODEV;
+
+		/* check that register 0x06 bits b6 and b7 stay constant */
+		if (ltc4245_check_control_reg(client, LTC4245_GPIO, 0xc0))
+			return -ENODEV;
+
+		/* check that register 0x07 bits b3-b0 stay constant */
+		if (ltc4245_check_control_reg(client, LTC4245_ADCADR, 0x0f))
+			return -ENODEV;
+	}
+
+	strlcpy(info->type, "ltc4245", I2C_NAME_SIZE);
+	dev_info(&adapter->dev, "ltc4245 %s at address 0x%02x\n",
+			kind < 0 ? "probed" : "forced",
+			client->addr);
+
+	return 0;
+}
+
+static const struct i2c_device_id ltc4245_id[] = {
+	{ "ltc4245", ltc4245 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, ltc4245_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver ltc4245_driver = {
+	.class		= I2C_CLASS_HWMON,
+	.driver = {
+		.name	= "ltc4245",
+	},
+	.probe		= ltc4245_probe,
+	.remove		= ltc4245_remove,
+	.id_table	= ltc4245_id,
+	.detect		= ltc4245_detect,
+	.address_data	= &addr_data,
+};
+
+static int __init ltc4245_init(void)
+{
+	return i2c_add_driver(&ltc4245_driver);
+}
+
+static void __exit ltc4245_exit(void)
+{
+	i2c_del_driver(&ltc4245_driver);
+}
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
+MODULE_DESCRIPTION("LTC4245 driver");
+MODULE_LICENSE("GPL");
+
+module_init(ltc4245_init);
+module_exit(ltc4245_exit);
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 5fbfa34..fb052fe 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -43,6 +43,7 @@
 #include <linux/hwmon-vid.h>
 #include <linux/err.h>
 #include <linux/mutex.h>
+#include <linux/acpi.h>
 #include <asm/io.h>
 
 static u8 devid;
@@ -1627,6 +1628,11 @@
 			continue;
 		res.start = extra_isa[i];
 		res.end = extra_isa[i] + PC87360_EXTENT - 1;
+
+		err = acpi_check_resource_conflict(&res);
+		if (err)
+			goto exit_device_put;
+
 		err = platform_device_add_resources(pdev, &res, 1);
 		if (err) {
 			printk(KERN_ERR "pc87360: Device resource[%d] "
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 7265f22..3a8a0f7 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -32,6 +32,7 @@
 #include <linux/mutex.h>
 #include <linux/sysfs.h>
 #include <linux/ioport.h>
+#include <linux/acpi.h>
 #include <asm/io.h>
 
 static unsigned short force_id;
@@ -524,6 +525,10 @@
 	};
 	int err;
 
+	err = acpi_check_resource_conflict(&res);
+	if (err)
+		goto exit;
+
 	pdev = platform_device_alloc(DRVNAME, address);
 	if (!pdev) {
 		err = -ENOMEM;
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index a276806..aa2e831 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -62,6 +62,7 @@
 #include <linux/jiffies.h>
 #include <linux/mutex.h>
 #include <linux/sysfs.h>
+#include <linux/acpi.h>
 #include <asm/io.h>
 
 
@@ -727,6 +728,10 @@
 	};
 	int err;
 
+	err = acpi_check_resource_conflict(&res);
+	if (err)
+		goto exit;
+
 	pdev = platform_device_alloc("sis5595", address);
 	if (!pdev) {
 		err = -ENOMEM;
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index eb03544..6f6d52b 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -36,6 +36,7 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
+#include <linux/acpi.h>
 #include <asm/io.h>
 
 static unsigned short force_id;
@@ -303,6 +304,10 @@
 	};
 	int err;
 
+	err = acpi_check_resource_conflict(&res);
+	if (err)
+		goto exit;
+
 	pdev = platform_device_alloc(DRVNAME, address);
 	if (!pdev) {
 		err = -ENOMEM;
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index d1b4985..a92dbb9 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -37,6 +37,7 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/sysfs.h>
+#include <linux/acpi.h>
 #include <asm/io.h>
 
 static unsigned short force_id;
@@ -705,6 +706,10 @@
 	};
 	int err;
 
+	err = acpi_check_resource_conflict(&res);
+	if (err)
+		goto exit;
+
 	pdev = platform_device_alloc(DRVNAME, address);
 	if (!pdev) {
 		err = -ENOMEM;
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index f1ee5e7..a022aed 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -41,6 +41,7 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/sysfs.h>
+#include <linux/acpi.h>
 #include <asm/io.h>
 
 
@@ -783,6 +784,10 @@
 	};
 	int err;
 
+	err = acpi_check_resource_conflict(&res);
+	if (err)
+		goto exit;
+
 	pdev = platform_device_alloc("via686a", address);
 	if (!pdev) {
 		err = -ENOMEM;
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index 12b4359..b0ce378 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -32,6 +32,7 @@
 #include <linux/err.h>
 #include <linux/mutex.h>
 #include <linux/ioport.h>
+#include <linux/acpi.h>
 #include <asm/io.h>
 
 static int uch_config = -1;
@@ -1259,6 +1260,10 @@
 	}
 
 	res.name = pdev->name;
+	err = acpi_check_resource_conflict(&res);
+	if (err)
+		goto EXIT;
+
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
 		printk(KERN_ERR DRVNAME ": Device resource addition failed "
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 5bc5727..9982b45 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -35,6 +35,7 @@
 #include <linux/hwmon-vid.h>
 #include <linux/err.h>
 #include <linux/mutex.h>
+#include <linux/acpi.h>
 #include <asm/io.h>
 
 static int force_addr;
@@ -894,6 +895,10 @@
 	};
 	int err;
 
+	err = acpi_check_resource_conflict(&res);
+	if (err)
+		goto exit;
+
 	pdev = platform_device_alloc("vt8231", address);
 	if (!pdev) {
 		err = -ENOMEM;
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 075164d..cb808d0 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -48,6 +48,7 @@
 #include <linux/hwmon-vid.h>
 #include <linux/err.h>
 #include <linux/mutex.h>
+#include <linux/acpi.h>
 #include <asm/io.h>
 #include "lm75.h"
 
@@ -502,7 +503,7 @@
 		}
 
 		for (i = 0; i < 4; i++) {
-			/* pwmcfg, tolarance mapped for i=0, i=1 to same reg */
+			/* pwmcfg, tolerance mapped for i=0, i=1 to same reg */
 			if (i != 1) {
 				pwmcfg = w83627ehf_read_value(data,
 						W83627EHF_REG_PWM_ENABLE[i]);
@@ -1544,6 +1545,11 @@
 	res.start = address + IOREGION_OFFSET;
 	res.end = address + IOREGION_OFFSET + IOREGION_LENGTH - 1;
 	res.flags = IORESOURCE_IO;
+
+	err = acpi_check_resource_conflict(&res);
+	if (err)
+		goto exit;
+
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
 		printk(KERN_ERR DRVNAME ": Device resource addition failed "
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index b30e579..389150b 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -50,6 +50,7 @@
 #include <linux/err.h>
 #include <linux/mutex.h>
 #include <linux/ioport.h>
+#include <linux/acpi.h>
 #include <asm/io.h>
 #include "lm75.h"
 
@@ -1793,6 +1794,10 @@
 	};
 	int err;
 
+	err = acpi_check_resource_conflict(&res);
+	if (err)
+		goto exit;
+
 	pdev = platform_device_alloc(DRVNAME, address);
 	if (!pdev) {
 		err = -ENOMEM;
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index fc12bd4..dbfb30c 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -58,7 +58,10 @@
 						0x2e, 0x2f, I2C_CLIENT_END };
 /* Insmod parameters */
 I2C_CLIENT_INSMOD_4(w83781d, w83782d, w83783s, as99127f);
-I2C_CLIENT_MODULE_PARM(force_subclients, "List of subclient addresses: "
+
+static unsigned short force_subclients[4];
+module_param_array(force_subclients, short, NULL, 0);
+MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
 		    "{bus, clientaddr, subclientaddr1, subclientaddr2}");
 
 static int reset;
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 5768def..97851c5 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -53,7 +53,10 @@
 
 /* Insmod parameters */
 I2C_CLIENT_INSMOD_1(w83791d);
-I2C_CLIENT_MODULE_PARM(force_subclients, "List of subclient addresses: "
+
+static unsigned short force_subclients[4];
+module_param_array(force_subclients, short, NULL, 0);
+MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
 			"{bus, clientaddr, subclientaddr1, subclientaddr2}");
 
 static int reset;
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index cf94c5b..2be1619 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -51,7 +51,10 @@
 
 /* Insmod parameters */
 I2C_CLIENT_INSMOD_1(w83792d);
-I2C_CLIENT_MODULE_PARM(force_subclients, "List of subclient addresses: "
+
+static unsigned short force_subclients[4];
+module_param_array(force_subclients, short, NULL, 0);
+MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
 			"{bus, clientaddr, subclientaddr1, subclientaddr2}");
 
 static int init;
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 0a739f1..47dd398 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -42,7 +42,10 @@
 
 /* Insmod parameters */
 I2C_CLIENT_INSMOD_1(w83793);
-I2C_CLIENT_MODULE_PARM(force_subclients, "List of subclient addresses: "
+
+static unsigned short force_subclients[4];
+module_param_array(force_subclients, short, NULL, 0);
+MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
 		       "{bus, clientaddr, subclientaddr1, subclientaddr2}");
 
 static int reset;
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index fc3e5b0..dd9e796 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -399,8 +399,8 @@
 	if ((error = ali1563_setup(dev)))
 		goto exit;
 	ali1563_adapter.dev.parent = &dev->dev;
-	sprintf(ali1563_adapter.name,"SMBus ALi 1563 Adapter @ %04x",
-		ali1563_smba);
+	snprintf(ali1563_adapter.name, sizeof(ali1563_adapter.name),
+		 "SMBus ALi 1563 Adapter @ %04x", ali1563_smba);
 	if ((error = i2c_add_adapter(&ali1563_adapter)))
 		goto exit_shutdown;
 	return 0;
diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
index 8ba2bcf..378fcb5 100644
--- a/drivers/i2c/busses/i2c-amd756-s4882.c
+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
@@ -197,8 +197,8 @@
 	for (i = 1; i < 5; i++) {
 		s4882_algo[i] = *(amd756_smbus.algo);
 		s4882_adapter[i] = amd756_smbus;
-		sprintf(s4882_adapter[i].name,
-			"SMBus 8111 adapter (CPU%d)", i-1);
+		snprintf(s4882_adapter[i].name, sizeof(s4882_adapter[i].name),
+			 "SMBus 8111 adapter (CPU%d)", i-1);
 		s4882_adapter[i].algo = s4882_algo+i;
 		s4882_adapter[i].dev.parent = amd756_smbus.dev.parent;
 	}
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 424dad6..36bee5b 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -380,8 +380,9 @@
 	/* set up the sysfs linkage to our parent device */
 	amd756_smbus.dev.parent = &pdev->dev;
 
-	sprintf(amd756_smbus.name, "SMBus %s adapter at %04x",
-		chipname[id->driver_data], amd756_ioport);
+	snprintf(amd756_smbus.name, sizeof(amd756_smbus.name),
+		 "SMBus %s adapter at %04x", chipname[id->driver_data],
+		 amd756_ioport);
 
 	error = i2c_add_adapter(&amd756_smbus);
 	if (error) {
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 9efb021..67d9dc5 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -222,7 +222,7 @@
 		rc = -ENOMEM;
 		goto fail2;
 	}
-	sprintf(adapter->name, "AT91");
+	snprintf(adapter->name, sizeof(adapter->name), "AT91");
 	adapter->algo = &at91_algorithm;
 	adapter->class = I2C_CLASS_HWMON;
 	adapter->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 3c855ff..3fd2c41 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -656,7 +656,7 @@
 	strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name));
 	p_adap->algo = &bfin_twi_algorithm;
 	p_adap->algo_data = iface;
-	p_adap->class = I2C_CLASS_ALL;
+	p_adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
 	p_adap->dev.parent = &pdev->dev;
 
 	rc = peripheral_request_list(pin_req[pdev->id], "i2c-bfin-twi");
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 5123eb6..526625e 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -64,7 +64,7 @@
 #include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 /* I801 SMBus address offsets */
 #define SMBHSTSTS	(0 + i801_smba)
@@ -583,6 +583,40 @@
 
 MODULE_DEVICE_TABLE (pci, i801_ids);
 
+#if defined CONFIG_INPUT_APANEL || defined CONFIG_INPUT_APANEL_MODULE
+static unsigned char apanel_addr;
+
+/* Scan the system ROM for the signature "FJKEYINF" */
+static __init const void __iomem *bios_signature(const void __iomem *bios)
+{
+	ssize_t offset;
+	const unsigned char signature[] = "FJKEYINF";
+
+	for (offset = 0; offset < 0x10000; offset += 0x10) {
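+	/* sizeof(signature) - 1: match the 8 signature bytes, not the NUL */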
+		if (check_signature(bios + offset, signature,
+				    sizeof(signature)-1))
+			return bios + offset;
+	}
+	return NULL;
+}
+
+static void __init input_apanel_init(void)
+{
+	void __iomem *bios;
+	const void __iomem *p;
+
+	bios = ioremap(0xF0000, 0x10000); /* Can't fail */
+	p = bios_signature(bios);
+	if (p) {
+		/* just use the first address, converted from 8-bit to 7-bit form */
+		apanel_addr = readb(p + 8 + 3) >> 1;
+	}
+	iounmap(bios);
+}
+#else
+static void __init input_apanel_init(void) {}
+#endif
+
 static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	unsigned char temp;
@@ -667,6 +701,19 @@
 		dev_err(&dev->dev, "Failed to add SMBus adapter\n");
 		goto exit_release;
 	}
+
+	/* Register optional slaves */
+#if defined CONFIG_INPUT_APANEL || defined CONFIG_INPUT_APANEL_MODULE
+	if (apanel_addr) {
+		struct i2c_board_info info;
+
+		memset(&info, 0, sizeof(struct i2c_board_info));
+		info.addr = apanel_addr;
+		strlcpy(info.type, "fujitsu_apanel", I2C_NAME_SIZE);
+		i2c_new_device(&i801_adapter, &info);
+	}
+#endif
+
 	return 0;
 
 exit_release:
@@ -717,6 +764,7 @@
 
 static int __init i2c_i801_init(void)
 {
+	input_apanel_init();
 	return pci_register_driver(&i801_driver);
 }
 
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 587f5b2..6af6814 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1076,10 +1076,10 @@
 
 #ifdef CONFIG_I2C_PXA_SLAVE
 	printk(KERN_INFO "I2C: %s: PXA I2C adapter, slave address %d\n",
-	       i2c->adap.dev.bus_id, i2c->slave_addr);
+	       dev_name(&i2c->adap.dev), i2c->slave_addr);
 #else
 	printk(KERN_INFO "I2C: %s: PXA I2C adapter\n",
-	       i2c->adap.dev.bus_id);
+	       dev_name(&i2c->adap.dev));
 #endif
 	return 0;
 
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index f69f91f..5b7f956 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -906,7 +906,7 @@
 
 	platform_set_drvdata(pdev, i2c);
 
-	dev_info(&pdev->dev, "%s: S3C I2C adapter\n", i2c->adap.dev.bus_id);
+	dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
 	return 0;
 
  err_cpufreq:
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index 5e0e254..baa28b7 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -475,7 +475,7 @@
 
 	id->adap.nr = pdev->id;
 	id->adap.algo = &sh7760_i2c_algo;
-	id->adap.class = I2C_CLASS_ALL;
+	id->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
 	id->adap.retries = 3;
 	id->adap.algo_data = id;
 	id->adap.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 6c3d60b..1c01083 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -500,7 +500,7 @@
 	while ((res = platform_get_resource(dev, IORESOURCE_IRQ, k))) {
 		for (n = res->start; hook && n <= res->end; n++) {
 			if (request_irq(n, sh_mobile_i2c_isr, IRQF_DISABLED,
-					dev->dev.bus_id, dev))
+					dev_name(&dev->dev), dev))
 				goto rollback;
 		}
 		k++;
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index dfc2d5e..8ce2daf 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -389,8 +389,8 @@
 	/* set up the sysfs linkage to our parent device */
 	sis5595_adapter.dev.parent = &dev->dev;
 
-	sprintf(sis5595_adapter.name, "SMBus SIS5595 adapter at %04x",
-		sis5595_base + SMB_INDEX);
+	snprintf(sis5595_adapter.name, sizeof(sis5595_adapter.name),
+		 "SMBus SIS5595 adapter at %04x", sis5595_base + SMB_INDEX);
 	err = i2c_add_adapter(&sis5595_adapter);
 	if (err) {
 		release_region(sis5595_base + SMB_INDEX, 2);
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index e7c4b79..9c9c016 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -487,8 +487,8 @@
 	/* set up the sysfs linkage to our parent device */
 	sis630_adapter.dev.parent = &dev->dev;
 
-	sprintf(sis630_adapter.name, "SMBus SIS630 adapter at %04x",
-		acpi_base + SMB_STS);
+	snprintf(sis630_adapter.name, sizeof(sis630_adapter.name),
+		 "SMBus SIS630 adapter at %04x", acpi_base + SMB_STS);
 
 	return i2c_add_adapter(&sis630_adapter);
 }
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index 864ac56..b9bef04 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -114,18 +114,6 @@
 	  These devices are hard to detect and rarely found on mainstream
 	  hardware.  If unsure, say N.
 
-config ISP1301_OMAP
-	tristate "Philips ISP1301 with OMAP OTG"
-	depends on ARCH_OMAP_OTG
-	help
-	  If you say yes here you get support for the Philips ISP1301
-	  USB-On-The-Go transceiver working with the OMAP OTG controller.
-	  The ISP1301 is used in products including H2 and H3 development
-	  boards for Texas Instruments OMAP processors.
-	  
-	  This driver can also be built as a module.  If so, the module
-	  will be called isp1301_omap.
-
 config SENSORS_MAX6875
 	tristate "Maxim MAX6875 Power supply supervisor"
 	depends on EXPERIMENTAL
@@ -151,15 +139,4 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called tsl2550.
 
-config MCU_MPC8349EMITX
-	tristate "MPC8349E-mITX MCU driver"
-	depends on I2C && PPC_83xx
-	select GENERIC_GPIO
-	select ARCH_REQUIRE_GPIOLIB
-	help
-	  Say Y here to enable soft power-off functionality on the Freescale
-	  boards with the MPC8349E-mITX-compatible MCU chips. This driver will
-	  also register MCU GPIOs with the generic GPIO API, so you'll able
-	  to use MCU pins as GPIOs.
-
 endmenu
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index 8b95f41..00fcb51 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -18,9 +18,7 @@
 obj-$(CONFIG_SENSORS_PCF8574)	+= pcf8574.o
 obj-$(CONFIG_PCF8575)		+= pcf8575.o
 obj-$(CONFIG_SENSORS_PCF8591)	+= pcf8591.o
-obj-$(CONFIG_ISP1301_OMAP)	+= isp1301_omap.o
 obj-$(CONFIG_SENSORS_TSL2550)	+= tsl2550.o
-obj-$(CONFIG_MCU_MPC8349EMITX)	+= mcu_mpc8349emitx.o
 
 ifeq ($(CONFIG_I2C_DEBUG_CHIP),y)
 EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index c6a63f4..b1c9abe 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -459,7 +459,7 @@
 		pr_debug("I2C adapter driver [%s] forgot to specify "
 			 "physical device\n", adap->name);
 	}
-	sprintf(adap->dev.bus_id, "i2c-%d", adap->nr);
+	dev_set_name(&adap->dev, "i2c-%d", adap->nr);
 	adap->dev.release = &i2c_adapter_dev_release;
 	adap->dev.class = &i2c_adapter_class;
 	res = device_register(&adap->dev);
@@ -845,8 +845,8 @@
 	} else
 		client->dev.release = i2c_client_dev_release;
 
-	snprintf(&client->dev.bus_id[0], sizeof(client->dev.bus_id),
-		"%d-%04x", i2c_adapter_id(adapter), client->addr);
+	dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adapter),
+		     client->addr);
 	res = device_register(&client->dev);
 	if (res)
 		goto out_err;
@@ -856,7 +856,7 @@
 	mutex_unlock(&adapter->clist_lock);
 
 	dev_dbg(&adapter->dev, "client [%s] registered with bus id %s\n",
-		client->name, client->dev.bus_id);
+		client->name, dev_name(&client->dev));
 
 	if (adapter->client_register)  {
 		if (adapter->client_register(client)) {
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 3f95038..b1c6f68 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -701,11 +701,6 @@
        depends on SOC_AU1200 && BLK_DEV_IDE_AU1XXX
 endchoice
 
-config BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ
-       int "Maximum transfer size (KB) per request (up to 128)"
-       default "128"
-       depends on BLK_DEV_IDE_AU1XXX
-
 config BLK_DEV_IDE_TX4938
 	tristate "TX4938 internal IDE support"
 	depends on SOC_TX4938
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 2f9e941..d8f295b 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -18,12 +18,6 @@
 #include <linux/dmi.h>
 
 #include <acpi/acpi_bus.h>
-#include <acpi/acnames.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acparser.h>
-#include <acpi/acexcep.h>
-#include <acpi/acmacros.h>
-#include <acpi/actypes.h>
 
 #define REGS_PER_GTF		7
 struct taskfile_array {
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 4088a62..806760d 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -633,7 +633,7 @@
 	printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
 		q->max_sectors / 2);
 
-	if (ata_id_is_ssd(id) || ata_id_is_cfa(id))
+	if (ata_id_is_ssd(id))
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 
 	/* calculate drive capacity, and select LBA if possible */
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index e728cfe..753b92e 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -493,7 +493,7 @@
 	stat = tp_ops->read_status(hwif);
 
 	if (stat & ATA_BUSY) {
-		local_irq_save(flags);
+		local_save_flags(flags);
 		local_irq_enable_in_hardirq();
 		timeout += jiffies;
 		while ((stat = tp_ops->read_status(hwif)) & ATA_BUSY) {
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 4b3bf6a..60538d9 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -186,12 +186,10 @@
 	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
 #endif
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (blk_pm_suspend_request(rq)) {
+	if (blk_pm_suspend_request(rq))
 		blk_stop_queue(q);
-	} else {
+	else
 		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
-		blk_start_queue(q);
-	}
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	drive->hwif->rq = NULL;
@@ -219,6 +217,8 @@
 		 * point.
 		 */
 		ide_hwif_t *hwif = drive->hwif;
+		struct request_queue *q = drive->queue;
+		unsigned long flags;
 		int rc;
 #ifdef DEBUG_PM
 		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
@@ -231,5 +231,9 @@
 		rc = ide_wait_not_busy(hwif, 100000);
 		if (rc)
 			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
+
+		spin_lock_irqsave(q->queue_lock, flags);
+		blk_start_queue(q);
+		spin_unlock_irqrestore(q->queue_lock, flags);
 	}
 }
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 0ccbb44..312127e 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -796,7 +796,7 @@
 	if (irqd)
 		disable_irq(hwif->irq);
 
-	local_irq_save(flags);
+	local_save_flags(flags);
 	local_irq_enable_in_hardirq();
 
 	if (ide_port_wait_ready(hwif) == -EBUSY)
diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
index 0be27ac..e1c4f54 100644
--- a/drivers/ide/it821x.c
+++ b/drivers/ide/it821x.c
@@ -68,6 +68,8 @@
 
 #define DRV_NAME "it821x"
 
+#define QUIRK_VORTEX86 1
+
 struct it821x_dev
 {
 	unsigned int smart:1,		/* Are we in smart raid mode */
@@ -79,6 +81,7 @@
 	u16	pio[2];			/* Cached PIO values */
 	u16	mwdma[2];		/* Cached MWDMA values */
 	u16	udma[2];		/* Cached UDMA values (per drive) */
+	u16	quirks;
 };
 
 #define ATA_66		0
@@ -557,8 +560,7 @@
 	 *	this is necessary.
 	 */
 
-	pci_read_config_byte(dev, 0x08, &conf);
-	if (conf == 0x10) {
+	if (dev->revision == 0x10) {
 		idev->timing10 = 1;
 		hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
 		if (idev->smart == 0)
@@ -577,6 +579,12 @@
 
 	hwif->ultra_mask = ATA_UDMA6;
 	hwif->mwdma_mask = ATA_MWDMA2;
+
+	/* Vortex86SX quirk: disable Ultra-DMA modes to work around the BadCRC issue */
+	if (idev->quirks & QUIRK_VORTEX86) {
+		if (dev->revision == 0x11)
+			hwif->ultra_mask = 0;
+	}
 }
 
 static void it8212_disable_raid(struct pci_dev *dev)
@@ -649,6 +657,8 @@
 		return -ENOMEM;
 	}
 
+	itdevs->quirks = id->driver_data;
+
 	rc = ide_pci_init_one(dev, &it821x_chipset, itdevs);
 	if (rc)
 		kfree(itdevs);
@@ -668,6 +678,7 @@
 static const struct pci_device_id it821x_pci_tbl[] = {
 	{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), 0 },
 	{ PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), 0 },
+	{ PCI_VDEVICE(RDC, PCI_DEVICE_ID_RDC_D1010), QUIRK_VORTEX86 },
 	{ 0, },
 };
 
diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
index 48cc748..6297956 100644
--- a/drivers/ide/sl82c105.c
+++ b/drivers/ide/sl82c105.c
@@ -310,10 +310,6 @@
 	.dma_ops	= &sl82c105_dma_ops,
 	.host_flags	= IDE_HFLAG_IO_32BIT |
 			  IDE_HFLAG_UNMASK_IRQS |
-/* FIXME: check for Compatibility mode in generic IDE PCI code */
-#if defined(CONFIG_LOPEC) || defined(CONFIG_SANDPOINT)
-			  IDE_HFLAG_FORCE_LEGACY_IRQS |
-#endif
 			  IDE_HFLAG_SERIALIZE_DMA |
 			  IDE_HFLAG_NO_AUTODMA,
 	.pio_mask	= ATA_PIO5,
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
index b4ef218..d909534 100644
--- a/drivers/ide/tx4938ide.c
+++ b/drivers/ide/tx4938ide.c
@@ -202,7 +202,6 @@
 	.exec_command		= ide_exec_command,
 	.read_status		= ide_read_status,
 	.read_altstatus		= ide_read_altstatus,
-	.read_sff_dma_status	= ide_read_sff_dma_status,
 
 	.set_irq		= ide_set_irq,
 
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index fecc0e0..703c3ee 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -432,8 +432,6 @@
 	if (via_clock < 20000 || via_clock > 50000) {
 		printk(KERN_WARNING DRV_NAME ": User given PCI clock speed "
 			"impossible (%d), using 33 MHz instead.\n", via_clock);
-		printk(KERN_WARNING DRV_NAME ": Use ide0=ata66 if you want "
-			"to assume 80-wire cable.\n");
 		via_clock = 33333;
 	}
 
diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c
index c90be40..31400c8 100644
--- a/drivers/ieee1394/csr.c
+++ b/drivers/ieee1394/csr.c
@@ -68,22 +68,22 @@
 	.host_reset =	host_reset,
 };
 
-static struct hpsb_address_ops map_ops = {
+static const struct hpsb_address_ops map_ops = {
 	.read = read_maps,
 };
 
-static struct hpsb_address_ops fcp_ops = {
+static const struct hpsb_address_ops fcp_ops = {
 	.write = write_fcp,
 };
 
-static struct hpsb_address_ops reg_ops = {
+static const struct hpsb_address_ops reg_ops = {
 	.read = read_regs,
 	.write = write_regs,
 	.lock = lock_regs,
 	.lock64 = lock64_regs,
 };
 
-static struct hpsb_address_ops config_rom_ops = {
+static const struct hpsb_address_ops config_rom_ops = {
 	.read = read_config_rom,
 };
 
@@ -217,7 +217,7 @@
 
 	host->csr.generation = 2;
 
-	bus_info[1] = __constant_cpu_to_be32(0x31333934);
+	bus_info[1] = IEEE1394_BUSID_MAGIC;
 	bus_info[2] = cpu_to_be32((hpsb_disable_irm ? 0 : 1 << CSR_IRMC_SHIFT) |
 				  (1 << CSR_CMC_SHIFT) |
 				  (1 << CSR_ISC_SHIFT) |
@@ -250,7 +250,7 @@
 {
 	quadlet_t bus_info[CSR_BUS_INFO_SIZE];
 
-	bus_info[1] = __constant_cpu_to_be32(0x31333934);
+	bus_info[1] = IEEE1394_BUSID_MAGIC;
 	bus_info[2] = cpu_to_be32((0 << CSR_IRMC_SHIFT) |
 				  (0 << CSR_CMC_SHIFT) |
 				  (0 << CSR_ISC_SHIFT) |
diff --git a/drivers/ieee1394/csr.h b/drivers/ieee1394/csr.h
index f115465..90fb3f2 100644
--- a/drivers/ieee1394/csr.h
+++ b/drivers/ieee1394/csr.h
@@ -50,11 +50,11 @@
 #define CSR_MAX_ROM_SHIFT		8
 #define CSR_GENERATION_SHIFT		4
 
-#define CSR_SET_BUS_INFO_GENERATION(csr, gen)				\
-	((csr)->bus_info_data[2] =					\
-		cpu_to_be32((be32_to_cpu((csr)->bus_info_data[2]) &	\
-			     ~(0xf << CSR_GENERATION_SHIFT)) |		\
-			    (gen) << CSR_GENERATION_SHIFT))
+static inline void csr_set_bus_info_generation(struct csr1212_csr *csr, u8 gen)
+{
+	csr->bus_info_data[2] &= ~cpu_to_be32(0xf << CSR_GENERATION_SHIFT);
+	csr->bus_info_data[2] |= cpu_to_be32((u32)gen << CSR_GENERATION_SHIFT);
+}
 
 struct csr_control {
 	spinlock_t lock;
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
index 5e38a68..a6dfeb0 100644
--- a/drivers/ieee1394/csr1212.c
+++ b/drivers/ieee1394/csr1212.c
@@ -1077,15 +1077,10 @@
 	int i;
 	int ret;
 
-	/* IEEE 1212 says that the entire bus info block should be readable in
-	 * a single transaction regardless of the max_rom value.
-	 * Unfortunately, many IEEE 1394 devices do not abide by that, so the
-	 * bus info block will be read 1 quadlet at a time.  The rest of the
-	 * ConfigROM will be read according to the max_rom field. */
 	for (i = 0; i < csr->bus_info_len; i += sizeof(u32)) {
 		ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
-			sizeof(u32), &csr->cache_head->data[bytes_to_quads(i)],
-			csr->private);
+				&csr->cache_head->data[bytes_to_quads(i)],
+				csr->private);
 		if (ret != CSR1212_SUCCESS)
 			return ret;
 
@@ -1104,8 +1099,8 @@
 	 * a time. */
 	for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(u32)) {
 		ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
-			sizeof(u32), &csr->cache_head->data[bytes_to_quads(i)],
-			csr->private);
+				&csr->cache_head->data[bytes_to_quads(i)],
+				csr->private);
 		if (ret != CSR1212_SUCCESS)
 			return ret;
 	}
@@ -1289,7 +1284,7 @@
 
 		if (csr->ops->bus_read(csr,
 				       CSR1212_REGISTER_SPACE_BASE + kv->offset,
-				       sizeof(u32), &q, csr->private))
+				       &q, csr->private))
 			return -EIO;
 
 		kv->value.leaf.len = be32_to_cpu(q) >> 16;
@@ -1372,17 +1367,8 @@
 		addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
 			cr->offset_end) & ~(csr->max_rom - 1);
 
-		if (csr->ops->bus_read(csr, addr, csr->max_rom, cache_ptr,
-				       csr->private)) {
-			if (csr->max_rom == 4)
-				/* We've got problems! */
-				return -EIO;
-
-			/* Apperently the max_rom value was a lie, set it to
-			 * do quadlet reads and try again. */
-			csr->max_rom = 4;
-			continue;
-		}
+		if (csr->ops->bus_read(csr, addr, cache_ptr, csr->private))
+			return -EIO;
 
 		cr->offset_end += csr->max_rom - (cr->offset_end &
 						  (csr->max_rom - 1));
@@ -1433,7 +1419,6 @@
 
 int csr1212_parse_csr(struct csr1212_csr *csr)
 {
-	static const int mr_map[] = { 4, 64, 1024, 0 };
 	struct csr1212_dentry *dentry;
 	int ret;
 
@@ -1443,15 +1428,13 @@
 	if (ret != CSR1212_SUCCESS)
 		return ret;
 
-	if (!csr->ops->get_max_rom) {
-		csr->max_rom = mr_map[0];	/* default value */
-	} else {
-		int i = csr->ops->get_max_rom(csr->bus_info_data,
-					      csr->private);
-		if (i & ~0x3)
-			return -EINVAL;
-		csr->max_rom = mr_map[i];
-	}
+	/*
+	 * Buggy firmware with bus_info_block.max_rom > 0 has been spotted
+	 * which actually only supports quadlet read requests to the config
+	 * ROM.  Therefore read everything quadlet by quadlet regardless of
+	 * what the bus info block says.
+	 */
+	csr->max_rom = 4;
 
 	csr->cache_head->layout_head = csr->root_kv;
 	csr->cache_head->layout_tail = csr->root_kv;
diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h
index 043039f..a892d92 100644
--- a/drivers/ieee1394/csr1212.h
+++ b/drivers/ieee1394/csr1212.h
@@ -181,7 +181,7 @@
 struct csr1212_csr {
 	size_t bus_info_len;	/* bus info block length in bytes */
 	size_t crc_len;		/* crc length in bytes */
-	u32 *bus_info_data;	/* bus info data incl bus name and EUI */
+	__be32 *bus_info_data;	/* bus info data incl bus name and EUI */
 
 	void *private;		/* private, bus specific data */
 	struct csr1212_bus_ops *ops;
@@ -200,7 +200,7 @@
 	 * entries located in the Units Space.  Must return 0 on success
 	 * anything else indicates an error. */
 	int (*bus_read) (struct csr1212_csr *csr, u64 addr,
-			 u16 length, void *buffer, void *private);
+			 void *buffer, void *private);
 
 	/* This function is used by csr1212 to allocate a region in units space
 	 * in the event that Config ROM entries don't all fit in the predefined
@@ -211,11 +211,6 @@
 	/* This function is used by csr1212 to release a region in units space
 	 * that is no longer needed. */
 	void (*release_addr) (u64 addr, void *private);
-
-	/* This function is used by csr1212 to determine the max read request
-	 * supported by a remote node when reading the ConfigROM space.  Must
-	 * return 0, 1, or 2 per IEEE 1212.  */
-	int (*get_max_rom) (u32 *bus_info, void *private);
 };
 
 
diff --git a/drivers/ieee1394/dv1394-private.h b/drivers/ieee1394/dv1394-private.h
index 7d1d284..18b92cb 100644
--- a/drivers/ieee1394/dv1394-private.h
+++ b/drivers/ieee1394/dv1394-private.h
@@ -77,11 +77,11 @@
    See the Texas Instruments OHCI 1394 chipset documentation.
 */
 
-struct output_more_immediate { u32 q[8]; };
-struct output_more { u32 q[4]; };
-struct output_last { u32 q[4]; };
-struct input_more { u32 q[4]; };
-struct input_last { u32 q[4]; };
+struct output_more_immediate { __le32 q[8]; };
+struct output_more { __le32 q[4]; };
+struct output_last { __le32 q[4]; };
+struct input_more { __le32 q[4]; };
+struct input_last { __le32 q[4]; };
 
 /* outputs */
 
@@ -92,9 +92,9 @@
 					      unsigned int  payload_size)
 {
 	omi->q[0] = cpu_to_le32(0x02000000 | 8); /* OUTPUT_MORE_IMMEDIATE; 8 is the size of the IT header */
-	omi->q[1] = 0;
-	omi->q[2] = 0;
-	omi->q[3] = 0;
+	omi->q[1] = cpu_to_le32(0);
+	omi->q[2] = cpu_to_le32(0);
+	omi->q[3] = cpu_to_le32(0);
 
 	/* IT packet header */
 	omi->q[4] = cpu_to_le32(  (0x0 << 16)  /* IEEE1394_SPEED_100 */
@@ -106,8 +106,8 @@
 	/* reserved field; mimic behavior of my Sony DSR-40 */
 	omi->q[5] = cpu_to_le32((payload_size << 16) | (0x7F << 8) | 0xA0);
 
-	omi->q[6] = 0;
-	omi->q[7] = 0;
+	omi->q[6] = cpu_to_le32(0);
+	omi->q[7] = cpu_to_le32(0);
 }
 
 static inline void fill_output_more(struct output_more *om,
@@ -116,8 +116,8 @@
 {
 	om->q[0] = cpu_to_le32(data_size);
 	om->q[1] = cpu_to_le32(data_phys_addr);
-	om->q[2] = 0;
-	om->q[3] = 0;
+	om->q[2] = cpu_to_le32(0);
+	om->q[3] = cpu_to_le32(0);
 }
 
 static inline void fill_output_last(struct output_last *ol,
@@ -140,8 +140,8 @@
 
 	ol->q[0] = cpu_to_le32(temp);
 	ol->q[1] = cpu_to_le32(data_phys_addr);
-	ol->q[2] = 0;
-	ol->q[3] = 0;
+	ol->q[2] = cpu_to_le32(0);
+	ol->q[3] = cpu_to_le32(0);
 }
 
 /* inputs */
@@ -161,8 +161,8 @@
 
 	im->q[0] = cpu_to_le32(temp);
 	im->q[1] = cpu_to_le32(data_phys_addr);
-	im->q[2] = 0; /* branchAddress and Z not use in packet-per-buffer mode */
-	im->q[3] = 0; /* xferStatus & resCount, resCount must be initialize to data_size */
+	im->q[2] = cpu_to_le32(0); /* branchAddress and Z not used in packet-per-buffer mode */
+	im->q[3] = cpu_to_le32(0); /* xferStatus & resCount; resCount must be initialized to data_size */
 }
  
 static inline void fill_input_last(struct input_last *il,
@@ -331,7 +331,7 @@
 
 	/* points to status/timestamp field of first DMA packet */
 	/* (we'll check it later to monitor timestamp accuracy) */
-	u32 *frame_begin_timestamp;
+	__le32 *frame_begin_timestamp;
 
 	/* the timestamp we assigned to the first packet in the frame */
 	u32 assigned_timestamp;
@@ -348,15 +348,15 @@
 	   that can cause interrupts. We'll check these from the
 	   interrupt handler.
 	*/
-	u32 *mid_frame_timestamp;
-	u32 *frame_end_timestamp;
+	__le32 *mid_frame_timestamp;
+	__le32 *frame_end_timestamp;
 
 	/* branch address field of final packet. This is effectively
 	   the "tail" in the chain of DMA descriptor blocks.
 	   We will fill it with the address of the first DMA descriptor
 	   block in the subsequent frame, once it is ready.
 	*/
-	u32 *frame_end_branch;
+	__le32 *frame_end_branch;
 
 	/* the number of descriptors in the first descriptor block
 	   of the frame. Needed to start DMA */
@@ -365,10 +365,10 @@
 
 
 struct packet {
-	u16	timestamp;
+	__le16	timestamp;
 	u16	invalid;
 	u16	iso_header;
-	u16	data_length;
+	__le16	data_length;
 	u32	cip_h1;
 	u32	cip_h2;
 	unsigned char data[480];
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index c19f232..a329e6b 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -265,7 +265,7 @@
 	/* these flags denote packets that need special attention */
 	int empty_packet, first_packet, last_packet, mid_packet;
 
-	u32 *branch_address, *last_branch_address = NULL;
+	__le32 *branch_address, *last_branch_address = NULL;
 	unsigned long data_p;
 	int first_packet_empty = 0;
 	u32 cycleTimer, ct_sec, ct_cyc, ct_off;
@@ -848,7 +848,7 @@
 	dma_addr_t block_dma = 0;
 	struct packet *data = NULL;
 	dma_addr_t data_dma = 0;
-	u32 *last_branch_address = NULL;
+	__le32 *last_branch_address = NULL;
 	unsigned long irq_flags;
 	int want_interrupt = 0;
 	struct frame *f = NULL;
@@ -2110,17 +2110,17 @@
 			f = video->frames[next_i / MAX_PACKETS];
 			next = &(f->descriptor_pool[next_i % MAX_PACKETS]);
 			next_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
-			next->u.in.il.q[0] |= 3 << 20; /* enable interrupt */
-			next->u.in.il.q[2] = 0; /* disable branch */
+			next->u.in.il.q[0] |= cpu_to_le32(3 << 20); /* enable interrupt */
+			next->u.in.il.q[2] = cpu_to_le32(0); /* disable branch */
 
 			/* link previous to next */
 			prev_i = (next_i == 0) ? (MAX_PACKETS * video->n_frames - 1) : (next_i - 1);
 			f = video->frames[prev_i / MAX_PACKETS];
 			prev = &(f->descriptor_pool[prev_i % MAX_PACKETS]);
 			if (prev_i % (MAX_PACKETS/2)) {
-				prev->u.in.il.q[0] &= ~(3 << 20); /* no interrupt */
+				prev->u.in.il.q[0] &= ~cpu_to_le32(3 << 20); /* no interrupt */
 			} else {
-				prev->u.in.il.q[0] |= 3 << 20; /* enable interrupt */
+				prev->u.in.il.q[0] |= cpu_to_le32(3 << 20); /* enable interrupt */
 			}
 			prev->u.in.il.q[2] = cpu_to_le32(next_dma | 1); /* set Z=1 */
 			wmb();
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 2012869..1a919df 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -92,7 +92,7 @@
 	struct list_head list;
 	u16 dgl;
 	u16 dg_size;
-	u16 ether_type;
+	__be16 ether_type;
 	struct sk_buff *skb;
 	char *pbuf;
 	struct list_head frag_info;
@@ -181,7 +181,7 @@
 static void ether1394_host_reset(struct hpsb_host *host);
 
 /* Function for incoming 1394 packets */
-static struct hpsb_address_ops addr_ops = {
+static const struct hpsb_address_ops addr_ops = {
 	.write =	ether1394_write,
 };
 
@@ -245,12 +245,6 @@
 	return 0;
 }
 
-/* Return statistics to the caller */
-static struct net_device_stats *ether1394_stats(struct net_device *dev)
-{
-	return &(((struct eth1394_priv *)netdev_priv(dev))->stats);
-}
-
 /* FIXME: What to do if we timeout? I think a host reset is probably in order,
  * so that's what we do. Should we increment the stat counters too?  */
 static void ether1394_tx_timeout(struct net_device *dev)
@@ -516,16 +510,19 @@
 	.parse		= ether1394_header_parse,
 };
 
+static const struct net_device_ops ether1394_netdev_ops = {
+	.ndo_open	= ether1394_open,
+	.ndo_stop	= ether1394_stop,
+	.ndo_start_xmit	= ether1394_tx,
+	.ndo_tx_timeout	= ether1394_tx_timeout,
+	.ndo_change_mtu	= ether1394_change_mtu,
+};
+
 static void ether1394_init_dev(struct net_device *dev)
 {
-	dev->open		= ether1394_open;
-	dev->stop		= ether1394_stop;
-	dev->hard_start_xmit	= ether1394_tx;
-	dev->get_stats		= ether1394_stats;
-	dev->tx_timeout		= ether1394_tx_timeout;
-	dev->change_mtu		= ether1394_change_mtu;
 
 	dev->header_ops		= &ether1394_header_ops;
+	dev->netdev_ops		= &ether1394_netdev_ops;
 
 	SET_ETHTOOL_OPS(dev, &ethtool_ops);
 
@@ -767,7 +764,7 @@
 static int ether1394_header_cache(const struct neighbour *neigh,
 				  struct hh_cache *hh)
 {
-	unsigned short type = hh->hh_type;
+	__be16 type = hh->hh_type;
 	struct net_device *dev = neigh->dev;
 	struct eth1394hdr *eth =
 		(struct eth1394hdr *)((u8 *)hh->hh_data + 16 - ETH1394_HLEN);
@@ -795,7 +792,7 @@
  ******************************************/
 
 /* Copied from net/ethernet/eth.c */
-static u16 ether1394_type_trans(struct sk_buff *skb, struct net_device *dev)
+static __be16 ether1394_type_trans(struct sk_buff *skb, struct net_device *dev)
 {
 	struct eth1394hdr *eth;
 	unsigned char *rawp;
@@ -829,17 +826,17 @@
 
 /* Parse an encapsulated IP1394 header into an ethernet frame packet.
  * We also perform ARP translation here, if need be.  */
-static u16 ether1394_parse_encap(struct sk_buff *skb, struct net_device *dev,
+static __be16 ether1394_parse_encap(struct sk_buff *skb, struct net_device *dev,
 				 nodeid_t srcid, nodeid_t destid,
-				 u16 ether_type)
+				 __be16 ether_type)
 {
 	struct eth1394_priv *priv = netdev_priv(dev);
-	u64 dest_hw;
-	unsigned short ret = 0;
+	__be64 dest_hw;
+	__be16 ret = 0;
 
 	/* Setup our hw addresses. We use these to build the ethernet header. */
 	if (destid == (LOCAL_BUS | ALL_NODES))
-		dest_hw = ~0ULL;  /* broadcast */
+		dest_hw = ~cpu_to_be64(0);  /* broadcast */
 	else
 		dest_hw = cpu_to_be64((u64)priv->host->csr.guid_hi << 32 |
 				      priv->host->csr.guid_lo);
@@ -873,7 +870,7 @@
 		node = eth1394_find_node_guid(&priv->ip_node_list,
 					      be64_to_cpu(guid));
 		if (!node)
-			return 0;
+			return cpu_to_be16(0);
 
 		node_info =
 		    (struct eth1394_node_info *)node->ud->device.driver_data;
@@ -1063,7 +1060,7 @@
 	unsigned long flags;
 	struct eth1394_priv *priv = netdev_priv(dev);
 	union eth1394_hdr *hdr = (union eth1394_hdr *)buf;
-	u16 ether_type = 0;  /* initialized to clear warning */
+	__be16 ether_type = cpu_to_be16(0);  /* initialized to clear warning */
 	int hdr_len;
 	struct unit_directory *ud = priv->ud_list[NODEID_TO_NODE(srcid)];
 	struct eth1394_node_info *node_info;
@@ -1075,7 +1072,7 @@
 			HPSB_PRINT(KERN_ERR, "ether1394 rx: sender nodeid "
 				   "lookup failure: " NODE_BUS_FMT,
 				   NODE_BUS_ARGS(priv->host, srcid));
-			priv->stats.rx_dropped++;
+			dev->stats.rx_dropped++;
 			return -1;
 		}
 		ud = node->ud;
@@ -1098,7 +1095,7 @@
 		skb = dev_alloc_skb(len + dev->hard_header_len + 15);
 		if (unlikely(!skb)) {
 			ETH1394_PRINT_G(KERN_ERR, "Out of memory\n");
-			priv->stats.rx_dropped++;
+			dev->stats.rx_dropped++;
 			return -1;
 		}
 		skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
@@ -1217,15 +1214,15 @@
 	spin_lock_irqsave(&priv->lock, flags);
 
 	if (!skb->protocol) {
-		priv->stats.rx_errors++;
-		priv->stats.rx_dropped++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_dropped++;
 		dev_kfree_skb_any(skb);
 	} else if (netif_rx(skb) == NET_RX_DROP) {
-		priv->stats.rx_errors++;
-		priv->stats.rx_dropped++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_dropped++;
 	} else {
-		priv->stats.rx_packets++;
-		priv->stats.rx_bytes += skb->len;
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += skb->len;
 	}
 
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -1234,8 +1231,6 @@
 	if (netif_queue_stopped(dev))
 		netif_wake_queue(dev);
 
-	dev->last_rx = jiffies;
-
 	return 0;
 }
 
@@ -1259,7 +1254,7 @@
 
 static void ether1394_iso(struct hpsb_iso *iso)
 {
-	quadlet_t *data;
+	__be32 *data;
 	char *buf;
 	struct eth1394_host_info *hi;
 	struct net_device *dev;
@@ -1283,7 +1278,7 @@
 	for (i = 0; i < nready; i++) {
 		struct hpsb_iso_packet_info *info =
 			&iso->infos[(iso->first_packet + i) % iso->buf_packets];
-		data = (quadlet_t *)(iso->data_buf.kvirt + info->offset);
+		data = (__be32 *)(iso->data_buf.kvirt + info->offset);
 
 		/* skip over GASP header */
 		buf = (char *)data + 8;
@@ -1509,17 +1504,18 @@
 static void ether1394_dg_complete(struct packet_task *ptask, int fail)
 {
 	struct sk_buff *skb = ptask->skb;
-	struct eth1394_priv *priv = netdev_priv(skb->dev);
+	struct net_device *dev = skb->dev;
+	struct eth1394_priv *priv = netdev_priv(dev);
 	unsigned long flags;
 
 	/* Statistics */
 	spin_lock_irqsave(&priv->lock, flags);
 	if (fail) {
-		priv->stats.tx_dropped++;
-		priv->stats.tx_errors++;
+		dev->stats.tx_dropped++;
+		dev->stats.tx_errors++;
 	} else {
-		priv->stats.tx_bytes += skb->len;
-		priv->stats.tx_packets++;
+		dev->stats.tx_bytes += skb->len;
+		dev->stats.tx_packets++;
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -1614,7 +1610,7 @@
 		if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
 			priv->bc_dgl++;
 	} else {
-		__be64 guid = get_unaligned((u64 *)hdr_buf.h_dest);
+		__be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest);
 
 		node = eth1394_find_node_guid(&priv->ip_node_list,
 					      be64_to_cpu(guid));
@@ -1696,8 +1692,8 @@
 		dev_kfree_skb(skb);
 
 	spin_lock_irqsave(&priv->lock, flags);
-	priv->stats.tx_dropped++;
-	priv->stats.tx_errors++;
+	dev->stats.tx_dropped++;
+	dev->stats.tx_errors++;
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	/*
diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h
index 4f3e2dd..d53bac4 100644
--- a/drivers/ieee1394/eth1394.h
+++ b/drivers/ieee1394/eth1394.h
@@ -54,7 +54,6 @@
 
 /* Private structure for our ethernet driver */
 struct eth1394_priv {
-	struct net_device_stats stats;	/* Device stats			 */
 	struct hpsb_host *host;		/* The card for this dev	 */
 	u16 bc_maxpayload;		/* Max broadcast payload	 */
 	u8 bc_sspd;			/* Max broadcast speed		 */
@@ -82,7 +81,7 @@
 
 struct eth1394hdr {
 	unsigned char	h_dest[ETH1394_ALEN];	/* destination eth1394 addr	*/
-	unsigned short	h_proto;		/* packet type ID field	*/
+	__be16		h_proto;		/* packet type ID field	*/
 }  __attribute__((packed));
 
 static inline struct eth1394hdr *eth1394_hdr(const struct sk_buff *skb)
@@ -99,13 +98,13 @@
 struct eth1394_uf_hdr {
 	u16 lf:2;
 	u16 res:14;
-	u16 ether_type;		/* Ethernet packet type */
+	__be16 ether_type;		/* Ethernet packet type */
 } __attribute__((packed));
 #elif defined __LITTLE_ENDIAN_BITFIELD
 struct eth1394_uf_hdr {
 	u16 res:14;
 	u16 lf:2;
-	u16 ether_type;
+	__be16 ether_type;
 } __attribute__((packed));
 #else
 #error Unknown bit field type
@@ -117,7 +116,7 @@
 	u16 lf:2;
 	u16 res1:2;
 	u16 dg_size:12;		/* Datagram size */
-	u16 ether_type;		/* Ethernet packet type */
+	__be16 ether_type;		/* Ethernet packet type */
 	u16 dgl;		/* Datagram label */
 	u16 res2;
 } __attribute__((packed));
@@ -126,7 +125,7 @@
 	u16 dg_size:12;
 	u16 res1:2;
 	u16 lf:2;
-	u16 ether_type;
+	__be16 ether_type;
 	u16 dgl;
 	u16 res2;
 } __attribute__((packed));
@@ -207,11 +206,11 @@
 	u16 opcode;		/* ARP Opcode	*/
 	/* Above is exactly the same format as struct arphdr */
 
-	u64 s_uniq_id;		/* Sender's 64bit EUI			*/
+	__be64 s_uniq_id;	/* Sender's 64bit EUI		*/
 	u8 max_rec;		/* Sender's max packet size		*/
 	u8 sspd;		/* Sender's max speed			*/
-	u16 fifo_hi;		/* hi 16bits of sender's FIFO addr	*/
-	u32 fifo_lo;		/* lo 32bits of sender's FIFO addr	*/
+	__be16 fifo_hi;		/* hi 16bits of sender's FIFO addr	*/
+	__be32 fifo_lo;		/* lo 32bits of sender's FIFO addr	*/
 	u32 sip;		/* Sender's IP Address			*/
 	u32 tip;		/* IP Address of requested hw addr	*/
 };
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index 272543a..600e391 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -320,7 +320,7 @@
  */
 u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
 					 struct hpsb_host *host,
-					 struct hpsb_address_ops *ops,
+					 const struct hpsb_address_ops *ops,
 					 u64 size, u64 alignment,
 					 u64 start, u64 end)
 {
@@ -407,7 +407,8 @@
  * are automatically deallocated together with the hpsb_highlevel @hl.
  */
 int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
-			    struct hpsb_address_ops *ops, u64 start, u64 end)
+			    const struct hpsb_address_ops *ops,
+			    u64 start, u64 end)
 {
 	struct hpsb_address_serve *as;
 	struct list_head *lh;
@@ -420,7 +421,7 @@
 		return 0;
 	}
 
-	as = kmalloc(sizeof(*as), GFP_ATOMIC);
+	as = kmalloc(sizeof(*as), GFP_KERNEL);
 	if (!as)
 		return 0;
 
@@ -477,7 +478,7 @@
 	return retval;
 }
 
-static struct hpsb_address_ops dummy_ops;
+static const struct hpsb_address_ops dummy_ops;
 
 /* dummy address spaces as lower and upper bounds of the host's a.s. list */
 static void init_hpsb_highlevel(struct hpsb_host *host)
diff --git a/drivers/ieee1394/highlevel.h b/drivers/ieee1394/highlevel.h
index bc5d085..9dba89f 100644
--- a/drivers/ieee1394/highlevel.h
+++ b/drivers/ieee1394/highlevel.h
@@ -15,7 +15,7 @@
 struct hpsb_address_serve {
 	struct list_head host_list;	/* per host list */
 	struct list_head hl_list;	/* hpsb_highlevel list */
-	struct hpsb_address_ops *op;
+	const struct hpsb_address_ops *op;
 	struct hpsb_host *host;
 	u64 start;	/* first address handled, quadlet aligned */
 	u64 end;	/* first address behind, quadlet aligned */
@@ -119,11 +119,12 @@
 
 u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
 					 struct hpsb_host *host,
-					 struct hpsb_address_ops *ops,
+					 const struct hpsb_address_ops *ops,
 					 u64 size, u64 alignment,
 					 u64 start, u64 end);
 int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
-			    struct hpsb_address_ops *ops, u64 start, u64 end);
+			    const struct hpsb_address_ops *ops,
+			    u64 start, u64 end);
 int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
 			      u64 start);
 
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index 237d0c9..e947d8f 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -34,18 +34,18 @@
 {
 	struct hpsb_host *host =
 		container_of(work, struct hpsb_host, delayed_reset.work);
-	int generation = host->csr.generation + 1;
+	u8 generation = host->csr.generation + 1;
 
 	/* The generation field rolls over to 2 rather than 0 per IEEE
 	 * 1394a-2000. */
 	if (generation > 0xf || generation < 2)
 		generation = 2;
 
-	CSR_SET_BUS_INFO_GENERATION(host->csr.rom, generation);
+	csr_set_bus_info_generation(host->csr.rom, generation);
 	if (csr1212_generate_csr_image(host->csr.rom) != CSR1212_SUCCESS) {
 		/* CSR image creation failed.
 		 * Reset generation field and do not issue a bus reset. */
-		CSR_SET_BUS_INFO_GENERATION(host->csr.rom,
+		csr_set_bus_info_generation(host->csr.rom,
 					    host->csr.generation);
 		return;
 	}
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index dd22995..49c3590 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -154,7 +154,7 @@
 	 * to set the hardware ConfigROM if the hardware supports handling
 	 * reads to the ConfigROM on its own. */
 	void (*set_hw_config_rom)(struct hpsb_host *host,
-				  quadlet_t *config_rom);
+				  __be32 *config_rom);
 
 	/* This function shall implement packet transmission based on
 	 * packet->type.  It shall CRC both parts of the packet (unless
diff --git a/drivers/ieee1394/ieee1394.h b/drivers/ieee1394/ieee1394.h
index 4049207..e0ae0d3 100644
--- a/drivers/ieee1394/ieee1394.h
+++ b/drivers/ieee1394/ieee1394.h
@@ -121,6 +121,9 @@
 
 #include <asm/byteorder.h>
 
+/* '1' '3' '9' '4' in ASCII */
+#define IEEE1394_BUSID_MAGIC	cpu_to_be32(0x31333934)
+
 #ifdef __BIG_ENDIAN_BITFIELD
 
 struct selfid {
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 79ef5fd..906c5a9 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -67,7 +67,7 @@
 	for (i = IEEE1394_SPEED_100; i <= old_speed; i++) {
 		*speed = i;
 		error = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
-				  &q, sizeof(quadlet_t));
+				  &q, 4);
 		if (error)
 			break;
 		*buffer = q;
@@ -85,7 +85,7 @@
 	return error;
 }
 
-static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
+static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr,
 			    void *buffer, void *__ci)
 {
 	struct nodemgr_csr_info *ci = (struct nodemgr_csr_info*)__ci;
@@ -93,7 +93,7 @@
 
 	for (i = 1; ; i++) {
 		error = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
-				  buffer, length);
+				  buffer, 4);
 		if (!error) {
 			ci->speed_unverified = 0;
 			break;
@@ -104,7 +104,7 @@
 
 		/* The ieee1394_core guessed the node's speed capability from
 		 * the self ID.  Check whether a lower speed works. */
-		if (ci->speed_unverified && length == sizeof(quadlet_t)) {
+		if (ci->speed_unverified) {
 			error = nodemgr_check_speed(ci, addr, buffer);
 			if (!error)
 				break;
@@ -115,20 +115,8 @@
 	return error;
 }
 
-#define OUI_FREECOM_TECHNOLOGIES_GMBH 0x0001db
-
-static int nodemgr_get_max_rom(quadlet_t *bus_info_data, void *__ci)
-{
-	/* Freecom FireWire Hard Drive firmware bug */
-	if (be32_to_cpu(bus_info_data[3]) >> 8 == OUI_FREECOM_TECHNOLOGIES_GMBH)
-		return 0;
-
-	return (be32_to_cpu(bus_info_data[2]) >> 8) & 0x3;
-}
-
 static struct csr1212_bus_ops nodemgr_csr_ops = {
 	.bus_read =	nodemgr_bus_read,
-	.get_max_rom =	nodemgr_get_max_rom
 };
 
 
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index 4f287a3..15ea097 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -31,9 +31,6 @@
 struct hpsb_host;
 struct ieee1394_device_id;
 
-/* '1' '3' '9' '4' in ASCII */
-#define IEEE1394_BUSID_MAGIC	__constant_cpu_to_be32(0x31333934)
-
 /* This is the start of a Node entry structure. It should be a stable API
  * for which to gather info from the Node Manager about devices attached
  * to the bus.  */
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index e509e13..65c1429 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -2973,7 +2973,7 @@
 	return 0;
 }
 
-static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
+static void ohci_set_hw_config_rom(struct hpsb_host *host, __be32 *config_rom)
 {
 	struct ti_ohci *ohci = host->hostdata;
 
@@ -3199,15 +3199,16 @@
 	/* Now enable LPS, which we need in order to start accessing
 	 * most of the registers.  In fact, on some cards (ALI M5251),
 	 * accessing registers in the SClk domain without LPS enabled
-	 * will lock up the machine.  Wait 50msec to make sure we have
-	 * full link enabled.  */
+	 * will lock up the machine. */
 	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
 
 	/* Disable and clear interrupts */
 	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
 	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
 
-	mdelay(50);
+	/* Flush MMIO writes and wait until the link is fully enabled. */
+	reg_read(ohci, OHCI1394_Version);
+	msleep(50);
 
 	/* Determine the number of available IR and IT contexts. */
 	ohci->nb_iso_rcv_ctx =
@@ -3233,8 +3234,9 @@
 	 * we need to get to that "no event", so enough should be initialized
 	 * by that point.
 	 */
-	if (request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
-			 OHCI1394_DRIVER_NAME, ohci)) {
+	err = request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
+			  OHCI1394_DRIVER_NAME, ohci);
+	if (err) {
 		PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
 		goto err;
 	}
@@ -3381,6 +3383,7 @@
 	ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
 	ohci_soft_reset(ohci);
 
+	free_irq(dev->irq, ohci);
 	err = pci_save_state(dev);
 	if (err) {
 		PRINT(KERN_ERR, "pci_save_state failed with %d", err);
@@ -3420,7 +3423,16 @@
 	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
 	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
 	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
-	mdelay(50);
+	reg_read(ohci, OHCI1394_Version);
+	msleep(50);
+
+	err = request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
+			  OHCI1394_DRIVER_NAME, ohci);
+	if (err) {
+		PRINT_G(KERN_ERR, "Failed to allocate interrupt %d", dev->irq);
+		return err;
+	}
+
 	ohci_initialize(ohci);
 
 	hpsb_resume_host(ohci->host);
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index 7aee1ac..dc15cad 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -1463,7 +1463,7 @@
 
                                 /* info_length, crc_length and 1394 magic number to check, if it is really a bus info block */
 				if (((be32_to_cpu(lynx->bus_info_block[0]) & 0xffff0000) == 0x04040000) &&
-				    (lynx->bus_info_block[1] == __constant_cpu_to_be32(0x31333934)))
+				    (lynx->bus_info_block[1] == IEEE1394_BUSID_MAGIC))
                                 {
                                         PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from");
                                 } else {
diff --git a/drivers/ieee1394/pcilynx.h b/drivers/ieee1394/pcilynx.h
index ec27321..693a169 100644
--- a/drivers/ieee1394/pcilynx.h
+++ b/drivers/ieee1394/pcilynx.h
@@ -52,7 +52,7 @@
         void __iomem *local_rom;
         void __iomem *local_ram;
         void __iomem *aux_port;
-	quadlet_t bus_info_block[5];
+	__be32 bus_info_block[5];
 
         /*
          * use local RAM of LOCALRAM_SIZE bytes for PCLs, which allows for
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index bf7e761..bad66c6 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -90,7 +90,7 @@
 static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
 		      u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
 		      u16 flags);
-static struct hpsb_address_ops arm_ops = {
+static const struct hpsb_address_ops arm_ops = {
 	.read = arm_read,
 	.write = arm_write,
 	.lock = arm_lock,
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index a373c18..ab1034c 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -265,7 +265,7 @@
 	.host_reset	= sbp2_host_reset,
 };
 
-static struct hpsb_address_ops sbp2_ops = {
+static const struct hpsb_address_ops sbp2_ops = {
 	.write		= sbp2_handle_status_write
 };
 
@@ -275,7 +275,7 @@
 static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64,
 				    size_t, u16);
 
-static struct hpsb_address_ops sbp2_physdma_ops = {
+static const struct hpsb_address_ops sbp2_physdma_ops = {
 	.read		= sbp2_handle_physdma_read,
 	.write		= sbp2_handle_physdma_write,
 };
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 2f4c28a..97e4b23 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -196,7 +196,7 @@
 
 	if (h_ret != H_SUCCESS) {
 		ehca_err(device, "hipz_h_alloc_resource_cq() failed "
-			 "h_ret=%li device=%p", h_ret, device);
+			 "h_ret=%lli device=%p", h_ret, device);
 		cq = ERR_PTR(ehca2ib_return_code(h_ret));
 		goto create_cq_exit2;
 	}
@@ -232,7 +232,7 @@
 
 		if (h_ret < H_SUCCESS) {
 			ehca_err(device, "hipz_h_register_rpage_cq() failed "
-				 "ehca_cq=%p cq_num=%x h_ret=%li counter=%i "
+				 "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
 				 "act_pages=%i", my_cq, my_cq->cq_number,
 				 h_ret, counter, param.act_pages);
 			cq = ERR_PTR(-EINVAL);
@@ -244,7 +244,7 @@
 			if ((h_ret != H_SUCCESS) || vpage) {
 				ehca_err(device, "Registration of pages not "
 					 "complete ehca_cq=%p cq_num=%x "
-					 "h_ret=%li", my_cq, my_cq->cq_number,
+					 "h_ret=%lli", my_cq, my_cq->cq_number,
 					 h_ret);
 				cq = ERR_PTR(-EAGAIN);
 				goto create_cq_exit4;
@@ -252,7 +252,7 @@
 		} else {
 			if (h_ret != H_PAGE_REGISTERED) {
 				ehca_err(device, "Registration of page failed "
-					 "ehca_cq=%p cq_num=%x h_ret=%li "
+					 "ehca_cq=%p cq_num=%x h_ret=%lli "
 					 "counter=%i act_pages=%i",
 					 my_cq, my_cq->cq_number,
 					 h_ret, counter, param.act_pages);
@@ -266,7 +266,7 @@
 
 	gal = my_cq->galpas.kernel;
 	cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
-	ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx",
+	ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
 		 my_cq, my_cq->cq_number, cqx_fec);
 
 	my_cq->ib_cq.cqe = my_cq->nr_of_entries =
@@ -307,7 +307,7 @@
 	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
 	if (h_ret != H_SUCCESS)
 		ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
-			 "cq_num=%x h_ret=%li", my_cq, my_cq->cq_number, h_ret);
+			 "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);
 
 create_cq_exit2:
 	write_lock_irqsave(&ehca_cq_idr_lock, flags);
@@ -355,7 +355,7 @@
 	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
 	if (h_ret == H_R_STATE) {
 		/* cq in err: read err data and destroy it forcibly */
-		ehca_dbg(device, "ehca_cq=%p cq_num=%x ressource=%lx in err "
+		ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
 			 "state. Try to delete it forcibly.",
 			 my_cq, cq_num, my_cq->ipz_cq_handle.handle);
 		ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
@@ -365,7 +365,7 @@
 				 cq_num);
 	}
 	if (h_ret != H_SUCCESS) {
-		ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%li "
+		ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
 			 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
 		return ehca2ib_return_code(h_ret);
 	}
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 4628822..9209c53 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -393,7 +393,7 @@
 	hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
 				  cap, props->init_type, port_modify_mask);
 	if (hret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "Modify port failed  h_ret=%li",
+		ehca_err(&shca->ib_device, "Modify port failed  h_ret=%lli",
 			 hret);
 		ret = -EINVAL;
 	}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 3128a50..99bcbd7 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -99,7 +99,7 @@
 			return;
 
 		ehca_err(&shca->ib_device,
-			 "QP 0x%x (resource=%lx) has errors.",
+			 "QP 0x%x (resource=%llx) has errors.",
 			 qp->ib_qp.qp_num, resource);
 		break;
 	}
@@ -108,21 +108,21 @@
 		struct ehca_cq *cq = (struct ehca_cq *)data;
 
 		ehca_err(&shca->ib_device,
-			 "CQ 0x%x (resource=%lx) has errors.",
+			 "CQ 0x%x (resource=%llx) has errors.",
 			 cq->cq_number, resource);
 		break;
 	}
 	default:
 		ehca_err(&shca->ib_device,
-			 "Unknown error type: %lx on %s.",
+			 "Unknown error type: %llx on %s.",
 			 type, shca->ib_device.name);
 		break;
 	}
 
-	ehca_err(&shca->ib_device, "Error data is available: %lx.", resource);
+	ehca_err(&shca->ib_device, "Error data is available: %llx.", resource);
 	ehca_err(&shca->ib_device, "EHCA ----- error data begin "
 		 "---------------------------------------------------");
-	ehca_dmp(rblock, length, "resource=%lx", resource);
+	ehca_dmp(rblock, length, "resource=%llx", resource);
 	ehca_err(&shca->ib_device, "EHCA ----- error data end "
 		 "----------------------------------------------------");
 
@@ -152,7 +152,7 @@
 
 	if (ret == H_R_STATE)
 		ehca_err(&shca->ib_device,
-			 "No error data is available: %lx.", resource);
+			 "No error data is available: %llx.", resource);
 	else if (ret == H_SUCCESS) {
 		int length;
 
@@ -164,7 +164,7 @@
 		print_error_data(shca, data, rblock, length);
 	} else
 		ehca_err(&shca->ib_device,
-			 "Error data could not be fetched: %lx", resource);
+			 "Error data could not be fetched: %llx", resource);
 
 	ehca_free_fw_ctrlblock(rblock);
 
@@ -514,7 +514,7 @@
 	struct ehca_cq *cq;
 
 	eqe_value = eqe->entry;
-	ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
+	ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
 	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
 		ehca_dbg(&shca->ib_device, "Got completion event");
 		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
@@ -603,7 +603,7 @@
 		ret = hipz_h_eoi(eq->ist);
 		if (ret != H_SUCCESS)
 			ehca_err(&shca->ib_device,
-				 "bad return code EOI -rc = %ld\n", ret);
+				 "bad return code EOI -rc = %lld\n", ret);
 		ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
 	}
 	if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 3b77b67..368311c 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -304,7 +304,7 @@
 
 	h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
 	if (h_ret != H_SUCCESS) {
-		ehca_gen_err("Cannot query device properties. h_ret=%li",
+		ehca_gen_err("Cannot query device properties. h_ret=%lli",
 			     h_ret);
 		ret = -EPERM;
 		goto sense_attributes1;
@@ -391,7 +391,7 @@
 	port = (struct hipz_query_port *)rblock;
 	h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
 	if (h_ret != H_SUCCESS) {
-		ehca_gen_err("Cannot query port properties. h_ret=%li",
+		ehca_gen_err("Cannot query port properties. h_ret=%lli",
 			     h_ret);
 		ret = -EPERM;
 		goto sense_attributes1;
@@ -682,7 +682,7 @@
 {
 	struct ehca_shca *shca = dev->driver_data;
 
-	return sprintf(buf, "%lx\n", shca->ipz_hca_handle.handle);
+	return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
 
 }
 static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
@@ -955,7 +955,7 @@
 			struct ehca_eq *eq = &shca->eq;
 			int max = 3;
 			volatile u64 q_ofs, q_ofs2;
-			u64 flags;
+			unsigned long flags;
 			spin_lock_irqsave(&eq->spinlock, flags);
 			q_ofs = eq->ipz_queue.current_q_offset;
 			spin_unlock_irqrestore(&eq->spinlock, flags);
diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/infiniband/hw/ehca/ehca_mcast.c
index e3ef026..120aedf 100644
--- a/drivers/infiniband/hw/ehca/ehca_mcast.c
+++ b/drivers/infiniband/hw/ehca/ehca_mcast.c
@@ -88,7 +88,7 @@
 	if (h_ret != H_SUCCESS)
 		ehca_err(ibqp->device,
 			 "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
-			 "h_ret=%li", my_qp, ibqp->qp_num, h_ret);
+			 "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
 
 	return ehca2ib_return_code(h_ret);
 }
@@ -125,7 +125,7 @@
 	if (h_ret != H_SUCCESS)
 		ehca_err(ibqp->device,
 			 "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
-			 "h_ret=%li", my_qp, ibqp->qp_num, h_ret);
+			 "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
 
 	return ehca2ib_return_code(h_ret);
 }
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index f974367..72f83f7 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -204,7 +204,7 @@
 	}
 	if ((size == 0) ||
 	    (((u64)iova_start + size) < (u64)iova_start)) {
-		ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
+		ehca_err(pd->device, "bad input values: size=%llx iova_start=%p",
 			 size, iova_start);
 		ib_mr = ERR_PTR(-EINVAL);
 		goto reg_phys_mr_exit0;
@@ -309,8 +309,8 @@
 	}
 
 	if (length == 0 || virt + length < virt) {
-		ehca_err(pd->device, "bad input values: length=%lx "
-			 "virt_base=%lx", length, virt);
+		ehca_err(pd->device, "bad input values: length=%llx "
+			 "virt_base=%llx", length, virt);
 		ib_mr = ERR_PTR(-EINVAL);
 		goto reg_user_mr_exit0;
 	}
@@ -373,7 +373,7 @@
 			  &e_mr->ib.ib_mr.rkey);
 	if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
 		ehca_warn(pd->device, "failed to register mr "
-			  "with hwpage_size=%lx", hwpage_size);
+			  "with hwpage_size=%llx", hwpage_size);
 		ehca_info(pd->device, "try to register mr with "
 			  "kpage_size=%lx", PAGE_SIZE);
 		/*
@@ -509,7 +509,7 @@
 			goto rereg_phys_mr_exit1;
 		if ((new_size == 0) ||
 		    (((u64)iova_start + new_size) < (u64)iova_start)) {
-			ehca_err(mr->device, "bad input values: new_size=%lx "
+			ehca_err(mr->device, "bad input values: new_size=%llx "
 				 "iova_start=%p", new_size, iova_start);
 			ret = -EINVAL;
 			goto rereg_phys_mr_exit1;
@@ -580,8 +580,8 @@
 
 	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%li mr=%p "
-			 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
+		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lli mr=%p "
+			 "hca_hndl=%llx mr_hndl=%llx lkey=%x",
 			 h_ret, mr, shca->ipz_hca_handle.handle,
 			 e_mr->ipz_mr_handle.handle, mr->lkey);
 		ret = ehca2ib_return_code(h_ret);
@@ -630,8 +630,8 @@
 	/* TODO: BUSY: MR still has bound window(s) */
 	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%li shca=%p "
-			 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
+		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
+			 "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
 			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
 			 e_mr->ipz_mr_handle.handle, mr->lkey);
 		ret = ehca2ib_return_code(h_ret);
@@ -671,8 +671,8 @@
 	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
 					 e_pd->fw_pd, &hipzout);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%li "
-			 "shca=%p hca_hndl=%lx mw=%p",
+		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
+			 "shca=%p hca_hndl=%llx mw=%p",
 			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
 		ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
 		goto alloc_mw_exit1;
@@ -713,8 +713,8 @@
 
 	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%li shca=%p "
-			 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
+		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
+			 "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
 			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
 			 e_mw->ipz_mw_handle.handle);
 		return ehca2ib_return_code(h_ret);
@@ -840,7 +840,7 @@
 		goto map_phys_fmr_exit0;
 	if (iova % e_fmr->fmr_page_size) {
 		/* only whole-numbered pages */
-		ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
+		ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
 			 iova, e_fmr->fmr_page_size);
 		ret = -EINVAL;
 		goto map_phys_fmr_exit0;
@@ -878,7 +878,7 @@
 map_phys_fmr_exit0:
 	if (ret)
 		ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
-			 "iova=%lx", ret, fmr, page_list, list_len, iova);
+			 "iova=%llx", ret, fmr, page_list, list_len, iova);
 	return ret;
 } /* end ehca_map_phys_fmr() */
 
@@ -964,8 +964,8 @@
 
 	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%li e_fmr=%p "
-			 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
+		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
+			 "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
 			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
 			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
 		ret = ehca2ib_return_code(h_ret);
@@ -1007,8 +1007,8 @@
 					 (u64)iova_start, size, hipz_acl,
 					 e_pd->fw_pd, &hipzout);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%li "
-			 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
+		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
+			 "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
 		ret = ehca2ib_return_code(h_ret);
 		goto ehca_reg_mr_exit0;
 	}
@@ -1033,9 +1033,9 @@
 ehca_reg_mr_exit1:
 	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "h_ret=%li shca=%p e_mr=%p "
-			 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
-			 "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%i",
+		ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
+			 "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
+			 "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
 			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
 			 hipzout.lkey, pginfo, pginfo->num_kpages,
 			 pginfo->num_hwpages, ret);
@@ -1045,8 +1045,8 @@
 ehca_reg_mr_exit0:
 	if (ret)
 		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
-			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
-			 "num_kpages=%lx num_hwpages=%lx",
+			 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
+			 "num_kpages=%llx num_hwpages=%llx",
 			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
 			 pginfo->num_kpages, pginfo->num_hwpages);
 	return ret;
@@ -1116,8 +1116,8 @@
 			 */
 			if (h_ret != H_SUCCESS) {
 				ehca_err(&shca->ib_device, "last "
-					 "hipz_reg_rpage_mr failed, h_ret=%li "
-					 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
+					 "hipz_reg_rpage_mr failed, h_ret=%lli "
+					 "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
 					 " lkey=%x", h_ret, e_mr, i,
 					 shca->ipz_hca_handle.handle,
 					 e_mr->ipz_mr_handle.handle,
@@ -1128,8 +1128,8 @@
 				ret = 0;
 		} else if (h_ret != H_PAGE_REGISTERED) {
 			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
-				 "h_ret=%li e_mr=%p i=%x lkey=%x hca_hndl=%lx "
-				 "mr_hndl=%lx", h_ret, e_mr, i,
+				 "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
+				 "mr_hndl=%llx", h_ret, e_mr, i,
 				 e_mr->ib.ib_mr.lkey,
 				 shca->ipz_hca_handle.handle,
 				 e_mr->ipz_mr_handle.handle);
@@ -1145,7 +1145,7 @@
 ehca_reg_mr_rpages_exit0:
 	if (ret)
 		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
-			 "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr,
+			 "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
 			 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
 	return ret;
 } /* end ehca_reg_mr_rpages() */
@@ -1184,7 +1184,7 @@
 	ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
 	if (ret) {
 		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
-			 "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx "
+			 "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
 			 "kpage=%p", e_mr, pginfo, pginfo->type,
 			 pginfo->num_kpages, pginfo->num_hwpages, kpage);
 		goto ehca_rereg_mr_rereg1_exit1;
@@ -1205,13 +1205,13 @@
 		 * (MW bound or MR is shared)
 		 */
 		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
-			  "(Rereg1), h_ret=%li e_mr=%p", h_ret, e_mr);
+			  "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
 		*pginfo = pginfo_save;
 		ret = -EAGAIN;
 	} else if ((u64 *)hipzout.vaddr != iova_start) {
 		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
-			 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
-			 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
+			 "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
+			 "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
 			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
 			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
 		ret = -EFAULT;
@@ -1235,7 +1235,7 @@
 ehca_rereg_mr_rereg1_exit0:
 	if ( ret && (ret != -EAGAIN) )
 		ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
-			 "pginfo=%p num_kpages=%lx num_hwpages=%lx",
+			 "pginfo=%p num_kpages=%llx num_hwpages=%llx",
 			 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
 			 pginfo->num_hwpages);
 	return ret;
@@ -1263,7 +1263,7 @@
 	    (e_mr->num_hwpages > MAX_RPAGES) ||
 	    (pginfo->num_hwpages > e_mr->num_hwpages)) {
 		ehca_dbg(&shca->ib_device, "Rereg3 case, "
-			 "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x",
+			 "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
 			 pginfo->num_hwpages, e_mr->num_hwpages);
 		rereg_1_hcall = 0;
 		rereg_3_hcall = 1;
@@ -1295,7 +1295,7 @@
 		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
 		if (h_ret != H_SUCCESS) {
 			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
-				 "h_ret=%li e_mr=%p hca_hndl=%lx mr_hndl=%lx "
+				 "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
 				 "mr->lkey=%x",
 				 h_ret, e_mr, shca->ipz_hca_handle.handle,
 				 e_mr->ipz_mr_handle.handle,
@@ -1328,8 +1328,8 @@
 ehca_rereg_mr_exit0:
 	if (ret)
 		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
-			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
-			 "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
+			 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
+			 "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
 			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
 			 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
 			 rereg_1_hcall, rereg_3_hcall);
@@ -1371,8 +1371,8 @@
 		 * FMRs are not shared and no MW bound to FMRs
 		 */
 		ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
-			 "(Rereg1), h_ret=%li e_fmr=%p hca_hndl=%lx "
-			 "mr_hndl=%lx lkey=%x lkey_out=%x",
+			 "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
+			 "mr_hndl=%llx lkey=%x lkey_out=%x",
 			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
 			 e_fmr->ipz_mr_handle.handle,
 			 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
@@ -1383,7 +1383,7 @@
 	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
 	if (h_ret != H_SUCCESS) {
 		ehca_err(&shca->ib_device, "hipz_free_mr failed, "
-			 "h_ret=%li e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
+			 "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
 			 "lkey=%x",
 			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
 			 e_fmr->ipz_mr_handle.handle,
@@ -1447,9 +1447,9 @@
 				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
 				    &hipzout);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%li "
+		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
 			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
-			 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
+			 "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
 			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
 			 shca->ipz_hca_handle.handle,
 			 e_origmr->ipz_mr_handle.handle,
@@ -1527,7 +1527,7 @@
 			  &e_mr->ib.ib_mr.rkey);
 	if (ret) {
 		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
-			 "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x "
+			 "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
 			 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
 			 num_kpages, num_hwpages);
 		goto ehca_reg_internal_maxmr_exit1;
@@ -1573,8 +1573,8 @@
 				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
 				    &hipzout);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%li "
-			 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
+		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
+			 "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
 			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
 			 e_origmr->ipz_mr_handle.handle,
 			 e_origmr->ib.ib_mr.lkey);
@@ -1651,28 +1651,28 @@
 	/* check first buffer */
 	if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
 		ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
-			     "pbuf->addr=%lx pbuf->size=%lx",
+			     "pbuf->addr=%llx pbuf->size=%llx",
 			     iova_start, pbuf->addr, pbuf->size);
 		return -EINVAL;
 	}
 	if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
 	    (num_phys_buf > 1)) {
-		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
-			     "pbuf->size=%lx", pbuf->addr, pbuf->size);
+		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx "
+			     "pbuf->size=%llx", pbuf->addr, pbuf->size);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < num_phys_buf; i++) {
 		if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
-			ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
-				     "pbuf->size=%lx",
+			ehca_gen_err("bad address, i=%x pbuf->addr=%llx "
+				     "pbuf->size=%llx",
 				     i, pbuf->addr, pbuf->size);
 			return -EINVAL;
 		}
 		if (((i > 0) &&	/* not 1st */
 		     (i < (num_phys_buf - 1)) &&	/* not last */
 		     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
-			ehca_gen_err("bad size, i=%x pbuf->size=%lx",
+			ehca_gen_err("bad size, i=%x pbuf->size=%llx",
 				     i, pbuf->size);
 			return -EINVAL;
 		}
@@ -1705,7 +1705,7 @@
 	page = page_list;
 	for (i = 0; i < list_len; i++) {
 		if (*page % e_fmr->fmr_page_size) {
-			ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
+			ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
 				     "fmr_page_size=%x", i, *page, page, e_fmr,
 				     e_fmr->fmr_page_size);
 			return -EINVAL;
@@ -1743,9 +1743,9 @@
 					     (pginfo->next_hwpage *
 					      pginfo->hwpage_size));
 			if ( !(*kpage) ) {
-				ehca_gen_err("pgaddr=%lx "
-					     "chunk->page_list[i]=%lx "
-					     "i=%x next_hwpage=%lx",
+				ehca_gen_err("pgaddr=%llx "
+					     "chunk->page_list[i]=%llx "
+					     "i=%x next_hwpage=%llx",
 					     pgaddr, (u64)sg_dma_address(
 						     &chunk->page_list[i]),
 					     i, pginfo->next_hwpage);
@@ -1795,11 +1795,11 @@
 	for (t = start_idx; t <= end_idx; t++) {
 		u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
 		if (ehca_debug_level >= 3)
-			ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
+			ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
 				     *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
 		if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
-			ehca_gen_err("uncontiguous page found pgaddr=%lx "
-				     "prev_pgaddr=%lx page_list_i=%x",
+			ehca_gen_err("uncontiguous page found pgaddr=%llx "
+				     "prev_pgaddr=%llx page_list_i=%x",
 				     pgaddr, *prev_pgaddr, t);
 			return -EINVAL;
 		}
@@ -1833,7 +1833,7 @@
 					   << PAGE_SHIFT );
 				*kpage = phys_to_abs(pgaddr);
 				if ( !(*kpage) ) {
-					ehca_gen_err("pgaddr=%lx i=%x",
+					ehca_gen_err("pgaddr=%llx i=%x",
 						     pgaddr, i);
 					ret = -EFAULT;
 					return ret;
@@ -1846,8 +1846,8 @@
 					if (pginfo->hwpage_cnt) {
 						ehca_gen_err(
 							"invalid alignment "
-							"pgaddr=%lx i=%x "
-							"mr_pgsize=%lx",
+							"pgaddr=%llx i=%x "
+							"mr_pgsize=%llx",
 							pgaddr, i,
 							pginfo->hwpage_size);
 						ret = -EFAULT;
@@ -1866,8 +1866,8 @@
 				if (ehca_debug_level >= 3) {
 					u64 val = *(u64 *)abs_to_virt(
 						phys_to_abs(pgaddr));
-					ehca_gen_dbg("kpage=%lx chunk_page=%lx "
-						     "value=%016lx",
+					ehca_gen_dbg("kpage=%llx chunk_page=%llx "
+						     "value=%016llx",
 						     *kpage, pgaddr, val);
 				}
 				prev_pgaddr = pgaddr;
@@ -1944,9 +1944,9 @@
 			if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
 			    (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
 				ehca_gen_err("kpage_cnt >= num_kpages, "
-					     "kpage_cnt=%lx num_kpages=%lx "
-					     "hwpage_cnt=%lx "
-					     "num_hwpages=%lx i=%x",
+					     "kpage_cnt=%llx num_kpages=%llx "
+					     "hwpage_cnt=%llx "
+					     "num_hwpages=%llx i=%x",
 					     pginfo->kpage_cnt,
 					     pginfo->num_kpages,
 					     pginfo->hwpage_cnt,
@@ -1957,8 +1957,8 @@
 				(pbuf->addr & ~(pginfo->hwpage_size - 1)) +
 				(pginfo->next_hwpage * pginfo->hwpage_size));
 			if ( !(*kpage) && pbuf->addr ) {
-				ehca_gen_err("pbuf->addr=%lx pbuf->size=%lx "
-					     "next_hwpage=%lx", pbuf->addr,
+				ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx "
+					     "next_hwpage=%llx", pbuf->addr,
 					     pbuf->size, pginfo->next_hwpage);
 				return -EFAULT;
 			}
@@ -1996,8 +1996,8 @@
 		*kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) +
 				     pginfo->next_hwpage * pginfo->hwpage_size);
 		if ( !(*kpage) ) {
-			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
-				     "next_listelem=%lx next_hwpage=%lx",
+			ehca_gen_err("*fmrlist=%llx fmrlist=%p "
+				     "next_listelem=%llx next_hwpage=%llx",
 				     *fmrlist, fmrlist,
 				     pginfo->u.fmr.next_listelem,
 				     pginfo->next_hwpage);
@@ -2025,7 +2025,7 @@
 						    ~(pginfo->hwpage_size - 1));
 				if (prev + pginfo->u.fmr.fmr_pgsize != p) {
 					ehca_gen_err("uncontiguous fmr pages "
-						     "found prev=%lx p=%lx "
+						     "found prev=%llx p=%llx "
 						     "idx=%x", prev, p, i + j);
 					return -EINVAL;
 				}
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index f161cf1..00c1081 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -331,7 +331,7 @@
 		if (cnt == (nr_q_pages - 1)) {	/* last page! */
 			if (h_ret != expected_hret) {
 				ehca_err(ib_dev, "hipz_qp_register_rpage() "
-					 "h_ret=%li", h_ret);
+					 "h_ret=%lli", h_ret);
 				ret = ehca2ib_return_code(h_ret);
 				goto init_qp_queue1;
 			}
@@ -345,7 +345,7 @@
 		} else {
 			if (h_ret != H_PAGE_REGISTERED) {
 				ehca_err(ib_dev, "hipz_qp_register_rpage() "
-					 "h_ret=%li", h_ret);
+					 "h_ret=%lli", h_ret);
 				ret = ehca2ib_return_code(h_ret);
 				goto init_qp_queue1;
 			}
@@ -709,7 +709,7 @@
 
 	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%li",
+		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
 			 h_ret);
 		ret = ehca2ib_return_code(h_ret);
 		goto create_qp_exit1;
@@ -1010,7 +1010,7 @@
 				mqpcb, my_qp->galpas.kernel);
 	if (hret != H_SUCCESS) {
 		ehca_err(pd->device, "Could not modify SRQ to INIT "
-			 "ehca_qp=%p qp_num=%x h_ret=%li",
+			 "ehca_qp=%p qp_num=%x h_ret=%lli",
 			 my_qp, my_qp->real_qp_num, hret);
 		goto create_srq2;
 	}
@@ -1024,7 +1024,7 @@
 				mqpcb, my_qp->galpas.kernel);
 	if (hret != H_SUCCESS) {
 		ehca_err(pd->device, "Could not enable SRQ "
-			 "ehca_qp=%p qp_num=%x h_ret=%li",
+			 "ehca_qp=%p qp_num=%x h_ret=%lli",
 			 my_qp, my_qp->real_qp_num, hret);
 		goto create_srq2;
 	}
@@ -1038,7 +1038,7 @@
 				mqpcb, my_qp->galpas.kernel);
 	if (hret != H_SUCCESS) {
 		ehca_err(pd->device, "Could not modify SRQ to RTR "
-			 "ehca_qp=%p qp_num=%x h_ret=%li",
+			 "ehca_qp=%p qp_num=%x h_ret=%lli",
 			 my_qp, my_qp->real_qp_num, hret);
 		goto create_srq2;
 	}
@@ -1078,7 +1078,7 @@
 					   &bad_send_wqe_p, NULL, 2);
 	if (h_ret != H_SUCCESS) {
 		ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
-			 " ehca_qp=%p qp_num=%x h_ret=%li",
+			 " ehca_qp=%p qp_num=%x h_ret=%lli",
 			 my_qp, qp_num, h_ret);
 		return ehca2ib_return_code(h_ret);
 	}
@@ -1134,7 +1134,7 @@
 
 	if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
 		ehca_gen_err("Invalid offset for calculating left cqes "
-				"wqe_p=%#lx wqe_v=%p\n", wqe_p, wqe_v);
+				"wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
 		return -EFAULT;
 	}
 
@@ -1168,7 +1168,7 @@
 				&send_wqe_p, &recv_wqe_p, 4);
 		if (h_ret != H_SUCCESS) {
 			ehca_err(&shca->ib_device, "disable_and_get_wqe() "
-				 "failed ehca_qp=%p qp_num=%x h_ret=%li",
+				 "failed ehca_qp=%p qp_num=%x h_ret=%lli",
 				 my_qp, qp_num, h_ret);
 			return ehca2ib_return_code(h_ret);
 		}
@@ -1261,7 +1261,7 @@
 				mqpcb, my_qp->galpas.kernel);
 	if (h_ret != H_SUCCESS) {
 		ehca_err(ibqp->device, "hipz_h_query_qp() failed "
-			 "ehca_qp=%p qp_num=%x h_ret=%li",
+			 "ehca_qp=%p qp_num=%x h_ret=%lli",
 			 my_qp, ibqp->qp_num, h_ret);
 		ret = ehca2ib_return_code(h_ret);
 		goto modify_qp_exit1;
@@ -1690,7 +1690,7 @@
 
 	if (h_ret != H_SUCCESS) {
 		ret = ehca2ib_return_code(h_ret);
-		ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%li "
+		ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
 			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
 		goto modify_qp_exit2;
 	}
@@ -1723,7 +1723,7 @@
 			ret = ehca2ib_return_code(h_ret);
 			ehca_err(ibqp->device, "ENABLE in context of "
 				 "RESET_2_INIT failed! Maybe you didn't get "
-				 "a LID h_ret=%li ehca_qp=%p qp_num=%x",
+				 "a LID h_ret=%lli ehca_qp=%p qp_num=%x",
 				 h_ret, my_qp, ibqp->qp_num);
 			goto modify_qp_exit2;
 		}
@@ -1909,7 +1909,7 @@
 	if (h_ret != H_SUCCESS) {
 		ret = ehca2ib_return_code(h_ret);
 		ehca_err(qp->device, "hipz_h_query_qp() failed "
-			 "ehca_qp=%p qp_num=%x h_ret=%li",
+			 "ehca_qp=%p qp_num=%x h_ret=%lli",
 			 my_qp, qp->qp_num, h_ret);
 		goto query_qp_exit1;
 	}
@@ -2074,7 +2074,7 @@
 
 	if (h_ret != H_SUCCESS) {
 		ret = ehca2ib_return_code(h_ret);
-		ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%li "
+		ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
 			 "ehca_qp=%p qp_num=%x",
 			 h_ret, my_qp, my_qp->real_qp_num);
 	}
@@ -2108,7 +2108,7 @@
 	if (h_ret != H_SUCCESS) {
 		ret = ehca2ib_return_code(h_ret);
 		ehca_err(srq->device, "hipz_h_query_qp() failed "
-			 "ehca_qp=%p qp_num=%x h_ret=%li",
+			 "ehca_qp=%p qp_num=%x h_ret=%lli",
 			 my_qp, my_qp->real_qp_num, h_ret);
 		goto query_srq_exit1;
 	}
@@ -2179,7 +2179,7 @@
 
 	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li "
+		ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
 			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
 		return ehca2ib_return_code(h_ret);
 	}
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index c711268..5a3d96f 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -822,7 +822,7 @@
 		offset = qmap->next_wqe_idx * ipz_queue->qe_size;
 		wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
 		if (!wqe) {
-			ehca_err(cq->device, "Invalid wqe offset=%#lx on "
+			ehca_err(cq->device, "Invalid wqe offset=%#llx on "
 				 "qp_num=%#x", offset, my_qp->real_qp_num);
 			return nr;
 		}
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 706d97a..44447aa 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -85,7 +85,7 @@
 
 		if (ret != H_SUCCESS) {
 			ehca_err(&shca->ib_device,
-				 "Can't define AQP1 for port %x. h_ret=%li",
+				 "Can't define AQP1 for port %x. h_ret=%lli",
 				 port, ret);
 			return ret;
 		}
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index 21f7d06..f09914c 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -116,7 +116,7 @@
 		unsigned char *deb = (unsigned char *)(adr); \
 		for (x = 0; x < l; x += 16) { \
 			printk(KERN_INFO "EHCA_DMP:%s " format \
-			       " adr=%p ofs=%04x %016lx %016lx\n", \
+			       " adr=%p ofs=%04x %016llx %016llx\n", \
 			       __func__, ##args, deb, x, \
 			       *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
 			deb += 16; \
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index e43ed8f..3cb688d 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -114,7 +114,7 @@
 
 	physical = galpas->user.fw_handle;
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical);
+	ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
 	/* VM_IO | VM_RESERVED are set by remap_pfn_range() */
 	ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
 			   vma->vm_page_prot);
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 415d3a4..d0ab0c0 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -226,7 +226,7 @@
 			     u32 *eq_ist)
 {
 	u64 ret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 	u64 allocate_controls;
 
 	/* resource type */
@@ -249,7 +249,7 @@
 	*eq_ist = (u32)outs[5];
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
-		ehca_gen_err("Not enough resource - ret=%li ", ret);
+		ehca_gen_err("Not enough resource - ret=%lli ", ret);
 
 	return ret;
 }
@@ -270,7 +270,7 @@
 			     struct ehca_alloc_cq_parms *param)
 {
 	u64 ret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
 				adapter_handle.handle,   /* r4  */
@@ -287,7 +287,7 @@
 		hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
-		ehca_gen_err("Not enough resources. ret=%li", ret);
+		ehca_gen_err("Not enough resources. ret=%lli", ret);
 
 	return ret;
 }
@@ -297,7 +297,7 @@
 {
 	u64 ret;
 	u64 allocate_controls, max_r10_reg, r11, r12;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	allocate_controls =
 		EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
@@ -362,7 +362,7 @@
 		hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]);
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
-		ehca_gen_err("Not enough resources. ret=%li", ret);
+		ehca_gen_err("Not enough resources. ret=%lli", ret);
 
 	return ret;
 }
@@ -454,7 +454,7 @@
 			     const u64 count)
 {
 	if (count != 1) {
-		ehca_gen_err("Ppage counter=%lx", count);
+		ehca_gen_err("Ppage counter=%llx", count);
 		return H_PARAMETER;
 	}
 	return hipz_h_register_rpage(adapter_handle,
@@ -489,7 +489,7 @@
 			     const struct h_galpa gal)
 {
 	if (count != 1) {
-		ehca_gen_err("Page counter=%lx", count);
+		ehca_gen_err("Page counter=%llx", count);
 		return H_PARAMETER;
 	}
 
@@ -508,7 +508,7 @@
 			     const struct h_galpa galpa)
 {
 	if (count > 1) {
-		ehca_gen_err("Page counter=%lx", count);
+		ehca_gen_err("Page counter=%llx", count);
 		return H_PARAMETER;
 	}
 
@@ -525,7 +525,7 @@
 			       int dis_and_get_function_code)
 {
 	u64 ret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
 				adapter_handle.handle,     /* r4 */
@@ -548,7 +548,7 @@
 		     struct h_galpa gal)
 {
 	u64 ret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 	ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
 				adapter_handle.handle, /* r4 */
 				qp_handle.handle,      /* r5 */
@@ -557,7 +557,7 @@
 				0, 0, 0, 0, 0);
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
-		ehca_gen_err("Insufficient resources ret=%li", ret);
+		ehca_gen_err("Insufficient resources ret=%lli", ret);
 
 	return ret;
 }
@@ -579,7 +579,7 @@
 		      struct ehca_qp *qp)
 {
 	u64 ret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	ret = hcp_galpas_dtor(&qp->galpas);
 	if (ret) {
@@ -593,7 +593,7 @@
 				qp->ipz_qp_handle.handle,  /* r6 */
 				0, 0, 0, 0, 0, 0);
 	if (ret == H_HARDWARE)
-		ehca_gen_err("HCA not operational. ret=%li", ret);
+		ehca_gen_err("HCA not operational. ret=%lli", ret);
 
 	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
 				      adapter_handle.handle,     /* r4 */
@@ -601,7 +601,7 @@
 				      0, 0, 0, 0, 0);
 
 	if (ret == H_RESOURCE)
-		ehca_gen_err("Resource still in use. ret=%li", ret);
+		ehca_gen_err("Resource still in use. ret=%lli", ret);
 
 	return ret;
 }
@@ -625,7 +625,7 @@
 		       u32 * bma_qp_nr)
 {
 	u64 ret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
 				adapter_handle.handle, /* r4 */
@@ -636,7 +636,7 @@
 	*bma_qp_nr = (u32)outs[1];
 
 	if (ret == H_ALIAS_EXIST)
-		ehca_gen_err("AQP1 already exists. ret=%li", ret);
+		ehca_gen_err("AQP1 already exists. ret=%lli", ret);
 
 	return ret;
 }
@@ -658,7 +658,7 @@
 				      0, 0);
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
-		ehca_gen_err("Not enough resources. ret=%li", ret);
+		ehca_gen_err("Not enough resources. ret=%lli", ret);
 
 	return ret;
 }
@@ -697,7 +697,7 @@
 				      0, 0, 0, 0);
 
 	if (ret == H_RESOURCE)
-		ehca_gen_err("H_FREE_RESOURCE failed ret=%li ", ret);
+		ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);
 
 	return ret;
 }
@@ -719,7 +719,7 @@
 				      0, 0, 0, 0, 0);
 
 	if (ret == H_RESOURCE)
-		ehca_gen_err("Resource in use. ret=%li ", ret);
+		ehca_gen_err("Resource in use. ret=%lli ", ret);
 
 	return ret;
 }
@@ -733,7 +733,7 @@
 			     struct ehca_mr_hipzout_parms *outparms)
 {
 	u64 ret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
 				adapter_handle.handle,            /* r4 */
@@ -774,9 +774,9 @@
 
 	if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
 		ehca_gen_err("logical_address_of_page not on a 4k boundary "
-			     "adapter_handle=%lx mr=%p mr_handle=%lx "
+			     "adapter_handle=%llx mr=%p mr_handle=%llx "
 			     "pagesize=%x queue_type=%x "
-			     "logical_address_of_page=%lx count=%lx",
+			     "logical_address_of_page=%llx count=%llx",
 			     adapter_handle.handle, mr,
 			     mr->ipz_mr_handle.handle, pagesize, queue_type,
 			     logical_address_of_page, count);
@@ -794,7 +794,7 @@
 		    struct ehca_mr_hipzout_parms *outparms)
 {
 	u64 ret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
 				adapter_handle.handle,     /* r4 */
@@ -828,7 +828,7 @@
 			  struct ehca_mr_hipzout_parms *outparms)
 {
 	u64 ret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
 				adapter_handle.handle,    /* r4 */
@@ -855,7 +855,7 @@
 			struct ehca_mr_hipzout_parms *outparms)
 {
 	u64 ret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
 				adapter_handle.handle,            /* r4 */
@@ -877,7 +877,7 @@
 			     struct ehca_mw_hipzout_parms *outparms)
 {
 	u64 ret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
 				adapter_handle.handle,      /* r4 */
@@ -895,7 +895,7 @@
 		    struct ehca_mw_hipzout_parms *outparms)
 {
 	u64 ret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
 				adapter_handle.handle,    /* r4 */
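Note on the ehca format-string changes above: they track powerpc64's move to the int-ll64.h convention, where u64 is unsigned long long, so %llx/%lli now match the handles and hcall return codes. A minimal sketch of the arch-independent idiom (hypothetical helper, not part of this patch) is:

	#include <linux/kernel.h>
	#include <linux/types.h>

	/* Print a u64 portably: cast to unsigned long long so "%llx"
	 * matches regardless of how the architecture defines u64. */
	static void example_print_handle(u64 handle)
	{
		printk(KERN_DEBUG "handle=%llx\n", (unsigned long long)handle);
	}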
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a3c5af1d..de5263b 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -367,7 +367,7 @@
 		if (err)
 			goto out;
 	} else {
-		/* Can't be smaller then the number of outstanding CQEs */
+		/* Can't be smaller than the number of outstanding CQEs */
 		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
 		if (entries < outst_cqe + 1) {
 			err = 0;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index dcefe1f..61588bd 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -543,14 +543,21 @@
 {
 	static int mlx4_ib_version_printed;
 	struct mlx4_ib_dev *ibdev;
+	int num_ports = 0;
 	int i;
 
-
 	if (!mlx4_ib_version_printed) {
 		printk(KERN_INFO "%s", mlx4_ib_version);
 		++mlx4_ib_version_printed;
 	}
 
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		num_ports++;
+
+	/* No point in registering a device with no ports... */
+	if (num_ports == 0)
+		return NULL;
+
 	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
 	if (!ibdev) {
 		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
@@ -574,9 +581,7 @@
 	ibdev->ib_dev.owner		= THIS_MODULE;
 	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
 	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
-	ibdev->num_ports = 0;
-	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
-		ibdev->num_ports++;
+	ibdev->num_ports		= num_ports;
 	ibdev->ib_dev.phys_port_cnt     = ibdev->num_ports;
 	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
 	ibdev->ib_dev.dma_device	= &dev->pdev->dev;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 39167a7..a91cb4c 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1462,7 +1462,8 @@
 }
 
 static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
-			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len)
+			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
+			 __be32 *lso_hdr_sz)
 {
 	unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
 
@@ -1479,12 +1480,8 @@
 
 	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
 
-	/* make sure LSO header is written before overwriting stamping */
-	wmb();
-
-	wqe->mss_hdr_size = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
-					wr->wr.ud.hlen);
-
+	*lso_hdr_sz  = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
+				   wr->wr.ud.hlen);
 	*lso_seg_len = halign;
 	return 0;
 }
@@ -1518,6 +1515,9 @@
 	int uninitialized_var(stamp);
 	int uninitialized_var(size);
 	unsigned uninitialized_var(seglen);
+	__be32 dummy;
+	__be32 *lso_wqe;
+	__be32 uninitialized_var(lso_hdr_sz);
 	int i;
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1525,6 +1525,8 @@
 	ind = qp->sq_next_wqe;
 
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
+		lso_wqe = &dummy;
+
 		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
 			err = -ENOMEM;
 			*bad_wr = wr;
@@ -1606,11 +1608,12 @@
 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
 
 			if (wr->opcode == IB_WR_LSO) {
-				err = build_lso_seg(wqe, wr, qp, &seglen);
+				err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz);
 				if (unlikely(err)) {
 					*bad_wr = wr;
 					goto out;
 				}
+				lso_wqe = (__be32 *) wqe;
 				wqe  += seglen;
 				size += seglen / 16;
 			}
@@ -1652,6 +1655,14 @@
 		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
 			set_data_seg(dseg, wr->sg_list + i);
 
+		/*
+		 * Possibly overwrite stamping in cacheline with LSO
+		 * segment only after making sure all data segments
+		 * are written.
+		 */
+		wmb();
+		*lso_wqe = lso_hdr_sz;
+
 		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
 				    MLX4_WQE_CTRL_FENCE : 0) | size;
 
@@ -1686,7 +1697,6 @@
 			stamp_send_wqe(qp, stamp, size * 16);
 			ind = pad_wraparound(qp, ind);
 		}
-
 	}
 
 out:
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index a812db2..a01b448 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -778,12 +778,13 @@
 	unsigned long flags;
 	struct list_head *hte;
 	struct nes_cm_node *cm_node;
+	__be32 tmp_addr = cpu_to_be32(loc_addr);
 
 	/* get a handle on the hte */
 	hte = &cm_core->connected_nodes;
 
 	nes_debug(NES_DBG_CM, "Searching for an owner node: %pI4:%x from core %p->%p\n",
-		  &loc_addr, loc_port, cm_core, hte);
+		  &tmp_addr, loc_port, cm_core, hte);
 
 	/* walk list and find cm_node associated with this session ID */
 	spin_lock_irqsave(&cm_core->ht_lock, flags);
@@ -816,6 +817,7 @@
 {
 	unsigned long flags;
 	struct nes_cm_listener *listen_node;
+	__be32 tmp_addr = cpu_to_be32(dst_addr);
 
 	/* walk list and find cm_node associated with this session ID */
 	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
@@ -833,7 +835,7 @@
 	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
 
 	nes_debug(NES_DBG_CM, "Unable to find listener for %pI4:%x\n",
-		  &dst_addr, dst_port);
+		  &tmp_addr, dst_port);
 
 	/* no listener */
 	return NULL;
@@ -2059,6 +2061,7 @@
 	struct tcphdr *tcph;
 	struct nes_cm_info nfo;
 	int skb_handled = 1;
+	__be32 tmp_daddr, tmp_saddr;
 
 	if (!skb)
 		return 0;
@@ -2074,8 +2077,11 @@
 	nfo.rem_addr = ntohl(iph->saddr);
 	nfo.rem_port = ntohs(tcph->source);
 
+	tmp_daddr = cpu_to_be32(iph->daddr);
+	tmp_saddr = cpu_to_be32(iph->saddr);
+
 	nes_debug(NES_DBG_CM, "Received packet: dest=%pI4:0x%04X src=%pI4:0x%04X\n",
-		  &iph->daddr, tcph->dest, &iph->saddr, tcph->source);
+		  &tmp_daddr, tcph->dest, &tmp_saddr, tcph->source);
 
 	do {
 		cm_node = find_node(cm_core,
@@ -2705,7 +2711,7 @@
 			sizeof(struct ietf_mpa_frame));
 
 
-	/* notify OF layer that accept event was successfull */
+	/* notify OF layer that accept event was successful */
 	cm_id->add_ref(cm_id);
 
 	cm_event.event = IW_CM_EVENT_ESTABLISHED;
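Note on the nes_cm.c hunks above: the %pI4 printk extension dereferences an address stored in network byte order, so host-order u32 values are copied into a __be32 temporary before printing. A minimal sketch of that pattern (hypothetical function, not from the driver) is:

	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <asm/byteorder.h>

	static void example_print_ipv4(u32 host_order_addr)
	{
		/* %pI4 expects a pointer to big-endian (network-order) data. */
		__be32 be_addr = cpu_to_be32(host_order_addr);

		printk(KERN_DEBUG "addr=%pI4\n", &be_addr);
	}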
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index aa9b734..6f3bc1b 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -655,6 +655,7 @@
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	int arp_index;
 	int err = 0;
+	__be32 tmp_addr;
 
 	for (arp_index = 0; (u32) arp_index < nesadapter->arp_table_size; arp_index++) {
 		if (nesadapter->arp_table[arp_index].ip_addr == ip_addr)
@@ -682,8 +683,9 @@
 
 	/* DELETE or RESOLVE */
 	if (arp_index == nesadapter->arp_table_size) {
+		tmp_addr = cpu_to_be32(ip_addr);
 		nes_debug(NES_DBG_NETDEV, "MAC for %pI4 not in ARP table - cannot %s\n",
-			  &ip_addr, action == NES_ARP_RESOLVE ? "resolve" : "delete");
+			  &tmp_addr, action == NES_ARP_RESOLVE ? "resolve" : "delete");
 		return -1;
 	}
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 19e06bc..0bd2a4f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -106,23 +106,17 @@
 
 	ipoib_dbg(priv, "bringing up interface\n");
 
-	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
+	if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+		napi_enable(&priv->napi);
 
 	if (ipoib_pkey_dev_delay_open(dev))
 		return 0;
 
-	napi_enable(&priv->napi);
+	if (ipoib_ib_dev_open(dev))
+		goto err_disable;
 
-	if (ipoib_ib_dev_open(dev)) {
-		napi_disable(&priv->napi);
-		return -EINVAL;
-	}
-
-	if (ipoib_ib_dev_up(dev)) {
-		ipoib_ib_dev_stop(dev, 1);
-		napi_disable(&priv->napi);
-		return -EINVAL;
-	}
+	if (ipoib_ib_dev_up(dev))
+		goto err_stop;
 
 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
 		struct ipoib_dev_priv *cpriv;
@@ -144,6 +138,15 @@
 	netif_start_queue(dev);
 
 	return 0;
+
+err_stop:
+	ipoib_ib_dev_stop(dev, 1);
+
+err_disable:
+	napi_disable(&priv->napi);
+	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
+
+	return -EINVAL;
 }
 
 static int ipoib_stop(struct net_device *dev)
@@ -711,26 +714,26 @@
 
 		neigh = *to_ipoib_neigh(skb->dst->neighbour);
 
-		if (neigh->ah)
-			if (unlikely((memcmp(&neigh->dgid.raw,
-					    skb->dst->neighbour->ha + 4,
-					    sizeof(union ib_gid))) ||
-					 (neigh->dev != dev))) {
-				spin_lock_irqsave(&priv->lock, flags);
-				/*
-				 * It's safe to call ipoib_put_ah() inside
-				 * priv->lock here, because we know that
-				 * path->ah will always hold one more reference,
-				 * so ipoib_put_ah() will never do more than
-				 * decrement the ref count.
-				 */
+		if (unlikely((memcmp(&neigh->dgid.raw,
+				     skb->dst->neighbour->ha + 4,
+				     sizeof(union ib_gid))) ||
+			     (neigh->dev != dev))) {
+			spin_lock_irqsave(&priv->lock, flags);
+			/*
+			 * It's safe to call ipoib_put_ah() inside
+			 * priv->lock here, because we know that
+			 * path->ah will always hold one more reference,
+			 * so ipoib_put_ah() will never do more than
+			 * decrement the ref count.
+			 */
+			if (neigh->ah)
 				ipoib_put_ah(neigh->ah);
-				list_del(&neigh->list);
-				ipoib_neigh_free(dev, neigh);
-				spin_unlock_irqrestore(&priv->lock, flags);
-				ipoib_path_lookup(skb, dev);
-				return NETDEV_TX_OK;
-			}
+			list_del(&neigh->list);
+			ipoib_neigh_free(dev, neigh);
+			spin_unlock_irqrestore(&priv->lock, flags);
+			ipoib_path_lookup(skb, dev);
+			return NETDEV_TX_OK;
+		}
 
 		if (ipoib_cm_get(neigh)) {
 			if (ipoib_cm_up(neigh)) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index a2eb3b9..425e311 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -409,7 +409,7 @@
 	}
 
 	if (mcast->logcount++ < 20) {
-		if (status == -ETIMEDOUT) {
+		if (status == -ETIMEDOUT || status == -EAGAIN) {
 			ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
 					mcast->mcmember.mgid.raw, status);
 		} else {
@@ -529,6 +529,9 @@
 	if (!priv->broadcast) {
 		struct ipoib_mcast *broadcast;
 
+		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+			return;
+
 		broadcast = ipoib_mcast_alloc(dev, 1);
 		if (!broadcast) {
 			ipoib_warn(priv, "failed to allocate broadcast group\n");
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 2cf1a40..5a76a55 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -61,6 +61,7 @@
 
 	ppriv = netdev_priv(pdev);
 
+	rtnl_lock();
 	mutex_lock(&ppriv->vlan_mutex);
 
 	/*
@@ -111,7 +112,7 @@
 		goto device_init_failed;
 	}
 
-	result = register_netdev(priv->dev);
+	result = register_netdevice(priv->dev);
 	if (result) {
 		ipoib_warn(priv, "failed to initialize; error %i", result);
 		goto register_failed;
@@ -134,12 +135,13 @@
 	list_add_tail(&priv->list, &ppriv->child_intfs);
 
 	mutex_unlock(&ppriv->vlan_mutex);
+	rtnl_unlock();
 
 	return 0;
 
 sysfs_failed:
 	ipoib_delete_debug_files(priv->dev);
-	unregister_netdev(priv->dev);
+	unregister_netdevice(priv->dev);
 
 register_failed:
 	ipoib_dev_cleanup(priv->dev);
@@ -149,6 +151,7 @@
 
 err:
 	mutex_unlock(&ppriv->vlan_mutex);
+	rtnl_unlock();
 	return result;
 }
 
@@ -162,10 +165,11 @@
 
 	ppriv = netdev_priv(pdev);
 
+	rtnl_lock();
 	mutex_lock(&ppriv->vlan_mutex);
 	list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
 		if (priv->pkey == pkey) {
-			unregister_netdev(priv->dev);
+			unregister_netdevice(priv->dev);
 			ipoib_dev_cleanup(priv->dev);
 			list_del(&priv->list);
 			free_netdev(priv->dev);
@@ -175,6 +179,7 @@
 		}
 	}
 	mutex_unlock(&ppriv->vlan_mutex);
+	rtnl_unlock();
 
 	return ret;
 }
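
The rtnl_lock()/register_netdevice() pairing introduced above follows the rule that the lock-free *_netdevice() variants expect the caller to already hold the RTNL, while register_netdev()/unregister_netdev() take it themselves and would self-deadlock inside the locked region. A minimal sketch of the pattern, with an illustrative wrapper name that is not part of this patch:

	static int register_child_locked(struct net_device *dev)
	{
		int err;

		rtnl_lock();
		err = register_netdevice(dev);	/* register_netdev() would deadlock here */
		rtnl_unlock();

		return err;
	}
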
diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig
index 77dedba..b411c51 100644
--- a/drivers/infiniband/ulp/iser/Kconfig
+++ b/drivers/infiniband/ulp/iser/Kconfig
@@ -1,6 +1,6 @@
 config INFINIBAND_ISER
 	tristate "iSCSI Extensions for RDMA (iSER)"
-	depends on SCSI && INET
+	depends on SCSI && INET && INFINIBAND_ADDR_TRANS
 	select SCSI_ISCSI_ATTRS
 	---help---
 	  Support for the iSCSI Extensions for RDMA (iSER) Protocol
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index f6e9f39..c3c8b9b 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -65,7 +65,7 @@
 
 /*
  * Scancode to keycode tables. These are just the default setting, and
- * are loadable via an userland utility.
+ * are loadable via a userland utility.
  */
 
 static const unsigned short atkbd_set2_keycode[512] = {
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 199055d..67e5553 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -220,4 +220,11 @@
 	  Say Y here if you want to support the built-in real time clock
 	  of the HP SDC controller.
 
+config INPUT_PCF50633_PMU
+	tristate "PCF50633 PMU events"
+	depends on MFD_PCF50633
+	help
+	 Say Y to include support for delivering PMU events via the
+	 input layer on the NXP PCF50633.
+
 endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index d7db2ae..bb62e6e 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -21,3 +21,4 @@
 obj-$(CONFIG_INPUT_UINPUT)		+= uinput.o
 obj-$(CONFIG_INPUT_APANEL)		+= apanel.o
 obj-$(CONFIG_INPUT_SGI_BTNS)		+= sgi_btns.o
+obj-$(CONFIG_INPUT_PCF50633_PMU)	+= pcf50633-input.o
diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c
index d82f7f7..71b8243 100644
--- a/drivers/input/misc/apanel.c
+++ b/drivers/input/misc/apanel.c
@@ -57,7 +57,7 @@
 
 struct apanel {
 	struct input_polled_dev *ipdev;
-	struct i2c_client client;
+	struct i2c_client *client;
 	unsigned short keymap[MAX_PANEL_KEYS];
 	u16    nkeys;
 	u16    led_bits;
@@ -66,16 +66,7 @@
 };
 
 
-static int apanel_probe(struct i2c_adapter *, int, int);
-
-/* for now, we only support one address */
-static unsigned short normal_i2c[] = {0, I2C_CLIENT_END};
-static unsigned short ignore = I2C_CLIENT_END;
-static struct i2c_client_address_data addr_data = {
-	.normal_i2c	= normal_i2c,
-	.probe		= &ignore,
-	.ignore		= &ignore,
-};
+static int apanel_probe(struct i2c_client *, const struct i2c_device_id *);
 
 static void report_key(struct input_dev *input, unsigned keycode)
 {
@@ -103,12 +94,12 @@
 	s32 data;
 	int i;
 
-	data = i2c_smbus_read_word_data(&ap->client, cmd);
+	data = i2c_smbus_read_word_data(ap->client, cmd);
 	if (data < 0)
 		return;	/* ignore errors (due to ACPI??) */
 
 	/* write back to clear latch */
-	i2c_smbus_write_word_data(&ap->client, cmd, 0);
+	i2c_smbus_write_word_data(ap->client, cmd, 0);
 
 	if (!data)
 		return;
@@ -124,7 +115,7 @@
 {
 	struct apanel *ap = container_of(work, struct apanel, led_work);
 
-	i2c_smbus_write_word_data(&ap->client, 0x10, ap->led_bits);
+	i2c_smbus_write_word_data(ap->client, 0x10, ap->led_bits);
 }
 
 static void mail_led_set(struct led_classdev *led,
@@ -140,7 +131,7 @@
 	schedule_work(&ap->led_work);
 }
 
-static int apanel_detach_client(struct i2c_client *client)
+static int apanel_remove(struct i2c_client *client)
 {
 	struct apanel *ap = i2c_get_clientdata(client);
 
@@ -148,43 +139,33 @@
 		led_classdev_unregister(&ap->mail_led);
 
 	input_unregister_polled_device(ap->ipdev);
-	i2c_detach_client(&ap->client);
 	input_free_polled_device(ap->ipdev);
 
 	return 0;
 }
 
-/* Function is invoked for every i2c adapter. */
-static int apanel_attach_adapter(struct i2c_adapter *adap)
-{
-	dev_dbg(&adap->dev, APANEL ": attach adapter id=%d\n", adap->id);
-
-	/* Our device is connected only to i801 on laptop */
-	if (adap->id != I2C_HW_SMBUS_I801)
-		return -ENODEV;
-
-	return i2c_probe(adap, &addr_data, apanel_probe);
-}
-
 static void apanel_shutdown(struct i2c_client *client)
 {
-	apanel_detach_client(client);
+	apanel_remove(client);
 }
 
+static struct i2c_device_id apanel_id[] = {
+	{ "fujitsu_apanel", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, apanel_id);
+
 static struct i2c_driver apanel_driver = {
 	.driver = {
 		.name = APANEL,
 	},
-	.attach_adapter = &apanel_attach_adapter,
-	.detach_client  = &apanel_detach_client,
+	.probe		= &apanel_probe,
+	.remove		= &apanel_remove,
 	.shutdown	= &apanel_shutdown,
+	.id_table	= apanel_id,
 };
 
 static struct apanel apanel = {
-	.client = {
-		.driver = &apanel_driver,
-		.name   = APANEL,
-	},
 	.keymap = {
 		[0] = KEY_MAIL,
 		[1] = KEY_WWW,
@@ -204,7 +185,8 @@
 };
 
 /* NB: Only one panel on the i2c. */
-static int apanel_probe(struct i2c_adapter *bus, int address, int kind)
+static int apanel_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
 {
 	struct apanel *ap;
 	struct input_polled_dev *ipdev;
@@ -212,9 +194,6 @@
 	u8 cmd = device_chip[APANEL_DEV_APPBTN] == CHIP_OZ992C ? 0 : 8;
 	int i, err = -ENOMEM;
 
-	dev_dbg(&bus->dev, APANEL ": probe adapter %p addr %d kind %d\n",
-		bus, address, kind);
-
 	ap = &apanel;
 
 	ipdev = input_allocate_polled_device();
@@ -222,18 +201,13 @@
 		goto out1;
 
 	ap->ipdev = ipdev;
-	ap->client.adapter = bus;
-	ap->client.addr = address;
+	ap->client = client;
 
-	i2c_set_clientdata(&ap->client, ap);
+	i2c_set_clientdata(client, ap);
 
-	err = i2c_attach_client(&ap->client);
-	if (err)
-		goto out2;
-
-	err = i2c_smbus_write_word_data(&ap->client, cmd, 0);
+	err = i2c_smbus_write_word_data(client, cmd, 0);
 	if (err) {
-		dev_warn(&ap->client.dev, APANEL ": smbus write error %d\n",
+		dev_warn(&client->dev, APANEL ": smbus write error %d\n",
 			 err);
 		goto out3;
 	}
@@ -246,7 +220,7 @@
 	idev->name = APANEL_NAME " buttons";
 	idev->phys = "apanel/input0";
 	idev->id.bustype = BUS_HOST;
-	idev->dev.parent = &ap->client.dev;
+	idev->dev.parent = &client->dev;
 
 	set_bit(EV_KEY, idev->evbit);
 
@@ -264,7 +238,7 @@
 
 	INIT_WORK(&ap->led_work, led_update);
 	if (device_chip[APANEL_DEV_LED] != CHIP_NONE) {
-		err = led_classdev_register(&ap->client.dev, &ap->mail_led);
+		err = led_classdev_register(&client->dev, &ap->mail_led);
 		if (err)
 			goto out4;
 	}
@@ -273,8 +247,6 @@
 out4:
 	input_unregister_polled_device(ipdev);
 out3:
-	i2c_detach_client(&ap->client);
-out2:
 	input_free_polled_device(ipdev);
 out1:
 	return err;
@@ -301,6 +273,7 @@
 	void __iomem *bios;
 	const void __iomem *p;
 	u8 devno;
+	unsigned char i2c_addr;
 	int found = 0;
 
 	bios = ioremap(0xF0000, 0x10000); /* Can't fail */
@@ -313,7 +286,7 @@
 
 	/* just use the first address */
 	p += 8;
-	normal_i2c[0] = readb(p+3) >> 1;
+	i2c_addr = readb(p + 3) >> 1;
 
 	for ( ; (devno = readb(p)) & 0x7f; p += 4) {
 		unsigned char method, slave, chip;
@@ -322,7 +295,7 @@
 		chip = readb(p + 2);
 		slave = readb(p + 3) >> 1;
 
-		if (slave != normal_i2c[0]) {
+		if (slave != i2c_addr) {
 			pr_notice(APANEL ": only one SMBus slave "
 				  "address supported, skiping device...\n");
 			continue;
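
With the attach_adapter/detach_client autoprobing removed, the apanel driver now relies on new-style i2c binding against the "fujitsu_apanel" id, so some piece of code has to instantiate the client at the address parsed from the BIOS table. A hedged sketch of what such an instantiation could look like (the adapter variable and the error handling are illustrative, not taken from this patch):

	struct i2c_board_info info = { };
	struct i2c_client *client;

	strlcpy(info.type, "fujitsu_apanel", I2C_NAME_SIZE);
	info.addr = i2c_addr;				/* address read from the BIOS table */
	client = i2c_new_device(adapter, &info);	/* matches apanel_id[] above */
	if (!client)
		return -ENODEV;
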
diff --git a/drivers/input/misc/pcf50633-input.c b/drivers/input/misc/pcf50633-input.c
new file mode 100644
index 0000000..039dcb0
--- /dev/null
+++ b/drivers/input/misc/pcf50633-input.c
@@ -0,0 +1,132 @@
+/* NXP PCF50633 Input Driver
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * Author: Balaji Rao <balajirrao@openmoko.org>
+ * All rights reserved.
+ *
+ * Broken down from monstrous PCF50633 driver mainly by
+ * Harald Welte, Andy Green and Werner Almesberger
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+
+#include <linux/mfd/pcf50633/core.h>
+
+#define PCF50633_OOCSTAT_ONKEY	0x01
+#define PCF50633_REG_OOCSTAT	0x12
+#define PCF50633_REG_OOCMODE	0x10
+
+struct pcf50633_input {
+	struct pcf50633 *pcf;
+	struct input_dev *input_dev;
+};
+
+static void
+pcf50633_input_irq(int irq, void *data)
+{
+	struct pcf50633_input *input;
+	int onkey_released;
+
+	input = data;
+
+	/* We report only one event depending on the key press status */
+	onkey_released = pcf50633_reg_read(input->pcf, PCF50633_REG_OOCSTAT)
+						& PCF50633_OOCSTAT_ONKEY;
+
+	if (irq == PCF50633_IRQ_ONKEYF && !onkey_released)
+		input_report_key(input->input_dev, KEY_POWER, 1);
+	else if (irq == PCF50633_IRQ_ONKEYR && onkey_released)
+		input_report_key(input->input_dev, KEY_POWER, 0);
+
+	input_sync(input->input_dev);
+}
+
+static int __devinit pcf50633_input_probe(struct platform_device *pdev)
+{
+	struct pcf50633_input *input;
+	struct pcf50633_subdev_pdata *pdata = pdev->dev.platform_data;
+	struct input_dev *input_dev;
+	int ret;
+
+
+	input = kzalloc(sizeof(*input), GFP_KERNEL);
+	if (!input)
+		return -ENOMEM;
+
+	input_dev = input_allocate_device();
+	if (!input_dev) {
+		kfree(input);
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, input);
+	input->pcf = pdata->pcf;
+	input->input_dev = input_dev;
+
+	input_dev->name = "PCF50633 PMU events";
+	input_dev->id.bustype = BUS_I2C;
+	input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_PWR);
+	set_bit(KEY_POWER, input_dev->keybit);
+
+	ret = input_register_device(input_dev);
+	if (ret) {
+		input_free_device(input_dev);
+		kfree(input);
+		return ret;
+	}
+	pcf50633_register_irq(pdata->pcf, PCF50633_IRQ_ONKEYR,
+				pcf50633_input_irq, input);
+	pcf50633_register_irq(pdata->pcf, PCF50633_IRQ_ONKEYF,
+				pcf50633_input_irq, input);
+
+	return 0;
+}
+
+static int __devexit pcf50633_input_remove(struct platform_device *pdev)
+{
+	struct pcf50633_input *input  = platform_get_drvdata(pdev);
+
+	pcf50633_free_irq(input->pcf, PCF50633_IRQ_ONKEYR);
+	pcf50633_free_irq(input->pcf, PCF50633_IRQ_ONKEYF);
+
+	input_unregister_device(input->input_dev);
+	kfree(input);
+
+	return 0;
+}
+
+static struct platform_driver pcf50633_input_driver = {
+	.driver = {
+		.name = "pcf50633-input",
+	},
+	.probe = pcf50633_input_probe,
+	.remove = __devexit_p(pcf50633_input_remove),
+};
+
+static int __init pcf50633_input_init(void)
+{
+	return platform_driver_register(&pcf50633_input_driver);
+}
+module_init(pcf50633_input_init);
+
+static void __exit pcf50633_input_exit(void)
+{
+	platform_driver_unregister(&pcf50633_input_driver);
+}
+module_exit(pcf50633_input_exit);
+
+MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
+MODULE_DESCRIPTION("PCF50633 input driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pcf50633-input");
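
The new driver only generates KEY_POWER press and release events, which reach user space through the ordinary evdev interface. A small stand-alone reader, assuming the PMU shows up as /dev/input/event0 (the actual event node depends on the system):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/input.h>

	int main(void)
	{
		struct input_event ev;
		int fd = open("/dev/input/event0", O_RDONLY);

		if (fd < 0)
			return 1;
		while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
			if (ev.type == EV_KEY && ev.code == KEY_POWER)
				printf("power key %s\n", ev.value ? "pressed" : "released");
		return 0;
	}
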
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index a0f45c4..d297acc 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -186,7 +186,7 @@
 	error = request_irq(irq, pxa930_trkball_interrupt, IRQF_DISABLED,
 			    pdev->name, trkball);
 	if (error) {
-		dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
+		dev_err(&pdev->dev, "failed to request irq: %d\n", error);
 		goto failed_free_io;
 	}
 
@@ -227,7 +227,7 @@
 	iounmap(trkball->mmio_base);
 failed:
 	kfree(trkball);
-	return ret;
+	return error;
 }
 
 static int __devexit pxa930_trkball_remove(struct platform_device *pdev)
diff --git a/drivers/input/touchscreen/da9034-ts.c b/drivers/input/touchscreen/da9034-ts.c
index 4342e77..fa67d78 100644
--- a/drivers/input/touchscreen/da9034-ts.c
+++ b/drivers/input/touchscreen/da9034-ts.c
@@ -16,6 +16,7 @@
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/input.h>
+#include <linux/workqueue.h>
 #include <linux/mfd/da903x.h>
 
 #define DA9034_MANUAL_CTRL	0x50
diff --git a/drivers/isdn/hardware/eicon/debuglib.h b/drivers/isdn/hardware/eicon/debuglib.h
index 016410c..8ea5877 100644
--- a/drivers/isdn/hardware/eicon/debuglib.h
+++ b/drivers/isdn/hardware/eicon/debuglib.h
@@ -235,7 +235,7 @@
 typedef void ( * DbgEv)  (unsigned short, unsigned long, va_list) ;
 typedef void ( * DbgIrq) (unsigned short, int, char *, va_list) ;
 typedef struct _DbgHandle_
-{ char    Registered ; /* driver successfull registered */
+{ char    Registered ; /* driver successfully registered */
 #define DBG_HANDLE_REG_NEW 0x01  /* this (new) structure    */
 #define DBG_HANDLE_REG_OLD 0x7f  /* old structure (see below)  */
  char    Version;  /* version of this structure  */
diff --git a/drivers/isdn/hardware/eicon/os_4bri.c b/drivers/isdn/hardware/eicon/os_4bri.c
index 7b4ec3f..c964b8d 100644
--- a/drivers/isdn/hardware/eicon/os_4bri.c
+++ b/drivers/isdn/hardware/eicon/os_4bri.c
@@ -997,7 +997,7 @@
 	diva_xdi_display_adapter_features(IoAdapter->ANum);
 
 	for (i = 0; i < IoAdapter->tasks; i++) {
-		DBG_LOG(("A(%d) %s adapter successfull started",
+		DBG_LOG(("A(%d) %s adapter successfully started",
 			 IoAdapter->QuadroList->QuadroAdapter[i]->ANum,
 			 (IoAdapter->tasks == 1) ? "BRI 2.0" : "4BRI"))
 		diva_xdi_didd_register_adapter(IoAdapter->QuadroList->QuadroAdapter[i]->ANum);
diff --git a/drivers/isdn/hardware/eicon/os_bri.c b/drivers/isdn/hardware/eicon/os_bri.c
index f31bba5..08f0199 100644
--- a/drivers/isdn/hardware/eicon/os_bri.c
+++ b/drivers/isdn/hardware/eicon/os_bri.c
@@ -736,7 +736,7 @@
 
 	IoAdapter->Properties.Features = (word) features;
 	diva_xdi_display_adapter_features(IoAdapter->ANum);
-	DBG_LOG(("A(%d) BRI adapter successfull started", IoAdapter->ANum))
+	DBG_LOG(("A(%d) BRI adapter successfully started", IoAdapter->ANum))
 	    /*
 	       Register with DIDD
 	     */
diff --git a/drivers/isdn/hardware/eicon/os_pri.c b/drivers/isdn/hardware/eicon/os_pri.c
index 9033565..5d65405 100644
--- a/drivers/isdn/hardware/eicon/os_pri.c
+++ b/drivers/isdn/hardware/eicon/os_pri.c
@@ -513,7 +513,7 @@
 
 	diva_xdi_display_adapter_features(IoAdapter->ANum);
 
-	DBG_LOG(("A(%d) PRI adapter successfull started", IoAdapter->ANum))
+	DBG_LOG(("A(%d) PRI adapter successfully started", IoAdapter->ANum))
 	/*
 	   Register with DIDD
 	 */
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig
index 1479348..fd112ae 100644
--- a/drivers/isdn/hardware/mISDN/Kconfig
+++ b/drivers/isdn/hardware/mISDN/Kconfig
@@ -23,3 +23,10 @@
 	   * HFC-8S (8 S/T interfaces on one chip)
 	   * HFC-E1 (E1 interface for 2Mbit ISDN)
 
+config MISDN_HFCUSB
+	tristate "Support for HFC-S USB based TAs"
+	depends on USB
+	help
+	  Enable support for USB ISDN TAs with Cologne Chip AG's
+	  HFC-S USB ISDN Controller.
+
diff --git a/drivers/isdn/hardware/mISDN/Makefile b/drivers/isdn/hardware/mISDN/Makefile
index 1e7ca53..b040352 100644
--- a/drivers/isdn/hardware/mISDN/Makefile
+++ b/drivers/isdn/hardware/mISDN/Makefile
@@ -5,3 +5,4 @@
 
 obj-$(CONFIG_MISDN_HFCPCI) += hfcpci.o
 obj-$(CONFIG_MISDN_HFCMULTI) += hfcmulti.o
+obj-$(CONFIG_MISDN_HFCUSB) += hfcsusb.o
diff --git a/drivers/isdn/hardware/mISDN/hfc_multi.h b/drivers/isdn/hardware/mISDN/hfc_multi.h
index 7bbf730..663b77f 100644
--- a/drivers/isdn/hardware/mISDN/hfc_multi.h
+++ b/drivers/isdn/hardware/mISDN/hfc_multi.h
@@ -2,10 +2,6 @@
  * see notice in hfc_multi.c
  */
 
-extern void ztdummy_extern_interrupt(void);
-extern void ztdummy_register_interrupt(void);
-extern int ztdummy_unregister_interrupt(void);
-
 #define DEBUG_HFCMULTI_FIFO	0x00010000
 #define	DEBUG_HFCMULTI_CRC	0x00020000
 #define	DEBUG_HFCMULTI_INIT	0x00040000
@@ -13,6 +9,7 @@
 #define	DEBUG_HFCMULTI_MODE	0x00100000
 #define	DEBUG_HFCMULTI_MSG	0x00200000
 #define	DEBUG_HFCMULTI_STATE	0x00400000
+#define	DEBUG_HFCMULTI_FILL	0x00800000
 #define	DEBUG_HFCMULTI_SYNC	0x01000000
 #define	DEBUG_HFCMULTI_DTMF	0x02000000
 #define	DEBUG_HFCMULTI_LOCK	0x80000000
@@ -170,6 +167,8 @@
 
 	u_long		chip;	/* chip configuration */
 	int		masterclk; /* port that provides master clock -1=off */
+	unsigned char	silence;/* silence byte */
+	unsigned char	silence_data[128];/* silence block */
 	int		dtmf;	/* flag that dtmf is currently in process */
 	int		Flen;	/* F-buffer size */
 	int		Zlen;	/* Z-buffer size (must be int for calculation)*/
@@ -198,6 +197,9 @@
 
 	spinlock_t	lock;	/* the lock */
 
+	struct mISDNclock *iclock; /* isdn clock support */
+	int		iclock_on;
+
 	/*
 	 * the channel index is counted from 0, regardless where the channel
 	 * is located on the hfc-channel.
diff --git a/drivers/isdn/hardware/mISDN/hfc_pci.h b/drivers/isdn/hardware/mISDN/hfc_pci.h
index 5783d22..3132ddc 100644
--- a/drivers/isdn/hardware/mISDN/hfc_pci.h
+++ b/drivers/isdn/hardware/mISDN/hfc_pci.h
@@ -26,7 +26,7 @@
  * change mask and threshold simultaneously
  */
 #define HFCPCI_BTRANS_THRESHOLD 128
-#define HFCPCI_BTRANS_MAX	256
+#define HFCPCI_FILLEMPTY	64
 #define HFCPCI_BTRANS_THRESMASK 0x00
 
 /* defines for PCI config */
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index c63e2f4..595ba8e 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -133,6 +133,12 @@
  *	Give the value of the clock control register (A_ST_CLK_DLY)
  *	of the S/T interfaces in TE mode.
  *	This register is needed for the TBR3 certification, so don't change it.
+ *
+ * clock:
+ *	NOTE: only one clock value must be given for all cards
+ *	Selects interface with clock source for mISDN and applications.
+ *	Set to card number starting with 1. Set to -1 to disable.
+ *	By default, the first card is used as clock source.
  */
 
 /*
@@ -140,7 +146,7 @@
  * #define HFC_REGISTER_DEBUG
  */
 
-static const char *hfcmulti_revision = "2.02";
+#define HFC_MULTI_VERSION	"2.03"
 
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -165,10 +171,6 @@
 static spinlock_t HFClock; /* global hfc list lock */
 
 static void ph_state_change(struct dchannel *);
-static void (*hfc_interrupt)(void);
-static void (*register_interrupt)(void);
-static int (*unregister_interrupt)(void);
-static int interrupt_registered;
 
 static struct hfc_multi *syncmaster;
 static int plxsd_master; /* if we have a master card (yet) */
@@ -184,7 +186,6 @@
 #define	CLKDEL_TE	0x0f	/* CLKDEL in TE mode */
 #define	CLKDEL_NT	0x6c	/* CLKDEL in NT mode
 				   (0x60 MUST be included!) */
-static u_char silence =	0xff;	/* silence by LAW */
 
 #define	DIP_4S	0x1		/* DIP Switches for Beronet 1S/2S/4S cards */
 #define	DIP_8S	0x2		/* DIP Switches for Beronet 8S+ cards */
@@ -195,12 +196,13 @@
  */
 
 static uint	type[MAX_CARDS];
-static uint	pcm[MAX_CARDS];
-static uint	dslot[MAX_CARDS];
+static int	pcm[MAX_CARDS];
+static int	dslot[MAX_CARDS];
 static uint	iomode[MAX_CARDS];
 static uint	port[MAX_PORTS];
 static uint	debug;
 static uint	poll;
+static int	clock;
 static uint	timer;
 static uint	clockdelay_te = CLKDEL_TE;
 static uint	clockdelay_nt = CLKDEL_NT;
@@ -209,14 +211,16 @@
 
 MODULE_AUTHOR("Andreas Eversberg");
 MODULE_LICENSE("GPL");
+MODULE_VERSION(HFC_MULTI_VERSION);
 module_param(debug, uint, S_IRUGO | S_IWUSR);
 module_param(poll, uint, S_IRUGO | S_IWUSR);
+module_param(clock, int, S_IRUGO | S_IWUSR);
 module_param(timer, uint, S_IRUGO | S_IWUSR);
 module_param(clockdelay_te, uint, S_IRUGO | S_IWUSR);
 module_param(clockdelay_nt, uint, S_IRUGO | S_IWUSR);
 module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR);
-module_param_array(pcm, uint, NULL, S_IRUGO | S_IWUSR);
-module_param_array(dslot, uint, NULL, S_IRUGO | S_IWUSR);
+module_param_array(pcm, int, NULL, S_IRUGO | S_IWUSR);
+module_param_array(dslot, int, NULL, S_IRUGO | S_IWUSR);
 module_param_array(iomode, uint, NULL, S_IRUGO | S_IWUSR);
 module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR);
 
@@ -1419,19 +1423,6 @@
 	HFC_outb(hc, R_TI_WD, poll_timer);
 	hc->hw.r_irqmsk_misc |= V_TI_IRQMSK;
 
-	/*
-	 * set up 125us interrupt, only if function pointer is available
-	 * and module parameter timer is set
-	 */
-	if (timer && hfc_interrupt && register_interrupt) {
-		/* only one chip should use this interrupt */
-		timer = 0;
-		interrupt_registered = 1;
-		hc->hw.r_irqmsk_misc |= V_PROC_IRQMSK;
-		/* deactivate other interrupts in ztdummy */
-		register_interrupt();
-	}
-
 	/* set E1 state machine IRQ */
 	if (hc->type == 1)
 		hc->hw.r_irqmsk_misc |= V_STA_IRQMSK;
@@ -1991,6 +1982,17 @@
 		return; /* no data */
 	}
 
+	/* "fill fifo if empty" feature */
+	if (bch && test_bit(FLG_FILLEMPTY, &bch->Flags)
+		&& !test_bit(FLG_HDLC, &bch->Flags) && z2 == z1) {
+		if (debug & DEBUG_HFCMULTI_FILL)
+			printk(KERN_DEBUG "%s: buffer empty, so we have "
+				"underrun\n", __func__);
+		/* fill buffer, to prevent future underrun */
+		hc->write_fifo(hc, hc->silence_data, poll >> 1);
+		Zspace -= (poll >> 1);
+	}
+
 	/* if audio data and connected slot */
 	if (bch && (!test_bit(FLG_HDLC, &bch->Flags)) && (!*txpending)
 		&& slot_tx >= 0) {
@@ -2027,7 +2029,6 @@
 			__func__, hc->id + 1, ch, Zspace, z1, z2, ii-i, len-i,
 			temp ? "HDLC":"TRANS");
 
-
 	/* Have to prep the audio data */
 	hc->write_fifo(hc, d, ii - i);
 	*idxp = ii;
@@ -2066,7 +2067,7 @@
 	 * no more data at all. this prevents sending an undefined value.
 	 */
 	if (bch && test_bit(FLG_TRANSPARENT, &bch->Flags))
-		HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, silence);
+		HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence);
 }
 
 
@@ -2583,7 +2584,6 @@
 	static int iq1 = 0, iq2 = 0, iq3 = 0, iq4 = 0,
 	    iq5 = 0, iq6 = 0, iqcnt = 0;
 #endif
-	static int		count;
 	struct hfc_multi	*hc = dev_id;
 	struct dchannel		*dch;
 	u_char			r_irq_statech, status, r_irq_misc, r_irq_oview;
@@ -2637,6 +2637,7 @@
 		iqcnt = 0;
 	}
 #endif
+
 	if (!r_irq_statech &&
 	    !(status & (V_DTMF_STA | V_LOST_STA | V_EXT_IRQSTA |
 	    V_MISC_IRQSTA | V_FR_IRQSTA))) {
@@ -2657,6 +2658,7 @@
 	if (status & V_MISC_IRQSTA) {
 		/* misc IRQ */
 		r_irq_misc = HFC_inb_nodebug(hc, R_IRQ_MISC);
+		r_irq_misc &= hc->hw.r_irqmsk_misc; /* ignore disabled irqs */
 		if (r_irq_misc & V_STA_IRQ) {
 			if (hc->type == 1) {
 				/* state machine */
@@ -2691,23 +2693,20 @@
 					plxsd_checksync(hc, 0);
 			}
 		}
-		if (r_irq_misc & V_TI_IRQ)
+		if (r_irq_misc & V_TI_IRQ) {
+			if (hc->iclock_on)
+				mISDN_clock_update(hc->iclock, poll, NULL);
 			handle_timer_irq(hc);
+		}
 
 		if (r_irq_misc & V_DTMF_IRQ) {
-			/* -> DTMF IRQ */
 			hfcmulti_dtmf(hc);
 		}
-		/* TODO: REPLACE !!!! 125 us Interrupts are not acceptable  */
 		if (r_irq_misc & V_IRQ_PROC) {
-			/* IRQ every 125us */
-			count++;
-			/* generate 1kHz signal */
-			if (count == 8) {
-				if (hfc_interrupt)
-					hfc_interrupt();
-				count = 0;
-			}
+			static int irq_proc_cnt;
+			if (!irq_proc_cnt++)
+				printk(KERN_WARNING "%s: got V_IRQ_PROC -"
+				    " this should not happen\n", __func__);
 		}
 
 	}
@@ -2954,7 +2953,7 @@
 			HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
 			HFC_wait(hc);
 			/* tx silence */
-			HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, silence);
+			HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence);
 			HFC_outb(hc, R_SLOT, (((ch / 4) * 8) +
 			    ((ch % 4) * 4)) << 1);
 			HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1));
@@ -2969,7 +2968,7 @@
 			HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
 			HFC_wait(hc);
 			/* tx silence */
-			HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, silence);
+			HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence);
 			/* enable RX fifo */
 			HFC_outb(hc, R_FIFO, (ch<<1)|1);
 			HFC_wait(hc);
@@ -3461,7 +3460,7 @@
 	switch (cq->op) {
 	case MISDN_CTRL_GETOP:
 		cq->op = MISDN_CTRL_HFC_OP | MISDN_CTRL_HW_FEATURES_OP
-			| MISDN_CTRL_RX_OFF;
+			| MISDN_CTRL_RX_OFF | MISDN_CTRL_FILL_EMPTY;
 		break;
 	case MISDN_CTRL_RX_OFF: /* turn off / on rx stream */
 		hc->chan[bch->slot].rx_off = !!cq->p1;
@@ -3476,6 +3475,12 @@
 			printk(KERN_DEBUG "%s: RX_OFF request (nr=%d off=%d)\n",
 			    __func__, bch->nr, hc->chan[bch->slot].rx_off);
 		break;
+	case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */
+		test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
+		if (debug & DEBUG_HFCMULTI_MSG)
+			printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d "
+				"off=%d)\n", __func__, bch->nr, !!cq->p1);
+		break;
 	case MISDN_CTRL_HW_FEATURES: /* fill features structure */
 		if (debug & DEBUG_HFCMULTI_MSG)
 			printk(KERN_DEBUG "%s: HW_FEATURE request\n",
@@ -3610,7 +3615,7 @@
 static void
 ph_state_change(struct dchannel *dch)
 {
-	struct hfc_multi *hc = dch->hw;
+	struct hfc_multi *hc;
 	int ch, i;
 
 	if (!dch) {
@@ -3618,6 +3623,7 @@
 		    __func__);
 		return;
 	}
+	hc = dch->hw;
 	ch = dch->slot;
 
 	if (hc->type == 1) {
@@ -3992,6 +3998,7 @@
 	}
 	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
 		return -EBUSY; /* b-channel can be only open once */
+	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
 	bch->ch.protocol = rq->protocol;
 	hc->chan[ch].rx_off = 0;
 	rq->ch = &bch->ch;
@@ -4081,6 +4088,15 @@
 	return err;
 }
 
+static int
+clockctl(void *priv, int enable)
+{
+	struct hfc_multi *hc = priv;
+
+	hc->iclock_on = enable;
+	return 0;
+}
+
 /*
  * initialize the card
  */
@@ -4495,10 +4511,14 @@
 		printk(KERN_WARNING "%s: release card (%d) entered\n",
 		    __func__, hc->id);
 
+	/* unregister clock source */
+	if (hc->iclock)
+		mISDN_unregister_clock(hc->iclock);
+
+	/* disable irq */
 	spin_lock_irqsave(&hc->lock, flags);
 	disable_hwirq(hc);
 	spin_unlock_irqrestore(&hc->lock, flags);
-
 	udelay(1000);
 
 	/* dimm leds */
@@ -4699,7 +4719,7 @@
 	} else
 		hc->chan[hc->dslot].jitter = 2; /* default */
 	snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d", HFC_cnt + 1);
-	ret = mISDN_register_device(&dch->dev, name);
+	ret = mISDN_register_device(&dch->dev, &hc->pci_dev->dev, name);
 	if (ret)
 		goto free_chan;
 	hc->created[0] = 1;
@@ -4807,9 +4827,9 @@
 		test_and_set_bit(HFC_CFG_DIS_ECHANNEL,
 		    &hc->chan[i + 2].cfg);
 	}
-	snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-%ds.%d/%d",
+	snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-%ds.%d-%d",
 		hc->type, HFC_cnt + 1, pt + 1);
-	ret = mISDN_register_device(&dch->dev, name);
+	ret = mISDN_register_device(&dch->dev, &hc->pci_dev->dev, name);
 	if (ret)
 		goto free_chan;
 	hc->created[pt] = 1;
@@ -4828,6 +4848,7 @@
 	struct hfc_multi	*hc;
 	u_long		flags;
 	u_char		dips = 0, pmj = 0; /* dip settings, port mode Jumpers */
+	int		i;
 
 	if (HFC_cnt >= MAX_CARDS) {
 		printk(KERN_ERR "too many cards (max=%d).\n",
@@ -4861,11 +4882,11 @@
 	hc->id = HFC_cnt;
 	hc->pcm = pcm[HFC_cnt];
 	hc->io_mode = iomode[HFC_cnt];
-	if (dslot[HFC_cnt] < 0) {
+	if (dslot[HFC_cnt] < 0 && hc->type == 1) {
 		hc->dslot = 0;
 		printk(KERN_INFO "HFC-E1 card has disabled D-channel, but "
 			"31 B-channels\n");
-	} if (dslot[HFC_cnt] > 0 && dslot[HFC_cnt] < 32) {
+	} if (dslot[HFC_cnt] > 0 && dslot[HFC_cnt] < 32 && hc->type == 1) {
 		hc->dslot = dslot[HFC_cnt];
 		printk(KERN_INFO "HFC-E1 card has alternating D-channel on "
 			"time slot %d\n", dslot[HFC_cnt]);
@@ -4876,9 +4897,17 @@
 	hc->masterclk = -1;
 	if (type[HFC_cnt] & 0x100) {
 		test_and_set_bit(HFC_CHIP_ULAW, &hc->chip);
-		silence = 0xff; /* ulaw silence */
+		hc->silence = 0xff; /* ulaw silence */
 	} else
-		silence = 0x2a; /* alaw silence */
+		hc->silence = 0x2a; /* alaw silence */
+	if ((poll >> 1) > sizeof(hc->silence_data)) {
+		printk(KERN_ERR "HFCMULTI error: silence_data too small, "
+			"please fix\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < (poll >> 1); i++)
+		hc->silence_data[i] = hc->silence;
+
 	if (!(type[HFC_cnt] & 0x200))
 		test_and_set_bit(HFC_CHIP_DTMF, &hc->chip);
 
@@ -4945,9 +4974,7 @@
 	switch (m->dip_type) {
 	case DIP_4S:
 		/*
-		 * get DIP Setting for beroNet 1S/2S/4S cards
-		 *  check if Port Jumper config matches
-		 * module param 'protocol'
+		 * Get DIP setting for beroNet 1S/2S/4S cards
 		 * DIP Setting: (collect GPIO 13/14/15 (R_GPIO_IN1) +
 		 * GPI 19/23 (R_GPI_IN2))
 		 */
@@ -4966,9 +4993,8 @@
 		break;
 	case DIP_8S:
 		/*
-		 * get DIP Setting for beroNet 8S0+ cards
-		 *
-		 * enable PCI auxbridge function
+		 * Get DIP Setting for beroNet 8S0+ cards
+		 * Enable PCI auxbridge function
 		 */
 		HFC_outb(hc, R_BRG_PCM_CFG, 1 | V_PCM_CLK);
 		/* prepare access to auxport */
@@ -5003,6 +5029,10 @@
 	list_add_tail(&hc->list, &HFClist);
 	spin_unlock_irqrestore(&HFClock, flags);
 
+	/* use as clock source */
+	if (clock == HFC_cnt + 1)
+		hc->iclock = mISDN_register_clock("HFCMulti", 0, clockctl, hc);
+
 	/* initialize hardware */
 	ret_err = init_card(hc);
 	if (ret_err) {
@@ -5137,8 +5167,7 @@
 	{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
 	PCI_DEVICE_ID_CCD_HFC8S, 0, 0, H(14)}, /* old Eval */
 	{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
-	PCI_SUBDEVICE_ID_CCD_IOB8STR, 0, 0, H(15)},
-	    /* IOB8ST Recording */
+	PCI_SUBDEVICE_ID_CCD_IOB8STR, 0, 0, H(15)}, /* IOB8ST Recording */
 	{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
 		PCI_SUBDEVICE_ID_CCD_IOB8ST, 0, 0, H(16)}, /* IOB8ST  */
 	{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
@@ -5188,18 +5217,16 @@
 	struct hm_map	*m = (struct hm_map *)ent->driver_data;
 	int		ret;
 
-	if (m == NULL) {
-		if (ent->vendor == PCI_VENDOR_ID_CCD)
-			if (ent->device == PCI_DEVICE_ID_CCD_HFC4S ||
-			    ent->device == PCI_DEVICE_ID_CCD_HFC8S ||
-			    ent->device == PCI_DEVICE_ID_CCD_HFCE1)
-				printk(KERN_ERR
-				    "unknown HFC multiport controller "
-				    "(vendor:%x device:%x subvendor:%x "
-				    "subdevice:%x) Please contact the "
-				    "driver maintainer for support.\n",
-				    ent->vendor, ent->device,
-				    ent->subvendor, ent->subdevice);
+	if (m == NULL && ent->vendor == PCI_VENDOR_ID_CCD && (
+	    ent->device == PCI_DEVICE_ID_CCD_HFC4S ||
+	    ent->device == PCI_DEVICE_ID_CCD_HFC8S ||
+	    ent->device == PCI_DEVICE_ID_CCD_HFCE1)) {
+		printk(KERN_ERR
+		    "Unknown HFC multiport controller (vendor:%x device:%x "
+		    "subvendor:%x subdevice:%x)\n", ent->vendor, ent->device,
+		    ent->subvendor, ent->subdevice);
+		printk(KERN_ERR
+		    "Please contact the driver maintainer for support.\n");
 		return -ENODEV;
 	}
 	ret = hfcmulti_init(pdev, ent);
@@ -5222,22 +5249,9 @@
 {
 	struct hfc_multi *card, *next;
 
-	/* unload interrupt function symbol */
-	if (hfc_interrupt)
-		symbol_put(ztdummy_extern_interrupt);
-	if (register_interrupt)
-		symbol_put(ztdummy_register_interrupt);
-	if (unregister_interrupt) {
-		if (interrupt_registered) {
-			interrupt_registered = 0;
-			unregister_interrupt();
-		}
-		symbol_put(ztdummy_unregister_interrupt);
-	}
-
+	/* get rid of all devices of this driver */
 	list_for_each_entry_safe(card, next, &HFClist, list)
 		release_card(card);
-	/* get rid of all devices of this driver */
 	pci_unregister_driver(&hfcmultipci_driver);
 }
 
@@ -5246,8 +5260,10 @@
 {
 	int err;
 
+	printk(KERN_INFO "mISDN: HFC-multi driver %s\n", HFC_MULTI_VERSION);
+
 #ifdef IRQ_DEBUG
-	printk(KERN_ERR "%s: IRQ_DEBUG IS ENABLED!\n", __func__);
+	printk(KERN_DEBUG "%s: IRQ_DEBUG IS ENABLED!\n", __func__);
 #endif
 
 	spin_lock_init(&HFClock);
@@ -5256,22 +5272,11 @@
 	if (debug & DEBUG_HFCMULTI_INIT)
 		printk(KERN_DEBUG "%s: init entered\n", __func__);
 
-	hfc_interrupt = symbol_get(ztdummy_extern_interrupt);
-	register_interrupt = symbol_get(ztdummy_register_interrupt);
-	unregister_interrupt = symbol_get(ztdummy_unregister_interrupt);
-	printk(KERN_INFO "mISDN: HFC-multi driver %s\n",
-	    hfcmulti_revision);
-
 	switch (poll) {
 	case 0:
 		poll_timer = 6;
 		poll = 128;
 		break;
-		/*
-		 * wenn dieses break nochmal verschwindet,
-		 * gibt es heisse ohren :-)
-		 * "without the break you will get hot ears ???"
-		 */
 	case 8:
 		poll_timer = 2;
 		break;
@@ -5298,20 +5303,12 @@
 
 	}
 
+	if (!clock)
+		clock = 1;
+
 	err = pci_register_driver(&hfcmultipci_driver);
 	if (err < 0) {
 		printk(KERN_ERR "error registering pci driver: %x\n", err);
-		if (hfc_interrupt)
-			symbol_put(ztdummy_extern_interrupt);
-		if (register_interrupt)
-			symbol_put(ztdummy_register_interrupt);
-		if (unregister_interrupt) {
-			if (interrupt_registered) {
-				interrupt_registered = 0;
-				unregister_interrupt();
-			}
-			symbol_put(ztdummy_unregister_interrupt);
-		}
 		return err;
 	}
 	return 0;
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index cd8302a..f0e14df 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -23,6 +23,25 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  *
+ * Module options:
+ *
+ * debug:
+ *	NOTE: only one debug value must be given for all cards
+ *	See hfc_pci.h for debug flags.
+ *
+ * poll:
+ *	NOTE: only one poll value must be given for all cards
+ *	Give the number of samples for each fifo process.
+ *	By default 128 is used. Decrease to reduce delay, increase to
+ *	reduce cpu load. If unsure, don't mess with it!
+ *	A value of 128 will use controller's interrupt. Other values will
+ *	use kernel timer, because the controller will not allow lower values
+ *	than 128.
+ *	Also note that the value depends on the kernel timer frequency.
+ *	If kernel uses a frequency of 1000 Hz, steps of 8 samples are possible.
+ *	If the kernel uses 100 Hz, steps of 80 samples are possible.
+ *	If the kernel uses 300 Hz, steps of about 26 samples are possible.
+ *
  */
 
 #include <linux/module.h>
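
As a worked example of the poll rounding documented above (numbers purely illustrative): with HZ=250 and poll=100, tics becomes 100 * 250 / 8000 = 3 jiffies and the effective poll is rounded to 3 * 8000 / 250 = 96 samples. A stand-alone sketch of the same integer arithmetic used later in HFC_init():

	#include <stdio.h>

	int main(void)
	{
		unsigned int hz = 250;			/* example kernel timer frequency */
		unsigned int poll = 100;		/* requested samples per fifo run */
		unsigned int tics = poll * hz / 8000;	/* S0 runs at 8000 samples/second */

		if (tics < 1)
			tics = 1;
		poll = tics * 8000 / hz;

		printf("tics=%u poll=%u\n", tics, poll);	/* tics=3 poll=96 */
		return 0;
	}
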
@@ -34,16 +53,16 @@
 
 static const char *hfcpci_revision = "2.0";
 
-#define MAX_CARDS	8
 static int HFC_cnt;
 static uint debug;
+static uint poll, tics;
+struct timer_list hfc_tl;
+u32	hfc_jiffies;
 
 MODULE_AUTHOR("Karsten Keil");
 MODULE_LICENSE("GPL");
-module_param(debug, uint, 0);
-
-static LIST_HEAD(HFClist);
-static DEFINE_RWLOCK(HFClock);
+module_param(debug, uint, S_IRUGO | S_IWUSR);
+module_param(poll, uint, S_IRUGO | S_IWUSR);
 
 enum {
 	HFC_CCD_2BD0,
@@ -114,7 +133,6 @@
 
 
 struct hfc_pci {
-	struct list_head	list;
 	u_char			subtype;
 	u_char			chanlimit;
 	u_char			initdone;
@@ -520,9 +538,9 @@
 }
 
 /*
- * check for transparent receive data and read max one threshold size if avail
+ * check for transparent receive data and read max one 'poll' size if avail
  */
-static int
+static void
 hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata)
 {
 	 __le16 *z1r, *z2r;
@@ -534,17 +552,19 @@
 
 	fcnt = le16_to_cpu(*z1r) - le16_to_cpu(*z2r);
 	if (!fcnt)
-		return 0;	/* no data avail */
+		return;	/* no data avail */
 
 	if (fcnt <= 0)
 		fcnt += B_FIFO_SIZE;	/* bytes actually buffered */
-	if (fcnt > HFCPCI_BTRANS_THRESHOLD)
-		fcnt = HFCPCI_BTRANS_THRESHOLD;		/* limit size */
-
 	new_z2 = le16_to_cpu(*z2r) + fcnt;	/* new position in fifo */
 	if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
 		new_z2 -= B_FIFO_SIZE;	/* buffer wrap */
 
+	if (fcnt > MAX_DATA_SIZE) {	/* flush, if oversized */
+		*z2r = cpu_to_le16(new_z2);		/* new position */
+		return;
+	}
+
 	bch->rx_skb = mI_alloc_skb(fcnt, GFP_ATOMIC);
 	if (bch->rx_skb) {
 		ptr = skb_put(bch->rx_skb, fcnt);
@@ -569,7 +589,6 @@
 		printk(KERN_WARNING "HFCPCI: receive out of memory\n");
 
 	*z2r = cpu_to_le16(new_z2);		/* new position */
-	return 1;
 }
 
 /*
@@ -580,12 +599,11 @@
 {
 	struct hfc_pci	*hc = bch->hw;
 	int		rcnt, real_fifo;
-	int		receive, count = 5;
+	int		receive = 0, count = 5;
 	struct bzfifo	*bz;
 	u_char		*bdata;
 	struct zt	*zp;
 
-
 	if ((bch->nr & 2) && (!hc->hw.bswapped)) {
 		bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
 		bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2;
@@ -625,9 +643,10 @@
 			receive = 1;
 		else
 			receive = 0;
-	} else if (test_bit(FLG_TRANSPARENT, &bch->Flags))
-		receive = hfcpci_empty_fifo_trans(bch, bz, bdata);
-	else
+	} else if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
+		hfcpci_empty_fifo_trans(bch, bz, bdata);
+		return;
+	} else
 		receive = 0;
 	if (count && receive)
 		goto Begin;
@@ -751,11 +770,41 @@
 			    /* fcnt contains available bytes in fifo */
 		fcnt = B_FIFO_SIZE - fcnt;
 		    /* remaining bytes to send (bytes in fifo) */
+
+		/* "fill fifo if empty" feature */
+		if (test_bit(FLG_FILLEMPTY, &bch->Flags) && !fcnt) {
+			/* printk(KERN_DEBUG "%s: buffer empty, so we have "
+				"underrun\n", __func__); */
+			/* fill buffer, to prevent future underrun */
+			count = HFCPCI_FILLEMPTY;
+			new_z1 = le16_to_cpu(*z1t) + count;
+			   /* new buffer Position */
+			if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
+				new_z1 -= B_FIFO_SIZE;	/* buffer wrap */
+			dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
+			maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
+			    /* end of fifo */
+			if (bch->debug & DEBUG_HW_BFIFO)
+				printk(KERN_DEBUG "hfcpci_FFt fillempty "
+				    "fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n",
+				    fcnt, maxlen, new_z1, dst);
+			fcnt += count;
+			if (maxlen > count)
+				maxlen = count; 	/* limit size */
+			memset(dst, 0x2a, maxlen);	/* first copy */
+			count -= maxlen;		/* remaining bytes */
+			if (count) {
+				dst = bdata;		/* start of buffer */
+				memset(dst, 0x2a, count);
+			}
+			*z1t = cpu_to_le16(new_z1);	/* now send data */
+		}
+
 next_t_frame:
 		count = bch->tx_skb->len - bch->tx_idx;
-		/* maximum fill shall be HFCPCI_BTRANS_MAX */
-		if (count > HFCPCI_BTRANS_MAX - fcnt)
-			count = HFCPCI_BTRANS_MAX - fcnt;
+		/* maximum fill shall be poll*2 */
+		if (count > (poll << 1) - fcnt)
+			count = (poll << 1) - fcnt;
 		if (count <= 0)
 			return;
 		/* data is suitable for fifo */
@@ -1135,37 +1184,37 @@
 		val &= ~0x80;
 		Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER);
 	}
-	if (val & 0x08) {
+	if (val & 0x08) { 	/* B1 rx */
 		bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
 		if (bch)
 			main_rec_hfcpci(bch);
 		else if (hc->dch.debug)
 			printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n");
 	}
-	if (val & 0x10) {
+	if (val & 0x10) {	/* B2 rx */
 		bch = Sel_BCS(hc, 2);
 		if (bch)
 			main_rec_hfcpci(bch);
 		else if (hc->dch.debug)
 			printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n");
 	}
-	if (val & 0x01) {
+	if (val & 0x01) {	/* B1 tx */
 		bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
 		if (bch)
 			tx_birq(bch);
 		else if (hc->dch.debug)
 			printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n");
 	}
-	if (val & 0x02) {
+	if (val & 0x02) {	/* B2 tx */
 		bch = Sel_BCS(hc, 2);
 		if (bch)
 			tx_birq(bch);
 		else if (hc->dch.debug)
 			printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n");
 	}
-	if (val & 0x20)
+	if (val & 0x20)		/* D rx */
 		receive_dmsg(hc);
-	if (val & 0x04) {	/* dframe transmitted */
+	if (val & 0x04) {	/* D tx */
 		if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags))
 			del_timer(&hc->dch.timer);
 		tx_dirq(&hc->dch);
@@ -1283,14 +1332,16 @@
 		}
 		if (fifo2 & 2) {
 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
-			hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
-			    HFCPCI_INTS_B2REC);
+			if (!tics)
+				hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
+				    HFCPCI_INTS_B2REC);
 			hc->hw.ctmt |= 2;
 			hc->hw.conn &= ~0x18;
 		} else {
 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
-			hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
-			    HFCPCI_INTS_B1REC);
+			if (!tics)
+				hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
+				    HFCPCI_INTS_B1REC);
 			hc->hw.ctmt |= 1;
 			hc->hw.conn &= ~0x03;
 		}
@@ -1398,7 +1449,8 @@
 		if (chan & 2) {
 			hc->hw.sctrl_r |= SCTRL_B2_ENA;
 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
-			hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
+			if (!tics)
+				hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
 			hc->hw.ctmt |= 2;
 			hc->hw.conn &= ~0x18;
 #ifdef REVERSE_BITORDER
@@ -1407,7 +1459,8 @@
 		} else {
 			hc->hw.sctrl_r |= SCTRL_B1_ENA;
 			hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
-			hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
+			if (!tics)
+				hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
 			hc->hw.ctmt |= 1;
 			hc->hw.conn &= ~0x03;
 #ifdef REVERSE_BITORDER
@@ -1481,11 +1534,17 @@
 static int
 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
 {
-	int			ret = 0;
+	int	ret = 0;
 
 	switch (cq->op) {
 	case MISDN_CTRL_GETOP:
-		cq->op = 0;
+		cq->op = MISDN_CTRL_FILL_EMPTY;
+		break;
+	case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */
+		test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
+		if (debug & DEBUG_HW_OPEN)
+			printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d "
+				"off=%d)\n", __func__, bch->nr, !!cq->p1);
 		break;
 	default:
 		printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op);
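
The MISDN_CTRL_FILL_EMPTY operation handled in channel_bctrl() above is requested per B-channel through the normal mISDN control path; a minimal sketch of how a channel user could enable it (ch is whatever mISDNchannel pointer the caller obtained when opening the B-channel, and the return value should of course be checked):

	struct mISDN_ctrl_req cq = {
		.op = MISDN_CTRL_FILL_EMPTY,
		.p1 = 1,			/* enable "fill fifo if empty" */
	};

	ch->ctrl(ch, CONTROL_CHANNEL, &cq);
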
@@ -1859,6 +1918,10 @@
 		    hc->dch.dev.id, __builtin_return_address(0));
 	if (rq->protocol == ISDN_P_NONE)
 		return -EINVAL;
+	if (rq->adr.channel == 1) {
+		/* TODO: E-Channel */
+		return -EINVAL;
+	}
 	if (!hc->initdone) {
 		if (rq->protocol == ISDN_P_TE_S0) {
 			err = create_l1(&hc->dch, hfc_l1callback);
@@ -1874,6 +1937,11 @@
 		if (rq->protocol != ch->protocol) {
 			if (hc->hw.protocol == ISDN_P_TE_S0)
 				l1_event(hc->dch.l1, CLOSE_CHANNEL);
+			if (rq->protocol == ISDN_P_TE_S0) {
+				err = create_l1(&hc->dch, hfc_l1callback);
+				if (err)
+					return err;
+			}
 			hc->hw.protocol = rq->protocol;
 			ch->protocol = rq->protocol;
 			hfcpci_setmode(hc);
@@ -1903,6 +1971,7 @@
 	bch = &hc->bch[rq->adr.channel - 1];
 	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
 		return -EBUSY; /* b-channel can be only open once */
+	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
 	bch->ch.protocol = rq->protocol;
 	rq->ch = &bch->ch; /* TODO: E-channel */
 	if (!try_module_get(THIS_MODULE))
@@ -1928,7 +1997,8 @@
 	switch (cmd) {
 	case OPEN_CHANNEL:
 		rq = arg;
-		if (rq->adr.channel == 0)
+		if ((rq->protocol == ISDN_P_TE_S0) ||
+		    (rq->protocol == ISDN_P_NT_S0))
 			err = open_dchannel(hc, ch, rq);
 		else
 			err = open_bchannel(hc, rq);
@@ -2027,7 +2097,6 @@
 	mISDN_freebchannel(&hc->bch[1]);
 	mISDN_freebchannel(&hc->bch[0]);
 	mISDN_freedchannel(&hc->dch);
-	list_del(&hc->list);
 	pci_set_drvdata(hc->pdev, NULL);
 	kfree(hc);
 }
@@ -2037,12 +2106,8 @@
 {
 	int		err = -EINVAL;
 	u_int		i;
-	u_long		flags;
 	char		name[MISDN_MAX_IDLEN];
 
-	if (HFC_cnt >= MAX_CARDS)
-		return -EINVAL; /* maybe better value */
-
 	card->dch.debug = debug;
 	spin_lock_init(&card->lock);
 	mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state);
@@ -2068,13 +2133,10 @@
 	if (err)
 		goto error;
 	snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1);
-	err = mISDN_register_device(&card->dch.dev, name);
+	err = mISDN_register_device(&card->dch.dev, &card->pdev->dev, name);
 	if (err)
 		goto error;
 	HFC_cnt++;
-	write_lock_irqsave(&HFClock, flags);
-	list_add_tail(&card->list, &HFClist);
-	write_unlock_irqrestore(&HFClock, flags);
 	printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt);
 	return 0;
 error:
@@ -2210,15 +2272,12 @@
 hfc_remove_pci(struct pci_dev *pdev)
 {
 	struct hfc_pci	*card = pci_get_drvdata(pdev);
-	u_long		flags;
 
-	if (card) {
-		write_lock_irqsave(&HFClock, flags);
+	if (card)
 		release_card(card);
-		write_unlock_irqrestore(&HFClock, flags);
-	} else
+	else
 		if (debug)
-			printk(KERN_WARNING "%s: drvdata allready removed\n",
+			printk(KERN_WARNING "%s: drvdata already removed\n",
 			    __func__);
 }
 
@@ -2230,25 +2289,97 @@
 	.id_table = hfc_ids,
 };
 
+static int
+_hfcpci_softirq(struct device *dev, void *arg)
+{
+	struct hfc_pci  *hc = dev_get_drvdata(dev);
+	struct bchannel *bch;
+	if (hc == NULL)
+		return 0;
+
+	if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
+		spin_lock(&hc->lock);
+		bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
+		if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
+			main_rec_hfcpci(bch);
+			tx_birq(bch);
+		}
+		bch = Sel_BCS(hc, hc->hw.bswapped ? 1 : 2);
+		if (bch && bch->state == ISDN_P_B_RAW) { /* B2 rx&tx */
+			main_rec_hfcpci(bch);
+			tx_birq(bch);
+		}
+		spin_unlock(&hc->lock);
+	}
+	return 0;
+}
+
+static void
+hfcpci_softirq(void *arg)
+{
+	(void) driver_for_each_device(&hfc_driver.driver, NULL, arg,
+					_hfcpci_softirq);
+
+	/* if next event would be in the past ... */
+	if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
+		hfc_jiffies = jiffies + 1;
+	else
+		hfc_jiffies += tics;
+	hfc_tl.expires = hfc_jiffies;
+	add_timer(&hfc_tl);
+}
+
 static int __init
 HFC_init(void)
 {
 	int		err;
 
+	if (!poll)
+		poll = HFCPCI_BTRANS_THRESHOLD;
+
+	if (poll != HFCPCI_BTRANS_THRESHOLD) {
+		tics = (poll * HZ) / 8000;
+		if (tics < 1)
+			tics = 1;
+		poll = (tics * 8000) / HZ;
+		if (poll > 256 || poll < 8) {
+			printk(KERN_ERR "%s: Wrong poll value %d not in range "
+				"of 8..256.\n", __func__, poll);
+			err = -EINVAL;
+			return err;
+		}
+	}
+	if (poll != HFCPCI_BTRANS_THRESHOLD) {
+		printk(KERN_INFO "%s: Using alternative poll value of %d\n",
+			__func__, poll);
+		hfc_tl.function = (void *)hfcpci_softirq;
+		hfc_tl.data = 0;
+		init_timer(&hfc_tl);
+		hfc_tl.expires = jiffies + tics;
+		hfc_jiffies = hfc_tl.expires;
+		add_timer(&hfc_tl);
+	} else
+		tics = 0; /* indicate the use of controller's timer */
+
 	err = pci_register_driver(&hfc_driver);
+	if (err) {
+		if (timer_pending(&hfc_tl))
+			del_timer(&hfc_tl);
+	}
+
 	return err;
 }
 
 static void __exit
 HFC_cleanup(void)
 {
-	struct hfc_pci	*card, *next;
+	if (timer_pending(&hfc_tl))
+		del_timer(&hfc_tl);
 
-	list_for_each_entry_safe(card, next, &HFClist, list) {
-		release_card(card);
-	}
 	pci_unregister_driver(&hfc_driver);
 }
 
 module_init(HFC_init);
 module_exit(HFC_cleanup);
+
+MODULE_DEVICE_TABLE(pci, hfc_ids);
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
new file mode 100644
index 0000000..ba6925f
--- /dev/null
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -0,0 +1,2196 @@
+/* hfcsusb.c
+ * mISDN driver for Colognechip HFC-S USB chip
+ *
+ * Copyright 2001 by Peter Sprenger (sprenger@moving-bytes.de)
+ * Copyright 2008 by Martin Bachem (info@bachem-it.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * module params
+ *   debug=<n>, default=0, with n=0xHHHHGGGG
+ *      H - l1 driver flags described in hfcsusb.h
+ *      G - common mISDN debug flags described at mISDNhw.h
+ *
+ *   poll=<n>, default 128
+ *     n : burst size of PH_DATA_IND at transparent rx data
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+#include <linux/mISDNhw.h>
+#include "hfcsusb.h"
+
+const char *hfcsusb_rev = "Revision: 0.3.3 (socket), 2008-11-05";
+
+static unsigned int debug;
+static int poll = DEFAULT_TRANSP_BURST_SZ;
+
+static LIST_HEAD(HFClist);
+static DEFINE_RWLOCK(HFClock);
+
+
+MODULE_AUTHOR("Martin Bachem");
+MODULE_LICENSE("GPL");
+module_param(debug, uint, S_IRUGO | S_IWUSR);
+module_param(poll, int, 0);
+
+static int hfcsusb_cnt;
+
+/* some function prototypes */
+static void hfcsusb_ph_command(struct hfcsusb *hw, u_char command);
+static void release_hw(struct hfcsusb *hw);
+static void reset_hfcsusb(struct hfcsusb *hw);
+static void setPortMode(struct hfcsusb *hw);
+static void hfcsusb_start_endpoint(struct hfcsusb *hw, int channel);
+static void hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel);
+static int  hfcsusb_setup_bch(struct bchannel *bch, int protocol);
+static void deactivate_bchannel(struct bchannel *bch);
+static void hfcsusb_ph_info(struct hfcsusb *hw);
+
+/* start next background transfer for control channel */
+static void
+ctrl_start_transfer(struct hfcsusb *hw)
+{
+	if (debug & DBG_HFC_CALL_TRACE)
+		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+
+	if (hw->ctrl_cnt) {
+		hw->ctrl_urb->pipe = hw->ctrl_out_pipe;
+		hw->ctrl_urb->setup_packet = (u_char *)&hw->ctrl_write;
+		hw->ctrl_urb->transfer_buffer = NULL;
+		hw->ctrl_urb->transfer_buffer_length = 0;
+		hw->ctrl_write.wIndex =
+		    cpu_to_le16(hw->ctrl_buff[hw->ctrl_out_idx].hfcs_reg);
+		hw->ctrl_write.wValue =
+		    cpu_to_le16(hw->ctrl_buff[hw->ctrl_out_idx].reg_val);
+
+		usb_submit_urb(hw->ctrl_urb, GFP_ATOMIC);
+	}
+}
+
+/*
+ * queue a control transfer request to write HFC-S USB
+ * chip register using CTRL request queue
+ */
+static int write_reg(struct hfcsusb *hw, __u8 reg, __u8 val)
+{
+	struct ctrl_buf *buf;
+
+	if (debug & DBG_HFC_CALL_TRACE)
+		printk(KERN_DEBUG "%s: %s reg(0x%02x) val(0x%02x)\n",
+			hw->name, __func__, reg, val);
+
+	spin_lock(&hw->ctrl_lock);
+	if (hw->ctrl_cnt >= HFC_CTRL_BUFSIZE) {
+		spin_unlock(&hw->ctrl_lock);
+		return 1;
+	}
+	buf = &hw->ctrl_buff[hw->ctrl_in_idx];
+	buf->hfcs_reg = reg;
+	buf->reg_val = val;
+	if (++hw->ctrl_in_idx >= HFC_CTRL_BUFSIZE)
+		hw->ctrl_in_idx = 0;
+	if (++hw->ctrl_cnt == 1)
+		ctrl_start_transfer(hw);
+	spin_unlock(&hw->ctrl_lock);
+
+	return 0;
+}
+
+/* control completion routine handling background control cmds */
+static void
+ctrl_complete(struct urb *urb)
+{
+	struct hfcsusb *hw = (struct hfcsusb *) urb->context;
+	struct ctrl_buf *buf;
+
+	if (debug & DBG_HFC_CALL_TRACE)
+		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+
+	urb->dev = hw->dev;
+	if (hw->ctrl_cnt) {
+		buf = &hw->ctrl_buff[hw->ctrl_out_idx];
+		hw->ctrl_cnt--;	/* decrement actual count */
+		if (++hw->ctrl_out_idx >= HFC_CTRL_BUFSIZE)
+			hw->ctrl_out_idx = 0;	/* pointer wrap */
+
+		ctrl_start_transfer(hw); /* start next transfer */
+	}
+}
+
+/* handle LED bits   */
+static void
+set_led_bit(struct hfcsusb *hw, signed short led_bits, int set_on)
+{
+	if (set_on) {
+		if (led_bits < 0)
+			hw->led_state &= ~abs(led_bits);
+		else
+			hw->led_state |= led_bits;
+	} else {
+		if (led_bits < 0)
+			hw->led_state |= abs(led_bits);
+		else
+			hw->led_state &= ~led_bits;
+	}
+}
+
+/* handle LED requests  */
+static void
+handle_led(struct hfcsusb *hw, int event)
+{
+	struct hfcsusb_vdata *driver_info = (struct hfcsusb_vdata *)
+		hfcsusb_idtab[hw->vend_idx].driver_info;
+	__u8 tmpled;
+
+	if (driver_info->led_scheme == LED_OFF)
+		return;
+	tmpled = hw->led_state;
+
+	switch (event) {
+	case LED_POWER_ON:
+		set_led_bit(hw, driver_info->led_bits[0], 1);
+		set_led_bit(hw, driver_info->led_bits[1], 0);
+		set_led_bit(hw, driver_info->led_bits[2], 0);
+		set_led_bit(hw, driver_info->led_bits[3], 0);
+		break;
+	case LED_POWER_OFF:
+		set_led_bit(hw, driver_info->led_bits[0], 0);
+		set_led_bit(hw, driver_info->led_bits[1], 0);
+		set_led_bit(hw, driver_info->led_bits[2], 0);
+		set_led_bit(hw, driver_info->led_bits[3], 0);
+		break;
+	case LED_S0_ON:
+		set_led_bit(hw, driver_info->led_bits[1], 1);
+		break;
+	case LED_S0_OFF:
+		set_led_bit(hw, driver_info->led_bits[1], 0);
+		break;
+	case LED_B1_ON:
+		set_led_bit(hw, driver_info->led_bits[2], 1);
+		break;
+	case LED_B1_OFF:
+		set_led_bit(hw, driver_info->led_bits[2], 0);
+		break;
+	case LED_B2_ON:
+		set_led_bit(hw, driver_info->led_bits[3], 1);
+		break;
+	case LED_B2_OFF:
+		set_led_bit(hw, driver_info->led_bits[3], 0);
+		break;
+	}
+
+	if (hw->led_state != tmpled) {
+		if (debug & DBG_HFC_CALL_TRACE)
+			printk(KERN_DEBUG "%s: %s reg(0x%02x) val(x%02x)\n",
+			    hw->name, __func__,
+			    HFCUSB_P_DATA, hw->led_state);
+
+		write_reg(hw, HFCUSB_P_DATA, hw->led_state);
+	}
+}
+
+/*
+ * Layer2 -> Layer 1 Bchannel data
+ */
+static int
+hfcusb_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+	struct bchannel		*bch = container_of(ch, struct bchannel, ch);
+	struct hfcsusb		*hw = bch->hw;
+	int			ret = -EINVAL;
+	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
+	u_long			flags;
+
+	if (debug & DBG_HFC_CALL_TRACE)
+		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+
+	switch (hh->prim) {
+	case PH_DATA_REQ:
+		spin_lock_irqsave(&hw->lock, flags);
+		ret = bchannel_senddata(bch, skb);
+		spin_unlock_irqrestore(&hw->lock, flags);
+		if (debug & DBG_HFC_CALL_TRACE)
+			printk(KERN_DEBUG "%s: %s PH_DATA_REQ ret(%i)\n",
+				hw->name, __func__, ret);
+		if (ret > 0) {
+			/*
+			 * other l1 drivers don't send early confirms on
+			 * transp data, but hfcsusb does because tx_next
+			 * skb is needed in tx_iso_complete()
+			 */
+			queue_ch_frame(ch, PH_DATA_CNF, hh->id, NULL);
+			ret = 0;
+		}
+		return ret;
+	case PH_ACTIVATE_REQ:
+		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags)) {
+			hfcsusb_start_endpoint(hw, bch->nr);
+			ret = hfcsusb_setup_bch(bch, ch->protocol);
+		} else
+			ret = 0;
+		if (!ret)
+			_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
+				0, NULL, GFP_KERNEL);
+		break;
+	case PH_DEACTIVATE_REQ:
+		deactivate_bchannel(bch);
+		_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY,
+			0, NULL, GFP_KERNEL);
+		ret = 0;
+		break;
+	}
+	if (!ret)
+		dev_kfree_skb(skb);
+	return ret;
+}
+
+/*
+ * send full D/B channel status information
+ * as MPH_INFORMATION_IND
+ */
+static void
+hfcsusb_ph_info(struct hfcsusb *hw)
+{
+	struct ph_info *phi;
+	struct dchannel *dch = &hw->dch;
+	int i;
+
+	phi = kzalloc(sizeof(struct ph_info) +
+		dch->dev.nrbchan * sizeof(struct ph_info_ch), GFP_ATOMIC);
+	if (!phi)
+		return;
+	phi->dch.ch.protocol = hw->protocol;
+	phi->dch.ch.Flags = dch->Flags;
+	phi->dch.state = dch->state;
+	phi->dch.num_bch = dch->dev.nrbchan;
+	for (i = 0; i < dch->dev.nrbchan; i++) {
+		phi->bch[i].protocol = hw->bch[i].ch.protocol;
+		phi->bch[i].Flags = hw->bch[i].Flags;
+	}
+	_queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
+		sizeof(struct ph_info_dch) + dch->dev.nrbchan *
+		sizeof(struct ph_info_ch), phi, GFP_ATOMIC);
+	kfree(phi);
+}
+
+/*
+ * Layer2 -> Layer 1 Dchannel data
+ */
+static int
+hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
+{
+	struct mISDNdevice	*dev = container_of(ch, struct mISDNdevice, D);
+	struct dchannel		*dch = container_of(dev, struct dchannel, dev);
+	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
+	struct hfcsusb		*hw = dch->hw;
+	int			ret = -EINVAL;
+	u_long			flags;
+
+	switch (hh->prim) {
+	case PH_DATA_REQ:
+		if (debug & DBG_HFC_CALL_TRACE)
+			printk(KERN_DEBUG "%s: %s: PH_DATA_REQ\n",
+				hw->name, __func__);
+
+		spin_lock_irqsave(&hw->lock, flags);
+		ret = dchannel_senddata(dch, skb);
+		spin_unlock_irqrestore(&hw->lock, flags);
+		if (ret > 0) {
+			ret = 0;
+			queue_ch_frame(ch, PH_DATA_CNF, hh->id, NULL);
+		}
+		break;
+
+	case PH_ACTIVATE_REQ:
+		if (debug & DBG_HFC_CALL_TRACE)
+			printk(KERN_DEBUG "%s: %s: PH_ACTIVATE_REQ %s\n",
+				hw->name, __func__,
+				(hw->protocol == ISDN_P_NT_S0) ? "NT" : "TE");
+
+		if (hw->protocol == ISDN_P_NT_S0) {
+			ret = 0;
+			if (test_bit(FLG_ACTIVE, &dch->Flags)) {
+				_queue_data(&dch->dev.D,
+					PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
+					NULL, GFP_ATOMIC);
+			} else {
+				hfcsusb_ph_command(hw,
+					HFC_L1_ACTIVATE_NT);
+				test_and_set_bit(FLG_L2_ACTIVATED,
+					&dch->Flags);
+			}
+		} else {
+			hfcsusb_ph_command(hw, HFC_L1_ACTIVATE_TE);
+			ret = l1_event(dch->l1, hh->prim);
+		}
+		break;
+
+	case PH_DEACTIVATE_REQ:
+		if (debug & DBG_HFC_CALL_TRACE)
+			printk(KERN_DEBUG "%s: %s: PH_DEACTIVATE_REQ\n",
+				hw->name, __func__);
+		test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
+
+		if (hw->protocol == ISDN_P_NT_S0) {
+			hfcsusb_ph_command(hw, HFC_L1_DEACTIVATE_NT);
+			spin_lock_irqsave(&hw->lock, flags);
+			skb_queue_purge(&dch->squeue);
+			if (dch->tx_skb) {
+				dev_kfree_skb(dch->tx_skb);
+				dch->tx_skb = NULL;
+			}
+			dch->tx_idx = 0;
+			if (dch->rx_skb) {
+				dev_kfree_skb(dch->rx_skb);
+				dch->rx_skb = NULL;
+			}
+			test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+			spin_unlock_irqrestore(&hw->lock, flags);
+#ifdef FIXME
+			if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
+				dchannel_sched_event(&hc->dch, D_CLEARBUSY);
+#endif
+			ret = 0;
+		} else
+			ret = l1_event(dch->l1, hh->prim);
+		break;
+	case MPH_INFORMATION_REQ:
+		hfcsusb_ph_info(hw);
+		ret = 0;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Layer 1 callback function
+ */
+static int
+hfc_l1callback(struct dchannel *dch, u_int cmd)
+{
+	struct hfcsusb *hw = dch->hw;
+
+	if (debug & DBG_HFC_CALL_TRACE)
+		printk(KERN_DEBUG "%s: %s cmd 0x%x\n",
+			hw->name, __func__, cmd);
+
+	switch (cmd) {
+	case INFO3_P8:
+	case INFO3_P10:
+	case HW_RESET_REQ:
+	case HW_POWERUP_REQ:
+		break;
+
+	case HW_DEACT_REQ:
+		skb_queue_purge(&dch->squeue);
+		if (dch->tx_skb) {
+			dev_kfree_skb(dch->tx_skb);
+			dch->tx_skb = NULL;
+		}
+		dch->tx_idx = 0;
+		if (dch->rx_skb) {
+			dev_kfree_skb(dch->rx_skb);
+			dch->rx_skb = NULL;
+		}
+		test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+		break;
+	case PH_ACTIVATE_IND:
+		test_and_set_bit(FLG_ACTIVE, &dch->Flags);
+		_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
+			GFP_ATOMIC);
+		break;
+	case PH_DEACTIVATE_IND:
+		test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
+		_queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
+			GFP_ATOMIC);
+		break;
+	default:
+		if (dch->debug & DEBUG_HW)
+			printk(KERN_DEBUG "%s: %s: unknown cmd %x\n",
+			hw->name, __func__, cmd);
+		return -1;
+	}
+	hfcsusb_ph_info(hw);
+	return 0;
+}
+
+static int
+open_dchannel(struct hfcsusb *hw, struct mISDNchannel *ch,
+    struct channel_req *rq)
+{
+	int err = 0;
+
+	if (debug & DEBUG_HW_OPEN)
+		printk(KERN_DEBUG "%s: %s: dev(%d) open addr(%i) from %p\n",
+		    hw->name, __func__, hw->dch.dev.id, rq->adr.channel,
+		    __builtin_return_address(0));
+	if (rq->protocol == ISDN_P_NONE)
+		return -EINVAL;
+
+	test_and_clear_bit(FLG_ACTIVE, &hw->dch.Flags);
+	test_and_clear_bit(FLG_ACTIVE, &hw->ech.Flags);
+	hfcsusb_start_endpoint(hw, HFC_CHAN_D);
+
+	/* E-Channel logging */
+	if (rq->adr.channel == 1) {
+		if (hw->fifos[HFCUSB_PCM_RX].pipe) {
+			hfcsusb_start_endpoint(hw, HFC_CHAN_E);
+			set_bit(FLG_ACTIVE, &hw->ech.Flags);
+			_queue_data(&hw->ech.dev.D, PH_ACTIVATE_IND,
+				     MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
+		} else
+			return -EINVAL;
+	}
+
+	if (!hw->initdone) {
+		hw->protocol = rq->protocol;
+		if (rq->protocol == ISDN_P_TE_S0) {
+			err = create_l1(&hw->dch, hfc_l1callback);
+			if (err)
+				return err;
+		}
+		setPortMode(hw);
+		ch->protocol = rq->protocol;
+		hw->initdone = 1;
+	} else {
+		if (rq->protocol != ch->protocol)
+			return -EPROTONOSUPPORT;
+	}
+
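+	/*
+	 * if layer 1 is already up (NT state G3 / TE state F7), report
+	 * activation to the newly opened channel immediately
+	 */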
+	if (((ch->protocol == ISDN_P_NT_S0) && (hw->dch.state == 3)) ||
+	    ((ch->protocol == ISDN_P_TE_S0) && (hw->dch.state == 7)))
+		_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
+		    0, NULL, GFP_KERNEL);
+	rq->ch = ch;
+	if (!try_module_get(THIS_MODULE))
+		printk(KERN_WARNING "%s: %s: cannot get module\n",
+		    hw->name, __func__);
+	return 0;
+}
+
+static int
+open_bchannel(struct hfcsusb *hw, struct channel_req *rq)
+{
+	struct bchannel		*bch;
+
+	if (rq->adr.channel > 2)
+		return -EINVAL;
+	if (rq->protocol == ISDN_P_NONE)
+		return -EINVAL;
+
+	if (debug & DBG_HFC_CALL_TRACE)
+		printk(KERN_DEBUG "%s: %s B%i\n",
+			hw->name, __func__, rq->adr.channel);
+
+	bch = &hw->bch[rq->adr.channel - 1];
+	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
+		return -EBUSY; /* b-channel can only be opened once */
+	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
+	bch->ch.protocol = rq->protocol;
+	rq->ch = &bch->ch;
+
+	/* start USB endpoint for bchannel */
+	if (rq->adr.channel  == 1)
+		hfcsusb_start_endpoint(hw, HFC_CHAN_B1);
+	else
+		hfcsusb_start_endpoint(hw, HFC_CHAN_B2);
+
+	if (!try_module_get(THIS_MODULE))
+		printk(KERN_WARNING "%s: %s: cannot get module\n",
+		    hw->name, __func__);
+	return 0;
+}
+
+static int
+channel_ctrl(struct hfcsusb *hw, struct mISDN_ctrl_req *cq)
+{
+	int ret = 0;
+
+	if (debug & DBG_HFC_CALL_TRACE)
+		printk(KERN_DEBUG "%s: %s op(0x%x) channel(0x%x)\n",
+		    hw->name, __func__, (cq->op), (cq->channel));
+
+	switch (cq->op) {
+	case MISDN_CTRL_GETOP:
+		cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
+			 MISDN_CTRL_DISCONNECT;
+		break;
+	default:
+		printk(KERN_WARNING "%s: %s: unknown Op %x\n",
+			hw->name, __func__, cq->op);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * device control function
+ */
+static int
+hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
+{
+	struct mISDNdevice	*dev = container_of(ch, struct mISDNdevice, D);
+	struct dchannel		*dch = container_of(dev, struct dchannel, dev);
+	struct hfcsusb		*hw = dch->hw;
+	struct channel_req	*rq;
+	int			err = 0;
+
+	if (dch->debug & DEBUG_HW)
+		printk(KERN_DEBUG "%s: %s: cmd:%x %p\n",
+		    hw->name, __func__, cmd, arg);
+	switch (cmd) {
+	case OPEN_CHANNEL:
+		rq = arg;
+		if ((rq->protocol == ISDN_P_TE_S0) ||
+		    (rq->protocol == ISDN_P_NT_S0))
+			err = open_dchannel(hw, ch, rq);
+		else
+			err = open_bchannel(hw, rq);
+		if (!err)
+			hw->open++;
+		break;
+	case CLOSE_CHANNEL:
+		hw->open--;
+		if (debug & DEBUG_HW_OPEN)
+			printk(KERN_DEBUG
+				"%s: %s: dev(%d) close from %p (open %d)\n",
+				hw->name, __func__, hw->dch.dev.id,
+				__builtin_return_address(0), hw->open);
+		if (!hw->open) {
+			hfcsusb_stop_endpoint(hw, HFC_CHAN_D);
+			if (hw->fifos[HFCUSB_PCM_RX].pipe)
+				hfcsusb_stop_endpoint(hw, HFC_CHAN_E);
+			handle_led(hw, LED_POWER_ON);
+		}
+		module_put(THIS_MODULE);
+		break;
+	case CONTROL_CHANNEL:
+		err = channel_ctrl(hw, arg);
+		break;
+	default:
+		if (dch->debug & DEBUG_HW)
+			printk(KERN_DEBUG "%s: %s: unknown command %x\n",
+				hw->name, __func__, cmd);
+		return -EINVAL;
+	}
+	return err;
+}
+
+/*
+ * S0 TE state change event handler
+ */
+static void
+ph_state_te(struct dchannel *dch)
+{
+	struct hfcsusb *hw = dch->hw;
+
+	if (debug & DEBUG_HW) {
+		if (dch->state <= HFC_MAX_TE_LAYER1_STATE)
+			printk(KERN_DEBUG "%s: %s: %s\n", hw->name, __func__,
+			    HFC_TE_LAYER1_STATES[dch->state]);
+		else
+			printk(KERN_DEBUG "%s: %s: TE F%d\n",
+			    hw->name, __func__, dch->state);
+	}
+
+	switch (dch->state) {
+	case 0:
+		l1_event(dch->l1, HW_RESET_IND);
+		break;
+	case 3:
+		l1_event(dch->l1, HW_DEACT_IND);
+		break;
+	case 5:
+	case 8:
+		l1_event(dch->l1, ANYSIGNAL);
+		break;
+	case 6:
+		l1_event(dch->l1, INFO2);
+		break;
+	case 7:
+		l1_event(dch->l1, INFO4_P8);
+		break;
+	}
+	if (dch->state == 7)
+		handle_led(hw, LED_S0_ON);
+	else
+		handle_led(hw, LED_S0_OFF);
+}
+
+/*
+ * S0 NT state change event handler
+ */
+static void
+ph_state_nt(struct dchannel *dch)
+{
+	struct hfcsusb *hw = dch->hw;
+
+	if (debug & DEBUG_HW) {
+		if (dch->state <= HFC_MAX_NT_LAYER1_STATE)
+			printk(KERN_DEBUG "%s: %s: %s\n",
+			    hw->name, __func__,
+			    HFC_NT_LAYER1_STATES[dch->state]);
+
+		else
+			printk(KERN_DEBUG "%s: %s: NT G%d\n",
+			    hw->name, __func__, dch->state);
+	}
+
+	switch (dch->state) {
+	case (1):
+		test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
+		test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
+		hw->nt_timer = 0;
+		hw->timers &= ~NT_ACTIVATION_TIMER;
+		handle_led(hw, LED_S0_OFF);
+		break;
+
+	case (2):
+		if (hw->nt_timer < 0) {
+			hw->nt_timer = 0;
+			hw->timers &= ~NT_ACTIVATION_TIMER;
+			hfcsusb_ph_command(dch->hw, HFC_L1_DEACTIVATE_NT);
+		} else {
+			hw->timers |= NT_ACTIVATION_TIMER;
+			hw->nt_timer = NT_T1_COUNT;
+			/* allow G2 -> G3 transition */
+			write_reg(hw, HFCUSB_STATES, 2 | HFCUSB_NT_G2_G3);
+		}
+		break;
+	case (3):
+		hw->nt_timer = 0;
+		hw->timers &= ~NT_ACTIVATION_TIMER;
+		test_and_set_bit(FLG_ACTIVE, &dch->Flags);
+		_queue_data(&dch->dev.D, PH_ACTIVATE_IND,
+			MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
+		handle_led(hw, LED_S0_ON);
+		break;
+	case (4):
+		hw->nt_timer = 0;
+		hw->timers &= ~NT_ACTIVATION_TIMER;
+		break;
+	default:
+		break;
+	}
+	hfcsusb_ph_info(hw);
+}
+
+static void
+ph_state(struct dchannel *dch)
+{
+	struct hfcsusb *hw = dch->hw;
+
+	if (hw->protocol == ISDN_P_NT_S0)
+		ph_state_nt(dch);
+	else if (hw->protocol == ISDN_P_TE_S0)
+		ph_state_te(dch);
+}
+
+/*
+ * disable/enable BChannel for the desired protocol
+ */
+static int
+hfcsusb_setup_bch(struct bchannel *bch, int protocol)
+{
+	struct hfcsusb *hw = bch->hw;
+	__u8 conhdlc, sctrl, sctrl_r;
+
+	if (debug & DEBUG_HW)
+		printk(KERN_DEBUG "%s: %s: protocol %x-->%x B%d\n",
+		    hw->name, __func__, bch->state, protocol,
+		    bch->nr);
+
+	/* setup val for CON_HDLC */
+	conhdlc = 0;
+	if (protocol > ISDN_P_NONE)
+		conhdlc = 8;	/* enable FIFO */
+
+	switch (protocol) {
+	case (-1):	/* used for init */
+		bch->state = -1;
+		/* fall through */
+	case (ISDN_P_NONE):
+		if (bch->state == ISDN_P_NONE)
+			return 0; /* already in idle state */
+		bch->state = ISDN_P_NONE;
+		clear_bit(FLG_HDLC, &bch->Flags);
+		clear_bit(FLG_TRANSPARENT, &bch->Flags);
+		break;
+	case (ISDN_P_B_RAW):
+		conhdlc |= 2;
+		bch->state = protocol;
+		set_bit(FLG_TRANSPARENT, &bch->Flags);
+		break;
+	case (ISDN_P_B_HDLC):
+		bch->state = protocol;
+		set_bit(FLG_HDLC, &bch->Flags);
+		break;
+	default:
+		if (debug & DEBUG_HW)
+			printk(KERN_DEBUG "%s: %s: prot not known %x\n",
+				hw->name, __func__, protocol);
+		return -ENOPROTOOPT;
+	}
+
+	if (protocol >= ISDN_P_NONE) {
+		write_reg(hw, HFCUSB_FIFO, (bch->nr == 1) ? 0 : 2);
+		write_reg(hw, HFCUSB_CON_HDLC, conhdlc);
+		write_reg(hw, HFCUSB_INC_RES_F, 2);
+		write_reg(hw, HFCUSB_FIFO, (bch->nr == 1) ? 1 : 3);
+		write_reg(hw, HFCUSB_CON_HDLC, conhdlc);
+		write_reg(hw, HFCUSB_INC_RES_F, 2);
+
+		sctrl = 0x40 + ((hw->protocol == ISDN_P_TE_S0) ? 0x00 : 0x04);
+		sctrl_r = 0x0;
+		if (test_bit(FLG_ACTIVE, &hw->bch[0].Flags)) {
+			sctrl |= 1;
+			sctrl_r |= 1;
+		}
+		if (test_bit(FLG_ACTIVE, &hw->bch[1].Flags)) {
+			sctrl |= 2;
+			sctrl_r |= 2;
+		}
+		write_reg(hw, HFCUSB_SCTRL, sctrl);
+		write_reg(hw, HFCUSB_SCTRL_R, sctrl_r);
+
+		if (protocol > ISDN_P_NONE)
+			handle_led(hw, (bch->nr == 1) ? LED_B1_ON : LED_B2_ON);
+		else
+			handle_led(hw, (bch->nr == 1) ? LED_B1_OFF :
+				LED_B2_OFF);
+	}
+	hfcsusb_ph_info(hw);
+	return 0;
+}
+
+static void
+hfcsusb_ph_command(struct hfcsusb *hw, u_char command)
+{
+	if (debug & DEBUG_HW)
+		printk(KERN_DEBUG "%s: %s: %x\n",
+		   hw->name, __func__, command);
+
+	switch (command) {
+	case HFC_L1_ACTIVATE_TE:
+		/* force sending INFO1 */
+		write_reg(hw, HFCUSB_STATES, 0x14);
+		/* start l1 activation */
+		write_reg(hw, HFCUSB_STATES, 0x04);
+		break;
+
+	case HFC_L1_FORCE_DEACTIVATE_TE:
+		write_reg(hw, HFCUSB_STATES, 0x10);
+		write_reg(hw, HFCUSB_STATES, 0x03);
+		break;
+
+	case HFC_L1_ACTIVATE_NT:
+		if (hw->dch.state == 3)
+			_queue_data(&hw->dch.dev.D, PH_ACTIVATE_IND,
+				MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
+		else
+			write_reg(hw, HFCUSB_STATES, HFCUSB_ACTIVATE |
+				HFCUSB_DO_ACTION | HFCUSB_NT_G2_G3);
+		break;
+
+	case HFC_L1_DEACTIVATE_NT:
+		write_reg(hw, HFCUSB_STATES,
+			HFCUSB_DO_ACTION);
+		break;
+	}
+}
+
+/*
+ * Layer 1 B-channel hardware access
+ */
+static int
+channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
+{
+	int	ret = 0;
+
+	switch (cq->op) {
+	case MISDN_CTRL_GETOP:
+		cq->op = MISDN_CTRL_FILL_EMPTY;
+		break;
+	case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */
+		test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
+		if (debug & DEBUG_HW_OPEN)
+			printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d "
+				"off=%d)\n", __func__, bch->nr, !!cq->p1);
+		break;
+	default:
+		printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+/* collect data from incoming interrupt or isochronous USB data */
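+/*
+ * 'finish' is nonzero when the chip signalled frame end (EOF bit) for this
+ * chunk, so a complete HDLC frame can be checked and delivered.
+ */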
+static void
+hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
+	int finish)
+{
+	struct hfcsusb	*hw = fifo->hw;
+	struct sk_buff	*rx_skb = NULL;
+	int		maxlen = 0;
+	int		fifon = fifo->fifonum;
+	int		i;
+	int		hdlc = 0;
+
+	if (debug & DBG_HFC_CALL_TRACE)
+		printk(KERN_DEBUG "%s: %s: fifo(%i) len(%i) "
+		    "dch(%p) bch(%p) ech(%p)\n",
+		    hw->name, __func__, fifon, len,
+		    fifo->dch, fifo->bch, fifo->ech);
+
+	if (!len)
+		return;
+
+	if ((!!fifo->dch + !!fifo->bch + !!fifo->ech) != 1) {
+		printk(KERN_DEBUG "%s: %s: undefined channel\n",
+		       hw->name, __func__);
+		return;
+	}
+
+	spin_lock(&hw->lock);
+	if (fifo->dch) {
+		rx_skb = fifo->dch->rx_skb;
+		maxlen = fifo->dch->maxlen;
+		hdlc = 1;
+	}
+	if (fifo->bch) {
+		rx_skb = fifo->bch->rx_skb;
+		maxlen = fifo->bch->maxlen;
+		hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags);
+	}
+	if (fifo->ech) {
+		rx_skb = fifo->ech->rx_skb;
+		maxlen = fifo->ech->maxlen;
+		hdlc = 1;
+	}
+
+	if (!rx_skb) {
+		rx_skb = mI_alloc_skb(maxlen, GFP_ATOMIC);
+		if (rx_skb) {
+			if (fifo->dch)
+				fifo->dch->rx_skb = rx_skb;
+			if (fifo->bch)
+				fifo->bch->rx_skb = rx_skb;
+			if (fifo->ech)
+				fifo->ech->rx_skb = rx_skb;
+			skb_trim(rx_skb, 0);
+		} else {
+			printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n",
+			    hw->name, __func__);
+			spin_unlock(&hw->lock);
+			return;
+		}
+	}
+
+	if (fifo->dch || fifo->ech) {
+		/* D/E-Channel SKB range check */
+		if ((rx_skb->len + len) >= MAX_DFRAME_LEN_L1) {
+			printk(KERN_DEBUG "%s: %s: sbk mem exceeded "
+			    "for fifo(%d) HFCUSB_D_RX\n",
+			    hw->name, __func__, fifon);
+			skb_trim(rx_skb, 0);
+			spin_unlock(&hw->lock);
+			return;
+		}
+	} else if (fifo->bch) {
+		/* B-Channel SKB range check */
+		if ((rx_skb->len + len) >= (MAX_BCH_SIZE + 3)) {
+			printk(KERN_DEBUG "%s: %s: sbk mem exceeded "
+			    "for fifo(%d) HFCUSB_B_RX\n",
+			    hw->name, __func__, fifon);
+			skb_trim(rx_skb, 0);
+			spin_unlock(&hw->lock);
+			return;
+		}
+	}
+
+	memcpy(skb_put(rx_skb, len), data, len);
+
+	if (hdlc) {
+		/* we have a complete hdlc packet */
+		if (finish) {
+			if ((rx_skb->len > 3) &&
+			   (!(rx_skb->data[rx_skb->len - 1]))) {
+				if (debug & DBG_HFC_FIFO_VERBOSE) {
+					printk(KERN_DEBUG "%s: %s: fifon(%i)"
+					    " new RX len(%i): ",
+					    hw->name, __func__, fifon,
+					    rx_skb->len);
+					i = 0;
+					while (i < rx_skb->len)
+						printk("%02x ",
+						    rx_skb->data[i++]);
+					printk("\n");
+				}
+
+				/* remove CRC & status */
+				skb_trim(rx_skb, rx_skb->len - 3);
+
+				if (fifo->dch)
+					recv_Dchannel(fifo->dch);
+				if (fifo->bch)
+					recv_Bchannel(fifo->bch);
+				if (fifo->ech)
+					recv_Echannel(fifo->ech,
+						     &hw->dch);
+			} else {
+				if (debug & DBG_HFC_FIFO_VERBOSE) {
+					printk(KERN_DEBUG
+					    "%s: CRC or minlen ERROR fifon(%i) "
+					    "RX len(%i): ",
+					    hw->name, fifon, rx_skb->len);
+					i = 0;
+					while (i < rx_skb->len)
+						printk("%02x ",
+						    rx_skb->data[i++]);
+					printk("\n");
+				}
+				skb_trim(rx_skb, 0);
+			}
+		}
+	} else {
+		/* deliver transparent data to layer2 */
+		if (rx_skb->len >= poll)
+			recv_Bchannel(fifo->bch);
+	}
+	spin_unlock(&hw->lock);
+}
+
+void
+fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe,
+	      void *buf, int num_packets, int packet_size, int interval,
+	      usb_complete_t complete, void *context)
+{
+	int k;
+
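+	/*
+	 * usb_fill_bulk_urb() is only used to initialize the common URB
+	 * fields; the isochronous specific fields are set up explicitly
+	 * below.
+	 */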
+	usb_fill_bulk_urb(urb, dev, pipe, buf, packet_size * num_packets,
+	    complete, context);
+
+	urb->number_of_packets = num_packets;
+	urb->transfer_flags = URB_ISO_ASAP;
+	urb->actual_length = 0;
+	urb->interval = interval;
+
+	for (k = 0; k < num_packets; k++) {
+		urb->iso_frame_desc[k].offset = packet_size * k;
+		urb->iso_frame_desc[k].length = packet_size;
+		urb->iso_frame_desc[k].actual_length = 0;
+	}
+}
+
+/* receive completion routine for all ISO rx fifos */
+static void
+rx_iso_complete(struct urb *urb)
+{
+	struct iso_urb *context_iso_urb = (struct iso_urb *) urb->context;
+	struct usb_fifo *fifo = context_iso_urb->owner_fifo;
+	struct hfcsusb *hw = fifo->hw;
+	int k, len, errcode, offset, num_isoc_packets, fifon, maxlen,
+	    status, iso_status, i;
+	__u8 *buf;
+	static __u8 eof[8];
+	__u8 s0_state;
+
+	fifon = fifo->fifonum;
+	status = urb->status;
+
+	spin_lock(&hw->lock);
+	if (fifo->stop_gracefull) {
+		fifo->stop_gracefull = 0;
+		fifo->active = 0;
+		spin_unlock(&hw->lock);
+		return;
+	}
+	spin_unlock(&hw->lock);
+
+	/*
+	 * ISO transfer only partially completed,
+	 * look at individual frame status for details
+	 */
+	if (status == -EXDEV) {
+		if (debug & DEBUG_HW)
+			printk(KERN_DEBUG "%s: %s: with -EXDEV "
+			    "urb->status %d, fifonum %d\n",
+			    hw->name, __func__,  status, fifon);
+
+		/* clear status, so go on with ISO transfers */
+		status = 0;
+	}
+
+	s0_state = 0;
+	if (fifo->active && !status) {
+		num_isoc_packets = iso_packets[fifon];
+		maxlen = fifo->usb_packet_maxlen;
+
+		for (k = 0; k < num_isoc_packets; ++k) {
+			len = urb->iso_frame_desc[k].actual_length;
+			offset = urb->iso_frame_desc[k].offset;
+			buf = context_iso_urb->buffer + offset;
+			iso_status = urb->iso_frame_desc[k].status;
+
+			if (iso_status && (debug & DBG_HFC_FIFO_VERBOSE)) {
+				printk(KERN_DEBUG "%s: %s: "
+				    "ISO packet %i, status: %i\n",
+				    hw->name, __func__, k, iso_status);
+			}
+
+			/* USB data log for every D ISO in */
+			if ((fifon == HFCUSB_D_RX) &&
+			    (debug & DBG_HFC_USB_VERBOSE)) {
+				printk(KERN_DEBUG
+				    "%s: %s: %d (%d/%d) len(%d) ",
+				    hw->name, __func__, urb->start_frame,
+				    k, num_isoc_packets-1,
+				    len);
+				for (i = 0; i < len; i++)
+					printk("%x ", buf[i]);
+				printk("\n");
+			}
+
+			if (!iso_status) {
+				if (fifo->last_urblen != maxlen) {
+					/*
+					 * save fifo fill-level threshold bits
+					 * to use them later in TX ISO URB
+					 * completions
+					 */
+					hw->threshold_mask = buf[1];
+
+					if (fifon == HFCUSB_D_RX)
+						s0_state = (buf[0] >> 4);
+
+					eof[fifon] = buf[0] & 1;
+					if (len > 2)
+						hfcsusb_rx_frame(fifo, buf + 2,
+							len - 2, (len < maxlen)
+							? eof[fifon] : 0);
+				} else
+					hfcsusb_rx_frame(fifo, buf, len,
+						(len < maxlen) ?
+						eof[fifon] : 0);
+				fifo->last_urblen = len;
+			}
+		}
+
+		/* signal S0 layer1 state change */
+		if ((s0_state) && (hw->initdone) &&
+		    (s0_state != hw->dch.state)) {
+			hw->dch.state = s0_state;
+			schedule_event(&hw->dch, FLG_PHCHANGE);
+		}
+
+		fill_isoc_urb(urb, fifo->hw->dev, fifo->pipe,
+			      context_iso_urb->buffer, num_isoc_packets,
+			      fifo->usb_packet_maxlen, fifo->intervall,
+			      (usb_complete_t)rx_iso_complete, urb->context);
+		errcode = usb_submit_urb(urb, GFP_ATOMIC);
+		if (errcode < 0) {
+			if (debug & DEBUG_HW)
+				printk(KERN_DEBUG "%s: %s: error submitting "
+				    "ISO URB: %d\n",
+				    hw->name, __func__, errcode);
+		}
+	} else {
+		if (status && (debug & DBG_HFC_URB_INFO))
+			printk(KERN_DEBUG "%s: %s: rx_iso_complete : "
+			    "urb->status %d, fifonum %d\n",
+			    hw->name, __func__, status, fifon);
+	}
+}
+
+/* receive completion routine for all interrupt rx fifos */
+static void
+rx_int_complete(struct urb *urb)
+{
+	int len, status, i;
+	__u8 *buf, maxlen, fifon;
+	struct usb_fifo *fifo = (struct usb_fifo *) urb->context;
+	struct hfcsusb *hw = fifo->hw;
+	static __u8 eof[8];
+
+	spin_lock(&hw->lock);
+	if (fifo->stop_gracefull) {
+		fifo->stop_gracefull = 0;
+		fifo->active = 0;
+		spin_unlock(&hw->lock);
+		return;
+	}
+	spin_unlock(&hw->lock);
+
+	fifon = fifo->fifonum;
+	if ((!fifo->active) || (urb->status)) {
+		if (debug & DBG_HFC_URB_ERROR)
+			printk(KERN_DEBUG
+			    "%s: %s: RX-Fifo %i is going down (%i)\n",
+			    hw->name, __func__, fifon, urb->status);
+
+		fifo->urb->interval = 0; /* cancel automatic rescheduling */
+		return;
+	}
+	len = urb->actual_length;
+	buf = fifo->buffer;
+	maxlen = fifo->usb_packet_maxlen;
+
+	/* USB data log for every D INT in */
+	if ((fifon == HFCUSB_D_RX) && (debug & DBG_HFC_USB_VERBOSE)) {
+		printk(KERN_DEBUG "%s: %s: D RX INT len(%d) ",
+		    hw->name, __func__, len);
+		for (i = 0; i < len; i++)
+			printk("%02x ", buf[i]);
+		printk("\n");
+	}
+
+	if (fifo->last_urblen != fifo->usb_packet_maxlen) {
+		/* the threshold mask is in the 2nd status byte */
+		hw->threshold_mask = buf[1];
+
+		/* signal S0 layer1 state change */
+		if (hw->initdone && ((buf[0] >> 4) != hw->dch.state)) {
+			hw->dch.state = (buf[0] >> 4);
+			schedule_event(&hw->dch, FLG_PHCHANGE);
+		}
+
+		eof[fifon] = buf[0] & 1;
+		/* if we have more than the 2 status bytes -> collect data */
+		if (len > 2)
+			hfcsusb_rx_frame(fifo, buf + 2,
+			   urb->actual_length - 2,
+			   (len < maxlen) ? eof[fifon] : 0);
+	} else {
+		hfcsusb_rx_frame(fifo, buf, urb->actual_length,
+				 (len < maxlen) ? eof[fifon] : 0);
+	}
+	fifo->last_urblen = urb->actual_length;
+
+	status = usb_submit_urb(urb, GFP_ATOMIC);
+	if (status) {
+		if (debug & DEBUG_HW)
+			printk(KERN_DEBUG "%s: %s: error resubmitting USB\n",
+			    hw->name, __func__);
+	}
+}
+
+/* transmit completion routine for all ISO tx fifos */
+static void
+tx_iso_complete(struct urb *urb)
+{
+	struct iso_urb *context_iso_urb = (struct iso_urb *) urb->context;
+	struct usb_fifo *fifo = context_iso_urb->owner_fifo;
+	struct hfcsusb *hw = fifo->hw;
+	struct sk_buff *tx_skb;
+	int k, tx_offset, num_isoc_packets, sink, remain, current_len,
+	    errcode, hdlc, i;
+	int *tx_idx;
+	int frame_complete, fifon, status;
+	__u8 threshbit;
+
+	spin_lock(&hw->lock);
+	if (fifo->stop_gracefull) {
+		fifo->stop_gracefull = 0;
+		fifo->active = 0;
+		spin_unlock(&hw->lock);
+		return;
+	}
+
+	if (fifo->dch) {
+		tx_skb = fifo->dch->tx_skb;
+		tx_idx = &fifo->dch->tx_idx;
+		hdlc = 1;
+	} else if (fifo->bch) {
+		tx_skb = fifo->bch->tx_skb;
+		tx_idx = &fifo->bch->tx_idx;
+		hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags);
+	} else {
+		printk(KERN_DEBUG "%s: %s: neither BCH nor DCH\n",
+		    hw->name, __func__);
+		spin_unlock(&hw->lock);
+		return;
+	}
+
+	fifon = fifo->fifonum;
+	status = urb->status;
+
+	tx_offset = 0;
+
+	/*
+	 * ISO transfer only partially completed,
+	 * look at individual frame status for details
+	 */
+	if (status == -EXDEV) {
+		if (debug & DBG_HFC_URB_ERROR)
+			printk(KERN_DEBUG "%s: %s: "
+			    "-EXDEV (%i) fifon (%d)\n",
+			    hw->name, __func__, status, fifon);
+
+		/* clear status, so go on with ISO transfers */
+		status = 0;
+	}
+
+	if (fifo->active && !status) {
+		/* is FifoFull-threshold set for our channel? */
+		threshbit = (hw->threshold_mask & (1 << fifon));
+		num_isoc_packets = iso_packets[fifon];
+
+		/* predict dataflow to avoid fifo overflow */
+		if (fifon >= HFCUSB_D_TX)
+			sink = (threshbit) ? SINK_DMIN : SINK_DMAX;
+		else
+			sink = (threshbit) ? SINK_MIN : SINK_MAX;
+		fill_isoc_urb(urb, fifo->hw->dev, fifo->pipe,
+			      context_iso_urb->buffer, num_isoc_packets,
+			      fifo->usb_packet_maxlen, fifo->intervall,
+			      (usb_complete_t)tx_iso_complete, urb->context);
+		memset(context_iso_urb->buffer, 0,
+		       sizeof(context_iso_urb->buffer));
+		frame_complete = 0;
+
+		for (k = 0; k < num_isoc_packets; ++k) {
+			/* analyze tx success of previous ISO packets */
+			if (debug & DBG_HFC_URB_ERROR) {
+				errcode = urb->iso_frame_desc[k].status;
+				if (errcode) {
+					printk(KERN_DEBUG "%s: %s: "
+					    "ISO packet %i, status: %i\n",
+					     hw->name, __func__, k, errcode);
+				}
+			}
+
+			/* Generate next ISO Packets */
+			if (tx_skb)
+				remain = tx_skb->len - *tx_idx;
+			else
+				remain = 0;
+
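+			/*
+			 * fifo->bit_line models how much payload is queued
+			 * towards the S0 line: each ISO packet drains it by
+			 * the predicted sink rate, each byte copied below
+			 * adds 8 bits, and the payload per ISO packet is
+			 * capped at 14 bytes.
+			 */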
+			if (remain > 0) {
+				fifo->bit_line -= sink;
+				current_len = (0 - fifo->bit_line) / 8;
+				if (current_len > 14)
+					current_len = 14;
+				if (current_len < 0)
+					current_len = 0;
+				if (remain < current_len)
+					current_len = remain;
+
+				/* how many bits do we put on the line? */
+				fifo->bit_line += current_len * 8;
+
+				context_iso_urb->buffer[tx_offset] = 0;
+				if (current_len == remain) {
+					if (hdlc) {
+						/* signal frame completion */
+						context_iso_urb->
+						    buffer[tx_offset] = 1;
+						/* add 2 byte flags and 16bit
+						 * CRC at end of ISDN frame */
+						fifo->bit_line += 32;
+					}
+					frame_complete = 1;
+				}
+
+				/* copy tx data to iso-urb buffer */
+				memcpy(context_iso_urb->buffer + tx_offset + 1,
+				       (tx_skb->data + *tx_idx), current_len);
+				*tx_idx += current_len;
+
+				urb->iso_frame_desc[k].offset = tx_offset;
+				urb->iso_frame_desc[k].length = current_len + 1;
+
+				/* USB data log for every D ISO out */
+				if ((fifon == HFCUSB_D_RX) &&
+				    (debug & DBG_HFC_USB_VERBOSE)) {
+					printk(KERN_DEBUG
+					    "%s: %s (%d/%d) offs(%d) len(%d) ",
+					    hw->name, __func__,
+					    k, num_isoc_packets-1,
+					    urb->iso_frame_desc[k].offset,
+					    urb->iso_frame_desc[k].length);
+
+					for (i = urb->iso_frame_desc[k].offset;
+					     i < (urb->iso_frame_desc[k].offset
+					     + urb->iso_frame_desc[k].length);
+					     i++)
+						printk("%x ",
+						    context_iso_urb->buffer[i]);
+
+					printk(" skb->len(%i) tx-idx(%d)\n",
+					    tx_skb->len, *tx_idx);
+				}
+
+				tx_offset += (current_len + 1);
+			} else {
+				urb->iso_frame_desc[k].offset = tx_offset++;
+				urb->iso_frame_desc[k].length = 1;
+				/* we lower data margin every msec */
+				fifo->bit_line -= sink;
+				if (fifo->bit_line < BITLINE_INF)
+					fifo->bit_line = BITLINE_INF;
+			}
+
+			if (frame_complete) {
+				frame_complete = 0;
+
+				if (debug & DBG_HFC_FIFO_VERBOSE) {
+					printk(KERN_DEBUG  "%s: %s: "
+					    "fifon(%i) new TX len(%i): ",
+					    hw->name, __func__,
+					    fifon, tx_skb->len);
+					i = 0;
+					while (i < tx_skb->len)
+						printk("%02x ",
+						    tx_skb->data[i++]);
+					printk("\n");
+				}
+
+				dev_kfree_skb(tx_skb);
+				tx_skb = NULL;
+				if (fifo->dch && get_next_dframe(fifo->dch))
+					tx_skb = fifo->dch->tx_skb;
+				else if (fifo->bch &&
+				    get_next_bframe(fifo->bch)) {
+					if (test_bit(FLG_TRANSPARENT,
+					    &fifo->bch->Flags))
+						confirm_Bsend(fifo->bch);
+					tx_skb = fifo->bch->tx_skb;
+				}
+			}
+		}
+		errcode = usb_submit_urb(urb, GFP_ATOMIC);
+		if (errcode < 0) {
+			if (debug & DEBUG_HW)
+				printk(KERN_DEBUG
+				    "%s: %s: error submitting ISO URB: %d \n",
+				    hw->name, __func__, errcode);
+		}
+
+		/*
+		 * abuse D-Channel tx iso completion to trigger NT mode state
+		 * changes; tx_iso_complete is assumed to be called every
+		 * fifo->intervall (ms)
+		 */
+		if ((fifon == HFCUSB_D_TX) && (hw->protocol == ISDN_P_NT_S0)
+		    && (hw->timers & NT_ACTIVATION_TIMER)) {
+			if ((--hw->nt_timer) < 0)
+				schedule_event(&hw->dch, FLG_PHCHANGE);
+		}
+
+	} else {
+		if (status && (debug & DBG_HFC_URB_ERROR))
+			printk(KERN_DEBUG "%s: %s: urb->status %s (%i) "
+			    "fifonum=%d\n",
+			    hw->name, __func__,
+			    symbolic(urb_errlist, status), status, fifon);
+	}
+	spin_unlock(&hw->lock);
+}
+
+/*
+ * allocates URBs and starts the isoc transfer with two pending URBs to
+ * avoid gaps in the transfer chain
+ */
+static int
+start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
+		 usb_complete_t complete, int packet_size)
+{
+	struct hfcsusb *hw = fifo->hw;
+	int i, k, errcode;
+
+	if (debug)
+		printk(KERN_DEBUG "%s: %s: fifo %i\n",
+		    hw->name, __func__, fifo->fifonum);
+
+	/* allocate memory for the ISO URBs */
+	for (i = 0; i < 2; i++) {
+		if (!(fifo->iso[i].urb)) {
+			fifo->iso[i].urb =
+			    usb_alloc_urb(num_packets_per_urb, GFP_KERNEL);
+			if (!(fifo->iso[i].urb)) {
+				printk(KERN_DEBUG
+				    "%s: %s: alloc urb for fifo %i failed",
+				    hw->name, __func__, fifo->fifonum);
+			}
+			fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
+			fifo->iso[i].indx = i;
+
+			/* Init the first iso */
+			if (ISO_BUFFER_SIZE >=
+			    (fifo->usb_packet_maxlen *
+			     num_packets_per_urb)) {
+				fill_isoc_urb(fifo->iso[i].urb,
+				    fifo->hw->dev, fifo->pipe,
+				    fifo->iso[i].buffer,
+				    num_packets_per_urb,
+				    fifo->usb_packet_maxlen,
+				    fifo->intervall, complete,
+				    &fifo->iso[i]);
+				memset(fifo->iso[i].buffer, 0,
+				       sizeof(fifo->iso[i].buffer));
+
+				for (k = 0; k < num_packets_per_urb; k++) {
+					fifo->iso[i].urb->
+					    iso_frame_desc[k].offset =
+					    k * packet_size;
+					fifo->iso[i].urb->
+					    iso_frame_desc[k].length =
+					    packet_size;
+				}
+			} else {
+				printk(KERN_DEBUG
+				    "%s: %s: ISO Buffer size to small!\n",
+				    hw->name, __func__);
+			}
+		}
+		fifo->bit_line = BITLINE_INF;
+
+		errcode = usb_submit_urb(fifo->iso[i].urb, GFP_KERNEL);
+		fifo->active = (errcode >= 0) ? 1 : 0;
+		fifo->stop_gracefull = 0;
+		if (errcode < 0) {
+			printk(KERN_DEBUG "%s: %s: %s URB nr:%d\n",
+			    hw->name, __func__,
+			    symbolic(urb_errlist, errcode), i);
+		}
+	}
+	return fifo->active;
+}
+
+static void
+stop_iso_gracefull(struct usb_fifo *fifo)
+{
+	struct hfcsusb *hw = fifo->hw;
+	int i, timeout;
+	u_long flags;
+
+	for (i = 0; i < 2; i++) {
+		spin_lock_irqsave(&hw->lock, flags);
+		if (debug)
+			printk(KERN_DEBUG "%s: %s for fifo %i.%i\n",
+			       hw->name, __func__, fifo->fifonum, i);
+		fifo->stop_gracefull = 1;
+		spin_unlock_irqrestore(&hw->lock, flags);
+	}
+
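+	/*
+	 * wait for each pending ISO URB completion handler to see
+	 * stop_gracefull and clear it (polled up to three times,
+	 * roughly 16 ms apart)
+	 */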
+	for (i = 0; i < 2; i++) {
+		timeout = 3;
+		while (fifo->stop_gracefull && timeout--)
+			schedule_timeout_interruptible(msecs_to_jiffies(16));
+		if (debug && fifo->stop_gracefull)
+			printk(KERN_DEBUG "%s: ERROR %s for fifo %i.%i\n",
+				hw->name, __func__, fifo->fifonum, i);
+	}
+}
+
+static void
+stop_int_gracefull(struct usb_fifo *fifo)
+{
+	struct hfcsusb *hw = fifo->hw;
+	int timeout;
+	u_long flags;
+
+	spin_lock_irqsave(&hw->lock, flags);
+	if (debug)
+		printk(KERN_DEBUG "%s: %s for fifo %i\n",
+		       hw->name, __func__, fifo->fifonum);
+	fifo->stop_gracefull = 1;
+	spin_unlock_irqrestore(&hw->lock, flags);
+
+	timeout = 3;
+	while (fifo->stop_gracefull && timeout--)
+		schedule_timeout_interruptible(msecs_to_jiffies(3));
+	if (debug && fifo->stop_gracefull)
+		printk(KERN_DEBUG "%s: ERROR %s for fifo %i\n",
+		       hw->name, __func__, fifo->fifonum);
+}
+
+/* start the interrupt transfer for the given fifo */
+static void
+start_int_fifo(struct usb_fifo *fifo)
+{
+	struct hfcsusb *hw = fifo->hw;
+	int errcode;
+
+	if (debug)
+		printk(KERN_DEBUG "%s: %s: INT IN fifo:%d\n",
+		    hw->name, __func__, fifo->fifonum);
+
+	if (!fifo->urb) {
+		fifo->urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!fifo->urb)
+			return;
+	}
+	usb_fill_int_urb(fifo->urb, fifo->hw->dev, fifo->pipe,
+	    fifo->buffer, fifo->usb_packet_maxlen,
+	    (usb_complete_t)rx_int_complete, fifo, fifo->intervall);
+	fifo->active = 1;
+	fifo->stop_gracefull = 0;
+	errcode = usb_submit_urb(fifo->urb, GFP_KERNEL);
+	if (errcode) {
+		printk(KERN_DEBUG "%s: %s: submit URB: status:%i\n",
+		    hw->name, __func__, errcode);
+		fifo->active = 0;
+	}
+}
+
+static void
+setPortMode(struct hfcsusb *hw)
+{
+	if (debug & DEBUG_HW)
+		printk(KERN_DEBUG "%s: %s %s\n", hw->name, __func__,
+		   (hw->protocol == ISDN_P_TE_S0) ? "TE" : "NT");
+
+	if (hw->protocol == ISDN_P_TE_S0) {
+		write_reg(hw, HFCUSB_SCTRL, 0x40);
+		write_reg(hw, HFCUSB_SCTRL_E, 0x00);
+		write_reg(hw, HFCUSB_CLKDEL, CLKDEL_TE);
+		write_reg(hw, HFCUSB_STATES, 3 | 0x10);
+		write_reg(hw, HFCUSB_STATES, 3);
+	} else {
+		write_reg(hw, HFCUSB_SCTRL, 0x44);
+		write_reg(hw, HFCUSB_SCTRL_E, 0x09);
+		write_reg(hw, HFCUSB_CLKDEL, CLKDEL_NT);
+		write_reg(hw, HFCUSB_STATES, 1 | 0x10);
+		write_reg(hw, HFCUSB_STATES, 1);
+	}
+}
+
+static void
+reset_hfcsusb(struct hfcsusb *hw)
+{
+	struct usb_fifo *fifo;
+	int i;
+
+	if (debug & DEBUG_HW)
+		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+
+	/* do Chip reset */
+	write_reg(hw, HFCUSB_CIRM, 8);
+
+	/* aux = output, reset off */
+	write_reg(hw, HFCUSB_CIRM, 0x10);
+
+	/* set USB_SIZE to match the wMaxPacketSize for INT or BULK transfers */
+	write_reg(hw, HFCUSB_USB_SIZE, (hw->packet_size / 8) |
+	    ((hw->packet_size / 8) << 4));
+
+	/* set USB_SIZE_I to match the wMaxPacketSize for ISO transfers */
+	write_reg(hw, HFCUSB_USB_SIZE_I, hw->iso_packet_size);
+
+	/* enable PCM/GCI master mode */
+	write_reg(hw, HFCUSB_MST_MODE1, 0);	/* set default values */
+	write_reg(hw, HFCUSB_MST_MODE0, 1);	/* enable master mode */
+
+	/* init the fifos */
+	write_reg(hw, HFCUSB_F_THRES,
+	    (HFCUSB_TX_THRESHOLD / 8) | ((HFCUSB_RX_THRESHOLD / 8) << 4));
+
+	fifo = hw->fifos;
+	for (i = 0; i < HFCUSB_NUM_FIFOS; i++) {
+		write_reg(hw, HFCUSB_FIFO, i);	/* select the desired fifo */
+		fifo[i].max_size =
+		    (i <= HFCUSB_B2_RX) ? MAX_BCH_SIZE : MAX_DFRAME_LEN;
+		fifo[i].last_urblen = 0;
+
+		/* set 2 bits for D- & E-channel */
+		write_reg(hw, HFCUSB_HDLC_PAR, ((i <= HFCUSB_B2_RX) ? 0 : 2));
+
+		/* enable all fifos */
+		if (i == HFCUSB_D_TX)
+			write_reg(hw, HFCUSB_CON_HDLC,
+			    (hw->protocol == ISDN_P_NT_S0) ? 0x08 : 0x09);
+		else
+			write_reg(hw, HFCUSB_CON_HDLC, 0x08);
+		write_reg(hw, HFCUSB_INC_RES_F, 2); /* reset the fifo */
+	}
+
+	write_reg(hw, HFCUSB_SCTRL_R, 0); /* disable both B receivers */
+	handle_led(hw, LED_POWER_ON);
+}
+
+/* start USB data pipes depending on the device's endpoint configuration */
+static void
+hfcsusb_start_endpoint(struct hfcsusb *hw, int channel)
+{
+	/* quick check if endpoint already running */
+	if ((channel == HFC_CHAN_D) && (hw->fifos[HFCUSB_D_RX].active))
+		return;
+	if ((channel == HFC_CHAN_B1) && (hw->fifos[HFCUSB_B1_RX].active))
+		return;
+	if ((channel == HFC_CHAN_B2) && (hw->fifos[HFCUSB_B2_RX].active))
+		return;
+	if ((channel == HFC_CHAN_E) && (hw->fifos[HFCUSB_PCM_RX].active))
+		return;
+
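+	/*
+	 * fifo layout: index channel*2 is the TX fifo,
+	 * channel*2 + 1 the corresponding RX fifo
+	 */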
+	/* start rx endpoints using USB INT IN method */
+	if (hw->cfg_used == CNF_3INT3ISO || hw->cfg_used == CNF_4INT3ISO)
+		start_int_fifo(hw->fifos + channel*2 + 1);
+
+	/* start rx endpoints using USB ISO IN method */
+	if (hw->cfg_used == CNF_3ISO3ISO || hw->cfg_used == CNF_4ISO3ISO) {
+		switch (channel) {
+		case HFC_CHAN_D:
+			start_isoc_chain(hw->fifos + HFCUSB_D_RX,
+				ISOC_PACKETS_D,
+				(usb_complete_t)rx_iso_complete,
+				16);
+			break;
+		case HFC_CHAN_E:
+			start_isoc_chain(hw->fifos + HFCUSB_PCM_RX,
+				ISOC_PACKETS_D,
+				(usb_complete_t)rx_iso_complete,
+				16);
+			break;
+		case HFC_CHAN_B1:
+			start_isoc_chain(hw->fifos + HFCUSB_B1_RX,
+				ISOC_PACKETS_B,
+				(usb_complete_t)rx_iso_complete,
+				16);
+			break;
+		case HFC_CHAN_B2:
+			start_isoc_chain(hw->fifos + HFCUSB_B2_RX,
+				ISOC_PACKETS_B,
+				(usb_complete_t)rx_iso_complete,
+				16);
+			break;
+		}
+	}
+
+	/* start tx endpoints using USB ISO OUT method */
+	switch (channel) {
+	case HFC_CHAN_D:
+		start_isoc_chain(hw->fifos + HFCUSB_D_TX,
+			ISOC_PACKETS_B,
+			(usb_complete_t)tx_iso_complete, 1);
+		break;
+	case HFC_CHAN_B1:
+		start_isoc_chain(hw->fifos + HFCUSB_B1_TX,
+			ISOC_PACKETS_D,
+			(usb_complete_t)tx_iso_complete, 1);
+		break;
+	case HFC_CHAN_B2:
+		start_isoc_chain(hw->fifos + HFCUSB_B2_TX,
+			ISOC_PACKETS_B,
+			(usb_complete_t)tx_iso_complete, 1);
+		break;
+	}
+}
+
+/* stop USB data pipes depending on the device's endpoint configuration */
+static void
+hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
+{
+	/* quick check if endpoint currently running */
+	if ((channel == HFC_CHAN_D) && (!hw->fifos[HFCUSB_D_RX].active))
+		return;
+	if ((channel == HFC_CHAN_B1) && (!hw->fifos[HFCUSB_B1_RX].active))
+		return;
+	if ((channel == HFC_CHAN_B2) && (!hw->fifos[HFCUSB_B2_RX].active))
+		return;
+	if ((channel == HFC_CHAN_E) && (!hw->fifos[HFCUSB_PCM_RX].active))
+		return;
+
+	/* rx endpoints using USB INT IN method */
+	if (hw->cfg_used == CNF_3INT3ISO || hw->cfg_used == CNF_4INT3ISO)
+		stop_int_gracefull(hw->fifos + channel*2 + 1);
+
+	/* rx endpoints using USB ISO IN method */
+	if (hw->cfg_used == CNF_3ISO3ISO || hw->cfg_used == CNF_4ISO3ISO)
+		stop_iso_gracefull(hw->fifos + channel*2 + 1);
+
+	/* tx endpoints using USB ISO OUT method */
+	if (channel != HFC_CHAN_E)
+		stop_iso_gracefull(hw->fifos + channel*2);
+}
+
+
+/* Hardware Initialization */
+int
+setup_hfcsusb(struct hfcsusb *hw)
+{
+	int err;
+	u_char b;
+
+	if (debug & DBG_HFC_CALL_TRACE)
+		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+
+	/* check the chip id */
+	if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
+		printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
+		    hw->name, __func__);
+		return 1;
+	}
+	if (b != HFCUSB_CHIPID) {
+		printk(KERN_DEBUG "%s: %s: Invalid chip id 0x%02x\n",
+		    hw->name, __func__, b);
+		return 1;
+	}
+
+	/* first set the needed config, interface and alternate */
+	err = usb_set_interface(hw->dev, hw->if_used, hw->alt_used);
+
+	hw->led_state = 0;
+
+	/* init the background machinery for control requests */
+	hw->ctrl_read.bRequestType = 0xc0;
+	hw->ctrl_read.bRequest = 1;
+	hw->ctrl_read.wLength = cpu_to_le16(1);
+	hw->ctrl_write.bRequestType = 0x40;
+	hw->ctrl_write.bRequest = 0;
+	hw->ctrl_write.wLength = 0;
+	usb_fill_control_urb(hw->ctrl_urb, hw->dev, hw->ctrl_out_pipe,
+	    (u_char *)&hw->ctrl_write, NULL, 0,
+	    (usb_complete_t)ctrl_complete, hw);
+
+	reset_hfcsusb(hw);
+	return 0;
+}
+
+static void
+release_hw(struct hfcsusb *hw)
+{
+	if (debug & DBG_HFC_CALL_TRACE)
+		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+
+	/*
+	 * stop all endpoints gracefully
+	 * TODO: mISDN_core should generate CLOSE_CHANNEL
+	 *       signals after calling mISDN_unregister_device()
+	 */
+	hfcsusb_stop_endpoint(hw, HFC_CHAN_D);
+	hfcsusb_stop_endpoint(hw, HFC_CHAN_B1);
+	hfcsusb_stop_endpoint(hw, HFC_CHAN_B2);
+	if (hw->fifos[HFCUSB_PCM_RX].pipe)
+		hfcsusb_stop_endpoint(hw, HFC_CHAN_E);
+	if (hw->protocol == ISDN_P_TE_S0)
+		l1_event(hw->dch.l1, CLOSE_CHANNEL);
+
+	mISDN_unregister_device(&hw->dch.dev);
+	mISDN_freebchannel(&hw->bch[1]);
+	mISDN_freebchannel(&hw->bch[0]);
+	mISDN_freedchannel(&hw->dch);
+
+	if (hw->ctrl_urb) {
+		usb_kill_urb(hw->ctrl_urb);
+		usb_free_urb(hw->ctrl_urb);
+		hw->ctrl_urb = NULL;
+	}
+
+	if (hw->intf)
+		usb_set_intfdata(hw->intf, NULL);
+	list_del(&hw->list);
+	kfree(hw);
+	hw = NULL;
+}
+
+static void
+deactivate_bchannel(struct bchannel *bch)
+{
+	struct hfcsusb *hw = bch->hw;
+	u_long flags;
+
+	if (bch->debug & DEBUG_HW)
+		printk(KERN_DEBUG "%s: %s: bch->nr(%i)\n",
+		    hw->name, __func__, bch->nr);
+
+	spin_lock_irqsave(&hw->lock, flags);
+	if (test_and_clear_bit(FLG_TX_NEXT, &bch->Flags)) {
+		dev_kfree_skb(bch->next_skb);
+		bch->next_skb = NULL;
+	}
+	if (bch->tx_skb) {
+		dev_kfree_skb(bch->tx_skb);
+		bch->tx_skb = NULL;
+	}
+	bch->tx_idx = 0;
+	if (bch->rx_skb) {
+		dev_kfree_skb(bch->rx_skb);
+		bch->rx_skb = NULL;
+	}
+	clear_bit(FLG_ACTIVE, &bch->Flags);
+	clear_bit(FLG_TX_BUSY, &bch->Flags);
+	spin_unlock_irqrestore(&hw->lock, flags);
+	hfcsusb_setup_bch(bch, ISDN_P_NONE);
+	hfcsusb_stop_endpoint(hw, bch->nr);
+}
+
+/*
+ * Layer 1 B-channel hardware access
+ */
+static int
+hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
+{
+	struct bchannel	*bch = container_of(ch, struct bchannel, ch);
+	int		ret = -EINVAL;
+
+	if (bch->debug & DEBUG_HW)
+		printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg);
+
+	switch (cmd) {
+	case HW_TESTRX_RAW:
+	case HW_TESTRX_HDLC:
+	case HW_TESTRX_OFF:
+		ret = -EINVAL;
+		break;
+
+	case CLOSE_CHANNEL:
+		test_and_clear_bit(FLG_OPEN, &bch->Flags);
+		if (test_bit(FLG_ACTIVE, &bch->Flags))
+			deactivate_bchannel(bch);
+		ch->protocol = ISDN_P_NONE;
+		ch->peer = NULL;
+		module_put(THIS_MODULE);
+		ret = 0;
+		break;
+	case CONTROL_CHANNEL:
+		ret = channel_bctrl(bch, arg);
+		break;
+	default:
+		printk(KERN_WARNING "%s: unknown prim(%x)\n",
+			__func__, cmd);
+	}
+	return ret;
+}
+
+static int
+setup_instance(struct hfcsusb *hw, struct device *parent)
+{
+	u_long	flags;
+	int	err, i;
+
+	if (debug & DBG_HFC_CALL_TRACE)
+		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+
+	spin_lock_init(&hw->ctrl_lock);
+	spin_lock_init(&hw->lock);
+
+	mISDN_initdchannel(&hw->dch, MAX_DFRAME_LEN_L1, ph_state);
+	hw->dch.debug = debug & 0xFFFF;
+	hw->dch.hw = hw;
+	hw->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
+	hw->dch.dev.D.send = hfcusb_l2l1D;
+	hw->dch.dev.D.ctrl = hfc_dctrl;
+
+	/* enable E-Channel logging */
+	if (hw->fifos[HFCUSB_PCM_RX].pipe)
+		mISDN_initdchannel(&hw->ech, MAX_DFRAME_LEN_L1, NULL);
+
+	hw->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
+	    (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
+	hw->dch.dev.nrbchan = 2;
+	for (i = 0; i < 2; i++) {
+		hw->bch[i].nr = i + 1;
+		set_channelmap(i + 1, hw->dch.dev.channelmap);
+		hw->bch[i].debug = debug;
+		mISDN_initbchannel(&hw->bch[i], MAX_DATA_MEM);
+		hw->bch[i].hw = hw;
+		hw->bch[i].ch.send = hfcusb_l2l1B;
+		hw->bch[i].ch.ctrl = hfc_bctrl;
+		hw->bch[i].ch.nr = i + 1;
+		list_add(&hw->bch[i].ch.list, &hw->dch.dev.bchannels);
+	}
+
+	hw->fifos[HFCUSB_B1_TX].bch = &hw->bch[0];
+	hw->fifos[HFCUSB_B1_RX].bch = &hw->bch[0];
+	hw->fifos[HFCUSB_B2_TX].bch = &hw->bch[1];
+	hw->fifos[HFCUSB_B2_RX].bch = &hw->bch[1];
+	hw->fifos[HFCUSB_D_TX].dch = &hw->dch;
+	hw->fifos[HFCUSB_D_RX].dch = &hw->dch;
+	hw->fifos[HFCUSB_PCM_RX].ech = &hw->ech;
+	hw->fifos[HFCUSB_PCM_TX].ech = &hw->ech;
+
+	err = setup_hfcsusb(hw);
+	if (err)
+		goto out;
+
+	snprintf(hw->name, MISDN_MAX_IDLEN - 1, "%s.%d", DRIVER_NAME,
+	    hfcsusb_cnt + 1);
+	printk(KERN_INFO "%s: registered as '%s'\n",
+	    DRIVER_NAME, hw->name);
+
+	err = mISDN_register_device(&hw->dch.dev, parent, hw->name);
+	if (err)
+		goto out;
+
+	hfcsusb_cnt++;
+	write_lock_irqsave(&HFClock, flags);
+	list_add_tail(&hw->list, &HFClist);
+	write_unlock_irqrestore(&HFClock, flags);
+	return 0;
+
+out:
+	mISDN_freebchannel(&hw->bch[1]);
+	mISDN_freebchannel(&hw->bch[0]);
+	mISDN_freedchannel(&hw->dch);
+	kfree(hw);
+	return err;
+}
+
+static int
+hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+	struct hfcsusb			*hw;
+	struct usb_device		*dev = interface_to_usbdev(intf);
+	struct usb_host_interface	*iface = intf->cur_altsetting;
+	struct usb_host_interface	*iface_used = NULL;
+	struct usb_host_endpoint	*ep;
+	struct hfcsusb_vdata		*driver_info;
+	int ifnum = iface->desc.bInterfaceNumber, i, idx, alt_idx,
+	    probe_alt_setting, vend_idx, cfg_used, *vcf, attr, cfg_found,
+	    ep_addr, cmptbl[16], small_match, iso_packet_size, packet_size,
+	    alt_used = 0;
+
+	vend_idx = 0xffff;
+	for (i = 0; hfcsusb_idtab[i].idVendor; i++) {
+		if ((le16_to_cpu(dev->descriptor.idVendor)
+		       == hfcsusb_idtab[i].idVendor) &&
+		    (le16_to_cpu(dev->descriptor.idProduct)
+		       == hfcsusb_idtab[i].idProduct)) {
+			vend_idx = i;
+			continue;
+		}
+	}
+
+	printk(KERN_DEBUG
+	    "%s: interface(%d) actalt(%d) minor(%d) vend_idx(%d)\n",
+	    __func__, ifnum, iface->desc.bAlternateSetting,
+	    intf->minor, vend_idx);
+
+	if (vend_idx == 0xffff) {
+		printk(KERN_WARNING
+		    "%s: no valid vendor found in USB descriptor\n",
+		    __func__);
+		return -EIO;
+	}
+	/* if vendor and product IDs are OK, start probing alternate settings */
+	alt_idx = 0;
+	small_match = -1;
+
+	/* default settings */
+	iso_packet_size = 16;
+	packet_size = 64;
+
+	while (alt_idx < intf->num_altsetting) {
+		iface = intf->altsetting + alt_idx;
+		probe_alt_setting = iface->desc.bAlternateSetting;
+		cfg_used = 0;
+
+		while (validconf[cfg_used][0]) {
+			cfg_found = 1;
+			vcf = validconf[cfg_used];
+			ep = iface->endpoint;
+			memcpy(cmptbl, vcf, 16 * sizeof(int));
+
+			/* check for all endpoints in this alternate setting */
+			for (i = 0; i < iface->desc.bNumEndpoints; i++) {
+				ep_addr = ep->desc.bEndpointAddress;
+
+				/* get endpoint base */
+				idx = ((ep_addr & 0x7f) - 1) * 2;
+				if (ep_addr & 0x80)
+					idx++;
+				attr = ep->desc.bmAttributes;
+
+				if (cmptbl[idx] != EP_NOP) {
+					if (cmptbl[idx] == EP_NUL)
+						cfg_found = 0;
+					if (attr == USB_ENDPOINT_XFER_INT
+						&& cmptbl[idx] == EP_INT)
+						cmptbl[idx] = EP_NUL;
+					if (attr == USB_ENDPOINT_XFER_BULK
+						&& cmptbl[idx] == EP_BLK)
+						cmptbl[idx] = EP_NUL;
+					if (attr == USB_ENDPOINT_XFER_ISOC
+						&& cmptbl[idx] == EP_ISO)
+						cmptbl[idx] = EP_NUL;
+
+					if (attr == USB_ENDPOINT_XFER_INT &&
+						ep->desc.bInterval < vcf[17]) {
+						cfg_found = 0;
+					}
+				}
+				ep++;
+			}
+
+			for (i = 0; i < 16; i++)
+				if (cmptbl[i] != EP_NOP && cmptbl[i] != EP_NUL)
+					cfg_found = 0;
+
+			if (cfg_found) {
+				if (small_match < cfg_used) {
+					small_match = cfg_used;
+					alt_used = probe_alt_setting;
+					iface_used = iface;
+				}
+			}
+			cfg_used++;
+		}
+		alt_idx++;
+	}	/* (alt_idx < intf->num_altsetting) */
+
+	/* no valid USB TA endpoint config found */
+	if (small_match == -1)
+		return -EIO;
+
+	iface = iface_used;
+	hw = kzalloc(sizeof(struct hfcsusb), GFP_KERNEL);
+	if (!hw)
+		return -ENOMEM;	/* got no mem */
+	snprintf(hw->name, MISDN_MAX_IDLEN - 1, "%s", DRIVER_NAME);
+
+	ep = iface->endpoint;
+	vcf = validconf[small_match];
+
+	for (i = 0; i < iface->desc.bNumEndpoints; i++) {
+		struct usb_fifo *f;
+
+		ep_addr = ep->desc.bEndpointAddress;
+		/* get endpoint base */
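+		/*
+		 * endpoint number N maps to fifo index 2*(N-1), +1 for IN
+		 * endpoints, matching the HFCUSB_*_TX/_RX numbering
+		 */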
+		idx = ((ep_addr & 0x7f) - 1) * 2;
+		if (ep_addr & 0x80)
+			idx++;
+		f = &hw->fifos[idx & 7];
+
+		/* init Endpoints */
+		if (vcf[idx] == EP_NOP || vcf[idx] == EP_NUL) {
+			ep++;
+			continue;
+		}
+		switch (ep->desc.bmAttributes) {
+		case USB_ENDPOINT_XFER_INT:
+			f->pipe = usb_rcvintpipe(dev,
+				ep->desc.bEndpointAddress);
+			f->usb_transfer_mode = USB_INT;
+			packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
+			break;
+		case USB_ENDPOINT_XFER_BULK:
+			if (ep_addr & 0x80)
+				f->pipe = usb_rcvbulkpipe(dev,
+					ep->desc.bEndpointAddress);
+			else
+				f->pipe = usb_sndbulkpipe(dev,
+					ep->desc.bEndpointAddress);
+			f->usb_transfer_mode = USB_BULK;
+			packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
+			break;
+		case USB_ENDPOINT_XFER_ISOC:
+			if (ep_addr & 0x80)
+				f->pipe = usb_rcvisocpipe(dev,
+					ep->desc.bEndpointAddress);
+			else
+				f->pipe = usb_sndisocpipe(dev,
+					ep->desc.bEndpointAddress);
+			f->usb_transfer_mode = USB_ISOC;
+			iso_packet_size = le16_to_cpu(ep->desc.wMaxPacketSize);
+			break;
+		default:
+			f->pipe = 0;
+		}
+
+		if (f->pipe) {
+			f->fifonum = idx & 7;
+			f->hw = hw;
+			f->usb_packet_maxlen =
+			    le16_to_cpu(ep->desc.wMaxPacketSize);
+			f->intervall = ep->desc.bInterval;
+		}
+		ep++;
+	}
+	hw->dev = dev; /* save device */
+	hw->if_used = ifnum; /* save used interface */
+	hw->alt_used = alt_used; /* and alternate config */
+	hw->ctrl_paksize = dev->descriptor.bMaxPacketSize0; /* control size */
+	hw->cfg_used = vcf[16];	/* store used config */
+	hw->vend_idx = vend_idx; /* store found vendor */
+	hw->packet_size = packet_size;
+	hw->iso_packet_size = iso_packet_size;
+
+	/* create the control pipes needed for register access */
+	hw->ctrl_in_pipe = usb_rcvctrlpipe(hw->dev, 0);
+	hw->ctrl_out_pipe = usb_sndctrlpipe(hw->dev, 0);
+	hw->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
+
+	driver_info =
+		(struct hfcsusb_vdata *)hfcsusb_idtab[vend_idx].driver_info;
+	printk(KERN_DEBUG "%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n",
+	    hw->name, __func__, driver_info->vend_name,
+	    conf_str[small_match], ifnum, alt_used);
+
+	if (setup_instance(hw, dev->dev.parent))
+		return -EIO;
+
+	hw->intf = intf;
+	usb_set_intfdata(hw->intf, hw);
+	return 0;
+}
+
+/* function called when an active device is removed */
+static void
+hfcsusb_disconnect(struct usb_interface *intf)
+{
+	struct hfcsusb *hw = usb_get_intfdata(intf);
+	struct hfcsusb *next;
+	int cnt = 0;
+
+	printk(KERN_INFO "%s: device disconnected\n", hw->name);
+
+	handle_led(hw, LED_POWER_OFF);
+	release_hw(hw);
+
+	list_for_each_entry_safe(hw, next, &HFClist, list)
+		cnt++;
+	if (!cnt)
+		hfcsusb_cnt = 0;
+
+	usb_set_intfdata(intf, NULL);
+}
+
+static struct usb_driver hfcsusb_drv = {
+	.name = DRIVER_NAME,
+	.id_table = hfcsusb_idtab,
+	.probe = hfcsusb_probe,
+	.disconnect = hfcsusb_disconnect,
+};
+
+static int __init
+hfcsusb_init(void)
+{
+	printk(KERN_INFO DRIVER_NAME " driver Rev. %s debug(0x%x) poll(%i)\n",
+	    hfcsusb_rev, debug, poll);
+
+	if (usb_register(&hfcsusb_drv)) {
+		printk(KERN_INFO DRIVER_NAME
+		    ": Unable to register hfcsusb module at usb stack\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void __exit
+hfcsusb_cleanup(void)
+{
+	if (debug & DBG_HFC_CALL_TRACE)
+		printk(KERN_INFO DRIVER_NAME ": %s\n", __func__);
+
+	/* unregister Hardware */
+	usb_deregister(&hfcsusb_drv);	/* release our driver */
+}
+
+module_init(hfcsusb_init);
+module_exit(hfcsusb_cleanup);
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.h b/drivers/isdn/hardware/mISDN/hfcsusb.h
new file mode 100644
index 0000000..098486b
--- /dev/null
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.h
@@ -0,0 +1,418 @@
+/*
+ * hfcsusb.h, HFC-S USB mISDN driver
+ */
+
+#ifndef __HFCSUSB_H__
+#define __HFCSUSB_H__
+
+
+#define DRIVER_NAME "HFC-S_USB"
+
+#define DBG_HFC_CALL_TRACE	0x00010000
+#define DBG_HFC_FIFO_VERBOSE	0x00020000
+#define DBG_HFC_USB_VERBOSE	0x00100000
+#define DBG_HFC_URB_INFO	0x00200000
+#define DBG_HFC_URB_ERROR	0x00400000
+
+#define DEFAULT_TRANSP_BURST_SZ 128
+
+#define HFC_CTRL_TIMEOUT	20	/* 5ms timeout writing/reading regs */
+#define CLKDEL_TE		0x0f	/* CLKDEL in TE mode */
+#define CLKDEL_NT		0x6c	/* CLKDEL in NT mode */
+
+/* hfcsusb Layer1 commands */
+#define HFC_L1_ACTIVATE_TE		1
+#define HFC_L1_ACTIVATE_NT		2
+#define HFC_L1_DEACTIVATE_NT		3
+#define HFC_L1_FORCE_DEACTIVATE_TE	4
+
+/* cmd FLAGS in HFCUSB_STATES register */
+#define HFCUSB_LOAD_STATE	0x10
+#define HFCUSB_ACTIVATE		0x20
+#define HFCUSB_DO_ACTION	0x40
+#define HFCUSB_NT_G2_G3		0x80
+
+/* timers */
+#define NT_ACTIVATION_TIMER	0x01	/* enables NT mode activation Timer */
+#define NT_T1_COUNT		10
+
+#define MAX_BCH_SIZE 		2048	/* allowed B-channel packet size */
+
+#define HFCUSB_RX_THRESHOLD 	64	/* threshold for fifo report bit rx */
+#define HFCUSB_TX_THRESHOLD 	96	/* threshold for fifo report bit tx */
+
+#define HFCUSB_CHIP_ID		0x16	/* Chip ID register index */
+#define HFCUSB_CIRM		0x00	/* cirm register index */
+#define HFCUSB_USB_SIZE		0x07	/* int length register */
+#define HFCUSB_USB_SIZE_I	0x06	/* iso length register */
+#define HFCUSB_F_CROSS		0x0b	/* bit order register */
+#define HFCUSB_CLKDEL		0x37	/* bit delay register */
+#define HFCUSB_CON_HDLC		0xfa	/* channel connect register */
+#define HFCUSB_HDLC_PAR		0xfb
+#define HFCUSB_SCTRL		0x31	/* S-bus control register (tx) */
+#define HFCUSB_SCTRL_E		0x32	/* same for E and special funcs */
+#define HFCUSB_SCTRL_R		0x33	/* S-bus control register (rx) */
+#define HFCUSB_F_THRES		0x0c	/* threshold register */
+#define HFCUSB_FIFO		0x0f	/* fifo select register */
+#define HFCUSB_F_USAGE		0x1a	/* fifo usage register */
+#define HFCUSB_MST_MODE0	0x14
+#define HFCUSB_MST_MODE1	0x15
+#define HFCUSB_P_DATA		0x1f
+#define HFCUSB_INC_RES_F	0x0e
+#define HFCUSB_B1_SSL		0x20
+#define HFCUSB_B2_SSL		0x21
+#define HFCUSB_B1_RSL		0x24
+#define HFCUSB_B2_RSL		0x25
+#define HFCUSB_STATES		0x30
+
+
+#define HFCUSB_CHIPID		0x40	/* ID value of HFC-S USB */
+
+/* fifo registers */
+#define HFCUSB_NUM_FIFOS	8	/* maximum number of fifos */
+#define HFCUSB_B1_TX		0	/* index for B1 transmit bulk/int */
+#define HFCUSB_B1_RX		1	/* index for B1 receive bulk/int */
+#define HFCUSB_B2_TX		2
+#define HFCUSB_B2_RX		3
+#define HFCUSB_D_TX		4
+#define HFCUSB_D_RX		5
+#define HFCUSB_PCM_TX		6
+#define HFCUSB_PCM_RX		7
+
+
+#define USB_INT		0
+#define USB_BULK	1
+#define USB_ISOC	2
+
+#define ISOC_PACKETS_D	8
+#define ISOC_PACKETS_B	8
+#define ISO_BUFFER_SIZE	128
+
+/* defines how many ISO packets are handled in one URB */
+static int iso_packets[8] =
+    { ISOC_PACKETS_B, ISOC_PACKETS_B, ISOC_PACKETS_B, ISOC_PACKETS_B,
+	ISOC_PACKETS_D, ISOC_PACKETS_D, ISOC_PACKETS_D, ISOC_PACKETS_D
+};
+
+
+/* Fifo flow Control for TX ISO */
+#define SINK_MAX	68
+#define SINK_MIN	48
+#define SINK_DMIN	12
+#define SINK_DMAX	18
+#define BITLINE_INF	(-96*8)
+
+/* HFC-S USB register access by control URBs */
+#define write_reg_atomic(a, b, c) \
+	usb_control_msg((a)->dev, (a)->ctrl_out_pipe, 0, 0x40, (c), (b), \
+		0, 0, HFC_CTRL_TIMEOUT)
+#define read_reg_atomic(a, b, c) \
+	usb_control_msg((a)->dev, (a)->ctrl_in_pipe, 1, 0xC0, 0, (b), (c), \
+		1, HFC_CTRL_TIMEOUT)
+#define HFC_CTRL_BUFSIZE 64
+
+struct ctrl_buf {
+	__u8 hfcs_reg;		/* register number */
+	__u8 reg_val;		/* value to be written (or read) */
+};
+
+/*
+ * URB error codes
+ * Used to represent a list of values and their respective symbolic names
+ */
+struct hfcusb_symbolic_list {
+	const int num;
+	const char *name;
+};
+
+static struct hfcusb_symbolic_list urb_errlist[] = {
+	{-ENOMEM, "No memory for allocation of internal structures"},
+	{-ENOSPC, "The host controller's bandwidth is already consumed"},
+	{-ENOENT, "URB was canceled by unlink_urb"},
+	{-EXDEV, "ISO transfer only partially completed"},
+	{-EAGAIN, "Too match scheduled for the future"},
+	{-ENXIO, "URB already queued"},
+	{-EFBIG, "Too much ISO frames requested"},
+	{-ENOSR, "Buffer error (overrun)"},
+	{-EPIPE, "Specified endpoint is stalled (device not responding)"},
+	{-EOVERFLOW, "Babble (bad cable?)"},
+	{-EPROTO, "Bit-stuff error (bad cable?)"},
+	{-EILSEQ, "CRC/Timeout"},
+	{-ETIMEDOUT, "NAK (device does not respond)"},
+	{-ESHUTDOWN, "Device unplugged"},
+	{-1, NULL}
+};
+
+static inline const char *
+symbolic(struct hfcusb_symbolic_list list[], const int num)
+{
+	int i;
+	for (i = 0; list[i].name != NULL; i++)
+		if (list[i].num == num)
+			return list[i].name;
+	return "<unkown USB Error>";
+}
+
+/* USB descriptor needs to contain one of the following endpoint combinations: */
+#define CNF_4INT3ISO	1	/* 4 INT IN, 3 ISO OUT */
+#define CNF_3INT3ISO	2	/* 3 INT IN, 3 ISO OUT */
+#define CNF_4ISO3ISO	3	/* 4 ISO IN, 3 ISO OUT */
+#define CNF_3ISO3ISO	4	/* 3 ISO IN, 3 ISO OUT */
+
+#define EP_NUL 1	/* Endpoint at this position not allowed */
+#define EP_NOP 2	/* all type of endpoints allowed at this position */
+#define EP_ISO 3	/* Isochronous endpoint mandatory at this position */
+#define EP_BLK 4	/* Bulk endpoint mandatory at this position */
+#define EP_INT 5	/* Interrupt endpoint mandatory at this position */
+
+#define HFC_CHAN_B1	0
+#define HFC_CHAN_B2	1
+#define HFC_CHAN_D	2
+#define HFC_CHAN_E	3
+
+
+/*
+ * List of all supported endpoint configuration sets, used to find the
+ * best matching endpoint configuration within a device's USB descriptor.
+ * We need at least 3 RX endpoints and 3 TX endpoints, either
+ * INT-in and ISO-out, or ISO-in and ISO-out.
+ * With 4 RX endpoints even E-Channel logging is possible.
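+ * Each row holds one entry per possible endpoint address slot (indices
+ * 0..15), followed by the CNF_* configuration id (index 16) and the
+ * minimum accepted bInterval for interrupt endpoints (index 17), as
+ * evaluated in hfcsusb_probe().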
+ */
+static int
+validconf[][19] = {
+	/* INT in, ISO out config */
+	{EP_NUL, EP_INT, EP_NUL, EP_INT, EP_NUL, EP_INT, EP_NOP, EP_INT,
+	 EP_ISO, EP_NUL, EP_ISO, EP_NUL, EP_ISO, EP_NUL, EP_NUL, EP_NUL,
+	 CNF_4INT3ISO, 2, 1},
+	{EP_NUL, EP_INT, EP_NUL, EP_INT, EP_NUL, EP_INT, EP_NUL, EP_NUL,
+	 EP_ISO, EP_NUL, EP_ISO, EP_NUL, EP_ISO, EP_NUL, EP_NUL, EP_NUL,
+	 CNF_3INT3ISO, 2, 0},
+	/* ISO in, ISO out config */
+	{EP_NOP, EP_NOP, EP_NOP, EP_NOP, EP_NOP, EP_NOP, EP_NOP, EP_NOP,
+	 EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_NOP, EP_ISO,
+	 CNF_4ISO3ISO, 2, 1},
+	{EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL,
+	 EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_NUL, EP_NUL,
+	 CNF_3ISO3ISO, 2, 0},
+	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} /* EOL element */
+};
+
+/* string description of chosen config */
+char *conf_str[] = {
+	"4 Interrupt IN + 3 Isochron OUT",
+	"3 Interrupt IN + 3 Isochron OUT",
+	"4 Isochron IN + 3 Isochron OUT",
+	"3 Isochron IN + 3 Isochron OUT"
+};
+
+
+#define LED_OFF		0	/* no LED support */
+#define LED_SCHEME1	1	/* LED standard scheme */
+#define LED_SCHEME2	2	/* not used yet... */
+
+#define LED_POWER_ON	1
+#define LED_POWER_OFF	2
+#define LED_S0_ON	3
+#define LED_S0_OFF	4
+#define LED_B1_ON	5
+#define LED_B1_OFF	6
+#define LED_B1_DATA	7
+#define LED_B2_ON	8
+#define LED_B2_OFF	9
+#define LED_B2_DATA	10
+
+#define LED_NORMAL	0	/* LEDs are normal */
+#define LED_INVERTED 	1	/* LEDs are inverted */
+
+/* time in ms to perform a Flashing LED when B-Channel has traffic */
+#define LED_TIME      250
+
+
+
+struct hfcsusb;
+struct usb_fifo;
+
+/* URB and buffer for one half of a fifo's ISO double buffer */
+struct iso_urb {
+	struct urb *urb;
+	__u8 buffer[ISO_BUFFER_SIZE];	/* buffer rx/tx USB URB data */
+	struct usb_fifo *owner_fifo;	/* pointer to owner fifo */
+	__u8 indx; /* fifo's ISO double buffer: 0 or 1? */
+#ifdef ISO_FRAME_START_DEBUG
+	int start_frames[ISO_FRAME_START_RING_COUNT];
+	__u8 iso_frm_strt_pos; /* index in start_frame[] */
+#endif
+};
+
+struct usb_fifo {
+	int fifonum;		/* fifo index attached to this structure */
+	int active;		/* fifo is currently active */
+	struct hfcsusb *hw;	/* pointer to main structure */
+	int pipe;		/* address of endpoint */
+	__u8 usb_packet_maxlen;	/* maximum length for usb transfer */
+	unsigned int max_size;	/* maximum size of receive/send packet */
+	__u8 intervall;		/* interrupt interval */
+	struct urb *urb;	/* transfer structure for usb routines */
+	__u8 buffer[128];	/* buffer USB INT OUT URB data */
+	int bit_line;		/* how much bits are in the fifo? */
+
+	__u8 usb_transfer_mode; /* switched between ISO and INT */
+	struct iso_urb	iso[2]; /* two urbs, so that one is always
+					 pending */
+
+	struct dchannel *dch;	/* link to hfcsusb_t->dch */
+	struct bchannel *bch;	/* link to hfcsusb_t->bch */
+	struct dchannel *ech;	/* link to hfcsusb_t->ech, TODO: E-CHANNEL */
+	int last_urblen;	/* remember length of last packet */
+	__u8 stop_gracefull;	/* stops URB retransmission */
+};
+
+struct hfcsusb {
+	struct list_head	list;
+	struct dchannel		dch;
+	struct bchannel		bch[2];
+	struct dchannel		ech; /* TODO : wait for struct echannel ;) */
+
+	struct usb_device	*dev;		/* our device */
+	struct usb_interface	*intf;		/* used interface */
+	int			if_used;	/* used interface number */
+	int			alt_used;	/* used alternate config */
+	int			cfg_used;	/* configuration index used */
+	int			vend_idx;	/* index in hfcsusb_idtab */
+	int			packet_size;
+	int			iso_packet_size;
+	struct usb_fifo		fifos[HFCUSB_NUM_FIFOS];
+
+	/* control pipe background handling */
+	struct ctrl_buf		ctrl_buff[HFC_CTRL_BUFSIZE];
+	int			ctrl_in_idx, ctrl_out_idx, ctrl_cnt;
+	struct urb		*ctrl_urb;
+	struct usb_ctrlrequest	ctrl_write;
+	struct usb_ctrlrequest	ctrl_read;
+	int			ctrl_paksize;
+	int			ctrl_in_pipe, ctrl_out_pipe;
+	spinlock_t		ctrl_lock; /* lock for ctrl */
+	spinlock_t              lock;
+
+	__u8			threshold_mask;
+	__u8			led_state;
+
+	__u8			protocol;
+	int			nt_timer;
+	int			open;
+	__u8			timers;
+	__u8			initdone;
+	char			name[MISDN_MAX_IDLEN];
+};
+
+/* private vendor specific data */
+struct hfcsusb_vdata {
+	__u8		led_scheme;  /* led display scheme */
+	signed short	led_bits[8]; /* array of 8 possible LED bitmask */
+	char		*vend_name;  /* device name */
+};
+
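+/*
+ * The probe routine can recover this vendor data from the matched
+ * usb_device_id (hypothetical snippet; 'id' is the probe argument and
+ * the printk is illustrative only):
+ *
+ *	struct hfcsusb_vdata *vdata =
+ *		(struct hfcsusb_vdata *)(unsigned long)id->driver_info;
+ *	printk(KERN_DEBUG "found %s\n", vdata->vend_name);
+ */
+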
+
+#define HFC_MAX_TE_LAYER1_STATE 8
+#define HFC_MAX_NT_LAYER1_STATE 4
+
+const char *HFC_TE_LAYER1_STATES[HFC_MAX_TE_LAYER1_STATE + 1] = {
+	"TE F0 - Reset",
+	"TE F1 - Reset",
+	"TE F2 - Sensing",
+	"TE F3 - Deactivated",
+	"TE F4 - Awaiting signal",
+	"TE F5 - Identifying input",
+	"TE F6 - Synchronized",
+	"TE F7 - Activated",
+	"TE F8 - Lost framing",
+};
+
+const char *HFC_NT_LAYER1_STATES[HFC_MAX_NT_LAYER1_STATE + 1] = {
+	"NT G0 - Reset",
+	"NT G1 - Deactive",
+	"NT G2 - Pending activation",
+	"NT G3 - Active",
+	"NT G4 - Pending deactivation",
+};
+
+/* supported devices */
+static struct usb_device_id hfcsusb_idtab[] = {
+	{
+	 USB_DEVICE(0x0959, 0x2bd0),
+	 .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+			  {LED_OFF, {4, 0, 2, 1},
+			   "ISDN USB TA (Cologne Chip HFC-S USB based)"}),
+	},
+	{
+	 USB_DEVICE(0x0675, 0x1688),
+	 .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+			  {LED_SCHEME1, {1, 2, 0, 0},
+			   "DrayTek miniVigor 128 USB ISDN TA"}),
+	},
+	{
+	 USB_DEVICE(0x07b0, 0x0007),
+	 .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+			  {LED_SCHEME1, {0x80, -64, -32, -16},
+			   "Billion tiny USB ISDN TA 128"}),
+	},
+	{
+	 USB_DEVICE(0x0742, 0x2008),
+	 .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+			  {LED_SCHEME1, {4, 0, 2, 1},
+			   "Stollmann USB TA"}),
+	},
+	{
+	 USB_DEVICE(0x0742, 0x2009),
+	 .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+			  {LED_SCHEME1, {4, 0, 2, 1},
+			   "Aceex USB ISDN TA"}),
+	},
+	{
+	 USB_DEVICE(0x0742, 0x200A),
+	 .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+			  {LED_SCHEME1, {4, 0, 2, 1},
+			   "OEM USB ISDN TA"}),
+	},
+	{
+	 USB_DEVICE(0x08e3, 0x0301),
+	 .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+			  {LED_SCHEME1, {2, 0, 1, 4},
+			   "Olitec USB RNIS"}),
+	},
+	{
+	 USB_DEVICE(0x07fa, 0x0846),
+	 .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+			  {LED_SCHEME1, {0x80, -64, -32, -16},
+			   "Bewan Modem RNIS USB"}),
+	},
+	{
+	 USB_DEVICE(0x07fa, 0x0847),
+	 .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+			  {LED_SCHEME1, {0x80, -64, -32, -16},
+			   "Djinn Numeris USB"}),
+	},
+	{
+	 USB_DEVICE(0x07b0, 0x0006),
+	 .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+			  {LED_SCHEME1, {0x80, -64, -32, -16},
+			   "Twister ISDN TA"}),
+	},
+	{
+	 USB_DEVICE(0x071d, 0x1005),
+	 .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+			  {LED_SCHEME1, {0x02, 0, 0x01, 0x04},
+			   "Eicon DIVA USB 4.0"}),
+	},
+	{
+	 USB_DEVICE(0x0586, 0x0102),
+	 .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+			  {LED_SCHEME1, {0x88, -64, -32, -16},
+			   "ZyXEL OMNI.NET USB II"}),
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(usb, hfcsusb_idtab);
+
+#endif	/* __HFCSUSB_H__ */
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index 7ee5bd9..579974c 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -38,16 +38,12 @@
 /* inside the definition.                                                   */
 /****************************************************************************/
 struct net_local {
-	struct net_device netdev;	/* the network device */
-	struct net_device_stats stats;
-	/* additional vars may be added here */
-	char dev_name[9];	/* our own device name */
-
 	/* Tx control lock.  This protects the transmit buffer ring
 	 * state along with the "tx full" state of the driver.  This
 	 * means all netif_queue flow control actions are protected
 	 * by this lock as well.
 	 */
+	struct net_device *dev;
 	spinlock_t lock;
 	struct sk_buff *skbs[MAX_SKB_BUFFERS];	/* pointers to tx-skbs */
 	int in_idx, out_idx;	/* indexes to buffer ring */
@@ -55,15 +51,6 @@
 };				/* net_local */
 
 
-/*****************************************************/
-/* Get the current statistics for this card.         */
-/* This may be called with the card open or closed ! */
-/*****************************************************/
-static struct net_device_stats *
-net_get_stats(struct net_device *dev)
-{
-	return (&((struct net_local *) dev)->stats);
-}				/* net_device_stats */
 
 /*********************************************************************/
 /* Open/initialize the board. This is called (in the current kernel) */
@@ -182,8 +169,8 @@
 	if (!lp->sk_count)
 		return;		/* error condition */
 
-	lp->stats.tx_packets++;
-	lp->stats.tx_bytes += lp->skbs[lp->out_idx]->len;
+	lp->dev->stats.tx_packets++;
+	lp->dev->stats.tx_bytes += lp->skbs[lp->out_idx]->len;
 
 	dev_kfree_skb(lp->skbs[lp->out_idx++]);		/* free skb */
 	if (lp->out_idx >= MAX_SKB_BUFFERS)
@@ -200,29 +187,30 @@
 hysdn_rx_netpkt(hysdn_card * card, unsigned char *buf, unsigned short len)
 {
 	struct net_local *lp = card->netif;
+	struct net_device *dev = lp->dev;
 	struct sk_buff *skb;
 
 	if (!lp)
 		return;		/* non existing device */
 
-	lp->stats.rx_bytes += len;
+	dev->stats.rx_bytes += len;
 
 	skb = dev_alloc_skb(len);
 	if (skb == NULL) {
 		printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
-		       lp->netdev.name);
-		lp->stats.rx_dropped++;
+		       dev->name);
+		dev->stats.rx_dropped++;
 		return;
 	}
 	/* copy the data */
 	memcpy(skb_put(skb, len), buf, len);
 
 	/* determine the used protocol */
-	skb->protocol = eth_type_trans(skb, &lp->netdev);
+	skb->protocol = eth_type_trans(skb, dev);
+
+	dev->stats.rx_packets++;	/* adjust packet count */
 
 	netif_rx(skb);
-	lp->stats.rx_packets++;	/* adjust packet count */
-
 }				/* hysdn_rx_netpkt */
 
 /*****************************************************/
@@ -242,24 +230,15 @@
 	return (lp->skbs[lp->out_idx]);		/* next packet to send */
 }				/* hysdn_tx_netget */
 
+static const struct net_device_ops hysdn_netdev_ops = {
+	.ndo_open 		= net_open,
+	.ndo_stop		= net_close,
+	.ndo_start_xmit		= net_send_packet,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
 
-/*******************************************/
-/* init function called by register device */
-/*******************************************/
-static int
-net_init(struct net_device *dev)
-{
-	/* setup the function table */
-	dev->open = net_open;
-	dev->stop = net_close;
-	dev->hard_start_xmit = net_send_packet;
-	dev->get_stats = net_get_stats;
-
-	/* Fill in the fields of the device structure with ethernet values. */
-	ether_setup(dev);
-
-	return (0);		/* success */
-}				/* net_init */
 
 /*****************************************************************************/
 /* hysdn_net_create creates a new net device for the given card. If a device */
@@ -271,28 +250,34 @@
 {
 	struct net_device *dev;
 	int i;
+	struct net_local *lp;
+
 	if(!card) {
 		printk(KERN_WARNING "No card-pt in hysdn_net_create!\n");
 		return (-ENOMEM);
 	}
 	hysdn_net_release(card);	/* release an existing net device */
-	if ((dev = kzalloc(sizeof(struct net_local), GFP_KERNEL)) == NULL) {
+
+	dev = alloc_etherdev(sizeof(struct net_local));
+	if (!dev) {
 		printk(KERN_WARNING "HYSDN: unable to allocate mem\n");
 		return (-ENOMEM);
 	}
 
+	lp = netdev_priv(dev);
+	lp->dev = dev;
+
+	dev->netdev_ops = &hysdn_netdev_ops;
 	spin_lock_init(&((struct net_local *) dev)->lock);
 
 	/* initialise necessary or informing fields */
 	dev->base_addr = card->iobase;	/* IO address */
 	dev->irq = card->irq;	/* irq */
-	dev->init = net_init;	/* the init function of the device */
-	if(dev->name) {
-		strcpy(dev->name, ((struct net_local *) dev)->dev_name);
-	} 
+
+	dev->netdev_ops = &hysdn_netdev_ops;
 	if ((i = register_netdev(dev))) {
 		printk(KERN_WARNING "HYSDN: unable to create network device\n");
-		kfree(dev);
+		free_netdev(dev);
 		return (i);
 	}
 	dev->ml_priv = card;	/* remember pointer to own data structure */
@@ -316,7 +301,7 @@
 		return (0);	/* non existing */
 
 	card->netif = NULL;	/* clear out pointer */
-	dev->stop(dev);		/* close the device */
+	net_close(dev);
 
 	flush_tx_buffers((struct net_local *) dev);	/* empty buffers */
 
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 023ea11..cb8943d 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -292,7 +292,9 @@
 	lp->dialstate = 0;
 	dev->rx_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL;
 	dev->st_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL;
-	isdn_free_channel(lp->isdn_device, lp->isdn_channel, ISDN_USAGE_NET);
+	if (lp->isdn_device != -1 && lp->isdn_channel != -1)
+		isdn_free_channel(lp->isdn_device, lp->isdn_channel,
+				  ISDN_USAGE_NET);
 	lp->flags &= ~ISDN_NET_CONNECTED;
 	lp->isdn_device = -1;
 	lp->isdn_channel = -1;
@@ -1485,6 +1487,24 @@
 	return (rc);
 }
 
+
+static int isdn_net_ioctl(struct net_device *dev,
+			  struct ifreq *ifr, int cmd)
+{
+	isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
+
+	switch (lp->p_encap) {
+#ifdef CONFIG_ISDN_PPP
+	case ISDN_NET_ENCAP_SYNCPPP:
+		return isdn_ppp_dev_ioctl(dev, ifr, cmd);
+#endif
+	case ISDN_NET_ENCAP_CISCOHDLCK:
+		return isdn_ciscohdlck_dev_ioctl(dev, ifr, cmd);
+	default:
+		return -EINVAL;
+	}
+}
+
 /* called via cisco_timer.function */
 static void
 isdn_net_ciscohdlck_slarp_send_keepalive(unsigned long data)
@@ -1998,23 +2018,6 @@
 	ushort max_hlhdr_len = 0;
 	int drvidx;
 
-	ether_setup(ndev);
-	ndev->header_ops = NULL;
-
-	/* Setup the generic properties */
-	ndev->mtu = 1500;
-	ndev->flags = IFF_NOARP|IFF_POINTOPOINT;
-	ndev->type = ARPHRD_ETHER;
-	ndev->addr_len = ETH_ALEN;
-	ndev->validate_addr = NULL;
-
-	/* for clients with MPPP maybe higher values better */
-	ndev->tx_queue_len = 30;
-
-	/* The ISDN-specific entries in the device structure. */
-	ndev->open = &isdn_net_open;
-	ndev->hard_start_xmit = &isdn_net_start_xmit;
-
 	/*
 	 *  up till binding we ask the protocol layer to reserve as much
 	 *  as we might need for HL layer
@@ -2026,9 +2029,6 @@
 				max_hlhdr_len = dev->drv[drvidx]->interface->hl_hdrlen;
 
 	ndev->hard_header_len = ETH_HLEN + max_hlhdr_len;
-	ndev->stop = &isdn_net_close;
-	ndev->get_stats = &isdn_net_get_stats;
-	ndev->do_ioctl = NULL;
 	return 0;
 }
 
@@ -2508,6 +2508,18 @@
 	return (isdn_net_force_dial_lp(p->local));
 }
 
+/* The ISDN-specific entries in the device structure. */
+static const struct net_device_ops isdn_netdev_ops = {
+	.ndo_init	      = isdn_net_init,
+	.ndo_open	      = isdn_net_open,
+	.ndo_stop	      = isdn_net_close,
+	.ndo_do_ioctl	      = isdn_net_ioctl,
+
+	.ndo_start_xmit	      = isdn_net_start_xmit,
+	.ndo_get_stats	      = isdn_net_get_stats,
+	.ndo_tx_timeout	      = isdn_net_tx_timeout,
+};
+
 /*
  * Helper for alloc_netdev()
  */
@@ -2515,7 +2527,16 @@
 {
 	isdn_net_local *lp = netdev_priv(dev);
 
-	dev->flags = IFF_NOARP | IFF_POINTOPOINT;
+	ether_setup(dev);
+
+	/* Setup the generic properties */
+	dev->flags = IFF_NOARP|IFF_POINTOPOINT;
+	dev->header_ops = NULL;
+	dev->netdev_ops = &isdn_netdev_ops;
+
+	/* for clients with MPPP maybe higher values better */
+	dev->tx_queue_len = 30;
+
 	lp->p_encap = ISDN_NET_ENCAP_RAWIP;
 	lp->magic = ISDN_NET_MAGIC;
 	lp->last = lp;
@@ -2570,7 +2591,7 @@
 		return NULL;
 	}
 	netdev->local = netdev_priv(netdev->dev);
-	netdev->dev->init = isdn_net_init;
+
 	if (master) {
 		/* Device shall be a slave */
 		struct net_device *p = MASTER_TO_SLAVE(master);
@@ -2588,7 +2609,6 @@
 		/*
 		 * Watchdog timer (currently) for master only.
 		 */
-		netdev->dev->tx_timeout = isdn_net_tx_timeout;
 		netdev->dev->watchdog_timeo = ISDN_NET_TX_TIMEOUT;
 		if (register_netdev(netdev->dev) != 0) {
 			printk(KERN_WARNING "isdn_net: Could not register net-device\n");
@@ -2704,7 +2724,6 @@
 #else
 			p->dev->type = ARPHRD_PPP;	/* change ARP type */
 			p->dev->addr_len = 0;
-			p->dev->do_ioctl = isdn_ppp_dev_ioctl;
 #endif
 			break;
 		case ISDN_NET_ENCAP_X25IFACE:
@@ -2718,7 +2737,6 @@
 #endif
 			break;
 		case ISDN_NET_ENCAP_CISCOHDLCK:
-			p->dev->do_ioctl = isdn_ciscohdlck_dev_ioctl;
 			break;
 		default:
 			if( cfg->p_encap >= 0 &&
diff --git a/drivers/isdn/mISDN/Makefile b/drivers/isdn/mISDN/Makefile
index 1cb5e63..0a6bd2a 100644
--- a/drivers/isdn/mISDN/Makefile
+++ b/drivers/isdn/mISDN/Makefile
@@ -8,6 +8,6 @@
 
 # multi objects
 
-mISDN_core-objs := core.o fsm.o socket.o hwchannel.o stack.o layer1.o layer2.o tei.o timerdev.o
+mISDN_core-objs := core.o fsm.o socket.o clock.o hwchannel.o stack.o layer1.o layer2.o tei.o timerdev.o
 mISDN_dsp-objs := dsp_core.o dsp_cmx.o dsp_tones.o dsp_dtmf.o dsp_audio.o dsp_blowfish.o dsp_pipeline.o dsp_hwec.o
 l1oip-objs := l1oip_core.o l1oip_codec.o
diff --git a/drivers/isdn/mISDN/clock.c b/drivers/isdn/mISDN/clock.c
new file mode 100644
index 0000000..44d9c3d
--- /dev/null
+++ b/drivers/isdn/mISDN/clock.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2008  by Andreas Eversberg <andreas@eversberg.eu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Quick API description:
+ *
+ * A clock source registers using mISDN_register_clock:
+ * 	name = text string to name clock source
+ *	priority = value to prioritize clock sources (0 = default)
+ *	ctl = callback function to enable/disable clock source
+ *	priv = private pointer of clock source
+ * 	return = pointer to clock source structure;
+ *
+ * Note: Callback 'ctl' can be called before mISDN_register_clock returns!
+ *       Also it can be called during mISDN_unregister_clock.
+ *
+ * A clock source calls mISDN_clock_update with the number of samples that
+ * have elapsed, if it is enabled. If the call is delayed, tv must be set to
+ * the timestamp of the actual event.
+ *
+ * A clock source unregisters using mISDN_unregister_clock.
+ *
+ * To get the current clock, call mISDN_clock_get. The returned 16-bit value
+ * is the running sample counter; the time elapsed since the last clock event
+ * is added to it.
+ *
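+ * A minimal usage sketch (hypothetical driver code; the names
+ * mycard_clock_ctl, mycard, card and 'samples' are illustrative and not
+ * part of this API):
+ *
+ *	static void mycard_clock_ctl(void *priv, int enable)
+ *	{
+ *		struct mycard *card = priv;
+ *		card->send_clock_updates = enable;
+ *	}
+ *
+ *	card->clock = mISDN_register_clock("mycard", 0, mycard_clock_ctl,
+ *		card);
+ *	...
+ *	if (card->send_clock_updates)
+ *		mISDN_clock_update(card->clock, samples, NULL);
+ *	...
+ *	mISDN_unregister_clock(card->clock);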
+ */
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/mISDNif.h>
+#include "core.h"
+
+static u_int *debug;
+static LIST_HEAD(iclock_list);
+DEFINE_RWLOCK(iclock_lock);
+u16	iclock_count;		/* counter of last clock */
+struct	timeval iclock_tv;	/* time stamp of last clock */
+int	iclock_tv_valid;	/* already received one timestamp */
+struct	mISDNclock *iclock_current;
+
+void
+mISDN_init_clock(u_int *dp)
+{
+	debug = dp;
+	do_gettimeofday(&iclock_tv);
+}
+
+static void
+select_iclock(void)
+{
+	struct mISDNclock *iclock, *bestclock = NULL, *lastclock = NULL;
+	int pri = -128;
+
+	list_for_each_entry(iclock, &iclock_list, list) {
+		if (iclock->pri > pri) {
+			pri = iclock->pri;
+			bestclock = iclock;
+		}
+		if (iclock_current == iclock)
+			lastclock = iclock;
+	}
+	if (lastclock && bestclock != lastclock) {
+		/* old clock source still exists but is not best anymore, disable */
+		if (*debug & DEBUG_CLOCK)
+			printk(KERN_DEBUG "Old clock source '%s' disable.\n",
+				lastclock->name);
+		lastclock->ctl(lastclock->priv, 0);
+	}
+	if (bestclock && bestclock != iclock_current) {
+		/* new clock source selected, enable */
+		if (*debug & DEBUG_CLOCK)
+			printk(KERN_DEBUG "New clock source '%s' enable.\n",
+				bestclock->name);
+		bestclock->ctl(bestclock->priv, 1);
+	}
+	if (bestclock != iclock_current) {
+		/* no clock received yet */
+		iclock_tv_valid = 0;
+	}
+	iclock_current = bestclock;
+}
+
+struct mISDNclock
+*mISDN_register_clock(char *name, int pri, clockctl_func_t *ctl, void *priv)
+{
+	u_long			flags;
+	struct mISDNclock	*iclock;
+
+	if (*debug & (DEBUG_CORE | DEBUG_CLOCK))
+		printk(KERN_DEBUG "%s: %s %d\n", __func__, name, pri);
+	iclock = kzalloc(sizeof(struct mISDNclock), GFP_ATOMIC);
+	if (!iclock) {
+		printk(KERN_ERR "%s: No memory for clock entry.\n", __func__);
+		return NULL;
+	}
+	strncpy(iclock->name, name, sizeof(iclock->name)-1);
+	iclock->pri = pri;
+	iclock->priv = priv;
+	iclock->ctl = ctl;
+	write_lock_irqsave(&iclock_lock, flags);
+	list_add_tail(&iclock->list, &iclock_list);
+	select_iclock();
+	write_unlock_irqrestore(&iclock_lock, flags);
+	return iclock;
+}
+EXPORT_SYMBOL(mISDN_register_clock);
+
+void
+mISDN_unregister_clock(struct mISDNclock *iclock)
+{
+	u_long	flags;
+
+	if (*debug & (DEBUG_CORE | DEBUG_CLOCK))
+		printk(KERN_DEBUG "%s: %s %d\n", __func__, iclock->name,
+			iclock->pri);
+	write_lock_irqsave(&iclock_lock, flags);
+	if (iclock_current == iclock) {
+		if (*debug & DEBUG_CLOCK)
+			printk(KERN_DEBUG
+				"Current clock source '%s' unregisters.\n",
+				iclock->name);
+		iclock->ctl(iclock->priv, 0);
+	}
+	list_del(&iclock->list);
+	select_iclock();
+	write_unlock_irqrestore(&iclock_lock, flags);
+}
+EXPORT_SYMBOL(mISDN_unregister_clock);
+
+void
+mISDN_clock_update(struct mISDNclock *iclock, int samples, struct timeval *tv)
+{
+	u_long		flags;
+	struct timeval	tv_now;
+	time_t		elapsed_sec;
+	int		elapsed_8000th;
+
+	write_lock_irqsave(&iclock_lock, flags);
+	if (iclock_current != iclock) {
+		printk(KERN_ERR "%s: '%s' sends us clock updates, but we do "
+			"listen to '%s'. This is a bug!\n", __func__,
+			iclock->name,
+			iclock_current ? iclock_current->name : "nothing");
+		iclock->ctl(iclock->priv, 0);
+		write_unlock_irqrestore(&iclock_lock, flags);
+		return;
+	}
+	if (iclock_tv_valid) {
+		/* increment sample counter by given samples */
+		iclock_count += samples;
+		if (tv) { /* tv must be set, if function call is delayed */
+			iclock_tv.tv_sec = tv->tv_sec;
+			iclock_tv.tv_usec = tv->tv_usec;
+		} else
+			do_gettimeofday(&iclock_tv);
+	} else {
+		/* calc elapsed time by system clock */
+		if (tv) { /* tv must be set, if function call is delayed */
+			tv_now.tv_sec = tv->tv_sec;
+			tv_now.tv_usec = tv->tv_usec;
+		} else
+			do_gettimeofday(&tv_now);
+		elapsed_sec = tv_now.tv_sec - iclock_tv.tv_sec;
+		elapsed_8000th = (tv_now.tv_usec / 125)
+			- (iclock_tv.tv_usec / 125);
+		if (elapsed_8000th < 0) {
+			elapsed_sec -= 1;
+			elapsed_8000th += 8000;
+		}
+		/* add elapsed time to counter and set new timestamp */
+		iclock_count += elapsed_sec * 8000 + elapsed_8000th;
+		iclock_tv.tv_sec = tv_now.tv_sec;
+		iclock_tv.tv_usec = tv_now.tv_usec;
+		iclock_tv_valid = 1;
+		if (*debug & DEBUG_CLOCK)
+			printk("Received first clock from source '%s'.\n",
+			    iclock_current ? iclock_current->name : "nothing");
+	}
+	write_unlock_irqrestore(&iclock_lock, flags);
+}
+EXPORT_SYMBOL(mISDN_clock_update);
+
+unsigned short
+mISDN_clock_get(void)
+{
+	u_long		flags;
+	struct timeval	tv_now;
+	time_t		elapsed_sec;
+	int		elapsed_8000th;
+	u16		count;
+
+	read_lock_irqsave(&iclock_lock, flags);
+	/* calc elapsed time by system clock */
+	do_gettimeofday(&tv_now);
+	elapsed_sec = tv_now.tv_sec - iclock_tv.tv_sec;
+	elapsed_8000th = (tv_now.tv_usec / 125) - (iclock_tv.tv_usec / 125);
+	if (elapsed_8000th < 0) {
+		elapsed_sec -= 1;
+		elapsed_8000th += 8000;
+	}
+	/* add elapsed time to counter */
+	count =	iclock_count + elapsed_sec * 8000 + elapsed_8000th;
+	read_unlock_irqrestore(&iclock_lock, flags);
+	return count;
+}
+EXPORT_SYMBOL(mISDN_clock_get);
+
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index 751665c..9426c98 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -25,39 +25,183 @@
 MODULE_LICENSE("GPL");
 module_param(debug, uint, S_IRUGO | S_IWUSR);
 
-static LIST_HEAD(devices);
-static DEFINE_RWLOCK(device_lock);
 static u64		device_ids;
 #define MAX_DEVICE_ID	63
 
 static LIST_HEAD(Bprotocols);
 static DEFINE_RWLOCK(bp_lock);
 
+static void mISDN_dev_release(struct device *dev)
+{
+	/* nothing to do: the device is part of its parent's data structure */
+}
+
+static ssize_t _show_id(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct mISDNdevice *mdev = dev_to_mISDN(dev);
+
+	if (!mdev)
+		return -ENODEV;
+	return sprintf(buf, "%d\n", mdev->id);
+}
+
+static ssize_t _show_nrbchan(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct mISDNdevice *mdev = dev_to_mISDN(dev);
+
+	if (!mdev)
+		return -ENODEV;
+	return sprintf(buf, "%d\n", mdev->nrbchan);
+}
+
+static ssize_t _show_d_protocols(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct mISDNdevice *mdev = dev_to_mISDN(dev);
+
+	if (!mdev)
+		return -ENODEV;
+	return sprintf(buf, "%d\n", mdev->Dprotocols);
+}
+
+static ssize_t _show_b_protocols(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct mISDNdevice *mdev = dev_to_mISDN(dev);
+
+	if (!mdev)
+		return -ENODEV;
+	return sprintf(buf, "%d\n", mdev->Bprotocols | get_all_Bprotocols());
+}
+
+static ssize_t _show_protocol(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct mISDNdevice *mdev = dev_to_mISDN(dev);
+
+	if (!mdev)
+		return -ENODEV;
+	return sprintf(buf, "%d\n", mdev->D.protocol);
+}
+
+static ssize_t _show_name(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	strcpy(buf, dev_name(dev));
+	return strlen(buf);
+}
+
+#if 0 /* hangs */
+static ssize_t _set_name(struct device *dev, struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	int err = 0;
+	char *out = kmalloc(count + 1, GFP_KERNEL);
+
+	if (!out)
+		return -ENOMEM;
+
+	memcpy(out, buf, count);
+	if (count && out[count - 1] == '\n')
+		out[--count] = 0;
+	if (count)
+		err = device_rename(dev, out);
+	kfree(out);
+
+	return (err < 0) ? err : count;
+}
+#endif
+
+static ssize_t _show_channelmap(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct mISDNdevice *mdev = dev_to_mISDN(dev);
+	char *bp = buf;
+	int i;
+
+	for (i = 0; i <= mdev->nrbchan; i++)
+		*bp++ = test_channelmap(i, mdev->channelmap) ? '1' : '0';
+
+	return bp - buf;
+}
+
+static struct device_attribute mISDN_dev_attrs[] = {
+	__ATTR(id,          S_IRUGO,         _show_id,          NULL),
+	__ATTR(d_protocols, S_IRUGO,         _show_d_protocols, NULL),
+	__ATTR(b_protocols, S_IRUGO,         _show_b_protocols, NULL),
+	__ATTR(protocol,    S_IRUGO,         _show_protocol,    NULL),
+	__ATTR(channelmap,  S_IRUGO,         _show_channelmap,  NULL),
+	__ATTR(nrbchan,     S_IRUGO,         _show_nrbchan,     NULL),
+	__ATTR(name,        S_IRUGO,         _show_name,        NULL),
+/*	__ATTR(name,        S_IRUGO|S_IWUSR, _show_name,       _set_name), */
+	{}
+};
+
+#ifdef CONFIG_HOTPLUG
+static int mISDN_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct mISDNdevice *mdev = dev_to_mISDN(dev);
+
+	if (!mdev)
+		return 0;
+
+	if (add_uevent_var(env, "nchans=%d", mdev->nrbchan))
+		return -ENOMEM;
+
+	return 0;
+}
+#endif
+
+static void mISDN_class_release(struct class *cls)
+{
+	/* do nothing, it's static */
+}
+
+static struct class mISDN_class = {
+	.name = "mISDN",
+	.owner = THIS_MODULE,
+#ifdef CONFIG_HOTPLUG
+	.dev_uevent = mISDN_uevent,
+#endif
+	.dev_attrs = mISDN_dev_attrs,
+	.dev_release = mISDN_dev_release,
+	.class_release = mISDN_class_release,
+};
+
+static int
+_get_mdevice(struct device *dev, void *id)
+{
+	struct mISDNdevice *mdev = dev_to_mISDN(dev);
+
+	if (!mdev)
+		return 0;
+	if (mdev->id != *(u_int *)id)
+		return 0;
+	return 1;
+}
+
 struct mISDNdevice
 *get_mdevice(u_int id)
 {
-	struct mISDNdevice	*dev;
+	return dev_to_mISDN(class_find_device(&mISDN_class, NULL, &id,
+		_get_mdevice));
+}
 
-	read_lock(&device_lock);
-	list_for_each_entry(dev, &devices, D.list)
-		if (dev->id == id) {
-			read_unlock(&device_lock);
-			return dev;
-		}
-	read_unlock(&device_lock);
-	return NULL;
+static int
+_get_mdevice_count(struct device *dev, void *cnt)
+{
+	*(int *)cnt += 1;
+	return 0;
 }
 
 int
 get_mdevice_count(void)
 {
-	struct mISDNdevice	*dev;
-	int			cnt = 0;
+	int cnt = 0;
 
-	read_lock(&device_lock);
-	list_for_each_entry(dev, &devices, D.list)
-		cnt++;
-	read_unlock(&device_lock);
+	class_for_each_device(&mISDN_class, NULL, &cnt, _get_mdevice_count);
 	return cnt;
 }
 
@@ -68,48 +212,66 @@
 
 	for (i = 0; i <= MAX_DEVICE_ID; i++)
 		if (!test_and_set_bit(i, (u_long *)&device_ids))
-			return i;
-	return -1;
+			break;
+	if (i > MAX_DEVICE_ID)
+		return -1;
+	return i;
 }
 
 int
-mISDN_register_device(struct mISDNdevice *dev, char *name)
+mISDN_register_device(struct mISDNdevice *dev,
+			struct device *parent, char *name)
 {
-	u_long	flags;
 	int	err;
 
 	dev->id = get_free_devid();
+	err = -EBUSY;
 	if (dev->id < 0)
-		return -EBUSY;
+		goto error1;
+
+	device_initialize(&dev->dev);
 	if (name && name[0])
-		strcpy(dev->name, name);
+		dev_set_name(&dev->dev, "%s", name);
 	else
-		sprintf(dev->name, "mISDN%d", dev->id);
+		dev_set_name(&dev->dev, "mISDN%d", dev->id);
 	if (debug & DEBUG_CORE)
 		printk(KERN_DEBUG "mISDN_register %s %d\n",
-			dev->name, dev->id);
+			dev_name(&dev->dev), dev->id);
 	err = create_stack(dev);
 	if (err)
-		return err;
-	write_lock_irqsave(&device_lock, flags);
-	list_add_tail(&dev->D.list, &devices);
-	write_unlock_irqrestore(&device_lock, flags);
+		goto error1;
+
+	dev->dev.class = &mISDN_class;
+	dev->dev.platform_data = dev;
+	dev->dev.parent = parent;
+	dev_set_drvdata(&dev->dev, dev);
+
+	err = device_add(&dev->dev);
+	if (err)
+		goto error3;
 	return 0;
+
+error3:
+	delete_stack(dev);
+	return err;
+error1:
+	return err;
+
 }
 EXPORT_SYMBOL(mISDN_register_device);
 
 void
 mISDN_unregister_device(struct mISDNdevice *dev) {
-	u_long	flags;
-
 	if (debug & DEBUG_CORE)
 		printk(KERN_DEBUG "mISDN_unregister %s %d\n",
-			dev->name, dev->id);
-	write_lock_irqsave(&device_lock, flags);
-	list_del(&dev->D.list);
-	write_unlock_irqrestore(&device_lock, flags);
+			dev_name(&dev->dev), dev->id);
+	/* sysfs_remove_link(&dev->dev.kobj, "device"); */
+	device_del(&dev->dev);
+	dev_set_drvdata(&dev->dev, NULL);
+
 	test_and_clear_bit(dev->id, (u_long *)&device_ids);
 	delete_stack(dev);
+	put_device(&dev->dev);
 }
 EXPORT_SYMBOL(mISDN_unregister_device);
 
@@ -199,43 +361,45 @@
 
 	printk(KERN_INFO "Modular ISDN core version %d.%d.%d\n",
 		MISDN_MAJOR_VERSION, MISDN_MINOR_VERSION, MISDN_RELEASE);
+	mISDN_init_clock(&debug);
 	mISDN_initstack(&debug);
+	err = class_register(&mISDN_class);
+	if (err)
+		goto error1;
 	err = mISDN_inittimer(&debug);
 	if (err)
-		goto error;
+		goto error2;
 	err = l1_init(&debug);
-	if (err) {
-		mISDN_timer_cleanup();
-		goto error;
-	}
+	if (err)
+		goto error3;
 	err = Isdnl2_Init(&debug);
-	if (err) {
-		mISDN_timer_cleanup();
-		l1_cleanup();
-		goto error;
-	}
+	if (err)
+		goto error4;
 	err = misdn_sock_init(&debug);
-	if (err) {
-		mISDN_timer_cleanup();
-		l1_cleanup();
-		Isdnl2_cleanup();
-	}
-error:
+	if (err)
+		goto error5;
+	return 0;
+
+error5:
+	Isdnl2_cleanup();
+error4:
+	l1_cleanup();
+error3:
+	mISDN_timer_cleanup();
+error2:
+	class_unregister(&mISDN_class);
+error1:
 	return err;
 }
 
 static void mISDN_cleanup(void)
 {
 	misdn_sock_cleanup();
-	mISDN_timer_cleanup();
-	l1_cleanup();
 	Isdnl2_cleanup();
+	l1_cleanup();
+	mISDN_timer_cleanup();
+	class_unregister(&mISDN_class);
 
-	if (!list_empty(&devices))
-		printk(KERN_ERR "%s devices still registered\n", __func__);
-
-	if (!list_empty(&Bprotocols))
-		printk(KERN_ERR "%s Bprotocols still registered\n", __func__);
 	printk(KERN_DEBUG "mISDNcore unloaded\n");
 }
 
diff --git a/drivers/isdn/mISDN/core.h b/drivers/isdn/mISDN/core.h
index 7da7233..7ac2f81 100644
--- a/drivers/isdn/mISDN/core.h
+++ b/drivers/isdn/mISDN/core.h
@@ -74,4 +74,6 @@
 extern int 	Isdnl2_Init(u_int *);
 extern void	Isdnl2_cleanup(void);
 
+extern void	mISDN_init_clock(u_int *);
+
 #endif
diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h
index 6c3fed6..98a33c58f 100644
--- a/drivers/isdn/mISDN/dsp.h
+++ b/drivers/isdn/mISDN/dsp.h
@@ -15,6 +15,7 @@
 #define DEBUG_DSP_TONE		0x0020
 #define DEBUG_DSP_BLOWFISH	0x0040
 #define DEBUG_DSP_DELAY		0x0100
+#define DEBUG_DSP_CLOCK		0x0200
 #define DEBUG_DSP_DTMFCOEFF	0x8000 /* heavy output */
 
 /* options may be:
@@ -198,6 +199,7 @@
 	/* hardware stuff */
 	struct dsp_features features;
 	int		features_rx_off; /* set if rx_off is featured */
+	int		features_fill_empty; /* set if fill_empty is featured */
 	int		pcm_slot_rx; /* current PCM slot (or -1) */
 	int		pcm_bank_rx;
 	int		pcm_slot_tx;
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index c884511..58c43e4 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -137,6 +137,7 @@
 /* #define CMX_CONF_DEBUG */
 
 /*#define CMX_DEBUG * massive read/write pointer output */
+/*#define CMX_DELAY_DEBUG * gives rx-buffer delay overview */
 /*#define CMX_TX_DEBUG * massive read/write on tx-buffer with content */
 
 static inline int
@@ -744,11 +745,11 @@
 					if (dsp->pcm_slot_rx >= 0 &&
 					    dsp->pcm_slot_rx <
 					    sizeof(freeslots))
-						freeslots[dsp->pcm_slot_tx] = 0;
+						freeslots[dsp->pcm_slot_rx] = 0;
 					if (dsp->pcm_slot_tx >= 0 &&
 					    dsp->pcm_slot_tx <
 					    sizeof(freeslots))
-						freeslots[dsp->pcm_slot_rx] = 0;
+						freeslots[dsp->pcm_slot_tx] = 0;
 				}
 			}
 			i = 0;
@@ -836,11 +837,11 @@
 					if (dsp->pcm_slot_rx >= 0 &&
 					    dsp->pcm_slot_rx <
 					    sizeof(freeslots))
-						freeslots[dsp->pcm_slot_tx] = 0;
+						freeslots[dsp->pcm_slot_rx] = 0;
 					if (dsp->pcm_slot_tx >= 0 &&
 					    dsp->pcm_slot_tx <
 					    sizeof(freeslots))
-						freeslots[dsp->pcm_slot_rx] = 0;
+						freeslots[dsp->pcm_slot_tx] = 0;
 				}
 			}
 			i1 = 0;
@@ -926,10 +927,6 @@
 
 	/* for more than two members.. */
 
-	/* in case of hdlc, we change to software */
-	if (dsp->hdlc)
-		goto conf_software;
-
 	/* if all members already have the same conference */
 	if (all_conf)
 		return;
@@ -940,6 +937,9 @@
 	if (current_conf >= 0) {
 join_members:
 		list_for_each_entry(member, &conf->mlist, list) {
+			/* in case of hdlc, change to software */
+			if (member->dsp->hdlc)
+				goto conf_software;
 			/* join to current conference */
 			if (member->dsp->hfc_conf == current_conf)
 				continue;
@@ -1135,6 +1135,25 @@
 	return 0;
 }
 
+#ifdef CMX_DELAY_DEBUG
+int delaycount;
+static void
+showdelay(struct dsp *dsp, int samples, int delay)
+{
+	char bar[] = "--------------------------------------------------|";
+	int sdelay;
+
+	delaycount += samples;
+	if (delaycount < 8000)
+		return;
+	delaycount = 0;
+
+	sdelay = delay * 50 / (dsp_poll << 2);
+
+	printk(KERN_DEBUG "DELAY (%s) %3d >%s\n", dsp->name, delay,
+		sdelay > 50 ? "..." : bar + 50 - sdelay);
+}
+#endif
 
 /*
  * audio data is received from card
@@ -1168,11 +1187,18 @@
 		dsp->rx_init = 0;
 		if (dsp->features.unordered) {
 			dsp->rx_R = (hh->id & CMX_BUFF_MASK);
-			dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
-				& CMX_BUFF_MASK;
+			if (dsp->cmx_delay)
+				dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
+					& CMX_BUFF_MASK;
+			else
+				dsp->rx_W = (dsp->rx_R + (dsp_poll >> 1))
+					& CMX_BUFF_MASK;
 		} else {
 			dsp->rx_R = 0;
-			dsp->rx_W = dsp->cmx_delay;
+			if (dsp->cmx_delay)
+				dsp->rx_W = dsp->cmx_delay;
+			else
+				dsp->rx_W = dsp_poll >> 1;
 		}
 	}
 	/* if frame contains time code, write directly */
@@ -1185,19 +1211,25 @@
 	 * we set our new read pointer, and write silence to buffer
 	 */
 	if (((dsp->rx_W-dsp->rx_R) & CMX_BUFF_MASK) >= CMX_BUFF_HALF) {
-		if (dsp_debug & DEBUG_DSP_CMX)
+		if (dsp_debug & DEBUG_DSP_CLOCK)
 			printk(KERN_DEBUG
 			    "cmx_receive(dsp=%lx): UNDERRUN (or overrun the "
 			    "maximum delay), adjusting read pointer! "
 			    "(inst %s)\n", (u_long)dsp, dsp->name);
-		/* flush buffer */
+		/* flush rx buffer and set delay to dsp_poll / 2 */
 		if (dsp->features.unordered) {
 			dsp->rx_R = (hh->id & CMX_BUFF_MASK);
-			dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
-				& CMX_BUFF_MASK;
+			if (dsp->cmx_delay)
+				dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
+					& CMX_BUFF_MASK;
+			else
+				dsp->rx_W = (dsp->rx_R + (dsp_poll >> 1))
+					& CMX_BUFF_MASK;
 		} else {
 			dsp->rx_R = 0;
-			dsp->rx_W = dsp->cmx_delay;
+			if (dsp->cmx_delay)
+				dsp->rx_W = dsp->cmx_delay;
+			else
+				dsp->rx_W = dsp_poll >> 1;
 		}
 		memset(dsp->rx_buff, dsp_silence, sizeof(dsp->rx_buff));
 	}
@@ -1205,7 +1237,7 @@
 	if (dsp->cmx_delay)
 		if (((dsp->rx_W - dsp->rx_R) & CMX_BUFF_MASK) >=
 		    (dsp->cmx_delay << 1)) {
-			if (dsp_debug & DEBUG_DSP_CMX)
+			if (dsp_debug & DEBUG_DSP_CLOCK)
 				printk(KERN_DEBUG
 				    "cmx_receive(dsp=%lx): OVERRUN (because "
 				    "twice the delay is reached), adjusting "
@@ -1243,6 +1275,9 @@
 
 	/* increase write-pointer */
 	dsp->rx_W = ((dsp->rx_W+len) & CMX_BUFF_MASK);
+#ifdef CMX_DELAY_DEBUG
+	showdelay(dsp, len, (dsp->rx_W-dsp->rx_R) & CMX_BUFF_MASK);
+#endif
 }
 
 
@@ -1360,8 +1395,12 @@
 				t = (t+1) & CMX_BUFF_MASK;
 				r = (r+1) & CMX_BUFF_MASK;
 			}
-			if (r != rr)
+			if (r != rr) {
+				if (dsp_debug & DEBUG_DSP_CLOCK)
+					printk(KERN_DEBUG "%s: RX empty\n",
+						__func__);
 				memset(d, dsp_silence, (rr-r)&CMX_BUFF_MASK);
+			}
 		/* -> if echo is enabled */
 		} else {
 			/*
@@ -1540,13 +1579,11 @@
 	schedule_work(&dsp->workq);
 }
 
-static u32	samplecount;
+static u32	jittercount; /* counter for jitter check */
 struct timer_list dsp_spl_tl;
 u32	dsp_spl_jiffies; /* calculate the next time to fire */
-#ifdef UNUSED
-static u32	dsp_start_jiffies; /* jiffies at the time, the calculation begins */
-#endif /* UNUSED */
-static struct timeval dsp_start_tv; /* time at start of calculation */
+static u16	dsp_count; /* last sample count */
+static int	dsp_count_valid; /* set if we have a valid last sample count */
 
 void
 dsp_cmx_send(void *arg)
@@ -1560,38 +1597,32 @@
 	int r, rr;
 	int jittercheck = 0, delay, i;
 	u_long flags;
-	struct timeval tv;
-	u32 elapsed;
-	s16 length;
+	u16 length, count;
 
 	/* lock */
 	spin_lock_irqsave(&dsp_lock, flags);
 
-	if (!dsp_start_tv.tv_sec) {
-		do_gettimeofday(&dsp_start_tv);
+	if (!dsp_count_valid) {
+		dsp_count = mISDN_clock_get();
 		length = dsp_poll;
+		dsp_count_valid = 1;
 	} else {
-		do_gettimeofday(&tv);
-		elapsed = ((tv.tv_sec - dsp_start_tv.tv_sec) * 8000)
-		    + ((s32)(tv.tv_usec / 125) - (dsp_start_tv.tv_usec / 125));
-		dsp_start_tv.tv_sec = tv.tv_sec;
-		dsp_start_tv.tv_usec = tv.tv_usec;
-		length = elapsed;
+		count = mISDN_clock_get();
+		length = count - dsp_count;
+		dsp_count = count;
 	}
 	if (length > MAX_POLL + 100)
 		length = MAX_POLL + 100;
-/* printk(KERN_DEBUG "len=%d dsp_count=0x%x.%04x dsp_poll_diff=0x%x.%04x\n",
- length, dsp_count >> 16, dsp_count & 0xffff, dsp_poll_diff >> 16,
- dsp_poll_diff & 0xffff);
- */
+	/* printk(KERN_DEBUG "len=%d dsp_count=0x%x\n", length, dsp_count); */
 
 	/*
-	 * check if jitter needs to be checked
-	 * (this is about every second = 8192 samples)
+	 * check if jitter needs to be checked (this is every second)
 	 */
-	samplecount += length;
-	if ((samplecount & 8191) < length)
+	jittercount += length;
+	if (jittercount >= 8000) {
+		jittercount -= 8000;
 		jittercheck = 1;
+	}
 
 	/* loop all members that do not require conference mixing */
 	list_for_each_entry(dsp, &dsp_ilist, list) {
@@ -1704,17 +1735,19 @@
 			}
 			/*
 			 * remove rx_delay only if we have delay AND we
-			 * have not preset cmx_delay
+			 * have not preset cmx_delay AND
+			 * the delay is greater than dsp_poll
 			 */
-			if (delay && !dsp->cmx_delay) {
-				if (dsp_debug & DEBUG_DSP_CMX)
+			if (delay > dsp_poll && !dsp->cmx_delay) {
+				if (dsp_debug & DEBUG_DSP_CLOCK)
 					printk(KERN_DEBUG
 					    "%s lowest rx_delay of %d bytes for"
 					    " dsp %s are now removed.\n",
 					    __func__, delay,
 					    dsp->name);
 				r = dsp->rx_R;
-				rr = (r + delay) & CMX_BUFF_MASK;
+				rr = (r + delay - (dsp_poll >> 1))
+					& CMX_BUFF_MASK;
 				/* delete rx-data */
 				while (r != rr) {
 					p[r] = dsp_silence;
@@ -1736,15 +1769,16 @@
 			 * remove delay only if we have delay AND we
 			 * have enabled tx_dejitter
 			 */
-			if (delay && dsp->tx_dejitter) {
-				if (dsp_debug & DEBUG_DSP_CMX)
+			if (delay > dsp_poll && dsp->tx_dejitter) {
+				if (dsp_debug & DEBUG_DSP_CLOCK)
 					printk(KERN_DEBUG
 					    "%s lowest tx_delay of %d bytes for"
 					    " dsp %s are now removed.\n",
 					    __func__, delay,
 					    dsp->name);
 				r = dsp->tx_R;
-				rr = (r + delay) & CMX_BUFF_MASK;
+				rr = (r + delay - (dsp_poll >> 1))
+					& CMX_BUFF_MASK;
 				/* delete tx-data */
 				while (r != rr) {
 					q[r] = dsp_silence;
@@ -1797,14 +1831,16 @@
 	ww = dsp->tx_R;
 	p = dsp->tx_buff;
 	d = skb->data;
-	space = ww-w;
-	if (space <= 0)
-		space += CMX_BUFF_SIZE;
+	space = (ww - w - 1) & CMX_BUFF_MASK;
 	/* write-pointer should not overrun nor reach read pointer */
-	if (space-1 < skb->len)
+	if (space < skb->len) {
 		/* write to the space we have left */
-		ww = (ww - 1) & CMX_BUFF_MASK;
-	else
+		ww = (ww - 1) & CMX_BUFF_MASK; /* end one byte prior to tx_R */
+		if (dsp_debug & DEBUG_DSP_CLOCK)
+			printk(KERN_DEBUG "%s: TX overflow space=%d skb->len="
+			    "%d, w=0x%04x, ww=0x%04x\n", __func__, space,
+			    skb->len, w, ww);
+	} else
 		/* write until all byte are copied */
 		ww = (w + skb->len) & CMX_BUFF_MASK;
 	dsp->tx_W = ww;
@@ -1857,7 +1893,7 @@
 		/* in case of hardware (echo) */
 		if (dsp->pcm_slot_tx >= 0)
 			return;
-		if (dsp->echo)
+		if (dsp->echo) {
 			nskb = skb_clone(skb, GFP_ATOMIC);
 			if (nskb) {
 				hh = mISDN_HEAD_P(nskb);
@@ -1866,6 +1902,7 @@
 				skb_queue_tail(&dsp->sendq, nskb);
 				schedule_work(&dsp->workq);
 			}
+		}
 		return;
 	}
 	/* in case of hardware conference */
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 1dc21d8..3083338 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -191,6 +191,8 @@
 	struct mISDN_ctrl_req	cq;
 	int rx_off = 1;
 
+	memset(&cq, 0, sizeof(cq));
+
 	if (!dsp->features_rx_off)
 		return;
 
@@ -249,6 +251,32 @@
 	}
 }
 
+/* enable "fill empty" feature */
+static void
+dsp_fill_empty(struct dsp *dsp)
+{
+	struct mISDN_ctrl_req	cq;
+
+	memset(&cq, 0, sizeof(cq));
+
+	if (!dsp->ch.peer) {
+		if (dsp_debug & DEBUG_DSP_CORE)
+			printk(KERN_DEBUG "%s: no peer, no fill_empty\n",
+				__func__);
+		return;
+	}
+	cq.op = MISDN_CTRL_FILL_EMPTY;
+	cq.p1 = 1;
+	if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) {
+		printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
+			__func__);
+		return;
+	}
+	if (dsp_debug & DEBUG_DSP_CORE)
+		printk(KERN_DEBUG "%s: %s set fill_empty = 1\n",
+			__func__, dsp->name);
+}
+
 static int
 dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
 {
@@ -273,8 +301,9 @@
 		if (dsp_debug & DEBUG_DSP_CORE)
 			printk(KERN_DEBUG "%s: start dtmf\n", __func__);
 		if (len == sizeof(int)) {
-			printk(KERN_NOTICE "changing DTMF Threshold "
-				"to %d\n", *((int *)data));
+			if (dsp_debug & DEBUG_DSP_CORE)
+				printk(KERN_NOTICE "changing DTMF Threshold "
+					"to %d\n", *((int *)data));
 			dsp->dtmf.treshold = (*(int *)data) * 10000;
 		}
 		/* init goertzel */
@@ -593,8 +622,6 @@
 	struct dsp		*dsp = container_of(ch, struct dsp, ch);
 	struct mISDN_ctrl_req	cq;
 
-	if (dsp_options & DSP_OPT_NOHARDWARE)
-		return;
 	if (!ch->peer) {
 		if (dsp_debug & DEBUG_DSP_CORE)
 			printk(KERN_DEBUG "%s: no peer, no features\n",
@@ -610,6 +637,10 @@
 	}
 	if (cq.op & MISDN_CTRL_RX_OFF)
 		dsp->features_rx_off = 1;
+	if (cq.op & MISDN_CTRL_FILL_EMPTY)
+		dsp->features_fill_empty = 1;
+	if (dsp_options & DSP_OPT_NOHARDWARE)
+		return;
 	if ((cq.op & MISDN_CTRL_HW_FEATURES_OP)) {
 		cq.op = MISDN_CTRL_HW_FEATURES;
 		*((u_long *)&cq.p1) = (u_long)&dsp->features;
@@ -837,11 +868,14 @@
 		}
 		if (dsp->hdlc) {
 			/* hdlc */
-			spin_lock_irqsave(&dsp_lock, flags);
-			if (dsp->b_active) {
-				skb_queue_tail(&dsp->sendq, skb);
-				schedule_work(&dsp->workq);
+			if (!dsp->b_active) {
+				ret = -EIO;
+				break;
 			}
+			hh->prim = PH_DATA_REQ;
+			spin_lock_irqsave(&dsp_lock, flags);
+			skb_queue_tail(&dsp->sendq, skb);
+			schedule_work(&dsp->workq);
 			spin_unlock_irqrestore(&dsp_lock, flags);
 			return 0;
 		}
@@ -865,6 +899,9 @@
 		if (dsp->dtmf.hardware || dsp->dtmf.software)
 			dsp_dtmf_goertzel_init(dsp);
 		get_features(ch);
+		/* enable fill_empty feature */
+		if (dsp->features_fill_empty)
+			dsp_fill_empty(dsp);
 		/* send ph_activate */
 		hh->prim = PH_ACTIVATE_REQ;
 		if (ch->peer)
@@ -1105,7 +1142,7 @@
 	} else {
 		poll = 8;
 		while (poll <= MAX_POLL) {
-			tics = poll * HZ / 8000;
+			tics = (poll * HZ) / 8000;
 			if (tics * 8000 == poll * HZ) {
 				dsp_tics = tics;
 				dsp_poll = poll;
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c
index 83639be..18cf87c 100644
--- a/drivers/isdn/mISDN/dsp_pipeline.c
+++ b/drivers/isdn/mISDN/dsp_pipeline.c
@@ -75,6 +75,15 @@
 	__ATTR(args, 0444, attr_show_args, NULL),
 };
 
+static void
+mISDN_dsp_dev_release(struct device *dev)
+{
+	struct dsp_element_entry *entry =
+		container_of(dev, struct dsp_element_entry, dev);
+	list_del(&entry->list);
+	kfree(entry);
+}
+
 int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
 {
 	struct dsp_element_entry *entry;
@@ -83,13 +92,14 @@
 	if (!elem)
 		return -EINVAL;
 
-	entry = kzalloc(sizeof(struct dsp_element_entry), GFP_KERNEL);
+	entry = kzalloc(sizeof(struct dsp_element_entry), GFP_ATOMIC);
 	if (!entry)
 		return -ENOMEM;
 
 	entry->elem = elem;
 
 	entry->dev.class = elements_class;
+	entry->dev.release = mISDN_dsp_dev_release;
 	dev_set_drvdata(&entry->dev, elem);
 	dev_set_name(&entry->dev, elem->name);
 	ret = device_register(&entry->dev);
@@ -98,9 +108,9 @@
 			__func__, elem->name);
 		goto err1;
 	}
+	list_add_tail(&entry->list, &dsp_elements);
 
-	for (i = 0; i < (sizeof(element_attributes)
-		/ sizeof(struct device_attribute)); ++i)
+	for (i = 0; i < ARRAY_SIZE(element_attributes); ++i) {
 		ret = device_create_file(&entry->dev,
 				&element_attributes[i]);
 		if (ret) {
@@ -108,15 +118,17 @@
 				__func__);
 			goto err2;
 		}
+	}
 
-	list_add_tail(&entry->list, &dsp_elements);
-
+#ifdef PIPELINE_DEBUG
 	printk(KERN_DEBUG "%s: %s registered\n", __func__, elem->name);
+#endif
 
 	return 0;
 
 err2:
 	device_unregister(&entry->dev);
+	return ret;
 err1:
 	kfree(entry);
 	return ret;
@@ -132,11 +144,11 @@
 
 	list_for_each_entry_safe(entry, n, &dsp_elements, list)
 		if (entry->elem == elem) {
-			list_del(&entry->list);
 			device_unregister(&entry->dev);
-			kfree(entry);
+#ifdef PIPELINE_DEBUG
 			printk(KERN_DEBUG "%s: %s unregistered\n",
 				__func__, elem->name);
+#endif
 			return;
 		}
 	printk(KERN_ERR "%s: element %s not in list.\n", __func__, elem->name);
@@ -173,7 +185,9 @@
 		kfree(entry);
 	}
 
+#ifdef PIPELINE_DEBUG
 	printk(KERN_DEBUG "%s: dsp pipeline module exited\n", __func__);
+#endif
 }
 
 int dsp_pipeline_init(struct dsp_pipeline *pipeline)
@@ -239,7 +253,7 @@
 	if (!len)
 		return 0;
 
-	dup = kmalloc(len + 1, GFP_KERNEL);
+	dup = kmalloc(len + 1, GFP_ATOMIC);
 	if (!dup)
 		return 0;
 	strcpy(dup, cfg);
@@ -256,9 +270,9 @@
 				elem = entry->elem;
 
 				pipeline_entry = kmalloc(sizeof(struct
-					dsp_pipeline_entry), GFP_KERNEL);
+					dsp_pipeline_entry), GFP_ATOMIC);
 				if (!pipeline_entry) {
-					printk(KERN_DEBUG "%s: failed to add "
+					printk(KERN_ERR "%s: failed to add "
 					    "entry to pipeline: %s (out of "
 					    "memory)\n", __func__, elem->name);
 					incomplete = 1;
@@ -286,7 +300,7 @@
 						    args : "");
 #endif
 					} else {
-						printk(KERN_DEBUG "%s: failed "
+						printk(KERN_ERR "%s: failed "
 						  "to add entry to pipeline: "
 						  "%s (new() returned NULL)\n",
 						  __func__, elem->name);
@@ -301,7 +315,7 @@
 		if (found)
 			found = 0;
 		else {
-			printk(KERN_DEBUG "%s: element not found, skipping: "
+			printk(KERN_ERR "%s: element not found, skipping: "
 				"%s\n", __func__, name);
 			incomplete = 1;
 		}
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c
index 2596fba..ab1168a 100644
--- a/drivers/isdn/mISDN/hwchannel.c
+++ b/drivers/isdn/mISDN/hwchannel.c
@@ -50,9 +50,6 @@
 
 	if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
 		while ((skb = skb_dequeue(&bch->rqueue))) {
-			if (bch->rcount >= 64)
-				printk(KERN_WARNING "B-channel %p receive "
-					"queue if full, but empties...\n", bch);
 			bch->rcount--;
 			if (likely(bch->ch.peer)) {
 				err = bch->ch.recv(bch->ch.peer, skb);
@@ -169,6 +166,25 @@
 EXPORT_SYMBOL(recv_Dchannel);
 
 void
+recv_Echannel(struct dchannel *ech, struct dchannel *dch)
+{
+	struct mISDNhead *hh;
+
+	if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
+		dev_kfree_skb(ech->rx_skb);
+		ech->rx_skb = NULL;
+		return;
+	}
+	hh = mISDN_HEAD_P(ech->rx_skb);
+	hh->prim = PH_DATA_E_IND;
+	hh->id = get_sapi_tei(ech->rx_skb->data);
+	skb_queue_tail(&dch->rqueue, ech->rx_skb);
+	ech->rx_skb = NULL;
+	schedule_event(dch, FLG_RECVQUEUE);
+}
+EXPORT_SYMBOL(recv_Echannel);
+
+void
 recv_Bchannel(struct bchannel *bch)
 {
 	struct mISDNhead *hh;
@@ -177,8 +193,10 @@
 	hh->prim = PH_DATA_IND;
 	hh->id = MISDN_ID_ANY;
 	if (bch->rcount >= 64) {
-		dev_kfree_skb(bch->rx_skb);
-		bch->rx_skb = NULL;
+		printk(KERN_WARNING "B-channel %p receive queue overflow, "
+			"fushing!\n", bch);
+		skb_queue_purge(&bch->rqueue);
+		bch->rcount = 0;
 		return;
 	}
 	bch->rcount++;
@@ -200,8 +218,10 @@
 recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
 {
 	if (bch->rcount >= 64) {
-		dev_kfree_skb(skb);
-		return;
+		printk(KERN_WARNING "B-channel %p receive queue overflow, "
+			"fushing!\n", bch);
+		skb_queue_purge(&bch->rqueue);
+		bch->rcount = 0;
 	}
 	bch->rcount++;
 	skb_queue_tail(&bch->rqueue, skb);
@@ -245,8 +265,12 @@
 {
 	struct sk_buff	*skb;
 
-	if (bch->rcount >= 64)
-		return;
+	if (bch->rcount >= 64) {
+		printk(KERN_WARNING "B-channel %p receive queue overflow, "
+			"fushing!\n", bch);
+		skb_queue_purge(&bch->rqueue);
+		bch->rcount = 0;
+	}
 	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
 	    0, NULL, GFP_ATOMIC);
 	if (!skb) {
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 0884dd6..abe5749 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -777,6 +777,8 @@
 static void
 l1oip_socket_close(struct l1oip *hc)
 {
+	struct dchannel *dch = hc->chan[hc->d_idx].dch;
+
 	/* kill thread */
 	if (hc->socket_thread) {
 		if (debug & DEBUG_L1OIP_SOCKET)
@@ -785,6 +787,16 @@
 		send_sig(SIGTERM, hc->socket_thread, 0);
 		wait_for_completion(&hc->socket_complete);
 	}
+
+	/* if active, we send up a PH_DEACTIVATE and deactivate */
+	if (test_bit(FLG_ACTIVE, &dch->Flags)) {
+		if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET))
+			printk(KERN_DEBUG "%s: interface become deactivated "
+				"due to timeout\n", __func__);
+		test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
+		_queue_data(&dch->dev.D, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
+			NULL, GFP_ATOMIC);
+	}
 }
 
 static int
@@ -944,7 +956,8 @@
 
 	switch (cq->op) {
 	case MISDN_CTRL_GETOP:
-		cq->op = MISDN_CTRL_SETPEER | MISDN_CTRL_UNSETPEER;
+		cq->op = MISDN_CTRL_SETPEER | MISDN_CTRL_UNSETPEER
+			| MISDN_CTRL_GETPEER;
 		break;
 	case MISDN_CTRL_SETPEER:
 		hc->remoteip = (u32)cq->p1;
@@ -964,6 +977,13 @@
 		hc->remoteip = 0;
 		l1oip_socket_open(hc);
 		break;
+	case MISDN_CTRL_GETPEER:
+		if (debug & DEBUG_L1OIP_SOCKET)
+			printk(KERN_DEBUG "%s: getting ip address.\n",
+				__func__);
+		cq->p1 = hc->remoteip;
+		cq->p2 = hc->remoteport | (hc->localport << 16);
+		break;
 	default:
 		printk(KERN_WARNING "%s: unknown Op %x\n",
 		    __func__, cq->op);
@@ -1413,7 +1433,8 @@
 		hc->chan[i + ch].bch = bch;
 		set_channelmap(bch->nr, dch->dev.channelmap);
 	}
-	ret = mISDN_register_device(&dch->dev, hc->name);
+	/* TODO: create a parent device for this driver */
+	ret = mISDN_register_device(&dch->dev, NULL, hc->name);
 	if (ret)
 		return ret;
 	hc->registered = 1;
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
index b73e952..e826eeb 100644
--- a/drivers/isdn/mISDN/layer1.c
+++ b/drivers/isdn/mISDN/layer1.c
@@ -101,7 +101,7 @@
 	va_list va;
 
 	va_start(va, fmt);
-	printk(KERN_DEBUG "%s: ", l1->dch->dev.name);
+	printk(KERN_DEBUG "%s: ", dev_name(&l1->dch->dev.dev));
 	vprintk(fmt, va);
 	printk("\n");
 	va_end(va);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 37a2de1..508945d 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -381,7 +381,7 @@
 			memcpy(di.channelmap, dev->channelmap,
 				sizeof(di.channelmap));
 			di.nrbchan = dev->nrbchan;
-			strcpy(di.name, dev->name);
+			strcpy(di.name, dev_name(&dev->dev));
 			if (copy_to_user((void __user *)arg, &di, sizeof(di)))
 				err = -EFAULT;
 		} else
@@ -460,6 +460,8 @@
 {
 	struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
 	struct sock *sk = sock->sk;
+	struct hlist_node *node;
+	struct sock *csk;
 	int err = 0;
 
 	if (*debug & DEBUG_SOCKET)
@@ -480,6 +482,26 @@
 		err = -ENODEV;
 		goto done;
 	}
+
+	if (sk->sk_protocol < ISDN_P_B_START) {
+		read_lock_bh(&data_sockets.lock);
+		sk_for_each(csk, node, &data_sockets.head) {
+			if (sk == csk)
+				continue;
+			if (_pms(csk)->dev != _pms(sk)->dev)
+				continue;
+			if (csk->sk_protocol >= ISDN_P_B_START)
+				continue;
+			if (IS_ISDN_P_TE(csk->sk_protocol)
+					== IS_ISDN_P_TE(sk->sk_protocol))
+				continue;
+			read_unlock_bh(&data_sockets.lock);
+			err = -EBUSY;
+			goto done;
+		}
+		read_unlock_bh(&data_sockets.lock);
+	}
+
 	_pms(sk)->ch.send = mISDN_send;
 	_pms(sk)->ch.ctrl = mISDN_ctrl;
 
@@ -639,12 +661,27 @@
 			memcpy(di.channelmap, dev->channelmap,
 				sizeof(di.channelmap));
 			di.nrbchan = dev->nrbchan;
-			strcpy(di.name, dev->name);
+			strcpy(di.name, dev_name(&dev->dev));
 			if (copy_to_user((void __user *)arg, &di, sizeof(di)))
 				err = -EFAULT;
 		} else
 			err = -ENODEV;
 		break;
+	case IMSETDEVNAME:
+		{
+			struct mISDN_devrename dn;
+			if (copy_from_user(&dn, (void __user *)arg,
+			    sizeof(dn))) {
+				err = -EFAULT;
+				break;
+			}
+			dev = get_mdevice(dn.id);
+			if (dev)
+				err = device_rename(&dev->dev, dn.name);
+			else
+				err = -ENODEV;
+		}
+		break;
 	default:
 		err = -EINVAL;
 	}
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index d55b14a..e2f4501 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -172,7 +172,8 @@
 		else
 			printk(KERN_WARNING
 			    "%s: dev(%s) prim(%x) id(%x) no channel\n",
-			    __func__, st->dev->name, hh->prim, hh->id);
+			    __func__, dev_name(&st->dev->dev), hh->prim,
+			    hh->id);
 	} else if (lm == 0x8) {
 		WARN_ON(lm == 0x8);
 		ch = get_channel4id(st, hh->id);
@@ -181,11 +182,12 @@
 		else
 			printk(KERN_WARNING
 			    "%s: dev(%s) prim(%x) id(%x) no channel\n",
-			    __func__, st->dev->name, hh->prim, hh->id);
+			    __func__, dev_name(&st->dev->dev), hh->prim,
+			    hh->id);
 	} else {
 		/* broadcast not handled yet */
 		printk(KERN_WARNING "%s: dev(%s) prim %x not delivered\n",
-		    __func__, st->dev->name, hh->prim);
+		    __func__, dev_name(&st->dev->dev), hh->prim);
 	}
 	return -ESRCH;
 }
@@ -209,7 +211,8 @@
 	unlock_kernel();
 #endif
 	if (*debug & DEBUG_MSG_THREAD)
-		printk(KERN_DEBUG "mISDNStackd %s started\n", st->dev->name);
+		printk(KERN_DEBUG "mISDNStackd %s started\n",
+		    dev_name(&st->dev->dev));
 
 	if (st->notify != NULL) {
 		complete(st->notify);
@@ -245,7 +248,7 @@
 					printk(KERN_DEBUG
 					    "%s: %s prim(%x) id(%x) "
 					    "send call(%d)\n",
-					    __func__, st->dev->name,
+					    __func__, dev_name(&st->dev->dev),
 					    mISDN_HEAD_PRIM(skb),
 					    mISDN_HEAD_ID(skb), err);
 				dev_kfree_skb(skb);
@@ -288,7 +291,7 @@
 		    mISDN_STACK_ACTION_MASK));
 		if (*debug & DEBUG_MSG_THREAD)
 			printk(KERN_DEBUG "%s: %s wake status %08lx\n",
-			    __func__, st->dev->name, st->status);
+			    __func__, dev_name(&st->dev->dev), st->status);
 		test_and_set_bit(mISDN_STACK_ACTIVE, &st->status);
 
 		test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status);
@@ -303,15 +306,16 @@
 #ifdef MISDN_MSG_STATS
 	printk(KERN_DEBUG "mISDNStackd daemon for %s proceed %d "
 	    "msg %d sleep %d stopped\n",
-	    st->dev->name, st->msg_cnt, st->sleep_cnt, st->stopped_cnt);
+	    dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt,
+	    st->stopped_cnt);
 	printk(KERN_DEBUG
 	    "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
-	    st->dev->name, st->thread->utime, st->thread->stime);
+	    dev_name(&st->dev->dev), st->thread->utime, st->thread->stime);
 	printk(KERN_DEBUG
 	    "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
-	    st->dev->name, st->thread->nvcsw, st->thread->nivcsw);
+	    dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw);
 	printk(KERN_DEBUG "mISDNStackd daemon for %s killed now\n",
-	    st->dev->name);
+	    dev_name(&st->dev->dev));
 #endif
 	test_and_set_bit(mISDN_STACK_KILLED, &st->status);
 	test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
@@ -401,15 +405,16 @@
 	newst->own.send = mISDN_queue_message;
 	newst->own.recv = mISDN_queue_message;
 	if (*debug & DEBUG_CORE_FUNC)
-		printk(KERN_DEBUG "%s: st(%s)\n", __func__, newst->dev->name);
+		printk(KERN_DEBUG "%s: st(%s)\n", __func__,
+		    dev_name(&newst->dev->dev));
 	newst->notify = &done;
 	newst->thread = kthread_run(mISDNStackd, (void *)newst, "mISDN_%s",
-		newst->dev->name);
+		dev_name(&newst->dev->dev));
 	if (IS_ERR(newst->thread)) {
 		err = PTR_ERR(newst->thread);
 		printk(KERN_ERR
 			"mISDN:cannot create kernel thread for %s (%d)\n",
-			newst->dev->name, err);
+			dev_name(&newst->dev->dev), err);
 		delete_teimanager(dev->teimgr);
 		kfree(newst);
 	} else
@@ -428,29 +433,21 @@
 
 	if (*debug &  DEBUG_CORE_FUNC)
 		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
-			__func__, dev->name, protocol, adr->dev, adr->channel,
-			 adr->sapi, adr->tei);
+			__func__, dev_name(&dev->dev), protocol, adr->dev,
+			adr->channel, adr->sapi, adr->tei);
 	switch (protocol) {
 	case ISDN_P_NT_S0:
 	case ISDN_P_NT_E1:
 	case ISDN_P_TE_S0:
 	case ISDN_P_TE_E1:
-#ifdef PROTOCOL_CHECK
-		/* this should be enhanced */
-		if (!list_empty(&dev->D.st->layer2)
-			&& dev->D.protocol != protocol)
-			return -EBUSY;
-		if (!hlist_empty(&dev->D.st->l1sock.head)
-			&& dev->D.protocol != protocol)
-			return -EBUSY;
-#endif
 		ch->recv = mISDN_queue_message;
 		ch->peer = &dev->D.st->own;
 		ch->st = dev->D.st;
 		rq.protocol = protocol;
-		rq.adr.channel = 0;
+		rq.adr.channel = adr->channel;
 		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
-		printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err);
+		printk(KERN_DEBUG "%s: ret %d (dev %d)\n", __func__, err,
+			dev->id);
 		if (err)
 			return err;
 		write_lock_bh(&dev->D.st->l1sock.lock);
@@ -473,7 +470,7 @@
 
 	if (*debug &  DEBUG_CORE_FUNC)
 		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
-			__func__, dev->name, protocol,
+			__func__, dev_name(&dev->dev), protocol,
 			adr->dev, adr->channel, adr->sapi,
 			adr->tei);
 	ch->st = dev->D.st;
@@ -529,7 +526,7 @@
 
 	if (*debug &  DEBUG_CORE_FUNC)
 		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
-			__func__, dev->name, protocol,
+			__func__, dev_name(&dev->dev), protocol,
 			adr->dev, adr->channel, adr->sapi,
 			adr->tei);
 	rq.protocol = ISDN_P_TE_S0;
@@ -541,15 +538,6 @@
 		if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
 			rq.protocol = ISDN_P_NT_E1;
 	case ISDN_P_LAPD_TE:
-#ifdef PROTOCOL_CHECK
-		/* this should be enhanced */
-		if (!list_empty(&dev->D.st->layer2)
-			&& dev->D.protocol != protocol)
-			return -EBUSY;
-		if (!hlist_empty(&dev->D.st->l1sock.head)
-			&& dev->D.protocol != protocol)
-			return -EBUSY;
-#endif
 		ch->recv = mISDN_queue_message;
 		ch->peer = &dev->D.st->own;
 		ch->st = dev->D.st;
@@ -590,7 +578,7 @@
 	}
 	if (*debug & DEBUG_CORE_FUNC)
 		printk(KERN_DEBUG "%s: st(%s) protocol(%x)\n", __func__,
-		    ch->st->dev->name, ch->protocol);
+		    dev_name(&ch->st->dev->dev), ch->protocol);
 	if (ch->protocol >= ISDN_P_B_START) {
 		if (ch->peer) {
 			ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL);
@@ -643,7 +631,7 @@
 
 	if (*debug & DEBUG_CORE_FUNC)
 		printk(KERN_DEBUG "%s: st(%s)\n", __func__,
-		    st->dev->name);
+		    dev_name(&st->dev->dev));
 	if (dev->teimgr)
 		delete_teimanager(dev->teimgr);
 	if (st->thread) {
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index 5c43d19..b452dea 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -968,9 +968,9 @@
 
 	if (*debug & DEBUG_L2_TEI)
 		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
-			__func__, mgr->ch.st->dev->name, crq->protocol,
-			crq->adr.dev, crq->adr.channel, crq->adr.sapi,
-			crq->adr.tei);
+			__func__, dev_name(&mgr->ch.st->dev->dev),
+			crq->protocol, crq->adr.dev, crq->adr.channel,
+			crq->adr.sapi, crq->adr.tei);
 	if (crq->adr.sapi != 0) /* not supported yet */
 		return -EINVAL;
 	if (crq->adr.tei > GROUP_TEI)
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index e7fb7d2..7427136 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -63,6 +63,12 @@
 	help
 	  This option enables support for the PCEngines WRAP programmable LEDs.
 
+config LEDS_ALIX2
+	tristate "LED Support for ALIX.2 and ALIX.3 series"
+	depends on LEDS_CLASS && X86 && EXPERIMENTAL
+	help
+	  This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
+
 config LEDS_H1940
 	tristate "LED Support for iPAQ H1940 device"
 	depends on LEDS_CLASS && ARCH_H1940
@@ -77,7 +83,7 @@
 
 config LEDS_COBALT_RAQ
 	bool "LED Support for the Cobalt Raq series"
-	depends on LEDS_CLASS && MIPS_COBALT
+	depends on LEDS_CLASS=y && MIPS_COBALT
 	select LEDS_TRIGGERS
 	help
 	  This option enables support for the Cobalt Raq series LEDs.
@@ -113,13 +119,6 @@
 	  outputs. To be useful the particular board must have LEDs
 	  and they must be connected to the GPIO lines.
 
-config LEDS_HP_DISK
-	tristate "LED Support for disk protection LED on HP notebooks"
-	depends on LEDS_CLASS && ACPI
-	help
-	  This option enable support for disk protection LED, found on
-	  newer HP notebooks.
-
 config LEDS_CLEVO_MAIL
 	tristate "Mail LED on Clevo notebook (EXPERIMENTAL)"
 	depends on LEDS_CLASS && X86 && SERIO_I8042 && DMI && EXPERIMENTAL
@@ -158,6 +157,13 @@
 	  LED driver chips accessed via the I2C bus.  Supported
 	  devices include PCA9550, PCA9551, PCA9552, and PCA9553.
 
+config LEDS_WM8350
+	tristate "LED Support for WM8350 AudioPlus PMIC"
+	depends on LEDS_CLASS && MFD_WM8350
+	help
+	  This option enables support for LEDs driven by the Wolfson
+	  Microelectronics WM8350 AudioPlus PMIC.
+
 config LEDS_DA903X
 	tristate "LED Support for DA9030/DA9034 PMIC"
 	depends on LEDS_CLASS && PMIC_DA903X
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index e1967a2..9d76f0f 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -11,6 +11,7 @@
 obj-$(CONFIG_LEDS_AMS_DELTA)		+= leds-ams-delta.o
 obj-$(CONFIG_LEDS_NET48XX)		+= leds-net48xx.o
 obj-$(CONFIG_LEDS_WRAP)			+= leds-wrap.o
+obj-$(CONFIG_LEDS_ALIX2)		+= leds-alix2.o
 obj-$(CONFIG_LEDS_H1940)		+= leds-h1940.o
 obj-$(CONFIG_LEDS_COBALT_QUBE)		+= leds-cobalt-qube.o
 obj-$(CONFIG_LEDS_COBALT_RAQ)		+= leds-cobalt-raq.o
@@ -22,7 +23,7 @@
 obj-$(CONFIG_LEDS_FSG)			+= leds-fsg.o
 obj-$(CONFIG_LEDS_PCA955X)		+= leds-pca955x.o
 obj-$(CONFIG_LEDS_DA903X)		+= leds-da903x.o
-obj-$(CONFIG_LEDS_HP_DISK)		+= leds-hp-disk.o
+obj-$(CONFIG_LEDS_WM8350)		+= leds-wm8350.o
 
 # LED Triggers
 obj-$(CONFIG_LEDS_TRIGGER_TIMER)	+= ledtrig-timer.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 6c4a326..52f82e3 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -91,9 +91,29 @@
 }
 EXPORT_SYMBOL_GPL(led_classdev_resume);
 
+static int led_suspend(struct device *dev, pm_message_t state)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+	if (led_cdev->flags & LED_CORE_SUSPENDRESUME)
+		led_classdev_suspend(led_cdev);
+
+	return 0;
+}
+
+static int led_resume(struct device *dev)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+	if (led_cdev->flags & LED_CORE_SUSPENDRESUME)
+		led_classdev_resume(led_cdev);
+
+	return 0;
+}
+
 /**
  * led_classdev_register - register a new object of led_classdev class.
- * @dev: The device to register.
+ * @parent: The device to register.
  * @led_cdev: the led_classdev structure for this device.
  */
 int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
@@ -174,6 +194,8 @@
 	leds_class = class_create(THIS_MODULE, "leds");
 	if (IS_ERR(leds_class))
 		return PTR_ERR(leds_class);
+	leds_class->suspend = led_suspend;
+	leds_class->resume = led_resume;
 	return 0;
 }
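With these class-level hooks in place, a driver no longer needs its own platform suspend/resume methods just to dim its LEDs across a sleep transition; it only has to opt in, which is exactly what the driver conversions further down in this series do. A minimal sketch of the opt-in, with illustrative names rather than code from any of the drivers touched here:

	static struct led_classdev example_led = {
		.name		= "example::status",
		.brightness_set	= example_led_set,	/* hypothetical setter */
		.flags		= LED_CORE_SUSPENDRESUME,
	};

	/* No .suspend/.resume in the platform_driver: the class's led_suspend()
	 * and led_resume() call led_classdev_suspend()/led_classdev_resume()
	 * for every classdev carrying this flag. */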
 
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c
new file mode 100644
index 0000000..ddbd773
--- /dev/null
+++ b/drivers/leds/leds-alix2.c
@@ -0,0 +1,181 @@
+/*
+ * LEDs driver for PCEngines ALIX.2 and ALIX.3
+ *
+ * Copyright (C) 2008 Constantin Baranov <const@mimas.ru>
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+
+static int force = 0;
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "Assume system has ALIX.2 style LEDs");
+
+struct alix_led {
+	struct led_classdev cdev;
+	unsigned short port;
+	unsigned int on_value;
+	unsigned int off_value;
+};
+
+static void alix_led_set(struct led_classdev *led_cdev,
+			 enum led_brightness brightness)
+{
+	struct alix_led *led_dev =
+		container_of(led_cdev, struct alix_led, cdev);
+
+	if (brightness)
+		outl(led_dev->on_value, led_dev->port);
+	else
+		outl(led_dev->off_value, led_dev->port);
+}
+
+static struct alix_led alix_leds[] = {
+	{
+		.cdev = {
+			.name = "alix:1",
+			.brightness_set = alix_led_set,
+		},
+		.port = 0x6100,
+		.on_value = 1 << 22,
+		.off_value = 1 << 6,
+	},
+	{
+		.cdev = {
+			.name = "alix:2",
+			.brightness_set = alix_led_set,
+		},
+		.port = 0x6180,
+		.on_value = 1 << 25,
+		.off_value = 1 << 9,
+	},
+	{
+		.cdev = {
+			.name = "alix:3",
+			.brightness_set = alix_led_set,
+		},
+		.port = 0x6180,
+		.on_value = 1 << 27,
+		.off_value = 1 << 11,
+	},
+};
+
+static int __init alix_led_probe(struct platform_device *pdev)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(alix_leds); i++) {
+		alix_leds[i].cdev.flags |= LED_CORE_SUSPENDRESUME;
+		ret = led_classdev_register(&pdev->dev, &alix_leds[i].cdev);
+		if (ret < 0)
+			goto fail;
+	}
+	return 0;
+
+fail:
+	while (--i >= 0)
+		led_classdev_unregister(&alix_leds[i].cdev);
+	return ret;
+}
+
+static int alix_led_remove(struct platform_device *pdev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(alix_leds); i++)
+		led_classdev_unregister(&alix_leds[i].cdev);
+	return 0;
+}
+
+static struct platform_driver alix_led_driver = {
+	.remove = alix_led_remove,
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init alix_present(void)
+{
+	const unsigned long bios_phys = 0x000f0000;
+	const size_t bios_len = 0x00010000;
+	const char alix_sig[] = "PC Engines ALIX.";
+	const size_t alix_sig_len = sizeof(alix_sig) - 1;
+
+	const char *bios_virt;
+	const char *scan_end;
+	const char *p;
+	int ret = 0;
+
+	if (force) {
+		printk(KERN_NOTICE "%s: forced to skip BIOS test, "
+		       "assume system has ALIX.2 style LEDs\n",
+		       KBUILD_MODNAME);
+		ret = 1;
+		goto out;
+	}
+
+	bios_virt = phys_to_virt(bios_phys);
+	scan_end = bios_virt + bios_len - (alix_sig_len + 2);
+	for (p = bios_virt; p < scan_end; p++) {
+		const char *tail;
+
+		if (memcmp(p, alix_sig, alix_sig_len) != 0) {
+			continue;
+		}
+
+		tail = p + alix_sig_len;
+		if ((tail[0] == '2' || tail[0] == '3') && tail[1] == '\0') {
+			printk(KERN_INFO
+			       "%s: system is recognized as \"%s\"\n",
+			       KBUILD_MODNAME, p);
+			ret = 1;
+			break;
+		}
+	}
+
+out:
+	return ret;
+}
+
+static struct platform_device *pdev;
+
+static int __init alix_led_init(void)
+{
+	int ret;
+
+	if (!alix_present()) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
+	if (!IS_ERR(pdev)) {
+		ret = platform_driver_probe(&alix_led_driver, alix_led_probe);
+		if (ret)
+			platform_device_unregister(pdev);
+	} else
+		ret = PTR_ERR(pdev);
+
+out:
+	return ret;
+}
+
+static void __exit alix_led_exit(void)
+{
+	platform_device_unregister(pdev);
+	platform_driver_unregister(&alix_led_driver);
+}
+
+module_init(alix_led_init);
+module_exit(alix_led_exit);
+
+MODULE_AUTHOR("Constantin Baranov <const@mimas.ru>");
+MODULE_DESCRIPTION("PCEngines ALIX.2 and ALIX.3 LED driver");
+MODULE_LICENSE("GPL");
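The on_value/off_value pairs above are easier to read if one assumes Geode-companion-style GPIO output registers, where writing bit n drives a line high and bit n + 16 drives it low, with the ALIX LEDs wired active-low; neither assumption is spelled out in the driver itself. Under that reading:

	/* Assumed register semantics, for illustration only: */
	#define ALIX_GPIO_HIGH(n)	(1u << (n))		/* drive GPIO n high */
	#define ALIX_GPIO_LOW(n)	(1u << ((n) + 16))	/* drive GPIO n low  */

	/* First LED, GPIO 6 at port 0x6100:
	 *   off_value == ALIX_GPIO_HIGH(6) -> line high -> LED off (active low)
	 *   on_value  == ALIX_GPIO_LOW(6)  -> line low  -> LED on
	 */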
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c
index 1bd590b..4460507 100644
--- a/drivers/leds/leds-ams-delta.c
+++ b/drivers/leds/leds-ams-delta.c
@@ -79,37 +79,12 @@
 	},
 };
 
-#ifdef CONFIG_PM
-static int ams_delta_led_suspend(struct platform_device *dev,
-		pm_message_t state)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++)
-		led_classdev_suspend(&ams_delta_leds[i].cdev);
-
-	return 0;
-}
-
-static int ams_delta_led_resume(struct platform_device *dev)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++)
-		led_classdev_resume(&ams_delta_leds[i].cdev);
-
-	return 0;
-}
-#else
-#define ams_delta_led_suspend NULL
-#define ams_delta_led_resume NULL
-#endif
-
 static int ams_delta_led_probe(struct platform_device *pdev)
 {
 	int i, ret;
 
 	for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++) {
+		ams_delta_leds[i].cdev.flags |= LED_CORE_SUSPENDRESUME;
 		ret = led_classdev_register(&pdev->dev,
 				&ams_delta_leds[i].cdev);
 		if (ret < 0)
@@ -127,7 +102,7 @@
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i--)
+	for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++)
 		led_classdev_unregister(&ams_delta_leds[i].cdev);
 
 	return 0;
@@ -136,8 +111,6 @@
 static struct platform_driver ams_delta_led_driver = {
 	.probe		= ams_delta_led_probe,
 	.remove		= ams_delta_led_remove,
-	.suspend	= ams_delta_led_suspend,
-	.resume		= ams_delta_led_resume,
 	.driver		= {
 		.name = "ams-delta-led",
 		.owner = THIS_MODULE,
@@ -151,7 +124,7 @@
 
 static void __exit ams_delta_led_exit(void)
 {
-	return platform_driver_unregister(&ams_delta_led_driver);
+	platform_driver_unregister(&ams_delta_led_driver);
 }
 
 module_init(ams_delta_led_init);
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index eb3415e..1813c84 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -142,6 +142,7 @@
 	.name			= "clevo::mail",
 	.brightness_set		= clevo_mail_led_set,
 	.blink_set		= clevo_mail_led_blink,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 static int __init clevo_mail_led_probe(struct platform_device *pdev)
@@ -155,29 +156,9 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int clevo_mail_led_suspend(struct platform_device *dev,
-				  pm_message_t state)
-{
-	led_classdev_suspend(&clevo_mail_led);
-	return 0;
-}
-
-static int clevo_mail_led_resume(struct platform_device *dev)
-{
-	led_classdev_resume(&clevo_mail_led);
-	return 0;
-}
-#else
-#define clevo_mail_led_suspend    NULL
-#define clevo_mail_led_resume     NULL
-#endif
-
 static struct platform_driver clevo_mail_led_driver = {
 	.probe		= clevo_mail_led_probe,
 	.remove		= clevo_mail_led_remove,
-	.suspend	= clevo_mail_led_suspend,
-	.resume		= clevo_mail_led_resume,
 	.driver		= {
 		.name		= KBUILD_MODNAME,
 		.owner		= THIS_MODULE,
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index 3493515..5f7c9c5 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -99,64 +99,43 @@
 }
 
 
-
 static struct led_classdev fsg_wlan_led = {
 	.name			= "fsg:blue:wlan",
 	.brightness_set		= fsg_led_wlan_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev fsg_wan_led = {
 	.name			= "fsg:blue:wan",
 	.brightness_set		= fsg_led_wan_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev fsg_sata_led = {
 	.name			= "fsg:blue:sata",
 	.brightness_set		= fsg_led_sata_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev fsg_usb_led = {
 	.name			= "fsg:blue:usb",
 	.brightness_set		= fsg_led_usb_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev fsg_sync_led = {
 	.name			= "fsg:blue:sync",
 	.brightness_set		= fsg_led_sync_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev fsg_ring_led = {
 	.name			= "fsg:blue:ring",
 	.brightness_set		= fsg_led_ring_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 
-
-#ifdef CONFIG_PM
-static int fsg_led_suspend(struct platform_device *dev, pm_message_t state)
-{
-	led_classdev_suspend(&fsg_wlan_led);
-	led_classdev_suspend(&fsg_wan_led);
-	led_classdev_suspend(&fsg_sata_led);
-	led_classdev_suspend(&fsg_usb_led);
-	led_classdev_suspend(&fsg_sync_led);
-	led_classdev_suspend(&fsg_ring_led);
-	return 0;
-}
-
-static int fsg_led_resume(struct platform_device *dev)
-{
-	led_classdev_resume(&fsg_wlan_led);
-	led_classdev_resume(&fsg_wan_led);
-	led_classdev_resume(&fsg_sata_led);
-	led_classdev_resume(&fsg_usb_led);
-	led_classdev_resume(&fsg_sync_led);
-	led_classdev_resume(&fsg_ring_led);
-	return 0;
-}
-#endif
-
-
 static int fsg_led_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -232,10 +211,6 @@
 static struct platform_driver fsg_led_driver = {
 	.probe		= fsg_led_probe,
 	.remove		= fsg_led_remove,
-#ifdef CONFIG_PM
-	.suspend	= fsg_led_suspend,
-	.resume		= fsg_led_resume,
-#endif
 	.driver		= {
 		.name		= "fsg-led",
 	},
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index b13bd29..2e3df08 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -105,6 +105,7 @@
 		}
 		led_dat->cdev.brightness_set = gpio_led_set;
 		led_dat->cdev.brightness = LED_OFF;
+		led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
 
 		gpio_direction_output(led_dat->gpio, led_dat->active_low);
 
@@ -154,44 +155,9 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int gpio_led_suspend(struct platform_device *pdev, pm_message_t state)
-{
-	struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
-	struct gpio_led_data *leds_data;
-	int i;
-
-	leds_data = platform_get_drvdata(pdev);
-
-	for (i = 0; i < pdata->num_leds; i++)
-		led_classdev_suspend(&leds_data[i].cdev);
-
-	return 0;
-}
-
-static int gpio_led_resume(struct platform_device *pdev)
-{
-	struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
-	struct gpio_led_data *leds_data;
-	int i;
-
-	leds_data = platform_get_drvdata(pdev);
-
-	for (i = 0; i < pdata->num_leds; i++)
-		led_classdev_resume(&leds_data[i].cdev);
-
-	return 0;
-}
-#else
-#define gpio_led_suspend NULL
-#define gpio_led_resume NULL
-#endif
-
 static struct platform_driver gpio_led_driver = {
 	.probe		= gpio_led_probe,
 	.remove		= __devexit_p(gpio_led_remove),
-	.suspend	= gpio_led_suspend,
-	.resume		= gpio_led_resume,
 	.driver		= {
 		.name	= "leds-gpio",
 		.owner	= THIS_MODULE,
diff --git a/drivers/leds/leds-hp-disk.c b/drivers/leds/leds-hp-disk.c
deleted file mode 100644
index 44fa757..0000000
--- a/drivers/leds/leds-hp-disk.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- *  leds-hp-disk.c - driver for HP "hard disk protection" LED
- *
- *  Copyright (C) 2008 Pavel Machek
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/dmi.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/kthread.h>
-#include <linux/leds.h>
-#include <acpi/acpi_drivers.h>
-
-#define DRIVER_NAME     "leds-hp-disk"
-#define ACPI_MDPS_CLASS "led"
-
-/* For automatic insertion of the module */
-static struct acpi_device_id hpled_device_ids[] = {
-	{"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
-	{"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, hpled_device_ids);
-
-struct acpi_hpled {
-	struct acpi_device	*device;   /* The ACPI device */
-};
-
-static struct acpi_hpled adev;
-
-static acpi_status hpled_acpi_write(acpi_handle handle, int reg)
-{
-	unsigned long long ret; /* Not used when writing */
-	union acpi_object in_obj[1];
-	struct acpi_object_list args = { 1, in_obj };
-
-	in_obj[0].type          = ACPI_TYPE_INTEGER;
-	in_obj[0].integer.value = reg;
-
-	return acpi_evaluate_integer(handle, "ALED", &args, &ret);
-}
-
-static void hpled_set(struct led_classdev *led_cdev,
-			       enum led_brightness value)
-{
-	hpled_acpi_write(adev.device->handle, !!value);
-}
-
-static struct led_classdev hpled_led = {
-	.name			= "hp:red:hddprotection",
-	.default_trigger	= "heartbeat",
-	.brightness_set		= hpled_set,
-};
-
-#ifdef CONFIG_PM
-static int hpled_suspend(struct acpi_device *dev, pm_message_t state)
-{
-	led_classdev_suspend(&hpled_led);
-	return 0;
-}
-
-static int hpled_resume(struct acpi_device *dev)
-{
-	led_classdev_resume(&hpled_led);
-	return 0;
-}
-#else
-#define hpled_suspend NULL
-#define hpled_resume NULL
-#endif
-
-static int hpled_add(struct acpi_device *device)
-{
-	int ret;
-
-	if (!device)
-		return -EINVAL;
-
-	adev.device = device;
-	strcpy(acpi_device_name(device), DRIVER_NAME);
-	strcpy(acpi_device_class(device), ACPI_MDPS_CLASS);
-	device->driver_data = &adev;
-
-	ret = led_classdev_register(NULL, &hpled_led);
-	return ret;
-}
-
-static int hpled_remove(struct acpi_device *device, int type)
-{
-	if (!device)
-		return -EINVAL;
-
-	led_classdev_unregister(&hpled_led);
-	return 0;
-}
-
-
-
-static struct acpi_driver leds_hp_driver = {
-	.name  = DRIVER_NAME,
-	.class = ACPI_MDPS_CLASS,
-	.ids   = hpled_device_ids,
-	.ops = {
-		.add     = hpled_add,
-		.remove  = hpled_remove,
-		.suspend = hpled_suspend,
-		.resume  = hpled_resume,
-	}
-};
-
-static int __init hpled_init_module(void)
-{
-	int ret;
-
-	if (acpi_disabled)
-		return -ENODEV;
-
-	ret = acpi_bus_register_driver(&leds_hp_driver);
-	if (ret < 0)
-		return ret;
-
-	printk(KERN_INFO DRIVER_NAME " driver loaded.\n");
-
-	return 0;
-}
-
-static void __exit hpled_exit_module(void)
-{
-	acpi_bus_unregister_driver(&leds_hp_driver);
-}
-
-MODULE_DESCRIPTION("Driver for HP disk protection LED");
-MODULE_AUTHOR("Pavel Machek <pavel@suse.cz>");
-MODULE_LICENSE("GPL");
-
-module_init(hpled_init_module);
-module_exit(hpled_exit_module);
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
index e8fb1ba..e4ce1fd 100644
--- a/drivers/leds/leds-hp6xx.c
+++ b/drivers/leds/leds-hp6xx.c
@@ -45,30 +45,16 @@
 	.name			= "hp6xx:red",
 	.default_trigger	= "hp6xx-charge",
 	.brightness_set		= hp6xxled_red_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev hp6xx_green_led = {
 	.name			= "hp6xx:green",
 	.default_trigger	= "ide-disk",
 	.brightness_set		= hp6xxled_green_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
-#ifdef CONFIG_PM
-static int hp6xxled_suspend(struct platform_device *dev, pm_message_t state)
-{
-	led_classdev_suspend(&hp6xx_red_led);
-	led_classdev_suspend(&hp6xx_green_led);
-	return 0;
-}
-
-static int hp6xxled_resume(struct platform_device *dev)
-{
-	led_classdev_resume(&hp6xx_red_led);
-	led_classdev_resume(&hp6xx_green_led);
-	return 0;
-}
-#endif
-
 static int hp6xxled_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -98,10 +84,6 @@
 static struct platform_driver hp6xxled_driver = {
 	.probe		= hp6xxled_probe,
 	.remove		= hp6xxled_remove,
-#ifdef CONFIG_PM
-	.suspend	= hp6xxled_suspend,
-	.resume		= hp6xxled_resume,
-#endif
 	.driver		= {
 		.name		= "hp6xx-led",
 		.owner		= THIS_MODULE,
diff --git a/drivers/leds/leds-net48xx.c b/drivers/leds/leds-net48xx.c
index 0543604..93987a1 100644
--- a/drivers/leds/leds-net48xx.c
+++ b/drivers/leds/leds-net48xx.c
@@ -33,26 +33,9 @@
 static struct led_classdev net48xx_error_led = {
 	.name		= "net48xx::error",
 	.brightness_set	= net48xx_error_led_set,
+	.flags		= LED_CORE_SUSPENDRESUME,
 };
 
-#ifdef CONFIG_PM
-static int net48xx_led_suspend(struct platform_device *dev,
-		pm_message_t state)
-{
-	led_classdev_suspend(&net48xx_error_led);
-	return 0;
-}
-
-static int net48xx_led_resume(struct platform_device *dev)
-{
-	led_classdev_resume(&net48xx_error_led);
-	return 0;
-}
-#else
-#define net48xx_led_suspend NULL
-#define net48xx_led_resume NULL
-#endif
-
 static int net48xx_led_probe(struct platform_device *pdev)
 {
 	return led_classdev_register(&pdev->dev, &net48xx_error_led);
@@ -67,8 +50,6 @@
 static struct platform_driver net48xx_led_driver = {
 	.probe		= net48xx_led_probe,
 	.remove		= net48xx_led_remove,
-	.suspend	= net48xx_led_suspend,
-	.resume		= net48xx_led_resume,
 	.driver		= {
 		.name		= DRVNAME,
 		.owner		= THIS_MODULE,
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 4064d4f..76ec749 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -16,6 +16,7 @@
 #include <linux/leds.h>
 #include <linux/input.h>
 #include <linux/mutex.h>
+#include <linux/workqueue.h>
 #include <linux/leds-pca9532.h>
 
 static const unsigned short normal_i2c[] = { /*0x60,*/ I2C_CLIENT_END};
@@ -34,6 +35,7 @@
 	struct pca9532_led leds[16];
 	struct mutex update_lock;
 	struct input_dev    *idev;
+       struct work_struct work;
 	u8 pwm[2];
 	u8 psc[2];
 };
@@ -63,7 +65,7 @@
  * as a compromise we average one pwm to the values requested by all
  * leds that are not ON/OFF.
  * */
-static int pca9532_setpwm(struct i2c_client *client, int pwm, int blink,
+static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink,
 	enum led_brightness value)
 {
 	int a = 0, b = 0, i = 0;
@@ -84,11 +86,17 @@
 	b = b/a;
 	if (b > 0xFF)
 		return -EINVAL;
-	mutex_lock(&data->update_lock);
 	data->pwm[pwm] = b;
+       data->psc[pwm] = blink;
+       return 0;
+}
+
+static int pca9532_setpwm(struct i2c_client *client, int pwm)
+{
+       struct pca9532_data *data = i2c_get_clientdata(client);
+       mutex_lock(&data->update_lock);
 	i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm),
 		data->pwm[pwm]);
-	data->psc[pwm] = blink;
 	i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm),
 		data->psc[pwm]);
 	mutex_unlock(&data->update_lock);
@@ -124,11 +132,11 @@
 		led->state = PCA9532_ON;
 	else {
 		led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */
-		err = pca9532_setpwm(led->client, 0, 0, value);
+               err = pca9532_calcpwm(led->client, 0, 0, value);
 		if (err)
 			return; /* XXX: led api doesn't allow error code? */
 	}
-	pca9532_setled(led);
+       schedule_work(&led->work);
 }
 
 static int pca9532_set_blink(struct led_classdev *led_cdev,
@@ -137,6 +145,7 @@
 	struct pca9532_led *led = ldev_to_led(led_cdev);
 	struct i2c_client *client = led->client;
 	int psc;
+       int err = 0;
 
 	if (*delay_on == 0 && *delay_off == 0) {
 	/* led subsystem ask us for a blink rate */
@@ -148,11 +157,15 @@
 
 	/* Thecus specific: only use PSC/PWM 0 */
 	psc = (*delay_on * 152-1)/1000;
-	return pca9532_setpwm(client, 0, psc, led_cdev->brightness);
+       err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness);
+       if (err)
+               return err;
+       schedule_work(&led->work);
+       return 0;
 }
 
-int pca9532_event(struct input_dev *dev, unsigned int type, unsigned int code,
-	int value)
+static int pca9532_event(struct input_dev *dev, unsigned int type,
+	unsigned int code, int value)
 {
 	struct pca9532_data *data = input_get_drvdata(dev);
 
@@ -165,13 +178,28 @@
 	else
 		data->pwm[1] = 0;
 
-	dev_info(&dev->dev, "setting beep to %d \n", data->pwm[1]);
+       schedule_work(&data->work);
+
+       return 0;
+}
+
+static void pca9532_input_work(struct work_struct *work)
+{
+       struct pca9532_data *data;
+       data = container_of(work, struct pca9532_data, work);
 	mutex_lock(&data->update_lock);
 	i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1),
 		data->pwm[1]);
 	mutex_unlock(&data->update_lock);
+}
 
-	return 0;
+static void pca9532_led_work(struct work_struct *work)
+{
+       struct pca9532_led *led;
+       led = container_of(work, struct pca9532_led, work);
+       if (led->state == PCA9532_PWM0)
+               pca9532_setpwm(led->client, 0);
+       pca9532_setled(led);
 }
 
 static int pca9532_configure(struct i2c_client *client,
@@ -204,8 +232,9 @@
 			led->ldev.brightness = LED_OFF;
 			led->ldev.brightness_set = pca9532_set_brightness;
 			led->ldev.blink_set = pca9532_set_blink;
-			if (led_classdev_register(&client->dev,
-				&led->ldev) < 0)	{
+                       INIT_WORK(&led->work, pca9532_led_work);
+			err = led_classdev_register(&client->dev, &led->ldev);
+			if (err < 0) {
 				dev_err(&client->dev,
 					"couldn't register LED %s\n",
 					led->name);
@@ -233,9 +262,11 @@
 						BIT_MASK(SND_TONE);
 			data->idev->event = pca9532_event;
 			input_set_drvdata(data->idev, data);
+                       INIT_WORK(&data->work, pca9532_input_work);
 			err = input_register_device(data->idev);
 			if (err) {
 				input_free_device(data->idev);
+                               cancel_work_sync(&data->work);
 				data->idev = NULL;
 				goto exit;
 			}
@@ -252,18 +283,19 @@
 				break;
 			case PCA9532_TYPE_LED:
 				led_classdev_unregister(&data->leds[i].ldev);
+                               cancel_work_sync(&data->leds[i].work);
 				break;
 			case PCA9532_TYPE_N2100_BEEP:
 				if (data->idev != NULL) {
 					input_unregister_device(data->idev);
 					input_free_device(data->idev);
+                                       cancel_work_sync(&data->work);
 					data->idev = NULL;
 				}
 				break;
 			}
 
 	return err;
-
 }
 
 static int pca9532_probe(struct i2c_client *client,
@@ -271,12 +303,16 @@
 {
 	struct pca9532_data *data = i2c_get_clientdata(client);
 	struct pca9532_platform_data *pca9532_pdata = client->dev.platform_data;
+	int err;
+
+	if (!pca9532_pdata)
+		return -EIO;
 
 	if (!i2c_check_functionality(client->adapter,
 		I2C_FUNC_SMBUS_BYTE_DATA))
 		return -EIO;
 
-	data = kzalloc(sizeof(struct pca9532_data), GFP_KERNEL);
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
@@ -285,12 +321,13 @@
 	data->client = client;
 	mutex_init(&data->update_lock);
 
-	if (pca9532_pdata == NULL)
-		return -EIO;
+	err = pca9532_configure(client, data, pca9532_pdata);
+	if (err) {
+		kfree(data);
+		i2c_set_clientdata(client, NULL);
+	}
 
-	pca9532_configure(client, data, pca9532_pdata);
-	return 0;
-
+	return err;
 }
 
 static int pca9532_remove(struct i2c_client *client)
@@ -303,11 +340,13 @@
 			break;
 		case PCA9532_TYPE_LED:
 			led_classdev_unregister(&data->leds[i].ldev);
+                       cancel_work_sync(&data->leds[i].work);
 			break;
 		case PCA9532_TYPE_N2100_BEEP:
 			if (data->idev != NULL) {
 				input_unregister_device(data->idev);
 				input_free_device(data->idev);
+                               cancel_work_sync(&data->work);
 				data->idev = NULL;
 			}
 			break;
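Taken together, the pca9532 changes stop issuing SMBus transfers directly from brightness_set() and the beeper event handler, both of which may run in contexts that cannot sleep; instead the requested state is recorded and a work item performs the sleeping I2C writes later. A minimal sketch of that defer-to-workqueue pattern, using hypothetical names rather than the driver's:

	struct demo_led {
		struct led_classdev cdev;
		enum led_brightness value;	/* recorded in the fast path */
		struct work_struct work;
	};

	static void demo_brightness_set(struct led_classdev *cdev,
					enum led_brightness value)
	{
		struct demo_led *led = container_of(cdev, struct demo_led, cdev);

		led->value = value;		/* no bus access here */
		schedule_work(&led->work);	/* defer the sleeping I2C write */
	}

	static void demo_work(struct work_struct *work)
	{
		struct demo_led *led = container_of(work, struct demo_led, work);

		/* process context: i2c_smbus_write_byte_data() etc. may sleep */
	}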
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index 25a07f2..4d81131 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -82,6 +82,7 @@
 	led->cdev.brightness_set = s3c24xx_led_set;
 	led->cdev.default_trigger = pdata->def_trigger;
 	led->cdev.name = pdata->name;
+	led->cdev.flags |= LED_CORE_SUSPENDRESUME;
 
 	led->pdata = pdata;
 
@@ -111,33 +112,9 @@
 	return ret;
 }
 
-
-#ifdef CONFIG_PM
-static int s3c24xx_led_suspend(struct platform_device *dev, pm_message_t state)
-{
-	struct s3c24xx_gpio_led *led = pdev_to_gpio(dev);
-
-	led_classdev_suspend(&led->cdev);
-	return 0;
-}
-
-static int s3c24xx_led_resume(struct platform_device *dev)
-{
-	struct s3c24xx_gpio_led *led = pdev_to_gpio(dev);
-
-	led_classdev_resume(&led->cdev);
-	return 0;
-}
-#else
-#define s3c24xx_led_suspend NULL
-#define s3c24xx_led_resume NULL
-#endif
-
 static struct platform_driver s3c24xx_led_driver = {
 	.probe		= s3c24xx_led_probe,
 	.remove		= s3c24xx_led_remove,
-	.suspend	= s3c24xx_led_suspend,
-	.resume		= s3c24xx_led_resume,
 	.driver		= {
 		.name		= "s3c24xx_led",
 		.owner		= THIS_MODULE,
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
new file mode 100644
index 0000000..38c6bcb
--- /dev/null
+++ b/drivers/leds/leds-wm8350.c
@@ -0,0 +1,311 @@
+/*
+ * LED driver for WM8350 driven LEDs.
+ *
+ * Copyright(C) 2007, 2008 Wolfson Microelectronics PLC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/err.h>
+#include <linux/mfd/wm8350/pmic.h>
+#include <linux/regulator/consumer.h>
+
+/* Microamps */
+static const int isink_cur[] = {
+	4,
+	5,
+	6,
+	7,
+	8,
+	10,
+	11,
+	14,
+	16,
+	19,
+	23,
+	27,
+	32,
+	39,
+	46,
+	54,
+	65,
+	77,
+	92,
+	109,
+	130,
+	154,
+	183,
+	218,
+	259,
+	308,
+	367,
+	436,
+	518,
+	616,
+	733,
+	872,
+	1037,
+	1233,
+	1466,
+	1744,
+	2073,
+	2466,
+	2933,
+	3487,
+	4147,
+	4932,
+	5865,
+	6975,
+	8294,
+	9864,
+	11730,
+	13949,
+	16589,
+	19728,
+	23460,
+	27899,
+	33178,
+	39455,
+	46920,
+	55798,
+	66355,
+	78910,
+	93840,
+	111596,
+	132710,
+	157820,
+	187681,
+	223191
+};
+
+#define to_wm8350_led(led_cdev) \
+	container_of(led_cdev, struct wm8350_led, cdev)
+
+static void wm8350_led_enable(struct wm8350_led *led)
+{
+	int ret;
+
+	if (led->enabled)
+		return;
+
+	ret = regulator_enable(led->isink);
+	if (ret != 0) {
+		dev_err(led->cdev.dev, "Failed to enable ISINK: %d\n", ret);
+		return;
+	}
+
+	ret = regulator_enable(led->dcdc);
+	if (ret != 0) {
+		dev_err(led->cdev.dev, "Failed to enable DCDC: %d\n", ret);
+		regulator_disable(led->isink);
+		return;
+	}
+
+	led->enabled = 1;
+}
+
+static void wm8350_led_disable(struct wm8350_led *led)
+{
+	int ret;
+
+	if (!led->enabled)
+		return;
+
+	ret = regulator_disable(led->dcdc);
+	if (ret != 0) {
+		dev_err(led->cdev.dev, "Failed to disable DCDC: %d\n", ret);
+		return;
+	}
+
+	ret = regulator_disable(led->isink);
+	if (ret != 0) {
+		dev_err(led->cdev.dev, "Failed to disable ISINK: %d\n", ret);
+		regulator_enable(led->dcdc);
+		return;
+	}
+
+	led->enabled = 0;
+}
+
+static void led_work(struct work_struct *work)
+{
+	struct wm8350_led *led = container_of(work, struct wm8350_led, work);
+	int ret;
+	int uA;
+	unsigned long flags;
+
+	mutex_lock(&led->mutex);
+
+	spin_lock_irqsave(&led->value_lock, flags);
+
+	if (led->value == LED_OFF) {
+		spin_unlock_irqrestore(&led->value_lock, flags);
+		wm8350_led_disable(led);
+		goto out;
+	}
+
+	/* This scales linearly into the index of valid current
+	 * settings which results in a linear scaling of perceived
+	 * brightness due to the non-linear current settings provided
+	 * by the hardware.
+	 */
+	uA = (led->max_uA_index * led->value) / LED_FULL;
+	spin_unlock_irqrestore(&led->value_lock, flags);
+	BUG_ON(uA >= ARRAY_SIZE(isink_cur));
+
+	ret = regulator_set_current_limit(led->isink, isink_cur[uA],
+					  isink_cur[uA]);
+	if (ret != 0)
+		dev_err(led->cdev.dev, "Failed to set %duA: %d\n",
+			isink_cur[uA], ret);
+
+	wm8350_led_enable(led);
+
+out:
+	mutex_unlock(&led->mutex);
+}
+
+static void wm8350_led_set(struct led_classdev *led_cdev,
+			   enum led_brightness value)
+{
+	struct wm8350_led *led = to_wm8350_led(led_cdev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&led->value_lock, flags);
+	led->value = value;
+	schedule_work(&led->work);
+	spin_unlock_irqrestore(&led->value_lock, flags);
+}
+
+static void wm8350_led_shutdown(struct platform_device *pdev)
+{
+	struct wm8350_led *led = platform_get_drvdata(pdev);
+
+	mutex_lock(&led->mutex);
+	led->value = LED_OFF;
+	wm8350_led_disable(led);
+	mutex_unlock(&led->mutex);
+}
+
+static int wm8350_led_probe(struct platform_device *pdev)
+{
+	struct regulator *isink, *dcdc;
+	struct wm8350_led *led;
+	struct wm8350_led_platform_data *pdata = pdev->dev.platform_data;
+	int ret, i;
+
+	if (pdata == NULL) {
+		dev_err(&pdev->dev, "no platform data\n");
+		return -ENODEV;
+	}
+
+	if (pdata->max_uA < isink_cur[0]) {
+		dev_err(&pdev->dev, "Invalid maximum current %duA\n",
+			pdata->max_uA);
+		return -EINVAL;
+	}
+
+	isink = regulator_get(&pdev->dev, "led_isink");
+	if (IS_ERR(isink)) {
+		printk(KERN_ERR "%s: can't get ISINK\n", __func__);
+		return PTR_ERR(isink);
+	}
+
+	dcdc = regulator_get(&pdev->dev, "led_vcc");
+	if (IS_ERR(dcdc)) {
+		printk(KERN_ERR "%s: can't get DCDC\n", __func__);
+		ret = PTR_ERR(dcdc);
+		goto err_isink;
+	}
+
+	led = kzalloc(sizeof(*led), GFP_KERNEL);
+	if (led == NULL) {
+		ret = -ENOMEM;
+		goto err_dcdc;
+	}
+
+	led->cdev.brightness_set = wm8350_led_set;
+	led->cdev.default_trigger = pdata->default_trigger;
+	led->cdev.name = pdata->name;
+	led->cdev.flags |= LED_CORE_SUSPENDRESUME;
+	led->enabled = regulator_is_enabled(isink);
+	led->isink = isink;
+	led->dcdc = dcdc;
+
+	for (i = 0; i < ARRAY_SIZE(isink_cur) - 1; i++)
+		if (isink_cur[i] >= pdata->max_uA)
+			break;
+	led->max_uA_index = i;
+	if (pdata->max_uA != isink_cur[i])
+		dev_warn(&pdev->dev,
+			 "Maximum current %duA is not directly supported,"
+			 " check platform data\n",
+			 pdata->max_uA);
+
+	spin_lock_init(&led->value_lock);
+	mutex_init(&led->mutex);
+	INIT_WORK(&led->work, led_work);
+	led->value = LED_OFF;
+	platform_set_drvdata(pdev, led);
+
+	ret = led_classdev_register(&pdev->dev, &led->cdev);
+	if (ret < 0)
+		goto err_led;
+
+	return 0;
+
+ err_led:
+	kfree(led);
+ err_dcdc:
+	regulator_put(dcdc);
+ err_isink:
+	regulator_put(isink);
+	return ret;
+}
+
+static int wm8350_led_remove(struct platform_device *pdev)
+{
+	struct wm8350_led *led = platform_get_drvdata(pdev);
+
+	led_classdev_unregister(&led->cdev);
+	flush_scheduled_work();
+	wm8350_led_disable(led);
+	regulator_put(led->dcdc);
+	regulator_put(led->isink);
+	kfree(led);
+	return 0;
+}
+
+static struct platform_driver wm8350_led_driver = {
+	.driver = {
+		   .name = "wm8350-led",
+		   .owner = THIS_MODULE,
+		   },
+	.probe = wm8350_led_probe,
+	.remove = wm8350_led_remove,
+	.shutdown = wm8350_led_shutdown,
+};
+
+static int __devinit wm8350_led_init(void)
+{
+	return platform_driver_register(&wm8350_led_driver);
+}
+module_init(wm8350_led_init);
+
+static void wm8350_led_exit(void)
+{
+	platform_driver_unregister(&wm8350_led_driver);
+}
+module_exit(wm8350_led_exit);
+
+MODULE_AUTHOR("Mark Brown");
+MODULE_DESCRIPTION("WM8350 LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm8350-led");
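In led_work() above, the requested brightness is scaled linearly into an index of isink_cur[] (the local variable is called uA but holds a table index), so perceived brightness tracks the index rather than the raw microamp value. A worked example under an assumed configuration of max_uA_index == 40; the numbers are illustrative, not taken from any particular board:

	/* LED_FULL is 255 in the LED core. */
	int idx_full = (40 * 255) / 255;	/* = 40 -> isink_cur[40] = 4147 uA */
	int idx_half = (40 * 128) / 255;	/* = 20 -> isink_cur[20] =  130 uA */
	/* value == LED_OFF simply disables the current sink and its DCDC supply */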
diff --git a/drivers/leds/leds-wrap.c b/drivers/leds/leds-wrap.c
index 2f3aa87f..2982c86 100644
--- a/drivers/leds/leds-wrap.c
+++ b/drivers/leds/leds-wrap.c
@@ -56,40 +56,21 @@
 	.name			= "wrap::power",
 	.brightness_set		= wrap_power_led_set,
 	.default_trigger	= "default-on",
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev wrap_error_led = {
 	.name		= "wrap::error",
 	.brightness_set	= wrap_error_led_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev wrap_extra_led = {
 	.name           = "wrap::extra",
 	.brightness_set = wrap_extra_led_set,
+	.flags			= LED_CORE_SUSPENDRESUME,
 };
 
-#ifdef CONFIG_PM
-static int wrap_led_suspend(struct platform_device *dev,
-		pm_message_t state)
-{
-	led_classdev_suspend(&wrap_power_led);
-	led_classdev_suspend(&wrap_error_led);
-	led_classdev_suspend(&wrap_extra_led);
-	return 0;
-}
-
-static int wrap_led_resume(struct platform_device *dev)
-{
-	led_classdev_resume(&wrap_power_led);
-	led_classdev_resume(&wrap_error_led);
-	led_classdev_resume(&wrap_extra_led);
-	return 0;
-}
-#else
-#define wrap_led_suspend NULL
-#define wrap_led_resume NULL
-#endif
-
 static int wrap_led_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -127,8 +108,6 @@
 static struct platform_driver wrap_led_driver = {
 	.probe		= wrap_led_probe,
 	.remove		= wrap_led_remove,
-	.suspend	= wrap_led_suspend,
-	.resume		= wrap_led_resume,
 	.driver		= {
 		.name		= DRVNAME,
 		.owner		= THIS_MODULE,
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index db68196..3d65313 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -199,6 +199,7 @@
 static void timer_trig_deactivate(struct led_classdev *led_cdev)
 {
 	struct timer_trig_data *timer_data = led_cdev->trigger_data;
+	unsigned long on = 0, off = 0;
 
 	if (timer_data) {
 		device_remove_file(led_cdev->dev, &dev_attr_delay_on);
@@ -206,6 +207,10 @@
 		del_timer_sync(&timer_data->timer);
 		kfree(timer_data);
 	}
+
+	/* If there is hardware support for blinking, stop it */
+	if (led_cdev->blink_set)
+		led_cdev->blink_set(led_cdev, &on, &off);
 }
 
 static struct led_trigger timer_led_trigger = {
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index b526596..173cf55 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -138,7 +138,7 @@
 	  Say Y here to enable Macintosh specific extensions of the generic
 	  backlight code. With this enabled, the brightness keys on older
 	  PowerBooks will be enabled so you can change the screen brightness.
-	  Newer models should use an userspace daemon like pbbuttonsd.
+	  Newer models should use a userspace daemon like pbbuttonsd.
 
 config PMAC_BACKLIGHT_LEGACY
 	bool "Provide legacy ioctl's on /dev/pmu for the backlight"
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index ab7c8e4..7199437 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -215,7 +215,6 @@
 	/* choose a good rdev and read the page from there */
 
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 	sector_t target;
 
 	if (!page)
@@ -223,7 +222,7 @@
 	if (!page)
 		return ERR_PTR(-ENOMEM);
 
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		if (! test_bit(In_sync, &rdev->flags)
 		    || test_bit(Faulty, &rdev->flags))
 			continue;
@@ -964,9 +963,11 @@
 				 */
 				page = bitmap->sb_page;
 				offset = sizeof(bitmap_super_t);
-				read_sb_page(bitmap->mddev, bitmap->offset,
-					     page,
-					     index, count);
+				if (!file)
+					read_sb_page(bitmap->mddev,
+						     bitmap->offset,
+						     page,
+						     index, count);
 			} else if (file) {
 				page = read_page(file, index, bitmap, count);
 				offset = 0;
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index f26c1f9..86d9adf 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -283,7 +283,6 @@
 static int run(mddev_t *mddev)
 {
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 	int i;
 
 	conf_t *conf = kmalloc(sizeof(*conf), GFP_KERNEL);
@@ -296,7 +295,7 @@
 	}
 	conf->nfaults = 0;
 
-	rdev_for_each(rdev, tmp, mddev)
+	list_for_each_entry(rdev, &mddev->disks, same_set)
 		conf->rdev = rdev;
 
 	mddev->array_sectors = mddev->size * 2;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 3b90c5c..1e3aea9 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -105,7 +105,6 @@
 	int i, nb_zone, cnt;
 	sector_t min_sectors;
 	sector_t curr_sector;
-	struct list_head *tmp;
 
 	conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
 			GFP_KERNEL);
@@ -115,7 +114,7 @@
 	cnt = 0;
 	conf->array_sectors = 0;
 
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		int j = rdev->raid_disk;
 		dev_info_t *disk = conf->disks + j;
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 1b1d326..41e2509 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -214,20 +214,33 @@
 	return mddev;
 }
 
+static void mddev_delayed_delete(struct work_struct *ws)
+{
+	mddev_t *mddev = container_of(ws, mddev_t, del_work);
+	kobject_del(&mddev->kobj);
+	kobject_put(&mddev->kobj);
+}
+
 static void mddev_put(mddev_t *mddev)
 {
 	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
 		return;
-	if (!mddev->raid_disks && list_empty(&mddev->disks)) {
+	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
+	    !mddev->hold_active) {
 		list_del(&mddev->all_mddevs);
-		spin_unlock(&all_mddevs_lock);
-		blk_cleanup_queue(mddev->queue);
-		if (mddev->sysfs_state)
-			sysfs_put(mddev->sysfs_state);
-		mddev->sysfs_state = NULL;
-		kobject_put(&mddev->kobj);
-	} else
-		spin_unlock(&all_mddevs_lock);
+		if (mddev->gendisk) {
+			/* we did a probe so need to clean up.
+			 * Call schedule_work inside the spinlock
+			 * so that flush_scheduled_work() after
+			 * mddev_find will succeed in waiting for the
+			 * work to be done.
+			 */
+			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
+			schedule_work(&mddev->del_work);
+		} else
+			kfree(mddev);
+	}
+	spin_unlock(&all_mddevs_lock);
 }
 
 static mddev_t * mddev_find(dev_t unit)
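The comment in mddev_put() above is terse: the reason schedule_work() must be called while all_mddevs_lock is still held is that md_alloc() calls flush_scheduled_work() after mddev_find(), and that flush only helps if any delete work for a dying device was already queued by the time the lock was dropped. An illustrative interleaving, assuming the sequence introduced by this patch:

	/*
	 *  mddev_put()                          md_alloc()
	 *  -----------                          ----------
	 *  spin_lock(&all_mddevs_lock);
	 *  list_del(&mddev->all_mddevs);
	 *  schedule_work(&mddev->del_work);
	 *  spin_unlock(&all_mddevs_lock);
	 *                                       mddev_find()            (takes/drops lock)
	 *                                       flush_scheduled_work()  (waits for del_work,
	 *                                                                which was queued before
	 *                                                                the lock was dropped, so
	 *                                                                the old kobject/gendisk
	 *                                                                is gone before re-create)
	 */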
@@ -236,15 +249,50 @@
 
  retry:
 	spin_lock(&all_mddevs_lock);
-	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
-		if (mddev->unit == unit) {
-			mddev_get(mddev);
-			spin_unlock(&all_mddevs_lock);
-			kfree(new);
-			return mddev;
-		}
 
-	if (new) {
+	if (unit) {
+		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
+			if (mddev->unit == unit) {
+				mddev_get(mddev);
+				spin_unlock(&all_mddevs_lock);
+				kfree(new);
+				return mddev;
+			}
+
+		if (new) {
+			list_add(&new->all_mddevs, &all_mddevs);
+			spin_unlock(&all_mddevs_lock);
+			new->hold_active = UNTIL_IOCTL;
+			return new;
+		}
+	} else if (new) {
+		/* find an unused unit number */
+		static int next_minor = 512;
+		int start = next_minor;
+		int is_free = 0;
+		int dev = 0;
+		while (!is_free) {
+			dev = MKDEV(MD_MAJOR, next_minor);
+			next_minor++;
+			if (next_minor > MINORMASK)
+				next_minor = 0;
+			if (next_minor == start) {
+				/* Oh dear, all in use. */
+				spin_unlock(&all_mddevs_lock);
+				kfree(new);
+				return NULL;
+			}
+
+			is_free = 1;
+			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
+				if (mddev->unit == dev) {
+					is_free = 0;
+					break;
+				}
+		}
+		new->unit = dev;
+		new->md_minor = MINOR(dev);
+		new->hold_active = UNTIL_STOP;
 		list_add(&new->all_mddevs, &all_mddevs);
 		spin_unlock(&all_mddevs_lock);
 		return new;
@@ -275,16 +323,6 @@
 	new->resync_max = MaxSector;
 	new->level = LEVEL_NONE;
 
-	new->queue = blk_alloc_queue(GFP_KERNEL);
-	if (!new->queue) {
-		kfree(new);
-		return NULL;
-	}
-	/* Can be unlocked because the queue is new: no concurrency */
-	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);
-
-	blk_queue_make_request(new->queue, md_fail_request);
-
 	goto retry;
 }
 
@@ -307,25 +345,23 @@
 
 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
 {
-	mdk_rdev_t * rdev;
-	struct list_head *tmp;
+	mdk_rdev_t *rdev;
 
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set)
 		if (rdev->desc_nr == nr)
 			return rdev;
-	}
+
 	return NULL;
 }
 
 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
 {
-	struct list_head *tmp;
 	mdk_rdev_t *rdev;
 
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set)
 		if (rdev->bdev->bd_dev == dev)
 			return rdev;
-	}
+
 	return NULL;
 }
 
@@ -861,7 +897,6 @@
 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 {
 	mdp_super_t *sb;
-	struct list_head *tmp;
 	mdk_rdev_t *rdev2;
 	int next_spare = mddev->raid_disks;
 
@@ -933,7 +968,7 @@
 		sb->state |= (1<<MD_SB_BITMAP_PRESENT);
 
 	sb->disks[0].state = (1<<MD_DISK_REMOVED);
-	rdev_for_each(rdev2, tmp, mddev) {
+	list_for_each_entry(rdev2, &mddev->disks, same_set) {
 		mdp_disk_t *d;
 		int desc_nr;
 		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
@@ -1259,7 +1294,6 @@
 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 {
 	struct mdp_superblock_1 *sb;
-	struct list_head *tmp;
 	mdk_rdev_t *rdev2;
 	int max_dev, i;
 	/* make rdev->sb match mddev and rdev data. */
@@ -1307,7 +1341,7 @@
 	}
 
 	max_dev = 0;
-	rdev_for_each(rdev2, tmp, mddev)
+	list_for_each_entry(rdev2, &mddev->disks, same_set)
 		if (rdev2->desc_nr+1 > max_dev)
 			max_dev = rdev2->desc_nr+1;
 
@@ -1316,7 +1350,7 @@
 	for (i=0; i<max_dev;i++)
 		sb->dev_roles[i] = cpu_to_le16(0xfffe);
 	
-	rdev_for_each(rdev2, tmp, mddev) {
+	list_for_each_entry(rdev2, &mddev->disks, same_set) {
 		i = rdev2->desc_nr;
 		if (test_bit(Faulty, &rdev2->flags))
 			sb->dev_roles[i] = cpu_to_le16(0xfffe);
@@ -1466,6 +1500,9 @@
 
 	list_add_rcu(&rdev->same_set, &mddev->disks);
 	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
+
+	/* May as well allow recovery to be retried once */
+	mddev->recovery_disabled = 0;
 	return 0;
 
  fail:
@@ -1571,8 +1608,7 @@
 
 static void export_array(mddev_t *mddev)
 {
-	struct list_head *tmp;
-	mdk_rdev_t *rdev;
+	mdk_rdev_t *rdev, *tmp;
 
 	rdev_for_each(rdev, tmp, mddev) {
 		if (!rdev->mddev) {
@@ -1593,7 +1629,7 @@
 		desc->major,desc->minor,desc->raid_disk,desc->state);
 }
 
-static void print_sb(mdp_super_t *sb)
+static void print_sb_90(mdp_super_t *sb)
 {
 	int i;
 
@@ -1624,10 +1660,57 @@
 	}
 	printk(KERN_INFO "md:     THIS: ");
 	print_desc(&sb->this_disk);
-
 }
 
-static void print_rdev(mdk_rdev_t *rdev)
+static void print_sb_1(struct mdp_superblock_1 *sb)
+{
+	__u8 *uuid;
+
+	uuid = sb->set_uuid;
+	printk(KERN_INFO "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
+			":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
+	       KERN_INFO "md:    Name: \"%s\" CT:%llu\n",
+		le32_to_cpu(sb->major_version),
+		le32_to_cpu(sb->feature_map),
+		uuid[0], uuid[1], uuid[2], uuid[3],
+		uuid[4], uuid[5], uuid[6], uuid[7],
+		uuid[8], uuid[9], uuid[10], uuid[11],
+		uuid[12], uuid[13], uuid[14], uuid[15],
+		sb->set_name,
+		(unsigned long long)le64_to_cpu(sb->ctime)
+		       & MD_SUPERBLOCK_1_TIME_SEC_MASK);
+
+	uuid = sb->device_uuid;
+	printk(KERN_INFO "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
+			" RO:%llu\n"
+	       KERN_INFO "md:     Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
+			":%02x%02x%02x%02x%02x%02x\n"
+	       KERN_INFO "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
+	       KERN_INFO "md:         (MaxDev:%u) \n",
+		le32_to_cpu(sb->level),
+		(unsigned long long)le64_to_cpu(sb->size),
+		le32_to_cpu(sb->raid_disks),
+		le32_to_cpu(sb->layout),
+		le32_to_cpu(sb->chunksize),
+		(unsigned long long)le64_to_cpu(sb->data_offset),
+		(unsigned long long)le64_to_cpu(sb->data_size),
+		(unsigned long long)le64_to_cpu(sb->super_offset),
+		(unsigned long long)le64_to_cpu(sb->recovery_offset),
+		le32_to_cpu(sb->dev_number),
+		uuid[0], uuid[1], uuid[2], uuid[3],
+		uuid[4], uuid[5], uuid[6], uuid[7],
+		uuid[8], uuid[9], uuid[10], uuid[11],
+		uuid[12], uuid[13], uuid[14], uuid[15],
+		sb->devflags,
+		(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
+		(unsigned long long)le64_to_cpu(sb->events),
+		(unsigned long long)le64_to_cpu(sb->resync_offset),
+		le32_to_cpu(sb->sb_csum),
+		le32_to_cpu(sb->max_dev)
+		);
+}
+
+static void print_rdev(mdk_rdev_t *rdev, int major_version)
 {
 	char b[BDEVNAME_SIZE];
 	printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
@@ -1635,15 +1718,22 @@
 	        test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
 	        rdev->desc_nr);
 	if (rdev->sb_loaded) {
-		printk(KERN_INFO "md: rdev superblock:\n");
-		print_sb((mdp_super_t*)page_address(rdev->sb_page));
+		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
+		switch (major_version) {
+		case 0:
+			print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
+			break;
+		case 1:
+			print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
+			break;
+		}
 	} else
 		printk(KERN_INFO "md: no rdev superblock!\n");
 }
 
 static void md_print_devices(void)
 {
-	struct list_head *tmp, *tmp2;
+	struct list_head *tmp;
 	mdk_rdev_t *rdev;
 	mddev_t *mddev;
 	char b[BDEVNAME_SIZE];
@@ -1658,12 +1748,12 @@
 			bitmap_print_sb(mddev->bitmap);
 		else
 			printk("%s: ", mdname(mddev));
-		rdev_for_each(rdev, tmp2, mddev)
+		list_for_each_entry(rdev, &mddev->disks, same_set)
 			printk("<%s>", bdevname(rdev->bdev,b));
 		printk("\n");
 
-		rdev_for_each(rdev, tmp2, mddev)
-			print_rdev(rdev);
+		list_for_each_entry(rdev, &mddev->disks, same_set)
+			print_rdev(rdev, mddev->major_version);
 	}
 	printk("md:	**********************************\n");
 	printk("\n");
@@ -1679,9 +1769,8 @@
 	 * with the rest of the array)
 	 */
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		if (rdev->sb_events == mddev->events ||
 		    (nospares &&
 		     rdev->raid_disk < 0 &&
@@ -1699,7 +1788,6 @@
 
 static void md_update_sb(mddev_t * mddev, int force_change)
 {
-	struct list_head *tmp;
 	mdk_rdev_t *rdev;
 	int sync_req;
 	int nospares = 0;
@@ -1790,7 +1878,7 @@
 		mdname(mddev),mddev->in_sync);
 
 	bitmap_update_sb(mddev->bitmap);
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		char b[BDEVNAME_SIZE];
 		dprintk(KERN_INFO "md: ");
 		if (rdev->sb_loaded != 1)
@@ -1999,7 +2087,6 @@
 		md_wakeup_thread(rdev->mddev->thread);
 	} else if (rdev->mddev->pers) {
 		mdk_rdev_t *rdev2;
-		struct list_head *tmp;
 		/* Activating a spare .. or possibly reactivating
 		 * if we every get bitmaps working here.
 		 */
@@ -2010,7 +2097,7 @@
 		if (rdev->mddev->pers->hot_add_disk == NULL)
 			return -EINVAL;
 
-		rdev_for_each(rdev2, tmp, rdev->mddev)
+		list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
 			if (rdev2->raid_disk == slot)
 				return -EEXIST;
 
@@ -2125,14 +2212,14 @@
 		 */
 		mddev_t *mddev;
 		int overlap = 0;
-		struct list_head *tmp, *tmp2;
+		struct list_head *tmp;
 
 		mddev_unlock(my_mddev);
 		for_each_mddev(mddev, tmp) {
 			mdk_rdev_t *rdev2;
 
 			mddev_lock(mddev);
-			rdev_for_each(rdev2, tmp2, mddev)
+			list_for_each_entry(rdev2, &mddev->disks, same_set)
 				if (test_bit(AllReserved, &rdev2->flags) ||
 				    (rdev->bdev == rdev2->bdev &&
 				     rdev != rdev2 &&
@@ -2328,8 +2415,7 @@
 static void analyze_sbs(mddev_t * mddev)
 {
 	int i;
-	struct list_head *tmp;
-	mdk_rdev_t *rdev, *freshest;
+	mdk_rdev_t *rdev, *freshest, *tmp;
 	char b[BDEVNAME_SIZE];
 
 	freshest = NULL;
@@ -3046,7 +3132,7 @@
 	}
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
-	sysfs_notify(&mddev->kobj, NULL, "sync_action");
+	sysfs_notify_dirent(mddev->sysfs_action);
 	return len;
 }
 
@@ -3404,6 +3490,8 @@
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 	rv = mddev_lock(mddev);
+	if (mddev->hold_active == UNTIL_IOCTL)
+		mddev->hold_active = 0;
 	if (!rv) {
 		rv = entry->store(mddev, page, length);
 		mddev_unlock(mddev);
@@ -3414,6 +3502,17 @@
 static void md_free(struct kobject *ko)
 {
 	mddev_t *mddev = container_of(ko, mddev_t, kobj);
+
+	if (mddev->sysfs_state)
+		sysfs_put(mddev->sysfs_state);
+
+	if (mddev->gendisk) {
+		del_gendisk(mddev->gendisk);
+		put_disk(mddev->gendisk);
+	}
+	if (mddev->queue)
+		blk_cleanup_queue(mddev->queue);
+
 	kfree(mddev);
 }
 
@@ -3429,34 +3528,74 @@
 
 int mdp_major = 0;
 
-static struct kobject *md_probe(dev_t dev, int *part, void *data)
+static int md_alloc(dev_t dev, char *name)
 {
 	static DEFINE_MUTEX(disks_mutex);
 	mddev_t *mddev = mddev_find(dev);
 	struct gendisk *disk;
-	int partitioned = (MAJOR(dev) != MD_MAJOR);
-	int shift = partitioned ? MdpMinorShift : 0;
-	int unit = MINOR(dev) >> shift;
+	int partitioned;
+	int shift;
+	int unit;
 	int error;
 
 	if (!mddev)
-		return NULL;
+		return -ENODEV;
+
+	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
+	shift = partitioned ? MdpMinorShift : 0;
+	unit = MINOR(mddev->unit) >> shift;
+
+	/* wait for any previous instance of this device
+	 * to be completely removed (mddev_delayed_delete).
+	 */
+	flush_scheduled_work();
 
 	mutex_lock(&disks_mutex);
 	if (mddev->gendisk) {
 		mutex_unlock(&disks_mutex);
 		mddev_put(mddev);
-		return NULL;
+		return -EEXIST;
 	}
+
+	if (name) {
+		/* Need to ensure that 'name' is not a duplicate.
+		 */
+		mddev_t *mddev2;
+		spin_lock(&all_mddevs_lock);
+
+		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
+			if (mddev2->gendisk &&
+			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
+				spin_unlock(&all_mddevs_lock);
+				return -EEXIST;
+			}
+		spin_unlock(&all_mddevs_lock);
+	}
+
+	mddev->queue = blk_alloc_queue(GFP_KERNEL);
+	if (!mddev->queue) {
+		mutex_unlock(&disks_mutex);
+		mddev_put(mddev);
+		return -ENOMEM;
+	}
+	/* Can be unlocked because the queue is new: no concurrency */
+	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
+
+	blk_queue_make_request(mddev->queue, md_fail_request);
+
 	disk = alloc_disk(1 << shift);
 	if (!disk) {
 		mutex_unlock(&disks_mutex);
+		blk_cleanup_queue(mddev->queue);
+		mddev->queue = NULL;
 		mddev_put(mddev);
-		return NULL;
+		return -ENOMEM;
 	}
-	disk->major = MAJOR(dev);
+	disk->major = MAJOR(mddev->unit);
 	disk->first_minor = unit << shift;
-	if (partitioned)
+	if (name)
+		strcpy(disk->disk_name, name);
+	else if (partitioned)
 		sprintf(disk->disk_name, "md_d%d", unit);
 	else
 		sprintf(disk->disk_name, "md%d", unit);
@@ -3464,7 +3603,7 @@
 	disk->private_data = mddev;
 	disk->queue = mddev->queue;
 	/* Allow extended partitions.  This makes the
-	 * 'mdp' device redundant, but we can really
+	 * 'mdp' device redundant, but we can't really
 	 * remove it now.
 	 */
 	disk->flags |= GENHD_FL_EXT_DEVT;
@@ -3480,9 +3619,35 @@
 		kobject_uevent(&mddev->kobj, KOBJ_ADD);
 		mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
 	}
+	mddev_put(mddev);
+	return 0;
+}
+
+static struct kobject *md_probe(dev_t dev, int *part, void *data)
+{
+	md_alloc(dev, NULL);
 	return NULL;
 }
 
+static int add_named_array(const char *val, struct kernel_param *kp)
+{
+	/* val must be "md_*" where * is not all digits.
+	 * We allocate an array with a large free minor number, and
+	 * set the name to val.  val must not already be an active name.
+	 */
+	int len = strlen(val);
+	char buf[DISK_NAME_LEN];
+
+	while (len && val[len-1] == '\n')
+		len--;
+	if (len >= DISK_NAME_LEN)
+		return -E2BIG;
+	strlcpy(buf, val, len+1);
+	if (strncmp(buf, "md_", 3) != 0)
+		return -EINVAL;
+	return md_alloc(0, buf);
+}
+
 static void md_safemode_timeout(unsigned long data)
 {
 	mddev_t *mddev = (mddev_t *) data;
@@ -3501,7 +3666,6 @@
 {
 	int err;
 	int chunk_size;
-	struct list_head *tmp;
 	mdk_rdev_t *rdev;
 	struct gendisk *disk;
 	struct mdk_personality *pers;
@@ -3540,7 +3704,7 @@
 		}
 
 		/* devices must have minimum size of one chunk */
-		rdev_for_each(rdev, tmp, mddev) {
+		list_for_each_entry(rdev, &mddev->disks, same_set) {
 			if (test_bit(Faulty, &rdev->flags))
 				continue;
 			if (rdev->size < chunk_size / 1024) {
@@ -3565,7 +3729,7 @@
 	 * the only valid external interface is through the md
 	 * device.
 	 */
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		if (test_bit(Faulty, &rdev->flags))
 			continue;
 		sync_blockdev(rdev->bdev);
@@ -3630,10 +3794,10 @@
 		 */
 		char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
 		mdk_rdev_t *rdev2;
-		struct list_head *tmp2;
 		int warned = 0;
-		rdev_for_each(rdev, tmp, mddev) {
-			rdev_for_each(rdev2, tmp2, mddev) {
+
+		list_for_each_entry(rdev, &mddev->disks, same_set)
+			list_for_each_entry(rdev2, &mddev->disks, same_set) {
 				if (rdev < rdev2 &&
 				    rdev->bdev->bd_contains ==
 				    rdev2->bdev->bd_contains) {
@@ -3647,7 +3811,7 @@
 					warned = 1;
 				}
 			}
-		}
+
 		if (warned)
 			printk(KERN_WARNING
 			       "True protection against single-disk"
@@ -3684,6 +3848,7 @@
 			printk(KERN_WARNING
 			       "md: cannot register extra attributes for %s\n",
 			       mdname(mddev));
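+		/* Cache the dirent so later sync_action notifications can use
+		 * sysfs_notify_dirent() instead of a name lookup each time.
+		 */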
+		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
 	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
 		mddev->ro = 0;
 
@@ -3694,7 +3859,7 @@
 	mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
 	mddev->in_sync = 1;
 
-	rdev_for_each(rdev, tmp, mddev)
+	list_for_each_entry(rdev, &mddev->disks, same_set)
 		if (rdev->raid_disk >= 0) {
 			char nm[20];
 			sprintf(nm, "rd%d", rdev->raid_disk);
@@ -3725,9 +3890,8 @@
 	 * it will remove the drives and not do the right thing
 	 */
 	if (mddev->degraded && !mddev->sync_thread) {
-		struct list_head *rtmp;
 		int spares = 0;
-		rdev_for_each(rdev, rtmp, mddev)
+		list_for_each_entry(rdev, &mddev->disks, same_set)
 			if (rdev->raid_disk >= 0 &&
 			    !test_bit(In_sync, &rdev->flags) &&
 			    !test_bit(Faulty, &rdev->flags))
@@ -3754,7 +3918,8 @@
 	mddev->changed = 1;
 	md_new_event(mddev);
 	sysfs_notify_dirent(mddev->sysfs_state);
-	sysfs_notify(&mddev->kobj, NULL, "sync_action");
+	if (mddev->sysfs_action)
+		sysfs_notify_dirent(mddev->sysfs_action);
 	sysfs_notify(&mddev->kobj, NULL, "degraded");
 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
 	return 0;
@@ -3854,9 +4019,12 @@
 			mddev->queue->merge_bvec_fn = NULL;
 			mddev->queue->unplug_fn = NULL;
 			mddev->queue->backing_dev_info.congested_fn = NULL;
-			if (mddev->pers->sync_request)
+			if (mddev->pers->sync_request) {
 				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
-
+				if (mddev->sysfs_action)
+					sysfs_put(mddev->sysfs_action);
+				mddev->sysfs_action = NULL;
+			}
 			module_put(mddev->pers->owner);
 			mddev->pers = NULL;
 			/* tell userspace to handle 'inactive' */
@@ -3883,7 +4051,6 @@
 	 */
 	if (mode == 0) {
 		mdk_rdev_t *rdev;
-		struct list_head *tmp;
 
 		printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
 
@@ -3895,7 +4062,7 @@
 		}
 		mddev->bitmap_offset = 0;
 
-		rdev_for_each(rdev, tmp, mddev)
+		list_for_each_entry(rdev, &mddev->disks, same_set)
 			if (rdev->raid_disk >= 0) {
 				char nm[20];
 				sprintf(nm, "rd%d", rdev->raid_disk);
@@ -3941,6 +4108,8 @@
 		mddev->barriers_work = 0;
 		mddev->safemode = 0;
 		kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
+		if (mddev->hold_active == UNTIL_STOP)
+			mddev->hold_active = 0;
 
 	} else if (mddev->pers)
 		printk(KERN_INFO "md: %s switched to read-only mode.\n",
@@ -3956,7 +4125,6 @@
 static void autorun_array(mddev_t *mddev)
 {
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 	int err;
 
 	if (list_empty(&mddev->disks))
@@ -3964,7 +4132,7 @@
 
 	printk(KERN_INFO "md: running: ");
 
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		char b[BDEVNAME_SIZE];
 		printk("<%s>", bdevname(rdev->bdev,b));
 	}
@@ -3991,8 +4159,7 @@
  */
 static void autorun_devices(int part)
 {
-	struct list_head *tmp;
-	mdk_rdev_t *rdev0, *rdev;
+	mdk_rdev_t *rdev0, *rdev, *tmp;
 	mddev_t *mddev;
 	char b[BDEVNAME_SIZE];
 
@@ -4007,7 +4174,7 @@
 		printk(KERN_INFO "md: considering %s ...\n",
 			bdevname(rdev0->bdev,b));
 		INIT_LIST_HEAD(&candidates);
-		rdev_for_each_list(rdev, tmp, pending_raid_disks)
+		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
 			if (super_90_load(rdev, rdev0, 0) >= 0) {
 				printk(KERN_INFO "md:  adding %s ...\n",
 					bdevname(rdev->bdev,b));
@@ -4053,7 +4220,7 @@
 		} else {
 			printk(KERN_INFO "md: created %s\n", mdname(mddev));
 			mddev->persistent = 1;
-			rdev_for_each_list(rdev, tmp, candidates) {
+			rdev_for_each_list(rdev, tmp, &candidates) {
 				list_del_init(&rdev->same_set);
 				if (bind_rdev_to_array(rdev, mddev))
 					export_rdev(rdev);
@@ -4064,7 +4231,7 @@
 		/* on success, candidates will be empty, on error
 		 * it won't...
 		 */
-		rdev_for_each_list(rdev, tmp, candidates) {
+		rdev_for_each_list(rdev, tmp, &candidates) {
 			list_del_init(&rdev->same_set);
 			export_rdev(rdev);
 		}
@@ -4093,10 +4260,9 @@
 	mdu_array_info_t info;
 	int nr,working,active,failed,spare;
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 
 	nr=working=active=failed=spare=0;
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		nr++;
 		if (test_bit(Faulty, &rdev->flags))
 			failed++;
@@ -4614,9 +4780,8 @@
 
 static int update_size(mddev_t *mddev, sector_t num_sectors)
 {
-	mdk_rdev_t * rdev;
+	mdk_rdev_t *rdev;
 	int rv;
-	struct list_head *tmp;
 	int fit = (num_sectors == 0);
 
 	if (mddev->pers->resize == NULL)
@@ -4638,7 +4803,7 @@
 		 * grow, and re-add.
 		 */
 		return -EBUSY;
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		sector_t avail;
 		avail = rdev->size * 2;
 
@@ -5000,6 +5165,9 @@
 
 done_unlock:
 abort_unlock:
+	if (mddev->hold_active == UNTIL_IOCTL &&
+	    err != -EINVAL)
+		mddev->hold_active = 0;
 	mddev_unlock(mddev);
 
 	return err;
@@ -5016,14 +5184,25 @@
 	 * Succeed if we can lock the mddev, which confirms that
 	 * it isn't being stopped right now.
 	 */
-	mddev_t *mddev = bdev->bd_disk->private_data;
+	mddev_t *mddev = mddev_find(bdev->bd_dev);
 	int err;
 
+	if (mddev->gendisk != bdev->bd_disk) {
+		/* we are racing with mddev_put which is discarding this
+		 * bd_disk.
+		 */
+		mddev_put(mddev);
+		/* Wait until bdev->bd_disk is definitely gone */
+		flush_scheduled_work();
+		/* Then retry the open from the top */
+		return -ERESTARTSYS;
+	}
+	BUG_ON(mddev != bdev->bd_disk->private_data);
+
 	if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
 		goto out;
 
 	err = 0;
-	mddev_get(mddev);
 	atomic_inc(&mddev->openers);
 	mddev_unlock(mddev);
 
@@ -5187,11 +5366,10 @@
 {
 	int i = 0;
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 
 	seq_printf(seq, "unused devices: ");
 
-	rdev_for_each_list(rdev, tmp, pending_raid_disks) {
+	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
 		char b[BDEVNAME_SIZE];
 		i++;
 		seq_printf(seq, "%s ",
@@ -5350,7 +5528,6 @@
 {
 	mddev_t *mddev = v;
 	sector_t size;
-	struct list_head *tmp2;
 	mdk_rdev_t *rdev;
 	struct mdstat_info *mi = seq->private;
 	struct bitmap *bitmap;
@@ -5387,7 +5564,7 @@
 		}
 
 		size = 0;
-		rdev_for_each(rdev, tmp2, mddev) {
+		list_for_each_entry(rdev, &mddev->disks, same_set) {
 			char b[BDEVNAME_SIZE];
 			seq_printf(seq, " %s[%d]",
 				bdevname(rdev->bdev,b), rdev->desc_nr);
@@ -5694,7 +5871,6 @@
 	struct list_head *tmp;
 	sector_t last_check;
 	int skipped = 0;
-	struct list_head *rtmp;
 	mdk_rdev_t *rdev;
 	char *desc;
 
@@ -5799,7 +5975,7 @@
 		/* recovery follows the physical size of devices */
 		max_sectors = mddev->size << 1;
 		j = MaxSector;
-		rdev_for_each(rdev, rtmp, mddev)
+		list_for_each_entry(rdev, &mddev->disks, same_set)
 			if (rdev->raid_disk >= 0 &&
 			    !test_bit(Faulty, &rdev->flags) &&
 			    !test_bit(In_sync, &rdev->flags) &&
@@ -5949,7 +6125,7 @@
 		} else {
 			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
 				mddev->curr_resync = MaxSector;
-			rdev_for_each(rdev, rtmp, mddev)
+			list_for_each_entry(rdev, &mddev->disks, same_set)
 				if (rdev->raid_disk >= 0 &&
 				    !test_bit(Faulty, &rdev->flags) &&
 				    !test_bit(In_sync, &rdev->flags) &&
@@ -5985,10 +6161,9 @@
 static int remove_and_add_spares(mddev_t *mddev)
 {
 	mdk_rdev_t *rdev;
-	struct list_head *rtmp;
 	int spares = 0;
 
-	rdev_for_each(rdev, rtmp, mddev)
+	list_for_each_entry(rdev, &mddev->disks, same_set)
 		if (rdev->raid_disk >= 0 &&
 		    !test_bit(Blocked, &rdev->flags) &&
 		    (test_bit(Faulty, &rdev->flags) ||
@@ -6003,8 +6178,8 @@
 			}
 		}
 
-	if (mddev->degraded && ! mddev->ro) {
-		rdev_for_each(rdev, rtmp, mddev) {
+	if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
+		list_for_each_entry(rdev, &mddev->disks, same_set) {
 			if (rdev->raid_disk >= 0 &&
 			    !test_bit(In_sync, &rdev->flags) &&
 			    !test_bit(Blocked, &rdev->flags))
@@ -6056,7 +6231,6 @@
 void md_check_recovery(mddev_t *mddev)
 {
 	mdk_rdev_t *rdev;
-	struct list_head *rtmp;
 
 
 	if (mddev->bitmap)
@@ -6120,7 +6294,7 @@
 		if (mddev->flags)
 			md_update_sb(mddev, 0);
 
-		rdev_for_each(rdev, rtmp, mddev)
+		list_for_each_entry(rdev, &mddev->disks, same_set)
 			if (test_and_clear_bit(StateChanged, &rdev->flags))
 				sysfs_notify_dirent(rdev->sysfs_state);
 
@@ -6149,13 +6323,13 @@
 			 * information must be scrapped
 			 */
 			if (!mddev->degraded)
-				rdev_for_each(rdev, rtmp, mddev)
+				list_for_each_entry(rdev, &mddev->disks, same_set)
 					rdev->saved_raid_disk = -1;
 
 			mddev->recovery = 0;
 			/* flag recovery needed just to double check */
 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-			sysfs_notify(&mddev->kobj, NULL, "sync_action");
+			sysfs_notify_dirent(mddev->sysfs_action);
 			md_new_event(mddev);
 			goto unlock;
 		}
@@ -6216,7 +6390,7 @@
 				mddev->recovery = 0;
 			} else
 				md_wakeup_thread(mddev->sync_thread);
-			sysfs_notify(&mddev->kobj, NULL, "sync_action");
+			sysfs_notify_dirent(mddev->sysfs_action);
 			md_new_event(mddev);
 		}
 	unlock:
@@ -6224,7 +6398,8 @@
 			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
 			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
 					       &mddev->recovery))
-				sysfs_notify(&mddev->kobj, NULL, "sync_action");
+				if (mddev->sysfs_action)
+					sysfs_notify_dirent(mddev->sysfs_action);
 		}
 		mddev_unlock(mddev);
 	}
@@ -6386,14 +6561,8 @@
 	unregister_sysctl_table(raid_table_header);
 	remove_proc_entry("mdstat", NULL);
 	for_each_mddev(mddev, tmp) {
-		struct gendisk *disk = mddev->gendisk;
-		if (!disk)
-			continue;
 		export_array(mddev);
-		del_gendisk(disk);
-		put_disk(disk);
-		mddev->gendisk = NULL;
-		mddev_put(mddev);
+		mddev->hold_active = 0;
 	}
 }
 
@@ -6418,6 +6587,7 @@
 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
 
+module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
 
 EXPORT_SYMBOL(register_md_personality);
 EXPORT_SYMBOL(unregister_md_personality);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index d4ac47d..f6d08f2 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -408,7 +408,6 @@
 	int disk_idx;
 	struct multipath_info *disk;
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 
 	if (mddev->level != LEVEL_MULTIPATH) {
 		printk("multipath: %s: raid level not set to multipath IO (%d)\n",
@@ -441,7 +440,7 @@
 	}
 
 	conf->working_disks = 0;
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		disk_idx = rdev->raid_disk;
 		if (disk_idx < 0 ||
 		    disk_idx >= mddev->raid_disks)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 8ac6488..c605ba8 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -53,11 +53,10 @@
 static int create_strip_zones (mddev_t *mddev)
 {
 	int i, c, j;
-	sector_t current_offset, curr_zone_offset;
+	sector_t current_start, curr_zone_start;
 	sector_t min_spacing;
 	raid0_conf_t *conf = mddev_to_conf(mddev);
 	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
-	struct list_head *tmp1, *tmp2;
 	struct strip_zone *zone;
 	int cnt;
 	char b[BDEVNAME_SIZE];
@@ -67,19 +66,19 @@
 	 */
 	conf->nr_strip_zones = 0;
  
-	rdev_for_each(rdev1, tmp1, mddev) {
-		printk("raid0: looking at %s\n",
+	list_for_each_entry(rdev1, &mddev->disks, same_set) {
+		printk(KERN_INFO "raid0: looking at %s\n",
 			bdevname(rdev1->bdev,b));
 		c = 0;
-		rdev_for_each(rdev2, tmp2, mddev) {
-			printk("raid0:   comparing %s(%llu)",
+		list_for_each_entry(rdev2, &mddev->disks, same_set) {
+			printk(KERN_INFO "raid0:   comparing %s(%llu)",
 			       bdevname(rdev1->bdev,b),
 			       (unsigned long long)rdev1->size);
-			printk(" with %s(%llu)\n",
+			printk(KERN_INFO " with %s(%llu)\n",
 			       bdevname(rdev2->bdev,b),
 			       (unsigned long long)rdev2->size);
 			if (rdev2 == rdev1) {
-				printk("raid0:   END\n");
+				printk(KERN_INFO "raid0:   END\n");
 				break;
 			}
 			if (rdev2->size == rdev1->size)
@@ -88,19 +87,20 @@
 				 * Not unique, don't count it as a new
 				 * group
 				 */
-				printk("raid0:   EQUAL\n");
+				printk(KERN_INFO "raid0:   EQUAL\n");
 				c = 1;
 				break;
 			}
-			printk("raid0:   NOT EQUAL\n");
+			printk(KERN_INFO "raid0:   NOT EQUAL\n");
 		}
 		if (!c) {
-			printk("raid0:   ==> UNIQUE\n");
+			printk(KERN_INFO "raid0:   ==> UNIQUE\n");
 			conf->nr_strip_zones++;
-			printk("raid0: %d zones\n", conf->nr_strip_zones);
+			printk(KERN_INFO "raid0: %d zones\n",
+				conf->nr_strip_zones);
 		}
 	}
-	printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);
+	printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
 
 	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
 				conf->nr_strip_zones, GFP_KERNEL);
@@ -119,16 +119,17 @@
 	cnt = 0;
 	smallest = NULL;
 	zone->dev = conf->devlist;
-	rdev_for_each(rdev1, tmp1, mddev) {
+	list_for_each_entry(rdev1, &mddev->disks, same_set) {
 		int j = rdev1->raid_disk;
 
 		if (j < 0 || j >= mddev->raid_disks) {
-			printk("raid0: bad disk number %d - aborting!\n", j);
+			printk(KERN_ERR "raid0: bad disk number %d - "
+				"aborting!\n", j);
 			goto abort;
 		}
 		if (zone->dev[j]) {
-			printk("raid0: multiple devices for %d - aborting!\n",
-				j);
+			printk(KERN_ERR "raid0: multiple devices for %d - "
+				"aborting!\n", j);
 			goto abort;
 		}
 		zone->dev[j] = rdev1;
@@ -149,16 +150,16 @@
 		cnt++;
 	}
 	if (cnt != mddev->raid_disks) {
-		printk("raid0: too few disks (%d of %d) - aborting!\n",
-			cnt, mddev->raid_disks);
+		printk(KERN_ERR "raid0: too few disks (%d of %d) - "
+			"aborting!\n", cnt, mddev->raid_disks);
 		goto abort;
 	}
 	zone->nb_dev = cnt;
-	zone->size = smallest->size * cnt;
-	zone->zone_offset = 0;
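+	/* rdev->size is in 1K blocks; zone geometry is now kept in 512-byte
+	 * sectors, hence the factor of two.
+	 */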
+	zone->sectors = smallest->size * cnt * 2;
+	zone->zone_start = 0;
 
-	current_offset = smallest->size;
-	curr_zone_offset = zone->size;
+	current_start = smallest->size * 2;
+	curr_zone_start = zone->sectors;
 
 	/* now do the other zones */
 	for (i = 1; i < conf->nr_strip_zones; i++)
@@ -166,40 +167,41 @@
 		zone = conf->strip_zone + i;
 		zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;
 
-		printk("raid0: zone %d\n", i);
-		zone->dev_offset = current_offset;
+		printk(KERN_INFO "raid0: zone %d\n", i);
+		zone->dev_start = current_start;
 		smallest = NULL;
 		c = 0;
 
 		for (j=0; j<cnt; j++) {
 			char b[BDEVNAME_SIZE];
 			rdev = conf->strip_zone[0].dev[j];
-			printk("raid0: checking %s ...", bdevname(rdev->bdev,b));
-			if (rdev->size > current_offset)
-			{
-				printk(" contained as device %d\n", c);
+			printk(KERN_INFO "raid0: checking %s ...",
+				bdevname(rdev->bdev, b));
+			if (rdev->size > current_start / 2) {
+				printk(KERN_INFO " contained as device %d\n",
+					c);
 				zone->dev[c] = rdev;
 				c++;
 				if (!smallest || (rdev->size <smallest->size)) {
 					smallest = rdev;
-					printk("  (%llu) is smallest!.\n", 
+					printk(KERN_INFO "  (%llu) is smallest!.\n",
 						(unsigned long long)rdev->size);
 				}
 			} else
-				printk(" nope.\n");
+				printk(KERN_INFO " nope.\n");
 		}
 
 		zone->nb_dev = c;
-		zone->size = (smallest->size - current_offset) * c;
-		printk("raid0: zone->nb_dev: %d, size: %llu\n",
-			zone->nb_dev, (unsigned long long)zone->size);
+		zone->sectors = (smallest->size * 2 - current_start) * c;
+		printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
+			zone->nb_dev, (unsigned long long)zone->sectors);
 
-		zone->zone_offset = curr_zone_offset;
-		curr_zone_offset += zone->size;
+		zone->zone_start = curr_zone_start;
+		curr_zone_start += zone->sectors;
 
-		current_offset = smallest->size;
-		printk("raid0: current zone offset: %llu\n",
-			(unsigned long long)current_offset);
+		current_start = smallest->size * 2;
+		printk(KERN_INFO "raid0: current zone start: %llu\n",
+			(unsigned long long)current_start);
 	}
 
 	/* Now find appropriate hash spacing.
@@ -210,16 +212,16 @@
 	 * strip though as its size has no bearing on the efficacy of the hash
 	 * table.
 	 */
-	conf->hash_spacing = curr_zone_offset;
-	min_spacing = curr_zone_offset;
+	conf->spacing = curr_zone_start;
+	min_spacing = curr_zone_start;
 	sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
 	for (i=0; i < conf->nr_strip_zones-1; i++) {
-		sector_t sz = 0;
-		for (j=i; j<conf->nr_strip_zones-1 &&
-			     sz < min_spacing ; j++)
-			sz += conf->strip_zone[j].size;
-		if (sz >= min_spacing && sz < conf->hash_spacing)
-			conf->hash_spacing = sz;
+		sector_t s = 0;
+		for (j = i; j < conf->nr_strip_zones - 1 &&
+				s < min_spacing; j++)
+			s += conf->strip_zone[j].sectors;
+		if (s >= min_spacing && s < conf->spacing)
+			conf->spacing = s;
 	}
 
 	mddev->queue->unplug_fn = raid0_unplug;
@@ -227,7 +229,7 @@
 	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 
-	printk("raid0: done.\n");
+	printk(KERN_INFO "raid0: done.\n");
 	return 0;
  abort:
 	return 1;
@@ -262,10 +264,9 @@
 static int raid0_run (mddev_t *mddev)
 {
 	unsigned  cur=0, i=0, nb_zone;
-	s64 size;
+	s64 sectors;
 	raid0_conf_t *conf;
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 
 	if (mddev->chunk_size == 0) {
 		printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
@@ -291,54 +292,54 @@
 
 	/* calculate array device size */
 	mddev->array_sectors = 0;
-	rdev_for_each(rdev, tmp, mddev)
+	list_for_each_entry(rdev, &mddev->disks, same_set)
 		mddev->array_sectors += rdev->size * 2;
 
-	printk("raid0 : md_size is %llu blocks.\n", 
-		(unsigned long long)mddev->array_sectors / 2);
-	printk("raid0 : conf->hash_spacing is %llu blocks.\n",
-		(unsigned long long)conf->hash_spacing);
+	printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
+		(unsigned long long)mddev->array_sectors);
+	printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n",
+		(unsigned long long)conf->spacing);
 	{
-		sector_t s = mddev->array_sectors / 2;
-		sector_t space = conf->hash_spacing;
+		sector_t s = mddev->array_sectors;
+		sector_t space = conf->spacing;
 		int round;
-		conf->preshift = 0;
+		conf->sector_shift = 0;
 		if (sizeof(sector_t) > sizeof(u32)) {
 			/*shift down space and s so that sector_div will work */
 			while (space > (sector_t) (~(u32)0)) {
 				s >>= 1;
 				space >>= 1;
 				s += 1; /* force round-up */
-				conf->preshift++;
+				conf->sector_shift++;
 			}
 		}
 		round = sector_div(s, (u32)space) ? 1 : 0;
 		nb_zone = s + round;
 	}
-	printk("raid0 : nb_zone is %d.\n", nb_zone);
+	printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone);
 
-	printk("raid0 : Allocating %Zd bytes for hash.\n",
+	printk(KERN_INFO "raid0 : Allocating %zu bytes for hash.\n",
 				nb_zone*sizeof(struct strip_zone*));
 	conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
 	if (!conf->hash_table)
 		goto out_free_conf;
-	size = conf->strip_zone[cur].size;
+	sectors = conf->strip_zone[cur].sectors;
 
 	conf->hash_table[0] = conf->strip_zone + cur;
 	for (i=1; i< nb_zone; i++) {
-		while (size <= conf->hash_spacing) {
+		while (sectors <= conf->spacing) {
 			cur++;
-			size += conf->strip_zone[cur].size;
+			sectors += conf->strip_zone[cur].sectors;
 		}
-		size -= conf->hash_spacing;
+		sectors -= conf->spacing;
 		conf->hash_table[i] = conf->strip_zone + cur;
 	}
-	if (conf->preshift) {
-		conf->hash_spacing >>= conf->preshift;
-		/* round hash_spacing up so when we divide by it, we
+	if (conf->sector_shift) {
+		conf->spacing >>= conf->sector_shift;
+		/* round spacing up so when we divide by it, we
 		 * err on the side of too-low, which is safest
 		 */
-		conf->hash_spacing++;
+		conf->spacing++;
 	}
 
 	/* calculate the max read-ahead size.
@@ -387,12 +388,12 @@
 static int raid0_make_request (struct request_queue *q, struct bio *bio)
 {
 	mddev_t *mddev = q->queuedata;
-	unsigned int sect_in_chunk, chunksize_bits,  chunk_size, chunk_sects;
+	unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
 	raid0_conf_t *conf = mddev_to_conf(mddev);
 	struct strip_zone *zone;
 	mdk_rdev_t *tmp_dev;
 	sector_t chunk;
-	sector_t block, rsect;
+	sector_t sector, rsect;
 	const int rw = bio_data_dir(bio);
 	int cpu;
 
@@ -407,11 +408,9 @@
 		      bio_sectors(bio));
 	part_stat_unlock();
 
-	chunk_size = mddev->chunk_size >> 10;
 	chunk_sects = mddev->chunk_size >> 9;
-	chunksize_bits = ffz(~chunk_size);
-	block = bio->bi_sector >> 1;
-	
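+	/* chunk_sects is a power of two, so ffz(~chunk_sects) == log2(chunk_sects) */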
+	chunksect_bits = ffz(~chunk_sects);
+	sector = bio->bi_sector;
 
 	if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
 		struct bio_pair *bp;
@@ -434,28 +433,27 @@
  
 
 	{
-		sector_t x = block >> conf->preshift;
-		sector_div(x, (u32)conf->hash_spacing);
+		sector_t x = sector >> conf->sector_shift;
+		sector_div(x, (u32)conf->spacing);
 		zone = conf->hash_table[x];
 	}
- 
-	while (block >= (zone->zone_offset + zone->size)) 
+
+	while (sector >= zone->zone_start + zone->sectors)
 		zone++;
-    
-	sect_in_chunk = bio->bi_sector & ((chunk_size<<1) -1);
+
+	sect_in_chunk = bio->bi_sector & (chunk_sects - 1);
 
 
 	{
-		sector_t x =  (block - zone->zone_offset) >> chunksize_bits;
+		sector_t x = (sector - zone->zone_start) >> chunksect_bits;
 
 		sector_div(x, zone->nb_dev);
 		chunk = x;
 
-		x = block >> chunksize_bits;
+		x = sector >> chunksect_bits;
 		tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
 	}
-	rsect = (((chunk << chunksize_bits) + zone->dev_offset)<<1)
-		+ sect_in_chunk;
+	rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;
  
 	bio->bi_bdev = tmp_dev->bdev;
 	bio->bi_sector = rsect + tmp_dev->data_offset;
@@ -467,7 +465,7 @@
 
 bad_map:
 	printk("raid0_make_request bug: can't convert block across chunks"
-		" or bigger than %dk %llu %d\n", chunk_size, 
+		" or bigger than %dk %llu %d\n", chunk_sects / 2,
 		(unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 
 	bio_io_error(bio);
@@ -492,10 +490,10 @@
 			seq_printf(seq, "%s/", bdevname(
 				conf->strip_zone[j].dev[k]->bdev,b));
 
-		seq_printf(seq, "] zo=%d do=%d s=%d\n",
-				conf->strip_zone[j].zone_offset,
-				conf->strip_zone[j].dev_offset,
-				conf->strip_zone[j].size);
+		seq_printf(seq, "] zs=%d ds=%d s=%d\n",
+				conf->strip_zone[j].zone_start,
+				conf->strip_zone[j].dev_start,
+				conf->strip_zone[j].sectors);
 	}
 #endif
 	seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 9c788e2..7b4f5f7 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1016,12 +1016,16 @@
 	 * else mark the drive as failed
 	 */
 	if (test_bit(In_sync, &rdev->flags)
-	    && (conf->raid_disks - mddev->degraded) == 1)
+	    && (conf->raid_disks - mddev->degraded) == 1) {
 		/*
 		 * Don't fail the drive, act as though we were just a
-		 * normal single drive
+		 * normal single drive.
+		 * However, don't try a recovery from this drive as
+		 * it is very likely to fail.
 		 */
+		mddev->recovery_disabled = 1;
 		return;
+	}
 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
 		unsigned long flags;
 		spin_lock_irqsave(&conf->device_lock, flags);
@@ -1919,7 +1923,6 @@
 	int i, j, disk_idx;
 	mirror_info_t *disk;
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 
 	if (mddev->level != 1) {
 		printk("raid1: %s: raid level not set to mirroring (%d)\n",
@@ -1964,7 +1967,7 @@
 	spin_lock_init(&conf->device_lock);
 	mddev->queue->queue_lock = &conf->device_lock;
 
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		disk_idx = rdev->raid_disk;
 		if (disk_idx >= mddev->raid_disks
 		    || disk_idx < 0)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 970a96e..6736d6d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2025,7 +2025,6 @@
 	int i, disk_idx;
 	mirror_info_t *disk;
 	mdk_rdev_t *rdev;
-	struct list_head *tmp;
 	int nc, fc, fo;
 	sector_t stride, size;
 
@@ -2108,7 +2107,7 @@
 	spin_lock_init(&conf->device_lock);
 	mddev->queue->queue_lock = &conf->device_lock;
 
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		disk_idx = rdev->raid_disk;
 		if (disk_idx >= mddev->raid_disks
 		    || disk_idx < 0)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a36a743..a5ba080 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3998,7 +3998,6 @@
 	int raid_disk, memory;
 	mdk_rdev_t *rdev;
 	struct disk_info *disk;
-	struct list_head *tmp;
 	int working_disks = 0;
 
 	if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) {
@@ -4108,7 +4107,7 @@
 
 	pr_debug("raid5: run(%s) called.\n", mdname(mddev));
 
-	rdev_for_each(rdev, tmp, mddev) {
+	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		raid_disk = rdev->raid_disk;
 		if (raid_disk >= conf->raid_disks
 		    || raid_disk < 0)
@@ -4533,7 +4532,6 @@
 {
 	raid5_conf_t *conf = mddev_to_conf(mddev);
 	mdk_rdev_t *rdev;
-	struct list_head *rtmp;
 	int spares = 0;
 	int added_devices = 0;
 	unsigned long flags;
@@ -4541,7 +4539,7 @@
 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 		return -EBUSY;
 
-	rdev_for_each(rdev, rtmp, mddev)
+	list_for_each_entry(rdev, &mddev->disks, same_set)
 		if (rdev->raid_disk < 0 &&
 		    !test_bit(Faulty, &rdev->flags))
 			spares++;
@@ -4563,7 +4561,7 @@
 	/* Add some new drives, as many as will fit.
 	 * We know there are enough to make the newly sized array work.
 	 */
-	rdev_for_each(rdev, rtmp, mddev)
+	list_for_each_entry(rdev, &mddev->disks, same_set)
 		if (rdev->raid_disk < 0 &&
 		    !test_bit(Faulty, &rdev->flags)) {
 			if (raid5_add_disk(mddev, rdev) == 0) {
diff --git a/drivers/media/common/tuners/tda8290.c b/drivers/media/common/tuners/tda8290.c
index 0ee79fd..4b8662e 100644
--- a/drivers/media/common/tuners/tda8290.c
+++ b/drivers/media/common/tuners/tda8290.c
@@ -150,7 +150,7 @@
 	}
 }
 
-struct {
+static struct {
 	unsigned char seq[2];
 } fm_mode[] = {
 	{ { 0x01, 0x81} },	/* Put device into expert mode */
@@ -207,7 +207,6 @@
 	msleep(1);
 
 	if (params->mode == V4L2_TUNER_RADIO) {
-		int i;
 		unsigned char deemphasis[]  = { 0x13, 1 };
 
 		/* FIXME: allow using a different deemphasis */
@@ -767,7 +766,8 @@
 	fe->ops.analog_ops.info.name = name;
 
 	if (priv->ver & TDA8290) {
-		tda8290_init_tuner(fe);
+		if (priv->ver & (TDA8275 | TDA8275A))
+			tda8290_init_tuner(fe);
 		tda8290_init_if(fe);
 	} else if (priv->ver & TDA8295)
 		tda8295_init_if(fe);
diff --git a/drivers/media/dvb/dm1105/Kconfig b/drivers/media/dvb/dm1105/Kconfig
index 1332301..43f4d44 100644
--- a/drivers/media/dvb/dm1105/Kconfig
+++ b/drivers/media/dvb/dm1105/Kconfig
@@ -1,6 +1,7 @@
 config DVB_DM1105
 	tristate "SDMC DM1105 based PCI cards"
 	depends on DVB_CORE && PCI && I2C
+	depends on INPUT
 	select DVB_PLL if !DVB_FE_CUSTOMISE
 	select DVB_STV0299 if !DVB_FE_CUSTOMISE
 	select DVB_STV0288 if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 171f9ca..8434077 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -824,7 +824,7 @@
 	return 0;
 }
 
-struct dtv_cmds_h dtv_cmds[] = {
+static struct dtv_cmds_h dtv_cmds[] = {
 	[DTV_TUNE] = {
 		.name	= "DTV_TUNE",
 		.cmd	= DTV_TUNE,
@@ -962,7 +962,7 @@
 	},
 };
 
-void dtv_property_dump(struct dtv_property *tvp)
+static void dtv_property_dump(struct dtv_property *tvp)
 {
 	int i;
 
@@ -993,7 +993,7 @@
 		dprintk("%s() tvp.u.data = 0x%08x\n", __func__, tvp->u.data);
 }
 
-int is_legacy_delivery_system(fe_delivery_system_t s)
+static int is_legacy_delivery_system(fe_delivery_system_t s)
 {
 	if((s == SYS_UNDEFINED) || (s == SYS_DVBC_ANNEX_AC) ||
 	   (s == SYS_DVBC_ANNEX_B) || (s == SYS_DVBT) || (s == SYS_DVBS) ||
@@ -1007,7 +1007,8 @@
  * drivers can use a single set_frontend tuning function, regardless of whether
  * it's being used for the legacy or new API, reducing code and complexity.
  */
-void dtv_property_cache_sync(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
+static void dtv_property_cache_sync(struct dvb_frontend *fe,
+				    struct dvb_frontend_parameters *p)
 {
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 
@@ -1059,7 +1060,7 @@
 /* Ensure the cached values are set correctly in the frontend
  * legacy tuning structures, for the advanced tuning API.
  */
-void dtv_property_legacy_params_sync(struct dvb_frontend *fe)
+static void dtv_property_legacy_params_sync(struct dvb_frontend *fe)
 {
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	struct dvb_frontend_private *fepriv = fe->frontend_priv;
@@ -1114,7 +1115,7 @@
 /* Ensure the cached values are set correctly in the frontend
  * legacy tuning structures, for the legacy tuning API.
  */
-void dtv_property_adv_params_sync(struct dvb_frontend *fe)
+static void dtv_property_adv_params_sync(struct dvb_frontend *fe)
 {
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	struct dvb_frontend_private *fepriv = fe->frontend_priv;
@@ -1149,7 +1150,7 @@
 	}
 }
 
-void dtv_property_cache_submit(struct dvb_frontend *fe)
+static void dtv_property_cache_submit(struct dvb_frontend *fe)
 {
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 
@@ -1180,8 +1181,9 @@
 static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file,
 			unsigned int cmd, void *parg);
 
-int dtv_property_process_get(struct dvb_frontend *fe, struct dtv_property *tvp,
-	struct inode *inode, struct file *file)
+static int dtv_property_process_get(struct dvb_frontend *fe,
+				    struct dtv_property *tvp,
+				    struct inode *inode, struct file *file)
 {
 	int r = 0;
 
@@ -1253,8 +1255,10 @@
 	return r;
 }
 
-int dtv_property_process_set(struct dvb_frontend *fe, struct dtv_property *tvp,
-	struct inode *inode, struct file *file)
+static int dtv_property_process_set(struct dvb_frontend *fe,
+				    struct dtv_property *tvp,
+				    struct inode *inode,
+				    struct file *file)
 {
 	int r = 0;
 	struct dvb_frontend_private *fepriv = fe->frontend_priv;
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 03fd9dd..f6ba846 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -125,7 +125,6 @@
 
 struct dvb_net_priv {
 	int in_use;
-	struct net_device_stats stats;
 	u16 pid;
 	struct net_device *net;
 	struct dvb_net *host;
@@ -384,8 +383,8 @@
 				if (priv->ule_skb) {
 					dev_kfree_skb( priv->ule_skb );
 					/* Prepare for next SNDU. */
-					priv->stats.rx_errors++;
-					priv->stats.rx_frame_errors++;
+					dev->stats.rx_errors++;
+					dev->stats.rx_frame_errors++;
 				}
 				reset_ule(priv);
 				priv->need_pusi = 1;
@@ -438,8 +437,8 @@
 					dev_kfree_skb( priv->ule_skb );
 					/* Prepare for next SNDU. */
 					// reset_ule(priv);  moved to below.
-					priv->stats.rx_errors++;
-					priv->stats.rx_frame_errors++;
+					dev->stats.rx_errors++;
+					dev->stats.rx_frame_errors++;
 				}
 				reset_ule(priv);
 				/* skip to next PUSI. */
@@ -460,8 +459,8 @@
 						/* Drop partly decoded SNDU, reset state, resync on PUSI. */
 						if (priv->ule_skb) {
 							dev_kfree_skb( priv->ule_skb );
-							priv->stats.rx_errors++;
-							priv->stats.rx_frame_errors++;
+							dev->stats.rx_errors++;
+							dev->stats.rx_frame_errors++;
 						}
 						reset_ule(priv);
 						priv->need_pusi = 1;
@@ -477,8 +476,8 @@
 				if (priv->ule_sndu_remain > 183) {
 					/* Current SNDU lacks more data than there could be available in the
 					 * current TS cell. */
-					priv->stats.rx_errors++;
-					priv->stats.rx_length_errors++;
+					dev->stats.rx_errors++;
+					dev->stats.rx_length_errors++;
 					printk(KERN_WARNING "%lu: Expected %d more SNDU bytes, but "
 					       "got PUSI (pf %d, ts_remain %d).  Flushing incomplete payload.\n",
 					       priv->ts_count, priv->ule_sndu_remain, ts[4], ts_remain);
@@ -520,8 +519,8 @@
 				if (priv->ule_sndu_len < 5) {
 					printk(KERN_WARNING "%lu: Invalid ULE SNDU length %u. "
 					       "Resyncing.\n", priv->ts_count, priv->ule_sndu_len);
-					priv->stats.rx_errors++;
-					priv->stats.rx_length_errors++;
+					dev->stats.rx_errors++;
+					dev->stats.rx_length_errors++;
 					priv->ule_sndu_len = 0;
 					priv->need_pusi = 1;
 					new_ts = 1;
@@ -573,7 +572,7 @@
 			if (priv->ule_skb == NULL) {
 				printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
 				       dev->name);
-				priv->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 				return;
 			}
 
@@ -637,8 +636,8 @@
 				ule_dump = 1;
 #endif
 
-				priv->stats.rx_errors++;
-				priv->stats.rx_crc_errors++;
+				dev->stats.rx_errors++;
+				dev->stats.rx_crc_errors++;
 				dev_kfree_skb(priv->ule_skb);
 			} else {
 				/* CRC32 verified OK. */
@@ -744,8 +743,8 @@
 				 * receive the packet anyhow. */
 				/* if (priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST)
 					priv->ule_skb->pkt_type = PACKET_HOST; */
-				priv->stats.rx_packets++;
-				priv->stats.rx_bytes += priv->ule_skb->len;
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes += priv->ule_skb->len;
 				netif_rx(priv->ule_skb);
 			}
 			sndu_done:
@@ -800,8 +799,7 @@
 {
 	u8 *eth;
 	struct sk_buff *skb;
-	struct net_device_stats *stats =
-		&((struct dvb_net_priv *) netdev_priv(dev))->stats;
+	struct net_device_stats *stats = &dev->stats;
 	int snap = 0;
 
 	/* note: pkt_len includes a 32bit checksum */
@@ -1216,28 +1214,29 @@
 	return dvb_net_feed_stop(dev);
 }
 
-static struct net_device_stats * dvb_net_get_stats(struct net_device *dev)
-{
-	return &((struct dvb_net_priv *) netdev_priv(dev))->stats;
-}
-
 static const struct header_ops dvb_header_ops = {
 	.create		= eth_header,
 	.parse		= eth_header_parse,
 	.rebuild	= eth_rebuild_header,
 };
 
+
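+/* Consolidates the callbacks that dvb_net_setup() used to assign one by one;
+ * statistics now come from the core net_device stats.
+ */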
+static const struct net_device_ops dvb_netdev_ops = {
+	.ndo_open		= dvb_net_open,
+	.ndo_stop		= dvb_net_stop,
+	.ndo_start_xmit		= dvb_net_tx,
+	.ndo_set_multicast_list = dvb_net_set_multicast_list,
+	.ndo_set_mac_address    = dvb_net_set_mac,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static void dvb_net_setup(struct net_device *dev)
 {
 	ether_setup(dev);
 
 	dev->header_ops		= &dvb_header_ops;
-	dev->open		= dvb_net_open;
-	dev->stop		= dvb_net_stop;
-	dev->hard_start_xmit	= dvb_net_tx;
-	dev->get_stats		= dvb_net_get_stats;
-	dev->set_multicast_list = dvb_net_set_multicast_list;
-	dev->set_mac_address    = dvb_net_set_mac;
+	dev->netdev_ops		= &dvb_netdev_ops;
 	dev->mtu		= 4096;
 	dev->mc_count           = 0;
 
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index 5017f08..c6e7b42 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -41,7 +41,7 @@
 static int dvb_usb_anysee_debug;
 module_param_named(debug, dvb_usb_anysee_debug, int, 0644);
 MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS);
-int dvb_usb_anysee_delsys;
+static int dvb_usb_anysee_delsys;
 module_param_named(delsys, dvb_usb_anysee_delsys, int, 0644);
 MODULE_PARM_DESC(delsys, "select delivery mode (0=DVB-C, 1=DVB-T)");
 DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
diff --git a/drivers/media/dvb/frontends/cx24116.c b/drivers/media/dvb/frontends/cx24116.c
index 4f514d3..28ad609 100644
--- a/drivers/media/dvb/frontends/cx24116.c
+++ b/drivers/media/dvb/frontends/cx24116.c
@@ -369,7 +369,7 @@
  * Not all S2 modulation schemes are supported and not all rates with
  * a scheme are supported. In particular, there is no auto-detect when in S2 mode.
  */
-struct cx24116_modfec {
+static struct cx24116_modfec {
 	fe_delivery_system_t delivery_system;
 	fe_modulation_t modulation;
 	fe_code_rate_t fec;
diff --git a/drivers/media/dvb/frontends/lgdt3304.c b/drivers/media/dvb/frontends/lgdt3304.c
index 469ace5..3bb0c43 100644
--- a/drivers/media/dvb/frontends/lgdt3304.c
+++ b/drivers/media/dvb/frontends/lgdt3304.c
@@ -42,7 +42,7 @@
 
 	for (i=0; i<len-1; i+=3){
 		if((err = i2c_transfer(state->i2c, &i2cmsgs, 1))<0) {
-			printk("%s i2c_transfer error %d\n", __FUNCTION__, err);
+			printk("%s i2c_transfer error %d\n", __func__, err);
 			if (err < 0)
 				return err;
 			else
@@ -73,7 +73,7 @@
 	i2cmsgs[1].buf = &buf;
 
 	if((ret = i2c_transfer(state->i2c, i2cmsgs, 2))<0) {
-		printk("%s i2c_transfer error %d\n", __FUNCTION__, ret);
+		printk("%s i2c_transfer error %d\n", __func__, ret);
 		return ret;
 	}
 
@@ -94,7 +94,7 @@
 	};
 	ret = i2c_transfer(state->i2c, &i2cmsgs, 1);
 	if (ret != 1) {
-		printk("%s i2c_transfer error %d\n", __FUNCTION__, ret);
+		printk("%s i2c_transfer error %d\n", __func__, ret);
 		return ret;
 	}
 
@@ -238,7 +238,7 @@
 		}
 
 		if (err) {
-			printk("%s error setting modulation\n", __FUNCTION__);
+			printk("%s error setting modulation\n", __func__);
 		} else {
 			state->current_modulation = param->u.vsb.modulation;
 		}
@@ -305,7 +305,7 @@
 		}
 		break;
 	default:
-		printk("%s unhandled modulation\n", __FUNCTION__);
+		printk("%s unhandled modulation\n", __func__);
 	}
 
 
diff --git a/drivers/media/dvb/frontends/s921_module.c b/drivers/media/dvb/frontends/s921_module.c
index 3cbb9cb..892af8c 100644
--- a/drivers/media/dvb/frontends/s921_module.c
+++ b/drivers/media/dvb/frontends/s921_module.c
@@ -136,7 +136,7 @@
 	};
 
 	if((err = i2c_transfer(state->i2c, &i2cmsgs, 1))<0) {
-		printk("%s i2c_transfer error %d\n", __FUNCTION__, err);
+		printk("%s i2c_transfer error %d\n", __func__, err);
 		if (err < 0)
 			return err;
 		else
diff --git a/drivers/media/dvb/frontends/stb0899_algo.c b/drivers/media/dvb/frontends/stb0899_algo.c
index ced9b7a..83dc7e1 100644
--- a/drivers/media/dvb/frontends/stb0899_algo.c
+++ b/drivers/media/dvb/frontends/stb0899_algo.c
@@ -54,7 +54,7 @@
  * stb0899_get_srate
  * Get the current symbol rate
  */
-u32 stb0899_get_srate(struct stb0899_state *state)
+static u32 stb0899_get_srate(struct stb0899_state *state)
 {
 	struct stb0899_internal *internal = &state->internal;
 	u8 sfr[3];
@@ -763,7 +763,7 @@
 	stb0899_write_s2reg(state, STB0899_S2DEMOD, STB0899_BASE_CSM_CNTRL1, STB0899_OFF0_CSM_CNTRL1, reg);
 }
 
-long Log2Int(int number)
+static long Log2Int(int number)
 {
 	int i;
 
diff --git a/drivers/media/dvb/frontends/stb0899_drv.c b/drivers/media/dvb/frontends/stb0899_drv.c
index bee28f7..10613ac 100644
--- a/drivers/media/dvb/frontends/stb0899_drv.c
+++ b/drivers/media/dvb/frontends/stb0899_drv.c
@@ -134,7 +134,7 @@
 };
 
 /* DVB-S2 Es/N0 quant in dB/100 vs read value * 100*/
-struct stb0899_tab stb0899_quant_tab[] = {
+static struct stb0899_tab stb0899_quant_tab[] = {
 	{    0,	    0 },
 	{    0,	  100 },
 	{  600,	  200 },
@@ -177,7 +177,7 @@
 };
 
 /* DVB-S2 Es/N0 estimate in dB/100 vs read value */
-struct stb0899_tab stb0899_est_tab[] = {
+static struct stb0899_tab stb0899_est_tab[] = {
 	{    0,	     0 },
 	{    0,	     1 },
 	{  301,	     2 },
@@ -217,7 +217,7 @@
 	{ 5721,	526017 },
 };
 
-int _stb0899_read_reg(struct stb0899_state *state, unsigned int reg)
+static int _stb0899_read_reg(struct stb0899_state *state, unsigned int reg)
 {
 	int ret;
 
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index 3507463..bcbc5d4 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -1337,7 +1337,7 @@
 	.tuner_set_rfsiggain	= NULL
 };
 
-struct stb6100_config tt3200_stb6100_config = {
+static struct stb6100_config tt3200_stb6100_config = {
 	.tuner_address	= 0x60,
 	.refclock	= 27000000,
 };
@@ -1450,7 +1450,7 @@
 		if (budget_ci->budget.dvb_frontend) {
 			if (dvb_attach(stb6100_attach, budget_ci->budget.dvb_frontend, &tt3200_stb6100_config, &budget_ci->budget.i2c_adap)) {
 				if (!dvb_attach(lnbp21_attach, budget_ci->budget.dvb_frontend, &budget_ci->budget.i2c_adap, 0, 0)) {
-					printk("%s: No LNBP21 found!\n", __FUNCTION__);
+					printk("%s: No LNBP21 found!\n", __func__);
 					dvb_frontend_detach(budget_ci->budget.dvb_frontend);
 					budget_ci->budget.dvb_frontend = NULL;
 				}
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 34a39d2..46fd573 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -569,7 +569,6 @@
 
 	cafe_smbus_enable_irq(cam);
 	adap->id = I2C_HW_SMBUS_CAFE;
-	adap->class = I2C_CLASS_CAM_DIGITAL;
 	adap->owner = THIS_MODULE;
 	adap->client_register = cafe_smbus_attach;
 	adap->client_unregister = cafe_smbus_detach;
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index b0f8375..2d250a2 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -69,6 +69,11 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called cx88-dvb.
 
+config VIDEO_CX88_MPEG
+	tristate
+	depends on VIDEO_CX88_DVB || VIDEO_CX88_BLACKBIRD
+	default y
+
 config VIDEO_CX88_VP3054
 	tristate "VP-3054 Secondary I2C Bus Support"
 	default m
diff --git a/drivers/media/video/cx88/Makefile b/drivers/media/video/cx88/Makefile
index 6ec30f2..b06b127 100644
--- a/drivers/media/video/cx88/Makefile
+++ b/drivers/media/video/cx88/Makefile
@@ -3,7 +3,8 @@
 cx8800-objs	:= cx88-video.o cx88-vbi.o
 cx8802-objs	:= cx88-mpeg.o
 
-obj-$(CONFIG_VIDEO_CX88) += cx88xx.o cx8800.o cx8802.o
+obj-$(CONFIG_VIDEO_CX88) += cx88xx.o cx8800.o
+obj-$(CONFIG_VIDEO_CX88_MPEG) += cx8802.o
 obj-$(CONFIG_VIDEO_CX88_ALSA) += cx88-alsa.o
 obj-$(CONFIG_VIDEO_CX88_BLACKBIRD) += cx88-blackbird.o
 obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index da4dd49..613dfea 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -138,6 +138,28 @@
 	return ret;
 }
 
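+/* Open or close the demodulator's I2C gate.  Installed as core->gate_ctrl so
+ * that cx88_call_i2c_clients() needs no DVB-specific code.
+ */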
+static void cx88_dvb_gate_ctrl(struct cx88_core  *core, int open)
+{
+	struct videobuf_dvb_frontends *f;
+	struct videobuf_dvb_frontend *fe;
+
+	if (!core->dvbdev)
+		return;
+
+	f = &core->dvbdev->frontends;
+
+	if (!f)
+		return;
+
+	if (f->gate <= 1) /* undefined or fe0 */
+		fe = videobuf_dvb_get_frontend(f, 1);
+	else
+		fe = videobuf_dvb_get_frontend(f, f->gate);
+
+	if (fe && fe->dvb.frontend && fe->dvb.frontend->ops.i2c_gate_ctrl)
+		fe->dvb.frontend->ops.i2c_gate_ctrl(fe->dvb.frontend, open);
+}
+
 /* ------------------------------------------------------------------ */
 
 static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
@@ -597,12 +619,30 @@
 	struct cx88_core *core = dev->core;
 	struct videobuf_dvb_frontend *fe0, *fe1 = NULL;
 	int mfe_shared = 0; /* bus not shared by default */
+	int i;
 
 	if (0 != core->i2c_rc) {
 		printk(KERN_ERR "%s/2: no i2c-bus available, cannot attach dvb drivers\n", core->name);
 		goto frontend_detach;
 	}
 
+	if (!core->board.num_frontends)
+		return -EINVAL;
+
+	mutex_init(&dev->frontends.lock);
+	INIT_LIST_HEAD(&dev->frontends.felist);
+
+	printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__,
+			 core->board.num_frontends);
+	for (i = 1; i <= core->board.num_frontends; i++) {
+		fe0 = videobuf_dvb_alloc_frontend(&dev->frontends, i);
+		if (!fe0) {
+			printk(KERN_ERR "%s() failed to alloc\n", __func__);
+			videobuf_dvb_dealloc_frontends(&dev->frontends);
+			goto frontend_detach;
+		}
+	}
+
 	/* Get the first frontend */
 	fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
 	if (!fe0)
@@ -611,6 +651,9 @@
 	/* multi-frontend gate control is undefined or defaults to fe0 */
 	dev->frontends.gate = 0;
 
+	/* Sets the gate control callback to be used by i2c command calls */
+	core->gate_ctrl = cx88_dvb_gate_ctrl;
+
 	/* init frontend(s) */
 	switch (core->boardnr) {
 	case CX88_BOARD_HAUPPAUGE_DVB_T1:
@@ -1109,6 +1152,7 @@
 		&dev->pci->dev, adapter_nr, mfe_shared);
 
 frontend_detach:
+	core->gate_ctrl = NULL;
 	videobuf_dvb_dealloc_frontends(&dev->frontends);
 	return -EINVAL;
 }
@@ -1270,6 +1314,8 @@
 
 	vp3054_i2c_remove(dev);
 
+	core->gate_ctrl = NULL;
+
 	return 0;
 }
 
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 1ab691d..c0ff230 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -116,30 +116,16 @@
 
 void cx88_call_i2c_clients(struct cx88_core *core, unsigned int cmd, void *arg)
 {
-#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
-	struct videobuf_dvb_frontends *f = &core->dvbdev->frontends;
-	struct videobuf_dvb_frontend *fe = NULL;
-#endif
 	if (0 != core->i2c_rc)
 		return;
 
-#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
-	if (core->dvbdev && f) {
-		if(f->gate <= 1) /* undefined or fe0 */
-			fe = videobuf_dvb_get_frontend(f, 1);
-		else
-			fe = videobuf_dvb_get_frontend(f, f->gate);
+	if (core->gate_ctrl)
+		core->gate_ctrl(core, 1);
 
-		if (fe && fe->dvb.frontend && fe->dvb.frontend->ops.i2c_gate_ctrl)
-			fe->dvb.frontend->ops.i2c_gate_ctrl(fe->dvb.frontend, 1);
+	i2c_clients_command(&core->i2c_adap, cmd, arg);
 
-		i2c_clients_command(&core->i2c_adap, cmd, arg);
-
-		if (fe && fe->dvb.frontend && fe->dvb.frontend->ops.i2c_gate_ctrl)
-			fe->dvb.frontend->ops.i2c_gate_ctrl(fe->dvb.frontend, 0);
-	} else
-#endif
-		i2c_clients_command(&core->i2c_adap, cmd, arg);
+	if (core->gate_ctrl)
+		core->gate_ctrl(core, 0);
 }
 
 static const struct i2c_algo_bit_data cx8800_i2c_algo_template = {
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 59164fc..b295b76 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -787,6 +787,9 @@
 	dev->pci = pci_dev;
 	dev->core = core;
 
+	/* Maintain a reference so cx88-video can query the 8802 device. */
+	core->dvbdev = dev;
+
 	err = cx8802_init_common(dev);
 	if (err != 0)
 		goto fail_free;
@@ -794,32 +797,6 @@
 	INIT_LIST_HEAD(&dev->drvlist);
 	list_add_tail(&dev->devlist,&cx8802_devlist);
 
-#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
-	mutex_init(&dev->frontends.lock);
-	INIT_LIST_HEAD(&dev->frontends.felist);
-
-	if (core->board.num_frontends) {
-		struct videobuf_dvb_frontend *fe;
-		int i;
-
-		printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__,
-			core->board.num_frontends);
-		for (i = 1; i <= core->board.num_frontends; i++) {
-			fe = videobuf_dvb_alloc_frontend(&dev->frontends, i);
-			if(fe == NULL) {
-				printk(KERN_ERR "%s() failed to alloc\n",
-					__func__);
-				videobuf_dvb_dealloc_frontends(&dev->frontends);
-				err = -ENOMEM;
-				goto fail_free;
-			}
-		}
-	}
-#endif
-
-	/* Maintain a reference so cx88-video can query the 8802 device. */
-	core->dvbdev = dev;
-
 	/* now autoload cx88-dvb or cx88-blackbird */
 	request_modules(dev);
 	return 0;
@@ -827,6 +804,7 @@
  fail_free:
 	kfree(dev);
  fail_core:
+	core->dvbdev = NULL;
 	cx88_core_put(core,pci_dev);
 	return err;
 }
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index eb9ce30..60a8b31 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -302,6 +302,7 @@
 	struct btcx_riscmem    stopper;
 	u32                    count;
 };
+struct cx88_core;
 
 struct cx88_core {
 	struct list_head           devlist;
@@ -334,7 +335,8 @@
 
 	/* config info -- dvb */
 #if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
-	int 			   (*prev_set_voltage)(struct dvb_frontend* fe, fe_sec_voltage_t voltage);
+	int 			   (*prev_set_voltage)(struct dvb_frontend *fe, fe_sec_voltage_t voltage);
+	void			   (*gate_ctrl)(struct cx88_core  *core, int open);
 #endif
 
 	/* state info */
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index e776699..ef9bf00 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -1842,7 +1842,7 @@
  * em28xx_init_dev()
  * allocates and inits the device structs, registers i2c bus and v4l device
  */
-int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
+static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
 			   int minor)
 {
 	struct em28xx *dev = *devhandle;
@@ -1990,8 +1990,7 @@
 		int check_interface = 1;
 		isoc_pipe = 1;
 		endpoint = &interface->cur_altsetting->endpoint[1].desc;
-		if (usb_endpoint_type(endpoint) !=
-		    USB_ENDPOINT_XFER_ISOC)
+		if (!usb_endpoint_xfer_isoc(endpoint))
 			check_interface = 0;
 
 		if (usb_endpoint_dir_out(endpoint))
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index 819ccea..eb5fb05 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -393,7 +393,7 @@
 	return ret;
 }
 
-struct em28xx_vol_table outputs[] = {
+static const struct em28xx_vol_table outputs[] = {
 	{ EM28XX_AOUT_MASTER, AC97_MASTER_VOL      },
 	{ EM28XX_AOUT_LINE,   AC97_LINE_LEVEL_VOL  },
 	{ EM28XX_AOUT_MONO,   AC97_MASTER_MONO_VOL },
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index 42bbaf6..0443afe 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -307,7 +307,7 @@
 	mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
 }
 
-void em28xx_ir_start(struct em28xx_IR *ir)
+static void em28xx_ir_start(struct em28xx_IR *ir)
 {
 	setup_timer(&ir->timer, ir_timer, (unsigned long)ir);
 	INIT_WORK(&ir->work, em28xx_ir_work);
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k83a.c b/drivers/media/video/gspca/m5602/m5602_s5k83a.c
index af3f2dc..ccea4a7 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k83a.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k83a.c
@@ -113,7 +113,7 @@
 	return 0;
 }
 
-void s5k83a_dump_registers(struct sd *sd)
+static void s5k83a_dump_registers(struct sd *sd)
 {
 	int address;
 	u8 page, old_page;
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index ca26b0c..05c14a2 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -1347,7 +1347,6 @@
 		.name = "ov7670",
 	},
 	.id 		= I2C_DRIVERID_OV7670,
-	.class 		= I2C_CLASS_CAM_DIGITAL,
 	.attach_adapter = ov7670_attach,
 	.detach_client	= ov7670_detach,
 	.command	= ov7670_command,
diff --git a/drivers/media/video/ovcamchip/ovcamchip_core.c b/drivers/media/video/ovcamchip/ovcamchip_core.c
index 2c4acbf..c841f4e 100644
--- a/drivers/media/video/ovcamchip/ovcamchip_core.c
+++ b/drivers/media/video/ovcamchip/ovcamchip_core.c
@@ -405,7 +405,6 @@
 		.name =		"ovcamchip",
 	},
 	.id =			I2C_DRIVERID_OVCAMCHIP,
-	.class =		I2C_CLASS_CAM_DIGITAL,
 	.attach_adapter =	ovcamchip_attach,
 	.detach_client =	ovcamchip_detach,
 	.command =		ovcamchip_command,
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 8fb92ac..fa304e5 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -3655,7 +3655,7 @@
 	int ret;
 	pvr2_trace(PVR2_TRACE_INIT,"Performing a device reset...");
 	ret = usb_lock_device_for_reset(hdw->usb_dev,NULL);
-	if (ret == 1) {
+	if (ret == 0) {
 		ret = usb_reset_device(hdw->usb_dev);
 		usb_unlock_device(hdw->usb_dev);
 	} else {
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index 9d33de2..a1d6008 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -34,12 +34,10 @@
 
 #include <linux/videodev2.h>
 
-#include <asm/dma.h>
+#include <mach/dma.h>
 #include <mach/pxa-regs.h>
 #include <mach/camera.h>
 
-#include "pxa_camera.h"
-
 #define PXA_CAM_VERSION_CODE KERNEL_VERSION(0, 0, 5)
 #define PXA_CAM_DRV_NAME "pxa27x-camera"
 
diff --git a/drivers/media/video/pxa_camera.h b/drivers/media/video/pxa_camera.h
deleted file mode 100644
index 89cbfc9..0000000
--- a/drivers/media/video/pxa_camera.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* Camera Interface */
-#define CICR0		__REG(0x50000000)
-#define CICR1		__REG(0x50000004)
-#define CICR2		__REG(0x50000008)
-#define CICR3		__REG(0x5000000C)
-#define CICR4		__REG(0x50000010)
-#define CISR		__REG(0x50000014)
-#define CIFR		__REG(0x50000018)
-#define CITOR		__REG(0x5000001C)
-#define CIBR0		__REG(0x50000028)
-#define CIBR1		__REG(0x50000030)
-#define CIBR2		__REG(0x50000038)
-
-#define CICR0_DMAEN	(1 << 31)	/* DMA request enable */
-#define CICR0_PAR_EN	(1 << 30)	/* Parity enable */
-#define CICR0_SL_CAP_EN	(1 << 29)	/* Capture enable for slave mode */
-#define CICR0_ENB	(1 << 28)	/* Camera interface enable */
-#define CICR0_DIS	(1 << 27)	/* Camera interface disable */
-#define CICR0_SIM	(0x7 << 24)	/* Sensor interface mode mask */
-#define CICR0_TOM	(1 << 9)	/* Time-out mask */
-#define CICR0_RDAVM	(1 << 8)	/* Receive-data-available mask */
-#define CICR0_FEM	(1 << 7)	/* FIFO-empty mask */
-#define CICR0_EOLM	(1 << 6)	/* End-of-line mask */
-#define CICR0_PERRM	(1 << 5)	/* Parity-error mask */
-#define CICR0_QDM	(1 << 4)	/* Quick-disable mask */
-#define CICR0_CDM	(1 << 3)	/* Disable-done mask */
-#define CICR0_SOFM	(1 << 2)	/* Start-of-frame mask */
-#define CICR0_EOFM	(1 << 1)	/* End-of-frame mask */
-#define CICR0_FOM	(1 << 0)	/* FIFO-overrun mask */
-
-#define CICR1_TBIT	(1 << 31)	/* Transparency bit */
-#define CICR1_RGBT_CONV	(0x3 << 29)	/* RGBT conversion mask */
-#define CICR1_PPL	(0x7ff << 15)	/* Pixels per line mask */
-#define CICR1_RGB_CONV	(0x7 << 12)	/* RGB conversion mask */
-#define CICR1_RGB_F	(1 << 11)	/* RGB format */
-#define CICR1_YCBCR_F	(1 << 10)	/* YCbCr format */
-#define CICR1_RGB_BPP	(0x7 << 7)	/* RGB bis per pixel mask */
-#define CICR1_RAW_BPP	(0x3 << 5)	/* Raw bis per pixel mask */
-#define CICR1_COLOR_SP	(0x3 << 3)	/* Color space mask */
-#define CICR1_DW	(0x7 << 0)	/* Data width mask */
-
-#define CICR2_BLW	(0xff << 24)	/* Beginning-of-line pixel clock
-					   wait count mask */
-#define CICR2_ELW	(0xff << 16)	/* End-of-line pixel clock
-					   wait count mask */
-#define CICR2_HSW	(0x3f << 10)	/* Horizontal sync pulse width mask */
-#define CICR2_BFPW	(0x3f << 3)	/* Beginning-of-frame pixel clock
-					   wait count mask */
-#define CICR2_FSW	(0x7 << 0)	/* Frame stabilization
-					   wait count mask */
-
-#define CICR3_BFW	(0xff << 24)	/* Beginning-of-frame line clock
-					   wait count mask */
-#define CICR3_EFW	(0xff << 16)	/* End-of-frame line clock
-					   wait count mask */
-#define CICR3_VSW	(0x3f << 10)	/* Vertical sync pulse width mask */
-#define CICR3_BFPW	(0x3f << 3)	/* Beginning-of-frame pixel clock
-					   wait count mask */
-#define CICR3_LPF	(0x7ff << 0)	/* Lines per frame mask */
-
-#define CICR4_MCLK_DLY	(0x3 << 24)	/* MCLK Data Capture Delay mask */
-#define CICR4_PCLK_EN	(1 << 23)	/* Pixel clock enable */
-#define CICR4_PCP	(1 << 22)	/* Pixel clock polarity */
-#define CICR4_HSP	(1 << 21)	/* Horizontal sync polarity */
-#define CICR4_VSP	(1 << 20)	/* Vertical sync polarity */
-#define CICR4_MCLK_EN	(1 << 19)	/* MCLK enable */
-#define CICR4_FR_RATE	(0x7 << 8)	/* Frame rate mask */
-#define CICR4_DIV	(0xff << 0)	/* Clock divisor mask */
-
-#define CISR_FTO	(1 << 15)	/* FIFO time-out */
-#define CISR_RDAV_2	(1 << 14)	/* Channel 2 receive data available */
-#define CISR_RDAV_1	(1 << 13)	/* Channel 1 receive data available */
-#define CISR_RDAV_0	(1 << 12)	/* Channel 0 receive data available */
-#define CISR_FEMPTY_2	(1 << 11)	/* Channel 2 FIFO empty */
-#define CISR_FEMPTY_1	(1 << 10)	/* Channel 1 FIFO empty */
-#define CISR_FEMPTY_0	(1 << 9)	/* Channel 0 FIFO empty */
-#define CISR_EOL	(1 << 8)	/* End of line */
-#define CISR_PAR_ERR	(1 << 7)	/* Parity error */
-#define CISR_CQD	(1 << 6)	/* Camera interface quick disable */
-#define CISR_CDD	(1 << 5)	/* Camera interface disable done */
-#define CISR_SOF	(1 << 4)	/* Start of frame */
-#define CISR_EOF	(1 << 3)	/* End of frame */
-#define CISR_IFO_2	(1 << 2)	/* FIFO overrun for Channel 2 */
-#define CISR_IFO_1	(1 << 1)	/* FIFO overrun for Channel 1 */
-#define CISR_IFO_0	(1 << 0)	/* FIFO overrun for Channel 0 */
-
-#define CIFR_FLVL2	(0x7f << 23)	/* FIFO 2 level mask */
-#define CIFR_FLVL1	(0x7f << 16)	/* FIFO 1 level mask */
-#define CIFR_FLVL0	(0xff << 8)	/* FIFO 0 level mask */
-#define CIFR_THL_0	(0x3 << 4)	/* Threshold Level for Channel 0 FIFO */
-#define CIFR_RESET_F	(1 << 3)	/* Reset input FIFOs */
-#define CIFR_FEN2	(1 << 2)	/* FIFO enable for channel 2 */
-#define CIFR_FEN1	(1 << 1)	/* FIFO enable for channel 1 */
-#define CIFR_FEN0	(1 << 0)	/* FIFO enable for channel 0 */
-
diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
index f8d85dd..b085496 100644
--- a/drivers/media/video/usbvideo/ibmcam.c
+++ b/drivers/media/video/usbvideo/ibmcam.c
@@ -3779,7 +3779,7 @@
 			err("Alternate settings have different endpoint addresses!");
 			return -ENODEV;
 		}
-		if (usb_endpoint_type(endpoint) != USB_ENDPOINT_XFER_ISOC) {
+		if (!usb_endpoint_xfer_isoc(endpoint)) {
 			err("Interface %d. has non-ISO endpoint!", ifnum);
 			return -ENODEV;
 		}
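
Context note (not part of the patch): the usbvideo/usbvision hunks in this series replace the open-coded transfer-type comparison with the usb_endpoint_xfer_isoc() helper. For reference, that helper is essentially the following inline from <linux/usb.h>:

static inline int usb_endpoint_xfer_isoc(const struct usb_endpoint_descriptor *epd)
{
	/* true when the endpoint's transfer-type bits select isochronous */
	return (epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
		USB_ENDPOINT_XFER_ISOC;
}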
diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
index 90f0ce6..900ec21 100644
--- a/drivers/media/video/usbvideo/konicawc.c
+++ b/drivers/media/video/usbvideo/konicawc.c
@@ -823,7 +823,7 @@
 			err("Alternate settings have different endpoint addresses!");
 			return -ENODEV;
 		}
-		if (usb_endpoint_type(endpoint) != USB_ENDPOINT_XFER_ISOC) {
+		if (!usb_endpoint_xfer_isoc(endpoint)) {
 			err("Interface %d. has non-ISO endpoint!",
 			    interface->desc.bInterfaceNumber);
 			return -ENODEV;
diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
index 839a082..fbd1b63 100644
--- a/drivers/media/video/usbvideo/ultracam.c
+++ b/drivers/media/video/usbvideo/ultracam.c
@@ -556,7 +556,7 @@
 			err("Alternate settings have different endpoint addresses!");
 			return -ENODEV;
 		}
-		if (usb_endpoint_type(endpoint) != USB_ENDPOINT_XFER_ISOC) {
+		if (!usb_endpoint_xfer_isoc(endpoint)) {
 			err("Interface %d. has non-ISO endpoint!",
 			    interface->desc.bInterfaceNumber);
 			return -ENODEV;
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index 2be5e47..2622de0 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1674,8 +1674,7 @@
 		interface = &dev->actconfig->interface[ifnum]->altsetting[0];
 	}
 	endpoint = &interface->endpoint[1].desc;
-	if (usb_endpoint_type(endpoint) !=
-	    USB_ENDPOINT_XFER_ISOC) {
+	if (!usb_endpoint_xfer_isoc(endpoint)) {
 		err("%s: interface %d. has non-ISO endpoint!",
 		    __func__, ifnum);
 		err("%s: Endpoint attributes %d",
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 9eefde0..cf9d4c7 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -29,7 +29,7 @@
 	if (dev == NULL || v4l2_dev == NULL)
 		return -EINVAL;
 	/* Warn if we apparently re-register a device */
-	WARN_ON(dev_get_drvdata(dev));
+	WARN_ON(dev_get_drvdata(dev) != NULL);
 	INIT_LIST_HEAD(&v4l2_dev->subdevs);
 	spin_lock_init(&v4l2_dev->lock);
 	v4l2_dev->dev = dev;
@@ -61,7 +61,7 @@
 	if (dev == NULL || sd == NULL || !sd->name[0])
 		return -EINVAL;
 	/* Warn if we apparently re-register a subdev */
-	WARN_ON(sd->dev);
+	WARN_ON(sd->dev != NULL);
 	if (!try_module_get(sd->owner))
 		return -ENODEV;
 	sd->dev = dev;
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index bc6d5ab..da1790e 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -388,8 +388,7 @@
 	page = alloc_page(GFP_USER | __GFP_DMA32);
 	if (!page)
 		return VM_FAULT_OOM;
-	clear_user_page(page_address(page), (unsigned long)vmf->virtual_address,
-			page);
+	clear_user_highpage(page, (unsigned long)vmf->virtual_address);
 	vmf->page = page;
 	return 0;
 }
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c
index a3997b7..105a832 100644
--- a/drivers/media/video/w9968cf.c
+++ b/drivers/media/video/w9968cf.c
@@ -1553,7 +1553,6 @@
 
 	static struct i2c_adapter adap = {
 		.id =                I2C_HW_SMBUS_W9968CF,
-		.class =             I2C_CLASS_CAM_DIGITAL,
 		.owner =             THIS_MODULE,
 		.client_register =   w9968cf_i2c_attach_inform,
 		.client_unregister = w9968cf_i2c_detach_inform,
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 1f1e398..de143de 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -52,14 +52,14 @@
 };
 
 struct mspro_attr_entry {
-	unsigned int  address;
-	unsigned int  size;
+	__be32 address;
+	__be32 size;
 	unsigned char id;
 	unsigned char reserved[3];
 } __attribute__((packed));
 
 struct mspro_attribute {
-	unsigned short          signature;
+	__be16 signature;
 	unsigned short          version;
 	unsigned char           count;
 	unsigned char           reserved[11];
@@ -69,28 +69,28 @@
 struct mspro_sys_info {
 	unsigned char  class;
 	unsigned char  reserved0;
-	unsigned short block_size;
-	unsigned short block_count;
-	unsigned short user_block_count;
-	unsigned short page_size;
+	__be16 block_size;
+	__be16 block_count;
+	__be16 user_block_count;
+	__be16 page_size;
 	unsigned char  reserved1[2];
 	unsigned char  assembly_date[8];
-	unsigned int   serial_number;
+	__be32 serial_number;
 	unsigned char  assembly_maker_code;
 	unsigned char  assembly_model_code[3];
-	unsigned short memory_maker_code;
-	unsigned short memory_model_code;
+	__be16 memory_maker_code;
+	__be16 memory_model_code;
 	unsigned char  reserved2[4];
 	unsigned char  vcc;
 	unsigned char  vpp;
-	unsigned short controller_number;
-	unsigned short controller_function;
-	unsigned short start_sector;
-	unsigned short unit_size;
+	__be16 controller_number;
+	__be16 controller_function;
+	__be16 start_sector;
+	__be16 unit_size;
 	unsigned char  ms_sub_class;
 	unsigned char  reserved3[4];
 	unsigned char  interface_type;
-	unsigned short controller_code;
+	__be16 controller_code;
 	unsigned char  format_type;
 	unsigned char  reserved4;
 	unsigned char  device_type;
@@ -124,11 +124,11 @@
 } __attribute__((packed));
 
 struct mspro_devinfo {
-	unsigned short cylinders;
-	unsigned short heads;
-	unsigned short bytes_per_track;
-	unsigned short bytes_per_sector;
-	unsigned short sectors_per_track;
+	__be16 cylinders;
+	__be16 heads;
+	__be16 bytes_per_track;
+	__be16 bytes_per_sector;
+	__be16 sectors_per_track;
 	unsigned char  reserved[6];
 } __attribute__((packed));
 
@@ -338,8 +338,7 @@
 	rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "assembly date: "
 			"GMT%+d:%d %04u-%02u-%02u %02u:%02u:%02u\n",
 			date_tz, date_tz_f,
-			be16_to_cpu(*(unsigned short *)
-				    &x_sys->assembly_date[1]),
+			be16_to_cpup((__be16 *)&x_sys->assembly_date[1]),
 			x_sys->assembly_date[3], x_sys->assembly_date[4],
 			x_sys->assembly_date[5], x_sys->assembly_date[6],
 			x_sys->assembly_date[7]);
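
Editor's sketch (not part of the patch): with the mspro attribute fields annotated as __be16/__be32 above, every use site now converts explicitly to host order, which sparse can check. The helper name below is hypothetical; field names follow mspro_sys_info as changed in this hunk:

static void mspro_show_sys_info(const struct mspro_sys_info *sys)
{
	/* big-endian on-media values converted to host order before use */
	pr_debug("mspro: block_size %u, block_count %u, page_size %u, serial %08x\n",
		 be16_to_cpu(sys->block_size),
		 be16_to_cpu(sys->block_count),
		 be16_to_cpu(sys->page_size),
		 be32_to_cpu(sys->serial_number));
}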
diff --git a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h
index 10b6ef7..11c0f46 100644
--- a/drivers/message/fusion/lsi/mpi.h
+++ b/drivers/message/fusion/lsi/mpi.h
@@ -6,7 +6,7 @@
  *          Title:  MPI Message independent structures and definitions
  *  Creation Date:  July 27, 2000
  *
- *    mpi.h Version:  01.05.13
+ *    mpi.h Version:  01.05.16
  *
  *  Version History
  *  ---------------
@@ -79,6 +79,9 @@
  *  03-27-06  01.05.11  Bumped MPI_HEADER_VERSION_UNIT.
  *  10-11-06  01.05.12  Bumped MPI_HEADER_VERSION_UNIT.
  *  05-24-07  01.05.13  Bumped MPI_HEADER_VERSION_UNIT.
+ *  08-07-07  01.05.14  Bumped MPI_HEADER_VERSION_UNIT.
+ *  01-15-08  01.05.15  Bumped MPI_HEADER_VERSION_UNIT.
+ *  03-28-08  01.05.16  Bumped MPI_HEADER_VERSION_UNIT.
  *  --------------------------------------------------------------------------
  */
 
@@ -109,7 +112,7 @@
 /* Note: The major versions of 0xe0 through 0xff are reserved */
 
 /* versioning for this MPI header set */
-#define MPI_HEADER_VERSION_UNIT             (0x10)
+#define MPI_HEADER_VERSION_UNIT             (0x13)
 #define MPI_HEADER_VERSION_DEV              (0x00)
 #define MPI_HEADER_VERSION_UNIT_MASK        (0xFF00)
 #define MPI_HEADER_VERSION_UNIT_SHIFT       (8)
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index b2db333..013c7d8 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -6,7 +6,7 @@
  *          Title:  MPI Config message, structures, and Pages
  *  Creation Date:  July 27, 2000
  *
- *    mpi_cnfg.h Version:  01.05.15
+ *    mpi_cnfg.h Version:  01.05.18
  *
  *  Version History
  *  ---------------
@@ -308,6 +308,20 @@
  *                      Expander Page 0 Flags field.
  *                      Fixed define for
  *                      MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED.
+ *  08-07-07  01.05.16  Added MPI_IOCPAGE6_CAP_FLAGS_MULTIPORT_DRIVE_SUPPORT
+ *                      define.
+ *                      Added BIOS Page 4 structure.
+ *                      Added MPI_RAID_PHYS_DISK1_PATH_MAX define for RAID
+ *                      Physical Disk Page 1.
+ *  01-15-07  01.05.17  Added additional bit defines for ExtFlags field of
+ *                      Manufacturing Page 4.
+ *                      Added Solid State Drives Supported bit to IOC Page 6
+ *                      Capabilities Flags.
+ *                      Added new value for AccessStatus field of SAS Device
+ *                      Page 0 (_SATA_NEEDS_INITIALIZATION).
+ *  03-28-08  01.05.18  Defined new bits in Manufacturing Page 4 ExtFlags field
+ *                      to control coercion size and the mixing of SAS and SATA
+ *                      SSD drives.
  *  --------------------------------------------------------------------------
  */
 
@@ -686,6 +700,14 @@
 #define MPI_MANPAGE4_IR_NO_MIX_SAS_SATA                 (0x01)
 
 /* defines for the ExtFlags field */
+#define MPI_MANPAGE4_EXTFLAGS_MASK_COERCION_SIZE        (0x0180)
+#define MPI_MANPAGE4_EXTFLAGS_SHIFT_COERCION_SIZE       (7)
+#define MPI_MANPAGE4_EXTFLAGS_1GB_COERCION_SIZE         (0)
+#define MPI_MANPAGE4_EXTFLAGS_128MB_COERCION_SIZE       (1)
+
+#define MPI_MANPAGE4_EXTFLAGS_NO_MIX_SSD_SAS_SATA       (0x0040)
+#define MPI_MANPAGE4_EXTFLAGS_MIX_SSD_AND_NON_SSD       (0x0020)
+#define MPI_MANPAGE4_EXTFLAGS_DUAL_PORT_SUPPORT         (0x0010)
 #define MPI_MANPAGE4_EXTFLAGS_HIDE_NON_IR_METADATA      (0x0008)
 #define MPI_MANPAGE4_EXTFLAGS_SAS_CACHE_DISABLE         (0x0004)
 #define MPI_MANPAGE4_EXTFLAGS_SATA_CACHE_DISABLE        (0x0002)
@@ -1159,6 +1181,8 @@
 
 /* IOC Page 6 Capabilities Flags */
 
+#define MPI_IOCPAGE6_CAP_FLAGS_SSD_SUPPORT              (0x00000020)
+#define MPI_IOCPAGE6_CAP_FLAGS_MULTIPORT_DRIVE_SUPPORT  (0x00000010)
 #define MPI_IOCPAGE6_CAP_FLAGS_DISABLE_SMART_POLLING    (0x00000008)
 
 #define MPI_IOCPAGE6_CAP_FLAGS_MASK_METADATA_SIZE       (0x00000006)
@@ -1428,6 +1452,15 @@
 #define MPI_BIOSPAGE2_FORM_SAS_WWN                      (0x05)
 #define MPI_BIOSPAGE2_FORM_ENCLOSURE_SLOT               (0x06)
 
+typedef struct _CONFIG_PAGE_BIOS_4
+{
+    CONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U64                     ReassignmentBaseWWID;       /* 04h */
+} CONFIG_PAGE_BIOS_4, MPI_POINTER PTR_CONFIG_PAGE_BIOS_4,
+  BIOSPage4_t, MPI_POINTER pBIOSPage4_t;
+
+#define MPI_BIOSPAGE4_PAGEVERSION                       (0x00)
+
 
 /****************************************************************************
 *   SCSI Port Config Pages
@@ -2419,6 +2452,15 @@
 #define MPI_RAID_PHYSDISK1_FLAG_BROKEN          (0x0002)
 #define MPI_RAID_PHYSDISK1_FLAG_INVALID         (0x0001)
 
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength or NumPhysDiskPaths at runtime.
+ */
+#ifndef MPI_RAID_PHYS_DISK1_PATH_MAX
+#define MPI_RAID_PHYS_DISK1_PATH_MAX    (1)
+#endif
+
 typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_1
 {
     CONFIG_PAGE_HEADER              Header;             /* 00h */
@@ -2426,7 +2468,7 @@
     U8                              PhysDiskNum;        /* 05h */
     U16                             Reserved2;          /* 06h */
     U32                             Reserved1;          /* 08h */
-    RAID_PHYS_DISK1_PATH            Path[1];            /* 0Ch */
+    RAID_PHYS_DISK1_PATH            Path[MPI_RAID_PHYS_DISK1_PATH_MAX];/* 0Ch */
 } CONFIG_PAGE_RAID_PHYS_DISK_1, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_1,
   RaidPhysDiskPage1_t, MPI_POINTER pRaidPhysDiskPage1_t;
 
@@ -2844,6 +2886,7 @@
 #define MPI_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED            (0x01)
 #define MPI_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED      (0x02)
 #define MPI_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT   (0x03)
+#define MPI_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION   (0x04)
 /* specific values for SATA Init failures */
 #define MPI_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN                 (0x10)
 #define MPI_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT    (0x11)
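
Editor's sketch, not in the patch: the MPI_RAID_PHYS_DISK1_PATH_MAX comment above says host code must size Path[] from the runtime page, not from the compile-time define. A hypothetical helper doing that, assuming the NumPhysDiskPaths field the comment refers to and the usual convention that Header.PageLength counts 32-bit words:

static int raid_pd1_path_count(CONFIG_PAGE_RAID_PHYS_DISK_1 *pg1)
{
	/* paths that physically fit in the page actually returned */
	int room = (pg1->Header.PageLength * 4 -
		    offsetof(CONFIG_PAGE_RAID_PHYS_DISK_1, Path)) /
		   sizeof(RAID_PHYS_DISK1_PATH);

	/* never trust the compile-time MPI_RAID_PHYS_DISK1_PATH_MAX */
	return min_t(int, pg1->NumPhysDiskPaths, room);
}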
diff --git a/drivers/message/fusion/lsi/mpi_fc.h b/drivers/message/fusion/lsi/mpi_fc.h
index 627acfb..7d663ce 100644
--- a/drivers/message/fusion/lsi/mpi_fc.h
+++ b/drivers/message/fusion/lsi/mpi_fc.h
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2004 LSI Corporation.
+ *  Copyright (c) 2000-2008 LSI Corporation.
  *
  *
  *           Name:  mpi_fc.h
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
index 3f15fcf..693e4b5 100644
--- a/drivers/message/fusion/lsi/mpi_history.txt
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -3,28 +3,28 @@
  MPI Header File Change History
  ==============================
 
- Copyright (c) 2000-2007 LSI Corporation.
+ Copyright (c) 2000-2008 LSI Corporation.
 
  ---------------------------------------
- Header Set Release Version:    01.05.16
- Header Set Release Date:       05-24-07
+ Header Set Release Version:    01.05.19
+ Header Set Release Date:       03-28-08
  ---------------------------------------
 
  Filename               Current version     Prior version
  ----------             ---------------     -------------
- mpi.h                  01.05.13            01.05.12
- mpi_ioc.h              01.05.14            01.05.13
- mpi_cnfg.h             01.05.15            01.05.14
+ mpi.h                  01.05.16            01.05.15
+ mpi_ioc.h              01.05.16            01.05.15
+ mpi_cnfg.h             01.05.18            01.05.17
  mpi_init.h             01.05.09            01.05.09
  mpi_targ.h             01.05.06            01.05.06
  mpi_fc.h               01.05.01            01.05.01
  mpi_lan.h              01.05.01            01.05.01
- mpi_raid.h             01.05.03            01.05.03
+ mpi_raid.h             01.05.05            01.05.05
  mpi_tool.h             01.05.03            01.05.03
  mpi_inb.h              01.05.01            01.05.01
- mpi_sas.h              01.05.04            01.05.04
+ mpi_sas.h              01.05.05            01.05.05
  mpi_type.h             01.05.02            01.05.02
- mpi_history.txt        01.05.14            01.05.14
+ mpi_history.txt        01.05.19            01.05.18
 
 
  *  Date      Version   Description
@@ -96,6 +96,9 @@
  *  03-27-06  01.05.11  Bumped MPI_HEADER_VERSION_UNIT.
  *  10-11-06  01.05.12  Bumped MPI_HEADER_VERSION_UNIT.
  *  05-24-07  01.05.13  Bumped MPI_HEADER_VERSION_UNIT.
+ *  08-07-07  01.05.14  Bumped MPI_HEADER_VERSION_UNIT.
+ *  01-15-08  01.05.15  Bumped MPI_HEADER_VERSION_UNIT.
+ *  03-28-08  01.05.16  Bumped MPI_HEADER_VERSION_UNIT.
  *  --------------------------------------------------------------------------
 
 mpi_ioc.h
@@ -127,7 +130,7 @@
  *  08-08-01  01.02.01  Original release for v1.2 work.
  *                      New format for FWVersion and ProductId in
  *                      MSG_IOC_FACTS_REPLY and MPI_FW_HEADER.
- *  08-31-01  01.02.02  Added event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and
+ *  08-31-01  01.02.02  Addded event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and
  *                      related structure and defines.
  *                      Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED.
  *                      Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE.
@@ -187,7 +190,7 @@
  *  10-11-06  01.05.12  Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED.
  *                      Added MaxInitiators field to PortFacts reply.
  *                      Added SAS Device Status Change ReasonCode for
- *                      asynchronous notification.
+ *                      asynchronous notificaiton.
  *                      Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event
  *                      data structure.
  *                      Added new ImageType values for FWDownload and FWUpload
@@ -199,6 +202,16 @@
  *                      added _MULTI_PORT_DOMAIN.
  *  05-24-07  01.05.14  Added Common Boot Block type to FWDownload Request.
  *                      Added Common Boot Block type to FWUpload Request.
+ *  08-07-07  01.05.15  Added MPI_EVENT_SAS_INIT_RC_REMOVED define.
+ *                      Added MPI_EVENT_IR2_RC_DUAL_PORT_ADDED and
+ *                      MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED for IR2 event data.
+ *                      Added SASAddress field to SAS Initiator Device Table
+ *                      Overflow event data structure.
+ *  03-28-08  01.05.16  Added two new ReasonCode values to SAS Device Status
+ *                      Change Event data to indicate completion of internally
+ *                      generated task management.
+ *                      Added MPI_EVENT_DSCVRY_ERR_DS_SATA_INIT_FAILURE define.
+ *                      Added MPI_EVENT_SAS_INIT_RC_INACCESSIBLE define.
  *  --------------------------------------------------------------------------
 
 mpi_cnfg.h
@@ -213,7 +226,7 @@
  *                      Added _RESPONSE_ID_MASK definition to SCSI_PORT_1
  *                      page and updated the page version.
  *                      Added Information field and _INFO_PARAMS_NEGOTIATED
- *                      definition to SCSI_DEVICE_0 page.
+ *                      definitionto SCSI_DEVICE_0 page.
  *  06-22-00  01.00.03  Removed batch controls from LAN_0 page and updated the
  *                      page version.
  *                      Added BucketsRemaining to LAN_1 page, redefined the
@@ -496,6 +509,20 @@
  *                      Expander Page 0 Flags field.
  *                      Fixed define for
  *                      MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED.
+ *  08-07-07  01.05.16  Added MPI_IOCPAGE6_CAP_FLAGS_MULTIPORT_DRIVE_SUPPORT
+ *                      define.
+ *                      Added BIOS Page 4 structure.
+ *                      Added MPI_RAID_PHYS_DISK1_PATH_MAX define for RAID
+ *                      Physical Disk Page 1.
+ *  01-15-07  01.05.17  Added additional bit defines for ExtFlags field of
+ *                      Manufacturing Page 4.
+ *                      Added Solid State Drives Supported bit to IOC Page 6
+ *                      Capabilities Flags.
+ *                      Added new value for AccessStatus field of SAS Device
+ *                      Page 0 (_SATA_NEEDS_INITIALIZATION).
+ *  03-28-08  01.05.18  Defined new bits in Manufacturing Page 4 ExtFlags field
+ *                      to control coercion size and the mixing of SAS and SATA
+ *                      SSD drives.
  *  --------------------------------------------------------------------------
 
 mpi_init.h
@@ -661,6 +688,9 @@
  *                      _SET_RESYNC_RATE and _SET_DATA_SCRUB_RATE.
  *  02-28-07  01.05.03  Added new RAID Action, Device FW Update Mode, and
  *                      associated defines.
+ *  08-07-07  01.05.04  Added Disable Full Rebuild bit to the ActionDataWord
+ *                      for the RAID Action MPI_RAID_ACTION_DISABLE_VOLUME.
+ *  01-15-08  01.05.05  Added define for MPI_RAID_ACTION_SET_VOLUME_NAME.
  *  --------------------------------------------------------------------------
 
 mpi_tool.h
@@ -694,6 +724,10 @@
  *                      reply.
  *  10-11-06  01.05.04  Fixed the name of a define for Operation field of SAS IO
  *                      Unit Control request.
+ *  01-15-08  01.05.05  Added support for MPI_SAS_OP_SET_IOC_PARAMETER,
+ *                      including adding IOCParameter and IOCParameter value
+ *                      fields to SAS IO Unit Control Request.
+ *                      Added MPI_SAS_DEVICE_INFO_PRODUCT_SPECIFIC define.
  *  --------------------------------------------------------------------------
 
 mpi_type.h
@@ -709,20 +743,20 @@
 
 mpi_history.txt         Parts list history
 
-Filename    01.05.15   01.05.15
-----------  --------   --------
-mpi.h       01.05.12   01.05.13
-mpi_ioc.h   01.05.13   01.05.14
-mpi_cnfg.h  01.05.14   01.05.15
-mpi_init.h  01.05.09   01.05.09
-mpi_targ.h  01.05.06   01.05.06
-mpi_fc.h    01.05.01   01.05.01
-mpi_lan.h   01.05.01   01.05.01
-mpi_raid.h  01.05.03   01.05.03
-mpi_tool.h  01.05.03   01.05.03
-mpi_inb.h   01.05.01   01.05.01
-mpi_sas.h   01.05.04   01.05.04
-mpi_type.h  01.05.02   01.05.02
+Filename    01.05.19   01.05.18   01.05.17   01.05.16   01.05.15
+----------  --------   --------   --------   --------   --------
+mpi.h       01.05.16   01.05.15   01.05.14   01.05.13   01.05.12
+mpi_ioc.h   01.05.16   01.05.15   01.05.15   01.05.14   01.05.13
+mpi_cnfg.h  01.05.18   01.05.17   01.05.16   01.05.15   01.05.14
+mpi_init.h  01.05.09   01.05.09   01.05.09   01.05.09   01.05.09
+mpi_targ.h  01.05.06   01.05.06   01.05.06   01.05.06   01.05.06
+mpi_fc.h    01.05.01   01.05.01   01.05.01   01.05.01   01.05.01
+mpi_lan.h   01.05.01   01.05.01   01.05.01   01.05.01   01.05.01
+mpi_raid.h  01.05.05   01.05.05   01.05.04   01.05.03   01.05.03
+mpi_tool.h  01.05.03   01.05.03   01.05.03   01.05.03   01.05.03
+mpi_inb.h   01.05.01   01.05.01   01.05.01   01.05.01   01.05.01
+mpi_sas.h   01.05.05   01.05.05   01.05.04   01.05.04   01.05.04
+mpi_type.h  01.05.02   01.05.02   01.05.02   01.05.02   01.05.02
 
 Filename    01.05.14   01.05.13   01.05.12   01.05.11   01.05.10   01.05.09
 ----------  --------   --------   --------   --------   --------   --------
diff --git a/drivers/message/fusion/lsi/mpi_init.h b/drivers/message/fusion/lsi/mpi_init.h
index a9e3693..4295d06 100644
--- a/drivers/message/fusion/lsi/mpi_init.h
+++ b/drivers/message/fusion/lsi/mpi_init.h
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2007 LSI Corporation.
+ *  Copyright (c) 2000-2008 LSI Corporation.
  *
  *
  *           Name:  mpi_init.h
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index 5cbb6bd..8faa4fa 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2007 LSI Corporation.
+ *  Copyright (c) 2000-2008 LSI Corporation.
  *
  *
  *           Name:  mpi_ioc.h
  *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  *  Creation Date:  August 11, 2000
  *
- *    mpi_ioc.h Version:  01.05.14
+ *    mpi_ioc.h Version:  01.05.16
  *
  *  Version History
  *  ---------------
@@ -113,6 +113,16 @@
  *                      added _MULTI_PORT_DOMAIN.
  *  05-24-07  01.05.14  Added Common Boot Block type to FWDownload Request.
  *                      Added Common Boot Block type to FWUpload Request.
+ *  08-07-07  01.05.15  Added MPI_EVENT_SAS_INIT_RC_REMOVED define.
+ *                      Added MPI_EVENT_IR2_RC_DUAL_PORT_ADDED and
+ *                      MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED for IR2 event data.
+ *                      Added SASAddress field to SAS Initiator Device Table
+ *                      Overflow event data structure.
+ *  03-28-08  01.05.16  Added two new ReasonCode values to SAS Device Status
+ *                      Change Event data to indicate completion of internally
+ *                      generated task management.
+ *                      Added MPI_EVENT_DSCVRY_ERR_DS_SATA_INIT_FAILURE define.
+ *                      Added MPI_EVENT_SAS_INIT_RC_INACCESSIBLE define.
  *  --------------------------------------------------------------------------
  */
 
@@ -612,6 +622,8 @@
 #define MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL   (0x0B)
 #define MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL       (0x0C)
 #define MPI_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION        (0x0D)
+#define MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET   (0x0E)
+#define MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL  (0x0F)
 
 
 /* SCSI Event data for Queue Full event */
@@ -708,6 +720,8 @@
 #define MPI_EVENT_IR2_RC_PD_REMOVED                 (0x05)
 #define MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED       (0x06)
 #define MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR       (0x07)
+#define MPI_EVENT_IR2_RC_DUAL_PORT_ADDED            (0x08)
+#define MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED          (0x09)
 
 /* defines for logical disk states */
 #define MPI_LD_STATE_OPTIMAL                        (0x00)
@@ -867,6 +881,7 @@
 #define MPI_EVENT_DSCVRY_ERR_DS_UNSUPPORTED_DEVICE          (0x00000800)
 #define MPI_EVENT_DSCVRY_ERR_DS_MAX_SATA_TARGETS            (0x00001000)
 #define MPI_EVENT_DSCVRY_ERR_DS_MULTI_PORT_DOMAIN           (0x00002000)
+#define MPI_EVENT_DSCVRY_ERR_DS_SATA_INIT_FAILURE           (0x00004000)
 
 /* SAS SMP Error Event data */
 
@@ -902,6 +917,8 @@
 
 /* defines for the ReasonCode field of the SAS Initiator Device Status Change event */
 #define MPI_EVENT_SAS_INIT_RC_ADDED                 (0x01)
+#define MPI_EVENT_SAS_INIT_RC_REMOVED               (0x02)
+#define MPI_EVENT_SAS_INIT_RC_INACCESSIBLE          (0x03)
 
 /* SAS Initiator Device Table Overflow Event data */
 
@@ -910,6 +927,7 @@
     U8                      MaxInit;                    /* 00h */
     U8                      CurrentInit;                /* 01h */
     U16                     Reserved1;                  /* 02h */
+    U64                     SASAddress;                 /* 04h */
 } EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
   MPI_POINTER PTR_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
   MpiEventDataSasInitTableOverflow_t,
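
Editor's note, not part of the patch: the two new SAS device status change ReasonCode values let an event handler distinguish internally generated task-management completions from host-initiated ones. A hypothetical helper using only the defines added above:

static int sas_dev_stat_rc_is_internal_tm_done(u8 reason_code)
{
	return reason_code == MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET ||
	       reason_code == MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL;
}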
diff --git a/drivers/message/fusion/lsi/mpi_lan.h b/drivers/message/fusion/lsi/mpi_lan.h
index 03253b5..f41fcb6 100644
--- a/drivers/message/fusion/lsi/mpi_lan.h
+++ b/drivers/message/fusion/lsi/mpi_lan.h
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2004 LSI Corporation.
+ *  Copyright (c) 2000-2008 LSI Corporation.
  *
  *
  *           Name:  mpi_lan.h
diff --git a/drivers/message/fusion/lsi/mpi_log_fc.h b/drivers/message/fusion/lsi/mpi_log_fc.h
index e4dafce..face6e7 100644
--- a/drivers/message/fusion/lsi/mpi_log_fc.h
+++ b/drivers/message/fusion/lsi/mpi_log_fc.h
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2001 LSI Corporation. All rights reserved.
+ *  Copyright (c) 2000-2008 LSI Corporation. All rights reserved.
  *
  *  NAME:           fc_log.h
  *  SUMMARY:        MPI IocLogInfo definitions for the SYMFC9xx chips
diff --git a/drivers/message/fusion/lsi/mpi_log_sas.h b/drivers/message/fusion/lsi/mpi_log_sas.h
index af9da03..691620d 100644
--- a/drivers/message/fusion/lsi/mpi_log_sas.h
+++ b/drivers/message/fusion/lsi/mpi_log_sas.h
@@ -1,6 +1,6 @@
 /***************************************************************************
  *                                                                         *
- *  Copyright 2003 LSI Corporation.  All rights reserved.            *
+ *  Copyright (c) 2000-2008 LSI Corporation.  All rights reserved.         *
  *                                                                         *
  * Description                                                             *
  * ------------                                                            *
@@ -73,6 +73,8 @@
 #define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_EXACT_IO          (0x00070004)
 #define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_EXACT_IO_REQ      (0x00070005)
 
+#define IOP_LOGINFO_CODE_LOG_TIMESTAMP_EVENT                 (0x00080000)
+
 /****************************************************************************/
 /* PL LOGINFO_CODE defines, valid if IOC_LOGINFO_ORIGINATOR = PL            */
 /****************************************************************************/
@@ -92,7 +94,7 @@
 #define PL_LOGINFO_SUB_CODE_OPEN_FAIL_OPEN_TIMEOUT_EXP       (0x0000000C)
 #define PL_LOGINFO_SUB_CODE_OPEN_FAIL_UNUSED_0D              (0x0000000D)
 #define PL_LOGINFO_SUB_CODE_OPEN_FAIL_DVTBLE_ACCSS_FAIL      (0x0000000E)
-#define PL_LOGINFO_SUB CODE_OPEN_FAIL_BAD_DEST               (0x00000011)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_BAD_DEST               (0x00000011)
 #define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RATE_NOT_SUPP          (0x00000012)
 #define PL_LOGINFO_SUB_CODE_OPEN_FAIL_PROT_NOT_SUPP          (0x00000013)
 #define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RESERVED_ABANDON0      (0x00000014)
@@ -159,10 +161,11 @@
 
 #define PL_LOGINFO_SUB_CODE_INVALID_SGL                      (0x00000200)
 #define PL_LOGINFO_SUB_CODE_WRONG_REL_OFF_OR_FRAME_LENGTH    (0x00000300)
-#define PL_LOGINFO_SUB_CODE_FRAME_XFER_ERROR                 (0x00000400) /* Bits 0-3 encode Transport Status Register (offset 0x08) */
-                                                                          /* Bit 0 is Status Bit 0: FrameXferErr */
-                                                                          /* Bit 1 & 2 are Status Bits 16 and 17: FrameXmitErrStatus */
-                                                                          /* Bit 3 is Status Bit 18 WriteDataLengthGTDataLengthErr */
+#define PL_LOGINFO_SUB_CODE_FRAME_XFER_ERROR                 (0x00000400)
+/* Bits 0-3 encode Transport Status Register (offset 0x08) */
+/* Bit 0 is Status Bit 0: FrameXferErr */
+/* Bit 1 & 2 are Status Bits 16 and 17: FrameXmitErrStatus */
+/* Bit 3 is Status Bit 18 WriteDataLengthGTDataLengthErr */
 
 #define PL_LOGINFO_SUB_CODE_TX_FM_CONNECTED_LOW              (0x00000500)
 #define PL_LOGINFO_SUB_CODE_SATA_NON_NCQ_RW_ERR_BIT_SET      (0x00000600)
@@ -177,6 +180,11 @@
 #define PL_LOGINFO_SUB_CODE_DISCOVERY_REMOTE_SEP_RESET       (0x00000E01)
 #define PL_LOGINFO_SUB_CODE_SECOND_OPEN                      (0x00000F00)
 #define PL_LOGINFO_SUB_CODE_DSCVRY_SATA_INIT_TIMEOUT         (0x00001000)
+#define PL_LOGINFO_SUB_CODE_BREAK_ON_SATA_CONNECTION         (0x00002000)
+/* not currently used in mainline */
+#define PL_LOGINFO_SUB_CODE_BREAK_ON_STUCK_LINK              (0x00003000)
+#define PL_LOGINFO_SUB_CODE_BREAK_ON_STUCK_LINK_AIP          (0x00004000)
+#define PL_LOGINFO_SUB_CODE_BREAK_ON_INCOMPLETE_BREAK_RCVD   (0x00005000)
 
 #define PL_LOGINFO_CODE_ENCL_MGMT_SMP_FRAME_FAILURE          (0x00200000) /* Can't get SMP Frame */
 #define PL_LOGINFO_CODE_ENCL_MGMT_SMP_READ_ERROR             (0x00200010) /* Error occured on SMP Read */
@@ -243,6 +251,8 @@
 #define IR_LOGINFO_VOLUME_ACTIVATE_VOLUME_FAILED               (0x00010014)
 /* Activation failed trying to import the volume */
 #define IR_LOGINFO_VOLUME_ACTIVATING_IMPORT_VOLUME_FAILED      (0x00010015)
+/* Activation failed trying to import the volume */
+#define IR_LOGINFO_VOLUME_ACTIVATING_TOO_MANY_PHYS_DISKS       (0x00010016)
 
 /* Phys Disk failed, too many phys disks */
 #define IR_LOGINFO_PHYSDISK_CREATE_TOO_MANY_DISKS              (0x00010020)
@@ -285,6 +295,21 @@
 /* Compatibility Error : IME size limited to < 2TB */
 #define IR_LOGINFO_COMPAT_ERROR_IME_VOL_NOT_CURRENTLY_SUPPORTED (0x0001003D)
 
+/* Device Firmware Update: DFU can only be started once */
+#define IR_LOGINFO_DEV_FW_UPDATE_ERR_DFU_IN_PROGRESS            (0x00010050)
+/* Device Firmware Update: Volume must be Optimal/Active/non-Quiesced */
+#define IR_LOGINFO_DEV_FW_UPDATE_ERR_DEVICE_IN_INVALID_STATE    (0x00010051)
+/* Device Firmware Update: DFU Timeout cannot be zero */
+#define IR_LOGINFO_DEV_FW_UPDATE_ERR_INVALID_TIMEOUT            (0x00010052)
+/* Device Firmware Update: CREATE TIMER FAILED */
+#define IR_LOGINFO_DEV_FW_UPDATE_ERR_NO_TIMERS                  (0x00010053)
+/* Device Firmware Update: Failed to read SAS_IO_UNIT_PG_1 */
+#define IR_LOGINFO_DEV_FW_UPDATE_ERR_READING_CFG_PAGE           (0x00010054)
+/* Device Firmware Update: Invalid SAS_IO_UNIT_PG_1 value(s) */
+#define IR_LOGINFO_DEV_FW_UPDATE_ERR_PORT_IO_TIMEOUTS_REQUIRED  (0x00010055)
+/* Device Firmware Update: Unable to allocate memory for page */
+#define IR_LOGINFO_DEV_FW_UPDATE_ERR_ALLOC_CFG_PAGE             (0x00010056)
+
 
 /****************************************************************************/
 /* Defines for convenience                                                  */
diff --git a/drivers/message/fusion/lsi/mpi_raid.h b/drivers/message/fusion/lsi/mpi_raid.h
index 2856108..add60cc 100644
--- a/drivers/message/fusion/lsi/mpi_raid.h
+++ b/drivers/message/fusion/lsi/mpi_raid.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2001-2007 LSI Corporation.
+ *  Copyright (c) 2001-2008 LSI Corporation.
  *
  *
  *           Name:  mpi_raid.h
  *          Title:  MPI RAID message and structures
  *  Creation Date:  February 27, 2001
  *
- *    mpi_raid.h Version:  01.05.03
+ *    mpi_raid.h Version:  01.05.05
  *
  *  Version History
  *  ---------------
@@ -34,6 +34,9 @@
  *                      _SET_RESYNC_RATE and _SET_DATA_SCRUB_RATE.
  *  02-28-07  01.05.03  Added new RAID Action, Device FW Update Mode, and
  *                      associated defines.
+ *  08-07-07  01.05.04  Added Disable Full Rebuild bit to the ActionDataWord
+ *                      for the RAID Action MPI_RAID_ACTION_DISABLE_VOLUME.
+ *  01-15-08  01.05.05  Added define for MPI_RAID_ACTION_SET_VOLUME_NAME.
  *  --------------------------------------------------------------------------
  */
 
@@ -93,6 +96,7 @@
 #define MPI_RAID_ACTION_SET_RESYNC_RATE             (0x13)
 #define MPI_RAID_ACTION_SET_DATA_SCRUB_RATE         (0x14)
 #define MPI_RAID_ACTION_DEVICE_FW_UPDATE_MODE       (0x15)
+#define MPI_RAID_ACTION_SET_VOLUME_NAME             (0x16)
 
 /* ActionDataWord defines for use with MPI_RAID_ACTION_CREATE_VOLUME action */
 #define MPI_RAID_ACTION_ADATA_DO_NOT_SYNC           (0x00000001)
@@ -105,6 +109,9 @@
 #define MPI_RAID_ACTION_ADATA_KEEP_LBA0             (0x00000000)
 #define MPI_RAID_ACTION_ADATA_ZERO_LBA0             (0x00000002)
 
+/* ActionDataWord defines for use with MPI_RAID_ACTION_DISABLE_VOLUME action */
+#define MPI_RAID_ACTION_ADATA_DISABLE_FULL_REBUILD  (0x00000001)
+
 /* ActionDataWord defines for use with MPI_RAID_ACTION_ACTIVATE_VOLUME action */
 #define MPI_RAID_ACTION_ADATA_INACTIVATE_ALL        (0x00000001)
 
diff --git a/drivers/message/fusion/lsi/mpi_sas.h b/drivers/message/fusion/lsi/mpi_sas.h
index 33fca83..ab41003 100644
--- a/drivers/message/fusion/lsi/mpi_sas.h
+++ b/drivers/message/fusion/lsi/mpi_sas.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2004-2006 LSI Corporation.
+ *  Copyright (c) 2004-2008 LSI Corporation.
  *
  *
  *           Name:  mpi_sas.h
  *          Title:  MPI Serial Attached SCSI structures and definitions
  *  Creation Date:  August 19, 2004
  *
- *    mpi_sas.h Version:  01.05.04
+ *    mpi_sas.h Version:  01.05.05
  *
  *  Version History
  *  ---------------
@@ -23,6 +23,10 @@
  *                      reply.
  *  10-11-06  01.05.04  Fixed the name of a define for Operation field of SAS IO
  *                      Unit Control request.
+ *  01-15-08  01.05.05  Added support for MPI_SAS_OP_SET_IOC_PARAMETER,
+ *                      including adding IOCParameter and IOCParameter value
+ *                      fields to SAS IO Unit Control Request.
+ *                      Added MPI_SAS_DEVICE_INFO_PRODUCT_SPECIFIC define.
  *  --------------------------------------------------------------------------
  */
 
@@ -60,6 +64,8 @@
  * Values for the SAS DeviceInfo field used in SAS Device Status Change Event
  * data and SAS IO Unit Configuration pages.
  */
+#define MPI_SAS_DEVICE_INFO_PRODUCT_SPECIFIC    (0xF0000000)
+
 #define MPI_SAS_DEVICE_INFO_SEP                 (0x00004000)
 #define MPI_SAS_DEVICE_INFO_ATAPI_DEVICE        (0x00002000)
 #define MPI_SAS_DEVICE_INFO_LSI_DEVICE          (0x00001000)
@@ -216,7 +222,7 @@
     U8                      ChainOffset;        /* 02h */
     U8                      Function;           /* 03h */
     U16                     DevHandle;          /* 04h */
-    U8                      Reserved3;          /* 06h */
+    U8                      IOCParameter;       /* 06h */
     U8                      MsgFlags;           /* 07h */
     U32                     MsgContext;         /* 08h */
     U8                      TargetID;           /* 0Ch */
@@ -225,7 +231,7 @@
     U8                      PrimFlags;          /* 0Fh */
     U32                     Primitive;          /* 10h */
     U64                     SASAddress;         /* 14h */
-    U32                     Reserved4;          /* 1Ch */
+    U32                     IOCParameterValue;  /* 1Ch */
 } MSG_SAS_IOUNIT_CONTROL_REQUEST, MPI_POINTER PTR_MSG_SAS_IOUNIT_CONTROL_REQUEST,
   SasIoUnitControlRequest_t, MPI_POINTER pSasIoUnitControlRequest_t;
 
@@ -241,6 +247,8 @@
 #define MPI_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL  (0x0C)
 #define MPI_SAS_OP_TRANSMIT_REMOVE_DEVICE       (0x0D)  /* obsolete name */
 #define MPI_SAS_OP_REMOVE_DEVICE                (0x0D)
+#define MPI_SAS_OP_SET_IOC_PARAMETER            (0x0E)
+#define MPI_SAS_OP_PRODUCT_SPECIFIC_MIN         (0x80)
 
 /* values for the PrimFlags field */
 #define MPI_SAS_PRIMFLAGS_SINGLE                (0x08)
@@ -256,7 +264,7 @@
     U8                      MsgLength;          /* 02h */
     U8                      Function;           /* 03h */
     U16                     DevHandle;          /* 04h */
-    U8                      Reserved3;          /* 06h */
+    U8                      IOCParameter;       /* 06h */
     U8                      MsgFlags;           /* 07h */
     U32                     MsgContext;         /* 08h */
     U16                     Reserved4;          /* 0Ch */
diff --git a/drivers/message/fusion/lsi/mpi_targ.h b/drivers/message/fusion/lsi/mpi_targ.h
index ff8c37d..c3dea7f 100644
--- a/drivers/message/fusion/lsi/mpi_targ.h
+++ b/drivers/message/fusion/lsi/mpi_targ.h
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2004 LSI Corporation.
+ *  Copyright (c) 2000-2008 LSI Corporation.
  *
  *
  *           Name:  mpi_targ.h
diff --git a/drivers/message/fusion/lsi/mpi_tool.h b/drivers/message/fusion/lsi/mpi_tool.h
index 8834ae6..53cd715 100644
--- a/drivers/message/fusion/lsi/mpi_tool.h
+++ b/drivers/message/fusion/lsi/mpi_tool.h
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2001-2005 LSI Corporation.
+ *  Copyright (c) 2001-2008 LSI Corporation.
  *
  *
  *           Name:  mpi_tool.h
diff --git a/drivers/message/fusion/lsi/mpi_type.h b/drivers/message/fusion/lsi/mpi_type.h
index 08dad9c..888b26d 100644
--- a/drivers/message/fusion/lsi/mpi_type.h
+++ b/drivers/message/fusion/lsi/mpi_type.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2004 LSI Corporation.
+ *  Copyright (c) 2000-2008 LSI Corporation.
  *
  *
  *           Name:  mpi_type.h
  *          Title:  MPI Basic type definitions
  *  Creation Date:  June 6, 2000
  *
- *    mpi_type.h Version:  01.05.01
+ *    mpi_type.h Version:  01.05.02
  *
  *  Version History
  *  ---------------
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index c4e8b9a..96ac883 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -79,9 +79,22 @@
 /*
  *  cmd line parameters
  */
-static int mpt_msi_enable = -1;
-module_param(mpt_msi_enable, int, 0);
-MODULE_PARM_DESC(mpt_msi_enable, " MSI Support Enable (default=0)");
+
+static int mpt_msi_enable_spi;
+module_param(mpt_msi_enable_spi, int, 0);
+MODULE_PARM_DESC(mpt_msi_enable_spi, " Enable MSI Support for SPI \
+		controllers (default=0)");
+
+static int mpt_msi_enable_fc;
+module_param(mpt_msi_enable_fc, int, 0);
+MODULE_PARM_DESC(mpt_msi_enable_fc, " Enable MSI Support for FC \
+		controllers (default=0)");
+
+static int mpt_msi_enable_sas;
+module_param(mpt_msi_enable_sas, int, 1);
+MODULE_PARM_DESC(mpt_msi_enable_sas, " Enable MSI Support for SAS \
+		controllers (default=1)");
+
 
 static int mpt_channel_mapping;
 module_param(mpt_channel_mapping, int, 0);
@@ -91,7 +104,17 @@
 static int mpt_set_debug_level(const char *val, struct kernel_param *kp);
 module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int,
 		  &mpt_debug_level, 0600);
-MODULE_PARM_DESC(mpt_debug_level, " debug level - refer to mptdebug.h - (default=0)");
+MODULE_PARM_DESC(mpt_debug_level, " debug level - refer to mptdebug.h \
+	- (default=0)");
+
+int mpt_fwfault_debug;
+EXPORT_SYMBOL(mpt_fwfault_debug);
+module_param_call(mpt_fwfault_debug, param_set_int, param_get_int,
+	  &mpt_fwfault_debug, 0600);
+MODULE_PARM_DESC(mpt_fwfault_debug, "Enable detection of Firmware fault"
+	" and halt Firmware on fault - (default=0)");
+
+
 
 #ifdef MFCNT
 static int mfcounter = 0;
@@ -1751,16 +1774,25 @@
 		ioc->bus_type = SAS;
 	}
 
-	if (mpt_msi_enable == -1) {
-		/* Enable on SAS, disable on FC and SPI */
-		if (ioc->bus_type == SAS)
-			ioc->msi_enable = 1;
-		else
-			ioc->msi_enable = 0;
-	} else
-		/* follow flag: 0 - disable; 1 - enable */
-		ioc->msi_enable = mpt_msi_enable;
 
+	switch (ioc->bus_type) {
+
+	case SAS:
+		ioc->msi_enable = mpt_msi_enable_sas;
+		break;
+
+	case SPI:
+		ioc->msi_enable = mpt_msi_enable_spi;
+		break;
+
+	case FC:
+		ioc->msi_enable = mpt_msi_enable_fc;
+		break;
+
+	default:
+		ioc->msi_enable = 0;
+		break;
+	}
 	if (ioc->errata_flag_1064)
 		pci_disable_io_access(pdev);
 
@@ -6313,6 +6345,33 @@
 	*size = y;
 }
 
+
+/**
+ *	mpt_halt_firmware - Halts the firmware if it is operational and panics
+ *	the kernel
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *
+ **/
+void
+mpt_halt_firmware(MPT_ADAPTER *ioc)
+{
+	u32	 ioc_raw_state;
+
+	ioc_raw_state = mpt_GetIocState(ioc, 0);
+
+	if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
+		printk(MYIOC_s_ERR_FMT "IOC is in FAULT state (%04xh)!!!\n",
+			ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
+		panic("%s: IOC Fault (%04xh)!!!\n", ioc->name,
+			ioc_raw_state & MPI_DOORBELL_DATA_MASK);
+	} else {
+		CHIPREG_WRITE32(&ioc->chip->Doorbell, 0xC0FFEE00);
+		panic("%s: Firmware is halted due to command timeout\n",
+			ioc->name);
+	}
+}
+EXPORT_SYMBOL(mpt_halt_firmware);
+
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
  *	Reset Handling
@@ -6345,6 +6404,8 @@
 	printk(MYIOC_s_INFO_FMT "HardResetHandler Entered!\n", ioc->name);
 	printk("MF count 0x%x !\n", ioc->mfcnt);
 #endif
+	if (mpt_fwfault_debug)
+		mpt_halt_firmware(ioc);
 
 	/* Reset the adapter. Prevent more than 1 call to
 	 * mpt_do_ioc_recovery at any instant in time.
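
Context sketch, not code from this patch: ioc->msi_enable as set in the bus_type switch above is only a policy flag at this point; the actual MSI setup happens later in adapter bring-up. The helper below is hypothetical and shows how such a flag is typically consumed:

static void mpt_try_enable_msi(MPT_ADAPTER *ioc, struct pci_dev *pdev)
{
	if (ioc->msi_enable && pci_enable_msi(pdev) == 0)
		return;			/* MSI vector allocated */

	ioc->msi_enable = 0;		/* fall back to legacy INTx */
}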
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index dff048c..b3e981d 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -922,11 +922,14 @@
 extern int	 mpt_findImVolumes(MPT_ADAPTER *ioc);
 extern int	 mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
 extern int	 mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk);
+extern void     mpt_halt_firmware(MPT_ADAPTER *ioc);
+
 
 /*
  *  Public data decl's...
  */
 extern struct list_head	  ioc_list;
+extern int mpt_fwfault_debug;
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 #endif		/* } __KERNEL__ */
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index b89f476..c638171 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -308,10 +308,11 @@
 {
 	int rc = 1;
 
-	dctlprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT ": Timeout Expired! Host %d\n",
-				ioctl->ioc->name, ioctl->ioc->id));
 	if (ioctl == NULL)
 		return;
+	dctlprintk(ioctl->ioc,
+		   printk(MYIOC_s_DEBUG_FMT ": Timeout Expired! Host %d\n",
+		   ioctl->ioc->name, ioctl->ioc->id));
 
 	ioctl->wait_done = 0;
 	if (ioctl->reset & MPTCTL_RESET_OK)
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index a13f6ee..c2804f2 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -106,7 +106,6 @@
 
 	u32 total_posted;
 	u32 total_received;
-	struct net_device_stats stats;	/* Per device statistics */
 
 	struct delayed_work post_buckets_task;
 	struct net_device *dev;
@@ -548,15 +547,6 @@
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-static struct net_device_stats *
-mpt_lan_get_stats(struct net_device *dev)
-{
-	struct mpt_lan_priv *priv = netdev_priv(dev);
-
-	return (struct net_device_stats *) &priv->stats;
-}
-
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 static int
 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
 {
@@ -594,8 +584,8 @@
 	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
 	sent = priv->SendCtl[ctx].skb;
 
-	priv->stats.tx_packets++;
-	priv->stats.tx_bytes += sent->len;
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += sent->len;
 
 	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
 			IOC_AND_NETDEV_NAMES_s_s(dev),
@@ -636,7 +626,7 @@
 
 	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
 	case MPI_IOCSTATUS_SUCCESS:
-		priv->stats.tx_packets += count;
+		dev->stats.tx_packets += count;
 		break;
 
 	case MPI_IOCSTATUS_LAN_CANCELED:
@@ -644,13 +634,13 @@
 		break;
 
 	case MPI_IOCSTATUS_INVALID_SGL:
-		priv->stats.tx_errors += count;
+		dev->stats.tx_errors += count;
 		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
 				IOC_AND_NETDEV_NAMES_s_s(dev));
 		goto out;
 
 	default:
-		priv->stats.tx_errors += count;
+		dev->stats.tx_errors += count;
 		break;
 	}
 
@@ -661,7 +651,7 @@
 		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
 
 		sent = priv->SendCtl[ctx].skb;
-		priv->stats.tx_bytes += sent->len;
+		dev->stats.tx_bytes += sent->len;
 
 		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
 				IOC_AND_NETDEV_NAMES_s_s(dev),
@@ -842,8 +832,8 @@
 		 "delivered to upper level.\n",
 			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
 
-	priv->stats.rx_bytes += skb->len;
-	priv->stats.rx_packets++;
+	dev->stats.rx_bytes += skb->len;
+	dev->stats.rx_packets++;
 
 	skb->dev = dev;
 	netif_rx(skb);
@@ -1308,6 +1298,14 @@
 						  post_buckets_task.work));
 }
 
+static const struct net_device_ops mpt_netdev_ops = {
+	.ndo_open       = mpt_lan_open,
+	.ndo_stop       = mpt_lan_close,
+	.ndo_start_xmit = mpt_lan_sdu_send,
+	.ndo_change_mtu = mpt_lan_change_mtu,
+	.ndo_tx_timeout = mpt_lan_tx_timeout,
+};
+
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 static struct net_device *
 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
@@ -1372,15 +1370,7 @@
 	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
 			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;
 
-	dev->open = mpt_lan_open;
-	dev->stop = mpt_lan_close;
-	dev->get_stats = mpt_lan_get_stats;
-	dev->set_multicast_list = NULL;
-	dev->change_mtu = mpt_lan_change_mtu;
-	dev->hard_start_xmit = mpt_lan_sdu_send;
-
-/* Not in 2.3.42. Need 2.3.45+ */
-	dev->tx_timeout = mpt_lan_tx_timeout;
+	dev->netdev_ops = &mpt_netdev_ops;
 	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
 
 	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index ee09041..e62c6bc 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1846,6 +1846,9 @@
 	if (hd->timeouts < -1)
 		hd->timeouts++;
 
+	if (mpt_fwfault_debug)
+		mpt_halt_firmware(ioc);
+
 	/* Most important!  Set TaskMsgContext to SCpnt's MsgContext!
 	 * (the IO to be ABORT'd)
 	 *
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 1bcdbbb..3d45817 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -390,7 +390,7 @@
  *	@i2o_dev: the I2O device which was added
  *
  *	If a I2O device is added we catch the notification, because I2O classes
- *	other then SCSI peripheral will not be received through
+ *	other than SCSI peripheral will not be received through
  *	i2o_scsi_probe().
  */
 static void i2o_scsi_notify_device_add(struct i2o_device *i2o_dev)
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 416f9e7..06a2b0f 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -217,6 +217,29 @@
 	  I2C as the control interface.  Additional options must be
 	  selected to enable support for the functionality of the chip.
 
+config MFD_PCF50633
+	tristate "Support for NXP PCF50633"
+	depends on I2C
+	help
+	  Say yes here if you have the NXP PCF50633 chip on your board.
+	  This core driver provides register access and IRQ handling
+	  facilities, and registers devices for the various functions
+	  so that function-specific drivers can bind to them.
+
+config PCF50633_ADC
+	tristate "Support for NXP PCF50633 ADC"
+	depends on MFD_PCF50633
+	help
+	 Say yes here if you want to include support for ADC in the
+	 NXP PCF50633 chip.
+
+config PCF50633_GPIO
+	tristate "Support for NXP PCF50633 GPIO"
+	depends on MFD_PCF50633
+	help
+	 Say yes here if you want to include support for GPIO pins on
+	 the PCF50633 chip.
+
 endmenu
 
 menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 0c9418b..3afb519 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -37,3 +37,7 @@
 obj-$(CONFIG_UCB1400_CORE)	+= ucb1400_core.o
 
 obj-$(CONFIG_PMIC_DA903X)	+= da903x.o
+
+obj-$(CONFIG_MFD_PCF50633)	+= pcf50633-core.o
+obj-$(CONFIG_PCF50633_ADC)	+= pcf50633-adc.o
+obj-$(CONFIG_PCF50633_GPIO)	+= pcf50633-gpio.o
\ No newline at end of file
diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c
index 4214b3f..7ac12cb 100644
--- a/drivers/mfd/dm355evm_msp.c
+++ b/drivers/mfd/dm355evm_msp.c
@@ -107,6 +107,9 @@
 	MSP_GPIO(0, SWITCH1), MSP_GPIO(1, SWITCH1),
 	MSP_GPIO(2, SWITCH1), MSP_GPIO(3, SWITCH1),
 	MSP_GPIO(4, SWITCH1),
+	/* switches on MMC/SD sockets */
+	MSP_GPIO(1, SDMMC), MSP_GPIO(2, SDMMC),	/* mmc0 WP, nCD */
+	MSP_GPIO(3, SDMMC), MSP_GPIO(4, SDMMC),	/* mmc1 WP, nCD */
 };
 
 #define MSP_GPIO_REG(offset)	(msp_gpios[(offset)] >> 3)
@@ -304,6 +307,13 @@
 		gpio_export(gpio, false);
 	}
 
+	/* MMC/SD inputs -- right after the last config input */
+	if (client->dev.platform_data) {
+		void (*mmcsd_setup)(unsigned) = client->dev.platform_data;
+
+		mmcsd_setup(dm355evm_msp_gpio.base + 8 + 5);
+	}
+
 	/* RTC is a 32 bit counter, no alarm */
 	if (msp_has_rtc()) {
 		child = add_child(client, "rtc-dm355evm",
diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
new file mode 100644
index 0000000..c2d05be
--- /dev/null
+++ b/drivers/mfd/pcf50633-adc.c
@@ -0,0 +1,277 @@
+/* NXP PCF50633 ADC Driver
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * Author: Balaji Rao <balajirrao@openmoko.org>
+ * All rights reserved.
+ *
+ * Broken down from monstrous PCF50633 driver mainly by
+ * Harald Welte, Andy Green and Werner Almesberger
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  NOTE: This driver does not yet support subtractive ADC mode, which means
+ *  you can do only one measurement per read request.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+
+#include <linux/mfd/pcf50633/core.h>
+#include <linux/mfd/pcf50633/adc.h>
+
+struct pcf50633_adc_request {
+	int mux;
+	int avg;
+	int result;
+	void (*callback)(struct pcf50633 *, void *, int);
+	void *callback_param;
+
+	/* Used in case of sync requests */
+	struct completion completion;
+
+};
+
+#define PCF50633_MAX_ADC_FIFO_DEPTH 8
+
+struct pcf50633_adc {
+	struct pcf50633 *pcf;
+
+	/* Private stuff */
+	struct pcf50633_adc_request *queue[PCF50633_MAX_ADC_FIFO_DEPTH];
+	int queue_head;
+	int queue_tail;
+	struct mutex queue_mutex;
+};
+
+static inline struct pcf50633_adc *__to_adc(struct pcf50633 *pcf)
+{
+	return platform_get_drvdata(pcf->adc_pdev);
+}
+
+static void adc_setup(struct pcf50633 *pcf, int channel, int avg)
+{
+	channel &= PCF50633_ADCC1_ADCMUX_MASK;
+
+	/* kill ratiometric, but enable ACCSW biasing */
+	pcf50633_reg_write(pcf, PCF50633_REG_ADCC2, 0x00);
+	pcf50633_reg_write(pcf, PCF50633_REG_ADCC3, 0x01);
+
+	/* start ADC conversion on selected channel */
+	pcf50633_reg_write(pcf, PCF50633_REG_ADCC1, channel | avg |
+		    PCF50633_ADCC1_ADCSTART | PCF50633_ADCC1_RES_10BIT);
+}
+
+static void trigger_next_adc_job_if_any(struct pcf50633 *pcf)
+{
+	struct pcf50633_adc *adc = __to_adc(pcf);
+	int head;
+
+	mutex_lock(&adc->queue_mutex);
+
+	head = adc->queue_head;
+
+	if (!adc->queue[head]) {
+		mutex_unlock(&adc->queue_mutex);
+		return;
+	}
+	mutex_unlock(&adc->queue_mutex);
+
+	adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg);
+}
+
+static int
+adc_enqueue_request(struct pcf50633 *pcf, struct pcf50633_adc_request *req)
+{
+	struct pcf50633_adc *adc = __to_adc(pcf);
+	int head, tail;
+
+	mutex_lock(&adc->queue_mutex);
+
+	head = adc->queue_head;
+	tail = adc->queue_tail;
+
+	if (adc->queue[tail]) {
+		mutex_unlock(&adc->queue_mutex);
+		return -EBUSY;
+	}
+
+	adc->queue[tail] = req;
+	adc->queue_tail = (tail + 1) & (PCF50633_MAX_ADC_FIFO_DEPTH - 1);
+
+	mutex_unlock(&adc->queue_mutex);
+
+	trigger_next_adc_job_if_any(pcf);
+
+	return 0;
+}
+
+static void
+pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param, int result)
+{
+	struct pcf50633_adc_request *req = param;
+
+	req->result = result;
+	complete(&req->completion);
+}
+
+int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg)
+{
+	struct pcf50633_adc_request *req;
+
+	/* req is freed when the result is ready, in interrupt handler */
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	req->mux = mux;
+	req->avg = avg;
+	req->callback =  pcf50633_adc_sync_read_callback;
+	req->callback_param = req;
+
+	init_completion(&req->completion);
+	adc_enqueue_request(pcf, req);
+	wait_for_completion(&req->completion);
+
+	return req->result;
+}
+EXPORT_SYMBOL_GPL(pcf50633_adc_sync_read);
+
+int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
+			     void (*callback)(struct pcf50633 *, void *, int),
+			     void *callback_param)
+{
+	struct pcf50633_adc_request *req;
+
+	/* req is freed when the result is ready, in interrupt handler */
+	req = kmalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	req->mux = mux;
+	req->avg = avg;
+	req->callback = callback;
+	req->callback_param = callback_param;
+
+	adc_enqueue_request(pcf, req);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pcf50633_adc_async_read);
+
+static int adc_result(struct pcf50633 *pcf)
+{
+	u8 adcs1, adcs3;
+	u16 result;
+
+	adcs1 = pcf50633_reg_read(pcf, PCF50633_REG_ADCS1);
+	adcs3 = pcf50633_reg_read(pcf, PCF50633_REG_ADCS3);
+	result = (adcs1 << 2) | (adcs3 & PCF50633_ADCS3_ADCDAT1L_MASK);
+
+	dev_dbg(pcf->dev, "adc result = %d\n", result);
+
+	return result;
+}
+
+static void pcf50633_adc_irq(int irq, void *data)
+{
+	struct pcf50633_adc *adc = data;
+	struct pcf50633 *pcf = adc->pcf;
+	struct pcf50633_adc_request *req;
+	int head;
+
+	mutex_lock(&adc->queue_mutex);
+	head = adc->queue_head;
+
+	req = adc->queue[head];
+	if (WARN_ON(!req)) {
+		dev_err(pcf->dev, "pcf50633-adc irq: ADC queue empty!\n");
+		mutex_unlock(&adc->queue_mutex);
+		return;
+	}
+	adc->queue[head] = NULL;
+	adc->queue_head = (head + 1) &
+				      (PCF50633_MAX_ADC_FIFO_DEPTH - 1);
+
+	mutex_unlock(&adc->queue_mutex);
+
+	req->callback(pcf, req->callback_param, adc_result(pcf));
+	kfree(req);
+
+	trigger_next_adc_job_if_any(pcf);
+}
+
+static int __devinit pcf50633_adc_probe(struct platform_device *pdev)
+{
+	struct pcf50633_subdev_pdata *pdata = pdev->dev.platform_data;
+	struct pcf50633_adc *adc;
+
+	adc = kzalloc(sizeof(*adc), GFP_KERNEL);
+	if (!adc)
+		return -ENOMEM;
+
+	adc->pcf = pdata->pcf;
+	platform_set_drvdata(pdev, adc);
+
+	pcf50633_register_irq(pdata->pcf, PCF50633_IRQ_ADCRDY,
+					pcf50633_adc_irq, adc);
+
+	mutex_init(&adc->queue_mutex);
+
+	return 0;
+}
+
+static int __devexit pcf50633_adc_remove(struct platform_device *pdev)
+{
+	struct pcf50633_adc *adc = platform_get_drvdata(pdev);
+	int i, head;
+
+	pcf50633_free_irq(adc->pcf, PCF50633_IRQ_ADCRDY);
+
+	mutex_lock(&adc->queue_mutex);
+	head = adc->queue_head;
+
+	if (WARN_ON(adc->queue[head]))
+		dev_err(adc->pcf->dev,
+			"adc driver removed with request pending\n");
+
+	for (i = 0; i < PCF50633_MAX_ADC_FIFO_DEPTH; i++)
+		kfree(adc->queue[i]);
+
+	mutex_unlock(&adc->queue_mutex);
+	kfree(adc);
+
+	return 0;
+}
+
+static struct platform_driver pcf50633_adc_driver = {
+	.driver = {
+		.name = "pcf50633-adc",
+	},
+	.probe = pcf50633_adc_probe,
+	.remove = __devexit_p(pcf50633_adc_remove),
+};
+
+static int __init pcf50633_adc_init(void)
+{
+	return platform_driver_register(&pcf50633_adc_driver);
+}
+module_init(pcf50633_adc_init);
+
+static void __exit pcf50633_adc_exit(void)
+{
+	platform_driver_unregister(&pcf50633_adc_driver);
+}
+module_exit(pcf50633_adc_exit);
+
+MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
+MODULE_DESCRIPTION("PCF50633 adc driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pcf50633-adc");
+
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
new file mode 100644
index 0000000..24508e2
--- /dev/null
+++ b/drivers/mfd/pcf50633-core.c
@@ -0,0 +1,710 @@
+/* NXP PCF50633 Power Management Unit (PMU) driver
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * Author: Harald Welte <laforge@openmoko.org>
+ * 	   Balaji Rao <balajirrao@openmoko.org>
+ * All rights reserved.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+
+#include <linux/mfd/pcf50633/core.h>
+
+/* Two MBCS registers used during cold start */
+#define PCF50633_REG_MBCS1		0x4b
+#define PCF50633_REG_MBCS2		0x4c
+#define PCF50633_MBCS1_USBPRES 		0x01
+#define PCF50633_MBCS1_ADAPTPRES	0x01
+
+static int __pcf50633_read(struct pcf50633 *pcf, u8 reg, int num, u8 *data)
+{
+	int ret;
+
+	ret = i2c_smbus_read_i2c_block_data(pcf->i2c_client, reg,
+				num, data);
+	if (ret < 0)
+		dev_err(pcf->dev, "Error reading %d regs at %d\n", num, reg);
+
+	return ret;
+}
+
+static int __pcf50633_write(struct pcf50633 *pcf, u8 reg, int num, u8 *data)
+{
+	int ret;
+
+	ret = i2c_smbus_write_i2c_block_data(pcf->i2c_client, reg,
+				num, data);
+	if (ret < 0)
+		dev_err(pcf->dev, "Error writing %d regs at %d\n", num, reg);
+
+	return ret;
+
+}
+
+/* Read a block of up to 32 regs */
+int pcf50633_read_block(struct pcf50633 *pcf, u8 reg,
+					int nr_regs, u8 *data)
+{
+	int ret;
+
+	mutex_lock(&pcf->lock);
+	ret = __pcf50633_read(pcf, reg, nr_regs, data);
+	mutex_unlock(&pcf->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pcf50633_read_block);
+
+/* Write a block of up to 32 regs */
+int pcf50633_write_block(struct pcf50633 *pcf, u8 reg,
+					int nr_regs, u8 *data)
+{
+	int ret;
+
+	mutex_lock(&pcf->lock);
+	ret = __pcf50633_write(pcf, reg, nr_regs, data);
+	mutex_unlock(&pcf->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pcf50633_write_block);
+
+u8 pcf50633_reg_read(struct pcf50633 *pcf, u8 reg)
+{
+	u8 val;
+
+	mutex_lock(&pcf->lock);
+	__pcf50633_read(pcf, reg, 1, &val);
+	mutex_unlock(&pcf->lock);
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(pcf50633_reg_read);
+
+int pcf50633_reg_write(struct pcf50633 *pcf, u8 reg, u8 val)
+{
+	int ret;
+
+	mutex_lock(&pcf->lock);
+	ret = __pcf50633_write(pcf, reg, 1, &val);
+	mutex_unlock(&pcf->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pcf50633_reg_write);
+
+int pcf50633_reg_set_bit_mask(struct pcf50633 *pcf, u8 reg, u8 mask, u8 val)
+{
+	int ret;
+	u8 tmp;
+
+	val &= mask;
+
+	mutex_lock(&pcf->lock);
+	ret = __pcf50633_read(pcf, reg, 1, &tmp);
+	if (ret < 0)
+		goto out;
+
+	tmp &= ~mask;
+	tmp |= val;
+	ret = __pcf50633_write(pcf, reg, 1, &tmp);
+
+out:
+	mutex_unlock(&pcf->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pcf50633_reg_set_bit_mask);
+
+int pcf50633_reg_clear_bits(struct pcf50633 *pcf, u8 reg, u8 val)
+{
+	int ret;
+	u8 tmp;
+
+	mutex_lock(&pcf->lock);
+	ret = __pcf50633_read(pcf, reg, 1, &tmp);
+	if (ret < 0)
+		goto out;
+
+	tmp &= ~val;
+	ret = __pcf50633_write(pcf, reg, 1, &tmp);
+
+out:
+	mutex_unlock(&pcf->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pcf50633_reg_clear_bits);
+
+/* sysfs attributes */
+static ssize_t show_dump_regs(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct pcf50633 *pcf = dev_get_drvdata(dev);
+	u8 dump[16];
+	int n, n1, idx = 0;
+	char *buf1 = buf;
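+	/* The interrupt status registers are cleared on read, so they are
+	 * skipped here and dumped as 0x00 instead. */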
+	static u8 address_no_read[] = { /* must be ascending */
+		PCF50633_REG_INT1,
+		PCF50633_REG_INT2,
+		PCF50633_REG_INT3,
+		PCF50633_REG_INT4,
+		PCF50633_REG_INT5,
+		0 /* terminator */
+	};
+
+	for (n = 0; n < 256; n += sizeof(dump)) {
+		for (n1 = 0; n1 < sizeof(dump); n1++)
+			if (n + n1 == address_no_read[idx]) {
+				idx++;
+				dump[n1] = 0x00;
+			} else
+				dump[n1] = pcf50633_reg_read(pcf, n + n1);
+
+		hex_dump_to_buffer(dump, sizeof(dump), 16, 1, buf1, 128, 0);
+		buf1 += strlen(buf1);
+		*buf1++ = '\n';
+		*buf1 = '\0';
+	}
+
+	return buf1 - buf;
+}
+static DEVICE_ATTR(dump_regs, 0400, show_dump_regs, NULL);
+
+static ssize_t show_resume_reason(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct pcf50633 *pcf = dev_get_drvdata(dev);
+	int n;
+
+	n = sprintf(buf, "%02x%02x%02x%02x%02x\n",
+				pcf->resume_reason[0],
+				pcf->resume_reason[1],
+				pcf->resume_reason[2],
+				pcf->resume_reason[3],
+				pcf->resume_reason[4]);
+
+	return n;
+}
+static DEVICE_ATTR(resume_reason, 0400, show_resume_reason, NULL);
+
+static struct attribute *pcf_sysfs_entries[] = {
+	&dev_attr_dump_regs.attr,
+	&dev_attr_resume_reason.attr,
+	NULL,
+};
+
+static struct attribute_group pcf_attr_group = {
+	.name	= NULL,			/* put in device directory */
+	.attrs	= pcf_sysfs_entries,
+};
+
+int pcf50633_register_irq(struct pcf50633 *pcf, int irq,
+			void (*handler) (int, void *), void *data)
+{
+	if (irq < 0 || irq > PCF50633_NUM_IRQ || !handler)
+		return -EINVAL;
+
+	if (WARN_ON(pcf->irq_handler[irq].handler))
+		return -EBUSY;
+
+	mutex_lock(&pcf->lock);
+	pcf->irq_handler[irq].handler = handler;
+	pcf->irq_handler[irq].data = data;
+	mutex_unlock(&pcf->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pcf50633_register_irq);
+
+int pcf50633_free_irq(struct pcf50633 *pcf, int irq)
+{
+	if (irq < 0 || irq > PCF50633_NUM_IRQ)
+		return -EINVAL;
+
+	mutex_lock(&pcf->lock);
+	pcf->irq_handler[irq].handler = NULL;
+	mutex_unlock(&pcf->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pcf50633_free_irq);
+
+static int __pcf50633_irq_mask_set(struct pcf50633 *pcf, int irq, u8 mask)
+{
+	u8 reg, bits, tmp;
+	int ret = 0, idx;
+
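+	/* Each INTxM register masks eight interrupt sources; the bit within
+	 * the register is the IRQ number modulo 8. */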
+	idx = irq >> 3;
+	reg = PCF50633_REG_INT1M + idx;
+	bits = 1 << (irq & 0x07);
+
+	mutex_lock(&pcf->lock);
+
+	if (mask) {
+		ret = __pcf50633_read(pcf, reg, 1, &tmp);
+		if (ret < 0)
+			goto out;
+
+		tmp |= bits;
+
+		ret = __pcf50633_write(pcf, reg, 1, &tmp);
+		if (ret < 0)
+			goto out;
+
+		pcf->mask_regs[idx] |= bits;
+	} else {
+		ret = __pcf50633_read(pcf, reg, 1, &tmp);
+		if (ret < 0)
+			goto out;
+
+		tmp &= ~bits;
+
+		ret = __pcf50633_write(pcf, reg, 1, &tmp);
+		if (ret < 0)
+			goto out;
+
+		pcf->mask_regs[idx] &= ~bits;
+	}
+out:
+	mutex_unlock(&pcf->lock);
+
+	return ret;
+}
+
+int pcf50633_irq_mask(struct pcf50633 *pcf, int irq)
+{
+	dev_info(pcf->dev, "Masking IRQ %d\n", irq);
+
+	return __pcf50633_irq_mask_set(pcf, irq, 1);
+}
+EXPORT_SYMBOL_GPL(pcf50633_irq_mask);
+
+int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq)
+{
+	dev_info(pcf->dev, "Unmasking IRQ %d\n", irq);
+
+	return __pcf50633_irq_mask_set(pcf, irq, 0);
+}
+EXPORT_SYMBOL_GPL(pcf50633_irq_unmask);
+
+int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq)
+{
+	u8 reg, bits;
+
+	reg = irq >> 3;
+	bits = 1 << (irq & 0x07);
+
+	return pcf->mask_regs[reg] & bits;
+}
+EXPORT_SYMBOL_GPL(pcf50633_irq_mask_get);
+
+static void pcf50633_irq_call_handler(struct pcf50633 *pcf, int irq)
+{
+	if (pcf->irq_handler[irq].handler)
+		pcf->irq_handler[irq].handler(irq, pcf->irq_handler[irq].data);
+}
+
+/* Maximum time, in seconds, ONKEY is held before emergency action is taken */
+#define PCF50633_ONKEY1S_TIMEOUT 8
+
+static void pcf50633_irq_worker(struct work_struct *work)
+{
+	struct pcf50633 *pcf;
+	int ret, i, j;
+	u8 pcf_int[5], chgstat;
+
+	pcf = container_of(work, struct pcf50633, irq_work);
+
+	/* Read the 5 INT regs in one transaction */
+	ret = pcf50633_read_block(pcf, PCF50633_REG_INT1,
+						ARRAY_SIZE(pcf_int), pcf_int);
+	if (ret != ARRAY_SIZE(pcf_int)) {
+		dev_err(pcf->dev, "Error reading INT registers\n");
+
+		/*
+		 * If this doesn't ACK the interrupt to the chip, we'll be
+		 * called once again as we're level triggered.
+		 */
+		goto out;
+	}
+
+	/* We immediately read the usb and adapter status, so that only one
+	 * of the USBINS/USBREM IRQ handlers is called */
+	if (pcf_int[0] & (PCF50633_INT1_USBINS | PCF50633_INT1_USBREM)) {
+		chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
+		if (chgstat & (0x3 << 4))
+			pcf_int[0] &= ~(1 << PCF50633_INT1_USBREM);
+		else
+			pcf_int[0] &= ~(1 << PCF50633_INT1_USBINS);
+	}
+
+	/* Make sure only one of ADPINS or ADPREM is set */
+	if (pcf_int[0] & (PCF50633_INT1_ADPINS | PCF50633_INT1_ADPREM)) {
+		chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
+		if (chgstat & (0x3 << 4))
+			pcf_int[0] &= ~(1 << PCF50633_INT1_ADPREM);
+		else
+			pcf_int[0] &= ~(1 << PCF50633_INT1_ADPINS);
+	}
+
+	dev_dbg(pcf->dev, "INT1=0x%02x INT2=0x%02x INT3=0x%02x "
+			"INT4=0x%02x INT5=0x%02x\n", pcf_int[0],
+			pcf_int[1], pcf_int[2], pcf_int[3], pcf_int[4]);
+
+	/* Some revisions of the chip don't have an 8s standby mode on
+	 * ONKEY1S press. We try to do it manually in such cases. */
+	if ((pcf_int[0] & PCF50633_INT1_SECOND) && pcf->onkey1s_held) {
+		dev_info(pcf->dev, "ONKEY1S held for %d secs\n",
+							pcf->onkey1s_held);
+		if (pcf->onkey1s_held++ == PCF50633_ONKEY1S_TIMEOUT)
+			if (pcf->pdata->force_shutdown)
+				pcf->pdata->force_shutdown(pcf);
+	}
+
+	if (pcf_int[2] & PCF50633_INT3_ONKEY1S) {
+		dev_info(pcf->dev, "ONKEY1S held\n");
+		pcf->onkey1s_held = 1;
+
+		/* Unmask IRQ_SECOND */
+		pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT1M,
+						PCF50633_INT1_SECOND);
+
+		/* Unmask IRQ_ONKEYR */
+		pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT2M,
+						PCF50633_INT2_ONKEYR);
+	}
+
+	if ((pcf_int[1] & PCF50633_INT2_ONKEYR) && pcf->onkey1s_held) {
+		pcf->onkey1s_held = 0;
+
+		/* Mask SECOND and ONKEYR interrupts */
+		if (pcf->mask_regs[0] & PCF50633_INT1_SECOND)
+			pcf50633_reg_set_bit_mask(pcf,
+					PCF50633_REG_INT1M,
+					PCF50633_INT1_SECOND,
+					PCF50633_INT1_SECOND);
+
+		if (pcf->mask_regs[1] & PCF50633_INT2_ONKEYR)
+			pcf50633_reg_set_bit_mask(pcf,
+					PCF50633_REG_INT2M,
+					PCF50633_INT2_ONKEYR,
+					PCF50633_INT2_ONKEYR);
+	}
+
+	/* Have we just resumed? */
+	if (pcf->is_suspended) {
+		pcf->is_suspended = 0;
+
+		/* Set the resume reason, filtering out non-resumers */
+		for (i = 0; i < ARRAY_SIZE(pcf_int); i++)
+			pcf->resume_reason[i] = pcf_int[i] &
+						pcf->pdata->resumers[i];
+
+		/* Make sure we don't pass on any ONKEY events to
+		 * userspace now */
+		pcf_int[1] &= ~(PCF50633_INT2_ONKEYR | PCF50633_INT2_ONKEYF);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(pcf_int); i++) {
+		/* Unset masked interrupts */
+		pcf_int[i] &= ~pcf->mask_regs[i];
+
+		for (j = 0; j < 8 ; j++)
+			if (pcf_int[i] & (1 << j))
+				pcf50633_irq_call_handler(pcf, (i * 8) + j);
+	}
+
+out:
+	put_device(pcf->dev);
+	enable_irq(pcf->irq);
+}
+
+static irqreturn_t pcf50633_irq(int irq, void *data)
+{
+	struct pcf50633 *pcf = data;
+
+	dev_dbg(pcf->dev, "pcf50633_irq\n");
+
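+	/*
+	 * The PMU's registers are only reachable over I2C, so the actual
+	 * interrupt processing is deferred to a workqueue; the level
+	 * triggered line stays disabled until the worker re-enables it.
+	 */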
+	get_device(pcf->dev);
+	disable_irq(pcf->irq);
+	schedule_work(&pcf->irq_work);
+
+	return IRQ_HANDLED;
+}
+
+static void
+pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
+						struct platform_device **pdev)
+{
+	struct pcf50633_subdev_pdata *subdev_pdata;
+	int ret;
+
+	*pdev = platform_device_alloc(name, -1);
+	if (!*pdev) {
+		dev_err(pcf->dev, "Falied to allocate %s\n", name);
+		return;
+	}
+
+	subdev_pdata = kmalloc(sizeof(*subdev_pdata), GFP_KERNEL);
+	if (!subdev_pdata) {
+		dev_err(pcf->dev, "Error allocating subdev pdata\n");
+		platform_device_put(*pdev);
+		*pdev = NULL;
+		return;
+	}
+
+	subdev_pdata->pcf = pcf;
+	platform_device_add_data(*pdev, subdev_pdata, sizeof(*subdev_pdata));
+
+	(*pdev)->dev.parent = pcf->dev;
+
+	ret = platform_device_add(*pdev);
+	if (ret) {
+		dev_err(pcf->dev, "Failed to register %s: %d\n", name, ret);
+		platform_device_put(*pdev);
+		*pdev = NULL;
+	}
+}
+
+#ifdef CONFIG_PM
+static int pcf50633_suspend(struct device *dev, pm_message_t state)
+{
+	struct pcf50633 *pcf;
+	int ret = 0, i;
+	u8 res[5];
+
+	pcf = dev_get_drvdata(dev);
+
+	/* Make sure our interrupt handlers are not called
+	 * henceforth */
+	disable_irq(pcf->irq);
+
+	/* Make sure that any running IRQ worker has quit */
+	cancel_work_sync(&pcf->irq_work);
+
+	/* Save the masks */
+	ret = pcf50633_read_block(pcf, PCF50633_REG_INT1M,
+				ARRAY_SIZE(pcf->suspend_irq_masks),
+					pcf->suspend_irq_masks);
+	if (ret < 0) {
+		dev_err(pcf->dev, "error saving irq masks\n");
+		goto out;
+	}
+
+	/* Write wakeup irq masks */
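+	/* A set bit in INTxM masks the interrupt, so the wakeup sources
+	 * listed in pdata->resumers are written out inverted. */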
+	for (i = 0; i < ARRAY_SIZE(res); i++)
+		res[i] = ~pcf->pdata->resumers[i];
+
+	ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
+					ARRAY_SIZE(res), &res[0]);
+	if (ret < 0) {
+		dev_err(pcf->dev, "error writing wakeup irq masks\n");
+		goto out;
+	}
+
+	pcf->is_suspended = 1;
+
+out:
+	return ret;
+}
+
+static int pcf50633_resume(struct device *dev)
+{
+	struct pcf50633 *pcf;
+	int ret;
+
+	pcf = dev_get_drvdata(dev);
+
+	/* Write the saved mask registers */
+	ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
+				ARRAY_SIZE(pcf->suspend_irq_masks),
+					pcf->suspend_irq_masks);
+	if (ret < 0)
+		dev_err(pcf->dev, "Error restoring saved suspend masks\n");
+
+	/* Restore regulators' state */
+
+
+	get_device(pcf->dev);
+
+	/*
+	 * Clear any pending interrupts and record the resume reason, if any.
+	 * The worker re-enables the PMU IRQ via enable_irq() when it is done.
+	 */
+	pcf50633_irq_worker(&pcf->irq_work);
+
+	return 0;
+}
+#else
+#define pcf50633_suspend NULL
+#define pcf50633_resume NULL
+#endif
+
+static int __devinit pcf50633_probe(struct i2c_client *client,
+				const struct i2c_device_id *ids)
+{
+	struct pcf50633 *pcf;
+	struct pcf50633_platform_data *pdata = client->dev.platform_data;
+	int i, ret = 0;
+	int version, variant;
+
+	pcf = kzalloc(sizeof(*pcf), GFP_KERNEL);
+	if (!pcf)
+		return -ENOMEM;
+
+	pcf->pdata = pdata;
+
+	mutex_init(&pcf->lock);
+
+	i2c_set_clientdata(client, pcf);
+	pcf->dev = &client->dev;
+	pcf->i2c_client = client;
+	pcf->irq = client->irq;
+
+	INIT_WORK(&pcf->irq_work, pcf50633_irq_worker);
+
+	version = pcf50633_reg_read(pcf, 0);
+	variant = pcf50633_reg_read(pcf, 1);
+	if (version < 0 || variant < 0) {
+		dev_err(pcf->dev, "Unable to probe pcf50633\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	dev_info(pcf->dev, "Probed device version %d variant %d\n",
+							version, variant);
+
+	/* Enable all interrupts except RTC SECOND */
+	pcf->mask_regs[0] = 0x80;
+	pcf50633_reg_write(pcf, PCF50633_REG_INT1M, pcf->mask_regs[0]);
+	pcf50633_reg_write(pcf, PCF50633_REG_INT2M, 0x00);
+	pcf50633_reg_write(pcf, PCF50633_REG_INT3M, 0x00);
+	pcf50633_reg_write(pcf, PCF50633_REG_INT4M, 0x00);
+	pcf50633_reg_write(pcf, PCF50633_REG_INT5M, 0x00);
+
+	/* Create sub devices */
+	pcf50633_client_dev_register(pcf, "pcf50633-input",
+						&pcf->input_pdev);
+	pcf50633_client_dev_register(pcf, "pcf50633-rtc",
+						&pcf->rtc_pdev);
+	pcf50633_client_dev_register(pcf, "pcf50633-mbc",
+						&pcf->mbc_pdev);
+	pcf50633_client_dev_register(pcf, "pcf50633-adc",
+						&pcf->adc_pdev);
+
+	for (i = 0; i < PCF50633_NUM_REGULATORS; i++) {
+		struct platform_device *pdev;
+
+		pdev = platform_device_alloc("pcf50633-regltr", i);
+		if (!pdev) {
+			dev_err(pcf->dev, "Cannot create regulator\n");
+			continue;
+		}
+
+		pdev->dev.parent = pcf->dev;
+		pdev->dev.platform_data = &pdata->reg_init_data[i];
+		pdev->dev.driver_data = pcf;
+		pcf->regulator_pdev[i] = pdev;
+
+		platform_device_add(pdev);
+	}
+
+	if (client->irq) {
+		set_irq_handler(client->irq, handle_level_irq);
+		ret = request_irq(client->irq, pcf50633_irq,
+				IRQF_TRIGGER_LOW, "pcf50633", pcf);
+
+		if (ret) {
+			dev_err(pcf->dev, "Failed to request IRQ %d\n", ret);
+			goto err;
+		}
+	} else {
+		dev_err(pcf->dev, "No IRQ configured\n");
+		ret = -ENOENT;
+		goto err;
+	}
+
+	if (enable_irq_wake(client->irq) < 0)
+		dev_err(pcf->dev, "IRQ %u cannot be enabled as wake-up source"
+			"in this hardware revision", client->irq);
+
+	ret = sysfs_create_group(&client->dev.kobj, &pcf_attr_group);
+	if (ret)
+		dev_err(pcf->dev, "error creating sysfs entries\n");
+
+	if (pdata->probe_done)
+		pdata->probe_done(pcf);
+
+	return 0;
+
+err:
+	kfree(pcf);
+	return ret;
+}
+
+static int __devexit pcf50633_remove(struct i2c_client *client)
+{
+	struct pcf50633 *pcf = i2c_get_clientdata(client);
+	int i;
+
+	free_irq(pcf->irq, pcf);
+
+	platform_device_unregister(pcf->input_pdev);
+	platform_device_unregister(pcf->rtc_pdev);
+	platform_device_unregister(pcf->mbc_pdev);
+	platform_device_unregister(pcf->adc_pdev);
+
+	for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
+		platform_device_unregister(pcf->regulator_pdev[i]);
+
+	kfree(pcf);
+
+	return 0;
+}
+
+static struct i2c_device_id pcf50633_id_table[] = {
+	{"pcf50633", 0x73},
+	{ }
+};
+
+static struct i2c_driver pcf50633_driver = {
+	.driver = {
+		.name	= "pcf50633",
+		.suspend = pcf50633_suspend,
+		.resume	= pcf50633_resume,
+	},
+	.id_table = pcf50633_id_table,
+	.probe = pcf50633_probe,
+	.remove = __devexit_p(pcf50633_remove),
+};
+
+static int __init pcf50633_init(void)
+{
+	return i2c_add_driver(&pcf50633_driver);
+}
+
+static void __exit pcf50633_exit(void)
+{
+	i2c_del_driver(&pcf50633_driver);
+}
+
+MODULE_DESCRIPTION("I2C chip driver for NXP PCF50633 PMU");
+MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>");
+MODULE_LICENSE("GPL");
+
+module_init(pcf50633_init);
+module_exit(pcf50633_exit);
diff --git a/drivers/mfd/pcf50633-gpio.c b/drivers/mfd/pcf50633-gpio.c
new file mode 100644
index 0000000..2fa2eca
--- /dev/null
+++ b/drivers/mfd/pcf50633-gpio.c
@@ -0,0 +1,118 @@
+/* NXP PCF50633 GPIO Driver
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * Author: Balaji Rao <balajirrao@openmoko.org>
+ * All rights reserved.
+ *
+ * Broken down from monstrous PCF50633 driver mainly by
+ * Harald Welte, Andy Green and Werner Almesberger
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include <linux/mfd/pcf50633/core.h>
+#include <linux/mfd/pcf50633/gpio.h>
+
+enum pcf50633_regulator_id {
+	PCF50633_REGULATOR_AUTO,
+	PCF50633_REGULATOR_DOWN1,
+	PCF50633_REGULATOR_DOWN2,
+	PCF50633_REGULATOR_LDO1,
+	PCF50633_REGULATOR_LDO2,
+	PCF50633_REGULATOR_LDO3,
+	PCF50633_REGULATOR_LDO4,
+	PCF50633_REGULATOR_LDO5,
+	PCF50633_REGULATOR_LDO6,
+	PCF50633_REGULATOR_HCLDO,
+	PCF50633_REGULATOR_MEMLDO,
+};
+
+#define PCF50633_REG_AUTOOUT	0x1a
+#define PCF50633_REG_DOWN1OUT	0x1e
+#define PCF50633_REG_DOWN2OUT	0x22
+#define PCF50633_REG_MEMLDOOUT	0x26
+#define PCF50633_REG_LDO1OUT	0x2d
+#define PCF50633_REG_LDO2OUT	0x2f
+#define PCF50633_REG_LDO3OUT	0x31
+#define PCF50633_REG_LDO4OUT	0x33
+#define PCF50633_REG_LDO5OUT	0x35
+#define PCF50633_REG_LDO6OUT	0x37
+#define PCF50633_REG_HCLDOOUT	0x39
+
+static const u8 pcf50633_regulator_registers[PCF50633_NUM_REGULATORS] = {
+	[PCF50633_REGULATOR_AUTO]	= PCF50633_REG_AUTOOUT,
+	[PCF50633_REGULATOR_DOWN1]	= PCF50633_REG_DOWN1OUT,
+	[PCF50633_REGULATOR_DOWN2]	= PCF50633_REG_DOWN2OUT,
+	[PCF50633_REGULATOR_MEMLDO]	= PCF50633_REG_MEMLDOOUT,
+	[PCF50633_REGULATOR_LDO1]	= PCF50633_REG_LDO1OUT,
+	[PCF50633_REGULATOR_LDO2]	= PCF50633_REG_LDO2OUT,
+	[PCF50633_REGULATOR_LDO3]	= PCF50633_REG_LDO3OUT,
+	[PCF50633_REGULATOR_LDO4]	= PCF50633_REG_LDO4OUT,
+	[PCF50633_REGULATOR_LDO5]	= PCF50633_REG_LDO5OUT,
+	[PCF50633_REGULATOR_LDO6]	= PCF50633_REG_LDO6OUT,
+	[PCF50633_REGULATOR_HCLDO]	= PCF50633_REG_HCLDOOUT,
+};
+
+int pcf50633_gpio_set(struct pcf50633 *pcf, int gpio, u8 val)
+{
+	u8 reg;
+
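+	/* GPIO config registers are laid out contiguously from GPIO1CFG;
+	 * the low three bits hold the pin configuration. */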
+	reg = gpio - PCF50633_GPIO1 + PCF50633_REG_GPIO1CFG;
+
+	return pcf50633_reg_set_bit_mask(pcf, reg, 0x07, val);
+}
+EXPORT_SYMBOL_GPL(pcf50633_gpio_set);
+
+u8 pcf50633_gpio_get(struct pcf50633 *pcf, int gpio)
+{
+	u8 reg, val;
+
+	reg = gpio - PCF50633_GPIO1 + PCF50633_REG_GPIO1CFG;
+	val = pcf50633_reg_read(pcf, reg) & 0x07;
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(pcf50633_gpio_get);
+
+int pcf50633_gpio_invert_set(struct pcf50633 *pcf, int gpio, int invert)
+{
+	u8 val, reg;
+
+	reg = gpio - PCF50633_GPIO1 + PCF50633_REG_GPIO1CFG;
+	val = !!invert << 3;
+
+	return pcf50633_reg_set_bit_mask(pcf, reg, 1 << 3, val);
+}
+EXPORT_SYMBOL_GPL(pcf50633_gpio_invert_set);
+
+int pcf50633_gpio_invert_get(struct pcf50633 *pcf, int gpio)
+{
+	u8 reg, val;
+
+	reg = gpio - PCF50633_GPIO1 + PCF50633_REG_GPIO1CFG;
+	val = pcf50633_reg_read(pcf, reg);
+
+	return val & (1 << 3);
+}
+EXPORT_SYMBOL_GPL(pcf50633_gpio_invert_get);
+
+int pcf50633_gpio_power_supply_set(struct pcf50633 *pcf,
+					int gpio, int regulator, int on)
+{
+	u8 reg, val, mask;
+
+	/* the *ENA register is always one after the *OUT register */
+	reg = pcf50633_regulator_registers[regulator] + 1;
+
+	val = !!on << (gpio - PCF50633_GPIO1);
+	mask = 1 << (gpio - PCF50633_GPIO1);
+
+	return pcf50633_reg_set_bit_mask(pcf, reg, mask, val);
+}
+EXPORT_SYMBOL_GPL(pcf50633_gpio_power_supply_set);
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 170f9d4..0e5761f 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -41,6 +41,7 @@
 	struct gpio_chip	gpio;
 	struct sm501_gpio	*ourgpio;	/* to get back to parent. */
 	void __iomem		*regbase;
+	void __iomem		*control;	/* address of control reg. */
 };
 
 struct sm501_gpio {
@@ -908,6 +909,25 @@
 	return result & 1UL;
 }
 
+static void sm501_gpio_ensure_gpio(struct sm501_gpio_chip *smchip,
+				   unsigned long bit)
+{
+	unsigned long ctrl;
+
+	/* If the pin is not currently configured as a GPIO, switch it over. */
+
+	if (readl(smchip->control) & bit) {
+		dev_info(sm501_gpio_to_dev(smchip->ourgpio)->dev,
+			 "changing mode of gpio, bit %08lx\n", bit);
+
+		ctrl = readl(smchip->control);
+		ctrl &= ~bit;
+		writel(ctrl, smchip->control);
+
+		sm501_sync_regs(sm501_gpio_to_dev(smchip->ourgpio));
+	}
+}
+
 static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 
 {
@@ -929,6 +949,8 @@
 	writel(val, regs);
 
 	sm501_sync_regs(sm501_gpio_to_dev(smgpio));
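+	/* Make sure the pin is actually routed as a GPIO rather than to its
+	 * alternate function. */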
+	sm501_gpio_ensure_gpio(smchip, bit);
+
 	spin_unlock_irqrestore(&smgpio->lock, save);
 }
 
@@ -941,8 +963,8 @@
 	unsigned long save;
 	unsigned long ddr;
 
-	dev_info(sm501_gpio_to_dev(smgpio)->dev, "%s(%p,%d)\n",
-		 __func__, chip, offset);
+	dev_dbg(sm501_gpio_to_dev(smgpio)->dev, "%s(%p,%d)\n",
+		__func__, chip, offset);
 
 	spin_lock_irqsave(&smgpio->lock, save);
 
@@ -950,6 +972,8 @@
 	writel(ddr & ~bit, regs + SM501_GPIO_DDR_LOW);
 
 	sm501_sync_regs(sm501_gpio_to_dev(smgpio));
+	sm501_gpio_ensure_gpio(smchip, bit);
+
 	spin_unlock_irqrestore(&smgpio->lock, save);
 
 	return 0;
@@ -1012,9 +1036,11 @@
 		if (base > 0)
 			base += 32;
 		chip->regbase = gpio->regs + SM501_GPIO_DATA_HIGH;
+		chip->control = sm->regs + SM501_GPIO63_32_CONTROL;
 		gchip->label  = "SM501-HIGH";
 	} else {
 		chip->regbase = gpio->regs + SM501_GPIO_DATA_LOW;
+		chip->control = sm->regs + SM501_GPIO31_0_CONTROL;
 		gchip->label  = "SM501-LOW";
 	}
 
diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c
index b59c385..e7ab003 100644
--- a/drivers/mfd/twl4030-core.c
+++ b/drivers/mfd/twl4030-core.c
@@ -38,6 +38,9 @@
 #include <linux/i2c.h>
 #include <linux/i2c/twl4030.h>
 
+#ifdef CONFIG_ARM
+#include <mach/cpu.h>
+#endif
 
 /*
  * The TWL4030 "Triton 2" is one of a family of a multi-function "Power
@@ -646,7 +649,7 @@
 	return e;
 }
 
-static void __init clocks_init(void)
+static void __init clocks_init(struct device *dev)
 {
 	int e = 0;
 	struct clk *osc;
@@ -655,9 +658,9 @@
 
 #if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
 	if (cpu_is_omap2430())
-		osc = clk_get(NULL, "osc_ck");
+		osc = clk_get(dev, "osc_ck");
 	else
-		osc = clk_get(NULL, "osc_sys_ck");
+		osc = clk_get(dev, "osc_sys_ck");
 
 	if (IS_ERR(osc)) {
 		printk(KERN_WARNING "Skipping twl4030 internal clock init and "
@@ -773,7 +776,7 @@
 	inuse = true;
 
 	/* setup clock framework */
-	clocks_init();
+	clocks_init(&client->dev);
 
 	/* Maybe init the T2 Interrupt subsystem */
 	if (client->irq
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index 3a273cc..f92595c 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -1453,6 +1453,9 @@
 {
 	int i;
 
+	for (i = 0; i < ARRAY_SIZE(wm8350->pmic.led); i++)
+		platform_device_unregister(wm8350->pmic.led[i].pdev);
+
 	for (i = 0; i < ARRAY_SIZE(wm8350->pmic.pdev); i++)
 		platform_device_unregister(wm8350->pmic.pdev[i]);
 
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index fee7304..419c378 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -120,7 +120,7 @@
 	  cards are supported via 'MMC/SD Card support: TI Flash Media MMC/SD
 	  Interface support (MMC_TIFM_SD)'.
 
-          To compile this driver as a module, choose M here: the module will
+	  To compile this driver as a module, choose M here: the module will
 	  be called tifm_core.
 
 config TIFM_7XX1
@@ -133,100 +133,9 @@
 	  To make actual use of the device, you will have to select some
 	  flash card format drivers, as outlined in the TIFM_CORE Help.
 
-          To compile this driver as a module, choose M here: the module will
+	  To compile this driver as a module, choose M here: the module will
 	  be called tifm_7xx1.
 
-config ACER_WMI
-        tristate "Acer WMI Laptop Extras (EXPERIMENTAL)"
-	depends on X86
-	depends on EXPERIMENTAL
-	depends on ACPI
-	depends on LEDS_CLASS
-	depends on NEW_LEDS
-	depends on BACKLIGHT_CLASS_DEVICE
-	depends on SERIO_I8042
-	depends on RFKILL
-	select ACPI_WMI
-	---help---
-	  This is a driver for newer Acer (and Wistron) laptops. It adds
-	  wireless radio and bluetooth control, and on some laptops,
-	  exposes the mail LED and LCD backlight.
-
-	  For more information about this driver see
-	  <file:Documentation/laptops/acer-wmi.txt>
-
-	  If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M
-	  here.
-
-config ASUS_LAPTOP
-        tristate "Asus Laptop Extras (EXPERIMENTAL)"
-        depends on X86
-        depends on ACPI
-	depends on EXPERIMENTAL && !ACPI_ASUS
-	depends on LEDS_CLASS
-	depends on NEW_LEDS
-	depends on BACKLIGHT_CLASS_DEVICE
-        ---help---
-	  This is the new Linux driver for Asus laptops. It may also support some
-	  MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
-	  standard ACPI events that go through /proc/acpi/events. It also adds
-	  support for video output switching, LCD backlight control, Bluetooth and
-	  Wlan control, and most importantly, allows you to blink those fancy LEDs.
-
-	  For more information and a userspace daemon for handling the extra
-	  buttons see <http://acpi4asus.sf.net/>.
-
-	  If you have an ACPI-compatible ASUS laptop, say Y or M here.
-
-config FUJITSU_LAPTOP
-        tristate "Fujitsu Laptop Extras"
-        depends on X86
-        depends on ACPI
-	depends on INPUT
-        depends on BACKLIGHT_CLASS_DEVICE
-        ---help---
-	  This is a driver for laptops built by Fujitsu:
-
-	    * P2xxx/P5xxx/S6xxx/S7xxx series Lifebooks
-	    * Possibly other Fujitsu laptop models
-	    * Tested with S6410 and S7020
-
-	  It adds support for LCD brightness control and some hotkeys.
-
-	  If you have a Fujitsu laptop, say Y or M here.
-
-config FUJITSU_LAPTOP_DEBUG
-	bool "Verbose debug mode for Fujitsu Laptop Extras"
-	depends on FUJITSU_LAPTOP
-	default n
-	---help---
-	  Enables extra debug output from the fujitsu extras driver, at the
-	  expense of a slight increase in driver size.
-
-	  If you are not sure, say N here.
-
-config TC1100_WMI
-	tristate "HP Compaq TC1100 Tablet WMI Extras (EXPERIMENTAL)"
-	depends on X86 && !X86_64
-	depends on EXPERIMENTAL
-	depends on ACPI
-	select ACPI_WMI
-	---help---
-	  This is a driver for the WMI extensions (wireless and bluetooth power
-	  control) of the HP Compaq TC1100 tablet.
-
-config HP_WMI
-       tristate "HP WMI extras"
-       depends on ACPI_WMI
-       depends on INPUT
-       depends on RFKILL
-       help
-         Say Y here if you want to support WMI-based hotkeys on HP laptops and
-	 to read data from WMI such as docking or ambient light sensor state.
-
-         To compile this driver as a module, choose M here: the module will
-         be called hp-wmi.
-
 config ICS932S401
 	tristate "Integrated Circuits ICS932S401"
 	depends on I2C && EXPERIMENTAL
@@ -237,170 +146,6 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called ics932s401.
 
-config MSI_LAPTOP
-        tristate "MSI Laptop Extras"
-        depends on X86
-        depends on ACPI
-        depends on BACKLIGHT_CLASS_DEVICE
-        ---help---
-	  This is a driver for laptops built by MSI (MICRO-STAR
-	  INTERNATIONAL):
-
-	  MSI MegaBook S270 (MS-1013)
-	  Cytron/TCM/Medion/Tchibo MD96100/SAM2000
-
-	  It adds support for Bluetooth, WLAN and LCD brightness control.
-
-	  More information about this driver is available at
-	  <http://0pointer.de/lennart/tchibo.html>.
-
-	  If you have an MSI S270 laptop, say Y or M here.
-
-config PANASONIC_LAPTOP
-	tristate "Panasonic Laptop Extras"
-	depends on X86 && INPUT && ACPI
-        depends on BACKLIGHT_CLASS_DEVICE
-	---help---
-	  This driver adds support for access to backlight control and hotkeys
-	  on Panasonic Let's Note laptops.
-
-	  If you have a Panasonic Let's note laptop (such as the R1(N variant),
-	  R2, R3, R5, T2, W2 and Y2 series), say Y.
-
-config COMPAL_LAPTOP
-	tristate "Compal Laptop Extras"
-	depends on X86
-	depends on ACPI
-	depends on BACKLIGHT_CLASS_DEVICE
-	---help---
-	  This is a driver for laptops built by Compal:
-
-	  Compal FL90/IFL90
-	  Compal FL91/IFL91
-	  Compal FL92/JFL92
-	  Compal FT00/IFT00
-
-	  It adds support for Bluetooth, WLAN and LCD brightness control.
-
-	  If you have an Compal FL9x/IFL9x/FT00 laptop, say Y or M here.
-
-config SONY_LAPTOP
-	tristate "Sony Laptop Extras"
-	depends on X86 && ACPI
-	select BACKLIGHT_CLASS_DEVICE
-	depends on INPUT
-	  ---help---
-	  This mini-driver drives the SNC and SPIC devices present in the ACPI
-	  BIOS of the Sony Vaio laptops.
-
-	  It gives access to some extra laptop functionalities like Bluetooth,
-	  screen brightness control, Fn keys and allows powering on/off some
-	  devices.
-
-	  Read <file:Documentation/laptops/sony-laptop.txt> for more information.
-
-config SONYPI_COMPAT
-	bool "Sonypi compatibility"
-	depends on SONY_LAPTOP
-	  ---help---
-	  Build the sonypi driver compatibility code into the sony-laptop driver.
-
-config THINKPAD_ACPI
-	tristate "ThinkPad ACPI Laptop Extras"
-	depends on X86 && ACPI
-	select BACKLIGHT_LCD_SUPPORT
-	select BACKLIGHT_CLASS_DEVICE
-	select HWMON
-	select NVRAM
-	select INPUT
-	select NEW_LEDS
-	select LEDS_CLASS
-	select NET
-	select RFKILL
-	---help---
-	  This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
-	  support for Fn-Fx key combinations, Bluetooth control, video
-	  output switching, ThinkLight control, UltraBay eject and more.
-	  For more information about this driver see
-	  <file:Documentation/laptops/thinkpad-acpi.txt> and
-	  <http://ibm-acpi.sf.net/> .
-
-	  This driver was formerly known as ibm-acpi.
-
-	  If you have an IBM or Lenovo ThinkPad laptop, say Y or M here.
-
-config THINKPAD_ACPI_DEBUG
-	bool "Verbose debug mode"
-	depends on THINKPAD_ACPI
-	default n
-	---help---
-	  Enables extra debugging information, at the expense of a slightly
-	  increase in driver size.
-
-	  If you are not sure, say N here.
-
-config THINKPAD_ACPI_DOCK
-	bool "Legacy Docking Station Support"
-	depends on THINKPAD_ACPI
-	depends on ACPI_DOCK=n
-	default n
-	---help---
-	  Allows the thinkpad_acpi driver to handle docking station events.
-	  This support was made obsolete by the generic ACPI docking station
-	  support (CONFIG_ACPI_DOCK).  It will allow locking and removing the
-	  laptop from the docking station, but will not properly connect PCI
-	  devices.
-
-	  If you are not sure, say N here.
-
-config THINKPAD_ACPI_BAY
-	bool "Legacy Removable Bay Support"
-	depends on THINKPAD_ACPI
-	default y
-	---help---
-	  Allows the thinkpad_acpi driver to handle removable bays.  It will
-	  electrically disable the device in the bay, and also generate
-	  notifications when the bay lever is ejected or inserted.
-
-	  If you are not sure, say Y here.
-
-config THINKPAD_ACPI_VIDEO
-	bool "Video output control support"
-	depends on THINKPAD_ACPI
-	default y
-	---help---
-	  Allows the thinkpad_acpi driver to provide an interface to control
-	  the various video output ports.
-
-	  This feature often won't work well, depending on ThinkPad model,
-	  display state, video output devices in use, whether there is a X
-	  server running, phase of the moon, and the current mood of
-	  Schroedinger's cat.  If you can use X.org's RandR to control
-	  your ThinkPad's video output ports instead of this feature,
-	  don't think twice: do it and say N here to save some memory.
-
-	  If you are not sure, say Y here.
-
-config THINKPAD_ACPI_HOTKEY_POLL
-	bool "Support NVRAM polling for hot keys"
-	depends on THINKPAD_ACPI
-	default y
-	---help---
-	  Some thinkpad models benefit from NVRAM polling to detect a few of
-	  the hot key press events.  If you know your ThinkPad model does not
-	  need to do NVRAM polling to support any of the hot keys you use,
-	  unselecting this option will save about 1kB of memory.
-
-	  ThinkPads T40 and newer, R52 and newer, and X31 and newer are
-	  unlikely to need NVRAM polling in their latest BIOS versions.
-
-	  NVRAM polling can detect at most the following keys: ThinkPad/Access
-	  IBM, Zoom, Switch Display (fn+F7), ThinkLight, Volume up/down/mute,
-	  Brightness up/down, Display Expand (fn+F8), Hibernate (fn+F12).
-
-	  If you are not sure, say Y here.  The driver enables polling only if
-	  it is strictly necessary to do so.
-
 config ATMEL_SSC
 	tristate "Device driver for Atmel SSC peripheral"
 	depends on AVR32 || ARCH_AT91
@@ -413,31 +158,6 @@
 
 	  If unsure, say N.
 
-config INTEL_MENLOW
-	tristate "Thermal Management driver for Intel menlow platform"
-	depends on ACPI_THERMAL
-	select THERMAL
-	depends on X86
-	---help---
-	  ACPI thermal management enhancement driver on
-	  Intel Menlow platform.
-
-	  If unsure, say N.
-
-config EEEPC_LAPTOP
-	tristate "Eee PC Hotkey Driver (EXPERIMENTAL)"
-	depends on X86
-	depends on ACPI
-	depends on BACKLIGHT_CLASS_DEVICE
-	depends on HWMON
-	depends on EXPERIMENTAL
-	depends on RFKILL
-	---help---
-	  This driver supports the Fn-Fx keys on Eee PC laptops.
-	  It also adds the ability to switch camera/wlan on/off.
-
-	  If you have an Eee PC laptop, say Y or M here.
-
 config ENCLOSURE_SERVICES
 	tristate "Enclosure Services"
 	default n
@@ -498,6 +218,18 @@
 	This option enables addition debugging code for the SGI GRU driver. If
 	you are unsure, say N.
 
+config DELL_LAPTOP
+	tristate "Dell Laptop Extras (EXPERIMENTAL)"
+	depends on X86
+	depends on DCDBAS
+	depends on EXPERIMENTAL
+	depends on BACKLIGHT_CLASS_DEVICE
+	depends on RFKILL
+	default n
+	---help---
+	  This driver adds support for rfkill and backlight control to Dell
+	  laptops.
+
 source "drivers/misc/c2port/Kconfig"
 
 endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 817f7f5..d5749a7 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -1,32 +1,19 @@
 #
 # Makefile for misc devices that really don't fit anywhere else.
 #
-obj- := misc.o	# Dummy rule to force built-in.o to be made
 
 obj-$(CONFIG_IBM_ASM)		+= ibmasm/
 obj-$(CONFIG_HDPU_FEATURES)	+= hdpuftrs/
-obj-$(CONFIG_ASUS_LAPTOP)	+= asus-laptop.o
-obj-$(CONFIG_EEEPC_LAPTOP)	+= eeepc-laptop.o
-obj-$(CONFIG_MSI_LAPTOP)	+= msi-laptop.o
-obj-$(CONFIG_COMPAL_LAPTOP)	+= compal-laptop.o
-obj-$(CONFIG_ACER_WMI)		+= acer-wmi.o
 obj-$(CONFIG_ATMEL_PWM)		+= atmel_pwm.o
 obj-$(CONFIG_ATMEL_SSC)		+= atmel-ssc.o
 obj-$(CONFIG_ATMEL_TCLIB)	+= atmel_tclib.o
-obj-$(CONFIG_HP_WMI)		+= hp-wmi.o
 obj-$(CONFIG_ICS932S401)	+= ics932s401.o
-obj-$(CONFIG_TC1100_WMI)	+= tc1100-wmi.o
 obj-$(CONFIG_LKDTM)		+= lkdtm.o
 obj-$(CONFIG_TIFM_CORE)       	+= tifm_core.o
 obj-$(CONFIG_TIFM_7XX1)       	+= tifm_7xx1.o
 obj-$(CONFIG_PHANTOM)		+= phantom.o
 obj-$(CONFIG_SGI_IOC4)		+= ioc4.o
-obj-$(CONFIG_SONY_LAPTOP)	+= sony-laptop.o
-obj-$(CONFIG_THINKPAD_ACPI)	+= thinkpad_acpi.o
-obj-$(CONFIG_FUJITSU_LAPTOP)	+= fujitsu-laptop.o
-obj-$(CONFIG_PANASONIC_LAPTOP)	+= panasonic-laptop.o
 obj-$(CONFIG_EEPROM_93CX6)	+= eeprom_93cx6.o
-obj-$(CONFIG_INTEL_MENLOW)	+= intel_menlow.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
 obj-$(CONFIG_KGDB_TESTS)	+= kgdbts.o
 obj-$(CONFIG_SGI_XP)		+= sgi-xp/
diff --git a/drivers/misc/eeepc-laptop.c b/drivers/misc/eeepc-laptop.c
deleted file mode 100644
index 02fe2b8..0000000
--- a/drivers/misc/eeepc-laptop.c
+++ /dev/null
@@ -1,872 +0,0 @@
-/*
- *  eepc-laptop.c - Asus Eee PC extras
- *
- *  Based on asus_acpi.c as patched for the Eee PC by Asus:
- *  ftp://ftp.asus.com/pub/ASUS/EeePC/701/ASUS_ACPI_071126.rar
- *  Based on eee.c from eeepc-linux
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
-#include <linux/uaccess.h>
-#include <linux/input.h>
-#include <linux/rfkill.h>
-
-#define EEEPC_LAPTOP_VERSION	"0.1"
-
-#define EEEPC_HOTK_NAME		"Eee PC Hotkey Driver"
-#define EEEPC_HOTK_FILE		"eeepc"
-#define EEEPC_HOTK_CLASS	"hotkey"
-#define EEEPC_HOTK_DEVICE_NAME	"Hotkey"
-#define EEEPC_HOTK_HID		"ASUS010"
-
-#define EEEPC_LOG	EEEPC_HOTK_FILE ": "
-#define EEEPC_ERR	KERN_ERR	EEEPC_LOG
-#define EEEPC_WARNING	KERN_WARNING	EEEPC_LOG
-#define EEEPC_NOTICE	KERN_NOTICE	EEEPC_LOG
-#define EEEPC_INFO	KERN_INFO	EEEPC_LOG
-
-/*
- * Definitions for Asus EeePC
- */
-#define	NOTIFY_WLAN_ON	0x10
-#define NOTIFY_BRN_MIN	0x20
-#define NOTIFY_BRN_MAX	0x2f
-
-enum {
-	DISABLE_ASL_WLAN = 0x0001,
-	DISABLE_ASL_BLUETOOTH = 0x0002,
-	DISABLE_ASL_IRDA = 0x0004,
-	DISABLE_ASL_CAMERA = 0x0008,
-	DISABLE_ASL_TV = 0x0010,
-	DISABLE_ASL_GPS = 0x0020,
-	DISABLE_ASL_DISPLAYSWITCH = 0x0040,
-	DISABLE_ASL_MODEM = 0x0080,
-	DISABLE_ASL_CARDREADER = 0x0100
-};
-
-enum {
-	CM_ASL_WLAN = 0,
-	CM_ASL_BLUETOOTH,
-	CM_ASL_IRDA,
-	CM_ASL_1394,
-	CM_ASL_CAMERA,
-	CM_ASL_TV,
-	CM_ASL_GPS,
-	CM_ASL_DVDROM,
-	CM_ASL_DISPLAYSWITCH,
-	CM_ASL_PANELBRIGHT,
-	CM_ASL_BIOSFLASH,
-	CM_ASL_ACPIFLASH,
-	CM_ASL_CPUFV,
-	CM_ASL_CPUTEMPERATURE,
-	CM_ASL_FANCPU,
-	CM_ASL_FANCHASSIS,
-	CM_ASL_USBPORT1,
-	CM_ASL_USBPORT2,
-	CM_ASL_USBPORT3,
-	CM_ASL_MODEM,
-	CM_ASL_CARDREADER,
-	CM_ASL_LID
-};
-
-static const char *cm_getv[] = {
-	"WLDG", NULL, NULL, NULL,
-	"CAMG", NULL, NULL, NULL,
-	NULL, "PBLG", NULL, NULL,
-	"CFVG", NULL, NULL, NULL,
-	"USBG", NULL, NULL, "MODG",
-	"CRDG", "LIDG"
-};
-
-static const char *cm_setv[] = {
-	"WLDS", NULL, NULL, NULL,
-	"CAMS", NULL, NULL, NULL,
-	"SDSP", "PBLS", "HDPS", NULL,
-	"CFVS", NULL, NULL, NULL,
-	"USBG", NULL, NULL, "MODS",
-	"CRDS", NULL
-};
-
-#define EEEPC_EC	"\\_SB.PCI0.SBRG.EC0."
-
-#define EEEPC_EC_FAN_PWM	EEEPC_EC "SC02" /* Fan PWM duty cycle (%) */
-#define EEEPC_EC_SC02		0x63
-#define EEEPC_EC_FAN_HRPM	EEEPC_EC "SC05" /* High byte, fan speed (RPM) */
-#define EEEPC_EC_FAN_LRPM	EEEPC_EC "SC06" /* Low byte, fan speed (RPM) */
-#define EEEPC_EC_FAN_CTRL	EEEPC_EC "SFB3" /* Byte containing SF25  */
-#define EEEPC_EC_SFB3		0xD3
-
-/*
- * This is the main structure, we can use it to store useful information
- * about the hotk device
- */
-struct eeepc_hotk {
-	struct acpi_device *device;	/* the device we are in */
-	acpi_handle handle;		/* the handle of the hotk device */
-	u32 cm_supported;		/* the control methods supported
-					   by this BIOS */
-	uint init_flag;			/* Init flags */
-	u16 event_count[128];		/* count for each event */
-	struct input_dev *inputdev;
-	u16 *keycode_map;
-	struct rfkill *eeepc_wlan_rfkill;
-	struct rfkill *eeepc_bluetooth_rfkill;
-};
-
-/* The actual device the driver binds to */
-static struct eeepc_hotk *ehotk;
-
-/* Platform device/driver */
-static struct platform_driver platform_driver = {
-	.driver = {
-		.name = EEEPC_HOTK_FILE,
-		.owner = THIS_MODULE,
-	}
-};
-
-static struct platform_device *platform_device;
-
-struct key_entry {
-	char type;
-	u8 code;
-	u16 keycode;
-};
-
-enum { KE_KEY, KE_END };
-
-static struct key_entry eeepc_keymap[] = {
-	/* Sleep already handled via generic ACPI code */
-	{KE_KEY, 0x10, KEY_WLAN },
-	{KE_KEY, 0x12, KEY_PROG1 },
-	{KE_KEY, 0x13, KEY_MUTE },
-	{KE_KEY, 0x14, KEY_VOLUMEDOWN },
-	{KE_KEY, 0x15, KEY_VOLUMEUP },
-	{KE_KEY, 0x30, KEY_SWITCHVIDEOMODE },
-	{KE_KEY, 0x31, KEY_SWITCHVIDEOMODE },
-	{KE_KEY, 0x32, KEY_SWITCHVIDEOMODE },
-	{KE_END, 0},
-};
-
-/*
- * The hotkey driver declaration
- */
-static int eeepc_hotk_add(struct acpi_device *device);
-static int eeepc_hotk_remove(struct acpi_device *device, int type);
-
-static const struct acpi_device_id eeepc_device_ids[] = {
-	{EEEPC_HOTK_HID, 0},
-	{"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, eeepc_device_ids);
-
-static struct acpi_driver eeepc_hotk_driver = {
-	.name = EEEPC_HOTK_NAME,
-	.class = EEEPC_HOTK_CLASS,
-	.ids = eeepc_device_ids,
-	.ops = {
-		.add = eeepc_hotk_add,
-		.remove = eeepc_hotk_remove,
-	},
-};
-
-/* The backlight device /sys/class/backlight */
-static struct backlight_device *eeepc_backlight_device;
-
-/* The hwmon device */
-static struct device *eeepc_hwmon_device;
-
-/*
- * The backlight class declaration
- */
-static int read_brightness(struct backlight_device *bd);
-static int update_bl_status(struct backlight_device *bd);
-static struct backlight_ops eeepcbl_ops = {
-	.get_brightness = read_brightness,
-	.update_status = update_bl_status,
-};
-
-MODULE_AUTHOR("Corentin Chary, Eric Cooper");
-MODULE_DESCRIPTION(EEEPC_HOTK_NAME);
-MODULE_LICENSE("GPL");
-
-/*
- * ACPI Helpers
- */
-static int write_acpi_int(acpi_handle handle, const char *method, int val,
-			  struct acpi_buffer *output)
-{
-	struct acpi_object_list params;
-	union acpi_object in_obj;
-	acpi_status status;
-
-	params.count = 1;
-	params.pointer = &in_obj;
-	in_obj.type = ACPI_TYPE_INTEGER;
-	in_obj.integer.value = val;
-
-	status = acpi_evaluate_object(handle, (char *)method, &params, output);
-	return (status == AE_OK ? 0 : -1);
-}
-
-static int read_acpi_int(acpi_handle handle, const char *method, int *val)
-{
-	acpi_status status;
-	unsigned long long result;
-
-	status = acpi_evaluate_integer(handle, (char *)method, NULL, &result);
-	if (ACPI_FAILURE(status)) {
-		*val = -1;
-		return -1;
-	} else {
-		*val = result;
-		return 0;
-	}
-}
-
-static int set_acpi(int cm, int value)
-{
-	if (ehotk->cm_supported & (0x1 << cm)) {
-		const char *method = cm_setv[cm];
-		if (method == NULL)
-			return -ENODEV;
-		if (write_acpi_int(ehotk->handle, method, value, NULL))
-			printk(EEEPC_WARNING "Error writing %s\n", method);
-	}
-	return 0;
-}
-
-static int get_acpi(int cm)
-{
-	int value = -1;
-	if ((ehotk->cm_supported & (0x1 << cm))) {
-		const char *method = cm_getv[cm];
-		if (method == NULL)
-			return -ENODEV;
-		if (read_acpi_int(ehotk->handle, method, &value))
-			printk(EEEPC_WARNING "Error reading %s\n", method);
-	}
-	return value;
-}
-
-/*
- * Backlight
- */
-static int read_brightness(struct backlight_device *bd)
-{
-	return get_acpi(CM_ASL_PANELBRIGHT);
-}
-
-static int set_brightness(struct backlight_device *bd, int value)
-{
-	value = max(0, min(15, value));
-	return set_acpi(CM_ASL_PANELBRIGHT, value);
-}
-
-static int update_bl_status(struct backlight_device *bd)
-{
-	return set_brightness(bd, bd->props.brightness);
-}
-
-/*
- * Rfkill helpers
- */
-
-static int eeepc_wlan_rfkill_set(void *data, enum rfkill_state state)
-{
-	if (state == RFKILL_STATE_SOFT_BLOCKED)
-		return set_acpi(CM_ASL_WLAN, 0);
-	else
-		return set_acpi(CM_ASL_WLAN, 1);
-}
-
-static int eeepc_wlan_rfkill_state(void *data, enum rfkill_state *state)
-{
-	if (get_acpi(CM_ASL_WLAN) == 1)
-		*state = RFKILL_STATE_UNBLOCKED;
-	else
-		*state = RFKILL_STATE_SOFT_BLOCKED;
-	return 0;
-}
-
-static int eeepc_bluetooth_rfkill_set(void *data, enum rfkill_state state)
-{
-	if (state == RFKILL_STATE_SOFT_BLOCKED)
-		return set_acpi(CM_ASL_BLUETOOTH, 0);
-	else
-		return set_acpi(CM_ASL_BLUETOOTH, 1);
-}
-
-static int eeepc_bluetooth_rfkill_state(void *data, enum rfkill_state *state)
-{
-	if (get_acpi(CM_ASL_BLUETOOTH) == 1)
-		*state = RFKILL_STATE_UNBLOCKED;
-	else
-		*state = RFKILL_STATE_SOFT_BLOCKED;
-	return 0;
-}
-
-/*
- * Sys helpers
- */
-static int parse_arg(const char *buf, unsigned long count, int *val)
-{
-	if (!count)
-		return 0;
-	if (sscanf(buf, "%i", val) != 1)
-		return -EINVAL;
-	return count;
-}
-
-static ssize_t store_sys_acpi(int cm, const char *buf, size_t count)
-{
-	int rv, value;
-
-	rv = parse_arg(buf, count, &value);
-	if (rv > 0)
-		set_acpi(cm, value);
-	return rv;
-}
-
-static ssize_t show_sys_acpi(int cm, char *buf)
-{
-	return sprintf(buf, "%d\n", get_acpi(cm));
-}
-
-#define EEEPC_CREATE_DEVICE_ATTR(_name, _cm)				\
-	static ssize_t show_##_name(struct device *dev,			\
-				    struct device_attribute *attr,	\
-				    char *buf)				\
-	{								\
-		return show_sys_acpi(_cm, buf);				\
-	}								\
-	static ssize_t store_##_name(struct device *dev,		\
-				     struct device_attribute *attr,	\
-				     const char *buf, size_t count)	\
-	{								\
-		return store_sys_acpi(_cm, buf, count);			\
-	}								\
-	static struct device_attribute dev_attr_##_name = {		\
-		.attr = {						\
-			.name = __stringify(_name),			\
-			.mode = 0644 },					\
-		.show   = show_##_name,					\
-		.store  = store_##_name,				\
-	}
-
-EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA);
-EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER);
-EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH);
-
-static struct attribute *platform_attributes[] = {
-	&dev_attr_camera.attr,
-	&dev_attr_cardr.attr,
-	&dev_attr_disp.attr,
-	NULL
-};
-
-static struct attribute_group platform_attribute_group = {
-	.attrs = platform_attributes
-};
-
-/*
- * Hotkey functions
- */
-static struct key_entry *eepc_get_entry_by_scancode(int code)
-{
-	struct key_entry *key;
-
-	for (key = eeepc_keymap; key->type != KE_END; key++)
-		if (code == key->code)
-			return key;
-
-	return NULL;
-}
-
-static struct key_entry *eepc_get_entry_by_keycode(int code)
-{
-	struct key_entry *key;
-
-	for (key = eeepc_keymap; key->type != KE_END; key++)
-		if (code == key->keycode && key->type == KE_KEY)
-			return key;
-
-	return NULL;
-}
-
-static int eeepc_getkeycode(struct input_dev *dev, int scancode, int *keycode)
-{
-	struct key_entry *key = eepc_get_entry_by_scancode(scancode);
-
-	if (key && key->type == KE_KEY) {
-		*keycode = key->keycode;
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode)
-{
-	struct key_entry *key;
-	int old_keycode;
-
-	if (keycode < 0 || keycode > KEY_MAX)
-		return -EINVAL;
-
-	key = eepc_get_entry_by_scancode(scancode);
-	if (key && key->type == KE_KEY) {
-		old_keycode = key->keycode;
-		key->keycode = keycode;
-		set_bit(keycode, dev->keybit);
-		if (!eepc_get_entry_by_keycode(old_keycode))
-			clear_bit(old_keycode, dev->keybit);
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-static int eeepc_hotk_check(void)
-{
-	const struct key_entry *key;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-	int result;
-
-	result = acpi_bus_get_status(ehotk->device);
-	if (result)
-		return result;
-	if (ehotk->device->status.present) {
-		if (write_acpi_int(ehotk->handle, "INIT", ehotk->init_flag,
-				    &buffer)) {
-			printk(EEEPC_ERR "Hotkey initialization failed\n");
-			return -ENODEV;
-		} else {
-			printk(EEEPC_NOTICE "Hotkey init flags 0x%x\n",
-			       ehotk->init_flag);
-		}
-		/* get control methods supported */
-		if (read_acpi_int(ehotk->handle, "CMSG"
-				   , &ehotk->cm_supported)) {
-			printk(EEEPC_ERR
-			       "Get control methods supported failed\n");
-			return -ENODEV;
-		} else {
-			printk(EEEPC_INFO
-			       "Get control methods supported: 0x%x\n",
-			       ehotk->cm_supported);
-		}
-		ehotk->inputdev = input_allocate_device();
-		if (!ehotk->inputdev) {
-			printk(EEEPC_INFO "Unable to allocate input device\n");
-			return 0;
-		}
-		ehotk->inputdev->name = "Asus EeePC extra buttons";
-		ehotk->inputdev->phys = EEEPC_HOTK_FILE "/input0";
-		ehotk->inputdev->id.bustype = BUS_HOST;
-		ehotk->inputdev->getkeycode = eeepc_getkeycode;
-		ehotk->inputdev->setkeycode = eeepc_setkeycode;
-
-		for (key = eeepc_keymap; key->type != KE_END; key++) {
-			switch (key->type) {
-			case KE_KEY:
-				set_bit(EV_KEY, ehotk->inputdev->evbit);
-				set_bit(key->keycode, ehotk->inputdev->keybit);
-				break;
-			}
-		}
-		result = input_register_device(ehotk->inputdev);
-		if (result) {
-			printk(EEEPC_INFO "Unable to register input device\n");
-			input_free_device(ehotk->inputdev);
-			return 0;
-		}
-	} else {
-		printk(EEEPC_ERR "Hotkey device not present, aborting\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static void notify_brn(void)
-{
-	struct backlight_device *bd = eeepc_backlight_device;
-	bd->props.brightness = read_brightness(bd);
-}
-
-static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
-{
-	static struct key_entry *key;
-	if (!ehotk)
-		return;
-	if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX)
-		notify_brn();
-	acpi_bus_generate_proc_event(ehotk->device, event,
-				     ehotk->event_count[event % 128]++);
-	if (ehotk->inputdev) {
-		key = eepc_get_entry_by_scancode(event);
-		if (key) {
-			switch (key->type) {
-			case KE_KEY:
-				input_report_key(ehotk->inputdev, key->keycode,
-						 1);
-				input_sync(ehotk->inputdev);
-				input_report_key(ehotk->inputdev, key->keycode,
-						 0);
-				input_sync(ehotk->inputdev);
-				break;
-			}
-		}
-	}
-}
-
-static int eeepc_hotk_add(struct acpi_device *device)
-{
-	acpi_status status = AE_OK;
-	int result;
-
-	if (!device)
-		 return -EINVAL;
-	printk(EEEPC_NOTICE EEEPC_HOTK_NAME "\n");
-	ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL);
-	if (!ehotk)
-		return -ENOMEM;
-	ehotk->init_flag = DISABLE_ASL_WLAN | DISABLE_ASL_DISPLAYSWITCH;
-	ehotk->handle = device->handle;
-	strcpy(acpi_device_name(device), EEEPC_HOTK_DEVICE_NAME);
-	strcpy(acpi_device_class(device), EEEPC_HOTK_CLASS);
-	device->driver_data = ehotk;
-	ehotk->device = device;
-	result = eeepc_hotk_check();
-	if (result)
-		goto end;
-	status = acpi_install_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY,
-					     eeepc_hotk_notify, ehotk);
-	if (ACPI_FAILURE(status))
-		printk(EEEPC_ERR "Error installing notify handler\n");
-
-	if (get_acpi(CM_ASL_WLAN) != -1) {
-		ehotk->eeepc_wlan_rfkill = rfkill_allocate(&device->dev,
-							   RFKILL_TYPE_WLAN);
-
-		if (!ehotk->eeepc_wlan_rfkill)
-			goto end;
-
-		ehotk->eeepc_wlan_rfkill->name = "eeepc-wlan";
-		ehotk->eeepc_wlan_rfkill->toggle_radio = eeepc_wlan_rfkill_set;
-		ehotk->eeepc_wlan_rfkill->get_state = eeepc_wlan_rfkill_state;
-		if (get_acpi(CM_ASL_WLAN) == 1)
-			ehotk->eeepc_wlan_rfkill->state =
-				RFKILL_STATE_UNBLOCKED;
-		else
-			ehotk->eeepc_wlan_rfkill->state =
-				RFKILL_STATE_SOFT_BLOCKED;
-		rfkill_register(ehotk->eeepc_wlan_rfkill);
-	}
-
-	if (get_acpi(CM_ASL_BLUETOOTH) != -1) {
-		ehotk->eeepc_bluetooth_rfkill =
-			rfkill_allocate(&device->dev, RFKILL_TYPE_BLUETOOTH);
-
-		if (!ehotk->eeepc_bluetooth_rfkill)
-			goto end;
-
-		ehotk->eeepc_bluetooth_rfkill->name = "eeepc-bluetooth";
-		ehotk->eeepc_bluetooth_rfkill->toggle_radio =
-			eeepc_bluetooth_rfkill_set;
-		ehotk->eeepc_bluetooth_rfkill->get_state =
-			eeepc_bluetooth_rfkill_state;
-		if (get_acpi(CM_ASL_BLUETOOTH) == 1)
-			ehotk->eeepc_bluetooth_rfkill->state =
-				RFKILL_STATE_UNBLOCKED;
-		else
-			ehotk->eeepc_bluetooth_rfkill->state =
-				RFKILL_STATE_SOFT_BLOCKED;
-		rfkill_register(ehotk->eeepc_bluetooth_rfkill);
-	}
-
- end:
-	if (result) {
-		kfree(ehotk);
-		ehotk = NULL;
-	}
-	return result;
-}
-
-static int eeepc_hotk_remove(struct acpi_device *device, int type)
-{
-	acpi_status status = 0;
-
-	if (!device || !acpi_driver_data(device))
-		 return -EINVAL;
-	status = acpi_remove_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY,
-					    eeepc_hotk_notify);
-	if (ACPI_FAILURE(status))
-		printk(EEEPC_ERR "Error removing notify handler\n");
-	kfree(ehotk);
-	return 0;
-}
-
-/*
- * Hwmon
- */
-static int eeepc_get_fan_pwm(void)
-{
-	int value = 0;
-
-	read_acpi_int(NULL, EEEPC_EC_FAN_PWM, &value);
-	value = value * 255 / 100;
-	return (value);
-}
-
-static void eeepc_set_fan_pwm(int value)
-{
-	value = SENSORS_LIMIT(value, 0, 255);
-	value = value * 100 / 255;
-	ec_write(EEEPC_EC_SC02, value);
-}
-
-static int eeepc_get_fan_rpm(void)
-{
-	int high = 0;
-	int low = 0;
-
-	read_acpi_int(NULL, EEEPC_EC_FAN_HRPM, &high);
-	read_acpi_int(NULL, EEEPC_EC_FAN_LRPM, &low);
-	return (high << 8 | low);
-}
-
-static int eeepc_get_fan_ctrl(void)
-{
-	int value = 0;
-
-	read_acpi_int(NULL, EEEPC_EC_FAN_CTRL, &value);
-	return ((value & 0x02 ? 1 : 0));
-}
-
-static void eeepc_set_fan_ctrl(int manual)
-{
-	int value = 0;
-
-	read_acpi_int(NULL, EEEPC_EC_FAN_CTRL, &value);
-	if (manual)
-		value |= 0x02;
-	else
-		value &= ~0x02;
-	ec_write(EEEPC_EC_SFB3, value);
-}
-
-static ssize_t store_sys_hwmon(void (*set)(int), const char *buf, size_t count)
-{
-	int rv, value;
-
-	rv = parse_arg(buf, count, &value);
-	if (rv > 0)
-		set(value);
-	return rv;
-}
-
-static ssize_t show_sys_hwmon(int (*get)(void), char *buf)
-{
-	return sprintf(buf, "%d\n", get());
-}
-
-#define EEEPC_CREATE_SENSOR_ATTR(_name, _mode, _set, _get)		\
-	static ssize_t show_##_name(struct device *dev,			\
-				    struct device_attribute *attr,	\
-				    char *buf)				\
-	{								\
-		return show_sys_hwmon(_set, buf);			\
-	}								\
-	static ssize_t store_##_name(struct device *dev,		\
-				     struct device_attribute *attr,	\
-				     const char *buf, size_t count)	\
-	{								\
-		return store_sys_hwmon(_get, buf, count);		\
-	}								\
-	static SENSOR_DEVICE_ATTR(_name, _mode, show_##_name, store_##_name, 0);
-
-EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL);
-EEEPC_CREATE_SENSOR_ATTR(pwm1, S_IRUGO | S_IWUSR,
-			 eeepc_get_fan_pwm, eeepc_set_fan_pwm);
-EEEPC_CREATE_SENSOR_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
-			 eeepc_get_fan_ctrl, eeepc_set_fan_ctrl);
-
-static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	return sprintf(buf, "eeepc\n");
-}
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
-
-static struct attribute *hwmon_attributes[] = {
-	&sensor_dev_attr_pwm1.dev_attr.attr,
-	&sensor_dev_attr_fan1_input.dev_attr.attr,
-	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
-	&sensor_dev_attr_name.dev_attr.attr,
-	NULL
-};
-
-static struct attribute_group hwmon_attribute_group = {
-	.attrs = hwmon_attributes
-};
-
-/*
- * exit/init
- */
-static void eeepc_backlight_exit(void)
-{
-	if (eeepc_backlight_device)
-		backlight_device_unregister(eeepc_backlight_device);
-	if (ehotk->inputdev)
-		input_unregister_device(ehotk->inputdev);
-	if (ehotk->eeepc_wlan_rfkill)
-		rfkill_unregister(ehotk->eeepc_wlan_rfkill);
-	if (ehotk->eeepc_bluetooth_rfkill)
-		rfkill_unregister(ehotk->eeepc_bluetooth_rfkill);
-	eeepc_backlight_device = NULL;
-}
-
-static void eeepc_hwmon_exit(void)
-{
-	struct device *hwmon;
-
-	hwmon = eeepc_hwmon_device;
-	if (!hwmon)
-		return ;
-	sysfs_remove_group(&hwmon->kobj,
-			   &hwmon_attribute_group);
-	hwmon_device_unregister(hwmon);
-	eeepc_hwmon_device = NULL;
-}
-
-static void __exit eeepc_laptop_exit(void)
-{
-	eeepc_backlight_exit();
-	eeepc_hwmon_exit();
-	acpi_bus_unregister_driver(&eeepc_hotk_driver);
-	sysfs_remove_group(&platform_device->dev.kobj,
-			   &platform_attribute_group);
-	platform_device_unregister(platform_device);
-	platform_driver_unregister(&platform_driver);
-}
-
-static int eeepc_backlight_init(struct device *dev)
-{
-	struct backlight_device *bd;
-
-	bd = backlight_device_register(EEEPC_HOTK_FILE, dev,
-				       NULL, &eeepcbl_ops);
-	if (IS_ERR(bd)) {
-		printk(EEEPC_ERR
-		       "Could not register eeepc backlight device\n");
-		eeepc_backlight_device = NULL;
-		return PTR_ERR(bd);
-	}
-	eeepc_backlight_device = bd;
-	bd->props.max_brightness = 15;
-	bd->props.brightness = read_brightness(NULL);
-	bd->props.power = FB_BLANK_UNBLANK;
-	backlight_update_status(bd);
-	return 0;
-}
-
-static int eeepc_hwmon_init(struct device *dev)
-{
-	struct device *hwmon;
-	int result;
-
-	hwmon = hwmon_device_register(dev);
-	if (IS_ERR(hwmon)) {
-		printk(EEEPC_ERR
-		       "Could not register eeepc hwmon device\n");
-		eeepc_hwmon_device = NULL;
-		return PTR_ERR(hwmon);
-	}
-	eeepc_hwmon_device = hwmon;
-	result = sysfs_create_group(&hwmon->kobj,
-				    &hwmon_attribute_group);
-	if (result)
-		eeepc_hwmon_exit();
-	return result;
-}
-
-static int __init eeepc_laptop_init(void)
-{
-	struct device *dev;
-	int result;
-
-	if (acpi_disabled)
-		return -ENODEV;
-	result = acpi_bus_register_driver(&eeepc_hotk_driver);
-	if (result < 0)
-		return result;
-	if (!ehotk) {
-		acpi_bus_unregister_driver(&eeepc_hotk_driver);
-		return -ENODEV;
-	}
-	dev = acpi_get_physical_device(ehotk->device->handle);
-
-	if (!acpi_video_backlight_support()) {
-		result = eeepc_backlight_init(dev);
-		if (result)
-			goto fail_backlight;
-	} else
-		printk(EEEPC_INFO "Backlight controlled by ACPI video "
-		       "driver\n");
-
-	result = eeepc_hwmon_init(dev);
-	if (result)
-		goto fail_hwmon;
-	/* Register platform stuff */
-	result = platform_driver_register(&platform_driver);
-	if (result)
-		goto fail_platform_driver;
-	platform_device = platform_device_alloc(EEEPC_HOTK_FILE, -1);
-	if (!platform_device) {
-		result = -ENOMEM;
-		goto fail_platform_device1;
-	}
-	result = platform_device_add(platform_device);
-	if (result)
-		goto fail_platform_device2;
-	result = sysfs_create_group(&platform_device->dev.kobj,
-				    &platform_attribute_group);
-	if (result)
-		goto fail_sysfs;
-	return 0;
-fail_sysfs:
-	platform_device_del(platform_device);
-fail_platform_device2:
-	platform_device_put(platform_device);
-fail_platform_device1:
-	platform_driver_unregister(&platform_driver);
-fail_platform_driver:
-	eeepc_hwmon_exit();
-fail_hwmon:
-	eeepc_backlight_exit();
-fail_backlight:
-	return result;
-}
-
-module_init(eeepc_laptop_init);
-module_exit(eeepc_laptop_exit);
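The init path above registers each resource in turn and relies on the label ladder at the end of the function to release, in reverse order, only what had already succeeded. A condensed sketch of the same idiom (register_a()/register_b() are placeholders, not driver functions):

static int example_init(void)
{
	int err;

	err = register_a();
	if (err)
		goto fail_a;
	err = register_b();
	if (err)
		goto fail_b;
	return 0;

fail_b:
	unregister_a();		/* undo only the steps that already succeeded */
fail_a:
	return err;
}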
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 0736cff..3cf61ec 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -119,7 +119,7 @@
 	edev->edev.class = &enclosure_class;
 	edev->edev.parent = get_device(dev);
 	edev->cb = cb;
-	snprintf(edev->edev.bus_id, BUS_ID_SIZE, "%s", name);
+	dev_set_name(&edev->edev, name);
 	err = device_register(&edev->edev);
 	if (err)
 		goto err;
@@ -170,7 +170,7 @@
 static void enclosure_link_name(struct enclosure_component *cdev, char *name)
 {
 	strcpy(name, "enclosure_device:");
-	strcat(name, cdev->cdev.bus_id);
+	strcat(name, dev_name(&cdev->cdev));
 }
 
 static void enclosure_remove_links(struct enclosure_component *cdev)
@@ -256,9 +256,9 @@
 	cdev = &ecomp->cdev;
 	cdev->parent = get_device(&edev->edev);
 	if (name)
-		snprintf(cdev->bus_id, BUS_ID_SIZE, "%s", name);
+		dev_set_name(cdev, name);
 	else
-		snprintf(cdev->bus_id, BUS_ID_SIZE, "%u", number);
+		dev_set_name(cdev, "%u", number);
 
 	cdev->release = enclosure_component_release;
 	cdev->groups = enclosure_groups;
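These enclosure hunks replace snprintf() into the fixed-size bus_id array with dev_set_name(), which takes a printf-style format and records the name with the device core. A small sketch of the same call pattern (the helper and its arguments are illustrative only); passing a caller-supplied string through "%s" keeps it from being interpreted as a format:

static void example_name_device(struct device *dev, const char *name,
				unsigned int number)
{
	if (name)
		dev_set_name(dev, "%s", name);		/* caller-supplied string */
	else
		dev_set_name(dev, "%u", number);	/* fall back to an index  */
}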
diff --git a/drivers/misc/fujitsu-laptop.c b/drivers/misc/fujitsu-laptop.c
deleted file mode 100644
index a7dd3e9..0000000
--- a/drivers/misc/fujitsu-laptop.c
+++ /dev/null
@@ -1,1126 +0,0 @@
-/*-*-linux-c-*-*/
-
-/*
-  Copyright (C) 2007,2008 Jonathan Woithe <jwoithe@physics.adelaide.edu.au>
-  Copyright (C) 2008 Peter Gruber <nokos@gmx.net>
-  Based on earlier work:
-    Copyright (C) 2003 Shane Spencer <shane@bogomip.com>
-    Adrian Yee <brewt-fujitsu@brewt.org>
-
-  Templated from msi-laptop.c and thinkpad_acpi.c which is copyright
-  by its respective authors.
-
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 2 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program; if not, write to the Free Software
-  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
-  02110-1301, USA.
- */
-
-/*
- * fujitsu-laptop.c - Fujitsu laptop support, providing access to additional
- * features made available on a range of Fujitsu laptops including the
- * P2xxx/P5xxx/S6xxx/S7xxx series.
- *
- * This driver exports a few files in /sys/devices/platform/fujitsu-laptop/;
- * others may be added at a later date.
- *
- *   lcd_level - Screen brightness: contains a single integer in the
- *   range 0..7. (rw)
- *
- * In addition to these platform device attributes the driver
- * registers itself in the Linux backlight control subsystem and is
- * available to userspace under /sys/class/backlight/fujitsu-laptop/.
- *
- * Hotkeys present on certain Fujitsu laptops (eg: the S6xxx series) are
- * also supported by this driver.
- *
- * This driver has been tested on a Fujitsu Lifebook S6410, S7020 and
- * P8010.  It should work on most P-series and S-series Lifebooks, but
- * YMMV.
- *
- * The module parameter use_alt_lcd_levels switches between different ACPI
- * brightness controls which are used by different Fujitsu laptops.  In most
- * cases the correct method is automatically detected. "use_alt_lcd_levels=1"
- * is applicable for a Fujitsu Lifebook S6410 if autodetection fails.
- *
- */
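As a concrete illustration of the interface described above, a userspace program could read and set the lcd_level attribute roughly like this (illustrative only; the path comes from the comment above, error handling kept minimal):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/platform/fujitsu-laptop/lcd_level";
	FILE *f;
	int level;

	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fscanf(f, "%d", &level) == 1)
		printf("current lcd_level: %d\n", level);
	fclose(f);

	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "%d\n", 3);	/* request brightness level 3 (range 0..7) */
	fclose(f);
	return 0;
}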
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-#include <linux/dmi.h>
-#include <linux/backlight.h>
-#include <linux/input.h>
-#include <linux/kfifo.h>
-#include <linux/video_output.h>
-#include <linux/platform_device.h>
-
-#define FUJITSU_DRIVER_VERSION "0.4.3"
-
-#define FUJITSU_LCD_N_LEVELS 8
-
-#define ACPI_FUJITSU_CLASS              "fujitsu"
-#define ACPI_FUJITSU_HID                "FUJ02B1"
-#define ACPI_FUJITSU_DRIVER_NAME	"Fujitsu laptop FUJ02B1 ACPI brightness driver"
-#define ACPI_FUJITSU_DEVICE_NAME        "Fujitsu FUJ02B1"
-#define ACPI_FUJITSU_HOTKEY_HID 	"FUJ02E3"
-#define ACPI_FUJITSU_HOTKEY_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"
-#define ACPI_FUJITSU_HOTKEY_DEVICE_NAME "Fujitsu FUJ02E3"
-
-#define ACPI_FUJITSU_NOTIFY_CODE1     0x80
-
-#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS     0x86
-#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS     0x87
-
-/* Hotkey details */
-#define KEY1_CODE	0x410	/* codes for the keys in the GIRB register */
-#define KEY2_CODE	0x411
-#define KEY3_CODE	0x412
-#define KEY4_CODE	0x413
-
-#define MAX_HOTKEY_RINGBUFFER_SIZE 100
-#define RINGBUFFERSIZE 40
-
-/* Debugging */
-#define FUJLAPTOP_LOG	   ACPI_FUJITSU_HID ": "
-#define FUJLAPTOP_ERR	   KERN_ERR FUJLAPTOP_LOG
-#define FUJLAPTOP_NOTICE   KERN_NOTICE FUJLAPTOP_LOG
-#define FUJLAPTOP_INFO	   KERN_INFO FUJLAPTOP_LOG
-#define FUJLAPTOP_DEBUG    KERN_DEBUG FUJLAPTOP_LOG
-
-#define FUJLAPTOP_DBG_ALL	  0xffff
-#define FUJLAPTOP_DBG_ERROR	  0x0001
-#define FUJLAPTOP_DBG_WARN	  0x0002
-#define FUJLAPTOP_DBG_INFO	  0x0004
-#define FUJLAPTOP_DBG_TRACE	  0x0008
-
-#define dbg_printk(a_dbg_level, format, arg...) \
-	do { if (dbg_level & a_dbg_level) \
-		printk(FUJLAPTOP_DEBUG "%s: " format, __func__ , ## arg); \
-	} while (0)
-#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
-#define vdbg_printk(a_dbg_level, format, arg...) \
-	dbg_printk(a_dbg_level, format, ## arg)
-#else
-#define vdbg_printk(a_dbg_level, format, arg...)
-#endif
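dbg_printk() only prints when the corresponding bit is set in dbg_level, and vdbg_printk() compiles away entirely unless CONFIG_FUJITSU_LAPTOP_DEBUG is enabled. A usage sketch (the surrounding function and message are placeholders):

static void example_trace(int level)
{
	/* Emitted only with CONFIG_FUJITSU_LAPTOP_DEBUG built in and the
	 * FUJLAPTOP_DBG_TRACE bit (0x0008) set in the debug mask. */
	vdbg_printk(FUJLAPTOP_DBG_TRACE, "lcd level is now %d\n", level);
}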
-
-/* Device controlling the backlight and associated keys */
-struct fujitsu_t {
-	acpi_handle acpi_handle;
-	struct acpi_device *dev;
-	struct input_dev *input;
-	char phys[32];
-	struct backlight_device *bl_device;
-	struct platform_device *pf_device;
-	int keycode1, keycode2, keycode3, keycode4;
-
-	unsigned int max_brightness;
-	unsigned int brightness_changed;
-	unsigned int brightness_level;
-};
-
-static struct fujitsu_t *fujitsu;
-static int use_alt_lcd_levels = -1;
-static int disable_brightness_keys = -1;
-static int disable_brightness_adjust = -1;
-
-/* Device used to access other hotkeys on the laptop */
-struct fujitsu_hotkey_t {
-	acpi_handle acpi_handle;
-	struct acpi_device *dev;
-	struct input_dev *input;
-	char phys[32];
-	struct platform_device *pf_device;
-	struct kfifo *fifo;
-	spinlock_t fifo_lock;
-
-	unsigned int irb;	/* info about the pressed buttons */
-};
-
-static struct fujitsu_hotkey_t *fujitsu_hotkey;
-
-static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event,
-				       void *data);
-
-#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
-static u32 dbg_level = 0x03;
-#endif
-
-static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data);
-
-/* Hardware access for LCD brightness control */
-
-static int set_lcd_level(int level)
-{
-	acpi_status status = AE_OK;
-	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-	struct acpi_object_list arg_list = { 1, &arg0 };
-	acpi_handle handle = NULL;
-
-	vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",
-		    level);
-
-	if (level < 0 || level >= fujitsu->max_brightness)
-		return -EINVAL;
-
-	if (!fujitsu)
-		return -EINVAL;
-
-	status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle);
-	if (ACPI_FAILURE(status)) {
-		vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n");
-		return -ENODEV;
-	}
-
-	arg0.integer.value = level;
-
-	status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
-	if (ACPI_FAILURE(status))
-		return -ENODEV;
-
-	return 0;
-}
-
-static int set_lcd_level_alt(int level)
-{
-	acpi_status status = AE_OK;
-	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-	struct acpi_object_list arg_list = { 1, &arg0 };
-	acpi_handle handle = NULL;
-
-	vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",
-		    level);
-
-	if (level < 0 || level >= fujitsu->max_brightness)
-		return -EINVAL;
-
-	if (!fujitsu)
-		return -EINVAL;
-
-	status = acpi_get_handle(fujitsu->acpi_handle, "SBL2", &handle);
-	if (ACPI_FAILURE(status)) {
-		vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n");
-		return -ENODEV;
-	}
-
-	arg0.integer.value = level;
-
-	status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
-	if (ACPI_FAILURE(status))
-		return -ENODEV;
-
-	return 0;
-}
-
-static int get_lcd_level(void)
-{
-	unsigned long long state = 0;
-	acpi_status status = AE_OK;
-
-	vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n");
-
-	status =
-	    acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state);
-	if (status < 0)
-		return status;
-
-	fujitsu->brightness_level = state & 0x0fffffff;
-
-	if (state & 0x80000000)
-		fujitsu->brightness_changed = 1;
-	else
-		fujitsu->brightness_changed = 0;
-
-	return fujitsu->brightness_level;
-}
-
-static int get_max_brightness(void)
-{
-	unsigned long long state = 0;
-	acpi_status status = AE_OK;
-
-	vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n");
-
-	status =
-	    acpi_evaluate_integer(fujitsu->acpi_handle, "RBLL", NULL, &state);
-	if (status < 0)
-		return status;
-
-	fujitsu->max_brightness = state;
-
-	return fujitsu->max_brightness;
-}
-
-static int get_lcd_level_alt(void)
-{
-	unsigned long long state = 0;
-	acpi_status status = AE_OK;
-
-	vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLS\n");
-
-	status =
-	    acpi_evaluate_integer(fujitsu->acpi_handle, "GBLS", NULL, &state);
-	if (status < 0)
-		return status;
-
-	fujitsu->brightness_level = state & 0x0fffffff;
-
-	if (state & 0x80000000)
-		fujitsu->brightness_changed = 1;
-	else
-		fujitsu->brightness_changed = 0;
-
-	return fujitsu->brightness_level;
-}
-
-/* Backlight device stuff */
-
-static int bl_get_brightness(struct backlight_device *b)
-{
-	if (use_alt_lcd_levels)
-		return get_lcd_level_alt();
-	else
-		return get_lcd_level();
-}
-
-static int bl_update_status(struct backlight_device *b)
-{
-	if (use_alt_lcd_levels)
-		return set_lcd_level_alt(b->props.brightness);
-	else
-		return set_lcd_level(b->props.brightness);
-}
-
-static struct backlight_ops fujitsubl_ops = {
-	.get_brightness = bl_get_brightness,
-	.update_status = bl_update_status,
-};
-
-/* Platform LCD brightness device */
-
-static ssize_t
-show_max_brightness(struct device *dev,
-		    struct device_attribute *attr, char *buf)
-{
-
-	int ret;
-
-	ret = get_max_brightness();
-	if (ret < 0)
-		return ret;
-
-	return sprintf(buf, "%i\n", ret);
-}
-
-static ssize_t
-show_brightness_changed(struct device *dev,
-			struct device_attribute *attr, char *buf)
-{
-
-	int ret;
-
-	ret = fujitsu->brightness_changed;
-	if (ret < 0)
-		return ret;
-
-	return sprintf(buf, "%i\n", ret);
-}
-
-static ssize_t show_lcd_level(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-
-	int ret;
-
-	if (use_alt_lcd_levels)
-		ret = get_lcd_level_alt();
-	else
-		ret = get_lcd_level();
-	if (ret < 0)
-		return ret;
-
-	return sprintf(buf, "%i\n", ret);
-}
-
-static ssize_t store_lcd_level(struct device *dev,
-			       struct device_attribute *attr, const char *buf,
-			       size_t count)
-{
-
-	int level, ret;
-
-	if (sscanf(buf, "%i", &level) != 1
-	    || (level < 0 || level >= fujitsu->max_brightness))
-		return -EINVAL;
-
-	if (use_alt_lcd_levels)
-		ret = set_lcd_level_alt(level);
-	else
-		ret = set_lcd_level(level);
-	if (ret < 0)
-		return ret;
-
-	if (use_alt_lcd_levels)
-		ret = get_lcd_level_alt();
-	else
-		ret = get_lcd_level();
-	if (ret < 0)
-		return ret;
-
-	return count;
-}
-
-/* Hardware access for hotkey device */
-
-static int get_irb(void)
-{
-	unsigned long long state = 0;
-	acpi_status status = AE_OK;
-
-	vdbg_printk(FUJLAPTOP_DBG_TRACE, "Get irb\n");
-
-	status =
-	    acpi_evaluate_integer(fujitsu_hotkey->acpi_handle, "GIRB", NULL,
-				  &state);
-	if (status < 0)
-		return status;
-
-	fujitsu_hotkey->irb = state;
-
-	return fujitsu_hotkey->irb;
-}
-
-static ssize_t
-ignore_store(struct device *dev,
-	     struct device_attribute *attr, const char *buf, size_t count)
-{
-	return count;
-}
-
-static DEVICE_ATTR(max_brightness, 0444, show_max_brightness, ignore_store);
-static DEVICE_ATTR(brightness_changed, 0444, show_brightness_changed,
-		   ignore_store);
-static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level);
-
-static struct attribute *fujitsupf_attributes[] = {
-	&dev_attr_brightness_changed.attr,
-	&dev_attr_max_brightness.attr,
-	&dev_attr_lcd_level.attr,
-	NULL
-};
-
-static struct attribute_group fujitsupf_attribute_group = {
-	.attrs = fujitsupf_attributes
-};
-
-static struct platform_driver fujitsupf_driver = {
-	.driver = {
-		   .name = "fujitsu-laptop",
-		   .owner = THIS_MODULE,
-		   }
-};
-
-static void dmi_check_cb_common(const struct dmi_system_id *id)
-{
-	acpi_handle handle;
-	int have_blnf;
-	printk(KERN_INFO "fujitsu-laptop: Identified laptop model '%s'.\n",
-	       id->ident);
-	have_blnf = ACPI_SUCCESS
-	    (acpi_get_handle(NULL, "\\_SB.PCI0.GFX0.LCD.BLNF", &handle));
-	if (use_alt_lcd_levels == -1) {
-		vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detecting usealt\n");
-		use_alt_lcd_levels = 1;
-	}
-	if (disable_brightness_keys == -1) {
-		vdbg_printk(FUJLAPTOP_DBG_TRACE,
-			    "auto-detecting disable_keys\n");
-		disable_brightness_keys = have_blnf ? 1 : 0;
-	}
-	if (disable_brightness_adjust == -1) {
-		vdbg_printk(FUJLAPTOP_DBG_TRACE,
-			    "auto-detecting disable_adjust\n");
-		disable_brightness_adjust = have_blnf ? 0 : 1;
-	}
-}
-
-static int dmi_check_cb_s6410(const struct dmi_system_id *id)
-{
-	dmi_check_cb_common(id);
-	fujitsu->keycode1 = KEY_SCREENLOCK;	/* "Lock" */
-	fujitsu->keycode2 = KEY_HELP;	/* "Mobility Center" */
-	return 0;
-}
-
-static int dmi_check_cb_s6420(const struct dmi_system_id *id)
-{
-	dmi_check_cb_common(id);
-	fujitsu->keycode1 = KEY_SCREENLOCK;	/* "Lock" */
-	fujitsu->keycode2 = KEY_HELP;	/* "Mobility Center" */
-	return 0;
-}
-
-static int dmi_check_cb_p8010(const struct dmi_system_id *id)
-{
-	dmi_check_cb_common(id);
-	fujitsu->keycode1 = KEY_HELP;	/* "Support" */
-	fujitsu->keycode3 = KEY_SWITCHVIDEOMODE;	/* "Presentation" */
-	fujitsu->keycode4 = KEY_WWW;	/* "Internet" */
-	return 0;
-}
-
-static struct dmi_system_id fujitsu_dmi_table[] = {
-	{
-	 .ident = "Fujitsu Siemens S6410",
-	 .matches = {
-		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6410"),
-		     },
-	 .callback = dmi_check_cb_s6410},
-	{
-	 .ident = "Fujitsu Siemens S6420",
-	 .matches = {
-		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6420"),
-		     },
-	 .callback = dmi_check_cb_s6420},
-	{
-	 .ident = "Fujitsu LifeBook P8010",
-	 .matches = {
-		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P8010"),
-		     },
-	 .callback = dmi_check_cb_p8010},
-	{}
-};
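Each table entry pairs DMI vendor/product strings with a per-model callback that fixes up keycodes and autodetection defaults; dmi_check_system() walks this table at init time. A hypothetical additional entry (model string is a placeholder, not a real machine) would be added just before the empty terminator, for example:

	{
	 .ident = "Fujitsu Siemens S6421",
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6421"),
		     },
	 .callback = dmi_check_cb_s6420},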
-
-/* ACPI device for LCD brightness control */
-
-static int acpi_fujitsu_add(struct acpi_device *device)
-{
-	acpi_status status;
-	acpi_handle handle;
-	int result = 0;
-	int state = 0;
-	struct input_dev *input;
-	int error;
-
-	if (!device)
-		return -EINVAL;
-
-	fujitsu->acpi_handle = device->handle;
-	sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_DEVICE_NAME);
-	sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
-	device->driver_data = fujitsu;
-
-	status = acpi_install_notify_handler(device->handle,
-					     ACPI_DEVICE_NOTIFY,
-					     acpi_fujitsu_notify, fujitsu);
-
-	if (ACPI_FAILURE(status)) {
-		printk(KERN_ERR "Error installing notify handler\n");
-		error = -ENODEV;
-		goto err_stop;
-	}
-
-	fujitsu->input = input = input_allocate_device();
-	if (!input) {
-		error = -ENOMEM;
-		goto err_uninstall_notify;
-	}
-
-	snprintf(fujitsu->phys, sizeof(fujitsu->phys),
-		 "%s/video/input0", acpi_device_hid(device));
-
-	input->name = acpi_device_name(device);
-	input->phys = fujitsu->phys;
-	input->id.bustype = BUS_HOST;
-	input->id.product = 0x06;
-	input->dev.parent = &device->dev;
-	input->evbit[0] = BIT(EV_KEY);
-	set_bit(KEY_BRIGHTNESSUP, input->keybit);
-	set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
-	set_bit(KEY_UNKNOWN, input->keybit);
-
-	error = input_register_device(input);
-	if (error)
-		goto err_free_input_dev;
-
-	result = acpi_bus_get_power(fujitsu->acpi_handle, &state);
-	if (result) {
-		printk(KERN_ERR "Error reading power state\n");
-		goto end;
-	}
-
-	printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
-	       acpi_device_name(device), acpi_device_bid(device),
-	       !device->power.state ? "on" : "off");
-
-	fujitsu->dev = device;
-
-	if (ACPI_SUCCESS
-	    (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
-		vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
-		if (ACPI_FAILURE
-		    (acpi_evaluate_object
-		     (device->handle, METHOD_NAME__INI, NULL, NULL)))
-			printk(KERN_ERR "_INI Method failed\n");
-	}
-
-	/* do config (detect defaults) */
-	use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0;
-	disable_brightness_keys = disable_brightness_keys == 1 ? 1 : 0;
-	disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0;
-	vdbg_printk(FUJLAPTOP_DBG_INFO,
-		    "config: [alt interface: %d], [key disable: %d], [adjust disable: %d]\n",
-		    use_alt_lcd_levels, disable_brightness_keys,
-		    disable_brightness_adjust);
-
-	if (get_max_brightness() <= 0)
-		fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS;
-	if (use_alt_lcd_levels)
-		get_lcd_level_alt();
-	else
-		get_lcd_level();
-
-	return result;
-
-end:
-err_free_input_dev:
-	input_free_device(input);
-err_uninstall_notify:
-	acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
-				   acpi_fujitsu_notify);
-err_stop:
-
-	return result;
-}
-
-static int acpi_fujitsu_remove(struct acpi_device *device, int type)
-{
-	acpi_status status;
-	struct fujitsu_t *fujitsu = NULL;
-
-	if (!device || !acpi_driver_data(device))
-		return -EINVAL;
-
-	fujitsu = acpi_driver_data(device);
-
-	status = acpi_remove_notify_handler(fujitsu->acpi_handle,
-					    ACPI_DEVICE_NOTIFY,
-					    acpi_fujitsu_notify);
-
-	if (!device || !acpi_driver_data(device))
-		return -EINVAL;
-
-	fujitsu->acpi_handle = NULL;
-
-	return 0;
-}
-
-/* Brightness notify */
-
-static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data)
-{
-	struct input_dev *input;
-	int keycode;
-	int oldb, newb;
-
-	input = fujitsu->input;
-
-	switch (event) {
-	case ACPI_FUJITSU_NOTIFY_CODE1:
-		keycode = 0;
-		oldb = fujitsu->brightness_level;
-		get_lcd_level();  /* the alt version always yields changed */
-		newb = fujitsu->brightness_level;
-
-		vdbg_printk(FUJLAPTOP_DBG_TRACE,
-			    "brightness button event [%i -> %i (%i)]\n",
-			    oldb, newb, fujitsu->brightness_changed);
-
-		if (oldb == newb && fujitsu->brightness_changed) {
-			keycode = 0;
-			if (disable_brightness_keys != 1) {
-				if (oldb == 0) {
-					acpi_bus_generate_proc_event
-					    (fujitsu->dev,
-					     ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS,
-					     0);
-					keycode = KEY_BRIGHTNESSDOWN;
-				} else if (oldb ==
-					   (fujitsu->max_brightness) - 1) {
-					acpi_bus_generate_proc_event
-					    (fujitsu->dev,
-					     ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS,
-					     0);
-					keycode = KEY_BRIGHTNESSUP;
-				}
-			}
-		} else if (oldb < newb) {
-			if (disable_brightness_adjust != 1) {
-				if (use_alt_lcd_levels)
-					set_lcd_level_alt(newb);
-				else
-					set_lcd_level(newb);
-			}
-			if (disable_brightness_keys != 1) {
-				acpi_bus_generate_proc_event(fujitsu->dev,
-					ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, 0);
-				keycode = KEY_BRIGHTNESSUP;
-			}
-		} else if (oldb > newb) {
-			if (disable_brightness_adjust != 1) {
-				if (use_alt_lcd_levels)
-					set_lcd_level_alt(newb);
-				else
-					set_lcd_level(newb);
-			}
-			if (disable_brightness_keys != 1) {
-				acpi_bus_generate_proc_event(fujitsu->dev,
-					ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, 0);
-				keycode = KEY_BRIGHTNESSDOWN;
-			}
-		} else {
-			keycode = KEY_UNKNOWN;
-		}
-		break;
-	default:
-		keycode = KEY_UNKNOWN;
-		vdbg_printk(FUJLAPTOP_DBG_WARN,
-			    "unsupported event [0x%x]\n", event);
-		break;
-	}
-
-	if (keycode != 0) {
-		input_report_key(input, keycode, 1);
-		input_sync(input);
-		input_report_key(input, keycode, 0);
-		input_sync(input);
-	}
-
-	return;
-}
-
-/* ACPI device for hotkey handling */
-
-static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
-{
-	acpi_status status;
-	acpi_handle handle;
-	int result = 0;
-	int state = 0;
-	struct input_dev *input;
-	int error;
-	int i;
-
-	if (!device)
-		return -EINVAL;
-
-	fujitsu_hotkey->acpi_handle = device->handle;
-	sprintf(acpi_device_name(device), "%s",
-		ACPI_FUJITSU_HOTKEY_DEVICE_NAME);
-	sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
-	device->driver_data = fujitsu_hotkey;
-
-	status = acpi_install_notify_handler(device->handle,
-					     ACPI_DEVICE_NOTIFY,
-					     acpi_fujitsu_hotkey_notify,
-					     fujitsu_hotkey);
-
-	if (ACPI_FAILURE(status)) {
-		printk(KERN_ERR "Error installing notify handler\n");
-		error = -ENODEV;
-		goto err_stop;
-	}
-
-	/* kfifo */
-	spin_lock_init(&fujitsu_hotkey->fifo_lock);
-	fujitsu_hotkey->fifo =
-	    kfifo_alloc(RINGBUFFERSIZE * sizeof(int), GFP_KERNEL,
-			&fujitsu_hotkey->fifo_lock);
-	if (IS_ERR(fujitsu_hotkey->fifo)) {
-		printk(KERN_ERR "kfifo_alloc failed\n");
-		error = PTR_ERR(fujitsu_hotkey->fifo);
-		goto err_stop;
-	}
-
-	fujitsu_hotkey->input = input = input_allocate_device();
-	if (!input) {
-		error = -ENOMEM;
-		goto err_uninstall_notify;
-	}
-
-	snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys),
-		 "%s/video/input0", acpi_device_hid(device));
-
-	input->name = acpi_device_name(device);
-	input->phys = fujitsu_hotkey->phys;
-	input->id.bustype = BUS_HOST;
-	input->id.product = 0x06;
-	input->dev.parent = &device->dev;
-	input->evbit[0] = BIT(EV_KEY);
-	set_bit(fujitsu->keycode1, input->keybit);
-	set_bit(fujitsu->keycode2, input->keybit);
-	set_bit(fujitsu->keycode3, input->keybit);
-	set_bit(fujitsu->keycode4, input->keybit);
-	set_bit(KEY_UNKNOWN, input->keybit);
-
-	error = input_register_device(input);
-	if (error)
-		goto err_free_input_dev;
-
-	result = acpi_bus_get_power(fujitsu_hotkey->acpi_handle, &state);
-	if (result) {
-		printk(KERN_ERR "Error reading power state\n");
-		goto end;
-	}
-
-	printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
-	       acpi_device_name(device), acpi_device_bid(device),
-	       !device->power.state ? "on" : "off");
-
-	fujitsu_hotkey->dev = device;
-
-	if (ACPI_SUCCESS
-	    (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
-		vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
-		if (ACPI_FAILURE
-		    (acpi_evaluate_object
-		     (device->handle, METHOD_NAME__INI, NULL, NULL)))
-			printk(KERN_ERR "_INI Method failed\n");
-	}
-
-	i = 0;			/* Discard hotkey ringbuffer */
-	while (get_irb() != 0 && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) ;
-	vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i);
-
-	return result;
-
-end:
-err_free_input_dev:
-	input_free_device(input);
-err_uninstall_notify:
-	acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
-				   acpi_fujitsu_hotkey_notify);
-	kfifo_free(fujitsu_hotkey->fifo);
-err_stop:
-
-	return result;
-}
-
-static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type)
-{
-	acpi_status status;
-	struct fujitsu_hotkey_t *fujitsu_hotkey = NULL;
-
-	if (!device || !acpi_driver_data(device))
-		return -EINVAL;
-
-	fujitsu_hotkey = acpi_driver_data(device);
-
-	status = acpi_remove_notify_handler(fujitsu_hotkey->acpi_handle,
-					    ACPI_DEVICE_NOTIFY,
-					    acpi_fujitsu_hotkey_notify);
-
-	fujitsu_hotkey->acpi_handle = NULL;
-
-	kfifo_free(fujitsu_hotkey->fifo);
-
-	return 0;
-}
-
-static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event,
-				       void *data)
-{
-	struct input_dev *input;
-	int keycode, keycode_r;
-	unsigned int irb = 1;
-	int i, status;
-
-	input = fujitsu_hotkey->input;
-
-	vdbg_printk(FUJLAPTOP_DBG_TRACE, "Hotkey event\n");
-
-	switch (event) {
-	case ACPI_FUJITSU_NOTIFY_CODE1:
-		i = 0;
-		while ((irb = get_irb()) != 0
-		       && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) {
-			vdbg_printk(FUJLAPTOP_DBG_TRACE, "GIRB result [%x]\n",
-				    irb);
-
-			switch (irb & 0x4ff) {
-			case KEY1_CODE:
-				keycode = fujitsu->keycode1;
-				break;
-			case KEY2_CODE:
-				keycode = fujitsu->keycode2;
-				break;
-			case KEY3_CODE:
-				keycode = fujitsu->keycode3;
-				break;
-			case KEY4_CODE:
-				keycode = fujitsu->keycode4;
-				break;
-			case 0:
-				keycode = 0;
-				break;
-			default:
-				vdbg_printk(FUJLAPTOP_DBG_WARN,
-					    "Unknown GIRB result [%x]\n", irb);
-				keycode = -1;
-				break;
-			}
-			if (keycode > 0) {
-				vdbg_printk(FUJLAPTOP_DBG_TRACE,
-					"Push keycode into ringbuffer [%d]\n",
-					keycode);
-				status = kfifo_put(fujitsu_hotkey->fifo,
-						   (unsigned char *)&keycode,
-						   sizeof(keycode));
-				if (status != sizeof(keycode)) {
-					vdbg_printk(FUJLAPTOP_DBG_WARN,
-					    "Could not push keycode [0x%x]\n",
-					    keycode);
-				} else {
-					input_report_key(input, keycode, 1);
-					input_sync(input);
-				}
-			} else if (keycode == 0) {
-				while ((status =
-					kfifo_get
-					(fujitsu_hotkey->fifo, (unsigned char *)
-					 &keycode_r,
-					 sizeof
-					 (keycode_r))) == sizeof(keycode_r)) {
-					input_report_key(input, keycode_r, 0);
-					input_sync(input);
-					vdbg_printk(FUJLAPTOP_DBG_TRACE,
-					  "Pop keycode from ringbuffer [%d]\n",
-					  keycode_r);
-				}
-			}
-		}
-
-		break;
-	default:
-		keycode = KEY_UNKNOWN;
-		vdbg_printk(FUJLAPTOP_DBG_WARN,
-			    "Unsupported event [0x%x]\n", event);
-		input_report_key(input, keycode, 1);
-		input_sync(input);
-		input_report_key(input, keycode, 0);
-		input_sync(input);
-		break;
-	}
-
-	return;
-}
-
-/* Initialization */
-
-static const struct acpi_device_id fujitsu_device_ids[] = {
-	{ACPI_FUJITSU_HID, 0},
-	{"", 0},
-};
-
-static struct acpi_driver acpi_fujitsu_driver = {
-	.name = ACPI_FUJITSU_DRIVER_NAME,
-	.class = ACPI_FUJITSU_CLASS,
-	.ids = fujitsu_device_ids,
-	.ops = {
-		.add = acpi_fujitsu_add,
-		.remove = acpi_fujitsu_remove,
-		},
-};
-
-static const struct acpi_device_id fujitsu_hotkey_device_ids[] = {
-	{ACPI_FUJITSU_HOTKEY_HID, 0},
-	{"", 0},
-};
-
-static struct acpi_driver acpi_fujitsu_hotkey_driver = {
-	.name = ACPI_FUJITSU_HOTKEY_DRIVER_NAME,
-	.class = ACPI_FUJITSU_CLASS,
-	.ids = fujitsu_hotkey_device_ids,
-	.ops = {
-		.add = acpi_fujitsu_hotkey_add,
-		.remove = acpi_fujitsu_hotkey_remove,
-		},
-};
-
-static int __init fujitsu_init(void)
-{
-	int ret, result, max_brightness;
-
-	if (acpi_disabled)
-		return -ENODEV;
-
-	fujitsu = kmalloc(sizeof(struct fujitsu_t), GFP_KERNEL);
-	if (!fujitsu)
-		return -ENOMEM;
-	memset(fujitsu, 0, sizeof(struct fujitsu_t));
-	fujitsu->keycode1 = KEY_PROG1;
-	fujitsu->keycode2 = KEY_PROG2;
-	fujitsu->keycode3 = KEY_PROG3;
-	fujitsu->keycode4 = KEY_PROG4;
-	dmi_check_system(fujitsu_dmi_table);
-
-	result = acpi_bus_register_driver(&acpi_fujitsu_driver);
-	if (result < 0) {
-		ret = -ENODEV;
-		goto fail_acpi;
-	}
-
-	/* Register platform stuff */
-
-	fujitsu->pf_device = platform_device_alloc("fujitsu-laptop", -1);
-	if (!fujitsu->pf_device) {
-		ret = -ENOMEM;
-		goto fail_platform_driver;
-	}
-
-	ret = platform_device_add(fujitsu->pf_device);
-	if (ret)
-		goto fail_platform_device1;
-
-	ret =
-	    sysfs_create_group(&fujitsu->pf_device->dev.kobj,
-			       &fujitsupf_attribute_group);
-	if (ret)
-		goto fail_platform_device2;
-
-	/* Register backlight stuff */
-
-	if (!acpi_video_backlight_support()) {
-		fujitsu->bl_device =
-			backlight_device_register("fujitsu-laptop", NULL, NULL,
-						  &fujitsubl_ops);
-		if (IS_ERR(fujitsu->bl_device))
-			return PTR_ERR(fujitsu->bl_device);
-		max_brightness = fujitsu->max_brightness;
-		fujitsu->bl_device->props.max_brightness = max_brightness - 1;
-		fujitsu->bl_device->props.brightness = fujitsu->brightness_level;
-	}
-
-	ret = platform_driver_register(&fujitsupf_driver);
-	if (ret)
-		goto fail_backlight;
-
-	/* Register hotkey driver */
-
-	fujitsu_hotkey = kmalloc(sizeof(struct fujitsu_hotkey_t), GFP_KERNEL);
-	if (!fujitsu_hotkey) {
-		ret = -ENOMEM;
-		goto fail_hotkey;
-	}
-	memset(fujitsu_hotkey, 0, sizeof(struct fujitsu_hotkey_t));
-
-	result = acpi_bus_register_driver(&acpi_fujitsu_hotkey_driver);
-	if (result < 0) {
-		ret = -ENODEV;
-		goto fail_hotkey1;
-	}
-
-	printk(KERN_INFO "fujitsu-laptop: driver " FUJITSU_DRIVER_VERSION
-	       " successfully loaded.\n");
-
-	return 0;
-
-fail_hotkey1:
-
-	kfree(fujitsu_hotkey);
-
-fail_hotkey:
-
-	platform_driver_unregister(&fujitsupf_driver);
-
-fail_backlight:
-
-	if (fujitsu->bl_device)
-		backlight_device_unregister(fujitsu->bl_device);
-
-fail_platform_device2:
-
-	platform_device_del(fujitsu->pf_device);
-
-fail_platform_device1:
-
-	platform_device_put(fujitsu->pf_device);
-
-fail_platform_driver:
-
-	acpi_bus_unregister_driver(&acpi_fujitsu_driver);
-
-fail_acpi:
-
-	kfree(fujitsu);
-
-	return ret;
-}
-
-static void __exit fujitsu_cleanup(void)
-{
-	sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
-			   &fujitsupf_attribute_group);
-	platform_device_unregister(fujitsu->pf_device);
-	platform_driver_unregister(&fujitsupf_driver);
-	if (fujitsu->bl_device)
-		backlight_device_unregister(fujitsu->bl_device);
-
-	acpi_bus_unregister_driver(&acpi_fujitsu_driver);
-
-	kfree(fujitsu);
-
-	acpi_bus_unregister_driver(&acpi_fujitsu_hotkey_driver);
-
-	kfree(fujitsu_hotkey);
-
-	printk(KERN_INFO "fujitsu-laptop: driver unloaded.\n");
-}
-
-module_init(fujitsu_init);
-module_exit(fujitsu_cleanup);
-
-module_param(use_alt_lcd_levels, uint, 0644);
-MODULE_PARM_DESC(use_alt_lcd_levels,
-		 "Use alternative interface for lcd_levels (needed for Lifebook s6410).");
-module_param(disable_brightness_keys, uint, 0644);
-MODULE_PARM_DESC(disable_brightness_keys,
-		 "Disable brightness keys (eg. if they are already handled by the generic ACPI_VIDEO device).");
-module_param(disable_brightness_adjust, uint, 0644);
-MODULE_PARM_DESC(disable_brightness_adjust, "Disable brightness adjustment.");
-#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
-module_param_named(debug, dbg_level, uint, 0644);
-MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
-#endif
-
-MODULE_AUTHOR("Jonathan Woithe, Peter Gruber");
-MODULE_DESCRIPTION("Fujitsu laptop extras support");
-MODULE_VERSION(FUJITSU_DRIVER_VERSION);
-MODULE_LICENSE("GPL");
-
-MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
-MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
-
-static struct pnp_device_id pnp_ids[] = {
-	{.id = "FUJ02bf"},
-	{.id = "FUJ02B1"},
-	{.id = "FUJ02E3"},
-	{.id = ""}
-};
-
-MODULE_DEVICE_TABLE(pnp, pnp_ids);
diff --git a/drivers/misc/hp-wmi.c b/drivers/misc/hp-wmi.c
deleted file mode 100644
index 4b7c24c..0000000
--- a/drivers/misc/hp-wmi.c
+++ /dev/null
@@ -1,512 +0,0 @@
-/*
- * HP WMI hotkeys
- *
- * Copyright (C) 2008 Red Hat <mjg@redhat.com>
- *
- * Portions based on wistron_btns.c:
- * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
- * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
- * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/input.h>
-#include <acpi/acpi_drivers.h>
-#include <linux/platform_device.h>
-#include <linux/acpi.h>
-#include <linux/rfkill.h>
-#include <linux/string.h>
-
-MODULE_AUTHOR("Matthew Garrett <mjg59@srcf.ucam.org>");
-MODULE_DESCRIPTION("HP laptop WMI hotkeys driver");
-MODULE_LICENSE("GPL");
-
-MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
-MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
-
-#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
-#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
-
-#define HPWMI_DISPLAY_QUERY 0x1
-#define HPWMI_HDDTEMP_QUERY 0x2
-#define HPWMI_ALS_QUERY 0x3
-#define HPWMI_DOCK_QUERY 0x4
-#define HPWMI_WIRELESS_QUERY 0x5
-#define HPWMI_HOTKEY_QUERY 0xc
-
-static int __init hp_wmi_bios_setup(struct platform_device *device);
-static int __exit hp_wmi_bios_remove(struct platform_device *device);
-
-struct bios_args {
-	u32 signature;
-	u32 command;
-	u32 commandtype;
-	u32 datasize;
-	u32 data;
-};
-
-struct bios_return {
-	u32 sigpass;
-	u32 return_code;
-	u32 value;
-};
-
-struct key_entry {
-	char type;		/* See KE_* below */
-	u16 code;
-	u16 keycode;
-};
-
-enum { KE_KEY, KE_SW, KE_END };
-
-static struct key_entry hp_wmi_keymap[] = {
-	{KE_SW, 0x01, SW_DOCK},
-	{KE_KEY, 0x02, KEY_BRIGHTNESSUP},
-	{KE_KEY, 0x03, KEY_BRIGHTNESSDOWN},
-	{KE_KEY, 0x20e6, KEY_PROG1},
-	{KE_KEY, 0x2142, KEY_MEDIA},
-	{KE_KEY, 0x213b, KEY_INFO},
-	{KE_KEY, 0x231b, KEY_HELP},
-	{KE_END, 0}
-};
-
-static struct input_dev *hp_wmi_input_dev;
-static struct platform_device *hp_wmi_platform_dev;
-
-static struct rfkill *wifi_rfkill;
-static struct rfkill *bluetooth_rfkill;
-static struct rfkill *wwan_rfkill;
-
-static struct platform_driver hp_wmi_driver = {
-	.driver = {
-		   .name = "hp-wmi",
-		   .owner = THIS_MODULE,
-	},
-	.probe = hp_wmi_bios_setup,
-	.remove = hp_wmi_bios_remove,
-};
-
-static int hp_wmi_perform_query(int query, int write, int value)
-{
-	struct bios_return bios_return;
-	acpi_status status;
-	union acpi_object *obj;
-	struct bios_args args = {
-		.signature = 0x55434553,
-		.command = write ? 0x2 : 0x1,
-		.commandtype = query,
-		.datasize = write ? 0x4 : 0,
-		.data = value,
-	};
-	struct acpi_buffer input = { sizeof(struct bios_args), &args };
-	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
-
-	status = wmi_evaluate_method(HPWMI_BIOS_GUID, 0, 0x3, &input, &output);
-
-	obj = output.pointer;
-
-	if (!obj || obj->type != ACPI_TYPE_BUFFER)
-		return -EINVAL;
-
-	bios_return = *((struct bios_return *)obj->buffer.pointer);
-	if (bios_return.return_code > 0)
-		return bios_return.return_code * -1;
-	else
-		return bios_return.value;
-}
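hp_wmi_perform_query() is the single entry point for both directions: write=0 issues a read query and returns the value, write=1 sends the given value to the BIOS. The wrappers that follow are thin conveniences over it; a usage sketch (not part of the driver):

static int example_queries(void)
{
	int dock = hp_wmi_perform_query(HPWMI_DOCK_QUERY, 0, 0);	/* read */

	if (dock < 0)
		return dock;
	/* write: 0x101 soft-enables WLAN, mirroring hp_wmi_wifi_set() below */
	return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x101);
}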
-
-static int hp_wmi_display_state(void)
-{
-	return hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, 0);
-}
-
-static int hp_wmi_hddtemp_state(void)
-{
-	return hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, 0);
-}
-
-static int hp_wmi_als_state(void)
-{
-	return hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, 0);
-}
-
-static int hp_wmi_dock_state(void)
-{
-	return hp_wmi_perform_query(HPWMI_DOCK_QUERY, 0, 0);
-}
-
-static int hp_wmi_wifi_set(void *data, enum rfkill_state state)
-{
-	if (state)
-		return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x101);
-	else
-		return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x100);
-}
-
-static int hp_wmi_bluetooth_set(void *data, enum rfkill_state state)
-{
-	if (state)
-		return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x202);
-	else
-		return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x200);
-}
-
-static int hp_wmi_wwan_set(void *data, enum rfkill_state state)
-{
-	if (state)
-		return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x404);
-	else
-		return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x400);
-}
-
-static int hp_wmi_wifi_state(void)
-{
-	int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
-
-	if (wireless & 0x100)
-		return RFKILL_STATE_UNBLOCKED;
-	else
-		return RFKILL_STATE_SOFT_BLOCKED;
-}
-
-static int hp_wmi_bluetooth_state(void)
-{
-	int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
-
-	if (wireless & 0x10000)
-		return RFKILL_STATE_UNBLOCKED;
-	else
-		return RFKILL_STATE_SOFT_BLOCKED;
-}
-
-static int hp_wmi_wwan_state(void)
-{
-	int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
-
-	if (wireless & 0x1000000)
-		return RFKILL_STATE_UNBLOCKED;
-	else
-		return RFKILL_STATE_SOFT_BLOCKED;
-}
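The three state helpers above issue the same wireless read query and each test one "radio enabled" bit; the low bits (0x1, 0x2, 0x4), checked later in hp_wmi_bios_setup(), report which radios are fitted at all. A sketch decoding all three enable bits in one place (illustrative only):

static void example_decode_wireless(int wireless)
{
	pr_info("wifi %s, bluetooth %s, wwan %s\n",
		(wireless & 0x100)     ? "on" : "off",
		(wireless & 0x10000)   ? "on" : "off",
		(wireless & 0x1000000) ? "on" : "off");
}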
-
-static ssize_t show_display(struct device *dev, struct device_attribute *attr,
-			    char *buf)
-{
-	int value = hp_wmi_display_state();
-	if (value < 0)
-		return -EINVAL;
-	return sprintf(buf, "%d\n", value);
-}
-
-static ssize_t show_hddtemp(struct device *dev, struct device_attribute *attr,
-			    char *buf)
-{
-	int value = hp_wmi_hddtemp_state();
-	if (value < 0)
-		return -EINVAL;
-	return sprintf(buf, "%d\n", value);
-}
-
-static ssize_t show_als(struct device *dev, struct device_attribute *attr,
-			char *buf)
-{
-	int value = hp_wmi_als_state();
-	if (value < 0)
-		return -EINVAL;
-	return sprintf(buf, "%d\n", value);
-}
-
-static ssize_t show_dock(struct device *dev, struct device_attribute *attr,
-			 char *buf)
-{
-	int value = hp_wmi_dock_state();
-	if (value < 0)
-		return -EINVAL;
-	return sprintf(buf, "%d\n", value);
-}
-
-static ssize_t set_als(struct device *dev, struct device_attribute *attr,
-		       const char *buf, size_t count)
-{
-	u32 tmp = simple_strtoul(buf, NULL, 10);
-	hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, tmp);
-	return count;
-}
-
-static DEVICE_ATTR(display, S_IRUGO, show_display, NULL);
-static DEVICE_ATTR(hddtemp, S_IRUGO, show_hddtemp, NULL);
-static DEVICE_ATTR(als, S_IRUGO | S_IWUSR, show_als, set_als);
-static DEVICE_ATTR(dock, S_IRUGO, show_dock, NULL);
-
-static struct key_entry *hp_wmi_get_entry_by_scancode(int code)
-{
-	struct key_entry *key;
-
-	for (key = hp_wmi_keymap; key->type != KE_END; key++)
-		if (code == key->code)
-			return key;
-
-	return NULL;
-}
-
-static struct key_entry *hp_wmi_get_entry_by_keycode(int keycode)
-{
-	struct key_entry *key;
-
-	for (key = hp_wmi_keymap; key->type != KE_END; key++)
-		if (key->type == KE_KEY && keycode == key->keycode)
-			return key;
-
-	return NULL;
-}
-
-static int hp_wmi_getkeycode(struct input_dev *dev, int scancode, int *keycode)
-{
-	struct key_entry *key = hp_wmi_get_entry_by_scancode(scancode);
-
-	if (key && key->type == KE_KEY) {
-		*keycode = key->keycode;
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-static int hp_wmi_setkeycode(struct input_dev *dev, int scancode, int keycode)
-{
-	struct key_entry *key;
-	int old_keycode;
-
-	if (keycode < 0 || keycode > KEY_MAX)
-		return -EINVAL;
-
-	key = hp_wmi_get_entry_by_scancode(scancode);
-	if (key && key->type == KE_KEY) {
-		old_keycode = key->keycode;
-		key->keycode = keycode;
-		set_bit(keycode, dev->keybit);
-		if (!hp_wmi_get_entry_by_keycode(old_keycode))
-			clear_bit(old_keycode, dev->keybit);
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-static void hp_wmi_notify(u32 value, void *context)
-{
-	struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
-	static struct key_entry *key;
-	union acpi_object *obj;
-
-	wmi_get_event_data(value, &response);
-
-	obj = (union acpi_object *)response.pointer;
-
-	if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == 8) {
-		int eventcode = *((u8 *) obj->buffer.pointer);
-		if (eventcode == 0x4)
-			eventcode = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
-							 0);
-		key = hp_wmi_get_entry_by_scancode(eventcode);
-		if (key) {
-			switch (key->type) {
-			case KE_KEY:
-				input_report_key(hp_wmi_input_dev,
-						 key->keycode, 1);
-				input_sync(hp_wmi_input_dev);
-				input_report_key(hp_wmi_input_dev,
-						 key->keycode, 0);
-				input_sync(hp_wmi_input_dev);
-				break;
-			case KE_SW:
-				input_report_switch(hp_wmi_input_dev,
-						    key->keycode,
-						    hp_wmi_dock_state());
-				input_sync(hp_wmi_input_dev);
-				break;
-			}
-		} else if (eventcode == 0x5) {
-			if (wifi_rfkill)
-				rfkill_force_state(wifi_rfkill,
-						   hp_wmi_wifi_state());
-			if (bluetooth_rfkill)
-				rfkill_force_state(bluetooth_rfkill,
-						   hp_wmi_bluetooth_state());
-			if (wwan_rfkill)
-				rfkill_force_state(wwan_rfkill,
-						   hp_wmi_wwan_state());
-		} else
-			printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n",
-			       eventcode);
-	} else
-		printk(KERN_INFO "HP WMI: Unknown response received\n");
-}
-
-static int __init hp_wmi_input_setup(void)
-{
-	struct key_entry *key;
-	int err;
-
-	hp_wmi_input_dev = input_allocate_device();
-
-	hp_wmi_input_dev->name = "HP WMI hotkeys";
-	hp_wmi_input_dev->phys = "wmi/input0";
-	hp_wmi_input_dev->id.bustype = BUS_HOST;
-	hp_wmi_input_dev->getkeycode = hp_wmi_getkeycode;
-	hp_wmi_input_dev->setkeycode = hp_wmi_setkeycode;
-
-	for (key = hp_wmi_keymap; key->type != KE_END; key++) {
-		switch (key->type) {
-		case KE_KEY:
-			set_bit(EV_KEY, hp_wmi_input_dev->evbit);
-			set_bit(key->keycode, hp_wmi_input_dev->keybit);
-			break;
-		case KE_SW:
-			set_bit(EV_SW, hp_wmi_input_dev->evbit);
-			set_bit(key->keycode, hp_wmi_input_dev->swbit);
-			break;
-		}
-	}
-
-	err = input_register_device(hp_wmi_input_dev);
-
-	if (err) {
-		input_free_device(hp_wmi_input_dev);
-		return err;
-	}
-
-	return 0;
-}
-
-static void cleanup_sysfs(struct platform_device *device)
-{
-	device_remove_file(&device->dev, &dev_attr_display);
-	device_remove_file(&device->dev, &dev_attr_hddtemp);
-	device_remove_file(&device->dev, &dev_attr_als);
-	device_remove_file(&device->dev, &dev_attr_dock);
-}
-
-static int __init hp_wmi_bios_setup(struct platform_device *device)
-{
-	int err;
-	int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
-
-	err = device_create_file(&device->dev, &dev_attr_display);
-	if (err)
-		goto add_sysfs_error;
-	err = device_create_file(&device->dev, &dev_attr_hddtemp);
-	if (err)
-		goto add_sysfs_error;
-	err = device_create_file(&device->dev, &dev_attr_als);
-	if (err)
-		goto add_sysfs_error;
-	err = device_create_file(&device->dev, &dev_attr_dock);
-	if (err)
-		goto add_sysfs_error;
-
-	if (wireless & 0x1) {
-		wifi_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WLAN);
-		wifi_rfkill->name = "hp-wifi";
-		wifi_rfkill->state = hp_wmi_wifi_state();
-		wifi_rfkill->toggle_radio = hp_wmi_wifi_set;
-		wifi_rfkill->user_claim_unsupported = 1;
-		rfkill_register(wifi_rfkill);
-	}
-
-	if (wireless & 0x2) {
-		bluetooth_rfkill = rfkill_allocate(&device->dev,
-						   RFKILL_TYPE_BLUETOOTH);
-		bluetooth_rfkill->name = "hp-bluetooth";
-		bluetooth_rfkill->state = hp_wmi_bluetooth_state();
-		bluetooth_rfkill->toggle_radio = hp_wmi_bluetooth_set;
-		bluetooth_rfkill->user_claim_unsupported = 1;
-		rfkill_register(bluetooth_rfkill);
-	}
-
-	if (wireless & 0x4) {
-		wwan_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WWAN);
-		wwan_rfkill->name = "hp-wwan";
-		wwan_rfkill->state = hp_wmi_wwan_state();
-		wwan_rfkill->toggle_radio = hp_wmi_wwan_set;
-		wwan_rfkill->user_claim_unsupported = 1;
-		rfkill_register(wwan_rfkill);
-	}
-
-	return 0;
-add_sysfs_error:
-	cleanup_sysfs(device);
-	return err;
-}
-
-static int __exit hp_wmi_bios_remove(struct platform_device *device)
-{
-	cleanup_sysfs(device);
-
-	if (wifi_rfkill)
-		rfkill_unregister(wifi_rfkill);
-	if (bluetooth_rfkill)
-		rfkill_unregister(bluetooth_rfkill);
-	if (wwan_rfkill)
-		rfkill_unregister(wwan_rfkill);
-
-	return 0;
-}
-
-static int __init hp_wmi_init(void)
-{
-	int err;
-
-	if (wmi_has_guid(HPWMI_EVENT_GUID)) {
-		err = wmi_install_notify_handler(HPWMI_EVENT_GUID,
-						 hp_wmi_notify, NULL);
-		if (!err)
-			hp_wmi_input_setup();
-	}
-
-	if (wmi_has_guid(HPWMI_BIOS_GUID)) {
-		err = platform_driver_register(&hp_wmi_driver);
-		if (err)
-			return 0;
-		hp_wmi_platform_dev = platform_device_alloc("hp-wmi", -1);
-		if (!hp_wmi_platform_dev) {
-			platform_driver_unregister(&hp_wmi_driver);
-			return 0;
-		}
-		platform_device_add(hp_wmi_platform_dev);
-	}
-
-	return 0;
-}
-
-static void __exit hp_wmi_exit(void)
-{
-	if (wmi_has_guid(HPWMI_EVENT_GUID)) {
-		wmi_remove_notify_handler(HPWMI_EVENT_GUID);
-		input_unregister_device(hp_wmi_input_dev);
-	}
-	if (hp_wmi_platform_dev) {
-		platform_device_del(hp_wmi_platform_dev);
-		platform_driver_unregister(&hp_wmi_driver);
-	}
-}
-
-module_init(hp_wmi_init);
-module_exit(hp_wmi_exit);
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index fda6a4d..68a0a5b 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -50,7 +50,7 @@
  * Store the event in the circular event buffer, wake up any sleeping
  * event readers.
  * There is no reader marker in the buffer, therefore readers are
- * responsible for keeping up with the writer, or they will loose events.
+ * responsible for keeping up with the writer, or they will lose events.
  */
 void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size)
 {
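The corrected comment describes a ring buffer with no per-reader marker: the writer never waits, so a reader that falls more than a buffer's worth behind loses events. A generic sketch of that scheme (illustrative only, not the ibmasm implementation):

#define BUF_SIZE 16

struct event_buf {
	unsigned int next_seq;		/* writer position, always advances */
	int slots[BUF_SIZE];
};

static void write_event(struct event_buf *b, int data)
{
	b->slots[b->next_seq % BUF_SIZE] = data;
	b->next_seq++;			/* readers are never consulted */
}

static int read_event(struct event_buf *b, unsigned int *reader_seq, int *out)
{
	if (*reader_seq == b->next_seq)
		return 0;				/* nothing new */
	if (b->next_seq - *reader_seq > BUF_SIZE)
		*reader_seq = b->next_seq - BUF_SIZE;	/* lapped: events lost */
	*out = b->slots[*reader_seq % BUF_SIZE];
	(*reader_seq)++;
	return 1;
}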
diff --git a/drivers/misc/panasonic-laptop.c b/drivers/misc/panasonic-laptop.c
deleted file mode 100644
index 4a1bc644..0000000
--- a/drivers/misc/panasonic-laptop.c
+++ /dev/null
@@ -1,766 +0,0 @@
-/*
- *  Panasonic HotKey and LCD brightness control driver
- *  (C) 2004 Hiroshi Miura <miura@da-cha.org>
- *  (C) 2004 NTT DATA Intellilink Co. http://www.intellilink.co.jp/
- *  (C) YOKOTA Hiroshi <yokota (at) netlab. is. tsukuba. ac. jp>
- *  (C) 2004 David Bronaugh <dbronaugh>
- *  (C) 2006-2008 Harald Welte <laforge@gnumonks.org>
- *
- *  derived from toshiba_acpi.c, Copyright (C) 2002-2004 John Belmonte
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2 as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
- *
- *---------------------------------------------------------------------------
- *
- * ChangeLog:
- *	Sep.23, 2008	Harald Welte <laforge@gnumonks.org>
- *		-v0.95	rename driver from drivers/acpi/pcc_acpi.c to
- *			drivers/misc/panasonic-laptop.c
- *
- * 	Jul.04, 2008	Harald Welte <laforge@gnumonks.org>
- * 		-v0.94	replace /proc interface with device attributes
- * 			support {set,get}keycode on the input device
- *
- *      Jun.27, 2008	Harald Welte <laforge@gnumonks.org>
- *      	-v0.92	merge with 2.6.26-rc6 input API changes
- *      		remove broken <= 2.6.15 kernel support
- *      		resolve all compiler warnings
- *      		various coding style fixes (checkpatch.pl)
- *      		add support for backlight api
- *      		major code restructuring
- *
- * 	Dec.28, 2007	Harald Welte <laforge@gnumonks.org>
- * 		-v0.91	merge with 2.6.24-rc6 ACPI changes
- *
- * 	Nov.04, 2006	Hiroshi Miura <miura@da-cha.org>
- * 		-v0.9	remove warning about section reference.
- * 			remove acpi_os_free
- * 			add /proc/acpi/pcc/brightness interface for HAL access
- * 			merge dbronaugh's enhancement
- * 			Aug.17, 2004 David Bronaugh (dbronaugh)
- *  				- Added screen brightness setting interface
- *				  Thanks to FreeBSD crew (acpi_panasonic.c)
- * 				  for the ideas I needed to accomplish it
- *
- *	May.29, 2006	Hiroshi Miura <miura@da-cha.org>
- *		-v0.8.4 follow to change keyinput structure
- *			thanks Fabian Yamaguchi <fabs@cs.tu-berlin.de>,
- *			Jacob Bower <jacob.bower@ic.ac.uk> and
- *			Hiroshi Yokota for providing solutions.
- *
- *	Oct.02, 2004	Hiroshi Miura <miura@da-cha.org>
- *		-v0.8.2	merge code of YOKOTA Hiroshi
- *					<yokota@netlab.is.tsukuba.ac.jp>.
- *			Add sticky key mode interface.
- *			Refactoring acpi_pcc_generate_keyinput().
- *
- *	Sep.15, 2004	Hiroshi Miura <miura@da-cha.org>
- *		-v0.8	Generate key input event on input subsystem.
- *			This is based on yet another driver written by
- *							Ryuta Nakanishi.
- *
- *	Sep.10, 2004	Hiroshi Miura <miura@da-cha.org>
- *		-v0.7	Change proc interface functions using seq_file
- *			facility as same as other ACPI drivers.
- *
- *	Aug.28, 2004	Hiroshi Miura <miura@da-cha.org>
- *		-v0.6.4 Fix a silly error with status checking
- *
- *	Aug.25, 2004	Hiroshi Miura <miura@da-cha.org>
- *		-v0.6.3 replace read_acpi_int by standard function
- *							acpi_evaluate_integer
- *			some clean up and make smart copyright notice.
- *			fix return value of pcc_acpi_get_key()
- *			fix checking return value of acpi_bus_register_driver()
- *
- *      Aug.22, 2004    David Bronaugh <dbronaugh@linuxboxen.org>
- *              -v0.6.2 Add check on ACPI data (num_sifr)
- *                      Coding style cleanups, better error messages/handling
- *			Fixed an off-by-one error in memory allocation
- *
- *      Aug.21, 2004    David Bronaugh <dbronaugh@linuxboxen.org>
- *              -v0.6.1 Fix a silly error with status checking
- *
- *      Aug.20, 2004    David Bronaugh <dbronaugh@linuxboxen.org>
- *              - v0.6  Correct brightness controls to reflect reality
- *                      based on information gleaned by Hiroshi Miura
- *                      and discussions with Hiroshi Miura
- *
- *	Aug.10, 2004	Hiroshi Miura <miura@da-cha.org>
- *		- v0.5  support LCD brightness control
- *			based on the disclosed information by MEI.
- *
- *	Jul.25, 2004	Hiroshi Miura <miura@da-cha.org>
- *		- v0.4  first post version
- *		        add function to retrieve SIFR
- *
- *	Jul.24, 2004	Hiroshi Miura <miura@da-cha.org>
- *		- v0.3  get proper status of hotkey
- *
- *      Jul.22, 2004	Hiroshi Miura <miura@da-cha.org>
- *		- v0.2  add HotKey handler
- *
- *      Jul.17, 2004	Hiroshi Miura <miura@da-cha.org>
- *		- v0.1  start from toshiba_acpi driver written by John Belmonte
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/backlight.h>
-#include <linux/ctype.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-#include <linux/input.h>
-
-
-#ifndef ACPI_HOTKEY_COMPONENT
-#define ACPI_HOTKEY_COMPONENT	0x10000000
-#endif
-
-#define _COMPONENT		ACPI_HOTKEY_COMPONENT
-
-MODULE_AUTHOR("Hiroshi Miura, David Bronaugh and Harald Welte");
-MODULE_DESCRIPTION("ACPI HotKey driver for Panasonic Let's Note laptops");
-MODULE_LICENSE("GPL");
-
-#define LOGPREFIX "pcc_acpi: "
-
-/* Define ACPI PATHs */
-/* Lets note hotkeys */
-#define METHOD_HKEY_QUERY	"HINF"
-#define METHOD_HKEY_SQTY	"SQTY"
-#define METHOD_HKEY_SINF	"SINF"
-#define METHOD_HKEY_SSET	"SSET"
-#define HKEY_NOTIFY		 0x80
-
-#define ACPI_PCC_DRIVER_NAME	"Panasonic Laptop Support"
-#define ACPI_PCC_DEVICE_NAME	"Hotkey"
-#define ACPI_PCC_CLASS		"pcc"
-
-#define ACPI_PCC_INPUT_PHYS	"panasonic/hkey0"
-
-/* LCD_TYPEs: 0 = Normal, 1 = Semi-transparent
-   ENV_STATEs: Normal temp=0x01, High temp=0x81, N/A=0x00
-*/
-enum SINF_BITS { SINF_NUM_BATTERIES = 0,
-		 SINF_LCD_TYPE,
-		 SINF_AC_MAX_BRIGHT,
-		 SINF_AC_MIN_BRIGHT,
-		 SINF_AC_CUR_BRIGHT,
-		 SINF_DC_MAX_BRIGHT,
-		 SINF_DC_MIN_BRIGHT,
-		 SINF_DC_CUR_BRIGHT,
-		 SINF_MUTE,
-		 SINF_RESERVED,
-		 SINF_ENV_STATE,
-		 SINF_STICKY_KEY = 0x80,
-	};
-/* R1 handles SINF_AC_CUR_BRIGHT as SINF_CUR_BRIGHT, doesn't know AC state */
-
-static int acpi_pcc_hotkey_add(struct acpi_device *device);
-static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type);
-static int acpi_pcc_hotkey_resume(struct acpi_device *device);
-
-static const struct acpi_device_id pcc_device_ids[] = {
-	{ "MAT0012", 0},
-	{ "MAT0013", 0},
-	{ "MAT0018", 0},
-	{ "MAT0019", 0},
-	{ "", 0},
-};
-
-static struct acpi_driver acpi_pcc_driver = {
-	.name =		ACPI_PCC_DRIVER_NAME,
-	.class =	ACPI_PCC_CLASS,
-	.ids =		pcc_device_ids,
-	.ops =		{
-				.add =		acpi_pcc_hotkey_add,
-				.remove =	acpi_pcc_hotkey_remove,
-				.resume =       acpi_pcc_hotkey_resume,
-			},
-};
-
-#define KEYMAP_SIZE		11
-static const int initial_keymap[KEYMAP_SIZE] = {
-	/*  0 */ KEY_RESERVED,
-	/*  1 */ KEY_BRIGHTNESSDOWN,
-	/*  2 */ KEY_BRIGHTNESSUP,
-	/*  3 */ KEY_DISPLAYTOGGLE,
-	/*  4 */ KEY_MUTE,
-	/*  5 */ KEY_VOLUMEDOWN,
-	/*  6 */ KEY_VOLUMEUP,
-	/*  7 */ KEY_SLEEP,
-	/*  8 */ KEY_PROG1, /* Change CPU boost */
-	/*  9 */ KEY_BATTERY,
-	/* 10 */ KEY_SUSPEND,
-};
-
-struct pcc_acpi {
-	acpi_handle		handle;
-	unsigned long		num_sifr;
-	int			sticky_mode;
-	u32 			*sinf;
-	struct acpi_device	*device;
-	struct input_dev	*input_dev;
-	struct backlight_device	*backlight;
-	int			keymap[KEYMAP_SIZE];
-};
-
-struct pcc_keyinput {
-	struct acpi_hotkey      *hotkey;
-};
-
-/* method access functions */
-static int acpi_pcc_write_sset(struct pcc_acpi *pcc, int func, int val)
-{
-	union acpi_object in_objs[] = {
-		{ .integer.type  = ACPI_TYPE_INTEGER,
-		  .integer.value = func, },
-		{ .integer.type  = ACPI_TYPE_INTEGER,
-		  .integer.value = val, },
-	};
-	struct acpi_object_list params = {
-		.count   = ARRAY_SIZE(in_objs),
-		.pointer = in_objs,
-	};
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE("acpi_pcc_write_sset");
-
-	status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SSET,
-				      &params, NULL);
-
-	return status == AE_OK;
-}
-
-static inline int acpi_pcc_get_sqty(struct acpi_device *device)
-{
-	unsigned long long s;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE("acpi_pcc_get_sqty");
-
-	status = acpi_evaluate_integer(device->handle, METHOD_HKEY_SQTY,
-				       NULL, &s);
-	if (ACPI_SUCCESS(status))
-		return s;
-	else {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "evaluation error HKEY.SQTY\n"));
-		return -EINVAL;
-	}
-}
-
-static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc, u32 *sinf)
-{
-	acpi_status status;
-	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
-	union acpi_object *hkey = NULL;
-	int i;
-
-	ACPI_FUNCTION_TRACE("acpi_pcc_retrieve_biosdata");
-
-	status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, 0,
-				      &buffer);
-	if (ACPI_FAILURE(status)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "evaluation error HKEY.SINF\n"));
-		return 0;
-	}
-
-	hkey = buffer.pointer;
-	if (!hkey || (hkey->type != ACPI_TYPE_PACKAGE)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid HKEY.SINF\n"));
-		goto end;
-	}
-
-	if (pcc->num_sifr < hkey->package.count) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				 "SQTY reports bad SINF length\n"));
-		status = AE_ERROR;
-		goto end;
-	}
-
-	for (i = 0; i < hkey->package.count; i++) {
-		union acpi_object *element = &(hkey->package.elements[i]);
-		if (likely(element->type == ACPI_TYPE_INTEGER)) {
-			sinf[i] = element->integer.value;
-		} else
-			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-					 "Invalid HKEY.SINF data\n"));
-	}
-	sinf[hkey->package.count] = -1;
-
-end:
-	kfree(buffer.pointer);
-	return status == AE_OK;
-}
-
-/* backlight API interface functions */
-
-/* This driver currently treats AC and DC brightness identical,
- * since we don't need to invent an interface to the core ACPI
- * logic to receive events in case a power supply is plugged in
- * or removed */
-
-static int bl_get(struct backlight_device *bd)
-{
-	struct pcc_acpi *pcc = bl_get_data(bd);
-
-	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
-		return -EIO;
-
-	return pcc->sinf[SINF_AC_CUR_BRIGHT];
-}
-
-static int bl_set_status(struct backlight_device *bd)
-{
-	struct pcc_acpi *pcc = bl_get_data(bd);
-	int bright = bd->props.brightness;
-	int rc;
-
-	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
-		return -EIO;
-
-	if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT])
-		bright = pcc->sinf[SINF_AC_MIN_BRIGHT];
-
-	if (bright < pcc->sinf[SINF_DC_MIN_BRIGHT])
-		bright = pcc->sinf[SINF_DC_MIN_BRIGHT];
-
-	if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT] ||
-	    bright > pcc->sinf[SINF_AC_MAX_BRIGHT])
-		return -EINVAL;
-
-	rc = acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, bright);
-	if (rc < 0)
-		return rc;
-
-	return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
-}
-
-static struct backlight_ops pcc_backlight_ops = {
-	.get_brightness	= bl_get,
-	.update_status	= bl_set_status,
-};
-
-
-/* sysfs user interface functions */
-
-static ssize_t show_numbatt(struct device *dev, struct device_attribute *attr,
-			    char *buf)
-{
-	struct acpi_device *acpi = to_acpi_device(dev);
-	struct pcc_acpi *pcc = acpi_driver_data(acpi);
-
-	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
-		return -EIO;
-
-	return sprintf(buf, "%u\n", pcc->sinf[SINF_NUM_BATTERIES]);
-}
-
-static ssize_t show_lcdtype(struct device *dev, struct device_attribute *attr,
-			    char *buf)
-{
-	struct acpi_device *acpi = to_acpi_device(dev);
-	struct pcc_acpi *pcc = acpi_driver_data(acpi);
-
-	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
-		return -EIO;
-
-	return sprintf(buf, "%u\n", pcc->sinf[SINF_LCD_TYPE]);
-}
-
-static ssize_t show_mute(struct device *dev, struct device_attribute *attr,
-			 char *buf)
-{
-	struct acpi_device *acpi = to_acpi_device(dev);
-	struct pcc_acpi *pcc = acpi_driver_data(acpi);
-
-	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
-		return -EIO;
-
-	return sprintf(buf, "%u\n", pcc->sinf[SINF_MUTE]);
-}
-
-static ssize_t show_sticky(struct device *dev, struct device_attribute *attr,
-			   char *buf)
-{
-	struct acpi_device *acpi = to_acpi_device(dev);
-	struct pcc_acpi *pcc = acpi_driver_data(acpi);
-
-	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
-		return -EIO;
-
-	return sprintf(buf, "%u\n", pcc->sinf[SINF_STICKY_KEY]);
-}
-
-static ssize_t set_sticky(struct device *dev, struct device_attribute *attr,
-			  const char *buf, size_t count)
-{
-	struct acpi_device *acpi = to_acpi_device(dev);
-	struct pcc_acpi *pcc = acpi_driver_data(acpi);
-	int val;
-
-	if (count && sscanf(buf, "%i", &val) == 1 &&
-	    (val == 0 || val == 1)) {
-		acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, val);
-		pcc->sticky_mode = val;
-	}
-
-	return count;
-}
-
-static DEVICE_ATTR(numbatt, S_IRUGO, show_numbatt, NULL);
-static DEVICE_ATTR(lcdtype, S_IRUGO, show_lcdtype, NULL);
-static DEVICE_ATTR(mute, S_IRUGO, show_mute, NULL);
-static DEVICE_ATTR(sticky_key, S_IRUGO | S_IWUSR, show_sticky, set_sticky);
-
-static struct attribute *pcc_sysfs_entries[] = {
-	&dev_attr_numbatt.attr,
-	&dev_attr_lcdtype.attr,
-	&dev_attr_mute.attr,
-	&dev_attr_sticky_key.attr,
-	NULL,
-};
-
-static struct attribute_group pcc_attr_group = {
-	.name	= NULL,		/* put in device directory */
-	.attrs	= pcc_sysfs_entries,
-};
-
-
-/* hotkey input device driver */
-
-static int pcc_getkeycode(struct input_dev *dev, int scancode, int *keycode)
-{
-	struct pcc_acpi *pcc = input_get_drvdata(dev);
-
-	if (scancode >= ARRAY_SIZE(pcc->keymap))
-		return -EINVAL;
-
-	*keycode = pcc->keymap[scancode];
-
-	return 0;
-}
-
-static int keymap_get_by_keycode(struct pcc_acpi *pcc, int keycode)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(pcc->keymap); i++) {
-		if (pcc->keymap[i] == keycode)
-			return i+1;
-	}
-
-	return 0;
-}
-
-static int pcc_setkeycode(struct input_dev *dev, int scancode, int keycode)
-{
-	struct pcc_acpi *pcc = input_get_drvdata(dev);
-	int oldkeycode;
-
-	if (scancode >= ARRAY_SIZE(pcc->keymap))
-		return -EINVAL;
-
-	if (keycode < 0 || keycode > KEY_MAX)
-		return -EINVAL;
-
-	oldkeycode = pcc->keymap[scancode];
-	pcc->keymap[scancode] = keycode;
-
-	set_bit(keycode, dev->keybit);
-
-	if (!keymap_get_by_keycode(pcc, oldkeycode))
-		clear_bit(oldkeycode, dev->keybit);
-
-	return 0;
-}
-
-static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
-{
-	struct input_dev *hotk_input_dev = pcc->input_dev;
-	int rc;
-	int key_code, hkey_num;
-	unsigned long long result;
-
-	ACPI_FUNCTION_TRACE("acpi_pcc_generate_keyinput");
-
-	rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY,
-				   NULL, &result);
-	if (!ACPI_SUCCESS(rc)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				 "error getting hotkey status\n"));
-		return;
-	}
-
-	acpi_bus_generate_proc_event(pcc->device, HKEY_NOTIFY, result);
-
-	hkey_num = result & 0xf;
-
-	if (hkey_num < 0 || hkey_num >= ARRAY_SIZE(pcc->keymap)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "hotkey number out of range: %d\n",
-				  hkey_num));
-		return;
-	}
-
-	key_code = pcc->keymap[hkey_num];
-
-	if (key_code != KEY_RESERVED) {
-		int pushed = (result & 0x80) ? TRUE : FALSE;
-
-		input_report_key(hotk_input_dev, key_code, pushed);
-		input_sync(hotk_input_dev);
-	}
-
-	return;
-}
-
-static void acpi_pcc_hotkey_notify(acpi_handle handle, u32 event, void *data)
-{
-	struct pcc_acpi *pcc = (struct pcc_acpi *) data;
-
-	ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_notify");
-
-	switch (event) {
-	case HKEY_NOTIFY:
-		acpi_pcc_generate_keyinput(pcc);
-		break;
-	default:
-		/* nothing to do */
-		break;
-	}
-}
-
-static int acpi_pcc_init_input(struct pcc_acpi *pcc)
-{
-	int i, rc;
-
-	ACPI_FUNCTION_TRACE("acpi_pcc_init_input");
-
-	pcc->input_dev = input_allocate_device();
-	if (!pcc->input_dev) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "Couldn't allocate input device for hotkey"));
-		return -ENOMEM;
-	}
-
-	pcc->input_dev->evbit[0] = BIT(EV_KEY);
-
-	pcc->input_dev->name = ACPI_PCC_DRIVER_NAME;
-	pcc->input_dev->phys = ACPI_PCC_INPUT_PHYS;
-	pcc->input_dev->id.bustype = BUS_HOST;
-	pcc->input_dev->id.vendor = 0x0001;
-	pcc->input_dev->id.product = 0x0001;
-	pcc->input_dev->id.version = 0x0100;
-	pcc->input_dev->getkeycode = pcc_getkeycode;
-	pcc->input_dev->setkeycode = pcc_setkeycode;
-
-	/* load initial keymap */
-	memcpy(pcc->keymap, initial_keymap, sizeof(pcc->keymap));
-
-	for (i = 0; i < ARRAY_SIZE(pcc->keymap); i++)
-		__set_bit(pcc->keymap[i], pcc->input_dev->keybit);
-	__clear_bit(KEY_RESERVED, pcc->input_dev->keybit);
-
-	input_set_drvdata(pcc->input_dev, pcc);
-
-	rc = input_register_device(pcc->input_dev);
-	if (rc < 0)
-		input_free_device(pcc->input_dev);
-
-	return rc;
-}
-
-/* kernel module interface */
-
-static int acpi_pcc_hotkey_resume(struct acpi_device *device)
-{
-	struct pcc_acpi *pcc = acpi_driver_data(device);
-	acpi_status status = AE_OK;
-
-	ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_resume");
-
-	if (device == NULL || pcc == NULL)
-		return -EINVAL;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Sticky mode restore: %d\n",
-			  pcc->sticky_mode));
-
-	status = acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode);
-
-	return status == AE_OK ? 0 : -EINVAL;
-}
-
-static int acpi_pcc_hotkey_add(struct acpi_device *device)
-{
-	acpi_status status;
-	struct pcc_acpi *pcc;
-	int num_sifr, result;
-
-	ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_add");
-
-	if (!device)
-		return -EINVAL;
-
-	num_sifr = acpi_pcc_get_sqty(device);
-
-	if (num_sifr < 0 || num_sifr > 255) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr out of range"));
-		return -ENODEV;
-	}
-
-	pcc = kzalloc(sizeof(struct pcc_acpi), GFP_KERNEL);
-	if (!pcc) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "Couldn't allocate mem for pcc"));
-		return -ENOMEM;
-	}
-
-	pcc->sinf = kzalloc(sizeof(u32) * (num_sifr + 1), GFP_KERNEL);
-	if (!pcc->sinf) {
-		result = -ENOMEM;
-		goto out_hotkey;
-	}
-
-	pcc->device = device;
-	pcc->handle = device->handle;
-	pcc->num_sifr = num_sifr;
-	device->driver_data = pcc;
-	strcpy(acpi_device_name(device), ACPI_PCC_DEVICE_NAME);
-	strcpy(acpi_device_class(device), ACPI_PCC_CLASS);
-
-	result = acpi_pcc_init_input(pcc);
-	if (result) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "Error installing keyinput handler\n"));
-		goto out_sinf;
-	}
-
-	/* initialize hotkey input device */
-	status = acpi_install_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY,
-					     acpi_pcc_hotkey_notify, pcc);
-
-	if (ACPI_FAILURE(status)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "Error installing notify handler\n"));
-		result = -ENODEV;
-		goto out_input;
-	}
-
-	/* initialize backlight */
-	pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
-						   &pcc_backlight_ops);
-	if (IS_ERR(pcc->backlight)) {
-		result = PTR_ERR(pcc->backlight);
-		goto out_notify;
-	}
-
-	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				 "Couldn't retrieve BIOS data\n"));
-		result = -EIO;
-		goto out_backlight;
-	}
-
-	/* read the initial brightness setting from the hardware */
-	pcc->backlight->props.max_brightness =
-					pcc->sinf[SINF_AC_MAX_BRIGHT];
-	pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
-
-	/* read the initial sticky key mode from the hardware */
-	pcc->sticky_mode = pcc->sinf[SINF_STICKY_KEY];
-
-	/* add sysfs attributes */
-	result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group);
-	if (result)
-		goto out_backlight;
-
-	return 0;
-
-out_backlight:
-	backlight_device_unregister(pcc->backlight);
-out_notify:
-	acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY,
-				   acpi_pcc_hotkey_notify);
-out_input:
-	input_unregister_device(pcc->input_dev);
-	/* no need to call input_free_device() since the input core
-	 * refcounts and frees the device */
-out_sinf:
-	kfree(pcc->sinf);
-out_hotkey:
-	kfree(pcc);
-
-	return result;
-}
-
-static int __init acpi_pcc_init(void)
-{
-	int result = 0;
-
-	ACPI_FUNCTION_TRACE("acpi_pcc_init");
-
-	if (acpi_disabled)
-		return -ENODEV;
-
-	result = acpi_bus_register_driver(&acpi_pcc_driver);
-	if (result < 0) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "Error registering hotkey driver\n"));
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type)
-{
-	struct pcc_acpi *pcc = acpi_driver_data(device);
-
-	ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_remove");
-
-	if (!device || !pcc)
-		return -EINVAL;
-
-	sysfs_remove_group(&device->dev.kobj, &pcc_attr_group);
-
-	backlight_device_unregister(pcc->backlight);
-
-	acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY,
-				   acpi_pcc_hotkey_notify);
-
-	input_unregister_device(pcc->input_dev);
-	/* no need to call input_free_device() since the input core
-	 * refcounts and frees the device */
-
-	kfree(pcc->sinf);
-	kfree(pcc);
-
-	return 0;
-}
-
-static void __exit acpi_pcc_exit(void)
-{
-	ACPI_FUNCTION_TRACE("acpi_pcc_exit");
-
-	acpi_bus_unregister_driver(&acpi_pcc_driver);
-}
-
-module_init(acpi_pcc_init);
-module_exit(acpi_pcc_exit);
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index abdebe3..fa57b67 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -6,7 +6,7 @@
  *  the Free Software Foundation; either version 2 of the License, or
  *  (at your option) any later version.
  *
- *  You need an userspace library to cooperate with this driver. It (and other
+ *  You need a userspace library to cooperate with this driver. It (and other
  *  info) may be obtained here:
  *  http://www.fi.muni.cz/~xslaby/phantom.html
  *  or alternatively, you might use OpenHaptics provided by Sensable.
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 73b7fb8..82fb995 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -899,7 +899,7 @@
 	dev_dbg(xpc_part, "  remote_vars_pa = 0x%016lx\n",
 		part_sn2->remote_vars_pa);
 
-	part->last_heartbeat = remote_vars->heartbeat;
+	part->last_heartbeat = remote_vars->heartbeat - 1;
 	dev_dbg(xpc_part, "  last_heartbeat = 0x%016lx\n",
 		part->last_heartbeat);
 
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 81152b3..7957f52 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -95,11 +95,6 @@
 	atomic_t use_count;
 };
 
-/* driver specific structure pointed to by the device structure */
-struct xpnet_dev_private {
-	struct net_device_stats stats;
-};
-
 struct net_device *xpnet_device;
 
 /*
@@ -153,7 +148,6 @@
 	struct sk_buff *skb;
 	void *dst;
 	enum xp_retval ret;
-	struct xpnet_dev_private *priv = netdev_priv(xpnet_device);
 
 	if (!XPNET_VALID_MSG(msg)) {
 		/*
@@ -161,7 +155,7 @@
 		 */
 		xpc_received(partid, channel, (void *)msg);
 
-		priv->stats.rx_errors++;
+		xpnet_device->stats.rx_errors++;
 
 		return;
 	}
@@ -176,7 +170,7 @@
 
 		xpc_received(partid, channel, (void *)msg);
 
-		priv->stats.rx_errors++;
+		xpnet_device->stats.rx_errors++;
 
 		return;
 	}
@@ -226,7 +220,7 @@
 
 			xpc_received(partid, channel, (void *)msg);
 
-			priv->stats.rx_errors++;
+			xpnet_device->stats.rx_errors++;
 
 			return;
 		}
@@ -247,8 +241,8 @@
 		skb_end_pointer(skb), skb->len);
 
 	xpnet_device->last_rx = jiffies;
-	priv->stats.rx_packets++;
-	priv->stats.rx_bytes += skb->len + ETH_HLEN;
+	xpnet_device->stats.rx_packets++;
+	xpnet_device->stats.rx_bytes += skb->len + ETH_HLEN;
 
 	netif_rx_ni(skb);
 	xpc_received(partid, channel, (void *)msg);
@@ -353,26 +347,6 @@
 }
 
 /*
- * Required for the net_device structure.
- */
-static int
-xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map)
-{
-	return 0;
-}
-
-/*
- * Return statistics to the caller.
- */
-static struct net_device_stats *
-xpnet_dev_get_stats(struct net_device *dev)
-{
-	struct xpnet_dev_private *priv = netdev_priv(dev);
-
-	return &priv->stats;
-}
-
-/*
  * Notification that the other end has received the message and
  * DMA'd the skb information.  At this point, they are done with
  * our side.  When all recipients are done processing, we
@@ -453,7 +427,6 @@
 	struct xpnet_pending_msg *queued_msg;
 	u64 start_addr, end_addr;
 	short dest_partid;
-	struct xpnet_dev_private *priv = netdev_priv(dev);
 	u16 embedded_bytes = 0;
 
 	dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
@@ -476,7 +449,7 @@
 		dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
 			 "packet\n", sizeof(struct xpnet_pending_msg));
 
-		priv->stats.tx_errors++;
+		dev->stats.tx_errors++;
 		return -ENOMEM;
 	}
 
@@ -526,8 +499,8 @@
 		kfree(queued_msg);
 	}
 
-	priv->stats.tx_packets++;
-	priv->stats.tx_bytes += skb->len;
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
 
 	return 0;
 }
@@ -538,12 +511,19 @@
 static void
 xpnet_dev_tx_timeout(struct net_device *dev)
 {
-	struct xpnet_dev_private *priv = netdev_priv(dev);
-
-	priv->stats.tx_errors++;
-	return;
+	dev->stats.tx_errors++;
 }
 
+static const struct net_device_ops xpnet_netdev_ops = {
+	.ndo_open		= xpnet_dev_open,
+	.ndo_stop		= xpnet_dev_stop,
+	.ndo_start_xmit		= xpnet_dev_hard_start_xmit,
+	.ndo_change_mtu		= xpnet_dev_change_mtu,
+	.ndo_tx_timeout		= xpnet_dev_tx_timeout,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int __init
 xpnet_init(void)
 {
@@ -563,8 +543,7 @@
 	 * use ether_setup() to init the majority of our device
 	 * structure and then override the necessary pieces.
 	 */
-	xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private),
-				    XPNET_DEVICE_NAME, ether_setup);
+	xpnet_device = alloc_netdev(0, XPNET_DEVICE_NAME, ether_setup);
 	if (xpnet_device == NULL) {
 		kfree(xpnet_broadcast_partitions);
 		return -ENOMEM;
@@ -573,13 +552,6 @@
 	netif_carrier_off(xpnet_device);
 
 	xpnet_device->mtu = XPNET_DEF_MTU;
-	xpnet_device->change_mtu = xpnet_dev_change_mtu;
-	xpnet_device->open = xpnet_dev_open;
-	xpnet_device->get_stats = xpnet_dev_get_stats;
-	xpnet_device->stop = xpnet_dev_stop;
-	xpnet_device->hard_start_xmit = xpnet_dev_hard_start_xmit;
-	xpnet_device->tx_timeout = xpnet_dev_tx_timeout;
-	xpnet_device->set_config = xpnet_dev_set_config;
 
 	/*
 	 * Multicast assumes the LSB of the first octet is set for multicast
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
deleted file mode 100644
index 571b211..0000000
--- a/drivers/misc/sony-laptop.c
+++ /dev/null
@@ -1,2781 +0,0 @@
-/*
- * ACPI Sony Notebook Control Driver (SNC and SPIC)
- *
- * Copyright (C) 2004-2005 Stelian Pop <stelian@popies.net>
- * Copyright (C) 2007 Mattia Dongili <malattia@linux.it>
- *
- * Parts of this driver inspired from asus_acpi.c and ibm_acpi.c
- * which are copyrighted by their respective authors.
- *
- * The SNY6001 driver part is based on the sonypi driver which includes
- * material from:
- *
- * Copyright (C) 2001-2005 Stelian Pop <stelian@popies.net>
- *
- * Copyright (C) 2005 Narayanan R S <nars@kadamba.org>
- *
- * Copyright (C) 2001-2002 Alcôve <www.alcove.com>
- *
- * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au>
- *
- * Copyright (C) 2001 Junichi Morita <jun1m@mars.dti.ne.jp>
- *
- * Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp>
- *
- * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com>
- *
- * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/smp_lock.h>
-#include <linux/types.h>
-#include <linux/backlight.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/dmi.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/input.h>
-#include <linux/kfifo.h>
-#include <linux/workqueue.h>
-#include <linux/acpi.h>
-#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_bus.h>
-#include <asm/uaccess.h>
-#include <linux/sonypi.h>
-#include <linux/sony-laptop.h>
-#ifdef CONFIG_SONYPI_COMPAT
-#include <linux/poll.h>
-#include <linux/miscdevice.h>
-#endif
-
-#define DRV_PFX			"sony-laptop: "
-#define dprintk(msg...)		do {			\
-	if (debug) printk(KERN_WARNING DRV_PFX  msg);	\
-} while (0)
-
-#define SONY_LAPTOP_DRIVER_VERSION	"0.6"
-
-#define SONY_NC_CLASS		"sony-nc"
-#define SONY_NC_HID		"SNY5001"
-#define SONY_NC_DRIVER_NAME	"Sony Notebook Control Driver"
-
-#define SONY_PIC_CLASS		"sony-pic"
-#define SONY_PIC_HID		"SNY6001"
-#define SONY_PIC_DRIVER_NAME	"Sony Programmable IO Control Driver"
-
-MODULE_AUTHOR("Stelian Pop, Mattia Dongili");
-MODULE_DESCRIPTION("Sony laptop extras driver (SPIC and SNC ACPI device)");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(SONY_LAPTOP_DRIVER_VERSION);
-
-static int debug;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "set this to 1 (and RTFM) if you want to help "
-		 "the development of this driver");
-
-static int no_spic;		/* = 0 */
-module_param(no_spic, int, 0444);
-MODULE_PARM_DESC(no_spic,
-		 "set this if you don't want to enable the SPIC device");
-
-static int compat;		/* = 0 */
-module_param(compat, int, 0444);
-MODULE_PARM_DESC(compat,
-		 "set this if you want to enable backward compatibility mode");
-
-static unsigned long mask = 0xffffffff;
-module_param(mask, ulong, 0644);
-MODULE_PARM_DESC(mask,
-		 "set this to the mask of event you want to enable (see doc)");
-
-static int camera;		/* = 0 */
-module_param(camera, int, 0444);
-MODULE_PARM_DESC(camera,
-		 "set this to 1 to enable Motion Eye camera controls "
-		 "(only use it if you have a C1VE or C1VN model)");
-
-#ifdef CONFIG_SONYPI_COMPAT
-static int minor = -1;
-module_param(minor, int, 0);
-MODULE_PARM_DESC(minor,
-		 "minor number of the misc device for the SPIC compatibility code, "
-		 "default is -1 (automatic)");
-#endif
-
-/*********** Input Devices ***********/
-
-#define SONY_LAPTOP_BUF_SIZE	128
-struct sony_laptop_input_s {
-	atomic_t		users;
-	struct input_dev	*jog_dev;
-	struct input_dev	*key_dev;
-	struct kfifo		*fifo;
-	spinlock_t		fifo_lock;
-	struct workqueue_struct	*wq;
-};
-static struct sony_laptop_input_s sony_laptop_input = {
-	.users = ATOMIC_INIT(0),
-};
-
-struct sony_laptop_keypress {
-	struct input_dev *dev;
-	int key;
-};
-
-/* Correspondence table between sonypi events
- * and input layer indexes in the keymap
- */
-static int sony_laptop_input_index[] = {
-	-1,	/*  0 no event */
-	-1,	/*  1 SONYPI_EVENT_JOGDIAL_DOWN */
-	-1,	/*  2 SONYPI_EVENT_JOGDIAL_UP */
-	-1,	/*  3 SONYPI_EVENT_JOGDIAL_DOWN_PRESSED */
-	-1,	/*  4 SONYPI_EVENT_JOGDIAL_UP_PRESSED */
-	-1,	/*  5 SONYPI_EVENT_JOGDIAL_PRESSED */
-	-1,	/*  6 SONYPI_EVENT_JOGDIAL_RELEASED */
-	 0,	/*  7 SONYPI_EVENT_CAPTURE_PRESSED */
-	 1,	/*  8 SONYPI_EVENT_CAPTURE_RELEASED */
-	 2,	/*  9 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */
-	 3,	/* 10 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */
-	 4,	/* 11 SONYPI_EVENT_FNKEY_ESC */
-	 5,	/* 12 SONYPI_EVENT_FNKEY_F1 */
-	 6,	/* 13 SONYPI_EVENT_FNKEY_F2 */
-	 7,	/* 14 SONYPI_EVENT_FNKEY_F3 */
-	 8,	/* 15 SONYPI_EVENT_FNKEY_F4 */
-	 9,	/* 16 SONYPI_EVENT_FNKEY_F5 */
-	10,	/* 17 SONYPI_EVENT_FNKEY_F6 */
-	11,	/* 18 SONYPI_EVENT_FNKEY_F7 */
-	12,	/* 19 SONYPI_EVENT_FNKEY_F8 */
-	13,	/* 20 SONYPI_EVENT_FNKEY_F9 */
-	14,	/* 21 SONYPI_EVENT_FNKEY_F10 */
-	15,	/* 22 SONYPI_EVENT_FNKEY_F11 */
-	16,	/* 23 SONYPI_EVENT_FNKEY_F12 */
-	17,	/* 24 SONYPI_EVENT_FNKEY_1 */
-	18,	/* 25 SONYPI_EVENT_FNKEY_2 */
-	19,	/* 26 SONYPI_EVENT_FNKEY_D */
-	20,	/* 27 SONYPI_EVENT_FNKEY_E */
-	21,	/* 28 SONYPI_EVENT_FNKEY_F */
-	22,	/* 29 SONYPI_EVENT_FNKEY_S */
-	23,	/* 30 SONYPI_EVENT_FNKEY_B */
-	24,	/* 31 SONYPI_EVENT_BLUETOOTH_PRESSED */
-	25,	/* 32 SONYPI_EVENT_PKEY_P1 */
-	26,	/* 33 SONYPI_EVENT_PKEY_P2 */
-	27,	/* 34 SONYPI_EVENT_PKEY_P3 */
-	28,	/* 35 SONYPI_EVENT_BACK_PRESSED */
-	-1,	/* 36 SONYPI_EVENT_LID_CLOSED */
-	-1,	/* 37 SONYPI_EVENT_LID_OPENED */
-	29,	/* 38 SONYPI_EVENT_BLUETOOTH_ON */
-	30,	/* 39 SONYPI_EVENT_BLUETOOTH_OFF */
-	31,	/* 40 SONYPI_EVENT_HELP_PRESSED */
-	32,	/* 41 SONYPI_EVENT_FNKEY_ONLY */
-	33,	/* 42 SONYPI_EVENT_JOGDIAL_FAST_DOWN */
-	34,	/* 43 SONYPI_EVENT_JOGDIAL_FAST_UP */
-	35,	/* 44 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */
-	36,	/* 45 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */
-	37,	/* 46 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */
-	38,	/* 47 SONYPI_EVENT_JOGDIAL_VFAST_UP */
-	39,	/* 48 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */
-	40,	/* 49 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */
-	41,	/* 50 SONYPI_EVENT_ZOOM_PRESSED */
-	42,	/* 51 SONYPI_EVENT_THUMBPHRASE_PRESSED */
-	43,	/* 52 SONYPI_EVENT_MEYE_FACE */
-	44,	/* 53 SONYPI_EVENT_MEYE_OPPOSITE */
-	45,	/* 54 SONYPI_EVENT_MEMORYSTICK_INSERT */
-	46,	/* 55 SONYPI_EVENT_MEMORYSTICK_EJECT */
-	-1,	/* 56 SONYPI_EVENT_ANYBUTTON_RELEASED */
-	-1,	/* 57 SONYPI_EVENT_BATTERY_INSERT */
-	-1,	/* 58 SONYPI_EVENT_BATTERY_REMOVE */
-	-1,	/* 59 SONYPI_EVENT_FNKEY_RELEASED */
-	47,	/* 60 SONYPI_EVENT_WIRELESS_ON */
-	48,	/* 61 SONYPI_EVENT_WIRELESS_OFF */
-	49,	/* 62 SONYPI_EVENT_ZOOM_IN_PRESSED */
-	50,	/* 63 SONYPI_EVENT_ZOOM_OUT_PRESSED */
-};
-
-static int sony_laptop_input_keycode_map[] = {
-	KEY_CAMERA,	/*  0 SONYPI_EVENT_CAPTURE_PRESSED */
-	KEY_RESERVED,	/*  1 SONYPI_EVENT_CAPTURE_RELEASED */
-	KEY_RESERVED,	/*  2 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */
-	KEY_RESERVED,	/*  3 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */
-	KEY_FN_ESC,	/*  4 SONYPI_EVENT_FNKEY_ESC */
-	KEY_FN_F1,	/*  5 SONYPI_EVENT_FNKEY_F1 */
-	KEY_FN_F2,	/*  6 SONYPI_EVENT_FNKEY_F2 */
-	KEY_FN_F3,	/*  7 SONYPI_EVENT_FNKEY_F3 */
-	KEY_FN_F4,	/*  8 SONYPI_EVENT_FNKEY_F4 */
-	KEY_FN_F5,	/*  9 SONYPI_EVENT_FNKEY_F5 */
-	KEY_FN_F6,	/* 10 SONYPI_EVENT_FNKEY_F6 */
-	KEY_FN_F7,	/* 11 SONYPI_EVENT_FNKEY_F7 */
-	KEY_FN_F8,	/* 12 SONYPI_EVENT_FNKEY_F8 */
-	KEY_FN_F9,	/* 13 SONYPI_EVENT_FNKEY_F9 */
-	KEY_FN_F10,	/* 14 SONYPI_EVENT_FNKEY_F10 */
-	KEY_FN_F11,	/* 15 SONYPI_EVENT_FNKEY_F11 */
-	KEY_FN_F12,	/* 16 SONYPI_EVENT_FNKEY_F12 */
-	KEY_FN_F1,	/* 17 SONYPI_EVENT_FNKEY_1 */
-	KEY_FN_F2,	/* 18 SONYPI_EVENT_FNKEY_2 */
-	KEY_FN_D,	/* 19 SONYPI_EVENT_FNKEY_D */
-	KEY_FN_E,	/* 20 SONYPI_EVENT_FNKEY_E */
-	KEY_FN_F,	/* 21 SONYPI_EVENT_FNKEY_F */
-	KEY_FN_S,	/* 22 SONYPI_EVENT_FNKEY_S */
-	KEY_FN_B,	/* 23 SONYPI_EVENT_FNKEY_B */
-	KEY_BLUETOOTH,	/* 24 SONYPI_EVENT_BLUETOOTH_PRESSED */
-	KEY_PROG1,	/* 25 SONYPI_EVENT_PKEY_P1 */
-	KEY_PROG2,	/* 26 SONYPI_EVENT_PKEY_P2 */
-	KEY_PROG3,	/* 27 SONYPI_EVENT_PKEY_P3 */
-	KEY_BACK,	/* 28 SONYPI_EVENT_BACK_PRESSED */
-	KEY_BLUETOOTH,	/* 29 SONYPI_EVENT_BLUETOOTH_ON */
-	KEY_BLUETOOTH,	/* 30 SONYPI_EVENT_BLUETOOTH_OFF */
-	KEY_HELP,	/* 31 SONYPI_EVENT_HELP_PRESSED */
-	KEY_FN,		/* 32 SONYPI_EVENT_FNKEY_ONLY */
-	KEY_RESERVED,	/* 33 SONYPI_EVENT_JOGDIAL_FAST_DOWN */
-	KEY_RESERVED,	/* 34 SONYPI_EVENT_JOGDIAL_FAST_UP */
-	KEY_RESERVED,	/* 35 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */
-	KEY_RESERVED,	/* 36 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */
-	KEY_RESERVED,	/* 37 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */
-	KEY_RESERVED,	/* 38 SONYPI_EVENT_JOGDIAL_VFAST_UP */
-	KEY_RESERVED,	/* 39 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */
-	KEY_RESERVED,	/* 40 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */
-	KEY_ZOOM,	/* 41 SONYPI_EVENT_ZOOM_PRESSED */
-	BTN_THUMB,	/* 42 SONYPI_EVENT_THUMBPHRASE_PRESSED */
-	KEY_RESERVED,	/* 43 SONYPI_EVENT_MEYE_FACE */
-	KEY_RESERVED,	/* 44 SONYPI_EVENT_MEYE_OPPOSITE */
-	KEY_RESERVED,	/* 45 SONYPI_EVENT_MEMORYSTICK_INSERT */
-	KEY_RESERVED,	/* 46 SONYPI_EVENT_MEMORYSTICK_EJECT */
-	KEY_WLAN,	/* 47 SONYPI_EVENT_WIRELESS_ON */
-	KEY_WLAN,	/* 48 SONYPI_EVENT_WIRELESS_OFF */
-	KEY_ZOOMIN,	/* 49 SONYPI_EVENT_ZOOM_IN_PRESSED */
-	KEY_ZOOMOUT	/* 50 SONYPI_EVENT_ZOOM_OUT_PRESSED */
-};
-
-/* release buttons after a short delay if pressed */
-static void do_sony_laptop_release_key(struct work_struct *work)
-{
-	struct sony_laptop_keypress kp;
-
-	while (kfifo_get(sony_laptop_input.fifo, (unsigned char *)&kp,
-			 sizeof(kp)) == sizeof(kp)) {
-		msleep(10);
-		input_report_key(kp.dev, kp.key, 0);
-		input_sync(kp.dev);
-	}
-}
-static DECLARE_WORK(sony_laptop_release_key_work,
-		do_sony_laptop_release_key);
-
-/* forward event to the input subsystem */
-static void sony_laptop_report_input_event(u8 event)
-{
-	struct input_dev *jog_dev = sony_laptop_input.jog_dev;
-	struct input_dev *key_dev = sony_laptop_input.key_dev;
-	struct sony_laptop_keypress kp = { NULL };
-
-	if (event == SONYPI_EVENT_FNKEY_RELEASED) {
-		/* Nothing, not all VAIOs generate this event */
-		return;
-	}
-
-	/* report events */
-	switch (event) {
-	/* jog_dev events */
-	case SONYPI_EVENT_JOGDIAL_UP:
-	case SONYPI_EVENT_JOGDIAL_UP_PRESSED:
-		input_report_rel(jog_dev, REL_WHEEL, 1);
-		input_sync(jog_dev);
-		return;
-
-	case SONYPI_EVENT_JOGDIAL_DOWN:
-	case SONYPI_EVENT_JOGDIAL_DOWN_PRESSED:
-		input_report_rel(jog_dev, REL_WHEEL, -1);
-		input_sync(jog_dev);
-		return;
-
-	/* key_dev events */
-	case SONYPI_EVENT_JOGDIAL_PRESSED:
-		kp.key = BTN_MIDDLE;
-		kp.dev = jog_dev;
-		break;
-
-	default:
-		if (event >= ARRAY_SIZE(sony_laptop_input_index)) {
-			dprintk("sony_laptop_report_input_event, event not known: %d\n", event);
-			break;
-		}
-		if (sony_laptop_input_index[event] != -1) {
-			kp.key = sony_laptop_input_keycode_map[sony_laptop_input_index[event]];
-			if (kp.key != KEY_UNKNOWN)
-				kp.dev = key_dev;
-		}
-		break;
-	}
-
-	if (kp.dev) {
-		input_report_key(kp.dev, kp.key, 1);
-		/* we emit the scancode so we can always remap the key */
-		input_event(kp.dev, EV_MSC, MSC_SCAN, event);
-		input_sync(kp.dev);
-		kfifo_put(sony_laptop_input.fifo,
-			  (unsigned char *)&kp, sizeof(kp));
-
-		if (!work_pending(&sony_laptop_release_key_work))
-			queue_work(sony_laptop_input.wq,
-					&sony_laptop_release_key_work);
-	} else
-		dprintk("unknown input event %.2x\n", event);
-}
-
-static int sony_laptop_setup_input(struct acpi_device *acpi_device)
-{
-	struct input_dev *jog_dev;
-	struct input_dev *key_dev;
-	int i;
-	int error;
-
-	/* don't run again if already initialized */
-	if (atomic_add_return(1, &sony_laptop_input.users) > 1)
-		return 0;
-
-	/* kfifo */
-	spin_lock_init(&sony_laptop_input.fifo_lock);
-	sony_laptop_input.fifo =
-		kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL,
-			    &sony_laptop_input.fifo_lock);
-	if (IS_ERR(sony_laptop_input.fifo)) {
-		printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n");
-		error = PTR_ERR(sony_laptop_input.fifo);
-		goto err_dec_users;
-	}
-
-	/* init workqueue */
-	sony_laptop_input.wq = create_singlethread_workqueue("sony-laptop");
-	if (!sony_laptop_input.wq) {
-		printk(KERN_ERR DRV_PFX
-				"Unabe to create workqueue.\n");
-		error = -ENXIO;
-		goto err_free_kfifo;
-	}
-
-	/* input keys */
-	key_dev = input_allocate_device();
-	if (!key_dev) {
-		error = -ENOMEM;
-		goto err_destroy_wq;
-	}
-
-	key_dev->name = "Sony Vaio Keys";
-	key_dev->id.bustype = BUS_ISA;
-	key_dev->id.vendor = PCI_VENDOR_ID_SONY;
-	key_dev->dev.parent = &acpi_device->dev;
-
-	/* Initialize the Input Drivers: special keys */
-	set_bit(EV_KEY, key_dev->evbit);
-	set_bit(EV_MSC, key_dev->evbit);
-	set_bit(MSC_SCAN, key_dev->mscbit);
-	key_dev->keycodesize = sizeof(sony_laptop_input_keycode_map[0]);
-	key_dev->keycodemax = ARRAY_SIZE(sony_laptop_input_keycode_map);
-	key_dev->keycode = &sony_laptop_input_keycode_map;
-	for (i = 0; i < ARRAY_SIZE(sony_laptop_input_keycode_map); i++) {
-		if (sony_laptop_input_keycode_map[i] != KEY_RESERVED) {
-			set_bit(sony_laptop_input_keycode_map[i],
-				key_dev->keybit);
-		}
-	}
-
-	error = input_register_device(key_dev);
-	if (error)
-		goto err_free_keydev;
-
-	sony_laptop_input.key_dev = key_dev;
-
-	/* jogdial */
-	jog_dev = input_allocate_device();
-	if (!jog_dev) {
-		error = -ENOMEM;
-		goto err_unregister_keydev;
-	}
-
-	jog_dev->name = "Sony Vaio Jogdial";
-	jog_dev->id.bustype = BUS_ISA;
-	jog_dev->id.vendor = PCI_VENDOR_ID_SONY;
-	jog_dev->dev.parent = &acpi_device->dev;
-
-	jog_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
-	jog_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_MIDDLE);
-	jog_dev->relbit[0] = BIT_MASK(REL_WHEEL);
-
-	error = input_register_device(jog_dev);
-	if (error)
-		goto err_free_jogdev;
-
-	sony_laptop_input.jog_dev = jog_dev;
-
-	return 0;
-
-err_free_jogdev:
-	input_free_device(jog_dev);
-
-err_unregister_keydev:
-	input_unregister_device(key_dev);
-	/* to avoid kref underflow below at input_free_device */
-	key_dev = NULL;
-
-err_free_keydev:
-	input_free_device(key_dev);
-
-err_destroy_wq:
-	destroy_workqueue(sony_laptop_input.wq);
-
-err_free_kfifo:
-	kfifo_free(sony_laptop_input.fifo);
-
-err_dec_users:
-	atomic_dec(&sony_laptop_input.users);
-	return error;
-}
-
-static void sony_laptop_remove_input(void)
-{
-	/* cleanup only after the last user has gone */
-	if (!atomic_dec_and_test(&sony_laptop_input.users))
-		return;
-
-	/* flush workqueue first */
-	flush_workqueue(sony_laptop_input.wq);
-
-	/* destroy input devs */
-	input_unregister_device(sony_laptop_input.key_dev);
-	sony_laptop_input.key_dev = NULL;
-
-	if (sony_laptop_input.jog_dev) {
-		input_unregister_device(sony_laptop_input.jog_dev);
-		sony_laptop_input.jog_dev = NULL;
-	}
-
-	destroy_workqueue(sony_laptop_input.wq);
-	kfifo_free(sony_laptop_input.fifo);
-}
-
-/*********** Platform Device ***********/
-
-static atomic_t sony_pf_users = ATOMIC_INIT(0);
-static struct platform_driver sony_pf_driver = {
-	.driver = {
-		   .name = "sony-laptop",
-		   .owner = THIS_MODULE,
-		   }
-};
-static struct platform_device *sony_pf_device;
-
-static int sony_pf_add(void)
-{
-	int ret = 0;
-
-	/* don't run again if already initialized */
-	if (atomic_add_return(1, &sony_pf_users) > 1)
-		return 0;
-
-	ret = platform_driver_register(&sony_pf_driver);
-	if (ret)
-		goto out;
-
-	sony_pf_device = platform_device_alloc("sony-laptop", -1);
-	if (!sony_pf_device) {
-		ret = -ENOMEM;
-		goto out_platform_registered;
-	}
-
-	ret = platform_device_add(sony_pf_device);
-	if (ret)
-		goto out_platform_alloced;
-
-	return 0;
-
-      out_platform_alloced:
-	platform_device_put(sony_pf_device);
-	sony_pf_device = NULL;
-      out_platform_registered:
-	platform_driver_unregister(&sony_pf_driver);
-      out:
-	atomic_dec(&sony_pf_users);
-	return ret;
-}
-
-static void sony_pf_remove(void)
-{
-	/* deregister only after the last user has gone */
-	if (!atomic_dec_and_test(&sony_pf_users))
-		return;
-
-	platform_device_del(sony_pf_device);
-	platform_device_put(sony_pf_device);
-	platform_driver_unregister(&sony_pf_driver);
-}
-
-/*********** SNC (SNY5001) Device ***********/
-
-/* the device uses 1-based values, while the backlight subsystem uses
-   0-based values */
-#define SONY_MAX_BRIGHTNESS	8
-
-#define SNC_VALIDATE_IN		0
-#define SNC_VALIDATE_OUT	1
-
-static ssize_t sony_nc_sysfs_show(struct device *, struct device_attribute *,
-			      char *);
-static ssize_t sony_nc_sysfs_store(struct device *, struct device_attribute *,
-			       const char *, size_t);
-static int boolean_validate(const int, const int);
-static int brightness_default_validate(const int, const int);
-
-struct sony_nc_value {
-	char *name;		/* name of the entry */
-	char **acpiget;		/* names of the ACPI get functions */
-	char **acpiset;		/* names of the ACPI set functions */
-	int (*validate)(const int, const int);	/* input/output validation */
-	int value;		/* current setting */
-	int valid;		/* Has ever been set */
-	int debug;		/* active only in debug mode ? */
-	struct device_attribute devattr;	/* sysfs attribute */
-};
-
-#define SNC_HANDLE_NAMES(_name, _values...) \
-	static char *snc_##_name[] = { _values, NULL }
-
-#define SNC_HANDLE(_name, _getters, _setters, _validate, _debug) \
-	{ \
-		.name		= __stringify(_name), \
-		.acpiget	= _getters, \
-		.acpiset	= _setters, \
-		.validate	= _validate, \
-		.debug		= _debug, \
-		.devattr	= __ATTR(_name, 0, sony_nc_sysfs_show, sony_nc_sysfs_store), \
-	}
-
-#define SNC_HANDLE_NULL	{ .name = NULL }
-
-SNC_HANDLE_NAMES(fnkey_get, "GHKE");
-
-SNC_HANDLE_NAMES(brightness_def_get, "GPBR");
-SNC_HANDLE_NAMES(brightness_def_set, "SPBR");
-
-SNC_HANDLE_NAMES(cdpower_get, "GCDP");
-SNC_HANDLE_NAMES(cdpower_set, "SCDP", "CDPW");
-
-SNC_HANDLE_NAMES(audiopower_get, "GAZP");
-SNC_HANDLE_NAMES(audiopower_set, "AZPW");
-
-SNC_HANDLE_NAMES(lanpower_get, "GLNP");
-SNC_HANDLE_NAMES(lanpower_set, "LNPW");
-
-SNC_HANDLE_NAMES(lidstate_get, "GLID");
-
-SNC_HANDLE_NAMES(indicatorlamp_get, "GILS");
-SNC_HANDLE_NAMES(indicatorlamp_set, "SILS");
-
-SNC_HANDLE_NAMES(gainbass_get, "GMGB");
-SNC_HANDLE_NAMES(gainbass_set, "CMGB");
-
-SNC_HANDLE_NAMES(PID_get, "GPID");
-
-SNC_HANDLE_NAMES(CTR_get, "GCTR");
-SNC_HANDLE_NAMES(CTR_set, "SCTR");
-
-SNC_HANDLE_NAMES(PCR_get, "GPCR");
-SNC_HANDLE_NAMES(PCR_set, "SPCR");
-
-SNC_HANDLE_NAMES(CMI_get, "GCMI");
-SNC_HANDLE_NAMES(CMI_set, "SCMI");
-
-static struct sony_nc_value sony_nc_values[] = {
-	SNC_HANDLE(brightness_default, snc_brightness_def_get,
-			snc_brightness_def_set, brightness_default_validate, 0),
-	SNC_HANDLE(fnkey, snc_fnkey_get, NULL, NULL, 0),
-	SNC_HANDLE(cdpower, snc_cdpower_get, snc_cdpower_set, boolean_validate, 0),
-	SNC_HANDLE(audiopower, snc_audiopower_get, snc_audiopower_set,
-			boolean_validate, 0),
-	SNC_HANDLE(lanpower, snc_lanpower_get, snc_lanpower_set,
-			boolean_validate, 1),
-	SNC_HANDLE(lidstate, snc_lidstate_get, NULL,
-			boolean_validate, 0),
-	SNC_HANDLE(indicatorlamp, snc_indicatorlamp_get, snc_indicatorlamp_set,
-			boolean_validate, 0),
-	SNC_HANDLE(gainbass, snc_gainbass_get, snc_gainbass_set,
-			boolean_validate, 0),
-	/* unknown methods */
-	SNC_HANDLE(PID, snc_PID_get, NULL, NULL, 1),
-	SNC_HANDLE(CTR, snc_CTR_get, snc_CTR_set, NULL, 1),
-	SNC_HANDLE(PCR, snc_PCR_get, snc_PCR_set, NULL, 1),
-	SNC_HANDLE(CMI, snc_CMI_get, snc_CMI_set, NULL, 1),
-	SNC_HANDLE_NULL
-};
-
-static acpi_handle sony_nc_acpi_handle;
-static struct acpi_device *sony_nc_acpi_device = NULL;
-
-/*
- * acpi_evaluate_object wrappers
- */
-static int acpi_callgetfunc(acpi_handle handle, char *name, int *result)
-{
-	struct acpi_buffer output;
-	union acpi_object out_obj;
-	acpi_status status;
-
-	output.length = sizeof(out_obj);
-	output.pointer = &out_obj;
-
-	status = acpi_evaluate_object(handle, name, NULL, &output);
-	if ((status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER)) {
-		*result = out_obj.integer.value;
-		return 0;
-	}
-
-	printk(KERN_WARNING DRV_PFX "acpi_callreadfunc failed\n");
-
-	return -1;
-}
-
-static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
-			    int *result)
-{
-	struct acpi_object_list params;
-	union acpi_object in_obj;
-	struct acpi_buffer output;
-	union acpi_object out_obj;
-	acpi_status status;
-
-	params.count = 1;
-	params.pointer = &in_obj;
-	in_obj.type = ACPI_TYPE_INTEGER;
-	in_obj.integer.value = value;
-
-	output.length = sizeof(out_obj);
-	output.pointer = &out_obj;
-
-	status = acpi_evaluate_object(handle, name, &params, &output);
-	if (status == AE_OK) {
-		if (result != NULL) {
-			if (out_obj.type != ACPI_TYPE_INTEGER) {
-				printk(KERN_WARNING DRV_PFX "acpi_evaluate_object bad "
-				       "return type\n");
-				return -1;
-			}
-			*result = out_obj.integer.value;
-		}
-		return 0;
-	}
-
-	printk(KERN_WARNING DRV_PFX "acpi_evaluate_object failed\n");
-
-	return -1;
-}
-
-/*
- * sony_nc_values input/output validate functions
- */
-
-/* brightness_default_validate:
- *
- * manipulate input output values to keep consistency with the
- * backlight framework for which brightness values are 0-based.
- */
-static int brightness_default_validate(const int direction, const int value)
-{
-	switch (direction) {
-		case SNC_VALIDATE_OUT:
-			return value - 1;
-		case SNC_VALIDATE_IN:
-			if (value >= 0 && value < SONY_MAX_BRIGHTNESS)
-				return value + 1;
-	}
-	return -EINVAL;
-}
-
-/* boolean_validate:
- *
- * on input validate boolean values 0/1, on output just pass the
- * received value.
- */
-static int boolean_validate(const int direction, const int value)
-{
-	if (direction == SNC_VALIDATE_IN) {
-		if (value != 0 && value != 1)
-			return -EINVAL;
-	}
-	return value;
-}
-
-/*
- * Sysfs show/store common to all sony_nc_values
- */
-static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *attr,
-			      char *buffer)
-{
-	int value;
-	struct sony_nc_value *item =
-	    container_of(attr, struct sony_nc_value, devattr);
-
-	if (!*item->acpiget)
-		return -EIO;
-
-	if (acpi_callgetfunc(sony_nc_acpi_handle, *item->acpiget, &value) < 0)
-		return -EIO;
-
-	if (item->validate)
-		value = item->validate(SNC_VALIDATE_OUT, value);
-
-	return snprintf(buffer, PAGE_SIZE, "%d\n", value);
-}
-
-static ssize_t sony_nc_sysfs_store(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buffer, size_t count)
-{
-	int value;
-	struct sony_nc_value *item =
-	    container_of(attr, struct sony_nc_value, devattr);
-
-	if (!item->acpiset)
-		return -EIO;
-
-	if (count > 31)
-		return -EINVAL;
-
-	value = simple_strtoul(buffer, NULL, 10);
-
-	if (item->validate)
-		value = item->validate(SNC_VALIDATE_IN, value);
-
-	if (value < 0)
-		return value;
-
-	if (acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, value, NULL) < 0)
-		return -EIO;
-	item->value = value;
-	item->valid = 1;
-	return count;
-}
-
-
-/*
- * Backlight device
- */
-static int sony_backlight_update_status(struct backlight_device *bd)
-{
-	return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
-				bd->props.brightness + 1, NULL);
-}
-
-static int sony_backlight_get_brightness(struct backlight_device *bd)
-{
-	int value;
-
-	if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value))
-		return 0;
-	/* brightness levels are 1-based, while backlight ones are 0-based */
-	return value - 1;
-}
-
-static struct backlight_device *sony_backlight_device;
-static struct backlight_ops sony_backlight_ops = {
-	.update_status = sony_backlight_update_status,
-	.get_brightness = sony_backlight_get_brightness,
-};
-
-/*
- * New SNC-only Vaios event mapping to driver known keys
- */
-struct sony_nc_event {
-	u8	data;
-	u8	event;
-};
-
-static struct sony_nc_event *sony_nc_events;
-
-/* Vaio C* --maybe also FE*, N* and AR* ?-- special init sequence
- * for Fn keys
- */
-static int sony_nc_C_enable(const struct dmi_system_id *id)
-{
-	int result = 0;
-
-	printk(KERN_NOTICE DRV_PFX "detected %s\n", id->ident);
-
-	sony_nc_events = id->driver_data;
-
-	if (acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0x4, &result) < 0
-			|| acpi_callsetfunc(sony_nc_acpi_handle, "SN07", 0x2, &result) < 0
-			|| acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0x10, &result) < 0
-			|| acpi_callsetfunc(sony_nc_acpi_handle, "SN07", 0x0, &result) < 0
-			|| acpi_callsetfunc(sony_nc_acpi_handle, "SN03", 0x2, &result) < 0
-			|| acpi_callsetfunc(sony_nc_acpi_handle, "SN07", 0x101, &result) < 0) {
-		printk(KERN_WARNING DRV_PFX "failed to initialize SNC, some "
-				"functionalities may be missing\n");
-		return 1;
-	}
-	return 0;
-}
-
-static struct sony_nc_event sony_C_events[] = {
-	{ 0x81, SONYPI_EVENT_FNKEY_F1 },
-	{ 0x01, SONYPI_EVENT_FNKEY_RELEASED },
-	{ 0x85, SONYPI_EVENT_FNKEY_F5 },
-	{ 0x05, SONYPI_EVENT_FNKEY_RELEASED },
-	{ 0x86, SONYPI_EVENT_FNKEY_F6 },
-	{ 0x06, SONYPI_EVENT_FNKEY_RELEASED },
-	{ 0x87, SONYPI_EVENT_FNKEY_F7 },
-	{ 0x07, SONYPI_EVENT_FNKEY_RELEASED },
-	{ 0x8A, SONYPI_EVENT_FNKEY_F10 },
-	{ 0x0A, SONYPI_EVENT_FNKEY_RELEASED },
-	{ 0x8C, SONYPI_EVENT_FNKEY_F12 },
-	{ 0x0C, SONYPI_EVENT_FNKEY_RELEASED },
-	{ 0, 0 },
-};
-
-/* SNC-only model map */
-static const struct dmi_system_id sony_nc_ids[] = {
-		{
-			.ident = "Sony Vaio FE Series",
-			.callback = sony_nc_C_enable,
-			.driver_data = sony_C_events,
-			.matches = {
-				DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
-				DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FE"),
-			},
-		},
-		{
-			.ident = "Sony Vaio FZ Series",
-			.callback = sony_nc_C_enable,
-			.driver_data = sony_C_events,
-			.matches = {
-				DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
-				DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ"),
-			},
-		},
-		{
-			.ident = "Sony Vaio C Series",
-			.callback = sony_nc_C_enable,
-			.driver_data = sony_C_events,
-			.matches = {
-				DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
-				DMI_MATCH(DMI_PRODUCT_NAME, "VGN-C"),
-			},
-		},
-		{
-			.ident = "Sony Vaio N Series",
-			.callback = sony_nc_C_enable,
-			.driver_data = sony_C_events,
-			.matches = {
-				DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
-				DMI_MATCH(DMI_PRODUCT_NAME, "VGN-N"),
-			},
-		},
-		{ }
-};
-
-/*
- * ACPI callbacks
- */
-static void sony_acpi_notify(acpi_handle handle, u32 event, void *data)
-{
-	struct sony_nc_event *evmap;
-	u32 ev = event;
-	int result;
-
-	if (ev == 0x92) {
-		/* read the key pressed from EC.GECR
-		 * A call to SN07 with 0x0202 will do it as well respecting
-		 * the current protocol on different OSes
-		 *
-		 * Note: the path for GECR may be
-		 *   \_SB.PCI0.LPCB.EC (C, FE, AR, N and friends)
-		 *   \_SB.PCI0.PIB.EC0 (VGN-FR notifications are sent directly, no GECR)
-		 *
-		 * TODO: we may want to do the same for the older GHKE -need
-		 *       dmi list- so this snippet may become one more callback.
-		 */
-		if (acpi_callsetfunc(handle, "SN07", 0x0202, &result) < 0)
-			dprintk("sony_acpi_notify, unable to decode event 0x%.2x\n", ev);
-		else
-			ev = result & 0xFF;
-	}
-
-	if (sony_nc_events)
-		for (evmap = sony_nc_events; evmap->event; evmap++) {
-			if (evmap->data == ev) {
-				ev = evmap->event;
-				break;
-			}
-		}
-
-	dprintk("sony_acpi_notify, event: 0x%.2x\n", ev);
-	sony_laptop_report_input_event(ev);
-	acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev);
-}
-
-static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
-				      void *context, void **return_value)
-{
-	struct acpi_namespace_node *node;
-	union acpi_operand_object *operand;
-
-	node = (struct acpi_namespace_node *)handle;
-	operand = (union acpi_operand_object *)node->object;
-
-	printk(KERN_WARNING DRV_PFX "method: name: %4.4s, args %X\n", node->name.ascii,
-	       (u32) operand->method.param_count);
-
-	return AE_OK;
-}
-
-/*
- * ACPI device
- */
-static int sony_nc_resume(struct acpi_device *device)
-{
-	struct sony_nc_value *item;
-
-	for (item = sony_nc_values; item->name; item++) {
-		int ret;
-
-		if (!item->valid)
-			continue;
-		ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset,
-				       item->value, NULL);
-		if (ret < 0) {
-			printk("%s: %d\n", __func__, ret);
-			break;
-		}
-	}
-
-	/* set the last requested brightness level */
-	if (sony_backlight_device &&
-			!sony_backlight_update_status(sony_backlight_device))
-		printk(KERN_WARNING DRV_PFX "unable to restore brightness level\n");
-
-	/* re-initialize models with specific requirements */
-	dmi_check_system(sony_nc_ids);
-
-	return 0;
-}
-
-static int sony_nc_add(struct acpi_device *device)
-{
-	acpi_status status;
-	int result = 0;
-	acpi_handle handle;
-	struct sony_nc_value *item;
-
-	printk(KERN_INFO DRV_PFX "%s v%s.\n",
-		SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
-
-	sony_nc_acpi_device = device;
-	strcpy(acpi_device_class(device), "sony/hotkey");
-
-	sony_nc_acpi_handle = device->handle;
-
-	/* read device status */
-	result = acpi_bus_get_status(device);
-	/* bail IFF the above call was successful and the device is not present */
-	if (!result && !device->status.present) {
-		dprintk("Device not present\n");
-		result = -ENODEV;
-		goto outwalk;
-	}
-
-	if (debug) {
-		status = acpi_walk_namespace(ACPI_TYPE_METHOD, sony_nc_acpi_handle,
-					     1, sony_walk_callback, NULL, NULL);
-		if (ACPI_FAILURE(status)) {
-			printk(KERN_WARNING DRV_PFX "unable to walk acpi resources\n");
-			result = -ENODEV;
-			goto outwalk;
-		}
-	}
-
-	/* try to _INI the device if such a method exists (ACPI spec 3.0, 6.5.1);
-	 * device presence was already checked above */
-	if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, METHOD_NAME__INI, &handle))) {
-		dprintk("Invoking _INI\n");
-		if (ACPI_FAILURE(acpi_evaluate_object(sony_nc_acpi_handle, METHOD_NAME__INI,
-						NULL, NULL)))
-			dprintk("_INI Method failed\n");
-	}
-
-	/* setup input devices and helper fifo */
-	result = sony_laptop_setup_input(device);
-	if (result) {
-		printk(KERN_ERR DRV_PFX
-				"Unabe to create input devices.\n");
-		goto outwalk;
-	}
-
-	status = acpi_install_notify_handler(sony_nc_acpi_handle,
-					     ACPI_DEVICE_NOTIFY,
-					     sony_acpi_notify, NULL);
-	if (ACPI_FAILURE(status)) {
-		printk(KERN_WARNING DRV_PFX "unable to install notify handler (%u)\n", status);
-		result = -ENODEV;
-		goto outinput;
-	}
-
-	if (acpi_video_backlight_support()) {
-		printk(KERN_INFO DRV_PFX "brightness ignored, must be "
-		       "controlled by ACPI video driver\n");
-	} else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
-						&handle))) {
-		sony_backlight_device = backlight_device_register("sony", NULL,
-								  NULL,
-								  &sony_backlight_ops);
-
-		if (IS_ERR(sony_backlight_device)) {
-			printk(KERN_WARNING DRV_PFX "unable to register backlight device\n");
-			sony_backlight_device = NULL;
-		} else {
-			sony_backlight_device->props.brightness =
-			    sony_backlight_get_brightness
-			    (sony_backlight_device);
-			sony_backlight_device->props.max_brightness =
-			    SONY_MAX_BRIGHTNESS - 1;
-		}
-
-	}
-
-	/* initialize models with specific requirements */
-	dmi_check_system(sony_nc_ids);
-
-	result = sony_pf_add();
-	if (result)
-		goto outbacklight;
-
-	/* create sony_pf sysfs attributes related to the SNC device */
-	for (item = sony_nc_values; item->name; ++item) {
-
-		if (!debug && item->debug)
-			continue;
-
-		/* find the available acpiget as described in the DSDT */
-		for (; item->acpiget && *item->acpiget; ++item->acpiget) {
-			if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
-							 *item->acpiget,
-							 &handle))) {
-				dprintk("Found %s getter: %s\n",
-						item->name, *item->acpiget);
-				item->devattr.attr.mode |= S_IRUGO;
-				break;
-			}
-		}
-
-		/* find the available acpiset as described in the DSDT */
-		for (; item->acpiset && *item->acpiset; ++item->acpiset) {
-			if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
-							 *item->acpiset,
-							 &handle))) {
-				dprintk("Found %s setter: %s\n",
-						item->name, *item->acpiset);
-				item->devattr.attr.mode |= S_IWUSR;
-				break;
-			}
-		}
-
-		if (item->devattr.attr.mode != 0) {
-			result =
-			    device_create_file(&sony_pf_device->dev,
-					       &item->devattr);
-			if (result)
-				goto out_sysfs;
-		}
-	}
-
-	return 0;
-
-      out_sysfs:
-	for (item = sony_nc_values; item->name; ++item) {
-		device_remove_file(&sony_pf_device->dev, &item->devattr);
-	}
-	sony_pf_remove();
-
-      outbacklight:
-	if (sony_backlight_device)
-		backlight_device_unregister(sony_backlight_device);
-
-	status = acpi_remove_notify_handler(sony_nc_acpi_handle,
-					    ACPI_DEVICE_NOTIFY,
-					    sony_acpi_notify);
-	if (ACPI_FAILURE(status))
-		printk(KERN_WARNING DRV_PFX "unable to remove notify handler\n");
-
-      outinput:
-	sony_laptop_remove_input();
-
-      outwalk:
-	return result;
-}
-
-static int sony_nc_remove(struct acpi_device *device, int type)
-{
-	acpi_status status;
-	struct sony_nc_value *item;
-
-	if (sony_backlight_device)
-		backlight_device_unregister(sony_backlight_device);
-
-	sony_nc_acpi_device = NULL;
-
-	status = acpi_remove_notify_handler(sony_nc_acpi_handle,
-					    ACPI_DEVICE_NOTIFY,
-					    sony_acpi_notify);
-	if (ACPI_FAILURE(status))
-		printk(KERN_WARNING DRV_PFX "unable to remove notify handler\n");
-
-	for (item = sony_nc_values; item->name; ++item) {
-		device_remove_file(&sony_pf_device->dev, &item->devattr);
-	}
-
-	sony_pf_remove();
-	sony_laptop_remove_input();
-	dprintk(SONY_NC_DRIVER_NAME " removed.\n");
-
-	return 0;
-}
-
-static const struct acpi_device_id sony_device_ids[] = {
-	{SONY_NC_HID, 0},
-	{SONY_PIC_HID, 0},
-	{"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, sony_device_ids);
-
-static const struct acpi_device_id sony_nc_device_ids[] = {
-	{SONY_NC_HID, 0},
-	{"", 0},
-};
-
-static struct acpi_driver sony_nc_driver = {
-	.name = SONY_NC_DRIVER_NAME,
-	.class = SONY_NC_CLASS,
-	.ids = sony_nc_device_ids,
-	.owner = THIS_MODULE,
-	.ops = {
-		.add = sony_nc_add,
-		.remove = sony_nc_remove,
-		.resume = sony_nc_resume,
-		},
-};
-
-/*********** SPIC (SNY6001) Device ***********/
-
-#define SONYPI_DEVICE_TYPE1	0x00000001
-#define SONYPI_DEVICE_TYPE2	0x00000002
-#define SONYPI_DEVICE_TYPE3	0x00000004
-#define SONYPI_DEVICE_TYPE4	0x00000008
-
-#define SONYPI_TYPE1_OFFSET	0x04
-#define SONYPI_TYPE2_OFFSET	0x12
-#define SONYPI_TYPE3_OFFSET	0x12
-#define SONYPI_TYPE4_OFFSET	0x12
-
-struct sony_pic_ioport {
-	struct acpi_resource_io	io1;
-	struct acpi_resource_io	io2;
-	struct list_head	list;
-};
-
-struct sony_pic_irq {
-	struct acpi_resource_irq	irq;
-	struct list_head		list;
-};
-
-struct sonypi_eventtypes {
-	u8			data;
-	unsigned long		mask;
-	struct sonypi_event	*events;
-};
-
-struct device_ctrl {
-	int				model;
-	int				(*handle_irq)(const u8, const u8);
-	u16				evport_offset;
-	u8				has_camera;
-	u8				has_bluetooth;
-	u8				has_wwan;
-	struct sonypi_eventtypes	*event_types;
-};
-
-struct sony_pic_dev {
-	struct device_ctrl	*control;
-	struct acpi_device	*acpi_dev;
-	struct sony_pic_irq	*cur_irq;
-	struct sony_pic_ioport	*cur_ioport;
-	struct list_head	interrupts;
-	struct list_head	ioports;
-	struct mutex		lock;
-	u8			camera_power;
-	u8			bluetooth_power;
-	u8			wwan_power;
-};
-
-static struct sony_pic_dev spic_dev = {
-	.interrupts	= LIST_HEAD_INIT(spic_dev.interrupts),
-	.ioports	= LIST_HEAD_INIT(spic_dev.ioports),
-};
-
-/* Event masks */
-#define SONYPI_JOGGER_MASK			0x00000001
-#define SONYPI_CAPTURE_MASK			0x00000002
-#define SONYPI_FNKEY_MASK			0x00000004
-#define SONYPI_BLUETOOTH_MASK			0x00000008
-#define SONYPI_PKEY_MASK			0x00000010
-#define SONYPI_BACK_MASK			0x00000020
-#define SONYPI_HELP_MASK			0x00000040
-#define SONYPI_LID_MASK				0x00000080
-#define SONYPI_ZOOM_MASK			0x00000100
-#define SONYPI_THUMBPHRASE_MASK			0x00000200
-#define SONYPI_MEYE_MASK			0x00000400
-#define SONYPI_MEMORYSTICK_MASK			0x00000800
-#define SONYPI_BATTERY_MASK			0x00001000
-#define SONYPI_WIRELESS_MASK			0x00002000
-
-struct sonypi_event {
-	u8	data;
-	u8	event;
-};
-
-/* The set of possible button release events */
-static struct sonypi_event sonypi_releaseev[] = {
-	{ 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED },
-	{ 0, 0 }
-};
-
-/* The set of possible jogger events  */
-static struct sonypi_event sonypi_joggerev[] = {
-	{ 0x1f, SONYPI_EVENT_JOGDIAL_UP },
-	{ 0x01, SONYPI_EVENT_JOGDIAL_DOWN },
-	{ 0x5f, SONYPI_EVENT_JOGDIAL_UP_PRESSED },
-	{ 0x41, SONYPI_EVENT_JOGDIAL_DOWN_PRESSED },
-	{ 0x1e, SONYPI_EVENT_JOGDIAL_FAST_UP },
-	{ 0x02, SONYPI_EVENT_JOGDIAL_FAST_DOWN },
-	{ 0x5e, SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED },
-	{ 0x42, SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED },
-	{ 0x1d, SONYPI_EVENT_JOGDIAL_VFAST_UP },
-	{ 0x03, SONYPI_EVENT_JOGDIAL_VFAST_DOWN },
-	{ 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED },
-	{ 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED },
-	{ 0x40, SONYPI_EVENT_JOGDIAL_PRESSED },
-	{ 0, 0 }
-};
-
-/* The set of possible capture button events */
-static struct sonypi_event sonypi_captureev[] = {
-	{ 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED },
-	{ 0x07, SONYPI_EVENT_CAPTURE_PRESSED },
-	{ 0x40, SONYPI_EVENT_CAPTURE_PRESSED },
-	{ 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED },
-	{ 0, 0 }
-};
-
-/* The set of possible fnkeys events */
-static struct sonypi_event sonypi_fnkeyev[] = {
-	{ 0x10, SONYPI_EVENT_FNKEY_ESC },
-	{ 0x11, SONYPI_EVENT_FNKEY_F1 },
-	{ 0x12, SONYPI_EVENT_FNKEY_F2 },
-	{ 0x13, SONYPI_EVENT_FNKEY_F3 },
-	{ 0x14, SONYPI_EVENT_FNKEY_F4 },
-	{ 0x15, SONYPI_EVENT_FNKEY_F5 },
-	{ 0x16, SONYPI_EVENT_FNKEY_F6 },
-	{ 0x17, SONYPI_EVENT_FNKEY_F7 },
-	{ 0x18, SONYPI_EVENT_FNKEY_F8 },
-	{ 0x19, SONYPI_EVENT_FNKEY_F9 },
-	{ 0x1a, SONYPI_EVENT_FNKEY_F10 },
-	{ 0x1b, SONYPI_EVENT_FNKEY_F11 },
-	{ 0x1c, SONYPI_EVENT_FNKEY_F12 },
-	{ 0x1f, SONYPI_EVENT_FNKEY_RELEASED },
-	{ 0x21, SONYPI_EVENT_FNKEY_1 },
-	{ 0x22, SONYPI_EVENT_FNKEY_2 },
-	{ 0x31, SONYPI_EVENT_FNKEY_D },
-	{ 0x32, SONYPI_EVENT_FNKEY_E },
-	{ 0x33, SONYPI_EVENT_FNKEY_F },
-	{ 0x34, SONYPI_EVENT_FNKEY_S },
-	{ 0x35, SONYPI_EVENT_FNKEY_B },
-	{ 0x36, SONYPI_EVENT_FNKEY_ONLY },
-	{ 0, 0 }
-};
-
-/* The set of possible program key events */
-static struct sonypi_event sonypi_pkeyev[] = {
-	{ 0x01, SONYPI_EVENT_PKEY_P1 },
-	{ 0x02, SONYPI_EVENT_PKEY_P2 },
-	{ 0x04, SONYPI_EVENT_PKEY_P3 },
-	{ 0, 0 }
-};
-
-/* The set of possible bluetooth events */
-static struct sonypi_event sonypi_blueev[] = {
-	{ 0x55, SONYPI_EVENT_BLUETOOTH_PRESSED },
-	{ 0x59, SONYPI_EVENT_BLUETOOTH_ON },
-	{ 0x5a, SONYPI_EVENT_BLUETOOTH_OFF },
-	{ 0, 0 }
-};
-
-/* The set of possible wireless events */
-static struct sonypi_event sonypi_wlessev[] = {
-	{ 0x59, SONYPI_EVENT_WIRELESS_ON },
-	{ 0x5a, SONYPI_EVENT_WIRELESS_OFF },
-	{ 0, 0 }
-};
-
-/* The set of possible back button events */
-static struct sonypi_event sonypi_backev[] = {
-	{ 0x20, SONYPI_EVENT_BACK_PRESSED },
-	{ 0, 0 }
-};
-
-/* The set of possible help button events */
-static struct sonypi_event sonypi_helpev[] = {
-	{ 0x3b, SONYPI_EVENT_HELP_PRESSED },
-	{ 0, 0 }
-};
-
-
-/* The set of possible lid events */
-static struct sonypi_event sonypi_lidev[] = {
-	{ 0x51, SONYPI_EVENT_LID_CLOSED },
-	{ 0x50, SONYPI_EVENT_LID_OPENED },
-	{ 0, 0 }
-};
-
-/* The set of possible zoom events */
-static struct sonypi_event sonypi_zoomev[] = {
-	{ 0x39, SONYPI_EVENT_ZOOM_PRESSED },
-	{ 0x10, SONYPI_EVENT_ZOOM_IN_PRESSED },
-	{ 0x20, SONYPI_EVENT_ZOOM_OUT_PRESSED },
-	{ 0, 0 }
-};
-
-/* The set of possible thumbphrase events */
-static struct sonypi_event sonypi_thumbphraseev[] = {
-	{ 0x3a, SONYPI_EVENT_THUMBPHRASE_PRESSED },
-	{ 0, 0 }
-};
-
-/* The set of possible motioneye camera events */
-static struct sonypi_event sonypi_meyeev[] = {
-	{ 0x00, SONYPI_EVENT_MEYE_FACE },
-	{ 0x01, SONYPI_EVENT_MEYE_OPPOSITE },
-	{ 0, 0 }
-};
-
-/* The set of possible memorystick events */
-static struct sonypi_event sonypi_memorystickev[] = {
-	{ 0x53, SONYPI_EVENT_MEMORYSTICK_INSERT },
-	{ 0x54, SONYPI_EVENT_MEMORYSTICK_EJECT },
-	{ 0, 0 }
-};
-
-/* The set of possible battery events */
-static struct sonypi_event sonypi_batteryev[] = {
-	{ 0x20, SONYPI_EVENT_BATTERY_INSERT },
-	{ 0x30, SONYPI_EVENT_BATTERY_REMOVE },
-	{ 0, 0 }
-};
-
-static struct sonypi_eventtypes type1_events[] = {
-	{ 0, 0xffffffff, sonypi_releaseev },
-	{ 0x70, SONYPI_MEYE_MASK, sonypi_meyeev },
-	{ 0x30, SONYPI_LID_MASK, sonypi_lidev },
-	{ 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev },
-	{ 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev },
-	{ 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
-	{ 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
-	{ 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev },
-	{ 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
-	{ 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev },
-	{ 0 },
-};
-static struct sonypi_eventtypes type2_events[] = {
-	{ 0, 0xffffffff, sonypi_releaseev },
-	{ 0x38, SONYPI_LID_MASK, sonypi_lidev },
-	{ 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev },
-	{ 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev },
-	{ 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
-	{ 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
-	{ 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
-	{ 0x11, SONYPI_BACK_MASK, sonypi_backev },
-	{ 0x21, SONYPI_HELP_MASK, sonypi_helpev },
-	{ 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
-	{ 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
-	{ 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
-	{ 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
-	{ 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
-	{ 0 },
-};
-static struct sonypi_eventtypes type3_events[] = {
-	{ 0, 0xffffffff, sonypi_releaseev },
-	{ 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
-	{ 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
-	{ 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
-	{ 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
-	{ 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
-	{ 0 },
-};
-static struct sonypi_eventtypes type4_events[] = {
-	{ 0, 0xffffffff, sonypi_releaseev },
-	{ 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
-	{ 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
-	{ 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
-	{ 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
-	{ 0x05, SONYPI_PKEY_MASK, sonypi_pkeyev },
-	{ 0x05, SONYPI_ZOOM_MASK, sonypi_zoomev },
-	{ 0x05, SONYPI_CAPTURE_MASK, sonypi_captureev },
-	{ 0 },
-};
-
-/* low level spic calls */
-#define ITERATIONS_LONG		10000
-#define ITERATIONS_SHORT	10
-#define wait_on_command(command, iterations) {				\
-	unsigned int n = iterations;					\
-	while (--n && (command))					\
-		udelay(1);						\
-	if (!n)								\
-		dprintk("command failed at %s : %s (line %d)\n",	\
-				__FILE__, __func__, __LINE__);	\
-}
-
-static u8 sony_pic_call1(u8 dev)
-{
-	u8 v1, v2;
-
-	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
-			ITERATIONS_LONG);
-	outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
-	v1 = inb_p(spic_dev.cur_ioport->io1.minimum + 4);
-	v2 = inb_p(spic_dev.cur_ioport->io1.minimum);
-	dprintk("sony_pic_call1(0x%.2x): 0x%.4x\n", dev, (v2 << 8) | v1);
-	return v2;
-}
-
-static u8 sony_pic_call2(u8 dev, u8 fn)
-{
-	u8 v1;
-
-	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
-			ITERATIONS_LONG);
-	outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
-	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
-			ITERATIONS_LONG);
-	outb(fn, spic_dev.cur_ioport->io1.minimum);
-	v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
-	dprintk("sony_pic_call2(0x%.2x - 0x%.2x): 0x%.4x\n", dev, fn, v1);
-	return v1;
-}
-
-static u8 sony_pic_call3(u8 dev, u8 fn, u8 v)
-{
-	u8 v1;
-
-	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
-	outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
-	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
-	outb(fn, spic_dev.cur_ioport->io1.minimum);
-	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
-	outb(v, spic_dev.cur_ioport->io1.minimum);
-	v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
-	dprintk("sony_pic_call3(0x%.2x - 0x%.2x - 0x%.2x): 0x%.4x\n",
-			dev, fn, v, v1);
-	return v1;
-}
-
-/*
- * minidrivers for SPIC models
- */
-static int type4_handle_irq(const u8 data_mask, const u8 ev)
-{
-	/*
-	 * 0x31 could mean we have to take some extra action and wait for
-	 * the next irq for some Type4 models; it will generate a new
-	 * irq and we can read new data from the device:
-	 *  - 0x5c and 0x5f requires 0xA0
-	 *  - 0x61 requires 0xB3
-	 */
-	if (data_mask == 0x31) {
-		if (ev == 0x5c || ev == 0x5f)
-			sony_pic_call1(0xA0);
-		else if (ev == 0x61)
-			sony_pic_call1(0xB3);
-		return 0;
-	}
-	return 1;
-}
-
-static struct device_ctrl spic_types[] = {
-	{
-		.model = SONYPI_DEVICE_TYPE1,
-		.handle_irq = NULL,
-		.evport_offset = SONYPI_TYPE1_OFFSET,
-		.event_types = type1_events,
-	},
-	{
-		.model = SONYPI_DEVICE_TYPE2,
-		.handle_irq = NULL,
-		.evport_offset = SONYPI_TYPE2_OFFSET,
-		.event_types = type2_events,
-	},
-	{
-		.model = SONYPI_DEVICE_TYPE3,
-		.handle_irq = NULL,
-		.evport_offset = SONYPI_TYPE3_OFFSET,
-		.event_types = type3_events,
-	},
-	{
-		.model = SONYPI_DEVICE_TYPE4,
-		.handle_irq = type4_handle_irq,
-		.evport_offset = SONYPI_TYPE4_OFFSET,
-		.event_types = type4_events,
-	},
-};
-
-static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
-{
-	struct pci_dev *pcidev;
-
-	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
-			PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
-	if (pcidev) {
-		dev->control = &spic_types[0];
-		goto out;
-	}
-
-	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
-			PCI_DEVICE_ID_INTEL_ICH6_1, NULL);
-	if (pcidev) {
-		dev->control = &spic_types[2];
-		goto out;
-	}
-
-	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
-			PCI_DEVICE_ID_INTEL_ICH7_1, NULL);
-	if (pcidev) {
-		dev->control = &spic_types[3];
-		goto out;
-	}
-
-	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
-			PCI_DEVICE_ID_INTEL_ICH8_4, NULL);
-	if (pcidev) {
-		dev->control = &spic_types[3];
-		goto out;
-	}
-
-	/* default */
-	dev->control = &spic_types[1];
-
-out:
-	if (pcidev)
-		pci_dev_put(pcidev);
-
-	printk(KERN_INFO DRV_PFX "detected Type%d model\n",
-			dev->control->model == SONYPI_DEVICE_TYPE1 ? 1 :
-			dev->control->model == SONYPI_DEVICE_TYPE2 ? 2 :
-			dev->control->model == SONYPI_DEVICE_TYPE3 ? 3 : 4);
-}
-
-/* camera tests and poweron/poweroff */
-#define SONYPI_CAMERA_PICTURE		5
-#define SONYPI_CAMERA_CONTROL		0x10
-
-#define SONYPI_CAMERA_BRIGHTNESS		0
-#define SONYPI_CAMERA_CONTRAST			1
-#define SONYPI_CAMERA_HUE			2
-#define SONYPI_CAMERA_COLOR			3
-#define SONYPI_CAMERA_SHARPNESS			4
-
-#define SONYPI_CAMERA_EXPOSURE_MASK		0xC
-#define SONYPI_CAMERA_WHITE_BALANCE_MASK	0x3
-#define SONYPI_CAMERA_PICTURE_MODE_MASK		0x30
-#define SONYPI_CAMERA_MUTE_MASK			0x40
-
-/* the rest don't need a loop until not 0xff */
-#define SONYPI_CAMERA_AGC			6
-#define SONYPI_CAMERA_AGC_MASK			0x30
-#define SONYPI_CAMERA_SHUTTER_MASK 		0x7
-
-#define SONYPI_CAMERA_SHUTDOWN_REQUEST		7
-#define SONYPI_CAMERA_CONTROL			0x10
-
-#define SONYPI_CAMERA_STATUS 			7
-#define SONYPI_CAMERA_STATUS_READY 		0x2
-#define SONYPI_CAMERA_STATUS_POSITION		0x4
-
-#define SONYPI_DIRECTION_BACKWARDS 		0x4
-
-#define SONYPI_CAMERA_REVISION 			8
-#define SONYPI_CAMERA_ROMVERSION 		9
-
-static int __sony_pic_camera_ready(void)
-{
-	u8 v;
-
-	v = sony_pic_call2(0x8f, SONYPI_CAMERA_STATUS);
-	return (v != 0xff && (v & SONYPI_CAMERA_STATUS_READY));
-}
-
-static int __sony_pic_camera_off(void)
-{
-	if (!camera) {
-		printk(KERN_WARNING DRV_PFX "camera control not enabled\n");
-		return -ENODEV;
-	}
-
-	wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_PICTURE,
-				SONYPI_CAMERA_MUTE_MASK),
-			ITERATIONS_SHORT);
-
-	if (spic_dev.camera_power) {
-		sony_pic_call2(0x91, 0);
-		spic_dev.camera_power = 0;
-	}
-	return 0;
-}
-
-static int __sony_pic_camera_on(void)
-{
-	int i, j, x;
-
-	if (!camera) {
-		printk(KERN_WARNING DRV_PFX "camera control not enabled\n");
-		return -ENODEV;
-	}
-
-	if (spic_dev.camera_power)
-		return 0;
-
-	for (j = 5; j > 0; j--) {
-
-		for (x = 0; x < 100 && sony_pic_call2(0x91, 0x1); x++)
-			msleep(10);
-		sony_pic_call1(0x93);
-
-		for (i = 400; i > 0; i--) {
-			if (__sony_pic_camera_ready())
-				break;
-			msleep(10);
-		}
-		if (i)
-			break;
-	}
-
-	if (j == 0) {
-		printk(KERN_WARNING DRV_PFX "failed to power on camera\n");
-		return -ENODEV;
-	}
-
-	wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_CONTROL,
-				0x5a),
-			ITERATIONS_SHORT);
-
-	spic_dev.camera_power = 1;
-	return 0;
-}
-
-/* External camera command (exported to the motion eye v4l driver) */
-int sony_pic_camera_command(int command, u8 value)
-{
-	if (!camera)
-		return -EIO;
-
-	mutex_lock(&spic_dev.lock);
-
-	switch (command) {
-	case SONY_PIC_COMMAND_SETCAMERA:
-		if (value)
-			__sony_pic_camera_on();
-		else
-			__sony_pic_camera_off();
-		break;
-	case SONY_PIC_COMMAND_SETCAMERABRIGHTNESS:
-		wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_BRIGHTNESS, value),
-				ITERATIONS_SHORT);
-		break;
-	case SONY_PIC_COMMAND_SETCAMERACONTRAST:
-		wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_CONTRAST, value),
-				ITERATIONS_SHORT);
-		break;
-	case SONY_PIC_COMMAND_SETCAMERAHUE:
-		wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_HUE, value),
-				ITERATIONS_SHORT);
-		break;
-	case SONY_PIC_COMMAND_SETCAMERACOLOR:
-		wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_COLOR, value),
-				ITERATIONS_SHORT);
-		break;
-	case SONY_PIC_COMMAND_SETCAMERASHARPNESS:
-		wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_SHARPNESS, value),
-				ITERATIONS_SHORT);
-		break;
-	case SONY_PIC_COMMAND_SETCAMERAPICTURE:
-		wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_PICTURE, value),
-				ITERATIONS_SHORT);
-		break;
-	case SONY_PIC_COMMAND_SETCAMERAAGC:
-		wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_AGC, value),
-				ITERATIONS_SHORT);
-		break;
-	default:
-		printk(KERN_ERR DRV_PFX "sony_pic_camera_command invalid: %d\n",
-		       command);
-		break;
-	}
-	mutex_unlock(&spic_dev.lock);
-	return 0;
-}
-EXPORT_SYMBOL(sony_pic_camera_command);
-
-/* gprs/edge modem (SZ460N and SZ210P), thanks to Joshua Wise */
-static void sony_pic_set_wwanpower(u8 state)
-{
-	state = !!state;
-	mutex_lock(&spic_dev.lock);
-	if (spic_dev.wwan_power == state) {
-		mutex_unlock(&spic_dev.lock);
-		return;
-	}
-	sony_pic_call2(0xB0, state);
-	spic_dev.wwan_power = state;
-	mutex_unlock(&spic_dev.lock);
-}
-
-static ssize_t sony_pic_wwanpower_store(struct device *dev,
-		struct device_attribute *attr,
-		const char *buffer, size_t count)
-{
-	unsigned long value;
-	if (count > 31)
-		return -EINVAL;
-
-	value = simple_strtoul(buffer, NULL, 10);
-	sony_pic_set_wwanpower(value);
-
-	return count;
-}
-
-static ssize_t sony_pic_wwanpower_show(struct device *dev,
-		struct device_attribute *attr, char *buffer)
-{
-	ssize_t count;
-	mutex_lock(&spic_dev.lock);
-	count = snprintf(buffer, PAGE_SIZE, "%d\n", spic_dev.wwan_power);
-	mutex_unlock(&spic_dev.lock);
-	return count;
-}
-
-/* bluetooth subsystem power state */
-static void __sony_pic_set_bluetoothpower(u8 state)
-{
-	state = !!state;
-	if (spic_dev.bluetooth_power == state)
-		return;
-	sony_pic_call2(0x96, state);
-	sony_pic_call1(0x82);
-	spic_dev.bluetooth_power = state;
-}
-
-static ssize_t sony_pic_bluetoothpower_store(struct device *dev,
-		struct device_attribute *attr,
-		const char *buffer, size_t count)
-{
-	unsigned long value;
-	if (count > 31)
-		return -EINVAL;
-
-	value = simple_strtoul(buffer, NULL, 10);
-	mutex_lock(&spic_dev.lock);
-	__sony_pic_set_bluetoothpower(value);
-	mutex_unlock(&spic_dev.lock);
-
-	return count;
-}
-
-static ssize_t sony_pic_bluetoothpower_show(struct device *dev,
-		struct device_attribute *attr, char *buffer)
-{
-	ssize_t count = 0;
-	mutex_lock(&spic_dev.lock);
-	count = snprintf(buffer, PAGE_SIZE, "%d\n", spic_dev.bluetooth_power);
-	mutex_unlock(&spic_dev.lock);
-	return count;
-}
-
-/* fan speed */
-/* FAN0 information (reverse engineered from ACPI tables) */
-#define SONY_PIC_FAN0_STATUS	0x93
-static int sony_pic_set_fanspeed(unsigned long value)
-{
-	return ec_write(SONY_PIC_FAN0_STATUS, value);
-}
-
-static int sony_pic_get_fanspeed(u8 *value)
-{
-	return ec_read(SONY_PIC_FAN0_STATUS, value);
-}
-
-static ssize_t sony_pic_fanspeed_store(struct device *dev,
-		struct device_attribute *attr,
-		const char *buffer, size_t count)
-{
-	unsigned long value;
-	if (count > 31)
-		return -EINVAL;
-
-	value = simple_strtoul(buffer, NULL, 10);
-	if (sony_pic_set_fanspeed(value))
-		return -EIO;
-
-	return count;
-}
-
-static ssize_t sony_pic_fanspeed_show(struct device *dev,
-		struct device_attribute *attr, char *buffer)
-{
-	u8 value = 0;
-	if (sony_pic_get_fanspeed(&value))
-		return -EIO;
-
-	return snprintf(buffer, PAGE_SIZE, "%d\n", value);
-}
-
-#define SPIC_ATTR(_name, _mode)					\
-struct device_attribute spic_attr_##_name = __ATTR(_name,	\
-		_mode, sony_pic_## _name ##_show,		\
-		sony_pic_## _name ##_store)
-
-static SPIC_ATTR(bluetoothpower, 0644);
-static SPIC_ATTR(wwanpower, 0644);
-static SPIC_ATTR(fanspeed, 0644);
-
-static struct attribute *spic_attributes[] = {
-	&spic_attr_bluetoothpower.attr,
-	&spic_attr_wwanpower.attr,
-	&spic_attr_fanspeed.attr,
-	NULL
-};
-
-static struct attribute_group spic_attribute_group = {
-	.attrs = spic_attributes
-};
-
-/******** SONYPI compatibility **********/
-#ifdef CONFIG_SONYPI_COMPAT
-
-/* battery / brightness / temperature  addresses */
-#define SONYPI_BAT_FLAGS	0x81
-#define SONYPI_LCD_LIGHT	0x96
-#define SONYPI_BAT1_PCTRM	0xa0
-#define SONYPI_BAT1_LEFT	0xa2
-#define SONYPI_BAT1_MAXRT	0xa4
-#define SONYPI_BAT2_PCTRM	0xa8
-#define SONYPI_BAT2_LEFT	0xaa
-#define SONYPI_BAT2_MAXRT	0xac
-#define SONYPI_BAT1_MAXTK	0xb0
-#define SONYPI_BAT1_FULL	0xb2
-#define SONYPI_BAT2_MAXTK	0xb8
-#define SONYPI_BAT2_FULL	0xba
-#define SONYPI_TEMP_STATUS	0xC1
-
-struct sonypi_compat_s {
-	struct fasync_struct	*fifo_async;
-	struct kfifo		*fifo;
-	spinlock_t		fifo_lock;
-	wait_queue_head_t	fifo_proc_list;
-	atomic_t		open_count;
-};
-static struct sonypi_compat_s sonypi_compat = {
-	.open_count = ATOMIC_INIT(0),
-};
-
-static int sonypi_misc_fasync(int fd, struct file *filp, int on)
-{
-	int retval;
-
-	retval = fasync_helper(fd, filp, on, &sonypi_compat.fifo_async);
-	if (retval < 0)
-		return retval;
-	return 0;
-}
-
-static int sonypi_misc_release(struct inode *inode, struct file *file)
-{
-	atomic_dec(&sonypi_compat.open_count);
-	return 0;
-}
-
-static int sonypi_misc_open(struct inode *inode, struct file *file)
-{
-	/* Flush input queue on first open */
-	lock_kernel();
-	if (atomic_inc_return(&sonypi_compat.open_count) == 1)
-		kfifo_reset(sonypi_compat.fifo);
-	unlock_kernel();
-	return 0;
-}
-
-static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
-				size_t count, loff_t *pos)
-{
-	ssize_t ret;
-	unsigned char c;
-
-	if ((kfifo_len(sonypi_compat.fifo) == 0) &&
-	    (file->f_flags & O_NONBLOCK))
-		return -EAGAIN;
-
-	ret = wait_event_interruptible(sonypi_compat.fifo_proc_list,
-				       kfifo_len(sonypi_compat.fifo) != 0);
-	if (ret)
-		return ret;
-
-	while (ret < count &&
-	       (kfifo_get(sonypi_compat.fifo, &c, sizeof(c)) == sizeof(c))) {
-		if (put_user(c, buf++))
-			return -EFAULT;
-		ret++;
-	}
-
-	if (ret > 0) {
-		struct inode *inode = file->f_path.dentry->d_inode;
-		inode->i_atime = current_fs_time(inode->i_sb);
-	}
-
-	return ret;
-}
-
-static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
-{
-	poll_wait(file, &sonypi_compat.fifo_proc_list, wait);
-	if (kfifo_len(sonypi_compat.fifo))
-		return POLLIN | POLLRDNORM;
-	return 0;
-}
-
-static int ec_read16(u8 addr, u16 *value)
-{
-	u8 val_lb, val_hb;
-	if (ec_read(addr, &val_lb))
-		return -1;
-	if (ec_read(addr + 1, &val_hb))
-		return -1;
-	*value = val_lb | (val_hb << 8);
-	return 0;
-}
-
-static int sonypi_misc_ioctl(struct inode *ip, struct file *fp,
-			     unsigned int cmd, unsigned long arg)
-{
-	int ret = 0;
-	void __user *argp = (void __user *)arg;
-	u8 val8;
-	u16 val16;
-	int value;
-
-	mutex_lock(&spic_dev.lock);
-	switch (cmd) {
-	case SONYPI_IOCGBRT:
-		if (sony_backlight_device == NULL) {
-			ret = -EIO;
-			break;
-		}
-		if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) {
-			ret = -EIO;
-			break;
-		}
-		val8 = ((value & 0xff) - 1) << 5;
-		if (copy_to_user(argp, &val8, sizeof(val8)))
-				ret = -EFAULT;
-		break;
-	case SONYPI_IOCSBRT:
-		if (sony_backlight_device == NULL) {
-			ret = -EIO;
-			break;
-		}
-		if (copy_from_user(&val8, argp, sizeof(val8))) {
-			ret = -EFAULT;
-			break;
-		}
-		if (acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
-				(val8 >> 5) + 1, NULL)) {
-			ret = -EIO;
-			break;
-		}
-		/* sync the backlight device status */
-		sony_backlight_device->props.brightness =
-		    sony_backlight_get_brightness(sony_backlight_device);
-		break;
-	case SONYPI_IOCGBAT1CAP:
-		if (ec_read16(SONYPI_BAT1_FULL, &val16)) {
-			ret = -EIO;
-			break;
-		}
-		if (copy_to_user(argp, &val16, sizeof(val16)))
-			ret = -EFAULT;
-		break;
-	case SONYPI_IOCGBAT1REM:
-		if (ec_read16(SONYPI_BAT1_LEFT, &val16)) {
-			ret = -EIO;
-			break;
-		}
-		if (copy_to_user(argp, &val16, sizeof(val16)))
-			ret = -EFAULT;
-		break;
-	case SONYPI_IOCGBAT2CAP:
-		if (ec_read16(SONYPI_BAT2_FULL, &val16)) {
-			ret = -EIO;
-			break;
-		}
-		if (copy_to_user(argp, &val16, sizeof(val16)))
-			ret = -EFAULT;
-		break;
-	case SONYPI_IOCGBAT2REM:
-		if (ec_read16(SONYPI_BAT2_LEFT, &val16)) {
-			ret = -EIO;
-			break;
-		}
-		if (copy_to_user(argp, &val16, sizeof(val16)))
-			ret = -EFAULT;
-		break;
-	case SONYPI_IOCGBATFLAGS:
-		if (ec_read(SONYPI_BAT_FLAGS, &val8)) {
-			ret = -EIO;
-			break;
-		}
-		val8 &= 0x07;
-		if (copy_to_user(argp, &val8, sizeof(val8)))
-			ret = -EFAULT;
-		break;
-	case SONYPI_IOCGBLUE:
-		val8 = spic_dev.bluetooth_power;
-		if (copy_to_user(argp, &val8, sizeof(val8)))
-			ret = -EFAULT;
-		break;
-	case SONYPI_IOCSBLUE:
-		if (copy_from_user(&val8, argp, sizeof(val8))) {
-			ret = -EFAULT;
-			break;
-		}
-		__sony_pic_set_bluetoothpower(val8);
-		break;
-	/* FAN Controls */
-	case SONYPI_IOCGFAN:
-		if (sony_pic_get_fanspeed(&val8)) {
-			ret = -EIO;
-			break;
-		}
-		if (copy_to_user(argp, &val8, sizeof(val8)))
-			ret = -EFAULT;
-		break;
-	case SONYPI_IOCSFAN:
-		if (copy_from_user(&val8, argp, sizeof(val8))) {
-			ret = -EFAULT;
-			break;
-		}
-		if (sony_pic_set_fanspeed(val8))
-			ret = -EIO;
-		break;
-	/* GET Temperature (useful under APM) */
-	case SONYPI_IOCGTEMP:
-		if (ec_read(SONYPI_TEMP_STATUS, &val8)) {
-			ret = -EIO;
-			break;
-		}
-		if (copy_to_user(argp, &val8, sizeof(val8)))
-			ret = -EFAULT;
-		break;
-	default:
-		ret = -EINVAL;
-	}
-	mutex_unlock(&spic_dev.lock);
-	return ret;
-}
-
-static const struct file_operations sonypi_misc_fops = {
-	.owner		= THIS_MODULE,
-	.read		= sonypi_misc_read,
-	.poll		= sonypi_misc_poll,
-	.open		= sonypi_misc_open,
-	.release	= sonypi_misc_release,
-	.fasync		= sonypi_misc_fasync,
-	.ioctl		= sonypi_misc_ioctl,
-};
-
-static struct miscdevice sonypi_misc_device = {
-	.minor		= MISC_DYNAMIC_MINOR,
-	.name		= "sonypi",
-	.fops		= &sonypi_misc_fops,
-};
-
-static void sonypi_compat_report_event(u8 event)
-{
-	kfifo_put(sonypi_compat.fifo, (unsigned char *)&event, sizeof(event));
-	kill_fasync(&sonypi_compat.fifo_async, SIGIO, POLL_IN);
-	wake_up_interruptible(&sonypi_compat.fifo_proc_list);
-}
-
-static int sonypi_compat_init(void)
-{
-	int error;
-
-	spin_lock_init(&sonypi_compat.fifo_lock);
-	sonypi_compat.fifo = kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL,
-					 &sonypi_compat.fifo_lock);
-	if (IS_ERR(sonypi_compat.fifo)) {
-		printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n");
-		return PTR_ERR(sonypi_compat.fifo);
-	}
-
-	init_waitqueue_head(&sonypi_compat.fifo_proc_list);
-
-	if (minor != -1)
-		sonypi_misc_device.minor = minor;
-	error = misc_register(&sonypi_misc_device);
-	if (error) {
-		printk(KERN_ERR DRV_PFX "misc_register failed\n");
-		goto err_free_kfifo;
-	}
-	if (minor == -1)
-		printk(KERN_INFO DRV_PFX "device allocated minor is %d\n",
-		       sonypi_misc_device.minor);
-
-	return 0;
-
-err_free_kfifo:
-	kfifo_free(sonypi_compat.fifo);
-	return error;
-}
-
-static void sonypi_compat_exit(void)
-{
-	misc_deregister(&sonypi_misc_device);
-	kfifo_free(sonypi_compat.fifo);
-}
-#else
-static int sonypi_compat_init(void) { return 0; }
-static void sonypi_compat_exit(void) { }
-static void sonypi_compat_report_event(u8 event) { }
-#endif /* CONFIG_SONYPI_COMPAT */
-
-/*
- * ACPI callbacks
- */
-static acpi_status
-sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
-{
-	u32 i;
-	struct sony_pic_dev *dev = (struct sony_pic_dev *)context;
-
-	switch (resource->type) {
-	case ACPI_RESOURCE_TYPE_START_DEPENDENT:
-		{
-			/* start IO enumeration */
-			struct sony_pic_ioport *ioport = kzalloc(sizeof(*ioport), GFP_KERNEL);
-			if (!ioport)
-				return AE_ERROR;
-
-			list_add(&ioport->list, &dev->ioports);
-			return AE_OK;
-		}
-
-	case ACPI_RESOURCE_TYPE_END_DEPENDENT:
-		/* end IO enumeration */
-		return AE_OK;
-
-	case ACPI_RESOURCE_TYPE_IRQ:
-		{
-			struct acpi_resource_irq *p = &resource->data.irq;
-			struct sony_pic_irq *interrupt = NULL;
-			if (!p || !p->interrupt_count) {
-				/*
-				 * IRQ descriptors may have no IRQ# bits set,
-				 * particularly those w/ _STA disabled
-				 */
-				dprintk("Blank IRQ resource\n");
-				return AE_OK;
-			}
-			for (i = 0; i < p->interrupt_count; i++) {
-				if (!p->interrupts[i]) {
-					printk(KERN_WARNING DRV_PFX
-							"Invalid IRQ %d\n",
-							p->interrupts[i]);
-					continue;
-				}
-				interrupt = kzalloc(sizeof(*interrupt),
-						GFP_KERNEL);
-				if (!interrupt)
-					return AE_ERROR;
-
-				list_add(&interrupt->list, &dev->interrupts);
-				interrupt->irq.triggering = p->triggering;
-				interrupt->irq.polarity = p->polarity;
-				interrupt->irq.sharable = p->sharable;
-				interrupt->irq.interrupt_count = 1;
-				interrupt->irq.interrupts[0] = p->interrupts[i];
-			}
-			return AE_OK;
-		}
-	case ACPI_RESOURCE_TYPE_IO:
-		{
-			struct acpi_resource_io *io = &resource->data.io;
-			struct sony_pic_ioport *ioport =
-				list_first_entry(&dev->ioports, struct sony_pic_ioport, list);
-			if (!io) {
-				dprintk("Blank IO resource\n");
-				return AE_OK;
-			}
-
-			if (!ioport->io1.minimum) {
-				memcpy(&ioport->io1, io, sizeof(*io));
-				dprintk("IO1 at 0x%.4x (0x%.2x)\n", ioport->io1.minimum,
-						ioport->io1.address_length);
-			}
-			else if (!ioport->io2.minimum) {
-				memcpy(&ioport->io2, io, sizeof(*io));
-				dprintk("IO2 at 0x%.4x (0x%.2x)\n", ioport->io2.minimum,
-						ioport->io2.address_length);
-			}
-			else {
-				printk(KERN_ERR DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n");
-				return AE_ERROR;
-			}
-			return AE_OK;
-		}
-	default:
-		dprintk("Resource %d isn't an IRQ nor an IO port\n",
-				resource->type);
-
-	case ACPI_RESOURCE_TYPE_END_TAG:
-		return AE_OK;
-	}
-	return AE_CTRL_TERMINATE;
-}
-
-static int sony_pic_possible_resources(struct acpi_device *device)
-{
-	int result = 0;
-	acpi_status status = AE_OK;
-
-	if (!device)
-		return -EINVAL;
-
-	/* get device status */
-	/* see acpi_pci_link_get_current acpi_pci_link_get_possible */
-	dprintk("Evaluating _STA\n");
-	result = acpi_bus_get_status(device);
-	if (result) {
-		printk(KERN_WARNING DRV_PFX "Unable to read status\n");
-		goto end;
-	}
-
-	if (!device->status.enabled)
-		dprintk("Device disabled\n");
-	else
-		dprintk("Device enabled\n");
-
-	/*
-	 * Query and parse 'method'
-	 */
-	dprintk("Evaluating %s\n", METHOD_NAME__PRS);
-	status = acpi_walk_resources(device->handle, METHOD_NAME__PRS,
-			sony_pic_read_possible_resource, &spic_dev);
-	if (ACPI_FAILURE(status)) {
-		printk(KERN_WARNING DRV_PFX
-				"Failure evaluating %s\n",
-				METHOD_NAME__PRS);
-		result = -ENODEV;
-	}
-end:
-	return result;
-}
-
-/*
- *  Disable the spic device by calling its _DIS method
- */
-static int sony_pic_disable(struct acpi_device *device)
-{
-	acpi_status ret = acpi_evaluate_object(device->handle, "_DIS", NULL,
-					       NULL);
-
-	if (ACPI_FAILURE(ret) && ret != AE_NOT_FOUND)
-		return -ENXIO;
-
-	dprintk("Device disabled\n");
-	return 0;
-}
-
-
-/*
- *  Based on drivers/acpi/pci_link.c:acpi_pci_link_set
- *
- *  Call _SRS to set current resources
- */
-static int sony_pic_enable(struct acpi_device *device,
-		struct sony_pic_ioport *ioport, struct sony_pic_irq *irq)
-{
-	acpi_status status;
-	int result = 0;
-	/* Type 1 resource layout is:
-	 *    IO
-	 *    IO
-	 *    IRQNoFlags
-	 *    End
-	 *
-	 * Type 2 and 3 resource layout is:
-	 *    IO
-	 *    IRQNoFlags
-	 *    End
-	 */
-	struct {
-		struct acpi_resource res1;
-		struct acpi_resource res2;
-		struct acpi_resource res3;
-		struct acpi_resource res4;
-	} *resource;
-	struct acpi_buffer buffer = { 0, NULL };
-
-	if (!ioport || !irq)
-		return -EINVAL;
-
-	/* init acpi_buffer */
-	resource = kzalloc(sizeof(*resource) + 1, GFP_KERNEL);
-	if (!resource)
-		return -ENOMEM;
-
-	buffer.length = sizeof(*resource) + 1;
-	buffer.pointer = resource;
-
-	/* setup Type 1 resources */
-	if (spic_dev.control->model == SONYPI_DEVICE_TYPE1) {
-
-		/* setup io resources */
-		resource->res1.type = ACPI_RESOURCE_TYPE_IO;
-		resource->res1.length = sizeof(struct acpi_resource);
-		memcpy(&resource->res1.data.io, &ioport->io1,
-				sizeof(struct acpi_resource_io));
-
-		resource->res2.type = ACPI_RESOURCE_TYPE_IO;
-		resource->res2.length = sizeof(struct acpi_resource);
-		memcpy(&resource->res2.data.io, &ioport->io2,
-				sizeof(struct acpi_resource_io));
-
-		/* setup irq resource */
-		resource->res3.type = ACPI_RESOURCE_TYPE_IRQ;
-		resource->res3.length = sizeof(struct acpi_resource);
-		memcpy(&resource->res3.data.irq, &irq->irq,
-				sizeof(struct acpi_resource_irq));
-		/* we requested a shared irq */
-		resource->res3.data.irq.sharable = ACPI_SHARED;
-
-		resource->res4.type = ACPI_RESOURCE_TYPE_END_TAG;
-
-	}
-	/* setup Type 2/3 resources */
-	else {
-		/* setup io resource */
-		resource->res1.type = ACPI_RESOURCE_TYPE_IO;
-		resource->res1.length = sizeof(struct acpi_resource);
-		memcpy(&resource->res1.data.io, &ioport->io1,
-				sizeof(struct acpi_resource_io));
-
-		/* setup irq resource */
-		resource->res2.type = ACPI_RESOURCE_TYPE_IRQ;
-		resource->res2.length = sizeof(struct acpi_resource);
-		memcpy(&resource->res2.data.irq, &irq->irq,
-				sizeof(struct acpi_resource_irq));
-		/* we requested a shared irq */
-		resource->res2.data.irq.sharable = ACPI_SHARED;
-
-		resource->res3.type = ACPI_RESOURCE_TYPE_END_TAG;
-	}
-
-	/* Attempt to set the resource */
-	dprintk("Evaluating _SRS\n");
-	status = acpi_set_current_resources(device->handle, &buffer);
-
-	/* check for total failure */
-	if (ACPI_FAILURE(status)) {
-		printk(KERN_ERR DRV_PFX "Error evaluating _SRS\n");
-		result = -ENODEV;
-		goto end;
-	}
-
-	/* Necessary device initializations calls (from sonypi) */
-	sony_pic_call1(0x82);
-	sony_pic_call2(0x81, 0xff);
-	sony_pic_call1(compat ? 0x92 : 0x82);
-
-end:
-	kfree(resource);
-	return result;
-}
-
-/*****************
- *
- * ISR: some event is available
- *
- *****************/
-static irqreturn_t sony_pic_irq(int irq, void *dev_id)
-{
-	int i, j;
-	u8 ev = 0;
-	u8 data_mask = 0;
-	u8 device_event = 0;
-
-	struct sony_pic_dev *dev = (struct sony_pic_dev *) dev_id;
-
-	ev = inb_p(dev->cur_ioport->io1.minimum);
-	if (dev->cur_ioport->io2.minimum)
-		data_mask = inb_p(dev->cur_ioport->io2.minimum);
-	else
-		data_mask = inb_p(dev->cur_ioport->io1.minimum +
-				dev->control->evport_offset);
-
-	dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
-			ev, data_mask, dev->cur_ioport->io1.minimum,
-			dev->control->evport_offset);
-
-	if (ev == 0x00 || ev == 0xff)
-		return IRQ_HANDLED;
-
-	for (i = 0; dev->control->event_types[i].mask; i++) {
-
-		if ((data_mask & dev->control->event_types[i].data) !=
-		    dev->control->event_types[i].data)
-			continue;
-
-		if (!(mask & dev->control->event_types[i].mask))
-			continue;
-
-		for (j = 0; dev->control->event_types[i].events[j].event; j++) {
-			if (ev == dev->control->event_types[i].events[j].data) {
-				device_event =
-					dev->control->
-						event_types[i].events[j].event;
-				goto found;
-			}
-		}
-	}
-	/* Still not able to decode the event; try to pass
-	 * it over to the minidriver
-	 */
-	if (dev->control->handle_irq &&
-			dev->control->handle_irq(data_mask, ev) == 0)
-		return IRQ_HANDLED;
-
-	dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
-			ev, data_mask, dev->cur_ioport->io1.minimum,
-			dev->control->evport_offset);
-	return IRQ_HANDLED;
-
-found:
-	sony_laptop_report_input_event(device_event);
-	acpi_bus_generate_proc_event(dev->acpi_dev, 1, device_event);
-	sonypi_compat_report_event(device_event);
-
-	return IRQ_HANDLED;
-}
-
-/*****************
- *
- *  ACPI driver
- *
- *****************/
-static int sony_pic_remove(struct acpi_device *device, int type)
-{
-	struct sony_pic_ioport *io, *tmp_io;
-	struct sony_pic_irq *irq, *tmp_irq;
-
-	if (sony_pic_disable(device)) {
-		printk(KERN_ERR DRV_PFX "Couldn't disable device.\n");
-		return -ENXIO;
-	}
-
-	free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev);
-	release_region(spic_dev.cur_ioport->io1.minimum,
-			spic_dev.cur_ioport->io1.address_length);
-	if (spic_dev.cur_ioport->io2.minimum)
-		release_region(spic_dev.cur_ioport->io2.minimum,
-				spic_dev.cur_ioport->io2.address_length);
-
-	sonypi_compat_exit();
-
-	sony_laptop_remove_input();
-
-	/* pf attrs */
-	sysfs_remove_group(&sony_pf_device->dev.kobj, &spic_attribute_group);
-	sony_pf_remove();
-
-	list_for_each_entry_safe(io, tmp_io, &spic_dev.ioports, list) {
-		list_del(&io->list);
-		kfree(io);
-	}
-	list_for_each_entry_safe(irq, tmp_irq, &spic_dev.interrupts, list) {
-		list_del(&irq->list);
-		kfree(irq);
-	}
-	spic_dev.cur_ioport = NULL;
-	spic_dev.cur_irq = NULL;
-
-	dprintk(SONY_PIC_DRIVER_NAME " removed.\n");
-	return 0;
-}
-
-static int sony_pic_add(struct acpi_device *device)
-{
-	int result;
-	struct sony_pic_ioport *io, *tmp_io;
-	struct sony_pic_irq *irq, *tmp_irq;
-
-	printk(KERN_INFO DRV_PFX "%s v%s.\n",
-		SONY_PIC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
-
-	spic_dev.acpi_dev = device;
-	strcpy(acpi_device_class(device), "sony/hotkey");
-	sony_pic_detect_device_type(&spic_dev);
-	mutex_init(&spic_dev.lock);
-
-	/* read _PRS resources */
-	result = sony_pic_possible_resources(device);
-	if (result) {
-		printk(KERN_ERR DRV_PFX
-				"Unable to read possible resources.\n");
-		goto err_free_resources;
-	}
-
-	/* setup input devices and helper fifo */
-	result = sony_laptop_setup_input(device);
-	if (result) {
-		printk(KERN_ERR DRV_PFX
-				"Unable to create input devices.\n");
-		goto err_free_resources;
-	}
-
-	if (sonypi_compat_init())
-		goto err_remove_input;
-
-	/* request io port */
-	list_for_each_entry_reverse(io, &spic_dev.ioports, list) {
-		if (request_region(io->io1.minimum, io->io1.address_length,
-					"Sony Programmable I/O Device")) {
-			dprintk("I/O port1: 0x%.4x (0x%.4x) + 0x%.2x\n",
-					io->io1.minimum, io->io1.maximum,
-					io->io1.address_length);
-			/* Type 1 have 2 ioports */
-			if (io->io2.minimum) {
-				if (request_region(io->io2.minimum,
-						io->io2.address_length,
-						"Sony Programmable I/O Device")) {
-					dprintk("I/O port2: 0x%.4x (0x%.4x) + 0x%.2x\n",
-							io->io2.minimum, io->io2.maximum,
-							io->io2.address_length);
-					spic_dev.cur_ioport = io;
-					break;
-				}
-				else {
-					dprintk("Unable to get I/O port2: "
-							"0x%.4x (0x%.4x) + 0x%.2x\n",
-							io->io2.minimum, io->io2.maximum,
-							io->io2.address_length);
-					release_region(io->io1.minimum,
-							io->io1.address_length);
-				}
-			}
-			else {
-				spic_dev.cur_ioport = io;
-				break;
-			}
-		}
-	}
-	if (!spic_dev.cur_ioport) {
-		printk(KERN_ERR DRV_PFX "Failed to request_region.\n");
-		result = -ENODEV;
-		goto err_remove_compat;
-	}
-
-	/* request IRQ */
-	list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) {
-		if (!request_irq(irq->irq.interrupts[0], sony_pic_irq,
-					IRQF_SHARED, "sony-laptop", &spic_dev)) {
-			dprintk("IRQ: %d - triggering: %d - "
-					"polarity: %d - shr: %d\n",
-					irq->irq.interrupts[0],
-					irq->irq.triggering,
-					irq->irq.polarity,
-					irq->irq.sharable);
-			spic_dev.cur_irq = irq;
-			break;
-		}
-	}
-	if (!spic_dev.cur_irq) {
-		printk(KERN_ERR DRV_PFX "Failed to request_irq.\n");
-		result = -ENODEV;
-		goto err_release_region;
-	}
-
-	/* set resource status _SRS */
-	result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq);
-	if (result) {
-		printk(KERN_ERR DRV_PFX "Couldn't enable device.\n");
-		goto err_free_irq;
-	}
-
-	spic_dev.bluetooth_power = -1;
-	/* create device attributes */
-	result = sony_pf_add();
-	if (result)
-		goto err_disable_device;
-
-	result = sysfs_create_group(&sony_pf_device->dev.kobj, &spic_attribute_group);
-	if (result)
-		goto err_remove_pf;
-
-	return 0;
-
-err_remove_pf:
-	sony_pf_remove();
-
-err_disable_device:
-	sony_pic_disable(device);
-
-err_free_irq:
-	free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev);
-
-err_release_region:
-	release_region(spic_dev.cur_ioport->io1.minimum,
-			spic_dev.cur_ioport->io1.address_length);
-	if (spic_dev.cur_ioport->io2.minimum)
-		release_region(spic_dev.cur_ioport->io2.minimum,
-				spic_dev.cur_ioport->io2.address_length);
-
-err_remove_compat:
-	sonypi_compat_exit();
-
-err_remove_input:
-	sony_laptop_remove_input();
-
-err_free_resources:
-	list_for_each_entry_safe(io, tmp_io, &spic_dev.ioports, list) {
-		list_del(&io->list);
-		kfree(io);
-	}
-	list_for_each_entry_safe(irq, tmp_irq, &spic_dev.interrupts, list) {
-		list_del(&irq->list);
-		kfree(irq);
-	}
-	spic_dev.cur_ioport = NULL;
-	spic_dev.cur_irq = NULL;
-
-	return result;
-}
-
-static int sony_pic_suspend(struct acpi_device *device, pm_message_t state)
-{
-	if (sony_pic_disable(device))
-		return -ENXIO;
-	return 0;
-}
-
-static int sony_pic_resume(struct acpi_device *device)
-{
-	sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq);
-	return 0;
-}
-
-static const struct acpi_device_id sony_pic_device_ids[] = {
-	{SONY_PIC_HID, 0},
-	{"", 0},
-};
-
-static struct acpi_driver sony_pic_driver = {
-	.name = SONY_PIC_DRIVER_NAME,
-	.class = SONY_PIC_CLASS,
-	.ids = sony_pic_device_ids,
-	.owner = THIS_MODULE,
-	.ops = {
-		.add = sony_pic_add,
-		.remove = sony_pic_remove,
-		.suspend = sony_pic_suspend,
-		.resume = sony_pic_resume,
-		},
-};
-
-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
-	{
-		.ident = "Sony Vaio",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "PCG-"),
-		},
-	},
-	{
-		.ident = "Sony Vaio",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"),
-		},
-	},
-	{ }
-};
-
-static int __init sony_laptop_init(void)
-{
-	int result;
-
-	if (!no_spic && dmi_check_system(sonypi_dmi_table)) {
-		result = acpi_bus_register_driver(&sony_pic_driver);
-		if (result) {
-			printk(KERN_ERR DRV_PFX
-					"Unable to register SPIC driver.");
-			goto out;
-		}
-	}
-
-	result = acpi_bus_register_driver(&sony_nc_driver);
-	if (result) {
-		printk(KERN_ERR DRV_PFX "Unable to register SNC driver.");
-		goto out_unregister_pic;
-	}
-
-	return 0;
-
-out_unregister_pic:
-	if (!no_spic)
-		acpi_bus_unregister_driver(&sony_pic_driver);
-out:
-	return result;
-}
-
-static void __exit sony_laptop_exit(void)
-{
-	acpi_bus_unregister_driver(&sony_nc_driver);
-	if (!no_spic)
-		acpi_bus_unregister_driver(&sony_pic_driver);
-}
-
-module_init(sony_laptop_init);
-module_exit(sony_laptop_exit);
diff --git a/drivers/misc/tc1100-wmi.c b/drivers/misc/tc1100-wmi.c
deleted file mode 100644
index f25e4c9..0000000
--- a/drivers/misc/tc1100-wmi.c
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- *  HP Compaq TC1100 Tablet WMI Extras Driver
- *
- *  Copyright (C) 2007 Carlos Corbacho <carlos@strangeworlds.co.uk>
- *  Copyright (C) 2004 Jamey Hicks <jamey.hicks@hp.com>
- *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
- *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or (at
- *  your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <acpi/acpi.h>
-#include <acpi/actypes.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-#include <linux/platform_device.h>
-
-#define GUID "C364AC71-36DB-495A-8494-B439D472A505"
-
-#define TC1100_INSTANCE_WIRELESS		1
-#define TC1100_INSTANCE_JOGDIAL		2
-
-#define TC1100_LOGPREFIX "tc1100-wmi: "
-#define TC1100_INFO KERN_INFO TC1100_LOGPREFIX
-
-MODULE_AUTHOR("Jamey Hicks, Carlos Corbacho");
-MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("wmi:C364AC71-36DB-495A-8494-B439D472A505");
-
-static int tc1100_probe(struct platform_device *device);
-static int tc1100_remove(struct platform_device *device);
-static int tc1100_suspend(struct platform_device *device, pm_message_t state);
-static int tc1100_resume(struct platform_device *device);
-
-static struct platform_driver tc1100_driver = {
-	.driver = {
-		.name = "tc1100-wmi",
-		.owner = THIS_MODULE,
-	},
-	.probe = tc1100_probe,
-	.remove = tc1100_remove,
-	.suspend = tc1100_suspend,
-	.resume = tc1100_resume,
-};
-
-static struct platform_device *tc1100_device;
-
-struct tc1100_data {
-	u32 wireless;
-	u32 jogdial;
-};
-
-static struct tc1100_data suspend_data;
-
-/* --------------------------------------------------------------------------
-				Device Management
-   -------------------------------------------------------------------------- */
-
-static int get_state(u32 *out, u8 instance)
-{
-	u32 tmp;
-	acpi_status status;
-	struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object *obj;
-
-	if (!out)
-		return -EINVAL;
-
-	if (instance > 2)
-		return -ENODEV;
-
-	status = wmi_query_block(GUID, instance, &result);
-	if (ACPI_FAILURE(status))
-		return -ENODEV;
-
-	obj = (union acpi_object *) result.pointer;
-	if (obj && obj->type == ACPI_TYPE_BUFFER &&
-		obj->buffer.length == sizeof(u32)) {
-		tmp = *((u32 *) obj->buffer.pointer);
-	} else {
-		tmp = 0;
-	}
-
-	if (result.length > 0 && result.pointer)
-		kfree(result.pointer);
-
-	switch (instance) {
-	case TC1100_INSTANCE_WIRELESS:
-		*out = (tmp == 3) ? 1 : 0;
-		return 0;
-	case TC1100_INSTANCE_JOGDIAL:
-		*out = (tmp == 1) ? 1 : 0;
-		return 0;
-	default:
-		return -ENODEV;
-	}
-}
-
-static int set_state(u32 *in, u8 instance)
-{
-	u32 value;
-	acpi_status status;
-	struct acpi_buffer input;
-
-	if (!in)
-		return -EINVAL;
-
-	if (instance > 2)
-		return -ENODEV;
-
-	switch (instance) {
-	case TC1100_INSTANCE_WIRELESS:
-		value = (*in) ? 1 : 2;
-		break;
-	case TC1100_INSTANCE_JOGDIAL:
-		value = (*in) ? 0 : 1;
-		break;
-	default:
-		return -ENODEV;
-	}
-
-	input.length = sizeof(u32);
-	input.pointer = &value;
-
-	status = wmi_set_block(GUID, instance, &input);
-	if (ACPI_FAILURE(status))
-		return -ENODEV;
-
-	return 0;
-}
-
-/* --------------------------------------------------------------------------
-				FS Interface (/sys)
-   -------------------------------------------------------------------------- */
-
-/*
- * Read/ write bool sysfs macro
- */
-#define show_set_bool(value, instance) \
-static ssize_t \
-show_bool_##value(struct device *dev, struct device_attribute *attr, \
-	char *buf) \
-{ \
-	u32 result; \
-	acpi_status status = get_state(&result, instance); \
-	if (ACPI_SUCCESS(status)) \
-		return sprintf(buf, "%d\n", result); \
-	return sprintf(buf, "Read error\n"); \
-} \
-\
-static ssize_t \
-set_bool_##value(struct device *dev, struct device_attribute *attr, \
-	const char *buf, size_t count) \
-{ \
-	u32 tmp = simple_strtoul(buf, NULL, 10); \
-	acpi_status status = set_state(&tmp, instance); \
-		if (ACPI_FAILURE(status)) \
-			return -EINVAL; \
-	return count; \
-} \
-static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \
-	show_bool_##value, set_bool_##value);
-
-show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
-show_set_bool(jogdial, TC1100_INSTANCE_JOGDIAL);
-
-static void remove_fs(void)
-{
-	device_remove_file(&tc1100_device->dev, &dev_attr_wireless);
-	device_remove_file(&tc1100_device->dev, &dev_attr_jogdial);
-}
-
-static int add_fs(void)
-{
-	int ret;
-
-	ret = device_create_file(&tc1100_device->dev, &dev_attr_wireless);
-	if (ret)
-		goto add_sysfs_error;
-
-	ret = device_create_file(&tc1100_device->dev, &dev_attr_jogdial);
-	if (ret)
-		goto add_sysfs_error;
-
-	return ret;
-
-add_sysfs_error:
-	remove_fs();
-	return ret;
-}
-
-/* --------------------------------------------------------------------------
-				Driver Model
-   -------------------------------------------------------------------------- */
-
-static int tc1100_probe(struct platform_device *device)
-{
-	int result = 0;
-
-	result = add_fs();
-	return result;
-}
-
-
-static int tc1100_remove(struct platform_device *device)
-{
-	remove_fs();
-	return 0;
-}
-
-static int tc1100_suspend(struct platform_device *dev, pm_message_t state)
-{
-	int ret;
-
-	ret = get_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);
-	if (ret)
-		return ret;
-
-	ret = get_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
-	if (ret)
-		return ret;
-
-	return ret;
-}
-
-static int tc1100_resume(struct platform_device *dev)
-{
-	int ret;
-
-	ret = set_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);
-	if (ret)
-		return ret;
-
-	ret = set_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
-	if (ret)
-		return ret;
-
-	return ret;
-}
-
-static int __init tc1100_init(void)
-{
-	int result = 0;
-
-	if (!wmi_has_guid(GUID))
-		return -ENODEV;
-
-	result = platform_driver_register(&tc1100_driver);
-	if (result)
-		return result;
-
-	tc1100_device = platform_device_alloc("tc1100-wmi", -1);
-	platform_device_add(tc1100_device);
-
-	printk(TC1100_INFO "HP Compaq TC1100 Tablet WMI Extras loaded\n");
-
-	return result;
-}
-
-static void __exit tc1100_exit(void)
-{
-	platform_device_del(tc1100_device);
-	platform_driver_unregister(&tc1100_driver);
-
-	printk(TC1100_INFO "HP Compaq TC1100 Tablet WMI Extras unloaded\n");
-}
-
-module_init(tc1100_init);
-module_exit(tc1100_exit);
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
deleted file mode 100644
index 899766e..0000000
--- a/drivers/misc/thinkpad_acpi.c
+++ /dev/null
@@ -1,6949 +0,0 @@
-/*
- *  thinkpad_acpi.c - ThinkPad ACPI Extras
- *
- *
- *  Copyright (C) 2004-2005 Borislav Deianov <borislav@users.sf.net>
- *  Copyright (C) 2006-2008 Henrique de Moraes Holschuh <hmh@hmh.eng.br>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- *  02110-1301, USA.
- */
-
-#define TPACPI_VERSION "0.21"
-#define TPACPI_SYSFS_VERSION 0x020200
-
-/*
- *  Changelog:
- *  2007-10-20		changelog trimmed down
- *
- *  2007-03-27  0.14	renamed to thinkpad_acpi and moved to
- *  			drivers/misc.
- *
- *  2006-11-22	0.13	new maintainer
- *  			changelog now lives in git commit history, and will
- *  			not be updated further in-file.
- *
- *  2005-03-17	0.11	support for 600e, 770x
- *			    thanks to Jamie Lentin <lentinj@dial.pipex.com>
- *
- *  2005-01-16	0.9	use MODULE_VERSION
- *			    thanks to Henrik Brix Andersen <brix@gentoo.org>
- *			fix parameter passing on module loading
- *			    thanks to Rusty Russell <rusty@rustcorp.com.au>
- *			    thanks to Jim Radford <radford@blackbean.org>
- *  2004-11-08	0.8	fix init error case, don't return from a macro
- *			    thanks to Chris Wright <chrisw@osdl.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
-#include <linux/delay.h>
-
-#include <linux/nvram.h>
-#include <linux/proc_fs.h>
-#include <linux/sysfs.h>
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/platform_device.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/input.h>
-#include <linux/leds.h>
-#include <linux/rfkill.h>
-#include <asm/uaccess.h>
-
-#include <linux/dmi.h>
-#include <linux/jiffies.h>
-#include <linux/workqueue.h>
-
-#include <acpi/acpi_drivers.h>
-#include <acpi/acnamesp.h>
-
-#include <linux/pci_ids.h>
-
-
-/* ThinkPad CMOS commands */
-#define TP_CMOS_VOLUME_DOWN	0
-#define TP_CMOS_VOLUME_UP	1
-#define TP_CMOS_VOLUME_MUTE	2
-#define TP_CMOS_BRIGHTNESS_UP	4
-#define TP_CMOS_BRIGHTNESS_DOWN	5
-#define TP_CMOS_THINKLIGHT_ON	12
-#define TP_CMOS_THINKLIGHT_OFF	13
-
-/* NVRAM Addresses */
-enum tp_nvram_addr {
-	TP_NVRAM_ADDR_HK2		= 0x57,
-	TP_NVRAM_ADDR_THINKLIGHT	= 0x58,
-	TP_NVRAM_ADDR_VIDEO		= 0x59,
-	TP_NVRAM_ADDR_BRIGHTNESS	= 0x5e,
-	TP_NVRAM_ADDR_MIXER		= 0x60,
-};
-
-/* NVRAM bit masks */
-enum {
-	TP_NVRAM_MASK_HKT_THINKPAD	= 0x08,
-	TP_NVRAM_MASK_HKT_ZOOM		= 0x20,
-	TP_NVRAM_MASK_HKT_DISPLAY	= 0x40,
-	TP_NVRAM_MASK_HKT_HIBERNATE	= 0x80,
-	TP_NVRAM_MASK_THINKLIGHT	= 0x10,
-	TP_NVRAM_MASK_HKT_DISPEXPND	= 0x30,
-	TP_NVRAM_MASK_HKT_BRIGHTNESS	= 0x20,
-	TP_NVRAM_MASK_LEVEL_BRIGHTNESS	= 0x0f,
-	TP_NVRAM_POS_LEVEL_BRIGHTNESS	= 0,
-	TP_NVRAM_MASK_MUTE		= 0x40,
-	TP_NVRAM_MASK_HKT_VOLUME	= 0x80,
-	TP_NVRAM_MASK_LEVEL_VOLUME	= 0x0f,
-	TP_NVRAM_POS_LEVEL_VOLUME	= 0,
-};
-
-/* ACPI HIDs */
-#define TPACPI_ACPI_HKEY_HID		"IBM0068"
-
-/* Input IDs */
-#define TPACPI_HKEY_INPUT_PRODUCT	0x5054 /* "TP" */
-#define TPACPI_HKEY_INPUT_VERSION	0x4101
-
-
-/****************************************************************************
- * Main driver
- */
-
-#define TPACPI_NAME "thinkpad"
-#define TPACPI_DESC "ThinkPad ACPI Extras"
-#define TPACPI_FILE TPACPI_NAME "_acpi"
-#define TPACPI_URL "http://ibm-acpi.sf.net/"
-#define TPACPI_MAIL "ibm-acpi-devel@lists.sourceforge.net"
-
-#define TPACPI_PROC_DIR "ibm"
-#define TPACPI_ACPI_EVENT_PREFIX "ibm"
-#define TPACPI_DRVR_NAME TPACPI_FILE
-#define TPACPI_DRVR_SHORTNAME "tpacpi"
-#define TPACPI_HWMON_DRVR_NAME TPACPI_NAME "_hwmon"
-
-#define TPACPI_NVRAM_KTHREAD_NAME "ktpacpi_nvramd"
-#define TPACPI_WORKQUEUE_NAME "ktpacpid"
-
-#define TPACPI_MAX_ACPI_ARGS 3
-
-/* rfkill switches */
-enum {
-	TPACPI_RFK_BLUETOOTH_SW_ID = 0,
-	TPACPI_RFK_WWAN_SW_ID,
-};
-
-/* Debugging */
-#define TPACPI_LOG TPACPI_FILE ": "
-#define TPACPI_ERR	   KERN_ERR    TPACPI_LOG
-#define TPACPI_NOTICE KERN_NOTICE TPACPI_LOG
-#define TPACPI_INFO   KERN_INFO   TPACPI_LOG
-#define TPACPI_DEBUG  KERN_DEBUG  TPACPI_LOG
-
-#define TPACPI_DBG_ALL		0xffff
-#define TPACPI_DBG_INIT		0x0001
-#define TPACPI_DBG_EXIT		0x0002
-#define dbg_printk(a_dbg_level, format, arg...) \
-	do { if (dbg_level & a_dbg_level) \
-		printk(TPACPI_DEBUG "%s: " format, __func__ , ## arg); \
-	} while (0)
-#ifdef CONFIG_THINKPAD_ACPI_DEBUG
-#define vdbg_printk(a_dbg_level, format, arg...) \
-	dbg_printk(a_dbg_level, format, ## arg)
-static const char *str_supported(int is_supported);
-#else
-#define vdbg_printk(a_dbg_level, format, arg...)
-#endif
-
-#define onoff(status, bit) ((status) & (1 << (bit)) ? "on" : "off")
-#define enabled(status, bit) ((status) & (1 << (bit)) ? "enabled" : "disabled")
-#define strlencmp(a, b) (strncmp((a), (b), strlen(b)))
-
-
-/****************************************************************************
- * Driver-wide structs and misc. variables
- */
-
-struct ibm_struct;
-
-struct tp_acpi_drv_struct {
-	const struct acpi_device_id *hid;
-	struct acpi_driver *driver;
-
-	void (*notify) (struct ibm_struct *, u32);
-	acpi_handle *handle;
-	u32 type;
-	struct acpi_device *device;
-};
-
-struct ibm_struct {
-	char *name;
-
-	int (*read) (char *);
-	int (*write) (char *);
-	void (*exit) (void);
-	void (*resume) (void);
-	void (*suspend) (pm_message_t state);
-
-	struct list_head all_drivers;
-
-	struct tp_acpi_drv_struct *acpi;
-
-	struct {
-		u8 acpi_driver_registered:1;
-		u8 acpi_notify_installed:1;
-		u8 proc_created:1;
-		u8 init_called:1;
-		u8 experimental:1;
-	} flags;
-};
-
-struct ibm_init_struct {
-	char param[32];
-
-	int (*init) (struct ibm_init_struct *);
-	struct ibm_struct *data;
-};
-
-static struct {
-#ifdef CONFIG_THINKPAD_ACPI_BAY
-	u32 bay_status:1;
-	u32 bay_eject:1;
-	u32 bay_status2:1;
-	u32 bay_eject2:1;
-#endif
-	u32 bluetooth:1;
-	u32 hotkey:1;
-	u32 hotkey_mask:1;
-	u32 hotkey_wlsw:1;
-	u32 hotkey_tablet:1;
-	u32 light:1;
-	u32 light_status:1;
-	u32 bright_16levels:1;
-	u32 bright_acpimode:1;
-	u32 wan:1;
-	u32 fan_ctrl_status_undef:1;
-	u32 input_device_registered:1;
-	u32 platform_drv_registered:1;
-	u32 platform_drv_attrs_registered:1;
-	u32 sensors_pdrv_registered:1;
-	u32 sensors_pdrv_attrs_registered:1;
-	u32 sensors_pdev_attrs_registered:1;
-	u32 hotkey_poll_active:1;
-} tp_features;
-
-static struct {
-	u16 hotkey_mask_ff:1;
-	u16 bright_cmos_ec_unsync:1;
-} tp_warned;
-
-struct thinkpad_id_data {
-	unsigned int vendor;	/* ThinkPad vendor:
-				 * PCI_VENDOR_ID_IBM/PCI_VENDOR_ID_LENOVO */
-
-	char *bios_version_str;	/* Something like 1ZET51WW (1.03z) */
-	char *ec_version_str;	/* Something like 1ZHT51WW-1.04a */
-
-	u16 bios_model;		/* Big Endian, TP-1Y = 0x5931, 0 = unknown */
-	u16 ec_model;
-
-	char *model_str;	/* ThinkPad T43 */
-	char *nummodel_str;	/* 9384A9C for a 9384-A9C model */
-};
-static struct thinkpad_id_data thinkpad_id;
-
-static enum {
-	TPACPI_LIFE_INIT = 0,
-	TPACPI_LIFE_RUNNING,
-	TPACPI_LIFE_EXITING,
-} tpacpi_lifecycle;
-
-static int experimental;
-static u32 dbg_level;
-
-static struct workqueue_struct *tpacpi_wq;
-
-/* Special LED class that can defer work */
-struct tpacpi_led_classdev {
-	struct led_classdev led_classdev;
-	struct work_struct work;
-	enum led_brightness new_brightness;
-	unsigned int led;
-};
-
-/****************************************************************************
- ****************************************************************************
- *
- * ACPI Helpers and device model
- *
- ****************************************************************************
- ****************************************************************************/
-
-/*************************************************************************
- * ACPI basic handles
- */
-
-static acpi_handle root_handle;
-
-#define TPACPI_HANDLE(object, parent, paths...)			\
-	static acpi_handle  object##_handle;			\
-	static acpi_handle *object##_parent = &parent##_handle;	\
-	static char        *object##_path;			\
-	static char        *object##_paths[] = { paths }
-
-TPACPI_HANDLE(ec, root, "\\_SB.PCI0.ISA.EC0",	/* 240, 240x */
-	   "\\_SB.PCI.ISA.EC",	/* 570 */
-	   "\\_SB.PCI0.ISA0.EC0",	/* 600e/x, 770e, 770x */
-	   "\\_SB.PCI0.ISA.EC",	/* A21e, A2xm/p, T20-22, X20-21 */
-	   "\\_SB.PCI0.AD4S.EC0",	/* i1400, R30 */
-	   "\\_SB.PCI0.ICH3.EC0",	/* R31 */
-	   "\\_SB.PCI0.LPC.EC",	/* all others */
-	   );
-
-TPACPI_HANDLE(ecrd, ec, "ECRD");	/* 570 */
-TPACPI_HANDLE(ecwr, ec, "ECWR");	/* 570 */
-
-TPACPI_HANDLE(cmos, root, "\\UCMS",	/* R50, R50e, R50p, R51, */
-					/* T4x, X31, X40 */
-	   "\\CMOS",		/* A3x, G4x, R32, T23, T30, X22-24, X30 */
-	   "\\CMS",		/* R40, R40e */
-	   );			/* all others */
-
-TPACPI_HANDLE(hkey, ec, "\\_SB.HKEY",	/* 600e/x, 770e, 770x */
-	   "^HKEY",		/* R30, R31 */
-	   "HKEY",		/* all others */
-	   );			/* 570 */
-
-TPACPI_HANDLE(vid, root, "\\_SB.PCI.AGP.VGA",	/* 570 */
-	   "\\_SB.PCI0.AGP0.VID0",	/* 600e/x, 770x */
-	   "\\_SB.PCI0.VID0",	/* 770e */
-	   "\\_SB.PCI0.VID",	/* A21e, G4x, R50e, X30, X40 */
-	   "\\_SB.PCI0.AGP.VID",	/* all others */
-	   );				/* R30, R31 */
-
-
-/*************************************************************************
- * ACPI helpers
- */
-
-static int acpi_evalf(acpi_handle handle,
-		      void *res, char *method, char *fmt, ...)
-{
-	char *fmt0 = fmt;
-	struct acpi_object_list params;
-	union acpi_object in_objs[TPACPI_MAX_ACPI_ARGS];
-	struct acpi_buffer result, *resultp;
-	union acpi_object out_obj;
-	acpi_status status;
-	va_list ap;
-	char res_type;
-	int success;
-	int quiet;
-
-	if (!*fmt) {
-		printk(TPACPI_ERR "acpi_evalf() called with empty format\n");
-		return 0;
-	}
-
-	if (*fmt == 'q') {
-		quiet = 1;
-		fmt++;
-	} else
-		quiet = 0;
-
-	res_type = *(fmt++);
-
-	params.count = 0;
-	params.pointer = &in_objs[0];
-
-	va_start(ap, fmt);
-	while (*fmt) {
-		char c = *(fmt++);
-		switch (c) {
-		case 'd':	/* int */
-			in_objs[params.count].integer.value = va_arg(ap, int);
-			in_objs[params.count++].type = ACPI_TYPE_INTEGER;
-			break;
-			/* add more types as needed */
-		default:
-			printk(TPACPI_ERR "acpi_evalf() called "
-			       "with invalid format character '%c'\n", c);
-			return 0;
-		}
-	}
-	va_end(ap);
-
-	if (res_type != 'v') {
-		result.length = sizeof(out_obj);
-		result.pointer = &out_obj;
-		resultp = &result;
-	} else
-		resultp = NULL;
-
-	status = acpi_evaluate_object(handle, method, &params, resultp);
-
-	switch (res_type) {
-	case 'd':		/* int */
-		if (res)
-			*(int *)res = out_obj.integer.value;
-		success = status == AE_OK && out_obj.type == ACPI_TYPE_INTEGER;
-		break;
-	case 'v':		/* void */
-		success = status == AE_OK;
-		break;
-		/* add more types as needed */
-	default:
-		printk(TPACPI_ERR "acpi_evalf() called "
-		       "with invalid format character '%c'\n", res_type);
-		return 0;
-	}
-
-	if (!success && !quiet)
-		printk(TPACPI_ERR "acpi_evalf(%s, %s, ...) failed: %d\n",
-		       method, fmt0, status);
-
-	return success;
-}
-
-static int acpi_ec_read(int i, u8 *p)
-{
-	int v;
-
-	if (ecrd_handle) {
-		if (!acpi_evalf(ecrd_handle, &v, NULL, "dd", i))
-			return 0;
-		*p = v;
-	} else {
-		if (ec_read(i, p) < 0)
-			return 0;
-	}
-
-	return 1;
-}
-
-static int acpi_ec_write(int i, u8 v)
-{
-	if (ecwr_handle) {
-		if (!acpi_evalf(ecwr_handle, NULL, NULL, "vdd", i, v))
-			return 0;
-	} else {
-		if (ec_write(i, v) < 0)
-			return 0;
-	}
-
-	return 1;
-}
-
-#if defined(CONFIG_THINKPAD_ACPI_DOCK) || defined(CONFIG_THINKPAD_ACPI_BAY)
-static int _sta(acpi_handle handle)
-{
-	int status;
-
-	if (!handle || !acpi_evalf(handle, &status, "_STA", "d"))
-		status = 0;
-
-	return status;
-}
-#endif
-
-static int issue_thinkpad_cmos_command(int cmos_cmd)
-{
-	if (!cmos_handle)
-		return -ENXIO;
-
-	if (!acpi_evalf(cmos_handle, NULL, NULL, "vd", cmos_cmd))
-		return -EIO;
-
-	return 0;
-}
-
-/*************************************************************************
- * ACPI device model
- */
-
-#define TPACPI_ACPIHANDLE_INIT(object) \
-	drv_acpi_handle_init(#object, &object##_handle, *object##_parent, \
-		object##_paths, ARRAY_SIZE(object##_paths), &object##_path)
-
-static void drv_acpi_handle_init(char *name,
-			   acpi_handle *handle, acpi_handle parent,
-			   char **paths, int num_paths, char **path)
-{
-	int i;
-	acpi_status status;
-
-	vdbg_printk(TPACPI_DBG_INIT, "trying to locate ACPI handle for %s\n",
-		name);
-
-	for (i = 0; i < num_paths; i++) {
-		status = acpi_get_handle(parent, paths[i], handle);
-		if (ACPI_SUCCESS(status)) {
-			*path = paths[i];
-			dbg_printk(TPACPI_DBG_INIT,
-				   "Found ACPI handle %s for %s\n",
-				   *path, name);
-			return;
-		}
-	}
-
-	vdbg_printk(TPACPI_DBG_INIT, "ACPI handle for %s not found\n",
-		    name);
-	*handle = NULL;
-}
-
-static void dispatch_acpi_notify(acpi_handle handle, u32 event, void *data)
-{
-	struct ibm_struct *ibm = data;
-
-	if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING)
-		return;
-
-	if (!ibm || !ibm->acpi || !ibm->acpi->notify)
-		return;
-
-	ibm->acpi->notify(ibm, event);
-}
-
-static int __init setup_acpi_notify(struct ibm_struct *ibm)
-{
-	acpi_status status;
-	int rc;
-
-	BUG_ON(!ibm->acpi);
-
-	if (!*ibm->acpi->handle)
-		return 0;
-
-	vdbg_printk(TPACPI_DBG_INIT,
-		"setting up ACPI notify for %s\n", ibm->name);
-
-	rc = acpi_bus_get_device(*ibm->acpi->handle, &ibm->acpi->device);
-	if (rc < 0) {
-		printk(TPACPI_ERR "acpi_bus_get_device(%s) failed: %d\n",
-			ibm->name, rc);
-		return -ENODEV;
-	}
-
-	ibm->acpi->device->driver_data = ibm;
-	sprintf(acpi_device_class(ibm->acpi->device), "%s/%s",
-		TPACPI_ACPI_EVENT_PREFIX,
-		ibm->name);
-
-	status = acpi_install_notify_handler(*ibm->acpi->handle,
-			ibm->acpi->type, dispatch_acpi_notify, ibm);
-	if (ACPI_FAILURE(status)) {
-		if (status == AE_ALREADY_EXISTS) {
-			printk(TPACPI_NOTICE
-			       "another device driver is already "
-			       "handling %s events\n", ibm->name);
-		} else {
-			printk(TPACPI_ERR
-			       "acpi_install_notify_handler(%s) failed: %d\n",
-			       ibm->name, status);
-		}
-		return -ENODEV;
-	}
-	ibm->flags.acpi_notify_installed = 1;
-	return 0;
-}
-
-static int __init tpacpi_device_add(struct acpi_device *device)
-{
-	return 0;
-}
-
-static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
-{
-	int rc;
-
-	dbg_printk(TPACPI_DBG_INIT,
-		"registering %s as an ACPI driver\n", ibm->name);
-
-	BUG_ON(!ibm->acpi);
-
-	ibm->acpi->driver = kzalloc(sizeof(struct acpi_driver), GFP_KERNEL);
-	if (!ibm->acpi->driver) {
-		printk(TPACPI_ERR
-		       "failed to allocate memory for ibm->acpi->driver\n");
-		return -ENOMEM;
-	}
-
-	sprintf(ibm->acpi->driver->name, "%s_%s", TPACPI_NAME, ibm->name);
-	ibm->acpi->driver->ids = ibm->acpi->hid;
-
-	ibm->acpi->driver->ops.add = &tpacpi_device_add;
-
-	rc = acpi_bus_register_driver(ibm->acpi->driver);
-	if (rc < 0) {
-		printk(TPACPI_ERR "acpi_bus_register_driver(%s) failed: %d\n",
-		       ibm->name, rc);
-		kfree(ibm->acpi->driver);
-		ibm->acpi->driver = NULL;
-	} else if (!rc)
-		ibm->flags.acpi_driver_registered = 1;
-
-	return rc;
-}
-
-
-/****************************************************************************
- ****************************************************************************
- *
- * Procfs Helpers
- *
- ****************************************************************************
- ****************************************************************************/
-
-static int dispatch_procfs_read(char *page, char **start, off_t off,
-			int count, int *eof, void *data)
-{
-	struct ibm_struct *ibm = data;
-	int len;
-
-	if (!ibm || !ibm->read)
-		return -EINVAL;
-
-	len = ibm->read(page);
-	if (len < 0)
-		return len;
-
-	if (len <= off + count)
-		*eof = 1;
-	*start = page + off;
-	len -= off;
-	if (len > count)
-		len = count;
-	if (len < 0)
-		len = 0;
-
-	return len;
-}
-
-static int dispatch_procfs_write(struct file *file,
-			const char __user *userbuf,
-			unsigned long count, void *data)
-{
-	struct ibm_struct *ibm = data;
-	char *kernbuf;
-	int ret;
-
-	if (!ibm || !ibm->write)
-		return -EINVAL;
-
-	kernbuf = kmalloc(count + 2, GFP_KERNEL);
-	if (!kernbuf)
-		return -ENOMEM;
-
-	if (copy_from_user(kernbuf, userbuf, count)) {
-		kfree(kernbuf);
-		return -EFAULT;
-	}
-
-	kernbuf[count] = 0;
-	strcat(kernbuf, ",");
-	ret = ibm->write(kernbuf);
-	if (ret == 0)
-		ret = count;
-
-	kfree(kernbuf);
-
-	return ret;
-}
-
-static char *next_cmd(char **cmds)
-{
-	char *start = *cmds;
-	char *end;
-
-	while ((end = strchr(start, ',')) && end == start)
-		start = end + 1;
-
-	if (!end)
-		return NULL;
-
-	*end = 0;
-	*cmds = end + 1;
-	return start;
-}
-
-
-/****************************************************************************
- ****************************************************************************
- *
- * Device model: input, hwmon and platform
- *
- ****************************************************************************
- ****************************************************************************/
-
-static struct platform_device *tpacpi_pdev;
-static struct platform_device *tpacpi_sensors_pdev;
-static struct device *tpacpi_hwmon;
-static struct input_dev *tpacpi_inputdev;
-static struct mutex tpacpi_inputdev_send_mutex;
-static LIST_HEAD(tpacpi_all_drivers);
-
-static int tpacpi_suspend_handler(struct platform_device *pdev,
-				  pm_message_t state)
-{
-	struct ibm_struct *ibm, *itmp;
-
-	list_for_each_entry_safe(ibm, itmp,
-				 &tpacpi_all_drivers,
-				 all_drivers) {
-		if (ibm->suspend)
-			(ibm->suspend)(state);
-	}
-
-	return 0;
-}
-
-static int tpacpi_resume_handler(struct platform_device *pdev)
-{
-	struct ibm_struct *ibm, *itmp;
-
-	list_for_each_entry_safe(ibm, itmp,
-				 &tpacpi_all_drivers,
-				 all_drivers) {
-		if (ibm->resume)
-			(ibm->resume)();
-	}
-
-	return 0;
-}
-
-static struct platform_driver tpacpi_pdriver = {
-	.driver = {
-		.name = TPACPI_DRVR_NAME,
-		.owner = THIS_MODULE,
-	},
-	.suspend = tpacpi_suspend_handler,
-	.resume = tpacpi_resume_handler,
-};
-
-static struct platform_driver tpacpi_hwmon_pdriver = {
-	.driver = {
-		.name = TPACPI_HWMON_DRVR_NAME,
-		.owner = THIS_MODULE,
-	},
-};
-
-/*************************************************************************
- * sysfs support helpers
- */
-
-struct attribute_set {
-	unsigned int members, max_members;
-	struct attribute_group group;
-};
-
-struct attribute_set_obj {
-	struct attribute_set s;
-	struct attribute *a;
-} __attribute__((packed));
-
-static struct attribute_set *create_attr_set(unsigned int max_members,
-						const char *name)
-{
-	struct attribute_set_obj *sobj;
-
-	if (max_members == 0)
-		return NULL;
-
-	/* Allocates space for implicit NULL at the end too */
-	sobj = kzalloc(sizeof(struct attribute_set_obj) +
-		    max_members * sizeof(struct attribute *),
-		    GFP_KERNEL);
-	if (!sobj)
-		return NULL;
-	sobj->s.max_members = max_members;
-	sobj->s.group.attrs = &sobj->a;
-	sobj->s.group.name = name;
-
-	return &sobj->s;
-}
-
-#define destroy_attr_set(_set) \
-	kfree(_set);
-
-/* not thread-safe; use it from a single thread per set */
-static int add_to_attr_set(struct attribute_set *s, struct attribute *attr)
-{
-	if (!s || !attr)
-		return -EINVAL;
-
-	if (s->members >= s->max_members)
-		return -ENOMEM;
-
-	s->group.attrs[s->members] = attr;
-	s->members++;
-
-	return 0;
-}
-
-static int add_many_to_attr_set(struct attribute_set *s,
-			struct attribute **attr,
-			unsigned int count)
-{
-	int i, res;
-
-	for (i = 0; i < count; i++) {
-		res = add_to_attr_set(s, attr[i]);
-		if (res)
-			return res;
-	}
-
-	return 0;
-}
-
-static void delete_attr_set(struct attribute_set *s, struct kobject *kobj)
-{
-	sysfs_remove_group(kobj, &s->group);
-	destroy_attr_set(s);
-}
-
-#define register_attr_set_with_sysfs(_attr_set, _kobj) \
-	sysfs_create_group(_kobj, &_attr_set->group)
-
-static int parse_strtoul(const char *buf,
-		unsigned long max, unsigned long *value)
-{
-	char *endp;
-
-	while (*buf && isspace(*buf))
-		buf++;
-	*value = simple_strtoul(buf, &endp, 0);
-	while (*endp && isspace(*endp))
-		endp++;
-	if (*endp || *value > max)
-		return -EINVAL;
-
-	return 0;
-}
-
-static void tpacpi_disable_brightness_delay(void)
-{
-	if (acpi_evalf(hkey_handle, NULL, "PWMS", "qvd", 0))
-		printk(TPACPI_NOTICE
-			"ACPI backlight control delay disabled\n");
-}
-
-static int __init tpacpi_query_bcl_levels(acpi_handle handle)
-{
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object *obj;
-	int rc;
-
-	if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) {
-		obj = (union acpi_object *)buffer.pointer;
-		if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
-			printk(TPACPI_ERR "Unknown _BCL data, "
-			       "please report this to %s\n", TPACPI_MAIL);
-			rc = 0;
-		} else {
-			rc = obj->package.count;
-		}
-	} else {
-		return 0;
-	}
-
-	kfree(buffer.pointer);
-	return rc;
-}
-
-static acpi_status __init tpacpi_acpi_walk_find_bcl(acpi_handle handle,
-					u32 lvl, void *context, void **rv)
-{
-	char name[ACPI_PATH_SEGMENT_LENGTH];
-	struct acpi_buffer buffer = { sizeof(name), &name };
-
-	if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) &&
-	    !strncmp("_BCL", name, sizeof(name) - 1)) {
-		BUG_ON(!rv || !*rv);
-		**(int **)rv = tpacpi_query_bcl_levels(handle);
-		return AE_CTRL_TERMINATE;
-	} else {
-		return AE_OK;
-	}
-}
-
-/*
- * Returns 0 (no ACPI _BCL or _BCL invalid), or size of brightness map
- */
-static int __init tpacpi_check_std_acpi_brightness_support(void)
-{
-	int status;
-	int bcl_levels = 0;
-	void *bcl_ptr = &bcl_levels;
-
-	if (!vid_handle) {
-		TPACPI_ACPIHANDLE_INIT(vid);
-	}
-	if (!vid_handle)
-		return 0;
-
-	/*
-	 * Search for a _BCL method, and execute it.  This is safe on all
-	 * ThinkPads, and as a side-effect, _BCL will place a Lenovo Vista
-	 * BIOS in ACPI backlight control mode.  We do NOT have to care
-	 * about calling the _BCL method on an enabled video device; any
-	 * one will do for our purposes.
-	 */
-
-	status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3,
-				     tpacpi_acpi_walk_find_bcl, NULL,
-				     &bcl_ptr);
-
-	if (ACPI_SUCCESS(status) && bcl_levels > 2) {
-		tp_features.bright_acpimode = 1;
-		return (bcl_levels - 2);
-	}
-
-	return 0;
-}
-
-static int __init tpacpi_new_rfkill(const unsigned int id,
-			struct rfkill **rfk,
-			const enum rfkill_type rfktype,
-			const char *name,
-			int (*toggle_radio)(void *, enum rfkill_state),
-			int (*get_state)(void *, enum rfkill_state *))
-{
-	int res;
-	enum rfkill_state initial_state;
-
-	*rfk = rfkill_allocate(&tpacpi_pdev->dev, rfktype);
-	if (!*rfk) {
-		printk(TPACPI_ERR
-			"failed to allocate memory for rfkill class\n");
-		return -ENOMEM;
-	}
-
-	(*rfk)->name = name;
-	(*rfk)->get_state = get_state;
-	(*rfk)->toggle_radio = toggle_radio;
-
-	if (!get_state(NULL, &initial_state))
-		(*rfk)->state = initial_state;
-
-	res = rfkill_register(*rfk);
-	if (res < 0) {
-		printk(TPACPI_ERR
-			"failed to register %s rfkill switch: %d\n",
-			name, res);
-		rfkill_free(*rfk);
-		*rfk = NULL;
-		return res;
-	}
-
-	return 0;
-}
-
-/*************************************************************************
- * thinkpad-acpi driver attributes
- */
-
-/* interface_version --------------------------------------------------- */
-static ssize_t tpacpi_driver_interface_version_show(
-				struct device_driver *drv,
-				char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "0x%08x\n", TPACPI_SYSFS_VERSION);
-}
-
-static DRIVER_ATTR(interface_version, S_IRUGO,
-		tpacpi_driver_interface_version_show, NULL);
-
-/* debug_level --------------------------------------------------------- */
-static ssize_t tpacpi_driver_debug_show(struct device_driver *drv,
-						char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "0x%04x\n", dbg_level);
-}
-
-static ssize_t tpacpi_driver_debug_store(struct device_driver *drv,
-						const char *buf, size_t count)
-{
-	unsigned long t;
-
-	if (parse_strtoul(buf, 0xffff, &t))
-		return -EINVAL;
-
-	dbg_level = t;
-
-	return count;
-}
-
-static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
-		tpacpi_driver_debug_show, tpacpi_driver_debug_store);
-
-/* version ------------------------------------------------------------- */
-static ssize_t tpacpi_driver_version_show(struct device_driver *drv,
-						char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%s v%s\n",
-			TPACPI_DESC, TPACPI_VERSION);
-}
-
-static DRIVER_ATTR(version, S_IRUGO,
-		tpacpi_driver_version_show, NULL);
-
-/* --------------------------------------------------------------------- */
-
-static struct driver_attribute *tpacpi_driver_attributes[] = {
-	&driver_attr_debug_level, &driver_attr_version,
-	&driver_attr_interface_version,
-};
-
-static int __init tpacpi_create_driver_attributes(struct device_driver *drv)
-{
-	int i, res;
-
-	i = 0;
-	res = 0;
-	while (!res && i < ARRAY_SIZE(tpacpi_driver_attributes)) {
-		res = driver_create_file(drv, tpacpi_driver_attributes[i]);
-		i++;
-	}
-
-	return res;
-}
-
-static void tpacpi_remove_driver_attributes(struct device_driver *drv)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(tpacpi_driver_attributes); i++)
-		driver_remove_file(drv, tpacpi_driver_attributes[i]);
-}
-
-/****************************************************************************
- ****************************************************************************
- *
- * Subdrivers
- *
- ****************************************************************************
- ****************************************************************************/
-
-/*************************************************************************
- * thinkpad-acpi init subdriver
- */
-
-static int __init thinkpad_acpi_driver_init(struct ibm_init_struct *iibm)
-{
-	printk(TPACPI_INFO "%s v%s\n", TPACPI_DESC, TPACPI_VERSION);
-	printk(TPACPI_INFO "%s\n", TPACPI_URL);
-
-	printk(TPACPI_INFO "ThinkPad BIOS %s, EC %s\n",
-		(thinkpad_id.bios_version_str) ?
-			thinkpad_id.bios_version_str : "unknown",
-		(thinkpad_id.ec_version_str) ?
-			thinkpad_id.ec_version_str : "unknown");
-
-	if (thinkpad_id.vendor && thinkpad_id.model_str)
-		printk(TPACPI_INFO "%s %s, model %s\n",
-			(thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ?
-				"IBM" : ((thinkpad_id.vendor ==
-						PCI_VENDOR_ID_LENOVO) ?
-					"Lenovo" : "Unknown vendor"),
-			thinkpad_id.model_str,
-			(thinkpad_id.nummodel_str) ?
-				thinkpad_id.nummodel_str : "unknown");
-
-	return 0;
-}
-
-static int thinkpad_acpi_driver_read(char *p)
-{
-	int len = 0;
-
-	len += sprintf(p + len, "driver:\t\t%s\n", TPACPI_DESC);
-	len += sprintf(p + len, "version:\t%s\n", TPACPI_VERSION);
-
-	return len;
-}
-
-static struct ibm_struct thinkpad_acpi_driver_data = {
-	.name = "driver",
-	.read = thinkpad_acpi_driver_read,
-};
-
-/*************************************************************************
- * Hotkey subdriver
- */
-
-enum {	/* hot key scan codes (derived from ACPI DSDT) */
-	TP_ACPI_HOTKEYSCAN_FNF1		= 0,
-	TP_ACPI_HOTKEYSCAN_FNF2,
-	TP_ACPI_HOTKEYSCAN_FNF3,
-	TP_ACPI_HOTKEYSCAN_FNF4,
-	TP_ACPI_HOTKEYSCAN_FNF5,
-	TP_ACPI_HOTKEYSCAN_FNF6,
-	TP_ACPI_HOTKEYSCAN_FNF7,
-	TP_ACPI_HOTKEYSCAN_FNF8,
-	TP_ACPI_HOTKEYSCAN_FNF9,
-	TP_ACPI_HOTKEYSCAN_FNF10,
-	TP_ACPI_HOTKEYSCAN_FNF11,
-	TP_ACPI_HOTKEYSCAN_FNF12,
-	TP_ACPI_HOTKEYSCAN_FNBACKSPACE,
-	TP_ACPI_HOTKEYSCAN_FNINSERT,
-	TP_ACPI_HOTKEYSCAN_FNDELETE,
-	TP_ACPI_HOTKEYSCAN_FNHOME,
-	TP_ACPI_HOTKEYSCAN_FNEND,
-	TP_ACPI_HOTKEYSCAN_FNPAGEUP,
-	TP_ACPI_HOTKEYSCAN_FNPAGEDOWN,
-	TP_ACPI_HOTKEYSCAN_FNSPACE,
-	TP_ACPI_HOTKEYSCAN_VOLUMEUP,
-	TP_ACPI_HOTKEYSCAN_VOLUMEDOWN,
-	TP_ACPI_HOTKEYSCAN_MUTE,
-	TP_ACPI_HOTKEYSCAN_THINKPAD,
-};
-
-enum {	/* Keys available through NVRAM polling */
-	TPACPI_HKEY_NVRAM_KNOWN_MASK = 0x00fb88c0U,
-	TPACPI_HKEY_NVRAM_GOOD_MASK  = 0x00fb8000U,
-};
-
-enum {	/* Positions of some of the keys in hotkey masks */
-	TP_ACPI_HKEY_DISPSWTCH_MASK	= 1 << TP_ACPI_HOTKEYSCAN_FNF7,
-	TP_ACPI_HKEY_DISPXPAND_MASK	= 1 << TP_ACPI_HOTKEYSCAN_FNF8,
-	TP_ACPI_HKEY_HIBERNATE_MASK	= 1 << TP_ACPI_HOTKEYSCAN_FNF12,
-	TP_ACPI_HKEY_BRGHTUP_MASK	= 1 << TP_ACPI_HOTKEYSCAN_FNHOME,
-	TP_ACPI_HKEY_BRGHTDWN_MASK	= 1 << TP_ACPI_HOTKEYSCAN_FNEND,
-	TP_ACPI_HKEY_THNKLGHT_MASK	= 1 << TP_ACPI_HOTKEYSCAN_FNPAGEUP,
-	TP_ACPI_HKEY_ZOOM_MASK		= 1 << TP_ACPI_HOTKEYSCAN_FNSPACE,
-	TP_ACPI_HKEY_VOLUP_MASK		= 1 << TP_ACPI_HOTKEYSCAN_VOLUMEUP,
-	TP_ACPI_HKEY_VOLDWN_MASK	= 1 << TP_ACPI_HOTKEYSCAN_VOLUMEDOWN,
-	TP_ACPI_HKEY_MUTE_MASK		= 1 << TP_ACPI_HOTKEYSCAN_MUTE,
-	TP_ACPI_HKEY_THINKPAD_MASK	= 1 << TP_ACPI_HOTKEYSCAN_THINKPAD,
-};
-
-enum {	/* NVRAM to ACPI HKEY group map */
-	TP_NVRAM_HKEY_GROUP_HK2		= TP_ACPI_HKEY_THINKPAD_MASK |
-					  TP_ACPI_HKEY_ZOOM_MASK |
-					  TP_ACPI_HKEY_DISPSWTCH_MASK |
-					  TP_ACPI_HKEY_HIBERNATE_MASK,
-	TP_NVRAM_HKEY_GROUP_BRIGHTNESS	= TP_ACPI_HKEY_BRGHTUP_MASK |
-					  TP_ACPI_HKEY_BRGHTDWN_MASK,
-	TP_NVRAM_HKEY_GROUP_VOLUME	= TP_ACPI_HKEY_VOLUP_MASK |
-					  TP_ACPI_HKEY_VOLDWN_MASK |
-					  TP_ACPI_HKEY_MUTE_MASK,
-};
-
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-struct tp_nvram_state {
-       u16 thinkpad_toggle:1;
-       u16 zoom_toggle:1;
-       u16 display_toggle:1;
-       u16 thinklight_toggle:1;
-       u16 hibernate_toggle:1;
-       u16 displayexp_toggle:1;
-       u16 display_state:1;
-       u16 brightness_toggle:1;
-       u16 volume_toggle:1;
-       u16 mute:1;
-
-       u8 brightness_level;
-       u8 volume_level;
-};
-
-static struct task_struct *tpacpi_hotkey_task;
-static u32 hotkey_source_mask;		/* bit mask 0=ACPI,1=NVRAM */
-static int hotkey_poll_freq = 10;	/* Hz */
-static struct mutex hotkey_thread_mutex;
-static struct mutex hotkey_thread_data_mutex;
-static unsigned int hotkey_config_change;
-
-#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
-
-#define hotkey_source_mask 0U
-
-#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
-
-static struct mutex hotkey_mutex;
-
-static enum {	/* Reasons for waking up */
-	TP_ACPI_WAKEUP_NONE = 0,	/* None or unknown */
-	TP_ACPI_WAKEUP_BAYEJ,		/* Bay ejection request */
-	TP_ACPI_WAKEUP_UNDOCK,		/* Undock request */
-} hotkey_wakeup_reason;
-
-static int hotkey_autosleep_ack;
-
-static int hotkey_orig_status;
-static u32 hotkey_orig_mask;
-static u32 hotkey_all_mask;
-static u32 hotkey_reserved_mask;
-static u32 hotkey_mask;
-
-static unsigned int hotkey_report_mode;
-
-static u16 *hotkey_keycode_map;
-
-static struct attribute_set *hotkey_dev_attributes;
-
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-#define HOTKEY_CONFIG_CRITICAL_START \
-	do { \
-		mutex_lock(&hotkey_thread_data_mutex); \
-		hotkey_config_change++; \
-	} while (0);
-#define HOTKEY_CONFIG_CRITICAL_END \
-	mutex_unlock(&hotkey_thread_data_mutex);
-#else
-#define HOTKEY_CONFIG_CRITICAL_START
-#define HOTKEY_CONFIG_CRITICAL_END
-#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
-
-/* HKEY.MHKG() return bits */
-#define TP_HOTKEY_TABLET_MASK (1 << 3)
-
-static int hotkey_get_wlsw(int *status)
-{
-	if (!acpi_evalf(hkey_handle, status, "WLSW", "d"))
-		return -EIO;
-	return 0;
-}
-
-static int hotkey_get_tablet_mode(int *status)
-{
-	int s;
-
-	if (!acpi_evalf(hkey_handle, &s, "MHKG", "d"))
-		return -EIO;
-
-	*status = ((s & TP_HOTKEY_TABLET_MASK) != 0);
-	return 0;
-}
-
-/*
- * Call with hotkey_mutex held
- */
-static int hotkey_mask_get(void)
-{
-	u32 m = 0;
-
-	if (tp_features.hotkey_mask) {
-		if (!acpi_evalf(hkey_handle, &m, "DHKN", "d"))
-			return -EIO;
-	}
-	hotkey_mask = m | (hotkey_source_mask & hotkey_mask);
-
-	return 0;
-}
-
-/*
- * Call with hotkey_mutex held
- */
-static int hotkey_mask_set(u32 mask)
-{
-	int i;
-	int rc = 0;
-
-	if (tp_features.hotkey_mask) {
-		if (!tp_warned.hotkey_mask_ff &&
-		    (mask == 0xffff || mask == 0xffffff ||
-		     mask == 0xffffffff)) {
-			tp_warned.hotkey_mask_ff = 1;
-			printk(TPACPI_NOTICE
-			       "setting the hotkey mask to 0x%08x is likely "
-			       "not the best way to go about it\n", mask);
-			printk(TPACPI_NOTICE
-			       "please consider using the driver defaults, "
-			       "and refer to up-to-date thinkpad-acpi "
-			       "documentation\n");
-		}
-
-		HOTKEY_CONFIG_CRITICAL_START
-		for (i = 0; i < 32; i++) {
-			u32 m = 1 << i;
-			/* in the firmware mask, enable only keys that are
-			 * not in NVRAM mode; but enable the key in the
-			 * cached hotkey_mask regardless of mode, or the key
-			 * will end up disabled by hotkey_mask_get() */
-			if (!acpi_evalf(hkey_handle,
-					NULL, "MHKM", "vdd", i + 1,
-					!!((mask & ~hotkey_source_mask) & m))) {
-				rc = -EIO;
-				break;
-			} else {
-				hotkey_mask = (hotkey_mask & ~m) | (mask & m);
-			}
-		}
-		HOTKEY_CONFIG_CRITICAL_END
-
-		/* hotkey_mask_get must be called unconditionally below */
-		if (!hotkey_mask_get() && !rc &&
-		    (hotkey_mask & ~hotkey_source_mask) !=
-		     (mask & ~hotkey_source_mask)) {
-			printk(TPACPI_NOTICE
-			       "requested hot key mask 0x%08x, but "
-			       "firmware forced it to 0x%08x\n",
-			       mask, hotkey_mask);
-		}
-	} else {
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-		HOTKEY_CONFIG_CRITICAL_START
-		hotkey_mask = mask & hotkey_source_mask;
-		HOTKEY_CONFIG_CRITICAL_END
-		hotkey_mask_get();
-		if (hotkey_mask != mask) {
-			printk(TPACPI_NOTICE
-			       "requested hot key mask 0x%08x, "
-			       "forced to 0x%08x (NVRAM poll mask is "
-			       "0x%08x): no firmware mask support\n",
-			       mask, hotkey_mask, hotkey_source_mask);
-		}
-#else
-		hotkey_mask_get();
-		rc = -ENXIO;
-#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
-	}
-
-	return rc;
-}
-
-static int hotkey_status_get(int *status)
-{
-	if (!acpi_evalf(hkey_handle, status, "DHKC", "d"))
-		return -EIO;
-
-	return 0;
-}
-
-static int hotkey_status_set(int status)
-{
-	if (!acpi_evalf(hkey_handle, NULL, "MHKC", "vd", status))
-		return -EIO;
-
-	return 0;
-}
-
-static void tpacpi_input_send_tabletsw(void)
-{
-	int state;
-
-	if (tp_features.hotkey_tablet &&
-	    !hotkey_get_tablet_mode(&state)) {
-		mutex_lock(&tpacpi_inputdev_send_mutex);
-
-		input_report_switch(tpacpi_inputdev,
-				    SW_TABLET_MODE, !!state);
-		input_sync(tpacpi_inputdev);
-
-		mutex_unlock(&tpacpi_inputdev_send_mutex);
-	}
-}
-
-static void tpacpi_input_send_key(unsigned int scancode)
-{
-	unsigned int keycode;
-
-	keycode = hotkey_keycode_map[scancode];
-
-	if (keycode != KEY_RESERVED) {
-		mutex_lock(&tpacpi_inputdev_send_mutex);
-
-		input_report_key(tpacpi_inputdev, keycode, 1);
-		if (keycode == KEY_UNKNOWN)
-			input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
-				    scancode);
-		input_sync(tpacpi_inputdev);
-
-		input_report_key(tpacpi_inputdev, keycode, 0);
-		if (keycode == KEY_UNKNOWN)
-			input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
-				    scancode);
-		input_sync(tpacpi_inputdev);
-
-		mutex_unlock(&tpacpi_inputdev_send_mutex);
-	}
-}
-
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-static struct tp_acpi_drv_struct ibm_hotkey_acpidriver;
-
-static void tpacpi_hotkey_send_key(unsigned int scancode)
-{
-	tpacpi_input_send_key(scancode);
-	if (hotkey_report_mode < 2) {
-		acpi_bus_generate_proc_event(ibm_hotkey_acpidriver.device,
-						0x80, 0x1001 + scancode);
-	}
-}
-
-static void hotkey_read_nvram(struct tp_nvram_state *n, u32 m)
-{
-	u8 d;
-
-	if (m & TP_NVRAM_HKEY_GROUP_HK2) {
-		d = nvram_read_byte(TP_NVRAM_ADDR_HK2);
-		n->thinkpad_toggle = !!(d & TP_NVRAM_MASK_HKT_THINKPAD);
-		n->zoom_toggle = !!(d & TP_NVRAM_MASK_HKT_ZOOM);
-		n->display_toggle = !!(d & TP_NVRAM_MASK_HKT_DISPLAY);
-		n->hibernate_toggle = !!(d & TP_NVRAM_MASK_HKT_HIBERNATE);
-	}
-	if (m & TP_ACPI_HKEY_THNKLGHT_MASK) {
-		d = nvram_read_byte(TP_NVRAM_ADDR_THINKLIGHT);
-		n->thinklight_toggle = !!(d & TP_NVRAM_MASK_THINKLIGHT);
-	}
-	if (m & TP_ACPI_HKEY_DISPXPAND_MASK) {
-		d = nvram_read_byte(TP_NVRAM_ADDR_VIDEO);
-		n->displayexp_toggle =
-				!!(d & TP_NVRAM_MASK_HKT_DISPEXPND);
-	}
-	if (m & TP_NVRAM_HKEY_GROUP_BRIGHTNESS) {
-		d = nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS);
-		n->brightness_level = (d & TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
-				>> TP_NVRAM_POS_LEVEL_BRIGHTNESS;
-		n->brightness_toggle =
-				!!(d & TP_NVRAM_MASK_HKT_BRIGHTNESS);
-	}
-	if (m & TP_NVRAM_HKEY_GROUP_VOLUME) {
-		d = nvram_read_byte(TP_NVRAM_ADDR_MIXER);
-		n->volume_level = (d & TP_NVRAM_MASK_LEVEL_VOLUME)
-				>> TP_NVRAM_POS_LEVEL_VOLUME;
-		n->mute = !!(d & TP_NVRAM_MASK_MUTE);
-		n->volume_toggle = !!(d & TP_NVRAM_MASK_HKT_VOLUME);
-	}
-}
-
-#define TPACPI_COMPARE_KEY(__scancode, __member) \
-	do { \
-		if ((mask & (1 << __scancode)) && \
-		    oldn->__member != newn->__member) \
-		tpacpi_hotkey_send_key(__scancode); \
-	} while (0)
-
-#define TPACPI_MAY_SEND_KEY(__scancode) \
-	do { if (mask & (1 << __scancode)) \
-		tpacpi_hotkey_send_key(__scancode); } while (0)
-
-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
-					   struct tp_nvram_state *newn,
-					   u32 mask)
-{
-	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
-	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
-	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
-	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF12, hibernate_toggle);
-
-	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNPAGEUP, thinklight_toggle);
-
-	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF8, displayexp_toggle);
-
-	/* handle volume */
-	if (oldn->volume_toggle != newn->volume_toggle) {
-		if (oldn->mute != newn->mute) {
-			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
-		}
-		if (oldn->volume_level > newn->volume_level) {
-			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
-		} else if (oldn->volume_level < newn->volume_level) {
-			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
-		} else if (oldn->mute == newn->mute) {
-			/* repeated key presses that didn't change state */
-			if (newn->mute) {
-				TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
-			} else if (newn->volume_level != 0) {
-				TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
-			} else {
-				TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
-			}
-		}
-	}
-
-	/* handle brightness */
-	if (oldn->brightness_toggle != newn->brightness_toggle) {
-		if (oldn->brightness_level < newn->brightness_level) {
-			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
-		} else if (oldn->brightness_level > newn->brightness_level) {
-			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
-		} else {
-			/* repeated key presses that didn't change state */
-			if (newn->brightness_level != 0) {
-				TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
-			} else {
-				TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
-			}
-		}
-	}
-}
-
-#undef TPACPI_COMPARE_KEY
-#undef TPACPI_MAY_SEND_KEY
-
-static int hotkey_kthread(void *data)
-{
-	struct tp_nvram_state s[2];
-	u32 mask;
-	unsigned int si, so;
-	unsigned long t;
-	unsigned int change_detector, must_reset;
-
-	mutex_lock(&hotkey_thread_mutex);
-
-	if (tpacpi_lifecycle == TPACPI_LIFE_EXITING)
-		goto exit;
-
-	set_freezable();
-
-	so = 0;
-	si = 1;
-	t = 0;
-
-	/* Initial state for compares */
-	mutex_lock(&hotkey_thread_data_mutex);
-	change_detector = hotkey_config_change;
-	mask = hotkey_source_mask & hotkey_mask;
-	mutex_unlock(&hotkey_thread_data_mutex);
-	hotkey_read_nvram(&s[so], mask);
-
-	while (!kthread_should_stop() && hotkey_poll_freq) {
-		if (t == 0)
-			t = 1000/hotkey_poll_freq;
-		t = msleep_interruptible(t);
-		if (unlikely(kthread_should_stop()))
-			break;
-		must_reset = try_to_freeze();
-		if (t > 0 && !must_reset)
-			continue;
-
-		mutex_lock(&hotkey_thread_data_mutex);
-		if (must_reset || hotkey_config_change != change_detector) {
-			/* forget old state on thaw or config change */
-			si = so;
-			t = 0;
-			change_detector = hotkey_config_change;
-		}
-		mask = hotkey_source_mask & hotkey_mask;
-		mutex_unlock(&hotkey_thread_data_mutex);
-
-		if (likely(mask)) {
-			hotkey_read_nvram(&s[si], mask);
-			if (likely(si != so)) {
-				hotkey_compare_and_issue_event(&s[so], &s[si],
-								mask);
-			}
-		}
-
-		so = si;
-		si ^= 1;
-	}
-
-exit:
-	mutex_unlock(&hotkey_thread_mutex);
-	return 0;
-}
-
-static void hotkey_poll_stop_sync(void)
-{
-	if (tpacpi_hotkey_task) {
-		if (frozen(tpacpi_hotkey_task) ||
-		    freezing(tpacpi_hotkey_task))
-			thaw_process(tpacpi_hotkey_task);
-
-		kthread_stop(tpacpi_hotkey_task);
-		tpacpi_hotkey_task = NULL;
-		mutex_lock(&hotkey_thread_mutex);
-		/* at this point, the thread did exit */
-		mutex_unlock(&hotkey_thread_mutex);
-	}
-}
-
-/* call with hotkey_mutex held */
-static void hotkey_poll_setup(int may_warn)
-{
-	if ((hotkey_source_mask & hotkey_mask) != 0 &&
-	    hotkey_poll_freq > 0 &&
-	    (tpacpi_inputdev->users > 0 || hotkey_report_mode < 2)) {
-		if (!tpacpi_hotkey_task) {
-			tpacpi_hotkey_task = kthread_run(hotkey_kthread,
-					NULL, TPACPI_NVRAM_KTHREAD_NAME);
-			if (IS_ERR(tpacpi_hotkey_task)) {
-				tpacpi_hotkey_task = NULL;
-				printk(TPACPI_ERR
-				       "could not create kernel thread "
-				       "for hotkey polling\n");
-			}
-		}
-	} else {
-		hotkey_poll_stop_sync();
-		if (may_warn &&
-		    hotkey_source_mask != 0 && hotkey_poll_freq == 0) {
-			printk(TPACPI_NOTICE
-				"hot keys 0x%08x require polling, "
-				"which is currently disabled\n",
-				hotkey_source_mask);
-		}
-	}
-}
-
-static void hotkey_poll_setup_safe(int may_warn)
-{
-	mutex_lock(&hotkey_mutex);
-	hotkey_poll_setup(may_warn);
-	mutex_unlock(&hotkey_mutex);
-}
-
-#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
-
-static void hotkey_poll_setup_safe(int __unused)
-{
-}
-
-#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
-
-static int hotkey_inputdev_open(struct input_dev *dev)
-{
-	switch (tpacpi_lifecycle) {
-	case TPACPI_LIFE_INIT:
-		/*
-		 * hotkey_init will call hotkey_poll_setup_safe
-		 * at the appropriate moment
-		 */
-		return 0;
-	case TPACPI_LIFE_EXITING:
-		return -EBUSY;
-	case TPACPI_LIFE_RUNNING:
-		hotkey_poll_setup_safe(0);
-		return 0;
-	}
-
-	/* Should only happen if tpacpi_lifecycle is corrupt */
-	BUG();
-	return -EBUSY;
-}
-
-static void hotkey_inputdev_close(struct input_dev *dev)
-{
-	/* disable hotkey polling when possible */
-	if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING)
-		hotkey_poll_setup_safe(0);
-}
-
-/* sysfs hotkey enable ------------------------------------------------- */
-static ssize_t hotkey_enable_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	int res, status;
-
-	res = hotkey_status_get(&status);
-	if (res)
-		return res;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", status);
-}
-
-static ssize_t hotkey_enable_store(struct device *dev,
-			    struct device_attribute *attr,
-			    const char *buf, size_t count)
-{
-	unsigned long t;
-	int res;
-
-	if (parse_strtoul(buf, 1, &t))
-		return -EINVAL;
-
-	res = hotkey_status_set(t);
-
-	return (res) ? res : count;
-}
-
-static struct device_attribute dev_attr_hotkey_enable =
-	__ATTR(hotkey_enable, S_IWUSR | S_IRUGO,
-		hotkey_enable_show, hotkey_enable_store);
-
-/* sysfs hotkey mask --------------------------------------------------- */
-static ssize_t hotkey_mask_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	int res;
-
-	if (mutex_lock_interruptible(&hotkey_mutex))
-		return -ERESTARTSYS;
-	res = hotkey_mask_get();
-	mutex_unlock(&hotkey_mutex);
-
-	return (res)?
-		res : snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_mask);
-}
-
-static ssize_t hotkey_mask_store(struct device *dev,
-			    struct device_attribute *attr,
-			    const char *buf, size_t count)
-{
-	unsigned long t;
-	int res;
-
-	if (parse_strtoul(buf, 0xffffffffUL, &t))
-		return -EINVAL;
-
-	if (mutex_lock_interruptible(&hotkey_mutex))
-		return -ERESTARTSYS;
-
-	res = hotkey_mask_set(t);
-
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-	hotkey_poll_setup(1);
-#endif
-
-	mutex_unlock(&hotkey_mutex);
-
-	return (res) ? res : count;
-}
-
-static struct device_attribute dev_attr_hotkey_mask =
-	__ATTR(hotkey_mask, S_IWUSR | S_IRUGO,
-		hotkey_mask_show, hotkey_mask_store);
-
-/* sysfs hotkey bios_enabled ------------------------------------------- */
-static ssize_t hotkey_bios_enabled_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_orig_status);
-}
-
-static struct device_attribute dev_attr_hotkey_bios_enabled =
-	__ATTR(hotkey_bios_enabled, S_IRUGO, hotkey_bios_enabled_show, NULL);
-
-/* sysfs hotkey bios_mask ---------------------------------------------- */
-static ssize_t hotkey_bios_mask_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_orig_mask);
-}
-
-static struct device_attribute dev_attr_hotkey_bios_mask =
-	__ATTR(hotkey_bios_mask, S_IRUGO, hotkey_bios_mask_show, NULL);
-
-/* sysfs hotkey all_mask ----------------------------------------------- */
-static ssize_t hotkey_all_mask_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "0x%08x\n",
-				hotkey_all_mask | hotkey_source_mask);
-}
-
-static struct device_attribute dev_attr_hotkey_all_mask =
-	__ATTR(hotkey_all_mask, S_IRUGO, hotkey_all_mask_show, NULL);
-
-/* sysfs hotkey recommended_mask --------------------------------------- */
-static ssize_t hotkey_recommended_mask_show(struct device *dev,
-					    struct device_attribute *attr,
-					    char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "0x%08x\n",
-			(hotkey_all_mask | hotkey_source_mask)
-			& ~hotkey_reserved_mask);
-}
-
-static struct device_attribute dev_attr_hotkey_recommended_mask =
-	__ATTR(hotkey_recommended_mask, S_IRUGO,
-		hotkey_recommended_mask_show, NULL);
-
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-
-/* sysfs hotkey hotkey_source_mask ------------------------------------- */
-static ssize_t hotkey_source_mask_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_source_mask);
-}
-
-static ssize_t hotkey_source_mask_store(struct device *dev,
-			    struct device_attribute *attr,
-			    const char *buf, size_t count)
-{
-	unsigned long t;
-
-	if (parse_strtoul(buf, 0xffffffffUL, &t) ||
-		((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0))
-		return -EINVAL;
-
-	if (mutex_lock_interruptible(&hotkey_mutex))
-		return -ERESTARTSYS;
-
-	HOTKEY_CONFIG_CRITICAL_START
-	hotkey_source_mask = t;
-	HOTKEY_CONFIG_CRITICAL_END
-
-	hotkey_poll_setup(1);
-
-	mutex_unlock(&hotkey_mutex);
-
-	return count;
-}
-
-static struct device_attribute dev_attr_hotkey_source_mask =
-	__ATTR(hotkey_source_mask, S_IWUSR | S_IRUGO,
-		hotkey_source_mask_show, hotkey_source_mask_store);
-
-/* sysfs hotkey hotkey_poll_freq --------------------------------------- */
-static ssize_t hotkey_poll_freq_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_poll_freq);
-}
-
-static ssize_t hotkey_poll_freq_store(struct device *dev,
-			    struct device_attribute *attr,
-			    const char *buf, size_t count)
-{
-	unsigned long t;
-
-	if (parse_strtoul(buf, 25, &t))
-		return -EINVAL;
-
-	if (mutex_lock_interruptible(&hotkey_mutex))
-		return -ERESTARTSYS;
-
-	hotkey_poll_freq = t;
-
-	hotkey_poll_setup(1);
-	mutex_unlock(&hotkey_mutex);
-
-	return count;
-}
-
-static struct device_attribute dev_attr_hotkey_poll_freq =
-	__ATTR(hotkey_poll_freq, S_IWUSR | S_IRUGO,
-		hotkey_poll_freq_show, hotkey_poll_freq_store);
-
-#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
-
-/* sysfs hotkey radio_sw (pollable) ------------------------------------ */
-static ssize_t hotkey_radio_sw_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	int res, s;
-	res = hotkey_get_wlsw(&s);
-	if (res < 0)
-		return res;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", !!s);
-}
-
-static struct device_attribute dev_attr_hotkey_radio_sw =
-	__ATTR(hotkey_radio_sw, S_IRUGO, hotkey_radio_sw_show, NULL);
-
-static void hotkey_radio_sw_notify_change(void)
-{
-	if (tp_features.hotkey_wlsw)
-		sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
-			     "hotkey_radio_sw");
-}
-
-/* sysfs hotkey tablet mode (pollable) --------------------------------- */
-static ssize_t hotkey_tablet_mode_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	int res, s;
-	res = hotkey_get_tablet_mode(&s);
-	if (res < 0)
-		return res;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", !!s);
-}
-
-static struct device_attribute dev_attr_hotkey_tablet_mode =
-	__ATTR(hotkey_tablet_mode, S_IRUGO, hotkey_tablet_mode_show, NULL);
-
-static void hotkey_tablet_mode_notify_change(void)
-{
-	if (tp_features.hotkey_tablet)
-		sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
-			     "hotkey_tablet_mode");
-}
-
-/* sysfs hotkey report_mode -------------------------------------------- */
-static ssize_t hotkey_report_mode_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-		(hotkey_report_mode != 0) ? hotkey_report_mode : 1);
-}
-
-static struct device_attribute dev_attr_hotkey_report_mode =
-	__ATTR(hotkey_report_mode, S_IRUGO, hotkey_report_mode_show, NULL);
-
-/* sysfs wakeup reason (pollable) -------------------------------------- */
-static ssize_t hotkey_wakeup_reason_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason);
-}
-
-static struct device_attribute dev_attr_hotkey_wakeup_reason =
-	__ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL);
-
-static void hotkey_wakeup_reason_notify_change(void)
-{
-	if (tp_features.hotkey_mask)
-		sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
-			     "wakeup_reason");
-}
-
-/* sysfs wakeup hotunplug_complete (pollable) -------------------------- */
-static ssize_t hotkey_wakeup_hotunplug_complete_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack);
-}
-
-static struct device_attribute dev_attr_hotkey_wakeup_hotunplug_complete =
-	__ATTR(wakeup_hotunplug_complete, S_IRUGO,
-	       hotkey_wakeup_hotunplug_complete_show, NULL);
-
-static void hotkey_wakeup_hotunplug_complete_notify_change(void)
-{
-	if (tp_features.hotkey_mask)
-		sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
-			     "wakeup_hotunplug_complete");
-}
-
-/* --------------------------------------------------------------------- */
-
-static struct attribute *hotkey_attributes[] __initdata = {
-	&dev_attr_hotkey_enable.attr,
-	&dev_attr_hotkey_bios_enabled.attr,
-	&dev_attr_hotkey_report_mode.attr,
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-	&dev_attr_hotkey_mask.attr,
-	&dev_attr_hotkey_all_mask.attr,
-	&dev_attr_hotkey_recommended_mask.attr,
-	&dev_attr_hotkey_source_mask.attr,
-	&dev_attr_hotkey_poll_freq.attr,
-#endif
-};
-
-static struct attribute *hotkey_mask_attributes[] __initdata = {
-	&dev_attr_hotkey_bios_mask.attr,
-#ifndef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-	&dev_attr_hotkey_mask.attr,
-	&dev_attr_hotkey_all_mask.attr,
-	&dev_attr_hotkey_recommended_mask.attr,
-#endif
-	&dev_attr_hotkey_wakeup_reason.attr,
-	&dev_attr_hotkey_wakeup_hotunplug_complete.attr,
-};
-
-static void bluetooth_update_rfk(void);
-static void wan_update_rfk(void);
-static void tpacpi_send_radiosw_update(void)
-{
-	int wlsw;
-
-	/* Sync these BEFORE sending any rfkill events */
-	if (tp_features.bluetooth)
-		bluetooth_update_rfk();
-	if (tp_features.wan)
-		wan_update_rfk();
-
-	if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw)) {
-		mutex_lock(&tpacpi_inputdev_send_mutex);
-
-		input_report_switch(tpacpi_inputdev,
-				    SW_RFKILL_ALL, !!wlsw);
-		input_sync(tpacpi_inputdev);
-
-		mutex_unlock(&tpacpi_inputdev_send_mutex);
-	}
-	hotkey_radio_sw_notify_change();
-}
-
-static void hotkey_exit(void)
-{
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-	hotkey_poll_stop_sync();
-#endif
-
-	if (hotkey_dev_attributes)
-		delete_attr_set(hotkey_dev_attributes, &tpacpi_pdev->dev.kobj);
-
-	kfree(hotkey_keycode_map);
-
-	if (tp_features.hotkey) {
-		dbg_printk(TPACPI_DBG_EXIT,
-			   "restoring original hot key mask\n");
-		/* no short-circuit boolean operator below! */
-		if ((hotkey_mask_set(hotkey_orig_mask) |
-		     hotkey_status_set(hotkey_orig_status)) != 0)
-			printk(TPACPI_ERR
-			       "failed to restore hot key mask "
-			       "to BIOS defaults\n");
-	}
-}
-
-static int __init hotkey_init(struct ibm_init_struct *iibm)
-{
-	/* Requirements for changing the default keymaps:
-	 *
-	 * 1. Many of the keys are mapped to KEY_RESERVED for very
-	 *    good reasons.  Do not change them unless you have deep
-	 *    knowledge of the IBM and Lenovo ThinkPad firmware for
-	 *    the various ThinkPad models.  The driver behaves
-	 *    differently for KEY_RESERVED: such keys have their
-	 *    hot key mask *unset* in mask_recommended, and also
-	 *    in the initial hot key mask programmed into the
-	 *    firmware at driver load time, which means the
-	 *    firmware may react very differently if you change them to
-	 *    something else;
-	 *
-	 * 2. You must be subscribed to the linux-thinkpad and
-	 *    ibm-acpi-devel mailing lists, and you should read the
-	 *    list archives since 2007 if you want to change the
-	 *    keymaps.  This requirement exists so that you will
-	 *    know the past history of problems with the thinkpad-
-	 *    acpi driver keymaps, and also that you will be
-	 *    listening to any bug reports;
-	 *
-	 * 3. Do not send thinkpad-acpi specific patches directly
-	 *    for merging, *ever*.  Send them to the linux-acpi
-	 *    mailing list for comments.  Merging is to be done only
-	 *    through acpi-test and the ACPI maintainer.
-	 *
-	 * If the above is too much to ask, don't change the keymap.
-	 * Ask the thinkpad-acpi maintainer to do it, instead.
-	 */
-	static u16 ibm_keycode_map[] __initdata = {
-		/* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
-		KEY_FN_F1,	KEY_FN_F2,	KEY_COFFEE,	KEY_SLEEP,
-		KEY_WLAN,	KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
-		KEY_FN_F9,	KEY_FN_F10,	KEY_FN_F11,	KEY_SUSPEND,
-
-		/* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
-		KEY_UNKNOWN,	/* 0x0C: FN+BACKSPACE */
-		KEY_UNKNOWN,	/* 0x0D: FN+INSERT */
-		KEY_UNKNOWN,	/* 0x0E: FN+DELETE */
-
-		/* brightness: firmware always reacts to them, unless
-		 * X.org did some tricks in the radeon BIOS scratch
-		 * registers of *some* models */
-		KEY_RESERVED,	/* 0x0F: FN+HOME (brightness up) */
-		KEY_RESERVED,	/* 0x10: FN+END (brightness down) */
-
-		/* Thinklight: firmware always reacts to it */
-		KEY_RESERVED,	/* 0x11: FN+PGUP (thinklight toggle) */
-
-		KEY_UNKNOWN,	/* 0x12: FN+PGDOWN */
-		KEY_ZOOM,	/* 0x13: FN+SPACE (zoom) */
-
-		/* Volume: firmware always reacts to it and reprograms
-		 * the built-in *extra* mixer.  Never map it to control
-		 * another mixer by default. */
-		KEY_RESERVED,	/* 0x14: VOLUME UP */
-		KEY_RESERVED,	/* 0x15: VOLUME DOWN */
-		KEY_RESERVED,	/* 0x16: MUTE */
-
-		KEY_VENDOR,	/* 0x17: Thinkpad/AccessIBM/Lenovo */
-
-		/* (assignments unknown, please report if found) */
-		KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
-		KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
-	};
-	static u16 lenovo_keycode_map[] __initdata = {
-		/* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
-		KEY_FN_F1,	KEY_COFFEE,	KEY_BATTERY,	KEY_SLEEP,
-		KEY_WLAN,	KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
-		KEY_FN_F9,	KEY_FN_F10,	KEY_FN_F11,	KEY_SUSPEND,
-
-		/* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
-		KEY_UNKNOWN,	/* 0x0C: FN+BACKSPACE */
-		KEY_UNKNOWN,	/* 0x0D: FN+INSERT */
-		KEY_UNKNOWN,	/* 0x0E: FN+DELETE */
-
-		/* These either have to go through ACPI video, or
-		 * act as they do on IBM ThinkPads, so don't ever
-		 * enable them by default */
-		KEY_RESERVED,	/* 0x0F: FN+HOME (brightness up) */
-		KEY_RESERVED,	/* 0x10: FN+END (brightness down) */
-
-		KEY_RESERVED,	/* 0x11: FN+PGUP (thinklight toggle) */
-
-		KEY_UNKNOWN,	/* 0x12: FN+PGDOWN */
-		KEY_ZOOM,	/* 0x13: FN+SPACE (zoom) */
-
-		/* Volume: z60/z61, T60 (BIOS version?): firmware always
-		 * reacts to it and reprograms the built-in *extra* mixer.
-		 * Never map it to control another mixer by default.
-		 *
-		 * T60?, T61, R60?, R61: firmware and EC try to send
-		 * these over the regular keyboard, so these are no-ops,
-		 * but there are still weird bugs re. MUTE, so do not
-		 * change unless you get test reports from all Lenovo
-		 * models.  May cause the BIOS to interfere with the
-		 * HDA mixer.
-		 */
-		KEY_RESERVED,	/* 0x14: VOLUME UP */
-		KEY_RESERVED,	/* 0x15: VOLUME DOWN */
-		KEY_RESERVED,	/* 0x16: MUTE */
-
-		KEY_VENDOR,	/* 0x17: Thinkpad/AccessIBM/Lenovo */
-
-		/* (assignments unknown, please report if found) */
-		KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
-		KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
-	};
-
-#define TPACPI_HOTKEY_MAP_LEN		ARRAY_SIZE(ibm_keycode_map)
-#define TPACPI_HOTKEY_MAP_SIZE		sizeof(ibm_keycode_map)
-#define TPACPI_HOTKEY_MAP_TYPESIZE	sizeof(ibm_keycode_map[0])
-
-	int res, i;
-	int status;
-	int hkeyv;
-
-	vdbg_printk(TPACPI_DBG_INIT, "initializing hotkey subdriver\n");
-
-	BUG_ON(!tpacpi_inputdev);
-	BUG_ON(tpacpi_inputdev->open != NULL ||
-	       tpacpi_inputdev->close != NULL);
-
-	TPACPI_ACPIHANDLE_INIT(hkey);
-	mutex_init(&hotkey_mutex);
-
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-	mutex_init(&hotkey_thread_mutex);
-	mutex_init(&hotkey_thread_data_mutex);
-#endif
-
-	/* hotkey not supported on 570 */
-	tp_features.hotkey = hkey_handle != NULL;
-
-	vdbg_printk(TPACPI_DBG_INIT, "hotkeys are %s\n",
-		str_supported(tp_features.hotkey));
-
-	if (!tp_features.hotkey)
-		return 1;
-
-	tpacpi_disable_brightness_delay();
-
-	hotkey_dev_attributes = create_attr_set(13, NULL);
-	if (!hotkey_dev_attributes)
-		return -ENOMEM;
-	res = add_many_to_attr_set(hotkey_dev_attributes,
-			hotkey_attributes,
-			ARRAY_SIZE(hotkey_attributes));
-	if (res)
-		goto err_exit;
-
-	/* mask not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
-	   A30, R30, R31, T20-22, X20-21, X22-24.  Detected by checking
-	   for HKEY interface version 0x100 */
-	if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
-		if ((hkeyv >> 8) != 1) {
-			printk(TPACPI_ERR "unknown version of the "
-			       "HKEY interface: 0x%x\n", hkeyv);
-			printk(TPACPI_ERR "please report this to %s\n",
-			       TPACPI_MAIL);
-		} else {
-			/*
-			 * MHKV 0x100 in A31, R40, R40e,
-			 * T4x, X31, and later
-			 */
-			tp_features.hotkey_mask = 1;
-		}
-	}
-
-	vdbg_printk(TPACPI_DBG_INIT, "hotkey masks are %s\n",
-		str_supported(tp_features.hotkey_mask));
-
-	if (tp_features.hotkey_mask) {
-		if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
-				"MHKA", "qd")) {
-			printk(TPACPI_ERR
-			       "missing MHKA handler, "
-			       "please report this to %s\n",
-			       TPACPI_MAIL);
-			/* FN+F12, FN+F4, FN+F3 */
-			hotkey_all_mask = 0x080cU;
-		}
-	}
-
-	/* hotkey_source_mask *must* be zero for
-	 * the first hotkey_mask_get */
-	res = hotkey_status_get(&hotkey_orig_status);
-	if (res)
-		goto err_exit;
-
-	if (tp_features.hotkey_mask) {
-		res = hotkey_mask_get();
-		if (res)
-			goto err_exit;
-
-		hotkey_orig_mask = hotkey_mask;
-		res = add_many_to_attr_set(
-				hotkey_dev_attributes,
-				hotkey_mask_attributes,
-				ARRAY_SIZE(hotkey_mask_attributes));
-		if (res)
-			goto err_exit;
-	}
-
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-	if (tp_features.hotkey_mask) {
-		hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK
-					& ~hotkey_all_mask;
-	} else {
-		hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK;
-	}
-
-	vdbg_printk(TPACPI_DBG_INIT,
-		    "hotkey source mask 0x%08x, polling freq %d\n",
-		    hotkey_source_mask, hotkey_poll_freq);
-#endif
-
-	/* Not all thinkpads have a hardware radio switch */
-	if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
-		tp_features.hotkey_wlsw = 1;
-		printk(TPACPI_INFO
-			"radio switch found; radios are %s\n",
-			enabled(status, 0));
-	}
-	if (tp_features.hotkey_wlsw)
-		res = add_to_attr_set(hotkey_dev_attributes,
-				&dev_attr_hotkey_radio_sw.attr);
-
-	/* For X41t, X60t, X61t Tablets... */
-	if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) {
-		tp_features.hotkey_tablet = 1;
-		printk(TPACPI_INFO
-			"possible tablet mode switch found; "
-			"ThinkPad in %s mode\n",
-			(status & TP_HOTKEY_TABLET_MASK)?
-				"tablet" : "laptop");
-		res = add_to_attr_set(hotkey_dev_attributes,
-				&dev_attr_hotkey_tablet_mode.attr);
-	}
-
-	if (!res)
-		res = register_attr_set_with_sysfs(
-				hotkey_dev_attributes,
-				&tpacpi_pdev->dev.kobj);
-	if (res)
-		goto err_exit;
-
-	/* Set up key map */
-
-	hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE,
-					GFP_KERNEL);
-	if (!hotkey_keycode_map) {
-		printk(TPACPI_ERR
-			"failed to allocate memory for key map\n");
-		res = -ENOMEM;
-		goto err_exit;
-	}
-
-	if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO) {
-		dbg_printk(TPACPI_DBG_INIT,
-			   "using Lenovo default hot key map\n");
-		memcpy(hotkey_keycode_map, &lenovo_keycode_map,
-			TPACPI_HOTKEY_MAP_SIZE);
-	} else {
-		dbg_printk(TPACPI_DBG_INIT,
-			   "using IBM default hot key map\n");
-		memcpy(hotkey_keycode_map, &ibm_keycode_map,
-			TPACPI_HOTKEY_MAP_SIZE);
-	}
-
-	set_bit(EV_KEY, tpacpi_inputdev->evbit);
-	set_bit(EV_MSC, tpacpi_inputdev->evbit);
-	set_bit(MSC_SCAN, tpacpi_inputdev->mscbit);
-	tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE;
-	tpacpi_inputdev->keycodemax = TPACPI_HOTKEY_MAP_LEN;
-	tpacpi_inputdev->keycode = hotkey_keycode_map;
-	for (i = 0; i < TPACPI_HOTKEY_MAP_LEN; i++) {
-		if (hotkey_keycode_map[i] != KEY_RESERVED) {
-			set_bit(hotkey_keycode_map[i],
-				tpacpi_inputdev->keybit);
-		} else {
-			if (i < sizeof(hotkey_reserved_mask)*8)
-				hotkey_reserved_mask |= 1 << i;
-		}
-	}
-
-	if (tp_features.hotkey_wlsw) {
-		set_bit(EV_SW, tpacpi_inputdev->evbit);
-		set_bit(SW_RFKILL_ALL, tpacpi_inputdev->swbit);
-	}
-	if (tp_features.hotkey_tablet) {
-		set_bit(EV_SW, tpacpi_inputdev->evbit);
-		set_bit(SW_TABLET_MODE, tpacpi_inputdev->swbit);
-	}
-
-	/* Do not issue duplicate brightness change events to
-	 * userspace */
-	if (!tp_features.bright_acpimode)
-		/* update bright_acpimode... */
-		tpacpi_check_std_acpi_brightness_support();
-
-	if (tp_features.bright_acpimode) {
-		printk(TPACPI_INFO
-		       "This ThinkPad has standard ACPI backlight "
-		       "brightness control, supported by the ACPI "
-		       "video driver\n");
-		printk(TPACPI_NOTICE
-		       "Disabling thinkpad-acpi brightness events "
-		       "by default...\n");
-
-		/* The hotkey_reserved_mask change below is not
-		 * necessary while the keys are at KEY_RESERVED in the
-		 * default map, but better safe than sorry, leave it
-		 * here as a marker of what we have to do, especially
-		 * when we finally become able to set this at runtime
-		 * in response to X.org requests */
-		hotkey_reserved_mask |=
-			(1 << TP_ACPI_HOTKEYSCAN_FNHOME)
-			| (1 << TP_ACPI_HOTKEYSCAN_FNEND);
-	}
-
-	dbg_printk(TPACPI_DBG_INIT, "enabling hot key handling\n");
-	res = hotkey_status_set(1);
-	if (res) {
-		hotkey_exit();
-		return res;
-	}
-	res = hotkey_mask_set(((hotkey_all_mask | hotkey_source_mask)
-				& ~hotkey_reserved_mask)
-				| hotkey_orig_mask);
-	if (res < 0 && res != -ENXIO) {
-		hotkey_exit();
-		return res;
-	}
-
-	dbg_printk(TPACPI_DBG_INIT,
-			"legacy hot key reporting over procfs %s\n",
-			(hotkey_report_mode < 2) ?
-				"enabled" : "disabled");
-
-	tpacpi_inputdev->open = &hotkey_inputdev_open;
-	tpacpi_inputdev->close = &hotkey_inputdev_close;
-
-	hotkey_poll_setup_safe(1);
-	tpacpi_send_radiosw_update();
-	tpacpi_input_send_tabletsw();
-
-	return 0;
-
-err_exit:
-	delete_attr_set(hotkey_dev_attributes, &tpacpi_pdev->dev.kobj);
-	hotkey_dev_attributes = NULL;
-
-	return (res < 0)? res : 1;
-}
-
-static void hotkey_notify(struct ibm_struct *ibm, u32 event)
-{
-	u32 hkey;
-	unsigned int scancode;
-	int send_acpi_ev;
-	int ignore_acpi_ev;
-	int unk_ev;
-
-	if (event != 0x80) {
-		printk(TPACPI_ERR
-		       "unknown HKEY notification event %d\n", event);
-		/* forward it to userspace, maybe it knows how to handle it */
-		acpi_bus_generate_netlink_event(
-					ibm->acpi->device->pnp.device_class,
-					ibm->acpi->device->dev.bus_id,
-					event, 0);
-		return;
-	}
-
-	while (1) {
-		if (!acpi_evalf(hkey_handle, &hkey, "MHKP", "d")) {
-			printk(TPACPI_ERR "failed to retrieve HKEY event\n");
-			return;
-		}
-
-		if (hkey == 0) {
-			/* queue empty */
-			return;
-		}
-
-		send_acpi_ev = 1;
-		ignore_acpi_ev = 0;
-		unk_ev = 0;
-
-		switch (hkey >> 12) {
-		case 1:
-			/* 0x1000-0x1FFF: key presses */
-			scancode = hkey & 0xfff;
-			if (scancode > 0 && scancode < 0x21) {
-				scancode--;
-				if (!(hotkey_source_mask & (1 << scancode))) {
-					tpacpi_input_send_key(scancode);
-					send_acpi_ev = 0;
-				} else {
-					ignore_acpi_ev = 1;
-				}
-			} else {
-				unk_ev = 1;
-			}
-			break;
-		case 2:
-			/* Wakeup reason */
-			switch (hkey) {
-			case 0x2304: /* suspend, undock */
-			case 0x2404: /* hibernation, undock */
-				hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK;
-				ignore_acpi_ev = 1;
-				break;
-			case 0x2305: /* suspend, bay eject */
-			case 0x2405: /* hibernation, bay eject */
-				hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ;
-				ignore_acpi_ev = 1;
-				break;
-			default:
-				unk_ev = 1;
-			}
-			if (hotkey_wakeup_reason != TP_ACPI_WAKEUP_NONE) {
-				printk(TPACPI_INFO
-				       "woke up due to a hot-unplug "
-				       "request...\n");
-				hotkey_wakeup_reason_notify_change();
-			}
-			break;
-		case 3:
-			/* bay-related wakeups */
-			if (hkey == 0x3003) {
-				hotkey_autosleep_ack = 1;
-				printk(TPACPI_INFO
-				       "bay ejected\n");
-				hotkey_wakeup_hotunplug_complete_notify_change();
-			} else {
-				unk_ev = 1;
-			}
-			break;
-		case 4:
-			/* dock-related wakeups */
-			if (hkey == 0x4003) {
-				hotkey_autosleep_ack = 1;
-				printk(TPACPI_INFO
-				       "undocked\n");
-				hotkey_wakeup_hotunplug_complete_notify_change();
-			} else {
-				unk_ev = 1;
-			}
-			break;
-		case 5:
-			/* 0x5000-0x5FFF: human interface helpers */
-			switch (hkey) {
-			case 0x5010: /* Lenovo new BIOS: brightness changed */
-			case 0x500b: /* X61t: tablet pen inserted into bay */
-			case 0x500c: /* X61t: tablet pen removed from bay */
-				break;
-			case 0x5009: /* X41t-X61t: swivel up (tablet mode) */
-			case 0x500a: /* X41t-X61t: swivel down (normal mode) */
-				tpacpi_input_send_tabletsw();
-				hotkey_tablet_mode_notify_change();
-				send_acpi_ev = 0;
-				break;
-			case 0x5001:
-			case 0x5002:
-				/* LID switch events.  Do not propagate */
-				ignore_acpi_ev = 1;
-				break;
-			default:
-				unk_ev = 1;
-			}
-			break;
-		case 7:
-			/* 0x7000-0x7FFF: misc */
-			if (tp_features.hotkey_wlsw && hkey == 0x7000) {
-				tpacpi_send_radiosw_update();
-				send_acpi_ev = 0;
-				break;
-			}
-			/* fallthrough to default */
-		default:
-			unk_ev = 1;
-		}
-		if (unk_ev) {
-			printk(TPACPI_NOTICE
-			       "unhandled HKEY event 0x%04x\n", hkey);
-		}
-
-		/* Legacy events */
-		if (!ignore_acpi_ev &&
-		    (send_acpi_ev || hotkey_report_mode < 2)) {
-			acpi_bus_generate_proc_event(ibm->acpi->device,
-						     event, hkey);
-		}
-
-		/* netlink events */
-		if (!ignore_acpi_ev && send_acpi_ev) {
-			acpi_bus_generate_netlink_event(
-					ibm->acpi->device->pnp.device_class,
-					ibm->acpi->device->dev.bus_id,
-					event, hkey);
-		}
-	}
-}
-
-static void hotkey_suspend(pm_message_t state)
-{
-	/* Do these on suspend; we get the events on early resume! */
-	hotkey_wakeup_reason = TP_ACPI_WAKEUP_NONE;
-	hotkey_autosleep_ack = 0;
-}
-
-static void hotkey_resume(void)
-{
-	tpacpi_disable_brightness_delay();
-
-	if (hotkey_mask_get())
-		printk(TPACPI_ERR
-		       "error while trying to read hot key mask "
-		       "from firmware\n");
-	tpacpi_send_radiosw_update();
-	hotkey_tablet_mode_notify_change();
-	hotkey_wakeup_reason_notify_change();
-	hotkey_wakeup_hotunplug_complete_notify_change();
-	hotkey_poll_setup_safe(0);
-}
-
-/* procfs -------------------------------------------------------------- */
-static int hotkey_read(char *p)
-{
-	int res, status;
-	int len = 0;
-
-	if (!tp_features.hotkey) {
-		len += sprintf(p + len, "status:\t\tnot supported\n");
-		return len;
-	}
-
-	if (mutex_lock_interruptible(&hotkey_mutex))
-		return -ERESTARTSYS;
-	res = hotkey_status_get(&status);
-	if (!res)
-		res = hotkey_mask_get();
-	mutex_unlock(&hotkey_mutex);
-	if (res)
-		return res;
-
-	len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0));
-	if (tp_features.hotkey_mask) {
-		len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_mask);
-		len += sprintf(p + len,
-			       "commands:\tenable, disable, reset, <mask>\n");
-	} else {
-		len += sprintf(p + len, "mask:\t\tnot supported\n");
-		len += sprintf(p + len, "commands:\tenable, disable, reset\n");
-	}
-
-	return len;
-}
-
-static int hotkey_write(char *buf)
-{
-	int res, status;
-	u32 mask;
-	char *cmd;
-
-	if (!tp_features.hotkey)
-		return -ENODEV;
-
-	if (mutex_lock_interruptible(&hotkey_mutex))
-		return -ERESTARTSYS;
-
-	status = -1;
-	mask = hotkey_mask;
-
-	res = 0;
-	while ((cmd = next_cmd(&buf))) {
-		if (strlencmp(cmd, "enable") == 0) {
-			status = 1;
-		} else if (strlencmp(cmd, "disable") == 0) {
-			status = 0;
-		} else if (strlencmp(cmd, "reset") == 0) {
-			status = hotkey_orig_status;
-			mask = hotkey_orig_mask;
-		} else if (sscanf(cmd, "0x%x", &mask) == 1) {
-			/* mask set */
-		} else if (sscanf(cmd, "%x", &mask) == 1) {
-			/* mask set */
-		} else {
-			res = -EINVAL;
-			goto errexit;
-		}
-	}
-	if (status != -1)
-		res = hotkey_status_set(status);
-
-	if (!res && mask != hotkey_mask)
-		res = hotkey_mask_set(mask);
-
-errexit:
-	mutex_unlock(&hotkey_mutex);
-	return res;
-}
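-/*
- * Illustrative procfs usage for the hotkey subdriver above (assuming the
- * driver's usual /proc/acpi/ibm/ location):
- *
- *   echo enable     > /proc/acpi/ibm/hotkey
- *   echo 0xffffffff > /proc/acpi/ibm/hotkey	(set the hot key mask)
- *   echo reset      > /proc/acpi/ibm/hotkey	(restore the original state)
- */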
-
-static const struct acpi_device_id ibm_htk_device_ids[] = {
-	{TPACPI_ACPI_HKEY_HID, 0},
-	{"", 0},
-};
-
-static struct tp_acpi_drv_struct ibm_hotkey_acpidriver = {
-	.hid = ibm_htk_device_ids,
-	.notify = hotkey_notify,
-	.handle = &hkey_handle,
-	.type = ACPI_DEVICE_NOTIFY,
-};
-
-static struct ibm_struct hotkey_driver_data = {
-	.name = "hotkey",
-	.read = hotkey_read,
-	.write = hotkey_write,
-	.exit = hotkey_exit,
-	.resume = hotkey_resume,
-	.suspend = hotkey_suspend,
-	.acpi = &ibm_hotkey_acpidriver,
-};
-
-/*************************************************************************
- * Bluetooth subdriver
- */
-
-enum {
-	/* ACPI GBDC/SBDC bits */
-	TP_ACPI_BLUETOOTH_HWPRESENT	= 0x01,	/* Bluetooth hw available */
-	TP_ACPI_BLUETOOTH_RADIOSSW	= 0x02,	/* Bluetooth radio enabled */
-	TP_ACPI_BLUETOOTH_UNK		= 0x04,	/* unknown function */
-};
-
-static struct rfkill *tpacpi_bluetooth_rfkill;
-
-static int bluetooth_get_radiosw(void)
-{
-	int status;
-
-	if (!tp_features.bluetooth)
-		return -ENODEV;
-
-	/* WLSW overrides bluetooth in firmware/hardware, reflect that */
-	if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status)
-		return RFKILL_STATE_HARD_BLOCKED;
-
-	if (!acpi_evalf(hkey_handle, &status, "GBDC", "d"))
-		return -EIO;
-
-	return ((status & TP_ACPI_BLUETOOTH_RADIOSSW) != 0) ?
-		RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
-}
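-/*
- * Note on the mapping above: a cleared WLSW (master radio switch) is
- * reported as HARD_BLOCKED, since firmware kills the radio regardless of
- * the GBDC soft switch; otherwise RADIOSSW selects UNBLOCKED versus
- * SOFT_BLOCKED.
- */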
-
-static void bluetooth_update_rfk(void)
-{
-	int status;
-
-	if (!tpacpi_bluetooth_rfkill)
-		return;
-
-	status = bluetooth_get_radiosw();
-	if (status < 0)
-		return;
-	rfkill_force_state(tpacpi_bluetooth_rfkill, status);
-}
-
-static int bluetooth_set_radiosw(int radio_on, int update_rfk)
-{
-	int status;
-
-	if (!tp_features.bluetooth)
-		return -ENODEV;
-
-	/* WLSW overrides bluetooth in firmware/hardware, but there is no
-	 * reason to risk weird behaviour. */
-	if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status
-	    && radio_on)
-		return -EPERM;
-
-	if (!acpi_evalf(hkey_handle, &status, "GBDC", "d"))
-		return -EIO;
-	if (radio_on)
-		status |= TP_ACPI_BLUETOOTH_RADIOSSW;
-	else
-		status &= ~TP_ACPI_BLUETOOTH_RADIOSSW;
-	if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
-		return -EIO;
-
-	if (update_rfk)
-		bluetooth_update_rfk();
-
-	return 0;
-}
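-/*
- * Example of the GBDC/SBDC bit handling above: with bluetooth hardware
- * present and the radio soft-blocked, GBDC returns 0x01 (HWPRESENT);
- * enabling the radio sets RADIOSSW and writes 0x03 back through SBDC.
- */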
-
-/* sysfs bluetooth enable ---------------------------------------------- */
-static ssize_t bluetooth_enable_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	int status;
-
-	status = bluetooth_get_radiosw();
-	if (status < 0)
-		return status;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-			(status == RFKILL_STATE_UNBLOCKED) ? 1 : 0);
-}
-
-static ssize_t bluetooth_enable_store(struct device *dev,
-			    struct device_attribute *attr,
-			    const char *buf, size_t count)
-{
-	unsigned long t;
-	int res;
-
-	if (parse_strtoul(buf, 1, &t))
-		return -EINVAL;
-
-	res = bluetooth_set_radiosw(t, 1);
-
-	return (res) ? res : count;
-}
-
-static struct device_attribute dev_attr_bluetooth_enable =
-	__ATTR(bluetooth_enable, S_IWUSR | S_IRUGO,
-		bluetooth_enable_show, bluetooth_enable_store);
-
-/* --------------------------------------------------------------------- */
-
-static struct attribute *bluetooth_attributes[] = {
-	&dev_attr_bluetooth_enable.attr,
-	NULL
-};
-
-static const struct attribute_group bluetooth_attr_group = {
-	.attrs = bluetooth_attributes,
-};
-
-static int tpacpi_bluetooth_rfk_get(void *data, enum rfkill_state *state)
-{
-	int bts = bluetooth_get_radiosw();
-
-	if (bts < 0)
-		return bts;
-
-	*state = bts;
-	return 0;
-}
-
-static int tpacpi_bluetooth_rfk_set(void *data, enum rfkill_state state)
-{
-	return bluetooth_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0);
-}
-
-static void bluetooth_exit(void)
-{
-	if (tpacpi_bluetooth_rfkill)
-		rfkill_unregister(tpacpi_bluetooth_rfkill);
-
-	sysfs_remove_group(&tpacpi_pdev->dev.kobj,
-			&bluetooth_attr_group);
-}
-
-static int __init bluetooth_init(struct ibm_init_struct *iibm)
-{
-	int res;
-	int status = 0;
-
-	vdbg_printk(TPACPI_DBG_INIT, "initializing bluetooth subdriver\n");
-
-	TPACPI_ACPIHANDLE_INIT(hkey);
-
-	/* bluetooth not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
-	   G4x, R30, R31, R40e, R50e, T20-22, X20-21 */
-	tp_features.bluetooth = hkey_handle &&
-	    acpi_evalf(hkey_handle, &status, "GBDC", "qd");
-
-	vdbg_printk(TPACPI_DBG_INIT, "bluetooth is %s, status 0x%02x\n",
-		str_supported(tp_features.bluetooth),
-		status);
-
-	if (tp_features.bluetooth &&
-	    !(status & TP_ACPI_BLUETOOTH_HWPRESENT)) {
-		/* no bluetooth hardware present in system */
-		tp_features.bluetooth = 0;
-		dbg_printk(TPACPI_DBG_INIT,
-			   "bluetooth hardware not installed\n");
-	}
-
-	if (!tp_features.bluetooth)
-		return 1;
-
-	res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
-				&bluetooth_attr_group);
-	if (res)
-		return res;
-
-	res = tpacpi_new_rfkill(TPACPI_RFK_BLUETOOTH_SW_ID,
-				&tpacpi_bluetooth_rfkill,
-				RFKILL_TYPE_BLUETOOTH,
-				"tpacpi_bluetooth_sw",
-				tpacpi_bluetooth_rfk_set,
-				tpacpi_bluetooth_rfk_get);
-	if (res) {
-		bluetooth_exit();
-		return res;
-	}
-
-	return 0;
-}
-
-/* procfs -------------------------------------------------------------- */
-static int bluetooth_read(char *p)
-{
-	int len = 0;
-	int status = bluetooth_get_radiosw();
-
-	if (!tp_features.bluetooth)
-		len += sprintf(p + len, "status:\t\tnot supported\n");
-	else {
-		len += sprintf(p + len, "status:\t\t%s\n",
-				(status == RFKILL_STATE_UNBLOCKED) ?
-					"enabled" : "disabled");
-		len += sprintf(p + len, "commands:\tenable, disable\n");
-	}
-
-	return len;
-}
-
-static int bluetooth_write(char *buf)
-{
-	char *cmd;
-
-	if (!tp_features.bluetooth)
-		return -ENODEV;
-
-	while ((cmd = next_cmd(&buf))) {
-		if (strlencmp(cmd, "enable") == 0) {
-			bluetooth_set_radiosw(1, 1);
-		} else if (strlencmp(cmd, "disable") == 0) {
-			bluetooth_set_radiosw(0, 1);
-		} else
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-static struct ibm_struct bluetooth_driver_data = {
-	.name = "bluetooth",
-	.read = bluetooth_read,
-	.write = bluetooth_write,
-	.exit = bluetooth_exit,
-};
-
-/*************************************************************************
- * Wan subdriver
- */
-
-enum {
-	/* ACPI GWAN/SWAN bits */
-	TP_ACPI_WANCARD_HWPRESENT	= 0x01,	/* Wan hw available */
-	TP_ACPI_WANCARD_RADIOSSW	= 0x02,	/* Wan radio enabled */
-	TP_ACPI_WANCARD_UNK		= 0x04,	/* unknown function */
-};
-
-static struct rfkill *tpacpi_wan_rfkill;
-
-static int wan_get_radiosw(void)
-{
-	int status;
-
-	if (!tp_features.wan)
-		return -ENODEV;
-
-	/* WLSW overrides WWAN in firmware/hardware, reflect that */
-	if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status)
-		return RFKILL_STATE_HARD_BLOCKED;
-
-	if (!acpi_evalf(hkey_handle, &status, "GWAN", "d"))
-		return -EIO;
-
-	return ((status & TP_ACPI_WANCARD_RADIOSSW) != 0) ?
-		RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
-}
-
-static void wan_update_rfk(void)
-{
-	int status;
-
-	if (!tpacpi_wan_rfkill)
-		return;
-
-	status = wan_get_radiosw();
-	if (status < 0)
-		return;
-	rfkill_force_state(tpacpi_wan_rfkill, status);
-}
-
-static int wan_set_radiosw(int radio_on, int update_rfk)
-{
-	int status;
-
-	if (!tp_features.wan)
-		return -ENODEV;
-
-	/* WLSW overrides WWAN in firmware/hardware, but there is no
-	 * reason to risk weird behaviour. */
-	if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status
-	    && radio_on)
-		return -EPERM;
-
-	if (!acpi_evalf(hkey_handle, &status, "GWAN", "d"))
-		return -EIO;
-	if (radio_on)
-		status |= TP_ACPI_WANCARD_RADIOSSW;
-	else
-		status &= ~TP_ACPI_WANCARD_RADIOSSW;
-	if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
-		return -EIO;
-
-	if (update_rfk)
-		wan_update_rfk();
-
-	return 0;
-}
-
-/* sysfs wan enable ---------------------------------------------------- */
-static ssize_t wan_enable_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	int status;
-
-	status = wan_get_radiosw();
-	if (status < 0)
-		return status;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-			(status == RFKILL_STATE_UNBLOCKED) ? 1 : 0);
-}
-
-static ssize_t wan_enable_store(struct device *dev,
-			    struct device_attribute *attr,
-			    const char *buf, size_t count)
-{
-	unsigned long t;
-	int res;
-
-	if (parse_strtoul(buf, 1, &t))
-		return -EINVAL;
-
-	res = wan_set_radiosw(t, 1);
-
-	return (res) ? res : count;
-}
-
-static struct device_attribute dev_attr_wan_enable =
-	__ATTR(wwan_enable, S_IWUSR | S_IRUGO,
-		wan_enable_show, wan_enable_store);
-
-/* --------------------------------------------------------------------- */
-
-static struct attribute *wan_attributes[] = {
-	&dev_attr_wan_enable.attr,
-	NULL
-};
-
-static const struct attribute_group wan_attr_group = {
-	.attrs = wan_attributes,
-};
-
-static int tpacpi_wan_rfk_get(void *data, enum rfkill_state *state)
-{
-	int wans = wan_get_radiosw();
-
-	if (wans < 0)
-		return wans;
-
-	*state = wans;
-	return 0;
-}
-
-static int tpacpi_wan_rfk_set(void *data, enum rfkill_state state)
-{
-	return wan_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0);
-}
-
-static void wan_exit(void)
-{
-	if (tpacpi_wan_rfkill)
-		rfkill_unregister(tpacpi_wan_rfkill);
-
-	sysfs_remove_group(&tpacpi_pdev->dev.kobj,
-		&wan_attr_group);
-}
-
-static int __init wan_init(struct ibm_init_struct *iibm)
-{
-	int res;
-	int status = 0;
-
-	vdbg_printk(TPACPI_DBG_INIT, "initializing wan subdriver\n");
-
-	TPACPI_ACPIHANDLE_INIT(hkey);
-
-	tp_features.wan = hkey_handle &&
-	    acpi_evalf(hkey_handle, &status, "GWAN", "qd");
-
-	vdbg_printk(TPACPI_DBG_INIT, "wan is %s, status 0x%02x\n",
-		str_supported(tp_features.wan),
-		status);
-
-	if (tp_features.wan &&
-	    !(status & TP_ACPI_WANCARD_HWPRESENT)) {
-		/* no wan hardware present in system */
-		tp_features.wan = 0;
-		dbg_printk(TPACPI_DBG_INIT,
-			   "wan hardware not installed\n");
-	}
-
-	if (!tp_features.wan)
-		return 1;
-
-	res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
-				&wan_attr_group);
-	if (res)
-		return res;
-
-	res = tpacpi_new_rfkill(TPACPI_RFK_WWAN_SW_ID,
-				&tpacpi_wan_rfkill,
-				RFKILL_TYPE_WWAN,
-				"tpacpi_wwan_sw",
-				tpacpi_wan_rfk_set,
-				tpacpi_wan_rfk_get);
-	if (res) {
-		wan_exit();
-		return res;
-	}
-
-	return 0;
-}
-
-/* procfs -------------------------------------------------------------- */
-static int wan_read(char *p)
-{
-	int len = 0;
-	int status = wan_get_radiosw();
-
-	if (!tp_features.wan)
-		len += sprintf(p + len, "status:\t\tnot supported\n");
-	else {
-		len += sprintf(p + len, "status:\t\t%s\n",
-				(status == RFKILL_STATE_UNBLOCKED) ?
-					"enabled" : "disabled");
-		len += sprintf(p + len, "commands:\tenable, disable\n");
-	}
-
-	return len;
-}
-
-static int wan_write(char *buf)
-{
-	char *cmd;
-
-	if (!tp_features.wan)
-		return -ENODEV;
-
-	while ((cmd = next_cmd(&buf))) {
-		if (strlencmp(cmd, "enable") == 0) {
-			wan_set_radiosw(1, 1);
-		} else if (strlencmp(cmd, "disable") == 0) {
-			wan_set_radiosw(0, 1);
-		} else
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-static struct ibm_struct wan_driver_data = {
-	.name = "wan",
-	.read = wan_read,
-	.write = wan_write,
-	.exit = wan_exit,
-};
-
-/*************************************************************************
- * Video subdriver
- */
-
-#ifdef CONFIG_THINKPAD_ACPI_VIDEO
-
-enum video_access_mode {
-	TPACPI_VIDEO_NONE = 0,
-	TPACPI_VIDEO_570,	/* 570 */
-	TPACPI_VIDEO_770,	/* 600e/x, 770e, 770x */
-	TPACPI_VIDEO_NEW,	/* all others */
-};
-
-enum {	/* video status flags, based on VIDEO_570 */
-	TP_ACPI_VIDEO_S_LCD = 0x01,	/* LCD output enabled */
-	TP_ACPI_VIDEO_S_CRT = 0x02,	/* CRT output enabled */
-	TP_ACPI_VIDEO_S_DVI = 0x08,	/* DVI output enabled */
-};
-
-enum {  /* TPACPI_VIDEO_570 constants */
-	TP_ACPI_VIDEO_570_PHSCMD = 0x87,	/* unknown magic constant :( */
-	TP_ACPI_VIDEO_570_PHSMASK = 0x03,	/* PHS bits that map to
-						 * video_status_flags */
-	TP_ACPI_VIDEO_570_PHS2CMD = 0x8b,	/* unknown magic constant :( */
-	TP_ACPI_VIDEO_570_PHS2SET = 0x80,	/* unknown magic constant :( */
-};
-
-static enum video_access_mode video_supported;
-static int video_orig_autosw;
-
-static int video_autosw_get(void);
-static int video_autosw_set(int enable);
-
-TPACPI_HANDLE(vid2, root, "\\_SB.PCI0.AGPB.VID");	/* G41 */
-
-static int __init video_init(struct ibm_init_struct *iibm)
-{
-	int ivga;
-
-	vdbg_printk(TPACPI_DBG_INIT, "initializing video subdriver\n");
-
-	TPACPI_ACPIHANDLE_INIT(vid);
-	TPACPI_ACPIHANDLE_INIT(vid2);
-
-	if (vid2_handle && acpi_evalf(NULL, &ivga, "\\IVGA", "d") && ivga)
-		/* G41, assume IVGA doesn't change */
-		vid_handle = vid2_handle;
-
-	if (!vid_handle)
-		/* video switching not supported on R30, R31 */
-		video_supported = TPACPI_VIDEO_NONE;
-	else if (acpi_evalf(vid_handle, &video_orig_autosw, "SWIT", "qd"))
-		/* 570 */
-		video_supported = TPACPI_VIDEO_570;
-	else if (acpi_evalf(vid_handle, &video_orig_autosw, "^VADL", "qd"))
-		/* 600e/x, 770e, 770x */
-		video_supported = TPACPI_VIDEO_770;
-	else
-		/* all others */
-		video_supported = TPACPI_VIDEO_NEW;
-
-	vdbg_printk(TPACPI_DBG_INIT, "video is %s, mode %d\n",
-		str_supported(video_supported != TPACPI_VIDEO_NONE),
-		video_supported);
-
-	return (video_supported != TPACPI_VIDEO_NONE)? 0 : 1;
-}
-
-static void video_exit(void)
-{
-	dbg_printk(TPACPI_DBG_EXIT,
-		   "restoring original video autoswitch mode\n");
-	if (video_autosw_set(video_orig_autosw))
-		printk(TPACPI_ERR "error while trying to restore original "
-			"video autoswitch mode\n");
-}
-
-static int video_outputsw_get(void)
-{
-	int status = 0;
-	int i;
-
-	switch (video_supported) {
-	case TPACPI_VIDEO_570:
-		if (!acpi_evalf(NULL, &i, "\\_SB.PHS", "dd",
-				 TP_ACPI_VIDEO_570_PHSCMD))
-			return -EIO;
-		status = i & TP_ACPI_VIDEO_570_PHSMASK;
-		break;
-	case TPACPI_VIDEO_770:
-		if (!acpi_evalf(NULL, &i, "\\VCDL", "d"))
-			return -EIO;
-		if (i)
-			status |= TP_ACPI_VIDEO_S_LCD;
-		if (!acpi_evalf(NULL, &i, "\\VCDC", "d"))
-			return -EIO;
-		if (i)
-			status |= TP_ACPI_VIDEO_S_CRT;
-		break;
-	case TPACPI_VIDEO_NEW:
-		if (!acpi_evalf(NULL, NULL, "\\VUPS", "vd", 1) ||
-		    !acpi_evalf(NULL, &i, "\\VCDC", "d"))
-			return -EIO;
-		if (i)
-			status |= TP_ACPI_VIDEO_S_CRT;
-
-		if (!acpi_evalf(NULL, NULL, "\\VUPS", "vd", 0) ||
-		    !acpi_evalf(NULL, &i, "\\VCDL", "d"))
-			return -EIO;
-		if (i)
-			status |= TP_ACPI_VIDEO_S_LCD;
-		if (!acpi_evalf(NULL, &i, "\\VCDD", "d"))
-			return -EIO;
-		if (i)
-			status |= TP_ACPI_VIDEO_S_DVI;
-		break;
-	default:
-		return -ENOSYS;
-	}
-
-	return status;
-}
-
-static int video_outputsw_set(int status)
-{
-	int autosw;
-	int res = 0;
-
-	switch (video_supported) {
-	case TPACPI_VIDEO_570:
-		res = acpi_evalf(NULL, NULL,
-				 "\\_SB.PHS2", "vdd",
-				 TP_ACPI_VIDEO_570_PHS2CMD,
-				 status | TP_ACPI_VIDEO_570_PHS2SET);
-		break;
-	case TPACPI_VIDEO_770:
-		autosw = video_autosw_get();
-		if (autosw < 0)
-			return autosw;
-
-		res = video_autosw_set(1);
-		if (res)
-			return res;
-		res = acpi_evalf(vid_handle, NULL,
-				 "ASWT", "vdd", status * 0x100, 0);
-		if (!autosw && video_autosw_set(autosw)) {
-			printk(TPACPI_ERR
-			       "video auto-switch left enabled due to error\n");
-			return -EIO;
-		}
-		break;
-	case TPACPI_VIDEO_NEW:
-		res = acpi_evalf(NULL, NULL, "\\VUPS", "vd", 0x80) &&
-		      acpi_evalf(NULL, NULL, "\\VSDS", "vdd", status, 1);
-		break;
-	default:
-		return -ENOSYS;
-	}
-
-	return (res)? 0 : -EIO;
-}
-
-static int video_autosw_get(void)
-{
-	int autosw = 0;
-
-	switch (video_supported) {
-	case TPACPI_VIDEO_570:
-		if (!acpi_evalf(vid_handle, &autosw, "SWIT", "d"))
-			return -EIO;
-		break;
-	case TPACPI_VIDEO_770:
-	case TPACPI_VIDEO_NEW:
-		if (!acpi_evalf(vid_handle, &autosw, "^VDEE", "d"))
-			return -EIO;
-		break;
-	default:
-		return -ENOSYS;
-	}
-
-	return autosw & 1;
-}
-
-static int video_autosw_set(int enable)
-{
-	if (!acpi_evalf(vid_handle, NULL, "_DOS", "vd", (enable)? 1 : 0))
-		return -EIO;
-	return 0;
-}
-
-static int video_outputsw_cycle(void)
-{
-	int autosw = video_autosw_get();
-	int res;
-
-	if (autosw < 0)
-		return autosw;
-
-	switch (video_supported) {
-	case TPACPI_VIDEO_570:
-		res = video_autosw_set(1);
-		if (res)
-			return res;
-		res = acpi_evalf(ec_handle, NULL, "_Q16", "v");
-		break;
-	case TPACPI_VIDEO_770:
-	case TPACPI_VIDEO_NEW:
-		res = video_autosw_set(1);
-		if (res)
-			return res;
-		res = acpi_evalf(vid_handle, NULL, "VSWT", "v");
-		break;
-	default:
-		return -ENOSYS;
-	}
-	if (!autosw && video_autosw_set(autosw)) {
-		printk(TPACPI_ERR
-		       "video auto-switch left enabled due to error\n");
-		return -EIO;
-	}
-
-	return (res)? 0 : -EIO;
-}
-
-static int video_expand_toggle(void)
-{
-	switch (video_supported) {
-	case TPACPI_VIDEO_570:
-		return acpi_evalf(ec_handle, NULL, "_Q17", "v")?
-			0 : -EIO;
-	case TPACPI_VIDEO_770:
-		return acpi_evalf(vid_handle, NULL, "VEXP", "v")?
-			0 : -EIO;
-	case TPACPI_VIDEO_NEW:
-		return acpi_evalf(NULL, NULL, "\\VEXP", "v")?
-			0 : -EIO;
-	default:
-		return -ENOSYS;
-	}
-	/* not reached */
-}
-
-static int video_read(char *p)
-{
-	int status, autosw;
-	int len = 0;
-
-	if (video_supported == TPACPI_VIDEO_NONE) {
-		len += sprintf(p + len, "status:\t\tnot supported\n");
-		return len;
-	}
-
-	status = video_outputsw_get();
-	if (status < 0)
-		return status;
-
-	autosw = video_autosw_get();
-	if (autosw < 0)
-		return autosw;
-
-	len += sprintf(p + len, "status:\t\tsupported\n");
-	len += sprintf(p + len, "lcd:\t\t%s\n", enabled(status, 0));
-	len += sprintf(p + len, "crt:\t\t%s\n", enabled(status, 1));
-	if (video_supported == TPACPI_VIDEO_NEW)
-		len += sprintf(p + len, "dvi:\t\t%s\n", enabled(status, 3));
-	len += sprintf(p + len, "auto:\t\t%s\n", enabled(autosw, 0));
-	len += sprintf(p + len, "commands:\tlcd_enable, lcd_disable\n");
-	len += sprintf(p + len, "commands:\tcrt_enable, crt_disable\n");
-	if (video_supported == TPACPI_VIDEO_NEW)
-		len += sprintf(p + len, "commands:\tdvi_enable, dvi_disable\n");
-	len += sprintf(p + len, "commands:\tauto_enable, auto_disable\n");
-	len += sprintf(p + len, "commands:\tvideo_switch, expand_toggle\n");
-
-	return len;
-}
-
-static int video_write(char *buf)
-{
-	char *cmd;
-	int enable, disable, status;
-	int res;
-
-	if (video_supported == TPACPI_VIDEO_NONE)
-		return -ENODEV;
-
-	enable = 0;
-	disable = 0;
-
-	while ((cmd = next_cmd(&buf))) {
-		if (strlencmp(cmd, "lcd_enable") == 0) {
-			enable |= TP_ACPI_VIDEO_S_LCD;
-		} else if (strlencmp(cmd, "lcd_disable") == 0) {
-			disable |= TP_ACPI_VIDEO_S_LCD;
-		} else if (strlencmp(cmd, "crt_enable") == 0) {
-			enable |= TP_ACPI_VIDEO_S_CRT;
-		} else if (strlencmp(cmd, "crt_disable") == 0) {
-			disable |= TP_ACPI_VIDEO_S_CRT;
-		} else if (video_supported == TPACPI_VIDEO_NEW &&
-			   strlencmp(cmd, "dvi_enable") == 0) {
-			enable |= TP_ACPI_VIDEO_S_DVI;
-		} else if (video_supported == TPACPI_VIDEO_NEW &&
-			   strlencmp(cmd, "dvi_disable") == 0) {
-			disable |= TP_ACPI_VIDEO_S_DVI;
-		} else if (strlencmp(cmd, "auto_enable") == 0) {
-			res = video_autosw_set(1);
-			if (res)
-				return res;
-		} else if (strlencmp(cmd, "auto_disable") == 0) {
-			res = video_autosw_set(0);
-			if (res)
-				return res;
-		} else if (strlencmp(cmd, "video_switch") == 0) {
-			res = video_outputsw_cycle();
-			if (res)
-				return res;
-		} else if (strlencmp(cmd, "expand_toggle") == 0) {
-			res = video_expand_toggle();
-			if (res)
-				return res;
-		} else
-			return -EINVAL;
-	}
-
-	if (enable || disable) {
-		status = video_outputsw_get();
-		if (status < 0)
-			return status;
-		res = video_outputsw_set((status & ~disable) | enable);
-		if (res)
-			return res;
-	}
-
-	return 0;
-}
-
-static struct ibm_struct video_driver_data = {
-	.name = "video",
-	.read = video_read,
-	.write = video_write,
-	.exit = video_exit,
-};
-
-#endif /* CONFIG_THINKPAD_ACPI_VIDEO */
-
-/*************************************************************************
- * Light (thinklight) subdriver
- */
-
-TPACPI_HANDLE(lght, root, "\\LGHT");	/* A21e, A2xm/p, T20-22, X20-21 */
-TPACPI_HANDLE(ledb, ec, "LEDB");		/* G4x */
-
-static int light_get_status(void)
-{
-	int status = 0;
-
-	if (tp_features.light_status) {
-		if (!acpi_evalf(ec_handle, &status, "KBLT", "d"))
-			return -EIO;
-		return (!!status);
-	}
-
-	return -ENXIO;
-}
-
-static int light_set_status(int status)
-{
-	int rc;
-
-	if (tp_features.light) {
-		if (cmos_handle) {
-			rc = acpi_evalf(cmos_handle, NULL, NULL, "vd",
-					(status)?
-						TP_CMOS_THINKLIGHT_ON :
-						TP_CMOS_THINKLIGHT_OFF);
-		} else {
-			rc = acpi_evalf(lght_handle, NULL, NULL, "vd",
-					(status)? 1 : 0);
-		}
-		return (rc)? 0 : -EIO;
-	}
-
-	return -ENXIO;
-}
-
-static void light_set_status_worker(struct work_struct *work)
-{
-	struct tpacpi_led_classdev *data =
-			container_of(work, struct tpacpi_led_classdev, work);
-
-	if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
-		light_set_status((data->new_brightness != LED_OFF));
-}
-
-static void light_sysfs_set(struct led_classdev *led_cdev,
-			enum led_brightness brightness)
-{
-	struct tpacpi_led_classdev *data =
-		container_of(led_cdev,
-			     struct tpacpi_led_classdev,
-			     led_classdev);
-	data->new_brightness = brightness;
-	queue_work(tpacpi_wq, &data->work);
-}
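-/*
- * The sysfs brightness callback above only queues work; the actual
- * KBLT/CMOS access happens in light_set_status_worker() on tpacpi_wq,
- * presumably because the LED class may call brightness_set from atomic
- * context while the ACPI/EC accesses can sleep.
- */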
-
-static enum led_brightness light_sysfs_get(struct led_classdev *led_cdev)
-{
-	return (light_get_status() == 1)? LED_FULL : LED_OFF;
-}
-
-static struct tpacpi_led_classdev tpacpi_led_thinklight = {
-	.led_classdev = {
-		.name		= "tpacpi::thinklight",
-		.brightness_set	= &light_sysfs_set,
-		.brightness_get	= &light_sysfs_get,
-	}
-};
-
-static int __init light_init(struct ibm_init_struct *iibm)
-{
-	int rc;
-
-	vdbg_printk(TPACPI_DBG_INIT, "initializing light subdriver\n");
-
-	TPACPI_ACPIHANDLE_INIT(ledb);
-	TPACPI_ACPIHANDLE_INIT(lght);
-	TPACPI_ACPIHANDLE_INIT(cmos);
-	INIT_WORK(&tpacpi_led_thinklight.work, light_set_status_worker);
-
-	/* light not supported on 570, 600e/x, 770e, 770x, G4x, R30, R31 */
-	tp_features.light = (cmos_handle || lght_handle) && !ledb_handle;
-
-	if (tp_features.light)
-		/* light status not supported on
-		   570, 600e/x, 770e, 770x, G4x, R30, R31, R32, X20 */
-		tp_features.light_status =
-			acpi_evalf(ec_handle, NULL, "KBLT", "qv");
-
-	vdbg_printk(TPACPI_DBG_INIT, "light is %s, light status is %s\n",
-		str_supported(tp_features.light),
-		str_supported(tp_features.light_status));
-
-	if (!tp_features.light)
-		return 1;
-
-	rc = led_classdev_register(&tpacpi_pdev->dev,
-				   &tpacpi_led_thinklight.led_classdev);
-
-	if (rc < 0) {
-		tp_features.light = 0;
-		tp_features.light_status = 0;
-	} else {
-		rc = 0;
-	}
-
-	return rc;
-}
-
-static void light_exit(void)
-{
-	led_classdev_unregister(&tpacpi_led_thinklight.led_classdev);
-	if (work_pending(&tpacpi_led_thinklight.work))
-		flush_workqueue(tpacpi_wq);
-}
-
-static int light_read(char *p)
-{
-	int len = 0;
-	int status;
-
-	if (!tp_features.light) {
-		len += sprintf(p + len, "status:\t\tnot supported\n");
-	} else if (!tp_features.light_status) {
-		len += sprintf(p + len, "status:\t\tunknown\n");
-		len += sprintf(p + len, "commands:\ton, off\n");
-	} else {
-		status = light_get_status();
-		if (status < 0)
-			return status;
-		len += sprintf(p + len, "status:\t\t%s\n", onoff(status, 0));
-		len += sprintf(p + len, "commands:\ton, off\n");
-	}
-
-	return len;
-}
-
-static int light_write(char *buf)
-{
-	char *cmd;
-	int newstatus = 0;
-
-	if (!tp_features.light)
-		return -ENODEV;
-
-	while ((cmd = next_cmd(&buf))) {
-		if (strlencmp(cmd, "on") == 0) {
-			newstatus = 1;
-		} else if (strlencmp(cmd, "off") == 0) {
-			newstatus = 0;
-		} else
-			return -EINVAL;
-	}
-
-	return light_set_status(newstatus);
-}
-
-static struct ibm_struct light_driver_data = {
-	.name = "light",
-	.read = light_read,
-	.write = light_write,
-	.exit = light_exit,
-};
-
-/*************************************************************************
- * Dock subdriver
- */
-
-#ifdef CONFIG_THINKPAD_ACPI_DOCK
-
-static void dock_notify(struct ibm_struct *ibm, u32 event);
-static int dock_read(char *p);
-static int dock_write(char *buf);
-
-TPACPI_HANDLE(dock, root, "\\_SB.GDCK",	/* X30, X31, X40 */
-	   "\\_SB.PCI0.DOCK",	/* 600e/x,770e,770x,A2xm/p,T20-22,X20-21 */
-	   "\\_SB.PCI0.PCI1.DOCK",	/* all others */
-	   "\\_SB.PCI.ISA.SLCE",	/* 570 */
-    );				/* A21e,G4x,R30,R31,R32,R40,R40e,R50e */
-
-/* don't list other alternatives as we install a notify handler on the 570 */
-TPACPI_HANDLE(pci, root, "\\_SB.PCI");	/* 570 */
-
-static const struct acpi_device_id ibm_pci_device_ids[] = {
-	{PCI_ROOT_HID_STRING, 0},
-	{"", 0},
-};
-
-static struct tp_acpi_drv_struct ibm_dock_acpidriver[2] = {
-	{
-	 .notify = dock_notify,
-	 .handle = &dock_handle,
-	 .type = ACPI_SYSTEM_NOTIFY,
-	},
-	{
-	/* THIS ONE MUST NEVER BE USED FOR DRIVER AUTOLOADING.
-	 * We just use it to get notifications of dock hotplug
-	 * in very old thinkpads */
-	 .hid = ibm_pci_device_ids,
-	 .notify = dock_notify,
-	 .handle = &pci_handle,
-	 .type = ACPI_SYSTEM_NOTIFY,
-	},
-};
-
-static struct ibm_struct dock_driver_data[2] = {
-	{
-	 .name = "dock",
-	 .read = dock_read,
-	 .write = dock_write,
-	 .acpi = &ibm_dock_acpidriver[0],
-	},
-	{
-	 .name = "dock",
-	 .acpi = &ibm_dock_acpidriver[1],
-	},
-};
-
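-/* ACPI _STA bit 0 ("device present") doubles as the docked indicator */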
-#define dock_docked() (_sta(dock_handle) & 1)
-
-static int __init dock_init(struct ibm_init_struct *iibm)
-{
-	vdbg_printk(TPACPI_DBG_INIT, "initializing dock subdriver\n");
-
-	TPACPI_ACPIHANDLE_INIT(dock);
-
-	vdbg_printk(TPACPI_DBG_INIT, "dock is %s\n",
-		str_supported(dock_handle != NULL));
-
-	return (dock_handle)? 0 : 1;
-}
-
-static int __init dock_init2(struct ibm_init_struct *iibm)
-{
-	int dock2_needed;
-
-	vdbg_printk(TPACPI_DBG_INIT, "initializing dock subdriver part 2\n");
-
-	if (dock_driver_data[0].flags.acpi_driver_registered &&
-	    dock_driver_data[0].flags.acpi_notify_installed) {
-		TPACPI_ACPIHANDLE_INIT(pci);
-		dock2_needed = (pci_handle != NULL);
-		vdbg_printk(TPACPI_DBG_INIT,
-			    "dock PCI handler for the TP 570 is %s\n",
-			    str_supported(dock2_needed));
-	} else {
-		vdbg_printk(TPACPI_DBG_INIT,
-			    "dock subdriver part 2 not required\n");
-		dock2_needed = 0;
-	}
-
-	return (dock2_needed)? 0 : 1;
-}
-
-static void dock_notify(struct ibm_struct *ibm, u32 event)
-{
-	int docked = dock_docked();
-	int pci = ibm->acpi->hid && ibm->acpi->device &&
-		acpi_match_device_ids(ibm->acpi->device, ibm_pci_device_ids);
-	int data;
-
-	if (event == 1 && !pci)	/* 570 */
-		data = 1;	/* button */
-	else if (event == 1 && pci)	/* 570 */
-		data = 3;	/* dock */
-	else if (event == 3 && docked)
-		data = 1;	/* button */
-	else if (event == 3 && !docked)
-		data = 2;	/* undock */
-	else if (event == 0 && docked)
-		data = 3;	/* dock */
-	else {
-		printk(TPACPI_ERR "unknown dock event %d, status %d\n",
-		       event, _sta(dock_handle));
-		data = 0;	/* unknown */
-	}
-	acpi_bus_generate_proc_event(ibm->acpi->device, event, data);
-	acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
-					  ibm->acpi->device->dev.bus_id,
-					  event, data);
-}
-
-static int dock_read(char *p)
-{
-	int len = 0;
-	int docked = dock_docked();
-
-	if (!dock_handle)
-		len += sprintf(p + len, "status:\t\tnot supported\n");
-	else if (!docked)
-		len += sprintf(p + len, "status:\t\tundocked\n");
-	else {
-		len += sprintf(p + len, "status:\t\tdocked\n");
-		len += sprintf(p + len, "commands:\tdock, undock\n");
-	}
-
-	return len;
-}
-
-static int dock_write(char *buf)
-{
-	char *cmd;
-
-	if (!dock_docked())
-		return -ENODEV;
-
-	while ((cmd = next_cmd(&buf))) {
-		if (strlencmp(cmd, "undock") == 0) {
-			if (!acpi_evalf(dock_handle, NULL, "_DCK", "vd", 0) ||
-			    !acpi_evalf(dock_handle, NULL, "_EJ0", "vd", 1))
-				return -EIO;
-		} else if (strlencmp(cmd, "dock") == 0) {
-			if (!acpi_evalf(dock_handle, NULL, "_DCK", "vd", 1))
-				return -EIO;
-		} else
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-#endif /* CONFIG_THINKPAD_ACPI_DOCK */
-
-/*************************************************************************
- * Bay subdriver
- */
-
-#ifdef CONFIG_THINKPAD_ACPI_BAY
-
-TPACPI_HANDLE(bay, root, "\\_SB.PCI.IDE.SECN.MAST",	/* 570 */
-	   "\\_SB.PCI0.IDE0.IDES.IDSM",	/* 600e/x, 770e, 770x */
-	   "\\_SB.PCI0.SATA.SCND.MSTR",	/* T60, X60, Z60 */
-	   "\\_SB.PCI0.IDE0.SCND.MSTR",	/* all others */
-	   );				/* A21e, R30, R31 */
-TPACPI_HANDLE(bay_ej, bay, "_EJ3",	/* 600e/x, A2xm/p, A3x */
-	   "_EJ0",		/* all others */
-	   );			/* 570,A21e,G4x,R30,R31,R32,R40e,R50e */
-TPACPI_HANDLE(bay2, root, "\\_SB.PCI0.IDE0.PRIM.SLAV",	/* A3x, R32 */
-	   "\\_SB.PCI0.IDE0.IDEP.IDPS",	/* 600e/x, 770e, 770x */
-	   );				/* all others */
-TPACPI_HANDLE(bay2_ej, bay2, "_EJ3",	/* 600e/x, 770e, A3x */
-	   "_EJ0",			/* 770x */
-	   );				/* all others */
-
-static int __init bay_init(struct ibm_init_struct *iibm)
-{
-	vdbg_printk(TPACPI_DBG_INIT, "initializing bay subdriver\n");
-
-	TPACPI_ACPIHANDLE_INIT(bay);
-	if (bay_handle)
-		TPACPI_ACPIHANDLE_INIT(bay_ej);
-	TPACPI_ACPIHANDLE_INIT(bay2);
-	if (bay2_handle)
-		TPACPI_ACPIHANDLE_INIT(bay2_ej);
-
-	tp_features.bay_status = bay_handle &&
-		acpi_evalf(bay_handle, NULL, "_STA", "qv");
-	tp_features.bay_status2 = bay2_handle &&
-		acpi_evalf(bay2_handle, NULL, "_STA", "qv");
-
-	tp_features.bay_eject = bay_handle && bay_ej_handle &&
-		(strlencmp(bay_ej_path, "_EJ0") == 0 || experimental);
-	tp_features.bay_eject2 = bay2_handle && bay2_ej_handle &&
-		(strlencmp(bay2_ej_path, "_EJ0") == 0 || experimental);
-
-	vdbg_printk(TPACPI_DBG_INIT,
-		"bay 1: status %s, eject %s; bay 2: status %s, eject %s\n",
-		str_supported(tp_features.bay_status),
-		str_supported(tp_features.bay_eject),
-		str_supported(tp_features.bay_status2),
-		str_supported(tp_features.bay_eject2));
-
-	return (tp_features.bay_status || tp_features.bay_eject ||
-		tp_features.bay_status2 || tp_features.bay_eject2)? 0 : 1;
-}
-
-static void bay_notify(struct ibm_struct *ibm, u32 event)
-{
-	acpi_bus_generate_proc_event(ibm->acpi->device, event, 0);
-	acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
-					  ibm->acpi->device->dev.bus_id,
-					  event, 0);
-}
-
-#define bay_occupied(b) (_sta(b##_handle) & 1)
-
-static int bay_read(char *p)
-{
-	int len = 0;
-	int occupied = bay_occupied(bay);
-	int occupied2 = bay_occupied(bay2);
-	int eject, eject2;
-
-	len += sprintf(p + len, "status:\t\t%s\n",
-		tp_features.bay_status ?
-			(occupied ? "occupied" : "unoccupied") :
-				"not supported");
-	if (tp_features.bay_status2)
-		len += sprintf(p + len, "status2:\t%s\n", occupied2 ?
-			       "occupied" : "unoccupied");
-
-	eject = tp_features.bay_eject && occupied;
-	eject2 = tp_features.bay_eject2 && occupied2;
-
-	if (eject && eject2)
-		len += sprintf(p + len, "commands:\teject, eject2\n");
-	else if (eject)
-		len += sprintf(p + len, "commands:\teject\n");
-	else if (eject2)
-		len += sprintf(p + len, "commands:\teject2\n");
-
-	return len;
-}
-
-static int bay_write(char *buf)
-{
-	char *cmd;
-
-	if (!tp_features.bay_eject && !tp_features.bay_eject2)
-		return -ENODEV;
-
-	while ((cmd = next_cmd(&buf))) {
-		if (tp_features.bay_eject && strlencmp(cmd, "eject") == 0) {
-			if (!acpi_evalf(bay_ej_handle, NULL, NULL, "vd", 1))
-				return -EIO;
-		} else if (tp_features.bay_eject2 &&
-			   strlencmp(cmd, "eject2") == 0) {
-			if (!acpi_evalf(bay2_ej_handle, NULL, NULL, "vd", 1))
-				return -EIO;
-		} else
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-static struct tp_acpi_drv_struct ibm_bay_acpidriver = {
-	.notify = bay_notify,
-	.handle = &bay_handle,
-	.type = ACPI_SYSTEM_NOTIFY,
-};
-
-static struct ibm_struct bay_driver_data = {
-	.name = "bay",
-	.read = bay_read,
-	.write = bay_write,
-	.acpi = &ibm_bay_acpidriver,
-};
-
-#endif /* CONFIG_THINKPAD_ACPI_BAY */
-
-/*************************************************************************
- * CMOS subdriver
- */
-
-/* sysfs cmos_command -------------------------------------------------- */
-static ssize_t cmos_command_store(struct device *dev,
-			    struct device_attribute *attr,
-			    const char *buf, size_t count)
-{
-	unsigned long cmos_cmd;
-	int res;
-
-	if (parse_strtoul(buf, 21, &cmos_cmd))
-		return -EINVAL;
-
-	res = issue_thinkpad_cmos_command(cmos_cmd);
-	return (res)? res : count;
-}
-
-static struct device_attribute dev_attr_cmos_command =
-	__ATTR(cmos_command, S_IWUSR, NULL, cmos_command_store);
-
-/* --------------------------------------------------------------------- */
-
-static int __init cmos_init(struct ibm_init_struct *iibm)
-{
-	int res;
-
-	vdbg_printk(TPACPI_DBG_INIT,
-		"initializing cmos commands subdriver\n");
-
-	TPACPI_ACPIHANDLE_INIT(cmos);
-
-	vdbg_printk(TPACPI_DBG_INIT, "cmos commands are %s\n",
-		str_supported(cmos_handle != NULL));
-
-	res = device_create_file(&tpacpi_pdev->dev, &dev_attr_cmos_command);
-	if (res)
-		return res;
-
-	return (cmos_handle)? 0 : 1;
-}
-
-static void cmos_exit(void)
-{
-	device_remove_file(&tpacpi_pdev->dev, &dev_attr_cmos_command);
-}
-
-static int cmos_read(char *p)
-{
-	int len = 0;
-
-	/* cmos not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
-	   R30, R31, T20-22, X20-21 */
-	if (!cmos_handle)
-		len += sprintf(p + len, "status:\t\tnot supported\n");
-	else {
-		len += sprintf(p + len, "status:\t\tsupported\n");
-		len += sprintf(p + len, "commands:\t<cmd> (<cmd> is 0-21)\n");
-	}
-
-	return len;
-}
-
-static int cmos_write(char *buf)
-{
-	char *cmd;
-	int cmos_cmd, res;
-
-	while ((cmd = next_cmd(&buf))) {
-		if (sscanf(cmd, "%u", &cmos_cmd) == 1 &&
-		    cmos_cmd >= 0 && cmos_cmd <= 21) {
-			/* cmos_cmd set */
-		} else
-			return -EINVAL;
-
-		res = issue_thinkpad_cmos_command(cmos_cmd);
-		if (res)
-			return res;
-	}
-
-	return 0;
-}
-
-static struct ibm_struct cmos_driver_data = {
-	.name = "cmos",
-	.read = cmos_read,
-	.write = cmos_write,
-	.exit = cmos_exit,
-};
-
-/*************************************************************************
- * LED subdriver
- */
-
-enum led_access_mode {
-	TPACPI_LED_NONE = 0,
-	TPACPI_LED_570,	/* 570 */
-	TPACPI_LED_OLD,	/* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20-21 */
-	TPACPI_LED_NEW,	/* all others */
-};
-
-enum {	/* For TPACPI_LED_OLD */
-	TPACPI_LED_EC_HLCL = 0x0c,	/* EC reg to get led to power on */
-	TPACPI_LED_EC_HLBL = 0x0d,	/* EC reg to blink a lit led */
-	TPACPI_LED_EC_HLMS = 0x0e,	/* EC reg to select led to command */
-};
-
-enum led_status_t {
-	TPACPI_LED_OFF = 0,
-	TPACPI_LED_ON,
-	TPACPI_LED_BLINK,
-};
-
-static enum led_access_mode led_supported;
-
-TPACPI_HANDLE(led, ec, "SLED",	/* 570 */
-	   "SYSL",		/* 600e/x, 770e, 770x, A21e, A2xm/p, */
-				/* T20-22, X20-21 */
-	   "LED",		/* all others */
-	   );			/* R30, R31 */
-
-#define TPACPI_LED_NUMLEDS 8
-static struct tpacpi_led_classdev *tpacpi_leds;
-static enum led_status_t tpacpi_led_state_cache[TPACPI_LED_NUMLEDS];
-static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
-	/* there's a limit of 19 chars + NULL before 2.6.26 */
-	"tpacpi::power",
-	"tpacpi:orange:batt",
-	"tpacpi:green:batt",
-	"tpacpi::dock_active",
-	"tpacpi::bay_active",
-	"tpacpi::dock_batt",
-	"tpacpi::unknown_led",
-	"tpacpi::standby",
-};
-
-static int led_get_status(const unsigned int led)
-{
-	int status;
-	enum led_status_t led_s;
-
-	switch (led_supported) {
-	case TPACPI_LED_570:
-		if (!acpi_evalf(ec_handle,
-				&status, "GLED", "dd", 1 << led))
-			return -EIO;
-		led_s = (status == 0)?
-				TPACPI_LED_OFF :
-				((status == 1)?
-					TPACPI_LED_ON :
-					TPACPI_LED_BLINK);
-		tpacpi_led_state_cache[led] = led_s;
-		return led_s;
-	default:
-		return -ENXIO;
-	}
-
-	/* not reached */
-}
-
-static int led_set_status(const unsigned int led,
-			  const enum led_status_t ledstatus)
-{
-	/* off, on, blink. Index is led_status_t */
-	static const unsigned int led_sled_arg1[] = { 0, 1, 3 };
-	static const unsigned int led_led_arg1[] = { 0, 0x80, 0xc0 };
-
-	int rc = 0;
-
-	switch (led_supported) {
-	case TPACPI_LED_570:
-		/* 570 */
-		if (led > 7)
-			return -EINVAL;
-		if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
-				(1 << led), led_sled_arg1[ledstatus]))
-			rc = -EIO;
-		break;
-	case TPACPI_LED_OLD:
-		/* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20 */
-		if (led > 7)
-			return -EINVAL;
-		rc = ec_write(TPACPI_LED_EC_HLMS, (1 << led));
-		if (rc >= 0)
-			rc = ec_write(TPACPI_LED_EC_HLBL,
-				      (ledstatus == TPACPI_LED_BLINK) << led);
-		if (rc >= 0)
-			rc = ec_write(TPACPI_LED_EC_HLCL,
-				      (ledstatus != TPACPI_LED_OFF) << led);
-		break;
-	case TPACPI_LED_NEW:
-		/* all others */
-		if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
-				led, led_led_arg1[ledstatus]))
-			rc = -EIO;
-		break;
-	default:
-		rc = -ENXIO;
-	}
-
-	if (!rc)
-		tpacpi_led_state_cache[led] = ledstatus;
-
-	return rc;
-}
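-/*
- * Worked example of the TPACPI_LED_OLD path above: to blink LED 2 the
- * driver writes 0x04 to HLMS (select LED 2), 0x04 to HLBL (blink it) and
- * 0x04 to HLCL (power the LED on).
- */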
-
-static void led_sysfs_set_status(unsigned int led,
-				 enum led_brightness brightness)
-{
-	led_set_status(led,
-			(brightness == LED_OFF) ?
-			TPACPI_LED_OFF :
-			(tpacpi_led_state_cache[led] == TPACPI_LED_BLINK) ?
-				TPACPI_LED_BLINK : TPACPI_LED_ON);
-}
-
-static void led_set_status_worker(struct work_struct *work)
-{
-	struct tpacpi_led_classdev *data =
-		container_of(work, struct tpacpi_led_classdev, work);
-
-	if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
-		led_sysfs_set_status(data->led, data->new_brightness);
-}
-
-static void led_sysfs_set(struct led_classdev *led_cdev,
-			enum led_brightness brightness)
-{
-	struct tpacpi_led_classdev *data = container_of(led_cdev,
-			     struct tpacpi_led_classdev, led_classdev);
-
-	data->new_brightness = brightness;
-	queue_work(tpacpi_wq, &data->work);
-}
-
-static int led_sysfs_blink_set(struct led_classdev *led_cdev,
-			unsigned long *delay_on, unsigned long *delay_off)
-{
-	struct tpacpi_led_classdev *data = container_of(led_cdev,
-			     struct tpacpi_led_classdev, led_classdev);
-
-	/* Can we choose the flash rate? */
-	if (*delay_on == 0 && *delay_off == 0) {
-		/* yes. set them to the hardware blink rate (1 Hz) */
-		*delay_on = 500; /* ms */
-		*delay_off = 500; /* ms */
-	} else if ((*delay_on != 500) || (*delay_off != 500))
-		return -EINVAL;
-
-	data->new_brightness = TPACPI_LED_BLINK;
-	queue_work(tpacpi_wq, &data->work);
-
-	return 0;
-}
-
-static enum led_brightness led_sysfs_get(struct led_classdev *led_cdev)
-{
-	int rc;
-
-	struct tpacpi_led_classdev *data = container_of(led_cdev,
-			     struct tpacpi_led_classdev, led_classdev);
-
-	rc = led_get_status(data->led);
-
-	if (rc == TPACPI_LED_OFF || rc < 0)
-		rc = LED_OFF;	/* no error handling in led class :( */
-	else
-		rc = LED_FULL;
-
-	return rc;
-}
-
-static void led_exit(void)
-{
-	unsigned int i;
-
-	for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
-		if (tpacpi_leds[i].led_classdev.name)
-			led_classdev_unregister(&tpacpi_leds[i].led_classdev);
-	}
-
-	kfree(tpacpi_leds);
-}
-
-static int __init led_init(struct ibm_init_struct *iibm)
-{
-	unsigned int i;
-	int rc;
-
-	vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n");
-
-	TPACPI_ACPIHANDLE_INIT(led);
-
-	if (!led_handle)
-		/* led not supported on R30, R31 */
-		led_supported = TPACPI_LED_NONE;
-	else if (strlencmp(led_path, "SLED") == 0)
-		/* 570 */
-		led_supported = TPACPI_LED_570;
-	else if (strlencmp(led_path, "SYSL") == 0)
-		/* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20-21 */
-		led_supported = TPACPI_LED_OLD;
-	else
-		/* all others */
-		led_supported = TPACPI_LED_NEW;
-
-	vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n",
-		str_supported(led_supported), led_supported);
-
-	tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS,
-			      GFP_KERNEL);
-	if (!tpacpi_leds) {
-		printk(TPACPI_ERR "Out of memory for LED data\n");
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
-		tpacpi_leds[i].led = i;
-
-		tpacpi_leds[i].led_classdev.brightness_set = &led_sysfs_set;
-		tpacpi_leds[i].led_classdev.blink_set = &led_sysfs_blink_set;
-		if (led_supported == TPACPI_LED_570)
-			tpacpi_leds[i].led_classdev.brightness_get =
-							&led_sysfs_get;
-
-		tpacpi_leds[i].led_classdev.name = tpacpi_led_names[i];
-
-		INIT_WORK(&tpacpi_leds[i].work, led_set_status_worker);
-
-		rc = led_classdev_register(&tpacpi_pdev->dev,
-					   &tpacpi_leds[i].led_classdev);
-		if (rc < 0) {
-			tpacpi_leds[i].led_classdev.name = NULL;
-			led_exit();
-			return rc;
-		}
-	}
-
-	return (led_supported != TPACPI_LED_NONE)? 0 : 1;
-}
-
-#define str_led_status(s) \
-	((s) == TPACPI_LED_OFF ? "off" : \
-		((s) == TPACPI_LED_ON ? "on" : "blinking"))
-
-static int led_read(char *p)
-{
-	int len = 0;
-
-	if (!led_supported) {
-		len += sprintf(p + len, "status:\t\tnot supported\n");
-		return len;
-	}
-	len += sprintf(p + len, "status:\t\tsupported\n");
-
-	if (led_supported == TPACPI_LED_570) {
-		/* 570 */
-		int i, status;
-		for (i = 0; i < 8; i++) {
-			status = led_get_status(i);
-			if (status < 0)
-				return -EIO;
-			len += sprintf(p + len, "%d:\t\t%s\n",
-				       i, str_led_status(status));
-		}
-	}
-
-	len += sprintf(p + len, "commands:\t"
-		       "<led> on, <led> off, <led> blink (<led> is 0-7)\n");
-
-	return len;
-}
-
-static int led_write(char *buf)
-{
-	char *cmd;
-	int led, rc;
-	enum led_status_t s;
-
-	if (!led_supported)
-		return -ENODEV;
-
-	while ((cmd = next_cmd(&buf))) {
-		if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 7)
-			return -EINVAL;
-
-		if (strstr(cmd, "off")) {
-			s = TPACPI_LED_OFF;
-		} else if (strstr(cmd, "on")) {
-			s = TPACPI_LED_ON;
-		} else if (strstr(cmd, "blink")) {
-			s = TPACPI_LED_BLINK;
-		} else {
-			return -EINVAL;
-		}
-
-		rc = led_set_status(led, s);
-		if (rc < 0)
-			return rc;
-	}
-
-	return 0;
-}
-
-static struct ibm_struct led_driver_data = {
-	.name = "led",
-	.read = led_read,
-	.write = led_write,
-	.exit = led_exit,
-};
-
-/*************************************************************************
- * Beep subdriver
- */
-
-TPACPI_HANDLE(beep, ec, "BEEP");	/* all except R30, R31 */
-
-static int __init beep_init(struct ibm_init_struct *iibm)
-{
-	vdbg_printk(TPACPI_DBG_INIT, "initializing beep subdriver\n");
-
-	TPACPI_ACPIHANDLE_INIT(beep);
-
-	vdbg_printk(TPACPI_DBG_INIT, "beep is %s\n",
-		str_supported(beep_handle != NULL));
-
-	return (beep_handle)? 0 : 1;
-}
-
-static int beep_read(char *p)
-{
-	int len = 0;
-
-	if (!beep_handle)
-		len += sprintf(p + len, "status:\t\tnot supported\n");
-	else {
-		len += sprintf(p + len, "status:\t\tsupported\n");
-		len += sprintf(p + len, "commands:\t<cmd> (<cmd> is 0-17)\n");
-	}
-
-	return len;
-}
-
-static int beep_write(char *buf)
-{
-	char *cmd;
-	int beep_cmd;
-
-	if (!beep_handle)
-		return -ENODEV;
-
-	while ((cmd = next_cmd(&buf))) {
-		if (sscanf(cmd, "%u", &beep_cmd) == 1 &&
-		    beep_cmd >= 0 && beep_cmd <= 17) {
-			/* beep_cmd set */
-		} else
-			return -EINVAL;
-		if (!acpi_evalf(beep_handle, NULL, NULL, "vdd", beep_cmd, 0))
-			return -EIO;
-	}
-
-	return 0;
-}
-
-static struct ibm_struct beep_driver_data = {
-	.name = "beep",
-	.read = beep_read,
-	.write = beep_write,
-};
-
-/*************************************************************************
- * Thermal subdriver
- */
-
-enum thermal_access_mode {
-	TPACPI_THERMAL_NONE = 0,	/* No thermal support */
-	TPACPI_THERMAL_ACPI_TMP07,	/* Use ACPI TMP0-7 */
-	TPACPI_THERMAL_ACPI_UPDT,	/* Use ACPI TMP0-7 with UPDT */
-	TPACPI_THERMAL_TPEC_8,		/* Use ACPI EC regs, 8 sensors */
-	TPACPI_THERMAL_TPEC_16,		/* Use ACPI EC regs, 16 sensors */
-};
-
-enum { /* TPACPI_THERMAL_TPEC_* */
-	TP_EC_THERMAL_TMP0 = 0x78,	/* ACPI EC regs TMP 0..7 */
-	TP_EC_THERMAL_TMP8 = 0xC0,	/* ACPI EC regs TMP 8..15 */
-	TP_EC_THERMAL_TMP_NA = -128,	/* ACPI EC sensor not available */
-};
-
-#define TPACPI_MAX_THERMAL_SENSORS 16	/* Max thermal sensors supported */
-struct ibm_thermal_sensors_struct {
-	s32 temp[TPACPI_MAX_THERMAL_SENSORS];
-};
-
-static enum thermal_access_mode thermal_read_mode;
-
-/* idx is zero-based */
-static int thermal_get_sensor(int idx, s32 *value)
-{
-	int t;
-	s8 tmp;
-	char tmpi[5];
-
-	t = TP_EC_THERMAL_TMP0;
-
-	switch (thermal_read_mode) {
-#if TPACPI_MAX_THERMAL_SENSORS >= 16
-	case TPACPI_THERMAL_TPEC_16:
-		if (idx >= 8 && idx <= 15) {
-			t = TP_EC_THERMAL_TMP8;
-			idx -= 8;
-		}
-		/* fallthrough */
-#endif
-	case TPACPI_THERMAL_TPEC_8:
-		if (idx <= 7) {
-			if (!acpi_ec_read(t + idx, &tmp))
-				return -EIO;
-			*value = tmp * 1000;
-			return 0;
-		}
-		break;
-
-	case TPACPI_THERMAL_ACPI_UPDT:
-		if (idx <= 7) {
-			snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
-			if (!acpi_evalf(ec_handle, NULL, "UPDT", "v"))
-				return -EIO;
-			if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
-				return -EIO;
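-			/* UPDT readings are in tenths of kelvin; convert to
-			 * millidegrees Celsius (e.g. 3032 -> 30000) */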
-			*value = (t - 2732) * 100;
-			return 0;
-		}
-		break;
-
-	case TPACPI_THERMAL_ACPI_TMP07:
-		if (idx <= 7) {
-			snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
-			if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
-				return -EIO;
-			if (t > 127 || t < -127)
-				t = TP_EC_THERMAL_TMP_NA;
-			*value = t * 1000;
-			return 0;
-		}
-		break;
-
-	case TPACPI_THERMAL_NONE:
-	default:
-		return -ENOSYS;
-	}
-
-	return -EINVAL;
-}
-
-static int thermal_get_sensors(struct ibm_thermal_sensors_struct *s)
-{
-	int res, i;
-	int n;
-
-	n = 8;
-	i = 0;
-
-	if (!s)
-		return -EINVAL;
-
-	if (thermal_read_mode == TPACPI_THERMAL_TPEC_16)
-		n = 16;
-
-	for (i = 0 ; i < n; i++) {
-		res = thermal_get_sensor(i, &s->temp[i]);
-		if (res)
-			return res;
-	}
-
-	return n;
-}
-
-/* sysfs temp##_input -------------------------------------------------- */
-
-static ssize_t thermal_temp_input_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	struct sensor_device_attribute *sensor_attr =
-					to_sensor_dev_attr(attr);
-	int idx = sensor_attr->index;
-	s32 value;
-	int res;
-
-	res = thermal_get_sensor(idx, &value);
-	if (res)
-		return res;
-	if (value == TP_EC_THERMAL_TMP_NA * 1000)
-		return -ENXIO;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", value);
-}
-
-#define THERMAL_SENSOR_ATTR_TEMP(_idxA, _idxB) \
-	 SENSOR_ATTR(temp##_idxA##_input, S_IRUGO, \
-		     thermal_temp_input_show, NULL, _idxB)
-
-static struct sensor_device_attribute sensor_dev_attr_thermal_temp_input[] = {
-	THERMAL_SENSOR_ATTR_TEMP(1, 0),
-	THERMAL_SENSOR_ATTR_TEMP(2, 1),
-	THERMAL_SENSOR_ATTR_TEMP(3, 2),
-	THERMAL_SENSOR_ATTR_TEMP(4, 3),
-	THERMAL_SENSOR_ATTR_TEMP(5, 4),
-	THERMAL_SENSOR_ATTR_TEMP(6, 5),
-	THERMAL_SENSOR_ATTR_TEMP(7, 6),
-	THERMAL_SENSOR_ATTR_TEMP(8, 7),
-	THERMAL_SENSOR_ATTR_TEMP(9, 8),
-	THERMAL_SENSOR_ATTR_TEMP(10, 9),
-	THERMAL_SENSOR_ATTR_TEMP(11, 10),
-	THERMAL_SENSOR_ATTR_TEMP(12, 11),
-	THERMAL_SENSOR_ATTR_TEMP(13, 12),
-	THERMAL_SENSOR_ATTR_TEMP(14, 13),
-	THERMAL_SENSOR_ATTR_TEMP(15, 14),
-	THERMAL_SENSOR_ATTR_TEMP(16, 15),
-};
-
-#define THERMAL_ATTRS(X) \
-	&sensor_dev_attr_thermal_temp_input[X].dev_attr.attr
-
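-/*
- * Sensors 8-15 are listed first so that the 8-sensor group below can
- * simply start at &thermal_temp_input_attr[8] (sensors 0-7 plus the
- * terminating NULL).
- */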
-static struct attribute *thermal_temp_input_attr[] = {
-	THERMAL_ATTRS(8),
-	THERMAL_ATTRS(9),
-	THERMAL_ATTRS(10),
-	THERMAL_ATTRS(11),
-	THERMAL_ATTRS(12),
-	THERMAL_ATTRS(13),
-	THERMAL_ATTRS(14),
-	THERMAL_ATTRS(15),
-	THERMAL_ATTRS(0),
-	THERMAL_ATTRS(1),
-	THERMAL_ATTRS(2),
-	THERMAL_ATTRS(3),
-	THERMAL_ATTRS(4),
-	THERMAL_ATTRS(5),
-	THERMAL_ATTRS(6),
-	THERMAL_ATTRS(7),
-	NULL
-};
-
-static const struct attribute_group thermal_temp_input16_group = {
-	.attrs = thermal_temp_input_attr
-};
-
-static const struct attribute_group thermal_temp_input8_group = {
-	.attrs = &thermal_temp_input_attr[8]
-};
-
-#undef THERMAL_SENSOR_ATTR_TEMP
-#undef THERMAL_ATTRS
-
-/* --------------------------------------------------------------------- */
-
-static int __init thermal_init(struct ibm_init_struct *iibm)
-{
-	u8 t, ta1, ta2;
-	int i;
-	int acpi_tmp7;
-	int res;
-
-	vdbg_printk(TPACPI_DBG_INIT, "initializing thermal subdriver\n");
-
-	acpi_tmp7 = acpi_evalf(ec_handle, NULL, "TMP7", "qv");
-
-	if (thinkpad_id.ec_model) {
-		/*
-		 * Direct EC access mode: sensors at registers
-		 * 0x78-0x7F, 0xC0-0xC7.  Registers return 0x00 for
-		 * non-implemented, thermal sensors return 0x80 when
-		 * not available
-		 */
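-		/*
-		 * Example: if TMP0..7 read back non-zero but TMP8..15 are
-		 * all 0x00, ta2 stays 0 below and TPACPI_THERMAL_TPEC_8 is
-		 * selected; any non-zero TMP8..15 reading selects
-		 * TPACPI_THERMAL_TPEC_16 instead.
-		 */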
-
-		ta1 = ta2 = 0;
-		for (i = 0; i < 8; i++) {
-			if (acpi_ec_read(TP_EC_THERMAL_TMP0 + i, &t)) {
-				ta1 |= t;
-			} else {
-				ta1 = 0;
-				break;
-			}
-			if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
-				ta2 |= t;
-			} else {
-				ta1 = 0;
-				break;
-			}
-		}
-		if (ta1 == 0) {
-			/* This is sheer paranoia, but we handle it anyway */
-			if (acpi_tmp7) {
-				printk(TPACPI_ERR
-				       "ThinkPad ACPI EC access misbehaving, "
-				       "falling back to ACPI TMPx access "
-				       "mode\n");
-				thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
-			} else {
-				printk(TPACPI_ERR
-				       "ThinkPad ACPI EC access misbehaving, "
-				       "disabling thermal sensors access\n");
-				thermal_read_mode = TPACPI_THERMAL_NONE;
-			}
-		} else {
-			thermal_read_mode =
-			    (ta2 != 0) ?
-			    TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
-		}
-	} else if (acpi_tmp7) {
-		if (acpi_evalf(ec_handle, NULL, "UPDT", "qv")) {
-			/* 600e/x, 770e, 770x */
-			thermal_read_mode = TPACPI_THERMAL_ACPI_UPDT;
-		} else {
-			/* Standard ACPI TMPx access, max 8 sensors */
-			thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
-		}
-	} else {
-		/* temperatures not supported on 570, G4x, R30, R31, R32 */
-		thermal_read_mode = TPACPI_THERMAL_NONE;
-	}
-
-	vdbg_printk(TPACPI_DBG_INIT, "thermal is %s, mode %d\n",
-		str_supported(thermal_read_mode != TPACPI_THERMAL_NONE),
-		thermal_read_mode);
-
-	switch (thermal_read_mode) {
-	case TPACPI_THERMAL_TPEC_16:
-		res = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
-				&thermal_temp_input16_group);
-		if (res)
-			return res;
-		break;
-	case TPACPI_THERMAL_TPEC_8:
-	case TPACPI_THERMAL_ACPI_TMP07:
-	case TPACPI_THERMAL_ACPI_UPDT:
-		res = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
-				&thermal_temp_input8_group);
-		if (res)
-			return res;
-		break;
-	case TPACPI_THERMAL_NONE:
-	default:
-		return 1;
-	}
-
-	return 0;
-}
-
-static void thermal_exit(void)
-{
-	switch (thermal_read_mode) {
-	case TPACPI_THERMAL_TPEC_16:
-		sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
-				   &thermal_temp_input16_group);
-		break;
-	case TPACPI_THERMAL_TPEC_8:
-	case TPACPI_THERMAL_ACPI_TMP07:
-	case TPACPI_THERMAL_ACPI_UPDT:
-		sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
-				   &thermal_temp_input8_group);
-		break;
-	case TPACPI_THERMAL_NONE:
-	default:
-		break;
-	}
-}
-
-static int thermal_read(char *p)
-{
-	int len = 0;
-	int n, i;
-	struct ibm_thermal_sensors_struct t;
-
-	n = thermal_get_sensors(&t);
-	if (unlikely(n < 0))
-		return n;
-
-	len += sprintf(p + len, "temperatures:\t");
-
-	if (n > 0) {
-		for (i = 0; i < (n - 1); i++)
-			len += sprintf(p + len, "%d ", t.temp[i] / 1000);
-		len += sprintf(p + len, "%d\n", t.temp[i] / 1000);
-	} else
-		len += sprintf(p + len, "not supported\n");
-
-	return len;
-}
-
-static struct ibm_struct thermal_driver_data = {
-	.name = "thermal",
-	.read = thermal_read,
-	.exit = thermal_exit,
-};
-
-/*************************************************************************
- * EC Dump subdriver
- */
-
-static u8 ecdump_regs[256];
-
-static int ecdump_read(char *p)
-{
-	int len = 0;
-	int i, j;
-	u8 v;
-
-	len += sprintf(p + len, "EC      "
-		       " +00 +01 +02 +03 +04 +05 +06 +07"
-		       " +08 +09 +0a +0b +0c +0d +0e +0f\n");
-	for (i = 0; i < 256; i += 16) {
-		len += sprintf(p + len, "EC 0x%02x:", i);
-		for (j = 0; j < 16; j++) {
-			if (!acpi_ec_read(i + j, &v))
-				break;
-			if (v != ecdump_regs[i + j])
-				len += sprintf(p + len, " *%02x", v);
-			else
-				len += sprintf(p + len, "  %02x", v);
-			ecdump_regs[i + j] = v;
-		}
-		len += sprintf(p + len, "\n");
-		if (j != 16)
-			break;
-	}
-
-	/* These are way too dangerous to advertise openly... */
-#if 0
-	len += sprintf(p + len, "commands:\t0x<offset> 0x<value>"
-		       " (<offset> is 00-ff, <value> is 00-ff)\n");
-	len += sprintf(p + len, "commands:\t0x<offset> <value>  "
-		       " (<offset> is 00-ff, <value> is 0-255)\n");
-#endif
-	return len;
-}
-
-static int ecdump_write(char *buf)
-{
-	char *cmd;
-	int i, v;
-
-	while ((cmd = next_cmd(&buf))) {
-		if (sscanf(cmd, "0x%x 0x%x", &i, &v) == 2) {
-			/* i and v set */
-		} else if (sscanf(cmd, "0x%x %u", &i, &v) == 2) {
-			/* i and v set */
-		} else
-			return -EINVAL;
-		if (i >= 0 && i < 256 && v >= 0 && v < 256) {
-			if (!acpi_ec_write(i, v))
-				return -EIO;
-		} else
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-static struct ibm_struct ecdump_driver_data = {
-	.name = "ecdump",
-	.read = ecdump_read,
-	.write = ecdump_write,
-	.flags.experimental = 1,
-};
-
-/*************************************************************************
- * Backlight/brightness subdriver
- */
-
-#define TPACPI_BACKLIGHT_DEV_NAME "thinkpad_screen"
-
-enum {
-	TP_EC_BACKLIGHT = 0x31,
-
-	/* TP_EC_BACKLIGHT bitmasks */
-	TP_EC_BACKLIGHT_LVLMSK = 0x1F,
-	TP_EC_BACKLIGHT_CMDMSK = 0xE0,
-	TP_EC_BACKLIGHT_MAPSW = 0x20,
-};
-
-static struct backlight_device *ibm_backlight_device;
-static int brightness_mode;
-static unsigned int brightness_enable = 2; /* 2 = auto, 0 = no, 1 = yes */
-
-static struct mutex brightness_mutex;
-
-/*
- * ThinkPads can read brightness from two places: EC 0x31, or
- * CMOS NVRAM byte 0x5E, bits 0-3.
- *
- * EC 0x31 has the following layout
- *   Bit 7: unknown function
- *   Bit 6: unknown function
- *   Bit 5: Z: honour scale changes, NZ: ignore scale changes
- *   Bit 4: must be set to zero to avoid problems
- *   Bit 3-0: backlight brightness level
- *
- * brightness_get_raw returns status data in the EC 0x31 layout
- */
-static int brightness_get_raw(int *status)
-{
-	u8 lec = 0, lcmos = 0, level = 0;
-
-	if (brightness_mode & 1) {
-		if (!acpi_ec_read(TP_EC_BACKLIGHT, &lec))
-			return -EIO;
-		level = lec & TP_EC_BACKLIGHT_LVLMSK;
-	}
-	if (brightness_mode & 2) {
-		lcmos = (nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS)
-			 & TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
-			>> TP_NVRAM_POS_LEVEL_BRIGHTNESS;
-		lcmos &= (tp_features.bright_16levels)? 0x0f : 0x07;
-		level = lcmos;
-	}
-
-	if (brightness_mode == 3) {
-		*status = lec;	/* Prefer EC, CMOS is just a backing store */
-		lec &= TP_EC_BACKLIGHT_LVLMSK;
-		if (lec == lcmos)
-			tp_warned.bright_cmos_ec_unsync = 0;
-		else {
-			if (!tp_warned.bright_cmos_ec_unsync) {
-				printk(TPACPI_ERR
-					"CMOS NVRAM (%u) and EC (%u) do not "
-					"agree on display brightness level\n",
-					(unsigned int) lcmos,
-					(unsigned int) lec);
-				tp_warned.bright_cmos_ec_unsync = 1;
-			}
-			return -EIO;
-		}
-	} else {
-		*status = level;
-	}
-
-	return 0;
-}
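-/*
- * Minimal sketch (illustration only, not used by the driver) of how the
- * EC 0x31 layout documented above is decoded:
- */
-#if 0
-static void brightness_decode_ec31(u8 reg, int *level, int *honour_scale)
-{
-	*level = reg & TP_EC_BACKLIGHT_LVLMSK;		/* level bits */
-	*honour_scale = !(reg & TP_EC_BACKLIGHT_MAPSW);	/* bit 5 clear */
-}
-#endif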
-
-/* May return EINTR which can always be mapped to ERESTARTSYS */
-static int brightness_set(int value)
-{
-	int cmos_cmd, inc, i, res;
-	int current_value;
-	int command_bits;
-
-	if (value > ((tp_features.bright_16levels)? 15 : 7) ||
-	    value < 0)
-		return -EINVAL;
-
-	res = mutex_lock_interruptible(&brightness_mutex);
-	if (res < 0)
-		return res;
-
-	res = brightness_get_raw(&current_value);
-	if (res < 0)
-		goto errout;
-
-	command_bits = current_value & TP_EC_BACKLIGHT_CMDMSK;
-	current_value &= TP_EC_BACKLIGHT_LVLMSK;
-
-	cmos_cmd = value > current_value ?
-			TP_CMOS_BRIGHTNESS_UP :
-			TP_CMOS_BRIGHTNESS_DOWN;
-	inc = (value > current_value)? 1 : -1;
-
-	res = 0;
-	for (i = current_value; i != value; i += inc) {
-		if ((brightness_mode & 2) &&
-		    issue_thinkpad_cmos_command(cmos_cmd)) {
-			res = -EIO;
-			goto errout;
-		}
-		if ((brightness_mode & 1) &&
-		    !acpi_ec_write(TP_EC_BACKLIGHT,
-				   (i + inc) | command_bits)) {
-			res = -EIO;
-			goto errout;
-		}
-	}
-
-errout:
-	mutex_unlock(&brightness_mutex);
-	return res;
-}
-
-/* sysfs backlight class ----------------------------------------------- */
-
-static int brightness_update_status(struct backlight_device *bd)
-{
-	/* it is the backlight class's job (caller) to handle
-	 * EINTR and other errors properly */
-	return brightness_set(
-		(bd->props.fb_blank == FB_BLANK_UNBLANK &&
-		 bd->props.power == FB_BLANK_UNBLANK) ?
-				bd->props.brightness : 0);
-}
-
-static int brightness_get(struct backlight_device *bd)
-{
-	int status, res;
-
-	res = brightness_get_raw(&status);
-	if (res < 0)
-		return 0; /* FIXME: teach backlight about error handling */
-
-	return status & TP_EC_BACKLIGHT_LVLMSK;
-}
-
-static struct backlight_ops ibm_backlight_data = {
-	.get_brightness = brightness_get,
-	.update_status  = brightness_update_status,
-};
-
-/* --------------------------------------------------------------------- */
-
-static int __init brightness_init(struct ibm_init_struct *iibm)
-{
-	int b;
-
-	vdbg_printk(TPACPI_DBG_INIT, "initializing brightness subdriver\n");
-
-	mutex_init(&brightness_mutex);
-
-	/*
-	 * We always attempt to detect acpi support, so as to switch
-	 * Lenovo Vista BIOS to ACPI brightness mode even if we are not
-	 * going to publish a backlight interface
-	 */
-	b = tpacpi_check_std_acpi_brightness_support();
-	if (b > 0) {
-
-		if (acpi_video_backlight_support()) {
-			if (brightness_enable > 1) {
-				printk(TPACPI_NOTICE
-				       "Standard ACPI backlight interface "
-				       "available, not loading native one.\n");
-				return 1;
-			} else if (brightness_enable == 1) {
-				printk(TPACPI_NOTICE
-				       "Backlight control force enabled, even if standard "
-				       "ACPI backlight interface is available\n");
-			}
-		} else {
-			if (brightness_enable > 1) {
-				printk(TPACPI_NOTICE
-				       "Standard ACPI backlight interface not "
-				       "available, thinkpad_acpi native "
-				       "brightness control enabled\n");
-			}
-		}
-	}
-
-	if (!brightness_enable) {
-		dbg_printk(TPACPI_DBG_INIT,
-			   "brightness support disabled by "
-			   "module parameter\n");
-		return 1;
-	}
-
-	if (b > 16) {
-		printk(TPACPI_ERR
-		       "Unsupported brightness interface, "
-		       "please contact %s\n", TPACPI_MAIL);
-		return 1;
-	}
-	if (b == 16)
-		tp_features.bright_16levels = 1;
-
-	if (!brightness_mode) {
-		if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO)
-			brightness_mode = 2;
-		else
-			brightness_mode = 3;
-
-		dbg_printk(TPACPI_DBG_INIT, "selected brightness_mode=%d\n",
-			brightness_mode);
-	}
-
-	if (brightness_mode > 3)
-		return -EINVAL;
-
-	if (brightness_get_raw(&b) < 0)
-		return 1;
-
-	if (tp_features.bright_16levels)
-		printk(TPACPI_INFO
-		       "detected a 16-level brightness capable ThinkPad\n");
-
-	ibm_backlight_device = backlight_device_register(
-					TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL,
-					&ibm_backlight_data);
-	if (IS_ERR(ibm_backlight_device)) {
-		printk(TPACPI_ERR "Could not register backlight device\n");
-		return PTR_ERR(ibm_backlight_device);
-	}
-	vdbg_printk(TPACPI_DBG_INIT, "brightness is supported\n");
-
-	ibm_backlight_device->props.max_brightness =
-				(tp_features.bright_16levels)? 15 : 7;
-	ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
-	backlight_update_status(ibm_backlight_device);
-
-	return 0;
-}
-
-static void brightness_exit(void)
-{
-	if (ibm_backlight_device) {
-		vdbg_printk(TPACPI_DBG_EXIT,
-			    "calling backlight_device_unregister()\n");
-		backlight_device_unregister(ibm_backlight_device);
-	}
-}
-
-static int brightness_read(char *p)
-{
-	int len = 0;
-	int level;
-
-	level = brightness_get(NULL);
-	if (level < 0) {
-		len += sprintf(p + len, "level:\t\tunreadable\n");
-	} else {
-		len += sprintf(p + len, "level:\t\t%d\n", level);
-		len += sprintf(p + len, "commands:\tup, down\n");
-		len += sprintf(p + len, "commands:\tlevel <level>"
-			       " (<level> is 0-%d)\n",
-			       (tp_features.bright_16levels) ? 15 : 7);
-	}
-
-	return len;
-}
-
-static int brightness_write(char *buf)
-{
-	int level;
-	int rc;
-	char *cmd;
-	int max_level = (tp_features.bright_16levels) ? 15 : 7;
-
-	level = brightness_get(NULL);
-	if (level < 0)
-		return level;
-
-	while ((cmd = next_cmd(&buf))) {
-		if (strlencmp(cmd, "up") == 0) {
-			if (level < max_level)
-				level++;
-		} else if (strlencmp(cmd, "down") == 0) {
-			if (level > 0)
-				level--;
-		} else if (sscanf(cmd, "level %d", &level) == 1 &&
-			   level >= 0 && level <= max_level) {
-			/* new level set */
-		} else
-			return -EINVAL;
-	}
-
-	/*
-	 * Now we know what the final level should be, so we try to set it.
-	 * Doing it this way makes the syscall restartable in case of EINTR
-	 */
-	rc = brightness_set(level);
-	return (rc == -EINTR)? -ERESTARTSYS : rc;
-}
-
-static struct ibm_struct brightness_driver_data = {
-	.name = "brightness",
-	.read = brightness_read,
-	.write = brightness_write,
-	.exit = brightness_exit,
-};
-
-/*************************************************************************
- * Volume subdriver
- */
-
-static int volume_offset = 0x30;
-
-static int volume_read(char *p)
-{
-	int len = 0;
-	u8 level;
-
-	if (!acpi_ec_read(volume_offset, &level)) {
-		len += sprintf(p + len, "level:\t\tunreadable\n");
-	} else {
-		len += sprintf(p + len, "level:\t\t%d\n", level & 0xf);
-		len += sprintf(p + len, "mute:\t\t%s\n", onoff(level, 6));
-		len += sprintf(p + len, "commands:\tup, down, mute\n");
-		len += sprintf(p + len, "commands:\tlevel <level>"
-			       " (<level> is 0-15)\n");
-	}
-
-	return len;
-}
-
-static int volume_write(char *buf)
-{
-	int cmos_cmd, inc, i;
-	u8 level, mute;
-	int new_level, new_mute;
-	char *cmd;
-
-	while ((cmd = next_cmd(&buf))) {
-		if (!acpi_ec_read(volume_offset, &level))
-			return -EIO;
-		new_mute = mute = level & 0x40;
-		new_level = level = level & 0xf;
-
-		if (strlencmp(cmd, "up") == 0) {
-			if (mute)
-				new_mute = 0;
-			else
-				new_level = level == 15 ? 15 : level + 1;
-		} else if (strlencmp(cmd, "down") == 0) {
-			if (mute)
-				new_mute = 0;
-			else
-				new_level = level == 0 ? 0 : level - 1;
-		} else if (sscanf(cmd, "level %d", &new_level) == 1 &&
-			   new_level >= 0 && new_level <= 15) {
-			/* new_level set */
-		} else if (strlencmp(cmd, "mute") == 0) {
-			new_mute = 0x40;
-		} else
-			return -EINVAL;
-
-		if (new_level != level) {
-			/* mute doesn't change */
-
-			cmos_cmd = (new_level > level) ?
-					TP_CMOS_VOLUME_UP : TP_CMOS_VOLUME_DOWN;
-			inc = new_level > level ? 1 : -1;
-
-			if (mute && (issue_thinkpad_cmos_command(cmos_cmd) ||
-				     !acpi_ec_write(volume_offset, level)))
-				return -EIO;
-
-			for (i = level; i != new_level; i += inc)
-				if (issue_thinkpad_cmos_command(cmos_cmd) ||
-				    !acpi_ec_write(volume_offset, i + inc))
-					return -EIO;
-
-			if (mute &&
-			    (issue_thinkpad_cmos_command(TP_CMOS_VOLUME_MUTE) ||
-			     !acpi_ec_write(volume_offset, new_level + mute))) {
-				return -EIO;
-			}
-		}
-
-		if (new_mute != mute) {
-			/* level doesn't change */
-
-			cmos_cmd = (new_mute) ?
-				   TP_CMOS_VOLUME_MUTE : TP_CMOS_VOLUME_UP;
-
-			if (issue_thinkpad_cmos_command(cmos_cmd) ||
-			    !acpi_ec_write(volume_offset, level + new_mute))
-				return -EIO;
-		}
-	}
-
-	return 0;
-}
-
-static struct ibm_struct volume_driver_data = {
-	.name = "volume",
-	.read = volume_read,
-	.write = volume_write,
-};
-
-/*************************************************************************
- * Fan subdriver
- */
-
-/*
- * FAN ACCESS MODES
- *
- * TPACPI_FAN_RD_ACPI_GFAN:
- * 	ACPI GFAN method: returns fan level
- *
- * 	see TPACPI_FAN_WR_ACPI_SFAN
- * 	EC 0x2f (HFSP) not available if GFAN exists
- *
- * TPACPI_FAN_WR_ACPI_SFAN:
- * 	ACPI SFAN method: sets fan level, 0 (stop) to 7 (max)
- *
- * 	EC 0x2f (HFSP) might be available *for reading*, but do not use
- * 	it for writing.
- *
- * TPACPI_FAN_WR_TPEC:
- * 	ThinkPad EC register 0x2f (HFSP): fan control loop mode
- * 	Supported on almost all ThinkPads
- *
- * 	Fan speed changes of any sort (including those caused by the
- * 	disengaged mode) are usually done slowly by the firmware as the
- * 	maximum amount of fan duty cycle change per second seems to be
- * 	limited.
- *
- * 	Reading is not available if GFAN exists.
- * 	Writing is not available if SFAN exists.
- *
- * 	Bits
- *	 7	automatic mode engaged;
- *  		(default operation mode of the ThinkPad)
- * 		fan level is ignored in this mode.
- *	 6	full speed mode (takes precedence over bit 7);
- *		not available on all thinkpads.  May disable
- *		the tachometer while the fan controller ramps up
- *		the speed (which can take up to a few *minutes*).
- *		Speeds up fan to 100% duty-cycle, which is far above
- *		the standard RPM levels.  It is not impossible that
- *		it could cause hardware damage.
- *	5-3	unused in some models.  Extra bits for fan level
- *		in others, but still useless as all values above
- *		7 map to the same speed as level 7 in these models.
- *	2-0	fan level (0..7 usually)
- *			0x00 = stop
- * 			0x07 = max (set when temperatures critical)
- * 		Some ThinkPads may have other levels, see
- * 		TPACPI_FAN_WR_ACPI_FANS (X31/X40/X41)
- *
- *	FIRMWARE BUG: on some models, EC 0x2f might not be initialized at
- *	boot. Apparently the EC does not initialize it, so unless ACPI DSDT
- *	does so, its initial value is meaningless (0x07).
- *
- *	For firmware bugs, refer to:
- *	http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
- *
- * 	----
- *
- *	ThinkPad EC register 0x84 (LSB), 0x85 (MSB):
- *	Main fan tachometer reading (in RPM)
- *
- *	This register is present on all ThinkPads with a new-style EC, and
- *	it is known not to be present on the A21m/e, and T22, as there is
- *	something else in offset 0x84 according to the ACPI DSDT.  Other
- *	ThinkPads from this same time period (and earlier) probably lack the
- *	tachometer as well.
- *
- *	Unfortunately a lot of ThinkPads with new-style ECs but whose firmware
- *	was never fixed by IBM to report the EC firmware version string
- *	probably support the tachometer (like the early X models), so
- *	detecting it is quite hard.  We need more data to know for sure.
- *
- *	FIRMWARE BUG: always read 0x84 first, otherwise incorrect readings
- *	might result.
- *
- *	FIRMWARE BUG: may go stale while the EC is switching to full speed
- *	mode.
- *
- *	For firmware bugs, refer to:
- *	http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
- *
- * TPACPI_FAN_WR_ACPI_FANS:
- *	ThinkPad X31, X40, X41.  Not available in the X60.
- *
- *	FANS ACPI handle: takes three arguments: low speed, medium speed,
- *	high speed.  ACPI DSDT seems to map these three speeds to levels
- *	as follows: STOP LOW LOW MED MED HIGH HIGH HIGH HIGH
- *	(this map is stored on FAN0..FAN8 as "0,1,1,2,2,3,3,3,3")
- *
- * 	The speeds are stored on handles
- * 	(FANA:FAN9), (FANC:FANB), (FANE:FAND).
- *
- * 	There are three default speed sets, accessible as handles:
- * 	FS1L,FS1M,FS1H; FS2L,FS2M,FS2H; FS3L,FS3M,FS3H
- *
- * 	ACPI DSDT switches which set is in use depending on various
- * 	factors.
- *
- * 	TPACPI_FAN_WR_TPEC is also available and should be used to
- * 	command the fan.  The X31/X40/X41 seems to have 8 fan levels,
- * 	but the ACPI tables just mention level 7.
- */
-
-enum {					/* Fan control constants */
-	fan_status_offset = 0x2f,	/* EC register 0x2f */
-	fan_rpm_offset = 0x84,		/* EC register 0x84: LSB, 0x85 MSB (RPM)
-					 * 0x84 must be read before 0x85 */
-
-	TP_EC_FAN_FULLSPEED = 0x40,	/* EC fan mode: full speed */
-	TP_EC_FAN_AUTO	    = 0x80,	/* EC fan mode: auto fan control */
-
-	TPACPI_FAN_LAST_LEVEL = 0x100,	/* Use cached last-seen fan level */
-};
-
-enum fan_status_access_mode {
-	TPACPI_FAN_NONE = 0,		/* No fan status or control */
-	TPACPI_FAN_RD_ACPI_GFAN,	/* Use ACPI GFAN */
-	TPACPI_FAN_RD_TPEC,		/* Use ACPI EC regs 0x2f, 0x84-0x85 */
-};
-
-enum fan_control_access_mode {
-	TPACPI_FAN_WR_NONE = 0,		/* No fan control */
-	TPACPI_FAN_WR_ACPI_SFAN,	/* Use ACPI SFAN */
-	TPACPI_FAN_WR_TPEC,		/* Use ACPI EC reg 0x2f */
-	TPACPI_FAN_WR_ACPI_FANS,	/* Use ACPI FANS and EC reg 0x2f */
-};
-
-enum fan_control_commands {
-	TPACPI_FAN_CMD_SPEED 	= 0x0001,	/* speed command */
-	TPACPI_FAN_CMD_LEVEL 	= 0x0002,	/* level command  */
-	TPACPI_FAN_CMD_ENABLE	= 0x0004,	/* enable/disable cmd,
-						 * and also watchdog cmd */
-};
-
-static int fan_control_allowed;
-
-static enum fan_status_access_mode fan_status_access_mode;
-static enum fan_control_access_mode fan_control_access_mode;
-static enum fan_control_commands fan_control_commands;
-
-static u8 fan_control_initial_status;
-static u8 fan_control_desired_level;
-static u8 fan_control_resume_level;
-static int fan_watchdog_maxinterval;
-
-static struct mutex fan_mutex;
-
-static void fan_watchdog_fire(struct work_struct *ignored);
-static DECLARE_DELAYED_WORK(fan_watchdog_task, fan_watchdog_fire);
-
-TPACPI_HANDLE(fans, ec, "FANS");	/* X31, X40, X41 */
-TPACPI_HANDLE(gfan, ec, "GFAN",	/* 570 */
-	   "\\FSPD",		/* 600e/x, 770e, 770x */
-	   );			/* all others */
-TPACPI_HANDLE(sfan, ec, "SFAN",	/* 570 */
-	   "JFNS",		/* 770x-JL */
-	   );			/* all others */
-
-/*
- * Call with fan_mutex held
- */
-static void fan_update_desired_level(u8 status)
-{
-	if ((status &
-	     (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) == 0) {
-		if (status > 7)
-			fan_control_desired_level = 7;
-		else
-			fan_control_desired_level = status;
-	}
-}
-
-static int fan_get_status(u8 *status)
-{
-	u8 s;
-
-	/* TODO:
-	 * Add TPACPI_FAN_RD_ACPI_FANS ? */
-
-	switch (fan_status_access_mode) {
-	case TPACPI_FAN_RD_ACPI_GFAN:
-		/* 570, 600e/x, 770e, 770x */
-
-		if (unlikely(!acpi_evalf(gfan_handle, &s, NULL, "d")))
-			return -EIO;
-
-		if (likely(status))
-			*status = s & 0x07;
-
-		break;
-
-	case TPACPI_FAN_RD_TPEC:
-		/* all except 570, 600e/x, 770e, 770x */
-		if (unlikely(!acpi_ec_read(fan_status_offset, &s)))
-			return -EIO;
-
-		if (likely(status))
-			*status = s;
-
-		break;
-
-	default:
-		return -ENXIO;
-	}
-
-	return 0;
-}
-
-static int fan_get_status_safe(u8 *status)
-{
-	int rc;
-	u8 s;
-
-	if (mutex_lock_interruptible(&fan_mutex))
-		return -ERESTARTSYS;
-	rc = fan_get_status(&s);
-	if (!rc)
-		fan_update_desired_level(s);
-	mutex_unlock(&fan_mutex);
-
-	if (status)
-		*status = s;
-
-	return rc;
-}
-
-static int fan_get_speed(unsigned int *speed)
-{
-	u8 hi, lo;
-
-	switch (fan_status_access_mode) {
-	case TPACPI_FAN_RD_TPEC:
-		/* all except 570, 600e/x, 770e, 770x */
-		if (unlikely(!acpi_ec_read(fan_rpm_offset, &lo) ||
-			     !acpi_ec_read(fan_rpm_offset + 1, &hi)))
-			return -EIO;
-
-		if (likely(speed))
-			*speed = (hi << 8) | lo;
-
-		break;
-
-	default:
-		return -ENXIO;
-	}
-
-	return 0;
-}
-
-static int fan_set_level(int level)
-{
-	if (!fan_control_allowed)
-		return -EPERM;
-
-	switch (fan_control_access_mode) {
-	case TPACPI_FAN_WR_ACPI_SFAN:
-		if (level >= 0 && level <= 7) {
-			if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", level))
-				return -EIO;
-		} else
-			return -EINVAL;
-		break;
-
-	case TPACPI_FAN_WR_ACPI_FANS:
-	case TPACPI_FAN_WR_TPEC:
-		if (!(level & TP_EC_FAN_AUTO) &&
-		    !(level & TP_EC_FAN_FULLSPEED) &&
-		    ((level < 0) || (level > 7)))
-			return -EINVAL;
-
-		/* safety net should the EC not support AUTO
-		 * or FULLSPEED mode bits and just ignore them */
-		if (level & TP_EC_FAN_FULLSPEED)
-			level |= 7;	/* safety min speed 7 */
-		else if (level & TP_EC_FAN_AUTO)
-			level |= 4;	/* safety min speed 4 */
-
-		if (!acpi_ec_write(fan_status_offset, level))
-			return -EIO;
-		else
-			tp_features.fan_ctrl_status_undef = 0;
-		break;
-
-	default:
-		return -ENXIO;
-	}
-	return 0;
-}
-
-static int fan_set_level_safe(int level)
-{
-	int rc;
-
-	if (!fan_control_allowed)
-		return -EPERM;
-
-	if (mutex_lock_interruptible(&fan_mutex))
-		return -ERESTARTSYS;
-
-	if (level == TPACPI_FAN_LAST_LEVEL)
-		level = fan_control_desired_level;
-
-	rc = fan_set_level(level);
-	if (!rc)
-		fan_update_desired_level(level);
-
-	mutex_unlock(&fan_mutex);
-	return rc;
-}
-
-static int fan_set_enable(void)
-{
-	u8 s;
-	int rc;
-
-	if (!fan_control_allowed)
-		return -EPERM;
-
-	if (mutex_lock_interruptible(&fan_mutex))
-		return -ERESTARTSYS;
-
-	switch (fan_control_access_mode) {
-	case TPACPI_FAN_WR_ACPI_FANS:
-	case TPACPI_FAN_WR_TPEC:
-		rc = fan_get_status(&s);
-		if (rc < 0)
-			break;
-
-		/* Don't go out of emergency fan mode */
-		if (s != 7) {
-			s &= 0x07;
-			s |= TP_EC_FAN_AUTO | 4; /* min fan speed 4 */
-		}
-
-		if (!acpi_ec_write(fan_status_offset, s))
-			rc = -EIO;
-		else {
-			tp_features.fan_ctrl_status_undef = 0;
-			rc = 0;
-		}
-		break;
-
-	case TPACPI_FAN_WR_ACPI_SFAN:
-		rc = fan_get_status(&s);
-		if (rc < 0)
-			break;
-
-		s &= 0x07;
-
-		/* Set fan to at least level 4 */
-		s |= 4;
-
-		if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", s))
-			rc = -EIO;
-		else
-			rc = 0;
-		break;
-
-	default:
-		rc = -ENXIO;
-	}
-
-	mutex_unlock(&fan_mutex);
-	return rc;
-}
-
-static int fan_set_disable(void)
-{
-	int rc;
-
-	if (!fan_control_allowed)
-		return -EPERM;
-
-	if (mutex_lock_interruptible(&fan_mutex))
-		return -ERESTARTSYS;
-
-	rc = 0;
-	switch (fan_control_access_mode) {
-	case TPACPI_FAN_WR_ACPI_FANS:
-	case TPACPI_FAN_WR_TPEC:
-		if (!acpi_ec_write(fan_status_offset, 0x00))
-			rc = -EIO;
-		else {
-			fan_control_desired_level = 0;
-			tp_features.fan_ctrl_status_undef = 0;
-		}
-		break;
-
-	case TPACPI_FAN_WR_ACPI_SFAN:
-		if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", 0x00))
-			rc = -EIO;
-		else
-			fan_control_desired_level = 0;
-		break;
-
-	default:
-		rc = -ENXIO;
-	}
-
-	mutex_unlock(&fan_mutex);
-	return rc;
-}
-
-static int fan_set_speed(int speed)
-{
-	int rc;
-
-	if (!fan_control_allowed)
-		return -EPERM;
-
-	if (mutex_lock_interruptible(&fan_mutex))
-		return -ERESTARTSYS;
-
-	rc = 0;
-	switch (fan_control_access_mode) {
-	case TPACPI_FAN_WR_ACPI_FANS:
-		if (speed >= 0 && speed <= 65535) {
-			if (!acpi_evalf(fans_handle, NULL, NULL, "vddd",
-					speed, speed, speed))
-				rc = -EIO;
-		} else
-			rc = -EINVAL;
-		break;
-
-	default:
-		rc = -ENXIO;
-	}
-
-	mutex_unlock(&fan_mutex);
-	return rc;
-}
-
-static void fan_watchdog_reset(void)
-{
-	static int fan_watchdog_active;
-
-	if (fan_control_access_mode == TPACPI_FAN_WR_NONE)
-		return;
-
-	if (fan_watchdog_active)
-		cancel_delayed_work(&fan_watchdog_task);
-
-	if (fan_watchdog_maxinterval > 0 &&
-	    tpacpi_lifecycle != TPACPI_LIFE_EXITING) {
-		fan_watchdog_active = 1;
-		if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task,
-				msecs_to_jiffies(fan_watchdog_maxinterval
-						 * 1000))) {
-			printk(TPACPI_ERR
-			       "failed to queue the fan watchdog, "
-			       "watchdog will not trigger\n");
-		}
-	} else
-		fan_watchdog_active = 0;
-}
-
-static void fan_watchdog_fire(struct work_struct *ignored)
-{
-	int rc;
-
-	if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING)
-		return;
-
-	printk(TPACPI_NOTICE "fan watchdog: enabling fan\n");
-	rc = fan_set_enable();
-	if (rc < 0) {
-		printk(TPACPI_ERR "fan watchdog: error %d while enabling fan, "
-			"will try again later...\n", -rc);
-		/* reschedule for later */
-		fan_watchdog_reset();
-	}
-}
-
-/*
- * SYSFS fan layout: hwmon compatible (device)
- *
- * pwm*_enable:
- * 	0: "disengaged" mode
- * 	1: manual mode
- * 	2: native EC "auto" mode (recommended, hardware default)
- *
- * pwm*: set speed in manual mode, ignored otherwise.
- * 	0 is level 0; 255 is level 7. Intermediate points done with linear
- * 	interpolation.
- *
- * fan*_input: tachometer reading, RPM
- *
- *
- * SYSFS fan layout: extensions
- *
- * fan_watchdog (driver):
- * 	fan watchdog interval in seconds, 0 disables (default), max 120
- */
-
-/* sysfs fan pwm1_enable ----------------------------------------------- */
-static ssize_t fan_pwm1_enable_show(struct device *dev,
-				    struct device_attribute *attr,
-				    char *buf)
-{
-	int res, mode;
-	u8 status;
-
-	res = fan_get_status_safe(&status);
-	if (res)
-		return res;
-
-	if (unlikely(tp_features.fan_ctrl_status_undef)) {
-		if (status != fan_control_initial_status) {
-			tp_features.fan_ctrl_status_undef = 0;
-		} else {
-			/* Return most likely status. In fact, it
-			 * might be the only possible status */
-			status = TP_EC_FAN_AUTO;
-		}
-	}
-
-	if (status & TP_EC_FAN_FULLSPEED) {
-		mode = 0;
-	} else if (status & TP_EC_FAN_AUTO) {
-		mode = 2;
-	} else
-		mode = 1;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", mode);
-}
-
-static ssize_t fan_pwm1_enable_store(struct device *dev,
-				     struct device_attribute *attr,
-				     const char *buf, size_t count)
-{
-	unsigned long t;
-	int res, level;
-
-	if (parse_strtoul(buf, 2, &t))
-		return -EINVAL;
-
-	switch (t) {
-	case 0:
-		level = TP_EC_FAN_FULLSPEED;
-		break;
-	case 1:
-		level = TPACPI_FAN_LAST_LEVEL;
-		break;
-	case 2:
-		level = TP_EC_FAN_AUTO;
-		break;
-	case 3:
-		/* reserved for software-controlled auto mode */
-		return -ENOSYS;
-	default:
-		return -EINVAL;
-	}
-
-	res = fan_set_level_safe(level);
-	if (res == -ENXIO)
-		return -EINVAL;
-	else if (res < 0)
-		return res;
-
-	fan_watchdog_reset();
-
-	return count;
-}
-
-static struct device_attribute dev_attr_fan_pwm1_enable =
-	__ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
-		fan_pwm1_enable_show, fan_pwm1_enable_store);
-
-/* sysfs fan pwm1 ------------------------------------------------------ */
-static ssize_t fan_pwm1_show(struct device *dev,
-			     struct device_attribute *attr,
-			     char *buf)
-{
-	int res;
-	u8 status;
-
-	res = fan_get_status_safe(&status);
-	if (res)
-		return res;
-
-	if (unlikely(tp_features.fan_ctrl_status_undef)) {
-		if (status != fan_control_initial_status) {
-			tp_features.fan_ctrl_status_undef = 0;
-		} else {
-			status = TP_EC_FAN_AUTO;
-		}
-	}
-
-	if ((status &
-	     (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) != 0)
-		status = fan_control_desired_level;
-
-	if (status > 7)
-		status = 7;
-
-	return snprintf(buf, PAGE_SIZE, "%u\n", (status * 255) / 7);
-}
-
-static ssize_t fan_pwm1_store(struct device *dev,
-			      struct device_attribute *attr,
-			      const char *buf, size_t count)
-{
-	unsigned long s;
-	int rc;
-	u8 status, newlevel;
-
-	if (parse_strtoul(buf, 255, &s))
-		return -EINVAL;
-
-	/* scale down from 0-255 to 0-7 */
-	newlevel = (s >> 5) & 0x07;
-
-	if (mutex_lock_interruptible(&fan_mutex))
-		return -ERESTARTSYS;
-
-	rc = fan_get_status(&status);
-	if (!rc && (status &
-		    (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) == 0) {
-		rc = fan_set_level(newlevel);
-		if (rc == -ENXIO)
-			rc = -EINVAL;
-		else if (!rc) {
-			fan_update_desired_level(newlevel);
-			fan_watchdog_reset();
-		}
-	}
-
-	mutex_unlock(&fan_mutex);
-	return (rc)? rc : count;
-}
-
-static struct device_attribute dev_attr_fan_pwm1 =
-	__ATTR(pwm1, S_IWUSR | S_IRUGO,
-		fan_pwm1_show, fan_pwm1_store);
-
-/* sysfs fan fan1_input ------------------------------------------------ */
-static ssize_t fan_fan1_input_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	int res;
-	unsigned int speed;
-
-	res = fan_get_speed(&speed);
-	if (res < 0)
-		return res;
-
-	return snprintf(buf, PAGE_SIZE, "%u\n", speed);
-}
-
-static struct device_attribute dev_attr_fan_fan1_input =
-	__ATTR(fan1_input, S_IRUGO,
-		fan_fan1_input_show, NULL);
-
-/* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */
-static ssize_t fan_fan_watchdog_show(struct device_driver *drv,
-				     char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%u\n", fan_watchdog_maxinterval);
-}
-
-static ssize_t fan_fan_watchdog_store(struct device_driver *drv,
-				      const char *buf, size_t count)
-{
-	unsigned long t;
-
-	if (parse_strtoul(buf, 120, &t))
-		return -EINVAL;
-
-	if (!fan_control_allowed)
-		return -EPERM;
-
-	fan_watchdog_maxinterval = t;
-	fan_watchdog_reset();
-
-	return count;
-}
-
-static DRIVER_ATTR(fan_watchdog, S_IWUSR | S_IRUGO,
-		fan_fan_watchdog_show, fan_fan_watchdog_store);
-
-/* --------------------------------------------------------------------- */
-static struct attribute *fan_attributes[] = {
-	&dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr,
-	&dev_attr_fan_fan1_input.attr,
-	NULL
-};
-
-static const struct attribute_group fan_attr_group = {
-	.attrs = fan_attributes,
-};
-
-static int __init fan_init(struct ibm_init_struct *iibm)
-{
-	int rc;
-
-	vdbg_printk(TPACPI_DBG_INIT, "initializing fan subdriver\n");
-
-	mutex_init(&fan_mutex);
-	fan_status_access_mode = TPACPI_FAN_NONE;
-	fan_control_access_mode = TPACPI_FAN_WR_NONE;
-	fan_control_commands = 0;
-	fan_watchdog_maxinterval = 0;
-	tp_features.fan_ctrl_status_undef = 0;
-	fan_control_desired_level = 7;
-
-	TPACPI_ACPIHANDLE_INIT(fans);
-	TPACPI_ACPIHANDLE_INIT(gfan);
-	TPACPI_ACPIHANDLE_INIT(sfan);
-
-	if (gfan_handle) {
-		/* 570, 600e/x, 770e, 770x */
-		fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
-	} else {
-		/* all other ThinkPads: note that even old-style
-		 * ThinkPad ECs support the fan control register */
-		if (likely(acpi_ec_read(fan_status_offset,
-					&fan_control_initial_status))) {
-			fan_status_access_mode = TPACPI_FAN_RD_TPEC;
-
-			/* In some ThinkPads, neither the EC nor the ACPI
-			 * DSDT initialize the fan status, and it ends up
-			 * being set to 0x07 when it *could* be either
-			 * 0x07 or 0x80.
-			 *
-			 * Enable for TP-1Y (T43), TP-78 (R51e),
-			 * TP-76 (R52), TP-70 (T43, R52), which are known
-			 * to be buggy. */
-			if (fan_control_initial_status == 0x07) {
-				switch (thinkpad_id.ec_model) {
-				case 0x5931: /* TP-1Y */
-				case 0x3837: /* TP-78 */
-				case 0x3637: /* TP-76 */
-				case 0x3037: /* TP-70 */
-					printk(TPACPI_NOTICE
-					       "fan_init: initial fan status "
-					       "is unknown, assuming it is "
-					       "in auto mode\n");
-					tp_features.fan_ctrl_status_undef = 1;
-					break;
-				}
-			}
-		} else {
-			printk(TPACPI_ERR
-			       "ThinkPad ACPI EC access misbehaving, "
-			       "fan status and control unavailable\n");
-			return 1;
-		}
-	}
-
-	if (sfan_handle) {
-		/* 570, 770x-JL */
-		fan_control_access_mode = TPACPI_FAN_WR_ACPI_SFAN;
-		fan_control_commands |=
-		    TPACPI_FAN_CMD_LEVEL | TPACPI_FAN_CMD_ENABLE;
-	} else {
-		if (!gfan_handle) {
-			/* gfan without sfan means no fan control */
-			/* all other models implement TP EC 0x2f control */
-
-			if (fans_handle) {
-				/* X31, X40, X41 */
-				fan_control_access_mode =
-				    TPACPI_FAN_WR_ACPI_FANS;
-				fan_control_commands |=
-				    TPACPI_FAN_CMD_SPEED |
-				    TPACPI_FAN_CMD_LEVEL |
-				    TPACPI_FAN_CMD_ENABLE;
-			} else {
-				fan_control_access_mode = TPACPI_FAN_WR_TPEC;
-				fan_control_commands |=
-				    TPACPI_FAN_CMD_LEVEL |
-				    TPACPI_FAN_CMD_ENABLE;
-			}
-		}
-	}
-
-	vdbg_printk(TPACPI_DBG_INIT, "fan is %s, modes %d, %d\n",
-		str_supported(fan_status_access_mode != TPACPI_FAN_NONE ||
-		  fan_control_access_mode != TPACPI_FAN_WR_NONE),
-		fan_status_access_mode, fan_control_access_mode);
-
-	/* fan control master switch */
-	if (!fan_control_allowed) {
-		fan_control_access_mode = TPACPI_FAN_WR_NONE;
-		fan_control_commands = 0;
-		dbg_printk(TPACPI_DBG_INIT,
-			   "fan control features disabled by parameter\n");
-	}
-
-	/* update fan_control_desired_level */
-	if (fan_status_access_mode != TPACPI_FAN_NONE)
-		fan_get_status_safe(NULL);
-
-	if (fan_status_access_mode != TPACPI_FAN_NONE ||
-	    fan_control_access_mode != TPACPI_FAN_WR_NONE) {
-		rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
-					 &fan_attr_group);
-		if (rc < 0)
-			return rc;
-
-		rc = driver_create_file(&tpacpi_hwmon_pdriver.driver,
-					&driver_attr_fan_watchdog);
-		if (rc < 0) {
-			sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
-					&fan_attr_group);
-			return rc;
-		}
-		return 0;
-	} else
-		return 1;
-}
-
-static void fan_exit(void)
-{
-	vdbg_printk(TPACPI_DBG_EXIT,
-		    "cancelling any pending fan watchdog tasks\n");
-
-	/* FIXME: can we really do this unconditionally? */
-	sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, &fan_attr_group);
-	driver_remove_file(&tpacpi_hwmon_pdriver.driver,
-			   &driver_attr_fan_watchdog);
-
-	cancel_delayed_work(&fan_watchdog_task);
-	flush_workqueue(tpacpi_wq);
-}
-
-static void fan_suspend(pm_message_t state)
-{
-	int rc;
-
-	if (!fan_control_allowed)
-		return;
-
-	/* Store fan status in cache */
-	fan_control_resume_level = 0;
-	rc = fan_get_status_safe(&fan_control_resume_level);
-	if (rc < 0)
-		printk(TPACPI_NOTICE
-			"failed to read fan level for later "
-			"restore during resume: %d\n", rc);
-
-	/* if it is undefined, don't attempt to restore it.
-	 * KEEP THIS LAST */
-	if (tp_features.fan_ctrl_status_undef)
-		fan_control_resume_level = 0;
-}
-
-static void fan_resume(void)
-{
-	u8 current_level = 7;
-	bool do_set = false;
-	int rc;
-
-	/* DSDT *always* updates status on resume */
-	tp_features.fan_ctrl_status_undef = 0;
-
-	if (!fan_control_allowed ||
-	    !fan_control_resume_level ||
-	    (fan_get_status_safe(&current_level) < 0))
-		return;
-
-	switch (fan_control_access_mode) {
-	case TPACPI_FAN_WR_ACPI_SFAN:
-		/* never decrease fan level */
-		do_set = (fan_control_resume_level > current_level);
-		break;
-	case TPACPI_FAN_WR_ACPI_FANS:
-	case TPACPI_FAN_WR_TPEC:
-		/* never decrease fan level, scale is:
-		 * TP_EC_FAN_FULLSPEED > 7 >= TP_EC_FAN_AUTO
-		 *
-		 * We expect the firmware to set either 7 or AUTO, but we
-		 * handle FULLSPEED out of paranoia.
-		 *
-		 * So, we can safely only restore FULLSPEED or 7, anything
-		 * else could slow the fan.  Restoring AUTO is useless, at
-		 * best that's exactly what the DSDT already set (it is the
-		 * slowest setting the DSDT uses).
-		 *
-		 * Always keep in mind that the DSDT *will* have set the
-		 * fans to what the vendor supposes is the best level.  We
-		 * muck with it only to speed the fan up.
-		 */
-		if (fan_control_resume_level != 7 &&
-		    !(fan_control_resume_level & TP_EC_FAN_FULLSPEED))
-			return;
-		else
-			do_set = !(current_level & TP_EC_FAN_FULLSPEED) &&
-				 (current_level != fan_control_resume_level);
-		break;
-	default:
-		return;
-	}
-	if (do_set) {
-		printk(TPACPI_NOTICE
-			"restoring fan level to 0x%02x\n",
-			fan_control_resume_level);
-		rc = fan_set_level_safe(fan_control_resume_level);
-		if (rc < 0)
-			printk(TPACPI_NOTICE
-				"failed to restore fan level: %d\n", rc);
-	}
-}
-
-static int fan_read(char *p)
-{
-	int len = 0;
-	int rc;
-	u8 status;
-	unsigned int speed = 0;
-
-	switch (fan_status_access_mode) {
-	case TPACPI_FAN_RD_ACPI_GFAN:
-		/* 570, 600e/x, 770e, 770x */
-		rc = fan_get_status_safe(&status);
-		if (rc < 0)
-			return rc;
-
-		len += sprintf(p + len, "status:\t\t%s\n"
-			       "level:\t\t%d\n",
-			       (status != 0) ? "enabled" : "disabled", status);
-		break;
-
-	case TPACPI_FAN_RD_TPEC:
-		/* all except 570, 600e/x, 770e, 770x */
-		rc = fan_get_status_safe(&status);
-		if (rc < 0)
-			return rc;
-
-		if (unlikely(tp_features.fan_ctrl_status_undef)) {
-			if (status != fan_control_initial_status)
-				tp_features.fan_ctrl_status_undef = 0;
-			else
-				/* Return most likely status. In fact, it
-				 * might be the only possible status */
-				status = TP_EC_FAN_AUTO;
-		}
-
-		len += sprintf(p + len, "status:\t\t%s\n",
-			       (status != 0) ? "enabled" : "disabled");
-
-		rc = fan_get_speed(&speed);
-		if (rc < 0)
-			return rc;
-
-		len += sprintf(p + len, "speed:\t\t%d\n", speed);
-
-		if (status & TP_EC_FAN_FULLSPEED)
-			/* Disengaged mode takes precedence */
-			len += sprintf(p + len, "level:\t\tdisengaged\n");
-		else if (status & TP_EC_FAN_AUTO)
-			len += sprintf(p + len, "level:\t\tauto\n");
-		else
-			len += sprintf(p + len, "level:\t\t%d\n", status);
-		break;
-
-	case TPACPI_FAN_NONE:
-	default:
-		len += sprintf(p + len, "status:\t\tnot supported\n");
-	}
-
-	if (fan_control_commands & TPACPI_FAN_CMD_LEVEL) {
-		len += sprintf(p + len, "commands:\tlevel <level>");
-
-		switch (fan_control_access_mode) {
-		case TPACPI_FAN_WR_ACPI_SFAN:
-			len += sprintf(p + len, " (<level> is 0-7)\n");
-			break;
-
-		default:
-			len += sprintf(p + len, " (<level> is 0-7, "
-				       "auto, disengaged, full-speed)\n");
-			break;
-		}
-	}
-
-	if (fan_control_commands & TPACPI_FAN_CMD_ENABLE)
-		len += sprintf(p + len, "commands:\tenable, disable\n"
-			       "commands:\twatchdog <timeout> (<timeout> "
-			       "is 0 (off), 1-120 (seconds))\n");
-
-	if (fan_control_commands & TPACPI_FAN_CMD_SPEED)
-		len += sprintf(p + len, "commands:\tspeed <speed>"
-			       " (<speed> is 0-65535)\n");
-
-	return len;
-}
-
-static int fan_write_cmd_level(const char *cmd, int *rc)
-{
-	int level;
-
-	if (strlencmp(cmd, "level auto") == 0)
-		level = TP_EC_FAN_AUTO;
-	else if ((strlencmp(cmd, "level disengaged") == 0) ||
-			(strlencmp(cmd, "level full-speed") == 0))
-		level = TP_EC_FAN_FULLSPEED;
-	else if (sscanf(cmd, "level %d", &level) != 1)
-		return 0;
-
-	*rc = fan_set_level_safe(level);
-	if (*rc == -ENXIO)
-		printk(TPACPI_ERR "level command accepted for unsupported "
-		       "access mode %d\n", fan_control_access_mode);
-
-	return 1;
-}
-
-static int fan_write_cmd_enable(const char *cmd, int *rc)
-{
-	if (strlencmp(cmd, "enable") != 0)
-		return 0;
-
-	*rc = fan_set_enable();
-	if (*rc == -ENXIO)
-		printk(TPACPI_ERR "enable command accepted for unsupported "
-		       "access mode %d\n", fan_control_access_mode);
-
-	return 1;
-}
-
-static int fan_write_cmd_disable(const char *cmd, int *rc)
-{
-	if (strlencmp(cmd, "disable") != 0)
-		return 0;
-
-	*rc = fan_set_disable();
-	if (*rc == -ENXIO)
-		printk(TPACPI_ERR "disable command accepted for unsupported "
-		       "access mode %d\n", fan_control_access_mode);
-
-	return 1;
-}
-
-static int fan_write_cmd_speed(const char *cmd, int *rc)
-{
-	int speed;
-
-	/* TODO:
-	 * Support speed <low> <medium> <high> ? */
-
-	if (sscanf(cmd, "speed %d", &speed) != 1)
-		return 0;
-
-	*rc = fan_set_speed(speed);
-	if (*rc == -ENXIO)
-		printk(TPACPI_ERR "speed command accepted for unsupported "
-		       "access mode %d\n", fan_control_access_mode);
-
-	return 1;
-}
-
-static int fan_write_cmd_watchdog(const char *cmd, int *rc)
-{
-	int interval;
-
-	if (sscanf(cmd, "watchdog %d", &interval) != 1)
-		return 0;
-
-	if (interval < 0 || interval > 120)
-		*rc = -EINVAL;
-	else
-		fan_watchdog_maxinterval = interval;
-
-	return 1;
-}
-
-static int fan_write(char *buf)
-{
-	char *cmd;
-	int rc = 0;
-
-	while (!rc && (cmd = next_cmd(&buf))) {
-		if (!((fan_control_commands & TPACPI_FAN_CMD_LEVEL) &&
-		      fan_write_cmd_level(cmd, &rc)) &&
-		    !((fan_control_commands & TPACPI_FAN_CMD_ENABLE) &&
-		      (fan_write_cmd_enable(cmd, &rc) ||
-		       fan_write_cmd_disable(cmd, &rc) ||
-		       fan_write_cmd_watchdog(cmd, &rc))) &&
-		    !((fan_control_commands & TPACPI_FAN_CMD_SPEED) &&
-		      fan_write_cmd_speed(cmd, &rc))
-		    )
-			rc = -EINVAL;
-		else if (!rc)
-			fan_watchdog_reset();
-	}
-
-	return rc;
-}
-
-static struct ibm_struct fan_driver_data = {
-	.name = "fan",
-	.read = fan_read,
-	.write = fan_write,
-	.exit = fan_exit,
-	.suspend = fan_suspend,
-	.resume = fan_resume,
-};
-
-/****************************************************************************
- ****************************************************************************
- *
- * Infrastructure
- *
- ****************************************************************************
- ****************************************************************************/
-
-/* sysfs name ---------------------------------------------------------- */
-static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME);
-}
-
-static struct device_attribute dev_attr_thinkpad_acpi_pdev_name =
-	__ATTR(name, S_IRUGO, thinkpad_acpi_pdev_name_show, NULL);
-
-/* --------------------------------------------------------------------- */
-
-/* /proc support */
-static struct proc_dir_entry *proc_dir;
-
-/*
- * Module and infrastructure probe, init and exit handling
- */
-
-static int force_load;
-
-#ifdef CONFIG_THINKPAD_ACPI_DEBUG
-static const char * __init str_supported(int is_supported)
-{
-	static char text_unsupported[] __initdata = "not supported";
-
-	return (is_supported)? &text_unsupported[4] : &text_unsupported[0];
-}
-#endif /* CONFIG_THINKPAD_ACPI_DEBUG */
-
-static void ibm_exit(struct ibm_struct *ibm)
-{
-	dbg_printk(TPACPI_DBG_EXIT, "removing %s\n", ibm->name);
-
-	list_del_init(&ibm->all_drivers);
-
-	if (ibm->flags.acpi_notify_installed) {
-		dbg_printk(TPACPI_DBG_EXIT,
-			"%s: acpi_remove_notify_handler\n", ibm->name);
-		BUG_ON(!ibm->acpi);
-		acpi_remove_notify_handler(*ibm->acpi->handle,
-					   ibm->acpi->type,
-					   dispatch_acpi_notify);
-		ibm->flags.acpi_notify_installed = 0;
-	}
-
-	if (ibm->flags.proc_created) {
-		dbg_printk(TPACPI_DBG_EXIT,
-			"%s: remove_proc_entry\n", ibm->name);
-		remove_proc_entry(ibm->name, proc_dir);
-		ibm->flags.proc_created = 0;
-	}
-
-	if (ibm->flags.acpi_driver_registered) {
-		dbg_printk(TPACPI_DBG_EXIT,
-			"%s: acpi_bus_unregister_driver\n", ibm->name);
-		BUG_ON(!ibm->acpi);
-		acpi_bus_unregister_driver(ibm->acpi->driver);
-		kfree(ibm->acpi->driver);
-		ibm->acpi->driver = NULL;
-		ibm->flags.acpi_driver_registered = 0;
-	}
-
-	if (ibm->flags.init_called && ibm->exit) {
-		ibm->exit();
-		ibm->flags.init_called = 0;
-	}
-
-	dbg_printk(TPACPI_DBG_INIT, "finished removing %s\n", ibm->name);
-}
-
-static int __init ibm_init(struct ibm_init_struct *iibm)
-{
-	int ret;
-	struct ibm_struct *ibm = iibm->data;
-	struct proc_dir_entry *entry;
-
-	BUG_ON(ibm == NULL);
-
-	INIT_LIST_HEAD(&ibm->all_drivers);
-
-	if (ibm->flags.experimental && !experimental)
-		return 0;
-
-	dbg_printk(TPACPI_DBG_INIT,
-		"probing for %s\n", ibm->name);
-
-	if (iibm->init) {
-		ret = iibm->init(iibm);
-		if (ret > 0)
-			return 0;	/* probe failed */
-		if (ret)
-			return ret;
-
-		ibm->flags.init_called = 1;
-	}
-
-	if (ibm->acpi) {
-		if (ibm->acpi->hid) {
-			ret = register_tpacpi_subdriver(ibm);
-			if (ret)
-				goto err_out;
-		}
-
-		if (ibm->acpi->notify) {
-			ret = setup_acpi_notify(ibm);
-			if (ret == -ENODEV) {
-				printk(TPACPI_NOTICE "disabling subdriver %s\n",
-					ibm->name);
-				ret = 0;
-				goto err_out;
-			}
-			if (ret < 0)
-				goto err_out;
-		}
-	}
-
-	dbg_printk(TPACPI_DBG_INIT,
-		"%s installed\n", ibm->name);
-
-	if (ibm->read) {
-		entry = create_proc_entry(ibm->name,
-					  S_IFREG | S_IRUGO | S_IWUSR,
-					  proc_dir);
-		if (!entry) {
-			printk(TPACPI_ERR "unable to create proc entry %s\n",
-			       ibm->name);
-			ret = -ENODEV;
-			goto err_out;
-		}
-		entry->owner = THIS_MODULE;
-		entry->data = ibm;
-		entry->read_proc = &dispatch_procfs_read;
-		if (ibm->write)
-			entry->write_proc = &dispatch_procfs_write;
-		ibm->flags.proc_created = 1;
-	}
-
-	list_add_tail(&ibm->all_drivers, &tpacpi_all_drivers);
-
-	return 0;
-
-err_out:
-	dbg_printk(TPACPI_DBG_INIT,
-		"%s: at error exit path with result %d\n",
-		ibm->name, ret);
-
-	ibm_exit(ibm);
-	return (ret < 0)? ret : 0;
-}
-
-/* Probing */
-
-/* returns 0 - probe ok, or < 0 - probe error.
- * Probe ok doesn't mean thinkpad found.
- * On error, kfree() cleanup on tp->* is not performed, caller must do it */
-static int __must_check __init get_thinkpad_model_data(
-						struct thinkpad_id_data *tp)
-{
-	const struct dmi_device *dev = NULL;
-	char ec_fw_string[18];
-	char const *s;
-
-	if (!tp)
-		return -EINVAL;
-
-	memset(tp, 0, sizeof(*tp));
-
-	if (dmi_name_in_vendors("IBM"))
-		tp->vendor = PCI_VENDOR_ID_IBM;
-	else if (dmi_name_in_vendors("LENOVO"))
-		tp->vendor = PCI_VENDOR_ID_LENOVO;
-	else
-		return 0;
-
-	s = dmi_get_system_info(DMI_BIOS_VERSION);
-	tp->bios_version_str = kstrdup(s, GFP_KERNEL);
-	if (s && !tp->bios_version_str)
-		return -ENOMEM;
-	if (!tp->bios_version_str)
-		return 0;
-	tp->bios_model = tp->bios_version_str[0]
-			 | (tp->bios_version_str[1] << 8);
-
-	/*
-	 * ThinkPad T23 or newer, A31 or newer, R50e or newer,
-	 * X32 or newer, all Z series;  Some models must have an
-	 * up-to-date BIOS or they will not be detected.
-	 *
-	 * See http://thinkwiki.org/wiki/List_of_DMI_IDs
-	 */
-	while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev))) {
-		if (sscanf(dev->name,
-			   "IBM ThinkPad Embedded Controller -[%17c",
-			   ec_fw_string) == 1) {
-			ec_fw_string[sizeof(ec_fw_string) - 1] = 0;
-			ec_fw_string[strcspn(ec_fw_string, " ]")] = 0;
-
-			tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL);
-			if (!tp->ec_version_str)
-				return -ENOMEM;
-			tp->ec_model = ec_fw_string[0]
-					| (ec_fw_string[1] << 8);
-			break;
-		}
-	}
-
-	s = dmi_get_system_info(DMI_PRODUCT_VERSION);
-	if (s && !strnicmp(s, "ThinkPad", 8)) {
-		tp->model_str = kstrdup(s, GFP_KERNEL);
-		if (!tp->model_str)
-			return -ENOMEM;
-	}
-
-	s = dmi_get_system_info(DMI_PRODUCT_NAME);
-	tp->nummodel_str = kstrdup(s, GFP_KERNEL);
-	if (s && !tp->nummodel_str)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static int __init probe_for_thinkpad(void)
-{
-	int is_thinkpad;
-
-	if (acpi_disabled)
-		return -ENODEV;
-
-	/*
-	 * Non-ancient models have better DMI tagging, but very old models
-	 * don't.
-	 */
-	is_thinkpad = (thinkpad_id.model_str != NULL);
-
-	/* ec is required because many other handles are relative to it */
-	TPACPI_ACPIHANDLE_INIT(ec);
-	if (!ec_handle) {
-		if (is_thinkpad)
-			printk(TPACPI_ERR
-				"Not yet supported ThinkPad detected!\n");
-		return -ENODEV;
-	}
-
-	/*
-	 * Risks a regression on very old machines, but reduces potential
-	 * false positives a damn great deal
-	 */
-	if (!is_thinkpad)
-		is_thinkpad = (thinkpad_id.vendor == PCI_VENDOR_ID_IBM);
-
-	if (!is_thinkpad && !force_load)
-		return -ENODEV;
-
-	return 0;
-}
-
-
-/* Module init, exit, parameters */
-
-static struct ibm_init_struct ibms_init[] __initdata = {
-	{
-		.init = thinkpad_acpi_driver_init,
-		.data = &thinkpad_acpi_driver_data,
-	},
-	{
-		.init = hotkey_init,
-		.data = &hotkey_driver_data,
-	},
-	{
-		.init = bluetooth_init,
-		.data = &bluetooth_driver_data,
-	},
-	{
-		.init = wan_init,
-		.data = &wan_driver_data,
-	},
-#ifdef CONFIG_THINKPAD_ACPI_VIDEO
-	{
-		.init = video_init,
-		.data = &video_driver_data,
-	},
-#endif
-	{
-		.init = light_init,
-		.data = &light_driver_data,
-	},
-#ifdef CONFIG_THINKPAD_ACPI_DOCK
-	{
-		.init = dock_init,
-		.data = &dock_driver_data[0],
-	},
-	{
-		.init = dock_init2,
-		.data = &dock_driver_data[1],
-	},
-#endif
-#ifdef CONFIG_THINKPAD_ACPI_BAY
-	{
-		.init = bay_init,
-		.data = &bay_driver_data,
-	},
-#endif
-	{
-		.init = cmos_init,
-		.data = &cmos_driver_data,
-	},
-	{
-		.init = led_init,
-		.data = &led_driver_data,
-	},
-	{
-		.init = beep_init,
-		.data = &beep_driver_data,
-	},
-	{
-		.init = thermal_init,
-		.data = &thermal_driver_data,
-	},
-	{
-		.data = &ecdump_driver_data,
-	},
-	{
-		.init = brightness_init,
-		.data = &brightness_driver_data,
-	},
-	{
-		.data = &volume_driver_data,
-	},
-	{
-		.init = fan_init,
-		.data = &fan_driver_data,
-	},
-};
-
-static int __init set_ibm_param(const char *val, struct kernel_param *kp)
-{
-	unsigned int i;
-	struct ibm_struct *ibm;
-
-	if (!kp || !kp->name || !val)
-		return -EINVAL;
-
-	for (i = 0; i < ARRAY_SIZE(ibms_init); i++) {
-		ibm = ibms_init[i].data;
-		WARN_ON(ibm == NULL);
-
-		if (!ibm || !ibm->name)
-			continue;
-
-		if (strcmp(ibm->name, kp->name) == 0 && ibm->write) {
-			if (strlen(val) > sizeof(ibms_init[i].param) - 2)
-				return -ENOSPC;
-			strcpy(ibms_init[i].param, val);
-			strcat(ibms_init[i].param, ",");
-			return 0;
-		}
-	}
-
-	return -EINVAL;
-}
-
-module_param(experimental, int, 0);
-MODULE_PARM_DESC(experimental,
-		 "Enables experimental features when non-zero");
-
-module_param_named(debug, dbg_level, uint, 0);
-MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
-
-module_param(force_load, bool, 0);
-MODULE_PARM_DESC(force_load,
-		 "Attempts to load the driver even on a "
-		 "mis-identified ThinkPad when true");
-
-module_param_named(fan_control, fan_control_allowed, bool, 0);
-MODULE_PARM_DESC(fan_control,
-		 "Enables setting fan parameters features when true");
-
-module_param_named(brightness_mode, brightness_mode, int, 0);
-MODULE_PARM_DESC(brightness_mode,
-		 "Selects brightness control strategy: "
-		 "0=auto, 1=EC, 2=CMOS, 3=both");
-
-module_param(brightness_enable, uint, 0);
-MODULE_PARM_DESC(brightness_enable,
-		 "Enables backlight control when 1, disables when 0");
-
-module_param(hotkey_report_mode, uint, 0);
-MODULE_PARM_DESC(hotkey_report_mode,
-		 "used for backwards compatibility with userspace, "
-		 "see documentation");
-
-#define TPACPI_PARAM(feature) \
-	module_param_call(feature, set_ibm_param, NULL, NULL, 0); \
-	MODULE_PARM_DESC(feature, "Simulates thinkpad-acpi procfs command " \
-			 "at module load, see documentation")
-
-TPACPI_PARAM(hotkey);
-TPACPI_PARAM(bluetooth);
-TPACPI_PARAM(video);
-TPACPI_PARAM(light);
-#ifdef CONFIG_THINKPAD_ACPI_DOCK
-TPACPI_PARAM(dock);
-#endif
-#ifdef CONFIG_THINKPAD_ACPI_BAY
-TPACPI_PARAM(bay);
-#endif /* CONFIG_THINKPAD_ACPI_BAY */
-TPACPI_PARAM(cmos);
-TPACPI_PARAM(led);
-TPACPI_PARAM(beep);
-TPACPI_PARAM(ecdump);
-TPACPI_PARAM(brightness);
-TPACPI_PARAM(volume);
-TPACPI_PARAM(fan);
-
-static void thinkpad_acpi_module_exit(void)
-{
-	struct ibm_struct *ibm, *itmp;
-
-	tpacpi_lifecycle = TPACPI_LIFE_EXITING;
-
-	list_for_each_entry_safe_reverse(ibm, itmp,
-					 &tpacpi_all_drivers,
-					 all_drivers) {
-		ibm_exit(ibm);
-	}
-
-	dbg_printk(TPACPI_DBG_INIT, "finished subdriver exit path...\n");
-
-	if (tpacpi_inputdev) {
-		if (tp_features.input_device_registered)
-			input_unregister_device(tpacpi_inputdev);
-		else
-			input_free_device(tpacpi_inputdev);
-	}
-
-	if (tpacpi_hwmon)
-		hwmon_device_unregister(tpacpi_hwmon);
-
-	if (tp_features.sensors_pdev_attrs_registered)
-		device_remove_file(&tpacpi_sensors_pdev->dev,
-				   &dev_attr_thinkpad_acpi_pdev_name);
-	if (tpacpi_sensors_pdev)
-		platform_device_unregister(tpacpi_sensors_pdev);
-	if (tpacpi_pdev)
-		platform_device_unregister(tpacpi_pdev);
-
-	if (tp_features.sensors_pdrv_attrs_registered)
-		tpacpi_remove_driver_attributes(&tpacpi_hwmon_pdriver.driver);
-	if (tp_features.platform_drv_attrs_registered)
-		tpacpi_remove_driver_attributes(&tpacpi_pdriver.driver);
-
-	if (tp_features.sensors_pdrv_registered)
-		platform_driver_unregister(&tpacpi_hwmon_pdriver);
-
-	if (tp_features.platform_drv_registered)
-		platform_driver_unregister(&tpacpi_pdriver);
-
-	if (proc_dir)
-		remove_proc_entry(TPACPI_PROC_DIR, acpi_root_dir);
-
-	if (tpacpi_wq)
-		destroy_workqueue(tpacpi_wq);
-
-	kfree(thinkpad_id.bios_version_str);
-	kfree(thinkpad_id.ec_version_str);
-	kfree(thinkpad_id.model_str);
-}
-
-
-static int __init thinkpad_acpi_module_init(void)
-{
-	int ret, i;
-
-	tpacpi_lifecycle = TPACPI_LIFE_INIT;
-
-	/* Parameter checking */
-	if (hotkey_report_mode > 2)
-		return -EINVAL;
-
-	/* Driver-level probe */
-
-	ret = get_thinkpad_model_data(&thinkpad_id);
-	if (ret) {
-		printk(TPACPI_ERR
-			"unable to get DMI data: %d\n", ret);
-		thinkpad_acpi_module_exit();
-		return ret;
-	}
-	ret = probe_for_thinkpad();
-	if (ret) {
-		thinkpad_acpi_module_exit();
-		return ret;
-	}
-
-	/* Driver initialization */
-
-	TPACPI_ACPIHANDLE_INIT(ecrd);
-	TPACPI_ACPIHANDLE_INIT(ecwr);
-
-	tpacpi_wq = create_singlethread_workqueue(TPACPI_WORKQUEUE_NAME);
-	if (!tpacpi_wq) {
-		thinkpad_acpi_module_exit();
-		return -ENOMEM;
-	}
-
-	proc_dir = proc_mkdir(TPACPI_PROC_DIR, acpi_root_dir);
-	if (!proc_dir) {
-		printk(TPACPI_ERR
-		       "unable to create proc dir " TPACPI_PROC_DIR);
-		thinkpad_acpi_module_exit();
-		return -ENODEV;
-	}
-	proc_dir->owner = THIS_MODULE;
-
-	ret = platform_driver_register(&tpacpi_pdriver);
-	if (ret) {
-		printk(TPACPI_ERR
-		       "unable to register main platform driver\n");
-		thinkpad_acpi_module_exit();
-		return ret;
-	}
-	tp_features.platform_drv_registered = 1;
-
-	ret = platform_driver_register(&tpacpi_hwmon_pdriver);
-	if (ret) {
-		printk(TPACPI_ERR
-		       "unable to register hwmon platform driver\n");
-		thinkpad_acpi_module_exit();
-		return ret;
-	}
-	tp_features.sensors_pdrv_registered = 1;
-
-	ret = tpacpi_create_driver_attributes(&tpacpi_pdriver.driver);
-	if (!ret) {
-		tp_features.platform_drv_attrs_registered = 1;
-		ret = tpacpi_create_driver_attributes(
-					&tpacpi_hwmon_pdriver.driver);
-	}
-	if (ret) {
-		printk(TPACPI_ERR
-		       "unable to create sysfs driver attributes\n");
-		thinkpad_acpi_module_exit();
-		return ret;
-	}
-	tp_features.sensors_pdrv_attrs_registered = 1;
-
-
-	/* Device initialization */
-	tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, -1,
-							NULL, 0);
-	if (IS_ERR(tpacpi_pdev)) {
-		ret = PTR_ERR(tpacpi_pdev);
-		tpacpi_pdev = NULL;
-		printk(TPACPI_ERR "unable to register platform device\n");
-		thinkpad_acpi_module_exit();
-		return ret;
-	}
-	tpacpi_sensors_pdev = platform_device_register_simple(
-						TPACPI_HWMON_DRVR_NAME,
-						-1, NULL, 0);
-	if (IS_ERR(tpacpi_sensors_pdev)) {
-		ret = PTR_ERR(tpacpi_sensors_pdev);
-		tpacpi_sensors_pdev = NULL;
-		printk(TPACPI_ERR
-		       "unable to register hwmon platform device\n");
-		thinkpad_acpi_module_exit();
-		return ret;
-	}
-	ret = device_create_file(&tpacpi_sensors_pdev->dev,
-				 &dev_attr_thinkpad_acpi_pdev_name);
-	if (ret) {
-		printk(TPACPI_ERR
-		       "unable to create sysfs hwmon device attributes\n");
-		thinkpad_acpi_module_exit();
-		return ret;
-	}
-	tp_features.sensors_pdev_attrs_registered = 1;
-	tpacpi_hwmon = hwmon_device_register(&tpacpi_sensors_pdev->dev);
-	if (IS_ERR(tpacpi_hwmon)) {
-		ret = PTR_ERR(tpacpi_hwmon);
-		tpacpi_hwmon = NULL;
-		printk(TPACPI_ERR "unable to register hwmon device\n");
-		thinkpad_acpi_module_exit();
-		return ret;
-	}
-	mutex_init(&tpacpi_inputdev_send_mutex);
-	tpacpi_inputdev = input_allocate_device();
-	if (!tpacpi_inputdev) {
-		printk(TPACPI_ERR "unable to allocate input device\n");
-		thinkpad_acpi_module_exit();
-		return -ENOMEM;
-	} else {
-		/* Prepare input device, but don't register */
-		tpacpi_inputdev->name = "ThinkPad Extra Buttons";
-		tpacpi_inputdev->phys = TPACPI_DRVR_NAME "/input0";
-		tpacpi_inputdev->id.bustype = BUS_HOST;
-		tpacpi_inputdev->id.vendor = (thinkpad_id.vendor) ?
-						thinkpad_id.vendor :
-						PCI_VENDOR_ID_IBM;
-		tpacpi_inputdev->id.product = TPACPI_HKEY_INPUT_PRODUCT;
-		tpacpi_inputdev->id.version = TPACPI_HKEY_INPUT_VERSION;
-	}
-	for (i = 0; i < ARRAY_SIZE(ibms_init); i++) {
-		ret = ibm_init(&ibms_init[i]);
-		if (ret >= 0 && *ibms_init[i].param)
-			ret = ibms_init[i].data->write(ibms_init[i].param);
-		if (ret < 0) {
-			thinkpad_acpi_module_exit();
-			return ret;
-		}
-	}
-	ret = input_register_device(tpacpi_inputdev);
-	if (ret < 0) {
-		printk(TPACPI_ERR "unable to register input device\n");
-		thinkpad_acpi_module_exit();
-		return ret;
-	} else {
-		tp_features.input_device_registered = 1;
-	}
-
-	tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
-	return 0;
-}
-
-/* Please remove this in year 2009 */
-MODULE_ALIAS("ibm_acpi");
-
-MODULE_ALIAS(TPACPI_DRVR_SHORTNAME);
-
-/*
- * DMI matching for module autoloading
- *
- * See http://thinkwiki.org/wiki/List_of_DMI_IDs
- * See http://thinkwiki.org/wiki/BIOS_Upgrade_Downloads
- *
- * Only models listed in thinkwiki will be supported, so add yours
- * if it is not there yet.
- */
-#define IBM_BIOS_MODULE_ALIAS(__type) \
-	MODULE_ALIAS("dmi:bvnIBM:bvr" __type "ET??WW")
-
-/* Non-ancient thinkpads */
-MODULE_ALIAS("dmi:bvnIBM:*:svnIBM:*:pvrThinkPad*:rvnIBM:*");
-MODULE_ALIAS("dmi:bvnLENOVO:*:svnLENOVO:*:pvrThinkPad*:rvnLENOVO:*");
-
-/* Ancient thinkpad BIOSes have to be identified by
- * BIOS type or model number, and there are far fewer
- * BIOS types than model numbers... */
-IBM_BIOS_MODULE_ALIAS("I[B,D,H,I,M,N,O,T,W,V,Y,Z]");
-IBM_BIOS_MODULE_ALIAS("1[0,3,6,8,A-G,I,K,M-P,S,T]");
-IBM_BIOS_MODULE_ALIAS("K[U,X-Z]");
-
-MODULE_AUTHOR("Borislav Deianov, Henrique de Moraes Holschuh");
-MODULE_DESCRIPTION(TPACPI_DESC);
-MODULE_VERSION(TPACPI_VERSION);
-MODULE_LICENSE("GPL");
-
-module_init(thinkpad_acpi_module_init);
-module_exit(thinkpad_acpi_module_exit);
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 7a3f243..76bfe16 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -25,8 +25,8 @@
 #include <linux/stat.h>
 
 #include <linux/mmc/host.h>
+#include <linux/atmel-mci.h>
 
-#include <asm/atmel-mci.h>
 #include <asm/io.h>
 #include <asm/unaligned.h>
 
@@ -55,7 +55,6 @@
 
 struct atmel_mci_dma {
 #ifdef CONFIG_MMC_ATMELMCI_DMA
-	struct dma_client		client;
 	struct dma_chan			*chan;
 	struct dma_async_tx_descriptor	*data_desc;
 #endif
@@ -593,10 +592,8 @@
 
 	/* If we don't have a channel, we can't do DMA */
 	chan = host->dma.chan;
-	if (chan) {
-		dma_chan_get(chan);
+	if (chan)
 		host->data_chan = chan;
-	}
 
 	if (!chan)
 		return -ENODEV;
@@ -1443,60 +1440,6 @@
 	return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_MMC_ATMELMCI_DMA
-
-static inline struct atmel_mci *
-dma_client_to_atmel_mci(struct dma_client *client)
-{
-	return container_of(client, struct atmel_mci, dma.client);
-}
-
-static enum dma_state_client atmci_dma_event(struct dma_client *client,
-		struct dma_chan *chan, enum dma_state state)
-{
-	struct atmel_mci	*host;
-	enum dma_state_client	ret = DMA_NAK;
-
-	host = dma_client_to_atmel_mci(client);
-
-	switch (state) {
-	case DMA_RESOURCE_AVAILABLE:
-		spin_lock_bh(&host->lock);
-		if (!host->dma.chan) {
-			host->dma.chan = chan;
-			ret = DMA_ACK;
-		}
-		spin_unlock_bh(&host->lock);
-
-		if (ret == DMA_ACK)
-			dev_info(&host->pdev->dev,
-					"Using %s for DMA transfers\n",
-					chan->dev.bus_id);
-		break;
-
-	case DMA_RESOURCE_REMOVED:
-		spin_lock_bh(&host->lock);
-		if (host->dma.chan == chan) {
-			host->dma.chan = NULL;
-			ret = DMA_ACK;
-		}
-		spin_unlock_bh(&host->lock);
-
-		if (ret == DMA_ACK)
-			dev_info(&host->pdev->dev,
-					"Lost %s, falling back to PIO\n",
-					chan->dev.bus_id);
-		break;
-
-	default:
-		break;
-	}
-
-
-	return ret;
-}
-#endif /* CONFIG_MMC_ATMELMCI_DMA */
-
 static int __init atmci_init_slot(struct atmel_mci *host,
 		struct mci_slot_pdata *slot_data, unsigned int id,
 		u32 sdc_reg)
@@ -1600,6 +1543,18 @@
 	mmc_free_host(slot->mmc);
 }
 
+#ifdef CONFIG_MMC_ATMELMCI_DMA
+static bool filter(struct dma_chan *chan, void *slave)
+{
+	struct dw_dma_slave *dws = slave;
+
+	if (dws->dma_dev == chan->device->dev)
+		return true;
+	else
+		return false;
+}
+#endif
+
 static int __init atmci_probe(struct platform_device *pdev)
 {
 	struct mci_platform_data	*pdata;
@@ -1652,22 +1607,20 @@
 		goto err_request_irq;
 
 #ifdef CONFIG_MMC_ATMELMCI_DMA
-	if (pdata->dma_slave) {
-		struct dma_slave *slave = pdata->dma_slave;
+	if (pdata->dma_slave.dma_dev) {
+		struct dw_dma_slave *dws = &pdata->dma_slave;
+		dma_cap_mask_t mask;
 
-		slave->tx_reg = regs->start + MCI_TDR;
-		slave->rx_reg = regs->start + MCI_RDR;
+		dws->tx_reg = regs->start + MCI_TDR;
+		dws->rx_reg = regs->start + MCI_RDR;
 
 		/* Try to grab a DMA channel */
-		host->dma.client.event_callback = atmci_dma_event;
-		dma_cap_set(DMA_SLAVE, host->dma.client.cap_mask);
-		host->dma.client.slave = slave;
-
-		dma_async_client_register(&host->dma.client);
-		dma_async_client_chan_request(&host->dma.client);
-	} else {
-		dev_notice(&pdev->dev, "DMA not available, using PIO\n");
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+		host->dma.chan = dma_request_channel(mask, filter, dws);
 	}
+	if (!host->dma.chan)
+		dev_notice(&pdev->dev, "DMA not available, using PIO\n");
 #endif /* CONFIG_MMC_ATMELMCI_DMA */
 
 	platform_set_drvdata(pdev, host);
@@ -1699,8 +1652,8 @@
 
 err_init_slot:
 #ifdef CONFIG_MMC_ATMELMCI_DMA
-	if (pdata->dma_slave)
-		dma_async_client_unregister(&host->dma.client);
+	if (host->dma.chan)
+		dma_release_channel(host->dma.chan);
 #endif
 	free_irq(irq, host);
 err_request_irq:
@@ -1731,8 +1684,8 @@
 	clk_disable(host->mck);
 
 #ifdef CONFIG_MMC_ATMELMCI_DMA
-	if (host->dma.client.slave)
-		dma_async_client_unregister(&host->dma.client);
+	if (host->dma.chan)
+		dma_release_channel(host->dma.chan);
 #endif
 
 	free_irq(platform_get_irq(pdev, 0), host);
@@ -1761,7 +1714,7 @@
 	platform_driver_unregister(&atmci_driver);
 }
 
-module_init(atmci_init);
+late_initcall(atmci_init); /* try to load after dma driver when built-in */
 module_exit(atmci_exit);
 
 MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
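Note on the conversion above: the old dma_client registration is replaced by the dma_request_channel() API. As a hedged sketch of that idiom (not taken from this patch), a driver builds a capability mask and supplies a filter callback that returns true for an acceptable channel; the my_dma_filter/my_grab_channel names and the matching criterion are hypothetical.

#include <linux/dmaengine.h>
#include <linux/device.h>

/* Filter callback: offered each candidate channel, return true to accept.
 * Here the (hypothetical) criterion is "belongs to this device". */
static bool my_dma_filter(struct dma_chan *chan, void *arg)
{
	struct device *wanted = arg;

	return chan->device->dev == wanted;
}

static struct dma_chan *my_grab_channel(struct device *wanted)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* May return NULL; pair with dma_release_channel() on teardown. */
	return dma_request_channel(mask, my_dma_filter, wanted);
}

A channel obtained this way must later be handed back with dma_release_channel(), which is what the reworked error and remove paths above now do.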
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index a90d50c..7d04fb9 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -45,6 +45,14 @@
 	  devices. Partitioning on NFTL 'devices' is a different - that's the
 	  'normal' form of partitioning used on a block device.
 
+config MTD_TESTS
+	tristate "MTD tests support"
+	depends on m
+	help
+	  This option builds various MTD test modules. The tests should
+	  normally be compiled as kernel modules; each module performs
+	  various checks and verifications when loaded.
+
 config MTD_REDBOOT_PARTS
 	tristate "RedBoot partition table parsing"
 	depends on MTD_PARTITIONS
@@ -316,6 +324,8 @@
 
 source "drivers/mtd/onenand/Kconfig"
 
+source "drivers/mtd/lpddr/Kconfig"
+
 source "drivers/mtd/ubi/Kconfig"
 
 endif # MTD
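The MTD_TESTS help text describes modules that run their checks when loaded. Purely as an illustration of that shape (none of this is in the patch, and the module and parameter names are invented), such a test module typically looks up an MTD device by number with get_mtd_device() at init time and drops the reference when done:

#include <linux/module.h>
#include <linux/err.h>
#include <linux/mtd/mtd.h>

static int dev = -1;	/* invented parameter: which MTD device to exercise */
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to test");

static int __init mtd_exampletest_init(void)
{
	struct mtd_info *mtd = get_mtd_device(NULL, dev);

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	/* A real test would read, write and erase here and verify the data. */
	printk(KERN_INFO "mtd_exampletest: %s, %llu bytes, erasesize %u\n",
	       mtd->name, (unsigned long long)mtd->size, mtd->erasesize);

	put_mtd_device(mtd);
	return 0;
}

static void __exit mtd_exampletest_exit(void)
{
}

module_init(mtd_exampletest_init);
module_exit(mtd_exampletest_exit);
MODULE_LICENSE("GPL");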
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 4b77335..4521b1e 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -29,6 +29,6 @@
 nftl-objs		:= nftlcore.o nftlmount.o
 inftl-objs		:= inftlcore.o inftlmount.o
 
-obj-y		+= chips/ maps/ devices/ nand/ onenand/
+obj-y		+= chips/ lpddr/ maps/ devices/ nand/ onenand/ tests/
 
 obj-$(CONFIG_MTD_UBI)		+= ubi/
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index c93a8be..f5ab6fa 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -58,8 +58,8 @@
 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
 static void cfi_intelext_sync (struct mtd_info *);
-static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
-static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
+static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 #ifdef CONFIG_MTD_OTP
 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
@@ -558,8 +558,8 @@
 	}
 
 	for (i=0; i<mtd->numeraseregions;i++){
-		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
-		       i,mtd->eraseregions[i].offset,
+		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
+		       i,(unsigned long long)mtd->eraseregions[i].offset,
 		       mtd->eraseregions[i].erasesize,
 		       mtd->eraseregions[i].numblocks);
 	}
@@ -2058,7 +2058,7 @@
 	return ret;
 }
 
-static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	int ret;
 
@@ -2082,7 +2082,7 @@
 	return ret;
 }
 
-static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	int ret;
 
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index d74ec46..94bb61e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -71,8 +71,8 @@
 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
 #include "fwh_lock.h"
 
-static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
-static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
+static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 
 static struct mtd_chip_driver cfi_amdstd_chipdrv = {
 	.probe		= NULL, /* Not usable directly */
@@ -322,6 +322,14 @@
 };
 
 
+static void cfi_fixup_major_minor(struct cfi_private *cfi,
+				  struct cfi_pri_amdstd *extp)
+{
+	if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
+	    extp->MajorVersion == '0')
+		extp->MajorVersion = '1';
+}
+
 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
 {
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -363,6 +371,8 @@
 			return NULL;
 		}
 
+		cfi_fixup_major_minor(cfi, extp);
+
 		if (extp->MajorVersion != '1' ||
 		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
 			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
@@ -1774,12 +1784,12 @@
 	return ret;
 }
 
-static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
 }
 
-static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
 }
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index d4714dd..6c740f3 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -42,8 +42,8 @@
 		unsigned long count, loff_t to, size_t *retlen);
 static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
 static void cfi_staa_sync (struct mtd_info *);
-static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
-static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
+static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 static int cfi_staa_suspend (struct mtd_info *);
 static void cfi_staa_resume (struct mtd_info *);
 
@@ -221,8 +221,8 @@
 		}
 
 		for (i=0; i<mtd->numeraseregions;i++){
-			printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
-			       i,mtd->eraseregions[i].offset,
+			printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
+			       i, (unsigned long long)mtd->eraseregions[i].offset,
 			       mtd->eraseregions[i].erasesize,
 			       mtd->eraseregions[i].numblocks);
 		}
@@ -964,7 +964,7 @@
 		adr += regions[i].erasesize;
 		len -= regions[i].erasesize;
 
-		if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
+		if (adr % (1<< cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
 			i++;
 
 		if (adr >> cfi->chipshift) {
@@ -1135,7 +1135,7 @@
 	spin_unlock_bh(chip->mutex);
 	return 0;
 }
-static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
@@ -1284,7 +1284,7 @@
 	spin_unlock_bh(chip->mutex);
 	return 0;
 }
-static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index ab44f2b..57e0e4e 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -77,7 +77,7 @@
 }
 
 
-static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	int ret;
 
@@ -88,7 +88,7 @@
 }
 
 
-static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	int ret;
 
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 6fde0a2..bc33200 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -120,6 +120,13 @@
 	  doesn't have access to, memory beyond the mem=xxx limit, nvram,
 	  memory on the video card, etc...
 
+config MTD_PS3VRAM
+	tristate "PS3 video RAM"
+	depends on FB_PS3
+	help
+	  This driver allows you to use excess PS3 video RAM as volatile
+	  storage or system swap.
+
 config MTD_LART
 	tristate "28F160xx flash driver for LART"
 	depends on SA1100_LART
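The MTD_PS3VRAM option above exposes leftover video RAM as an ordinary volatile MTD device. As a usage illustration only (not part of the patch), userspace can then read and write it through the MTD character device; the /dev/mtd0 path is an assumption and depends on which other MTD devices are registered. Using it as swap instead goes through the mtdblock translation layer (mkswap/swapon on the matching /dev/mtdblockN).

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed node; the index depends on which MTD devices exist. */
	int fd = open("/dev/mtd0", O_RDWR);
	unsigned char buf[16];

	if (fd < 0) {
		perror("open /dev/mtd0");
		return 1;
	}

	memset(buf, 0xAA, sizeof(buf));
	if (pwrite(fd, buf, sizeof(buf), 0) != (ssize_t)sizeof(buf))
		perror("pwrite");
	if (pread(fd, buf, sizeof(buf), 0) != (ssize_t)sizeof(buf))
		perror("pread");

	close(fd);
	return 0;
}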
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 0993d5c..e51521d 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -16,3 +16,4 @@
 obj-$(CONFIG_MTD_BLOCK2MTD)	+= block2mtd.o
 obj-$(CONFIG_MTD_DATAFLASH)	+= mtd_dataflash.o
 obj-$(CONFIG_MTD_M25P80)	+= m25p80.o
+obj-$(CONFIG_MTD_PS3VRAM)	+= ps3vram.o
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index f4bda4c..578de1c 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -619,7 +619,7 @@
 };
 #endif
 
-int __init lart_flash_init (void)
+static int __init lart_flash_init (void)
 {
    int result;
    memset (&mtd,0,sizeof (mtd));
@@ -690,7 +690,7 @@
    return (result);
 }
 
-void __exit lart_flash_exit (void)
+static void __exit lart_flash_exit (void)
 {
 #ifndef HAVE_PARTITIONS
    del_mtd_device (&mtd);
@@ -705,5 +705,3 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Abraham vd Merwe <abraham@2d3d.co.za>");
 MODULE_DESCRIPTION("MTD driver for Intel 28F160F3 on LART board");
-
-
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 5733f06..7c3fc76 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -20,6 +20,7 @@
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/mutex.h>
+#include <linux/math64.h>
 
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -169,9 +170,9 @@
  */
 static int erase_chip(struct m25p *flash)
 {
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n",
-			dev_name(&flash->spi->dev), __func__,
-			flash->mtd.size / 1024);
+	DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %lldKiB\n",
+	      dev_name(&flash->spi->dev), __func__,
+	      (long long)(flash->mtd.size >> 10));
 
 	/* Wait until finished previous write command. */
 	if (wait_till_ready(flash))
@@ -232,18 +233,18 @@
 {
 	struct m25p *flash = mtd_to_m25p(mtd);
 	u32 addr,len;
+	uint32_t rem;
 
-	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %d\n",
-			dev_name(&flash->spi->dev), __func__, "at",
-			(u32)instr->addr, instr->len);
+	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%llx, len %lld\n",
+	      dev_name(&flash->spi->dev), __func__, "at",
+	      (long long)instr->addr, (long long)instr->len);
 
 	/* sanity checks */
 	if (instr->addr + instr->len > flash->mtd.size)
 		return -EINVAL;
-	if ((instr->addr % mtd->erasesize) != 0
-			|| (instr->len % mtd->erasesize) != 0) {
+	div_u64_rem(instr->len, mtd->erasesize, &rem);
+	if (rem)
 		return -EINVAL;
-	}
 
 	addr = instr->addr;
 	len = instr->len;
@@ -677,24 +678,24 @@
 		flash->mtd.erasesize = info->sector_size;
 	}
 
-	dev_info(&spi->dev, "%s (%d Kbytes)\n", info->name,
-			flash->mtd.size / 1024);
+	dev_info(&spi->dev, "%s (%lld Kbytes)\n", info->name,
+			(long long)flash->mtd.size >> 10);
 
 	DEBUG(MTD_DEBUG_LEVEL2,
-		"mtd .name = %s, .size = 0x%.8x (%uMiB) "
+		"mtd .name = %s, .size = 0x%llx (%lldMiB) "
 			".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
 		flash->mtd.name,
-		flash->mtd.size, flash->mtd.size / (1024*1024),
+		(long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
 		flash->mtd.erasesize, flash->mtd.erasesize / 1024,
 		flash->mtd.numeraseregions);
 
 	if (flash->mtd.numeraseregions)
 		for (i = 0; i < flash->mtd.numeraseregions; i++)
 			DEBUG(MTD_DEBUG_LEVEL2,
-				"mtd.eraseregions[%d] = { .offset = 0x%.8x, "
+				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
 				".erasesize = 0x%.8x (%uKiB), "
 				".numblocks = %d }\n",
-				i, flash->mtd.eraseregions[i].offset,
+				i, (long long)flash->mtd.eraseregions[i].offset,
 				flash->mtd.eraseregions[i].erasesize,
 				flash->mtd.eraseregions[i].erasesize / 1024,
 				flash->mtd.eraseregions[i].numblocks);
@@ -722,12 +723,12 @@
 		if (nr_parts > 0) {
 			for (i = 0; i < nr_parts; i++) {
 				DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
-					"{.name = %s, .offset = 0x%.8x, "
-						".size = 0x%.8x (%uKiB) }\n",
+					"{.name = %s, .offset = 0x%llx, "
+						".size = 0x%llx (%lldKiB) }\n",
 					i, parts[i].name,
-					parts[i].offset,
-					parts[i].size,
-					parts[i].size / 1024);
+					(long long)parts[i].offset,
+					(long long)parts[i].size,
+					(long long)(parts[i].size >> 10));
 			}
 			flash->partitioned = 1;
 			return add_mtd_partitions(&flash->mtd, parts, nr_parts);
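The m25p80 hunks above switch from the '%' operator to div_u64_rem() because instr->addr and instr->len are now 64-bit and a 64-by-32 modulo is not directly available on 32-bit kernels. A hedged sketch of that idiom follows (not code from this series; the erase_args_ok helper is hypothetical):

#include <linux/math64.h>
#include <linux/mtd/mtd.h>

/* 64-bit-safe check that an erase request is erase-block aligned. */
static int erase_args_ok(struct mtd_info *mtd, struct erase_info *instr)
{
	uint32_t rem;

	if (instr->addr + instr->len > mtd->size)
		return 0;

	/* div_u64_rem() divides a u64 by a u32, storing the remainder. */
	div_u64_rem(instr->addr, mtd->erasesize, &rem);
	if (rem)
		return 0;
	div_u64_rem(instr->len, mtd->erasesize, &rem);
	return rem == 0;
}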
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 65126cd..d44f741 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -16,6 +16,7 @@
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/err.h>
+#include <linux/math64.h>
 
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
@@ -152,15 +153,20 @@
 	struct spi_message	msg;
 	unsigned		blocksize = priv->page_size << 3;
 	uint8_t			*command;
+	uint32_t		rem;
 
-	DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%x len 0x%x\n",
-			dev_name(&spi->dev),
-			instr->addr, instr->len);
+	DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%llx len 0x%llx\n",
+	      dev_name(&spi->dev), (long long)instr->addr,
+	      (long long)instr->len);
 
 	/* Sanity checks */
-	if ((instr->addr + instr->len) > mtd->size
-			|| (instr->len % priv->page_size) != 0
-			|| (instr->addr % priv->page_size) != 0)
+	if (instr->addr + instr->len > mtd->size)
+		return -EINVAL;
+	div_u64_rem(instr->len, priv->page_size, &rem);
+	if (rem)
+		return -EINVAL;
+	div_u64_rem(instr->addr, priv->page_size, &rem);
+	if (rem)
 		return -EINVAL;
 
 	spi_message_init(&msg);
@@ -178,7 +184,7 @@
 		/* Calculate flash page address; use block erase (for speed) if
 		 * we're at a block boundary and need to erase the whole block.
 		 */
-		pageaddr = instr->addr / priv->page_size;
+		pageaddr = div_u64(instr->addr, priv->page_size);
 		do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
 		pageaddr = pageaddr << priv->page_offset;
 
@@ -667,8 +673,8 @@
 	if (revision >= 'c')
 		otp_tag = otp_setup(device, revision);
 
-	dev_info(&spi->dev, "%s (%d KBytes) pagesize %d bytes%s\n",
-			name, DIV_ROUND_UP(device->size, 1024),
+	dev_info(&spi->dev, "%s (%lld KBytes) pagesize %d bytes%s\n",
+			name, (long long)((device->size + 1023) >> 10),
 			pagesize, otp_tag);
 	dev_set_drvdata(&spi->dev, priv);
 
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index d38bca6..d2fd550 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -34,7 +34,7 @@
  *	aperture size, not the dram size, and the V370PDC supplies no
  *	other method for memory size discovery.  This problem is
  *	mostly only relevant when compiled as a module, as the
- *	unloading of the module with an aperture size smaller then
+ *	unloading of the module with an aperture size smaller than
  *	the ram will cause the driver to detect the onboard memory
  *	size to be equal to the aperture size when the module is
  *	reloaded.  Soooo, to help, the module supports an msize
diff --git a/drivers/mtd/devices/ps3vram.c b/drivers/mtd/devices/ps3vram.c
new file mode 100644
index 0000000..d21e9be
--- /dev/null
+++ b/drivers/mtd/devices/ps3vram.c
@@ -0,0 +1,768 @@
+/**
+ * ps3vram - Use extra PS3 video ram as MTD block device.
+ *
+ * Copyright (c) 2007-2008 Jim Paris <jim@jtan.com>
+ * Added RSX DMA support by Vivien Chappelier <vivien.chappelier@free.fr>
+ */
+
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/gfp.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+
+#include <asm/lv1call.h>
+#include <asm/ps3.h>
+
+#define DEVICE_NAME		"ps3vram"
+
+#define XDR_BUF_SIZE (2 * 1024 * 1024) /* XDR buffer (must be 1MiB aligned) */
+#define XDR_IOIF 0x0c000000
+
+#define FIFO_BASE XDR_IOIF
+#define FIFO_SIZE (64 * 1024)
+
+#define DMA_PAGE_SIZE (4 * 1024)
+
+#define CACHE_PAGE_SIZE (256 * 1024)
+#define CACHE_PAGE_COUNT ((XDR_BUF_SIZE - FIFO_SIZE) / CACHE_PAGE_SIZE)
+
+#define CACHE_OFFSET CACHE_PAGE_SIZE
+#define FIFO_OFFSET 0
+
+#define CTRL_PUT 0x10
+#define CTRL_GET 0x11
+#define CTRL_TOP 0x15
+
+#define UPLOAD_SUBCH	1
+#define DOWNLOAD_SUBCH	2
+
+#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN	0x0000030c
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY	0x00000104
+
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601
+
+struct mtd_info ps3vram_mtd;
+
+#define CACHE_PAGE_PRESENT 1
+#define CACHE_PAGE_DIRTY   2
+
+struct ps3vram_tag {
+	unsigned int address;
+	unsigned int flags;
+};
+
+struct ps3vram_cache {
+	unsigned int page_count;
+	unsigned int page_size;
+	struct ps3vram_tag *tags;
+};
+
+struct ps3vram_priv {
+	u64 memory_handle;
+	u64 context_handle;
+	u32 *ctrl;
+	u32 *reports;
+	u8 __iomem *ddr_base;
+	u8 *xdr_buf;
+
+	u32 *fifo_base;
+	u32 *fifo_ptr;
+
+	struct device *dev;
+	struct ps3vram_cache cache;
+
+	/* Used to serialize cache/DMA operations */
+	struct mutex lock;
+};
+
+#define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */
+#define DMA_NOTIFIER_OFFSET_BASE 0x1000     /* first DMA notifier offset */
+#define DMA_NOTIFIER_SIZE        0x40
+#define NOTIFIER 7	/* notifier used for completion report */
+
+/* A trailing '-' means to subtract off ps3fb_videomemory.size */
+char *size = "256M-";
+module_param(size, charp, 0);
+MODULE_PARM_DESC(size, "memory size");
+
+static u32 *ps3vram_get_notifier(u32 *reports, int notifier)
+{
+	return (void *) reports +
+		DMA_NOTIFIER_OFFSET_BASE +
+		DMA_NOTIFIER_SIZE * notifier;
+}
+
+static void ps3vram_notifier_reset(struct mtd_info *mtd)
+{
+	int i;
+
+	struct ps3vram_priv *priv = mtd->priv;
+	u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
+	for (i = 0; i < 4; i++)
+		notify[i] = 0xffffffff;
+}
+
+static int ps3vram_notifier_wait(struct mtd_info *mtd, unsigned int timeout_ms)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
+	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+	do {
+		if (!notify[3])
+			return 0;
+		msleep(1);
+	} while (time_before(jiffies, timeout));
+
+	return -ETIMEDOUT;
+}
+
+static void ps3vram_init_ring(struct mtd_info *mtd)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+
+	priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET;
+	priv->ctrl[CTRL_GET] = FIFO_BASE + FIFO_OFFSET;
+}
+
+static int ps3vram_wait_ring(struct mtd_info *mtd, unsigned int timeout_ms)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+	do {
+		if (priv->ctrl[CTRL_PUT] == priv->ctrl[CTRL_GET])
+			return 0;
+		msleep(1);
+	} while (time_before(jiffies, timeout));
+
+	dev_dbg(priv->dev, "%s:%d: FIFO timeout (%08x/%08x/%08x)\n", __func__,
+		__LINE__, priv->ctrl[CTRL_PUT], priv->ctrl[CTRL_GET],
+		priv->ctrl[CTRL_TOP]);
+
+	return -ETIMEDOUT;
+}
+
+static void ps3vram_out_ring(struct ps3vram_priv *priv, u32 data)
+{
+	*(priv->fifo_ptr)++ = data;
+}
+
+static void ps3vram_begin_ring(struct ps3vram_priv *priv, u32 chan,
+				      u32 tag, u32 size)
+{
+	ps3vram_out_ring(priv, (size << 18) | (chan << 13) | tag);
+}
+
+static void ps3vram_rewind_ring(struct mtd_info *mtd)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	u64 status;
+
+	ps3vram_out_ring(priv, 0x20000000 | (FIFO_BASE + FIFO_OFFSET));
+
+	priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET;
+
+	/* asking the HV for a blit will kick the fifo */
+	status = lv1_gpu_context_attribute(priv->context_handle,
+					   L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
+					   0, 0, 0, 0);
+	if (status)
+		dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n",
+			__func__, __LINE__);
+
+	priv->fifo_ptr = priv->fifo_base;
+}
+
+static void ps3vram_fire_ring(struct mtd_info *mtd)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	u64 status;
+
+	mutex_lock(&ps3_gpu_mutex);
+
+	priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET +
+		(priv->fifo_ptr - priv->fifo_base) * sizeof(u32);
+
+	/* asking the HV for a blit will kick the fifo */
+	status = lv1_gpu_context_attribute(priv->context_handle,
+					   L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
+					   0, 0, 0, 0);
+	if (status)
+		dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n",
+			__func__, __LINE__);
+
+	if ((priv->fifo_ptr - priv->fifo_base) * sizeof(u32) >
+		FIFO_SIZE - 1024) {
+		dev_dbg(priv->dev, "%s:%d: fifo full, rewinding\n", __func__,
+			__LINE__);
+		ps3vram_wait_ring(mtd, 200);
+		ps3vram_rewind_ring(mtd);
+	}
+
+	mutex_unlock(&ps3_gpu_mutex);
+}
+
+static void ps3vram_bind(struct mtd_info *mtd)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+
+	ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0, 1);
+	ps3vram_out_ring(priv, 0x31337303);
+	ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x180, 3);
+	ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER);
+	ps3vram_out_ring(priv, 0xfeed0001);	/* DMA system RAM instance */
+	ps3vram_out_ring(priv, 0xfeed0000);     /* DMA video RAM instance */
+
+	ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0, 1);
+	ps3vram_out_ring(priv, 0x3137c0de);
+	ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x180, 3);
+	ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER);
+	ps3vram_out_ring(priv, 0xfeed0000);	/* DMA video RAM instance */
+	ps3vram_out_ring(priv, 0xfeed0001);	/* DMA system RAM instance */
+
+	ps3vram_fire_ring(mtd);
+}
+
+static int ps3vram_upload(struct mtd_info *mtd, unsigned int src_offset,
+			  unsigned int dst_offset, int len, int count)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+
+	ps3vram_begin_ring(priv, UPLOAD_SUBCH,
+			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
+	ps3vram_out_ring(priv, XDR_IOIF + src_offset);
+	ps3vram_out_ring(priv, dst_offset);
+	ps3vram_out_ring(priv, len);
+	ps3vram_out_ring(priv, len);
+	ps3vram_out_ring(priv, len);
+	ps3vram_out_ring(priv, count);
+	ps3vram_out_ring(priv, (1 << 8) | 1);
+	ps3vram_out_ring(priv, 0);
+
+	ps3vram_notifier_reset(mtd);
+	ps3vram_begin_ring(priv, UPLOAD_SUBCH,
+			   NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1);
+	ps3vram_out_ring(priv, 0);
+	ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x100, 1);
+	ps3vram_out_ring(priv, 0);
+	ps3vram_fire_ring(mtd);
+	if (ps3vram_notifier_wait(mtd, 200) < 0) {
+		dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__,
+			__LINE__);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int ps3vram_download(struct mtd_info *mtd, unsigned int src_offset,
+			    unsigned int dst_offset, int len, int count)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+
+	ps3vram_begin_ring(priv, DOWNLOAD_SUBCH,
+			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
+	ps3vram_out_ring(priv, src_offset);
+	ps3vram_out_ring(priv, XDR_IOIF + dst_offset);
+	ps3vram_out_ring(priv, len);
+	ps3vram_out_ring(priv, len);
+	ps3vram_out_ring(priv, len);
+	ps3vram_out_ring(priv, count);
+	ps3vram_out_ring(priv, (1 << 8) | 1);
+	ps3vram_out_ring(priv, 0);
+
+	ps3vram_notifier_reset(mtd);
+	ps3vram_begin_ring(priv, DOWNLOAD_SUBCH,
+			   NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1);
+	ps3vram_out_ring(priv, 0);
+	ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x100, 1);
+	ps3vram_out_ring(priv, 0);
+	ps3vram_fire_ring(mtd);
+	if (ps3vram_notifier_wait(mtd, 200) < 0) {
+		dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__,
+			__LINE__);
+		return -1;
+	}
+
+	return 0;
+}
+
+static void ps3vram_cache_evict(struct mtd_info *mtd, int entry)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	struct ps3vram_cache *cache = &priv->cache;
+
+	if (cache->tags[entry].flags & CACHE_PAGE_DIRTY) {
+		dev_dbg(priv->dev, "%s:%d: flushing %d : 0x%08x\n", __func__,
+			__LINE__, entry, cache->tags[entry].address);
+		if (ps3vram_upload(mtd,
+				   CACHE_OFFSET + entry * cache->page_size,
+				   cache->tags[entry].address,
+				   DMA_PAGE_SIZE,
+				   cache->page_size / DMA_PAGE_SIZE) < 0) {
+			dev_dbg(priv->dev, "%s:%d: failed to upload from "
+				"0x%x to 0x%x size 0x%x\n", __func__, __LINE__,
+				entry * cache->page_size,
+				cache->tags[entry].address, cache->page_size);
+		}
+		cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY;
+	}
+}
+
+static void ps3vram_cache_load(struct mtd_info *mtd, int entry,
+			       unsigned int address)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	struct ps3vram_cache *cache = &priv->cache;
+
+	dev_dbg(priv->dev, "%s:%d: fetching %d : 0x%08x\n", __func__, __LINE__,
+		entry, address);
+	if (ps3vram_download(mtd,
+			     address,
+			     CACHE_OFFSET + entry * cache->page_size,
+			     DMA_PAGE_SIZE,
+			     cache->page_size / DMA_PAGE_SIZE) < 0) {
+		dev_err(priv->dev, "%s:%d: failed to download from "
+			"0x%x to 0x%x size 0x%x\n", __func__, __LINE__, address,
+			entry * cache->page_size, cache->page_size);
+	}
+
+	cache->tags[entry].address = address;
+	cache->tags[entry].flags |= CACHE_PAGE_PRESENT;
+}
+
+
+static void ps3vram_cache_flush(struct mtd_info *mtd)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	struct ps3vram_cache *cache = &priv->cache;
+	int i;
+
+	dev_dbg(priv->dev, "%s:%d: FLUSH\n", __func__, __LINE__);
+	for (i = 0; i < cache->page_count; i++) {
+		ps3vram_cache_evict(mtd, i);
+		cache->tags[i].flags = 0;
+	}
+}
+
+static unsigned int ps3vram_cache_match(struct mtd_info *mtd, loff_t address)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	struct ps3vram_cache *cache = &priv->cache;
+	unsigned int base;
+	unsigned int offset;
+	int i;
+	static int counter;
+
+	offset = (unsigned int) (address & (cache->page_size - 1));
+	base = (unsigned int) (address - offset);
+
+	/* fully associative check */
+	for (i = 0; i < cache->page_count; i++) {
+		if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) &&
+		    cache->tags[i].address == base) {
+			dev_dbg(priv->dev, "%s:%d: found entry %d : 0x%08x\n",
+				__func__, __LINE__, i, cache->tags[i].address);
+			return i;
+		}
+	}
+
+	/* choose a random entry */
+	i = (jiffies + (counter++)) % cache->page_count;
+	dev_dbg(priv->dev, "%s:%d: using entry %d\n", __func__, __LINE__, i);
+
+	ps3vram_cache_evict(mtd, i);
+	ps3vram_cache_load(mtd, i, base);
+
+	return i;
+}
+
+static int ps3vram_cache_init(struct mtd_info *mtd)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+
+	priv->cache.page_count = CACHE_PAGE_COUNT;
+	priv->cache.page_size = CACHE_PAGE_SIZE;
+	priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) *
+				   CACHE_PAGE_COUNT, GFP_KERNEL);
+	if (priv->cache.tags == NULL) {
+		dev_err(priv->dev, "%s:%d: could not allocate cache tags\n",
+			__func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	dev_info(priv->dev, "created ram cache: %d entries, %d KiB each\n",
+		CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024);
+
+	return 0;
+}
+
+static void ps3vram_cache_cleanup(struct mtd_info *mtd)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+
+	ps3vram_cache_flush(mtd);
+	kfree(priv->cache.tags);
+}
+
+static int ps3vram_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+
+	if (instr->addr + instr->len > mtd->size)
+		return -EINVAL;
+
+	mutex_lock(&priv->lock);
+
+	ps3vram_cache_flush(mtd);
+
+	/* Set bytes to 0xFF */
+	memset_io(priv->ddr_base + instr->addr, 0xFF, instr->len);
+
+	mutex_unlock(&priv->lock);
+
+	instr->state = MTD_ERASE_DONE;
+	mtd_erase_callback(instr);
+
+	return 0;
+}
+
+static int ps3vram_read(struct mtd_info *mtd, loff_t from, size_t len,
+			size_t *retlen, u_char *buf)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	unsigned int cached, count;
+
+	dev_dbg(priv->dev, "%s:%d: from=0x%08x len=0x%zx\n", __func__, __LINE__,
+		(unsigned int)from, len);
+
+	if (from >= mtd->size)
+		return -EINVAL;
+
+	if (len > mtd->size - from)
+		len = mtd->size - from;
+
+	/* Copy from vram to buf */
+	count = len;
+	while (count) {
+		unsigned int offset, avail;
+		unsigned int entry;
+
+		offset = (unsigned int) (from & (priv->cache.page_size - 1));
+		avail  = priv->cache.page_size - offset;
+
+		mutex_lock(&priv->lock);
+
+		entry = ps3vram_cache_match(mtd, from);
+		cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
+
+		dev_dbg(priv->dev, "%s:%d: from=%08x cached=%08x offset=%08x "
+			"avail=%08x count=%08x\n", __func__, __LINE__,
+			(unsigned int)from, cached, offset, avail, count);
+
+		if (avail > count)
+			avail = count;
+		memcpy(buf, priv->xdr_buf + cached, avail);
+
+		mutex_unlock(&priv->lock);
+
+		buf += avail;
+		count -= avail;
+		from += avail;
+	}
+
+	*retlen = len;
+	return 0;
+}
+
+static int ps3vram_write(struct mtd_info *mtd, loff_t to, size_t len,
+			 size_t *retlen, const u_char *buf)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	unsigned int cached, count;
+
+	if (to >= mtd->size)
+		return -EINVAL;
+
+	if (len > mtd->size - to)
+		len = mtd->size - to;
+
+	/* Copy from buf to vram */
+	count = len;
+	while (count) {
+		unsigned int offset, avail;
+		unsigned int entry;
+
+		offset = (unsigned int) (to & (priv->cache.page_size - 1));
+		avail  = priv->cache.page_size - offset;
+
+		mutex_lock(&priv->lock);
+
+		entry = ps3vram_cache_match(mtd, to);
+		cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
+
+		dev_dbg(priv->dev, "%s:%d: to=%08x cached=%08x offset=%08x "
+			"avail=%08x count=%08x\n", __func__, __LINE__,
+			(unsigned int)to, cached, offset, avail, count);
+
+		if (avail > count)
+			avail = count;
+		memcpy(priv->xdr_buf + cached, buf, avail);
+
+		priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY;
+
+		mutex_unlock(&priv->lock);
+
+		buf += avail;
+		count -= avail;
+		to += avail;
+	}
+
+	*retlen = len;
+	return 0;
+}
+
+static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
+{
+	struct ps3vram_priv *priv;
+	int status;
+	u64 ddr_lpar;
+	u64 ctrl_lpar;
+	u64 info_lpar;
+	u64 reports_lpar;
+	u64 ddr_size;
+	u64 reports_size;
+	int ret = -ENOMEM;
+	char *rest;
+
+	ret = -EIO;
+	ps3vram_mtd.priv = kzalloc(sizeof(struct ps3vram_priv), GFP_KERNEL);
+	if (!ps3vram_mtd.priv)
+		goto out;
+	priv = ps3vram_mtd.priv;
+
+	mutex_init(&priv->lock);
+	priv->dev = &dev->core;
+
+	/* Allocate XDR buffer (1MiB aligned) */
+	priv->xdr_buf = (void *)__get_free_pages(GFP_KERNEL,
+		get_order(XDR_BUF_SIZE));
+	if (priv->xdr_buf == NULL) {
+		dev_dbg(&dev->core, "%s:%d: could not allocate XDR buffer\n",
+			__func__, __LINE__);
+		ret = -ENOMEM;
+		goto out_free_priv;
+	}
+
+	/* Put FIFO at beginning of XDR buffer */
+	priv->fifo_base = (u32 *) (priv->xdr_buf + FIFO_OFFSET);
+	priv->fifo_ptr = priv->fifo_base;
+
+	/* XXX: Need to open GPU, in case ps3fb or snd_ps3 aren't loaded */
+	if (ps3_open_hv_device(dev)) {
+		dev_err(&dev->core, "%s:%d: ps3_open_hv_device failed\n",
+			__func__, __LINE__);
+		ret = -EAGAIN;
+		goto out_free_xdr_buf;
+	}
+
+	/* Request memory */
+	status = -1;
+	ddr_size = memparse(size, &rest);
+	if (*rest == '-')
+		ddr_size -= ps3fb_videomemory.size;
+	ddr_size = ALIGN(ddr_size, 1024*1024);
+	if (ddr_size <= 0) {
+		dev_err(&dev->core, "%s:%d: specified size is too small\n",
+			__func__, __LINE__);
+		ret = -EINVAL;
+		goto out_close_gpu;
+	}
+
+	while (ddr_size > 0) {
+		status = lv1_gpu_memory_allocate(ddr_size, 0, 0, 0, 0,
+						 &priv->memory_handle,
+						 &ddr_lpar);
+		if (!status)
+			break;
+		ddr_size -= 1024*1024;
+	}
+	if (status || ddr_size <= 0) {
+		dev_err(&dev->core, "%s:%d: lv1_gpu_memory_allocate failed\n",
+			__func__, __LINE__);
+		ret = -ENOMEM;
+		goto out_free_xdr_buf;
+	}
+
+	/* Request context */
+	status = lv1_gpu_context_allocate(priv->memory_handle,
+					  0,
+					  &priv->context_handle,
+					  &ctrl_lpar,
+					  &info_lpar,
+					  &reports_lpar,
+					  &reports_size);
+	if (status) {
+		dev_err(&dev->core, "%s:%d: lv1_gpu_context_allocate failed\n",
+			__func__, __LINE__);
+		ret = -ENOMEM;
+		goto out_free_memory;
+	}
+
+	/* Map XDR buffer to RSX */
+	status = lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF,
+				       ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)),
+				       XDR_BUF_SIZE, 0);
+	if (status) {
+		dev_err(&dev->core, "%s:%d: lv1_gpu_context_iomap failed\n",
+			__func__, __LINE__);
+		ret = -ENOMEM;
+		goto out_free_context;
+	}
+
+	priv->ddr_base = ioremap_flags(ddr_lpar, ddr_size, _PAGE_NO_CACHE);
+
+	if (!priv->ddr_base) {
+		dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__,
+			__LINE__);
+		ret = -ENOMEM;
+		goto out_free_context;
+	}
+
+	priv->ctrl = ioremap(ctrl_lpar, 64 * 1024);
+	if (!priv->ctrl) {
+		dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__,
+			__LINE__);
+		ret = -ENOMEM;
+		goto out_unmap_vram;
+	}
+
+	priv->reports = ioremap(reports_lpar, reports_size);
+	if (!priv->reports) {
+		dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__,
+			__LINE__);
+		ret = -ENOMEM;
+		goto out_unmap_ctrl;
+	}
+
+	mutex_lock(&ps3_gpu_mutex);
+	ps3vram_init_ring(&ps3vram_mtd);
+	mutex_unlock(&ps3_gpu_mutex);
+
+	ps3vram_mtd.name = "ps3vram";
+	ps3vram_mtd.size = ddr_size;
+	ps3vram_mtd.flags = MTD_CAP_RAM;
+	ps3vram_mtd.erase = ps3vram_erase;
+	ps3vram_mtd.point = NULL;
+	ps3vram_mtd.unpoint = NULL;
+	ps3vram_mtd.read = ps3vram_read;
+	ps3vram_mtd.write = ps3vram_write;
+	ps3vram_mtd.owner = THIS_MODULE;
+	ps3vram_mtd.type = MTD_RAM;
+	ps3vram_mtd.erasesize = CACHE_PAGE_SIZE;
+	ps3vram_mtd.writesize = 1;
+
+	ps3vram_bind(&ps3vram_mtd);
+
+	mutex_lock(&ps3_gpu_mutex);
+	ret = ps3vram_wait_ring(&ps3vram_mtd, 100);
+	mutex_unlock(&ps3_gpu_mutex);
+	if (ret < 0) {
+		dev_err(&dev->core, "%s:%d: failed to initialize channels\n",
+			__func__, __LINE__);
+		ret = -ETIMEDOUT;
+		goto out_unmap_reports;
+	}
+
+	ps3vram_cache_init(&ps3vram_mtd);
+
+	if (add_mtd_device(&ps3vram_mtd)) {
+		dev_err(&dev->core, "%s:%d: add_mtd_device failed\n",
+			__func__, __LINE__);
+		ret = -EAGAIN;
+		goto out_cache_cleanup;
+	}
+
+	dev_info(&dev->core, "reserved %u MiB of gpu memory\n",
+		(unsigned int)(ddr_size / 1024 / 1024));
+
+	return 0;
+
+out_cache_cleanup:
+	ps3vram_cache_cleanup(&ps3vram_mtd);
+out_unmap_reports:
+	iounmap(priv->reports);
+out_unmap_ctrl:
+	iounmap(priv->ctrl);
+out_unmap_vram:
+	iounmap(priv->ddr_base);
+out_free_context:
+	lv1_gpu_context_free(priv->context_handle);
+out_free_memory:
+	lv1_gpu_memory_free(priv->memory_handle);
+out_close_gpu:
+	ps3_close_hv_device(dev);
+out_free_xdr_buf:
+	free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
+out_free_priv:
+	kfree(ps3vram_mtd.priv);
+	ps3vram_mtd.priv = NULL;
+out:
+	return ret;
+}
+
+static int ps3vram_shutdown(struct ps3_system_bus_device *dev)
+{
+	struct ps3vram_priv *priv;
+
+	priv = ps3vram_mtd.priv;
+
+	del_mtd_device(&ps3vram_mtd);
+	ps3vram_cache_cleanup(&ps3vram_mtd);
+	iounmap(priv->reports);
+	iounmap(priv->ctrl);
+	iounmap(priv->ddr_base);
+	lv1_gpu_context_free(priv->context_handle);
+	lv1_gpu_memory_free(priv->memory_handle);
+	ps3_close_hv_device(dev);
+	free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
+	kfree(priv);
+	return 0;
+}
+
+static struct ps3_system_bus_driver ps3vram_driver = {
+	.match_id	= PS3_MATCH_ID_GPU,
+	.match_sub_id	= PS3_MATCH_SUB_ID_GPU_RAMDISK,
+	.core.name	= DEVICE_NAME,
+	.core.owner	= THIS_MODULE,
+	.probe		= ps3vram_probe,
+	.remove		= ps3vram_shutdown,
+	.shutdown	= ps3vram_shutdown,
+};
+
+static int __init ps3vram_init(void)
+{
+	return ps3_system_bus_driver_register(&ps3vram_driver);
+}
+
+static void __exit ps3vram_exit(void)
+{
+	ps3_system_bus_driver_unregister(&ps3vram_driver);
+}
+
+module_init(ps3vram_init);
+module_exit(ps3vram_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jim Paris <jim@jtan.com>");
+MODULE_DESCRIPTION("MTD driver for PS3 video RAM");
+MODULE_ALIAS(PS3_MODULE_ALIAS_GPU_RAMDISK);
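The ps3vram size parameter above is parsed with memparse(), and the trailing '-' convention subtracts the ps3fb framebuffer allocation before the result is rounded to a 1 MiB multiple. The standalone userspace sketch below only models that arithmetic so the default "256M-" is easy to follow; the 18 MiB framebuffer figure is an invented example, not a real PS3 value.

#include <stdio.h>
#include <stdlib.h>

/* Model of the "256M-" handling in ps3vram_probe(): parse a size with an
 * optional K/M/G suffix, subtract the framebuffer size when a '-' follows,
 * then round up to a 1 MiB multiple (the kernel uses memparse() + ALIGN()). */
static unsigned long long parse_vram_size(const char *arg,
					  unsigned long long fb_size)
{
	char *rest;
	unsigned long long size = strtoull(arg, &rest, 0);

	switch (*rest) {
	case 'G': case 'g': size <<= 30; rest++; break;
	case 'M': case 'm': size <<= 20; rest++; break;
	case 'K': case 'k': size <<= 10; rest++; break;
	}
	if (*rest == '-')
		size -= fb_size;

	return (size + (1ULL << 20) - 1) & ~((1ULL << 20) - 1);
}

int main(void)
{
	/* 18 MiB is only an invented example framebuffer size. */
	printf("%llu MiB\n", parse_vram_size("256M-", 18ULL << 20) >> 20);
	return 0;
}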
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 9bf581c..a790c06 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -109,25 +109,25 @@
 /* Each memory region corresponds to a minor device */
 typedef struct partition_t {
     struct mtd_blktrans_dev mbd;
-    u_int32_t		state;
-    u_int32_t		*VirtualBlockMap;
-    u_int32_t		*VirtualPageMap;
-    u_int32_t		FreeTotal;
+    uint32_t		state;
+    uint32_t		*VirtualBlockMap;
+    uint32_t		*VirtualPageMap;
+    uint32_t		FreeTotal;
     struct eun_info_t {
-	u_int32_t		Offset;
-	u_int32_t		EraseCount;
-	u_int32_t		Free;
-	u_int32_t		Deleted;
+	uint32_t		Offset;
+	uint32_t		EraseCount;
+	uint32_t		Free;
+	uint32_t		Deleted;
     } *EUNInfo;
     struct xfer_info_t {
-	u_int32_t		Offset;
-	u_int32_t		EraseCount;
-	u_int16_t		state;
+	uint32_t		Offset;
+	uint32_t		EraseCount;
+	uint16_t		state;
     } *XferInfo;
-    u_int16_t		bam_index;
-    u_int32_t		*bam_cache;
-    u_int16_t		DataUnits;
-    u_int32_t		BlocksPerUnit;
+    uint16_t		bam_index;
+    uint32_t		*bam_cache;
+    uint16_t		DataUnits;
+    uint32_t		BlocksPerUnit;
     erase_unit_header_t	header;
 } partition_t;
 
@@ -199,8 +199,8 @@
 static int build_maps(partition_t *part)
 {
     erase_unit_header_t header;
-    u_int16_t xvalid, xtrans, i;
-    u_int blocks, j;
+    uint16_t xvalid, xtrans, i;
+    unsigned blocks, j;
     int hdr_ok, ret = -1;
     ssize_t retval;
     loff_t offset;
@@ -269,14 +269,14 @@
 
     /* Set up virtual page map */
     blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
-    part->VirtualBlockMap = vmalloc(blocks * sizeof(u_int32_t));
+    part->VirtualBlockMap = vmalloc(blocks * sizeof(uint32_t));
     if (!part->VirtualBlockMap)
 	    goto out_XferInfo;
 
-    memset(part->VirtualBlockMap, 0xff, blocks * sizeof(u_int32_t));
+    memset(part->VirtualBlockMap, 0xff, blocks * sizeof(uint32_t));
     part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize;
 
-    part->bam_cache = kmalloc(part->BlocksPerUnit * sizeof(u_int32_t),
+    part->bam_cache = kmalloc(part->BlocksPerUnit * sizeof(uint32_t),
 			      GFP_KERNEL);
     if (!part->bam_cache)
 	    goto out_VirtualBlockMap;
@@ -290,7 +290,7 @@
 	offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);
 
 	ret = part->mbd.mtd->read(part->mbd.mtd, offset,
-			      part->BlocksPerUnit * sizeof(u_int32_t), &retval,
+			      part->BlocksPerUnit * sizeof(uint32_t), &retval,
 			      (unsigned char *)part->bam_cache);
 
 	if (ret)
@@ -332,7 +332,7 @@
 ======================================================================*/
 
 static int erase_xfer(partition_t *part,
-		      u_int16_t xfernum)
+		      uint16_t xfernum)
 {
     int ret;
     struct xfer_info_t *xfer;
@@ -408,7 +408,7 @@
     erase_unit_header_t header;
     struct xfer_info_t *xfer;
     int nbam, ret;
-    u_int32_t ctl;
+    uint32_t ctl;
     ssize_t retlen;
     loff_t offset;
 
@@ -430,15 +430,15 @@
     }
 
     /* Write the BAM stub */
-    nbam = (part->BlocksPerUnit * sizeof(u_int32_t) +
+    nbam = (part->BlocksPerUnit * sizeof(uint32_t) +
 	    le32_to_cpu(part->header.BAMOffset) + SECTOR_SIZE - 1) / SECTOR_SIZE;
 
     offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset);
     ctl = cpu_to_le32(BLOCK_CONTROL);
 
-    for (i = 0; i < nbam; i++, offset += sizeof(u_int32_t)) {
+    for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {
 
-	ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t),
+	ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
 			       &retlen, (u_char *)&ctl);
 
 	if (ret)
@@ -461,18 +461,18 @@
 
 ======================================================================*/
 
-static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
-			   u_int16_t xferunit)
+static int copy_erase_unit(partition_t *part, uint16_t srcunit,
+			   uint16_t xferunit)
 {
     u_char buf[SECTOR_SIZE];
     struct eun_info_t *eun;
     struct xfer_info_t *xfer;
-    u_int32_t src, dest, free, i;
-    u_int16_t unit;
+    uint32_t src, dest, free, i;
+    uint16_t unit;
     int ret;
     ssize_t retlen;
     loff_t offset;
-    u_int16_t srcunitswap = cpu_to_le16(srcunit);
+    uint16_t srcunitswap = cpu_to_le16(srcunit);
 
     eun = &part->EUNInfo[srcunit];
     xfer = &part->XferInfo[xferunit];
@@ -486,7 +486,7 @@
 	offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);
 
 	ret = part->mbd.mtd->read(part->mbd.mtd, offset,
-			      part->BlocksPerUnit * sizeof(u_int32_t),
+			      part->BlocksPerUnit * sizeof(uint32_t),
 			      &retlen, (u_char *) (part->bam_cache));
 
 	/* mark the cache bad, in case we get an error later */
@@ -503,7 +503,7 @@
     offset = xfer->Offset + 20; /* Bad! */
     unit = cpu_to_le16(0x7fff);
 
-    ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int16_t),
+    ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint16_t),
 			   &retlen, (u_char *) &unit);
 
     if (ret) {
@@ -560,7 +560,7 @@
 
 
     /* All clear? Then update the LogicalEUN again */
-    ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(u_int16_t),
+    ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
 			   &retlen, (u_char *)&srcunitswap);
 
     if (ret) {
@@ -605,8 +605,8 @@
 
 static int reclaim_block(partition_t *part)
 {
-    u_int16_t i, eun, xfer;
-    u_int32_t best;
+    uint16_t i, eun, xfer;
+    uint32_t best;
     int queued, ret;
 
     DEBUG(0, "ftl_cs: reclaiming space...\n");
@@ -723,10 +723,10 @@
 }
 #endif
 
-static u_int32_t find_free(partition_t *part)
+static uint32_t find_free(partition_t *part)
 {
-    u_int16_t stop, eun;
-    u_int32_t blk;
+    uint16_t stop, eun;
+    uint32_t blk;
     size_t retlen;
     int ret;
 
@@ -749,7 +749,7 @@
 
 	ret = part->mbd.mtd->read(part->mbd.mtd,
 		       part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
-		       part->BlocksPerUnit * sizeof(u_int32_t),
+		       part->BlocksPerUnit * sizeof(uint32_t),
 		       &retlen, (u_char *) (part->bam_cache));
 
 	if (ret) {
@@ -786,7 +786,7 @@
 static int ftl_read(partition_t *part, caddr_t buffer,
 		    u_long sector, u_long nblocks)
 {
-    u_int32_t log_addr, bsize;
+    uint32_t log_addr, bsize;
     u_long i;
     int ret;
     size_t offset, retlen;
@@ -829,14 +829,14 @@
 
 ======================================================================*/
 
-static int set_bam_entry(partition_t *part, u_int32_t log_addr,
-			 u_int32_t virt_addr)
+static int set_bam_entry(partition_t *part, uint32_t log_addr,
+			 uint32_t virt_addr)
 {
-    u_int32_t bsize, blk, le_virt_addr;
+    uint32_t bsize, blk, le_virt_addr;
 #ifdef PSYCHO_DEBUG
-    u_int32_t old_addr;
+    uint32_t old_addr;
 #endif
-    u_int16_t eun;
+    uint16_t eun;
     int ret;
     size_t retlen, offset;
 
@@ -845,11 +845,11 @@
     bsize = 1 << part->header.EraseUnitSize;
     eun = log_addr / bsize;
     blk = (log_addr % bsize) / SECTOR_SIZE;
-    offset = (part->EUNInfo[eun].Offset + blk * sizeof(u_int32_t) +
+    offset = (part->EUNInfo[eun].Offset + blk * sizeof(uint32_t) +
 		  le32_to_cpu(part->header.BAMOffset));
 
 #ifdef PSYCHO_DEBUG
-    ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(u_int32_t),
+    ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(uint32_t),
                         &retlen, (u_char *)&old_addr);
     if (ret) {
 	printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
@@ -886,7 +886,7 @@
 #endif
 	part->bam_cache[blk] = le_virt_addr;
     }
-    ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t),
+    ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
                             &retlen, (u_char *)&le_virt_addr);
 
     if (ret) {
@@ -900,7 +900,7 @@
 static int ftl_write(partition_t *part, caddr_t buffer,
 		     u_long sector, u_long nblocks)
 {
-    u_int32_t bsize, log_addr, virt_addr, old_addr, blk;
+    uint32_t bsize, log_addr, virt_addr, old_addr, blk;
     u_long i;
     int ret;
     size_t retlen, offset;
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 50ce138..73f0522 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -50,7 +50,7 @@
 	struct INFTLrecord *inftl;
 	unsigned long temp;
 
-	if (mtd->type != MTD_NANDFLASH)
+	if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
 		return;
 	/* OK, this is moderately ugly.  But probably safe.  Alternatives? */
 	if (memcmp(mtd->name, "DiskOnChip", 10))
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 9113628..f751dd9 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -63,7 +63,7 @@
 	 * otherwise.
 	 */
 	inftl->EraseSize = inftl->mbd.mtd->erasesize;
-        inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize;
+        inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize;
 
 	inftl->MediaUnit = BLOCK_NIL;
 
@@ -187,7 +187,7 @@
 				mh->BlockMultiplierBits);
 			inftl->EraseSize = inftl->mbd.mtd->erasesize <<
 				mh->BlockMultiplierBits;
-			inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize;
+			inftl->nb_blocks = (u32)inftl->mbd.mtd->size / inftl->EraseSize;
 			block >>= mh->BlockMultiplierBits;
 		}
 
diff --git a/drivers/mtd/lpddr/Kconfig b/drivers/mtd/lpddr/Kconfig
new file mode 100644
index 0000000..acd4ea9
--- /dev/null
+++ b/drivers/mtd/lpddr/Kconfig
@@ -0,0 +1,22 @@
+# drivers/mtd/lpddr/Kconfig
+
+menu "LPDDR flash memory drivers"
+	depends on MTD!=n
+
+config MTD_LPDDR
+	tristate "Support for LPDDR flash chips"
+	select MTD_QINFO_PROBE
+	help
+	  This option enables support for LPDDR (Low Power Double Data Rate)
+	  flash chips, also known as Mobile DDR. It is a standard for DDR
+	  memories intended for battery-operated systems.
+
+config MTD_QINFO_PROBE
+	tristate "Detect flash chips by QINFO probe"
+	help
+	    Device Information for LPDDR chips is offered through the Overlay
+	    Window QINFO interface, which permits the same software to be used
+	    for entire families of devices. This serves a similar purpose to
+	    CFI on legacy Flash products.
+endmenu
+
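The QINFO help text above explains that chip parameters come from the Overlay Window rather than per-device tables, so a board map driver hooks up to it by probe name, the same way CFI maps do. The sketch below is an illustration under stated assumptions, not code from this series: do_map_probe(), simple_map_init(), ioremap() and map_destroy() are standard kernel APIs, while the "qinfo_probe" probe name, the board-lpddr mapping and its addresses are assumptions.

#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>

static struct map_info board_lpddr_map = {
	.name      = "board-lpddr",	/* hypothetical board mapping */
	.phys      = 0x20000000,	/* assumed flash base address */
	.size      = 0x04000000,	/* assumed 64 MiB window */
	.bankwidth = 2,
};

static struct mtd_info *board_mtd;

static int __init board_lpddr_init(void)
{
	board_lpddr_map.virt = ioremap(board_lpddr_map.phys,
				       board_lpddr_map.size);
	if (!board_lpddr_map.virt)
		return -ENOMEM;
	simple_map_init(&board_lpddr_map);

	/* "qinfo_probe" is assumed to be the name the probe registers. */
	board_mtd = do_map_probe("qinfo_probe", &board_lpddr_map);
	if (!board_mtd) {
		iounmap(board_lpddr_map.virt);
		return -ENXIO;
	}
	return add_mtd_device(board_mtd) ? -ENXIO : 0;
}

static void __exit board_lpddr_exit(void)
{
	if (board_mtd) {
		del_mtd_device(board_mtd);
		map_destroy(board_mtd);
	}
	iounmap(board_lpddr_map.virt);
}

module_init(board_lpddr_init);
module_exit(board_lpddr_exit);
MODULE_LICENSE("GPL");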
diff --git a/drivers/mtd/lpddr/Makefile b/drivers/mtd/lpddr/Makefile
new file mode 100644
index 0000000..da48e46
--- /dev/null
+++ b/drivers/mtd/lpddr/Makefile
@@ -0,0 +1,6 @@
+#
+# linux/drivers/mtd/lpddr/Makefile
+#
+
+obj-$(CONFIG_MTD_QINFO_PROBE)	+= qinfo_probe.o
+obj-$(CONFIG_MTD_LPDDR)	+= lpddr_cmds.o
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
new file mode 100644
index 0000000..e22ca49
--- /dev/null
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -0,0 +1,796 @@
+/*
+ * LPDDR flash memory device operations. This module provides read, write,
+ * erase, lock/unlock support for LPDDR flash memories
+ * (C) 2008 Korolev Alexey <akorolev@infradead.org>
+ * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
+ * Many thanks to Roman Borisov for initial enabling
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ * TODO:
+ * Implement VPP management
+ * Implement XIP support
+ * Implement OTP support
+ */
+#include <linux/mtd/pfow.h>
+#include <linux/mtd/qinfo.h>
+
+static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
+					size_t *retlen, u_char *buf);
+static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
+				size_t len, size_t *retlen, const u_char *buf);
+static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
+				unsigned long count, loff_t to, size_t *retlen);
+static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
+static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
+			size_t *retlen, void **mtdbuf, resource_size_t *phys);
+static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
+static int get_chip(struct map_info *map, struct flchip *chip, int mode);
+static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
+static void put_chip(struct map_info *map, struct flchip *chip);
+
+struct mtd_info *lpddr_cmdset(struct map_info *map)
+{
+	struct lpddr_private *lpddr = map->fldrv_priv;
+	struct flchip_shared *shared;
+	struct flchip *chip;
+	struct mtd_info *mtd;
+	int numchips;
+	int i, j;
+
+	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
+	if (!mtd) {
+		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
+		return NULL;
+	}
+	mtd->priv = map;
+	mtd->type = MTD_NORFLASH;
+
+	/* Fill in the default mtd operations */
+	mtd->read = lpddr_read;
+	mtd->type = MTD_NORFLASH;
+	mtd->flags = MTD_CAP_NORFLASH;
+	mtd->flags &= ~MTD_BIT_WRITEABLE;
+	mtd->erase = lpddr_erase;
+	mtd->write = lpddr_write_buffers;
+	mtd->writev = lpddr_writev;
+	mtd->read_oob = NULL;
+	mtd->write_oob = NULL;
+	mtd->sync = NULL;
+	mtd->lock = lpddr_lock;
+	mtd->unlock = lpddr_unlock;
+	mtd->suspend = NULL;
+	mtd->resume = NULL;
+	if (map_is_linear(map)) {
+		mtd->point = lpddr_point;
+		mtd->unpoint = lpddr_unpoint;
+	}
+	mtd->block_isbad = NULL;
+	mtd->block_markbad = NULL;
+	mtd->size = 1 << lpddr->qinfo->DevSizeShift;
+	mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
+	mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
+
+	shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips,
+						GFP_KERNEL);
+	if (!shared) {
+		kfree(lpddr);
+		kfree(mtd);
+		return NULL;
+	}
+
+	chip = &lpddr->chips[0];
+	numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
+	for (i = 0; i < numchips; i++) {
+		shared[i].writing = shared[i].erasing = NULL;
+		spin_lock_init(&shared[i].lock);
+		for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
+			*chip = lpddr->chips[i];
+			chip->start += j << lpddr->chipshift;
+			chip->oldstate = chip->state = FL_READY;
+			chip->priv = &shared[i];
+			/* those should be reset too since
+			   they create memory references. */
+			init_waitqueue_head(&chip->wq);
+			spin_lock_init(&chip->_spinlock);
+			chip->mutex = &chip->_spinlock;
+			chip++;
+		}
+	}
+
+	return mtd;
+}
+EXPORT_SYMBOL(lpddr_cmdset);
+
+static int wait_for_ready(struct map_info *map, struct flchip *chip,
+		unsigned int chip_op_time)
+{
+	unsigned int timeo, reset_timeo, sleep_time;
+	unsigned int dsr;
+	flstate_t chip_state = chip->state;
+	int ret = 0;
+
+	/* set our timeout to 8 times the expected delay */
+	timeo = chip_op_time * 8;
+	if (!timeo)
+		timeo = 500000;
+	reset_timeo = timeo;
+	sleep_time = chip_op_time / 2;
+
+	for (;;) {
+		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
+		if (dsr & DSR_READY_STATUS)
+			break;
+		if (!timeo) {
+			printk(KERN_ERR "%s: Flash timeout error state %d \n",
+							map->name, chip_state);
+			ret = -ETIME;
+			break;
+		}
+
+		/* OK Still waiting. Drop the lock, wait a while and retry. */
+		spin_unlock(chip->mutex);
+		if (sleep_time >= 1000000/HZ) {
+			/*
+			 * Half of the normal delay still remaining
+			 * can be performed with a sleeping delay instead
+			 * of busy waiting.
+			 */
+			msleep(sleep_time/1000);
+			timeo -= sleep_time;
+			sleep_time = 1000000/HZ;
+		} else {
+			udelay(1);
+			cond_resched();
+			timeo--;
+		}
+		spin_lock(chip->mutex);
+
+		while (chip->state != chip_state) {
+			/* Someone's suspended the operation: sleep */
+			DECLARE_WAITQUEUE(wait, current);
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			add_wait_queue(&chip->wq, &wait);
+			spin_unlock(chip->mutex);
+			schedule();
+			remove_wait_queue(&chip->wq, &wait);
+			spin_lock(chip->mutex);
+		}
+		if (chip->erase_suspended || chip->write_suspended)  {
+			/* Suspend has occurred while sleeping: reset timeout */
+			timeo = reset_timeo;
+			chip->erase_suspended = chip->write_suspended = 0;
+		}
+	}
+	/* check status for errors */
+	if (dsr & DSR_ERR) {
+		/* Clear DSR*/
+		map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
+		printk(KERN_WARNING"%s: Bad status on wait: 0x%x \n",
+				map->name, dsr);
+		print_drs_error(dsr);
+		ret = -EIO;
+	}
+	chip->state = FL_READY;
+	return ret;
+}
+
+static int get_chip(struct map_info *map, struct flchip *chip, int mode)
+{
+	int ret;
+	DECLARE_WAITQUEUE(wait, current);
+
+ retry:
+	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
+		&& chip->state != FL_SYNCING) {
+		/*
+		 * OK. We have the possibility of contention on the write/erase
+		 * operations which are global to the real chip and not per
+		 * partition.  So let's fight it over in the partition which
+		 * currently has authority on the operation.
+		 *
+		 * The rules are as follows:
+		 *
+		 * - any write operation must own shared->writing.
+		 *
+		 * - any erase operation must own _both_ shared->writing and
+		 *   shared->erasing.
+		 *
+		 * - contention arbitration is handled in the owner's context.
+		 *
+		 * The 'shared' struct can be read and/or written only when
+		 * its lock is taken.
+		 */
+		struct flchip_shared *shared = chip->priv;
+		struct flchip *contender;
+		spin_lock(&shared->lock);
+		contender = shared->writing;
+		if (contender && contender != chip) {
+			/*
+			 * The engine to perform desired operation on this
+			 * partition is already in use by someone else.
+			 * Let's fight over it in the context of the chip
+			 * currently using it.  If it is possible to suspend,
+			 * that other partition will do just that, otherwise
+			 * it'll happily send us to sleep.  In any case, when
+			 * get_chip returns success we're clear to go ahead.
+			 */
+			ret = spin_trylock(contender->mutex);
+			spin_unlock(&shared->lock);
+			if (!ret)
+				goto retry;
+			spin_unlock(chip->mutex);
+			ret = chip_ready(map, contender, mode);
+			spin_lock(chip->mutex);
+
+			if (ret == -EAGAIN) {
+				spin_unlock(contender->mutex);
+				goto retry;
+			}
+			if (ret) {
+				spin_unlock(contender->mutex);
+				return ret;
+			}
+			spin_lock(&shared->lock);
+
+			/* We should not own chip if it is already in FL_SYNCING
+			 * state. Put contender and retry. */
+			if (chip->state == FL_SYNCING) {
+				put_chip(map, contender);
+				spin_unlock(contender->mutex);
+				goto retry;
+			}
+			spin_unlock(contender->mutex);
+		}
+
+		/* Check if we have suspended erase on this chip.
+		   Must sleep in such a case. */
+		if (mode == FL_ERASING && shared->erasing
+		    && shared->erasing->oldstate == FL_ERASING) {
+			spin_unlock(&shared->lock);
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			add_wait_queue(&chip->wq, &wait);
+			spin_unlock(chip->mutex);
+			schedule();
+			remove_wait_queue(&chip->wq, &wait);
+			spin_lock(chip->mutex);
+			goto retry;
+		}
+
+		/* We now own it */
+		shared->writing = chip;
+		if (mode == FL_ERASING)
+			shared->erasing = chip;
+		spin_unlock(&shared->lock);
+	}
+
+	ret = chip_ready(map, chip, mode);
+	if (ret == -EAGAIN)
+		goto retry;
+
+	return ret;
+}
+
+static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
+{
+	struct lpddr_private *lpddr = map->fldrv_priv;
+	int ret = 0;
+	DECLARE_WAITQUEUE(wait, current);
+
+	/* Prevent setting state FL_SYNCING for chip in suspended state. */
+	if (FL_SYNCING == mode && FL_READY != chip->oldstate)
+		goto sleep;
+
+	switch (chip->state) {
+	case FL_READY:
+	case FL_JEDEC_QUERY:
+		return 0;
+
+	case FL_ERASING:
+		if (!lpddr->qinfo->SuspEraseSupp ||
+			!(mode == FL_READY || mode == FL_POINT))
+			goto sleep;
+
+		map_write(map, CMD(LPDDR_SUSPEND),
+			map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
+		chip->oldstate = FL_ERASING;
+		chip->state = FL_ERASE_SUSPENDING;
+		ret = wait_for_ready(map, chip, 0);
+		if (ret) {
+			/* Oops, something went wrong. */
+			/* Resume and pretend we weren't here.  */
+			map_write(map, CMD(LPDDR_RESUME),
+				map->pfow_base + PFOW_COMMAND_CODE);
+			map_write(map, CMD(LPDDR_START_EXECUTION),
+				map->pfow_base + PFOW_COMMAND_EXECUTE);
+			chip->state = FL_ERASING;
+			chip->oldstate = FL_READY;
+			printk(KERN_ERR "%s: suspend operation failed."
+					" State may be wrong\n", map->name);
+			return -EIO;
+		}
+		chip->erase_suspended = 1;
+		chip->state = FL_READY;
+		return 0;
+		/* Erase suspend */
+	case FL_POINT:
+		/* Only if there's no operation suspended... */
+		if (mode == FL_READY && chip->oldstate == FL_READY)
+			return 0;
+
+	default:
+sleep:
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		add_wait_queue(&chip->wq, &wait);
+		spin_unlock(chip->mutex);
+		schedule();
+		remove_wait_queue(&chip->wq, &wait);
+		spin_lock(chip->mutex);
+		return -EAGAIN;
+	}
+}
+
+static void put_chip(struct map_info *map, struct flchip *chip)
+{
+	if (chip->priv) {
+		struct flchip_shared *shared = chip->priv;
+		spin_lock(&shared->lock);
+		if (shared->writing == chip && chip->oldstate == FL_READY) {
+			/* We own the ability to write, but we're done */
+			shared->writing = shared->erasing;
+			if (shared->writing && shared->writing != chip) {
+				/* give back the ownership */
+				struct flchip *loaner = shared->writing;
+				spin_lock(loaner->mutex);
+				spin_unlock(&shared->lock);
+				spin_unlock(chip->mutex);
+				put_chip(map, loaner);
+				spin_lock(chip->mutex);
+				spin_unlock(loaner->mutex);
+				wake_up(&chip->wq);
+				return;
+			}
+			shared->erasing = NULL;
+			shared->writing = NULL;
+		} else if (shared->erasing == chip && shared->writing != chip) {
+			/*
+			 * We own the ability to erase without the ability
+			 * to write, which means the erase was suspended
+			 * and some other partition is currently writing.
+			 * Don't let the switch below mess things up since
+			 * we don't have ownership to resume anything.
+			 */
+			spin_unlock(&shared->lock);
+			wake_up(&chip->wq);
+			return;
+		}
+		spin_unlock(&shared->lock);
+	}
+
+	switch (chip->oldstate) {
+	case FL_ERASING:
+		chip->state = chip->oldstate;
+		map_write(map, CMD(LPDDR_RESUME),
+				map->pfow_base + PFOW_COMMAND_CODE);
+		map_write(map, CMD(LPDDR_START_EXECUTION),
+				map->pfow_base + PFOW_COMMAND_EXECUTE);
+		chip->oldstate = FL_READY;
+		chip->state = FL_ERASING;
+		break;
+	case FL_READY:
+		break;
+	default:
+		printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
+				map->name, chip->oldstate);
+	}
+	wake_up(&chip->wq);
+}
+
+int do_write_buffer(struct map_info *map, struct flchip *chip,
+			unsigned long adr, const struct kvec **pvec,
+			unsigned long *pvec_seek, int len)
+{
+	struct lpddr_private *lpddr = map->fldrv_priv;
+	map_word datum;
+	int ret, wbufsize, word_gap, words;
+	const struct kvec *vec;
+	unsigned long vec_seek;
+	unsigned long prog_buf_ofs;
+
+	wbufsize = 1 << lpddr->qinfo->BufSizeShift;
+
+	spin_lock(chip->mutex);
+	ret = get_chip(map, chip, FL_WRITING);
+	if (ret) {
+		spin_unlock(chip->mutex);
+		return ret;
+	}
+	/* Figure out the number of words to write */
+	word_gap = (-adr & (map_bankwidth(map)-1));
+	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
+	if (!word_gap) {
+		words--;
+	} else {
+		word_gap = map_bankwidth(map) - word_gap;
+		adr -= word_gap;
+		datum = map_word_ff(map);
+	}
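+	/*
+	 * If the start address was unaligned, word_gap now holds the number
+	 * of leading pad bytes, adr has been rounded down to a bank-width
+	 * boundary, and the first word is pre-seeded with 0xff so the pad
+	 * bytes program nothing.
+	 */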
+	/* Write data */
+	/* Get the program buffer offset from PFOW register data first*/
+	prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
+				map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
+	vec = *pvec;
+	vec_seek = *pvec_seek;
+	do {
+		int n = map_bankwidth(map) - word_gap;
+
+		if (n > vec->iov_len - vec_seek)
+			n = vec->iov_len - vec_seek;
+		if (n > len)
+			n = len;
+
+		if (!word_gap && (len < map_bankwidth(map)))
+			datum = map_word_ff(map);
+
+		datum = map_word_load_partial(map, datum,
+				vec->iov_base + vec_seek, word_gap, n);
+
+		len -= n;
+		word_gap += n;
+		if (!len || word_gap == map_bankwidth(map)) {
+			map_write(map, datum, prog_buf_ofs);
+			prog_buf_ofs += map_bankwidth(map);
+			word_gap = 0;
+		}
+
+		vec_seek += n;
+		if (vec_seek == vec->iov_len) {
+			vec++;
+			vec_seek = 0;
+		}
+	} while (len);
+	*pvec = vec;
+	*pvec_seek = vec_seek;
+
+	/* GO GO GO */
+	send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
+	chip->state = FL_WRITING;
+	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
+	if (ret) {
+		printk(KERN_WARNING "%s Buffer program error: %d at %lx\n",
+			map->name, ret, adr);
+		goto out;
+	}
+
+ out:	put_chip(map, chip);
+	spin_unlock(chip->mutex);
+	return ret;
+}
+
+int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
+{
+	struct map_info *map = mtd->priv;
+	struct lpddr_private *lpddr = map->fldrv_priv;
+	int chipnum = adr >> lpddr->chipshift;
+	struct flchip *chip = &lpddr->chips[chipnum];
+	int ret;
+
+	spin_lock(chip->mutex);
+	ret = get_chip(map, chip, FL_ERASING);
+	if (ret) {
+		spin_unlock(chip->mutex);
+		return ret;
+	}
+	send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
+	chip->state = FL_ERASING;
+	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
+	if (ret) {
+		printk(KERN_WARNING "%s Erase block error %d at 0x%llx\n",
+			map->name, ret, (unsigned long long)adr);
+		goto out;
+	}
+ out:	put_chip(map, chip);
+	spin_unlock(chip->mutex);
+	return ret;
+}
+
+static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
+			size_t *retlen, u_char *buf)
+{
+	struct map_info *map = mtd->priv;
+	struct lpddr_private *lpddr = map->fldrv_priv;
+	int chipnum = adr >> lpddr->chipshift;
+	struct flchip *chip = &lpddr->chips[chipnum];
+	int ret = 0;
+
+	spin_lock(chip->mutex);
+	ret = get_chip(map, chip, FL_READY);
+	if (ret) {
+		spin_unlock(chip->mutex);
+		return ret;
+	}
+
+	map_copy_from(map, buf, adr, len);
+	*retlen = len;
+
+	put_chip(map, chip);
+	spin_unlock(chip->mutex);
+	return ret;
+}
+
+static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
+			size_t *retlen, void **mtdbuf, resource_size_t *phys)
+{
+	struct map_info *map = mtd->priv;
+	struct lpddr_private *lpddr = map->fldrv_priv;
+	int chipnum = adr >> lpddr->chipshift;
+	unsigned long ofs, last_end = 0;
+	struct flchip *chip = &lpddr->chips[chipnum];
+	int ret = 0;
+
+	if (!map->virt || (adr + len > mtd->size))
+		return -EINVAL;
+
+	/* ofs: offset within the first chip that the first read should start */
+	ofs = adr - (chipnum << lpddr->chipshift);
+
+	*mtdbuf = (void *)map->virt + chip->start + ofs;
+	*retlen = 0;
+
+	while (len) {
+		unsigned long thislen;
+
+		if (chipnum >= lpddr->numchips)
+			break;
+
+		/* We cannot point across chips that are virtually disjoint */
+		if (!last_end)
+			last_end = chip->start;
+		else if (chip->start != last_end)
+			break;
+
+		if ((len + ofs - 1) >> lpddr->chipshift)
+			thislen = (1<<lpddr->chipshift) - ofs;
+		else
+			thislen = len;
+		/* get the chip */
+		spin_lock(chip->mutex);
+		ret = get_chip(map, chip, FL_POINT);
+		spin_unlock(chip->mutex);
+		if (ret)
+			break;
+
+		chip->state = FL_POINT;
+		chip->ref_point_counter++;
+		*retlen += thislen;
+		len -= thislen;
+
+		ofs = 0;
+		last_end += 1 << lpddr->chipshift;
+		chipnum++;
+		chip = &lpddr->chips[chipnum];
+	}
+	return 0;
+}
+
+static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
+{
+	struct map_info *map = mtd->priv;
+	struct lpddr_private *lpddr = map->fldrv_priv;
+	int chipnum = adr >> lpddr->chipshift;
+	unsigned long ofs;
+
+	/* ofs: offset within the first chip that the first read should start */
+	ofs = adr - (chipnum << lpddr->chipshift);
+
+	while (len) {
+		unsigned long thislen;
+		struct flchip *chip;
+
+		if (chipnum >= lpddr->numchips)
+			break;
+
+		chip = &lpddr->chips[chipnum];
+
+		if ((len + ofs - 1) >> lpddr->chipshift)
+			thislen = (1<<lpddr->chipshift) - ofs;
+		else
+			thislen = len;
+
+		spin_lock(chip->mutex);
+		if (chip->state == FL_POINT) {
+			chip->ref_point_counter--;
+			if (chip->ref_point_counter == 0)
+				chip->state = FL_READY;
+		} else
+			printk(KERN_WARNING "%s: Warning: unpoint called on "
+					"non-pointed region\n", map->name);
+
+		put_chip(map, chip);
+		spin_unlock(chip->mutex);
+
+		len -= thislen;
+		ofs = 0;
+		chipnum++;
+	}
+}
+
+static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
+				size_t *retlen, const u_char *buf)
+{
+	struct kvec vec;
+
+	vec.iov_base = (void *) buf;
+	vec.iov_len = len;
+
+	return lpddr_writev(mtd, &vec, 1, to, retlen);
+}
+
+
+static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
+				unsigned long count, loff_t to, size_t *retlen)
+{
+	struct map_info *map = mtd->priv;
+	struct lpddr_private *lpddr = map->fldrv_priv;
+	int ret = 0;
+	int chipnum;
+	unsigned long ofs, vec_seek, i;
+	int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
+
+	size_t len = 0;
+
+	for (i = 0; i < count; i++)
+		len += vecs[i].iov_len;
+
+	*retlen = 0;
+	if (!len)
+		return 0;
+
+	chipnum = to >> lpddr->chipshift;
+
+	ofs = to;
+	vec_seek = 0;
+
+	do {
+		/* We must not cross write block boundaries */
+		int size = wbufsize - (ofs & (wbufsize-1));
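+		/*
+		 * e.g. with a 64-byte program buffer and an offset ending in
+		 * 0x30, the first chunk is limited to 16 bytes so that every
+		 * following chunk starts buffer-aligned.
+		 */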
+
+		if (size > len)
+			size = len;
+
+		ret = do_write_buffer(map, &lpddr->chips[chipnum],
+					  ofs, &vecs, &vec_seek, size);
+		if (ret)
+			return ret;
+
+		ofs += size;
+		(*retlen) += size;
+		len -= size;
+
+		/* Be nice and reschedule with the chip in a usable
+		 * state for other processes */
+		cond_resched();
+
+	} while (len);
+
+	return 0;
+}
+
+static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	unsigned long ofs, len;
+	int ret;
+	struct map_info *map = mtd->priv;
+	struct lpddr_private *lpddr = map->fldrv_priv;
+	int size = 1 << lpddr->qinfo->UniformBlockSizeShift;
+
+	ofs = instr->addr;
+	len = instr->len;
+
+	if (ofs > mtd->size || (len + ofs) > mtd->size)
+		return -EINVAL;
+
+	while (len > 0) {
+		ret = do_erase_oneblock(mtd, ofs);
+		if (ret)
+			return ret;
+		ofs += size;
+		len -= size;
+	}
+	instr->state = MTD_ERASE_DONE;
+	mtd_erase_callback(instr);
+
+	return 0;
+}
+
+#define DO_XXLOCK_LOCK		1
+#define DO_XXLOCK_UNLOCK	2
+int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
+{
+	int ret = 0;
+	struct map_info *map = mtd->priv;
+	struct lpddr_private *lpddr = map->fldrv_priv;
+	int chipnum = adr >> lpddr->chipshift;
+	struct flchip *chip = &lpddr->chips[chipnum];
+
+	spin_lock(chip->mutex);
+	ret = get_chip(map, chip, FL_LOCKING);
+	if (ret) {
+		spin_unlock(chip->mutex);
+		return ret;
+	}
+
+	if (thunk == DO_XXLOCK_LOCK) {
+		send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
+		chip->state = FL_LOCKING;
+	} else if (thunk == DO_XXLOCK_UNLOCK) {
+		send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
+		chip->state = FL_UNLOCKING;
+	} else
+		BUG();
+
+	ret = wait_for_ready(map, chip, 1);
+	if (ret) {
+		printk(KERN_ERR "%s: block lock/unlock error status %d\n",
+				map->name, ret);
+		goto out;
+	}
+out:	put_chip(map, chip);
+	spin_unlock(chip->mutex);
+	return ret;
+}
+
+static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
+}
+
+static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
+}
+
+int word_program(struct map_info *map, loff_t adr, uint32_t curval)
+{
+	int ret;
+	struct lpddr_private *lpddr = map->fldrv_priv;
+	int chipnum = adr >> lpddr->chipshift;
+	struct flchip *chip = &lpddr->chips[chipnum];
+
+	spin_lock(chip->mutex);
+	ret = get_chip(map, chip, FL_WRITING);
+	if (ret) {
+		spin_unlock(chip->mutex);
+		return ret;
+	}
+
+	send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 0x00, (map_word *)&curval);
+
+	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->SingleWordProgTime));
+	if (ret) {
+		printk(KERN_WARNING "%s word_program error at 0x%llx; val: %x\n",
+			map->name, (unsigned long long)adr, curval);
+		goto out;
+	}
+
+out:	put_chip(map, chip);
+	spin_unlock(chip->mutex);
+	return ret;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
+MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
new file mode 100644
index 0000000..79bf40f
--- /dev/null
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -0,0 +1,255 @@
+/*
+ * Probing flash chips with QINFO records.
+ * (C) 2008 Korolev Alexey <akorolev@infradead.org>
+ * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/xip.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/pfow.h>
+#include <linux/mtd/qinfo.h>
+
+static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr);
+struct mtd_info *lpddr_probe(struct map_info *map);
+static struct lpddr_private *lpddr_probe_chip(struct map_info *map);
+static int lpddr_pfow_present(struct map_info *map,
+			struct lpddr_private *lpddr);
+
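+/* Each entry is { major, minor, id string, description }; the major/minor
+ * pair addresses the QINFO record that is read through the PFOW window. */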
+static struct qinfo_query_info qinfo_array[] = {
+	/* General device info */
+	{0, 0, "DevSizeShift", "Device size 2^n bytes"},
+	{0, 3, "BufSizeShift", "Program buffer size 2^n bytes"},
+	/* Erase block information */
+	{1, 1, "TotalBlocksNum", "Total number of blocks"},
+	{1, 2, "UniformBlockSizeShift", "Uniform block size 2^n bytes"},
+	/* Partition information */
+	{2, 1, "HWPartsNum", "Number of hardware partitions"},
+	/* Optional features */
+	{5, 1, "SuspEraseSupp", "Suspend erase supported"},
+	/* Operation typical time */
+	{10, 0, "SingleWordProgTime", "Single word program 2^n u-sec"},
+	{10, 1, "ProgBufferTime", "Program buffer write 2^n u-sec"},
+	{10, 2, "BlockEraseTime", "Block erase 2^n m-sec"},
+	{10, 3, "FullChipEraseTime", "Full chip erase 2^n m-sec"},
+};
+
+static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
+{
+	int qinfo_lines = ARRAY_SIZE(qinfo_array);
+	int i;
+	int bankwidth = map_bankwidth(map) * 8;
+	int major, minor;
+
+	for (i = 0; i < qinfo_lines; i++) {
+		if (strcmp(id_str, qinfo_array[i].id_str) == 0) {
+			major = qinfo_array[i].major & ((1 << bankwidth) - 1);
+			minor = qinfo_array[i].minor & ((1 << bankwidth) - 1);
+			return minor | (major << bankwidth);
+		}
+	}
+	printk(KERN_ERR "%s qinfo id string is wrong!\n", map->name);
+	BUG();
+	return -1;
+}
+
+static uint16_t lpddr_info_query(struct map_info *map, char *id_str)
+{
+	unsigned int dsr, val;
+	int bits_per_chip = map_bankwidth(map) * 8;
+	unsigned long adr = lpddr_get_qinforec_pos(map, id_str);
+	int attempts = 20;
+
+	/* Write a request for the PFOW record */
+	map_write(map, CMD(LPDDR_INFO_QUERY),
+			map->pfow_base + PFOW_COMMAND_CODE);
+	map_write(map, CMD(adr & ((1 << bits_per_chip) - 1)),
+			map->pfow_base + PFOW_COMMAND_ADDRESS_L);
+	map_write(map, CMD(adr >> bits_per_chip),
+			map->pfow_base + PFOW_COMMAND_ADDRESS_H);
+	map_write(map, CMD(LPDDR_START_EXECUTION),
+			map->pfow_base + PFOW_COMMAND_EXECUTE);
+
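+	/* Poll the Device Status Register for the ready bit; allow the
+	 * device up to ~200us (20 polls of 10us) to answer the query. */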
+	while ((attempts--) > 0) {
+		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
+		if (dsr & DSR_READY_STATUS)
+			break;
+		udelay(10);
+	}
+
+	val = CMDVAL(map_read(map, map->pfow_base + PFOW_COMMAND_DATA));
+	return val;
+}
+
+static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
+{
+	map_word pfow_val[4];
+
+	/* Check identification string */
+	pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
+	pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
+	pfow_val[2] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_O);
+	pfow_val[3] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_W);
+
+	if (!map_word_equal(map, CMD('P'), pfow_val[0]))
+		goto out;
+
+	if (!map_word_equal(map, CMD('F'), pfow_val[1]))
+		goto out;
+
+	if (!map_word_equal(map, CMD('O'), pfow_val[2]))
+		goto out;
+
+	if (!map_word_equal(map, CMD('W'), pfow_val[3]))
+		goto out;
+
+	return 1;	/* "PFOW" is found */
+out:
+	printk(KERN_WARNING "%s: PFOW string at 0x%lx not found\n",
+					map->name, map->pfow_base);
+	return 0;
+}
+
+static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr)
+{
+
+	lpddr->qinfo = kmalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
+	if (!lpddr->qinfo) {
+		printk(KERN_WARNING "%s: no memory for LPDDR qinfo structure\n",
+				map->name);
+		return 0;
+	}
+	memset(lpddr->qinfo, 0, sizeof(struct qinfo_chip));
+
+	/* Get the ManuID */
+	lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID));
+	/* Get the DeviceID */
+	lpddr->DevId = CMDVAL(map_read(map, map->pfow_base + PFOW_DEVICE_ID));
+	/* read parameters from chip qinfo table */
+	lpddr->qinfo->DevSizeShift = lpddr_info_query(map, "DevSizeShift");
+	lpddr->qinfo->TotalBlocksNum = lpddr_info_query(map, "TotalBlocksNum");
+	lpddr->qinfo->BufSizeShift = lpddr_info_query(map, "BufSizeShift");
+	lpddr->qinfo->HWPartsNum = lpddr_info_query(map, "HWPartsNum");
+	lpddr->qinfo->UniformBlockSizeShift =
+				lpddr_info_query(map, "UniformBlockSizeShift");
+	lpddr->qinfo->SuspEraseSupp = lpddr_info_query(map, "SuspEraseSupp");
+	lpddr->qinfo->SingleWordProgTime =
+				lpddr_info_query(map, "SingleWordProgTime");
+	lpddr->qinfo->ProgBufferTime = lpddr_info_query(map, "ProgBufferTime");
+	lpddr->qinfo->BlockEraseTime = lpddr_info_query(map, "BlockEraseTime");
+	return 1;
+}
+static struct lpddr_private *lpddr_probe_chip(struct map_info *map)
+{
+	struct lpddr_private lpddr;
+	struct lpddr_private *retlpddr;
+	int numvirtchips;
+
+
+	if ((map->pfow_base + 0x1000) >= map->size) {
+		printk(KERN_NOTICE "%s Probe at base (0x%08lx) past the end of "
+				"the map (0x%08lx)\n", map->name,
+				(unsigned long)map->pfow_base, map->size - 1);
+		return NULL;
+	}
+	memset(&lpddr, 0, sizeof(struct lpddr_private));
+	if (!lpddr_pfow_present(map, &lpddr))
+		return NULL;
+
+	if (!lpddr_chip_setup(map, &lpddr))
+		return NULL;
+
+	/* Ok so we found a chip */
+	lpddr.chipshift = lpddr.qinfo->DevSizeShift;
+	lpddr.numchips = 1;
+
+	numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum;
+	retlpddr = kmalloc(sizeof(struct lpddr_private) +
+			numvirtchips * sizeof(struct flchip), GFP_KERNEL);
+	if (!retlpddr)
+		return NULL;
+
+	memset(retlpddr, 0, sizeof(struct lpddr_private) +
+				numvirtchips * sizeof(struct flchip));
+	memcpy(retlpddr, &lpddr, sizeof(struct lpddr_private));
+
+	retlpddr->numchips = numvirtchips;
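+	/* Each hardware partition is exposed as its own virtual chip, so the
+	 * per-chip address shift shrinks by log2(HWPartsNum) (assumed to be
+	 * a power of two). */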
+	retlpddr->chipshift = retlpddr->qinfo->DevSizeShift -
+				__ffs(retlpddr->qinfo->HWPartsNum);
+
+	return retlpddr;
+}
+
+struct mtd_info *lpddr_probe(struct map_info *map)
+{
+	struct mtd_info *mtd = NULL;
+	struct lpddr_private *lpddr;
+
+	/* First probe the map to see if we can open PFOW here */
+	lpddr = lpddr_probe_chip(map);
+	if (!lpddr)
+		return NULL;
+
+	map->fldrv_priv = lpddr;
+	mtd = lpddr_cmdset(map);
+	if (mtd) {
+		if (mtd->size > map->size) {
+			printk(KERN_WARNING "Reducing visibility of %ldKiB chip "
+				"to %ldKiB\n", (unsigned long)mtd->size >> 10,
+				(unsigned long)map->size >> 10);
+			mtd->size = map->size;
+		}
+		return mtd;
+	}
+
+	kfree(lpddr->qinfo);
+	kfree(lpddr);
+	map->fldrv_priv = NULL;
+	return NULL;
+}
+
+static struct mtd_chip_driver lpddr_chipdrv = {
+	.probe		= lpddr_probe,
+	.name		= "qinfo_probe",
+	.module		= THIS_MODULE
+};
+
+static int __init lpddr_probe_init(void)
+{
+	register_mtd_chip_driver(&lpddr_chipdrv);
+	return 0;
+}
+
+static void __exit lpddr_probe_exit(void)
+{
+	unregister_mtd_chip_driver(&lpddr_chipdrv);
+}
+
+module_init(lpddr_probe_init);
+module_exit(lpddr_probe_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vasiliy Leonenko <vasiliy.leonenko@gmail.com>");
+MODULE_DESCRIPTION("Driver to probe qinfo flash chips");
+
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 5ea1693..0225cbb 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -10,8 +10,8 @@
 	  paged mappings of flash chips.
 
 config MTD_PHYSMAP
-	tristate "CFI Flash device in physical memory map"
-	depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM
+	tristate "Flash device in physical memory map"
+	depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM || MTD_LPDDR
 	help
 	  This provides a 'mapping' driver which allows the NOR Flash and
 	  ROM driver code to communicate with chips which are mapped
@@ -23,9 +23,20 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called physmap.
 
+config MTD_PHYSMAP_COMPAT
+	bool "Physmap compat support"
+	depends on MTD_PHYSMAP
+	default n
+	help
+	  Set up a simple mapping via the Kconfig options.  Normally the
+	  physmap configuration options are set via your board's
+	  resource file.
+
+	  If unsure, say N here.
+
 config MTD_PHYSMAP_START
 	hex "Physical start address of flash mapping"
-	depends on MTD_PHYSMAP
+	depends on MTD_PHYSMAP_COMPAT
 	default "0x8000000"
 	help
 	  This is the physical memory location at which the flash chips
@@ -37,7 +48,7 @@
 
 config MTD_PHYSMAP_LEN
 	hex "Physical length of flash mapping"
-	depends on MTD_PHYSMAP
+	depends on MTD_PHYSMAP_COMPAT
 	default "0"
 	help
 	  This is the total length of the mapping of the flash chips on
@@ -51,7 +62,7 @@
 
 config MTD_PHYSMAP_BANKWIDTH
 	int "Bank width in octets"
-	depends on MTD_PHYSMAP
+	depends on MTD_PHYSMAP_COMPAT
 	default "2"
 	help
 	  This is the total width of the data bus of the flash devices
diff --git a/drivers/mtd/maps/alchemy-flash.c b/drivers/mtd/maps/alchemy-flash.c
index 82811bc..845ad4f 100644
--- a/drivers/mtd/maps/alchemy-flash.c
+++ b/drivers/mtd/maps/alchemy-flash.c
@@ -111,7 +111,7 @@
 
 static struct mtd_info *mymtd;
 
-int __init alchemy_mtd_init(void)
+static int __init alchemy_mtd_init(void)
 {
 	struct mtd_partition *parts;
 	int nb_parts = 0;
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index d1eec7d..237733d 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -232,8 +232,8 @@
 		/* Trim the size if we are larger than the map */
 		if (map->mtd->size > map->map.size) {
 			printk(KERN_WARNING MOD_NAME
-				" rom(%u) larger than window(%lu). fixing...\n",
-				map->mtd->size, map->map.size);
+				" rom(%llu) larger than window(%lu). fixing...\n",
+				(unsigned long long)map->mtd->size, map->map.size);
 			map->mtd->size = map->map.size;
 		}
 		if (window->rsrc.parent) {
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index 0ecc3f6..b4ed816 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -88,7 +88,7 @@
 
 static struct mtd_info *mymtd;
 
-int __init init_flagadm(void)
+static int __init init_flagadm(void)
 {
 	printk(KERN_NOTICE "FlagaDM flash device: %x at %x\n",
 			FLASH_SIZE, FLASH_PHYS_ADDR);
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 1a6feb4..5f7a245 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -263,8 +263,8 @@
 		/* Trim the size if we are larger than the map */
 		if (map->mtd->size > map->map.size) {
 			printk(KERN_WARNING MOD_NAME
-				" rom(%u) larger than window(%lu). fixing...\n",
-				map->mtd->size, map->map.size);
+				" rom(%llu) larger than window(%lu). fixing...\n",
+				(unsigned long long)map->mtd->size, map->map.size);
 			map->mtd->size = map->map.size;
 		}
 		if (window->rsrc.parent) {
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
index e115667..cfacfa6 100644
--- a/drivers/mtd/maps/dbox2-flash.c
+++ b/drivers/mtd/maps/dbox2-flash.c
@@ -69,7 +69,7 @@
 	.phys		= WINDOW_ADDR,
 };
 
-int __init init_dbox2_flash(void)
+static int __init init_dbox2_flash(void)
 {
        	printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR);
 	dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
index 9433738..be9e90b 100644
--- a/drivers/mtd/maps/edb7312.c
+++ b/drivers/mtd/maps/edb7312.c
@@ -71,7 +71,7 @@
 static int                   mtd_parts_nb = 0;
 static struct mtd_partition *mtd_parts    = 0;
 
-int __init init_edb7312nor(void)
+static int __init init_edb7312nor(void)
 {
 	static const char *rom_probe_types[] = PROBETYPES;
 	const char **type;
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index bbbcdd4..11a2f57 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -324,8 +324,8 @@
 		/* Trim the size if we are larger than the map */
 		if (map->mtd->size > map->map.size) {
 			printk(KERN_WARNING MOD_NAME
-				" rom(%u) larger than window(%lu). fixing...\n",
-				map->mtd->size, map->map.size);
+				" rom(%llu) larger than window(%lu). fixing...\n",
+				(unsigned long long)map->mtd->size, map->map.size);
 			map->mtd->size = map->map.size;
 		}
 		if (window->rsrc.parent) {
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
index a8e3fde..1e43124 100644
--- a/drivers/mtd/maps/fortunet.c
+++ b/drivers/mtd/maps/fortunet.c
@@ -181,7 +181,7 @@
 /* Backwards-spelling-compatibility */
 __setup("MTD_Partion=", MTD_New_Partition);
 
-int __init init_fortunet(void)
+static int __init init_fortunet(void)
 {
 	int	ix,iy;
 	for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++)
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 3b959fa..72c724f 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -65,7 +65,7 @@
 /*
  * Initialize FLASH support
  */
-int __init h720x_mtd_init(void)
+static int __init h720x_mtd_init(void)
 {
 
 	char	*part_type = NULL;
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index aeb6c91..c32bc28 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -258,8 +258,8 @@
 		/* Trim the size if we are larger than the map */
 		if (map->mtd->size > map->map.size) {
 			printk(KERN_WARNING MOD_NAME
-				" rom(%u) larger than window(%lu). fixing...\n",
-				map->mtd->size, map->map.size);
+				" rom(%llu) larger than window(%lu). fixing...\n",
+				(unsigned long long)map->mtd->size, map->map.size);
 			map->mtd->size = map->map.size;
 		}
 		if (window->rsrc.parent) {
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 2682ab5..998a27d 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -70,7 +70,7 @@
 
 static const char *probes[] = { "cmdlinepart", NULL };
 
-int __init init_impa7(void)
+static int __init init_impa7(void)
 {
 	static const char *rom_probe_types[] = PROBETYPES;
 	const char **type;
diff --git a/drivers/mtd/maps/ipaq-flash.c b/drivers/mtd/maps/ipaq-flash.c
index ed58f6a..748c85f 100644
--- a/drivers/mtd/maps/ipaq-flash.c
+++ b/drivers/mtd/maps/ipaq-flash.c
@@ -202,7 +202,7 @@
 
 static int __init h1900_special_case(void);
 
-int __init ipaq_mtd_init(void)
+static int __init ipaq_mtd_init(void)
 {
 	struct mtd_partition *parts = NULL;
 	int nb_parts = 0;
diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c
index 706f673..0eb5a7c 100644
--- a/drivers/mtd/maps/mbx860.c
+++ b/drivers/mtd/maps/mbx860.c
@@ -55,7 +55,7 @@
 	.bankwidth = 4,
 };
 
-int __init init_mbx(void)
+static int __init init_mbx(void)
 {
 	printk(KERN_NOTICE "Motorola MBX flash device: 0x%x at 0x%x\n", WINDOW_SIZE*4, WINDOW_ADDR);
 	mbx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 965e6c6..a97133e 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -226,7 +226,7 @@
 
 	if ((amd_mtd = do_map_probe("jedec_probe", &nettel_amd_map))) {
 		printk(KERN_NOTICE "SNAPGEAR: AMD flash device size = %dK\n",
-			amd_mtd->size>>10);
+			(int)(amd_mtd->size>>10));
 
 		amd_mtd->owner = THIS_MODULE;
 
@@ -357,13 +357,12 @@
 		*intel1par = 0;
 	}
 
-	printk(KERN_NOTICE "SNAPGEAR: Intel flash device size = %dK\n",
-		(intel_mtd->size >> 10));
+	printk(KERN_NOTICE "SNAPGEAR: Intel flash device size = %lldKiB\n",
+	       (unsigned long long)(intel_mtd->size >> 10));
 
 	intel_mtd->owner = THIS_MODULE;
 
-	num_intel_partitions = sizeof(nettel_intel_partitions) /
-		sizeof(nettel_intel_partitions[0]);
+	num_intel_partitions = ARRAY_SIZE(nettel_intel_partitions);
 
 	if (intelboot) {
 		/*
diff --git a/drivers/mtd/maps/octagon-5066.c b/drivers/mtd/maps/octagon-5066.c
index 43e04c1..2b2e450 100644
--- a/drivers/mtd/maps/octagon-5066.c
+++ b/drivers/mtd/maps/octagon-5066.c
@@ -184,7 +184,7 @@
 	release_region(PAGE_IO, 1);
 }
 
-int __init init_oct5066(void)
+static int __init init_oct5066(void)
 {
 	int i;
 	int ret = 0;
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 1db16e5..8774366 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -29,7 +29,6 @@
 	struct map_info		map[MAX_RESOURCES];
 #ifdef CONFIG_MTD_PARTITIONS
 	int			nr_parts;
-	struct mtd_partition	*parts;
 #endif
 };
 
@@ -56,14 +55,10 @@
 	for (i = 0; i < MAX_RESOURCES; i++) {
 		if (info->mtd[i] != NULL) {
 #ifdef CONFIG_MTD_PARTITIONS
-			if (info->nr_parts) {
+			if (info->nr_parts || physmap_data->nr_parts)
 				del_mtd_partitions(info->mtd[i]);
-				kfree(info->parts);
-			} else if (physmap_data->nr_parts) {
-				del_mtd_partitions(info->mtd[i]);
-			} else {
+			else
 				del_mtd_device(info->mtd[i]);
-			}
 #else
 			del_mtd_device(info->mtd[i]);
 #endif
@@ -73,7 +68,12 @@
 	return 0;
 }
 
-static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
+static const char *rom_probe_types[] = {
+					"cfi_probe",
+					"jedec_probe",
+					"qinfo_probe",
+					"map_rom",
+					NULL };
 #ifdef CONFIG_MTD_PARTITIONS
 static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
 #endif
@@ -86,6 +86,9 @@
 	int err = 0;
 	int i;
 	int devices_found = 0;
+#ifdef CONFIG_MTD_PARTITIONS
+	struct mtd_partition *parts;
+#endif
 
 	physmap_data = dev->dev.platform_data;
 	if (physmap_data == NULL)
@@ -119,6 +122,7 @@
 		info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1;
 		info->map[i].bankwidth = physmap_data->width;
 		info->map[i].set_vpp = physmap_data->set_vpp;
+		info->map[i].pfow_base = physmap_data->pfow_base;
 
 		info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys,
 						 info->map[i].size);
@@ -163,9 +167,10 @@
 		goto err_out;
 
 #ifdef CONFIG_MTD_PARTITIONS
-	err = parse_mtd_partitions(info->cmtd, part_probe_types, &info->parts, 0);
+	err = parse_mtd_partitions(info->cmtd, part_probe_types, &parts, 0);
 	if (err > 0) {
-		add_mtd_partitions(info->cmtd, info->parts, err);
+		add_mtd_partitions(info->cmtd, parts, err);
+		kfree(parts);
 		return 0;
 	}
 
@@ -251,14 +256,7 @@
 };
 
 
-#ifdef CONFIG_MTD_PHYSMAP_LEN
-#if CONFIG_MTD_PHYSMAP_LEN != 0
-#warning using PHYSMAP compat code
-#define PHYSMAP_COMPAT
-#endif
-#endif
-
-#ifdef PHYSMAP_COMPAT
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
 static struct physmap_flash_data physmap_flash_data = {
 	.width		= CONFIG_MTD_PHYSMAP_BANKWIDTH,
 };
@@ -302,7 +300,7 @@
 	int err;
 
 	err = platform_driver_register(&physmap_flash_driver);
-#ifdef PHYSMAP_COMPAT
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
 	if (err == 0)
 		platform_device_register(&physmap_flash);
 #endif
@@ -312,7 +310,7 @@
 
 static void __exit physmap_exit(void)
 {
-#ifdef PHYSMAP_COMPAT
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
 	platform_device_unregister(&physmap_flash);
 #endif
 	platform_driver_unregister(&physmap_flash_driver);
@@ -326,8 +324,7 @@
 MODULE_DESCRIPTION("Generic configurable MTD map driver");
 
 /* legacy platform drivers can't hotplug or coldplg */
-#ifndef PHYSMAP_COMPAT
+#ifndef CONFIG_MTD_PHYSMAP_COMPAT
 /* work with hotplug and coldplug */
 MODULE_ALIAS("platform:physmap-flash");
 #endif
-
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index f43ba28..4768bd5 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -48,7 +48,7 @@
 
 #define DEBUG_MARKER printk(KERN_NOTICE "%s[%d]\n", __func__, __LINE__)
 
-int __init init_msp_flash(void)
+static int __init init_msp_flash(void)
 {
 	int i, j;
 	int offset, coff;
diff --git a/drivers/mtd/maps/redwood.c b/drivers/mtd/maps/redwood.c
index de002eb..933c0b6 100644
--- a/drivers/mtd/maps/redwood.c
+++ b/drivers/mtd/maps/redwood.c
@@ -122,7 +122,7 @@
 
 static struct mtd_info *redwood_mtd;
 
-int __init init_redwood_flash(void)
+static int __init init_redwood_flash(void)
 {
 	int err;
 
diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c
index 14d90ed..3e3ef53 100644
--- a/drivers/mtd/maps/rpxlite.c
+++ b/drivers/mtd/maps/rpxlite.c
@@ -23,7 +23,7 @@
 	.phys = WINDOW_ADDR,
 };
 
-int __init init_rpxlite(void)
+static int __init init_rpxlite(void)
 {
 	printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR);
 	rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c
index 6e1e99c..d5374cd 100644
--- a/drivers/mtd/maps/sbc8240.c
+++ b/drivers/mtd/maps/sbc8240.c
@@ -136,7 +136,7 @@
 #endif	/* CONFIG_MTD_PARTITIONS */
 
 
-int __init init_sbc8240_mtd (void)
+static int __init init_sbc8240_mtd (void)
 {
 	static struct _cjs {
 		u_long addr;
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 21169e6..7e329f0 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -118,7 +118,8 @@
 		struct mtd_erase_region_info *region = &mtd->eraseregions[i];
 
 		if (region->numblocks * region->erasesize > mtd->size) {
-			region->numblocks = (mtd->size / region->erasesize);
+			region->numblocks = ((unsigned long)mtd->size /
+						region->erasesize);
 			done = 1;
 		} else {
 			region->numblocks = 0;
@@ -187,8 +188,9 @@
 		return -ENODEV;
 	}
 
-	printk(KERN_NOTICE MODNAME ": chip size 0x%x at offset 0x%x\n",
-	       scb2_mtd->size, SCB2_WINDOW - scb2_mtd->size);
+	printk(KERN_NOTICE MODNAME ": chip size 0x%llx at offset 0x%llx\n",
+	       (unsigned long long)scb2_mtd->size,
+	       (unsigned long long)(SCB2_WINDOW - scb2_mtd->size));
 
 	add_mtd_device(scb2_mtd);
 
diff --git a/drivers/mtd/maps/sharpsl-flash.c b/drivers/mtd/maps/sharpsl-flash.c
index 026eab0..b392f09 100644
--- a/drivers/mtd/maps/sharpsl-flash.c
+++ b/drivers/mtd/maps/sharpsl-flash.c
@@ -47,7 +47,7 @@
 	}
 };
 
-int __init init_sharpsl(void)
+static int __init init_sharpsl(void)
 {
 	struct mtd_partition *parts;
 	int nb_parts = 0;
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index a5d3d85..6014698 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -109,7 +109,7 @@
 };
 #endif
 
-int __init init_tqm_mtd(void)
+static int __init init_tqm_mtd(void)
 {
 	int idx = 0, ret = 0;
 	unsigned long flash_addr, flash_size, mtd_size = 0;
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 0dc645f..81756e3 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -51,7 +51,7 @@
 
 /****************************************************************************/
 
-int __init uclinux_mtd_init(void)
+static int __init uclinux_mtd_init(void)
 {
 	struct mtd_info *mtd;
 	struct map_info *mapp;
@@ -94,7 +94,7 @@
 
 /****************************************************************************/
 
-void __exit uclinux_mtd_cleanup(void)
+static void __exit uclinux_mtd_cleanup(void)
 {
 	if (uclinux_ram_mtdinfo) {
 		del_mtd_partitions(uclinux_ram_mtdinfo);
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
index 5a0c9a3..6d452dc 100644
--- a/drivers/mtd/maps/vmax301.c
+++ b/drivers/mtd/maps/vmax301.c
@@ -146,7 +146,7 @@
 	iounmap((void *)vmax_map[0].map_priv_1 - WINDOW_START);
 }
 
-int __init init_vmax301(void)
+static int __init init_vmax301(void)
 {
 	int i;
 	unsigned long iomapadr;
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 413b0cf..933a2b6 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -74,7 +74,7 @@
 	}							\
 } while (0);
 
-int __init init_sbc82xx_flash(void)
+static int __init init_sbc82xx_flash(void)
 {
 	volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl;
 	int bigflash;
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index bcffeda..e9ec59e 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -450,16 +450,20 @@
 		if (!erase)
 			ret = -ENOMEM;
 		else {
+			struct erase_info_user einfo;
+
 			wait_queue_head_t waitq;
 			DECLARE_WAITQUEUE(wait, current);
 
 			init_waitqueue_head(&waitq);
 
-			if (copy_from_user(&erase->addr, argp,
+			if (copy_from_user(&einfo, argp,
 				    sizeof(struct erase_info_user))) {
 				kfree(erase);
 				return -EFAULT;
 			}
+			erase->addr = einfo.start;
+			erase->len = einfo.length;
 			erase->mtd = mtd;
 			erase->callback = mtdchar_erase_callback;
 			erase->priv = (unsigned long)&waitq;
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 1a05cf3..3dbb1b3 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -197,7 +197,7 @@
 			continue;
 		}
 
-		size = min(total_len, (size_t)(subdev->size - to));
+		size = min_t(uint64_t, total_len, subdev->size - to);
 		wsize = size; /* store for future use */
 
 		entry_high = entry_low;
@@ -385,7 +385,7 @@
 	struct mtd_concat *concat = CONCAT(mtd);
 	struct mtd_info *subdev;
 	int i, err;
-	u_int32_t length, offset = 0;
+	uint64_t length, offset = 0;
 	struct erase_info *erase;
 
 	if (!(mtd->flags & MTD_WRITEABLE))
@@ -518,7 +518,7 @@
 	return 0;
 }
 
-static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_concat *concat = CONCAT(mtd);
 	int i, err = -EINVAL;
@@ -528,7 +528,7 @@
 
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
-		size_t size;
+		uint64_t size;
 
 		if (ofs >= subdev->size) {
 			size = 0;
@@ -556,7 +556,7 @@
 	return err;
 }
 
-static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_concat *concat = CONCAT(mtd);
 	int i, err = 0;
@@ -566,7 +566,7 @@
 
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
-		size_t size;
+		uint64_t size;
 
 		if (ofs >= subdev->size) {
 			size = 0;
@@ -696,7 +696,7 @@
 	int i;
 	size_t size;
 	struct mtd_concat *concat;
-	u_int32_t max_erasesize, curr_erasesize;
+	uint32_t max_erasesize, curr_erasesize;
 	int num_erase_region;
 
 	printk(KERN_NOTICE "Concatenating MTD devices:\n");
@@ -842,12 +842,14 @@
 		concat->mtd.erasesize = curr_erasesize;
 		concat->mtd.numeraseregions = 0;
 	} else {
+		uint64_t tmp64;
+
 		/*
 		 * erase block size varies across the subdevices: allocate
 		 * space to store the data describing the variable erase regions
 		 */
 		struct mtd_erase_region_info *erase_region_p;
-		u_int32_t begin, position;
+		uint64_t begin, position;
 
 		concat->mtd.erasesize = max_erasesize;
 		concat->mtd.numeraseregions = num_erase_region;
@@ -879,8 +881,9 @@
 					erase_region_p->offset = begin;
 					erase_region_p->erasesize =
 					    curr_erasesize;
-					erase_region_p->numblocks =
-					    (position - begin) / curr_erasesize;
+					tmp64 = position - begin;
+					do_div(tmp64, curr_erasesize);
+					erase_region_p->numblocks = tmp64;
 					begin = position;
 
 					curr_erasesize = subdev[i]->erasesize;
@@ -897,9 +900,9 @@
 						erase_region_p->offset = begin;
 						erase_region_p->erasesize =
 						    curr_erasesize;
-						erase_region_p->numblocks =
-						    (position -
-						     begin) / curr_erasesize;
+						tmp64 = position - begin;
+						do_div(tmp64, curr_erasesize);
+						erase_region_p->numblocks = tmp64;
 						begin = position;
 
 						curr_erasesize =
@@ -909,14 +912,16 @@
 					}
 					position +=
 					    subdev[i]->eraseregions[j].
-					    numblocks * curr_erasesize;
+					    numblocks * (uint64_t)curr_erasesize;
 				}
 			}
 		}
 		/* Now write the final entry */
 		erase_region_p->offset = begin;
 		erase_region_p->erasesize = curr_erasesize;
-		erase_region_p->numblocks = (position - begin) / curr_erasesize;
+		tmp64 = position - begin;
+		do_div(tmp64, curr_erasesize);
+		erase_region_p->numblocks = tmp64;
 	}
 
 	return &concat->mtd;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index a9d2469..76fe0a1 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -57,6 +57,19 @@
 			mtd->index = i;
 			mtd->usecount = 0;
 
+			if (is_power_of_2(mtd->erasesize))
+				mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
+			else
+				mtd->erasesize_shift = 0;
+
+			if (is_power_of_2(mtd->writesize))
+				mtd->writesize_shift = ffs(mtd->writesize) - 1;
+			else
+				mtd->writesize_shift = 0;
+
+			mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
+			mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
+
 			/* Some chips always power up locked. Unlock them now */
 			if ((mtd->flags & MTD_WRITEABLE)
 			    && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
@@ -344,7 +357,8 @@
 	if (!this)
 		return 0;
 
-	return sprintf(buf, "mtd%d: %8.8x %8.8x \"%s\"\n", i, this->size,
+	return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", i,
+		       (unsigned long long)this->size,
 		       this->erasesize, this->name);
 }
 
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index aebb3b2..1a6b3be 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -80,9 +80,9 @@
 	if (ret) {
 		set_current_state(TASK_RUNNING);
 		remove_wait_queue(&wait_q, &wait);
-		printk (KERN_WARNING "mtdoops: erase of region [0x%x, 0x%x] "
+		printk (KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] "
 				     "on \"%s\" failed\n",
-			erase.addr, erase.len, mtd->name);
+			(unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name);
 		return ret;
 	}
 
@@ -289,7 +289,10 @@
 	}
 
 	cxt->mtd = mtd;
-	cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE;
+	if (mtd->size > INT_MAX)
+		cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE;
+	else
+		cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE;
 
 	find_next_position(cxt);
 
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 3728913..144e6b6 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -26,7 +26,7 @@
 struct mtd_part {
 	struct mtd_info mtd;
 	struct mtd_info *master;
-	u_int32_t offset;
+	uint64_t offset;
 	int index;
 	struct list_head list;
 	int registered;
@@ -235,7 +235,7 @@
 }
 EXPORT_SYMBOL_GPL(mtd_erase_callback);
 
-static int part_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_part *part = PART(mtd);
 	if ((len + ofs) > mtd->size)
@@ -243,7 +243,7 @@
 	return part->master->lock(part->master, ofs + part->offset, len);
 }
 
-static int part_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	struct mtd_part *part = PART(mtd);
 	if ((len + ofs) > mtd->size)
@@ -317,7 +317,7 @@
 
 static struct mtd_part *add_one_partition(struct mtd_info *master,
 		const struct mtd_partition *part, int partno,
-		u_int32_t cur_offset)
+		uint64_t cur_offset)
 {
 	struct mtd_part *slave;
 
@@ -395,19 +395,19 @@
 		slave->offset = cur_offset;
 	if (slave->offset == MTDPART_OFS_NXTBLK) {
 		slave->offset = cur_offset;
-		if ((cur_offset % master->erasesize) != 0) {
+		if (mtd_mod_by_eb(cur_offset, master) != 0) {
 			/* Round up to next erasesize */
-			slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
+			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
 			printk(KERN_NOTICE "Moving partition %d: "
-			       "0x%08x -> 0x%08x\n", partno,
-			       cur_offset, slave->offset);
+			       "0x%012llx -> 0x%012llx\n", partno,
+			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
 		}
 	}
 	if (slave->mtd.size == MTDPART_SIZ_FULL)
 		slave->mtd.size = master->size - slave->offset;
 
-	printk(KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
-		slave->offset + slave->mtd.size, slave->mtd.name);
+	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
+		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
 
 	/* let's do some sanity checks */
 	if (slave->offset >= master->size) {
@@ -420,13 +420,13 @@
 	}
 	if (slave->offset + slave->mtd.size > master->size) {
 		slave->mtd.size = master->size - slave->offset;
-		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
-			part->name, master->name, slave->mtd.size);
+		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
+			part->name, master->name, (unsigned long long)slave->mtd.size);
 	}
 	if (master->numeraseregions > 1) {
 		/* Deal with variable erase size stuff */
 		int i, max = master->numeraseregions;
-		u32 end = slave->offset + slave->mtd.size;
+		u64 end = slave->offset + slave->mtd.size;
 		struct mtd_erase_region_info *regions = master->eraseregions;
 
 		/* Find the first erase regions which is part of this
@@ -449,7 +449,7 @@
 	}
 
 	if ((slave->mtd.flags & MTD_WRITEABLE) &&
-	    (slave->offset % slave->mtd.erasesize)) {
+	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
 		/* Doesn't start on a boundary of major erase size */
 		/* FIXME: Let it be writable if it is on a boundary of
 		 * _minor_ erase size though */
@@ -458,7 +458,7 @@
 			part->name);
 	}
 	if ((slave->mtd.flags & MTD_WRITEABLE) &&
-	    (slave->mtd.size % slave->mtd.erasesize)) {
+	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
 		slave->mtd.flags &= ~MTD_WRITEABLE;
 		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
 			part->name);
@@ -466,7 +466,7 @@
 
 	slave->mtd.ecclayout = master->ecclayout;
 	if (master->block_isbad) {
-		uint32_t offs = 0;
+		uint64_t offs = 0;
 
 		while (offs < slave->mtd.size) {
 			if (master->block_isbad(master,
@@ -501,7 +501,7 @@
 		       int nbparts)
 {
 	struct mtd_part *slave;
-	u_int32_t cur_offset = 0;
+	uint64_t cur_offset = 0;
 	int i;
 
 	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index f8ae040..8b12e6e 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -163,6 +163,13 @@
 	  incorrect ECC generation, and if using these, the default of
 	  software ECC is preferable.
 
+config MTD_NAND_NDFC
+	tristate "NDFC NAND Flash Controller"
+	depends on 4xx
+	select MTD_NAND_ECC_SMC
+	help
+	  NDFC NAND Flash Controllers are integrated in IBM/AMCC's 4xx SoCs.
+
 config MTD_NAND_S3C2410_CLKSTOP
 	bool "S3C2410 NAND IDLE clock stop"
 	depends on MTD_NAND_S3C2410
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 9623803..6d96491 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -676,11 +676,11 @@
 		goto error;
 
 	al->write_out = usb_sndbulkpipe(al->dev,
-			ep_wr->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+			usb_endpoint_num(ep_wr));
 	al->bulk_in = usb_rcvbulkpipe(al->dev,
-			ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+			usb_endpoint_num(ep_in));
 	al->bulk_out = usb_sndbulkpipe(al->dev,
-			ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+			usb_endpoint_num(ep_out));
 
 	/* second device is identical up to now */
 	memcpy(al+1, al, sizeof(*al));
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index b8064bf..22a6b2e 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -90,7 +90,7 @@
 module_param_array(timing, int, &numtimings, 0644);
 
 #ifdef CONFIG_MTD_PARTITIONS
-static const char *part_probes[] = { "RedBoot", NULL };
+static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
 #endif
 
 /* Hrm. Why isn't this already conditional on something in the struct device? */
@@ -805,10 +805,13 @@
 	add_mtd_device(mtd);
 
 #ifdef CONFIG_MTD_PARTITIONS
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+	mtd->name = "cafe_nand";
+#endif
 	nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
 	if (nr_parts > 0) {
 		cafe->parts = parts;
-		dev_info(&cafe->pdev->dev, "%d RedBoot partitions found\n", nr_parts);
+		dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts);
 		add_mtd_partitions(mtd, parts, nr_parts);
 	}
 #endif
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 4aa5bd6..65929db 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -777,7 +777,9 @@
 	/* Fill in fsl_elbc_mtd structure */
 	priv->mtd.priv = chip;
 	priv->mtd.owner = THIS_MODULE;
-	priv->fmr = 0; /* rest filled in later */
+
+	/* Set the ECCM according to the settings in the bootloader. */
+	priv->fmr = in_be32(&lbc->fmr) & FMR_ECCM;
 
 	/* fill in nand_chip structure */
 	/* set up function call table */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 0a9c9cd..0c3afcc 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2014,13 +2014,14 @@
 int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
 		    int allowbbt)
 {
-	int page, len, status, pages_per_block, ret, chipnr;
+	int page, status, pages_per_block, ret, chipnr;
 	struct nand_chip *chip = mtd->priv;
-	int rewrite_bbt[NAND_MAX_CHIPS]={0};
+	loff_t rewrite_bbt[NAND_MAX_CHIPS]={0};
 	unsigned int bbt_masked_page = 0xffffffff;
+	loff_t len;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%08x, len = %i\n",
-	      (unsigned int)instr->addr, (unsigned int)instr->len);
+	DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%012llx, len = %llu\n",
+	      (unsigned long long)instr->addr, (unsigned long long)instr->len);
 
 	/* Start address must align on block boundary */
 	if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) {
@@ -2116,7 +2117,8 @@
 			DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
 			      "Failed erase, page 0x%08x\n", page);
 			instr->state = MTD_ERASE_FAILED;
-			instr->fail_addr = (page << chip->page_shift);
+			instr->fail_addr =
+				((loff_t)page << chip->page_shift);
 			goto erase_exit;
 		}
 
@@ -2126,7 +2128,8 @@
 		 */
 		if (bbt_masked_page != 0xffffffff &&
 		    (page & BBT_PAGE_MASK) == bbt_masked_page)
-			    rewrite_bbt[chipnr] = (page << chip->page_shift);
+			    rewrite_bbt[chipnr] =
+					((loff_t)page << chip->page_shift);
 
 		/* Increment page address and decrement length */
 		len -= (1 << chip->phys_erase_shift);
@@ -2173,7 +2176,7 @@
 			continue;
 		/* update the BBT for chip */
 		DEBUG(MTD_DEBUG_LEVEL0, "nand_erase_nand: nand_update_bbt "
-		      "(%d:0x%0x 0x%0x)\n", chipnr, rewrite_bbt[chipnr],
+		      "(%d:0x%0llx 0x%0x)\n", chipnr, rewrite_bbt[chipnr],
 		      chip->bbt_td->pages[chipnr]);
 		nand_update_bbt(mtd, rewrite_bbt[chipnr]);
 	}
@@ -2365,7 +2368,7 @@
 	if (!mtd->name)
 		mtd->name = type->name;
 
-	chip->chipsize = type->chipsize << 20;
+	chip->chipsize = (uint64_t)type->chipsize << 20;
 
 	/* Newer devices have all the information in additional id bytes */
 	if (!type->pagesize) {
@@ -2423,7 +2426,10 @@
 
 	chip->bbt_erase_shift = chip->phys_erase_shift =
 		ffs(mtd->erasesize) - 1;
-	chip->chip_shift = ffs(chip->chipsize) - 1;
+	if (chip->chipsize & 0xffffffff)
+		chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
+	else
+		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1;
 
 	/* Set the bad block position */
 	chip->badblockpos = mtd->writesize > 512 ?
@@ -2517,7 +2523,6 @@
 /**
  * nand_scan_tail - [NAND Interface] Scan for the NAND device
  * @mtd:	    MTD device structure
- * @maxchips:	    Number of chips to scan for
  *
  * This is the second phase of the normal nand_scan() function. It
  * fills out all the uninitialized function pointers with the defaults
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 0b1c485..55c23e5 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -171,16 +171,16 @@
 				if (tmp == msk)
 					continue;
 				if (reserved_block_code && (tmp == reserved_block_code)) {
-					printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%08x\n",
-					       ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+					printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%012llx\n",
+					       (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
 					this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
 					mtd->ecc_stats.bbtblocks++;
 					continue;
 				}
 				/* Leave it for now, if its matured we can move this
 				 * message to MTD_DEBUG_LEVEL0 */
-				printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%08x\n",
-				       ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+				printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%012llx\n",
+				       (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
 				/* Factory marked bad or worn out ? */
 				if (tmp == 0)
 					this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
@@ -284,7 +284,7 @@
 
 	/* Read the primary version, if available */
 	if (td->options & NAND_BBT_VERSION) {
-		scan_read_raw(mtd, buf, td->pages[0] << this->page_shift,
+		scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
 			      mtd->writesize);
 		td->version[0] = buf[mtd->writesize + td->veroffs];
 		printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
@@ -293,7 +293,7 @@
 
 	/* Read the mirror version, if available */
 	if (md && (md->options & NAND_BBT_VERSION)) {
-		scan_read_raw(mtd, buf, md->pages[0] << this->page_shift,
+		scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
 			      mtd->writesize);
 		md->version[0] = buf[mtd->writesize + md->veroffs];
 		printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
@@ -411,7 +411,7 @@
 		numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
 		startblock = chip * numblocks;
 		numblocks += startblock;
-		from = startblock << (this->bbt_erase_shift - 1);
+		from = (loff_t)startblock << (this->bbt_erase_shift - 1);
 	}
 
 	for (i = startblock; i < numblocks;) {
@@ -428,8 +428,8 @@
 
 		if (ret) {
 			this->bbt[i >> 3] |= 0x03 << (i & 0x6);
-			printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
-			       i >> 1, (unsigned int)from);
+			printk(KERN_WARNING "Bad eraseblock %d at 0x%012llx\n",
+			       i >> 1, (unsigned long long)from);
 			mtd->ecc_stats.badblocks++;
 		}
 
@@ -495,7 +495,7 @@
 		for (block = 0; block < td->maxblocks; block++) {
 
 			int actblock = startblock + dir * block;
-			loff_t offs = actblock << this->bbt_erase_shift;
+			loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
 
 			/* Read first page */
 			scan_read_raw(mtd, buf, offs, mtd->writesize);
@@ -719,7 +719,7 @@
 
 		memset(&einfo, 0, sizeof(einfo));
 		einfo.mtd = mtd;
-		einfo.addr = (unsigned long)to;
+		einfo.addr = to;
 		einfo.len = 1 << this->bbt_erase_shift;
 		res = nand_erase_nand(mtd, &einfo, 1);
 		if (res < 0)
@@ -729,8 +729,8 @@
 		if (res < 0)
 			goto outerr;
 
-		printk(KERN_DEBUG "Bad block table written to 0x%08x, version "
-		       "0x%02X\n", (unsigned int)to, td->version[chip]);
+		printk(KERN_DEBUG "Bad block table written to 0x%012llx, version "
+		       "0x%02X\n", (unsigned long long)to, td->version[chip]);
 
 		/* Mark it as used */
 		td->pages[chip] = page;
@@ -910,7 +910,7 @@
 			newval = oldval | (0x2 << (block & 0x06));
 			this->bbt[(block >> 3)] = newval;
 			if ((oldval != newval) && td->reserved_block_code)
-				nand_update_bbt(mtd, block << (this->bbt_erase_shift - 1));
+				nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1));
 			continue;
 		}
 		update = 0;
@@ -931,7 +931,7 @@
 		   new ones have been marked, then we need to update the stored
 		   bbts.  This should only happen once. */
 		if (update && td->reserved_block_code)
-			nand_update_bbt(mtd, (block - 2) << (this->bbt_erase_shift - 1));
+			nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
 	}
 }
 
@@ -1027,7 +1027,6 @@
 	if (!this->bbt || !td)
 		return -EINVAL;
 
-	len = mtd->size >> (this->bbt_erase_shift + 2);
 	/* Allocate a temporary buffer for one eraseblock incl. oob */
 	len = (1 << this->bbt_erase_shift);
 	len += (len >> this->page_shift) * mtd->oobsize;
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index ae7c577..cd0711b 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -38,6 +38,9 @@
 #include <linux/delay.h>
 #include <linux/list.h>
 #include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
 
 /* Default simulator parameters values */
 #if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE)  || \
@@ -100,6 +103,7 @@
 static char *gravepages = NULL;
 static unsigned int rptwear = 0;
 static unsigned int overridesize = 0;
+static char *cache_file = NULL;
 
 module_param(first_id_byte,  uint, 0400);
 module_param(second_id_byte, uint, 0400);
@@ -122,12 +126,13 @@
 module_param(gravepages,     charp, 0400);
 module_param(rptwear,        uint, 0400);
 module_param(overridesize,   uint, 0400);
+module_param(cache_file,     charp, 0400);
 
 MODULE_PARM_DESC(first_id_byte,  "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
 MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
 MODULE_PARM_DESC(third_id_byte,  "The third byte returned by NAND Flash 'read ID' command");
 MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command");
-MODULE_PARM_DESC(access_delay,   "Initial page access delay (microiseconds)");
+MODULE_PARM_DESC(access_delay,   "Initial page access delay (microseconds)");
 MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds");
 MODULE_PARM_DESC(erase_delay,    "Sector erase delay (milliseconds)");
 MODULE_PARM_DESC(output_cycle,   "Word output (from flash) time (nanodeconds)");
@@ -153,6 +158,7 @@
 MODULE_PARM_DESC(overridesize,   "Specifies the NAND Flash size overriding the ID bytes. "
 				 "The size is specified in erase blocks and as the exponent of a power of two"
 				 " e.g. 5 means a size of 32 erase blocks");
+MODULE_PARM_DESC(cache_file,     "File to use to cache nand pages instead of memory");
 
 /* The largest possible page size */
 #define NS_LARGEST_PAGE_SIZE	2048
@@ -266,6 +272,9 @@
  */
 #define NS_MAX_PREVSTATES 1
 
+/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
+#define NS_MAX_HELD_PAGES 16
+
 /*
  * A union to represent flash memory contents and flash buffer.
  */
@@ -295,6 +304,9 @@
 	/* The simulated NAND flash pages array */
 	union ns_mem *pages;
 
+	/* Slab allocator for nand pages */
+	struct kmem_cache *nand_pages_slab;
+
 	/* Internal buffer of page + OOB size bytes */
 	union ns_mem buf;
 
@@ -335,6 +347,13 @@
                 int ale; /* address Latch Enable */
                 int wp;  /* write Protect */
         } lines;
+
+	/* Fields needed when using a cache file */
+	struct file *cfile; /* Open file */
+	unsigned char *pages_written; /* Which pages have been written */
+	void *file_buf;
+	struct page *held_pages[NS_MAX_HELD_PAGES];
+	int held_cnt;
 };
 
 /*
@@ -420,25 +439,69 @@
 static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];
 
 /*
- * Allocate array of page pointers and initialize the array to NULL
- * pointers.
+ * Allocate the array of page pointers, create a slab cache for the pages
+ * and initialize the array with NULL pointers.
  *
  * RETURNS: 0 if success, -ENOMEM if memory alloc fails.
  */
 static int alloc_device(struct nandsim *ns)
 {
-	int i;
+	struct file *cfile;
+	int i, err;
+
+	if (cache_file) {
+		cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
+		if (IS_ERR(cfile))
+			return PTR_ERR(cfile);
+		if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) {
+			NS_ERR("alloc_device: cache file not readable\n");
+			err = -EINVAL;
+			goto err_close;
+		}
+		if (!cfile->f_op->write && !cfile->f_op->aio_write) {
+			NS_ERR("alloc_device: cache file not writeable\n");
+			err = -EINVAL;
+			goto err_close;
+		}
+		ns->pages_written = vmalloc(ns->geom.pgnum);
+		if (!ns->pages_written) {
+			NS_ERR("alloc_device: unable to allocate pages written array\n");
+			err = -ENOMEM;
+			goto err_close;
+		}
+		ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
+		if (!ns->file_buf) {
+			NS_ERR("alloc_device: unable to allocate file buf\n");
+			err = -ENOMEM;
+			goto err_free;
+		}
+		ns->cfile = cfile;
+		memset(ns->pages_written, 0, ns->geom.pgnum);
+		return 0;
+	}
 
 	ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
 	if (!ns->pages) {
-		NS_ERR("alloc_map: unable to allocate page array\n");
+		NS_ERR("alloc_device: unable to allocate page array\n");
 		return -ENOMEM;
 	}
 	for (i = 0; i < ns->geom.pgnum; i++) {
 		ns->pages[i].byte = NULL;
 	}
+	ns->nand_pages_slab = kmem_cache_create("nandsim",
+						ns->geom.pgszoob, 0, 0, NULL);
+	if (!ns->nand_pages_slab) {
+		NS_ERR("cache_create: unable to create kmem_cache\n");
+		return -ENOMEM;
+	}
 
 	return 0;
+
+err_free:
+	vfree(ns->pages_written);
+err_close:
+	filp_close(cfile, NULL);
+	return err;
 }
 
 /*
@@ -448,11 +511,20 @@
 {
 	int i;
 
+	if (ns->cfile) {
+		kfree(ns->file_buf);
+		vfree(ns->pages_written);
+		filp_close(ns->cfile, NULL);
+		return;
+	}
+
 	if (ns->pages) {
 		for (i = 0; i < ns->geom.pgnum; i++) {
 			if (ns->pages[i].byte)
-				kfree(ns->pages[i].byte);
+				kmem_cache_free(ns->nand_pages_slab,
+						ns->pages[i].byte);
 		}
+		kmem_cache_destroy(ns->nand_pages_slab);
 		vfree(ns->pages);
 	}
 }
@@ -464,7 +536,7 @@
 	return kstrdup(buf, GFP_KERNEL);
 }
 
-static u_int64_t divide(u_int64_t n, u_int32_t d)
+static uint64_t divide(uint64_t n, uint32_t d)
 {
 	do_div(n, d);
 	return n;
@@ -480,8 +552,8 @@
 	struct nand_chip *chip = (struct nand_chip *)mtd->priv;
 	struct nandsim   *ns   = (struct nandsim *)(chip->priv);
 	int i, ret = 0;
-	u_int64_t remains;
-	u_int64_t next_offset;
+	uint64_t remains;
+	uint64_t next_offset;
 
 	if (NS_IS_INITIALIZED(ns)) {
 		NS_ERR("init_nandsim: nandsim is already initialized\n");
@@ -548,7 +620,7 @@
 	remains = ns->geom.totsz;
 	next_offset = 0;
 	for (i = 0; i < parts_num; ++i) {
-		u_int64_t part_sz = (u_int64_t)parts[i] * ns->geom.secsz;
+		uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
 
 		if (!part_sz || part_sz > remains) {
 			NS_ERR("bad partition size.\n");
@@ -1211,6 +1283,97 @@
 	return -1;
 }
 
+static void put_pages(struct nandsim *ns)
+{
+	int i;
+
+	for (i = 0; i < ns->held_cnt; i++)
+		page_cache_release(ns->held_pages[i]);
+}
+
+/* Get page cache pages in advance to provide NOFS memory allocation */
+static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
+{
+	pgoff_t index, start_index, end_index;
+	struct page *page;
+	struct address_space *mapping = file->f_mapping;
+
+	start_index = pos >> PAGE_CACHE_SHIFT;
+	end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+	if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
+		return -EINVAL;
+	ns->held_cnt = 0;
+	for (index = start_index; index <= end_index; index++) {
+		page = find_get_page(mapping, index);
+		if (page == NULL) {
+			page = find_or_create_page(mapping, index, GFP_NOFS);
+			if (page == NULL) {
+				write_inode_now(mapping->host, 1);
+				page = find_or_create_page(mapping, index, GFP_NOFS);
+			}
+			if (page == NULL) {
+				put_pages(ns);
+				return -ENOMEM;
+			}
+			unlock_page(page);
+		}
+		ns->held_pages[ns->held_cnt++] = page;
+	}
+	return 0;
+}
+
+static int set_memalloc(void)
+{
+	if (current->flags & PF_MEMALLOC)
+		return 0;
+	current->flags |= PF_MEMALLOC;
+	return 1;
+}
+
+static void clear_memalloc(int memalloc)
+{
+	if (memalloc)
+		current->flags &= ~PF_MEMALLOC;
+}
+
+static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
+{
+	mm_segment_t old_fs;
+	ssize_t tx;
+	int err, memalloc;
+
+	err = get_pages(ns, file, count, *pos);
+	if (err)
+		return err;
+	old_fs = get_fs();
+	set_fs(get_ds());
+	memalloc = set_memalloc();
+	tx = vfs_read(file, (char __user *)buf, count, pos);
+	clear_memalloc(memalloc);
+	set_fs(old_fs);
+	put_pages(ns);
+	return tx;
+}
+
+static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
+{
+	mm_segment_t old_fs;
+	ssize_t tx;
+	int err, memalloc;
+
+	err = get_pages(ns, file, count, *pos);
+	if (err)
+		return err;
+	old_fs = get_fs();
+	set_fs(get_ds());
+	memalloc = set_memalloc();
+	tx = vfs_write(file, (char __user *)buf, count, pos);
+	clear_memalloc(memalloc);
+	set_fs(old_fs);
+	put_pages(ns);
+	return tx;
+}
+
 /*
  * Returns a pointer to the current page.
  */
@@ -1227,6 +1390,38 @@
 	return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
 }
 
+static int do_read_error(struct nandsim *ns, int num)
+{
+	unsigned int page_no = ns->regs.row;
+
+	if (read_error(page_no)) {
+		int i;
+		memset(ns->buf.byte, 0xFF, num);
+		for (i = 0; i < num; ++i)
+			ns->buf.byte[i] = random32();
+		NS_WARN("simulating read error in page %u\n", page_no);
+		return 1;
+	}
+	return 0;
+}
+
+static void do_bit_flips(struct nandsim *ns, int num)
+{
+	if (bitflips && random32() < (1 << 22)) {
+		int flips = 1;
+		if (bitflips > 1)
+			flips = (random32() % (int) bitflips) + 1;
+		while (flips--) {
+			int pos = random32() % (num * 8);
+			ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
+			NS_WARN("read_page: flipping bit %d in page %d "
+				"reading from %d ecc: corrected=%u failed=%u\n",
+				pos, ns->regs.row, ns->regs.column + ns->regs.off,
+				nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
+		}
+	}
+}
+
 /*
  * Fill the NAND buffer with data read from the specified page.
  */
@@ -1234,36 +1429,40 @@
 {
 	union ns_mem *mypage;
 
+	if (ns->cfile) {
+		if (!ns->pages_written[ns->regs.row]) {
+			NS_DBG("read_page: page %d not written\n", ns->regs.row);
+			memset(ns->buf.byte, 0xFF, num);
+		} else {
+			loff_t pos;
+			ssize_t tx;
+
+			NS_DBG("read_page: page %d written, reading from %d\n",
+				ns->regs.row, ns->regs.column + ns->regs.off);
+			if (do_read_error(ns, num))
+				return;
+			pos = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
+			tx = read_file(ns, ns->cfile, ns->buf.byte, num, &pos);
+			if (tx != num) {
+				NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
+				return;
+			}
+			do_bit_flips(ns, num);
+		}
+		return;
+	}
+
 	mypage = NS_GET_PAGE(ns);
 	if (mypage->byte == NULL) {
 		NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
 		memset(ns->buf.byte, 0xFF, num);
 	} else {
-		unsigned int page_no = ns->regs.row;
 		NS_DBG("read_page: page %d allocated, reading from %d\n",
 			ns->regs.row, ns->regs.column + ns->regs.off);
-		if (read_error(page_no)) {
-			int i;
-			memset(ns->buf.byte, 0xFF, num);
-			for (i = 0; i < num; ++i)
-				ns->buf.byte[i] = random32();
-			NS_WARN("simulating read error in page %u\n", page_no);
+		if (do_read_error(ns, num))
 			return;
-		}
 		memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
-		if (bitflips && random32() < (1 << 22)) {
-			int flips = 1;
-			if (bitflips > 1)
-				flips = (random32() % (int) bitflips) + 1;
-			while (flips--) {
-				int pos = random32() % (num * 8);
-				ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
-				NS_WARN("read_page: flipping bit %d in page %d "
-					"reading from %d ecc: corrected=%u failed=%u\n",
-					pos, ns->regs.row, ns->regs.column + ns->regs.off,
-					nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
-			}
-		}
+		do_bit_flips(ns, num);
 	}
 }
 
@@ -1275,11 +1474,20 @@
 	union ns_mem *mypage;
 	int i;
 
+	if (ns->cfile) {
+		for (i = 0; i < ns->geom.pgsec; i++)
+			if (ns->pages_written[ns->regs.row + i]) {
+				NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
+				ns->pages_written[ns->regs.row + i] = 0;
+			}
+		return;
+	}
+
 	mypage = NS_GET_PAGE(ns);
 	for (i = 0; i < ns->geom.pgsec; i++) {
 		if (mypage->byte != NULL) {
 			NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
-			kfree(mypage->byte);
+			kmem_cache_free(ns->nand_pages_slab, mypage->byte);
 			mypage->byte = NULL;
 		}
 		mypage++;
@@ -1295,16 +1503,57 @@
 	union ns_mem *mypage;
 	u_char *pg_off;
 
+	if (ns->cfile) {
+		loff_t off, pos;
+		ssize_t tx;
+		int all;
+
+		NS_DBG("prog_page: writing page %d\n", ns->regs.row);
+		pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
+		off = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
+		if (!ns->pages_written[ns->regs.row]) {
+			all = 1;
+			memset(ns->file_buf, 0xff, ns->geom.pgszoob);
+		} else {
+			all = 0;
+			pos = off;
+			tx = read_file(ns, ns->cfile, pg_off, num, &pos);
+			if (tx != num) {
+				NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
+				return -1;
+			}
+		}
+		for (i = 0; i < num; i++)
+			pg_off[i] &= ns->buf.byte[i];
+		if (all) {
+			pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
+			tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, &pos);
+			if (tx != ns->geom.pgszoob) {
+				NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
+				return -1;
+			}
+			ns->pages_written[ns->regs.row] = 1;
+		} else {
+			pos = off;
+			tx = write_file(ns, ns->cfile, pg_off, num, &pos);
+			if (tx != num) {
+				NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
+				return -1;
+			}
+		}
+		return 0;
+	}
+
 	mypage = NS_GET_PAGE(ns);
 	if (mypage->byte == NULL) {
 		NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
 		/*
 		 * We allocate memory with GFP_NOFS because a flash FS may
 		 * utilize this. If it is holding an FS lock, then gets here,
-		 * then kmalloc runs writeback which goes to the FS again
-		 * and deadlocks. This was seen in practice.
+		 * then kernel memory alloc runs writeback which goes to the FS
+		 * again and deadlocks. This was seen in practice.
 		 */
-		mypage->byte = kmalloc(ns->geom.pgszoob, GFP_NOFS);
+		mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
 		if (mypage->byte == NULL) {
 			NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
 			return -1;
@@ -1736,13 +1985,17 @@
 
 		/* Check if chip is expecting command */
 		if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
-			/*
-			 * We are in situation when something else (not command)
-			 * was expected but command was input. In this case ignore
-			 * previous command(s)/state(s) and accept the last one.
-			 */
-			NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
-				"ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
+			/* Do not warn if only 2 id bytes are read */
+			if (!(ns->regs.command == NAND_CMD_READID &&
+			    NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
+				/*
+				 * We are in situation when something else (not command)
+				 * was expected but command was input. In this case ignore
+				 * previous command(s)/state(s) and accept the last one.
+				 */
+				NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
+					"ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
+			}
 			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
 		}
 
@@ -2044,7 +2297,7 @@
 	}
 
 	if (overridesize) {
-		u_int64_t new_size = (u_int64_t)nsmtd->erasesize << overridesize;
+		uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
 		if (new_size >> overridesize != nsmtd->erasesize) {
 			NS_ERR("overridesize is too big\n");
 			goto err_exit;
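
For the new cache_file mode above, read_page() and prog_page() compute file offsets as row * pgszoob plus the in-page column, i.e. each simulated page is stored back to back together with its OOB area. A small user-space sketch of that layout, using hypothetical 2KiB pages with 64-byte OOB (not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Byte offset of a simulated NAND page inside the cache file: pages are laid
 * out back to back, each taking page-size + OOB-size (pgszoob) bytes. */
static int64_t cache_file_pos(uint32_t row, uint32_t column, uint32_t pgszoob)
{
	/* widen before multiplying, mirroring the (loff_t) cast in prog_page() */
	return (int64_t)row * pgszoob + column;
}

int main(void)
{
	uint32_t pgszoob = 2048 + 64;      /* hypothetical page + OOB size */

	printf("page 0, col 0:   %lld\n", (long long)cache_file_pos(0, 0, pgszoob));
	printf("page 3, col 100: %lld\n", (long long)cache_file_pos(3, 100, pgszoob));
	return 0;
}
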
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 955959e..582cf80 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -2,12 +2,20 @@
  *  drivers/mtd/ndfc.c
  *
  *  Overview:
- *   Platform independend driver for NDFC (NanD Flash Controller)
+ *   Platform independent driver for NDFC (NanD Flash Controller)
  *   integrated into EP440 cores
  *
+ *   Ported to an OF platform driver by Sean MacLennan
+ *
+ *   The NDFC supports multiple chips, but this driver only supports a
+ *   single chip since I do not have access to any boards with
+ *   multiple chips.
+ *
  *  Author: Thomas Gleixner
  *
  *  Copyright 2006 IBM
+ *  Copyright 2008 PIKA Technologies
+ *    Sean MacLennan <smaclennan@pikatech.com>
  *
  *  This program is free software; you can redistribute	 it and/or modify it
  *  under  the terms of	 the GNU General  Public License as published by the
@@ -21,27 +29,20 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/ndfc.h>
 #include <linux/mtd/mtd.h>
-#include <linux/platform_device.h>
-
+#include <linux/of_platform.h>
 #include <asm/io.h>
-#ifdef CONFIG_40x
-#include <asm/ibm405.h>
-#else
-#include <asm/ibm44x.h>
-#endif
 
-struct ndfc_nand_mtd {
-	struct mtd_info			mtd;
-	struct nand_chip		chip;
-	struct platform_nand_chip	*pl_chip;
-};
-
-static struct ndfc_nand_mtd ndfc_mtd[NDFC_MAX_BANKS];
 
 struct ndfc_controller {
-	void __iomem		*ndfcbase;
-	struct nand_hw_control	ndfc_control;
-	atomic_t		childs_active;
+	struct of_device *ofdev;
+	void __iomem *ndfcbase;
+	struct mtd_info mtd;
+	struct nand_chip chip;
+	int chip_select;
+	struct nand_hw_control ndfc_control;
+#ifdef CONFIG_MTD_PARTITIONS
+	struct mtd_partition *parts;
+#endif
 };
 
 static struct ndfc_controller ndfc_ctrl;
@@ -50,17 +51,14 @@
 {
 	uint32_t ccr;
 	struct ndfc_controller *ndfc = &ndfc_ctrl;
-	struct nand_chip *nandchip = mtd->priv;
-	struct ndfc_nand_mtd *nandmtd = nandchip->priv;
-	struct platform_nand_chip *pchip = nandmtd->pl_chip;
 
-	ccr = __raw_readl(ndfc->ndfcbase + NDFC_CCR);
+	ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
 	if (chip >= 0) {
 		ccr &= ~NDFC_CCR_BS_MASK;
-		ccr |= NDFC_CCR_BS(chip + pchip->chip_offset);
+		ccr |= NDFC_CCR_BS(chip + ndfc->chip_select);
 	} else
 		ccr |= NDFC_CCR_RESET_CE;
-	__raw_writel(ccr, ndfc->ndfcbase + NDFC_CCR);
+	out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
 }
 
 static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
@@ -80,7 +78,7 @@
 {
 	struct ndfc_controller *ndfc = &ndfc_ctrl;
 
-	return __raw_readl(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
+	return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
 }
 
 static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
@@ -88,9 +86,9 @@
 	uint32_t ccr;
 	struct ndfc_controller *ndfc = &ndfc_ctrl;
 
-	ccr = __raw_readl(ndfc->ndfcbase + NDFC_CCR);
+	ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
 	ccr |= NDFC_CCR_RESET_ECC;
-	__raw_writel(ccr, ndfc->ndfcbase + NDFC_CCR);
+	out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
 	wmb();
 }
 
@@ -102,9 +100,10 @@
 	uint8_t *p = (uint8_t *)&ecc;
 
 	wmb();
-	ecc = __raw_readl(ndfc->ndfcbase + NDFC_ECC);
-	ecc_code[0] = p[1];
-	ecc_code[1] = p[2];
+	ecc = in_be32(ndfc->ndfcbase + NDFC_ECC);
+	/* The NDFC uses Smart Media (SMC) byte order */
+	ecc_code[0] = p[2];
+	ecc_code[1] = p[1];
 	ecc_code[2] = p[3];
 
 	return 0;
@@ -123,7 +122,7 @@
 	uint32_t *p = (uint32_t *) buf;
 
 	for(;len > 0; len -= 4)
-		*p++ = __raw_readl(ndfc->ndfcbase + NDFC_DATA);
+		*p++ = in_be32(ndfc->ndfcbase + NDFC_DATA);
 }
 
 static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
@@ -132,7 +131,7 @@
 	uint32_t *p = (uint32_t *) buf;
 
 	for(;len > 0; len -= 4)
-		__raw_writel(*p++, ndfc->ndfcbase + NDFC_DATA);
+		out_be32(ndfc->ndfcbase + NDFC_DATA, *p++);
 }
 
 static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
@@ -141,7 +140,7 @@
 	uint32_t *p = (uint32_t *) buf;
 
 	for(;len > 0; len -= 4)
-		if (*p++ != __raw_readl(ndfc->ndfcbase + NDFC_DATA))
+		if (*p++ != in_be32(ndfc->ndfcbase + NDFC_DATA))
 			return -EFAULT;
 	return 0;
 }
@@ -149,10 +148,19 @@
 /*
  * Initialize chip structure
  */
-static void ndfc_chip_init(struct ndfc_nand_mtd *mtd)
+static int ndfc_chip_init(struct ndfc_controller *ndfc,
+			  struct device_node *node)
 {
-	struct ndfc_controller *ndfc = &ndfc_ctrl;
-	struct nand_chip *chip = &mtd->chip;
+#ifdef CONFIG_MTD_PARTITIONS
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+	static const char *part_types[] = { "cmdlinepart", NULL };
+#else
+	static const char *part_types[] = { NULL };
+#endif
+#endif
+	struct device_node *flash_np;
+	struct nand_chip *chip = &ndfc->chip;
+	int ret;
 
 	chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
 	chip->IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA;
@@ -160,8 +168,6 @@
 	chip->dev_ready = ndfc_ready;
 	chip->select_chip = ndfc_select_chip;
 	chip->chip_delay = 50;
-	chip->priv = mtd;
-	chip->options = mtd->pl_chip->options;
 	chip->controller = &ndfc->ndfc_control;
 	chip->read_buf = ndfc_read_buf;
 	chip->write_buf = ndfc_write_buf;
@@ -172,143 +178,136 @@
 	chip->ecc.mode = NAND_ECC_HW;
 	chip->ecc.size = 256;
 	chip->ecc.bytes = 3;
-	chip->ecclayout = chip->ecc.layout = mtd->pl_chip->ecclayout;
-	mtd->mtd.priv = chip;
-	mtd->mtd.owner = THIS_MODULE;
-}
 
-static int ndfc_chip_probe(struct platform_device *pdev)
-{
-	struct platform_nand_chip *nc = pdev->dev.platform_data;
-	struct ndfc_chip_settings *settings = nc->priv;
-	struct ndfc_controller *ndfc = &ndfc_ctrl;
-	struct ndfc_nand_mtd *nandmtd;
+	ndfc->mtd.priv = chip;
+	ndfc->mtd.owner = THIS_MODULE;
 
-	if (nc->chip_offset >= NDFC_MAX_BANKS || nc->nr_chips > NDFC_MAX_BANKS)
-		return -EINVAL;
-
-	/* Set the bank settings */
-	__raw_writel(settings->bank_settings,
-		     ndfc->ndfcbase + NDFC_BCFG0 + (nc->chip_offset << 2));
-
-	nandmtd = &ndfc_mtd[pdev->id];
-	if (nandmtd->pl_chip)
-		return -EBUSY;
-
-	nandmtd->pl_chip = nc;
-	ndfc_chip_init(nandmtd);
-
-	/* Scan for chips */
-	if (nand_scan(&nandmtd->mtd, nc->nr_chips)) {
-		nandmtd->pl_chip = NULL;
+	flash_np = of_get_next_child(node, NULL);
+	if (!flash_np)
 		return -ENODEV;
+
+	ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
+				   ndfc->ofdev->dev.bus_id, flash_np->name);
+	if (!ndfc->mtd.name) {
+		ret = -ENOMEM;
+		goto err;
 	}
 
+	ret = nand_scan(&ndfc->mtd, 1);
+	if (ret)
+		goto err;
+
 #ifdef CONFIG_MTD_PARTITIONS
-	printk("Number of partitions %d\n", nc->nr_partitions);
-	if (nc->nr_partitions) {
-		/* Add the full device, so complete dumps can be made */
-		add_mtd_device(&nandmtd->mtd);
-		add_mtd_partitions(&nandmtd->mtd, nc->partitions,
-				   nc->nr_partitions);
+	ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0);
+	if (ret < 0)
+		goto err;
 
-	} else
-#else
-		add_mtd_device(&nandmtd->mtd);
-#endif
-
-	atomic_inc(&ndfc->childs_active);
-	return 0;
-}
-
-static int ndfc_chip_remove(struct platform_device *pdev)
-{
-	return 0;
-}
-
-static int ndfc_nand_probe(struct platform_device *pdev)
-{
-	struct platform_nand_ctrl *nc = pdev->dev.platform_data;
-	struct ndfc_controller_settings *settings = nc->priv;
-	struct resource *res = pdev->resource;
-	struct ndfc_controller *ndfc = &ndfc_ctrl;
-	unsigned long long phys = settings->ndfc_erpn | res->start;
-
-#ifndef CONFIG_PHYS_64BIT
-	ndfc->ndfcbase = ioremap((phys_addr_t)phys, res->end - res->start + 1);
-#else
-	ndfc->ndfcbase = ioremap64(phys, res->end - res->start + 1);
-#endif
-	if (!ndfc->ndfcbase) {
-		printk(KERN_ERR "NDFC: ioremap failed\n");
-		return -EIO;
+#ifdef CONFIG_MTD_OF_PARTS
+	if (ret == 0) {
+		ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np,
+					      &ndfc->parts);
+		if (ret < 0)
+			goto err;
 	}
+#endif
 
-	__raw_writel(settings->ccr_settings, ndfc->ndfcbase + NDFC_CCR);
+	if (ret > 0)
+		ret = add_mtd_partitions(&ndfc->mtd, ndfc->parts, ret);
+	else
+#endif
+		ret = add_mtd_device(&ndfc->mtd);
+
+err:
+	of_node_put(flash_np);
+	if (ret)
+		kfree(ndfc->mtd.name);
+	return ret;
+}
+
+static int __devinit ndfc_probe(struct of_device *ofdev,
+				const struct of_device_id *match)
+{
+	struct ndfc_controller *ndfc = &ndfc_ctrl;
+	const u32 *reg;
+	u32 ccr;
+	int err, len;
 
 	spin_lock_init(&ndfc->ndfc_control.lock);
 	init_waitqueue_head(&ndfc->ndfc_control.wq);
+	ndfc->ofdev = ofdev;
+	dev_set_drvdata(&ofdev->dev, ndfc);
 
-	platform_set_drvdata(pdev, ndfc);
-
-	printk("NDFC NAND Driver initialized. Chip-Rev: 0x%08x\n",
-	       __raw_readl(ndfc->ndfcbase + NDFC_REVID));
-
-	return 0;
-}
-
-static int ndfc_nand_remove(struct platform_device *pdev)
-{
-	struct ndfc_controller *ndfc = platform_get_drvdata(pdev);
-
-	if (atomic_read(&ndfc->childs_active))
-		return -EBUSY;
-
-	if (ndfc) {
-		platform_set_drvdata(pdev, NULL);
-		iounmap(ndfc_ctrl.ndfcbase);
-		ndfc_ctrl.ndfcbase = NULL;
+	/* Read the reg property to get the chip select */
+	reg = of_get_property(ofdev->node, "reg", &len);
+	if (reg == NULL || len != 12) {
+		dev_err(&ofdev->dev, "unable read reg property (%d)\n", len);
+		return -ENOENT;
 	}
+	ndfc->chip_select = reg[0];
+
+	ndfc->ndfcbase = of_iomap(ofdev->node, 0);
+	if (!ndfc->ndfcbase) {
+		dev_err(&ofdev->dev, "failed to get memory\n");
+		return -EIO;
+	}
+
+	ccr = NDFC_CCR_BS(ndfc->chip_select);
+
+	/* It is ok if ccr does not exist - just default to 0 */
+	reg = of_get_property(ofdev->node, "ccr", NULL);
+	if (reg)
+		ccr |= *reg;
+
+	out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
+
+	/* Set the bank settings if given */
+	reg = of_get_property(ofdev->node, "bank-settings", NULL);
+	if (reg) {
+		int offset = NDFC_BCFG0 + (ndfc->chip_select << 2);
+		out_be32(ndfc->ndfcbase + offset, *reg);
+	}
+
+	err = ndfc_chip_init(ndfc, ofdev->node);
+	if (err) {
+		iounmap(ndfc->ndfcbase);
+		return err;
+	}
+
 	return 0;
 }
 
-/* driver device registration */
+static int __devexit ndfc_remove(struct of_device *ofdev)
+{
+	struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
 
-static struct platform_driver ndfc_chip_driver = {
-	.probe		= ndfc_chip_probe,
-	.remove		= ndfc_chip_remove,
-	.driver		= {
-		.name	= "ndfc-chip",
-		.owner	= THIS_MODULE,
-	},
+	nand_release(&ndfc->mtd);
+
+	return 0;
+}
+
+static const struct of_device_id ndfc_match[] = {
+	{ .compatible = "ibm,ndfc", },
+	{}
 };
+MODULE_DEVICE_TABLE(of, ndfc_match);
 
-static struct platform_driver ndfc_nand_driver = {
-	.probe		= ndfc_nand_probe,
-	.remove		= ndfc_nand_remove,
-	.driver		= {
-		.name	= "ndfc-nand",
-		.owner	= THIS_MODULE,
+static struct of_platform_driver ndfc_driver = {
+	.driver = {
+		.name	= "ndfc",
 	},
+	.match_table = ndfc_match,
+	.probe = ndfc_probe,
+	.remove = __devexit_p(ndfc_remove),
 };
 
 static int __init ndfc_nand_init(void)
 {
-	int ret;
-
-	spin_lock_init(&ndfc_ctrl.ndfc_control.lock);
-	init_waitqueue_head(&ndfc_ctrl.ndfc_control.wq);
-
-	ret = platform_driver_register(&ndfc_nand_driver);
-	if (!ret)
-		ret = platform_driver_register(&ndfc_chip_driver);
-	return ret;
+	return of_register_platform_driver(&ndfc_driver);
 }
 
 static void __exit ndfc_nand_exit(void)
 {
-	platform_driver_unregister(&ndfc_chip_driver);
-	platform_driver_unregister(&ndfc_nand_driver);
+	of_unregister_platform_driver(&ndfc_driver);
 }
 
 module_init(ndfc_nand_init);
@@ -316,6 +315,4 @@
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
-MODULE_DESCRIPTION("Platform driver for NDFC");
-MODULE_ALIAS("platform:ndfc-chip");
-MODULE_ALIAS("platform:ndfc-nand");
+MODULE_DESCRIPTION("OF Platform driver for NDFC");
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index fc41444..cc55cbc 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -298,7 +298,7 @@
 #define NDTR1_tAR(c)	(min((c), 15) << 0)
 
 /* convert nano-seconds to nand flash controller clock cycles */
-#define ns2cycle(ns, clk)	(int)(((ns) * (clk / 1000000) / 1000) + 1)
+#define ns2cycle(ns, clk)	(int)(((ns) * (clk / 1000000) / 1000) - 1)
 
 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
 				   const struct pxa3xx_nand_timing *t)
@@ -368,14 +368,14 @@
 		/* large block, 2 cycles for column address
 		 * row address starts from 3rd cycle
 		 */
-		info->ndcb1 |= (page_addr << 16) | (column & 0xffff);
+		info->ndcb1 |= page_addr << 16;
 		if (info->row_addr_cycles == 3)
 			info->ndcb2 = (page_addr >> 16) & 0xff;
 	} else
 		/* small block, 1 cycles for column address
 		 * row address starts from 2nd cycle
 		 */
-		info->ndcb1 = (page_addr << 8) | (column & 0xff);
+		info->ndcb1 = page_addr << 8;
 
 	if (cmd == cmdset->program)
 		info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS;
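
A worked example of the ns2cycle() arithmetic after the +1 to -1 change, using a hypothetical 104MHz NAND controller clock (the macro copy and the values below are only an illustration, not part of the patch):

#include <stdio.h>

/* same integer arithmetic as the patched macro */
#define ns2cycle(ns, clk)	(int)(((ns) * (clk / 1000000) / 1000) - 1)

int main(void)
{
	unsigned long clk = 104000000UL;   /* hypothetical controller clock */

	printf("30ns -> %d cycles\n", ns2cycle(30, clk));  /* 30*104/1000 - 1 = 2 */
	printf("80ns -> %d cycles\n", ns2cycle(80, clk));  /* 80*104/1000 - 1 = 7 */
	return 0;
}
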
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 30a518e2..54ec754 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -2,6 +2,7 @@
  * drivers/mtd/nand/sharpsl.c
  *
  *  Copyright (C) 2004 Richard Purdie
+ *  Copyright (C) 2008 Dmitry Baryshkov
  *
  *  Based on Sharp's NAND driver sharp_sl.c
  *
@@ -19,22 +20,31 @@
 #include <linux/mtd/nand.h>
 #include <linux/mtd/nand_ecc.h>
 #include <linux/mtd/partitions.h>
+#include <linux/mtd/sharpsl.h>
 #include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
 #include <asm/io.h>
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
 
-static void __iomem *sharpsl_io_base;
-static int sharpsl_phys_base = 0x0C000000;
+struct sharpsl_nand {
+	struct mtd_info		mtd;
+	struct nand_chip	chip;
+
+	void __iomem		*io;
+};
+
+#define mtd_to_sharpsl(_mtd)	container_of(_mtd, struct sharpsl_nand, mtd)
 
 /* register offset */
-#define ECCLPLB	 	sharpsl_io_base+0x00	/* line parity 7 - 0 bit */
-#define ECCLPUB	 	sharpsl_io_base+0x04	/* line parity 15 - 8 bit */
-#define ECCCP	   	sharpsl_io_base+0x08	/* column parity 5 - 0 bit */
-#define ECCCNTR	 	sharpsl_io_base+0x0C	/* ECC byte counter */
-#define ECCCLRR	 	sharpsl_io_base+0x10	/* cleare ECC */
-#define FLASHIO	 	sharpsl_io_base+0x14	/* Flash I/O */
-#define FLASHCTL	sharpsl_io_base+0x18	/* Flash Control */
+#define ECCLPLB		0x00	/* line parity 7 - 0 bit */
+#define ECCLPUB		0x04	/* line parity 15 - 8 bit */
+#define ECCCP		0x08	/* column parity 5 - 0 bit */
+#define ECCCNTR		0x0C	/* ECC byte counter */
+#define ECCCLRR		0x10	/* clear ECC */
+#define FLASHIO		0x14	/* Flash I/O */
+#define FLASHCTL	0x18	/* Flash Control */
 
 /* Flash control bit */
 #define FLRYBY		(1 << 5)
@@ -45,35 +55,6 @@
 #define FLCE0		(1 << 0)
 
 /*
- * MTD structure for SharpSL
- */
-static struct mtd_info *sharpsl_mtd = NULL;
-
-/*
- * Define partitions for flash device
- */
-#define DEFAULT_NUM_PARTITIONS 3
-
-static int nr_partitions;
-static struct mtd_partition sharpsl_nand_default_partition_info[] = {
-	{
-	 .name = "System Area",
-	 .offset = 0,
-	 .size = 7 * 1024 * 1024,
-	 },
-	{
-	 .name = "Root Filesystem",
-	 .offset = 7 * 1024 * 1024,
-	 .size = 30 * 1024 * 1024,
-	 },
-	{
-	 .name = "Home Filesystem",
-	 .offset = MTDPART_OFS_APPEND,
-	 .size = MTDPART_SIZ_FULL,
-	 },
-};
-
-/*
  *	hardware specific access to control-lines
  *	ctrl:
  *	NAND_CNE: bit 0 -> ! bit 0 & 4
@@ -84,6 +65,7 @@
 static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
 				   unsigned int ctrl)
 {
+	struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
 	struct nand_chip *chip = mtd->priv;
 
 	if (ctrl & NAND_CTRL_CHANGE) {
@@ -93,103 +75,97 @@
 
 		bits ^= 0x11;
 
-		writeb((readb(FLASHCTL) & ~0x17) | bits, FLASHCTL);
+		writeb((readb(sharpsl->io + FLASHCTL) & ~0x17) | bits, sharpsl->io + FLASHCTL);
 	}
 
 	if (cmd != NAND_CMD_NONE)
 		writeb(cmd, chip->IO_ADDR_W);
 }
 
-static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
-
-static struct nand_bbt_descr sharpsl_bbt = {
-	.options = 0,
-	.offs = 4,
-	.len = 2,
-	.pattern = scan_ff_pattern
-};
-
-static struct nand_bbt_descr sharpsl_akita_bbt = {
-	.options = 0,
-	.offs = 4,
-	.len = 1,
-	.pattern = scan_ff_pattern
-};
-
-static struct nand_ecclayout akita_oobinfo = {
-	.eccbytes = 24,
-	.eccpos = {
-		   0x5, 0x1, 0x2, 0x3, 0x6, 0x7, 0x15, 0x11,
-		   0x12, 0x13, 0x16, 0x17, 0x25, 0x21, 0x22, 0x23,
-		   0x26, 0x27, 0x35, 0x31, 0x32, 0x33, 0x36, 0x37},
-	.oobfree = {{0x08, 0x09}}
-};
-
 static int sharpsl_nand_dev_ready(struct mtd_info *mtd)
 {
-	return !((readb(FLASHCTL) & FLRYBY) == 0);
+	struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+	return !((readb(sharpsl->io + FLASHCTL) & FLRYBY) == 0);
 }
 
 static void sharpsl_nand_enable_hwecc(struct mtd_info *mtd, int mode)
 {
-	writeb(0, ECCCLRR);
+	struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+	writeb(0, sharpsl->io + ECCCLRR);
 }
 
 static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat, u_char * ecc_code)
 {
-	ecc_code[0] = ~readb(ECCLPUB);
-	ecc_code[1] = ~readb(ECCLPLB);
-	ecc_code[2] = (~readb(ECCCP) << 2) | 0x03;
-	return readb(ECCCNTR) != 0;
+	struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+	ecc_code[0] = ~readb(sharpsl->io + ECCLPUB);
+	ecc_code[1] = ~readb(sharpsl->io + ECCLPLB);
+	ecc_code[2] = (~readb(sharpsl->io + ECCCP) << 2) | 0x03;
+	return readb(sharpsl->io + ECCCNTR) != 0;
 }
 
 #ifdef CONFIG_MTD_PARTITIONS
-const char *part_probes[] = { "cmdlinepart", NULL };
+static const char *part_probes[] = { "cmdlinepart", NULL };
 #endif
 
 /*
  * Main initialization routine
  */
-static int __init sharpsl_nand_init(void)
+static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
 {
 	struct nand_chip *this;
+#ifdef CONFIG_MTD_PARTITIONS
 	struct mtd_partition *sharpsl_partition_info;
+	int nr_partitions;
+#endif
+	struct resource *r;
 	int err = 0;
+	struct sharpsl_nand *sharpsl;
+	struct sharpsl_nand_platform_data *data = pdev->dev.platform_data;
+
+	if (!data) {
+		dev_err(&pdev->dev, "no platform data!\n");
+		return -EINVAL;
+	}
 
 	/* Allocate memory for MTD device structure and private data */
-	sharpsl_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
-	if (!sharpsl_mtd) {
+	sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL);
+	if (!sharpsl) {
 		printk("Unable to allocate SharpSL NAND MTD device structure.\n");
 		return -ENOMEM;
 	}
 
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		dev_err(&pdev->dev, "no io memory resource defined!\n");
+		err = -ENODEV;
+		goto err_get_res;
+	}
+
 	/* map physical address */
-	sharpsl_io_base = ioremap(sharpsl_phys_base, 0x1000);
-	if (!sharpsl_io_base) {
+	sharpsl->io = ioremap(r->start, resource_size(r));
+	if (!sharpsl->io) {
 		printk("ioremap to access Sharp SL NAND chip failed\n");
-		kfree(sharpsl_mtd);
-		return -EIO;
+		err = -EIO;
+		goto err_ioremap;
 	}
 
 	/* Get pointer to private data */
-	this = (struct nand_chip *)(&sharpsl_mtd[1]);
-
-	/* Initialize structures */
-	memset(sharpsl_mtd, 0, sizeof(struct mtd_info));
-	memset(this, 0, sizeof(struct nand_chip));
+	this = (struct nand_chip *)(&sharpsl->chip);
 
 	/* Link the private data with the MTD structure */
-	sharpsl_mtd->priv = this;
-	sharpsl_mtd->owner = THIS_MODULE;
+	sharpsl->mtd.priv = this;
+	sharpsl->mtd.owner = THIS_MODULE;
+
+	platform_set_drvdata(pdev, sharpsl);
 
 	/*
 	 * PXA initialize
 	 */
-	writeb(readb(FLASHCTL) | FLWP, FLASHCTL);
+	writeb(readb(sharpsl->io + FLASHCTL) | FLWP, sharpsl->io + FLASHCTL);
 
 	/* Set address of NAND IO lines */
-	this->IO_ADDR_R = FLASHIO;
-	this->IO_ADDR_W = FLASHIO;
+	this->IO_ADDR_R = sharpsl->io + FLASHIO;
+	this->IO_ADDR_W = sharpsl->io + FLASHIO;
 	/* Set address of hardware control function */
 	this->cmd_ctrl = sharpsl_nand_hwcontrol;
 	this->dev_ready = sharpsl_nand_dev_ready;
@@ -199,68 +175,89 @@
 	this->ecc.mode = NAND_ECC_HW;
 	this->ecc.size = 256;
 	this->ecc.bytes = 3;
-	this->badblock_pattern = &sharpsl_bbt;
-	if (machine_is_akita() || machine_is_borzoi()) {
-		this->badblock_pattern = &sharpsl_akita_bbt;
-		this->ecc.layout = &akita_oobinfo;
-	}
+	this->badblock_pattern = data->badblock_pattern;
+	this->ecc.layout = data->ecc_layout;
 	this->ecc.hwctl = sharpsl_nand_enable_hwecc;
 	this->ecc.calculate = sharpsl_nand_calculate_ecc;
 	this->ecc.correct = nand_correct_data;
 
 	/* Scan to find existence of the device */
-	err = nand_scan(sharpsl_mtd, 1);
-	if (err) {
-		iounmap(sharpsl_io_base);
-		kfree(sharpsl_mtd);
-		return err;
-	}
+	err = nand_scan(&sharpsl->mtd, 1);
+	if (err)
+		goto err_scan;
 
 	/* Register the partitions */
-	sharpsl_mtd->name = "sharpsl-nand";
-	nr_partitions = parse_mtd_partitions(sharpsl_mtd, part_probes, &sharpsl_partition_info, 0);
-
+	sharpsl->mtd.name = "sharpsl-nand";
+#ifdef CONFIG_MTD_PARTITIONS
+	nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0);
 	if (nr_partitions <= 0) {
-		nr_partitions = DEFAULT_NUM_PARTITIONS;
-		sharpsl_partition_info = sharpsl_nand_default_partition_info;
-		if (machine_is_poodle()) {
-			sharpsl_partition_info[1].size = 22 * 1024 * 1024;
-		} else if (machine_is_corgi() || machine_is_shepherd()) {
-			sharpsl_partition_info[1].size = 25 * 1024 * 1024;
-		} else if (machine_is_husky()) {
-			sharpsl_partition_info[1].size = 53 * 1024 * 1024;
-		} else if (machine_is_spitz()) {
-			sharpsl_partition_info[1].size = 5 * 1024 * 1024;
-		} else if (machine_is_akita()) {
-			sharpsl_partition_info[1].size = 58 * 1024 * 1024;
-		} else if (machine_is_borzoi()) {
-			sharpsl_partition_info[1].size = 32 * 1024 * 1024;
-		}
+		nr_partitions = data->nr_partitions;
+		sharpsl_partition_info = data->partitions;
 	}
 
-	add_mtd_partitions(sharpsl_mtd, sharpsl_partition_info, nr_partitions);
+	if (nr_partitions > 0)
+		err = add_mtd_partitions(&sharpsl->mtd, sharpsl_partition_info, nr_partitions);
+	else
+#endif
+	err = add_mtd_device(&sharpsl->mtd);
+	if (err)
+		goto err_add;
 
 	/* Return happy */
 	return 0;
-}
 
-module_init(sharpsl_nand_init);
+err_add:
+	nand_release(&sharpsl->mtd);
+
+err_scan:
+	platform_set_drvdata(pdev, NULL);
+	iounmap(sharpsl->io);
+err_ioremap:
+err_get_res:
+	kfree(sharpsl);
+	return err;
+}
 
 /*
  * Clean up routine
  */
-static void __exit sharpsl_nand_cleanup(void)
+static int __devexit sharpsl_nand_remove(struct platform_device *pdev)
 {
-	/* Release resources, unregister device */
-	nand_release(sharpsl_mtd);
+	struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
 
-	iounmap(sharpsl_io_base);
+	/* Release resources, unregister device */
+	nand_release(&sharpsl->mtd);
+
+	platform_set_drvdata(pdev, NULL);
+
+	iounmap(sharpsl->io);
 
 	/* Free the MTD device structure */
-	kfree(sharpsl_mtd);
+	kfree(sharpsl);
+
+	return 0;
 }
 
-module_exit(sharpsl_nand_cleanup);
+static struct platform_driver sharpsl_nand_driver = {
+	.driver = {
+		.name	= "sharpsl-nand",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= sharpsl_nand_probe,
+	.remove		= __devexit_p(sharpsl_nand_remove),
+};
+
+static int __init sharpsl_nand_init(void)
+{
+	return platform_driver_register(&sharpsl_nand_driver);
+}
+module_init(sharpsl_nand_init);
+
+static void __exit sharpsl_nand_exit(void)
+{
+	platform_driver_unregister(&sharpsl_nand_driver);
+}
+module_exit(sharpsl_nand_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 320b929..d1c4546 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -39,7 +39,7 @@
 	struct NFTLrecord *nftl;
 	unsigned long temp;
 
-	if (mtd->type != MTD_NANDFLASH)
+	if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
 		return;
 	/* OK, this is moderately ugly.  But probably safe.  Alternatives? */
 	if (memcmp(mtd->name, "DiskOnChip", 10))
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index ccc4f20..8b22b18 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -51,7 +51,7 @@
 	   the mtd device accordingly.  We could even get rid of
 	   nftl->EraseSize if there were any point in doing so. */
 	nftl->EraseSize = nftl->mbd.mtd->erasesize;
-        nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize;
+        nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize;
 
 	nftl->MediaUnit = BLOCK_NIL;
 	nftl->SpareMediaUnit = BLOCK_NIL;
@@ -168,7 +168,7 @@
 			printk(KERN_NOTICE "WARNING: Support for NFTL with UnitSizeFactor 0x%02x is experimental\n",
 			       mh->UnitSizeFactor);
 			nftl->EraseSize = nftl->mbd.mtd->erasesize << (0xff - mh->UnitSizeFactor);
-			nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize;
+			nftl->nb_blocks = (u32)nftl->mbd.mtd->size / nftl->EraseSize;
 		}
 #endif
 		nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 90ed319..529af27 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1772,7 +1772,7 @@
 	int len;
 	int ret = 0;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%08x, len = %i\n", (unsigned int) instr->addr, (unsigned int) instr->len);
+	DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len);
 
 	block_size = (1 << this->erase_shift);
 
@@ -1810,7 +1810,7 @@
 
 		/* Check if we have a bad block, we do not erase bad blocks */
 		if (onenand_block_isbad_nolock(mtd, addr, 0)) {
-			printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%08x\n", (unsigned int) addr);
+			printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%012llx\n", (unsigned long long) addr);
 			instr->state = MTD_ERASE_FAILED;
 			goto erase_exit;
 		}
@@ -2029,7 +2029,7 @@
  *
  * Lock one or more blocks
  */
-static int onenand_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	int ret;
 
@@ -2047,7 +2047,7 @@
  *
  * Unlock one or more blocks
  */
-static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
 	int ret;
 
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index e538c0a..d2aa9c46 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -21,8 +21,6 @@
 
 #include <asm/types.h>
 
-#define const_cpu_to_le16	__constant_cpu_to_le16
-
 static int block_size = 0;
 module_param(block_size, int, 0);
 MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
@@ -156,7 +154,7 @@
 	size_t retlen;
 
 	sectors_per_block = part->block_size / SECTOR_SIZE;
-	part->total_blocks = part->mbd.mtd->size / part->block_size;
+	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;
 
 	if (part->total_blocks < 2)
 		return -ENOENT;
@@ -276,16 +274,17 @@
 
 	part = (struct partition*)erase->priv;
 
-	i = erase->addr / part->block_size;
-	if (i >= part->total_blocks || part->blocks[i].offset != erase->addr) {
-		printk(KERN_ERR PREFIX "erase callback for unknown offset %x "
-				"on '%s'\n", erase->addr, part->mbd.mtd->name);
+	i = (u32)erase->addr / part->block_size;
+	if (i >= part->total_blocks || part->blocks[i].offset != erase->addr ||
+	    erase->addr > UINT_MAX) {
+		printk(KERN_ERR PREFIX "erase callback for unknown offset %llx "
+				"on '%s'\n", (unsigned long long)erase->addr, part->mbd.mtd->name);
 		return;
 	}
 
 	if (erase->state != MTD_ERASE_DONE) {
-		printk(KERN_WARNING PREFIX "erase failed at 0x%x on '%s', "
-				"state %d\n", erase->addr,
+		printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', "
+				"state %d\n", (unsigned long long)erase->addr,
 				part->mbd.mtd->name, erase->state);
 
 		part->blocks[i].state = BLOCK_FAILED;
@@ -297,7 +296,7 @@
 		return;
 	}
 
-	magic = const_cpu_to_le16(RFD_MAGIC);
+	magic = cpu_to_le16(RFD_MAGIC);
 
 	part->blocks[i].state = BLOCK_ERASED;
 	part->blocks[i].free_sectors = part->data_sectors_per_block;
@@ -345,9 +344,9 @@
 	rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
 
 	if (rc) {
-		printk(KERN_ERR PREFIX "erase of region %x,%x on '%s' "
-				"failed\n", erase->addr, erase->len,
-				part->mbd.mtd->name);
+		printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
+				"failed\n", (unsigned long long)erase->addr,
+				(unsigned long long)erase->len, part->mbd.mtd->name);
 		kfree(erase);
 	}
 
@@ -587,7 +586,7 @@
 	int block, offset, rc;
 	u_long addr;
 	size_t retlen;
-	u16 del = const_cpu_to_le16(SECTOR_DELETED);
+	u16 del = cpu_to_le16(SECTOR_DELETED);
 
 	block = old_addr / part->block_size;
 	offset = (old_addr % part->block_size) / SECTOR_SIZE -
@@ -763,7 +762,7 @@
 {
 	struct partition *part;
 
-	if (mtd->type != MTD_NORFLASH)
+	if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
 		return;
 
 	part = kzalloc(sizeof(struct partition), GFP_KERNEL);
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 33a5d6e..3f67e00 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -294,7 +294,8 @@
 	int cis_sector;
 
 	/* Check for small page NAND flash */
-	if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE)
+	if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE ||
+	    mtd->size > UINT_MAX)
 		return;
 
 	/* Check for SSDFC format by reading CIS/IDI sector */
@@ -316,7 +317,7 @@
 
 	ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT);
 	ssfdc->erase_size = mtd->erasesize;
-	ssfdc->map_len = mtd->size / mtd->erasesize;
+	ssfdc->map_len = (u32)mtd->size / mtd->erasesize;
 
 	DEBUG(MTD_DEBUG_LEVEL1,
 		"SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
@@ -327,7 +328,7 @@
 	ssfdc->heads = 16;
 	ssfdc->sectors = 32;
 	get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors);
-	ssfdc->cylinders = (unsigned short)((mtd->size >> SECTOR_SHIFT) /
+	ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) /
 			((long)ssfdc->sectors * (long)ssfdc->heads));
 
 	DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
new file mode 100644
index 0000000..c1d5013
--- /dev/null
+++ b/drivers/mtd/tests/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
new file mode 100644
index 0000000..afbc3f8
--- /dev/null
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -0,0 +1,742 @@
+/*
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Test OOB read and write on MTD device.
+ *
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ */
+
+#include <asm/div64.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/sched.h>
+
+#define PRINT_PREF KERN_INFO "mtd_oobtest: "
+
+static int dev;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static struct mtd_info *mtd;
+static unsigned char *readbuf;
+static unsigned char *writebuf;
+static unsigned char *bbt;
+
+static int ebcnt;
+static int pgcnt;
+static int errcnt;
+static int use_offset;
+static int use_len;
+static int use_len_max;
+static int vary_offset;
+static unsigned long next = 1;
+
+static inline unsigned int simple_rand(void)
+{
+	next = next * 1103515245 + 12345;
+	return (unsigned int)((next / 65536) % 32768);
+}
+
+static inline void simple_srand(unsigned long seed)
+{
+	next = seed;
+}
+
+static void set_random_data(unsigned char *buf, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len; ++i)
+		buf[i] = simple_rand();
+}
+
+static int erase_eraseblock(int ebnum)
+{
+	int err;
+	struct erase_info ei;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	memset(&ei, 0, sizeof(struct erase_info));
+	ei.mtd  = mtd;
+	ei.addr = addr;
+	ei.len  = mtd->erasesize;
+
+	err = mtd->erase(mtd, &ei);
+	if (err) {
+		printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+		return err;
+	}
+
+	if (ei.state == MTD_ERASE_FAILED) {
+		printk(PRINT_PREF "some erase error occurred at EB %d\n",
+		       ebnum);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int erase_whole_device(void)
+{
+	int err;
+	unsigned int i;
+
+	printk(PRINT_PREF "erasing whole device\n");
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = erase_eraseblock(i);
+		if (err)
+			return err;
+		cond_resched();
+	}
+	printk(PRINT_PREF "erased %u eraseblocks\n", i);
+	return 0;
+}
+
+static void do_vary_offset(void)
+{
+	use_len -= 1;
+	if (use_len < 1) {
+		use_offset += 1;
+		if (use_offset >= use_len_max)
+			use_offset = 0;
+		use_len = use_len_max - use_offset;
+	}
+}
+
+static int write_eraseblock(int ebnum)
+{
+	int i;
+	struct mtd_oob_ops ops;
+	int err = 0;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
+		set_random_data(writebuf, use_len);
+		ops.mode      = MTD_OOB_AUTO;
+		ops.len       = 0;
+		ops.retlen    = 0;
+		ops.ooblen    = use_len;
+		ops.oobretlen = 0;
+		ops.ooboffs   = use_offset;
+		ops.datbuf    = 0;
+		ops.oobbuf    = writebuf;
+		err = mtd->write_oob(mtd, addr, &ops);
+		if (err || ops.oobretlen != use_len) {
+			printk(PRINT_PREF "error: writeoob failed at %#llx\n",
+			       (long long)addr);
+			printk(PRINT_PREF "error: use_len %d, use_offset %d\n",
+			       use_len, use_offset);
+			errcnt += 1;
+			return err ? err : -1;
+		}
+		if (vary_offset)
+			do_vary_offset();
+	}
+
+	return err;
+}
+
+static int write_whole_device(void)
+{
+	int err;
+	unsigned int i;
+
+	printk(PRINT_PREF "writing OOBs of whole device\n");
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = write_eraseblock(i);
+		if (err)
+			return err;
+		if (i % 256 == 0)
+			printk(PRINT_PREF "written up to eraseblock %u\n", i);
+		cond_resched();
+	}
+	printk(PRINT_PREF "written %u eraseblocks\n", i);
+	return 0;
+}
+
+static int verify_eraseblock(int ebnum)
+{
+	int i;
+	struct mtd_oob_ops ops;
+	int err = 0;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
+		set_random_data(writebuf, use_len);
+		ops.mode      = MTD_OOB_AUTO;
+		ops.len       = 0;
+		ops.retlen    = 0;
+		ops.ooblen    = use_len;
+		ops.oobretlen = 0;
+		ops.ooboffs   = use_offset;
+		ops.datbuf    = 0;
+		ops.oobbuf    = readbuf;
+		err = mtd->read_oob(mtd, addr, &ops);
+		if (err || ops.oobretlen != use_len) {
+			printk(PRINT_PREF "error: readoob failed at %#llx\n",
+			       (long long)addr);
+			errcnt += 1;
+			return err ? err : -1;
+		}
+		if (memcmp(readbuf, writebuf, use_len)) {
+			printk(PRINT_PREF "error: verify failed at %#llx\n",
+			       (long long)addr);
+			errcnt += 1;
+			if (errcnt > 1000) {
+				printk(PRINT_PREF "error: too many errors\n");
+				return -1;
+			}
+		}
+		if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) {
+			int k;
+
+			ops.mode      = MTD_OOB_AUTO;
+			ops.len       = 0;
+			ops.retlen    = 0;
+			ops.ooblen    = mtd->ecclayout->oobavail;
+			ops.oobretlen = 0;
+			ops.ooboffs   = 0;
+			ops.datbuf    = 0;
+			ops.oobbuf    = readbuf;
+			err = mtd->read_oob(mtd, addr, &ops);
+			if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
+				printk(PRINT_PREF "error: readoob failed at "
+				       "%#llx\n", (long long)addr);
+				errcnt += 1;
+				return err ? err : -1;
+			}
+			if (memcmp(readbuf + use_offset, writebuf, use_len)) {
+				printk(PRINT_PREF "error: verify failed at "
+				       "%#llx\n", (long long)addr);
+				errcnt += 1;
+				if (errcnt > 1000) {
+					printk(PRINT_PREF "error: too many "
+					       "errors\n");
+					return -1;
+				}
+			}
+			for (k = 0; k < use_offset; ++k)
+				if (readbuf[k] != 0xff) {
+					printk(PRINT_PREF "error: verify 0xff "
+					       "failed at %#llx\n",
+					       (long long)addr);
+					errcnt += 1;
+					if (errcnt > 1000) {
+						printk(PRINT_PREF "error: too "
+						       "many errors\n");
+						return -1;
+					}
+				}
+			for (k = use_offset + use_len;
+			     k < mtd->ecclayout->oobavail; ++k)
+				if (readbuf[k] != 0xff) {
+					printk(PRINT_PREF "error: verify 0xff "
+					       "failed at %#llx\n",
+					       (long long)addr);
+					errcnt += 1;
+					if (errcnt > 1000) {
+						printk(PRINT_PREF "error: too "
+						       "many errors\n");
+						return -1;
+					}
+				}
+		}
+		if (vary_offset)
+			do_vary_offset();
+	}
+	return err;
+}
+
+static int verify_eraseblock_in_one_go(int ebnum)
+{
+	struct mtd_oob_ops ops;
+	int err = 0;
+	loff_t addr = ebnum * mtd->erasesize;
+	size_t len = mtd->ecclayout->oobavail * pgcnt;
+
+	set_random_data(writebuf, len);
+	ops.mode      = MTD_OOB_AUTO;
+	ops.len       = 0;
+	ops.retlen    = 0;
+	ops.ooblen    = len;
+	ops.oobretlen = 0;
+	ops.ooboffs   = 0;
+	ops.datbuf    = 0;
+	ops.oobbuf    = readbuf;
+	err = mtd->read_oob(mtd, addr, &ops);
+	if (err || ops.oobretlen != len) {
+		printk(PRINT_PREF "error: readoob failed at %#llx\n",
+		       (long long)addr);
+		errcnt += 1;
+		return err ? err : -1;
+	}
+	if (memcmp(readbuf, writebuf, len)) {
+		printk(PRINT_PREF "error: verify failed at %#llx\n",
+		       (long long)addr);
+		errcnt += 1;
+		if (errcnt > 1000) {
+			printk(PRINT_PREF "error: too many errors\n");
+			return -1;
+		}
+	}
+
+	return err;
+}
+
+static int verify_all_eraseblocks(void)
+{
+	int err;
+	unsigned int i;
+
+	printk(PRINT_PREF "verifying all eraseblocks\n");
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = verify_eraseblock(i);
+		if (err)
+			return err;
+		if (i % 256 == 0)
+			printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+		cond_resched();
+	}
+	printk(PRINT_PREF "verified %u eraseblocks\n", i);
+	return 0;
+}
+
+static int is_block_bad(int ebnum)
+{
+	int ret;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	ret = mtd->block_isbad(mtd, addr);
+	if (ret)
+		printk(PRINT_PREF "block %d is bad\n", ebnum);
+	return ret;
+}
+
+static int scan_for_bad_eraseblocks(void)
+{
+	int i, bad = 0;
+
+	bbt = kmalloc(ebcnt, GFP_KERNEL);
+	if (!bbt) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		return -ENOMEM;
+	}
+	memset(bbt, 0, ebcnt);
+
+	printk(PRINT_PREF "scanning for bad eraseblocks\n");
+	for (i = 0; i < ebcnt; ++i) {
+		bbt[i] = is_block_bad(i) ? 1 : 0;
+		if (bbt[i])
+			bad += 1;
+		cond_resched();
+	}
+	printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+	return 0;
+}
+
+static int __init mtd_oobtest_init(void)
+{
+	int err = 0;
+	unsigned int i;
+	uint64_t tmp;
+	struct mtd_oob_ops ops;
+	loff_t addr = 0, addr0;
+
+	printk(KERN_INFO "\n");
+	printk(KERN_INFO "=================================================\n");
+	printk(PRINT_PREF "MTD device: %d\n", dev);
+
+	mtd = get_mtd_device(NULL, dev);
+	if (IS_ERR(mtd)) {
+		err = PTR_ERR(mtd);
+		printk(PRINT_PREF "error: cannot get MTD device\n");
+		return err;
+	}
+
+	if (mtd->type != MTD_NANDFLASH) {
+		printk(PRINT_PREF "this test requires NAND flash\n");
+		goto out;
+	}
+
+	tmp = mtd->size;
+	do_div(tmp, mtd->erasesize);
+	ebcnt = tmp;
+	pgcnt = mtd->erasesize / mtd->writesize;
+
+	printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+	       "page size %u, count of eraseblocks %u, pages per "
+	       "eraseblock %u, OOB size %u\n",
+	       (unsigned long long)mtd->size, mtd->erasesize,
+	       mtd->writesize, ebcnt, pgcnt, mtd->oobsize);
+
+	err = -ENOMEM;
+	readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
+	if (!readbuf) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		goto out;
+	}
+	writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
+	if (!writebuf) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		goto out;
+	}
+
+	err = scan_for_bad_eraseblocks();
+	if (err)
+		goto out;
+
+	use_offset = 0;
+	use_len = mtd->ecclayout->oobavail;
+	use_len_max = mtd->ecclayout->oobavail;
+	vary_offset = 0;
+
+	/* First test: write all OOB, read it back and verify */
+	printk(PRINT_PREF "test 1 of 5\n");
+
+	err = erase_whole_device();
+	if (err)
+		goto out;
+
+	simple_srand(1);
+	err = write_whole_device();
+	if (err)
+		goto out;
+
+	simple_srand(1);
+	err = verify_all_eraseblocks();
+	if (err)
+		goto out;
+
+	/*
+	 * Second test: write all OOB, a block at a time, read it back and
+	 * verify.
+	 */
+	printk(PRINT_PREF "test 2 of 5\n");
+
+	err = erase_whole_device();
+	if (err)
+		goto out;
+
+	simple_srand(3);
+	err = write_whole_device();
+	if (err)
+		goto out;
+
+	/* Check all eraseblocks */
+	simple_srand(3);
+	printk(PRINT_PREF "verifying all eraseblocks\n");
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = verify_eraseblock_in_one_go(i);
+		if (err)
+			goto out;
+		if (i % 256 == 0)
+			printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+		cond_resched();
+	}
+	printk(PRINT_PREF "verified %u eraseblocks\n", i);
+
+	/*
+	 * Third test: write OOB at varying offsets and lengths, read it back
+	 * and verify.
+	 */
+	printk(PRINT_PREF "test 3 of 5\n");
+
+	err = erase_whole_device();
+	if (err)
+		goto out;
+
+	/* Write all eraseblocks */
+	use_offset = 0;
+	use_len = mtd->ecclayout->oobavail;
+	use_len_max = mtd->ecclayout->oobavail;
+	vary_offset = 1;
+	simple_srand(5);
+	printk(PRINT_PREF "writing OOBs of whole device\n");
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = write_eraseblock(i);
+		if (err)
+			goto out;
+		if (i % 256 == 0)
+			printk(PRINT_PREF "written up to eraseblock %u\n", i);
+		cond_resched();
+	}
+	printk(PRINT_PREF "written %u eraseblocks\n", i);
+
+	/* Check all eraseblocks */
+	use_offset = 0;
+	use_len = mtd->ecclayout->oobavail;
+	use_len_max = mtd->ecclayout->oobavail;
+	vary_offset = 1;
+	simple_srand(5);
+	err = verify_all_eraseblocks();
+	if (err)
+		goto out;
+
+	use_offset = 0;
+	use_len = mtd->ecclayout->oobavail;
+	use_len_max = mtd->ecclayout->oobavail;
+	vary_offset = 0;
+
+	/* Fourth test: try to write off end of device */
+	printk(PRINT_PREF "test 4 of 5\n");
+
+	err = erase_whole_device();
+	if (err)
+		goto out;
+
+	addr0 = 0;
+	for (i = 0; i < ebcnt && bbt[i]; ++i)
+		addr0 += mtd->erasesize;
+
+	/* Attempt to write off end of OOB */
+	ops.mode      = MTD_OOB_AUTO;
+	ops.len       = 0;
+	ops.retlen    = 0;
+	ops.ooblen    = 1;
+	ops.oobretlen = 0;
+	ops.ooboffs   = mtd->ecclayout->oobavail;
+	ops.datbuf    = 0;
+	ops.oobbuf    = writebuf;
+	printk(PRINT_PREF "attempting to start write past end of OOB\n");
+	printk(PRINT_PREF "an error is expected...\n");
+	err = mtd->write_oob(mtd, addr0, &ops);
+	if (err) {
+		printk(PRINT_PREF "error occurred as expected\n");
+		err = 0;
+	} else {
+		printk(PRINT_PREF "error: can write past end of OOB\n");
+		errcnt += 1;
+	}
+
+	/* Attempt to read off end of OOB */
+	ops.mode      = MTD_OOB_AUTO;
+	ops.len       = 0;
+	ops.retlen    = 0;
+	ops.ooblen    = 1;
+	ops.oobretlen = 0;
+	ops.ooboffs   = mtd->ecclayout->oobavail;
+	ops.datbuf    = 0;
+	ops.oobbuf    = readbuf;
+	printk(PRINT_PREF "attempting to start read past end of OOB\n");
+	printk(PRINT_PREF "an error is expected...\n");
+	err = mtd->read_oob(mtd, addr0, &ops);
+	if (err) {
+		printk(PRINT_PREF "error occurred as expected\n");
+		err = 0;
+	} else {
+		printk(PRINT_PREF "error: can read past end of OOB\n");
+		errcnt += 1;
+	}
+
+	if (bbt[ebcnt - 1]) {
+		printk(PRINT_PREF "skipping end of device tests because last "
+		       "block is bad\n");
+	} else {
+		/* Attempt to write off end of device */
+		ops.mode      = MTD_OOB_AUTO;
+		ops.len       = 0;
+		ops.retlen    = 0;
+		ops.ooblen    = mtd->ecclayout->oobavail + 1;
+		ops.oobretlen = 0;
+		ops.ooboffs   = 0;
+		ops.datbuf    = 0;
+		ops.oobbuf    = writebuf;
+		printk(PRINT_PREF "attempting to write past end of device\n");
+		printk(PRINT_PREF "an error is expected...\n");
+		err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
+		if (err) {
+			printk(PRINT_PREF "error occurred as expected\n");
+			err = 0;
+		} else {
+			printk(PRINT_PREF "error: wrote past end of device\n");
+			errcnt += 1;
+		}
+
+		/* Attempt to read off end of device */
+		ops.mode      = MTD_OOB_AUTO;
+		ops.len       = 0;
+		ops.retlen    = 0;
+		ops.ooblen    = mtd->ecclayout->oobavail + 1;
+		ops.oobretlen = 0;
+		ops.ooboffs   = 0;
+		ops.datbuf    = 0;
+		ops.oobbuf    = readbuf;
+		printk(PRINT_PREF "attempting to read past end of device\n");
+		printk(PRINT_PREF "an error is expected...\n");
+		err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
+		if (err) {
+			printk(PRINT_PREF "error occurred as expected\n");
+			err = 0;
+		} else {
+			printk(PRINT_PREF "error: read past end of device\n");
+			errcnt += 1;
+		}
+
+		err = erase_eraseblock(ebcnt - 1);
+		if (err)
+			goto out;
+
+		/* Attempt to write off end of device */
+		ops.mode      = MTD_OOB_AUTO;
+		ops.len       = 0;
+		ops.retlen    = 0;
+		ops.ooblen    = mtd->ecclayout->oobavail;
+		ops.oobretlen = 0;
+		ops.ooboffs   = 1;
+		ops.datbuf    = 0;
+		ops.oobbuf    = writebuf;
+		printk(PRINT_PREF "attempting to write past end of device\n");
+		printk(PRINT_PREF "an error is expected...\n");
+		err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
+		if (err) {
+			printk(PRINT_PREF "error occurred as expected\n");
+			err = 0;
+		} else {
+			printk(PRINT_PREF "error: wrote past end of device\n");
+			errcnt += 1;
+		}
+
+		/* Attempt to read off end of device */
+		ops.mode      = MTD_OOB_AUTO;
+		ops.len       = 0;
+		ops.retlen    = 0;
+		ops.ooblen    = mtd->ecclayout->oobavail;
+		ops.oobretlen = 0;
+		ops.ooboffs   = 1;
+		ops.datbuf    = 0;
+		ops.oobbuf    = readbuf;
+		printk(PRINT_PREF "attempting to read past end of device\n");
+		printk(PRINT_PREF "an error is expected...\n");
+		err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
+		if (err) {
+			printk(PRINT_PREF "error occurred as expected\n");
+			err = 0;
+		} else {
+			printk(PRINT_PREF "error: read past end of device\n");
+			errcnt += 1;
+		}
+	}
+
+	/* Fifth test: write / read across block boundaries */
+	printk(PRINT_PREF "test 5 of 5\n");
+
+	/* Erase all eraseblocks */
+	err = erase_whole_device();
+	if (err)
+		goto out;
+
+	/* Write all eraseblocks */
+	simple_srand(11);
+	printk(PRINT_PREF "writing OOBs of whole device\n");
+	for (i = 0; i < ebcnt - 1; ++i) {
+		int cnt = 2;
+		int pg;
+		size_t sz = mtd->ecclayout->oobavail;
+		if (bbt[i] || bbt[i + 1])
+			continue;
+		addr = (i + 1) * mtd->erasesize - mtd->writesize;
+		for (pg = 0; pg < cnt; ++pg) {
+			set_random_data(writebuf, sz);
+			ops.mode      = MTD_OOB_AUTO;
+			ops.len       = 0;
+			ops.retlen    = 0;
+			ops.ooblen    = sz;
+			ops.oobretlen = 0;
+			ops.ooboffs   = 0;
+			ops.datbuf    = 0;
+			ops.oobbuf    = writebuf;
+			err = mtd->write_oob(mtd, addr, &ops);
+			if (err)
+				goto out;
+			if (i % 256 == 0)
+				printk(PRINT_PREF "written up to eraseblock "
+				       "%u\n", i);
+			cond_resched();
+			addr += mtd->writesize;
+		}
+	}
+	printk(PRINT_PREF "written %u eraseblocks\n", i);
+
+	/* Check all eraseblocks */
+	simple_srand(11);
+	printk(PRINT_PREF "verifying all eraseblocks\n");
+	for (i = 0; i < ebcnt - 1; ++i) {
+		if (bbt[i] || bbt[i + 1])
+			continue;
+		set_random_data(writebuf, mtd->ecclayout->oobavail * 2);
+		addr = (i + 1) * mtd->erasesize - mtd->writesize;
+		ops.mode      = MTD_OOB_AUTO;
+		ops.len       = 0;
+		ops.retlen    = 0;
+		ops.ooblen    = mtd->ecclayout->oobavail * 2;
+		ops.oobretlen = 0;
+		ops.ooboffs   = 0;
+		ops.datbuf    = 0;
+		ops.oobbuf    = readbuf;
+		err = mtd->read_oob(mtd, addr, &ops);
+		if (err)
+			goto out;
+		if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
+			printk(PRINT_PREF "error: verify failed at %#llx\n",
+			       (long long)addr);
+			errcnt += 1;
+			if (errcnt > 1000) {
+				printk(PRINT_PREF "error: too many errors\n");
+				goto out;
+			}
+		}
+		if (i % 256 == 0)
+			printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+		cond_resched();
+	}
+	printk(PRINT_PREF "verified %u eraseblocks\n", i);
+
+	printk(PRINT_PREF "finished with %d errors\n", errcnt);
+out:
+	kfree(bbt);
+	kfree(writebuf);
+	kfree(readbuf);
+	put_mtd_device(mtd);
+	if (err)
+		printk(PRINT_PREF "error %d occurred\n", err);
+	printk(KERN_INFO "=================================================\n");
+	return err;
+}
+module_init(mtd_oobtest_init);
+
+static void __exit mtd_oobtest_exit(void)
+{
+	return;
+}
+module_exit(mtd_oobtest_exit);
+
+MODULE_DESCRIPTION("Out-of-band test module");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
new file mode 100644
index 0000000..9648818
--- /dev/null
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -0,0 +1,632 @@
+/*
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Test page read and write on MTD device.
+ *
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ */
+
+#include <asm/div64.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/sched.h>
+
+#define PRINT_PREF KERN_INFO "mtd_pagetest: "
+
+static int dev;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static struct mtd_info *mtd;
+static unsigned char *twopages;
+static unsigned char *writebuf;
+static unsigned char *boundary;
+static unsigned char *bbt;
+
+static int pgsize;
+static int bufsize;
+static int ebcnt;
+static int pgcnt;
+static int errcnt;
+static unsigned long next = 1;
+
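+/*
+ * Simple linear congruential generator (the classic ANSI C rand()
+ * constants); a private PRNG is used so that the test data can be
+ * regenerated from the same seed when verifying.
+ */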
+static inline unsigned int simple_rand(void)
+{
+	next = next * 1103515245 + 12345;
+	return (unsigned int)((next / 65536) % 32768);
+}
+
+static inline void simple_srand(unsigned long seed)
+{
+	next = seed;
+}
+
+static void set_random_data(unsigned char *buf, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len; ++i)
+		buf[i] = simple_rand();
+}
+
+static int erase_eraseblock(int ebnum)
+{
+	int err;
+	struct erase_info ei;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	memset(&ei, 0, sizeof(struct erase_info));
+	ei.mtd  = mtd;
+	ei.addr = addr;
+	ei.len  = mtd->erasesize;
+
+	err = mtd->erase(mtd, &ei);
+	if (err) {
+		printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+		return err;
+	}
+
+	if (ei.state == MTD_ERASE_FAILED) {
+		printk(PRINT_PREF "some erase error occurred at EB %d\n",
+		       ebnum);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int write_eraseblock(int ebnum)
+{
+	int err = 0;
+	size_t written = 0;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	set_random_data(writebuf, mtd->erasesize);
+	cond_resched();
+	err = mtd->write(mtd, addr, mtd->erasesize, &written, writebuf);
+	if (err || written != mtd->erasesize)
+		printk(PRINT_PREF "error: write failed at %#llx\n",
+		       (long long)addr);
+
+	return err;
+}
+
+static int verify_eraseblock(int ebnum)
+{
+	uint32_t j;
+	size_t read = 0;
+	int err = 0, i;
+	loff_t addr0, addrn;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	addr0 = 0;
+	for (i = 0; i < ebcnt && bbt[i]; ++i)
+		addr0 += mtd->erasesize;
+
+	addrn = mtd->size;
+	for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
+		addrn -= mtd->erasesize;
+
+	set_random_data(writebuf, mtd->erasesize);
+	for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
+		/* Do a read to set the internal dataRAMs to different data */
+		err = mtd->read(mtd, addr0, bufsize, &read, twopages);
+		if (err == -EUCLEAN)
+			err = 0;
+		if (err || read != bufsize) {
+			printk(PRINT_PREF "error: read failed at %#llx\n",
+			       (long long)addr0);
+			return err;
+		}
+		err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
+		if (err == -EUCLEAN)
+			err = 0;
+		if (err || read != bufsize) {
+			printk(PRINT_PREF "error: read failed at %#llx\n",
+			       (long long)(addrn - bufsize));
+			return err;
+		}
+		memset(twopages, 0, bufsize);
+		read = 0;
+		err = mtd->read(mtd, addr, bufsize, &read, twopages);
+		if (err == -EUCLEAN)
+			err = 0;
+		if (err || read != bufsize) {
+			printk(PRINT_PREF "error: read failed at %#llx\n",
+			       (long long)addr);
+			break;
+		}
+		if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {
+			printk(PRINT_PREF "error: verify failed at %#llx\n",
+			       (long long)addr);
+			errcnt += 1;
+		}
+	}
+	/* Check boundary between eraseblocks */
+	if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {
+		unsigned long oldnext = next;
+		/* Do a read to set the internal dataRAMs to different data */
+		err = mtd->read(mtd, addr0, bufsize, &read, twopages);
+		if (err == -EUCLEAN)
+			err = 0;
+		if (err || read != bufsize) {
+			printk(PRINT_PREF "error: read failed at %#llx\n",
+			       (long long)addr0);
+			return err;
+		}
+		err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
+		if (err == -EUCLEAN)
+			err = 0;
+		if (err || read != bufsize) {
+			printk(PRINT_PREF "error: read failed at %#llx\n",
+			       (long long)(addrn - bufsize));
+			return err;
+		}
+		memset(twopages, 0, bufsize);
+		read = 0;
+		err = mtd->read(mtd, addr, bufsize, &read, twopages);
+		if (err == -EUCLEAN)
+			err = 0;
+		if (err || read != bufsize) {
+			printk(PRINT_PREF "error: read failed at %#llx\n",
+			       (long long)addr);
+			return err;
+		}
+		memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
+		set_random_data(boundary + pgsize, pgsize);
+		if (memcmp(twopages, boundary, bufsize)) {
+			printk(PRINT_PREF "error: verify failed at %#llx\n",
+			       (long long)addr);
+			errcnt += 1;
+		}
+		next = oldnext;
+	}
+	return err;
+}
+
+static int crosstest(void)
+{
+	size_t read = 0;
+	int err = 0, i;
+	loff_t addr, addr0, addrn;
+	unsigned char *pp1, *pp2, *pp3, *pp4;
+
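+	/*
+	 * Read pages from both ends of the device and check that the first
+	 * page reads back identically before and after the intervening
+	 * reads, i.e. that one read does not disturb another.
+	 */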
+	printk(PRINT_PREF "crosstest\n");
+	pp1 = kmalloc(pgsize * 4, GFP_KERNEL);
+	if (!pp1) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		return -ENOMEM;
+	}
+	pp2 = pp1 + pgsize;
+	pp3 = pp2 + pgsize;
+	pp4 = pp3 + pgsize;
+	memset(pp1, 0, pgsize * 4);
+
+	addr0 = 0;
+	for (i = 0; i < ebcnt && bbt[i]; ++i)
+		addr0 += mtd->erasesize;
+
+	addrn = mtd->size;
+	for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i)
+		addrn -= mtd->erasesize;
+
+	/* Read 2nd-to-last page to pp1 */
+	read = 0;
+	addr = addrn - pgsize - pgsize;
+	err = mtd->read(mtd, addr, pgsize, &read, pp1);
+	if (err == -EUCLEAN)
+		err = 0;
+	if (err || read != pgsize) {
+		printk(PRINT_PREF "error: read failed at %#llx\n",
+		       (long long)addr);
+		kfree(pp1);
+		return err;
+	}
+
+	/* Read 3rd-to-last page to pp1 */
+	read = 0;
+	addr = addrn - pgsize - pgsize - pgsize;
+	err = mtd->read(mtd, addr, pgsize, &read, pp1);
+	if (err == -EUCLEAN)
+		err = 0;
+	if (err || read != pgsize) {
+		printk(PRINT_PREF "error: read failed at %#llx\n",
+		       (long long)addr);
+		kfree(pp1);
+		return err;
+	}
+
+	/* Read first page to pp2 */
+	read = 0;
+	addr = addr0;
+	printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
+	err = mtd->read(mtd, addr, pgsize, &read, pp2);
+	if (err == -EUCLEAN)
+		err = 0;
+	if (err || read != pgsize) {
+		printk(PRINT_PREF "error: read failed at %#llx\n",
+		       (long long)addr);
+		kfree(pp1);
+		return err;
+	}
+
+	/* Read last page to pp3 */
+	read = 0;
+	addr = addrn - pgsize;
+	printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
+	err = mtd->read(mtd, addr, pgsize, &read, pp3);
+	if (err == -EUCLEAN)
+		err = 0;
+	if (err || read != pgsize) {
+		printk(PRINT_PREF "error: read failed at %#llx\n",
+		       (long long)addr);
+		kfree(pp1);
+		return err;
+	}
+
+	/* Read first page again to pp4 */
+	read = 0;
+	addr = addr0;
+	printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
+	err = mtd->read(mtd, addr, pgsize, &read, pp4);
+	if (err == -EUCLEAN)
+		err = 0;
+	if (err || read != pgsize) {
+		printk(PRINT_PREF "error: read failed at %#llx\n",
+		       (long long)addr);
+		kfree(pp1);
+		return err;
+	}
+
+	/* pp2 and pp4 should be the same */
+	printk(PRINT_PREF "verifying pages read at %#llx match\n",
+	       (long long)addr0);
+	if (memcmp(pp2, pp4, pgsize)) {
+		printk(PRINT_PREF "verify failed!\n");
+		errcnt += 1;
+	} else if (!err)
+		printk(PRINT_PREF "crosstest ok\n");
+	kfree(pp1);
+	return err;
+}
+
+static int erasecrosstest(void)
+{
+	size_t read = 0, written = 0;
+	int err = 0, i, ebnum, ok = 1, ebnum2;
+	loff_t addr0;
+	char *readbuf = twopages;
+
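+	/*
+	 * Write the first page of the first good eraseblock, then erase the
+	 * last good eraseblock and check that the written page is unaffected.
+	 */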
+	printk(PRINT_PREF "erasecrosstest\n");
+
+	ebnum = 0;
+	addr0 = 0;
+	for (i = 0; i < ebcnt && bbt[i]; ++i) {
+		addr0 += mtd->erasesize;
+		ebnum += 1;
+	}
+
+	ebnum2 = ebcnt - 1;
+	while (ebnum2 && bbt[ebnum2])
+		ebnum2 -= 1;
+
+	printk(PRINT_PREF "erasing block %d\n", ebnum);
+	err = erase_eraseblock(ebnum);
+	if (err)
+		return err;
+
+	printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
+	set_random_data(writebuf, pgsize);
+	strcpy(writebuf, "There is no data like this!");
+	err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
+	if (err || written != pgsize) {
+		printk(PRINT_PREF "error: write failed at %#llx\n",
+		       (long long)addr0);
+		return err ? err : -1;
+	}
+
+	printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
+	memset(readbuf, 0, pgsize);
+	err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
+	if (err == -EUCLEAN)
+		err = 0;
+	if (err || read != pgsize) {
+		printk(PRINT_PREF "error: read failed at %#llx\n",
+		       (long long)addr0);
+		return err ? err : -1;
+	}
+
+	printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum);
+	if (memcmp(writebuf, readbuf, pgsize)) {
+		printk(PRINT_PREF "verify failed!\n");
+		errcnt += 1;
+		ok = 0;
+		return err;
+	}
+
+	printk(PRINT_PREF "erasing block %d\n", ebnum);
+	err = erase_eraseblock(ebnum);
+	if (err)
+		return err;
+
+	printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
+	set_random_data(writebuf, pgsize);
+	strcpy(writebuf, "There is no data like this!");
+	err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
+	if (err || written != pgsize) {
+		printk(PRINT_PREF "error: write failed at %#llx\n",
+		       (long long)addr0);
+		return err ? err : -1;
+	}
+
+	printk(PRINT_PREF "erasing block %d\n", ebnum2);
+	err = erase_eraseblock(ebnum2);
+	if (err)
+		return err;
+
+	printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
+	memset(readbuf, 0, pgsize);
+	err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
+	if (err == -EUCLEAN)
+		err = 0;
+	if (err || read != pgsize) {
+		printk(PRINT_PREF "error: read failed at %#llx\n",
+		       (long long)addr0);
+		return err ? err : -1;
+	}
+
+	printk(PRINT_PREF "verifying 1st page of block %d\n", ebnum);
+	if (memcmp(writebuf, readbuf, pgsize)) {
+		printk(PRINT_PREF "verify failed!\n");
+		errcnt += 1;
+		ok = 0;
+	}
+
+	if (ok && !err)
+		printk(PRINT_PREF "erasecrosstest ok\n");
+	return err;
+}
+
+static int erasetest(void)
+{
+	size_t read = 0, written = 0;
+	int err = 0, i, ebnum, ok = 1;
+	loff_t addr0;
+
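+	/*
+	 * Write one page, erase its eraseblock and check that the page reads
+	 * back as all 0xff afterwards.
+	 */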
+	printk(PRINT_PREF "erasetest\n");
+
+	ebnum = 0;
+	addr0 = 0;
+	for (i = 0; i < ebcnt && bbt[i]; ++i) {
+		addr0 += mtd->erasesize;
+		ebnum += 1;
+	}
+
+	printk(PRINT_PREF "erasing block %d\n", ebnum);
+	err = erase_eraseblock(ebnum);
+	if (err)
+		return err;
+
+	printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
+	set_random_data(writebuf, pgsize);
+	err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
+	if (err || written != pgsize) {
+		printk(PRINT_PREF "error: write failed at %#llx\n",
+		       (long long)addr0);
+		return err ? err : -1;
+	}
+
+	printk(PRINT_PREF "erasing block %d\n", ebnum);
+	err = erase_eraseblock(ebnum);
+	if (err)
+		return err;
+
+	printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
+	err = mtd->read(mtd, addr0, pgsize, &read, twopages);
+	if (err == -EUCLEAN)
+		err = 0;
+	if (err || read != pgsize) {
+		printk(PRINT_PREF "error: read failed at %#llx\n",
+		       (long long)addr0);
+		return err ? err : -1;
+	}
+
+	printk(PRINT_PREF "verifying 1st page of block %d is all 0xff\n",
+	       ebnum);
+	for (i = 0; i < pgsize; ++i)
+		if (twopages[i] != 0xff) {
+			printk(PRINT_PREF "verifying all 0xff failed at %d\n",
+			       i);
+			errcnt += 1;
+			ok = 0;
+			break;
+		}
+
+	if (ok && !err)
+		printk(PRINT_PREF "erasetest ok\n");
+
+	return err;
+}
+
+static int is_block_bad(int ebnum)
+{
+	loff_t addr = ebnum * mtd->erasesize;
+	int ret;
+
+	ret = mtd->block_isbad(mtd, addr);
+	if (ret)
+		printk(PRINT_PREF "block %d is bad\n", ebnum);
+	return ret;
+}
+
+static int scan_for_bad_eraseblocks(void)
+{
+	int i, bad = 0;
+
+	bbt = kmalloc(ebcnt, GFP_KERNEL);
+	if (!bbt) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		return -ENOMEM;
+	}
+	memset(bbt, 0, ebcnt);
+
+	printk(PRINT_PREF "scanning for bad eraseblocks\n");
+	for (i = 0; i < ebcnt; ++i) {
+		bbt[i] = is_block_bad(i) ? 1 : 0;
+		if (bbt[i])
+			bad += 1;
+		cond_resched();
+	}
+	printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+	return 0;
+}
+
+static int __init mtd_pagetest_init(void)
+{
+	int err = 0;
+	uint64_t tmp;
+	uint32_t i;
+
+	printk(KERN_INFO "\n");
+	printk(KERN_INFO "=================================================\n");
+	printk(PRINT_PREF "MTD device: %d\n", dev);
+
+	mtd = get_mtd_device(NULL, dev);
+	if (IS_ERR(mtd)) {
+		err = PTR_ERR(mtd);
+		printk(PRINT_PREF "error: cannot get MTD device\n");
+		return err;
+	}
+
+	if (mtd->type != MTD_NANDFLASH) {
+		printk(PRINT_PREF "this test requires NAND flash\n");
+		goto out;
+	}
+
+	tmp = mtd->size;
+	do_div(tmp, mtd->erasesize);
+	ebcnt = tmp;
+	pgcnt = mtd->erasesize / mtd->writesize;
+	pgsize = mtd->writesize;
+
+	printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+	       "page size %u, count of eraseblocks %u, pages per "
+	       "eraseblock %u, OOB size %u\n",
+	       (unsigned long long)mtd->size, mtd->erasesize,
+	       pgsize, ebcnt, pgcnt, mtd->oobsize);
+
+	err = -ENOMEM;
+	bufsize = pgsize * 2;
+	writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
+	if (!writebuf) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		goto out;
+	}
+	twopages = kmalloc(bufsize, GFP_KERNEL);
+	if (!twopages) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		goto out;
+	}
+	boundary = kmalloc(bufsize, GFP_KERNEL);
+	if (!boundary) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		goto out;
+	}
+
+	err = scan_for_bad_eraseblocks();
+	if (err)
+		goto out;
+
+	/* Erase all eraseblocks */
+	printk(PRINT_PREF "erasing whole device\n");
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = erase_eraseblock(i);
+		if (err)
+			goto out;
+		cond_resched();
+	}
+	printk(PRINT_PREF "erased %u eraseblocks\n", i);
+
+	/* Write all eraseblocks */
+	simple_srand(1);
+	printk(PRINT_PREF "writing whole device\n");
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = write_eraseblock(i);
+		if (err)
+			goto out;
+		if (i % 256 == 0)
+			printk(PRINT_PREF "written up to eraseblock %u\n", i);
+		cond_resched();
+	}
+	printk(PRINT_PREF "written %u eraseblocks\n", i);
+
+	/* Check all eraseblocks */
+	simple_srand(1);
+	printk(PRINT_PREF "verifying all eraseblocks\n");
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = verify_eraseblock(i);
+		if (err)
+			goto out;
+		if (i % 256 == 0)
+			printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+		cond_resched();
+	}
+	printk(PRINT_PREF "verified %u eraseblocks\n", i);
+
+	err = crosstest();
+	if (err)
+		goto out;
+
+	err = erasecrosstest();
+	if (err)
+		goto out;
+
+	err = erasetest();
+	if (err)
+		goto out;
+
+	printk(PRINT_PREF "finished with %d errors\n", errcnt);
+out:
+
+	kfree(bbt);
+	kfree(boundary);
+	kfree(twopages);
+	kfree(writebuf);
+	put_mtd_device(mtd);
+	if (err)
+		printk(PRINT_PREF "error %d occurred\n", err);
+	printk(KERN_INFO "=================================================\n");
+	return err;
+}
+module_init(mtd_pagetest_init);
+
+static void __exit mtd_pagetest_exit(void)
+{
+	return;
+}
+module_exit(mtd_pagetest_exit);
+
+MODULE_DESCRIPTION("NAND page test");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
new file mode 100644
index 0000000..645e77f
--- /dev/null
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Check MTD device read.
+ *
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ */
+
+#include <asm/div64.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/sched.h>
+
+#define PRINT_PREF KERN_INFO "mtd_readtest: "
+
+static int dev;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static struct mtd_info *mtd;
+static unsigned char *iobuf;
+static unsigned char *iobuf1;
+static unsigned char *bbt;
+
+static int pgsize;
+static int ebcnt;
+static int pgcnt;
+
+static int read_eraseblock_by_page(int ebnum)
+{
+	size_t read = 0;
+	int i, ret, err = 0;
+	loff_t addr = ebnum * mtd->erasesize;
+	void *buf = iobuf;
+	void *oobbuf = iobuf1;
+
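+	/*
+	 * Read every page of the eraseblock and its raw OOB area, keeping the
+	 * data so that a failing eraseblock can be dumped afterwards.
+	 */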
+	for (i = 0; i < pgcnt; i++) {
+		memset(buf, 0, pgsize);
+		ret = mtd->read(mtd, addr, pgsize, &read, buf);
+		if (ret == -EUCLEAN)
+			ret = 0;
+		if (ret || read != pgsize) {
+			printk(PRINT_PREF "error: read failed at %#llx\n",
+			       (long long)addr);
+			if (!err)
+				err = ret;
+			if (!err)
+				err = -EINVAL;
+		}
+		if (mtd->oobsize) {
+			struct mtd_oob_ops ops;
+
+			ops.mode      = MTD_OOB_PLACE;
+			ops.len       = 0;
+			ops.retlen    = 0;
+			ops.ooblen    = mtd->oobsize;
+			ops.oobretlen = 0;
+			ops.ooboffs   = 0;
+			ops.datbuf    = 0;
+			ops.oobbuf    = oobbuf;
+			ret = mtd->read_oob(mtd, addr, &ops);
+			if (ret || ops.oobretlen != mtd->oobsize) {
+				printk(PRINT_PREF "error: read oob failed at "
+						  "%#llx\n", (long long)addr);
+				if (!err)
+					err = ret;
+				if (!err)
+					err = -EINVAL;
+			}
+			oobbuf += mtd->oobsize;
+		}
+		addr += pgsize;
+		buf += pgsize;
+	}
+
+	return err;
+}
+
+static void dump_eraseblock(int ebnum)
+{
+	int i, j, n;
+	char line[128];
+	int pg, oob;
+
+	printk(PRINT_PREF "dumping eraseblock %d\n", ebnum);
+	n = mtd->erasesize;
+	for (i = 0; i < n;) {
+		char *p = line;
+
+		p += sprintf(p, "%05x: ", i);
+		for (j = 0; j < 32 && i < n; j++, i++)
+			p += sprintf(p, "%02x", (unsigned int)iobuf[i]);
+		printk(KERN_CRIT "%s\n", line);
+		cond_resched();
+	}
+	if (!mtd->oobsize)
+		return;
+	printk(PRINT_PREF "dumping oob from eraseblock %d\n", ebnum);
+	n = mtd->oobsize;
+	for (pg = 0, i = 0; pg < pgcnt; pg++)
+		for (oob = 0; oob < n;) {
+			char *p = line;
+
+			p += sprintf(p, "%05x: ", i);
+			for (j = 0; j < 32 && oob < n; j++, oob++, i++)
+				p += sprintf(p, "%02x",
+					     (unsigned int)iobuf1[i]);
+			printk(KERN_CRIT "%s\n", line);
+			cond_resched();
+		}
+}
+
+static int is_block_bad(int ebnum)
+{
+	loff_t addr = ebnum * mtd->erasesize;
+	int ret;
+
+	ret = mtd->block_isbad(mtd, addr);
+	if (ret)
+		printk(PRINT_PREF "block %d is bad\n", ebnum);
+	return ret;
+}
+
+static int scan_for_bad_eraseblocks(void)
+{
+	int i, bad = 0;
+
+	bbt = kmalloc(ebcnt, GFP_KERNEL);
+	if (!bbt) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		return -ENOMEM;
+	}
+	memset(bbt, 0, ebcnt);
+
+	printk(PRINT_PREF "scanning for bad eraseblocks\n");
+	for (i = 0; i < ebcnt; ++i) {
+		bbt[i] = is_block_bad(i) ? 1 : 0;
+		if (bbt[i])
+			bad += 1;
+		cond_resched();
+	}
+	printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+	return 0;
+}
+
+static int __init mtd_readtest_init(void)
+{
+	uint64_t tmp;
+	int err, i;
+
+	printk(KERN_INFO "\n");
+	printk(KERN_INFO "=================================================\n");
+	printk(PRINT_PREF "MTD device: %d\n", dev);
+
+	mtd = get_mtd_device(NULL, dev);
+	if (IS_ERR(mtd)) {
+		err = PTR_ERR(mtd);
+		printk(PRINT_PREF "error: Cannot get MTD device\n");
+		return err;
+	}
+
+	if (mtd->writesize == 1) {
+		printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+		       "bytes.\n");
+		pgsize = 512;
+	} else
+		pgsize = mtd->writesize;
+
+	tmp = mtd->size;
+	do_div(tmp, mtd->erasesize);
+	ebcnt = tmp;
+	pgcnt = mtd->erasesize / pgsize;
+
+	printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+	       "page size %u, count of eraseblocks %u, pages per "
+	       "eraseblock %u, OOB size %u\n",
+	       (unsigned long long)mtd->size, mtd->erasesize,
+	       pgsize, ebcnt, pgcnt, mtd->oobsize);
+
+	err = -ENOMEM;
+	iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
+	if (!iobuf) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		goto out;
+	}
+	iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL);
+	if (!iobuf1) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		goto out;
+	}
+
+	err = scan_for_bad_eraseblocks();
+	if (err)
+		goto out;
+
+	/* Read all eraseblocks 1 page at a time */
+	printk(PRINT_PREF "testing page read\n");
+	for (i = 0; i < ebcnt; ++i) {
+		int ret;
+
+		if (bbt[i])
+			continue;
+		ret = read_eraseblock_by_page(i);
+		if (ret) {
+			dump_eraseblock(i);
+			if (!err)
+				err = ret;
+		}
+		cond_resched();
+	}
+
+	if (err)
+		printk(PRINT_PREF "finished with errors\n");
+	else
+		printk(PRINT_PREF "finished\n");
+
+out:
+
+	kfree(iobuf);
+	kfree(iobuf1);
+	kfree(bbt);
+	put_mtd_device(mtd);
+	if (err)
+		printk(PRINT_PREF "error %d occurred\n", err);
+	printk(KERN_INFO "=================================================\n");
+	return err;
+}
+module_init(mtd_readtest_init);
+
+static void __exit mtd_readtest_exit(void)
+{
+	return;
+}
+module_exit(mtd_readtest_exit);
+
+MODULE_DESCRIPTION("Read test module");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
new file mode 100644
index 0000000..141363a
--- /dev/null
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -0,0 +1,502 @@
+/*
+ * Copyright (C) 2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Test read and write speed of a MTD device.
+ *
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ */
+
+#include <asm/div64.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/sched.h>
+
+#define PRINT_PREF KERN_INFO "mtd_speedtest: "
+
+static int dev;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static struct mtd_info *mtd;
+static unsigned char *iobuf;
+static unsigned char *bbt;
+
+static int pgsize;
+static int ebcnt;
+static int pgcnt;
+static int goodebcnt;
+static struct timeval start, finish;
+static unsigned long next = 1;
+
+static inline unsigned int simple_rand(void)
+{
+	next = next * 1103515245 + 12345;
+	return (unsigned int)((next / 65536) % 32768);
+}
+
+static inline void simple_srand(unsigned long seed)
+{
+	next = seed;
+}
+
+static void set_random_data(unsigned char *buf, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len; ++i)
+		buf[i] = simple_rand();
+}
+
+static int erase_eraseblock(int ebnum)
+{
+	int err;
+	struct erase_info ei;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	memset(&ei, 0, sizeof(struct erase_info));
+	ei.mtd  = mtd;
+	ei.addr = addr;
+	ei.len  = mtd->erasesize;
+
+	err = mtd->erase(mtd, &ei);
+	if (err) {
+		printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+		return err;
+	}
+
+	if (ei.state == MTD_ERASE_FAILED) {
+		printk(PRINT_PREF "some erase error occurred at EB %d\n",
+		       ebnum);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int erase_whole_device(void)
+{
+	int err;
+	unsigned int i;
+
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = erase_eraseblock(i);
+		if (err)
+			return err;
+		cond_resched();
+	}
+	return 0;
+}
+
+static int write_eraseblock(int ebnum)
+{
+	size_t written = 0;
+	int err = 0;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	err = mtd->write(mtd, addr, mtd->erasesize, &written, iobuf);
+	if (err || written != mtd->erasesize) {
+		printk(PRINT_PREF "error: write failed at %#llx\n", addr);
+		if (!err)
+			err = -EINVAL;
+	}
+
+	return err;
+}
+
+static int write_eraseblock_by_page(int ebnum)
+{
+	size_t written = 0;
+	int i, err = 0;
+	loff_t addr = ebnum * mtd->erasesize;
+	void *buf = iobuf;
+
+	for (i = 0; i < pgcnt; i++) {
+		err = mtd->write(mtd, addr, pgsize, &written, buf);
+		if (err || written != pgsize) {
+			printk(PRINT_PREF "error: write failed at %#llx\n",
+			       addr);
+			if (!err)
+				err = -EINVAL;
+			break;
+		}
+		addr += pgsize;
+		buf += pgsize;
+	}
+
+	return err;
+}
+
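+/*
+ * Write an eraseblock two pages per request, finishing with a single-page
+ * write if the eraseblock has an odd number of pages.
+ */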
+static int write_eraseblock_by_2pages(int ebnum)
+{
+	size_t written = 0, sz = pgsize * 2;
+	int i, n = pgcnt / 2, err = 0;
+	loff_t addr = ebnum * mtd->erasesize;
+	void *buf = iobuf;
+
+	for (i = 0; i < n; i++) {
+		err = mtd->write(mtd, addr, sz, &written, buf);
+		if (err || written != sz) {
+			printk(PRINT_PREF "error: write failed at %#llx\n",
+			       addr);
+			if (!err)
+				err = -EINVAL;
+			return err;
+		}
+		addr += sz;
+		buf += sz;
+	}
+	if (pgcnt % 2) {
+		err = mtd->write(mtd, addr, pgsize, &written, buf);
+		if (err || written != pgsize) {
+			printk(PRINT_PREF "error: write failed at %#llx\n",
+			       addr);
+			if (!err)
+				err = -EINVAL;
+		}
+	}
+
+	return err;
+}
+
+static int read_eraseblock(int ebnum)
+{
+	size_t read = 0;
+	int err = 0;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf);
+	/* Ignore corrected ECC errors */
+	if (err == -EUCLEAN)
+		err = 0;
+	if (err || read != mtd->erasesize) {
+		printk(PRINT_PREF "error: read failed at %#llx\n", addr);
+		if (!err)
+			err = -EINVAL;
+	}
+
+	return err;
+}
+
+static int read_eraseblock_by_page(int ebnum)
+{
+	size_t read = 0;
+	int i, err = 0;
+	loff_t addr = ebnum * mtd->erasesize;
+	void *buf = iobuf;
+
+	for (i = 0; i < pgcnt; i++) {
+		err = mtd->read(mtd, addr, pgsize, &read, buf);
+		/* Ignore corrected ECC errors */
+		if (err == -EUCLEAN)
+			err = 0;
+		if (err || read != pgsize) {
+			printk(PRINT_PREF "error: read failed at %#llx\n",
+			       addr);
+			if (!err)
+				err = -EINVAL;
+			break;
+		}
+		addr += pgsize;
+		buf += pgsize;
+	}
+
+	return err;
+}
+
+static int read_eraseblock_by_2pages(int ebnum)
+{
+	size_t read = 0, sz = pgsize * 2;
+	int i, n = pgcnt / 2, err = 0;
+	loff_t addr = ebnum * mtd->erasesize;
+	void *buf = iobuf;
+
+	for (i = 0; i < n; i++) {
+		err = mtd->read(mtd, addr, sz, &read, buf);
+		/* Ignore corrected ECC errors */
+		if (err == -EUCLEAN)
+			err = 0;
+		if (err || read != sz) {
+			printk(PRINT_PREF "error: read failed at %#llx\n",
+			       addr);
+			if (!err)
+				err = -EINVAL;
+			return err;
+		}
+		addr += sz;
+		buf += sz;
+	}
+	if (pgcnt % 2) {
+		err = mtd->read(mtd, addr, pgsize, &read, buf);
+		/* Ignore corrected ECC errors */
+		if (err == -EUCLEAN)
+			err = 0;
+		if (err || read != pgsize) {
+			printk(PRINT_PREF "error: read failed at %#llx\n",
+			       addr);
+			if (!err)
+				err = -EINVAL;
+		}
+	}
+
+	return err;
+}
+
+static int is_block_bad(int ebnum)
+{
+	loff_t addr = ebnum * mtd->erasesize;
+	int ret;
+
+	ret = mtd->block_isbad(mtd, addr);
+	if (ret)
+		printk(PRINT_PREF "block %d is bad\n", ebnum);
+	return ret;
+}
+
+static inline void start_timing(void)
+{
+	do_gettimeofday(&start);
+}
+
+static inline void stop_timing(void)
+{
+	do_gettimeofday(&finish);
+}
+
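+/*
+ * Speed in KiB/s: the amount of data in one pass over all good eraseblocks
+ * divided by the elapsed wall-clock time.
+ */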
+static long calc_speed(void)
+{
+	long ms, k, speed;
+
+	ms = (finish.tv_sec - start.tv_sec) * 1000 +
+	     (finish.tv_usec - start.tv_usec) / 1000;
+	if (ms <= 0)
+		return 0;
+	k = goodebcnt * mtd->erasesize / 1024;
+	speed = (k * 1000) / ms;
+	return speed;
+}
+
+static int scan_for_bad_eraseblocks(void)
+{
+	int i, bad = 0;
+
+	bbt = kmalloc(ebcnt, GFP_KERNEL);
+	if (!bbt) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		return -ENOMEM;
+	}
+	memset(bbt, 0, ebcnt);
+
+	printk(PRINT_PREF "scanning for bad eraseblocks\n");
+	for (i = 0; i < ebcnt; ++i) {
+		bbt[i] = is_block_bad(i) ? 1 : 0;
+		if (bbt[i])
+			bad += 1;
+		cond_resched();
+	}
+	printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+	goodebcnt = ebcnt - bad;
+	return 0;
+}
+
+static int __init mtd_speedtest_init(void)
+{
+	int err, i;
+	long speed;
+	uint64_t tmp;
+
+	printk(KERN_INFO "\n");
+	printk(KERN_INFO "=================================================\n");
+	printk(PRINT_PREF "MTD device: %d\n", dev);
+
+	mtd = get_mtd_device(NULL, dev);
+	if (IS_ERR(mtd)) {
+		err = PTR_ERR(mtd);
+		printk(PRINT_PREF "error: cannot get MTD device\n");
+		return err;
+	}
+
+	if (mtd->writesize == 1) {
+		printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+		       "bytes.\n");
+		pgsize = 512;
+	} else
+		pgsize = mtd->writesize;
+
+	tmp = mtd->size;
+	do_div(tmp, mtd->erasesize);
+	ebcnt = tmp;
+	pgcnt = mtd->erasesize / pgsize;
+
+	printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+	       "page size %u, count of eraseblocks %u, pages per "
+	       "eraseblock %u, OOB size %u\n",
+	       (unsigned long long)mtd->size, mtd->erasesize,
+	       pgsize, ebcnt, pgcnt, mtd->oobsize);
+
+	err = -ENOMEM;
+	iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
+	if (!iobuf) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		goto out;
+	}
+
+	simple_srand(1);
+	set_random_data(iobuf, mtd->erasesize);
+
+	err = scan_for_bad_eraseblocks();
+	if (err)
+		goto out;
+
+	err = erase_whole_device();
+	if (err)
+		goto out;
+
+	/* Write all eraseblocks, 1 eraseblock at a time */
+	printk(PRINT_PREF "testing eraseblock write speed\n");
+	start_timing();
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = write_eraseblock(i);
+		if (err)
+			goto out;
+		cond_resched();
+	}
+	stop_timing();
+	speed = calc_speed();
+	printk(PRINT_PREF "eraseblock write speed is %ld KiB/s\n", speed);
+
+	/* Read all eraseblocks, 1 eraseblock at a time */
+	printk(PRINT_PREF "testing eraseblock read speed\n");
+	start_timing();
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = read_eraseblock(i);
+		if (err)
+			goto out;
+		cond_resched();
+	}
+	stop_timing();
+	speed = calc_speed();
+	printk(PRINT_PREF "eraseblock read speed is %ld KiB/s\n", speed);
+
+	err = erase_whole_device();
+	if (err)
+		goto out;
+
+	/* Write all eraseblocks, 1 page at a time */
+	printk(PRINT_PREF "testing page write speed\n");
+	start_timing();
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = write_eraseblock_by_page(i);
+		if (err)
+			goto out;
+		cond_resched();
+	}
+	stop_timing();
+	speed = calc_speed();
+	printk(PRINT_PREF "page write speed is %ld KiB/s\n", speed);
+
+	/* Read all eraseblocks, 1 page at a time */
+	printk(PRINT_PREF "testing page read speed\n");
+	start_timing();
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = read_eraseblock_by_page(i);
+		if (err)
+			goto out;
+		cond_resched();
+	}
+	stop_timing();
+	speed = calc_speed();
+	printk(PRINT_PREF "page read speed is %ld KiB/s\n", speed);
+
+	err = erase_whole_device();
+	if (err)
+		goto out;
+
+	/* Write all eraseblocks, 2 pages at a time */
+	printk(PRINT_PREF "testing 2 page write speed\n");
+	start_timing();
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = write_eraseblock_by_2pages(i);
+		if (err)
+			goto out;
+		cond_resched();
+	}
+	stop_timing();
+	speed = calc_speed();
+	printk(PRINT_PREF "2 page write speed is %ld KiB/s\n", speed);
+
+	/* Read all eraseblocks, 2 pages at a time */
+	printk(PRINT_PREF "testing 2 page read speed\n");
+	start_timing();
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = read_eraseblock_by_2pages(i);
+		if (err)
+			goto out;
+		cond_resched();
+	}
+	stop_timing();
+	speed = calc_speed();
+	printk(PRINT_PREF "2 page read speed is %ld KiB/s\n", speed);
+
+	/* Erase all eraseblocks */
+	printk(PRINT_PREF "Testing erase speed\n");
+	start_timing();
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = erase_eraseblock(i);
+		if (err)
+			goto out;
+		cond_resched();
+	}
+	stop_timing();
+	speed = calc_speed();
+	printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed);
+
+	printk(PRINT_PREF "finished\n");
+out:
+	kfree(iobuf);
+	kfree(bbt);
+	put_mtd_device(mtd);
+	if (err)
+		printk(PRINT_PREF "error %d occurred\n", err);
+	printk(KERN_INFO "=================================================\n");
+	return err;
+}
+module_init(mtd_speedtest_init);
+
+static void __exit mtd_speedtest_exit(void)
+{
+	return;
+}
+module_exit(mtd_speedtest_exit);
+
+MODULE_DESCRIPTION("Speed test module");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
new file mode 100644
index 0000000..6392047
--- /dev/null
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Test random reads, writes and erases on MTD device.
+ *
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ */
+
+#include <asm/div64.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#define PRINT_PREF KERN_INFO "mtd_stresstest: "
+
+static int dev;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static int count = 10000;
+module_param(count, int, S_IRUGO);
+MODULE_PARM_DESC(count, "Number of operations to do (default is 10000)");
+
+static struct mtd_info *mtd;
+static unsigned char *writebuf;
+static unsigned char *readbuf;
+static unsigned char *bbt;
+static int *offsets;
+
+static int pgsize;
+static int bufsize;
+static int ebcnt;
+static int pgcnt;
+static unsigned long next = 1;
+
+static inline unsigned int simple_rand(void)
+{
+	next = next * 1103515245 + 12345;
+	return (unsigned int)((next / 65536) % 32768);
+}
+
+static inline void simple_srand(unsigned long seed)
+{
+	next = seed;
+}
+
+static int rand_eb(void)
+{
+	int eb;
+
+again:
+	if (ebcnt < 32768)
+		eb = simple_rand();
+	else
+		eb = (simple_rand() << 15) | simple_rand();
+	/* Read or write up to 2 eraseblocks at a time - hence 'ebcnt - 1' */
+	eb %= (ebcnt - 1);
+	if (bbt[eb])
+		goto again;
+	return eb;
+}
+
+static int rand_offs(void)
+{
+	int offs;
+
+	if (bufsize < 32768)
+		offs = simple_rand();
+	else
+		offs = (simple_rand() << 15) | simple_rand();
+	offs %= bufsize;
+	return offs;
+}
+
+static int rand_len(int offs)
+{
+	int len;
+
+	if (bufsize < 32768)
+		len = simple_rand();
+	else
+		len = (simple_rand() << 15) | simple_rand();
+	len %= (bufsize - offs);
+	return len;
+}
+
+static int erase_eraseblock(int ebnum)
+{
+	int err;
+	struct erase_info ei;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	memset(&ei, 0, sizeof(struct erase_info));
+	ei.mtd  = mtd;
+	ei.addr = addr;
+	ei.len  = mtd->erasesize;
+
+	err = mtd->erase(mtd, &ei);
+	if (unlikely(err)) {
+		printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+		return err;
+	}
+
+	if (unlikely(ei.state == MTD_ERASE_FAILED)) {
+		printk(PRINT_PREF "some erase error occurred at EB %d\n",
+		       ebnum);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int is_block_bad(int ebnum)
+{
+	loff_t addr = ebnum * mtd->erasesize;
+	int ret;
+
+	ret = mtd->block_isbad(mtd, addr);
+	if (ret)
+		printk(PRINT_PREF "block %d is bad\n", ebnum);
+	return ret;
+}
+
+static int do_read(void)
+{
+	size_t read = 0;
+	int eb = rand_eb();
+	int offs = rand_offs();
+	int len = rand_len(offs), err;
+	loff_t addr;
+
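+	/*
+	 * If the following eraseblock is bad, keep the read within the chosen
+	 * block so that it never touches the bad one.
+	 */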
+	if (bbt[eb + 1]) {
+		if (offs >= mtd->erasesize)
+			offs -= mtd->erasesize;
+		if (offs + len > mtd->erasesize)
+			len = mtd->erasesize - offs;
+	}
+	addr = eb * mtd->erasesize + offs;
+	err = mtd->read(mtd, addr, len, &read, readbuf);
+	if (err == -EUCLEAN)
+		err = 0;
+	if (unlikely(err || read != len)) {
+		printk(PRINT_PREF "error: read failed at 0x%llx\n",
+		       (long long)addr);
+		if (!err)
+			err = -EINVAL;
+		return err;
+	}
+	return 0;
+}
+
+static int do_write(void)
+{
+	int eb = rand_eb(), offs, err, len;
+	size_t written = 0;
+	loff_t addr;
+
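+	/*
+	 * offsets[] tracks the next free offset in each eraseblock; a full
+	 * block is erased before reuse, and a write that would spill into the
+	 * next block either erases that block first or, if it is bad, is
+	 * clamped to the current block.
+	 */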
+	offs = offsets[eb];
+	if (offs >= mtd->erasesize) {
+		err = erase_eraseblock(eb);
+		if (err)
+			return err;
+		offs = offsets[eb] = 0;
+	}
+	len = rand_len(offs);
+	len = ((len + pgsize - 1) / pgsize) * pgsize;
+	if (offs + len > mtd->erasesize) {
+		if (bbt[eb + 1])
+			len = mtd->erasesize - offs;
+		else {
+			err = erase_eraseblock(eb + 1);
+			if (err)
+				return err;
+			offsets[eb + 1] = 0;
+		}
+	}
+	addr = eb * mtd->erasesize + offs;
+	err = mtd->write(mtd, addr, len, &written, writebuf);
+	if (unlikely(err || written != len)) {
+		printk(PRINT_PREF "error: write failed at 0x%llx\n",
+		       (long long)addr);
+		if (!err)
+			err = -EINVAL;
+		return err;
+	}
+	offs += len;
+	while (offs > mtd->erasesize) {
+		offsets[eb++] = mtd->erasesize;
+		offs -= mtd->erasesize;
+	}
+	offsets[eb] = offs;
+	return 0;
+}
+
+static int do_operation(void)
+{
+	if (simple_rand() & 1)
+		return do_read();
+	else
+		return do_write();
+}
+
+static int scan_for_bad_eraseblocks(void)
+{
+	int i, bad = 0;
+
+	bbt = kmalloc(ebcnt, GFP_KERNEL);
+	if (!bbt) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		return -ENOMEM;
+	}
+	memset(bbt, 0, ebcnt);
+
+	printk(PRINT_PREF "scanning for bad eraseblocks\n");
+	for (i = 0; i < ebcnt; ++i) {
+		bbt[i] = is_block_bad(i) ? 1 : 0;
+		if (bbt[i])
+			bad += 1;
+		cond_resched();
+	}
+	printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+	return 0;
+}
+
+static int __init mtd_stresstest_init(void)
+{
+	int err;
+	int i, op;
+	uint64_t tmp;
+
+	printk(KERN_INFO "\n");
+	printk(KERN_INFO "=================================================\n");
+	printk(PRINT_PREF "MTD device: %d\n", dev);
+
+	mtd = get_mtd_device(NULL, dev);
+	if (IS_ERR(mtd)) {
+		err = PTR_ERR(mtd);
+		printk(PRINT_PREF "error: cannot get MTD device\n");
+		return err;
+	}
+
+	if (mtd->writesize == 1) {
+		printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+		       "bytes.\n");
+		pgsize = 512;
+	} else
+		pgsize = mtd->writesize;
+
+	tmp = mtd->size;
+	do_div(tmp, mtd->erasesize);
+	ebcnt = tmp;
+	pgcnt = mtd->erasesize / pgsize;
+
+	printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+	       "page size %u, count of eraseblocks %u, pages per "
+	       "eraseblock %u, OOB size %u\n",
+	       (unsigned long long)mtd->size, mtd->erasesize,
+	       pgsize, ebcnt, pgcnt, mtd->oobsize);
+
+	/* Read or write up to 2 eraseblocks at a time */
+	bufsize = mtd->erasesize * 2;
+
+	err = -ENOMEM;
+	readbuf = vmalloc(bufsize);
+	writebuf = vmalloc(bufsize);
+	offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL);
+	if (!readbuf || !writebuf || !offsets) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		goto out;
+	}
+	for (i = 0; i < ebcnt; i++)
+		offsets[i] = mtd->erasesize;
+	simple_srand(current->pid);
+	for (i = 0; i < bufsize; i++)
+		writebuf[i] = simple_rand();
+
+	err = scan_for_bad_eraseblocks();
+	if (err)
+		goto out;
+
+	/* Do operations */
+	printk(PRINT_PREF "doing operations\n");
+	for (op = 0; op < count; op++) {
+		if ((op & 1023) == 0)
+			printk(PRINT_PREF "%d operations done\n", op);
+		err = do_operation();
+		if (err)
+			goto out;
+		cond_resched();
+	}
+	printk(PRINT_PREF "finished, %d operations done\n", op);
+
+out:
+	kfree(offsets);
+	kfree(bbt);
+	vfree(writebuf);
+	vfree(readbuf);
+	put_mtd_device(mtd);
+	if (err)
+		printk(PRINT_PREF "error %d occurred\n", err);
+	printk(KERN_INFO "=================================================\n");
+	return err;
+}
+module_init(mtd_stresstest_init);
+
+static void __exit mtd_stresstest_exit(void)
+{
+	return;
+}
+module_exit(mtd_stresstest_exit);
+
+MODULE_DESCRIPTION("Stress test module");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
new file mode 100644
index 0000000..5b88972
--- /dev/null
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Test sub-page read and write on MTD device.
+ * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
+ *
+ */
+
+#include <asm/div64.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/sched.h>
+
+#define PRINT_PREF KERN_INFO "mtd_subpagetest: "
+
+static int dev;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static struct mtd_info *mtd;
+static unsigned char *writebuf;
+static unsigned char *readbuf;
+static unsigned char *bbt;
+
+static int subpgsize;
+static int bufsize;
+static int ebcnt;
+static int pgcnt;
+static int errcnt;
+static unsigned long next = 1;
+
+static inline unsigned int simple_rand(void)
+{
+	next = next * 1103515245 + 12345;
+	return (unsigned int)((next / 65536) % 32768);
+}
+
+static inline void simple_srand(unsigned long seed)
+{
+	next = seed;
+}
+
+static void set_random_data(unsigned char *buf, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len; ++i)
+		buf[i] = simple_rand();
+}
+
+static inline void clear_data(unsigned char *buf, size_t len)
+{
+	memset(buf, 0, len);
+}
+
+static int erase_eraseblock(int ebnum)
+{
+	int err;
+	struct erase_info ei;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	memset(&ei, 0, sizeof(struct erase_info));
+	ei.mtd  = mtd;
+	ei.addr = addr;
+	ei.len  = mtd->erasesize;
+
+	err = mtd->erase(mtd, &ei);
+	if (err) {
+		printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+		return err;
+	}
+
+	if (ei.state == MTD_ERASE_FAILED) {
+		printk(PRINT_PREF "some erase error occurred at EB %d\n",
+		       ebnum);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int erase_whole_device(void)
+{
+	int err;
+	unsigned int i;
+
+	printk(PRINT_PREF "erasing whole device\n");
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = erase_eraseblock(i);
+		if (err)
+			return err;
+		cond_resched();
+	}
+	printk(PRINT_PREF "erased %u eraseblocks\n", i);
+	return 0;
+}
+
+static int write_eraseblock(int ebnum)
+{
+	size_t written = 0;
+	int err = 0;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	set_random_data(writebuf, subpgsize);
+	err = mtd->write(mtd, addr, subpgsize, &written, writebuf);
+	if (unlikely(err || written != subpgsize)) {
+		printk(PRINT_PREF "error: write failed at %#llx\n",
+		       (long long)addr);
+		if (written != subpgsize) {
+			printk(PRINT_PREF "  write size: %#x\n", subpgsize);
+			printk(PRINT_PREF "  written: %#zx\n", written);
+		}
+		return err ? err : -1;
+	}
+
+	addr += subpgsize;
+
+	set_random_data(writebuf, subpgsize);
+	err = mtd->write(mtd, addr, subpgsize, &written, writebuf);
+	if (unlikely(err || written != subpgsize)) {
+		printk(PRINT_PREF "error: write failed at %#llx\n",
+		       (long long)addr);
+		if (written != subpgsize) {
+			printk(PRINT_PREF "  write size: %#x\n", subpgsize);
+			printk(PRINT_PREF "  written: %#zx\n", written);
+		}
+		return err ? err : -1;
+	}
+
+	return err;
+}
+
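+/*
+ * Write the eraseblock using writes of increasing length: 1, 2, ... up to 32
+ * subpages per request, stopping when the end of the block is reached.
+ */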
+static int write_eraseblock2(int ebnum)
+{
+	size_t written = 0;
+	int err = 0, k;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	for (k = 1; k < 33; ++k) {
+		if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
+			break;
+		set_random_data(writebuf, subpgsize * k);
+		err = mtd->write(mtd, addr, subpgsize * k, &written, writebuf);
+		if (unlikely(err || written != subpgsize * k)) {
+			printk(PRINT_PREF "error: write failed at %#llx\n",
+			       (long long)addr);
+			if (written != subpgsize * k) {
+				printk(PRINT_PREF "  write size: %#x\n",
+				       subpgsize * k);
+				printk(PRINT_PREF "  written: %#08zx\n",
+				       written);
+			}
+			return err ? err : -1;
+		}
+		addr += subpgsize * k;
+	}
+
+	return err;
+}
+
+static void print_subpage(unsigned char *p)
+{
+	int i, j;
+
+	for (i = 0; i < subpgsize; ) {
+		for (j = 0; i < subpgsize && j < 32; ++i, ++j)
+			printk("%02x", *p++);
+		printk("\n");
+	}
+}
+
+static int verify_eraseblock(int ebnum)
+{
+	size_t read = 0;
+	int err = 0;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	set_random_data(writebuf, subpgsize);
+	clear_data(readbuf, subpgsize);
+	read = 0;
+	err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+	if (unlikely(err || read != subpgsize)) {
+		if (err == -EUCLEAN && read == subpgsize) {
+			printk(PRINT_PREF "ECC correction at %#llx\n",
+			       (long long)addr);
+			err = 0;
+		} else {
+			printk(PRINT_PREF "error: read failed at %#llx\n",
+			       (long long)addr);
+			return err ? err : -1;
+		}
+	}
+	if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
+		printk(PRINT_PREF "error: verify failed at %#llx\n",
+		       (long long)addr);
+		printk(PRINT_PREF "------------- written----------------\n");
+		print_subpage(writebuf);
+		printk(PRINT_PREF "------------- read ------------------\n");
+		print_subpage(readbuf);
+		printk(PRINT_PREF "-------------------------------------\n");
+		errcnt += 1;
+	}
+
+	addr += subpgsize;
+
+	set_random_data(writebuf, subpgsize);
+	clear_data(readbuf, subpgsize);
+	read = 0;
+	err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+	if (unlikely(err || read != subpgsize)) {
+		if (err == -EUCLEAN && read == subpgsize) {
+			printk(PRINT_PREF "ECC correction at %#llx\n",
+			       (long long)addr);
+			err = 0;
+		} else {
+			printk(PRINT_PREF "error: read failed at %#llx\n",
+			       (long long)addr);
+			return err ? err : -1;
+		}
+	}
+	if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
+		printk(PRINT_PREF "error: verify failed at %#llx\n",
+		       (long long)addr);
+		printk(PRINT_PREF "------------- written----------------\n");
+		print_subpage(writebuf);
+		printk(PRINT_PREF "------------- read ------------------\n");
+		print_subpage(readbuf);
+		printk(PRINT_PREF "-------------------------------------\n");
+		errcnt += 1;
+	}
+
+	return err;
+}
+
+static int verify_eraseblock2(int ebnum)
+{
+	size_t read = 0;
+	int err = 0, k;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	for (k = 1; k < 33; ++k) {
+		if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
+			break;
+		set_random_data(writebuf, subpgsize * k);
+		clear_data(readbuf, subpgsize * k);
+		read = 0;
+		err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf);
+		if (unlikely(err || read != subpgsize * k)) {
+			if (err == -EUCLEAN && read == subpgsize * k) {
+				printk(PRINT_PREF "ECC correction at %#llx\n",
+				       (long long)addr);
+				err = 0;
+			} else {
+				printk(PRINT_PREF "error: read failed at "
+				       "%#llx\n", (long long)addr);
+				return err ? err : -1;
+			}
+		}
+		if (unlikely(memcmp(readbuf, writebuf, subpgsize * k))) {
+			printk(PRINT_PREF "error: verify failed at %#llx\n",
+			       (long long)addr);
+			errcnt += 1;
+		}
+		addr += subpgsize * k;
+	}
+
+	return err;
+}
+
+static int verify_eraseblock_ff(int ebnum)
+{
+	uint32_t j;
+	size_t read = 0;
+	int err = 0;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	memset(writebuf, 0xff, subpgsize);
+	for (j = 0; j < mtd->erasesize / subpgsize; ++j) {
+		clear_data(readbuf, subpgsize);
+		read = 0;
+		err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+		if (unlikely(err || read != subpgsize)) {
+			if (err == -EUCLEAN && read == subpgsize) {
+				printk(PRINT_PREF "ECC correction at %#llx\n",
+				       (long long)addr);
+				err = 0;
+			} else {
+				printk(PRINT_PREF "error: read failed at "
+				       "%#llx\n", (long long)addr);
+				return err ? err : -1;
+			}
+		}
+		if (unlikely(memcmp(readbuf, writebuf, subpgsize))) {
+			printk(PRINT_PREF "error: verify 0xff failed at "
+			       "%#llx\n", (long long)addr);
+			errcnt += 1;
+		}
+		addr += subpgsize;
+	}
+
+	return err;
+}
+
+static int verify_all_eraseblocks_ff(void)
+{
+	int err;
+	unsigned int i;
+
+	printk(PRINT_PREF "verifying all eraseblocks for 0xff\n");
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = verify_eraseblock_ff(i);
+		if (err)
+			return err;
+		if (i % 256 == 0)
+			printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+		cond_resched();
+	}
+	printk(PRINT_PREF "verified %u eraseblocks\n", i);
+	return 0;
+}
+
+static int is_block_bad(int ebnum)
+{
+	loff_t addr = ebnum * mtd->erasesize;
+	int ret;
+
+	ret = mtd->block_isbad(mtd, addr);
+	if (ret)
+		printk(PRINT_PREF "block %d is bad\n", ebnum);
+	return ret;
+}
+
+static int scan_for_bad_eraseblocks(void)
+{
+	int i, bad = 0;
+
+	bbt = kmalloc(ebcnt, GFP_KERNEL);
+	if (!bbt) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		return -ENOMEM;
+	}
+	memset(bbt, 0, ebcnt);
+
+	printk(PRINT_PREF "scanning for bad eraseblocks\n");
+	for (i = 0; i < ebcnt; ++i) {
+		bbt[i] = is_block_bad(i) ? 1 : 0;
+		if (bbt[i])
+			bad += 1;
+		cond_resched();
+	}
+	printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+	return 0;
+}
+
+static int __init mtd_subpagetest_init(void)
+{
+	int err = 0;
+	uint32_t i;
+	uint64_t tmp;
+
+	printk(KERN_INFO "\n");
+	printk(KERN_INFO "=================================================\n");
+	printk(PRINT_PREF "MTD device: %d\n", dev);
+
+	mtd = get_mtd_device(NULL, dev);
+	if (IS_ERR(mtd)) {
+		err = PTR_ERR(mtd);
+		printk(PRINT_PREF "error: cannot get MTD device\n");
+		return err;
+	}
+
+	if (mtd->type != MTD_NANDFLASH) {
+		printk(PRINT_PREF "this test requires NAND flash\n");
+		goto out;
+	}
+
+	subpgsize = mtd->writesize >> mtd->subpage_sft;
+	printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
+	       "page size %u, subpage size %u, count of eraseblocks %u, "
+	       "pages per eraseblock %u, OOB size %u\n",
+	       (unsigned long long)mtd->size, mtd->erasesize,
+	       mtd->writesize, subpgsize, ebcnt, pgcnt, mtd->oobsize);
+
+	err = -ENOMEM;
+	bufsize = subpgsize * 32;
+	writebuf = kmalloc(bufsize, GFP_KERNEL);
+	if (!writebuf) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		goto out;
+	}
+	readbuf = kmalloc(bufsize, GFP_KERNEL);
+	if (!readbuf) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		goto out;
+	}
+
+	tmp = mtd->size;
+	do_div(tmp, mtd->erasesize);
+	ebcnt = tmp;
+	pgcnt = mtd->erasesize / mtd->writesize;
+
+	err = scan_for_bad_eraseblocks();
+	if (err)
+		goto out;
+
+	err = erase_whole_device();
+	if (err)
+		goto out;
+
+	printk(PRINT_PREF "writing whole device\n");
+	simple_srand(1);
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = write_eraseblock(i);
+		if (unlikely(err))
+			goto out;
+		if (i % 256 == 0)
+			printk(PRINT_PREF "written up to eraseblock %u\n", i);
+		cond_resched();
+	}
+	printk(PRINT_PREF "written %u eraseblocks\n", i);
+
+	simple_srand(1);
+	printk(PRINT_PREF "verifying all eraseblocks\n");
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = verify_eraseblock(i);
+		if (unlikely(err))
+			goto out;
+		if (i % 256 == 0)
+			printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+		cond_resched();
+	}
+	printk(PRINT_PREF "verified %u eraseblocks\n", i);
+
+	err = erase_whole_device();
+	if (err)
+		goto out;
+
+	err = verify_all_eraseblocks_ff();
+	if (err)
+		goto out;
+
+	/* Write all eraseblocks */
+	simple_srand(3);
+	printk(PRINT_PREF "writing whole device\n");
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = write_eraseblock2(i);
+		if (unlikely(err))
+			goto out;
+		if (i % 256 == 0)
+			printk(PRINT_PREF "written up to eraseblock %u\n", i);
+		cond_resched();
+	}
+	printk(PRINT_PREF "written %u eraseblocks\n", i);
+
+	/* Check all eraseblocks */
+	simple_srand(3);
+	printk(PRINT_PREF "verifying all eraseblocks\n");
+	for (i = 0; i < ebcnt; ++i) {
+		if (bbt[i])
+			continue;
+		err = verify_eraseblock2(i);
+		if (unlikely(err))
+			goto out;
+		if (i % 256 == 0)
+			printk(PRINT_PREF "verified up to eraseblock %u\n", i);
+		cond_resched();
+	}
+	printk(PRINT_PREF "verified %u eraseblocks\n", i);
+
+	err = erase_whole_device();
+	if (err)
+		goto out;
+
+	err = verify_all_eraseblocks_ff();
+	if (err)
+		goto out;
+
+	printk(PRINT_PREF "finished with %d errors\n", errcnt);
+
+out:
+	kfree(bbt);
+	kfree(readbuf);
+	kfree(writebuf);
+	put_mtd_device(mtd);
+	if (err)
+		printk(PRINT_PREF "error %d occurred\n", err);
+	printk(KERN_INFO "=================================================\n");
+	return err;
+}
+module_init(mtd_subpagetest_init);
+
+static void __exit mtd_subpagetest_exit(void)
+{
+	return;
+}
+module_exit(mtd_subpagetest_exit);
+
+MODULE_DESCRIPTION("Subpage test module");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
new file mode 100644
index 0000000..631a0ab
--- /dev/null
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -0,0 +1,530 @@
+/*
+ * Copyright (C) 2006-2008 Artem Bityutskiy
+ * Copyright (C) 2006-2008 Jarkko Lavinen
+ * Copyright (C) 2006-2008 Adrian Hunter
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; see the file COPYING. If not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Authors: Artem Bityutskiy, Jarkko Lavinen, Adrian Hunter
+ *
+ * WARNING: this test program may kill your flash and your device. Do not
+ * use it unless you know what you are doing. The authors are not responsible
+ * for any damage caused by this program.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/sched.h>
+
+#define PRINT_PREF KERN_INFO "mtd_torturetest: "
+#define RETRIES 3
+
+static int eb = 8;
+module_param(eb, int, S_IRUGO);
+MODULE_PARM_DESC(eb, "eraseblock number within the selected MTD device");
+
+static int ebcnt = 32;
+module_param(ebcnt, int, S_IRUGO);
+MODULE_PARM_DESC(ebcnt, "number of consecutive eraseblocks to torture");
+
+static int pgcnt;
+module_param(pgcnt, int, S_IRUGO);
+MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)");
+
+static int dev;
+module_param(dev, int, S_IRUGO);
+MODULE_PARM_DESC(dev, "MTD device number to use");
+
+static int gran = 512;
+module_param(gran, int, S_IRUGO);
+MODULE_PARM_DESC(gran, "how often the status information should be printed");
+
+static int check = 1;
+module_param(check, int, S_IRUGO);
+MODULE_PARM_DESC(check, "if the written data should be checked");
+
+static unsigned int cycles_count;
+module_param(cycles_count, uint, S_IRUGO);
+MODULE_PARM_DESC(cycles_count, "how many erase cycles to do "
+			       "(infinite by default)");
+
+static struct mtd_info *mtd;
+
+/* This buffer contains 0x555555...0xAAAAAA... pattern */
+static unsigned char *patt_5A5;
+/* This buffer contains 0xAAAAAA...0x555555... pattern */
+static unsigned char *patt_A5A;
+/* This buffer contains all 0xFF bytes */
+static unsigned char *patt_FF;
+/* This is a temporary buffer used when checking data */
+static unsigned char *check_buf;
+/* How many erase cycles were done */
+static unsigned int erase_cycles;
+
+static int pgsize;
+static struct timeval start, finish;
+
+static void report_corrupt(unsigned char *read, unsigned char *written);
+
+static inline void start_timing(void)
+{
+	do_gettimeofday(&start);
+}
+
+static inline void stop_timing(void)
+{
+	do_gettimeofday(&finish);
+}
+
+/*
+ * Erase eraseblock number @ebnum.
+ */
+static inline int erase_eraseblock(int ebnum)
+{
+	int err;
+	struct erase_info ei;
+	loff_t addr = ebnum * mtd->erasesize;
+
+	memset(&ei, 0, sizeof(struct erase_info));
+	ei.mtd  = mtd;
+	ei.addr = addr;
+	ei.len  = mtd->erasesize;
+
+	err = mtd->erase(mtd, &ei);
+	if (err) {
+		printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
+		return err;
+	}
+
+	if (ei.state == MTD_ERASE_FAILED) {
+		printk(PRINT_PREF "some erase error occurred at EB %d\n",
+		       ebnum);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Check that the contents of eraseblock number @ebnum are equivalent to the
+ * @buf buffer.
+ */
+static inline int check_eraseblock(int ebnum, unsigned char *buf)
+{
+	int err, retries = 0;
+	size_t read = 0;
+	loff_t addr = ebnum * mtd->erasesize;
+	size_t len = mtd->erasesize;
+
+	if (pgcnt) {
+		addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
+		len = pgcnt * pgsize;
+	}
+
+retry:
+	err = mtd->read(mtd, addr, len, &read, check_buf);
+	if (err == -EUCLEAN)
+		printk(PRINT_PREF "single bit flip occurred at EB %d "
+		       "MTD reported that it was fixed.\n", ebnum);
+	else if (err) {
+		printk(PRINT_PREF "error %d while reading EB %d, "
+		       "read %zd\n", err, ebnum, read);
+		return err;
+	}
+
+	if (read != len) {
+		printk(PRINT_PREF "failed to read %zd bytes from EB %d, "
+		       "read only %zd, but no error reported\n",
+		       len, ebnum, read);
+		return -EIO;
+	}
+
+	if (memcmp(buf, check_buf, len)) {
+		printk(PRINT_PREF "read wrong data from EB %d\n", ebnum);
+		report_corrupt(check_buf, buf);
+
+		if (retries++ < RETRIES) {
+			/* Try read again */
+			yield();
+			printk(PRINT_PREF "re-try reading data from EB %d\n",
+			       ebnum);
+			goto retry;
+		} else {
+			printk(PRINT_PREF "retried %d times, still errors, "
+			       "give-up\n", RETRIES);
+			return -EINVAL;
+		}
+	}
+
+	if (retries != 0)
+		printk(PRINT_PREF "only attempt number %d was OK (!!!)\n",
+		       retries);
+
+	return 0;
+}
+
+static inline int write_pattern(int ebnum, void *buf)
+{
+	int err;
+	size_t written = 0;
+	loff_t addr = ebnum * mtd->erasesize;
+	size_t len = mtd->erasesize;
+
+	if (pgcnt) {
+		addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
+		len = pgcnt * pgsize;
+	}
+	err = mtd->write(mtd, addr, len, &written, buf);
+	if (err) {
+		printk(PRINT_PREF "error %d while writing EB %d, written %zd"
+		      " bytes\n", err, ebnum, written);
+		return err;
+	}
+	if (written != len) {
+		printk(PRINT_PREF "written only %zd bytes of %zd, but no error"
+		       " reported\n", written, len);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int __init tort_init(void)
+{
+	int err = 0, i, infinite = !cycles_count;
+	int bad_ebs[ebcnt];
+
+	printk(KERN_INFO "\n");
+	printk(KERN_INFO "=================================================\n");
+	printk(PRINT_PREF "Warning: this program is trying to wear out your "
+	       "flash, stop it if this is not wanted.\n");
+	printk(PRINT_PREF "MTD device: %d\n", dev);
+	printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n",
+	       ebcnt, eb, eb + ebcnt - 1, dev);
+	if (pgcnt)
+		printk(PRINT_PREF "torturing just %d pages per eraseblock\n",
+			pgcnt);
+	printk(PRINT_PREF "write verify %s\n", check ? "enabled" : "disabled");
+
+	mtd = get_mtd_device(NULL, dev);
+	if (IS_ERR(mtd)) {
+		err = PTR_ERR(mtd);
+		printk(PRINT_PREF "error: cannot get MTD device\n");
+		return err;
+	}
+
+	if (mtd->writesize == 1) {
+		printk(PRINT_PREF "not NAND flash, assume page size is 512 "
+		       "bytes.\n");
+		pgsize = 512;
+	} else
+		pgsize = mtd->writesize;
+
+	if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) {
+		printk(PRINT_PREF "error: invalid pgcnt value %d\n", pgcnt);
+		goto out_mtd;
+	}
+
+	err = -ENOMEM;
+	patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
+	if (!patt_5A5) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		goto out_mtd;
+	}
+
+	patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
+	if (!patt_A5A) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		goto out_patt_5A5;
+	}
+
+	patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
+	if (!patt_FF) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		goto out_patt_A5A;
+	}
+
+	check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
+	if (!check_buf) {
+		printk(PRINT_PREF "error: cannot allocate memory\n");
+		goto out_patt_FF;
+	}
+
+	err = 0;
+
+	/* Initialize patterns */
+	memset(patt_FF, 0xFF, mtd->erasesize);
+	for (i = 0; i < mtd->erasesize / pgsize; i++) {
+		if (!(i & 1)) {
+			memset(patt_5A5 + i * pgsize, 0x55, pgsize);
+			memset(patt_A5A + i * pgsize, 0xAA, pgsize);
+		} else {
+			memset(patt_5A5 + i * pgsize, 0xAA, pgsize);
+			memset(patt_A5A + i * pgsize, 0x55, pgsize);
+		}
+	}
+
+	/*
+	 * Check if there is a bad eraseblock among those we are going to test.
+	 */
+	memset(&bad_ebs[0], 0, sizeof(int) * ebcnt);
+	if (mtd->block_isbad) {
+		for (i = eb; i < eb + ebcnt; i++) {
+			err = mtd->block_isbad(mtd,
+					       (loff_t)i * mtd->erasesize);
+
+			if (err < 0) {
+				printk(PRINT_PREF "block_isbad() returned %d "
+				       "for EB %d\n", err, i);
+				goto out;
+			}
+
+			if (err) {
+				printk("EB %d is bad. Skip it.\n", i);
+				bad_ebs[i - eb] = 1;
+			}
+		}
+	}
+
+	start_timing();
+	while (1) {
+		int i;
+		void *patt;
+
+		/* Erase all eraseblocks */
+		for (i = eb; i < eb + ebcnt; i++) {
+			if (bad_ebs[i - eb])
+				continue;
+			err = erase_eraseblock(i);
+			if (err)
+				goto out;
+			cond_resched();
+		}
+
+		/* Check if the eraseblocks contain only 0xFF bytes */
+		if (check) {
+			for (i = eb; i < eb + ebcnt; i++) {
+				if (bad_ebs[i - eb])
+					continue;
+				err = check_eraseblock(i, patt_FF);
+				if (err) {
+					printk(PRINT_PREF "verify failed"
+					       " for 0xFF... pattern\n");
+					goto out;
+				}
+				cond_resched();
+			}
+		}
+
+		/* Write the pattern */
+		for (i = eb; i < eb + ebcnt; i++) {
+			if (bad_ebs[i - eb])
+				continue;
+			if ((eb + erase_cycles) & 1)
+				patt = patt_5A5;
+			else
+				patt = patt_A5A;
+			err = write_pattern(i, patt);
+			if (err)
+				goto out;
+			cond_resched();
+		}
+
+		/* Verify what we wrote */
+		if (check) {
+			for (i = eb; i < eb + ebcnt; i++) {
+				if (bad_ebs[i - eb])
+					continue;
+				if ((eb + erase_cycles) & 1)
+					patt = patt_5A5;
+				else
+					patt = patt_A5A;
+				err = check_eraseblock(i, patt);
+				if (err) {
+					printk(PRINT_PREF "verify failed for %s"
+					       " pattern\n",
+					       ((eb + erase_cycles) & 1) ?
+					       "0x55AA55..." : "0xAA55AA...");
+					goto out;
+				}
+				cond_resched();
+			}
+		}
+
+		erase_cycles += 1;
+
+		if (erase_cycles % gran == 0) {
+			long ms;
+
+			stop_timing();
+			ms = (finish.tv_sec - start.tv_sec) * 1000 +
+			     (finish.tv_usec - start.tv_usec) / 1000;
+			printk(PRINT_PREF "%08u erase cycles done, took %lu "
+			       "milliseconds (%lu seconds)\n",
+			       erase_cycles, ms, ms / 1000);
+			start_timing();
+		}
+
+		if (!infinite && --cycles_count == 0)
+			break;
+	}
+out:
+
+	printk(PRINT_PREF "finished after %u erase cycles\n",
+	       erase_cycles);
+	kfree(check_buf);
+out_patt_FF:
+	kfree(patt_FF);
+out_patt_A5A:
+	kfree(patt_A5A);
+out_patt_5A5:
+	kfree(patt_5A5);
+out_mtd:
+	put_mtd_device(mtd);
+	if (err)
+		printk(PRINT_PREF "error %d occurred during torturing\n", err);
+	printk(KERN_INFO "=================================================\n");
+	return err;
+}
+module_init(tort_init);
+
+static void __exit tort_exit(void)
+{
+	return;
+}
+module_exit(tort_exit);
+
+static int countdiffs(unsigned char *buf, unsigned char *check_buf,
+		      unsigned offset, unsigned len, unsigned *bytesp,
+		      unsigned *bitsp);
+static void print_bufs(unsigned char *read, unsigned char *written, int start,
+		       int len);
+
+/*
+ * Report the detailed information about how the read EB differs from what was
+ * written.
+ */
+static void report_corrupt(unsigned char *read, unsigned char *written)
+{
+	int i;
+	int bytes, bits, pages, first;
+	int offset, len;
+	size_t check_len = mtd->erasesize;
+
+	if (pgcnt)
+		check_len = pgcnt * pgsize;
+
+	bytes = bits = pages = 0;
+	for (i = 0; i < check_len; i += pgsize)
+		if (countdiffs(written, read, i, pgsize, &bytes,
+			       &bits) >= 0)
+			pages++;
+
+	printk(PRINT_PREF "verify fails on %d pages, %d bytes/%d bits\n",
+	       pages, bytes, bits);
+	printk(PRINT_PREF "The following is a list of all differences between"
+	       " what was read from flash and what was expected\n");
+
+	for (i = 0; i < check_len; i += pgsize) {
+		cond_resched();
+		bytes = bits = 0;
+		first = countdiffs(written, read, i, pgsize, &bytes,
+				   &bits);
+		if (first < 0)
+			continue;
+
+		printk("-------------------------------------------------------"
+		       "----------------------------------\n");
+
+		printk(PRINT_PREF "Page %zd has %d bytes/%d bits failing verify,"
+		       " starting at offset 0x%x\n",
+		       (mtd->erasesize - check_len + i) / pgsize,
+		       bytes, bits, first);
+
+		offset = first & ~0x7;
+		len = ((first + bytes) | 0x7) + 1 - offset;
+
+		print_bufs(read, written, offset, len);
+	}
+}
+
+static void print_bufs(unsigned char *read, unsigned char *written, int start,
+		       int len)
+{
+	int i = 0, j1, j2;
+	char *diff;
+
+	printk("Offset       Read                          Written\n");
+	while (i < len) {
+		printk("0x%08x: ", start + i);
+		diff = "   ";
+		for (j1 = 0; j1 < 8 && i + j1 < len; j1++) {
+			printk(" %02x", read[start + i + j1]);
+			if (read[start + i + j1] != written[start + i + j1])
+				diff = "***";
+		}
+
+		while (j1 < 8) {
+			printk(" ");
+			j1 += 1;
+		}
+
+		printk("  %s ", diff);
+
+		for (j2 = 0; j2 < 8 && i + j2 < len; j2++)
+			printk(" %02x", written[start + i + j2]);
+		printk("\n");
+		i += 8;
+	}
+}
+
+/*
+ * Count the number of differing bytes and bits and return the first differing
+ * offset.
+ */
+static int countdiffs(unsigned char *buf, unsigned char *check_buf,
+		      unsigned offset, unsigned len, unsigned *bytesp,
+		      unsigned *bitsp)
+{
+	unsigned i, bit;
+	int first = -1;
+
+	for (i = offset; i < offset + len; i++)
+		if (buf[i] != check_buf[i]) {
+			first = i;
+			break;
+		}
+
+	while (i < offset + len) {
+		if (buf[i] != check_buf[i]) {
+			(*bytesp)++;
+			bit = 1;
+			while (bit < 256) {
+				if ((buf[i] & bit) != (check_buf[i] & bit))
+					(*bitsp)++;
+				bit <<= 1;
+			}
+		}
+		i++;
+	}
+
+	return first;
+}
+
+MODULE_DESCRIPTION("Eraseblock torturing module");
+MODULE_AUTHOR("Artem Bityutskiy, Jarkko Lavinen, Adrian Hunter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 7caf22c..9082768 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -561,7 +561,7 @@
 	 */
 
 	ubi->peb_size   = ubi->mtd->erasesize;
-	ubi->peb_count  = ubi->mtd->size / ubi->mtd->erasesize;
+	ubi->peb_count  = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
 	ubi->flash_size = ubi->mtd->size;
 
 	if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 048a606..25def34 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -717,7 +717,7 @@
  * to the real data size, although the @buf buffer has to contain the
  * alignment. In all other cases, @len has to be aligned.
  *
- * It is prohibited to write more then once to logical eraseblocks of static
+ * It is prohibited to write more than once to logical eraseblocks of static
  * volumes. This function returns zero in case of success and a negative error
  * code in case of failure.
  */
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 605812b..6dd4f5e 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -215,7 +215,8 @@
 	struct ubi_volume *vol;
 	struct ubi_device *ubi;
 
-	dbg_gen("erase %u bytes at offset %u", instr->len, instr->addr);
+	dbg_gen("erase %llu bytes at offset %llu", (unsigned long long)instr->len,
+		 (unsigned long long)instr->addr);
 
 	if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize)
 		return -EINVAL;
@@ -223,11 +224,11 @@
 	if (instr->len < 0 || instr->addr + instr->len > mtd->size)
 		return -EINVAL;
 
-	if (instr->addr % mtd->writesize || instr->len % mtd->writesize)
+	if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
 		return -EINVAL;
 
-	lnum = instr->addr / mtd->erasesize;
-	count = instr->len / mtd->erasesize;
+	lnum = mtd_div_by_eb(instr->addr, mtd);
+	count = mtd_div_by_eb(instr->len, mtd);
 
 	vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
 	ubi = vol->ubi;
@@ -255,7 +256,7 @@
 
 out_err:
 	instr->state = MTD_ERASE_FAILED;
-	instr->fail_addr = lnum * mtd->erasesize;
+	instr->fail_addr = (long long)lnum * mtd->erasesize;
 	return err;
 }
 
@@ -294,7 +295,7 @@
 	 * bytes.
 	 */
 	if (vol->vol_type == UBI_DYNAMIC_VOLUME)
-		mtd->size = vol->usable_leb_size * vol->reserved_pebs;
+		mtd->size = (long long)vol->usable_leb_size * vol->reserved_pebs;
 	else
 		mtd->size = vol->used_bytes;
 
@@ -304,8 +305,8 @@
 		return -ENFILE;
 	}
 
-	dbg_gen("added mtd%d (\"%s\"), size %u, EB size %u",
-		mtd->index, mtd->name, mtd->size, mtd->erasesize);
+	dbg_gen("added mtd%d (\"%s\"), size %llu, EB size %u",
+		mtd->index, mtd->name, (unsigned long long)mtd->size, mtd->erasesize);
 	return 0;
 }
 
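
The (long long) casts added in this file matter because mtd->size and instr->fail_addr are now 64-bit while usable_leb_size, reserved_pebs, lnum and erasesize are 32-bit: without widening one operand first, the multiplication is carried out in 32 bits and wraps before the result is stored. A small user-space illustration with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t leb_size = 128 * 1024;	/* 128 KiB logical eraseblock */
	uint32_t reserved = 40000;	/* enough LEBs for a ~4.9 GiB volume */

	uint64_t wrong = leb_size * reserved;		/* 32-bit multiply, wraps */
	uint64_t right = (uint64_t)leb_size * reserved;	/* widened before multiplying */

	printf("wrong: %llu\nright: %llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}
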
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index a74118c..fe81039 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -465,7 +465,7 @@
  * This function synchronously erases physical eraseblock @pnum. If @torture
  * flag is not zero, the physical eraseblock is checked by means of writing
  * different patterns to it and reading them back. If the torturing is enabled,
- * the physical eraseblock is erased more then once.
+ * the physical eraseblock is erased more than once.
  *
  * This function returns the number of erasures made in case of success, %-EIO
  * if the erasure failed or the torturing test failed, and other negative error
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 5d9bcf1..4abbe57 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -564,7 +564,7 @@
  * @dtype: expected data type
  *
  * This function maps an un-mapped logical eraseblock @lnum to a physical
- * eraseblock. This means, that after a successfull invocation of this
+ * eraseblock. This means, that after a successful invocation of this
  * function the logical eraseblock @lnum will be empty (contain only %0xFF
  * bytes) and be mapped to a physical eraseblock, even if an unclean reboot
  * happens.
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 41d47e1..ecde202 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -478,7 +478,7 @@
 			return 0;
 		} else {
 			/*
-			 * This logical eraseblock is older then the one found
+			 * This logical eraseblock is older than the one found
 			 * previously.
 			 */
 			if (cmp_res & 4)
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 2ad9404..8419fdc 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -135,7 +135,7 @@
  * The erase counter header takes 64 bytes and has a plenty of unused space for
  * future usage. The unused fields are zeroed. The @version field is used to
  * indicate the version of UBI implementation which is supposed to be able to
- * work with this UBI image. If @version is greater then the current UBI
+ * work with this UBI image. If @version is greater than the current UBI
  * version, the image is rejected. This may be useful in future if something
  * is changed radically. This field is duplicated in the volume identifier
  * header.
@@ -187,7 +187,7 @@
  * (sequence number) is used to distinguish between older and newer versions of
  * logical eraseblocks.
  *
- * There are 2 situations when there may be more then one physical eraseblock
+ * There are 2 situations when there may be more than one physical eraseblock
  * corresponding to the same logical eraseblock, i.e., having the same @vol_id
  * and @lnum values in the volume identifier header. Suppose we have a logical
  * eraseblock L and it is mapped to the physical eraseblock P.
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 333c894..1afc61e 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -577,7 +577,7 @@
 		if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
 			/* Auto re-size flag may be set only for one volume */
 			if (ubi->autoresize_vol_id != -1) {
-				ubi_err("more then one auto-resize volume (%d "
+				ubi_err("more than one auto-resize volume (%d "
 					"and %d)", ubi->autoresize_vol_id, i);
 				kfree(vol);
 				return -EINVAL;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 14901cb..891534f 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -128,7 +128,7 @@
  * situation when the picked physical eraseblock is constantly erased after the
  * data is written to it. So, we have a constant which limits the highest erase
  * counter of the free physical eraseblock to pick. Namely, the WL sub-system
- * does not pick eraseblocks with erase counter greater then the lowest erase
+ * does not pick eraseblocks with erase counter greater than the lowest erase
  * counter plus %WL_FREE_MAX_DIFF.
  */
 #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
@@ -917,7 +917,7 @@
 		/*
 		 * We schedule wear-leveling only if the difference between the
 		 * lowest erase counter of used physical eraseblocks and a high
-		 * erase counter of free physical eraseblocks is greater then
+		 * erase counter of free physical eraseblocks is greater than
 		 * %UBI_WL_THRESHOLD.
 		 */
 		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index c092c39..5b91a85 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -177,6 +177,7 @@
 	.ndo_get_stats		= eip_get_stats,
 	.ndo_set_multicast_list = eip_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller 	= eip_poll,
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 665e7fd..cdbbb62 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -3109,6 +3109,8 @@
 	struct vortex_private *vp = netdev_priv(dev);
 	void __iomem *ioaddr = vp->ioaddr;
 
+	device_set_wakeup_enable(vp->gendev, vp->enable_wol);
+
 	if (vp->enable_wol) {
 		/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
 		EL3WINDOW(7);
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index dd7ac82..4e19ae3 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1821,6 +1821,7 @@
 	.ndo_open		= cp_open,
 	.ndo_stop		= cp_close,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_set_multicast_list	= cp_set_rx_mode,
 	.ndo_get_stats		= cp_get_stats,
 	.ndo_do_ioctl		= cp_ioctl,
@@ -1832,6 +1833,7 @@
 #ifdef BROKEN
 	.ndo_change_mtu		= cp_change_mtu,
 #endif
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= cp_poll_controller,
 #endif
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index fe370f8..a5b2420 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -917,6 +917,7 @@
 	.ndo_stop		= rtl8139_close,
 	.ndo_get_stats		= rtl8139_get_stats,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_start_xmit		= rtl8139_start_xmit,
 	.ndo_set_multicast_list	= rtl8139_set_rx_mode,
 	.ndo_do_ioctl		= netdev_ioctl,
@@ -924,7 +925,6 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= rtl8139_poll_controller,
 #endif
-
 };
 
 static int __devinit rtl8139_init_one (struct pci_dev *pdev,
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index fbe609a..ec3e22e 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -63,6 +63,7 @@
 	.ndo_get_stats		= ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ei_poll,
diff --git a/drivers/net/8390p.c b/drivers/net/8390p.c
index ee70b35..da863c9 100644
--- a/drivers/net/8390p.c
+++ b/drivers/net/8390p.c
@@ -68,6 +68,7 @@
 	.ndo_get_stats		= eip_get_stats,
 	.ndo_set_multicast_list = eip_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= eip_poll,
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 9a18270..9fe8cb7 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -830,7 +830,7 @@
 
 config BFIN_MAC
 	tristate "Blackfin on-chip MAC support"
-	depends on NET_ETHERNET && (BF526 || BF527 || BF536 || BF537)
+	depends on NET_ETHERNET && (BF516 || BF518 || BF526 || BF527 || BF536 || BF537)
 	select CRC32
 	select MII
 	select PHYLIB
@@ -1600,7 +1600,7 @@
 	  old RX-reset behavior.  If unsure, say N.
 
 config R6040
-	tristate "RDC R6040 Fast Ethernet Adapter support (EXPERIMENTAL)"
+	tristate "RDC R6040 Fast Ethernet Adapter support"
 	depends on NET_PCI && PCI
 	select CRC32
 	select MII
@@ -2614,6 +2614,8 @@
 
 source "drivers/net/wireless/Kconfig"
 
+source "drivers/net/wimax/Kconfig"
+
 source "drivers/net/usb/Kconfig"
 
 source "drivers/net/pcmcia/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e5c34b4..a3c5c00 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -263,3 +263,4 @@
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
 obj-$(CONFIG_SFC) += sfc/
 
+obj-$(CONFIG_WIMAX) += wimax/
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 5b396ff..9589d620 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -460,6 +460,7 @@
 	.ndo_get_stats		= ace_get_stats,
 	.ndo_start_xmit		= ace_start_xmit,
 	.ndo_set_multicast_list	= ace_set_multicast_list,
+	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= ace_set_mac_addr,
 	.ndo_change_mtu		= ace_change_mtu,
 #if ACENIC_DO_VLAN
diff --git a/drivers/net/acenic_firmware.h b/drivers/net/acenic_firmware.h
deleted file mode 100644
index fd41f78..0000000
--- a/drivers/net/acenic_firmware.h
+++ /dev/null
@@ -1,9456 +0,0 @@
-/*
- * Declare these here even if Tigon I support is disabled to avoid
- * the compiler complaining about undefined symbols.
- */
-#define tigonFwReleaseMajor 0xc
-#define tigonFwReleaseMinor 0x4
-#define tigonFwReleaseFix 0xb
-#define tigonFwStartAddr 0x00004000
-#define tigonFwTextAddr 0x00004000
-#define tigonFwTextLen 0x11140
-#define tigonFwRodataAddr 0x00015140
-#define tigonFwRodataLen 0xac0
-#define tigonFwDataAddr 0x00015c20
-#define tigonFwDataLen 0x170
-#define tigonFwSbssAddr 0x00015d90
-#define tigonFwSbssLen 0x38
-#define tigonFwBssAddr 0x00015dd0
-#define tigonFwBssLen 0x2080
-#ifdef CONFIG_ACENIC_OMIT_TIGON_I
-#define tigonFwText NULL
-#define tigonFwData NULL
-#define tigonFwRodata NULL
-#else
-/* Generated by genfw.c */
-static u32 tigonFwText[(MAX_TEXT_LEN/4) + 1] __devinitdata = {
-0x10000003,
-0x0, 0xd, 0xd, 0x3c1d0001,
-0x8fbd5c54, 0x3a0f021, 0x3c100000, 0x26104000,
-0xc00100c, 0x0, 0xd, 0x27bdffd8,
-0x3c1cc000, 0x3c1b0013, 0x377bd800, 0xd021,
-0x3c170013, 0x36f75418, 0x2e02021, 0x340583e8,
-0xafbf0024, 0xc002488, 0xafb00020, 0xc0023e8,
-0x0, 0x3c040001, 0x248451a4, 0x24050001,
-0x2e03021, 0x3821, 0x3c100001, 0x26107e50,
-0xafb00010, 0xc002403, 0xafbb0014, 0x3c02000f,
-0x3442ffff, 0x2021024, 0x362102b, 0x10400009,
-0x24050003, 0x3c040001, 0x248451b0, 0x2003021,
-0x3603821, 0x3c020010, 0xafa20010, 0xc002403,
-0xafa00014, 0x2021, 0x3405c000, 0x3c010001,
-0x370821, 0xa02083b0, 0x3c010001, 0x370821,
-0xa02083b2, 0x3c010001, 0x370821, 0xa02083b3,
-0x3c010001, 0x370821, 0xac2083b4, 0xa2e004d8,
-0x418c0, 0x24840001, 0x771021, 0xac40727c,
-0x771021, 0xac407280, 0x2e31021, 0xa445727c,
-0x2c820020, 0x1440fff7, 0x418c0, 0x2021,
-0x3405c000, 0x418c0, 0x24840001, 0x771021,
-0xac40737c, 0x771021, 0xac407380, 0x2e31021,
-0xa445737c, 0x2c820080, 0x5440fff7, 0x418c0,
-0xaf800054, 0xaf80011c, 0x8f820044, 0x34420040,
-0xaf820044, 0x8f820044, 0x34420020, 0xaf820044,
-0x8f420218, 0x30420002, 0x10400009, 0x0,
-0x8f420220, 0x3c030002, 0x34630004, 0x431025,
-0xaee204c4, 0x8f42021c, 0x8001074, 0x34420004,
-0x8f420220, 0x3c030002, 0x34630006, 0x431025,
-0xaee204c4, 0x8f42021c, 0x34420006, 0xaee204cc,
-0x8f420218, 0x30420010, 0x1040000a, 0x0,
-0x8f42021c, 0x34420004, 0xaee204c8, 0x8f420220,
-0x3c03000a, 0x34630004, 0x431025, 0x800108a,
-0xaee204c0, 0x8f420220, 0x3c03000a, 0x34630006,
-0x431025, 0xaee204c0, 0x8f42021c, 0x34420006,
-0xaee204c8, 0x8f420218, 0x30420200, 0x10400003,
-0x24020001, 0x8001091, 0xa2e27248, 0xa2e07248,
-0x24020001, 0xaf8200a0, 0xaf8200b0, 0x8f830054,
-0x8f820054, 0x8001099, 0x24630064, 0x8f820054,
-0x621023, 0x2c420065, 0x1440fffc, 0x0,
-0xaf800044, 0x8f420208, 0x8f43020c, 0xaee20010,
-0xaee30014, 0x8ee40010, 0x8ee50014, 0x26e20030,
-0xaee20028, 0x24020490, 0xaee20018, 0xaf840090,
-0xaf850094, 0x8ee20028, 0xaf8200b4, 0x96e2001a,
-0xaf82009c, 0x8f8200b0, 0x8ee304cc, 0x431025,
-0xaf8200b0, 0x8f8200b0, 0x30420004, 0x1440fffd,
-0x0, 0x8ee20450, 0x8ee30454, 0xaee304fc,
-0x8ee204fc, 0x2442e000, 0x2c422001, 0x1440000d,
-0x26e40030, 0x8ee20450, 0x8ee30454, 0x3c040001,
-0x248451bc, 0x3c050001, 0xafa00010, 0xafa00014,
-0x8ee704fc, 0x34a5f000, 0xc002403, 0x603021,
-0x26e40030, 0xc002488, 0x24050400, 0x27440080,
-0xc002488, 0x24050080, 0x26e4777c, 0xc002488,
-0x24050400, 0x8f42025c, 0x26e40094, 0xaee20060,
-0x8f420260, 0x27450200, 0x24060008, 0xaee20068,
-0x24020006, 0xc00249a, 0xaee20064, 0x3c023b9a,
-0x3442ca00, 0x2021, 0x24030002, 0xaee30074,
-0xaee30070, 0xaee2006c, 0x240203e8, 0xaee20104,
-0x24020001, 0xaee30100, 0xaee2010c, 0x3c030001,
-0x641821, 0x90635c20, 0x2e41021, 0x24840001,
-0xa043009c, 0x2c82000f, 0x1440fff8, 0x0,
-0x8f820040, 0x2e41821, 0x24840001, 0x21702,
-0x24420030, 0xa062009c, 0x2e41021, 0xa040009c,
-0x96e2046a, 0x30420003, 0x14400009, 0x0,
-0x96e2047a, 0x30420003, 0x50400131, 0x3c030800,
-0x96e2046a, 0x30420003, 0x1040002a, 0x3c020700,
-0x96e2047a, 0x30420003, 0x10400026, 0x3c020700,
-0x96e3047a, 0x96e2046a, 0x14620022, 0x3c020700,
-0x8ee204c0, 0x24030001, 0xa2e34e20, 0x34420e00,
-0xaee204c0, 0x8f420218, 0x30420100, 0x10400005,
-0x0, 0x3c020001, 0x2442e168, 0x800111d,
-0x21100, 0x3c020001, 0x2442d35c, 0x21100,
-0x21182, 0x3c030800, 0x431025, 0x3c010001,
-0xac221238, 0x3c020001, 0x2442f680, 0x21100,
-0x21182, 0x3c030800, 0x431025, 0x3c010001,
-0xac221278, 0x8ee20000, 0x34424000, 0x8001238,
-0xaee20000, 0x34423000, 0xafa20018, 0x8ee20608,
-0x8f430228, 0x24420001, 0x304900ff, 0x512300e2,
-0xafa00010, 0x8ee20608, 0x210c0, 0x571021,
-0x8fa30018, 0x8fa4001c, 0xac43060c, 0xac440610,
-0x8f870120, 0x27623800, 0x24e80020, 0x102102b,
-0x50400001, 0x27683000, 0x8f820128, 0x11020004,
-0x0, 0x8f820124, 0x15020007, 0x1021,
-0x8ee201a4, 0x3021, 0x24420001, 0xaee201a4,
-0x80011a0, 0x8ee201a4, 0x8ee40608, 0x420c0,
-0x801821, 0x8ee40430, 0x8ee50434, 0xa32821,
-0xa3302b, 0x822021, 0x862021, 0xace40000,
-0xace50004, 0x8ee30608, 0x24020008, 0xa4e2000e,
-0x2402000d, 0xace20018, 0xace9001c, 0x318c0,
-0x2463060c, 0x2e31021, 0xace20008, 0x8ee204c4,
-0xace20010, 0xaf880120, 0x92e24e20, 0x14400037,
-0x24060001, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0x8c830000, 0x24020007, 0x1462001f,
-0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
-0x24030040, 0x8c820004, 0x24420001, 0xac820004,
-0x8ee24e34, 0x8ee54e30, 0x24420001, 0x10430007,
-0x0, 0x8ee24e34, 0x24420001, 0x10a20005,
-0x0, 0x800118a, 0x0, 0x14a00005,
-0x0, 0x8f820128, 0x24420020, 0xaf820128,
-0x8f820128, 0x8c820004, 0x2c420011, 0x50400013,
-0xac800000, 0x80011a0, 0x0, 0x8ee24e30,
-0x24030040, 0x24420001, 0x50430003, 0x1021,
-0x8ee24e30, 0x24420001, 0xaee24e30, 0x8ee24e30,
-0x210c0, 0x24425038, 0x2e22021, 0x24020007,
-0xac820000, 0x24020001, 0xac820004, 0x54c0000c,
-0xaee90608, 0x3c040001, 0x248451c8, 0xafa00010,
-0xafa00014, 0x8ee60608, 0x8f470228, 0x3c050009,
-0xc002403, 0x34a5f000, 0x8001223, 0x0,
-0x8f830120, 0x27623800, 0x24660020, 0xc2102b,
-0x50400001, 0x27663000, 0x8f820128, 0x10c20004,
-0x0, 0x8f820124, 0x14c20007, 0x0,
-0x8ee201a4, 0x3021, 0x24420001, 0xaee201a4,
-0x8001207, 0x8ee201a4, 0x8ee20608, 0xac62001c,
-0x8ee404a0, 0x8ee504a4, 0x2462001c, 0xac620008,
-0x24020008, 0xa462000e, 0x24020011, 0xac620018,
-0xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
-0xaf860120, 0x92e24e20, 0x14400037, 0x24060001,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x8c830000, 0x24020012, 0x1462001f, 0x0,
-0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x24030040,
-0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
-0x8ee54e30, 0x24420001, 0x10430007, 0x0,
-0x8ee24e34, 0x24420001, 0x10a20005, 0x0,
-0x80011f1, 0x0, 0x14a00005, 0x0,
-0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
-0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
-0x8001207, 0x0, 0x8ee24e30, 0x24030040,
-0x24420001, 0x50430003, 0x1021, 0x8ee24e30,
-0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x24020012, 0xac820000,
-0x24020001, 0xac820004, 0x14c0001b, 0x0,
-0x3c040001, 0x248451d0, 0xafa00010, 0xafa00014,
-0x8ee60608, 0x8f470228, 0x3c050009, 0xc002403,
-0x34a5f001, 0x8ee201b0, 0x24420001, 0xaee201b0,
-0x8001223, 0x8ee201b0, 0x3c040001, 0x248451dc,
-0xafa00014, 0x8ee60608, 0x8f470228, 0x3c050009,
-0xc002403, 0x34a5f005, 0x8ee201ac, 0x24420001,
-0xaee201ac, 0x8ee201ac, 0x8ee20160, 0x3c040001,
-0x248451e8, 0x3405f001, 0x24420001, 0xaee20160,
-0x8ee20160, 0x3021, 0x3821, 0xafa00010,
-0xc002403, 0xafa00014, 0x8001238, 0x0,
-0x3c020001, 0x2442f5a8, 0x21100, 0x21182,
-0x431025, 0x3c010001, 0xac221278, 0x96e2045a,
-0x30420003, 0x10400025, 0x3c050fff, 0x8ee204c8,
-0x34a5ffff, 0x34420a00, 0xaee204c8, 0x8ee304c8,
-0x3c040001, 0x248451f4, 0x24020001, 0xa2e204ec,
-0xa2e204ed, 0x3c020002, 0x621825, 0x3c020001,
-0x2442a390, 0x451024, 0x21082, 0xaee304c8,
-0x3c030800, 0x431025, 0x3c010001, 0xac221220,
-0x3c020001, 0x2442add4, 0x451024, 0x21082,
-0x431025, 0x3c010001, 0xac221280, 0x96e6045a,
-0x3821, 0x24050011, 0xafa00010, 0xc002403,
-0xafa00014, 0x8001268, 0x0, 0x3c020001,
-0x2442a9d4, 0x21100, 0x21182, 0x3c030800,
-0x431025, 0x3c010001, 0xac221280, 0x96e2046a,
-0x30420010, 0x14400009, 0x0, 0x96e2047a,
-0x30420010, 0x10400112, 0x0, 0x96e2046a,
-0x30420010, 0x10400005, 0x3c020700, 0x96e2047a,
-0x30420010, 0x14400102, 0x3c020700, 0x34423000,
-0xafa20018, 0x8ee20608, 0x8f430228, 0x24420001,
-0x304900ff, 0x512300e2, 0xafa00010, 0x8ee20608,
-0x210c0, 0x571021, 0x8fa30018, 0x8fa4001c,
-0xac43060c, 0xac440610, 0x8f870120, 0x27623800,
-0x24e80020, 0x102102b, 0x50400001, 0x27683000,
-0x8f820128, 0x11020004, 0x0, 0x8f820124,
-0x15020007, 0x1021, 0x8ee201a4, 0x3021,
-0x24420001, 0xaee201a4, 0x80012ea, 0x8ee201a4,
-0x8ee40608, 0x420c0, 0x801821, 0x8ee40430,
-0x8ee50434, 0xa32821, 0xa3302b, 0x822021,
-0x862021, 0xace40000, 0xace50004, 0x8ee30608,
-0x24020008, 0xa4e2000e, 0x2402000d, 0xace20018,
-0xace9001c, 0x318c0, 0x2463060c, 0x2e31021,
-0xace20008, 0x8ee204c4, 0xace20010, 0xaf880120,
-0x92e24e20, 0x14400037, 0x24060001, 0x8ee24e30,
-0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
-0x24020007, 0x1462001f, 0x0, 0x8ee34e30,
-0x8ee24e34, 0x1062001b, 0x24030040, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e34, 0x8ee54e30,
-0x24420001, 0x10430007, 0x0, 0x8ee24e34,
-0x24420001, 0x10a20005, 0x0, 0x80012d4,
-0x0, 0x14a00005, 0x0, 0x8f820128,
-0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
-0x2c420011, 0x50400013, 0xac800000, 0x80012ea,
-0x0, 0x8ee24e30, 0x24030040, 0x24420001,
-0x50430003, 0x1021, 0x8ee24e30, 0x24420001,
-0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0x24020007, 0xac820000, 0x24020001,
-0xac820004, 0x54c0000c, 0xaee90608, 0x3c040001,
-0x248451c8, 0xafa00010, 0xafa00014, 0x8ee60608,
-0x8f470228, 0x3c050009, 0xc002403, 0x34a5f000,
-0x800136d, 0x0, 0x8f830120, 0x27623800,
-0x24660020, 0xc2102b, 0x50400001, 0x27663000,
-0x8f820128, 0x10c20004, 0x0, 0x8f820124,
-0x14c20007, 0x0, 0x8ee201a4, 0x3021,
-0x24420001, 0xaee201a4, 0x8001351, 0x8ee201a4,
-0x8ee20608, 0xac62001c, 0x8ee404a0, 0x8ee504a4,
-0x2462001c, 0xac620008, 0x24020008, 0xa462000e,
-0x24020011, 0xac620018, 0xac640000, 0xac650004,
-0x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
-0x14400037, 0x24060001, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x8c830000, 0x24020012,
-0x1462001f, 0x0, 0x8ee34e30, 0x8ee24e34,
-0x1062001b, 0x24030040, 0x8c820004, 0x24420001,
-0xac820004, 0x8ee24e34, 0x8ee54e30, 0x24420001,
-0x10430007, 0x0, 0x8ee24e34, 0x24420001,
-0x10a20005, 0x0, 0x800133b, 0x0,
-0x14a00005, 0x0, 0x8f820128, 0x24420020,
-0xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
-0x50400013, 0xac800000, 0x8001351, 0x0,
-0x8ee24e30, 0x24030040, 0x24420001, 0x50430003,
-0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x24020012, 0xac820000, 0x24020001, 0xac820004,
-0x14c0001b, 0x0, 0x3c040001, 0x248451d0,
-0xafa00010, 0xafa00014, 0x8ee60608, 0x8f470228,
-0x3c050009, 0xc002403, 0x34a5f001, 0x8ee201b0,
-0x24420001, 0xaee201b0, 0x800136d, 0x8ee201b0,
-0x3c040001, 0x248451dc, 0xafa00014, 0x8ee60608,
-0x8f470228, 0x3c050009, 0xc002403, 0x34a5f005,
-0x8ee201ac, 0x24420001, 0xaee201ac, 0x8ee201ac,
-0x8ee20160, 0x3c040001, 0x248451e8, 0x3405f002,
-0x24420001, 0xaee20160, 0x8ee20160, 0x3021,
-0x3821, 0xafa00010, 0xc002403, 0xafa00014,
-0x96e6047a, 0x96e7046a, 0x3c040001, 0x24845200,
-0x24050012, 0xafa00010, 0xc002403, 0xafa00014,
-0xc004500, 0x0, 0xc002318, 0x0,
-0x3c060001, 0x34c63800, 0xaee00608, 0xaf400228,
-0xaf40022c, 0x96e30458, 0x8ee40000, 0x3c0512d8,
-0x34a5c358, 0x27623800, 0xaee27258, 0x27623800,
-0xaee27260, 0x27623800, 0xaee27264, 0x3661021,
-0xaee27270, 0x2402ffff, 0xaee004d4, 0xaee004e0,
-0xaee004e4, 0xaee004f0, 0xa2e004f4, 0xaee00e0c,
-0xaee00e18, 0xaee00e10, 0xaee00e14, 0xaee00e1c,
-0xaee0724c, 0xaee05244, 0xaee05240, 0xaee0523c,
-0xaee07250, 0xaee07254, 0xaee0725c, 0xaee07268,
-0xaee004d0, 0x2463ffff, 0x852025, 0xaee304f8,
-0xaee40000, 0xaf800060, 0xaf820064, 0x3c020100,
-0xafa20018, 0x8ee20608, 0x8f430228, 0x24420001,
-0x304900ff, 0x512300e2, 0xafa00010, 0x8ee20608,
-0x210c0, 0x571021, 0x8fa30018, 0x8fa4001c,
-0xac43060c, 0xac440610, 0x8f870120, 0x27623800,
-0x24e80020, 0x102102b, 0x50400001, 0x27683000,
-0x8f820128, 0x11020004, 0x0, 0x8f820124,
-0x15020007, 0x1021, 0x8ee201a4, 0x3021,
-0x24420001, 0xaee201a4, 0x8001422, 0x8ee201a4,
-0x8ee40608, 0x420c0, 0x801821, 0x8ee40430,
-0x8ee50434, 0xa32821, 0xa3302b, 0x822021,
-0x862021, 0xace40000, 0xace50004, 0x8ee30608,
-0x24020008, 0xa4e2000e, 0x2402000d, 0xace20018,
-0xace9001c, 0x318c0, 0x2463060c, 0x2e31021,
-0xace20008, 0x8ee204c4, 0xace20010, 0xaf880120,
-0x92e24e20, 0x14400037, 0x24060001, 0x8ee24e30,
-0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
-0x24020007, 0x1462001f, 0x0, 0x8ee34e30,
-0x8ee24e34, 0x1062001b, 0x24030040, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e34, 0x8ee54e30,
-0x24420001, 0x10430007, 0x0, 0x8ee24e34,
-0x24420001, 0x10a20005, 0x0, 0x800140c,
-0x0, 0x14a00005, 0x0, 0x8f820128,
-0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
-0x2c420011, 0x50400013, 0xac800000, 0x8001422,
-0x0, 0x8ee24e30, 0x24030040, 0x24420001,
-0x50430003, 0x1021, 0x8ee24e30, 0x24420001,
-0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0x24020007, 0xac820000, 0x24020001,
-0xac820004, 0x54c0000c, 0xaee90608, 0x3c040001,
-0x248451c8, 0xafa00010, 0xafa00014, 0x8ee60608,
-0x8f470228, 0x3c050009, 0xc002403, 0x34a5f000,
-0x80014a5, 0x0, 0x8f830120, 0x27623800,
-0x24660020, 0xc2102b, 0x50400001, 0x27663000,
-0x8f820128, 0x10c20004, 0x0, 0x8f820124,
-0x14c20007, 0x0, 0x8ee201a4, 0x3021,
-0x24420001, 0xaee201a4, 0x8001489, 0x8ee201a4,
-0x8ee20608, 0xac62001c, 0x8ee404a0, 0x8ee504a4,
-0x2462001c, 0xac620008, 0x24020008, 0xa462000e,
-0x24020011, 0xac620018, 0xac640000, 0xac650004,
-0x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
-0x14400037, 0x24060001, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x8c830000, 0x24020012,
-0x1462001f, 0x0, 0x8ee34e30, 0x8ee24e34,
-0x1062001b, 0x24030040, 0x8c820004, 0x24420001,
-0xac820004, 0x8ee24e34, 0x8ee54e30, 0x24420001,
-0x10430007, 0x0, 0x8ee24e34, 0x24420001,
-0x10a20005, 0x0, 0x8001473, 0x0,
-0x14a00005, 0x0, 0x8f820128, 0x24420020,
-0xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
-0x50400013, 0xac800000, 0x8001489, 0x0,
-0x8ee24e30, 0x24030040, 0x24420001, 0x50430003,
-0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x24020012, 0xac820000, 0x24020001, 0xac820004,
-0x14c0001b, 0x0, 0x3c040001, 0x248451d0,
-0xafa00010, 0xafa00014, 0x8ee60608, 0x8f470228,
-0x3c050009, 0xc002403, 0x34a5f001, 0x8ee201b0,
-0x24420001, 0xaee201b0, 0x80014a5, 0x8ee201b0,
-0x3c040001, 0x248451dc, 0xafa00014, 0x8ee60608,
-0x8f470228, 0x3c050009, 0xc002403, 0x34a5f005,
-0x8ee201ac, 0x24420001, 0xaee201ac, 0x8ee201ac,
-0x8ee20154, 0x24420001, 0xaee20154, 0xc0014dc,
-0x8ee20154, 0x8f8200a0, 0x30420004, 0x1440fffd,
-0x0, 0x8f820040, 0x30420001, 0x14400008,
-0x0, 0x8f430104, 0x24020001, 0x10620004,
-0x0, 0x8f420264, 0x10400006, 0x0,
-0x8ee2017c, 0x24420001, 0xaee2017c, 0x80014c5,
-0x8ee2017c, 0x8f820044, 0x34420004, 0xaf820044,
-0x8ee20178, 0x24420001, 0xaee20178, 0x8ee20178,
-0x8f8200d8, 0x8f8300d4, 0x431023, 0xaee2726c,
-0x8ee2726c, 0x1c400003, 0x3c030001, 0x431021,
-0xaee2726c, 0xc004064, 0x0, 0xc004440,
-0xaf800228, 0x8fbf0024, 0x8fb00020, 0x3e00008,
-0x27bd0028, 0x3e00008, 0x0, 0x3e00008,
-0x0, 0x0, 0x0, 0x2402002c,
-0xaf820050, 0xaee07274, 0x8f420238, 0xaee27278,
-0x8f820054, 0x24420067, 0xaf820058, 0xaee07b88,
-0xaee07b8c, 0xaee07b84, 0x3c010001, 0x370821,
-0xac2083bc, 0x3c010001, 0x370821, 0x3e00008,
-0xa02083b9, 0x27bdffd8, 0xafbf0024, 0xafb00020,
-0x8f820054, 0x3c030001, 0x8c635cd8, 0x24420067,
-0x1060000d, 0xaf820058, 0x3c020001, 0x571021,
-0x904283b8, 0x10400005, 0x3c030200, 0x3c010001,
-0x370821, 0x8001503, 0xa02083b8, 0x8ee20000,
-0x431025, 0xaee20000, 0x8f420218, 0x30420100,
-0x104000c6, 0x0, 0x8f8200b0, 0x30420004,
-0x104000c2, 0x0, 0x3c030001, 0x771821,
-0x8c6383d0, 0x8f820104, 0x146200b4, 0x0,
-0x3c030001, 0x771821, 0x8c6383d4, 0x8f8200b4,
-0x146200ae, 0x0, 0x8f8200b0, 0x3c030080,
-0x431024, 0x1040000d, 0x0, 0x8f82011c,
-0x34420002, 0xaf82011c, 0x8f8200b0, 0x2403fffb,
-0x431024, 0xaf8200b0, 0x8f82011c, 0x2403fffd,
-0x431024, 0x80015cc, 0xaf82011c, 0x3c030001,
-0x771821, 0x8c6383d0, 0x8f820104, 0x14620082,
-0x0, 0x3c030001, 0x771821, 0x8c6383d4,
-0x8f8200b4, 0x1462007c, 0x0, 0x3c070001,
-0xf73821, 0x8ce783d0, 0x8f8200b0, 0x3c040001,
-0x24845270, 0xafa00014, 0xafa20010, 0x8f8600b0,
-0x3c050005, 0xc002403, 0x34a50900, 0x8f82011c,
-0x34420002, 0xaf82011c, 0x8f830104, 0x8f8200b0,
-0x34420001, 0xaf8200b0, 0xaf830104, 0x8f830120,
-0x27623800, 0x24660020, 0xc2102b, 0x50400001,
-0x27663000, 0x8f820128, 0x10c20004, 0x0,
-0x8f820124, 0x14c20006, 0x0, 0x8ee201a4,
-0x24420001, 0xaee201a4, 0x80015a0, 0x8ee201a4,
-0x8f440208, 0x8f45020c, 0x26e20030, 0xac620008,
-0x24020400, 0xa462000e, 0x2402000f, 0xac620018,
-0xac60001c, 0xac640000, 0xac650004, 0x8ee204c4,
-0xac620010, 0xaf860120, 0x92e24e20, 0x14400037,
-0x0, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0x8c830000, 0x24020007, 0x1462001f,
-0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
-0x24030040, 0x8c820004, 0x24420001, 0xac820004,
-0x8ee24e34, 0x8ee54e30, 0x24420001, 0x10430007,
-0x0, 0x8ee24e34, 0x24420001, 0x10a20005,
-0x0, 0x800158a, 0x0, 0x14a00005,
-0x0, 0x8f820128, 0x24420020, 0xaf820128,
-0x8f820128, 0x8c820004, 0x2c420011, 0x50400013,
-0xac800000, 0x80015a0, 0x0, 0x8ee24e30,
-0x24030040, 0x24420001, 0x50430003, 0x1021,
-0x8ee24e30, 0x24420001, 0xaee24e30, 0x8ee24e30,
-0x210c0, 0x24425038, 0x2e22021, 0x24020007,
-0xac820000, 0x24020001, 0xac820004, 0x8f82011c,
-0x2403fffd, 0x431024, 0xaf82011c, 0x8ee201e4,
-0x3c070001, 0xf73821, 0x8ce783d0, 0x24420001,
-0xaee201e4, 0x8ee201e4, 0x3c040001, 0x2484527c,
-0x80015bd, 0xafa00010, 0x8f820104, 0x3c010001,
-0x370821, 0xac2283d0, 0x8f8200b4, 0x3c070001,
-0xf73821, 0x8ce783d0, 0x3c040001, 0x24845284,
-0x3c010001, 0x370821, 0xac2283d4, 0xafa00010,
-0xafa00014, 0x8f8600b0, 0x3c050005, 0xc002403,
-0x34a50900, 0x80015cc, 0x0, 0x8f820104,
-0x3c010001, 0x370821, 0xac2283d0, 0x8f8200b4,
-0x3c010001, 0x370821, 0xac2283d4, 0x8ee27274,
-0x92e304f4, 0x24420067, 0x14600006, 0xaee27274,
-0x8ee27274, 0x8f430234, 0x43102b, 0x1440007b,
-0x0, 0x8ee304e4, 0x8ee204f8, 0x14620004,
-0x0, 0x92e204f4, 0x50400074, 0xa2e004f4,
-0x8f830120, 0x27623800, 0x24660020, 0xc2102b,
-0x50400001, 0x27663000, 0x8f820128, 0x10c20004,
-0x0, 0x8f820124, 0x14c20007, 0x0,
-0x8ee201a4, 0x8021, 0x24420001, 0xaee201a4,
-0x8001637, 0x8ee201a4, 0x8ee204e4, 0xac62001c,
-0x8ee404b0, 0x8ee504b4, 0x2462001c, 0xac620008,
-0x24020008, 0xa462000e, 0x24020011, 0xac620018,
-0xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
-0xaf860120, 0x92e24e20, 0x14400037, 0x24100001,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x8c830000, 0x24020012, 0x1462001f, 0x0,
-0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x24030040,
-0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
-0x8ee54e30, 0x24420001, 0x10430007, 0x0,
-0x8ee24e34, 0x24420001, 0x10a20005, 0x0,
-0x8001621, 0x0, 0x14a00005, 0x0,
-0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
-0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
-0x8001637, 0x0, 0x8ee24e30, 0x24030040,
-0x24420001, 0x50430003, 0x1021, 0x8ee24e30,
-0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x24020012, 0xac820000,
-0x24020001, 0xac820004, 0x5600000b, 0x24100001,
-0x8ee204e4, 0x3c040001, 0x2484528c, 0xafa00014,
-0xafa20010, 0x8ee60608, 0x8f470228, 0x3c050009,
-0xc002403, 0x34a5f006, 0x16000003, 0x24020001,
-0x8001650, 0xa2e204f4, 0x8ee20170, 0x24420001,
-0xaee20170, 0x8ee20170, 0x8ee204e4, 0xa2e004f4,
-0xaee004f0, 0xaee07274, 0xaee204f8, 0x8ee20e1c,
-0x1040006d, 0x0, 0x8f830120, 0x27623800,
-0x24660020, 0xc2102b, 0x50400001, 0x27663000,
-0x8f820128, 0x10c20004, 0x0, 0x8f820124,
-0x14c20007, 0x0, 0x8ee201a4, 0x8021,
-0x24420001, 0xaee201a4, 0x80016ad, 0x8ee201a4,
-0x8ee2724c, 0xac62001c, 0x8ee404a8, 0x8ee504ac,
-0x2462001c, 0xac620008, 0x24020008, 0xa462000e,
-0x24020011, 0xac620018, 0xac640000, 0xac650004,
-0x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
-0x14400037, 0x24100001, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x8c830000, 0x24020012,
-0x1462001f, 0x0, 0x8ee34e30, 0x8ee24e34,
-0x1062001b, 0x24030040, 0x8c820004, 0x24420001,
-0xac820004, 0x8ee24e34, 0x8ee54e30, 0x24420001,
-0x10430007, 0x0, 0x8ee24e34, 0x24420001,
-0x10a20005, 0x0, 0x8001697, 0x0,
-0x14a00005, 0x0, 0x8f820128, 0x24420020,
-0xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
-0x50400013, 0xac800000, 0x80016ad, 0x0,
-0x8ee24e30, 0x24030040, 0x24420001, 0x50430003,
-0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x24020012, 0xac820000, 0x24020001, 0xac820004,
-0x5600000b, 0x24100001, 0x8ee2724c, 0x3c040001,
-0x24845298, 0xafa00014, 0xafa20010, 0x8ee6724c,
-0x8f470280, 0x3c050009, 0xc002403, 0x34a5f008,
-0x56000001, 0xaee00e1c, 0x8ee20174, 0x24420001,
-0xaee20174, 0x8ee20174, 0x8ee24e24, 0x10400019,
-0x0, 0xaee04e24, 0x8f820040, 0x30420001,
-0x14400008, 0x0, 0x8f430104, 0x24020001,
-0x10620004, 0x0, 0x8f420264, 0x10400006,
-0x0, 0x8ee2017c, 0x24420001, 0xaee2017c,
-0x80016da, 0x8ee2017c, 0x8f820044, 0x34420004,
-0xaf820044, 0x8ee20178, 0x24420001, 0xaee20178,
-0x8ee20178, 0x8ee27278, 0x2442ff99, 0xaee27278,
-0x8ee27278, 0x1c4002ad, 0x0, 0x8f420238,
-0x104002aa, 0x0, 0x3c020001, 0x571021,
-0x904283e0, 0x144002a5, 0x0, 0x8f420080,
-0xaee2004c, 0x8f4200c0, 0xaee20048, 0x8f420084,
-0xaee20038, 0x8f420084, 0xaee20244, 0x8f420088,
-0xaee20248, 0x8f42008c, 0xaee2024c, 0x8f420090,
-0xaee20250, 0x8f420094, 0xaee20254, 0x8f420098,
-0xaee20258, 0x8f42009c, 0xaee2025c, 0x8f4200a0,
-0xaee20260, 0x8f4200a4, 0xaee20264, 0x8f4200a8,
-0xaee20268, 0x8f4200ac, 0xaee2026c, 0x8f4200b0,
-0xaee20270, 0x8f4200b4, 0xaee20274, 0x8f4200b8,
-0xaee20278, 0x8f4200bc, 0x24040001, 0xaee2027c,
-0xaee0003c, 0x41080, 0x571021, 0x8ee3003c,
-0x8c420244, 0x24840001, 0x621821, 0x2c82000f,
-0xaee3003c, 0x1440fff8, 0x41080, 0x8f4200cc,
-0xaee20050, 0x8f4200d0, 0xaee20054, 0x8f830120,
-0x27623800, 0x24660020, 0xc2102b, 0x50400001,
-0x27663000, 0x8f820128, 0x10c20004, 0x0,
-0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
-0x8021, 0x24420001, 0xaee201a4, 0x8001775,
-0x8ee201a4, 0x8f440208, 0x8f45020c, 0x26e20030,
-0xac620008, 0x24020400, 0xa462000e, 0x2402000f,
-0xac620018, 0xac60001c, 0xac640000, 0xac650004,
-0x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
-0x14400037, 0x24100001, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x8c830000, 0x24020007,
-0x1462001f, 0x0, 0x8ee34e30, 0x8ee24e34,
-0x1062001b, 0x24030040, 0x8c820004, 0x24420001,
-0xac820004, 0x8ee24e34, 0x8ee54e30, 0x24420001,
-0x10430007, 0x0, 0x8ee24e34, 0x24420001,
-0x10a20005, 0x0, 0x800175f, 0x0,
-0x14a00005, 0x0, 0x8f820128, 0x24420020,
-0xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
-0x50400013, 0xac800000, 0x8001775, 0x0,
-0x8ee24e30, 0x24030040, 0x24420001, 0x50430003,
-0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x24020007, 0xac820000, 0x24020001, 0xac820004,
-0x12000212, 0x3c020400, 0xafa20018, 0x3c020001,
-0x571021, 0x904283b0, 0x1040010b, 0x0,
-0x8ee20608, 0x8f430228, 0x24420001, 0x304a00ff,
-0x514300fd, 0xafa00010, 0x8ee20608, 0x210c0,
-0x571021, 0x8fa30018, 0x8fa4001c, 0xac43060c,
-0xac440610, 0x8f830054, 0x8f820054, 0x24690032,
-0x1221023, 0x2c420033, 0x1040006a, 0x5821,
-0x24180008, 0x240f000d, 0x240d0007, 0x240c0040,
-0x240e0001, 0x8f870120, 0x27623800, 0x24e80020,
-0x102102b, 0x50400001, 0x27683000, 0x8f820128,
-0x11020004, 0x0, 0x8f820124, 0x15020007,
-0x1021, 0x8ee201a4, 0x8021, 0x24420001,
-0xaee201a4, 0x80017f3, 0x8ee201a4, 0x8ee40608,
-0x420c0, 0x801821, 0x8ee40430, 0x8ee50434,
-0xa32821, 0xa3302b, 0x822021, 0x862021,
-0xace40000, 0xace50004, 0x8ee20608, 0xa4f8000e,
-0xacef0018, 0xacea001c, 0x210c0, 0x2442060c,
-0x2e21021, 0xace20008, 0x8ee204c4, 0xace20010,
-0xaf880120, 0x92e24e20, 0x14400033, 0x24100001,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x8c820000, 0x144d001f, 0x0, 0x8ee34e30,
-0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
-0x24420001, 0x104c0007, 0x0, 0x8ee24e34,
-0x24420001, 0x10620005, 0x0, 0x80017e0,
-0x0, 0x14600005, 0x0, 0x8f820128,
-0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
-0x2c420011, 0x50400010, 0xac800000, 0x80017f3,
-0x0, 0x8ee24e30, 0x24420001, 0x504c0003,
-0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0xac8d0000, 0xac8e0004, 0x56000006, 0x240b0001,
-0x8f820054, 0x1221023, 0x2c420033, 0x1440ff9d,
-0x0, 0x316300ff, 0x24020001, 0x14620077,
-0x3c050009, 0xaeea0608, 0x8f830054, 0x8f820054,
-0x24690032, 0x1221023, 0x2c420033, 0x10400061,
-0x5821, 0x240d0008, 0x240c0011, 0x24080012,
-0x24070040, 0x240a0001, 0x8f830120, 0x27623800,
-0x24660020, 0xc2102b, 0x50400001, 0x27663000,
-0x8f820128, 0x10c20004, 0x0, 0x8f820124,
-0x14c20007, 0x0, 0x8ee201a4, 0x8021,
-0x24420001, 0xaee201a4, 0x800185f, 0x8ee201a4,
-0x8ee20608, 0xac62001c, 0x8ee404a0, 0x8ee504a4,
-0x2462001c, 0xac620008, 0xa46d000e, 0xac6c0018,
-0xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
-0xaf860120, 0x92e24e20, 0x14400033, 0x24100001,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x8c820000, 0x1448001f, 0x0, 0x8ee34e30,
-0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
-0x24420001, 0x10470007, 0x0, 0x8ee24e34,
-0x24420001, 0x10620005, 0x0, 0x800184c,
-0x0, 0x14600005, 0x0, 0x8f820128,
-0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
-0x2c420011, 0x50400010, 0xac800000, 0x800185f,
-0x0, 0x8ee24e30, 0x24420001, 0x50470003,
-0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0xac880000, 0xac8a0004, 0x56000006, 0x240b0001,
-0x8f820054, 0x1221023, 0x2c420033, 0x1440ffa6,
-0x0, 0x316300ff, 0x24020001, 0x14620003,
-0x3c050009, 0x800197c, 0x24100001, 0x3c040001,
-0x248452a4, 0xafa00010, 0xafa00014, 0x8f860120,
-0x8f870124, 0x800187b, 0x34a5f011, 0x3c040001,
-0x248452b0, 0xafa00010, 0xafa00014, 0x8f860120,
-0x8f870124, 0x34a5f010, 0xc002403, 0x8021,
-0x800197c, 0x0, 0x3c040001, 0x248452bc,
-0xafa00014, 0x8ee60608, 0x8f470228, 0x3c050009,
-0x8001975, 0x34a5f00f, 0x8ee20608, 0x8f430228,
-0x24420001, 0x304900ff, 0x512300e2, 0xafa00010,
-0x8ee20608, 0x210c0, 0x571021, 0x8fa30018,
-0x8fa4001c, 0xac43060c, 0xac440610, 0x8f870120,
-0x27623800, 0x24e80020, 0x102102b, 0x50400001,
-0x27683000, 0x8f820128, 0x11020004, 0x0,
-0x8f820124, 0x15020007, 0x1021, 0x8ee201a4,
-0x8021, 0x24420001, 0xaee201a4, 0x80018f7,
-0x8ee201a4, 0x8ee40608, 0x420c0, 0x801821,
-0x8ee40430, 0x8ee50434, 0xa32821, 0xa3302b,
-0x822021, 0x862021, 0xace40000, 0xace50004,
-0x8ee30608, 0x24020008, 0xa4e2000e, 0x2402000d,
-0xace20018, 0xace9001c, 0x318c0, 0x2463060c,
-0x2e31021, 0xace20008, 0x8ee204c4, 0xace20010,
-0xaf880120, 0x92e24e20, 0x14400037, 0x24100001,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x8c830000, 0x24020007, 0x1462001f, 0x0,
-0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x24030040,
-0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
-0x8ee54e30, 0x24420001, 0x10430007, 0x0,
-0x8ee24e34, 0x24420001, 0x10a20005, 0x0,
-0x80018e1, 0x0, 0x14a00005, 0x0,
-0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
-0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
-0x80018f7, 0x0, 0x8ee24e30, 0x24030040,
-0x24420001, 0x50430003, 0x1021, 0x8ee24e30,
-0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x24020007, 0xac820000,
-0x24020001, 0xac820004, 0x5600000c, 0xaee90608,
-0x3c040001, 0x248452c8, 0xafa00010, 0xafa00014,
-0x8ee60608, 0x8f470228, 0x3c050009, 0xc002403,
-0x34a5f000, 0x800197c, 0x0, 0x8f830120,
-0x27623800, 0x24660020, 0xc2102b, 0x50400001,
-0x27663000, 0x8f820128, 0x10c20004, 0x0,
-0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
-0x8021, 0x24420001, 0xaee201a4, 0x800195e,
-0x8ee201a4, 0x8ee20608, 0xac62001c, 0x8ee404a0,
-0x8ee504a4, 0x2462001c, 0xac620008, 0x24020008,
-0xa462000e, 0x24020011, 0xac620018, 0xac640000,
-0xac650004, 0x8ee204c4, 0xac620010, 0xaf860120,
-0x92e24e20, 0x14400037, 0x24100001, 0x8ee24e30,
-0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
-0x24020012, 0x1462001f, 0x0, 0x8ee34e30,
-0x8ee24e34, 0x1062001b, 0x24030040, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e34, 0x8ee54e30,
-0x24420001, 0x10430007, 0x0, 0x8ee24e34,
-0x24420001, 0x10a20005, 0x0, 0x8001948,
-0x0, 0x14a00005, 0x0, 0x8f820128,
-0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
-0x2c420011, 0x50400013, 0xac800000, 0x800195e,
-0x0, 0x8ee24e30, 0x24030040, 0x24420001,
-0x50430003, 0x1021, 0x8ee24e30, 0x24420001,
-0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0x24020012, 0xac820000, 0x24020001,
-0xac820004, 0x5600001d, 0x24100001, 0x3c040001,
-0x248452d0, 0xafa00010, 0xafa00014, 0x8ee60608,
-0x8f470228, 0x3c050009, 0xc002403, 0x34a5f001,
-0x8ee201b0, 0x24420001, 0xaee201b0, 0x800197c,
-0x8ee201b0, 0x3c040001, 0x248452dc, 0xafa00014,
-0x8ee60608, 0x8f470228, 0x3c050009, 0x34a5f005,
-0xc002403, 0x0, 0x8ee201ac, 0x8021,
-0x24420001, 0xaee201ac, 0x8ee201ac, 0x1200000c,
-0x24020001, 0x3c010001, 0x370821, 0xa02083b0,
-0x8f420238, 0x8ee30158, 0x24630001, 0xaee30158,
-0x8ee30158, 0x800198c, 0xaee27278, 0x24020001,
-0x3c010001, 0x370821, 0xa02283b0, 0x3c020001,
-0x8c425cd8, 0x10400187, 0x0, 0x8ee27b84,
-0x24430001, 0x284200c9, 0x144001a4, 0xaee37b84,
-0x8ee204d4, 0x30420002, 0x14400119, 0xaee07b84,
-0x8ee204d4, 0x3c030600, 0x34631000, 0x34420002,
-0xaee204d4, 0xafa30018, 0x8ee20608, 0x8f430228,
-0x24420001, 0x304a00ff, 0x514300fd, 0xafa00010,
-0x8ee20608, 0x210c0, 0x571021, 0x8fa30018,
-0x8fa4001c, 0xac43060c, 0xac440610, 0x8f830054,
-0x8f820054, 0x24690032, 0x1221023, 0x2c420033,
-0x1040006a, 0x5821, 0x24180008, 0x240f000d,
-0x240d0007, 0x240c0040, 0x240e0001, 0x8f870120,
-0x27623800, 0x24e80020, 0x102102b, 0x50400001,
-0x27683000, 0x8f820128, 0x11020004, 0x0,
-0x8f820124, 0x15020007, 0x1021, 0x8ee201a4,
-0x8021, 0x24420001, 0xaee201a4, 0x8001a15,
-0x8ee201a4, 0x8ee40608, 0x420c0, 0x801821,
-0x8ee40430, 0x8ee50434, 0xa32821, 0xa3302b,
-0x822021, 0x862021, 0xace40000, 0xace50004,
-0x8ee20608, 0xa4f8000e, 0xacef0018, 0xacea001c,
-0x210c0, 0x2442060c, 0x2e21021, 0xace20008,
-0x8ee204c4, 0xace20010, 0xaf880120, 0x92e24e20,
-0x14400033, 0x24100001, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x8c820000, 0x144d001f,
-0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
-0x0, 0x8c820004, 0x24420001, 0xac820004,
-0x8ee24e34, 0x8ee34e30, 0x24420001, 0x104c0007,
-0x0, 0x8ee24e34, 0x24420001, 0x10620005,
-0x0, 0x8001a02, 0x0, 0x14600005,
-0x0, 0x8f820128, 0x24420020, 0xaf820128,
-0x8f820128, 0x8c820004, 0x2c420011, 0x50400010,
-0xac800000, 0x8001a15, 0x0, 0x8ee24e30,
-0x24420001, 0x504c0003, 0x1021, 0x8ee24e30,
-0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0xac8d0000, 0xac8e0004,
-0x56000006, 0x240b0001, 0x8f820054, 0x1221023,
-0x2c420033, 0x1440ff9d, 0x0, 0x316300ff,
-0x24020001, 0x54620078, 0xafa00010, 0xaeea0608,
-0x8f830054, 0x8f820054, 0x24690032, 0x1221023,
-0x2c420033, 0x10400061, 0x5821, 0x240d0008,
-0x240c0011, 0x24080012, 0x24070040, 0x240a0001,
-0x8f830120, 0x27623800, 0x24660020, 0xc2102b,
-0x50400001, 0x27663000, 0x8f820128, 0x10c20004,
-0x0, 0x8f820124, 0x14c20007, 0x0,
-0x8ee201a4, 0x8021, 0x24420001, 0xaee201a4,
-0x8001a81, 0x8ee201a4, 0x8ee20608, 0xac62001c,
-0x8ee404a0, 0x8ee504a4, 0x2462001c, 0xac620008,
-0xa46d000e, 0xac6c0018, 0xac640000, 0xac650004,
-0x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
-0x14400033, 0x24100001, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x8c820000, 0x1448001f,
-0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
-0x0, 0x8c820004, 0x24420001, 0xac820004,
-0x8ee24e34, 0x8ee34e30, 0x24420001, 0x10470007,
-0x0, 0x8ee24e34, 0x24420001, 0x10620005,
-0x0, 0x8001a6e, 0x0, 0x14600005,
-0x0, 0x8f820128, 0x24420020, 0xaf820128,
-0x8f820128, 0x8c820004, 0x2c420011, 0x50400010,
-0xac800000, 0x8001a81, 0x0, 0x8ee24e30,
-0x24420001, 0x50470003, 0x1021, 0x8ee24e30,
-0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0xac880000, 0xac8a0004,
-0x56000006, 0x240b0001, 0x8f820054, 0x1221023,
-0x2c420033, 0x1440ffa6, 0x0, 0x316300ff,
-0x24020001, 0x10620022, 0x0, 0x3c040001,
-0x248452a4, 0xafa00010, 0xafa00014, 0x8f860120,
-0x8f870124, 0x3c050009, 0xc002403, 0x34a5f011,
-0x8001aad, 0x0, 0x3c040001, 0x248452b0,
-0xafa00014, 0x8f860120, 0x8f870124, 0x3c050009,
-0xc002403, 0x34a5f010, 0x8001aad, 0x0,
-0x3c040001, 0x248452bc, 0xafa00014, 0x8ee60608,
-0x8f470228, 0x3c050009, 0xc002403, 0x34a5f00f,
-0x8ee201ac, 0x24420001, 0xaee201ac, 0x8ee201ac,
-0x8ee2015c, 0x24420001, 0xaee2015c, 0x8ee2015c,
-0x8ee204d4, 0x30420001, 0x10400055, 0x0,
-0x8f420218, 0x30420080, 0x10400029, 0x0,
-0x8f820044, 0x34420040, 0xaf820044, 0x8ee27b7c,
-0x402821, 0x8ee200c0, 0x8ee300c4, 0x24060000,
-0x2407ffff, 0x2021, 0x461024, 0x1444000d,
-0x671824, 0x1465000b, 0x0, 0x8ee27b80,
-0x402821, 0x8ee200e0, 0x8ee300e4, 0x2021,
-0x461024, 0x14440003, 0x671824, 0x1065000b,
-0x0, 0x8ee200c0, 0x8ee300c4, 0x8ee400e0,
-0x8ee500e4, 0xaee37b7c, 0xaee57b80, 0x8f820044,
-0x38420020, 0x8001b38, 0xaf820044, 0x8f820044,
-0x2403ffdf, 0x431024, 0x8001b38, 0xaf820044,
-0x8f820044, 0x2403ffdf, 0x431024, 0xaf820044,
-0x8ee27b7c, 0x402821, 0x8ee200c0, 0x8ee300c4,
-0x24060000, 0x2407ffff, 0x2021, 0x461024,
-0x1444000d, 0x671824, 0x1465000b, 0x0,
-0x8ee27b80, 0x402821, 0x8ee200e0, 0x8ee300e4,
-0x2021, 0x461024, 0x14440003, 0x671824,
-0x1065000b, 0x0, 0x8ee200c0, 0x8ee300c4,
-0x8ee400e0, 0x8ee500e4, 0xaee37b7c, 0xaee57b80,
-0x8f820044, 0x38420040, 0x8001b38, 0xaf820044,
-0x8f820044, 0x34420040, 0x8001b38, 0xaf820044,
-0x8f820044, 0x34420040, 0xaf820044, 0x8ee27b8c,
-0x24430001, 0x28420015, 0x14400028, 0xaee37b8c,
-0x8f820044, 0x38420020, 0xaf820044, 0x8001b38,
-0xaee07b8c, 0x8ee204d4, 0x30420001, 0x10400011,
-0x0, 0x8f420218, 0x30420080, 0x10400009,
-0x0, 0x8f820044, 0x34420020, 0xaf820044,
-0x8f820044, 0x2403ffbf, 0x431024, 0x8001b36,
-0xaf820044, 0x8f820044, 0x34420060, 0x8001b36,
-0xaf820044, 0x8f820044, 0x34420040, 0xaf820044,
-0x8ee27b88, 0x24430001, 0x28421389, 0x14400005,
-0xaee37b88, 0x8f820044, 0x38420020, 0xaf820044,
-0xaee07b88, 0xc004603, 0x0, 0x8fbf0024,
-0x8fb00020, 0x3e00008, 0x27bd0028, 0x27bdffb8,
-0xafbf0044, 0xafb60040, 0xafb5003c, 0xafb40038,
-0xafb30034, 0xafb20030, 0xafb1002c, 0xafb00028,
-0x8f960064, 0x32c20004, 0x1040000c, 0x24020004,
-0xaf820064, 0x8f420114, 0xaee204e0, 0x8f820060,
-0x34420008, 0xaf820060, 0x8ee2016c, 0x24420001,
-0xaee2016c, 0x80022f4, 0x8ee2016c, 0x32c20001,
-0x10400004, 0x24020001, 0xaf820064, 0x80022f4,
-0x0, 0x32c20002, 0x1440000c, 0x3c050003,
-0x3c040001, 0x24845354, 0x34a50001, 0x2c03021,
-0x3821, 0xafa00010, 0xc002403, 0xafa00014,
-0x2402fff8, 0x80022f4, 0xaf820064, 0x8f43022c,
-0x8f42010c, 0x5062000c, 0xafa00010, 0x8f42022c,
-0x21080, 0x5a1021, 0x8c420300, 0xafa20020,
-0x8f42022c, 0x24070001, 0x24420001, 0x3042003f,
-0x8001b80, 0xaf42022c, 0x3c040001, 0x24845360,
-0xafa00014, 0x8f46022c, 0x8f47010c, 0x3c050003,
-0xc002403, 0x34a5f01f, 0x3821, 0x14e00003,
-0x0, 0x80022ed, 0xaf960064, 0x93a20020,
-0x2443ffff, 0x2c620011, 0x10400658, 0x31080,
-0x3c010001, 0x220821, 0x8c225418, 0x400008,
-0x0, 0x8fa20020, 0x30420fff, 0xaee20e0c,
-0x8f820060, 0x34420200, 0xaf820060, 0x8ee20118,
-0x24420001, 0xaee20118, 0x80022e8, 0x8ee20118,
-0x8fa20020, 0x24030001, 0x3c010001, 0x370821,
-0xa02383b1, 0x30420fff, 0xaee25238, 0x8f820060,
-0x34420100, 0xaf820060, 0x8ee20144, 0x24420001,
-0xaee20144, 0x80022e8, 0x8ee20144, 0x8fa20020,
-0x21200, 0x22502, 0x24020001, 0x10820005,
-0x24020002, 0x10820009, 0x2402fffe, 0x8001bc9,
-0xafa00010, 0x8ee204d4, 0xaee40070, 0xaee40074,
-0x34420001, 0x8001bbd, 0xaee204d4, 0x8ee304d4,
-0xaee40070, 0xaee40074, 0x621824, 0xaee304d4,
-0x8f840054, 0x41442, 0x41c82, 0x431021,
-0x41cc2, 0x431023, 0x41d02, 0x431021,
-0x41d42, 0x431023, 0x8001bd0, 0xaee20078,
-0x3c040001, 0x2484536c, 0xafa00014, 0x8fa60020,
-0x3c050003, 0xc002403, 0x34a50004, 0x8ee20110,
-0x24420001, 0xaee20110, 0x80022e8, 0x8ee20110,
-0x27440212, 0xc0022fe, 0x24050006, 0x3049001f,
-0x920c0, 0x2e41021, 0x9442727c, 0x30424000,
-0x1040000a, 0x971021, 0x97430212, 0xa443727e,
-0x8f430214, 0x971021, 0xac437280, 0x2e41821,
-0x34028000, 0x8001c79, 0xa462727c, 0x9443727e,
-0x97420212, 0x14620006, 0x2e41021, 0x971021,
-0x8c437280, 0x8f420214, 0x1062009f, 0x2e41021,
-0x9442727c, 0x30428000, 0x1040002a, 0x2406ffff,
-0x2021, 0x410c0, 0x2e21021, 0x9442737c,
-0x30424000, 0x54400005, 0x803021, 0x24840001,
-0x2c820080, 0x1440fff8, 0x410c0, 0x4c10010,
-0x618c0, 0x610c0, 0x571821, 0x8c63737c,
-0x571021, 0xafa30010, 0x8c427380, 0x3c040001,
-0x24845378, 0xafa20014, 0x8f470214, 0x3c050003,
-0xc002403, 0x34a50013, 0x8001c90, 0x3c020800,
-0x97440212, 0x771021, 0xa444737e, 0x8f440214,
-0x771021, 0x2e31821, 0xac447380, 0x34028000,
-0xa462737c, 0x910c0, 0x2e21021, 0x8001c79,
-0xa446727c, 0x2e41021, 0x9445727c, 0x8001c2e,
-0x510c0, 0x9443737e, 0x97420212, 0x14620006,
-0x510c0, 0x971021, 0x8c437380, 0x8f420214,
-0x10620065, 0x510c0, 0x2e21021, 0x9445737c,
-0x510c0, 0x2e21021, 0x9442737c, 0x30428000,
-0x1040fff0, 0x971021, 0x520c0, 0x971021,
-0x9443737e, 0x97420212, 0x14620006, 0x2406ffff,
-0x971021, 0x8c437380, 0x8f420214, 0x10620053,
-0x3c020800, 0x2021, 0x410c0, 0x2e21021,
-0x9442737c, 0x30424000, 0x54400005, 0x803021,
-0x24840001, 0x2c820080, 0x1440fff8, 0x410c0,
-0x4c10023, 0x618c0, 0x910c0, 0x571821,
-0x8c63727c, 0x571021, 0xafa30010, 0x8c427280,
-0x3c040001, 0x24845384, 0xafa20014, 0x8f470214,
-0x3c050003, 0xc002403, 0x34a5f017, 0x8001c90,
-0x3c020800, 0x8f430210, 0xb71021, 0xac43777c,
-0x8f430214, 0xb71021, 0xac437780, 0x3c020001,
-0x571021, 0x8c4283b4, 0x24420001, 0x3c010001,
-0x370821, 0xac2283b4, 0x3c030001, 0x771821,
-0x8c6383b4, 0x2e51021, 0x8001c82, 0xa443777c,
-0x97440212, 0x771021, 0xa444737e, 0x8f440214,
-0x771021, 0x2e31821, 0xac447380, 0x34028000,
-0xa462737c, 0x510c0, 0x2e21021, 0xa446737c,
-0x2021, 0x428c0, 0x2e51021, 0x9442777c,
-0x1040ffdc, 0x24840001, 0x2c820080, 0x5440fffa,
-0x428c0, 0x92e204d8, 0x10400006, 0x24020001,
-0x8ee304dc, 0x1221004, 0x621825, 0x8001c8f,
-0xaee304dc, 0x8f830228, 0x24020001, 0x1221004,
-0x621825, 0xaf830228, 0x3c020800, 0x34421000,
-0xafa20018, 0x8ee20608, 0x8f430228, 0x24420001,
-0x304a00ff, 0x514300fd, 0xafa00010, 0x8ee20608,
-0x210c0, 0x571021, 0x8fa30018, 0x8fa4001c,
-0xac43060c, 0xac440610, 0x8f830054, 0x8f820054,
-0x24690032, 0x1221023, 0x2c420033, 0x1040006a,
-0x5821, 0x24100008, 0x240f000d, 0x240d0007,
-0x240c0040, 0x240e0001, 0x8f870120, 0x27623800,
-0x24e80020, 0x102102b, 0x50400001, 0x27683000,
-0x8f820128, 0x11020004, 0x0, 0x8f820124,
-0x15020007, 0x1021, 0x8ee201a4, 0x3821,
-0x24420001, 0xaee201a4, 0x8001d08, 0x8ee201a4,
-0x8ee40608, 0x420c0, 0x801821, 0x8ee40430,
-0x8ee50434, 0xa32821, 0xa3302b, 0x822021,
-0x862021, 0xace40000, 0xace50004, 0x8ee20608,
-0xa4f0000e, 0xacef0018, 0xacea001c, 0x210c0,
-0x2442060c, 0x2e21021, 0xace20008, 0x8ee204c4,
-0xace20010, 0xaf880120, 0x92e24e20, 0x14400033,
-0x24070001, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0x8c820000, 0x144d001f, 0x0,
-0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x0,
-0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
-0x8ee34e30, 0x24420001, 0x104c0007, 0x0,
-0x8ee24e34, 0x24420001, 0x10620005, 0x0,
-0x8001cf5, 0x0, 0x14600005, 0x0,
-0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
-0x8c820004, 0x2c420011, 0x50400010, 0xac800000,
-0x8001d08, 0x0, 0x8ee24e30, 0x24420001,
-0x504c0003, 0x1021, 0x8ee24e30, 0x24420001,
-0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0xac8d0000, 0xac8e0004, 0x54e00006,
-0x240b0001, 0x8f820054, 0x1221023, 0x2c420033,
-0x1440ff9d, 0x0, 0x316300ff, 0x24020001,
-0x54620078, 0xafa00010, 0xaeea0608, 0x8f830054,
-0x8f820054, 0x24690032, 0x1221023, 0x2c420033,
-0x10400061, 0x5821, 0x240e0008, 0x240d0011,
-0x240a0012, 0x24080040, 0x240c0001, 0x8f830120,
-0x27623800, 0x24660020, 0xc2102b, 0x50400001,
-0x27663000, 0x8f820128, 0x10c20004, 0x0,
-0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
-0x3821, 0x24420001, 0xaee201a4, 0x8001d74,
-0x8ee201a4, 0x8ee20608, 0xac62001c, 0x8ee404a0,
-0x8ee504a4, 0x2462001c, 0xac620008, 0xa46e000e,
-0xac6d0018, 0xac640000, 0xac650004, 0x8ee204c4,
-0xac620010, 0xaf860120, 0x92e24e20, 0x14400033,
-0x24070001, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0x8c820000, 0x144a001f, 0x0,
-0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x0,
-0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
-0x8ee34e30, 0x24420001, 0x10480007, 0x0,
-0x8ee24e34, 0x24420001, 0x10620005, 0x0,
-0x8001d61, 0x0, 0x14600005, 0x0,
-0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
-0x8c820004, 0x2c420011, 0x50400010, 0xac800000,
-0x8001d74, 0x0, 0x8ee24e30, 0x24420001,
-0x50480003, 0x1021, 0x8ee24e30, 0x24420001,
-0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0xac8a0000, 0xac8c0004, 0x54e00006,
-0x240b0001, 0x8f820054, 0x1221023, 0x2c420033,
-0x1440ffa6, 0x0, 0x316300ff, 0x24020001,
-0x10620022, 0x0, 0x3c040001, 0x24845390,
-0xafa00010, 0xafa00014, 0x8f860120, 0x8f870124,
-0x3c050009, 0xc002403, 0x34a5f011, 0x8001da0,
-0x0, 0x3c040001, 0x2484539c, 0xafa00014,
-0x8f860120, 0x8f870124, 0x3c050009, 0xc002403,
-0x34a5f010, 0x8001da0, 0x0, 0x3c040001,
-0x248453a8, 0xafa00014, 0x8ee60608, 0x8f470228,
-0x3c050009, 0xc002403, 0x34a5f00f, 0x8ee201ac,
-0x24420001, 0xaee201ac, 0x8ee201ac, 0x8ee20124,
-0x24420001, 0xaee20124, 0x8001f97, 0x8ee20124,
-0x27440212, 0xc0022fe, 0x24050006, 0x3049001f,
-0x928c0, 0x2e51021, 0x9442727c, 0x30428000,
-0x1040002f, 0x2e51021, 0x9442727c, 0x30424000,
-0x1440001c, 0xb71021, 0x9443727e, 0x97420212,
-0x14620018, 0xb71021, 0x8c437280, 0x8f420214,
-0x54620016, 0xafa20010, 0x92e204d8, 0x10400007,
-0x24020001, 0x8ee304dc, 0x1221004, 0x21027,
-0x621824, 0x8001dc9, 0xaee304dc, 0x8f830228,
-0x1221004, 0x21027, 0x621824, 0xaf830228,
-0x910c0, 0x2e21821, 0x3402c000, 0x8001e4e,
-0xa462727c, 0x8f420214, 0xafa20010, 0x910c0,
-0x571021, 0x8c42727c, 0x3c040001, 0x248453b4,
-0x3c050003, 0xafa20014, 0x8f470210, 0x34a5f01c,
-0xc002403, 0x1203021, 0x8001e83, 0x3c020800,
-0xb71021, 0x9443727e, 0x97420212, 0x14620019,
-0x918c0, 0xb71021, 0x8c437280, 0x8f420214,
-0x14620014, 0x918c0, 0x2e51021, 0x9447727c,
-0x720c0, 0x971021, 0x9443737e, 0xb71021,
-0xa443727e, 0x971021, 0x8c437380, 0xb71021,
-0xac437280, 0x2e41021, 0x9443737c, 0x2e51021,
-0xa443727c, 0x2e41821, 0x3402c000, 0x8001e4e,
-0xa462737c, 0x2e31021, 0x9447727c, 0x3021,
-0x720c0, 0x2e41021, 0x9442737c, 0x4021,
-0x30428000, 0x14400025, 0xe02821, 0x605021,
-0x340bc000, 0x971021, 0x9443737e, 0x97420212,
-0x54620015, 0xe02821, 0x971021, 0x8c437380,
-0x8f420214, 0x54620010, 0xe02821, 0x11000006,
-0x2e41021, 0x9443737c, 0x510c0, 0x2e21021,
-0x8001e1a, 0xa443737c, 0x9443737c, 0x2ea1021,
-0xa443727c, 0x710c0, 0x2e21021, 0xa44b737c,
-0x8001e28, 0x24060001, 0x510c0, 0x2e21021,
-0x9447737c, 0x720c0, 0x2e41021, 0x9442737c,
-0x30428000, 0x1040ffdf, 0x25080001, 0x30c200ff,
-0x14400025, 0x2021, 0x720c0, 0x971021,
-0x9443737e, 0x97420212, 0x1462000f, 0x910c0,
-0x971021, 0x8c437380, 0x8f420214, 0x1462000a,
-0x910c0, 0x2e41821, 0x3402c000, 0x15000015,
-0xa462737c, 0x910c0, 0x2e21821, 0x34028000,
-0x8001e4e, 0xa462727c, 0x571021, 0x8c42727c,
-0x3c040001, 0x248453c0, 0x3c050003, 0xafa20010,
-0x710c0, 0x571021, 0x8c42737c, 0x34a5001e,
-0x1203021, 0xc002403, 0xafa20014, 0x8001e83,
-0x3c020800, 0x2021, 0x428c0, 0xb71021,
-0x9443777e, 0x97420212, 0x5462002b, 0x24840001,
-0xb71021, 0x8c437780, 0x8f420214, 0x54620026,
-0x24840001, 0x3c020001, 0x571021, 0x8c4283b4,
-0x2442ffff, 0x3c010001, 0x370821, 0xac2283b4,
-0x3c020001, 0x571021, 0x8c4283b4, 0x809021,
-0x242102b, 0x1040000e, 0x24b1777c, 0x24b07784,
-0x2f02021, 0x2f12821, 0xc002490, 0x24060008,
-0x26310008, 0x3c020001, 0x571021, 0x8c4283b4,
-0x26520001, 0x242102b, 0x1440fff5, 0x26100008,
-0x3c040001, 0x972021, 0x8c8483b4, 0x24050008,
-0x420c0, 0x2484777c, 0xc002488, 0x2e42021,
-0x8001e83, 0x3c020800, 0x2c820080, 0x1440ffcf,
-0x428c0, 0x3c020800, 0x34422000, 0xafa20018,
-0x8ee20608, 0x8f430228, 0x24420001, 0x304a00ff,
-0x514300fd, 0xafa00010, 0x8ee20608, 0x210c0,
-0x571021, 0x8fa30018, 0x8fa4001c, 0xac43060c,
-0xac440610, 0x8f830054, 0x8f820054, 0x24690032,
-0x1221023, 0x2c420033, 0x1040006a, 0x5821,
-0x24100008, 0x240f000d, 0x240d0007, 0x240c0040,
-0x240e0001, 0x8f870120, 0x27623800, 0x24e80020,
-0x102102b, 0x50400001, 0x27683000, 0x8f820128,
-0x11020004, 0x0, 0x8f820124, 0x15020007,
-0x1021, 0x8ee201a4, 0x3821, 0x24420001,
-0xaee201a4, 0x8001efb, 0x8ee201a4, 0x8ee40608,
-0x420c0, 0x801821, 0x8ee40430, 0x8ee50434,
-0xa32821, 0xa3302b, 0x822021, 0x862021,
-0xace40000, 0xace50004, 0x8ee20608, 0xa4f0000e,
-0xacef0018, 0xacea001c, 0x210c0, 0x2442060c,
-0x2e21021, 0xace20008, 0x8ee204c4, 0xace20010,
-0xaf880120, 0x92e24e20, 0x14400033, 0x24070001,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x8c820000, 0x144d001f, 0x0, 0x8ee34e30,
-0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
-0x24420001, 0x104c0007, 0x0, 0x8ee24e34,
-0x24420001, 0x10620005, 0x0, 0x8001ee8,
-0x0, 0x14600005, 0x0, 0x8f820128,
-0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
-0x2c420011, 0x50400010, 0xac800000, 0x8001efb,
-0x0, 0x8ee24e30, 0x24420001, 0x504c0003,
-0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0xac8d0000, 0xac8e0004, 0x54e00006, 0x240b0001,
-0x8f820054, 0x1221023, 0x2c420033, 0x1440ff9d,
-0x0, 0x316300ff, 0x24020001, 0x54620078,
-0xafa00010, 0xaeea0608, 0x8f830054, 0x8f820054,
-0x24690032, 0x1221023, 0x2c420033, 0x10400061,
-0x5821, 0x240e0008, 0x240d0011, 0x240a0012,
-0x24080040, 0x240c0001, 0x8f830120, 0x27623800,
-0x24660020, 0xc2102b, 0x50400001, 0x27663000,
-0x8f820128, 0x10c20004, 0x0, 0x8f820124,
-0x14c20007, 0x0, 0x8ee201a4, 0x3821,
-0x24420001, 0xaee201a4, 0x8001f67, 0x8ee201a4,
-0x8ee20608, 0xac62001c, 0x8ee404a0, 0x8ee504a4,
-0x2462001c, 0xac620008, 0xa46e000e, 0xac6d0018,
-0xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
-0xaf860120, 0x92e24e20, 0x14400033, 0x24070001,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x8c820000, 0x144a001f, 0x0, 0x8ee34e30,
-0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
-0x24420001, 0x10480007, 0x0, 0x8ee24e34,
-0x24420001, 0x10620005, 0x0, 0x8001f54,
-0x0, 0x14600005, 0x0, 0x8f820128,
-0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
-0x2c420011, 0x50400010, 0xac800000, 0x8001f67,
-0x0, 0x8ee24e30, 0x24420001, 0x50480003,
-0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0xac8a0000, 0xac8c0004, 0x54e00006, 0x240b0001,
-0x8f820054, 0x1221023, 0x2c420033, 0x1440ffa6,
-0x0, 0x316300ff, 0x24020001, 0x10620022,
-0x0, 0x3c040001, 0x24845390, 0xafa00010,
-0xafa00014, 0x8f860120, 0x8f870124, 0x3c050009,
-0xc002403, 0x34a5f011, 0x8001f93, 0x0,
-0x3c040001, 0x2484539c, 0xafa00014, 0x8f860120,
-0x8f870124, 0x3c050009, 0xc002403, 0x34a5f010,
-0x8001f93, 0x0, 0x3c040001, 0x248453a8,
-0xafa00014, 0x8ee60608, 0x8f470228, 0x3c050009,
-0xc002403, 0x34a5f00f, 0x8ee201ac, 0x24420001,
-0xaee201ac, 0x8ee201ac, 0x8ee20128, 0x24420001,
-0xaee20128, 0x8ee20128, 0x8ee20164, 0x24420001,
-0xaee20164, 0x80022e8, 0x8ee20164, 0x8fa20020,
-0x21200, 0x21d02, 0x24020001, 0x10620005,
-0x24020002, 0x1062000d, 0x0, 0x8001fb7,
-0xafa00010, 0x92e204d8, 0x14400006, 0x24020001,
-0x8f820228, 0xaee204dc, 0x2402ffff, 0xaf820228,
-0x24020001, 0x8001fbe, 0xa2e204d8, 0x92e204d8,
-0x5040000c, 0xa2e004d8, 0x8ee204dc, 0xaf820228,
-0x8001fbe, 0xa2e004d8, 0x3c040001, 0x248453c8,
-0xafa00014, 0x8fa60020, 0x3c050003, 0xc002403,
-0x34a5f009, 0x8ee2013c, 0x24420001, 0xaee2013c,
-0x80022e8, 0x8ee2013c, 0x8fa20020, 0x21200,
-0x22502, 0x24020001, 0x10820005, 0x24020002,
-0x1082000f, 0x0, 0x8001fe3, 0xafa00010,
-0x8f820220, 0x3c0308ff, 0x3463ffff, 0x431024,
-0x34420008, 0xaf820220, 0x24020001, 0x3c010001,
-0x370821, 0xa02283b2, 0x8001fea, 0xaee40108,
-0x8f820220, 0x3c0308ff, 0x3463fff7, 0x431024,
-0xaf820220, 0x3c010001, 0x370821, 0xa02083b2,
-0x8001fea, 0xaee40108, 0x3c040001, 0x248453d4,
-0xafa00014, 0x8fa60020, 0x3c050003, 0xc002403,
-0x34a5f00a, 0x8ee2012c, 0x24420001, 0xaee2012c,
-0x80022e8, 0x8ee2012c, 0x8fa20020, 0x21200,
-0x21d02, 0x24020001, 0x10620005, 0x24020002,
-0x1062000e, 0x0, 0x8002011, 0xafa00010,
-0x8f820220, 0x3c0308ff, 0x3463ffff, 0x431024,
-0x34420008, 0xaf820220, 0x24020001, 0x3c010001,
-0x370821, 0x8002018, 0xa02283b3, 0x3c020001,
-0x571021, 0x904283b2, 0x3c010001, 0x370821,
-0x1440000e, 0xa02083b3, 0x8f820220, 0x3c0308ff,
-0x3463fff7, 0x431024, 0x8002018, 0xaf820220,
-0x3c040001, 0x248453e0, 0xafa00014, 0x8fa60020,
-0x3c050003, 0xc002403, 0x34a5f00b, 0x8ee20114,
-0x24420001, 0xaee20114, 0x80022e8, 0x8ee20114,
-0x27840208, 0x27450200, 0xc00249a, 0x24060008,
-0x26e40094, 0x27450200, 0xc00249a, 0x24060008,
-0x8ee20134, 0x24420001, 0xaee20134, 0x80022e8,
-0x8ee20134, 0x8f460248, 0x2021, 0xc005108,
-0x24050004, 0x8ee20130, 0x24420001, 0xaee20130,
-0x80022e8, 0x8ee20130, 0x8ef301cc, 0x8ef401d0,
-0x8ef501d8, 0x8ee20140, 0x26e40030, 0x24420001,
-0xaee20140, 0x8ef00140, 0x8ef10074, 0x8ef20070,
-0xc002488, 0x24050400, 0xaef301cc, 0xaef401d0,
-0xaef501d8, 0xaef00140, 0xaef10074, 0xaef20070,
-0x8f42025c, 0x26e40094, 0xaee20060, 0x8f420260,
-0x27450200, 0x24060008, 0xaee20068, 0x24020006,
-0xc00249a, 0xaee20064, 0x3c023b9a, 0x3442ca00,
-0xaee2006c, 0x240203e8, 0x24040002, 0x24030001,
-0xaee20104, 0xaee40100, 0xaee3010c, 0x8f820220,
-0x30420008, 0x10400004, 0x0, 0xaee30108,
-0x8002061, 0x2021, 0xaee40108, 0x2021,
-0x3c030001, 0x641821, 0x90635c30, 0x2e41021,
-0x24840001, 0xa043009c, 0x2c82000f, 0x1440fff8,
-0x0, 0x8f820040, 0x2e41821, 0x24840001,
-0x21702, 0x24420030, 0xa062009c, 0x2e41021,
-0x80022e8, 0xa040009c, 0x24020001, 0x3c010001,
-0x370821, 0xa02283e0, 0x240b0400, 0x24080014,
-0x240a0040, 0x24090001, 0x8f830100, 0x27623000,
-0x24660020, 0xc2102b, 0x50400001, 0x27662800,
-0x8f820108, 0x10c20004, 0x0, 0x8f820104,
-0x14c20007, 0x26e20030, 0x8ee201a8, 0x3821,
-0x24420001, 0xaee201a8, 0x80020a8, 0x8ee201a8,
-0x8ee404b8, 0x8ee504bc, 0xac620008, 0xa46b000e,
-0xac680018, 0xac60001c, 0xac640000, 0xac650004,
-0x8ee204cc, 0xac620010, 0xaf860100, 0x92e204ec,
-0x1440000e, 0x24070001, 0x8ee24e28, 0x24420001,
-0x504a0003, 0x1021, 0x8ee24e28, 0x24420001,
-0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
-0x2e21021, 0xac480000, 0xac490004, 0x10e0ffd2,
-0x0, 0x80022e8, 0x0, 0x3c020900,
-0xaee05238, 0xaee0523c, 0xaee05240, 0xaee05244,
-0xaee001d0, 0x3c010001, 0x370821, 0xa02083b1,
-0xafa20018, 0x8ee20608, 0x8f430228, 0x24420001,
-0x304a00ff, 0x514300fd, 0xafa00010, 0x8ee20608,
-0x210c0, 0x571021, 0x8fa30018, 0x8fa4001c,
-0xac43060c, 0xac440610, 0x8f830054, 0x8f820054,
-0x24690032, 0x1221023, 0x2c420033, 0x1040006a,
-0x5821, 0x24100008, 0x240f000d, 0x240d0007,
-0x240c0040, 0x240e0001, 0x8f870120, 0x27623800,
-0x24e80020, 0x102102b, 0x50400001, 0x27683000,
-0x8f820128, 0x11020004, 0x0, 0x8f820124,
-0x15020007, 0x1021, 0x8ee201a4, 0x3821,
-0x24420001, 0xaee201a4, 0x800212c, 0x8ee201a4,
-0x8ee40608, 0x420c0, 0x801821, 0x8ee40430,
-0x8ee50434, 0xa32821, 0xa3302b, 0x822021,
-0x862021, 0xace40000, 0xace50004, 0x8ee20608,
-0xa4f0000e, 0xacef0018, 0xacea001c, 0x210c0,
-0x2442060c, 0x2e21021, 0xace20008, 0x8ee204c4,
-0xace20010, 0xaf880120, 0x92e24e20, 0x14400033,
-0x24070001, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0x8c820000, 0x144d001f, 0x0,
-0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x0,
-0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
-0x8ee34e30, 0x24420001, 0x104c0007, 0x0,
-0x8ee24e34, 0x24420001, 0x10620005, 0x0,
-0x8002119, 0x0, 0x14600005, 0x0,
-0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
-0x8c820004, 0x2c420011, 0x50400010, 0xac800000,
-0x800212c, 0x0, 0x8ee24e30, 0x24420001,
-0x504c0003, 0x1021, 0x8ee24e30, 0x24420001,
-0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0xac8d0000, 0xac8e0004, 0x54e00006,
-0x240b0001, 0x8f820054, 0x1221023, 0x2c420033,
-0x1440ff9d, 0x0, 0x316300ff, 0x24020001,
-0x54620078, 0xafa00010, 0xaeea0608, 0x8f830054,
-0x8f820054, 0x24690032, 0x1221023, 0x2c420033,
-0x10400061, 0x5821, 0x240e0008, 0x240d0011,
-0x240a0012, 0x24080040, 0x240c0001, 0x8f830120,
-0x27623800, 0x24660020, 0xc2102b, 0x50400001,
-0x27663000, 0x8f820128, 0x10c20004, 0x0,
-0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
-0x3821, 0x24420001, 0xaee201a4, 0x8002198,
-0x8ee201a4, 0x8ee20608, 0xac62001c, 0x8ee404a0,
-0x8ee504a4, 0x2462001c, 0xac620008, 0xa46e000e,
-0xac6d0018, 0xac640000, 0xac650004, 0x8ee204c4,
-0xac620010, 0xaf860120, 0x92e24e20, 0x14400033,
-0x24070001, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0x8c820000, 0x144a001f, 0x0,
-0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x0,
-0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
-0x8ee34e30, 0x24420001, 0x10480007, 0x0,
-0x8ee24e34, 0x24420001, 0x10620005, 0x0,
-0x8002185, 0x0, 0x14600005, 0x0,
-0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
-0x8c820004, 0x2c420011, 0x50400010, 0xac800000,
-0x8002198, 0x0, 0x8ee24e30, 0x24420001,
-0x50480003, 0x1021, 0x8ee24e30, 0x24420001,
-0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0xac8a0000, 0xac8c0004, 0x54e00006,
-0x240b0001, 0x8f820054, 0x1221023, 0x2c420033,
-0x1440ffa6, 0x0, 0x316300ff, 0x24020001,
-0x10620022, 0x0, 0x3c040001, 0x24845390,
-0xafa00010, 0xafa00014, 0x8f860120, 0x8f870124,
-0x3c050009, 0xc002403, 0x34a5f011, 0x80021c4,
-0x0, 0x3c040001, 0x2484539c, 0xafa00014,
-0x8f860120, 0x8f870124, 0x3c050009, 0xc002403,
-0x34a5f010, 0x80021c4, 0x0, 0x3c040001,
-0x248453a8, 0xafa00014, 0x8ee60608, 0x8f470228,
-0x3c050009, 0xc002403, 0x34a5f00f, 0x8ee201ac,
-0x24420001, 0xaee201ac, 0x8ee201ac, 0x8ee20120,
-0x24420001, 0xaee20120, 0x8ee20120, 0x8ee20168,
-0x24420001, 0xaee20168, 0x80022e8, 0x8ee20168,
-0x8f42025c, 0x26e40094, 0xaee20060, 0x8f420260,
-0x27450200, 0x24060008, 0xc00249a, 0xaee20068,
-0x8f820220, 0x30420008, 0x14400002, 0x24020001,
-0x24020002, 0xaee20108, 0x8ee2011c, 0x24420001,
-0xaee2011c, 0x80022e8, 0x8ee2011c, 0x3c040001,
-0x248453ec, 0xafa00010, 0xafa00014, 0x8fa60020,
-0x3c050003, 0xc002403, 0x34a5f00f, 0x93a20020,
-0x3c030700, 0x34631000, 0x431025, 0xafa20018,
-0x8ee20608, 0x8f430228, 0x24420001, 0x304900ff,
-0x512300e2, 0xafa00010, 0x8ee20608, 0x210c0,
-0x571021, 0x8fa30018, 0x8fa4001c, 0xac43060c,
-0xac440610, 0x8f870120, 0x27623800, 0x24e80020,
-0x102102b, 0x50400001, 0x27683000, 0x8f820128,
-0x11020004, 0x0, 0x8f820124, 0x15020007,
-0x1021, 0x8ee201a4, 0x3821, 0x24420001,
-0xaee201a4, 0x800225d, 0x8ee201a4, 0x8ee40608,
-0x420c0, 0x801821, 0x8ee40430, 0x8ee50434,
-0xa32821, 0xa3302b, 0x822021, 0x862021,
-0xace40000, 0xace50004, 0x8ee30608, 0x24020008,
-0xa4e2000e, 0x2402000d, 0xace20018, 0xace9001c,
-0x318c0, 0x2463060c, 0x2e31021, 0xace20008,
-0x8ee204c4, 0xace20010, 0xaf880120, 0x92e24e20,
-0x14400037, 0x24070001, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x8c830000, 0x24020007,
-0x1462001f, 0x0, 0x8ee34e30, 0x8ee24e34,
-0x1062001b, 0x24030040, 0x8c820004, 0x24420001,
-0xac820004, 0x8ee24e34, 0x8ee54e30, 0x24420001,
-0x10430007, 0x0, 0x8ee24e34, 0x24420001,
-0x10a20005, 0x0, 0x8002247, 0x0,
-0x14a00005, 0x0, 0x8f820128, 0x24420020,
-0xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
-0x50400013, 0xac800000, 0x800225d, 0x0,
-0x8ee24e30, 0x24030040, 0x24420001, 0x50430003,
-0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x24020007, 0xac820000, 0x24020001, 0xac820004,
-0x54e0000c, 0xaee90608, 0x3c040001, 0x248453f4,
-0xafa00010, 0xafa00014, 0x8ee60608, 0x8f470228,
-0x3c050009, 0xc002403, 0x34a5f000, 0x80022e0,
-0x0, 0x8f830120, 0x27623800, 0x24660020,
-0xc2102b, 0x50400001, 0x27663000, 0x8f820128,
-0x10c20004, 0x0, 0x8f820124, 0x14c20007,
-0x0, 0x8ee201a4, 0x3821, 0x24420001,
-0xaee201a4, 0x80022c4, 0x8ee201a4, 0x8ee20608,
-0xac62001c, 0x8ee404a0, 0x8ee504a4, 0x2462001c,
-0xac620008, 0x24020008, 0xa462000e, 0x24020011,
-0xac620018, 0xac640000, 0xac650004, 0x8ee204c4,
-0xac620010, 0xaf860120, 0x92e24e20, 0x14400037,
-0x24070001, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0x8c830000, 0x24020012, 0x1462001f,
-0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
-0x24030040, 0x8c820004, 0x24420001, 0xac820004,
-0x8ee24e34, 0x8ee54e30, 0x24420001, 0x10430007,
-0x0, 0x8ee24e34, 0x24420001, 0x10a20005,
-0x0, 0x80022ae, 0x0, 0x14a00005,
-0x0, 0x8f820128, 0x24420020, 0xaf820128,
-0x8f820128, 0x8c820004, 0x2c420011, 0x50400013,
-0xac800000, 0x80022c4, 0x0, 0x8ee24e30,
-0x24030040, 0x24420001, 0x50430003, 0x1021,
-0x8ee24e30, 0x24420001, 0xaee24e30, 0x8ee24e30,
-0x210c0, 0x24425038, 0x2e22021, 0x24020012,
-0xac820000, 0x24020001, 0xac820004, 0x14e0001b,
-0x0, 0x3c040001, 0x248453fc, 0xafa00010,
-0xafa00014, 0x8ee60608, 0x8f470228, 0x3c050009,
-0xc002403, 0x34a5f001, 0x8ee201b0, 0x24420001,
-0xaee201b0, 0x80022e0, 0x8ee201b0, 0x3c040001,
-0x24845408, 0xafa00014, 0x8ee60608, 0x8f470228,
-0x3c050009, 0xc002403, 0x34a5f005, 0x8ee201ac,
-0x24420001, 0xaee201ac, 0x8ee201ac, 0x8ee20150,
-0x24420001, 0xaee20150, 0x8ee20150, 0x8ee20160,
-0x24420001, 0xaee20160, 0x8ee20160, 0x8f43022c,
-0x8f42010c, 0x14620009, 0x24020002, 0xaf820064,
-0x8f820064, 0x14400005, 0x0, 0x8f43022c,
-0x8f42010c, 0x1462f875, 0x0, 0x8fbf0044,
-0x8fb60040, 0x8fb5003c, 0x8fb40038, 0x8fb30034,
-0x8fb20030, 0x8fb1002c, 0x8fb00028, 0x3e00008,
-0x27bd0048, 0x27bdfff8, 0x2408ffff, 0x10a00014,
-0x4821, 0x3c0aedb8, 0x354a8320, 0x90870000,
-0x24840001, 0x3021, 0x1071026, 0x30420001,
-0x10400002, 0x81842, 0x6a1826, 0x604021,
-0x24c60001, 0x2cc20008, 0x1440fff7, 0x73842,
-0x25290001, 0x125102b, 0x1440fff0, 0x0,
-0x1001021, 0x3e00008, 0x27bd0008, 0x27bdffe8,
-0x27642800, 0xafbf0010, 0xc002488, 0x24051000,
-0x24020021, 0xaf800100, 0xaf800104, 0xaf800108,
-0xaf800110, 0xaf800114, 0xaf800118, 0xaf800120,
-0xaf800124, 0xaf800128, 0xaf800130, 0xaf800134,
-0xaf800138, 0xaee04e28, 0xaee04e2c, 0xaee04e30,
-0xaee04e34, 0xaf82011c, 0x8f420218, 0x30420040,
-0x10400004, 0x0, 0x8f82011c, 0x34420004,
-0xaf82011c, 0x8fbf0010, 0x3e00008, 0x27bd0018,
-0x27bdffe0, 0xafbf0018, 0x8f820104, 0xafa20010,
-0x8f820100, 0x3c050002, 0xafa20014, 0x8f8600b0,
-0x8f87011c, 0x3c040001, 0x248454c0, 0xc002403,
-0x34a5f000, 0x8f8300b0, 0x3c027f00, 0x621824,
-0x3c020400, 0x10620029, 0x43102b, 0x14400008,
-0x3c022000, 0x3c020100, 0x10620024, 0x3c020200,
-0x10620011, 0x0, 0x8002374, 0x0,
-0x10620008, 0x3c024000, 0x1462001c, 0x0,
-0x8ee20190, 0x24420001, 0xaee20190, 0x8002374,
-0x8ee20190, 0x8ee2018c, 0x24420001, 0xaee2018c,
-0x8002374, 0x8ee2018c, 0x8f82011c, 0x34420002,
-0xaf82011c, 0x8f830104, 0x8f8200b0, 0x34420001,
-0xaf8200b0, 0xaf830104, 0x8f82011c, 0x2403fffd,
-0x431024, 0xaf82011c, 0x8ee201a0, 0x24420001,
-0xaee201a0, 0x8002377, 0x8ee201a0, 0x8f8200b0,
-0x34420001, 0xaf8200b0, 0x8fbf0018, 0x3e00008,
-0x27bd0020, 0x27bdffe0, 0xafbf001c, 0xafb00018,
-0x8f820120, 0xafa20010, 0x8f820124, 0x3c050001,
-0xafa20014, 0x8f8600a0, 0x8f87011c, 0x3c040001,
-0x248454cc, 0xc002403, 0x34a5f000, 0x8f8300a0,
-0x3c027f00, 0x621824, 0x3c020400, 0x10620053,
-0x8021, 0x43102b, 0x14400008, 0x3c042000,
-0x3c020100, 0x1062004d, 0x3c020200, 0x1062003a,
-0x0, 0x80023e0, 0x0, 0x10640003,
-0x3c024000, 0x14620045, 0x0, 0x8f8200a0,
-0x441024, 0x10400006, 0x0, 0x8ee20194,
-0x24420001, 0xaee20194, 0x80023a9, 0x8ee20194,
-0x8ee20198, 0x24420001, 0xaee20198, 0x8ee20198,
-0x8f82011c, 0x34420002, 0xaf82011c, 0x8f82011c,
-0x30420200, 0x1040001b, 0x0, 0x8f8300a0,
-0x8f840124, 0x8f8200ac, 0x14400007, 0x24020001,
-0x3c020001, 0x3442f000, 0x621024, 0x50400001,
-0x24100001, 0x24020001, 0x1200000d, 0xaf8200a0,
-0x8f820124, 0x2442ffe0, 0xaf820124, 0x8f820124,
-0x8f820124, 0x27633000, 0x43102b, 0x10400005,
-0x276237e0, 0xaf820124, 0x80023ca, 0x0,
-0xaf840124, 0x8f82011c, 0x2403fffd, 0x431024,
-0x80023e3, 0xaf82011c, 0x8f82011c, 0x34420002,
-0xaf82011c, 0x8f830124, 0x8f8200a0, 0x34420001,
-0xaf8200a0, 0xaf830124, 0x8f82011c, 0x2403fffd,
-0x431024, 0xaf82011c, 0x8ee2019c, 0x24420001,
-0xaee2019c, 0x80023e3, 0x8ee2019c, 0x8f8200a0,
-0x34420001, 0xaf8200a0, 0x8fbf001c, 0x8fb00018,
-0x3e00008, 0x27bd0020, 0x0, 0x3c020001,
-0x8c425c58, 0x27bdffe8, 0xafbf0014, 0x14400012,
-0xafb00010, 0x3c100001, 0x26105dd0, 0x2002021,
-0xc002488, 0x24052000, 0x26021fe0, 0x3c010001,
-0xac225d94, 0x3c010001, 0xac225d90, 0xaf420250,
-0x24022000, 0xaf500254, 0xaf420258, 0x24020001,
-0x3c010001, 0xac225c58, 0x8fbf0014, 0x8fb00010,
-0x3e00008, 0x27bd0018, 0x3c030001, 0x8c635d94,
-0x8c820000, 0x8fa80010, 0x8fa90014, 0xac620000,
-0x3c020001, 0x8c425d94, 0x8c830004, 0xac430004,
-0xac450008, 0x8f840054, 0x2443ffe0, 0xac460010,
-0xac470014, 0xac480018, 0xac49001c, 0x3c010001,
-0xac235d94, 0xac44000c, 0x3c020001, 0x24425dd0,
-0x62182b, 0x10600005, 0x0, 0x3c020001,
-0x8c425d90, 0x3c010001, 0xac225d94, 0x3c030001,
-0x8c635d94, 0x3c020001, 0x8c425c40, 0xac620000,
-0x3c030001, 0x8c635d94, 0x3c020001, 0x8c425c40,
-0xac620004, 0x3e00008, 0xaf430250, 0x3c030001,
-0x8c635d94, 0x3c020001, 0x8c425c40, 0x27bdffd0,
-0xafb40020, 0x8fb40040, 0xafb00010, 0x808021,
-0xafb50024, 0x8fb50044, 0x8fa40048, 0xafb10014,
-0xa08821, 0xafbf0028, 0xafb3001c, 0xafb20018,
-0xac620000, 0x3c050001, 0x8ca55d94, 0x3c020001,
-0x8c425c40, 0xc09021, 0xe09821, 0x10800006,
-0xaca20004, 0x24a50008, 0xc002490, 0x24060018,
-0x800244e, 0x0, 0x24a40008, 0xc002488,
-0x24050018, 0x3c020001, 0x8c425d94, 0x3c050001,
-0x24a55dd0, 0x2442ffe0, 0x3c010001, 0xac225d94,
-0x45102b, 0x10400005, 0x0, 0x3c020001,
-0x8c425d90, 0x3c010001, 0xac225d94, 0x3c030001,
-0x8c635d94, 0x8e020000, 0xac620000, 0x3c030001,
-0x8c635d94, 0x8e020004, 0xac620004, 0xac710008,
-0x8f840054, 0x2462ffe0, 0x3c010001, 0xac225d94,
-0x45102b, 0xac720010, 0xac730014, 0xac740018,
-0xac75001c, 0x10400005, 0xac64000c, 0x3c020001,
-0x8c425d90, 0x3c010001, 0xac225d94, 0x3c030001,
-0x8c635d94, 0x3c020001, 0x8c425c40, 0xac620000,
-0x3c030001, 0x8c635d94, 0x3c020001, 0x8c425c40,
-0xac620004, 0xaf430250, 0x8fbf0028, 0x8fb50024,
-0x8fb40020, 0x8fb3001c, 0x8fb20018, 0x8fb10014,
-0x8fb00010, 0x3e00008, 0x27bd0030, 0x10a00005,
-0x0, 0xac800000, 0x24a5fffc, 0x14a0fffd,
-0x24840004, 0x3e00008, 0x0, 0x10c00007,
-0x0, 0x8c820000, 0x24840004, 0x24c6fffc,
-0xaca20000, 0x14c0fffb, 0x24a50004, 0x3e00008,
-0x0, 0x10c00007, 0x0, 0x8ca20000,
-0x24a50004, 0x24c6fffc, 0xac820000, 0x14c0fffb,
-0x24840004, 0x3e00008, 0x0, 0x3e00008,
-0x0, 0x27bdffd8, 0xafbf0020, 0x8ee304e4,
-0x8ee204e0, 0x10620436, 0x0, 0x8ee204e4,
-0x8ee304fc, 0x21100, 0x626021, 0x95870008,
-0x8d8a0000, 0x8d8b0004, 0x958d000a, 0x8ee2725c,
-0x8ee3726c, 0x30e4ffff, 0x441021, 0x62182b,
-0x10600015, 0x31a20004, 0x8f8200d8, 0x8ee37258,
-0x431023, 0xaee2726c, 0x8ee2726c, 0x1c400003,
-0x3c030001, 0x431021, 0xaee2726c, 0x8ee2725c,
-0x8ee3726c, 0x441021, 0x62182b, 0x10600006,
-0x31a20004, 0x8ee201b8, 0x24420001, 0xaee201b8,
-0x80028e1, 0x8ee201b8, 0x10400240, 0x31a20200,
-0x1040014d, 0x4821, 0x96e2045a, 0x30420010,
-0x10400149, 0x0, 0x8f840100, 0x27623000,
-0x24850020, 0xa2102b, 0x50400001, 0x27652800,
-0x8f820108, 0x10a20004, 0x0, 0x8f820104,
-0x14a20006, 0x2402000c, 0x8ee201a8, 0x24420001,
-0xaee201a8, 0x800252c, 0x8ee201a8, 0xac8a0000,
-0xac8b0004, 0x8ee37264, 0x24060005, 0xa482000e,
-0xac860018, 0xac830008, 0x8ee204e4, 0xac82001c,
-0x8ee204c8, 0xac820010, 0xaf850100, 0x92e204ec,
-0x14400036, 0x24090001, 0x8ee24e28, 0x210c0,
-0x24424e38, 0x2e22021, 0x8c820000, 0x1446001f,
-0x0, 0x8ee34e28, 0x8ee24e2c, 0x1062001b,
-0x24030040, 0x8c820004, 0x24420001, 0xac820004,
-0x8ee24e2c, 0x8ee54e28, 0x24420001, 0x10430007,
-0x0, 0x8ee24e2c, 0x24420001, 0x10a20005,
-0x0, 0x8002516, 0x0, 0x14a00005,
-0x0, 0x8f820108, 0x24420020, 0xaf820108,
-0x8f820108, 0x8c820004, 0x2c420011, 0x50400013,
-0xac800000, 0x800252c, 0x0, 0x8ee24e28,
-0x24030040, 0x24420001, 0x50430003, 0x1021,
-0x8ee24e28, 0x24420001, 0xaee24e28, 0x8ee24e28,
-0x210c0, 0x24424e38, 0x2e22021, 0x24020005,
-0xac820000, 0x24020001, 0xac820004, 0x1520000a,
-0x3c040001, 0xafab0010, 0x8ee27264, 0x3c040001,
-0x24845730, 0x3c050004, 0xafa20014, 0x8ee604e4,
-0x80028be, 0x34a5f114, 0x8ee27264, 0x34843800,
-0x3641821, 0x24420010, 0x43102b, 0x14400073,
-0x0, 0x8ee27264, 0x24480010, 0x3641021,
-0x102102b, 0x14400002, 0x3c02ffff, 0x1024021,
-0x8f850100, 0x27623000, 0x24a60020, 0xc2102b,
-0x50400001, 0x27662800, 0x8f820108, 0x10c20004,
-0x0, 0x8f820104, 0x14c20007, 0x2563000c,
-0x8ee201a8, 0x4821, 0x24420001, 0xaee201a8,
-0x80025a0, 0x8ee201a8, 0x2c64000c, 0x1441021,
-0xaca20000, 0xaca30004, 0x24e2fff4, 0xa4a2000e,
-0x24020006, 0xaca80008, 0xaca20018, 0x8ee204e4,
-0xaca2001c, 0x8ee204c8, 0x3c030002, 0x431025,
-0xaca20010, 0xaf860100, 0x92e204ec, 0x14400037,
-0x24090001, 0x8ee24e28, 0x210c0, 0x24424e38,
-0x2e22021, 0x8c830000, 0x24020005, 0x1462001f,
-0x0, 0x8ee34e28, 0x8ee24e2c, 0x1062001b,
-0x24030040, 0x8c820004, 0x24420001, 0xac820004,
-0x8ee24e2c, 0x8ee54e28, 0x24420001, 0x10430007,
-0x0, 0x8ee24e2c, 0x24420001, 0x10a20005,
-0x0, 0x800258a, 0x0, 0x14a00005,
-0x0, 0x8f820108, 0x24420020, 0xaf820108,
-0x8f820108, 0x8c820004, 0x2c420011, 0x50400013,
-0xac800000, 0x80025a0, 0x0, 0x8ee24e28,
-0x24030040, 0x24420001, 0x50430003, 0x1021,
-0x8ee24e28, 0x24420001, 0xaee24e28, 0x8ee24e28,
-0x210c0, 0x24424e38, 0x2e22021, 0x24020005,
-0xac820000, 0x24020001, 0xac820004, 0x1520000a,
-0x2508fffc, 0xafab0010, 0x8ee27264, 0x3c040001,
-0x24845730, 0x3c050004, 0xafa20014, 0x8ee604e4,
-0x80028be, 0x34a5f125, 0x34028100, 0xa5020000,
-0x9582000e, 0x800261d, 0xa5020002, 0x8f850100,
-0x27623000, 0x24a60020, 0xc2102b, 0x50400001,
-0x27662800, 0x8f820108, 0x10c20004, 0x0,
-0x8f820104, 0x14c20007, 0x2563000c, 0x8ee201a8,
-0x4821, 0x24420001, 0xaee201a8, 0x800260d,
-0x8ee201a8, 0x2c64000c, 0x1441021, 0xaca20000,
-0xaca30004, 0x8ee37264, 0x24e2fff4, 0xa4a2000e,
-0x24020006, 0xaca20018, 0x24630010, 0xaca30008,
-0x8ee204e4, 0xaca2001c, 0x8ee204c8, 0x3c030002,
-0x431025, 0xaca20010, 0xaf860100, 0x92e204ec,
-0x14400037, 0x24090001, 0x8ee24e28, 0x210c0,
-0x24424e38, 0x2e22021, 0x8c830000, 0x24020005,
-0x1462001f, 0x0, 0x8ee34e28, 0x8ee24e2c,
-0x1062001b, 0x24030040, 0x8c820004, 0x24420001,
-0xac820004, 0x8ee24e2c, 0x8ee54e28, 0x24420001,
-0x10430007, 0x0, 0x8ee24e2c, 0x24420001,
-0x10a20005, 0x0, 0x80025f7, 0x0,
-0x14a00005, 0x0, 0x8f820108, 0x24420020,
-0xaf820108, 0x8f820108, 0x8c820004, 0x2c420011,
-0x50400013, 0xac800000, 0x800260d, 0x0,
-0x8ee24e28, 0x24030040, 0x24420001, 0x50430003,
-0x1021, 0x8ee24e28, 0x24420001, 0xaee24e28,
-0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
-0x24020005, 0xac820000, 0x24020001, 0xac820004,
-0x1520000a, 0x34028100, 0xafab0010, 0x8ee27264,
-0x3c040001, 0x24845730, 0x3c050004, 0xafa20014,
-0x8ee604e4, 0x80028be, 0x34a5f015, 0x8ee37264,
-0xa462000c, 0x8ee37264, 0x9582000e, 0xa462000e,
-0x8002681, 0x24e70004, 0x8f840100, 0x27623000,
-0x24850020, 0xa2102b, 0x50400001, 0x27652800,
-0x8f820108, 0x10a20004, 0x0, 0x8f820104,
-0x14a20007, 0x24020006, 0x8ee201a8, 0x4821,
-0x24420001, 0xaee201a8, 0x8002677, 0x8ee201a8,
-0xac8a0000, 0xac8b0004, 0x8ee37264, 0xa487000e,
-0xac820018, 0xac830008, 0x8ee204e4, 0xac82001c,
-0x8ee204c8, 0x3c030002, 0x431025, 0xac820010,
-0xaf850100, 0x92e204ec, 0x14400037, 0x24090001,
-0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
-0x8c830000, 0x24020005, 0x1462001f, 0x0,
-0x8ee34e28, 0x8ee24e2c, 0x1062001b, 0x24030040,
-0x8c820004, 0x24420001, 0xac820004, 0x8ee24e2c,
-0x8ee54e28, 0x24420001, 0x10430007, 0x0,
-0x8ee24e2c, 0x24420001, 0x10a20005, 0x0,
-0x8002661, 0x0, 0x14a00005, 0x0,
-0x8f820108, 0x24420020, 0xaf820108, 0x8f820108,
-0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
-0x8002677, 0x0, 0x8ee24e28, 0x24030040,
-0x24420001, 0x50430003, 0x1021, 0x8ee24e28,
-0x24420001, 0xaee24e28, 0x8ee24e28, 0x210c0,
-0x24424e38, 0x2e22021, 0x24020005, 0xac820000,
-0x24020001, 0xac820004, 0x15200009, 0x3c050004,
-0xafab0010, 0x8ee27264, 0x3c040001, 0x24845730,
-0xafa20014, 0x8ee604e4, 0x80028be, 0x34a5f004,
-0x8ee2725c, 0x30e7ffff, 0x471021, 0xaee2725c,
-0x8ee204e4, 0x8ee304fc, 0x8ee47258, 0x21100,
-0x431021, 0xac44000c, 0x8ee27258, 0xafa20018,
-0x8ee3725c, 0xafa3001c, 0x8ee2725c, 0x2c42003c,
-0x10400004, 0x24620001, 0x2403fffe, 0x431024,
-0xafa2001c, 0x8ee27264, 0x3c060001, 0x34c63800,
-0x8ee3725c, 0x2405fff8, 0x471021, 0x24420007,
-0x451024, 0x24630007, 0xaee27258, 0x8ee2726c,
-0x8ee47258, 0x651824, 0x431023, 0xaee2726c,
-0x3661021, 0x82202b, 0x14800004, 0x3c03ffff,
-0x8ee27258, 0x431021, 0xaee27258, 0x8ee27258,
-0xaee27264, 0x8f8200f0, 0x24470008, 0x27621800,
-0xe2102b, 0x50400001, 0x27671000, 0x8f8200f4,
-0x14e20007, 0x0, 0x8ee201b4, 0x4821,
-0x24420001, 0xaee201b4, 0x80026c4, 0x8ee201b4,
-0x8f8200f0, 0x24090001, 0x8fa30018, 0x8fa4001c,
-0xac430000, 0xac440004, 0xaf8700f0, 0x15200012,
-0xd1142, 0x8f8200f0, 0xafa20010, 0x8f8200f4,
-0x3c040001, 0x2484573c, 0xafa20014, 0x8fa60018,
-0x8fa7001c, 0x3c050004, 0xc002403, 0x34a5f005,
-0x8ee20088, 0x24420001, 0xaee20088, 0x8ee20088,
-0x80028d3, 0xaee0725c, 0x30430003, 0x24020002,
-0x10620016, 0x28620003, 0x10400005, 0x24020001,
-0x10620008, 0x0, 0x8002703, 0x0,
-0x24020003, 0x10620017, 0x0, 0x8002703,
-0x0, 0x8ee200e8, 0x8ee300ec, 0x24630001,
-0x2c640001, 0x441021, 0xaee200e8, 0xaee300ec,
-0x8ee200e8, 0x8002703, 0x8ee300ec, 0x8ee200f0,
-0x8ee300f4, 0x24630001, 0x2c640001, 0x441021,
-0xaee200f0, 0xaee300f4, 0x8ee200f0, 0x8002703,
-0x8ee300f4, 0x8ee200f8, 0x8ee300fc, 0x24630001,
-0x2c640001, 0x441021, 0xaee200f8, 0xaee300fc,
-0x8ee200f8, 0x8ee300fc, 0x8ee2725c, 0x8ee400e0,
-0x8ee500e4, 0x401821, 0x1021, 0xa32821,
-0xa3302b, 0x822021, 0x862021, 0xaee400e0,
-0xaee500e4, 0x80028d3, 0xaee0725c, 0x30e2ffff,
-0x104001c1, 0x31a20200, 0x1040014d, 0x4821,
-0x96e2045a, 0x30420010, 0x10400149, 0x0,
-0x8f840100, 0x27623000, 0x24850020, 0xa2102b,
-0x50400001, 0x27652800, 0x8f820108, 0x10a20004,
-0x0, 0x8f820104, 0x14a20006, 0x2402000c,
-0x8ee201a8, 0x24420001, 0xaee201a8, 0x800276e,
-0x8ee201a8, 0xac8a0000, 0xac8b0004, 0x8ee37264,
-0x24060005, 0xa482000e, 0xac860018, 0xac830008,
-0x8ee204e4, 0xac82001c, 0x8ee204c8, 0xac820010,
-0xaf850100, 0x92e204ec, 0x14400036, 0x24090001,
-0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
-0x8c820000, 0x1446001f, 0x0, 0x8ee34e28,
-0x8ee24e2c, 0x1062001b, 0x24030040, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e2c, 0x8ee54e28,
-0x24420001, 0x10430007, 0x0, 0x8ee24e2c,
-0x24420001, 0x10a20005, 0x0, 0x8002758,
-0x0, 0x14a00005, 0x0, 0x8f820108,
-0x24420020, 0xaf820108, 0x8f820108, 0x8c820004,
-0x2c420011, 0x50400013, 0xac800000, 0x800276e,
-0x0, 0x8ee24e28, 0x24030040, 0x24420001,
-0x50430003, 0x1021, 0x8ee24e28, 0x24420001,
-0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
-0x2e22021, 0x24020005, 0xac820000, 0x24020001,
-0xac820004, 0x1520000a, 0x3c040001, 0xafab0010,
-0x8ee27264, 0x3c040001, 0x24845730, 0x3c050004,
-0xafa20014, 0x8ee604e4, 0x80028be, 0x34a5f014,
-0x8ee27264, 0x34843800, 0x3641821, 0x24420010,
-0x43102b, 0x14400073, 0x0, 0x8ee27264,
-0x24480010, 0x3641021, 0x102102b, 0x14400002,
-0x3c02ffff, 0x1024021, 0x8f850100, 0x27623000,
-0x24a60020, 0xc2102b, 0x50400001, 0x27662800,
-0x8f820108, 0x10c20004, 0x0, 0x8f820104,
-0x14c20007, 0x2563000c, 0x8ee201a8, 0x4821,
-0x24420001, 0xaee201a8, 0x80027e2, 0x8ee201a8,
-0x2c64000c, 0x1441021, 0xaca20000, 0xaca30004,
-0x24e2fff4, 0xa4a2000e, 0x24020006, 0xaca80008,
-0xaca20018, 0x8ee204e4, 0xaca2001c, 0x8ee204c8,
-0x3c030002, 0x431025, 0xaca20010, 0xaf860100,
-0x92e204ec, 0x14400037, 0x24090001, 0x8ee24e28,
-0x210c0, 0x24424e38, 0x2e22021, 0x8c830000,
-0x24020005, 0x1462001f, 0x0, 0x8ee34e28,
-0x8ee24e2c, 0x1062001b, 0x24030040, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e2c, 0x8ee54e28,
-0x24420001, 0x10430007, 0x0, 0x8ee24e2c,
-0x24420001, 0x10a20005, 0x0, 0x80027cc,
-0x0, 0x14a00005, 0x0, 0x8f820108,
-0x24420020, 0xaf820108, 0x8f820108, 0x8c820004,
-0x2c420011, 0x50400013, 0xac800000, 0x80027e2,
-0x0, 0x8ee24e28, 0x24030040, 0x24420001,
-0x50430003, 0x1021, 0x8ee24e28, 0x24420001,
-0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
-0x2e22021, 0x24020005, 0xac820000, 0x24020001,
-0xac820004, 0x1520000a, 0x2508fffc, 0xafab0010,
-0x8ee27264, 0x3c040001, 0x24845730, 0x3c050004,
-0xafa20014, 0x8ee604e4, 0x80028be, 0x34a5f015,
-0x34028100, 0xa5020000, 0x9582000e, 0x800285f,
-0xa5020002, 0x8f850100, 0x27623000, 0x24a60020,
-0xc2102b, 0x50400001, 0x27662800, 0x8f820108,
-0x10c20004, 0x0, 0x8f820104, 0x14c20007,
-0x2563000c, 0x8ee201a8, 0x4821, 0x24420001,
-0xaee201a8, 0x800284f, 0x8ee201a8, 0x2c64000c,
-0x1441021, 0xaca20000, 0xaca30004, 0x8ee37264,
-0x24e2fff4, 0xa4a2000e, 0x24020006, 0xaca20018,
-0x24630010, 0xaca30008, 0x8ee204e4, 0xaca2001c,
-0x8ee204c8, 0x3c030002, 0x431025, 0xaca20010,
-0xaf860100, 0x92e204ec, 0x14400037, 0x24090001,
-0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
-0x8c830000, 0x24020005, 0x1462001f, 0x0,
-0x8ee34e28, 0x8ee24e2c, 0x1062001b, 0x24030040,
-0x8c820004, 0x24420001, 0xac820004, 0x8ee24e2c,
-0x8ee54e28, 0x24420001, 0x10430007, 0x0,
-0x8ee24e2c, 0x24420001, 0x10a20005, 0x0,
-0x8002839, 0x0, 0x14a00005, 0x0,
-0x8f820108, 0x24420020, 0xaf820108, 0x8f820108,
-0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
-0x800284f, 0x0, 0x8ee24e28, 0x24030040,
-0x24420001, 0x50430003, 0x1021, 0x8ee24e28,
-0x24420001, 0xaee24e28, 0x8ee24e28, 0x210c0,
-0x24424e38, 0x2e22021, 0x24020005, 0xac820000,
-0x24020001, 0xac820004, 0x1520000a, 0x34028100,
-0xafab0010, 0x8ee27264, 0x3c040001, 0x24845730,
-0x3c050004, 0xafa20014, 0x8ee604e4, 0x80028be,
-0x34a5f016, 0x8ee37264, 0xa462000c, 0x8ee37264,
-0x9582000e, 0xa462000e, 0x80028c2, 0x24e70004,
-0x8f830100, 0x27623000, 0x24640020, 0x82102b,
-0x50400001, 0x27642800, 0x8f820108, 0x10820004,
-0x0, 0x8f820104, 0x14820007, 0x24050005,
-0x8ee201a8, 0x4821, 0x24420001, 0xaee201a8,
-0x80028b6, 0x8ee201a8, 0xac6a0000, 0xac6b0004,
-0x8ee27264, 0xa467000e, 0xac650018, 0xac620008,
-0x8ee204e4, 0xac62001c, 0x8ee204c8, 0xac620010,
-0xaf840100, 0x92e204ec, 0x14400036, 0x24090001,
-0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
-0x8c820000, 0x1445001f, 0x0, 0x8ee34e28,
-0x8ee24e2c, 0x1062001b, 0x24030040, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e2c, 0x8ee54e28,
-0x24420001, 0x10430007, 0x0, 0x8ee24e2c,
-0x24420001, 0x10a20005, 0x0, 0x80028a0,
-0x0, 0x14a00005, 0x0, 0x8f820108,
-0x24420020, 0xaf820108, 0x8f820108, 0x8c820004,
-0x2c420011, 0x50400013, 0xac800000, 0x80028b6,
-0x0, 0x8ee24e28, 0x24030040, 0x24420001,
-0x50430003, 0x1021, 0x8ee24e28, 0x24420001,
-0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
-0x2e22021, 0x24020005, 0xac820000, 0x24020001,
-0xac820004, 0x1520000b, 0x3c050004, 0x3c040001,
-0x24845748, 0xafab0010, 0xafa00014, 0x8ee604e4,
-0x34a5f017, 0xc002403, 0x30e7ffff, 0x80028e1,
-0x0, 0x8ee27264, 0x3c050001, 0x30e4ffff,
-0x441021, 0xaee27264, 0x8ee2725c, 0x8ee37264,
-0x34a53800, 0x441021, 0xaee2725c, 0x3651021,
-0x62182b, 0x14600004, 0x3c03ffff, 0x8ee27264,
-0x431021, 0xaee27264, 0x8ee304e4, 0x96e20458,
-0x24630001, 0x2442ffff, 0x621824, 0xaee304e4,
-0x8ee304e4, 0x8ee204e0, 0x14620005, 0x0,
-0x8f820060, 0x2403fff7, 0x431024, 0xaf820060,
-0x8fbf0020, 0x3e00008, 0x27bd0028, 0x27bdffe0,
-0xafbf0018, 0x8ee304e8, 0x8ee204e0, 0x10620189,
-0x0, 0x8ee204e8, 0x8ee304fc, 0x21100,
-0x621821, 0x94670008, 0x92e204ed, 0x8c680000,
-0x8c690004, 0x10400023, 0x946a000a, 0x8ee204c8,
-0x34460400, 0x31420200, 0x1040001f, 0x0,
-0x96e2045a, 0x30420010, 0x1040001b, 0x3c028000,
-0x3c010001, 0x370821, 0xac2283d8, 0x8ee27264,
-0x9464000e, 0x3c050001, 0x34a53800, 0x24420004,
-0xaee27264, 0x8ee37264, 0x42400, 0x3651021,
-0x3c010001, 0x370821, 0xac2483dc, 0x62182b,
-0x14600005, 0x24e70004, 0x8ee27264, 0x3c03ffff,
-0x431021, 0xaee27264, 0x8ee27264, 0x8002917,
-0xaee27258, 0x8ee604c8, 0x8ee2726c, 0x30e4ffff,
-0x44102a, 0x10400015, 0x0, 0x8f8200d8,
-0x8ee37258, 0x431023, 0xaee2726c, 0x8ee2726c,
-0x1c400007, 0x44102a, 0x8ee2726c, 0x3c030001,
-0x431021, 0xaee2726c, 0x8ee2726c, 0x44102a,
-0x10400006, 0x0, 0x8ee201b8, 0x24420001,
-0xaee201b8, 0x8002a72, 0x8ee201b8, 0x3c020001,
-0x571021, 0x8c4283d8, 0x54400001, 0x24e7fffc,
-0x31420004, 0x104000b9, 0x30e2ffff, 0x3c020001,
-0x571021, 0x8c4283d8, 0x1040002f, 0x5021,
-0x8f840100, 0x27623000, 0x24850020, 0xa2102b,
-0x50400001, 0x27652800, 0x8f820108, 0x10a20032,
-0x0, 0x8f820104, 0x10a2002f, 0x24020015,
-0xac880000, 0xac890004, 0x8ee37264, 0xa487000e,
-0xac820018, 0xac830008, 0x8ee204e8, 0x3c030001,
-0x771821, 0x8c6383dc, 0xac860010, 0x431025,
-0xac82001c, 0xaf850100, 0x92e204ec, 0x14400066,
-0x240a0001, 0x8ee24e28, 0x24030040, 0x24420001,
-0x50430003, 0x1021, 0x8ee24e28, 0x24420001,
-0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
-0x2e21821, 0x24020015, 0xac620000, 0x24020001,
-0x80029bf, 0xac620004, 0x8f840100, 0x27623000,
-0x24850020, 0xa2102b, 0x50400001, 0x27652800,
-0x8f820108, 0x10a20004, 0x0, 0x8f820104,
-0x14a20006, 0x24020006, 0x8ee201a8, 0x24420001,
-0xaee201a8, 0x80029bf, 0x8ee201a8, 0xac880000,
-0xac890004, 0x8ee37264, 0xa487000e, 0xac820018,
-0xac830008, 0x8ee204e8, 0xac860010, 0xac82001c,
-0xaf850100, 0x92e204ec, 0x14400037, 0x240a0001,
-0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
-0x8c830000, 0x24020005, 0x1462001f, 0x0,
-0x8ee34e28, 0x8ee24e2c, 0x1062001b, 0x24030040,
-0x8c820004, 0x24420001, 0xac820004, 0x8ee24e2c,
-0x8ee54e28, 0x24420001, 0x10430007, 0x0,
-0x8ee24e2c, 0x24420001, 0x10a20005, 0x0,
-0x80029a9, 0x0, 0x14a00005, 0x0,
-0x8f820108, 0x24420020, 0xaf820108, 0x8f820108,
-0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
-0x80029bf, 0x0, 0x8ee24e28, 0x24030040,
-0x24420001, 0x50430003, 0x1021, 0x8ee24e28,
-0x24420001, 0xaee24e28, 0x8ee24e28, 0x210c0,
-0x24424e38, 0x2e22021, 0x24020005, 0xac820000,
-0x24020001, 0xac820004, 0x1540000a, 0x24020001,
-0xafa90010, 0x8ee27264, 0x3c040001, 0x24845730,
-0x3c050004, 0xafa20014, 0x8ee604e4, 0x8002a4f,
-0x34a5f204, 0xa2e204ed, 0x8ee204e8, 0x8ee304fc,
-0x8ee47258, 0x3c060001, 0x34c63800, 0x3c010001,
-0x370821, 0xac2083d8, 0x3c010001, 0x370821,
-0xac2083dc, 0x21100, 0x431021, 0xac44000c,
-0x8ee27264, 0x2405fff8, 0x30e3ffff, 0x431021,
-0x24420007, 0x451024, 0x24630007, 0xaee27258,
-0x8ee2726c, 0x8ee47258, 0x651824, 0x431023,
-0xaee2726c, 0x3661021, 0x82202b, 0x14800004,
-0x3c03ffff, 0x8ee27258, 0x431021, 0xaee27258,
-0x8ee27258, 0x8002a64, 0xaee27264, 0x10400073,
-0x0, 0x8f830100, 0x27623000, 0x24640020,
-0x82102b, 0x14400002, 0x5021, 0x27642800,
-0x8f820108, 0x10820004, 0x0, 0x8f820104,
-0x14820006, 0x24050005, 0x8ee201a8, 0x24420001,
-0xaee201a8, 0x8002a46, 0x8ee201a8, 0xac680000,
-0xac690004, 0x8ee27264, 0xa467000e, 0xac650018,
-0xac620008, 0x8ee204e8, 0xac660010, 0xac62001c,
-0xaf840100, 0x92e204ec, 0x14400036, 0x240a0001,
-0x8ee24e28, 0x210c0, 0x24424e38, 0x2e22021,
-0x8c820000, 0x1445001f, 0x0, 0x8ee34e28,
-0x8ee24e2c, 0x1062001b, 0x24030040, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e2c, 0x8ee54e28,
-0x24420001, 0x10430007, 0x0, 0x8ee24e2c,
-0x24420001, 0x10a20005, 0x0, 0x8002a30,
-0x0, 0x14a00005, 0x0, 0x8f820108,
-0x24420020, 0xaf820108, 0x8f820108, 0x8c820004,
-0x2c420011, 0x50400013, 0xac800000, 0x8002a46,
-0x0, 0x8ee24e28, 0x24030040, 0x24420001,
-0x50430003, 0x1021, 0x8ee24e28, 0x24420001,
-0xaee24e28, 0x8ee24e28, 0x210c0, 0x24424e38,
-0x2e22021, 0x24020005, 0xac820000, 0x24020001,
-0xac820004, 0x1540000c, 0x30e5ffff, 0x3c040001,
-0x24845748, 0x3c050004, 0xafa90010, 0xafa00014,
-0x8ee604e4, 0x34a5f237, 0xc002403, 0x30e7ffff,
-0x8002a72, 0x0, 0x8ee27264, 0x451021,
-0xaee27264, 0x8ee2726c, 0x8ee37264, 0x3c040001,
-0x34843800, 0xa2e004ed, 0x451023, 0xaee2726c,
-0x3641021, 0x62182b, 0x14600004, 0x3c03ffff,
-0x8ee27264, 0x431021, 0xaee27264, 0x8ee304e8,
-0x96e20458, 0x24630001, 0x2442ffff, 0x621824,
-0xaee304e8, 0x8ee304e8, 0x8ee204e0, 0x14620005,
-0x0, 0x8f820060, 0x2403fff7, 0x431024,
-0xaf820060, 0x8fbf0018, 0x3e00008, 0x27bd0020,
-0x27bdffe0, 0xafbf001c, 0xafb00018, 0x8f820100,
-0x8ee34e2c, 0x8f820104, 0x8f850108, 0x24020040,
-0x24630001, 0x50620003, 0x1021, 0x8ee24e2c,
-0x24420001, 0xaee24e2c, 0x8ee24e2c, 0x8ee34e2c,
-0x210c0, 0x24424e38, 0x2e22021, 0x8ee24e28,
-0x8c870004, 0x14620007, 0xa03021, 0x8f820108,
-0x24420020, 0xaf820108, 0x8f820108, 0x8002aa2,
-0xac800000, 0x8ee24e2c, 0x24030040, 0x24420001,
-0x50430003, 0x1021, 0x8ee24e2c, 0x24420001,
-0x210c0, 0x24424e38, 0x2e22021, 0x8c820004,
-0x8f830108, 0x21140, 0x621821, 0xaf830108,
-0xac800000, 0x8cc20018, 0x2443fffe, 0x2c620013,
-0x104000c1, 0x31080, 0x3c010001, 0x220821,
-0x8c225770, 0x400008, 0x0, 0x8ee204f0,
-0x471021, 0xaee204f0, 0x8ee204f0, 0x8f43023c,
-0x43102b, 0x144000be, 0x0, 0x8ee304e4,
-0x8ee204f8, 0x506200ba, 0xa2e004f4, 0x8f830120,
-0x27623800, 0x24660020, 0xc2102b, 0x50400001,
-0x27663000, 0x8f820128, 0x10c20004, 0x0,
-0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
-0x8021, 0x24420001, 0xaee201a4, 0x8002b12,
-0x8ee201a4, 0x8ee204e4, 0xac62001c, 0x8ee404b0,
-0x8ee504b4, 0x2462001c, 0xac620008, 0x24020008,
-0xa462000e, 0x24020011, 0xac620018, 0xac640000,
-0xac650004, 0x8ee204c4, 0xac620010, 0xaf860120,
-0x92e24e20, 0x14400037, 0x24100001, 0x8ee24e30,
-0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
-0x24020012, 0x1462001f, 0x0, 0x8ee34e30,
-0x8ee24e34, 0x1062001b, 0x24030040, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e34, 0x8ee54e30,
-0x24420001, 0x10430007, 0x0, 0x8ee24e34,
-0x24420001, 0x10a20005, 0x0, 0x8002afc,
-0x0, 0x14a00005, 0x0, 0x8f820128,
-0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
-0x2c420011, 0x50400013, 0xac800000, 0x8002b12,
-0x0, 0x8ee24e30, 0x24030040, 0x24420001,
-0x50430003, 0x1021, 0x8ee24e30, 0x24420001,
-0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0x24020012, 0xac820000, 0x24020001,
-0xac820004, 0x5600000b, 0x24100001, 0x8ee204e4,
-0x3c040001, 0x24845754, 0xafa00014, 0xafa20010,
-0x8ee60608, 0x8f470228, 0x3c050009, 0xc002403,
-0x34a5f006, 0x16000003, 0x24020001, 0x8002b71,
-0xa2e204f4, 0x8ee20170, 0x24420001, 0xaee20170,
-0x8ee20170, 0x8ee204e4, 0xa2e004f4, 0xaee004f0,
-0xaee204f8, 0x8f42023c, 0x50400045, 0xaee07274,
-0x8ee20184, 0x24420001, 0xaee20184, 0x8ee20184,
-0x8002b71, 0xaee07274, 0x8ee20504, 0x24030040,
-0x24420001, 0x50430003, 0x1021, 0x8ee20504,
-0x24420001, 0xaee20504, 0x8ee20504, 0x8cc30018,
-0x21080, 0x571021, 0x8c440508, 0x24020003,
-0x1462000f, 0x0, 0x3c020001, 0x571021,
-0x904283b1, 0x10400014, 0x0, 0x8ee201d0,
-0x8ee35240, 0x441021, 0xaee201d0, 0x8ee201d8,
-0x641821, 0x306300ff, 0x8002b59, 0xaee35240,
-0x8ee201cc, 0x8ee30e10, 0x441021, 0xaee201cc,
-0x8ee201d8, 0x641821, 0x306301ff, 0xaee30e10,
-0x441021, 0xaee201d8, 0x8ee20000, 0x34420040,
-0x8002b71, 0xaee20000, 0x8ee2014c, 0x3c010001,
-0x370821, 0xa02083e0, 0x24420001, 0xaee2014c,
-0x8002b71, 0x8ee2014c, 0x94c7000e, 0x8cc2001c,
-0x3c040001, 0x24845760, 0xafa60014, 0xafa20010,
-0x8cc60018, 0x3c050008, 0xc002403, 0x34a50910,
-0x8fbf001c, 0x8fb00018, 0x3e00008, 0x27bd0020,
-0x27bdff98, 0xafbf0060, 0xafbe005c, 0xafb60058,
-0xafb50054, 0xafb40050, 0xafb3004c, 0xafb20048,
-0xafb10044, 0xafb00040, 0x8f830108, 0x8f820104,
-0xafa00024, 0x106203e7, 0xafa0002c, 0x3c1e0001,
-0x37de3800, 0x3c0bffff, 0x8f930108, 0x8e620018,
-0x8f830104, 0x2443fffe, 0x2c620014, 0x104003cf,
-0x31080, 0x3c010001, 0x220821, 0x8c2257c0,
-0x400008, 0x0, 0x9663000e, 0x8ee2725c,
-0x8ee404f0, 0x431021, 0xaee2725c, 0x8e63001c,
-0x96e20458, 0x24840001, 0xaee404f0, 0x24630001,
-0x2442ffff, 0x621824, 0xaee304e4, 0x8f42023c,
-0x82202b, 0x148003b9, 0x0, 0x8f830120,
-0x27623800, 0x24660020, 0xc2102b, 0x50400001,
-0x27663000, 0x8f820128, 0x10c20004, 0x0,
-0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
-0x8021, 0x24420001, 0xaee201a4, 0x8002bfe,
-0x8ee201a4, 0x8ee204e4, 0xac62001c, 0x8ee404b0,
-0x8ee504b4, 0x2462001c, 0xac620008, 0x24020008,
-0xa462000e, 0x24020011, 0xac620018, 0xac640000,
-0xac650004, 0x8ee204c4, 0xac620010, 0xaf860120,
-0x92e24e20, 0x14400037, 0x24100001, 0x8ee24e30,
-0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
-0x24020012, 0x1462001f, 0x0, 0x8ee34e30,
-0x8ee24e34, 0x1062001b, 0x240c0040, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
-0x24420001, 0x104c0007, 0x0, 0x8ee24e34,
-0x24420001, 0x10620005, 0x0, 0x8002be8,
-0x0, 0x14600005, 0x0, 0x8f820128,
-0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
-0x2c420011, 0x50400013, 0xac800000, 0x8002bfe,
-0x0, 0x8ee24e30, 0x240c0040, 0x24420001,
-0x504c0003, 0x1021, 0x8ee24e30, 0x24420001,
-0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0x24020012, 0x240c0001, 0xac820000,
-0xac8c0004, 0x5600000d, 0x24100001, 0x8ee204e4,
-0x3c040001, 0x24845754, 0xafa00014, 0xafa20010,
-0x8ee60608, 0x8f470228, 0x3c050009, 0x34a5f006,
-0xc002403, 0xafab0038, 0x8fab0038, 0x1200030a,
-0x240c0001, 0x8002f19, 0x0, 0x966c001c,
-0xafac002c, 0x9662001e, 0x3c0c8000, 0xafac0024,
-0xae62001c, 0x8e75001c, 0x8ee204fc, 0x8ee404fc,
-0x151900, 0x621021, 0x8c52000c, 0x92e27b98,
-0x641821, 0x9476000a, 0x14400003, 0x32c20002,
-0xaef27ba4, 0xaef57b9c, 0x1040004b, 0x8021,
-0x96e2045a, 0x30420002, 0x10400047, 0x0,
-0x8e63001c, 0x8ee204fc, 0x32100, 0x821021,
-0x8c42000c, 0x37e1821, 0x24420022, 0x43102b,
-0x1440000a, 0x24050014, 0x8ee204fc, 0x821021,
-0x8c44000c, 0xafab0038, 0xc002f75, 0x2484000e,
-0x8fab0038, 0x8002c52, 0x3050ffff, 0x8ee204fc,
-0x821021, 0x8c42000c, 0x9450000e, 0x94430010,
-0x94440012, 0x94450014, 0x2038021, 0x2048021,
-0x2058021, 0x94430016, 0x94440018, 0x9445001a,
-0x2038021, 0x2048021, 0x2058021, 0x9443001c,
-0x9444001e, 0x94420020, 0x2038021, 0x2048021,
-0x2028021, 0x101c02, 0x3202ffff, 0x628021,
-0x8e63001c, 0x8ee204fc, 0x102402, 0x32900,
-0xa21021, 0x8c43000c, 0x3202ffff, 0x828021,
-0x37e1021, 0x24630018, 0x62182b, 0x14600009,
-0x0, 0x8ee204fc, 0xa21021, 0x8c43000c,
-0x101027, 0x3c01ffff, 0x230821, 0x8002c6f,
-0xa4220018, 0x8ee204fc, 0xa21021, 0x8c43000c,
-0x101027, 0xa4620018, 0x96e2045a, 0x8821,
-0x30420008, 0x14400063, 0xa021, 0x8e63001c,
-0x8ee204fc, 0x33100, 0xc21021, 0x8c42000c,
-0x37e1821, 0x24420022, 0x43102b, 0x14400035,
-0x0, 0x8ee204fc, 0xc21021, 0x8c42000c,
-0x24470010, 0x37e1021, 0xe2102b, 0x50400001,
-0xeb3821, 0x8ee204fc, 0x94f10000, 0xc21021,
-0x8c42000c, 0x24470016, 0x37e1021, 0xe2102b,
-0x14400002, 0x2634ffec, 0xeb3821, 0x8ee204fc,
-0x90e30001, 0xc21021, 0x8c42000c, 0x2447001a,
-0x37e1021, 0xe2102b, 0x14400002, 0x2838821,
-0xeb3821, 0x94e20000, 0x24e70002, 0x2228821,
-0x37e1021, 0xe2102b, 0x50400001, 0xeb3821,
-0x94e20000, 0x24e70002, 0x2228821, 0x37e1021,
-0xe2102b, 0x50400001, 0xeb3821, 0x94e20000,
-0x24e70002, 0x2228821, 0x37e1021, 0xe2102b,
-0x50400001, 0xeb3821, 0x94e20000, 0x8002cd0,
-0x2228821, 0x8ee204fc, 0xc21021, 0x8c43000c,
-0x8ee204fc, 0x94710010, 0x8ee304fc, 0xc21021,
-0x8c44000c, 0xc31821, 0x8c62000c, 0x2634ffec,
-0x90840017, 0x8ee304fc, 0x9442001a, 0x2848821,
-0xc31821, 0x8c65000c, 0x8ee304fc, 0x2228821,
-0x8ee204fc, 0xc31821, 0xc21021, 0x8c44000c,
-0x8c62000c, 0x94a3001c, 0x9484001e, 0x94420020,
-0x2238821, 0x2248821, 0x2228821, 0x111c02,
-0x3222ffff, 0x628821, 0x111c02, 0x3222ffff,
-0x628821, 0x32c20001, 0x104000b2, 0x0,
-0x96e2045a, 0x30420001, 0x104000ae, 0x32c20080,
-0x10400008, 0x0, 0x92e27b98, 0x14400005,
-0x0, 0x240c0001, 0xa2ec7b98, 0xaef57b9c,
-0xaef27ba4, 0x8ee304fc, 0x151100, 0x431021,
-0x8c47000c, 0x37e1821, 0x24e2000e, 0x43102b,
-0x14400008, 0xe02021, 0x2405000e, 0xc002f75,
-0xafab0038, 0x3042ffff, 0x8fab0038, 0x8002d09,
-0x2028021, 0x94e60000, 0x24e70002, 0x94e50000,
-0x24e70002, 0x94e30000, 0x24e70002, 0x94e20000,
-0x24e70002, 0x94e40000, 0x24e70002, 0x2068021,
-0x2058021, 0x2038021, 0x2028021, 0x94e20000,
-0x94e30002, 0x2048021, 0x2028021, 0x2038021,
-0x101c02, 0x3202ffff, 0x628021, 0x101c02,
-0x3202ffff, 0x8ee47b9c, 0x628021, 0x14950004,
-0x3205ffff, 0x96620016, 0x8002d17, 0x512021,
-0x96620016, 0x542021, 0x41402, 0x3083ffff,
-0x432021, 0x852023, 0x41402, 0x822021,
-0x3084ffff, 0x50800001, 0x3404ffff, 0x8ee27ba4,
-0x24430017, 0x37e1021, 0x62102b, 0x50400001,
-0x6b1821, 0x90630000, 0x24020011, 0x14620031,
-0x24020006, 0x8ee27ba4, 0x37e1821, 0x24420028,
-0x43102b, 0x14400018, 0x0, 0x8ee27b9c,
-0x12a2000a, 0x32c20100, 0x8ee27ba4, 0x3c01ffff,
-0x220821, 0x94220028, 0x822021, 0x41c02,
-0x3082ffff, 0x622021, 0x32c20100, 0x14400004,
-0x41027, 0x92e27b98, 0x14400002, 0x41027,
-0x3044ffff, 0x8ee27ba4, 0x3c01ffff, 0x220821,
-0x8002d8a, 0xa4240028, 0x8ee27b9c, 0x12a20008,
-0x32c20100, 0x8ee27ba4, 0x94420028, 0x822021,
-0x41c02, 0x3082ffff, 0x622021, 0x32c20100,
-0x14400004, 0x41027, 0x92e27b98, 0x14400002,
-0x41027, 0x3044ffff, 0x8ee27ba4, 0x8002d8a,
-0xa4440028, 0x1462002f, 0x37e1821, 0x8ee27ba4,
-0x24420032, 0x43102b, 0x14400018, 0x0,
-0x8ee27b9c, 0x12a2000a, 0x32c20100, 0x8ee27ba4,
-0x3c01ffff, 0x220821, 0x94220032, 0x822021,
-0x41c02, 0x3082ffff, 0x622021, 0x32c20100,
-0x14400004, 0x41027, 0x92e27b98, 0x14400002,
-0x41027, 0x3044ffff, 0x8ee27ba4, 0x3c01ffff,
-0x220821, 0x8002d8a, 0xa4240032, 0x8ee27b9c,
-0x12a20008, 0x32c20100, 0x8ee27ba4, 0x94420032,
-0x822021, 0x41c02, 0x3082ffff, 0x622021,
-0x32c20100, 0x14400004, 0x41027, 0x92e27b98,
-0x14400002, 0x41027, 0x3044ffff, 0x8ee27ba4,
-0xa4440032, 0x8fac0024, 0x1180002c, 0x37e1821,
-0x8e420000, 0xae42fffc, 0x2642000a, 0x43102b,
-0x1440001b, 0x34038100, 0x26430004, 0x37e1021,
-0x62102b, 0x14400003, 0x602021, 0x6b1821,
-0x602021, 0x8c620000, 0x24630004, 0xae420000,
-0x37e1021, 0x62102b, 0x50400001, 0x6b1821,
-0x8c620000, 0xac820000, 0x34028100, 0xa4620000,
-0x24630002, 0x37e1021, 0x62102b, 0x50400001,
-0x6b1821, 0x97ac002e, 0x8002db4, 0xa46c0000,
-0x8e420004, 0x8e440008, 0xa6430008, 0x97ac002e,
-0xa64c000a, 0xae420000, 0xae440004, 0x9662000e,
-0x2652fffc, 0x24420004, 0xa662000e, 0x9662000e,
-0x8ee3725c, 0x621821, 0xaee3725c, 0xafb20018,
-0x8ee3725c, 0xafa3001c, 0x8ee2725c, 0x2c42003c,
-0x10400004, 0x24620001, 0x2403fffe, 0x431024,
-0xafa2001c, 0x32c20080, 0x1040000c, 0x32c20100,
-0x8ee27ba8, 0x24430001, 0x210c0, 0x571021,
-0xaee37ba8, 0x8fa30018, 0x8fa4001c, 0xac437bac,
-0xac447bb0, 0x8002ea0, 0xaee0725c, 0x10400072,
-0x0, 0x8ee27ba8, 0x24430001, 0x210c0,
-0x571021, 0xaee37ba8, 0x8fa30018, 0x8fa4001c,
-0xac437bac, 0xac447bb0, 0x8ee27ba8, 0x10400063,
-0x4821, 0x5021, 0x8f8200f0, 0x24480008,
-0x27621800, 0x102102b, 0x50400001, 0x27681000,
-0x8f8200f4, 0x15020007, 0x0, 0x8ee201b4,
-0x8021, 0x24420001, 0xaee201b4, 0x8002dfa,
-0x8ee201b4, 0x8f8300f0, 0x24100001, 0x1571021,
-0x8c447bac, 0x8c457bb0, 0xac640000, 0xac650004,
-0xaf8800f0, 0x16000006, 0x2ea1021, 0x8ee20088,
-0x24420001, 0xaee20088, 0x8002e3f, 0x8ee20088,
-0x8c427bb0, 0x8ee400e0, 0x8ee500e4, 0x8ee67b9c,
-0x401821, 0x1021, 0xa32821, 0xa3382b,
-0x822021, 0x872021, 0x8ee204fc, 0xc93021,
-0x63100, 0xaee400e0, 0xaee500e4, 0xc23021,
-0x94c2000a, 0x240c0002, 0x21142, 0x30430003,
-0x106c0016, 0x28620003, 0x10400005, 0x240c0001,
-0x106c0008, 0x0, 0x8002e3f, 0x0,
-0x240c0003, 0x106c0017, 0x0, 0x8002e3f,
-0x0, 0x8ee200e8, 0x8ee300ec, 0x24630001,
-0x2c640001, 0x441021, 0xaee200e8, 0xaee300ec,
-0x8ee200e8, 0x8002e3f, 0x8ee300ec, 0x8ee200f0,
-0x8ee300f4, 0x24630001, 0x2c640001, 0x441021,
-0xaee200f0, 0xaee300f4, 0x8ee200f0, 0x8002e3f,
-0x8ee300f4, 0x8ee200f8, 0x8ee300fc, 0x24630001,
-0x2c640001, 0x441021, 0xaee200f8, 0xaee300fc,
-0x8ee200f8, 0x8ee300fc, 0x8ee27ba8, 0x25290001,
-0x122102b, 0x1440ffa0, 0x254a0008, 0xa2e07b98,
-0x8002e9f, 0xaee07ba8, 0x8f8200f0, 0x24470008,
-0x27621800, 0xe2102b, 0x50400001, 0x27671000,
-0x8f8200f4, 0x14e20007, 0x0, 0x8ee201b4,
-0x8021, 0x24420001, 0xaee201b4, 0x8002e5d,
-0x8ee201b4, 0x8f8200f0, 0x24100001, 0x8fa30018,
-0x8fa4001c, 0xac430000, 0xac440004, 0xaf8700f0,
-0x16000007, 0x0, 0x8ee20088, 0x24420001,
-0xaee20088, 0x8ee20088, 0x8002ea0, 0xaee0725c,
-0x8ee2725c, 0x8ee400e0, 0x8ee500e4, 0x240c0002,
-0x401821, 0x1021, 0xa32821, 0xa3302b,
-0x822021, 0x862021, 0x161142, 0x30430003,
-0xaee400e0, 0xaee500e4, 0x106c0017, 0x2c620003,
-0x10400005, 0x240c0001, 0x106c0008, 0x0,
-0x8002ea0, 0xaee0725c, 0x240c0003, 0x106c0019,
-0x0, 0x8002ea0, 0xaee0725c, 0x8ee200e8,
-0x8ee300ec, 0x24630001, 0x2c640001, 0x441021,
-0xaee200e8, 0xaee300ec, 0x8ee200e8, 0x8ee300ec,
-0x8002ea0, 0xaee0725c, 0x8ee200f0, 0x8ee300f4,
-0x24630001, 0x2c640001, 0x441021, 0xaee200f0,
-0xaee300f4, 0x8ee200f0, 0x8ee300f4, 0x8002ea0,
-0xaee0725c, 0x8ee200f8, 0x8ee300fc, 0x24630001,
-0x2c640001, 0x441021, 0xaee200f8, 0xaee300fc,
-0x8ee200f8, 0x8ee300fc, 0xaee0725c, 0x8e62001c,
-0x96e30458, 0x8ee404f0, 0x24420001, 0x2463ffff,
-0x431024, 0x24840001, 0xaee204e4, 0xaee404f0,
-0x8f42023c, 0x82202b, 0x148000b0, 0x0,
-0x8f830120, 0x27623800, 0x24660020, 0xc2102b,
-0x50400001, 0x27663000, 0x8f820128, 0x10c20004,
-0x0, 0x8f820124, 0x14c20007, 0x0,
-0x8ee201a4, 0x8021, 0x24420001, 0xaee201a4,
-0x8002f07, 0x8ee201a4, 0x8ee204e4, 0xac62001c,
-0x8ee404b0, 0x8ee504b4, 0x2462001c, 0xac620008,
-0x24020008, 0xa462000e, 0x24020011, 0xac620018,
-0xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
-0xaf860120, 0x92e24e20, 0x14400037, 0x24100001,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x8c830000, 0x24020012, 0x1462001f, 0x0,
-0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x240c0040,
-0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
-0x8ee34e30, 0x24420001, 0x104c0007, 0x0,
-0x8ee24e34, 0x24420001, 0x10620005, 0x0,
-0x8002ef1, 0x0, 0x14600005, 0x0,
-0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
-0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
-0x8002f07, 0x0, 0x8ee24e30, 0x240c0040,
-0x24420001, 0x504c0003, 0x1021, 0x8ee24e30,
-0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x24020012, 0x240c0001,
-0xac820000, 0xac8c0004, 0x5600000d, 0x24100001,
-0x8ee204e4, 0x3c040001, 0x24845754, 0xafa00014,
-0xafa20010, 0x8ee60608, 0x8f470228, 0x3c050009,
-0x34a5f006, 0xc002403, 0xafab0038, 0x8fab0038,
-0x16000003, 0x240c0001, 0x8002f5c, 0xa2ec04f4,
-0x8ee20170, 0x24420001, 0xaee20170, 0x8ee20170,
-0x8ee204e4, 0xa2e004f4, 0xaee004f0, 0xaee07274,
-0xaee204f8, 0x8f42023c, 0x10400038, 0x0,
-0x8ee20184, 0x24420001, 0xaee20184, 0x8002f5c,
-0x8ee20184, 0x8ee20504, 0x240c0040, 0x24420001,
-0x504c0003, 0x1021, 0x8ee20504, 0x24420001,
-0xaee20504, 0x8ee20504, 0x8e630018, 0x240c0003,
-0x21080, 0x571021, 0x146c000f, 0x8c440508,
-0x3c020001, 0x571021, 0x904283b1, 0x10400014,
-0x0, 0x8ee201d0, 0x8ee35240, 0x441021,
-0xaee201d0, 0x8ee201d8, 0x641821, 0x306300ff,
-0x8002f4f, 0xaee35240, 0x8ee201cc, 0x8ee30e10,
-0x441021, 0xaee201cc, 0x8ee201d8, 0x641821,
-0x306301ff, 0xaee30e10, 0x441021, 0xaee201d8,
-0x8ee20000, 0x34420040, 0x8002f5c, 0xaee20000,
-0x8ee2014c, 0x3c010001, 0x370821, 0xa02083e0,
-0x24420001, 0xaee2014c, 0x8ee2014c, 0x8f820108,
-0x24420020, 0xaf820108, 0x8f820108, 0x8f820108,
-0x27633000, 0x43102b, 0x14400002, 0x27622800,
-0xaf820108, 0x8f830108, 0x8f820104, 0x1462fc1e,
-0x0, 0x8fbf0060, 0x8fbe005c, 0x8fb60058,
-0x8fb50054, 0x8fb40050, 0x8fb3004c, 0x8fb20048,
-0x8fb10044, 0x8fb00040, 0x3e00008, 0x27bd0068,
-0x52843, 0x10a0000d, 0x3021, 0x3c030001,
-0x34633800, 0x3c07ffff, 0x3631021, 0x82102b,
-0x50400001, 0x872021, 0x94820000, 0x24840002,
-0x24a5ffff, 0x14a0fff8, 0xc23021, 0x61c02,
-0x30c2ffff, 0x623021, 0x61c02, 0x30c2ffff,
-0x623021, 0x3e00008, 0x30c2ffff, 0x27bdff88,
-0x240f0001, 0xafbf0070, 0xafbe006c, 0xafb60068,
-0xafb50064, 0xafb40060, 0xafb3005c, 0xafb20058,
-0xafb10054, 0xafb00050, 0xa3a00027, 0xafaf002c,
-0x8ee204d4, 0x8021, 0x30420001, 0x1440002a,
-0xa3a00037, 0x8f8700e0, 0x8f8800c4, 0x8f8200e8,
-0xe22023, 0x2c821000, 0x50400001, 0x24841000,
-0x420c2, 0x801821, 0x8ee400c8, 0x8ee500cc,
-0x1021, 0xa32821, 0xa3302b, 0x822021,
-0x862021, 0xaee400c8, 0xaee500cc, 0x8f8300c8,
-0x3c02000a, 0x3442efff, 0x1032023, 0x44102b,
-0x10400003, 0x3c02000a, 0x3442f000, 0x822021,
-0x801821, 0x8ee400c0, 0x8ee500c4, 0x1021,
-0xa32821, 0xa3302b, 0x822021, 0x862021,
-0xaee400c0, 0xaee500c4, 0xaf8800c8, 0xaf8700e4,
-0x80034cc, 0xaf8700e8, 0x3c020001, 0x571021,
-0x904283c0, 0x1040000b, 0x0, 0x3c140001,
-0x297a021, 0x8e9483c4, 0x3c130001, 0x2779821,
-0x8e7383c8, 0x3c120001, 0x2579021, 0x8003193,
-0x8e5283cc, 0x8f8300e0, 0x8f8200e4, 0x10430007,
-0x8821, 0x8f8200e4, 0x24110001, 0x8c430000,
-0x8c440004, 0xafa30018, 0xafa4001c, 0x1620000e,
-0x3c02ffff, 0x8f8200c4, 0xafa20010, 0x8f8200c8,
-0x3c040001, 0x24845870, 0xafa20014, 0x8f8600e0,
-0x8f8700e4, 0x3c050006, 0xc002403, 0x34a5f000,
-0x80034cc, 0x0, 0x8fa3001c, 0x8fb20018,
-0x3074ffff, 0x2694fffc, 0x621024, 0x10400058,
-0x2409821, 0x3c020080, 0x621024, 0x1040000a,
-0x3c040040, 0x8ee2007c, 0x24420001, 0xaee2007c,
-0x8ee2007c, 0x8ee201fc, 0x24420001, 0xaee201fc,
-0x80034c6, 0x8ee201fc, 0x3c060004, 0x3c0b0001,
-0x3c0a0002, 0x3c050010, 0x3c090008, 0x8ee20080,
-0x3c080020, 0x34078000, 0x24420001, 0xaee20080,
-0x8ee20080, 0x8fa2001c, 0x441824, 0x10660021,
-0xc3102b, 0x14400007, 0x0, 0x106b0011,
-0x0, 0x106a0015, 0x0, 0x8003049,
-0x42042, 0x10650023, 0xa3102b, 0x14400005,
-0x0, 0x10690019, 0x0, 0x8003049,
-0x42042, 0x10680021, 0x0, 0x8003049,
-0x42042, 0x8ee20034, 0x24420001, 0xaee20034,
-0x8ee20034, 0x8003049, 0x42042, 0x8ee201ec,
-0x24420001, 0xaee201ec, 0x8ee201ec, 0x8003049,
-0x42042, 0x8ee201f0, 0x24420001, 0xaee201f0,
-0x8ee201f0, 0x8003049, 0x42042, 0x8ee201f4,
-0x24420001, 0xaee201f4, 0x8ee201f4, 0x8003049,
-0x42042, 0x8ee20030, 0x24420001, 0xaee20030,
-0x8ee20030, 0x8003049, 0x42042, 0x8ee201f8,
-0x24420001, 0xaee201f8, 0x8ee201f8, 0x42042,
-0x1087047c, 0x0, 0x800300e, 0x0,
-0x3c020001, 0x571021, 0x904283b2, 0x14400084,
-0x24020001, 0x3c030001, 0x771821, 0x906383b3,
-0x1462007f, 0x3c020100, 0x8e430000, 0x621024,
-0x1040006f, 0x2402ffff, 0x14620005, 0x24100001,
-0x96430004, 0x3402ffff, 0x10620075, 0x0,
-0x92e204d8, 0x14400072, 0x0, 0x3c020001,
-0x571021, 0x8c4283b4, 0x28420005, 0x10400020,
-0x3821, 0x3c020001, 0x571021, 0x8c4283b4,
-0x18400016, 0x2821, 0x96660000, 0x520c0,
-0x971021, 0x9442777e, 0x14460009, 0x971021,
-0x94437780, 0x96620002, 0x14620005, 0x971021,
-0x94437782, 0x96620004, 0x50620008, 0x24070001,
-0x3c020001, 0x571021, 0x8c4283b4, 0x24a50001,
-0xa2102a, 0x5440ffee, 0x520c0, 0x30e200ff,
-0x10400440, 0x0, 0x80030d5, 0x0,
-0x2402021, 0xc0022fe, 0x24050006, 0x3044001f,
-0x428c0, 0x2e51021, 0x9442727c, 0x30424000,
-0x14400434, 0xb71021, 0x9443727e, 0x96620000,
-0x1462000b, 0x418c0, 0xb71021, 0x94437280,
-0x96620002, 0x14620006, 0x418c0, 0xb71021,
-0x94437282, 0x96620004, 0x10620035, 0x418c0,
-0x2e31021, 0x9442727c, 0x30428000, 0x14400421,
-0x2e31021, 0x944b727c, 0x96670000, 0xb28c0,
-0xb71021, 0x9442737e, 0x80030b7, 0x3021,
-0x420c0, 0x2e41021, 0x9443737c, 0x2e41021,
-0x944b737c, 0x30638000, 0x14600010, 0xb28c0,
-0xb71021, 0x9442737e, 0x1447fff5, 0x1602021,
-0xb71021, 0x94437380, 0x96620002, 0x5462fff1,
-0x420c0, 0xb71021, 0x94437382, 0x96620004,
-0x5462ffec, 0x420c0, 0x24060001, 0x30c200ff,
-0x10400400, 0x0, 0x80030d5, 0x0,
-0x97430202, 0x96420000, 0x146203fa, 0x0,
-0x97430204, 0x96420002, 0x146203f6, 0x0,
-0x97430206, 0x96420004, 0x146203f2, 0x0,
-0x92420000, 0x3a030001, 0x30420001, 0x431024,
-0x10400074, 0x2402ffff, 0x8e630000, 0x14620004,
-0x3402ffff, 0x96630004, 0x1062006f, 0x240f0002,
-0x3c020001, 0x571021, 0x904283b2, 0x1440006a,
-0x240f0003, 0x92e204d8, 0x54400068, 0xafaf002c,
-0x3c020001, 0x571021, 0x8c4283b4, 0x28420005,
-0x10400020, 0x3821, 0x3c020001, 0x571021,
-0x8c4283b4, 0x18400016, 0x2821, 0x96660000,
-0x520c0, 0x971021, 0x9442777e, 0x14460009,
-0x971021, 0x94437780, 0x96620002, 0x14620005,
-0x971021, 0x94437782, 0x96620004, 0x50620008,
-0x24070001, 0x3c020001, 0x571021, 0x8c4283b4,
-0x24a50001, 0xa2102a, 0x5440ffee, 0x520c0,
-0x30e200ff, 0x14400044, 0x240f0003, 0x80034c6,
-0x0, 0x2402021, 0xc0022fe, 0x24050006,
-0x3044001f, 0x428c0, 0x2e51021, 0x9442727c,
-0x30424000, 0x144003af, 0xb71021, 0x9443727e,
-0x96620000, 0x1462000b, 0x418c0, 0xb71021,
-0x94437280, 0x96620002, 0x14620006, 0x418c0,
-0xb71021, 0x94437282, 0x96620004, 0x10620027,
-0x418c0, 0x2e31021, 0x9442727c, 0x30428000,
-0x1440039c, 0x2e31021, 0x944b727c, 0x96670000,
-0xb28c0, 0xb71021, 0x9442737e, 0x800313c,
-0x3021, 0x420c0, 0x2e41021, 0x9443737c,
-0x2e41021, 0x944b737c, 0x30638000, 0x14600010,
-0xb28c0, 0xb71021, 0x9442737e, 0x1447fff5,
-0x1602021, 0xb71021, 0x94437380, 0x96620002,
-0x5462fff1, 0x420c0, 0xb71021, 0x94437382,
-0x96620004, 0x5462ffec, 0x420c0, 0x24060001,
-0x30c200ff, 0x1040037b, 0x0, 0x800314f,
-0x240f0003, 0x240f0001, 0xafaf002c, 0x8f420260,
-0x54102b, 0x1040003a, 0x0, 0x8f8300e4,
-0x8f8200e0, 0x10620003, 0x24630008, 0xaf8300e4,
-0xaf8300e8, 0x8ee400c0, 0x8ee500c4, 0x2801821,
-0x1021, 0xa32821, 0xa3302b, 0x822021,
-0x862021, 0xaee400c0, 0xaee500c4, 0x8ee20058,
-0x24420001, 0xaee20058, 0x8ee20058, 0x8ee2007c,
-0x24420001, 0xaee2007c, 0x8ee2007c, 0x8f8200e0,
-0xafa20010, 0x8f8200e4, 0x3c040001, 0x24845878,
-0xafa20014, 0x8fa60018, 0x8fa7001c, 0x3c050006,
-0xc002403, 0x34a5f003, 0x80034cc, 0x0,
-0x8ee25240, 0xafa20010, 0x8ee25244, 0x3c040001,
-0x24845884, 0xafa20014, 0x8ee60e10, 0x8ee70e18,
-0x3c050006, 0xc002403, 0x34a5f002, 0x8ee201c0,
-0x24420001, 0xaee201c0, 0x8ee20000, 0x8ee301c0,
-0x2403ffbf, 0x431024, 0x8003470, 0xaee20000,
-0x96e20468, 0x54102b, 0x10400003, 0x0,
-0x240f0001, 0xa3af0027, 0x12800301, 0x24160007,
-0x24150040, 0x241e0001, 0x240e0012, 0x8ee2724c,
-0x8f430280, 0x24420001, 0x304207ff, 0x106202d3,
-0x0, 0x93a20027, 0x10400014, 0x0,
-0x8ee35240, 0x8ee25244, 0x10620009, 0x26ed5244,
-0x8ee65244, 0x8ee35244, 0x21140, 0x24425248,
-0x2e28021, 0x24630001, 0x80031bf, 0x306b00ff,
-0x92e27248, 0x1440ffca, 0x0, 0x8ee201e0,
-0x24420001, 0xaee201e0, 0x8ee201e0, 0x8ee30e10,
-0x8ee20e18, 0x1062ffc2, 0x26ed0e18, 0x8ee60e18,
-0x8ee30e18, 0x21140, 0x24420e20, 0x2e28021,
-0x24630001, 0x306b01ff, 0x96e2046a, 0x30420010,
-0x10400019, 0x0, 0x9642000c, 0x340f8100,
-0x144f0015, 0x0, 0x3c020001, 0x571021,
-0x904283c0, 0x14400010, 0x0, 0x9642000e,
-0xa6020016, 0x8e420008, 0x8e430004, 0x8e440000,
-0x2694fffc, 0xae42000c, 0xae430008, 0xae440004,
-0x9602000e, 0x26730004, 0x240f0001, 0xa3af0037,
-0x34420200, 0xa602000e, 0x8e020000, 0x8e030004,
-0x3c040001, 0x34843800, 0x306a0007, 0x26a9823,
-0x3641021, 0x262102b, 0x10400005, 0x28aa021,
-0x2641023, 0x3621823, 0x3c020020, 0x439823,
-0x26820007, 0x2404fff8, 0x9603000a, 0x446024,
-0x6a1821, 0x6c102b, 0x10400002, 0x1803821,
-0x603821, 0xae130018, 0x8f880120, 0x24e20007,
-0x443824, 0x27623800, 0x25090020, 0x122102b,
-0x50400001, 0x27693000, 0x8f820128, 0x11220004,
-0x0, 0x8f820124, 0x15220007, 0x1401821,
-0x8ee201a4, 0x8821, 0x24420001, 0xaee201a4,
-0x800324c, 0x8ee201a4, 0x8e040000, 0x8e050004,
-0x1021, 0xad130008, 0xa507000e, 0xad160018,
-0xad06001c, 0xa3302b, 0xa32823, 0x822023,
-0x862023, 0xad040000, 0xad050004, 0x8ee204c0,
-0xad020010, 0xaf890120, 0x92e24e20, 0x14400033,
-0x24110001, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0x8c820000, 0x1456001f, 0x0,
-0x8ee34e30, 0x8ee24e34, 0x1062001b, 0x0,
-0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
-0x8ee34e30, 0x24420001, 0x10550007, 0x0,
-0x8ee24e34, 0x24420001, 0x10620005, 0x0,
-0x8003239, 0x0, 0x14600005, 0x0,
-0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
-0x8c820004, 0x2c420011, 0x50400010, 0xac800000,
-0x800324c, 0x0, 0x8ee24e30, 0x24420001,
-0x50550003, 0x1021, 0x8ee24e30, 0x24420001,
-0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0xac960000, 0xac9e0004, 0x16200018,
-0x3c050006, 0x8e020018, 0x3c040001, 0x24845890,
-0xafa20010, 0x8e020000, 0x8e030004, 0x34a5f009,
-0x2003021, 0xc002403, 0xafa30014, 0x93a20037,
-0x10400216, 0x340f8100, 0x8e420004, 0x8e430008,
-0x8e44000c, 0xa64f000c, 0xae420000, 0xae430004,
-0xae440008, 0x96020016, 0x8003470, 0xa642000e,
-0x14ec0168, 0x28a1823, 0x960c000a, 0x9603000e,
-0x28a1023, 0xa602000a, 0x34620004, 0xa602000e,
-0x8f880120, 0x27623800, 0x25090020, 0x122102b,
-0x14400002, 0x306affff, 0x27693000, 0x8f820128,
-0x11220004, 0x0, 0x8f820124, 0x15220007,
-0x24040020, 0x8ee201a4, 0x8821, 0x24420001,
-0xaee201a4, 0x80032ca, 0x8ee201a4, 0x8ee5724c,
-0x8ee60490, 0x8ee70494, 0xa504000e, 0x24040004,
-0xad100008, 0xad040018, 0x52940, 0xa01821,
-0x1021, 0xe33821, 0xe3202b, 0xc23021,
-0xc43021, 0xad060000, 0xad070004, 0x8ee2724c,
-0xad02001c, 0x8ee204c4, 0xad020010, 0xaf890120,
-0x92e24e20, 0x14400033, 0x24110001, 0x8ee24e30,
-0x210c0, 0x24425038, 0x2e22021, 0x8c820000,
-0x1456001f, 0x0, 0x8ee34e30, 0x8ee24e34,
-0x1062001b, 0x0, 0x8c820004, 0x24420001,
-0xac820004, 0x8ee24e34, 0x8ee34e30, 0x24420001,
-0x10550007, 0x0, 0x8ee24e34, 0x24420001,
-0x10620005, 0x0, 0x80032b7, 0x0,
-0x14600005, 0x0, 0x8f820128, 0x24420020,
-0xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
-0x50400010, 0xac800000, 0x80032ca, 0x0,
-0x8ee24e30, 0x24420001, 0x50550003, 0x1021,
-0x8ee24e30, 0x24420001, 0xaee24e30, 0x8ee24e30,
-0x210c0, 0x24425038, 0x2e22021, 0xac960000,
-0xac9e0004, 0x1620000d, 0x0, 0xa60c000a,
-0xa60a000e, 0x8f820100, 0xafa20010, 0x8f820104,
-0x3c040001, 0x2484589c, 0x3c050006, 0xafa20014,
-0x8ee6724c, 0x800343b, 0x34a5f00b, 0x3c010001,
-0x370821, 0xa02083c0, 0xadab0000, 0x8ee201d8,
-0x8ee3724c, 0x2442ffff, 0xaee201d8, 0x8ee201d8,
-0x24630001, 0x306307ff, 0x26e25244, 0x15a20006,
-0xaee3724c, 0x8ee201d0, 0x2442ffff, 0xaee201d0,
-0x80032ef, 0x8ee201d0, 0x8ee201cc, 0x2442ffff,
-0xaee201cc, 0x8ee201cc, 0x8f420240, 0x10400073,
-0x0, 0x8ee20e1c, 0x24420001, 0xaee20e1c,
-0x8f430240, 0x43102b, 0x14400176, 0xa021,
-0x8f830120, 0x27623800, 0x24660020, 0xc2102b,
-0x50400001, 0x27663000, 0x8f820128, 0x10c20004,
-0x0, 0x8f820124, 0x14c20007, 0x0,
-0x8ee201a4, 0x8821, 0x24420001, 0xaee201a4,
-0x800334f, 0x8ee201a4, 0x8ee2724c, 0xac62001c,
-0x8ee404a8, 0x8ee504ac, 0x2462001c, 0xac620008,
-0x24020008, 0xa462000e, 0x24020011, 0xac620018,
-0xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
-0xaf860120, 0x92e24e20, 0x14400033, 0x24110001,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x8c820000, 0x144e001f, 0x0, 0x8ee34e30,
-0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
-0x24420001, 0x10550007, 0x0, 0x8ee24e34,
-0x24420001, 0x10620005, 0x0, 0x800333c,
-0x0, 0x14600005, 0x0, 0x8f820128,
-0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
-0x2c420011, 0x50400010, 0xac800000, 0x800334f,
-0x0, 0x8ee24e30, 0x24420001, 0x50550003,
-0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0xac8e0000, 0xac9e0004, 0x5620000d, 0x24110001,
-0x8ee2724c, 0x3c040001, 0x248458a8, 0xafa00014,
-0xafa20010, 0x8ee6724c, 0x8f470280, 0x3c050009,
-0x34a5f008, 0xc002403, 0xafae0048, 0x8fae0048,
-0x56200001, 0xaee00e1c, 0x8ee20188, 0x24420001,
-0xaee20188, 0x80033c8, 0x8ee20188, 0x8f830120,
-0x27623800, 0x24660020, 0xc2102b, 0x50400001,
-0x27663000, 0x8f820128, 0x10c20004, 0x0,
-0x8f820124, 0x14c20007, 0x0, 0x8ee201a4,
-0x8821, 0x24420001, 0xaee201a4, 0x80033ba,
-0x8ee201a4, 0x8ee2724c, 0xac62001c, 0x8ee404a8,
-0x8ee504ac, 0x2462001c, 0xac620008, 0x24020008,
-0xa462000e, 0x24020011, 0xac620018, 0xac640000,
-0xac650004, 0x8ee204c4, 0xac620010, 0xaf860120,
-0x92e24e20, 0x14400033, 0x24110001, 0x8ee24e30,
-0x210c0, 0x24425038, 0x2e22021, 0x8c820000,
-0x144e001f, 0x0, 0x8ee34e30, 0x8ee24e34,
-0x1062001b, 0x0, 0x8c820004, 0x24420001,
-0xac820004, 0x8ee24e34, 0x8ee34e30, 0x24420001,
-0x10550007, 0x0, 0x8ee24e34, 0x24420001,
-0x10620005, 0x0, 0x80033a7, 0x0,
-0x14600005, 0x0, 0x8f820128, 0x24420020,
-0xaf820128, 0x8f820128, 0x8c820004, 0x2c420011,
-0x50400010, 0xac800000, 0x80033ba, 0x0,
-0x8ee24e30, 0x24420001, 0x50550003, 0x1021,
-0x8ee24e30, 0x24420001, 0xaee24e30, 0x8ee24e30,
-0x210c0, 0x24425038, 0x2e22021, 0xac8e0000,
-0xac9e0004, 0x1620000d, 0x0, 0x8ee2724c,
-0x3c040001, 0x248458a8, 0xafa00014, 0xafa20010,
-0x8ee6724c, 0x8f470280, 0x3c050009, 0x34a5f008,
-0xc002403, 0xafae0048, 0x8fae0048, 0x8ee20174,
-0x24420001, 0xaee20174, 0x8ee20174, 0x800346e,
-0xa021, 0x960c000a, 0x183102b, 0x54400001,
-0x1801821, 0xa603000a, 0x8f880120, 0x27623800,
-0x25090020, 0x122102b, 0x50400001, 0x27693000,
-0x8f820128, 0x11220004, 0x0, 0x8f820124,
-0x15220007, 0x24040020, 0x8ee201a4, 0x8821,
-0x24420001, 0xaee201a4, 0x800342f, 0x8ee201a4,
-0x8ee5724c, 0x8ee60490, 0x8ee70494, 0xa504000e,
-0x24040004, 0xad100008, 0xad040018, 0x52940,
-0xa01821, 0x1021, 0xe33821, 0xe3202b,
-0xc23021, 0xc43021, 0xad060000, 0xad070004,
-0x8ee2724c, 0xad02001c, 0x8ee204c4, 0xad020010,
-0xaf890120, 0x92e24e20, 0x14400033, 0x24110001,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x8c820000, 0x1456001f, 0x0, 0x8ee34e30,
-0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
-0x24420001, 0x10550007, 0x0, 0x8ee24e34,
-0x24420001, 0x10620005, 0x0, 0x800341c,
-0x0, 0x14600005, 0x0, 0x8f820128,
-0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
-0x2c420011, 0x50400010, 0xac800000, 0x800342f,
-0x0, 0x8ee24e30, 0x24420001, 0x50550003,
-0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0xac960000, 0xac9e0004, 0x1620001d, 0x0,
-0xa60c000a, 0x8f820100, 0xafa20010, 0x8f820104,
-0x3c040001, 0x2484589c, 0x3c050006, 0xafa20014,
-0x8ee6724c, 0x34a5f00d, 0xc002403, 0x2003821,
-0x93a20037, 0x10400031, 0x340f8100, 0x8e420004,
-0x8e430008, 0x8e44000c, 0xa64f000c, 0xae420000,
-0xae430004, 0xae440008, 0x96020016, 0xa642000e,
-0x9602000e, 0x3042fdff, 0x8003470, 0xa602000e,
-0x8ee201d8, 0x2442ffff, 0xaee201d8, 0x8ee201d8,
-0x8ee201cc, 0x3c04001f, 0x3c010001, 0x370821,
-0xa03e83c0, 0x2442ffff, 0xaee201cc, 0x9603000a,
-0x3484ffff, 0x8ee201cc, 0x6a1821, 0x2639821,
-0x93202b, 0x10800003, 0x3c02fff5, 0x34421000,
-0x2629821, 0xadab0000, 0x8ee2724c, 0x24420001,
-0x304207ff, 0xaee2724c, 0x8f420240, 0x10400004,
-0x283a023, 0x8ee20e1c, 0x24420001, 0xaee20e1c,
-0xa3a00027, 0x1680fd29, 0x0, 0x12800024,
-0x0, 0x3c010001, 0x370821, 0xac3483c4,
-0x3c010001, 0x370821, 0xac3383c8, 0x3c010001,
-0x370821, 0xac3283cc, 0x93a20037, 0x10400008,
-0x0, 0x3c020001, 0x571021, 0x8c4283cc,
-0x24420004, 0x3c010001, 0x370821, 0xac2283cc,
-0x8ee2724c, 0x8f430280, 0x24420001, 0x304207ff,
-0x14620006, 0x0, 0x8ee201c4, 0x24420001,
-0xaee201c4, 0x80034cc, 0x8ee201c4, 0x8ee201bc,
-0x24420001, 0xaee201bc, 0x80034cc, 0x8ee201bc,
-0x97a4001e, 0x2484fffc, 0x801821, 0x8ee400c0,
-0x8ee500c4, 0x1021, 0xa32821, 0xa3302b,
-0x822021, 0x862021, 0xaee400c0, 0xaee500c4,
-0x8faf002c, 0x24020002, 0x11e2000f, 0x29e20003,
-0x14400017, 0x24020003, 0x15e20015, 0x0,
-0x8ee200d0, 0x8ee300d4, 0x24630001, 0x2c640001,
-0x441021, 0xaee200d0, 0xaee300d4, 0x8ee200d0,
-0x80034c6, 0x8ee300d4, 0x8ee200d8, 0x8ee300dc,
-0x24630001, 0x2c640001, 0x441021, 0xaee200d8,
-0xaee300dc, 0x8ee200d8, 0x80034c6, 0x8ee300dc,
-0x8ee200c8, 0x8ee300cc, 0x24630001, 0x2c640001,
-0x441021, 0xaee200c8, 0xaee300cc, 0x8ee200c8,
-0x8ee300cc, 0x8f8300e4, 0x8f8200e0, 0x10620003,
-0x24630008, 0xaf8300e4, 0xaf8300e8, 0x8fbf0070,
-0x8fbe006c, 0x8fb60068, 0x8fb50064, 0x8fb40060,
-0x8fb3005c, 0x8fb20058, 0x8fb10054, 0x8fb00050,
-0x3e00008, 0x27bd0078, 0x27bdffb0, 0xafb50044,
-0xa821, 0xafb00030, 0x8021, 0xafbf004c,
-0xafb60048, 0xafb40040, 0xafb3003c, 0xafb20038,
-0xafb10034, 0x8ee204d4, 0x24140001, 0x30420001,
-0x1440002a, 0xb021, 0x8f8700e0, 0x8f8800c4,
-0x8f8200e8, 0xe22023, 0x2c821000, 0x50400001,
-0x24841000, 0x420c2, 0x801821, 0x8ee400c8,
-0x8ee500cc, 0x1021, 0xa32821, 0xa3302b,
-0x822021, 0x862021, 0xaee400c8, 0xaee500cc,
-0x8f8300c8, 0x3c02000a, 0x3442efff, 0x1032023,
-0x44102b, 0x10400003, 0x3c02000a, 0x3442f000,
-0x822021, 0x801821, 0x8ee400c0, 0x8ee500c4,
-0x1021, 0xa32821, 0xa3302b, 0x822021,
-0x862021, 0xaee400c0, 0xaee500c4, 0xaf8800c8,
-0xaf8700e4, 0x8003850, 0xaf8700e8, 0x3c020001,
-0x571021, 0x904283c0, 0x1040000b, 0x0,
-0x3c130001, 0x2779821, 0x8e7383c4, 0x3c110001,
-0x2378821, 0x8e3183c8, 0x3c120001, 0x2579021,
-0x80036e8, 0x8e5283cc, 0x8f8300e0, 0x8f8200e4,
-0x10430007, 0x4821, 0x8f8200e4, 0x24090001,
-0x8c430000, 0x8c440004, 0xafa30018, 0xafa4001c,
-0x1520000e, 0x3c02ffff, 0x8f8200c4, 0xafa20010,
-0x8f8200c8, 0x3c040001, 0x24845870, 0xafa20014,
-0x8f8600e0, 0x8f8700e4, 0x3c050006, 0xc002403,
-0x34a5f000, 0x8003850, 0x0, 0x8fa3001c,
-0x8fb20018, 0x3073ffff, 0x2673fffc, 0x621024,
-0x10400058, 0x2408821, 0x3c020080, 0x621024,
-0x1040000a, 0x3c040040, 0x8ee2007c, 0x24420001,
-0xaee2007c, 0x8ee2007c, 0x8ee201fc, 0x24420001,
-0xaee201fc, 0x800384a, 0x8ee201fc, 0x3c060004,
-0x3c0b0001, 0x3c0a0002, 0x3c050010, 0x3c090008,
-0x8ee20080, 0x3c080020, 0x34078000, 0x24420001,
-0xaee20080, 0x8ee20080, 0x8fa2001c, 0x441824,
-0x10660021, 0xc3102b, 0x14400007, 0x0,
-0x106b0011, 0x0, 0x106a0015, 0x0,
-0x8003592, 0x42042, 0x10650023, 0xa3102b,
-0x14400005, 0x0, 0x10690019, 0x0,
-0x8003592, 0x42042, 0x10680021, 0x0,
-0x8003592, 0x42042, 0x8ee20034, 0x24420001,
-0xaee20034, 0x8ee20034, 0x8003592, 0x42042,
-0x8ee201ec, 0x24420001, 0xaee201ec, 0x8ee201ec,
-0x8003592, 0x42042, 0x8ee201f0, 0x24420001,
-0xaee201f0, 0x8ee201f0, 0x8003592, 0x42042,
-0x8ee201f4, 0x24420001, 0xaee201f4, 0x8ee201f4,
-0x8003592, 0x42042, 0x8ee20030, 0x24420001,
-0xaee20030, 0x8ee20030, 0x8003592, 0x42042,
-0x8ee201f8, 0x24420001, 0xaee201f8, 0x8ee201f8,
-0x42042, 0x108702b7, 0x0, 0x8003557,
-0x0, 0x3c020001, 0x571021, 0x904283b2,
-0x14400084, 0x24020001, 0x3c030001, 0x771821,
-0x906383b3, 0x1462007f, 0x3c020100, 0x8e430000,
-0x621024, 0x1040006f, 0x2402ffff, 0x14620005,
-0x24100001, 0x96430004, 0x3402ffff, 0x10620075,
-0x0, 0x92e204d8, 0x14400072, 0x0,
-0x3c020001, 0x571021, 0x8c4283b4, 0x28420005,
-0x10400020, 0x3821, 0x3c020001, 0x571021,
-0x8c4283b4, 0x18400016, 0x2821, 0x96260000,
-0x520c0, 0x971021, 0x9442777e, 0x14460009,
-0x971021, 0x94437780, 0x96220002, 0x14620005,
-0x971021, 0x94437782, 0x96220004, 0x50620008,
-0x24070001, 0x3c020001, 0x571021, 0x8c4283b4,
-0x24a50001, 0xa2102a, 0x5440ffee, 0x520c0,
-0x30e200ff, 0x1040027b, 0x0, 0x800361e,
-0x0, 0x2402021, 0xc0022fe, 0x24050006,
-0x3044001f, 0x428c0, 0x2e51021, 0x9442727c,
-0x30424000, 0x1440026f, 0xb71021, 0x9443727e,
-0x96220000, 0x1462000b, 0x418c0, 0xb71021,
-0x94437280, 0x96220002, 0x14620006, 0x418c0,
-0xb71021, 0x94437282, 0x96220004, 0x10620035,
-0x418c0, 0x2e31021, 0x9442727c, 0x30428000,
-0x1440025c, 0x2e31021, 0x9448727c, 0x96270000,
-0x828c0, 0xb71021, 0x9442737e, 0x8003600,
-0x3021, 0x420c0, 0x2e41021, 0x9443737c,
-0x2e41021, 0x9448737c, 0x30638000, 0x14600010,
-0x828c0, 0xb71021, 0x9442737e, 0x1447fff5,
-0x1002021, 0xb71021, 0x94437380, 0x96220002,
-0x5462fff1, 0x420c0, 0xb71021, 0x94437382,
-0x96220004, 0x5462ffec, 0x420c0, 0x24060001,
-0x30c200ff, 0x1040023b, 0x0, 0x800361e,
-0x0, 0x97430202, 0x96420000, 0x14620235,
-0x0, 0x97430204, 0x96420002, 0x14620231,
-0x0, 0x97430206, 0x96420004, 0x1462022d,
-0x0, 0x92420000, 0x3a030001, 0x30420001,
-0x431024, 0x10400074, 0x2402ffff, 0x8e230000,
-0x14620004, 0x3402ffff, 0x96230004, 0x1062006f,
-0x24140002, 0x3c020001, 0x571021, 0x904283b2,
-0x1440006a, 0x24140003, 0x92e204d8, 0x14400067,
-0x0, 0x3c020001, 0x571021, 0x8c4283b4,
-0x28420005, 0x10400020, 0x3821, 0x3c020001,
-0x571021, 0x8c4283b4, 0x18400016, 0x2821,
-0x96260000, 0x520c0, 0x971021, 0x9442777e,
-0x14460009, 0x971021, 0x94437780, 0x96220002,
-0x14620005, 0x971021, 0x94437782, 0x96220004,
-0x50620008, 0x24070001, 0x3c020001, 0x571021,
-0x8c4283b4, 0x24a50001, 0xa2102a, 0x5440ffee,
-0x520c0, 0x30e200ff, 0x14400044, 0x24140003,
-0x800384a, 0x0, 0x2402021, 0xc0022fe,
-0x24050006, 0x3044001f, 0x428c0, 0x2e51021,
-0x9442727c, 0x30424000, 0x144001ea, 0xb71021,
-0x9443727e, 0x96220000, 0x1462000b, 0x418c0,
-0xb71021, 0x94437280, 0x96220002, 0x14620006,
-0x418c0, 0xb71021, 0x94437282, 0x96220004,
-0x10620027, 0x418c0, 0x2e31021, 0x9442727c,
-0x30428000, 0x144001d7, 0x2e31021, 0x9448727c,
-0x96270000, 0x828c0, 0xb71021, 0x9442737e,
-0x8003685, 0x3021, 0x420c0, 0x2e41021,
-0x9443737c, 0x2e41021, 0x9448737c, 0x30638000,
-0x14600010, 0x828c0, 0xb71021, 0x9442737e,
-0x1447fff5, 0x1002021, 0xb71021, 0x94437380,
-0x96220002, 0x5462fff1, 0x420c0, 0xb71021,
-0x94437382, 0x96220004, 0x5462ffec, 0x420c0,
-0x24060001, 0x30c200ff, 0x104001b6, 0x0,
-0x8003698, 0x24140003, 0x24140001, 0x8f420260,
-0x53102b, 0x10400049, 0x0, 0x8f8300e4,
-0x8f8200e0, 0x10620003, 0x24630008, 0xaf8300e4,
-0xaf8300e8, 0x8ee400c0, 0x8ee500c4, 0x2601821,
-0x1021, 0xa32821, 0xa3302b, 0x822021,
-0x862021, 0xaee400c0, 0xaee500c4, 0x8ee20058,
-0x24420001, 0xaee20058, 0x8ee20058, 0x8ee2007c,
-0x24420001, 0xaee2007c, 0x8ee2007c, 0x8f8200e0,
-0xafa20010, 0x8f8200e4, 0x3c040001, 0x24845878,
-0xafa20014, 0x8fa60018, 0x8fa7001c, 0x3c050006,
-0xc002403, 0x34a5f003, 0x8003850, 0x0,
-0x8ee25240, 0xafa20010, 0x8ee25244, 0x3c040001,
-0x24845884, 0xafa20014, 0x8ee60e10, 0x8ee70e18,
-0xc002403, 0x34a5f002, 0x8ee201c0, 0x24420001,
-0xaee201c0, 0x8ee20000, 0x8ee301c0, 0x2403ffbf,
-0x431024, 0x80037f8, 0xaee20000, 0x8ee25240,
-0xafa20010, 0x8ee25244, 0x3c040001, 0x24845884,
-0xafa20014, 0x8ee60e10, 0x8ee70e18, 0x3c050006,
-0xc002403, 0x34a5f002, 0x8ee201c0, 0x24420001,
-0xaee201c0, 0x80037f8, 0x8ee201c0, 0x96e20468,
-0x53102b, 0x54400001, 0x3c158000, 0x12600131,
-0x3c0c001f, 0x358cffff, 0x8ee2724c, 0x8f430280,
-0x24420001, 0x304207ff, 0x10620108, 0x0,
-0x12a00014, 0x0, 0x8ee35240, 0x8ee25244,
-0x10620009, 0x26ee5244, 0x8eeb5244, 0x8ee35244,
-0x21140, 0x24425248, 0x2e28021, 0x24630001,
-0x8003712, 0x306800ff, 0x92e27248, 0x1440ffc0,
-0x3c050006, 0x8ee201e0, 0x24420001, 0xaee201e0,
-0x8ee201e0, 0x8ee30e10, 0x8ee20e18, 0x1062ffcb,
-0x26ee0e18, 0x8eeb0e18, 0xa821, 0x8ee30e18,
-0x21140, 0x24420e20, 0x2e28021, 0x24630001,
-0x306801ff, 0x96e2046a, 0x30420010, 0x10400017,
-0x34028100, 0x9643000c, 0x14620014, 0x0,
-0x3c020001, 0x571021, 0x904283c0, 0x1440000f,
-0x0, 0x9642000e, 0xa6020016, 0x8e420008,
-0x8e430004, 0x8e440000, 0x2673fffc, 0xae42000c,
-0xae430008, 0xae440004, 0x9602000e, 0x26310004,
-0x24160001, 0x34420200, 0xa602000e, 0x9603000a,
-0x2605021, 0x73102b, 0x10400002, 0x2606821,
-0x605021, 0x2d42003d, 0x1040002a, 0x3821,
-0x9623000c, 0x24020800, 0x54620027, 0xae110018,
-0x3c020001, 0x571021, 0x904283c0, 0x54400022,
-0xae110018, 0x26220017, 0x182102b, 0x10400013,
-0x0, 0x3c02fff5, 0x511021, 0x90421017,
-0x38430006, 0x2c630001, 0x38420011, 0x2c420001,
-0x621825, 0x10600013, 0x26220010, 0x182102b,
-0x1040000e, 0x0, 0x3c07fff5, 0xf13821,
-0x94e71010, 0x800375e, 0x24e7000e, 0x92220017,
-0x38430006, 0x2c630001, 0x38420011, 0x2c420001,
-0x621825, 0x50600004, 0xae110018, 0x96270010,
-0x24e7000e, 0xae110018, 0x3c020001, 0x571021,
-0x904283c0, 0x2102b, 0x14e00002, 0x24ec0,
-0x1403821, 0x8f830120, 0x27623800, 0x24660020,
-0xc2102b, 0x50400001, 0x27663000, 0x8f820128,
-0x10c20004, 0x0, 0x8f820124, 0x14c20007,
-0x2402000b, 0x8ee201a4, 0x4821, 0x24420001,
-0xaee201a4, 0x80037bf, 0x8ee201a4, 0x8e040000,
-0x8e050004, 0xac620018, 0x1751025, 0x491025,
-0xac710008, 0xa467000e, 0xac62001c, 0xac640000,
-0xac650004, 0x8ee204c0, 0xac620010, 0xaf860120,
-0x92e24e20, 0x14400038, 0x24090001, 0x8ee24e30,
-0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
-0x24020007, 0x14620020, 0x0, 0x8ee34e30,
-0x8ee24e34, 0x1062001c, 0x0, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee34e34, 0x8ee54e30,
-0x24020040, 0x24630001, 0x10620007, 0x0,
-0x8ee24e34, 0x24420001, 0x10a20005, 0x0,
-0x80037a9, 0x0, 0x14a00005, 0x0,
-0x8f820128, 0x24420020, 0xaf820128, 0x8f820128,
-0x8c820004, 0x2c420011, 0x50400013, 0xac800000,
-0x80037bf, 0x0, 0x8ee24e30, 0x24030040,
-0x24420001, 0x50430003, 0x1021, 0x8ee24e30,
-0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x24020007, 0xac820000,
-0x24020001, 0xac820004, 0x15200018, 0x3c050006,
-0x8e020018, 0x3c040001, 0x24845890, 0xafa20010,
-0x8e020000, 0x8e030004, 0x34a5f009, 0x2003021,
-0xc002403, 0xafa30014, 0x32c200ff, 0x1040002b,
-0x34028100, 0x8e430004, 0x8e440008, 0x8e45000c,
-0xa642000c, 0xae430000, 0xae440004, 0xae450008,
-0x96020016, 0x80037f8, 0xa642000e, 0x154d000a,
-0x0, 0x9602000e, 0xa613000a, 0x34420004,
-0xa602000e, 0x3c010001, 0x370821, 0xa02083c0,
-0x80037f6, 0x9821, 0x9604000a, 0x93102b,
-0x10400002, 0x2601821, 0x801821, 0x24020001,
-0xa603000a, 0x3c010001, 0x370821, 0xa02283c0,
-0x9604000a, 0x2248821, 0x191102b, 0x10400003,
-0x3c02fff5, 0x34421000, 0x2228821, 0x2649823,
-0xa821, 0x1660fef4, 0xadc80000, 0x12600021,
-0x32c200ff, 0x3c010001, 0x370821, 0xac3383c4,
-0x3c010001, 0x370821, 0xac3183c8, 0x3c010001,
-0x370821, 0x10400008, 0xac3283cc, 0x3c020001,
-0x571021, 0x8c4283cc, 0x24420004, 0x3c010001,
-0x370821, 0xac2283cc, 0x8ee2724c, 0x8f430280,
-0x24420001, 0x14620006, 0x0, 0x8ee201c4,
-0x24420001, 0xaee201c4, 0x8003850, 0x8ee201c4,
-0x8ee201bc, 0x24420001, 0xaee201bc, 0x8003850,
-0x8ee201bc, 0x97a4001e, 0x2484fffc, 0x801821,
-0x8ee400c0, 0x8ee500c4, 0x1021, 0xa32821,
-0xa3302b, 0x822021, 0x862021, 0x24020002,
-0xaee400c0, 0xaee500c4, 0x1282000f, 0x2a820003,
-0x14400017, 0x24020003, 0x16820015, 0x0,
-0x8ee200d0, 0x8ee300d4, 0x24630001, 0x2c640001,
-0x441021, 0xaee200d0, 0xaee300d4, 0x8ee200d0,
-0x800384a, 0x8ee300d4, 0x8ee200d8, 0x8ee300dc,
-0x24630001, 0x2c640001, 0x441021, 0xaee200d8,
-0xaee300dc, 0x8ee200d8, 0x800384a, 0x8ee300dc,
-0x8ee200c8, 0x8ee300cc, 0x24630001, 0x2c640001,
-0x441021, 0xaee200c8, 0xaee300cc, 0x8ee200c8,
-0x8ee300cc, 0x8f8300e4, 0x8f8200e0, 0x10620003,
-0x24630008, 0xaf8300e4, 0xaf8300e8, 0x8fbf004c,
-0x8fb60048, 0x8fb50044, 0x8fb40040, 0x8fb3003c,
-0x8fb20038, 0x8fb10034, 0x8fb00030, 0x3e00008,
-0x27bd0050, 0x27bdff90, 0xafb60060, 0xb021,
-0xafbf0068, 0xafbe0064, 0xafb5005c, 0xafb40058,
-0xafb30054, 0xafb20050, 0xafb1004c, 0xafb00048,
-0x8ee204d4, 0x8821, 0x24150001, 0x30420001,
-0x1440002a, 0xa3a0002f, 0x8f8700e0, 0x8f8800c4,
-0x8f8200e8, 0xe22023, 0x2c821000, 0x50400001,
-0x24841000, 0x420c2, 0x801821, 0x8ee400c8,
-0x8ee500cc, 0x1021, 0xa32821, 0xa3302b,
-0x822021, 0x862021, 0xaee400c8, 0xaee500cc,
-0x8f8300c8, 0x3c02000a, 0x3442efff, 0x1032023,
-0x44102b, 0x10400003, 0x3c02000a, 0x3442f000,
-0x822021, 0x801821, 0x8ee400c0, 0x8ee500c4,
-0x1021, 0xa32821, 0xa3302b, 0x822021,
-0x862021, 0xaee400c0, 0xaee500c4, 0xaf8800c8,
-0xaf8700e4, 0x8003c5b, 0xaf8700e8, 0x3c020001,
-0x571021, 0x904283c0, 0x1040000b, 0x0,
-0x3c130001, 0x2779821, 0x8e7383c4, 0x3c100001,
-0x2178021, 0x8e1083c8, 0x3c120001, 0x2579021,
-0x8003a59, 0x8e5283cc, 0x8f8300e0, 0x8f8200e4,
-0x10430007, 0x3821, 0x8f8200e4, 0x24070001,
-0x8c430000, 0x8c440004, 0xafa30018, 0xafa4001c,
-0x14e0000e, 0x3c02ffff, 0x8f8200c4, 0xafa20010,
-0x8f8200c8, 0x3c040001, 0x248458b4, 0xafa20014,
-0x8f8600e0, 0x8f8700e4, 0x3c050006, 0xc002403,
-0x34a5f200, 0x8003c5b, 0x0, 0x8fa3001c,
-0x8fb20018, 0x3073ffff, 0x2673fffc, 0x621024,
-0x10400058, 0x2408021, 0x3c020080, 0x621024,
-0x1040000a, 0x3c040040, 0x8ee2007c, 0x24420001,
-0xaee2007c, 0x8ee2007c, 0x8ee201fc, 0x24420001,
-0xaee201fc, 0x8003c55, 0x8ee201fc, 0x3c060004,
-0x3c0b0001, 0x3c0a0002, 0x3c050010, 0x3c090008,
-0x8ee20080, 0x3c080020, 0x34078000, 0x24420001,
-0xaee20080, 0x8ee20080, 0x8fa2001c, 0x441824,
-0x10660021, 0xc3102b, 0x14400007, 0x0,
-0x106b0011, 0x0, 0x106a0015, 0x0,
-0x8003916, 0x42042, 0x10650023, 0xa3102b,
-0x14400005, 0x0, 0x10690019, 0x0,
-0x8003916, 0x42042, 0x10680021, 0x0,
-0x8003916, 0x42042, 0x8ee20034, 0x24420001,
-0xaee20034, 0x8ee20034, 0x8003916, 0x42042,
-0x8ee201ec, 0x24420001, 0xaee201ec, 0x8ee201ec,
-0x8003916, 0x42042, 0x8ee201f0, 0x24420001,
-0xaee201f0, 0x8ee201f0, 0x8003916, 0x42042,
-0x8ee201f4, 0x24420001, 0xaee201f4, 0x8ee201f4,
-0x8003916, 0x42042, 0x8ee20030, 0x24420001,
-0xaee20030, 0x8ee20030, 0x8003916, 0x42042,
-0x8ee201f8, 0x24420001, 0xaee201f8, 0x8ee201f8,
-0x42042, 0x1087033e, 0x0, 0x80038db,
-0x0, 0x3c020001, 0x571021, 0x904283b2,
-0x14400084, 0x24020001, 0x3c030001, 0x771821,
-0x906383b3, 0x1462007f, 0x3c020100, 0x8e430000,
-0x621024, 0x1040006f, 0x2402ffff, 0x14620005,
-0x24110001, 0x96430004, 0x3402ffff, 0x10620075,
-0x0, 0x92e204d8, 0x14400072, 0x0,
-0x3c020001, 0x571021, 0x8c4283b4, 0x28420005,
-0x10400020, 0x3821, 0x3c020001, 0x571021,
-0x8c4283b4, 0x18400016, 0x2821, 0x96060000,
-0x520c0, 0x971021, 0x9442777e, 0x14460009,
-0x971021, 0x94437780, 0x96020002, 0x14620005,
-0x971021, 0x94437782, 0x96020004, 0x50620008,
-0x24070001, 0x3c020001, 0x571021, 0x8c4283b4,
-0x24a50001, 0xa2102a, 0x5440ffee, 0x520c0,
-0x30e200ff, 0x10400302, 0x0, 0x80039a2,
-0x0, 0x2402021, 0xc0022fe, 0x24050006,
-0x3044001f, 0x428c0, 0x2e51021, 0x9442727c,
-0x30424000, 0x144002f6, 0xb71021, 0x9443727e,
-0x96020000, 0x1462000b, 0x418c0, 0xb71021,
-0x94437280, 0x96020002, 0x14620006, 0x418c0,
-0xb71021, 0x94437282, 0x96020004, 0x10620035,
-0x418c0, 0x2e31021, 0x9442727c, 0x30428000,
-0x144002e3, 0x2e31021, 0x944d727c, 0x96070000,
-0xd28c0, 0xb71021, 0x9442737e, 0x8003984,
-0x3021, 0x420c0, 0x2e41021, 0x9443737c,
-0x2e41021, 0x944d737c, 0x30638000, 0x14600010,
-0xd28c0, 0xb71021, 0x9442737e, 0x1447fff5,
-0x1a02021, 0xb71021, 0x94437380, 0x96020002,
-0x5462fff1, 0x420c0, 0xb71021, 0x94437382,
-0x96020004, 0x5462ffec, 0x420c0, 0x24060001,
-0x30c200ff, 0x104002c2, 0x0, 0x80039a2,
-0x0, 0x97430202, 0x96420000, 0x146202bc,
-0x0, 0x97430204, 0x96420002, 0x146202b8,
-0x0, 0x97430206, 0x96420004, 0x146202b4,
-0x0, 0x92420000, 0x3a230001, 0x30420001,
-0x431024, 0x10400074, 0x2402ffff, 0x8e030000,
-0x14620004, 0x3402ffff, 0x96030004, 0x1062006f,
-0x24150002, 0x3c020001, 0x571021, 0x904283b2,
-0x1440006a, 0x24150003, 0x92e204d8, 0x14400067,
-0x0, 0x3c020001, 0x571021, 0x8c4283b4,
-0x28420005, 0x10400020, 0x3821, 0x3c020001,
-0x571021, 0x8c4283b4, 0x18400016, 0x2821,
-0x96060000, 0x520c0, 0x971021, 0x9442777e,
-0x14460009, 0x971021, 0x94437780, 0x96020002,
-0x14620005, 0x971021, 0x94437782, 0x96020004,
-0x50620008, 0x24070001, 0x3c020001, 0x571021,
-0x8c4283b4, 0x24a50001, 0xa2102a, 0x5440ffee,
-0x520c0, 0x30e200ff, 0x14400044, 0x24150003,
-0x8003c55, 0x0, 0x2402021, 0xc0022fe,
-0x24050006, 0x3044001f, 0x428c0, 0x2e51021,
-0x9442727c, 0x30424000, 0x14400271, 0xb71021,
-0x9443727e, 0x96020000, 0x1462000b, 0x418c0,
-0xb71021, 0x94437280, 0x96020002, 0x14620006,
-0x418c0, 0xb71021, 0x94437282, 0x96020004,
-0x10620027, 0x418c0, 0x2e31021, 0x9442727c,
-0x30428000, 0x1440025e, 0x2e31021, 0x944d727c,
-0x96070000, 0xd28c0, 0xb71021, 0x9442737e,
-0x8003a09, 0x3021, 0x420c0, 0x2e41021,
-0x9443737c, 0x2e41021, 0x944d737c, 0x30638000,
-0x14600010, 0xd28c0, 0xb71021, 0x9442737e,
-0x1447fff5, 0x1a02021, 0xb71021, 0x94437380,
-0x96020002, 0x5462fff1, 0x420c0, 0xb71021,
-0x94437382, 0x96020004, 0x5462ffec, 0x420c0,
-0x24060001, 0x30c200ff, 0x1040023d, 0x0,
-0x8003a1c, 0x24150003, 0x24150001, 0x8f420260,
-0x53102b, 0x10400036, 0x0, 0x8f8300e4,
-0x8f8200e0, 0x10620003, 0x24630008, 0xaf8300e4,
-0xaf8300e8, 0x8ee400c0, 0x8ee500c4, 0x2601821,
-0x1021, 0xa32821, 0xa3302b, 0x822021,
-0x862021, 0xaee400c0, 0xaee500c4, 0x8ee20058,
-0x24420001, 0xaee20058, 0x8ee20058, 0x8ee2007c,
-0x24420001, 0xaee2007c, 0x8ee2007c, 0x8f8200e0,
-0xafa20010, 0x8f8200e4, 0x3c040001, 0x248458c0,
-0xafa20014, 0x8fa60018, 0x8fa7001c, 0x3c050006,
-0xc002403, 0x34a5f203, 0x8003c5b, 0x0,
-0x8ee25240, 0xafa20010, 0x8ee25244, 0x3c040001,
-0x248458cc, 0xafa20014, 0x8ee60e10, 0x8ee70e18,
-0x3c050006, 0xc002403, 0x34a5f202, 0x8ee201c0,
-0x24420001, 0xaee201c0, 0x8003c02, 0x8ee201c0,
-0x96e20468, 0x53102b, 0x54400001, 0x3c168000,
-0x126001cb, 0x3c0e001f, 0x35ceffff, 0x3c0ffff5,
-0x35ef1000, 0x241e0040, 0x8ee2724c, 0x8f430280,
-0x24420001, 0x304207ff, 0x1062019e, 0x0,
-0x12c00012, 0x0, 0x8ee35240, 0x8ee25244,
-0x1062000a, 0x26f85244, 0x8ef45244, 0xafb80024,
-0x8ee35244, 0x21140, 0x24425248, 0x2e28821,
-0x24630001, 0x8003a85, 0x306d00ff, 0x8ee201e0,
-0x24420001, 0xaee201e0, 0x8ee201e0, 0x8ee30e10,
-0x8ee20e18, 0x1062ffca, 0x26f80e18, 0x8ef40e18,
-0xb021, 0xafb80024, 0x8ee30e18, 0x21140,
-0x24420e20, 0x2e28821, 0x24630001, 0x306d01ff,
-0x96e2046a, 0x30420010, 0x10400018, 0x34028100,
-0x9643000c, 0x14620015, 0x0, 0x3c020001,
-0x571021, 0x904283c0, 0x14400010, 0x0,
-0x9642000e, 0xa6220016, 0x8e420008, 0x8e430004,
-0x8e440000, 0x2673fffc, 0xae42000c, 0xae430008,
-0xae440004, 0x9622000e, 0x26100004, 0x24180001,
-0xa3b8002f, 0x34420200, 0xa622000e, 0x8e220000,
-0x8e230004, 0x3c040001, 0x34843800, 0x2003021,
-0x306a0007, 0x20a8023, 0x3641021, 0x202102b,
-0x10400005, 0x26a9821, 0x2041023, 0x3621823,
-0x3c020020, 0x438023, 0x26620007, 0x9623000a,
-0x2418fff8, 0x58c824, 0x6a1821, 0x79102b,
-0x10400002, 0x3206021, 0x606021, 0x1801821,
-0x24620007, 0x2418fff8, 0x586024, 0x26c102b,
-0x14400004, 0x1932823, 0x1832823, 0x8003ac3,
-0xc31021, 0xd31021, 0x4a2023, 0x1c4102b,
-0x54400001, 0x8f2021, 0x25420040, 0x4c102b,
-0x14400035, 0x5821, 0x94c3000c, 0x24020800,
-0x54620032, 0xae260018, 0x3c020001, 0x571021,
-0x904283c0, 0x5440002d, 0xae260018, 0x24c20017,
-0x1c2102b, 0x10400013, 0x0, 0x3c02fff5,
-0x461021, 0x90421017, 0x38430006, 0x2c630001,
-0x38420011, 0x2c420001, 0x621825, 0x10600014,
-0x24c20010, 0x1c2102b, 0x1040000e, 0x0,
-0x3c0bfff5, 0x1665821, 0x956b1010, 0x8003af4,
-0x2562000e, 0x90c20017, 0x38430006, 0x2c630001,
-0x38420011, 0x2c420001, 0x621825, 0x10600005,
-0x1601821, 0x94cb0010, 0x2562000e, 0x4a5821,
-0x1601821, 0x24620007, 0x2418fff8, 0x585824,
-0xc31021, 0x4a2023, 0x1c4102b, 0x10400002,
-0x1632823, 0x8f2021, 0xae260018, 0x3c020001,
-0x571021, 0x904283c0, 0x2102b, 0x216c0,
-0x15600002, 0xafa20044, 0x1805821, 0x30820001,
-0x10400007, 0x4021, 0x90880000, 0x24840001,
-0x1c4102b, 0x10400002, 0x24a5ffff, 0x8f2021,
-0x50a00012, 0x81c02, 0x2ca20002, 0x54400009,
-0x24a5ffff, 0x94820000, 0x24840002, 0x1024021,
-0x1c4102b, 0x10400006, 0x24a5fffe, 0x8003b21,
-0x8f2021, 0x90820000, 0x21200, 0x1024021,
-0x14a0fff2, 0x2ca20002, 0x81c02, 0x3102ffff,
-0x624021, 0x3108ffff, 0x1402821, 0x11400011,
-0x2002021, 0x2ca20002, 0x54400009, 0x24a5ffff,
-0x94820000, 0x24840002, 0x1024021, 0x1c4102b,
-0x10400006, 0x24a5fffe, 0x8003b38, 0x8f2021,
-0x90820000, 0x21200, 0x1024021, 0x14a0fff2,
-0x2ca20002, 0x81c02, 0x3102ffff, 0x624021,
-0x81c02, 0x3102ffff, 0x8f890120, 0x624021,
-0x27623800, 0x25230020, 0x62102b, 0x14400002,
-0x3108ffff, 0x27633000, 0x8f820128, 0x10620004,
-0x0, 0x8f820124, 0x14620007, 0x1402821,
-0x8ee201a4, 0x3821, 0x24420001, 0xaee201a4,
-0x8003bc9, 0x8ee201a4, 0x8e260000, 0x8e270004,
-0x81400, 0x3448000b, 0xad300008, 0xa52b000e,
-0xad280018, 0x8fb80044, 0x2021, 0x2961025,
-0x581025, 0xad22001c, 0xe5102b, 0xe53823,
-0xc43023, 0xc23023, 0xad260000, 0xad270004,
-0x8ee204c0, 0xad220010, 0xaf830120, 0x92e24e20,
-0x1440005f, 0x24070001, 0x2502ffee, 0x2c420002,
-0x14400003, 0x24020011, 0x15020024, 0x0,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x8c830000, 0x24020012, 0x1462000f, 0x0,
-0x8ee34e30, 0x8ee24e34, 0x1062000b, 0x0,
-0x8c820004, 0x24420001, 0xac820004, 0x8ee24e34,
-0x8ee34e30, 0x24420001, 0x105e002a, 0x0,
-0x8003ba8, 0x0, 0x8ee24e30, 0x24420001,
-0x505e0003, 0x1021, 0x8ee24e30, 0x24420001,
-0xaee24e30, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0x8003bc6, 0x24020012, 0x8ee24e30,
-0x210c0, 0x24425038, 0x2e22021, 0x8c830000,
-0x24020007, 0x1462001f, 0x0, 0x8ee34e30,
-0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
-0x24420001, 0x105e0007, 0x0, 0x8ee24e34,
-0x24420001, 0x10620005, 0x0, 0x8003bb4,
-0x0, 0x14600005, 0x0, 0x8f820128,
-0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
-0x2c420011, 0x50400012, 0xac800000, 0x8003bc9,
-0x0, 0x8ee24e30, 0x24420001, 0x505e0003,
-0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x24020007, 0xac820000, 0x24020001, 0xac820004,
-0x14e00019, 0x3c050006, 0x3c040001, 0x24845890,
-0x8e220018, 0x34a5f209, 0xafa20010, 0x8e220000,
-0x8e230004, 0x2203021, 0x1603821, 0xc002403,
-0xafa30014, 0x93a2002f, 0x1040002a, 0x34028100,
-0x8e430004, 0x8e440008, 0x8e45000c, 0xa642000c,
-0xae430000, 0xae440004, 0xae450008, 0x96220016,
-0x8003c02, 0xa642000e, 0x1599000a, 0x26a1823,
-0x9622000e, 0xa623000a, 0x34420004, 0xa622000e,
-0x3c010001, 0x370821, 0xa02083c0, 0x8003bff,
-0x9821, 0x9624000a, 0x83102b, 0x54400001,
-0x801821, 0x24020001, 0xa623000a, 0x3c010001,
-0x370821, 0xa02283c0, 0x9622000a, 0x4a1821,
-0x2038021, 0x1d0102b, 0x54400001, 0x20f8021,
-0x2639823, 0xb021, 0x8fb80024, 0x1660fe5e,
-0xaf0d0000, 0x12600022, 0x0, 0x3c010001,
-0x370821, 0xac3383c4, 0x3c010001, 0x370821,
-0xac3083c8, 0x3c010001, 0x370821, 0xac3283cc,
-0x93a2002f, 0x10400008, 0x0, 0x3c020001,
-0x571021, 0x8c4283cc, 0x24420004, 0x3c010001,
-0x370821, 0xac2283cc, 0x8f430280, 0x8ee2724c,
-0x14620006, 0x0, 0x8ee201c4, 0x24420001,
-0xaee201c4, 0x8003c5b, 0x8ee201c4, 0x8ee201bc,
-0x24420001, 0xaee201bc, 0x8003c5b, 0x8ee201bc,
-0x97a4001e, 0x2484fffc, 0x801821, 0x8ee400c0,
-0x8ee500c4, 0x1021, 0xa32821, 0xa3302b,
-0x822021, 0x862021, 0x24020002, 0xaee400c0,
-0xaee500c4, 0x12a2000f, 0x2aa20003, 0x14400017,
-0x24020003, 0x16a20015, 0x0, 0x8ee200d0,
-0x8ee300d4, 0x24630001, 0x2c640001, 0x441021,
-0xaee200d0, 0xaee300d4, 0x8ee200d0, 0x8003c55,
-0x8ee300d4, 0x8ee200d8, 0x8ee300dc, 0x24630001,
-0x2c640001, 0x441021, 0xaee200d8, 0xaee300dc,
-0x8ee200d8, 0x8003c55, 0x8ee300dc, 0x8ee200c8,
-0x8ee300cc, 0x24630001, 0x2c640001, 0x441021,
-0xaee200c8, 0xaee300cc, 0x8ee200c8, 0x8ee300cc,
-0x8f8300e4, 0x8f8200e0, 0x10620003, 0x24630008,
-0xaf8300e4, 0xaf8300e8, 0x8fbf0068, 0x8fbe0064,
-0x8fb60060, 0x8fb5005c, 0x8fb40058, 0x8fb30054,
-0x8fb20050, 0x8fb1004c, 0x8fb00048, 0x3e00008,
-0x27bd0070, 0x27bdffe0, 0xafbf0018, 0x8ee30e14,
-0x8ee20e0c, 0x10620074, 0x0, 0x8ee30e0c,
-0x8ee20e14, 0x622023, 0x4820001, 0x24840200,
-0x8ee30e18, 0x8ee20e14, 0x43102b, 0x14400004,
-0x24020200, 0x8ee30e14, 0x8003c7d, 0x431823,
-0x8ee20e18, 0x8ee30e14, 0x431023, 0x2443ffff,
-0x804821, 0x69102a, 0x54400001, 0x604821,
-0x8f870100, 0x27623000, 0x24e80020, 0x102102b,
-0x50400001, 0x27682800, 0x8f820108, 0x11020004,
-0x0, 0x8f820104, 0x15020007, 0x1021,
-0x8ee201a8, 0x2021, 0x24420001, 0xaee201a8,
-0x8003cbf, 0x8ee201a8, 0x8ee40e14, 0x42140,
-0x801821, 0x8ee40460, 0x8ee50464, 0xa32821,
-0xa3302b, 0x822021, 0x862021, 0xace40000,
-0xace50004, 0x8ee30e14, 0x91140, 0xa4e2000e,
-0x24020002, 0xace20018, 0x31940, 0x24630e20,
-0x2e31021, 0xace20008, 0x8ee20e14, 0xace2001c,
-0x8ee204cc, 0xace20010, 0xaf880100, 0x92e204ec,
-0x14400011, 0x24040001, 0x8ee24e28, 0x24030040,
-0x24420001, 0x50430003, 0x1021, 0x8ee24e28,
-0x24420001, 0xaee24e28, 0x8ee24e28, 0x210c0,
-0x24424e38, 0x2e21821, 0x24020002, 0xac620000,
-0x24020001, 0xac620004, 0x1480000e, 0x24030040,
-0x8ee20e14, 0xafa20010, 0x8ee20e18, 0x3c050007,
-0xafa20014, 0x8ee60e0c, 0x8ee70e10, 0x3c040001,
-0x248458d4, 0xc002403, 0x34a5f001, 0x8003cdd,
-0x0, 0x8ee20500, 0x24420001, 0x50430003,
-0x1021, 0x8ee20500, 0x24420001, 0xaee20500,
-0x8ee20500, 0x21080, 0x571021, 0xac490508,
-0x8ee20e14, 0x491021, 0x304201ff, 0xaee20e14,
-0x8ee30e14, 0x8ee20e0c, 0x14620005, 0x0,
-0x8f820060, 0x2403fdff, 0x431024, 0xaf820060,
-0x8fbf0018, 0x3e00008, 0x27bd0020, 0x27bdffe0,
-0xafbf0018, 0x8ee3523c, 0x8ee25238, 0x10620074,
-0x0, 0x8ee35238, 0x8ee2523c, 0x622023,
-0x4820001, 0x24840100, 0x8ee35244, 0x8ee2523c,
-0x43102b, 0x14400004, 0x24020100, 0x8ee3523c,
-0x8003cff, 0x431823, 0x8ee25244, 0x8ee3523c,
-0x431023, 0x2443ffff, 0x804821, 0x69102a,
-0x54400001, 0x604821, 0x8f870100, 0x27623000,
-0x24e80020, 0x102102b, 0x50400001, 0x27682800,
-0x8f820108, 0x11020004, 0x0, 0x8f820104,
-0x15020007, 0x1021, 0x8ee201a8, 0x2021,
-0x24420001, 0xaee201a8, 0x8003d41, 0x8ee201a8,
-0x8ee4523c, 0x42140, 0x801821, 0x8ee40470,
-0x8ee50474, 0xa32821, 0xa3302b, 0x822021,
-0x862021, 0xace40000, 0xace50004, 0x8ee3523c,
-0x91140, 0xa4e2000e, 0x24020003, 0xace20018,
-0x31940, 0x24635248, 0x2e31021, 0xace20008,
-0x8ee2523c, 0xace2001c, 0x8ee204cc, 0xace20010,
-0xaf880100, 0x92e204ec, 0x14400011, 0x24040001,
-0x8ee24e28, 0x24030040, 0x24420001, 0x50430003,
-0x1021, 0x8ee24e28, 0x24420001, 0xaee24e28,
-0x8ee24e28, 0x210c0, 0x24424e38, 0x2e21821,
-0x24020003, 0xac620000, 0x24020001, 0xac620004,
-0x1480000e, 0x24030040, 0x8ee2523c, 0xafa20010,
-0x8ee25244, 0x3c050007, 0xafa20014, 0x8ee65238,
-0x8ee75240, 0x3c040001, 0x248458e0, 0xc002403,
-0x34a5f010, 0x8003d5f, 0x0, 0x8ee20500,
-0x24420001, 0x50430003, 0x1021, 0x8ee20500,
-0x24420001, 0xaee20500, 0x8ee20500, 0x21080,
-0x571021, 0xac490508, 0x8ee2523c, 0x491021,
-0x304200ff, 0xaee2523c, 0x8ee3523c, 0x8ee25238,
-0x14620005, 0x0, 0x8f820060, 0x2403feff,
-0x431024, 0xaf820060, 0x8fbf0018, 0x3e00008,
-0x27bd0020, 0x8f820120, 0x8ee34e34, 0x8f820124,
-0x8f860128, 0x24020040, 0x24630001, 0x50620003,
-0x1021, 0x8ee24e34, 0x24420001, 0xaee24e34,
-0x8ee24e34, 0x8ee44e34, 0x8ee34e30, 0x210c0,
-0x24425038, 0x14830007, 0x2e22821, 0x8f820128,
-0x24420020, 0xaf820128, 0x8f820128, 0x8003d92,
-0xaca00000, 0x8ee24e34, 0x24030040, 0x24420001,
-0x50430003, 0x1021, 0x8ee24e34, 0x24420001,
-0x210c0, 0x24425038, 0x2e22821, 0x8ca20004,
-0x8f830128, 0x21140, 0x621821, 0xaf830128,
-0xaca00000, 0x8cc20018, 0x2443fffe, 0x2c620012,
-0x10400008, 0x31080, 0x3c010001, 0x220821,
-0x8c2258f0, 0x400008, 0x0, 0x24020001,
-0xaee24e24, 0x3e00008, 0x0, 0x27bdffc8,
-0xafbf0030, 0xafb5002c, 0xafb40028, 0xafb30024,
-0xafb20020, 0xafb1001c, 0xafb00018, 0x8f830128,
-0x8f820124, 0x106202b0, 0x9821, 0x3c11001f,
-0x3631ffff, 0x3c12fff5, 0x36521000, 0x24150012,
-0x24140040, 0x8f8c0128, 0x8f820128, 0x24420020,
-0xaf820128, 0x9182001b, 0x8f830128, 0x2443fffe,
-0x2c620012, 0x1040029c, 0x31080, 0x3c010001,
-0x220821, 0x8c225948, 0x400008, 0x0,
-0x8f420218, 0x30420100, 0x10400007, 0x0,
-0x95830016, 0x95820018, 0x621823, 0x31402,
-0x431021, 0xa5820016, 0x8d82001c, 0x3c038000,
-0x3044ffff, 0x436824, 0x3c030800, 0x431824,
-0x11a00004, 0xad84001c, 0x41140, 0x8003dd8,
-0x24425248, 0x41140, 0x24420e20, 0x2e25821,
-0x9562000e, 0x3042fffc, 0x10600004, 0xa562000e,
-0x95840016, 0x8003ec0, 0x0, 0x8d690018,
-0x4021, 0x952a0000, 0x25290002, 0x95270000,
-0x25290002, 0x95260000, 0x25290002, 0x95250000,
-0x25290002, 0x95240000, 0x25290002, 0x95230000,
-0x25290002, 0x95220000, 0x25290002, 0x1475021,
-0x1465021, 0x1455021, 0x1445021, 0x1435021,
-0x1425021, 0xa1c02, 0x3142ffff, 0x625021,
-0xa1c02, 0x3142ffff, 0x625021, 0x96e2046a,
-0x314effff, 0x30420002, 0x10400044, 0x5021,
-0x25220014, 0x222102b, 0x10400014, 0x1201821,
-0x2405000a, 0x2021, 0x223102b, 0x54400001,
-0x721821, 0x94620000, 0x24630002, 0x24a5ffff,
-0x14a0fff9, 0x822021, 0x41c02, 0x3082ffff,
-0x622021, 0x41402, 0x3083ffff, 0x431021,
-0x3042ffff, 0x8003e33, 0x1425021, 0x952a0000,
-0x25290002, 0x95280000, 0x25290002, 0x95270000,
-0x25290002, 0x95260000, 0x25290002, 0x95250000,
-0x25290002, 0x95230000, 0x25290002, 0x95220000,
-0x25290002, 0x95240000, 0x25290002, 0x1485021,
-0x1475021, 0x1465021, 0x1455021, 0x1435021,
-0x1425021, 0x95220000, 0x95230002, 0x1445021,
-0x1425021, 0x1435021, 0xa1c02, 0x3142ffff,
-0x625021, 0xa1c02, 0x3142ffff, 0x625021,
-0x3148ffff, 0x51000001, 0x3408ffff, 0x8d620018,
-0x9443000c, 0x24020800, 0x54620005, 0xa5680010,
-0x9562000e, 0x34420002, 0xa562000e, 0xa5680010,
-0x96e2046a, 0x2821, 0x30420008, 0x14400056,
-0x3021, 0x8d630018, 0x24620024, 0x222102b,
-0x10400034, 0x24690010, 0x229102b, 0x54400001,
-0x1324821, 0x95250000, 0x24690014, 0x229102b,
-0x10400002, 0x24a5ffec, 0x1324821, 0x95220000,
-0x30420fff, 0x14400003, 0x25290002, 0x8003e60,
-0x24130001, 0x9821, 0xa03021, 0x229102b,
-0x54400001, 0x1324821, 0x91220001, 0x25290002,
-0xa22821, 0x229102b, 0x54400001, 0x1324821,
-0x25290002, 0x229102b, 0x54400001, 0x1324821,
-0x95220000, 0x25290002, 0xa22821, 0x229102b,
-0x54400001, 0x1324821, 0x95220000, 0x25290002,
-0xa22821, 0x229102b, 0x54400001, 0x1324821,
-0x95220000, 0x25290002, 0xa22821, 0x229102b,
-0x54400001, 0x1324821, 0x95220000, 0x8003e99,
-0xa22821, 0x94650010, 0x94620014, 0x24690016,
-0x30420fff, 0x14400003, 0x24a5ffec, 0x8003e8c,
-0x24130001, 0x9821, 0xa03021, 0x91230001,
-0x25290004, 0x95220000, 0x25290002, 0x95240000,
-0x25290002, 0xa32821, 0xa22821, 0x95220000,
-0x95230002, 0xa42821, 0xa22821, 0xa32821,
-0x51c02, 0x30a2ffff, 0x622821, 0x51c02,
-0x30a2ffff, 0x622821, 0x96e2046a, 0x30420001,
-0x1040001e, 0x2021, 0x95820016, 0x4e2023,
-0x41402, 0x822021, 0x326200ff, 0x50400002,
-0x862021, 0x852021, 0x41402, 0x822021,
-0x3084ffff, 0x50800001, 0x3404ffff, 0x8d620018,
-0x24430017, 0x223102b, 0x54400001, 0x721821,
-0x90620000, 0x38430011, 0x2c630001, 0x38420006,
-0x2c420001, 0x621825, 0x10600004, 0x0,
-0x9562000e, 0x34420001, 0xa562000e, 0x9562000e,
-0x240a0002, 0x30420004, 0x10400002, 0xa5640012,
-0x240a0004, 0x8f880120, 0x27623800, 0x25090020,
-0x122102b, 0x50400001, 0x27693000, 0x8f820128,
-0x11220004, 0x0, 0x8f820124, 0x15220007,
-0x24040020, 0x8ee201a4, 0x8021, 0x24420001,
-0xaee201a4, 0x8003f4f, 0x8ee201a4, 0x8ee5724c,
-0x8ee60490, 0x8ee70494, 0xad0b0008, 0xa504000e,
-0xad0a0018, 0x52940, 0xa01821, 0x1021,
-0xe33821, 0xe3202b, 0xc23021, 0xc43021,
-0xad060000, 0xad070004, 0x8ee2724c, 0x4d1025,
-0xad02001c, 0x8ee204c4, 0xad020010, 0xaf890120,
-0x92e24e20, 0x14400060, 0x24100001, 0x2543ffee,
-0x2c630002, 0x39420011, 0x2c420001, 0x621825,
-0x10600024, 0x0, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x8c820000, 0x1455000f,
-0x0, 0x8ee34e30, 0x8ee24e34, 0x1062000b,
-0x0, 0x8c820004, 0x24420001, 0xac820004,
-0x8ee24e34, 0x8ee34e30, 0x24420001, 0x1054002b,
-0x0, 0x8003f2e, 0x0, 0x8ee24e30,
-0x24420001, 0x50540003, 0x1021, 0x8ee24e30,
-0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x24020001, 0x8003f4e,
-0xac950000, 0x8ee24e30, 0x210c0, 0x24425038,
-0x2e22021, 0x8c830000, 0x24020007, 0x1462001f,
-0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
-0x0, 0x8c820004, 0x24420001, 0xac820004,
-0x8ee24e34, 0x8ee34e30, 0x24420001, 0x10540007,
-0x0, 0x8ee24e34, 0x24420001, 0x10620005,
-0x0, 0x8003f3a, 0x0, 0x14600005,
-0x0, 0x8f820128, 0x24420020, 0xaf820128,
-0x8f820128, 0x8c820004, 0x2c420011, 0x50400012,
-0xac800000, 0x8003f4f, 0x0, 0x8ee24e30,
-0x24420001, 0x50540003, 0x1021, 0x8ee24e30,
-0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x24020007, 0xac820000,
-0x24020001, 0xac820004, 0x1600000d, 0x0,
-0x8f820120, 0x3c040001, 0x24845938, 0xafa00014,
-0xafa20010, 0x8d86001c, 0x8f870124, 0x3c050008,
-0xc002403, 0x34a50001, 0x8004057, 0x0,
-0x8ee2724c, 0x24420001, 0x304207ff, 0x11a00006,
-0xaee2724c, 0x8ee201d0, 0x2442ffff, 0xaee201d0,
-0x8003f6b, 0x8ee201d0, 0x8ee201cc, 0x2442ffff,
-0xaee201cc, 0x8ee201cc, 0x8ee201d8, 0x2442ffff,
-0xaee201d8, 0x8004057, 0x8ee201d8, 0x8f420240,
-0x104000e5, 0x0, 0x8ee20e1c, 0x24420001,
-0x8004057, 0xaee20e1c, 0x9582001e, 0xad82001c,
-0x8f420240, 0x10400072, 0x0, 0x8ee20e1c,
-0x24420001, 0xaee20e1c, 0x8f430240, 0x43102b,
-0x144000d5, 0x0, 0x8f830120, 0x27623800,
-0x24660020, 0xc2102b, 0x50400001, 0x27663000,
-0x8f820128, 0x10c20004, 0x0, 0x8f820124,
-0x14c20007, 0x0, 0x8ee201a4, 0x8021,
-0x24420001, 0xaee201a4, 0x8003fda, 0x8ee201a4,
-0x8ee2724c, 0xac62001c, 0x8ee404a8, 0x8ee504ac,
-0x2462001c, 0xac620008, 0x24020008, 0xa462000e,
-0x24020011, 0xac620018, 0xac640000, 0xac650004,
-0x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
-0x14400034, 0x24100001, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x8c820000, 0x1455001f,
-0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
-0x0, 0x8c820004, 0x24420001, 0xac820004,
-0x8ee24e34, 0x8ee34e30, 0x24420001, 0x10540007,
-0x0, 0x8ee24e34, 0x24420001, 0x10620005,
-0x0, 0x8003fc6, 0x0, 0x14600005,
-0x0, 0x8f820128, 0x24420020, 0xaf820128,
-0x8f820128, 0x8c820004, 0x2c420011, 0x50400011,
-0xac800000, 0x8003fda, 0x0, 0x8ee24e30,
-0x24420001, 0x50540003, 0x1021, 0x8ee24e30,
-0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x24020001, 0xac950000,
-0xac820004, 0x5600000b, 0x24100001, 0x8ee2724c,
-0x3c040001, 0x248458a8, 0xafa00014, 0xafa20010,
-0x8ee6724c, 0x8f470280, 0x3c050009, 0xc002403,
-0x34a5f008, 0x56000001, 0xaee00e1c, 0x8ee20188,
-0x24420001, 0xaee20188, 0x8004050, 0x8ee20188,
-0x8f830120, 0x27623800, 0x24660020, 0xc2102b,
-0x50400001, 0x27663000, 0x8f820128, 0x10c20004,
-0x0, 0x8f820124, 0x14c20007, 0x0,
-0x8ee201a4, 0x8021, 0x24420001, 0xaee201a4,
-0x8004044, 0x8ee201a4, 0x8ee2724c, 0xac62001c,
-0x8ee404a8, 0x8ee504ac, 0x2462001c, 0xac620008,
-0x24020008, 0xa462000e, 0x24020011, 0xac620018,
-0xac640000, 0xac650004, 0x8ee204c4, 0xac620010,
-0xaf860120, 0x92e24e20, 0x14400034, 0x24100001,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x8c820000, 0x1455001f, 0x0, 0x8ee34e30,
-0x8ee24e34, 0x1062001b, 0x0, 0x8c820004,
-0x24420001, 0xac820004, 0x8ee24e34, 0x8ee34e30,
-0x24420001, 0x10540007, 0x0, 0x8ee24e34,
-0x24420001, 0x10620005, 0x0, 0x8004030,
-0x0, 0x14600005, 0x0, 0x8f820128,
-0x24420020, 0xaf820128, 0x8f820128, 0x8c820004,
-0x2c420011, 0x50400011, 0xac800000, 0x8004044,
-0x0, 0x8ee24e30, 0x24420001, 0x50540003,
-0x1021, 0x8ee24e30, 0x24420001, 0xaee24e30,
-0x8ee24e30, 0x210c0, 0x24425038, 0x2e22021,
-0x24020001, 0xac950000, 0xac820004, 0x1600000b,
-0x0, 0x8ee2724c, 0x3c040001, 0x248458a8,
-0xafa00014, 0xafa20010, 0x8ee6724c, 0x8f470280,
-0x3c050009, 0xc002403, 0x34a5f008, 0x8ee20174,
-0x24420001, 0xaee20174, 0x8004057, 0x8ee20174,
-0x24020001, 0xaee24e24, 0x8f830128, 0x8f820124,
-0x1462fd58, 0x0, 0x8fbf0030, 0x8fb5002c,
-0x8fb40028, 0x8fb30024, 0x8fb20020, 0x8fb1001c,
-0x8fb00018, 0x3e00008, 0x27bd0038, 0x27bdffe8,
-0x27840208, 0x27450200, 0x24060008, 0xafbf0014,
-0xc00249a, 0xafb00010, 0x2021, 0x24100001,
-0x2402241f, 0xaf900210, 0xaf900200, 0xaf800204,
-0xaf820214, 0x8f460248, 0x24030004, 0x3c020040,
-0x3c010001, 0xac235cc4, 0x3c010001, 0xac235cc8,
-0x3c010001, 0xac205d9c, 0x3c010001, 0xac225cc0,
-0x3c010001, 0xac235cc8, 0xc005108, 0x24050004,
-0xc004822, 0x0, 0x8ee20000, 0x3c03feff,
-0x3463fffd, 0x431024, 0xaee20000, 0x3c023c00,
-0xaf82021c, 0x3c010001, 0x370821, 0xac3083ac,
-0x8fbf0014, 0x8fb00010, 0x3e00008, 0x27bd0018,
-0x27bdffe0, 0x3c050008, 0x34a50400, 0xafbf0018,
-0xafa00010, 0xafa00014, 0x8f860200, 0x3c040001,
-0x248459f0, 0xc002403, 0x3821, 0x8ee20280,
-0x24420001, 0xaee20280, 0x8ee20280, 0x8f830200,
-0x3c023f00, 0x621824, 0x8fbf0018, 0x3c020400,
-0x3e00008, 0x27bd0020, 0x27bdffd8, 0xafbf0020,
-0xafb1001c, 0xafb00018, 0x8f900220, 0x8ee20214,
-0x3821, 0x24420001, 0xaee20214, 0x8ee20214,
-0x3c020300, 0x2021024, 0x10400027, 0x3c110400,
-0xc00429b, 0x0, 0x3c020100, 0x2021024,
-0x10400007, 0x0, 0x8ee20218, 0x24420001,
-0xaee20218, 0x8ee20218, 0x80040c6, 0x3c03fdff,
-0x8ee2021c, 0x24420001, 0xaee2021c, 0x8ee2021c,
-0x3c03fdff, 0x3463ffff, 0x3c0808ff, 0x3508ffff,
-0x8ee20000, 0x3c040001, 0x248459fc, 0x3c050008,
-0x2003021, 0x431024, 0xaee20000, 0x8f820220,
-0x3821, 0x3c030300, 0x481024, 0x431025,
-0xaf820220, 0xafa00010, 0xc002403, 0xafa00014,
-0x8004296, 0x0, 0x2111024, 0x1040001f,
-0x3c024000, 0x8f830224, 0x24021402, 0x1462000b,
-0x3c03fdff, 0x3c040001, 0x24845a08, 0x3c050008,
-0xafa00010, 0xafa00014, 0x8f860224, 0x34a5ffff,
-0xc002403, 0x3821, 0x3c03fdff, 0x8ee20000,
-0x3463ffff, 0x2002021, 0x431024, 0xc004e54,
-0xaee20000, 0x8ee20220, 0x24420001, 0xaee20220,
-0x8ee20220, 0x8f820220, 0x3c0308ff, 0x3463ffff,
-0x431024, 0x8004295, 0x511025, 0x2021024,
-0x10400142, 0x0, 0x8ee2022c, 0x24420001,
-0xaee2022c, 0x8ee2022c, 0x8f820220, 0x3c0308ff,
-0x3463ffff, 0x431024, 0x34420004, 0xaf820220,
-0x8f830054, 0x8f820054, 0x800410e, 0x24630002,
-0x8f820054, 0x621023, 0x2c420003, 0x1440fffc,
-0x0, 0x8f8600e0, 0x8f8400e4, 0x30c20007,
-0x10400012, 0x0, 0x8f8300e4, 0x2402fff8,
-0xc21024, 0x1043000d, 0x0, 0x8f820054,
-0x8f8300e0, 0x14c30009, 0x24440050, 0x8f820054,
-0x821023, 0x2c420051, 0x10400004, 0x0,
-0x8f8200e0, 0x10c2fff9, 0x0, 0x8f820220,
-0x3c0308ff, 0x3463fffd, 0x431024, 0xaf820220,
-0x8f8600e0, 0x30c20007, 0x10400003, 0x2402fff8,
-0xc23024, 0xaf8600e0, 0x8f8300c4, 0x3c02001f,
-0x3442ffff, 0x24680008, 0x48102b, 0x10400003,
-0x3c02fff5, 0x34421000, 0x1024021, 0x8f8b00c8,
-0x8f850120, 0x8f840124, 0x8004145, 0x6021,
-0x27623800, 0x82102b, 0x50400001, 0x27643000,
-0x10a40010, 0x318200ff, 0x8c820018, 0x38430007,
-0x2c630001, 0x3842000b, 0x2c420001, 0x621825,
-0x5060fff3, 0x24840020, 0x8ee20240, 0x240c0001,
-0x24420001, 0xaee20240, 0x8ee20240, 0x8c8b0008,
-0x318200ff, 0x14400065, 0x0, 0x3c020001,
-0x571021, 0x904283c0, 0x14400060, 0x0,
-0x8f8400e4, 0xc41023, 0x218c3, 0x4620001,
-0x24630200, 0x8f8900c4, 0x10600005, 0x24020001,
-0x10620009, 0x0, 0x8004187, 0x0,
-0x8ee20230, 0x1205821, 0x24420001, 0xaee20230,
-0x80041bc, 0x8ee20230, 0x8ee20234, 0x3c05000a,
-0x24420001, 0xaee20234, 0x8c8b0000, 0x34a5f000,
-0x8ee20234, 0x12b1823, 0xa3102b, 0x54400001,
-0x651821, 0x2c62233f, 0x14400040, 0x0,
-0x8f8200e8, 0x24420008, 0xaf8200e8, 0x8f8200e8,
-0x8f8200e4, 0x1205821, 0x24420008, 0xaf8200e4,
-0x80041bc, 0x8f8200e4, 0x8ee20238, 0x3c03000a,
-0x24420001, 0xaee20238, 0x8c840000, 0x3463f000,
-0x8ee20238, 0x883823, 0x67102b, 0x54400001,
-0xe33821, 0x3c020003, 0x34420d40, 0x47102b,
-0x10400003, 0x0, 0x80041bc, 0x805821,
-0x8f8200e4, 0x24440008, 0xaf8400e4, 0x8f8400e4,
-0x10860018, 0x3c05000a, 0x34a5f000, 0x3c0a0003,
-0x354a0d40, 0x8ee2007c, 0x24420001, 0xaee2007c,
-0x8c830000, 0x8ee2007c, 0x683823, 0xa7102b,
-0x54400001, 0xe53821, 0x147102b, 0x54400007,
-0x605821, 0x8f8200e4, 0x24440008, 0xaf8400e4,
-0x8f8400e4, 0x1486ffef, 0x0, 0x14860005,
-0x0, 0x1205821, 0xaf8600e4, 0x80041bc,
-0xaf8600e8, 0xaf8400e4, 0xaf8400e8, 0x8f8200c8,
-0x3c03000a, 0x3463f000, 0x483823, 0x67102b,
-0x54400001, 0xe33821, 0x3c020003, 0x34420d3f,
-0x47102b, 0x54400007, 0x6021, 0x1683823,
-0x67102b, 0x54400003, 0xe33821, 0x80041cf,
-0x3c020003, 0x3c020003, 0x34420d3f, 0x47102b,
-0x14400016, 0x318200ff, 0x14400006, 0x0,
-0x3c020001, 0x571021, 0x904283c0, 0x1040000f,
-0x0, 0x8ee2023c, 0x3c04fdff, 0x8ee30000,
-0x3484ffff, 0x24420001, 0xaee2023c, 0x8ee2023c,
-0x24020001, 0x641824, 0x3c010001, 0x370821,
-0xa02283b8, 0x800422c, 0xaee30000, 0xaf8b00c8,
-0x8f8300c8, 0x8f8200c4, 0x3c04000a, 0x3484f000,
-0x623823, 0x87102b, 0x54400001, 0xe43821,
-0x3c020003, 0x34420d40, 0x47102b, 0x2ce30001,
-0x431025, 0x10400008, 0x0, 0x8f820220,
-0x3c0308ff, 0x3463ffff, 0x431024, 0x3c034000,
-0x431025, 0xaf820220, 0x8f8600e0, 0x8f8400e4,
-0x10c4002a, 0x0, 0x8ee2007c, 0x24420001,
-0xaee2007c, 0x8ee2007c, 0x24c2fff8, 0xaf8200e0,
-0x3c020001, 0x8c427e30, 0x3c030008, 0x8f8600e0,
-0x431024, 0x1040001d, 0x0, 0x10c4001b,
-0x240dfff8, 0x3c0a000a, 0x354af000, 0x3c0c0080,
-0x24850008, 0x27622800, 0x50a20001, 0x27651800,
-0x8c880004, 0x8c820000, 0x8ca90000, 0x3103ffff,
-0x431021, 0x4d1024, 0x24430010, 0x6b102b,
-0x54400001, 0x6a1821, 0x12b102b, 0x54400001,
-0x12a4821, 0x10690002, 0x10c1025, 0xac820004,
-0xa02021, 0x14c4ffeb, 0x24850008, 0x8f820220,
-0x3c0308ff, 0x3463ffff, 0x431024, 0x34420002,
-0xaf820220, 0x8f830054, 0x8f820054, 0x8004237,
-0x24630001, 0x8f820054, 0x621023, 0x2c420002,
-0x1440fffc, 0x0, 0x8f820220, 0x3c0308ff,
-0x3463fffb, 0x431024, 0xaf820220, 0x6010055,
-0x0, 0x8ee20228, 0x24420001, 0xaee20228,
-0x8ee20228, 0x8f820220, 0x3c0308ff, 0x3463ffff,
-0x431024, 0x34420004, 0xaf820220, 0x8f830054,
-0x8f820054, 0x8004251, 0x24630002, 0x8f820054,
-0x621023, 0x2c420003, 0x1440fffc, 0x0,
-0x8f8600e0, 0x30c20007, 0x10400012, 0x0,
-0x8f8300e4, 0x2402fff8, 0xc21024, 0x1043000d,
-0x0, 0x8f820054, 0x8f8300e0, 0x14c30009,
-0x24440032, 0x8f820054, 0x821023, 0x2c420033,
-0x10400004, 0x0, 0x8f8200e0, 0x10c2fff9,
-0x0, 0x8f820220, 0x3c0308ff, 0x3463fffd,
-0x431024, 0xaf820220, 0x8f8600e0, 0x30c20007,
-0x10400003, 0x2402fff8, 0xc23024, 0xaf8600e0,
-0x240301f5, 0x8f8200e8, 0x673823, 0x718c0,
-0x431021, 0xaf8200e8, 0x8f8200e8, 0xaf8200e4,
-0x8ee2007c, 0x3c0408ff, 0x3484ffff, 0x471021,
-0xaee2007c, 0x8f820220, 0x3c038000, 0x34630002,
-0x441024, 0x431025, 0xaf820220, 0x8f830054,
-0x8f820054, 0x800428d, 0x24630001, 0x8f820054,
-0x621023, 0x2c420002, 0x1440fffc, 0x0,
-0x8f820220, 0x3c0308ff, 0x3463fffb, 0x431024,
-0xaf820220, 0x8fbf0020, 0x8fb1001c, 0x8fb00018,
-0x3e00008, 0x27bd0028, 0x3c020001, 0x8c425cd8,
-0x27bdffd8, 0x10400012, 0xafbf0020, 0x3c040001,
-0x24845a14, 0x3c050008, 0x24020001, 0x3c010001,
-0x370821, 0xac2283ac, 0xafa00010, 0xafa00014,
-0x8f860220, 0x34a50498, 0x3c010001, 0xac205cd8,
-0x3c010001, 0xac225ccc, 0xc002403, 0x3821,
-0x8f420268, 0x3c037fff, 0x3463ffff, 0x431024,
-0xaf420268, 0x8ee204d0, 0x8ee404d4, 0x2403fffe,
-0x431024, 0x30840002, 0x1080011e, 0xaee204d0,
-0x8ee204d4, 0x2403fffd, 0x431024, 0xaee204d4,
-0x8f820044, 0x3c030600, 0x34632000, 0x34420020,
-0xaf820044, 0xafa30018, 0x8ee20608, 0x8f430228,
-0x24420001, 0x304a00ff, 0x514300fe, 0xafa00010,
-0x8ee20608, 0x210c0, 0x571021, 0x8fa30018,
-0x8fa4001c, 0xac43060c, 0xac440610, 0x8f830054,
-0x8f820054, 0x24690032, 0x1221023, 0x2c420033,
-0x1040006a, 0x5821, 0x24180008, 0x240f000d,
-0x240d0007, 0x240c0040, 0x240e0001, 0x8f870120,
-0x27623800, 0x24e80020, 0x102102b, 0x50400001,
-0x27683000, 0x8f820128, 0x11020004, 0x0,
-0x8f820124, 0x15020007, 0x1021, 0x8ee201a4,
-0x2821, 0x24420001, 0xaee201a4, 0x800433d,
-0x8ee201a4, 0x8ee40608, 0x420c0, 0x801821,
-0x8ee40430, 0x8ee50434, 0xa32821, 0xa3302b,
-0x822021, 0x862021, 0xace40000, 0xace50004,
-0x8ee20608, 0xa4f8000e, 0xacef0018, 0xacea001c,
-0x210c0, 0x2442060c, 0x2e21021, 0xace20008,
-0x8ee204c4, 0xace20010, 0xaf880120, 0x92e24e20,
-0x14400033, 0x24050001, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x8c820000, 0x144d001f,
-0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
-0x0, 0x8c820004, 0x24420001, 0xac820004,
-0x8ee24e34, 0x8ee34e30, 0x24420001, 0x104c0007,
-0x0, 0x8ee24e34, 0x24420001, 0x10620005,
-0x0, 0x800432a, 0x0, 0x14600005,
-0x0, 0x8f820128, 0x24420020, 0xaf820128,
-0x8f820128, 0x8c820004, 0x2c420011, 0x50400010,
-0xac800000, 0x800433d, 0x0, 0x8ee24e30,
-0x24420001, 0x504c0003, 0x1021, 0x8ee24e30,
-0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0xac8d0000, 0xac8e0004,
-0x54a00006, 0x240b0001, 0x8f820054, 0x1221023,
-0x2c420033, 0x1440ff9d, 0x0, 0x316300ff,
-0x24020001, 0x54620079, 0xafa00010, 0xaeea0608,
-0x8f830054, 0x8f820054, 0x24690032, 0x1221023,
-0x2c420033, 0x10400061, 0x5821, 0x240d0008,
-0x240c0011, 0x24080012, 0x24070040, 0x240a0001,
-0x8f830120, 0x27623800, 0x24660020, 0xc2102b,
-0x50400001, 0x27663000, 0x8f820128, 0x10c20004,
-0x0, 0x8f820124, 0x14c20007, 0x0,
-0x8ee201a4, 0x2821, 0x24420001, 0xaee201a4,
-0x80043a9, 0x8ee201a4, 0x8ee20608, 0xac62001c,
-0x8ee404a0, 0x8ee504a4, 0x2462001c, 0xac620008,
-0xa46d000e, 0xac6c0018, 0xac640000, 0xac650004,
-0x8ee204c4, 0xac620010, 0xaf860120, 0x92e24e20,
-0x14400033, 0x24050001, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0x8c820000, 0x1448001f,
-0x0, 0x8ee34e30, 0x8ee24e34, 0x1062001b,
-0x0, 0x8c820004, 0x24420001, 0xac820004,
-0x8ee24e34, 0x8ee34e30, 0x24420001, 0x10470007,
-0x0, 0x8ee24e34, 0x24420001, 0x10620005,
-0x0, 0x8004396, 0x0, 0x14600005,
-0x0, 0x8f820128, 0x24420020, 0xaf820128,
-0x8f820128, 0x8c820004, 0x2c420011, 0x50400010,
-0xac800000, 0x80043a9, 0x0, 0x8ee24e30,
-0x24420001, 0x50470003, 0x1021, 0x8ee24e30,
-0x24420001, 0xaee24e30, 0x8ee24e30, 0x210c0,
-0x24425038, 0x2e22021, 0xac880000, 0xac8a0004,
-0x54a00006, 0x240b0001, 0x8f820054, 0x1221023,
-0x2c420033, 0x1440ffa6, 0x0, 0x316300ff,
-0x24020001, 0x54620003, 0xafa00010, 0x80043d6,
-0x0, 0x3c040001, 0x24845a20, 0xafa00014,
-0x8f860120, 0x8f870124, 0x3c050009, 0xc002403,
-0x34a5f011, 0x80043d6, 0x0, 0x3c040001,
-0x24845a2c, 0xafa00014, 0x8f860120, 0x8f870124,
-0x3c050009, 0xc002403, 0x34a5f010, 0x80043d6,
-0x0, 0x3c040001, 0x24845a38, 0xafa00014,
-0x8ee60608, 0x8f470228, 0x3c050009, 0xc002403,
-0x34a5f00f, 0x8ee201ac, 0x24420001, 0xaee201ac,
-0x8ee201ac, 0x8ee2015c, 0x24420001, 0xaee2015c,
-0x8ee2015c, 0x8fbf0020, 0x3e00008, 0x27bd0028,
-0x3c020001, 0x8c425cd8, 0x27bdffe0, 0x1440000d,
-0xafbf0018, 0x3c040001, 0x24845a44, 0x3c050008,
-0xafa00010, 0xafa00014, 0x8f860220, 0x34a50499,
-0x24020001, 0x3c010001, 0xac225cd8, 0xc002403,
-0x3821, 0x8ee204d0, 0x3c030001, 0x771821,
-0x946383b2, 0x34420001, 0x10600007, 0xaee204d0,
-0x8f820220, 0x3c0308ff, 0x3463ffff, 0x431024,
-0x34420008, 0xaf820220, 0x2021, 0xc0052a2,
-0x24050004, 0xaf420268, 0x8fbf0018, 0x3e00008,
-0x27bd0020, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x3c120001,
-0x26521200, 0x3c140001, 0x8e945c50, 0x3c100001,
-0x26101120, 0x3c15c000, 0x36b50060, 0x8e8a0000,
-0x8eb30000, 0x26a400b, 0x248000a, 0x200f821,
-0x0, 0xd, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x80014d6,
-0x0, 0x80014d8, 0x3c0a0001, 0x80014d8,
-0x3c0a0002, 0x80014d8, 0x0, 0x80024a6,
-0x0, 0x80014d8, 0x3c0a0003, 0x80014d8,
-0x3c0a0004, 0x8002f8c, 0x0, 0x80014d8,
-0x3c0a0005, 0x8003ce8, 0x0, 0x8003c66,
-0x0, 0x80014d8, 0x3c0a0006, 0x80014d8,
-0x3c0a0007, 0x80014d8, 0x0, 0x80014d8,
-0x0, 0x80014d8, 0x0, 0x8002a75,
-0x0, 0x80014d8, 0x3c0a000b, 0x80014d8,
-0x3c0a000c, 0x80014d8, 0x3c0a000d, 0x800237a,
-0x0, 0x8002339, 0x0, 0x80014d8,
-0x3c0a000e, 0x8001b3c, 0x0, 0x80024a4,
-0x0, 0x80014d8, 0x3c0a000f, 0x80040a7,
-0x0, 0x8004091, 0x0, 0x80014d8,
-0x3c0a0010, 0x80014ee, 0x0, 0x80014d8,
-0x3c0a0011, 0x80014d8, 0x3c0a0012, 0x80014d8,
-0x3c0a0013, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x3c030001,
-0x34633800, 0x24050080, 0x2404001f, 0x2406ffff,
-0x24020001, 0xaf80021c, 0xaf820200, 0xaf820220,
-0x3631021, 0xaf8200c0, 0x3631021, 0xaf8200c4,
-0x3631021, 0xaf8200c8, 0x27623800, 0xaf8200d0,
-0x27623800, 0xaf8200d4, 0x27623800, 0xaf8200d8,
-0x27621800, 0xaf8200e0, 0x27621800, 0xaf8200e4,
-0x27621800, 0xaf8200e8, 0x27621000, 0xaf8200f0,
-0x27621000, 0xaf8200f4, 0x27621000, 0xaf8200f8,
-0xaca00000, 0x2484ffff, 0x1486fffd, 0x24a50004,
-0x8f830040, 0x3c02f000, 0x621824, 0x3c025000,
-0x1062000c, 0x43102b, 0x14400006, 0x3c026000,
-0x3c024000, 0x10620008, 0x24020800, 0x8004539,
-0x0, 0x10620004, 0x24020800, 0x8004539,
-0x0, 0x24020700, 0x3c010001, 0xac225cdc,
-0x3e00008, 0x0, 0x27bdffd8, 0xafbf0024,
-0xafb00020, 0x8f830054, 0x8f820054, 0x3c010001,
-0xac205cc4, 0x8004545, 0x24630064, 0x8f820054,
-0x621023, 0x2c420065, 0x1440fffc, 0x0,
-0xc004d71, 0x0, 0x24040001, 0x2821,
-0x27a60018, 0x34028000, 0xc00498e, 0xa7a20018,
-0x8f830054, 0x8f820054, 0x8004556, 0x24630064,
-0x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
-0x24040001, 0x24050001, 0xc00494c, 0x27a60018,
-0x8f830054, 0x8f820054, 0x8004562, 0x24630064,
-0x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
-0x24040001, 0x24050001, 0xc00494c, 0x27a60018,
-0x8f830054, 0x8f820054, 0x800456e, 0x24630064,
-0x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
-0x24040001, 0x3c060001, 0x24c65da0, 0xc00494c,
-0x24050002, 0x8f830054, 0x8f820054, 0x800457b,
-0x24630064, 0x8f820054, 0x621023, 0x2c420065,
-0x1440fffc, 0x24040001, 0x24050003, 0x3c100001,
-0x26105da2, 0xc00494c, 0x2003021, 0x97a60018,
-0x3c070001, 0x94e75da0, 0x3c040001, 0x24845ab0,
-0xafa00014, 0x96020000, 0x3c05000d, 0x34a50100,
-0xc002403, 0xafa20010, 0x97a20018, 0x1040004c,
-0x24036040, 0x96020000, 0x3042fff0, 0x1443000a,
-0x24020020, 0x3c030001, 0x94635da0, 0x54620009,
-0x24027830, 0x24020003, 0x3c010001, 0xac225cc4,
-0x80045ac, 0x24020005, 0x3c030001, 0x94635da0,
-0x24027830, 0x1462000f, 0x24030010, 0x3c020001,
-0x94425da2, 0x3042fff0, 0x1443000a, 0x24020003,
-0x3c010001, 0xac225cc4, 0x24020006, 0x3c010001,
-0xac225db0, 0x3c010001, 0xac225dbc, 0x80045e6,
-0x3c09fff0, 0x3c020001, 0x8c425cc4, 0x3c030001,
-0x94635da0, 0x34420001, 0x3c010001, 0xac225cc4,
-0x24020015, 0x1462000f, 0x0, 0x3c020001,
-0x94425da2, 0x3042fff0, 0x3843f420, 0x2c630001,
-0x3842f430, 0x2c420001, 0x621825, 0x10600005,
-0x24020003, 0x3c010001, 0xac225dbc, 0x80045e6,
-0x3c09fff0, 0x3c030001, 0x94635da0, 0x24027810,
-0x1462000b, 0x24020002, 0x3c020001, 0x94425da2,
-0x3042fff0, 0x14400006, 0x24020002, 0x24020004,
-0x3c010001, 0xac225dbc, 0x80045e6, 0x3c09fff0,
-0x3c010001, 0xac225dbc, 0x80045e6, 0x3c09fff0,
-0x3c020001, 0x8c425cc4, 0x24030001, 0x3c010001,
-0xac235dbc, 0x34420004, 0x3c010001, 0xac225cc4,
-0x3c09fff0, 0x3529bdc0, 0x3c060001, 0x8cc65cc4,
-0x3c040001, 0x24845ab0, 0x24020001, 0x3c010001,
-0xac225ccc, 0x8f820054, 0x3c070001, 0x8ce75dbc,
-0x3c030001, 0x94635da0, 0x3c080001, 0x95085da2,
-0x3c05000d, 0x34a50100, 0x3c010001, 0xac205cc8,
-0x491021, 0x3c010001, 0xac225dac, 0xafa30010,
-0xc002403, 0xafa80014, 0x8fbf0024, 0x8fb00020,
-0x3e00008, 0x27bd0028, 0x27bdffe8, 0x3c050001,
-0x8ca55cc8, 0x24060004, 0x24020001, 0x14a20014,
-0xafbf0010, 0x3c020001, 0x8c427e3c, 0x30428000,
-0x10400005, 0x3c04000f, 0x3c030001, 0x8c635dbc,
-0x8004617, 0x34844240, 0x3c040004, 0x3c030001,
-0x8c635dbc, 0x348493e0, 0x24020005, 0x14620016,
-0x0, 0x3c04003d, 0x800462f, 0x34840900,
-0x3c020001, 0x8c427e38, 0x30428000, 0x10400005,
-0x3c04001e, 0x3c030001, 0x8c635dbc, 0x800462a,
-0x34848480, 0x3c04000f, 0x3c030001, 0x8c635dbc,
-0x34844240, 0x24020005, 0x14620003, 0x0,
-0x3c04007a, 0x34841200, 0x3c020001, 0x8c425dac,
-0x8f830054, 0x441021, 0x431023, 0x44102b,
-0x14400037, 0x0, 0x3c020001, 0x8c425cd0,
-0x14400033, 0x0, 0x3c010001, 0x10c00025,
-0xac205ce0, 0x3c090001, 0x8d295cc4, 0x24070001,
-0x3c044000, 0x3c080001, 0x25087e3c, 0x250afffc,
-0x52842, 0x14a00002, 0x24c6ffff, 0x24050008,
-0xa91024, 0x10400010, 0x0, 0x14a70008,
-0x0, 0x8d020000, 0x441024, 0x1040000a,
-0x0, 0x3c010001, 0x800465b, 0xac255ce0,
-0x8d420000, 0x441024, 0x10400003, 0x0,
-0x3c010001, 0xac275ce0, 0x3c020001, 0x8c425ce0,
-0x6182b, 0x2c420001, 0x431024, 0x5440ffe5,
-0x52842, 0x8f820054, 0x3c030001, 0x8c635ce0,
-0x3c010001, 0xac225dac, 0x1060002a, 0x24020001,
-0x3c010001, 0xac255cc8, 0x3c010001, 0xac225ccc,
-0x3c020001, 0x8c425ce0, 0x10400022, 0x0,
-0x3c020001, 0x8c425ccc, 0x1040000a, 0x24020001,
-0x3c010001, 0xac205ccc, 0x3c010001, 0x370821,
-0xac2283ac, 0x3c010001, 0xac205d4c, 0x3c010001,
-0xac225d04, 0x3c030001, 0x771821, 0x8c6383ac,
-0x24020008, 0x10620005, 0x24020001, 0xc004695,
-0x0, 0x8004692, 0x0, 0x3c030001,
-0x8c635cc8, 0x10620007, 0x2402000e, 0x3c030001,
-0x8c637dd0, 0x10620003, 0x0, 0xc004e54,
-0x8f840220, 0x8fbf0010, 0x3e00008, 0x27bd0018,
-0x27bdffe0, 0x3c02fdff, 0xafbf0018, 0x8ee30000,
-0x3c050001, 0x8ca55cc8, 0x3c040001, 0x8c845cf0,
-0x3442ffff, 0x621824, 0x14a40008, 0xaee30000,
-0x3c030001, 0x771821, 0x8c6383ac, 0x3c020001,
-0x8c425cf4, 0x10620008, 0x0, 0x3c020001,
-0x571021, 0x8c4283ac, 0x3c010001, 0xac255cf0,
-0x3c010001, 0xac225cf4, 0x3c030001, 0x8c635cc8,
-0x24020002, 0x10620169, 0x2c620003, 0x10400005,
-0x24020001, 0x10620008, 0x0, 0x800481c,
-0x0, 0x24020004, 0x106200b1, 0x24020001,
-0x800481d, 0x0, 0x3c020001, 0x571021,
-0x8c4283ac, 0x2443ffff, 0x2c620008, 0x1040015a,
-0x31080, 0x3c010001, 0x220821, 0x8c225ac8,
-0x400008, 0x0, 0x3c030001, 0x8c635dbc,
-0x24020005, 0x14620014, 0x0, 0x3c020001,
-0x8c425cd4, 0x1040000a, 0x24020003, 0xc004822,
-0x0, 0x24020002, 0x3c010001, 0x370821,
-0xac2283ac, 0x3c010001, 0x80046e0, 0xac205cd4,
-0x3c010001, 0x370821, 0xac2283ac, 0x3c010001,
-0x800481f, 0xac205c60, 0xc004822, 0x0,
-0x3c020001, 0x8c425cd4, 0x3c010001, 0xac205c60,
-0x104000dd, 0x24020002, 0x3c010001, 0x370821,
-0xac2283ac, 0x3c010001, 0x800481f, 0xac205cd4,
-0x3c030001, 0x8c635dbc, 0x24020005, 0x14620003,
-0x24020001, 0x3c010001, 0xac225d00, 0xc0049cf,
-0x0, 0x3c030001, 0x8c635d00, 0x800478e,
-0x24020011, 0x3c050001, 0x8ca55cc8, 0x3c060001,
-0x8cc67e3c, 0xc005108, 0x2021, 0x24020005,
-0x3c010001, 0xac205cd4, 0x3c010001, 0x370821,
-0x800481f, 0xac2283ac, 0x3c040001, 0x24845abc,
-0x3c05000f, 0x34a50100, 0x3021, 0x3821,
-0xafa00010, 0xc002403, 0xafa00014, 0x800481f,
-0x0, 0x8f820220, 0x3c03f700, 0x431025,
-0x80047b7, 0xaf820220, 0x8f820220, 0x3c030004,
-0x431024, 0x144000a9, 0x24020007, 0x8f830054,
-0x3c020001, 0x8c425da4, 0x2463d8f0, 0x431023,
-0x2c422710, 0x144000f8, 0x24020001, 0x800481d,
-0x0, 0x3c050001, 0x8ca55cc8, 0xc0052a2,
-0x2021, 0xc005386, 0x2021, 0x3c030001,
-0x8c637e34, 0x46100ea, 0x24020001, 0x3c020008,
-0x621024, 0x10400006, 0x0, 0x8f820214,
-0x3c03ffff, 0x431024, 0x8004741, 0x3442251f,
-0x8f820214, 0x3c03ffff, 0x431024, 0x3442241f,
-0xaf820214, 0x8ee20000, 0x3c030200, 0x431025,
-0xaee20000, 0x8f820220, 0x2403fffb, 0x431024,
-0xaf820220, 0x8f820220, 0x34420002, 0xaf820220,
-0x24020008, 0x3c010001, 0x370821, 0xac2283ac,
-0x8f820220, 0x3c030004, 0x431024, 0x14400005,
-0x0, 0x8f820220, 0x3c03f700, 0x431025,
-0xaf820220, 0x3c030001, 0x8c635dbc, 0x24020005,
-0x1462000a, 0x0, 0x3c020001, 0x94425da2,
-0x24429fbc, 0x2c420004, 0x10400004, 0x24040018,
-0x24050002, 0xc004d93, 0x24060020, 0xc0043dd,
-0x0, 0x3c010001, 0x800481f, 0xac205d50,
-0x3c020001, 0x571021, 0x8c4283ac, 0x2443ffff,
-0x2c620008, 0x104000ac, 0x31080, 0x3c010001,
-0x220821, 0x8c225ae8, 0x400008, 0x0,
-0xc00429b, 0x0, 0x3c010001, 0xac205ccc,
-0xaf800204, 0x3c010001, 0xc004822, 0xac207e20,
-0x24020001, 0x3c010001, 0xac225ce4, 0x24020002,
-0x3c010001, 0x370821, 0x800481f, 0xac2283ac,
-0xc00489f, 0x0, 0x3c030001, 0x8c635ce4,
-0x24020009, 0x14620090, 0x24020003, 0x3c010001,
-0x370821, 0x800481f, 0xac2283ac, 0x3c020001,
-0x8c427e38, 0x30424000, 0x10400005, 0x0,
-0x8f820044, 0x3c03ffff, 0x800479f, 0x34637fff,
-0x8f820044, 0x2403ff7f, 0x431024, 0xaf820044,
-0x8f830054, 0x80047b9, 0x24020004, 0x8f830054,
-0x3c020001, 0x8c425da4, 0x2463d8f0, 0x431023,
-0x2c422710, 0x14400074, 0x24020005, 0x3c010001,
-0x370821, 0x800481f, 0xac2283ac, 0x8f820220,
-0x3c03f700, 0x431025, 0xaf820220, 0xaf800204,
-0x3c010001, 0xac207e20, 0x8f830054, 0x24020006,
-0x3c010001, 0x370821, 0xac2283ac, 0x3c010001,
-0x800481f, 0xac235da4, 0x8f830054, 0x3c020001,
-0x8c425da4, 0x2463fff6, 0x431023, 0x2c42000a,
-0x14400059, 0x0, 0x24020007, 0x3c010001,
-0x370821, 0x800481f, 0xac2283ac, 0x8f820220,
-0x3c04f700, 0x441025, 0xaf820220, 0x8f820220,
-0x3c030300, 0x431024, 0x14400005, 0x1821,
-0x8f820220, 0x24030001, 0x441025, 0xaf820220,
-0x10600043, 0x24020001, 0x8f820214, 0x3c03ffff,
-0x3c040001, 0x8c845d98, 0x431024, 0x3442251f,
-0xaf820214, 0x24020008, 0x3c010001, 0x370821,
-0x1080000b, 0xac2283ac, 0x3c020001, 0x8c425d74,
-0x14400007, 0x24020001, 0x3c010001, 0xac227dd0,
-0xc004e54, 0x8f840220, 0x800480c, 0x0,
-0x8f820220, 0x3c030008, 0x431024, 0x14400017,
-0x2402000e, 0x3c010001, 0xac227dd0, 0x8ee20000,
-0x2021, 0x3c030200, 0x431025, 0xc005386,
-0xaee20000, 0x8f820220, 0x2403fffb, 0x431024,
-0xaf820220, 0x8f820220, 0x34420002, 0xc0043dd,
-0xaf820220, 0x3c050001, 0x8ca55cc8, 0xc0052a2,
-0x2021, 0x800481f, 0x0, 0x3c020001,
-0x8c425d74, 0x10400010, 0x0, 0x3c020001,
-0x8c425d70, 0x2442ffff, 0x3c010001, 0xac225d70,
-0x14400009, 0x24020002, 0x3c010001, 0xac205d74,
-0x3c010001, 0x800481f, 0xac225d70, 0x24020001,
-0x3c010001, 0xac225ccc, 0x8fbf0018, 0x3e00008,
-0x27bd0020, 0x8f820200, 0x8f820220, 0x8f820220,
-0x34420004, 0xaf820220, 0x8f820200, 0x3c060001,
-0x8cc65cc8, 0x34420004, 0xaf820200, 0x24020002,
-0x10c2003a, 0x2cc20003, 0x10400005, 0x24020001,
-0x10c20008, 0x0, 0x8004868, 0x0,
-0x24020004, 0x10c20013, 0x24020001, 0x8004868,
-0x0, 0x3c030001, 0x8c635cb8, 0x3c020001,
-0x8c425cc0, 0x3c040001, 0x8c845cdc, 0x3c050001,
-0x8ca55cbc, 0xaf860200, 0xaf860220, 0x34630022,
-0x441025, 0x451025, 0x34420002, 0x8004867,
-0xaf830200, 0x3c030001, 0x8c635d98, 0xaf820200,
-0x10600009, 0xaf820220, 0x3c020001, 0x8c425d74,
-0x14400005, 0x3c033f00, 0x3c020001, 0x8c425cb0,
-0x800485b, 0x346300e0, 0x3c020001, 0x8c425cb0,
-0x3c033f00, 0x346300e2, 0x431025, 0xaf820200,
-0x3c030001, 0x8c635cb4, 0x3c04f700, 0x3c020001,
-0x8c425cc0, 0x3c050001, 0x8ca55cdc, 0x641825,
-0x431025, 0x451025, 0xaf820220, 0x3e00008,
-0x0, 0x8f820220, 0x3c030001, 0x8c635cc8,
-0x34420004, 0xaf820220, 0x24020001, 0x1062000f,
-0x0, 0x8f830054, 0x8f820054, 0x24630002,
-0x621023, 0x2c420003, 0x10400011, 0x0,
-0x8f820054, 0x621023, 0x2c420003, 0x1040000c,
-0x0, 0x8004879, 0x0, 0x8f830054,
-0x8f820054, 0x8004885, 0x24630007, 0x8f820054,
-0x621023, 0x2c420008, 0x1440fffc, 0x0,
-0x8f8400e0, 0x30820007, 0x1040000d, 0x0,
-0x8f820054, 0x8f8300e0, 0x14830009, 0x24450032,
-0x8f820054, 0xa21023, 0x2c420033, 0x10400004,
-0x0, 0x8f8200e0, 0x1082fff9, 0x0,
-0x8f820220, 0x2403fffd, 0x431024, 0xaf820220,
-0x3e00008, 0x0, 0x3c030001, 0x8c635ce4,
-0x3c020001, 0x8c425ce8, 0x50620004, 0x2463ffff,
-0x3c010001, 0xac235ce8, 0x2463ffff, 0x2c620009,
-0x1040009d, 0x31080, 0x3c010001, 0x220821,
-0x8c225b08, 0x400008, 0x0, 0x8f820044,
-0x34428080, 0xaf820044, 0x8f830054, 0x8004938,
-0x24020002, 0x8f830054, 0x3c020001, 0x8c425da8,
-0x2463d8f0, 0x431023, 0x2c422710, 0x1440008a,
-0x24020003, 0x8004945, 0x0, 0x8f820044,
-0x3c03ffff, 0x34637fff, 0x431024, 0xaf820044,
-0x8f830054, 0x8004938, 0x24020004, 0x8f830054,
-0x3c020001, 0x8c425da8, 0x2463fff6, 0x431023,
-0x2c42000a, 0x14400078, 0x24020005, 0x8004945,
-0x0, 0x8f820220, 0x3c03f700, 0x431025,
-0xaf820220, 0x8f820220, 0x2403fffb, 0x431024,
-0xaf820220, 0x8f820220, 0x34420002, 0xaf820220,
-0x3c023f00, 0x344200e0, 0xaf820200, 0x8f820200,
-0x2403fffd, 0x431024, 0xaf820200, 0x24040001,
-0x3405ffff, 0xaf840204, 0x8f830054, 0x8f820054,
-0x80048ec, 0x24630001, 0x8f820054, 0x621023,
-0x2c420002, 0x1440fffc, 0x0, 0x8f820224,
-0x42040, 0xa4102b, 0x1040fff2, 0x0,
-0x8f820220, 0x3c03f700, 0x431025, 0xaf820220,
-0x8f820214, 0x3c03ffff, 0x431024, 0x3442251f,
-0xaf820214, 0x8f820220, 0x2403fffb, 0x431024,
-0xaf820220, 0x8f820220, 0x3c04f700, 0x34840008,
-0x34420002, 0xaf820220, 0x8f820220, 0x3c033f00,
-0x346300e2, 0x441025, 0xaf820220, 0xaf830200,
-0x8f8400f0, 0x276217f8, 0x14820002, 0x24850008,
-0x27651000, 0x8f8200f4, 0x10a20007, 0x3c038000,
-0x34630040, 0x3c020001, 0x24425c70, 0xac820000,
-0xac830004, 0xaf8500f0, 0x8f830054, 0x8004938,
-0x24020006, 0x8f830054, 0x3c020001, 0x8c425da8,
-0x2463fff6, 0x431023, 0x2c42000a, 0x14400022,
-0x24020007, 0x8004945, 0x0, 0x8f8200e0,
-0xaf8200e4, 0x8f8200e0, 0xaf8200e8, 0x8f820220,
-0x34420004, 0xaf820220, 0x8f820220, 0x2403fff7,
-0x431024, 0xaf820220, 0x8f820044, 0x34428080,
-0xaf820044, 0x8f830054, 0x24020008, 0x3c010001,
-0xac225ce4, 0x3c010001, 0x8004947, 0xac235da8,
-0x8f830054, 0x3c020001, 0x8c425da8, 0x2463d8f0,
-0x431023, 0x2c422710, 0x14400003, 0x24020009,
-0x3c010001, 0xac225ce4, 0x3e00008, 0x0,
-0x0, 0x0, 0x0, 0x27bdffd8,
-0xafb20018, 0x809021, 0xafb3001c, 0xa09821,
-0xafb10014, 0xc08821, 0xafb00010, 0x8021,
-0xafbf0020, 0xa6200000, 0xc004d4b, 0x24040001,
-0x26100001, 0x2e020020, 0x1440fffb, 0x0,
-0xc004d4b, 0x2021, 0xc004d4b, 0x24040001,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
-0x24100010, 0x2501024, 0x10400002, 0x2021,
-0x24040001, 0xc004d4b, 0x108042, 0x1600fffa,
-0x2501024, 0x24100010, 0x2701024, 0x10400002,
-0x2021, 0x24040001, 0xc004d4b, 0x108042,
-0x1600fffa, 0x2701024, 0xc004d71, 0x34108000,
-0xc004d71, 0x0, 0xc004d2b, 0x0,
-0x50400005, 0x108042, 0x96220000, 0x501025,
-0xa6220000, 0x108042, 0x1600fff7, 0x0,
-0xc004d71, 0x0, 0x8fbf0020, 0x8fb3001c,
-0x8fb20018, 0x8fb10014, 0x8fb00010, 0x3e00008,
-0x27bd0028, 0x27bdffd8, 0xafb10014, 0x808821,
-0xafb20018, 0xa09021, 0xafb3001c, 0xc09821,
-0xafb00010, 0x8021, 0xafbf0020, 0xc004d4b,
-0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
-0x0, 0xc004d4b, 0x2021, 0xc004d4b,
-0x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
-0x24040001, 0x24100010, 0x2301024, 0x10400002,
-0x2021, 0x24040001, 0xc004d4b, 0x108042,
-0x1600fffa, 0x2301024, 0x24100010, 0x2501024,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fffa, 0x2501024, 0xc004d4b,
-0x24040001, 0xc004d4b, 0x2021, 0x34108000,
-0x96620000, 0x501024, 0x10400002, 0x2021,
-0x24040001, 0xc004d4b, 0x108042, 0x1600fff8,
-0x0, 0xc004d71, 0x0, 0x8fbf0020,
-0x8fb3001c, 0x8fb20018, 0x8fb10014, 0x8fb00010,
-0x3e00008, 0x27bd0028, 0x3c030001, 0x8c635d00,
-0x3c020001, 0x8c425d48, 0x27bdffd8, 0xafbf0020,
-0xafb1001c, 0x10620003, 0xafb00018, 0x3c010001,
-0xac235d48, 0x2463ffff, 0x2c620013, 0x10400349,
-0x31080, 0x3c010001, 0x220821, 0x8c225b30,
-0x400008, 0x0, 0xc004d71, 0x8021,
-0x34028000, 0xa7a20010, 0x27b10010, 0xc004d4b,
-0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
-0x0, 0xc004d4b, 0x2021, 0xc004d4b,
-0x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
-0x24040001, 0x24100010, 0x32020001, 0x10400002,
-0x2021, 0x24040001, 0xc004d4b, 0x108042,
-0x1600fffa, 0x32020001, 0x24100010, 0xc004d4b,
-0x2021, 0x108042, 0x1600fffc, 0x0,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
-0x34108000, 0x96220000, 0x501024, 0x10400002,
-0x2021, 0x24040001, 0xc004d4b, 0x108042,
-0x1600fff8, 0x0, 0xc004d71, 0x0,
-0x8004d24, 0x24020002, 0x27b10010, 0xa7a00010,
-0x8021, 0xc004d4b, 0x24040001, 0x26100001,
-0x2e020020, 0x1440fffb, 0x0, 0xc004d4b,
-0x2021, 0xc004d4b, 0x24040001, 0xc004d4b,
-0x24040001, 0xc004d4b, 0x2021, 0x24100010,
-0x32020001, 0x10400002, 0x2021, 0x24040001,
-0xc004d4b, 0x108042, 0x1600fffa, 0x32020001,
-0x24100010, 0xc004d4b, 0x2021, 0x108042,
-0x1600fffc, 0x0, 0xc004d71, 0x34108000,
-0xc004d71, 0x0, 0xc004d2b, 0x0,
-0x50400005, 0x108042, 0x96220000, 0x501025,
-0xa6220000, 0x108042, 0x1600fff7, 0x0,
-0xc004d71, 0x0, 0x97a20010, 0x30428000,
-0x144002dc, 0x24020003, 0x8004d24, 0x0,
-0x24021200, 0xa7a20010, 0x27b10010, 0x8021,
-0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d4b, 0x2021,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
-0xc004d4b, 0x24040001, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0xc004d4b, 0x2021, 0x108042, 0x1600fffc,
-0x0, 0xc004d4b, 0x24040001, 0xc004d4b,
-0x2021, 0x34108000, 0x96220000, 0x501024,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fff8, 0x0, 0xc004d71,
-0x0, 0x8f830054, 0x8004d16, 0x24020004,
-0x8f830054, 0x3c020001, 0x8c425db8, 0x2463ff9c,
-0x431023, 0x2c420064, 0x1440029e, 0x24020002,
-0x3c030001, 0x8c635dbc, 0x10620297, 0x2c620003,
-0x14400296, 0x24020011, 0x24020003, 0x10620005,
-0x24020004, 0x10620291, 0x2402000f, 0x8004d24,
-0x24020011, 0x8004d24, 0x24020005, 0x24020014,
-0xa7a20010, 0x27b10010, 0x8021, 0xc004d4b,
-0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
-0x0, 0xc004d4b, 0x2021, 0xc004d4b,
-0x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
-0x24040001, 0x24100010, 0x32020001, 0x10400002,
-0x2021, 0x24040001, 0xc004d4b, 0x108042,
-0x1600fffa, 0x32020001, 0x24100010, 0x32020012,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fffa, 0x32020012, 0xc004d4b,
-0x24040001, 0xc004d4b, 0x2021, 0x34108000,
-0x96220000, 0x501024, 0x10400002, 0x2021,
-0x24040001, 0xc004d4b, 0x108042, 0x1600fff8,
-0x0, 0xc004d71, 0x0, 0x8f830054,
-0x8004d16, 0x24020006, 0x8f830054, 0x3c020001,
-0x8c425db8, 0x2463ff9c, 0x431023, 0x2c420064,
-0x14400250, 0x24020007, 0x8004d24, 0x0,
-0x24020006, 0xa7a20010, 0x27b10010, 0x8021,
-0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d4b, 0x2021,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
-0xc004d4b, 0x24040001, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020013, 0x10400002, 0x2021, 0x24040001,
-0xc004d4b, 0x108042, 0x1600fffa, 0x32020013,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
-0x34108000, 0x96220000, 0x501024, 0x10400002,
-0x2021, 0x24040001, 0xc004d4b, 0x108042,
-0x1600fff8, 0x0, 0xc004d71, 0x0,
-0x8f830054, 0x8004d16, 0x24020008, 0x8f830054,
-0x3c020001, 0x8c425db8, 0x2463ff9c, 0x431023,
-0x2c420064, 0x1440020f, 0x24020009, 0x8004d24,
-0x0, 0x27b10010, 0xa7a00010, 0x8021,
-0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d4b, 0x2021,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x24040001,
-0xc004d4b, 0x2021, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020018, 0x10400002, 0x2021, 0x24040001,
-0xc004d4b, 0x108042, 0x1600fffa, 0x32020018,
-0xc004d71, 0x34108000, 0xc004d71, 0x0,
-0xc004d2b, 0x0, 0x50400005, 0x108042,
-0x96220000, 0x501025, 0xa6220000, 0x108042,
-0x1600fff7, 0x0, 0xc004d71, 0x8021,
-0x97a20010, 0x27b10010, 0x34420001, 0xa7a20010,
-0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d4b, 0x2021,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
-0xc004d4b, 0x24040001, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020018, 0x10400002, 0x2021, 0x24040001,
-0xc004d4b, 0x108042, 0x1600fffa, 0x32020018,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
-0x34108000, 0x96220000, 0x501024, 0x10400002,
-0x2021, 0x24040001, 0xc004d4b, 0x108042,
-0x1600fff8, 0x0, 0xc004d71, 0x0,
-0x8f830054, 0x8004d16, 0x2402000a, 0x8f830054,
-0x3c020001, 0x8c425db8, 0x2463ff9c, 0x431023,
-0x2c420064, 0x1440019b, 0x2402000b, 0x8004d24,
-0x0, 0x27b10010, 0xa7a00010, 0x8021,
-0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d4b, 0x2021,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x24040001,
-0xc004d4b, 0x2021, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020017, 0x10400002, 0x2021, 0x24040001,
-0xc004d4b, 0x108042, 0x1600fffa, 0x32020017,
-0xc004d71, 0x34108000, 0xc004d71, 0x0,
-0xc004d2b, 0x0, 0x50400005, 0x108042,
-0x96220000, 0x501025, 0xa6220000, 0x108042,
-0x1600fff7, 0x0, 0xc004d71, 0x8021,
-0x97a20010, 0x27b10010, 0x34420700, 0xa7a20010,
-0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d4b, 0x2021,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
-0xc004d4b, 0x24040001, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020017, 0x10400002, 0x2021, 0x24040001,
-0xc004d4b, 0x108042, 0x1600fffa, 0x32020017,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
-0x34108000, 0x96220000, 0x501024, 0x10400002,
-0x2021, 0x24040001, 0xc004d4b, 0x108042,
-0x1600fff8, 0x0, 0xc004d71, 0x0,
-0x8f830054, 0x8004d16, 0x2402000c, 0x8f830054,
-0x3c020001, 0x8c425db8, 0x2463ff9c, 0x431023,
-0x2c420064, 0x14400127, 0x24020012, 0x8004d24,
-0x0, 0x27b10010, 0xa7a00010, 0x8021,
-0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d4b, 0x2021,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x24040001,
-0xc004d4b, 0x2021, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020014, 0x10400002, 0x2021, 0x24040001,
-0xc004d4b, 0x108042, 0x1600fffa, 0x32020014,
-0xc004d71, 0x34108000, 0xc004d71, 0x0,
-0xc004d2b, 0x0, 0x50400005, 0x108042,
-0x96220000, 0x501025, 0xa6220000, 0x108042,
-0x1600fff7, 0x0, 0xc004d71, 0x8021,
-0x97a20010, 0x27b10010, 0x34420010, 0xa7a20010,
-0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d4b, 0x2021,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
-0xc004d4b, 0x24040001, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020014, 0x10400002, 0x2021, 0x24040001,
-0xc004d4b, 0x108042, 0x1600fffa, 0x32020014,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
-0x34108000, 0x96220000, 0x501024, 0x10400002,
-0x2021, 0x24040001, 0xc004d4b, 0x108042,
-0x1600fff8, 0x0, 0xc004d71, 0x0,
-0x8f830054, 0x8004d16, 0x24020013, 0x8f830054,
-0x3c020001, 0x8c425db8, 0x2463ff9c, 0x431023,
-0x2c420064, 0x144000b3, 0x2402000d, 0x8004d24,
-0x0, 0x27b10010, 0xa7a00010, 0x8021,
-0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d4b, 0x2021,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x24040001,
-0xc004d4b, 0x2021, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020018, 0x10400002, 0x2021, 0x24040001,
-0xc004d4b, 0x108042, 0x1600fffa, 0x32020018,
-0xc004d71, 0x34108000, 0xc004d71, 0x0,
-0xc004d2b, 0x0, 0x50400005, 0x108042,
-0x96220000, 0x501025, 0xa6220000, 0x108042,
-0x1600fff7, 0x0, 0xc004d71, 0x8021,
-0x97a20010, 0x27b10010, 0x3042fffe, 0xa7a20010,
-0xc004d4b, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d4b, 0x2021,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
-0xc004d4b, 0x24040001, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020018, 0x10400002, 0x2021, 0x24040001,
-0xc004d4b, 0x108042, 0x1600fffa, 0x32020018,
-0xc004d4b, 0x24040001, 0xc004d4b, 0x2021,
-0x34108000, 0x96220000, 0x501024, 0x10400002,
-0x2021, 0x24040001, 0xc004d4b, 0x108042,
-0x1600fff8, 0x0, 0xc004d71, 0x0,
-0x8f830054, 0x8004d16, 0x2402000e, 0x24020840,
-0xa7a20010, 0x27b10010, 0x8021, 0xc004d4b,
-0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
-0x0, 0xc004d4b, 0x2021, 0xc004d4b,
-0x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
-0x24040001, 0x24100010, 0x32020001, 0x10400002,
-0x2021, 0x24040001, 0xc004d4b, 0x108042,
-0x1600fffa, 0x32020001, 0x24100010, 0x32020013,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fffa, 0x32020013, 0xc004d4b,
-0x24040001, 0xc004d4b, 0x2021, 0x34108000,
-0x96220000, 0x501024, 0x10400002, 0x2021,
-0x24040001, 0xc004d4b, 0x108042, 0x1600fff8,
-0x0, 0xc004d71, 0x0, 0x8f830054,
-0x24020010, 0x3c010001, 0xac225d00, 0x3c010001,
-0x8004d26, 0xac235db8, 0x8f830054, 0x3c020001,
-0x8c425db8, 0x2463ff9c, 0x431023, 0x2c420064,
-0x14400004, 0x0, 0x24020011, 0x3c010001,
-0xac225d00, 0x8fbf0020, 0x8fb1001c, 0x8fb00018,
-0x3e00008, 0x27bd0028, 0x8f850044, 0x8f820044,
-0x3c030001, 0x431025, 0x3c030008, 0xaf820044,
-0x8f840054, 0x8f820054, 0xa32824, 0x8004d37,
-0x24840001, 0x8f820054, 0x821023, 0x2c420002,
-0x1440fffc, 0x0, 0x8f820044, 0x3c03fffe,
-0x3463ffff, 0x431024, 0xaf820044, 0x8f830054,
-0x8f820054, 0x8004d45, 0x24630001, 0x8f820054,
-0x621023, 0x2c420002, 0x1440fffc, 0x0,
-0x3e00008, 0xa01021, 0x8f830044, 0x3c02fff0,
-0x3442ffff, 0x42480, 0x621824, 0x3c020002,
-0x822025, 0x641825, 0xaf830044, 0x8f820044,
-0x3c03fffe, 0x3463ffff, 0x431024, 0xaf820044,
-0x8f830054, 0x8f820054, 0x8004d5e, 0x24630001,
-0x8f820054, 0x621023, 0x2c420002, 0x1440fffc,
-0x0, 0x8f820044, 0x3c030001, 0x431025,
-0xaf820044, 0x8f830054, 0x8f820054, 0x8004d6b,
-0x24630001, 0x8f820054, 0x621023, 0x2c420002,
-0x1440fffc, 0x0, 0x3e00008, 0x0,
-0x8f820044, 0x3c03fff0, 0x3463ffff, 0x431024,
-0xaf820044, 0x8f820044, 0x3c030001, 0x431025,
-0xaf820044, 0x8f830054, 0x8f820054, 0x8004d7f,
-0x24630001, 0x8f820054, 0x621023, 0x2c420002,
-0x1440fffc, 0x0, 0x8f820044, 0x3c03fffe,
-0x3463ffff, 0x431024, 0xaf820044, 0x8f830054,
-0x8f820054, 0x8004d8d, 0x24630001, 0x8f820054,
-0x621023, 0x2c420002, 0x1440fffc, 0x0,
-0x3e00008, 0x0, 0x27bdffc8, 0xafb30024,
-0x809821, 0xafb5002c, 0xa0a821, 0xafb20020,
-0xc09021, 0x32a2ffff, 0xafbf0030, 0xafb40028,
-0xafb1001c, 0xafb00018, 0x14400034, 0xa7b20010,
-0x3271ffff, 0x27b20010, 0x8021, 0xc004d4b,
-0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
-0x0, 0xc004d4b, 0x2021, 0xc004d4b,
-0x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
-0x24040001, 0x24100010, 0x32020001, 0x10400002,
-0x2021, 0x24040001, 0xc004d4b, 0x108042,
-0x1600fffa, 0x32020001, 0x24100010, 0x2301024,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fffa, 0x2301024, 0xc004d4b,
-0x24040001, 0xc004d4b, 0x2021, 0x34108000,
-0x96420000, 0x501024, 0x10400002, 0x2021,
-0x24040001, 0xc004d4b, 0x108042, 0x12000075,
-0x0, 0x8004dc9, 0x0, 0x3274ffff,
-0x27b10010, 0xa7a00010, 0x8021, 0xc004d4b,
-0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
-0x0, 0xc004d4b, 0x2021, 0xc004d4b,
-0x24040001, 0xc004d4b, 0x24040001, 0xc004d4b,
-0x2021, 0x24100010, 0x32020001, 0x10400002,
-0x2021, 0x24040001, 0xc004d4b, 0x108042,
-0x1600fffa, 0x32020001, 0x24100010, 0x2901024,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fffa, 0x2901024, 0xc004d71,
-0x34108000, 0xc004d71, 0x0, 0xc004d2b,
-0x0, 0x50400005, 0x108042, 0x96220000,
-0x501025, 0xa6220000, 0x108042, 0x1600fff7,
-0x0, 0xc004d71, 0x0, 0x32a5ffff,
-0x24020001, 0x54a20004, 0x24020002, 0x97a20010,
-0x8004e14, 0x521025, 0x14a20006, 0x3271ffff,
-0x97a20010, 0x121827, 0x431024, 0xa7a20010,
-0x3271ffff, 0x27b20010, 0x8021, 0xc004d4b,
-0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
-0x0, 0xc004d4b, 0x2021, 0xc004d4b,
-0x24040001, 0xc004d4b, 0x2021, 0xc004d4b,
-0x24040001, 0x24100010, 0x32020001, 0x10400002,
-0x2021, 0x24040001, 0xc004d4b, 0x108042,
-0x1600fffa, 0x32020001, 0x24100010, 0x2301024,
-0x10400002, 0x2021, 0x24040001, 0xc004d4b,
-0x108042, 0x1600fffa, 0x2301024, 0xc004d4b,
-0x24040001, 0xc004d4b, 0x2021, 0x34108000,
-0x96420000, 0x501024, 0x10400002, 0x2021,
-0x24040001, 0xc004d4b, 0x108042, 0x1600fff8,
-0x0, 0xc004d71, 0x0, 0x8fbf0030,
-0x8fb5002c, 0x8fb40028, 0x8fb30024, 0x8fb20020,
-0x8fb1001c, 0x8fb00018, 0x3e00008, 0x27bd0038,
-0x0, 0x0, 0x0, 0x27bdffe8,
-0xafbf0010, 0x3c030001, 0x771821, 0x8c6383ac,
-0x24020008, 0x1462022c, 0x803021, 0x3c020001,
-0x8c425d98, 0x14400033, 0x0, 0x8f850224,
-0x38a30020, 0x2c630001, 0x38a20010, 0x2c420001,
-0x621825, 0x1460000d, 0x38a30030, 0x2c630001,
-0x38a20400, 0x2c420001, 0x621825, 0x14600007,
-0x38a30402, 0x2c630001, 0x38a20404, 0x2c420001,
-0x621825, 0x10600005, 0x0, 0xc00429b,
-0x0, 0x8004e8d, 0x2402000e, 0xc0043dd,
-0x0, 0x3c050001, 0x8ca55cc8, 0xc0052a2,
-0x2021, 0x3c030001, 0x8c635cc8, 0x24020004,
-0x14620005, 0x2403fffb, 0x3c020001, 0x8c425cc4,
-0x8004e89, 0x2403fff7, 0x3c020001, 0x8c425cc4,
-0x431024, 0x3c010001, 0xac225cc4, 0x2402000e,
-0x3c010001, 0xc00429b, 0xac227dd0, 0x8005087,
-0x0, 0x8f820220, 0x3c030400, 0x431024,
-0x10400027, 0x2403ffbf, 0x8f850224, 0x3c020001,
-0x8c427ddc, 0xa32024, 0x431024, 0x1482000c,
-0x0, 0x3c020001, 0x8c427de0, 0x24420001,
-0x3c010001, 0xac227de0, 0x2c420002, 0x14400008,
-0x24020001, 0x3c010001, 0x8004ead, 0xac227e00,
-0x3c010001, 0xac207de0, 0x3c010001, 0xac207e00,
-0x3c020001, 0x8c427e00, 0x10400006, 0x30a20040,
-0x10400004, 0x24020001, 0x3c010001, 0x8004eb8,
-0xac227e04, 0x3c010001, 0xac207e04, 0x3c010001,
-0xac257ddc, 0x3c010001, 0x8004ec8, 0xac207e10,
-0x24020001, 0x3c010001, 0xac227e10, 0x3c010001,
-0xac207e00, 0x3c010001, 0xac207de0, 0x3c010001,
-0xac207e04, 0x3c010001, 0xac207ddc, 0x3c030001,
-0x8c637dd0, 0x3c020001, 0x8c427dd4, 0x10620003,
-0x3c020200, 0x3c010001, 0xac237dd4, 0xc21024,
-0x10400007, 0x2463ffff, 0x8f820220, 0x24030001,
-0x3c010001, 0xac235ccc, 0x8005085, 0x3c03f700,
-0x2c62000e, 0x104001a8, 0x31080, 0x3c010001,
-0x220821, 0x8c225b80, 0x400008, 0x0,
-0x3c010001, 0xac207e00, 0x3c010001, 0xac207de0,
-0x3c010001, 0xac207ddc, 0x3c010001, 0xac207e04,
-0x3c010001, 0xac207df8, 0x3c010001, 0xac207df0,
-0xc00486a, 0xaf800224, 0x24020002, 0x3c010001,
-0xac227dd0, 0x3c020001, 0x8c427e10, 0x14400056,
-0x3c03fdff, 0x8ee20000, 0x3463ffff, 0x431024,
-0xc00429b, 0xaee20000, 0xaf800204, 0x8f820200,
-0x2403fffd, 0x431024, 0xaf820200, 0x3c010001,
-0xac207e20, 0x8f830054, 0x3c020001, 0x8c427df8,
-0x24040001, 0x3c010001, 0xac247e0c, 0x24420001,
-0x3c010001, 0xac227df8, 0x2c420004, 0x3c010001,
-0xac237df4, 0x14400006, 0x24020003, 0x3c010001,
-0xac245ccc, 0x3c010001, 0x8005083, 0xac207df8,
-0x3c010001, 0x8005083, 0xac227dd0, 0x8f830054,
-0x3c020001, 0x8c427df4, 0x2463d8f0, 0x431023,
-0x2c422710, 0x14400003, 0x24020004, 0x3c010001,
-0xac227dd0, 0x3c020001, 0x8c427e10, 0x14400026,
-0x3c03fdff, 0x8ee20000, 0x3463ffff, 0x431024,
-0x8005083, 0xaee20000, 0x3c040001, 0x8c845d9c,
-0x3c010001, 0xc00508a, 0xac207de8, 0x3c020001,
-0x8c427e1c, 0xaf820204, 0x3c020001, 0x8c427e10,
-0x14400015, 0x3c03fdff, 0x8ee20000, 0x3463ffff,
-0x431024, 0xaee20000, 0x8f820204, 0x30420030,
-0x1440013c, 0x24020002, 0x3c030001, 0x8c637e1c,
-0x24020005, 0x3c010001, 0xac227dd0, 0x3c010001,
-0x8005083, 0xac237e20, 0x3c020001, 0x8c427e10,
-0x10400010, 0x3c03fdff, 0x3c020001, 0x8c425d6c,
-0x24420001, 0x3c010001, 0xac225d6c, 0x2c420002,
-0x14400131, 0x24020001, 0x3c010001, 0xac225d74,
-0x3c010001, 0xac205d6c, 0x3c010001, 0x8005083,
-0xac225ccc, 0x8ee20000, 0x3463ffff, 0x431024,
-0xaee20000, 0x3c020001, 0x8c427e00, 0x10400122,
-0x0, 0x3c020001, 0x8c427ddc, 0x1040011e,
-0x0, 0x3c010001, 0xac227e08, 0x24020003,
-0x3c010001, 0xac227de0, 0x8005024, 0x24020006,
-0x3c010001, 0xac207de8, 0x8f820204, 0x34420040,
-0xaf820204, 0x3c020001, 0x8c427e20, 0x24030007,
-0x3c010001, 0xac237dd0, 0x34420040, 0x3c010001,
-0xac227e20, 0x3c020001, 0x8c427e00, 0x10400005,
-0x0, 0x3c020001, 0x8c427ddc, 0x104000f9,
-0x24020002, 0x3c050001, 0x24a57de0, 0x8ca20000,
-0x2c424e21, 0x104000f3, 0x24020002, 0x3c020001,
-0x8c427e04, 0x104000f8, 0x2404ffbf, 0x3c020001,
-0x8c427ddc, 0x3c030001, 0x8c637e08, 0x441024,
-0x641824, 0x10430004, 0x24020001, 0x3c010001,
-0x8005083, 0xac227dd0, 0x24020003, 0xaca20000,
-0x24020008, 0x3c010001, 0xac227dd0, 0x3c020001,
-0x8c427e0c, 0x1040000c, 0x24020001, 0x3c040001,
-0xc005097, 0x8c847ddc, 0x3c020001, 0x8c427e28,
-0x14400005, 0x24020001, 0x3c020001, 0x8c427e24,
-0x10400006, 0x24020001, 0x3c010001, 0xac225ccc,
-0x3c010001, 0x8005083, 0xac207df8, 0x3c020001,
-0x8c427df0, 0x3c030001, 0x8c637ddc, 0x2c420001,
-0x210c0, 0x30630008, 0x3c010001, 0xac227df0,
-0x3c010001, 0xac237dec, 0x8f830054, 0x24020009,
-0x3c010001, 0xac227dd0, 0x3c010001, 0x8005083,
-0xac237df4, 0x8f830054, 0x3c020001, 0x8c427df4,
-0x2463d8f0, 0x431023, 0x2c422710, 0x144000a8,
-0x0, 0x3c020001, 0x8c427e00, 0x10400005,
-0x0, 0x3c020001, 0x8c427ddc, 0x104000a9,
-0x24020002, 0x3c030001, 0x24637de0, 0x8c620000,
-0x2c424e21, 0x104000a3, 0x24020002, 0x3c020001,
-0x8c427e0c, 0x1040000e, 0x0, 0x3c020001,
-0x8c427ddc, 0x3c010001, 0xac207e0c, 0x30420080,
-0x1040002f, 0x2402000c, 0x8f820204, 0x30420080,
-0x1440000c, 0x24020003, 0x8005011, 0x2402000c,
-0x3c020001, 0x8c427ddc, 0x30420080, 0x14400005,
-0x24020003, 0x8f820204, 0x30420080, 0x1040001f,
-0x24020003, 0xac620000, 0x2402000a, 0x3c010001,
-0xac227dd0, 0x3c040001, 0x24847e18, 0x8c820000,
-0x3c030001, 0x8c637df0, 0x431025, 0xaf820204,
-0x8c830000, 0x3c040001, 0x8c847df0, 0x2402000b,
-0x3c010001, 0xac227dd0, 0x641825, 0x3c010001,
-0xac237e20, 0x3c050001, 0x24a57de0, 0x8ca20000,
-0x2c424e21, 0x1040006f, 0x24020002, 0x3c020001,
-0x8c427e10, 0x10400005, 0x0, 0x2402000c,
-0x3c010001, 0x8005083, 0xac227dd0, 0x3c020001,
-0x8c427e00, 0x1040006c, 0x0, 0x3c040001,
-0x8c847ddc, 0x1080005e, 0x30820008, 0x3c030001,
-0x8c637dec, 0x10620064, 0x24020003, 0x3c010001,
-0xac247e08, 0xaca20000, 0x24020006, 0x3c010001,
-0x8005083, 0xac227dd0, 0x8f820200, 0x34420002,
-0xaf820200, 0x8f830054, 0x2402000d, 0x3c010001,
-0xac227dd0, 0x3c010001, 0xac237df4, 0x8f830054,
-0x3c020001, 0x8c427df4, 0x2463d8f0, 0x431023,
-0x2c422710, 0x1440003a, 0x0, 0x3c020001,
-0x8c427e10, 0x10400029, 0x2402000e, 0x3c030001,
-0x8c637e24, 0x3c010001, 0x14600015, 0xac227dd0,
-0xc0043dd, 0x0, 0x3c050001, 0x8ca55cc8,
-0xc0052a2, 0x2021, 0x3c030001, 0x8c635cc8,
-0x24020004, 0x14620005, 0x2403fffb, 0x3c020001,
-0x8c425cc4, 0x8005052, 0x2403fff7, 0x3c020001,
-0x8c425cc4, 0x431024, 0x3c010001, 0xac225cc4,
-0x8ee20000, 0x3c030200, 0x431025, 0xaee20000,
-0x8f820224, 0x3c010001, 0xac227e2c, 0x8f820220,
-0x2403fffb, 0x431024, 0xaf820220, 0x8f820220,
-0x34420002, 0x8005083, 0xaf820220, 0x3c020001,
-0x8c427e00, 0x10400005, 0x0, 0x3c020001,
-0x8c427ddc, 0x1040000f, 0x24020002, 0x3c020001,
-0x8c427de0, 0x2c424e21, 0x1040000a, 0x24020002,
-0x3c020001, 0x8c427e00, 0x1040000f, 0x0,
-0x3c020001, 0x8c427ddc, 0x1440000b, 0x0,
-0x24020002, 0x3c010001, 0x8005083, 0xac227dd0,
-0x3c020001, 0x8c427e00, 0x10400003, 0x0,
-0xc00429b, 0x0, 0x8f820220, 0x3c03f700,
-0x431025, 0xaf820220, 0x8fbf0010, 0x3e00008,
-0x27bd0018, 0x3c030001, 0x24637e28, 0x8c620000,
-0x10400005, 0x34422000, 0x3c010001, 0xac227e1c,
-0x8005095, 0xac600000, 0x3c010001, 0xac247e1c,
-0x3e00008, 0x0, 0x27bdffe0, 0x30820030,
-0xafbf0018, 0x3c010001, 0xac227e24, 0x14400067,
-0x3c02ffff, 0x34421f0e, 0x821024, 0x14400061,
-0x24020030, 0x30822000, 0x1040005d, 0x30838000,
-0x31a02, 0x30820001, 0x21200, 0x3c040001,
-0x8c845d9c, 0x621825, 0x331c2, 0x3c030001,
-0x24635d78, 0x30828000, 0x21202, 0x30840001,
-0x42200, 0x441025, 0x239c2, 0x61080,
-0x431021, 0x471021, 0x90430000, 0x24020001,
-0x10620025, 0x0, 0x10600007, 0x24020002,
-0x10620013, 0x24020003, 0x1062002c, 0x3c05000f,
-0x80050f9, 0x0, 0x8f820200, 0x2403feff,
-0x431024, 0xaf820200, 0x8f820220, 0x3c03fffe,
-0x3463ffff, 0x431024, 0xaf820220, 0x3c010001,
-0xac207e44, 0x3c010001, 0x8005104, 0xac207e4c,
-0x8f820200, 0x34420100, 0xaf820200, 0x8f820220,
-0x3c03fffe, 0x3463ffff, 0x431024, 0xaf820220,
-0x24020100, 0x3c010001, 0xac227e44, 0x3c010001,
-0x8005104, 0xac207e4c, 0x8f820200, 0x2403feff,
-0x431024, 0xaf820200, 0x8f820220, 0x3c030001,
-0x431025, 0xaf820220, 0x3c010001, 0xac207e44,
-0x3c010001, 0x8005104, 0xac237e4c, 0x8f820200,
-0x34420100, 0xaf820200, 0x8f820220, 0x3c030001,
-0x431025, 0xaf820220, 0x24020100, 0x3c010001,
-0xac227e44, 0x3c010001, 0x8005104, 0xac237e4c,
-0x34a5ffff, 0x3c040001, 0x24845bb8, 0xafa30010,
-0xc002403, 0xafa00014, 0x8005104, 0x0,
-0x24020030, 0x3c010001, 0xac227e28, 0x8fbf0018,
-0x3e00008, 0x27bd0020, 0x0, 0x27bdffc8,
-0xafb20028, 0x809021, 0xafb3002c, 0xa09821,
-0xafb00020, 0xc08021, 0x3c040001, 0x24845bd0,
-0x3c050009, 0x3c020001, 0x8c425cc8, 0x34a59001,
-0x2403021, 0x2603821, 0xafbf0030, 0xafb10024,
-0xa7a0001a, 0xafb00014, 0xc002403, 0xafa20010,
-0x24020002, 0x12620083, 0x2e620003, 0x10400005,
-0x24020001, 0x1262000a, 0x0, 0x800529b,
-0x0, 0x24020004, 0x126200fa, 0x24020008,
-0x126200f9, 0x3c02ffec, 0x800529b, 0x0,
-0x3c020001, 0x8c425cc4, 0x30420002, 0x14400004,
-0x128940, 0x3c02fffb, 0x3442ffff, 0x2028024,
-0x3c010001, 0x310821, 0xac307e3c, 0x3c024000,
-0x2021024, 0x1040004e, 0x1023c2, 0x30840030,
-0x101382, 0x3042001c, 0x3c030001, 0x24635d08,
-0x431021, 0x823821, 0x3c020020, 0x2021024,
-0x10400006, 0x24020100, 0x3c010001, 0x310821,
-0xac227e40, 0x8005150, 0x3c020080, 0x3c010001,
-0x310821, 0xac207e40, 0x3c020080, 0x2021024,
-0x10400006, 0x121940, 0x3c020001, 0x3c010001,
-0x230821, 0x800515c, 0xac227e48, 0x121140,
-0x3c010001, 0x220821, 0xac207e48, 0x94e40000,
-0x3c030001, 0x8c635dbc, 0x24020005, 0x10620010,
-0xa7a40018, 0x32024000, 0x10400002, 0x34824000,
-0xa7a20018, 0x24040001, 0x94e20002, 0x24050004,
-0x24e60002, 0x34420001, 0xc00498e, 0xa4e20002,
-0x24040001, 0x2821, 0xc00498e, 0x27a60018,
-0x3c020001, 0x8c425cc8, 0x24110001, 0x3c010001,
-0xac315cd4, 0x14530004, 0x32028000, 0xc00429b,
-0x0, 0x32028000, 0x1040011f, 0x0,
-0xc00429b, 0x0, 0x3c030001, 0x8c635dbc,
-0x24020005, 0x10620118, 0x24020002, 0x3c010001,
-0xac315ccc, 0x3c010001, 0x800529b, 0xac225cc8,
-0x24040001, 0x24050004, 0x27b0001a, 0xc00498e,
-0x2003021, 0x24040001, 0x2821, 0xc00498e,
-0x2003021, 0x3c020001, 0x511021, 0x8c427e34,
-0x3c040001, 0x8c845cc8, 0x3c03bfff, 0x3463ffff,
-0x3c010001, 0xac335cd4, 0x431024, 0x3c010001,
-0x310821, 0x109300fa, 0xac227e34, 0x800529b,
-0x0, 0x3c022000, 0x2021024, 0x10400005,
-0x24020001, 0x3c010001, 0xac225d98, 0x80051ad,
-0x128940, 0x3c010001, 0xac205d98, 0x128940,
-0x3c010001, 0x310821, 0xac307e38, 0x3c024000,
-0x2021024, 0x14400016, 0x0, 0x3c020001,
-0x8c425d98, 0x10400008, 0x24040004, 0x24050001,
-0xc004d93, 0x24062000, 0x24020001, 0x3c010001,
-0x370821, 0xac2283ac, 0x3c020001, 0x511021,
-0x8c427e30, 0x3c03bfff, 0x3463ffff, 0x431024,
-0x3c010001, 0x310821, 0x8005299, 0xac227e30,
-0x3c020001, 0x8c425d98, 0x10400028, 0x3c0300a0,
-0x2031024, 0x5443000d, 0x3c020020, 0x3c020001,
-0x8c425d9c, 0x24030100, 0x3c010001, 0x310821,
-0xac237e44, 0x3c030001, 0x3c010001, 0x310821,
-0xac237e4c, 0x80051f0, 0x34420400, 0x2021024,
-0x10400008, 0x24030100, 0x3c020001, 0x8c425d9c,
-0x3c010001, 0x310821, 0xac237e44, 0x80051f0,
-0x34420800, 0x3c020080, 0x2021024, 0x1040002e,
-0x3c030001, 0x3c020001, 0x8c425d9c, 0x3c010001,
-0x310821, 0xac237e4c, 0x34420c00, 0x3c010001,
-0xac225d9c, 0x8005218, 0x24040001, 0x3c020020,
-0x2021024, 0x10400006, 0x24020100, 0x3c010001,
-0x310821, 0xac227e44, 0x8005201, 0x3c020080,
-0x3c010001, 0x310821, 0xac207e44, 0x3c020080,
-0x2021024, 0x10400007, 0x121940, 0x3c020001,
-0x3c010001, 0x230821, 0xac227e4c, 0x800520f,
-0x24040001, 0x121140, 0x3c010001, 0x220821,
-0xac207e4c, 0x24040001, 0x2821, 0x27b0001e,
-0xc00494c, 0x2003021, 0x24040001, 0x2821,
-0xc00494c, 0x2003021, 0x24040001, 0x24050001,
-0x27b0001c, 0xc00494c, 0x2003021, 0x24040001,
-0x24050001, 0xc00494c, 0x2003021, 0x8005299,
-0x0, 0x3c02ffec, 0x3442ffff, 0x2028024,
-0x3c020008, 0x2028025, 0x121140, 0x3c010001,
-0x220821, 0xac307e38, 0x3c022000, 0x2021024,
-0x10400009, 0x0, 0x3c020001, 0x8c425d74,
-0x14400005, 0x24020001, 0x3c010001, 0xac225d98,
-0x800523a, 0x3c024000, 0x3c010001, 0xac205d98,
-0x3c024000, 0x2021024, 0x1440001e, 0x0,
-0x3c020001, 0x8c425d98, 0x3c010001, 0xac205ce0,
-0x10400007, 0x24022020, 0x3c010001, 0xac225d9c,
-0x24020001, 0x3c010001, 0x370821, 0xac2283ac,
-0x3c04bfff, 0x121940, 0x3c020001, 0x431021,
-0x8c427e30, 0x3c050001, 0x8ca55cc8, 0x3484ffff,
-0x441024, 0x3c010001, 0x230821, 0xac227e30,
-0x24020001, 0x10a20044, 0x0, 0x8005299,
-0x0, 0x3c020001, 0x8c425d98, 0x1040001c,
-0x24022000, 0x3c010001, 0xac225d9c, 0x3c0300a0,
-0x2031024, 0x14430005, 0x121140, 0x3402a000,
-0x3c010001, 0x8005294, 0xac225d9c, 0x3c030001,
-0x621821, 0x8c637e38, 0x3c020020, 0x621024,
-0x10400004, 0x24022001, 0x3c010001, 0x8005294,
-0xac225d9c, 0x3c020080, 0x621024, 0x1040001f,
-0x3402a001, 0x3c010001, 0x8005294, 0xac225d9c,
-0x3c020020, 0x2021024, 0x10400007, 0x121940,
-0x24020100, 0x3c010001, 0x230821, 0xac227e44,
-0x8005288, 0x3c020080, 0x121140, 0x3c010001,
-0x220821, 0xac207e44, 0x3c020080, 0x2021024,
-0x10400006, 0x121940, 0x3c020001, 0x3c010001,
-0x230821, 0x8005294, 0xac227e4c, 0x121140,
-0x3c010001, 0x220821, 0xac207e4c, 0x3c030001,
-0x8c635cc8, 0x24020001, 0x10620003, 0x0,
-0xc00429b, 0x0, 0x8fbf0030, 0x8fb3002c,
-0x8fb20028, 0x8fb10024, 0x8fb00020, 0x3e00008,
-0x27bd0038, 0x27bdffd8, 0xafb20020, 0x809021,
-0xafb1001c, 0x8821, 0x24020002, 0xafbf0024,
-0xafb00018, 0xa7a00012, 0x10a200d3, 0xa7a00010,
-0x2ca20003, 0x10400005, 0x24020001, 0x10a2000a,
-0x128140, 0x8005380, 0x2201021, 0x24020004,
-0x10a2007d, 0x24020008, 0x10a2007c, 0x122940,
-0x8005380, 0x2201021, 0x3c030001, 0x701821,
-0x8c637e3c, 0x3c024000, 0x621024, 0x14400009,
-0x24040001, 0x3c027fff, 0x3442ffff, 0x628824,
-0x3c010001, 0x300821, 0xac317e34, 0x8005380,
-0x2201021, 0x24050001, 0xc00494c, 0x27a60010,
-0x24040001, 0x24050001, 0xc00494c, 0x27a60010,
-0x97a20010, 0x30420004, 0x10400034, 0x3c114000,
-0x3c020001, 0x8c425dbc, 0x2443ffff, 0x2c620006,
-0x10400034, 0x31080, 0x3c010001, 0x220821,
-0x8c225be0, 0x400008, 0x0, 0x24040001,
-0x24050011, 0x27b00012, 0xc00494c, 0x2003021,
-0x24040001, 0x24050011, 0xc00494c, 0x2003021,
-0x97a50012, 0x30a24000, 0x10400002, 0x3c040010,
-0x3c040008, 0x3c030001, 0x8005301, 0x30a28000,
-0x24040001, 0x24050014, 0x27b00012, 0xc00494c,
-0x2003021, 0x24040001, 0x24050014, 0xc00494c,
-0x2003021, 0x97a50012, 0x30a21000, 0x10400002,
-0x3c040010, 0x3c040008, 0x3c030001, 0x30a20800,
-0x54400001, 0x3c030002, 0x3c028000, 0x2221025,
-0x641825, 0x800530e, 0x438825, 0x3c110001,
-0x2308821, 0x8e317e3c, 0x3c027fff, 0x3442ffff,
-0x2228824, 0x3c020001, 0x8c425cd8, 0x1040001d,
-0x121140, 0x3c020001, 0x8c425d98, 0x10400002,
-0x3c022000, 0x2228825, 0x121140, 0x3c010001,
-0x220821, 0x8c227e40, 0x10400003, 0x3c020020,
-0x8005322, 0x2228825, 0x3c02ffdf, 0x3442ffff,
-0x2228824, 0x121140, 0x3c010001, 0x220821,
-0x8c227e48, 0x10400003, 0x3c020080, 0x800532d,
-0x2228825, 0x3c02ff7f, 0x3442ffff, 0x2228824,
-0x121140, 0x3c010001, 0x220821, 0xac317e34,
-0x8005380, 0x2201021, 0x122940, 0x3c030001,
-0x651821, 0x8c637e38, 0x3c024000, 0x621024,
-0x14400008, 0x3c027fff, 0x3442ffff, 0x628824,
-0x3c010001, 0x250821, 0xac317e30, 0x8005380,
-0x2201021, 0x3c020001, 0x8c425cd8, 0x10400033,
-0x3c11c00c, 0x3c020001, 0x8c425d74, 0x3c04c00c,
-0x34842000, 0x3c030001, 0x8c635d98, 0x2102b,
-0x21023, 0x441024, 0x10600003, 0x518825,
-0x3c022000, 0x2228825, 0x3c020001, 0x451021,
-0x8c427e44, 0x10400003, 0x3c020020, 0x800535d,
-0x2228825, 0x3c02ffdf, 0x3442ffff, 0x2228824,
-0x121140, 0x3c010001, 0x220821, 0x8c227e4c,
-0x10400003, 0x3c020080, 0x8005368, 0x2228825,
-0x3c02ff7f, 0x3442ffff, 0x2228824, 0x3c020001,
-0x8c425d60, 0x10400002, 0x3c020800, 0x2228825,
-0x3c020001, 0x8c425d64, 0x10400002, 0x3c020400,
-0x2228825, 0x3c020001, 0x8c425d68, 0x10400006,
-0x3c020100, 0x800537b, 0x2228825, 0x3c027fff,
-0x3442ffff, 0x628824, 0x121140, 0x3c010001,
-0x220821, 0xac317e30, 0x2201021, 0x8fbf0024,
-0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x3e00008,
-0x27bd0028, 0x27bdffd8, 0xafb40020, 0x80a021,
-0xafbf0024, 0xafb3001c, 0xafb20018, 0xafb10014,
-0xafb00010, 0x8f900200, 0x3c030001, 0x8c635cc8,
-0x8f930220, 0x24020002, 0x10620063, 0x2c620003,
-0x10400005, 0x24020001, 0x1062000a, 0x141940,
-0x8005448, 0x0, 0x24020004, 0x1062005a,
-0x24020008, 0x10620059, 0x149140, 0x8005448,
-0x0, 0x3c040001, 0x832021, 0x8c847e3c,
-0x3c110001, 0x2238821, 0x8e317e34, 0x3c024000,
-0x821024, 0x1040003e, 0x3c020008, 0x2221024,
-0x10400020, 0x36100002, 0x3c020001, 0x431021,
-0x8c427e40, 0x10400005, 0x36100020, 0x36100100,
-0x3c020020, 0x80053bd, 0x2228825, 0x2402feff,
-0x2028024, 0x3c02ffdf, 0x3442ffff, 0x2228824,
-0x141140, 0x3c010001, 0x220821, 0x8c227e48,
-0x10400005, 0x3c020001, 0x2629825, 0x3c020080,
-0x80053dc, 0x2228825, 0x3c02fffe, 0x3442ffff,
-0x2629824, 0x3c02ff7f, 0x3442ffff, 0x80053dc,
-0x2228824, 0x2402fedf, 0x2028024, 0x3c02fffe,
-0x3442ffff, 0x2629824, 0x3c02ff5f, 0x3442ffff,
-0x2228824, 0x3c010001, 0x230821, 0xac207e40,
-0x3c010001, 0x230821, 0xac207e48, 0xc00486a,
-0x0, 0xaf900200, 0xaf930220, 0x8f820220,
-0x2403fffb, 0x431024, 0xaf820220, 0x8f820220,
-0x34420002, 0xaf820220, 0x80053f3, 0x141140,
-0x8f820200, 0x2403fffd, 0x431024, 0xc00486a,
-0xaf820200, 0x3c02bfff, 0x3442ffff, 0xc00429b,
-0x2228824, 0x141140, 0x3c010001, 0x220821,
-0x8005448, 0xac317e34, 0x149140, 0x3c040001,
-0x922021, 0x8c847e38, 0x3c110001, 0x2328821,
-0x8e317e30, 0x3c024000, 0x821024, 0x14400011,
-0x0, 0x3c020001, 0x8c425d98, 0x14400006,
-0x3c02bfff, 0x8f820200, 0x34420002, 0xc00486a,
-0xaf820200, 0x3c02bfff, 0x3442ffff, 0xc00429b,
-0x2228824, 0x3c010001, 0x320821, 0x8005448,
-0xac317e30, 0x3c020001, 0x8c425d98, 0x10400005,
-0x3c020020, 0x3c020001, 0x8c425d74, 0x1040002b,
-0x3c020020, 0x821024, 0x10400007, 0x36100020,
-0x24020100, 0x3c010001, 0x320821, 0xac227e44,
-0x8005428, 0x36100100, 0x3c010001, 0x320821,
-0xac207e44, 0x2402feff, 0x2028024, 0x3c020080,
-0x821024, 0x10400007, 0x141940, 0x3c020001,
-0x3c010001, 0x230821, 0xac227e4c, 0x8005439,
-0x2629825, 0x141140, 0x3c010001, 0x220821,
-0xac207e4c, 0x3c02fffe, 0x3442ffff, 0x2629824,
-0xc00486a, 0x0, 0xaf900200, 0xaf930220,
-0x8f820220, 0x2403fffb, 0x431024, 0xaf820220,
-0x8f820220, 0x34420002, 0xaf820220, 0x141140,
-0x3c010001, 0x220821, 0xac317e30, 0x8fbf0024,
-0x8fb40020, 0x8fb3001c, 0x8fb20018, 0x8fb10014,
-0x8fb00010, 0x3e00008, 0x27bd0028, 0x0 };
-static u32 tigonFwRodata[(MAX_RODATA_LEN/4) + 1] __devinitdata = {
-0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
-0x2f66776d, 0x61696e2e, 0x632c7620, 0x312e312e,
-0x322e3131, 0x20313939, 0x382f3034, 0x2f323720,
-0x32323a31, 0x333a3432, 0x20736875, 0x616e6720,
-0x45787020, 0x24000000, 0x7468655f, 0x4441574e,
-0x0, 0x53544143, 0x4b5f3120, 0x0,
-0x42616453, 0x6e64526e, 0x67000000, 0x3f456e71,
-0x45767400, 0x3f6e6f51, 0x64457650, 0x0,
-0x6576526e, 0x6746756c, 0x6c000000, 0x496c6c43,
-0x6f6e6652, 0x78000000, 0x53656e64, 0x436b5375,
-0x6d000000, 0x52656376, 0x566c616e, 0x0,
-0x0, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
-0x2f74696d, 0x65722e63, 0x2c762031, 0x2e312e32,
-0x2e382031, 0x3939382f, 0x30372f33, 0x31203137,
-0x3a35383a, 0x34352073, 0x6875616e, 0x67204578,
-0x70202400, 0x542d446d, 0x61526431, 0x0,
-0x542d446d, 0x61424200, 0x542d446d, 0x61320000,
-0x3f6e6f51, 0x64547845, 0x0, 0x3f6e6f51,
-0x64527845, 0x0, 0x656e714d, 0x45765046,
-0x61696c00, 0x656e714d, 0x45764661, 0x696c0000,
-0x6661696c, 0x456e454d, 0x0, 0x3f456e71,
-0x45767400, 0x3f6e6f51, 0x64457650, 0x0,
-0x6576526e, 0x6746756c, 0x6c000000, 0x0,
-0x0, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
-0x2f636f6d, 0x6d616e64, 0x2e632c76, 0x20312e31,
-0x2e322e31, 0x30203139, 0x39382f31, 0x312f3138,
-0x2031373a, 0x31313a31, 0x38207368, 0x75616e67,
-0x20457870, 0x20240000, 0x3f4d626f, 0x78457674,
-0x0, 0x4e4f636f, 0x6d616e64, 0x0,
-0x68737465, 0x5f455252, 0x0, 0x412d4572,
-0x72427563, 0x0, 0x4552524f, 0x522d4164,
-0x64000000, 0x656e714d, 0x45765046, 0x61696c00,
-0x656e714d, 0x45764661, 0x696c0000, 0x6661696c,
-0x456e454d, 0x0, 0x442d4572, 0x724c6173,
-0x74000000, 0x442d4572, 0x72320000, 0x6d437374,
-0x4d644552, 0x52000000, 0x70726f6d, 0x4d644552,
-0x52000000, 0x46696c74, 0x4d644552, 0x52000000,
-0x636d645f, 0x45525200, 0x3f456e71, 0x45767400,
-0x3f6e6f51, 0x64457650, 0x0, 0x6576526e,
-0x6746756c, 0x6c000000, 0x0, 0x6ea0,
-0x7fbc, 0x6e38, 0x8734, 0x82b0,
-0x8780, 0x8780, 0x6f54, 0x7694,
-0x7f0c, 0x80a8, 0x8074, 0x8780,
-0x7e70, 0x80cc, 0x6e64, 0x81cc,
-0x0, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
-0x2f646d61, 0x2e632c76, 0x20312e31, 0x2e322e33,
-0x20313939, 0x382f3034, 0x2f323720, 0x32323a31,
-0x333a3431, 0x20736875, 0x616e6720, 0x45787020,
-0x24000000, 0x646d6172, 0x6441544e, 0x0,
-0x646d6177, 0x7241544e, 0x0, 0x0,
-0x0, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
-0x2f747261, 0x63652e63, 0x2c762031, 0x2e312e32,
-0x2e322031, 0x3939382f, 0x30342f32, 0x37203232,
-0x3a31333a, 0x35302073, 0x6875616e, 0x67204578,
-0x70202400, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
-0x2f646174, 0x612e632c, 0x7620312e, 0x312e322e,
-0x32203139, 0x39382f30, 0x342f3237, 0x2032323a,
-0x31333a34, 0x30207368, 0x75616e67, 0x20457870,
-0x20240000, 0x46575f56, 0x45525349, 0x4f4e3a20,
-0x23312046, 0x72692041, 0x70722037, 0x2031373a,
-0x35353a34, 0x38205044, 0x54203230, 0x30300000,
-0x46575f43, 0x4f4d5049, 0x4c455f54, 0x494d453a,
-0x2031373a, 0x35353a34, 0x38000000, 0x46575f43,
-0x4f4d5049, 0x4c455f42, 0x593a2064, 0x65767263,
-0x73000000, 0x46575f43, 0x4f4d5049, 0x4c455f48,
-0x4f53543a, 0x20636f6d, 0x70757465, 0x0,
-0x46575f43, 0x4f4d5049, 0x4c455f44, 0x4f4d4149,
-0x4e3a2065, 0x6e672e61, 0x6374656f, 0x6e2e636f,
-0x6d000000, 0x46575f43, 0x4f4d5049, 0x4c45523a,
-0x20676363, 0x20766572, 0x73696f6e, 0x20322e37,
-0x2e320000, 0x0, 0x0, 0x0,
-0x0, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
-0x2f6d656d, 0x2e632c76, 0x20312e31, 0x2e322e32,
-0x20313939, 0x382f3034, 0x2f323720, 0x32323a31,
-0x333a3434, 0x20736875, 0x616e6720, 0x45787020,
-0x24000000, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
-0x2f73656e, 0x642e632c, 0x7620312e, 0x312e322e,
-0x31312031, 0x3939382f, 0x31322f32, 0x32203137,
-0x3a31373a, 0x35352073, 0x6875616e, 0x67204578,
-0x70202400, 0x736e6464, 0x654e6f51, 0x20000000,
-0x6e6f454e, 0x515f5458, 0x0, 0x736e6464,
-0x744e6f51, 0x20000000, 0x3f6e6f51, 0x64547845,
-0x0, 0x756e6b72, 0x64747970, 0x65000000,
-0x0, 0xaccc, 0xaccc, 0xad9c,
-0xaab0, 0xaab0, 0xad9c, 0xad9c,
-0xad9c, 0xad9c, 0xad9c, 0xad9c,
-0xad9c, 0xad9c, 0xad9c, 0xad9c,
-0xad9c, 0xad9c, 0xad9c, 0xad7c,
-0x0, 0xbca8, 0xbca8, 0xbd70,
-0xae4c, 0xb058, 0xbd70, 0xbd70,
-0xbd70, 0xbd70, 0xbd70, 0xbd70,
-0xbd70, 0xbd70, 0xbd70, 0xbd70,
-0xbd70, 0xbd70, 0xbd70, 0xbd54,
-0xb040, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
-0x2f726563, 0x762e632c, 0x7620312e, 0x312e322e,
-0x31392031, 0x3939382f, 0x30372f32, 0x34203231,
-0x3a33303a, 0x30352073, 0x6875616e, 0x67204578,
-0x70202400, 0x706b5278, 0x45525200, 0x66726d32,
-0x4c617267, 0x65000000, 0x72784e6f, 0x52784264,
-0x0, 0x72785144, 0x6d614446, 0x0,
-0x72785144, 0x6d614246, 0x0, 0x3f6e6f51,
-0x64527845, 0x0, 0x706b5278, 0x45525273,
-0x0, 0x66726d32, 0x4c726753, 0x0,
-0x72784e6f, 0x42645300, 0x3f724264, 0x446d6146,
-0x0, 0x3f724a42, 0x64446d46, 0x0,
-0x0, 0xf678, 0xf678, 0xf678,
-0xf678, 0xf678, 0xf678, 0xf678,
-0xf678, 0xf678, 0xf678, 0xf678,
-0xf678, 0xf678, 0xf678, 0xf678,
-0xf670, 0xf670, 0xf670, 0x572d444d,
-0x41456e46, 0x0, 0x0, 0xfdc0,
-0x1015c, 0xfddc, 0x1015c, 0x1015c,
-0x1015c, 0x1015c, 0x1015c, 0x1015c,
-0xf704, 0x1015c, 0x1015c, 0x1015c,
-0x1015c, 0x1015c, 0x10154, 0x10154,
-0x10154, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
-0x2f6d6163, 0x2e632c76, 0x20312e31, 0x2e322e31,
-0x32203139, 0x39382f30, 0x342f3237, 0x2032323a,
-0x31333a34, 0x32207368, 0x75616e67, 0x20457870,
-0x20240000, 0x6d616374, 0x7841544e, 0x0,
-0x4e745379, 0x6e264c6b, 0x0, 0x72656d61,
-0x73737274, 0x0, 0x6c696e6b, 0x444f574e,
-0x0, 0x656e714d, 0x45765046, 0x61696c00,
-0x656e714d, 0x45764661, 0x696c0000, 0x6661696c,
-0x456e454d, 0x0, 0x6c696e6b, 0x55500000,
-0x0, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
-0x2f636b73, 0x756d2e63, 0x2c762031, 0x2e312e32,
-0x2e322031, 0x3939382f, 0x30342f32, 0x37203232,
-0x3a31333a, 0x33392073, 0x6875616e, 0x67204578,
-0x70202400, 0x50726f62, 0x65506879, 0x0,
-0x6c6e6b41, 0x53535254, 0x0, 0x11b2c,
-0x11bc4, 0x11bf8, 0x11c2c, 0x11c58,
-0x11c6c, 0x11ca8, 0x1207c, 0x11de4,
-0x11e24, 0x11e50, 0x11e90, 0x11ec0,
-0x11efc, 0x11f30, 0x1207c, 0x122c0,
-0x122d8, 0x12300, 0x12320, 0x12348,
-0x12478, 0x124a0, 0x124f4, 0x1251c,
-0x0, 0x1278c, 0x1285c, 0x12934,
-0x12a04, 0x12a60, 0x12b3c, 0x12b64,
-0x12c40, 0x12c68, 0x12e10, 0x12e38,
-0x12fe0, 0x131d8, 0x1346c, 0x13380,
-0x1346c, 0x13498, 0x13008, 0x131b0,
-0x0, 0x13b84, 0x13bc8, 0x13c60,
-0x13cac, 0x13d1c, 0x13db4, 0x13de8,
-0x13e70, 0x13f08, 0x13fd8, 0x14018,
-0x1409c, 0x140c0, 0x141f4, 0x646f4261,
-0x73655067, 0x0, 0x0, 0x0,
-0x0, 0x73746d61, 0x634c4e4b, 0x0,
-0x0, 0x14c38, 0x14c38, 0x14b80,
-0x14bc4, 0x14c38, 0x14c38, 0x0,
-0x0, 0x0 };
-static u32 tigonFwData[(MAX_DATA_LEN/4) + 1] __devinitdata = {
-0x416c7465,
-0x6f6e2041, 0x63654e49, 0x43205600, 0x416c7465,
-0x6f6e2041, 0x63654e49, 0x43205600, 0x42424242,
-0x0, 0x0, 0x0, 0x135418,
-0x13e7fc, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x60cf00,
-0x60, 0xcf000000, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x3, 0x0,
-0x1, 0x0, 0x0, 0x0,
-0x1, 0x0, 0x1, 0x0,
-0x0, 0x0, 0x0, 0x1,
-0x1, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x1000000, 0x21000000,
-0x12000140, 0x0, 0x0, 0x20000000,
-0x120000a0, 0x0, 0x12000060, 0x12000180,
-0x120001e0, 0x0, 0x0, 0x0,
-0x1, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x2,
-0x0, 0x0, 0x30001, 0x1,
-0x30201, 0x0, 0x0, 0x0 };
-#endif
-/* Generated by genfw.c */
-#define tigon2FwReleaseMajor 0xc
-#define tigon2FwReleaseMinor 0x4
-#define tigon2FwReleaseFix 0xb
-#define tigon2FwStartAddr 0x00004000
-#define tigon2FwTextAddr 0x00004000
-#define tigon2FwTextLen 0x11bc0
-#define tigon2FwRodataAddr 0x00015bc0
-#define tigon2FwRodataLen 0x10d0
-#define tigon2FwDataAddr 0x00016cc0
-#define tigon2FwDataLen 0x1c0
-#define tigon2FwSbssAddr 0x00016e80
-#define tigon2FwSbssLen 0xcc
-#define tigon2FwBssAddr 0x00016f50
-#define tigon2FwBssLen 0x20c0
-static u32 tigon2FwText[(MAX_TEXT_LEN/4) + 1] __devinitdata = {
-0x0,
-0x10000003, 0x0, 0xd, 0xd,
-0x3c1d0001, 0x8fbd6d20, 0x3a0f021, 0x3c100000,
-0x26104000, 0xc0010c0, 0x0, 0xd,
-0x3c1d0001, 0x8fbd6d24, 0x3a0f021, 0x3c100000,
-0x26104000, 0xc0017e0, 0x0, 0xd,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x2000008,
-0x0, 0x800172f, 0x3c0a0001, 0x800172f,
-0x3c0a0002, 0x800172f, 0x0, 0x8002cac,
-0x0, 0x8002c4f, 0x0, 0x800172f,
-0x3c0a0004, 0x800328a, 0x0, 0x8001a52,
-0x0, 0x800394d, 0x0, 0x80038f4,
-0x0, 0x800172f, 0x3c0a0006, 0x80039bb,
-0x3c0a0007, 0x800172f, 0x3c0a0008, 0x800172f,
-0x3c0a0009, 0x8003a13, 0x0, 0x8002ea6,
-0x0, 0x800172f, 0x3c0a000b, 0x800172f,
-0x3c0a000c, 0x800172f, 0x3c0a000d, 0x80028fb,
-0x0, 0x8002890, 0x0, 0x800172f,
-0x3c0a000e, 0x800208c, 0x0, 0x8001964,
-0x0, 0x8001a04, 0x0, 0x8003ca6,
-0x0, 0x8003c94, 0x0, 0x800172f,
-0x0, 0x800191a, 0x0, 0x800172f,
-0x0, 0x800172f, 0x3c0a0013, 0x800172f,
-0x3c0a0014, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x27bdffe0,
-0x3c1cc000, 0xafbf001c, 0xafb00018, 0x8f820140,
-0x24030003, 0xaf8300ec, 0x34420004, 0xc002b20,
-0xaf820140, 0x3c0100c0, 0xc001763, 0xac203ffc,
-0x401821, 0x3c020010, 0x3c010001, 0xac236e9c,
-0x10620011, 0x43102b, 0x14400002, 0x3c020020,
-0x3c020008, 0x1062000c, 0x24050100, 0x3c060001,
-0x8cc66e9c, 0x3c040001, 0x24845c74, 0x3821,
-0xafa00010, 0xc002b3b, 0xafa00014, 0x3c020020,
-0x3c010001, 0xac226e9c, 0x24020008, 0x3c010001,
-0xac226eb4, 0x2402001f, 0x3c010001, 0xac226ec4,
-0x24020016, 0x3c010001, 0xac226e98, 0x3c05fffe,
-0x34a56f08, 0x3c020001, 0x8c426e9c, 0x3c030002,
-0x24639010, 0x3c040001, 0x8c846cc4, 0x431023,
-0x14800002, 0x458021, 0x2610fa38, 0x2402f000,
-0x2028024, 0xc001785, 0x2002021, 0x2022823,
-0x3c040020, 0x821823, 0x651823, 0x247bb000,
-0x3c03fffe, 0x3463bf08, 0x363b821, 0x3c0600bf,
-0x34c6f000, 0x3c070001, 0x8ce76cc0, 0x3c0300bf,
-0x3463e000, 0x852023, 0x3c010001, 0xac246ea8,
-0x822023, 0x3c010001, 0xac256e90, 0x52842,
-0x3c010001, 0xac226e84, 0x27620ffc, 0x3c010001,
-0xac226d20, 0x27621ffc, 0xdb3023, 0x7b1823,
-0x3c010001, 0xac246e88, 0x3c010001, 0xac256eac,
-0x3c010001, 0xac226d24, 0xaf860150, 0x10e00011,
-0xaf830250, 0x3c1d0001, 0x8fbd6ccc, 0x3a0f021,
-0xc001749, 0x0, 0x3c020001, 0x8c426cd0,
-0x3c030001, 0x8c636cd4, 0x2442fe00, 0x24630200,
-0x3c010001, 0xac226cd0, 0x3c010001, 0x10000004,
-0xac236cd4, 0x3c1d0001, 0x8fbd6d20, 0x3a0f021,
-0x3c020001, 0x8c426cc4, 0x1040000d, 0x26fafa38,
-0x3c020001, 0x8c426cd0, 0x3c030001, 0x8c636cd4,
-0x3c1a0001, 0x8f5a6cd4, 0x2442fa38, 0x246305c8,
-0x3c010001, 0xac226cd0, 0x3c010001, 0xac236cd4,
-0x3c020001, 0x8c426cc8, 0x14400003, 0x0,
-0x3c010001, 0xac206cd0, 0xc001151, 0x0,
-0x8fbf001c, 0x8fb00018, 0x3e00008, 0x27bd0020,
-0x3c020001, 0x8c426cd0, 0x3c030001, 0x8c636cd4,
-0x27bdff98, 0xafb00048, 0x3c100001, 0x8e1066b8,
-0xafb20050, 0x3c120000, 0x26524100, 0xafbf0060,
-0xafbe005c, 0xafb50058, 0xafb30054, 0xafb1004c,
-0xafa20034, 0xafa30030, 0xafa00010, 0xafa00014,
-0x8f860040, 0x3c040001, 0x24845c80, 0x24050200,
-0x3c010001, 0xac326e80, 0xc002b3b, 0x2003821,
-0x8f830040, 0x3c02f000, 0x621824, 0x3c026000,
-0x1062000b, 0xa3a0003f, 0x240e0001, 0x3c040001,
-0x24845c88, 0xa3ae003f, 0xafa00010, 0xafa00014,
-0x8f860040, 0x24050300, 0xc002b3b, 0x2003821,
-0x8f820240, 0x3c030001, 0x431025, 0xaf820240,
-0xaf800048, 0x8f820048, 0x14400005, 0x0,
-0xaf800048, 0x8f820048, 0x10400004, 0x0,
-0xaf800048, 0x10000003, 0x2e02021, 0xaf80004c,
-0x2e02021, 0x3c050001, 0xc002ba8, 0x34a540f8,
-0x3402021, 0xc002ba8, 0x240505c8, 0x3c020001,
-0x8c426ea8, 0x3c0d0001, 0x8dad6e88, 0x3c030001,
-0x8c636e84, 0x3c080001, 0x8d086e90, 0x3c090001,
-0x8d296eac, 0x3c0a0001, 0x8d4a6eb4, 0x3c0b0001,
-0x8d6b6ec4, 0x3c0c0001, 0x8d8c6e98, 0x3c040001,
-0x24845c94, 0x24050400, 0xaf42013c, 0x8f42013c,
-0x24060001, 0x24070001, 0xaf400000, 0xaf4d0138,
-0xaf430144, 0xaf480148, 0xaf49014c, 0xaf4a0150,
-0xaf4b0154, 0xaf4c0158, 0x2442ff80, 0xaf420140,
-0x24020001, 0xafa20010, 0xc002b3b, 0xafa00014,
-0x8f420138, 0xafa20010, 0x8f42013c, 0xafa20014,
-0x8f460144, 0x8f470148, 0x3c040001, 0x24845ca0,
-0xc002b3b, 0x24050500, 0xafb70010, 0xafba0014,
-0x8f46014c, 0x8f470150, 0x3c040001, 0x24845cac,
-0xc002b3b, 0x24050600, 0x3c020001, 0x8c426e9c,
-0x3603821, 0x3c060002, 0x24c69010, 0x2448ffff,
-0x1061824, 0xe81024, 0x43102b, 0x10400006,
-0x24050900, 0x3c040001, 0x24845cb8, 0xafa80010,
-0xc002b3b, 0xafa00014, 0x8f82000c, 0xafa20010,
-0x8f82003c, 0xafa20014, 0x8f860000, 0x8f870004,
-0x3c040001, 0x24845cc4, 0xc002b3b, 0x24051000,
-0x8c020220, 0x8c030224, 0x8c060218, 0x8c07021c,
-0x3c040001, 0x24845ccc, 0x24051100, 0xafa20010,
-0xc002b3b, 0xafa30014, 0xaf800054, 0xaf80011c,
-0x8c020218, 0x30420002, 0x10400009, 0x0,
-0x8c020220, 0x3c030002, 0x34630004, 0x431025,
-0xaf42000c, 0x8c02021c, 0x10000008, 0x34420004,
-0x8c020220, 0x3c030002, 0x34630006, 0x431025,
-0xaf42000c, 0x8c02021c, 0x34420006, 0xaf420014,
-0x8c020218, 0x30420010, 0x1040000a, 0x0,
-0x8c02021c, 0x34420004, 0xaf420010, 0x8c020220,
-0x3c03000a, 0x34630004, 0x431025, 0x10000009,
-0xaf420008, 0x8c020220, 0x3c03000a, 0x34630006,
-0x431025, 0xaf420008, 0x8c02021c, 0x34420006,
-0xaf420010, 0x24020001, 0xaf8200a0, 0xaf8200b0,
-0x8f830054, 0x8f820054, 0xaf8000d0, 0xaf8000c0,
-0x10000002, 0x24630064, 0x8f820054, 0x621023,
-0x2c420065, 0x1440fffc, 0x0, 0x8c040208,
-0x8c05020c, 0x26e20028, 0xaee20020, 0x24020490,
-0xaee20010, 0xaee40008, 0xaee5000c, 0x26e40008,
-0x8c820000, 0x8c830004, 0xaf820090, 0xaf830094,
-0x8c820018, 0xaf8200b4, 0x9482000a, 0xaf82009c,
-0x8f420014, 0xaf8200b0, 0x8f8200b0, 0x30420004,
-0x1440fffd, 0x0, 0x8f8200b0, 0x3c03ef00,
-0x431024, 0x10400021, 0x0, 0x8f8200b4,
-0xafa20010, 0x8f820090, 0x8f830094, 0x3c040001,
-0x24845cd4, 0xafa30014, 0x8f8600b0, 0x8f87009c,
-0x3c050001, 0xc002b3b, 0x34a5200d, 0x3c040001,
-0x24845ce0, 0x240203c0, 0xafa20010, 0xafa00014,
-0x8f860144, 0x3c070001, 0x24e75ce8, 0xc002b3b,
-0x3405dead, 0x8f82011c, 0x34420002, 0xaf82011c,
-0x8f820220, 0x34420004, 0xaf820220, 0x8f820140,
-0x3c030001, 0x431025, 0xaf820140, 0x96e20472,
-0x96e60452, 0x96e70462, 0xafa20010, 0x96e20482,
-0x3c040001, 0x24845d14, 0x24051200, 0xc002b3b,
-0xafa20014, 0x96f00452, 0x32020001, 0x10400002,
-0xb021, 0x24160001, 0x32020002, 0x54400001,
-0x36d60002, 0x32020008, 0x54400001, 0x36d60004,
-0x32020010, 0x54400001, 0x36d60008, 0x32020020,
-0x54400001, 0x36d60010, 0x32020040, 0x54400001,
-0x36d60020, 0x32020080, 0x54400001, 0x36d60040,
-0x96e60482, 0x30c20200, 0x54400001, 0x36d64000,
-0x96e30472, 0x30620200, 0x10400003, 0x30620100,
-0x10000003, 0x36d62000, 0x54400001, 0x36d61000,
-0x96f00462, 0x32c24000, 0x14400004, 0x3207009b,
-0x30c2009b, 0x14e20007, 0x240e0001, 0x32c22000,
-0x1440000d, 0x32020001, 0x3062009b, 0x10e20009,
-0x240e0001, 0x3c040001, 0x24845d20, 0x24051300,
-0x2003821, 0xa3ae003f, 0xafa30010, 0xc002b3b,
-0xafa00014, 0x32020001, 0x54400001, 0x36d60080,
-0x32020002, 0x54400001, 0x36d60100, 0x32020008,
-0x54400001, 0x36d60200, 0x32020010, 0x54400001,
-0x36d60400, 0x32020080, 0x54400001, 0x36d60800,
-0x8c020218, 0x30420200, 0x10400002, 0x3c020008,
-0x2c2b025, 0x8c020218, 0x30420800, 0x10400002,
-0x3c020080, 0x2c2b025, 0x8c020218, 0x30420400,
-0x10400002, 0x3c020100, 0x2c2b025, 0x8c020218,
-0x30420100, 0x10400002, 0x3c020200, 0x2c2b025,
-0x8c020218, 0x30420080, 0x10400002, 0x3c020400,
-0x2c2b025, 0x8c020218, 0x30422000, 0x10400002,
-0x3c020010, 0x2c2b025, 0x8c020218, 0x30424000,
-0x10400002, 0x3c020020, 0x2c2b025, 0x8c020218,
-0x30421000, 0x10400002, 0x3c020040, 0x2c2b025,
-0x8ee20498, 0x8ee3049c, 0xaf420160, 0xaf430164,
-0x8ee204a0, 0x8ee304a4, 0xaf420168, 0xaf43016c,
-0x8ee204a8, 0x8ee304ac, 0xaf420170, 0xaf430174,
-0x8ee20428, 0x8ee3042c, 0xaf420178, 0xaf43017c,
-0x8ee20448, 0x8ee3044c, 0xaf420180, 0xaf430184,
-0x8ee20458, 0x8ee3045c, 0xaf420188, 0xaf43018c,
-0x8ee20468, 0x8ee3046c, 0xaf420190, 0xaf430194,
-0x8ee20478, 0x8ee3047c, 0xaf420198, 0xaf43019c,
-0x8ee20488, 0x8ee3048c, 0xaf4201a0, 0xaf4301a4,
-0x8ee204b0, 0x8ee304b4, 0x24040080, 0xaf4201a8,
-0xaf4301ac, 0xc002ba8, 0x24050080, 0x8c02025c,
-0x27440224, 0xaf4201f0, 0x8c020260, 0x24050200,
-0x24060008, 0xc002bbf, 0xaf4201f8, 0x3c043b9a,
-0x3484ca00, 0x3821, 0x24020006, 0x24030002,
-0xaf4201f4, 0x240203e8, 0xaf430204, 0xaf430200,
-0xaf4401fc, 0xaf420294, 0x24020001, 0xaf430290,
-0xaf42029c, 0x3c030001, 0x671821, 0x90636cd8,
-0x3471021, 0x24e70001, 0xa043022c, 0x2ce2000f,
-0x1440fff8, 0x3471821, 0x24e70001, 0x3c080001,
-0x350840f8, 0x8f820040, 0x3c040001, 0x24845d2c,
-0x24051400, 0x21702, 0x24420030, 0xa062022c,
-0x3471021, 0xa040022c, 0x8c070218, 0x2c03021,
-0x240205c8, 0xafa20010, 0xc002b3b, 0xafa80014,
-0x3c040001, 0x24845d38, 0x3c050000, 0x24a55c80,
-0x24060010, 0x27b10030, 0x2203821, 0x27b30034,
-0xc0017a3, 0xafb30010, 0x3c030001, 0x8c636cc8,
-0x1060000a, 0x408021, 0x8fa30030, 0x2405ff00,
-0x8fa20034, 0x246400ff, 0x852024, 0x831823,
-0x431023, 0xafa20034, 0xafa40030, 0x3c040001,
-0x24845d44, 0x3c050000, 0x24a54100, 0x24060108,
-0x2203821, 0xc0017a3, 0xafb30010, 0x409021,
-0x32c20003, 0x3c010001, 0xac326e80, 0x10400045,
-0x2203821, 0x8f820050, 0x3c030010, 0x431024,
-0x10400016, 0x0, 0x8c020218, 0x30420040,
-0x1040000f, 0x24020001, 0x8f820050, 0x8c030218,
-0x240e0001, 0x3c040001, 0x24845d50, 0xa3ae003f,
-0xafa20010, 0xafa30014, 0x8f870040, 0x24051500,
-0xc002b3b, 0x2c03021, 0x10000004, 0x0,
-0x3c010001, 0x370821, 0xa02240f4, 0x3c040001,
-0x24845d5c, 0x3c050001, 0x24a55b40, 0x3c060001,
-0x24c65bac, 0xc53023, 0x8f420010, 0x27b30030,
-0x2603821, 0x27b10034, 0x34420a00, 0xaf420010,
-0xc0017a3, 0xafb10010, 0x3c040001, 0x24845d70,
-0x3c050001, 0x24a5b714, 0x3c060001, 0x24c6ba90,
-0xc53023, 0x2603821, 0xaf420108, 0xc0017a3,
-0xafb10010, 0x3c040001, 0x24845d8c, 0x3c050001,
-0x24a5be58, 0x3c060001, 0x24c6c900, 0xc53023,
-0x2603821, 0x3c010001, 0xac226ef4, 0xc0017a3,
-0xafb10010, 0x3c040001, 0x24845da4, 0x10000024,
-0x24051600, 0x3c040001, 0x24845dac, 0x3c050001,
-0x24a5a10c, 0x3c060001, 0x24c6a238, 0xc53023,
-0xc0017a3, 0xafb30010, 0x3c040001, 0x24845dbc,
-0x3c050001, 0x24a5b2b0, 0x3c060001, 0x24c6b70c,
-0xc53023, 0x2203821, 0xaf420108, 0xc0017a3,
-0xafb30010, 0x3c040001, 0x24845dd0, 0x3c050001,
-0x24a5ba98, 0x3c060001, 0x24c6be50, 0xc53023,
-0x2203821, 0x3c010001, 0xac226ef4, 0xc0017a3,
-0xafb30010, 0x3c040001, 0x24845de4, 0x24051650,
-0x2c03021, 0x3821, 0x3c010001, 0xac226ef8,
-0xafa00010, 0xc002b3b, 0xafa00014, 0x32c20020,
-0x10400021, 0x27a70030, 0x3c040001, 0x24845df0,
-0x3c050001, 0x24a5b13c, 0x3c060001, 0x24c6b2a8,
-0xc53023, 0x24022000, 0xaf42001c, 0x27a20034,
-0xc0017a3, 0xafa20010, 0x21900, 0x31982,
-0x3c040800, 0x641825, 0xae430028, 0x24030010,
-0xaf43003c, 0x96e30450, 0xaf430040, 0x8f430040,
-0x3c040001, 0x24845e04, 0xafa00014, 0xafa30010,
-0x8f47001c, 0x24051660, 0x3c010001, 0xac226ef0,
-0x10000025, 0x32c60020, 0x8ee20448, 0x8ee3044c,
-0xaf43001c, 0x8f42001c, 0x2442e000, 0x2c422001,
-0x1440000a, 0x240e0001, 0x3c040001, 0x24845e10,
-0xa3ae003f, 0xafa00010, 0xafa00014, 0x8f46001c,
-0x24051700, 0xc002b3b, 0x3821, 0x3c020000,
-0x24425cbc, 0x21100, 0x21182, 0x3c030800,
-0x431025, 0xae420028, 0x24020008, 0xaf42003c,
-0x96e20450, 0xaf420040, 0x8f420040, 0x3c040001,
-0x24845e1c, 0xafa00014, 0xafa20010, 0x8f47001c,
-0x24051800, 0x32c60020, 0xc002b3b, 0x0,
-0x3c050fff, 0x3c030001, 0x8c636ef4, 0x34a5ffff,
-0x2403021, 0x3c020001, 0x8c426ef8, 0x3c040800,
-0x651824, 0x31882, 0x641825, 0x451024,
-0x21082, 0x441025, 0xacc20080, 0x32c20180,
-0x10400056, 0xacc30020, 0x8f82005c, 0x3c030080,
-0x431024, 0x1040000d, 0x0, 0x8f820050,
-0xafa20010, 0x8f82005c, 0x240e0001, 0x3c040001,
-0x24845e28, 0xa3ae003f, 0xafa20014, 0x8f870040,
-0x24051900, 0xc002b3b, 0x2c03021, 0x8f820050,
-0x3c030010, 0x431024, 0x10400016, 0x0,
-0x8c020218, 0x30420040, 0x1040000f, 0x24020001,
-0x8f820050, 0x8c030218, 0x240e0001, 0x3c040001,
-0x24845d50, 0xa3ae003f, 0xafa20010, 0xafa30014,
-0x8f870040, 0x24052000, 0xc002b3b, 0x2c03021,
-0x10000004, 0x0, 0x3c010001, 0x370821,
-0xa02240f4, 0x3c040001, 0x24845e34, 0x3c050001,
-0x24a55ac0, 0x3c060001, 0x24c65b38, 0xc53023,
-0x8f420008, 0x27b30030, 0x2603821, 0x27b10034,
-0x34420e00, 0xaf420008, 0xc0017a3, 0xafb10010,
-0x3c040001, 0x24845e4c, 0x3c050001, 0x24a5d8b4,
-0x3c060001, 0x24c6e3c8, 0xc53023, 0x2603821,
-0xaf42010c, 0xc0017a3, 0xafb10010, 0x3c040001,
-0x24845e64, 0x3c050001, 0x24a5e9ac, 0x3c060001,
-0x24c6f0f0, 0xc53023, 0x2603821, 0x3c010001,
-0xac226f04, 0xc0017a3, 0xafb10010, 0x3c040001,
-0x24845e7c, 0x10000027, 0x24052100, 0x3c040001,
-0x24845e84, 0x3c050001, 0x24a59fc8, 0x3c060001,
-0x24c6a104, 0xc53023, 0x27b10030, 0x2203821,
-0x27b30034, 0xc0017a3, 0xafb30010, 0x3c040001,
-0x24845e94, 0x3c050001, 0x24a5cad4, 0x3c060001,
-0x24c6d8ac, 0xc53023, 0x2203821, 0xaf42010c,
-0xc0017a3, 0xafb30010, 0x3c040001, 0x24845ea4,
-0x3c050001, 0x24a5e84c, 0x3c060001, 0x24c6e9a4,
-0xc53023, 0x2203821, 0x3c010001, 0xac226f04,
-0xc0017a3, 0xafb30010, 0x3c040001, 0x24845eb8,
-0x24052150, 0x2c03021, 0x3821, 0x3c010001,
-0xac226f10, 0xafa00010, 0xc002b3b, 0xafa00014,
-0x3c110fff, 0x3c030001, 0x8c636f04, 0x3631ffff,
-0x2409821, 0x3c020001, 0x8c426f10, 0x3c0e0800,
-0x711824, 0x31882, 0x6e1825, 0x511024,
-0x21082, 0x4e1025, 0xae630038, 0xae620078,
-0x8c020218, 0x30420040, 0x14400004, 0x24020001,
-0x3c010001, 0x370821, 0xa02240f4, 0x3c040001,
-0x24845ec4, 0x3c050001, 0x24a5e3d0, 0x3c060001,
-0x24c6e52c, 0xc53023, 0x27be0030, 0x3c03821,
-0x27b50034, 0xc0017a3, 0xafb50010, 0x3c010001,
-0xac226efc, 0x511024, 0x21082, 0x3c0e0800,
-0x4e1025, 0xae620050, 0x32c22000, 0x10400006,
-0x3c03821, 0x3c020000, 0x24425cbc, 0x2221024,
-0x1000000f, 0x21082, 0x3c040001, 0x24845ed8,
-0x3c050001, 0x24a5e534, 0x3c060001, 0x24c6e6e4,
-0xc53023, 0xc0017a3, 0xafb50010, 0x3c010001,
-0xac226f14, 0x511024, 0x21082, 0x3c0e0800,
-0x4e1025, 0xae620048, 0x32c24000, 0x10400005,
-0x27a70030, 0x3c020000, 0x24425cbc, 0x1000000e,
-0x21100, 0x3c040001, 0x24845ef0, 0x3c050001,
-0x24a5e6ec, 0x3c060001, 0x24c6e844, 0xc53023,
-0x27a20034, 0xc0017a3, 0xafa20010, 0x3c010001,
-0xac226f08, 0x21100, 0x21182, 0x3c030800,
-0x431025, 0xae420060, 0x3c040001, 0x24845f08,
-0x3c050001, 0x24a58230, 0x3c060001, 0x24c68650,
-0xc53023, 0x27b10030, 0x2203821, 0x27b30034,
-0xc0017a3, 0xafb30010, 0x3c0e0fff, 0x35ceffff,
-0x3c040001, 0x24845f14, 0x3c050000, 0x24a56468,
-0x3c060000, 0x24c66588, 0xc53023, 0x2203821,
-0x240f021, 0x3c010001, 0xac226edc, 0x4e1024,
-0x21082, 0x3c150800, 0x551025, 0xafae0044,
-0xafc200b8, 0xc0017a3, 0xafb30010, 0x3c040001,
-0x24845f20, 0x3c050000, 0x24a56590, 0x3c060000,
-0x24c66808, 0x8fae0044, 0xc53023, 0x2203821,
-0x3c010001, 0xac226ed0, 0x4e1024, 0x21082,
-0x551025, 0xafc200e8, 0xc0017a3, 0xafb30010,
-0x3c040001, 0x24845f38, 0x3c050000, 0x24a56810,
-0x3c060000, 0x24c66940, 0x8fae0044, 0xc53023,
-0x2203821, 0x3c010001, 0xac226ec8, 0x4e1024,
-0x21082, 0x551025, 0xafc200c0, 0xc0017a3,
-0xafb30010, 0x3c040001, 0x24845f50, 0x3c050001,
-0x24a5fad0, 0x3c060001, 0x24c6fba8, 0x8fae0044,
-0xc53023, 0x2203821, 0x3c010001, 0xac226ed4,
-0x4e1024, 0x21082, 0x551025, 0xafc200c8,
-0xc0017a3, 0xafb30010, 0x3c040001, 0x24845f5c,
-0x3c050001, 0x24a5c93c, 0x3c060001, 0x24c6ca20,
-0xc53023, 0x2203821, 0xaf420110, 0xc0017a3,
-0xafb30010, 0x3c040001, 0x24845f6c, 0x3c050001,
-0x24a5c910, 0x3c060001, 0x24c6c934, 0xc53023,
-0x2203821, 0xaf420124, 0xc0017a3, 0xafb30010,
-0x3c040001, 0x24845f7c, 0x3c050001, 0x24a55a80,
-0x3c060001, 0x24c65aac, 0xc53023, 0x2203821,
-0xaf420120, 0xaf420114, 0xc0017a3, 0xafb30010,
-0x3c040001, 0x24845f88, 0x3c050001, 0x24a5f298,
-0x3c060001, 0x24c6f6b4, 0xc53023, 0x2203821,
-0xaf420118, 0xc0017a3, 0xafb30010, 0x8fae0044,
-0x3c010001, 0xac226f18, 0x4e1024, 0x21082,
-0x551025, 0xc003fc3, 0xafc200d0, 0xc003c40,
-0x0, 0xc0027a8, 0x0, 0xac000228,
-0xac00022c, 0x96e20450, 0x2442ffff, 0xaf420038,
-0x96e20460, 0xaf420080, 0x32c24000, 0x14400003,
-0x0, 0x96e20480, 0xaf420084, 0x96e70490,
-0x50e00001, 0x24070800, 0x24e2ffff, 0xaf420088,
-0xaf42007c, 0x24020800, 0x10e2000f, 0x32c24000,
-0x10400003, 0x24020400, 0x10e2000b, 0x0,
-0x240e0001, 0x3c040001, 0x24845f98, 0xa3ae003f,
-0x96e60490, 0x24052170, 0x2c03821, 0xafa00010,
-0xc002b3b, 0xafa00014, 0x8f430138, 0x8f440138,
-0x24020001, 0xa34205c2, 0xaf430094, 0xaf440098,
-0xafa00010, 0xafa00014, 0x8f460080, 0x8f470084,
-0x3c040001, 0x24845fa4, 0xc002b3b, 0x24052200,
-0xc0024a4, 0x3c110800, 0x3c1433d8, 0x3694cb58,
-0x3c020800, 0x34420080, 0x3c040001, 0x24845fb0,
-0x3c050000, 0x24a55d00, 0x3c060000, 0x24c65d1c,
-0xc53023, 0x27a70030, 0xaf820060, 0x2402ffff,
-0xaf820064, 0x27a20034, 0xc0017a3, 0xafa20010,
-0x3c010001, 0xac226eb8, 0x21100, 0x21182,
-0x511025, 0xc0018fc, 0xae420000, 0x8f820240,
-0x3c030001, 0x431025, 0xaf820240, 0x3c020000,
-0x24424034, 0xaf820244, 0xaf800240, 0x8f820060,
-0x511024, 0x14400005, 0x3c030800, 0x8f820060,
-0x431024, 0x1040fffd, 0x0, 0xc003c4d,
-0x8821, 0x3c020100, 0xafa20020, 0x8f530018,
-0x240200ff, 0x56620001, 0x26710001, 0x8c020228,
-0x1622000e, 0x1330c0, 0x8f42033c, 0x24420001,
-0xaf42033c, 0x8f42033c, 0x8c020228, 0x3c040001,
-0x24845c24, 0x3c050009, 0xafa00014, 0xafa20010,
-0x8fa60020, 0x1000003f, 0x34a50100, 0xd71021,
-0x8fa30020, 0x8fa40024, 0xac4304c0, 0xac4404c4,
-0xc01821, 0x8f440178, 0x8f45017c, 0x1021,
-0x24070004, 0xafa70010, 0xafb10014, 0x8f48000c,
-0x24c604c0, 0x2e63021, 0xafa80018, 0x8f48010c,
-0x24070008, 0xa32821, 0xa3482b, 0x822021,
-0x100f809, 0x892021, 0x1440000b, 0x24070008,
-0x8f820120, 0xafa20010, 0x8f820124, 0x3c040001,
-0x24845c2c, 0x3c050009, 0xafa20014, 0x8fa60020,
-0x1000001c, 0x34a50200, 0x8f440160, 0x8f450164,
-0x8f43000c, 0xaf510018, 0x8f860120, 0x24020010,
-0xafa20010, 0xafb10014, 0xafa30018, 0x8f42010c,
-0x40f809, 0x24c6001c, 0x14400010, 0x0,
-0x8f420340, 0x24420001, 0xaf420340, 0x8f420340,
-0x8f820120, 0xafa20010, 0x8f820124, 0x3c040001,
-0x24845c34, 0x3c050009, 0xafa20014, 0x8fa60020,
-0x34a50300, 0xc002b3b, 0x2603821, 0x8f4202e4,
-0x24420001, 0xaf4202e4, 0x8f4202e4, 0x93a2003f,
-0x10400069, 0x3c020700, 0x34423000, 0xafa20028,
-0x8f530018, 0x240200ff, 0x12620002, 0x8821,
-0x26710001, 0x8c020228, 0x1622000e, 0x1330c0,
-0x8f42033c, 0x24420001, 0xaf42033c, 0x8f42033c,
-0x8c020228, 0x3c040001, 0x24845c24, 0x3c050009,
-0xafa00014, 0xafa20010, 0x8fa60028, 0x1000003f,
-0x34a50100, 0xd71021, 0x8fa30028, 0x8fa4002c,
-0xac4304c0, 0xac4404c4, 0xc01821, 0x8f440178,
-0x8f45017c, 0x1021, 0x24070004, 0xafa70010,
-0xafb10014, 0x8f48000c, 0x24c604c0, 0x2e63021,
-0xafa80018, 0x8f48010c, 0x24070008, 0xa32821,
-0xa3482b, 0x822021, 0x100f809, 0x892021,
-0x1440000b, 0x24070008, 0x8f820120, 0xafa20010,
-0x8f820124, 0x3c040001, 0x24845c2c, 0x3c050009,
-0xafa20014, 0x8fa60028, 0x1000001c, 0x34a50200,
-0x8f440160, 0x8f450164, 0x8f43000c, 0xaf510018,
-0x8f860120, 0x24020010, 0xafa20010, 0xafb10014,
-0xafa30018, 0x8f42010c, 0x40f809, 0x24c6001c,
-0x14400010, 0x0, 0x8f420340, 0x24420001,
-0xaf420340, 0x8f420340, 0x8f820120, 0xafa20010,
-0x8f820124, 0x3c040001, 0x24845c34, 0x3c050009,
-0xafa20014, 0x8fa60028, 0x34a50300, 0xc002b3b,
-0x2603821, 0x8f4202f0, 0x24420001, 0xaf4202f0,
-0x8f4202f0, 0x3c040001, 0x24845fc0, 0xafa00010,
-0xafa00014, 0x8fa60028, 0x24052300, 0xc002b3b,
-0x3821, 0x10000004, 0x0, 0x8c020264,
-0x10400005, 0x0, 0x8f8200a0, 0x30420004,
-0x1440fffa, 0x0, 0x8f820044, 0x34420004,
-0xaf820044, 0x8f420308, 0x24420001, 0xaf420308,
-0x8f420308, 0x8f8200d8, 0x8f8300d4, 0x431023,
-0x2442ff80, 0xaf420090, 0x8f420090, 0x2842ff81,
-0x10400006, 0x24020001, 0x8f420090, 0x8f430144,
-0x431021, 0xaf420090, 0x24020001, 0xaf42008c,
-0x32c20008, 0x10400006, 0x0, 0x8f820214,
-0x3c038100, 0x3042ffff, 0x431025, 0xaf820214,
-0x3c030001, 0x8c636d94, 0x30620002, 0x10400009,
-0x30620001, 0x3c040001, 0x24845fcc, 0x3c050000,
-0x24a56d50, 0x3c060000, 0x24c671c8, 0x10000012,
-0xc53023, 0x10400009, 0x0, 0x3c040001,
-0x24845fdc, 0x3c050000, 0x24a571d0, 0x3c060000,
-0x24c67678, 0x10000008, 0xc53023, 0x3c040001,
-0x24845fec, 0x3c050000, 0x24a56948, 0x3c060000,
-0x24c66d48, 0xc53023, 0x27a70030, 0x27a20034,
-0xc0017a3, 0xafa20010, 0x3c010001, 0xac226ecc,
-0x3c020001, 0x8c426ecc, 0x3c030800, 0x21100,
-0x21182, 0x431025, 0xae420040, 0x8f8200a0,
-0xafa20010, 0x8f8200b0, 0xafa20014, 0x8f86005c,
-0x8f87011c, 0x3c040001, 0x24845ffc, 0x3c010001,
-0xac366ea4, 0x3c010001, 0xac206e94, 0x3c010001,
-0xac3c6e8c, 0x3c010001, 0xac3b6ebc, 0x3c010001,
-0xac376ec0, 0x3c010001, 0xac3a6ea0, 0xc002b3b,
-0x24052400, 0x8f820200, 0xafa20010, 0x8f820220,
-0xafa20014, 0x8f860044, 0x8f870050, 0x3c040001,
-0x24846008, 0xc002b3b, 0x24052500, 0x8f830060,
-0x74100b, 0x242000a, 0x200f821, 0x0,
-0xd, 0x8fbf0060, 0x8fbe005c, 0x8fb50058,
-0x8fb30054, 0x8fb20050, 0x8fb1004c, 0x8fb00048,
-0x3e00008, 0x27bd0068, 0x27bdffe0, 0x3c040001,
-0x24846014, 0x24052600, 0x3021, 0x3821,
-0xafbf0018, 0xafa00010, 0xc002b3b, 0xafa00014,
-0x8fbf0018, 0x3e00008, 0x27bd0020, 0x3e00008,
-0x0, 0x3e00008, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x3e00008, 0x0, 0x3e00008, 0x0,
-0x27bdfde0, 0x27a50018, 0x3c04dead, 0x3484beef,
-0xafbf0218, 0x8f820150, 0x3c03001f, 0x3463ffff,
-0xafa40018, 0xa22823, 0xa32824, 0x8ca20000,
-0x1044000a, 0x0, 0xafa50010, 0x8ca20000,
-0xafa20014, 0x8f860150, 0x8f870250, 0x3c040001,
-0x2484601c, 0xc002b3b, 0x24052700, 0x8fbf0218,
-0x3e00008, 0x27bd0220, 0x27bdffe0, 0x3c06abba,
-0x34c6babe, 0xafb00018, 0x3c100004, 0x3c07007f,
-0x34e7ffff, 0xafbf001c, 0x102840, 0x8e040000,
-0x8ca30000, 0xaca00000, 0xae060000, 0x8ca20000,
-0xaca30000, 0x10460005, 0xae040000, 0xa08021,
-0xf0102b, 0x1040fff5, 0x102840, 0x3c040001,
-0x24846028, 0x24052800, 0x2003021, 0x3821,
-0xafa00010, 0xc002b3b, 0xafa00014, 0x2001021,
-0x8fbf001c, 0x8fb00018, 0x3e00008, 0x27bd0020,
-0x8c020224, 0x3047003f, 0x10e00010, 0x803021,
-0x2821, 0x24030020, 0xe31024, 0x10400002,
-0x63042, 0xa62821, 0x31842, 0x1460fffb,
-0xe31024, 0x2402f000, 0xa22824, 0x3402ffff,
-0x45102b, 0x14400003, 0x3c020001, 0x10000008,
-0x3c020001, 0x3442ffff, 0x851823, 0x43102b,
-0x14400003, 0xa01021, 0x3c02fffe, 0x821021,
-0x3e00008, 0x0, 0x27bdffd0, 0xafb50028,
-0x8fb50040, 0xafb20020, 0xa09021, 0xafb1001c,
-0x24c60003, 0xafbf002c, 0xafb30024, 0xafb00018,
-0x8ea20000, 0x2403fffc, 0xc38024, 0x50102b,
-0x1440001b, 0xe08821, 0x8e330000, 0xafb00010,
-0x8ea20000, 0xafa20014, 0x8e270000, 0x24053000,
-0xc002b3b, 0x2403021, 0x8e230000, 0x702021,
-0x64102b, 0x10400007, 0x2402821, 0x8ca20000,
-0xac620000, 0x24630004, 0x64102b, 0x1440fffb,
-0x24a50004, 0x8ea20000, 0x501023, 0xaea20000,
-0x8e220000, 0x501021, 0x1000000b, 0xae220000,
-0x2402002d, 0xa0820000, 0xafb00010, 0x8ea20000,
-0x2409821, 0xafa20014, 0x8e270000, 0x24053100,
-0xc002b3b, 0x2603021, 0x2601021, 0x8fbf002c,
-0x8fb50028, 0x8fb30024, 0x8fb20020, 0x8fb1001c,
-0x8fb00018, 0x3e00008, 0x27bd0030, 0x27bdffe8,
-0x3c1cc000, 0x3c05fffe, 0x3c030001, 0x8c636e84,
-0x3c040001, 0x8c846e90, 0x34a5bf08, 0x24021ffc,
-0x3c010001, 0xac226cd0, 0x3c0200c0, 0x3c010001,
-0xac226cd4, 0x3c020020, 0xafbf0010, 0x3c0100c0,
-0xac201ffc, 0x431023, 0x441023, 0x245bb000,
-0x365b821, 0x3c1d0001, 0x8fbd6ccc, 0x3a0f021,
-0x3c0400c0, 0x34840200, 0x3c1a00c0, 0x3c0300c0,
-0x346307c8, 0x24021dfc, 0x3c010001, 0xac226cd0,
-0x24021834, 0x3c010001, 0xac246cd4, 0x3c010001,
-0xac226cd0, 0x3c010001, 0xac236cd4, 0xc00180d,
-0x375a0200, 0x8fbf0010, 0x3e00008, 0x27bd0018,
-0x27bdffc8, 0x3c040001, 0x24846034, 0x24053200,
-0x3c020001, 0x8c426cd0, 0x3c030001, 0x8c636cd4,
-0x3021, 0x3603821, 0xafbf0030, 0xafb3002c,
-0xafb20028, 0xafb10024, 0xafb00020, 0xafa2001c,
-0xafa30018, 0xafb70010, 0xc002b3b, 0xafba0014,
-0xc001916, 0x0, 0x8f820240, 0x34420004,
-0xaf820240, 0x24020001, 0xaf420000, 0x3c020001,
-0x571021, 0x904240f4, 0x10400092, 0x2403fffc,
-0x3c100001, 0x2610ac73, 0x3c120001, 0x2652a84c,
-0x2121023, 0x438024, 0x8fa3001c, 0x3c040001,
-0x24846040, 0x70102b, 0x1440001a, 0x27b30018,
-0x8fb10018, 0x24053000, 0x2403021, 0xafb00010,
-0xafa30014, 0xc002b3b, 0x2203821, 0x8fa30018,
-0x702021, 0x64102b, 0x10400007, 0x2403021,
-0x8cc20000, 0xac620000, 0x24630004, 0x64102b,
-0x1440fffb, 0x24c60004, 0x8fa2001c, 0x501023,
-0xafa2001c, 0x8e620000, 0x501021, 0x1000000a,
-0xae620000, 0x2408821, 0x24053100, 0xafb00010,
-0xafa30014, 0x8fa70018, 0x2203021, 0x2402002d,
-0xc002b3b, 0xa0820000, 0x24070020, 0x8fa3001c,
-0x3c040001, 0x2484605c, 0x24120020, 0x3c010001,
-0xac316eb0, 0x2c620020, 0x1440001d, 0x27b10018,
-0x8fb00018, 0x24053000, 0x3c060001, 0x24c66f50,
-0xafa70010, 0xafa30014, 0xc002b3b, 0x2003821,
-0x8fa30018, 0x3c040001, 0x24846f50, 0x24650020,
-0x65102b, 0x10400007, 0x0, 0x8c820000,
-0xac620000, 0x24630004, 0x65102b, 0x1440fffb,
-0x24840004, 0x8fa2001c, 0x521023, 0xafa2001c,
-0x8e220000, 0x521021, 0x1000000b, 0xae220000,
-0x3c100001, 0x26106f50, 0x24053100, 0xafa70010,
-0xafa30014, 0x8fa70018, 0x2003021, 0x2402002d,
-0xc002b3b, 0xa0820000, 0x24070020, 0x3c040001,
-0x24846070, 0x8fa3001c, 0x24120020, 0x3c010001,
-0xac306ee4, 0x2c620020, 0x1440001d, 0x27b10018,
-0x8fb00018, 0x24053000, 0x3c060001, 0x24c66f70,
-0xafa70010, 0xafa30014, 0xc002b3b, 0x2003821,
-0x8fa30018, 0x3c040001, 0x24846f70, 0x24650020,
-0x65102b, 0x10400007, 0x0, 0x8c820000,
-0xac620000, 0x24630004, 0x65102b, 0x1440fffb,
-0x24840004, 0x8fa2001c, 0x521023, 0xafa2001c,
-0x8e220000, 0x521021, 0x1000000b, 0xae220000,
-0x3c100001, 0x26106f70, 0x24053100, 0xafa70010,
-0xafa30014, 0x8fa70018, 0x2003021, 0x2402002d,
-0xc002b3b, 0xa0820000, 0x3c010001, 0x10000031,
-0xac306ee0, 0x3c100001, 0x2610821f, 0x3c120001,
-0x2652809c, 0x2121023, 0x438024, 0x8fa3001c,
-0x3c040001, 0x24846084, 0x70102b, 0x1440001a,
-0x27b30018, 0x8fb10018, 0x24053000, 0x2403021,
-0xafb00010, 0xafa30014, 0xc002b3b, 0x2203821,
-0x8fa30018, 0x702021, 0x64102b, 0x10400007,
-0x2403021, 0x8cc20000, 0xac620000, 0x24630004,
-0x64102b, 0x1440fffb, 0x24c60004, 0x8fa2001c,
-0x501023, 0xafa2001c, 0x8e620000, 0x501021,
-0x1000000a, 0xae620000, 0x2408821, 0x24053100,
-0xafb00010, 0xafa30014, 0x8fa70018, 0x2203021,
-0x2402002d, 0xc002b3b, 0xa0820000, 0x3c010001,
-0xac316eb0, 0x3c030001, 0x8c636eb0, 0x24020400,
-0x60f809, 0xaf820070, 0x8fbf0030, 0x8fb3002c,
-0x8fb20028, 0x8fb10024, 0x8fb00020, 0x3e00008,
-0x27bd0038, 0x0, 0x0, 0x8f820040,
-0x3c03f000, 0x431024, 0x3c036000, 0x14430006,
-0x0, 0x8f820050, 0x2403ff80, 0x431024,
-0x34420055, 0xaf820050, 0x8f820054, 0x244203e8,
-0xaf820058, 0x240201f4, 0xaf4200e0, 0x24020004,
-0xaf4200e8, 0x24020002, 0xaf4001b0, 0xaf4000e4,
-0xaf4200dc, 0xaf4000d8, 0xaf4000d4, 0x3e00008,
-0xaf4000d0, 0x8f820054, 0x24420005, 0x3e00008,
-0xaf820078, 0x27bdffe8, 0xafbf0010, 0x8f820054,
-0x244203e8, 0xaf820058, 0x3c020800, 0x2c21024,
-0x10400004, 0x3c02f7ff, 0x3442ffff, 0x2c2b024,
-0x36940040, 0x3c020001, 0x8c426da8, 0x10400017,
-0x3c020200, 0x3c030001, 0x8c636f1c, 0x10600016,
-0x282a025, 0x3c020001, 0x8c426e44, 0x14400012,
-0x3c020200, 0x3c020001, 0x8c426d94, 0x30420003,
-0x1440000d, 0x3c020200, 0x8f830224, 0x3c020002,
-0x8c428fec, 0x10620008, 0x3c020200, 0xc003daf,
-0x0, 0x10000004, 0x3c020200, 0xc004196,
-0x0, 0x3c020200, 0x2c21024, 0x10400003,
-0x0, 0xc001f4b, 0x0, 0x8f4200d8,
-0x8f4300dc, 0x24420001, 0xaf4200d8, 0x43102b,
-0x14400003, 0x0, 0xaf4000d8, 0x36940080,
-0x8c030238, 0x1060000c, 0x0, 0x8f4201b0,
-0x244203e8, 0xaf4201b0, 0x43102b, 0x14400006,
-0x0, 0x934205c5, 0x14400003, 0x0,
-0xc001da0, 0x0, 0x8fbf0010, 0x3e00008,
-0x27bd0018, 0x3e00008, 0x0, 0x27bdffd8,
-0xafbf0020, 0x8f43002c, 0x8f420038, 0x10620059,
-0x0, 0x3c020001, 0x571021, 0x904240f0,
-0x10400026, 0x24070008, 0x8f440170, 0x8f450174,
-0x8f48000c, 0x8f860120, 0x24020020, 0xafa20010,
-0xafa30014, 0xafa80018, 0x8f42010c, 0x40f809,
-0x24c6001c, 0x14400011, 0x24020001, 0x3c010001,
-0x370821, 0xa02240f0, 0x8f820124, 0xafa20010,
-0x8f820128, 0x3c040001, 0x24846128, 0xafa20014,
-0x8f46002c, 0x8f870120, 0x3c050009, 0xc002b3b,
-0x34a50900, 0x1000005c, 0x0, 0x8f420300,
-0x24420001, 0xaf420300, 0x8f420300, 0x8f42002c,
-0xa34005c1, 0x10000027, 0xaf420038, 0x8f440170,
-0x8f450174, 0x8f43002c, 0x8f48000c, 0x8f860120,
-0x24020080, 0xafa20010, 0xafa30014, 0xafa80018,
-0x8f42010c, 0x40f809, 0x24c6001c, 0x14400011,
-0x24020001, 0x3c010001, 0x370821, 0xa02240f1,
-0x8f820124, 0xafa20010, 0x8f820128, 0x3c040001,
-0x24846134, 0xafa20014, 0x8f46002c, 0x8f870120,
-0x3c050009, 0xc002b3b, 0x34a51100, 0x10000036,
-0x0, 0x8f420300, 0x8f43002c, 0x24420001,
-0xaf420300, 0x8f420300, 0x24020001, 0xa34205c1,
-0xaf430038, 0x3c010001, 0x370821, 0xa02040f1,
-0x3c010001, 0x370821, 0xa02040f0, 0x10000026,
-0xaf400034, 0x934205c1, 0x1040001d, 0x0,
-0xa34005c1, 0x8f820040, 0x30420001, 0x14400008,
-0x2021, 0x8c030104, 0x24020001, 0x50620005,
-0x24040001, 0x8c020264, 0x10400003, 0x801021,
-0x24040001, 0x801021, 0x10400006, 0x0,
-0x8f42030c, 0x24420001, 0xaf42030c, 0x10000008,
-0x8f42030c, 0x8f820044, 0x34420004, 0xaf820044,
-0x8f420308, 0x24420001, 0xaf420308, 0x8f420308,
-0x3c010001, 0x370821, 0xa02040f0, 0x3c010001,
-0x370821, 0xa02040f1, 0x8f420000, 0x10400007,
-0x0, 0xaf80004c, 0x8f82004c, 0x1040fffd,
-0x0, 0x10000005, 0x0, 0xaf800048,
-0x8f820048, 0x1040fffd, 0x0, 0x8f820060,
-0x3c03ff7f, 0x3463ffff, 0x431024, 0xaf820060,
-0x8f420000, 0x10400003, 0x0, 0x10000002,
-0xaf80004c, 0xaf800048, 0x8fbf0020, 0x3e00008,
-0x27bd0028, 0x3e00008, 0x0, 0x27bdffd8,
-0xafbf0020, 0x8f430044, 0x8f42007c, 0x10620029,
-0x24070008, 0x8f440168, 0x8f45016c, 0x8f48000c,
-0x8f860120, 0x24020040, 0xafa20010, 0xafa30014,
-0xafa80018, 0x8f42010c, 0x40f809, 0x24c6001c,
-0x14400011, 0x24020001, 0x3c010001, 0x370821,
-0xa02240f2, 0x8f820124, 0xafa20010, 0x8f820128,
-0x3c040001, 0x2484613c, 0xafa20014, 0x8f460044,
-0x8f870120, 0x3c050009, 0xc002b3b, 0x34a51300,
-0x1000000f, 0x0, 0x8f420304, 0x24420001,
-0xaf420304, 0x8f420304, 0x8f420044, 0xaf42007c,
-0x3c010001, 0x370821, 0xa02040f2, 0x10000004,
-0xaf400078, 0x3c010001, 0x370821, 0xa02040f2,
-0x8f420000, 0x10400007, 0x0, 0xaf80004c,
-0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
-0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
-0x0, 0x8f820060, 0x3c03feff, 0x3463ffff,
-0x431024, 0xaf820060, 0x8f420000, 0x10400003,
-0x0, 0x10000002, 0xaf80004c, 0xaf800048,
-0x8fbf0020, 0x3e00008, 0x27bd0028, 0x3e00008,
-0x0, 0x3c020001, 0x8c426da8, 0x27bdffa8,
-0xafbf0050, 0xafbe004c, 0xafb50048, 0xafb30044,
-0xafb20040, 0xafb1003c, 0xafb00038, 0x104000d5,
-0x8f900044, 0x8f4200d0, 0x24430001, 0x2842000b,
-0x144000e4, 0xaf4300d0, 0x8f420004, 0x30420002,
-0x1440009c, 0xaf4000d0, 0x8f420004, 0x3c030001,
-0x8c636d98, 0x34420002, 0xaf420004, 0x24020001,
-0x14620003, 0x3c020600, 0x10000002, 0x34423000,
-0x34421000, 0xafa20020, 0x8f4a0018, 0xafaa0034,
-0x27aa0020, 0xafaa002c, 0x8faa0034, 0x240200ff,
-0x11420002, 0x1821, 0x25430001, 0x8c020228,
-0x609821, 0x1662000e, 0x3c050009, 0x8f42033c,
-0x24420001, 0xaf42033c, 0x8f42033c, 0x8c020228,
-0x8fa70034, 0x3c040001, 0x2484610c, 0xafa00014,
-0xafa20010, 0x8fa60020, 0x10000070, 0x34a50500,
-0x8faa0034, 0xa38c0, 0xf71021, 0x8fa30020,
-0x8fa40024, 0xac4304c0, 0xac4404c4, 0x8f830054,
-0x8f820054, 0x247103e8, 0x2221023, 0x2c4203e9,
-0x1040001b, 0xa821, 0xe09021, 0x265e04c0,
-0x8f440178, 0x8f45017c, 0x2401821, 0x240a0004,
-0xafaa0010, 0xafb30014, 0x8f48000c, 0x1021,
-0x2fe3021, 0xafa80018, 0x8f48010c, 0x24070008,
-0xa32821, 0xa3482b, 0x822021, 0x100f809,
-0x892021, 0x54400006, 0x24150001, 0x8f820054,
-0x2221023, 0x2c4203e9, 0x1440ffe9, 0x0,
-0x32a200ff, 0x54400018, 0xaf530018, 0x8f420378,
-0x24420001, 0xaf420378, 0x8f420378, 0x8f820120,
-0x8faa002c, 0x8fa70034, 0xafa20010, 0x8f820124,
-0x3c040001, 0x24846118, 0xafa20014, 0x8d460000,
-0x3c050009, 0x10000035, 0x34a50600, 0x8f420308,
-0x24150001, 0x24420001, 0xaf420308, 0x8f420308,
-0x1000001e, 0x32a200ff, 0x8f830054, 0x8f820054,
-0x247103e8, 0x2221023, 0x2c4203e9, 0x10400016,
-0xa821, 0x3c1e0020, 0x24120010, 0x8f42000c,
-0x8f440160, 0x8f450164, 0x8f860120, 0xafb20010,
-0xafb30014, 0x5e1025, 0xafa20018, 0x8f42010c,
-0x24070008, 0x40f809, 0x24c6001c, 0x1440ffe3,
-0x0, 0x8f820054, 0x2221023, 0x2c4203e9,
-0x1440ffee, 0x0, 0x32a200ff, 0x14400011,
-0x3c050009, 0x8f420378, 0x24420001, 0xaf420378,
-0x8f420378, 0x8f820120, 0x8faa002c, 0x8fa70034,
-0xafa20010, 0x8f820124, 0x3c040001, 0x24846120,
-0xafa20014, 0x8d460000, 0x34a50700, 0xc002b3b,
-0x0, 0x8f4202ec, 0x24420001, 0xaf4202ec,
-0x8f4202ec, 0x8f420004, 0x30420001, 0x50400029,
-0x36100040, 0x3c020400, 0x2c21024, 0x10400013,
-0x2404ffdf, 0x8f420250, 0x8f430254, 0x8f4401b4,
-0x14640006, 0x36100040, 0x8f420270, 0x8f430274,
-0x8f4401b8, 0x10640007, 0x2402ffdf, 0x8f420250,
-0x8f430254, 0x8f440270, 0x8f450274, 0x10000012,
-0x3a100020, 0x1000002b, 0x2028024, 0x8f420250,
-0x8f430254, 0x8f4501b4, 0x14650006, 0x2048024,
-0x8f420270, 0x8f430274, 0x8f4401b8, 0x50640021,
-0x36100040, 0x8f420250, 0x8f430254, 0x8f440270,
-0x8f450274, 0x3a100040, 0xaf4301b4, 0x10000019,
-0xaf4501b8, 0x8f4200d4, 0x24430001, 0x10000011,
-0x28420033, 0x8f420004, 0x30420001, 0x10400009,
-0x3c020400, 0x2c21024, 0x10400004, 0x2402ffdf,
-0x2028024, 0x1000000b, 0x36100040, 0x10000009,
-0x36100060, 0x8f4200d4, 0x36100040, 0x24430001,
-0x284201f5, 0x14400003, 0xaf4300d4, 0xaf4000d4,
-0x3a100020, 0xaf900044, 0x2402ff7f, 0x282a024,
-0x8fbf0050, 0x8fbe004c, 0x8fb50048, 0x8fb30044,
-0x8fb20040, 0x8fb1003c, 0x8fb00038, 0x3e00008,
-0x27bd0058, 0x3e00008, 0x0, 0x3c020001,
-0x8c426da8, 0x27bdffb0, 0xafbf0048, 0xafbe0044,
-0xafb50040, 0xafb3003c, 0xafb20038, 0xafb10034,
-0x104000c7, 0xafb00030, 0x8f4200d0, 0x24430001,
-0x2842000b, 0x144000da, 0xaf4300d0, 0x8f420004,
-0x30420002, 0x14400097, 0xaf4000d0, 0x8f420004,
-0x3c030001, 0x8c636d98, 0x34420002, 0xaf420004,
-0x24020001, 0x14620003, 0x3c020600, 0x10000002,
-0x34423000, 0x34421000, 0xafa20020, 0x1821,
-0x8f5e0018, 0x27aa0020, 0x240200ff, 0x13c20002,
-0xafaa002c, 0x27c30001, 0x8c020228, 0x609021,
-0x1642000e, 0x1e38c0, 0x8f42033c, 0x24420001,
-0xaf42033c, 0x8f42033c, 0x8c020228, 0x3c040001,
-0x2484610c, 0x3c050009, 0xafa00014, 0xafa20010,
-0x8fa60020, 0x1000006d, 0x34a50500, 0xf71021,
-0x8fa30020, 0x8fa40024, 0xac4304c0, 0xac4404c4,
-0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
-0x2c4203e9, 0x1040001b, 0x9821, 0xe08821,
-0x263504c0, 0x8f440178, 0x8f45017c, 0x2201821,
-0x240a0004, 0xafaa0010, 0xafb20014, 0x8f48000c,
-0x1021, 0x2f53021, 0xafa80018, 0x8f48010c,
-0x24070008, 0xa32821, 0xa3482b, 0x822021,
-0x100f809, 0x892021, 0x54400006, 0x24130001,
-0x8f820054, 0x2021023, 0x2c4203e9, 0x1440ffe9,
-0x0, 0x326200ff, 0x54400017, 0xaf520018,
-0x8f420378, 0x24420001, 0xaf420378, 0x8f420378,
-0x8f820120, 0x8faa002c, 0xafa20010, 0x8f820124,
-0x3c040001, 0x24846118, 0x3c050009, 0xafa20014,
-0x8d460000, 0x10000035, 0x34a50600, 0x8f420308,
-0x24130001, 0x24420001, 0xaf420308, 0x8f420308,
-0x1000001e, 0x326200ff, 0x8f830054, 0x8f820054,
-0x247003e8, 0x2021023, 0x2c4203e9, 0x10400016,
-0x9821, 0x3c150020, 0x24110010, 0x8f42000c,
-0x8f440160, 0x8f450164, 0x8f860120, 0xafb10010,
-0xafb20014, 0x551025, 0xafa20018, 0x8f42010c,
-0x24070008, 0x40f809, 0x24c6001c, 0x1440ffe3,
-0x0, 0x8f820054, 0x2021023, 0x2c4203e9,
-0x1440ffee, 0x0, 0x326200ff, 0x14400011,
-0x0, 0x8f420378, 0x24420001, 0xaf420378,
-0x8f420378, 0x8f820120, 0x8faa002c, 0xafa20010,
-0x8f820124, 0x3c040001, 0x24846120, 0x3c050009,
-0xafa20014, 0x8d460000, 0x34a50700, 0xc002b3b,
-0x3c03821, 0x8f4202ec, 0x24420001, 0xaf4202ec,
-0x8f4202ec, 0x8f420004, 0x30420001, 0x10400018,
-0x24040001, 0x8f420250, 0x8f430254, 0x8f4501b4,
-0x3c010001, 0x14650006, 0xa0246cf1, 0x8f420270,
-0x8f430274, 0x8f4401b8, 0x10640021, 0x0,
-0x8f420250, 0x8f430254, 0x3c040001, 0x90846cf0,
-0x8f460270, 0x8f470274, 0x38840001, 0xaf4301b4,
-0xaf4701b8, 0x3c010001, 0x10000025, 0xa0246cf0,
-0x8f4200d4, 0x3c010001, 0xa0206cf0, 0x24430001,
-0x28420033, 0x1440001e, 0xaf4300d4, 0x3c020001,
-0x90426cf1, 0xaf4000d4, 0x10000017, 0x38420001,
-0x8f420004, 0x30420001, 0x10400008, 0x0,
-0xc00565a, 0x2021, 0x3c010001, 0xa0206cf1,
-0x3c010001, 0x1000000e, 0xa0206cf0, 0x8f4200d4,
-0x3c010001, 0xa0206cf0, 0x24430001, 0x284201f5,
-0x14400007, 0xaf4300d4, 0x3c020001, 0x90426cf1,
-0xaf4000d4, 0x421026, 0x3c010001, 0xa0226cf1,
-0x3c030001, 0x8c636d98, 0x24020002, 0x1462000c,
-0x3c030002, 0x3c030001, 0x90636cf1, 0x24020001,
-0x5462001f, 0x2021, 0x3c020001, 0x90426cf0,
-0x1443001b, 0x24040005, 0x10000019, 0x24040006,
-0x3c020002, 0x8c428ff4, 0x431024, 0x1040000b,
-0x24020001, 0x3c030001, 0x90636cf1, 0x54620010,
-0x2021, 0x3c020001, 0x90426cf0, 0x1443000c,
-0x24040003, 0x1000000a, 0x24040004, 0x3c030001,
-0x90636cf1, 0x14620006, 0x2021, 0x3c020001,
-0x90426cf0, 0x24040001, 0x50440001, 0x24040002,
-0xc00565a, 0x0, 0x2402ff7f, 0x282a024,
-0x8fbf0048, 0x8fbe0044, 0x8fb50040, 0x8fb3003c,
-0x8fb20038, 0x8fb10034, 0x8fb00030, 0x3e00008,
-0x27bd0050, 0x3e00008, 0x0, 0x3c020001,
-0x8c426da8, 0x27bdffb0, 0xafbf0048, 0xafbe0044,
-0xafb50040, 0xafb3003c, 0xafb20038, 0xafb10034,
-0x104000de, 0xafb00030, 0x8f4200d0, 0x3c040001,
-0x8c846d98, 0x24430001, 0x2842000b, 0xaf4400e8,
-0x144000fe, 0xaf4300d0, 0x8f420004, 0x30420002,
-0x14400095, 0xaf4000d0, 0x8f420004, 0x34420002,
-0xaf420004, 0x24020001, 0x14820003, 0x3c020600,
-0x10000002, 0x34423000, 0x34421000, 0xafa20020,
-0x1821, 0x8f5e0018, 0x27aa0020, 0x240200ff,
-0x13c20002, 0xafaa002c, 0x27c30001, 0x8c020228,
-0x609021, 0x1642000e, 0x1e38c0, 0x8f42033c,
-0x24420001, 0xaf42033c, 0x8f42033c, 0x8c020228,
-0x3c040001, 0x2484610c, 0x3c050009, 0xafa00014,
-0xafa20010, 0x8fa60020, 0x1000006d, 0x34a50500,
-0xf71021, 0x8fa30020, 0x8fa40024, 0xac4304c0,
-0xac4404c4, 0x8f830054, 0x8f820054, 0x247003e8,
-0x2021023, 0x2c4203e9, 0x1040001b, 0x9821,
-0xe08821, 0x263504c0, 0x8f440178, 0x8f45017c,
-0x2201821, 0x240a0004, 0xafaa0010, 0xafb20014,
-0x8f48000c, 0x1021, 0x2f53021, 0xafa80018,
-0x8f48010c, 0x24070008, 0xa32821, 0xa3482b,
-0x822021, 0x100f809, 0x892021, 0x54400006,
-0x24130001, 0x8f820054, 0x2021023, 0x2c4203e9,
-0x1440ffe9, 0x0, 0x326200ff, 0x54400017,
-0xaf520018, 0x8f420378, 0x24420001, 0xaf420378,
-0x8f420378, 0x8f820120, 0x8faa002c, 0xafa20010,
-0x8f820124, 0x3c040001, 0x24846118, 0x3c050009,
-0xafa20014, 0x8d460000, 0x10000035, 0x34a50600,
-0x8f420308, 0x24130001, 0x24420001, 0xaf420308,
-0x8f420308, 0x1000001e, 0x326200ff, 0x8f830054,
-0x8f820054, 0x247003e8, 0x2021023, 0x2c4203e9,
-0x10400016, 0x9821, 0x3c150020, 0x24110010,
-0x8f42000c, 0x8f440160, 0x8f450164, 0x8f860120,
-0xafb10010, 0xafb20014, 0x551025, 0xafa20018,
-0x8f42010c, 0x24070008, 0x40f809, 0x24c6001c,
-0x1440ffe3, 0x0, 0x8f820054, 0x2021023,
-0x2c4203e9, 0x1440ffee, 0x0, 0x326200ff,
-0x14400011, 0x0, 0x8f420378, 0x24420001,
-0xaf420378, 0x8f420378, 0x8f820120, 0x8faa002c,
-0xafa20010, 0x8f820124, 0x3c040001, 0x24846120,
-0x3c050009, 0xafa20014, 0x8d460000, 0x34a50700,
-0xc002b3b, 0x3c03821, 0x8f4202ec, 0x24420001,
-0xaf4202ec, 0x8f4202ec, 0x8f420004, 0x30420001,
-0x10400033, 0x3c020400, 0x2c21024, 0x10400017,
-0x0, 0x934205c0, 0x8f440250, 0x8f450254,
-0x8f4301b4, 0x34420020, 0x14a30006, 0xa34205c0,
-0x8f420270, 0x8f430274, 0x8f4401b8, 0x10640008,
-0x0, 0x8f420250, 0x8f430254, 0x934405c0,
-0x8f460270, 0x8f470274, 0x10000016, 0x38840040,
-0x934205c0, 0x10000048, 0x304200bf, 0x934205c0,
-0x8f440250, 0x8f450254, 0x8f4301b4, 0x304200bf,
-0x14a30006, 0xa34205c0, 0x8f420270, 0x8f430274,
-0x8f4401b8, 0x1064000b, 0x0, 0x8f420250,
-0x8f430254, 0x934405c0, 0x8f460270, 0x8f470274,
-0x38840020, 0xaf4301b4, 0xaf4701b8, 0x10000033,
-0xa34405c0, 0x934205c0, 0x1000002f, 0x34420020,
-0x934205c0, 0x8f4300d4, 0x34420020, 0xa34205c0,
-0x24620001, 0x10000023, 0x28630033, 0x8f4200e4,
-0x8f4300e0, 0x24420001, 0xaf4200e4, 0x43102a,
-0x14400006, 0x24030001, 0x8f4200e8, 0x14430002,
-0xaf4000e4, 0x24030004, 0xaf4300e8, 0x8f420004,
-0x30420001, 0x1040000d, 0x3c020400, 0x2c21024,
-0x10400007, 0x0, 0x934205c0, 0x34420040,
-0xa34205c0, 0x934205c0, 0x1000000f, 0x304200df,
-0x934205c0, 0x1000000c, 0x34420060, 0x934205c0,
-0x8f4300d4, 0x34420020, 0xa34205c0, 0x24620001,
-0x286300fb, 0x14600005, 0xaf4200d4, 0x934205c0,
-0xaf4000d4, 0x38420040, 0xa34205c0, 0x934205c0,
-0x8f4300e8, 0x3042007f, 0xa34205c0, 0x24020001,
-0x14620005, 0x0, 0x934405c0, 0x42102,
-0x10000003, 0x348400f0, 0x934405c0, 0x3484000f,
-0xc005640, 0x0, 0x2402ff7f, 0x282a024,
-0x8fbf0048, 0x8fbe0044, 0x8fb50040, 0x8fb3003c,
-0x8fb20038, 0x8fb10034, 0x8fb00030, 0x3e00008,
-0x27bd0050, 0x3e00008, 0x0, 0x27bdffb0,
-0x274401c0, 0x26e30028, 0x24650400, 0x65102b,
-0xafbf0048, 0xafbe0044, 0xafb50040, 0xafb3003c,
-0xafb20038, 0xafb10034, 0x10400007, 0xafb00030,
-0x8c820000, 0xac620000, 0x24630004, 0x65102b,
-0x1440fffb, 0x24840004, 0x8c020080, 0xaee20044,
-0x8c0200c0, 0xaee20040, 0x8c020084, 0xaee20030,
-0x8c020084, 0xaee2023c, 0x8c020088, 0xaee20240,
-0x8c02008c, 0xaee20244, 0x8c020090, 0xaee20248,
-0x8c020094, 0xaee2024c, 0x8c020098, 0xaee20250,
-0x8c02009c, 0xaee20254, 0x8c0200a0, 0xaee20258,
-0x8c0200a4, 0xaee2025c, 0x8c0200a8, 0xaee20260,
-0x8c0200ac, 0xaee20264, 0x8c0200b0, 0xaee20268,
-0x8c0200b4, 0xaee2026c, 0x8c0200b8, 0xaee20270,
-0x8c0200bc, 0x24040001, 0xaee20274, 0xaee00034,
-0x41080, 0x571021, 0x8ee30034, 0x8c42023c,
-0x24840001, 0x621821, 0x2c82000f, 0xaee30034,
-0x1440fff8, 0x41080, 0x8c0200cc, 0xaee20048,
-0x8c0200d0, 0xaee2004c, 0x8c0200e0, 0xaee201f8,
-0x8c0200e4, 0xaee201fc, 0x8c0200e8, 0xaee20200,
-0x8c0200ec, 0xaee20204, 0x8c0200f0, 0xaee20208,
-0x8ee400c0, 0x8ee500c4, 0x8c0200fc, 0x45102b,
-0x1040000b, 0x0, 0x8ee200c0, 0x8ee300c4,
-0x24040001, 0x24050000, 0x651821, 0x65302b,
-0x441021, 0x461021, 0xaee200c0, 0xaee300c4,
-0x8c0200fc, 0x8ee400c0, 0x8ee500c4, 0x2408ffff,
-0x24090000, 0x401821, 0x1021, 0x882024,
-0xa92824, 0x822025, 0xa32825, 0xaee400c0,
-0xaee500c4, 0x8ee400d0, 0x8ee500d4, 0x8c0200f4,
-0x45102b, 0x1040000b, 0x0, 0x8ee200d0,
-0x8ee300d4, 0x24040001, 0x24050000, 0x651821,
-0x65302b, 0x441021, 0x461021, 0xaee200d0,
-0xaee300d4, 0x8c0200f4, 0x8ee400d0, 0x8ee500d4,
-0x401821, 0x1021, 0x882024, 0xa92824,
-0x822025, 0xa32825, 0xaee400d0, 0xaee500d4,
-0x8ee400c8, 0x8ee500cc, 0x8c0200f8, 0x45102b,
-0x1040000b, 0x0, 0x8ee200c8, 0x8ee300cc,
-0x24040001, 0x24050000, 0x651821, 0x65302b,
-0x441021, 0x461021, 0xaee200c8, 0xaee300cc,
-0x8c0200f8, 0x8ee400c8, 0x8ee500cc, 0x401821,
-0x1021, 0x882024, 0xa92824, 0x822025,
-0xa32825, 0x24020008, 0xaee400c8, 0xaee500cc,
-0xafa20010, 0xafa00014, 0x8f42000c, 0x8c040208,
-0x8c05020c, 0xafa20018, 0x8f42010c, 0x26e60028,
-0x40f809, 0x24070400, 0x104000f0, 0x3c020400,
-0xafa20020, 0x934205c6, 0x10400089, 0x1821,
-0x8f5e0018, 0x27aa0020, 0x240200ff, 0x13c20002,
-0xafaa002c, 0x27c30001, 0x8c020228, 0x609021,
-0x1642000e, 0x1e38c0, 0x8f42033c, 0x24420001,
-0xaf42033c, 0x8f42033c, 0x8c020228, 0x3c040001,
-0x2484610c, 0x3c050009, 0xafa00014, 0xafa20010,
-0x8fa60020, 0x1000006b, 0x34a50500, 0xf71021,
-0x8fa30020, 0x8fa40024, 0xac4304c0, 0xac4404c4,
-0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
-0x2c4203e9, 0x1040001b, 0x9821, 0xe08821,
-0x263504c0, 0x8f440178, 0x8f45017c, 0x2201821,
-0x240a0004, 0xafaa0010, 0xafb20014, 0x8f48000c,
-0x1021, 0x2f53021, 0xafa80018, 0x8f48010c,
-0x24070008, 0xa32821, 0xa3482b, 0x822021,
-0x100f809, 0x892021, 0x54400006, 0x24130001,
-0x8f820054, 0x2021023, 0x2c4203e9, 0x1440ffe9,
-0x0, 0x326200ff, 0x54400017, 0xaf520018,
-0x8f420378, 0x24420001, 0xaf420378, 0x8f420378,
-0x8f820120, 0x8faa002c, 0xafa20010, 0x8f820124,
-0x3c040001, 0x24846118, 0x3c050009, 0xafa20014,
-0x8d460000, 0x10000033, 0x34a50600, 0x8f420308,
-0x24130001, 0x24420001, 0xaf420308, 0x8f420308,
-0x1000001c, 0x326200ff, 0x8f830054, 0x8f820054,
-0x247003e8, 0x2021023, 0x2c4203e9, 0x10400014,
-0x9821, 0x24110010, 0x8f42000c, 0x8f440160,
-0x8f450164, 0x8f860120, 0xafb10010, 0xafb20014,
-0xafa20018, 0x8f42010c, 0x24070008, 0x40f809,
-0x24c6001c, 0x1440ffe5, 0x0, 0x8f820054,
-0x2021023, 0x2c4203e9, 0x1440ffef, 0x0,
-0x326200ff, 0x54400012, 0x24020001, 0x8f420378,
-0x24420001, 0xaf420378, 0x8f420378, 0x8f820120,
-0x8faa002c, 0xafa20010, 0x8f820124, 0x3c040001,
-0x24846120, 0x3c050009, 0xafa20014, 0x8d460000,
-0x34a50700, 0xc002b3b, 0x3c03821, 0x1021,
-0x1440005b, 0x24020001, 0x10000065, 0x0,
-0x8f510018, 0x240200ff, 0x12220002, 0x8021,
-0x26300001, 0x8c020228, 0x1602000e, 0x1130c0,
-0x8f42033c, 0x24420001, 0xaf42033c, 0x8f42033c,
-0x8c020228, 0x3c040001, 0x248460f4, 0x3c050009,
-0xafa00014, 0xafa20010, 0x8fa60020, 0x1000003f,
-0x34a50100, 0xd71021, 0x8fa30020, 0x8fa40024,
-0xac4304c0, 0xac4404c4, 0xc01821, 0x8f440178,
-0x8f45017c, 0x1021, 0x24070004, 0xafa70010,
-0xafb00014, 0x8f48000c, 0x24c604c0, 0x2e63021,
-0xafa80018, 0x8f48010c, 0x24070008, 0xa32821,
-0xa3482b, 0x822021, 0x100f809, 0x892021,
-0x1440000b, 0x24070008, 0x8f820120, 0xafa20010,
-0x8f820124, 0x3c040001, 0x248460fc, 0x3c050009,
-0xafa20014, 0x8fa60020, 0x1000001c, 0x34a50200,
-0x8f440160, 0x8f450164, 0x8f43000c, 0xaf500018,
-0x8f860120, 0x24020010, 0xafa20010, 0xafb00014,
-0xafa30018, 0x8f42010c, 0x40f809, 0x24c6001c,
-0x54400011, 0x24020001, 0x8f420340, 0x24420001,
-0xaf420340, 0x8f420340, 0x8f820120, 0xafa20010,
-0x8f820124, 0x3c040001, 0x24846104, 0x3c050009,
-0xafa20014, 0x8fa60020, 0x34a50300, 0xc002b3b,
-0x2203821, 0x1021, 0x1040000d, 0x24020001,
-0x8f4202e8, 0xa34005c6, 0xaf4001b0, 0x24420001,
-0xaf4202e8, 0x8f4202e8, 0x8ee20150, 0x24420001,
-0xaee20150, 0x10000003, 0x8ee20150, 0x24020001,
-0xa34205c6, 0x8fbf0048, 0x8fbe0044, 0x8fb50040,
-0x8fb3003c, 0x8fb20038, 0x8fb10034, 0x8fb00030,
-0x3e00008, 0x27bd0050, 0x27bdffd8, 0xafbf0020,
-0x8f8200b0, 0x30420004, 0x10400068, 0x0,
-0x8f430128, 0x8f820104, 0x14620005, 0x0,
-0x8f430130, 0x8f8200b4, 0x10620006, 0x0,
-0x8f820104, 0xaf420128, 0x8f8200b4, 0x1000005b,
-0xaf420130, 0x8f8200b0, 0x3c030080, 0x431024,
-0x1040000d, 0x0, 0x8f82011c, 0x34420002,
-0xaf82011c, 0x8f8200b0, 0x2403fffb, 0x431024,
-0xaf8200b0, 0x8f82011c, 0x2403fffd, 0x431024,
-0x1000004a, 0xaf82011c, 0x8f430128, 0x8f820104,
-0x14620005, 0x0, 0x8f430130, 0x8f8200b4,
-0x10620010, 0x0, 0x8f820104, 0xaf420128,
-0x8f8200b4, 0x8f430128, 0xaf420130, 0xafa30010,
-0x8f420130, 0x3c040001, 0x24846144, 0xafa20014,
-0x8f86011c, 0x8f8700b0, 0x3c050005, 0x10000031,
-0x34a50900, 0x8f420128, 0xafa20010, 0x8f420130,
-0x3c040001, 0x24846150, 0xafa20014, 0x8f86011c,
-0x8f8700b0, 0x3c050005, 0xc002b3b, 0x34a51000,
-0x8f82011c, 0x34420002, 0xaf82011c, 0x8f830104,
-0x8f8200b0, 0x34420001, 0xaf8200b0, 0x24020008,
-0xaf830104, 0xafa20010, 0xafa00014, 0x8f42000c,
-0x8c040208, 0x8c05020c, 0xafa20018, 0x8f42010c,
-0x26e60028, 0x40f809, 0x24070400, 0x8f82011c,
-0x2403fffd, 0x431024, 0xaf82011c, 0x8ee201dc,
-0x24420001, 0xaee201dc, 0x8ee201dc, 0x8f420128,
-0xafa20010, 0x8f420130, 0x3c040001, 0x2484615c,
-0xafa20014, 0x8f86011c, 0x8f8700b0, 0x3c050005,
-0x34a51100, 0xc002b3b, 0x0, 0x8f8200a0,
-0x30420004, 0x10400069, 0x0, 0x8f43012c,
-0x8f820124, 0x14620005, 0x0, 0x8f430134,
-0x8f8200a4, 0x10620006, 0x0, 0x8f820124,
-0xaf42012c, 0x8f8200a4, 0x1000005c, 0xaf420134,
-0x8f8200a0, 0x3c030080, 0x431024, 0x1040000d,
-0x0, 0x8f82011c, 0x34420002, 0xaf82011c,
-0x8f8200a0, 0x2403fffb, 0x431024, 0xaf8200a0,
-0x8f82011c, 0x2403fffd, 0x431024, 0x1000004b,
-0xaf82011c, 0x8f43012c, 0x8f820124, 0x14620005,
-0x0, 0x8f430134, 0x8f8200a4, 0x10620010,
-0x0, 0x8f820124, 0xaf42012c, 0x8f8200a4,
-0x8f43012c, 0xaf420134, 0xafa30010, 0x8f420134,
-0x3c040001, 0x24846168, 0xafa20014, 0x8f86011c,
-0x8f8700a0, 0x3c050005, 0x10000032, 0x34a51200,
-0x8f42012c, 0xafa20010, 0x8f420134, 0x3c040001,
-0x24846174, 0xafa20014, 0x8f86011c, 0x8f8700a0,
-0x3c050005, 0xc002b3b, 0x34a51300, 0x8f82011c,
-0x34420002, 0xaf82011c, 0x8f830124, 0x8f8200a0,
-0x34420001, 0xaf8200a0, 0x24020080, 0xaf830124,
-0xafa20010, 0xafa00014, 0x8f420014, 0x8c040208,
-0x8c05020c, 0xafa20018, 0x8f420108, 0x3c060001,
-0x24c66ed8, 0x40f809, 0x24070004, 0x8f82011c,
-0x2403fffd, 0x431024, 0xaf82011c, 0x8ee201dc,
-0x24420001, 0xaee201dc, 0x8ee201dc, 0x8f42012c,
-0xafa20010, 0x8f420134, 0x3c040001, 0x24846180,
-0xafa20014, 0x8f86011c, 0x8f8700a0, 0x3c050005,
-0x34a51400, 0xc002b3b, 0x0, 0x8fbf0020,
-0x3e00008, 0x27bd0028, 0x3c081000, 0x24070001,
-0x3c060080, 0x3c050100, 0x8f820070, 0x481024,
-0x1040fffd, 0x0, 0x8f820054, 0x24420005,
-0xaf820078, 0x8c040234, 0x10800016, 0x1821,
-0x3c020001, 0x571021, 0x8c4240e8, 0x24420005,
-0x3c010001, 0x370821, 0xac2240e8, 0x3c020001,
-0x571021, 0x8c4240e8, 0x44102b, 0x14400009,
-0x0, 0x3c030080, 0x3c010001, 0x370821,
-0xac2040e8, 0x3c010001, 0x370821, 0x1000000b,
-0xa02740f0, 0x3c020001, 0x571021, 0x904240f0,
-0x54400006, 0x661825, 0x3c020001, 0x571021,
-0x904240f1, 0x54400001, 0x661825, 0x8c040230,
-0x10800013, 0x0, 0x3c020001, 0x571021,
-0x8c4240ec, 0x24420005, 0x3c010001, 0x370821,
-0xac2240ec, 0x3c020001, 0x571021, 0x8c4240ec,
-0x44102b, 0x14400006, 0x0, 0x3c010001,
-0x370821, 0xac2040ec, 0x10000006, 0x651825,
-0x3c020001, 0x571021, 0x904240f2, 0x54400001,
-0x651825, 0x1060ffbc, 0x0, 0x8f420000,
-0x10400007, 0x0, 0xaf80004c, 0x8f82004c,
-0x1040fffd, 0x0, 0x10000005, 0x0,
-0xaf800048, 0x8f820048, 0x1040fffd, 0x0,
-0x8f820060, 0x431025, 0xaf820060, 0x8f420000,
-0x10400003, 0x0, 0x1000ffa7, 0xaf80004c,
-0x1000ffa5, 0xaf800048, 0x3e00008, 0x0,
-0x0, 0x0, 0x0, 0x27bdffe0,
-0xafbf0018, 0x8f860064, 0x30c20004, 0x10400025,
-0x24040004, 0x8c020114, 0xaf420020, 0xaf840064,
-0x8f4202fc, 0x24420001, 0xaf4202fc, 0x8f4202fc,
-0x8f820064, 0x30420004, 0x14400005, 0x0,
-0x8c030114, 0x8f420020, 0x1462fff2, 0x0,
-0x8f420000, 0x10400007, 0x8f43003c, 0xaf80004c,
-0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
-0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
-0x0, 0x8f820060, 0x431025, 0xaf820060,
-0x8f420000, 0x10400073, 0x0, 0x1000006f,
-0x0, 0x30c20008, 0x10400020, 0x24040008,
-0x8c02011c, 0xaf420048, 0xaf840064, 0x8f4202a8,
-0x24420001, 0xaf4202a8, 0x8f4202a8, 0x8f820064,
-0x30420008, 0x14400005, 0x0, 0x8c03011c,
-0x8f420048, 0x1462fff2, 0x0, 0x8f420000,
-0x10400007, 0x0, 0xaf80004c, 0x8f82004c,
-0x1040fffd, 0x0, 0x10000005, 0x0,
-0xaf800048, 0x8f820048, 0x1040fffd, 0x0,
-0x8f820060, 0x1000ffd9, 0x34420200, 0x30c20020,
-0x10400023, 0x24040020, 0x8c02012c, 0xaf420068,
-0xaf840064, 0x8f4202d8, 0x24420001, 0xaf4202d8,
-0x8f4202d8, 0x8f820064, 0x30420020, 0x14400005,
-0x32c24000, 0x8c03012c, 0x8f420068, 0x1462fff2,
-0x32c24000, 0x14400002, 0x3c020001, 0x2c2b025,
-0x8f420000, 0x10400007, 0x0, 0xaf80004c,
-0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
-0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
-0x0, 0x8f820060, 0x1000ffb4, 0x34420800,
-0x30c20010, 0x10400029, 0x24040010, 0x8c020124,
-0xaf420058, 0xaf840064, 0x8f4202d4, 0x24420001,
-0xaf4202d4, 0x8f4202d4, 0x8f820064, 0x30420010,
-0x14400005, 0x32c22000, 0x8c030124, 0x8f420058,
-0x1462fff2, 0x32c22000, 0x50400001, 0x36d68000,
-0x8f420000, 0x10400007, 0x0, 0xaf80004c,
-0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
-0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
-0x0, 0x8f820060, 0x34420100, 0xaf820060,
-0x8f420000, 0x10400003, 0x0, 0x1000006c,
-0xaf80004c, 0x1000006a, 0xaf800048, 0x30c20001,
-0x10400004, 0x24020001, 0xaf820064, 0x10000064,
-0x0, 0x30c20002, 0x1440000b, 0x3c050003,
-0x3c040001, 0x24846244, 0x34a50500, 0x3821,
-0xafa00010, 0xc002b3b, 0xafa00014, 0x2402ffc0,
-0x10000057, 0xaf820064, 0x8c05022c, 0x8c02010c,
-0x10a20048, 0x51080, 0x8c460300, 0x24a20001,
-0x3045003f, 0x24020003, 0xac05022c, 0x61e02,
-0x10620005, 0x24020010, 0x1062001d, 0x30c20fff,
-0x10000039, 0x0, 0x8f4302a8, 0x8f440000,
-0x30c20fff, 0xaf420048, 0x24630001, 0xaf4302a8,
-0x10800007, 0x8f4202a8, 0xaf80004c, 0x8f82004c,
-0x1040fffd, 0x0, 0x10000005, 0x0,
-0xaf800048, 0x8f820048, 0x1040fffd, 0x0,
-0x8f820060, 0x34420200, 0xaf820060, 0x8f420000,
-0x1040001f, 0x0, 0x1000001b, 0x0,
-0xaf420058, 0x32c22000, 0x50400001, 0x36d68000,
-0x8f4202d4, 0x8f430000, 0x24420001, 0xaf4202d4,
-0x10600007, 0x8f4202d4, 0xaf80004c, 0x8f82004c,
-0x1040fffd, 0x0, 0x10000005, 0x0,
-0xaf800048, 0x8f820048, 0x1040fffd, 0x0,
-0x8f820060, 0x34420100, 0xaf820060, 0x8f420000,
-0x10400003, 0x0, 0x10000006, 0xaf80004c,
-0x10000004, 0xaf800048, 0xc002196, 0xc02021,
-0x402821, 0x8c02010c, 0x14a20002, 0x24020002,
-0xaf820064, 0x8f820064, 0x30420002, 0x14400004,
-0x0, 0x8c02010c, 0x14a2ffac, 0x0,
-0x8fbf0018, 0x3e00008, 0x27bd0020, 0x3e00008,
-0x0, 0x27bdffa0, 0xafb00040, 0x808021,
-0x101602, 0x2442ffff, 0x304300ff, 0x2c620013,
-0xafbf0058, 0xafbe0054, 0xafb50050, 0xafb3004c,
-0xafb20048, 0xafb10044, 0x104001f3, 0xafa50034,
-0x31080, 0x3c010001, 0x220821, 0x8c226288,
-0x400008, 0x0, 0x101302, 0x30440fff,
-0x24020001, 0x10820005, 0x24020002, 0x1082000c,
-0x2402fffe, 0x10000024, 0x3c050003, 0x8f430004,
-0x3c020001, 0x8c426f04, 0xaf440200, 0xaf440204,
-0x3c040001, 0x8c846e80, 0x10000009, 0x34630001,
-0x8f430004, 0xaf440200, 0xaf440204, 0x3c040001,
-0x8c846e80, 0x621824, 0x3c020001, 0x2442ca28,
-0x21100, 0x21182, 0xaf430004, 0x3c030800,
-0x431025, 0xac820038, 0x8f840054, 0x41442,
-0x41c82, 0x431021, 0x41cc2, 0x431023,
-0x41d02, 0x431021, 0x41d42, 0x431023,
-0x10000009, 0xaf420208, 0x3c040001, 0x24846250,
-0x34a51000, 0x2003021, 0x3821, 0xafa00010,
-0xc002b3b, 0xafa00014, 0x8f4202a0, 0x24420001,
-0xaf4202a0, 0x1000021f, 0x8f4202a0, 0x27b00028,
-0x2002021, 0x24050210, 0xc002bbf, 0x24060008,
-0xc002518, 0x2002021, 0x10000216, 0x0,
-0x8faa0034, 0x27a40028, 0xa1880, 0x25420001,
-0x3042003f, 0xafa20034, 0x8c650300, 0x8faa0034,
-0x21080, 0x8c430300, 0x25420001, 0x3042003f,
-0xafa20034, 0xac02022c, 0xafa50028, 0xc002518,
-0xafa3002c, 0x10000203, 0x0, 0x27b00028,
-0x2002021, 0x24050210, 0xc002bbf, 0x24060008,
-0xc002657, 0x2002021, 0x100001fa, 0x0,
-0x8faa0034, 0x27a40028, 0xa1880, 0x25420001,
-0x3042003f, 0xafa20034, 0x8c650300, 0x8faa0034,
-0x21080, 0x8c430300, 0x25420001, 0x3042003f,
-0xafa20034, 0xac02022c, 0xafa50028, 0xc002657,
-0xafa3002c, 0x100001e7, 0x0, 0x101302,
-0x30430fff, 0x24020001, 0x10620005, 0x24020002,
-0x1062001e, 0x3c020002, 0x10000033, 0x3c050003,
-0x3c030002, 0x2c31024, 0x54400037, 0x2c3b025,
-0x8f820228, 0x3c010001, 0x370821, 0xac2238d8,
-0x8f82022c, 0x3c010001, 0x370821, 0xac2238dc,
-0x8f820230, 0x3c010001, 0x370821, 0xac2238e0,
-0x8f820234, 0x3c010001, 0x370821, 0xac2238e4,
-0x2402ffff, 0xaf820228, 0xaf82022c, 0xaf820230,
-0xaf820234, 0x10000020, 0x2c3b025, 0x2c21024,
-0x10400012, 0x3c02fffd, 0x3c020001, 0x571021,
-0x8c4238d8, 0xaf820228, 0x3c020001, 0x571021,
-0x8c4238dc, 0xaf82022c, 0x3c020001, 0x571021,
-0x8c4238e0, 0xaf820230, 0x3c020001, 0x571021,
-0x8c4238e4, 0xaf820234, 0x3c02fffd, 0x3442ffff,
-0x10000009, 0x2c2b024, 0x3c040001, 0x2484625c,
-0x34a51100, 0x2003021, 0x3821, 0xafa00010,
-0xc002b3b, 0xafa00014, 0x8f4202cc, 0x24420001,
-0xaf4202cc, 0x1000019f, 0x8f4202cc, 0x101302,
-0x30450fff, 0x24020001, 0x10a20005, 0x24020002,
-0x10a2000d, 0x3c0408ff, 0x10000014, 0x3c050003,
-0x3c0208ff, 0x3442ffff, 0x8f830220, 0x3c040004,
-0x2c4b025, 0x621824, 0x34630008, 0xaf830220,
-0x10000012, 0xaf450298, 0x3484fff7, 0x3c03fffb,
-0x8f820220, 0x3463ffff, 0x2c3b024, 0x441024,
-0xaf820220, 0x10000009, 0xaf450298, 0x3c040001,
-0x24846268, 0x34a51200, 0x2003021, 0x3821,
-0xafa00010, 0xc002b3b, 0xafa00014, 0x8f4202bc,
-0x24420001, 0xaf4202bc, 0x10000176, 0x8f4202bc,
-0x27840208, 0x24050200, 0xc002bbf, 0x24060008,
-0x27440224, 0x24050200, 0xc002bbf, 0x24060008,
-0x8f4202c4, 0x24420001, 0xaf4202c4, 0x10000169,
-0x8f4202c4, 0x101302, 0x30430fff, 0x24020001,
-0x10620011, 0x28620002, 0x50400005, 0x24020002,
-0x10600007, 0x0, 0x10000017, 0x0,
-0x1062000f, 0x0, 0x10000013, 0x0,
-0x8c060248, 0x2021, 0xc005104, 0x24050004,
-0x10000007, 0x0, 0x8c060248, 0x2021,
-0xc005104, 0x24050004, 0x10000010, 0x0,
-0x8c06024c, 0x2021, 0xc005104, 0x24050001,
-0x1000000a, 0x0, 0x3c040001, 0x24846274,
-0x3c050003, 0x34a51300, 0x2003021, 0x3821,
-0xafa00010, 0xc002b3b, 0xafa00014, 0x8f4202c0,
-0x24420001, 0xaf4202c0, 0x1000013a, 0x8f4202c0,
-0xc002426, 0x0, 0x10000136, 0x0,
-0x24020001, 0xa34205c5, 0x24100100, 0x8f4401a8,
-0x8f4501ac, 0xafb00010, 0xafa00014, 0x8f420014,
-0xafa20018, 0x8f420108, 0x26e60028, 0x40f809,
-0x24070400, 0x1040fff5, 0x0, 0x10000125,
-0x0, 0x3c03ffff, 0x34637fff, 0x8f420368,
-0x8f440360, 0x2c3b024, 0x1821, 0xaf400058,
-0xaf40005c, 0xaf400060, 0xaf400064, 0x441023,
-0xaf420368, 0x3c020900, 0xaf400360, 0xafa20020,
-0x8f5e0018, 0x27aa0020, 0x240200ff, 0x13c20002,
-0xafaa003c, 0x27c30001, 0x8c020228, 0x609021,
-0x1642000e, 0x1e38c0, 0x8f42033c, 0x24420001,
-0xaf42033c, 0x8f42033c, 0x8c020228, 0x3c040001,
-0x2484620c, 0x3c050009, 0xafa00014, 0xafa20010,
-0x8fa60020, 0x1000006b, 0x34a50500, 0xf71021,
-0x8fa30020, 0x8fa40024, 0xac4304c0, 0xac4404c4,
-0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
-0x2c4203e9, 0x1040001b, 0x9821, 0xe08821,
-0x263504c0, 0x8f440178, 0x8f45017c, 0x2201821,
-0x240a0004, 0xafaa0010, 0xafb20014, 0x8f48000c,
-0x1021, 0x2f53021, 0xafa80018, 0x8f48010c,
-0x24070008, 0xa32821, 0xa3482b, 0x822021,
-0x100f809, 0x892021, 0x54400006, 0x24130001,
-0x8f820054, 0x2021023, 0x2c4203e9, 0x1440ffe9,
-0x0, 0x326200ff, 0x54400017, 0xaf520018,
-0x8f420378, 0x24420001, 0xaf420378, 0x8f420378,
-0x8f820120, 0x8faa003c, 0xafa20010, 0x8f820124,
-0x3c040001, 0x24846218, 0x3c050009, 0xafa20014,
-0x8d460000, 0x10000033, 0x34a50600, 0x8f420308,
-0x24130001, 0x24420001, 0xaf420308, 0x8f420308,
-0x1000001c, 0x326200ff, 0x8f830054, 0x8f820054,
-0x247003e8, 0x2021023, 0x2c4203e9, 0x10400014,
-0x9821, 0x24110010, 0x8f42000c, 0x8f440160,
-0x8f450164, 0x8f860120, 0xafb10010, 0xafb20014,
-0xafa20018, 0x8f42010c, 0x24070008, 0x40f809,
-0x24c6001c, 0x1440ffe5, 0x0, 0x8f820054,
-0x2021023, 0x2c4203e9, 0x1440ffef, 0x0,
-0x326200ff, 0x14400011, 0x0, 0x8f420378,
-0x24420001, 0xaf420378, 0x8f420378, 0x8f820120,
-0x8faa003c, 0xafa20010, 0x8f820124, 0x3c040001,
-0x24846220, 0x3c050009, 0xafa20014, 0x8d460000,
-0x34a50700, 0xc002b3b, 0x3c03821, 0x8f4202b0,
-0x24420001, 0xaf4202b0, 0x8f4202b0, 0x8f4202f8,
-0x24420001, 0xaf4202f8, 0x1000008a, 0x8f4202f8,
-0x8c02025c, 0x27440224, 0xaf4201f0, 0x8c020260,
-0x24050200, 0x24060008, 0xc002bbf, 0xaf4201f8,
-0x8f820220, 0x30420008, 0x14400002, 0x24020001,
-0x24020002, 0xaf420298, 0x8f4202ac, 0x24420001,
-0xaf4202ac, 0x10000077, 0x8f4202ac, 0x3c0200ff,
-0x3442ffff, 0x2021824, 0x32c20180, 0x14400006,
-0x3402fffb, 0x43102b, 0x14400003, 0x0,
-0x1000006c, 0xaf4300bc, 0x3c040001, 0x24846280,
-0x3c050003, 0x34a51500, 0x2003021, 0x3821,
-0xafa00010, 0xc002b3b, 0xafa00014, 0x3c020700,
-0x34421000, 0x101e02, 0x621825, 0xafa30020,
-0x8f510018, 0x240200ff, 0x12220002, 0x8021,
-0x26300001, 0x8c020228, 0x1602000e, 0x1130c0,
-0x8f42033c, 0x24420001, 0xaf42033c, 0x8f42033c,
-0x8c020228, 0x3c040001, 0x248461f4, 0x3c050009,
-0xafa00014, 0xafa20010, 0x8fa60020, 0x1000003f,
-0x34a50100, 0xd71021, 0x8fa30020, 0x8fa40024,
-0xac4304c0, 0xac4404c4, 0xc01821, 0x8f440178,
-0x8f45017c, 0x1021, 0x24070004, 0xafa70010,
-0xafb00014, 0x8f48000c, 0x24c604c0, 0x2e63021,
-0xafa80018, 0x8f48010c, 0x24070008, 0xa32821,
-0xa3482b, 0x822021, 0x100f809, 0x892021,
-0x1440000b, 0x24070008, 0x8f820120, 0xafa20010,
-0x8f820124, 0x3c040001, 0x248461fc, 0x3c050009,
-0xafa20014, 0x8fa60020, 0x1000001c, 0x34a50200,
-0x8f440160, 0x8f450164, 0x8f43000c, 0xaf500018,
-0x8f860120, 0x24020010, 0xafa20010, 0xafb00014,
-0xafa30018, 0x8f42010c, 0x40f809, 0x24c6001c,
-0x14400010, 0x0, 0x8f420340, 0x24420001,
-0xaf420340, 0x8f420340, 0x8f820120, 0xafa20010,
-0x8f820124, 0x3c040001, 0x24846204, 0x3c050009,
-0xafa20014, 0x8fa60020, 0x34a50300, 0xc002b3b,
-0x2203821, 0x8f4202e0, 0x24420001, 0xaf4202e0,
-0x8f4202e0, 0x8f4202f0, 0x24420001, 0xaf4202f0,
-0x8f4202f0, 0x8fa20034, 0x8fbf0058, 0x8fbe0054,
-0x8fb50050, 0x8fb3004c, 0x8fb20048, 0x8fb10044,
-0x8fb00040, 0x3e00008, 0x27bd0060, 0x27bdfff8,
-0x2408ffff, 0x10a00014, 0x4821, 0x3c0aedb8,
-0x354a8320, 0x90870000, 0x24840001, 0x3021,
-0x1071026, 0x30420001, 0x10400002, 0x81842,
-0x6a1826, 0x604021, 0x24c60001, 0x2cc20008,
-0x1440fff7, 0x73842, 0x25290001, 0x125102b,
-0x1440fff0, 0x0, 0x1001021, 0x3e00008,
-0x27bd0008, 0x27bdffb0, 0xafbf0048, 0xafbe0044,
-0xafb50040, 0xafb3003c, 0xafb20038, 0xafb10034,
-0xafb00030, 0x8f870220, 0xafa70024, 0x8f870200,
-0xafa7002c, 0x8f820220, 0x3c0308ff, 0x3463ffff,
-0x431024, 0x34420004, 0xaf820220, 0x8f820200,
-0x3c03c0ff, 0x3463ffff, 0x431024, 0x34420004,
-0xaf820200, 0x8f530358, 0x8f55035c, 0x8f5e0360,
-0x8f470364, 0xafa70014, 0x8f470368, 0xafa7001c,
-0x8f4202d0, 0x274401c0, 0x24420001, 0xaf4202d0,
-0x8f5002d0, 0x8f510204, 0x8f520200, 0xc002ba8,
-0x24050400, 0xaf530358, 0xaf55035c, 0xaf5e0360,
-0x8fa70014, 0xaf470364, 0x8fa7001c, 0xaf470368,
-0xaf5002d0, 0xaf510204, 0xaf520200, 0x8c02025c,
-0x27440224, 0xaf4201f0, 0x8c020260, 0x24050200,
-0x24060008, 0xaf4201f8, 0x24020006, 0xc002bbf,
-0xaf4201f4, 0x3c023b9a, 0x3442ca00, 0xaf4201fc,
-0x240203e8, 0x24040002, 0x24030001, 0xaf420294,
-0xaf440290, 0xaf43029c, 0x8f820220, 0x30420008,
-0x10400004, 0x0, 0xaf430298, 0x10000003,
-0x3021, 0xaf440298, 0x3021, 0x3c030001,
-0x661821, 0x90636d00, 0x3461021, 0x24c60001,
-0xa043022c, 0x2cc2000f, 0x1440fff8, 0x3461821,
-0x24c60001, 0x8f820040, 0x24040080, 0x24050080,
-0x21702, 0x24420030, 0xa062022c, 0x3461021,
-0xc002ba8, 0xa040022c, 0x8fa70024, 0x30e20004,
-0x14400006, 0x0, 0x8f820220, 0x3c0308ff,
-0x3463fffb, 0x431024, 0xaf820220, 0x8fa7002c,
-0x30e20004, 0x14400006, 0x0, 0x8f820200,
-0x3c03c0ff, 0x3463fffb, 0x431024, 0xaf820200,
-0x8fbf0048, 0x8fbe0044, 0x8fb50040, 0x8fb3003c,
-0x8fb20038, 0x8fb10034, 0x8fb00030, 0x3e00008,
-0x27bd0050, 0x0, 0x0, 0xaf400104,
-0x24040001, 0x410c0, 0x2e21821, 0x24820001,
-0x3c010001, 0x230821, 0xa42234d0, 0x402021,
-0x2c820080, 0x1440fff8, 0x410c0, 0x24020001,
-0x3c010001, 0x370821, 0xa42038d0, 0xaf420100,
-0xaf800228, 0xaf80022c, 0xaf800230, 0xaf800234,
-0x3e00008, 0x0, 0x27bdffe8, 0xafbf0014,
-0xafb00010, 0x8f420104, 0x28420005, 0x10400026,
-0x808021, 0x3c020001, 0x8f430104, 0x344230d0,
-0x2e22021, 0x318c0, 0x621821, 0x2e31821,
-0x83102b, 0x10400015, 0x1021, 0x96070000,
-0x24840006, 0x24660006, 0x9482fffc, 0x14470009,
-0x2821, 0x9483fffe, 0x96020002, 0x14620006,
-0xa01021, 0x94820000, 0x96030004, 0x431026,
-0x2c450001, 0xa01021, 0x14400009, 0x24840008,
-0x86102b, 0x1440fff0, 0x1021, 0x304200ff,
-0x14400030, 0x24020001, 0x1000002e, 0x1021,
-0x1000fffa, 0x24020001, 0x2002021, 0xc00240c,
-0x24050006, 0x3042007f, 0x218c0, 0x2e31021,
-0x3c010001, 0x220821, 0x942230d0, 0x1040fff2,
-0x2e31021, 0x3c060001, 0xc23021, 0x94c630d0,
-0x10c0ffed, 0x3c080001, 0x350834d2, 0x96070000,
-0x610c0, 0x572021, 0x882021, 0x94820000,
-0x14470009, 0x2821, 0x94830002, 0x96020002,
-0x14620006, 0xa01021, 0x94820004, 0x96030004,
-0x431026, 0x2c450001, 0xa01021, 0x14400007,
-0x610c0, 0x2e21021, 0x3c060001, 0xc23021,
-0x94c634d0, 0x14c0ffeb, 0x610c0, 0x10c0ffd2,
-0x24020001, 0x8fbf0014, 0x8fb00010, 0x3e00008,
-0x27bd0018, 0x3e00008, 0x0, 0x27bdffb0,
-0x801021, 0xafb00030, 0x24500002, 0x2002021,
-0x24050006, 0xafb10034, 0x408821, 0xafbf0048,
-0xafbe0044, 0xafb50040, 0xafb3003c, 0xc00240c,
-0xafb20038, 0x3047007f, 0x710c0, 0x2e21021,
-0x3c050001, 0xa22821, 0x94a530d0, 0x50a0001c,
-0xa03021, 0x3c090001, 0x352934d2, 0x96280002,
-0x510c0, 0x572021, 0x892021, 0x94820000,
-0x14480009, 0x3021, 0x94830002, 0x96020002,
-0x14620006, 0xc01021, 0x94820004, 0x96030004,
-0x431026, 0x2c460001, 0xc01021, 0x14400007,
-0x510c0, 0x2e21021, 0x3c050001, 0xa22821,
-0x94a534d0, 0x14a0ffeb, 0x510c0, 0xa03021,
-0x10c00014, 0x610c0, 0x571821, 0x3c010001,
-0x230821, 0x8c2334d0, 0x571021, 0xafa30010,
-0x3c010001, 0x220821, 0x8c2234d4, 0x3c040001,
-0x24846394, 0xafa20014, 0x8e260000, 0x8e270004,
-0x3c050004, 0xc002b3b, 0x34a50400, 0x10000063,
-0x3c020800, 0x8f450100, 0x10a00006, 0x510c0,
-0x2e21021, 0x3c010001, 0x220821, 0x942234d0,
-0xaf420100, 0xa03021, 0x14c00011, 0x628c0,
-0x710c0, 0x2e21021, 0xafa70010, 0x3c010001,
-0x220821, 0x942230d0, 0x3c040001, 0x248463a0,
-0xafa20014, 0x8e260000, 0x8e270004, 0x3c050004,
-0xc002b3b, 0x34a50500, 0x10000048, 0x3c020800,
-0xb71821, 0x3c020001, 0x96040000, 0x344234d2,
-0x621821, 0xa4640000, 0x8e020002, 0x720c0,
-0xac620002, 0x2e41021, 0x3c030001, 0x621821,
-0x946330d0, 0x2e51021, 0x3c010001, 0x220821,
-0xa42334d0, 0x2e41021, 0x3c010001, 0x220821,
-0xa42630d0, 0x8f420104, 0x24420001, 0x28420080,
-0x1040000f, 0x3c020002, 0x8f420104, 0x3c040001,
-0x348430d2, 0x96030000, 0x210c0, 0x571021,
-0x441021, 0xa4430000, 0x8e030002, 0xac430002,
-0x8f420104, 0x24420001, 0xaf420104, 0x3c020002,
-0x2c21024, 0x10400011, 0x72142, 0x3c030001,
-0x346338d8, 0x24020003, 0x441023, 0x21080,
-0x572021, 0x832021, 0x571021, 0x431021,
-0x30e5001f, 0x8c430000, 0x24020001, 0xa21004,
-0x621825, 0x1000000c, 0xac830000, 0x24020003,
-0x441023, 0x21080, 0x5c2821, 0x5c1021,
-0x30e4001f, 0x8c430228, 0x24020001, 0x821004,
-0x621825, 0xaca30228, 0x3c020800, 0x34421000,
-0x1821, 0xafa20020, 0x8f5e0018, 0x27aa0020,
-0x240200ff, 0x13c20002, 0xafaa002c, 0x27c30001,
-0x8c020228, 0x609021, 0x1642000e, 0x1e38c0,
-0x8f42033c, 0x24420001, 0xaf42033c, 0x8f42033c,
-0x8c020228, 0x3c040001, 0x2484635c, 0x3c050009,
-0xafa00014, 0xafa20010, 0x8fa60020, 0x1000006b,
-0x34a50500, 0xf71021, 0x8fa30020, 0x8fa40024,
-0xac4304c0, 0xac4404c4, 0x8f830054, 0x8f820054,
-0x247003e8, 0x2021023, 0x2c4203e9, 0x1040001b,
-0x9821, 0xe08821, 0x263504c0, 0x8f440178,
-0x8f45017c, 0x2201821, 0x240a0004, 0xafaa0010,
-0xafb20014, 0x8f48000c, 0x1021, 0x2f53021,
-0xafa80018, 0x8f48010c, 0x24070008, 0xa32821,
-0xa3482b, 0x822021, 0x100f809, 0x892021,
-0x54400006, 0x24130001, 0x8f820054, 0x2021023,
-0x2c4203e9, 0x1440ffe9, 0x0, 0x326200ff,
-0x54400017, 0xaf520018, 0x8f420378, 0x24420001,
-0xaf420378, 0x8f420378, 0x8f820120, 0x8faa002c,
-0xafa20010, 0x8f820124, 0x3c040001, 0x24846368,
-0x3c050009, 0xafa20014, 0x8d460000, 0x10000033,
-0x34a50600, 0x8f420308, 0x24130001, 0x24420001,
-0xaf420308, 0x8f420308, 0x1000001c, 0x326200ff,
-0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
-0x2c4203e9, 0x10400014, 0x9821, 0x24110010,
-0x8f42000c, 0x8f440160, 0x8f450164, 0x8f860120,
-0xafb10010, 0xafb20014, 0xafa20018, 0x8f42010c,
-0x24070008, 0x40f809, 0x24c6001c, 0x1440ffe5,
-0x0, 0x8f820054, 0x2021023, 0x2c4203e9,
-0x1440ffef, 0x0, 0x326200ff, 0x14400011,
-0x0, 0x8f420378, 0x24420001, 0xaf420378,
-0x8f420378, 0x8f820120, 0x8faa002c, 0xafa20010,
-0x8f820124, 0x3c040001, 0x24846370, 0x3c050009,
-0xafa20014, 0x8d460000, 0x34a50700, 0xc002b3b,
-0x3c03821, 0x8f4202b4, 0x24420001, 0xaf4202b4,
-0x8f4202b4, 0x8f4202f4, 0x24420001, 0xaf4202f4,
-0x8f4202f4, 0x8fbf0048, 0x8fbe0044, 0x8fb50040,
-0x8fb3003c, 0x8fb20038, 0x8fb10034, 0x8fb00030,
-0x3e00008, 0x27bd0050, 0x27bdffa0, 0x801021,
-0xafb00040, 0x24500002, 0x2002021, 0x24050006,
-0xafb10044, 0x408821, 0xafbf0058, 0xafbe0054,
-0xafb50050, 0xafb3004c, 0xc00240c, 0xafb20048,
-0x3048007f, 0x810c0, 0x2e21021, 0x3c060001,
-0xc23021, 0x94c630d0, 0x10c0001c, 0x3821,
-0x3c0a0001, 0x354a34d2, 0x96290002, 0x610c0,
-0x572021, 0x8a2021, 0x94820000, 0x14490009,
-0x2821, 0x94830002, 0x96020002, 0x14620006,
-0xa01021, 0x94820004, 0x96030004, 0x431026,
-0x2c450001, 0xa01021, 0x14400008, 0x610c0,
-0xc03821, 0x2e21021, 0x3c060001, 0xc23021,
-0x94c634d0, 0x14c0ffea, 0x610c0, 0x14c00011,
-0xafa70028, 0x810c0, 0x2e21021, 0xafa80010,
-0x3c010001, 0x220821, 0x942230d0, 0x3c040001,
-0x248463ac, 0xafa20014, 0x8e260000, 0x8e270004,
-0x3c050004, 0xc002b3b, 0x34a50900, 0x10000075,
-0x3c020800, 0x10e0000c, 0x610c0, 0x2e21021,
-0x3c030001, 0x621821, 0x946334d0, 0x710c0,
-0x2e21021, 0x3c010001, 0x220821, 0xa42334d0,
-0x1000000b, 0x3c040001, 0x2e21021, 0x3c030001,
-0x621821, 0x946334d0, 0x810c0, 0x2e21021,
-0x3c010001, 0x220821, 0xa42330d0, 0x3c040001,
-0x348430d0, 0x8f430100, 0x610c0, 0x2e21021,
-0x3c010001, 0x220821, 0xa42334d0, 0x8f420104,
-0x2e43821, 0x2821, 0x18400029, 0xaf460100,
-0x24e60006, 0x94c3fffc, 0x96020000, 0x14620009,
-0x2021, 0x94c3fffe, 0x96020002, 0x14620006,
-0x801021, 0x94c20000, 0x96030004, 0x431026,
-0x2c440001, 0x801021, 0x50400014, 0x24a50001,
-0x8f420104, 0x2442ffff, 0xa2102a, 0x1040000b,
-0x24e40004, 0x94820006, 0x8c830008, 0xa482fffe,
-0xac830000, 0x8f420104, 0x24a50001, 0x2442ffff,
-0xa2102a, 0x1440fff7, 0x24840008, 0x8f420104,
-0x2442ffff, 0x10000006, 0xaf420104, 0x8f420104,
-0x24c60008, 0xa2102a, 0x1440ffda, 0x24e70008,
-0x810c0, 0x2e21021, 0x3c010001, 0x220821,
-0x942230d0, 0x14400023, 0x3c020800, 0x3c020002,
-0x2c21024, 0x10400012, 0x82142, 0x3c030001,
-0x346338d8, 0x24020003, 0x441023, 0x21080,
-0x572021, 0x832021, 0x571021, 0x431021,
-0x3105001f, 0x24030001, 0x8c420000, 0xa31804,
-0x31827, 0x431024, 0x1000000d, 0xac820000,
-0x24020003, 0x441023, 0x21080, 0x5c2821,
-0x5c1021, 0x3104001f, 0x24030001, 0x8c420228,
-0x831804, 0x31827, 0x431024, 0xaca20228,
-0x3c020800, 0x34422000, 0x1821, 0xafa20020,
-0x8f5e0018, 0x27ab0020, 0x240200ff, 0x13c20002,
-0xafab0034, 0x27c30001, 0x8c020228, 0x609021,
-0x1642000e, 0x1e38c0, 0x8f42033c, 0x24420001,
-0xaf42033c, 0x8f42033c, 0x8c020228, 0x3c040001,
-0x2484635c, 0x3c050009, 0xafa00014, 0xafa20010,
-0x8fa60020, 0x1000006b, 0x34a50500, 0xf71021,
-0x8fa30020, 0x8fa40024, 0xac4304c0, 0xac4404c4,
-0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
-0x2c4203e9, 0x1040001b, 0x9821, 0xe08821,
-0x263504c0, 0x8f440178, 0x8f45017c, 0x2201821,
-0x240b0004, 0xafab0010, 0xafb20014, 0x8f48000c,
-0x1021, 0x2f53021, 0xafa80018, 0x8f48010c,
-0x24070008, 0xa32821, 0xa3482b, 0x822021,
-0x100f809, 0x892021, 0x54400006, 0x24130001,
-0x8f820054, 0x2021023, 0x2c4203e9, 0x1440ffe9,
-0x0, 0x326200ff, 0x54400017, 0xaf520018,
-0x8f420378, 0x24420001, 0xaf420378, 0x8f420378,
-0x8f820120, 0x8fab0034, 0xafa20010, 0x8f820124,
-0x3c040001, 0x24846368, 0x3c050009, 0xafa20014,
-0x8d660000, 0x10000033, 0x34a50600, 0x8f420308,
-0x24130001, 0x24420001, 0xaf420308, 0x8f420308,
-0x1000001c, 0x326200ff, 0x8f830054, 0x8f820054,
-0x247003e8, 0x2021023, 0x2c4203e9, 0x10400014,
-0x9821, 0x24110010, 0x8f42000c, 0x8f440160,
-0x8f450164, 0x8f860120, 0xafb10010, 0xafb20014,
-0xafa20018, 0x8f42010c, 0x24070008, 0x40f809,
-0x24c6001c, 0x1440ffe5, 0x0, 0x8f820054,
-0x2021023, 0x2c4203e9, 0x1440ffef, 0x0,
-0x326200ff, 0x14400011, 0x0, 0x8f420378,
-0x24420001, 0xaf420378, 0x8f420378, 0x8f820120,
-0x8fab0034, 0xafa20010, 0x8f820124, 0x3c040001,
-0x24846370, 0x3c050009, 0xafa20014, 0x8d660000,
-0x34a50700, 0xc002b3b, 0x3c03821, 0x8f4202b8,
-0x24420001, 0xaf4202b8, 0x8f4202b8, 0x8f4202f4,
-0x24420001, 0xaf4202f4, 0x8f4202f4, 0x8fbf0058,
-0x8fbe0054, 0x8fb50050, 0x8fb3004c, 0x8fb20048,
-0x8fb10044, 0x8fb00040, 0x3e00008, 0x27bd0060,
-0x0, 0x0, 0x0, 0x27bdffe0,
-0x27644000, 0xafbf0018, 0xc002ba8, 0x24051000,
-0x3c030001, 0x34632cc0, 0x3c040001, 0x34842ec8,
-0x24020020, 0xaf82011c, 0x2e31021, 0xaf800100,
-0xaf800104, 0xaf800108, 0xaf800110, 0xaf800114,
-0xaf800118, 0xaf800120, 0xaf800124, 0xaf800128,
-0xaf800130, 0xaf800134, 0xaf800138, 0xaf4200ec,
-0x2e31021, 0xaf4200f0, 0x2e41021, 0xaf4200f4,
-0x2e41021, 0xaf4200f8, 0x3c020001, 0x571021,
-0x904240f4, 0x1440001c, 0x3c050001, 0x8f82011c,
-0x3c040001, 0x24846470, 0x3c050001, 0x34420001,
-0xaf82011c, 0xafa00010, 0xafa00014, 0x8f86011c,
-0x34a50100, 0xc002b3b, 0x3821, 0x8c020218,
-0x30420040, 0x10400014, 0x0, 0x8f82011c,
-0x3c040001, 0x2484647c, 0x3c050001, 0x34420004,
-0xaf82011c, 0xafa00010, 0xafa00014, 0x8f86011c,
-0x10000007, 0x34a50200, 0x3c040001, 0x24846484,
-0xafa00010, 0xafa00014, 0x8f86011c, 0x34a50300,
-0xc002b3b, 0x3821, 0x8fbf0018, 0x3e00008,
-0x27bd0020, 0x8fa90010, 0x8f83012c, 0x8faa0014,
-0x8fab0018, 0x1060000a, 0x27624fe0, 0x14620002,
-0x24680020, 0x27684800, 0x8f820128, 0x11020004,
-0x0, 0x8f820124, 0x15020007, 0x0,
-0x8f430334, 0x1021, 0x24630001, 0xaf430334,
-0x10000039, 0x8f430334, 0xac640000, 0xac650004,
-0xac660008, 0xa467000e, 0xac690018, 0xac6a001c,
-0xac6b0010, 0xac620014, 0xaf880120, 0x8f4200fc,
-0x8f4400f4, 0x2442ffff, 0xaf4200fc, 0x8c820000,
-0x10490005, 0x3042ff8f, 0x10400019, 0x3122ff8f,
-0x10400018, 0x3c020001, 0x8c830004, 0x2c620010,
-0x10400013, 0x3c020001, 0x24630001, 0xac830004,
-0x8f4300f8, 0x344230c8, 0x2e21021, 0x54620004,
-0x24620008, 0x3c020001, 0x34422ec8, 0x2e21021,
-0x14440015, 0x24020001, 0x8f820128, 0x24420020,
-0xaf820128, 0x8f820128, 0x1000000f, 0x24020001,
-0x3c020001, 0x344230c8, 0x2e21021, 0x54820004,
-0x24820008, 0x3c020001, 0x34422ec8, 0x2e21021,
-0x402021, 0x24020001, 0xaf4400f4, 0xac890000,
-0xac820004, 0x24020001, 0x3e00008, 0x0,
-0x3e00008, 0x0, 0x8fa90010, 0x8f83010c,
-0x8faa0014, 0x8fab0018, 0x1060000a, 0x276247e0,
-0x14620002, 0x24680020, 0x27684000, 0x8f820108,
-0x11020004, 0x0, 0x8f820104, 0x15020007,
-0x0, 0x8f430338, 0x1021, 0x24630001,
-0xaf430338, 0x10000035, 0x8f430338, 0xac640000,
-0xac650004, 0xac660008, 0xa467000e, 0xac690018,
-0xac6a001c, 0xac6b0010, 0xac620014, 0xaf880100,
-0x8f4400ec, 0x8c820000, 0x30420006, 0x10400019,
-0x31220006, 0x10400018, 0x3c020001, 0x8c830004,
-0x2c620010, 0x10400013, 0x3c020001, 0x24630001,
-0xac830004, 0x8f4300f0, 0x34422ec0, 0x2e21021,
-0x54620004, 0x24620008, 0x3c020001, 0x34422cc0,
-0x2e21021, 0x14440015, 0x24020001, 0x8f820108,
-0x24420020, 0xaf820108, 0x8f820108, 0x1000000f,
-0x24020001, 0x3c020001, 0x34422ec0, 0x2e21021,
-0x54820004, 0x24820008, 0x3c020001, 0x34422cc0,
-0x2e21021, 0x402021, 0x24020001, 0xaf4400ec,
-0xac890000, 0xac820004, 0x24020001, 0x3e00008,
-0x0, 0x3e00008, 0x0, 0x27bdffd8,
-0x3c040001, 0x2484648c, 0x3c050001, 0xafbf0024,
-0xafb20020, 0xafb1001c, 0xafb00018, 0x8f900104,
-0x8f9100b0, 0x8f92011c, 0x34a52500, 0x8f820100,
-0x2403021, 0x2203821, 0xafa20010, 0xc002b3b,
-0xafb00014, 0x8e020008, 0xafa20010, 0x8e02000c,
-0x3c040001, 0x24846498, 0xafa20014, 0x8e060000,
-0x8e070004, 0x3c050001, 0xc002b3b, 0x34a52510,
-0x8e020018, 0xafa20010, 0x8e02001c, 0x3c040001,
-0x248464a4, 0xafa20014, 0x8e060010, 0x8e070014,
-0x3c050001, 0xc002b3b, 0x34a52520, 0x3c027f00,
-0x2221024, 0x3c030800, 0x54430016, 0x3c030200,
-0x8f82009c, 0x3042ffff, 0x14400012, 0x3c030200,
-0x3c040001, 0x248464b0, 0x3c050002, 0x34a5f030,
-0x3021, 0x3821, 0x36420002, 0xaf82011c,
-0x36220001, 0xaf8200b0, 0xaf900104, 0xaf92011c,
-0xafa00010, 0xc002b3b, 0xafa00014, 0x10000024,
-0x0, 0x2c31024, 0x1040000d, 0x2231024,
-0x1040000b, 0x36420002, 0xaf82011c, 0x36220001,
-0xaf8200b0, 0xaf900104, 0xaf92011c, 0x8f420330,
-0x24420001, 0xaf420330, 0x10000015, 0x8f420330,
-0x3c040001, 0x248464b8, 0x240202a9, 0xafa20010,
-0xafa00014, 0x8f860144, 0x3c070001, 0x24e764c0,
-0xc002b3b, 0x3405dead, 0x8f82011c, 0x34420002,
-0xaf82011c, 0x8f820220, 0x34420004, 0xaf820220,
-0x8f820140, 0x3c030001, 0x431025, 0xaf820140,
-0x8fbf0024, 0x8fb20020, 0x8fb1001c, 0x8fb00018,
-0x3e00008, 0x27bd0028, 0x27bdffd8, 0x3c040001,
-0x248464e8, 0x3c050001, 0xafbf0024, 0xafb20020,
-0xafb1001c, 0xafb00018, 0x8f900124, 0x8f9100a0,
-0x8f92011c, 0x34a52600, 0x8f820120, 0x2403021,
-0x2203821, 0xafa20010, 0xc002b3b, 0xafb00014,
-0x8e020008, 0xafa20010, 0x8e02000c, 0x3c040001,
-0x248464f4, 0xafa20014, 0x8e060000, 0x8e070004,
-0x3c050001, 0xc002b3b, 0x34a52610, 0x8e020018,
-0xafa20010, 0x8e02001c, 0x3c040001, 0x24846500,
-0xafa20014, 0x8e060010, 0x8e070014, 0x3c050001,
-0xc002b3b, 0x34a52620, 0x3c027f00, 0x2221024,
-0x3c030800, 0x54430016, 0x3c030200, 0x8f8200ac,
-0x3042ffff, 0x14400012, 0x3c030200, 0x3c040001,
-0x2484650c, 0x3c050001, 0x34a5f030, 0x3021,
-0x3821, 0x36420002, 0xaf82011c, 0x36220001,
-0xaf8200a0, 0xaf900124, 0xaf92011c, 0xafa00010,
-0xc002b3b, 0xafa00014, 0x10000024, 0x0,
-0x2c31024, 0x1040000d, 0x2231024, 0x1040000b,
-0x36420002, 0xaf82011c, 0x36220001, 0xaf8200a0,
-0xaf900124, 0xaf92011c, 0x8f42032c, 0x24420001,
-0xaf42032c, 0x10000015, 0x8f42032c, 0x3c040001,
-0x248464b8, 0x240202e2, 0xafa20010, 0xafa00014,
-0x8f860144, 0x3c070001, 0x24e764c0, 0xc002b3b,
-0x3405dead, 0x8f82011c, 0x34420002, 0xaf82011c,
-0x8f820220, 0x34420004, 0xaf820220, 0x8f820140,
-0x3c030001, 0x431025, 0xaf820140, 0x8fbf0024,
-0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x3e00008,
-0x27bd0028, 0x6021, 0x5021, 0x3021,
-0x2821, 0x6821, 0x4821, 0x7821,
-0x7021, 0x8f880124, 0x8f870104, 0x1580002e,
-0x8f8b011c, 0x11a00014, 0x31620800, 0x8f820120,
-0x10460029, 0x0, 0x3c040001, 0x8c846ee4,
-0x8cc20000, 0x8cc30004, 0xac820000, 0xac830004,
-0x8cc20008, 0xac820008, 0x94c2000e, 0xa482000e,
-0x8cc20010, 0x240c0001, 0xac820010, 0x8cc20014,
-0x10000012, 0x24c60020, 0x10400017, 0x0,
-0x3c040001, 0x8c846ee4, 0x8d020000, 0x8d030004,
-0xac820000, 0xac830004, 0x8d020008, 0xac820008,
-0x9502000e, 0xa482000e, 0x8d020010, 0x25060020,
-0xac820010, 0x8d020014, 0x240c0001, 0xc01821,
-0xac820014, 0x27624fe0, 0x43102b, 0x54400001,
-0x27634800, 0x603021, 0x1540002f, 0x31620100,
-0x11200014, 0x31628000, 0x8f820100, 0x1045002a,
-0x31620100, 0x3c040001, 0x8c846ee0, 0x8ca20000,
-0x8ca30004, 0xac820000, 0xac830004, 0x8ca20008,
-0xac820008, 0x94a2000e, 0xa482000e, 0x8ca20010,
-0x240a0001, 0xac820010, 0x8ca20014, 0x10000012,
-0x24a50020, 0x10400018, 0x31620100, 0x3c040001,
-0x8c846ee0, 0x8ce20000, 0x8ce30004, 0xac820000,
-0xac830004, 0x8ce20008, 0xac820008, 0x94e2000e,
-0xa482000e, 0x8ce20010, 0x24e50020, 0xac820010,
-0x8ce20014, 0x240a0001, 0xa01821, 0xac820014,
-0x276247e0, 0x43102b, 0x54400001, 0x27634000,
-0x602821, 0x31620100, 0x5440001d, 0x31621000,
-0x11a00009, 0x31a20800, 0x10400004, 0x25020020,
-0x8f8200a8, 0xa5e20000, 0x25020020, 0xaf820124,
-0x8f880124, 0x6821, 0x11800011, 0x31621000,
-0x3c040001, 0x8c846ee4, 0x8c820000, 0x8c830004,
-0xaf820080, 0xaf830084, 0x8c820008, 0xaf8200a4,
-0x9482000e, 0xaf8200ac, 0x8c820010, 0x6021,
-0xaf8200a0, 0x8c8d0010, 0x8c8f0014, 0x31621000,
-0x1440ff82, 0x0, 0x1120000f, 0x31220800,
-0x10400004, 0x3c020002, 0x8f8200b8, 0xa5c20000,
-0x3c020002, 0x1221024, 0x10400004, 0x24e20020,
-0x8f8200b4, 0xaf8200d4, 0x24e20020, 0xaf820104,
-0x8f870104, 0x4821, 0x1140ff70, 0x0,
-0x3c040001, 0x8c846ee0, 0x8c820000, 0x8c830004,
-0xaf820090, 0xaf830094, 0x8c820008, 0xaf8200b4,
-0x9482000e, 0xaf82009c, 0x8c820010, 0x5021,
-0xaf8200b0, 0x8c890010, 0x1000ff60, 0x8c8e0014,
-0x3e00008, 0x0, 0x6021, 0x5821,
-0x3021, 0x2821, 0x6821, 0x5021,
-0x7821, 0x7021, 0x8f880124, 0x8f870104,
-0x3c180100, 0x1580002e, 0x8f89011c, 0x11a00014,
-0x31220800, 0x8f820120, 0x10460029, 0x0,
-0x3c040001, 0x8c846ee4, 0x8cc20000, 0x8cc30004,
-0xac820000, 0xac830004, 0x8cc20008, 0xac820008,
-0x94c2000e, 0xa482000e, 0x8cc20010, 0x240c0001,
-0xac820010, 0x8cc20014, 0x10000012, 0x24c60020,
-0x10400017, 0x0, 0x3c040001, 0x8c846ee4,
-0x8d020000, 0x8d030004, 0xac820000, 0xac830004,
-0x8d020008, 0xac820008, 0x9502000e, 0xa482000e,
-0x8d020010, 0x25060020, 0xac820010, 0x8d020014,
-0x240c0001, 0xc01821, 0xac820014, 0x27624fe0,
-0x43102b, 0x54400001, 0x27634800, 0x603021,
-0x1560002f, 0x31220100, 0x11400014, 0x31228000,
-0x8f820100, 0x1045002a, 0x31220100, 0x3c040001,
-0x8c846ee0, 0x8ca20000, 0x8ca30004, 0xac820000,
-0xac830004, 0x8ca20008, 0xac820008, 0x94a2000e,
-0xa482000e, 0x8ca20010, 0x240b0001, 0xac820010,
-0x8ca20014, 0x10000012, 0x24a50020, 0x10400018,
-0x31220100, 0x3c040001, 0x8c846ee0, 0x8ce20000,
-0x8ce30004, 0xac820000, 0xac830004, 0x8ce20008,
-0xac820008, 0x94e2000e, 0xa482000e, 0x8ce20010,
-0x24e50020, 0xac820010, 0x8ce20014, 0x240b0001,
-0xa01821, 0xac820014, 0x276247e0, 0x43102b,
-0x54400001, 0x27634000, 0x602821, 0x31220100,
-0x5440001d, 0x31221000, 0x11a00009, 0x31a20800,
-0x10400004, 0x25020020, 0x8f8200a8, 0xa5e20000,
-0x25020020, 0xaf820124, 0x8f880124, 0x6821,
-0x11800011, 0x31221000, 0x3c040001, 0x8c846ee4,
-0x8c820000, 0x8c830004, 0xaf820080, 0xaf830084,
-0x8c820008, 0xaf8200a4, 0x9482000e, 0xaf8200ac,
-0x8c820010, 0x6021, 0xaf8200a0, 0x8c8d0010,
-0x8c8f0014, 0x31221000, 0x14400022, 0x0,
-0x1140000f, 0x31420800, 0x10400004, 0x3c020002,
-0x8f8200b8, 0xa5c20000, 0x3c020002, 0x1421024,
-0x10400004, 0x24e20020, 0x8f8200b4, 0xaf8200d4,
-0x24e20020, 0xaf820104, 0x8f870104, 0x5021,
-0x11600010, 0x0, 0x3c040001, 0x8c846ee0,
-0x8c820000, 0x8c830004, 0xaf820090, 0xaf830094,
-0x8c820008, 0xaf8200b4, 0x9482000e, 0xaf82009c,
-0x8c820010, 0x5821, 0xaf8200b0, 0x8c8a0010,
-0x8c8e0014, 0x8f820070, 0x3c031000, 0x431024,
-0x1040ff5c, 0x0, 0x8f820054, 0x24420005,
-0xaf820078, 0x8c040234, 0x10800016, 0x1821,
-0x3c020001, 0x571021, 0x8c4240e8, 0x24420005,
-0x3c010001, 0x370821, 0xac2240e8, 0x3c020001,
-0x571021, 0x8c4240e8, 0x44102b, 0x14400009,
-0x24020001, 0x3c030080, 0x3c010001, 0x370821,
-0xac2040e8, 0x3c010001, 0x370821, 0x1000000c,
-0xa02240f0, 0x3c020001, 0x571021, 0x904240f0,
-0x14400006, 0x3c020080, 0x3c020001, 0x571021,
-0x904240f1, 0x10400002, 0x3c020080, 0x621825,
-0x8c040230, 0x10800013, 0x0, 0x3c020001,
-0x571021, 0x8c4240ec, 0x24420005, 0x3c010001,
-0x370821, 0xac2240ec, 0x3c020001, 0x571021,
-0x8c4240ec, 0x44102b, 0x14400006, 0x0,
-0x3c010001, 0x370821, 0xac2040ec, 0x10000006,
-0x781825, 0x3c020001, 0x571021, 0x904240f2,
-0x54400001, 0x781825, 0x1060ff1a, 0x0,
-0x8f420000, 0x10400007, 0x0, 0xaf80004c,
-0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
-0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
-0x0, 0x8f820060, 0x431025, 0xaf820060,
-0x8f420000, 0x10400003, 0x0, 0x1000ff05,
-0xaf80004c, 0x1000ff03, 0xaf800048, 0x3e00008,
-0x0, 0x0, 0x0, 0x3c020001,
-0x8c426d28, 0x27bdffe8, 0xafbf0014, 0x14400012,
-0xafb00010, 0x3c100001, 0x26106f90, 0x2002021,
-0xc002ba8, 0x24052000, 0x26021fe0, 0x3c010001,
-0xac226eec, 0x3c010001, 0xac226ee8, 0xac020250,
-0x24022000, 0xac100254, 0xac020258, 0x24020001,
-0x3c010001, 0xac226d28, 0x8fbf0014, 0x8fb00010,
-0x3e00008, 0x27bd0018, 0x3c090001, 0x8d296eec,
-0x8c820000, 0x8fa30010, 0x8fa80014, 0xad220000,
-0x8c820004, 0xad250008, 0xad220004, 0x8f820054,
-0xad260010, 0xad270014, 0xad230018, 0xad28001c,
-0xad22000c, 0x2529ffe0, 0x3c020001, 0x24426f90,
-0x122102b, 0x10400003, 0x0, 0x3c090001,
-0x8d296ee8, 0x3c020001, 0x8c426d10, 0xad220000,
-0x3c020001, 0x8c426d10, 0x3c010001, 0xac296eec,
-0xad220004, 0xac090250, 0x3e00008, 0x0,
-0x27bdffd0, 0xafb00010, 0x3c100001, 0x8e106eec,
-0x3c020001, 0x8c426d10, 0xafb10014, 0x808821,
-0xafbe0024, 0x8fbe0040, 0x8fa40048, 0xafb20018,
-0xa09021, 0xafbf0028, 0xafb50020, 0xafb3001c,
-0xae020000, 0x3c020001, 0x8c426d10, 0xc09821,
-0xe0a821, 0x10800006, 0xae020004, 0x26050008,
-0xc002bb3, 0x24060018, 0x10000005, 0x2610ffe0,
-0x26040008, 0xc002ba8, 0x24050018, 0x2610ffe0,
-0x3c030001, 0x24636f90, 0x203102b, 0x10400003,
-0x0, 0x3c100001, 0x8e106ee8, 0x8e220000,
-0xae020000, 0x8e220004, 0xae120008, 0xae020004,
-0x8f820054, 0xae130010, 0xae150014, 0xae1e0018,
-0x8fa80044, 0xae08001c, 0xae02000c, 0x2610ffe0,
-0x203102b, 0x10400003, 0x0, 0x3c100001,
-0x8e106ee8, 0x3c020001, 0x8c426d10, 0xae020000,
-0x3c020001, 0x8c426d10, 0x3c010001, 0xac306eec,
-0xae020004, 0xac100250, 0x8fbf0028, 0x8fbe0024,
-0x8fb50020, 0x8fb3001c, 0x8fb20018, 0x8fb10014,
-0x8fb00010, 0x3e00008, 0x27bd0030, 0x851821,
-0x83102b, 0x10400006, 0x0, 0xac800000,
-0x24840004, 0x83102b, 0x5440fffd, 0xac800000,
-0x3e00008, 0x0, 0xa61821, 0xa3102b,
-0x10400007, 0x0, 0x8c820000, 0xaca20000,
-0x24a50004, 0xa3102b, 0x1440fffb, 0x24840004,
-0x3e00008, 0x0, 0x861821, 0x83102b,
-0x10400007, 0x0, 0x8ca20000, 0xac820000,
-0x24840004, 0x83102b, 0x1440fffb, 0x24a50004,
-0x3e00008, 0x0, 0x63080, 0x861821,
-0x83102b, 0x10400006, 0x0, 0xac850000,
-0x24840004, 0x83102b, 0x5440fffd, 0xac850000,
-0x3e00008, 0x0, 0x0, 0x26e50028,
-0xa03021, 0x274301c0, 0x8f4d0358, 0x8f47035c,
-0x8f480360, 0x8f490364, 0x8f4a0368, 0x8f4b0204,
-0x8f4c0200, 0x24640400, 0x64102b, 0x10400008,
-0x3c0208ff, 0x8cc20000, 0xac620000, 0x24630004,
-0x64102b, 0x1440fffb, 0x24c60004, 0x3c0208ff,
-0x3442ffff, 0x3c03c0ff, 0xaf4d0358, 0xaf47035c,
-0xaf480360, 0xaf490364, 0xaf4a0368, 0xaf4b0204,
-0xaf4c0200, 0x8f840220, 0x3463ffff, 0x8f860200,
-0x821024, 0x34420004, 0xc31824, 0x34630004,
-0xaf820220, 0xaf830200, 0x8ca20214, 0xac020084,
-0x8ca20218, 0xac020088, 0x8ca2021c, 0xac02008c,
-0x8ca20220, 0xac020090, 0x8ca20224, 0xac020094,
-0x8ca20228, 0xac020098, 0x8ca2022c, 0xac02009c,
-0x8ca20230, 0xac0200a0, 0x8ca20234, 0xac0200a4,
-0x8ca20238, 0xac0200a8, 0x8ca2023c, 0xac0200ac,
-0x8ca20240, 0xac0200b0, 0x8ca20244, 0xac0200b4,
-0x8ca20248, 0xac0200b8, 0x8ca2024c, 0xac0200bc,
-0x8ca2001c, 0xac020080, 0x8ca20018, 0xac0200c0,
-0x8ca20020, 0xac0200cc, 0x8ca20024, 0xac0200d0,
-0x8ca201d0, 0xac0200e0, 0x8ca201d4, 0xac0200e4,
-0x8ca201d8, 0xac0200e8, 0x8ca201dc, 0xac0200ec,
-0x8ca201e0, 0xac0200f0, 0x8ca20098, 0x8ca3009c,
-0xac0300fc, 0x8ca200a8, 0x8ca300ac, 0xac0300f4,
-0x8ca200a0, 0x8ca300a4, 0x30840004, 0xac0300f8,
-0x14800007, 0x30c20004, 0x8f820220, 0x3c0308ff,
-0x3463fffb, 0x431024, 0xaf820220, 0x30c20004,
-0x14400006, 0x0, 0x8f820200, 0x3c03c0ff,
-0x3463fffb, 0x431024, 0xaf820200, 0x8f4202dc,
-0xa34005c5, 0x24420001, 0xaf4202dc, 0x8f4202dc,
-0x3e00008, 0x0, 0x27bdffd8, 0xafbf0024,
-0xafb00020, 0x8f430024, 0x8f420020, 0x10620038,
-0x0, 0x8f430020, 0x8f420024, 0x622023,
-0x4810003, 0x0, 0x8f420040, 0x822021,
-0x8f430030, 0x8f420024, 0x43102b, 0x14400005,
-0x0, 0x8f430040, 0x8f420024, 0x10000005,
-0x621023, 0x8f420030, 0x8f430024, 0x431023,
-0x2442ffff, 0x406021, 0x8c102a, 0x54400001,
-0x806021, 0x8f4a0024, 0x8f490040, 0x8f480024,
-0x8f440180, 0x8f450184, 0x8f460024, 0x8f4b001c,
-0x24070001, 0xafa70010, 0x84100, 0x1001821,
-0x14c5021, 0x2529ffff, 0x1498024, 0xafb00014,
-0x8f470014, 0x1021, 0x63100, 0xafa70018,
-0xa32821, 0xa3382b, 0x822021, 0x872021,
-0x8f420108, 0x1663021, 0x40f809, 0xc3900,
-0x54400001, 0xaf500024, 0x8f430024, 0x8f420020,
-0x14620018, 0x0, 0x8f420000, 0x10400007,
-0x0, 0xaf80004c, 0x8f82004c, 0x1040fffd,
-0x0, 0x10000005, 0x0, 0xaf800048,
-0x8f820048, 0x1040fffd, 0x0, 0x8f820060,
-0x2403ffef, 0x431024, 0xaf820060, 0x8f420000,
-0x10400003, 0x0, 0x10000002, 0xaf80004c,
-0xaf800048, 0x8fbf0024, 0x8fb00020, 0x3e00008,
-0x27bd0028, 0x3e00008, 0x0, 0x27bdffc0,
-0x32c20020, 0xafbf0038, 0xafb30034, 0xafb20030,
-0xafb1002c, 0x10400004, 0xafb00028, 0x8f530028,
-0x10000002, 0x0, 0x8f530020, 0x8f420030,
-0x105300eb, 0x21100, 0x8f43001c, 0x628021,
-0x8e040000, 0x8e050004, 0x96120008, 0x8f420090,
-0x9611000a, 0x3246ffff, 0x46102a, 0x10400017,
-0x0, 0x8f8200d8, 0x8f430098, 0x431023,
-0x2442dcbe, 0xaf420090, 0x8f420090, 0x2842dcbf,
-0x10400005, 0x0, 0x8f420090, 0x8f430144,
-0x431021, 0xaf420090, 0x8f420090, 0x46102a,
-0x10400006, 0x0, 0x8f420348, 0x24420001,
-0xaf420348, 0x100000e1, 0x8f420348, 0x8f8200fc,
-0x14400006, 0x0, 0x8f420344, 0x24420001,
-0xaf420344, 0x100000d9, 0x8f420344, 0x934205c2,
-0x1040000b, 0x32c20008, 0x10400008, 0x32220200,
-0x10400006, 0x3c034000, 0x9602000e, 0xaf4300ac,
-0x21400, 0x10000002, 0xaf4200b0, 0xaf4000ac,
-0x32220004, 0x1040007f, 0x32220800, 0x10400003,
-0x3247ffff, 0x10000002, 0x24020020, 0x24020004,
-0xafa20010, 0x8f420030, 0xafa20014, 0x8f420010,
-0x3c030002, 0x431025, 0xafa20018, 0x8f460098,
-0x8f420108, 0x40f809, 0x0, 0x104000b7,
-0x0, 0x8f42009c, 0x8f430094, 0x2421021,
-0xaf42009c, 0xae03000c, 0x8f4200ac, 0x10400008,
-0x3c034000, 0x8f420094, 0x431025, 0xafa20020,
-0x8f42009c, 0x8f4300b0, 0x10000004, 0x431025,
-0x8f420094, 0xafa20020, 0x8f42009c, 0xafa20024,
-0x8f8200fc, 0x8fa30020, 0x8fa40024, 0xac430000,
-0xac440004, 0x24420008, 0xaf8200f0, 0x8f42009c,
-0x8f440270, 0x8f450274, 0x401821, 0x1021,
-0xa32821, 0xa3302b, 0x822021, 0x862021,
-0x32230060, 0x24020040, 0xaf440270, 0xaf450274,
-0x10620017, 0x2c620041, 0x10400005, 0x24020020,
-0x10620008, 0x24020001, 0x10000026, 0x0,
-0x24020060, 0x10620019, 0x24020001, 0x10000021,
-0x0, 0x8f420278, 0x8f43027c, 0x24630001,
-0x2c640001, 0x441021, 0xaf420278, 0xaf43027c,
-0x8f420278, 0x8f43027c, 0x10000016, 0x24020001,
-0x8f420280, 0x8f430284, 0x24630001, 0x2c640001,
-0x441021, 0xaf420280, 0xaf430284, 0x8f420280,
-0x8f430284, 0x1000000b, 0x24020001, 0x8f420288,
-0x8f43028c, 0x24630001, 0x2c640001, 0x441021,
-0xaf420288, 0xaf43028c, 0x8f420288, 0x8f43028c,
-0x24020001, 0xa34205c2, 0x8f420098, 0x3244ffff,
-0x2406fff8, 0x8f45013c, 0x441021, 0x24420007,
-0x461024, 0x24840007, 0xaf420094, 0x8f420090,
-0x8f430094, 0x862024, 0x441023, 0x65182b,
-0x14600005, 0xaf420090, 0x8f420094, 0x8f430144,
-0x431023, 0xaf420094, 0x8f420094, 0x10000023,
-0xaf40009c, 0x3247ffff, 0x50e00022, 0x32c20020,
-0x14400002, 0x24020010, 0x24020002, 0xafa20010,
-0x8f420030, 0xafa20014, 0x8f420010, 0xafa20018,
-0x8f460098, 0x8f420108, 0x40f809, 0x0,
-0x1040003a, 0x3245ffff, 0x8f420098, 0x8f430090,
-0x8f46013c, 0x451021, 0xaf420098, 0x8f42009c,
-0x8f440098, 0xa34005c2, 0x651823, 0xaf430090,
-0x451021, 0x86202b, 0x14800005, 0xaf42009c,
-0x8f420098, 0x8f430144, 0x431023, 0xaf420098,
-0x32c20020, 0x10400005, 0x0, 0x8f420358,
-0x2442ffff, 0xaf420358, 0x8f420358, 0x8f420030,
-0x8f430040, 0x24420001, 0x2463ffff, 0x431024,
-0xaf420030, 0x8f420030, 0x14530018, 0x0,
-0x8f420000, 0x10400007, 0x0, 0xaf80004c,
-0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
-0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
-0x0, 0x8f820060, 0x2403fff7, 0x431024,
-0xaf820060, 0x8f420000, 0x10400003, 0x0,
-0x10000002, 0xaf80004c, 0xaf800048, 0x8fbf0038,
-0x8fb30034, 0x8fb20030, 0x8fb1002c, 0x8fb00028,
-0x3e00008, 0x27bd0040, 0x3e00008, 0x0,
-0x27bdffd0, 0x32c20020, 0xafbf002c, 0xafb20028,
-0xafb10024, 0x10400004, 0xafb00020, 0x8f520028,
-0x10000002, 0x0, 0x8f520020, 0x8f420030,
-0x105200b5, 0x21100, 0x8f43001c, 0x628021,
-0x8e040000, 0x8e050004, 0x96110008, 0x8f420090,
-0x9607000a, 0x3226ffff, 0x46102a, 0x10400017,
-0x0, 0x8f8200d8, 0x8f430098, 0x431023,
-0x2442dc46, 0xaf420090, 0x8f420090, 0x2842dc47,
-0x10400005, 0x0, 0x8f420090, 0x8f430144,
-0x431021, 0xaf420090, 0x8f420090, 0x46102a,
-0x10400006, 0x0, 0x8f420348, 0x24420001,
-0xaf420348, 0x100000ab, 0x8f420348, 0x8f8600fc,
-0x10c0000c, 0x0, 0x8f8200f4, 0x2403fff8,
-0x431024, 0x461023, 0x218c3, 0x58600001,
-0x24630100, 0x8f42008c, 0x43102b, 0x14400006,
-0x712c2, 0x8f420344, 0x24420001, 0xaf420344,
-0x10000098, 0x8f420344, 0x934305c2, 0x1060000f,
-0x30460001, 0x8f420010, 0x34480400, 0x32c20008,
-0x10400008, 0x30e20200, 0x10400006, 0x3c034000,
-0x9602000e, 0xaf4300ac, 0x21400, 0x10000004,
-0xaf4200b0, 0x10000002, 0xaf4000ac, 0x8f480010,
-0x30e20004, 0x10400045, 0x3227ffff, 0x8f4900ac,
-0x11200005, 0x30c200ff, 0x14400006, 0x24020040,
-0x10000004, 0x24020008, 0x14400002, 0x24020020,
-0x24020004, 0xafa20010, 0x8f430030, 0x11200004,
-0xafa30014, 0x8f4200b0, 0x621025, 0xafa20014,
-0x3c020002, 0x1021025, 0xafa20018, 0x8f460098,
-0x8f420108, 0x40f809, 0x0, 0x10400069,
-0x3224ffff, 0x8f42008c, 0x8f430094, 0x24420001,
-0xaf42008c, 0x24020001, 0xae03000c, 0xa34205c2,
-0x8f420098, 0x2406fff8, 0x8f45013c, 0x441021,
-0x24420007, 0x461024, 0x24840007, 0xaf420094,
-0x8f420090, 0x8f430094, 0x862024, 0x441023,
-0x65182b, 0x14600005, 0xaf420090, 0x8f420094,
-0x8f430144, 0x431023, 0xaf420094, 0x8f430094,
-0x8f420140, 0x43102b, 0x10400009, 0x0,
-0x8f43013c, 0x8f440094, 0x8f420090, 0x8f450138,
-0x641823, 0x431023, 0xaf420090, 0xaf450094,
-0x8f420094, 0x1000001f, 0xaf420098, 0x10e0001d,
-0x30c200ff, 0x14400002, 0x24020010, 0x24020002,
-0xafa20010, 0x8f420030, 0xafa80018, 0xafa20014,
-0x8f460098, 0x8f420108, 0x40f809, 0x0,
-0x10400030, 0x3225ffff, 0x8f420098, 0x8f44013c,
-0x451021, 0xaf420098, 0x8f420090, 0x8f430098,
-0xa34005c2, 0x451023, 0x64182b, 0x14600005,
-0xaf420090, 0x8f420098, 0x8f430144, 0x431023,
-0xaf420098, 0x8f420030, 0x8f430040, 0x24420001,
-0x2463ffff, 0x431024, 0xaf420030, 0x8f420030,
-0x14520018, 0x0, 0x8f420000, 0x10400007,
-0x0, 0xaf80004c, 0x8f82004c, 0x1040fffd,
-0x0, 0x10000005, 0x0, 0xaf800048,
-0x8f820048, 0x1040fffd, 0x0, 0x8f820060,
-0x2403fff7, 0x431024, 0xaf820060, 0x8f420000,
-0x10400003, 0x0, 0x10000002, 0xaf80004c,
-0xaf800048, 0x8fbf002c, 0x8fb20028, 0x8fb10024,
-0x8fb00020, 0x3e00008, 0x27bd0030, 0x3e00008,
-0x0, 0x27bdffd8, 0x3c020001, 0x34422ec0,
-0xafbf0020, 0x8f4300f0, 0x8f840108, 0x2e21021,
-0x54620004, 0x24620008, 0x3c020001, 0x34422cc0,
-0x2e21021, 0x401821, 0xaf4300f0, 0xac600000,
-0x8f4200ec, 0x8c660004, 0x14620004, 0x3c020001,
-0x24820020, 0x1000000f, 0xaf820108, 0x8f4300f0,
-0x34422ec0, 0x2e21021, 0x54620004, 0x24620008,
-0x3c020001, 0x34422cc0, 0x2e21021, 0x401821,
-0x8c620004, 0x21140, 0x821021, 0xaf820108,
-0xac600000, 0x8c850018, 0x30a20036, 0x1040006c,
-0x30a20001, 0x8c82001c, 0x8f430040, 0x8f440034,
-0x24420001, 0x2463ffff, 0x431024, 0x862021,
-0xaf42002c, 0x30a20030, 0x14400006, 0xaf440034,
-0x8f420034, 0x8c03023c, 0x43102b, 0x144000b4,
-0x0, 0x32c20010, 0x10400028, 0x24070008,
-0x8f440170, 0x8f450174, 0x8f43002c, 0x8f48000c,
-0x8f860120, 0x24020080, 0xafa20010, 0xafa30014,
-0xafa80018, 0x8f42010c, 0x40f809, 0x24c6001c,
-0x14400011, 0x24020001, 0x3c010001, 0x370821,
-0xa02240f1, 0x8f820124, 0xafa20010, 0x8f820128,
-0x3c040001, 0x248467c4, 0xafa20014, 0x8f46002c,
-0x8f870120, 0x3c050009, 0xc002b3b, 0x34a51100,
-0x10000036, 0x0, 0x8f420300, 0x8f43002c,
-0x24420001, 0xaf420300, 0x8f420300, 0x24020001,
-0xa34205c1, 0x10000026, 0xaf430038, 0x8f440170,
-0x8f450174, 0x8f43002c, 0x8f48000c, 0x8f860120,
-0x24020020, 0xafa20010, 0xafa30014, 0xafa80018,
-0x8f42010c, 0x40f809, 0x24c6001c, 0x14400011,
-0x24020001, 0x3c010001, 0x370821, 0xa02240f0,
-0x8f820124, 0xafa20010, 0x8f820128, 0x3c040001,
-0x248467b8, 0xafa20014, 0x8f46002c, 0x8f870120,
-0x3c050009, 0xc002b3b, 0x34a50900, 0x1000000f,
-0x0, 0x8f420300, 0x24420001, 0xaf420300,
-0x8f420300, 0x8f42002c, 0xa34005c1, 0xaf420038,
-0x3c010001, 0x370821, 0xa02040f1, 0x3c010001,
-0x370821, 0xa02040f0, 0xaf400034, 0x8f420314,
-0x24420001, 0xaf420314, 0x10000059, 0x8f420314,
-0x10400022, 0x30a27000, 0x8c85001c, 0x8f420028,
-0xa22023, 0x4810003, 0x0, 0x8f420040,
-0x822021, 0x8f420358, 0x8f430000, 0xaf450028,
-0x441021, 0x10600007, 0xaf420358, 0xaf80004c,
-0x8f82004c, 0x1040fffd, 0x0, 0x10000005,
-0x0, 0xaf800048, 0x8f820048, 0x1040fffd,
-0x0, 0x8f820060, 0x34420008, 0xaf820060,
-0x8f420000, 0x10400003, 0x0, 0x10000038,
-0xaf80004c, 0x10000036, 0xaf800048, 0x1040002f,
-0x30a21000, 0x1040000c, 0x30a24000, 0x8c83001c,
-0x8f420050, 0x622023, 0x4820001, 0x24840200,
-0x8f42035c, 0x441021, 0xaf42035c, 0x8f420368,
-0x1000001a, 0xaf430050, 0x1040000c, 0x32c28000,
-0x8c83001c, 0x8f420070, 0x622023, 0x4820001,
-0x24840400, 0x8f420364, 0x441021, 0xaf420364,
-0x8f420368, 0x1000000d, 0xaf430070, 0x1040000e,
-0x3c020800, 0x8c83001c, 0x8f420060, 0x622023,
-0x4820001, 0x24840100, 0x8f420360, 0x441021,
-0xaf420360, 0x8f420368, 0xaf430060, 0x441021,
-0xaf420368, 0x3c020800, 0x2c21024, 0x50400008,
-0x36940040, 0x10000006, 0x0, 0x30a20100,
-0x10400003, 0x0, 0xc002bd8, 0x0,
-0x8fbf0020, 0x3e00008, 0x27bd0028, 0x3e00008,
-0x0, 0x27bdffa8, 0xafbf0050, 0xafbe004c,
-0xafb50048, 0xafb30044, 0xafb20040, 0xafb1003c,
-0xafb00038, 0x8f910108, 0x26220020, 0xaf820108,
-0x8e320018, 0xa821, 0x32420024, 0x104001ba,
-0xf021, 0x8e26001c, 0x8f43001c, 0x61100,
-0x621821, 0x8c70000c, 0x9604000c, 0x962d0016,
-0x9473000a, 0x2c8305dd, 0x38828870, 0x2c420001,
-0x621825, 0x10600015, 0x2821, 0x32c20040,
-0x10400015, 0x24020800, 0x96030014, 0x14620012,
-0x3402aaaa, 0x9603000e, 0x14620007, 0x2021,
-0x96030010, 0x24020300, 0x14620004, 0x801021,
-0x96020012, 0x2c440001, 0x801021, 0x54400006,
-0x24050016, 0x10000004, 0x0, 0x24020800,
-0x50820001, 0x2405000e, 0x934205c3, 0x14400008,
-0x5821, 0x240b0001, 0x32620180, 0xaf4500a8,
-0xaf5000a0, 0x10400002, 0xaf4600a4, 0xa34b05c3,
-0x10a00085, 0x2054021, 0x91020000, 0x3821,
-0x3042000f, 0x25080, 0x32c20002, 0x10400012,
-0x10a1821, 0x32620002, 0x10400010, 0x32c20001,
-0x1002021, 0x94820000, 0x24840002, 0xe23821,
-0x83102b, 0x1440fffb, 0x30e2ffff, 0x71c02,
-0x623821, 0x71c02, 0x30e2ffff, 0x623821,
-0x71027, 0xa502000a, 0x32c20001, 0x1040006a,
-0x32620001, 0x10400068, 0x0, 0x8f4200a8,
-0x10400065, 0x0, 0x8f4200a0, 0x8f4300a8,
-0x431021, 0x904c0009, 0x318900ff, 0x39230006,
-0x3182b, 0x39220011, 0x2102b, 0x621824,
-0x1060000c, 0x3c050006, 0x8f4200a4, 0x3c040001,
-0x248467d4, 0xafa20010, 0x8f4200a0, 0x34a54600,
-0x1203821, 0xc002b3b, 0xafa20014, 0x1000004e,
-0x0, 0x32c20004, 0x14400013, 0x2821,
-0x316200ff, 0x14400004, 0x0, 0x95020002,
-0x1000000d, 0x4a2823, 0x9505000c, 0x9502000e,
-0x95030010, 0xa22821, 0xa32821, 0x95030012,
-0x91040009, 0x95020002, 0xa32821, 0xa42821,
-0x4a1023, 0xa22821, 0x2002021, 0x94820000,
-0x24840002, 0xe23821, 0x88102b, 0x1440fffb,
-0x71c02, 0x30e2ffff, 0x623821, 0x71c02,
-0x30e2ffff, 0x623821, 0x1a52821, 0x51c02,
-0x30a2ffff, 0x622821, 0x51c02, 0x30a2ffff,
-0x622821, 0xa72823, 0x51402, 0xa22821,
-0x30a5ffff, 0x50a00001, 0x3405ffff, 0x316200ff,
-0x14400008, 0x318300ff, 0x8f4300a0, 0x8f4200a8,
-0x624021, 0x91020000, 0x3042000f, 0x25080,
-0x318300ff, 0x24020006, 0x14620003, 0x10a1021,
-0x10000002, 0x24440010, 0x24440006, 0x316200ff,
-0x14400006, 0x0, 0x94820000, 0xa22821,
-0x51c02, 0x30a2ffff, 0x622821, 0x934205c3,
-0x10400003, 0x32620100, 0x50400003, 0xa4850000,
-0x52827, 0xa4850000, 0x9622000e, 0x8f43009c,
-0x621821, 0x32a200ff, 0x10400007, 0xaf43009c,
-0x3c024000, 0x2021025, 0xafa20020, 0x8f42009c,
-0x10000003, 0x5e1025, 0xafb00020, 0x8f42009c,
-0xafa20024, 0x32620080, 0x10400010, 0x32620100,
-0x8f4200b4, 0x24430001, 0x210c0, 0x571021,
-0xaf4300b4, 0x8fa30020, 0x8fa40024, 0x3c010001,
-0x220821, 0xac2338e8, 0x3c010001, 0x220821,
-0xac2438ec, 0x100000a5, 0x32c20020, 0x10400064,
-0x0, 0x8f4200b4, 0x24430001, 0x210c0,
-0x571021, 0xaf4300b4, 0x8fa30020, 0x8fa40024,
-0x3c010001, 0x220821, 0xac2338e8, 0x3c010001,
-0x220821, 0xac2438ec, 0x8f4200b4, 0x10400051,
-0x3821, 0x3c090001, 0x352938e8, 0x3c08001f,
-0x3508ffff, 0x240bffff, 0x340affff, 0x710c0,
-0x571021, 0x491021, 0x8c430000, 0x8c440004,
-0xafa30028, 0xafa4002c, 0x8f8200fc, 0x8fa30028,
-0x8fa4002c, 0xac430000, 0xac440004, 0x24420008,
-0xaf8200f0, 0x8f42008c, 0x2442ffff, 0xaf42008c,
-0x97a2002e, 0x8f440270, 0x8f450274, 0x401821,
-0x1021, 0xa32821, 0xa3302b, 0x822021,
-0x862021, 0xaf440270, 0xaf450274, 0x8fa20028,
-0x481024, 0x90430000, 0x30630001, 0x1460000b,
-0x402021, 0x8f420278, 0x8f43027c, 0x24630001,
-0x2c640001, 0x441021, 0xaf420278, 0xaf43027c,
-0x8f420278, 0x1000001a, 0x8f43027c, 0x8c820000,
-0x144b000e, 0x0, 0x94820004, 0x144a000b,
-0x0, 0x8f420288, 0x8f43028c, 0x24630001,
-0x2c640001, 0x441021, 0xaf420288, 0xaf43028c,
-0x8f420288, 0x1000000a, 0x8f43028c, 0x8f420280,
-0x8f430284, 0x24630001, 0x2c640001, 0x441021,
-0xaf420280, 0xaf430284, 0x8f420280, 0x8f430284,
-0x8f4200b4, 0x24e70001, 0xe2102b, 0x1440ffb8,
-0x710c0, 0xa34005c3, 0x1000003f, 0xaf4000b4,
-0x8f8200fc, 0x8fa30020, 0x8fa40024, 0xac430000,
-0xac440004, 0x24420008, 0xaf8200f0, 0x8f42009c,
-0x8f46008c, 0x8f440270, 0x8f450274, 0x401821,
-0x1021, 0x24c6ffff, 0xaf46008c, 0xa32821,
-0xa3302b, 0x822021, 0x862021, 0xaf440270,
-0xaf450274, 0x92020000, 0x30420001, 0x1440000c,
-0x2402ffff, 0x8f420278, 0x8f43027c, 0x24630001,
-0x2c640001, 0x441021, 0xaf420278, 0xaf43027c,
-0x8f420278, 0x8f43027c, 0x1000001c, 0x32c20020,
-0x8e030000, 0x1462000f, 0x3402ffff, 0x96030004,
-0x1462000c, 0x0, 0x8f420288, 0x8f43028c,
-0x24630001, 0x2c640001, 0x441021, 0xaf420288,
-0xaf43028c, 0x8f420288, 0x8f43028c, 0x1000000b,
-0x32c20020, 0x8f420280, 0x8f430284, 0x24630001,
-0x2c640001, 0x441021, 0xaf420280, 0xaf430284,
-0x8f420280, 0x8f430284, 0x32c20020, 0x10400005,
-0xaf40009c, 0x8f420358, 0x2442ffff, 0xaf420358,
-0x8f420358, 0x8e22001c, 0x8f430040, 0x24420001,
-0x2463ffff, 0x431024, 0xaf42002c, 0x32420060,
-0x14400008, 0x32c20010, 0x8f420034, 0x24420001,
-0xaf420034, 0x8c03023c, 0x43102b, 0x14400102,
-0x32c20010, 0x10400018, 0x24070008, 0x8f440170,
-0x8f450174, 0x8f43002c, 0x8f48000c, 0x8f860120,
-0x24020080, 0xafa20010, 0xafa30014, 0xafa80018,
-0x8f42010c, 0x40f809, 0x24c6001c, 0x10400047,
-0x24020001, 0x8f420300, 0x8f43002c, 0x24420001,
-0xaf420300, 0x8f420300, 0x24020001, 0xa34205c1,
-0x1000007c, 0xaf430038, 0x8f440170, 0x8f450174,
-0x8f43002c, 0x8f48000c, 0x8f860120, 0x24020020,
-0xafa20010, 0xafa30014, 0xafa80018, 0x8f42010c,
-0x40f809, 0x24c6001c, 0x10400057, 0x24020001,
-0x10000065, 0x0, 0x32420012, 0x10400075,
-0x32420001, 0x9622000e, 0x8f43009c, 0x621821,
-0x32c20020, 0x10400005, 0xaf43009c, 0x8f420358,
-0x2442ffff, 0xaf420358, 0x8f420358, 0x8e22001c,
-0x8f430040, 0x24420001, 0x2463ffff, 0x431024,
-0xaf42002c, 0x32420010, 0x14400008, 0x32c20010,
-0x8f420034, 0x24420001, 0xaf420034, 0x8c03023c,
-0x43102b, 0x144000bc, 0x32c20010, 0x10400028,
-0x24070008, 0x8f440170, 0x8f450174, 0x8f43002c,
-0x8f48000c, 0x8f860120, 0x24020080, 0xafa20010,
-0xafa30014, 0xafa80018, 0x8f42010c, 0x40f809,
-0x24c6001c, 0x14400011, 0x24020001, 0x3c010001,
-0x370821, 0xa02240f1, 0x8f820124, 0xafa20010,
-0x8f820128, 0x3c040001, 0x248467c4, 0xafa20014,
-0x8f46002c, 0x8f870120, 0x3c050009, 0xc002b3b,
-0x34a51100, 0x10000036, 0x0, 0x8f420300,
-0x8f43002c, 0x24420001, 0xaf420300, 0x8f420300,
-0x24020001, 0xa34205c1, 0x10000026, 0xaf430038,
-0x8f440170, 0x8f450174, 0x8f43002c, 0x8f48000c,
-0x8f860120, 0x24020020, 0xafa20010, 0xafa30014,
-0xafa80018, 0x8f42010c, 0x40f809, 0x24c6001c,
-0x14400011, 0x24020001, 0x3c010001, 0x370821,
-0xa02240f0, 0x8f820124, 0xafa20010, 0x8f820128,
-0x3c040001, 0x248467b8, 0xafa20014, 0x8f46002c,
-0x8f870120, 0x3c050009, 0xc002b3b, 0x34a50900,
-0x1000000f, 0x0, 0x8f420300, 0x24420001,
-0xaf420300, 0x8f420300, 0x8f42002c, 0xa34005c1,
-0xaf420038, 0x3c010001, 0x370821, 0xa02040f1,
-0x3c010001, 0x370821, 0xa02040f0, 0xaf400034,
-0x8f420314, 0x24420001, 0xaf420314, 0x10000062,
-0x8f420314, 0x10400022, 0x32427000, 0x8e25001c,
-0x8f420028, 0xa22023, 0x4810003, 0x0,
-0x8f420040, 0x822021, 0x8f420358, 0x8f430000,
-0xaf450028, 0x441021, 0x10600007, 0xaf420358,
-0xaf80004c, 0x8f82004c, 0x1040fffd, 0x0,
-0x10000005, 0x0, 0xaf800048, 0x8f820048,
-0x1040fffd, 0x0, 0x8f820060, 0x34420008,
-0xaf820060, 0x8f420000, 0x10400003, 0x0,
-0x10000041, 0xaf80004c, 0x1000003f, 0xaf800048,
-0x1040002f, 0x32421000, 0x1040000c, 0x32424000,
-0x8e23001c, 0x8f420050, 0x622023, 0x4820001,
-0x24840200, 0x8f42035c, 0x441021, 0xaf42035c,
-0x8f420368, 0x1000001a, 0xaf430050, 0x1040000c,
-0x32c28000, 0x8e23001c, 0x8f420070, 0x622023,
-0x4820001, 0x24840400, 0x8f420364, 0x441021,
-0xaf420364, 0x8f420368, 0x1000000d, 0xaf430070,
-0x1040000e, 0x3c020800, 0x8e23001c, 0x8f420060,
-0x622023, 0x4820001, 0x24840100, 0x8f420360,
-0x441021, 0xaf420360, 0x8f420368, 0xaf430060,
-0x441021, 0xaf420368, 0x3c020800, 0x2c21024,
-0x50400011, 0x36940040, 0x1000000f, 0x0,
-0x32420048, 0x10400007, 0x24150001, 0x8e22001c,
-0x3c03ffff, 0x43f024, 0x3042ffff, 0x1000fd75,
-0xae22001c, 0x32420100, 0x10400003, 0x0,
-0xc002bd8, 0x0, 0x8fbf0050, 0x8fbe004c,
-0x8fb50048, 0x8fb30044, 0x8fb20040, 0x8fb1003c,
-0x8fb00038, 0x3e00008, 0x27bd0058, 0x3e00008,
-0x0, 0x0, 0x0, 0x8f8300e4,
-0x8f8200e0, 0x2404fff8, 0x441024, 0x621026,
-0x2102b, 0x21023, 0x3e00008, 0x621024,
-0x3e00008, 0x0, 0x27bdffe0, 0xafbf001c,
-0xafb00018, 0x8f8600c4, 0x8f8400e0, 0x8f8500e4,
-0x2402fff8, 0x821824, 0x10a30009, 0x27623ff8,
-0x14a20002, 0x24a20008, 0x27623000, 0x408021,
-0x16030005, 0x30820004, 0x10400004, 0xc02021,
-0x10000022, 0x1021, 0x8e040000, 0x8f42011c,
-0x14a20003, 0x0, 0x8f420120, 0xaf420114,
-0x8ca30000, 0x8f420148, 0x831823, 0x43102b,
-0x10400003, 0x0, 0x8f420148, 0x621821,
-0x94a20006, 0x24420050, 0x62102b, 0x1440000f,
-0xa01021, 0xafa40010, 0xafa30014, 0x8ca60000,
-0x8ca70004, 0x3c040001, 0xc002b3b, 0x24846894,
-0x8f42020c, 0x24420001, 0xaf42020c, 0x8f42020c,
-0x1021, 0xaf9000e8, 0xaf9000e4, 0x8fbf001c,
-0x8fb00018, 0x3e00008, 0x27bd0020, 0x3e00008,
-0x0, 0x8f8400e0, 0x8f8800c4, 0x8f8300e8,
-0x2402fff8, 0x823824, 0xe32023, 0x2c821000,
-0x50400001, 0x24841000, 0x420c2, 0x801821,
-0x8f440258, 0x8f45025c, 0x1021, 0xa32821,
-0xa3302b, 0x822021, 0x862021, 0xaf440258,
-0xaf45025c, 0x8f8300c8, 0x8f420148, 0x1032023,
-0x82102b, 0x14400004, 0x801821, 0x8f420148,
-0x822021, 0x801821, 0x8f440250, 0x8f450254,
-0x1021, 0xa32821, 0xa3302b, 0x822021,
-0x862021, 0xaf440250, 0xaf450254, 0xaf8800c8,
-0xaf8700e4, 0xaf8700e8, 0x3e00008, 0x0,
-0x27bdff30, 0x240a0001, 0xafbf00c8, 0xafbe00c4,
-0xafb500c0, 0xafb300bc, 0xafb200b8, 0xafb100b4,
-0xafb000b0, 0xa3a00097, 0xafa00044, 0xafaa005c,
-0x934205c4, 0xa7a0008e, 0x1040000a, 0xa7a00086,
-0x8f4b00c4, 0xafab0064, 0x8f4a00c0, 0xafaa006c,
-0x8f4b00cc, 0xafab0074, 0x8f4a00c8, 0x10000129,
-0xafaa007c, 0x8f420114, 0x40f809, 0x0,
-0x403021, 0x10c0034f, 0x0, 0x8cc20000,
-0x8cc30004, 0xafa20020, 0xafa30024, 0x8fab0024,
-0x8faa0020, 0x3162ffff, 0x2442fffc, 0xafa2006c,
-0x3c020006, 0x2c21024, 0xafab007c, 0x14400015,
-0xafaa0064, 0x91420000, 0x30420001, 0x10400011,
-0x2402ffff, 0x8d430000, 0x14620004, 0x3402ffff,
-0x95430004, 0x1062000b, 0x0, 0xc0024bb,
-0x8fa40064, 0x304200ff, 0x14400006, 0x0,
-0x8f420118, 0x40f809, 0x0, 0x1000032d,
-0x0, 0x8fa20024, 0x3c03ffbf, 0x3463ffff,
-0x431024, 0x3c03ffff, 0x431824, 0x14600003,
-0xafa20024, 0x10000040, 0x1821, 0x3c020080,
-0x621024, 0x10400007, 0x0, 0x8f42038c,
-0x24420001, 0xaf42038c, 0x8f42038c, 0x10000036,
-0x24030001, 0x8f420210, 0x24420001, 0xaf420210,
-0x8f420210, 0x3c020001, 0x621024, 0x10400006,
-0x3c020002, 0x8f4201c4, 0x24420001, 0xaf4201c4,
-0x8f4201c4, 0x3c020002, 0x621024, 0x10400006,
-0x3c020004, 0x8f42037c, 0x24420001, 0xaf42037c,
-0x8f42037c, 0x3c020004, 0x621024, 0x10400006,
-0x3c020008, 0x8f420380, 0x24420001, 0xaf420380,
-0x8f420380, 0x3c020008, 0x621024, 0x10400006,
-0x3c020010, 0x8f420384, 0x24420001, 0xaf420384,
-0x8f420384, 0x3c020010, 0x621024, 0x10400006,
-0x3c020020, 0x8f4201c0, 0x24420001, 0xaf4201c0,
-0x8f4201c0, 0x3c020020, 0x621024, 0x10400006,
-0x24030001, 0x8f420388, 0x24420001, 0xaf420388,
-0x8f420388, 0x24030001, 0x8c020260, 0x8fab006c,
-0x4b102b, 0x10400014, 0x307000ff, 0x8f4201e8,
-0x24420001, 0xaf4201e8, 0x8f4201e8, 0x8faa007c,
-0x8f8200e0, 0x354a0100, 0xafaa007c, 0xafa20010,
-0x8f8200e4, 0x24100001, 0x3c040001, 0x248468a0,
-0xafa20014, 0x8fa60020, 0x8fa70024, 0x3c050007,
-0xc002b3b, 0x34a50800, 0x12000010, 0x3c020080,
-0x2c21024, 0x1440000e, 0x32c20400, 0x8fab007c,
-0x3c020080, 0x34420100, 0x1621024, 0x10400005,
-0x0, 0x8f42020c, 0x24420001, 0xaf42020c,
-0x8f42020c, 0x100002b0, 0x8fa3006c, 0x32c20400,
-0x10400015, 0x34028100, 0x8faa0064, 0x9543000c,
-0x14620012, 0x3c020100, 0x240b0200, 0xa7ab008e,
-0x9542000e, 0x8d430008, 0x8d440004, 0x8d450000,
-0x8faa006c, 0x8fab0064, 0x254afffc, 0xafaa006c,
-0xa7a20086, 0xad63000c, 0xad640008, 0xad650004,
-0x256b0004, 0xafab0064, 0x3c020100, 0x2c21024,
-0x10400004, 0x0, 0x8faa006c, 0x254a0004,
-0xafaa006c, 0x8f4200bc, 0x5040000a, 0xafa00074,
-0x8fab006c, 0x4b102b, 0x50400006, 0xafa00074,
-0x8f4200bc, 0x1621023, 0xafa20074, 0x8f4a00bc,
-0xafaa006c, 0x8f420080, 0x8fab006c, 0x4b102b,
-0x10400056, 0x32c28000, 0x1040005e, 0x240a0003,
-0x32c21000, 0x1040005b, 0xafaa005c, 0x10000058,
-0x240b0004, 0x8f420350, 0x2403ffbf, 0x283a024,
-0x24420001, 0xaf420350, 0x1000024f, 0x8f420350,
-0x2c2b025, 0x2402ffbf, 0x282a024, 0x8f830128,
-0x3c040001, 0x248468d0, 0x26620001, 0xafa20014,
-0xafa30010, 0x8f860120, 0x8f870124, 0x3c050007,
-0xc002b3b, 0x34a52250, 0x1000023f, 0x0,
-0x2c2b025, 0x2402ffbf, 0x282a024, 0x8f830128,
-0x3c040001, 0x248468d0, 0x24020002, 0xafa20014,
-0xafa30010, 0x8f860120, 0x8f870124, 0x3c050007,
-0xc002b3b, 0x34a52450, 0x1000022f, 0x0,
-0x8ea20000, 0x8ea30004, 0x3c040001, 0x248468e8,
-0xafb00010, 0xafbe0014, 0x8ea70018, 0x34a52800,
-0xc002b3b, 0x603021, 0x10000223, 0x0,
-0xa6b1000a, 0x8f820124, 0x3c040001, 0x248468f0,
-0xafbe0014, 0xafa20010, 0x8f460044, 0x8f870120,
-0x3c050007, 0xc002b3b, 0x34a53000, 0x10000216,
-0x0, 0xa6b1000a, 0xa6b2000e, 0x8f820124,
-0x3c040001, 0x248468fc, 0xafbe0014, 0xafa20010,
-0x8f460044, 0x8f870120, 0x3c050007, 0xc002b3b,
-0x34a53200, 0x10000208, 0x0, 0x8f420084,
-0x8faa006c, 0x4a102b, 0x14400007, 0x3c020001,
-0x2c21024, 0x10400004, 0x0, 0x240b0002,
-0xafab005c, 0x8faa006c, 0x1140021b, 0x27ab0020,
-0xafab00a4, 0x3c0a001f, 0x354affff, 0xafaa009c,
-0x8fab005c, 0x240a0001, 0x556a0021, 0x240a0002,
-0x8f430054, 0x8f420050, 0x1062000b, 0x274b0054,
-0x8f5e0054, 0x3403ecc0, 0xafab004c, 0x27c20001,
-0x304201ff, 0xafa20054, 0x1e1140, 0x431021,
-0x1000006b, 0x2e2a821, 0x8f420044, 0x8faa006c,
-0x3c040001, 0x248468ac, 0xafaa0014, 0xafa20010,
-0x8f460054, 0x8f470050, 0x3c050007, 0xc002b3b,
-0x34a51300, 0x8f430350, 0x2402ffbf, 0x282a024,
-0x24630001, 0xaf430350, 0x100001d3, 0x8f420350,
-0x156a001d, 0x0, 0x8f430074, 0x8f420070,
-0x1062000a, 0x274b0074, 0x8f5e0074, 0xafab004c,
-0x27c20001, 0x304203ff, 0xafa20054, 0x1e1140,
-0x24426cc0, 0x1000004a, 0x2e2a821, 0x8f420044,
-0x8faa006c, 0x3c040001, 0x248468b8, 0x3c050007,
-0xafaa0014, 0xafa20010, 0x8f460074, 0x8f470070,
-0x34a51500, 0x240b0001, 0xc002b3b, 0xafab005c,
-0x1000ffc3, 0x0, 0x8f430064, 0x8f420060,
-0x1062001a, 0x274a0064, 0x8f5e0064, 0x8fab005c,
-0xafaa004c, 0x27c20001, 0x304200ff, 0xafa20054,
-0x24020004, 0x1562000e, 0x1e1140, 0x1e1180,
-0x24420cc0, 0x2e21021, 0xafa20044, 0x9442002a,
-0x8faa0044, 0x8fab006c, 0x4b102b, 0x10400024,
-0x25550020, 0x240a0001, 0x10000021, 0xa3aa0097,
-0x24424cc0, 0x1000001e, 0x2e2a821, 0x8f420044,
-0x8fab006c, 0x3c040001, 0x248468c4, 0xafab0014,
-0xafa20010, 0x8f460064, 0x8f470060, 0x3c050007,
-0xc002b3b, 0x34a51800, 0x3c020008, 0x2c21024,
-0x1440ff34, 0x0, 0x8f420370, 0x240a0001,
-0xafaa005c, 0x24420001, 0xaf420370, 0x1000ff90,
-0x8f420370, 0x27a30036, 0x131040, 0x621821,
-0x94620000, 0x441021, 0x10000020, 0xa4620000,
-0x8fab0064, 0xaeab0018, 0x93a20097, 0x10400072,
-0x9821, 0x8faa0044, 0x8fa4006c, 0x8fa300a4,
-0x25420020, 0xafa20028, 0x25420008, 0xafa20030,
-0x25420010, 0xafaa002c, 0xafa20034, 0x9542002a,
-0xa7a20038, 0x95420018, 0xa7a2003a, 0x9542001a,
-0xa7a2003c, 0x9542001c, 0xa7a2003e, 0x94620018,
-0x24630002, 0x822023, 0x1880ffde, 0x26730001,
-0x2e620004, 0x1440fff9, 0x0, 0x8f4200fc,
-0x26650001, 0xa2102a, 0x1440002b, 0x24030001,
-0x8f83012c, 0x10600023, 0x0, 0x8f820124,
-0x431023, 0x22143, 0x58800001, 0x24840040,
-0x8f820128, 0x431023, 0x21943, 0x58600001,
-0x24630040, 0x64102a, 0x54400001, 0x602021,
-0xaf4400fc, 0x8f4200fc, 0xa2102a, 0x10400011,
-0x24030001, 0x10000015, 0x306200ff, 0x8fab0064,
-0x96070018, 0xafab0010, 0x8e220008, 0x3c040001,
-0x248468dc, 0x8c430004, 0x8c420000, 0x34a52400,
-0x2403021, 0xc002b3b, 0xafa30014, 0x1000002b,
-0x0, 0x8f420334, 0x1821, 0x24420001,
-0xaf420334, 0x8f420334, 0x306200ff, 0x5040fedc,
-0x3c020800, 0x12600021, 0x9021, 0x8fb100a4,
-0x2208021, 0x8e220008, 0x96070018, 0x8fa60064,
-0x8c440000, 0x8c450004, 0x240a0001, 0xafaa0010,
-0xafbe0014, 0x8f420008, 0xafa20018, 0x8f42010c,
-0x40f809, 0x0, 0x1040ffd8, 0x3c050007,
-0x96020018, 0x8fab0064, 0x8faa009c, 0x1625821,
-0x14b102b, 0x10400004, 0xafab0064, 0x8f420148,
-0x1625823, 0xafab0064, 0x26100002, 0x26520001,
-0x253102b, 0x1440ffe3, 0x26310004, 0x8fb0006c,
-0x10000036, 0x97b10038, 0x8f4200fc, 0x24050002,
-0xa2102a, 0x1440001b, 0x24030001, 0x8f83012c,
-0x10600013, 0x0, 0x8f820124, 0x431023,
-0x22143, 0x58800001, 0x24840040, 0x8f820128,
-0x431023, 0x21943, 0x58600001, 0x24630040,
-0x64102a, 0x54400001, 0x602021, 0xaf4400fc,
-0x8f4200fc, 0xa2102a, 0x14400006, 0x24030001,
-0x8f420334, 0x1821, 0x24420001, 0xaf420334,
-0x8f420334, 0x306200ff, 0x1040fea5, 0x3c020800,
-0x96b1000a, 0x8fb0006c, 0x3223ffff, 0x70102b,
-0x54400001, 0x608021, 0x8ea40000, 0x8ea50004,
-0x240b0001, 0xafab0010, 0xafbe0014, 0x8f420008,
-0x8fa60064, 0xafa20018, 0x8f42010c, 0x40f809,
-0x2003821, 0x1040fea2, 0x3c050007, 0x96a3000e,
-0x97aa008e, 0x11400007, 0x609021, 0x934205c4,
-0x14400004, 0x0, 0x97ab0086, 0x6a1825,
-0xa6ab0016, 0x8faa007c, 0x3c02ffff, 0x1421024,
-0x10400003, 0xa1402, 0x34630400, 0xa6a20014,
-0x8fab006c, 0x560b0072, 0xa6a3000e, 0x34620004,
-0xa6a2000e, 0x8faa0074, 0x16a1021, 0xa6a2000a,
-0x8f430044, 0x8f4401a0, 0x8f4501a4, 0x34028000,
-0xafa20010, 0x8f420044, 0x2a03021, 0x24070020,
-0xafa20014, 0x8f42000c, 0x31940, 0x604821,
-0xafa20018, 0x8f42010c, 0x4021, 0xa92821,
-0xa9182b, 0x882021, 0x40f809, 0x832021,
-0x5040fe7f, 0xa6b2000e, 0x8f420368, 0xafa0006c,
-0xa34005c4, 0x2442ffff, 0xaf420368, 0x8fab005c,
-0x240a0001, 0x8f420368, 0x156a0006, 0x240a0002,
-0x8f42035c, 0x2442ffff, 0xaf42035c, 0x1000000c,
-0x8f42035c, 0x156a0006, 0x0, 0x8f420364,
-0x2442ffff, 0xaf420364, 0x10000005, 0x8f420364,
-0x8f420360, 0x2442ffff, 0xaf420360, 0x8f420360,
-0x8faa0054, 0x8fab004c, 0xad6a0000, 0x8f420044,
-0x8f440088, 0x8f430078, 0x24420001, 0x441024,
-0x24630001, 0xaf420044, 0xaf430078, 0x8c020240,
-0x62182b, 0x14600075, 0x24070008, 0x8f440168,
-0x8f45016c, 0x8f430044, 0x8f48000c, 0x8f860120,
-0x24020040, 0xafa20010, 0xafa30014, 0xafa80018,
-0x8f42010c, 0x40f809, 0x24c6001c, 0x14400011,
-0x240b0001, 0x3c010001, 0x370821, 0xa02b40f2,
-0x8f820124, 0xafa20010, 0x8f820128, 0x3c040001,
-0x2484688c, 0xafa20014, 0x8f460044, 0x8f870120,
-0x3c050009, 0xc002b3b, 0x34a51300, 0x1000000b,
-0x0, 0x8f420304, 0x24420001, 0xaf420304,
-0x8f420304, 0x8f420044, 0xaf42007c, 0x3c010001,
-0x370821, 0xa02040f2, 0xaf400078, 0x8f420318,
-0x24420001, 0xaf420318, 0x10000048, 0x8f420318,
-0xa6b0000a, 0x8f430044, 0x8f4401a0, 0x8f4501a4,
-0x34028000, 0xafa20010, 0x8f420044, 0x2a03021,
-0x24070020, 0xafa20014, 0x8f42000c, 0x31940,
-0x604821, 0xafa20018, 0x8f42010c, 0x4021,
-0xa92821, 0xa9182b, 0x882021, 0x40f809,
-0x832021, 0x1040fe1f, 0x240a0001, 0xa34a05c4,
-0x8fab006c, 0x8faa0064, 0x1705823, 0xafab006c,
-0x8fab009c, 0x1505021, 0x16a102b, 0x10400004,
-0xafaa0064, 0x8f420148, 0x1425023, 0xafaa0064,
-0x8f420368, 0x2442ffff, 0xaf420368, 0x8faa005c,
-0x240b0001, 0x8f420368, 0x154b0006, 0x240b0002,
-0x8f42035c, 0x2442ffff, 0xaf42035c, 0x1000000c,
-0x8f42035c, 0x114b0006, 0x0, 0x8f420360,
-0x2442ffff, 0xaf420360, 0x10000005, 0x8f420360,
-0x8f420364, 0x2442ffff, 0xaf420364, 0x8f420364,
-0x8fab0054, 0x8faa004c, 0xad4b0000, 0x8f420044,
-0x8f440088, 0x8f430078, 0x24420001, 0x441024,
-0x24630001, 0xaf420044, 0xaf430078, 0x8faa006c,
-0x1540fe0b, 0x0, 0x8fab006c, 0x1160001e,
-0x0, 0x934205c4, 0x10400009, 0x0,
-0x8faa0064, 0xaf4a00c4, 0xaf4b00c0, 0x8fab007c,
-0xaf4b00c8, 0x8faa0074, 0x1000000e, 0xaf4a00cc,
-0x97ab008e, 0x1160000b, 0x34038100, 0x8fa20020,
-0x8c46000c, 0xa443000c, 0x97aa0086, 0x8c440004,
-0x8c450008, 0xa44a000e, 0xac440000, 0xac450004,
-0xac460008, 0x8f42034c, 0x24420001, 0xaf42034c,
-0x10000010, 0x8f42034c, 0x8fab007c, 0x3164ffff,
-0x2484fffc, 0x801821, 0x8f440250, 0x8f450254,
-0x8f460118, 0x1021, 0xa32821, 0xa3382b,
-0x822021, 0x872021, 0xaf440250, 0xc0f809,
-0xaf450254, 0x8fbf00c8, 0x8fbe00c4, 0x8fb500c0,
-0x8fb300bc, 0x8fb200b8, 0x8fb100b4, 0x8fb000b0,
-0x3e00008, 0x27bd00d0, 0x3e00008, 0x0,
-0x27bdff38, 0x240b0001, 0xafbf00c0, 0xafbe00bc,
-0xafb500b8, 0xafb300b4, 0xafb200b0, 0xafb100ac,
-0xafb000a8, 0xa3a00087, 0xafa00044, 0xafab005c,
-0x934205c4, 0xa7a00076, 0x10400007, 0xa7a0007e,
-0x8f4c00c0, 0xafac0064, 0x8f4b00c8, 0x8f5e00c4,
-0x10000130, 0xafab006c, 0x8f420114, 0x40f809,
-0x0, 0x403021, 0x10c002a1, 0x0,
-0x8cc20000, 0x8cc30004, 0xafa20020, 0xafa30024,
-0x8fac0024, 0x8fbe0020, 0x3182ffff, 0x2442fffc,
-0xafa20064, 0x3c020006, 0x2c21024, 0x14400015,
-0xafac006c, 0x93c20000, 0x30420001, 0x10400011,
-0x2402ffff, 0x8fc30000, 0x14620004, 0x3402ffff,
-0x97c30004, 0x1062000b, 0x0, 0xc0024bb,
-0x3c02021, 0x304200ff, 0x14400006, 0x0,
-0x8f420118, 0x40f809, 0x0, 0x10000280,
-0x0, 0x8fa20024, 0x3c03ffbf, 0x3463ffff,
-0x431024, 0x3c03ffff, 0x431824, 0x14600003,
-0xafa20024, 0x10000040, 0x8021, 0x3c020080,
-0x621024, 0x10400007, 0x0, 0x8f42038c,
-0x24420001, 0xaf42038c, 0x8f42038c, 0x10000036,
-0x24100001, 0x8f420210, 0x24420001, 0xaf420210,
-0x8f420210, 0x3c020001, 0x621024, 0x10400006,
-0x3c020002, 0x8f4201c4, 0x24420001, 0xaf4201c4,
-0x8f4201c4, 0x3c020002, 0x621024, 0x10400006,
-0x3c020004, 0x8f42037c, 0x24420001, 0xaf42037c,
-0x8f42037c, 0x3c020004, 0x621024, 0x10400006,
-0x3c020008, 0x8f420380, 0x24420001, 0xaf420380,
-0x8f420380, 0x3c020008, 0x621024, 0x10400006,
-0x3c020010, 0x8f420384, 0x24420001, 0xaf420384,
-0x8f420384, 0x3c020010, 0x621024, 0x10400006,
-0x3c020020, 0x8f4201c0, 0x24420001, 0xaf4201c0,
-0x8f4201c0, 0x3c020020, 0x621024, 0x10400006,
-0x24100001, 0x8f420388, 0x24420001, 0xaf420388,
-0x8f420388, 0x24100001, 0x8c020260, 0x8fab0064,
-0x4b102b, 0x10400015, 0x320200ff, 0x8f4201e8,
-0x24420001, 0xaf4201e8, 0x8f4201e8, 0x8fac006c,
-0x8f8200e0, 0x358c0100, 0xafac006c, 0xafa20010,
-0x8f8200e4, 0x24100001, 0x3c040001, 0x248468a0,
-0xafa20014, 0x8fa60020, 0x8fa70024, 0x3c050007,
-0xc002b3b, 0x34a53600, 0x320200ff, 0x10400010,
-0x3c020080, 0x2c21024, 0x1440000e, 0x32c20400,
-0x8fab006c, 0x3c020080, 0x34420100, 0x1621024,
-0x10400005, 0x0, 0x8f42020c, 0x24420001,
-0xaf42020c, 0x8f42020c, 0x10000202, 0x8fa30064,
-0x32c20400, 0x10400012, 0x34028100, 0x97c3000c,
-0x1462000f, 0x0, 0x240c0200, 0xa7ac0076,
-0x97c2000e, 0x8fc30008, 0x8fc40004, 0x8fab0064,
-0x8fc50000, 0x256bfffc, 0xafab0064, 0xa7a2007e,
-0xafc3000c, 0xafc40008, 0xafc50004, 0x27de0004,
-0x8fa70064, 0x320200ff, 0x14400034, 0x3c020100,
-0x97c4000c, 0x2c8305dd, 0x38828870, 0x2c420001,
-0x621825, 0x10600015, 0x2821, 0x32c20800,
-0x10400015, 0x24020800, 0x97c30014, 0x14620012,
-0x3402aaaa, 0x97c3000e, 0x14620007, 0x2021,
-0x97c30010, 0x24020300, 0x14620004, 0x801021,
-0x97c20012, 0x2c440001, 0x801021, 0x54400006,
-0x24050016, 0x10000004, 0x0, 0x24020800,
-0x50820001, 0x2405000e, 0x10a00013, 0x3c52021,
-0x24830009, 0x3c02001f, 0x3442ffff, 0x43102b,
-0x10400003, 0x0, 0x8f420148, 0x621823,
-0x90620000, 0x38430006, 0x2c630001, 0x38420011,
-0x2c420001, 0x621825, 0x10600004, 0x3c020100,
-0x94820002, 0x453821, 0x3c020100, 0x2c21024,
-0x5040000e, 0xafa70064, 0x8fac0064, 0x10ec0008,
-0x3c050007, 0x3c040001, 0x24846908, 0x8fa60064,
-0x34a54000, 0xafa00010, 0xc002b3b, 0xafa00014,
-0x8fab0064, 0x256b0004, 0xafab0064, 0x8f420080,
-0x8fac0064, 0x4c102b, 0x1040002c, 0x32c28000,
-0x10400034, 0x240b0003, 0x32c21000, 0x10400031,
-0xafab005c, 0x1000002e, 0x240c0004, 0x8f420350,
-0x2403ffbf, 0x283a024, 0x24420001, 0xaf420350,
-0x10000173, 0x8f420350, 0x3c020800, 0x2c2b025,
-0x2402ffbf, 0x282a024, 0x8f830128, 0x3c040001,
-0x248468d0, 0x26620001, 0xafa20014, 0xafa30010,
-0x8f860120, 0x8f870124, 0x3c050007, 0xc002b3b,
-0x34a55300, 0x10000162, 0x0, 0x8ea20000,
-0x8ea30004, 0x3c040001, 0x248468e8, 0xafb00010,
-0xafb10014, 0x8ea70018, 0x34a55900, 0xc002b3b,
-0x603021, 0x10000156, 0x0, 0x8f420084,
-0x8fab0064, 0x4b102b, 0x14400007, 0x3c020001,
-0x2c21024, 0x10400004, 0x0, 0x240c0002,
-0xafac005c, 0x8fab0064, 0x11600166, 0x27ac0020,
-0xafac008c, 0x8fab005c, 0x240c0001, 0x556c0021,
-0x240c0002, 0x8f430054, 0x8f420050, 0x1062000b,
-0x274b0054, 0x8f510054, 0x3403ecc0, 0xafab004c,
-0x26220001, 0x304201ff, 0xafa20054, 0x111140,
-0x431021, 0x1000006b, 0x2e2a821, 0x8f420044,
-0x8fac0064, 0x3c040001, 0x248468ac, 0xafac0014,
-0xafa20010, 0x8f460054, 0x8f470050, 0x3c050007,
-0xc002b3b, 0x34a54300, 0x8f430350, 0x2402ffbf,
-0x282a024, 0x24630001, 0xaf430350, 0x10000124,
-0x8f420350, 0x156c001d, 0x0, 0x8f430074,
-0x8f420070, 0x1062000a, 0x274b0074, 0x8f510074,
-0xafab004c, 0x26220001, 0x304203ff, 0xafa20054,
-0x111140, 0x24426cc0, 0x1000004a, 0x2e2a821,
-0x8f420044, 0x8fac0064, 0x3c040001, 0x248468b8,
-0x3c050007, 0xafac0014, 0xafa20010, 0x8f460074,
-0x8f470070, 0x34a54500, 0x240b0001, 0xc002b3b,
-0xafab005c, 0x1000ffc3, 0x0, 0x8f430064,
-0x8f420060, 0x1062001a, 0x274c0064, 0x8f510064,
-0x8fab005c, 0xafac004c, 0x26220001, 0x304200ff,
-0xafa20054, 0x24020004, 0x1562000e, 0x111140,
-0x111180, 0x24420cc0, 0x2e21021, 0xafa20044,
-0x9442002a, 0x8fac0044, 0x8fab0064, 0x4b102b,
-0x10400024, 0x25950020, 0x240c0001, 0x10000021,
-0xa3ac0087, 0x24424cc0, 0x1000001e, 0x2e2a821,
-0x8f420044, 0x8fab0064, 0x3c040001, 0x248468c4,
-0xafab0014, 0xafa20010, 0x8f460064, 0x8f470060,
-0x3c050007, 0xc002b3b, 0x34a54800, 0x3c020008,
-0x2c21024, 0x1440ff61, 0x0, 0x8f420370,
-0x240c0001, 0xafac005c, 0x24420001, 0xaf420370,
-0x1000ff90, 0x8f420370, 0x27a30036, 0x131040,
-0x621821, 0x94620000, 0x441021, 0x1000001f,
-0xa4620000, 0xaebe0018, 0x93a20087, 0x10400084,
-0x9821, 0x8fab0044, 0x8fa40064, 0x8fa3008c,
-0x25620020, 0xafa20028, 0x25620008, 0xafa20030,
-0x25620010, 0xafab002c, 0xafa20034, 0x9562002a,
-0xa7a20038, 0x95620018, 0xa7a2003a, 0x9562001a,
-0xa7a2003c, 0x9562001c, 0xa7a2003e, 0x94620018,
-0x24630002, 0x822023, 0x1880ffdf, 0x26730001,
-0x2e620004, 0x1440fff9, 0x0, 0x8f4200fc,
-0x262102a, 0x14400030, 0x24030001, 0x8f83012c,
-0x10600028, 0x0, 0x8f820124, 0x431023,
-0x22143, 0x58800001, 0x24840040, 0x8f820128,
-0x431023, 0x21943, 0x58600001, 0x24630040,
-0x64102a, 0x54400001, 0x602021, 0xaf4400fc,
-0x8f4200fc, 0x262102a, 0x10400016, 0x24030001,
-0x1000001a, 0x306200ff, 0x8fac008c, 0x101040,
-0x4c1021, 0x94470018, 0x101080, 0x4c1021,
-0xafbe0010, 0x8c420008, 0x3c040001, 0x248468dc,
-0x3c050007, 0x8c430004, 0x8c420000, 0x34a55500,
-0x2003021, 0xc002b3b, 0xafa30014, 0x10000039,
-0x0, 0x8f420334, 0x1821, 0x24420001,
-0xaf420334, 0x8f420334, 0x306200ff, 0x1040ff06,
-0x8021, 0x8f430008, 0x2402fbff, 0x1260002d,
-0x625024, 0x3c0b4000, 0x22b4025, 0x8fb1008c,
-0x2669ffff, 0x2209021, 0x8e420008, 0x96270018,
-0x8c440000, 0x8c450004, 0x56090004, 0x240b0001,
-0x240c0002, 0x10000002, 0xafac0010, 0xafab0010,
-0x16000004, 0xafa80014, 0x8f420008, 0x10000002,
-0xafa20018, 0xafaa0018, 0x8f42010c, 0x3c03021,
-0xafa80098, 0xafa9009c, 0x40f809, 0xafaa00a0,
-0x8fa80098, 0x8fa9009c, 0x8faa00a0, 0x1040ffc2,
-0x3c02001f, 0x96230018, 0x3442ffff, 0x3c3f021,
-0x5e102b, 0x10400003, 0x26310002, 0x8f420148,
-0x3c2f023, 0x26100001, 0x213102b, 0x1440ffda,
-0x26520004, 0x8fb00064, 0x1000001a, 0x0,
-0x96a3000a, 0x8fb00064, 0x70102b, 0x54400001,
-0x608021, 0x8ea40000, 0x8ea50004, 0x8fab005c,
-0x240c0002, 0xafac0010, 0x934305c4, 0xb1700,
-0x10600003, 0x2223025, 0x3c020800, 0xc23025,
-0xafa60014, 0x8f420008, 0xafa20018, 0x8f42010c,
-0x3c03021, 0x40f809, 0x2003821, 0x1040fecb,
-0x3c050007, 0x97ac0076, 0x11800007, 0x96a3000e,
-0x934205c4, 0x14400004, 0x0, 0x97ab007e,
-0x6c1825, 0xa6ab0016, 0x8fac006c, 0x3c02ffff,
-0x1821024, 0x10400003, 0xc1402, 0x34630400,
-0xa6a20014, 0xa6b0000a, 0x8fab0064, 0x560b0006,
-0x3d0f021, 0x34620004, 0xafa00064, 0xa6a2000e,
-0x1000000d, 0xa34005c4, 0x8fac0064, 0x3c02001f,
-0x3442ffff, 0x5e102b, 0x1906023, 0xafac0064,
-0xa6a3000e, 0x240b0001, 0x10400003, 0xa34b05c4,
-0x8f420148, 0x3c2f023, 0x8fab0054, 0x8fac004c,
-0xad8b0000, 0x8fac0064, 0x1580feba, 0x0,
-0x8fab0064, 0x1160001b, 0x0, 0x934205c4,
-0x10400006, 0x0, 0xaf5e00c4, 0xaf4b00c0,
-0x8fac006c, 0x1000000e, 0xaf4c00c8, 0x97ab0076,
-0x1160000b, 0x34038100, 0x8fa20020, 0x8c46000c,
-0xa443000c, 0x97ac007e, 0x8c440004, 0x8c450008,
-0xa44c000e, 0xac440000, 0xac450004, 0xac460008,
-0x8f42034c, 0x24420001, 0xaf42034c, 0x10000010,
-0x8f42034c, 0x8fab006c, 0x3164ffff, 0x2484fffc,
-0x801821, 0x8f440250, 0x8f450254, 0x8f460118,
-0x1021, 0xa32821, 0xa3382b, 0x822021,
-0x872021, 0xaf440250, 0xc0f809, 0xaf450254,
-0x8fbf00c0, 0x8fbe00bc, 0x8fb500b8, 0x8fb300b4,
-0x8fb200b0, 0x8fb100ac, 0x8fb000a8, 0x3e00008,
-0x27bd00c8, 0x3e00008, 0x0, 0x27bdffd8,
-0xafbf0024, 0xafb00020, 0x8f43004c, 0x8f420048,
-0x10620034, 0x0, 0x8f430048, 0x8f42004c,
-0x622023, 0x4820001, 0x24840200, 0x8f430054,
-0x8f42004c, 0x43102b, 0x14400004, 0x24020200,
-0x8f43004c, 0x10000005, 0x431023, 0x8f420054,
-0x8f43004c, 0x431023, 0x2442ffff, 0x405021,
-0x8a102a, 0x54400001, 0x805021, 0x8f49004c,
-0x8f48004c, 0x8f440188, 0x8f45018c, 0x8f46004c,
-0x24071000, 0xafa70010, 0x84140, 0x1001821,
-0x12a4821, 0x313001ff, 0xafb00014, 0x8f470014,
-0x1021, 0x63140, 0xafa70018, 0xa32821,
-0xa3382b, 0x822021, 0x872021, 0x3402ecc0,
-0xc23021, 0x8f420108, 0x2e63021, 0x40f809,
-0xa3940, 0x54400001, 0xaf50004c, 0x8f43004c,
-0x8f420048, 0x14620018, 0x0, 0x8f420000,
-0x10400007, 0x0, 0xaf80004c, 0x8f82004c,
-0x1040fffd, 0x0, 0x10000005, 0x0,
-0xaf800048, 0x8f820048, 0x1040fffd, 0x0,
-0x8f820060, 0x2403fdff, 0x431024, 0xaf820060,
-0x8f420000, 0x10400003, 0x0, 0x10000002,
-0xaf80004c, 0xaf800048, 0x8fbf0024, 0x8fb00020,
-0x3e00008, 0x27bd0028, 0x3e00008, 0x0,
-0x27bdffd8, 0xafbf0024, 0xafb00020, 0x8f43005c,
-0x8f420058, 0x10620049, 0x0, 0x8f430058,
-0x8f42005c, 0x622023, 0x4820001, 0x24840100,
-0x8f430064, 0x8f42005c, 0x43102b, 0x14400004,
-0x24020100, 0x8f43005c, 0x10000005, 0x431023,
-0x8f420064, 0x8f43005c, 0x431023, 0x2442ffff,
-0x403821, 0x87102a, 0x54400001, 0x803821,
-0x8f42005c, 0x471021, 0x305000ff, 0x32c21000,
-0x10400015, 0x24082000, 0x8f49005c, 0x8f440190,
-0x8f450194, 0x8f46005c, 0x73980, 0xafa80010,
-0xafb00014, 0x8f480014, 0x94980, 0x1201821,
-0x1021, 0xa32821, 0xa3482b, 0x822021,
-0x892021, 0x63180, 0xafa80018, 0x8f420108,
-0x10000014, 0x24c60cc0, 0x8f49005c, 0x8f440190,
-0x8f450194, 0x8f46005c, 0x73940, 0xafa80010,
-0xafb00014, 0x8f480014, 0x94940, 0x1201821,
-0x1021, 0xa32821, 0xa3482b, 0x822021,
-0x892021, 0x63140, 0xafa80018, 0x8f420108,
-0x24c64cc0, 0x40f809, 0x2e63021, 0x54400001,
-0xaf50005c, 0x8f43005c, 0x8f420058, 0x14620018,
-0x0, 0x8f420000, 0x10400007, 0x0,
-0xaf80004c, 0x8f82004c, 0x1040fffd, 0x0,
-0x10000005, 0x0, 0xaf800048, 0x8f820048,
-0x1040fffd, 0x0, 0x8f820060, 0x2403feff,
-0x431024, 0xaf820060, 0x8f420000, 0x10400003,
-0x0, 0x10000002, 0xaf80004c, 0xaf800048,
-0x8fbf0024, 0x8fb00020, 0x3e00008, 0x27bd0028,
-0x3e00008, 0x0, 0x27bdffd8, 0xafbf0024,
-0xafb00020, 0x8f43006c, 0x8f420068, 0x10620033,
-0x0, 0x8f430068, 0x8f42006c, 0x622023,
-0x4820001, 0x24840400, 0x8f430074, 0x8f42006c,
-0x43102b, 0x14400004, 0x24020400, 0x8f43006c,
-0x10000005, 0x431023, 0x8f420074, 0x8f43006c,
-0x431023, 0x2442ffff, 0x405021, 0x8a102a,
-0x54400001, 0x805021, 0x8f49006c, 0x8f48006c,
-0x8f440198, 0x8f45019c, 0x8f46006c, 0x24074000,
-0xafa70010, 0x84140, 0x1001821, 0x12a4821,
-0x313003ff, 0xafb00014, 0x8f470014, 0x1021,
-0x63140, 0x24c66cc0, 0xafa70018, 0xa32821,
-0xa3382b, 0x822021, 0x872021, 0x8f420108,
-0x2e63021, 0x40f809, 0xa3940, 0x54400001,
-0xaf50006c, 0x8f43006c, 0x8f420068, 0x14620018,
-0x0, 0x8f420000, 0x10400007, 0x0,
-0xaf80004c, 0x8f82004c, 0x1040fffd, 0x0,
-0x10000005, 0x0, 0xaf800048, 0x8f820048,
-0x1040fffd, 0x0, 0x8f820060, 0x2403f7ff,
-0x431024, 0xaf820060, 0x8f420000, 0x10400003,
-0x0, 0x10000002, 0xaf80004c, 0xaf800048,
-0x8fbf0024, 0x8fb00020, 0x3e00008, 0x27bd0028,
-0x3e00008, 0x0, 0x8f4200fc, 0x3c030001,
-0x8f4400f8, 0x346330c8, 0x24420001, 0xaf4200fc,
-0x8f850128, 0x2e31021, 0x54820004, 0x24820008,
-0x3c020001, 0x34422ec8, 0x2e21021, 0x401821,
-0xaf4300f8, 0xac600000, 0x8f4200f4, 0x14620004,
-0x3c020001, 0x24a20020, 0x1000000f, 0xaf820128,
-0x8f4300f8, 0x344230c8, 0x2e21021, 0x54620004,
-0x24620008, 0x3c020001, 0x34422ec8, 0x2e21021,
-0x401821, 0x8c620004, 0x21140, 0xa21021,
-0xaf820128, 0xac600000, 0x8ca30018, 0x30620070,
-0x1040002d, 0x30620020, 0x10400004, 0x3c020010,
-0x2c21024, 0x1040000d, 0x0, 0x30620040,
-0x10400004, 0x3c020020, 0x2c21024, 0x10400007,
-0x0, 0x30620010, 0x1040001f, 0x3c020040,
-0x2c21024, 0x1440001c, 0x0, 0x8f820040,
-0x30420001, 0x14400008, 0x2021, 0x8c030104,
-0x24020001, 0x50620005, 0x24040001, 0x8c020264,
-0x10400003, 0x801021, 0x24040001, 0x801021,
-0x10400006, 0x0, 0x8f42030c, 0x24420001,
-0xaf42030c, 0x10000008, 0x8f42030c, 0x8f820044,
-0x34420004, 0xaf820044, 0x8f420308, 0x24420001,
-0xaf420308, 0x8f420308, 0x3e00008, 0x0,
-0x3e00008, 0x0, 0x27bdff98, 0xafbf0060,
-0xafbe005c, 0xafb50058, 0xafb30054, 0xafb20050,
-0xafb1004c, 0xafb00048, 0x8f4200fc, 0x24420001,
-0xaf4200fc, 0x8f880128, 0x25020020, 0xaf820128,
-0x8d030018, 0x30620070, 0x1040002e, 0x30620020,
-0x10400004, 0x3c020010, 0x2c21024, 0x1040000d,
-0x0, 0x30620040, 0x10400004, 0x3c020020,
-0x2c21024, 0x10400007, 0x0, 0x30620010,
-0x104001a9, 0x3c020040, 0x2c21024, 0x144001a6,
-0x0, 0x8f820040, 0x30420001, 0x14400008,
-0x2021, 0x8c030104, 0x24020001, 0x50620005,
-0x24040001, 0x8c020264, 0x10400003, 0x801021,
-0x24040001, 0x801021, 0x10400006, 0x0,
-0x8f42030c, 0x24420001, 0xaf42030c, 0x10000192,
-0x8f42030c, 0x8f820044, 0x34420004, 0xaf820044,
-0x8f420308, 0x24420001, 0xaf420308, 0x1000018a,
-0x8f420308, 0x30620002, 0x1040014b, 0x3c020800,
-0x8d1e001c, 0x1e5702, 0xafaa0034, 0x950a0016,
-0x3c22024, 0xafaa0024, 0x8faa0034, 0x24020001,
-0x15420006, 0x33deffff, 0x1e1140, 0x3403ecc0,
-0x431021, 0x10000010, 0x2e2a821, 0x24020002,
-0x15420005, 0x24020003, 0x1e1140, 0x24426cc0,
-0x10000009, 0x2e2a821, 0x15420005, 0x1e1180,
-0x1e1140, 0x24424cc0, 0x10000003, 0x2e2a821,
-0x571021, 0x24550ce0, 0x96a2000e, 0x304afffc,
-0x30420400, 0x10400003, 0xafaa002c, 0x100000e1,
-0x8821, 0x10800004, 0x8821, 0x97b10026,
-0x100000dd, 0xa6b10012, 0x8eb30018, 0x966a000c,
-0xa7aa003e, 0x97a5003e, 0x2ca305dd, 0x38a28870,
-0x2c420001, 0x621825, 0x10600015, 0x2021,
-0x32c20800, 0x10400015, 0x24020800, 0x96630014,
-0x14620012, 0x3402aaaa, 0x9663000e, 0x14620007,
-0x2821, 0x96630010, 0x24020300, 0x14620004,
-0xa01021, 0x96620012, 0x2c450001, 0xa01021,
-0x54400006, 0x24040016, 0x10000004, 0x0,
-0x24020800, 0x50a20001, 0x2404000e, 0x108000b9,
-0x2649021, 0x92420000, 0x3042000f, 0x28080,
-0x32c20100, 0x10400020, 0x2501821, 0x3c020020,
-0x43102b, 0x1440000e, 0x2402021, 0x2821,
-0x94820000, 0x24840002, 0xa22821, 0x83102b,
-0x1440fffb, 0x30a2ffff, 0x51c02, 0x622821,
-0x51c02, 0x30a2ffff, 0x10000009, 0x622821,
-0x8f470148, 0x8f420110, 0x102842, 0x3c060020,
-0x40f809, 0xafa80040, 0x3045ffff, 0x8fa80040,
-0x50a00001, 0x3405ffff, 0x8faa002c, 0x354a0002,
-0x10000002, 0xafaa002c, 0x2821, 0x32c20080,
-0x10400090, 0xa6a50010, 0x26430009, 0x3c02001f,
-0x3442ffff, 0x43102b, 0x10400003, 0x0,
-0x8f420148, 0x621823, 0x90660000, 0x30c200ff,
-0x38430006, 0x2c630001, 0x38420011, 0x2c420001,
-0x621825, 0x1060007f, 0x24020800, 0x8821,
-0x97a3003e, 0x1462000f, 0x2602021, 0x96710000,
-0x96620002, 0x96630004, 0x96640006, 0x2228821,
-0x2238821, 0x2248821, 0x96620008, 0x9663000a,
-0x9664000c, 0x2228821, 0x2238821, 0x10000007,
-0x2248821, 0x94820000, 0x24840002, 0x2228821,
-0x92102b, 0x1440fffb, 0x0, 0x111c02,
-0x3222ffff, 0x628821, 0x111c02, 0x3222ffff,
-0x628821, 0x32c20200, 0x10400003, 0x26440006,
-0x1000003e, 0x8021, 0x3c05001f, 0x34a5ffff,
-0xa4102b, 0x10400003, 0x0, 0x8f420148,
-0x822023, 0x94820000, 0x30421fff, 0x10400004,
-0x2644000c, 0x96420002, 0x10000030, 0x508023,
-0x96420002, 0x26430014, 0x508023, 0x3c020020,
-0x43102b, 0x1440000a, 0xd08021, 0x9642000c,
-0x2028021, 0x9642000e, 0x96430010, 0x96440012,
-0x2028021, 0x2038021, 0x10000020, 0x2048021,
-0xa4102b, 0x10400003, 0x0, 0x8f420148,
-0x822023, 0x94820000, 0x24840002, 0x2028021,
-0xa4102b, 0x10400003, 0x0, 0x8f420148,
-0x822023, 0x94820000, 0x24840002, 0x2028021,
-0xa4102b, 0x10400003, 0x0, 0x8f420148,
-0x822023, 0x94820000, 0x24840002, 0x2028021,
-0xa4102b, 0x10400003, 0x0, 0x8f420148,
-0x822023, 0x94820000, 0x2028021, 0x3c020100,
-0x2c21024, 0x1040000e, 0x0, 0x8faa002c,
-0x31420004, 0x1040000a, 0x0, 0x9504000e,
-0x2642021, 0xc003eec, 0x2484fffc, 0x3042ffff,
-0x2228821, 0x111c02, 0x3222ffff, 0x628821,
-0x8faa0024, 0x1518823, 0x111402, 0x2228821,
-0x2308821, 0x111402, 0x2228821, 0x3231ffff,
-0x52200001, 0x3411ffff, 0x8faa002c, 0x354a0001,
-0xafaa002c, 0xa6b10012, 0x97aa002e, 0xa6aa000e,
-0x8faa002c, 0x31420004, 0x10400002, 0x24091000,
-0x34098000, 0x8f480044, 0x8f4401a0, 0x8f4501a4,
-0xafa90010, 0x8f490044, 0x84140, 0x1001821,
-0xafa90014, 0x8f48000c, 0x2a03021, 0x24070020,
-0xafa80018, 0x8f48010c, 0x1021, 0xa32821,
-0xa3482b, 0x822021, 0x100f809, 0x892021,
-0x1440000b, 0x0, 0x8f820128, 0x3c040001,
-0x24846914, 0xafbe0014, 0xafa20010, 0x8f860124,
-0x8f870120, 0x3c050007, 0xc002b3b, 0x34a59920,
-0x8f420368, 0x2442ffff, 0xaf420368, 0x8f420044,
-0x8f430088, 0x24420001, 0x431024, 0xaf420044,
-0x8faa0034, 0x8f440368, 0x24020001, 0x15420006,
-0x24020002, 0x8f42035c, 0x2442ffff, 0xaf42035c,
-0x10000049, 0x8f42035c, 0x15420006, 0x0,
-0x8f420364, 0x2442ffff, 0xaf420364, 0x10000042,
-0x8f420364, 0x8f420360, 0x2442ffff, 0xaf420360,
-0x1000003d, 0x8f420360, 0x30621000, 0x10400005,
-0x30628000, 0x8f420078, 0x24420001, 0x10000036,
-0xaf420078, 0x10400034, 0x0, 0x8f420078,
-0x24420001, 0xaf420078, 0x8c030240, 0x43102b,
-0x1440002d, 0x24070008, 0x8f440168, 0x8f45016c,
-0x8f430044, 0x8f48000c, 0x8f860120, 0x24020040,
-0xafa20010, 0xafa30014, 0xafa80018, 0x8f42010c,
-0x40f809, 0x24c6001c, 0x14400011, 0x24020001,
-0x3c010001, 0x370821, 0xa02240f2, 0x8f820124,
-0xafa20010, 0x8f820128, 0x3c040001, 0x2484688c,
-0xafa20014, 0x8f460044, 0x8f870120, 0x3c050009,
-0xc002b3b, 0x34a51300, 0x1000000b, 0x0,
-0x8f420304, 0x24420001, 0xaf420304, 0x8f420304,
-0x8f420044, 0xaf42007c, 0x3c010001, 0x370821,
-0xa02040f2, 0xaf400078, 0x8f420318, 0x24420001,
-0xaf420318, 0x8f420318, 0x8fbf0060, 0x8fbe005c,
-0x8fb50058, 0x8fb30054, 0x8fb20050, 0x8fb1004c,
-0x8fb00048, 0x3e00008, 0x27bd0068, 0x3e00008,
-0x0, 0x0, 0x0, 0x8f42013c,
-0xaf8200c0, 0x8f42013c, 0xaf8200c4, 0x8f42013c,
-0xaf8200c8, 0x8f420138, 0xaf8200d0, 0x8f420138,
-0xaf8200d4, 0x8f420138, 0x3e00008, 0xaf8200d8,
-0x27bdffe0, 0x27840208, 0x24050200, 0xafbf0018,
-0xc002bbf, 0x24060008, 0x8c020204, 0xc004012,
-0xaf820210, 0x3c020001, 0x8c426d94, 0x30420002,
-0x1040000e, 0x2021, 0x8c060248, 0x24020002,
-0x3c010001, 0xac226d98, 0xc005104, 0x24050002,
-0x2021, 0x8c060248, 0x24020001, 0x3c010001,
-0xac226d98, 0x10000011, 0x24050001, 0x8c060248,
-0x24020004, 0x3c010001, 0xac226d98, 0xc005104,
-0x24050004, 0x3c020001, 0x8c426d94, 0x30420001,
-0x10400008, 0x24020001, 0x3c010001, 0xac226d98,
-0x2021, 0x24050001, 0x3c06601b, 0xc005104,
-0x0, 0x3c040001, 0x248469d0, 0x8f420150,
-0x8f430154, 0x3c050008, 0x8f460158, 0x21640,
-0x31940, 0x34630403, 0x431025, 0x633c0,
-0x461025, 0xaf82021c, 0xafa00010, 0xafa00014,
-0x8f86021c, 0x34a50200, 0xc002b3b, 0x3821,
-0x3c010001, 0xac206d90, 0x3c010001, 0xac206da8,
-0x8fbf0018, 0x3e00008, 0x27bd0020, 0x27bdffe0,
-0x3c050008, 0x34a50300, 0xafbf0018, 0xafa00010,
-0xafa00014, 0x8f860200, 0x3c040001, 0x248469dc,
-0xc002b3b, 0x3821, 0x8f420410, 0x24420001,
-0xaf420410, 0x8f420410, 0x8fbf0018, 0x3e00008,
-0x27bd0020, 0x27bdffd8, 0xafbf0020, 0xafb1001c,
-0xafb00018, 0x8f4203a4, 0x24420001, 0xaf4203a4,
-0x8f4203a4, 0x8f900220, 0x8f8200e0, 0xafa20010,
-0x8f8200e4, 0xafa20014, 0x8f8600c4, 0x8f8700c8,
-0x3c040001, 0x248469e8, 0xc002b3b, 0x2002821,
-0x3c044000, 0x2041024, 0x504000b4, 0x3c040100,
-0x8f4203bc, 0x24420001, 0xaf4203bc, 0x8f4203bc,
-0x8f8700c4, 0x8f8300c8, 0x8f420148, 0x671823,
-0x43102b, 0x10400003, 0x0, 0x8f420148,
-0x621821, 0x10600005, 0x0, 0x8f42014c,
-0x43102b, 0x1040000b, 0x0, 0x8f8200e0,
-0x8f430124, 0xaf42011c, 0xaf430114, 0x8f820220,
-0x3c0308ff, 0x3463fffb, 0x431024, 0x100000ce,
-0x441025, 0x8f820220, 0x3c0308ff, 0x3463ffff,
-0x431024, 0x34420004, 0xaf820220, 0x8f8200e0,
-0x8f430124, 0xaf42011c, 0xaf430114, 0x8f8600c8,
-0x8f840120, 0x8f830124, 0x10000005, 0x2821,
-0x14620002, 0x24620020, 0x27624800, 0x401821,
-0x1064000c, 0x30a200ff, 0x8c620018, 0x30420003,
-0x1040fff7, 0x27624fe0, 0x8f4203d0, 0x24050001,
-0x24420001, 0xaf4203d0, 0x8f4203d0, 0x8c660008,
-0x30a200ff, 0x14400058, 0x0, 0x934205c4,
-0x14400055, 0x0, 0x8f8700c4, 0x8f8800e0,
-0x8f8400e4, 0x2402fff8, 0x1024024, 0x1041023,
-0x218c3, 0x4620001, 0x24630200, 0x10600005,
-0x24020001, 0x10620009, 0x0, 0x1000001f,
-0x0, 0x8f4203c0, 0xe03021, 0x24420001,
-0xaf4203c0, 0x10000040, 0x8f4203c0, 0x8f4203c4,
-0x24420001, 0xaf4203c4, 0x8c860000, 0x8f420148,
-0x8f4303c4, 0xe61823, 0x43102b, 0x10400004,
-0x2c62233f, 0x8f420148, 0x621821, 0x2c62233f,
-0x14400031, 0x0, 0x8f42020c, 0x24420001,
-0xaf42020c, 0x8f42020c, 0xe03021, 0x24820008,
-0xaf8200e4, 0x10000028, 0xaf8200e8, 0x8f4203c8,
-0x24420001, 0xaf4203c8, 0x8f4203c8, 0x8c850000,
-0x8f420148, 0xa71823, 0x43102b, 0x10400003,
-0x0, 0x8f420148, 0x621821, 0x8f42014c,
-0x43102b, 0x5440000a, 0xa03021, 0x8f42020c,
-0x24420001, 0xaf42020c, 0x8f42020c, 0x24820008,
-0xaf8200e4, 0x8f8400e4, 0x1488ffec, 0xaf8400e8,
-0x1488000d, 0x27623000, 0x14820002, 0x2482fff8,
-0x27623ff8, 0x94430006, 0x3c02001f, 0x3442ffff,
-0xc33021, 0x46102b, 0x10400003, 0x0,
-0x8f420148, 0xc23023, 0xaf8600c8, 0x8f8300c4,
-0x8f420148, 0xc31823, 0x43102b, 0x10400003,
-0x0, 0x8f420148, 0x621821, 0x10600005,
-0x0, 0x8f42014c, 0x43102b, 0x50400008,
-0x3c02fdff, 0x8f820220, 0x3c0308ff, 0x3463fffb,
-0x431024, 0x3c034000, 0x1000003f, 0x431025,
-0x8f4303cc, 0x3442ffff, 0x282a024, 0x24630001,
-0xaf4303cc, 0x10000039, 0x8f4203cc, 0x2041024,
-0x1040000e, 0x3c110200, 0x8f4203a8, 0x24420001,
-0xaf4203a8, 0x8f4203a8, 0x8f820220, 0x3c0308ff,
-0x3463ffff, 0x431024, 0x441025, 0xc003daf,
-0xaf820220, 0x10000029, 0x0, 0x2111024,
-0x50400008, 0x3c110400, 0x8f4203ac, 0x24420001,
-0xaf4203ac, 0xc003daf, 0x8f4203ac, 0x10000019,
-0x0, 0x2111024, 0x1040001c, 0x0,
-0x8f830224, 0x24021402, 0x14620009, 0x3c050008,
-0x3c040001, 0x248469f4, 0xafa00010, 0xafa00014,
-0x8f860224, 0x34a50500, 0xc002b3b, 0x3821,
-0x8f4203b0, 0x24420001, 0xaf4203b0, 0x8f4203b0,
-0x8f820220, 0x2002021, 0x34420002, 0xc004e9c,
-0xaf820220, 0x8f820220, 0x3c0308ff, 0x3463ffff,
-0x431024, 0x511025, 0xaf820220, 0x8fbf0020,
-0x8fb1001c, 0x8fb00018, 0x3e00008, 0x27bd0028,
-0x3e00008, 0x0, 0x3c020001, 0x8c426da8,
-0x27bdffb0, 0xafbf0048, 0xafbe0044, 0xafb50040,
-0xafb3003c, 0xafb20038, 0xafb10034, 0x1040000f,
-0xafb00030, 0x3c040001, 0x24846a00, 0x3c050008,
-0xafa00010, 0xafa00014, 0x8f860220, 0x34a50600,
-0x24020001, 0x3c010001, 0xac206da8, 0x3c010001,
-0xac226d9c, 0xc002b3b, 0x3821, 0x3c037fff,
-0x8c020268, 0x3463ffff, 0x3c04fdff, 0x431024,
-0xac020268, 0x8f420004, 0x3484ffff, 0x30420002,
-0x10400092, 0x284a024, 0x3c040600, 0x34842000,
-0x8f420004, 0x2821, 0x2403fffd, 0x431024,
-0xaf420004, 0xafa40020, 0x8f5e0018, 0x27aa0020,
-0x240200ff, 0x13c20002, 0xafaa002c, 0x27c50001,
-0x8c020228, 0xa09021, 0x1642000e, 0x1e38c0,
-0x8f42033c, 0x24420001, 0xaf42033c, 0x8f42033c,
-0x8c020228, 0x3c040001, 0x24846998, 0x3c050009,
-0xafa00014, 0xafa20010, 0x8fa60020, 0x1000006d,
-0x34a50500, 0xf71021, 0x8fa30020, 0x8fa40024,
-0xac4304c0, 0xac4404c4, 0x8f830054, 0x8f820054,
-0x247003e8, 0x2021023, 0x2c4203e9, 0x1040001b,
-0x9821, 0xe08821, 0x263504c0, 0x8f440178,
-0x8f45017c, 0x2201821, 0x240a0004, 0xafaa0010,
-0xafb20014, 0x8f48000c, 0x1021, 0x2f53021,
-0xafa80018, 0x8f48010c, 0x24070008, 0xa32821,
-0xa3482b, 0x822021, 0x100f809, 0x892021,
-0x54400006, 0x24130001, 0x8f820054, 0x2021023,
-0x2c4203e9, 0x1440ffe9, 0x0, 0x326200ff,
-0x54400017, 0xaf520018, 0x8f420378, 0x24420001,
-0xaf420378, 0x8f420378, 0x8f820120, 0x8faa002c,
-0xafa20010, 0x8f820124, 0x3c040001, 0x248469a4,
-0x3c050009, 0xafa20014, 0x8d460000, 0x10000035,
-0x34a50600, 0x8f420308, 0x24130001, 0x24420001,
-0xaf420308, 0x8f420308, 0x1000001e, 0x326200ff,
-0x8f830054, 0x8f820054, 0x247003e8, 0x2021023,
-0x2c4203e9, 0x10400016, 0x9821, 0x3c150020,
-0x24110010, 0x8f42000c, 0x8f440160, 0x8f450164,
-0x8f860120, 0xafb10010, 0xafb20014, 0x551025,
-0xafa20018, 0x8f42010c, 0x24070008, 0x40f809,
-0x24c6001c, 0x1440ffe3, 0x0, 0x8f820054,
-0x2021023, 0x2c4203e9, 0x1440ffee, 0x0,
-0x326200ff, 0x14400011, 0x0, 0x8f420378,
-0x24420001, 0xaf420378, 0x8f420378, 0x8f820120,
-0x8faa002c, 0xafa20010, 0x8f820124, 0x3c040001,
-0x248469ac, 0x3c050009, 0xafa20014, 0x8d460000,
-0x34a50700, 0xc002b3b, 0x3c03821, 0x8f4202ec,
-0x24420001, 0xaf4202ec, 0x8f4202ec, 0x8fbf0048,
-0x8fbe0044, 0x8fb50040, 0x8fb3003c, 0x8fb20038,
-0x8fb10034, 0x8fb00030, 0x3e00008, 0x27bd0050,
-0x3c020001, 0x8c426da8, 0x27bdffe0, 0x1440000d,
-0xafbf0018, 0x3c040001, 0x24846a0c, 0x3c050008,
-0xafa00010, 0xafa00014, 0x8f860220, 0x34a50700,
-0x24020001, 0x3c010001, 0xac226da8, 0xc002b3b,
-0x3821, 0x3c020004, 0x2c21024, 0x10400007,
-0x0, 0x8f820220, 0x3c0308ff, 0x3463ffff,
-0x431024, 0x34420008, 0xaf820220, 0x3c050001,
-0x8ca56d98, 0x24020001, 0x14a20007, 0x2021,
-0xc00529b, 0x24050001, 0xac02026c, 0x8c03026c,
-0x10000006, 0x3c020007, 0xc00529b, 0x2021,
-0xac020268, 0x8c030268, 0x3c020007, 0x621824,
-0x3c020002, 0x5062000d, 0x3c0205f5, 0x43102b,
-0x14400006, 0x3c020004, 0x3c020001, 0x10620009,
-0x3c020098, 0x1000000b, 0x0, 0x14620009,
-0x3c023b9a, 0x10000004, 0x3442ca00, 0x10000002,
-0x3442e100, 0x34429680, 0xaf4201fc, 0x8f4201fc,
-0xaee20064, 0x8fbf0018, 0x3e00008, 0x27bd0020,
-0x0, 0x0, 0x0, 0x86102b,
-0x50400001, 0x872023, 0xc41023, 0x24843,
-0x125102b, 0x1040001b, 0x91040, 0x824021,
-0x88102b, 0x10400007, 0x1821, 0x94820000,
-0x24840002, 0x621821, 0x88102b, 0x1440fffb,
-0x0, 0x602021, 0xc73023, 0xa91023,
-0x21040, 0xc22821, 0xc5102b, 0x10400007,
-0x1821, 0x94c20000, 0x24c60002, 0x621821,
-0xc5102b, 0x1440fffb, 0x0, 0x1000000d,
-0x832021, 0x51040, 0x822821, 0x85102b,
-0x10400007, 0x1821, 0x94820000, 0x24840002,
-0x621821, 0x85102b, 0x1440fffb, 0x0,
-0x602021, 0x41c02, 0x3082ffff, 0x622021,
-0x41c02, 0x3082ffff, 0x622021, 0x3e00008,
-0x3082ffff, 0x3e00008, 0x0, 0x802821,
-0x30a20001, 0x1040002b, 0x3c03001f, 0x3463ffff,
-0x24a20004, 0x62102b, 0x54400007, 0x65102b,
-0x90a20001, 0x90a40003, 0x90a30000, 0x90a50002,
-0x1000002a, 0x441021, 0x10400003, 0x0,
-0x8f420148, 0xa22823, 0x90a40000, 0x24a50001,
-0x65102b, 0x10400003, 0x0, 0x8f420148,
-0xa22823, 0x90a20000, 0x24a50001, 0x21200,
-0x822021, 0x65102b, 0x10400003, 0x0,
-0x8f420148, 0xa22823, 0x90a20000, 0x24a50001,
-0x822021, 0x65102b, 0x10400003, 0x0,
-0x8f420148, 0xa22823, 0x90a20000, 0x1000002d,
-0x21200, 0x3463ffff, 0x24a20004, 0x62102b,
-0x5440000a, 0x65102b, 0x90a20000, 0x90a40002,
-0x90a30001, 0x90a50003, 0x441021, 0x21200,
-0x651821, 0x10000020, 0x432021, 0x10400003,
-0x0, 0x8f420148, 0xa22823, 0x90a20000,
-0x24a50001, 0x22200, 0x65102b, 0x10400003,
-0x0, 0x8f420148, 0xa22823, 0x90a20000,
-0x24a50001, 0x822021, 0x65102b, 0x10400003,
-0x0, 0x8f420148, 0xa22823, 0x90a20000,
-0x24a50001, 0x21200, 0x822021, 0x65102b,
-0x10400003, 0x0, 0x8f420148, 0xa22823,
-0x90a20000, 0x822021, 0x41c02, 0x3082ffff,
-0x622021, 0x41c02, 0x3082ffff, 0x622021,
-0x3e00008, 0x3082ffff, 0x0, 0x8f820220,
-0x34420002, 0xaf820220, 0x3c020002, 0x8c428ff8,
-0x30424000, 0x10400054, 0x24040001, 0x8f820200,
-0x24067fff, 0x8f830200, 0x30450002, 0x2402fffd,
-0x621824, 0xaf830200, 0xaf840204, 0x8f830054,
-0x8f820054, 0x10000002, 0x24630001, 0x8f820054,
-0x621023, 0x2c420002, 0x1440fffc, 0x0,
-0x8f820224, 0x1444004d, 0x42040, 0xc4102b,
-0x1040fff1, 0x0, 0x8f820200, 0x451025,
-0xaf820200, 0x8f820220, 0x34428000, 0xaf820220,
-0x8f830054, 0x8f820054, 0x10000002, 0x24630001,
-0x8f820054, 0x621023, 0x2c420002, 0x1440fffc,
-0x0, 0x8f820220, 0x3c030004, 0x431024,
-0x1440000f, 0x0, 0x8f820220, 0x3c03ffff,
-0x34637fff, 0x431024, 0xaf820220, 0x8f830054,
-0x8f820054, 0x10000002, 0x24630001, 0x8f820054,
-0x621023, 0x2c420002, 0x1440fffc, 0x0,
-0x8f820220, 0x3c030004, 0x431024, 0x1440000d,
-0x0, 0x8f820220, 0x34428000, 0xaf820220,
-0x8f830054, 0x8f820054, 0x10000002, 0x24630001,
-0x8f820054, 0x621023, 0x2c420002, 0x1440fffc,
-0x0, 0x8f820220, 0x3c030004, 0x431024,
-0x1040001b, 0x1021, 0x8f830220, 0x24020001,
-0x10000015, 0x3c04f700, 0x8f820220, 0x3c04f700,
-0x441025, 0xaf820220, 0x8f820220, 0x2403fffd,
-0x431024, 0xaf820220, 0x8f820220, 0x3c030300,
-0x431024, 0x14400003, 0x0, 0x10000008,
-0x1021, 0x8f820220, 0x34420002, 0xaf820220,
-0x8f830220, 0x24020001, 0x641825, 0xaf830220,
-0x3e00008, 0x0, 0x2021, 0x3c050100,
-0x24020001, 0xaf80021c, 0xaf820200, 0xaf820220,
-0x27625000, 0xaf8200c0, 0x27625000, 0xaf8200c4,
-0x27625000, 0xaf8200c8, 0x27625000, 0xaf8200d0,
-0x27625000, 0xaf8200d4, 0x27625000, 0xaf8200d8,
-0x27623000, 0xaf8200e0, 0x27623000, 0xaf8200e4,
-0x27623000, 0xaf8200e8, 0x27622800, 0xaf8200f0,
-0x27622800, 0xaf8200f4, 0x27622800, 0xaf8200f8,
-0x418c0, 0x24840001, 0x3631021, 0xac453004,
-0x3631021, 0xac403000, 0x28820200, 0x1440fff9,
-0x418c0, 0x2021, 0x418c0, 0x24840001,
-0x3631021, 0xac402804, 0x3631021, 0xac402800,
-0x28820100, 0x1440fff9, 0x418c0, 0xaf80023c,
-0x24030080, 0x24040100, 0xac600000, 0x24630004,
-0x64102b, 0x5440fffd, 0xac600000, 0x8f830040,
-0x3c02f000, 0x621824, 0x3c025000, 0x1062000c,
-0x43102b, 0x14400006, 0x3c026000, 0x3c024000,
-0x10620008, 0x24020800, 0x10000008, 0x0,
-0x10620004, 0x24020800, 0x10000004, 0x0,
-0x24020700, 0x3c010001, 0xac226dac, 0x3e00008,
-0x0, 0x3c020001, 0x8c426dbc, 0x27bdffd0,
-0xafbf002c, 0xafb20028, 0xafb10024, 0xafb00020,
-0x3c010001, 0x10400005, 0xac206d94, 0xc004d9e,
-0x0, 0x3c010001, 0xac206dbc, 0x8f830054,
-0x8f820054, 0x10000002, 0x24630064, 0x8f820054,
-0x621023, 0x2c420065, 0x1440fffc, 0x0,
-0xc004db9, 0x0, 0x24040001, 0x2821,
-0x27a60018, 0x34028000, 0xc0045be, 0xa7a20018,
-0x8f830054, 0x8f820054, 0x10000002, 0x24630064,
-0x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
-0x24040001, 0x24050001, 0xc00457c, 0x27a60018,
-0x8f830054, 0x8f820054, 0x10000002, 0x24630064,
-0x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
-0x24040001, 0x24050001, 0xc00457c, 0x27a60018,
-0x8f830054, 0x8f820054, 0x10000002, 0x24630064,
-0x8f820054, 0x621023, 0x2c420065, 0x1440fffc,
-0x24040001, 0x3c060001, 0x24c66f24, 0xc00457c,
-0x24050002, 0x8f830054, 0x8f820054, 0x10000002,
-0x24630064, 0x8f820054, 0x621023, 0x2c420065,
-0x1440fffc, 0x24040001, 0x24050003, 0x3c100001,
-0x26106f26, 0xc00457c, 0x2003021, 0x97a60018,
-0x3c070001, 0x94e76f24, 0x3c040001, 0x24846ae0,
-0xafa00014, 0x96020000, 0x3c05000d, 0x34a50100,
-0xc002b3b, 0xafa20010, 0x97a20018, 0x1040004d,
-0x24036040, 0x96020000, 0x3042fff0, 0x1443000c,
-0x24020020, 0x3c030001, 0x94636f24, 0x1462000b,
-0x24027830, 0x24020003, 0x3c010001, 0xac226d94,
-0x24020005, 0x3c010001, 0x1000003f, 0xac226f34,
-0x3c030001, 0x94636f24, 0x24027830, 0x1462000c,
-0x24030010, 0x3c020001, 0x94426f26, 0x3042fff0,
-0x14430007, 0x24020003, 0x3c010001, 0xac226d94,
-0x24020006, 0x3c010001, 0x1000002f, 0xac226f34,
-0x3c020001, 0x8c426d94, 0x3c030001, 0x94636f24,
-0x34420001, 0x3c010001, 0xac226d94, 0x24020015,
-0x1462000b, 0x0, 0x3c020001, 0x94426f26,
-0x3042fff0, 0x3843f420, 0x2c630001, 0x3842f430,
-0x2c420001, 0x621825, 0x1460001b, 0x24020003,
-0x3c030001, 0x94636f24, 0x24027810, 0x14620016,
-0x24020002, 0x3c020001, 0x94426f26, 0x3042fff0,
-0x14400011, 0x24020002, 0x1000000f, 0x24020004,
-0x3c020001, 0x8c426d94, 0x34420008, 0x3c010001,
-0xac226d94, 0x1000005e, 0x24020004, 0x3c020001,
-0x8c426d94, 0x34420004, 0x3c010001, 0x100000af,
-0xac226d94, 0x24020001, 0x3c010001, 0xac226f40,
-0x3c020001, 0x8c426d94, 0x30420002, 0x144000b2,
-0x3c09fff0, 0x24020e00, 0xaf820238, 0x8f840054,
-0x8f820054, 0x24030008, 0x3c010001, 0xac236d98,
-0x10000002, 0x248401f4, 0x8f820054, 0x821023,
-0x2c4201f5, 0x1440fffc, 0x3c0200c8, 0x344201fb,
-0xaf820238, 0x8f830054, 0x8f820054, 0x10000002,
-0x246301f4, 0x8f820054, 0x621023, 0x2c4201f5,
-0x1440fffc, 0x8021, 0x24120001, 0x24110009,
-0xc004482, 0x0, 0x3c010001, 0xac326db4,
-0xc004547, 0x0, 0x3c020001, 0x8c426db4,
-0x1451fffb, 0x3c0200c8, 0x344201f6, 0xaf820238,
-0x8f830054, 0x8f820054, 0x10000002, 0x2463000a,
-0x8f820054, 0x621023, 0x2c42000b, 0x1440fffc,
-0x0, 0x8f820220, 0x24040001, 0x34420002,
-0xaf820220, 0x8f830200, 0x24057fff, 0x2402fffd,
-0x621824, 0xaf830200, 0xaf840204, 0x8f830054,
-0x8f820054, 0x10000002, 0x24630001, 0x8f820054,
-0x621023, 0x2c420002, 0x1440fffc, 0x0,
-0x8f820224, 0x14440005, 0x34028000, 0x42040,
-0xa4102b, 0x1040fff0, 0x34028000, 0x1082ffa0,
-0x26100001, 0x2e020014, 0x1440ffcd, 0x24020004,
-0x3c010001, 0xac226d98, 0x8021, 0x24120009,
-0x3c11ffff, 0x36313f7f, 0xc004482, 0x0,
-0x24020001, 0x3c010001, 0xac226db4, 0xc004547,
-0x0, 0x3c020001, 0x8c426db4, 0x1452fffb,
-0x0, 0x8f820044, 0x511024, 0x34425080,
-0xaf820044, 0x8f830054, 0x8f820054, 0x10000002,
-0x2463000a, 0x8f820054, 0x621023, 0x2c42000b,
-0x1440fffc, 0x0, 0x8f820044, 0x511024,
-0x3442f080, 0xaf820044, 0x8f830054, 0x8f820054,
-0x10000002, 0x2463000a, 0x8f820054, 0x621023,
-0x2c42000b, 0x1440fffc, 0x0, 0x8f820220,
-0x3c03f700, 0x431025, 0xaf820220, 0x8f830054,
-0x8f820054, 0x10000002, 0x24630064, 0x8f820054,
-0x621023, 0x2c420065, 0x1440fffc, 0x0,
-0x8f820220, 0x24040001, 0x34420002, 0xaf820220,
-0x8f830200, 0x24057fff, 0x2402fffd, 0x621824,
-0xaf830200, 0xaf840204, 0x8f830054, 0x8f820054,
-0x10000002, 0x24630001, 0x8f820054, 0x621023,
-0x2c420002, 0x1440fffc, 0x0, 0x8f820224,
-0x14440005, 0x34028000, 0x42040, 0xa4102b,
-0x1040fff0, 0x34028000, 0x1082ff50, 0x26100001,
-0x2e020064, 0x1440ffb0, 0x0, 0x3c020001,
-0x8c426d94, 0x30420004, 0x14400007, 0x3c09fff0,
-0x8f820044, 0x3c03ffff, 0x34633f7f, 0x431024,
-0xaf820044, 0x3c09fff0, 0x3529bdc0, 0x3c060001,
-0x8cc66d94, 0x3c040001, 0x24846ae0, 0x24020001,
-0x3c010001, 0xac226d9c, 0x8f820054, 0x3c070001,
-0x8ce76f40, 0x3c030001, 0x94636f24, 0x3c080001,
-0x95086f26, 0x3c05000d, 0x34a50100, 0x3c010001,
-0xac206d98, 0x491021, 0x3c010001, 0xac226f30,
-0xafa30010, 0xc002b3b, 0xafa80014, 0x8fbf002c,
-0x8fb20028, 0x8fb10024, 0x8fb00020, 0x3e00008,
-0x27bd0030, 0x27bdffe8, 0x3c050001, 0x8ca56d98,
-0x24060004, 0x24020001, 0x14a20014, 0xafbf0010,
-0x3c020002, 0x8c428ffc, 0x30428000, 0x10400005,
-0x3c04000f, 0x3c030001, 0x8c636f40, 0x10000005,
-0x34844240, 0x3c040004, 0x3c030001, 0x8c636f40,
-0x348493e0, 0x24020005, 0x14620016, 0x0,
-0x3c04003d, 0x10000013, 0x34840900, 0x3c020002,
-0x8c428ff8, 0x30428000, 0x10400005, 0x3c04001e,
-0x3c030001, 0x8c636f40, 0x10000005, 0x34848480,
-0x3c04000f, 0x3c030001, 0x8c636f40, 0x34844240,
-0x24020005, 0x14620003, 0x0, 0x3c04007a,
-0x34841200, 0x3c020001, 0x8c426f30, 0x8f830054,
-0x441021, 0x431023, 0x44102b, 0x1440004c,
-0x0, 0x3c020001, 0x8c426da0, 0x14400048,
-0x0, 0x3c010001, 0x10c00025, 0xac206db0,
-0x3c090001, 0x8d296d94, 0x24070001, 0x3c044000,
-0x3c080002, 0x25088ffc, 0x250afffc, 0x52842,
-0x14a00002, 0x24c6ffff, 0x24050008, 0xa91024,
-0x10400010, 0x0, 0x14a70008, 0x0,
-0x8d020000, 0x441024, 0x1040000a, 0x0,
-0x3c010001, 0x10000007, 0xac256db0, 0x8d420000,
-0x441024, 0x10400003, 0x0, 0x3c010001,
-0xac276db0, 0x3c020001, 0x8c426db0, 0x6182b,
-0x2c420001, 0x431024, 0x5440ffe5, 0x52842,
-0x8f820054, 0x3c030001, 0x8c636db0, 0x3c010001,
-0xac226f30, 0x1060003b, 0x24020005, 0x3c030001,
-0x8c636f40, 0x3c010001, 0xac256d98, 0x14620012,
-0x24020001, 0x3c020002, 0x8c428ff8, 0x3c032000,
-0x34635000, 0x431024, 0x14400006, 0x24020001,
-0x3c010001, 0xac206f1c, 0x3c010001, 0xac226d98,
-0x24020001, 0x3c010001, 0xac226e24, 0x3c010001,
-0xac226da4, 0x24020001, 0x3c010001, 0xac226d9c,
-0x3c020001, 0x8c426db0, 0x1040001e, 0x0,
-0x3c020001, 0x8c426d9c, 0x10400008, 0x24020001,
-0x3c010001, 0xac206d9c, 0xaee204b8, 0x3c010001,
-0xac206e1c, 0x3c010001, 0xac226dd4, 0x8ee304b8,
-0x24020008, 0x10620005, 0x24020001, 0xc004239,
-0x0, 0x1000000b, 0x0, 0x3c030001,
-0x8c636d98, 0x10620007, 0x2402000e, 0x3c030002,
-0x8c638f90, 0x10620003, 0x0, 0xc004e9c,
-0x8f840220, 0x8fbf0010, 0x3e00008, 0x27bd0018,
-0x27bdffe0, 0x3c03fdff, 0x3c040001, 0x8c846d98,
-0x3c020001, 0x8c426dc0, 0x3463ffff, 0x283a024,
-0x14820006, 0xafbf0018, 0x8ee304b8, 0x3c020001,
-0x8c426dc4, 0x10620006, 0x0, 0x8ee204b8,
-0x3c010001, 0xac246dc0, 0x3c010001, 0xac226dc4,
-0x3c030001, 0x8c636d98, 0x24020002, 0x1062019c,
-0x2c620003, 0x10400005, 0x24020001, 0x1062000a,
-0x0, 0x10000226, 0x0, 0x24020004,
-0x106200b6, 0x24020008, 0x1062010a, 0x24020001,
-0x1000021f, 0x0, 0x8ee204b8, 0x2443ffff,
-0x2c620008, 0x1040021c, 0x31080, 0x3c010001,
-0x220821, 0x8c226af8, 0x400008, 0x0,
-0x3c030001, 0x8c636f40, 0x24020005, 0x14620010,
-0x0, 0x3c020001, 0x8c426da4, 0x10400008,
-0x24020003, 0xc004482, 0x0, 0x24020002,
-0xaee204b8, 0x3c010001, 0x10000002, 0xac206da4,
-0xaee204b8, 0x3c010001, 0x10000203, 0xac206d30,
-0xc004482, 0x0, 0x3c020001, 0x8c426da4,
-0x3c010001, 0xac206d30, 0x1440017a, 0x24020002,
-0x1000019d, 0x24020007, 0x3c030001, 0x8c636f40,
-0x24020005, 0x14620003, 0x24020001, 0x3c010001,
-0xac226dd0, 0xc0045ff, 0x0, 0x3c030001,
-0x8c636dd0, 0x10000174, 0x24020011, 0x3c050001,
-0x8ca56d98, 0x3c060002, 0x8cc68ffc, 0xc005104,
-0x2021, 0x24020005, 0x3c010001, 0xac206da4,
-0x100001e1, 0xaee204b8, 0x3c040001, 0x24846aec,
-0x3c05000f, 0x34a50100, 0x3021, 0x3821,
-0xafa00010, 0xc002b3b, 0xafa00014, 0x100001d6,
-0x0, 0x8f820220, 0x3c030004, 0x431024,
-0x14400175, 0x24020007, 0x8f830054, 0x3c020001,
-0x8c426f28, 0x2463d8f0, 0x431023, 0x2c422710,
-0x14400003, 0x24020001, 0x3c010001, 0xac226d9c,
-0x3c020002, 0x8c428ffc, 0x30425000, 0x104001c2,
-0x0, 0x8f820220, 0x30428000, 0x1040017d,
-0x0, 0x10000175, 0x0, 0x3c050001,
-0x8ca56d98, 0xc00529b, 0x2021, 0xc00551b,
-0x2021, 0x3c030002, 0x8c638ff4, 0x46101b0,
-0x24020001, 0x3c020008, 0x621024, 0x10400006,
-0x0, 0x8f820214, 0x3c03ffff, 0x431024,
-0x10000005, 0x3442251f, 0x8f820214, 0x3c03ffff,
-0x431024, 0x3442241f, 0xaf820214, 0x8f820220,
-0x3c030200, 0x34420002, 0xaf820220, 0x24020008,
-0xaee204b8, 0x8f820220, 0x283a025, 0x3c030004,
-0x431024, 0x14400016, 0x0, 0x3c020002,
-0x8c428ffc, 0x30425000, 0x1040000d, 0x0,
-0x8f820220, 0x30428000, 0x10400006, 0x0,
-0x8f820220, 0x3c03ffff, 0x34637fff, 0x10000003,
-0x431024, 0x8f820220, 0x34428000, 0xaf820220,
-0x8f820220, 0x3c03f700, 0x431025, 0xaf820220,
-0x3c030001, 0x8c636f40, 0x24020005, 0x1462000a,
-0x0, 0x3c020001, 0x94426f26, 0x24429fbc,
-0x2c420004, 0x10400004, 0x24040018, 0x24050002,
-0xc004ddb, 0x24060020, 0xc003e6d, 0x0,
-0x3c010001, 0x10000170, 0xac206e20, 0x8ee204b8,
-0x2443ffff, 0x2c620008, 0x1040016b, 0x31080,
-0x3c010001, 0x220821, 0x8c226b18, 0x400008,
-0x0, 0xc004547, 0x0, 0x3c030001,
-0x8c636db4, 0x100000e8, 0x24020009, 0x3c020002,
-0x8c428ff8, 0x30424000, 0x10400004, 0x0,
-0x8f820044, 0x10000006, 0x3442f080, 0x8f820044,
-0x3c03ffff, 0x34633f7f, 0x431024, 0x3442a080,
-0xaf820044, 0x8f830054, 0x100000ea, 0x24020004,
-0x8f830054, 0x3c020001, 0x8c426f28, 0x2463d8f0,
-0x431023, 0x2c422710, 0x14400147, 0x24020005,
-0x100000d8, 0x0, 0x8f820220, 0x3c03f700,
-0x431025, 0xaf820220, 0xaf800204, 0x3c010002,
-0x100000d6, 0xac208fe0, 0x8f830054, 0x3c020001,
-0x8c426f28, 0x2463fff6, 0x431023, 0x2c42000a,
-0x14400135, 0x24020007, 0x100000d7, 0x0,
-0xc003f50, 0x0, 0x1040012d, 0x24020001,
-0x8f820214, 0x3c03ffff, 0x3c040001, 0x8c846f1c,
-0x431024, 0x3442251f, 0xaf820214, 0x24020008,
-0x10800005, 0xaee204b8, 0x3c020001, 0x8c426e44,
-0x10400064, 0x24020001, 0x8f820220, 0x3c030008,
-0x431024, 0x1040006a, 0x3c020200, 0x10000078,
-0x0, 0x8ee204b8, 0x2443ffff, 0x2c620007,
-0x10400115, 0x31080, 0x3c010001, 0x220821,
-0x8c226b38, 0x400008, 0x0, 0xc003daf,
-0x0, 0x3c010001, 0xac206d9c, 0xaf800204,
-0x3c010002, 0xc004482, 0xac208fe0, 0x24020001,
-0x3c010001, 0xac226db4, 0x24020002, 0x10000102,
-0xaee204b8, 0xc004547, 0x0, 0x3c030001,
-0x8c636db4, 0x10000084, 0x24020009, 0x3c020002,
-0x8c428ff8, 0x30424000, 0x10400003, 0x3c0200c8,
-0x10000002, 0x344201f6, 0x344201fe, 0xaf820238,
-0x8f830054, 0x1000008b, 0x24020004, 0x8f830054,
-0x3c020001, 0x8c426f28, 0x2463d8f0, 0x431023,
-0x2c422710, 0x144000e8, 0x24020005, 0x10000079,
-0x0, 0x8f820220, 0x3c03f700, 0x431025,
-0xaf820220, 0xaf800204, 0x3c010002, 0x10000077,
-0xac208fe0, 0x8f830054, 0x3c020001, 0x8c426f28,
-0x2463fff6, 0x431023, 0x2c42000a, 0x144000d6,
-0x24020007, 0x10000078, 0x0, 0xc003f50,
-0x0, 0x104000ce, 0x24020001, 0x8f820214,
-0x3c03ffff, 0x3c040001, 0x8c846f1c, 0x431024,
-0x3442251f, 0xaf820214, 0x24020008, 0x1080000f,
-0xaee204b8, 0x3c020001, 0x8c426e44, 0x1440000b,
-0x0, 0x8f820220, 0x34420002, 0xaf820220,
-0x24020001, 0x3c010002, 0xac228f90, 0xc004e9c,
-0x8f840220, 0x10000016, 0x0, 0x8f820220,
-0x3c030008, 0x431024, 0x14400011, 0x3c020200,
-0x282a025, 0x2402000e, 0x3c010002, 0xac228f90,
-0xc00551b, 0x2021, 0x8f820220, 0x34420002,
-0xc003e6d, 0xaf820220, 0x3c050001, 0x8ca56d98,
-0xc00529b, 0x2021, 0x100000a3, 0x0,
-0x3c020001, 0x8c426e44, 0x1040009f, 0x0,
-0x3c020001, 0x8c426e40, 0x2442ffff, 0x3c010001,
-0xac226e40, 0x14400098, 0x24020002, 0x3c010001,
-0xac206e44, 0x3c010001, 0x10000093, 0xac226e40,
-0x8ee204b8, 0x2443ffff, 0x2c620007, 0x1040008e,
-0x31080, 0x3c010001, 0x220821, 0x8c226b58,
-0x400008, 0x0, 0x3c020001, 0x8c426da4,
-0x10400018, 0x24020005, 0xc004482, 0x0,
-0x24020002, 0xaee204b8, 0x3c010001, 0x1000007e,
-0xac206da4, 0xc004963, 0x0, 0x3c030001,
-0x8c636dd4, 0x24020006, 0x14620077, 0x24020003,
-0x10000075, 0xaee204b8, 0x3c050001, 0x8ca56d98,
-0x3c060002, 0x8cc68ff8, 0xc005104, 0x2021,
-0x24020005, 0x1000006c, 0xaee204b8, 0x8f820220,
-0x3c03f700, 0x431025, 0xaf820220, 0x8f830054,
-0x24020006, 0xaee204b8, 0x3c010001, 0x10000062,
-0xac236f28, 0x8f820220, 0x3c030004, 0x431024,
-0x10400003, 0x24020007, 0x1000005b, 0xaee204b8,
-0x8f830054, 0x3c020001, 0x8c426f28, 0x2463d8f0,
-0x431023, 0x2c422710, 0x14400003, 0x24020001,
-0x3c010001, 0xac226d9c, 0x3c020002, 0x8c428ff8,
-0x30425000, 0x1040004c, 0x0, 0x8f820220,
-0x30428000, 0x10400007, 0x0, 0x8f820220,
-0x3c03ffff, 0x34637fff, 0x431024, 0x10000042,
-0xaf820220, 0x8f820220, 0x34428000, 0x1000003e,
-0xaf820220, 0x3c050001, 0x8ca56d98, 0xc00529b,
-0x2021, 0xc00551b, 0x2021, 0x3c020002,
-0x8c428ff0, 0x4410032, 0x24020001, 0x8f820214,
-0x3c03ffff, 0x431024, 0x3442251f, 0xaf820214,
-0x24020008, 0xaee204b8, 0x8f820220, 0x34420002,
-0xaf820220, 0x8f820220, 0x3c030004, 0x431024,
-0x14400016, 0x0, 0x3c020002, 0x8c428ff8,
-0x30425000, 0x1040000d, 0x0, 0x8f820220,
-0x30428000, 0x10400006, 0x0, 0x8f820220,
-0x3c03ffff, 0x34637fff, 0x10000003, 0x431024,
-0x8f820220, 0x34428000, 0xaf820220, 0x8f820220,
-0x3c03f700, 0x431025, 0xaf820220, 0x3c020001,
-0x94426f26, 0x24429fbc, 0x2c420004, 0x10400004,
-0x24040018, 0x24050002, 0xc004ddb, 0x24060020,
-0xc003e6d, 0x0, 0x10000003, 0x0,
-0x3c010001, 0xac226d9c, 0x8fbf0018, 0x3e00008,
-0x27bd0020, 0x8f820200, 0x8f820220, 0x8f820220,
-0x34420004, 0xaf820220, 0x8f820200, 0x3c050001,
-0x8ca56d98, 0x34420004, 0xaf820200, 0x24020002,
-0x10a2004b, 0x2ca20003, 0x10400005, 0x24020001,
-0x10a2000a, 0x0, 0x100000b1, 0x0,
-0x24020004, 0x10a20072, 0x24020008, 0x10a20085,
-0x3c02f0ff, 0x100000aa, 0x0, 0x8f830050,
-0x3c02f0ff, 0x3442ffff, 0x3c040001, 0x8c846f40,
-0x621824, 0x3c020700, 0x621825, 0x24020e00,
-0x2484fffb, 0x2c840002, 0xaf830050, 0xaf850200,
-0xaf850220, 0x14800006, 0xaf820238, 0x8f820044,
-0x3c03ffff, 0x34633f7f, 0x431024, 0xaf820044,
-0x3c030001, 0x8c636f40, 0x24020005, 0x14620004,
-0x0, 0x8f820044, 0x34425000, 0xaf820044,
-0x3c020001, 0x8c426d88, 0x3c030001, 0x8c636f40,
-0x34420022, 0x2463fffc, 0x2c630002, 0x1460000c,
-0xaf820200, 0x3c020001, 0x8c426dac, 0x3c030001,
-0x8c636d90, 0x3c040001, 0x8c846d8c, 0x34428000,
-0x621825, 0x641825, 0x1000000a, 0x34620002,
-0x3c020001, 0x8c426d90, 0x3c030001, 0x8c636dac,
-0x3c040001, 0x8c846d8c, 0x431025, 0x441025,
-0x34420002, 0xaf820220, 0x1000002f, 0x24020001,
-0x24020e01, 0xaf820238, 0x8f830050, 0x3c02f0ff,
-0x3442ffff, 0x3c040001, 0x8c846f1c, 0x621824,
-0x3c020d00, 0x621825, 0x24020001, 0xaf830050,
-0xaf820200, 0xaf820220, 0x10800005, 0x3c033f00,
-0x3c020001, 0x8c426d80, 0x10000004, 0x34630070,
-0x3c020001, 0x8c426d80, 0x34630072, 0x431025,
-0xaf820200, 0x3c030001, 0x8c636d84, 0x3c02f700,
-0x621825, 0x3c020001, 0x8c426d90, 0x3c040001,
-0x8c846dac, 0x3c050001, 0x8ca56f40, 0x431025,
-0x441025, 0xaf820220, 0x24020005, 0x14a20006,
-0x24020001, 0x8f820044, 0x2403afff, 0x431024,
-0xaf820044, 0x24020001, 0x1000003d, 0xaf820238,
-0x8f830050, 0x3c02f0ff, 0x3442ffff, 0x3c040001,
-0x8c846f1c, 0x621824, 0x3c020a00, 0x621825,
-0x24020001, 0xaf830050, 0xaf820200, 0x1080001e,
-0xaf820220, 0x3c020001, 0x8c426e44, 0x1440001a,
-0x3c033f00, 0x3c020001, 0x8c426d80, 0x1000001a,
-0x346300e0, 0x8f830050, 0x3c040001, 0x8c846f1c,
-0x3442ffff, 0x621824, 0x1080000f, 0xaf830050,
-0x3c020001, 0x8c426e44, 0x1440000b, 0x3c043f00,
-0x3c030001, 0x8c636d80, 0x348400e0, 0x24020001,
-0xaf820200, 0xaf820220, 0x641825, 0xaf830200,
-0x10000008, 0x3c05f700, 0x3c020001, 0x8c426d80,
-0x3c033f00, 0x346300e2, 0x431025, 0xaf820200,
-0x3c05f700, 0x34a58000, 0x3c030001, 0x8c636d84,
-0x3c020001, 0x8c426d90, 0x3c040001, 0x8c846dac,
-0x651825, 0x431025, 0x441025, 0xaf820220,
-0x3e00008, 0x0, 0x3c030001, 0x8c636db4,
-0x3c020001, 0x8c426db8, 0x10620003, 0x24020002,
-0x3c010001, 0xac236db8, 0x1062001d, 0x2c620003,
-0x10400025, 0x24020001, 0x14620023, 0x24020004,
-0x3c030001, 0x8c636d98, 0x10620006, 0x24020008,
-0x1462000c, 0x3c0200c8, 0x344201fb, 0x10000009,
-0xaf820238, 0x24020e01, 0xaf820238, 0x8f820044,
-0x3c03ffff, 0x34633f7f, 0x431024, 0x34420080,
-0xaf820044, 0x8f830054, 0x24020002, 0x3c010001,
-0xac226db4, 0x3c010001, 0x1000000b, 0xac236f2c,
-0x8f830054, 0x3c020001, 0x8c426f2c, 0x2463d8f0,
-0x431023, 0x2c422710, 0x14400003, 0x24020009,
-0x3c010001, 0xac226db4, 0x3e00008, 0x0,
-0x0, 0x0, 0x0, 0x27bdffd8,
-0xafb20018, 0x809021, 0xafb3001c, 0xa09821,
-0xafb10014, 0xc08821, 0xafb00010, 0x8021,
-0xafbf0020, 0xa6200000, 0xc004d78, 0x24040001,
-0x26100001, 0x2e020020, 0x1440fffb, 0x0,
-0xc004d78, 0x2021, 0xc004d78, 0x24040001,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0x24100010, 0x2501024, 0x10400002, 0x2021,
-0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
-0x2501024, 0x24100010, 0x2701024, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fffa, 0x2701024, 0xc004db9, 0x34108000,
-0xc004db9, 0x0, 0xc004d58, 0x0,
-0x50400005, 0x108042, 0x96220000, 0x501025,
-0xa6220000, 0x108042, 0x1600fff7, 0x0,
-0xc004db9, 0x0, 0x8fbf0020, 0x8fb3001c,
-0x8fb20018, 0x8fb10014, 0x8fb00010, 0x3e00008,
-0x27bd0028, 0x27bdffd8, 0xafb10014, 0x808821,
-0xafb20018, 0xa09021, 0xafb3001c, 0xc09821,
-0xafb00010, 0x8021, 0xafbf0020, 0xc004d78,
-0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
-0x0, 0xc004d78, 0x2021, 0xc004d78,
-0x24040001, 0xc004d78, 0x2021, 0xc004d78,
-0x24040001, 0x24100010, 0x2301024, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fffa, 0x2301024, 0x24100010, 0x2501024,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x2501024, 0xc004d78,
-0x24040001, 0xc004d78, 0x2021, 0x34108000,
-0x96620000, 0x501024, 0x10400002, 0x2021,
-0x24040001, 0xc004d78, 0x108042, 0x1600fff8,
-0x0, 0xc004db9, 0x0, 0x8fbf0020,
-0x8fb3001c, 0x8fb20018, 0x8fb10014, 0x8fb00010,
-0x3e00008, 0x27bd0028, 0x3c040001, 0x8c846dd0,
-0x3c020001, 0x8c426e18, 0x27bdffd8, 0xafbf0020,
-0xafb1001c, 0x10820003, 0xafb00018, 0x3c010001,
-0xac246e18, 0x3c030001, 0x8c636f40, 0x24020005,
-0x14620005, 0x2483ffff, 0xc004963, 0x0,
-0x1000034c, 0x0, 0x2c620013, 0x10400349,
-0x31080, 0x3c010001, 0x220821, 0x8c226b80,
-0x400008, 0x0, 0xc004db9, 0x8021,
-0x34028000, 0xa7a20010, 0x27b10010, 0xc004d78,
-0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
-0x0, 0xc004d78, 0x2021, 0xc004d78,
-0x24040001, 0xc004d78, 0x2021, 0xc004d78,
-0x24040001, 0x24100010, 0x32020001, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fffa, 0x32020001, 0x24100010, 0xc004d78,
-0x2021, 0x108042, 0x1600fffc, 0x0,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0x34108000, 0x96220000, 0x501024, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fff8, 0x0, 0xc004db9, 0x0,
-0x1000030e, 0x24020002, 0x27b10010, 0xa7a00010,
-0x8021, 0xc004d78, 0x24040001, 0x26100001,
-0x2e020020, 0x1440fffb, 0x0, 0xc004d78,
-0x2021, 0xc004d78, 0x24040001, 0xc004d78,
-0x24040001, 0xc004d78, 0x2021, 0x24100010,
-0x32020001, 0x10400002, 0x2021, 0x24040001,
-0xc004d78, 0x108042, 0x1600fffa, 0x32020001,
-0x24100010, 0xc004d78, 0x2021, 0x108042,
-0x1600fffc, 0x0, 0xc004db9, 0x34108000,
-0xc004db9, 0x0, 0xc004d58, 0x0,
-0x50400005, 0x108042, 0x96220000, 0x501025,
-0xa6220000, 0x108042, 0x1600fff7, 0x0,
-0xc004db9, 0x0, 0x97a20010, 0x30428000,
-0x144002dc, 0x24020003, 0x100002d8, 0x0,
-0x24021200, 0xa7a20010, 0x27b10010, 0x8021,
-0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0xc004d78, 0x2021, 0x108042, 0x1600fffc,
-0x0, 0xc004d78, 0x24040001, 0xc004d78,
-0x2021, 0x34108000, 0x96220000, 0x501024,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fff8, 0x0, 0xc004db9,
-0x0, 0x8f830054, 0x10000296, 0x24020004,
-0x8f830054, 0x3c020001, 0x8c426f3c, 0x2463ff9c,
-0x431023, 0x2c420064, 0x1440029e, 0x24020002,
-0x3c030001, 0x8c636f40, 0x10620297, 0x2c620003,
-0x14400296, 0x24020011, 0x24020003, 0x10620005,
-0x24020004, 0x10620291, 0x2402000f, 0x1000028f,
-0x24020011, 0x1000028d, 0x24020005, 0x24020014,
-0xa7a20010, 0x27b10010, 0x8021, 0xc004d78,
-0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
-0x0, 0xc004d78, 0x2021, 0xc004d78,
-0x24040001, 0xc004d78, 0x2021, 0xc004d78,
-0x24040001, 0x24100010, 0x32020001, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fffa, 0x32020001, 0x24100010, 0x32020012,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x32020012, 0xc004d78,
-0x24040001, 0xc004d78, 0x2021, 0x34108000,
-0x96220000, 0x501024, 0x10400002, 0x2021,
-0x24040001, 0xc004d78, 0x108042, 0x1600fff8,
-0x0, 0xc004db9, 0x0, 0x8f830054,
-0x10000248, 0x24020006, 0x8f830054, 0x3c020001,
-0x8c426f3c, 0x2463ff9c, 0x431023, 0x2c420064,
-0x14400250, 0x24020007, 0x1000024c, 0x0,
-0x24020006, 0xa7a20010, 0x27b10010, 0x8021,
-0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020013, 0x10400002, 0x2021, 0x24040001,
-0xc004d78, 0x108042, 0x1600fffa, 0x32020013,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0x34108000, 0x96220000, 0x501024, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fff8, 0x0, 0xc004db9, 0x0,
-0x8f830054, 0x10000207, 0x24020008, 0x8f830054,
-0x3c020001, 0x8c426f3c, 0x2463ff9c, 0x431023,
-0x2c420064, 0x1440020f, 0x24020009, 0x1000020b,
-0x0, 0x27b10010, 0xa7a00010, 0x8021,
-0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
-0xc004d78, 0x2021, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020018, 0x10400002, 0x2021, 0x24040001,
-0xc004d78, 0x108042, 0x1600fffa, 0x32020018,
-0xc004db9, 0x34108000, 0xc004db9, 0x0,
-0xc004d58, 0x0, 0x50400005, 0x108042,
-0x96220000, 0x501025, 0xa6220000, 0x108042,
-0x1600fff7, 0x0, 0xc004db9, 0x8021,
-0x97a20010, 0x27b10010, 0x34420001, 0xa7a20010,
-0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020018, 0x10400002, 0x2021, 0x24040001,
-0xc004d78, 0x108042, 0x1600fffa, 0x32020018,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0x34108000, 0x96220000, 0x501024, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fff8, 0x0, 0xc004db9, 0x0,
-0x8f830054, 0x10000193, 0x2402000a, 0x8f830054,
-0x3c020001, 0x8c426f3c, 0x2463ff9c, 0x431023,
-0x2c420064, 0x1440019b, 0x2402000b, 0x10000197,
-0x0, 0x27b10010, 0xa7a00010, 0x8021,
-0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
-0xc004d78, 0x2021, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020017, 0x10400002, 0x2021, 0x24040001,
-0xc004d78, 0x108042, 0x1600fffa, 0x32020017,
-0xc004db9, 0x34108000, 0xc004db9, 0x0,
-0xc004d58, 0x0, 0x50400005, 0x108042,
-0x96220000, 0x501025, 0xa6220000, 0x108042,
-0x1600fff7, 0x0, 0xc004db9, 0x8021,
-0x97a20010, 0x27b10010, 0x34420700, 0xa7a20010,
-0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020017, 0x10400002, 0x2021, 0x24040001,
-0xc004d78, 0x108042, 0x1600fffa, 0x32020017,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0x34108000, 0x96220000, 0x501024, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fff8, 0x0, 0xc004db9, 0x0,
-0x8f830054, 0x1000011f, 0x2402000c, 0x8f830054,
-0x3c020001, 0x8c426f3c, 0x2463ff9c, 0x431023,
-0x2c420064, 0x14400127, 0x24020012, 0x10000123,
-0x0, 0x27b10010, 0xa7a00010, 0x8021,
-0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
-0xc004d78, 0x2021, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020014, 0x10400002, 0x2021, 0x24040001,
-0xc004d78, 0x108042, 0x1600fffa, 0x32020014,
-0xc004db9, 0x34108000, 0xc004db9, 0x0,
-0xc004d58, 0x0, 0x50400005, 0x108042,
-0x96220000, 0x501025, 0xa6220000, 0x108042,
-0x1600fff7, 0x0, 0xc004db9, 0x8021,
-0x97a20010, 0x27b10010, 0x34420010, 0xa7a20010,
-0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020014, 0x10400002, 0x2021, 0x24040001,
-0xc004d78, 0x108042, 0x1600fffa, 0x32020014,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0x34108000, 0x96220000, 0x501024, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fff8, 0x0, 0xc004db9, 0x0,
-0x8f830054, 0x100000ab, 0x24020013, 0x8f830054,
-0x3c020001, 0x8c426f3c, 0x2463ff9c, 0x431023,
-0x2c420064, 0x144000b3, 0x2402000d, 0x100000af,
-0x0, 0x27b10010, 0xa7a00010, 0x8021,
-0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
-0xc004d78, 0x2021, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020018, 0x10400002, 0x2021, 0x24040001,
-0xc004d78, 0x108042, 0x1600fffa, 0x32020018,
-0xc004db9, 0x34108000, 0xc004db9, 0x0,
-0xc004d58, 0x0, 0x50400005, 0x108042,
-0x96220000, 0x501025, 0xa6220000, 0x108042,
-0x1600fff7, 0x0, 0xc004db9, 0x8021,
-0x97a20010, 0x27b10010, 0x3042fffe, 0xa7a20010,
-0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020018, 0x10400002, 0x2021, 0x24040001,
-0xc004d78, 0x108042, 0x1600fffa, 0x32020018,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0x34108000, 0x96220000, 0x501024, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fff8, 0x0, 0xc004db9, 0x0,
-0x8f830054, 0x10000037, 0x2402000e, 0x24020840,
-0xa7a20010, 0x27b10010, 0x8021, 0xc004d78,
-0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
-0x0, 0xc004d78, 0x2021, 0xc004d78,
-0x24040001, 0xc004d78, 0x2021, 0xc004d78,
-0x24040001, 0x24100010, 0x32020001, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fffa, 0x32020001, 0x24100010, 0x32020013,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x32020013, 0xc004d78,
-0x24040001, 0xc004d78, 0x2021, 0x34108000,
-0x96220000, 0x501024, 0x10400002, 0x2021,
-0x24040001, 0xc004d78, 0x108042, 0x1600fff8,
-0x0, 0xc004db9, 0x0, 0x8f830054,
-0x24020010, 0x3c010001, 0xac226dd0, 0x3c010001,
-0x1000000c, 0xac236f3c, 0x8f830054, 0x3c020001,
-0x8c426f3c, 0x2463ff9c, 0x431023, 0x2c420064,
-0x14400004, 0x0, 0x24020011, 0x3c010001,
-0xac226dd0, 0x8fbf0020, 0x8fb1001c, 0x8fb00018,
-0x3e00008, 0x27bd0028, 0x3c030001, 0x8c636d98,
-0x27bdffc8, 0x24020002, 0xafbf0034, 0xafb20030,
-0xafb1002c, 0x14620004, 0xafb00028, 0x3c120002,
-0x10000003, 0x8e528ff8, 0x3c120002, 0x8e528ffc,
-0x3c030001, 0x8c636dd4, 0x3c020001, 0x8c426e1c,
-0x50620004, 0x2463ffff, 0x3c010001, 0xac236e1c,
-0x2463ffff, 0x2c620006, 0x10400377, 0x31080,
-0x3c010001, 0x220821, 0x8c226bd8, 0x400008,
-0x0, 0x2021, 0x2821, 0xc004ddb,
-0x34068000, 0x24040010, 0x24050002, 0x24060002,
-0x24020002, 0xc004ddb, 0xa7a20018, 0x24020002,
-0x3c010001, 0x10000364, 0xac226dd4, 0x27b10018,
-0xa7a00018, 0x8021, 0xc004d78, 0x24040001,
-0x26100001, 0x2e020020, 0x1440fffb, 0x0,
-0xc004d78, 0x2021, 0xc004d78, 0x24040001,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0x24100010, 0x32020001, 0x10400002, 0x2021,
-0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
-0x32020001, 0x24100010, 0xc004d78, 0x2021,
-0x108042, 0x1600fffc, 0x0, 0xc004db9,
-0x34108000, 0xc004db9, 0x0, 0xc004d58,
-0x0, 0x50400005, 0x108042, 0x96220000,
-0x501025, 0xa6220000, 0x108042, 0x1600fff7,
-0x0, 0xc004db9, 0x0, 0x97a20018,
-0x30428000, 0x14400004, 0x24020003, 0x3c010001,
-0xac226dd4, 0x24020003, 0x3c010001, 0x1000032a,
-0xac226dd4, 0x24040010, 0x24050002, 0x24060002,
-0x24020002, 0xc004ddb, 0xa7a20018, 0x3c030001,
-0x8c636e20, 0x24020001, 0x146201e1, 0x8021,
-0x27b10018, 0xa7a00018, 0xc004d78, 0x24040001,
-0x26100001, 0x2e020020, 0x1440fffb, 0x0,
-0xc004d78, 0x2021, 0xc004d78, 0x24040001,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0x24100010, 0x32020001, 0x10400002, 0x2021,
-0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
-0x32020001, 0x24100010, 0x32020018, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fffa, 0x32020018, 0xc004db9, 0x34108000,
-0xc004db9, 0x0, 0xc004d58, 0x0,
-0x50400005, 0x108042, 0x96220000, 0x501025,
-0xa6220000, 0x108042, 0x1600fff7, 0x0,
-0xc004db9, 0x8021, 0x27b10018, 0xa7a00018,
-0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
-0xc004d78, 0x2021, 0x24100010, 0x32020001,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x32020001, 0x24100010,
-0x32020018, 0x10400002, 0x2021, 0x24040001,
-0xc004d78, 0x108042, 0x1600fffa, 0x32020018,
-0xc004db9, 0x34108000, 0xc004db9, 0x0,
-0xc004d58, 0x0, 0x50400005, 0x108042,
-0x96220000, 0x501025, 0xa6220000, 0x108042,
-0x1600fff7, 0x0, 0xc004db9, 0x8021,
-0x24040018, 0x2821, 0xc004ddb, 0x24060404,
-0xa7a0001a, 0xc004d78, 0x24040001, 0x26100001,
-0x2e020020, 0x1440fffb, 0x0, 0xc004d78,
-0x2021, 0xc004d78, 0x24040001, 0xc004d78,
-0x24040001, 0xc004d78, 0x2021, 0x24100010,
-0x32020001, 0x10400002, 0x2021, 0x24040001,
-0xc004d78, 0x108042, 0x1600fffa, 0x32020001,
-0x24100010, 0x32020018, 0x10400002, 0x2021,
-0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
-0x32020018, 0xc004db9, 0x34108000, 0xc004db9,
-0x0, 0xc004d58, 0x0, 0x50400005,
-0x108042, 0x97a2001a, 0x501025, 0xa7a2001a,
-0x108042, 0x1600fff7, 0x0, 0xc004db9,
-0x8021, 0xa7a0001a, 0xc004d78, 0x24040001,
-0x26100001, 0x2e020020, 0x1440fffb, 0x0,
-0xc004d78, 0x2021, 0xc004d78, 0x24040001,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0x24100010, 0x32020001, 0x10400002, 0x2021,
-0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
-0x32020001, 0x24100010, 0x32020018, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fffa, 0x32020018, 0xc004db9, 0x34108000,
-0xc004db9, 0x0, 0xc004d58, 0x0,
-0x50400005, 0x108042, 0x97a2001a, 0x501025,
-0xa7a2001a, 0x108042, 0x1600fff7, 0x0,
-0xc004db9, 0x8021, 0xa7a0001c, 0xc004d78,
-0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
-0x0, 0xc004d78, 0x2021, 0xc004d78,
-0x24040001, 0xc004d78, 0x24040001, 0xc004d78,
-0x2021, 0x24100010, 0xc004d78, 0x2021,
-0x108042, 0x1600fffc, 0x0, 0x24100010,
-0x3202001e, 0x10400002, 0x2021, 0x24040001,
-0xc004d78, 0x108042, 0x1600fffa, 0x3202001e,
-0xc004db9, 0x34108000, 0xc004db9, 0x0,
-0xc004d58, 0x0, 0x50400005, 0x108042,
-0x97a2001c, 0x501025, 0xa7a2001c, 0x108042,
-0x1600fff7, 0x0, 0xc004db9, 0x8021,
-0xa7a0001c, 0xc004d78, 0x24040001, 0x26100001,
-0x2e020020, 0x1440fffb, 0x0, 0xc004d78,
-0x2021, 0xc004d78, 0x24040001, 0xc004d78,
-0x24040001, 0xc004d78, 0x2021, 0x24100010,
-0xc004d78, 0x2021, 0x108042, 0x1600fffc,
-0x0, 0x24100010, 0x3202001e, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fffa, 0x3202001e, 0xc004db9, 0x34108000,
-0xc004db9, 0x0, 0xc004d58, 0x0,
-0x50400005, 0x108042, 0x97a2001c, 0x501025,
-0xa7a2001c, 0x108042, 0x1600fff7, 0x0,
-0xc004db9, 0x8021, 0x24020002, 0xa7a2001e,
-0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0x24100010, 0xc004d78,
-0x2021, 0x108042, 0x1600fffc, 0x0,
-0x24100010, 0x3202001e, 0x10400002, 0x2021,
-0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
-0x3202001e, 0xc004d78, 0x24040001, 0xc004d78,
-0x2021, 0x34108000, 0x97a2001e, 0x501024,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fff8, 0x0, 0xc004db9,
-0x8021, 0xa7a00020, 0xc004d78, 0x24040001,
-0x26100001, 0x2e020020, 0x1440fffb, 0x0,
-0xc004d78, 0x2021, 0xc004d78, 0x24040001,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0x24100010, 0xc004d78, 0x2021, 0x108042,
-0x1600fffc, 0x0, 0x24100010, 0x3202001e,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x3202001e, 0xc004db9,
-0x34108000, 0xc004db9, 0x0, 0xc004d58,
-0x0, 0x50400005, 0x108042, 0x97a20020,
-0x501025, 0xa7a20020, 0x108042, 0x1600fff7,
-0x0, 0xc004db9, 0x8021, 0xa7a00020,
-0xc004d78, 0x24040001, 0x26100001, 0x2e020020,
-0x1440fffb, 0x0, 0xc004d78, 0x2021,
-0xc004d78, 0x24040001, 0xc004d78, 0x24040001,
-0xc004d78, 0x2021, 0x24100010, 0xc004d78,
-0x2021, 0x108042, 0x1600fffc, 0x0,
-0x24100010, 0x3202001e, 0x10400002, 0x2021,
-0x24040001, 0xc004d78, 0x108042, 0x1600fffa,
-0x3202001e, 0xc004db9, 0x34108000, 0xc004db9,
-0x0, 0xc004d58, 0x0, 0x50400005,
-0x108042, 0x97a20020, 0x501025, 0xa7a20020,
-0x108042, 0x1600fff7, 0x0, 0xc004db9,
-0x8021, 0xa7a00022, 0xc004d78, 0x24040001,
-0x26100001, 0x2e020020, 0x1440fffb, 0x0,
-0xc004d78, 0x2021, 0xc004d78, 0x24040001,
-0xc004d78, 0x2021, 0xc004d78, 0x24040001,
-0x24100010, 0xc004d78, 0x2021, 0x108042,
-0x1600fffc, 0x0, 0x24100010, 0xc004d78,
-0x2021, 0x108042, 0x1600fffc, 0x0,
-0xc004d78, 0x24040001, 0xc004d78, 0x2021,
-0x34108000, 0x97a20022, 0x501024, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fff8, 0x0, 0xc004db9, 0x0,
-0x24040018, 0x24050002, 0xc004ddb, 0x24060004,
-0x3c100001, 0x8e106e24, 0x24020001, 0x1602011d,
-0x0, 0x3c020001, 0x94426f26, 0x3c010001,
-0xac206e24, 0x24429fbc, 0x2c420004, 0x1040000c,
-0x24040009, 0x24050001, 0xc004ddb, 0x24060400,
-0x24040018, 0x24050001, 0xc004ddb, 0x24060020,
-0x24040018, 0x24050001, 0xc004ddb, 0x24062000,
-0x3c024000, 0x2421024, 0x10400123, 0x3c022000,
-0x2421024, 0x10400004, 0x0, 0x3c010001,
-0x10000003, 0xac306f1c, 0x3c010001, 0xac206f1c,
-0x3c030001, 0x8c636f34, 0x24020005, 0x146200f9,
-0x0, 0x3c020001, 0x8c426f1c, 0x10400067,
-0x3c020004, 0x2421024, 0x10400011, 0xa7a00018,
-0x3c020008, 0x2421024, 0x10400002, 0x24020200,
-0xa7a20018, 0x3c020010, 0x2421024, 0x10400004,
-0x0, 0x97a20018, 0x34420100, 0xa7a20018,
-0x97a60018, 0x24040009, 0x10000004, 0x2821,
-0x24040009, 0x2821, 0x3021, 0xc004ddb,
-0x0, 0x24020001, 0xa7a2001a, 0x3c020008,
-0x2421024, 0x1040000c, 0x3c020002, 0x2421024,
-0x10400002, 0x24020101, 0xa7a2001a, 0x3c020001,
-0x2421024, 0x10400005, 0x3c020010, 0x97a2001a,
-0x34420040, 0xa7a2001a, 0x3c020010, 0x2421024,
-0x1040000e, 0x3c020002, 0x2421024, 0x10400005,
-0x3c020001, 0x97a2001a, 0x34420080, 0xa7a2001a,
-0x3c020001, 0x2421024, 0x10400005, 0x3c0300a0,
-0x97a2001a, 0x34420020, 0xa7a2001a, 0x3c0300a0,
-0x2431024, 0x54430004, 0x3c020020, 0x97a2001a,
-0x1000000c, 0x34420400, 0x2421024, 0x50400004,
-0x3c020080, 0x97a2001a, 0x10000006, 0x34420800,
-0x2421024, 0x10400004, 0x0, 0x97a2001a,
-0x34420c00, 0xa7a2001a, 0x97a6001a, 0x24040004,
-0xc004ddb, 0x2821, 0x3c020004, 0x2421024,
-0x10400004, 0xa7a0001c, 0x32425000, 0x14400004,
-0x0, 0x32424000, 0x10400005, 0x2021,
-0xc004cf9, 0x2402021, 0x10000096, 0x0,
-0x97a6001c, 0x2821, 0x34c61200, 0xc004ddb,
-0xa7a6001c, 0x1000008f, 0x0, 0x2421024,
-0x10400004, 0xa7a00018, 0x32425000, 0x14400004,
-0x0, 0x32424000, 0x10400005, 0x3c020010,
-0xc004cf9, 0x2402021, 0x10000019, 0xa7a0001a,
-0x2421024, 0x10400004, 0x0, 0x97a20018,
-0x10000004, 0xa7a20018, 0x97a20018, 0x34420100,
-0xa7a20018, 0x3c020001, 0x2421024, 0x10400004,
-0x0, 0x97a20018, 0x10000004, 0xa7a20018,
-0x97a20018, 0x34422000, 0xa7a20018, 0x97a60018,
-0x2021, 0xc004ddb, 0x2821, 0xa7a0001a,
-0x8021, 0xc004d78, 0x24040001, 0x26100001,
-0x2e020020, 0x1440fffb, 0x0, 0xc004d78,
-0x2021, 0xc004d78, 0x24040001, 0xc004d78,
-0x24040001, 0xc004d78, 0x2021, 0x24100010,
-0x32020001, 0x10400002, 0x2021, 0x24040001,
-0xc004d78, 0x108042, 0x1600fffa, 0x32020001,
-0x24100010, 0xc004d78, 0x2021, 0x108042,
-0x1600fffc, 0x0, 0xc004db9, 0x34108000,
-0xc004db9, 0x0, 0xc004d58, 0x0,
-0x50400005, 0x108042, 0x97a2001a, 0x501025,
-0xa7a2001a, 0x108042, 0x1600fff7, 0x0,
-0xc004db9, 0x8021, 0xa7a0001a, 0xc004d78,
-0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
-0x0, 0xc004d78, 0x2021, 0xc004d78,
-0x24040001, 0xc004d78, 0x24040001, 0xc004d78,
-0x2021, 0x24100010, 0x32020001, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fffa, 0x32020001, 0x24100010, 0xc004d78,
-0x2021, 0x108042, 0x1600fffc, 0x0,
-0xc004db9, 0x34108000, 0xc004db9, 0x0,
-0xc004d58, 0x0, 0x50400005, 0x108042,
-0x97a2001a, 0x501025, 0xa7a2001a, 0x108042,
-0x1600fff7, 0x0, 0xc004db9, 0x0,
-0x3c040001, 0x24846bcc, 0x97a60018, 0x97a7001a,
-0x3c020001, 0x8c426d98, 0x3c030001, 0x8c636f1c,
-0x3c05000d, 0x34a50205, 0xafa20010, 0xc002b3b,
-0xafa30014, 0x8f830054, 0x24020004, 0x3c010001,
-0xac226dd4, 0x3c010001, 0x10000017, 0xac236f38,
-0x8f830054, 0x3c020001, 0x8c426f38, 0x2463ff9c,
-0x431023, 0x2c420064, 0x1440000f, 0x0,
-0x8f820220, 0x24030005, 0x3c010001, 0xac236dd4,
-0x3c03f700, 0x431025, 0x10000007, 0xaf820220,
-0x24020006, 0x3c010001, 0xac226dd4, 0x24020011,
-0x3c010001, 0xac226dd0, 0x8fbf0034, 0x8fb20030,
-0x8fb1002c, 0x8fb00028, 0x3e00008, 0x27bd0038,
-0x27bdffd8, 0xafb00018, 0x808021, 0xafb1001c,
-0x8821, 0x32024000, 0x10400013, 0xafbf0020,
-0x3c020010, 0x2021024, 0x2c420001, 0x21023,
-0x30434100, 0x3c020001, 0x2021024, 0x14400006,
-0x34714000, 0x3c020002, 0x2021024, 0x14400002,
-0x34716000, 0x34714040, 0x2021, 0x2821,
-0x10000036, 0x2203021, 0x32021000, 0x10400035,
-0x2021, 0x2821, 0xc004ddb, 0x24060040,
-0x24040018, 0x2821, 0xc004ddb, 0x24060c00,
-0x24040017, 0x2821, 0xc004ddb, 0x24060400,
-0x24040016, 0x2821, 0xc004ddb, 0x24060006,
-0x24040017, 0x2821, 0xc004ddb, 0x24062500,
-0x24040016, 0x2821, 0xc004ddb, 0x24060006,
-0x24040017, 0x2821, 0xc004ddb, 0x24064600,
-0x24040016, 0x2821, 0xc004ddb, 0x24060006,
-0x24040017, 0x2821, 0xc004ddb, 0x24066700,
-0x24040016, 0x2821, 0xc004ddb, 0x24060006,
-0x2404001f, 0x2821, 0xc004ddb, 0x24060010,
-0x24040009, 0x2821, 0xc004ddb, 0x24061500,
-0x24040009, 0x2821, 0x24061d00, 0xc004ddb,
-0x0, 0x3c040001, 0x24846bf0, 0x3c05000e,
-0x34a50100, 0x2003021, 0x2203821, 0xafa00010,
-0xc002b3b, 0xafa00014, 0x8fbf0020, 0x8fb1001c,
-0x8fb00018, 0x3e00008, 0x27bd0028, 0x8f850044,
-0x8f820044, 0x3c030001, 0x431025, 0x3c030008,
-0xaf820044, 0x8f840054, 0x8f820054, 0xa32824,
-0x10000002, 0x24840001, 0x8f820054, 0x821023,
-0x2c420002, 0x1440fffc, 0x0, 0x8f820044,
-0x3c03fffe, 0x3463ffff, 0x431024, 0xaf820044,
-0x8f830054, 0x8f820054, 0x10000002, 0x24630001,
-0x8f820054, 0x621023, 0x2c420002, 0x1440fffc,
-0x0, 0x3e00008, 0xa01021, 0x8f830044,
-0x3c02fff0, 0x3442ffff, 0x42480, 0x621824,
-0x3c020002, 0x822025, 0x641825, 0xaf830044,
-0x8f820044, 0x3c03fffe, 0x3463ffff, 0x431024,
-0xaf820044, 0x8f830054, 0x8f820054, 0x10000002,
-0x24630001, 0x8f820054, 0x621023, 0x2c420002,
-0x1440fffc, 0x0, 0x8f820044, 0x3c030001,
-0x431025, 0xaf820044, 0x8f830054, 0x8f820054,
-0x10000002, 0x24630001, 0x8f820054, 0x621023,
-0x2c420002, 0x1440fffc, 0x0, 0x3e00008,
-0x0, 0x8f820044, 0x2403ff7f, 0x431024,
-0xaf820044, 0x8f830054, 0x8f820054, 0x10000002,
-0x24630001, 0x8f820054, 0x621023, 0x2c420002,
-0x1440fffc, 0x0, 0x8f820044, 0x34420080,
-0xaf820044, 0x8f830054, 0x8f820054, 0x10000002,
-0x24630001, 0x8f820054, 0x621023, 0x2c420002,
-0x1440fffc, 0x0, 0x3e00008, 0x0,
-0x8f820044, 0x3c03fff0, 0x3463ffff, 0x431024,
-0xaf820044, 0x8f820044, 0x3c030001, 0x431025,
-0xaf820044, 0x8f830054, 0x8f820054, 0x10000002,
-0x24630001, 0x8f820054, 0x621023, 0x2c420002,
-0x1440fffc, 0x0, 0x8f820044, 0x3c03fffe,
-0x3463ffff, 0x431024, 0xaf820044, 0x8f830054,
-0x8f820054, 0x10000002, 0x24630001, 0x8f820054,
-0x621023, 0x2c420002, 0x1440fffc, 0x0,
-0x3e00008, 0x0, 0x27bdffc8, 0xafb30024,
-0x809821, 0xafbe002c, 0xa0f021, 0xafb20020,
-0xc09021, 0x33c2ffff, 0xafbf0030, 0xafb50028,
-0xafb1001c, 0xafb00018, 0x14400034, 0xa7b20010,
-0x3271ffff, 0x27b20010, 0x8021, 0xc004d78,
-0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
-0x0, 0xc004d78, 0x2021, 0xc004d78,
-0x24040001, 0xc004d78, 0x2021, 0xc004d78,
-0x24040001, 0x24100010, 0x32020001, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fffa, 0x32020001, 0x24100010, 0x2301024,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x2301024, 0xc004d78,
-0x24040001, 0xc004d78, 0x2021, 0x34108000,
-0x96420000, 0x501024, 0x10400002, 0x2021,
-0x24040001, 0xc004d78, 0x108042, 0x12000075,
-0x0, 0x1000fff6, 0x0, 0x3275ffff,
-0x27b10010, 0xa7a00010, 0x8021, 0xc004d78,
-0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
-0x0, 0xc004d78, 0x2021, 0xc004d78,
-0x24040001, 0xc004d78, 0x24040001, 0xc004d78,
-0x2021, 0x24100010, 0x32020001, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fffa, 0x32020001, 0x24100010, 0x2b01024,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x2b01024, 0xc004db9,
-0x34108000, 0xc004db9, 0x0, 0xc004d58,
-0x0, 0x50400005, 0x108042, 0x96220000,
-0x501025, 0xa6220000, 0x108042, 0x1600fff7,
-0x0, 0xc004db9, 0x0, 0x33c5ffff,
-0x24020001, 0x54a20004, 0x24020002, 0x97a20010,
-0x10000006, 0x521025, 0x14a20006, 0x3271ffff,
-0x97a20010, 0x121827, 0x431024, 0xa7a20010,
-0x3271ffff, 0x27b20010, 0x8021, 0xc004d78,
-0x24040001, 0x26100001, 0x2e020020, 0x1440fffb,
-0x0, 0xc004d78, 0x2021, 0xc004d78,
-0x24040001, 0xc004d78, 0x2021, 0xc004d78,
-0x24040001, 0x24100010, 0x32020001, 0x10400002,
-0x2021, 0x24040001, 0xc004d78, 0x108042,
-0x1600fffa, 0x32020001, 0x24100010, 0x2301024,
-0x10400002, 0x2021, 0x24040001, 0xc004d78,
-0x108042, 0x1600fffa, 0x2301024, 0xc004d78,
-0x24040001, 0xc004d78, 0x2021, 0x34108000,
-0x96420000, 0x501024, 0x10400002, 0x2021,
-0x24040001, 0xc004d78, 0x108042, 0x1600fff8,
-0x0, 0xc004db9, 0x0, 0x8fbf0030,
-0x8fbe002c, 0x8fb50028, 0x8fb30024, 0x8fb20020,
-0x8fb1001c, 0x8fb00018, 0x3e00008, 0x27bd0038,
-0x0, 0x0, 0x0, 0x27bdffe8,
-0xafbf0010, 0x8ee304b8, 0x24020008, 0x146201e0,
-0x0, 0x3c020001, 0x8c426f1c, 0x14400005,
-0x0, 0xc003daf, 0x8f840224, 0x100001d8,
-0x0, 0x8f820220, 0x3c030008, 0x431024,
-0x10400026, 0x24020001, 0x8f840224, 0x8f820220,
-0x3c030400, 0x431024, 0x10400006, 0x0,
-0x3c010002, 0xac208fa0, 0x3c010002, 0x1000000b,
-0xac208fc0, 0x3c030002, 0x24638fa0, 0x8c620000,
-0x24420001, 0xac620000, 0x2c420002, 0x14400003,
-0x24020001, 0x3c010002, 0xac228fc0, 0x3c020002,
-0x8c428fc0, 0x10400006, 0x30820040, 0x10400004,
-0x24020001, 0x3c010002, 0x10000003, 0xac228fc4,
-0x3c010002, 0xac208fc4, 0x3c010002, 0xac248f9c,
-0x3c010002, 0x1000000b, 0xac208fd0, 0x3c010002,
-0xac228fd0, 0x3c010002, 0xac208fc0, 0x3c010002,
-0xac208fa0, 0x3c010002, 0xac208fc4, 0x3c010002,
-0xac208f9c, 0x3c030002, 0x8c638f90, 0x3c020002,
-0x8c428f94, 0x50620004, 0x2463ffff, 0x3c010002,
-0xac238f94, 0x2463ffff, 0x2c62000e, 0x10400194,
-0x31080, 0x3c010001, 0x220821, 0x8c226c00,
-0x400008, 0x0, 0x24020002, 0x3c010002,
-0xac208fc0, 0x3c010002, 0xac208fa0, 0x3c010002,
-0xac208f9c, 0x3c010002, 0xac208fc4, 0x3c010002,
-0xac208fb8, 0x3c010002, 0xac208fb0, 0xaf800224,
-0x3c010002, 0xac228f90, 0x3c020002, 0x8c428fd0,
-0x1440004f, 0x3c02fdff, 0x3442ffff, 0xc003daf,
-0x282a024, 0xaf800204, 0x8f820200, 0x2403fffd,
-0x431024, 0xaf820200, 0x3c010002, 0xac208fe0,
-0x8f830054, 0x3c020002, 0x8c428fb8, 0x24040001,
-0x3c010002, 0xac248fcc, 0x24420001, 0x3c010002,
-0xac228fb8, 0x2c420004, 0x3c010002, 0xac238fb4,
-0x14400006, 0x24020003, 0x3c010001, 0xac246d9c,
-0x3c010002, 0x1000015e, 0xac208fb8, 0x3c010002,
-0x1000015b, 0xac228f90, 0x8f830054, 0x3c020002,
-0x8c428fb4, 0x2463d8f0, 0x431023, 0x2c422710,
-0x14400003, 0x24020004, 0x3c010002, 0xac228f90,
-0x3c020002, 0x8c428fd0, 0x14400021, 0x3c02fdff,
-0x3442ffff, 0x1000014a, 0x282a024, 0x3c040001,
-0x8c846f20, 0x3c010002, 0xc005084, 0xac208fa8,
-0x3c020002, 0x8c428fdc, 0xaf820204, 0x3c020002,
-0x8c428fd0, 0x14400012, 0x3c03fdff, 0x8f820204,
-0x3463ffff, 0x30420030, 0x1440012f, 0x283a024,
-0x3c030002, 0x8c638fdc, 0x24020005, 0x3c010002,
-0xac228f90, 0x3c010002, 0x10000131, 0xac238fe0,
-0x3c020002, 0x8c428fd0, 0x10400010, 0x3c02fdff,
-0x3c020001, 0x8c426e3c, 0x24420001, 0x3c010001,
-0xac226e3c, 0x2c420002, 0x14400125, 0x24020001,
-0x3c010001, 0xac226e44, 0x3c010001, 0xac206e3c,
-0x3c010001, 0x1000011e, 0xac226d9c, 0x3c030002,
-0x8c638fc0, 0x3442ffff, 0x10600119, 0x282a024,
-0x3c020002, 0x8c428f9c, 0x10400115, 0x0,
-0x3c010002, 0xac228fc8, 0x24020003, 0x3c010002,
-0xac228fa0, 0x100000b8, 0x24020006, 0x3c010002,
-0xac208fa8, 0x8f820204, 0x34420040, 0xaf820204,
-0x3c020002, 0x8c428fe0, 0x24030007, 0x3c010002,
-0xac238f90, 0x34420040, 0x3c010002, 0xac228fe0,
-0x3c020002, 0x8c428fc0, 0x10400005, 0x0,
-0x3c020002, 0x8c428f9c, 0x104000f0, 0x24020002,
-0x3c050002, 0x24a58fa0, 0x8ca20000, 0x2c424e21,
-0x104000ea, 0x24020002, 0x3c020002, 0x8c428fc4,
-0x104000ef, 0x2404ffbf, 0x3c020002, 0x8c428f9c,
-0x3c030002, 0x8c638fc8, 0x441024, 0x641824,
-0x10430004, 0x24020001, 0x3c010002, 0x100000e4,
-0xac228f90, 0x24020003, 0xaca20000, 0x24020008,
-0x3c010002, 0xac228f90, 0x3c020002, 0x8c428fcc,
-0x1040000c, 0x24020001, 0x3c040002, 0xc005091,
-0x8c848f9c, 0x3c020002, 0x8c428fe8, 0x14400005,
-0x24020001, 0x3c020002, 0x8c428fe4, 0x10400006,
-0x24020001, 0x3c010001, 0xac226d9c, 0x3c010002,
-0x100000cb, 0xac208fb8, 0x3c020002, 0x8c428fb0,
-0x3c030002, 0x8c638f9c, 0x2c420001, 0x210c0,
-0x30630008, 0x3c010002, 0xac228fb0, 0x3c010002,
-0xac238fac, 0x8f830054, 0x24020009, 0x3c010002,
-0xac228f90, 0x3c010002, 0x100000b9, 0xac238fb4,
-0x8f830054, 0x3c020002, 0x8c428fb4, 0x2463d8f0,
-0x431023, 0x2c422710, 0x1440009f, 0x0,
-0x3c020002, 0x8c428fc0, 0x10400005, 0x0,
-0x3c020002, 0x8c428f9c, 0x104000a0, 0x24020002,
-0x3c030002, 0x24638fa0, 0x8c620000, 0x2c424e21,
-0x1040009a, 0x24020002, 0x3c020002, 0x8c428fcc,
-0x1040000e, 0x0, 0x3c020002, 0x8c428f9c,
-0x3c010002, 0xac208fcc, 0x30420080, 0x1040002f,
-0x2402000c, 0x8f820204, 0x30420080, 0x1440000c,
-0x24020003, 0x10000029, 0x2402000c, 0x3c020002,
-0x8c428f9c, 0x30420080, 0x14400005, 0x24020003,
-0x8f820204, 0x30420080, 0x1040001f, 0x24020003,
-0xac620000, 0x2402000a, 0x3c010002, 0xac228f90,
-0x3c040002, 0x24848fd8, 0x8c820000, 0x3c030002,
-0x8c638fb0, 0x431025, 0xaf820204, 0x8c830000,
-0x3c040002, 0x8c848fb0, 0x2402000b, 0x3c010002,
-0xac228f90, 0x641825, 0x3c010002, 0xac238fe0,
-0x3c050002, 0x24a58fa0, 0x8ca20000, 0x2c424e21,
-0x10400066, 0x24020002, 0x3c020002, 0x8c428fd0,
-0x10400005, 0x0, 0x2402000c, 0x3c010002,
-0x10000067, 0xac228f90, 0x3c020002, 0x8c428fc0,
-0x10400063, 0x0, 0x3c040002, 0x8c848f9c,
-0x10800055, 0x30820008, 0x3c030002, 0x8c638fac,
-0x1062005b, 0x24020003, 0x3c010002, 0xac248fc8,
-0xaca20000, 0x24020006, 0x3c010002, 0x10000054,
-0xac228f90, 0x8f820200, 0x34420002, 0xaf820200,
-0x8f830054, 0x2402000d, 0x3c010002, 0xac228f90,
-0x3c010002, 0xac238fb4, 0x8f830054, 0x3c020002,
-0x8c428fb4, 0x2463d8f0, 0x431023, 0x2c422710,
-0x14400031, 0x0, 0x3c020002, 0x8c428fd0,
-0x10400020, 0x2402000e, 0x3c030002, 0x8c638fe4,
-0x3c010002, 0x14600015, 0xac228f90, 0xc003e6d,
-0x0, 0x3c050001, 0x8ca56d98, 0xc00529b,
-0x2021, 0x3c030001, 0x8c636d98, 0x24020004,
-0x14620005, 0x2403fffb, 0x3c020001, 0x8c426d94,
-0x10000003, 0x2403fff7, 0x3c020001, 0x8c426d94,
-0x431024, 0x3c010001, 0xac226d94, 0x8f830224,
-0x3c020200, 0x3c010002, 0xac238fec, 0x10000020,
-0x282a025, 0x3c020002, 0x8c428fc0, 0x10400005,
-0x0, 0x3c020002, 0x8c428f9c, 0x1040000f,
-0x24020002, 0x3c020002, 0x8c428fa0, 0x2c424e21,
-0x1040000a, 0x24020002, 0x3c020002, 0x8c428fc0,
-0x1040000f, 0x0, 0x3c020002, 0x8c428f9c,
-0x1440000b, 0x0, 0x24020002, 0x3c010002,
-0x10000007, 0xac228f90, 0x3c020002, 0x8c428fc0,
-0x10400003, 0x0, 0xc003daf, 0x0,
-0x8f820220, 0x3c03f700, 0x431025, 0xaf820220,
-0x8fbf0010, 0x3e00008, 0x27bd0018, 0x3c030002,
-0x24638fe8, 0x8c620000, 0x10400005, 0x34422000,
-0x3c010002, 0xac228fdc, 0x10000003, 0xac600000,
-0x3c010002, 0xac248fdc, 0x3e00008, 0x0,
-0x27bdffe0, 0x30820030, 0xafbf0018, 0x3c010002,
-0xac228fe4, 0x14400067, 0x3c02ffff, 0x34421f0e,
-0x821024, 0x14400061, 0x24020030, 0x30822000,
-0x1040005d, 0x30838000, 0x31a02, 0x30820001,
-0x21200, 0x3c040001, 0x8c846f20, 0x621825,
-0x331c2, 0x3c030001, 0x24636e48, 0x30828000,
-0x21202, 0x30840001, 0x42200, 0x441025,
-0x239c2, 0x61080, 0x431021, 0x471021,
-0x90430000, 0x24020001, 0x10620025, 0x0,
-0x10600007, 0x24020002, 0x10620013, 0x24020003,
-0x1062002c, 0x3c05000f, 0x10000037, 0x0,
-0x8f820200, 0x2403feff, 0x431024, 0xaf820200,
-0x8f820220, 0x3c03fffe, 0x3463ffff, 0x431024,
-0xaf820220, 0x3c010002, 0xac209004, 0x3c010002,
-0x10000034, 0xac20900c, 0x8f820200, 0x34420100,
-0xaf820200, 0x8f820220, 0x3c03fffe, 0x3463ffff,
-0x431024, 0xaf820220, 0x24020100, 0x3c010002,
-0xac229004, 0x3c010002, 0x10000026, 0xac20900c,
-0x8f820200, 0x2403feff, 0x431024, 0xaf820200,
-0x8f820220, 0x3c030001, 0x431025, 0xaf820220,
-0x3c010002, 0xac209004, 0x3c010002, 0x10000019,
-0xac23900c, 0x8f820200, 0x34420100, 0xaf820200,
-0x8f820220, 0x3c030001, 0x431025, 0xaf820220,
-0x24020100, 0x3c010002, 0xac229004, 0x3c010002,
-0x1000000c, 0xac23900c, 0x34a5ffff, 0x3c040001,
-0x24846c38, 0xafa30010, 0xc002b3b, 0xafa00014,
-0x10000004, 0x0, 0x24020030, 0x3c010002,
-0xac228fe8, 0x8fbf0018, 0x3e00008, 0x27bd0020,
-0x0, 0x0, 0x0, 0x27bdffc8,
-0xafb20028, 0x809021, 0xafb3002c, 0xa09821,
-0xafb00020, 0xc08021, 0x3c040001, 0x24846c50,
-0x3c050009, 0x3c020001, 0x8c426d98, 0x34a59001,
-0x2403021, 0x2603821, 0xafbf0030, 0xafb10024,
-0xa7a0001a, 0xafb00014, 0xc002b3b, 0xafa20010,
-0x24020002, 0x12620083, 0x2e620003, 0x10400005,
-0x24020001, 0x1262000a, 0x0, 0x10000173,
-0x0, 0x24020004, 0x126200f8, 0x24020008,
-0x126200f7, 0x3c02ffec, 0x1000016c, 0x0,
-0x3c020001, 0x8c426d94, 0x30420002, 0x14400004,
-0x128940, 0x3c02fffb, 0x3442ffff, 0x2028024,
-0x3c010002, 0x310821, 0xac308ffc, 0x3c024000,
-0x2021024, 0x1040004e, 0x1023c2, 0x30840030,
-0x101382, 0x3042001c, 0x3c030001, 0x24636dd8,
-0x431021, 0x823821, 0x3c020020, 0x2021024,
-0x10400006, 0x24020100, 0x3c010002, 0x310821,
-0xac229000, 0x10000005, 0x3c020080, 0x3c010002,
-0x310821, 0xac209000, 0x3c020080, 0x2021024,
-0x10400006, 0x121940, 0x3c020001, 0x3c010002,
-0x230821, 0x10000005, 0xac229008, 0x121140,
-0x3c010002, 0x220821, 0xac209008, 0x94e40000,
-0x3c030001, 0x8c636f40, 0x24020005, 0x10620010,
-0xa7a40018, 0x32024000, 0x10400002, 0x34824000,
-0xa7a20018, 0x24040001, 0x94e20002, 0x24050004,
-0x24e60002, 0x34420001, 0xc0045be, 0xa4e20002,
-0x24040001, 0x2821, 0xc0045be, 0x27a60018,
-0x3c020001, 0x8c426d98, 0x24110001, 0x3c010001,
-0xac316da4, 0x14530004, 0x32028000, 0xc003daf,
-0x0, 0x32028000, 0x1040011c, 0x0,
-0xc003daf, 0x0, 0x3c030001, 0x8c636f40,
-0x24020005, 0x10620115, 0x24020002, 0x3c010001,
-0xac316d9c, 0x3c010001, 0x10000110, 0xac226d98,
-0x24040001, 0x24050004, 0x27b0001a, 0xc0045be,
-0x2003021, 0x24040001, 0x2821, 0xc0045be,
-0x2003021, 0x3c020002, 0x511021, 0x8c428ff4,
-0x3c040001, 0x8c846d98, 0x3c03bfff, 0x3463ffff,
-0x3c010001, 0xac336da4, 0x431024, 0x3c010002,
-0x310821, 0x109300f7, 0xac228ff4, 0x100000f7,
-0x0, 0x3c022000, 0x2021024, 0x10400005,
-0x24020001, 0x3c010001, 0xac226f1c, 0x10000004,
-0x128940, 0x3c010001, 0xac206f1c, 0x128940,
-0x3c010002, 0x310821, 0xac308ff8, 0x3c024000,
-0x2021024, 0x14400014, 0x0, 0x3c020001,
-0x8c426f1c, 0x10400006, 0x24040004, 0x24050001,
-0xc004ddb, 0x24062000, 0x24020001, 0xaee204b8,
-0x3c020002, 0x511021, 0x8c428ff0, 0x3c03bfff,
-0x3463ffff, 0x431024, 0x3c010002, 0x310821,
-0x100000d0, 0xac228ff0, 0x3c020001, 0x8c426f1c,
-0x10400028, 0x3c0300a0, 0x2031024, 0x5443000d,
-0x3c020020, 0x3c020001, 0x8c426f20, 0x24030100,
-0x3c010002, 0x310821, 0xac239004, 0x3c030001,
-0x3c010002, 0x310821, 0xac23900c, 0x10000015,
-0x34420400, 0x2021024, 0x10400008, 0x24030100,
-0x3c020001, 0x8c426f20, 0x3c010002, 0x310821,
-0xac239004, 0x1000000b, 0x34420800, 0x3c020080,
-0x2021024, 0x1040002e, 0x3c030001, 0x3c020001,
-0x8c426f20, 0x3c010002, 0x310821, 0xac23900c,
-0x34420c00, 0x3c010001, 0xac226f20, 0x10000025,
-0x24040001, 0x3c020020, 0x2021024, 0x10400006,
-0x24020100, 0x3c010002, 0x310821, 0xac229004,
-0x10000005, 0x3c020080, 0x3c010002, 0x310821,
-0xac209004, 0x3c020080, 0x2021024, 0x10400007,
-0x121940, 0x3c020001, 0x3c010002, 0x230821,
-0xac22900c, 0x10000006, 0x24040001, 0x121140,
-0x3c010002, 0x220821, 0xac20900c, 0x24040001,
-0x2821, 0x27b0001e, 0xc00457c, 0x2003021,
-0x24040001, 0x2821, 0xc00457c, 0x2003021,
-0x24040001, 0x24050001, 0x27b0001c, 0xc00457c,
-0x2003021, 0x24040001, 0x24050001, 0xc00457c,
-0x2003021, 0x10000077, 0x0, 0x3c02ffec,
-0x3442ffff, 0x2028024, 0x3c020008, 0x2028025,
-0x121140, 0x3c010002, 0x220821, 0xac308ff8,
-0x3c022000, 0x2021024, 0x10400009, 0x0,
-0x3c020001, 0x8c426e44, 0x14400005, 0x24020001,
-0x3c010001, 0xac226f1c, 0x10000004, 0x3c024000,
-0x3c010001, 0xac206f1c, 0x3c024000, 0x2021024,
-0x1440001d, 0x24020e01, 0x3c030001, 0x8c636f1c,
-0xaf820238, 0x3c010001, 0xac206db0, 0x10600005,
-0x24022020, 0x3c010001, 0xac226f20, 0x24020001,
-0xaee204b8, 0x3c04bfff, 0x121940, 0x3c020002,
-0x431021, 0x8c428ff0, 0x3c050001, 0x8ca56d98,
-0x3484ffff, 0x441024, 0x3c010002, 0x230821,
-0xac228ff0, 0x24020001, 0x10a20044, 0x0,
-0x10000040, 0x0, 0x3c020001, 0x8c426f1c,
-0x1040001c, 0x24022000, 0x3c010001, 0xac226f20,
-0x3c0300a0, 0x2031024, 0x14430005, 0x121140,
-0x3402a000, 0x3c010001, 0x1000002d, 0xac226f20,
-0x3c030002, 0x621821, 0x8c638ff8, 0x3c020020,
-0x621024, 0x10400004, 0x24022001, 0x3c010001,
-0x10000023, 0xac226f20, 0x3c020080, 0x621024,
-0x1040001f, 0x3402a001, 0x3c010001, 0x1000001c,
-0xac226f20, 0x3c020020, 0x2021024, 0x10400007,
-0x121940, 0x24020100, 0x3c010002, 0x230821,
-0xac229004, 0x10000006, 0x3c020080, 0x121140,
-0x3c010002, 0x220821, 0xac209004, 0x3c020080,
-0x2021024, 0x10400006, 0x121940, 0x3c020001,
-0x3c010002, 0x230821, 0x10000005, 0xac22900c,
-0x121140, 0x3c010002, 0x220821, 0xac20900c,
-0x3c030001, 0x8c636d98, 0x24020001, 0x10620003,
-0x0, 0xc003daf, 0x0, 0x8fbf0030,
-0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
-0x3e00008, 0x27bd0038, 0x27bdffb0, 0xafb3003c,
-0x9821, 0xafb50040, 0xa821, 0xafb10034,
-0x8821, 0x24020002, 0xafbf0048, 0xafbe0044,
-0xafb20038, 0xafb00030, 0xafa4002c, 0xa7a0001a,
-0xa7a00018, 0xa7a00020, 0xa7a0001e, 0xa7a00022,
-0x10a20130, 0xa7a0001c, 0x2ca20003, 0x10400005,
-0x24020001, 0x10a2000a, 0x3c024000, 0x1000025d,
-0x2201021, 0x24020004, 0x10a2020a, 0x24020008,
-0x10a20208, 0x2201021, 0x10000256, 0x0,
-0x8fa8002c, 0x88140, 0x3c030002, 0x701821,
-0x8c638ffc, 0x621024, 0x14400009, 0x24040001,
-0x3c027fff, 0x3442ffff, 0x628824, 0x3c010002,
-0x300821, 0xac318ff4, 0x10000246, 0x2201021,
-0x24050001, 0xc00457c, 0x27a60018, 0x24040001,
-0x24050001, 0xc00457c, 0x27a60018, 0x97a20018,
-0x30420004, 0x104000d9, 0x3c114000, 0x3c020001,
-0x8c426f40, 0x2443ffff, 0x2c620006, 0x104000d9,
-0x31080, 0x3c010001, 0x220821, 0x8c226c68,
-0x400008, 0x0, 0x24040001, 0x24050011,
-0x27b0001a, 0xc00457c, 0x2003021, 0x24040001,
-0x24050011, 0xc00457c, 0x2003021, 0x97a3001a,
-0x30624000, 0x10400002, 0x3c150010, 0x3c150008,
-0x30628000, 0x104000aa, 0x3c130001, 0x100000a8,
-0x3c130002, 0x24040001, 0x24050014, 0x27b0001a,
-0xc00457c, 0x2003021, 0x24040001, 0x24050014,
-0xc00457c, 0x2003021, 0x97a3001a, 0x30621000,
-0x10400002, 0x3c150010, 0x3c150008, 0x30620800,
-0x10400097, 0x3c130001, 0x10000095, 0x3c130002,
-0x24040001, 0x24050019, 0x27b0001c, 0xc00457c,
-0x2003021, 0x24040001, 0x24050019, 0xc00457c,
-0x2003021, 0x97a2001c, 0x30430700, 0x24020400,
-0x10620027, 0x28620401, 0x1040000e, 0x24020200,
-0x1062001f, 0x28620201, 0x10400005, 0x24020100,
-0x5062001e, 0x3c130001, 0x1000001e, 0x24040001,
-0x24020300, 0x50620019, 0x3c130002, 0x10000019,
-0x24040001, 0x24020600, 0x1062000d, 0x28620601,
-0x10400005, 0x24020500, 0x5062000b, 0x3c130002,
-0x10000010, 0x24040001, 0x24020700, 0x1462000d,
-0x24040001, 0x3c130004, 0x1000000a, 0x3c150008,
-0x10000006, 0x3c130004, 0x10000005, 0x3c150008,
-0x3c130001, 0x10000002, 0x3c150008, 0x3c150010,
-0x24040001, 0x24050018, 0x27b0001e, 0xc00457c,
-0x2003021, 0x24040001, 0x24050018, 0xc00457c,
-0x2003021, 0x8fa8002c, 0x97a7001e, 0x81140,
-0x3c060002, 0xc23021, 0x8cc68ff4, 0x97a20022,
-0x3c100001, 0x26106c5c, 0x2002021, 0xafa20010,
-0x97a2001c, 0x3c05000c, 0x34a50303, 0xc002b3b,
-0xafa20014, 0x3c020004, 0x16620010, 0x3c020001,
-0x8f840054, 0x24030001, 0x24020002, 0x3c010001,
-0xac236d9c, 0x3c010001, 0xac226d98, 0x3c010001,
-0xac236da4, 0x3c010001, 0xac236e24, 0x3c010001,
-0xac246f30, 0x1000004f, 0x2b38825, 0x16620039,
-0x3c028000, 0x3c020001, 0x8c426e20, 0x1440001e,
-0x24040018, 0x2021, 0x2821, 0xc004ddb,
-0x34068000, 0x8f830054, 0x8f820054, 0x2b38825,
-0x10000002, 0x24630032, 0x8f820054, 0x621023,
-0x2c420033, 0x1440fffc, 0x0, 0x8f830054,
-0x24020001, 0x3c010001, 0xac226e20, 0x3c010001,
-0xac226d9c, 0x3c010001, 0xac226d98, 0x3c010001,
-0xac226da4, 0x3c010001, 0xac226e24, 0x3c010001,
-0x1000002c, 0xac236f30, 0x2821, 0xc004ddb,
-0x24060404, 0x2021, 0x2405001e, 0x27a60018,
-0x24020002, 0xc0045be, 0xa7a20018, 0x2021,
-0x2821, 0x27a60018, 0xc0045be, 0xa7a00018,
-0x24040018, 0x24050002, 0xc004ddb, 0x24060004,
-0x3c028000, 0x2221025, 0x2b31825, 0x10000015,
-0x438825, 0x2221025, 0x2751825, 0x438825,
-0x2002021, 0x97a6001c, 0x3c070001, 0x8ce76d98,
-0x3c05000c, 0x34a50326, 0xafb30010, 0xc002b3b,
-0xafb10014, 0x10000007, 0x0, 0x3c110002,
-0x2308821, 0x8e318ffc, 0x3c027fff, 0x3442ffff,
-0x2228824, 0x3c020001, 0x8c426da8, 0x1040001e,
-0x0, 0x3c020001, 0x8c426f1c, 0x10400002,
-0x3c022000, 0x2228825, 0x8fa8002c, 0x81140,
-0x3c010002, 0x220821, 0x8c229000, 0x10400003,
-0x3c020020, 0x10000005, 0x2228825, 0x3c02ffdf,
-0x3442ffff, 0x2228824, 0x8fa8002c, 0x81140,
-0x3c010002, 0x220821, 0x8c229008, 0x10400003,
-0x3c020080, 0x10000004, 0x2228825, 0x3c02ff7f,
-0x3442ffff, 0x2228824, 0x8fa8002c, 0x81140,
-0x3c010002, 0x220821, 0xac318ff4, 0x10000135,
-0x2201021, 0x8fa8002c, 0x8f140, 0x3c030002,
-0x7e1821, 0x8c638ff8, 0x3c024000, 0x621024,
-0x14400009, 0x24040001, 0x3c027fff, 0x3442ffff,
-0x628824, 0x3c010002, 0x3e0821, 0xac318ff0,
-0x10000124, 0x2201021, 0x2821, 0xc00457c,
-0x27a60018, 0x24040001, 0x2821, 0xc00457c,
-0x27a60018, 0x24040001, 0x24050001, 0x27b20020,
-0xc00457c, 0x2403021, 0x24040001, 0x24050001,
-0xc00457c, 0x2403021, 0x24040001, 0x24050004,
-0x27b1001e, 0xc00457c, 0x2203021, 0x24040001,
-0x24050004, 0xc00457c, 0x2203021, 0x24040001,
-0x24050005, 0x27b00022, 0xc00457c, 0x2003021,
-0x24040001, 0x24050005, 0xc00457c, 0x2003021,
-0x24040001, 0x24050010, 0xc00457c, 0x27a60018,
-0x24040001, 0x24050010, 0xc00457c, 0x27a60018,
-0x24040001, 0x2405000a, 0xc00457c, 0x2403021,
-0x24040001, 0x2405000a, 0xc00457c, 0x2403021,
-0x24040001, 0x24050018, 0xc00457c, 0x2203021,
-0x24040001, 0x24050018, 0xc00457c, 0x2203021,
-0x24040001, 0x24050001, 0xc00457c, 0x27a60018,
-0x24040001, 0x24050001, 0xc00457c, 0x27a60018,
-0x97a20018, 0x30420004, 0x10400066, 0x3c114000,
-0x3c030001, 0x8c636f34, 0x24020005, 0x14620067,
-0x24040001, 0x24050019, 0x27b0001c, 0xc00457c,
-0x2003021, 0x24040001, 0x24050019, 0xc00457c,
-0x2003021, 0x97a2001c, 0x30430700, 0x24020400,
-0x10620027, 0x28620401, 0x1040000e, 0x24020200,
-0x1062001f, 0x28620201, 0x10400005, 0x24020100,
-0x5062001e, 0x3c130001, 0x1000001e, 0x3c020004,
-0x24020300, 0x50620019, 0x3c130002, 0x10000019,
-0x3c020004, 0x24020600, 0x1062000d, 0x28620601,
-0x10400005, 0x24020500, 0x5062000b, 0x3c130002,
-0x10000010, 0x3c020004, 0x24020700, 0x1462000d,
-0x3c020004, 0x3c130004, 0x1000000a, 0x3c150008,
-0x10000006, 0x3c130004, 0x10000005, 0x3c150008,
-0x3c130001, 0x10000002, 0x3c150008, 0x3c150010,
-0x3c020004, 0x12620017, 0x3c028000, 0x8f820054,
-0x24100001, 0x3c010001, 0xac306d9c, 0x3c010001,
-0xac306d98, 0x3c010001, 0xac306da4, 0x3c010001,
-0xac306e24, 0x3c010001, 0xac226f30, 0x3c020001,
-0x16620022, 0x2758825, 0x2021, 0x2821,
-0xc004ddb, 0x34068000, 0x3c010001, 0x1000001b,
-0xac306e20, 0x2221025, 0x2b31825, 0x438825,
-0x97a6001c, 0x3c020001, 0x8c426f1c, 0x3c070001,
-0x8ce76d98, 0x3c040001, 0x24846c5c, 0xafa20010,
-0x97a2001e, 0x3c05000c, 0x34a50323, 0x3c010001,
-0xac206e20, 0xc002b3b, 0xafa20014, 0x10000007,
-0x0, 0x3c110002, 0x23e8821, 0x8e318ff0,
-0x3c027fff, 0x3442ffff, 0x2228824, 0x3c020001,
-0x8c426da8, 0x10400069, 0x0, 0x3c020001,
-0x8c426f1c, 0x10400002, 0x3c022000, 0x2228825,
-0x8fa8002c, 0x81140, 0x3c010002, 0x220821,
-0x8c229004, 0x10400003, 0x3c020020, 0x10000005,
-0x2228825, 0x3c02ffdf, 0x3442ffff, 0x2228824,
-0x8fa8002c, 0x81140, 0x3c010002, 0x220821,
-0x8c22900c, 0x10400003, 0x3c020080, 0x1000004f,
-0x2228825, 0x3c02ff7f, 0x3442ffff, 0x1000004b,
-0x2228824, 0x8fa8002c, 0x82940, 0x3c030002,
-0x651821, 0x8c638ff8, 0x3c024000, 0x621024,
-0x14400008, 0x3c027fff, 0x3442ffff, 0x628824,
-0x3c010002, 0x250821, 0xac318ff0, 0x10000041,
-0x2201021, 0x3c020001, 0x8c426da8, 0x10400034,
-0x3c11c00c, 0x3c020001, 0x8c426e44, 0x3c04c00c,
-0x34842000, 0x3c030001, 0x8c636f1c, 0x2102b,
-0x21023, 0x441024, 0x10600003, 0x518825,
-0x3c022000, 0x2228825, 0x3c020002, 0x451021,
-0x8c429004, 0x10400003, 0x3c020020, 0x10000004,
-0x2228825, 0x3c02ffdf, 0x3442ffff, 0x2228824,
-0x8fa8002c, 0x81140, 0x3c010002, 0x220821,
-0x8c22900c, 0x10400003, 0x3c020080, 0x10000004,
-0x2228825, 0x3c02ff7f, 0x3442ffff, 0x2228824,
-0x3c020001, 0x8c426e30, 0x10400002, 0x3c020800,
-0x2228825, 0x3c020001, 0x8c426e34, 0x10400002,
-0x3c020400, 0x2228825, 0x3c020001, 0x8c426e38,
-0x10400006, 0x3c020100, 0x10000004, 0x2228825,
-0x3c027fff, 0x3442ffff, 0x628824, 0x8fa8002c,
-0x81140, 0x3c010002, 0x220821, 0xac318ff0,
-0x2201021, 0x8fbf0048, 0x8fbe0044, 0x8fb50040,
-0x8fb3003c, 0x8fb20038, 0x8fb10034, 0x8fb00030,
-0x3e00008, 0x27bd0050, 0x27bdffd0, 0xafb20028,
-0x809021, 0xafbf002c, 0xafb10024, 0xafb00020,
-0x8f840200, 0x3c100001, 0x8e106d98, 0x8f860220,
-0x24020002, 0x1202005c, 0x2e020003, 0x10400005,
-0x24020001, 0x1202000a, 0x121940, 0x1000010c,
-0x0, 0x24020004, 0x120200bf, 0x24020008,
-0x120200be, 0x128940, 0x10000105, 0x0,
-0x3c050002, 0xa32821, 0x8ca58ffc, 0x3c100002,
-0x2038021, 0x8e108ff4, 0x3c024000, 0xa21024,
-0x10400038, 0x3c020008, 0x2021024, 0x10400020,
-0x34840002, 0x3c020002, 0x431021, 0x8c429000,
-0x10400005, 0x34840020, 0x34840100, 0x3c020020,
-0x10000006, 0x2028025, 0x2402feff, 0x822024,
-0x3c02ffdf, 0x3442ffff, 0x2028024, 0x121140,
-0x3c010002, 0x220821, 0x8c229008, 0x10400005,
-0x3c020001, 0xc23025, 0x3c020080, 0x10000016,
-0x2028025, 0x3c02fffe, 0x3442ffff, 0xc23024,
-0x3c02ff7f, 0x3442ffff, 0x1000000f, 0x2028024,
-0x2402fedf, 0x822024, 0x3c02fffe, 0x3442ffff,
-0xc23024, 0x3c02ff5f, 0x3442ffff, 0x2028024,
-0x3c010002, 0x230821, 0xac209000, 0x3c010002,
-0x230821, 0xac209008, 0xaf840200, 0xaf860220,
-0x8f820220, 0x34420002, 0xaf820220, 0x1000000a,
-0x121140, 0x3c02bfff, 0x3442ffff, 0x8f830200,
-0x2028024, 0x2402fffd, 0x621824, 0xc003daf,
-0xaf830200, 0x121140, 0x3c010002, 0x220821,
-0x100000b7, 0xac308ff4, 0x3c020001, 0x8c426f1c,
-0x10400069, 0x24050004, 0x24040001, 0xc00457c,
-0x27a60018, 0x24040001, 0x24050005, 0xc00457c,
-0x27a6001a, 0x97a30018, 0x97a2001a, 0x3c040001,
-0x24846e48, 0x30630c00, 0x31a82, 0x30420c00,
-0x21282, 0xa7a2001a, 0x21080, 0x441021,
-0x431021, 0xa7a30018, 0x90480000, 0x24020001,
-0x3103ffff, 0x10620029, 0x28620002, 0x10400005,
-0x0, 0x10600009, 0x0, 0x1000003d,
-0x0, 0x10700013, 0x24020003, 0x1062002c,
-0x0, 0x10000037, 0x0, 0x8f820200,
-0x2403feff, 0x431024, 0xaf820200, 0x8f820220,
-0x3c03fffe, 0x3463ffff, 0x431024, 0xaf820220,
-0x3c010002, 0xac209004, 0x3c010002, 0x10000032,
-0xac20900c, 0x8f820200, 0x34420100, 0xaf820200,
-0x8f820220, 0x3c03fffe, 0x3463ffff, 0x431024,
-0xaf820220, 0x24020100, 0x3c010002, 0xac229004,
-0x3c010002, 0x10000024, 0xac20900c, 0x8f820200,
-0x2403feff, 0x431024, 0xaf820200, 0x8f820220,
-0x3c030001, 0x431025, 0xaf820220, 0x3c010002,
-0xac209004, 0x3c010002, 0x10000017, 0xac23900c,
-0x8f820200, 0x34420100, 0xaf820200, 0x8f820220,
-0x3c030001, 0x431025, 0xaf820220, 0x24020100,
-0x3c010002, 0xac229004, 0x3c010002, 0x1000000a,
-0xac23900c, 0x3c040001, 0x24846c80, 0x97a6001a,
-0x97a70018, 0x3c050001, 0x34a5ffff, 0xafa80010,
-0xc002b3b, 0xafa00014, 0x8f820200, 0x34420002,
-0x1000004b, 0xaf820200, 0x128940, 0x3c050002,
-0xb12821, 0x8ca58ff8, 0x3c100002, 0x2118021,
-0x8e108ff0, 0x3c024000, 0xa21024, 0x14400010,
-0x0, 0x3c020001, 0x8c426f1c, 0x14400005,
-0x3c02bfff, 0x8f820200, 0x34420002, 0xaf820200,
-0x3c02bfff, 0x3442ffff, 0xc003daf, 0x2028024,
-0x3c010002, 0x310821, 0x10000031, 0xac308ff0,
-0x3c020001, 0x8c426f1c, 0x10400005, 0x3c020020,
-0x3c020001, 0x8c426e44, 0x10400025, 0x3c020020,
-0xa21024, 0x10400007, 0x34840020, 0x24020100,
-0x3c010002, 0x310821, 0xac229004, 0x10000006,
-0x34840100, 0x3c010002, 0x310821, 0xac209004,
-0x2402feff, 0x822024, 0x3c020080, 0xa21024,
-0x10400007, 0x121940, 0x3c020001, 0x3c010002,
-0x230821, 0xac22900c, 0x10000008, 0xc23025,
-0x121140, 0x3c010002, 0x220821, 0xac20900c,
-0x3c02fffe, 0x3442ffff, 0xc23024, 0xaf840200,
-0xaf860220, 0x8f820220, 0x34420002, 0xaf820220,
-0x121140, 0x3c010002, 0x220821, 0xac308ff0,
-0x8fbf002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
-0x3e00008, 0x27bd0030, 0x0, 0x1821,
-0x308400ff, 0x2405ffdf, 0x2406ffbf, 0x641007,
-0x30420001, 0x10400004, 0x0, 0x8f820044,
-0x10000003, 0x34420040, 0x8f820044, 0x461024,
-0xaf820044, 0x8f820044, 0x34420020, 0xaf820044,
-0x8f820044, 0x451024, 0xaf820044, 0x24630001,
-0x28620008, 0x5440ffee, 0x641007, 0x3e00008,
-0x0, 0x2c820008, 0x1040001b, 0x0,
-0x2405ffdf, 0x2406ffbf, 0x41880, 0x3c020001,
-0x24426e60, 0x621821, 0x24640004, 0x90620000,
-0x10400004, 0x0, 0x8f820044, 0x10000003,
-0x34420040, 0x8f820044, 0x461024, 0xaf820044,
-0x8f820044, 0x34420020, 0xaf820044, 0x8f820044,
-0x451024, 0xaf820044, 0x24630001, 0x64102b,
-0x1440ffee, 0x0, 0x3e00008, 0x0,
-0x0, 0x0, 0x0, 0x8f8400c4,
-0x8f8600e0, 0x8f8700e4, 0x2402fff8, 0xc22824,
-0x10e5001a, 0x27623ff8, 0x14e20002, 0x24e80008,
-0x27683000, 0x55050004, 0x8d0a0000, 0x30c20004,
-0x14400012, 0x805021, 0x8ce90000, 0x8f42013c,
-0x1494823, 0x49182b, 0x94eb0006, 0x10600002,
-0x25630050, 0x494821, 0x123182b, 0x50400003,
-0x8f4201fc, 0x3e00008, 0xe01021, 0xaf8800e8,
-0x24420001, 0xaf4201fc, 0xaf8800e4, 0x3e00008,
-0x1021, 0x3e00008, 0x0, 0x8f8300e4,
-0x27623ff8, 0x10620004, 0x24620008, 0xaf8200e8,
-0x3e00008, 0xaf8200e4, 0x27623000, 0xaf8200e8,
-0x3e00008, 0xaf8200e4, 0x3e00008, 0x0,
-0x0, 0x0, 0x0, 0x8f880120,
-0x27624fe0, 0x8f830128, 0x15020002, 0x25090020,
-0x27694800, 0x11230012, 0x8fa20010, 0xad040000,
-0xad050004, 0xad060008, 0xa507000e, 0x8fa30014,
-0xad020018, 0x8fa20018, 0xad03001c, 0x25030016,
-0xad020010, 0xad030014, 0xaf890120, 0x8f4300fc,
-0x24020001, 0x2463ffff, 0x3e00008, 0xaf4300fc,
-0x8f430324, 0x1021, 0x24630001, 0x3e00008,
-0xaf430324, 0x3e00008, 0x0, 0x8f880100,
-0x276247e0, 0x8f830108, 0x15020002, 0x25090020,
-0x27694000, 0x1123000f, 0x8fa20010, 0xad040000,
-0xad050004, 0xad060008, 0xa507000e, 0x8fa30014,
-0xad020018, 0x8fa20018, 0xad03001c, 0x25030016,
-0xad020010, 0xad030014, 0xaf890100, 0x3e00008,
-0x24020001, 0x8f430328, 0x1021, 0x24630001,
-0x3e00008, 0xaf430328, 0x3e00008, 0x0,
-0x0, 0x0, 0x0, 0x0 };
-static u32 tigon2FwRodata[(MAX_RODATA_LEN/4) + 1] __devinitdata = {
-0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
-0x6e2f6677, 0x6d61696e, 0x2e632c76, 0x20312e31,
-0x2e322e34, 0x35203139, 0x39392f30, 0x312f3234,
-0x2030303a, 0x31303a35, 0x35207368, 0x75616e67,
-0x20457870, 0x20240000, 0x65767452, 0x6e674600,
-0x51657674, 0x46000000, 0x51657674, 0x505f4600,
-0x4d657674, 0x526e6746, 0x0, 0x4d516576,
-0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
-0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
-0x51725072, 0x6f644600, 0x6261644d, 0x656d537a,
-0x0, 0x68775665, 0x72000000, 0x62616448,
-0x77566572, 0x0, 0x2a2a4441, 0x574e5f41,
-0x0, 0x74785278, 0x4266537a, 0x0,
-0x62664174, 0x6e4d726b, 0x0, 0x7265645a,
-0x6f6e6531, 0x0, 0x70636943, 0x6f6e6600,
-0x67656e43, 0x6f6e6600, 0x2a646d61, 0x5244666c,
-0x0, 0x2a50414e, 0x49432a00, 0x2e2e2f2e,
-0x2e2f2e2e, 0x2f2e2e2f, 0x2e2e2f73, 0x72632f6e,
-0x69632f66, 0x77322f63, 0x6f6d6d6f, 0x6e2f6677,
-0x6d61696e, 0x2e630000, 0x72636246, 0x6c616773,
-0x0, 0x62616452, 0x78526362, 0x0,
-0x676c6f62, 0x466c6773, 0x0, 0x2b5f6469,
-0x73705f6c, 0x6f6f7000, 0x2b65765f, 0x68616e64,
-0x6c657200, 0x63616e74, 0x31446d61, 0x0,
-0x2b715f64, 0x6d615f74, 0x6f5f6e69, 0x635f636b,
-0x73756d00, 0x2b685f73, 0x656e645f, 0x64617461,
-0x5f726561, 0x64795f63, 0x6b73756d, 0x0,
-0x2b685f64, 0x6d615f72, 0x645f6173, 0x73697374,
-0x5f636b73, 0x756d0000, 0x74436b73, 0x6d4f6e00,
-0x2b715f64, 0x6d615f74, 0x6f5f6e69, 0x63000000,
-0x2b685f73, 0x656e645f, 0x64617461, 0x5f726561,
-0x64790000, 0x2b685f64, 0x6d615f72, 0x645f6173,
-0x73697374, 0x0, 0x74436b73, 0x6d4f6666,
-0x0, 0x2b685f73, 0x656e645f, 0x62645f72,
-0x65616479, 0x0, 0x68737453, 0x52696e67,
-0x0, 0x62616453, 0x52696e67, 0x0,
-0x6e696353, 0x52696e67, 0x0, 0x77446d61,
-0x416c6c41, 0x0, 0x2b715f64, 0x6d615f74,
-0x6f5f686f, 0x73745f63, 0x6b73756d, 0x0,
-0x2b685f6d, 0x61635f72, 0x785f636f, 0x6d705f63,
-0x6b73756d, 0x0, 0x2b685f64, 0x6d615f77,
-0x725f6173, 0x73697374, 0x5f636b73, 0x756d0000,
-0x72436b73, 0x6d4f6e00, 0x2b715f64, 0x6d615f74,
-0x6f5f686f, 0x73740000, 0x2b685f6d, 0x61635f72,
-0x785f636f, 0x6d700000, 0x2b685f64, 0x6d615f77,
-0x725f6173, 0x73697374, 0x0, 0x72436b73,
-0x6d4f6666, 0x0, 0x2b685f72, 0x6563765f,
-0x62645f72, 0x65616479, 0x0, 0x2b685f72,
-0x6563765f, 0x6a756d62, 0x6f5f6264, 0x5f726561,
-0x64790000, 0x2b685f72, 0x6563765f, 0x6d696e69,
-0x5f62645f, 0x72656164, 0x79000000, 0x2b6d685f,
-0x636f6d6d, 0x616e6400, 0x2b685f74, 0x696d6572,
-0x0, 0x2b685f64, 0x6f5f7570, 0x64617465,
-0x5f74785f, 0x636f6e73, 0x0, 0x2b685f64,
-0x6f5f7570, 0x64617465, 0x5f72785f, 0x70726f64,
-0x0, 0x2b636b73, 0x756d3136, 0x0,
-0x2b706565, 0x6b5f6d61, 0x635f7278, 0x5f776100,
-0x2b706565, 0x6b5f6d61, 0x635f7278, 0x0,
-0x2b646571, 0x5f6d6163, 0x5f727800, 0x2b685f6d,
-0x61635f72, 0x785f6174, 0x746e0000, 0x62616452,
-0x6574537a, 0x0, 0x72784264, 0x4266537a,
-0x0, 0x2b6e756c, 0x6c5f6861, 0x6e646c65,
-0x72000000, 0x66774f70, 0x4661696c, 0x0,
-0x2b685f75, 0x70646174, 0x655f6c65, 0x64340000,
-0x2b685f75, 0x70646174, 0x655f6c65, 0x64360000,
-0x2b685f75, 0x70646174, 0x655f6c65, 0x64320000,
-0x696e7453, 0x74617465, 0x0, 0x2a2a696e,
-0x69744370, 0x0, 0x23736372, 0x65616d00,
-0x69537461, 0x636b4572, 0x0, 0x70726f62,
-0x654d656d, 0x0, 0x2a2a4441, 0x574e5f42,
-0x0, 0x2b73775f, 0x646d615f, 0x61737369,
-0x73745f70, 0x6c75735f, 0x74696d65, 0x72000000,
-0x2b267072, 0x656c6f61, 0x645f7772, 0x5f646573,
-0x63720000, 0x2b267072, 0x656c6f61, 0x645f7264,
-0x5f646573, 0x63720000, 0x2b685f68, 0x665f7469,
-0x6d657200, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
-0x6e2f7469, 0x6d65722e, 0x632c7620, 0x312e312e,
-0x322e3335, 0x20313939, 0x392f3031, 0x2f323720,
-0x31393a30, 0x393a3530, 0x20686179, 0x65732045,
-0x78702024, 0x0, 0x65767452, 0x6e674600,
-0x51657674, 0x46000000, 0x51657674, 0x505f4600,
-0x4d657674, 0x526e6746, 0x0, 0x4d516576,
-0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
-0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
-0x51725072, 0x6f644600, 0x542d446d, 0x61526432,
-0x0, 0x542d446d, 0x61526431, 0x0,
-0x542d446d, 0x61526442, 0x0, 0x542d446d,
-0x61577232, 0x0, 0x542d446d, 0x61577231,
-0x0, 0x542d446d, 0x61577242, 0x0,
-0x0, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
-0x6e2f636f, 0x6d6d616e, 0x642e632c, 0x7620312e,
-0x312e322e, 0x32382031, 0x3939392f, 0x30312f32,
-0x30203139, 0x3a34393a, 0x34392073, 0x6875616e,
-0x67204578, 0x70202400, 0x65767452, 0x6e674600,
-0x51657674, 0x46000000, 0x51657674, 0x505f4600,
-0x4d657674, 0x526e6746, 0x0, 0x4d516576,
-0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
-0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
-0x51725072, 0x6f644600, 0x3f48636d, 0x644d6278,
-0x0, 0x3f636d64, 0x48737453, 0x0,
-0x3f636d64, 0x4d634d64, 0x0, 0x3f636d64,
-0x50726f6d, 0x0, 0x3f636d64, 0x4c696e6b,
-0x0, 0x3f636d64, 0x45727200, 0x86ac,
-0x8e5c, 0x8e5c, 0x8de4, 0x8b78,
-0x8e30, 0x8e5c, 0x8790, 0x8800,
-0x8990, 0x8a68, 0x8a34, 0x8e5c,
-0x8870, 0x8b24, 0x8e5c, 0x8b34,
-0x87b4, 0x8824, 0x0, 0x0,
-0x0, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
-0x6e2f6d63, 0x6173742e, 0x632c7620, 0x312e312e,
-0x322e3820, 0x31393938, 0x2f31322f, 0x30382030,
-0x323a3336, 0x3a333620, 0x73687561, 0x6e672045,
-0x78702024, 0x0, 0x65767452, 0x6e674600,
-0x51657674, 0x46000000, 0x51657674, 0x505f4600,
-0x4d657674, 0x526e6746, 0x0, 0x4d516576,
-0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
-0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
-0x51725072, 0x6f644600, 0x6164644d, 0x63447570,
-0x0, 0x6164644d, 0x6346756c, 0x0,
-0x64656c4d, 0x634e6f45, 0x0, 0x0,
-0x0, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
-0x6e2f646d, 0x612e632c, 0x7620312e, 0x312e322e,
-0x32342031, 0x3939382f, 0x31322f32, 0x31203030,
-0x3a33333a, 0x30392073, 0x6875616e, 0x67204578,
-0x70202400, 0x65767452, 0x6e674600, 0x51657674,
-0x46000000, 0x51657674, 0x505f4600, 0x4d657674,
-0x526e6746, 0x0, 0x4d516576, 0x74460000,
-0x4d516576, 0x505f4600, 0x5173436f, 0x6e495f46,
-0x0, 0x5173436f, 0x6e734600, 0x51725072,
-0x6f644600, 0x7377446d, 0x614f6666, 0x0,
-0x31446d61, 0x4f6e0000, 0x7377446d, 0x614f6e00,
-0x2372446d, 0x6141544e, 0x0, 0x72446d61,
-0x41544e30, 0x0, 0x72446d61, 0x41544e31,
-0x0, 0x72446d61, 0x34476200, 0x2a50414e,
-0x49432a00, 0x2e2e2f2e, 0x2e2f2e2e, 0x2f2e2e2f,
-0x2e2e2f73, 0x72632f6e, 0x69632f66, 0x77322f63,
-0x6f6d6d6f, 0x6e2f646d, 0x612e6300, 0x2377446d,
-0x6141544e, 0x0, 0x77446d61, 0x41544e30,
-0x0, 0x77446d61, 0x41544e31, 0x0,
-0x77446d61, 0x34476200, 0x0, 0x0,
-0x0, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
-0x6e2f7472, 0x6163652e, 0x632c7620, 0x312e312e,
-0x322e3520, 0x31393938, 0x2f30392f, 0x33302031,
-0x383a3530, 0x3a323820, 0x73687561, 0x6e672045,
-0x78702024, 0x0, 0x0, 0x0,
-0x0, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
-0x6e2f6461, 0x74612e63, 0x2c762031, 0x2e312e32,
-0x2e313220, 0x31393939, 0x2f30312f, 0x32302031,
-0x393a3439, 0x3a353120, 0x73687561, 0x6e672045,
-0x78702024, 0x0, 0x46575f56, 0x45525349,
-0x4f4e3a20, 0x23312046, 0x72692041, 0x70722037,
-0x2031373a, 0x35373a35, 0x32205044, 0x54203230,
-0x30300000, 0x46575f43, 0x4f4d5049, 0x4c455f54,
-0x494d453a, 0x2031373a, 0x35373a35, 0x32000000,
-0x46575f43, 0x4f4d5049, 0x4c455f42, 0x593a2064,
-0x65767263, 0x73000000, 0x46575f43, 0x4f4d5049,
-0x4c455f48, 0x4f53543a, 0x20636f6d, 0x70757465,
-0x0, 0x46575f43, 0x4f4d5049, 0x4c455f44,
-0x4f4d4149, 0x4e3a2065, 0x6e672e61, 0x6374656f,
-0x6e2e636f, 0x6d000000, 0x46575f43, 0x4f4d5049,
-0x4c45523a, 0x20676363, 0x20766572, 0x73696f6e,
-0x20322e37, 0x2e320000, 0x0, 0x12041100,
-0x0, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
-0x6e2f6d65, 0x6d2e632c, 0x7620312e, 0x312e322e,
-0x35203139, 0x39382f30, 0x392f3330, 0x2031383a,
-0x35303a30, 0x38207368, 0x75616e67, 0x20457870,
-0x20240000, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
-0x6e2f7365, 0x6e642e63, 0x2c762031, 0x2e312e32,
-0x2e343420, 0x31393938, 0x2f31322f, 0x32312030,
-0x303a3333, 0x3a313820, 0x73687561, 0x6e672045,
-0x78702024, 0x0, 0x65767452, 0x6e674600,
-0x51657674, 0x46000000, 0x51657674, 0x505f4600,
-0x4d657674, 0x526e6746, 0x0, 0x4d516576,
-0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
-0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
-0x51725072, 0x6f644600, 0x69736e74, 0x54637055,
-0x0, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
-0x6e2f7265, 0x63762e63, 0x2c762031, 0x2e312e32,
-0x2e353320, 0x31393939, 0x2f30312f, 0x31362030,
-0x323a3535, 0x3a343320, 0x73687561, 0x6e672045,
-0x78702024, 0x0, 0x65767452, 0x6e674600,
-0x51657674, 0x46000000, 0x51657674, 0x505f4600,
-0x4d657674, 0x526e6746, 0x0, 0x4d516576,
-0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
-0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
-0x51725072, 0x6f644600, 0x724d6163, 0x43686b30,
-0x0, 0x72784672, 0x6d324c67, 0x0,
-0x72784e6f, 0x53744264, 0x0, 0x72784e6f,
-0x4d694264, 0x0, 0x72784e6f, 0x4a6d4264,
-0x0, 0x7278436b, 0x446d6146, 0x0,
-0x72785144, 0x6d457846, 0x0, 0x72785144,
-0x6d614600, 0x72785144, 0x4c426446, 0x0,
-0x72785144, 0x6d426446, 0x0, 0x72784372,
-0x63506164, 0x0, 0x72536d51, 0x446d6146,
-0x0, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
-0x6e2f6d61, 0x632e632c, 0x7620312e, 0x312e322e,
-0x32322031, 0x3939382f, 0x31322f30, 0x38203032,
-0x3a33363a, 0x33302073, 0x6875616e, 0x67204578,
-0x70202400, 0x65767452, 0x6e674600, 0x51657674,
-0x46000000, 0x51657674, 0x505f4600, 0x4d657674,
-0x526e6746, 0x0, 0x4d516576, 0x74460000,
-0x4d516576, 0x505f4600, 0x5173436f, 0x6e495f46,
-0x0, 0x5173436f, 0x6e734600, 0x51725072,
-0x6f644600, 0x6d616354, 0x68726573, 0x0,
-0x23744d61, 0x6341544e, 0x0, 0x23724d61,
-0x6341544e, 0x0, 0x72656d41, 0x73737274,
-0x0, 0x6c696e6b, 0x444f574e, 0x0,
-0x6c696e6b, 0x55500000, 0x0, 0x0,
-0x0, 0x24486561, 0x6465723a, 0x202f7072,
-0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
-0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
-0x6e2f636b, 0x73756d2e, 0x632c7620, 0x312e312e,
-0x322e3920, 0x31393939, 0x2f30312f, 0x31342030,
-0x303a3033, 0x3a343820, 0x73687561, 0x6e672045,
-0x78702024, 0x0, 0x65767452, 0x6e674600,
-0x51657674, 0x46000000, 0x51657674, 0x505f4600,
-0x4d657674, 0x526e6746, 0x0, 0x4d516576,
-0x74460000, 0x4d516576, 0x505f4600, 0x5173436f,
-0x6e495f46, 0x0, 0x5173436f, 0x6e734600,
-0x51725072, 0x6f644600, 0x0, 0x0,
-0x0, 0x50726f62, 0x65506879, 0x0,
-0x6c6e6b41, 0x53535254, 0x0, 0x109a4,
-0x10a1c, 0x10a50, 0x10a7c, 0x11050,
-0x10aa8, 0x10b10, 0x111fc, 0x10dc0,
-0x10c68, 0x10c80, 0x10cc4, 0x10cec,
-0x10d0c, 0x10d34, 0x111fc, 0x10dc0,
-0x10df8, 0x10e10, 0x10e40, 0x10e68,
-0x10e88, 0x10eb0, 0x0, 0x10fdc,
-0x11008, 0x1102c, 0x111fc, 0x11050,
-0x11078, 0x11108, 0x0, 0x0,
-0x0, 0x1186c, 0x1193c, 0x11a14,
-0x11ae4, 0x11b40, 0x11c1c, 0x11c44,
-0x11d20, 0x11d48, 0x11ef0, 0x11f18,
-0x120c0, 0x122b8, 0x1254c, 0x12460,
-0x1254c, 0x12578, 0x120e8, 0x12290,
-0x7273745f, 0x676d6969, 0x0, 0x12608,
-0x12640, 0x12728, 0x13374, 0x133b4,
-0x133cc, 0x7365746c, 0x6f6f7000, 0x0,
-0x0, 0x13bbc, 0x13bfc, 0x13c8c,
-0x13cd0, 0x13d34, 0x13dc0, 0x13df4,
-0x13e7c, 0x13f14, 0x13fe4, 0x14024,
-0x140a8, 0x140cc, 0x141dc, 0x646f4261,
-0x73655067, 0x0, 0x0, 0x0,
-0x0, 0x73746d61, 0x634c4e4b, 0x0,
-0x6765746d, 0x636c6e6b, 0x0, 0x14ed8,
-0x14ed8, 0x14b8c, 0x14bd8, 0x14c24,
-0x14ed8, 0x7365746d, 0x61636163, 0x74000000,
-0x0, 0x0 };
-static u32 tigon2FwData[(MAX_DATA_LEN/4) + 1] __devinitdata = {
-0x1,
-0x1, 0x1, 0xc001fc, 0x3ffc,
-0xc00000, 0x416c7465, 0x6f6e2041, 0x63654e49,
-0x43205600, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x416c7465,
-0x6f6e2041, 0x63654e49, 0x43205600, 0x42424242,
-0x0, 0x0, 0x0, 0x1ffffc,
-0x1fff7c, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x60cf00,
-0x60, 0xcf000000, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x3, 0x0,
-0x1, 0x0, 0x0, 0x0,
-0x1, 0x0, 0x1, 0x0,
-0x0, 0x0, 0x0, 0x1,
-0x1, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x1000000, 0x21000000,
-0x12000140, 0x0, 0x0, 0x20000000,
-0x120000a0, 0x0, 0x12000060, 0x12000180,
-0x120001e0, 0x0, 0x0, 0x0,
-0x1, 0x0, 0x0, 0x0,
-0x0, 0x0, 0x0, 0x2,
-0x0, 0x0, 0x30001, 0x1,
-0x30201, 0x0, 0x0, 0x1010101,
-0x1010100, 0x10100, 0x1010001, 0x10001,
-0x1000101, 0x101, 0x0, 0x0 };
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 187ac6e..7709992 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1813,6 +1813,25 @@
 	lp->ext_phy_addr = 1;
 }
 
+static const struct net_device_ops amd8111e_netdev_ops = {
+	.ndo_open		= amd8111e_open,
+	.ndo_stop		= amd8111e_close,
+	.ndo_start_xmit		= amd8111e_start_xmit,
+	.ndo_tx_timeout		= amd8111e_tx_timeout,
+	.ndo_get_stats		= amd8111e_get_stats,
+	.ndo_set_multicast_list = amd8111e_set_multicast_list,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= amd8111e_set_mac_address,
+	.ndo_do_ioctl		= amd8111e_ioctl,
+	.ndo_change_mtu		= amd8111e_change_mtu,
+#if AMD8111E_VLAN_TAG_USED
+	.ndo_vlan_rx_register	= amd8111e_vlan_rx_register,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	 = amd8111e_poll,
+#endif
+};
+
 static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
@@ -1872,7 +1891,6 @@
 
 #if AMD8111E_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
-	dev->vlan_rx_register =amd8111e_vlan_rx_register;
 #endif
 
 	lp = netdev_priv(dev);
@@ -1901,27 +1919,16 @@
 	if(dynamic_ipg[card_idx++])
 		lp->options |= OPTION_DYN_IPG_ENABLE;
 
+
 	/* Initialize driver entry points */
-	dev->open = amd8111e_open;
-	dev->hard_start_xmit = amd8111e_start_xmit;
-	dev->stop = amd8111e_close;
-	dev->get_stats = amd8111e_get_stats;
-	dev->set_multicast_list = amd8111e_set_multicast_list;
-	dev->set_mac_address = amd8111e_set_mac_address;
-	dev->do_ioctl = amd8111e_ioctl;
-	dev->change_mtu = amd8111e_change_mtu;
+	dev->netdev_ops = &amd8111e_netdev_ops;
 	SET_ETHTOOL_OPS(dev, &ops);
 	dev->irq =pdev->irq;
-	dev->tx_timeout = amd8111e_tx_timeout;
 	dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
 	netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = amd8111e_poll;
-#endif
 
 #if AMD8111E_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-	dev->vlan_rx_register =amd8111e_vlan_rx_register;
 #endif
 	/* Probe the external PHY */
 	amd8111e_probe_ext_phy(dev);
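
The amd8111e hunks above are the template for the net_device_ops conversion repeated throughout this series: callbacks that used to be assigned one by one into struct net_device at probe time (dev->open, dev->hard_start_xmit, dev->poll_controller, ...) move into a single const ops table, and the probe routine only sets dev->netdev_ops. A minimal sketch of the before/after shape, using a hypothetical foo driver (foo_open, foo_close and foo_xmit are placeholders, not symbols from this patch):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Placeholder callbacks, for illustration only. */
static int foo_open(struct net_device *dev)  { return 0; }
static int foo_close(struct net_device *dev) { return 0; }
static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* New style: one shared, read-only table per driver... */
static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,
	.ndo_stop		= foo_close,
	.ndo_start_xmit		= foo_xmit,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static void foo_setup(struct net_device *dev)
{
	/* ...replacing the old per-device assignments such as
	 *	dev->open            = foo_open;
	 *	dev->stop            = foo_close;
	 *	dev->hard_start_xmit = foo_xmit;
	 */
	dev->netdev_ops = &foo_netdev_ops;
}

Because the table is const and shared by every instance, anything that used to be chosen per device at probe time has to become either data in netdev_priv() or a small dispatch wrapper (see the atp.c sketch further down).
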
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 9a0be9b..da64ba8 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -48,12 +48,18 @@
 
 /* Index to functions, as function prototypes. */
 static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev);
-static struct net_device_stats *ipddp_get_stats(struct net_device *dev);
 static int ipddp_create(struct ipddp_route *new_rt);
 static int ipddp_delete(struct ipddp_route *rt);
 static struct ipddp_route* ipddp_find_route(struct ipddp_route *rt);
 static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 
+static const struct net_device_ops ipddp_netdev_ops = {
+	.ndo_start_xmit		= ipddp_xmit,
+	.ndo_do_ioctl   	= ipddp_ioctl,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
 
 static struct net_device * __init ipddp_init(void)
 {
@@ -61,7 +67,7 @@
 	struct net_device *dev;
 	int err;
 
-	dev = alloc_etherdev(sizeof(struct net_device_stats));
+	dev = alloc_etherdev(0);
 	if (!dev)
 		return ERR_PTR(-ENOMEM);
 
@@ -71,9 +77,7 @@
                 printk(version);
 
 	/* Initialize the device structure. */
-        dev->hard_start_xmit = ipddp_xmit;
-        dev->get_stats      = ipddp_get_stats;
-        dev->do_ioctl       = ipddp_ioctl;
+	dev->netdev_ops = &ipddp_netdev_ops;
 
         dev->type = ARPHRD_IPDDP;       	/* IP over DDP tunnel */
         dev->mtu = 585;
@@ -103,13 +107,6 @@
         return dev;
 }
 
-/*
- * Get the current statistics. This may be called with the card open or closed.
- */
-static struct net_device_stats *ipddp_get_stats(struct net_device *dev)
-{
-	return netdev_priv(dev);
-}
 
 /*
  * Transmit LLAP/ELAP frame using aarp_send_ddp.
@@ -170,8 +167,8 @@
 
         skb->protocol = htons(ETH_P_ATALK);     /* Protocol has changed */
 
-	((struct net_device_stats *) netdev_priv(dev))->tx_packets++;
-	((struct net_device_stats *) netdev_priv(dev))->tx_bytes += skb->len;
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
 
         if(aarp_send_ddp(rt->dev, skb, &rt->at, NULL) < 0)
                 dev_kfree_skb(skb);
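
Alongside the ops table, ipddp stops keeping its own struct net_device_stats in the private area: counters go into the dev->stats member every net_device already carries, the private area passed to alloc_etherdev() shrinks to zero, and the get_stats hook disappears because the core falls back to dev->stats when a driver supplies no ndo_get_stats. A small sketch of that counting style (bar_xmit is a hypothetical transmit routine, not from this patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* No ndo_get_stats is registered; the stack reports dev->stats directly. */
static int bar_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Count into the built-in statistics block; no private copy needed. */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
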
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 6278606..d15d8b7 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -646,6 +646,7 @@
 	.ndo_get_stats		= ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= eth_set_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ei_poll,
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 9ad22d1..1cf2f94 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -1357,6 +1357,7 @@
 	.ndo_start_xmit		= ks8695_start_xmit,
 	.ndo_tx_timeout		= ks8695_timeout,
 	.ndo_set_mac_address	= ks8695_set_mac,
+	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_multicast_list	= ks8695_set_multicast,
 };
 
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index ea493ce..4317b3e 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -204,8 +204,7 @@
 static void net_rx(struct net_device *dev);
 static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode);
 static int net_close(struct net_device *dev);
-static void set_rx_mode_8002(struct net_device *dev);
-static void set_rx_mode_8012(struct net_device *dev);
+static void set_rx_mode(struct net_device *dev);
 static void tx_timeout(struct net_device *dev);
 
 
@@ -242,6 +241,17 @@
 	return -ENODEV;
 }
 
+static const struct net_device_ops atp_netdev_ops = {
+	.ndo_open		= net_open,
+	.ndo_stop		= net_close,
+	.ndo_start_xmit		= atp_send_packet,
+	.ndo_set_multicast_list = set_rx_mode,
+	.ndo_tx_timeout		= tx_timeout,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int __init atp_probe1(long ioaddr)
 {
 	struct net_device *dev = NULL;
@@ -342,12 +352,7 @@
 	if (dev->mem_end & 0xf)
 		net_debug = dev->mem_end & 7;
 
-	dev->open		= net_open;
-	dev->stop		= net_close;
-	dev->hard_start_xmit	= atp_send_packet;
-	dev->set_multicast_list =
-	  lp->chip_type == RTL8002 ? &set_rx_mode_8002 : &set_rx_mode_8012;
-	dev->tx_timeout		= tx_timeout;
+	dev->netdev_ops 	= &atp_netdev_ops;
 	dev->watchdog_timeo	= TX_TIMEOUT;
 
 	res = register_netdev(dev);
@@ -903,6 +908,17 @@
     write_reg(ioaddr, CMR2, CMR2_IRQOUT); /* Switch back to page 0 */
 }
 
+static void set_rx_mode(struct net_device *dev)
+{
+	struct net_local *lp = netdev_priv(dev);
+
+	if (lp->chip_type == RTL8002)
+		return set_rx_mode_8002(dev);
+	else
+		return set_rx_mode_8012(dev);
+}
+
+
 static int __init atp_init_module(void) {
 	if (debug)					/* Emit version even if no cards detected. */
 		printk(KERN_INFO "%s", version);
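
atp.c shows the wrinkle mentioned above: dev->set_multicast_list used to be chosen per board at probe time (set_rx_mode_8002 vs. set_rx_mode_8012, picked from lp->chip_type), which a shared const table cannot express, so the choice moves into a small wrapper that dispatches on data kept in netdev_priv(). Roughly, with quux_* as stand-in names:

#include <linux/netdevice.h>

struct quux_priv {
	int chip_type;			/* set at probe time */
};
#define QUUX_CHIP_A	1

static void quux_set_rx_mode_a(struct net_device *dev) { }
static void quux_set_rx_mode_b(struct net_device *dev) { }

/* Single .ndo_set_multicast_list entry; the variant is chosen at run time. */
static void quux_set_rx_mode(struct net_device *dev)
{
	struct quux_priv *lp = netdev_priv(dev);

	if (lp->chip_type == QUUX_CHIP_A)
		quux_set_rx_mode_a(dev);
	else
		quux_set_rx_mode_b(dev);
}
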
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 337488e..a4eb6c4 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -37,7 +37,10 @@
 #define __ei_open       ax_ei_open
 #define __ei_close      ax_ei_close
 #define __ei_poll	ax_ei_poll
+#define __ei_start_xmit ax_ei_start_xmit
 #define __ei_tx_timeout ax_ei_tx_timeout
+#define __ei_get_stats  ax_ei_get_stats
+#define __ei_set_multicast_list ax_ei_set_multicast_list
 #define __ei_interrupt  ax_ei_interrupt
 #define ____alloc_ei_netdev ax__alloc_ei_netdev
 #define __NS8390_init   ax_NS8390_init
@@ -623,6 +626,23 @@
 }
 #endif
 
+static const struct net_device_ops ax_netdev_ops = {
+	.ndo_open		= ax_open,
+	.ndo_stop		= ax_close,
+	.ndo_do_ioctl		= ax_ioctl,
+
+	.ndo_start_xmit		= ax_ei_start_xmit,
+	.ndo_tx_timeout		= ax_ei_tx_timeout,
+	.ndo_get_stats		= ax_ei_get_stats,
+	.ndo_set_multicast_list = ax_ei_set_multicast_list,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_change_mtu		= eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= ax_ei_poll,
+#endif
+};
+
 /* setup code */
 
 static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
@@ -738,9 +758,7 @@
 	ei_status.get_8390_hdr	= &ax_get_8390_hdr;
 	ei_status.priv = 0;
 
-	dev->open		= ax_open;
-	dev->stop		= ax_close;
-	dev->do_ioctl		= ax_ioctl;
+	dev->netdev_ops		= &ax_netdev_ops;
 	dev->ethtool_ops	= &ax_ethtool_ops;
 
 	ax->msg_enable		= NETIF_MSG_LINK;
@@ -753,9 +771,6 @@
 	ax->mii.mdio_write	= ax_phy_write;
 	ax->mii.dev		= dev;
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = ax_ei_poll;
-#endif
 	ax_NS8390_init(dev, 0);
 
 	if (first_init)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 0e7470a..c38512e 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -73,8 +73,8 @@
 	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
 #define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
 
-#define RX_PKT_OFFSET		30
-#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET + 64)
+#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
+#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)
 
 /* minimum number of free TX descriptors required to wake up TX process */
 #define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)
@@ -679,10 +679,10 @@
 			dev_kfree_skb_any(skb);
 			return -ENOMEM;
 		}
+		bp->force_copybreak = 1;
 	}
 
 	rh = (struct rx_header *) skb->data;
-	skb_reserve(skb, RX_PKT_OFFSET);
 
 	rh->len = 0;
 	rh->flags = 0;
@@ -693,13 +693,13 @@
 	if (src_map != NULL)
 		src_map->skb = NULL;
 
-	ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET));
+	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
 	if (dest_idx == (B44_RX_RING_SIZE - 1))
 		ctrl |= DESC_CTRL_EOT;
 
 	dp = &bp->rx_ring[dest_idx];
 	dp->ctrl = cpu_to_le32(ctrl);
-	dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);
+	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
 		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
@@ -801,7 +801,7 @@
 		/* Omit CRC. */
 		len -= 4;
 
-		if (len > RX_COPY_THRESHOLD) {
+		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
 			int skb_size;
 			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
 			if (skb_size < 0)
@@ -809,8 +809,8 @@
 			ssb_dma_unmap_single(bp->sdev, map,
 					     skb_size, DMA_FROM_DEVICE);
 			/* Leave out rx_header */
-                	skb_put(skb, len + RX_PKT_OFFSET);
-            	        skb_pull(skb, RX_PKT_OFFSET);
+			skb_put(skb, len + RX_PKT_OFFSET);
+			skb_pull(skb, RX_PKT_OFFSET);
 		} else {
 			struct sk_buff *copy_skb;
 
@@ -2108,6 +2108,22 @@
 	return err;
 }
 
+static const struct net_device_ops b44_netdev_ops = {
+	.ndo_open		= b44_open,
+	.ndo_stop		= b44_close,
+	.ndo_start_xmit		= b44_start_xmit,
+	.ndo_get_stats		= b44_get_stats,
+	.ndo_set_multicast_list = b44_set_rx_mode,
+	.ndo_set_mac_address	= b44_set_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= b44_ioctl,
+	.ndo_tx_timeout		= b44_tx_timeout,
+	.ndo_change_mtu		= b44_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= b44_poll_controller,
+#endif
+};
+
 static int __devinit b44_init_one(struct ssb_device *sdev,
 				  const struct ssb_device_id *ent)
 {
@@ -2137,6 +2153,7 @@
 	bp = netdev_priv(dev);
 	bp->sdev = sdev;
 	bp->dev = dev;
+	bp->force_copybreak = 0;
 
 	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
 
@@ -2145,20 +2162,9 @@
 	bp->rx_pending = B44_DEF_RX_RING_PENDING;
 	bp->tx_pending = B44_DEF_TX_RING_PENDING;
 
-	dev->open = b44_open;
-	dev->stop = b44_close;
-	dev->hard_start_xmit = b44_start_xmit;
-	dev->get_stats = b44_get_stats;
-	dev->set_multicast_list = b44_set_rx_mode;
-	dev->set_mac_address = b44_set_mac_addr;
-	dev->do_ioctl = b44_ioctl;
-	dev->tx_timeout = b44_tx_timeout;
+	dev->netdev_ops = &b44_netdev_ops;
 	netif_napi_add(dev, &bp->napi, b44_poll, 64);
 	dev->watchdog_timeo = B44_TX_TIMEOUT;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = b44_poll_controller;
-#endif
-	dev->change_mtu = b44_change_mtu;
 	dev->irq = sdev->irq;
 	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
 
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
index 7db0c84..e678498 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/b44.h
@@ -395,7 +395,7 @@
 	u32			rx_pending;
 	u32			tx_pending;
 	u8			phy_addr;
-
+	u8			force_copybreak;
 	struct mii_if_info	mii_if;
 };
 
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index fd705d1..6fcccef 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -20,6 +20,11 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define BCM_VLAN			1
+#endif
+
+
 /* error/debug prints */
 
 #define DRV_MODULE_NAME		"bnx2x"
@@ -78,11 +83,6 @@
 #endif
 
 
-#ifdef NETIF_F_HW_VLAN_TX
-#define BCM_VLAN			1
-#endif
-
-
 #define U64_LO(x)			(u32)(((u64)(x)) & 0xffffffff)
 #define U64_HI(x)			(u32)(((u64)(x)) >> 32)
 #define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))
@@ -150,6 +150,9 @@
 
 #define PAGES_PER_SGE_SHIFT		0
 #define PAGES_PER_SGE			(1 << PAGES_PER_SGE_SHIFT)
+#define SGE_PAGE_SIZE			PAGE_SIZE
+#define SGE_PAGE_SHIFT			PAGE_SHIFT
+#define SGE_PAGE_ALIGN(addr)		PAGE_ALIGN(addr)
 
 #define BCM_RX_ETH_PAYLOAD_ALIGN	64
 
@@ -736,7 +739,7 @@
 	struct bnx2x_fastpath	fp[MAX_CONTEXT];
 	void __iomem		*regview;
 	void __iomem		*doorbells;
-#define BNX2X_DB_SIZE		(16*2048)
+#define BNX2X_DB_SIZE		(16*BCM_PAGE_SIZE)
 
 	struct net_device	*dev;
 	struct pci_dev		*pdev;
@@ -801,6 +804,8 @@
 #define TPA_ENABLE_FLAG			0x80
 #define NO_MCP_FLAG			0x100
 #define BP_NOMCP(bp)			(bp->flags & NO_MCP_FLAG)
+#define HW_VLAN_TX_FLAG			0x400
+#define HW_VLAN_RX_FLAG			0x800
 
 	int			func;
 #define BP_PORT(bp)			(bp->func % PORT_MAX)
@@ -811,7 +816,7 @@
 	int			pm_cap;
 	int			pcie_cap;
 
-	struct work_struct	sp_task;
+	struct delayed_work	sp_task;
 	struct work_struct	reset_task;
 
 	struct timer_list	timer;
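
Two small things happen in bnx2x.h besides the SGE_PAGE_* aliases: sp_task becomes a delayed_work (see the sketch after bnx2x_main.c below), and the BCM_VLAN guard now keys off the 802.1q Kconfig symbols rather than NETIF_F_HW_VLAN_TX, which is a plain #define in netdevice.h and so was effectively always true. The Kconfig test is the usual idiom for "feature available, built in or as a module"; a minimal illustration:

/* Kconfig defines CONFIG_FOO for FOO=y and CONFIG_FOO_MODULE for FOO=m. */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN	1
#endif

#ifdef BCM_VLAN
/* vlgrp registration and vlan_hwaccel_receive_skb() paths compile here */
#endif
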
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index 67de94f..fefa6ab 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -3359,7 +3359,7 @@
 	u8 shift = 8*4;
 	u8 digit;
 	if (len < 10) {
-		/* Need more then 10chars for this format */
+		/* Need more than 10chars for this format */
 		*str_ptr = '\0';
 		return -EINVAL;
 	}
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index ef8103b..7c53379 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -38,9 +38,7 @@
 #include <linux/time.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
-#ifdef NETIF_F_HW_VLAN_TX
-	#include <linux/if_vlan.h>
-#endif
+#include <linux/if_vlan.h>
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <net/checksum.h>
@@ -95,6 +93,7 @@
 module_param(use_multi, int, 0);
 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
 #endif
+static struct workqueue_struct *bnx2x_wq;
 
 enum bnx2x_board_type {
 	BCM57710 = 0,
@@ -671,7 +670,8 @@
 		synchronize_irq(bp->pdev->irq);
 
 	/* make sure sp_task is not running */
-	cancel_work_sync(&bp->sp_task);
+	cancel_delayed_work(&bp->sp_task);
+	flush_workqueue(bnx2x_wq);
 }
 
 /* fast path */
@@ -972,7 +972,7 @@
 		return;
 
 	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
-		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
 	__free_pages(page, PAGES_PER_SGE_SHIFT);
 
 	sw_buf->page = NULL;
@@ -1000,7 +1000,7 @@
 	if (unlikely(page == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
+	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
 			       PCI_DMA_FROMDEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		__free_pages(page, PAGES_PER_SGE_SHIFT);
@@ -1096,9 +1096,9 @@
 				  struct eth_fast_path_rx_cqe *fp_cqe)
 {
 	struct bnx2x *bp = fp->bp;
-	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
+	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
 				     le16_to_cpu(fp_cqe->len_on_bd)) >>
-		      BCM_PAGE_SHIFT;
+		      SGE_PAGE_SHIFT;
 	u16 last_max, last_elem, first_elem;
 	u16 delta = 0;
 	u16 i;
@@ -1203,22 +1203,22 @@
 			       u16 cqe_idx)
 {
 	struct sw_rx_page *rx_pg, old_rx_pg;
-	struct page *sge;
 	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
 	u32 i, frag_len, frag_size, pages;
 	int err;
 	int j;
 
 	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
-	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
+	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
 
 	/* This is needed in order to enable forwarding support */
 	if (frag_size)
-		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
+		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
 					       max(frag_size, (u32)len_on_bd));
 
 #ifdef BNX2X_STOP_ON_ERROR
-	if (pages > 8*PAGES_PER_SGE) {
+	if (pages >
+	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 			  pages, cqe_idx);
 		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
@@ -1234,9 +1234,8 @@
 
 		/* FW gives the indices of the SGE as if the ring is an array
 		   (meaning that "next" element will consume 2 indices) */
-		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
+		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
 		rx_pg = &fp->rx_page_ring[sge_idx];
-		sge = rx_pg->page;
 		old_rx_pg = *rx_pg;
 
 		/* If we fail to allocate a substitute page, we simply stop
@@ -1249,7 +1248,7 @@
 
 		/* Unmap the page as we are going to pass it to the stack */
 		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
-			      BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
 
 		/* Add one frag and update the appropriate fields in the skb */
 		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -1282,6 +1281,13 @@
 	if (likely(new_skb)) {
 		/* fix ip xsum and give it to the stack */
 		/* (no need to map the new skb) */
+#ifdef BCM_VLAN
+		int is_vlan_cqe =
+			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
+			 PARSING_FLAGS_VLAN);
+		int is_not_hwaccel_vlan_cqe =
+			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
+#endif
 
 		prefetch(skb);
 		prefetch(((char *)(skb)) + 128);
@@ -1306,6 +1312,12 @@
 			struct iphdr *iph;
 
 			iph = (struct iphdr *)skb->data;
+#ifdef BCM_VLAN
+			/* If there is no Rx VLAN offloading,
+			   take the VLAN tag into account */
+			if (unlikely(is_not_hwaccel_vlan_cqe))
+				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
+#endif
 			iph->check = 0;
 			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
 		}
@@ -1313,9 +1325,8 @@
 		if (!bnx2x_fill_frag_skb(bp, fp, skb,
 					 &cqe->fast_path_cqe, cqe_idx)) {
 #ifdef BCM_VLAN
-			if ((bp->vlgrp != NULL) &&
-			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
-			     PARSING_FLAGS_VLAN))
+			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
+			    (!is_not_hwaccel_vlan_cqe))
 				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
 						le16_to_cpu(cqe->fast_path_cqe.
 							    vlan_tag));
@@ -1355,11 +1366,23 @@
 	rx_prods.cqe_prod = rx_comp_prod;
 	rx_prods.sge_prod = rx_sge_prod;
 
+	/*
+	 * Make sure that the BD and SGE data is updated before updating the
+	 * producers since FW might read the BD/SGE right after the producer
+	 * is updated.
+	 * This is only applicable for weak-ordered memory model archs such
+	 * as IA-64. The following barrier is also mandatory since FW
+	 * assumes BDs must have buffers.
+	 */
+	wmb();
+
 	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
 		REG_WR(bp, BAR_TSTRORM_INTMEM +
 		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
 		       ((u32 *)&rx_prods)[i]);
 
+	mmiowb(); /* keep prod updates ordered */
+
 	DP(NETIF_MSG_RX_STATUS,
 	   "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
 	   bd_prod, rx_comp_prod, rx_sge_prod);
@@ -1415,7 +1438,7 @@
 		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
 		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
 		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
-		   cqe->fast_path_cqe.rss_hash_result,
+		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
 		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
 		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));
 
@@ -1547,7 +1570,7 @@
 		}
 
 #ifdef BCM_VLAN
-		if ((bp->vlgrp != NULL) &&
+		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
 		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
 		     PARSING_FLAGS_VLAN))
 			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
@@ -1580,7 +1603,6 @@
 	/* Update producers */
 	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
 			     fp->rx_sge_prod);
-	mmiowb(); /* keep prod updates ordered */
 
 	fp->rx_pkt += rx_pkt;
 	fp->rx_calls++;
@@ -1660,7 +1682,7 @@
 
 
 	if (unlikely(status & 0x1)) {
-		schedule_work(&bp->sp_task);
+		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
 
 		status &= ~0x1;
 		if (!status)
@@ -1887,7 +1909,8 @@
 
 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
 {
-	switch (bp->link_vars.ieee_fc) {
+	switch (bp->link_vars.ieee_fc &
+		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
 	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
 		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
 					  ADVERTISED_Pause);
@@ -1957,10 +1980,11 @@
 		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 		bnx2x_release_phy_lock(bp);
 
+		bnx2x_calc_fc_adv(bp);
+
 		if (bp->link_vars.link_up)
 			bnx2x_link_report(bp);
 
-		bnx2x_calc_fc_adv(bp);
 
 		return rc;
 	}
@@ -2220,9 +2244,7 @@
 	/* Make sure that we are synced with the current statistics */
 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
-	bnx2x_acquire_phy_lock(bp);
 	bnx2x_link_update(&bp->link_params, &bp->link_vars);
-	bnx2x_release_phy_lock(bp);
 
 	if (bp->link_vars.link_up) {
 
@@ -2471,6 +2493,8 @@
 	if (asserted & ATTN_HARD_WIRED_MASK) {
 		if (asserted & ATTN_NIG_FOR_FUNC) {
 
+			bnx2x_acquire_phy_lock(bp);
+
 			/* save nig interrupt mask */
 			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
 			REG_WR(bp, nig_int_mask_addr, 0);
@@ -2526,8 +2550,10 @@
 	REG_WR(bp, hc_addr, asserted);
 
 	/* now set back the mask */
-	if (asserted & ATTN_NIG_FOR_FUNC)
+	if (asserted & ATTN_NIG_FOR_FUNC) {
 		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
+		bnx2x_release_phy_lock(bp);
+	}
 }
 
 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -2795,8 +2821,10 @@
 static void bnx2x_attn_int(struct bnx2x *bp)
 {
 	/* read local copy of bits */
-	u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
-	u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
+	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
+								attn_bits);
+	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
+								attn_bits_ack);
 	u32 attn_state = bp->attn_state;
 
 	/* look for changed bits */
@@ -2820,7 +2848,7 @@
 
 static void bnx2x_sp_task(struct work_struct *work)
 {
-	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
+	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
 	u16 status;
 
 
@@ -2844,7 +2872,7 @@
 	if (status & 0x2)
 		bp->stats_pending = 0;
 
-	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
+	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
 		     IGU_INT_NOP, 1);
 	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
 		     IGU_INT_NOP, 1);
@@ -2875,7 +2903,7 @@
 		return IRQ_HANDLED;
 #endif
 
-	schedule_work(&bp->sp_task);
+	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
 
 	return IRQ_HANDLED;
 }
@@ -2892,7 +2920,7 @@
 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
 	do { \
 		s_lo += a_lo; \
-		s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \
+		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
 	} while (0)
 
 /* difference = minuend - subtrahend */
@@ -4496,7 +4524,7 @@
 
 static void bnx2x_init_ind_table(struct bnx2x *bp)
 {
-	int port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
 	int i;
 
 	if (!is_multi(bp))
@@ -4505,10 +4533,8 @@
 	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
 	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
 		REG_WR8(bp, BAR_TSTRORM_INTMEM +
-			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
-			i % bp->num_queues);
-
-	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
+			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
+			BP_CL_ID(bp) + (i % bp->num_queues));
 }
 
 static void bnx2x_set_client_config(struct bnx2x *bp)
@@ -4517,12 +4543,12 @@
 	int port = BP_PORT(bp);
 	int i;
 
-	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
+	tstorm_client.mtu = bp->dev->mtu;
 	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
 	tstorm_client.config_flags =
 				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
 #ifdef BCM_VLAN
-	if (bp->rx_mode && bp->vlgrp) {
+	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
 		tstorm_client.config_flags |=
 				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
 		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
@@ -4531,7 +4557,7 @@
 
 	if (bp->flags & TPA_ENABLE_FLAG) {
 		tstorm_client.max_sges_for_packet =
-			BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
+			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
 		tstorm_client.max_sges_for_packet =
 			((tstorm_client.max_sges_for_packet +
 			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
@@ -4714,10 +4740,11 @@
 			 bp->e1hov);
 	}
 
-	/* Init CQ ring mapping and aggregation size */
-	max_agg_size = min((u32)(bp->rx_buf_size +
-				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
-			   (u32)0xffff);
+	/* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
+	max_agg_size =
+		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
+			  SGE_PAGE_SIZE * PAGES_PER_SGE),
+		    (u32)0xffff);
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
@@ -4785,6 +4812,15 @@
 	bnx2x_init_context(bp);
 	bnx2x_init_internal(bp, load_code);
 	bnx2x_init_ind_table(bp);
+	bnx2x_stats_init(bp);
+
+	/* At this point, we are ready for interrupts */
+	atomic_set(&bp->intr_sem, 0);
+
+	/* flush all before enabling interrupts */
+	mb();
+	mmiowb();
+
 	bnx2x_int_enable(bp);
 }
 
@@ -5134,7 +5170,6 @@
 	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
 	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
 	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
-	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
 
 /*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
 	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
@@ -5212,6 +5247,7 @@
 	}
 
 	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
+	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
 	/* set NIC mode */
 	REG_WR(bp, PRS_REG_NIC_MODE, 1);
 	if (CHIP_IS_E1H(bp))
@@ -6393,17 +6429,8 @@
 		}
 	}
 
-	bnx2x_stats_init(bp);
-
 	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
 
-	/* Enable Rx interrupt handling before sending the ramrod
-	   as it's completed on Rx FP queue */
-	bnx2x_napi_enable(bp);
-
-	/* Enable interrupt handling */
-	atomic_set(&bp->intr_sem, 0);
-
 	rc = bnx2x_setup_leading(bp);
 	if (rc) {
 		BNX2X_ERR("Setup leading failed!\n");
@@ -7501,7 +7528,7 @@
 
 	mutex_init(&bp->port.phy_mutex);
 
-	INIT_WORK(&bp->sp_task, bnx2x_sp_task);
+	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 	INIT_WORK(&bp->reset_task, bnx2x_reset_task);
 
 	rc = bnx2x_get_hwinfo(bp);
@@ -8243,6 +8270,9 @@
 	struct bnx2x *bp = netdev_priv(dev);
 	int rc;
 
+	if (!netif_running(dev))
+		return -EAGAIN;
+
 	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
 	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
 	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
@@ -8724,6 +8754,8 @@
 	tx_bd->general_data = ((UNICAST_ADDRESS <<
 				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
 
+	wmb();
+
 	fp->hw_tx_prods->bds_prod =
 		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
 	mb(); /* FW restriction: must not reorder writing nbd and packets */
@@ -8775,7 +8807,6 @@
 	/* Update producers */
 	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
 			     fp->rx_sge_prod);
-	mmiowb(); /* keep prod updates ordered */
 
 test_loopback_exit:
 	bp->link_params.loopback_mode = LOOPBACK_NONE;
@@ -9546,11 +9577,14 @@
 	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
 	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
 
-	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
+#ifdef BCM_VLAN
+	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
+	    (bp->flags & HW_VLAN_TX_FLAG)) {
 		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
 		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
 		vlan_off += 4;
 	} else
+#endif
 		tx_bd->vlan = cpu_to_le16(pkt_prod);
 
 	if (xmit_type) {
@@ -9702,6 +9736,15 @@
 
 	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
 
+	/*
+	 * Make sure that the BD data is updated before updating the producer
+	 * since FW might read the BD right after the producer is updated.
+	 * This is only applicable for weak-ordered memory model archs such
+	 * as IA-64. The following barrier is also mandatory since FW
+	 * assumes packets must have BDs.
+	 */
+	wmb();
+
 	fp->hw_tx_prods->bds_prod =
 		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
 	mb(); /* FW restriction: must not reorder writing nbd and packets */
@@ -9715,6 +9758,9 @@
 	dev->trans_start = jiffies;
 
 	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
+		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
+		   if we put Tx into XOFF state. */
+		smp_mb();
 		netif_stop_queue(dev);
 		bp->eth_stats.driver_xoff++;
 		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
@@ -9984,6 +10030,16 @@
 	struct bnx2x *bp = netdev_priv(dev);
 
 	bp->vlgrp = vlgrp;
+
+	/* Set flags according to the required capabilities */
+	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
+
+	if (dev->features & NETIF_F_HW_VLAN_TX)
+		bp->flags |= HW_VLAN_TX_FLAG;
+
+	if (dev->features & NETIF_F_HW_VLAN_RX)
+		bp->flags |= HW_VLAN_RX_FLAG;
+
 	if (netif_running(dev))
 		bnx2x_set_client_config(bp);
 }
@@ -10140,6 +10196,7 @@
 		dev->features |= NETIF_F_HIGHDMA;
 #ifdef BCM_VLAN
 	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
+	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
 #endif
 	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
 	dev->features |= NETIF_F_TSO6;
@@ -10516,12 +10573,20 @@
 
 static int __init bnx2x_init(void)
 {
+	bnx2x_wq = create_singlethread_workqueue("bnx2x");
+	if (bnx2x_wq == NULL) {
+		printk(KERN_ERR PFX "Cannot create workqueue\n");
+		return -ENOMEM;
+	}
+
 	return pci_register_driver(&bnx2x_pci_driver);
 }
 
 static void __exit bnx2x_cleanup(void)
 {
 	pci_unregister_driver(&bnx2x_pci_driver);
+
+	destroy_workqueue(bnx2x_wq);
 }
 
 module_init(bnx2x_init);
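
A recurring theme in the bnx2x_main.c hunks is moving the slow path off the shared kernel workqueue: sp_task becomes a delayed_work, the interrupt paths queue it with zero delay on a private single-threaded workqueue created at module init, and the teardown path can then cancel the pending work and flush that queue deterministically. A stripped-down sketch of that arrangement (the example_* names are placeholders):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

struct example_dev {
	struct delayed_work	sp_task;
};

static void example_sp_task(struct work_struct *work)
{
	struct example_dev *ed =
		container_of(work, struct example_dev, sp_task.work);

	/* slow-path processing runs here, outside the interrupt */
	(void)ed;
}

static void example_setup(struct example_dev *ed)
{
	INIT_DELAYED_WORK(&ed->sp_task, example_sp_task);
}

static void example_irq_kick(struct example_dev *ed)
{
	/* delay 0: run as soon as the dedicated worker thread is free */
	queue_delayed_work(example_wq, &ed->sp_task, 0);
}

static void example_quiesce(struct example_dev *ed)
{
	cancel_delayed_work(&ed->sp_task);
	flush_workqueue(example_wq);	/* wait out a running instance */
}

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example");
	return example_wq ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The same file also adds the wmb()/mmiowb() pairs around the producer updates and parenthesizes the ternary in ADD_64: '+' binds tighter than '?:', so the old form collapsed a_hi plus the carry bit into 0 or 1 before adding it to s_hi.
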
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 460c2ca..9fb3883 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4148,7 +4148,7 @@
 
 	bond_for_each_slave(bond, slave, i) {
 		pr_debug("s %p s->p %p c_m %p\n", slave,
-			slave->prev, slave->dev->change_mtu);
+			slave->prev, slave->dev->netdev_ops->ndo_change_mtu);
 
 		res = dev_set_mtu(slave->dev, new_mtu);
 
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 321f43d..840b3d1 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4977,6 +4977,22 @@
 	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
 }
 
+static const struct net_device_ops cas_netdev_ops = {
+	.ndo_open		= cas_open,
+	.ndo_stop		= cas_close,
+	.ndo_start_xmit		= cas_start_xmit,
+	.ndo_get_stats 		= cas_get_stats,
+	.ndo_set_multicast_list = cas_set_multicast,
+	.ndo_do_ioctl		= cas_ioctl,
+	.ndo_tx_timeout		= cas_tx_timeout,
+	.ndo_change_mtu		= cas_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= cas_netpoll,
+#endif
+};
+
 static int __devinit cas_init_one(struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
@@ -5166,22 +5182,13 @@
 	for (i = 0; i < N_RX_FLOWS; i++)
 		skb_queue_head_init(&cp->rx_flows[i]);
 
-	dev->open = cas_open;
-	dev->stop = cas_close;
-	dev->hard_start_xmit = cas_start_xmit;
-	dev->get_stats = cas_get_stats;
-	dev->set_multicast_list = cas_set_multicast;
-	dev->do_ioctl = cas_ioctl;
+	dev->netdev_ops = &cas_netdev_ops;
 	dev->ethtool_ops = &cas_ethtool_ops;
-	dev->tx_timeout = cas_tx_timeout;
 	dev->watchdog_timeo = CAS_TX_TIMEOUT;
-	dev->change_mtu = cas_change_mtu;
+
 #ifdef USE_NAPI
 	netif_napi_add(dev, &cp->napi, cas_poll, 64);
 #endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = cas_netpoll;
-#endif
 	dev->irq = pdev->irq;
 	dev->dma = 0;
 
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 5b346f9..a89d8cc 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -50,12 +50,17 @@
 struct adapter;
 struct sge_qset;
 
+enum {			/* rx_offload flags */
+	T3_RX_CSUM	= 1 << 0,
+	T3_LRO		= 1 << 1,
+};
+
 struct port_info {
 	struct adapter *adapter;
 	struct vlan_group *vlan_grp;
 	struct sge_qset *qs;
 	u8 port_id;
-	u8 rx_csum_offload;
+	u8 rx_offload;
 	u8 nqsets;
 	u8 first_qset;
 	struct cphy phy;
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 2847f94..0089746 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -546,7 +546,7 @@
 		pi->qs = &adap->sge.qs[pi->first_qset];
 		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
 		     ++j, ++qset_idx) {
-			set_qset_lro(dev, qset_idx, pi->rx_csum_offload);
+			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
 			err = t3_sge_alloc_qset(adap, qset_idx, 1,
 				(adap->flags & USING_MSIX) ? qset_idx + 1 :
 							     irq_idx,
@@ -1657,17 +1657,19 @@
 {
 	struct port_info *p = netdev_priv(dev);
 
-	return p->rx_csum_offload;
+	return p->rx_offload & T3_RX_CSUM;
 }
 
 static int set_rx_csum(struct net_device *dev, u32 data)
 {
 	struct port_info *p = netdev_priv(dev);
 
-	p->rx_csum_offload = data;
-	if (!data) {
+	if (data) {
+		p->rx_offload |= T3_RX_CSUM;
+	} else {
 		int i;
 
+		p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
 		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
 			set_qset_lro(dev, i, 0);
 	}
@@ -1830,15 +1832,18 @@
 	int i;
 
 	if (data & ETH_FLAG_LRO) {
-		if (!pi->rx_csum_offload)
+		if (!(pi->rx_offload & T3_RX_CSUM))
 			return -EINVAL;
 
+		pi->rx_offload |= T3_LRO;
 		for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
 			set_qset_lro(dev, i, 1);
 
-	} else
+	} else {
+		pi->rx_offload &= ~T3_LRO;
 		for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
 			set_qset_lro(dev, i, 0);
+	}
 
 	return 0;
 }
@@ -1926,7 +1931,7 @@
 				pi = adap2pinfo(adapter, i);
 				if (t.qset_idx >= pi->first_qset &&
 				    t.qset_idx < pi->first_qset + pi->nqsets &&
-				    !pi->rx_csum_offload)
+				    !(pi->rx_offload & T3_RX_CSUM))
 					return -EINVAL;
 			}
 
@@ -2946,7 +2951,7 @@
 		adapter->port[i] = netdev;
 		pi = netdev_priv(netdev);
 		pi->adapter = adapter;
-		pi->rx_csum_offload = 1;
+		pi->rx_offload = T3_RX_CSUM | T3_LRO;
 		pi->port_id = i;
 		netif_carrier_off(netdev);
 		netif_tx_stop_all_queues(netdev);
@@ -2955,6 +2960,7 @@
 		netdev->mem_end = mmio_start + mmio_len - 1;
 		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
 		netdev->features |= NETIF_F_LLTX;
+		netdev->features |= NETIF_F_LRO;
 		if (pci_using_dac)
 			netdev->features |= NETIF_F_HIGHDMA;
 
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 6c641a8..14f9fb3 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1932,7 +1932,7 @@
 	skb_pull(skb, sizeof(*p) + pad);
 	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
 	pi = netdev_priv(skb->dev);
-	if (pi->rx_csum_offload && p->csum_valid && p->csum == htons(0xffff) &&
+	if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && p->csum == htons(0xffff) &&
 	    !p->fragment) {
 		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
 		skb->ip_summed = CHECKSUM_UNNECESSARY;

The cxgb3 hunks convert the boolean rx_csum_offload into an rx_offload bit mask so RX checksumming and LRO can be tracked and invalidated together: turning checksum offload off also clears the LRO bit, and LRO refuses to turn on without checksum offload. The same flag-byte pattern reduced to plain C (the struct and function names here are illustrative, only the T3_* flag names come from the patch):

#include <linux/errno.h>

enum {				/* rx_offload flags, as in the patch */
	T3_RX_CSUM	= 1 << 0,
	T3_LRO		= 1 << 1,
};

struct demo_port {
	unsigned char rx_offload;
};

static int demo_set_rx_csum(struct demo_port *p, int on)
{
	if (on) {
		p->rx_offload |= T3_RX_CSUM;
	} else {
		/* LRO needs valid checksums, so it is dropped as well */
		p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
	}
	return 0;
}

static int demo_set_lro(struct demo_port *p, int on)
{
	if (on && !(p->rx_offload & T3_RX_CSUM))
		return -EINVAL;	/* cannot enable LRO without RX csum */
	if (on)
		p->rx_offload |= T3_LRO;
	else
		p->rx_offload &= ~T3_LRO;
	return 0;
}
diff --git a/drivers/net/de600.c b/drivers/net/de600.c
index 970f820..de63f1d 100644
--- a/drivers/net/de600.c
+++ b/drivers/net/de600.c
@@ -378,6 +378,16 @@
 	 */
 }
 
+static const struct net_device_ops de600_netdev_ops = {
+	.ndo_open		= de600_open,
+	.ndo_stop		= de600_close,
+	.ndo_start_xmit		= de600_start_xmit,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+
 static struct net_device * __init de600_probe(void)
 {
 	int	i;
@@ -439,9 +449,7 @@
 
 	printk(", Ethernet Address: %pM\n", dev->dev_addr);
 
-	dev->open = de600_open;
-	dev->stop = de600_close;
-	dev->hard_start_xmit = &de600_start_xmit;
+	dev->netdev_ops = &de600_netdev_ops;
 
 	dev->flags&=~IFF_MULTICAST;
 
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index bdfa894..d52f34c 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -784,6 +784,17 @@
 	return 0; /* all ok */
 }
 
+static const struct net_device_ops de620_netdev_ops = {
+	.ndo_open 		= de620_open,
+	.ndo_stop 		= de620_close,
+	.ndo_start_xmit 	= de620_start_xmit,
+	.ndo_tx_timeout 	= de620_timeout,
+	.ndo_set_multicast_list = de620_set_multicast_list,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 /******************************************************************************
  *
  * Only start-up code below
@@ -861,12 +872,8 @@
 	else
 		printk(" UTP)\n");
 
-	dev->open 		= de620_open;
-	dev->stop 		= de620_close;
-	dev->hard_start_xmit 	= de620_start_xmit;
-	dev->tx_timeout 	= de620_timeout;
+	dev->netdev_ops = &de620_netdev_ops;
 	dev->watchdog_timeo	= HZ*2;
-	dev->set_multicast_list = de620_set_multicast_list;
 
 	/* base_addr and irq are already set, see above! */
 
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 134b2d6..86bb876 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -161,6 +161,7 @@
 #include <linux/skbuff.h>
 #include <linux/ethtool.h>
 #include <linux/string.h>
+#include <linux/firmware.h>
 #include <asm/unaligned.h>
 
 
@@ -174,10 +175,17 @@
 #define E100_WATCHDOG_PERIOD	(2 * HZ)
 #define E100_NAPI_WEIGHT	16
 
+#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
+#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
+#define FIRMWARE_D102E		"e100/d102e_ucode.bin"
+
 MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_AUTHOR(DRV_COPYRIGHT);
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
+MODULE_FIRMWARE(FIRMWARE_D101M);
+MODULE_FIRMWARE(FIRMWARE_D101S);
+MODULE_FIRMWARE(FIRMWARE_D102E);
 
 static int debug = 3;
 static int eeprom_bad_csum_allow = 0;
@@ -1049,178 +1057,6 @@
 		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
 }
 
-/********************************************************/
-/*  Micro code for 8086:1229 Rev 8                      */
-/********************************************************/
-
-/*  Parameter values for the D101M B-step  */
-#define D101M_CPUSAVER_TIMER_DWORD		78
-#define D101M_CPUSAVER_BUNDLE_DWORD		65
-#define D101M_CPUSAVER_MIN_SIZE_DWORD		126
-
-#define D101M_B_RCVBUNDLE_UCODE \
-{\
-0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
-0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
-0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
-0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
-0x00380438, 0x00000000, 0x00140000, 0x00380555, \
-0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
-0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
-0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
-0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
-0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
-0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
-0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
-0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
-0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
-0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
-0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
-0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
-0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
-0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
-0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
-0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
-0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
-0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
-0x00380559, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
-0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
-0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
-}
-
-/********************************************************/
-/*  Micro code for 8086:1229 Rev 9                      */
-/********************************************************/
-
-/*  Parameter values for the D101S  */
-#define D101S_CPUSAVER_TIMER_DWORD		78
-#define D101S_CPUSAVER_BUNDLE_DWORD		67
-#define D101S_CPUSAVER_MIN_SIZE_DWORD		128
-
-#define D101S_RCVBUNDLE_UCODE \
-{\
-0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
-0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
-0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
-0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
-0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
-0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
-0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
-0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
-0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
-0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
-0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
-0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
-0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
-0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
-0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
-0x00101313, 0x00380700, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
-0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
-0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
-0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
-0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
-0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
-0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
-0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
-0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
-0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00130831, \
-0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
-0x00041000, 0x00010004, 0x00380700  \
-}
-
-/********************************************************/
-/*  Micro code for the 8086:1229 Rev F/10               */
-/********************************************************/
-
-/*  Parameter values for the D102 E-step  */
-#define D102_E_CPUSAVER_TIMER_DWORD		42
-#define D102_E_CPUSAVER_BUNDLE_DWORD		54
-#define D102_E_CPUSAVER_MIN_SIZE_DWORD		46
-
-#define     D102_E_RCVBUNDLE_UCODE \
-{\
-0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
-0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
-0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
-0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
-0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
-0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
-0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
-0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
-0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-0x00000000, 0x00000000, 0x00000000, 0x00000000, \
-}
-
-static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
-{
-/* *INDENT-OFF* */
-	static struct {
-		u32 ucode[UCODE_SIZE + 1];
-		u8 mac;
-		u8 timer_dword;
-		u8 bundle_dword;
-		u8 min_size_dword;
-	} ucode_opts[] = {
-		{ D101M_B_RCVBUNDLE_UCODE,
-		  mac_82559_D101M,
-		  D101M_CPUSAVER_TIMER_DWORD,
-		  D101M_CPUSAVER_BUNDLE_DWORD,
-		  D101M_CPUSAVER_MIN_SIZE_DWORD },
-		{ D101S_RCVBUNDLE_UCODE,
-		  mac_82559_D101S,
-		  D101S_CPUSAVER_TIMER_DWORD,
-		  D101S_CPUSAVER_BUNDLE_DWORD,
-		  D101S_CPUSAVER_MIN_SIZE_DWORD },
-		{ D102_E_RCVBUNDLE_UCODE,
-		  mac_82551_F,
-		  D102_E_CPUSAVER_TIMER_DWORD,
-		  D102_E_CPUSAVER_BUNDLE_DWORD,
-		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
-		{ D102_E_RCVBUNDLE_UCODE,
-		  mac_82551_10,
-		  D102_E_CPUSAVER_TIMER_DWORD,
-		  D102_E_CPUSAVER_BUNDLE_DWORD,
-		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
-		{ {0}, 0, 0, 0, 0}
-	}, *opts;
-/* *INDENT-ON* */
-
 /*************************************************************************
 *  CPUSaver parameters
 *
@@ -1280,42 +1116,101 @@
 #define BUNDLEMAX (u16)6
 #define INTDELAY (u16)1536 /* 0x600 */
 
+/* Initialize firmware */
+static const struct firmware *e100_request_firmware(struct nic *nic)
+{
+	const char *fw_name;
+	const struct firmware *fw;
+	u8 timer, bundle, min_size;
+	int err;
+
 	/* do not load u-code for ICH devices */
 	if (nic->flags & ich)
-		goto noloaducode;
+		return NULL;
 
 	/* Search for ucode match against h/w revision */
-	for (opts = ucode_opts; opts->mac; opts++) {
-		int i;
-		u32 *ucode = opts->ucode;
-		if (nic->mac != opts->mac)
-			continue;
+	if (nic->mac == mac_82559_D101M)
+		fw_name = FIRMWARE_D101M;
+	else if (nic->mac == mac_82559_D101S)
+		fw_name = FIRMWARE_D101S;
+	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
+		fw_name = FIRMWARE_D102E;
+	else /* No ucode on other devices */
+		return NULL;
 
-		/* Insert user-tunable settings */
-		ucode[opts->timer_dword] &= 0xFFFF0000;
-		ucode[opts->timer_dword] |= INTDELAY;
-		ucode[opts->bundle_dword] &= 0xFFFF0000;
-		ucode[opts->bundle_dword] |= BUNDLEMAX;
-		ucode[opts->min_size_dword] &= 0xFFFF0000;
-		ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;
-
-		for (i = 0; i < UCODE_SIZE; i++)
-			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
-		cb->command = cpu_to_le16(cb_ucode | cb_el);
-		return;
+	err = request_firmware(&fw, fw_name, &nic->pdev->dev);
+	if (err) {
+		DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
+			fw_name, err);
+		return ERR_PTR(err);
+	}
+	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
+	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
+	if (fw->size != UCODE_SIZE * 4 + 3) {
+		DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n",
+			fw_name, fw->size);
+		release_firmware(fw);
+		return ERR_PTR(-EINVAL);
 	}
 
-noloaducode:
-	cb->command = cpu_to_le16(cb_nop | cb_el);
+	/* Read timer, bundle and min_size from end of firmware blob */
+	timer = fw->data[UCODE_SIZE * 4];
+	bundle = fw->data[UCODE_SIZE * 4 + 1];
+	min_size = fw->data[UCODE_SIZE * 4 + 2];
+
+	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
+	    min_size >= UCODE_SIZE) {
+		DPRINTK(PROBE, ERR,
+			"\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
+			fw_name, timer, bundle, min_size);
+		release_firmware(fw);
+		return ERR_PTR(-EINVAL);
+	}
+	/* OK, firmware is validated and ready to use... */
+	return fw;
 }
 
-static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
-	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
+static void e100_setup_ucode(struct nic *nic, struct cb *cb,
+			     struct sk_buff *skb)
 {
+	const struct firmware *fw = (void *)skb;
+	u8 timer, bundle, min_size;
+
+	/* It's not a real skb; we just abused the fact that e100_exec_cb
+	   will pass it through to here... */
+	cb->skb = NULL;
+
+	/* firmware is stored as little endian already */
+	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);
+
+	/* Read timer, bundle and min_size from end of firmware blob */
+	timer = fw->data[UCODE_SIZE * 4];
+	bundle = fw->data[UCODE_SIZE * 4 + 1];
+	min_size = fw->data[UCODE_SIZE * 4 + 2];
+
+	/* Insert user-tunable settings in cb->u.ucode */
+	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
+	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
+	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
+	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
+	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
+	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
+
+	cb->command = cpu_to_le16(cb_ucode | cb_el);
+}
+
+static inline int e100_load_ucode_wait(struct nic *nic)
+{
+	const struct firmware *fw;
 	int err = 0, counter = 50;
 	struct cb *cb = nic->cb_to_clean;
 
-	if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
+	fw = e100_request_firmware(nic);
+	/* If it's NULL, then no ucode is required */
+	if (!fw || IS_ERR(fw))
+		return PTR_ERR(fw);
+
+	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
 		DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
 
 	/* must restart cuc */
@@ -1435,7 +1330,7 @@
 		return err;
 	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
 		return err;
-	if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
+	if ((err = e100_load_ucode_wait(nic)))
 		return err;
 	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
 		return err;
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index d04eef53..e1a3fc1 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -6758,7 +6758,7 @@
  * returns: - E1000_ERR_XXX
  *            E1000_SUCCESS
  *
- * For phy's older then IGP, this function simply reads the polarity bit in the
+ * For phy's older than IGP, this function simply reads the polarity bit in the
  * Phy Status register.  For IGP phy's, this bit is valid only if link speed is
  * 10 Mbps.  If the link speed is 100 Mbps there is no polarity so this bit will
  * return 0.  If the link speed is 1000 Mbps the polarity status is in the
@@ -6834,7 +6834,7 @@
  * returns: - E1000_ERR_XXX
  *            E1000_SUCCESS
  *
- * For phy's older then IGP, this function reads the Downshift bit in the Phy
+ * For phy's older than IGP, this function reads the Downshift bit in the Phy
  * Specific Status register.  For IGP phy's, it reads the Downgrade bit in the
  * Link Health register.  In IGP this bit is latched high, so the driver must
  * read it immediately after link is established.
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index f2a5963..e415e81 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -390,7 +390,8 @@
 }
 
 static DEFINE_MUTEX(nvm_mutex);
-static pid_t nvm_owner = -1;
+static pid_t nvm_owner_pid = -1;
+static char nvm_owner_name[TASK_COMM_LEN] = "";
 
 /**
  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
@@ -408,11 +409,15 @@
 	might_sleep();
 
 	if (!mutex_trylock(&nvm_mutex)) {
-		WARN(1, KERN_ERR "e1000e mutex contention. Owned by pid %d\n",
-		     nvm_owner);
+		WARN(1, KERN_ERR "e1000e mutex contention. Owned by process "
+		     "%s (pid %d), required by process %s (pid %d)\n",
+		     nvm_owner_name, nvm_owner_pid,
+		     current->comm, current->pid);
+
 		mutex_lock(&nvm_mutex);
 	}
-	nvm_owner = current->pid;
+	nvm_owner_pid = current->pid;
+	strncpy(nvm_owner_name, current->comm, TASK_COMM_LEN);
 
 	while (timeout) {
 		extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -430,7 +435,8 @@
 		hw_dbg(hw, "FW or HW has locked the resource for too long.\n");
 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
 		ew32(EXTCNF_CTRL, extcnf_ctrl);
-		nvm_owner = -1;
+		nvm_owner_pid = -1;
+		strcpy(nvm_owner_name, "");
 		mutex_unlock(&nvm_mutex);
 		return -E1000_ERR_CONFIG;
 	}
@@ -454,7 +460,8 @@
 	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
 	ew32(EXTCNF_CTRL, extcnf_ctrl);
 
-	nvm_owner = -1;
+	nvm_owner_pid = -1;
+	strcpy(nvm_owner_name, "");
 	mutex_unlock(&nvm_mutex);
 }
 
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index d4639fa..91817d0 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4807,7 +4807,7 @@
 		}
 	}
 
-	err = pci_request_selected_regions(pdev,
+	err = pci_request_selected_regions_exclusive(pdev,
 	                                  pci_select_bars(pdev, IORESOURCE_MEM),
 	                                  e1000e_driver_name);
 	if (err)
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index 20eb05c..b07ba19 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -169,6 +169,7 @@
 	.ndo_get_stats		= ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller 	= ei_poll,
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index e3131ea..dfe9226 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -132,7 +132,7 @@
 	int x;
 	unsigned char *deb = adr;
 	for (x = 0; x < len; x += 16) {
-		printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
+		printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
 			  deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
 		deb += 16;
 	}
@@ -883,7 +883,7 @@
 
 	while (eqe) {
 		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
-		ehea_error("QP aff_err: entry=0x%lx, token=0x%x",
+		ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
 			   eqe->entry, qp_token);
 
 		qp = port->port_res[qp_token].qp;
@@ -1159,7 +1159,7 @@
 		netif_stop_queue(port->netdev);
 		break;
 	default:
-		ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
+		ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
 		break;
 	}
 }
@@ -1971,7 +1971,7 @@
 		}
 
 		if (dev->mc_count > port->adapter->max_mc_mac) {
-			ehea_info("Mcast registration limit reached (0x%lx). "
+			ehea_info("Mcast registration limit reached (0x%llx). "
 				  "Use ALLMULTI!",
 				  port->adapter->max_mc_mac);
 			goto out;
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 2a33a61..8fe9dca 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -214,7 +214,7 @@
 			     u64 *qp_handle, struct h_epas *h_epas)
 {
 	u64 hret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	u64 allocate_controls =
 	    EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
@@ -312,7 +312,7 @@
 			     u64 *cq_handle, struct h_epas *epas)
 {
 	u64 hret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
 				 outs,
@@ -374,7 +374,7 @@
 			     struct ehea_eq_attr *eq_attr, u64 *eq_handle)
 {
 	u64 hret, allocate_controls;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	/* resource type */
 	allocate_controls =
@@ -407,7 +407,7 @@
 			  u16 *out_swr, u16 *out_rwr)
 {
 	u64 hret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP,
 				 outs,
@@ -449,7 +449,7 @@
 			struct ehea_mr *mr)
 {
 	u64 hret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	hret = ehea_plpar_hcall9(H_REGISTER_SMR,
 				 outs,
@@ -468,7 +468,7 @@
 
 u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
 {
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
 				 outs,
@@ -493,7 +493,7 @@
 			     const u32 pd, u64 *mr_handle, u32 *lkey)
 {
 	u64 hret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
 				 outs,
@@ -564,7 +564,7 @@
 			    const u8 cb_cat, const u64 select_mask,
 			    void *cb_addr)
 {
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 	u64 port_info;
 	u64 arr_index = 0;
 	u64 cb_logaddr = virt_to_abs(cb_addr);
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 225c692..49d766e 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -168,7 +168,7 @@
 					     cq->fw_handle, rpage, 1);
 		if (hret < H_SUCCESS) {
 			ehea_error("register_rpage_cq failed ehea_cq=%p "
-				   "hret=%lx counter=%i act_pages=%i",
+				   "hret=%llx counter=%i act_pages=%i",
 				   cq, hret, counter, cq->attr.nr_pages);
 			goto out_kill_hwq;
 		}
@@ -178,13 +178,13 @@
 
 			if ((hret != H_SUCCESS) || (vpage)) {
 				ehea_error("registration of pages not "
-					   "complete hret=%lx\n", hret);
+					   "complete hret=%llx\n", hret);
 				goto out_kill_hwq;
 			}
 		} else {
 			if (hret != H_PAGE_REGISTERED) {
 				ehea_error("CQ: registration of page failed "
-					   "hret=%lx\n", hret);
+					   "hret=%llx\n", hret);
 				goto out_kill_hwq;
 			}
 		}
@@ -986,15 +986,15 @@
 		length = EHEA_PAGESIZE;
 
 	if (type == 0x8) /* Queue Pair */
-		ehea_error("QP (resource=%lX) state: AER=0x%lX, AERR=0x%lX, "
-			   "port=%lX", resource, data[6], data[12], data[22]);
+		ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, "
+			   "port=%llX", resource, data[6], data[12], data[22]);
 
 	if (type == 0x4) /* Completion Queue */
-		ehea_error("CQ (resource=%lX) state: AER=0x%lX", resource,
+		ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource,
 			   data[6]);
 
 	if (type == 0x3) /* Event Queue */
-		ehea_error("EQ (resource=%lX) state: AER=0x%lX", resource,
+		ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource,
 			   data[6]);
 
 	ehea_dump(data, length, "error data");
@@ -1016,11 +1016,11 @@
 				rblock);
 
 	if (ret == H_R_STATE)
-		ehea_error("No error data is available: %lX.", res_handle);
+		ehea_error("No error data is available: %llX.", res_handle);
 	else if (ret == H_SUCCESS)
 		print_error_data(rblock);
 	else
-		ehea_error("Error data could not be fetched: %lX", res_handle);
+		ehea_error("Error data could not be fetched: %llX", res_handle);
 
 	kfree(rblock);
 }
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index cefe1d9..fc6cc03 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1531,6 +1531,17 @@
 	return enc28j60_hw_init(priv);
 }
 
+static const struct net_device_ops enc28j60_netdev_ops = {
+	.ndo_open		= enc28j60_net_open,
+	.ndo_stop		= enc28j60_net_close,
+	.ndo_start_xmit		= enc28j60_send_packet,
+	.ndo_set_multicast_list = enc28j60_set_multicast_list,
+	.ndo_set_mac_address	= enc28j60_set_mac_address,
+	.ndo_tx_timeout		= enc28j60_tx_timeout,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int __devinit enc28j60_probe(struct spi_device *spi)
 {
 	struct net_device *dev;
@@ -1585,12 +1596,7 @@
 
 	dev->if_port = IF_PORT_10BASET;
 	dev->irq = spi->irq;
-	dev->open = enc28j60_net_open;
-	dev->stop = enc28j60_net_close;
-	dev->hard_start_xmit = enc28j60_send_packet;
-	dev->set_multicast_list = &enc28j60_set_multicast_list;
-	dev->set_mac_address = enc28j60_set_mac_address;
-	dev->tx_timeout = &enc28j60_tx_timeout;
+	dev->netdev_ops = &enc28j60_netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 	SET_ETHTOOL_OPS(dev, &enc28j60_ethtool_ops);
 
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index d039e16..7d60551 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1599,6 +1599,7 @@
 	.ndo_start_xmit		= enic_hard_start_xmit,
 	.ndo_get_stats		= enic_get_stats,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_set_multicast_list	= enic_set_multicast_list,
 	.ndo_change_mtu		= enic_change_mtu,
 	.ndo_vlan_rx_register	= enic_vlan_rx_register,
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index f9b37c8..a539bc3 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -308,7 +308,18 @@
 static struct net_device_stats *epic_get_stats(struct net_device *dev);
 static void set_rx_mode(struct net_device *dev);
 
-
+static const struct net_device_ops epic_netdev_ops = {
+	.ndo_open		= epic_open,
+	.ndo_stop		= epic_close,
+	.ndo_start_xmit		= epic_start_xmit,
+	.ndo_tx_timeout 	= epic_tx_timeout,
+	.ndo_get_stats		= epic_get_stats,
+	.ndo_set_multicast_list = set_rx_mode,
+	.ndo_do_ioctl 		= netdev_ioctl,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
 
 static int __devinit epic_init_one (struct pci_dev *pdev,
 				    const struct pci_device_id *ent)
@@ -483,15 +494,9 @@
 	dev->if_port = ep->default_port = option;
 
 	/* The Epic-specific entries in the device structure. */
-	dev->open = &epic_open;
-	dev->hard_start_xmit = &epic_start_xmit;
-	dev->stop = &epic_close;
-	dev->get_stats = &epic_get_stats;
-	dev->set_multicast_list = &set_rx_mode;
-	dev->do_ioctl = &netdev_ioctl;
+	dev->netdev_ops = &epic_netdev_ops;
 	dev->ethtool_ops = &netdev_ethtool_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
-	dev->tx_timeout = &epic_tx_timeout;
 	netif_napi_add(dev, &ep->napi, epic_poll, 64);
 
 	ret = register_netdev(dev);
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 31ab1ff..daf7272c 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -467,6 +467,18 @@
 	}
 }
 
+static const struct net_device_ops netdev_ops = {
+	.ndo_open		= netdev_open,
+	.ndo_stop		= netdev_close,
+	.ndo_start_xmit		= start_tx,
+	.ndo_get_stats 		= get_stats,
+	.ndo_set_multicast_list = set_rx_mode,
+	.ndo_do_ioctl		= mii_ioctl,
+	.ndo_tx_timeout		= fealnx_tx_timeout,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
 
 static int __devinit fealnx_init_one(struct pci_dev *pdev,
 				     const struct pci_device_id *ent)
@@ -649,15 +661,8 @@
 		np->mii.force_media = 1;
 	}
 
-	/* The chip-specific entries in the device structure. */
-	dev->open = &netdev_open;
-	dev->hard_start_xmit = &start_tx;
-	dev->stop = &netdev_close;
-	dev->get_stats = &get_stats;
-	dev->set_multicast_list = &set_rx_mode;
-	dev->do_ioctl = &mii_ioctl;
+	dev->netdev_ops = &netdev_ops;
 	dev->ethtool_ops = &netdev_ethtool_ops;
-	dev->tx_timeout = &fealnx_tx_timeout;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 	err = register_netdev(dev);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5b68dc2..5b910cf 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -13,7 +13,7 @@
  * Copyright (C) 2004 Andrew de Quincey (wol support)
  * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
  *		IRQ rate fixes, bigendian fixes, cleanups, verification)
- * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation
+ * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -39,7 +39,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.61"
+#define FORCEDETH_VERSION		"0.62"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -2096,14 +2096,15 @@
 			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	}
 
+	spin_lock_irqsave(&np->lock, flags);
 	empty_slots = nv_get_empty_tx_slots(np);
 	if (unlikely(empty_slots <= entries)) {
-		spin_lock_irqsave(&np->lock, flags);
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
 		spin_unlock_irqrestore(&np->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
+	spin_unlock_irqrestore(&np->lock, flags);
 
 	start_tx = put_tx = np->put_tx.orig;
 
@@ -2214,14 +2215,15 @@
 			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	}
 
+	spin_lock_irqsave(&np->lock, flags);
 	empty_slots = nv_get_empty_tx_slots(np);
 	if (unlikely(empty_slots <= entries)) {
-		spin_lock_irqsave(&np->lock, flags);
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
 		spin_unlock_irqrestore(&np->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
+	spin_unlock_irqrestore(&np->lock, flags);
 
 	start_tx = put_tx = np->put_tx.ex;
 	start_tx_ctx = np->put_tx_ctx;
@@ -3403,10 +3405,10 @@
 
 #ifdef CONFIG_FORCEDETH_NAPI
 		if (events & NVREG_IRQ_RX_ALL) {
+			spin_lock(&np->lock);
 			netif_rx_schedule(&np->napi);
 
 			/* Disable further receive irq's */
-			spin_lock(&np->lock);
 			np->irqmask &= ~NVREG_IRQ_RX_ALL;
 
 			if (np->msi_flags & NV_MSI_X_ENABLED)
@@ -3520,10 +3522,10 @@
 
 #ifdef CONFIG_FORCEDETH_NAPI
 		if (events & NVREG_IRQ_RX_ALL) {
+			spin_lock(&np->lock);
 			netif_rx_schedule(&np->napi);
 
 			/* Disable further receive irq's */
-			spin_lock(&np->lock);
 			np->irqmask &= ~NVREG_IRQ_RX_ALL;
 
 			if (np->msi_flags & NV_MSI_X_ENABLED)
@@ -6167,19 +6169,19 @@
 	},
 	{	/* MCP79 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
 	},
 	{	/* MCP79 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
 	},
 	{	/* MCP79 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
 	},
 	{	/* MCP79 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
 	},
 	{0,},
 };
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 4e6a919..ce900e5 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -795,6 +795,7 @@
 
 	err = fs_init_phy(dev);
 	if (err) {
+		free_irq(fep->interrupt, dev);
 		if (fep->fpi->use_napi)
 			napi_disable(&fep->napi);
 		return err;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index c672ecf..ea53067 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -238,8 +238,8 @@
 			goto err_out;
 		}
 
-		snprintf(priv->phy_bus_id, BUS_ID_SIZE, PHY_ID_FMT, "0",
-				fixed_link[0]);
+		snprintf(priv->phy_bus_id, sizeof(priv->phy_bus_id),
+				PHY_ID_FMT, "0", fixed_link[0]);
 	} else {
 		phy = of_find_node_by_phandle(*ph);
 
@@ -256,7 +256,7 @@
 		of_node_put(mdio);
 
 		gfar_mdio_bus_name(bus_name, mdio);
-		snprintf(priv->phy_bus_id, BUS_ID_SIZE, "%s:%02x",
+		snprintf(priv->phy_bus_id, sizeof(priv->phy_bus_id), "%s:%02x",
 				bus_name, *id);
 	}
 
@@ -296,6 +296,20 @@
 	return err;
 }
 
+/* Ioctl MII Interface */
+static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	if (!priv->phydev)
+		return -ENODEV;
+
+	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
+}
+
 /* Set up the ethernet device structure, private data,
  * and anything else we need before we start */
 static int gfar_probe(struct of_device *ofdev,
@@ -366,6 +380,7 @@
 	dev->set_multicast_list = gfar_set_multi;
 
 	dev->ethtool_ops = &gfar_ethtool_ops;
+	dev->do_ioctl = gfar_ioctl;
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
 		priv->rx_csum_enable = 1;
@@ -1607,10 +1622,18 @@
 static void gfar_schedule_cleanup(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->txlock, flags);
+	spin_lock(&priv->rxlock);
+
 	if (netif_rx_schedule_prep(&priv->napi)) {
 		gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
 		__netif_rx_schedule(&priv->napi);
 	}
+
+	spin_unlock(&priv->rxlock);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 }
 
 /* Interrupt Handler for Transmit complete */
@@ -1973,6 +1996,8 @@
 			case 1000:
 				tempval =
 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+
+				ecntrl &= ~(ECNTRL_R100);
 				break;
 			case 100:
 			case 10:
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 3220022..7e8b3c5 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -576,6 +576,7 @@
 	.ndo_set_multicast_list	= set_rx_mode,
 	.ndo_change_mtu		= eth_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_tx_timeout		= hamachi_tx_timeout,
 	.ndo_do_ioctl		= netdev_ioctl,
 };
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 50f1e17..2d40898 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -717,11 +717,12 @@
 	unsigned int cmd, unsigned long arg)
 {
 	struct sixpack *sp = sp_get(tty);
-	struct net_device *dev = sp->dev;
+	struct net_device *dev;
 	unsigned int tmp, err;
 
 	if (!sp)
 		return -ENXIO;
+	dev = sp->dev;
 
 	switch(cmd) {
 	case SIOCGIFNAME:
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index b507dbc..5e070f4 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -166,6 +166,7 @@
 	.ndo_get_stats		= eip_get_stats,
 	.ndo_set_multicast_list = eip_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= eip_poll,
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index ebe7651..ad8be7e 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -425,6 +425,28 @@
 }
 #endif /* !MODULE && CONFIG_ISA */
 
+static const struct net_device_ops hp100_bm_netdev_ops = {
+	.ndo_open		= hp100_open,
+	.ndo_stop		= hp100_close,
+	.ndo_start_xmit		= hp100_start_xmit_bm,
+	.ndo_get_stats 		= hp100_get_stats,
+	.ndo_set_multicast_list = hp100_set_multicast_list,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+static const struct net_device_ops hp100_netdev_ops = {
+	.ndo_open		= hp100_open,
+	.ndo_stop		= hp100_close,
+	.ndo_start_xmit		= hp100_start_xmit,
+	.ndo_get_stats 		= hp100_get_stats,
+	.ndo_set_multicast_list = hp100_set_multicast_list,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int __devinit hp100_probe1(struct net_device *dev, int ioaddr,
 				  u_char bus, struct pci_dev *pci_dev)
 {
@@ -657,16 +679,10 @@
 	lp->virt_memory_size = virt_memory_size;
 	lp->rx_ratio = hp100_rx_ratio;	/* can be conf'd with insmod */
 
-	dev->open = hp100_open;
-	dev->stop = hp100_close;
-
 	if (lp->mode == 1)	/* busmaster */
-		dev->hard_start_xmit = hp100_start_xmit_bm;
+		dev->netdev_ops = &hp100_bm_netdev_ops;
 	else
-		dev->hard_start_xmit = hp100_start_xmit;
-
-	dev->get_stats = hp100_get_stats;
-	dev->set_multicast_list = &hp100_set_multicast_list;
+		dev->netdev_ops = &hp100_netdev_ops;
 
 	/* Ask the card for which IRQ line it is configured */
 	if (bus == HP100_BUS_PCI) {
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index 9cb38a8..8ac0930 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -103,6 +103,7 @@
 	.ndo_get_stats		= ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ei_poll,
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index ecf9798..2a2fc17 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -613,7 +613,9 @@
 	INIT_LIST_HEAD(&mal->list);
 	spin_lock_init(&mal->lock);
 
-	netif_napi_add(NULL, &mal->napi, mal_poll,
+	init_dummy_netdev(&mal->dummy_dev);
+
+	netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
 		       CONFIG_IBM_NEW_EMAC_POLL_WEIGHT);
 
 	/* Load power-on reset defaults */
diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ibm_newemac/mal.h
index 2f0a873..9ededfb 100644
--- a/drivers/net/ibm_newemac/mal.h
+++ b/drivers/net/ibm_newemac/mal.h
@@ -214,6 +214,8 @@
 	int			index;
 	spinlock_t		lock;
 
+	struct net_device	dummy_dev;
+
 	unsigned int features;
 };
 
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 9bc0f17..dfa6348 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -602,7 +602,7 @@
 
 	if(lpar_rc != H_SUCCESS) {
 		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
-		ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
+		ibmveth_error_printk("buffer TCE:0x%llx filter TCE:0x%llx rxq desc:0x%llx MAC:0x%llx\n",
 				     adapter->buffer_list_dma,
 				     adapter->filter_list_dma,
 				     rxq_desc.desc,
@@ -754,7 +754,7 @@
 				    void (*done) (struct net_device *, u32))
 {
 	struct ibmveth_adapter *adapter = netdev_priv(dev);
-	u64 set_attr, clr_attr, ret_attr;
+	unsigned long set_attr, clr_attr, ret_attr;
 	long ret;
 	int rc1 = 0, rc2 = 0;
 	int restart = 0;
@@ -1209,7 +1209,7 @@
 	long ret;
 	struct net_device *netdev;
 	struct ibmveth_adapter *adapter;
-	u64 set_attr, ret_attr;
+	unsigned long set_attr, ret_attr;
 
 	unsigned char *mac_addr_p;
 	unsigned int *mcastFilterSize_p;
@@ -1378,13 +1378,13 @@
 	seq_printf(seq, "Firmware MAC:    %pM\n", firmware_mac);
 
 	seq_printf(seq, "\nAdapter Statistics:\n");
-	seq_printf(seq, "  TX:  vio_map_single failres:      %ld\n", adapter->tx_map_failed);
-	seq_printf(seq, "       send failures:               %ld\n", adapter->tx_send_failed);
-	seq_printf(seq, "  RX:  replenish task cycles:       %ld\n", adapter->replenish_task_cycles);
-	seq_printf(seq, "       alloc_skb_failures:          %ld\n", adapter->replenish_no_mem);
-	seq_printf(seq, "       add buffer failures:         %ld\n", adapter->replenish_add_buff_failure);
-	seq_printf(seq, "       invalid buffers:             %ld\n", adapter->rx_invalid_buffer);
-	seq_printf(seq, "       no buffers:                  %ld\n", adapter->rx_no_buffer);
+	seq_printf(seq, "  TX:  vio_map_single failres:      %lld\n", adapter->tx_map_failed);
+	seq_printf(seq, "       send failures:               %lld\n", adapter->tx_send_failed);
+	seq_printf(seq, "  RX:  replenish task cycles:       %lld\n", adapter->replenish_task_cycles);
+	seq_printf(seq, "       alloc_skb_failures:          %lld\n", adapter->replenish_no_mem);
+	seq_printf(seq, "       add buffer failures:         %lld\n", adapter->replenish_add_buff_failure);
+	seq_printf(seq, "       invalid buffers:             %lld\n", adapter->rx_invalid_buffer);
+	seq_printf(seq, "       no buffers:                  %lld\n", adapter->rx_no_buffer);
 
 	return 0;
 }
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index d281869..ec76ace 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -39,11 +39,11 @@
 #define IbmVethMcastRemoveFilter     0x2UL
 #define IbmVethMcastClearFilterTable 0x3UL
 
-#define IBMVETH_ILLAN_PADDED_PKT_CSUM	0x0000000000002000ULL
-#define IBMVETH_ILLAN_TRUNK_PRI_MASK	0x0000000000000F00ULL
-#define IBMVETH_ILLAN_IPV6_TCP_CSUM		0x0000000000000004ULL
-#define IBMVETH_ILLAN_IPV4_TCP_CSUM		0x0000000000000002ULL
-#define IBMVETH_ILLAN_ACTIVE_TRUNK		0x0000000000000001ULL
+#define IBMVETH_ILLAN_PADDED_PKT_CSUM	0x0000000000002000UL
+#define IBMVETH_ILLAN_TRUNK_PRI_MASK	0x0000000000000F00UL
+#define IBMVETH_ILLAN_IPV6_TCP_CSUM		0x0000000000000004UL
+#define IBMVETH_ILLAN_IPV4_TCP_CSUM		0x0000000000000002UL
+#define IBMVETH_ILLAN_ACTIVE_TRUNK		0x0000000000000001UL
 
 /* hcall macros */
 #define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 7b6d435..360aa5e 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -2210,6 +2210,19 @@
 	pci_set_drvdata(pdev, NULL);
 }
 
+static const struct net_device_ops ipg_netdev_ops = {
+	.ndo_open		= ipg_nic_open,
+	.ndo_stop		= ipg_nic_stop,
+	.ndo_start_xmit		= ipg_nic_hard_start_xmit,
+	.ndo_get_stats		= ipg_nic_get_stats,
+	.ndo_set_multicast_list = ipg_nic_set_multicast_list,
+	.ndo_do_ioctl		= ipg_ioctl,
+	.ndo_tx_timeout 	= ipg_tx_timeout,
+	.ndo_change_mtu 	= ipg_nic_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int __devinit ipg_probe(struct pci_dev *pdev,
 			       const struct pci_device_id *id)
 {
@@ -2258,15 +2271,7 @@
 
 	/* Declare IPG NIC functions for Ethernet device methods.
 	 */
-	dev->open = &ipg_nic_open;
-	dev->stop = &ipg_nic_stop;
-	dev->hard_start_xmit = &ipg_nic_hard_start_xmit;
-	dev->get_stats = &ipg_nic_get_stats;
-	dev->set_multicast_list = &ipg_nic_set_multicast_list;
-	dev->do_ioctl = ipg_ioctl;
-	dev->tx_timeout = ipg_tx_timeout;
-	dev->change_mtu = &ipg_nic_change_mtu;
-
+	dev->netdev_ops = &ipg_netdev_ops;
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);
 
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 3c58e67..17779f9 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -109,7 +109,6 @@
 static int  ali_ircc_net_close(struct net_device *dev);
 static int  ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud);
-static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev);
 
 /* SIR function */
 static int  ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -366,7 +365,6 @@
 	dev->open            = ali_ircc_net_open;
 	dev->stop            = ali_ircc_net_close;
 	dev->do_ioctl        = ali_ircc_net_ioctl;
-	dev->get_stats	     = ali_ircc_net_get_stats;
 
 	err = register_netdev(dev);
 	if (err) {
@@ -876,7 +874,7 @@
          * async_unwrap_char will deliver all found frames  
 	 */
 	do {
-		async_unwrap_char(self->netdev, &self->stats, &self->rx_buff, 
+		async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
 				  inb(iobase+UART_RX));
 
 		/* Make sure we don't stay here too long */
@@ -943,7 +941,7 @@
 			netif_wake_queue(self->netdev);	
 		}
 			
-		self->stats.tx_packets++;
+		self->netdev->stats.tx_packets++;
 		
 		/* Turn on receive interrupts */
 		outb(UART_IER_RDI, iobase+UART_IER);
@@ -1467,7 +1465,7 @@
 	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
 	self->tx_fifo.tail += skb->len;
 
-	self->stats.tx_bytes += skb->len;
+	dev->stats.tx_bytes += skb->len;
 
 	skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
 		      skb->len);
@@ -1661,12 +1659,12 @@
 	
 	{
 		IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __func__);
-		self->stats.tx_errors++;
-		self->stats.tx_fifo_errors++;		
+		self->netdev->stats.tx_errors++;
+		self->netdev->stats.tx_fifo_errors++;
 	}
 	else 
 	{
-		self->stats.tx_packets++;
+		self->netdev->stats.tx_packets++;
 	}
 
 	/* Check if we need to change the speed */
@@ -1831,35 +1829,35 @@
 			IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __func__ );
 			
 			/* Skip frame */
-			self->stats.rx_errors++;
+			self->netdev->stats.rx_errors++;
 			
 			self->rx_buff.data += len;
 			
 			if (status & LSR_FIFO_UR) 
 			{
-				self->stats.rx_frame_errors++;
+				self->netdev->stats.rx_frame_errors++;
 				IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __func__ );
 			}	
 			if (status & LSR_FRAME_ERROR)
 			{
-				self->stats.rx_frame_errors++;
+				self->netdev->stats.rx_frame_errors++;
 				IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __func__ );
 			}
 							
 			if (status & LSR_CRC_ERROR) 
 			{
-				self->stats.rx_crc_errors++;
+				self->netdev->stats.rx_crc_errors++;
 				IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __func__ );
 			}
 			
 			if(self->rcvFramesOverflow)
 			{
-				self->stats.rx_frame_errors++;
+				self->netdev->stats.rx_frame_errors++;
 				IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __func__ );
 			}
 			if(len == 0)
 			{
-				self->stats.rx_frame_errors++;
+				self->netdev->stats.rx_frame_errors++;
 				IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __func__ );
 			}
 		}	 
@@ -1910,7 +1908,7 @@
 				IRDA_WARNING("%s(), memory squeeze, "
 					     "dropping frame.\n",
 					     __func__);
-				self->stats.rx_dropped++;
+				self->netdev->stats.rx_dropped++;
 
 				return FALSE;
 			}
@@ -1924,8 +1922,8 @@
 
 			/* Move to next frame */
 			self->rx_buff.data += len;
-			self->stats.rx_bytes += len;
-			self->stats.rx_packets++;
+			self->netdev->stats.rx_bytes += len;
+			self->netdev->stats.rx_packets++;
 
 			skb->dev = self->netdev;
 			skb_reset_mac_header(skb);
@@ -1994,7 +1992,7 @@
 	self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, 
 					   self->tx_buff.truesize);
 	
-	self->stats.tx_bytes += self->tx_buff.len;
+	self->netdev->stats.tx_bytes += self->tx_buff.len;
 
 	/* Turn on transmit finished interrupt. Will fire immediately!  */
 	outb(UART_IER_THRI, iobase+UART_IER); 
@@ -2111,17 +2109,6 @@
 	return status;
 }
 
-static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev)
-{
-	struct ali_ircc_cb *self = netdev_priv(dev);
-	
-	IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
-		
-	IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
-	
-	return &self->stats;
-}
-
 static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)
 {
 	struct ali_ircc_cb *self = platform_get_drvdata(dev);
diff --git a/drivers/net/irda/ali-ircc.h b/drivers/net/irda/ali-ircc.h
index ed35d99..0c8edb4 100644
--- a/drivers/net/irda/ali-ircc.h
+++ b/drivers/net/irda/ali-ircc.h
@@ -191,7 +191,6 @@
 	struct tx_fifo tx_fifo;    /* Info about frames to be transmitted */
 
 	struct net_device *netdev;     /* Yes! we are some kind of netdevice */
-	struct net_device_stats stats;
 	
 	struct irlap_cb *irlap;    /* The link layer we are binded to */
 	struct qos_info qos;       /* QoS capabilities for this device */
diff --git a/drivers/net/irda/au1000_ircc.h b/drivers/net/irda/au1000_ircc.h
index b4763f2..c072c09 100644
--- a/drivers/net/irda/au1000_ircc.h
+++ b/drivers/net/irda/au1000_ircc.h
@@ -107,7 +107,6 @@
 	iobuff_t rx_buff;
 
 	struct net_device *netdev;
-	struct net_device_stats stats;
 	
 	struct timeval stamp;
 	struct timeval now;
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 6c4b53f..9411640 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -53,7 +53,6 @@
 static int au1k_irda_rx(struct net_device *);
 static void au1k_irda_interrupt(int, void *);
 static void au1k_tx_timeout(struct net_device *);
-static struct net_device_stats *au1k_irda_stats(struct net_device *);
 static int au1k_irda_ioctl(struct net_device *, struct ifreq *, int);
 static int au1k_irda_set_speed(struct net_device *dev, int speed);
 
@@ -213,7 +212,6 @@
 	dev->open = au1k_irda_start;
 	dev->hard_start_xmit = au1k_irda_hard_xmit;
 	dev->stop = au1k_irda_stop;
-	dev->get_stats = au1k_irda_stats;
 	dev->do_ioctl = au1k_irda_ioctl;
 	dev->tx_timeout = au1k_tx_timeout;
 
@@ -596,7 +594,7 @@
 			update_rx_stats(dev, flags, count);
 			skb=alloc_skb(count+1,GFP_ATOMIC);
 			if (skb == NULL) {
-				aup->stats.rx_dropped++;
+				aup->netdev->stats.rx_dropped++;
 				continue;
 			}
 			skb_reserve(skb, 1);
@@ -832,13 +830,6 @@
 	return ret;
 }
 
-
-static struct net_device_stats *au1k_irda_stats(struct net_device *dev)
-{
-	struct au1k_private *aup = netdev_priv(dev);
-	return &aup->stats;
-}
-
 MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
 MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
 
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 687c2d5..6f3e7f7 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1194,13 +1194,13 @@
               txp = txpc;
               txpc++;
               txpc %= TX_SLOTS;
-              self->stats.tx_packets++;
+              self->netdev->stats.tx_packets++;
               if (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS)
                   self->ring->tx[txp].control &= ~OBOE_CTL_TX_RTCENTX;
             }
-          self->stats.tx_packets--;
+          self->netdev->stats.tx_packets--;
 #else
-          self->stats.tx_packets++;
+          self->netdev->stats.tx_packets++;
 #endif
           toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
         }
@@ -1280,7 +1280,7 @@
                       skb_put (skb, len);
                       skb_copy_to_linear_data(skb, self->rx_bufs[self->rxs],
 					      len);
-                      self->stats.rx_packets++;
+                      self->netdev->stats.rx_packets++;
                       skb->dev = self->netdev;
                       skb_reset_mac_header(skb);
                       skb->protocol = htons (ETH_P_IRDA);
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 1e67720f..0dbd193 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -308,7 +308,6 @@
 struct toshoboe_cb
 {
   struct net_device *netdev;    /* Yes! we are some kind of netdevice */
-  struct net_device_stats stats;
   struct tty_driver ttydev;
 
   struct irlap_cb *irlap;       /* The link layer we are binded to */
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 205e4e8..3a22dc4 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -122,7 +122,6 @@
 static int irda_usb_net_close(struct net_device *dev);
 static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static void irda_usb_net_timeout(struct net_device *dev);
-static struct net_device_stats *irda_usb_net_get_stats(struct net_device *dev);
 
 /************************ TRANSMIT ROUTINES ************************/
 /*
@@ -525,13 +524,13 @@
 	/* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */
 	if ((res = usb_submit_urb(urb, GFP_ATOMIC))) {
 		IRDA_WARNING("%s(), failed Tx URB\n", __func__);
-		self->stats.tx_errors++;
+		netdev->stats.tx_errors++;
 		/* Let USB recover : We will catch that in the watchdog */
 		/*netif_start_queue(netdev);*/
 	} else {
 		/* Increment packet stats */
-		self->stats.tx_packets++;
-                self->stats.tx_bytes += skb->len;
+		netdev->stats.tx_packets++;
+                netdev->stats.tx_bytes += skb->len;
 		
 		netdev->trans_start = jiffies;
 	}
@@ -677,7 +676,7 @@
 		IRDA_DEBUG(0, "%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags);
 
 		/* Increase error count */
-		self->stats.tx_errors++;
+		netdev->stats.tx_errors++;
 
 #ifdef IU_BUG_KICK_TIMEOUT
 		/* Can't be a bad idea to reset the speed ;-) - Jean II */
@@ -826,7 +825,7 @@
 	if (urb->status != 0) {
 		switch (urb->status) {
 		case -EILSEQ:
-			self->stats.rx_crc_errors++;	
+			self->netdev->stats.rx_crc_errors++;
 			/* Also precursor to a hot-unplug on UHCI. */
 			/* Fallthrough... */
 		case -ECONNRESET:
@@ -839,7 +838,7 @@
 		case -ETIME:
 			/* Usually precursor to a hot-unplug on OHCI. */
 		default:
-			self->stats.rx_errors++;
+			self->netdev->stats.rx_errors++;
 			IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __func__, urb->status, urb->transfer_flags);
 			break;
 		}
@@ -890,7 +889,7 @@
 				       IRDA_SKB_MAX_MTU);
 
 	if (!newskb)  {
-		self->stats.rx_dropped++;
+		self->netdev->stats.rx_dropped++;
 		/* We could deliver the current skb, but this would stall
 		 * the Rx path. Better drop the packet... Jean II */
 		goto done;  
@@ -927,8 +926,8 @@
 	netif_rx(dataskb);
 
 	/* Keep stats up to date */
-	self->stats.rx_bytes += len;
-	self->stats.rx_packets++;
+	self->netdev->stats.rx_bytes += len;
+	self->netdev->stats.rx_packets++;
 
 done:
 	/* Note : at this point, the URB we've just received (urb)
@@ -1074,7 +1073,7 @@
 {
 	unsigned int i;
 	int ret;
-	char stir421x_fw_name[11];
+	char stir421x_fw_name[12];
 	const struct firmware *fw;
 	const unsigned char *fw_version_ptr; /* pointer to version string */
 	unsigned long fw_version = 0;
@@ -1342,14 +1341,6 @@
 }
 
 /*------------------------------------------------------------------*/
-/*
- * Get device stats (for /proc/net/dev and ifconfig)
- */
-static struct net_device_stats *irda_usb_net_get_stats(struct net_device *dev)
-{
-	struct irda_usb_cb *self = netdev_priv(dev);
-	return &self->stats;
-}
 
 /********************* IRDA CONFIG SUBROUTINES *********************/
 /*
@@ -1428,7 +1419,6 @@
 	netdev->watchdog_timeo  = 250*HZ/1000;	/* 250 ms > USB timeout */
 	netdev->open            = irda_usb_net_open;
 	netdev->stop            = irda_usb_net_close;
-	netdev->get_stats	= irda_usb_net_get_stats;
 	netdev->do_ioctl        = irda_usb_net_ioctl;
 
 	return register_netdev(netdev);
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h
index a0ca9c1..ac0443d 100644
--- a/drivers/net/irda/irda-usb.h
+++ b/drivers/net/irda/irda-usb.h
@@ -152,7 +152,6 @@
 	struct urb *speed_urb;		/* URB used to send speed commands */
 	
 	struct net_device *netdev;	/* Yes! we are some kind of netdev. */
-	struct net_device_stats stats;
 	struct irlap_cb   *irlap;	/* The link layer we are binded to */
 	struct qos_info qos;
 	char *speed_buff;		/* Buffer for speed changes */
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index c747c87..b4a6171 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -105,7 +105,7 @@
 	struct usb_device *usbdev;      /* init: probe_irda */
 	struct net_device *netdev;      /* network layer */
 	struct irlap_cb   *irlap;       /* The link layer we are binded to */
-	struct net_device_stats stats;	/* network statistics */
+
 	struct qos_info   qos;
 
 	__u8		  *in_buf;	/* receive buffer */
@@ -186,12 +186,12 @@
 		case -EPIPE:
 			break;
 		default:
-			kingsun->stats.tx_errors++;
+			netdev->stats.tx_errors++;
 			netif_start_queue(netdev);
 		}
 	} else {
-		kingsun->stats.tx_packets++;
-		kingsun->stats.tx_bytes += skb->len;
+		netdev->stats.tx_packets++;
+		netdev->stats.tx_bytes += skb->len;
 	}
 
 	dev_kfree_skb(skb);
@@ -232,7 +232,7 @@
 		if (bytes[0] >= 1 && bytes[0] < kingsun->max_rx) {
 			for (i = 1; i <= bytes[0]; i++) {
 				async_unwrap_char(kingsun->netdev,
-						  &kingsun->stats,
+						  &kingsun->netdev->stats,
 						  &kingsun->rx_buff, bytes[i]);
 			}
 			do_gettimeofday(&kingsun->rx_time);
@@ -418,15 +418,6 @@
 	return ret;
 }
 
-/*
- * Get device stats (for /proc/net/dev and ifconfig)
- */
-static struct net_device_stats *
-kingsun_net_get_stats(struct net_device *netdev)
-{
-	struct kingsun_cb *kingsun = netdev_priv(netdev);
-	return &kingsun->stats;
-}
 
 /*
  * This routine is called by the USB subsystem for each new device
@@ -532,7 +523,6 @@
 	net->hard_start_xmit = kingsun_hard_xmit;
 	net->open            = kingsun_net_open;
 	net->stop            = kingsun_net_close;
-	net->get_stats	     = kingsun_net_get_stats;
 	net->do_ioctl        = kingsun_net_ioctl;
 
 	ret = register_netdev(net);
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 600d96f..55322fb 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -174,7 +174,7 @@
 	struct usb_device *usbdev;	/* init: probe_irda */
 	struct net_device *netdev;	/* network layer */
 	struct irlap_cb *irlap;	/* The link layer we are binded to */
-	struct net_device_stats stats;	/* network statistics */
+
 	struct qos_info qos;
 
 	struct usb_ctrlrequest *tx_setuprequest;
@@ -366,7 +366,7 @@
 				case -EPIPE:
 					break;
 				default:
-					kingsun->stats.tx_errors++;
+					netdev->stats.tx_errors++;
 					netif_start_queue(netdev);
 				}
 			}
@@ -416,12 +416,12 @@
 		case -EPIPE:
 			break;
 		default:
-			kingsun->stats.tx_errors++;
+			netdev->stats.tx_errors++;
 			netif_start_queue(netdev);
 		}
 	} else {
-		kingsun->stats.tx_packets++;
-		kingsun->stats.tx_bytes += skb->len;
+		netdev->stats.tx_packets++;
+		netdev->stats.tx_bytes += skb->len;
 
 	}
 
@@ -469,7 +469,7 @@
 			 */
 			if (kingsun->rx_variable_xormask != 0) {
 				async_unwrap_char(kingsun->netdev,
-						  &kingsun->stats,
+						  &kingsun->netdev->stats,
 						  &kingsun->rx_unwrap_buff,
 						  bytes[i]);
 			}
@@ -669,15 +669,6 @@
 }
 
 /*
- * Get device stats (for /proc/net/dev and ifconfig)
- */
-static struct net_device_stats *ks959_net_get_stats(struct net_device *netdev)
-{
-	struct ks959_cb *kingsun = netdev_priv(netdev);
-	return &kingsun->stats;
-}
-
-/*
  * This routine is called by the USB subsystem for each new device
  * in the system. We need to check if the device is ours, and in
  * this case start handling it.
@@ -792,7 +783,6 @@
 	net->hard_start_xmit = ks959_hard_xmit;
 	net->open = ks959_net_open;
 	net->stop = ks959_net_close;
-	net->get_stats = ks959_net_get_stats;
 	net->do_ioctl = ks959_net_ioctl;
 
 	ret = register_netdev(net);
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 0e7f893..5b327b0 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -140,7 +140,7 @@
 	struct usb_device *usbdev;	/* init: probe_irda */
 	struct net_device *netdev;	/* network layer */
 	struct irlap_cb *irlap;	/* The link layer we are binded to */
-	struct net_device_stats stats;	/* network statistics */
+
 	struct qos_info qos;
 
 	struct urb *tx_urb;
@@ -278,7 +278,7 @@
 				case -EPIPE:
 					break;
 				default:
-					kingsun->stats.tx_errors++;
+					netdev->stats.tx_errors++;
 					netif_start_queue(netdev);
 				}
 			}
@@ -329,12 +329,12 @@
 		case -EPIPE:
 			break;
 		default:
-			kingsun->stats.tx_errors++;
+			netdev->stats.tx_errors++;
 			netif_start_queue(netdev);
 		}
 	} else {
-		kingsun->stats.tx_packets++;
-		kingsun->stats.tx_bytes += skb->len;
+		netdev->stats.tx_packets++;
+		netdev->stats.tx_bytes += skb->len;
 
 	}
 
@@ -348,9 +348,10 @@
 static void ksdazzle_rcv_irq(struct urb *urb)
 {
 	struct ksdazzle_cb *kingsun = urb->context;
+	struct net_device *netdev = kingsun->netdev;
 
 	/* in process of stopping, just drop data */
-	if (!netif_running(kingsun->netdev)) {
+	if (!netif_running(netdev)) {
 		kingsun->receiving = 0;
 		return;
 	}
@@ -368,7 +369,7 @@
 		unsigned int i;
 
 		for (i = 0; i < urb->actual_length; i++) {
-			async_unwrap_char(kingsun->netdev, &kingsun->stats,
+			async_unwrap_char(netdev, &netdev->stats,
 					  &kingsun->rx_unwrap_buff, bytes[i]);
 		}
 		kingsun->receiving =
@@ -562,16 +563,6 @@
 }
 
 /*
- * Get device stats (for /proc/net/dev and ifconfig)
- */
-static struct net_device_stats *ksdazzle_net_get_stats(struct net_device
-						       *netdev)
-{
-	struct ksdazzle_cb *kingsun = netdev_priv(netdev);
-	return &kingsun->stats;
-}
-
-/*
  * This routine is called by the USB subsystem for each new device
  * in the system. We need to check if the device is ours, and in
  * this case start handling it.
@@ -696,7 +687,6 @@
 	net->hard_start_xmit = ksdazzle_hard_xmit;
 	net->open = ksdazzle_net_open;
 	net->stop = ksdazzle_net_close;
-	net->get_stats = ksdazzle_net_get_stats;
 	net->do_ioctl = ksdazzle_net_ioctl;
 
 	ret = register_netdev(net);
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 904c961..7eafdca 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -403,8 +403,8 @@
 	if(unlikely(new_len <= 0)) {
 		IRDA_ERROR("%s short frame length %d\n",
 			     mcs->netdev->name, new_len);
-		++mcs->stats.rx_errors;
-		++mcs->stats.rx_length_errors;
+		++mcs->netdev->stats.rx_errors;
+		++mcs->netdev->stats.rx_length_errors;
 		return;
 	}
 	fcs = 0;
@@ -413,14 +413,14 @@
 	if(fcs != GOOD_FCS) {
 		IRDA_ERROR("crc error calc 0x%x len %d\n",
 			   fcs, new_len);
-		mcs->stats.rx_errors++;
-		mcs->stats.rx_crc_errors++;
+		mcs->netdev->stats.rx_errors++;
+		mcs->netdev->stats.rx_crc_errors++;
 		return;
 	}
 
 	skb = dev_alloc_skb(new_len + 1);
 	if(unlikely(!skb)) {
-		++mcs->stats.rx_dropped;
+		++mcs->netdev->stats.rx_dropped;
 		return;
 	}
 
@@ -433,8 +433,8 @@
 
 	netif_rx(skb);
 
-	mcs->stats.rx_packets++;
-	mcs->stats.rx_bytes += new_len;
+	mcs->netdev->stats.rx_packets++;
+	mcs->netdev->stats.rx_bytes += new_len;
 
 	return;
 }
@@ -458,22 +458,22 @@
 	if(unlikely(new_len <= 0)) {
 		IRDA_ERROR("%s short frame length %d\n",
 			   mcs->netdev->name, new_len);
-		++mcs->stats.rx_errors;
-		++mcs->stats.rx_length_errors;
+		++mcs->netdev->stats.rx_errors;
+		++mcs->netdev->stats.rx_length_errors;
 		return;
 	}
 
 	fcs = ~(crc32_le(~0, buf, new_len));
 	if(fcs != get_unaligned_le32(buf + new_len)) {
 		IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len);
-		mcs->stats.rx_errors++;
-		mcs->stats.rx_crc_errors++;
+		mcs->netdev->stats.rx_errors++;
+		mcs->netdev->stats.rx_crc_errors++;
 		return;
 	}
 
 	skb = dev_alloc_skb(new_len + 1);
 	if(unlikely(!skb)) {
-		++mcs->stats.rx_dropped;
+		++mcs->netdev->stats.rx_dropped;
 		return;
 	}
 
@@ -486,8 +486,8 @@
 
 	netif_rx(skb);
 
-	mcs->stats.rx_packets++;
-	mcs->stats.rx_bytes += new_len;
+	mcs->netdev->stats.rx_packets++;
+	mcs->netdev->stats.rx_bytes += new_len;
 
 	return;
 }
@@ -756,14 +756,6 @@
 		return ret;
 }
 
-
-/* Get device stats for /proc/net/dev and ifconfig */
-static struct net_device_stats *mcs_net_get_stats(struct net_device *netdev)
-{
-	struct mcs_cb *mcs = netdev_priv(netdev);
-	return &mcs->stats;
-}
-
 /* Receive callback function.  */
 static void mcs_receive_irq(struct urb *urb)
 {
@@ -786,14 +778,14 @@
 		 */
 		/* SIR speed */
 		if(mcs->speed < 576000) {
-			async_unwrap_char(mcs->netdev, &mcs->stats,
+			async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
 				  &mcs->rx_buff, 0xc0);
 
 			for (i = 0; i < urb->actual_length; i++)
-				async_unwrap_char(mcs->netdev, &mcs->stats,
+				async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
 					  &mcs->rx_buff, bytes[i]);
 
-			async_unwrap_char(mcs->netdev, &mcs->stats,
+			async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
 				  &mcs->rx_buff, 0xc1);
 		}
 		/* MIR speed */
@@ -868,12 +860,12 @@
 		case -EPIPE:
 			break;
 		default:
-			mcs->stats.tx_errors++;
+			mcs->netdev->stats.tx_errors++;
 			netif_start_queue(ndev);
 		}
 	} else {
-		mcs->stats.tx_packets++;
-		mcs->stats.tx_bytes += skb->len;
+		mcs->netdev->stats.tx_packets++;
+		mcs->netdev->stats.tx_bytes += skb->len;
 	}
 
 	dev_kfree_skb(skb);
@@ -931,7 +923,6 @@
 	ndev->hard_start_xmit = mcs_hard_xmit;
 	ndev->open = mcs_net_open;
 	ndev->stop = mcs_net_close;
-	ndev->get_stats = mcs_net_get_stats;
 	ndev->do_ioctl = mcs_net_ioctl;
 
 	if (!intf->cur_altsetting)
diff --git a/drivers/net/irda/mcs7780.h b/drivers/net/irda/mcs7780.h
index b18148c..6bdc621 100644
--- a/drivers/net/irda/mcs7780.h
+++ b/drivers/net/irda/mcs7780.h
@@ -104,7 +104,6 @@
 	struct usb_device *usbdev;	/* init: probe_irda */
 	struct net_device *netdev;	/* network layer */
 	struct irlap_cb *irlap;	/* The link layer we are binded to */
-	struct net_device_stats stats;	/* network statistics */
 	struct qos_info qos;
 	unsigned int speed;	/* Current speed */
 	unsigned int new_speed;	/* new speed */
@@ -154,7 +153,6 @@
 static int mcs_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd);
 static int mcs_net_close(struct net_device *netdev);
 static int mcs_net_open(struct net_device *netdev);
-static struct net_device_stats *mcs_net_get_stats(struct net_device *netdev);
 
 static void mcs_receive_irq(struct urb *urb);
 static void mcs_send_irq(struct urb *urb);
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 2c6bf2d..61e509c 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -185,7 +185,6 @@
 static int  nsc_ircc_net_open(struct net_device *dev);
 static int  nsc_ircc_net_close(struct net_device *dev);
 static int  nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev);
 
 /* Globals */
 static int pnp_registered;
@@ -446,7 +445,6 @@
 	dev->open            = nsc_ircc_net_open;
 	dev->stop            = nsc_ircc_net_close;
 	dev->do_ioctl        = nsc_ircc_net_ioctl;
-	dev->get_stats	     = nsc_ircc_net_get_stats;
 
 	err = register_netdev(dev);
 	if (err) {
@@ -1401,7 +1399,7 @@
 	self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, 
 					   self->tx_buff.truesize);
 
-	self->stats.tx_bytes += self->tx_buff.len;
+	dev->stats.tx_bytes += self->tx_buff.len;
 	
 	/* Add interrupt on tx low level (will fire immediately) */
 	switch_bank(iobase, BANK0);
@@ -1473,7 +1471,7 @@
 	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
 	self->tx_fifo.tail += skb->len;
 
-	self->stats.tx_bytes += skb->len;
+	dev->stats.tx_bytes += skb->len;
 
 	skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
 		      skb->len);
@@ -1652,13 +1650,13 @@
 	
 	/* Check for underrun! */
 	if (inb(iobase+ASCR) & ASCR_TXUR) {
-		self->stats.tx_errors++;
-		self->stats.tx_fifo_errors++;
+		self->netdev->stats.tx_errors++;
+		self->netdev->stats.tx_fifo_errors++;
 		
 		/* Clear bit, by writing 1 into it */
 		outb(ASCR_TXUR, iobase+ASCR);
 	} else {
-		self->stats.tx_packets++;
+		self->netdev->stats.tx_packets++;
 	}
 
 	/* Finished with this frame, so prepare for next */
@@ -1793,28 +1791,28 @@
 		if (status & FRM_ST_ERR_MSK) {
 			if (status & FRM_ST_LOST_FR) {
 				/* Add number of lost frames to stats */
-				self->stats.rx_errors += len;	
+				self->netdev->stats.rx_errors += len;
 			} else {
 				/* Skip frame */
-				self->stats.rx_errors++;
+				self->netdev->stats.rx_errors++;
 				
 				self->rx_buff.data += len;
 			
 				if (status & FRM_ST_MAX_LEN)
-					self->stats.rx_length_errors++;
+					self->netdev->stats.rx_length_errors++;
 				
 				if (status & FRM_ST_PHY_ERR) 
-					self->stats.rx_frame_errors++;
+					self->netdev->stats.rx_frame_errors++;
 				
 				if (status & FRM_ST_BAD_CRC) 
-					self->stats.rx_crc_errors++;
+					self->netdev->stats.rx_crc_errors++;
 			}
 			/* The errors below can be reported in both cases */
 			if (status & FRM_ST_OVR1)
-				self->stats.rx_fifo_errors++;		       
+				self->netdev->stats.rx_fifo_errors++;
 			
 			if (status & FRM_ST_OVR2)
-				self->stats.rx_fifo_errors++;
+				self->netdev->stats.rx_fifo_errors++;
 		} else {
 			/*  
 			 * First we must make sure that the frame we
@@ -1863,7 +1861,7 @@
 				IRDA_WARNING("%s(), memory squeeze, "
 					     "dropping frame.\n",
 					     __func__);
-				self->stats.rx_dropped++;
+				self->netdev->stats.rx_dropped++;
 
 				/* Restore bank register */
 				outb(bank, iobase+BSR);
@@ -1889,8 +1887,8 @@
 
 			/* Move to next frame */
 			self->rx_buff.data += len;
-			self->stats.rx_bytes += len;
-			self->stats.rx_packets++;
+			self->netdev->stats.rx_bytes += len;
+			self->netdev->stats.rx_packets++;
 
 			skb->dev = self->netdev;
 			skb_reset_mac_header(skb);
@@ -1920,8 +1918,8 @@
 	/*  Receive all characters in Rx FIFO */
 	do {
 		byte = inb(iobase+RXD);
-		async_unwrap_char(self->netdev, &self->stats, &self->rx_buff, 
-				  byte);
+		async_unwrap_char(self->netdev, &self->netdev->stats,
+				  &self->rx_buff, byte);
 	} while (inb(iobase+LSR) & LSR_RXDA); /* Data available */	
 }
 
@@ -1952,7 +1950,7 @@
 			self->ier = IER_TXLDL_IE;
 		else { 
 
-			self->stats.tx_packets++;
+			self->netdev->stats.tx_packets++;
 			netif_wake_queue(self->netdev);
 			self->ier = IER_TXEMP_IE;
 		}
@@ -2307,13 +2305,6 @@
 	return ret;
 }
 
-static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev)
-{
-	struct nsc_ircc_cb *self = netdev_priv(dev);
-	
-	return &self->stats;
-}
-
 static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state)
 {
      	struct nsc_ircc_cb *self = platform_get_drvdata(dev);
diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/net/irda/nsc-ircc.h
index 71cd3c5..7ba7738 100644
--- a/drivers/net/irda/nsc-ircc.h
+++ b/drivers/net/irda/nsc-ircc.h
@@ -251,7 +251,6 @@
 	struct tx_fifo tx_fifo;    /* Info about frames to be transmitted */
 
 	struct net_device *netdev;     /* Yes! we are some kind of netdevice */
-	struct net_device_stats stats;
 	
 	struct irlap_cb *irlap;    /* The link layer we are binded to */
 	struct qos_info qos;       /* QoS capabilities for this device */
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 004a9aa..31794c2 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -108,7 +108,6 @@
 	int			txdma;
 	int			rxdma;
 
-	struct net_device_stats	stats;
 	struct irlap_cb		*irlap;
 	struct qos_info		qos;
 
@@ -258,14 +257,15 @@
 			data = STRBR;
 			if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
 				printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
-				si->stats.rx_errors++;
+				dev->stats.rx_errors++;
 				if (lsr & LSR_FE)
-					si->stats.rx_frame_errors++;
+					dev->stats.rx_frame_errors++;
 				if (lsr & LSR_OE)
-					si->stats.rx_fifo_errors++;
+					dev->stats.rx_fifo_errors++;
 			} else {
-				si->stats.rx_bytes++;
-				async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
+				dev->stats.rx_bytes++;
+				async_unwrap_char(dev, &dev->stats,
+						  &si->rx_buff, data);
 			}
 			lsr = STLSR;
 		}
@@ -277,8 +277,8 @@
 
 	case 0x0C: /* Character Timeout Indication */
 	  	do  {
-		    si->stats.rx_bytes++;
-	            async_unwrap_char(dev, &si->stats, &si->rx_buff, STRBR);
+		    dev->stats.rx_bytes++;
+	            async_unwrap_char(dev, &dev->stats, &si->rx_buff, STRBR);
 	  	} while (STLSR & LSR_DR);
 		si->last_oscr = OSCR;
 	  	break;
@@ -290,9 +290,8 @@
 	    	}
 
 		if (si->tx_buff.len == 0) {
-			si->stats.tx_packets++;
-			si->stats.tx_bytes += si->tx_buff.data -
-					      si->tx_buff.head;
+			dev->stats.tx_packets++;
+			dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;
 
                         /* We need to ensure that the transmitter has finished. */
 			while ((STLSR & LSR_TEMT) == 0)
@@ -343,10 +342,10 @@
 	DCSR(channel) = dcsr & ~DCSR_RUN;
 
 	if (dcsr & DCSR_ENDINTR)  {
-		si->stats.tx_packets++;
-		si->stats.tx_bytes += si->dma_tx_buff_len;
+		dev->stats.tx_packets++;
+		dev->stats.tx_bytes += si->dma_tx_buff_len;
 	} else {
-		si->stats.tx_errors++;
+		dev->stats.tx_errors++;
 	}
 
 	while (ICSR1 & ICSR1_TBY)
@@ -392,14 +391,14 @@
 		data = ICDR;
 
 		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
-			si->stats.rx_errors++;
+			dev->stats.rx_errors++;
 			if (stat & ICSR1_CRE) {
 				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
-				si->stats.rx_crc_errors++;
+				dev->stats.rx_crc_errors++;
 			}
 			if (stat & ICSR1_ROR) {
 				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
-				si->stats.rx_over_errors++;
+				dev->stats.rx_over_errors++;
 			}
 		} else	{
 			si->dma_rx_buff[len++] = data;
@@ -415,14 +414,14 @@
 
 		if (icsr0 & ICSR0_FRE) {
 			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
-			si->stats.rx_dropped++;
+			dev->stats.rx_dropped++;
 			return;
 		}
 
 		skb = alloc_skb(len+1,GFP_ATOMIC);
 		if (!skb)  {
 			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
-			si->stats.rx_dropped++;
+			dev->stats.rx_dropped++;
 			return;
 		}
 
@@ -437,8 +436,8 @@
 		skb->protocol = htons(ETH_P_IRDA);
 		netif_rx(skb);
 
-		si->stats.rx_packets++;
-		si->stats.rx_bytes += len;
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += len;
 	}
 }
 
@@ -457,10 +456,10 @@
 	if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
 		if (icsr0 & ICSR0_FRE) {
 		        printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
-			si->stats.rx_frame_errors++;
+			dev->stats.rx_frame_errors++;
 		} else {
 			printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
-			si->stats.rx_errors++;
+			dev->stats.rx_errors++;
 		}
 		ICSR0 = icsr0 & (ICSR0_FRE | ICSR0_RAB);
 	}
@@ -589,12 +588,6 @@
 	return ret;
 }
 
-static struct net_device_stats *pxa_irda_stats(struct net_device *dev)
-{
-	struct pxa_irda *si = netdev_priv(dev);
-	return &si->stats;
-}
-
 static void pxa_irda_startup(struct pxa_irda *si)
 {
 	/* Disable STUART interrupts */
@@ -857,7 +850,6 @@
 	dev->open		= pxa_irda_start;
 	dev->stop		= pxa_irda_stop;
 	dev->do_ioctl		= pxa_irda_ioctl;
-	dev->get_stats		= pxa_irda_stats;
 
 	irda_init_max_qos_capabilies(&si->qos);
 
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index d302bcf..7a2b003 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -60,7 +60,6 @@
 	dma_regs_t		*txdma;
 	dma_regs_t		*rxdma;
 
-	struct net_device_stats	stats;
 	struct device		*dev;
 	struct irda_platform_data *pdata;
 	struct irlap_cb		*irlap;
@@ -375,13 +374,13 @@
 		data = Ser2UTDR;
 
 		if (stat & (UTSR1_FRE | UTSR1_ROR)) {
-			si->stats.rx_errors++;
+			dev->stats.rx_errors++;
 			if (stat & UTSR1_FRE)
-				si->stats.rx_frame_errors++;
+				dev->stats.rx_frame_errors++;
 			if (stat & UTSR1_ROR)
-				si->stats.rx_fifo_errors++;
+				dev->stats.rx_fifo_errors++;
 		} else
-			async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
+			async_unwrap_char(dev, &dev->stats, &si->rx_buff, data);
 
 		status = Ser2UTSR0;
 	}
@@ -396,9 +395,9 @@
 		 * There are at least 4 bytes in the FIFO.  Read 3 bytes
 		 * and leave the rest to the block below.
 		 */
-		async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
-		async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
-		async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
+		async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
+		async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
+		async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
 	}
 
 	if (status & (UTSR0_RFS | UTSR0_RID)) {
@@ -406,7 +405,7 @@
 		 * Fifo contains more than 1 character.
 		 */
 		do {
-			async_unwrap_char(dev, &si->stats, &si->rx_buff,
+			async_unwrap_char(dev, &dev->stats, &si->rx_buff,
 					  Ser2UTDR);
 		} while (Ser2UTSR1 & UTSR1_RNE);
 
@@ -422,8 +421,8 @@
 		} while (Ser2UTSR1 & UTSR1_TNF && si->tx_buff.len);
 
 		if (si->tx_buff.len == 0) {
-			si->stats.tx_packets++;
-			si->stats.tx_bytes += si->tx_buff.data -
+			dev->stats.tx_packets++;
+			dev->stats.tx_bytes += si->tx_buff.data -
 					      si->tx_buff.head;
 
 			/*
@@ -482,11 +481,11 @@
 		data = Ser2HSDR;
 
 		if (stat & (HSSR1_CRE | HSSR1_ROR)) {
-			si->stats.rx_errors++;
+			dev->stats.rx_errors++;
 			if (stat & HSSR1_CRE)
-				si->stats.rx_crc_errors++;
+				dev->stats.rx_crc_errors++;
 			if (stat & HSSR1_ROR)
-				si->stats.rx_frame_errors++;
+				dev->stats.rx_frame_errors++;
 		} else
 			skb->data[len++] = data;
 
@@ -505,8 +504,8 @@
 		skb->dev = dev;
 		skb_reset_mac_header(skb);
 		skb->protocol = htons(ETH_P_IRDA);
-		si->stats.rx_packets++;
-		si->stats.rx_bytes += len;
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += len;
 
 		/*
 		 * Before we pass the buffer up, allocate a new one.
@@ -545,10 +544,10 @@
 	 * from the fifo.
 	 */
 	if (Ser2HSSR0 & (HSSR0_FRE | HSSR0_RAB)) {
-		si->stats.rx_errors++;
+		dev->stats.rx_errors++;
 
 		if (Ser2HSSR0 & HSSR0_FRE)
-			si->stats.rx_frame_errors++;
+			dev->stats.rx_frame_errors++;
 
 		/*
 		 * Clear out the DMA...
@@ -633,8 +632,8 @@
 	 */
 	if (skb) {
 		dma_unmap_single(si->dev, si->txbuf_dma, skb->len, DMA_TO_DEVICE);
-		si->stats.tx_packets ++;
-		si->stats.tx_bytes += skb->len;
+		dev->stats.tx_packets ++;
+		dev->stats.tx_bytes += skb->len;
 		dev_kfree_skb_irq(skb);
 	}
 
@@ -762,12 +761,6 @@
 	return ret;
 }
 
-static struct net_device_stats *sa1100_irda_stats(struct net_device *dev)
-{
-	struct sa1100_irda *si = netdev_priv(dev);
-	return &si->stats;
-}
-
 static int sa1100_irda_start(struct net_device *dev)
 {
 	struct sa1100_irda *si = netdev_priv(dev);
@@ -924,7 +917,6 @@
 	dev->open		= sa1100_irda_start;
 	dev->stop		= sa1100_irda_stop;
 	dev->do_ioctl		= sa1100_irda_ioctl;
-	dev->get_stats		= sa1100_irda_stats;
 	dev->irq		= IRQ_Ser2ICP;
 
 	irda_init_max_qos_capabilies(&si->qos);
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index 2a57bc6..6d5b1e2 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -160,7 +160,6 @@
 
 struct sir_dev {
 	struct net_device *netdev;
-	struct net_device_stats stats;
 
 	struct irlap_cb    *irlap;
 
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index ceef040..5b58624 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -455,8 +455,8 @@
 			if ((skb=dev->tx_skb) != NULL) {
 				dev->tx_skb = NULL;
 				dev_kfree_skb_any(skb);
-				dev->stats.tx_errors++;		      
-				dev->stats.tx_dropped++;		      
+				dev->netdev->stats.tx_errors++;
+				dev->netdev->stats.tx_dropped++;
 			}
 			dev->tx_buff.len = 0;
 		}
@@ -493,8 +493,8 @@
 		
 	if ((skb=dev->tx_skb) != NULL) {
 		dev->tx_skb = NULL;
-		dev->stats.tx_packets++;		      
-		dev->stats.tx_bytes += skb->len;
+		dev->netdev->stats.tx_packets++;
+		dev->netdev->stats.tx_bytes += skb->len;
 		dev_kfree_skb_any(skb);
 	}
 
@@ -548,7 +548,7 @@
 		 * just update stats and set media busy
 		 */
 		irda_device_set_media_busy(dev->netdev, TRUE);
-		dev->stats.rx_dropped++;
+		dev->netdev->stats.rx_dropped++;
 		IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
 		return 0;
 	}
@@ -557,7 +557,7 @@
 	if (likely(atomic_read(&dev->enable_rx))) {
 		while (count--)
 			/* Unwrap and destuff one byte */
-			async_unwrap_char(dev->netdev, &dev->stats, 
+			async_unwrap_char(dev->netdev, &dev->netdev->stats,
 					  &dev->rx_buff, *cp++);
 	} else {
 		while (count--) {
@@ -582,13 +582,6 @@
 
 /* callbacks from network layer */
 
-static struct net_device_stats *sirdev_get_stats(struct net_device *ndev)
-{
-	struct sir_dev *dev = netdev_priv(ndev);
-
-	return (dev) ? &dev->stats : NULL;
-}
-
 static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct sir_dev *dev = netdev_priv(ndev);
@@ -654,7 +647,7 @@
 	 */
 	atomic_set(&dev->enable_rx, 0);
 	if (unlikely(sirdev_is_receiving(dev)))
-		dev->stats.collisions++;
+		dev->netdev->stats.collisions++;
 
 	actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
 
@@ -669,8 +662,8 @@
 		IRDA_ERROR("%s: drv->do_write failed (%d)\n",
 			   __func__, actual);
 		dev_kfree_skb_any(skb);
-		dev->stats.tx_errors++;		      
-		dev->stats.tx_dropped++;		      
+		dev->netdev->stats.tx_errors++;
+		dev->netdev->stats.tx_dropped++;
 		netif_wake_queue(ndev);
 	}
 	spin_unlock_irqrestore(&dev->tx_lock, flags);
@@ -918,7 +911,6 @@
 	ndev->hard_start_xmit = sirdev_hard_xmit;
 	ndev->open = sirdev_open;
 	ndev->stop = sirdev_close;
-	ndev->get_stats = sirdev_get_stats;
 	ndev->do_ioctl = sirdev_ioctl;
 
 	if (register_netdev(ndev)) {
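
The change repeated across these IrDA drivers is mechanical: each private control block drops its own struct net_device_stats copy together with the now-redundant get_stats hook, and every counter update goes through the stats field embedded in struct net_device, which the networking core reports by default when the driver installs no stats callback. A minimal sketch of the resulting pattern (foo_priv and foo_tx_done are made-up names, not taken from any of these drivers):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_priv {
	struct net_device *netdev;	/* back-pointer only; no private stats copy */
};

/* Account a completed transmit against the counters embedded in net_device;
 * ifconfig and /proc/net/dev pick these up without any get_stats hook. */
static void foo_tx_done(struct foo_priv *self, struct sk_buff *skb)
{
	self->netdev->stats.tx_packets++;
	self->netdev->stats.tx_bytes += skb->len;
}
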
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 5d09e15..dd73cce 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -150,7 +150,6 @@
 /* Private data for each instance */
 struct smsc_ircc_cb {
 	struct net_device *netdev;     /* Yes! we are some kind of netdevice */
-	struct net_device_stats stats;
 	struct irlap_cb    *irlap; /* The link layer we are binded to */
 
 	chipio_t io;               /* IrDA controller information */
@@ -215,7 +214,6 @@
 #if SMSC_IRCC2_C_NET_TIMEOUT
 static void smsc_ircc_timeout(struct net_device *dev);
 #endif
-static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev);
 static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self);
 static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self);
 static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed);
@@ -529,7 +527,6 @@
 	dev->open            = smsc_ircc_net_open;
 	dev->stop            = smsc_ircc_net_close;
 	dev->do_ioctl        = smsc_ircc_net_ioctl;
-	dev->get_stats	     = smsc_ircc_net_get_stats;
 
 	self = netdev_priv(dev);
 	self->netdev = dev;
@@ -834,13 +831,6 @@
 	return ret;
 }
 
-static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev)
-{
-	struct smsc_ircc_cb *self = netdev_priv(dev);
-
-	return &self->stats;
-}
-
 #if SMSC_IRCC2_C_NET_TIMEOUT
 /*
  * Function smsc_ircc_timeout (struct net_device *dev)
@@ -920,7 +910,7 @@
 	self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
 					   self->tx_buff.truesize);
 
-	self->stats.tx_bytes += self->tx_buff.len;
+	dev->stats.tx_bytes += self->tx_buff.len;
 
 	/* Turn on transmit finished interrupt. Will fire immediately!  */
 	outb(UART_IER_THRI, self->io.sir_base + UART_IER);
@@ -1320,16 +1310,16 @@
 	/* Check for underrun! */
 	register_bank(iobase, 0);
 	if (inb(iobase + IRCC_LSR) & IRCC_LSR_UNDERRUN) {
-		self->stats.tx_errors++;
-		self->stats.tx_fifo_errors++;
+		self->netdev->stats.tx_errors++;
+		self->netdev->stats.tx_fifo_errors++;
 
 		/* Reset error condition */
 		register_bank(iobase, 0);
 		outb(IRCC_MASTER_ERROR_RESET, iobase + IRCC_MASTER);
 		outb(0x00, iobase + IRCC_MASTER);
 	} else {
-		self->stats.tx_packets++;
-		self->stats.tx_bytes += self->tx_buff.len;
+		self->netdev->stats.tx_packets++;
+		self->netdev->stats.tx_bytes += self->tx_buff.len;
 	}
 
 	/* Check if it's time to change the speed */
@@ -1429,15 +1419,15 @@
 
 	/* Look for errors */
 	if (lsr & (IRCC_LSR_FRAME_ERROR | IRCC_LSR_CRC_ERROR | IRCC_LSR_SIZE_ERROR)) {
-		self->stats.rx_errors++;
+		self->netdev->stats.rx_errors++;
 		if (lsr & IRCC_LSR_FRAME_ERROR)
-			self->stats.rx_frame_errors++;
+			self->netdev->stats.rx_frame_errors++;
 		if (lsr & IRCC_LSR_CRC_ERROR)
-			self->stats.rx_crc_errors++;
+			self->netdev->stats.rx_crc_errors++;
 		if (lsr & IRCC_LSR_SIZE_ERROR)
-			self->stats.rx_length_errors++;
+			self->netdev->stats.rx_length_errors++;
 		if (lsr & (IRCC_LSR_UNDERRUN | IRCC_LSR_OVERRUN))
-			self->stats.rx_length_errors++;
+			self->netdev->stats.rx_length_errors++;
 		return;
 	}
 
@@ -1460,8 +1450,8 @@
 	skb_reserve(skb, 1);
 
 	memcpy(skb_put(skb, len), self->rx_buff.data, len);
-	self->stats.rx_packets++;
-	self->stats.rx_bytes += len;
+	self->netdev->stats.rx_packets++;
+	self->netdev->stats.rx_bytes += len;
 
 	skb->dev = self->netdev;
 	skb_reset_mac_header(skb);
@@ -1489,7 +1479,7 @@
          * async_unwrap_char will deliver all found frames
 	 */
 	do {
-		async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
+		async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
 				  inb(iobase + UART_RX));
 
 		/* Make sure we don't stay here to long */
@@ -1992,7 +1982,7 @@
 			/* Tell network layer that we want more frames */
 			netif_wake_queue(self->netdev);
 		}
-		self->stats.tx_packets++;
+		self->netdev->stats.tx_packets++;
 
 		if (self->io.speed <= 115200) {
 			/*
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index ca4cd92..8b1658c 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -164,7 +164,7 @@
         struct usb_device *usbdev;      /* init: probe_irda */
         struct net_device *netdev;      /* network layer */
         struct irlap_cb   *irlap;       /* The link layer we are binded to */
-        struct net_device_stats stats;	/* network statistics */
+
         struct qos_info   qos;
 	unsigned 	  speed;	/* Current speed */
 
@@ -323,16 +323,16 @@
 		pr_debug("%s: short frame len %d\n",
 			 stir->netdev->name, len);
 
-		++stir->stats.rx_errors;
-		++stir->stats.rx_length_errors;
+		++stir->netdev->stats.rx_errors;
+		++stir->netdev->stats.rx_length_errors;
 		return;
 	}
 
 	fcs = ~(crc32_le(~0, rx_buff->data, len));
 	if (fcs != get_unaligned_le32(rx_buff->data + len)) {
 		pr_debug("crc error calc 0x%x len %d\n", fcs, len);
-		stir->stats.rx_errors++;
-		stir->stats.rx_crc_errors++;
+		stir->netdev->stats.rx_errors++;
+		stir->netdev->stats.rx_crc_errors++;
 		return;
 	}
 
@@ -340,7 +340,7 @@
 	if (len < IRDA_RX_COPY_THRESHOLD) {
 		nskb = dev_alloc_skb(len + 1);
 		if (unlikely(!nskb)) {
-			++stir->stats.rx_dropped;
+			++stir->netdev->stats.rx_dropped;
 			return;
 		}
 		skb_reserve(nskb, 1);
@@ -349,7 +349,7 @@
 	} else {
 		nskb = dev_alloc_skb(rx_buff->truesize);
 		if (unlikely(!nskb)) {
-			++stir->stats.rx_dropped;
+			++stir->netdev->stats.rx_dropped;
 			return;
 		}
 		skb_reserve(nskb, 1);
@@ -366,8 +366,8 @@
 
 	netif_rx(skb);
 
-	stir->stats.rx_packets++;
-	stir->stats.rx_bytes += len;
+	stir->netdev->stats.rx_packets++;
+	stir->netdev->stats.rx_bytes += len;
 
 	rx_buff->data = rx_buff->head;
 	rx_buff->len = 0;
@@ -437,7 +437,7 @@
 		if (unlikely(rx_buff->len >= rx_buff->truesize)) {
 			pr_debug("%s: fir frame exceeds %d\n",
 				 stir->netdev->name, rx_buff->truesize);
-			++stir->stats.rx_over_errors;
+			++stir->netdev->stats.rx_over_errors;
 			goto error_recovery;
 		}
 
@@ -445,10 +445,10 @@
 		continue;
 
 	frame_error:
-		++stir->stats.rx_frame_errors;
+		++stir->netdev->stats.rx_frame_errors;
 
 	error_recovery:
-		++stir->stats.rx_errors;
+		++stir->netdev->stats.rx_errors;
 		rx_buff->state = OUTSIDE_FRAME;
 		rx_buff->in_frame = FALSE;
 	}
@@ -461,7 +461,7 @@
 	int i;
 
 	for (i = 0; i < len; i++)
-		async_unwrap_char(stir->netdev, &stir->stats,
+		async_unwrap_char(stir->netdev, &stir->netdev->stats,
 				  &stir->rx_buff, bytes[i]);
 }
 
@@ -692,7 +692,7 @@
 	usb_kill_urb(stir->rx_urb);
 
 	if (stir->rx_buff.in_frame) 
-		stir->stats.collisions++;
+		stir->netdev->stats.collisions++;
 }
 /*
  * Wrap data in socket buffer and send it.
@@ -718,15 +718,15 @@
 	if (!first_frame)
 		fifo_txwait(stir, wraplen);
 
-	stir->stats.tx_packets++;
-	stir->stats.tx_bytes += skb->len;
+	stir->netdev->stats.tx_packets++;
+	stir->netdev->stats.tx_bytes += skb->len;
 	stir->netdev->trans_start = jiffies;
 	pr_debug("send %d (%d)\n", skb->len, wraplen);
 
 	if (usb_bulk_msg(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1),
 			 stir->io_buf, wraplen,
 			 NULL, TRANSMIT_TIMEOUT))
-		stir->stats.tx_errors++;
+		stir->netdev->stats.tx_errors++;
 }
 
 /*
@@ -1008,15 +1008,6 @@
 }
 
 /*
- * Get device stats (for /proc/net/dev and ifconfig)
- */
-static struct net_device_stats *stir_net_get_stats(struct net_device *netdev)
-{
-	struct stir_cb *stir = netdev_priv(netdev);
-	return &stir->stats;
-}
-
-/*
  * This routine is called by the USB subsystem for each new device
  * in the system. We need to check if the device is ours, and in
  * this case start handling it.
@@ -1066,7 +1057,6 @@
 	net->hard_start_xmit = stir_hard_xmit;
 	net->open            = stir_net_open;
 	net->stop            = stir_net_close;
-	net->get_stats	     = stir_net_get_stats;
 	net->do_ioctl        = stir_net_ioctl;
 
 	ret = register_netdev(net);
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 74c78cf..8b3e545 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -101,8 +101,6 @@
 static int via_ircc_net_close(struct net_device *dev);
 static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
 			      int cmd);
-static struct net_device_stats *via_ircc_net_get_stats(struct net_device
-						       *dev);
 static void via_ircc_change_dongle_speed(int iobase, int speed,
 					 int dongle_id);
 static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
@@ -434,7 +432,6 @@
 	dev->open = via_ircc_net_open;
 	dev->stop = via_ircc_net_close;
 	dev->do_ioctl = via_ircc_net_ioctl;
-	dev->get_stats = via_ircc_net_get_stats;
 
 	err = register_netdev(dev);
 	if (err)
@@ -855,7 +852,7 @@
 	    async_wrap_skb(skb, self->tx_buff.data,
 			   self->tx_buff.truesize);
 
-	self->stats.tx_bytes += self->tx_buff.len;
+	dev->stats.tx_bytes += self->tx_buff.len;
 	/* Send this frame with old speed */
 	SetBaudRate(iobase, self->io.speed);
 	SetPulseWidth(iobase, 12);
@@ -921,7 +918,7 @@
 	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
 
 	self->tx_fifo.tail += skb->len;
-	self->stats.tx_bytes += skb->len;
+	dev->stats.tx_bytes += skb->len;
 	skb_copy_from_linear_data(skb,
 		      self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
 	self->tx_fifo.len++;
@@ -990,12 +987,12 @@
 	/* Clear bit, by writing 1 into it */
 	Tx_status = GetTXStatus(iobase);
 	if (Tx_status & 0x08) {
-		self->stats.tx_errors++;
-		self->stats.tx_fifo_errors++;
+		self->netdev->stats.tx_errors++;
+		self->netdev->stats.tx_fifo_errors++;
 		hwreset(self);
 // how to clear underrrun ?
 	} else {
-		self->stats.tx_packets++;
+		self->netdev->stats.tx_packets++;
 		ResetChip(iobase, 3);
 		ResetChip(iobase, 4);
 	}
@@ -1119,8 +1116,8 @@
 		}
 		// Move to next frame 
 		self->rx_buff.data += len;
-		self->stats.rx_bytes += len;
-		self->stats.rx_packets++;
+		self->netdev->stats.rx_bytes += len;
+		self->netdev->stats.rx_packets++;
 		skb->dev = self->netdev;
 		skb_reset_mac_header(skb);
 		skb->protocol = htons(ETH_P_IRDA);
@@ -1180,7 +1177,7 @@
 		 */
 		if ((skb == NULL) || (skb->data == NULL)
 		    || (self->rx_buff.data == NULL) || (len < 6)) {
-			self->stats.rx_dropped++;
+			self->netdev->stats.rx_dropped++;
 			return TRUE;
 		}
 		skb_reserve(skb, 1);
@@ -1192,8 +1189,8 @@
 
 		// Move to next frame 
 		self->rx_buff.data += len;
-		self->stats.rx_bytes += len;
-		self->stats.rx_packets++;
+		self->netdev->stats.rx_bytes += len;
+		self->netdev->stats.rx_packets++;
 		skb->dev = self->netdev;
 		skb_reset_mac_header(skb);
 		skb->protocol = htons(ETH_P_IRDA);
@@ -1220,13 +1217,13 @@
 	IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);
 
 	if ((len - 4) < 2) {
-		self->stats.rx_dropped++;
+		self->netdev->stats.rx_dropped++;
 		return FALSE;
 	}
 
 	skb = dev_alloc_skb(len + 1);
 	if (skb == NULL) {
-		self->stats.rx_dropped++;
+		self->netdev->stats.rx_dropped++;
 		return FALSE;
 	}
 	skb_reserve(skb, 1);
@@ -1238,8 +1235,8 @@
 		st_fifo->tail = 0;
 	// Move to next frame 
 	self->rx_buff.data += len;
-	self->stats.rx_bytes += len;
-	self->stats.rx_packets++;
+	self->netdev->stats.rx_bytes += len;
+	self->netdev->stats.rx_packets++;
 	skb->dev = self->netdev;
 	skb_reset_mac_header(skb);
 	skb->protocol = htons(ETH_P_IRDA);
@@ -1295,7 +1292,7 @@
 			 */
 			if ((skb == NULL) || (skb->data == NULL)
 			    || (self->rx_buff.data == NULL) || (len < 6)) {
-				self->stats.rx_dropped++;
+				self->netdev->stats.rx_dropped++;
 				continue;
 			}
 			skb_reserve(skb, 1);
@@ -1307,8 +1304,8 @@
 
 			// Move to next frame 
 			self->rx_buff.data += len;
-			self->stats.rx_bytes += len;
-			self->stats.rx_packets++;
+			self->netdev->stats.rx_bytes += len;
+			self->netdev->stats.rx_packets++;
 			skb->dev = self->netdev;
 			skb_reset_mac_header(skb);
 			skb->protocol = htons(ETH_P_IRDA);
@@ -1523,7 +1520,7 @@
 
 	IRDA_ASSERT(dev != NULL, return -1;);
 	self = netdev_priv(dev);
-	self->stats.rx_packets = 0;
+	dev->stats.rx_packets = 0;
 	IRDA_ASSERT(self != NULL, return 0;);
 	iobase = self->io.fir_base;
 	if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
@@ -1660,14 +1657,6 @@
 	return ret;
 }
 
-static struct net_device_stats *via_ircc_net_get_stats(struct net_device
-						       *dev)
-{
-	struct via_ircc_cb *self = netdev_priv(dev);
-
-	return &self->stats;
-}
-
 MODULE_AUTHOR("VIA Technologies,inc");
 MODULE_DESCRIPTION("VIA IrDA Device Driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index 403c3f7..d9d1db0 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -95,7 +95,6 @@
 	struct tx_fifo tx_fifo;	/* Info about frames to be transmitted */
 
 	struct net_device *netdev;	/* Yes! we are some kind of netdevice */
-	struct net_device_stats stats;
 
 	struct irlap_cb *irlap;	/* The link layer we are binded to */
 	struct qos_info qos;	/* QoS capabilities for this device */
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 0d30f8d..723c458 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -291,14 +291,14 @@
 		now.tv_sec - idev->last_rx.tv_sec - delta1, delta2);	
 
 	seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
-		idev->stats.rx_packets, idev->stats.rx_bytes, idev->stats.rx_errors,
-		idev->stats.rx_dropped);
+		ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors,
+		ndev->stats.rx_dropped);
 	seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n",
-		idev->stats.rx_over_errors, idev->stats.rx_length_errors,
-		idev->stats.rx_frame_errors, idev->stats.rx_crc_errors);
+		ndev->stats.rx_over_errors, ndev->stats.rx_length_errors,
+		ndev->stats.rx_frame_errors, ndev->stats.rx_crc_errors);
 	seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n",
-		idev->stats.tx_packets, idev->stats.tx_bytes, idev->stats.tx_errors,
-		idev->stats.tx_dropped, idev->stats.tx_fifo_errors);
+		ndev->stats.tx_packets, ndev->stats.tx_bytes, ndev->stats.tx_errors,
+		ndev->stats.tx_dropped, ndev->stats.tx_fifo_errors);
 
 }
 		
@@ -651,21 +651,21 @@
 
 		if (ret < 0) {
 			ret = -ret;
-			idev->stats.rx_errors++;
+			ndev->stats.rx_errors++;
 			if (ret & VLSI_RX_DROP)  
-				idev->stats.rx_dropped++;
+				ndev->stats.rx_dropped++;
 			if (ret & VLSI_RX_OVER)  
-				idev->stats.rx_over_errors++;
+				ndev->stats.rx_over_errors++;
 			if (ret & VLSI_RX_LENGTH)  
-				idev->stats.rx_length_errors++;
+				ndev->stats.rx_length_errors++;
 			if (ret & VLSI_RX_FRAME)  
-				idev->stats.rx_frame_errors++;
+				ndev->stats.rx_frame_errors++;
 			if (ret & VLSI_RX_CRC)  
-				idev->stats.rx_crc_errors++;
+				ndev->stats.rx_crc_errors++;
 		}
 		else if (ret > 0) {
-			idev->stats.rx_packets++;
-			idev->stats.rx_bytes += ret;
+			ndev->stats.rx_packets++;
+			ndev->stats.rx_bytes += ret;
 		}
 	}
 
@@ -686,6 +686,7 @@
 
 static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
 {
+	struct net_device *ndev = pci_get_drvdata(idev->pdev);
 	struct vlsi_ring *r = idev->rx_ring;
 	struct ring_descr *rd;
 	int ret;
@@ -711,21 +712,21 @@
 
 		if (ret < 0) {
 			ret = -ret;
-			idev->stats.rx_errors++;
+			ndev->stats.rx_errors++;
 			if (ret & VLSI_RX_DROP)  
-				idev->stats.rx_dropped++;
+				ndev->stats.rx_dropped++;
 			if (ret & VLSI_RX_OVER)  
-				idev->stats.rx_over_errors++;
+				ndev->stats.rx_over_errors++;
 			if (ret & VLSI_RX_LENGTH)  
-				idev->stats.rx_length_errors++;
+				ndev->stats.rx_length_errors++;
 			if (ret & VLSI_RX_FRAME)  
-				idev->stats.rx_frame_errors++;
+				ndev->stats.rx_frame_errors++;
 			if (ret & VLSI_RX_CRC)  
-				idev->stats.rx_crc_errors++;
+				ndev->stats.rx_crc_errors++;
 		}
 		else if (ret > 0) {
-			idev->stats.rx_packets++;
-			idev->stats.rx_bytes += ret;
+			ndev->stats.rx_packets++;
+			ndev->stats.rx_bytes += ret;
 		}
 	}
 }
@@ -1050,8 +1051,8 @@
 drop:
 	IRDA_WARNING("%s: dropping packet - %s\n", __func__, msg);
 	dev_kfree_skb_any(skb);
-	idev->stats.tx_errors++;
-	idev->stats.tx_dropped++;
+	ndev->stats.tx_errors++;
+	ndev->stats.tx_dropped++;
 	/* Don't even think about returning NET_XMIT_DROP (=1) here!
 	 * In fact any retval!=0 causes the packet scheduler to requeue the
 	 * packet for later retry of transmission - which isn't exactly
@@ -1078,15 +1079,15 @@
 
 		if (ret < 0) {
 			ret = -ret;
-			idev->stats.tx_errors++;
+			ndev->stats.tx_errors++;
 			if (ret & VLSI_TX_DROP)
-				idev->stats.tx_dropped++;
+				ndev->stats.tx_dropped++;
 			if (ret & VLSI_TX_FIFO)
-				idev->stats.tx_fifo_errors++;
+				ndev->stats.tx_fifo_errors++;
 		}
 		else if (ret > 0){
-			idev->stats.tx_packets++;
-			idev->stats.tx_bytes += ret;
+			ndev->stats.tx_packets++;
+			ndev->stats.tx_bytes += ret;
 		}
 	}
 
@@ -1122,6 +1123,7 @@
 
 static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
 {
+	struct net_device *ndev = pci_get_drvdata(idev->pdev);
 	struct vlsi_ring *r = idev->tx_ring;
 	struct ring_descr *rd;
 	int ret;
@@ -1145,15 +1147,15 @@
 
 		if (ret < 0) {
 			ret = -ret;
-			idev->stats.tx_errors++;
+			ndev->stats.tx_errors++;
 			if (ret & VLSI_TX_DROP)
-				idev->stats.tx_dropped++;
+				ndev->stats.tx_dropped++;
 			if (ret & VLSI_TX_FIFO)
-				idev->stats.tx_fifo_errors++;
+				ndev->stats.tx_fifo_errors++;
 		}
 		else if (ret > 0){
-			idev->stats.tx_packets++;
-			idev->stats.tx_bytes += ret;
+			ndev->stats.tx_packets++;
+			ndev->stats.tx_bytes += ret;
 		}
 	}
 
@@ -1373,13 +1375,6 @@
 
 /**************************************************************/
 
-static struct net_device_stats * vlsi_get_stats(struct net_device *ndev)
-{
-	vlsi_irda_dev_t *idev = netdev_priv(ndev);
-
-	return &idev->stats;
-}
-
 static void vlsi_tx_timeout(struct net_device *ndev)
 {
 	vlsi_irda_dev_t *idev = netdev_priv(ndev);
@@ -1615,7 +1610,6 @@
  
 	ndev->open	      = vlsi_open;
 	ndev->stop	      = vlsi_close;
-	ndev->get_stats	      = vlsi_get_stats;
 	ndev->hard_start_xmit = vlsi_hard_start_xmit;
 	ndev->do_ioctl	      = vlsi_ioctl;
 	ndev->tx_timeout      = vlsi_tx_timeout;
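
vlsi_unarm_rx() and vlsi_unarm_tx() only receive the private vlsi_irda_dev_t, so the patch first recovers the net_device via pci_get_drvdata(idev->pdev) and then bumps the embedded counters. A small illustrative sketch of that shape, assuming a hypothetical bar_priv whose probe routine stored the net_device as the PCI driver data:

#include <linux/pci.h>
#include <linux/netdevice.h>

struct bar_priv {
	struct pci_dev *pdev;		/* set at probe time */
};

/* Helpers that only see the private state recover the net_device through
 * the PCI driver data (stored with pci_set_drvdata() at probe) before
 * touching the embedded counters. */
static void bar_count_rx_error(struct bar_priv *idev)
{
	struct net_device *ndev = pci_get_drvdata(idev->pdev);

	ndev->stats.rx_errors++;
}
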
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 9b18843..3050d1a 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -712,7 +712,6 @@
 
 typedef struct vlsi_irda_dev {
 	struct pci_dev		*pdev;
-	struct net_device_stats	stats;
 
 	struct irlap_cb		*irlap;
 
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 30ec913..dc0a2e4 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -102,7 +102,6 @@
 static int  w83977af_net_open(struct net_device *dev);
 static int  w83977af_net_close(struct net_device *dev);
 static int  w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev);
 
 /*
  * Function w83977af_init ()
@@ -237,7 +236,6 @@
 	dev->open            = w83977af_net_open;
 	dev->stop            = w83977af_net_close;
 	dev->do_ioctl        = w83977af_net_ioctl;
-	dev->get_stats	     = w83977af_net_get_stats;
 
 	err = register_netdev(dev);
 	if (err) {
@@ -702,13 +700,13 @@
 	if (inb(iobase+AUDR) & AUDR_UNDR) {
 		IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );
 		
-		self->stats.tx_errors++;
-		self->stats.tx_fifo_errors++;
+		self->netdev->stats.tx_errors++;
+		self->netdev->stats.tx_fifo_errors++;
 
 		/* Clear bit, by writing 1 to it */
 		outb(AUDR_UNDR, iobase+AUDR);
 	} else
-		self->stats.tx_packets++;
+		self->netdev->stats.tx_packets++;
 
 	
 	if (self->new_speed) {
@@ -846,28 +844,28 @@
 		if (status & FS_FO_ERR_MSK) {
 			if (status & FS_FO_LST_FR) {
 				/* Add number of lost frames to stats */
-				self->stats.rx_errors += len;	
+				self->netdev->stats.rx_errors += len;
 			} else {
 				/* Skip frame */
-				self->stats.rx_errors++;
+				self->netdev->stats.rx_errors++;
 				
 				self->rx_buff.data += len;
 				
 				if (status & FS_FO_MX_LEX)
-					self->stats.rx_length_errors++;
+					self->netdev->stats.rx_length_errors++;
 				
 				if (status & FS_FO_PHY_ERR) 
-					self->stats.rx_frame_errors++;
+					self->netdev->stats.rx_frame_errors++;
 				
 				if (status & FS_FO_CRC_ERR) 
-					self->stats.rx_crc_errors++;
+					self->netdev->stats.rx_crc_errors++;
 			}
 			/* The errors below can be reported in both cases */
 			if (status & FS_FO_RX_OV)
-				self->stats.rx_fifo_errors++;
+				self->netdev->stats.rx_fifo_errors++;
 			
 			if (status & FS_FO_FSF_OV)
-				self->stats.rx_fifo_errors++;
+				self->netdev->stats.rx_fifo_errors++;
 			
 		} else {
 			/* Check if we have transferred all data to memory */
@@ -917,7 +915,7 @@
 
 			/* Move to next frame */
 			self->rx_buff.data += len;
-			self->stats.rx_packets++;
+			self->netdev->stats.rx_packets++;
 			
 			skb->dev = self->netdev;
 			skb_reset_mac_header(skb);
@@ -951,7 +949,7 @@
 	/*  Receive all characters in Rx FIFO */
 	do {
 		byte = inb(iobase+RBR);
-		async_unwrap_char(self->netdev, &self->stats, &self->rx_buff, 
+		async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
 				  byte);
 	} while (inb(iobase+USR) & USR_RDR); /* Data available */	
 }
@@ -994,7 +992,7 @@
 			outb(AUDR_SFEND, iobase+AUDR);
 			outb(set, iobase+SSR); 
 
-			self->stats.tx_packets++;
+			self->netdev->stats.tx_packets++;
 
 			/* Feed me more packets */
 			netif_wake_queue(self->netdev);
@@ -1336,13 +1334,6 @@
 	return ret;
 }
 
-static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev)
-{
-	struct w83977af_ir *self = netdev_priv(dev);
-	
-	return &self->stats;
-}
-
 MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
 MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/w83977af_ir.h b/drivers/net/irda/w83977af_ir.h
index 87c3975..fefe9b1 100644
--- a/drivers/net/irda/w83977af_ir.h
+++ b/drivers/net/irda/w83977af_ir.h
@@ -172,7 +172,6 @@
 	int tx_len;          /* Number of frames in tx_buff */
 
 	struct net_device *netdev; /* Yes! we are some kind of netdevice */
-	struct net_device_stats stats;
 	
 	struct irlap_cb    *irlap; /* The link layer we are binded to */
 	struct qos_info     qos;   /* QoS capabilities for this device */
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index c7457f9..cb793c2 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -429,7 +429,7 @@
 SIMPLE_PORT_ATTR(num_mcast);
 CUSTOM_PORT_ATTR(lpar_map, "0x%X\n", port->lpar_map);
 CUSTOM_PORT_ATTR(stopped_map, "0x%X\n", port->stopped_map);
-CUSTOM_PORT_ATTR(mac_addr, "0x%lX\n", port->mac_addr);
+CUSTOM_PORT_ATTR(mac_addr, "0x%llX\n", port->mac_addr);
 
 #define GET_PORT_ATTR(_name)	(&veth_port_attr_##_name.attr)
 static struct attribute *veth_port_default_attrs[] = {
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 4a5580c..1d6e48e 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -84,7 +84,10 @@
 #define KORINA_NUM_RDS	64  /* number of receive descriptors */
 #define KORINA_NUM_TDS	64  /* number of transmit descriptors */
 
-#define KORINA_RBSIZE	536 /* size of one resource buffer = Ether MTU */
+/* KORINA_RBSIZE is the hardware's default maximum receive
+ * frame size in bytes. Having this hardcoded means that there
+ * is no support for MTU sizes greater than 1500. */
+#define KORINA_RBSIZE	1536 /* size of one resource buffer = Ether MTU */
 #define KORINA_RDS_MASK	(KORINA_NUM_RDS - 1)
 #define KORINA_TDS_MASK	(KORINA_NUM_TDS - 1)
 #define RD_RING_SIZE 	(KORINA_NUM_RDS * sizeof(struct dma_desc))
@@ -196,7 +199,7 @@
 	struct korina_private *lp = netdev_priv(dev);
 	unsigned long flags;
 	u32 length;
-	u32 chain_index;
+	u32 chain_prev, chain_next;
 	struct dma_desc *td;
 
 	spin_lock_irqsave(&lp->lock, flags);
@@ -228,8 +231,8 @@
 	/* Setup the transmit descriptor. */
 	dma_cache_inv((u32) td, sizeof(*td));
 	td->ca = CPHYSADDR(skb->data);
-	chain_index = (lp->tx_chain_tail - 1) &
-			KORINA_TDS_MASK;
+	chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
+	chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;
 
 	if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
 		if (lp->tx_chain_status == desc_empty) {
@@ -237,7 +240,7 @@
 			td->control = DMA_COUNT(length) |
 					DMA_DESC_COF | DMA_DESC_IOF;
 			/* Move tail */
-			lp->tx_chain_tail = chain_index;
+			lp->tx_chain_tail = chain_next;
 			/* Write to NDPTR */
 			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
 					&lp->tx_dma_regs->dmandptr);
@@ -248,12 +251,12 @@
 			td->control = DMA_COUNT(length) |
 					DMA_DESC_COF | DMA_DESC_IOF;
 			/* Link to prev */
-			lp->td_ring[chain_index].control &=
+			lp->td_ring[chain_prev].control &=
 					~DMA_DESC_COF;
 			/* Link to prev */
-			lp->td_ring[chain_index].link =  CPHYSADDR(td);
+			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
 			/* Move tail */
-			lp->tx_chain_tail = chain_index;
+			lp->tx_chain_tail = chain_next;
 			/* Write to NDPTR */
 			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
 					&(lp->tx_dma_regs->dmandptr));
@@ -267,17 +270,16 @@
 			td->control = DMA_COUNT(length) |
 					DMA_DESC_COF | DMA_DESC_IOF;
 			/* Move tail */
-			lp->tx_chain_tail = chain_index;
+			lp->tx_chain_tail = chain_next;
 			lp->tx_chain_status = desc_filled;
-			netif_stop_queue(dev);
 		} else {
 			/* Update tail */
 			td->control = DMA_COUNT(length) |
 					DMA_DESC_COF | DMA_DESC_IOF;
-			lp->td_ring[chain_index].control &=
+			lp->td_ring[chain_prev].control &=
 					~DMA_DESC_COF;
-			lp->td_ring[chain_index].link =  CPHYSADDR(td);
-			lp->tx_chain_tail = chain_index;
+			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
+			lp->tx_chain_tail = chain_next;
 		}
 	}
 	dma_cache_wback((u32) td, sizeof(*td));
@@ -327,13 +329,13 @@
 
 	dmas = readl(&lp->rx_dma_regs->dmas);
 	if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
-		netif_rx_schedule_prep(&lp->napi);
-
 		dmasm = readl(&lp->rx_dma_regs->dmasm);
 		writel(dmasm | (DMA_STAT_DONE |
 				DMA_STAT_HALT | DMA_STAT_ERR),
 				&lp->rx_dma_regs->dmasm);
 
+		netif_rx_schedule(&lp->napi);
+
 		if (dmas & DMA_STAT_ERR)
 			printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name);
 
@@ -350,15 +352,20 @@
 	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
 	struct sk_buff *skb, *skb_new;
 	u8 *pkt_buf;
-	u32 devcs, pkt_len, dmas, rx_free_desc;
+	u32 devcs, pkt_len, dmas;
 	int count;
 
 	dma_cache_inv((u32)rd, sizeof(*rd));
 
 	for (count = 0; count < limit; count++) {
+		skb = lp->rx_skb[lp->rx_next_done];
+		skb_new = NULL;
 
 		devcs = rd->devcs;
 
+		if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
+			break;
+
 		/* Update statistics counters */
 		if (devcs & ETH_RX_CRC)
 			dev->stats.rx_crc_errors++;
@@ -381,63 +388,55 @@
 			 * in Rc32434 (errata ref #077) */
 			dev->stats.rx_errors++;
 			dev->stats.rx_dropped++;
-		}
-
-		while ((rx_free_desc = KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
-			/* init the var. used for the later
-			 * operations within the while loop */
-			skb_new = NULL;
+		} else if ((devcs & ETH_RX_ROK)) {
 			pkt_len = RCVPKT_LENGTH(devcs);
-			skb = lp->rx_skb[lp->rx_next_done];
 
-			if ((devcs & ETH_RX_ROK)) {
-				/* must be the (first and) last
-				 * descriptor then */
-				pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
+			/* must be the (first and) last
+			 * descriptor then */
+			pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
 
-				/* invalidate the cache */
-				dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
+			/* invalidate the cache */
+			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
 
-				/* Malloc up new buffer. */
-				skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
+			/* Malloc up new buffer. */
+			skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
 
-				if (!skb_new)
-					break;
-				/* Do not count the CRC */
-				skb_put(skb, pkt_len - 4);
-				skb->protocol = eth_type_trans(skb, dev);
+			if (!skb_new)
+				break;
+			/* Do not count the CRC */
+			skb_put(skb, pkt_len - 4);
+			skb->protocol = eth_type_trans(skb, dev);
 
-				/* Pass the packet to upper layers */
-				netif_receive_skb(skb);
-				dev->stats.rx_packets++;
-				dev->stats.rx_bytes += pkt_len;
+			/* Pass the packet to upper layers */
+			netif_receive_skb(skb);
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_len;
 
-				/* Update the mcast stats */
-				if (devcs & ETH_RX_MP)
-					dev->stats.multicast++;
+			/* Update the mcast stats */
+			if (devcs & ETH_RX_MP)
+				dev->stats.multicast++;
 
-				lp->rx_skb[lp->rx_next_done] = skb_new;
-			}
-
-			rd->devcs = 0;
-
-			/* Restore descriptor's curr_addr */
-			if (skb_new)
-				rd->ca = CPHYSADDR(skb_new->data);
-			else
-				rd->ca = CPHYSADDR(skb->data);
-
-			rd->control = DMA_COUNT(KORINA_RBSIZE) |
-				DMA_DESC_COD | DMA_DESC_IOD;
-			lp->rd_ring[(lp->rx_next_done - 1) &
-				KORINA_RDS_MASK].control &=
-				~DMA_DESC_COD;
-
-			lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
-			dma_cache_wback((u32)rd, sizeof(*rd));
-			rd = &lp->rd_ring[lp->rx_next_done];
-			writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
+			lp->rx_skb[lp->rx_next_done] = skb_new;
 		}
+
+		rd->devcs = 0;
+
+		/* Restore descriptor's curr_addr */
+		if (skb_new)
+			rd->ca = CPHYSADDR(skb_new->data);
+		else
+			rd->ca = CPHYSADDR(skb->data);
+
+		rd->control = DMA_COUNT(KORINA_RBSIZE) |
+			DMA_DESC_COD | DMA_DESC_IOD;
+		lp->rd_ring[(lp->rx_next_done - 1) &
+			KORINA_RDS_MASK].control &=
+			~DMA_DESC_COD;
+
+		lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
+		dma_cache_wback((u32)rd, sizeof(*rd));
+		rd = &lp->rd_ring[lp->rx_next_done];
+		writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
 	}
 
 	dmas = readl(&lp->rx_dma_regs->dmas);
@@ -623,12 +622,12 @@
 	dmas = readl(&lp->tx_dma_regs->dmas);
 
 	if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
-		korina_tx(dev);
-
 		dmasm = readl(&lp->tx_dma_regs->dmasm);
 		writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
 				&lp->tx_dma_regs->dmasm);
 
+		korina_tx(dev);
+
 		if (lp->tx_chain_status == desc_filled &&
 			(readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
 			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
@@ -901,6 +900,8 @@
 
 	korina_free_ring(dev);
 
+	napi_disable(&lp->napi);
+
 	ret = korina_init(dev);
 	if (ret < 0) {
 		printk(KERN_ERR DRV_NAME "%s: cannot restart device\n",
@@ -999,14 +1000,14 @@
 	 * that handles the Done Finished
 	 * Ovr and Und Events */
 	ret = request_irq(lp->rx_irq, &korina_rx_dma_interrupt,
-		IRQF_SHARED | IRQF_DISABLED, "Korina ethernet Rx", dev);
+			IRQF_DISABLED, "Korina ethernet Rx", dev);
 	if (ret < 0) {
 		printk(KERN_ERR DRV_NAME "%s: unable to get Rx DMA IRQ %d\n",
 		    dev->name, lp->rx_irq);
 		goto err_release;
 	}
 	ret = request_irq(lp->tx_irq, &korina_tx_dma_interrupt,
-		IRQF_SHARED | IRQF_DISABLED, "Korina ethernet Tx", dev);
+			IRQF_DISABLED, "Korina ethernet Tx", dev);
 	if (ret < 0) {
 		printk(KERN_ERR DRV_NAME "%s: unable to get Tx DMA IRQ %d\n",
 		    dev->name, lp->tx_irq);
@@ -1015,7 +1016,7 @@
 
 	/* Install handler for overrun error. */
 	ret = request_irq(lp->ovr_irq, &korina_ovr_interrupt,
-			IRQF_SHARED | IRQF_DISABLED, "Ethernet Overflow", dev);
+			IRQF_DISABLED, "Ethernet Overflow", dev);
 	if (ret < 0) {
 		printk(KERN_ERR DRV_NAME"%s: unable to get OVR IRQ %d\n",
 		    dev->name, lp->ovr_irq);
@@ -1024,7 +1025,7 @@
 
 	/* Install handler for underflow error. */
 	ret = request_irq(lp->und_irq, &korina_und_interrupt,
-			IRQF_SHARED | IRQF_DISABLED, "Ethernet Underflow", dev);
+			IRQF_DISABLED, "Ethernet Underflow", dev);
 	if (ret < 0) {
 		printk(KERN_ERR DRV_NAME "%s: unable to get UND IRQ %d\n",
 		    dev->name, lp->und_irq);
@@ -1067,6 +1068,8 @@
 
 	korina_free_ring(dev);
 
+	napi_disable(&lp->napi);
+
 	free_irq(lp->rx_irq, dev);
 	free_irq(lp->tx_irq, dev);
 	free_irq(lp->ovr_irq, dev);
@@ -1089,7 +1092,6 @@
 		return -ENOMEM;
 	}
 	SET_NETDEV_DEV(dev, &pdev->dev);
-	platform_set_drvdata(pdev, dev);
 	lp = netdev_priv(dev);
 
 	bif->dev = dev;
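
In korina's transmit path the single chain_index, which always pointed one descriptor back, is split into chain_prev (the descriptor whose control word and link get patched) and chain_next (where the tail actually advances to); both wrap with the power-of-two descriptor mask. Roughly, with made-up FOO_* names rather than the driver's own:

#include <linux/types.h>

#define FOO_NUM_TDS	64			/* must be a power of two */
#define FOO_TDS_MASK	(FOO_NUM_TDS - 1)

/* Compute the descriptor to re-link and the new tail, both wrapped. */
static void foo_advance_tx_tail(u32 *tail, u32 *chain_prev, u32 *chain_next)
{
	*chain_prev = (*tail - 1) & FOO_TDS_MASK;	/* back-link target */
	*chain_next = (*tail + 1) & FOO_TDS_MASK;	/* next tail position */
	*tail = *chain_next;
}
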
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index 57716e2..8e88486 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -486,6 +486,7 @@
 	.ndo_get_stats		= ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ei_poll,
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 15bb38d..9f6644a 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -952,6 +952,7 @@
 	.ndo_get_stats		= mlx4_en_get_stats,
 	.ndo_set_multicast_list	= mlx4_en_set_multicast,
 	.ndo_set_mac_address	= mlx4_en_set_mac,
+	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_change_mtu		= mlx4_en_change_mtu,
 	.ndo_tx_timeout		= mlx4_en_tx_timeout,
 	.ndo_vlan_rx_register	= mlx4_en_vlan_rx_register,
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
index cfeef0f..c1bd040 100644
--- a/drivers/net/mlx4/en_params.c
+++ b/drivers/net/mlx4/en_params.c
@@ -399,8 +399,10 @@
 
 	rx_size = roundup_pow_of_two(param->rx_pending);
 	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
+	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
 	tx_size = roundup_pow_of_two(param->tx_pending);
 	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
+	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
 
 	if (rx_size == priv->prof->rx_ring_size &&
 	    tx_size == priv->prof->tx_ring_size)
@@ -440,8 +442,8 @@
 	struct mlx4_en_dev *mdev = priv->mdev;
 
 	memset(param, 0, sizeof(*param));
-	param->rx_max_pending = mdev->dev->caps.max_rq_sg;
-	param->tx_max_pending = mdev->dev->caps.max_sq_sg;
+	param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
+	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
 	param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size;
 	param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
 }
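
mlx4_en_set_ringparam() now bounds the requested ring sizes in both directions, and get_ringparam advertises the fixed MLX4_EN_MAX_*_SIZE limits instead of the scatter/gather caps. The rounding-and-clamping step amounts to the following sketch (FOO_* constants and foo_ring_size are invented for illustration; clamp_t() is equivalent to the max_t()/min_t() pair used above):

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>

#define FOO_MIN_RX_SIZE	64
#define FOO_MAX_RX_SIZE	8192

/* Round the requested entry count up to a power of two, then bound it. */
static u32 foo_ring_size(u32 requested)
{
	return clamp_t(u32, roundup_pow_of_two(requested),
		       FOO_MIN_RX_SIZE, FOO_MAX_RX_SIZE);
}
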
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index ff4d752..4afd599 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -203,19 +203,21 @@
 
 	/* Optimize the common case when there are no wraparounds */
 	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
-		if (tx_info->linear) {
-			pci_unmap_single(mdev->pdev,
-					 (dma_addr_t) be64_to_cpu(data->addr),
+		if (!tx_info->inl) {
+			if (tx_info->linear) {
+				pci_unmap_single(mdev->pdev,
+					(dma_addr_t) be64_to_cpu(data->addr),
 					 be32_to_cpu(data->byte_count),
 					 PCI_DMA_TODEVICE);
-			++data;
-		}
+				++data;
+			}
 
-		for (i = 0; i < frags; i++) {
-			frag = &skb_shinfo(skb)->frags[i];
-			pci_unmap_page(mdev->pdev,
-				       (dma_addr_t) be64_to_cpu(data[i].addr),
-				       frag->size, PCI_DMA_TODEVICE);
+			for (i = 0; i < frags; i++) {
+				frag = &skb_shinfo(skb)->frags[i];
+				pci_unmap_page(mdev->pdev,
+					(dma_addr_t) be64_to_cpu(data[i].addr),
+					frag->size, PCI_DMA_TODEVICE);
+			}
 		}
 		/* Stamp the freed descriptor */
 		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
@@ -224,27 +226,29 @@
 		}
 
 	} else {
-		if ((void *) data >= end) {
-			data = (struct mlx4_wqe_data_seg *)
-					(ring->buf + ((void *) data - end));
-		}
+		if (!tx_info->inl) {
+			if ((void *) data >= end) {
+				data = (struct mlx4_wqe_data_seg *)
+						(ring->buf + ((void *) data - end));
+			}
 
-		if (tx_info->linear) {
-			pci_unmap_single(mdev->pdev,
-					 (dma_addr_t) be64_to_cpu(data->addr),
+			if (tx_info->linear) {
+				pci_unmap_single(mdev->pdev,
+					(dma_addr_t) be64_to_cpu(data->addr),
 					 be32_to_cpu(data->byte_count),
 					 PCI_DMA_TODEVICE);
-			++data;
-		}
+				++data;
+			}
 
-		for (i = 0; i < frags; i++) {
-			/* Check for wraparound before unmapping */
-			if ((void *) data >= end)
-				data = (struct mlx4_wqe_data_seg *) ring->buf;
-			frag = &skb_shinfo(skb)->frags[i];
-			pci_unmap_page(mdev->pdev,
+			for (i = 0; i < frags; i++) {
+				/* Check for wraparound before unmapping */
+				if ((void *) data >= end)
+					data = (struct mlx4_wqe_data_seg *) ring->buf;
+				frag = &skb_shinfo(skb)->frags[i];
+				pci_unmap_page(mdev->pdev,
 					(dma_addr_t) be64_to_cpu(data->addr),
 					 frag->size, PCI_DMA_TODEVICE);
+			}
 		}
 		/* Stamp the freed descriptor */
 		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
@@ -790,8 +794,11 @@
 			wmb();
 			data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
 		}
-	} else
+		tx_info->inl = 0;
+	} else {
 		build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
+		tx_info->inl = 1;
+	}
 
 	ring->prod += nr_txbb;
 
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 710c79e..6ef2490 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -912,8 +912,8 @@
 	int i;
 
 	if (msi_x) {
-		nreq = min(dev->caps.num_eqs - dev->caps.reserved_eqs,
-			   num_possible_cpus() + 1);
+		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
+			     num_possible_cpus() + 1);
 		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
 		if (!entries)
 			goto no_msi;
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 2e96c7b..e9af32d 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -115,6 +115,10 @@
 };
 #define MLX4_EN_MAX_RX_FRAGS	4
 
+/* Maximum ring sizes */
+#define MLX4_EN_MAX_TX_SIZE	8192
+#define MLX4_EN_MAX_RX_SIZE	8192
+
 /* Minimum ring size for our page-allocation sceme to work */
 #define MLX4_EN_MIN_RX_SIZE	(MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
 #define MLX4_EN_MIN_TX_SIZE	(4096 / TXBB_SIZE)
@@ -202,6 +206,7 @@
 	u32 nr_txbb;
 	u8 linear;
 	u8 data_offset;
+	u8 inl;
 };
 
 
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index 919fb9e..cebdf32 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -107,9 +107,9 @@
 	profile[MLX4_RES_AUXC].num    = request->num_qp;
 	profile[MLX4_RES_SRQ].num     = request->num_srq;
 	profile[MLX4_RES_CQ].num      = request->num_cq;
-	profile[MLX4_RES_EQ].num      = min(dev_cap->max_eqs,
-					    dev_cap->reserved_eqs +
-					    num_possible_cpus() + 1);
+	profile[MLX4_RES_EQ].num      = min_t(unsigned, dev_cap->max_eqs,
+					      dev_cap->reserved_eqs +
+					      num_possible_cpus() + 1);
 	profile[MLX4_RES_DMPT].num    = request->num_mpt;
 	profile[MLX4_RES_CMPT].num    = MLX4_NUM_CMPTS;
 	profile[MLX4_RES_MTT].num     = request->num_mtt;
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 478edb9..c5dec54 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -779,6 +779,22 @@
 
 }
 
+static const struct net_device_ops natsemi_netdev_ops = {
+	.ndo_open		= netdev_open,
+	.ndo_stop		= netdev_close,
+	.ndo_start_xmit		= start_tx,
+	.ndo_get_stats		= get_stats,
+	.ndo_set_multicast_list = set_rx_mode,
+	.ndo_change_mtu		= natsemi_change_mtu,
+	.ndo_do_ioctl		= netdev_ioctl,
+	.ndo_tx_timeout 	= ns_tx_timeout,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= natsemi_poll_controller,
+#endif
+};
+
 static int __devinit natsemi_probe1 (struct pci_dev *pdev,
 	const struct pci_device_id *ent)
 {
@@ -911,20 +927,9 @@
 	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt])
 		np->full_duplex = 1;
 
-	/* The chip-specific entries in the device structure. */
-	dev->open = &netdev_open;
-	dev->hard_start_xmit = &start_tx;
-	dev->stop = &netdev_close;
-	dev->get_stats = &get_stats;
-	dev->set_multicast_list = &set_rx_mode;
-	dev->change_mtu = &natsemi_change_mtu;
-	dev->do_ioctl = &netdev_ioctl;
-	dev->tx_timeout = &ns_tx_timeout;
+	dev->netdev_ops = &natsemi_netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = &natsemi_poll_controller;
-#endif
 	SET_ETHTOOL_OPS(dev, &ethtool_ops);
 
 	if (mtu)
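
natsemi gets the same treatment as the other converted drivers in this merge: the per-field assignments on struct net_device are gathered into one const struct net_device_ops table installed through dev->netdev_ops. A bare-bones sketch of the shape, with hypothetical foo_* handlers (only the eth_* helpers are real kernel functions):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static int foo_open(struct net_device *dev);
static int foo_close(struct net_device *dev);
static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev);

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,
	.ndo_stop		= foo_close,
	.ndo_start_xmit		= foo_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static void foo_setup(struct net_device *dev)
{
	/* Replaces the per-field dev->open = ..., dev->stop = ... assignments. */
	dev->netdev_ops = &foo_netdev_ops;
}
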
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index b572391..7bd6662 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -202,6 +202,7 @@
 	.ndo_get_stats		= ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ei_poll,
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 62f20ba..f090d3b 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -208,6 +208,7 @@
 	.ndo_get_stats		= ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = ei_poll,
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index f8e601c..c11c568 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -308,27 +308,16 @@
 #define netxen_set_cmd_desc_ctxid(cmd_desc, var)	\
 	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
 
-#define netxen_set_cmd_desc_flags(cmd_desc, val)	\
-	(cmd_desc)->flags_opcode = ((cmd_desc)->flags_opcode & \
-		~cpu_to_le16(0x7f)) | cpu_to_le16((val) & 0x7f)
-#define netxen_set_cmd_desc_opcode(cmd_desc, val)	\
-	(cmd_desc)->flags_opcode = ((cmd_desc)->flags_opcode & \
-		~cpu_to_le16((u16)0x3f << 7)) | cpu_to_le16(((val) & 0x3f) << 7)
+#define netxen_set_tx_port(_desc, _port) \
+	(_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0)
 
-#define netxen_set_cmd_desc_num_of_buff(cmd_desc, val)	\
-	(cmd_desc)->num_of_buffers_total_length = \
-		((cmd_desc)->num_of_buffers_total_length & \
-		~cpu_to_le32(0xff)) | cpu_to_le32((val) & 0xff)
-#define netxen_set_cmd_desc_totallength(cmd_desc, val)	\
-	(cmd_desc)->num_of_buffers_total_length = \
-		((cmd_desc)->num_of_buffers_total_length & \
-		~cpu_to_le32((u32)0xffffff << 8)) | \
-		cpu_to_le32(((val) & 0xffffff) << 8)
+#define netxen_set_tx_flags_opcode(_desc, _flags, _opcode) \
+	(_desc)->flags_opcode = \
+	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7))
 
-#define netxen_get_cmd_desc_opcode(cmd_desc)	\
-	((le16_to_cpu((cmd_desc)->flags_opcode) >> 7) & 0x003f)
-#define netxen_get_cmd_desc_totallength(cmd_desc)	\
-	((le32_to_cpu((cmd_desc)->num_of_buffers_total_length) >> 8) & 0xffffff)
+#define netxen_set_tx_frags_len(_desc, _frags, _len) \
+	(_desc)->num_of_buffers_total_length = \
+	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8))
 
 struct cmd_desc_type0 {
 	u8 tcp_hdr_offset;	/* For LSO only */
@@ -510,7 +499,8 @@
 	NETXEN_BRDTYPE_P3_10G_SFP_CT = 0x002a,
 	NETXEN_BRDTYPE_P3_10G_SFP_QT = 0x002b,
 	NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031,
-	NETXEN_BRDTYPE_P3_10G_XFP = 0x0032
+	NETXEN_BRDTYPE_P3_10G_XFP = 0x0032,
+	NETXEN_BRDTYPE_P3_10G_TP = 0x0080
 
 } netxen_brdtype_t;
 
@@ -757,7 +747,7 @@
  */
 struct netxen_skb_frag {
 	u64 dma;
-	u32 length;
+	ulong length;
 };
 
 #define _netxen_set_bits(config_word, start, bits, val)	{\
@@ -783,13 +773,7 @@
 struct netxen_cmd_buffer {
 	struct sk_buff *skb;
 	struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
-	u32 total_length;
-	u32 mss;
-	u16 port;
-	u8 cmd;
-	u8 frag_count;
-	unsigned long time_stamp;
-	u32 state;
+	u32 frag_count;
 };
 
 /* In rx_buffer, we do not need multiple fragments as is a single buffer */
@@ -876,7 +860,6 @@
 	u32 skb_size;
 	struct netxen_rx_buffer *rx_buf_arr;	/* rx buffers for receive   */
 	struct list_head free_list;
-	int begin_alloc;
 };
 
 /*
@@ -995,31 +978,31 @@
  */
 
 typedef struct {
-	u64 host_phys_addr;	/* Ring base addr */
-	u32 ring_size;		/* Ring entries */
-	u16 msi_index;
-	u16 rsvd;		/* Padding */
+	__le64 host_phys_addr;	/* Ring base addr */
+	__le32 ring_size;		/* Ring entries */
+	__le16 msi_index;
+	__le16 rsvd;		/* Padding */
 } nx_hostrq_sds_ring_t;
 
 typedef struct {
-	u64 host_phys_addr;	/* Ring base addr */
-	u64 buff_size;		/* Packet buffer size */
-	u32 ring_size;		/* Ring entries */
-	u32 ring_kind;		/* Class of ring */
+	__le64 host_phys_addr;	/* Ring base addr */
+	__le64 buff_size;		/* Packet buffer size */
+	__le32 ring_size;		/* Ring entries */
+	__le32 ring_kind;		/* Class of ring */
 } nx_hostrq_rds_ring_t;
 
 typedef struct {
-	u64 host_rsp_dma_addr;	/* Response dma'd here */
-	u32 capabilities[4];	/* Flag bit vector */
-	u32 host_int_crb_mode;	/* Interrupt crb usage */
-	u32 host_rds_crb_mode;	/* RDS crb usage */
+	__le64 host_rsp_dma_addr;	/* Response dma'd here */
+	__le32 capabilities[4];	/* Flag bit vector */
+	__le32 host_int_crb_mode;	/* Interrupt crb usage */
+	__le32 host_rds_crb_mode;	/* RDS crb usage */
 	/* These ring offsets are relative to data[0] below */
-	u32 rds_ring_offset;	/* Offset to RDS config */
-	u32 sds_ring_offset;	/* Offset to SDS config */
-	u16 num_rds_rings;	/* Count of RDS rings */
-	u16 num_sds_rings;	/* Count of SDS rings */
-	u16 rsvd1;		/* Padding */
-	u16 rsvd2;		/* Padding */
+	__le32 rds_ring_offset;	/* Offset to RDS config */
+	__le32 sds_ring_offset;	/* Offset to SDS config */
+	__le16 num_rds_rings;	/* Count of RDS rings */
+	__le16 num_sds_rings;	/* Count of SDS rings */
+	__le16 rsvd1;		/* Padding */
+	__le16 rsvd2;		/* Padding */
 	u8  reserved[128]; 	/* reserve space for future expansion*/
 	/* MUST BE 64-bit aligned.
 	   The following is packed:
@@ -1029,24 +1012,24 @@
 } nx_hostrq_rx_ctx_t;
 
 typedef struct {
-	u32 host_producer_crb;	/* Crb to use */
-	u32 rsvd1;		/* Padding */
+	__le32 host_producer_crb;	/* Crb to use */
+	__le32 rsvd1;		/* Padding */
 } nx_cardrsp_rds_ring_t;
 
 typedef struct {
-	u32 host_consumer_crb;	/* Crb to use */
-	u32 interrupt_crb;	/* Crb to use */
+	__le32 host_consumer_crb;	/* Crb to use */
+	__le32 interrupt_crb;	/* Crb to use */
 } nx_cardrsp_sds_ring_t;
 
 typedef struct {
 	/* These ring offsets are relative to data[0] below */
-	u32 rds_ring_offset;	/* Offset to RDS config */
-	u32 sds_ring_offset;	/* Offset to SDS config */
-	u32 host_ctx_state;	/* Starting State */
-	u32 num_fn_per_port;	/* How many PCI fn share the port */
-	u16 num_rds_rings;	/* Count of RDS rings */
-	u16 num_sds_rings;	/* Count of SDS rings */
-	u16 context_id;		/* Handle for context */
+	__le32 rds_ring_offset;	/* Offset to RDS config */
+	__le32 sds_ring_offset;	/* Offset to SDS config */
+	__le32 host_ctx_state;	/* Starting State */
+	__le32 num_fn_per_port;	/* How many PCI fn share the port */
+	__le16 num_rds_rings;	/* Count of RDS rings */
+	__le16 num_sds_rings;	/* Count of SDS rings */
+	__le16 context_id;		/* Handle for context */
 	u8  phys_port;		/* Physical id of port */
 	u8  virt_port;		/* Virtual/Logical id of port */
 	u8  reserved[128];	/* save space for future expansion */
@@ -1072,34 +1055,34 @@
  */
 
 typedef struct {
-	u64 host_phys_addr;	/* Ring base addr */
-	u32 ring_size;		/* Ring entries */
-	u32 rsvd;		/* Padding */
+	__le64 host_phys_addr;	/* Ring base addr */
+	__le32 ring_size;		/* Ring entries */
+	__le32 rsvd;		/* Padding */
 } nx_hostrq_cds_ring_t;
 
 typedef struct {
-	u64 host_rsp_dma_addr;	/* Response dma'd here */
-	u64 cmd_cons_dma_addr;	/*  */
-	u64 dummy_dma_addr;	/*  */
-	u32 capabilities[4];	/* Flag bit vector */
-	u32 host_int_crb_mode;	/* Interrupt crb usage */
-	u32 rsvd1;		/* Padding */
-	u16 rsvd2;		/* Padding */
-	u16 interrupt_ctl;
-	u16 msi_index;
-	u16 rsvd3;		/* Padding */
+	__le64 host_rsp_dma_addr;	/* Response dma'd here */
+	__le64 cmd_cons_dma_addr;	/*  */
+	__le64 dummy_dma_addr;	/*  */
+	__le32 capabilities[4];	/* Flag bit vector */
+	__le32 host_int_crb_mode;	/* Interrupt crb usage */
+	__le32 rsvd1;		/* Padding */
+	__le16 rsvd2;		/* Padding */
+	__le16 interrupt_ctl;
+	__le16 msi_index;
+	__le16 rsvd3;		/* Padding */
 	nx_hostrq_cds_ring_t cds_ring;	/* Desc of cds ring */
 	u8  reserved[128];	/* future expansion */
 } nx_hostrq_tx_ctx_t;
 
 typedef struct {
-	u32 host_producer_crb;	/* Crb to use */
-	u32 interrupt_crb;	/* Crb to use */
+	__le32 host_producer_crb;	/* Crb to use */
+	__le32 interrupt_crb;	/* Crb to use */
 } nx_cardrsp_cds_ring_t;
 
 typedef struct {
-	u32 host_ctx_state;	/* Starting state */
-	u16 context_id;		/* Handle for context */
+	__le32 host_ctx_state;	/* Starting state */
+	__le16 context_id;		/* Handle for context */
 	u8  phys_port;		/* Physical id of port */
 	u8  virt_port;		/* Virtual/Logical id of port */
 	nx_cardrsp_cds_ring_t cds_ring;	/* Card cds settings */
@@ -1202,9 +1185,9 @@
 #define VPORT_MISS_MODE_ACCEPT_MULTI	2 /* accept unmatched multicast */
 
 typedef struct {
-	u64 qhdr;
-	u64 req_hdr;
-	u64 words[6];
+	__le64 qhdr;
+	__le64 req_hdr;
+	__le64 words[6];
 } nx_nic_req_t;
 
 typedef struct {
@@ -1486,8 +1469,6 @@
 
 void netxen_initialize_adapter_ops(struct netxen_adapter *adapter);
 int netxen_init_firmware(struct netxen_adapter *adapter);
-void netxen_tso_check(struct netxen_adapter *adapter,
-		      struct cmd_desc_type0 *desc, struct sk_buff *skb);
 void netxen_nic_clear_stats(struct netxen_adapter *adapter);
 void netxen_watchdog_task(struct work_struct *work);
 void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx,
@@ -1496,6 +1477,7 @@
 u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max);
 void netxen_p2_nic_set_multi(struct net_device *netdev);
 void netxen_p3_nic_set_multi(struct net_device *netdev);
+void netxen_p3_free_mac_list(struct netxen_adapter *adapter);
 int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32);
 int netxen_config_intr_coalesce(struct netxen_adapter *adapter);
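
A minimal user-space sketch (not part of the patch; names are illustrative) of the rule the __le32/__le64 annotations above enforce: fields the NIC firmware reads keep a fixed little-endian layout, and the CPU only touches them through explicit conversions, so the driver behaves identically on little- and big-endian hosts and sparse can flag any missed conversion.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* store/load a 32-bit value little-endian regardless of host byte order */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v;
	p[1] = v >> 8;
	p[2] = v >> 16;
	p[3] = v >> 24;
}

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* rough analogue of nx_hostrq_sds_ring_t: every field the card sees is LE */
struct demo_ring_cfg {
	uint8_t host_phys_addr[8];	/* __le64 in the real structure */
	uint8_t ring_size[4];		/* __le32 */
	uint8_t msi_index[2];		/* __le16 */
	uint8_t rsvd[2];
};

int main(void)
{
	struct demo_ring_cfg cfg;

	memset(&cfg, 0, sizeof(cfg));
	put_le32(cfg.ring_size, 1024);	/* cpu_to_le32() in the driver */
	printf("ring_size as the card reads it: %u\n", get_le32(cfg.ring_size));
	return 0;
}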
 
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 64b5164..746bdb4 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -76,7 +76,7 @@
 static u32
 netxen_poll_rsp(struct netxen_adapter *adapter)
 {
-	u32 raw_rsp, rsp = NX_CDRP_RSP_OK;
+	u32 rsp = NX_CDRP_RSP_OK;
 	int	timeout = 0;
 
 	do {
@@ -86,10 +86,7 @@
 		if (++timeout > NX_OS_CRB_RETRY_COUNT)
 			return NX_CDRP_RSP_TIMEOUT;
 
-		netxen_nic_read_w1(adapter, NX_CDRP_CRB_OFFSET,
-				&raw_rsp);
-
-		rsp = le32_to_cpu(raw_rsp);
+		netxen_nic_read_w1(adapter, NX_CDRP_CRB_OFFSET, &rsp);
 	} while (!NX_CDRP_IS_RSP(rsp));
 
 	return rsp;
@@ -109,20 +106,16 @@
 	if (netxen_api_lock(adapter))
 		return NX_RCODE_TIMEOUT;
 
-	netxen_nic_write_w1(adapter, NX_SIGN_CRB_OFFSET,
-			cpu_to_le32(signature));
+	netxen_nic_write_w1(adapter, NX_SIGN_CRB_OFFSET, signature);
 
-	netxen_nic_write_w1(adapter, NX_ARG1_CRB_OFFSET,
-			cpu_to_le32(arg1));
+	netxen_nic_write_w1(adapter, NX_ARG1_CRB_OFFSET, arg1);
 
-	netxen_nic_write_w1(adapter, NX_ARG2_CRB_OFFSET,
-			cpu_to_le32(arg2));
+	netxen_nic_write_w1(adapter, NX_ARG2_CRB_OFFSET, arg2);
 
-	netxen_nic_write_w1(adapter, NX_ARG3_CRB_OFFSET,
-			cpu_to_le32(arg3));
+	netxen_nic_write_w1(adapter, NX_ARG3_CRB_OFFSET, arg3);
 
 	netxen_nic_write_w1(adapter, NX_CDRP_CRB_OFFSET,
-			cpu_to_le32(NX_CDRP_FORM_CMD(cmd)));
+			NX_CDRP_FORM_CMD(cmd));
 
 	rsp = netxen_poll_rsp(adapter);
 
@@ -133,7 +126,6 @@
 		rcode = NX_RCODE_TIMEOUT;
 	} else if (rsp == NX_CDRP_RSP_FAIL) {
 		netxen_nic_read_w1(adapter, NX_ARG1_CRB_OFFSET, &rcode);
-		rcode = le32_to_cpu(rcode);
 
 		printk(KERN_ERR "%s: failed card response code:0x%x\n",
 				netxen_nic_driver_name, rcode);
@@ -183,7 +175,7 @@
 
 	int i, nrds_rings, nsds_rings;
 	size_t rq_size, rsp_size;
-	u32 cap, reg;
+	u32 cap, reg, val;
 
 	int err;
 
@@ -225,11 +217,14 @@
 
 	prq->num_rds_rings = cpu_to_le16(nrds_rings);
 	prq->num_sds_rings = cpu_to_le16(nsds_rings);
-	prq->rds_ring_offset = 0;
-	prq->sds_ring_offset = prq->rds_ring_offset +
-		(sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
+	prq->rds_ring_offset = cpu_to_le32(0);
 
-	prq_rds = (nx_hostrq_rds_ring_t *)(prq->data + prq->rds_ring_offset);
+	val = le32_to_cpu(prq->rds_ring_offset) +
+		(sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
+	prq->sds_ring_offset = cpu_to_le32(val);
+
+	prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
+			le32_to_cpu(prq->rds_ring_offset));
 
 	for (i = 0; i < nrds_rings; i++) {
 
@@ -241,17 +236,14 @@
 		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
 	}
 
-	prq_sds = (nx_hostrq_sds_ring_t *)(prq->data + prq->sds_ring_offset);
+	prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
+			le32_to_cpu(prq->sds_ring_offset));
 
 	prq_sds[0].host_phys_addr =
 		cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr);
 	prq_sds[0].ring_size = cpu_to_le32(adapter->max_rx_desc_count);
 	/* only one msix vector for now */
-	prq_sds[0].msi_index = cpu_to_le32(0);
-
-	/* now byteswap offsets */
-	prq->rds_ring_offset = cpu_to_le32(prq->rds_ring_offset);
-	prq->sds_ring_offset = cpu_to_le32(prq->sds_ring_offset);
+	prq_sds[0].msi_index = cpu_to_le16(0);
 
 	phys_addr = hostrq_phys_addr;
 	err = netxen_issue_cmd(adapter,
@@ -269,9 +261,9 @@
 
 
 	prsp_rds = ((nx_cardrsp_rds_ring_t *)
-			 &prsp->data[prsp->rds_ring_offset]);
+			 &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
 
-	for (i = 0; i < le32_to_cpu(prsp->num_rds_rings); i++) {
+	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
 		rds_ring = &recv_ctx->rds_rings[i];
 
 		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
@@ -279,7 +271,7 @@
 	}
 
 	prsp_sds = ((nx_cardrsp_sds_ring_t *)
-			&prsp->data[prsp->sds_ring_offset]);
+			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
 	reg = le32_to_cpu(prsp_sds[0].host_consumer_crb);
 	recv_ctx->crb_sts_consumer = NETXEN_NIC_REG(reg - 0x200);
 
@@ -288,7 +280,7 @@
 
 	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
 	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
-	recv_ctx->virt_port = le16_to_cpu(prsp->virt_port);
+	recv_ctx->virt_port = prsp->virt_port;
 
 out_free_rsp:
 	pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index e45ce29..c0bd40f 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -136,11 +136,9 @@
 
 		ecmd->port = PORT_TP;
 
-		if (netif_running(dev)) {
-			ecmd->speed = adapter->link_speed;
-			ecmd->duplex = adapter->link_duplex;
-			ecmd->autoneg = adapter->link_autoneg;
-		}
+		ecmd->speed = adapter->link_speed;
+		ecmd->duplex = adapter->link_duplex;
+		ecmd->autoneg = adapter->link_autoneg;
 
 	} else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
 		u32 val;
@@ -171,7 +169,7 @@
 	} else
 		return -EIO;
 
-	ecmd->phy_address = adapter->portnum;
+	ecmd->phy_address = adapter->physical_port;
 	ecmd->transceiver = XCVR_EXTERNAL;
 
 	switch ((netxen_brdtype_t) boardinfo->board_type) {
@@ -180,13 +178,13 @@
 	case NETXEN_BRDTYPE_P3_REF_QG:
 	case NETXEN_BRDTYPE_P3_4_GB:
 	case NETXEN_BRDTYPE_P3_4_GB_MM:
-	case NETXEN_BRDTYPE_P3_10000_BASE_T:
 
 		ecmd->supported |= SUPPORTED_Autoneg;
 		ecmd->advertising |= ADVERTISED_Autoneg;
 	case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
 	case NETXEN_BRDTYPE_P3_10G_CX4:
 	case NETXEN_BRDTYPE_P3_10G_CX4_LP:
+	case NETXEN_BRDTYPE_P3_10000_BASE_T:
 		ecmd->supported |= SUPPORTED_TP;
 		ecmd->advertising |= ADVERTISED_TP;
 		ecmd->port = PORT_TP;
@@ -204,16 +202,33 @@
 		ecmd->port = PORT_FIBRE;
 		ecmd->autoneg = AUTONEG_DISABLE;
 		break;
-	case NETXEN_BRDTYPE_P2_SB31_10G:
 	case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
 	case NETXEN_BRDTYPE_P3_10G_SFP_CT:
 	case NETXEN_BRDTYPE_P3_10G_SFP_QT:
+		ecmd->advertising |= ADVERTISED_TP;
+		ecmd->supported |= SUPPORTED_TP;
+	case NETXEN_BRDTYPE_P2_SB31_10G:
 	case NETXEN_BRDTYPE_P3_10G_XFP:
 		ecmd->supported |= SUPPORTED_FIBRE;
 		ecmd->advertising |= ADVERTISED_FIBRE;
 		ecmd->port = PORT_FIBRE;
 		ecmd->autoneg = AUTONEG_DISABLE;
 		break;
+	case NETXEN_BRDTYPE_P3_10G_TP:
+		if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
+			ecmd->autoneg = AUTONEG_DISABLE;
+			ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+			ecmd->advertising |=
+				(ADVERTISED_FIBRE | ADVERTISED_TP);
+			ecmd->port = PORT_FIBRE;
+		} else {
+			ecmd->autoneg = AUTONEG_ENABLE;
+			ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+			ecmd->advertising |=
+				(ADVERTISED_TP | ADVERTISED_Autoneg);
+			ecmd->port = PORT_TP;
+		}
+		break;
 	default:
 		printk(KERN_ERR "netxen-nic: Unsupported board model %d\n",
 		       (netxen_brdtype_t) boardinfo->board_type);
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index aa6e603..821cff6 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -503,17 +503,15 @@
 
 	i = 0;
 
+	netif_tx_lock_bh(adapter->netdev);
+
 	producer = adapter->cmd_producer;
 	do {
 		cmd_desc = &cmd_desc_arr[i];
 
 		pbuf = &adapter->cmd_buf_arr[producer];
-		pbuf->mss = 0;
-		pbuf->total_length = 0;
 		pbuf->skb = NULL;
-		pbuf->cmd = 0;
 		pbuf->frag_count = 0;
-		pbuf->port = 0;
 
 		/* adapter->ahw.cmd_desc_head[producer] = *cmd_desc; */
 		memcpy(&adapter->ahw.cmd_desc_head[producer],
@@ -531,6 +529,8 @@
 
 	netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer);
 
+	netif_tx_unlock_bh(adapter->netdev);
+
 	return 0;
 }
 
@@ -539,16 +539,19 @@
 {
 	struct netxen_adapter *adapter = netdev_priv(dev);
 	nx_nic_req_t req;
-	nx_mac_req_t mac_req;
+	nx_mac_req_t *mac_req;
+	u64 word;
 	int rv;
 
 	memset(&req, 0, sizeof(nx_nic_req_t));
-	req.qhdr |= (NX_NIC_REQUEST << 23);
-	req.req_hdr |= NX_MAC_EVENT;
-	req.req_hdr |= ((u64)adapter->portnum << 16);
-	mac_req.op = op;
-	memcpy(&mac_req.mac_addr, addr, 6);
-	req.words[0] = cpu_to_le64(*(u64 *)&mac_req);
+	req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23);
+
+	word = NX_MAC_EVENT | ((u64)adapter->portnum << 16);
+	req.req_hdr = cpu_to_le64(word);
+
+	mac_req = (nx_mac_req_t *)&req.words[0];
+	mac_req->op = op;
+	memcpy(mac_req->mac_addr, addr, 6);
 
 	rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
 	if (rv != 0) {
@@ -612,18 +615,35 @@
 int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
 {
 	nx_nic_req_t req;
+	u64 word;
 
 	memset(&req, 0, sizeof(nx_nic_req_t));
 
-	req.qhdr |= (NX_HOST_REQUEST << 23);
-	req.req_hdr |= NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE;
-	req.req_hdr |= ((u64)adapter->portnum << 16);
+	req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+	word = NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
+			((u64)adapter->portnum << 16);
+	req.req_hdr = cpu_to_le64(word);
+
 	req.words[0] = cpu_to_le64(mode);
 
 	return netxen_send_cmd_descs(adapter,
 				(struct cmd_desc_type0 *)&req, 1);
 }
 
+void netxen_p3_free_mac_list(struct netxen_adapter *adapter)
+{
+	nx_mac_list_t *cur, *next;
+
+	cur = adapter->mac_list;
+
+	while (cur) {
+		next = cur->next;
+		kfree(cur);
+		cur = next;
+	}
+}
+
 #define	NETXEN_CONFIG_INTR_COALESCE	3
 
 /*
@@ -632,13 +652,15 @@
 int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
 {
 	nx_nic_req_t req;
+	u64 word;
 	int rv;
 
 	memset(&req, 0, sizeof(nx_nic_req_t));
 
-	req.qhdr |= (NX_NIC_REQUEST << 23);
-	req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE;
-	req.req_hdr |= ((u64)adapter->portnum << 16);
+	req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23);
+
+	word = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
+	req.req_hdr = cpu_to_le64(word);
 
 	memcpy(&req.words[0], &adapter->coal, sizeof(adapter->coal));
 
@@ -772,13 +794,10 @@
 	adapter->hw_read_wx(adapter, crbaddr, &mac_lo, 4);
 	adapter->hw_read_wx(adapter, crbaddr+4, &mac_hi, 4);
 
-	mac_hi = cpu_to_le32(mac_hi);
-	mac_lo = cpu_to_le32(mac_lo);
-
 	if (pci_func & 1)
-		*mac = ((mac_lo >> 16) | ((u64)mac_hi << 16));
+		*mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
 	else
-		*mac = ((mac_lo) | ((u64)mac_hi << 32));
+		*mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
 
 	return 0;
 }
@@ -937,7 +956,7 @@
 {
 	int i;
 	u32 data, size = 0;
-	u32 flashaddr = NETXEN_BOOTLD_START, memaddr = NETXEN_BOOTLD_START;
+	u32 flashaddr = NETXEN_BOOTLD_START;
 
 	size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START)/4;
 
@@ -949,10 +968,8 @@
 		if (netxen_rom_fast_read(adapter, flashaddr, (int *)&data) != 0)
 			return -EIO;
 
-		adapter->pci_mem_write(adapter, memaddr, &data, 4);
+		adapter->pci_mem_write(adapter, flashaddr, &data, 4);
 		flashaddr += 4;
-		memaddr += 4;
-		cond_resched();
 	}
 	msleep(1);
 
@@ -2034,7 +2051,13 @@
 		rv = -1;
 	}
 
-	DPRINTK(INFO, "Discovered board type:0x%x  ", boardinfo->board_type);
+	if (boardinfo->board_type == NETXEN_BRDTYPE_P3_4_GB_MM) {
+		u32 gpio = netxen_nic_reg_read(adapter,
+				NETXEN_ROMUSB_GLB_PAD_GPIO_I);
+		if ((gpio & 0x8000) == 0)
+			boardinfo->board_type = NETXEN_BRDTYPE_P3_10G_TP;
+	}
+
 	switch ((netxen_brdtype_t) boardinfo->board_type) {
 	case NETXEN_BRDTYPE_P2_SB35_4G:
 		adapter->ahw.board_type = NETXEN_NIC_GBE;
@@ -2053,7 +2076,6 @@
 	case NETXEN_BRDTYPE_P3_10G_SFP_QT:
 	case NETXEN_BRDTYPE_P3_10G_XFP:
 	case NETXEN_BRDTYPE_P3_10000_BASE_T:
-
 		adapter->ahw.board_type = NETXEN_NIC_XGBE;
 		break;
 	case NETXEN_BRDTYPE_P1_BD:
@@ -2063,9 +2085,12 @@
 	case NETXEN_BRDTYPE_P3_REF_QG:
 	case NETXEN_BRDTYPE_P3_4_GB:
 	case NETXEN_BRDTYPE_P3_4_GB_MM:
-
 		adapter->ahw.board_type = NETXEN_NIC_GBE;
 		break;
+	case NETXEN_BRDTYPE_P3_10G_TP:
+		adapter->ahw.board_type = (adapter->portnum < 2) ?
+			NETXEN_NIC_XGBE : NETXEN_NIC_GBE;
+		break;
 	default:
 		printk("%s: Unknown(%x)\n", netxen_nic_driver_name,
 		       boardinfo->board_type);
@@ -2110,12 +2135,16 @@
 {
 	__u32 status;
 	__u32 autoneg;
-	__u32 mode;
 	__u32 port_mode;
 
-	netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode);
-	if (netxen_get_niu_enable_ge(mode)) {	/* Gb 10/100/1000 Mbps mode */
+	if (!netif_carrier_ok(adapter->netdev)) {
+		adapter->link_speed   = 0;
+		adapter->link_duplex  = -1;
+		adapter->link_autoneg = AUTONEG_ENABLE;
+		return;
+	}
 
+	if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
 		adapter->hw_read_wx(adapter,
 				NETXEN_PORT_MODE_ADDR, &port_mode, 4);
 		if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
@@ -2141,7 +2170,7 @@
 					adapter->link_speed = SPEED_1000;
 					break;
 				default:
-					adapter->link_speed = -1;
+					adapter->link_speed = 0;
 					break;
 				}
 				switch (netxen_get_phy_duplex(status)) {
@@ -2164,7 +2193,7 @@
 				goto link_down;
 		} else {
 		      link_down:
-			adapter->link_speed = -1;
+			adapter->link_speed = 0;
 			adapter->link_duplex = -1;
 		}
 	}
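
A tiny user-space sketch (the node type is made up, not the driver's nx_mac_list_t) of the teardown pattern netxen_p3_free_mac_list() introduces above: cache the next pointer before freeing the current node so the walk survives the free.

#include <stdlib.h>

struct demo_mac_node {
	struct demo_mac_node *next;
	unsigned char mac_addr[6];
};

static void demo_free_mac_list(struct demo_mac_node *head)
{
	struct demo_mac_node *cur = head, *next;

	while (cur) {
		next = cur->next;	/* read next before the node is freed */
		free(cur);
		cur = next;
	}
}

int main(void)
{
	struct demo_mac_node *a = calloc(1, sizeof(*a));
	struct demo_mac_node *b = calloc(1, sizeof(*b));

	if (a && b) {
		a->next = b;		/* two-element list: a -> b -> NULL */
		demo_free_mac_list(a);
	} else {
		free(a);
		free(b);
	}
	return 0;
}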
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index d924468..ca7c8d8 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -308,7 +308,6 @@
 			}
 			memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
 			INIT_LIST_HEAD(&rds_ring->free_list);
-			rds_ring->begin_alloc = 0;
 			/*
 			 * Now go through all of them, set reference handles
 			 * and put them in the queues.
@@ -439,6 +438,8 @@
 	long timeout = 0;
 	long done = 0;
 
+	cond_resched();
+
 	while (done == 0) {
 		done = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_GLB_STATUS);
 		done &= 2;
@@ -533,12 +534,9 @@
 static int do_rom_fast_read(struct netxen_adapter *adapter,
 			    int addr, int *valp)
 {
-	cond_resched();
-
 	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
-	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
-	udelay(100);		/* prevent bursting on CRB */
 	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
 	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
 	if (netxen_wait_rom_done(adapter)) {
 		printk("Error waiting for rom done\n");
@@ -546,7 +544,7 @@
 	}
 	/* reset abyte_cnt and dummy_byte_cnt */
 	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
-	udelay(100);		/* prevent bursting on CRB */
+	udelay(10);
 	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
 
 	*valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA);
@@ -884,14 +882,16 @@
 int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
 {
 	int addr, val;
-	int i, init_delay = 0;
+	int i, n, init_delay = 0;
 	struct crb_addr_pair *buf;
-	unsigned offset, n;
+	unsigned offset;
 	u32 off;
 
 	/* resetall */
+	rom_lock(adapter);
 	netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET,
 				    0xffffffff);
+	netxen_rom_unlock(adapter);
 
 	if (verbose) {
 		if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0)
@@ -910,7 +910,7 @@
 
 	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
 		if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
-			(n != 0xcafecafeUL) ||
+			(n != 0xcafecafe) ||
 			netxen_rom_fast_read(adapter, 4, &n) != 0) {
 			printk(KERN_ERR "%s: ERROR Reading crb_init area: "
 					"n: %08x\n", netxen_nic_driver_name, n);
@@ -975,6 +975,14 @@
 			/* do not reset PCI */
 			if (off == (ROMUSB_GLB + 0xbc))
 				continue;
+			if (off == (ROMUSB_GLB + 0xa8))
+				continue;
+			if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
+				continue;
+			if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
+				continue;
+			if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
+				continue;
 			if (off == (NETXEN_CRB_PEG_NET_1 + 0x18))
 				buf[i].data = 0x1020;
 			/* skip the function enable register */
@@ -992,23 +1000,21 @@
 			continue;
 		}
 
+		init_delay = 1;
 		/* After writing this register, HW needs time for CRB */
 		/* to quiet down (else crb_window returns 0xffffffff) */
 		if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
-			init_delay = 1;
+			init_delay = 1000;
 			if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
 				/* hold xdma in reset also */
 				buf[i].data = NETXEN_NIC_XDMA_RESET;
+				buf[i].data = 0x8000ff;
 			}
 		}
 
 		adapter->hw_write_wx(adapter, off, &buf[i].data, 4);
 
-		if (init_delay == 1) {
-			msleep(1000);
-			init_delay = 0;
-		}
-		msleep(1);
+		msleep(init_delay);
 	}
 	kfree(buf);
 
@@ -1277,7 +1283,7 @@
 
 		dev_kfree_skb_any(skb);
 		for (i = 0; i < nr_frags; i++) {
-			index = frag_desc->frag_handles[i];
+			index = le16_to_cpu(frag_desc->frag_handles[i]);
 			skb = netxen_process_rxbuf(adapter,
 					rds_ring, index, cksum);
 			if (skb)
@@ -1428,7 +1434,6 @@
 	struct rcv_desc *pdesc;
 	struct netxen_rx_buffer *buffer;
 	int count = 0;
-	int index = 0;
 	netxen_ctx_msg msg = 0;
 	dma_addr_t dma;
 	struct list_head *head;
@@ -1436,7 +1441,6 @@
 	rds_ring = &recv_ctx->rds_rings[ringid];
 
 	producer = rds_ring->producer;
-	index = rds_ring->begin_alloc;
 	head = &rds_ring->free_list;
 
 	/* We can start writing rx descriptors into the phantom memory. */
@@ -1444,39 +1448,37 @@
 
 		skb = dev_alloc_skb(rds_ring->skb_size);
 		if (unlikely(!skb)) {
-			rds_ring->begin_alloc = index;
 			break;
 		}
 
+		if (!adapter->ahw.cut_through)
+			skb_reserve(skb, 2);
+
+		dma = pci_map_single(pdev, skb->data,
+				rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(pdev, dma)) {
+			dev_kfree_skb_any(skb);
+			break;
+		}
+
+		count++;
 		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
 		list_del(&buffer->list);
 
-		count++;	/* now there should be no failure */
-		pdesc = &rds_ring->desc_head[producer];
-
-		if (!adapter->ahw.cut_through)
-			skb_reserve(skb, 2);
-		/* This will be setup when we receive the
-		 * buffer after it has been filled  FSL  TBD TBD
-		 * skb->dev = netdev;
-		 */
-		dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
-				     PCI_DMA_FROMDEVICE);
-		pdesc->addr_buffer = cpu_to_le64(dma);
 		buffer->skb = skb;
 		buffer->state = NETXEN_BUFFER_BUSY;
 		buffer->dma = dma;
+
 		/* make a rcv descriptor  */
+		pdesc = &rds_ring->desc_head[producer];
+		pdesc->addr_buffer = cpu_to_le64(dma);
 		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
 		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
-		DPRINTK(INFO, "done writing descripter\n");
-		producer =
-		    get_next_index(producer, rds_ring->max_rx_desc_count);
-		index = get_next_index(index, rds_ring->max_rx_desc_count);
+
+		producer = get_next_index(producer, rds_ring->max_rx_desc_count);
 	}
 	/* if we did allocate buffers, then write the count to Phantom */
 	if (count) {
-		rds_ring->begin_alloc = index;
 		rds_ring->producer = producer;
 			/* Window = 1 */
 		adapter->pci_write_normalize(adapter,
@@ -1515,49 +1517,50 @@
 	struct rcv_desc *pdesc;
 	struct netxen_rx_buffer *buffer;
 	int count = 0;
-	int index = 0;
 	struct list_head *head;
+	dma_addr_t dma;
 
 	rds_ring = &recv_ctx->rds_rings[ringid];
 
 	producer = rds_ring->producer;
-	index = rds_ring->begin_alloc;
 	head = &rds_ring->free_list;
 	/* We can start writing rx descriptors into the phantom memory. */
 	while (!list_empty(head)) {
 
 		skb = dev_alloc_skb(rds_ring->skb_size);
 		if (unlikely(!skb)) {
-			rds_ring->begin_alloc = index;
 			break;
 		}
 
+		if (!adapter->ahw.cut_through)
+			skb_reserve(skb, 2);
+
+		dma = pci_map_single(pdev, skb->data,
+				rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(pdev, dma)) {
+			dev_kfree_skb_any(skb);
+			break;
+		}
+
+		count++;
 		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
 		list_del(&buffer->list);
 
-		count++;	/* now there should be no failure */
-		pdesc = &rds_ring->desc_head[producer];
-		if (!adapter->ahw.cut_through)
-			skb_reserve(skb, 2);
 		buffer->skb = skb;
 		buffer->state = NETXEN_BUFFER_BUSY;
-		buffer->dma = pci_map_single(pdev, skb->data,
-					     rds_ring->dma_size,
-					     PCI_DMA_FROMDEVICE);
+		buffer->dma = dma;
 
 		/* make a rcv descriptor  */
+		pdesc = &rds_ring->desc_head[producer];
 		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
 		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
 		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
-		producer =
-		    get_next_index(producer, rds_ring->max_rx_desc_count);
-		index = get_next_index(index, rds_ring->max_rx_desc_count);
-		buffer = &rds_ring->rx_buf_arr[index];
+
+		producer = get_next_index(producer, rds_ring->max_rx_desc_count);
 	}
 
 	/* if we did allocate buffers, then write the count to Phantom */
 	if (count) {
-		rds_ring->begin_alloc = index;
 		rds_ring->producer = producer;
 			/* Window = 1 */
 		adapter->pci_write_normalize(adapter,
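
A kernel-style sketch (hypothetical helper and buffer type, not the driver's code) of the refill ordering both rewritten netxen_post_rx_buffers loops now follow: allocate and DMA-map the skb before claiming an entry from the free list, so a mapping failure can simply stop the refill pass without unwinding any ring state.

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

struct demo_rx_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
};

/* returns 0 on success, -ENOMEM if the skb could not be allocated/mapped */
static int demo_refill_one(struct pci_dev *pdev, struct demo_rx_buffer *buf,
			   unsigned int dma_size)
{
	struct sk_buff *skb;
	dma_addr_t dma;

	skb = dev_alloc_skb(dma_size + 2);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, 2);	/* IP-alignment headroom (conditional in the driver) */

	dma = pci_map_single(pdev, skb->data, dma_size, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma)) {
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	/* only now is it safe to claim the ring entry */
	buf->skb = skb;
	buf->dma = dma;
	return 0;
}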
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index ba01524..d854f07 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -39,7 +39,9 @@
 #include "netxen_nic_phan_reg.h"
 
 #include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
 #include <net/ip.h>
+#include <linux/ipv6.h>
 
 MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
 MODULE_LICENSE("GPL");
@@ -242,7 +244,7 @@
 	case NETXEN_BRDTYPE_P3_4_GB:
 	case NETXEN_BRDTYPE_P3_4_GB_MM:
 		adapter->msix_supported = !!use_msi_x;
-		adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
+		adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
 		break;
 
 	case NETXEN_BRDTYPE_P2_SB35_4G:
@@ -251,6 +253,14 @@
 		adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
 		break;
 
+	case NETXEN_BRDTYPE_P3_10G_TP:
+		adapter->msix_supported = !!use_msi_x;
+		if (adapter->ahw.board_type == NETXEN_NIC_XGBE)
+			adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
+		else
+			adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
+		break;
+
 	default:
 		adapter->msix_supported = 0;
 		adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
@@ -271,10 +281,15 @@
 static int
 netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
 {
-	int ret = 0;
+	u32 val, timeout;
 
 	if (first_boot == 0x55555555) {
 		/* This is the first boot after power up */
+		adapter->pci_write_normalize(adapter,
+			NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
+
+		if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
+			return 0;
 
 		/* PCI bus master workaround */
 		adapter->hw_read_wx(adapter,
@@ -294,18 +309,26 @@
 			/* clear the register for future unloads/loads */
 			adapter->pci_write_normalize(adapter,
 					NETXEN_CAM_RAM(0x1fc), 0);
-			ret = -1;
+			return -EIO;
 		}
 
-		if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
-			/* Start P2 boot loader */
-			adapter->pci_write_normalize(adapter,
-				NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
-			adapter->pci_write_normalize(adapter,
-					NETXEN_ROMUSB_GLB_PEGTUNE_DONE, 1);
-		}
+		/* Start P2 boot loader */
+		val = adapter->pci_read_normalize(adapter,
+				NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
+		adapter->pci_write_normalize(adapter,
+				NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
+		timeout = 0;
+		do {
+			msleep(1);
+			val = adapter->pci_read_normalize(adapter,
+					NETXEN_CAM_RAM(0x1fc));
+
+			if (++timeout > 5000)
+				return -EIO;
+
+		} while (val == NETXEN_BDINFO_MAGIC);
 	}
-	return ret;
+	return 0;
 }
 
 static void netxen_set_port_mode(struct netxen_adapter *adapter)
@@ -784,8 +807,8 @@
 						CRB_CMDPEG_STATE, 0);
 			netxen_pinit_from_rom(adapter, 0);
 			msleep(1);
-			netxen_load_firmware(adapter);
 		}
+		netxen_load_firmware(adapter);
 
 		if (NX_IS_REVISION_P3(revision_id))
 			netxen_pcie_strap_init(adapter);
@@ -801,13 +824,6 @@
 
 		}
 
-		if ((first_boot == 0x55555555) &&
-			(NX_IS_REVISION_P2(revision_id))) {
-			/* Unlock the HW, prompting the boot sequence */
-			adapter->pci_write_normalize(adapter,
-					NETXEN_ROMUSB_GLB_PEGTUNE_DONE, 1);
-		}
-
 		err = netxen_initialize_adapter_offload(adapter);
 		if (err)
 			goto err_out_iounmap;
@@ -821,7 +837,9 @@
 		adapter->pci_write_normalize(adapter, CRB_DRIVER_VERSION, i);
 
 		/* Handshake with the card before we register the devices. */
-		netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
+		err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
+		if (err)
+			goto err_out_free_offload;
 
 	}	/* first_driver */
 
@@ -925,6 +943,7 @@
 	if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
 		pci_disable_msi(pdev);
 
+err_out_free_offload:
 	if (first_driver)
 		netxen_free_adapter_offload(adapter);
 
@@ -968,6 +987,9 @@
 		netxen_free_hw_resources(adapter);
 		netxen_release_rx_buffers(adapter);
 		netxen_free_sw_resources(adapter);
+
+		if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+			netxen_p3_free_mac_list(adapter);
 	}
 
 	if (adapter->portnum == 0)
@@ -983,8 +1005,10 @@
 
 	iounmap(adapter->ahw.db_base);
 	iounmap(adapter->ahw.pci_base0);
-	iounmap(adapter->ahw.pci_base1);
-	iounmap(adapter->ahw.pci_base2);
+	if (adapter->ahw.pci_base1 != NULL)
+		iounmap(adapter->ahw.pci_base1);
+	if (adapter->ahw.pci_base2 != NULL)
+		iounmap(adapter->ahw.pci_base2);
 
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
@@ -1137,29 +1161,64 @@
 	return 0;
 }
 
-void netxen_tso_check(struct netxen_adapter *adapter,
+static bool netxen_tso_check(struct net_device *netdev,
 		      struct cmd_desc_type0 *desc, struct sk_buff *skb)
 {
-	if (desc->mss) {
-		desc->total_hdr_length = (sizeof(struct ethhdr) +
-					  ip_hdrlen(skb) + tcp_hdrlen(skb));
+	bool tso = false;
+	u8 opcode = TX_ETHER_PKT;
 
-		if ((NX_IS_REVISION_P3(adapter->ahw.revision_id)) &&
-				(skb->protocol == htons(ETH_P_IPV6)))
-			netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO6);
-		else
-			netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO);
+	if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+			skb_shinfo(skb)->gso_size > 0) {
+
+		desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+		desc->total_hdr_length =
+			skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+		opcode = (skb->protocol == htons(ETH_P_IPV6)) ?
+				TX_TCP_LSO6 : TX_TCP_LSO;
+		tso = true;
 
 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-			netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT);
-		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
-			netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT);
-		else
-			return;
+		u8 l4proto;
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			l4proto = ip_hdr(skb)->protocol;
+
+			if (l4proto == IPPROTO_TCP)
+				opcode = TX_TCP_PKT;
+			else if (l4proto == IPPROTO_UDP)
+				opcode = TX_UDP_PKT;
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+			l4proto = ipv6_hdr(skb)->nexthdr;
+
+			if (l4proto == IPPROTO_TCP)
+				opcode = TX_TCPV6_PKT;
+			else if (l4proto == IPPROTO_UDP)
+				opcode = TX_UDPV6_PKT;
+		}
 	}
 	desc->tcp_hdr_offset = skb_transport_offset(skb);
 	desc->ip_hdr_offset = skb_network_offset(skb);
+	netxen_set_tx_flags_opcode(desc, 0, opcode);
+	return tso;
+}
+
+static void
+netxen_clean_tx_dma_mapping(struct pci_dev *pdev,
+		struct netxen_cmd_buffer *pbuf, int last)
+{
+	int k;
+	struct netxen_skb_frag *buffrag;
+
+	buffrag = &pbuf->frag_array[0];
+	pci_unmap_single(pdev, buffrag->dma,
+			buffrag->length, PCI_DMA_TODEVICE);
+
+	for (k = 1; k < last; k++) {
+		buffrag = &pbuf->frag_array[k];
+		pci_unmap_page(pdev, buffrag->dma,
+			buffrag->length, PCI_DMA_TODEVICE);
+	}
 }
 
 static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -1167,33 +1226,22 @@
 	struct netxen_adapter *adapter = netdev_priv(netdev);
 	struct netxen_hardware_context *hw = &adapter->ahw;
 	unsigned int first_seg_len = skb->len - skb->data_len;
+	struct netxen_cmd_buffer *pbuf;
 	struct netxen_skb_frag *buffrag;
-	unsigned int i;
+	struct cmd_desc_type0 *hwdesc;
+	struct pci_dev *pdev = adapter->pdev;
+	dma_addr_t temp_dma;
+	int i, k;
 
 	u32 producer, consumer;
-	u32 saved_producer = 0;
-	struct cmd_desc_type0 *hwdesc;
-	int k;
-	struct netxen_cmd_buffer *pbuf = NULL;
-	int frag_count;
-	int no_of_desc;
+	int frag_count, no_of_desc;
 	u32 num_txd = adapter->max_tx_desc_count;
+	bool is_tso = false;
 
 	frag_count = skb_shinfo(skb)->nr_frags + 1;
 
 	/* There 4 fragments per descriptor */
 	no_of_desc = (frag_count + 3) >> 2;
-	if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) {
-		if (skb_shinfo(skb)->gso_size > 0) {
-
-			no_of_desc++;
-			if ((ip_hdrlen(skb) + tcp_hdrlen(skb) +
-			     sizeof(struct ethhdr)) >
-			    (sizeof(struct cmd_desc_type0) - 2)) {
-				no_of_desc++;
-			}
-		}
-	}
 
 	producer = adapter->cmd_producer;
 	smp_mb();
@@ -1205,34 +1253,26 @@
 	}
 
 	/* Copy the descriptors into the hardware    */
-	saved_producer = producer;
 	hwdesc = &hw->cmd_desc_head[producer];
 	memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
 	/* Take skb->data itself */
 	pbuf = &adapter->cmd_buf_arr[producer];
-	if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
-			skb_shinfo(skb)->gso_size > 0) {
-		pbuf->mss = skb_shinfo(skb)->gso_size;
-		hwdesc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
-	} else {
-		pbuf->mss = 0;
-		hwdesc->mss = 0;
-	}
-	pbuf->total_length = skb->len;
-	pbuf->skb = skb;
-	pbuf->cmd = TX_ETHER_PKT;
-	pbuf->frag_count = frag_count;
-	pbuf->port = adapter->portnum;
-	buffrag = &pbuf->frag_array[0];
-	buffrag->dma = pci_map_single(adapter->pdev, skb->data, first_seg_len,
-				      PCI_DMA_TODEVICE);
-	buffrag->length = first_seg_len;
-	netxen_set_cmd_desc_totallength(hwdesc, skb->len);
-	netxen_set_cmd_desc_num_of_buff(hwdesc, frag_count);
-	netxen_set_cmd_desc_opcode(hwdesc, TX_ETHER_PKT);
 
-	netxen_set_cmd_desc_port(hwdesc, adapter->portnum);
-	netxen_set_cmd_desc_ctxid(hwdesc, adapter->portnum);
+	is_tso = netxen_tso_check(netdev, hwdesc, skb);
+
+	pbuf->skb = skb;
+	pbuf->frag_count = frag_count;
+	buffrag = &pbuf->frag_array[0];
+	temp_dma = pci_map_single(pdev, skb->data, first_seg_len,
+				      PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(pdev, temp_dma))
+		goto drop_packet;
+
+	buffrag->dma = temp_dma;
+	buffrag->length = first_seg_len;
+	netxen_set_tx_frags_len(hwdesc, frag_count, skb->len);
+	netxen_set_tx_port(hwdesc, adapter->portnum);
+
 	hwdesc->buffer1_length = cpu_to_le16(first_seg_len);
 	hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
 
@@ -1240,7 +1280,6 @@
 		struct skb_frag_struct *frag;
 		int len, temp_len;
 		unsigned long offset;
-		dma_addr_t temp_dma;
 
 		/* move to next desc. if there is a need */
 		if ((i & 0x3) == 0) {
@@ -1256,8 +1295,12 @@
 		offset = frag->page_offset;
 
 		temp_len = len;
-		temp_dma = pci_map_page(adapter->pdev, frag->page, offset,
+		temp_dma = pci_map_page(pdev, frag->page, offset,
 					len, PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(pdev, temp_dma)) {
+			netxen_clean_tx_dma_mapping(pdev, pbuf, i);
+			goto drop_packet;
+		}
 
 		buffrag++;
 		buffrag->dma = temp_dma;
@@ -1285,16 +1328,12 @@
 	}
 	producer = get_next_index(producer, num_txd);
 
-	/* might change opcode to TX_TCP_LSO */
-	netxen_tso_check(adapter, &hw->cmd_desc_head[saved_producer], skb);
-
 	/* For LSO, we need to copy the MAC/IP/TCP headers into
 	 * the descriptor ring
 	 */
-	if (netxen_get_cmd_desc_opcode(&hw->cmd_desc_head[saved_producer])
-	    == TX_TCP_LSO) {
+	if (is_tso) {
 		int hdr_len, first_hdr_len, more_hdr;
-		hdr_len = hw->cmd_desc_head[saved_producer].total_hdr_length;
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 		if (hdr_len > (sizeof(struct cmd_desc_type0) - 2)) {
 			first_hdr_len = sizeof(struct cmd_desc_type0) - 2;
 			more_hdr = 1;
@@ -1336,6 +1375,11 @@
 	netdev->trans_start = jiffies;
 
 	return NETDEV_TX_OK;
+
+drop_packet:
+	adapter->stats.txdropped++;
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
 }
 
 static int netxen_nic_check_temp(struct netxen_adapter *adapter)
@@ -1407,6 +1451,8 @@
 			netif_carrier_off(netdev);
 			netif_stop_queue(netdev);
 		}
+
+		netxen_nic_set_link_parameters(adapter);
 	} else if (!adapter->ahw.linkup && linkup) {
 		printk(KERN_INFO "%s: %s NIC Link is up\n",
 		       netxen_nic_driver_name, netdev->name);
@@ -1415,6 +1461,8 @@
 			netif_carrier_on(netdev);
 			netif_wake_queue(netdev);
 		}
+
+		netxen_nic_set_link_parameters(adapter);
 	}
 }
 
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 46b0772..e80294d 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1956,7 +1956,11 @@
 	.ndo_change_mtu		= ns83820_change_mtu,
 	.ndo_set_multicast_list = ns83820_set_multicast,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_tx_timeout		= ns83820_tx_timeout,
+#ifdef NS83820_VLAN_ACCEL_SUPPORT
+	.ndo_vlan_rx_register	= ns83820_vlan_rx_register,
+#endif
 };
 
 static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
@@ -2216,7 +2220,6 @@
 #ifdef NS83820_VLAN_ACCEL_SUPPORT
 	/* We also support hardware vlan acceleration */
 	ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-	ndev->vlan_rx_register = ns83820_vlan_rx_register;
 #endif
 
 	if (using_dac) {
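
A minimal sketch with a hypothetical "demo" driver showing the net_device_ops conversion this hunk and the pcnet32/plip hunks below perform: the per-device hooks live in one const ops table, and setup attaches it with a single dev->netdev_ops assignment instead of filling individual function pointers on the net_device.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int demo_open(struct net_device *dev)
{
	return 0;
}

static int demo_stop(struct net_device *dev)
{
	return 0;
}

static int demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);	/* a real driver would queue the frame here */
	return NETDEV_TX_OK;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_open		= demo_open,
	.ndo_stop		= demo_stop,
	.ndo_start_xmit		= demo_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static void demo_setup(struct net_device *dev)
{
	/* replaces dev->open = ..., dev->stop = ..., dev->hard_start_xmit = ... */
	dev->netdev_ops = &demo_netdev_ops;
}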
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 5b7a574..d0349e7 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -712,7 +712,7 @@
 	rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
 	ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno));
 
-	printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n",
+	printk(KERN_ERR "pasemi_mac: rx error. macrx %016llx, rx status %llx\n",
 		macrx, *chan->status);
 
 	printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n",
@@ -730,8 +730,8 @@
 
 	cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno));
 
-	printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "\
-		"tx status 0x%016lx\n", mactx, *chan->status);
+	printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016llx, "\
+		"tx status 0x%016llx\n", mactx, *chan->status);
 
 	printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta);
 }
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 044b7b0..665a428 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1568,6 +1568,22 @@
 	return err;
 }
 
+static const struct net_device_ops pcnet32_netdev_ops = {
+	.ndo_open		= pcnet32_open,
+	.ndo_stop 		= pcnet32_close,
+	.ndo_start_xmit		= pcnet32_start_xmit,
+	.ndo_tx_timeout		= pcnet32_tx_timeout,
+	.ndo_get_stats		= pcnet32_get_stats,
+	.ndo_set_multicast_list = pcnet32_set_multicast_list,
+	.ndo_do_ioctl		= pcnet32_ioctl,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= pcnet32_poll_controller,
+#endif
+};
+
 /* pcnet32_probe1
  *  Called from both pcnet32_probe_vlbus and pcnet_probe_pci.
  *  pdev will be NULL when called from pcnet32_probe_vlbus.
@@ -1934,20 +1950,10 @@
 	lp->watchdog_timer.function = (void *)&pcnet32_watchdog;
 
 	/* The PCNET32-specific entries in the device structure. */
-	dev->open = &pcnet32_open;
-	dev->hard_start_xmit = &pcnet32_start_xmit;
-	dev->stop = &pcnet32_close;
-	dev->get_stats = &pcnet32_get_stats;
-	dev->set_multicast_list = &pcnet32_set_multicast_list;
-	dev->do_ioctl = &pcnet32_ioctl;
+	dev->netdev_ops = &pcnet32_netdev_ops;
 	dev->ethtool_ops = &pcnet32_ethtool_ops;
-	dev->tx_timeout = pcnet32_tx_timeout;
 	dev->watchdog_timeo = (5 * HZ);
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = pcnet32_poll_controller;
-#endif
-
 	/* Fill in the generic fields of the device structure. */
 	if (register_netdev(dev))
 		goto err_free_ring;
@@ -2276,7 +2282,7 @@
 	if (lp->chip_version >= PCNET32_79C970A) {
 		/* Print the link status and start the watchdog */
 		pcnet32_check_media(dev, 1);
-		mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
+		mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT);
 	}
 
 	i = 0;
@@ -2911,7 +2917,7 @@
 	pcnet32_check_media(dev, 0);
 	spin_unlock_irqrestore(&lp->lock, flags);
 
-	mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
+	mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT));
 }
 
 static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state)
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index e354601..0a06e4f 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -231,15 +231,6 @@
 	if ((phy_id & 0x1fffffff) == 0x1fffffff)
 		return NULL;
 
-	/*
-	 * Broken hardware is sometimes missing the pull-up resistor on the
-	 * MDIO line, which results in reads to non-existent devices returning
-	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
-	 * device as well.
-	 */
-	if (phy_id == 0)
-		return NULL;
-
 	dev = phy_device_create(bus, addr, phy_id);
 
 	return dev;
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 0c46d60..0be0f0b 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -265,6 +265,13 @@
 	.cache  = plip_hard_header_cache,
 };
 
+static const struct net_device_ops plip_netdev_ops = {
+	.ndo_open		 = plip_open,
+	.ndo_stop		 = plip_close,
+	.ndo_start_xmit		 = plip_tx_packet,
+	.ndo_do_ioctl		 = plip_ioctl,
+};
+
 /* Entry point of PLIP driver.
    Probe the hardware, and register/initialize the driver.
 
@@ -280,15 +287,11 @@
 	struct net_local *nl = netdev_priv(dev);
 
 	/* Then, override parts of it */
-	dev->hard_start_xmit	 = plip_tx_packet;
-	dev->open		 = plip_open;
-	dev->stop		 = plip_close;
-	dev->do_ioctl		 = plip_ioctl;
-
 	dev->tx_queue_len 	 = 10;
 	dev->flags	         = IFF_POINTOPOINT|IFF_NOARP;
 	memset(dev->dev_addr, 0xfc, ETH_ALEN);
 
+	dev->netdev_ops		 = &plip_netdev_ops;
 	dev->header_ops          = &plip_header_ops;
 
 
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 06b4482..7b2728b 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -250,6 +250,7 @@
 static int ppp_disconnect_channel(struct channel *pch);
 static void ppp_destroy_channel(struct channel *pch);
 static int unit_get(struct idr *p, void *ptr);
+static int unit_set(struct idr *p, void *ptr, int n);
 static void unit_put(struct idr *p, int n);
 static void *unit_find(struct idr *p, int n);
 
@@ -2432,11 +2433,18 @@
 	} else {
 		if (unit_find(&ppp_units_idr, unit))
 			goto out2; /* unit already exists */
-		else {
-			/* darn, someone is cheating us? */
-			*retp = -EINVAL;
+		/*
+		 * If the caller needs a specific unit number, try to
+		 * satisfy it; otherwise the caller should ask for a
+		 * new unit number.
+		 *
+		 * NOTE: returning -EEXIST is not strictly fair, but at
+		 * least pppd will ask us to allocate a new unit in this
+		 * case, so the user is happy.
+		 */
+		unit = unit_set(&ppp_units_idr, ppp, unit);
+		if (unit < 0)
 			goto out2;
-		}
 	}
 
 	/* Initialize the new ppp unit */
@@ -2677,14 +2685,37 @@
  * by holding all_ppp_mutex
  */
 
+/* associate pointer with specified number */
+static int unit_set(struct idr *p, void *ptr, int n)
+{
+	int unit, err;
+
+again:
+	if (!idr_pre_get(p, GFP_KERNEL)) {
+		printk(KERN_ERR "PPP: No free memory for idr\n");
+		return -ENOMEM;
+	}
+
+	err = idr_get_new_above(p, ptr, n, &unit);
+	if (err == -EAGAIN)
+		goto again;
+
+	if (unit != n) {
+		idr_remove(p, unit);
+		return -EINVAL;
+	}
+
+	return unit;
+}
+
 /* get new free unit number and associate pointer with it */
 static int unit_get(struct idr *p, void *ptr)
 {
 	int unit, err;
 
 again:
-	if (idr_pre_get(p, GFP_KERNEL) == 0) {
-		printk(KERN_ERR "Out of memory expanding drawable idr\n");
+	if (!idr_pre_get(p, GFP_KERNEL)) {
+		printk(KERN_ERR "PPP: No free memory for idr\n");
 		return -ENOMEM;
 	}
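
A short sketch (error handling trimmed; it calls the file's static helpers for illustration only) of how the new unit_set() pairs with the existing unit_get() and unit_find() in the hunk above: a caller-requested unit number is reserved when it is free, and a negative request falls back to allocating the lowest free id.

#include <linux/errno.h>
#include <linux/idr.h>

static DEFINE_IDR(demo_units_idr);

/* register ptr under the requested unit, or under any free unit if unit < 0 */
static int demo_register_unit(void *ptr, int unit)
{
	if (unit < 0)
		return unit_get(&demo_units_idr, ptr);	/* lowest free id */

	if (unit_find(&demo_units_idr, unit))
		return -EEXIST;				/* already in use */

	return unit_set(&demo_units_idr, ptr, unit);	/* requested id or error */
}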
 
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 459663a..c1dadad 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -28,11 +28,11 @@
        } while (0)
 
 #define QLGE_VENDOR_ID    0x1077
-#define QLGE_DEVICE_ID1    0x8012
-#define QLGE_DEVICE_ID   0x8000
+#define QLGE_DEVICE_ID    0x8012
 
-#define MAX_RX_RINGS 128
-#define MAX_TX_RINGS 128
+#define MAX_CPUS 8
+#define MAX_TX_RINGS MAX_CPUS
+#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
 
 #define NUM_TX_RING_ENTRIES	256
 #define NUM_RX_RING_ENTRIES	256
@@ -45,6 +45,7 @@
 #define MAX_SPLIT_SIZE 1023
 #define QLGE_SB_PAD 32
 
+#define MAX_CQ 128
 #define DFLT_COALESCE_WAIT 100	/* 100 usec wait for coalescing */
 #define MAX_INTER_FRAME_WAIT 10	/* 10 usec max interframe-wait for coalescing */
 #define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
@@ -961,8 +962,7 @@
 #define IB_MAC_IOCB_RSP_DS	0x40	/* data is in small buffer */
 #define IB_MAC_IOCB_RSP_DL	0x80	/* data is in large buffer */
 	__le32 data_len;	/* */
-	__le32 data_addr_lo;	/* */
-	__le32 data_addr_hi;	/* */
+	__le64 data_addr;	/* */
 	__le32 rss;		/* */
 	__le16 vlan_id;		/* 12 bits */
 #define IB_MAC_IOCB_RSP_C	0x1000	/* VLAN CFI bit */
@@ -976,8 +976,7 @@
 #define IB_MAC_IOCB_RSP_HS	0x40
 #define IB_MAC_IOCB_RSP_HL	0x80
 	__le32 hdr_len;		/* */
-	__le32 hdr_addr_lo;	/* */
-	__le32 hdr_addr_hi;	/* */
+	__le64 hdr_addr;	/* */
 } __attribute((packed));
 
 struct ib_ae_iocb_rsp {
@@ -1042,10 +1041,8 @@
 	__le16 cq_id_rss;
 #define Q_CQ_ID_RSS_RV 0x8000
 	__le16 rid;
-	__le32 addr_lo;
-	__le32 addr_hi;
-	__le32 cnsmr_idx_addr_lo;
-	__le32 cnsmr_idx_addr_hi;
+	__le64 addr;
+	__le64 cnsmr_idx_addr;
 } __attribute((packed));
 
 /*
@@ -1070,18 +1067,14 @@
 #define LEN_CPP_64	0x0002
 #define LEN_CPP_128	0x0003
 	__le16 rid;
-	__le32 addr_lo;
-	__le32 addr_hi;
-	__le32 prod_idx_addr_lo;
-	__le32 prod_idx_addr_hi;
+	__le64 addr;
+	__le64 prod_idx_addr;
 	__le16 pkt_delay;
 	__le16 irq_delay;
-	__le32 lbq_addr_lo;
-	__le32 lbq_addr_hi;
+	__le64 lbq_addr;
 	__le16 lbq_buf_size;
 	__le16 lbq_len;		/* entry count */
-	__le32 sbq_addr_lo;
-	__le32 sbq_addr_hi;
+	__le64 sbq_addr;
 	__le16 sbq_buf_size;
 	__le16 sbq_len;		/* entry count */
 } __attribute((packed));
@@ -1145,7 +1138,7 @@
 	struct wqicb wqicb;	/* structure used to inform chip of new queue */
 	void *wq_base;		/* pci_alloc:virtual addr for tx */
 	dma_addr_t wq_base_dma;	/* pci_alloc:dma addr for tx */
-	u32 *cnsmr_idx_sh_reg;	/* shadow copy of consumer idx */
+	__le32 *cnsmr_idx_sh_reg;	/* shadow copy of consumer idx */
 	dma_addr_t cnsmr_idx_sh_reg_dma;	/* dma-shadow copy of consumer */
 	u32 wq_size;		/* size in bytes of queue area */
 	u32 wq_len;		/* number of entries in queue */
@@ -1181,7 +1174,7 @@
 	u32 cq_size;
 	u32 cq_len;
 	u16 cq_id;
-	volatile __le32 *prod_idx_sh_reg;	/* Shadowed producer register. */
+	__le32 *prod_idx_sh_reg;	/* Shadowed producer register. */
 	dma_addr_t prod_idx_sh_reg_dma;
 	void __iomem *cnsmr_idx_db_reg;	/* PCI doorbell mem area + 0 */
 	u32 cnsmr_idx;		/* current sw idx */
@@ -1402,9 +1395,11 @@
 	int rx_ring_count;
 	int ring_mem_size;
 	void *ring_mem;
-	struct rx_ring *rx_ring;
+
+	struct rx_ring rx_ring[MAX_RX_RINGS];
+	struct tx_ring tx_ring[MAX_TX_RINGS];
+
 	int rx_csum;
-	struct tx_ring *tx_ring;
 	u32 default_rx_queue;
 
 	u16 rx_coalesce_usecs;	/* cqicb->int_delay */
@@ -1459,6 +1454,24 @@
 	mmiowb();
 }
 
+/*
+ * Shadow Registers:
+ * Outbound queues have a consumer index that is maintained by the chip.
+ * Inbound queues have a producer index that is maintained by the chip.
+ * For lower overhead, these registers are "shadowed" to host memory
+ * which allows the device driver to track the queue progress without
+ * PCI reads. When an entry is placed on an inbound queue, the chip will
+ * update the relevant index register and then copy the value to the
+ * shadow register in host memory.
+ */
+static inline u32 ql_read_sh_reg(__le32  *addr)
+{
+	u32 reg;
+	reg =  le32_to_cpu(*addr);
+	rmb();
+	return reg;
+}
+
 extern char qlge_driver_name[];
 extern const char qlge_driver_version[];
 extern const struct ethtool_ops qlge_ethtool_ops;
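
A small sketch (illustrative struct, not the full cqicb/wqicb layout) of the two header changes above: the split lo/hi address words collapse into single __le64 fields filled with one cpu_to_le64(), and shadow-register values are read through ql_read_sh_reg() so the le32_to_cpu() conversion and the read barrier sit in one helper.

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_cb {
	__le64 addr;		/* was addr_lo / addr_hi */
	__le64 prod_idx_addr;	/* was prod_idx_addr_lo / _hi */
};

static void demo_fill_cb(struct demo_cb *cb, dma_addr_t base_dma,
			 dma_addr_t sh_reg_dma)
{
	cb->addr = cpu_to_le64(base_dma);		/* one store, no manual split */
	cb->prod_idx_addr = cpu_to_le64(sh_reg_dma);
}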
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 3f5e02d..379b895 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -435,14 +435,10 @@
 	printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
 	       le16_to_cpu(wqicb->cq_id_rss));
 	printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
-	printk(KERN_ERR PFX "wqicb->wq_addr_lo = 0x%.08x.\n",
-	       le32_to_cpu(wqicb->addr_lo));
-	printk(KERN_ERR PFX "wqicb->wq_addr_hi = 0x%.08x.\n",
-	       le32_to_cpu(wqicb->addr_hi));
-	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_lo = 0x%.08x.\n",
-	       le32_to_cpu(wqicb->cnsmr_idx_addr_lo));
-	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_hi = 0x%.08x.\n",
-	       le32_to_cpu(wqicb->cnsmr_idx_addr_hi));
+	printk(KERN_ERR PFX "wqicb->wq_addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(wqicb->addr));
+	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
 }
 
 void ql_dump_tx_ring(struct tx_ring *tx_ring)
@@ -455,10 +451,11 @@
 	printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
 	printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
 	       (unsigned long long) tx_ring->wq_base_dma);
-	printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg = %p.\n",
-	       tx_ring->cnsmr_idx_sh_reg);
-	printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg_dma = 0x%llx.\n",
-	       (unsigned long long) tx_ring->cnsmr_idx_sh_reg_dma);
+	printk(KERN_ERR PFX
+	       "tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d.\n",
+	       tx_ring->cnsmr_idx_sh_reg,
+	       tx_ring->cnsmr_idx_sh_reg
+			? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
 	printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
 	printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
 	printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
@@ -510,30 +507,22 @@
 	printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
 	printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
 	printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
-	printk(KERN_ERR PFX "cqicb->addr_lo = %x.\n",
-	       le32_to_cpu(cqicb->addr_lo));
-	printk(KERN_ERR PFX "cqicb->addr_hi = %x.\n",
-	       le32_to_cpu(cqicb->addr_hi));
-	printk(KERN_ERR PFX "cqicb->prod_idx_addr_lo = %x.\n",
-	       le32_to_cpu(cqicb->prod_idx_addr_lo));
-	printk(KERN_ERR PFX "cqicb->prod_idx_addr_hi = %x.\n",
-	       le32_to_cpu(cqicb->prod_idx_addr_hi));
+	printk(KERN_ERR PFX "cqicb->addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(cqicb->addr));
+	printk(KERN_ERR PFX "cqicb->prod_idx_addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
 	printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
 	       le16_to_cpu(cqicb->pkt_delay));
 	printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
 	       le16_to_cpu(cqicb->irq_delay));
-	printk(KERN_ERR PFX "cqicb->lbq_addr_lo = %x.\n",
-	       le32_to_cpu(cqicb->lbq_addr_lo));
-	printk(KERN_ERR PFX "cqicb->lbq_addr_hi = %x.\n",
-	       le32_to_cpu(cqicb->lbq_addr_hi));
+	printk(KERN_ERR PFX "cqicb->lbq_addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
 	printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
 	       le16_to_cpu(cqicb->lbq_buf_size));
 	printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
 	       le16_to_cpu(cqicb->lbq_len));
-	printk(KERN_ERR PFX "cqicb->sbq_addr_lo = %x.\n",
-	       le32_to_cpu(cqicb->sbq_addr_lo));
-	printk(KERN_ERR PFX "cqicb->sbq_addr_hi = %x.\n",
-	       le32_to_cpu(cqicb->sbq_addr_hi));
+	printk(KERN_ERR PFX "cqicb->sbq_addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
 	printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
 	       le16_to_cpu(cqicb->sbq_buf_size));
 	printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
@@ -558,9 +547,10 @@
 	printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
 	printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
 	printk(KERN_ERR PFX
-	       "rx_ring->prod_idx_sh_reg, addr = %p, value = %d.\n",
+	       "rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d.\n",
 	       rx_ring->prod_idx_sh_reg,
-	       rx_ring->prod_idx_sh_reg ? *(rx_ring->prod_idx_sh_reg) : 0);
+	       rx_ring->prod_idx_sh_reg
+			? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
 	printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
 	       (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
 	printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
@@ -809,10 +799,8 @@
 
 	printk(KERN_ERR PFX "data_len	= %d\n",
 	       le32_to_cpu(ib_mac_rsp->data_len));
-	printk(KERN_ERR PFX "data_addr_hi    = 0x%x\n",
-	       le32_to_cpu(ib_mac_rsp->data_addr_hi));
-	printk(KERN_ERR PFX "data_addr_lo    = 0x%x\n",
-	       le32_to_cpu(ib_mac_rsp->data_addr_lo));
+	printk(KERN_ERR PFX "data_addr    = 0x%llx\n",
+	       (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
 		printk(KERN_ERR PFX "rss    = %x\n",
 		       le32_to_cpu(ib_mac_rsp->rss));
@@ -828,10 +816,8 @@
 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
 		printk(KERN_ERR PFX "hdr length	= %d.\n",
 		       le32_to_cpu(ib_mac_rsp->hdr_len));
-		printk(KERN_ERR PFX "hdr addr_hi    = 0x%x.\n",
-		       le32_to_cpu(ib_mac_rsp->hdr_addr_hi));
-		printk(KERN_ERR PFX "hdr addr_lo    = 0x%x.\n",
-		       le32_to_cpu(ib_mac_rsp->hdr_addr_lo));
+		printk(KERN_ERR PFX "hdr addr    = 0x%llx.\n",
+		       (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
 	}
 }
 #endif
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index f4c0160..45421c8 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -76,7 +76,6 @@
 
 static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
-	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)},
 	/* required last entry */
 	{0,}
 };
@@ -127,12 +126,12 @@
 
 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
 {
-	unsigned int seconds = 3;
+	unsigned int wait_count = 30;
 	do {
 		if (!ql_sem_trylock(qdev, sem_mask))
 			return 0;
-		ssleep(1);
-	} while (--seconds);
+		udelay(100);
+	} while (--wait_count);
 	return -ETIMEDOUT;
 }
 
@@ -1545,7 +1544,7 @@
 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
 {
 	struct ql_adapter *qdev = rx_ring->qdev;
-	u32 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
+	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
 	struct ob_mac_iocb_rsp *net_rsp = NULL;
 	int count = 0;
 
@@ -1571,7 +1570,7 @@
 		}
 		count++;
 		ql_update_cq(rx_ring);
-		prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
+		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
 	}
 	ql_write_cq_idx(rx_ring);
 	if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
@@ -1591,7 +1590,7 @@
 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
 {
 	struct ql_adapter *qdev = rx_ring->qdev;
-	u32 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
+	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
 	struct ql_net_rsp_iocb *net_rsp;
 	int count = 0;
 
@@ -1624,7 +1623,7 @@
 		}
 		count++;
 		ql_update_cq(rx_ring);
-		prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg);
+		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
 		if (count == budget)
 			break;
 	}
@@ -1787,7 +1786,7 @@
 	 * Check the default queue and wake handler if active.
 	 */
 	rx_ring = &qdev->rx_ring[0];
-	if (le32_to_cpu(*rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
+	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
 		QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
 		ql_disable_completion_interrupt(qdev, intr_context->intr);
 		queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
@@ -1801,7 +1800,7 @@
 		 */
 		for (i = 1; i < qdev->rx_ring_count; i++) {
 			rx_ring = &qdev->rx_ring[i];
-			if (le32_to_cpu(*rx_ring->prod_idx_sh_reg) !=
+			if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
 			    rx_ring->cnsmr_idx) {
 				QPRINTK(qdev, INTR, INFO,
 					"Waking handler for rx_ring[%d].\n", i);
@@ -2356,28 +2355,6 @@
 	}
 }
 
-static void ql_free_ring_cb(struct ql_adapter *qdev)
-{
-	kfree(qdev->ring_mem);
-}
-
-static int ql_alloc_ring_cb(struct ql_adapter *qdev)
-{
-	/* Allocate space for tx/rx ring control blocks. */
-	qdev->ring_mem_size =
-	    (qdev->tx_ring_count * sizeof(struct tx_ring)) +
-	    (qdev->rx_ring_count * sizeof(struct rx_ring));
-	qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL);
-	if (qdev->ring_mem == NULL) {
-		return -ENOMEM;
-	} else {
-		qdev->rx_ring = qdev->ring_mem;
-		qdev->tx_ring = qdev->ring_mem +
-		    (qdev->rx_ring_count * sizeof(struct rx_ring));
-	}
-	return 0;
-}
-
 static void ql_free_mem_resources(struct ql_adapter *qdev)
 {
 	int i;
@@ -2467,12 +2444,9 @@
 	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
 	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
 
-	cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma);
-	cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32);
+	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
 
-	cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma);
-	cqicb->prod_idx_addr_hi =
-	    cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32);
+	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
 
 	/*
 	 * Set up the control block load flags.
@@ -2483,10 +2457,8 @@
 	if (rx_ring->lbq_len) {
 		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
 		*((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
-		cqicb->lbq_addr_lo =
-		    cpu_to_le32(rx_ring->lbq_base_indirect_dma);
-		cqicb->lbq_addr_hi =
-		    cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32);
+		cqicb->lbq_addr =
+		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
 		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
 			(u16) rx_ring->lbq_buf_size;
 		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
@@ -2501,10 +2473,8 @@
 	if (rx_ring->sbq_len) {
 		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
 		*((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
-		cqicb->sbq_addr_lo =
-		    cpu_to_le32(rx_ring->sbq_base_indirect_dma);
-		cqicb->sbq_addr_hi =
-		    cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32);
+		cqicb->sbq_addr =
+		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
 		cqicb->sbq_buf_size =
 		    cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
 		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
@@ -2611,12 +2581,9 @@
 				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
 	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
 	wqicb->rid = 0;
-	wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma);
-	wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32);
+	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
 
-	wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma);
-	wqicb->cnsmr_idx_addr_hi =
-	    cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32);
+	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
 
 	ql_init_tx_ring(qdev, tx_ring);
 
@@ -2746,14 +2713,14 @@
 				 * Outbound queue is for outbound completions only.
 				 */
 				intr_context->handler = qlge_msix_tx_isr;
-				sprintf(intr_context->name, "%s-txq-%d",
+				sprintf(intr_context->name, "%s-tx-%d",
 					qdev->ndev->name, i);
 			} else {
 				/*
 				 * Inbound queues handle unicast frames only.
 				 */
 				intr_context->handler = qlge_msix_rx_isr;
-				sprintf(intr_context->name, "%s-rxq-%d",
+				sprintf(intr_context->name, "%s-rx-%d",
 					qdev->ndev->name, i);
 			}
 		}
@@ -3247,7 +3214,6 @@
 		msleep(1);
 	ql_adapter_down(qdev);
 	ql_release_adapter_resources(qdev);
-	ql_free_ring_cb(qdev);
 	return 0;
 }
 
@@ -3273,8 +3239,8 @@
 	 * This limitation can be removed when requested.
 	 */
 
-	if (cpu_cnt > 8)
-		cpu_cnt = 8;
+	if (cpu_cnt > MAX_CPUS)
+		cpu_cnt = MAX_CPUS;
 
 	/*
 	 * rx_ring[0] is always the default queue.
@@ -3294,9 +3260,6 @@
 	 */
 	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
 
-	if (ql_alloc_ring_cb(qdev))
-		return -ENOMEM;
-
 	for (i = 0; i < qdev->tx_ring_count; i++) {
 		tx_ring = &qdev->tx_ring[i];
 		memset((void *)tx_ring, 0, sizeof(tx_ring));
@@ -3393,7 +3356,6 @@
 
 error_up:
 	ql_release_adapter_resources(qdev);
-	ql_free_ring_cb(qdev);
 	return err;
 }
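
The qlge hunks above replace open-coded le32_to_cpu() loads of the shared-memory producer index with a ql_read_sh_reg() helper, merge each split addr_lo/addr_hi control-block pair into a single 64-bit little-endian field, and drop the separately kmalloc'd ring control-block area (the rings are presumably embedded in struct ql_adapter instead). A minimal sketch of what the helper and a 64-bit field store could look like; the wrapper body and the structure below are illustrative assumptions, not the driver's actual definitions:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Assumed shape of the helper: one place to hide the endian
	 * conversion (and any future barrier) for shared-memory registers. */
	static inline u32 example_read_sh_reg(const __le32 *reg)
	{
		return le32_to_cpu(*reg);
	}

	/* Collapsing an addr_lo/addr_hi pair into one __le64 field lets
	 * cpu_to_le64() do the split that the old code did by hand. */
	struct example_cqicb {
		__le64 addr;		/* was addr_lo / addr_hi */
	};

	static void example_load_addr(struct example_cqicb *cqicb, u64 dma)
	{
		cqicb->addr = cpu_to_le64(dma);
	}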
 
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 53bbddf..72fd9e9 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
 #include <asm/processor.h>
 
 #define DRV_NAME	"r6040"
-#define DRV_VERSION	"0.19"
-#define DRV_RELDATE	"18Dec2008"
+#define DRV_VERSION	"0.21"
+#define DRV_RELDATE	"09Jan2009"
 
 /* PHY CHIP Address */
 #define PHY1_ADDR	1	/* For MAC1 */
@@ -200,7 +200,7 @@
 
 static char version[] __devinitdata = KERN_INFO DRV_NAME
 	": RDC R6040 NAPI net driver,"
-	"version "DRV_VERSION " (" DRV_RELDATE ")\n";
+	"version "DRV_VERSION " (" DRV_RELDATE ")";
 
 static int phy_table[] = { PHY1_ADDR, PHY2_ADDR };
 
@@ -330,7 +330,7 @@
 	do {
 		skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
 		if (!skb) {
-			printk(KERN_ERR "%s: failed to alloc skb for rx\n", dev->name);
+			printk(KERN_ERR DRV_NAME "%s: failed to alloc skb for rx\n", dev->name);
 			rc = -ENOMEM;
 			goto err_exit;
 		}
@@ -457,22 +457,12 @@
 	iowrite16(adrp[0], ioaddr + MID_0L);
 	iowrite16(adrp[1], ioaddr + MID_0M);
 	iowrite16(adrp[2], ioaddr + MID_0H);
-	free_irq(dev->irq, dev);
-
-	/* Free RX buffer */
-	r6040_free_rxbufs(dev);
-
-	/* Free TX buffer */
-	r6040_free_txbufs(dev);
-
-	/* Free Descriptor memory */
-	pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
-	pci_free_consistent(pdev, TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
 }
 
 static int r6040_close(struct net_device *dev)
 {
 	struct r6040_private *lp = netdev_priv(dev);
+	struct pci_dev *pdev = lp->pdev;
 
 	/* deleted timer */
 	del_timer_sync(&lp->timer);
@@ -481,8 +471,28 @@
 	napi_disable(&lp->napi);
 	netif_stop_queue(dev);
 	r6040_down(dev);
+
+	free_irq(dev->irq, dev);
+
+	/* Free RX buffer */
+	r6040_free_rxbufs(dev);
+
+	/* Free TX buffer */
+	r6040_free_txbufs(dev);
+
 	spin_unlock_irq(&lp->lock);
 
+	/* Free Descriptor memory */
+	if (lp->rx_ring) {
+		pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
+		lp->rx_ring = 0;
+	}
+
+	if (lp->tx_ring) {
+		pci_free_consistent(pdev, TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
+		lp->tx_ring = 0;
+	}
+
 	return 0;
 }
 
@@ -1049,6 +1059,7 @@
 	.ndo_set_multicast_list = r6040_multicast_list,
 	.ndo_change_mtu		= eth_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_do_ioctl		= r6040_ioctl,
 	.ndo_tx_timeout		= r6040_tx_timeout,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1077,20 +1088,20 @@
 	/* this should always be supported */
 	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 	if (err) {
-		printk(KERN_ERR DRV_NAME "32-bit PCI DMA addresses"
+		printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses"
 				"not supported by the card\n");
 		goto err_out;
 	}
 	err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 	if (err) {
-		printk(KERN_ERR DRV_NAME "32-bit PCI DMA addresses"
+		printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses"
 				"not supported by the card\n");
 		goto err_out;
 	}
 
 	/* IO Size check */
 	if (pci_resource_len(pdev, 0) < io_size) {
-		printk(KERN_ERR DRV_NAME "Insufficient PCI resources, aborting\n");
+		printk(KERN_ERR DRV_NAME ": Insufficient PCI resources, aborting\n");
 		err = -EIO;
 		goto err_out;
 	}
@@ -1100,7 +1111,7 @@
 
 	dev = alloc_etherdev(sizeof(struct r6040_private));
 	if (!dev) {
-		printk(KERN_ERR DRV_NAME "Failed to allocate etherdev\n");
+		printk(KERN_ERR DRV_NAME ": Failed to allocate etherdev\n");
 		err = -ENOMEM;
 		goto err_out;
 	}
@@ -1116,11 +1127,15 @@
 
 	ioaddr = pci_iomap(pdev, bar, io_size);
 	if (!ioaddr) {
-		printk(KERN_ERR "ioremap failed for device %s\n",
+		printk(KERN_ERR DRV_NAME ": ioremap failed for device %s\n",
 			pci_name(pdev));
 		err = -EIO;
 		goto err_out_free_res;
 	}
+	/* If PHY status change register is still set to zero it means the
+	 * bootloader didn't initialize it */
+	if (ioread16(ioaddr + PHY_CC) == 0)
+		iowrite16(0x9f07, ioaddr + PHY_CC);
 
 	/* Init system & device */
 	lp->base = ioaddr;
@@ -1137,6 +1152,13 @@
 	adrp[1] = ioread16(ioaddr + MID_0M);
 	adrp[2] = ioread16(ioaddr + MID_0H);
 
+	/* Some bootloader/BIOSes do not initialize
+	 * MAC address, warn about that */
+	if (!(adrp[0] || adrp[1] || adrp[2])) {
+		printk(KERN_WARNING DRV_NAME ": MAC address not initialized, generating random\n");
+		random_ether_addr(dev->dev_addr);
+	}
+
 	/* Link new device into r6040_root_dev */
 	lp->pdev = pdev;
 	lp->dev = dev;
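
The r6040 changes move IRQ, buffer and descriptor-ring teardown from r6040_down() into r6040_close(), and free each DMA ring only if it was actually allocated, clearing the pointer afterwards. A minimal sketch of that guard-and-clear idiom, with illustrative names:

	#include <linux/pci.h>

	/* Free a coherent DMA ring at most once: skip it if it was never
	 * allocated, and clear the pointer so a later call cannot free it
	 * a second time. */
	static void example_free_ring(struct pci_dev *pdev, size_t size,
				      void **ring, dma_addr_t dma)
	{
		if (*ring) {
			pci_free_consistent(pdev, size, *ring, dma);
			*ring = NULL;
		}
	}

Clearing the pointer to NULL rather than 0 is the more idiomatic spelling of the same guard the patch adds.
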
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index be30253..fc0e38b 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -134,6 +134,16 @@
 };
 MODULE_DEVICE_TABLE(pnp, sb1000_pnp_ids);
 
+static const struct net_device_ops sb1000_netdev_ops = {
+	.ndo_open		= sb1000_open,
+	.ndo_start_xmit		= sb1000_start_xmit,
+	.ndo_do_ioctl		= sb1000_dev_ioctl,
+	.ndo_stop		= sb1000_close,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int
 sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id)
 {
@@ -192,11 +202,7 @@
 	if (sb1000_debug > 0)
 		printk(KERN_NOTICE "%s", version);
 
-	/* The SB1000-specific entries in the device structure. */
-	dev->open		= sb1000_open;
-	dev->do_ioctl		= sb1000_dev_ioctl;
-	dev->hard_start_xmit	= sb1000_start_xmit;
-	dev->stop		= sb1000_close;
+	dev->netdev_ops	= &sb1000_netdev_ops;
 
 	/* hardware address is 0:0:serial_number */
 	dev->dev_addr[2]	= serial_number >> 24 & 0xff;
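
sb1000 is one of many drivers in this series converted from setting function pointers directly on struct net_device to publishing a single const struct net_device_ops and assigning only dev->netdev_ops at probe time. A minimal sketch of the pattern for a hypothetical driver; the example_* handlers are assumed to be defined elsewhere in that driver:

	static const struct net_device_ops example_netdev_ops = {
		.ndo_open		= example_open,
		.ndo_stop		= example_close,
		.ndo_start_xmit		= example_start_xmit,
		.ndo_do_ioctl		= example_ioctl,
		.ndo_change_mtu		= eth_change_mtu,
		.ndo_set_mac_address	= eth_mac_addr,
		.ndo_validate_addr	= eth_validate_addr,
	};

	static void example_setup(struct net_device *dev)
	{
		/* Replaces dev->open = ..., dev->stop = ..., dev->do_ioctl = ... */
		dev->netdev_ops = &example_netdev_ops;
	}

Because the table is const, every instance of the driver shares one read-only ops structure instead of carrying its own set of pointers.
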
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 42fd312..8b75bef 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1408,6 +1408,7 @@
 	.ndo_set_multicast_list	= sc92031_set_multicast_list,
 	.ndo_change_mtu		= eth_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_tx_timeout		= sc92031_tx_timeout,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= sc92031_poll_controller,
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 6884dc8..5b9f2d9 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1403,9 +1403,9 @@
 	}
 
 	/* Disable both devices */
-	pci_disable_device(efx->pci_dev);
+	pci_clear_master(efx->pci_dev);
 	if (FALCON_IS_DUAL_FUNC(efx))
-		pci_disable_device(nic_data->pci_dev2);
+		pci_clear_master(nic_data->pci_dev2);
 	falcon_disable_interrupts(efx);
 
 	if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index b976876..9ecb77d 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -636,10 +636,11 @@
 {
 	int reg;
 
-	if (efx->phy_type == PHY_TYPE_SFT9001B) {
+	if (efx->phy_type == PHY_TYPE_SFT9001B)
 		device_remove_file(&efx->pci_dev->dev,
 				   &dev_attr_phy_short_reach);
-	} else {
+
+	if (efx->phy_type == PHY_TYPE_SFX7101) {
 		/* Power down the LNPGA */
 		reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
 		mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 83cc3c5..a973268 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1782,6 +1782,21 @@
 		generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
 }
 
+static const struct net_device_ops sis190_netdev_ops = {
+	.ndo_open		= sis190_open,
+	.ndo_stop		= sis190_close,
+	.ndo_do_ioctl		= sis190_ioctl,
+	.ndo_start_xmit		= sis190_start_xmit,
+	.ndo_tx_timeout		= sis190_tx_timeout,
+	.ndo_set_multicast_list = sis190_set_rx_mode,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	 = sis190_netpoll,
+#endif
+};
+
 static int __devinit sis190_init_one(struct pci_dev *pdev,
 				     const struct pci_device_id *ent)
 {
@@ -1815,19 +1830,12 @@
 
 	INIT_WORK(&tp->phy_task, sis190_phy_task);
 
-	dev->open = sis190_open;
-	dev->stop = sis190_close;
-	dev->do_ioctl = sis190_ioctl;
-	dev->tx_timeout = sis190_tx_timeout;
-	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
-	dev->hard_start_xmit = sis190_start_xmit;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = sis190_netpoll;
-#endif
-	dev->set_multicast_list = sis190_set_rx_mode;
+	dev->netdev_ops = &sis190_netdev_ops;
+
 	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
 	dev->irq = pdev->irq;
 	dev->base_addr = (unsigned long) 0xdead;
+	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
 
 	spin_lock_init(&tp->lock);
 
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 4acd41a..be4465bc 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -389,6 +389,7 @@
 	.ndo_set_multicast_list	= set_rx_mode,
 	.ndo_change_mtu		= eth_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_do_ioctl		= mii_ioctl,
 	.ndo_tx_timeout		= sis900_tx_timeout,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -508,10 +509,10 @@
 	else
 		ret = sis900_get_mac_addr(pci_dev, net_dev);
 
-	if (ret == 0) {
-		printk(KERN_WARNING "%s: Cannot read MAC address.\n", dev_name);
-		ret = -ENODEV;
-		goto err_unmap_rx;
+	if (!ret || !is_valid_ether_addr(net_dev->dev_addr)) {
+		random_ether_addr(net_dev->dev_addr);
+		printk(KERN_WARNING "%s: Unreadable or invalid MAC address,"
+				"using random generated one\n", dev_name);
 	}
 
 	/* 630ET : set the mii access mode as software-mode */
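
Instead of failing the probe when the MAC address cannot be read, sis900 now falls back to a randomly generated address and warns. A short sketch of the same fallback, assuming the EEPROM value has already been copied into dev->dev_addr:

	#include <linux/etherdevice.h>

	/* If the EEPROM-provided address is absent or invalid, use a random
	 * locally administered address rather than aborting the probe. */
	static void example_fixup_mac(struct net_device *dev)
	{
		if (!is_valid_ether_addr(dev->dev_addr)) {
			random_ether_addr(dev->dev_addr);
			printk(KERN_WARNING "%s: invalid MAC address, using a random one\n",
			       dev->name);
		}
	}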
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 8e1c0ba..5c61d5f 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -603,7 +603,6 @@
 	dev->mtu		= sl->mtu;
 	dev->type		= ARPHRD_SLIP + sl->mode;
 #ifdef SL_CHECK_TRANSMIT
-	dev->tx_timeout		= sl_tx_timeout;
 	dev->watchdog_timeo	= 20*HZ;
 #endif
 	return 0;
@@ -617,19 +616,26 @@
 	sl_free_bufs(sl);
 }
 
+static const struct net_device_ops sl_netdev_ops = {
+	.ndo_init		= sl_init,
+	.ndo_uninit	  	= sl_uninit,
+	.ndo_open		= sl_open,
+	.ndo_stop		= sl_close,
+	.ndo_start_xmit		= sl_xmit,
+	.ndo_get_stats	        = sl_get_stats,
+	.ndo_change_mtu		= sl_change_mtu,
+	.ndo_tx_timeout		= sl_tx_timeout,
+#ifdef CONFIG_SLIP_SMART
+	.ndo_do_ioctl		= sl_ioctl,
+#endif
+};
+
+
 static void sl_setup(struct net_device *dev)
 {
-	dev->init		= sl_init;
-	dev->uninit	  	= sl_uninit;
-	dev->open		= sl_open;
+	dev->netdev_ops		= &sl_netdev_ops;
 	dev->destructor		= free_netdev;
-	dev->stop		= sl_close;
-	dev->get_stats	        = sl_get_stats;
-	dev->change_mtu		= sl_change_mtu;
-	dev->hard_start_xmit	= sl_xmit;
-#ifdef CONFIG_SLIP_SMART
-	dev->do_ioctl		= sl_ioctl;
-#endif
+
 	dev->hard_header_len	= 0;
 	dev->addr_len		= 0;
 	dev->tx_queue_len	= 10;
diff --git a/drivers/net/slip.h b/drivers/net/slip.h
index 853e0f6..9ea5c11 100644
--- a/drivers/net/slip.h
+++ b/drivers/net/slip.h
@@ -75,7 +75,7 @@
   unsigned long         tx_errors;      /* Planned stuff                */
   unsigned long         rx_dropped;     /* No memory for skb            */
   unsigned long         tx_dropped;     /* When MTU change              */
-  unsigned long         rx_over_errors; /* Frame bigger then SLIP buf.  */
+  unsigned long         rx_over_errors; /* Frame bigger than SLIP buf.  */
 #ifdef SL_INCLUDE_CSLIP
   unsigned long		tx_compressed;
   unsigned long		rx_compressed;
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index 404b80e..8d36d40 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -192,6 +192,7 @@
 	.ndo_get_stats		= ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller 	= ei_poll,
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index b386608..2033fee 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -196,6 +196,7 @@
 	.ndo_get_stats		= ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller 	= ei_poll,
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index dc3f110..f513bdf 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -144,6 +144,7 @@
 	}
 
 	BUG();
+	return 0;
 }
 
 static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
@@ -1740,6 +1741,7 @@
 	.ndo_set_multicast_list	= smsc911x_set_multicast_list,
 	.ndo_do_ioctl		= smsc911x_do_ioctl,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= smsc911x_poll_controller,
 #endif
@@ -1967,7 +1969,7 @@
 	smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
 
 	retval = request_irq(dev->irq, smsc911x_irqhandler, IRQF_DISABLED,
-			     SMSC_CHIPNAME, dev);
+			     dev->name, dev);
 	if (retval) {
 		SMSC_WARNING(PROBE,
 			"Unable to claim requested irq: %d", dev->irq);
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 27e017d..c14a4c6 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -1551,6 +1551,7 @@
 	.ndo_set_multicast_list	= smsc9420_set_multicast_list,
 	.ndo_do_ioctl		= smsc9420_do_ioctl,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= smsc9420_poll_controller,
 #endif /* CONFIG_NET_POLL_CONTROLLER */
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 57fb1f7..da3a76b 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -648,6 +648,24 @@
 #endif /* VLAN_SUPPORT */
 
 
+static const struct net_device_ops netdev_ops = {
+	.ndo_open		= netdev_open,
+	.ndo_stop		= netdev_close,
+	.ndo_start_xmit		= start_tx,
+	.ndo_tx_timeout 	= tx_timeout,
+	.ndo_get_stats 		= get_stats,
+	.ndo_set_multicast_list = &set_rx_mode,
+	.ndo_do_ioctl 		= netdev_ioctl,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef VLAN_SUPPORT
+	.ndo_vlan_rx_register	= netdev_vlan_rx_register,
+	.ndo_vlan_rx_add_vid	= netdev_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= netdev_vlan_rx_kill_vid,
+#endif
+};
+
 static int __devinit starfire_init_one(struct pci_dev *pdev,
 				       const struct pci_device_id *ent)
 {
@@ -710,11 +728,9 @@
 	if (enable_hw_cksum)
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
 #endif /* ZEROCOPY */
+
 #ifdef VLAN_SUPPORT
 	dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
-	dev->vlan_rx_register = netdev_vlan_rx_register;
-	dev->vlan_rx_add_vid = netdev_vlan_rx_add_vid;
-	dev->vlan_rx_kill_vid = netdev_vlan_rx_kill_vid;
 #endif /* VLAN_RX_KILL_VID */
 #ifdef ADDR_64BITS
 	dev->features |= NETIF_F_HIGHDMA;
@@ -810,18 +826,12 @@
 		}
 	}
 
-	/* The chip-specific entries in the device structure. */
-	dev->open = &netdev_open;
-	dev->hard_start_xmit = &start_tx;
-	dev->tx_timeout = tx_timeout;
+	dev->netdev_ops = &netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
-	netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
-	dev->stop = &netdev_close;
-	dev->get_stats = &get_stats;
-	dev->set_multicast_list = &set_rx_mode;
-	dev->do_ioctl = &netdev_ioctl;
 	SET_ETHTOOL_OPS(dev, &ethtool_ops);
 
+	netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
+
 	if (mtu)
 		dev->mtu = mtu;
 
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 698893b..feaf0e0 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -449,6 +449,19 @@
 	}
 }
 
+static const struct net_device_ops netdev_ops = {
+	.ndo_open		= netdev_open,
+	.ndo_stop		= netdev_close,
+	.ndo_start_xmit		= start_tx,
+	.ndo_get_stats 		= get_stats,
+	.ndo_set_multicast_list = set_rx_mode,
+	.ndo_do_ioctl 		= netdev_ioctl,
+	.ndo_tx_timeout		= tx_timeout,
+	.ndo_change_mtu		= change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int __devinit sundance_probe1 (struct pci_dev *pdev,
 				      const struct pci_device_id *ent)
 {
@@ -530,16 +543,10 @@
 	np->mii_if.reg_num_mask = 0x1f;
 
 	/* The chip-specific entries in the device structure. */
-	dev->open = &netdev_open;
-	dev->hard_start_xmit = &start_tx;
-	dev->stop = &netdev_close;
-	dev->get_stats = &get_stats;
-	dev->set_multicast_list = &set_rx_mode;
-	dev->do_ioctl = &netdev_ioctl;
+	dev->netdev_ops = &netdev_ops;
 	SET_ETHTOOL_OPS(dev, &ethtool_ops);
-	dev->tx_timeout = &tx_timeout;
 	dev->watchdog_timeo = TX_TIMEOUT;
-	dev->change_mtu = &change_mtu;
+
 	pci_set_drvdata(pdev, dev);
 
 	i = register_netdev(dev);
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 8a74604..86c765d 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -2989,6 +2989,19 @@
 	}
 }
 
+static const struct net_device_ops gem_netdev_ops = {
+	.ndo_open		= gem_open,
+	.ndo_stop		= gem_close,
+	.ndo_start_xmit		= gem_start_xmit,
+	.ndo_get_stats		= gem_get_stats,
+	.ndo_set_multicast_list = gem_set_multicast,
+	.ndo_do_ioctl		= gem_ioctl,
+	.ndo_tx_timeout		= gem_tx_timeout,
+	.ndo_change_mtu		= gem_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int __devinit gem_init_one(struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
@@ -3142,17 +3155,10 @@
 	if (gem_get_device_address(gp))
 		goto err_out_free_consistent;
 
-	dev->open = gem_open;
-	dev->stop = gem_close;
-	dev->hard_start_xmit = gem_start_xmit;
-	dev->get_stats = gem_get_stats;
-	dev->set_multicast_list = gem_set_multicast;
-	dev->do_ioctl = gem_ioctl;
+	dev->netdev_ops = &gem_netdev_ops;
 	netif_napi_add(dev, &gp->napi, gem_poll, 64);
 	dev->ethtool_ops = &gem_ethtool_ops;
-	dev->tx_timeout = gem_tx_timeout;
 	dev->watchdog_timeo = 5 * HZ;
-	dev->change_mtu = gem_change_mtu;
 	dev->irq = pdev->irq;
 	dev->dma = 0;
 	dev->set_mac_address = gem_set_mac_address;
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index b22d335..7a72a31 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2607,6 +2607,18 @@
 }
 #endif /* CONFIG_PCI */
 
+static const struct net_device_ops hme_netdev_ops = {
+	.ndo_open		= happy_meal_open,
+	.ndo_stop		= happy_meal_close,
+	.ndo_start_xmit		= happy_meal_start_xmit,
+	.ndo_tx_timeout		= happy_meal_tx_timeout,
+	.ndo_get_stats		= happy_meal_get_stats,
+	.ndo_set_multicast_list = happy_meal_set_multicast,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 #ifdef CONFIG_SBUS
 static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
 {
@@ -2750,12 +2762,7 @@
 	init_timer(&hp->happy_timer);
 
 	hp->dev = dev;
-	dev->open = &happy_meal_open;
-	dev->stop = &happy_meal_close;
-	dev->hard_start_xmit = &happy_meal_start_xmit;
-	dev->get_stats = &happy_meal_get_stats;
-	dev->set_multicast_list = &happy_meal_set_multicast;
-	dev->tx_timeout = &happy_meal_tx_timeout;
+	dev->netdev_ops = &hme_netdev_ops;
 	dev->watchdog_timeo = 5*HZ;
 	dev->ethtool_ops = &hme_ethtool_ops;
 
@@ -3076,12 +3083,7 @@
 	init_timer(&hp->happy_timer);
 
 	hp->dev = dev;
-	dev->open = &happy_meal_open;
-	dev->stop = &happy_meal_close;
-	dev->hard_start_xmit = &happy_meal_start_xmit;
-	dev->get_stats = &happy_meal_get_stats;
-	dev->set_multicast_list = &happy_meal_set_multicast;
-	dev->tx_timeout = &happy_meal_tx_timeout;
+	dev->netdev_ops = &hme_netdev_ops;
 	dev->watchdog_timeo = 5*HZ;
 	dev->ethtool_ops = &hme_ethtool_ops;
 	dev->irq = pdev->irq;
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index 233f1cd..611230f 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -336,7 +336,7 @@
 	if (IS_ERR(desc))
 		return PTR_ERR(desc);
 
-	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%lx:%lx]\n",
+	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
 	       desc->hdr.state, desc->hdr.ack,
 	       desc->size, desc->ncookies,
 	       desc->cookies[0].cookie_addr,
@@ -394,14 +394,14 @@
 	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
 	struct vio_driver_state *vio = &port->vio;
 
-	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016lx] rcv_nxt[%016lx]\n",
+	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
 	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);
 
 	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
 		return 0;
 	if (unlikely(pkt->seq != dr->rcv_nxt)) {
-		printk(KERN_ERR PFX "RX out of sequence seq[0x%lx] "
-		       "rcv_nxt[0x%lx]\n", pkt->seq, dr->rcv_nxt);
+		printk(KERN_ERR PFX "RX out of sequence seq[0x%llx] "
+		       "rcv_nxt[0x%llx]\n", pkt->seq, dr->rcv_nxt);
 		return 0;
 	}
 
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index a10a83a..a7a4dc4 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -1004,7 +1004,7 @@
  * skb for rx. It assumes that Rx is desabled in HW
  * funcs are grouped for better cache usage
  *
- * RxD fifo is smaller then RxF fifo by design. Upon high load, RxD will be
+ * RxD fifo is smaller than RxF fifo by design. Upon high load, RxD will be
  * filled and packets will be dropped by nic without getting into host or
  * cousing interrupt. Anyway, in that condition, host has no chance to proccess
  * all packets, but dropping in nic is cheaper, since it takes 0 cpu cycles
@@ -1826,7 +1826,7 @@
  *
  * Pushes desc to TxD fifo and overlaps it if needed.
  * NOTE: this func does not check for available space. this is responsibility
- *    of the caller. Neither does it check that data size is smaller then
+ *    of the caller. Neither does it check that data size is smaller than
  *    fifo size.
  */
 static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 85ef8b7..68b967b 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -831,6 +831,21 @@
 }
 #endif
 
+static const struct net_device_ops TLan_netdev_ops = {
+	.ndo_open 		= TLan_Open,
+	.ndo_stop		= TLan_Close,
+	.ndo_start_xmit		= TLan_StartTx,
+	.ndo_tx_timeout		= TLan_tx_timeout,
+	.ndo_get_stats		= TLan_GetStats,
+	.ndo_set_multicast_list = TLan_SetMulticastList,
+	.ndo_do_ioctl		= TLan_ioctl,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	 = TLan_Poll,
+#endif
+};
 
 
 
@@ -892,16 +907,7 @@
 	netif_carrier_off(dev);
 
 	/* Device methods */
-	dev->open = &TLan_Open;
-	dev->hard_start_xmit = &TLan_StartTx;
-	dev->stop = &TLan_Close;
-	dev->get_stats = &TLan_GetStats;
-	dev->set_multicast_list = &TLan_SetMulticastList;
-	dev->do_ioctl = &TLan_ioctl;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = &TLan_Poll;
-#endif
-	dev->tx_timeout = &TLan_tx_timeout;
+	dev->netdev_ops = &TLan_netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 	return 0;
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index a011666..50eb29c 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -3064,7 +3064,7 @@
  * will consequently cause a timeout.
  *
  * NOTE 1: If the monitor_state is MS_BEACON_TEST_STATE, all transmit
- * queues other then the one used for the lobe_media_test should be
+ * queues other than the one used for the lobe_media_test should be
  * disabled.!?
  *
  * NOTE 2: If the monitor_state is MS_BEACON_TEST_STATE and the receive_mask
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 5166be9..d5d53b6 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1922,6 +1922,18 @@
 	goto fill_defaults;
 }
 
+static const struct net_device_ops de_netdev_ops = {
+	.ndo_open		= de_open,
+	.ndo_stop		= de_close,
+	.ndo_set_multicast_list = de_set_rx_mode,
+	.ndo_start_xmit		= de_start_xmit,
+	.ndo_get_stats		= de_get_stats,
+	.ndo_tx_timeout 	= de_tx_timeout,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 static int __devinit de_init_one (struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
@@ -1944,14 +1956,9 @@
 	if (!dev)
 		return -ENOMEM;
 
+	dev->netdev_ops = &de_netdev_ops;
 	SET_NETDEV_DEV(dev, &pdev->dev);
-	dev->open = de_open;
-	dev->stop = de_close;
-	dev->set_multicast_list = de_set_rx_mode;
-	dev->hard_start_xmit = de_start_xmit;
-	dev->get_stats = de_get_stats;
 	dev->ethtool_ops = &de_ethtool_ops;
-	dev->tx_timeout = de_tx_timeout;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 	de = netdev_priv(dev);
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 67bfd6f..6418f74 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1077,6 +1077,18 @@
     mdelay(2);                           /* Wait for 2ms */\
 }
 
+static const struct net_device_ops de4x5_netdev_ops = {
+    .ndo_open		= de4x5_open,
+    .ndo_stop		= de4x5_close,
+    .ndo_start_xmit	= de4x5_queue_pkt,
+    .ndo_get_stats	= de4x5_get_stats,
+    .ndo_set_multicast_list = set_multicast_list,
+    .ndo_do_ioctl	= de4x5_ioctl,
+    .ndo_change_mtu	= eth_change_mtu,
+    .ndo_set_mac_address= eth_mac_addr,
+    .ndo_validate_addr	= eth_validate_addr,
+};
+
 
 static int __devinit
 de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
@@ -1258,13 +1270,7 @@
 
     /* The DE4X5-specific entries in the device structure. */
     SET_NETDEV_DEV(dev, gendev);
-    dev->open = &de4x5_open;
-    dev->hard_start_xmit = &de4x5_queue_pkt;
-    dev->stop = &de4x5_close;
-    dev->get_stats = &de4x5_get_stats;
-    dev->set_multicast_list = &set_multicast_list;
-    dev->do_ioctl = &de4x5_ioctl;
-
+    dev->netdev_ops = &de4x5_netdev_ops;
     dev->mem_start = 0;
 
     /* Fill in the generic fields of the device structure. */
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 28a5c51..2e5c999 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -257,9 +257,6 @@
 	u8 wol_mode;			/* user WOL settings */
 	struct timer_list timer;
 
-	/* System defined statistic counter */
-	struct net_device_stats stats;
-
 	/* Driver defined statistic counter */
 	unsigned long tx_fifo_underrun;
 	unsigned long tx_loss_carrier;
@@ -316,7 +313,6 @@
 static int dmfe_open(struct DEVICE *);
 static int dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
 static int dmfe_stop(struct DEVICE *);
-static struct net_device_stats * dmfe_get_stats(struct DEVICE *);
 static void dmfe_set_filter_mode(struct DEVICE *);
 static const struct ethtool_ops netdev_ethtool_ops;
 static u16 read_srom_word(long ,int);
@@ -351,6 +347,19 @@
 
 /* DM910X network board routine ---------------------------- */
 
+static const struct net_device_ops netdev_ops = {
+	.ndo_open 		= dmfe_open,
+	.ndo_stop		= dmfe_stop,
+	.ndo_start_xmit		= dmfe_start_xmit,
+	.ndo_set_multicast_list = dmfe_set_filter_mode,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= poll_dmfe,
+#endif
+};
+
 /*
  *	Search DM910X board ,allocate space and register it
  */
@@ -442,14 +451,7 @@
 	dev->base_addr = db->ioaddr;
 	dev->irq = pdev->irq;
 	pci_set_drvdata(pdev, dev);
-	dev->open = &dmfe_open;
-	dev->hard_start_xmit = &dmfe_start_xmit;
-	dev->stop = &dmfe_stop;
-	dev->get_stats = &dmfe_get_stats;
-	dev->set_multicast_list = &dmfe_set_filter_mode;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = &poll_dmfe;
-#endif
+	dev->netdev_ops = &netdev_ops;
 	dev->ethtool_ops = &netdev_ethtool_ops;
 	netif_carrier_off(dev);
 	spin_lock_init(&db->lock);
@@ -867,15 +869,15 @@
 
 		/* A packet sent completed */
 		db->tx_packet_cnt--;
-		db->stats.tx_packets++;
+		dev->stats.tx_packets++;
 
 		/* Transmit statistic counter */
 		if ( tdes0 != 0x7fffffff ) {
 			/* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
-			db->stats.collisions += (tdes0 >> 3) & 0xf;
-			db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
+			dev->stats.collisions += (tdes0 >> 3) & 0xf;
+			dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
 			if (tdes0 & TDES0_ERR_MASK) {
-				db->stats.tx_errors++;
+				dev->stats.tx_errors++;
 
 				if (tdes0 & 0x0002) {	/* UnderRun */
 					db->tx_fifo_underrun++;
@@ -969,13 +971,13 @@
 			if (rdes0 & 0x8000) {
 				/* This is a error packet */
 				//printk(DRV_NAME ": rdes0: %lx\n", rdes0);
-				db->stats.rx_errors++;
+				dev->stats.rx_errors++;
 				if (rdes0 & 1)
-					db->stats.rx_fifo_errors++;
+					dev->stats.rx_fifo_errors++;
 				if (rdes0 & 2)
-					db->stats.rx_crc_errors++;
+					dev->stats.rx_crc_errors++;
 				if (rdes0 & 0x80)
-					db->stats.rx_length_errors++;
+					dev->stats.rx_length_errors++;
 			}
 
 			if ( !(rdes0 & 0x8000) ||
@@ -1008,8 +1010,8 @@
 
 					skb->protocol = eth_type_trans(skb, dev);
 					netif_rx(skb);
-					db->stats.rx_packets++;
-					db->stats.rx_bytes += rxlen;
+					dev->stats.rx_packets++;
+					dev->stats.rx_bytes += rxlen;
 				}
 			} else {
 				/* Reuse SKB buffer when the packet is error */
@@ -1024,20 +1026,6 @@
 	db->rx_ready_ptr = rxptr;
 }
 
-
-/*
- *	Get statistics from driver.
- */
-
-static struct net_device_stats * dmfe_get_stats(struct DEVICE *dev)
-{
-	struct dmfe_board_info *db = netdev_priv(dev);
-
-	DMFE_DBUG(0, "dmfe_get_stats", 0);
-	return &db->stats;
-}
-
-
 /*
  * Set DM910X multicast address
  */
@@ -1161,7 +1149,7 @@
 
 	/* Operating Mode Check */
 	if ( (db->dm910x_chk_mode & 0x1) &&
-		(db->stats.rx_packets > MAX_CHECK_PACKET) )
+		(dev->stats.rx_packets > MAX_CHECK_PACKET) )
 		db->dm910x_chk_mode = 0x4;
 
 	/* Dynamic reset DM910X : system error or transmit time-out */
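
dmfe, like uli526x and xircom_cb further down, drops its private struct net_device_stats and its get_stats method and counts directly into dev->stats, which the networking core returns by default when the driver provides no get_stats hook. A minimal sketch of the resulting accounting, using hypothetical receive helpers:

	static void example_rx_ok(struct net_device *dev, unsigned int len)
	{
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}

	static void example_rx_error(struct net_device *dev, u32 rdes0)
	{
		dev->stats.rx_errors++;
		if (rdes0 & 1)		/* FIFO error, as in the hunk above */
			dev->stats.rx_fifo_errors++;
		if (rdes0 & 2)		/* CRC error */
			dev->stats.rx_crc_errors++;
	}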
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index ff84bab..bee75fa 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1225,6 +1225,22 @@
 	return 0;
 }
 
+static const struct net_device_ops tulip_netdev_ops = {
+	.ndo_open		= tulip_open,
+	.ndo_start_xmit		= tulip_start_xmit,
+	.ndo_tx_timeout		= tulip_tx_timeout,
+	.ndo_stop		= tulip_close,
+	.ndo_get_stats		= tulip_get_stats,
+	.ndo_do_ioctl 		= private_ioctl,
+	.ndo_set_multicast_list = set_rx_mode,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	 = poll_tulip,
+#endif
+};
+
 static int __devinit tulip_init_one (struct pci_dev *pdev,
 				     const struct pci_device_id *ent)
 {
@@ -1601,20 +1617,11 @@
 	}
 
 	/* The Tulip-specific entries in the device structure. */
-	dev->open = tulip_open;
-	dev->hard_start_xmit = tulip_start_xmit;
-	dev->tx_timeout = tulip_tx_timeout;
+	dev->netdev_ops = &tulip_netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 #ifdef CONFIG_TULIP_NAPI
 	netif_napi_add(dev, &tp->napi, tulip_poll, 16);
 #endif
-	dev->stop = tulip_close;
-	dev->get_stats = tulip_get_stats;
-	dev->do_ioctl = private_ioctl;
-	dev->set_multicast_list = set_rx_mode;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = &poll_tulip;
-#endif
 	SET_ETHTOOL_OPS(dev, &ops);
 
 	if (register_netdev(dev))
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 00cbc52..030e02e 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -168,9 +168,6 @@
 	u8 wait_reset;			/* Hardware failed, need to reset */
 	struct timer_list timer;
 
-	/* System defined statistic counter */
-	struct net_device_stats stats;
-
 	/* Driver defined statistic counter */
 	unsigned long tx_fifo_underrun;
 	unsigned long tx_loss_carrier;
@@ -220,7 +217,6 @@
 static int uli526x_open(struct net_device *);
 static int uli526x_start_xmit(struct sk_buff *, struct net_device *);
 static int uli526x_stop(struct net_device *);
-static struct net_device_stats * uli526x_get_stats(struct net_device *);
 static void uli526x_set_filter_mode(struct net_device *);
 static const struct ethtool_ops netdev_ethtool_ops;
 static u16 read_srom_word(long, int);
@@ -251,6 +247,19 @@
 
 /* ULI526X network board routine ---------------------------- */
 
+static const struct net_device_ops netdev_ops = {
+	.ndo_open		= uli526x_open,
+	.ndo_stop		= uli526x_stop,
+	.ndo_start_xmit		= uli526x_start_xmit,
+	.ndo_set_multicast_list = uli526x_set_filter_mode,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller 	= uli526x_poll,
+#endif
+};
+
 /*
  *	Search ULI526X board, allocate space and register it
  */
@@ -335,15 +344,9 @@
 	pci_set_drvdata(pdev, dev);
 
 	/* Register some necessary functions */
-	dev->open = &uli526x_open;
-	dev->hard_start_xmit = &uli526x_start_xmit;
-	dev->stop = &uli526x_stop;
-	dev->get_stats = &uli526x_get_stats;
-	dev->set_multicast_list = &uli526x_set_filter_mode;
+	dev->netdev_ops = &netdev_ops;
 	dev->ethtool_ops = &netdev_ethtool_ops;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = &uli526x_poll;
-#endif
+
 	spin_lock_init(&db->lock);
 
 
@@ -733,7 +736,8 @@
  *	Free TX resource after TX complete
  */
 
-static void uli526x_free_tx_pkt(struct net_device *dev, struct uli526x_board_info * db)
+static void uli526x_free_tx_pkt(struct net_device *dev,
+				struct uli526x_board_info * db)
 {
 	struct tx_desc *txptr;
 	u32 tdes0;
@@ -747,15 +751,15 @@
 
 		/* A packet sent completed */
 		db->tx_packet_cnt--;
-		db->stats.tx_packets++;
+		dev->stats.tx_packets++;
 
 		/* Transmit statistic counter */
 		if ( tdes0 != 0x7fffffff ) {
 			/* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
-			db->stats.collisions += (tdes0 >> 3) & 0xf;
-			db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
+			dev->stats.collisions += (tdes0 >> 3) & 0xf;
+			dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
 			if (tdes0 & TDES0_ERR_MASK) {
-				db->stats.tx_errors++;
+				dev->stats.tx_errors++;
 				if (tdes0 & 0x0002) {	/* UnderRun */
 					db->tx_fifo_underrun++;
 					if ( !(db->cr6_data & CR6_SFT) ) {
@@ -825,13 +829,13 @@
 			if (rdes0 & 0x8000) {
 				/* This is a error packet */
 				//printk(DRV_NAME ": rdes0: %lx\n", rdes0);
-				db->stats.rx_errors++;
+				dev->stats.rx_errors++;
 				if (rdes0 & 1)
-					db->stats.rx_fifo_errors++;
+					dev->stats.rx_fifo_errors++;
 				if (rdes0 & 2)
-					db->stats.rx_crc_errors++;
+					dev->stats.rx_crc_errors++;
 				if (rdes0 & 0x80)
-					db->stats.rx_length_errors++;
+					dev->stats.rx_length_errors++;
 			}
 
 			if ( !(rdes0 & 0x8000) ||
@@ -854,8 +858,8 @@
 
 				skb->protocol = eth_type_trans(skb, dev);
 				netif_rx(skb);
-				db->stats.rx_packets++;
-				db->stats.rx_bytes += rxlen;
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes += rxlen;
 
 			} else {
 				/* Reuse SKB buffer when the packet is error */
@@ -872,19 +876,6 @@
 
 
 /*
- *	Get statistics from driver.
- */
-
-static struct net_device_stats * uli526x_get_stats(struct net_device *dev)
-{
-	struct uli526x_board_info *db = netdev_priv(dev);
-
-	ULI526X_DBUG(0, "uli526x_get_stats", 0);
-	return &db->stats;
-}
-
-
-/*
  * Set ULI526X multicast address
  */
 
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 022d99a..f467bf8 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -343,7 +343,18 @@
 static const struct ethtool_ops netdev_ethtool_ops;
 static int  netdev_close(struct net_device *dev);
 
-
+static const struct net_device_ops netdev_ops = {
+	.ndo_open		= netdev_open,
+	.ndo_stop		= netdev_close,
+	.ndo_start_xmit		= start_tx,
+	.ndo_get_stats		= get_stats,
+	.ndo_set_multicast_list = set_rx_mode,
+	.ndo_do_ioctl		= netdev_ioctl,
+	.ndo_tx_timeout		= tx_timeout,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
 
 static int __devinit w840_probe1 (struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
@@ -420,14 +431,8 @@
 		np->mii_if.force_media = 1;
 
 	/* The chip-specific entries in the device structure. */
-	dev->open = &netdev_open;
-	dev->hard_start_xmit = &start_tx;
-	dev->stop = &netdev_close;
-	dev->get_stats = &get_stats;
-	dev->set_multicast_list = &set_rx_mode;
-	dev->do_ioctl = &netdev_ioctl;
+	dev->netdev_ops = &netdev_ops;
 	dev->ethtool_ops = &netdev_ethtool_ops;
-	dev->tx_timeout = &tx_timeout;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 	i = register_netdev(dev);
@@ -1555,7 +1560,7 @@
  * 	rtnl_lock, & netif_device_detach after the rtnl_unlock.
  * - get_stats:
  * 	spin_lock_irq(np->lock), doesn't touch hw if not present
- * - hard_start_xmit:
+ * - start_xmit:
  * 	synchronize_irq + netif_tx_disable;
  * - tx_timeout:
  * 	netif_device_detach + netif_tx_disable;
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 13c8703..c2ca9f4 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -104,10 +104,8 @@
 	 */
 	spinlock_t lock;
 
-
 	struct pci_dev *pdev;
 	struct net_device *dev;
-	struct net_device_stats stats;
 };
 
 
@@ -119,7 +117,6 @@
 static int xircom_open(struct net_device *dev);
 static int xircom_close(struct net_device *dev);
 static void xircom_up(struct xircom_private *card);
-static struct net_device_stats *xircom_get_stats(struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void xircom_poll_controller(struct net_device *dev);
 #endif
@@ -194,6 +191,18 @@
 	.get_drvinfo		= netdev_get_drvinfo,
 };
 
+static const struct net_device_ops netdev_ops = {
+	.ndo_open		= xircom_open,
+	.ndo_stop		= xircom_close,
+	.ndo_start_xmit		= xircom_start_xmit,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= xircom_poll_controller,
+#endif
+};
+
 /* xircom_probe is the code that gets called on device insertion.
    it sets up the hardware and registers the device to the networklayer.
 
@@ -266,13 +275,7 @@
 	read_mac_address(private);
 	setup_descriptors(private);
 
-	dev->open = &xircom_open;
-	dev->hard_start_xmit = &xircom_start_xmit;
-	dev->stop = &xircom_close;
-	dev->get_stats = &xircom_get_stats;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = &xircom_poll_controller;
-#endif
+	dev->netdev_ops = &netdev_ops;
 	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
 	pci_set_drvdata(pdev, dev);
 
@@ -497,14 +500,6 @@
 }
 
 
-
-static struct net_device_stats *xircom_get_stats(struct net_device *dev)
-{
-        struct xircom_private *card = netdev_priv(dev);
-        return &card->stats;
-}
-
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void xircom_poll_controller(struct net_device *dev)
 {
@@ -1193,7 +1188,7 @@
 
 			skb = dev_alloc_skb(pkt_len + 2);
 			if (skb == NULL) {
-				card->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 				goto out;
 			}
 			skb_reserve(skb, 2);
@@ -1201,8 +1196,8 @@
 			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_rx(skb);
-			card->stats.rx_packets++;
-			card->stats.rx_bytes += pkt_len;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_len;
 
 		      out:
 			/* give the buffer back to the card */
@@ -1232,16 +1227,16 @@
 #endif
 		if (status > 0) {	/* bit 31 is 0 when done */
 			if (card->tx_skb[descnr]!=NULL) {
-				card->stats.tx_bytes += card->tx_skb[descnr]->len;
+				dev->stats.tx_bytes += card->tx_skb[descnr]->len;
 				dev_kfree_skb_irq(card->tx_skb[descnr]);
 			}
 			card->tx_skb[descnr] = NULL;
 			/* Bit 8 in the status field is 1 if there was a collision */
 			if (status&(1<<8))
-				card->stats.collisions++;
+				dev->stats.collisions++;
 			card->tx_buffer[4*descnr] = 0; /* descriptor is free again */
 			netif_wake_queue (dev);
-			card->stats.tx_packets++;
+			dev->stats.tx_packets++;
 		}
 
 		leave("investigate_write_descriptor");
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 0009f4e..3af9a95 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -2296,6 +2296,19 @@
 	return mode;
 }
 
+static const struct net_device_ops typhoon_netdev_ops = {
+	.ndo_open		= typhoon_open,
+	.ndo_stop		= typhoon_close,
+	.ndo_start_xmit		= typhoon_start_tx,
+	.ndo_set_multicast_list	= typhoon_set_rx_mode,
+	.ndo_tx_timeout		= typhoon_tx_timeout,
+	.ndo_get_stats		= typhoon_get_stats,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= typhoon_set_mac_address,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_vlan_rx_register	= typhoon_vlan_rx_register,
+};
+
 static int __devinit
 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -2495,16 +2508,9 @@
 	}
 
 	/* The chip-specific entries in the device structure. */
-	dev->open		= typhoon_open;
-	dev->hard_start_xmit	= typhoon_start_tx;
-	dev->stop		= typhoon_close;
-	dev->set_multicast_list	= typhoon_set_rx_mode;
-	dev->tx_timeout		= typhoon_tx_timeout;
+	dev->netdev_ops		= &typhoon_netdev_ops;
 	netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
 	dev->watchdog_timeo	= TX_TIMEOUT;
-	dev->get_stats		= typhoon_get_stats;
-	dev->set_mac_address	= typhoon_set_mac_address;
-	dev->vlan_rx_register	= typhoon_vlan_rx_register;
 
 	SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
 
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 7d5a130..1144122 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -442,40 +442,30 @@
 {
 	struct ucc_fast_private *uccf;
 	struct ucc_geth __iomem *ug_regs;
-	u32 maccfg2, uccm;
 
 	uccf = ugeth->uccf;
 	ug_regs = ugeth->ug_regs;
 
 	/* Enable interrupts for magic packet detection */
-	uccm = in_be32(uccf->p_uccm);
-	uccm |= UCCE_MPD;
-	out_be32(uccf->p_uccm, uccm);
+	setbits32(uccf->p_uccm, UCC_GETH_UCCE_MPD);
 
 	/* Enable magic packet detection */
-	maccfg2 = in_be32(&ug_regs->maccfg2);
-	maccfg2 |= MACCFG2_MPE;
-	out_be32(&ug_regs->maccfg2, maccfg2);
+	setbits32(&ug_regs->maccfg2, MACCFG2_MPE);
 }
 
 static void magic_packet_detection_disable(struct ucc_geth_private *ugeth)
 {
 	struct ucc_fast_private *uccf;
 	struct ucc_geth __iomem *ug_regs;
-	u32 maccfg2, uccm;
 
 	uccf = ugeth->uccf;
 	ug_regs = ugeth->ug_regs;
 
 	/* Disable interrupts for magic packet detection */
-	uccm = in_be32(uccf->p_uccm);
-	uccm &= ~UCCE_MPD;
-	out_be32(uccf->p_uccm, uccm);
+	clrbits32(uccf->p_uccm, UCC_GETH_UCCE_MPD);
 
 	/* Disable magic packet detection */
-	maccfg2 = in_be32(&ug_regs->maccfg2);
-	maccfg2 &= ~MACCFG2_MPE;
-	out_be32(&ug_regs->maccfg2, maccfg2);
+	clrbits32(&ug_regs->maccfg2, MACCFG2_MPE);
 }
 #endif /* MAGIC_PACKET */
 
@@ -585,7 +575,8 @@
 
 	/* Hardware only if user handed pointer and driver actually
 	gathers hardware statistics */
-	if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) {
+	if (hardware_statistics &&
+	    (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) {
 		hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
 		hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
 		hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
@@ -1181,9 +1172,7 @@
 	out_be32(uempr_register, value);
 
 	/* Set UPSMR register */
-	value = in_be32(upsmr_register);
-	value |= automatic_flow_control_mode;
-	out_be32(upsmr_register, value);
+	setbits32(upsmr_register, automatic_flow_control_mode);
 
 	value = in_be32(maccfg1_register);
 	if (rx_flow_control_enable)
@@ -1200,14 +1189,11 @@
 					     u32 __iomem *upsmr_register,
 					     u16 __iomem *uescr_register)
 {
-	u32 upsmr_value = 0;
 	u16 uescr_value = 0;
+
 	/* Enable hardware statistics gathering if requested */
-	if (enable_hardware_statistics) {
-		upsmr_value = in_be32(upsmr_register);
-		upsmr_value |= UPSMR_HSE;
-		out_be32(upsmr_register, upsmr_value);
-	}
+	if (enable_hardware_statistics)
+		setbits32(upsmr_register, UCC_GETH_UPSMR_HSE);
 
 	/* Clear hardware statistics counters */
 	uescr_value = in_be16(uescr_register);
@@ -1233,23 +1219,17 @@
 {
 	/* Note: this function does not check if */
 	/* the parameters it receives are NULL   */
-	u16 temoder_value;
-	u32 remoder_value;
 
 	if (enable_tx_firmware_statistics) {
 		out_be32(tx_rmon_base_ptr,
 			 tx_firmware_statistics_structure_address);
-		temoder_value = in_be16(temoder_register);
-		temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
-		out_be16(temoder_register, temoder_value);
+		setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE);
 	}
 
 	if (enable_rx_firmware_statistics) {
 		out_be32(rx_rmon_base_ptr,
 			 rx_firmware_statistics_structure_address);
-		remoder_value = in_be32(remoder_register);
-		remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
-		out_be32(remoder_register, remoder_value);
+		setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE);
 	}
 
 	return 0;
@@ -1316,15 +1296,12 @@
 static int init_preamble_length(u8 preamble_length,
 				u32 __iomem *maccfg2_register)
 {
-	u32 value = 0;
-
 	if ((preamble_length < 3) || (preamble_length > 7))
 		return -EINVAL;
 
-	value = in_be32(maccfg2_register);
-	value &= ~MACCFG2_PREL_MASK;
-	value |= (preamble_length << MACCFG2_PREL_SHIFT);
-	out_be32(maccfg2_register, value);
+	clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
+			preamble_length << MACCFG2_PREL_SHIFT);
+
 	return 0;
 }
 
@@ -1337,19 +1314,19 @@
 	value = in_be32(upsmr_register);
 
 	if (reject_broadcast)
-		value |= UPSMR_BRO;
+		value |= UCC_GETH_UPSMR_BRO;
 	else
-		value &= ~UPSMR_BRO;
+		value &= ~UCC_GETH_UPSMR_BRO;
 
 	if (receive_short_frames)
-		value |= UPSMR_RSH;
+		value |= UCC_GETH_UPSMR_RSH;
 	else
-		value &= ~UPSMR_RSH;
+		value &= ~UCC_GETH_UPSMR_RSH;
 
 	if (promiscuous)
-		value |= UPSMR_PRO;
+		value |= UCC_GETH_UPSMR_PRO;
 	else
-		value &= ~UPSMR_PRO;
+		value &= ~UCC_GETH_UPSMR_PRO;
 
 	out_be32(upsmr_register, value);
 
@@ -1410,26 +1387,27 @@
 
 	/*                    Set UPSMR                      */
 	upsmr = in_be32(&uf_regs->upsmr);
-	upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM);
+	upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
+		   UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
 	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
 	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
 	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
 	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
 	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
 	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
-		upsmr |= UPSMR_RPM;
+		upsmr |= UCC_GETH_UPSMR_RPM;
 		switch (ugeth->max_speed) {
 		case SPEED_10:
-			upsmr |= UPSMR_R10M;
+			upsmr |= UCC_GETH_UPSMR_R10M;
 			/* FALLTHROUGH */
 		case SPEED_100:
 			if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
-				upsmr |= UPSMR_RMM;
+				upsmr |= UCC_GETH_UPSMR_RMM;
 		}
 	}
 	if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
 	    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
-		upsmr |= UPSMR_TBIM;
+		upsmr |= UCC_GETH_UPSMR_TBIM;
 	}
 	out_be32(&uf_regs->upsmr, upsmr);
 
@@ -1517,9 +1495,9 @@
 				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
 				    (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
 					if (phydev->speed == SPEED_10)
-						upsmr |= UPSMR_R10M;
+						upsmr |= UCC_GETH_UPSMR_R10M;
 					else
-						upsmr &= ~(UPSMR_R10M);
+						upsmr &= ~UCC_GETH_UPSMR_R10M;
 				}
 				break;
 			default:
@@ -1602,10 +1580,8 @@
 	uccf = ugeth->uccf;
 
 	/* Mask GRACEFUL STOP TX interrupt bit and clear it */
-	temp = in_be32(uccf->p_uccm);
-	temp &= ~UCCE_GRA;
-	out_be32(uccf->p_uccm, temp);
-	out_be32(uccf->p_ucce, UCCE_GRA);	/* clear by writing 1 */
+	clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA);
+	out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA);  /* clear by writing 1 */
 
 	/* Issue host command */
 	cecr_subblock =
@@ -1617,7 +1593,7 @@
 	do {
 		msleep(10);
 		temp = in_be32(uccf->p_ucce);
-	} while (!(temp & UCCE_GRA) && --i);
+	} while (!(temp & UCC_GETH_UCCE_GRA) && --i);
 
 	uccf->stopped_tx = 1;
 
@@ -1975,12 +1951,9 @@
 	uf_regs = ugeth->uccf->uf_regs;
 
 	if (dev->flags & IFF_PROMISC) {
-
-		out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) | UPSMR_PRO);
-
+		setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
 	} else {
-
-		out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr)&~UPSMR_PRO);
+		clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
 
 		p_82xx_addr_filt =
 		    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
@@ -2020,7 +1993,6 @@
 {
 	struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
 	struct phy_device *phydev = ugeth->phydev;
-	u32 tempval;
 
 	ugeth_vdbg("%s: IN", __func__);
 
@@ -2037,9 +2009,7 @@
 	out_be32(ugeth->uccf->p_ucce, 0xffffffff);
 
 	/* Disable Rx and Tx */
-	tempval = in_be32(&ug_regs->maccfg1);
-	tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
-	out_be32(&ug_regs->maccfg1, tempval);
+	clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
 
 	ucc_geth_memclean(ugeth);
 }
@@ -2153,10 +2123,10 @@
 	/* Generate uccm_mask for receive */
 	uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
 	for (i = 0; i < ug_info->numQueuesRx; i++)
-		uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i);
+		uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);
 
 	for (i = 0; i < ug_info->numQueuesTx; i++)
-		uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
+		uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);
 	/* Initialize the general fast UCC block. */
 	if (ucc_fast_init(uf_info, &ugeth->uccf)) {
 		if (netif_msg_probe(ugeth))
@@ -2185,7 +2155,7 @@
 	struct ucc_geth __iomem *ug_regs;
 	int ret_val = -EINVAL;
 	u32 remoder = UCC_GETH_REMODER_INIT;
-	u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
+	u32 init_enet_pram_offset, cecr_subblock, command;
 	u32 ifstat, i, j, size, l2qt, l3qt, length;
 	u16 temoder = UCC_GETH_TEMODER_INIT;
 	u16 test;
@@ -2281,10 +2251,7 @@
 				 &uf_regs->upsmr,
 				 &ug_regs->uempr, &ug_regs->maccfg1);
 
-	maccfg1 = in_be32(&ug_regs->maccfg1);
-	maccfg1 |= MACCFG1_ENABLE_RX;
-	maccfg1 |= MACCFG1_ENABLE_TX;
-	out_be32(&ug_regs->maccfg1, maccfg1);
+	setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
 
 	/*                    Set IPGIFG                     */
 	/* For more details see the hardware spec.           */
@@ -3274,7 +3241,6 @@
 static int ucc_geth_poll(struct napi_struct *napi, int budget)
 {
 	struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
-	struct net_device *dev = ugeth->dev;
 	struct ucc_geth_info *ug_info;
 	int howmany, i;
 
@@ -3285,14 +3251,8 @@
 		howmany += ucc_geth_rx(ugeth, i, budget - howmany);
 
 	if (howmany < budget) {
-		struct ucc_fast_private *uccf;
-		u32 uccm;
-
 		netif_rx_complete(napi);
-		uccf = ugeth->uccf;
-		uccm = in_be32(uccf->p_uccm);
-		uccm |= UCCE_RX_EVENTS;
-		out_be32(uccf->p_uccm, uccm);
+		setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS);
 	}
 
 	return howmany;
@@ -3332,7 +3292,7 @@
 	/* Tx event processing */
 	if (ucce & UCCE_TX_EVENTS) {
 		spin_lock(&ugeth->lock);
-		tx_mask = UCCE_TXBF_SINGLE_MASK;
+		tx_mask = UCC_GETH_UCCE_TXB0;
 		for (i = 0; i < ug_info->numQueuesTx; i++) {
 			if (ucce & tx_mask)
 				ucc_geth_tx(dev, i);
@@ -3344,12 +3304,10 @@
 
 	/* Errors and other events */
 	if (ucce & UCCE_OTHER) {
-		if (ucce & UCCE_BSY) {
+		if (ucce & UCC_GETH_UCCE_BSY)
 			dev->stats.rx_errors++;
-		}
-		if (ucce & UCCE_TXE) {
+		if (ucce & UCC_GETH_UCCE_TXE)
 			dev->stats.tx_errors++;
-		}
 	}
 
 	return IRQ_HANDLED;
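
The ucc_geth rework above replaces open-coded read-modify-write sequences on big-endian registers with the setbits32()/clrbits32()/clrsetbits_be32() accessors and switches to the UCC_GETH_-prefixed bit names shared with ucc_geth.h. A rough sketch of why the helpers are drop-in replacements for the three-step sequence; the register pointer and mask are illustrative:

	/* Old style: explicit read/modify/write of a big-endian MMIO register. */
	static void example_set_bit_old(u32 __iomem *reg, u32 mask)
	{
		u32 val = in_be32(reg);

		val |= mask;
		out_be32(reg, val);
	}

	/* New style: one call does the same; clrbits32() clears instead, and
	 * clrsetbits_be32(reg, clear, set) covers the mask-then-or case such
	 * as the preamble-length update. */
	static void example_set_bit_new(u32 __iomem *reg, u32 mask)
	{
		setbits32(reg, mask);
	}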
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index d74d2f7..8f699cb 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -162,92 +162,27 @@
 								   boundary */
 
 /* UCC GETH Event Register */
-#define UCCE_MPD                                0x80000000	/* Magic packet
-								   detection */
-#define UCCE_SCAR                               0x40000000
-#define UCCE_GRA                                0x20000000	/* Tx graceful
-								   stop
-								   complete */
-#define UCCE_CBPR                               0x10000000
-#define UCCE_BSY                                0x08000000
-#define UCCE_RXC                                0x04000000
-#define UCCE_TXC                                0x02000000
-#define UCCE_TXE                                0x01000000
-#define UCCE_TXB7                               0x00800000
-#define UCCE_TXB6                               0x00400000
-#define UCCE_TXB5                               0x00200000
-#define UCCE_TXB4                               0x00100000
-#define UCCE_TXB3                               0x00080000
-#define UCCE_TXB2                               0x00040000
-#define UCCE_TXB1                               0x00020000
-#define UCCE_TXB0                               0x00010000
-#define UCCE_RXB7                               0x00008000
-#define UCCE_RXB6                               0x00004000
-#define UCCE_RXB5                               0x00002000
-#define UCCE_RXB4                               0x00001000
-#define UCCE_RXB3                               0x00000800
-#define UCCE_RXB2                               0x00000400
-#define UCCE_RXB1                               0x00000200
-#define UCCE_RXB0                               0x00000100
-#define UCCE_RXF7                               0x00000080
-#define UCCE_RXF6                               0x00000040
-#define UCCE_RXF5                               0x00000020
-#define UCCE_RXF4                               0x00000010
-#define UCCE_RXF3                               0x00000008
-#define UCCE_RXF2                               0x00000004
-#define UCCE_RXF1                               0x00000002
-#define UCCE_RXF0                               0x00000001
+#define UCCE_TXB   (UCC_GETH_UCCE_TXB7 | UCC_GETH_UCCE_TXB6 | \
+		    UCC_GETH_UCCE_TXB5 | UCC_GETH_UCCE_TXB4 | \
+		    UCC_GETH_UCCE_TXB3 | UCC_GETH_UCCE_TXB2 | \
+		    UCC_GETH_UCCE_TXB1 | UCC_GETH_UCCE_TXB0)
 
-#define UCCE_RXBF_SINGLE_MASK                   (UCCE_RXF0)
-#define UCCE_TXBF_SINGLE_MASK                   (UCCE_TXB0)
+#define UCCE_RXB   (UCC_GETH_UCCE_RXB7 | UCC_GETH_UCCE_RXB6 | \
+		    UCC_GETH_UCCE_RXB5 | UCC_GETH_UCCE_RXB4 | \
+		    UCC_GETH_UCCE_RXB3 | UCC_GETH_UCCE_RXB2 | \
+		    UCC_GETH_UCCE_RXB1 | UCC_GETH_UCCE_RXB0)
 
-#define UCCE_TXB         (UCCE_TXB7 | UCCE_TXB6 | UCCE_TXB5 | UCCE_TXB4 |\
-			UCCE_TXB3 | UCCE_TXB2 | UCCE_TXB1 | UCCE_TXB0)
-#define UCCE_RXB         (UCCE_RXB7 | UCCE_RXB6 | UCCE_RXB5 | UCCE_RXB4 |\
-			UCCE_RXB3 | UCCE_RXB2 | UCCE_RXB1 | UCCE_RXB0)
-#define UCCE_RXF         (UCCE_RXF7 | UCCE_RXF6 | UCCE_RXF5 | UCCE_RXF4 |\
-			UCCE_RXF3 | UCCE_RXF2 | UCCE_RXF1 | UCCE_RXF0)
-#define UCCE_OTHER       (UCCE_SCAR | UCCE_GRA  | UCCE_CBPR | UCCE_BSY  |\
-			UCCE_RXC  | UCCE_TXC  | UCCE_TXE)
+#define UCCE_RXF   (UCC_GETH_UCCE_RXF7 | UCC_GETH_UCCE_RXF6 | \
+		    UCC_GETH_UCCE_RXF5 | UCC_GETH_UCCE_RXF4 | \
+		    UCC_GETH_UCCE_RXF3 | UCC_GETH_UCCE_RXF2 | \
+		    UCC_GETH_UCCE_RXF1 | UCC_GETH_UCCE_RXF0)
 
-#define UCCE_RX_EVENTS							(UCCE_RXF | UCCE_BSY)
-#define UCCE_TX_EVENTS							(UCCE_TXB | UCCE_TXE)
+#define UCCE_OTHER (UCC_GETH_UCCE_SCAR | UCC_GETH_UCCE_GRA | \
+		    UCC_GETH_UCCE_CBPR | UCC_GETH_UCCE_BSY | \
+		    UCC_GETH_UCCE_RXC  | UCC_GETH_UCCE_TXC | UCC_GETH_UCCE_TXE)
 
-/* UCC GETH UPSMR (Protocol Specific Mode Register) */
-#define UPSMR_ECM                               0x04000000	/* Enable CAM
-								   Miss or
-								   Enable
-								   Filtering
-								   Miss */
-#define UPSMR_HSE                               0x02000000	/* Hardware
-								   Statistics
-								   Enable */
-#define UPSMR_PRO                               0x00400000	/* Promiscuous*/
-#define UPSMR_CAP                               0x00200000	/* CAM polarity
-								 */
-#define UPSMR_RSH                               0x00100000	/* Receive
-								   Short Frames
-								 */
-#define UPSMR_RPM                               0x00080000	/* Reduced Pin
-								   Mode
-								   interfaces */
-#define UPSMR_R10M                              0x00040000	/* RGMII/RMII
-								   10 Mode */
-#define UPSMR_RLPB                              0x00020000	/* RMII
-								   Loopback
-								   Mode */
-#define UPSMR_TBIM                              0x00010000	/* Ten-bit
-								   Interface
-								   Mode */
-#define UPSMR_RMM                               0x00001000	/* RMII/RGMII
-								   Mode */
-#define UPSMR_CAM                               0x00000400	/* CAM Address
-								   Matching */
-#define UPSMR_BRO                               0x00000200	/* Broadcast
-								   Address */
-#define UPSMR_RES1                              0x00002000	/* Reserved
-								   feild - must
-								   be 1 */
+#define UCCE_RX_EVENTS  (UCCE_RXF | UCC_GETH_UCCE_BSY)
+#define UCCE_TX_EVENTS	(UCCE_TXB | UCC_GETH_UCCE_TXE)
 
 /* UCC GETH MACCFG1 (MAC Configuration 1 Register) */
 #define MACCFG1_FLOW_RX                         0x00000020	/* Flow Control
@@ -945,9 +880,10 @@
 #define UCC_GETH_REMODER_INIT                   0	/* bits that must be
 							   set */
 #define UCC_GETH_TEMODER_INIT                   0xC000	/* bits that must */
-#define UCC_GETH_UPSMR_INIT                     (UPSMR_RES1)	/* Start value
-								   for this
-								   register */
+
+/* Initial value for UPSMR */
+#define UCC_GETH_UPSMR_INIT                     UCC_GETH_UPSMR_RES1
+
 #define UCC_GETH_MACCFG1_INIT                   0
 #define UCC_GETH_MACCFG2_INIT                   (MACCFG2_RESERVED_1)
 
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index edd244f..5b67bbf 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -23,7 +23,7 @@
 #include <linux/usb/usbnet.h>
 
 /* datasheet:
- http://www.davicom.com.tw/big5/download/Data%20Sheet/DM9601-DS-P01-930914.pdf
+ http://ptm2.cc.utu.fi/ftp/network/cards/DM9601/From_NET/DM9601-DS-P01-930914.pdf
 */
 
 /* control requests */
@@ -397,16 +397,24 @@
 	dm_write_reg_async(dev, DM_RX_CTRL, rx_ctl);
 }
 
+static void __dm9601_set_mac_address(struct usbnet *dev)
+{
+	dm_write_async(dev, DM_PHY_ADDR, ETH_ALEN, dev->net->dev_addr);
+}
+
 static int dm9601_set_mac_address(struct net_device *net, void *p)
 {
 	struct sockaddr *addr = p;
 	struct usbnet *dev = netdev_priv(net);
 
-	if (!is_valid_ether_addr(addr->sa_data))
+	if (!is_valid_ether_addr(addr->sa_data)) {
+		dev_err(&net->dev, "not setting invalid mac address %pM\n",
+								addr->sa_data);
 		return -EINVAL;
+	}
 
 	memcpy(net->dev_addr, addr->sa_data, net->addr_len);
-	dm_write_async(dev, DM_PHY_ADDR, net->addr_len, net->dev_addr);
+	__dm9601_set_mac_address(dev);
 
 	return 0;
 }
@@ -414,6 +422,7 @@
 static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
 {
 	int ret;
+	u8 mac[ETH_ALEN];
 
 	ret = usbnet_get_endpoints(dev, intf);
 	if (ret)
@@ -438,12 +447,24 @@
 	udelay(20);
 
 	/* read MAC */
-	if (dm_read(dev, DM_PHY_ADDR, ETH_ALEN, dev->net->dev_addr) < 0) {
+	if (dm_read(dev, DM_PHY_ADDR, ETH_ALEN, mac) < 0) {
 		printk(KERN_ERR "Error reading MAC address\n");
 		ret = -ENODEV;
 		goto out;
 	}
 
+	/*
+	 * Overwrite the auto-generated address only with a valid one
+	 * read from the EEPROM.
+	 */
+	if (is_valid_ether_addr(mac))
+		memcpy(dev->net->dev_addr, mac, ETH_ALEN);
+	else {
+		printk(KERN_WARNING
+			"dm9601: No valid MAC address in EEPROM, using %pM\n",
+			dev->net->dev_addr);
+		__dm9601_set_mac_address(dev);
+	}
+
 	/* power up phy */
 	dm_write_reg(dev, DM_GPR_CTRL, 1);
 	dm_write_reg(dev, DM_GPR_DATA, 0);
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index c4918b8..0d0fa91 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1297,6 +1297,7 @@
 	/* setup */
 	spin_lock_irq(&serial->serial_lock);
 	tty->driver_data = serial;
+	tty_kref_put(serial->tty);
 	serial->tty = tty_kref_get(tty);
 	spin_unlock_irq(&serial->serial_lock);
 
@@ -1792,8 +1793,8 @@
 
 	/* initialize */
 	ctrl_req->wValue = 0;
-	ctrl_req->wIndex = hso_port_to_mux(port);
-	ctrl_req->wLength = size;
+	ctrl_req->wIndex = cpu_to_le16(hso_port_to_mux(port));
+	ctrl_req->wLength = cpu_to_le16(size);
 
 	if (type == USB_CDC_GET_ENCAPSULATED_RESPONSE) {
 		/* Reading command */
@@ -2043,9 +2044,8 @@
 		return -2;
 	}
 
-	spin_lock(&serial->serial_lock);
+	/* All callers to put_rxbuf_data hold serial_lock */
 	tty = tty_kref_get(serial->tty);
-	spin_unlock(&serial->serial_lock);
 
 	/* Push data to tty */
 	if (tty) {
@@ -2053,8 +2053,10 @@
 			serial->curr_rx_urb_offset;
 		D1("data to push to tty");
 		while (write_length_remaining) {
-			if (test_bit(TTY_THROTTLED, &tty->flags))
+			if (test_bit(TTY_THROTTLED, &tty->flags)) {
+				tty_kref_put(tty);
 				return -1;
+			}
 			curr_write_len =  tty_insert_flip_string
 				(tty, urb->transfer_buffer +
 				 serial->curr_rx_urb_offset,
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 2ee034f..7cb10a0 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -251,7 +251,6 @@
 	struct net_device_stats stats;
 };
 
-
 /****************************************************************
  *     kaweth_control
  ****************************************************************/
@@ -283,9 +282,9 @@
 
 	dr->bRequestType= requesttype;
 	dr->bRequest = request;
-	dr->wValue = cpu_to_le16p(&value);
-	dr->wIndex = cpu_to_le16p(&index);
-	dr->wLength = cpu_to_le16p(&size);
+	dr->wValue = cpu_to_le16(value);
+	dr->wIndex = cpu_to_le16(index);
+	dr->wLength = cpu_to_le16(size);
 
 	return kaweth_internal_control_msg(kaweth->dev,
 					pipe,
@@ -975,6 +974,17 @@
 /****************************************************************
  *     kaweth_probe
  ****************************************************************/
+
+
+static const struct net_device_ops kaweth_netdev_ops = {
+	.ndo_open =			kaweth_open,
+	.ndo_stop =			kaweth_close,
+	.ndo_start_xmit =		kaweth_start_xmit,
+	.ndo_tx_timeout =		kaweth_tx_timeout,
+	.ndo_set_multicast_list =	kaweth_set_rx_mode,
+	.ndo_get_stats =		kaweth_netdev_stats,
+};
+
 static int kaweth_probe(
 		struct usb_interface *intf,
 		const struct usb_device_id *id      /* from id_table */
@@ -1147,22 +1157,13 @@
 	memcpy(netdev->dev_addr, &kaweth->configuration.hw_addr,
                sizeof(kaweth->configuration.hw_addr));
 
-	netdev->open = kaweth_open;
-	netdev->stop = kaweth_close;
-
+	netdev->netdev_ops = &kaweth_netdev_ops;
 	netdev->watchdog_timeo = KAWETH_TX_TIMEOUT;
-	netdev->tx_timeout = kaweth_tx_timeout;
-
-	netdev->hard_start_xmit = kaweth_start_xmit;
-	netdev->set_multicast_list = kaweth_set_rx_mode;
-	netdev->get_stats = kaweth_netdev_stats;
 	netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size);
 	SET_ETHTOOL_OPS(netdev, &ops);
 
 	/* kaweth is zeroed as part of alloc_netdev */
-
 	INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
-
 	usb_set_intfdata(intf, kaweth);
 
 #if 0
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 166880c..a8228d8 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -93,6 +93,7 @@
 MODULE_PARM_DESC (msg_level, "Override default message level");
 
 MODULE_DEVICE_TABLE(usb, pegasus_ids);
+static const struct net_device_ops pegasus_netdev_ops;
 
 static int update_eth_regs_async(pegasus_t *);
 /* Aargh!!! I _really_ hate such tweaks */
@@ -150,8 +151,8 @@
 	pegasus->dr.bRequestType = PEGASUS_REQT_READ;
 	pegasus->dr.bRequest = PEGASUS_REQ_GET_REGS;
 	pegasus->dr.wValue = cpu_to_le16(0);
-	pegasus->dr.wIndex = cpu_to_le16p(&indx);
-	pegasus->dr.wLength = cpu_to_le16p(&size);
+	pegasus->dr.wIndex = cpu_to_le16(indx);
+	pegasus->dr.wLength = cpu_to_le16(size);
 	pegasus->ctrl_urb->transfer_buffer_length = size;
 
 	usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb,
@@ -208,8 +209,8 @@
 	pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
 	pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
 	pegasus->dr.wValue = cpu_to_le16(0);
-	pegasus->dr.wIndex = cpu_to_le16p(&indx);
-	pegasus->dr.wLength = cpu_to_le16p(&size);
+	pegasus->dr.wIndex = cpu_to_le16(indx);
+	pegasus->dr.wLength = cpu_to_le16(size);
 	pegasus->ctrl_urb->transfer_buffer_length = size;
 
 	usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb,
@@ -261,7 +262,7 @@
 	pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
 	pegasus->dr.bRequest = PEGASUS_REQ_SET_REG;
 	pegasus->dr.wValue = cpu_to_le16(data);
-	pegasus->dr.wIndex = cpu_to_le16p(&indx);
+	pegasus->dr.wIndex = cpu_to_le16(indx);
 	pegasus->dr.wLength = cpu_to_le16(1);
 	pegasus->ctrl_urb->transfer_buffer_length = 1;
 
@@ -476,7 +477,7 @@
 
 	for (i = 0; i < 3; i++) {
 		read_eprom_word(pegasus, i, &w16);
-		((__le16 *) id)[i] = cpu_to_le16p(&w16);
+		((__le16 *) id)[i] = cpu_to_le16(w16);
 	}
 }
 
@@ -1360,14 +1361,10 @@
 	pegasus->intf = intf;
 	pegasus->usb = dev;
 	pegasus->net = net;
-	net->open = pegasus_open;
-	net->stop = pegasus_close;
+
+
 	net->watchdog_timeo = PEGASUS_TX_TIMEOUT;
-	net->tx_timeout = pegasus_tx_timeout;
-	net->do_ioctl = pegasus_ioctl;
-	net->hard_start_xmit = pegasus_start_xmit;
-	net->set_multicast_list = pegasus_set_multicast;
-	net->get_stats = pegasus_netdev_stats;
+	net->netdev_ops = &pegasus_netdev_ops;
 	SET_ETHTOOL_OPS(net, &ops);
 	pegasus->mii.dev = net;
 	pegasus->mii.mdio_read = mdio_read;
@@ -1482,6 +1479,16 @@
 	return 0;
 }
 
+static const struct net_device_ops pegasus_netdev_ops = {
+	.ndo_open =			pegasus_open,
+	.ndo_stop =			pegasus_close,
+	.ndo_do_ioctl =			pegasus_ioctl,
+	.ndo_start_xmit =		pegasus_start_xmit,
+	.ndo_set_multicast_list =	pegasus_set_multicast,
+	.ndo_get_stats =		pegasus_netdev_stats,
+	.ndo_tx_timeout =		pegasus_tx_timeout,
+};
+
 static struct usb_driver pegasus_driver = {
 	.name = driver_name,
 	.probe = pegasus_probe,
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index ac07cc6..3b8e632 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -622,6 +622,7 @@
 	.ndo_get_stats		 = rhine_get_stats,
 	.ndo_set_multicast_list	 = rhine_set_rx_mode,
 	.ndo_validate_addr	 = eth_validate_addr,
+	.ndo_set_mac_address 	 = eth_mac_addr,
 	.ndo_do_ioctl		 = netdev_ioctl,
 	.ndo_tx_timeout 	 = rhine_tx_timeout,
 #ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 58e25d0..a75f91d 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -855,6 +855,7 @@
 	.ndo_start_xmit		= velocity_xmit,
 	.ndo_get_stats		= velocity_get_stats,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_set_multicast_list	= velocity_set_multi,
 	.ndo_change_mtu		= velocity_change_mtu,
 	.ndo_do_ioctl		= velocity_ioctl,
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b7004ff..43f6523 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -624,6 +624,18 @@
 	return 0;
 }
 
+static const struct net_device_ops virtnet_netdev = {
+	.ndo_open            = virtnet_open,
+	.ndo_stop   	     = virtnet_close,
+	.ndo_start_xmit      = start_xmit,
+	.ndo_validate_addr   = eth_validate_addr,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_change_mtu	     = virtnet_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = virtnet_netpoll,
+#endif
+};
+
 static int virtnet_probe(struct virtio_device *vdev)
 {
 	int err;
@@ -636,14 +648,8 @@
 		return -ENOMEM;
 
 	/* Set up network device as normal. */
-	dev->open = virtnet_open;
-	dev->stop = virtnet_close;
-	dev->hard_start_xmit = start_xmit;
-	dev->change_mtu = virtnet_change_mtu;
+	dev->netdev_ops = &virtnet_netdev;
 	dev->features = NETIF_F_HIGHDMA;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = virtnet_netpoll;
-#endif
 	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
 	SET_NETDEV_DEV(dev, &vdev->dev);
 
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 2dc2416..0dbd85b 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -622,7 +622,7 @@
 	printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
 #endif
 	qmgr_disable_irq(queue_ids[port->id].rx);
-	netif_rx_schedule(dev, &port->napi);
+	netif_rx_schedule(&port->napi);
 }
 
 static int hss_hdlc_poll(struct napi_struct *napi, int budget)
@@ -651,7 +651,7 @@
 			printk(KERN_DEBUG "%s: hss_hdlc_poll"
 			       " netif_rx_complete\n", dev->name);
 #endif
-			netif_rx_complete(dev, napi);
+			netif_rx_complete(napi);
 			qmgr_enable_irq(rxq);
 			if (!qmgr_stat_empty(rxq) &&
 			    netif_rx_reschedule(napi)) {
@@ -1069,7 +1069,7 @@
 	hss_start_hdlc(port);
 
 	/* we may already have RX data, enables IRQ */
-	netif_rx_schedule(dev, &port->napi);
+	netif_rx_schedule(&port->napi);
 	return 0;
 
 err_unlock:
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index 3c1edda..d8322d2 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -155,6 +155,7 @@
 	.ndo_get_stats		= ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller 	= ei_poll,
diff --git a/drivers/net/wimax/Kconfig b/drivers/net/wimax/Kconfig
new file mode 100644
index 0000000..565018e
--- /dev/null
+++ b/drivers/net/wimax/Kconfig
@@ -0,0 +1,17 @@
+#
+# WiMAX LAN device drivers configuration
+#
+
+
+comment "Enable WiMAX (Networking options) to see the WiMAX drivers"
+	depends on WIMAX = n
+
+if WIMAX
+
+menu "WiMAX Wireless Broadband devices"
+
+source "drivers/net/wimax/i2400m/Kconfig"
+
+endmenu
+
+endif
diff --git a/drivers/net/wimax/Makefile b/drivers/net/wimax/Makefile
new file mode 100644
index 0000000..992bc02
--- /dev/null
+++ b/drivers/net/wimax/Makefile
@@ -0,0 +1,5 @@
+
+obj-$(CONFIG_WIMAX_I2400M)	+= i2400m/
+
+# (from Sam Ravnborg) force kbuild to create built-in.o
+obj- := dummy.o
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig
new file mode 100644
index 0000000..d623b3d
--- /dev/null
+++ b/drivers/net/wimax/i2400m/Kconfig
@@ -0,0 +1,49 @@
+
+config WIMAX_I2400M
+	tristate
+	depends on WIMAX
+	select FW_LOADER
+
+comment "Enable USB support to see WiMAX USB drivers"
+	depends on USB = n
+
+comment "Enable MMC support to see WiMAX SDIO drivers"
+	depends on MMC = n
+
+config WIMAX_I2400M_USB
+	tristate "Intel Wireless WiMAX Connection 2400 over USB (including 5x50)"
+	depends on WIMAX && USB
+	select WIMAX_I2400M
+	help
+	  Select if you have a device based on the Intel WiMAX
+	  Connection 2400 over USB (like any of the Intel Wireless
+	  WiMAX/WiFi Link 5x50 series).
+
+	  If unsure, it is safe to select M (module).
+
+config WIMAX_I2400M_SDIO
+	tristate "Intel Wireless WiMAX Connection 2400 over SDIO"
+	depends on WIMAX && MMC
+	select WIMAX_I2400M
+	help
+	  Select if you have a device based on the Intel WiMAX
+	  Connection 2400 over SDIO.
+
+	  If unsure, it is safe to select M (module).
+
+config WIMAX_I2400M_DEBUG_LEVEL
+	int "WiMAX i2400m debug level"
+	depends on WIMAX_I2400M
+	default 8
+	help
+
+	  Select the maximum debug verbosity level to be compiled into
+	  the WiMAX i2400m driver code.
+
+	  By default, debug output is disabled at runtime and can be
+	  selectively enabled for different parts of the code through
+	  the sysfs debug-levels file.
+
+	  If set to zero, this will compile out all the debug code.
+
+	  It is recommended that it is left at 8.
diff --git a/drivers/net/wimax/i2400m/Makefile b/drivers/net/wimax/i2400m/Makefile
new file mode 100644
index 0000000..1696e93
--- /dev/null
+++ b/drivers/net/wimax/i2400m/Makefile
@@ -0,0 +1,29 @@
+
+obj-$(CONFIG_WIMAX_I2400M) += i2400m.o
+obj-$(CONFIG_WIMAX_I2400M_USB) += i2400m-usb.o
+obj-$(CONFIG_WIMAX_I2400M_SDIO) += i2400m-sdio.o
+
+i2400m-y :=		\
+	control.o	\
+	driver.o	\
+	fw.o		\
+	op-rfkill.o	\
+	netdev.o	\
+	tx.o		\
+	rx.o
+
+i2400m-$(CONFIG_DEBUG_FS) += debugfs.o
+
+i2400m-usb-y :=			\
+	usb-fw.o		\
+	usb-notif.o		\
+	usb-tx.o		\
+	usb-rx.o		\
+	usb.o
+
+
+i2400m-sdio-y := 		\
+	sdio.o      		\
+	sdio-tx.o   		\
+	sdio-fw.o	 	\
+	sdio-rx.o
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
new file mode 100644
index 0000000..d3d37fe
--- /dev/null
+++ b/drivers/net/wimax/i2400m/control.c
@@ -0,0 +1,1291 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Miscellaneous control functions for managing the device
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *  - Initial implementation
+ *
+ * This is a collection of functions used to control the device (plus
+ * a few helpers).
+ *
+ * There are utilities for handling TLV buffers, hooks on the device's
+ * reports to act on device changes of state [i2400m_report_hook()],
+ * on acks to commands [i2400m_msg_ack_hook()], a helper for sending
+ * commands to the device and blocking until a reply arrives
+ * [i2400m_msg_to_dev()], a few high level commands for manipulating
+ * the device state, power saving mode and configuration, plus the
+ * routines to set up the device once communication is established
+ * with it [i2400m_dev_initialize()].
+ *
+ * ROADMAP
+ *
+ * i2400m_dev_initialize()      Called by i2400m_dev_start()
+ *   i2400m_set_init_config()
+ *   i2400m_firmware_check()
+ *   i2400m_cmd_get_state()
+ * i2400m_dev_shutdown()        Called by i2400m_dev_stop()
+ *   i2400m->bus_reset()
+ *
+ * i2400m_{cmd,get,set}_*()
+ *   i2400m_msg_to_dev()
+ *   i2400m_msg_check_status()
+ *
+ * i2400m_report_hook()         Called on reception of an event
+ *   i2400m_report_state_hook()
+ *     i2400m_tlv_buffer_walk()
+ *     i2400m_tlv_match()
+ *     i2400m_report_tlv_system_state()
+ *     i2400m_report_tlv_rf_switches_status()
+ *     i2400m_report_tlv_media_status()
+ *   i2400m_cmd_enter_powersave()
+ *
+ * i2400m_msg_ack_hook()        Called on reception of a reply to a
+ *                              command, get or set
+ */
+
+#include <stdarg.h>
+#include "i2400m.h"
+#include <linux/kernel.h>
+#include <linux/wimax/i2400m.h>
+
+
+#define D_SUBMODULE control
+#include "debug-levels.h"
+
+
+/*
+ * Check if a TLV is of a given type and size
+ *
+ * @tlv_hdr: pointer to the TLV
+ * @tlv_type: type of the TLV we are looking for
+ * @tlv_size: expected size of the TLV we are looking for (if -1,
+ *            don't check the size). This includes the header
+ * Returns: 0 if the TLV matches
+ *          < 0 if it doesn't match at all
+ *          > 0 total TLV + payload size, if the type matches, but not
+ *              the size
+ */
+static
+ssize_t i2400m_tlv_match(const struct i2400m_tlv_hdr *tlv,
+		     enum i2400m_tlv tlv_type, ssize_t tlv_size)
+{
+	if (le16_to_cpu(tlv->type) != tlv_type)	/* Not our type? skip */
+		return -1;
+	if (tlv_size != -1
+	    && le16_to_cpu(tlv->length) + sizeof(*tlv) != tlv_size) {
+		size_t size = le16_to_cpu(tlv->length) + sizeof(*tlv);
+		printk(KERN_WARNING "W: tlv type 0x%x mismatched because of "
+		       "size (got %zu vs %zu expected)\n",
+		       tlv_type, size, tlv_size);
+		return size;
+	}
+	return 0;
+}
+
+
+/*
+ * Given a buffer of TLVs, iterate over them
+ *
+ * @i2400m: device instance
+ * @tlv_buf: pointer to the beginning of the TLV buffer
+ * @buf_size: buffer size in bytes
+ * @tlv_pos: seek position; this is assumed to be a pointer returned
+ *           by i2400m_tlv_buffer_walk() [and thus, validated]. The
+ *           TLV returned will be the one following this one.
+ *
+ * Usage:
+ *
+ * tlv_itr = NULL;
+ * while (tlv_itr = i2400m_tlv_buffer_walk(i2400m, buf, size, tlv_itr))  {
+ *         ...
+ *         // Do stuff with tlv_itr, DON'T MODIFY IT
+ *         ...
+ * }
+ */
+static
+const struct i2400m_tlv_hdr *i2400m_tlv_buffer_walk(
+	struct i2400m *i2400m,
+	const void *tlv_buf, size_t buf_size,
+	const struct i2400m_tlv_hdr *tlv_pos)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	const struct i2400m_tlv_hdr *tlv_top = tlv_buf + buf_size;
+	size_t offset, length, avail_size;
+	unsigned type;
+
+	if (tlv_pos == NULL)	/* Take the first one? */
+		tlv_pos = tlv_buf;
+	else			/* Nope, the next one */
+		tlv_pos = (void *) tlv_pos
+			+ le16_to_cpu(tlv_pos->length) + sizeof(*tlv_pos);
+	if (tlv_pos == tlv_top) {	/* buffer done */
+		tlv_pos = NULL;
+		goto error_beyond_end;
+	}
+	if (tlv_pos > tlv_top) {
+		tlv_pos = NULL;
+		WARN_ON(1);
+		goto error_beyond_end;
+	}
+	offset = (void *) tlv_pos - (void *) tlv_buf;
+	avail_size = buf_size - offset;
+	if (avail_size < sizeof(*tlv_pos)) {
+		dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], tlv @%zu: "
+			"short header\n", tlv_buf, buf_size, offset);
+		goto error_short_header;
+	}
+	type = le16_to_cpu(tlv_pos->type);
+	length = le16_to_cpu(tlv_pos->length);
+	if (avail_size < sizeof(*tlv_pos) + length) {
+		dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], "
+			"tlv type 0x%04x @%zu: "
+			"short data (%zu bytes vs %zu needed)\n",
+			tlv_buf, buf_size, type, offset, avail_size,
+			sizeof(*tlv_pos) + length);
+		goto error_short_header;
+	}
+error_short_header:
+error_beyond_end:
+	return tlv_pos;
+}
+
+
+/*
+ * Find a TLV in a buffer of sequential TLVs
+ *
+ * @i2400m: device descriptor
+ * @tlv_hdr: pointer to the first TLV in the sequence
+ * @size: size of the buffer in bytes; all TLVs are assumed to fit
+ *        fully in the buffer (otherwise we'll complain).
+ * @tlv_type: type of the TLV we are looking for
+ * @tlv_size: expected size of the TLV we are looking for (if -1,
+ *            don't check the size). This includes the header
+ *
+ * Returns: NULL if the TLV is not found, otherwise a pointer to
+ *          it. If the sizes don't match, an error is printed and NULL
+ *          returned.
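+ *
+ * A usage sketch (as done by i2400m_firmware_check() below):
+ *
+ *	tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack),
+ *			      I2400M_TLV_L4_MESSAGE_VERSIONS, sizeof(*l4mv));
+ *	if (tlv == NULL)
+ *		return -EIO;
+ *	l4mv = container_of(tlv, typeof(*l4mv), hdr);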
+ */
+static
+const struct i2400m_tlv_hdr *i2400m_tlv_find(
+	struct i2400m *i2400m,
+	const struct i2400m_tlv_hdr *tlv_hdr, size_t size,
+	enum i2400m_tlv tlv_type, ssize_t tlv_size)
+{
+	ssize_t match;
+	struct device *dev = i2400m_dev(i2400m);
+	const struct i2400m_tlv_hdr *tlv = NULL;
+	while ((tlv = i2400m_tlv_buffer_walk(i2400m, tlv_hdr, size, tlv))) {
+		match = i2400m_tlv_match(tlv, tlv_type, tlv_size);
+		if (match == 0)		/* found it :) */
+			break;
+		if (match > 0)
+			dev_warn(dev, "TLV type 0x%04x found with size "
+				 "mismatch (%zu vs %zu needed)\n",
+				 tlv_type, match, tlv_size);
+	}
+	return tlv;
+}
+
+
+static const struct
+{
+	char *msg;
+	int errno;
+} ms_to_errno[I2400M_MS_MAX] = {
+	[I2400M_MS_DONE_OK] = { "", 0 },
+	[I2400M_MS_DONE_IN_PROGRESS] = { "", 0 },
+	[I2400M_MS_INVALID_OP] = { "invalid opcode", -ENOSYS },
+	[I2400M_MS_BAD_STATE] = { "invalid state", -EILSEQ },
+	[I2400M_MS_ILLEGAL_VALUE] = { "illegal value", -EINVAL },
+	[I2400M_MS_MISSING_PARAMS] = { "missing parameters", -ENOMSG },
+	[I2400M_MS_VERSION_ERROR] = { "bad version", -EIO },
+	[I2400M_MS_ACCESSIBILITY_ERROR] = { "accessibility error", -EIO },
+	[I2400M_MS_BUSY] = { "busy", -EBUSY },
+	[I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ },
+	[I2400M_MS_UNINITIALIZED] = { "not initialized", -EILSEQ },
+	[I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO },
+	[I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO },
+	[I2400M_MS_NO_RF] = { "no RF", -EIO },
+	[I2400M_MS_NOT_READY_FOR_POWERSAVE] =
+		{ "not ready for powersave", -EACCES },
+	[I2400M_MS_THERMAL_CRITICAL] = { "thermal critical", -EL3HLT },
+};
+
+
+/*
+ * i2400m_msg_check_status - translate a message's status code
+ *
+ * @i2400m: device descriptor
+ * @l3l4_hdr: message header
+ * @strbuf: buffer to place a formatted error message (unless NULL).
+ * @strbuf_size: max amount of available space; larger messages will
+ * be truncated.
+ *
+ * Returns: errno code corresponding to the status code in @l3l4_hdr
+ *          and a message in @strbuf describing the error.
+ */
+int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *l3l4_hdr,
+			    char *strbuf, size_t strbuf_size)
+{
+	int result;
+	enum i2400m_ms status = le16_to_cpu(l3l4_hdr->status);
+	const char *str;
+
+	if (status == 0)
+		return 0;
+	if (status >= ARRAY_SIZE(ms_to_errno)) {
+		str = "unknown status code";
+		result = -EBADR;
+	} else {
+		str = ms_to_errno[status].msg;
+		result = ms_to_errno[status].errno;
+	}
+	if (strbuf)
+		snprintf(strbuf, strbuf_size, "%s (%d)", str, status);
+	return result;
+}
+
+
+/*
+ * Act on a TLV System State reported by the device
+ *
+ * @i2400m: device descriptor
+ * @ss: validated System State TLV
+ */
+static
+void i2400m_report_tlv_system_state(struct i2400m *i2400m,
+				    const struct i2400m_tlv_system_state *ss)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+	enum i2400m_system_state i2400m_state = le32_to_cpu(ss->state);
+
+	d_fnstart(3, dev, "(i2400m %p ss %p [%u])\n", i2400m, ss, i2400m_state);
+
+	if (unlikely(i2400m->ready == 0))	/* act if up */
+		goto out;
+	if (i2400m->state != i2400m_state) {
+		i2400m->state = i2400m_state;
+		wake_up_all(&i2400m->state_wq);
+	}
+	switch (i2400m_state) {
+	case I2400M_SS_UNINITIALIZED:
+	case I2400M_SS_INIT:
+	case I2400M_SS_CONFIG:
+	case I2400M_SS_PRODUCTION:
+		wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED);
+		break;
+
+	case I2400M_SS_RF_OFF:
+	case I2400M_SS_RF_SHUTDOWN:
+		wimax_state_change(wimax_dev, WIMAX_ST_RADIO_OFF);
+		break;
+
+	case I2400M_SS_READY:
+	case I2400M_SS_STANDBY:
+	case I2400M_SS_SLEEPACTIVE:
+		wimax_state_change(wimax_dev, WIMAX_ST_READY);
+		break;
+
+	case I2400M_SS_CONNECTING:
+	case I2400M_SS_WIMAX_CONNECTED:
+		wimax_state_change(wimax_dev, WIMAX_ST_READY);
+		break;
+
+	case I2400M_SS_SCAN:
+	case I2400M_SS_OUT_OF_ZONE:
+		wimax_state_change(wimax_dev, WIMAX_ST_SCANNING);
+		break;
+
+	case I2400M_SS_IDLE:
+		d_printf(1, dev, "entering BS-negotiated idle mode\n");
+	case I2400M_SS_DISCONNECTING:
+	case I2400M_SS_DATA_PATH_CONNECTED:
+		wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED);
+		break;
+
+	default:
+		/* Huh? just in case, shut it down */
+		dev_err(dev, "HW BUG? unknown state %u: shutting down\n",
+			i2400m_state);
+		i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+		break;
+	};
+out:
+	d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n",
+		i2400m, ss, i2400m_state);
+}
+
+
+/*
+ * Parse and act on a TLV Media Status sent by the device
+ *
+ * @i2400m: device descriptor
+ * @ms: validated Media Status TLV
+ *
+ * This will set the carrier up or down based on the device's link
+ * report. This is done aside from what the WiMAX stack does based on
+ * the device's state, as sometimes we need to do a link renew (the BS
+ * wants us to renew a DHCP lease, for example).
+ *
+ * In fact, the doc says that every time we get a link-up, we should do
+ * a DHCP negotiation...
+ */
+static
+void i2400m_report_tlv_media_status(struct i2400m *i2400m,
+				    const struct i2400m_tlv_media_status *ms)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+	struct net_device *net_dev = wimax_dev->net_dev;
+	enum i2400m_media_status status = le32_to_cpu(ms->media_status);
+
+	d_fnstart(3, dev, "(i2400m %p ms %p [%u])\n", i2400m, ms, status);
+
+	if (unlikely(i2400m->ready == 0))	/* act if up */
+		goto out;
+	switch (status) {
+	case I2400M_MEDIA_STATUS_LINK_UP:
+		netif_carrier_on(net_dev);
+		break;
+	case I2400M_MEDIA_STATUS_LINK_DOWN:
+		netif_carrier_off(net_dev);
+		break;
+	/*
+	 * This is the network telling us we need to retrain the DHCP
+	 * lease -- so far, we are trusting the WiMAX Network Service
+	 * in user space to pick this up and poke the DHCP client.
+	 */
+	case I2400M_MEDIA_STATUS_LINK_RENEW:
+		netif_carrier_on(net_dev);
+		break;
+	default:
+		dev_err(dev, "HW BUG? unknown media status %u\n",
+			status);
+	};
+out:
+	d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n",
+		i2400m, ms, status);
+}
+
+
+/*
+ * Parse a 'state report' and extract carrier on/off information
+ *
+ * @i2400m: device descriptor
+ * @l3l4_hdr: pointer to message; it has been already validated for
+ *            consistent size.
+ * @size: size of the message (header + payload). The header length
+ *        declaration is assumed to be congruent with @size (as in
+ *        sizeof(*l3l4_hdr) + l3l4_hdr->length == size)
+ *
+ * Extract from the report state the system state TLV and infer from
+ * there if we have a carrier or not. Update our local state and tell
+ * netdev.
+ *
+ * When setting the carrier, it's fine to set OFF twice (for example),
+ * as netif_carrier_off() will not generate two OFF events (just on
+ * the transitions).
+ */
+static
+void i2400m_report_state_hook(struct i2400m *i2400m,
+			      const struct i2400m_l3l4_hdr *l3l4_hdr,
+			      size_t size, const char *tag)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	const struct i2400m_tlv_hdr *tlv;
+	const struct i2400m_tlv_system_state *ss;
+	const struct i2400m_tlv_rf_switches_status *rfss;
+	const struct i2400m_tlv_media_status *ms;
+	size_t tlv_size = le16_to_cpu(l3l4_hdr->length);
+
+	d_fnstart(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s)\n",
+		  i2400m, l3l4_hdr, size, tag);
+	tlv = NULL;
+
+	while ((tlv = i2400m_tlv_buffer_walk(i2400m, &l3l4_hdr->pl,
+					     tlv_size, tlv))) {
+		if (0 == i2400m_tlv_match(tlv, I2400M_TLV_SYSTEM_STATE,
+					  sizeof(*ss))) {
+			ss = container_of(tlv, typeof(*ss), hdr);
+			d_printf(2, dev, "%s: system state TLV "
+				 "found (0x%04x), state 0x%08x\n",
+				 tag, I2400M_TLV_SYSTEM_STATE,
+				 le32_to_cpu(ss->state));
+			i2400m_report_tlv_system_state(i2400m, ss);
+		}
+		if (0 == i2400m_tlv_match(tlv, I2400M_TLV_RF_STATUS,
+					  sizeof(*rfss))) {
+			rfss = container_of(tlv, typeof(*rfss), hdr);
+			d_printf(2, dev, "%s: RF status TLV "
+				 "found (0x%04x), sw 0x%02x hw 0x%02x\n",
+				 tag, I2400M_TLV_RF_STATUS,
+				 le32_to_cpu(rfss->sw_rf_switch),
+				 le32_to_cpu(rfss->hw_rf_switch));
+			i2400m_report_tlv_rf_switches_status(i2400m, rfss);
+		}
+		if (0 == i2400m_tlv_match(tlv, I2400M_TLV_MEDIA_STATUS,
+					  sizeof(*ms))) {
+			ms = container_of(tlv, typeof(*ms), hdr);
+			d_printf(2, dev, "%s: Media Status TLV: %u\n",
+				 tag, le32_to_cpu(ms->media_status));
+			i2400m_report_tlv_media_status(i2400m, ms);
+		}
+	}
+	d_fnend(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s) = void\n",
+		i2400m, l3l4_hdr, size, tag);
+}
+
+
+/*
+ * i2400m_report_hook - (maybe) act on a report
+ *
+ * @i2400m: device descriptor
+ * @l3l4_hdr: pointer to message; it has been already validated for
+ *            consistent size.
+ * @size: size of the message (header + payload). The header length
+ *        declaration is assumed to be congruent with @size (as in
+ *        sizeof(*l3l4_hdr) + l3l4_hdr->length == size)
+ *
+ * Extract information we might need (like carrier on/off) from a
+ * device report.
+ */
+void i2400m_report_hook(struct i2400m *i2400m,
+			const struct i2400m_l3l4_hdr *l3l4_hdr, size_t size)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	unsigned msg_type;
+
+	d_fnstart(3, dev, "(i2400m %p l3l4_hdr %p size %zu)\n",
+		  i2400m, l3l4_hdr, size);
+	/* Chew on the message, we might need some information from
+	 * here */
+	msg_type = le16_to_cpu(l3l4_hdr->type);
+	switch (msg_type) {
+	case I2400M_MT_REPORT_STATE:	/* carrier detection... */
+		i2400m_report_state_hook(i2400m,
+					 l3l4_hdr, size, "REPORT STATE");
+		break;
+	/* If the device is ready for power save, then ask it to do
+	 * it. */
+	case I2400M_MT_REPORT_POWERSAVE_READY:	/* zzzzz */
+		if (l3l4_hdr->status == cpu_to_le16(I2400M_MS_DONE_OK)) {
+			d_printf(1, dev, "ready for powersave, requesting\n");
+			i2400m_cmd_enter_powersave(i2400m);
+		}
+		break;
+	};
+	d_fnend(3, dev, "(i2400m %p l3l4_hdr %p size %zu) = void\n",
+		i2400m, l3l4_hdr, size);
+}
+
+
+/*
+ * i2400m_msg_ack_hook - process cmd/set/get ack for internal status
+ *
+ * @i2400m: device descriptor
+ * @l3l4_hdr: pointer to message; it has been already validated for
+ *            consistent size.
+ * @size: size of the message
+ *
+ * Extract information we might need from acks to commands and act on
+ * it. This is akin to i2400m_report_hook(). Note most of this
+ * processing should be done in the function that calls the
+ * command. This is here for some cases where it can't happen...
+ */
+void i2400m_msg_ack_hook(struct i2400m *i2400m,
+			 const struct i2400m_l3l4_hdr *l3l4_hdr, size_t size)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	unsigned ack_type, ack_status;
+	char strerr[32];
+
+	/* Chew on the message, we might need some information from
+	 * here */
+	ack_type = le16_to_cpu(l3l4_hdr->type);
+	ack_status = le16_to_cpu(l3l4_hdr->status);
+	switch (ack_type) {
+	case I2400M_MT_CMD_ENTER_POWERSAVE:
+		/* This is just left here for the sake of example, as
+		 * the processing is done somewhere else. */
+		if (0) {
+			result = i2400m_msg_check_status(
+				l3l4_hdr, strerr, sizeof(strerr));
+			if (result >= 0)
+				d_printf(1, dev, "ready for power save: %zd\n",
+					 size);
+		}
+		break;
+	};
+	return;
+}
+
+
+/*
+ * i2400m_msg_size_check() - verify message size and header are congruent
+ *
+ * It is ok if the total message size is larger than the expected
+ * size, as there can be padding.
+ */
+int i2400m_msg_size_check(struct i2400m *i2400m,
+			  const struct i2400m_l3l4_hdr *l3l4_hdr,
+			  size_t msg_size)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	size_t expected_size;
+	d_fnstart(4, dev, "(i2400m %p l3l4_hdr %p msg_size %zu)\n",
+		  i2400m, l3l4_hdr, msg_size);
+	if (msg_size < sizeof(*l3l4_hdr)) {
+		dev_err(dev, "bad size for message header "
+			"(expected at least %zu, got %zu)\n",
+			(size_t) sizeof(*l3l4_hdr), msg_size);
+		result = -EIO;
+		goto error_hdr_size;
+	}
+	expected_size = le16_to_cpu(l3l4_hdr->length) + sizeof(*l3l4_hdr);
+	if (msg_size < expected_size) {
+		dev_err(dev, "bad size for message code 0x%04x (expected %zu, "
+			"got %zu)\n", le16_to_cpu(l3l4_hdr->type),
+			expected_size, msg_size);
+		result = -EIO;
+	} else
+		result = 0;
+error_hdr_size:
+	d_fnend(4, dev,
+		"(i2400m %p l3l4_hdr %p msg_size %zu) = %d\n",
+		i2400m, l3l4_hdr, msg_size, result);
+	return result;
+}
+
+
+
+/*
+ * Cancel a wait for a command ACK
+ *
+ * @i2400m: device descriptor
+ * @code: [negative] errno code to cancel with (don't use
+ *     -EINPROGRESS)
+ *
+ * If there is an ack already filled out, free it.
+ */
+void i2400m_msg_to_dev_cancel_wait(struct i2400m *i2400m, int code)
+{
+	struct sk_buff *ack_skb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&i2400m->rx_lock, flags);
+	ack_skb = i2400m->ack_skb;
+	if (ack_skb && !IS_ERR(ack_skb))
+		kfree(ack_skb);
+	i2400m->ack_skb = ERR_PTR(code);
+	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+}
+
+
+/**
+ * i2400m_msg_to_dev - Send a control message to the device and get a response
+ *
+ * @i2400m: device descriptor
+ *
+ * @buf: pointer to the buffer containing the message to be sent; it
+ *           has to start with a &struct i2400m_l3l4_hdr and be
+ *           followed by the payload. Once this function returns, the
+ *           buffer can be reused.
+ *
+ * @buf_len: buffer size
+ *
+ * Returns:
+ *
+ * Pointer to skb containing the ack message. You need to check the
+ * pointer with IS_ERR(), as it might be an error code. Error codes
+ * could happen because:
+ *
+ *  - the message wasn't formatted correctly
+ *  - couldn't send the message
+ *  - failed waiting for a response
+ *  - the ack message wasn't formatted correctly
+ *
+ * The returned skb has been allocated with wimax_msg_to_user_alloc();
+ * it contains the response in a netlink attribute and is ready to be
+ * passed up to user space with wimax_msg_to_user_send(). To access
+ * the payload and its length, use wimax_msg_{data,len}() on the skb.
+ *
+ * The skb has to be freed with kfree_skb() once done.
+ *
+ * Description:
+ *
+ * This function delivers a message/command to the device and waits
+ * for an ack to be received. The format is described in
+ * linux/wimax/i2400m.h. In summary, a command/get/set is followed by an
+ * ack.
+ *
+ * This function will not check the ack status, that's left up to the
+ * caller.  Once done with the ack skb, it has to be kfree_skb()ed.
+ *
+ * The i2400m handles only one message at the same time, thus we need
+ * the mutex to exclude other players.
+ *
+ * We write the message and then wait for an answer to come back. The
+ * RX path intercepts control messages and handles them in
+ * i2400m_rx_ctl(). Reports (notifications) are (maybe) processed
+ * locally and then forwarded (as needed) to user space on the WiMAX
+ * stack message pipe. Acks are saved and passed back to us through an
+ * skb in i2400m->ack_skb which is ready to be given to generic
+ * netlink if need be.
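+ *
+ * A minimal usage sketch (error handling trimmed; it mirrors what the
+ * callers later in this file, e.g. i2400m_cmd_get_state(), do):
+ *
+ *	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+ *	if (IS_ERR(ack_skb))
+ *		return PTR_ERR(ack_skb);
+ *	ack = wimax_msg_data_len(ack_skb, &ack_len);
+ *	result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
+ *	...
+ *	kfree_skb(ack_skb);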
+ */
+struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m,
+				  const void *buf, size_t buf_len)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	const struct i2400m_l3l4_hdr *msg_l3l4_hdr;
+	struct sk_buff *ack_skb;
+	const struct i2400m_l3l4_hdr *ack_l3l4_hdr;
+	size_t ack_len;
+	int ack_timeout;
+	unsigned msg_type;
+	unsigned long flags;
+
+	d_fnstart(3, dev, "(i2400m %p buf %p len %zu)\n",
+		  i2400m, buf, buf_len);
+
+	if (i2400m->boot_mode)
+		return ERR_PTR(-ENODEV);
+
+	msg_l3l4_hdr = buf;
+	/* Check msg & payload consistency */
+	result = i2400m_msg_size_check(i2400m, msg_l3l4_hdr, buf_len);
+	if (result < 0)
+		goto error_bad_msg;
+	msg_type = le16_to_cpu(msg_l3l4_hdr->type);
+	d_printf(1, dev, "CMD/GET/SET 0x%04x %zu bytes\n",
+		 msg_type, buf_len);
+	d_dump(2, dev, buf, buf_len);
+
+	/* Setup the completion, ack_skb ("we are waiting") and send
+	 * the message to the device */
+	mutex_lock(&i2400m->msg_mutex);
+	spin_lock_irqsave(&i2400m->rx_lock, flags);
+	i2400m->ack_skb = ERR_PTR(-EINPROGRESS);
+	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+	init_completion(&i2400m->msg_completion);
+	result = i2400m_tx(i2400m, buf, buf_len, I2400M_PT_CTRL);
+	if (result < 0) {
+		dev_err(dev, "can't send message 0x%04x: %d\n",
+			le16_to_cpu(msg_l3l4_hdr->type), result);
+		goto error_tx;
+	}
+
+	/* Some commands take longer to execute because of crypto ops,
+	 * so we give them some more leeway on timeout */
+	switch (msg_type) {
+	case I2400M_MT_GET_TLS_OPERATION_RESULT:
+	case I2400M_MT_CMD_SEND_EAP_RESPONSE:
+		ack_timeout = 5 * HZ;
+		break;
+	default:
+		ack_timeout = HZ;
+	};
+
+	/* The RX path in rx.c will put any response for this message
+	 * in i2400m->ack_skb and wake us up. If we cancel the wait,
+	 * we need to change the value of i2400m->ack_skb to something
+	 * not -EINPROGRESS so RX knows there is no one waiting. */
+	result = wait_for_completion_interruptible_timeout(
+		&i2400m->msg_completion, ack_timeout);
+	if (result == 0) {
+		dev_err(dev, "timeout waiting for reply to message 0x%04x\n",
+			msg_type);
+		result = -ETIMEDOUT;
+		i2400m_msg_to_dev_cancel_wait(i2400m, result);
+		goto error_wait_for_completion;
+	} else if (result < 0) {
+		dev_err(dev, "error waiting for reply to message 0x%04x: %d\n",
+			msg_type, result);
+		i2400m_msg_to_dev_cancel_wait(i2400m, result);
+		goto error_wait_for_completion;
+	}
+
+	/* Pull out the ack data from i2400m->ack_skb -- see if it is
+	 * an error and act accordingly */
+	spin_lock_irqsave(&i2400m->rx_lock, flags);
+	ack_skb = i2400m->ack_skb;
+	if (IS_ERR(ack_skb))
+		result = PTR_ERR(ack_skb);
+	else
+		result = 0;
+	i2400m->ack_skb = NULL;
+	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+	if (result < 0)
+		goto error_ack_status;
+	ack_l3l4_hdr = wimax_msg_data_len(ack_skb, &ack_len);
+
+	/* Check the ack and deliver it if it is ok */
+	result = i2400m_msg_size_check(i2400m, ack_l3l4_hdr, ack_len);
+	if (result < 0) {
+		dev_err(dev, "HW BUG? reply to message 0x%04x: %d\n",
+			msg_type, result);
+		goto error_bad_ack_len;
+	}
+	if (msg_type != le16_to_cpu(ack_l3l4_hdr->type)) {
+		dev_err(dev, "HW BUG? bad reply 0x%04x to message 0x%04x\n",
+			le16_to_cpu(ack_l3l4_hdr->type), msg_type);
+		result = -EIO;
+		goto error_bad_ack_type;
+	}
+	i2400m_msg_ack_hook(i2400m, ack_l3l4_hdr, ack_len);
+	mutex_unlock(&i2400m->msg_mutex);
+	d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %p\n",
+		i2400m, buf, buf_len, ack_skb);
+	return ack_skb;
+
+error_bad_ack_type:
+error_bad_ack_len:
+	kfree_skb(ack_skb);
+error_ack_status:
+error_wait_for_completion:
+error_tx:
+	mutex_unlock(&i2400m->msg_mutex);
+error_bad_msg:
+	d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %d\n",
+		i2400m, buf, buf_len, result);
+	return ERR_PTR(result);
+}
+
+
+/*
+ * Definitions for the Enter Power Save command
+ *
+ * The Enter Power Save command requests the device to go into power
+ * saving mode. The device will ack or nak the command depending on
+ * whether it is ready for it; if it acks, the bus-specific code is
+ * expected to act on it (for example, the USB code).
+ *
+ * As well, the device might request to go into power saving mode by
+ * sending a report (REPORT_POWERSAVE_READY), in which case we issue
+ * this command; the hookups in i2400m_report_hook() (the RX path)
+ * take care of that.
+ */
+enum {
+	I2400M_WAKEUP_ENABLED  = 0x01,
+	I2400M_WAKEUP_DISABLED = 0x02,
+	I2400M_TLV_TYPE_WAKEUP_MODE = 144,
+};
+
+struct i2400m_cmd_enter_power_save {
+	struct i2400m_l3l4_hdr hdr;
+	struct i2400m_tlv_hdr tlv;
+	__le32 val;
+} __attribute__((packed));
+
+
+/*
+ * Request entering power save
+ *
+ * This command is (mainly) executed when the device indicates that it
+ * is ready to go into powersave mode via a REPORT_POWERSAVE_READY.
+ */
+int i2400m_cmd_enter_powersave(struct i2400m *i2400m)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	struct sk_buff *ack_skb;
+	struct i2400m_cmd_enter_power_save *cmd;
+	char strerr[32];
+
+	result = -ENOMEM;
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		goto error_alloc;
+	cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_ENTER_POWERSAVE);
+	cmd->hdr.length = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->hdr));
+	cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
+	cmd->tlv.type = cpu_to_le16(I2400M_TLV_TYPE_WAKEUP_MODE);
+	cmd->tlv.length = cpu_to_le16(sizeof(cmd->val));
+	cmd->val = cpu_to_le32(I2400M_WAKEUP_ENABLED);
+
+	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+	result = PTR_ERR(ack_skb);
+	if (IS_ERR(ack_skb)) {
+		dev_err(dev, "Failed to issue 'Enter power save' command: %d\n",
+			result);
+		goto error_msg_to_dev;
+	}
+	result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
+					 strerr, sizeof(strerr));
+	if (result == -EACCES)
+		d_printf(1, dev, "Cannot enter power save mode\n");
+	else if (result < 0)
+		dev_err(dev, "'Enter power save' (0x%04x) command failed: "
+			"%d - %s\n", I2400M_MT_CMD_ENTER_POWERSAVE,
+			result, strerr);
+	else
+		d_printf(1, dev, "device ready to power save\n");
+	kfree_skb(ack_skb);
+error_msg_to_dev:
+	kfree(cmd);
+error_alloc:
+	return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_cmd_enter_powersave);
+
+
+/*
+ * Definitions for getting device information
+ */
+enum {
+	I2400M_TLV_DETAILED_DEVICE_INFO = 140
+};
+
+/**
+ * i2400m_get_device_info - Query the device for detailed device information
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: an skb whose skb->data points to a 'struct
+ *    i2400m_tlv_detailed_device_info'. When done, kfree_skb() it. The
+ *    skb is *guaranteed* to contain the whole TLV data structure.
+ *
+ *    On error, IS_ERR(skb) is true and PTR_ERR(skb) is the error
+ *    code.
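+ *
+ *    A usage sketch (illustrative only; the actual caller lives
+ *    outside this file):
+ *
+ *	skb = i2400m_get_device_info(i2400m);
+ *	if (IS_ERR(skb))
+ *		return PTR_ERR(skb);
+ *	ddi = (void *) skb->data;
+ *	... use the fields of *ddi ...
+ *	kfree_skb(skb);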
+ */
+struct sk_buff *i2400m_get_device_info(struct i2400m *i2400m)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	struct sk_buff *ack_skb;
+	struct i2400m_l3l4_hdr *cmd;
+	const struct i2400m_l3l4_hdr *ack;
+	size_t ack_len;
+	const struct i2400m_tlv_hdr *tlv;
+	const struct i2400m_tlv_detailed_device_info *ddi;
+	char strerr[32];
+
+	ack_skb = ERR_PTR(-ENOMEM);
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		goto error_alloc;
+	cmd->type = cpu_to_le16(I2400M_MT_GET_DEVICE_INFO);
+	cmd->length = 0;
+	cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
+
+	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+	if (IS_ERR(ack_skb)) {
+		dev_err(dev, "Failed to issue 'get device info' command: %ld\n",
+			PTR_ERR(ack_skb));
+		goto error_msg_to_dev;
+	}
+	ack = wimax_msg_data_len(ack_skb, &ack_len);
+	result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
+	if (result < 0) {
+		dev_err(dev, "'get device info' (0x%04x) command failed: "
+			"%d - %s\n", I2400M_MT_GET_DEVICE_INFO, result,
+			strerr);
+		goto error_cmd_failed;
+	}
+	tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack),
+			      I2400M_TLV_DETAILED_DEVICE_INFO, sizeof(*ddi));
+	if (tlv == NULL) {
+		dev_err(dev, "GET DEVICE INFO: "
+			"detailed device info TLV not found (0x%04x)\n",
+			I2400M_TLV_DETAILED_DEVICE_INFO);
+		result = -EIO;
+		goto error_no_tlv;
+	}
+	skb_pull(ack_skb, (void *) tlv - (void *) ack_skb->data);
+error_msg_to_dev:
+	kfree(cmd);
+error_alloc:
+	return ack_skb;
+
+error_no_tlv:
+error_cmd_failed:
+	kfree_skb(ack_skb);
+	kfree(cmd);
+	return ERR_PTR(result);
+}
+
+
+/* Firmware interface versions we support */
+enum {
+	I2400M_HDIv_MAJOR = 9,
+	I2400M_HDIv_MAJOR_2 = 8,
+	I2400M_HDIv_MINOR = 1,
+};
+
+
+/**
+ * i2400m_firmware_check - check firmware versions are compatible with
+ * the driver
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: 0 if ok, < 0 errno code on error, with a message in the
+ *    kernel log.
+ *
+ * Long function, but quite simple; first chunk launches the command
+ * and double checks the reply for the right TLV. Then we process the
+ * TLV (where the meat is).
+ */
+int i2400m_firmware_check(struct i2400m *i2400m)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	struct sk_buff *ack_skb;
+	struct i2400m_l3l4_hdr *cmd;
+	const struct i2400m_l3l4_hdr *ack;
+	size_t ack_len;
+	const struct i2400m_tlv_hdr *tlv;
+	const struct i2400m_tlv_l4_message_versions *l4mv;
+	char strerr[32];
+	unsigned major, minor, branch;
+
+	result = -ENOMEM;
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		goto error_alloc;
+	cmd->type = cpu_to_le16(I2400M_MT_GET_LM_VERSION);
+	cmd->length = 0;
+	cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
+
+	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+	if (IS_ERR(ack_skb)) {
+		result = PTR_ERR(ack_skb);
+		dev_err(dev, "Failed to issue 'get lm version' command: %d\n",
+			result);
+		goto error_msg_to_dev;
+	}
+	ack = wimax_msg_data_len(ack_skb, &ack_len);
+	result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
+	if (result < 0) {
+		dev_err(dev, "'get lm version' (0x%04x) command failed: "
+			"%d - %s\n", I2400M_MT_GET_LM_VERSION, result,
+			strerr);
+		goto error_cmd_failed;
+	}
+	tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack),
+			      I2400M_TLV_L4_MESSAGE_VERSIONS, sizeof(*l4mv));
+	if (tlv == NULL) {
+		dev_err(dev, "get lm version: TLV not found (0x%04x)\n",
+			I2400M_TLV_L4_MESSAGE_VERSIONS);
+		result = -EIO;
+		goto error_no_tlv;
+	}
+	l4mv = container_of(tlv, typeof(*l4mv), hdr);
+	major = le16_to_cpu(l4mv->major);
+	minor = le16_to_cpu(l4mv->minor);
+	branch = le16_to_cpu(l4mv->branch);
+	result = -EINVAL;
+	if (major != I2400M_HDIv_MAJOR
+	    && major != I2400M_HDIv_MAJOR_2) {
+		dev_err(dev, "unsupported major fw interface version "
+			"%u.%u.%u\n", major, minor, branch);
+		goto error_bad_major;
+	}
+	if (major == I2400M_HDIv_MAJOR_2)
+		dev_err(dev, "deprecated major fw interface version "
+			"%u.%u.%u\n", major, minor, branch);
+	result = 0;
+	if (minor != I2400M_HDIv_MINOR)
+		dev_warn(dev, "untested minor fw interface version %u.%u.%u\n",
+			 major, minor, branch);
+error_bad_major:
+	dev_info(dev, "firmware interface version %u.%u.%u\n",
+		 major, minor, branch);
+error_no_tlv:
+error_cmd_failed:
+	kfree_skb(ack_skb);
+error_msg_to_dev:
+	kfree(cmd);
+error_alloc:
+	return result;
+}
+
+
+/*
+ * Send a DoExitIdle command to the device to ask it to go out of
+ * basestation-idle mode.
+ *
+ * @i2400m: device descriptor
+ *
+ * This starts a renegotiation with the basestation that might involve
+ * another crypto handshake with user space.
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ */
+int i2400m_cmd_exit_idle(struct i2400m *i2400m)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	struct sk_buff *ack_skb;
+	struct i2400m_l3l4_hdr *cmd;
+	char strerr[32];
+
+	result = -ENOMEM;
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		goto error_alloc;
+	cmd->type = cpu_to_le16(I2400M_MT_CMD_EXIT_IDLE);
+	cmd->length = 0;
+	cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
+
+	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+	result = PTR_ERR(ack_skb);
+	if (IS_ERR(ack_skb)) {
+		dev_err(dev, "Failed to issue 'exit idle' command: %d\n",
+			result);
+		goto error_msg_to_dev;
+	}
+	result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
+					 strerr, sizeof(strerr));
+	kfree_skb(ack_skb);
+error_msg_to_dev:
+	kfree(cmd);
+error_alloc:
+	return result;
+
+}
+
+
+/*
+ * Query the device for its state, update the WiMAX stack's idea of it
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * Executes a 'Get State' command and parses the returned
+ * TLVs.
+ *
+ * Because this is almost identical to a 'Report State', we use
+ * i2400m_report_state_hook() to parse the answer. This will set the
+ * carrier state, as well as the RF Kill switches state.
+ */
+int i2400m_cmd_get_state(struct i2400m *i2400m)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	struct sk_buff *ack_skb;
+	struct i2400m_l3l4_hdr *cmd;
+	const struct i2400m_l3l4_hdr *ack;
+	size_t ack_len;
+	char strerr[32];
+
+	result = -ENOMEM;
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		goto error_alloc;
+	cmd->type = cpu_to_le16(I2400M_MT_GET_STATE);
+	cmd->length = 0;
+	cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
+
+	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+	if (IS_ERR(ack_skb)) {
+		dev_err(dev, "Failed to issue 'get state' command: %ld\n",
+			PTR_ERR(ack_skb));
+		result = PTR_ERR(ack_skb);
+		goto error_msg_to_dev;
+	}
+	ack = wimax_msg_data_len(ack_skb, &ack_len);
+	result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
+	if (result < 0) {
+		dev_err(dev, "'get state' (0x%04x) command failed: "
+			"%d - %s\n", I2400M_MT_GET_STATE, result, strerr);
+		goto error_cmd_failed;
+	}
+	i2400m_report_state_hook(i2400m, ack, ack_len - sizeof(*ack),
+				 "GET STATE");
+	result = 0;
+	kfree_skb(ack_skb);
+error_cmd_failed:
+error_msg_to_dev:
+	kfree(cmd);
+error_alloc:
+	return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_cmd_get_state);
+
+
+/**
+ * Set basic configuration settings
+ *
+ * @i2400m: device descriptor
+ * @arg: array of pointers to the TLV headers to send for
+ *     configuration (each followed by its payload).
+ *     TLV headers and payloads must be properly initialized, with the
+ *     right endianness (LE).
+ * @args: number of pointers in the @arg array
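+ *
+ * A usage sketch (mirroring i2400m_dev_initialize() below):
+ *
+ *	const struct i2400m_tlv_hdr *args[9];
+ *	unsigned argc = 0;
+ *
+ *	args[argc++] = &idle_params.hdr;
+ *	result = i2400m_set_init_config(i2400m, args, argc);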
+ */
+int i2400m_set_init_config(struct i2400m *i2400m,
+			   const struct i2400m_tlv_hdr **arg, size_t args)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	struct sk_buff *ack_skb;
+	struct i2400m_l3l4_hdr *cmd;
+	char strerr[32];
+	unsigned argc, argsize, tlv_size;
+	const struct i2400m_tlv_hdr *tlv_hdr;
+	void *buf, *itr;
+
+	d_fnstart(3, dev, "(i2400m %p arg %p args %zu)\n", i2400m, arg, args);
+	result = 0;
+	if (args == 0)
+		goto none;
+	/* Compute the size of all the TLVs, so we can alloc a
+	 * contiguous command block to copy them. */
+	argsize = 0;
+	for (argc = 0; argc < args; argc++) {
+		tlv_hdr = arg[argc];
+		argsize += sizeof(*tlv_hdr) + le16_to_cpu(tlv_hdr->length);
+	}
+	WARN_ON(argc >= 9);	/* As per hw spec */
+
+	/* Alloc the space for the command and TLVs*/
+	result = -ENOMEM;
+	buf = kzalloc(sizeof(*cmd) + argsize, GFP_KERNEL);
+	if (buf == NULL)
+		goto error_alloc;
+	cmd = buf;
+	cmd->type = cpu_to_le16(I2400M_MT_SET_INIT_CONFIG);
+	cmd->length = cpu_to_le16(argsize);
+	cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
+
+	/* Copy the TLVs */
+	itr = buf + sizeof(*cmd);
+	for (argc = 0; argc < args; argc++) {
+		tlv_hdr = arg[argc];
+		tlv_size = sizeof(*tlv_hdr) + le16_to_cpu(tlv_hdr->length);
+		memcpy(itr, tlv_hdr, tlv_size);
+		itr += tlv_size;
+	}
+
+	/* Send the message! */
+	ack_skb = i2400m_msg_to_dev(i2400m, buf, sizeof(*cmd) + argsize);
+	result = PTR_ERR(ack_skb);
+	if (IS_ERR(ack_skb)) {
+		dev_err(dev, "Failed to issue 'init config' command: %d\n",
+			result);
+
+		goto error_msg_to_dev;
+	}
+	result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
+					 strerr, sizeof(strerr));
+	if (result < 0)
+		dev_err(dev, "'init config' (0x%04x) command failed: %d - %s\n",
+			I2400M_MT_SET_INIT_CONFIG, result, strerr);
+	kfree_skb(ack_skb);
+error_msg_to_dev:
+	kfree(buf);
+error_alloc:
+none:
+	d_fnend(3, dev, "(i2400m %p arg %p args %zu) = %d\n",
+		i2400m, arg, args, result);
+	return result;
+
+}
+EXPORT_SYMBOL_GPL(i2400m_set_init_config);
+
+
+/**
+ * i2400m_dev_initialize - Initialize the device once communications are ready
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * Configures the device to work the way we like it.
+ *
+ * At the point of this call, the device is registered with the WiMAX
+ * and netdev stacks, firmware is uploaded and we can talk to the
+ * device normally.
+ */
+int i2400m_dev_initialize(struct i2400m *i2400m)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	struct i2400m_tlv_config_idle_parameters idle_params;
+	const struct i2400m_tlv_hdr *args[9];
+	unsigned argc = 0;
+
+	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+	/* Useless for now...might change */
+	if (i2400m_idle_mode_disabled) {
+		idle_params.hdr.type =
+			cpu_to_le16(I2400M_TLV_CONFIG_IDLE_PARAMETERS);
+		idle_params.hdr.length = cpu_to_le16(
+			sizeof(idle_params) - sizeof(idle_params.hdr));
+		idle_params.idle_timeout = 0;
+		idle_params.idle_paging_interval = 0;
+		args[argc++] = &idle_params.hdr;
+	}
+	result = i2400m_set_init_config(i2400m, args, argc);
+	if (result < 0)
+		goto error;
+	result = i2400m_firmware_check(i2400m);	/* fw versions ok? */
+	if (result < 0)
+		goto error;
+	/*
+	 * Update state: Here it just calls a get state; parsing the
+	 * result (System State TLV and RF Status TLV [done in the rx
+	 * path hooks]) will set the hardware and software RF-Kill
+	 * status.
+	 */
+	result = i2400m_cmd_get_state(i2400m);
+error:
+	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+	return result;
+}
+
+
+/**
+ * i2400m_dev_shutdown - Shutdown a running device
+ *
+ * @i2400m: device descriptor
+ *
+ * Gracefully stops the device, moving it to the lowest power
+ * consumption state possible.
+ */
+void i2400m_dev_shutdown(struct i2400m *i2400m)
+{
+	int result = -ENODEV;
+	struct device *dev = i2400m_dev(i2400m);
+
+	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+	result = i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+	d_fnend(3, dev, "(i2400m %p) = void [%d]\n", i2400m, result);
+	return;
+}
diff --git a/drivers/net/wimax/i2400m/debug-levels.h b/drivers/net/wimax/i2400m/debug-levels.h
new file mode 100644
index 0000000..3183baa
--- /dev/null
+++ b/drivers/net/wimax/i2400m/debug-levels.h
@@ -0,0 +1,45 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Debug levels control file for the i2400m module
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#ifndef __debug_levels__h__
+#define __debug_levels__h__
+
+/* Maximum compile and run time debug level for all submodules */
+#define D_MODULENAME i2400m
+#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL
+
+#include <linux/wimax/debug.h>
+
+/* List of all the enabled modules */
+enum d_module {
+	D_SUBMODULE_DECLARE(control),
+	D_SUBMODULE_DECLARE(driver),
+	D_SUBMODULE_DECLARE(debugfs),
+	D_SUBMODULE_DECLARE(fw),
+	D_SUBMODULE_DECLARE(netdev),
+	D_SUBMODULE_DECLARE(rfkill),
+	D_SUBMODULE_DECLARE(rx),
+	D_SUBMODULE_DECLARE(tx),
+};
+
+
+#endif /* #ifndef __debug_levels__h__ */
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c
new file mode 100644
index 0000000..6266329
--- /dev/null
+++ b/drivers/net/wimax/i2400m/debugfs.c
@@ -0,0 +1,392 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Debugfs interfaces to manipulate driver and device information
+ *
+ *
+ * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include "i2400m.h"
+
+
+#define D_SUBMODULE debugfs
+#include "debug-levels.h"
+
+static
+int debugfs_netdev_queue_stopped_get(void *data, u64 *val)
+{
+	struct i2400m *i2400m = data;
+	*val = netif_queue_stopped(i2400m->wimax_dev.net_dev);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_netdev_queue_stopped,
+			debugfs_netdev_queue_stopped_get,
+			NULL, "%llu\n");
+
+
+static
+struct dentry *debugfs_create_netdev_queue_stopped(
+	const char *name, struct dentry *parent, struct i2400m *i2400m)
+{
+	return debugfs_create_file(name, 0400, parent, i2400m,
+				   &fops_netdev_queue_stopped);
+}
+
+
+/*
+ * inode->i_private has the @data argument to debugfs_create_file()
+ */
+static
+int i2400m_stats_open(struct inode *inode, struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+/*
+ * We don't allow partial reads of this file, as then the reader would
+ * get inconsistent data while it is being updated.
+ *
+ * So you either read it all or nothing; if you try to read with an
+ * offset != 0, we consider you are done reading.
+ */
+static
+ssize_t i2400m_rx_stats_read(struct file *filp, char __user *buffer,
+			     size_t count, loff_t *ppos)
+{
+	struct i2400m *i2400m = filp->private_data;
+	char buf[128];
+	unsigned long flags;
+
+	if (*ppos != 0)
+		return 0;
+	if (count < sizeof(buf))
+		return -ENOSPC;
+	spin_lock_irqsave(&i2400m->rx_lock, flags);
+	snprintf(buf, sizeof(buf), "%u %u %u %u %u %u %u\n",
+		 i2400m->rx_pl_num, i2400m->rx_pl_min,
+		 i2400m->rx_pl_max, i2400m->rx_num,
+		 i2400m->rx_size_acc,
+		 i2400m->rx_size_min, i2400m->rx_size_max);
+	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+}
+
+
+/* Any write clears the stats */
+static
+ssize_t i2400m_rx_stats_write(struct file *filp, const char __user *buffer,
+			      size_t count, loff_t *ppos)
+{
+	struct i2400m *i2400m = filp->private_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&i2400m->rx_lock, flags);
+	i2400m->rx_pl_num = 0;
+	i2400m->rx_pl_max = 0;
+	i2400m->rx_pl_min = UINT_MAX;
+	i2400m->rx_num = 0;
+	i2400m->rx_size_acc = 0;
+	i2400m->rx_size_min = UINT_MAX;
+	i2400m->rx_size_max = 0;
+	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+	return count;
+}
+
+static
+const struct file_operations i2400m_rx_stats_fops = {
+	.owner =	THIS_MODULE,
+	.open =		i2400m_stats_open,
+	.read =		i2400m_rx_stats_read,
+	.write =	i2400m_rx_stats_write,
+};
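+
+/*
+ * Illustrative sketch, not part of the driver: a minimal user space
+ * reader honoring the all-or-nothing contract described above -- it
+ * reads once, from offset zero, with a buffer at least as large as
+ * the driver's. The debugfs path is an assumption for the example.
+ *
+ *   #include <stdio.h>
+ *   #include <fcntl.h>
+ *   #include <unistd.h>
+ *
+ *   int main(void)
+ *   {
+ *           char buf[256];
+ *           ssize_t n;
+ *           int fd = open("/sys/kernel/debug/i2400m/rx_stats", O_RDONLY);
+ *
+ *           if (fd < 0)
+ *                   return 1;
+ *           n = read(fd, buf, sizeof(buf));  // single read at offset 0
+ *           if (n > 0)
+ *                   fwrite(buf, 1, n, stdout);
+ *           close(fd);
+ *           return n > 0 ? 0 : 1;
+ *   }
+ */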
+
+
+/* See i2400m_rx_stats_read() */
+static
+ssize_t i2400m_tx_stats_read(struct file *filp, char __user *buffer,
+			     size_t count, loff_t *ppos)
+{
+	struct i2400m *i2400m = filp->private_data;
+	char buf[128];
+	unsigned long flags;
+
+	if (*ppos != 0)
+		return 0;
+	if (count < sizeof(buf))
+		return -ENOSPC;
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	snprintf(buf, sizeof(buf), "%u %u %u %u %u %u %u\n",
+		 i2400m->tx_pl_num, i2400m->tx_pl_min,
+		 i2400m->tx_pl_max, i2400m->tx_num,
+		 i2400m->tx_size_acc,
+		 i2400m->tx_size_min, i2400m->tx_size_max);
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+}
+
+/* Any write clears the stats */
+static
+ssize_t i2400m_tx_stats_write(struct file *filp, const char __user *buffer,
+			      size_t count, loff_t *ppos)
+{
+	struct i2400m *i2400m = filp->private_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	i2400m->tx_pl_num = 0;
+	i2400m->tx_pl_max = 0;
+	i2400m->tx_pl_min = UINT_MAX;
+	i2400m->tx_num = 0;
+	i2400m->tx_size_acc = 0;
+	i2400m->tx_size_min = UINT_MAX;
+	i2400m->tx_size_max = 0;
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+	return count;
+}
+
+static
+const struct file_operations i2400m_tx_stats_fops = {
+	.owner =	THIS_MODULE,
+	.open =		i2400m_stats_open,
+	.read =		i2400m_tx_stats_read,
+	.write =	i2400m_tx_stats_write,
+};
+
+
+/* Write 1 to ask the device to go into suspend */
+static
+int debugfs_i2400m_suspend_set(void *data, u64 val)
+{
+	int result;
+	struct i2400m *i2400m = data;
+	result = i2400m_cmd_enter_powersave(i2400m);
+	if (result >= 0)
+		result = 0;
+	return result;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_suspend,
+			NULL, debugfs_i2400m_suspend_set,
+			"%llu\n");
+
+static
+struct dentry *debugfs_create_i2400m_suspend(
+	const char *name, struct dentry *parent, struct i2400m *i2400m)
+{
+	return debugfs_create_file(name, 0200, parent, i2400m,
+				   &fops_i2400m_suspend);
+}
+
+
+/*
+ * Reset the device
+ *
+ * Write 0 to ask the device to soft reset, 1 to cold reset, 2 to bus
+ * reset (as defined by enum i2400m_reset_type).
+ */
+static
+int debugfs_i2400m_reset_set(void *data, u64 val)
+{
+	int result;
+	struct i2400m *i2400m = data;
+	enum i2400m_reset_type rt = val;
+	switch (rt) {
+	case I2400M_RT_WARM:
+	case I2400M_RT_COLD:
+	case I2400M_RT_BUS:
+		result = i2400m->bus_reset(i2400m, rt);
+		if (result >= 0)
+			result = 0;
+		break;
+	default:
+		result = -EINVAL;
+	}
+	return result;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_reset,
+			NULL, debugfs_i2400m_reset_set,
+			"%llu\n");
+
+static
+struct dentry *debugfs_create_i2400m_reset(
+	const char *name, struct dentry *parent, struct i2400m *i2400m)
+{
+	return debugfs_create_file(name, 0200, parent, i2400m,
+				   &fops_i2400m_reset);
+}
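+
+/*
+ * Illustrative sketch, not part of the driver: requesting a cold reset
+ * from user space by writing the numeric reset type (0 warm, 1 cold,
+ * 2 bus, as per enum i2400m_reset_type) to the file created above.
+ * The debugfs path is an assumption for the example.
+ *
+ *   #include <fcntl.h>
+ *   #include <unistd.h>
+ *
+ *   int request_cold_reset(void)
+ *   {
+ *           int fd = open("/sys/kernel/debug/i2400m/reset", O_WRONLY);
+ *
+ *           if (fd < 0)
+ *                   return -1;
+ *           if (write(fd, "1", 1) != 1) {
+ *                   close(fd);
+ *                   return -1;
+ *           }
+ *           close(fd);
+ *           return 0;
+ *   }
+ */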
+
+/*
+ * Debug levels control; see debug.h
+ */
+struct d_level D_LEVEL[] = {
+	D_SUBMODULE_DEFINE(control),
+	D_SUBMODULE_DEFINE(driver),
+	D_SUBMODULE_DEFINE(debugfs),
+	D_SUBMODULE_DEFINE(fw),
+	D_SUBMODULE_DEFINE(netdev),
+	D_SUBMODULE_DEFINE(rfkill),
+	D_SUBMODULE_DEFINE(rx),
+	D_SUBMODULE_DEFINE(tx),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+
+#define __debugfs_register(prefix, name, parent)			\
+do {									\
+	result = d_level_register_debugfs(prefix, name, parent);	\
+	if (result < 0)							\
+		goto error;						\
+} while (0)
+
+
+int i2400m_debugfs_add(struct i2400m *i2400m)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	struct dentry *dentry = i2400m->wimax_dev.debugfs_dentry;
+	struct dentry *fd;
+
+	dentry = debugfs_create_dir("i2400m", dentry);
+	result = PTR_ERR(dentry);
+	if (IS_ERR(dentry)) {
+		if (result == -ENODEV)
+			result = 0;	/* No debugfs support */
+		goto error;
+	}
+	i2400m->debugfs_dentry = dentry;
+	__debugfs_register("dl_", control, dentry);
+	__debugfs_register("dl_", driver, dentry);
+	__debugfs_register("dl_", debugfs, dentry);
+	__debugfs_register("dl_", fw, dentry);
+	__debugfs_register("dl_", netdev, dentry);
+	__debugfs_register("dl_", rfkill, dentry);
+	__debugfs_register("dl_", rx, dentry);
+	__debugfs_register("dl_", tx, dentry);
+
+	fd = debugfs_create_size_t("tx_in", 0400, dentry,
+				   &i2400m->tx_in);
+	result = PTR_ERR(fd);
+	if (IS_ERR(fd) && result != -ENODEV) {
+		dev_err(dev, "Can't create debugfs entry "
+			"tx_in: %d\n", result);
+		goto error;
+	}
+
+	fd = debugfs_create_size_t("tx_out", 0400, dentry,
+				   &i2400m->tx_out);
+	result = PTR_ERR(fd);
+	if (IS_ERR(fd) && result != -ENODEV) {
+		dev_err(dev, "Can't create debugfs entry "
+			"tx_out: %d\n", result);
+		goto error;
+	}
+
+	fd = debugfs_create_u32("state", 0600, dentry,
+				&i2400m->state);
+	result = PTR_ERR(fd);
+	if (IS_ERR(fd) && result != -ENODEV) {
+		dev_err(dev, "Can't create debugfs entry "
+			"state: %d\n", result);
+		goto error;
+	}
+
+	/*
+	 * Trace received messages from user space
+	 *
+	 * In order to tap the bidirectional message stream in the
+	 * 'msg' pipe, user space can read from the 'msg' pipe;
+	 * however, due to limitations in libnl, we can't know what
+	 * the different applications are sending down to the kernel.
+	 *
+	 * So we have this hack where the driver will echo any message
+	 * received on the msg pipe from user space [through a call to
+	 * wimax_dev->op_msg_from_user() into
+	 * i2400m_op_msg_from_user()] into the 'trace' pipe that this
+	 * driver creates.
+	 *
+	 * So then, reading from both the 'trace' and 'msg' pipes in
+	 * user space will provide a full dump of the traffic.
+	 *
+	 * Write 1 to activate, 0 to clear (see the usage sketch at the
+	 * end of this file).
+	 *
+	 * It is not really atomic, but it is not too critical either.
+	 */
+	fd = debugfs_create_u8("trace_msg_from_user", 0600, dentry,
+			       &i2400m->trace_msg_from_user);
+	result = PTR_ERR(fd);
+	if (IS_ERR(fd) && result != -ENODEV) {
+		dev_err(dev, "Can't create debugfs entry "
+			"trace_msg_from_user: %d\n", result);
+		goto error;
+	}
+
+	fd = debugfs_create_netdev_queue_stopped("netdev_queue_stopped",
+						 dentry, i2400m);
+	result = PTR_ERR(fd);
+	if (IS_ERR(fd) && result != -ENODEV) {
+		dev_err(dev, "Can't create debugfs entry "
+			"netdev_queue_stopped: %d\n", result);
+		goto error;
+	}
+
+	fd = debugfs_create_file("rx_stats", 0600, dentry, i2400m,
+				 &i2400m_rx_stats_fops);
+	result = PTR_ERR(fd);
+	if (IS_ERR(fd) && result != -ENODEV) {
+		dev_err(dev, "Can't create debugfs entry "
+			"rx_stats: %d\n", result);
+		goto error;
+	}
+
+	fd = debugfs_create_file("tx_stats", 0600, dentry, i2400m,
+				 &i2400m_tx_stats_fops);
+	result = PTR_ERR(fd);
+	if (IS_ERR(fd) && result != -ENODEV) {
+		dev_err(dev, "Can't create debugfs entry "
+			"tx_stats: %d\n", result);
+		goto error;
+	}
+
+	fd = debugfs_create_i2400m_suspend("suspend", dentry, i2400m);
+	result = PTR_ERR(fd);
+	if (IS_ERR(fd) && result != -ENODEV) {
+		dev_err(dev, "Can't create debugfs entry suspend: %d\n",
+			result);
+		goto error;
+	}
+
+	fd = debugfs_create_i2400m_reset("reset", dentry, i2400m);
+	result = PTR_ERR(fd);
+	if (IS_ERR(fd) && result != -ENODEV) {
+		dev_err(dev, "Can't create debugfs entry reset: %d\n", result);
+		goto error;
+	}
+
+	result = 0;
+error:
+	return result;
+}
+
+void i2400m_debugfs_rm(struct i2400m *i2400m)
+{
+	debugfs_remove_recursive(i2400m->debugfs_dentry);
+}
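+
+/*
+ * Usage sketch for the trace_msg_from_user knob created in
+ * i2400m_debugfs_add() above. Illustrative only; the debugfs path is
+ * an assumption for the example.
+ *
+ *   #include <fcntl.h>
+ *   #include <unistd.h>
+ *
+ *   int i2400m_trace_enable(int on)
+ *   {
+ *           int fd = open(
+ *                   "/sys/kernel/debug/i2400m/trace_msg_from_user",
+ *                   O_WRONLY);
+ *
+ *           if (fd < 0)
+ *                   return -1;
+ *           write(fd, on ? "1" : "0", 1);
+ *           close(fd);
+ *           return 0;
+ *   }
+ *
+ * With the knob set to 1, whatever user space sends down the 'msg'
+ * pipe is echoed on the 'trace' pipe, so reading both pipes gives the
+ * full message dump described in i2400m_debugfs_add().
+ */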
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
new file mode 100644
index 0000000..5f98047
--- /dev/null
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -0,0 +1,728 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Generic probe/disconnect, reset and message passing
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * See i2400m.h for driver documentation. This contains helpers for
+ * the driver model glue [_setup()/_release()], handling device resets
+ * [_dev_reset_handle()], and the backends for the WiMAX stack ops
+ * reset [_op_reset()] and message from user [_op_msg_from_user()].
+ *
+ * ROADMAP:
+ *
+ * i2400m_op_msg_from_user()
+ *   i2400m_msg_to_dev()
+ *   wimax_msg_to_user_send()
+ *
+ * i2400m_op_reset()
+ *   i2400m->bus_reset()
+ *
+ * i2400m_dev_reset_handle()
+ *   __i2400m_dev_reset_handle()
+ *     __i2400m_dev_stop()
+ *     __i2400m_dev_start()
+ *
+ * i2400m_setup()
+ *   i2400m_bootrom_init()
+ *   register_netdev()
+ *   i2400m_dev_start()
+ *     __i2400m_dev_start()
+ *       i2400m_dev_bootstrap()
+ *       i2400m_tx_setup()
+ *       i2400m->bus_dev_start()
+ *       i2400m_check_mac_addr()
+ *   wimax_dev_add()
+ *
+ * i2400m_release()
+ *   wimax_dev_rm()
+ *   i2400m_dev_stop()
+ *     __i2400m_dev_stop()
+ *       i2400m_dev_shutdown()
+ *       i2400m->bus_dev_stop()
+ *       i2400m_tx_release()
+ *   unregister_netdev()
+ */
+#include "i2400m.h"
+#include <linux/wimax/i2400m.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#define D_SUBMODULE driver
+#include "debug-levels.h"
+
+
+int i2400m_idle_mode_disabled;	/* 0 (idle mode enabled) by default */
+module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
+MODULE_PARM_DESC(idle_mode_disabled,
+		 "If true, the device will not enable idle mode negotiation "
+		 "with the base station (when connected) to save power.");
+
+/**
+ * i2400m_queue_work - schedule work on a i2400m's queue
+ *
+ * @i2400m: device descriptor
+ *
+ * @fn: function to run to execute work. It gets passed a 'struct
+ *     work_struct' that is wrapped in a 'struct i2400m_work'. Once
+ *     done, you have to (1) i2400m_put(i2400m_work->i2400m) and then
+ *     (2) kfree(i2400m_work).
+ *
+ * @gfp_flags: GFP flags for memory allocation.
+ *
+ * @pl: pointer to a payload buffer that you want to pass to the _work
+ *     function. Use this to pack (for example) a struct with extra
+ *     arguments.
+ *
+ * @pl_size: size of the payload buffer.
+ *
+ * We do this quite often, so this just saves typing; allocate a
+ * wrapper for an i2400m, get a ref to it, pack arguments and launch
+ * the work.
+ *
+ * A usual workflow is:
+ *
+ * struct my_work_args {
+ *         void *something;
+ *         int whatever;
+ * };
+ * ...
+ *
+ * struct my_work_args my_args = {
+ *         .something = FOO,
+ *         .whatever = BLAH
+ * };
+ * i2400m_queue_work(i2400m, my_work_fn, GFP_KERNEL,
+ *                   &my_args, sizeof(my_args));
+ *
+ * And now the work function can unpack the arguments and call the
+ * real function (or do the job itself):
+ *
+ * static
+ * void my_work_fn(struct work_struct *ws)
+ * {
+ *         struct i2400m_work *iw =
+ *	           container_of(ws, struct i2400m_work, ws);
+ *	   struct my_work_args *my_args = (void *) iw->pl;
+ *
+ *	   my_work(iw->i2400m, my_args->something, my_args->whatever);
+ * }
+ */
+int i2400m_queue_work(struct i2400m *i2400m,
+		      void (*fn)(struct work_struct *), gfp_t gfp_flags,
+		      const void *pl, size_t pl_size)
+{
+	int result;
+	struct i2400m_work *iw;
+
+	BUG_ON(i2400m->work_queue == NULL);
+	result = -ENOMEM;
+	iw = kzalloc(sizeof(*iw) + pl_size, gfp_flags);
+	if (iw == NULL)
+		goto error_kzalloc;
+	iw->i2400m = i2400m_get(i2400m);
+	memcpy(iw->pl, pl, pl_size);
+	INIT_WORK(&iw->ws, fn);
+	result = queue_work(i2400m->work_queue, &iw->ws);
+error_kzalloc:
+	return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_queue_work);
+
+
+/*
+ * Schedule i2400m's specific work on the system's queue.
+ *
+ * Used for a few cases where we really need it; otherwise, identical
+ * to i2400m_queue_work().
+ *
+ * Returns < 0 errno code on error, 1 if ok.
+ *
+ * If it returns zero, something really bad happened, as it means the
+ * work struct was already queued; but we have just allocated it, so
+ * that should not happen (we turn it into -ENXIO).
+ */
+int i2400m_schedule_work(struct i2400m *i2400m,
+			 void (*fn)(struct work_struct *), gfp_t gfp_flags)
+{
+	int result;
+	struct i2400m_work *iw;
+
+	BUG_ON(i2400m->work_queue == NULL);
+	result = -ENOMEM;
+	iw = kzalloc(sizeof(*iw), gfp_flags);
+	if (iw == NULL)
+		goto error_kzalloc;
+	iw->i2400m = i2400m_get(i2400m);
+	INIT_WORK(&iw->ws, fn);
+	result = schedule_work(&iw->ws);
+	if (result == 0)
+		result = -ENXIO;
+error_kzalloc:
+	return result;
+}
+
+
+/*
+ * WiMAX stack operation: relay a message from user space
+ *
+ * @wimax_dev: device descriptor
+ * @pipe_name: named pipe the message is for
+ * @msg_buf: pointer to the message bytes
+ * @msg_len: length of the buffer
+ * @genl_info: passed by the generic netlink layer
+ *
+ * The WiMAX stack will call this function when a message was received
+ * from user space.
+ *
+ * For the i2400m, this is an L3L4 message, as specified in
+ * include/linux/wimax/i2400m.h, and thus prefixed with a 'struct
+ * i2400m_l3l4_hdr'. Driver (and device) expect the messages to be
+ * coded in Little Endian.
+ *
+ * This function just verifies that the header declaration and the
+ * payload are consistent and then deals with it, either forwarding it
+ * to the device or processing it locally.
+ *
+ * In the i2400m, messages are basically commands that will carry an
+ * ack, so we use i2400m_msg_to_dev() and then deliver the ack back to
+ * user space. The rx.c code might intercept the response and use it
+ * to update the driver's state, but then it will pass it on so it can
+ * be relayed back to user space.
+ *
+ * Note that asynchronous events from the device are processed and
+ * sent to user space in rx.c.
+ */
+static
+int i2400m_op_msg_from_user(struct wimax_dev *wimax_dev,
+			    const char *pipe_name,
+			    const void *msg_buf, size_t msg_len,
+			    const struct genl_info *genl_info)
+{
+	int result;
+	struct i2400m *i2400m = wimax_dev_to_i2400m(wimax_dev);
+	struct device *dev = i2400m_dev(i2400m);
+	struct sk_buff *ack_skb;
+
+	d_fnstart(4, dev, "(wimax_dev %p [i2400m %p] msg_buf %p "
+		  "msg_len %zu genl_info %p)\n", wimax_dev, i2400m,
+		  msg_buf, msg_len, genl_info);
+	ack_skb = i2400m_msg_to_dev(i2400m, msg_buf, msg_len);
+	result = PTR_ERR(ack_skb);
+	if (IS_ERR(ack_skb))
+		goto error_msg_to_dev;
+	if (unlikely(i2400m->trace_msg_from_user))
+		wimax_msg(&i2400m->wimax_dev, "trace",
+			  msg_buf, msg_len, GFP_KERNEL);
+	result = wimax_msg_send(&i2400m->wimax_dev, ack_skb);
+error_msg_to_dev:
+	d_fnend(4, dev, "(wimax_dev %p [i2400m %p] msg_buf %p msg_len %zu "
+		"genl_info %p) = %d\n", wimax_dev, i2400m, msg_buf, msg_len,
+		genl_info, result);
+	return result;
+}
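+
+/*
+ * For illustration, the framing this function expects: an L3L4 header
+ * (little endian) followed by its TLV payload. A minimal sketch, using
+ * I2400M_MT_SET_INIT_CONFIG as an example message type and no TLVs;
+ * user space builds the equivalent bytes and sends them down the 'msg'
+ * pipe, and they arrive here as msg_buf/msg_len.
+ *
+ *   struct i2400m_l3l4_hdr hdr = {
+ *           .type    = cpu_to_le16(I2400M_MT_SET_INIT_CONFIG),
+ *           .length  = cpu_to_le16(0),       // no TLVs follow
+ *           .version = cpu_to_le16(I2400M_L3L4_VERSION),
+ *   };
+ */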
+
+
+/*
+ * Context to wait for a reset to finalize
+ */
+struct i2400m_reset_ctx {
+	struct completion completion;
+	int result;
+};
+
+
+/*
+ * WiMAX stack operation: reset a device
+ *
+ * @wimax_dev: device descriptor
+ *
+ * See the documentation for wimax_reset() and wimax_dev->op_reset for
+ * the requirements of this function. The WiMAX stack guarantees
+ * serialization on calls to this function.
+ *
+ * Do a warm reset on the device; if it fails, resort to a cold reset
+ * and return -ENODEV. On successful warm reset, we need to block
+ * until it is complete.
+ *
+ * The bus-driver implementation of reset takes care of falling back
+ * to cold reset if warm fails.
+ */
+static
+int i2400m_op_reset(struct wimax_dev *wimax_dev)
+{
+	int result;
+	struct i2400m *i2400m = wimax_dev_to_i2400m(wimax_dev);
+	struct device *dev = i2400m_dev(i2400m);
+	struct i2400m_reset_ctx ctx = {
+		.completion = COMPLETION_INITIALIZER_ONSTACK(ctx.completion),
+		.result = 0,
+	};
+
+	d_fnstart(4, dev, "(wimax_dev %p)\n", wimax_dev);
+	mutex_lock(&i2400m->init_mutex);
+	i2400m->reset_ctx = &ctx;
+	mutex_unlock(&i2400m->init_mutex);
+	result = i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+	if (result < 0)
+		goto out;
+	result = wait_for_completion_timeout(&ctx.completion, 4*HZ);
+	if (result == 0)
+		result = -ETIMEDOUT;
+	else if (result > 0)
+		result = ctx.result;
+	/* if result < 0, pass it on */
+	mutex_lock(&i2400m->init_mutex);
+	i2400m->reset_ctx = NULL;
+	mutex_unlock(&i2400m->init_mutex);
+out:
+	d_fnend(4, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
+	return result;
+}
+
+
+/*
+ * Check the MAC address we got from boot mode is ok
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ */
+static
+int i2400m_check_mac_addr(struct i2400m *i2400m)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	struct sk_buff *skb;
+	const struct i2400m_tlv_detailed_device_info *ddi;
+	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+	const unsigned char zeromac[ETH_ALEN] = { 0 };
+
+	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+	skb = i2400m_get_device_info(i2400m);
+	if (IS_ERR(skb)) {
+		result = PTR_ERR(skb);
+		dev_err(dev, "Cannot verify MAC address, error reading: %d\n",
+			result);
+		goto error;
+	}
+	/* Extract MAC address */
+	ddi = (void *) skb->data;
+	BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
+	d_printf(2, dev, "GET DEVICE INFO: mac addr "
+		 "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		 ddi->mac_address[0], ddi->mac_address[1],
+		 ddi->mac_address[2], ddi->mac_address[3],
+		 ddi->mac_address[4], ddi->mac_address[5]);
+	if (!memcmp(net_dev->perm_addr, ddi->mac_address,
+		   sizeof(ddi->mac_address)))
+		goto ok;
+	dev_warn(dev, "warning: device reports a different MAC address "
+		 "from the one reported in boot mode\n");
+	dev_warn(dev, "device reports     %02x:%02x:%02x:%02x:%02x:%02x\n",
+		 ddi->mac_address[0], ddi->mac_address[1],
+		 ddi->mac_address[2], ddi->mac_address[3],
+		 ddi->mac_address[4], ddi->mac_address[5]);
+	dev_warn(dev, "boot mode reported %02x:%02x:%02x:%02x:%02x:%02x\n",
+		 net_dev->perm_addr[0], net_dev->perm_addr[1],
+		 net_dev->perm_addr[2], net_dev->perm_addr[3],
+		 net_dev->perm_addr[4], net_dev->perm_addr[5]);
+	if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac)))
+		dev_err(dev, "device reports an invalid MAC address, "
+			"not updating\n");
+	else {
+		dev_warn(dev, "updating MAC address\n");
+		net_dev->addr_len = ETH_ALEN;
+		memcpy(net_dev->perm_addr, ddi->mac_address, ETH_ALEN);
+		memcpy(net_dev->dev_addr, ddi->mac_address, ETH_ALEN);
+	}
+ok:
+	result = 0;
+	kfree_skb(skb);
+error:
+	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+	return result;
+}
+
+
+/**
+ * __i2400m_dev_start - Bring up driver communication with the device
+ *
+ * @i2400m: device descriptor
+ * @flags: boot mode flags
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * Uploads firmware and brings up all the resources needed to be able
+ * to communicate with the device.
+ *
+ * TX needs to be set up before the bus-specific code (otherwise on
+ * shutdown, the bus-tx code could try to access it).
+ */
+static
+int __i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri flags)
+{
+	int result;
+	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+	struct net_device *net_dev = wimax_dev->net_dev;
+	struct device *dev = i2400m_dev(i2400m);
+	int times = 3;
+
+	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+retry:
+	result = i2400m_dev_bootstrap(i2400m, flags);
+	if (result < 0) {
+		dev_err(dev, "cannot bootstrap device: %d\n", result);
+		goto error_bootstrap;
+	}
+	result = i2400m_tx_setup(i2400m);
+	if (result < 0)
+		goto error_tx_setup;
+	result = i2400m->bus_dev_start(i2400m);
+	if (result < 0)
+		goto error_bus_dev_start;
+	i2400m->work_queue = create_singlethread_workqueue(wimax_dev->name);
+	if (i2400m->work_queue == NULL) {
+		result = -ENOMEM;
+		dev_err(dev, "cannot create workqueue\n");
+		goto error_create_workqueue;
+	}
+	/* At this point it is ok to send commands to the device */
+	result = i2400m_check_mac_addr(i2400m);
+	if (result < 0)
+		goto error_check_mac_addr;
+	i2400m->ready = 1;
+	wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED);
+	result = i2400m_dev_initialize(i2400m);
+	if (result < 0)
+		goto error_dev_initialize;
+	/* At this point, reports will come for the device and set it
+	 * to the right state if it is different than UNINITIALIZED */
+	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
+		net_dev, i2400m, result);
+	return result;
+
+error_dev_initialize:
+error_check_mac_addr:
+	destroy_workqueue(i2400m->work_queue);
+error_create_workqueue:
+	i2400m->bus_dev_stop(i2400m);
+error_bus_dev_start:
+	i2400m_tx_release(i2400m);
+error_tx_setup:
+error_bootstrap:
+	if (result == -ERESTARTSYS && times-- > 0) {
+		flags = I2400M_BRI_SOFT;
+		goto retry;
+	}
+	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
+		net_dev, i2400m, result);
+	return result;
+}
+
+
+static
+int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags)
+{
+	int result;
+	mutex_lock(&i2400m->init_mutex);	/* Well, start the device */
+	result = __i2400m_dev_start(i2400m, bm_flags);
+	if (result >= 0)
+		i2400m->updown = 1;
+	mutex_unlock(&i2400m->init_mutex);
+	return result;
+}
+
+
+/**
+ * __i2400m_dev_stop - Tear down driver communication with the device
+ *
+ * @i2400m: device descriptor
+ *
+ * Releases all the resources allocated to communicate with the
+ * device. i2400m_dev_stop() below is the locked wrapper callers
+ * should use.
+ */
+static
+void __i2400m_dev_stop(struct i2400m *i2400m)
+{
+	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+	struct device *dev = i2400m_dev(i2400m);
+
+	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+	wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING);
+	i2400m_dev_shutdown(i2400m);
+	i2400m->ready = 0;
+	destroy_workqueue(i2400m->work_queue);
+	i2400m->bus_dev_stop(i2400m);
+	i2400m_tx_release(i2400m);
+	wimax_state_change(wimax_dev, WIMAX_ST_DOWN);
+	d_fnend(3, dev, "(i2400m %p) = 0\n", i2400m);
+}
+
+
+/*
+ * Watch out -- we only need to stop if there is a need for it. The
+ * device could have reset itself and failed to come up again (see
+ * __i2400m_dev_reset_handle()).
+ */
+static
+void i2400m_dev_stop(struct i2400m *i2400m)
+{
+	mutex_lock(&i2400m->init_mutex);
+	if (i2400m->updown) {
+		__i2400m_dev_stop(i2400m);
+		i2400m->updown = 0;
+	}
+	mutex_unlock(&i2400m->init_mutex);
+}
+
+
+/*
+ * The device has rebooted; fix up the device and the driver
+ *
+ * Tear down the driver communication with the device, reload the
+ * firmware and reinitialize the communication with the device.
+ *
+ * If someone calls a reset when the device's firmware is down, in
+ * theory we won't see it because we are not listening. However, just
+ * in case, leave the code to handle it.
+ *
+ * If there is a reset context, use it; this means someone is waiting
+ * for us to tell him when the reset operation is complete and the
+ * device is ready to rock again.
+ *
+ * NOTE: if we are in the process of bringing up or down the
+ *       communication with the device [running i2400m_dev_start() or
+ *       _stop()], don't do anything, let it fail and handle it.
+ *
+ * This function is ran always in a thread context
+ */
+static
+void __i2400m_dev_reset_handle(struct work_struct *ws)
+{
+	int result;
+	struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
+	struct i2400m *i2400m = iw->i2400m;
+	struct device *dev = i2400m_dev(i2400m);
+	enum wimax_st wimax_state;
+	struct i2400m_reset_ctx *ctx = i2400m->reset_ctx;
+
+	d_fnstart(3, dev, "(ws %p i2400m %p)\n", ws, i2400m);
+	result = 0;
+	if (mutex_trylock(&i2400m->init_mutex) == 0) {
+		/* We are still in i2400m_dev_start() [let it fail] or
+		 * i2400m_dev_stop() [we are shutting down anyway, so
+		 * ignore it] or we are resetting somewhere else. */
+		dev_err(dev, "device rebooted\n");
+		i2400m_msg_to_dev_cancel_wait(i2400m, -ERESTARTSYS);
+		complete(&i2400m->msg_completion);
+		goto out;
+	}
+	wimax_state = wimax_state_get(&i2400m->wimax_dev);
+	if (wimax_state < WIMAX_ST_UNINITIALIZED) {
+		dev_info(dev, "device rebooted: it is down, ignoring\n");
+		goto out_unlock;	/* ifconfig up/down wasn't called */
+	}
+	dev_err(dev, "device rebooted: reinitializing driver\n");
+	__i2400m_dev_stop(i2400m);
+	i2400m->updown = 0;
+	result = __i2400m_dev_start(i2400m,
+				    I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
+	if (result < 0) {
+		dev_err(dev, "device reboot: cannot start the device: %d\n",
+			result);
+		result = i2400m->bus_reset(i2400m, I2400M_RT_BUS);
+		if (result >= 0)
+			result = -ENODEV;
+	} else
+		i2400m->updown = 1;
+out_unlock:
+	if (i2400m->reset_ctx) {
+		ctx->result = result;
+		complete(&ctx->completion);
+	}
+	mutex_unlock(&i2400m->init_mutex);
+out:
+	i2400m_put(i2400m);
+	kfree(iw);
+	d_fnend(3, dev, "(ws %p i2400m %p) = void\n", ws, i2400m);
+	return;
+}
+
+
+/**
+ * i2400m_dev_reset_handle - Handle a device's reset in a thread context
+ *
+ * Schedules the handling of a device reset on a thread context, so it
+ * is safe to call from atomic context. We can't use the i2400m's
+ * queue as we are going to destroy it and reinitialize it as part of
+ * the driver bringup/bringdown process.
+ *
+ * See __i2400m_dev_reset_handle() for details; that takes care of
+ * reinitializing the driver to handle the reset, calling into the
+ * bus-specific functions ops as needed.
+ */
+int i2400m_dev_reset_handle(struct i2400m *i2400m)
+{
+	return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
+				    GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
+
+
+/**
+ * i2400m_setup - bus-generic setup function for the i2400m device
+ *
+ * @i2400m: device descriptor (bus-specific parts have been initialized)
+ * @bm_flags: boot mode flags (enum i2400m_bri)
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * Initializes the bus-generic parts of the i2400m driver; the
+ * bus-specific parts have been initialized, function pointers filled
+ * out by the bus-specific probe function.
+ *
+ * As well, this registers the WiMAX and net device nodes. Once this
+ * function returns, the device is operative and has to be ready to
+ * receive and send network traffic and WiMAX control operations.
+ */
+int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
+{
+	int result = -ENODEV;
+	struct device *dev = i2400m_dev(i2400m);
+	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+
+	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+
+	snprintf(wimax_dev->name, sizeof(wimax_dev->name),
+		 "i2400m-%s:%s", dev->bus->name, dev->bus_id);
+
+	i2400m->bm_cmd_buf = kzalloc(I2400M_BM_CMD_BUF_SIZE, GFP_KERNEL);
+	if (i2400m->bm_cmd_buf == NULL) {
+		dev_err(dev, "cannot allocate boot-mode command buffer\n");
+		goto error_bm_cmd_kzalloc;
+	}
+	i2400m->bm_ack_buf = kzalloc(I2400M_BM_ACK_BUF_SIZE, GFP_KERNEL);
+	if (i2400m->bm_ack_buf == NULL) {
+		dev_err(dev, "cannot allocate boot-mode ack buffer\n");
+		goto error_bm_ack_buf_kzalloc;
+	}
+	result = i2400m_bootrom_init(i2400m, bm_flags);
+	if (result < 0) {
+		dev_err(dev, "read mac addr: bootrom init "
+			"failed: %d\n", result);
+		goto error_bootrom_init;
+	}
+	result = i2400m_read_mac_addr(i2400m);
+	if (result < 0)
+		goto error_read_mac_addr;
+
+	result = register_netdev(net_dev);	/* Okey dokey, bring it up */
+	if (result < 0) {
+		dev_err(dev, "cannot register i2400m network device: %d\n",
+			result);
+		goto error_register_netdev;
+	}
+	netif_carrier_off(net_dev);
+
+	result = i2400m_dev_start(i2400m, bm_flags);
+	if (result < 0)
+		goto error_dev_start;
+
+	i2400m->wimax_dev.op_msg_from_user = i2400m_op_msg_from_user;
+	i2400m->wimax_dev.op_rfkill_sw_toggle = i2400m_op_rfkill_sw_toggle;
+	i2400m->wimax_dev.op_reset = i2400m_op_reset;
+	result = wimax_dev_add(&i2400m->wimax_dev, net_dev);
+	if (result < 0)
+		goto error_wimax_dev_add;
+	/* User space needs to do some init stuff */
+	wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED);
+
+	/* Now setup all that requires a registered net and wimax device. */
+	result = i2400m_debugfs_add(i2400m);
+	if (result < 0) {
+		dev_err(dev, "cannot setup i2400m's debugfs: %d\n", result);
+		goto error_debugfs_setup;
+	}
+	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+	return result;
+
+error_debugfs_setup:
+	wimax_dev_rm(&i2400m->wimax_dev);
+error_wimax_dev_add:
+	i2400m_dev_stop(i2400m);
+error_dev_start:
+	unregister_netdev(net_dev);
+error_register_netdev:
+error_read_mac_addr:
+error_bootrom_init:
+	kfree(i2400m->bm_ack_buf);
+error_bm_ack_buf_kzalloc:
+	kfree(i2400m->bm_cmd_buf);
+error_bm_cmd_kzalloc:
+	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+	return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_setup);
+
+
+/**
+ * i2400m_release - release the bus-generic driver resources
+ *
+ * Sends a disconnect message and undoes any setup done by i2400m_setup()
+ */
+void i2400m_release(struct i2400m *i2400m)
+{
+	struct device *dev = i2400m_dev(i2400m);
+
+	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+	netif_stop_queue(i2400m->wimax_dev.net_dev);
+
+	i2400m_debugfs_rm(i2400m);
+	wimax_dev_rm(&i2400m->wimax_dev);
+	i2400m_dev_stop(i2400m);
+	unregister_netdev(i2400m->wimax_dev.net_dev);
+	kfree(i2400m->bm_ack_buf);
+	kfree(i2400m->bm_cmd_buf);
+	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+EXPORT_SYMBOL_GPL(i2400m_release);
+
+
+static
+int __init i2400m_driver_init(void)
+{
+	return 0;
+}
+module_init(i2400m_driver_init);
+
+static
+void __exit i2400m_driver_exit(void)
+{
+	/* flush any work scheduled by i2400m_dev_reset_handle() */
+	flush_scheduled_work();
+	return;
+}
+module_exit(i2400m_driver_exit);
+
+MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
+MODULE_DESCRIPTION("Intel 2400M WiMAX networking bus-generic driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
new file mode 100644
index 0000000..1d8271f
--- /dev/null
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -0,0 +1,1095 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Firmware uploader
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *  - Initial implementation
+ *
+ *
+ * THE PROCEDURE
+ *
+ * (this is described for USB, but it is similar for SDIO)
+ *
+ * The 2400m works in two modes: boot-mode or normal mode. In boot
+ * mode we can execute only a handful of commands targeted at
+ * uploading the firmware and launching it.
+ *
+ * The 2400m enters boot mode when it is first connected to the
+ * system, when it crashes and when you ask it to reboot. There are
+ * two submodes of the boot mode: signed and non-signed. Signed takes
+ * firmwares signed with a certain private key, non-signed takes any
+ * firmware. Normal hardware takes only signed firmware.
+ *
+ * Upon entrance to boot mode, the device sends a few zero length
+ * packets (ZLPs) on the notification endpoint, then a reboot barker
+ * (4 le32 words with value I2400M_{S,N}BOOT_BARKER). We ack it by
+ * sending the same barker on the bulk out endpoint. The device acks
+ * with a reboot ack barker (4 le32 words with value 0xfeedbabe) and
+ * then the device is fully rebooted. At this point we can upload the
+ * firmware.
+ *
+ * This process is accomplished by the i2400m_bootrom_init()
+ * function. All the device interaction happens through the
+ * i2400m_bm_cmd() [boot mode command]. Special return values will
+ * indicate if the device resets.
+ *
+ * After this, we read the MAC address and then (if needed)
+ * reinitialize the device. We need to read it ahead of time because
+ * in the future, we might not upload the firmware until userspace
+ * 'ifconfig up's the device.
+ *
+ * We can then upload the firmware file. The file is composed of a BCF
+ * header (basic data, keys and signatures) and a list of write
+ * commands and payloads. We first upload the header
+ * [i2400m_dnload_init()] and then pass the commands and payloads
+ * verbatim to the i2400m_bm_cmd() function
+ * [i2400m_dnload_bcf()]. Then we tell the device to jump to the new
+ * firmware [i2400m_dnload_finalize()].
+ *
+ * Once firmware is uploaded, we are good to go :)
+ *
+ * When we don't know in which mode we are, we first try by sending a
+ * warm reset request that will take us to boot-mode. If we time out
+ * waiting for a reboot barker, that means maybe we are already in
+ * boot mode, so we send a reboot barker.
+ *
+ * COMMAND EXECUTION
+ *
+ * This code (and process) is single threaded; for executing commands,
+ * we post a URB to the notification endpoint, post the command, wait
+ * for data on the notification buffer. We don't need to worry about
+ * others as we know we are the only ones in there.
+ *
+ * BACKEND IMPLEMENTATION
+ *
+ * This code is bus-generic; the bus-specific driver provides back end
+ * implementations to send a boot mode command to the device and to
+ * read an acknowledgement (or an asynchronous notification) back
+ * from it.
+ *
+ * ROADMAP
+ *
+ * i2400m_dev_bootstrap               Called by __i2400m_dev_start()
+ *   request_firmware
+ *   i2400m_fw_check
+ *   i2400m_fw_dnload
+ *   release_firmware
+ *
+ * i2400m_fw_dnload
+ *   i2400m_bootrom_init
+ *     i2400m_bm_cmd
+ *     i2400m->bus_reset
+ *   i2400m_dnload_init
+ *     i2400m_dnload_init_signed
+ *     i2400m_dnload_init_nonsigned
+ *       i2400m_download_chunk
+ *         i2400m_bm_cmd
+ *   i2400m_dnload_bcf
+ *     i2400m_bm_cmd
+ *   i2400m_dnload_finalize
+ *     i2400m_bm_cmd
+ *
+ * i2400m_bm_cmd
+ *   i2400m->bus_bm_cmd_send()
+ *   i2400m->bus_bm_wait_for_ack
+ *   __i2400m_bm_ack_verify
+ *
+ * i2400m_bm_cmd_prepare              Used by bus-drivers to prep
+ *                                    commands before sending
+ */
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <linux/usb.h>
+#include "i2400m.h"
+
+
+#define D_SUBMODULE fw
+#include "debug-levels.h"
+
+
+static const __le32 i2400m_ACK_BARKER[4] = {
+	__constant_cpu_to_le32(I2400M_ACK_BARKER),
+	__constant_cpu_to_le32(I2400M_ACK_BARKER),
+	__constant_cpu_to_le32(I2400M_ACK_BARKER),
+	__constant_cpu_to_le32(I2400M_ACK_BARKER)
+};
+
+
+/**
+ * i2400m_bm_cmd_prepare - Prepare a boot-mode command for delivery
+ *
+ * @cmd: pointer to bootrom header to prepare
+ *
+ * Computes checksum if so needed. After calling this function, DO NOT
+ * modify the command or header as the checksum won't work anymore.
+ *
+ * We do it from here because sometimes we cannot do it in the
+ * original context where the command was built (it is a const), so when we
+ * copy it to our staging buffer, we add the checksum there.
+ */
+void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *cmd)
+{
+	if (i2400m_brh_get_use_checksum(cmd)) {
+		int i;
+		u32 checksum = 0;
+		const u32 *checksum_ptr = (void *) cmd->payload;
+		for (i = 0; i < cmd->data_size / 4; i++)
+			checksum += cpu_to_le32(*checksum_ptr++);
+		checksum += cmd->command + cmd->target_addr + cmd->data_size;
+		cmd->block_checksum = cpu_to_le32(checksum);
+	}
+}
+EXPORT_SYMBOL_GPL(i2400m_bm_cmd_prepare);
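+
+/*
+ * Illustrative sketch of the intended call site (a bus-specific
+ * bus_bm_cmd_send() backend): the command is copied to a driver-owned
+ * staging buffer and only then checksummed, since the caller's copy is
+ * const. The example_* names are placeholders, not a real backend.
+ *
+ *   static int example_bus_bm_cmd_send(struct i2400m *i2400m,
+ *                                      const struct i2400m_bootrom_header *cmd,
+ *                                      size_t cmd_size, int flags)
+ *   {
+ *           struct i2400m_bootrom_header *stage = i2400m->bm_cmd_buf;
+ *
+ *           memcpy(stage, cmd, cmd_size);
+ *           if ((flags & I2400M_BM_CMD_RAW) == 0)
+ *                   i2400m_bm_cmd_prepare(stage);  // fills block_checksum
+ *           return example_send_to_device(i2400m, stage, cmd_size);
+ *   }
+ */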
+
+
+/*
+ * Verify the ack data received
+ *
+ * Given a reply to a boot mode command, chew it and verify everything
+ * is ok.
+ *
+ * @opcode: opcode which generated this ack. For error messages.
+ * @ack: pointer to ack data we received
+ * @ack_size: size of that data buffer
+ * @flags: I2400M_BM_CMD_* flags we called the command with.
+ *
+ * Way too long function -- maybe it should be further split
+ */
+static
+ssize_t __i2400m_bm_ack_verify(struct i2400m *i2400m, int opcode,
+			       struct i2400m_bootrom_header *ack,
+			       size_t ack_size, int flags)
+{
+	ssize_t result = -ENOMEM;
+	struct device *dev = i2400m_dev(i2400m);
+
+	d_fnstart(8, dev, "(i2400m %p opcode %d ack %p size %zu)\n",
+		  i2400m, opcode, ack, ack_size);
+	if (ack_size < sizeof(*ack)) {
+		result = -EIO;
+		dev_err(dev, "boot-mode cmd %d: HW BUG? notification didn't "
+			"return enough data (%zu bytes vs %zu expected)\n",
+			opcode, ack_size, sizeof(*ack));
+		goto error_ack_short;
+	}
+	if (ack_size == sizeof(i2400m_NBOOT_BARKER)
+		 && memcmp(ack, i2400m_NBOOT_BARKER, sizeof(*ack)) == 0) {
+		result = -ERESTARTSYS;
+		i2400m->sboot = 0;
+		d_printf(6, dev, "boot-mode cmd %d: "
+			 "HW non-signed boot barker\n", opcode);
+		goto error_reboot;
+	}
+	if (ack_size == sizeof(i2400m_SBOOT_BARKER)
+		 && memcmp(ack, i2400m_SBOOT_BARKER, sizeof(*ack)) == 0) {
+		result = -ERESTARTSYS;
+		i2400m->sboot = 1;
+		d_printf(6, dev, "boot-mode cmd %d: HW signed reboot barker\n",
+			 opcode);
+		goto error_reboot;
+	}
+	if (ack_size == sizeof(i2400m_ACK_BARKER)
+		 && memcmp(ack, i2400m_ACK_BARKER, sizeof(*ack)) == 0) {
+		result = -EISCONN;
+		d_printf(3, dev, "boot-mode cmd %d: HW reboot ack barker\n",
+			 opcode);
+		goto error_reboot_ack;
+	}
+	result = 0;
+	if (flags & I2400M_BM_CMD_RAW)
+		goto out_raw;
+	ack->data_size = le32_to_cpu(ack->data_size);
+	ack->target_addr = le32_to_cpu(ack->target_addr);
+	ack->block_checksum = le32_to_cpu(ack->block_checksum);
+	d_printf(5, dev, "boot-mode cmd %d: notification for opcode %u "
+		 "response %u csum %u rr %u da %u\n",
+		 opcode, i2400m_brh_get_opcode(ack),
+		 i2400m_brh_get_response(ack),
+		 i2400m_brh_get_use_checksum(ack),
+		 i2400m_brh_get_response_required(ack),
+		 i2400m_brh_get_direct_access(ack));
+	result = -EIO;
+	if (i2400m_brh_get_signature(ack) != 0xcbbc) {
+		dev_err(dev, "boot-mode cmd %d: HW BUG? wrong signature "
+			"0x%04x\n", opcode, i2400m_brh_get_signature(ack));
+		goto error_ack_signature;
+	}
+	if (opcode != -1 && opcode != i2400m_brh_get_opcode(ack)) {
+		dev_err(dev, "boot-mode cmd %d: HW BUG? "
+			"received response for opcode %u, expected %u\n",
+			opcode, i2400m_brh_get_opcode(ack), opcode);
+		goto error_ack_opcode;
+	}
+	if (i2400m_brh_get_response(ack) != 0) {	/* failed? */
+		dev_err(dev, "boot-mode cmd %d: error; hw response %u\n",
+			opcode, i2400m_brh_get_response(ack));
+		goto error_ack_failed;
+	}
+	if (ack_size < ack->data_size + sizeof(*ack)) {
+		dev_err(dev, "boot-mode cmd %d: SW BUG "
+			"driver provided only %zu bytes for %zu bytes "
+			"of data\n", opcode, ack_size,
+			(size_t) le32_to_cpu(ack->data_size) + sizeof(*ack));
+		goto error_ack_short_buffer;
+	}
+	result = ack_size;
+	/* Don't you love this stack of empty targets? Well, I don't
+	 * either, but it helps track exactly who comes in here and
+	 * why :) */
+error_ack_short_buffer:
+error_ack_failed:
+error_ack_opcode:
+error_ack_signature:
+out_raw:
+error_reboot_ack:
+error_reboot:
+error_ack_short:
+	d_fnend(8, dev, "(i2400m %p opcode %d ack %p size %zu) = %d\n",
+		i2400m, opcode, ack, ack_size, (int) result);
+	return result;
+}
+
+
+/**
+ * i2400m_bm_cmd - Execute a boot mode command
+ *
+ * @cmd: buffer containing the command data (pointing at the header).
+ *     This data can be ANYWHERE (for USB, we will copy it to a
+ *     specific buffer). Make sure everything is in proper little
+ *     endian.
+ *
+ *     A raw buffer can be also sent, just cast it and set flags to
+ *     I2400M_BM_CMD_RAW.
+ *
+ *     This function will generate a checksum for you if the
+ *     checksum bit in the command is set (unless I2400M_BM_CMD_RAW
+ *     is set).
+ *
+ *     You can use the i2400m->bm_cmd_buf to stage your commands and
+ *     send them.
+ *
+ *     If NULL, no command is sent (we just wait for an ack).
+ *
+ * @cmd_size: size of the command. Will be auto padded to the
+ *     bus-specific drivers padding requirements.
+ *
+ * @ack: buffer where to place the acknowledgement. If it is a regular
+ *     command response, all fields will be returned with the right,
+ *     native endianness.
+ *
+ *     You *cannot* use i2400m->bm_ack_buf for this buffer.
+ *
+ * @ack_size: size of @ack, 16 aligned; you need to provide at least
+ *     sizeof(*ack) bytes and then enough to contain the return data
+ *     from the command
+ *
+ * @flags: see I2400M_BM_CMD_* above.
+ *
+ * @returns: bytes received by the notification; if < 0, an errno code
+ *     denoting an error or:
+ *
+ *     -ERESTARTSYS  The device has rebooted
+ *
+ * Executes a boot-mode command and waits for a response, doing basic
+ * validation on it; if a zero length response is received, it retries
+ * waiting for a response until a non-zero one is received (timing out
+ * after %I2400M_BOOT_RETRIES retries).
+ */
+static
+ssize_t i2400m_bm_cmd(struct i2400m *i2400m,
+		      const struct i2400m_bootrom_header *cmd, size_t cmd_size,
+		      struct i2400m_bootrom_header *ack, size_t ack_size,
+		      int flags)
+{
+	ssize_t result = -ENOMEM, rx_bytes;
+	struct device *dev = i2400m_dev(i2400m);
+	int opcode = cmd == NULL ? -1 : i2400m_brh_get_opcode(cmd);
+
+	d_fnstart(6, dev, "(i2400m %p cmd %p size %zu ack %p size %zu)\n",
+		  i2400m, cmd, cmd_size, ack, ack_size);
+	BUG_ON(ack_size < sizeof(*ack));
+	BUG_ON(i2400m->boot_mode == 0);
+
+	if (cmd != NULL) {		/* send the command */
+		memcpy(i2400m->bm_cmd_buf, cmd, cmd_size);
+		result = i2400m->bus_bm_cmd_send(i2400m, cmd, cmd_size, flags);
+		if (result < 0)
+			goto error_cmd_send;
+		if ((flags & I2400M_BM_CMD_RAW) == 0)
+			d_printf(5, dev,
+				 "boot-mode cmd %d csum %u rr %u da %u: "
+				 "addr 0x%04x size %u block csum 0x%04x\n",
+				 opcode, i2400m_brh_get_use_checksum(cmd),
+				 i2400m_brh_get_response_required(cmd),
+				 i2400m_brh_get_direct_access(cmd),
+				 cmd->target_addr, cmd->data_size,
+				 cmd->block_checksum);
+	}
+	result = i2400m->bus_bm_wait_for_ack(i2400m, ack, ack_size);
+	if (result < 0) {
+		dev_err(dev, "boot-mode cmd %d: error waiting for an ack: %d\n",
+			opcode, (int) result);	/* bah, %zd doesn't work */
+		goto error_wait_for_ack;
+	}
+	rx_bytes = result;
+	/* verify the ack and read more if necessary [result is the
+	 * final amount of bytes we get in the ack]  */
+	result = __i2400m_bm_ack_verify(i2400m, opcode, ack, ack_size, flags);
+	if (result < 0)
+		goto error_bad_ack;
+	/* Don't you love this stack of empty targets? Well, I don't
+	 * either, but it helps track exactly who comes in here and
+	 * why :) */
+	result = rx_bytes;
+error_bad_ack:
+error_wait_for_ack:
+error_cmd_send:
+	d_fnend(6, dev, "(i2400m %p cmd %p size %zu ack %p size %zu) = %d\n",
+		i2400m, cmd, cmd_size, ack, ack_size, (int) result);
+	return result;
+}
+
+
+/**
+ * i2400m_download_chunk - write a single chunk of data to the device's memory
+ *
+ * @i2400m: device descriptor
+ * @chunk: the buffer to write
+ * @__chunk_len: length of the buffer to write
+ * @addr: address in the device memory space
+ * @direct: bootrom write mode
+ * @do_csum: should a checksum validation be performed
+ */
+static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
+				 size_t __chunk_len, unsigned long addr,
+				 unsigned int direct, unsigned int do_csum)
+{
+	int ret;
+	size_t chunk_len = ALIGN(__chunk_len, I2400M_PL_PAD);
+	struct device *dev = i2400m_dev(i2400m);
+	struct {
+		struct i2400m_bootrom_header cmd;
+		u8 cmd_payload[chunk_len];
+	} __attribute__((packed)) *buf;
+	struct i2400m_bootrom_header ack;
+
+	d_fnstart(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx "
+		  "direct %u do_csum %u)\n", i2400m, chunk, __chunk_len,
+		  addr, direct, do_csum);
+	buf = i2400m->bm_cmd_buf;
+	memcpy(buf->cmd_payload, chunk, __chunk_len);
+	memset(buf->cmd_payload + __chunk_len, 0xad, chunk_len - __chunk_len);
+
+	buf->cmd.command = i2400m_brh_command(I2400M_BRH_WRITE,
+					      __chunk_len & 0x3 ? 0 : do_csum,
+					      __chunk_len & 0xf ? 0 : direct);
+	buf->cmd.target_addr = cpu_to_le32(addr);
+	buf->cmd.data_size = cpu_to_le32(__chunk_len);
+	ret = i2400m_bm_cmd(i2400m, &buf->cmd, sizeof(buf->cmd) + chunk_len,
+			    &ack, sizeof(ack), 0);
+	if (ret >= 0)
+		ret = 0;
+	d_fnend(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx "
+		"direct %u do_csum %u) = %d\n", i2400m, chunk, __chunk_len,
+		addr, direct, do_csum, ret);
+	return ret;
+}
+
+
+/*
+ * Download a BCF file's sections to the device
+ *
+ * @i2400m: device descriptor
+ * @bcf: pointer to firmware data (followed by the payloads). Assumed
+ *       verified and consistent.
+ * @bcf_len: length (in bytes) of the @bcf buffer.
+ *
+ * Returns: < 0 errno code on error or the offset to the jump instruction.
+ *
+ * Given a BCF file, downloads each section (a command and a payload)
+ * to the device's address space. Actually, it just executes each
+ * command in the BCF file.
+ *
+ * The section size has to be aligned to 4 bytes AND the padding has
+ * to be taken from the firmware file, as the signature takes it into
+ * account.
+ */
+static
+ssize_t i2400m_dnload_bcf(struct i2400m *i2400m,
+			  const struct i2400m_bcf_hdr *bcf, size_t bcf_len)
+{
+	ssize_t ret;
+	struct device *dev = i2400m_dev(i2400m);
+	size_t offset,		/* iterator offset */
+		data_size,	/* Size of the data payload */
+		section_size,	/* Size of the whole section (cmd + payload) */
+		section = 1;
+	const struct i2400m_bootrom_header *bh;
+	struct i2400m_bootrom_header ack;
+
+	d_fnstart(3, dev, "(i2400m %p bcf %p bcf_len %zu)\n",
+		  i2400m, bcf, bcf_len);
+	/* Iterate over the command blocks in the BCF file that start
+	 * after the header */
+	offset = le32_to_cpu(bcf->header_len) * sizeof(u32);
+	while (1) {	/* start sending the file */
+		bh = (void *) bcf + offset;
+		data_size = le32_to_cpu(bh->data_size);
+		section_size = ALIGN(sizeof(*bh) + data_size, 4);
+		d_printf(7, dev,
+			 "downloading section #%zu (@%zu %zu B) to 0x%08x\n",
+			 section, offset, sizeof(*bh) + data_size,
+			 le32_to_cpu(bh->target_addr));
+		if (i2400m_brh_get_opcode(bh) == I2400M_BRH_SIGNED_JUMP) {
+			/* Secure boot needs to stop here */
+			d_printf(5, dev,  "signed jump found @%zu\n", offset);
+			break;
+		}
+		if (offset + section_size == bcf_len)
+			/* Non-secure boot stops here */
+			break;
+		if (offset + section_size > bcf_len) {
+			dev_err(dev, "fw %s: bad section #%zu, "
+				"end (@%zu) beyond EOF (@%zu)\n",
+				i2400m->bus_fw_name, section,
+				offset + section_size,  bcf_len);
+			ret = -EINVAL;
+			goto error_section_beyond_eof;
+		}
+		__i2400m_msleep(20);
+		ret = i2400m_bm_cmd(i2400m, bh, section_size,
+				    &ack, sizeof(ack), I2400M_BM_CMD_RAW);
+		if (ret < 0) {
+			dev_err(dev, "fw %s: section #%zu (@%zu %zu B) "
+				"failed %d\n", i2400m->bus_fw_name, section,
+				offset, sizeof(*bh) + data_size, (int) ret);
+			goto error_send;
+		}
+		offset += section_size;
+		section++;
+	}
+	ret = offset;
+error_section_beyond_eof:
+error_send:
+	d_fnend(3, dev, "(i2400m %p bcf %p bcf_len %zu) = %d\n",
+		i2400m, bcf, bcf_len, (int) ret);
+	return ret;
+}
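+
+/*
+ * For reference, the BCF layout i2400m_dnload_bcf() walks, as
+ * described in its header comment (a sketch; sizes are in 32-bit
+ * words where the header fields say so):
+ *
+ *   [ struct i2400m_bcf_hdr, keys and signatures ]   header_len * 4 bytes
+ *   [ struct i2400m_bootrom_header | payload ]       section 1 (4-aligned)
+ *   [ struct i2400m_bootrom_header | payload ]       section 2 (4-aligned)
+ *   ...
+ *   [ JUMP / SIGNED_JUMP section ]                   handed off to
+ *                                                    i2400m_dnload_finalize()
+ */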
+
+
+/*
+ * Do the final steps of uploading firmware
+ *
+ * Depending on the boot mode (signed vs non-signed), different
+ * actions need to be taken.
+ */
+static
+int i2400m_dnload_finalize(struct i2400m *i2400m,
+			   const struct i2400m_bcf_hdr *bcf, size_t offset)
+{
+	int ret = 0;
+	struct device *dev = i2400m_dev(i2400m);
+	struct i2400m_bootrom_header *cmd, ack;
+	struct {
+		struct i2400m_bootrom_header cmd;
+		u8 cmd_pl[0];
+	} __attribute__((packed)) *cmd_buf;
+	size_t signature_block_offset, signature_block_size;
+
+	d_fnstart(3, dev, "offset %zu\n", offset);
+	cmd = (void *) bcf + offset;
+	if (i2400m->sboot == 0) {
+		struct i2400m_bootrom_header jump_ack;
+		d_printf(3, dev, "unsecure boot, jumping to 0x%08x\n",
+			le32_to_cpu(cmd->target_addr));
+		i2400m_brh_set_opcode(cmd, I2400M_BRH_JUMP);
+		cmd->data_size = 0;
+		ret = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
+				    &jump_ack, sizeof(jump_ack), 0);
+	} else {
+		d_printf(3, dev, "secure boot, jumping to 0x%08x\n",
+			 le32_to_cpu(cmd->target_addr));
+		cmd_buf = i2400m->bm_cmd_buf;
+		memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd));
+		signature_block_offset =
+			sizeof(*bcf)
+			+ le32_to_cpu(bcf->key_size) * sizeof(u32)
+			+ le32_to_cpu(bcf->exponent_size) * sizeof(u32);
+		signature_block_size =
+			le32_to_cpu(bcf->modulus_size) * sizeof(u32);
+		memcpy(cmd_buf->cmd_pl, (void *) bcf + signature_block_offset,
+		       signature_block_size);
+		ret = i2400m_bm_cmd(i2400m, &cmd_buf->cmd,
+				    sizeof(cmd_buf->cmd) + signature_block_size,
+				    &ack, sizeof(ack), I2400M_BM_CMD_RAW);
+	}
+	d_fnend(3, dev, "returning %d\n", ret);
+	return ret;
+}
+
+
+/**
+ * i2400m_bootrom_init - Reboots a powered device into boot mode
+ *
+ * @i2400m: device descriptor
+ * @flags:
+ *      I2400M_BRI_SOFT: a reboot notification has been seen
+ *          already, so don't wait for it.
+ *
+ *      I2400M_BRI_NO_REBOOT: Don't send a reboot command, but wait
+ *          for a reboot barker notification. This is a one shot; if
+ *          the state machine needs to send a reboot command it will.
+ *
+ * Returns:
+ *
+ *     < 0 errno code on error, 0 if ok.
+ *
+ *     i2400m->sboot set to 0 for unsecure boot process, 1 for secure
+ *     boot process.
+ *
+ * Description:
+ *
+ * Tries hard enough to put the device in boot-mode. There are two
+ * main phases to this:
+ *
+ * a. (1) send a reboot command and (2) get a reboot barker
+ * b. (1) ack the reboot sending a reboot barker and (2) getting an
+ *        ack barker in return
+ *
+ * We want to skip (a) in some cases [soft]. The state machine is
+ * horrible, but it is basically: on each phase, send what has to be
+ * sent (if any), wait for the answer and act on the answer. We might
+ * have to backtrack and retry, so we keep a max tries counter for
+ * that.
+ *
+ * If we get a timeout after sending a warm reset, we do it again.
+ */
+int i2400m_bootrom_init(struct i2400m *i2400m, enum i2400m_bri flags)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	struct i2400m_bootrom_header *cmd;
+	struct i2400m_bootrom_header ack;
+	int count = I2400M_BOOT_RETRIES;
+	int ack_timeout_cnt = 1;
+
+	BUILD_BUG_ON(sizeof(*cmd) != sizeof(i2400m_NBOOT_BARKER));
+	BUILD_BUG_ON(sizeof(ack) != sizeof(i2400m_ACK_BARKER));
+
+	d_fnstart(4, dev, "(i2400m %p flags 0x%08x)\n", i2400m, flags);
+	result = -ENOMEM;
+	cmd = i2400m->bm_cmd_buf;
+	if (flags & I2400M_BRI_SOFT)
+		goto do_reboot_ack;
+do_reboot:
+	if (--count < 0)
+		goto error_timeout;
+	d_printf(4, dev, "device reboot: reboot command [%d # left]\n",
+		 count);
+	if ((flags & I2400M_BRI_NO_REBOOT) == 0)
+		i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+	result = i2400m_bm_cmd(i2400m, NULL, 0, &ack, sizeof(ack),
+			       I2400M_BM_CMD_RAW);
+	flags &= ~I2400M_BRI_NO_REBOOT;
+	switch (result) {
+	case -ERESTARTSYS:
+		d_printf(4, dev, "device reboot: got reboot barker\n");
+		break;
+	case -EISCONN:	/* we don't know how it got here...but we follow it */
+		d_printf(4, dev, "device reboot: got ack barker - whatever\n");
+		goto do_reboot;
+	case -ETIMEDOUT:	/* device has timed out, we might be in boot
+				 * mode already and expecting an ack, let's try
+				 * that */
+		dev_info(dev, "warm reset timed out, trying an ack\n");
+		goto do_reboot_ack;
+	case -EPROTO:
+	case -ESHUTDOWN:	/* dev is gone */
+	case -EINTR:		/* user cancelled */
+		goto error_dev_gone;
+	default:
+		dev_err(dev, "device reboot: error %d while waiting "
+			"for reboot barker - rebooting\n", result);
+		goto do_reboot;
+	}
+	/* At this point we ack back with 4 REBOOT barkers and expect
+	 * 4 ACK barkers. This is ugly, as we send a raw command --
+	 * hence the cast. _bm_cmd() will catch the reboot ack
+	 * notification and report it as -EISCONN. */
+do_reboot_ack:
+	d_printf(4, dev, "device reboot ack: sending ack [%d # left]\n", count);
+	if (i2400m->sboot == 0)
+		memcpy(cmd, i2400m_NBOOT_BARKER,
+		       sizeof(i2400m_NBOOT_BARKER));
+	else
+		memcpy(cmd, i2400m_SBOOT_BARKER,
+		       sizeof(i2400m_SBOOT_BARKER));
+	result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
+			       &ack, sizeof(ack), I2400M_BM_CMD_RAW);
+	switch (result) {
+	case -ERESTARTSYS:
+		d_printf(4, dev, "reboot ack: got reboot barker - retrying\n");
+		if (--count < 0)
+			goto error_timeout;
+		goto do_reboot_ack;
+	case -EISCONN:
+		d_printf(4, dev, "reboot ack: got ack barker - good\n");
+		break;
+	case -ETIMEDOUT:	/* no response, maybe it is the other type? */
+		if (ack_timeout_cnt-- >= 0) {
+			d_printf(4, dev, "reboot ack timedout: "
+				 "trying the other type?\n");
+			i2400m->sboot = !i2400m->sboot;
+			goto do_reboot_ack;
+		} else {
+			dev_err(dev, "reboot ack timedout too long: "
+				"trying reboot\n");
+			goto do_reboot;
+		}
+		break;
+	case -EPROTO:
+	case -ESHUTDOWN:	/* dev is gone */
+		goto error_dev_gone;
+	default:
+		dev_err(dev, "device reboot ack: error %d while waiting for "
+			"reboot ack barker - rebooting\n", result);
+		goto do_reboot;
+	}
+	d_printf(2, dev, "device reboot ack: got ack barker - boot done\n");
+	result = 0;
+exit_timeout:
+error_dev_gone:
+	d_fnend(4, dev, "(i2400m %p flags 0x%08x) = %d\n",
+		i2400m, flags, result);
+	return result;
+
+error_timeout:
+	dev_err(dev, "Timed out waiting for reboot ack, resetting\n");
+	i2400m->bus_reset(i2400m, I2400M_RT_BUS);
+	result = -ETIMEDOUT;
+	goto exit_timeout;
+}
+
+
+/*
+ * Read the MAC addr
+ *
+ * The position this function reads is fixed in device memory and
+ * always available, even without firmware.
+ *
+ * Note we specify we want to read only six bytes, but provide space
+ * for 16, as we always get it rounded up.
+ */
+int i2400m_read_mac_addr(struct i2400m *i2400m)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+	struct i2400m_bootrom_header *cmd;
+	struct {
+		struct i2400m_bootrom_header ack;
+		u8 ack_pl[16];
+	} __attribute__((packed)) ack_buf;
+
+	d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
+	cmd = i2400m->bm_cmd_buf;
+	cmd->command = i2400m_brh_command(I2400M_BRH_READ, 0, 1);
+	cmd->target_addr = cpu_to_le32(0x00203fe8);
+	cmd->data_size = cpu_to_le32(6);
+	result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
+			       &ack_buf.ack, sizeof(ack_buf), 0);
+	if (result < 0) {
+		dev_err(dev, "BM: read mac addr failed: %d\n", result);
+		goto error_read_mac;
+	}
+	d_printf(2, dev,
+		 "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
+		 ack_buf.ack_pl[0], ack_buf.ack_pl[1],
+		 ack_buf.ack_pl[2], ack_buf.ack_pl[3],
+		 ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
+	if (i2400m->bus_bm_mac_addr_impaired == 1) {
+		ack_buf.ack_pl[0] = 0x00;
+		ack_buf.ack_pl[1] = 0x16;
+		ack_buf.ack_pl[2] = 0xd3;
+		get_random_bytes(&ack_buf.ack_pl[3], 3);
+		dev_err(dev, "BM is MAC addr impaired, faking MAC addr to "
+			"mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
+			ack_buf.ack_pl[0], ack_buf.ack_pl[1],
+			ack_buf.ack_pl[2], ack_buf.ack_pl[3],
+			ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
+		result = 0;
+	}
+	net_dev->addr_len = ETH_ALEN;
+	memcpy(net_dev->perm_addr, ack_buf.ack_pl, ETH_ALEN);
+	memcpy(net_dev->dev_addr, ack_buf.ack_pl, ETH_ALEN);
+error_read_mac:
+	d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, result);
+	return result;
+}
+
+
+/*
+ * Initialize a non signed boot
+ *
+ * This implies sending some magic values to the device's memory. Note
+ * we convert the values to little endian in the same array
+ * declaration.
+ */
+static
+int i2400m_dnload_init_nonsigned(struct i2400m *i2400m)
+{
+#define POKE(a, d) {					\
+	.address = __constant_cpu_to_le32(a),		\
+	.data = __constant_cpu_to_le32(d)		\
+}
+	static const struct {
+		__le32 address;
+		__le32 data;
+	} i2400m_pokes[] = {
+		POKE(0x081A58, 0xA7810230),
+		POKE(0x080040, 0x00000000),
+		POKE(0x080048, 0x00000082),
+		POKE(0x08004C, 0x0000081F),
+		POKE(0x080054, 0x00000085),
+		POKE(0x080058, 0x00000180),
+		POKE(0x08005C, 0x00000018),
+		POKE(0x080060, 0x00000010),
+		POKE(0x080574, 0x00000001),
+		POKE(0x080550, 0x00000005),
+		POKE(0xAE0000, 0x00000000),
+	};
+#undef POKE
+	unsigned i;
+	int ret;
+	struct device *dev = i2400m_dev(i2400m);
+
+	dev_warn(dev, "WARNING!!! non-signed boot UNTESTED PATH!\n");
+
+	d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
+	for (i = 0; i < ARRAY_SIZE(i2400m_pokes); i++) {
+		ret = i2400m_download_chunk(i2400m, &i2400m_pokes[i].data,
+					    sizeof(i2400m_pokes[i].data),
+					    i2400m_pokes[i].address, 1, 1);
+		if (ret < 0)
+			break;
+	}
+	d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret);
+	return ret;
+}
+
+
+/*
+ * Initialize the signed boot process
+ *
+ * @i2400m: device descriptor
+ *
+ * @bcf_hdr: pointer to the firmware header; assumes it is fully in
+ *     memory (it has gone through basic validation).
+ *
+ * Returns: 0 if ok, < 0 errno code on error, -ERESTARTSYS if the hw
+ *     rebooted.
+ *
+ * This writes the firmware BCF header to the device using the
+ * HASH_PAYLOAD_ONLY command.
+ */
+static
+int i2400m_dnload_init_signed(struct i2400m *i2400m,
+			      const struct i2400m_bcf_hdr *bcf_hdr)
+{
+	int ret;
+	struct device *dev = i2400m_dev(i2400m);
+	struct {
+		struct i2400m_bootrom_header cmd;
+		struct i2400m_bcf_hdr cmd_pl;
+	} __attribute__((packed)) *cmd_buf;
+	struct i2400m_bootrom_header ack;
+
+	d_fnstart(5, dev, "(i2400m %p bcf_hdr %p)\n", i2400m, bcf_hdr);
+	cmd_buf = i2400m->bm_cmd_buf;
+	cmd_buf->cmd.command =
+		i2400m_brh_command(I2400M_BRH_HASH_PAYLOAD_ONLY, 0, 0);
+	cmd_buf->cmd.target_addr = 0;
+	cmd_buf->cmd.data_size = cpu_to_le32(sizeof(cmd_buf->cmd_pl));
+	memcpy(&cmd_buf->cmd_pl, bcf_hdr, sizeof(*bcf_hdr));
+	ret = i2400m_bm_cmd(i2400m, &cmd_buf->cmd, sizeof(*cmd_buf),
+			    &ack, sizeof(ack), 0);
+	if (ret >= 0)
+		ret = 0;
+	d_fnend(5, dev, "(i2400m %p bcf_hdr %p) = %d\n", i2400m, bcf_hdr, ret);
+	return ret;
+}
+
+
+/*
+ * Initialize the firmware download at the device side
+ *
+ * Multiplex to the one that matters based on the device's mode
+ * (signed or non-signed).
+ */
+static
+int i2400m_dnload_init(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	u32 module_id = le32_to_cpu(bcf->module_id);
+
+	if (i2400m->sboot == 0
+	    && (module_id & I2400M_BCF_MOD_ID_POKES) == 0) {
+		/* non-signed boot process without pokes */
+		result = i2400m_dnload_init_nonsigned(i2400m);
+		if (result == -ERESTARTSYS)
+			return result;
+		if (result < 0)
+			dev_err(dev, "fw %s: non-signed download "
+				"initialization failed: %d\n",
+				i2400m->bus_fw_name, result);
+	} else if (i2400m->sboot == 0
+		 && (module_id & I2400M_BCF_MOD_ID_POKES)) {
+		/* non-signed boot process with pokes, nothing to do */
+		result = 0;
+	} else {		 /* signed boot process */
+		result = i2400m_dnload_init_signed(i2400m, bcf);
+		if (result == -ERESTARTSYS)
+			return result;
+		if (result < 0)
+			dev_err(dev, "fw %s: signed boot download "
+				"initialization failed: %d\n",
+				i2400m->bus_fw_name, result);
+	}
+	return result;
+}
+
+
+/*
+ * Run quick consistency tests on the firmware file
+ *
+ * Check that the firmware was built for the i2400m device,
+ * etc. These checks are mostly informative, as the device will make
+ * them too; but the driver's messages are more descriptive about what
+ * went wrong.
+ */
+static
+int i2400m_fw_check(struct i2400m *i2400m,
+		    const struct i2400m_bcf_hdr *bcf,
+		    size_t bcf_size)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	unsigned module_type, header_len, major_version, minor_version,
+		module_id, module_vendor, date, size;
+
+	/* Check hard errors */
+	result = -EINVAL;
+	if (bcf_size < sizeof(*bcf)) {	/* big enough header? */
+		dev_err(dev, "firmware %s too short: "
+			"%zu B vs %zu (at least) expected\n",
+			i2400m->bus_fw_name, bcf_size, sizeof(*bcf));
+		goto error;
+	}
+
+	module_type = bcf->module_type;
+	header_len = sizeof(u32) * le32_to_cpu(bcf->header_len);
+	major_version = (le32_to_cpu(bcf->header_version) & 0xffff0000) >> 16;
+	minor_version = le32_to_cpu(bcf->header_version) & 0x0000ffff;
+	module_id = le32_to_cpu(bcf->module_id);
+	module_vendor = le32_to_cpu(bcf->module_vendor);
+	date = le32_to_cpu(bcf->date);
+	size = sizeof(u32) * le32_to_cpu(bcf->size);
+
+	if (bcf_size != size) {		/* annoyingly paranoid */
+		dev_err(dev, "firmware %s: bad size, got "
+			"%zu B vs %u expected\n",
+			i2400m->bus_fw_name, bcf_size, size);
+		goto error;
+	}
+
+	d_printf(2, dev, "type 0x%x id 0x%x vendor 0x%x; header v%u.%u (%zu B) "
+		 "date %08x (%zu B)\n",
+		 module_type, module_id, module_vendor,
+		 major_version, minor_version, (size_t) header_len,
+		 date, (size_t) size);
+
+	if (module_type != 6) {		/* built for the right hardware? */
+		dev_err(dev, "bad fw %s: unexpected module type 0x%x; "
+			"aborting\n", i2400m->bus_fw_name, module_type);
+		goto error;
+	}
+
+	/* Check soft-er errors */
+	result = 0;
+	if (module_vendor != 0x8086)
+		dev_err(dev, "bad fw %s? unexpected vendor 0x%04x\n",
+			i2400m->bus_fw_name, module_vendor);
+	if (date < 0x20080300)
+		dev_err(dev, "bad fw %s? build date too old %08x\n",
+			i2400m->bus_fw_name, date);
+error:
+	return result;
+}
+
+
+/*
+ * Download the firmware to the device
+ *
+ * @i2400m: device descriptor
+ * @bcf: pointer to loaded (and minimally verified for consistency)
+ *    firmware
+ * @bcf_size: size of the @bcf buffer (header plus payloads)
+ *
+ * The process for doing this is described in this file's header.
+ *
+ * Note we only reinitialize boot mode if the flags say so. Some hw
+ * iterations need it, some don't. In any case, if we loop, we always
+ * need to reinitialize the boot ROM, hence the flags modification.
+ */
+static
+int i2400m_fw_dnload(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf,
+		     size_t bcf_size, enum i2400m_bri flags)
+{
+	int ret = 0;
+	struct device *dev = i2400m_dev(i2400m);
+	int count = I2400M_BOOT_RETRIES;
+
+	d_fnstart(5, dev, "(i2400m %p bcf %p size %zu)\n",
+		  i2400m, bcf, bcf_size);
+	i2400m->boot_mode = 1;
+hw_reboot:
+	if (count-- == 0) {
+		ret = -ERESTARTSYS;
+		dev_err(dev, "device rebooted too many times, aborting\n");
+		goto error_too_many_reboots;
+	}
+	if (flags & I2400M_BRI_MAC_REINIT) {
+		ret = i2400m_bootrom_init(i2400m, flags);
+		if (ret < 0) {
+			dev_err(dev, "bootrom init failed: %d\n", ret);
+			goto error_bootrom_init;
+		}
+	}
+	flags |= I2400M_BRI_MAC_REINIT;
+
+	/*
+	 * Initialize the download, push the bytes to the device and
+	 * then jump to the new firmware. Note @ret is passed with the
+	 * offset of the jump instruction to _dnload_finalize()
+	 */
+	ret = i2400m_dnload_init(i2400m, bcf);	/* Init device's dnload */
+	if (ret == -ERESTARTSYS)
+		goto error_dev_rebooted;
+	if (ret < 0)
+		goto error_dnload_init;
+
+	ret = i2400m_dnload_bcf(i2400m, bcf, bcf_size);
+	if (ret == -ERESTARTSYS)
+		goto error_dev_rebooted;
+	if (ret < 0) {
+		dev_err(dev, "fw %s: download failed: %d\n",
+			i2400m->bus_fw_name, ret);
+		goto error_dnload_bcf;
+	}
+
+	ret = i2400m_dnload_finalize(i2400m, bcf, ret);
+	if (ret == -ERESTARTSYS)
+		goto error_dev_rebooted;
+	if (ret < 0) {
+		dev_err(dev, "fw %s: "
+			"download finalization failed: %d\n",
+			i2400m->bus_fw_name, ret);
+		goto error_dnload_finalize;
+	}
+
+	d_printf(2, dev, "fw %s successfully uploaded\n",
+		 i2400m->bus_fw_name);
+	i2400m->boot_mode = 0;
+error_dnload_finalize:
+error_dnload_bcf:
+error_dnload_init:
+error_bootrom_init:
+error_too_many_reboots:
+	d_fnend(5, dev, "(i2400m %p bcf %p size %zu) = %d\n",
+		i2400m, bcf, bcf_size, ret);
+	return ret;
+
+error_dev_rebooted:
+	dev_err(dev, "device rebooted, %d tries left\n", count);
+	/* we got the notification already, no need to wait for it again */
+	flags |= I2400M_BRI_SOFT;
+	goto hw_reboot;
+}
+
+
+/**
+ * i2400m_dev_bootstrap - Bring the device to a known state and upload firmware
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: >= 0 if ok, < 0 errno code on error.
+ *
+ * This sets up the firmware upload environment, loads the firmware
+ * file from disk, verifies and then calls the firmware upload process
+ * per se.
+ *
+ * Can be called either from probe, or after a warm reset.  Cannot be
+ * called from within an interrupt.  All the flow in this code is
+ * single-threaded; all I/O is synchronous.
+ */
+int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags)
+{
+	int ret = 0;
+	struct device *dev = i2400m_dev(i2400m);
+	const struct firmware *fw;
+	const struct i2400m_bcf_hdr *bcf;	/* Firmware data */
+
+	d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
+	/* Load firmware files to memory. */
+	ret = request_firmware(&fw, i2400m->bus_fw_name, dev);
+	if (ret) {
+		dev_err(dev, "fw %s: request failed: %d\n",
+			i2400m->bus_fw_name, ret);
+		goto error_fw_req;
+	}
+	bcf = (void *) fw->data;
+
+	ret = i2400m_fw_check(i2400m, bcf, fw->size);
+	if (ret < 0)
+		goto error_fw_bad;
+	ret = i2400m_fw_dnload(i2400m, bcf, fw->size, flags);
+error_fw_bad:
+	release_firmware(fw);
+error_fw_req:
+	d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i2400m_dev_bootstrap);
diff --git a/drivers/net/wimax/i2400m/i2400m-sdio.h b/drivers/net/wimax/i2400m/i2400m-sdio.h
new file mode 100644
index 0000000..08c2fb7
--- /dev/null
+++ b/drivers/net/wimax/i2400m/i2400m-sdio.h
@@ -0,0 +1,132 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * SDIO-specific i2400m driver definitions
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Brian Bian <brian.bian@intel.com>
+ * Dirk Brandewie <dirk.j.brandewie@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ *  - Initial implementation
+ *
+ *
+ * This driver implements the bus-specific part of the i2400m for
+ * SDIO. Check i2400m.h for a generic driver description.
+ *
+ * ARCHITECTURE
+ *
+ * This driver sits under the bus-generic i2400m driver, providing the
+ * connection to the device.
+ *
+ * When probed, all the function pointers are setup and then the
+ * bus-generic code called. The generic driver will then use the
+ * provided pointers for uploading firmware (i2400ms_bus_bm*() in
+ * sdio-fw.c) and then setting up the device (i2400ms_dev_*() in
+ * sdio.c).
+ *
+ * Once firmware is uploaded, TX functions (sdio-tx.c) are called when
+ * data is ready for transmission in the TX FIFO; when the SDIO IRQ
+ * fires and data is available (sdio-rx.c), it is passed to the generic
+ * driver for processing with i2400m_rx().
+ */
+
+#ifndef __I2400M_SDIO_H__
+#define __I2400M_SDIO_H__
+
+#include "i2400m.h"
+
+/* Host-Device interface for SDIO */
+enum {
+	I2400MS_BLK_SIZE = 256,
+	I2400MS_PL_SIZE_MAX = 0x3E00,
+
+	I2400MS_DATA_ADDR = 0x0,
+	I2400MS_INTR_STATUS_ADDR = 0x13,
+	I2400MS_INTR_CLEAR_ADDR = 0x13,
+	I2400MS_INTR_ENABLE_ADDR = 0x14,
+	I2400MS_INTR_GET_SIZE_ADDR = 0x2C,
+	/* The number of ticks to wait for the device to signal that
+	 * it is ready */
+	I2400MS_INIT_SLEEP_INTERVAL = 10,
+};
+
+
+/**
+ * struct i2400ms - descriptor for a SDIO connected i2400m
+ *
+ * @i2400m: bus-generic i2400m implementation; has to be first (see
+ *     its documentation in i2400m.h).
+ *
+ * @func: pointer to our SDIO function
+ *
+ * @tx_worker: work struct used to TX data when the bus-generic
+ *     code signals packets are pending for transmission to the device.
+ *
+ * @tx_workqueue: workqueue used for data TX; we don't use the
+ *     system's workqueue as that might cause deadlocks with code in
+ *     the bus-generic driver.
+ */
+struct i2400ms {
+	struct i2400m i2400m;		/* FIRST! See doc */
+	struct sdio_func *func;
+
+	struct work_struct tx_worker;
+	struct workqueue_struct *tx_workqueue;
+	char tx_wq_name[32];
+
+	struct dentry *debugfs_dentry;
+};
+
+
+static inline
+void i2400ms_init(struct i2400ms *i2400ms)
+{
+	i2400m_init(&i2400ms->i2400m);
+}
+
+
+extern int i2400ms_rx_setup(struct i2400ms *);
+extern void i2400ms_rx_release(struct i2400ms *);
+extern ssize_t __i2400ms_rx_get_size(struct i2400ms *);
+
+extern int i2400ms_tx_setup(struct i2400ms *);
+extern void i2400ms_tx_release(struct i2400ms *);
+extern void i2400ms_bus_tx_kick(struct i2400m *);
+
+extern ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *,
+				       const struct i2400m_bootrom_header *,
+				       size_t, int);
+extern ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *,
+					   struct i2400m_bootrom_header *,
+					   size_t);
+#endif /* #ifndef __I2400M_SDIO_H__ */
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
new file mode 100644
index 0000000..6f76558
--- /dev/null
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -0,0 +1,264 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * USB-specific i2400m driver definitions
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ *  - Initial implementation
+ *
+ *
+ * This driver implements the bus-specific part of the i2400m for
+ * USB. Check i2400m.h for a generic driver description.
+ *
+ * ARCHITECTURE
+ *
+ * This driver listens to notifications sent from the notification
+ * endpoint (in usb-notif.c); when data is ready to read, the code in
+ * there schedules a read from the device (usb-rx.c) and then passes
+ * the data to the generic RX code (rx.c).
+ *
+ * When the generic driver needs to send data (network or control), it
+ * queues up in the TX FIFO (tx.c) and that will notify the driver
+ * through the i2400m->bus_tx_kick() callback
+ * (usb-tx.c:i2400mu_bus_tx_kick) which will send the items in the
+ * FIFO queue.
+ *
+ * This driver, as well, implements the USB-specific ops for the generic
+ * driver to be able to setup/teardown communication with the device
+ * [i2400m_bus_dev_start() and i2400m_bus_dev_stop()], resetting the
+ * device [i2400m_bus_reset()] and performing firmware upload
+ * [i2400m_bus_bm_cmd() and i2400m_bus_bm_wait_for_ack()].
+ */
+
+#ifndef __I2400M_USB_H__
+#define __I2400M_USB_H__
+
+#include "i2400m.h"
+#include <linux/kthread.h>
+
+
+/*
+ * Error Density Count: cheapo error density (over time) counter
+ *
+ * Originally by Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * Embed an 'struct edc' somewhere. Each time there is a soft or
+ * retryable error, call edc_inc() and check if the error top
+ * watermark has been reached.
+ */
+enum {
+	EDC_MAX_ERRORS = 10,
+	EDC_ERROR_TIMEFRAME = HZ,
+};
+
+/* error density counter */
+struct edc {
+	unsigned long timestart;
+	u16 errorcount;
+};
+
+static inline void edc_init(struct edc *edc)
+{
+	edc->timestart = jiffies;
+}
+
+/**
+ * edc_inc - report a soft error and check if we are over the watermark
+ *
+ * @edc: pointer to error density counter.
+ * @max_err: maximum number of errors we can accept over the timeframe
+ * @timeframe: length of the timeframe (in jiffies).
+ *
+ * Returns: 1 if the maximum number of acceptable errors per timeframe
+ *     has been exceeded, 0 otherwise.
+ *
+ * This is a way to determine if the number of acceptable errors per time
+ * period has been exceeded. It is not accurate, as there are cases in which
+ * this scheme will not work, for example if there are periodic occurrences
+ * of errors that straddle updates to the start time. This scheme is
+ * sufficient for our usage.
+ *
+ * To use, embed a 'struct edc' somewhere, initialize it with
+ * edc_init() and when an error hits:
+ *
+ * if (do_something_fails_with_a_soft_error) {
+ *        if (edc_inc(&my->edc, MAX_ERRORS, MAX_TIMEFRAME))
+ * 	           Oops, hard error, do something about it
+ *        else
+ *                 Retry or ignore, depending on whatever
+ * }
+ */
+static inline int edc_inc(struct edc *edc, u16 max_err, u16 timeframe)
+{
+	unsigned long now;
+
+	now = jiffies;
+	if (now - edc->timestart > timeframe) {
+		edc->errorcount = 1;
+		edc->timestart = now;
+	} else if (++edc->errorcount > max_err) {
+		edc->errorcount = 0;
+		edc->timestart = now;
+		return 1;
+	}
+	return 0;
+}
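+
+/*
+ * A concrete sketch (illustrative only, not lifted from the bus
+ * drivers): a USB driver embedding a 'struct edc urb_edc' -- as
+ * struct i2400mu below does -- could handle a soft URB error with the
+ * constants defined above; the error_reset label is hypothetical:
+ *
+ *   if (edc_inc(&i2400mu->urb_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+ *           dev_err(dev, "too many soft errors, resetting device\n");
+ *           goto error_reset;
+ *   }
+ *   (else just retry or ignore the error)
+ */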
+
+/* Host-Device interface for USB */
+enum {
+	I2400MU_MAX_NOTIFICATION_LEN = 256,
+	I2400MU_BLK_SIZE = 16,
+	I2400MU_PL_SIZE_MAX = 0x3EFF,
+
+	/* Endpoints */
+	I2400MU_EP_BULK_OUT = 0,
+	I2400MU_EP_NOTIFICATION,
+	I2400MU_EP_RESET_COLD,
+	I2400MU_EP_BULK_IN,
+};
+
+
+/**
+ * struct i2400mu - descriptor for a USB connected i2400m
+ *
+ * @i2400m: bus-generic i2400m implementation; has to be first (see
+ *     its documentation in i2400m.h).
+ *
+ * @usb_dev: pointer to our USB device
+ *
+ * @usb_iface: pointer to our USB interface
+ *
+ * @urb_edc: error density counter; used to keep a density-on-time tab
+ *     on how many soft (retryable or ignorable) errors we get. If we
+ *     go over the threshold, we consider the bus transport is failing
+ *     too much and reset.
+ *
+ * @notif_urb: URB for receiving notifications from the device.
+ *
+ * @tx_kthread: thread we use for data TX. We use a thread because in
+ *     order to do deep power saving and put the device to sleep, we
+ *     need to call usb_autopm_*() [blocking functions].
+ *
+ * @tx_wq: waitqueue for the TX kthread to sleep when there is no data
+ *     to be sent; when more data is available, it is woken up by
+ *     i2400mu_bus_tx_kick().
+ *
+ * @rx_kthread: thread we use for data RX. We use a thread because in
+ *     order to do deep power saving and put the device to sleep, we
+ *     need to call usb_autopm_*() [blocking functions].
+ *
+ * @rx_wq: waitqueue for the RX kthread to sleep when there is no data
+ *     to receive. When data is available, it is woken up by
+ *     usb-notif.c:i2400mu_notification_grok().
+ *
+ * @rx_pending_count: number of rx-data-ready notifications that were
+ *     still not handled by the RX kthread.
+ *
+ * @rx_size: current RX buffer size that is being used.
+ *
+ * @rx_size_acc: accumulator of the sizes of the previous read
+ *     transactions.
+ *
+ * @rx_size_cnt: number of read transactions accumulated in
+ *     @rx_size_acc.
+ *
+ * @do_autopm: disable(0)/enable(>0) calling the
+ *     usb_autopm_get/put_interface() barriers when executing
+ *     commands. See doc in i2400mu_suspend() for more information.
+ *
+ * @rx_size_auto_shrink: if true, the rx_size is shrunk
+ *     automatically based on the average size of the received
+ *     transactions. This allows the receive code to allocate smaller
+ *     chunks of memory and thus reduce pressure on the memory
+ *     allocator by not wasting so much space. By default it is
+ *     enabled.
+ *
+ * @debugfs_dentry: hookup for debugfs files.
+ *     These have to be in a separate directory, a child of
+ *     (wimax_dev->debugfs_dentry) so they can be removed when the
+ *     module unloads, as we don't keep each dentry.
+ */
+struct i2400mu {
+	struct i2400m i2400m;		/* FIRST! See doc */
+
+	struct usb_device *usb_dev;
+	struct usb_interface *usb_iface;
+	struct edc urb_edc;		/* Error density counter */
+
+	struct urb *notif_urb;
+	struct task_struct *tx_kthread;
+	wait_queue_head_t tx_wq;
+
+	struct task_struct *rx_kthread;
+	wait_queue_head_t rx_wq;
+	atomic_t rx_pending_count;
+	size_t rx_size, rx_size_acc, rx_size_cnt;
+	atomic_t do_autopm;
+	u8 rx_size_auto_shrink;
+
+	struct dentry *debugfs_dentry;
+};
+
+
+static inline
+void i2400mu_init(struct i2400mu *i2400mu)
+{
+	i2400m_init(&i2400mu->i2400m);
+	edc_init(&i2400mu->urb_edc);
+	init_waitqueue_head(&i2400mu->tx_wq);
+	atomic_set(&i2400mu->rx_pending_count, 0);
+	init_waitqueue_head(&i2400mu->rx_wq);
+	i2400mu->rx_size = PAGE_SIZE - sizeof(struct skb_shared_info);
+	atomic_set(&i2400mu->do_autopm, 1);
+	i2400mu->rx_size_auto_shrink = 1;
+}
+
+extern int i2400mu_notification_setup(struct i2400mu *);
+extern void i2400mu_notification_release(struct i2400mu *);
+
+extern int i2400mu_rx_setup(struct i2400mu *);
+extern void i2400mu_rx_release(struct i2400mu *);
+extern void i2400mu_rx_kick(struct i2400mu *);
+
+extern int i2400mu_tx_setup(struct i2400mu *);
+extern void i2400mu_tx_release(struct i2400mu *);
+extern void i2400mu_bus_tx_kick(struct i2400m *);
+
+extern ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
+				       const struct i2400m_bootrom_header *,
+				       size_t, int);
+extern ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
+					   struct i2400m_bootrom_header *,
+					   size_t);
+#endif /* #ifndef __I2400M_USB_H__ */
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
new file mode 100644
index 0000000..067c871
--- /dev/null
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -0,0 +1,755 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Declarations for bus-generic internal APIs
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ *  - Initial implementation
+ *
+ *
+ * GENERAL DRIVER ARCHITECTURE
+ *
+ * The i2400m driver is split in the following two major parts:
+ *
+ *  - bus specific driver
+ *  - bus generic driver (this part)
+ *
+ * The bus specific driver sets up stuff specific to the bus the
+ * device is connected to (USB, SDIO, PCI, tam-tam...non-authoritative
+ * nor binding list) which is basically the device-model management
+ * (probe/disconnect, etc), moving data from device to kernel and
+ * back, doing the power saving details and resetting the device.
+ *
+ * For details on each bus-specific driver, see its include file,
+ * i2400m-BUSNAME.h
+ *
+ * The bus-generic functionality break up is:
+ *
+ *  - Firmware upload: fw.c - takes care of uploading firmware to the
+ *        device. bus-specific driver just needs to provide a way to
+ *        execute boot-mode commands and to reset the device.
+ *
+ *  - RX handling: rx.c - receives data from the bus-specific code and
+ *        feeds it to the network or WiMAX stack or uses it to modify
+ *        the driver state. bus-specific driver only has to receive
+ *        frames and pass them to this module.
+ *
+ *  - TX handling: tx.c - manages the TX FIFO queue and provides means
+ *        for the bus-specific TX code to pull data from the FIFO
+ *        queue. bus-specific code just pulls frames from this module
+ *        to send them to the device.
+ *
+ *  - netdev glue: netdev.c - interface with Linux networking
+ *        stack. Pass around data frames, and configure when the
+ *        device is up and running or shutdown (through ifconfig up /
+ *        down). Bus-generic only.
+ *
+ *  - control ops: control.c - implements various commands for
+ *        controlling the device. bus-generic only.
+ *
+ *  - device model glue: driver.c - implements helpers for the
+ *        device-model glue done by the bus-specific layer
+ *        (setup/release the driver resources), turning the device on
+ *        and off, handling the device reboots/resets and a few simple
+ *        WiMAX stack ops.
+ *
+ * Code is also broken up in linux-glue / device-glue.
+ *
+ * Linux glue contains functions that deal mostly with gluing with the
+ * rest of the Linux kernel.
+ *
+ * Device-glue are functions that deal mostly with the way the device
+ * does things and talk the device's language.
+ *
+ * device-glue code is licensed BSD so other open source OSes can take
+ * it to implement their drivers.
+ *
+ *
+ * APIs AND HEADER FILES
+ *
+ * This bus generic code exports three APIs:
+ *
+ *  - HDI (host-device interface) definitions common to all busses
+ *    (include/linux/wimax/i2400m.h); these can be also used by user
+ *    space code.
+ *  - internal API for the bus-generic code
+ *  - external API for the bus-specific drivers
+ *
+ *
+ * LIFE CYCLE:
+ *
+ * When the bus-specific driver probes, it allocates a network device
+ * with enough space for its data structure, which must contain a
+ * &struct i2400m at the top.
+ *
+ * On probe, it needs to fill the i2400m members marked as [fill], as
+ * well as i2400m->wimax_dev.net_dev and call i2400m_setup(). The
+ * i2400m driver will only register with the WiMAX and network stacks;
+ * the only access done to the device is to read the MAC address so we
+ * can register a network device. This calls i2400m_dev_start() to
+ * load firmware, setup communication with the device and configure it
+ * for operation.
+ *
+ * At this point, control and data communications are possible.
+ *
+ * On disconnect/driver unload, the bus-specific disconnect function
+ * calls i2400m_release() to undo i2400m_setup(). i2400m_dev_stop()
+ * shuts the firmware down and releases the resources used to communicate
+ * with the device.
+ *
+ * While the device is up, it might reset. The bus-specific driver has
+ * to catch that situation and call i2400m_dev_reset_handle() to deal
+ * with it (reset the internal driver structures and go back to square
+ * one).
+ */
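+
+/*
+ * A minimal sketch of the probe-time sequence described above. The
+ * names (my_bus_reset, the i2400mX private struct, the "wmx%d" name
+ * template) are illustrative assumptions; the real versions live in
+ * the bus-specific drivers:
+ *
+ *   net_dev = alloc_netdev(sizeof(*i2400mX), "wmx%d", i2400m_netdev_setup);
+ *   i2400mX = netdev_priv(net_dev);
+ *   i2400m = &i2400mX->i2400m;
+ *   i2400m->wimax_dev.net_dev = net_dev;
+ *   i2400m->bus_tx_block_size = ...;      (and the other [fill] members)
+ *   i2400m->bus_reset = my_bus_reset;
+ *   result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT);
+ *   if (result < 0)
+ *           goto error_setup;
+ */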
+
+#ifndef __I2400M_H__
+#define __I2400M_H__
+
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/completion.h>
+#include <linux/rwsem.h>
+#include <asm/atomic.h>
+#include <net/wimax.h>
+#include <linux/wimax/i2400m.h>
+#include <asm/byteorder.h>
+
+/* Misc constants */
+enum {
+	/* Firmware uploading */
+	I2400M_BOOT_RETRIES = 3,
+	/* Size of the Boot Mode Command buffer */
+	I2400M_BM_CMD_BUF_SIZE = 16 * 1024,
+	I2400M_BM_ACK_BUF_SIZE = 256,
+};
+
+
+/* Firmware version we request when pulling the fw image file */
+#define I2400M_FW_VERSION "1.3"
+
+
+/**
+ * i2400m_reset_type - methods to reset a device
+ *
+ * @I2400M_RT_WARM: Reset without device disconnection, device handles
+ *     are kept valid but state is back to power on, with firmware
+ *     re-uploaded.
+ * @I2400M_RT_COLD: Tell the device to disconnect itself from the bus
+ *     and reconnect. Renders all device handles invalid.
+ * @I2400M_RT_BUS: Tells the bus to reset the device; last measure
+ *     used when both types above don't work.
+ */
+enum i2400m_reset_type {
+	I2400M_RT_WARM,	/* first measure */
+	I2400M_RT_COLD,	/* second measure */
+	I2400M_RT_BUS,	/* call in artillery */
+};
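+
+/*
+ * Escalation sketch (the exact policy lives in the callers): try
+ * I2400M_RT_WARM first; if the device does not recover, I2400M_RT_COLD;
+ * as the call in artillery, I2400M_RT_BUS -- for example, the timeout
+ * path of fw.c:i2400m_bootrom_init() ends up doing
+ *
+ *   i2400m->bus_reset(i2400m, I2400M_RT_BUS);
+ */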
+
+struct i2400m_reset_ctx;
+
+/**
+ * struct i2400m - descriptor for an Intel 2400m
+ *
+ * Members marked with [fill] must be filled out/initialized before
+ * calling i2400m_setup().
+ *
+ * @bus_tx_block_size: [fill] SDIO imposes a 256 block size, USB 16,
+ *     so we have a tx_blk_size variable that the bus layer sets to
+ *     tell the engine how much of that we need.
+ *
+ * @bus_pl_size_max: [fill] Maximum payload size.
+ *
+ * @bus_dev_start: [fill] Function called by the bus-generic code
+ *     [i2400m_dev_start()] to setup the bus-specific communications
+ *     to the device. See LIFE CYCLE above.
+ *
+ *     NOTE: Doesn't need to upload the firmware, as that is taken
+ *     care of by the bus-generic code.
+ *
+ * @bus_dev_stop: [fill] Function called by the bus-generic code
+ *     [i2400m_dev_stop()] to shutdown the bus-specific communications
+ *     to the device. See LIFE CYCLE above.
+ *
+ *     This function does not need to reset the device, just tear down
+ *     all the host resources created to handle communication with
+ *     the device.
+ *
+ * @bus_tx_kick: [fill] Function called by the bus-generic code to let
+ *     the bus-specific code know that there is data available in the
+ *     TX FIFO for transmission to the device.
+ *
+ *     This function cannot sleep.
+ *
+ * @bus_reset: [fill] Function called by the bus-generic code to reset
+ *     the device in various ways. Doesn't need to wait for the
+ *     reset to finish.
+ *
+ *     If warm or cold reset fail, this function is expected to do a
+ *     bus-specific reset (eg: USB reset) to get the device to a
+ *     working state (even if it implies device disconnection).
+ *
+ *     Note the warm reset is used by the firmware uploader to
+ *     reinitialize the device.
+ *
+ *     IMPORTANT: this is called very early in the device setup
+ *     process, so it cannot rely on common infrastructure being laid
+ *     out.
+ *
+ * @bus_bm_cmd_send: [fill] Function called to send a boot-mode
+ *     command. Flags are defined in 'enum i2400m_bm_cmd_flags'. This
+ *     is synchronous and has to return 0 if ok or < 0 errno code in
+ *     any error condition.
+ *
+ * @bus_bm_wait_for_ack: [fill] Function called to wait for a
+ *     boot-mode notification (that can be a response to a previously
+ *     issued command or an asynchronous one). Will read until all the
+ *     indicated size is read or timeout. Reading more or less data
+ *     than asked for is an error condition. Return 0 if ok, < 0 errno
+ *     code on error.
+ *
+ *     The caller to this function will check if the response is a
+ *     barker that indicates the device going into reset mode.
+ *
+ * @bus_fw_name: [fill] name of the firmware image (in most cases,
+ *     they are all the same for a single release, except that they
+ *     have the type of the bus embedded in the name, eg:
+ *     i2400m-fw-X-VERSION.sbcf, where X is the bus name).
+ *
+ * @bus_bm_mac_addr_impaired: [fill] Set to true if the device's MAC
+ *     address provided in boot mode is kind of broken and needs to
+ *     be re-read later on.
+ *
+ *
+ * @wimax_dev: WiMAX generic device for linkage into the kernel WiMAX
+ *     stack. Due to the way a net_device is allocated, we need to
+ *     force this to be the first field so that we can get from
+ *     netdev_priv() the right pointer.
+ *
+ * @state: device's state (as reported by it)
+ *
+ * @state_wq: waitqueue that is woken up whenever the state changes
+ *
+ * @tx_lock: spinlock to protect TX members
+ *
+ * @tx_buf: FIFO buffer for TX; we queue data here
+ *
+ * @tx_in: FIFO index for incoming data. Note this doesn't wrap around
+ *     and it is always greater than @tx_out.
+ *
+ * @tx_out: FIFO index for outgoing data
+ *
+ * @tx_msg: current TX message that is active in the FIFO for
+ *     appending payloads.
+ *
+ * @tx_sequence: current sequence number for TX messages from the
+ *     host to the device.
+ *
+ * @tx_msg_size: size of the current message being transmitted by the
+ *     bus-specific code.
+ *
+ * @tx_pl_num: total number of payloads sent
+ *
+ * @tx_pl_max: maximum number of payloads sent in a TX message
+ *
+ * @tx_pl_min: minimum number of payloads sent in a TX message
+ *
+ * @tx_num: number of TX messages sent
+ *
+ * @tx_size_acc: number of bytes in all TX messages sent
+ *     (this is different to net_dev's statistics as it also counts
+ *     control messages).
+ *
+ * @tx_size_min: smallest TX message sent.
+ *
+ * @tx_size_max: biggest TX message sent.
+ *
+ * @rx_lock: spinlock to protect RX members
+ *
+ * @rx_pl_num: total number of payloads received
+ *
+ * @rx_pl_max: maximum number of payloads received in a RX message
+ *
+ * @rx_pl_min: minimum number of payloads received in a RX message
+ *
+ * @rx_num: number of RX messages received
+ *
+ * @rx_size_acc: number of bytes in all RX messages received
+ *     (this is different to net_dev's statistics as it also counts
+ *     control messages).
+ *
+ * @rx_size_min: smallest RX message received.
+ *
+ * @rx_size_max: biggest RX message received.
+ *
+ * @init_mutex: Mutex used for serializing the device bringup
+ *     sequence; this way if the device reboots in the middle, we
+ *     don't try to do a bringup again while we are tearing down the
+ *     one that failed.
+ *
+ *     Can't reuse @msg_mutex because from within the bringup sequence
+ *     we need to send messages to the device and thus use @msg_mutex.
+ *
+ * @msg_mutex: mutex used to send control commands to the device (we
+ *     only allow one at a time, per host-device interface design).
+ *
+ * @msg_completion: used to wait for an ack to a control command sent
+ *     to the device.
+ *
+ * @ack_skb: used to store the actual ack to a control command if the
+ *     reception of the command was successful. Otherwise, a ERR_PTR()
+ *     errno code that indicates what failed with the ack reception.
+ *
+ *     Only valid after @msg_completion is woken up. Only updateable
+ *     if @msg_completion is armed. Only touched by
+ *     i2400m_msg_to_dev().
+ *
+ *     Protected by @rx_lock. In theory the command execution flow is
+ *     sequential, but in case the device sends an out-of-phase or
+ *     very delayed response, we need to avoid it trampling current
+ *     execution.
+ *
+ * @bm_cmd_buf: boot mode command buffer for composing firmware upload
+ *     commands.
+ *
+ *     USB can't r/w to stack, vmalloc, etc.; as well, we end up
+ *     having to alloc/free a lot to compose commands, so we use these
+ *     for staging and avoid having to realloc all the time.
+ *
+ *     This assumes the code always runs serialized. Only one thread
+ *     can call i2400m_bm_cmd() at the same time.
+ *
+ * @bm_ack_buf: boot mode acknowledge buffer for staging reception of
+ *     responses to commands.
+ *
+ *     See @bm_cmd_buf.
+ *
+ * @work_queue: work queue for processing device reports. This
+ *     workqueue cannot be used for processing TX or RX to the device,
+ *     as from it we'll process device reports, which might require
+ *     further communication with the device.
+ *
+ * @debugfs_dentry: hookup for debugfs files.
+ *     These have to be in a separate directory, a child of
+ *     (wimax_dev->debugfs_dentry) so they can be removed when the
+ *     module unloads, as we don't keep each dentry.
+ */
+struct i2400m {
+	struct wimax_dev wimax_dev;	/* FIRST! See doc */
+
+	unsigned updown:1;		/* Network device is up or down */
+	unsigned boot_mode:1;		/* is the device in boot mode? */
+	unsigned sboot:1;		/* signed or unsigned fw boot */
+	unsigned ready:1;		/* all probing steps done */
+	u8 trace_msg_from_user;		/* echo rx msgs to 'trace' pipe */
+					/* typed u8 so debugfs/u8 can tweak */
+	enum i2400m_system_state state;
+	wait_queue_head_t state_wq;	/* Woken up when on state updates */
+
+	size_t bus_tx_block_size;
+	size_t bus_pl_size_max;
+	int (*bus_dev_start)(struct i2400m *);
+	void (*bus_dev_stop)(struct i2400m *);
+	void (*bus_tx_kick)(struct i2400m *);
+	int (*bus_reset)(struct i2400m *, enum i2400m_reset_type);
+	ssize_t (*bus_bm_cmd_send)(struct i2400m *,
+				   const struct i2400m_bootrom_header *,
+				   size_t, int flags);
+	ssize_t (*bus_bm_wait_for_ack)(struct i2400m *,
+				       struct i2400m_bootrom_header *, size_t);
+	const char *bus_fw_name;
+	unsigned bus_bm_mac_addr_impaired:1;
+
+	spinlock_t tx_lock;		/* protect TX state */
+	void *tx_buf;
+	size_t tx_in, tx_out;
+	struct i2400m_msg_hdr *tx_msg;
+	size_t tx_sequence, tx_msg_size;
+	/* TX stats */
+	unsigned tx_pl_num, tx_pl_max, tx_pl_min,
+		tx_num, tx_size_acc, tx_size_min, tx_size_max;
+
+	/* RX stats */
+	spinlock_t rx_lock;		/* protect RX state */
+	unsigned rx_pl_num, rx_pl_max, rx_pl_min,
+		rx_num, rx_size_acc, rx_size_min, rx_size_max;
+
+	struct mutex msg_mutex;		/* serialize command execution */
+	struct completion msg_completion;
+	struct sk_buff *ack_skb;	/* protected by rx_lock */
+
+	void *bm_ack_buf;		/* for receiving acks over USB */
+	void *bm_cmd_buf;		/* for issuing commands over USB */
+
+	struct workqueue_struct *work_queue;
+
+	struct mutex init_mutex;	/* protect bringup seq */
+	struct i2400m_reset_ctx *reset_ctx;	/* protected by init_mutex */
+
+	struct work_struct wake_tx_ws;
+	struct sk_buff *wake_tx_skb;
+
+	struct dentry *debugfs_dentry;
+};
+
+
+/*
+ * Initialize a 'struct i2400m' from all zeroes
+ *
+ * This is a bus-generic API call.
+ */
+static inline
+void i2400m_init(struct i2400m *i2400m)
+{
+	wimax_dev_init(&i2400m->wimax_dev);
+
+	i2400m->boot_mode = 1;
+	init_waitqueue_head(&i2400m->state_wq);
+
+	spin_lock_init(&i2400m->tx_lock);
+	i2400m->tx_pl_min = UINT_MAX;
+	i2400m->tx_size_min = UINT_MAX;
+
+	spin_lock_init(&i2400m->rx_lock);
+	i2400m->rx_pl_min = UINT_MAX;
+	i2400m->rx_size_min = UINT_MAX;
+
+	mutex_init(&i2400m->msg_mutex);
+	init_completion(&i2400m->msg_completion);
+
+	mutex_init(&i2400m->init_mutex);
+	/* wake_tx_ws is initialized in i2400m_tx_setup() */
+}
+
+
+/*
+ * Bus-generic internal APIs
+ * -------------------------
+ */
+
+static inline
+struct i2400m *wimax_dev_to_i2400m(struct wimax_dev *wimax_dev)
+{
+	return container_of(wimax_dev, struct i2400m, wimax_dev);
+}
+
+static inline
+struct i2400m *net_dev_to_i2400m(struct net_device *net_dev)
+{
+	return wimax_dev_to_i2400m(netdev_priv(net_dev));
+}
+
+/*
+ * Boot mode support
+ */
+
+/**
+ * i2400m_bm_cmd_flags - flags to i2400m_bm_cmd()
+ *
+ * @I2400M_BM_CMD_RAW: send the command block as-is, without doing any
+ *     extra processing for adding CRC.
+ */
+enum i2400m_bm_cmd_flags {
+	I2400M_BM_CMD_RAW	= 1 << 2,
+};
+
+/**
+ * i2400m_bri - Boot-ROM indicators
+ *
+ * Flags for i2400m_bootrom_init() and i2400m_dev_bootstrap() [which
+ * are passed from things like i2400m_setup()]. Can be combined with
+ * |.
+ *
+ * @I2400M_BRI_SOFT: The device rebooted already and a reboot
+ *     barker received, proceed directly to ack the boot sequence.
+ * @I2400M_BRI_NO_REBOOT: Do not reboot the device and proceed
+ *     directly to wait for a reboot barker from the device.
+ * @I2400M_BRI_MAC_REINIT: We need to reinitialize the boot
+ *     rom after reading the MAC address. This is quite a dirty hack,
+ *     if you ask me -- the device requires the bootrom to be
+ *     initialized after reading the MAC address.
+ */
+enum i2400m_bri {
+	I2400M_BRI_SOFT       = 1 << 1,
+	I2400M_BRI_NO_REBOOT  = 1 << 2,
+	I2400M_BRI_MAC_REINIT = 1 << 3,
+};
+
+extern void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
+extern int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
+extern int i2400m_read_mac_addr(struct i2400m *);
+extern int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
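+
+/*
+ * Example (sketch): a bus driver that has already seen a reboot barker
+ * and whose hardware needs the bootrom reinitialized after reading the
+ * MAC address could call:
+ *
+ *   result = i2400m_dev_bootstrap(i2400m,
+ *                                 I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
+ *   if (result < 0)
+ *           dev_err(dev, "cannot bootstrap device: %d\n", result);
+ */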
+
+/* Make/grok boot-rom header commands */
+
+static inline
+__le32 i2400m_brh_command(enum i2400m_brh_opcode opcode, unsigned use_checksum,
+			  unsigned direct_access)
+{
+	return cpu_to_le32(
+		I2400M_BRH_SIGNATURE
+		| (direct_access ? I2400M_BRH_DIRECT_ACCESS : 0)
+		| I2400M_BRH_RESPONSE_REQUIRED /* response always required */
+		| (use_checksum ? I2400M_BRH_USE_CHECKSUM : 0)
+		| (opcode & I2400M_BRH_OPCODE_MASK));
+}
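+
+/*
+ * For example, fw.c:i2400m_read_mac_addr() composes a boot-mode READ of
+ * six bytes with direct access and no checksum like this:
+ *
+ *   cmd->command = i2400m_brh_command(I2400M_BRH_READ, 0, 1);
+ *   cmd->target_addr = cpu_to_le32(0x00203fe8);
+ *   cmd->data_size = cpu_to_le32(6);
+ */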
+
+static inline
+void i2400m_brh_set_opcode(struct i2400m_bootrom_header *hdr,
+			   enum i2400m_brh_opcode opcode)
+{
+	hdr->command = cpu_to_le32(
+		(le32_to_cpu(hdr->command) & ~I2400M_BRH_OPCODE_MASK)
+		| (opcode & I2400M_BRH_OPCODE_MASK));
+}
+
+static inline
+unsigned i2400m_brh_get_opcode(const struct i2400m_bootrom_header *hdr)
+{
+	return le32_to_cpu(hdr->command) & I2400M_BRH_OPCODE_MASK;
+}
+
+static inline
+unsigned i2400m_brh_get_response(const struct i2400m_bootrom_header *hdr)
+{
+	return (le32_to_cpu(hdr->command) & I2400M_BRH_RESPONSE_MASK)
+		>> I2400M_BRH_RESPONSE_SHIFT;
+}
+
+static inline
+unsigned i2400m_brh_get_use_checksum(const struct i2400m_bootrom_header *hdr)
+{
+	return le32_to_cpu(hdr->command) & I2400M_BRH_USE_CHECKSUM;
+}
+
+static inline
+unsigned i2400m_brh_get_response_required(
+	const struct i2400m_bootrom_header *hdr)
+{
+	return le32_to_cpu(hdr->command) & I2400M_BRH_RESPONSE_REQUIRED;
+}
+
+static inline
+unsigned i2400m_brh_get_direct_access(const struct i2400m_bootrom_header *hdr)
+{
+	return le32_to_cpu(hdr->command) & I2400M_BRH_DIRECT_ACCESS;
+}
+
+static inline
+unsigned i2400m_brh_get_signature(const struct i2400m_bootrom_header *hdr)
+{
+	return (le32_to_cpu(hdr->command) & I2400M_BRH_SIGNATURE_MASK)
+		>> I2400M_BRH_SIGNATURE_SHIFT;
+}
+
+
+/*
+ * Driver / device setup and internal functions
+ */
+extern void i2400m_netdev_setup(struct net_device *net_dev);
+extern int i2400m_tx_setup(struct i2400m *);
+extern void i2400m_wake_tx_work(struct work_struct *);
+extern void i2400m_tx_release(struct i2400m *);
+
+extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned,
+			  const void *, int);
+enum i2400m_pt;
+extern int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
+
+#ifdef CONFIG_DEBUG_FS
+extern int i2400m_debugfs_add(struct i2400m *);
+extern void i2400m_debugfs_rm(struct i2400m *);
+#else
+static inline int i2400m_debugfs_add(struct i2400m *i2400m)
+{
+	return 0;
+}
+static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {}
+#endif
+
+/* Called by _dev_start()/_dev_stop() to initialize the device itself */
+extern int i2400m_dev_initialize(struct i2400m *);
+extern void i2400m_dev_shutdown(struct i2400m *);
+
+extern struct attribute_group i2400m_dev_attr_group;
+
+extern int i2400m_schedule_work(struct i2400m *,
+				void (*)(struct work_struct *), gfp_t);
+
+/* HDI message's payload description handling */
+
+static inline
+size_t i2400m_pld_size(const struct i2400m_pld *pld)
+{
+	return I2400M_PLD_SIZE_MASK & le32_to_cpu(pld->val);
+}
+
+static inline
+enum i2400m_pt i2400m_pld_type(const struct i2400m_pld *pld)
+{
+	return (I2400M_PLD_TYPE_MASK & le32_to_cpu(pld->val))
+		>> I2400M_PLD_TYPE_SHIFT;
+}
+
+static inline
+void i2400m_pld_set(struct i2400m_pld *pld, size_t size,
+		    enum i2400m_pt type)
+{
+	pld->val = cpu_to_le32(
+		((type << I2400M_PLD_TYPE_SHIFT) & I2400M_PLD_TYPE_MASK)
+		|  (size & I2400M_PLD_SIZE_MASK));
+}
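+
+/*
+ * Example round trip (sketch; I2400M_PT_DATA stands for one of the
+ * payload types from include/linux/wimax/i2400m.h):
+ *
+ *   struct i2400m_pld pld;
+ *   i2400m_pld_set(&pld, skb->len, I2400M_PT_DATA);
+ *   ...
+ *   size = i2400m_pld_size(&pld);     (back to skb->len)
+ *   type = i2400m_pld_type(&pld);     (back to I2400M_PT_DATA)
+ */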
+
+
+/*
+ * API for the bus-specific drivers
+ * --------------------------------
+ */
+
+static inline
+struct i2400m *i2400m_get(struct i2400m *i2400m)
+{
+	dev_hold(i2400m->wimax_dev.net_dev);
+	return i2400m;
+}
+
+static inline
+void i2400m_put(struct i2400m *i2400m)
+{
+	dev_put(i2400m->wimax_dev.net_dev);
+}
+
+extern int i2400m_dev_reset_handle(struct i2400m *);
+
+/*
+ * _setup()/_release() are called by the probe/disconnect functions of
+ * the bus-specific drivers.
+ */
+extern int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
+extern void i2400m_release(struct i2400m *);
+
+extern int i2400m_rx(struct i2400m *, struct sk_buff *);
+extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
+extern void i2400m_tx_msg_sent(struct i2400m *);
+
+static const __le32 i2400m_NBOOT_BARKER[4] = {
+	__constant_cpu_to_le32(I2400M_NBOOT_BARKER),
+	__constant_cpu_to_le32(I2400M_NBOOT_BARKER),
+	__constant_cpu_to_le32(I2400M_NBOOT_BARKER),
+	__constant_cpu_to_le32(I2400M_NBOOT_BARKER)
+};
+
+static const __le32 i2400m_SBOOT_BARKER[4] = {
+	__constant_cpu_to_le32(I2400M_SBOOT_BARKER),
+	__constant_cpu_to_le32(I2400M_SBOOT_BARKER),
+	__constant_cpu_to_le32(I2400M_SBOOT_BARKER),
+	__constant_cpu_to_le32(I2400M_SBOOT_BARKER)
+};
+
+
+/*
+ * Utility functions
+ */
+
+static inline
+struct device *i2400m_dev(struct i2400m *i2400m)
+{
+	return i2400m->wimax_dev.net_dev->dev.parent;
+}
+
+/*
+ * Helper for scheduling simple work functions
+ *
+ * This struct can get any kind of payload attached (normally in the
+ * form of a struct where you pack the stuff you want to pass to the
+ * _work function).
+ */
+struct i2400m_work {
+	struct work_struct ws;
+	struct i2400m *i2400m;
+	u8 pl[0];
+};
+extern int i2400m_queue_work(struct i2400m *,
+			     void (*)(struct work_struct *), gfp_t,
+				const void *, size_t);
+
+extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
+				   char *, size_t);
+extern int i2400m_msg_size_check(struct i2400m *,
+				 const struct i2400m_l3l4_hdr *, size_t);
+extern struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
+extern void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
+extern void i2400m_msg_ack_hook(struct i2400m *,
+				const struct i2400m_l3l4_hdr *, size_t);
+extern void i2400m_report_hook(struct i2400m *,
+			       const struct i2400m_l3l4_hdr *, size_t);
+extern int i2400m_cmd_enter_powersave(struct i2400m *);
+extern int i2400m_cmd_get_state(struct i2400m *);
+extern int i2400m_cmd_exit_idle(struct i2400m *);
+extern struct sk_buff *i2400m_get_device_info(struct i2400m *);
+extern int i2400m_firmware_check(struct i2400m *);
+extern int i2400m_set_init_config(struct i2400m *,
+				  const struct i2400m_tlv_hdr **, size_t);
+
+static inline
+struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
+{
+	return &iface->cur_altsetting->endpoint[ep].desc;
+}
+
+extern int i2400m_op_rfkill_sw_toggle(struct wimax_dev *,
+				      enum wimax_rf_state);
+extern void i2400m_report_tlv_rf_switches_status(
+	struct i2400m *, const struct i2400m_tlv_rf_switches_status *);
+
+
+/*
+ * Do a millisecond sleep to allow wireshark to dump all the data
+ * packets. Used only for debugging.
+ */
+static inline
+void __i2400m_msleep(unsigned ms)
+{
+#if 1
+#else
+	msleep(ms);
+#endif
+}
+
+/* Module parameters */
+
+extern int i2400m_idle_mode_disabled;
+
+
+#endif /* #ifndef __I2400M_H__ */
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
new file mode 100644
index 0000000..63fe708
--- /dev/null
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -0,0 +1,524 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Glue with the networking stack
+ *
+ *
+ * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This implements an ethernet device for the i2400m.
+ *
+ * We fake being an ethernet device to simplify the support from user
+ * space and from the other side. The world is (sadly) configured to
+ * take in only Ethernet devices...
+ *
+ * Because of this, currently there is a copy-each-rxed-packet
+ * overhead on the RX path. Each IP packet has to be reallocated to
+ * add an ethernet header (as there is no space in what we get from
+ * the device). This is a known drawback and coming versions of the
+ * device's firmware are being changed to add header space that can be
+ * used to insert the ethernet header without having to reallocate and
+ * copy.
+ *
+ * TX error handling is tricky; because we have to FIFO/queue the
+ * buffers for transmission (as the hardware likes it aggregated), we
+ * just give the skb to the TX subsystem and by the time it is
+ * transmitted, we have long forgotten about it. So we just don't care
+ * too much about it.
+ *
+ * Note that when the device is in idle mode with the basestation, we
+ * need to negotiate coming back up online. That negotiation might
+ * require user space interaction, so we defer all of it to a
+ * workqueue. By default, we only queue a single packet and drop
+ * the rest, as potentially the time to go back from idle to normal is
+ * long.
+ *
+ * ROADMAP
+ *
+ * i2400m_open         Called on ifconfig up
+ * i2400m_stop         Called on ifconfig down
+ *
+ * i2400m_hard_start_xmit Called by the network stack to send a packet
+ *   i2400m_net_wake_tx	  Wake up device from basestation-IDLE & TX
+ *     i2400m_wake_tx_work
+ *       i2400m_cmd_exit_idle
+ *       i2400m_tx
+ *   i2400m_net_tx        TX a data frame
+ *     i2400m_tx
+ *
+ * i2400m_change_mtu      Called on ifconfig mtu XXX
+ *
+ * i2400m_tx_timeout      Called when the device times out
+ *
+ * i2400m_net_rx          Called by the RX code when a data frame is
+ *                        available.
+ * i2400m_netdev_setup    Called to setup all the netdev stuff from
+ *                        alloc_netdev.
+ */
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include "i2400m.h"
+
+
+#define D_SUBMODULE netdev
+#include "debug-levels.h"
+
+enum {
+/* netdev interface */
+	/*
+	 * Out of NWG spec (R1_v1.2.2), 3.3.3 ASN Bearer Plane MTU Size
+	 *
+	 * The MTU is 1400 or less
+	 */
+	I2400M_MAX_MTU = 1400,
+	I2400M_TX_TIMEOUT = HZ,
+	I2400M_TX_QLEN = 5,
+};
+
+
+static
+int i2400m_open(struct net_device *net_dev)
+{
+	int result;
+	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+	struct device *dev = i2400m_dev(i2400m);
+
+	d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
+	if (i2400m->ready == 0) {
+		dev_err(dev, "Device is still initializing\n");
+		result = -EBUSY;
+	} else
+		result = 0;
+	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
+		net_dev, i2400m, result);
+	return result;
+}
+
+
+/*
+ * Close the interface (called on ifconfig down)
+ * On kernel versions where cancel_work_sync() didn't return anything,
+ * we rely on wake_tx_skb being non-NULL.
+ */
+static
+int i2400m_stop(struct net_device *net_dev)
+{
+	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+	struct device *dev = i2400m_dev(i2400m);
+
+	d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
+	/* See i2400m_hard_start_xmit(), references are taken there
+	 * and here we release them if the work was still
+	 * pending. Note we can't differentiate work not pending vs
+	 * never scheduled, so the NULL check does that. */
+	if (cancel_work_sync(&i2400m->wake_tx_ws) == 0
+	    && i2400m->wake_tx_skb != NULL) {
+		unsigned long flags;
+		struct sk_buff *wake_tx_skb;
+		spin_lock_irqsave(&i2400m->tx_lock, flags);
+		wake_tx_skb = i2400m->wake_tx_skb;	/* compat help */
+		i2400m->wake_tx_skb = NULL;	/* compat help */
+		spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+		i2400m_put(i2400m);
+		kfree_skb(wake_tx_skb);
+	}
+	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m);
+	return 0;
+}
+
+
+/*
+ * Wake up the device and transmit a held SKB, then restart the net queue
+ *
+ * When the device goes into basestation-idle mode, we need to tell it
+ * to exit that mode; it will negotiate with the base station, user
+ * space may have to intervene to rehandshake crypto and then tell us
+ * when it is ready to transmit the packet we have "queued". Still we
+ * need to give it some time after it reports being ok.
+ *
+ * On error, there is not much we can do. If the error was on TX, we
+ * still wake the queue up to see if the next packet will be luckier.
+ *
+ * If _cmd_exit_idle() fails...well, it could be many things; most
+ * commonly it is that something else took the device out of IDLE mode
+ * (for example, the base station). In that case we get an -EILSEQ and
+ * we are just going to ignore that one. If the device is back to
+ * connected, then fine -- if it is some other state, the packet will
+ * be dropped anyway.
+ */
+void i2400m_wake_tx_work(struct work_struct *ws)
+{
+	int result;
+	struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
+	struct device *dev = i2400m_dev(i2400m);
+	struct sk_buff *skb = i2400m->wake_tx_skb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	skb = i2400m->wake_tx_skb;
+	i2400m->wake_tx_skb = NULL;
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
+	d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb);
+	result = -EINVAL;
+	if (skb == NULL) {
+		dev_err(dev, "WAKE&TX: skb disappeared!\n");
+		goto out_put;
+	}
+	result = i2400m_cmd_exit_idle(i2400m);
+	if (result == -EILSEQ)
+		result = 0;
+	if (result < 0) {
+		dev_err(dev, "WAKE&TX: device didn't get out of idle: "
+			"%d\n", result);
+		goto error;
+	}
+	result = wait_event_timeout(i2400m->state_wq,
+				    i2400m->state != I2400M_SS_IDLE, 5 * HZ);
+	if (result == 0)
+		result = -ETIMEDOUT;
+	if (result < 0) {
+		dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: "
+			"%d\n", result);
+		goto error;
+	}
+	msleep(20);	/* device still needs some time or it drops it */
+	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
+	netif_wake_queue(i2400m->wimax_dev.net_dev);
+error:
+	kfree_skb(skb);	/* refcount transferred by _hard_start_xmit() */
+out_put:
+	i2400m_put(i2400m);
+	d_fnend(3, dev, "(ws %p i2400m %p skb %p) = void [%d]\n",
+		ws, i2400m, skb, result);
+}
+
+
+/*
+ * Prepare the data payload TX header
+ *
+ * The i2400m expects a 4 byte header in front of a data packet.
+ *
+ * Because we pretend to be an ethernet device, this packet comes with
+ * an ethernet header. Pull it and push our header.
+ */
+static
+void i2400m_tx_prep_header(struct sk_buff *skb)
+{
+	struct i2400m_pl_data_hdr *pl_hdr;
+	skb_pull(skb, ETH_HLEN);
+	pl_hdr = (struct i2400m_pl_data_hdr *) skb_push(skb, sizeof(*pl_hdr));
+	pl_hdr->reserved = 0;
+}
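The same header swap, written as a standalone user-space sketch for clarity (not part of the patch). The 4-byte size comes from the comment above; treating the data header as nothing but zeroed 'reserved' bytes is an assumption made here only for illustration.

	#include <string.h>
	#include <stddef.h>

	#define EX_ETH_HLEN      14   /* Ethernet header the stack prepended */
	#define EX_DATA_HDR_LEN   4   /* i2400m data header (assumed all 'reserved') */

	/* Reuse the tail of the Ethernet header as the zeroed data header
	 * and return the start of what would be handed to i2400m_tx(). */
	static unsigned char *ex_prep_payload(unsigned char *frame, size_t *len)
	{
		unsigned char *pl = frame + EX_ETH_HLEN - EX_DATA_HDR_LEN;

		memset(pl, 0, EX_DATA_HDR_LEN);
		*len = *len - EX_ETH_HLEN + EX_DATA_HDR_LEN;
		return pl;
	}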
+
+
+/*
+ * TX an skb to an idle device
+ *
+ * When the device is in basestation-idle mode, we need to wake it up
+ * and then TX. So we queue a work_struct for doing so.
+ *
+ * We need to get an extra ref for the skb (so it is not dropped), as
+ * well as be careful not to queue more than one request (won't help
+ * at all). If more than one request comes or there are errors, we
+ * just drop the packets (see i2400m_hard_start_xmit()).
+ */
+static
+int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
+		       struct sk_buff *skb)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	unsigned long flags;
+
+	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
+	if (net_ratelimit()) {
+		d_printf(3, dev, "WAKE&NETTX: "
+			 "skb %p sending %d bytes to radio\n",
+			 skb, skb->len);
+		d_dump(4, dev, skb->data, skb->len);
+	}
+	/* We hold a ref count for i2400m and skb, so when
+	 * stopping() the device, we need to cancel that work
+	 * and if pending, release those resources. */
+	result = 0;
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	if (!work_pending(&i2400m->wake_tx_ws)) {
+		netif_stop_queue(net_dev);
+		i2400m_get(i2400m);
+		i2400m->wake_tx_skb = skb_get(skb);	/* transfer ref count */
+		i2400m_tx_prep_header(skb);
+		result = schedule_work(&i2400m->wake_tx_ws);
+		WARN_ON(result == 0);
+	}
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+	if (result == 0) {
+		/* Yes, this happens even if we stopped the
+		 * queue -- blame the queue disciplines that
+		 * queue without looking -- I guess there is a reason
+		 * for that. */
+		if (net_ratelimit())
+			d_printf(1, dev, "NETTX: device exiting idle, "
+				 "dropping skb %p, queue running %d\n",
+				 skb, netif_queue_stopped(net_dev));
+		result = -EBUSY;
+	}
+	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
+	return result;
+}
+
+
+/*
+ * Transmit a packet to the base station on behalf of the network stack.
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * We need to pull the ethernet header and add the hardware header,
+ * which is currently set to all zeroes and reserved.
+ */
+static
+int i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev,
+		  struct sk_buff *skb)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+
+	d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n",
+		  i2400m, net_dev, skb);
+	/* FIXME: check eth hdr, only IPv4 is routed by the device as of now */
+	net_dev->trans_start = jiffies;
+	i2400m_tx_prep_header(skb);
+	d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n",
+		 skb, skb->len);
+	d_dump(4, dev, skb->data, skb->len);
+	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
+	d_fnend(3, dev, "(i2400m %p net_dev %p skb %p) = %d\n",
+		i2400m, net_dev, skb, result);
+	return result;
+}
+
+
+/*
+ * Transmit a packet to the base station on behalf of the network stack
+ *
+ *
+ * Returns: NETDEV_TX_OK (always, even in case of error)
+ *
+ * In case of error, we just drop it. Reasons:
+ *
+ *  - we add a hw header to each skb, and if the network stack
+ *    retries, we have no way to know if that skb has it or not.
+ *
+ *  - network protocols have their own drop-recovery mechanisms
+ *
+ *  - there is not much else we can do
+ *
+ * If the device is idle, we need to wake it up; that is an operation
+ * that will sleep. See i2400m_net_wake_tx() for details.
+ */
+static
+int i2400m_hard_start_xmit(struct sk_buff *skb,
+			   struct net_device *net_dev)
+{
+	int result;
+	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+	struct device *dev = i2400m_dev(i2400m);
+
+	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
+	if (i2400m->state == I2400M_SS_IDLE)
+		result = i2400m_net_wake_tx(i2400m, net_dev, skb);
+	else
+		result = i2400m_net_tx(i2400m, net_dev, skb);
+	if (result <  0)
+		net_dev->stats.tx_dropped++;
+	else {
+		net_dev->stats.tx_packets++;
+		net_dev->stats.tx_bytes += skb->len;
+	}
+	kfree_skb(skb);
+	result = NETDEV_TX_OK;
+	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
+	return result;
+}
+
+
+static
+int i2400m_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+	int result;
+	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+	struct device *dev = i2400m_dev(i2400m);
+
+	if (new_mtu >= I2400M_MAX_MTU) {
+		dev_err(dev, "Cannot change MTU to %d (max is %d)\n",
+			new_mtu, I2400M_MAX_MTU);
+		result = -EINVAL;
+	} else {
+		net_dev->mtu = new_mtu;
+		result = 0;
+	}
+	return result;
+}
+
+
+static
+void i2400m_tx_timeout(struct net_device *net_dev)
+{
+	/*
+	 * We might want to kick the device
+	 *
+	 * There is not much we can do though, as the device requires
+	 * that we send the data aggregated. By the time we receive
+	 * this, there might be data pending to be sent or not...
+	 */
+	net_dev->stats.tx_errors++;
+	return;
+}
+
+
+/*
+ * Create a fake ethernet header
+ *
+ * For emulating an ethernet device, every received IP header has to
+ * be prefixed with an ethernet header.
+ *
+ * What we receive has (potentially) many IP packets concatenated with
+ * no ETH_HLEN bytes prefixed. Thus there is no space for an eth
+ * header.
+ *
+ * We would have to reallocate or do ugly fragment tricks in order to
+ * add it.
+ *
+ * But what we do is use the header space of the RX transaction
+ * (*msg_hdr) as we don't need it anymore; then we'll point all the
+ * data skbs there, as they share the same backing store.
+ *
+ * We only support IPv4 for v3 firmware.
+ */
+static
+void i2400m_rx_fake_eth_header(struct net_device *net_dev,
+			       void *_eth_hdr)
+{
+	struct ethhdr *eth_hdr = _eth_hdr;
+
+	memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest));
+	memset(eth_hdr->h_source, 0, sizeof(eth_hdr->h_source));
+	eth_hdr->h_proto = __constant_cpu_to_be16(ETH_P_IP);
+}
+
+
+/*
+ * i2400m_net_rx - pass a network packet to the stack
+ *
+ * @i2400m: device instance
+ * @skb_rx: the skb where the buffer pointed to by @buf is
+ * @i: 1 if the payload is the only one in the message
+ * @buf: pointer to the buffer containing the data
+ * @buf_len: buffer's length
+ *
+ * We set up the skb (reusing or reallocating it) so that its data
+ * points to @buf and its length is @buf_len.
+ *
+ * Note that if the payload is the only one in the message, we don't
+ * copy the data; we just take a reference to the skb and reuse it.
+ *
+ * This function is normally run from a thread context. However, we
+ * still use netif_rx() instead of netif_receive_skb() as was
+ * recommended in the mailing list. Reason is in some stress tests
+ * when sending/receiving a lot of data we seem to hit a softlock in
+ * the kernel's TCP implementation [around tcp_delay_timer()]. Using
+ * netif_rx() took care of the issue.
+ *
+ * This is, of course, still open to do more research on why running
+ * with netif_receive_skb() hits this softlock. FIXME.
+ *
+ * FIXME: currently we don't do any efforts at distinguishing if what
+ * we got was an IPv4 or IPv6 header, to setup the protocol field
+ * correctly.
+ */
+void i2400m_net_rx(struct i2400m *i2400m, struct sk_buff *skb_rx,
+		   unsigned i, const void *buf, int buf_len)
+{
+	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+	struct device *dev = i2400m_dev(i2400m);
+	struct sk_buff *skb;
+
+	d_fnstart(2, dev, "(i2400m %p buf %p buf_len %d)\n",
+		  i2400m, buf, buf_len);
+	if (i) {
+		skb = skb_get(skb_rx);
+		d_printf(2, dev, "RX: reusing first payload skb %p\n", skb);
+		skb_pull(skb, buf - (void *) skb->data);
+		skb_trim(skb, (void *) skb_end_pointer(skb) - buf);
+	} else {
+		/* Yes, this is bad -- a lot of overhead -- see
+		 * comments at the top of the file */
+		skb = __netdev_alloc_skb(net_dev, buf_len, GFP_KERNEL);
+		if (skb == NULL) {
+			dev_err(dev, "NETRX: no memory to realloc skb\n");
+			net_dev->stats.rx_dropped++;
+			goto error_skb_realloc;
+		}
+		memcpy(skb_put(skb, buf_len), buf, buf_len);
+	}
+	i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
+				  skb->data - ETH_HLEN);
+	skb_set_mac_header(skb, -ETH_HLEN);
+	skb->dev = i2400m->wimax_dev.net_dev;
+	skb->protocol = htons(ETH_P_IP);
+	net_dev->stats.rx_packets++;
+	net_dev->stats.rx_bytes += buf_len;
+	d_printf(3, dev, "NETRX: receiving %d bytes to network stack\n",
+		buf_len);
+	d_dump(4, dev, buf, buf_len);
+	netif_rx_ni(skb);	/* see notes in function header */
+error_skb_realloc:
+	d_fnend(2, dev, "(i2400m %p buf %p buf_len %d) = void\n",
+		i2400m, buf, buf_len);
+}
+
+
+/**
+ * i2400m_netdev_setup - Set up @net_dev's i2400m private data
+ *
+ * Called by alloc_netdev()
+ */
+void i2400m_netdev_setup(struct net_device *net_dev)
+{
+	d_fnstart(3, NULL, "(net_dev %p)\n", net_dev);
+	ether_setup(net_dev);
+	net_dev->mtu = I2400M_MAX_MTU;
+	net_dev->tx_queue_len = I2400M_TX_QLEN;
+	net_dev->features =
+		  NETIF_F_VLAN_CHALLENGED
+		| NETIF_F_HIGHDMA;
+	net_dev->flags =
+		IFF_NOARP		/* i2400m is a pure IP device */
+		& (~IFF_BROADCAST	/* i2400m is P2P */
+		   & ~IFF_MULTICAST);
+	net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
+	net_dev->open = i2400m_open;
+	net_dev->stop = i2400m_stop;
+	net_dev->hard_start_xmit = i2400m_hard_start_xmit;
+	net_dev->change_mtu = i2400m_change_mtu;
+	net_dev->tx_timeout = i2400m_tx_timeout;
+	d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
+}
+EXPORT_SYMBOL_GPL(i2400m_netdev_setup);
+
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
new file mode 100644
index 0000000..487ec58
--- /dev/null
+++ b/drivers/net/wimax/i2400m/op-rfkill.c
@@ -0,0 +1,207 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Implement backend for the WiMAX stack rfkill support
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * The WiMAX kernel stack integrates into RF-Kill and keeps the
+ * switches' status. We just need to:
+ *
+ * - report changes in the HW RF Kill switch [with
+ *   wimax_rfkill_{sw,hw}_report(), which happens when we detect those
+ *   indications coming through hardware reports]. We also do it on
+ *   initialization to let the stack know the initial HW state.
+ *
+ * - implement indications from the stack to change the SW RF Kill
+ *   switch (coming from sysfs, the wimax stack or user space).
+ */
+#include "i2400m.h"
+#include <linux/wimax/i2400m.h>
+
+
+
+#define D_SUBMODULE rfkill
+#include "debug-levels.h"
+
+/*
+ * Return true if the i2400m radio is in the requested wimax_rf_state state
+ *
+ */
+static
+int i2400m_radio_is(struct i2400m *i2400m, enum wimax_rf_state state)
+{
+	if (state == WIMAX_RF_OFF)
+		return i2400m->state == I2400M_SS_RF_OFF
+			|| i2400m->state == I2400M_SS_RF_SHUTDOWN;
+	else if (state == WIMAX_RF_ON)
+		/* state == WIMAX_RF_ON */
+		return i2400m->state != I2400M_SS_RF_OFF
+			&& i2400m->state != I2400M_SS_RF_SHUTDOWN;
+	else
+		BUG();
+}
+
+
+/*
+ * WiMAX stack operation: implement SW RFKill toggling
+ *
+ * @wimax_dev: device descriptor
+ * @state: requested software RF-Kill switch state; WIMAX_RF_ON to
+ *         turn the radio on, WIMAX_RF_OFF to turn it off (mapped to
+ *         the status values 1 and 2 sent to the device below)
+ *
+ * Generic Netlink will call this function when a message is sent from
+ * userspace to change the software RF-Kill switch status.
+ *
+ * This function will set the device's software RF-Kill switch state to
+ * match what is requested.
+ *
+ * NOTE: the i2400m has a strict state machine; we can only set the
+ *       RF-Kill switch when it is on, the HW RF-Kill is on and the
+ *       device is initialized. So we ignore errors stemming from not
+ *       being in the right state (-EILSEQ).
+ */
+int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
+			       enum wimax_rf_state state)
+{
+	int result;
+	struct i2400m *i2400m = wimax_dev_to_i2400m(wimax_dev);
+	struct device *dev = i2400m_dev(i2400m);
+	struct sk_buff *ack_skb;
+	struct {
+		struct i2400m_l3l4_hdr hdr;
+		struct i2400m_tlv_rf_operation sw_rf;
+	} __attribute__((packed)) *cmd;
+	char strerr[32];
+
+	d_fnstart(4, dev, "(wimax_dev %p state %d)\n", wimax_dev, state);
+
+	result = -ENOMEM;
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (cmd == NULL)
+		goto error_alloc;
+	cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL);
+	cmd->hdr.length = cpu_to_le16(sizeof(cmd->sw_rf));
+	cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
+	cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION);
+	cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status));
+	switch (state) {
+	case WIMAX_RF_OFF:	/* RFKILL ON, radio OFF */
+		cmd->sw_rf.status = cpu_to_le32(2);
+		break;
+	case WIMAX_RF_ON:	/* RFKILL OFF, radio ON */
+		cmd->sw_rf.status = cpu_to_le32(1);
+		break;
+	default:
+		BUG();
+	}
+
+	ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+	result = PTR_ERR(ack_skb);
+	if (IS_ERR(ack_skb)) {
+		dev_err(dev, "Failed to issue 'RF Control' command: %d\n",
+			result);
+		goto error_msg_to_dev;
+	}
+	result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
+					 strerr, sizeof(strerr));
+	if (result < 0) {
+		dev_err(dev, "'RF Control' (0x%04x) command failed: %d - %s\n",
+			I2400M_MT_CMD_RF_CONTROL, result, strerr);
+		goto error_cmd;
+	}
+
+	/* Now we wait for the state to change to RADIO_OFF or RADIO_ON */
+	result = wait_event_timeout(
+		i2400m->state_wq, i2400m_radio_is(i2400m, state),
+		5 * HZ);
+	if (result == 0)
+		result = -ETIMEDOUT;
+	if (result < 0)
+		dev_err(dev, "Error waiting for device to toggle RF state: "
+			"%d\n", result);
+	result = 0;
+error_cmd:
+	kfree_skb(ack_skb);
+error_msg_to_dev:
+error_alloc:
+	d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n",
+		wimax_dev, state, result);
+	return result;
+}
+
+
+/*
+ * Inform the WiMAX stack of changes in the RF Kill switches reported
+ * by the device
+ *
+ * @i2400m: device descriptor
+ * @rfss: TLV for RF Switches status; already validated
+ *
+ * NOTE: the reports on RF switch status cannot be trusted
+ *       or used until the device is in a state of RADIO_OFF
+ *       or greater.
+ */
+void i2400m_report_tlv_rf_switches_status(
+	struct i2400m *i2400m,
+	const struct i2400m_tlv_rf_switches_status *rfss)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	enum i2400m_rf_switch_status hw, sw;
+	enum wimax_st wimax_state;
+
+	sw = le32_to_cpu(rfss->sw_rf_switch);
+	hw = le32_to_cpu(rfss->hw_rf_switch);
+
+	d_fnstart(3, dev, "(i2400m %p rfss %p [hw %u sw %u])\n",
+		  i2400m, rfss, hw, sw);
+	/* We only process RF switch events when the device has been
+	 * fully initialized */
+	wimax_state = wimax_state_get(&i2400m->wimax_dev);
+	if (wimax_state < WIMAX_ST_RADIO_OFF) {
+		d_printf(3, dev, "ignoring RF switches report, state %u\n",
+			 wimax_state);
+		goto out;
+	}
+	switch (sw) {
+	case I2400M_RF_SWITCH_ON:	/* RF Kill disabled (radio on) */
+		wimax_report_rfkill_sw(&i2400m->wimax_dev, WIMAX_RF_ON);
+		break;
+	case I2400M_RF_SWITCH_OFF:	/* RF Kill enabled (radio off) */
+		wimax_report_rfkill_sw(&i2400m->wimax_dev, WIMAX_RF_OFF);
+		break;
+	default:
+		dev_err(dev, "HW BUG? Unknown RF SW state 0x%x\n", sw);
+	}
+
+	switch (hw) {
+	case I2400M_RF_SWITCH_ON:	/* RF Kill disabled (radio on) */
+		wimax_report_rfkill_hw(&i2400m->wimax_dev, WIMAX_RF_ON);
+		break;
+	case I2400M_RF_SWITCH_OFF:	/* RF Kill enabled (radio off) */
+		wimax_report_rfkill_hw(&i2400m->wimax_dev, WIMAX_RF_OFF);
+		break;
+	default:
+		dev_err(dev, "HW BUG? Unknown RF HW state 0x%x\n", hw);
+	}
+out:
+	d_fnend(3, dev, "(i2400m %p rfss %p [hw %u sw %u]) = void\n",
+		i2400m, rfss, hw, sw);
+}
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
new file mode 100644
index 0000000..69220227
--- /dev/null
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -0,0 +1,534 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Handle incoming traffic and deliver it to the control or data planes
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ *  - Initial implementation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *  - Use skb_clone(), break up processing in chunks
+ *  - Split transport/device specific
+ *  - Make buffer size dynamic to exert less memory pressure
+ *
+ *
+ * This handles the RX path.
+ *
+ * We receive an RX message from the bus-specific driver, which
+ * contains one or more payloads that have potentially different
+ * destinations (the data or control planes).
+ *
+ * So we just take that payload from the transport specific code in
+ * the form of an skb, break it up in chunks (a cloned skb each in the
+ * case of network packets) and pass it to netdev or to the
+ * command/ack handler (and from there to the WiMAX stack).
+ *
+ * PROTOCOL FORMAT
+ *
+ * The format of the buffer is:
+ *
+ * HEADER                      (struct i2400m_msg_hdr)
+ * PAYLOAD DESCRIPTOR 0        (struct i2400m_pld)
+ * PAYLOAD DESCRIPTOR 1
+ * ...
+ * PAYLOAD DESCRIPTOR N
+ * PAYLOAD 0                   (raw bytes)
+ * PAYLOAD 1
+ * ...
+ * PAYLOAD N
+ *
+ * See tx.c for a deeper description on alignment requirements and
+ * other fun facts of it.
+ *
+ * ROADMAP
+ *
+ * i2400m_rx
+ *   i2400m_rx_msg_hdr_check
+ *   i2400m_rx_pl_descr_check
+ *   i2400m_rx_payload
+ *     i2400m_net_rx
+ *     i2400m_rx_ctl
+ *       i2400m_msg_size_check
+ *       i2400m_report_hook_work    [in a workqueue]
+ *         i2400m_report_hook
+ *       wimax_msg_to_user
+ *       i2400m_rx_ctl_ack
+ *         wimax_msg_to_user_alloc
+ *     i2400m_rx_trace
+ *       i2400m_msg_size_check
+ *       wimax_msg
+ */
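A standalone sketch (not part of the patch) of the offset arithmetic the layout above implies; this is essentially what i2400m_rx() below does with pl_itr. The header/descriptor sizes and the padding value are placeholders, the real ones come from the i2400m message structures.

	#include <stdio.h>
	#include <stddef.h>

	#define EX_MSG_HDR_SIZE 16   /* placeholder for sizeof(struct i2400m_msg_hdr) */
	#define EX_PLD_SIZE      4   /* placeholder for sizeof(struct i2400m_pld) */
	#define EX_PL_PAD       16   /* placeholder for I2400M_PL_PAD */

	#define EX_ALIGN(x, a)  (((x) + (a) - 1) / (a) * (a))

	/* Print where each payload would start in a received buffer */
	static void ex_walk(unsigned num_pls, const size_t *pl_sizes)
	{
		size_t itr = EX_ALIGN(EX_MSG_HDR_SIZE + num_pls * EX_PLD_SIZE,
				      EX_PL_PAD);
		unsigned i;

		for (i = 0; i < num_pls; i++) {
			printf("payload %u: offset %zu, %zu bytes\n",
			       i, itr, pl_sizes[i]);
			itr += EX_ALIGN(pl_sizes[i], EX_PL_PAD);
		}
	}

	int main(void)
	{
		size_t sizes[] = { 1400, 60, 8 };

		ex_walk(3, sizes);
		return 0;
	}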
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include "i2400m.h"
+
+
+#define D_SUBMODULE rx
+#include "debug-levels.h"
+
+struct i2400m_report_hook_args {
+	struct sk_buff *skb_rx;
+	const struct i2400m_l3l4_hdr *l3l4_hdr;
+	size_t size;
+};
+
+
+/*
+ * Execute i2400m_report_hook in a workqueue
+ *
+ * Unpacks arguments from the deferred call, executes it and then
+ * drops the references.
+ *
+ * Obvious NOTE: References are needed because we are a separate
+ *     thread; otherwise the buffer changes under us because it is
+ *     released by the original caller.
+ */
+static
+void i2400m_report_hook_work(struct work_struct *ws)
+{
+	struct i2400m_work *iw =
+		container_of(ws, struct i2400m_work, ws);
+	struct i2400m_report_hook_args *args = (void *) iw->pl;
+	i2400m_report_hook(iw->i2400m, args->l3l4_hdr, args->size);
+	kfree_skb(args->skb_rx);
+	i2400m_put(iw->i2400m);
+	kfree(iw);
+}
+
+
+/*
+ * Process an ack to a command
+ *
+ * @i2400m: device descriptor
+ * @payload: pointer to message
+ * @size: size of the message
+ *
+ * Pass the acknowledgment (in an skb) to the thread that is waiting
+ * for it in i2400m->msg_completion.
+ *
+ * We need to coordinate properly with the thread waiting for the
+ * ack. Check if it is waiting or if it is gone. We release the spinlock
+ * to avoid allocating in atomic context (yeah, we could use GFP_ATOMIC,
+ * but this is not so speed critical).
+ */
+static
+void i2400m_rx_ctl_ack(struct i2400m *i2400m,
+		       const void *payload, size_t size)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+	unsigned long flags;
+	struct sk_buff *ack_skb;
+
+	/* Anyone waiting for an answer? */
+	spin_lock_irqsave(&i2400m->rx_lock, flags);
+	if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
+		dev_err(dev, "Huh? reply to command with no waiters\n");
+		goto error_no_waiter;
+	}
+	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+
+	ack_skb = wimax_msg_alloc(wimax_dev, NULL, payload, size, GFP_KERNEL);
+
+	/* Check waiter didn't time out waiting for the answer... */
+	spin_lock_irqsave(&i2400m->rx_lock, flags);
+	if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
+		d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
+		goto error_waiter_cancelled;
+	}
+	if (ack_skb == NULL) {
+		dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
+		i2400m->ack_skb = ERR_PTR(-ENOMEM);
+	} else
+		i2400m->ack_skb = ack_skb;
+	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+	complete(&i2400m->msg_completion);
+	return;
+
+error_waiter_cancelled:
+	if (ack_skb)
+		kfree_skb(ack_skb);
+error_no_waiter:
+	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+	return;
+}
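The lock / drop-the-lock-to-allocate / re-check dance above is a general pattern; here is a hedged user-space sketch of the same idea with a pthread mutex (not part of the patch, all names invented; the (void *)-1 sentinel stands in for ERR_PTR(-EINPROGRESS)).

	#include <pthread.h>
	#include <stdlib.h>
	#include <string.h>

	static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;
	static void *ex_answer;                 /* NULL: nobody waiting */
	#define EX_WAITING ((void *) -1)        /* waiter registered, no answer yet */

	static void ex_deliver(const void *payload, size_t size)
	{
		void *copy;

		pthread_mutex_lock(&ex_lock);
		if (ex_answer != EX_WAITING) {  /* anyone waiting at all? */
			pthread_mutex_unlock(&ex_lock);
			return;
		}
		pthread_mutex_unlock(&ex_lock);

		copy = malloc(size);            /* allocate without holding the lock */
		if (copy)
			memcpy(copy, payload, size);

		pthread_mutex_lock(&ex_lock);
		if (ex_answer != EX_WAITING) {  /* waiter gave up meanwhile? */
			pthread_mutex_unlock(&ex_lock);
			free(copy);
			return;
		}
		ex_answer = copy;               /* publish; wake the waiter elsewhere */
		pthread_mutex_unlock(&ex_lock);
	}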
+
+
+/*
+ * Receive and process a control payload
+ *
+ * @i2400m: device descriptor
+ * @skb_rx: skb that contains the payload (for reference counting)
+ * @payload: pointer to message
+ * @size: size of the message
+ *
+ * There are two types of control RX messages: reports (asynchronous,
+ * like your every day interrupts) and 'acks' (responses to a command,
+ * get or set request).
+ *
+ * If it is a report, we run hooks on it (to extract information for
+ * things we need to do in the driver) and then pass it over to the
+ * WiMAX stack to send it to user space.
+ *
+ * NOTE: report processing is done in a workqueue specific to the
+ *     generic driver, to avoid deadlocks in the system.
+ *
+ * If it is not a report, it is an ack to a previously executed
+ * command, set or get, so wake up whoever is waiting for it from
+ * i2400m_msg_to_dev(). i2400m_rx_ctl_ack() takes care of that.
+ *
+ * Note that the sizes we pass to other functions from here are the
+ * sizes of the _l3l4_hdr + payload, not full buffer sizes, as we have
+ * verified in _msg_size_check() that they are congruent.
+ *
+ * For reports: We can't clone the original skb where the data is
+ * because we need to send this up via netlink; netlink has to add
+ * headers and we can't overwrite what's preceding the payload...as
+ * it is another message. So we just dup them.
+ */
+static
+void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
+		   const void *payload, size_t size)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
+	unsigned msg_type;
+
+	result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
+	if (result < 0) {
+		dev_err(dev, "HW BUG? device sent a bad message: %d\n",
+			result);
+		goto error_check;
+	}
+	msg_type = le16_to_cpu(l3l4_hdr->type);
+	d_printf(1, dev, "%s 0x%04x: %zu bytes\n",
+		 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
+		 msg_type, size);
+	d_dump(2, dev, l3l4_hdr, size);
+	if (msg_type & I2400M_MT_REPORT_MASK) {
+		/* These hooks have to be run serialized; as well, the
+		 * handling might force the execution of commands, and
+		 * that might cause reentrancy issues with
+		 * bus-specific subdrivers and workqueues. So we run
+		 * it in a separate workqueue. */
+		struct i2400m_report_hook_args args = {
+			.skb_rx = skb_rx,
+			.l3l4_hdr = l3l4_hdr,
+			.size = size
+		};
+		if (unlikely(i2400m->ready == 0))	/* only send if up */
+			return;
+		skb_get(skb_rx);
+		i2400m_queue_work(i2400m, i2400m_report_hook_work,
+				  GFP_KERNEL, &args, sizeof(args));
+		result = wimax_msg(&i2400m->wimax_dev, NULL, l3l4_hdr, size,
+				   GFP_KERNEL);
+		if (result < 0)
+			dev_err(dev, "error sending report to userspace: %d\n",
+				result);
+	} else		/* an ack to a CMD, GET or SET */
+		i2400m_rx_ctl_ack(i2400m, payload, size);
+error_check:
+	return;
+}
+
+
+
+
+/*
+ * Receive and send up a trace
+ *
+ * @i2400m: device descriptor
+ * @payload: pointer to trace message inside the skb
+ * @size: size of the message
+ *
+ * The i2400m might produce trace information (diagnostics) and we
+ * send them through a different kernel-to-user pipe (to avoid
+ * clogging it).
+ *
+ * As in i2400m_rx_ctl(), we can't clone the original skb where the
+ * data is because we need to send this up via netlink; netlink has to
+ * add headers and we can't overwrite what's preceding the
+ * payload...as it is another message. So we just dup them.
+ */
+static
+void i2400m_rx_trace(struct i2400m *i2400m,
+		     const void *payload, size_t size)
+{
+	int result;
+	struct device *dev = i2400m_dev(i2400m);
+	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+	const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
+	unsigned msg_type;
+
+	result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
+	if (result < 0) {
+		dev_err(dev, "HW BUG? device sent a bad trace message: %d\n",
+			result);
+		goto error_check;
+	}
+	msg_type = le16_to_cpu(l3l4_hdr->type);
+	d_printf(1, dev, "Trace %s 0x%04x: %zu bytes\n",
+		 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
+		 msg_type, size);
+	d_dump(2, dev, l3l4_hdr, size);
+	if (unlikely(i2400m->ready == 0))	/* only send if up */
+		return;
+	result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL);
+	if (result < 0)
+		dev_err(dev, "error sending trace to userspace: %d\n",
+			result);
+error_check:
+	return;
+}
+
+
+/*
+ * Act on a received payload
+ *
+ * @i2400m: device instance
+ * @skb_rx: skb where the transaction was received
+ * @single: 1 if there is only one payload, 0 otherwise
+ * @pld: payload descriptor
+ * @payload: payload data
+ *
+ * Upon reception of a payload, look at its guts in the payload
+ * descriptor and decide what to do with it.
+ */
+static
+void i2400m_rx_payload(struct i2400m *i2400m, struct sk_buff *skb_rx,
+		       unsigned single, const struct i2400m_pld *pld,
+		       const void *payload)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	size_t pl_size = i2400m_pld_size(pld);
+	enum i2400m_pt pl_type = i2400m_pld_type(pld);
+
+	switch (pl_type) {
+	case I2400M_PT_DATA:
+		d_printf(3, dev, "RX: data payload %zu bytes\n", pl_size);
+		i2400m_net_rx(i2400m, skb_rx, single, payload, pl_size);
+		break;
+	case I2400M_PT_CTRL:
+		i2400m_rx_ctl(i2400m, skb_rx, payload, pl_size);
+		break;
+	case I2400M_PT_TRACE:
+		i2400m_rx_trace(i2400m, payload, pl_size);
+		break;
+	default:	/* Anything else shouldn't come to the host */
+		if (printk_ratelimit())
+			dev_err(dev, "RX: HW BUG? unexpected payload type %u\n",
+				pl_type);
+	}
+}
+
+
+/*
+ * Check a received transaction's message header
+ *
+ * @i2400m: device descriptor
+ * @msg_hdr: message header
+ * @buf_size: size of the received buffer
+ *
+ * Check that the declarations done by a RX buffer message header are
+ * sane and consistent with the amount of data that was received.
+ */
+static
+int i2400m_rx_msg_hdr_check(struct i2400m *i2400m,
+			    const struct i2400m_msg_hdr *msg_hdr,
+			    size_t buf_size)
+{
+	int result = -EIO;
+	struct device *dev = i2400m_dev(i2400m);
+	if (buf_size < sizeof(*msg_hdr)) {
+		dev_err(dev, "RX: HW BUG? message with short header (%zu "
+			"vs %zu bytes expected)\n", buf_size, sizeof(*msg_hdr));
+		goto error;
+	}
+	if (msg_hdr->barker != cpu_to_le32(I2400M_D2H_MSG_BARKER)) {
+		dev_err(dev, "RX: HW BUG? message received with unknown "
+			"barker 0x%08x (buf_size %zu bytes)\n",
+			le32_to_cpu(msg_hdr->barker), buf_size);
+		goto error;
+	}
+	if (msg_hdr->num_pls == 0) {
+		dev_err(dev, "RX: HW BUG? zero payload packets in message\n");
+		goto error;
+	}
+	if (le16_to_cpu(msg_hdr->num_pls) > I2400M_MAX_PLS_IN_MSG) {
+		dev_err(dev, "RX: HW BUG? message contains more payload "
+			"than maximum; ignoring.\n");
+		goto error;
+	}
+	result = 0;
+error:
+	return result;
+}
+
+
+/*
+ * Check a payload descriptor against the received data
+ *
+ * @i2400m: device descriptor
+ * @pld: payload descriptor
+ * @pl_itr: offset (in bytes) in the received buffer the payload is
+ *          located
+ * @buf_size: size of the received buffer
+ *
+ * Given a payload descriptor (part of a RX buffer), check it is sane
+ * and that the data it declares fits in the buffer.
+ */
+static
+int i2400m_rx_pl_descr_check(struct i2400m *i2400m,
+			      const struct i2400m_pld *pld,
+			      size_t pl_itr, size_t buf_size)
+{
+	int result = -EIO;
+	struct device *dev = i2400m_dev(i2400m);
+	size_t pl_size = i2400m_pld_size(pld);
+	enum i2400m_pt pl_type = i2400m_pld_type(pld);
+
+	if (pl_size > i2400m->bus_pl_size_max) {
+		dev_err(dev, "RX: HW BUG? payload @%zu: size %zu is "
+			"bigger than maximum %zu; ignoring message\n",
+			pl_itr, pl_size, i2400m->bus_pl_size_max);
+		goto error;
+	}
+	if (pl_itr + pl_size > buf_size) {	/* enough? */
+		dev_err(dev, "RX: HW BUG? payload @%zu: size %zu "
+			"goes beyond the received buffer "
+			"size (%zu bytes); ignoring message\n",
+			pl_itr, pl_size, buf_size);
+		goto error;
+	}
+	if (pl_type >= I2400M_PT_ILLEGAL) {
+		dev_err(dev, "RX: HW BUG? illegal payload type %u; "
+			"ignoring message\n", pl_type);
+		goto error;
+	}
+	result = 0;
+error:
+	return result;
+}
+
+
+/**
+ * i2400m_rx - Receive a buffer of data from the device
+ *
+ * @i2400m: device descriptor
+ * @skb: skbuff where the data has been received
+ *
+ * Parse in a buffer of data that contains an RX message sent from the
+ * device. See the file header for the format. Run all checks on the
+ * buffer header, then run over each payload's descriptors, verify
+ * their consistency and act on each payload's contents.  If
+ * everything is successful, update the device's statistics.
+ *
+ * Note: the skb's length needs to be set to exactly the size of the
+ * received buffer; for that, use skb_trim(skb, RECEIVED_SIZE).
+ *
+ * Returns:
+ *
+ * 0 if ok, < 0 errno on error
+ *
+ * If ok, this function now owns the skb and the caller DOESN'T have
+ * to run kfree_skb() on it. However, on error, the caller still owns
+ * the skb and it is responsible for releasing it.
+ */
+int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
+{
+	int i, result;
+	struct device *dev = i2400m_dev(i2400m);
+	const struct i2400m_msg_hdr *msg_hdr;
+	size_t pl_itr, pl_size, skb_len;
+	unsigned long flags;
+	unsigned num_pls;
+
+	skb_len = skb->len;
+	d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n",
+		  i2400m, skb, skb_len);
+	result = -EIO;
+	msg_hdr = (void *) skb->data;
+	result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb->len);
+	if (result < 0)
+		goto error_msg_hdr_check;
+	result = -EIO;
+	num_pls = le16_to_cpu(msg_hdr->num_pls);
+	pl_itr = sizeof(*msg_hdr) +	/* Check payload descriptor(s) */
+		num_pls * sizeof(msg_hdr->pld[0]);
+	pl_itr = ALIGN(pl_itr, I2400M_PL_PAD);
+	if (pl_itr > skb->len) {	/* got all the payload descriptors? */
+		dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
+			"%u payload descriptors (%zu each, total %zu)\n",
+			skb->len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
+		goto error_pl_descr_short;
+	}
+	/* Walk each payload--check we really got it */
+	for (i = 0; i < num_pls; i++) {
+		/* work around old gcc warnings */
+		pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
+		result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
+						  pl_itr, skb->len);
+		if (result < 0)
+			goto error_pl_descr_check;
+		i2400m_rx_payload(i2400m, skb, num_pls == 1, &msg_hdr->pld[i],
+				  skb->data + pl_itr);
+		pl_itr += ALIGN(pl_size, I2400M_PL_PAD);
+		cond_resched();		/* Don't monopolize */
+	}
+	kfree_skb(skb);
+	/* Update device statistics */
+	spin_lock_irqsave(&i2400m->rx_lock, flags);
+	i2400m->rx_pl_num += i;
+	if (i > i2400m->rx_pl_max)
+		i2400m->rx_pl_max = i;
+	if (i < i2400m->rx_pl_min)
+		i2400m->rx_pl_min = i;
+	i2400m->rx_num++;
+	i2400m->rx_size_acc += skb_len;
+	if (skb_len < i2400m->rx_size_min)
+		i2400m->rx_size_min = skb_len;
+	if (skb_len > i2400m->rx_size_max)
+		i2400m->rx_size_max = skb_len;
+	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+error_pl_descr_check:
+error_pl_descr_short:
+error_msg_hdr_check:
+	d_fnend(4, dev, "(i2400m %p skb %p [size %zu]) = %d\n",
+		i2400m, skb, skb_len, result);
+	return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_rx);
diff --git a/drivers/net/wimax/i2400m/sdio-debug-levels.h b/drivers/net/wimax/i2400m/sdio-debug-levels.h
new file mode 100644
index 0000000..c519987
--- /dev/null
+++ b/drivers/net/wimax/i2400m/sdio-debug-levels.h
@@ -0,0 +1,22 @@
+/*
+ * debug levels control file for the i2400m SDIO module
+ */
+#ifndef __debug_levels__h__
+#define __debug_levels__h__
+
+/* Maximum compile and run time debug level for all submodules */
+#define D_MODULENAME i2400m_sdio
+#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL
+
+#include <linux/wimax/debug.h>
+
+/* List of all the enabled modules */
+enum d_module {
+	D_SUBMODULE_DECLARE(main),
+	D_SUBMODULE_DECLARE(tx),
+	D_SUBMODULE_DECLARE(rx),
+	D_SUBMODULE_DECLARE(fw)
+};
+
+
+#endif /* #ifndef __debug_levels__h__ */
diff --git a/drivers/net/wimax/i2400m/sdio-fw.c b/drivers/net/wimax/i2400m/sdio-fw.c
new file mode 100644
index 0000000..3487205
--- /dev/null
+++ b/drivers/net/wimax/i2400m/sdio-fw.c
@@ -0,0 +1,224 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Firmware uploader's SDIO specifics
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *  - Initial implementation
+ *
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *  - Bus generic/specific split for USB
+ *
+ * Dirk Brandewie <dirk.j.brandewie@intel.com>
+ *  - Initial implementation for SDIO
+ *
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *  - SDIO rehash for changes in the bus-driver model
+ *
+ * THE PROCEDURE
+ *
+ * See fw.c for the generic description of this procedure.
+ *
+ * This file implements only the SDIO specifics. It boils down to how
+ * to send a command and waiting for an acknowledgement from the
+ * device. We do polled reads.
+ *
+ * COMMAND EXECUTION
+ *
+ * The generic firmware upload code will call i2400m_bus_bm_cmd_send()
+ * to send commands.
+ *
+ * The SDIO device expects things in 256-byte blocks, so this code will
+ * pad the command, compute the checksum (if needed) and pass it to SDIO.
+ *
+ * ACK RECEPTION
+ *
+ * This works in polling mode -- the fw loader says when to wait for
+ * data and for that it calls i2400ms_bus_bm_wait_for_ack().
+ *
+ * This will poll the device for data until it is received. We need to
+ * receive at least as many bytes as were asked for (although it'll
+ * always be a multiple of 256 bytes).
+ */
+#include <linux/mmc/sdio_func.h>
+#include "i2400m-sdio.h"
+
+
+#define D_SUBMODULE fw
+#include "sdio-debug-levels.h"
+
+/*
+ * Send a boot-mode command to the SDIO function
+ *
+ * We use a bounce buffer (i2400m->bm_cmd_buf) because we need to
+ * touch the header if the RAW flag is not set.
+ *
+ * @flags: pass thru from i2400m_bm_cmd()
+ * @return: cmd_size if ok, < 0 errno code on error.
+ *
+ * Note the command is padded to the SDIO block size for the device.
+ */
+ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *i2400m,
+				const struct i2400m_bootrom_header *_cmd,
+				size_t cmd_size, int flags)
+{
+	ssize_t result;
+	struct device *dev = i2400m_dev(i2400m);
+	struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
+	int opcode = _cmd == NULL ? -1 : i2400m_brh_get_opcode(_cmd);
+	struct i2400m_bootrom_header *cmd;
+	/* SDIO restriction */
+	size_t cmd_size_a = ALIGN(cmd_size, I2400MS_BLK_SIZE);
+
+	d_fnstart(5, dev, "(i2400m %p cmd %p size %zu)\n",
+		  i2400m, _cmd, cmd_size);
+	result = -E2BIG;
+	if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
+		goto error_too_big;
+
+	memcpy(i2400m->bm_cmd_buf, _cmd, cmd_size);	/* Prep command */
+	cmd = i2400m->bm_cmd_buf;
+	if (cmd_size_a > cmd_size)			/* Zero pad space */
+		memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
+	if ((flags & I2400M_BM_CMD_RAW) == 0) {
+		if (WARN_ON(i2400m_brh_get_response_required(cmd) == 0))
+			dev_warn(dev, "SW BUG: response_required == 0\n");
+		i2400m_bm_cmd_prepare(cmd);
+	}
+	d_printf(4, dev, "BM cmd %d: %zu bytes (%zu padded)\n",
+		 opcode, cmd_size, cmd_size_a);
+	d_dump(5, dev, cmd, cmd_size);
+
+	sdio_claim_host(i2400ms->func);			/* Send & check */
+	result = sdio_memcpy_toio(i2400ms->func, I2400MS_DATA_ADDR,
+				  i2400m->bm_cmd_buf, cmd_size_a);
+	sdio_release_host(i2400ms->func);
+	if (result < 0) {
+		dev_err(dev, "BM cmd %d: cannot send: %ld\n",
+			opcode, (long) result);
+		goto error_cmd_send;
+	}
+	result = cmd_size;
+error_cmd_send:
+error_too_big:
+	d_fnend(5, dev, "(i2400m %p cmd %p size %zu) = %d\n",
+		i2400m, _cmd, cmd_size, (int) result);
+	return result;
+}
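The round-up that ALIGN() performs above, spelled out as a standalone sketch (not part of the patch). 256 is the block size mentioned in the file header and stands in here for I2400MS_BLK_SIZE.

	#include <stdio.h>
	#include <stddef.h>

	#define EX_BLK_SIZE 256   /* stand-in for I2400MS_BLK_SIZE */

	/* Round a command size up to a whole number of SDIO blocks; the
	 * padding bytes are zeroed before the transfer, as done above. */
	static size_t ex_pad_to_block(size_t cmd_size)
	{
		return (cmd_size + EX_BLK_SIZE - 1) & ~((size_t) EX_BLK_SIZE - 1);
	}

	int main(void)
	{
		printf("%d -> %zu\n", 20, ex_pad_to_block(20));    /* 20  -> 256 */
		printf("%d -> %zu\n", 256, ex_pad_to_block(256));  /* 256 -> 256 */
		printf("%d -> %zu\n", 300, ex_pad_to_block(300));  /* 300 -> 512 */
		return 0;
	}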
+
+
+/*
+ * Read an ack from the device's boot-mode (polling)
+ *
+ * @i2400m: device descriptor
+ * @_ack: pointer to where to store the read data
+ * @ack_size: how many bytes we should read
+ *
+ * Returns: < 0 errno code on error; otherwise, amount of received bytes.
+ *
+ * The ACK for a BM command is always at least sizeof(*ack) bytes, so
+ * check for that. We don't need to check for device reboots.
+ *
+ * NOTE: We do an artificial timeout (2 sec) on top of the SDIO one;
+ *     this way we have control over it...there is no way that I know
+ *     of to set an SDIO transaction timeout.
+ */
+ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *i2400m,
+				    struct i2400m_bootrom_header *ack,
+				    size_t ack_size)
+{
+	int result;
+	ssize_t rx_size;
+	u64 timeout;
+	struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
+	struct sdio_func *func = i2400ms->func;
+	struct device *dev = &func->dev;
+
+	BUG_ON(sizeof(*ack) > ack_size);
+
+	d_fnstart(5, dev, "(i2400m %p ack %p size %zu)\n",
+		  i2400m, ack, ack_size);
+
+	timeout = get_jiffies_64() + 2 * HZ;
+	sdio_claim_host(func);
+	while (1) {
+		if (time_after64(get_jiffies_64(), timeout)) {
+			rx_size = -ETIMEDOUT;
+			dev_err(dev, "timeout waiting for ack data\n");
+			goto error_timedout;
+		}
+
+		/* Find the RX size, check if it fits or not -- it if
+		 * doesn't fit, fail, as we have no way to dispose of
+		 * the extra data. */
+		rx_size = __i2400ms_rx_get_size(i2400ms);
+		if (rx_size < 0)
+			goto error_rx_get_size;
+		if (rx_size < sizeof(*ack)) {	/* Check it fits */
+			dev_err(dev, "HW BUG? received is too small (%zd vs "
+				"%zu needed)\n", rx_size, sizeof(*ack));
+			rx_size = -EIO;
+			goto error_too_small;
+		}
+		if (rx_size > I2400M_BM_ACK_BUF_SIZE) {
+			dev_err(dev, "SW BUG? BM_ACK_BUF is too small (%u vs "
+				"%zd needed)\n", I2400M_BM_ACK_BUF_SIZE,
+				rx_size);
+			rx_size = -ENOSPC;
+			goto error_too_small;
+		}
+
+		/* Read it */
+		result = sdio_memcpy_fromio(func, i2400m->bm_ack_buf,
+					    I2400MS_DATA_ADDR, rx_size);
+		if (result == -ETIMEDOUT || result == -ETIME)
+			continue;
+		if (result < 0) {
+			dev_err(dev, "BM SDIO receive (%zu B) failed: %d\n",
+				rx_size, result);
+			goto error_read;
+		} else
+			break;
+	}
+	rx_size = min((ssize_t)ack_size, rx_size);
+	memcpy(ack, i2400m->bm_ack_buf, rx_size);
+error_read:
+error_too_small:
+error_rx_get_size:
+error_timedout:
+	sdio_release_host(func);
+	d_fnend(5, dev, "(i2400m %p ack %p size %zu) = %ld\n",
+		i2400m, ack, ack_size, (long) rx_size);
+	return rx_size;
+}
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
new file mode 100644
index 0000000..a3008b9
--- /dev/null
+++ b/drivers/net/wimax/i2400m/sdio-rx.c
@@ -0,0 +1,255 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * SDIO RX handling
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Dirk Brandewie <dirk.j.brandewie@intel.com>
+ *  - Initial implementation
+ *
+ *
+ * This handles the RX path on SDIO.
+ *
+ * The SDIO bus driver calls the "irq" routine when data is available.
+ * This is not a traditional interrupt routine since the SDIO bus
+ * driver calls us from its irq thread context.  Because of this
+ * sleeping in the SDIO RX IRQ routine is okay.
+ *
+ * From there on, we obtain the size of the data that is available,
+ * allocate an skb, copy it and then pass it to the generic driver's
+ * RX routine [i2400m_rx()].
+ *
+ * ROADMAP
+ *
+ * i2400ms_irq()
+ *   i2400ms_rx()
+ *     __i2400ms_rx_get_size()
+ *     i2400m_rx()
+ *
+ * i2400ms_rx_setup()
+ *
+ * i2400ms_rx_release()
+ */
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/skbuff.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+#include "i2400m-sdio.h"
+
+#define D_SUBMODULE rx
+#include "sdio-debug-levels.h"
+
+
+/*
+ * Read and return the amount of bytes available for RX
+ *
+ * The RX size has to be read like this: byte reads of three
+ * sequential locations; then glue'em together.
+ *
+ * sdio_readl() doesn't work.
+ */
+ssize_t __i2400ms_rx_get_size(struct i2400ms *i2400ms)
+{
+	int ret, cnt, val;
+	ssize_t rx_size;
+	unsigned xfer_size_addr;
+	struct sdio_func *func = i2400ms->func;
+	struct device *dev = &i2400ms->func->dev;
+
+	d_fnstart(7, dev, "(i2400ms %p)\n", i2400ms);
+	xfer_size_addr = I2400MS_INTR_GET_SIZE_ADDR;
+	rx_size = 0;
+	for (cnt = 0; cnt < 3; cnt++) {
+		val = sdio_readb(func, xfer_size_addr + cnt, &ret);
+		if (ret < 0) {
+			dev_err(dev, "RX: Can't read byte %d of RX size from "
+				"0x%08x: %d\n", cnt, xfer_size_addr + cnt, ret);
+			rx_size = ret;
+			goto error_read;
+		}
+		rx_size = rx_size << 8 | (val & 0xff);
+	}
+	d_printf(6, dev, "RX: rx_size is %ld\n", (long) rx_size);
+error_read:
+	d_fnend(7, dev, "(i2400ms %p) = %ld\n", i2400ms, (long) rx_size);
+	return rx_size;
+}
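The glue-three-bytes-together step in isolation, as a standalone sketch (not part of the patch). The byte order (most significant byte first) follows the loop above; the example register values are made up.

	#include <stdio.h>

	/* b[0..2] are the three sequential size registers, MSB first */
	static long ex_rx_size(const unsigned char b[3])
	{
		long size = 0;
		int i;

		for (i = 0; i < 3; i++)
			size = size << 8 | (b[i] & 0xff);
		return size;
	}

	int main(void)
	{
		unsigned char regs[3] = { 0x00, 0x05, 0x7c };	/* 0x00057c == 1404 */

		printf("rx size: %ld bytes\n", ex_rx_size(regs));
		return 0;
	}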
+
+
+/*
+ * Read data from the device (when in normal operation)
+ *
+ * Allocate an SKB of the right size, read the data in and then
+ * deliver it to the generic layer.
+ *
+ * We also check for a reboot barker. That means the device died and
+ * we have to reboot it.
+ */
+static
+void i2400ms_rx(struct i2400ms *i2400ms)
+{
+	int ret;
+	struct sdio_func *func = i2400ms->func;
+	struct device *dev = &func->dev;
+	struct i2400m *i2400m = &i2400ms->i2400m;
+	struct sk_buff *skb;
+	ssize_t rx_size;
+
+	d_fnstart(7, dev, "(i2400ms %p)\n", i2400ms);
+	rx_size = __i2400ms_rx_get_size(i2400ms);
+	if (rx_size < 0) {
+		ret = rx_size;
+		goto error_get_size;
+	}
+	ret = -ENOMEM;
+	skb = alloc_skb(rx_size, GFP_ATOMIC);
+	if (NULL == skb) {
+		dev_err(dev, "RX: unable to alloc skb\n");
+		goto error_alloc_skb;
+	}
+
+	ret = sdio_memcpy_fromio(func, skb->data,
+				 I2400MS_DATA_ADDR, rx_size);
+	if (ret < 0) {
+		dev_err(dev, "RX: SDIO data read failed: %d\n", ret);
+		goto error_memcpy_fromio;
+	}
+	/* Check if device has reset */
+	if (!memcmp(skb->data, i2400m_NBOOT_BARKER,
+		    sizeof(i2400m_NBOOT_BARKER))
+	    || !memcmp(skb->data, i2400m_SBOOT_BARKER,
+		       sizeof(i2400m_SBOOT_BARKER))) {
+		ret = i2400m_dev_reset_handle(i2400m);
+		kfree_skb(skb);
+	} else {
+		skb_put(skb, rx_size);
+		i2400m_rx(i2400m, skb);
+	}
+	d_fnend(7, dev, "(i2400ms %p) = void\n", i2400ms);
+	return;
+
+error_memcpy_fromio:
+	kfree_skb(skb);
+error_alloc_skb:
+error_get_size:
+	d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret);
+	return;
+}
+
+
+/*
+ * Process an interrupt from the SDIO card
+ *
+ * FIXME: need to process other events that are not just ready-to-read
+ *
+ * Checks there is data ready and then proceeds to read it.
+ */
+static
+void i2400ms_irq(struct sdio_func *func)
+{
+	int ret;
+	struct i2400ms *i2400ms = sdio_get_drvdata(func);
+	struct i2400m *i2400m = &i2400ms->i2400m;
+	struct device *dev = &func->dev;
+	int val;
+
+	d_fnstart(6, dev, "(i2400ms %p)\n", i2400ms);
+	val = sdio_readb(func, I2400MS_INTR_STATUS_ADDR, &ret);
+	if (ret < 0) {
+		dev_err(dev, "RX: Can't read interrupt status: %d\n", ret);
+		goto error_no_irq;
+	}
+	if (!val) {
+		dev_err(dev, "RX: BUG? got IRQ but no interrupt ready?\n");
+		goto error_no_irq;
+	}
+	sdio_writeb(func, 1, I2400MS_INTR_CLEAR_ADDR, &ret);
+	if (WARN_ON(i2400m->boot_mode != 0))
+		dev_err(dev, "RX: SW BUG? boot mode and IRQ is up?\n");
+	else
+		i2400ms_rx(i2400ms);
+error_no_irq:
+	d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms);
+	return;
+}
+
+
+/*
+ * Setup SDIO RX
+ *
+ * Hooks up the IRQ handler and then enables IRQs.
+ */
+int i2400ms_rx_setup(struct i2400ms *i2400ms)
+{
+	int result;
+	struct sdio_func *func = i2400ms->func;
+	struct device *dev = &func->dev;
+
+	d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
+	sdio_claim_host(func);
+	result = sdio_claim_irq(func, i2400ms_irq);
+	if (result < 0) {
+		dev_err(dev, "Cannot claim IRQ: %d\n", result);
+		goto error_irq_claim;
+	}
+	result = 0;
+	sdio_writeb(func, 1, I2400MS_INTR_ENABLE_ADDR, &result);
+	if (result < 0) {
+		sdio_release_irq(func);
+		dev_err(dev, "Failed to enable interrupts %d\n", result);
+	}
+error_irq_claim:
+	sdio_release_host(func);
+	d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
+	return result;
+}
+
+
+/*
+ * Tear down SDIO RX
+ *
+ * Disables IRQs in the device and removes the IRQ handler.
+ */
+void i2400ms_rx_release(struct i2400ms *i2400ms)
+{
+	int result;
+	struct sdio_func *func = i2400ms->func;
+	struct device *dev = &func->dev;
+
+	d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
+	sdio_claim_host(func);
+	sdio_writeb(func, 0, I2400MS_INTR_ENABLE_ADDR, &result);
+	sdio_release_irq(func);
+	sdio_release_host(func);
+	d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
+}
diff --git a/drivers/net/wimax/i2400m/sdio-tx.c b/drivers/net/wimax/i2400m/sdio-tx.c
new file mode 100644
index 0000000..5105a5e
--- /dev/null
+++ b/drivers/net/wimax/i2400m/sdio-tx.c
@@ -0,0 +1,153 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * SDIO TX transaction backends
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Dirk Brandewie <dirk.j.brandewie@intel.com>
+ *  - Initial implementation
+ *
+ *
+ * Takes the TX messages in the i2400m's driver TX FIFO and sends them
+ * to the device until there are no more.
+ *
+ * If we fail sending the message, we just drop it. There isn't much
+ * we can do at this point. Most of the traffic is network, which has
+ * recovery methods for dropped packets.
+ *
+ * The SDIO functions are not atomic, so we can't run from the context
+ * where i2400m->bus_tx_kick() [i2400ms_bus_tx_kick()] is being called
+ * (sometimes atomic). Thus, the actual TX work is deferred to a
+ * workqueue.
+ *
+ * ROADMAP
+ *
+ * i2400ms_bus_tx_kick()
+ *   i2400ms_tx_submit()     [through workqueue]
+ *
+ * i2400m_tx_setup()
+ *
+ * i2400m_tx_release()
+ */
+#include <linux/mmc/sdio_func.h>
+#include "i2400m-sdio.h"
+
+#define D_SUBMODULE tx
+#include "sdio-debug-levels.h"
+
+
+/*
+ * Pull TX transactions from the TX FIFO and send them to the device
+ * until there are no more.
+ */
+static
+void i2400ms_tx_submit(struct work_struct *ws)
+{
+	int result;
+	struct i2400ms *i2400ms = container_of(ws, struct i2400ms, tx_worker);
+	struct i2400m *i2400m = &i2400ms->i2400m;
+	struct sdio_func *func = i2400ms->func;
+	struct device *dev = &func->dev;
+	struct i2400m_msg_hdr *tx_msg;
+	size_t tx_msg_size;
+
+	d_fnstart(4, dev, "(i2400ms %p, i2400m %p)\n", i2400ms, i2400ms);
+
+	while (NULL != (tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size))) {
+		d_printf(2, dev, "TX: submitting %zu bytes\n", tx_msg_size);
+		d_dump(5, dev, tx_msg, tx_msg_size);
+
+		sdio_claim_host(func);
+		result = sdio_memcpy_toio(func, 0, tx_msg, tx_msg_size);
+		sdio_release_host(func);
+
+		i2400m_tx_msg_sent(i2400m);
+
+		if (result < 0) {
+			dev_err(dev, "TX: cannot submit TX; tx_msg @%zu %zu B:"
+				" %d\n", (void *) tx_msg - i2400m->tx_buf,
+				tx_msg_size, result);
+		}
+
+		d_printf(2, dev, "TX: %zub submitted\n", tx_msg_size);
+	}
+
+	d_fnend(4, dev, "(i2400ms %p) = void\n", i2400ms);
+}
+
+
+/*
+ * The generic driver notifies us that there is data ready for TX
+ *
+ * Schedule a run of i2400ms_tx_submit() to handle it.
+ */
+void i2400ms_bus_tx_kick(struct i2400m *i2400m)
+{
+	struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
+	struct device *dev = &i2400ms->func->dev;
+
+	d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m);
+
+	/* Schedule the TX work; TX may block, so it has to run in
+	 * thread context.
+	 */
+	queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
+
+	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+
+int i2400ms_tx_setup(struct i2400ms *i2400ms)
+{
+	int result;
+	struct device *dev = &i2400ms->func->dev;
+	struct i2400m *i2400m = &i2400ms->i2400m;
+
+	d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
+
+	INIT_WORK(&i2400ms->tx_worker, i2400ms_tx_submit);
+	snprintf(i2400ms->tx_wq_name, sizeof(i2400ms->tx_wq_name),
+		 "%s-tx", i2400m->wimax_dev.name);
+	i2400ms->tx_workqueue =
+		create_singlethread_workqueue(i2400ms->tx_wq_name);
+	if (NULL == i2400ms->tx_workqueue) {
+		dev_err(dev, "TX: failed to create workqueue\n");
+		result = -ENOMEM;
+	} else
+		result = 0;
+	d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
+	return result;
+}
+
+void i2400ms_tx_release(struct i2400ms *i2400ms)
+{
+	destroy_workqueue(i2400ms->tx_workqueue);
+}
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
new file mode 100644
index 0000000..1bfa283
--- /dev/null
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -0,0 +1,511 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Linux driver model glue for the SDIO device, reset & fw upload
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Dirk Brandewie <dirk.j.brandewie@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * See i2400m-sdio.h for a general description of this driver.
+ *
+ * This file implements the driver model glue and the hooks through
+ * which the generic driver calls the bus-specific functions (device
+ * communication setup/tear down, firmware upload and resetting).
+ *
+ * ROADMAP
+ *
+ * i2400ms_probe()
+ *   alloc_netdev()
+ *     i2400ms_netdev_setup()
+ *       i2400ms_init()
+ *       i2400m_netdev_setup()
+ *   i2400ms_enable_function()
+ *   i2400m_setup()
+ *
+ * i2400ms_remove()
+ *     i2400m_release()
+ *     free_netdev(net_dev)
+ *
+ * i2400ms_bus_reset()            Called by i2400m->bus_reset
+ *   __i2400ms_reset()
+ *     __i2400ms_send_barker()
+ *
+ * i2400ms_bus_dev_start()        Called by i2400m_dev_start() [which is
+ *   i2400ms_tx_setup()           called by i2400m_setup()]
+ *   i2400ms_rx_setup()
+ *
+ * i2400ms_bus_dev_stop()         Called by i2400m_dev_stop() [which is
+ *   i2400ms_rx_release()         called by i2400m_release()]
+ *   i2400ms_tx_release()
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+#include "i2400m-sdio.h"
+#include <linux/wimax/i2400m.h>
+
+#define D_SUBMODULE main
+#include "sdio-debug-levels.h"
+
+/* IOE WiMAX function timeout in seconds */
+static int ioe_timeout = 2;
+module_param(ioe_timeout, int, 0);
+
+/* Our firmware file name */
+#define I2400MS_FW_FILE_NAME "i2400m-fw-sdio-" I2400M_FW_VERSION ".sbcf"
+
+/*
+ * Enable the SDIO function
+ *
+ * Tries to enable the SDIO function; might fail if it is still not
+ * ready (in some hardware, the SDIO WiMAX function is only enabled
+ * when we explicitly ask it to be). Retries until a timeout is
+ * reached.
+ *
+ * The reverse of this is...sdio_disable_func()
+ *
+ * Returns: 0 if the SDIO function was enabled, < 0 errno code on
+ *     error (-ENODEV when it was unable to enable the function).
+ */
+static
+int i2400ms_enable_function(struct sdio_func *func)
+{
+	u64 timeout;
+	int err;
+	struct device *dev = &func->dev;
+
+	d_fnstart(3, dev, "(func %p)\n", func);
+	/* Setup timeout (FIXME: This needs to read the CIS table to
+	 * get a real timeout) and then wait for the device to signal
+	 * it is ready */
+	timeout = get_jiffies_64() + ioe_timeout * HZ;
+	err = -ENODEV;
+	while (err != 0 && time_before64(get_jiffies_64(), timeout)) {
+		sdio_claim_host(func);
+		err = sdio_enable_func(func);
+		if (0 == err) {
+			sdio_release_host(func);
+			d_printf(2, dev, "SDIO function enabled\n");
+			goto function_enabled;
+		}
+		d_printf(2, dev, "SDIO function failed to enable: %d\n", err);
+		sdio_disable_func(func);
+		sdio_release_host(func);
+		msleep(I2400MS_INIT_SLEEP_INTERVAL);
+	}
+	/* If timed out, device is not there yet -- get -ENODEV so
+	 * the device driver core will retry later on. */
+	if (err == -ETIME) {
+		dev_err(dev, "Can't enable WiMAX function; "
+			" has the function been enabled?\n");
+		err = -ENODEV;
+	}
+function_enabled:
+	d_fnend(3, dev, "(func %p) = %d\n", func, err);
+	return err;
+}
+
+
+/*
+ * Setup driver resources needed to communicate with the device
+ *
+ * The fw needs some time to settle, and it was just uploaded,
+ * so give it a break first. I'd prefer to just wait for the device to
+ * send something, but it seems the poking we do to enable SDIO stuff
+ * interferes with it, so just give it a break before starting...
+ */
+static
+int i2400ms_bus_dev_start(struct i2400m *i2400m)
+{
+	int result;
+	struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
+	struct sdio_func *func = i2400ms->func;
+	struct device *dev = &func->dev;
+
+	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+	msleep(200);
+	result = i2400ms_rx_setup(i2400ms);
+	if (result < 0)
+		goto error_rx_setup;
+	result = i2400ms_tx_setup(i2400ms);
+	if (result < 0)
+		goto error_tx_setup;
+	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+	return result;
+
+	i2400ms_tx_release(i2400ms);
+error_tx_setup:
+	i2400ms_rx_release(i2400ms);
+error_rx_setup:
+	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+	return result;
+}
+
+
+static
+void i2400ms_bus_dev_stop(struct i2400m *i2400m)
+{
+	struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
+	struct sdio_func *func = i2400ms->func;
+	struct device *dev = &func->dev;
+
+	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+	i2400ms_rx_release(i2400ms);
+	i2400ms_tx_release(i2400ms);
+	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+
+
+/*
+ * Sends a barker buffer to the device
+ *
+ * This helper allocates a buffer, uses it to transmit and then frees
+ * it. The reason is that the SDIO host controller expects some
+ * alignment (exactly which is unknown) that the stack won't really
+ * provide, and certain arch/host-controller combinations cannot use
+ * stack/vmalloc/text areas for DMA transfers.
+ */
+static
+int __i2400ms_send_barker(struct i2400ms *i2400ms,
+			  const __le32 *barker, size_t barker_size)
+{
+	int  ret;
+	struct sdio_func *func = i2400ms->func;
+	struct device *dev = &func->dev;
+	void *buffer;
+
+	ret = -ENOMEM;
+	buffer = kzalloc(I2400MS_BLK_SIZE, GFP_KERNEL);
+	if (buffer == NULL)
+		goto error_kzalloc;
+
+	memcpy(buffer, barker, barker_size);
+	sdio_claim_host(func);
+	ret = sdio_memcpy_toio(func, 0, buffer, I2400MS_BLK_SIZE);
+	sdio_release_host(func);
+
+	if (ret < 0)
+		d_printf(0, dev, "E: barker error: %d\n", ret);
+
+	kfree(buffer);
+error_kzalloc:
+	return ret;
+}
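+
+/*
+ * Layout note for the block sent above (inferred from
+ * i2400ms_bus_reset() below, not from a separate spec): the caller
+ * passes a 16 byte barker (four __le32 copies of the same value),
+ * which lands at the start of the I2400MS_BLK_SIZE buffer; the rest
+ * of the block is padding, sent only because the transfer is done in
+ * whole blocks.
+ */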
+
+
+/*
+ * Reset a device at different levels (warm, cold or bus)
+ *
+ * @i2400ms: device descriptor
+ * @reset_type: soft, warm or bus reset (I2400M_RT_WARM/SOFT/BUS)
+ *
+ * FIXME: not tested -- need to confirm expected effects
+ *
+ * Warm and cold resets get an SDIO reset if they fail (unimplemented)
+ *
+ * Warm reset:
+ *
+ * The device will be fully reset internally, but won't be
+ * disconnected from the SDIO bus (so no reenumeration will
+ * happen). Firmware upload will be necessary.
+ *
+ * The device will send a reboot barker that will trigger the driver
+ * to reinitialize the state automatically; in this driver the barker
+ * is detected in the SDIO RX path, which calls
+ * i2400m_dev_reset_handle().
+ *
+ * Cold and bus (SDIO) reset:
+ *
+ * The device will be fully reset internally, disconnected from the
+ * SDIO bus and a reenumeration will happen. Firmware upload will be
+ * necessary. Thus, we don't do any locking or struct
+ * reinitialization, as we are going to be fully disconnected and
+ * reenumerated.
+ *
+ * Note we need to return -ENODEV if a warm reset was requested and we
+ * had to resort to a bus reset. See i2400m_op_reset(), wimax_reset()
+ * and wimax_dev->op_reset.
+ *
+ * WARNING: no driver state saved/fixed
+ */
+static
+int i2400ms_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
+{
+	int result;
+	struct i2400ms *i2400ms =
+		container_of(i2400m, struct i2400ms, i2400m);
+	struct device *dev = i2400m_dev(i2400m);
+	static const __le32 i2400m_WARM_BOOT_BARKER[4] = {
+		__constant_cpu_to_le32(I2400M_WARM_RESET_BARKER),
+		__constant_cpu_to_le32(I2400M_WARM_RESET_BARKER),
+		__constant_cpu_to_le32(I2400M_WARM_RESET_BARKER),
+		__constant_cpu_to_le32(I2400M_WARM_RESET_BARKER),
+	};
+	static const __le32 i2400m_COLD_BOOT_BARKER[4] = {
+		__constant_cpu_to_le32(I2400M_COLD_RESET_BARKER),
+		__constant_cpu_to_le32(I2400M_COLD_RESET_BARKER),
+		__constant_cpu_to_le32(I2400M_COLD_RESET_BARKER),
+		__constant_cpu_to_le32(I2400M_COLD_RESET_BARKER),
+	};
+
+	if (rt == I2400M_RT_WARM)
+		result = __i2400ms_send_barker(i2400ms, i2400m_WARM_BOOT_BARKER,
+					       sizeof(i2400m_WARM_BOOT_BARKER));
+	else if (rt == I2400M_RT_COLD)
+		result = __i2400ms_send_barker(i2400ms, i2400m_COLD_BOOT_BARKER,
+					       sizeof(i2400m_COLD_BOOT_BARKER));
+	else if (rt == I2400M_RT_BUS) {
+do_bus_reset:
+		dev_err(dev, "FIXME: SDIO bus reset not implemented\n");
+		result = rt == I2400M_RT_WARM ? -ENODEV : -ENOSYS;
+	} else
+		BUG();
+	if (result < 0 && rt != I2400M_RT_BUS) {
+		dev_err(dev, "%s reset failed (%d); trying SDIO reset\n",
+			rt == I2400M_RT_WARM ? "warm" : "cold", result);
+		rt = I2400M_RT_BUS;
+		goto do_bus_reset;
+	}
+	return result;
+}
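+
+/*
+ * Illustrative call from the generic driver's side (the hook is
+ * assigned in i2400ms_probe() below); a warm reset request would
+ * look like:
+ *
+ *	result = i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+ */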
+
+
+static
+void i2400ms_netdev_setup(struct net_device *net_dev)
+{
+	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+	struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
+	i2400ms_init(i2400ms);
+	i2400m_netdev_setup(net_dev);
+}
+
+
+/*
+ * Debug levels control; see debug.h
+ */
+struct d_level D_LEVEL[] = {
+	D_SUBMODULE_DEFINE(main),
+	D_SUBMODULE_DEFINE(tx),
+	D_SUBMODULE_DEFINE(rx),
+	D_SUBMODULE_DEFINE(fw),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+
+
+#define __debugfs_register(prefix, name, parent)			\
+do {									\
+	result = d_level_register_debugfs(prefix, name, parent);	\
+	if (result < 0)							\
+		goto error;						\
+} while (0)
+
+
+static
+int i2400ms_debugfs_add(struct i2400ms *i2400ms)
+{
+	int result;
+	struct dentry *dentry = i2400ms->i2400m.wimax_dev.debugfs_dentry;
+
+	dentry = debugfs_create_dir("i2400m-usb", dentry);
+	result = PTR_ERR(dentry);
+	if (IS_ERR(dentry)) {
+		if (result == -ENODEV)
+			result = 0;	/* No debugfs support */
+		goto error;
+	}
+	i2400ms->debugfs_dentry = dentry;
+	__debugfs_register("dl_", main, dentry);
+	__debugfs_register("dl_", tx, dentry);
+	__debugfs_register("dl_", rx, dentry);
+	__debugfs_register("dl_", fw, dentry);
+
+	return 0;
+
+error:
+	debugfs_remove_recursive(i2400ms->debugfs_dentry);
+	return result;
+}
+
+
+/*
+ * Probe a i2400m interface and register it
+ *
+ * @func:    SDIO function
+ * @id:      SDIO device ID
+ * @returns: 0 if ok, < 0 errno code on error.
+ *
+ * Allocates a net device, initializes the bus-specific details and
+ * then calls the bus-generic initialization routine. That will
+ * register the wimax and netdev devices, upload the firmware [using
+ * _bus_bm_*()], call _bus_dev_start() to finalize the setup of the
+ * communication with the device and then start talking to it to
+ * finish setting it up.
+ *
+ * Initialization is tricky; some instances of the hw are packed with
+ * others in a way that requires a third driver that enables the WiMAX
+ * function. In those cases, we can't enable the SDIO function and
+ * we'll return with -ENODEV. When the driver that enables the WiMAX
+ * function does its thing, it has to do a bus_rescan_devices() on the
+ * SDIO bus so this driver is called again to enumerate the WiMAX
+ * function.
+ */
+static
+int i2400ms_probe(struct sdio_func *func,
+		  const struct sdio_device_id *id)
+{
+	int result;
+	struct net_device *net_dev;
+	struct device *dev = &func->dev;
+	struct i2400m *i2400m;
+	struct i2400ms *i2400ms;
+
+	/* Allocate instance [calls i2400m_netdev_setup() on it]. */
+	result = -ENOMEM;
+	net_dev = alloc_netdev(sizeof(*i2400ms), "wmx%d",
+			       i2400ms_netdev_setup);
+	if (net_dev == NULL) {
+		dev_err(dev, "no memory for network device instance\n");
+		goto error_alloc_netdev;
+	}
+	SET_NETDEV_DEV(net_dev, dev);
+	i2400m = net_dev_to_i2400m(net_dev);
+	i2400ms = container_of(i2400m, struct i2400ms, i2400m);
+	i2400m->wimax_dev.net_dev = net_dev;
+	i2400ms->func = func;
+	sdio_set_drvdata(func, i2400ms);
+
+	i2400m->bus_tx_block_size = I2400MS_BLK_SIZE;
+	i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX;
+	i2400m->bus_dev_start = i2400ms_bus_dev_start;
+	i2400m->bus_dev_stop = i2400ms_bus_dev_stop;
+	i2400m->bus_tx_kick = i2400ms_bus_tx_kick;
+	i2400m->bus_reset = i2400ms_bus_reset;
+	i2400m->bus_bm_cmd_send = i2400ms_bus_bm_cmd_send;
+	i2400m->bus_bm_wait_for_ack = i2400ms_bus_bm_wait_for_ack;
+	i2400m->bus_fw_name = I2400MS_FW_FILE_NAME;
+	i2400m->bus_bm_mac_addr_impaired = 1;
+
+	result = i2400ms_enable_function(i2400ms->func);
+	if (result < 0) {
+		dev_err(dev, "Cannot enable SDIO function: %d\n", result);
+		goto error_func_enable;
+	}
+
+	sdio_claim_host(func);
+	result = sdio_set_block_size(func, I2400MS_BLK_SIZE);
+	if (result < 0) {
+		dev_err(dev, "Failed to set block size: %d\n", result);
+		goto error_set_blk_size;
+	}
+	sdio_release_host(func);
+
+	result = i2400m_setup(i2400m, I2400M_BRI_NO_REBOOT);
+	if (result < 0) {
+		dev_err(dev, "cannot setup device: %d\n", result);
+		goto error_setup;
+	}
+
+	result = i2400ms_debugfs_add(i2400ms);
+	if (result < 0) {
+		dev_err(dev, "cannot create SDIO debugfs: %d\n",
+			result);
+		goto error_debugfs_add;
+	}
+	return 0;
+
+error_debugfs_add:
+	i2400m_release(i2400m);
+error_setup:
+	sdio_set_drvdata(func, NULL);
+	sdio_claim_host(func);
+error_set_blk_size:
+	sdio_disable_func(func);
+	sdio_release_host(func);
+error_func_enable:
+	free_netdev(net_dev);
+error_alloc_netdev:
+	return result;
+}
+
+
+static
+void i2400ms_remove(struct sdio_func *func)
+{
+	struct device *dev = &func->dev;
+	struct i2400ms *i2400ms = sdio_get_drvdata(func);
+	struct i2400m *i2400m = &i2400ms->i2400m;
+	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+
+	d_fnstart(3, dev, "SDIO func %p\n", func);
+	debugfs_remove_recursive(i2400ms->debugfs_dentry);
+	i2400m_release(i2400m);
+	sdio_set_drvdata(func, NULL);
+	sdio_claim_host(func);
+	sdio_disable_func(func);
+	sdio_release_host(func);
+	free_netdev(net_dev);
+	d_fnend(3, dev, "SDIO func %p\n", func);
+}
+
+enum {
+	I2400MS_INTEL_VID = 0x89,
+};
+
+static
+const struct sdio_device_id i2400ms_sdio_ids[] = {
+	/* Intel: i2400m WiMAX over SDIO */
+	{ SDIO_DEVICE(I2400MS_INTEL_VID, 0x1402) },
+	{ }, 			/* end: all zeroes */
+};
+MODULE_DEVICE_TABLE(sdio, i2400ms_sdio_ids);
+
+
+static
+struct sdio_driver i2400m_sdio_driver = {
+	.name		= KBUILD_MODNAME,
+	.probe		= i2400ms_probe,
+	.remove		= i2400ms_remove,
+	.id_table	= i2400ms_sdio_ids,
+};
+
+
+static
+int __init i2400ms_driver_init(void)
+{
+	return sdio_register_driver(&i2400m_sdio_driver);
+}
+module_init(i2400ms_driver_init);
+
+
+static
+void __exit i2400ms_driver_exit(void)
+{
+	flush_scheduled_work();	/* for the stuff we schedule */
+	sdio_unregister_driver(&i2400m_sdio_driver);
+}
+module_exit(i2400ms_driver_exit);
+
+
+MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
+MODULE_DESCRIPTION("Intel 2400M WiMAX networking for SDIO");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(I2400MS_FW_FILE_NAME);
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
new file mode 100644
index 0000000..613a88f
--- /dev/null
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -0,0 +1,817 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Generic (non-bus specific) TX handling
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ *  - Initial implementation
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *  - Rewritten to use a single FIFO to lower the memory allocation
+ *    pressure and optimize cache hits when copying to the queue, as
+ *    well as splitting out bus-specific code.
+ *
+ *
+ * Implements data transmission to the device; this is done through a
+ * software FIFO, as data/control frames can be coalesced (while the
+ * device is reading the previous tx transaction, others accumulate).
+ *
+ * A FIFO is used because in the end it is resource-cheaper than trying
+ * to implement scatter/gather over USB. As well, most traffic is going
+ * to be download (vs upload).
+ *
+ * The format for sending/receiving data to/from the i2400m is
+ * described in detail in rx.c:PROTOCOL FORMAT. In here we implement
+ * the transmission of that. This is split between a bus-independent
+ * part that just prepares everything and a bus-specific part that
+ * does the actual transmission over the bus to the device (in the
+ * bus-specific driver).
+ *
+ *
+ * The general format of a device-host transaction is MSG-HDR, PLD1,
+ * PLD2...PLDN, PL1, PL2,...PLN, PADDING.
+ *
+ * Because we need the send payload descriptors and then payloads and
+ * because it is kind of expensive to do scatterlists in USB (one URB
+ * per node), it becomes cheaper to append all the data to a FIFO
+ * (copying to a FIFO potentially in cache is cheaper).
+ *
+ * Then the bus-specific code takes the parts of that FIFO that are
+ * written and passes them to the device.
+ *
+ * So the concepts to keep in mind there are:
+ *
+ * We use a FIFO to queue the data in a linear buffer. We first append
+ * a MSG-HDR, space for I2400M_TX_PLD_MAX payload descriptors and then
+ * go appending payloads until we run out of space or of payload
+ * descriptors. Then we append padding to make the whole transaction a
+ * multiple of i2400m->bus_tx_block_size (as defined by the bus layer).
+ *
+ * - A TX message: a combination of a message header, payload
+ *   descriptors and payloads.
+ *
+ *     Open: it is marked as active (i2400m->tx_msg is valid) and we
+ *       can keep adding payloads to it.
+ *
+ *     Closed: we are not appending more payloads to this TX message
+ *       (exhausted space in the queue, too many payloads or
+ *       whichever).  We have appended padding so the whole message
+ *       length is aligned to i2400m->bus_tx_block_size (as set by the
+ *       bus/transport layer).
+ *
+ * - Most of the time we keep a TX message open to which we append
+ *   payloads.
+ *
+ * - If we are going to append and there is no more space (we are at
+ *   the end of the FIFO), we close the message, mark the rest of the
+ *   FIFO space unusable (skip_tail), create a new message at the
+ *   beginning of the FIFO (if there is space) and append the message
+ *   there.
+ *
+ *   This is because we need to give linear TX messages to the bus
+ *   engine. So we don't write a message to the remaining FIFO space
+ *   until the tail and continue at the head of it.
+ *
+ * - We overload one of the fields in the message header to use it as
+ *   'size' of the TX message, so we can iterate over them. It also
+ *   contains a flag that indicates if we have to skip it or not.
+ *   When we send the buffer, we update that to its real on-the-wire
+ *   value.
+ *
+ * - The MSG-HDR PLD1...PLDN stuff has to be a size multiple of 16.
+ *
+ *   It follows that if MSG-HDR says we have N payloads, the whole
+ *   header + descriptors is 16 + 4*N; for those to be a multiple of
+ *   16, it follows that N can be 4, 8, 12, ... (32, 48, 64, 80...
+ *   bytes).
+ *
+ *   So if we have only 1 payload, we have to submit a header that in
+ *   all truth has space for 4.
+ *
+ *   The implication is that we reserve space for 12 (64 bytes); but
+ *   if we fill up only (eg) 2, our header becomes 32 bytes only. So
+ *   the TX engine has to shift those 32 bytes of msg header and 2
+ *   payloads and padding so that right after it the payloads start
+ *   and the TX engine has to know about that.
+ *
+ *   It is cheaper to move the header up than the whole payloads down.
+ *
+ *   We do this in i2400m_tx_close(). See 'i2400m_msg_hdr->offset'.
+ *
+ * - Each payload has to be size-padded to 16 bytes; before appending
+ *   it, we just do it.
+ *
+ * - The whole message has to be padded to i2400m->bus_tx_block_size;
+ *   we do this at close time. Thus, when reserving space for the
+ *   payload, we always make sure there is also free space for this
+ *   padding that sooner or later will happen.
+ *
+ * When we append a message, we tell the bus specific code to kick in
+ * TXs. It will TX (in parallel) until the buffer is exhausted--hence
+ * the locking we do. The TX code will only send one TX message at a
+ * time (which, remember, might contain more than one payload). Of
+ * course, when the bus-specific driver attempts to TX a message that
+ * is still open, it gets closed first.
+ *
+ * Gee, this is messy; well a picture. In the example below we have a
+ * partially full FIFO, with a closed message ready to be delivered
+ * (with a moved message header to make sure it is size-aligned to
+ * 16), TAIL room that was unusable (and thus is marked with a message
+ * header that says 'skip this') and at the head of the buffer, an
+ * incomplete message with a couple of payloads.
+ *
+ * N   ___________________________________________________
+ *    |                                                   |
+ *    |     TAIL room                                     |
+ *    |                                                   |
+ *    |  msg_hdr to skip (size |= 0x80000000)             |
+ *    |---------------------------------------------------|-------
+ *    |                                                   |  /|\
+ *    |                                                   |   |
+ *    |  TX message padding                               |   |
+ *    |                                                   |   |
+ *    |                                                   |   |
+ *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|   |
+ *    |                                                   |   |
+ *    |  payload 1                                        |   |
+ *    |                                                   | N * tx_block_size
+ *    |                                                   |   |
+ *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|   |
+ *    |                                                   |   |
+ *    |  payload 0                                        |   |
+ *    |                                                   |   |
+ *    |                                                   |   |
+ *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|- -|- - - -
+ *    |  padding 3                  /|\                   |   |   /|\
+ *    |  padding 2                   |                    |   |    |
+ *    |  pld 1                32 bytes (2 * 16)           |   |    |
+ *    |  pld 0                       |                    |   |    |
+ *    |  moved msg_hdr              \|/                   |  \|/   |
+ *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|- - -   |
+ *    |                                                   |    _PLD_SIZE
+ *    |  unused                                           |        |
+ *    |                                                   |        |
+ *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|        |
+ *    |  msg_hdr (size X)       [this message is closed]  |       \|/
+ *    |===================================================|========== <=== OUT
+ *    |                                                   |
+ *    |                                                   |
+ *    |                                                   |
+ *    |          Free room                                |
+ *    |                                                   |
+ *    |                                                   |
+ *    |                                                   |
+ *    |                                                   |
+ *    |                                                   |
+ *    |                                                   |
+ *    |                                                   |
+ *    |                                                   |
+ *    |                                                   |
+ *    |===================================================|========== <=== IN
+ *    |                                                   |
+ *    |                                                   |
+ *    |                                                   |
+ *    |                                                   |
+ *    |  payload 1                                        |
+ *    |                                                   |
+ *    |                                                   |
+ *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|
+ *    |                                                   |
+ *    |  payload 0                                        |
+ *    |                                                   |
+ *    |                                                   |
+ *    |- - - - - - - - - - - - - - - - - - - - - - - - - -|
+ *    |  pld 11                     /|\                   |
+ *    |  ...                         |                    |
+ *    |  pld 1                64 bytes (4 * 16)           |
+ *    |  pld 0                       |                    |
+ *    |  msg_hdr (size X)           \|/ [message is open] |
+ * 0   ---------------------------------------------------
+ *
+ *
+ * ROADMAP
+ *
+ * i2400m_tx_setup()           Called by i2400m_setup
+ * i2400m_tx_release()         Called by i2400m_release()
+ *
+ *  i2400m_tx()                 Called to send data or control frames
+ *    i2400m_tx_fifo_push()     Allocates append-space in the FIFO
+ *    i2400m_tx_new()           Opens a new message in the FIFO
+ *    i2400m_tx_fits()          Checks if a new payload fits in the message
+ *    i2400m_tx_close()         Closes an open message in the FIFO
+ *    i2400m_tx_skip_tail()     Marks unusable FIFO tail space
+ *    i2400m->bus_tx_kick()
+ *
+ * Now i2400m->bus_tx_kick() is the bus-specific driver backend
+ * implementation; that would do:
+ *
+ * i2400m->bus_tx_kick()
+ *   i2400m_tx_msg_get()	Gets first message ready to go
+ *   ...sends it...
+ *   i2400m_tx_msg_sent()       Ack the message is sent; repeat from
+ *                              _tx_msg_get() until it returns NULL
+ *                               (FIFO empty).
+ */
+#include <linux/netdevice.h>
+#include "i2400m.h"
+
+
+#define D_SUBMODULE tx
+#include "debug-levels.h"
+
+enum {
+	/**
+	 * TX Buffer size
+	 *
+	 * Doc says maximum transaction is 16KiB. If we had 16KiB en
+	 * route and 16KiB being queued, it boils down to needing
+	 * 32KiB.
+	 */
+	I2400M_TX_BUF_SIZE = 32768,
+	/**
+	 * The message header and payload descriptors have to be
+	 * 16-byte aligned (16 + 4 * N = 16 * M). Given that the
+	 * average sent packet is MTU sized (~1400-~1500 bytes), it
+	 * follows that we can fit at most 10-11 payloads in one
+	 * transaction. To meet the alignment requirement, we leave
+	 * space for 12 descriptors (64 bytes); if at the end fewer
+	 * are used, we pad up to the nearest multiple of 16.
+	 */
+	I2400M_TX_PLD_MAX = 12,
+	I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr)
+	+ I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld),
+	I2400M_TX_SKIP = 0x80000000,
+};
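+
+/*
+ * Sizing sketch, using the 16 byte message header and 4 byte payload
+ * descriptor sizes described in the file header (illustrative
+ * numbers): reserving I2400M_TX_PLD_MAX = 12 descriptors makes
+ * I2400M_TX_PLD_SIZE = 16 + 12 * 4 = 64 bytes. If a message is closed
+ * with only 2 payloads, its effective header is 16 + 2 * 4 = 24
+ * bytes, aligned up to 32, so i2400m_tx_close() shifts it 64 - 32 =
+ * 32 bytes towards the payloads (the msg_hdr->offset in the diagram).
+ */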
+
+#define TAIL_FULL ((void *)~(unsigned long)NULL)
+
+/*
+ * Allocate @size bytes in the TX fifo, return a pointer to it
+ *
+ * @i2400m: device descriptor
+ * @size: size of the buffer we need to allocate
+ * @padding: ensure that there are at least this many bytes of free
+ *     contiguous space in the fifo. This is needed because later on
+ *     we might need to add padding.
+ *
+ * Returns:
+ *
+ *     Pointer to the allocated space. NULL if there is no
+ *     space. TAIL_FULL if there is no space at the tail but there is at
+ *     the head (Case B below).
+ *
+ * These are the two basic cases we need to keep an eye for -- it is
+ * much better explained in linux/kernel/kfifo.c, but this code
+ * basically does the same. No rocket science here.
+ *
+ *       Case A               Case B
+ * N  ___________          ___________
+ *   | tail room |        |   data    |
+ *   |           |        |           |
+ *   |<-  IN   ->|        |<-  OUT  ->|
+ *   |           |        |           |
+ *   |   data    |        |   room    |
+ *   |           |        |           |
+ *   |<-  OUT  ->|        |<-  IN   ->|
+ *   |           |        |           |
+ *   | head room |        |   data    |
+ * 0  -----------          -----------
+ *
+ * We allocate only *contiguous* space.
+ *
+ * We can allocate only from 'room'. In Case B, it is simple; in case
+ * A, we only try from the tail room; if it is not enough, we just
+ * fail and return TAIL_FULL and let the caller figure out if it wants to
+ * skip the tail room and try to allocate from the head.
+ *
+ * Note:
+ *
+ *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
+ *
+ *     The indexes keep increasing and we reset them to zero when we
+ *     pop data off the queue
+ */
+static
+void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	size_t room, tail_room, needed_size;
+	void *ptr;
+
+	needed_size = size + padding;
+	room = I2400M_TX_BUF_SIZE - (i2400m->tx_in - i2400m->tx_out);
+	if (room < needed_size)	{ /* this takes care of Case B */
+		d_printf(2, dev, "fifo push %zu/%zu: no space\n",
+			 size, padding);
+		return NULL;
+	}
+	/* Is there space at the tail? */
+	tail_room = I2400M_TX_BUF_SIZE - i2400m->tx_in % I2400M_TX_BUF_SIZE;
+	if (tail_room < needed_size) {
+		if (i2400m->tx_out % I2400M_TX_BUF_SIZE
+		    < i2400m->tx_in % I2400M_TX_BUF_SIZE) {
+			d_printf(2, dev, "fifo push %zu/%zu: tail full\n",
+				 size, padding);
+			return TAIL_FULL;	/* There might be head space */
+		} else {
+			d_printf(2, dev, "fifo push %zu/%zu: no head space\n",
+				 size, padding);
+			return NULL;	/* There is no space */
+		}
+	}
+	ptr = i2400m->tx_buf + i2400m->tx_in % I2400M_TX_BUF_SIZE;
+	d_printf(2, dev, "fifo push %zu/%zu: at @%zu\n", size, padding,
+		 i2400m->tx_in % I2400M_TX_BUF_SIZE);
+	i2400m->tx_in += size;
+	return ptr;
+}
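+
+/*
+ * Worked example of the arithmetic above (illustrative indexes only):
+ * with I2400M_TX_BUF_SIZE = 32768, tx_out = 31000 and tx_in = 32700,
+ * a push needing 256 bytes sees room = 32768 - 1700 = 31068 (enough),
+ * but tail_room = 32768 - 32700 % 32768 = 68 (not enough); since
+ * tx_out % 32768 (31000) is below tx_in % 32768 (32700), the head
+ * still has room, so TAIL_FULL is returned and the caller can skip
+ * the tail and retry from the head of the buffer.
+ */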
+
+
+/*
+ * Mark the tail of the FIFO buffer as 'to-skip'
+ *
+ * We should never hit the BUG_ON() because all the sizes we push to
+ * the FIFO are padded to be a multiple of 16 -- the size of *msg
+ * (I2400M_PL_PAD for the payloads, I2400M_TX_PLD_SIZE for the
+ * header).
+ *
+ * Note:
+ *
+ *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
+ */
+static
+void i2400m_tx_skip_tail(struct i2400m *i2400m)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	size_t tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
+	size_t tail_room = I2400M_TX_BUF_SIZE - tx_in;
+	struct i2400m_msg_hdr *msg = i2400m->tx_buf + tx_in;
+	BUG_ON(tail_room < sizeof(*msg));
+	msg->size = tail_room | I2400M_TX_SKIP;
+	d_printf(2, dev, "skip tail: skipping %zu bytes @%zu\n",
+		 tail_room, tx_in);
+	i2400m->tx_in += tail_room;
+}
+
+
+/*
+ * Check if a skb will fit in the TX queue's current active TX
+ * message (if there are still descriptors left unused).
+ *
+ * Returns:
+ *     0 if the message won't fit, 1 if it will.
+ *
+ * Note:
+ *
+ *     Assumes a TX message is active (i2400m->tx_msg).
+ *
+ *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
+ */
+static
+unsigned i2400m_tx_fits(struct i2400m *i2400m)
+{
+	struct i2400m_msg_hdr *msg_hdr = i2400m->tx_msg;
+	return le16_to_cpu(msg_hdr->num_pls) < I2400M_TX_PLD_MAX;
+
+}
+
+
+/*
+ * Start a new TX message header in the queue.
+ *
+ * Reserve memory from the base FIFO engine and then just initialize
+ * the message header.
+ *
+ * We allocate the biggest TX message header we might need (one that'd
+ * fit I2400M_TX_PLD_MAX payloads) -- when it is closed it will be
+ * 'ironed out' and the unneeded parts removed.
+ *
+ * NOTE:
+ *
+ *     Assumes that the previous message is CLOSED (eg: either
+ *     there was none or 'i2400m_tx_close()' was called on it).
+ *
+ *     Assumes i2400m->tx_lock is taken, and we use that as a barrier
+ */
+static
+void i2400m_tx_new(struct i2400m *i2400m)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	struct i2400m_msg_hdr *tx_msg;
+	BUG_ON(i2400m->tx_msg != NULL);
+try_head:
+	tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE, 0);
+	if (tx_msg == NULL)
+		goto out;
+	else if (tx_msg == TAIL_FULL) {
+		i2400m_tx_skip_tail(i2400m);
+		d_printf(2, dev, "new TX message: tail full, trying head\n");
+		goto try_head;
+	}
+	memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
+	tx_msg->size = I2400M_TX_PLD_SIZE;
+out:
+	i2400m->tx_msg = tx_msg;
+	d_printf(2, dev, "new TX message: %p @%zu\n",
+		 tx_msg, (void *) tx_msg - i2400m->tx_buf);
+}
+
+
+/*
+ * Finalize the current TX message header
+ *
+ * Sets the message header to be at the proper location depending on
+ * how many descriptors we have (check documentation at the file's
+ * header for more info on that).
+ *
+ * Appends padding bytes to make sure the whole TX message (counting
+ * from the 'relocated' message header) is aligned to
+ * tx_block_size. We assume the _append() code has left enough space
+ * in the FIFO for that. If there are no payloads, just pass, as it
+ * won't be transferred.
+ *
+ * The amount of padding bytes depends on how many payloads are in the
+ * TX message, as the "msg header and payload descriptors" will be
+ * shifted up in the buffer.
+ */
+static
+void i2400m_tx_close(struct i2400m *i2400m)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
+	struct i2400m_msg_hdr *tx_msg_moved;
+	size_t aligned_size, padding, hdr_size;
+	void *pad_buf;
+
+	if (tx_msg->size & I2400M_TX_SKIP)	/* a skipper? nothing to do */
+		goto out;
+
+	/* Relocate the message header
+	 *
+	 * Find the current header size, align it to 16 and if we need
+	 * to move it so the tail is next to the payloads, move it and
+	 * set the offset.
+	 *
+	 * If it moved, this header is good only for transmission; the
+	 * original one (it is kept if we moved) is still used to
+	 * figure out where the next TX message starts (and where the
+	 * offset to the moved header is).
+	 */
+	hdr_size = sizeof(*tx_msg)
+		+ le16_to_cpu(tx_msg->num_pls) * sizeof(tx_msg->pld[0]);
+	hdr_size = ALIGN(hdr_size, I2400M_PL_PAD);
+	tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size;
+	tx_msg_moved = (void *) tx_msg + tx_msg->offset;
+	memmove(tx_msg_moved, tx_msg, hdr_size);
+	tx_msg_moved->size -= tx_msg->offset;
+	/*
+	 * Now figure out how much we have to add to the (moved!)
+	 * message so the size is a multiple of i2400m->bus_tx_block_size.
+	 */
+	aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size);
+	padding = aligned_size - tx_msg_moved->size;
+	if (padding > 0) {
+		pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0);
+		if (unlikely(WARN_ON(pad_buf == NULL
+				     || pad_buf == TAIL_FULL))) {
+			/* This should not happen -- append should verify
+			 * there is always space left at least to append
+			 * tx_block_size */
+			dev_err(dev,
+				"SW BUG! Possible data leakage from memory the "
+				"device should not read for padding - "
+				"size %lu aligned_size %zu tx_buf %p in "
+				"%zu out %zu\n",
+				(unsigned long) tx_msg_moved->size,
+				aligned_size, i2400m->tx_buf, i2400m->tx_in,
+				i2400m->tx_out);
+		} else
+			memset(pad_buf, 0xad, padding);
+	}
+	tx_msg_moved->padding = cpu_to_le16(padding);
+	tx_msg_moved->size += padding;
+	if (tx_msg != tx_msg_moved)
+		tx_msg->size += padding;
+out:
+	i2400m->tx_msg = NULL;
+}
+
+
+/**
+ * i2400m_tx - send the data in a buffer to the device
+ *
+ * @i2400m: device descriptor
+ *
+ * @buf: pointer to the buffer to transmit
+ *
+ * @buf_len: buffer size
+ *
+ * @pl_type: type of the payload we are sending.
+ *
+ * Returns:
+ *     0 if ok, < 0 errno code on error (-ENOSPC, if there is no more
+ *     room for the message in the queue).
+ *
+ * Appends the buffer to the TX FIFO and notifies the bus-specific
+ * part of the driver that there is new data ready to transmit.
+ * Once this function returns, the buffer has been copied, so it can
+ * be reused.
+ *
+ * The steps followed to append are explained in detail in the file
+ * header.
+ *
+ * Whenever we write to a message, we increase msg->size, so it
+ * reflects exactly how big the message is. This is needed so that if
+ * we concatenate two messages before they can be sent, the code that
+ * sends the messages can find the boundaries (and it will replace the
+ * size with the real barker before sending).
+ *
+ * Note:
+ *
+ *     Cold and warm reset payloads need to be sent as a single
+ *     payload, so we handle that.
+ */
+int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
+	      enum i2400m_pt pl_type)
+{
+	int result = -ENOSPC;
+	struct device *dev = i2400m_dev(i2400m);
+	unsigned long flags;
+	size_t padded_len;
+	void *ptr;
+	unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
+		|| pl_type == I2400M_PT_RESET_COLD;
+
+	d_fnstart(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u)\n",
+		  i2400m, buf, buf_len, pl_type);
+	padded_len = ALIGN(buf_len, I2400M_PL_PAD);
+	d_printf(5, dev, "padded_len %zd buf_len %zd\n", padded_len, buf_len);
+	/* If there is no current TX message, create one; if the
+	 * current one is out of payload slots or we have a singleton,
+	 * close it and start a new one */
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+try_new:
+	if (unlikely(i2400m->tx_msg == NULL))
+		i2400m_tx_new(i2400m);
+	else if (unlikely(!i2400m_tx_fits(i2400m)
+			  || (is_singleton && i2400m->tx_msg->num_pls != 0))) {
+		d_printf(2, dev, "closing TX message (fits %u singleton "
+			 "%u num_pls %u)\n", i2400m_tx_fits(i2400m),
+			 is_singleton, i2400m->tx_msg->num_pls);
+		i2400m_tx_close(i2400m);
+		i2400m_tx_new(i2400m);
+	}
+	if (i2400m->tx_msg == NULL)
+		goto error_tx_new;
+	if (i2400m->tx_msg->size + padded_len > I2400M_TX_BUF_SIZE / 2) {
+		d_printf(2, dev, "TX: message too big, going new\n");
+		i2400m_tx_close(i2400m);
+		i2400m_tx_new(i2400m);
+	}
+	if (i2400m->tx_msg == NULL)
+		goto error_tx_new;
+	/* So we have a current message header; now append space for
+	 * the message -- if there is not enough, try the head */
+	ptr = i2400m_tx_fifo_push(i2400m, padded_len,
+				  i2400m->bus_tx_block_size);
+	if (ptr == TAIL_FULL) {	/* Tail is full, try head */
+		d_printf(2, dev, "pl append: tail full\n");
+		i2400m_tx_close(i2400m);
+		i2400m_tx_skip_tail(i2400m);
+		goto try_new;
+	} else if (ptr == NULL) {	/* All full */
+		result = -ENOSPC;
+		d_printf(2, dev, "pl append: all full\n");
+	} else {			/* Got space, copy it, set padding */
+		struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
+		unsigned num_pls = le16_to_cpu(tx_msg->num_pls);
+		memcpy(ptr, buf, buf_len);
+		memset(ptr + buf_len, 0xad, padded_len - buf_len);
+		i2400m_pld_set(&tx_msg->pld[num_pls], buf_len, pl_type);
+		d_printf(3, dev, "pld 0x%08x (type 0x%1x len 0x%04zx\n",
+			 le32_to_cpu(tx_msg->pld[num_pls].val),
+			 pl_type, buf_len);
+		tx_msg->num_pls = cpu_to_le16(num_pls + 1);
+		tx_msg->size += padded_len;
+		d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u \n",
+			padded_len, tx_msg->size, num_pls+1);
+		d_printf(2, dev,
+			 "TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n",
+			 (void *)tx_msg - i2400m->tx_buf, (size_t)tx_msg->size,
+			 num_pls+1, ptr - i2400m->tx_buf, buf_len, padded_len);
+		result = 0;
+		if (is_singleton)
+			i2400m_tx_close(i2400m);
+	}
+error_tx_new:
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+	i2400m->bus_tx_kick(i2400m);	/* always kick, might free up space */
+	d_fnend(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u) = %d\n",
+		i2400m, buf, buf_len, pl_type, result);
+	return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_tx);
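+
+/*
+ * Minimal usage sketch (hypothetical caller; the real callers are the
+ * control and network TX paths of the generic driver). 'cmd' and
+ * 'cmd_len' are made up names for an already formatted control frame,
+ * and I2400M_PT_CTRL stands for whichever control payload type
+ * enum i2400m_pt defines:
+ *
+ *	result = i2400m_tx(i2400m, cmd, cmd_len, I2400M_PT_CTRL);
+ *	if (result < 0)
+ *		dev_err(dev, "cannot queue control frame: %d\n", result);
+ */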
+
+
+/**
+ * i2400m_tx_msg_get - Get the first TX message in the FIFO to start sending it
+ *
+ * @i2400m: device descriptor
+ * @bus_size: where to place the size of the TX message
+ *
+ * Called by the bus-specific driver to get the first TX message at
+ * the FIFO that is ready for transmission.
+ *
+ * It sets the state in @i2400m to indicate the bus-specific driver is
+ * transferring that message (i2400m->tx_msg_size).
+ *
+ * Once the transfer is completed, call i2400m_tx_msg_sent().
+ *
+ * Notes:
+ *
+ *     The size of the TX message to be transmitted might be smaller than
+ *     that of the TX message in the FIFO (in case the header was
+ *     shorter). Hence, we copy it in @bus_size, for the bus layer to
+ *     use. We keep the message's size in i2400m->tx_msg_size so that
+ *     when the bus later is done transferring we know how much to
+ *     advance the fifo.
+ *
+ *     We collect statistics here as all the data is available and we
+ *     assume it is going to work [see i2400m_tx_msg_sent()].
+ */
+struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *i2400m,
+					 size_t *bus_size)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	struct i2400m_msg_hdr *tx_msg, *tx_msg_moved;
+	unsigned long flags, pls;
+
+	d_fnstart(3, dev, "(i2400m %p bus_size %p)\n", i2400m, bus_size);
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+skip:
+	tx_msg_moved = NULL;
+	if (i2400m->tx_in == i2400m->tx_out) {	/* Empty FIFO? */
+		i2400m->tx_in = 0;
+		i2400m->tx_out = 0;
+		d_printf(2, dev, "TX: FIFO empty: resetting\n");
+		goto out_unlock;
+	}
+	tx_msg = i2400m->tx_buf + i2400m->tx_out % I2400M_TX_BUF_SIZE;
+	if (tx_msg->size & I2400M_TX_SKIP) {	/* skip? */
+		d_printf(2, dev, "TX: skip: msg @%zu (%zu b)\n",
+			 i2400m->tx_out % I2400M_TX_BUF_SIZE,
+			 (size_t) tx_msg->size & ~I2400M_TX_SKIP);
+		i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
+		goto skip;
+	}
+
+	if (tx_msg->num_pls == 0) {		/* No payloads? */
+		if (tx_msg == i2400m->tx_msg) {	/* open, we are done */
+			d_printf(2, dev,
+				 "TX: FIFO empty: open msg w/o payloads @%zu\n",
+				 (void *) tx_msg - i2400m->tx_buf);
+			tx_msg = NULL;
+			goto out_unlock;
+		} else {			/* closed, skip it */
+			d_printf(2, dev,
+				 "TX: skip msg w/o payloads @%zu (%zu b)\n",
+				 (void *) tx_msg - i2400m->tx_buf,
+				 (size_t) tx_msg->size);
+			i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
+			goto skip;
+		}
+	}
+	if (tx_msg == i2400m->tx_msg)		/* open msg? */
+		i2400m_tx_close(i2400m);
+
+	/* Now we have a valid TX message (with payloads) to TX */
+	tx_msg_moved = (void *) tx_msg + tx_msg->offset;
+	i2400m->tx_msg_size = tx_msg->size;
+	*bus_size = tx_msg_moved->size;
+	d_printf(2, dev, "TX: pid %d msg hdr at @%zu offset +@%zu "
+		 "size %zu bus_size %zu\n",
+		 current->pid, (void *) tx_msg - i2400m->tx_buf,
+		 (size_t) tx_msg->offset, (size_t) tx_msg->size,
+		 (size_t) tx_msg_moved->size);
+	tx_msg_moved->barker = cpu_to_le32(I2400M_H2D_PREVIEW_BARKER);
+	tx_msg_moved->sequence = cpu_to_le32(i2400m->tx_sequence++);
+
+	pls = le16_to_cpu(tx_msg_moved->num_pls);
+	i2400m->tx_pl_num += pls;		/* Update stats */
+	if (pls > i2400m->tx_pl_max)
+		i2400m->tx_pl_max = pls;
+	if (pls < i2400m->tx_pl_min)
+		i2400m->tx_pl_min = pls;
+	i2400m->tx_num++;
+	i2400m->tx_size_acc += *bus_size;
+	if (*bus_size < i2400m->tx_size_min)
+		i2400m->tx_size_min = *bus_size;
+	if (*bus_size > i2400m->tx_size_max)
+		i2400m->tx_size_max = *bus_size;
+out_unlock:
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+	d_fnstart(3, dev, "(i2400m %p bus_size %p [%zu]) = %p\n",
+		  i2400m, bus_size, *bus_size, tx_msg_moved);
+	return tx_msg_moved;
+}
+EXPORT_SYMBOL_GPL(i2400m_tx_msg_get);
+
+
+/**
+ * i2400m_tx_msg_sent - indicate the transmission of a TX message
+ *
+ * @i2400m: device descriptor
+ *
+ * Called by the bus-specific driver when a message has been sent;
+ * this pops it from the FIFO and, as there is now space, restarts the
+ * queue in case it was stopped.
+ *
+ * Should be called even if the message send failed and we are
+ * dropping this TX message.
+ */
+void i2400m_tx_msg_sent(struct i2400m *i2400m)
+{
+	unsigned n;
+	unsigned long flags;
+	struct device *dev = i2400m_dev(i2400m);
+
+	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	i2400m->tx_out += i2400m->tx_msg_size;
+	d_printf(2, dev, "TX: sent %zu b\n", (size_t) i2400m->tx_msg_size);
+	i2400m->tx_msg_size = 0;
+	BUG_ON(i2400m->tx_out > i2400m->tx_in);
+	/* level them FIFO markers off */
+	n = i2400m->tx_out / I2400M_TX_BUF_SIZE;
+	i2400m->tx_out %= I2400M_TX_BUF_SIZE;
+	i2400m->tx_in -= n * I2400M_TX_BUF_SIZE;
+	netif_start_queue(i2400m->wimax_dev.net_dev);
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+EXPORT_SYMBOL_GPL(i2400m_tx_msg_sent);
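+
+/*
+ * Marker leveling example for the code above (illustrative numbers):
+ * with a 32768 byte buffer, tx_out = 33000 and tx_in = 35000 give
+ * n = 33000 / 32768 = 1, so tx_out becomes 33000 % 32768 = 232 and
+ * tx_in becomes 35000 - 1 * 32768 = 2232; the 2000 bytes of queued
+ * data are preserved while both indexes stay bounded.
+ */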
+
+
+/**
+ * i2400m_tx_setup - Initialize the TX queue and infrastructure
+ *
+ * @i2400m: device descriptor
+ *
+ * Make sure we reset the TX sequence to zero, as when this function
+ * is called, the firmware has just been restarted.
+ */
+int i2400m_tx_setup(struct i2400m *i2400m)
+{
+	int result;
+
+	/* Do this here only once -- can't do on
+	 * i2400m_hard_start_xmit() as we'll cause race conditions if
+	 * the WS was scheduled on another CPU */
+	INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work);
+
+	i2400m->tx_sequence = 0;
+	i2400m->tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_KERNEL);
+	if (i2400m->tx_buf == NULL)
+		result = -ENOMEM;
+	else
+		result = 0;
+	/* Huh? the bus layer has to define this... */
+	BUG_ON(i2400m->bus_tx_block_size == 0);
+	return result;
+
+}
+
+
+/**
+ * i2400m_tx_release - Tear down the TX queue and infrastructure
+ */
+void i2400m_tx_release(struct i2400m *i2400m)
+{
+	kfree(i2400m->tx_buf);
+}
diff --git a/drivers/net/wimax/i2400m/usb-debug-levels.h b/drivers/net/wimax/i2400m/usb-debug-levels.h
new file mode 100644
index 0000000..e4358bd
--- /dev/null
+++ b/drivers/net/wimax/i2400m/usb-debug-levels.h
@@ -0,0 +1,42 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Debug levels control file for the i2400m-usb module
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#ifndef __debug_levels__h__
+#define __debug_levels__h__
+
+/* Maximum compile and run time debug level for all submodules */
+#define D_MODULENAME i2400m_usb
+#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL
+
+#include <linux/wimax/debug.h>
+
+/* List of all the enabled modules */
+enum d_module {
+	D_SUBMODULE_DECLARE(usb),
+	D_SUBMODULE_DECLARE(fw),
+	D_SUBMODULE_DECLARE(notif),
+	D_SUBMODULE_DECLARE(rx),
+	D_SUBMODULE_DECLARE(tx),
+};
+
+
+#endif /* #ifndef __debug_levels__h__ */
diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
new file mode 100644
index 0000000..5ad287c
--- /dev/null
+++ b/drivers/net/wimax/i2400m/usb-fw.c
@@ -0,0 +1,340 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Firmware uploader's USB specifics
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *  - Initial implementation
+ *
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *  - bus generic/specific split
+ *
+ * THE PROCEDURE
+ *
+ * See fw.c for the generic description of this procedure.
+ *
+ * This file implements only the USB specifics. It boils down to
+ * sending a command and waiting for an acknowledgement from the
+ * device.
+ *
+ * This code (and process) is single threaded. It assumes it is the
+ * only thread poking around (guaranteed by fw.c).
+ *
+ * COMMAND EXECUTION
+ *
+ * A write URB is posted with the buffer to the bulk output endpoint.
+ *
+ * ACK RECEPTION
+ *
+ * We just post a URB to the notification endpoint and wait for
+ * data. We repeat until we get all the data we expect (as indicated
+ * by the call from the bus generic code).
+ *
+ * The data is not read from the bulk in endpoint for boot mode.
+ *
+ * ROADMAP
+ *
+ * i2400mu_bus_bm_cmd_send
+ *   i2400m_bm_cmd_prepare...
+ *   i2400mu_tx_bulk_out
+ *
+ * i2400mu_bus_bm_wait_for_ack
+ *   i2400m_notif_submit
+ */
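+/*
+ * For illustration only, a rough sketch of how the bus-generic code
+ * in fw.c is expected to drive the two hooks implemented here for one
+ * command/ack round trip (local names are made up; the exact
+ * sequencing lives in fw.c):
+ *
+ *	ret = i2400m->bus_bm_cmd_send(i2400m, cmd, cmd_size, flags);
+ *	if (ret >= 0)
+ *		ret = i2400m->bus_bm_wait_for_ack(i2400m, ack, ack_size);
+ *
+ * Both hooks are pointed at the USB implementations in this file by
+ * the probe code in usb.c.
+ */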
+#include <linux/usb.h>
+#include "i2400m-usb.h"
+
+
+#define D_SUBMODULE fw
+#include "usb-debug-levels.h"
+
+
+/*
+ * Synchronous write to the device
+ *
+ * Takes care of updating EDC counts and thus of handling device errors.
+ */
+static
+ssize_t i2400mu_tx_bulk_out(struct i2400mu *i2400mu, void *buf, size_t buf_size)
+{
+	int result;
+	struct device *dev = &i2400mu->usb_iface->dev;
+	int len;
+	struct usb_endpoint_descriptor *epd;
+	int pipe, do_autopm = 1;
+
+	result = usb_autopm_get_interface(i2400mu->usb_iface);
+	if (result < 0) {
+		dev_err(dev, "BM-CMD: can't get autopm: %d\n", result);
+		do_autopm = 0;
+	}
+	epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_OUT);
+	pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+retry:
+	result = usb_bulk_msg(i2400mu->usb_dev, pipe, buf, buf_size, &len, HZ);
+	switch (result) {
+	case 0:
+		if (len != buf_size) {
+			dev_err(dev, "BM-CMD: short write (%u B vs %zu "
+				"expected)\n", len, buf_size);
+			result = -EIO;
+			break;
+		}
+		result = len;
+		break;
+	case -EINVAL:			/* while removing driver */
+	case -ENODEV:			/* dev disconnect ... */
+	case -ENOENT:			/* just ignore it */
+	case -ESHUTDOWN:		/* and exit */
+	case -ECONNRESET:
+		result = -ESHUTDOWN;
+		break;
+	case -ETIMEDOUT:			/* bah... */
+		break;
+	default:				/* any other? */
+		if (edc_inc(&i2400mu->urb_edc,
+			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+				dev_err(dev, "BM-CMD: maximum errors in "
+					"URB exceeded; resetting device\n");
+				usb_queue_reset_device(i2400mu->usb_iface);
+				result = -ENODEV;
+				break;
+		}
+		dev_err(dev, "BM-CMD: URB error %d, retrying\n",
+			result);
+		goto retry;
+	}
+	if (do_autopm)
+		usb_autopm_put_interface(i2400mu->usb_iface);
+	return result;
+}
+
+
+/*
+ * Send a boot-mode command over the bulk-out pipe
+ *
+ * Command can be a raw command, which requires no preparation (and
+ * which might not even follow the command format). Checks that the
+ * right amount of data was transferred.
+ *
+ * To satisfy USB requirements (no onstack, vmalloc or in data segment
+ * buffers), we copy the command to i2400m->bm_cmd_buf and send it from
+ * there.
+ *
+ * @flags: pass thru from i2400m_bm_cmd()
+ * @return: cmd_size if ok, < 0 errno code on error.
+ */
+ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *i2400m,
+				const struct i2400m_bootrom_header *_cmd,
+				size_t cmd_size, int flags)
+{
+	ssize_t result;
+	struct device *dev = i2400m_dev(i2400m);
+	struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+	int opcode = _cmd == NULL ? -1 : i2400m_brh_get_opcode(_cmd);
+	struct i2400m_bootrom_header *cmd;
+	size_t cmd_size_a = ALIGN(cmd_size, 16);	/* USB restriction */
+
+	d_fnstart(8, dev, "(i2400m %p cmd %p size %zu)\n",
+		  i2400m, _cmd, cmd_size);
+	result = -E2BIG;
+	if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
+		goto error_too_big;
+	memcpy(i2400m->bm_cmd_buf, _cmd, cmd_size);
+	cmd = i2400m->bm_cmd_buf;
+	if (cmd_size_a > cmd_size)			/* Zero pad space */
+		memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
+	if ((flags & I2400M_BM_CMD_RAW) == 0) {
+		if (WARN_ON(i2400m_brh_get_response_required(cmd) == 0))
+			dev_warn(dev, "SW BUG: response_required == 0\n");
+		i2400m_bm_cmd_prepare(cmd);
+	}
+	result = i2400mu_tx_bulk_out(i2400mu, i2400m->bm_cmd_buf, cmd_size);
+	if (result < 0) {
+		dev_err(dev, "boot-mode cmd %d: cannot send: %zd\n",
+			opcode, result);
+		goto error_cmd_send;
+	}
+	if (result != cmd_size) {		/* all was transferred? */
+		dev_err(dev, "boot-mode cmd %d: incomplete transfer "
+			"(%zu vs %zu submitted)\n",  opcode, result, cmd_size);
+		result = -EIO;
+		goto error_cmd_size;
+	}
+error_cmd_size:
+error_cmd_send:
+error_too_big:
+	d_fnend(8, dev, "(i2400m %p cmd %p size %zu) = %zd\n",
+		i2400m, _cmd, cmd_size, result);
+	return result;
+}
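+
+/*
+ * For illustration, the zero padding above: a 20 byte command gives
+ * cmd_size_a = ALIGN(20, 16) = 32, so the 12 bytes following the
+ * command in bm_cmd_buf are cleared to zero before the command is
+ * handed to i2400mu_tx_bulk_out().
+ */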
+
+
+static
+void __i2400mu_bm_notif_cb(struct urb *urb)
+{
+	complete(urb->context);
+}
+
+
+/*
+ * Submit a read to the notification endpoint
+ *
+ * @i2400m: device descriptor
+ * @urb: urb to use
+ * @completion: completion variable to complete when done
+ *
+ * Data is always read to i2400m->bm_ack_buf
+ */
+static
+int i2400mu_notif_submit(struct i2400mu *i2400mu, struct urb *urb,
+			 struct completion *completion)
+{
+	struct i2400m *i2400m = &i2400mu->i2400m;
+	struct usb_endpoint_descriptor *epd;
+	int pipe;
+
+	epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_NOTIFICATION);
+	pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+	usb_fill_int_urb(urb, i2400mu->usb_dev, pipe,
+			 i2400m->bm_ack_buf, I2400M_BM_ACK_BUF_SIZE,
+			 __i2400mu_bm_notif_cb, completion,
+			 epd->bInterval);
+	return usb_submit_urb(urb, GFP_KERNEL);
+}
+
+
+/*
+ * Read an ack from the notification endpoint
+ *
+ * @i2400m: device descriptor
+ * @_ack: pointer to where to store the read data
+ * @ack_size: how many bytes we should read
+ *
+ * Returns: < 0 errno code on error; otherwise, amount of received bytes.
+ *
+ * Submits a notification read, appends the read data to the given ack
+ * buffer and then repeats (until @ack_size bytes have been
+ * received).
+ */
+ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
+				    struct i2400m_bootrom_header *_ack,
+				    size_t ack_size)
+{
+	ssize_t result = -ENOMEM;
+	struct device *dev = i2400m_dev(i2400m);
+	struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+	struct urb notif_urb;
+	void *ack = _ack;
+	size_t offset, len;
+	long val;
+	int do_autopm = 1;
+	DECLARE_COMPLETION_ONSTACK(notif_completion);
+
+	d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
+		  i2400m, ack, ack_size);
+	BUG_ON(_ack == i2400m->bm_ack_buf);
+	result = usb_autopm_get_interface(i2400mu->usb_iface);
+	if (result < 0) {
+		dev_err(dev, "BM-ACK: can't get autopm: %d\n", (int) result);
+		do_autopm = 0;
+	}
+	usb_init_urb(&notif_urb);	/* ready notifications */
+	usb_get_urb(&notif_urb);
+	offset = 0;
+	while (offset < ack_size) {
+		init_completion(&notif_completion);
+		result = i2400mu_notif_submit(i2400mu, &notif_urb,
+					      &notif_completion);
+		if (result < 0)
+			goto error_notif_urb_submit;
+		val = wait_for_completion_interruptible_timeout(
+			&notif_completion, HZ);
+		if (val == 0) {
+			result = -ETIMEDOUT;
+			usb_kill_urb(&notif_urb);	/* Timed out */
+			goto error_notif_wait;
+		}
+		if (val == -ERESTARTSYS) {
+			result = -EINTR;		/* Interrupted */
+			usb_kill_urb(&notif_urb);
+			goto error_notif_wait;
+		}
+		result = notif_urb.status;		/* How was the ack? */
+		switch (result) {
+		case 0:
+			break;
+		case -EINVAL:			/* while removing driver */
+		case -ENODEV:			/* dev disconnect ... */
+		case -ENOENT:			/* just ignore it */
+		case -ESHUTDOWN:		/* and exit */
+		case -ECONNRESET:
+			result = -ESHUTDOWN;
+			goto error_dev_gone;
+		default:				/* any other? */
+			usb_kill_urb(&notif_urb);	/* kill it, just in case */
+			if (edc_inc(&i2400mu->urb_edc,
+				    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
+				goto error_exceeded;
+			dev_err(dev, "BM-ACK: URB error %d, "
+				"retrying\n", notif_urb.status);
+			continue;	/* retry */
+		}
+		if (notif_urb.actual_length == 0) {
+			d_printf(6, dev, "ZLP received, retrying\n");
+			continue;
+		}
+		/* Got data, append it to the buffer */
+		len = min(ack_size - offset, (size_t) notif_urb.actual_length);
+		memcpy(ack + offset, i2400m->bm_ack_buf, len);
+		offset += len;
+	}
+	result = offset;
+error_notif_urb_submit:
+error_notif_wait:
+error_dev_gone:
+out:
+	if (do_autopm)
+		usb_autopm_put_interface(i2400mu->usb_iface);
+	d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %zd\n",
+		i2400m, ack, ack_size, result);
+	return result;
+
+error_exceeded:
+	dev_err(dev, "bm: maximum errors in notification URB exceeded; "
+		"resetting device\n");
+	usb_queue_reset_device(i2400mu->usb_iface);
+	goto out;
+}
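+
+/*
+ * For illustration, the accumulation loop above: if @ack_size is 16
+ * and the device delivers the ack as two 8 byte notifications, the
+ * loop runs twice, copying to offsets 0 and 8 of the caller's buffer,
+ * and returns 16. A ZLP in between just causes an extra iteration
+ * without advancing the offset.
+ */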
diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c
new file mode 100644
index 0000000..9702c22b
--- /dev/null
+++ b/drivers/net/wimax/i2400m/usb-notif.c
@@ -0,0 +1,269 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m over USB
+ * Notification handling
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *  - Initial implementation
+ *
+ *
+ * The notification endpoint is active when the device is not in boot
+ * mode; here we just read and get notifications; based on those, we
+ * act to either reinitialize the device after a reboot or to submit
+ * an RX request.
+ *
+ * ROADMAP
+ *
+ * i2400mu_usb_notification_setup()
+ *
+ * i2400mu_usb_notification_release()
+ *
+ * i2400mu_usb_notification_cb()	Called when a URB is ready
+ *   i2400mu_notif_grok()
+ *     i2400m_dev_reset_handle()
+ *     i2400mu_rx_kick()
+ */
+#include <linux/usb.h>
+#include "i2400m-usb.h"
+
+
+#define D_SUBMODULE notif
+#include "usb-debug-levels.h"
+
+
+static const
+__le32 i2400m_ZERO_BARKER[4] = { 0, 0, 0, 0 };
+
+
+/*
+ * Process a received notification
+ *
+ * In normal operation mode, we can only receive two types of payloads
+ * on the notification endpoint:
+ *
+ *   - a reboot barker: we bootstrap the device (it has reset itself).
+ *
+ *   - a block of zeroes: there is pending data in the IN endpoint.
+ */
+static
+int i2400mu_notification_grok(struct i2400mu *i2400mu, const void *buf,
+				 size_t buf_len)
+{
+	int ret;
+	struct device *dev = &i2400mu->usb_iface->dev;
+	struct i2400m *i2400m = &i2400mu->i2400m;
+
+	d_fnstart(4, dev, "(i2400m %p buf %p buf_len %zu)\n",
+		  i2400mu, buf, buf_len);
+	ret = -EIO;
+	if (buf_len < sizeof(i2400m_NBOOT_BARKER))
+		/* Not a bug, just ignore */
+		goto error_bad_size;
+	if (!memcmp(i2400m_NBOOT_BARKER, buf, sizeof(i2400m_NBOOT_BARKER))
+	    || !memcmp(i2400m_SBOOT_BARKER, buf, sizeof(i2400m_SBOOT_BARKER)))
+		ret = i2400m_dev_reset_handle(i2400m);
+	else if (!memcmp(i2400m_ZERO_BARKER, buf, sizeof(i2400m_ZERO_BARKER))) {
+		i2400mu_rx_kick(i2400mu);
+		ret = 0;
+	} else {	/* Unknown or unexpected data in the notif message */
+		char prefix[64];
+		ret = -EIO;
+		dev_err(dev, "HW BUG? Unknown/unexpected data in notification "
+			"message (%zu bytes)\n", buf_len);
+		snprintf(prefix, sizeof(prefix), "%s %s: ",
+			 dev_driver_string(dev), dev->bus_id);
+		if (buf_len > 64) {
+			print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+				       8, 4, buf, 64, 0);
+			printk(KERN_ERR "%s... (only first 64 bytes "
+			       "dumped)\n", prefix);
+		} else
+			print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+				       8, 4, buf, buf_len, 0);
+	}
+error_bad_size:
+	d_fnend(4, dev, "(i2400m %p buf %p buf_len %zu) = %d\n",
+		i2400mu, buf, buf_len, ret);
+	return ret;
+}
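+
+/*
+ * For illustration, the dispatch above on a 'data ready' event: a
+ * notification carrying (at least) 16 bytes of zeroes matches
+ * i2400m_ZERO_BARKER, so we only call i2400mu_rx_kick() to wake the
+ * RX thread; a reboot barker routes to i2400m_dev_reset_handle()
+ * instead, and anything else is dumped as a possible HW bug.
+ */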
+
+
+/*
+ * URB callback for the notification endpoint
+ *
+ * @urb: the urb received from the notification endpoint
+ *
+ * This function just processes the USB side of the transaction:
+ * checks that everything is fine, passes the processing to
+ * i2400mu_notification_grok() and resubmits the URB.
+ */
+static
+void i2400mu_notification_cb(struct urb *urb)
+{
+	int ret;
+	struct i2400mu *i2400mu = urb->context;
+	struct device *dev = &i2400mu->usb_iface->dev;
+
+	d_fnstart(4, dev, "(urb %p status %d actual_length %d)\n",
+		  urb, urb->status, urb->actual_length);
+	ret = urb->status;
+	switch (ret) {
+	case 0:
+		ret = i2400mu_notification_grok(i2400mu, urb->transfer_buffer,
+						urb->actual_length);
+		if (ret == -EIO && edc_inc(&i2400mu->urb_edc, EDC_MAX_ERRORS,
+					   EDC_ERROR_TIMEFRAME))
+			goto error_exceeded;
+		if (ret == -ENOMEM)	/* uff...power cycle? shutdown? */
+			goto error_exceeded;
+		break;
+	case -EINVAL:			/* while removing driver */
+	case -ENODEV:			/* dev disconnect ... */
+	case -ENOENT:			/* ditto */
+	case -ESHUTDOWN:		/* URB killed */
+	case -ECONNRESET:		/* disconnection */
+		goto out;		/* don't resubmit */
+	default:			/* Some error? */
+		if (edc_inc(&i2400mu->urb_edc,
+			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
+			goto error_exceeded;
+		dev_err(dev, "notification: URB error %d, retrying\n",
+			urb->status);
+	}
+	usb_mark_last_busy(i2400mu->usb_dev);
+	ret = usb_submit_urb(i2400mu->notif_urb, GFP_ATOMIC);
+	switch (ret) {
+	case 0:
+	case -EINVAL:			/* while removing driver */
+	case -ENODEV:			/* dev disconnect ... */
+	case -ENOENT:			/* ditto */
+	case -ESHUTDOWN:		/* URB killed */
+	case -ECONNRESET:		/* disconnection */
+		break;			/* just ignore */
+	default:			/* Some error? */
+		dev_err(dev, "notification: cannot submit URB: %d\n", ret);
+		goto error_submit;
+	}
+	d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n",
+		urb, urb->status, urb->actual_length);
+	return;
+
+error_exceeded:
+	dev_err(dev, "maximum errors in notification URB exceeded; "
+		"resetting device\n");
+error_submit:
+	usb_queue_reset_device(i2400mu->usb_iface);
+out:
+	d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n",
+		urb, urb->status, urb->actual_length);
+	return;
+}
+
+
+/*
+ * Set up the notification endpoint
+ *
+ * @i2400m: device descriptor
+ *
+ * This procedure prepares the notification urb and handler for receiving
+ * unsolicited barkers from the device.
+ */
+int i2400mu_notification_setup(struct i2400mu *i2400mu)
+{
+	struct device *dev = &i2400mu->usb_iface->dev;
+	int usb_pipe, ret = 0;
+	struct usb_endpoint_descriptor *epd;
+	char *buf;
+
+	d_fnstart(4, dev, "(i2400m %p)\n", i2400mu);
+	buf = kmalloc(I2400MU_MAX_NOTIFICATION_LEN, GFP_KERNEL | GFP_DMA);
+	if (buf == NULL) {
+		dev_err(dev, "notification: buffer allocation failed\n");
+		ret = -ENOMEM;
+		goto error_buf_alloc;
+	}
+
+	i2400mu->notif_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!i2400mu->notif_urb) {
+		ret = -ENOMEM;
+		dev_err(dev, "notification: cannot allocate URB\n");
+		goto error_alloc_urb;
+	}
+	epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_NOTIFICATION);
+	usb_pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+	usb_fill_int_urb(i2400mu->notif_urb, i2400mu->usb_dev, usb_pipe,
+			 buf, I2400MU_MAX_NOTIFICATION_LEN,
+			 i2400mu_notification_cb, i2400mu, epd->bInterval);
+	ret = usb_submit_urb(i2400mu->notif_urb, GFP_KERNEL);
+	if (ret != 0) {
+		dev_err(dev, "notification: cannot submit URB: %d\n", ret);
+		goto error_submit;
+	}
+	d_fnend(4, dev, "(i2400m %p) = %d\n", i2400mu, ret);
+	return ret;
+
+error_submit:
+	usb_free_urb(i2400mu->notif_urb);
+error_alloc_urb:
+	kfree(buf);
+error_buf_alloc:
+	d_fnend(4, dev, "(i2400m %p) = %d\n", i2400mu, ret);
+	return ret;
+}
+
+
+/*
+ * Tear down of the notification mechanism
+ *
+ * @i2400m: device descriptor
+ *
+ * Kill the interrupt endpoint urb, free any allocated resources.
+ *
+ * We need to check if we have done it before as, for example,
+ * _suspend() calls this; if after a suspend() we get a _disconnect()
+ * (as is the case when hibernating), nothing bad happens.
+ */
+void i2400mu_notification_release(struct i2400mu *i2400mu)
+{
+	struct device *dev = &i2400mu->usb_iface->dev;
+
+	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
+	if (i2400mu->notif_urb != NULL) {
+		usb_kill_urb(i2400mu->notif_urb);
+		kfree(i2400mu->notif_urb->transfer_buffer);
+		usb_free_urb(i2400mu->notif_urb);
+		i2400mu->notif_urb = NULL;
+	}
+	d_fnend(4, dev, "(i2400mu %p)\n", i2400mu);
+}
diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c
new file mode 100644
index 0000000..074cc1f
--- /dev/null
+++ b/drivers/net/wimax/i2400m/usb-rx.c
@@ -0,0 +1,417 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * USB RX handling
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ *  - Initial implementation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *  - Use skb_clone(), break up processing in chunks
+ *  - Split transport/device specific
+ *  - Make buffer size dynamic to exert less memory pressure
+ *
+ *
+ * This handles the RX path on USB.
+ *
+ * When a notification is received that says 'there is RX data ready',
+ * we call i2400mu_rx_kick(); that wakes up the RX kthread, which
+ * reads a buffer from USB and passes it to i2400m_rx() in the generic
+ * handling code. The RX buffer has a specific format that is
+ * described in rx.c.
+ *
+ * We use a kernel thread in a loop because:
+ *
+ *  - we want to be able to call the USB power management get/put
+ *    functions (blocking) before each transaction.
+ *
+ *  - We might get a lot of notifications and we don't want to submit
+ *    a zillion reads; by serializing, we are throttling.
+ *
+ *  - RX data processing can get heavy enough that it is not
+ *    appropriate to do in the USB callback; thus we run it in
+ *    process context.
+ *
+ * We provide a read buffer of an arbitrary size (short of a page); if
+ * the callback reports -EOVERFLOW, it means it was too small, so we
+ * just double the size and retry (being careful to append, as
+ * sometimes the device provided some data). Every now and then we
+ * check if the average received message size is smaller than half of
+ * the current buffer size and, if so, we halve the buffer. In the
+ * end, the size of the preallocated buffer should track the average
+ * received transaction size, adapting dynamically to it.
+ *
+ * ROADMAP
+ *
+ * i2400mu_rx_kick()		   Called from notif.c when we get a
+ *   			           'data ready' notification
+ * i2400mu_rxd()                   Kernel RX daemon
+ *   i2400mu_rx()                  Receive USB data
+ *   i2400m_rx()                   Send data to generic i2400m RX handling
+ *
+ * i2400mu_rx_setup()              called from i2400mu_bus_dev_start()
+ *
+ * i2400mu_rx_release()            called from i2400mu_bus_dev_stop()
+ */
+#include <linux/workqueue.h>
+#include <linux/usb.h>
+#include "i2400m-usb.h"
+
+
+#define D_SUBMODULE rx
+#include "usb-debug-levels.h"
+
+/*
+ * Dynamic RX size
+ *
+ * We can't let the rx_size be a multiple of 512 bytes (the RX
+ * endpoint's max packet size). On some USB host controllers (we
+ * haven't been able to fully characterize which), if the device is
+ * about to send (for example) X bytes and we only post a buffer to
+ * receive n*512, it will fail to mark that as babble (so that
+ * i2400mu_rx() [case -EOVERFLOW] can resize the buffer and get the
+ * rest).
+ *
+ * So on growing or shrinking, if the result is a multiple of the
+ * max packet size, we remove some bytes (instead of increasing some,
+ * so that with a buddy allocator we try to waste less space).
+ *
+ * Note we also need a hook for this in i2400mu_rx() -- when we do the
+ * first read, we are sure we won't hit this spot because
+ * i2400mu->rx_size has been set properly. However, if we have to
+ * double because of -EOVERFLOW, when we launch the read to get the
+ * rest of the data, we *have* to make sure that also is not a
+ * multiple of the max_pkt_size.
+ */
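+
+/*
+ * Worked example, for illustration: with rx_size = 1024,
+ * i2400mu_rx_size_grow() first computes 2 * 1024 = 2048; as that is a
+ * multiple of the 512 byte max packet size, it returns 2048 - 8 =
+ * 2040. With rx_size = 600 it simply returns 1200.
+ */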
+
+static
+size_t i2400mu_rx_size_grow(struct i2400mu *i2400mu)
+{
+	struct device *dev = &i2400mu->usb_iface->dev;
+	size_t rx_size;
+	const size_t max_pkt_size = 512;
+
+	rx_size = 2 * i2400mu->rx_size;
+	if (rx_size % max_pkt_size == 0) {
+		rx_size -= 8;
+		d_printf(1, dev,
+			 "RX: expected size grew to %zu [adjusted -8] "
+			 "from %zu\n",
+			 rx_size, i2400mu->rx_size);
+	} else
+		d_printf(1, dev,
+			 "RX: expected size grew to %zu from %zu\n",
+			 rx_size, i2400mu->rx_size);
+	return rx_size;
+}
+
+
+static
+void i2400mu_rx_size_maybe_shrink(struct i2400mu *i2400mu)
+{
+	const size_t max_pkt_size = 512;
+	struct device *dev = &i2400mu->usb_iface->dev;
+
+	if (unlikely(i2400mu->rx_size_cnt >= 100
+		     && i2400mu->rx_size_auto_shrink)) {
+		size_t avg_rx_size =
+			i2400mu->rx_size_acc / i2400mu->rx_size_cnt;
+		size_t new_rx_size = i2400mu->rx_size / 2;
+		if (avg_rx_size < new_rx_size) {
+			if (new_rx_size % max_pkt_size == 0) {
+				new_rx_size -= 8;
+				d_printf(1, dev,
+					 "RX: expected size shrank to %zu "
+					 "[adjusted -8] from %zu\n",
+					 new_rx_size, i2400mu->rx_size);
+			} else
+				d_printf(1, dev,
+					 "RX: expected size shrank to %zu "
+					 "from %zu\n",
+					 new_rx_size, i2400mu->rx_size);
+			i2400mu->rx_size = new_rx_size;
+			i2400mu->rx_size_cnt = 0;
+			i2400mu->rx_size_acc = i2400mu->rx_size;
+		}
+	}
+}
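+
+/*
+ * For illustration: with rx_size = 2048 and an average received size
+ * of 300 bytes over the last 100 messages, the check above computes
+ * new_rx_size = 1024; since 300 < 1024 the buffer shrinks, and since
+ * 1024 is a multiple of 512 it is adjusted to 1016. The counters are
+ * then reset to start a new measurement window.
+ */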
+
+/*
+ * Receive a message with payloads from the USB bus into an skb
+ *
+ * @i2400mu: USB device descriptor
+ * @rx_skb: skb where to place the received message
+ *
+ * Deals with all the USB-specifics of receiving, dynamically
+ * increasing the buffer size if so needed. Returns the payload in the
+ * skb, ready to process. On a zero-length packet, we retry.
+ *
+ * On soft USB errors, we retry (until they become too frequent and
+ * then are promoted to hard); on hard USB errors, we reset the
+ * device. On other errors (e.g., skb reallocation failure), we just
+ * drop the message and hope the next invocation solves it.
+ *
+ * Returns: pointer to the skb if ok, ERR_PTR on error.
+ *   NOTE: this function might realloc the skb (if it is too small),
+ *   so always update with the one returned.
+ *   ERR_PTR() is < 0 on error.
+ */
+static
+struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
+{
+	int result = 0;
+	struct device *dev = &i2400mu->usb_iface->dev;
+	int usb_pipe, read_size, rx_size, do_autopm;
+	struct usb_endpoint_descriptor *epd;
+	const size_t max_pkt_size = 512;
+
+	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
+	do_autopm = atomic_read(&i2400mu->do_autopm);
+	result = do_autopm ?
+		usb_autopm_get_interface(i2400mu->usb_iface) : 0;
+	if (result < 0) {
+		dev_err(dev, "RX: can't get autopm: %d\n", result);
+		do_autopm = 0;
+	}
+	epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_IN);
+	usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+retry:
+	rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
+	if (unlikely(rx_size % max_pkt_size == 0)) {
+		rx_size -= 8;
+		d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size);
+	}
+	result = usb_bulk_msg(
+		i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
+		rx_size, &read_size, HZ);
+	usb_mark_last_busy(i2400mu->usb_dev);
+	switch (result) {
+	case 0:
+		if (read_size == 0)
+			goto retry;	/* ZLP, just resubmit */
+		skb_put(rx_skb, read_size);
+		break;
+	case -EINVAL:			/* while removing driver */
+	case -ENODEV:			/* dev disconnect ... */
+	case -ENOENT:			/* just ignore it */
+	case -ESHUTDOWN:
+	case -ECONNRESET:
+		break;
+	case -EOVERFLOW: {		/* too small, reallocate */
+		struct sk_buff *new_skb;
+		rx_size = i2400mu_rx_size_grow(i2400mu);
+		if (rx_size <= (1 << 16))	/* cap it */
+			i2400mu->rx_size = rx_size;
+		else if (printk_ratelimit()) {
+			dev_err(dev, "BUG? rx_size up to %d\n", rx_size);
+			result = -EINVAL;
+			goto out;
+		}
+		skb_put(rx_skb, read_size);
+		new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len,
+					  GFP_KERNEL);
+		if (new_skb == NULL) {
+			if (printk_ratelimit())
+				dev_err(dev, "RX: Can't reallocate skb to %d; "
+					"RX dropped\n", rx_size);
+			kfree_skb(rx_skb);
+			result = 0;
+			goto out;	/* drop it...*/
+		}
+		kfree_skb(rx_skb);
+		rx_skb = new_skb;
+		i2400mu->rx_size_cnt = 0;
+		i2400mu->rx_size_acc = i2400mu->rx_size;
+		d_printf(1, dev, "RX: size changed to %d, received %d, "
+			 "copied %d, capacity %ld\n",
+			 rx_size, read_size, rx_skb->len,
+			 (long) (skb_end_pointer(new_skb) - new_skb->head));
+		goto retry;
+	}
+	case -ETIMEDOUT:
+		/* In most cases, this happens because the hardware scheduled
+		 * a read when there was no data -- unfortunately, we have no
+		 * way to tell this timeout apart from a USB timeout, so we
+		 * just ignore it. */
+		dev_err(dev, "RX: timeout: %d\n", result);
+		result = 0;
+		break;
+	default:			/* Any error */
+		if (edc_inc(&i2400mu->urb_edc,
+			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
+			goto error_reset;
+		dev_err(dev, "RX: error receiving URB: %d, retrying\n", result);
+		goto retry;
+	}
+out:
+	if (do_autopm)
+		usb_autopm_put_interface(i2400mu->usb_iface);
+	d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb);
+	return rx_skb;
+
+error_reset:
+	dev_err(dev, "RX: maximum errors in URB exceeded; "
+		"resetting device\n");
+	usb_queue_reset_device(i2400mu->usb_iface);
+	rx_skb = ERR_PTR(result);
+	goto out;
+}
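+
+/*
+ * For illustration, the -EOVERFLOW path above: with a 2040 byte skb
+ * and a device message larger than that, the bulk read babbles after
+ * filling the buffer; whatever was received is kept (skb_put() plus
+ * skb_copy_expand() into a bigger skb) and the read is retried for
+ * the remainder, with the retry length again adjusted so it is not a
+ * multiple of 512.
+ */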
+
+
+/*
+ * Kernel thread for USB reception of data
+ *
+ * This thread waits for a kick; once kicked, it will allocate an skb
+ * and receive a single message to it from USB (using
+ * i2400mu_rx()). Once received, it is passed to the generic i2400m RX
+ * code for processing.
+ *
+ * When done processing, it runs some dirty statistics to check
+ * whether the last 100 messages received were, on average, smaller
+ * than half of the current RX buffer size. In that case, the RX
+ * buffer size is halved. This helps lower the pressure on the
+ * memory allocator.
+ *
+ * Hard errors force the thread to exit.
+ */
+static
+int i2400mu_rxd(void *_i2400mu)
+{
+	int result = 0;
+	struct i2400mu *i2400mu = _i2400mu;
+	struct i2400m *i2400m = &i2400mu->i2400m;
+	struct device *dev = &i2400mu->usb_iface->dev;
+	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+	size_t pending;
+	int rx_size;
+	struct sk_buff *rx_skb;
+
+	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
+	while (1) {
+		d_printf(2, dev, "TX: waiting for messages\n");
+		pending = 0;
+		wait_event_interruptible(
+			i2400mu->rx_wq,
+			(kthread_should_stop()	/* check this first! */
+			 || (pending = atomic_read(&i2400mu->rx_pending_count)))
+			);
+		if (kthread_should_stop())
+			break;
+		if (pending == 0)
+			continue;
+		rx_size = i2400mu->rx_size;
+		d_printf(2, dev, "RX: reading up to %d bytes\n", rx_size);
+		rx_skb = __netdev_alloc_skb(net_dev, rx_size, GFP_KERNEL);
+		if (rx_skb == NULL) {
+			dev_err(dev, "RX: can't allocate skb [%d bytes]\n",
+				rx_size);
+			msleep(50);	/* give it some time? */
+			continue;
+		}
+
+		/* Receive the message with the payloads */
+		rx_skb = i2400mu_rx(i2400mu, rx_skb);
+		result = PTR_ERR(rx_skb);
+		if (IS_ERR(rx_skb))
+			goto out;
+		atomic_dec(&i2400mu->rx_pending_count);
+		if (rx_skb->len == 0) {	/* some ignorable condition */
+			kfree_skb(rx_skb);
+			continue;
+		}
+
+		/* Deliver the message to the generic i2400m code */
+		i2400mu->rx_size_cnt++;
+		i2400mu->rx_size_acc += rx_skb->len;
+		result = i2400m_rx(i2400m, rx_skb);
+		if (result == -EIO
+		    && edc_inc(&i2400mu->urb_edc,
+			       EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+			goto error_reset;
+		}
+
+		/* Maybe adjust RX buffer size */
+		i2400mu_rx_size_maybe_shrink(i2400mu);
+	}
+	result = 0;
+out:
+	d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
+	return result;
+
+error_reset:
+	dev_err(dev, "RX: maximum errors in received buffer exceeded; "
+		"resetting device\n");
+	usb_queue_reset_device(i2400mu->usb_iface);
+	goto out;
+}
+
+
+/*
+ * Start reading from the device
+ *
+ * @i2400m: device instance
+ *
+ * Notify the RX thread that there is data pending.
+ */
+void i2400mu_rx_kick(struct i2400mu *i2400mu)
+{
+	struct i2400m *i2400m = &i2400mu->i2400m;
+	struct device *dev = &i2400mu->usb_iface->dev;
+
+	d_fnstart(3, dev, "(i2400mu %p)\n", i2400m);
+	atomic_inc(&i2400mu->rx_pending_count);
+	wake_up_all(&i2400mu->rx_wq);
+	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+
+
+int i2400mu_rx_setup(struct i2400mu *i2400mu)
+{
+	int result = 0;
+	struct i2400m *i2400m = &i2400mu->i2400m;
+	struct device *dev = &i2400mu->usb_iface->dev;
+	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+
+	i2400mu->rx_kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
+					  wimax_dev->name);
+	if (IS_ERR(i2400mu->rx_kthread)) {
+		result = PTR_ERR(i2400mu->rx_kthread);
+		dev_err(dev, "RX: cannot start thread: %d\n", result);
+	}
+	return result;
+}
+
+void i2400mu_rx_release(struct i2400mu *i2400mu)
+{
+	kthread_stop(i2400mu->rx_kthread);
+}
+
diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c
new file mode 100644
index 0000000..dfd8933
--- /dev/null
+++ b/drivers/net/wimax/i2400m/usb-tx.c
@@ -0,0 +1,229 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * USB specific TX handling
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ *  - Initial implementation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *  - Split transport/device specific
+ *
+ *
+ * Takes the TX messages in the i2400m's driver TX FIFO and sends them
+ * to the device until there are no more.
+ *
+ * If we fail sending the message, we just drop it. There isn't much
+ * we can do at this point. We could also retry, but the USB stack has
+ * already retried and still failed, so there is not much of a
+ * point. Besides, most of the traffic is network traffic, which has
+ * recovery methods for dropped packets.
+ *
+ * For sending we just obtain a FIFO buffer to send, send it to the
+ * USB bulk out, tell the TX FIFO code we have sent it; query for
+ * another one, etc... until done.
+ *
+ * We use a thread so we can call usb_autopm_get_interface() and
+ * usb_autopm_put_interface() for each transaction; this way, when
+ * the device goes idle, it will suspend. It also has less overhead
+ * than a dedicated workqueue, as it is being used for a single task.
+ *
+ * ROADMAP
+ *
+ * i2400mu_tx_setup()
+ * i2400mu_tx_release()
+ *
+ * i2400mu_bus_tx_kick()	- Called by the tx.c code when there
+ *                                is new data in the FIFO.
+ * i2400mu_txd()
+ *   i2400m_tx_msg_get()
+ *   i2400m_tx_msg_sent()
+ */
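+/*
+ * A rough sketch of the handshake described above (illustrative only;
+ * the real consumer loop is i2400mu_txd() below):
+ *
+ *	tx.c queues a message in the FIFO and calls i2400m->bus_tx_kick()
+ *	  -> i2400mu_bus_tx_kick() wakes up i2400mu->tx_wq
+ *	the TX kthread wakes up and loops:
+ *	  tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size);
+ *	  i2400mu_tx(i2400mu, tx_msg, tx_msg_size);
+ *	  i2400m_tx_msg_sent(i2400m);
+ */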
+#include "i2400m-usb.h"
+
+
+#define D_SUBMODULE tx
+#include "usb-debug-levels.h"
+
+
+/*
+ * Send a TX message to the device over the USB bulk-out endpoint
+ *
+ * Note that any iteration consumes a message to be sent, no matter if
+ * it succeeds or fails (we have no real way to retry or complain).
+ *
+ * Return: 0 if ok, < 0 errno code on hard error.
+ */
+static
+int i2400mu_tx(struct i2400mu *i2400mu, struct i2400m_msg_hdr *tx_msg,
+	       size_t tx_msg_size)
+{
+	int result = 0;
+	struct i2400m *i2400m = &i2400mu->i2400m;
+	struct device *dev = &i2400mu->usb_iface->dev;
+	int usb_pipe, sent_size, do_autopm;
+	struct usb_endpoint_descriptor *epd;
+
+	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
+	do_autopm = atomic_read(&i2400mu->do_autopm);
+	result = do_autopm ?
+		usb_autopm_get_interface(i2400mu->usb_iface) : 0;
+	if (result < 0) {
+		dev_err(dev, "TX: can't get autopm: %d\n", result);
+		do_autopm = 0;
+	}
+	epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_OUT);
+	usb_pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+retry:
+	result = usb_bulk_msg(i2400mu->usb_dev, usb_pipe,
+			      tx_msg, tx_msg_size, &sent_size, HZ);
+	usb_mark_last_busy(i2400mu->usb_dev);
+	switch (result) {
+	case 0:
+		if (sent_size != tx_msg_size) {	/* Too short? drop it */
+			dev_err(dev, "TX: short write (%d B vs %zu "
+				"expected)\n", sent_size, tx_msg_size);
+			result = -EIO;
+		}
+		break;
+	case -EINVAL:			/* while removing driver */
+	case -ENODEV:			/* dev disconnect ... */
+	case -ENOENT:			/* just ignore it */
+	case -ESHUTDOWN:		/* and exit */
+	case -ECONNRESET:
+		result = -ESHUTDOWN;
+		break;
+	default:			/* Some error? */
+		if (edc_inc(&i2400mu->urb_edc,
+			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+			dev_err(dev, "TX: maximum errors in URB "
+				"exceeded; resetting device\n");
+			usb_queue_reset_device(i2400mu->usb_iface);
+		} else {
+			dev_err(dev, "TX: cannot send URB; retrying. "
+				"tx_msg @%zu %zu B [%d sent]: %d\n",
+				(void *) tx_msg - i2400m->tx_buf,
+				tx_msg_size, sent_size, result);
+			goto retry;
+		}
+	}
+	if (do_autopm)
+		usb_autopm_put_interface(i2400mu->usb_iface);
+	d_fnend(4, dev, "(i2400mu %p) = result\n", i2400mu);
+	return result;
+}
+
+
+/*
+ * Get the next TX message in the TX FIFO and send it to the device
+ *
+ * Note we exit the loop if i2400mu_tx() fails; that function only
+ * fails on hard error (failing to tx a buffer not being one of them,
+ * see its doc).
+ *
+ * Return: 0
+ */
+static
+int i2400mu_txd(void *_i2400mu)
+{
+	int result = 0;
+	struct i2400mu *i2400mu = _i2400mu;
+	struct i2400m *i2400m = &i2400mu->i2400m;
+	struct device *dev = &i2400mu->usb_iface->dev;
+	struct i2400m_msg_hdr *tx_msg;
+	size_t tx_msg_size;
+
+	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
+
+	while (1) {
+		d_printf(2, dev, "TX: waiting for messages\n");
+		tx_msg = NULL;
+		wait_event_interruptible(
+			i2400mu->tx_wq,
+			(kthread_should_stop()	/* check this first! */
+			 || (tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size)))
+			);
+		if (kthread_should_stop())
+			break;
+		WARN_ON(tx_msg == NULL);	/* should not happen...*/
+		d_printf(2, dev, "TX: submitting %zu bytes\n", tx_msg_size);
+		d_dump(5, dev, tx_msg, tx_msg_size);
+		/* Soft errors are ignored (not much we can do); only a
+		 * hard error from i2400mu_tx() breaks the loop below. */
+		result = i2400mu_tx(i2400mu, tx_msg, tx_msg_size);
+		i2400m_tx_msg_sent(i2400m);	/* ack it, advance the FIFO */
+		if (result < 0)
+			break;
+	}
+	d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
+	return result;
+}
+
+
+/*
+ * i2400m TX engine notifies us that there is data in the FIFO ready
+ * for TX
+ *
+ * Wake up the TX kthread; it will pick up the pending messages from
+ * the FIFO and send them.
+ */
+void i2400mu_bus_tx_kick(struct i2400m *i2400m)
+{
+	struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+	struct device *dev = &i2400mu->usb_iface->dev;
+
+	d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m);
+	wake_up_all(&i2400mu->tx_wq);
+	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+
+
+int i2400mu_tx_setup(struct i2400mu *i2400mu)
+{
+	int result = 0;
+	struct i2400m *i2400m = &i2400mu->i2400m;
+	struct device *dev = &i2400mu->usb_iface->dev;
+	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+
+	i2400mu->tx_kthread = kthread_run(i2400mu_txd, i2400mu, "%s-tx",
+					  wimax_dev->name);
+	if (IS_ERR(i2400mu->tx_kthread)) {
+		result = PTR_ERR(i2400mu->tx_kthread);
+		dev_err(dev, "TX: cannot start thread: %d\n", result);
+	}
+	return result;
+}
+
+void i2400mu_tx_release(struct i2400mu *i2400mu)
+{
+	kthread_stop(i2400mu->tx_kthread);
+}
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
new file mode 100644
index 0000000..c6d9346
--- /dev/null
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -0,0 +1,597 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Linux driver model glue for USB device, reset & fw upload
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * See i2400m-usb.h for a general description of this driver.
+ *
+ * This file implements the driver model glue and the hook-ups that
+ * the generic driver uses to implement the bus-specific functions
+ * (device communication setup/teardown, firmware upload and resetting).
+ *
+ * ROADMAP
+ *
+ * i2400mu_probe()
+ *   alloc_netdev()...
+ *     i2400mu_netdev_setup()
+ *       i2400mu_init()
+ *       i2400m_netdev_setup()
+ *   i2400m_setup()...
+ *
+ * i2400mu_disconnect
+ *   i2400m_release()
+ *   free_netdev()
+ *
+ * i2400mu_suspend()
+ *   i2400m_cmd_enter_powersave()
+ *   i2400mu_notification_release()
+ *
+ * i2400mu_resume()
+ *   i2400mu_notification_setup()
+ *
+ * i2400mu_bus_dev_start()        Called by i2400m_dev_start() [which is
+ *   i2400mu_tx_setup()           called by i2400m_setup()]
+ *   i2400mu_rx_setup()
+ *   i2400mu_notification_setup()
+ *
+ * i2400mu_bus_dev_stop()         Called by i2400m_dev_stop() [which is
+ *   i2400mu_notification_release()  called by i2400m_release()]
+ *   i2400mu_rx_release()
+ *   i2400mu_tx_release()
+ *
+ * i2400mu_bus_reset()            Called by i2400m->bus_reset
+ *   __i2400mu_reset()
+ *     __i2400mu_send_barker()
+ *   usb_reset_device()
+ */
+#include "i2400m-usb.h"
+#include <linux/wimax/i2400m.h>
+#include <linux/debugfs.h>
+
+
+#define D_SUBMODULE usb
+#include "usb-debug-levels.h"
+
+
+/* Our firmware file name */
+#define I2400MU_FW_FILE_NAME "i2400m-fw-usb-" I2400M_FW_VERSION ".sbcf"
+
+static
+int i2400mu_bus_dev_start(struct i2400m *i2400m)
+{
+	int result;
+	struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+	struct device *dev = &i2400mu->usb_iface->dev;
+
+	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+	result = i2400mu_tx_setup(i2400mu);
+	if (result < 0)
+		goto error_usb_tx_setup;
+	result = i2400mu_rx_setup(i2400mu);
+	if (result < 0)
+		goto error_usb_rx_setup;
+	result = i2400mu_notification_setup(i2400mu);
+	if (result < 0)
+		goto error_notif_setup;
+	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+	return result;
+
+error_notif_setup:
+	i2400mu_rx_release(i2400mu);
+error_usb_rx_setup:
+	i2400mu_tx_release(i2400mu);
+error_usb_tx_setup:
+	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+	return result;
+}
+
+
+static
+void i2400mu_bus_dev_stop(struct i2400m *i2400m)
+{
+	struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+	struct device *dev = &i2400mu->usb_iface->dev;
+
+	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+	i2400mu_notification_release(i2400mu);
+	i2400mu_rx_release(i2400mu);
+	i2400mu_tx_release(i2400mu);
+	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+
+
+/*
+ * Sends a barker buffer to the device
+ *
+ * This helper will allocate a kmalloced buffer and use it to transmit
+ * (then free it). The reason for this is that some architectures
+ * cannot use stack/vmalloc/text areas for DMA transfers.
+ *
+ * Error recovery here is simple: anything is considered a hard error
+ * and makes the reset code fall back to a last-resort bus-based reset.
+ */
+static
+int __i2400mu_send_barker(struct i2400mu *i2400mu,
+			  const __le32 *barker,
+			  size_t barker_size,
+			  unsigned endpoint)
+{
+	struct usb_endpoint_descriptor *epd = NULL;
+	int pipe, actual_len, ret;
+	struct device *dev = &i2400mu->usb_iface->dev;
+	void *buffer;
+	int do_autopm = 1;
+
+	ret = usb_autopm_get_interface(i2400mu->usb_iface);
+	if (ret < 0) {
+		dev_err(dev, "RESET: can't get autopm: %d\n", ret);
+		do_autopm = 0;
+	}
+	ret = -ENOMEM;
+	buffer = kmalloc(barker_size, GFP_KERNEL);
+	if (buffer == NULL)
+		goto error_kzalloc;
+	epd = usb_get_epd(i2400mu->usb_iface, endpoint);
+	pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+	memcpy(buffer, barker, barker_size);
+	ret = usb_bulk_msg(i2400mu->usb_dev, pipe, buffer, barker_size,
+			   &actual_len, HZ);
+	if (ret < 0) {
+		if (ret != -EINVAL)
+			dev_err(dev, "E: barker error: %d\n", ret);
+	} else if (actual_len != barker_size) {
+		dev_err(dev, "E: only %d bytes transmitted\n", actual_len);
+		ret = -EIO;
+	}
+	kfree(buffer);
+error_kzalloc:
+	if (do_autopm)
+		usb_autopm_put_interface(i2400mu->usb_iface);
+	return ret;
+}
+
+
+/*
+ * Reset a device at different levels (warm, cold or bus)
+ *
+ * @i2400m: device descriptor
+ * @rt: warm, cold or bus reset (I2400M_RT_WARM/COLD/BUS)
+ *
+ * Warm and cold resets get a USB reset if they fail.
+ *
+ * Warm reset:
+ *
+ * The device will be fully reset internally, but won't be
+ * disconnected from the USB bus (so no reenumeration will
+ * happen). Firmware upload will be necessary.
+ *
+ * The device will send a reboot barker in the notification endpoint
+ * that will trigger the driver to reinitialize the state
+ * automatically from usb-notif.c:i2400mu_notification_grok() into
+ * i2400m_dev_bootstrap_delayed().
+ *
+ * Cold and bus (USB) reset:
+ *
+ * The device will be fully reset internally, disconnected from the
+ * USB bus and a reenumeration will happen. Firmware upload will be
+ * necessary. Thus, we don't do any locking or struct
+ * reinitialization, as we are going to be fully disconnected and
+ * reenumerated.
+ *
+ * Note we need to return -ENODEV if a warm reset was requested and we
+ * had to resort to a bus reset. See i2400m_op_reset(), wimax_reset()
+ * and wimax_dev->op_reset.
+ *
+ * WARNING: no driver state saved/fixed
+ */
+static
+int i2400mu_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
+{
+	int result;
+	struct i2400mu *i2400mu =
+		container_of(i2400m, struct i2400mu, i2400m);
+	struct device *dev = i2400m_dev(i2400m);
+	static const __le32 i2400m_WARM_BOOT_BARKER[4] = {
+		__constant_cpu_to_le32(I2400M_WARM_RESET_BARKER),
+		__constant_cpu_to_le32(I2400M_WARM_RESET_BARKER),
+		__constant_cpu_to_le32(I2400M_WARM_RESET_BARKER),
+		__constant_cpu_to_le32(I2400M_WARM_RESET_BARKER),
+	};
+	static const __le32 i2400m_COLD_BOOT_BARKER[4] = {
+		__constant_cpu_to_le32(I2400M_COLD_RESET_BARKER),
+		__constant_cpu_to_le32(I2400M_COLD_RESET_BARKER),
+		__constant_cpu_to_le32(I2400M_COLD_RESET_BARKER),
+		__constant_cpu_to_le32(I2400M_COLD_RESET_BARKER),
+	};
+
+	d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt);
+	if (rt == I2400M_RT_WARM)
+		result = __i2400mu_send_barker(i2400mu, i2400m_WARM_BOOT_BARKER,
+					       sizeof(i2400m_WARM_BOOT_BARKER),
+					       I2400MU_EP_BULK_OUT);
+	else if (rt == I2400M_RT_COLD)
+		result = __i2400mu_send_barker(i2400mu, i2400m_COLD_BOOT_BARKER,
+					       sizeof(i2400m_COLD_BOOT_BARKER),
+					       I2400MU_EP_RESET_COLD);
+	else if (rt == I2400M_RT_BUS) {
+do_bus_reset:
+		result = usb_reset_device(i2400mu->usb_dev);
+		switch (result) {
+		case 0:
+		case -EINVAL:	/* device is gone */
+		case -ENODEV:
+		case -ENOENT:
+		case -ESHUTDOWN:
+			result = rt == I2400M_RT_WARM ? -ENODEV : 0;
+			break;	/* We assume the device is disconnected */
+		default:
+			dev_err(dev, "USB reset failed (%d), giving up!\n",
+				result);
+		}
+	} else
+		BUG();
+	if (result < 0
+	    && result != -EINVAL	/* device is gone */
+	    && rt != I2400M_RT_BUS) {
+		dev_err(dev, "%s reset failed (%d); trying USB reset\n",
+			rt == I2400M_RT_WARM ? "warm" : "cold", result);
+		rt = I2400M_RT_BUS;
+		goto do_bus_reset;
+	}
+	d_fnend(3, dev, "(i2400m %p rt %u) = %d\n", i2400m, rt, result);
+	return result;
+}
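+
+/*
+ * For illustration: i2400m->bus_reset(i2400m, I2400M_RT_WARM) sends
+ * the warm boot barker to the bulk-out endpoint, I2400M_RT_COLD sends
+ * the cold boot barker to the dedicated reset endpoint
+ * (I2400MU_EP_RESET_COLD), and I2400M_RT_BUS skips the barkers and
+ * goes straight to usb_reset_device(). A failed warm/cold barker
+ * write falls back to the bus reset path above.
+ */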
+
+
+static
+void i2400mu_netdev_setup(struct net_device *net_dev)
+{
+	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+	struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+	i2400mu_init(i2400mu);
+	i2400m_netdev_setup(net_dev);
+}
+
+
+/*
+ * Debug levels control; see debug.h
+ */
+struct d_level D_LEVEL[] = {
+	D_SUBMODULE_DEFINE(usb),
+	D_SUBMODULE_DEFINE(fw),
+	D_SUBMODULE_DEFINE(notif),
+	D_SUBMODULE_DEFINE(rx),
+	D_SUBMODULE_DEFINE(tx),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+
+
+#define __debugfs_register(prefix, name, parent)			\
+do {									\
+	result = d_level_register_debugfs(prefix, name, parent);	\
+	if (result < 0)							\
+		goto error;						\
+} while (0)
+
+
+static
+int i2400mu_debugfs_add(struct i2400mu *i2400mu)
+{
+	int result;
+	struct device *dev = &i2400mu->usb_iface->dev;
+	struct dentry *dentry = i2400mu->i2400m.wimax_dev.debugfs_dentry;
+	struct dentry *fd;
+
+	dentry = debugfs_create_dir("i2400m-usb", dentry);
+	result = PTR_ERR(dentry);
+	if (IS_ERR(dentry)) {
+		if (result == -ENODEV)
+			result = 0;	/* No debugfs support */
+		goto error;
+	}
+	i2400mu->debugfs_dentry = dentry;
+	__debugfs_register("dl_", usb, dentry);
+	__debugfs_register("dl_", fw, dentry);
+	__debugfs_register("dl_", notif, dentry);
+	__debugfs_register("dl_", rx, dentry);
+	__debugfs_register("dl_", tx, dentry);
+
+	/* Don't touch these if you don't know what you are doing */
+	fd = debugfs_create_u8("rx_size_auto_shrink", 0600, dentry,
+			       &i2400mu->rx_size_auto_shrink);
+	result = PTR_ERR(fd);
+	if (IS_ERR(fd) && result != -ENODEV) {
+		dev_err(dev, "Can't create debugfs entry "
+			"rx_size_auto_shrink: %d\n", result);
+		goto error;
+	}
+
+	fd = debugfs_create_size_t("rx_size", 0600, dentry,
+				   &i2400mu->rx_size);
+	result = PTR_ERR(fd);
+	if (IS_ERR(fd) && result != -ENODEV) {
+		dev_err(dev, "Can't create debugfs entry "
+			"rx_size: %d\n", result);
+		goto error;
+	}
+
+	return 0;
+
+error:
+	debugfs_remove_recursive(i2400mu->debugfs_dentry);
+	return result;
+}
+
+
+/*
+ * Probe an i2400m interface and register it
+ *
+ * @iface:   USB interface to link to
+ * @id:      USB class/subclass/protocol id
+ * @returns: 0 if ok, < 0 errno code on error.
+ *
+ * Allocates a net device, initializes the bus-specific details and
+ * then calls the bus-generic initialization routine. That will
+ * register the wimax and netdev devices, upload the firmware [using
+ * _bus_bm_*()], call _bus_dev_start() to finalize the setup of the
+ * communication with the device and then start talking to it to
+ * finish setting it up.
+ */
+static
+int i2400mu_probe(struct usb_interface *iface,
+		  const struct usb_device_id *id)
+{
+	int result;
+	struct net_device *net_dev;
+	struct device *dev = &iface->dev;
+	struct i2400m *i2400m;
+	struct i2400mu *i2400mu;
+	struct usb_device *usb_dev = interface_to_usbdev(iface);
+
+	if (usb_dev->speed != USB_SPEED_HIGH)
+		dev_err(dev, "device not connected as high speed\n");
+
+	/* Allocate instance [calls i2400m_netdev_setup() on it]. */
+	result = -ENOMEM;
+	net_dev = alloc_netdev(sizeof(*i2400mu), "wmx%d",
+			       i2400mu_netdev_setup);
+	if (net_dev == NULL) {
+		dev_err(dev, "no memory for network device instance\n");
+		goto error_alloc_netdev;
+	}
+	SET_NETDEV_DEV(net_dev, dev);
+	i2400m = net_dev_to_i2400m(net_dev);
+	i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+	i2400m->wimax_dev.net_dev = net_dev;
+	i2400mu->usb_dev = usb_get_dev(usb_dev);
+	i2400mu->usb_iface = iface;
+	usb_set_intfdata(iface, i2400mu);
+
+	i2400m->bus_tx_block_size = I2400MU_BLK_SIZE;
+	i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX;
+	i2400m->bus_dev_start = i2400mu_bus_dev_start;
+	i2400m->bus_dev_stop = i2400mu_bus_dev_stop;
+	i2400m->bus_tx_kick = i2400mu_bus_tx_kick;
+	i2400m->bus_reset = i2400mu_bus_reset;
+	i2400m->bus_bm_cmd_send = i2400mu_bus_bm_cmd_send;
+	i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
+	i2400m->bus_fw_name = I2400MU_FW_FILE_NAME;
+	i2400m->bus_bm_mac_addr_impaired = 0;
+
+#ifdef CONFIG_PM
+	iface->needs_remote_wakeup = 1;		/* autosuspend (15s delay) */
+	device_init_wakeup(dev, 1);
+	usb_autopm_enable(i2400mu->usb_iface);
+	usb_dev->autosuspend_delay = 15 * HZ;
+	usb_dev->autosuspend_disabled = 0;
+#endif
+
+	result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT);
+	if (result < 0) {
+		dev_err(dev, "cannot setup device: %d\n", result);
+		goto error_setup;
+	}
+	result = i2400mu_debugfs_add(i2400mu);
+	if (result < 0) {
+		dev_err(dev, "Can't register i2400mu's debugfs: %d\n", result);
+		goto error_debugfs_add;
+	}
+	return 0;
+
+error_debugfs_add:
+	i2400m_release(i2400m);
+error_setup:
+	usb_set_intfdata(iface, NULL);
+	usb_put_dev(i2400mu->usb_dev);
+	free_netdev(net_dev);
+error_alloc_netdev:
+	return result;
+}
+
+
+/*
+ * Disconnect an i2400m from the system.
+ *
+ * i2400m_stop() has been called before, so all the RX and TX contexts
+ * have been taken down already. Make sure the queue is stopped,
+ * unregister netdev and i2400m, free and kill.
+ */
+static
+void i2400mu_disconnect(struct usb_interface *iface)
+{
+	struct i2400mu *i2400mu = usb_get_intfdata(iface);
+	struct i2400m *i2400m = &i2400mu->i2400m;
+	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+	struct device *dev = &iface->dev;
+
+	d_fnstart(3, dev, "(iface %p i2400m %p)\n", iface, i2400m);
+
+	debugfs_remove_recursive(i2400mu->debugfs_dentry);
+	i2400m_release(i2400m);
+	usb_set_intfdata(iface, NULL);
+	usb_put_dev(i2400mu->usb_dev);
+	free_netdev(net_dev);
+	d_fnend(3, dev, "(iface %p i2400m %p) = void\n", iface, i2400m);
+}
+
+
+/*
+ * Get the device ready for USB port or system standby and hibernation
+ *
+ * USB port and system standby are handled the same.
+ *
+ * When the system hibernates, the USB device is powered down and then
+ * up, so we don't really have to do much here, as it will be seen as
+ * a reconnect. Still for simplicity we consider this case the same as
+ * suspend, so that the device has a chance to notify the base
+ * station (if connected).
+ *
+ * So at the end, the three cases require common handling.
+ *
+ * If at the time of this call the device's firmware is not loaded,
+ * nothing has to be done.
+ *
+ * If the firmware is loaded, we need to:
+ *
+ *  - tell the device to go into host interface power save mode, wait
+ *    for it to ack
+ *
+ *    This is trickier than it sounds; we need to execute a
+ *    command, but this time, we don't want the code in usb-{tx,rx}.c
+ *    to call the usb_autopm_get/put_interface() barriers as it'd
+ *    deadlock, so we decrement i2400mu->do_autopm, which acts
+ *    as a poor man's semaphore. Ugly, but it works.
+ *
+ *    Also, the device might refuse to go to sleep for whatever
+ *    reason. In this case we just fail. For system suspend/hibernate,
+ *    we *can't* fail. We look at usb_dev->auto_pm to see if the
+ *    suspend call comes from the USB stack or from the system and act
+ *    accordingly.
+ *
+ *  - stop the notification endpoint polling
+ */
+static
+int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
+{
+	int result = 0;
+	struct device *dev = &iface->dev;
+	struct i2400mu *i2400mu = usb_get_intfdata(iface);
+#ifdef CONFIG_PM
+	struct usb_device *usb_dev = i2400mu->usb_dev;
+#endif
+	struct i2400m *i2400m = &i2400mu->i2400m;
+
+	d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event);
+	if (i2400m->updown == 0)
+		goto no_firmware;
+	d_printf(1, dev, "fw up, requesting standby\n");
+	atomic_dec(&i2400mu->do_autopm);
+	result = i2400m_cmd_enter_powersave(i2400m);
+	atomic_inc(&i2400mu->do_autopm);
+#ifdef CONFIG_PM
+	if (result < 0 && usb_dev->auto_pm == 0) {
+		/* System suspend, can't fail */
+		dev_err(dev, "failed to suspend, will reset on resume\n");
+		result = 0;
+	}
+#endif
+	if (result < 0)
+		goto error_enter_powersave;
+	i2400mu_notification_release(i2400mu);
+	d_printf(1, dev, "fw up, got standby\n");
+error_enter_powersave:
+no_firmware:
+	d_fnend(3, dev, "(iface %p pm_msg %u) = %d\n",
+		iface, pm_msg.event, result);
+	return result;
+}
+
+
+static
+int i2400mu_resume(struct usb_interface *iface)
+{
+	int ret = 0;
+	struct device *dev = &iface->dev;
+	struct i2400mu *i2400mu = usb_get_intfdata(iface);
+	struct i2400m *i2400m = &i2400mu->i2400m;
+
+	d_fnstart(3, dev, "(iface %p)\n", iface);
+	if (i2400m->updown == 0) {
+		d_printf(1, dev, "fw was down, no resume needed\n");
+		goto out;
+	}
+	d_printf(1, dev, "fw was up, resuming\n");
+	i2400mu_notification_setup(i2400mu);
+	/* USB has flow control, so we don't need to give it time to
+	 * come back; otherwise, we'd use something like a get-state
+	 * command... */
+out:
+	d_fnend(3, dev, "(iface %p) = %d\n", iface, ret);
+	return ret;
+}
+
+
+static
+struct usb_device_id i2400mu_id_table[] = {
+	{ USB_DEVICE(0x8086, 0x0181) },
+	{ USB_DEVICE(0x8086, 0x1403) },
+	{ USB_DEVICE(0x8086, 0x1405) },
+	{ USB_DEVICE(0x8086, 0x0180) },
+	{ USB_DEVICE(0x8086, 0x0182) },
+	{ USB_DEVICE(0x8086, 0x1406) },
+	{ },
+};
+MODULE_DEVICE_TABLE(usb, i2400mu_id_table);
+
+
+static
+struct usb_driver i2400mu_driver = {
+	.name = KBUILD_MODNAME,
+	.suspend = i2400mu_suspend,
+	.resume = i2400mu_resume,
+	.probe = i2400mu_probe,
+	.disconnect = i2400mu_disconnect,
+	.id_table = i2400mu_id_table,
+	.supports_autosuspend = 1,
+};
+
+static
+int __init i2400mu_driver_init(void)
+{
+	return usb_register(&i2400mu_driver);
+}
+module_init(i2400mu_driver_init);
+
+
+static
+void __exit i2400mu_driver_exit(void)
+{
+	flush_scheduled_work();	/* for the stuff we schedule from sysfs.c */
+	usb_deregister(&i2400mu_driver);
+}
+module_exit(i2400mu_driver_exit);
+
+MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
+MODULE_DESCRIPTION("Intel 2400M WiMAX networking for USB");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(I2400MU_FW_FILE_NAME);
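As an aside on the suspend comment in this file: the do_autopm counter is meant to let the tx/rx paths skip the usb_autopm_get/put_interface() barriers while a suspend-triggered command is in flight. A minimal sketch of how a bulk-out path might honor it follows; the helper name and its caller are illustrative assumptions, not part of this patch.

	/* Hypothetical helper (not in this patch): take the autopm reference
	 * only when i2400mu_suspend() has not dropped do_autopm, so the
	 * powersave command issued from the suspend path cannot deadlock. */
	static int i2400mu_tx_autopm_get(struct i2400mu *i2400mu)
	{
		if (atomic_read(&i2400mu->do_autopm) <= 0)
			return 0;	/* suspend in progress, skip the barrier */
		return usb_autopm_get_interface(i2400mu->usb_iface);
	}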
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index ea543fc..e4f9f74 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -111,7 +111,7 @@
 	  lets you choose drivers.
 
 config PCMCIA_RAYCS
-	tristate "Aviator/Raytheon 2.4MHz wireless support"
+	tristate "Aviator/Raytheon 2.4GHz wireless support"
 	depends on PCMCIA && WLAN_80211
 	select WIRELESS_EXT
 	---help---
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 4af2607..8ef8735 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -2644,7 +2644,7 @@
 		if (skb_headroom(skb) < padsize) {
 			ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough"
 				  " headroom to pad %d\n", hdrlen, padsize);
-			return -1;
+			return NETDEV_TX_BUSY;
 		}
 		skb_push(skb, padsize);
 		memmove(skb->data, skb->data+padsize, hdrlen);
@@ -2655,7 +2655,7 @@
 		ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
 		spin_unlock_irqrestore(&sc->txbuflock, flags);
 		ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
-		return -1;
+		return NETDEV_TX_BUSY;
 	}
 	bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
 	list_del(&bf->list);
@@ -2673,10 +2673,10 @@
 		sc->txbuf_len++;
 		spin_unlock_irqrestore(&sc->txbuflock, flags);
 		dev_kfree_skb_any(skb);
-		return 0;
+		return NETDEV_TX_OK;
 	}
 
-	return 0;
+	return NETDEV_TX_OK;
 }
 
 static int
diff --git a/drivers/net/wireless/ath5k/dma.c b/drivers/net/wireless/ath5k/dma.c
index 7e2b1a6..b65b4fe 100644
--- a/drivers/net/wireless/ath5k/dma.c
+++ b/drivers/net/wireless/ath5k/dma.c
@@ -594,7 +594,7 @@
 		 * XXX: BMISS interrupts may occur after association.
 		 * I found this on 5210 code but it needs testing. If this is
 		 * true we should disable them before assoc and re-enable them
-		 * after a successfull assoc + some jiffies.
+		 * after a successful assoc + some jiffies.
 			interrupt_mask &= ~AR5K_INT_BMISS;
 		 */
 	}
diff --git a/drivers/net/wireless/ath5k/pcu.c b/drivers/net/wireless/ath5k/pcu.c
index 0cac05c..75eb9f4 100644
--- a/drivers/net/wireless/ath5k/pcu.c
+++ b/drivers/net/wireless/ath5k/pcu.c
@@ -65,7 +65,7 @@
 		if (ah->ah_version == AR5K_AR5210)
 			pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
 		else
-			AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_ADHOC);
+			AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
 		break;
 
 	case NL80211_IFTYPE_AP:
@@ -75,7 +75,7 @@
 		if (ah->ah_version == AR5K_AR5210)
 			pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
 		else
-			AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_ADHOC);
+			AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
 		break;
 
 	case NL80211_IFTYPE_STATION:
diff --git a/drivers/net/wireless/ath5k/reg.h b/drivers/net/wireless/ath5k/reg.h
index 91aaeaf..9189ab1 100644
--- a/drivers/net/wireless/ath5k/reg.h
+++ b/drivers/net/wireless/ath5k/reg.h
@@ -73,7 +73,7 @@
 #define	AR5K_CFG_SWRD		0x00000004	/* Byte-swap RX descriptor */
 #define	AR5K_CFG_SWRB		0x00000008	/* Byte-swap RX buffer */
 #define	AR5K_CFG_SWRG		0x00000010	/* Byte-swap Register access */
-#define AR5K_CFG_ADHOC		0x00000020 	/* AP/Adhoc indication [5211+] */
+#define AR5K_CFG_IBSS		0x00000020 	/* 0-BSS, 1-IBSS [5211+] */
 #define AR5K_CFG_PHY_OK		0x00000100	/* [5211+] */
 #define AR5K_CFG_EEBS		0x00000200	/* EEPROM is busy */
 #define	AR5K_CFG_CLKGD		0x00000400	/* Clock gated (Disable dynamic clock) */
diff --git a/drivers/net/wireless/ath9k/Kconfig b/drivers/net/wireless/ath9k/Kconfig
index c43bd32..90a8dd8 100644
--- a/drivers/net/wireless/ath9k/Kconfig
+++ b/drivers/net/wireless/ath9k/Kconfig
@@ -1,6 +1,7 @@
 config ATH9K
 	tristate "Atheros 802.11n wireless cards support"
 	depends on PCI && MAC80211 && WLAN_80211
+	depends on RFKILL || RFKILL=n
 	select MAC80211_LEDS
 	select LEDS_CLASS
 	select NEW_LEDS
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index 191eec5..727f067 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -2164,13 +2164,13 @@
 						    conf->ht.channel_type);
 		}
 
+		ath_update_chainmask(sc, conf->ht.enabled);
+
 		if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0) {
 			DPRINTF(sc, ATH_DBG_FATAL, "Unable to set channel\n");
 			mutex_unlock(&sc->mutex);
 			return -EINVAL;
 		}
-
-		ath_update_chainmask(sc, conf->ht.enabled);
 	}
 
 	if (changed & IEEE80211_CONF_CHANGE_POWER)
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index 3bfc3b9..c92f0c6 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -126,15 +126,7 @@
 		tx_info->flags |= IEEE80211_TX_STAT_ACK;
 	}
 
-	tx_info->status.rates[0].count = tx_status->retries;
-	if (tx_info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
-		/* Change idx from internal table index to MCS index */
-		int idx = tx_info->status.rates[0].idx;
-		struct ath_rate_table *rate_table = sc->cur_rate_table;
-		if (idx >= 0 && idx < rate_table->rate_cnt)
-			tx_info->status.rates[0].idx =
-				rate_table->info[idx].ratecode & 0x7f;
-	}
+	tx_info->status.rates[0].count = tx_status->retries + 1;
 
 	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
 	padsize = hdrlen & 3;
@@ -264,25 +256,22 @@
 	}
 
 	/* Get seqno */
-
-	if (ieee80211_is_data(fc) && !is_pae(skb)) {
-		/* For HT capable stations, we save tidno for later use.
-		 * We also override seqno set by upper layer with the one
-		 * in tx aggregation state.
-		 *
-		 * If fragmentation is on, the sequence number is
-		 * not overridden, since it has been
-		 * incremented by the fragmentation routine.
-		 *
-		 * FIXME: check if the fragmentation threshold exceeds
-		 * IEEE80211 max.
-		 */
-		tid = ATH_AN_2_TID(an, bf->bf_tidno);
-		hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
-					    IEEE80211_SEQ_SEQ_SHIFT);
-		bf->bf_seqno = tid->seq_next;
-		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
-	}
+	/* For HT capable stations, we save tidno for later use.
+	 * We also override seqno set by upper layer with the one
+	 * in tx aggregation state.
+	 *
+	 * If fragmentation is on, the sequence number is
+	 * not overridden, since it has been
+	 * incremented by the fragmentation routine.
+	 *
+	 * FIXME: check if the fragmentation threshold exceeds
+	 * IEEE80211 max.
+	 */
+	tid = ATH_AN_2_TID(an, bf->bf_tidno);
+	hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
+			IEEE80211_SEQ_SEQ_SHIFT);
+	bf->bf_seqno = tid->seq_next;
+	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
 }
 
 static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
@@ -1718,11 +1707,10 @@
 
 	/* Assign seqno, tidno */
 
-	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR))
+	if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR))
 		assign_aggr_tid_seqno(skb, bf);
 
 	/* DMA setup */
-
 	bf->bf_mpdu = skb;
 
 	bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 350157f..4223672 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -3836,7 +3836,7 @@
 	   This routine is also responsible for initialising some
 	   hardware-specific fields in the atmel_private structure,
 	   including a copy of the firmware's hostinfo stucture
-	   which is the route into the rest of the firmare datastructures. */
+	   which is the route into the rest of the firmware datastructures. */
 
 	struct atmel_private *priv = netdev_priv(dev);
 	u8 configuration;
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 7b31a32..c788bad 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3261,7 +3261,7 @@
 	struct b43_wldev *down_dev;
 	struct b43_wldev *d;
 	int err;
-	bool gmode;
+	bool uninitialized_var(gmode);
 	int prev_status;
 
 	/* Find a device and PHY which supports the band. */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index c1324e3..fb996c2 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2465,7 +2465,7 @@
 static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
 				      unsigned int new_mode)
 {
-	struct b43legacy_wldev *up_dev;
+	struct b43legacy_wldev *uninitialized_var(up_dev);
 	struct b43legacy_wldev *down_dev;
 	int err;
 	bool gmode = 0;
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 1667065..823c2bf 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1332,7 +1332,7 @@
 		       IPW_AUX_HOST_RESET_REG_STOP_MASTER);
 
 	/* Step 2. Wait for stop Master Assert
-	 *         (not more then 50us, otherwise ret error */
+	 *         (not more than 50us, otherwise ret error */
 	i = 5;
 	do {
 		udelay(IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY);
@@ -1830,7 +1830,7 @@
 		cancel_delayed_work(&priv->rf_kill);
 	}
 
-	/* Kill the firmare hang check timer */
+	/* Kill the firmware hang check timer */
 	if (!priv->stop_hang_check) {
 		priv->stop_hang_check = 1;
 		cancel_delayed_work(&priv->hang_check);
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 8fdb342..45cfa1cf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -2219,7 +2219,7 @@
 		/* set tx power value for all OFDM rates */
 		for (rate_index = 0; rate_index < IWL_OFDM_RATES;
 		     rate_index++) {
-			s32 power_idx;
+			s32 uninitialized_var(power_idx);
 			int rc;
 
 			/* use channel group's clip-power table,
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 52966ff..ba99720 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -255,7 +255,7 @@
  *        0x3)  54 Mbps
  *
  * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
- *  3-0:   10)  1 Mbps
+ *  6-0:   10)  1 Mbps
  *         20)  2 Mbps
  *         55)  5.5 Mbps
  *        110)  11 Mbps
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 01a2169..8c71ad4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -51,6 +51,7 @@
 		IWL_CMD(REPLY_REMOVE_STA);
 		IWL_CMD(REPLY_REMOVE_ALL_STA);
 		IWL_CMD(REPLY_WEPKEY);
+		IWL_CMD(REPLY_3945_RX);
 		IWL_CMD(REPLY_TX);
 		IWL_CMD(REPLY_RATE_SCALE);
 		IWL_CMD(REPLY_LEDS_CMD);
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 3dba836..4e0007d 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -1369,7 +1369,7 @@
 
 void lbs_stop_card(struct lbs_private *priv)
 {
-	struct net_device *dev = priv->dev;
+	struct net_device *dev;
 	struct cmd_ctrl_node *cmdnode;
 	unsigned long flags;
 
@@ -1377,9 +1377,10 @@
 
 	if (!priv)
 		goto out;
+	dev = priv->dev;
 
-	netif_stop_queue(priv->dev);
-	netif_carrier_off(priv->dev);
+	netif_stop_queue(dev);
+	netif_carrier_off(dev);
 
 	lbs_debugfs_remove_one(priv);
 	if (priv->mesh_tlv) {
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index d1fc305..e7289e2 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -206,7 +206,7 @@
 	 * there are no buffered multicast frames to send
 	 */
 	ieee80211_stop_queues(priv->hw);
-	return 0;
+	return NETDEV_TX_OK;
 }
 
 static void lbtf_tx_work(struct work_struct *work)
diff --git a/drivers/net/wireless/orinoco/orinoco.c b/drivers/net/wireless/orinoco/orinoco.c
index bc84e27..c3bb85e 100644
--- a/drivers/net/wireless/orinoco/orinoco.c
+++ b/drivers/net/wireless/orinoco/orinoco.c
@@ -1610,6 +1610,16 @@
 	struct orinoco_rx_data *rx_data, *temp;
 	struct hermes_rx_descriptor *desc;
 	struct sk_buff *skb;
+	unsigned long flags;
+
+	/* orinoco_rx requires the driver lock, and we also need to
+	 * protect priv->rx_list, so just hold the lock over the
+	 * lot.
+	 *
+	 * If orinoco_lock fails, we've unplugged the card. In this
+	 * case just abort. */
+	if (orinoco_lock(priv, &flags) != 0)
+		return;
 
 	/* extract desc and skb from queue */
 	list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) {
@@ -1622,6 +1632,8 @@
 
 		kfree(desc);
 	}
+
+	orinoco_unlock(priv, &flags);
 }
 
 /********************************************************************/
@@ -3645,12 +3657,22 @@
 void free_orinocodev(struct net_device *dev)
 {
 	struct orinoco_private *priv = netdev_priv(dev);
+	struct orinoco_rx_data *rx_data, *temp;
 
-	/* No need to empty priv->rx_list: if the tasklet is scheduled
-	 * when we call tasklet_kill it will run one final time,
-	 * emptying the list */
+	/* If the tasklet is scheduled when we call tasklet_kill it
+	 * will run one final time. However the tasklet will only
+	 * drain priv->rx_list if the hw is still available. */
 	tasklet_kill(&priv->rx_tasklet);
 
+	/* Explicitly drain priv->rx_list */
+	list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) {
+		list_del(&rx_data->list);
+
+		dev_kfree_skb(rx_data->skb);
+		kfree(rx_data->desc);
+		kfree(rx_data);
+	}
+
 	unregister_pm_notifier(&priv->pm_notifier);
 	orinoco_uncache_fw(priv);
 
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index f127602..0b32215 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -435,6 +435,7 @@
 	PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
 	PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
 	PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */
+	PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0003), /* ARtem Onair Comcard 11 */
 	PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */
 	PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
 	PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 82354b9..c6a370f 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -138,6 +138,7 @@
 	u8 *fw_version = NULL;
 	size_t len;
 	int i;
+	int maxlen;
 
 	if (priv->rx_start)
 		return 0;
@@ -195,6 +196,16 @@
 			else
 				priv->rx_mtu = (size_t)
 					0x620 - priv->tx_hdr_len;
+			maxlen = priv->tx_hdr_len + /* USB devices */
+				 sizeof(struct p54_rx_data) +
+				 4 + /* rx alignment */
+				 IEEE80211_MAX_FRAG_THRESHOLD;
+			if (priv->rx_mtu > maxlen && PAGE_SIZE == 4096) {
+				printk(KERN_INFO "p54: rx_mtu reduced from %d "
+					         "to %d\n", priv->rx_mtu,
+						 maxlen);
+				priv->rx_mtu = maxlen;
+			}
 			break;
 			}
 		case BR_CODE_EXPOSED_IF:
@@ -575,6 +586,7 @@
 	u16 freq = le16_to_cpu(hdr->freq);
 	size_t header_len = sizeof(*hdr);
 	u32 tsf32;
+	u8 rate = hdr->rate & 0xf;
 
 	/*
 	 * If the device is in a unspecified state we have to
@@ -603,8 +615,11 @@
 	rx_status.qual = (100 * hdr->rssi) / 127;
 	if (hdr->rate & 0x10)
 		rx_status.flag |= RX_FLAG_SHORTPRE;
-	rx_status.rate_idx = (dev->conf.channel->band == IEEE80211_BAND_2GHZ ?
-			hdr->rate : (hdr->rate - 4)) & 0xf;
+	if (dev->conf.channel->band == IEEE80211_BAND_5GHZ)
+		rx_status.rate_idx = (rate < 4) ? 0 : rate - 4;
+	else
+		rx_status.rate_idx = rate;
+
 	rx_status.freq = freq;
 	rx_status.band =  dev->conf.channel->band;
 	rx_status.antenna = hdr->antenna;
@@ -798,6 +813,16 @@
 			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
 		info->status.ack_signal = p54_rssi_to_dbm(dev,
 				(int)payload->ack_rssi);
+
+		if (entry_data->key_type == P54_CRYPTO_TKIPMICHAEL) {
+			u8 *iv = (u8 *)(entry_data->align + pad +
+				        entry_data->crypt_offset);
+
+			/* Restore the original TKIP IV. */
+			iv[2] = iv[0];
+			iv[0] = iv[1];
+			iv[1] = (iv[0] | 0x20) & 0x7f;	/* WEPSeed - 8.3.2.2 */
+		}
 		skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data));
 		ieee80211_tx_status_irqsafe(dev, entry);
 		goto out;
@@ -1383,7 +1408,6 @@
 	hdr->tries = ridx;
 	txhdr->rts_rate_idx = 0;
 	if (info->control.hw_key) {
-		crypt_offset += info->control.hw_key->iv_len;
 		txhdr->key_type = p54_convert_algo(info->control.hw_key->alg);
 		txhdr->key_len = min((u8)16, info->control.hw_key->keylen);
 		memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len);
@@ -1397,6 +1421,8 @@
 		}
 		/* reserve some space for ICV */
 		len += info->control.hw_key->icv_len;
+		memset(skb_put(skb, info->control.hw_key->icv_len), 0,
+		       info->control.hw_key->icv_len);
 	} else {
 		txhdr->key_type = 0;
 		txhdr->key_len = 0;
@@ -1824,7 +1850,7 @@
 
 static int p54_config(struct ieee80211_hw *dev, u32 changed)
 {
-	int ret;
+	int ret = 0;
 	struct p54_common *priv = dev->priv;
 	struct ieee80211_conf *conf = &dev->conf;
 
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index c44a200..6a6a72f 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -56,6 +56,7 @@
 	{USB_DEVICE(0x050d, 0x7050)},	/* Belkin F5D7050 ver 1000 */
 	{USB_DEVICE(0x0572, 0x2000)},	/* Cohiba Proto board */
 	{USB_DEVICE(0x0572, 0x2002)},	/* Cohiba Proto board */
+	{USB_DEVICE(0x06b9, 0x0121)},	/* Thomson SpeedTouch 121g */
 	{USB_DEVICE(0x0707, 0xee13)},   /* SMC 2862W-G version 2 */
 	{USB_DEVICE(0x083a, 0x4521)},   /* Siemens Gigaset USB Adapter 54 version 2 */
 	{USB_DEVICE(0x0846, 0x4240)},	/* Netgear WG111 (v2) */
@@ -284,6 +285,7 @@
 	usb_fill_bulk_urb(data_urb, priv->udev,
 			  usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
 			  skb->data, skb->len, p54u_tx_cb, skb);
+	data_urb->transfer_flags |= URB_ZERO_PACKET;
 
 	usb_anchor_urb(data_urb, &priv->submitted);
 	if (usb_submit_urb(data_urb, GFP_ATOMIC)) {
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 30028e2..af6b584 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -38,7 +38,7 @@
 /*
  * Allow hardware encryption to be disabled.
  */
-static int modparam_nohwcrypt = 1;
+static int modparam_nohwcrypt = 0;
 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
 
@@ -376,11 +376,11 @@
 
 		/*
 		 * The driver does not support the IV/EIV generation
-		 * in hardware. However it doesn't support the IV/EIV
-		 * inside the ieee80211 frame either, but requires it
-		 * to be provided seperately for the descriptor.
-		 * rt2x00lib will cut the IV/EIV data out of all frames
-		 * given to us by mac80211, but we must tell mac80211
+		 * in hardware. However it demands the data to be provided
+		 * both separately as well as inside the frame.
+		 * We already provided the CONFIG_CRYPTO_COPY_IV to rt2x00lib
+		 * to ensure rt2x00lib will not strip the data from the
+		 * frame after the copy, now we must tell mac80211
 		 * to generate the IV/EIV data.
 		 */
 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
@@ -1181,7 +1181,7 @@
 			   test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
 	rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
 	rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
-	rt2x00_set_field32(&word, TXD_W0_CIPHER, txdesc->cipher);
+	rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
 	rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
 	rt2x00_desc_write(txd, 0, word);
 }
@@ -1334,14 +1334,7 @@
 
 		/* ICV is located at the end of frame */
 
-		/*
-		 * Hardware has stripped IV/EIV data from 802.11 frame during
-		 * decryption. It has provided the data seperately but rt2x00lib
-		 * should decide if it should be reinserted.
-		 */
-		rxdesc->flags |= RX_FLAG_IV_STRIPPED;
-		if (rxdesc->cipher != CIPHER_TKIP)
-			rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
 		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
 			rxdesc->flags |= RX_FLAG_DECRYPTED;
 		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index 37ad0d2..aee9cba 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -184,8 +184,8 @@
 	 * Make room for new data, note that we increase both
 	 * headsize and tailsize when required. The tailsize is
 	 * only needed when ICV data needs to be inserted and
-	 * the padding is smaller then the ICV data.
-	 * When alignment requirements is greater then the
+	 * the padding is smaller than the ICV data.
+	 * When the alignment requirement is greater than the
 	 * ICV data we must trim the skb to the correct size
 	 * because we need to remove the extra bytes.
 	 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 6d92542..87c0f2c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -807,13 +807,11 @@
 {
 	entry->flags = 0;
 	entry->bitrate = rate->bitrate;
-	entry->hw_value = rt2x00_create_rate_hw_value(index, 0);
-	entry->hw_value_short = entry->hw_value;
+	entry->hw_value = index;
+	entry->hw_value_short = index;
 
-	if (rate->flags & DEV_RATE_SHORT_PREAMBLE) {
+	if (rate->flags & DEV_RATE_SHORT_PREAMBLE)
 		entry->flags |= IEEE80211_RATE_SHORT_PREAMBLE;
-		entry->hw_value_short |= rt2x00_create_rate_hw_value(index, 1);
-	}
 }
 
 static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c
index 68f4e0fc..a0cd35b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.c
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.c
@@ -97,7 +97,7 @@
 
 void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled)
 {
-	if (rt2x00dev->led_radio.type == LED_TYPE_ASSOC)
+	if (rt2x00dev->led_radio.type == LED_TYPE_RADIO)
 		rt2x00led_led_simple(&rt2x00dev->led_radio, enabled);
 }
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 0302432..86cd26f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -52,22 +52,11 @@
 
 extern const struct rt2x00_rate rt2x00_supported_rates[12];
 
-static inline u16 rt2x00_create_rate_hw_value(const u16 index,
-					      const u16 short_preamble)
-{
-	return (short_preamble << 8) | (index & 0xff);
-}
-
 static inline const struct rt2x00_rate *rt2x00_get_rate(const u16 hw_value)
 {
 	return &rt2x00_supported_rates[hw_value & 0xff];
 }
 
-static inline int rt2x00_get_rate_preamble(const u16 hw_value)
-{
-	return (hw_value & 0xff00);
-}
-
 /*
  * Radio control handlers.
  */
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index eaec6bd..746a8f3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -313,7 +313,7 @@
 		 * When preamble is enabled we should set the
 		 * preamble bit for the signal.
 		 */
-		if (rt2x00_get_rate_preamble(rate->hw_value))
+		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 			txdesc->signal |= 0x08;
 	}
 }
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 83df312..0b29d76 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -434,11 +434,11 @@
 
 		if (usb_endpoint_is_bulk_in(ep_desc)) {
 			rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc);
-		} else if (usb_endpoint_is_bulk_out(ep_desc)) {
+		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
+			   (queue != queue_end(rt2x00dev))) {
 			rt2x00usb_assign_endpoint(queue, ep_desc);
+			queue = queue_next(queue);
 
-			if (queue != queue_end(rt2x00dev))
-				queue = queue_next(queue);
 			tx_ep_desc = ep_desc;
 		}
 	}
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index d638a8a..96a8d69 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2321,6 +2321,7 @@
 	/* Linksys */
 	{ USB_DEVICE(0x13b1, 0x0020), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) },
+	{ USB_DEVICE(0x13b1, 0x0028), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* MSI */
 	{ USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) },
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 5f887fb..387c133 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -897,6 +897,7 @@
 	dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
 		     IEEE80211_HW_RX_INCLUDES_FCS |
 		     IEEE80211_HW_SIGNAL_UNSPEC;
+	dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
 	dev->queues = 1;
 	dev->max_signal = 65;
 
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 00ce3ef..6ad6bac 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -213,7 +213,7 @@
 	urb = usb_alloc_urb(0, GFP_ATOMIC);
 	if (!urb) {
 		kfree_skb(skb);
-		return -ENOMEM;
+		return NETDEV_TX_OK;
 	}
 
 	flags = skb->len;
@@ -281,7 +281,7 @@
 	}
 	usb_free_urb(urb);
 
-	return rc;
+	return NETDEV_TX_OK;
 }
 
 static void rtl8187_rx_cb(struct urb *urb)
@@ -1471,6 +1471,7 @@
 	ieee80211_unregister_hw(dev);
 
 	priv = dev->priv;
+	usb_reset_device(priv->udev);
 	usb_put_dev(interface_to_usbdev(intf));
 	ieee80211_free_hw(dev);
 }
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index dd0de3a..7015f24 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -236,7 +236,7 @@
 	unsigned long tx_errors;	/* Planned stuff                */
 	unsigned long rx_dropped;	/* No memory for skb            */
 	unsigned long tx_dropped;	/* When MTU change              */
-	unsigned long rx_over_errors;	/* Frame bigger then STRIP buf. */
+	unsigned long rx_over_errors;	/* Frame bigger than STRIP buf. */
 
 	unsigned long pps_timer;	/* Timer to determine pps       */
 	unsigned long rx_pps_count;	/* Counter to determine pps     */
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 9caa96a..a611ad8 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -287,7 +287,7 @@
  * @skb - a sk-buffer
  * @flags: extra flags to set in the TX status info
  * @ackssi: ACK signal strength
- * @success - True for successfull transmission of the frame
+ * @success - True for successful transmission of the frame
  *
  * This information calls ieee80211_tx_status_irqsafe() if required by the
  * control information. It copies the control information into the status
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 761635b..cd6184e 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1105,6 +1105,16 @@
 	gnttab_free_grant_references(np->gref_rx_head);
 }
 
+static const struct net_device_ops xennet_netdev_ops = {
+	.ndo_open            = xennet_open,
+	.ndo_uninit          = xennet_uninit,
+	.ndo_stop            = xennet_close,
+	.ndo_start_xmit      = xennet_start_xmit,
+	.ndo_change_mtu	     = xennet_change_mtu,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_validate_addr   = eth_validate_addr,
+};
+
 static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
 {
 	int i, err;
@@ -1161,12 +1171,9 @@
 		goto exit_free_tx;
 	}
 
-	netdev->open            = xennet_open;
-	netdev->hard_start_xmit = xennet_start_xmit;
-	netdev->stop            = xennet_close;
+	netdev->netdev_ops	= &xennet_netdev_ops;
+
 	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
-	netdev->uninit          = xennet_uninit;
-	netdev->change_mtu	= xennet_change_mtu;
 	netdev->features        = NETIF_F_IP_CSUM;
 
 	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index cf97129..2f1645d 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -362,6 +362,7 @@
 	.ndo_set_multicast_list = set_rx_mode,
 	.ndo_change_mtu		= eth_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_do_ioctl 		= netdev_ioctl,
 	.ndo_tx_timeout 	= yellowfin_tx_timeout,
 };
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c
index affd904..37c84e3 100644
--- a/drivers/net/zorro8390.c
+++ b/drivers/net/zorro8390.c
@@ -147,6 +147,7 @@
 	.ndo_get_stats		= ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ei_poll,
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index e1b0ad6..fa65a2b 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -66,4 +66,23 @@
 }
 EXPORT_SYMBOL(of_register_i2c_devices);
 
+static int of_dev_node_match(struct device *dev, void *data)
+{
+        return dev_archdata_get_node(&dev->archdata) == data;
+}
+
+/* must call put_device() when done with returned i2c_client device */
+struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&i2c_bus_type, NULL, node,
+					 of_dev_node_match);
+	if (!dev)
+		return NULL;
+
+	return to_i2c_client(dev);
+}
+EXPORT_SYMBOL(of_find_i2c_device_by_node);
+
 MODULE_LICENSE("GPL");
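The comment above of_find_i2c_device_by_node() notes that callers must drop the returned reference with put_device(). A hedged usage sketch follows; the calling context and the np variable are assumptions, not part of this patch.

	/* Illustrative caller: resolve a device tree node to its i2c client
	 * and release the reference once done, as required above. */
	struct i2c_client *client = of_find_i2c_device_by_node(np);

	if (client) {
		dev_info(&client->dev, "matched i2c client %s\n", client->name);
		put_device(&client->dev);
	}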
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 65e8294..9da5a4b 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -1,11 +1,12 @@
 /**
  * @file buffer_sync.c
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
  * @author Barry Kasindorf
+ * @author Robert Richter <robert.richter@amd.com>
  *
  * This is the core of the buffer management. Each
  * CPU buffer is processed and entered into the
@@ -315,88 +316,73 @@
 	add_event_entry(TRACE_BEGIN_CODE);
 }
 
-#ifdef CONFIG_OPROFILE_IBS
-
-#define IBS_FETCH_CODE_SIZE	2
-#define IBS_OP_CODE_SIZE	5
-
-/*
- * Add IBS fetch and op entries to event buffer
- */
-static void add_ibs_begin(int cpu, int code, struct mm_struct *mm)
+static void add_data(struct op_entry *entry, struct mm_struct *mm)
 {
-	unsigned long rip;
-	int i, count;
-	unsigned long ibs_cookie = 0;
+	unsigned long code, pc, val;
+	unsigned long cookie;
 	off_t offset;
-	struct op_sample *sample;
 
-	sample = cpu_buffer_read_entry(cpu);
-	if (!sample)
-		goto Error;
-	rip = sample->eip;
-
-#ifdef __LP64__
-	rip += sample->event << 32;
-#endif
+	if (!op_cpu_buffer_get_data(entry, &code))
+		return;
+	if (!op_cpu_buffer_get_data(entry, &pc))
+		return;
+	if (!op_cpu_buffer_get_size(entry))
+		return;
 
 	if (mm) {
-		ibs_cookie = lookup_dcookie(mm, rip, &offset);
+		cookie = lookup_dcookie(mm, pc, &offset);
 
-		if (ibs_cookie == NO_COOKIE)
-			offset = rip;
-		if (ibs_cookie == INVALID_COOKIE) {
+		if (cookie == NO_COOKIE)
+			offset = pc;
+		if (cookie == INVALID_COOKIE) {
 			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
-			offset = rip;
+			offset = pc;
 		}
-		if (ibs_cookie != last_cookie) {
-			add_cookie_switch(ibs_cookie);
-			last_cookie = ibs_cookie;
+		if (cookie != last_cookie) {
+			add_cookie_switch(cookie);
+			last_cookie = cookie;
 		}
 	} else
-		offset = rip;
+		offset = pc;
 
 	add_event_entry(ESCAPE_CODE);
 	add_event_entry(code);
 	add_event_entry(offset);	/* Offset from Dcookie */
 
-	/* we send the Dcookie offset, but send the raw Linear Add also*/
-	add_event_entry(sample->eip);
-	add_event_entry(sample->event);
-
-	if (code == IBS_FETCH_CODE)
-		count = IBS_FETCH_CODE_SIZE;	/*IBS FETCH is 2 int64s*/
-	else
-		count = IBS_OP_CODE_SIZE;	/*IBS OP is 5 int64s*/
-
-	for (i = 0; i < count; i++) {
-		sample = cpu_buffer_read_entry(cpu);
-		if (!sample)
-			goto Error;
-		add_event_entry(sample->eip);
-		add_event_entry(sample->event);
-	}
-
-	return;
-
-Error:
-	return;
+	while (op_cpu_buffer_get_data(entry, &val))
+		add_event_entry(val);
 }
 
-#endif
-
-static void add_sample_entry(unsigned long offset, unsigned long event)
+static inline void add_sample_entry(unsigned long offset, unsigned long event)
 {
 	add_event_entry(offset);
 	add_event_entry(event);
 }
 
 
-static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
+/*
+ * Add a sample to the global event buffer. If possible the
+ * sample is converted into a persistent dentry/offset pair
+ * for later lookup from userspace. Return 0 on failure.
+ */
+static int
+add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
 {
 	unsigned long cookie;
 	off_t offset;
 
+	if (in_kernel) {
+		add_sample_entry(s->eip, s->event);
+		return 1;
+	}
+
+	/* add userspace sample */
+
+	if (!mm) {
+		atomic_inc(&oprofile_stats.sample_lost_no_mm);
+		return 0;
+	}
+
 	cookie = lookup_dcookie(mm, s->eip, &offset);
 
 	if (cookie == INVALID_COOKIE) {
@@ -415,25 +401,6 @@
 }
 
 
-/* Add a sample to the global event buffer. If possible the
- * sample is converted into a persistent dentry/offset pair
- * for later lookup from userspace.
- */
-static int
-add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
-{
-	if (in_kernel) {
-		add_sample_entry(s->eip, s->event);
-		return 1;
-	} else if (mm) {
-		return add_us_sample(mm, s);
-	} else {
-		atomic_inc(&oprofile_stats.sample_lost_no_mm);
-	}
-	return 0;
-}
-
-
 static void release_mm(struct mm_struct *mm)
 {
 	if (!mm)
@@ -526,66 +493,69 @@
 {
 	struct mm_struct *mm = NULL;
 	struct mm_struct *oldmm;
+	unsigned long val;
 	struct task_struct *new;
 	unsigned long cookie = 0;
 	int in_kernel = 1;
 	sync_buffer_state state = sb_buffer_start;
 	unsigned int i;
 	unsigned long available;
+	unsigned long flags;
+	struct op_entry entry;
+	struct op_sample *sample;
 
 	mutex_lock(&buffer_mutex);
 
 	add_cpu_switch(cpu);
 
-	cpu_buffer_reset(cpu);
-	available = cpu_buffer_entries(cpu);
+	op_cpu_buffer_reset(cpu);
+	available = op_cpu_buffer_entries(cpu);
 
 	for (i = 0; i < available; ++i) {
-		struct op_sample *s = cpu_buffer_read_entry(cpu);
-		if (!s)
+		sample = op_cpu_buffer_read_entry(&entry, cpu);
+		if (!sample)
 			break;
 
-		if (is_code(s->eip)) {
-			switch (s->event) {
-			case 0:
-			case CPU_IS_KERNEL:
-				/* kernel/userspace switch */
-				in_kernel = s->event;
-				if (state == sb_buffer_start)
-					state = sb_sample_start;
-				add_kernel_ctx_switch(s->event);
-				break;
-			case CPU_TRACE_BEGIN:
+		if (is_code(sample->eip)) {
+			flags = sample->event;
+			if (flags & TRACE_BEGIN) {
 				state = sb_bt_start;
 				add_trace_begin();
-				break;
-#ifdef CONFIG_OPROFILE_IBS
-			case IBS_FETCH_BEGIN:
-				state = sb_bt_start;
-				add_ibs_begin(cpu, IBS_FETCH_CODE, mm);
-				break;
-			case IBS_OP_BEGIN:
-				state = sb_bt_start;
-				add_ibs_begin(cpu, IBS_OP_CODE, mm);
-				break;
-#endif
-			default:
+			}
+			if (flags & KERNEL_CTX_SWITCH) {
+				/* kernel/userspace switch */
+				in_kernel = flags & IS_KERNEL;
+				if (state == sb_buffer_start)
+					state = sb_sample_start;
+				add_kernel_ctx_switch(flags & IS_KERNEL);
+			}
+			if (flags & USER_CTX_SWITCH
+			    && op_cpu_buffer_get_data(&entry, &val)) {
 				/* userspace context switch */
+				new = (struct task_struct *)val;
 				oldmm = mm;
-				new = (struct task_struct *)s->event;
 				release_mm(oldmm);
 				mm = take_tasks_mm(new);
 				if (mm != oldmm)
 					cookie = get_exec_dcookie(mm);
 				add_user_ctx_switch(new, cookie);
-				break;
 			}
-		} else if (state >= sb_bt_start &&
-			   !add_sample(mm, s, in_kernel)) {
-			if (state == sb_bt_start) {
-				state = sb_bt_ignore;
-				atomic_inc(&oprofile_stats.bt_lost_no_mapping);
-			}
+			if (op_cpu_buffer_get_size(&entry))
+				add_data(&entry, mm);
+			continue;
+		}
+
+		if (state < sb_bt_start)
+			/* ignore sample */
+			continue;
+
+		if (add_sample(mm, sample, in_kernel))
+			continue;
+
+		/* ignore backtraces if failed to add a sample */
+		if (state == sb_bt_start) {
+			state = sb_bt_ignore;
+			atomic_inc(&oprofile_stats.bt_lost_no_mapping);
 		}
 	}
 	release_mm(mm);
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 6109096..2e03b6d 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -1,11 +1,12 @@
 /**
  * @file cpu_buffer.c
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
  * @author Barry Kasindorf <barry.kasindorf@amd.com>
+ * @author Robert Richter <robert.richter@amd.com>
  *
  * Each CPU has a local buffer that stores PC value/event
  * pairs. We also log context switches when we notice them.
@@ -45,8 +46,8 @@
  * can be changed to a single buffer solution when the ring buffer
  * access is implemented as non-locking atomic code.
  */
-struct ring_buffer *op_ring_buffer_read;
-struct ring_buffer *op_ring_buffer_write;
+static struct ring_buffer *op_ring_buffer_read;
+static struct ring_buffer *op_ring_buffer_write;
 DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
 static void wq_sync_buffer(struct work_struct *work);
@@ -54,6 +55,19 @@
 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
 static int work_enabled;
 
+unsigned long oprofile_get_cpu_buffer_size(void)
+{
+	return oprofile_cpu_buffer_size;
+}
+
+void oprofile_cpu_buffer_inc_smpl_lost(void)
+{
+	struct oprofile_cpu_buffer *cpu_buf
+		= &__get_cpu_var(cpu_buffer);
+
+	cpu_buf->sample_lost_overflow++;
+}
+
 void free_cpu_buffers(void)
 {
 	if (op_ring_buffer_read)
@@ -64,24 +78,11 @@
 	op_ring_buffer_write = NULL;
 }
 
-unsigned long oprofile_get_cpu_buffer_size(void)
-{
-	return fs_cpu_buffer_size;
-}
-
-void oprofile_cpu_buffer_inc_smpl_lost(void)
-{
-	struct oprofile_cpu_buffer *cpu_buf
-		= &__get_cpu_var(cpu_buffer);
-
-	cpu_buf->sample_lost_overflow++;
-}
-
 int alloc_cpu_buffers(void)
 {
 	int i;
 
-	unsigned long buffer_size = fs_cpu_buffer_size;
+	unsigned long buffer_size = oprofile_cpu_buffer_size;
 
 	op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
 	if (!op_ring_buffer_read)
@@ -97,8 +98,6 @@
 		b->last_is_kernel = -1;
 		b->tracing = 0;
 		b->buffer_size = buffer_size;
-		b->tail_pos = 0;
-		b->head_pos = 0;
 		b->sample_received = 0;
 		b->sample_lost_overflow = 0;
 		b->backtrace_aborted = 0;
@@ -145,47 +144,156 @@
 	flush_scheduled_work();
 }
 
-static inline int
-add_sample(struct oprofile_cpu_buffer *cpu_buf,
-	   unsigned long pc, unsigned long event)
+/*
+ * This function prepares the cpu buffer to write a sample.
+ *
+ * Struct op_entry is used during operations on the ring buffer while
+ * struct op_sample contains the data that is stored in the ring
+ * buffer. The entry struct can be uninitialized. The function reserves
+ * a data array whose length is given by size. Use
+ * op_cpu_buffer_write_commit() after preparing the sample. On error a
+ * null pointer is returned, otherwise a pointer to the sample.
+ *
+ */
+struct op_sample
+*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
+{
+	entry->event = ring_buffer_lock_reserve
+		(op_ring_buffer_write, sizeof(struct op_sample) +
+		 size * sizeof(entry->sample->data[0]), &entry->irq_flags);
+	if (entry->event)
+		entry->sample = ring_buffer_event_data(entry->event);
+	else
+		entry->sample = NULL;
+
+	if (!entry->sample)
+		return NULL;
+
+	entry->size = size;
+	entry->data = entry->sample->data;
+
+	return entry->sample;
+}
+
+int op_cpu_buffer_write_commit(struct op_entry *entry)
+{
+	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
+					 entry->irq_flags);
+}
+
+struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
+{
+	struct ring_buffer_event *e;
+	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+	if (e)
+		goto event;
+	if (ring_buffer_swap_cpu(op_ring_buffer_read,
+				 op_ring_buffer_write,
+				 cpu))
+		return NULL;
+	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+	if (e)
+		goto event;
+	return NULL;
+
+event:
+	entry->event = e;
+	entry->sample = ring_buffer_event_data(e);
+	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
+		/ sizeof(entry->sample->data[0]);
+	entry->data = entry->sample->data;
+	return entry->sample;
+}
+
+unsigned long op_cpu_buffer_entries(int cpu)
+{
+	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
+		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
+}
+
+static int
+op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
+	    int is_kernel, struct task_struct *task)
 {
 	struct op_entry entry;
-	int ret;
+	struct op_sample *sample;
+	unsigned long flags;
+	int size;
 
-	ret = cpu_buffer_write_entry(&entry);
-	if (ret)
-		return ret;
+	flags = 0;
 
-	entry.sample->eip = pc;
-	entry.sample->event = event;
+	if (backtrace)
+		flags |= TRACE_BEGIN;
 
-	ret = cpu_buffer_write_commit(&entry);
-	if (ret)
-		return ret;
+	/* notice a switch from user->kernel or vice versa */
+	is_kernel = !!is_kernel;
+	if (cpu_buf->last_is_kernel != is_kernel) {
+		cpu_buf->last_is_kernel = is_kernel;
+		flags |= KERNEL_CTX_SWITCH;
+		if (is_kernel)
+			flags |= IS_KERNEL;
+	}
+
+	/* notice a task switch */
+	if (cpu_buf->last_task != task) {
+		cpu_buf->last_task = task;
+		flags |= USER_CTX_SWITCH;
+	}
+
+	if (!flags)
+		/* nothing to do */
+		return 0;
+
+	if (flags & USER_CTX_SWITCH)
+		size = 1;
+	else
+		size = 0;
+
+	sample = op_cpu_buffer_write_reserve(&entry, size);
+	if (!sample)
+		return -ENOMEM;
+
+	sample->eip = ESCAPE_CODE;
+	sample->event = flags;
+
+	if (size)
+		op_cpu_buffer_add_data(&entry, (unsigned long)task);
+
+	op_cpu_buffer_write_commit(&entry);
 
 	return 0;
 }
 
 static inline int
-add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
+op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
+	      unsigned long pc, unsigned long event)
 {
-	return add_sample(buffer, ESCAPE_CODE, value);
+	struct op_entry entry;
+	struct op_sample *sample;
+
+	sample = op_cpu_buffer_write_reserve(&entry, 0);
+	if (!sample)
+		return -ENOMEM;
+
+	sample->eip = pc;
+	sample->event = event;
+
+	return op_cpu_buffer_write_commit(&entry);
 }
 
-/* This must be safe from any context. It's safe writing here
- * because of the head/tail separation of the writer and reader
- * of the CPU buffer.
+/*
+ * This must be safe from any context.
  *
  * is_kernel is needed because on some architectures you cannot
  * tell if you are in kernel or user space simply by looking at
  * pc. We tag this in the buffer by generating kernel enter/exit
  * events whenever is_kernel changes
  */
-static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
-		      int is_kernel, unsigned long event)
+static int
+log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
+	   unsigned long backtrace, int is_kernel, unsigned long event)
 {
-	struct task_struct *task;
-
 	cpu_buf->sample_received++;
 
 	if (pc == ESCAPE_CODE) {
@@ -193,25 +301,10 @@
 		return 0;
 	}
 
-	is_kernel = !!is_kernel;
+	if (op_add_code(cpu_buf, backtrace, is_kernel, current))
+		goto fail;
 
-	task = current;
-
-	/* notice a switch from user->kernel or vice versa */
-	if (cpu_buf->last_is_kernel != is_kernel) {
-		cpu_buf->last_is_kernel = is_kernel;
-		if (add_code(cpu_buf, is_kernel))
-			goto fail;
-	}
-
-	/* notice a task switch */
-	if (cpu_buf->last_task != task) {
-		cpu_buf->last_task = task;
-		if (add_code(cpu_buf, (unsigned long)task))
-			goto fail;
-	}
-
-	if (add_sample(cpu_buf, pc, event))
+	if (op_add_sample(cpu_buf, pc, event))
 		goto fail;
 
 	return 1;
@@ -221,109 +314,102 @@
 	return 0;
 }
 
-static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
+static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
 {
-	add_code(cpu_buf, CPU_TRACE_BEGIN);
 	cpu_buf->tracing = 1;
-	return 1;
 }
 
-static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
+static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
 {
 	cpu_buf->tracing = 0;
 }
 
-void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
-				unsigned long event, int is_kernel)
+static inline void
+__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
+			  unsigned long event, int is_kernel)
 {
 	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
-
-	if (!backtrace_depth) {
-		log_sample(cpu_buf, pc, is_kernel, event);
-		return;
-	}
-
-	if (!oprofile_begin_trace(cpu_buf))
-		return;
+	unsigned long backtrace = oprofile_backtrace_depth;
 
 	/*
 	 * if log_sample() fail we can't backtrace since we lost the
 	 * source of this event
 	 */
-	if (log_sample(cpu_buf, pc, is_kernel, event))
-		oprofile_ops.backtrace(regs, backtrace_depth);
+	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
+		/* failed */
+		return;
+
+	if (!backtrace)
+		return;
+
+	oprofile_begin_trace(cpu_buf);
+	oprofile_ops.backtrace(regs, backtrace);
 	oprofile_end_trace(cpu_buf);
 }
 
+void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
+			     unsigned long event, int is_kernel)
+{
+	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
+}
+
 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 {
 	int is_kernel = !user_mode(regs);
 	unsigned long pc = profile_pc(regs);
 
-	oprofile_add_ext_sample(pc, regs, event, is_kernel);
+	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
 }
 
-#ifdef CONFIG_OPROFILE_IBS
-
-#define MAX_IBS_SAMPLE_SIZE 14
-
-void oprofile_add_ibs_sample(struct pt_regs * const regs,
-			     unsigned int * const ibs_sample, int ibs_code)
+/*
+ * Add samples with data to the ring buffer.
+ *
+ * Use oprofile_add_data(&entry, val) to add data and
+ * oprofile_write_commit(&entry) to commit the sample.
+ */
+void
+oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
+		       unsigned long pc, int code, int size)
 {
+	struct op_sample *sample;
 	int is_kernel = !user_mode(regs);
 	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
-	struct task_struct *task;
-	int fail = 0;
 
 	cpu_buf->sample_received++;
 
-	/* notice a switch from user->kernel or vice versa */
-	if (cpu_buf->last_is_kernel != is_kernel) {
-		if (add_code(cpu_buf, is_kernel))
-			goto fail;
-		cpu_buf->last_is_kernel = is_kernel;
-	}
-
-	/* notice a task switch */
-	if (!is_kernel) {
-		task = current;
-		if (cpu_buf->last_task != task) {
-			if (add_code(cpu_buf, (unsigned long)task))
-				goto fail;
-			cpu_buf->last_task = task;
-		}
-	}
-
-	fail = fail || add_code(cpu_buf, ibs_code);
-	fail = fail || add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
-	fail = fail || add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
-	fail = fail || add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
-
-	if (ibs_code == IBS_OP_BEGIN) {
-		fail = fail || add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
-		fail = fail || add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
-		fail = fail || add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
-	}
-
-	if (fail)
+	/* no backtraces for samples with data */
+	if (op_add_code(cpu_buf, 0, is_kernel, current))
 		goto fail;
 
-	if (backtrace_depth)
-		oprofile_ops.backtrace(regs, backtrace_depth);
+	sample = op_cpu_buffer_write_reserve(entry, size + 2);
+	if (!sample)
+		goto fail;
+	sample->eip = ESCAPE_CODE;
+	sample->event = 0;		/* no flags */
+
+	op_cpu_buffer_add_data(entry, code);
+	op_cpu_buffer_add_data(entry, pc);
 
 	return;
 
 fail:
 	cpu_buf->sample_lost_overflow++;
-	return;
 }
 
-#endif
+int oprofile_add_data(struct op_entry *entry, unsigned long val)
+{
+	return op_cpu_buffer_add_data(entry, val);
+}
+
+int oprofile_write_commit(struct op_entry *entry)
+{
+	return op_cpu_buffer_write_commit(entry);
+}
 
 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
 {
 	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
-	log_sample(cpu_buf, pc, is_kernel, event);
+	log_sample(cpu_buf, pc, 0, is_kernel, event);
 }
 
 void oprofile_add_trace(unsigned long pc)
@@ -340,7 +426,7 @@
 	if (pc == ESCAPE_CODE)
 		goto fail;
 
-	if (add_sample(cpu_buf, pc, 0))
+	if (op_add_sample(cpu_buf, pc, 0))
 		goto fail;
 
 	return;
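The oprofile_write_reserve()/oprofile_add_data()/oprofile_write_commit() trio introduced here replaces oprofile_add_ibs_sample() as the way for architecture code to log samples that carry extra data. A minimal producer sketch under assumed names (regs, pc, my_code, vals and n are illustrative) could look like this:

	/* Illustrative producer, not part of this patch: reserve room for n
	 * extra words, append them, then commit the whole entry. */
	struct op_entry entry;
	int i;

	oprofile_write_reserve(&entry, regs, pc, my_code, n);
	for (i = 0; i < n; i++)
		oprofile_add_data(&entry, vals[i]);
	oprofile_write_commit(&entry);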
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index aacb0f0..63f81c4 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -1,10 +1,11 @@
 /**
  * @file cpu_buffer.h
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
+ * @author Robert Richter <robert.richter@amd.com>
  */
 
 #ifndef OPROFILE_CPU_BUFFER_H
@@ -31,17 +32,12 @@
 struct op_sample {
 	unsigned long eip;
 	unsigned long event;
+	unsigned long data[0];
 };
 
-struct op_entry {
-	struct ring_buffer_event *event;
-	struct op_sample *sample;
-	unsigned long irq_flags;
-};
+struct op_entry;
 
 struct oprofile_cpu_buffer {
-	volatile unsigned long head_pos;
-	volatile unsigned long tail_pos;
 	unsigned long buffer_size;
 	struct task_struct *last_task;
 	int last_is_kernel;
@@ -54,8 +50,6 @@
 	struct delayed_work work;
 };
 
-extern struct ring_buffer *op_ring_buffer_read;
-extern struct ring_buffer *op_ring_buffer_write;
 DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
 /*
@@ -64,7 +58,7 @@
  * reset these to invalid values; the next sample collected will
  * populate the buffer with proper values to initialize the buffer
  */
-static inline void cpu_buffer_reset(int cpu)
+static inline void op_cpu_buffer_reset(int cpu)
 {
 	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
 
@@ -72,55 +66,48 @@
 	cpu_buf->last_task = NULL;
 }
 
-static inline int cpu_buffer_write_entry(struct op_entry *entry)
+struct op_sample
+*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
+int op_cpu_buffer_write_commit(struct op_entry *entry);
+struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
+unsigned long op_cpu_buffer_entries(int cpu);
+
+/* returns the remaining free size of data in the entry */
+static inline
+int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
 {
-	entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
-						sizeof(struct op_sample),
-						&entry->irq_flags);
-	if (entry->event)
-		entry->sample = ring_buffer_event_data(entry->event);
-	else
-		entry->sample = NULL;
-
-	if (!entry->sample)
-		return -ENOMEM;
-
-	return 0;
+	if (!entry->size)
+		return 0;
+	*entry->data = val;
+	entry->size--;
+	entry->data++;
+	return entry->size;
 }
 
-static inline int cpu_buffer_write_commit(struct op_entry *entry)
+/* returns the size of data in the entry */
+static inline
+int op_cpu_buffer_get_size(struct op_entry *entry)
 {
-	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
-					 entry->irq_flags);
+	return entry->size;
 }
 
-static inline struct op_sample *cpu_buffer_read_entry(int cpu)
+/* returns 0 if empty or the size of data including the current value */
+static inline
+int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
 {
-	struct ring_buffer_event *e;
-	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
-	if (e)
-		return ring_buffer_event_data(e);
-	if (ring_buffer_swap_cpu(op_ring_buffer_read,
-				 op_ring_buffer_write,
-				 cpu))
-		return NULL;
-	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
-	if (e)
-		return ring_buffer_event_data(e);
-	return NULL;
+	int size = entry->size;
+	if (!size)
+		return 0;
+	*val = *entry->data;
+	entry->size--;
+	entry->data++;
+	return size;
 }
 
-/* "acquire" as many cpu buffer slots as we can */
-static inline unsigned long cpu_buffer_entries(int cpu)
-{
-	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
-		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
-}
-
-/* transient events for the CPU buffer -> event buffer */
-#define CPU_IS_KERNEL 1
-#define CPU_TRACE_BEGIN 2
-#define IBS_FETCH_BEGIN 3
-#define IBS_OP_BEGIN    4
+/* extra data flags */
+#define KERNEL_CTX_SWITCH	(1UL << 0)
+#define IS_KERNEL		(1UL << 1)
+#define TRACE_BEGIN		(1UL << 2)
+#define USER_CTX_SWITCH		(1UL << 3)
 
 #endif /* OPROFILE_CPU_BUFFER_H */
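On the consumer side, buffer_sync.c drains entries with the helpers declared above. A condensed sketch of that pattern, with process_value() standing in as a hypothetical consumer, might be:

	/* Illustrative consumer, not part of this patch: read one entry and
	 * walk its variable-length data payload. */
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long val;

	sample = op_cpu_buffer_read_entry(&entry, cpu);
	if (sample && sample->eip == ESCAPE_CODE)
		while (op_cpu_buffer_get_data(&entry, &val))
			process_value(val);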
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 191a320..2b7ae36 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -73,8 +73,8 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&oprofilefs_lock, flags);
-	buffer_size = fs_buffer_size;
-	buffer_watershed = fs_buffer_watershed;
+	buffer_size = oprofile_buffer_size;
+	buffer_watershed = oprofile_buffer_watershed;
 	spin_unlock_irqrestore(&oprofilefs_lock, flags);
 
 	if (buffer_watershed >= buffer_size)
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index cd37590..3cffce9 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -23,7 +23,7 @@
 struct oprofile_operations oprofile_ops;
 
 unsigned long oprofile_started;
-unsigned long backtrace_depth;
+unsigned long oprofile_backtrace_depth;
 static unsigned long is_setup;
 static DEFINE_MUTEX(start_mutex);
 
@@ -172,7 +172,7 @@
 		goto out;
 	}
 
-	backtrace_depth = val;
+	oprofile_backtrace_depth = val;
 
 out:
 	mutex_unlock(&start_mutex);
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index 5df0c21..c288d3c 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -21,12 +21,12 @@
 
 struct oprofile_operations;
 
-extern unsigned long fs_buffer_size;
-extern unsigned long fs_cpu_buffer_size;
-extern unsigned long fs_buffer_watershed;
+extern unsigned long oprofile_buffer_size;
+extern unsigned long oprofile_cpu_buffer_size;
+extern unsigned long oprofile_buffer_watershed;
 extern struct oprofile_operations oprofile_ops;
 extern unsigned long oprofile_started;
-extern unsigned long backtrace_depth;
+extern unsigned long oprofile_backtrace_depth;
 
 struct super_block;
 struct dentry;
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index d8201998..5d36ffc 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -14,17 +14,18 @@
 #include "oprofile_stats.h"
 #include "oprof.h"
 
-#define FS_BUFFER_SIZE_DEFAULT		131072
-#define FS_CPU_BUFFER_SIZE_DEFAULT	8192
-#define FS_BUFFER_WATERSHED_DEFAULT	32768	/* FIXME: tune */
+#define BUFFER_SIZE_DEFAULT		131072
+#define CPU_BUFFER_SIZE_DEFAULT		8192
+#define BUFFER_WATERSHED_DEFAULT	32768	/* FIXME: tune */
 
-unsigned long fs_buffer_size;
-unsigned long fs_cpu_buffer_size;
-unsigned long fs_buffer_watershed;
+unsigned long oprofile_buffer_size;
+unsigned long oprofile_cpu_buffer_size;
+unsigned long oprofile_buffer_watershed;
 
 static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
-	return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset);
+	return oprofilefs_ulong_to_user(oprofile_backtrace_depth, buf, count,
+					offset);
 }
 
 
@@ -125,16 +126,16 @@
 void oprofile_create_files(struct super_block *sb, struct dentry *root)
 {
 	/* reinitialize default values */
-	fs_buffer_size =	FS_BUFFER_SIZE_DEFAULT;
-	fs_cpu_buffer_size =	FS_CPU_BUFFER_SIZE_DEFAULT;
-	fs_buffer_watershed =	FS_BUFFER_WATERSHED_DEFAULT;
+	oprofile_buffer_size =		BUFFER_SIZE_DEFAULT;
+	oprofile_cpu_buffer_size =	CPU_BUFFER_SIZE_DEFAULT;
+	oprofile_buffer_watershed =	BUFFER_WATERSHED_DEFAULT;
 
 	oprofilefs_create_file(sb, root, "enable", &enable_fops);
 	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
 	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
-	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
-	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
-	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
+	oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size);
+	oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed);
+	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
 	oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
 	oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
 	oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
diff --git a/drivers/parisc/asp.c b/drivers/parisc/asp.c
index 8213691..79311335 100644
--- a/drivers/parisc/asp.c
+++ b/drivers/parisc/asp.c
@@ -71,8 +71,7 @@
  */
 #define ASP_INTERRUPT_ADDR 0xf0800000
 
-int __init
-asp_init_chip(struct parisc_device *dev)
+static int __init asp_init_chip(struct parisc_device *dev)
 {
 	struct gsc_irq gsc_irq;
 	int ret;
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index dcc1e99..cd4dd7e 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -555,7 +555,7 @@
  * (Load Coherence Index) instruction.  The 8 bits used for the virtual
  * index are bits 12:19 of the value returned by LCI.
  */ 
-void CCIO_INLINE
+static void CCIO_INLINE
 ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
 		   unsigned long hints)
 {
@@ -1578,8 +1578,6 @@
 
 	ioc_count++;
 
-	parisc_vmerge_boundary = IOVP_SIZE;
-	parisc_vmerge_max_size = BITS_PER_LONG * IOVP_SIZE;
 	parisc_has_iommu();
 	return 0;
 }
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 3bc54b3..d539d9d 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -287,7 +287,7 @@
 DINO_PORT_OUT(w, 16, 2)
 DINO_PORT_OUT(l, 32, 0)
 
-struct pci_port_ops dino_port_ops = {
+static struct pci_port_ops dino_port_ops = {
 	.inb	= dino_in8,
 	.inw	= dino_in16,
 	.inl	= dino_in32,
@@ -547,7 +547,7 @@
 	** The additional "-1" adjusts for skewing the IRQ<->slot.
 	*/
 	dino_cfg_read(dev->bus, dev->devfn, PCI_INTERRUPT_PIN, 1, &irq_pin); 
-	dev->irq = (irq_pin + PCI_SLOT(dev->devfn) - 1) % 4 ;
+	dev->irq = pci_swizzle_interrupt_pin(dev, irq_pin) - 1;
 
 	/* Shouldn't really need to do this but it's in case someone tries
 	** to bypass PCI services and look at the card themselves.
@@ -672,7 +672,7 @@
 			
 			dino_cfg_read(dev->bus, dev->devfn, 
 				      PCI_INTERRUPT_PIN, 1, &irq_pin);
-			irq_pin = (irq_pin + PCI_SLOT(dev->devfn) - 1) % 4 ;
+			irq_pin = pci_swizzle_interrupt_pin(dev, irq_pin) - 1;
 			printk(KERN_WARNING "Device %s has undefined IRQ, "
 					"setting to %d\n", pci_name(dev), irq_pin);
 			dino_cfg_write(dev->bus, dev->devfn, 
@@ -690,7 +690,7 @@
 }
 
 
-struct pci_bios_ops dino_bios_ops = {
+static struct pci_bios_ops dino_bios_ops = {
 	.init		= dino_bios_init,
 	.fixup_bus	= dino_fixup_bus
 };
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index 65eee67..1385641 100644
--- a/drivers/parisc/hppb.c
+++ b/drivers/parisc/hppb.c
@@ -29,7 +29,7 @@
 	struct hppb_card *next;
 };
 
-struct hppb_card hppb_card_head = {
+static struct hppb_card hppb_card_head = {
 	.hpa = 0,
 	.next = NULL,
 };
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 9dedbbd..0797659 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -519,8 +519,7 @@
 		**
 		** Advantage is it's really easy to implement.
 		*/
-		intr_pin = ((intr_pin-1)+PCI_SLOT(pcidev->devfn)) % 4;
-		intr_pin++;	/* convert back to INTA-D (1-4) */
+		intr_pin = pci_swizzle_interrupt_pin(pcidev, intr_pin);
 #endif /* PCI_BRIDGE_FUNCS */
 
 		/*
diff --git a/drivers/parisc/lasi.c b/drivers/parisc/lasi.c
index bee5100..e65727c 100644
--- a/drivers/parisc/lasi.c
+++ b/drivers/parisc/lasi.c
@@ -107,7 +107,7 @@
 
 #else
 
-void __init lasi_led_init(unsigned long lasi_hpa)
+static void __init lasi_led_init(unsigned long lasi_hpa)
 {
 	unsigned long datareg;
 
@@ -163,8 +163,7 @@
 	gsc_writel(0x02, datareg);
 }
 
-int __init
-lasi_init_chip(struct parisc_device *dev)
+static int __init lasi_init_chip(struct parisc_device *dev)
 {
 	extern void (*chassis_power_off)(void);
 	struct gsc_asic *lasi;
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index a28c894..d8233de 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -824,7 +824,7 @@
 }
 
 
-struct pci_bios_ops lba_bios_ops = {
+static struct pci_bios_ops lba_bios_ops = {
 	.init =		lba_bios_init,
 	.fixup_bus =	lba_fixup_bus,
 };
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index bc73b96..3fac8f8 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -561,7 +561,7 @@
  * IOMMU uses little endian for the pdir.
  */
 
-void SBA_INLINE
+static void SBA_INLINE
 sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
 		  unsigned long hint)
 {
@@ -1874,7 +1874,7 @@
 	{ 0, }
 };
 
-int sba_driver_callback(struct parisc_device *);
+static int sba_driver_callback(struct parisc_device *);
 
 static struct parisc_driver sba_driver = {
 	.name =		MODULE_NAME,
@@ -1887,8 +1887,7 @@
 ** If so, initialize the chip and tell other partners in crime they
 ** have work to do.
 */
-int
-sba_driver_callback(struct parisc_device *dev)
+static int sba_driver_callback(struct parisc_device *dev)
 {
 	struct sba_device *sba_dev;
 	u32 func_class;
@@ -1979,8 +1978,6 @@
 	proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops);
 #endif
 
-	parisc_vmerge_boundary = IOVP_SIZE;
-	parisc_vmerge_max_size = IOVP_SIZE * BITS_PER_LONG;
 	parisc_has_iommu();
 	return 0;
 }
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 1e93c83..4fa3bb2 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -405,7 +405,6 @@
 	serial_port.type	= PORT_16550A;
 	serial_port.uartclk	= 115200*16;
 	serial_port.fifosize	= 16;
-	spin_lock_init(&serial_port.lock);
 
 	/* serial port #1 */
 	serial_port.iobase	= sio_dev.sp1_base;
diff --git a/drivers/parisc/wax.c b/drivers/parisc/wax.c
index 892a83b..da9d5ad 100644
--- a/drivers/parisc/wax.c
+++ b/drivers/parisc/wax.c
@@ -68,8 +68,7 @@
 //	gsc_writel(0xFFFFFFFF, base+0x2000); /* RS232-B on Wax */
 }
 
-int __init
-wax_init_chip(struct parisc_device *dev)
+static int __init wax_init_chip(struct parisc_device *dev)
 {
 	struct gsc_asic *wax;
 	struct parisc_device *parent;
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index e1ca425..2a4501d 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -42,6 +42,15 @@
 
 	  When in doubt, say N.
 
+config PCI_STUB
+	tristate "PCI Stub driver"
+	depends on PCI
+	help
+	  Say Y or M here if you want to be able to reserve a PCI device
+	  when it is going to be assigned to a guest operating system.
+
+	  When in doubt, say N.
+
 config HT_IRQ
 	bool "Interrupts on hypertransport devices"
 	default y
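The new stub driver exists purely to claim devices so that no regular host driver binds to them before they are handed to a guest. A minimal sketch of what such a probe routine amounts to (an assumption about the shape of pci-stub.c, not a quote of the new file):

static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	/* Claim the device without programming it; returning 0 means the
	 * stub now owns it and no other host driver will bind. */
	return 0;
}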
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index af3bfe2..3d07ce2 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -53,6 +53,8 @@
 
 obj-$(CONFIG_PCI_SYSCALL) += syscall.o
 
+obj-$(CONFIG_PCI_STUB) += pci-stub.o
+
 ifeq ($(CONFIG_PCI_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
 endif
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 39bb96b..3814447 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -66,6 +66,39 @@
 EXPORT_SYMBOL(pci_bus_write_config_word);
 EXPORT_SYMBOL(pci_bus_write_config_dword);
 
+
+/**
+ * pci_read_vpd - Read one entry from Vital Product Data
+ * @dev:	pci device struct
+ * @pos:	offset in vpd space
+ * @count:	number of bytes to read
+ * @buf:	pointer to where to store result
+ *
+ */
+ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
+{
+	if (!dev->vpd || !dev->vpd->ops)
+		return -ENODEV;
+	return dev->vpd->ops->read(dev, pos, count, buf);
+}
+EXPORT_SYMBOL(pci_read_vpd);
+
+/**
+ * pci_write_vpd - Write entry to Vital Product Data
+ * @dev:	pci device struct
+ * @pos:	offset in vpd space
+ * @count:	number of bytes to write
+ * @buf:	buffer containing the data to write
+ *
+ */
+ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
+{
+	if (!dev->vpd || !dev->vpd->ops)
+		return -ENODEV;
+	return dev->vpd->ops->write(dev, pos, count, buf);
+}
+EXPORT_SYMBOL(pci_write_vpd);
+
 /*
  * The following routines are to prevent the user from accessing PCI config
  * space when it's unsafe to do so.  Some devices require this during BIST and
@@ -133,125 +166,145 @@
 
 struct pci_vpd_pci22 {
 	struct pci_vpd base;
-	spinlock_t lock; /* controls access to hardware and the flags */
-	u8	cap;
+	struct mutex lock;
+	u16	flag;
 	bool	busy;
-	bool	flag; /* value of F bit to wait for */
+	u8	cap;
 };
 
-/* Wait for last operation to complete */
+/*
+ * Wait for last operation to complete.
+ * This code has to spin since there is no other notification from the PCI
+ * hardware. Since the VPD is often implemented by serial attachment to an
+ * EEPROM, it may take many milliseconds to complete.
+ */
 static int pci_vpd_pci22_wait(struct pci_dev *dev)
 {
 	struct pci_vpd_pci22 *vpd =
 		container_of(dev->vpd, struct pci_vpd_pci22, base);
-	u16 flag, status;
-	int wait;
+	unsigned long timeout = jiffies + HZ/20 + 2;
+	u16 status;
 	int ret;
 
 	if (!vpd->busy)
 		return 0;
 
-	flag = vpd->flag ? PCI_VPD_ADDR_F : 0;
-	wait = vpd->flag ? 10 : 1000; /* read: 100 us; write: 10 ms */
 	for (;;) {
-		ret = pci_user_read_config_word(dev,
-						vpd->cap + PCI_VPD_ADDR,
+		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
 						&status);
-		if (ret < 0)
+		if (ret)
 			return ret;
-		if ((status & PCI_VPD_ADDR_F) == flag) {
+
+		if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
 			vpd->busy = false;
 			return 0;
 		}
-		if (wait-- == 0)
+
+		if (time_after(jiffies, timeout))
 			return -ETIMEDOUT;
-		udelay(10);
+		if (fatal_signal_pending(current))
+			return -EINTR;
+		if (!cond_resched())
+			udelay(10);
 	}
 }
 
-static int pci_vpd_pci22_read(struct pci_dev *dev, int pos, int size,
-			      char *buf)
+static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
+				  void *arg)
 {
 	struct pci_vpd_pci22 *vpd =
 		container_of(dev->vpd, struct pci_vpd_pci22, base);
-	u32 val;
 	int ret;
-	int begin, end, i;
+	loff_t end = pos + count;
+	u8 *buf = arg;
 
-	if (pos < 0 || pos > vpd->base.len || size > vpd->base.len  - pos)
+	if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
 		return -EINVAL;
-	if (size == 0)
-		return 0;
 
-	spin_lock_irq(&vpd->lock);
+	if (mutex_lock_killable(&vpd->lock))
+		return -EINTR;
+
 	ret = pci_vpd_pci22_wait(dev);
 	if (ret < 0)
 		goto out;
-	ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
-					 pos & ~3);
-	if (ret < 0)
-		goto out;
-	vpd->busy = true;
-	vpd->flag = 1;
-	ret = pci_vpd_pci22_wait(dev);
-	if (ret < 0)
-		goto out;
-	ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA,
-					 &val);
-out:
-	spin_unlock_irq(&vpd->lock);
-	if (ret < 0)
-		return ret;
 
-	/* Convert to bytes */
-	begin = pos & 3;
-	end = min(4, begin + size);
-	for (i = 0; i < end; ++i) {
-		if (i >= begin)
-			*buf++ = val;
-		val >>= 8;
+	while (pos < end) {
+		u32 val;
+		unsigned int i, skip;
+
+		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
+						 pos & ~3);
+		if (ret < 0)
+			break;
+		vpd->busy = true;
+		vpd->flag = PCI_VPD_ADDR_F;
+		ret = pci_vpd_pci22_wait(dev);
+		if (ret < 0)
+			break;
+
+		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
+		if (ret < 0)
+			break;
+
+		skip = pos & 3;
+		for (i = 0;  i < sizeof(u32); i++) {
+			if (i >= skip) {
+				*buf++ = val;
+				if (++pos == end)
+					break;
+			}
+			val >>= 8;
+		}
 	}
-	return end - begin;
+out:
+	mutex_unlock(&vpd->lock);
+	return ret ? ret : count;
 }
 
-static int pci_vpd_pci22_write(struct pci_dev *dev, int pos, int size,
-			       const char *buf)
+static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count,
+				   const void *arg)
 {
 	struct pci_vpd_pci22 *vpd =
 		container_of(dev->vpd, struct pci_vpd_pci22, base);
-	u32 val;
-	int ret;
+	const u8 *buf = arg;
+	loff_t end = pos + count;
+	int ret = 0;
 
-	if (pos < 0 || pos > vpd->base.len || pos & 3 ||
-	    size > vpd->base.len - pos || size < 4)
+	if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
 		return -EINVAL;
 
-	val = (u8) *buf++;
-	val |= ((u8) *buf++) << 8;
-	val |= ((u8) *buf++) << 16;
-	val |= ((u32)(u8) *buf++) << 24;
+	if (mutex_lock_killable(&vpd->lock))
+		return -EINTR;
 
-	spin_lock_irq(&vpd->lock);
 	ret = pci_vpd_pci22_wait(dev);
 	if (ret < 0)
 		goto out;
-	ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA,
-					  val);
-	if (ret < 0)
-		goto out;
-	ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
-					 pos | PCI_VPD_ADDR_F);
-	if (ret < 0)
-		goto out;
-	vpd->busy = true;
-	vpd->flag = 0;
-	ret = pci_vpd_pci22_wait(dev);
+
+	while (pos < end) {
+		u32 val;
+
+		val = *buf++;
+		val |= *buf++ << 8;
+		val |= *buf++ << 16;
+		val |= *buf++ << 24;
+
+		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
+		if (ret < 0)
+			break;
+		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
+						 pos | PCI_VPD_ADDR_F);
+		if (ret < 0)
+			break;
+
+		vpd->busy = true;
+		vpd->flag = 0;
+		ret = pci_vpd_pci22_wait(dev);
+
+		pos += sizeof(u32);
+	}
 out:
-	spin_unlock_irq(&vpd->lock);
-	if (ret < 0)
-		return ret;
-
-	return 4;
+	mutex_unlock(&vpd->lock);
+	return ret ? ret : count;
 }
 
 static void pci_vpd_pci22_release(struct pci_dev *dev)
@@ -259,7 +312,7 @@
 	kfree(container_of(dev->vpd, struct pci_vpd_pci22, base));
 }
 
-static struct pci_vpd_ops pci_vpd_pci22_ops = {
+static const struct pci_vpd_ops pci_vpd_pci22_ops = {
 	.read = pci_vpd_pci22_read,
 	.write = pci_vpd_pci22_write,
 	.release = pci_vpd_pci22_release,
@@ -279,7 +332,7 @@
 
 	vpd->base.len = PCI_VPD_PCI22_SIZE;
 	vpd->base.ops = &pci_vpd_pci22_ops;
-	spin_lock_init(&vpd->lock);
+	mutex_init(&vpd->lock);
 	vpd->cap = cap;
 	vpd->busy = false;
 	dev->vpd = &vpd->base;
@@ -287,6 +340,29 @@
 }
 
 /**
+ * pci_vpd_truncate - Set available Vital Product Data size
+ * @dev:	pci device struct
+ * @size:	available memory in bytes
+ *
+ * Adjust size of available VPD area.
+ */
+int pci_vpd_truncate(struct pci_dev *dev, size_t size)
+{
+	if (!dev->vpd)
+		return -EINVAL;
+
+	/* limited by the access method */
+	if (size > dev->vpd->len)
+		return -EINVAL;
+
+	dev->vpd->len = size;
+	dev->vpd->attr->size = size;
+
+	return 0;
+}
+EXPORT_SYMBOL(pci_vpd_truncate);
+
+/**
  * pci_block_user_cfg_access - Block userspace PCI config reads/writes
  * @dev:	pci device struct
  *
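A hedged usage sketch for the newly exported VPD helpers, as seen from a device driver (the 64-byte size and error handling are illustrative assumptions):

	u8 vpd[64];
	ssize_t len;

	/* Read the start of the device's VPD through the new wrapper. */
	len = pci_read_vpd(pdev, 0, sizeof(vpd), vpd);
	if (len < 0)
		dev_warn(&pdev->dev, "VPD read failed: %zd\n", len);

	/* A driver that knows its device exposes only 64 bytes of VPD can
	 * shrink the advertised area with the new pci_vpd_truncate(). */
	pci_vpd_truncate(pdev, sizeof(vpd));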
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 999cc40..52b54f0 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -71,7 +71,7 @@
 }
 
 /**
- * add a single device
+ * pci_bus_add_device - add a single device
  * @dev: device to add
  *
  * This adds a single pci device to the global
@@ -91,6 +91,37 @@
 }
 
 /**
+ * pci_bus_add_child - add a child bus
+ * @bus: bus to add
+ *
+ * This adds sysfs entries for a single bus
+ */
+int pci_bus_add_child(struct pci_bus *bus)
+{
+	int retval;
+
+	if (bus->bridge)
+		bus->dev.parent = bus->bridge;
+
+	retval = device_register(&bus->dev);
+	if (retval)
+		return retval;
+
+	bus->is_added = 1;
+
+	retval = device_create_file(&bus->dev, &dev_attr_cpuaffinity);
+	if (retval)
+		return retval;
+
+	retval = device_create_file(&bus->dev, &dev_attr_cpulistaffinity);
+
+	/* Create legacy_io and legacy_mem files for this bus */
+	pci_create_legacy_files(bus);
+
+	return retval;
+}
+
+/**
  * pci_bus_add_devices - insert newly discovered PCI devices
  * @bus: bus to check for new devices
  *
@@ -105,7 +136,7 @@
 void pci_bus_add_devices(struct pci_bus *bus)
 {
 	struct pci_dev *dev;
-	struct pci_bus *child_bus;
+	struct pci_bus *child;
 	int retval;
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -120,45 +151,29 @@
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		BUG_ON(!dev->is_added);
 
+		child = dev->subordinate;
 		/*
 		 * If there is an unattached subordinate bus, attach
 		 * it and then scan for unattached PCI devices.
 		 */
-		if (dev->subordinate) {
-		       if (list_empty(&dev->subordinate->node)) {
-			       down_write(&pci_bus_sem);
-			       list_add_tail(&dev->subordinate->node,
-					       &dev->bus->children);
-			       up_write(&pci_bus_sem);
-			}
-			pci_bus_add_devices(dev->subordinate);
-
-			/* register the bus with sysfs as the parent is now
-			 * properly registered. */
-			child_bus = dev->subordinate;
-			if (child_bus->is_added)
-				continue;
-			child_bus->dev.parent = child_bus->bridge;
-			retval = device_register(&child_bus->dev);
-			if (retval)
-				dev_err(&dev->dev, "Error registering pci_bus,"
-					" continuing...\n");
-			else {
-				child_bus->is_added = 1;
-				retval = device_create_file(&child_bus->dev,
-							&dev_attr_cpuaffinity);
-			}
-			if (retval)
-				dev_err(&dev->dev, "Error creating cpuaffinity"
-					" file, continuing...\n");
-
-			retval = device_create_file(&child_bus->dev,
-						&dev_attr_cpulistaffinity);
-			if (retval)
-				dev_err(&dev->dev,
-					"Error creating cpulistaffinity"
-					" file, continuing...\n");
+		if (!child)
+			continue;
+		if (list_empty(&child->node)) {
+			down_write(&pci_bus_sem);
+			list_add_tail(&child->node, &dev->bus->children);
+			up_write(&pci_bus_sem);
 		}
+		pci_bus_add_devices(child);
+
+		/*
+		 * register the bus with sysfs as the parent is now
+		 * properly registered.
+		 */
+		if (child->is_added)
+			continue;
+		retval = pci_bus_add_child(child);
+		if (retval)
+			dev_err(&dev->dev, "Error adding bus, continuing\n");
 	}
 }
 
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 9bdbe1a..e31fb91 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -55,6 +55,9 @@
 				pciehp_ctrl.o	\
 				pciehp_pci.o	\
 				pciehp_hpc.o
+ifdef CONFIG_ACPI
+pciehp-objs		+=	pciehp_acpi.o
+endif
 
 shpchp-objs		:=	shpchp_core.o	\
 				shpchp_ctrl.o	\
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index e17ef54..1c11418 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -33,7 +33,6 @@
 #include <linux/pci-acpi.h>
 #include <acpi/acpi.h>
 #include <acpi/acpi_bus.h>
-#include <acpi/actypes.h>
 
 #define MY_NAME	"acpi_pcihp"
 
@@ -501,5 +500,74 @@
 }
 EXPORT_SYMBOL_GPL(acpi_root_bridge);
 
+
+static int is_ejectable(acpi_handle handle)
+{
+	acpi_status status;
+	acpi_handle tmp;
+	unsigned long long removable;
+	status = acpi_get_handle(handle, "_ADR", &tmp);
+	if (ACPI_FAILURE(status))
+		return 0;
+	status = acpi_get_handle(handle, "_EJ0", &tmp);
+	if (ACPI_SUCCESS(status))
+		return 1;
+	status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable);
+	if (ACPI_SUCCESS(status) && removable)
+		return 1;
+	return 0;
+}
+
+/**
+ * acpi_pcihp_check_ejectable - check if handle is ejectable ACPI PCI slot
+ * @pbus: the PCI bus of the PCI slot corresponding to 'handle'
+ * @handle: ACPI handle to check
+ *
+ * Return 1 if handle is an ejectable PCI slot, 0 otherwise.
+ */
+int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle)
+{
+	acpi_handle bridge_handle, parent_handle;
+
+	if (!(bridge_handle = acpi_pci_get_bridge_handle(pbus)))
+		return 0;
+	if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle))))
+		return 0;
+	if (bridge_handle != parent_handle)
+		return 0;
+	return is_ejectable(handle);
+}
+EXPORT_SYMBOL_GPL(acpi_pci_check_ejectable);
+
+static acpi_status
+check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+	int *found = (int *)context;
+	if (is_ejectable(handle)) {
+		*found = 1;
+		return AE_CTRL_TERMINATE;
+	}
+	return AE_OK;
+}
+
+/**
+ * acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots
+ * @pbus: PCI bus to scan
+ *
+ * Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise.
+ */
+int acpi_pci_detect_ejectable(struct pci_bus *pbus)
+{
+	acpi_handle handle;
+	int found = 0;
+
+	if (!(handle = acpi_pci_get_bridge_handle(pbus)))
+		return 0;
+	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
+			    check_hotplug, (void *)&found, NULL);
+	return found;
+}
+EXPORT_SYMBOL_GPL(acpi_pci_detect_ejectable);
+
 module_param(debug_acpi, bool, 0644);
 MODULE_PARM_DESC(debug_acpi, "Debugging mode for ACPI enabled or not");
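As an illustrative fragment (the real callers appear in the acpiphp and pciehp changes below), a hotplug driver can use the new helper to decide whether a bridge's secondary bus is worth managing:

	/* Only buses with ACPI-described ejectable slots need a slot driver. */
	if (pdev->subordinate && acpi_pci_detect_ejectable(pdev->subordinate))
		dev_info(&pdev->dev, "bridge has ACPI-ejectable slots\n");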
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 9bcb6cb..4fc168b 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -44,7 +44,7 @@
 	do {							\
 		if (acpiphp_debug)				\
 			printk(KERN_DEBUG "%s: " format,	\
-				MY_NAME , ## arg); 		\
+				MY_NAME , ## arg);		\
 	} while (0)
 #define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
 #define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 3affc64..803d9dd 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -46,6 +46,7 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/pci_hotplug.h>
+#include <linux/pci-acpi.h>
 #include <linux/mutex.h>
 
 #include "../pci.h"
@@ -62,61 +63,6 @@
 static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus);
 static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
 
-
-/*
- * initialization & terminatation routines
- */
-
-/**
- * is_ejectable - determine if a slot is ejectable
- * @handle: handle to acpi namespace
- *
- * Ejectable slot should satisfy at least these conditions:
- *
- *  1. has _ADR method
- *  2. has _EJ0 method
- *
- * optionally
- *
- *  1. has _STA method
- *  2. has _PS0 method
- *  3. has _PS3 method
- *  4. ..
- */
-static int is_ejectable(acpi_handle handle)
-{
-	acpi_status status;
-	acpi_handle tmp;
-
-	status = acpi_get_handle(handle, "_ADR", &tmp);
-	if (ACPI_FAILURE(status)) {
-		return 0;
-	}
-
-	status = acpi_get_handle(handle, "_EJ0", &tmp);
-	if (ACPI_FAILURE(status)) {
-		return 0;
-	}
-
-	return 1;
-}
-
-
-/* callback routine to check for the existence of ejectable slots */
-static acpi_status
-is_ejectable_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
-	int *count = (int *)context;
-
-	if (is_ejectable(handle)) {
-		(*count)++;
-		/* only one ejectable slot is enough */
-		return AE_CTRL_TERMINATE;
-	} else {
-		return AE_OK;
-	}
-}
-
 /* callback routine to check for the existence of a pci dock device */
 static acpi_status
 is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv)
@@ -131,9 +77,6 @@
 	}
 }
 
-
-
-
 /*
  * the _DCK method can do funny things... and sometimes not
  * hah-hah funny.
@@ -160,9 +103,9 @@
 
 	if (((buses >> 8) & 0xff) != bus->secondary) {
 		buses = (buses & 0xff000000)
-	     		| ((unsigned int)(bus->primary)     <<  0)
-	     		| ((unsigned int)(bus->secondary)   <<  8)
-	     		| ((unsigned int)(bus->subordinate) << 16);
+			| ((unsigned int)(bus->primary)     <<  0)
+			| ((unsigned int)(bus->secondary)   <<  8)
+			| ((unsigned int)(bus->subordinate) << 16);
 		pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses);
 	}
 	return NOTIFY_OK;
@@ -184,17 +127,12 @@
 	acpi_status status = AE_OK;
 	unsigned long long adr, sun;
 	int device, function, retval;
+	struct pci_bus *pbus = bridge->pci_bus;
 
-	status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
-
-	if (ACPI_FAILURE(status))
+	if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
 		return AE_OK;
 
-	status = acpi_get_handle(handle, "_EJ0", &tmp);
-
-	if (ACPI_FAILURE(status) && !(is_dock_device(handle)))
-		return AE_OK;
-
+	acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
 	device = (adr >> 16) & 0xffff;
 	function = adr & 0xffff;
 
@@ -205,7 +143,8 @@
 	INIT_LIST_HEAD(&newfunc->sibling);
 	newfunc->handle = handle;
 	newfunc->function = function;
-	if (ACPI_SUCCESS(status))
+
+	if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
 		newfunc->flags = FUNC_HAS_EJ0;
 
 	if (ACPI_SUCCESS(acpi_get_handle(handle, "_STA", &tmp)))
@@ -256,8 +195,7 @@
 		bridge->nr_slots++;
 
 		dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
-				slot->sun, pci_domain_nr(bridge->pci_bus),
-				bridge->pci_bus->number, slot->device);
+		    slot->sun, pci_domain_nr(pbus), pbus->number, device);
 		retval = acpiphp_register_hotplug_slot(slot);
 		if (retval) {
 			if (retval == -EBUSY)
@@ -274,8 +212,7 @@
 	list_add_tail(&newfunc->sibling, &slot->funcs);
 
 	/* associate corresponding pci_dev */
-	newfunc->pci_dev = pci_get_slot(bridge->pci_bus,
-					 PCI_DEVFN(device, function));
+	newfunc->pci_dev = pci_get_slot(pbus, PCI_DEVFN(device, function));
 	if (newfunc->pci_dev) {
 		slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
 	}
@@ -324,27 +261,17 @@
 
 
 /* see if it's worth looking at this bridge */
-static int detect_ejectable_slots(acpi_handle *bridge_handle)
+static int detect_ejectable_slots(struct pci_bus *pbus)
 {
-	acpi_status status;
-	int count;
-
-	count = 0;
-
-	/* only check slots defined directly below bridge object */
-	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1,
-				     is_ejectable_slot, (void *)&count, NULL);
-
-	/*
-	 * we also need to add this bridge if there is a dock bridge or
-	 * other pci device on a dock station (removable)
-	 */
-	if (!count)
-		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle,
-				(u32)1, is_pci_dock_device, (void *)&count,
-				NULL);
-
-	return count;
+	int found = acpi_pci_detect_ejectable(pbus);
+	if (!found) {
+		acpi_handle bridge_handle = acpi_pci_get_bridge_handle(pbus);
+		if (!bridge_handle)
+			return 0;
+		acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1,
+				    is_pci_dock_device, (void *)&found, NULL);
+	}
+	return found;
 }
 
 
@@ -554,7 +481,7 @@
 		goto out;
 
 	/* check if this bridge has ejectable slots */
-	if ((detect_ejectable_slots(handle) > 0)) {
+	if ((detect_ejectable_slots(dev->subordinate) > 0)) {
 		dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev));
 		add_p2p_bridge(handle, dev);
 	}
@@ -615,7 +542,7 @@
 	}
 
 	/* check if this bridge has ejectable slots */
-	if (detect_ejectable_slots(handle) > 0) {
+	if (detect_ejectable_slots(pci_bus) > 0) {
 		dbg("found PCI host-bus bridge with hot-pluggable slots\n");
 		add_host_bridge(handle, pci_bus);
 	}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 881fdd2..5befa7e 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -271,7 +271,7 @@
 		dbg("%s: generationg bus event\n", __func__);
 		acpi_bus_generate_proc_event(note->device, note->event, detail);
 		acpi_bus_generate_netlink_event(note->device->pnp.device_class,
-						  note->device->dev.bus_id,
+						  dev_name(&note->device->dev),
 						  note->event, detail);
 	} else
 		note->event = event;
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index a60a252..cc227a8 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1954,7 +1954,7 @@
 			return ;
 		}
 
-		if (func != NULL && ctrl != NULL) {
+		if (ctrl != NULL) {
 			if (cpqhp_process_SI(ctrl, func) != 0) {
 				amber_LED_on(ctrl, hp_slot);
 				green_LED_off(ctrl, hp_slot);
@@ -2604,7 +2604,7 @@
 			for (cloop = 0; cloop < 4; cloop++) {
 				if (irqs.valid_INT & (0x01 << cloop)) {
 					rc = cpqhp_set_irq(func->bus, func->device,
-							   0x0A + cloop, irqs.interrupt[cloop]);
+							   cloop + 1, irqs.interrupt[cloop]);
 					if (rc)
 						goto free_and_out;
 				}
@@ -2945,7 +2945,7 @@
 		}
 
 		if (!behind_bridge) {
-			rc = cpqhp_set_irq(func->bus, func->device, temp_byte + 0x09, IRQ);
+			rc = cpqhp_set_irq(func->bus, func->device, temp_byte, IRQ);
 			if (rc)
 				return 1;
 		} else {
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index df146be..6c0ed0f 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -171,7 +171,7 @@
 		fakebus->number = bus_num;
 		dbg("%s: dev %d, bus %d, pin %d, num %d\n",
 		    __func__, dev_num, bus_num, int_pin, irq_num);
-		rc = pcibios_set_irq_routing(fakedev, int_pin - 0x0a, irq_num);
+		rc = pcibios_set_irq_routing(fakedev, int_pin - 1, irq_num);
 		kfree(fakedev);
 		kfree(fakebus);
 		dbg("%s: rc %d\n", __func__, rc);
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
index 3a2637a..b0e7de9 100644
--- a/drivers/pci/hotplug/fakephp.c
+++ b/drivers/pci/hotplug/fakephp.c
@@ -324,6 +324,7 @@
 
 		if (test_and_set_bit(0, &dslot->removed)) {
 			dbg("Slot already scheduled for removal\n");
+			pci_dev_put(dev);
 			return -ENODEV;
 		}
 
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index b2801a7..db85284 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -217,14 +217,25 @@
 #ifdef CONFIG_ACPI
 #include <acpi/acpi.h>
 #include <acpi/acpi_bus.h>
-#include <acpi/actypes.h>
 #include <linux/pci-acpi.h>
 
+extern void __init pciehp_acpi_slot_detection_init(void);
+extern int pciehp_acpi_slot_detection_check(struct pci_dev *dev);
+
+static inline void pciehp_firmware_init(void)
+{
+	pciehp_acpi_slot_detection_init();
+}
+
 static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
 {
+	int retval;
 	u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
 		     OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-	return acpi_get_hp_hw_control_from_firmware(dev, flags);
+	retval = acpi_get_hp_hw_control_from_firmware(dev, flags);
+	if (retval)
+		return retval;
+	return pciehp_acpi_slot_detection_check(dev);
 }
 
 static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
@@ -235,6 +246,7 @@
 	return 0;
 }
 #else
+#define pciehp_firmware_init()				do {} while (0)
 #define pciehp_get_hp_hw_control_from_firmware(dev) 	0
 #define pciehp_get_hp_params_from_firmware(dev, hpp)    (-ENODEV)
 #endif 				/* CONFIG_ACPI */
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
new file mode 100644
index 0000000..438d795
--- /dev/null
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -0,0 +1,141 @@
+/*
+ * ACPI related functions for PCI Express Hot Plug driver.
+ *
+ * Copyright (C) 2008 Kenji Kaneshige
+ * Copyright (C) 2008 Fujitsu Limited.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include "pciehp.h"
+
+#define PCIEHP_DETECT_PCIE	(0)
+#define PCIEHP_DETECT_ACPI	(1)
+#define PCIEHP_DETECT_AUTO	(2)
+#define PCIEHP_DETECT_DEFAULT	PCIEHP_DETECT_AUTO
+
+static int slot_detection_mode;
+static char *pciehp_detect_mode;
+module_param(pciehp_detect_mode, charp, 0444);
+MODULE_PARM_DESC(pciehp_detect_mode,
+	 "Slot detection mode: pcie, acpi, auto\n"
+	 "  pcie          - Use PCIe based slot detection\n"
+	 "  acpi          - Use ACPI for slot detection\n"
+	 "  auto(default) - Auto select mode. Use acpi option if duplicate\n"
+	 "                  slot ids are found. Otherwise, use pcie option\n");
+
+int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
+{
+	if (slot_detection_mode != PCIEHP_DETECT_ACPI)
+		return 0;
+	if (acpi_pci_detect_ejectable(dev->subordinate))
+		return 0;
+	return -ENODEV;
+}
+
+static int __init parse_detect_mode(void)
+{
+	if (!pciehp_detect_mode)
+		return PCIEHP_DETECT_DEFAULT;
+	if (!strcmp(pciehp_detect_mode, "pcie"))
+		return PCIEHP_DETECT_PCIE;
+	if (!strcmp(pciehp_detect_mode, "acpi"))
+		return PCIEHP_DETECT_ACPI;
+	if (!strcmp(pciehp_detect_mode, "auto"))
+		return PCIEHP_DETECT_AUTO;
+	warn("bad specifier '%s' for pciehp_detect_mode. Use default\n",
+	     pciehp_detect_mode);
+	return PCIEHP_DETECT_DEFAULT;
+}
+
+static struct pcie_port_service_id __initdata port_pci_ids[] = {
+	{
+		.vendor = PCI_ANY_ID,
+		.device = PCI_ANY_ID,
+		.port_type = PCIE_ANY_PORT,
+		.service_type = PCIE_PORT_SERVICE_HP,
+		.driver_data =  0,
+        }, { /* end: all zeroes */ }
+};
+
+static int __initdata dup_slot_id;
+static int __initdata acpi_slot_detected;
+static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);
+
+/* Dummy driver for duplicate name detection */
+static int __init dummy_probe(struct pcie_device *dev,
+			      const struct pcie_port_service_id *id)
+{
+	int pos;
+	u32 slot_cap;
+	struct slot *slot, *tmp;
+	struct pci_dev *pdev = dev->port;
+	struct pci_bus *pbus = pdev->subordinate;
+	if (!(slot = kzalloc(sizeof(*slot), GFP_KERNEL)))
+		return -ENOMEM;
+	/* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
+	if (pciehp_get_hp_hw_control_from_firmware(pdev))
+		return -ENODEV;
+	if (!(pos = pci_find_capability(pdev, PCI_CAP_ID_EXP)))
+		return -ENODEV;
+	pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);
+	slot->number = slot_cap >> 19;
+	list_for_each_entry(tmp, &dummy_slots, slot_list) {
+		if (tmp->number == slot->number)
+			dup_slot_id++;
+	}
+	list_add_tail(&slot->slot_list, &dummy_slots);
+	if (!acpi_slot_detected && acpi_pci_detect_ejectable(pbus))
+		acpi_slot_detected = 1;
+	return -ENODEV;         /* dummy driver always returns error */
+}
+
+static struct pcie_port_service_driver __initdata dummy_driver = {
+        .name           = "pciehp_dummy",
+        .id_table       = port_pci_ids,
+        .probe          = dummy_probe,
+};
+
+static int __init select_detection_mode(void)
+{
+	struct slot *slot, *tmp;
+	pcie_port_service_register(&dummy_driver);
+	pcie_port_service_unregister(&dummy_driver);
+	list_for_each_entry_safe(slot, tmp, &dummy_slots, slot_list) {
+		list_del(&slot->slot_list);
+		kfree(slot);
+	}
+	if (acpi_slot_detected && dup_slot_id)
+		return PCIEHP_DETECT_ACPI;
+	return PCIEHP_DETECT_PCIE;
+}
+
+void __init pciehp_acpi_slot_detection_init(void)
+{
+	slot_detection_mode = parse_detect_mode();
+	if (slot_detection_mode != PCIEHP_DETECT_AUTO)
+		goto out;
+	slot_detection_mode = select_detection_mode();
+out:
+	if (slot_detection_mode == PCIEHP_DETECT_ACPI)
+		info("Using ACPI for slot detection.\n");
+}
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 39cf248..5482d4e 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -522,6 +522,7 @@
 {
 	int retval = 0;
 
+	pciehp_firmware_init();
 	retval = pcie_port_service_register(&hpdriver_portdrv);
  	dbg("pcie_port_service_register = %d\n", retval);
   	info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index fead63c..ff40345 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -178,15 +178,14 @@
 				 "Issue of Slot Power Off command failed\n");
 			return;
 		}
+		/*
+		 * After turning power off, we must wait for at least 1 second
+		 * before taking any action that relies on power having been
+		 * removed from the slot/adapter.
+		 */
+		msleep(1000);
 	}
 
-	/*
-	 * After turning power off, we must wait for at least 1 second
-	 * before taking any action that relies on power having been
-	 * removed from the slot/adapter.
-	 */
-	msleep(1000);
-
 	if (PWR_LED(ctrl))
 		pslot->hpc_ops->green_led_off(pslot);
 
@@ -286,15 +285,14 @@
 				 "Issue of Slot Disable command failed\n");
 			return retval;
 		}
+		/*
+		 * After turning power off, we must wait for at least 1 second
+		 * before taking any action that relies on power having been
+		 * removed from the slot/adapter.
+		 */
+		msleep(1000);
 	}
 
-	/*
-	 * After turning power off, we must wait for at least 1 second
-	 * before taking any action that relies on power having been
-	 * removed from the slot/adapter.
-	 */
-	msleep(1000);
-
 	if (PWR_LED(ctrl))
 		/* turn off Green LED */
 		p_slot->hpc_ops->green_led_off(p_slot);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index b643ca1..71a8012 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -42,42 +42,6 @@
 
 static atomic_t pciehp_num_controllers = ATOMIC_INIT(0);
 
-struct ctrl_reg {
-	u8 cap_id;
-	u8 nxt_ptr;
-	u16 cap_reg;
-	u32 dev_cap;
-	u16 dev_ctrl;
-	u16 dev_status;
-	u32 lnk_cap;
-	u16 lnk_ctrl;
-	u16 lnk_status;
-	u32 slot_cap;
-	u16 slot_ctrl;
-	u16 slot_status;
-	u16 root_ctrl;
-	u16 rsvp;
-	u32 root_status;
-} __attribute__ ((packed));
-
-/* offsets to the controller registers based on the above structure layout */
-enum ctrl_offsets {
-	PCIECAPID	=	offsetof(struct ctrl_reg, cap_id),
-	NXTCAPPTR	=	offsetof(struct ctrl_reg, nxt_ptr),
-	CAPREG		=	offsetof(struct ctrl_reg, cap_reg),
-	DEVCAP		=	offsetof(struct ctrl_reg, dev_cap),
-	DEVCTRL		=	offsetof(struct ctrl_reg, dev_ctrl),
-	DEVSTATUS	=	offsetof(struct ctrl_reg, dev_status),
-	LNKCAP		=	offsetof(struct ctrl_reg, lnk_cap),
-	LNKCTRL		=	offsetof(struct ctrl_reg, lnk_ctrl),
-	LNKSTATUS	=	offsetof(struct ctrl_reg, lnk_status),
-	SLOTCAP		=	offsetof(struct ctrl_reg, slot_cap),
-	SLOTCTRL	=	offsetof(struct ctrl_reg, slot_ctrl),
-	SLOTSTATUS	=	offsetof(struct ctrl_reg, slot_status),
-	ROOTCTRL	=	offsetof(struct ctrl_reg, root_ctrl),
-	ROOTSTATUS	=	offsetof(struct ctrl_reg, root_status),
-};
-
 static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
 {
 	struct pci_dev *dev = ctrl->pci_dev;
@@ -102,95 +66,9 @@
 	return pci_write_config_dword(dev, ctrl->cap_base + reg, value);
 }
 
-/* Field definitions in PCI Express Capabilities Register */
-#define CAP_VER			0x000F
-#define DEV_PORT_TYPE		0x00F0
-#define SLOT_IMPL		0x0100
-#define MSG_NUM			0x3E00
-
-/* Device or Port Type */
-#define NAT_ENDPT		0x00
-#define LEG_ENDPT		0x01
-#define ROOT_PORT		0x04
-#define UP_STREAM		0x05
-#define	DN_STREAM		0x06
-#define PCIE_PCI_BRDG		0x07
-#define PCI_PCIE_BRDG		0x10
-
-/* Field definitions in Device Capabilities Register */
-#define DATTN_BUTTN_PRSN	0x1000
-#define DATTN_LED_PRSN		0x2000
-#define DPWR_LED_PRSN		0x4000
-
-/* Field definitions in Link Capabilities Register */
-#define MAX_LNK_SPEED		0x000F
-#define MAX_LNK_WIDTH		0x03F0
-#define LINK_ACTIVE_REPORTING	0x00100000
-
-/* Link Width Encoding */
-#define LNK_X1		0x01
-#define LNK_X2		0x02
-#define LNK_X4		0x04
-#define LNK_X8		0x08
-#define LNK_X12		0x0C
-#define LNK_X16		0x10
-#define LNK_X32		0x20
-
-/*Field definitions of Link Status Register */
-#define LNK_SPEED	0x000F
-#define NEG_LINK_WD	0x03F0
-#define LNK_TRN_ERR	0x0400
-#define	LNK_TRN		0x0800
-#define SLOT_CLK_CONF	0x1000
-#define LINK_ACTIVE	0x2000
-
-/* Field definitions in Slot Capabilities Register */
-#define ATTN_BUTTN_PRSN	0x00000001
-#define	PWR_CTRL_PRSN	0x00000002
-#define MRL_SENS_PRSN	0x00000004
-#define ATTN_LED_PRSN	0x00000008
-#define PWR_LED_PRSN	0x00000010
-#define HP_SUPR_RM_SUP	0x00000020
-#define HP_CAP		0x00000040
-#define SLOT_PWR_VALUE	0x000003F8
-#define SLOT_PWR_LIMIT	0x00000C00
-#define PSN		0xFFF80000	/* PSN: Physical Slot Number */
-
-/* Field definitions in Slot Control Register */
-#define ATTN_BUTTN_ENABLE		0x0001
-#define PWR_FAULT_DETECT_ENABLE		0x0002
-#define MRL_DETECT_ENABLE		0x0004
-#define PRSN_DETECT_ENABLE		0x0008
-#define CMD_CMPL_INTR_ENABLE		0x0010
-#define HP_INTR_ENABLE			0x0020
-#define ATTN_LED_CTRL			0x00C0
-#define PWR_LED_CTRL			0x0300
-#define PWR_CTRL			0x0400
-#define EMI_CTRL			0x0800
-
-/* Attention indicator and Power indicator states */
-#define LED_ON		0x01
-#define LED_BLINK	0x10
-#define LED_OFF		0x11
-
 /* Power Control Command */
 #define POWER_ON	0
-#define POWER_OFF	0x0400
-
-/* EMI Status defines */
-#define EMI_DISENGAGED	0
-#define EMI_ENGAGED	1
-
-/* Field definitions in Slot Status Register */
-#define ATTN_BUTTN_PRESSED	0x0001
-#define PWR_FAULT_DETECTED	0x0002
-#define MRL_SENS_CHANGED	0x0004
-#define PRSN_DETECT_CHANGED	0x0008
-#define CMD_COMPLETED		0x0010
-#define MRL_STATE		0x0020
-#define PRSN_STATE		0x0040
-#define EMI_STATE		0x0080
-#define EMI_STATUS_BIT		7
+#define POWER_OFF	PCI_EXP_SLTCTL_PCC
 
 static irqreturn_t pcie_isr(int irq, void *dev_id);
 static void start_int_poll_timer(struct controller *ctrl, int sec);
@@ -253,22 +131,20 @@
 static int pcie_poll_cmd(struct controller *ctrl)
 {
 	u16 slot_status;
-	int timeout = 1000;
+	int err, timeout = 1000;
 
-	if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) {
-		if (slot_status & CMD_COMPLETED) {
-			pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED);
-			return 1;
-		}
+	err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
+	if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) {
+		pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC);
+		return 1;
 	}
 	while (timeout > 0) {
 		msleep(10);
 		timeout -= 10;
-		if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) {
-			if (slot_status & CMD_COMPLETED) {
-				pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED);
-				return 1;
-			}
+		err = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
+		if (!err && (slot_status & PCI_EXP_SLTSTA_CC)) {
+			pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC);
+			return 1;
 		}
 	}
 	return 0;	/* timeout */
@@ -302,14 +178,14 @@
 
 	mutex_lock(&ctrl->ctrl_lock);
 
-	retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
+	retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
 	if (retval) {
 		ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
 			 __func__);
 		goto out;
 	}
 
-	if (slot_status & CMD_COMPLETED) {
+	if (slot_status & PCI_EXP_SLTSTA_CC) {
 		if (!ctrl->no_cmd_complete) {
 			/*
 			 * After 1 sec and CMD_COMPLETED still not set, just
@@ -332,7 +208,7 @@
 		}
 	}
 
-	retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
+	retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
 	if (retval) {
 		ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
 		goto out;
@@ -342,7 +218,7 @@
 	slot_ctrl |= (cmd & mask);
 	ctrl->cmd_busy = 1;
 	smp_mb();
-	retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl);
+	retval = pciehp_writew(ctrl, PCI_EXP_SLTCTL, slot_ctrl);
 	if (retval)
 		ctrl_err(ctrl, "Cannot write to SLOTCTRL register\n");
 
@@ -356,8 +232,8 @@
 		 * completed interrupt is not enabled, we need to poll
 		 * command completed event.
 		 */
-		if (!(slot_ctrl & HP_INTR_ENABLE) ||
-		    !(slot_ctrl & CMD_CMPL_INTR_ENABLE))
+		if (!(slot_ctrl & PCI_EXP_SLTCTL_HPIE) ||
+		    !(slot_ctrl & PCI_EXP_SLTCTL_CCIE))
 			poll = 1;
                 pcie_wait_cmd(ctrl, poll);
 	}
@@ -370,9 +246,9 @@
 {
 	u16 link_status;
 
-	if (pciehp_readw(ctrl, LNKSTATUS, &link_status))
+	if (pciehp_readw(ctrl, PCI_EXP_LNKSTA, &link_status))
 		return 0;
-	return !!(link_status & LINK_ACTIVE);
+	return !!(link_status & PCI_EXP_LNKSTA_DLLLA);
 }
 
 static void pcie_wait_link_active(struct controller *ctrl)
@@ -412,15 +288,15 @@
         } else
                 msleep(1000);
 
-	retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
+	retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
 	if (retval) {
 		ctrl_err(ctrl, "Cannot read LNKSTATUS register\n");
 		return retval;
 	}
 
 	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
-	if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) ||
-		!(lnk_status & NEG_LINK_WD)) {
+	if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
+	    !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
 		ctrl_err(ctrl, "Link Training Error occurs \n");
 		retval = -1;
 		return retval;
@@ -436,16 +312,16 @@
 	u8 atten_led_state;
 	int retval = 0;
 
-	retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
+	retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
 	if (retval) {
 		ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
 		return retval;
 	}
 
 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n",
-		 __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
+		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl);
 
-	atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6;
+	atten_led_state = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6;
 
 	switch (atten_led_state) {
 	case 0:
@@ -475,15 +351,15 @@
 	u8 pwr_state;
 	int	retval = 0;
 
-	retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
+	retval = pciehp_readw(ctrl, PCI_EXP_SLTCTL, &slot_ctrl);
 	if (retval) {
 		ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
 		return retval;
 	}
 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n",
-		 __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
+		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl);
 
-	pwr_state = (slot_ctrl & PWR_CTRL) >> 10;
+	pwr_state = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10;
 
 	switch (pwr_state) {
 	case 0:
@@ -504,17 +380,15 @@
 {
 	struct controller *ctrl = slot->ctrl;
 	u16 slot_status;
-	int retval = 0;
+	int retval;
 
-	retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
+	retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
 	if (retval) {
 		ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
 			 __func__);
 		return retval;
 	}
-
-	*status = (((slot_status & MRL_STATE) >> 5) == 0) ? 0 : 1;
-
+	*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
 	return 0;
 }
 
@@ -522,18 +396,15 @@
 {
 	struct controller *ctrl = slot->ctrl;
 	u16 slot_status;
-	u8 card_state;
-	int retval = 0;
+	int retval;
 
-	retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
+	retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
 	if (retval) {
 		ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
 			 __func__);
 		return retval;
 	}
-	card_state = (u8)((slot_status & PRSN_STATE) >> 6);
-	*status = (card_state == 1) ? 1 : 0;
-
+	*status = !!(slot_status & PCI_EXP_SLTSTA_PDS);
 	return 0;
 }
 
@@ -541,32 +412,28 @@
 {
 	struct controller *ctrl = slot->ctrl;
 	u16 slot_status;
-	u8 pwr_fault;
-	int retval = 0;
+	int retval;
 
-	retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
+	retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
 	if (retval) {
 		ctrl_err(ctrl, "Cannot check for power fault\n");
 		return retval;
 	}
-	pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1);
-
-	return pwr_fault;
+	return !!(slot_status & PCI_EXP_SLTSTA_PFD);
 }
 
 static int hpc_get_emi_status(struct slot *slot, u8 *status)
 {
 	struct controller *ctrl = slot->ctrl;
 	u16 slot_status;
-	int retval = 0;
+	int retval;
 
-	retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
+	retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
 	if (retval) {
 		ctrl_err(ctrl, "Cannot check EMI status\n");
 		return retval;
 	}
-	*status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT;
-
+	*status = !!(slot_status & PCI_EXP_SLTSTA_EIS);
 	return retval;
 }
 
@@ -576,8 +443,8 @@
 	u16 cmd_mask;
 	int rc;
 
-	slot_cmd = EMI_CTRL;
-	cmd_mask = EMI_CTRL;
+	slot_cmd = PCI_EXP_SLTCTL_EIC;
+	cmd_mask = PCI_EXP_SLTCTL_EIC;
 	rc = pcie_write_cmd(slot->ctrl, slot_cmd, cmd_mask);
 	slot->last_emi_toggle = get_seconds();
 
@@ -591,7 +458,7 @@
 	u16 cmd_mask;
 	int rc;
 
-	cmd_mask = ATTN_LED_CTRL;
+	cmd_mask = PCI_EXP_SLTCTL_AIC;
 	switch (value) {
 		case 0 :	/* turn off */
 			slot_cmd = 0x00C0;
@@ -607,7 +474,7 @@
 	}
 	rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
-		 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
+		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
 
 	return rc;
 }
@@ -619,10 +486,10 @@
 	u16 cmd_mask;
 
 	slot_cmd = 0x0100;
-	cmd_mask = PWR_LED_CTRL;
+	cmd_mask = PCI_EXP_SLTCTL_PIC;
 	pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
-		 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
+		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
 }
 
 static void hpc_set_green_led_off(struct slot *slot)
@@ -632,10 +499,10 @@
 	u16 cmd_mask;
 
 	slot_cmd = 0x0300;
-	cmd_mask = PWR_LED_CTRL;
+	cmd_mask = PCI_EXP_SLTCTL_PIC;
 	pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
-		 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
+		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
 }
 
 static void hpc_set_green_led_blink(struct slot *slot)
@@ -645,10 +512,10 @@
 	u16 cmd_mask;
 
 	slot_cmd = 0x0200;
-	cmd_mask = PWR_LED_CTRL;
+	cmd_mask = PCI_EXP_SLTCTL_PIC;
 	pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
-		 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
+		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
 }
 
 static int hpc_power_on_slot(struct slot * slot)
@@ -662,15 +529,15 @@
 	ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot);
 
 	/* Clear sticky power-fault bit from previous power failures */
-	retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
+	retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
 	if (retval) {
 		ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n",
 			 __func__);
 		return retval;
 	}
-	slot_status &= PWR_FAULT_DETECTED;
+	slot_status &= PCI_EXP_SLTSTA_PFD;
 	if (slot_status) {
-		retval = pciehp_writew(ctrl, SLOTSTATUS, slot_status);
+		retval = pciehp_writew(ctrl, PCI_EXP_SLTSTA, slot_status);
 		if (retval) {
 			ctrl_err(ctrl,
 				 "%s: Cannot write to SLOTSTATUS register\n",
@@ -680,13 +547,13 @@
 	}
 
 	slot_cmd = POWER_ON;
-	cmd_mask = PWR_CTRL;
+	cmd_mask = PCI_EXP_SLTCTL_PCC;
 	/* Enable detection that we turned off at slot power-off time */
 	if (!pciehp_poll_mode) {
-		slot_cmd |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE |
-			     PRSN_DETECT_ENABLE);
-		cmd_mask |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE |
-			     PRSN_DETECT_ENABLE);
+		slot_cmd |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
+			     PCI_EXP_SLTCTL_PDCE);
+		cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
+			     PCI_EXP_SLTCTL_PDCE);
 	}
 
 	retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
@@ -696,7 +563,7 @@
 		return -1;
 	}
 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
-		 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
+		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
 
 	return retval;
 }
@@ -753,7 +620,7 @@
 	changed = pcie_mask_bad_dllp(ctrl);
 
 	slot_cmd = POWER_OFF;
-	cmd_mask = PWR_CTRL;
+	cmd_mask = PCI_EXP_SLTCTL_PCC;
 	/*
 	 * If we get MRL or presence detect interrupts now, the isr
 	 * will notice the sticky power-fault bit too and issue power
@@ -762,10 +629,10 @@
 	 * till the slot is powered on again.
 	 */
 	if (!pciehp_poll_mode) {
-		slot_cmd &= ~(PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE |
-			      PRSN_DETECT_ENABLE);
-		cmd_mask |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE |
-			     PRSN_DETECT_ENABLE);
+		slot_cmd &= ~(PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
+			      PCI_EXP_SLTCTL_PDCE);
+		cmd_mask |= (PCI_EXP_SLTCTL_PFDE | PCI_EXP_SLTCTL_MRLSCE |
+			     PCI_EXP_SLTCTL_PDCE);
 	}
 
 	retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
@@ -775,7 +642,7 @@
 		goto out;
 	}
 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
-		 __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
+		 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
  out:
 	if (changed)
 		pcie_unmask_bad_dllp(ctrl);
@@ -796,19 +663,19 @@
 	 */
 	intr_loc = 0;
 	do {
-		if (pciehp_readw(ctrl, SLOTSTATUS, &detected)) {
+		if (pciehp_readw(ctrl, PCI_EXP_SLTSTA, &detected)) {
 			ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n",
 				 __func__);
 			return IRQ_NONE;
 		}
 
-		detected &= (ATTN_BUTTN_PRESSED | PWR_FAULT_DETECTED |
-			     MRL_SENS_CHANGED | PRSN_DETECT_CHANGED |
-			     CMD_COMPLETED);
+		detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+			     PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
+			     PCI_EXP_SLTSTA_CC);
 		intr_loc |= detected;
 		if (!intr_loc)
 			return IRQ_NONE;
-		if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) {
+		if (detected && pciehp_writew(ctrl, PCI_EXP_SLTSTA, detected)) {
 			ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n",
 				 __func__);
 			return IRQ_NONE;
@@ -818,31 +685,31 @@
 	ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc);
 
 	/* Check Command Complete Interrupt Pending */
-	if (intr_loc & CMD_COMPLETED) {
+	if (intr_loc & PCI_EXP_SLTSTA_CC) {
 		ctrl->cmd_busy = 0;
 		smp_mb();
 		wake_up(&ctrl->queue);
 	}
 
-	if (!(intr_loc & ~CMD_COMPLETED))
+	if (!(intr_loc & ~PCI_EXP_SLTSTA_CC))
 		return IRQ_HANDLED;
 
 	p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
 
 	/* Check MRL Sensor Changed */
-	if (intr_loc & MRL_SENS_CHANGED)
+	if (intr_loc & PCI_EXP_SLTSTA_MRLSC)
 		pciehp_handle_switch_change(p_slot);
 
 	/* Check Attention Button Pressed */
-	if (intr_loc & ATTN_BUTTN_PRESSED)
+	if (intr_loc & PCI_EXP_SLTSTA_ABP)
 		pciehp_handle_attention_button(p_slot);
 
 	/* Check Presence Detect Changed */
-	if (intr_loc & PRSN_DETECT_CHANGED)
+	if (intr_loc & PCI_EXP_SLTSTA_PDC)
 		pciehp_handle_presence_change(p_slot);
 
 	/* Check Power Fault Detected */
-	if (intr_loc & PWR_FAULT_DETECTED)
+	if (intr_loc & PCI_EXP_SLTSTA_PFD)
 		pciehp_handle_power_fault(p_slot);
 
 	return IRQ_HANDLED;
@@ -855,7 +722,7 @@
 	u32	lnk_cap;
 	int retval = 0;
 
-	retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap);
+	retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap);
 	if (retval) {
 		ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
 		return retval;
@@ -884,13 +751,13 @@
 	u32	lnk_cap;
 	int retval = 0;
 
-	retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap);
+	retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap);
 	if (retval) {
 		ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
 		return retval;
 	}
 
-	switch ((lnk_cap & 0x03F0) >> 4){
+	switch ((lnk_cap & PCI_EXP_LNKSTA_NLW) >> 4){
 	case 0:
 		lnk_wdth = PCIE_LNK_WIDTH_RESRV;
 		break;
@@ -933,14 +800,14 @@
 	int retval = 0;
 	u16 lnk_status;
 
-	retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
+	retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
 	if (retval) {
 		ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
 			 __func__);
 		return retval;
 	}
 
-	switch (lnk_status & 0x0F) {
+	switch (lnk_status & PCI_EXP_LNKSTA_CLS) {
 	case 1:
 		lnk_speed = PCIE_2PT5GB;
 		break;
@@ -963,14 +830,14 @@
 	int retval = 0;
 	u16 lnk_status;
 
-	retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
+	retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
 	if (retval) {
 		ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
 			 __func__);
 		return retval;
 	}
 
-	switch ((lnk_status & 0x03F0) >> 4){
+	switch ((lnk_status & PCI_EXP_LNKSTA_NLW) >> 4){
 	case 0:
 		lnk_wdth = PCIE_LNK_WIDTH_RESRV;
 		break;
@@ -1036,18 +903,19 @@
 {
 	u16 cmd, mask;
 
-	cmd = PRSN_DETECT_ENABLE;
+	cmd = PCI_EXP_SLTCTL_PDCE;
 	if (ATTN_BUTTN(ctrl))
-		cmd |= ATTN_BUTTN_ENABLE;
+		cmd |= PCI_EXP_SLTCTL_ABPE;
 	if (POWER_CTRL(ctrl))
-		cmd |= PWR_FAULT_DETECT_ENABLE;
+		cmd |= PCI_EXP_SLTCTL_PFDE;
 	if (MRL_SENS(ctrl))
-		cmd |= MRL_DETECT_ENABLE;
+		cmd |= PCI_EXP_SLTCTL_MRLSCE;
 	if (!pciehp_poll_mode)
-		cmd |= HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
+		cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;
 
-	mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE |
-	       PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
+	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
+		PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
+		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE);
 
 	if (pcie_write_cmd(ctrl, cmd, mask)) {
 		ctrl_err(ctrl, "Cannot enable software notification\n");
@@ -1059,8 +927,9 @@
 static void pcie_disable_notification(struct controller *ctrl)
 {
 	u16 mask;
-	mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE |
-	       PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
+	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
+		PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
+		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE);
 	if (pcie_write_cmd(ctrl, 0, mask))
 		ctrl_warn(ctrl, "Cannot disable software notification\n");
 }
@@ -1157,9 +1026,9 @@
 		  EMI(ctrl)        ? "yes" : "no");
 	ctrl_info(ctrl, "  Command Completed    : %3s\n",
 		  NO_CMD_CMPL(ctrl) ? "no" : "yes");
-	pciehp_readw(ctrl, SLOTSTATUS, &reg16);
+	pciehp_readw(ctrl, PCI_EXP_SLTSTA, &reg16);
 	ctrl_info(ctrl, "Slot Status            : 0x%04x\n", reg16);
-	pciehp_readw(ctrl, SLOTCTRL, &reg16);
+	pciehp_readw(ctrl, PCI_EXP_SLTCTL, &reg16);
 	ctrl_info(ctrl, "Slot Control           : 0x%04x\n", reg16);
 }
 
@@ -1183,7 +1052,7 @@
 		ctrl_err(ctrl, "Cannot find PCI Express capability\n");
 		goto abort_ctrl;
 	}
-	if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) {
+	if (pciehp_readl(ctrl, PCI_EXP_SLTCAP, &slot_cap)) {
 		ctrl_err(ctrl, "Cannot read SLOTCAP register\n");
 		goto abort_ctrl;
 	}
@@ -1208,17 +1077,17 @@
 	    ctrl->no_cmd_complete = 1;
 
         /* Check if Data Link Layer Link Active Reporting is implemented */
-        if (pciehp_readl(ctrl, LNKCAP, &link_cap)) {
+        if (pciehp_readl(ctrl, PCI_EXP_LNKCAP, &link_cap)) {
                 ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
                 goto abort_ctrl;
         }
-        if (link_cap & LINK_ACTIVE_REPORTING) {
+        if (link_cap & PCI_EXP_LNKCAP_DLLLARC) {
                 ctrl_dbg(ctrl, "Link Active Reporting supported\n");
                 ctrl->link_active_reporting = 1;
         }
 
 	/* Clear all remaining event bits in Slot Status register */
-	if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f))
+	if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f))
 		goto abort_ctrl;
 
 	/* Disable software notification */
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 235fb7a..3dfecb2 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -438,7 +438,8 @@
 			continue;
 
 		for (i = 0; i < drhd->devices_cnt; i++)
-			if (drhd->devices[i]->bus->number == bus &&
+			if (drhd->devices[i] &&
+			    drhd->devices[i]->bus->number == bus &&
 			    drhd->devices[i]->devfn == devfn)
 				return drhd->iommu;
 
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index 6441dfa..de01174 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -15,7 +15,7 @@
 
 	dev_printk(KERN_ERR, &pdev->dev,
 		   "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n",
-		   parent->dev.bus_id, parent->vendor, parent->device);
+		   dev_name(&parent->dev), parent->vendor, parent->device);
 	dev_printk(KERN_ERR, &pdev->dev, "%s\n", reason);
 	dev_printk(KERN_ERR, &pdev->dev, "Please report to linux-kernel@vger.kernel.org\n");
 	WARN_ON(1);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 11a51f8..b4a90ba 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -776,28 +776,19 @@
 	pci_msi_enable = 0;
 }
 
+/**
+ * pci_msi_enabled - is MSI enabled?
+ *
+ * Returns true if MSI has not been disabled by the command-line option
+ * pci=nomsi.
+ **/
+int pci_msi_enabled(void)
+{
+	return pci_msi_enable;
+}
+EXPORT_SYMBOL(pci_msi_enabled);
+
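
The new pci_msi_enabled() export lets a driver honor a global "pci=nomsi" without reaching into PCI core internals. A minimal sketch of a probe that prefers MSI but falls back to the shared INTx line (the foo_* names are hypothetical, not part of this patch):

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t foo_irq(int irq, void *data)
{
	return IRQ_HANDLED;			/* placeholder handler */
}

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;

	/* Prefer MSI, but respect a system-wide pci=nomsi. */
	if (pci_msi_enabled() && !pci_enable_msi(pdev))
		dev_info(&pdev->dev, "using MSI\n");

	return request_irq(pdev->irq, foo_irq, IRQF_SHARED, "foo", pdev);
}
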
 void pci_msi_init_pci_dev(struct pci_dev *dev)
 {
 	INIT_LIST_HEAD(&dev->msi_list);
 }
-
-#ifdef CONFIG_ACPI
-#include <linux/acpi.h>
-#include <linux/pci-acpi.h>
-static void __devinit msi_acpi_init(void)
-{
-	if (acpi_pci_disabled)
-		return;
-	pci_osc_support_set(OSC_MSI_SUPPORT);
-	pcie_osc_support_set(OSC_MSI_SUPPORT);
-}
-#else
-static inline void msi_acpi_init(void) { }
-#endif /* CONFIG_ACPI */
-
-void __devinit msi_init(void)
-{
-	if (!pci_msi_enable)
-		return;
-	msi_acpi_init();
-}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index ae5ec76..deea8a1 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -13,8 +13,6 @@
 #include <linux/module.h>
 #include <linux/pci-aspm.h>
 #include <acpi/acpi.h>
-#include <acpi/acnamesp.h>
-#include <acpi/acresrc.h>
 #include <acpi/acpi_bus.h>
 
 #include <linux/pci-acpi.h>
@@ -24,13 +22,14 @@
 	acpi_handle handle;
 	u32 support_set;
 	u32 control_set;
+	u32 control_query;
+	int is_queried;
 	struct list_head sibiling;
 };
 static LIST_HEAD(acpi_osc_data_list);
 
 struct acpi_osc_args {
 	u32 capbuf[3];
-	u32 ctrl_result;
 };
 
 static DEFINE_MUTEX(pci_acpi_lock);
@@ -56,7 +55,7 @@
 			  0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
 
 static acpi_status acpi_run_osc(acpi_handle handle,
-				struct acpi_osc_args *osc_args)
+				struct acpi_osc_args *osc_args, u32 *retval)
 {
 	acpi_status status;
 	struct acpi_object_list input;
@@ -112,8 +111,7 @@
 		goto out_kfree;
 	}
 out_success:
-	osc_args->ctrl_result =
-		*((u32 *)(out_obj->buffer.pointer + 8));
+	*retval = *((u32 *)(out_obj->buffer.pointer + 8));
 	status = AE_OK;
 
 out_kfree:
@@ -121,11 +119,10 @@
 	return status;
 }
 
-static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data,
-				    u32 *result)
+static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data)
 {
 	acpi_status status;
-	u32 support_set;
+	u32 support_set, result;
 	struct acpi_osc_args osc_args;
 
 	/* do _OSC query for all possible controls */
@@ -134,56 +131,45 @@
 	osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set;
 	osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
 
-	status = acpi_run_osc(osc_data->handle, &osc_args);
+	status = acpi_run_osc(osc_data->handle, &osc_args, &result);
 	if (ACPI_SUCCESS(status)) {
 		osc_data->support_set = support_set;
-		*result = osc_args.ctrl_result;
+		osc_data->control_query = result;
+		osc_data->is_queried = 1;
 	}
 
 	return status;
 }
 
-static acpi_status acpi_query_osc(acpi_handle handle,
-				  u32 level, void *context, void **retval)
+/*
+ * pci_acpi_osc_support: Invoke _OSC indicating support for the given feature
+ * @flags: Bitmask of flags to support
+ *
+ * See the ACPI spec for the definition of the flags
+ */
+int pci_acpi_osc_support(acpi_handle handle, u32 flags)
 {
 	acpi_status status;
-	struct acpi_osc_data *osc_data;
-	u32 flags = (unsigned long)context, dummy;
 	acpi_handle tmp;
+	struct acpi_osc_data *osc_data;
+	int rc = 0;
 
 	status = acpi_get_handle(handle, "_OSC", &tmp);
 	if (ACPI_FAILURE(status))
-		return AE_OK;
+		return -ENOTTY;
 
 	mutex_lock(&pci_acpi_lock);
 	osc_data = acpi_get_osc_data(handle);
 	if (!osc_data) {
 		printk(KERN_ERR "acpi osc data array is full\n");
+		rc = -ENOMEM;
 		goto out;
 	}
 
-	__acpi_query_osc(flags, osc_data, &dummy);
+	__acpi_query_osc(flags, osc_data);
 out:
 	mutex_unlock(&pci_acpi_lock);
-	return AE_OK;
-}
-
-/**
- * __pci_osc_support_set - register OS support to Firmware
- * @flags: OS support bits
- * @hid: hardware ID
- *
- * Update OS support fields and doing a _OSC Query to obtain an update
- * from Firmware on supported control bits.
- **/
-acpi_status __pci_osc_support_set(u32 flags, const char *hid)
-{
-	if (!(flags & OSC_SUPPORT_MASKS))
-		return AE_TYPE;
-
-	acpi_get_devices(hid, acpi_query_osc,
-			 (void *)(unsigned long)flags, NULL);
-	return AE_OK;
+	return rc;
 }
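
For context, a hedged sketch of how the reworked pair is meant to be used: a root-bridge driver first advertises support bits with pci_acpi_osc_support(), then asks the firmware for control with pci_osc_control_set(). The foo_* wrapper below is illustrative only, not code from this patch:

#include <acpi/acpi_bus.h>
#include <linux/pci-acpi.h>

static int foo_setup_osc(acpi_handle handle)
{
	/* Advertise MSI support for this root bridge. */
	int err = pci_acpi_osc_support(handle, OSC_MSI_SUPPORT);

	if (err)
		return err;		/* no _OSC method or no buffer */

	/* Then request native control of AER from the firmware. */
	if (ACPI_FAILURE(pci_osc_control_set(handle,
					     OSC_PCI_EXPRESS_AER_CONTROL)))
		return -EACCES;
	return 0;
}
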
 
 /**
@@ -196,7 +182,7 @@
 acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
 {
 	acpi_status status;
-	u32 ctrlset, control_set, result;
+	u32 control_req, control_set, result;
 	acpi_handle tmp;
 	struct acpi_osc_data *osc_data;
 	struct acpi_osc_args osc_args;
@@ -213,28 +199,34 @@
 		goto out;
 	}
 
-	ctrlset = (flags & OSC_CONTROL_MASKS);
-	if (!ctrlset) {
+	control_req = (flags & OSC_CONTROL_MASKS);
+	if (!control_req) {
 		status = AE_TYPE;
 		goto out;
 	}
 
-	status = __acpi_query_osc(osc_data->support_set, osc_data, &result);
-	if (ACPI_FAILURE(status))
+	/* No need to evaluate _OSC if the control was already granted. */
+	if ((osc_data->control_set & control_req) == control_req)
 		goto out;
 
-	if ((result & ctrlset) != ctrlset) {
+	if (!osc_data->is_queried) {
+		status = __acpi_query_osc(osc_data->support_set, osc_data);
+		if (ACPI_FAILURE(status))
+			goto out;
+	}
+
+	if ((osc_data->control_query & control_req) != control_req) {
 		status = AE_SUPPORT;
 		goto out;
 	}
 
-	control_set = osc_data->control_set | ctrlset;
+	control_set = osc_data->control_set | control_req;
 	osc_args.capbuf[OSC_QUERY_TYPE] = 0;
 	osc_args.capbuf[OSC_SUPPORT_TYPE] = osc_data->support_set;
 	osc_args.capbuf[OSC_CONTROL_TYPE] = control_set;
-	status = acpi_run_osc(handle, &osc_args);
+	status = acpi_run_osc(handle, &osc_args, &result);
 	if (ACPI_SUCCESS(status))
-		osc_data->control_set = control_set;
+		osc_data->control_set = result;
 out:
 	mutex_unlock(&pci_acpi_lock);
 	return status;
@@ -375,7 +367,7 @@
 	 * The string should be the same as root bridge's name
 	 * Please look at 'pci_scan_bus_parented'
 	 */
-	num = sscanf(dev->bus_id, "pci%04x:%02x", &seg, &bus);
+	num = sscanf(dev_name(dev), "pci%04x:%02x", &seg, &bus);
 	if (num != 2)
 		return -ENODEV;
 	*handle = acpi_get_pci_rootbridge_handle(seg, bus);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 99d867b..c697f26 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -16,6 +16,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/cpu.h>
 #include "pci.h"
 
 /*
@@ -48,7 +49,7 @@
 		subdevice=PCI_ANY_ID, class=0, class_mask=0;
 	unsigned long driver_data=0;
 	int fields=0;
-	int retval;
+	int retval=0;
 
 	fields = sscanf(buf, "%x %x %x %x %x %x %lx",
 			&vendor, &device, &subvendor, &subdevice,
@@ -58,16 +59,18 @@
 
 	/* Only accept driver_data values that match an existing id_table
 	   entry */
-	retval = -EINVAL;
-	while (ids->vendor || ids->subvendor || ids->class_mask) {
-		if (driver_data == ids->driver_data) {
-			retval = 0;
-			break;
+	if (ids) {
+		retval = -EINVAL;
+		while (ids->vendor || ids->subvendor || ids->class_mask) {
+			if (driver_data == ids->driver_data) {
+				retval = 0;
+				break;
+			}
+			ids++;
 		}
-		ids++;
+		if (retval)	/* No match */
+			return retval;
 	}
-	if (retval)	/* No match */
-		return retval;
 
 	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
 	if (!dynid)
@@ -183,32 +186,43 @@
 	return pci_match_id(drv->id_table, dev);
 }
 
+struct drv_dev_and_id {
+	struct pci_driver *drv;
+	struct pci_dev *dev;
+	const struct pci_device_id *id;
+};
+
+static long local_pci_probe(void *_ddi)
+{
+	struct drv_dev_and_id *ddi = _ddi;
+
+	return ddi->drv->probe(ddi->dev, ddi->id);
+}
+
 static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
 			  const struct pci_device_id *id)
 {
-	int error;
-#ifdef CONFIG_NUMA
-	/* Execute driver initialization on node where the
-	   device's bus is attached to.  This way the driver likely
-	   allocates its local memory on the right node without
-	   any need to change it. */
-	struct mempolicy *oldpol;
-	cpumask_t oldmask = current->cpus_allowed;
-	int node = dev_to_node(&dev->dev);
+	int error, node;
+	struct drv_dev_and_id ddi = { drv, dev, id };
 
+	/* Execute driver initialization on node where the device's
+	   bus is attached to.  This way the driver likely allocates
+	   its local memory on the right node without any need to
+	   change it. */
+	node = dev_to_node(&dev->dev);
 	if (node >= 0) {
+		int cpu;
 		node_to_cpumask_ptr(nodecpumask, node);
-		set_cpus_allowed_ptr(current, nodecpumask);
-	}
-	/* And set default memory allocation policy */
-	oldpol = current->mempolicy;
-	current->mempolicy = NULL;	/* fall back to system default policy */
-#endif
-	error = drv->probe(dev, id);
-#ifdef CONFIG_NUMA
-	set_cpus_allowed_ptr(current, &oldmask);
-	current->mempolicy = oldpol;
-#endif
+
+		get_online_cpus();
+		cpu = cpumask_any_and(nodecpumask, cpu_online_mask);
+		if (cpu < nr_cpu_ids)
+			error = work_on_cpu(cpu, local_pci_probe, &ddi);
+		else
+			error = local_pci_probe(&ddi);
+		put_online_cpus();
+	} else
+		error = local_pci_probe(&ddi);
 	return error;
 }
 
@@ -300,21 +314,12 @@
 
 #ifdef CONFIG_PM_SLEEP
 
-static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
-{
-	struct pci_driver *drv = pci_dev->driver;
-
-	return drv && (drv->suspend || drv->suspend_late || drv->resume
-		|| drv->resume_early);
-}
-
 /*
  * Default "suspend" method for devices that have no driver provided suspend,
- * or not even a driver at all.
+ * or not even a driver at all (second part).
  */
-static void pci_default_pm_suspend(struct pci_dev *pci_dev)
+static void pci_pm_set_unknown_state(struct pci_dev *pci_dev)
 {
-	pci_save_state(pci_dev);
 	/*
 	 * mark its power state as "unknown", since we don't know if
 	 * e.g. the BIOS will change its device state when we suspend.
@@ -325,19 +330,9 @@
 
 /*
  * Default "resume" method for devices that have no driver provided resume,
- * or not even a driver at all (first part).
- */
-static void pci_default_pm_resume_early(struct pci_dev *pci_dev)
-{
-	/* restore the PCI config space */
-	pci_restore_state(pci_dev);
-}
-
-/*
- * Default "resume" method for devices that have no driver provided resume,
  * or not even a driver at all (second part).
  */
-static int pci_default_pm_resume_late(struct pci_dev *pci_dev)
+static int pci_pm_reenable_device(struct pci_dev *pci_dev)
 {
 	int retval;
 
@@ -363,8 +358,16 @@
 		i = drv->suspend(pci_dev, state);
 		suspend_report_result(drv->suspend, i);
 	} else {
-		pci_default_pm_suspend(pci_dev);
+		pci_save_state(pci_dev);
+		/*
+		 * This is for compatibility with existing code with legacy PM
+		 * support.
+		 */
+		pci_pm_set_unknown_state(pci_dev);
 	}
+
+	pci_fixup_device(pci_fixup_suspend, pci_dev);
+
 	return i;
 }
 
@@ -381,32 +384,130 @@
 	return i;
 }
 
-static int pci_legacy_resume(struct device *dev)
-{
-	int error;
-	struct pci_dev * pci_dev = to_pci_dev(dev);
-	struct pci_driver * drv = pci_dev->driver;
-
-	if (drv && drv->resume) {
-		error = drv->resume(pci_dev);
-	} else {
-		pci_default_pm_resume_early(pci_dev);
-		error = pci_default_pm_resume_late(pci_dev);
-	}
-	return error;
-}
-
 static int pci_legacy_resume_early(struct device *dev)
 {
 	int error = 0;
 	struct pci_dev * pci_dev = to_pci_dev(dev);
 	struct pci_driver * drv = pci_dev->driver;
 
+	pci_fixup_device(pci_fixup_resume_early, pci_dev);
+
 	if (drv && drv->resume_early)
 		error = drv->resume_early(pci_dev);
 	return error;
 }
 
+static int pci_legacy_resume(struct device *dev)
+{
+	int error;
+	struct pci_dev * pci_dev = to_pci_dev(dev);
+	struct pci_driver * drv = pci_dev->driver;
+
+	pci_fixup_device(pci_fixup_resume, pci_dev);
+
+	if (drv && drv->resume) {
+		error = drv->resume(pci_dev);
+	} else {
+		/* restore the PCI config space */
+		pci_restore_state(pci_dev);
+		error = pci_pm_reenable_device(pci_dev);
+	}
+	return error;
+}
+
+/* Auxiliary functions used by the new power management framework */
+
+static int pci_restore_standard_config(struct pci_dev *pci_dev)
+{
+	struct pci_dev *parent = pci_dev->bus->self;
+	int error = 0;
+
+	/* Check if the device's bus is operational */
+	if (!parent || parent->current_state == PCI_D0) {
+		pci_restore_state(pci_dev);
+		pci_update_current_state(pci_dev, PCI_D0);
+	} else {
+		dev_warn(&pci_dev->dev, "unable to restore config, "
+			"bridge %s in low power state D%d\n", pci_name(parent),
+			parent->current_state);
+		pci_dev->current_state = PCI_UNKNOWN;
+		error = -EAGAIN;
+	}
+
+	return error;
+}
+
+static bool pci_is_bridge(struct pci_dev *pci_dev)
+{
+	return !!(pci_dev->subordinate);
+}
+
+static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
+{
+	if (pci_restore_standard_config(pci_dev))
+		pci_fixup_device(pci_fixup_resume_early, pci_dev);
+}
+
+static int pci_pm_default_resume(struct pci_dev *pci_dev)
+{
+	/*
+	 * pci_restore_standard_config() should have been called once already,
+	 * but it would have failed if the device's parent bridge had not been
+	 * in power state D0 at that time.  Check it and try again if necessary.
+	 */
+	if (pci_dev->current_state == PCI_UNKNOWN) {
+		int error = pci_restore_standard_config(pci_dev);
+		if (error)
+			return error;
+	}
+
+	pci_fixup_device(pci_fixup_resume, pci_dev);
+
+	if (!pci_is_bridge(pci_dev))
+		pci_enable_wake(pci_dev, PCI_D0, false);
+
+	return pci_pm_reenable_device(pci_dev);
+}
+
+static void pci_pm_default_suspend_generic(struct pci_dev *pci_dev)
+{
+	/* If device is enabled at this point, disable it */
+	pci_disable_enabled_device(pci_dev);
+	/*
+	 * Save state with interrupts enabled, because in principle the bus the
+	 * device is on may be put into a low power state after this code runs.
+	 */
+	pci_save_state(pci_dev);
+}
+
+static void pci_pm_default_suspend(struct pci_dev *pci_dev)
+{
+	pci_pm_default_suspend_generic(pci_dev);
+
+	if (!pci_is_bridge(pci_dev))
+		pci_prepare_to_sleep(pci_dev);
+
+	pci_fixup_device(pci_fixup_suspend, pci_dev);
+}
+
+static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
+{
+	struct pci_driver *drv = pci_dev->driver;
+	bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume
+		|| drv->resume_early);
+
+	/*
+	 * Legacy PM support is used by default, so warn if the new framework is
+	 * supported as well.  Drivers are supposed to support either the
+	 * former, or the latter, but not both at the same time.
+	 */
+	WARN_ON(ret && drv->driver.pm);
+
+	return ret;
+}
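
The WARN_ON above encodes the rule that a driver uses either the legacy .suspend/.resume callbacks or the new framework, never both. A minimal sketch of the new-framework side, assuming the dev_pm_ops structure this framework dereferences (the foo_* driver is hypothetical; probe and id table are omitted for brevity):

#include <linux/pci.h>

static int foo_suspend(struct device *dev)
{
	return 0;			/* quiesce the device here */
}

static int foo_resume(struct device *dev)
{
	return 0;			/* re-program the device here */
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static struct pci_driver foo_driver = {
	.name		= "foo",
	.driver.pm	= &foo_pm_ops,	/* no legacy .suspend/.resume */
};
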
+
+/* New power management framework */
+
 static int pci_pm_prepare(struct device *dev)
 {
 	struct device_driver *drv = dev->driver;
@@ -434,15 +535,16 @@
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
-	if (drv && drv->pm) {
-		if (drv->pm->suspend) {
-			error = drv->pm->suspend(dev);
-			suspend_report_result(drv->pm->suspend, error);
-		}
-	} else if (pci_has_legacy_pm_support(pci_dev)) {
-		error = pci_legacy_suspend(dev, PMSG_SUSPEND);
+	if (pci_has_legacy_pm_support(pci_dev))
+		return pci_legacy_suspend(dev, PMSG_SUSPEND);
+
+	if (drv && drv->pm && drv->pm->suspend) {
+		error = drv->pm->suspend(dev);
+		suspend_report_result(drv->pm->suspend, error);
 	}
-	pci_fixup_device(pci_fixup_suspend, pci_dev);
+
+	if (!error)
+		pci_pm_default_suspend(pci_dev);
 
 	return error;
 }
@@ -453,36 +555,16 @@
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
-	if (drv && drv->pm) {
-		if (drv->pm->suspend_noirq) {
-			error = drv->pm->suspend_noirq(dev);
-			suspend_report_result(drv->pm->suspend_noirq, error);
-		}
-	} else if (pci_has_legacy_pm_support(pci_dev)) {
-		error = pci_legacy_suspend_late(dev, PMSG_SUSPEND);
-	} else {
-		pci_default_pm_suspend(pci_dev);
+	if (pci_has_legacy_pm_support(pci_dev))
+		return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
+
+	if (drv && drv->pm && drv->pm->suspend_noirq) {
+		error = drv->pm->suspend_noirq(dev);
+		suspend_report_result(drv->pm->suspend_noirq, error);
 	}
 
-	return error;
-}
-
-static int pci_pm_resume(struct device *dev)
-{
-	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct device_driver *drv = dev->driver;
-	int error = 0;
-
-	pci_fixup_device(pci_fixup_resume, pci_dev);
-
-	if (drv && drv->pm) {
-		if (drv->pm->resume)
-			error = drv->pm->resume(dev);
-	} else if (pci_has_legacy_pm_support(pci_dev)) {
-		error = pci_legacy_resume(dev);
-	} else {
-		error = pci_default_pm_resume_late(pci_dev);
-	}
+	if (!error)
+		pci_pm_set_unknown_state(pci_dev);
 
 	return error;
 }
@@ -493,16 +575,30 @@
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
-	pci_fixup_device(pci_fixup_resume_early, to_pci_dev(dev));
+	if (pci_has_legacy_pm_support(pci_dev))
+		return pci_legacy_resume_early(dev);
 
-	if (drv && drv->pm) {
-		if (drv->pm->resume_noirq)
-			error = drv->pm->resume_noirq(dev);
-	} else if (pci_has_legacy_pm_support(pci_dev)) {
-		error = pci_legacy_resume_early(dev);
-	} else {
-		pci_default_pm_resume_early(pci_dev);
-	}
+	pci_pm_default_resume_noirq(pci_dev);
+
+	if (drv && drv->pm && drv->pm->resume_noirq)
+		error = drv->pm->resume_noirq(dev);
+
+	return error;
+}
+
+static int pci_pm_resume(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct device_driver *drv = dev->driver;
+	int error = 0;
+
+	if (pci_has_legacy_pm_support(pci_dev))
+		return pci_legacy_resume(dev);
+
+	error = pci_pm_default_resume(pci_dev);
+
+	if (!error && drv && drv->pm && drv->pm->resume)
+		error = drv->pm->resume(dev);
 
 	return error;
 }
@@ -524,16 +620,17 @@
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
-	if (drv && drv->pm) {
-		if (drv->pm->freeze) {
-			error = drv->pm->freeze(dev);
-			suspend_report_result(drv->pm->freeze, error);
-		}
-	} else if (pci_has_legacy_pm_support(pci_dev)) {
-		error = pci_legacy_suspend(dev, PMSG_FREEZE);
-		pci_fixup_device(pci_fixup_suspend, pci_dev);
+	if (pci_has_legacy_pm_support(pci_dev))
+		return pci_legacy_suspend(dev, PMSG_FREEZE);
+
+	if (drv && drv->pm && drv->pm->freeze) {
+		error = drv->pm->freeze(dev);
+		suspend_report_result(drv->pm->freeze, error);
 	}
 
+	if (!error)
+		pci_pm_default_suspend_generic(pci_dev);
+
 	return error;
 }
 
@@ -543,33 +640,16 @@
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
-	if (drv && drv->pm) {
-		if (drv->pm->freeze_noirq) {
-			error = drv->pm->freeze_noirq(dev);
-			suspend_report_result(drv->pm->freeze_noirq, error);
-		}
-	} else if (pci_has_legacy_pm_support(pci_dev)) {
-		error = pci_legacy_suspend_late(dev, PMSG_FREEZE);
-	} else {
-		pci_default_pm_suspend(pci_dev);
+	if (pci_has_legacy_pm_support(pci_dev))
+		return pci_legacy_suspend_late(dev, PMSG_FREEZE);
+
+	if (drv && drv->pm && drv->pm->freeze_noirq) {
+		error = drv->pm->freeze_noirq(dev);
+		suspend_report_result(drv->pm->freeze_noirq, error);
 	}
 
-	return error;
-}
-
-static int pci_pm_thaw(struct device *dev)
-{
-	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct device_driver *drv = dev->driver;
-	int error = 0;
-
-	if (drv && drv->pm) {
-		if (drv->pm->thaw)
-			error =  drv->pm->thaw(dev);
-	} else if (pci_has_legacy_pm_support(pci_dev)) {
-		pci_fixup_device(pci_fixup_resume, pci_dev);
-		error = pci_legacy_resume(dev);
-	}
+	if (!error)
+		pci_pm_set_unknown_state(pci_dev);
 
 	return error;
 }
@@ -580,13 +660,30 @@
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
-	if (drv && drv->pm) {
-		if (drv->pm->thaw_noirq)
-			error = drv->pm->thaw_noirq(dev);
-	} else if (pci_has_legacy_pm_support(pci_dev)) {
-		pci_fixup_device(pci_fixup_resume_early, to_pci_dev(dev));
-		error = pci_legacy_resume_early(dev);
-	}
+	if (pci_has_legacy_pm_support(pci_dev))
+		return pci_legacy_resume_early(dev);
+
+	pci_update_current_state(pci_dev, PCI_D0);
+
+	if (drv && drv->pm && drv->pm->thaw_noirq)
+		error = drv->pm->thaw_noirq(dev);
+
+	return error;
+}
+
+static int pci_pm_thaw(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct device_driver *drv = dev->driver;
+	int error = 0;
+
+	if (pci_has_legacy_pm_support(pci_dev))
+		return pci_legacy_resume(dev);
+
+	pci_pm_reenable_device(pci_dev);
+
+	if (drv && drv->pm && drv->pm->thaw)
+		error =  drv->pm->thaw(dev);
 
 	return error;
 }
@@ -597,17 +694,17 @@
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
-	pci_fixup_device(pci_fixup_suspend, pci_dev);
+	if (pci_has_legacy_pm_support(pci_dev))
+		return pci_legacy_suspend(dev, PMSG_HIBERNATE);
 
-	if (drv && drv->pm) {
-		if (drv->pm->poweroff) {
-			error = drv->pm->poweroff(dev);
-			suspend_report_result(drv->pm->poweroff, error);
-		}
-	} else if (pci_has_legacy_pm_support(pci_dev)) {
-		error = pci_legacy_suspend(dev, PMSG_HIBERNATE);
+	if (drv && drv->pm && drv->pm->poweroff) {
+		error = drv->pm->poweroff(dev);
+		suspend_report_result(drv->pm->poweroff, error);
 	}
 
+	if (!error)
+		pci_pm_default_suspend(pci_dev);
+
 	return error;
 }
 
@@ -616,54 +713,47 @@
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
-	if (drv && drv->pm) {
-		if (drv->pm->poweroff_noirq) {
-			error = drv->pm->poweroff_noirq(dev);
-			suspend_report_result(drv->pm->poweroff_noirq, error);
-		}
-	} else if (pci_has_legacy_pm_support(to_pci_dev(dev))) {
-		error = pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
+	if (pci_has_legacy_pm_support(to_pci_dev(dev)))
+		return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
+
+	if (drv && drv->pm && drv->pm->poweroff_noirq) {
+		error = drv->pm->poweroff_noirq(dev);
+		suspend_report_result(drv->pm->poweroff_noirq, error);
 	}
 
 	return error;
 }
 
-static int pci_pm_restore(struct device *dev)
-{
-	struct pci_dev *pci_dev = to_pci_dev(dev);
-	struct device_driver *drv = dev->driver;
-	int error = 0;
-
-	if (drv && drv->pm) {
-		if (drv->pm->restore)
-			error = drv->pm->restore(dev);
-	} else if (pci_has_legacy_pm_support(pci_dev)) {
-		error = pci_legacy_resume(dev);
-	} else {
-		error = pci_default_pm_resume_late(pci_dev);
-	}
-	pci_fixup_device(pci_fixup_resume, pci_dev);
-
-	return error;
-}
-
 static int pci_pm_restore_noirq(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
-	pci_fixup_device(pci_fixup_resume, pci_dev);
+	if (pci_has_legacy_pm_support(pci_dev))
+		return pci_legacy_resume_early(dev);
 
-	if (drv && drv->pm) {
-		if (drv->pm->restore_noirq)
-			error = drv->pm->restore_noirq(dev);
-	} else if (pci_has_legacy_pm_support(pci_dev)) {
-		error = pci_legacy_resume_early(dev);
-	} else {
-		pci_default_pm_resume_early(pci_dev);
-	}
-	pci_fixup_device(pci_fixup_resume_early, pci_dev);
+	pci_pm_default_resume_noirq(pci_dev);
+
+	if (drv && drv->pm && drv->pm->restore_noirq)
+		error = drv->pm->restore_noirq(dev);
+
+	return error;
+}
+
+static int pci_pm_restore(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct device_driver *drv = dev->driver;
+	int error = 0;
+
+	if (pci_has_legacy_pm_support(pci_dev))
+		return pci_legacy_resume(dev);
+
+	error = pci_pm_default_resume(pci_dev);
+
+	if (!error && drv && drv->pm && drv->pm->restore)
+		error = drv->pm->restore(dev);
 
 	return error;
 }
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
new file mode 100644
index 0000000..74fbec0
--- /dev/null
+++ b/drivers/pci/pci-stub.c
@@ -0,0 +1,47 @@
+/* pci-stub - simple stub driver to reserve a pci device
+ *
+ * Copyright (C) 2008 Red Hat, Inc.
+ * Author:
+ * 	Chris Wright
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Usage is simple: allocate a new id to the stub driver and bind the
+ * device to it.  For example:
+ * 
+ * # echo "8086 10f5" > /sys/bus/pci/drivers/pci-stub/new_id
+ * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind
+ * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/pci-stub/bind
+ * # ls -l /sys/bus/pci/devices/0000:00:19.0/driver
+ * .../0000:00:19.0/driver -> ../../../bus/pci/drivers/pci-stub
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	return 0;
+}
+
+static struct pci_driver stub_driver = {
+	.name		= "pci-stub",
+	.id_table	= NULL,	/* only dynamic id's */
+	.probe		= pci_stub_probe,
+};
+
+static int __init pci_stub_init(void)
+{
+	return pci_register_driver(&stub_driver);
+}
+
+static void __exit pci_stub_exit(void)
+{
+	pci_unregister_driver(&stub_driver);
+}
+
+module_init(pci_stub_init);
+module_exit(pci_stub_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chris Wright <chrisw@sous-sol.org>");
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index c884858..db7ec14 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -58,23 +58,24 @@
 					  const char *buf, size_t count)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
-	ssize_t consumed = -EINVAL;
+	unsigned long val;
 
-	if ((count > 0) && (*buf == '0' || *buf == '1')) {
-		pdev->broken_parity_status = *buf == '1' ? 1 : 0;
-		consumed = count;
-	}
-	return consumed;
+	if (strict_strtoul(buf, 0, &val) < 0)
+		return -EINVAL;
+
+	pdev->broken_parity_status = !!val;
+
+	return count;
 }
 
 static ssize_t local_cpus_show(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {		
-	cpumask_t mask;
+	const struct cpumask *mask;
 	int len;
 
-	mask = pcibus_to_cpumask(to_pci_dev(dev)->bus);
-	len = cpumask_scnprintf(buf, PAGE_SIZE-2, &mask);
+	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
+	len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
 	buf[len++] = '\n';
 	buf[len] = '\0';
 	return len;
@@ -84,11 +85,11 @@
 static ssize_t local_cpulist_show(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
-	cpumask_t mask;
+	const struct cpumask *mask;
 	int len;
 
-	mask = pcibus_to_cpumask(to_pci_dev(dev)->bus);
-	len = cpulist_scnprintf(buf, PAGE_SIZE-2, &mask);
+	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
+	len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask);
 	buf[len++] = '\n';
 	buf[len] = '\0';
 	return len;
@@ -101,11 +102,13 @@
 	struct pci_dev * pci_dev = to_pci_dev(dev);
 	char * str = buf;
 	int i;
-	int max = 7;
+	int max;
 	resource_size_t start, end;
 
 	if (pci_dev->subordinate)
 		max = DEVICE_COUNT_RESOURCE;
+	else
+		max = PCI_BRIDGE_RESOURCES;
 
 	for (i = 0; i < max; i++) {
 		struct resource *res =  &pci_dev->resource[i];
@@ -133,19 +136,23 @@
 				struct device_attribute *attr, const char *buf,
 				size_t count)
 {
-	ssize_t result = -EINVAL;
 	struct pci_dev *pdev = to_pci_dev(dev);
+	unsigned long val;
+	ssize_t result = strict_strtoul(buf, 0, &val);
+
+	if (result < 0)
+		return result;
 
 	/* this can crash the machine when done on the "wrong" device */
 	if (!capable(CAP_SYS_ADMIN))
-		return count;
+		return -EPERM;
 
-	if (*buf == '0') {
+	if (!val) {
 		if (atomic_read(&pdev->enable_cnt) != 0)
 			pci_disable_device(pdev);
 		else
 			result = -EIO;
-	} else if (*buf == '1')
+	} else
 		result = pci_enable_device(pdev);
 
 	return result < 0 ? result : count;
@@ -185,25 +192,28 @@
 	      const char *buf, size_t count)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
+	unsigned long val;
+
+	if (strict_strtoul(buf, 0, &val) < 0)
+		return -EINVAL;
 
 	/* bad things may happen if the no_msi flag is changed
 	 * while some drivers are loaded */
 	if (!capable(CAP_SYS_ADMIN))
-		return count;
+		return -EPERM;
 
+	/* Maybe PCI devices without subordinate buses shouldn't even have this
+	 * attribute in the first place?  */
 	if (!pdev->subordinate)
 		return count;
 
-	if (*buf == '0') {
-		pdev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
-		dev_warn(&pdev->dev, "forced subordinate bus to not support MSI,"
-			 " bad things could happen.\n");
-	}
+	/* Is the flag going to change, or keep the value it already had? */
+	if (!(pdev->subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI) ^
+	    !!val) {
+		pdev->subordinate->bus_flags ^= PCI_BUS_FLAGS_NO_MSI;
 
-	if (*buf == '1') {
-		pdev->subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
-		dev_warn(&pdev->dev, "forced subordinate bus to support MSI,"
-			 " bad things could happen.\n");
+		dev_warn(&pdev->dev, "forced subordinate bus to%s support MSI,"
+			 " bad things could happen\n", val ? "" : " not");
 	}
 
 	return count;
@@ -361,55 +371,33 @@
 }
 
 static ssize_t
-pci_read_vpd(struct kobject *kobj, struct bin_attribute *bin_attr,
-	     char *buf, loff_t off, size_t count)
-{
-	struct pci_dev *dev =
-		to_pci_dev(container_of(kobj, struct device, kobj));
-	int end;
-	int ret;
-
-	if (off > bin_attr->size)
-		count = 0;
-	else if (count > bin_attr->size - off)
-		count = bin_attr->size - off;
-	end = off + count;
-
-	while (off < end) {
-		ret = dev->vpd->ops->read(dev, off, end - off, buf);
-		if (ret < 0)
-			return ret;
-		buf += ret;
-		off += ret;
-	}
-
-	return count;
-}
-
-static ssize_t
-pci_write_vpd(struct kobject *kobj, struct bin_attribute *bin_attr,
+read_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
 	      char *buf, loff_t off, size_t count)
 {
 	struct pci_dev *dev =
 		to_pci_dev(container_of(kobj, struct device, kobj));
-	int end;
-	int ret;
 
 	if (off > bin_attr->size)
 		count = 0;
 	else if (count > bin_attr->size - off)
 		count = bin_attr->size - off;
-	end = off + count;
 
-	while (off < end) {
-		ret = dev->vpd->ops->write(dev, off, end - off, buf);
-		if (ret < 0)
-			return ret;
-		buf += ret;
-		off += ret;
-	}
+	return pci_read_vpd(dev, off, count, buf);
+}
 
-	return count;
+static ssize_t
+write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
+	       char *buf, loff_t off, size_t count)
+{
+	struct pci_dev *dev =
+		to_pci_dev(container_of(kobj, struct device, kobj));
+
+	if (off > bin_attr->size)
+		count = 0;
+	else if (count > bin_attr->size - off)
+		count = bin_attr->size - off;
+
+	return pci_write_vpd(dev, off, count, buf);
 }
 
 #ifdef HAVE_PCI_LEGACY
@@ -569,7 +557,7 @@
 
 #ifdef HAVE_PCI_MMAP
 
-static int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma)
+int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma)
 {
 	unsigned long nr, start, size;
 
@@ -620,6 +608,9 @@
 	vma->vm_pgoff += start >> PAGE_SHIFT;
 	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
 
+	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(start))
+		return -EINVAL;
+
 	return pci_mmap_page_range(pdev, vma, mmap_type, write_combine);
 }
 
@@ -832,8 +823,8 @@
 		attr->size = dev->vpd->len;
 		attr->attr.name = "vpd";
 		attr->attr.mode = S_IRUSR | S_IWUSR;
-		attr->read = pci_read_vpd;
-		attr->write = pci_write_vpd;
+		attr->read = read_vpd_attr;
+		attr->write = write_vpd_attr;
 		retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
 		if (retval) {
 			kfree(dev->vpd->attr);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 061d1ee..e491fde 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -56,6 +56,22 @@
 }
 EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
 
+#ifdef CONFIG_HAS_IOMEM
+void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
+{
+	/*
+	 * Make sure the BAR is actually a memory resource, not an IO resource
+	 */
+	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
+		WARN_ON(1);
+		return NULL;
+	}
+	return ioremap_nocache(pci_resource_start(pdev, bar),
+				     pci_resource_len(pdev, bar));
+}
+EXPORT_SYMBOL_GPL(pci_ioremap_bar);
+#endif
+
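
pci_ioremap_bar() replaces the open-coded ioremap_nocache(pci_resource_start(), pci_resource_len()) idiom and refuses to map BARs that are not memory. A sketch of a probe built on it (the foo names are assumptions):

#include <linux/pci.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "foo");
	if (err)
		goto err_disable;

	regs = pci_ioremap_bar(pdev, 0);	/* NULL if BAR 0 is not MEM */
	if (!regs) {
		err = -ENOMEM;
		goto err_release;
	}

	pci_set_drvdata(pdev, regs);
	return 0;

err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return err;
}
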
 #if 0
 /**
  * pci_max_busnr - returns maximum PCI bus number
@@ -360,25 +376,10 @@
 static void
 pci_restore_bars(struct pci_dev *dev)
 {
-	int i, numres;
+	int i;
 
-	switch (dev->hdr_type) {
-	case PCI_HEADER_TYPE_NORMAL:
-		numres = 6;
-		break;
-	case PCI_HEADER_TYPE_BRIDGE:
-		numres = 2;
-		break;
-	case PCI_HEADER_TYPE_CARDBUS:
-		numres = 1;
-		break;
-	default:
-		/* Should never get here, but just in case... */
-		return;
-	}
-
-	for (i = 0; i < numres; i ++)
-		pci_update_resource(dev, &dev->resource[i], i);
+	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
+		pci_update_resource(dev, i);
 }
 
 static struct pci_platform_pm_ops *pci_platform_pm;
@@ -524,14 +525,17 @@
  * pci_update_current_state - Read PCI power state of given device from its
  *                            PCI PM registers and cache it
  * @dev: PCI device to handle.
+ * @state: State to cache in case the device doesn't have the PM capability
  */
-static void pci_update_current_state(struct pci_dev *dev)
+void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
 {
 	if (dev->pm_cap) {
 		u16 pmcsr;
 
 		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+	} else {
+		dev->current_state = state;
 	}
 }
 
@@ -574,7 +578,7 @@
 		 */
 		int ret = platform_pci_set_power_state(dev, PCI_D0);
 		if (!ret)
-			pci_update_current_state(dev);
+			pci_update_current_state(dev, PCI_D0);
 	}
 	/* This device is quirked not to be put into D3, so
 	   don't put it in D3 */
@@ -587,7 +591,7 @@
 		/* Allow the platform to finalize the transition */
 		int ret = platform_pci_set_power_state(dev, state);
 		if (!ret) {
-			pci_update_current_state(dev);
+			pci_update_current_state(dev, state);
 			error = 0;
 		}
 	}
@@ -640,19 +644,14 @@
 	int pos, i = 0;
 	struct pci_cap_saved_state *save_state;
 	u16 *cap;
-	int found = 0;
 
 	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
 	if (pos <= 0)
 		return 0;
 
 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
-	if (!save_state)
-		save_state = kzalloc(sizeof(*save_state) + sizeof(u16) * 4, GFP_KERNEL);
-	else
-		found = 1;
 	if (!save_state) {
-		dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n");
+		dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__);
 		return -ENOMEM;
 	}
 	cap = (u16 *)&save_state->data[0];
@@ -661,9 +660,7 @@
 	pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
 	pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
 	pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
-	save_state->cap_nr = PCI_CAP_ID_EXP;
-	if (!found)
-		pci_add_saved_cap(dev, save_state);
+
 	return 0;
 }
 
@@ -688,30 +685,21 @@
 
 static int pci_save_pcix_state(struct pci_dev *dev)
 {
-	int pos, i = 0;
+	int pos;
 	struct pci_cap_saved_state *save_state;
-	u16 *cap;
-	int found = 0;
 
 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 	if (pos <= 0)
 		return 0;
 
 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
-	if (!save_state)
-		save_state = kzalloc(sizeof(*save_state) + sizeof(u16), GFP_KERNEL);
-	else
-		found = 1;
 	if (!save_state) {
-		dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n");
+		dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__);
 		return -ENOMEM;
 	}
-	cap = (u16 *)&save_state->data[0];
 
-	pci_read_config_word(dev, pos + PCI_X_CMD, &cap[i++]);
-	save_state->cap_nr = PCI_CAP_ID_PCIX;
-	if (!found)
-		pci_add_saved_cap(dev, save_state);
+	pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data);
+
 	return 0;
 }
 
@@ -982,6 +970,32 @@
  */
 void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
 
+static void do_pci_disable_device(struct pci_dev *dev)
+{
+	u16 pci_command;
+
+	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
+	if (pci_command & PCI_COMMAND_MASTER) {
+		pci_command &= ~PCI_COMMAND_MASTER;
+		pci_write_config_word(dev, PCI_COMMAND, pci_command);
+	}
+
+	pcibios_disable_device(dev);
+}
+
+/**
+ * pci_disable_enabled_device - Disable device without updating enable_cnt
+ * @dev: PCI device to disable
+ *
+ * NOTE: This function is a backend of PCI power management routines and is
+ * not supposed to be called by drivers.
+ */
+void pci_disable_enabled_device(struct pci_dev *dev)
+{
+	if (atomic_read(&dev->enable_cnt))
+		do_pci_disable_device(dev);
+}
+
 /**
  * pci_disable_device - Disable PCI device after use
  * @dev: PCI device to be disabled
@@ -996,7 +1010,6 @@
 pci_disable_device(struct pci_dev *dev)
 {
 	struct pci_devres *dr;
-	u16 pci_command;
 
 	dr = find_pci_dr(dev);
 	if (dr)
@@ -1005,14 +1018,9 @@
 	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
 		return;
 
-	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
-	if (pci_command & PCI_COMMAND_MASTER) {
-		pci_command &= ~PCI_COMMAND_MASTER;
-		pci_write_config_word(dev, PCI_COMMAND, pci_command);
-	}
-	dev->is_busmaster = 0;
+	do_pci_disable_device(dev);
 
-	pcibios_disable_device(dev);
+	dev->is_busmaster = 0;
 }
 
 /**
@@ -1107,7 +1115,7 @@
 	int error = 0;
 	bool pme_done = false;
 
-	if (!device_may_wakeup(&dev->dev))
+	if (enable && !device_may_wakeup(&dev->dev))
 		return -EINVAL;
 
 	/*
@@ -1301,6 +1309,71 @@
 }
 
 /**
+ * platform_pci_wakeup_init - init platform wakeup if present
+ * @dev: PCI device
+ *
+ * Some devices don't have PCI PM caps but can still generate wakeup
+ * events through platform methods (like ACPI events).  If @dev supports
+ * platform wakeup events, set the device flag to indicate as much.  This
+ * may be redundant if the device also supports PCI PM caps, but double
+ * initialization should be safe in that case.
+ */
+void platform_pci_wakeup_init(struct pci_dev *dev)
+{
+	if (!platform_pci_can_wakeup(dev))
+		return;
+
+	device_set_wakeup_capable(&dev->dev, true);
+	device_set_wakeup_enable(&dev->dev, false);
+	platform_pci_sleep_wake(dev, false);
+}
+
+/**
+ * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
+ * @dev: the PCI device
+ * @cap: the capability to allocate the buffer for
+ * @size: requested size of the buffer
+ */
+static int pci_add_cap_save_buffer(
+	struct pci_dev *dev, char cap, unsigned int size)
+{
+	int pos;
+	struct pci_cap_saved_state *save_state;
+
+	pos = pci_find_capability(dev, cap);
+	if (pos <= 0)
+		return 0;
+
+	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
+	if (!save_state)
+		return -ENOMEM;
+
+	save_state->cap_nr = cap;
+	pci_add_saved_cap(dev, save_state);
+
+	return 0;
+}
+
+/**
+ * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
+ * @dev: the PCI device
+ */
+void pci_allocate_cap_save_buffers(struct pci_dev *dev)
+{
+	int error;
+
+	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 4 * sizeof(u16));
+	if (error)
+		dev_err(&dev->dev,
+			"unable to preallocate PCI Express save buffer\n");
+
+	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
+	if (error)
+		dev_err(&dev->dev,
+			"unable to preallocate PCI-X save buffer\n");
+}
+
+/**
  * pci_enable_ari - enable ARI forwarding if hardware support it
  * @dev: the PCI device
  */
@@ -1337,6 +1410,20 @@
 	bridge->ari_enabled = 1;
 }
 
+/**
+ * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
+ * @dev: the PCI device
+ * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
+ *
+ * Perform INTx swizzling for a device behind one level of bridge.  This is
+ * required by section 9.1 of the PCI-to-PCI bridge specification for devices
+ * behind bridges on add-in cards.
+ */
+u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
+{
+	return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
+}
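
A worked example of the swizzle: INTB (pin 2) on a device in slot 3 behind a bridge comes out as ((2 - 1) + 3) % 4 + 1 = 1, i.e. INTA, on the bridge's primary side. A tiny illustrative wrapper, assuming a hypothetical caller that only needs one bridge level:

#include <linux/pci.h>

/* Pin as seen one bridge up, e.g. slot 3 + INTB (2) -> INTA (1). */
static u8 foo_pin_at_parent(struct pci_dev *dev, u8 pin)
{
	return pci_swizzle_interrupt_pin(dev, pin);
}
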
+
 int
 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
 {
@@ -1345,9 +1432,9 @@
 	pin = dev->pin;
 	if (!pin)
 		return -1;
-	pin--;
+
 	while (dev->bus->self) {
-		pin = (pin + PCI_SLOT(dev->devfn)) % 4;
+		pin = pci_swizzle_interrupt_pin(dev, pin);
 		dev = dev->bus->self;
 	}
 	*bridge = dev;
@@ -1355,6 +1442,26 @@
 }
 
 /**
+ * pci_common_swizzle - swizzle INTx all the way to root bridge
+ * @dev: the PCI device
+ * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
+ *
+ * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
+ * bridges all the way up to a PCI root bus.
+ */
+u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+	u8 pin = *pinp;
+
+	while (dev->bus->self) {
+		pin = pci_swizzle_interrupt_pin(dev, pin);
+		dev = dev->bus->self;
+	}
+	*pinp = pin;
+	return PCI_SLOT(dev->devfn);
+}
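
And a sketch of the kind of per-architecture IRQ lookup pci_common_swizzle() is meant to feed: swizzle the pin all the way up to the root bus, then index a board routing table (foo_irq_tab is an assumption, not a real kernel symbol):

#include <linux/pci.h>

static int foo_irq_tab[32][4];		/* hypothetical slot x pin table */

static int foo_map_irq(struct pci_dev *dev, u8 pin)
{
	u8 slot = pci_common_swizzle(dev, &pin);

	return foo_irq_tab[slot][pin - 1];
}
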
+
+/**
  *	pci_release_region - Release a PCI bar
  *	@pdev: PCI device whose resources were previously reserved by pci_request_region
  *	@bar: BAR to release
@@ -1395,7 +1502,8 @@
  *	Returns 0 on success, or %EBUSY on error.  A warning
  *	message is also printed on failure.
  */
-int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
+static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
+									int exclusive)
 {
 	struct pci_devres *dr;
 
@@ -1408,8 +1516,9 @@
 			goto err_out;
 	}
 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
-		if (!request_mem_region(pci_resource_start(pdev, bar),
-				        pci_resource_len(pdev, bar), res_name))
+		if (!__request_mem_region(pci_resource_start(pdev, bar),
+					pci_resource_len(pdev, bar), res_name,
+					exclusive))
 			goto err_out;
 	}
 
@@ -1428,6 +1537,47 @@
 }
 
 /**
+ *	pci_request_region - Reserve PCI I/O and memory resource
+ *	@pdev: PCI device whose resources are to be reserved
+ *	@bar: BAR to be reserved
+ *	@res_name: Name to be associated with resource.
+ *
+ *	Mark the PCI region associated with PCI device @pdev BAR @bar as
+ *	being reserved by owner @res_name.  Do not access any
+ *	address inside the PCI regions unless this call returns
+ *	successfully.
+ *
+ *	Returns 0 on success, or %EBUSY on error.  A warning
+ *	message is also printed on failure.
+ */
+int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
+{
+	return __pci_request_region(pdev, bar, res_name, 0);
+}
+
+/**
+ *	pci_request_region_exclusive - Reserve PCI I/O and memory resource
+ *	@pdev: PCI device whose resources are to be reserved
+ *	@bar: BAR to be reserved
+ *	@res_name: Name to be associated with resource.
+ *
+ *	Mark the PCI region associated with PCI device @pdev BAR @bar as
+ *	being reserved by owner @res_name.  Do not access any
+ *	address inside the PCI regions unless this call returns
+ *	successfully.
+ *
+ *	Returns 0 on success, or %EBUSY on error.  A warning
+ *	message is also printed on failure.
+ *
+ *	The key difference that _exclusive makes is that userspace is
+ *	explicitly not allowed to map the resource via /dev/mem or
+ * 	sysfs.
+ */
+int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
+{
+	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
+}
+/**
  * pci_release_selected_regions - Release selected PCI I/O and memory resources
  * @pdev: PCI device whose resources were previously reserved
  * @bars: Bitmask of BARs to be released
@@ -1444,20 +1594,14 @@
 			pci_release_region(pdev, i);
 }
 
-/**
- * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
- * @pdev: PCI device whose resources are to be reserved
- * @bars: Bitmask of BARs to be requested
- * @res_name: Name to be associated with resource
- */
-int pci_request_selected_regions(struct pci_dev *pdev, int bars,
-				 const char *res_name)
+int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
+				 const char *res_name, int excl)
 {
 	int i;
 
 	for (i = 0; i < 6; i++)
 		if (bars & (1 << i))
-			if(pci_request_region(pdev, i, res_name))
+			if (__pci_request_region(pdev, i, res_name, excl))
 				goto err_out;
 	return 0;
 
@@ -1469,6 +1613,26 @@
 	return -EBUSY;
 }
 
+
+/**
+ * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
+ * @pdev: PCI device whose resources are to be reserved
+ * @bars: Bitmask of BARs to be requested
+ * @res_name: Name to be associated with resource
+ */
+int pci_request_selected_regions(struct pci_dev *pdev, int bars,
+				 const char *res_name)
+{
+	return __pci_request_selected_regions(pdev, bars, res_name, 0);
+}
+
+int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
+				 int bars, const char *res_name)
+{
+	return __pci_request_selected_regions(pdev, bars, res_name,
+			IORESOURCE_EXCLUSIVE);
+}
+
 /**
  *	pci_release_regions - Release reserved PCI I/O and memory resources
  *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
@@ -1502,27 +1666,66 @@
 }
 
 /**
+ *	pci_request_regions_exclusive - Reserve PCI I/O and memory resources
+ *	@pdev: PCI device whose resources are to be reserved
+ *	@res_name: Name to be associated with resource.
+ *
+ *	Mark all PCI regions associated with PCI device @pdev as
+ *	being reserved by owner @res_name.  Do not access any
+ *	address inside the PCI regions unless this call returns
+ *	successfully.
+ *
+ *	pci_request_regions_exclusive() will mark the region so that
+ * 	/dev/mem and the sysfs MMIO access will not be allowed.
+ *
+ *	Returns 0 on success, or %EBUSY on error.  A warning
+ *	message is also printed on failure.
+ */
+int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
+{
+	return pci_request_selected_regions_exclusive(pdev,
+					((1 << 6) - 1), res_name);
+}
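
The practical difference is small from the driver's point of view; a hedged sketch (foo is hypothetical) that simply swaps in the _exclusive variant so userspace cannot map the same BARs via /dev/mem or sysfs:

#include <linux/pci.h>

static int foo_claim_bars(struct pci_dev *pdev)
{
	/* Same semantics as pci_request_regions(), plus the regions are
	 * marked IORESOURCE_EXCLUSIVE so /dev/mem and sysfs mmap fail. */
	return pci_request_regions_exclusive(pdev, "foo");
}
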
+
+static void __pci_set_master(struct pci_dev *dev, bool enable)
+{
+	u16 old_cmd, cmd;
+
+	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
+	if (enable)
+		cmd = old_cmd | PCI_COMMAND_MASTER;
+	else
+		cmd = old_cmd & ~PCI_COMMAND_MASTER;
+	if (cmd != old_cmd) {
+		dev_dbg(&dev->dev, "%s bus mastering\n",
+			enable ? "enabling" : "disabling");
+		pci_write_config_word(dev, PCI_COMMAND, cmd);
+	}
+	dev->is_busmaster = enable;
+}
+
+/**
  * pci_set_master - enables bus-mastering for device dev
  * @dev: the PCI device to enable
  *
  * Enables bus-mastering on the device and calls pcibios_set_master()
  * to do the needed arch specific settings.
  */
-void
-pci_set_master(struct pci_dev *dev)
+void pci_set_master(struct pci_dev *dev)
 {
-	u16 cmd;
-
-	pci_read_config_word(dev, PCI_COMMAND, &cmd);
-	if (! (cmd & PCI_COMMAND_MASTER)) {
-		dev_dbg(&dev->dev, "enabling bus mastering\n");
-		cmd |= PCI_COMMAND_MASTER;
-		pci_write_config_word(dev, PCI_COMMAND, cmd);
-	}
-	dev->is_busmaster = 1;
+	__pci_set_master(dev, true);
 	pcibios_set_master(dev);
 }
 
+/**
+ * pci_clear_master - disables bus-mastering for device dev
+ * @dev: the PCI device to disable
+ */
+void pci_clear_master(struct pci_dev *dev)
+{
+	__pci_set_master(dev, false);
+}
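
pci_clear_master() is the teardown counterpart of pci_set_master(); a hedged sketch of a remove path using it (foo_remove is illustrative only):

#include <linux/pci.h>

static void foo_remove(struct pci_dev *pdev)
{
	/* Stop the device mastering the bus before it goes away. */
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
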
+
 #ifdef PCI_DISABLE_MWI
 int pci_set_mwi(struct pci_dev *dev)
 {
@@ -1751,24 +1954,7 @@
 EXPORT_SYMBOL(pci_set_dma_seg_boundary);
 #endif
 
-/**
- * pci_execute_reset_function() - Reset a PCI device function
- * @dev: Device function to reset
- *
- * Some devices allow an individual function to be reset without affecting
- * other functions in the same device.  The PCI device must be responsive
- * to PCI config space in order to use this function.
- *
- * The device function is presumed to be unused when this function is called.
- * Resetting the device will make the contents of PCI configuration space
- * random, so any caller of this must be prepared to reinitialise the
- * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
- * etc.
- *
- * Returns 0 if the device function was successfully reset or -ENOTTY if the
- * device doesn't support resetting a single function.
- */
-int pci_execute_reset_function(struct pci_dev *dev)
+static int __pcie_flr(struct pci_dev *dev, int probe)
 {
 	u16 status;
 	u32 cap;
@@ -1780,6 +1966,9 @@
 	if (!(cap & PCI_EXP_DEVCAP_FLR))
 		return -ENOTTY;
 
+	if (probe)
+		return 0;
+
 	pci_block_user_cfg_access(dev);
 
 	/* Wait for Transaction Pending bit clean */
@@ -1802,6 +1991,80 @@
 	pci_unblock_user_cfg_access(dev);
 	return 0;
 }
+
+static int __pci_af_flr(struct pci_dev *dev, int probe)
+{
+	int cappos = pci_find_capability(dev, PCI_CAP_ID_AF);
+	u8 status;
+	u8 cap;
+
+	if (!cappos)
+		return -ENOTTY;
+	pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap);
+	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
+		return -ENOTTY;
+
+	if (probe)
+		return 0;
+
+	pci_block_user_cfg_access(dev);
+
+	/* Wait for Transaction Pending bit clean */
+	msleep(100);
+	pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
+	if (status & PCI_AF_STATUS_TP) {
+		dev_info(&dev->dev, "Busy after 100ms while trying to"
+				" reset; sleeping for 1 second\n");
+		ssleep(1);
+		pci_read_config_byte(dev,
+				cappos + PCI_AF_STATUS, &status);
+		if (status & PCI_AF_STATUS_TP)
+			dev_info(&dev->dev, "Still busy after 1s; "
+					"proceeding with reset anyway\n");
+	}
+	pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
+	mdelay(100);
+
+	pci_unblock_user_cfg_access(dev);
+	return 0;
+}
+
+static int __pci_reset_function(struct pci_dev *pdev, int probe)
+{
+	int res;
+
+	res = __pcie_flr(pdev, probe);
+	if (res != -ENOTTY)
+		return res;
+
+	res = __pci_af_flr(pdev, probe);
+	if (res != -ENOTTY)
+		return res;
+
+	return res;
+}
+
+/**
+ * pci_execute_reset_function() - Reset a PCI device function
+ * @dev: Device function to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device.  The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * The device function is presumed to be unused when this function is called.
+ * Resetting the device will make the contents of PCI configuration space
+ * random, so any caller of this must be prepared to reinitialise the
+ * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
+ * etc.
+ *
+ * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * device doesn't support resetting a single function.
+ */
+int pci_execute_reset_function(struct pci_dev *dev)
+{
+	return __pci_reset_function(dev, 0);
+}
 EXPORT_SYMBOL_GPL(pci_execute_reset_function);
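
Since a successful FLR leaves configuration space in a random state, a caller is expected to save and restore it around the reset. A minimal sketch under that assumption (foo_reset is not part of this patch):

#include <linux/pci.h>

static int foo_reset(struct pci_dev *pdev)
{
	int err;

	pci_save_state(pdev);
	err = pci_execute_reset_function(pdev);
	if (err)
		return err;	/* e.g. -ENOTTY: neither FLR nor AF FLR */

	pci_restore_state(pdev);
	return 0;
}
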
 
 /**
@@ -1822,15 +2085,10 @@
  */
 int pci_reset_function(struct pci_dev *dev)
 {
-	u32 cap;
-	int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
-	int r;
+	int r = __pci_reset_function(dev, 1);
 
-	if (!exppos)
-		return -ENOTTY;
-	pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
-	if (!(cap & PCI_EXP_DEVCAP_FLR))
-		return -ENOTTY;
+	if (r < 0)
+		return r;
 
 	if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
 		disable_irq(dev->irq);
@@ -2022,6 +2280,28 @@
 	return bars;
 }
 
+/**
+ * pci_resource_bar - get position of the BAR associated with a resource
+ * @dev: the PCI device
+ * @resno: the resource number
+ * @type: the BAR type to be filled in
+ *
+ * Returns BAR position in config space, or 0 if the BAR is invalid.
+ */
+int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
+{
+	if (resno < PCI_ROM_RESOURCE) {
+		*type = pci_bar_unknown;
+		return PCI_BASE_ADDRESS_0 + 4 * resno;
+	} else if (resno == PCI_ROM_RESOURCE) {
+		*type = pci_bar_mem32;
+		return dev->rom_base_reg;
+	}
+
+	dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
+	return 0;
+}
+
 static void __devinit pci_no_domains(void)
 {
 #ifdef CONFIG_PCI_DOMAINS
@@ -2029,6 +2309,19 @@
 #endif
 }
 
+/**
+ * pci_ext_cfg_avail - can we access extended PCI config space?
+ * @dev: The PCI device of the root bridge.
+ *
+ * Returns 1 if we can access PCI extended config space (offsets
+ * greater than 0xff). This is the default implementation. Architecture
+ * implementations can override this.
+ */
+int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
+{
+	return 1;
+}
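
Because the symbol is weak, an architecture can provide its own policy. A hypothetical override (the foo_mmconfig_usable flag is an assumption, not a real kernel symbol):

#include <linux/pci.h>

static int foo_mmconfig_usable;		/* assumed arch-specific flag */

int pci_ext_cfg_avail(struct pci_dev *dev)
{
	/* Only claim offsets above 0xff when MMCONFIG actually works. */
	return foo_mmconfig_usable;
}
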
+
 static int __devinit pci_init(void)
 {
 	struct pci_dev *dev = NULL;
@@ -2037,8 +2330,6 @@
 		pci_fixup_device(pci_fixup_final, dev);
 	}
 
-	msi_init();
-
 	return 0;
 }
 
@@ -2083,11 +2374,15 @@
 EXPORT_SYMBOL(pci_bus_find_capability);
 EXPORT_SYMBOL(pci_release_regions);
 EXPORT_SYMBOL(pci_request_regions);
+EXPORT_SYMBOL(pci_request_regions_exclusive);
 EXPORT_SYMBOL(pci_release_region);
 EXPORT_SYMBOL(pci_request_region);
+EXPORT_SYMBOL(pci_request_region_exclusive);
 EXPORT_SYMBOL(pci_release_selected_regions);
 EXPORT_SYMBOL(pci_request_selected_regions);
+EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
 EXPORT_SYMBOL(pci_set_master);
+EXPORT_SYMBOL(pci_clear_master);
 EXPORT_SYMBOL(pci_set_mwi);
 EXPORT_SYMBOL(pci_try_set_mwi);
 EXPORT_SYMBOL(pci_clear_mwi);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 9de87e9..1351bb4 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -10,6 +10,10 @@
 extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
 extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
 extern void pci_cleanup_rom(struct pci_dev *dev);
+#ifdef HAVE_PCI_MMAP
+extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
+			 struct vm_area_struct *vma);
+#endif
 
 /**
  * Firmware PM callbacks
@@ -40,7 +44,11 @@
 };
 
 extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
+extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
+extern void pci_disable_enabled_device(struct pci_dev *dev);
 extern void pci_pm_init(struct pci_dev *dev);
+extern void platform_pci_wakeup_init(struct pci_dev *dev);
+extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
 
 extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
 extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
@@ -50,14 +58,14 @@
 extern int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
 
 struct pci_vpd_ops {
-	int (*read)(struct pci_dev *dev, int pos, int size, char *buf);
-	int (*write)(struct pci_dev *dev, int pos, int size, const char *buf);
+	ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
+	ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
 	void (*release)(struct pci_dev *dev);
 };
 
 struct pci_vpd {
 	unsigned int len;
-	struct pci_vpd_ops *ops;
+	const struct pci_vpd_ops *ops;
 	struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
 };
 
@@ -98,11 +106,9 @@
 #ifdef CONFIG_PCI_MSI
 void pci_no_msi(void);
 extern void pci_msi_init_pci_dev(struct pci_dev *dev);
-extern void __devinit msi_init(void);
 #else
 static inline void pci_no_msi(void) { }
 static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
-static inline void msi_init(void) { }
 #endif
 
 #ifdef CONFIG_PCIEAER
@@ -159,16 +165,28 @@
 };
 #define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr)
 
+enum pci_bar_type {
+	pci_bar_unknown,	/* Standard PCI BAR probe */
+	pci_bar_io,		/* An io port BAR */
+	pci_bar_mem32,		/* A 32-bit memory BAR */
+	pci_bar_mem64,		/* A 64-bit memory BAR */
+};
+
+extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+				struct resource *res, unsigned int reg);
+extern int pci_resource_bar(struct pci_dev *dev, int resno,
+			    enum pci_bar_type *type);
+extern int pci_bus_add_child(struct pci_bus *bus);
 extern void pci_enable_ari(struct pci_dev *dev);
 /**
  * pci_ari_enabled - query ARI forwarding status
- * @dev: the PCI device
+ * @bus: the PCI bus
  *
 * Returns 1 if ARI forwarding is enabled, or 0 if not enabled.
  */
-static inline int pci_ari_enabled(struct pci_dev *dev)
+static inline int pci_ari_enabled(struct pci_bus *bus)
 {
-	return dev->ari_enabled;
+	return bus->self && bus->self->ari_enabled;
 }
 
 #endif /* DRIVERS_PCI_H */
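pci_ext_cfg_avail() in the pci.c hunk above provides a weak default that architecture code can override by linking in a strong definition of the same symbol. A minimal standalone sketch of that pattern, assuming a GCC-compatible toolchain on ELF (the symbol name here is illustrative, not the kernel's):

#include <stdio.h>

/* Weak default, analogous to pci_ext_cfg_avail(): used unless another
 * translation unit supplies a strong definition with the same name, in
 * which case the linker silently prefers the strong one. */
int __attribute__((weak)) ext_cfg_avail(void)
{
	return 1;	/* optimistic default: extended config space works */
}

int main(void)
{
	/* With no strong override linked in, the weak default is called. */
	printf("extended config space available: %d\n", ext_cfg_avail());
	return 0;
}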
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 6dd7b13..ebce26c 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -38,7 +38,6 @@
 
 	handle = acpi_find_root_bridge_handle(pdev);
 	if (handle) {
-		pcie_osc_support_set(OSC_EXT_PCI_CONFIG_SUPPORT);
 		status = pci_osc_control_set(handle,
 					OSC_PCI_EXPRESS_AER_CONTROL |
 					OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 3933d4f3..0fc29ae 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -233,7 +233,7 @@
 
 		if (info->flags & AER_TLP_HEADER_VALID_FLAG) {
 			unsigned char *tlp = (unsigned char *) &info->tlp;
-			printk("%sTLB Header:\n", loglevel);
+			printk("%sTLP Header:\n", loglevel);
 			printk("%s%02x%02x%02x%02x %02x%02x%02x%02x"
 				" %02x%02x%02x%02x %02x%02x%02x%02x\n",
 				loglevel,
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 9aad608..586b6f7 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/jiffies.h>
+#include <linux/delay.h>
 #include <linux/pci-aspm.h>
 #include "../pci.h"
 
@@ -33,6 +34,11 @@
 struct pcie_link_state {
 	struct list_head sibiling;
 	struct pci_dev *pdev;
+	bool downstream_has_switch;
+
+	struct pcie_link_state *parent;
+	struct list_head children;
+	struct list_head link;
 
 	/* ASPM state */
 	unsigned int support_state;
@@ -70,6 +76,8 @@
 	[POLICY_POWERSAVE] = "powersave"
 };
 
+#define LINK_RETRAIN_TIMEOUT HZ
+
 static int policy_to_aspm_state(struct pci_dev *pdev)
 {
 	struct pcie_link_state *link_state = pdev->link_state;
@@ -125,7 +133,7 @@
 	link_state->clk_pm_enabled = !!enable;
 }
 
-static void pcie_check_clock_pm(struct pci_dev *pdev)
+static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist)
 {
 	int pos;
 	u32 reg32;
@@ -149,10 +157,26 @@
 		if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
 			enabled = 0;
 	}
-	link_state->clk_pm_capable = capable;
 	link_state->clk_pm_enabled = enabled;
 	link_state->bios_clk_state = enabled;
-	pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
+	if (!blacklist) {
+		link_state->clk_pm_capable = capable;
+		pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
+	} else {
+		link_state->clk_pm_capable = 0;
+		pcie_set_clock_pm(pdev, 0);
+	}
+}
+
+static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
+{
+	struct pci_dev *child_dev;
+
+	list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
+		if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM)
+			return true;
+	}
+	return false;
 }
 
 /*
@@ -217,16 +241,18 @@
 	pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
 
 	/* Wait for link training end */
-	/* break out after waiting for 1 second */
+	/* break out after waiting for timeout */
 	start_jiffies = jiffies;
-	while ((jiffies - start_jiffies) < HZ) {
+	for (;;) {
 		pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
 		if (!(reg16 & PCI_EXP_LNKSTA_LT))
 			break;
-		cpu_relax();
+		if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
+			break;
+		msleep(1);
 	}
 	/* training failed -> recover */
-	if ((jiffies - start_jiffies) >= HZ) {
+	if (reg16 & PCI_EXP_LNKSTA_LT) {
 		dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure"
 			    " common clock\n");
 		i = 0;
@@ -419,9 +445,9 @@
 {
 	struct pci_dev *child_dev;
 
-	/* If no child, disable the link */
+	/* If no child, ignore the link */
 	if (list_empty(&pdev->subordinate->devices))
-		return 0;
+		return state;
 	list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
 		if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
 			/*
@@ -462,6 +488,9 @@
 	int valid = 1;
 	struct pcie_link_state *link_state = pdev->link_state;
 
+	/* If no child, disable the link */
+	if (list_empty(&pdev->subordinate->devices))
+		state = 0;
 	/*
 	 * if the downstream component has pci bridge function, don't do ASPM
 	 * now
@@ -493,20 +522,52 @@
 	link_state->enabled_state = state;
 }
 
+static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link)
+{
+	struct pcie_link_state *root_port_link = link;
+	while (root_port_link->parent)
+		root_port_link = root_port_link->parent;
+	return root_port_link;
+}
+
+/* check the whole hierarchy, and configure each link in the hierarchy */
 static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
 	unsigned int state)
 {
 	struct pcie_link_state *link_state = pdev->link_state;
+	struct pcie_link_state *root_port_link = get_root_port_link(link_state);
+	struct pcie_link_state *leaf;
 
-	if (link_state->support_state == 0)
-		return;
 	state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
 
-	/* state 0 means disabling aspm */
-	state = pcie_aspm_check_state(pdev, state);
+	/* check all leaf links that share this root port */
+	list_for_each_entry(leaf, &link_list, sibiling) {
+		if (!list_empty(&leaf->children) ||
+			get_root_port_link(leaf) != root_port_link)
+			continue;
+		state = pcie_aspm_check_state(leaf->pdev, state);
+	}
+	/* check the root port link too, in case it has no children */
+	state = pcie_aspm_check_state(root_port_link->pdev, state);
+
 	if (link_state->enabled_state == state)
 		return;
-	__pcie_aspm_config_link(pdev, state);
+
+	/*
+	 * We must reconfigure the whole hierarchy. See the comments in
+	 * __pcie_aspm_config_link for the required ordering.
+	 */
+	if (state & PCIE_LINK_STATE_L1) {
+		list_for_each_entry(leaf, &link_list, sibiling) {
+			if (get_root_port_link(leaf) == root_port_link)
+				__pcie_aspm_config_link(leaf->pdev, state);
+		}
+	} else {
+		list_for_each_entry_reverse(leaf, &link_list, sibiling) {
+			if (get_root_port_link(leaf) == root_port_link)
+				__pcie_aspm_config_link(leaf->pdev, state);
+		}
+	}
 }
 
 /*
@@ -570,6 +631,7 @@
 	unsigned int state;
 	struct pcie_link_state *link_state;
 	int error = 0;
+	int blacklist;
 
 	if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
 		return;
@@ -580,29 +642,58 @@
 	if (list_empty(&pdev->subordinate->devices))
 		goto out;
 
-	if (pcie_aspm_sanity_check(pdev))
-		goto out;
+	blacklist = !!pcie_aspm_sanity_check(pdev);
 
 	mutex_lock(&aspm_lock);
 
 	link_state = kzalloc(sizeof(*link_state), GFP_KERNEL);
 	if (!link_state)
 		goto unlock_out;
+
+	link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev);
+	INIT_LIST_HEAD(&link_state->children);
+	INIT_LIST_HEAD(&link_state->link);
+	if (pdev->bus->self) { /* this is a switch */
+		struct pcie_link_state *parent_link_state;
+
+		parent_link_state = pdev->bus->parent->self->link_state;
+		if (!parent_link_state) {
+			kfree(link_state);
+			goto unlock_out;
+		}
+		list_add(&link_state->link, &parent_link_state->children);
+		link_state->parent = parent_link_state;
+	}
+
 	pdev->link_state = link_state;
 
-	pcie_aspm_configure_common_clock(pdev);
-
-	pcie_aspm_cap_init(pdev);
-
-	/* config link state to avoid BIOS error */
-	state = pcie_aspm_check_state(pdev, policy_to_aspm_state(pdev));
-	__pcie_aspm_config_link(pdev, state);
-
-	pcie_check_clock_pm(pdev);
+	if (!blacklist) {
+		pcie_aspm_configure_common_clock(pdev);
+		pcie_aspm_cap_init(pdev);
+	} else {
+		link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
+		link_state->bios_aspm_state = 0;
+		/* Set support state to 0, so we will disable ASPM later */
+		link_state->support_state = 0;
+	}
 
 	link_state->pdev = pdev;
 	list_add(&link_state->sibiling, &link_list);
 
+	if (link_state->downstream_has_switch) {
+		/*
+		 * If the link has a switch below it, delay the link config.
+		 * The leaf link initialization will configure the whole
+		 * hierarchy, but we must make sure the BIOS doesn't set an
+		 * unsupported link state.
+		 */
+		state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state);
+		__pcie_aspm_config_link(pdev, state);
+	} else
+		__pcie_aspm_configure_link_state(pdev,
+			policy_to_aspm_state(pdev));
+
+	pcie_check_clock_pm(pdev, blacklist);
+
 unlock_out:
 	if (error)
 		free_link_state(pdev);
@@ -635,6 +726,7 @@
 	/* All functions are removed, so just disable ASPM for the link */
 	__pcie_aspm_config_one_dev(parent, 0);
 	list_del(&link_state->sibiling);
+	list_del(&link_state->link);
 	/* Clock PM is for endpoint device */
 
 	free_link_state(parent);
@@ -857,24 +949,15 @@
 		aspm_disabled = 1;
 }
 
-#ifdef CONFIG_ACPI
-#include <acpi/acpi_bus.h>
-#include <linux/pci-acpi.h>
-static void pcie_aspm_platform_init(void)
+/**
+ * pcie_aspm_enabled - is PCIe ASPM enabled?
+ *
+ * Returns true if ASPM has not been disabled by the command-line option
+ * pcie_aspm=off.
+ */
+int pcie_aspm_enabled(void)
 {
-	pcie_osc_support_set(OSC_ACTIVE_STATE_PWR_SUPPORT|
-		OSC_CLOCK_PWR_CAPABILITY_SUPPORT);
+	return !aspm_disabled;
 }
-#else
-static inline void pcie_aspm_platform_init(void) { }
-#endif
+EXPORT_SYMBOL(pcie_aspm_enabled);
 
-static int __init pcie_aspm_init(void)
-{
-	if (aspm_disabled)
-		return 0;
-	pcie_aspm_platform_init();
-	return 0;
-}
-
-fs_initcall(pcie_aspm_init);
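The reworked retrain loop in pcie_aspm_configure_common_clock above sleeps between polls and decides failure from the Link Training bit itself rather than from the elapsed time, so a poll that completes right at the deadline is not misreported. A standalone sketch of that poll-with-timeout pattern, with a stubbed status read standing in for PCI_EXP_LNKSTA (not kernel code):

#include <stdio.h>
#include <stdbool.h>
#include <time.h>
#include <unistd.h>

/* Stand-in for reading the link status register; pretend training
 * completes after a few polls. */
static bool link_training_active(void)
{
	static int polls;
	return ++polls < 5;	/* "busy" for the first few polls */
}

/* Sleep between polls instead of spinning, and decide success from the
 * status itself rather than from the elapsed time. */
static int wait_for_retrain(int timeout_ms)
{
	struct timespec start, now;
	bool busy;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		busy = link_training_active();
		if (!busy)
			break;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			break;
		usleep(1000);	/* ~1 ms, like the msleep(1) in the kernel loop */
	}
	return busy ? -1 : 0;	/* still busy at exit means the retrain timed out */
}

int main(void)
{
	printf("retrain result: %d\n", wait_for_retrain(1000));
	return 0;
}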
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
index 359fe55..eec89b7 100644
--- a/drivers/pci/pcie/portdrv_bus.c
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -16,14 +16,10 @@
 #include "portdrv.h"
 
 static int pcie_port_bus_match(struct device *dev, struct device_driver *drv);
-static int pcie_port_bus_suspend(struct device *dev, pm_message_t state);
-static int pcie_port_bus_resume(struct device *dev);
 
 struct bus_type pcie_port_bus_type = {
 	.name 		= "pci_express",
 	.match 		= pcie_port_bus_match,
-	.suspend	= pcie_port_bus_suspend,
-	.resume		= pcie_port_bus_resume, 
 };
 EXPORT_SYMBOL_GPL(pcie_port_bus_type);
 
@@ -49,32 +45,12 @@
 	return 1;
 }
 
-static int pcie_port_bus_suspend(struct device *dev, pm_message_t state)
+int pcie_port_bus_register(void)
 {
-	struct pcie_device *pciedev;
-	struct pcie_port_service_driver *driver;
-
-	if (!dev || !dev->driver)
-		return 0;
-
-	pciedev = to_pcie_device(dev);
- 	driver = to_service_driver(dev->driver);
-	if (driver && driver->suspend)
-		driver->suspend(pciedev, state);
-	return 0;
+	return bus_register(&pcie_port_bus_type);
 }
 
-static int pcie_port_bus_resume(struct device *dev)
+void pcie_port_bus_unregister(void)
 {
-	struct pcie_device *pciedev;
-	struct pcie_port_service_driver *driver;
-
-	if (!dev || !dev->driver)
-		return 0;
-
-	pciedev = to_pcie_device(dev);
- 	driver = to_service_driver(dev->driver);
-	if (driver && driver->resume)
-		driver->resume(pciedev);
-	return 0;
+	bus_unregister(&pcie_port_bus_type);
 }
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 2e091e0..8b3f8c1 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -19,91 +19,15 @@
 
 extern int pcie_mch_quirk;	/* MSI-quirk Indicator */
 
-static int pcie_port_probe_service(struct device *dev)
-{
-	struct pcie_device *pciedev;
-	struct pcie_port_service_driver *driver;
-	int status;
-
-	if (!dev || !dev->driver)
-		return -ENODEV;
-
- 	driver = to_service_driver(dev->driver);
-	if (!driver || !driver->probe)
-		return -ENODEV;
-
-	pciedev = to_pcie_device(dev);
-	status = driver->probe(pciedev, driver->id_table);
-	if (!status) {
-		dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n",
-			driver->name);
-		get_device(dev);
-	}
-	return status;
-}
-
-static int pcie_port_remove_service(struct device *dev)
-{
-	struct pcie_device *pciedev;
-	struct pcie_port_service_driver *driver;
-
-	if (!dev || !dev->driver)
-		return 0;
-
-	pciedev = to_pcie_device(dev);
- 	driver = to_service_driver(dev->driver);
-	if (driver && driver->remove) { 
-		dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n",
-			driver->name);
-		driver->remove(pciedev);
-		put_device(dev);
-	}
-	return 0;
-}
-
-static void pcie_port_shutdown_service(struct device *dev) {}
-
-static int pcie_port_suspend_service(struct device *dev, pm_message_t state)
-{
-	struct pcie_device *pciedev;
-	struct pcie_port_service_driver *driver;
-
-	if (!dev || !dev->driver)
-		return 0;
-
-	pciedev = to_pcie_device(dev);
- 	driver = to_service_driver(dev->driver);
-	if (driver && driver->suspend)
-		driver->suspend(pciedev, state);
-	return 0;
-}
-
-static int pcie_port_resume_service(struct device *dev)
-{
-	struct pcie_device *pciedev;
-	struct pcie_port_service_driver *driver;
-
-	if (!dev || !dev->driver)
-		return 0;
-
-	pciedev = to_pcie_device(dev);
- 	driver = to_service_driver(dev->driver);
-
-	if (driver && driver->resume)
-		driver->resume(pciedev);
-	return 0;
-}
-
-/*
- * release_pcie_device
- *	
- *	Being invoked automatically when device is being removed 
- *	in response to device_unregister(dev) call.
- *	Release all resources being claimed.
+/**
+ * release_pcie_device - free PCI Express port service device structure
+ * @dev: Port service device to release
+ *
+ * Invoked automatically when device is being removed in response to
+ * device_unregister(dev).  Release all resources being claimed.
  */
 static void release_pcie_device(struct device *dev)
 {
-	dev_printk(KERN_DEBUG, dev, "free port service\n");
 	kfree(to_pcie_device(dev));			
 }
 
@@ -128,7 +52,16 @@
 	}
 	return quirk;
 }
-	
+
+/**
+ * assign_interrupt_mode - choose interrupt mode for PCI Express port services
+ *                         (INTx, MSI-X, MSI) and set up vectors
+ * @dev: PCI Express port to handle
+ * @vectors: Array of interrupt vectors to populate
+ * @mask: Bitmask of port capabilities returned by get_port_device_capability()
+ *
+ * Return value: Interrupt mode associated with the port
+ */
 static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
 {
 	int i, pos, nvec, status = -EINVAL;
@@ -150,7 +83,6 @@
 	if (pos) {
 		struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] = 
 			{{0, 0}, {0, 1}, {0, 2}, {0, 3}};
-		dev_info(&dev->dev, "found MSI-X capability\n");
 		status = pci_enable_msix(dev, msix_entries, nvec);
 		if (!status) {
 			int j = 0;
@@ -165,7 +97,6 @@
 	if (status) {
 		pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
 		if (pos) {
-			dev_info(&dev->dev, "found MSI capability\n");
 			status = pci_enable_msi(dev);
 			if (!status) {
 				interrupt_mode = PCIE_PORT_MSI_MODE;
@@ -177,6 +108,16 @@
 	return interrupt_mode;
 }
 
+/**
+ * get_port_device_capability - discover capabilities of a PCI Express port
+ * @dev: PCI Express port to examine
+ *
+ * The capabilities are read from the port's PCI Express configuration registers
+ * as described in PCI Express Base Specification 1.0a sections 7.8.2, 7.8.9 and
+ * 7.9 - 7.11.
+ *
+ * Return value: Bitmask of discovered port capabilities
+ */
 static int get_port_device_capability(struct pci_dev *dev)
 {
 	int services = 0, pos;
@@ -204,6 +145,15 @@
 	return services;
 }
 
+/**
+ * pcie_device_init - initialize PCI Express port service device
+ * @parent: PCI Express port to associate the service device with
+ * @dev: Port service device to initialize
+ * @port_type: Type of the port
+ * @service_type: Type of service to associate with the service device
+ * @irq: Interrupt vector to associate with the service device
+ * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI)
+ */
 static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev, 
 	int port_type, int service_type, int irq, int irq_mode)
 {
@@ -224,11 +174,19 @@
 	device->driver = NULL;
 	device->driver_data = NULL;
 	device->release = release_pcie_device;	/* callback to free pcie dev */
-	snprintf(device->bus_id, sizeof(device->bus_id), "%s:pcie%02x",
+	dev_set_name(device, "%s:pcie%02x",
 		 pci_name(parent), get_descriptor_id(port_type, service_type));
 	device->parent = &parent->dev;
 }
 
+/**
+ * alloc_pcie_device - allocate PCI Express port service device structure
+ * @parent: PCI Express port to associate the service device with
+ * @port_type: Type of the port
+ * @service_type: Type of service to associate with the service device
+ * @irq: Interrupt vector to associate with the service device
+ * @irq_mode: Interrupt mode of the service (INTx, MSI-X, MSI)
+ */
 static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
 	int port_type, int service_type, int irq, int irq_mode)
 {
@@ -239,10 +197,13 @@
 		return NULL;
 
 	pcie_device_init(parent, device, port_type, service_type, irq,irq_mode);
-	dev_printk(KERN_DEBUG, &device->device, "allocate port service\n");
 	return device;
 }
 
+/**
+ * pcie_port_device_probe - check if device is a PCI Express port
+ * @dev: Device to check
+ */
 int pcie_port_device_probe(struct pci_dev *dev)
 {
 	int pos, type;
@@ -260,6 +221,13 @@
 	return -ENODEV;
 }
 
+/**
+ * pcie_port_device_register - register PCI Express port
+ * @dev: PCI Express port to register
+ *
+ * Allocate the port extension structure and register services associated with
+ * the port.
+ */
 int pcie_port_device_register(struct pci_dev *dev)
 {
 	struct pcie_port_device_ext *p_ext;
@@ -323,6 +291,11 @@
 	return 0;
 }
 
+/**
+ * pcie_port_device_suspend - suspend port services associated with a PCIe port
+ * @dev: PCI Express port to handle
+ * @state: Representation of system power management transition in progress
+ */
 int pcie_port_device_suspend(struct pci_dev *dev, pm_message_t state)
 {
 	return device_for_each_child(&dev->dev, &state, suspend_iter);
@@ -341,6 +314,10 @@
 	return 0;
 }
 
+/**
+ * pcie_port_device_resume - resume port services associated with a PCIe port
+ * @dev: PCI Express port to handle
+ */
 int pcie_port_device_resume(struct pci_dev *dev)
 {
 	return device_for_each_child(&dev->dev, NULL, resume_iter);
@@ -363,6 +340,13 @@
 	return 0;
 }
 
+/**
+ * pcie_port_device_remove - unregister PCI Express port service devices
+ * @dev: PCI Express port the service devices to unregister are associated with
+ *
+ * Remove PCI Express port service devices associated with given port and
+ * disable MSI-X or MSI for the port.
+ */
 void pcie_port_device_remove(struct pci_dev *dev)
 {
 	struct device *device;
@@ -386,16 +370,80 @@
 		pci_disable_msi(dev);
 }
 
-int pcie_port_bus_register(void)
+/**
+ * pcie_port_probe_service - probe driver for given PCI Express port service
+ * @dev: PCI Express port service device to probe against
+ *
+ * If a PCI Express port service driver is registered with
+ * pcie_port_service_register(), this function will be called by the driver
+ * core whenever a match is found between the driver and a port service device.
+ */
+static int pcie_port_probe_service(struct device *dev)
 {
-	return bus_register(&pcie_port_bus_type);
+	struct pcie_device *pciedev;
+	struct pcie_port_service_driver *driver;
+	int status;
+
+	if (!dev || !dev->driver)
+		return -ENODEV;
+
+	driver = to_service_driver(dev->driver);
+	if (!driver || !driver->probe)
+		return -ENODEV;
+
+	pciedev = to_pcie_device(dev);
+	status = driver->probe(pciedev, driver->id_table);
+	if (!status) {
+		dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n",
+			driver->name);
+		get_device(dev);
+	}
+	return status;
 }
 
-void pcie_port_bus_unregister(void)
+/**
+ * pcie_port_remove_service - detach driver from given PCI Express port service
+ * @dev: PCI Express port service device to handle
+ *
+ * If a PCI Express port service driver is registered with
+ * pcie_port_service_register(), this function will be called by the driver core
+ * when device_unregister() is called for the port service device associated
+ * with the driver.
+ */
+static int pcie_port_remove_service(struct device *dev)
 {
-	bus_unregister(&pcie_port_bus_type);
+	struct pcie_device *pciedev;
+	struct pcie_port_service_driver *driver;
+
+	if (!dev || !dev->driver)
+		return 0;
+
+	pciedev = to_pcie_device(dev);
+	driver = to_service_driver(dev->driver);
+	if (driver && driver->remove) {
+		dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n",
+			driver->name);
+		driver->remove(pciedev);
+		put_device(dev);
+	}
+	return 0;
 }
 
+/**
+ * pcie_port_shutdown_service - shut down given PCI Express port service
+ * @dev: PCI Express port service device to handle
+ *
+ * If a PCI Express port service driver is registered with
+ * pcie_port_service_register(), this function will be called by the driver core
+ * when device_shutdown() is called for the port service device associated
+ * with the driver.
+ */
+static void pcie_port_shutdown_service(struct device *dev) {}
+
+/**
+ * pcie_port_service_register - register PCI Express port service driver
+ * @new: PCI Express port service driver to register
+ */
 int pcie_port_service_register(struct pcie_port_service_driver *new)
 {
 	new->driver.name = (char *)new->name;
@@ -403,15 +451,17 @@
 	new->driver.probe = pcie_port_probe_service;
 	new->driver.remove = pcie_port_remove_service;
 	new->driver.shutdown = pcie_port_shutdown_service;
-	new->driver.suspend = pcie_port_suspend_service;
-	new->driver.resume = pcie_port_resume_service;
 
 	return driver_register(&new->driver);
 }
 
-void pcie_port_service_unregister(struct pcie_port_service_driver *new)
+/**
+ * pcie_port_service_unregister - unregister PCI Express port service driver
+ * @drv: PCI Express port service driver to unregister
+ */
+void pcie_port_service_unregister(struct pcie_port_service_driver *drv)
 {
-	driver_unregister(&new->driver);
+	driver_unregister(&drv->driver);
 }
 
 EXPORT_SYMBOL(pcie_port_service_register);
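pcie_port_probe_service() and pcie_port_remove_service() above recover the port service device from the generic struct device handed in by the driver core, then pair get_device()/put_device() around the bound driver. Assuming to_pcie_device() is the usual container_of()-style wrapper (the wrapper itself is not shown in this patch), the downcast step looks roughly like this standalone sketch:

#include <stdio.h>
#include <stddef.h>

/* Minimal container_of: recover the containing structure from a pointer
 * to one of its embedded members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device {
	const char *name;
};

/* Hypothetical stand-in for the kernel's struct pcie_device. */
struct pcie_device_sketch {
	int service;
	struct device device;	/* embedded generic device */
};

int main(void)
{
	struct pcie_device_sketch pdev = {
		.service = 3,
		.device = { "0000:00:1c.0:pcie01" },
	};
	struct device *dev = &pdev.device;	/* what the driver core hands us */

	/* What a to_pcie_device()-style helper would do with that pointer. */
	struct pcie_device_sketch *back =
		container_of(dev, struct pcie_device_sketch, device);

	printf("%s: service %d\n", back->device.name, back->service);
	return 0;
}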
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 584422d..99a914a 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -41,7 +41,6 @@
 {
 	int retval;
 
-	pci_restore_state(dev);
 	retval = pci_enable_device(dev);
 	if (retval)
 		return retval;
@@ -52,11 +51,18 @@
 #ifdef CONFIG_PM
 static int pcie_portdrv_suspend(struct pci_dev *dev, pm_message_t state)
 {
-	int ret = pcie_port_device_suspend(dev, state);
+	return pcie_port_device_suspend(dev, state);
 
-	if (!ret)
-		ret = pcie_portdrv_save_config(dev);
-	return ret;
+}
+
+static int pcie_portdrv_suspend_late(struct pci_dev *dev, pm_message_t state)
+{
+	return pci_save_state(dev);
+}
+
+static int pcie_portdrv_resume_early(struct pci_dev *dev)
+{
+	return pci_restore_state(dev);
 }
 
 static int pcie_portdrv_resume(struct pci_dev *dev)
@@ -66,6 +72,8 @@
 }
 #else
 #define pcie_portdrv_suspend NULL
+#define pcie_portdrv_suspend_late NULL
+#define pcie_portdrv_resume_early NULL
 #define pcie_portdrv_resume NULL
 #endif
 
@@ -221,6 +229,7 @@
 
 	/* If fatal, restore cfg space for possible link reset at upstream */
 	if (dev->error_state == pci_channel_io_frozen) {
+		pci_restore_state(dev);
 		pcie_portdrv_restore_config(dev);
 		pci_enable_pcie_error_reporting(dev);
 	}
@@ -283,6 +292,8 @@
 	.remove		= pcie_portdrv_remove,
 
 	.suspend	= pcie_portdrv_suspend,
+	.suspend_late	= pcie_portdrv_suspend_late,
+	.resume_early	= pcie_portdrv_resume_early,
 	.resume		= pcie_portdrv_resume,
 
 	.err_handler 	= &pcie_portdrv_err_handler,
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 5b3f593..55ec44a 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -51,12 +51,12 @@
 					char *buf)
 {
 	int ret;
-	cpumask_t cpumask;
+	const struct cpumask *cpumask;
 
-	cpumask = pcibus_to_cpumask(to_pci_bus(dev));
+	cpumask = cpumask_of_pcibus(to_pci_bus(dev));
 	ret = type?
-		cpulist_scnprintf(buf, PAGE_SIZE-2, &cpumask) :
-		cpumask_scnprintf(buf, PAGE_SIZE-2, &cpumask);
+		cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask) :
+		cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask);
 	buf[ret++] = '\n';
 	buf[ret] = '\0';
 	return ret;
@@ -135,13 +135,6 @@
 	return size;
 }
 
-enum pci_bar_type {
-	pci_bar_unknown,	/* Standard PCI BAR probe */
-	pci_bar_io,		/* An io port BAR */
-	pci_bar_mem32,		/* A 32-bit memory BAR */
-	pci_bar_mem64,		/* A 64-bit memory BAR */
-};
-
 static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar)
 {
 	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
@@ -156,11 +149,16 @@
 	return pci_bar_mem32;
 }
 
-/*
- * If the type is not unknown, we assume that the lowest bit is 'enable'.
- * Returns 1 if the BAR was 64-bit and 0 if it was 32-bit.
+/**
+ * pci_read_base - read a PCI BAR
+ * @dev: the PCI device
+ * @type: type of the BAR
+ * @res: resource buffer to be filled in
+ * @pos: BAR position in the config space
+ *
+ * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
  */
-static int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 			struct resource *res, unsigned int pos)
 {
 	u32 l, sz, mask;
@@ -400,19 +398,17 @@
 	if (!child)
 		return NULL;
 
-	child->self = bridge;
 	child->parent = parent;
 	child->ops = parent->ops;
 	child->sysdata = parent->sysdata;
 	child->bus_flags = parent->bus_flags;
-	child->bridge = get_device(&bridge->dev);
 
 	/* initialize some portions of the bus device, but don't register it
 	 * now as the parent is not properly set up yet.  This device will get
 	 * registered later in pci_bus_add_devices()
 	 */
 	child->dev.class = &pcibus_class;
-	sprintf(child->dev.bus_id, "%04x:%02x", pci_domain_nr(child), busnr);
+	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
 
 	/*
 	 * Set up the primary, secondary and subordinate
@@ -422,8 +418,14 @@
 	child->primary = parent->secondary;
 	child->subordinate = 0xff;
 
+	if (!bridge)
+		return child;
+
+	child->self = bridge;
+	child->bridge = get_device(&bridge->dev);
+
 	/* Set up default resource pointers and names.. */
-	for (i = 0; i < 4; i++) {
+	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
 		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
 		child->resource[i]->name = child->name;
 	}
@@ -958,8 +960,12 @@
 	/* MSI/MSI-X list */
 	pci_msi_init_pci_dev(dev);
 
+	/* Buffers for saving PCIe and PCI-X capabilities */
+	pci_allocate_cap_save_buffers(dev);
+
 	/* Power Management */
 	pci_pm_init(dev);
+	platform_pci_wakeup_init(dev);
 
 	/* Vital Product Data */
 	pci_vpd_pci22_init(dev);
@@ -1130,7 +1136,7 @@
 	memset(dev, 0, sizeof(*dev));
 	dev->parent = parent;
 	dev->release = pci_release_bus_bridge_dev;
-	sprintf(dev->bus_id, "pci%04x:%02x", pci_domain_nr(b), bus);
+	dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus);
 	error = device_register(dev);
 	if (error)
 		goto dev_reg_err;
@@ -1141,7 +1147,7 @@
 
 	b->dev.class = &pcibus_class;
 	b->dev.parent = b->bridge;
-	sprintf(b->dev.bus_id, "%04x:%02x", pci_domain_nr(b), bus);
+	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
 	error = device_register(&b->dev);
 	if (error)
 		goto class_dev_reg_err;
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index e1098c3..593bb84 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -252,11 +252,20 @@
 	const struct proc_dir_entry *dp = PDE(inode);
 	struct pci_dev *dev = dp->data;
 	struct pci_filp_private *fpriv = file->private_data;
-	int ret;
+	int i, ret;
 
 	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
 
+	/* Make sure the caller is mapping a real resource for this device */
+	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+		if (pci_mmap_fits(dev, i, vma))
+			break;
+	}
+
+	if (i >= PCI_ROM_RESOURCE)
+		return -ENODEV;
+
 	ret = pci_mmap_page_range(dev, vma,
 				  fpriv->mmap_state,
 				  fpriv->write_combine);
@@ -352,15 +361,16 @@
 			dev->vendor,
 			dev->device,
 			dev->irq);
-	/* Here should be 7 and not PCI_NUM_RESOURCES as we need to preserve compatibility */
-	for (i=0; i<7; i++) {
+
+	/* only print standard and ROM resources to preserve compatibility */
+	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
 		resource_size_t start, end;
 		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
 		seq_printf(m, "\t%16llx",
 			(unsigned long long)(start |
 			(dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
 	}
-	for (i=0; i<7; i++) {
+	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
 		resource_size_t start, end;
 		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
 		seq_printf(m, "\t%16llx",
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ce09856..baad093 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -56,7 +56,7 @@
 	while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
 		pci_read_config_byte(d, 0x82, &dlc);
 		if (!(dlc & 1<<1)) {
-			dev_err(&d->dev, "PIIX3: Enabling Passive Release\n");
+			dev_info(&d->dev, "PIIX3: Enabling Passive Release\n");
 			dlc |= 1<<1;
 			pci_write_config_byte(d, 0x82, dlc);
 		}
@@ -449,7 +449,7 @@
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801EB_0,		quirk_ich4_lpc_acpi);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_ESB_1,		quirk_ich4_lpc_acpi);
 
-static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev)
+static void __devinit ich6_lpc_acpi_gpio(struct pci_dev *dev)
 {
 	u32 region;
 
@@ -459,20 +459,95 @@
 	pci_read_config_dword(dev, 0x48, &region);
 	quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO");
 }
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc_acpi);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich6_lpc_acpi);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich6_lpc_acpi);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich6_lpc_acpi);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich6_lpc_acpi);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich6_lpc_acpi);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich6_lpc_acpi);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich6_lpc_acpi);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich6_lpc_acpi);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich6_lpc_acpi);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich6_lpc_acpi);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich6_lpc_acpi);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich6_lpc_acpi);
+
+static void __devinit ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize)
+{
+	u32 val;
+	u32 size, base;
+
+	pci_read_config_dword(dev, reg, &val);
+
+	/* Enabled? */
+	if (!(val & 1))
+		return;
+	base = val & 0xfffc;
+	if (dynsize) {
+		/*
+		 * This is not correct. It is 16, 32 or 64 bytes depending on
+		 * register D31:F0:ADh bits 5:4.
+		 *
+		 * But this gets us at least _part_ of it.
+		 */
+		size = 16;
+	} else {
+		size = 128;
+	}
+	base &= ~(size-1);
+
+	/* Just print it out for now. We should reserve it after more debugging */
+	dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
+}
+
+static void __devinit quirk_ich6_lpc(struct pci_dev *dev)
+{
+	/* Shared ACPI/GPIO decode with all ICH6+ */
+	ich6_lpc_acpi_gpio(dev);
+
+	/* ICH6-specific generic IO decode */
+	ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0);
+	ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
+
+static void __devinit ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name)
+{
+	u32 val;
+	u32 mask, base;
+
+	pci_read_config_dword(dev, reg, &val);
+
+	/* Enabled? */
+	if (!(val & 1))
+		return;
+
+	/*
+	 * IO base in bits 15:2, mask in bits 23:18, both
+	 * are dword-based
+	 */
+	base = val & 0xfffc;
+	mask = (val >> 16) & 0xfc;
+	mask |= 3;
+
+	/* Just print it out for now. We should reserve it after more debugging */
+	dev_info(&dev->dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
+}
+
+/* ICH7-10 has the same common LPC generic IO decode registers */
+static void __devinit quirk_ich7_lpc(struct pci_dev *dev)
+{
+	/* We share the common ACPI/DPIO decode with ICH6 */
+	ich6_lpc_acpi_gpio(dev);
+
+	/* And have 4 ICH7+ generic decodes */
+	ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1");
+	ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2");
+	ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3");
+	ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc);
 
 /*
  * VIA ACPI: One IO region pointed to by longword at
@@ -2074,11 +2149,12 @@
 
 #endif /* CONFIG_PCI_MSI */
 
-static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end)
+static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
+			  struct pci_fixup *end)
 {
 	while (f < end) {
 		if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) &&
- 		    (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) {
+		    (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) {
 			dev_dbg(&dev->dev, "calling %pF\n", f->hook);
 			f->hook(dev);
 		}
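ich7_lpc_generic_decode() above pulls the enable bit, the I/O base (bits 15:2) and the dword-granular mask (bits 23:18) out of a single config dword. A standalone sketch of that field extraction, using a made-up register value (not kernel code):

#include <stdio.h>

/* Enable bit in bit 0, I/O base in bits 15:2, address mask in bits 23:18;
 * the mask is dword-granular, so its low two bits are always set. */
static void decode_ich7_lpc(unsigned int val)
{
	unsigned int base, mask;

	if (!(val & 1)) {
		printf("decode range disabled\n");
		return;
	}
	base = val & 0xfffc;
	mask = ((val >> 16) & 0xfc) | 3;
	printf("PIO at %04x (mask %04x)\n", base, mask);
}

int main(void)
{
	decode_ich7_lpc(0x00fc0061);	/* hypothetical register value */
	return 0;
}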
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index ea979f2..7046089 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -536,9 +536,8 @@
                 if (!res)
                         continue;
 
-		printk(KERN_INFO "bus: %02x index %x %s: %pR\n",
-		       bus->number, i,
-		       (res->flags & IORESOURCE_IO) ? "io port" : "mmio", res);
+		dev_printk(KERN_DEBUG, &bus->dev, "resource %d %s %pR\n", i,
+			   (res->flags & IORESOURCE_IO) ? "io: " : "mem:", res);
         }
 }
 
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 2dbd96c..32e8d88 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -26,11 +26,13 @@
 #include "pci.h"
 
 
-void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
+void pci_update_resource(struct pci_dev *dev, int resno)
 {
 	struct pci_bus_region region;
 	u32 new, check, mask;
 	int reg;
+	enum pci_bar_type type;
+	struct resource *res = dev->resource + resno;
 
 	/*
 	 * Ignore resources for unimplemented BARs and unused resource slots
@@ -61,17 +63,13 @@
 	else
 		mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
 
-	if (resno < 6) {
-		reg = PCI_BASE_ADDRESS_0 + 4 * resno;
-	} else if (resno == PCI_ROM_RESOURCE) {
+	reg = pci_resource_bar(dev, resno, &type);
+	if (!reg)
+		return;
+	if (type != pci_bar_unknown) {
 		if (!(res->flags & IORESOURCE_ROM_ENABLE))
 			return;
 		new |= PCI_ROM_ADDRESS_ENABLE;
-		reg = dev->rom_base_reg;
-	} else {
-		/* Hmm, non-standard resource. */
-	
-		return;		/* kill uninitialised var warning */
 	}
 
 	pci_write_config_dword(dev, reg, new);
@@ -134,7 +132,7 @@
 
 	align = resource_alignment(res);
 	if (!align) {
-		dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus "
+		dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
 			"alignment) %pR flags %#lx\n",
 			resno, res, res->flags);
 		return -EINVAL;
@@ -157,12 +155,12 @@
 	}
 
 	if (ret) {
-		dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
+		dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
 			resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
 	} else {
 		res->flags &= ~IORESOURCE_STARTALIGN;
 		if (resno < PCI_BRIDGE_RESOURCES)
-			pci_update_resource(dev, res, resno);
+			pci_update_resource(dev, resno);
 	}
 
 	return ret;
@@ -197,7 +195,7 @@
 		dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
 			resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
 	} else if (resno < PCI_BRIDGE_RESOURCES) {
-		pci_update_resource(dev, res, resno);
+		pci_update_resource(dev, resno);
 	}
 
 	return ret;
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index 645d7a6..ec22284 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -14,10 +14,8 @@
 #include <asm/uaccess.h>
 #include "pci.h"
 
-asmlinkage long
-sys_pciconfig_read(unsigned long bus, unsigned long dfn,
-		   unsigned long off, unsigned long len,
-		   void __user *buf)
+SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
+		unsigned long, off, unsigned long, len, void __user *, buf)
 {
 	struct pci_dev *dev;
 	u8 byte;
@@ -86,10 +84,8 @@
 	return err;
 }
 
-asmlinkage long
-sys_pciconfig_write(unsigned long bus, unsigned long dfn,
-		    unsigned long off, unsigned long len,
-		    void __user *buf)
+SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
+		unsigned long, off, unsigned long, len, void __user *, buf)
 {
 	struct pci_dev *dev;
 	u8 byte;
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index a34284b1..d187ba4 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -297,7 +297,7 @@
 		goto fail3;
 	}
 
-	dev_info(device, "at mem 0x%lx io 0x%lx irq %d\n",
+	dev_info(device, "at mem 0x%lx io 0x%llx irq %d\n",
 		 cf->mem_phys, io.start, cf->irq);
 
 	cf->active = 1;
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
new file mode 100644
index 0000000..9652c3f
--- /dev/null
+++ b/drivers/platform/Kconfig
@@ -0,0 +1,5 @@
+# drivers/platform/Kconfig
+
+if X86
+source "drivers/platform/x86/Kconfig"
+endif
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
new file mode 100644
index 0000000..782953a
--- /dev/null
+++ b/drivers/platform/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for linux/drivers/platform
+#
+
+obj-$(CONFIG_X86)		+= x86/
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
new file mode 100644
index 0000000..1a266d4
--- /dev/null
+++ b/drivers/platform/x86/Kconfig
@@ -0,0 +1,398 @@
+#
+# X86 Platform Specific Drivers
+#
+
+menuconfig X86_PLATFORM_DEVICES
+	bool "X86 Platform Specific Device Drivers"
+	default y
+	---help---
+	  Say Y here to get to see options for device drivers for various
+	  x86 platforms, including vendor-specific laptop extension drivers.
+	  This option alone does not add any kernel code.
+
+	  If you say N, all options in this submenu will be skipped and disabled.
+
+if X86_PLATFORM_DEVICES
+
+config ACER_WMI
+	tristate "Acer WMI Laptop Extras (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	depends on ACPI
+	depends on LEDS_CLASS
+	depends on NEW_LEDS
+	depends on BACKLIGHT_CLASS_DEVICE
+	depends on SERIO_I8042
+	depends on RFKILL
+	select ACPI_WMI
+	---help---
+	  This is a driver for newer Acer (and Wistron) laptops. It adds
+	  wireless radio and bluetooth control, and on some laptops,
+	  exposes the mail LED and LCD backlight.
+
+	  For more information about this driver see
+	  <file:Documentation/laptops/acer-wmi.txt>
+
+	  If you have an ACPI-WMI compatible Acer/Wistron laptop, say Y or M
+	  here.
+
+config ASUS_LAPTOP
+	tristate "Asus Laptop Extras (EXPERIMENTAL)"
+	depends on ACPI
+	depends on EXPERIMENTAL && !ACPI_ASUS
+	depends on LEDS_CLASS
+	depends on NEW_LEDS
+	depends on BACKLIGHT_CLASS_DEVICE
+	---help---
+	  This is the new Linux driver for Asus laptops. It may also support some
+	  MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
+	  standard ACPI events that go through /proc/acpi/events. It also adds
+	  support for video output switching, LCD backlight control, Bluetooth and
+	  WLAN control, and most importantly, allows you to blink those fancy LEDs.
+
+	  For more information and a userspace daemon for handling the extra
+	  buttons see <http://acpi4asus.sf.net/>.
+
+	  If you have an ACPI-compatible ASUS laptop, say Y or M here.
+
+config DELL_LAPTOP
+	tristate "Dell Laptop Extras (EXPERIMENTAL)"
+	depends on X86
+	depends on DCDBAS
+	depends on EXPERIMENTAL
+	depends on BACKLIGHT_CLASS_DEVICE
+	depends on RFKILL
+	default n
+	---help---
+	  This driver adds support for rfkill and backlight control to Dell
+	  laptops.
+
+config FUJITSU_LAPTOP
+	tristate "Fujitsu Laptop Extras"
+	depends on ACPI
+	depends on INPUT
+	depends on BACKLIGHT_CLASS_DEVICE
+	---help---
+	  This is a driver for laptops built by Fujitsu:
+
+	    * P2xxx/P5xxx/S6xxx/S7xxx series Lifebooks
+	    * Possibly other Fujitsu laptop models
+	    * Tested with S6410 and S7020
+
+	  It adds support for LCD brightness control and some hotkeys.
+
+	  If you have a Fujitsu laptop, say Y or M here.
+
+config FUJITSU_LAPTOP_DEBUG
+	bool "Verbose debug mode for Fujitsu Laptop Extras"
+	depends on FUJITSU_LAPTOP
+	default n
+	---help---
+	  Enables extra debug output from the fujitsu extras driver, at the
+	  expense of a slight increase in driver size.
+
+	  If you are not sure, say N here.
+
+config TC1100_WMI
+	tristate "HP Compaq TC1100 Tablet WMI Extras (EXPERIMENTAL)"
+	depends on !X86_64
+	depends on EXPERIMENTAL
+	depends on ACPI
+	select ACPI_WMI
+	---help---
+	  This is a driver for the WMI extensions (wireless and bluetooth power
+	  control) of the HP Compaq TC1100 tablet.
+
+config HP_WMI
+	tristate "HP WMI extras"
+	depends on ACPI_WMI
+	depends on INPUT
+	depends on RFKILL
+	help
+	 Say Y here if you want to support WMI-based hotkeys on HP laptops and
+	 to read data from WMI such as docking or ambient light sensor state.
+
+	 To compile this driver as a module, choose M here: the module will
+	 be called hp-wmi.
+
+config MSI_LAPTOP
+	tristate "MSI Laptop Extras"
+	depends on ACPI
+	depends on BACKLIGHT_CLASS_DEVICE
+	---help---
+	  This is a driver for laptops built by MSI (MICRO-STAR
+	  INTERNATIONAL):
+
+	  MSI MegaBook S270 (MS-1013)
+	  Cytron/TCM/Medion/Tchibo MD96100/SAM2000
+
+	  It adds support for Bluetooth, WLAN and LCD brightness control.
+
+	  More information about this driver is available at
+	  <http://0pointer.de/lennart/tchibo.html>.
+
+	  If you have an MSI S270 laptop, say Y or M here.
+
+config PANASONIC_LAPTOP
+	tristate "Panasonic Laptop Extras"
+	depends on INPUT && ACPI
+	depends on BACKLIGHT_CLASS_DEVICE
+	---help---
+	  This driver adds support for access to backlight control and hotkeys
+	  on Panasonic Let's Note laptops.
+
+	  If you have a Panasonic Let's Note laptop (such as the R1(N variant),
+	  R2, R3, R5, T2, W2 and Y2 series), say Y.
+
+config COMPAL_LAPTOP
+	tristate "Compal Laptop Extras"
+	depends on ACPI
+	depends on BACKLIGHT_CLASS_DEVICE
+	---help---
+	  This is a driver for laptops built by Compal:
+
+	  Compal FL90/IFL90
+	  Compal FL91/IFL91
+	  Compal FL92/JFL92
+	  Compal FT00/IFT00
+
+	  It adds support for Bluetooth, WLAN and LCD brightness control.
+
+	  If you have a Compal FL9x/IFL9x/FT00 laptop, say Y or M here.
+
+config SONY_LAPTOP
+	tristate "Sony Laptop Extras"
+	depends on ACPI
+	select BACKLIGHT_CLASS_DEVICE
+	depends on INPUT
+	---help---
+	  This mini-driver drives the SNC and SPIC devices present in the ACPI
+	  BIOS of the Sony Vaio laptops.
+
+	  It gives access to some extra laptop functionalities like Bluetooth,
+	  screen brightness control, Fn keys and allows powering on/off some
+	  devices.
+
+	  Read <file:Documentation/laptops/sony-laptop.txt> for more information.
+
+config SONYPI_COMPAT
+	bool "Sonypi compatibility"
+	depends on SONY_LAPTOP
+	---help---
+	  Build the sonypi driver compatibility code into the sony-laptop driver.
+
+config THINKPAD_ACPI
+	tristate "ThinkPad ACPI Laptop Extras"
+	depends on ACPI
+	select BACKLIGHT_LCD_SUPPORT
+	select BACKLIGHT_CLASS_DEVICE
+	select HWMON
+	select NVRAM
+	select INPUT
+	select NEW_LEDS
+	select LEDS_CLASS
+	select NET
+	select RFKILL
+	---help---
+	  This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
+	  support for Fn-Fx key combinations, Bluetooth control, video
+	  output switching, ThinkLight control, UltraBay eject and more.
+	  For more information about this driver see
+	  <file:Documentation/laptops/thinkpad-acpi.txt> and
+	  <http://ibm-acpi.sf.net/> .
+
+	  This driver was formerly known as ibm-acpi.
+
+	  If you have an IBM or Lenovo ThinkPad laptop, say Y or M here.
+
+config THINKPAD_ACPI_DEBUGFACILITIES
+	bool "Maintainer debug facilities"
+	depends on THINKPAD_ACPI
+	default n
+	---help---
+	  Enables extra facilities in the thinkpad-acpi driver which are
+	  completely useless for normal use.  Read the driver source to find
+	  out what it does.
+
+	  Say N here, unless you were told by a kernel maintainer to do
+	  otherwise.
+
+config THINKPAD_ACPI_DEBUG
+	bool "Verbose debug mode"
+	depends on THINKPAD_ACPI
+	default n
+	---help---
+	  Enables extra debugging information, at the expense of a slight
+	  increase in driver size.
+
+	  If you are not sure, say N here.
+
+config THINKPAD_ACPI_DOCK
+	bool "Legacy Docking Station Support"
+	depends on THINKPAD_ACPI
+	depends on ACPI_DOCK=n
+	default n
+	---help---
+	  Allows the thinkpad_acpi driver to handle docking station events.
+	  This support was made obsolete by the generic ACPI docking station
+	  support (CONFIG_ACPI_DOCK).  It will allow locking and removing the
+	  laptop from the docking station, but will not properly connect PCI
+	  devices.
+
+	  If you are not sure, say N here.
+
+config THINKPAD_ACPI_BAY
+	bool "Legacy Removable Bay Support"
+	depends on THINKPAD_ACPI
+	default y
+	---help---
+	  Allows the thinkpad_acpi driver to handle removable bays.  It will
+	  electrically disable the device in the bay, and also generate
+	  notifications when the bay lever is ejected or inserted.
+
+	  If you are not sure, say Y here.
+
+config THINKPAD_ACPI_VIDEO
+	bool "Video output control support"
+	depends on THINKPAD_ACPI
+	default y
+	---help---
+	  Allows the thinkpad_acpi driver to provide an interface to control
+	  the various video output ports.
+
+	  This feature often won't work well, depending on ThinkPad model,
+	  display state, video output devices in use, whether there is an X
+	  server running, phase of the moon, and the current mood of
+	  Schroedinger's cat.  If you can use X.org's RandR to control
+	  your ThinkPad's video output ports instead of this feature,
+	  don't think twice: do it and say N here to save some memory.
+
+	  If you are not sure, say Y here.
+
+config THINKPAD_ACPI_HOTKEY_POLL
+	bool "Support NVRAM polling for hot keys"
+	depends on THINKPAD_ACPI
+	default y
+	---help---
+	  Some ThinkPad models benefit from NVRAM polling to detect a few of
+	  the hot key press events.  If you know your ThinkPad model does not
+	  need to do NVRAM polling to support any of the hot keys you use,
+	  unselecting this option will save about 1kB of memory.
+
+	  ThinkPads T40 and newer, R52 and newer, and X31 and newer are
+	  unlikely to need NVRAM polling in their latest BIOS versions.
+
+	  NVRAM polling can detect at most the following keys: ThinkPad/Access
+	  IBM, Zoom, Switch Display (fn+F7), ThinkLight, Volume up/down/mute,
+	  Brightness up/down, Display Expand (fn+F8), Hibernate (fn+F12).
+
+	  If you are not sure, say Y here.  The driver enables polling only if
+	  it is strictly necessary to do so.
+
+config INTEL_MENLOW
+	tristate "Thermal Management driver for Intel Menlow platform"
+	depends on ACPI_THERMAL
+	select THERMAL
+	---help---
+	  ACPI thermal management enhancement driver for the
+	  Intel Menlow platform.
+
+	  If unsure, say N.
+
+config EEEPC_LAPTOP
+	tristate "Eee PC Hotkey Driver (EXPERIMENTAL)"
+	depends on ACPI
+	depends on EXPERIMENTAL
+	select BACKLIGHT_CLASS_DEVICE
+	select HWMON
+	select RFKILL
+	---help---
+	  This driver supports the Fn-Fx keys on Eee PC laptops.
+	  It also adds the ability to switch camera/wlan on/off.
+
+	  If you have an Eee PC laptop, say Y or M here.
+
+
+config ACPI_WMI
+	tristate "WMI (EXPERIMENTAL)"
+	depends on ACPI
+	depends on EXPERIMENTAL
+	help
+	  This driver adds support for the ACPI-WMI (Windows Management
+	  Instrumentation) mapper device (PNP0C14) found on some systems.
+
+	  ACPI-WMI is a proprietary extension to ACPI to expose parts of the
+	  ACPI firmware to userspace - this is done through various vendor
+	  defined methods and data blocks in a PNP0C14 device, which are then
+	  made available for userspace to call.
+
+	  The Linux implementation currently only exposes this interface to
+	  other kernel-space drivers.
+
+	  This driver is a required dependency to build the firmware-specific
+	  drivers needed on many machines, including Acer and HP laptops.
+
+	  It is safe to enable this driver even if your DSDT doesn't define
+	  any ACPI-WMI devices.
+
+config ACPI_ASUS
+	tristate "ASUS/Medion Laptop Extras"
+	depends on ACPI
+	select BACKLIGHT_CLASS_DEVICE
+	---help---
+	  This driver provides support for extra features of ACPI-compatible
+	  ASUS laptops. As some Medion laptops are made by ASUS, it may also
+	  support some Medion laptops (such as the 9675).  It makes all
+	  the extra buttons generate standard ACPI events that go through
+	  /proc/acpi/events, and (on some models) adds support for changing the
+	  display brightness and output, switching the LCD backlight on and off,
+	  and most importantly, allows you to blink those fancy LEDs intended
+	  for reporting mail and wireless status.
+
+	  Note: display switching code is currently considered EXPERIMENTAL;
+	  toying with these values may even lock your machine.
+
+	  All settings are changed via /proc/acpi/asus directory entries. Owner
+	  and group for these entries can be set with asus_uid and asus_gid
+	  parameters.
+
+	  More information and a userspace daemon for handling the extra buttons
+	  are available at <http://sourceforge.net/projects/acpi4asus/>.
+
+	  If you have an ACPI-compatible ASUS laptop, say Y or M here. This
+	  driver is still under development, so if your laptop is unsupported or
+	  something does not work quite as expected, please use the mailing list
+	  available on the above page (acpi4asus-user@lists.sourceforge.net).
+
+	  NOTE: This driver is deprecated and will probably be removed soon,
+	  use asus-laptop instead.
+
+config ACPI_TOSHIBA
+	tristate "Toshiba Laptop Extras"
+	depends on ACPI
+	depends on INPUT
+	select INPUT_POLLDEV
+	select NET
+	select RFKILL
+	select BACKLIGHT_CLASS_DEVICE
+	---help---
+	  This driver adds support for access to certain system settings
+	  on "legacy free" Toshiba laptops.  These laptops can be recognized by
+	  their lack of a BIOS setup menu and APM support.
+
+	  On these machines, all system configuration is handled through
+	  ACPI.  This driver is required for access to controls not covered
+	  by the general ACPI drivers, such as LCD brightness, video output,
+	  etc.
+
+	  This driver differs from the non-ACPI Toshiba laptop driver (located
+	  under "Processor type and features") in several aspects.
+	  Configuration is accessed by reading and writing text files in the
+	  /proc tree instead of by program interface to /dev.  Furthermore, no
+	  power management functions are exposed, as those are handled by the
+	  general ACPI drivers.
+
+	  More information about this driver is available at
+	  <http://memebeam.org/toys/ToshibaAcpiDriver>.
+
+	  If you have a legacy free Toshiba laptop (such as the Libretto L1
+	  series), say Y.
+endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
new file mode 100644
index 0000000..e290651
--- /dev/null
+++ b/drivers/platform/x86/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for linux/drivers/platform/x86
+# x86 Platform-Specific Drivers
+#
+obj-$(CONFIG_ASUS_LAPTOP)	+= asus-laptop.o
+obj-$(CONFIG_EEEPC_LAPTOP)	+= eeepc-laptop.o
+obj-$(CONFIG_MSI_LAPTOP)	+= msi-laptop.o
+obj-$(CONFIG_COMPAL_LAPTOP)	+= compal-laptop.o
+obj-$(CONFIG_DELL_LAPTOP)	+= dell-laptop.o
+obj-$(CONFIG_ACER_WMI)		+= acer-wmi.o
+obj-$(CONFIG_HP_WMI)		+= hp-wmi.o
+obj-$(CONFIG_TC1100_WMI)	+= tc1100-wmi.o
+obj-$(CONFIG_SONY_LAPTOP)	+= sony-laptop.o
+obj-$(CONFIG_THINKPAD_ACPI)	+= thinkpad_acpi.o
+obj-$(CONFIG_FUJITSU_LAPTOP)	+= fujitsu-laptop.o
+obj-$(CONFIG_PANASONIC_LAPTOP)	+= panasonic-laptop.o
+obj-$(CONFIG_INTEL_MENLOW)	+= intel_menlow.o
+obj-$(CONFIG_ACPI_WMI)		+= wmi.o
+obj-$(CONFIG_ACPI_ASUS)		+= asus_acpi.o
+obj-$(CONFIG_ACPI_TOSHIBA)	+= toshiba_acpi.o
diff --git a/drivers/misc/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
similarity index 100%
rename from drivers/misc/acer-wmi.c
rename to drivers/platform/x86/acer-wmi.c
diff --git a/drivers/misc/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
similarity index 100%
rename from drivers/misc/asus-laptop.c
rename to drivers/platform/x86/asus-laptop.c
diff --git a/drivers/acpi/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
similarity index 100%
rename from drivers/acpi/asus_acpi.c
rename to drivers/platform/x86/asus_acpi.c
diff --git a/drivers/misc/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
similarity index 100%
rename from drivers/misc/compal-laptop.c
rename to drivers/platform/x86/compal-laptop.c
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
new file mode 100644
index 0000000..16e11c2
--- /dev/null
+++ b/drivers/platform/x86/dell-laptop.c
@@ -0,0 +1,436 @@
+/*
+ *  Driver for Dell laptop extras
+ *
+ *  Copyright (c) Red Hat <mjg@redhat.com>
+ *
+ *  Based on documentation in the libsmbios package, Copyright (C) 2005 Dell
+ *  Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/dmi.h>
+#include <linux/io.h>
+#include <linux/rfkill.h>
+#include <linux/power_supply.h>
+#include <linux/acpi.h>
+#include "../../firmware/dcdbas.h"
+
+#define BRIGHTNESS_TOKEN 0x7d
+
+/* This structure will be modified by the firmware when we enter
+ * system management mode, hence the volatiles */
+
+struct calling_interface_buffer {
+	u16 class;
+	u16 select;
+	volatile u32 input[4];
+	volatile u32 output[4];
+} __packed;
+
+struct calling_interface_token {
+	u16 tokenID;
+	u16 location;
+	union {
+		u16 value;
+		u16 stringlength;
+	};
+};
+
+struct calling_interface_structure {
+	struct dmi_header header;
+	u16 cmdIOAddress;
+	u8 cmdIOCode;
+	u32 supportedCmds;
+	struct calling_interface_token tokens[];
+} __packed;
+
+static int da_command_address;
+static int da_command_code;
+static int da_num_tokens;
+static struct calling_interface_token *da_tokens;
+
+static struct backlight_device *dell_backlight_device;
+static struct rfkill *wifi_rfkill;
+static struct rfkill *bluetooth_rfkill;
+static struct rfkill *wwan_rfkill;
+
+static const struct dmi_system_id __initdata dell_device_table[] = {
+	{
+		.ident = "Dell laptop",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_CHASSIS_TYPE, "8"),
+		},
+	},
+	{ }
+};
+
+static void parse_da_table(const struct dmi_header *dm)
+{
+	/* Final token is a terminator, so we don't want to copy it */
+	int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1;
+	struct calling_interface_structure *table =
+		container_of(dm, struct calling_interface_structure, header);
+
+	/* 4 bytes of table header, plus 7 bytes of Dell header, plus at least
+	   6 bytes of entry */
+
+	if (dm->length < 17)
+		return;
+
+	da_command_address = table->cmdIOAddress;
+	da_command_code = table->cmdIOCode;
+
+	da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
+			     sizeof(struct calling_interface_token),
+			     GFP_KERNEL);
+
+	if (!da_tokens)
+		return;
+
+	memcpy(da_tokens+da_num_tokens, table->tokens,
+	       sizeof(struct calling_interface_token) * tokens);
+
+	da_num_tokens += tokens;
+}
+
+static void find_tokens(const struct dmi_header *dm)
+{
+	switch (dm->type) {
+	case 0xd4: /* Indexed IO */
+		break;
+	case 0xd5: /* Protected Area Type 1 */
+		break;
+	case 0xd6: /* Protected Area Type 2 */
+		break;
+	case 0xda: /* Calling interface */
+		parse_da_table(dm);
+		break;
+	}
+}
+
+static int find_token_location(int tokenid)
+{
+	int i;
+	for (i = 0; i < da_num_tokens; i++) {
+		if (da_tokens[i].tokenID == tokenid)
+			return da_tokens[i].location;
+	}
+
+	return -1;
+}
+
+static struct calling_interface_buffer *
+dell_send_request(struct calling_interface_buffer *buffer, int class,
+		  int select)
+{
+	struct smi_cmd command;
+
+	command.magic = SMI_CMD_MAGIC;
+	command.command_address = da_command_address;
+	command.command_code = da_command_code;
+	command.ebx = virt_to_phys(buffer);
+	command.ecx = 0x42534931;
+
+	buffer->class = class;
+	buffer->select = select;
+
+	dcdbas_smi_request(&command);
+
+	return buffer;
+}
+
+/* Derived from information in DellWirelessCtl.cpp:
+   Class 17, select 11 is radio control. It returns an array of 32-bit values.
+
+   result[0]: return code
+   result[1]:
+     Bit 0:      Hardware switch supported
+     Bit 1:      Wifi locator supported
+     Bit 2:      Wifi is supported
+     Bit 3:      Bluetooth is supported
+     Bit 4:      WWAN is supported
+     Bit 5:      Wireless keyboard supported
+     Bits 6-7:   Reserved
+     Bit 8:      Wifi is installed
+     Bit 9:      Bluetooth is installed
+     Bit 10:     WWAN is installed
+     Bits 11-15: Reserved
+     Bit 16:     Hardware switch is on
+     Bit 17:     Wifi is blocked
+     Bit 18:     Bluetooth is blocked
+     Bit 19:     WWAN is blocked
+     Bits 20-31: Reserved
+   result[2]: NVRAM size in bytes
+   result[3]: NVRAM format version number
+*/
+
+static int dell_rfkill_set(int radio, enum rfkill_state state)
+{
+	struct calling_interface_buffer buffer;
+	int disable = (state == RFKILL_STATE_UNBLOCKED) ? 0 : 1;
+
+	memset(&buffer, 0, sizeof(struct calling_interface_buffer));
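+	/* input[0] encoding: low byte 1, radio number in bits 8-15,
+	   disable flag in bit 16 */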
+	buffer.input[0] = (1 | (radio<<8) | (disable << 16));
+	dell_send_request(&buffer, 17, 11);
+
+	return 0;
+}
+
+static int dell_wifi_set(void *data, enum rfkill_state state)
+{
+	return dell_rfkill_set(1, state);
+}
+
+static int dell_bluetooth_set(void *data, enum rfkill_state state)
+{
+	return dell_rfkill_set(2, state);
+}
+
+static int dell_wwan_set(void *data, enum rfkill_state state)
+{
+	return dell_rfkill_set(3, state);
+}
+
+static int dell_rfkill_get(int bit, enum rfkill_state *state)
+{
+	struct calling_interface_buffer buffer;
+	int status;
+	int new_state = RFKILL_STATE_HARD_BLOCKED;
+
+	memset(&buffer, 0, sizeof(struct calling_interface_buffer));
+	dell_send_request(&buffer, 17, 11);
+	status = buffer.output[1];
+
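+	/* A set blocked bit means the radio is off: hard-blocked unless the
+	   hardware switch (bit 16) reports on, in which case soft-blocked */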
+	if (status & (1<<16))
+		new_state = RFKILL_STATE_SOFT_BLOCKED;
+
+	if (status & (1<<bit))
+		*state = new_state;
+	else
+		*state = RFKILL_STATE_UNBLOCKED;
+
+	return 0;
+}
+
+static int dell_wifi_get(void *data, enum rfkill_state *state)
+{
+	return dell_rfkill_get(17, state);
+}
+
+static int dell_bluetooth_get(void *data, enum rfkill_state *state)
+{
+	return dell_rfkill_get(18, state);
+}
+
+static int dell_wwan_get(void *data, enum rfkill_state *state)
+{
+	return dell_rfkill_get(19, state);
+}
+
+static int dell_setup_rfkill(void)
+{
+	struct calling_interface_buffer buffer;
+	int status;
+	int ret;
+
+	memset(&buffer, 0, sizeof(struct calling_interface_buffer));
+	dell_send_request(&buffer, 17, 11);
+	status = buffer.output[1];
+
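+	/* Only register rfkill interfaces for radios reported as both
+	   supported (bits 2-4) and installed (bits 8-10) */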
+	if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
+		wifi_rfkill = rfkill_allocate(NULL, RFKILL_TYPE_WLAN);
+		if (!wifi_rfkill) {
+			ret = -ENOMEM;
+			goto err_wifi;
+		}
+		wifi_rfkill->name = "dell-wifi";
+		wifi_rfkill->toggle_radio = dell_wifi_set;
+		wifi_rfkill->get_state = dell_wifi_get;
+		ret = rfkill_register(wifi_rfkill);
+		if (ret)
+			goto err_wifi;
+	}
+
+	if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
+		bluetooth_rfkill = rfkill_allocate(NULL, RFKILL_TYPE_BLUETOOTH);
+		if (!bluetooth_rfkill) {
+			ret = -ENOMEM;
+			goto err_bluetooth;
+		}
+		bluetooth_rfkill->name = "dell-bluetooth";
+		bluetooth_rfkill->toggle_radio = dell_bluetooth_set;
+		bluetooth_rfkill->get_state = dell_bluetooth_get;
+		ret = rfkill_register(bluetooth_rfkill);
+		if (ret)
+			goto err_bluetooth;
+	}
+
+	if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
+		wwan_rfkill = rfkill_allocate(NULL, RFKILL_TYPE_WWAN);
+		if (!wwan_rfkill) {
+			ret = -ENOMEM;
+			goto err_wwan;
+		}
+		wwan_rfkill->name = "dell-wwan";
+		wwan_rfkill->toggle_radio = dell_wwan_set;
+		wwan_rfkill->get_state = dell_wwan_get;
+		ret = rfkill_register(wwan_rfkill);
+		if (ret)
+			goto err_wwan;
+	}
+
+	return 0;
+err_wwan:
+	if (wwan_rfkill)
+		rfkill_free(wwan_rfkill);
+	if (bluetooth_rfkill) {
+		rfkill_unregister(bluetooth_rfkill);
+		bluetooth_rfkill = NULL;
+	}
+err_bluetooth:
+	if (bluetooth_rfkill)
+		rfkill_free(bluetooth_rfkill);
+	if (wifi_rfkill) {
+		rfkill_unregister(wifi_rfkill);
+		wifi_rfkill = NULL;
+	}
+err_wifi:
+	if (wifi_rfkill)
+		rfkill_free(wifi_rfkill);
+
+	return ret;
+}
+
+static int dell_send_intensity(struct backlight_device *bd)
+{
+	struct calling_interface_buffer buffer;
+
+	memset(&buffer, 0, sizeof(struct calling_interface_buffer));
+	buffer.input[0] = find_token_location(BRIGHTNESS_TOKEN);
+	buffer.input[1] = bd->props.brightness;
+
+	if (buffer.input[0] == -1)
+		return -ENODEV;
+
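+	/* use select 2 when the system is on AC power, select 1 on battery */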
+	if (power_supply_is_system_supplied() > 0)
+		dell_send_request(&buffer, 1, 2);
+	else
+		dell_send_request(&buffer, 1, 1);
+
+	return 0;
+}
+
+static int dell_get_intensity(struct backlight_device *bd)
+{
+	struct calling_interface_buffer buffer;
+
+	memset(&buffer, 0, sizeof(struct calling_interface_buffer));
+	buffer.input[0] = find_token_location(BRIGHTNESS_TOKEN);
+
+	if (buffer.input[0] == -1)
+		return -ENODEV;
+
+	if (power_supply_is_system_supplied() > 0)
+		dell_send_request(&buffer, 0, 2);
+	else
+		dell_send_request(&buffer, 0, 1);
+
+	return buffer.output[1];
+}
+
+static struct backlight_ops dell_ops = {
+	.get_brightness = dell_get_intensity,
+	.update_status  = dell_send_intensity,
+};
+
+static int __init dell_init(void)
+{
+	struct calling_interface_buffer buffer;
+	int max_intensity = 0;
+	int ret;
+
+	if (!dmi_check_system(dell_device_table))
+		return -ENODEV;
+
+	dmi_walk(find_tokens);
+
+	if (!da_tokens)  {
+		printk(KERN_INFO "dell-laptop: Unable to find dmi tokens\n");
+		return -ENODEV;
+	}
+
+	ret = dell_setup_rfkill();
+
+	if (ret) {
+		printk(KERN_WARNING "dell-laptop: Unable to setup rfkill\n");
+		goto out;
+	}
+
+#ifdef CONFIG_ACPI
+	/* In the event of an ACPI backlight being available, don't
+	 * register the platform controller.
+	 */
+	if (acpi_video_backlight_support())
+		return 0;
+#endif
+
+	memset(&buffer, 0, sizeof(struct calling_interface_buffer));
+	buffer.input[0] = find_token_location(BRIGHTNESS_TOKEN);
+
+	if (buffer.input[0] != -1) {
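+		/* a brightness token read reports the maximum level in output[3] */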
+		dell_send_request(&buffer, 0, 2);
+		max_intensity = buffer.output[3];
+	}
+
+	if (max_intensity) {
+		dell_backlight_device = backlight_device_register(
+			"dell_backlight",
+			NULL, NULL,
+			&dell_ops);
+
+		if (IS_ERR(dell_backlight_device)) {
+			ret = PTR_ERR(dell_backlight_device);
+			dell_backlight_device = NULL;
+			goto out;
+		}
+
+		dell_backlight_device->props.max_brightness = max_intensity;
+		dell_backlight_device->props.brightness =
+			dell_get_intensity(dell_backlight_device);
+		backlight_update_status(dell_backlight_device);
+	}
+
+	return 0;
+out:
+	if (wifi_rfkill)
+		rfkill_unregister(wifi_rfkill);
+	if (bluetooth_rfkill)
+		rfkill_unregister(bluetooth_rfkill);
+	if (wwan_rfkill)
+		rfkill_unregister(wwan_rfkill);
+	kfree(da_tokens);
+	return ret;
+}
+
+static void __exit dell_exit(void)
+{
+	backlight_device_unregister(dell_backlight_device);
+	if (wifi_rfkill)
+		rfkill_unregister(wifi_rfkill);
+	if (bluetooth_rfkill)
+		rfkill_unregister(bluetooth_rfkill);
+	if (wwan_rfkill)
+		rfkill_unregister(wwan_rfkill);
+}
+
+module_init(dell_init);
+module_exit(dell_exit);
+
+MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
+MODULE_DESCRIPTION("Dell laptop driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("dmi:*svnDellInc.:*:ct8:*");
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
new file mode 100644
index 0000000..9d93cb9
--- /dev/null
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -0,0 +1,872 @@
+/*
+ *  eeepc-laptop.c - Asus Eee PC extras
+ *
+ *  Based on asus_acpi.c as patched for the Eee PC by Asus:
+ *  ftp://ftp.asus.com/pub/ASUS/EeePC/701/ASUS_ACPI_071126.rar
+ *  Based on eee.c from eeepc-linux
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_bus.h>
+#include <linux/uaccess.h>
+#include <linux/input.h>
+#include <linux/rfkill.h>
+
+#define EEEPC_LAPTOP_VERSION	"0.1"
+
+#define EEEPC_HOTK_NAME		"Eee PC Hotkey Driver"
+#define EEEPC_HOTK_FILE		"eeepc"
+#define EEEPC_HOTK_CLASS	"hotkey"
+#define EEEPC_HOTK_DEVICE_NAME	"Hotkey"
+#define EEEPC_HOTK_HID		"ASUS010"
+
+#define EEEPC_LOG	EEEPC_HOTK_FILE ": "
+#define EEEPC_ERR	KERN_ERR	EEEPC_LOG
+#define EEEPC_WARNING	KERN_WARNING	EEEPC_LOG
+#define EEEPC_NOTICE	KERN_NOTICE	EEEPC_LOG
+#define EEEPC_INFO	KERN_INFO	EEEPC_LOG
+
+/*
+ * Definitions for Asus EeePC
+ */
+#define	NOTIFY_WLAN_ON	0x10
+#define NOTIFY_BRN_MIN	0x20
+#define NOTIFY_BRN_MAX	0x2f
+
+enum {
+	DISABLE_ASL_WLAN = 0x0001,
+	DISABLE_ASL_BLUETOOTH = 0x0002,
+	DISABLE_ASL_IRDA = 0x0004,
+	DISABLE_ASL_CAMERA = 0x0008,
+	DISABLE_ASL_TV = 0x0010,
+	DISABLE_ASL_GPS = 0x0020,
+	DISABLE_ASL_DISPLAYSWITCH = 0x0040,
+	DISABLE_ASL_MODEM = 0x0080,
+	DISABLE_ASL_CARDREADER = 0x0100
+};
+
+enum {
+	CM_ASL_WLAN = 0,
+	CM_ASL_BLUETOOTH,
+	CM_ASL_IRDA,
+	CM_ASL_1394,
+	CM_ASL_CAMERA,
+	CM_ASL_TV,
+	CM_ASL_GPS,
+	CM_ASL_DVDROM,
+	CM_ASL_DISPLAYSWITCH,
+	CM_ASL_PANELBRIGHT,
+	CM_ASL_BIOSFLASH,
+	CM_ASL_ACPIFLASH,
+	CM_ASL_CPUFV,
+	CM_ASL_CPUTEMPERATURE,
+	CM_ASL_FANCPU,
+	CM_ASL_FANCHASSIS,
+	CM_ASL_USBPORT1,
+	CM_ASL_USBPORT2,
+	CM_ASL_USBPORT3,
+	CM_ASL_MODEM,
+	CM_ASL_CARDREADER,
+	CM_ASL_LID
+};
+
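+/*
+ * ACPI method names for each CM_ASL_* control; a NULL entry means the
+ * control cannot be read (cm_getv) or written (cm_setv) through ACPI
+ */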
+static const char *cm_getv[] = {
+	"WLDG", "BTHG", NULL, NULL,
+	"CAMG", NULL, NULL, NULL,
+	NULL, "PBLG", NULL, NULL,
+	"CFVG", NULL, NULL, NULL,
+	"USBG", NULL, NULL, "MODG",
+	"CRDG", "LIDG"
+};
+
+static const char *cm_setv[] = {
+	"WLDS", "BTHS", NULL, NULL,
+	"CAMS", NULL, NULL, NULL,
+	"SDSP", "PBLS", "HDPS", NULL,
+	"CFVS", NULL, NULL, NULL,
+	"USBG", NULL, NULL, "MODS",
+	"CRDS", NULL
+};
+
+#define EEEPC_EC	"\\_SB.PCI0.SBRG.EC0."
+
+#define EEEPC_EC_FAN_PWM	EEEPC_EC "SC02" /* Fan PWM duty cycle (%) */
+#define EEEPC_EC_SC02		0x63
+#define EEEPC_EC_FAN_HRPM	EEEPC_EC "SC05" /* High byte, fan speed (RPM) */
+#define EEEPC_EC_FAN_LRPM	EEEPC_EC "SC06" /* Low byte, fan speed (RPM) */
+#define EEEPC_EC_FAN_CTRL	EEEPC_EC "SFB3" /* Byte containing SF25  */
+#define EEEPC_EC_SFB3		0xD3
+
+/*
+ * This is the main structure; it stores useful information
+ * about the hotk device
+ */
+struct eeepc_hotk {
+	struct acpi_device *device;	/* the device we are in */
+	acpi_handle handle;		/* the handle of the hotk device */
+	u32 cm_supported;		/* the control methods supported
+					   by this BIOS */
+	uint init_flag;			/* Init flags */
+	u16 event_count[128];		/* count for each event */
+	struct input_dev *inputdev;
+	u16 *keycode_map;
+	struct rfkill *eeepc_wlan_rfkill;
+	struct rfkill *eeepc_bluetooth_rfkill;
+};
+
+/* The actual device the driver binds to */
+static struct eeepc_hotk *ehotk;
+
+/* Platform device/driver */
+static struct platform_driver platform_driver = {
+	.driver = {
+		.name = EEEPC_HOTK_FILE,
+		.owner = THIS_MODULE,
+	}
+};
+
+static struct platform_device *platform_device;
+
+struct key_entry {
+	char type;
+	u8 code;
+	u16 keycode;
+};
+
+enum { KE_KEY, KE_END };
+
+static struct key_entry eeepc_keymap[] = {
+	/* Sleep already handled via generic ACPI code */
+	{KE_KEY, 0x10, KEY_WLAN },
+	{KE_KEY, 0x12, KEY_PROG1 },
+	{KE_KEY, 0x13, KEY_MUTE },
+	{KE_KEY, 0x14, KEY_VOLUMEDOWN },
+	{KE_KEY, 0x15, KEY_VOLUMEUP },
+	{KE_KEY, 0x30, KEY_SWITCHVIDEOMODE },
+	{KE_KEY, 0x31, KEY_SWITCHVIDEOMODE },
+	{KE_KEY, 0x32, KEY_SWITCHVIDEOMODE },
+	{KE_END, 0},
+};
+
+/*
+ * The hotkey driver declaration
+ */
+static int eeepc_hotk_add(struct acpi_device *device);
+static int eeepc_hotk_remove(struct acpi_device *device, int type);
+
+static const struct acpi_device_id eeepc_device_ids[] = {
+	{EEEPC_HOTK_HID, 0},
+	{"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, eeepc_device_ids);
+
+static struct acpi_driver eeepc_hotk_driver = {
+	.name = EEEPC_HOTK_NAME,
+	.class = EEEPC_HOTK_CLASS,
+	.ids = eeepc_device_ids,
+	.ops = {
+		.add = eeepc_hotk_add,
+		.remove = eeepc_hotk_remove,
+	},
+};
+
+/* The backlight device /sys/class/backlight */
+static struct backlight_device *eeepc_backlight_device;
+
+/* The hwmon device */
+static struct device *eeepc_hwmon_device;
+
+/*
+ * The backlight class declaration
+ */
+static int read_brightness(struct backlight_device *bd);
+static int update_bl_status(struct backlight_device *bd);
+static struct backlight_ops eeepcbl_ops = {
+	.get_brightness = read_brightness,
+	.update_status = update_bl_status,
+};
+
+MODULE_AUTHOR("Corentin Chary, Eric Cooper");
+MODULE_DESCRIPTION(EEEPC_HOTK_NAME);
+MODULE_LICENSE("GPL");
+
+/*
+ * ACPI Helpers
+ */
+static int write_acpi_int(acpi_handle handle, const char *method, int val,
+			  struct acpi_buffer *output)
+{
+	struct acpi_object_list params;
+	union acpi_object in_obj;
+	acpi_status status;
+
+	params.count = 1;
+	params.pointer = &in_obj;
+	in_obj.type = ACPI_TYPE_INTEGER;
+	in_obj.integer.value = val;
+
+	status = acpi_evaluate_object(handle, (char *)method, &params, output);
+	return (status == AE_OK ? 0 : -1);
+}
+
+static int read_acpi_int(acpi_handle handle, const char *method, int *val)
+{
+	acpi_status status;
+	unsigned long long result;
+
+	status = acpi_evaluate_integer(handle, (char *)method, NULL, &result);
+	if (ACPI_FAILURE(status)) {
+		*val = -1;
+		return -1;
+	} else {
+		*val = result;
+		return 0;
+	}
+}
+
+static int set_acpi(int cm, int value)
+{
+	if (ehotk->cm_supported & (0x1 << cm)) {
+		const char *method = cm_setv[cm];
+		if (method == NULL)
+			return -ENODEV;
+		if (write_acpi_int(ehotk->handle, method, value, NULL))
+			printk(EEEPC_WARNING "Error writing %s\n", method);
+	}
+	return 0;
+}
+
+static int get_acpi(int cm)
+{
+	int value = -1;
+	if ((ehotk->cm_supported & (0x1 << cm))) {
+		const char *method = cm_getv[cm];
+		if (method == NULL)
+			return -ENODEV;
+		if (read_acpi_int(ehotk->handle, method, &value))
+			printk(EEEPC_WARNING "Error reading %s\n", method);
+	}
+	return value;
+}
+
+/*
+ * Backlight
+ */
+static int read_brightness(struct backlight_device *bd)
+{
+	return get_acpi(CM_ASL_PANELBRIGHT);
+}
+
+static int set_brightness(struct backlight_device *bd, int value)
+{
+	value = max(0, min(15, value));
+	return set_acpi(CM_ASL_PANELBRIGHT, value);
+}
+
+static int update_bl_status(struct backlight_device *bd)
+{
+	return set_brightness(bd, bd->props.brightness);
+}
+
+/*
+ * Rfkill helpers
+ */
+
+static int eeepc_wlan_rfkill_set(void *data, enum rfkill_state state)
+{
+	if (state == RFKILL_STATE_SOFT_BLOCKED)
+		return set_acpi(CM_ASL_WLAN, 0);
+	else
+		return set_acpi(CM_ASL_WLAN, 1);
+}
+
+static int eeepc_wlan_rfkill_state(void *data, enum rfkill_state *state)
+{
+	if (get_acpi(CM_ASL_WLAN) == 1)
+		*state = RFKILL_STATE_UNBLOCKED;
+	else
+		*state = RFKILL_STATE_SOFT_BLOCKED;
+	return 0;
+}
+
+static int eeepc_bluetooth_rfkill_set(void *data, enum rfkill_state state)
+{
+	if (state == RFKILL_STATE_SOFT_BLOCKED)
+		return set_acpi(CM_ASL_BLUETOOTH, 0);
+	else
+		return set_acpi(CM_ASL_BLUETOOTH, 1);
+}
+
+static int eeepc_bluetooth_rfkill_state(void *data, enum rfkill_state *state)
+{
+	if (get_acpi(CM_ASL_BLUETOOTH) == 1)
+		*state = RFKILL_STATE_UNBLOCKED;
+	else
+		*state = RFKILL_STATE_SOFT_BLOCKED;
+	return 0;
+}
+
+/*
+ * Sys helpers
+ */
+static int parse_arg(const char *buf, unsigned long count, int *val)
+{
+	if (!count)
+		return 0;
+	if (sscanf(buf, "%i", val) != 1)
+		return -EINVAL;
+	return count;
+}
+
+static ssize_t store_sys_acpi(int cm, const char *buf, size_t count)
+{
+	int rv, value;
+
+	rv = parse_arg(buf, count, &value);
+	if (rv > 0)
+		set_acpi(cm, value);
+	return rv;
+}
+
+static ssize_t show_sys_acpi(int cm, char *buf)
+{
+	return sprintf(buf, "%d\n", get_acpi(cm));
+}
+
+#define EEEPC_CREATE_DEVICE_ATTR(_name, _cm)				\
+	static ssize_t show_##_name(struct device *dev,			\
+				    struct device_attribute *attr,	\
+				    char *buf)				\
+	{								\
+		return show_sys_acpi(_cm, buf);				\
+	}								\
+	static ssize_t store_##_name(struct device *dev,		\
+				     struct device_attribute *attr,	\
+				     const char *buf, size_t count)	\
+	{								\
+		return store_sys_acpi(_cm, buf, count);			\
+	}								\
+	static struct device_attribute dev_attr_##_name = {		\
+		.attr = {						\
+			.name = __stringify(_name),			\
+			.mode = 0644 },					\
+		.show   = show_##_name,					\
+		.store  = store_##_name,				\
+	}
+
+EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA);
+EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER);
+EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH);
+
+static struct attribute *platform_attributes[] = {
+	&dev_attr_camera.attr,
+	&dev_attr_cardr.attr,
+	&dev_attr_disp.attr,
+	NULL
+};
+
+static struct attribute_group platform_attribute_group = {
+	.attrs = platform_attributes
+};
+
+/*
+ * Hotkey functions
+ */
+static struct key_entry *eepc_get_entry_by_scancode(int code)
+{
+	struct key_entry *key;
+
+	for (key = eeepc_keymap; key->type != KE_END; key++)
+		if (code == key->code)
+			return key;
+
+	return NULL;
+}
+
+static struct key_entry *eepc_get_entry_by_keycode(int code)
+{
+	struct key_entry *key;
+
+	for (key = eeepc_keymap; key->type != KE_END; key++)
+		if (code == key->keycode && key->type == KE_KEY)
+			return key;
+
+	return NULL;
+}
+
+static int eeepc_getkeycode(struct input_dev *dev, int scancode, int *keycode)
+{
+	struct key_entry *key = eepc_get_entry_by_scancode(scancode);
+
+	if (key && key->type == KE_KEY) {
+		*keycode = key->keycode;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode)
+{
+	struct key_entry *key;
+	int old_keycode;
+
+	if (keycode < 0 || keycode > KEY_MAX)
+		return -EINVAL;
+
+	key = eepc_get_entry_by_scancode(scancode);
+	if (key && key->type == KE_KEY) {
+		old_keycode = key->keycode;
+		key->keycode = keycode;
+		set_bit(keycode, dev->keybit);
+		if (!eepc_get_entry_by_keycode(old_keycode))
+			clear_bit(old_keycode, dev->keybit);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int eeepc_hotk_check(void)
+{
+	const struct key_entry *key;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	int result;
+
+	result = acpi_bus_get_status(ehotk->device);
+	if (result)
+		return result;
+	if (ehotk->device->status.present) {
+		if (write_acpi_int(ehotk->handle, "INIT", ehotk->init_flag,
+				    &buffer)) {
+			printk(EEEPC_ERR "Hotkey initialization failed\n");
+			return -ENODEV;
+		} else {
+			printk(EEEPC_NOTICE "Hotkey init flags 0x%x\n",
+			       ehotk->init_flag);
+		}
+		/* get control methods supported */
+		if (read_acpi_int(ehotk->handle, "CMSG"
+				   , &ehotk->cm_supported)) {
+			printk(EEEPC_ERR
+			       "Get control methods supported failed\n");
+			return -ENODEV;
+		} else {
+			printk(EEEPC_INFO
+			       "Get control methods supported: 0x%x\n",
+			       ehotk->cm_supported);
+		}
+		ehotk->inputdev = input_allocate_device();
+		if (!ehotk->inputdev) {
+			printk(EEEPC_INFO "Unable to allocate input device\n");
+			return 0;
+		}
+		ehotk->inputdev->name = "Asus EeePC extra buttons";
+		ehotk->inputdev->phys = EEEPC_HOTK_FILE "/input0";
+		ehotk->inputdev->id.bustype = BUS_HOST;
+		ehotk->inputdev->getkeycode = eeepc_getkeycode;
+		ehotk->inputdev->setkeycode = eeepc_setkeycode;
+
+		for (key = eeepc_keymap; key->type != KE_END; key++) {
+			switch (key->type) {
+			case KE_KEY:
+				set_bit(EV_KEY, ehotk->inputdev->evbit);
+				set_bit(key->keycode, ehotk->inputdev->keybit);
+				break;
+			}
+		}
+		result = input_register_device(ehotk->inputdev);
+		if (result) {
+			printk(EEEPC_INFO "Unable to register input device\n");
+			input_free_device(ehotk->inputdev);
+			return 0;
+		}
+	} else {
+		printk(EEEPC_ERR "Hotkey device not present, aborting\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void notify_brn(void)
+{
+	struct backlight_device *bd = eeepc_backlight_device;
+
+	/* no backlight device is registered when the ACPI video driver
+	   handles brightness, so guard against a NULL pointer */
+	if (bd)
+		bd->props.brightness = read_brightness(bd);
+}
+
+static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
+{
+	static struct key_entry *key;
+	if (!ehotk)
+		return;
+	if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX)
+		notify_brn();
+	acpi_bus_generate_proc_event(ehotk->device, event,
+				     ehotk->event_count[event % 128]++);
+	if (ehotk->inputdev) {
+		key = eepc_get_entry_by_scancode(event);
+		if (key) {
+			switch (key->type) {
+			case KE_KEY:
+				input_report_key(ehotk->inputdev, key->keycode,
+						 1);
+				input_sync(ehotk->inputdev);
+				input_report_key(ehotk->inputdev, key->keycode,
+						 0);
+				input_sync(ehotk->inputdev);
+				break;
+			}
+		}
+	}
+}
+
+static int eeepc_hotk_add(struct acpi_device *device)
+{
+	acpi_status status = AE_OK;
+	int result;
+
+	if (!device)
+		 return -EINVAL;
+	printk(EEEPC_NOTICE EEEPC_HOTK_NAME "\n");
+	ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL);
+	if (!ehotk)
+		return -ENOMEM;
+	ehotk->init_flag = DISABLE_ASL_WLAN | DISABLE_ASL_DISPLAYSWITCH;
+	ehotk->handle = device->handle;
+	strcpy(acpi_device_name(device), EEEPC_HOTK_DEVICE_NAME);
+	strcpy(acpi_device_class(device), EEEPC_HOTK_CLASS);
+	device->driver_data = ehotk;
+	ehotk->device = device;
+	result = eeepc_hotk_check();
+	if (result)
+		goto end;
+	status = acpi_install_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY,
+					     eeepc_hotk_notify, ehotk);
+	if (ACPI_FAILURE(status))
+		printk(EEEPC_ERR "Error installing notify handler\n");
+
+	if (get_acpi(CM_ASL_WLAN) != -1) {
+		ehotk->eeepc_wlan_rfkill = rfkill_allocate(&device->dev,
+							   RFKILL_TYPE_WLAN);
+
+		if (!ehotk->eeepc_wlan_rfkill)
+			goto end;
+
+		ehotk->eeepc_wlan_rfkill->name = "eeepc-wlan";
+		ehotk->eeepc_wlan_rfkill->toggle_radio = eeepc_wlan_rfkill_set;
+		ehotk->eeepc_wlan_rfkill->get_state = eeepc_wlan_rfkill_state;
+		if (get_acpi(CM_ASL_WLAN) == 1)
+			ehotk->eeepc_wlan_rfkill->state =
+				RFKILL_STATE_UNBLOCKED;
+		else
+			ehotk->eeepc_wlan_rfkill->state =
+				RFKILL_STATE_SOFT_BLOCKED;
+		rfkill_register(ehotk->eeepc_wlan_rfkill);
+	}
+
+	if (get_acpi(CM_ASL_BLUETOOTH) != -1) {
+		ehotk->eeepc_bluetooth_rfkill =
+			rfkill_allocate(&device->dev, RFKILL_TYPE_BLUETOOTH);
+
+		if (!ehotk->eeepc_bluetooth_rfkill)
+			goto end;
+
+		ehotk->eeepc_bluetooth_rfkill->name = "eeepc-bluetooth";
+		ehotk->eeepc_bluetooth_rfkill->toggle_radio =
+			eeepc_bluetooth_rfkill_set;
+		ehotk->eeepc_bluetooth_rfkill->get_state =
+			eeepc_bluetooth_rfkill_state;
+		if (get_acpi(CM_ASL_BLUETOOTH) == 1)
+			ehotk->eeepc_bluetooth_rfkill->state =
+				RFKILL_STATE_UNBLOCKED;
+		else
+			ehotk->eeepc_bluetooth_rfkill->state =
+				RFKILL_STATE_SOFT_BLOCKED;
+		rfkill_register(ehotk->eeepc_bluetooth_rfkill);
+	}
+
+ end:
+	if (result) {
+		kfree(ehotk);
+		ehotk = NULL;
+	}
+	return result;
+}
+
+static int eeepc_hotk_remove(struct acpi_device *device, int type)
+{
+	acpi_status status = 0;
+
+	if (!device || !acpi_driver_data(device))
+		 return -EINVAL;
+	status = acpi_remove_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY,
+					    eeepc_hotk_notify);
+	if (ACPI_FAILURE(status))
+		printk(EEEPC_ERR "Error removing notify handler\n");
+	kfree(ehotk);
+	return 0;
+}
+
+/*
+ * Hwmon
+ */
+static int eeepc_get_fan_pwm(void)
+{
+	int value = 0;
+
+	read_acpi_int(NULL, EEEPC_EC_FAN_PWM, &value);
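+	/* the EC reports the duty cycle as a percentage; scale it to 0-255 */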
+	value = value * 255 / 100;
+	return (value);
+}
+
+static void eeepc_set_fan_pwm(int value)
+{
+	value = SENSORS_LIMIT(value, 0, 255);
+	value = value * 100 / 255;
+	ec_write(EEEPC_EC_SC02, value);
+}
+
+static int eeepc_get_fan_rpm(void)
+{
+	int high = 0;
+	int low = 0;
+
+	read_acpi_int(NULL, EEEPC_EC_FAN_HRPM, &high);
+	read_acpi_int(NULL, EEEPC_EC_FAN_LRPM, &low);
+	return (high << 8 | low);
+}
+
+static int eeepc_get_fan_ctrl(void)
+{
+	int value = 0;
+
+	read_acpi_int(NULL, EEEPC_EC_FAN_CTRL, &value);
+	return ((value & 0x02 ? 1 : 0));
+}
+
+static void eeepc_set_fan_ctrl(int manual)
+{
+	int value = 0;
+
+	read_acpi_int(NULL, EEEPC_EC_FAN_CTRL, &value);
+	if (manual)
+		value |= 0x02;
+	else
+		value &= ~0x02;
+	ec_write(EEEPC_EC_SFB3, value);
+}
+
+static ssize_t store_sys_hwmon(void (*set)(int), const char *buf, size_t count)
+{
+	int rv, value;
+
+	rv = parse_arg(buf, count, &value);
+	if (rv > 0)
+		set(value);
+	return rv;
+}
+
+static ssize_t show_sys_hwmon(int (*get)(void), char *buf)
+{
+	return sprintf(buf, "%d\n", get());
+}
+
+#define EEEPC_CREATE_SENSOR_ATTR(_name, _mode, _get, _set)		\
+	static ssize_t show_##_name(struct device *dev,			\
+				    struct device_attribute *attr,	\
+				    char *buf)				\
+	{								\
+		return show_sys_hwmon(_get, buf);			\
+	}								\
+	static ssize_t store_##_name(struct device *dev,		\
+				     struct device_attribute *attr,	\
+				     const char *buf, size_t count)	\
+	{								\
+		return store_sys_hwmon(_set, buf, count);		\
+	}								\
+	static SENSOR_DEVICE_ATTR(_name, _mode, show_##_name, store_##_name, 0);
+
+EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL);
+EEEPC_CREATE_SENSOR_ATTR(pwm1, S_IRUGO | S_IWUSR,
+			 eeepc_get_fan_pwm, eeepc_set_fan_pwm);
+EEEPC_CREATE_SENSOR_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
+			 eeepc_get_fan_ctrl, eeepc_set_fan_ctrl);
+
+static ssize_t
+show_name(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "eeepc\n");
+}
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+
+static struct attribute *hwmon_attributes[] = {
+	&sensor_dev_attr_pwm1.dev_attr.attr,
+	&sensor_dev_attr_fan1_input.dev_attr.attr,
+	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
+	&sensor_dev_attr_name.dev_attr.attr,
+	NULL
+};
+
+static struct attribute_group hwmon_attribute_group = {
+	.attrs = hwmon_attributes
+};
+
+/*
+ * exit/init
+ */
+static void eeepc_backlight_exit(void)
+{
+	if (eeepc_backlight_device)
+		backlight_device_unregister(eeepc_backlight_device);
+	if (ehotk->inputdev)
+		input_unregister_device(ehotk->inputdev);
+	if (ehotk->eeepc_wlan_rfkill)
+		rfkill_unregister(ehotk->eeepc_wlan_rfkill);
+	if (ehotk->eeepc_bluetooth_rfkill)
+		rfkill_unregister(ehotk->eeepc_bluetooth_rfkill);
+	eeepc_backlight_device = NULL;
+}
+
+static void eeepc_hwmon_exit(void)
+{
+	struct device *hwmon;
+
+	hwmon = eeepc_hwmon_device;
+	if (!hwmon)
+		return;
+	sysfs_remove_group(&hwmon->kobj,
+			   &hwmon_attribute_group);
+	hwmon_device_unregister(hwmon);
+	eeepc_hwmon_device = NULL;
+}
+
+static void __exit eeepc_laptop_exit(void)
+{
+	eeepc_backlight_exit();
+	eeepc_hwmon_exit();
+	acpi_bus_unregister_driver(&eeepc_hotk_driver);
+	sysfs_remove_group(&platform_device->dev.kobj,
+			   &platform_attribute_group);
+	platform_device_unregister(platform_device);
+	platform_driver_unregister(&platform_driver);
+}
+
+static int eeepc_backlight_init(struct device *dev)
+{
+	struct backlight_device *bd;
+
+	bd = backlight_device_register(EEEPC_HOTK_FILE, dev,
+				       NULL, &eeepcbl_ops);
+	if (IS_ERR(bd)) {
+		printk(EEEPC_ERR
+		       "Could not register eeepc backlight device\n");
+		eeepc_backlight_device = NULL;
+		return PTR_ERR(bd);
+	}
+	eeepc_backlight_device = bd;
+	bd->props.max_brightness = 15;
+	bd->props.brightness = read_brightness(NULL);
+	bd->props.power = FB_BLANK_UNBLANK;
+	backlight_update_status(bd);
+	return 0;
+}
+
+static int eeepc_hwmon_init(struct device *dev)
+{
+	struct device *hwmon;
+	int result;
+
+	hwmon = hwmon_device_register(dev);
+	if (IS_ERR(hwmon)) {
+		printk(EEEPC_ERR
+		       "Could not register eeepc hwmon device\n");
+		eeepc_hwmon_device = NULL;
+		return PTR_ERR(hwmon);
+	}
+	eeepc_hwmon_device = hwmon;
+	result = sysfs_create_group(&hwmon->kobj,
+				    &hwmon_attribute_group);
+	if (result)
+		eeepc_hwmon_exit();
+	return result;
+}
+
+static int __init eeepc_laptop_init(void)
+{
+	struct device *dev;
+	int result;
+
+	if (acpi_disabled)
+		return -ENODEV;
+	result = acpi_bus_register_driver(&eeepc_hotk_driver);
+	if (result < 0)
+		return result;
+	if (!ehotk) {
+		acpi_bus_unregister_driver(&eeepc_hotk_driver);
+		return -ENODEV;
+	}
+	dev = acpi_get_physical_device(ehotk->device->handle);
+
+	if (!acpi_video_backlight_support()) {
+		result = eeepc_backlight_init(dev);
+		if (result)
+			goto fail_backlight;
+	} else
+		printk(EEEPC_INFO "Backlight controlled by ACPI video "
+		       "driver\n");
+
+	result = eeepc_hwmon_init(dev);
+	if (result)
+		goto fail_hwmon;
+	/* Register platform stuff */
+	result = platform_driver_register(&platform_driver);
+	if (result)
+		goto fail_platform_driver;
+	platform_device = platform_device_alloc(EEEPC_HOTK_FILE, -1);
+	if (!platform_device) {
+		result = -ENOMEM;
+		goto fail_platform_device1;
+	}
+	result = platform_device_add(platform_device);
+	if (result)
+		goto fail_platform_device2;
+	result = sysfs_create_group(&platform_device->dev.kobj,
+				    &platform_attribute_group);
+	if (result)
+		goto fail_sysfs;
+	return 0;
+fail_sysfs:
+	platform_device_del(platform_device);
+fail_platform_device2:
+	platform_device_put(platform_device);
+fail_platform_device1:
+	platform_driver_unregister(&platform_driver);
+fail_platform_driver:
+	eeepc_hwmon_exit();
+fail_hwmon:
+	eeepc_backlight_exit();
+fail_backlight:
+	return result;
+}
+
+module_init(eeepc_laptop_init);
+module_exit(eeepc_laptop_exit);
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
new file mode 100644
index 0000000..65dc415
--- /dev/null
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -0,0 +1,1293 @@
+/*-*-linux-c-*-*/
+
+/*
+  Copyright (C) 2007,2008 Jonathan Woithe <jwoithe@physics.adelaide.edu.au>
+  Copyright (C) 2008 Peter Gruber <nokos@gmx.net>
+  Copyright (C) 2008 Tony Vroon <tony@linx.net>
+  Based on earlier work:
+    Copyright (C) 2003 Shane Spencer <shane@bogomip.com>
+    Adrian Yee <brewt-fujitsu@brewt.org>
+
+  Templated from msi-laptop.c and thinkpad_acpi.c, which are copyright
+  by their respective authors.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+  02110-1301, USA.
+ */
+
+/*
+ * fujitsu-laptop.c - Fujitsu laptop support, providing access to additional
+ * features made available on a range of Fujitsu laptops including the
+ * P2xxx/P5xxx/S6xxx/S7xxx series.
+ *
+ * This driver exports a few files in /sys/devices/platform/fujitsu-laptop/;
+ * others may be added at a later date.
+ *
+ *   lcd_level - Screen brightness: contains a single integer in the
+ *   range 0..7. (rw)
+ *
+ * In addition to these platform device attributes the driver
+ * registers itself in the Linux backlight control subsystem and is
+ * available to userspace under /sys/class/backlight/fujitsu-laptop/.
+ *
+ * Hotkeys present on certain Fujitsu laptops (e.g. the S6xxx series) are
+ * also supported by this driver.
+ *
+ * This driver has been tested on a Fujitsu Lifebook S6410, S7020 and
+ * P8010.  It should work on most P-series and S-series Lifebooks, but
+ * YMMV.
+ *
+ * The module parameter use_alt_lcd_levels switches between different ACPI
+ * brightness controls which are used by different Fujitsu laptops.  In most
+ * cases the correct method is automatically detected. "use_alt_lcd_levels=1"
+ * is applicable for a Fujitsu Lifebook S6410 if autodetection fails.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/backlight.h>
+#include <linux/input.h>
+#include <linux/kfifo.h>
+#include <linux/video_output.h>
+#include <linux/platform_device.h>
+#ifdef CONFIG_LEDS_CLASS
+#include <linux/leds.h>
+#endif
+
+#define FUJITSU_DRIVER_VERSION "0.5.0"
+
+#define FUJITSU_LCD_N_LEVELS 8
+
+#define ACPI_FUJITSU_CLASS              "fujitsu"
+#define ACPI_FUJITSU_HID                "FUJ02B1"
+#define ACPI_FUJITSU_DRIVER_NAME	"Fujitsu laptop FUJ02B1 ACPI brightness driver"
+#define ACPI_FUJITSU_DEVICE_NAME        "Fujitsu FUJ02B1"
+#define ACPI_FUJITSU_HOTKEY_HID 	"FUJ02E3"
+#define ACPI_FUJITSU_HOTKEY_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"
+#define ACPI_FUJITSU_HOTKEY_DEVICE_NAME "Fujitsu FUJ02E3"
+
+#define ACPI_FUJITSU_NOTIFY_CODE1     0x80
+
+#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS     0x86
+#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS     0x87
+
+/* FUNC interface - command values */
+#define FUNC_RFKILL	0x1000
+#define FUNC_LEDS	0x1001
+#define FUNC_BUTTONS	0x1002
+#define FUNC_BACKLIGHT  0x1004
+
+/* FUNC interface - responses */
+#define UNSUPPORTED_CMD 0x80000000
+
+#ifdef CONFIG_LEDS_CLASS
+/* FUNC interface - LED control */
+#define FUNC_LED_OFF	0x1
+#define FUNC_LED_ON	0x30001
+#define KEYBOARD_LAMPS	0x100
+#define LOGOLAMP_POWERON 0x2000
+#define LOGOLAMP_ALWAYS  0x4000
+#endif
+
+/* Hotkey details */
+#define KEY1_CODE	0x410	/* codes for the keys in the GIRB register */
+#define KEY2_CODE	0x411
+#define KEY3_CODE	0x412
+#define KEY4_CODE	0x413
+
+#define MAX_HOTKEY_RINGBUFFER_SIZE 100
+#define RINGBUFFERSIZE 40
+
+/* Debugging */
+#define FUJLAPTOP_LOG	   ACPI_FUJITSU_HID ": "
+#define FUJLAPTOP_ERR	   KERN_ERR FUJLAPTOP_LOG
+#define FUJLAPTOP_NOTICE   KERN_NOTICE FUJLAPTOP_LOG
+#define FUJLAPTOP_INFO	   KERN_INFO FUJLAPTOP_LOG
+#define FUJLAPTOP_DEBUG    KERN_DEBUG FUJLAPTOP_LOG
+
+#define FUJLAPTOP_DBG_ALL	  0xffff
+#define FUJLAPTOP_DBG_ERROR	  0x0001
+#define FUJLAPTOP_DBG_WARN	  0x0002
+#define FUJLAPTOP_DBG_INFO	  0x0004
+#define FUJLAPTOP_DBG_TRACE	  0x0008
+
+#define dbg_printk(a_dbg_level, format, arg...) \
+	do { if (dbg_level & a_dbg_level) \
+		printk(FUJLAPTOP_DEBUG "%s: " format, __func__ , ## arg); \
+	} while (0)
+#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
+#define vdbg_printk(a_dbg_level, format, arg...) \
+	dbg_printk(a_dbg_level, format, ## arg)
+#else
+#define vdbg_printk(a_dbg_level, format, arg...)
+#endif
+
+/* Device controlling the backlight and associated keys */
+struct fujitsu_t {
+	acpi_handle acpi_handle;
+	struct acpi_device *dev;
+	struct input_dev *input;
+	char phys[32];
+	struct backlight_device *bl_device;
+	struct platform_device *pf_device;
+	int keycode1, keycode2, keycode3, keycode4;
+
+	unsigned int max_brightness;
+	unsigned int brightness_changed;
+	unsigned int brightness_level;
+};
+
+static struct fujitsu_t *fujitsu;
+static int use_alt_lcd_levels = -1;
+static int disable_brightness_adjust = -1;
+
+/* Device used to access other hotkeys on the laptop */
+struct fujitsu_hotkey_t {
+	acpi_handle acpi_handle;
+	struct acpi_device *dev;
+	struct input_dev *input;
+	char phys[32];
+	struct platform_device *pf_device;
+	struct kfifo *fifo;
+	spinlock_t fifo_lock;
+	int rfkill_state;
+	int logolamp_registered;
+	int kblamps_registered;
+};
+
+static struct fujitsu_hotkey_t *fujitsu_hotkey;
+
+static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event,
+				       void *data);
+
+#ifdef CONFIG_LEDS_CLASS
+static enum led_brightness logolamp_get(struct led_classdev *cdev);
+static void logolamp_set(struct led_classdev *cdev,
+			       enum led_brightness brightness);
+
+struct led_classdev logolamp_led = {
+ .name = "fujitsu::logolamp",
+ .brightness_get = logolamp_get,
+ .brightness_set = logolamp_set
+};
+
+static enum led_brightness kblamps_get(struct led_classdev *cdev);
+static void kblamps_set(struct led_classdev *cdev,
+			       enum led_brightness brightness);
+
+struct led_classdev kblamps_led = {
+ .name = "fujitsu::kblamps",
+ .brightness_get = kblamps_get,
+ .brightness_set = kblamps_set
+};
+#endif
+
+#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
+static u32 dbg_level = 0x03;
+#endif
+
+static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data);
+
+/* Fujitsu ACPI interface function */
+
+static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
+{
+	acpi_status status = AE_OK;
+	union acpi_object params[4] = {
+	{ .type = ACPI_TYPE_INTEGER },
+	{ .type = ACPI_TYPE_INTEGER },
+	{ .type = ACPI_TYPE_INTEGER },
+	{ .type = ACPI_TYPE_INTEGER }
+	};
+	struct acpi_object_list arg_list = { 4, &params[0] };
+	struct acpi_buffer output;
+	union acpi_object out_obj;
+	acpi_handle handle = NULL;
+
+	status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle);
+	if (ACPI_FAILURE(status)) {
+		vdbg_printk(FUJLAPTOP_DBG_ERROR,
+				"FUNC interface is not present\n");
+		return -ENODEV;
+	}
+
+	params[0].integer.value = cmd;
+	params[1].integer.value = arg0;
+	params[2].integer.value = arg1;
+	params[3].integer.value = arg2;
+
+	output.length = sizeof(out_obj);
+	output.pointer = &out_obj;
+
+	status = acpi_evaluate_object(handle, NULL, &arg_list, &output);
+	if (ACPI_FAILURE(status)) {
+		vdbg_printk(FUJLAPTOP_DBG_WARN,
+			"FUNC 0x%x (args 0x%x, 0x%x, 0x%x) call failed\n",
+				cmd, arg0, arg1, arg2);
+		return -ENODEV;
+	}
+
+	if (out_obj.type != ACPI_TYPE_INTEGER) {
+		vdbg_printk(FUJLAPTOP_DBG_WARN,
+			"FUNC 0x%x (args 0x%x, 0x%x, 0x%x) did not "
+			"return an integer\n",
+			cmd, arg0, arg1, arg2);
+		return -ENODEV;
+	}
+
+	vdbg_printk(FUJLAPTOP_DBG_TRACE,
+		"FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n",
+			cmd, arg0, arg1, arg2, (int)out_obj.integer.value);
+	return out_obj.integer.value;
+}
+
+#ifdef CONFIG_LEDS_CLASS
+/* LED class callbacks */
+
+static void logolamp_set(struct led_classdev *cdev,
+			       enum led_brightness brightness)
+{
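+	/* LED_FULL enables both the power-on and always-on lamp modes,
+	   LED_HALF only the power-on mode, lower values turn the lamp off */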
+	if (brightness >= LED_FULL) {
+		call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON);
+		call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_ON);
+	} else if (brightness >= LED_HALF) {
+		call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON);
+		call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_OFF);
+	} else {
+		call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_OFF);
+	}
+}
+
+static void kblamps_set(struct led_classdev *cdev,
+			       enum led_brightness brightness)
+{
+	if (brightness >= LED_FULL)
+		call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_ON);
+	else
+		call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF);
+}
+
+static enum led_brightness logolamp_get(struct led_classdev *cdev)
+{
+	enum led_brightness brightness = LED_OFF;
+	int poweron, always;
+
+	poweron = call_fext_func(FUNC_LEDS, 0x2, LOGOLAMP_POWERON, 0x0);
+	if (poweron == FUNC_LED_ON) {
+		brightness = LED_HALF;
+		always = call_fext_func(FUNC_LEDS, 0x2, LOGOLAMP_ALWAYS, 0x0);
+		if (always == FUNC_LED_ON)
+			brightness = LED_FULL;
+	}
+	return brightness;
+}
+
+static enum led_brightness kblamps_get(struct led_classdev *cdev)
+{
+	enum led_brightness brightness = LED_OFF;
+
+	if (call_fext_func(FUNC_LEDS, 0x2, KEYBOARD_LAMPS, 0x0) == FUNC_LED_ON)
+		brightness = LED_FULL;
+
+	return brightness;
+}
+#endif
+
+/* Hardware access for LCD brightness control */
+
+static int set_lcd_level(int level)
+{
+	acpi_status status = AE_OK;
+	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+	struct acpi_object_list arg_list = { 1, &arg0 };
+	acpi_handle handle = NULL;
+
+	vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",
+		    level);
+
+	if (!fujitsu)
+		return -EINVAL;
+
+	if (level < 0 || level >= fujitsu->max_brightness)
+		return -EINVAL;
+
+	status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle);
+	if (ACPI_FAILURE(status)) {
+		vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n");
+		return -ENODEV;
+	}
+
+	arg0.integer.value = level;
+
+	status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	return 0;
+}
+
+static int set_lcd_level_alt(int level)
+{
+	acpi_status status = AE_OK;
+	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+	struct acpi_object_list arg_list = { 1, &arg0 };
+	acpi_handle handle = NULL;
+
+	vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",
+		    level);
+
+	if (!fujitsu)
+		return -EINVAL;
+
+	if (level < 0 || level >= fujitsu->max_brightness)
+		return -EINVAL;
+
+	status = acpi_get_handle(fujitsu->acpi_handle, "SBL2", &handle);
+	if (ACPI_FAILURE(status)) {
+		vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n");
+		return -ENODEV;
+	}
+
+	arg0.integer.value = level;
+
+	status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	return 0;
+}
+
+static int get_lcd_level(void)
+{
+	unsigned long long state = 0;
+	acpi_status status = AE_OK;
+
+	vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n");
+
+	status =
+	    acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
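+	/* GBLL packs the current level in the low bits and a "brightness
+	   changed" flag in bit 31 */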
+	fujitsu->brightness_level = state & 0x0fffffff;
+
+	if (state & 0x80000000)
+		fujitsu->brightness_changed = 1;
+	else
+		fujitsu->brightness_changed = 0;
+
+	return fujitsu->brightness_level;
+}
+
+static int get_max_brightness(void)
+{
+	unsigned long long state = 0;
+	acpi_status status = AE_OK;
+
+	vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n");
+
+	status =
+	    acpi_evaluate_integer(fujitsu->acpi_handle, "RBLL", NULL, &state);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	fujitsu->max_brightness = state;
+
+	return fujitsu->max_brightness;
+}
+
+/* Backlight device stuff */
+
+static int bl_get_brightness(struct backlight_device *b)
+{
+	return get_lcd_level();
+}
+
+static int bl_update_status(struct backlight_device *b)
+{
+	int ret;
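+
+	/* power state 4 (FB_BLANK_POWERDOWN) turns the backlight off,
+	   anything else turns it back on */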
+	if (b->props.power == 4)
+		ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
+	else
+		ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
+	if (ret != 0)
+		vdbg_printk(FUJLAPTOP_DBG_ERROR,
+			"Unable to adjust backlight power, error code %i\n",
+			ret);
+
+	if (use_alt_lcd_levels)
+		ret = set_lcd_level_alt(b->props.brightness);
+	else
+		ret = set_lcd_level(b->props.brightness);
+	if (ret != 0)
+		vdbg_printk(FUJLAPTOP_DBG_ERROR,
+			"Unable to adjust LCD brightness, error code %i\n",
+			ret);
+	return ret;
+}
+
+static struct backlight_ops fujitsubl_ops = {
+	.get_brightness = bl_get_brightness,
+	.update_status = bl_update_status,
+};
+
+/* Platform LCD brightness device */
+
+static ssize_t
+show_max_brightness(struct device *dev,
+		    struct device_attribute *attr, char *buf)
+{
+
+	int ret;
+
+	ret = get_max_brightness();
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+show_brightness_changed(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+
+	int ret;
+
+	ret = fujitsu->brightness_changed;
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t show_lcd_level(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+
+	int ret;
+
+	ret = get_lcd_level();
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t store_lcd_level(struct device *dev,
+			       struct device_attribute *attr, const char *buf,
+			       size_t count)
+{
+
+	int level, ret;
+
+	if (sscanf(buf, "%i", &level) != 1
+	    || (level < 0 || level >= fujitsu->max_brightness))
+		return -EINVAL;
+
+	if (use_alt_lcd_levels)
+		ret = set_lcd_level_alt(level);
+	else
+		ret = set_lcd_level(level);
+	if (ret < 0)
+		return ret;
+
+	ret = get_lcd_level();
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static ssize_t
+ignore_store(struct device *dev,
+	     struct device_attribute *attr, const char *buf, size_t count)
+{
+	return count;
+}
+
+static ssize_t
+show_lid_state(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	if (fujitsu_hotkey->rfkill_state == UNSUPPORTED_CMD)
+		return sprintf(buf, "unknown\n");
+	if (fujitsu_hotkey->rfkill_state & 0x100)
+		return sprintf(buf, "open\n");
+	else
+		return sprintf(buf, "closed\n");
+}
+
+static ssize_t
+show_dock_state(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	if (fujitsu_hotkey->rfkill_state == UNSUPPORTED_CMD)
+		return sprintf(buf, "unknown\n");
+	if (fujitsu_hotkey->rfkill_state & 0x200)
+		return sprintf(buf, "docked\n");
+	else
+		return sprintf(buf, "undocked\n");
+}
+
+static ssize_t
+show_radios_state(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	if (fujitsu_hotkey->rfkill_state == UNSUPPORTED_CMD)
+		return sprintf(buf, "unknown\n");
+	if (fujitsu_hotkey->rfkill_state & 0x20)
+		return sprintf(buf, "on\n");
+	else
+		return sprintf(buf, "killed\n");
+}
+
+static DEVICE_ATTR(max_brightness, 0444, show_max_brightness, ignore_store);
+static DEVICE_ATTR(brightness_changed, 0444, show_brightness_changed,
+		   ignore_store);
+static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level);
+static DEVICE_ATTR(lid, 0444, show_lid_state, ignore_store);
+static DEVICE_ATTR(dock, 0444, show_dock_state, ignore_store);
+static DEVICE_ATTR(radios, 0444, show_radios_state, ignore_store);
+
+static struct attribute *fujitsupf_attributes[] = {
+	&dev_attr_brightness_changed.attr,
+	&dev_attr_max_brightness.attr,
+	&dev_attr_lcd_level.attr,
+	&dev_attr_lid.attr,
+	&dev_attr_dock.attr,
+	&dev_attr_radios.attr,
+	NULL
+};
+
+static struct attribute_group fujitsupf_attribute_group = {
+	.attrs = fujitsupf_attributes
+};
+
+static struct platform_driver fujitsupf_driver = {
+	.driver = {
+		   .name = "fujitsu-laptop",
+		   .owner = THIS_MODULE,
+		   }
+};
+
+static void dmi_check_cb_common(const struct dmi_system_id *id)
+{
+	acpi_handle handle;
+	printk(KERN_INFO "fujitsu-laptop: Identified laptop model '%s'.\n",
+	       id->ident);
+	if (use_alt_lcd_levels == -1) {
+		if (ACPI_SUCCESS(acpi_get_handle(NULL,
+				"\\_SB.PCI0.LPCB.FJEX.SBL2", &handle)))
+			use_alt_lcd_levels = 1;
+		else
+			use_alt_lcd_levels = 0;
+		vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as "
+			"%i\n", use_alt_lcd_levels);
+	}
+}
+
+static int dmi_check_cb_s6410(const struct dmi_system_id *id)
+{
+	dmi_check_cb_common(id);
+	fujitsu->keycode1 = KEY_SCREENLOCK;	/* "Lock" */
+	fujitsu->keycode2 = KEY_HELP;	/* "Mobility Center" */
+	return 0;
+}
+
+static int dmi_check_cb_s6420(const struct dmi_system_id *id)
+{
+	dmi_check_cb_common(id);
+	fujitsu->keycode1 = KEY_SCREENLOCK;	/* "Lock" */
+	fujitsu->keycode2 = KEY_HELP;	/* "Mobility Center" */
+	return 0;
+}
+
+static int dmi_check_cb_p8010(const struct dmi_system_id *id)
+{
+	dmi_check_cb_common(id);
+	fujitsu->keycode1 = KEY_HELP;	/* "Support" */
+	fujitsu->keycode3 = KEY_SWITCHVIDEOMODE;	/* "Presentation" */
+	fujitsu->keycode4 = KEY_WWW;	/* "Internet" */
+	return 0;
+}
+
+static struct dmi_system_id fujitsu_dmi_table[] = {
+	{
+	 .ident = "Fujitsu Siemens S6410",
+	 .matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6410"),
+		     },
+	 .callback = dmi_check_cb_s6410},
+	{
+	 .ident = "Fujitsu Siemens S6420",
+	 .matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6420"),
+		     },
+	 .callback = dmi_check_cb_s6420},
+	{
+	 .ident = "Fujitsu LifeBook P8010",
+	 .matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P8010"),
+		     },
+	 .callback = dmi_check_cb_p8010},
+	{}
+};
+
+/* ACPI device for LCD brightness control */
+
+static int acpi_fujitsu_add(struct acpi_device *device)
+{
+	acpi_status status;
+	acpi_handle handle;
+	int result = 0;
+	int state = 0;
+	struct input_dev *input;
+	int error;
+
+	if (!device)
+		return -EINVAL;
+
+	fujitsu->acpi_handle = device->handle;
+	sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_DEVICE_NAME);
+	sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
+	device->driver_data = fujitsu;
+
+	status = acpi_install_notify_handler(device->handle,
+					     ACPI_DEVICE_NOTIFY,
+					     acpi_fujitsu_notify, fujitsu);
+
+	if (ACPI_FAILURE(status)) {
+		printk(KERN_ERR "Error installing notify handler\n");
+		error = -ENODEV;
+		goto err_stop;
+	}
+
+	fujitsu->input = input = input_allocate_device();
+	if (!input) {
+		error = -ENOMEM;
+		goto err_uninstall_notify;
+	}
+
+	snprintf(fujitsu->phys, sizeof(fujitsu->phys),
+		 "%s/video/input0", acpi_device_hid(device));
+
+	input->name = acpi_device_name(device);
+	input->phys = fujitsu->phys;
+	input->id.bustype = BUS_HOST;
+	input->id.product = 0x06;
+	input->dev.parent = &device->dev;
+	input->evbit[0] = BIT(EV_KEY);
+	set_bit(KEY_BRIGHTNESSUP, input->keybit);
+	set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
+	set_bit(KEY_UNKNOWN, input->keybit);
+
+	error = input_register_device(input);
+	if (error)
+		goto err_free_input_dev;
+
+	result = acpi_bus_get_power(fujitsu->acpi_handle, &state);
+	if (result) {
+		printk(KERN_ERR "Error reading power state\n");
+		goto end;
+	}
+
+	printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
+	       acpi_device_name(device), acpi_device_bid(device),
+	       !device->power.state ? "on" : "off");
+
+	fujitsu->dev = device;
+
+	if (ACPI_SUCCESS
+	    (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
+		vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
+		if (ACPI_FAILURE
+		    (acpi_evaluate_object
+		     (device->handle, METHOD_NAME__INI, NULL, NULL)))
+			printk(KERN_ERR "_INI Method failed\n");
+	}
+
+	/* do config (detect defaults) */
+	use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0;
+	disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0;
+	vdbg_printk(FUJLAPTOP_DBG_INFO,
+		    "config: [alt interface: %d], [adjust disable: %d]\n",
+		    use_alt_lcd_levels, disable_brightness_adjust);
+
+	if (get_max_brightness() <= 0)
+		fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS;
+	get_lcd_level();
+
+	return result;
+
+end:
+err_free_input_dev:
+	input_free_device(input);
+err_uninstall_notify:
+	acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+				   acpi_fujitsu_notify);
+err_stop:
+
+	return result;
+}
+
+static int acpi_fujitsu_remove(struct acpi_device *device, int type)
+{
+	acpi_status status;
+	struct fujitsu_t *fujitsu = NULL;
+
+	if (!device || !acpi_driver_data(device))
+		return -EINVAL;
+
+	fujitsu = acpi_driver_data(device);
+
+	status = acpi_remove_notify_handler(fujitsu->acpi_handle,
+					    ACPI_DEVICE_NOTIFY,
+					    acpi_fujitsu_notify);
+
+	if (!device || !acpi_driver_data(device))
+		return -EINVAL;
+
+	fujitsu->acpi_handle = NULL;
+
+	return 0;
+}
+
+/* Brightness notify */
+
+static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data)
+{
+	struct input_dev *input;
+	int keycode;
+	int oldb, newb;
+
+	input = fujitsu->input;
+
+	switch (event) {
+	case ACPI_FUJITSU_NOTIFY_CODE1:
+		keycode = 0;
+		oldb = fujitsu->brightness_level;
+		get_lcd_level();
+		newb = fujitsu->brightness_level;
+
+		vdbg_printk(FUJLAPTOP_DBG_TRACE,
+			    "brightness button event [%i -> %i (%i)]\n",
+			    oldb, newb, fujitsu->brightness_changed);
+
+		if (oldb < newb) {
+			if (disable_brightness_adjust != 1) {
+				if (use_alt_lcd_levels)
+					set_lcd_level_alt(newb);
+				else
+					set_lcd_level(newb);
+			}
+			acpi_bus_generate_proc_event(fujitsu->dev,
+				ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, 0);
+			keycode = KEY_BRIGHTNESSUP;
+		} else if (oldb > newb) {
+			if (disable_brightness_adjust != 1) {
+				if (use_alt_lcd_levels)
+					set_lcd_level_alt(newb);
+				else
+					set_lcd_level(newb);
+			}
+			acpi_bus_generate_proc_event(fujitsu->dev,
+				ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, 0);
+			keycode = KEY_BRIGHTNESSDOWN;
+		}
+		break;
+	default:
+		keycode = KEY_UNKNOWN;
+		vdbg_printk(FUJLAPTOP_DBG_WARN,
+			    "unsupported event [0x%x]\n", event);
+		break;
+	}
+
+	if (keycode != 0) {
+		input_report_key(input, keycode, 1);
+		input_sync(input);
+		input_report_key(input, keycode, 0);
+		input_sync(input);
+	}
+
+	return;
+}
+
+/* ACPI device for hotkey handling */
+
+static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
+{
+	acpi_status status;
+	acpi_handle handle;
+	int result = 0;
+	int state = 0;
+	struct input_dev *input;
+	int error;
+	int i;
+
+	if (!device)
+		return -EINVAL;
+
+	fujitsu_hotkey->acpi_handle = device->handle;
+	sprintf(acpi_device_name(device), "%s",
+		ACPI_FUJITSU_HOTKEY_DEVICE_NAME);
+	sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
+	device->driver_data = fujitsu_hotkey;
+
+	status = acpi_install_notify_handler(device->handle,
+					     ACPI_DEVICE_NOTIFY,
+					     acpi_fujitsu_hotkey_notify,
+					     fujitsu_hotkey);
+
+	if (ACPI_FAILURE(status)) {
+		printk(KERN_ERR "Error installing notify handler\n");
+		error = -ENODEV;
+		goto err_stop;
+	}
+
+	/* kfifo */
+	spin_lock_init(&fujitsu_hotkey->fifo_lock);
+	fujitsu_hotkey->fifo =
+	    kfifo_alloc(RINGBUFFERSIZE * sizeof(int), GFP_KERNEL,
+			&fujitsu_hotkey->fifo_lock);
+	if (IS_ERR(fujitsu_hotkey->fifo)) {
+		printk(KERN_ERR "kfifo_alloc failed\n");
+		error = PTR_ERR(fujitsu_hotkey->fifo);
+		goto err_stop;
+	}
+
+	fujitsu_hotkey->input = input = input_allocate_device();
+	if (!input) {
+		error = -ENOMEM;
+		goto err_uninstall_notify;
+	}
+
+	snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys),
+		 "%s/video/input0", acpi_device_hid(device));
+
+	input->name = acpi_device_name(device);
+	input->phys = fujitsu_hotkey->phys;
+	input->id.bustype = BUS_HOST;
+	input->id.product = 0x06;
+	input->dev.parent = &device->dev;
+
+	set_bit(EV_KEY, input->evbit);
+	set_bit(fujitsu->keycode1, input->keybit);
+	set_bit(fujitsu->keycode2, input->keybit);
+	set_bit(fujitsu->keycode3, input->keybit);
+	set_bit(fujitsu->keycode4, input->keybit);
+	set_bit(KEY_UNKNOWN, input->keybit);
+
+	error = input_register_device(input);
+	if (error)
+		goto err_free_input_dev;
+
+	result = acpi_bus_get_power(fujitsu_hotkey->acpi_handle, &state);
+	if (result) {
+		printk(KERN_ERR "Error reading power state\n");
+		goto end;
+	}
+
+	printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
+	       acpi_device_name(device), acpi_device_bid(device),
+	       !device->power.state ? "on" : "off");
+
+	fujitsu_hotkey->dev = device;
+
+	if (ACPI_SUCCESS
+	    (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
+		vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
+		if (ACPI_FAILURE
+		    (acpi_evaluate_object
+		     (device->handle, METHOD_NAME__INI, NULL, NULL)))
+			printk(KERN_ERR "_INI Method failed\n");
+	}
+
+	i = 0;
+	while (call_fext_func(FUNC_BUTTONS, 0x1, 0x0, 0x0) != 0
+		&& (i++) < MAX_HOTKEY_RINGBUFFER_SIZE)
+		; /* No action, result is discarded */
+	vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i);
+
+	fujitsu_hotkey->rfkill_state =
+		call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);
+
+	/* Suspect this is a keymap of the application panel, print it */
+	printk(KERN_INFO "fujitsu-laptop: BTNI: [0x%x]\n",
+		call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
+
+	#ifdef CONFIG_LEDS_CLASS
+	if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
+		result = led_classdev_register(&fujitsu->pf_device->dev,
+						&logolamp_led);
+		if (result == 0) {
+			fujitsu_hotkey->logolamp_registered = 1;
+		} else {
+			printk(KERN_ERR "fujitsu-laptop: Could not register "
+			"LED handler for logo lamp, error %i\n", result);
+		}
+	}
+
+	if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) &&
+	   (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) {
+		result = led_classdev_register(&fujitsu->pf_device->dev,
+						&kblamps_led);
+		if (result == 0) {
+			fujitsu_hotkey->kblamps_registered = 1;
+		} else {
+			printk(KERN_ERR "fujitsu-laptop: Could not register "
+			"LED handler for keyboard lamps, error %i\n", result);
+		}
+	}
+	#endif
+
+	return result;
+
+end:
+err_free_input_dev:
+	input_free_device(input);
+err_uninstall_notify:
+	acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+				   acpi_fujitsu_hotkey_notify);
+	kfifo_free(fujitsu_hotkey->fifo);
+err_stop:
+
+	return result;
+}
+
+static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type)
+{
+	acpi_status status;
+	struct fujitsu_hotkey_t *fujitsu_hotkey = NULL;
+
+	if (!device || !acpi_driver_data(device))
+		return -EINVAL;
+
+	fujitsu_hotkey = acpi_driver_data(device);
+
+	status = acpi_remove_notify_handler(fujitsu_hotkey->acpi_handle,
+					    ACPI_DEVICE_NOTIFY,
+					    acpi_fujitsu_hotkey_notify);
+
+	fujitsu_hotkey->acpi_handle = NULL;
+
+	kfifo_free(fujitsu_hotkey->fifo);
+
+	return 0;
+}
+
+static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event,
+				       void *data)
+{
+	struct input_dev *input;
+	int keycode, keycode_r;
+	unsigned int irb = 1;
+	int i, status;
+
+	input = fujitsu_hotkey->input;
+
+	fujitsu_hotkey->rfkill_state =
+		call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);
+
+	switch (event) {
+	case ACPI_FUJITSU_NOTIFY_CODE1:
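+		/* Drain the firmware button ring buffer: a non-zero code is
+		 * a key press (remember it in the kfifo and report key-down);
+		 * a zero code means the buttons were released, so pop every
+		 * remembered keycode and report the matching key-up. */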
+		i = 0;
+		while ((irb =
+			call_fext_func(FUNC_BUTTONS, 0x1, 0x0, 0x0)) != 0
+				&& (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) {
+			switch (irb & 0x4ff) {
+			case KEY1_CODE:
+				keycode = fujitsu->keycode1;
+				break;
+			case KEY2_CODE:
+				keycode = fujitsu->keycode2;
+				break;
+			case KEY3_CODE:
+				keycode = fujitsu->keycode3;
+				break;
+			case KEY4_CODE:
+				keycode = fujitsu->keycode4;
+				break;
+			case 0:
+				keycode = 0;
+				break;
+			default:
+				vdbg_printk(FUJLAPTOP_DBG_WARN,
+					    "Unknown GIRB result [%x]\n", irb);
+				keycode = -1;
+				break;
+			}
+			if (keycode > 0) {
+				vdbg_printk(FUJLAPTOP_DBG_TRACE,
+					"Push keycode into ringbuffer [%d]\n",
+					keycode);
+				status = kfifo_put(fujitsu_hotkey->fifo,
+						   (unsigned char *)&keycode,
+						   sizeof(keycode));
+				if (status != sizeof(keycode)) {
+					vdbg_printk(FUJLAPTOP_DBG_WARN,
+					    "Could not push keycode [0x%x]\n",
+					    keycode);
+				} else {
+					input_report_key(input, keycode, 1);
+					input_sync(input);
+				}
+			} else if (keycode == 0) {
+				while ((status =
+					kfifo_get
+					(fujitsu_hotkey->fifo, (unsigned char *)
+					 &keycode_r,
+					 sizeof
+					 (keycode_r))) == sizeof(keycode_r)) {
+					input_report_key(input, keycode_r, 0);
+					input_sync(input);
+					vdbg_printk(FUJLAPTOP_DBG_TRACE,
+					  "Pop keycode from ringbuffer [%d]\n",
+					  keycode_r);
+				}
+			}
+		}
+
+		break;
+	default:
+		keycode = KEY_UNKNOWN;
+		vdbg_printk(FUJLAPTOP_DBG_WARN,
+			    "Unsupported event [0x%x]\n", event);
+		input_report_key(input, keycode, 1);
+		input_sync(input);
+		input_report_key(input, keycode, 0);
+		input_sync(input);
+		break;
+	}
+
+	return;
+}
+
+/* Initialization */
+
+static const struct acpi_device_id fujitsu_device_ids[] = {
+	{ACPI_FUJITSU_HID, 0},
+	{"", 0},
+};
+
+static struct acpi_driver acpi_fujitsu_driver = {
+	.name = ACPI_FUJITSU_DRIVER_NAME,
+	.class = ACPI_FUJITSU_CLASS,
+	.ids = fujitsu_device_ids,
+	.ops = {
+		.add = acpi_fujitsu_add,
+		.remove = acpi_fujitsu_remove,
+		},
+};
+
+static const struct acpi_device_id fujitsu_hotkey_device_ids[] = {
+	{ACPI_FUJITSU_HOTKEY_HID, 0},
+	{"", 0},
+};
+
+static struct acpi_driver acpi_fujitsu_hotkey_driver = {
+	.name = ACPI_FUJITSU_HOTKEY_DRIVER_NAME,
+	.class = ACPI_FUJITSU_CLASS,
+	.ids = fujitsu_hotkey_device_ids,
+	.ops = {
+		.add = acpi_fujitsu_hotkey_add,
+		.remove = acpi_fujitsu_hotkey_remove,
+		},
+};
+
+static int __init fujitsu_init(void)
+{
+	int ret, result, max_brightness;
+
+	if (acpi_disabled)
+		return -ENODEV;
+
+	fujitsu = kmalloc(sizeof(struct fujitsu_t), GFP_KERNEL);
+	if (!fujitsu)
+		return -ENOMEM;
+	memset(fujitsu, 0, sizeof(struct fujitsu_t));
+	fujitsu->keycode1 = KEY_PROG1;
+	fujitsu->keycode2 = KEY_PROG2;
+	fujitsu->keycode3 = KEY_PROG3;
+	fujitsu->keycode4 = KEY_PROG4;
+	dmi_check_system(fujitsu_dmi_table);
+
+	result = acpi_bus_register_driver(&acpi_fujitsu_driver);
+	if (result < 0) {
+		ret = -ENODEV;
+		goto fail_acpi;
+	}
+
+	/* Register platform stuff */
+
+	fujitsu->pf_device = platform_device_alloc("fujitsu-laptop", -1);
+	if (!fujitsu->pf_device) {
+		ret = -ENOMEM;
+		goto fail_platform_driver;
+	}
+
+	ret = platform_device_add(fujitsu->pf_device);
+	if (ret)
+		goto fail_platform_device1;
+
+	ret =
+	    sysfs_create_group(&fujitsu->pf_device->dev.kobj,
+			       &fujitsupf_attribute_group);
+	if (ret)
+		goto fail_platform_device2;
+
+	/* Register backlight stuff */
+
+	if (!acpi_video_backlight_support()) {
+		fujitsu->bl_device =
+			backlight_device_register("fujitsu-laptop", NULL, NULL,
+						  &fujitsubl_ops);
+		if (IS_ERR(fujitsu->bl_device))
+			return PTR_ERR(fujitsu->bl_device);
+		max_brightness = fujitsu->max_brightness;
+		fujitsu->bl_device->props.max_brightness = max_brightness - 1;
+		fujitsu->bl_device->props.brightness = fujitsu->brightness_level;
+	}
+
+	ret = platform_driver_register(&fujitsupf_driver);
+	if (ret)
+		goto fail_backlight;
+
+	/* Register hotkey driver */
+
+	fujitsu_hotkey = kmalloc(sizeof(struct fujitsu_hotkey_t), GFP_KERNEL);
+	if (!fujitsu_hotkey) {
+		ret = -ENOMEM;
+		goto fail_hotkey;
+	}
+	memset(fujitsu_hotkey, 0, sizeof(struct fujitsu_hotkey_t));
+
+	result = acpi_bus_register_driver(&acpi_fujitsu_hotkey_driver);
+	if (result < 0) {
+		ret = -ENODEV;
+		goto fail_hotkey1;
+	}
+
+	/* Sync backlight power status (needs FUJ02E3 device, hence deferred) */
+
+	if (!acpi_video_backlight_support()) {
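+		/* 4 is FB_BLANK_POWERDOWN, 0 is FB_BLANK_UNBLANK */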
+		if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)
+			fujitsu->bl_device->props.power = 4;
+		else
+			fujitsu->bl_device->props.power = 0;
+	}
+
+	printk(KERN_INFO "fujitsu-laptop: driver " FUJITSU_DRIVER_VERSION
+	       " successfully loaded.\n");
+
+	return 0;
+
+fail_hotkey1:
+
+	kfree(fujitsu_hotkey);
+
+fail_hotkey:
+
+	platform_driver_unregister(&fujitsupf_driver);
+
+fail_backlight:
+
+	if (fujitsu->bl_device)
+		backlight_device_unregister(fujitsu->bl_device);
+
+fail_platform_device2:
+
+	platform_device_del(fujitsu->pf_device);
+
+fail_platform_device1:
+
+	platform_device_put(fujitsu->pf_device);
+
+fail_platform_driver:
+
+	acpi_bus_unregister_driver(&acpi_fujitsu_driver);
+
+fail_acpi:
+
+	kfree(fujitsu);
+
+	return ret;
+}
+
+static void __exit fujitsu_cleanup(void)
+{
+	#ifdef CONFIG_LEDS_CLASS
+	if (fujitsu_hotkey->logolamp_registered != 0)
+		led_classdev_unregister(&logolamp_led);
+
+	if (fujitsu_hotkey->kblamps_registered != 0)
+		led_classdev_unregister(&kblamps_led);
+	#endif
+
+	sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
+			   &fujitsupf_attribute_group);
+	platform_device_unregister(fujitsu->pf_device);
+	platform_driver_unregister(&fujitsupf_driver);
+	if (fujitsu->bl_device)
+		backlight_device_unregister(fujitsu->bl_device);
+
+	acpi_bus_unregister_driver(&acpi_fujitsu_driver);
+
+	kfree(fujitsu);
+
+	acpi_bus_unregister_driver(&acpi_fujitsu_hotkey_driver);
+
+	kfree(fujitsu_hotkey);
+
+	printk(KERN_INFO "fujitsu-laptop: driver unloaded.\n");
+}
+
+module_init(fujitsu_init);
+module_exit(fujitsu_cleanup);
+
+module_param(use_alt_lcd_levels, uint, 0644);
+MODULE_PARM_DESC(use_alt_lcd_levels,
+		 "Use alternative interface for lcd_levels (needed for Lifebook s6410).");
+module_param(disable_brightness_adjust, uint, 0644);
+MODULE_PARM_DESC(disable_brightness_adjust, "Disable brightness adjustment.");
+#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
+module_param_named(debug, dbg_level, uint, 0644);
+MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
+#endif
+
+MODULE_AUTHOR("Jonathan Woithe, Peter Gruber, Tony Vroon");
+MODULE_DESCRIPTION("Fujitsu laptop extras support");
+MODULE_VERSION(FUJITSU_DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
+MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*");
+MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
+
+static struct pnp_device_id pnp_ids[] = {
+	{.id = "FUJ02bf"},
+	{.id = "FUJ02B1"},
+	{.id = "FUJ02E3"},
+	{.id = ""}
+};
+
+MODULE_DEVICE_TABLE(pnp, pnp_ids);
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
new file mode 100644
index 0000000..7c789f0
--- /dev/null
+++ b/drivers/platform/x86/hp-wmi.c
@@ -0,0 +1,521 @@
+/*
+ * HP WMI hotkeys
+ *
+ * Copyright (C) 2008 Red Hat <mjg@redhat.com>
+ *
+ * Portions based on wistron_btns.c:
+ * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
+ * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
+ * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/rfkill.h>
+#include <linux/string.h>
+
+MODULE_AUTHOR("Matthew Garrett <mjg59@srcf.ucam.org>");
+MODULE_DESCRIPTION("HP laptop WMI hotkeys driver");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
+MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
+
+#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
+#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
+
+#define HPWMI_DISPLAY_QUERY 0x1
+#define HPWMI_HDDTEMP_QUERY 0x2
+#define HPWMI_ALS_QUERY 0x3
+#define HPWMI_DOCK_QUERY 0x4
+#define HPWMI_WIRELESS_QUERY 0x5
+#define HPWMI_HOTKEY_QUERY 0xc
+
+static int __init hp_wmi_bios_setup(struct platform_device *device);
+static int __exit hp_wmi_bios_remove(struct platform_device *device);
+
+struct bios_args {
+	u32 signature;
+	u32 command;
+	u32 commandtype;
+	u32 datasize;
+	u32 data;
+};
+
+struct bios_return {
+	u32 sigpass;
+	u32 return_code;
+	u32 value;
+};
+
+struct key_entry {
+	char type;		/* See KE_* below */
+	u16 code;
+	u16 keycode;
+};
+
+enum { KE_KEY, KE_SW, KE_END };
+
+static struct key_entry hp_wmi_keymap[] = {
+	{KE_SW, 0x01, SW_DOCK},
+	{KE_KEY, 0x02, KEY_BRIGHTNESSUP},
+	{KE_KEY, 0x03, KEY_BRIGHTNESSDOWN},
+	{KE_KEY, 0x20e6, KEY_PROG1},
+	{KE_KEY, 0x2142, KEY_MEDIA},
+	{KE_KEY, 0x213b, KEY_INFO},
+	{KE_KEY, 0x231b, KEY_HELP},
+	{KE_END, 0}
+};
+
+static struct input_dev *hp_wmi_input_dev;
+static struct platform_device *hp_wmi_platform_dev;
+
+static struct rfkill *wifi_rfkill;
+static struct rfkill *bluetooth_rfkill;
+static struct rfkill *wwan_rfkill;
+
+static struct platform_driver hp_wmi_driver = {
+	.driver = {
+		   .name = "hp-wmi",
+		   .owner = THIS_MODULE,
+	},
+	.probe = hp_wmi_bios_setup,
+	.remove = hp_wmi_bios_remove,
+};
+
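+/*
+ * Marshal a bios_args block and evaluate the HP BIOS WMI method.
+ * 'write' selects command 0x2 (write) instead of 0x1 (read); the return
+ * value is the BIOS-reported value, or the negated BIOS return code on
+ * failure.
+ */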
+static int hp_wmi_perform_query(int query, int write, int value)
+{
+	struct bios_return bios_return;
+	acpi_status status;
+	union acpi_object *obj;
+	struct bios_args args = {
+		.signature = 0x55434553,
+		.command = write ? 0x2 : 0x1,
+		.commandtype = query,
+		.datasize = write ? 0x4 : 0,
+		.data = value,
+	};
+	struct acpi_buffer input = { sizeof(struct bios_args), &args };
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	status = wmi_evaluate_method(HPWMI_BIOS_GUID, 0, 0x3, &input, &output);
+
+	obj = output.pointer;
+
+	if (!obj || obj->type != ACPI_TYPE_BUFFER)
+		return -EINVAL;
+
+	bios_return = *((struct bios_return *)obj->buffer.pointer);
+	if (bios_return.return_code > 0)
+		return bios_return.return_code * -1;
+	else
+		return bios_return.value;
+}
+
+static int hp_wmi_display_state(void)
+{
+	return hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, 0);
+}
+
+static int hp_wmi_hddtemp_state(void)
+{
+	return hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, 0);
+}
+
+static int hp_wmi_als_state(void)
+{
+	return hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, 0);
+}
+
+static int hp_wmi_dock_state(void)
+{
+	return hp_wmi_perform_query(HPWMI_DOCK_QUERY, 0, 0);
+}
+
+static int hp_wmi_wifi_set(void *data, enum rfkill_state state)
+{
+	if (state)
+		return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x101);
+	else
+		return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x100);
+}
+
+static int hp_wmi_bluetooth_set(void *data, enum rfkill_state state)
+{
+	if (state)
+		return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x202);
+	else
+		return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x200);
+}
+
+static int hp_wmi_wwan_set(void *data, enum rfkill_state state)
+{
+	if (state)
+		return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x404);
+	else
+		return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 0x400);
+}
+
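+/* In the wireless state word, bit 8 reports the wifi radio, bit 16
+ * bluetooth and bit 24 WWAN; a set bit means the radio is unblocked. */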
+static int hp_wmi_wifi_state(void)
+{
+	int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
+
+	if (wireless & 0x100)
+		return RFKILL_STATE_UNBLOCKED;
+	else
+		return RFKILL_STATE_SOFT_BLOCKED;
+}
+
+static int hp_wmi_bluetooth_state(void)
+{
+	int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
+
+	if (wireless & 0x10000)
+		return RFKILL_STATE_UNBLOCKED;
+	else
+		return RFKILL_STATE_SOFT_BLOCKED;
+}
+
+static int hp_wmi_wwan_state(void)
+{
+	int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
+
+	if (wireless & 0x1000000)
+		return RFKILL_STATE_UNBLOCKED;
+	else
+		return RFKILL_STATE_SOFT_BLOCKED;
+}
+
+static ssize_t show_display(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	int value = hp_wmi_display_state();
+	if (value < 0)
+		return -EINVAL;
+	return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t show_hddtemp(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	int value = hp_wmi_hddtemp_state();
+	if (value < 0)
+		return -EINVAL;
+	return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t show_als(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	int value = hp_wmi_als_state();
+	if (value < 0)
+		return -EINVAL;
+	return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t show_dock(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	int value = hp_wmi_dock_state();
+	if (value < 0)
+		return -EINVAL;
+	return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t set_als(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	u32 tmp = simple_strtoul(buf, NULL, 10);
+	hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, tmp);
+	return count;
+}
+
+static DEVICE_ATTR(display, S_IRUGO, show_display, NULL);
+static DEVICE_ATTR(hddtemp, S_IRUGO, show_hddtemp, NULL);
+static DEVICE_ATTR(als, S_IRUGO | S_IWUSR, show_als, set_als);
+static DEVICE_ATTR(dock, S_IRUGO, show_dock, NULL);
+
+static struct key_entry *hp_wmi_get_entry_by_scancode(int code)
+{
+	struct key_entry *key;
+
+	for (key = hp_wmi_keymap; key->type != KE_END; key++)
+		if (code == key->code)
+			return key;
+
+	return NULL;
+}
+
+static struct key_entry *hp_wmi_get_entry_by_keycode(int keycode)
+{
+	struct key_entry *key;
+
+	for (key = hp_wmi_keymap; key->type != KE_END; key++)
+		if (key->type == KE_KEY && keycode == key->keycode)
+			return key;
+
+	return NULL;
+}
+
+static int hp_wmi_getkeycode(struct input_dev *dev, int scancode, int *keycode)
+{
+	struct key_entry *key = hp_wmi_get_entry_by_scancode(scancode);
+
+	if (key && key->type == KE_KEY) {
+		*keycode = key->keycode;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int hp_wmi_setkeycode(struct input_dev *dev, int scancode, int keycode)
+{
+	struct key_entry *key;
+	int old_keycode;
+
+	if (keycode < 0 || keycode > KEY_MAX)
+		return -EINVAL;
+
+	key = hp_wmi_get_entry_by_scancode(scancode);
+	if (key && key->type == KE_KEY) {
+		old_keycode = key->keycode;
+		key->keycode = keycode;
+		set_bit(keycode, dev->keybit);
+		if (!hp_wmi_get_entry_by_keycode(old_keycode))
+			clear_bit(old_keycode, dev->keybit);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static void hp_wmi_notify(u32 value, void *context)
+{
+	struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+	static struct key_entry *key;
+	union acpi_object *obj;
+
+	wmi_get_event_data(value, &response);
+
+	obj = (union acpi_object *)response.pointer;
+
+	if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == 8) {
+		int eventcode = *((u8 *) obj->buffer.pointer);
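+		/* Event 0x4 is a hotkey press; the scancode has to be
+		 * fetched with a separate hotkey query.  Event 0x5 reports
+		 * a change in one of the radio states. */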
+		if (eventcode == 0x4)
+			eventcode = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
+							 0);
+		key = hp_wmi_get_entry_by_scancode(eventcode);
+		if (key) {
+			switch (key->type) {
+			case KE_KEY:
+				input_report_key(hp_wmi_input_dev,
+						 key->keycode, 1);
+				input_sync(hp_wmi_input_dev);
+				input_report_key(hp_wmi_input_dev,
+						 key->keycode, 0);
+				input_sync(hp_wmi_input_dev);
+				break;
+			case KE_SW:
+				input_report_switch(hp_wmi_input_dev,
+						    key->keycode,
+						    hp_wmi_dock_state());
+				input_sync(hp_wmi_input_dev);
+				break;
+			}
+		} else if (eventcode == 0x5) {
+			if (wifi_rfkill)
+				rfkill_force_state(wifi_rfkill,
+						   hp_wmi_wifi_state());
+			if (bluetooth_rfkill)
+				rfkill_force_state(bluetooth_rfkill,
+						   hp_wmi_bluetooth_state());
+			if (wwan_rfkill)
+				rfkill_force_state(wwan_rfkill,
+						   hp_wmi_wwan_state());
+		} else
+			printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n",
+			       eventcode);
+	} else
+		printk(KERN_INFO "HP WMI: Unknown response received\n");
+}
+
+static int __init hp_wmi_input_setup(void)
+{
+	struct key_entry *key;
+	int err;
+
+	hp_wmi_input_dev = input_allocate_device();
+
+	hp_wmi_input_dev->name = "HP WMI hotkeys";
+	hp_wmi_input_dev->phys = "wmi/input0";
+	hp_wmi_input_dev->id.bustype = BUS_HOST;
+	hp_wmi_input_dev->getkeycode = hp_wmi_getkeycode;
+	hp_wmi_input_dev->setkeycode = hp_wmi_setkeycode;
+
+	for (key = hp_wmi_keymap; key->type != KE_END; key++) {
+		switch (key->type) {
+		case KE_KEY:
+			set_bit(EV_KEY, hp_wmi_input_dev->evbit);
+			set_bit(key->keycode, hp_wmi_input_dev->keybit);
+			break;
+		case KE_SW:
+			set_bit(EV_SW, hp_wmi_input_dev->evbit);
+			set_bit(key->keycode, hp_wmi_input_dev->swbit);
+			break;
+		}
+	}
+
+	err = input_register_device(hp_wmi_input_dev);
+
+	if (err) {
+		input_free_device(hp_wmi_input_dev);
+		return err;
+	}
+
+	return 0;
+}
+
+static void cleanup_sysfs(struct platform_device *device)
+{
+	device_remove_file(&device->dev, &dev_attr_display);
+	device_remove_file(&device->dev, &dev_attr_hddtemp);
+	device_remove_file(&device->dev, &dev_attr_als);
+	device_remove_file(&device->dev, &dev_attr_dock);
+}
+
+static int __init hp_wmi_bios_setup(struct platform_device *device)
+{
+	int err;
+	int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
+
+	err = device_create_file(&device->dev, &dev_attr_display);
+	if (err)
+		goto add_sysfs_error;
+	err = device_create_file(&device->dev, &dev_attr_hddtemp);
+	if (err)
+		goto add_sysfs_error;
+	err = device_create_file(&device->dev, &dev_attr_als);
+	if (err)
+		goto add_sysfs_error;
+	err = device_create_file(&device->dev, &dev_attr_dock);
+	if (err)
+		goto add_sysfs_error;
+
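+	/* register an rfkill device for each radio flagged in the low
+	 * three bits of the wireless query (bit 0 wifi, bit 1 bluetooth,
+	 * bit 2 WWAN) */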
+	if (wireless & 0x1) {
+		wifi_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WLAN);
+		wifi_rfkill->name = "hp-wifi";
+		wifi_rfkill->state = hp_wmi_wifi_state();
+		wifi_rfkill->toggle_radio = hp_wmi_wifi_set;
+		wifi_rfkill->user_claim_unsupported = 1;
+		err = rfkill_register(wifi_rfkill);
+		if (err)
+			goto add_sysfs_error;
+	}
+
+	if (wireless & 0x2) {
+		bluetooth_rfkill = rfkill_allocate(&device->dev,
+						   RFKILL_TYPE_BLUETOOTH);
+		bluetooth_rfkill->name = "hp-bluetooth";
+		bluetooth_rfkill->state = hp_wmi_bluetooth_state();
+		bluetooth_rfkill->toggle_radio = hp_wmi_bluetooth_set;
+		bluetooth_rfkill->user_claim_unsupported = 1;
+		err = rfkill_register(bluetooth_rfkill);
+		if (err)
+			goto register_bluetooth_error;
+	}
+
+	if (wireless & 0x4) {
+		wwan_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WWAN);
+		wwan_rfkill->name = "hp-wwan";
+		wwan_rfkill->state = hp_wmi_wwan_state();
+		wwan_rfkill->toggle_radio = hp_wmi_wwan_set;
+		wwan_rfkill->user_claim_unsupported = 1;
+		err = rfkill_register(wwan_rfkill);
+		if (err)
+			goto register_wwan_err;
+	}
+
+	return 0;
+register_wwan_err:
+	rfkill_unregister(bluetooth_rfkill);
+register_bluetooth_error:
+	rfkill_unregister(wifi_rfkill);
+add_sysfs_error:
+	cleanup_sysfs(device);
+	return err;
+}
+
+static int __exit hp_wmi_bios_remove(struct platform_device *device)
+{
+	cleanup_sysfs(device);
+
+	if (wifi_rfkill)
+		rfkill_unregister(wifi_rfkill);
+	if (bluetooth_rfkill)
+		rfkill_unregister(bluetooth_rfkill);
+	if (wwan_rfkill)
+		rfkill_unregister(wwan_rfkill);
+
+	return 0;
+}
+
+static int __init hp_wmi_init(void)
+{
+	int err;
+
+	if (wmi_has_guid(HPWMI_EVENT_GUID)) {
+		err = wmi_install_notify_handler(HPWMI_EVENT_GUID,
+						 hp_wmi_notify, NULL);
+		if (!err)
+			hp_wmi_input_setup();
+	}
+
+	if (wmi_has_guid(HPWMI_BIOS_GUID)) {
+		err = platform_driver_register(&hp_wmi_driver);
+		if (err)
+			return 0;
+		hp_wmi_platform_dev = platform_device_alloc("hp-wmi", -1);
+		if (!hp_wmi_platform_dev) {
+			platform_driver_unregister(&hp_wmi_driver);
+			return 0;
+		}
+		platform_device_add(hp_wmi_platform_dev);
+	}
+
+	return 0;
+}
+
+static void __exit hp_wmi_exit(void)
+{
+	if (wmi_has_guid(HPWMI_EVENT_GUID)) {
+		wmi_remove_notify_handler(HPWMI_EVENT_GUID);
+		input_unregister_device(hp_wmi_input_dev);
+	}
+	if (hp_wmi_platform_dev) {
+		platform_device_del(hp_wmi_platform_dev);
+		platform_driver_unregister(&hp_wmi_driver);
+	}
+}
+
+module_init(hp_wmi_init);
+module_exit(hp_wmi_exit);
diff --git a/drivers/misc/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
similarity index 100%
rename from drivers/misc/intel_menlow.c
rename to drivers/platform/x86/intel_menlow.c
diff --git a/drivers/misc/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
similarity index 100%
rename from drivers/misc/msi-laptop.c
rename to drivers/platform/x86/msi-laptop.c
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
new file mode 100644
index 0000000..f30db36
--- /dev/null
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -0,0 +1,744 @@
+/*
+ *  Panasonic HotKey and LCD brightness control driver
+ *  (C) 2004 Hiroshi Miura <miura@da-cha.org>
+ *  (C) 2004 NTT DATA Intellilink Co. http://www.intellilink.co.jp/
+ *  (C) YOKOTA Hiroshi <yokota (at) netlab. is. tsukuba. ac. jp>
+ *  (C) 2004 David Bronaugh <dbronaugh>
+ *  (C) 2006-2008 Harald Welte <laforge@gnumonks.org>
+ *
+ *  derived from toshiba_acpi.c, Copyright (C) 2002-2004 John Belmonte
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
+ *
+ *---------------------------------------------------------------------------
+ *
+ * ChangeLog:
+ *	Sep.23, 2008	Harald Welte <laforge@gnumonks.org>
+ *		-v0.95	rename driver from drivers/acpi/pcc_acpi.c to
+ *			drivers/misc/panasonic-laptop.c
+ *
+ * 	Jul.04, 2008	Harald Welte <laforge@gnumonks.org>
+ * 		-v0.94	replace /proc interface with device attributes
+ * 			support {set,get}keycode on the input device
+ *
+ *      Jun.27, 2008	Harald Welte <laforge@gnumonks.org>
+ *      	-v0.92	merge with 2.6.26-rc6 input API changes
+ *      		remove broken <= 2.6.15 kernel support
+ *      		resolve all compiler warnings
+ *      		various coding style fixes (checkpatch.pl)
+ *      		add support for backlight api
+ *      		major code restructuring
+ *
+ * 	Dec.28, 2007	Harald Welte <laforge@gnumonks.org>
+ * 		-v0.91	merge with 2.6.24-rc6 ACPI changes
+ *
+ * 	Nov.04, 2006	Hiroshi Miura <miura@da-cha.org>
+ * 		-v0.9	remove warning about section reference.
+ * 			remove acpi_os_free
+ * 			add /proc/acpi/pcc/brightness interface for HAL access
+ * 			merge dbronaugh's enhancement
+ * 			Aug.17, 2004 David Bronaugh (dbronaugh)
+ *  				- Added screen brightness setting interface
+ *				  Thanks to FreeBSD crew (acpi_panasonic.c)
+ * 				  for the ideas I needed to accomplish it
+ *
+ *	May.29, 2006	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.8.4 follow to change keyinput structure
+ *			thanks Fabian Yamaguchi <fabs@cs.tu-berlin.de>,
+ *			Jacob Bower <jacob.bower@ic.ac.uk> and
+ *			Hiroshi Yokota for providing solutions.
+ *
+ *	Oct.02, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.8.2	merge code of YOKOTA Hiroshi
+ *					<yokota@netlab.is.tsukuba.ac.jp>.
+ *			Add sticky key mode interface.
+ *			Refactoring acpi_pcc_generate_keyinput().
+ *
+ *	Sep.15, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.8	Generate key input event on input subsystem.
+ *			This is based on yet another driver written by
+ *							Ryuta Nakanishi.
+ *
+ *	Sep.10, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.7	Change proc interface functions using seq_file
+ *			facility as same as other ACPI drivers.
+ *
+ *	Aug.28, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.6.4 Fix a silly error with status checking
+ *
+ *	Aug.25, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		-v0.6.3 replace read_acpi_int by standard function
+ *							acpi_evaluate_integer
+ *			some clean up and make smart copyright notice.
+ *			fix return value of pcc_acpi_get_key()
+ *			fix checking return value of acpi_bus_register_driver()
+ *
+ *      Aug.22, 2004    David Bronaugh <dbronaugh@linuxboxen.org>
+ *              -v0.6.2 Add check on ACPI data (num_sifr)
+ *                      Coding style cleanups, better error messages/handling
+ *			Fixed an off-by-one error in memory allocation
+ *
+ *      Aug.21, 2004    David Bronaugh <dbronaugh@linuxboxen.org>
+ *              -v0.6.1 Fix a silly error with status checking
+ *
+ *      Aug.20, 2004    David Bronaugh <dbronaugh@linuxboxen.org>
+ *              - v0.6  Correct brightness controls to reflect reality
+ *                      based on information gleaned by Hiroshi Miura
+ *                      and discussions with Hiroshi Miura
+ *
+ *	Aug.10, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		- v0.5  support LCD brightness control
+ *			based on the disclosed information by MEI.
+ *
+ *	Jul.25, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		- v0.4  first post version
+ *		        add function to retrieve SIFR
+ *
+ *	Jul.24, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		- v0.3  get proper status of hotkey
+ *
+ *      Jul.22, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		- v0.2  add HotKey handler
+ *
+ *      Jul.17, 2004	Hiroshi Miura <miura@da-cha.org>
+ *		- v0.1  start from toshiba_acpi driver written by John Belmonte
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/backlight.h>
+#include <linux/ctype.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/input.h>
+
+
+#ifndef ACPI_HOTKEY_COMPONENT
+#define ACPI_HOTKEY_COMPONENT	0x10000000
+#endif
+
+#define _COMPONENT		ACPI_HOTKEY_COMPONENT
+
+MODULE_AUTHOR("Hiroshi Miura, David Bronaugh and Harald Welte");
+MODULE_DESCRIPTION("ACPI HotKey driver for Panasonic Let's Note laptops");
+MODULE_LICENSE("GPL");
+
+#define LOGPREFIX "pcc_acpi: "
+
+/* Define ACPI PATHs */
+/* Let's Note hotkeys */
+#define METHOD_HKEY_QUERY	"HINF"
+#define METHOD_HKEY_SQTY	"SQTY"
+#define METHOD_HKEY_SINF	"SINF"
+#define METHOD_HKEY_SSET	"SSET"
+#define HKEY_NOTIFY		 0x80
+
+#define ACPI_PCC_DRIVER_NAME	"Panasonic Laptop Support"
+#define ACPI_PCC_DEVICE_NAME	"Hotkey"
+#define ACPI_PCC_CLASS		"pcc"
+
+#define ACPI_PCC_INPUT_PHYS	"panasonic/hkey0"
+
+/* LCD_TYPEs: 0 = Normal, 1 = Semi-transparent
+   ENV_STATEs: Normal temp=0x01, High temp=0x81, N/A=0x00
+*/
+enum SINF_BITS { SINF_NUM_BATTERIES = 0,
+		 SINF_LCD_TYPE,
+		 SINF_AC_MAX_BRIGHT,
+		 SINF_AC_MIN_BRIGHT,
+		 SINF_AC_CUR_BRIGHT,
+		 SINF_DC_MAX_BRIGHT,
+		 SINF_DC_MIN_BRIGHT,
+		 SINF_DC_CUR_BRIGHT,
+		 SINF_MUTE,
+		 SINF_RESERVED,
+		 SINF_ENV_STATE,
+		 SINF_STICKY_KEY = 0x80,
+	};
+/* R1 handles SINF_AC_CUR_BRIGHT as SINF_CUR_BRIGHT, doesn't know AC state */
+
+static int acpi_pcc_hotkey_add(struct acpi_device *device);
+static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type);
+static int acpi_pcc_hotkey_resume(struct acpi_device *device);
+
+static const struct acpi_device_id pcc_device_ids[] = {
+	{ "MAT0012", 0},
+	{ "MAT0013", 0},
+	{ "MAT0018", 0},
+	{ "MAT0019", 0},
+	{ "", 0},
+};
+
+static struct acpi_driver acpi_pcc_driver = {
+	.name =		ACPI_PCC_DRIVER_NAME,
+	.class =	ACPI_PCC_CLASS,
+	.ids =		pcc_device_ids,
+	.ops =		{
+				.add =		acpi_pcc_hotkey_add,
+				.remove =	acpi_pcc_hotkey_remove,
+				.resume =       acpi_pcc_hotkey_resume,
+			},
+};
+
+#define KEYMAP_SIZE		11
+static const int initial_keymap[KEYMAP_SIZE] = {
+	/*  0 */ KEY_RESERVED,
+	/*  1 */ KEY_BRIGHTNESSDOWN,
+	/*  2 */ KEY_BRIGHTNESSUP,
+	/*  3 */ KEY_DISPLAYTOGGLE,
+	/*  4 */ KEY_MUTE,
+	/*  5 */ KEY_VOLUMEDOWN,
+	/*  6 */ KEY_VOLUMEUP,
+	/*  7 */ KEY_SLEEP,
+	/*  8 */ KEY_PROG1, /* Change CPU boost */
+	/*  9 */ KEY_BATTERY,
+	/* 10 */ KEY_SUSPEND,
+};
+
+struct pcc_acpi {
+	acpi_handle		handle;
+	unsigned long		num_sifr;
+	int			sticky_mode;
+	u32 			*sinf;
+	struct acpi_device	*device;
+	struct input_dev	*input_dev;
+	struct backlight_device	*backlight;
+	int			keymap[KEYMAP_SIZE];
+};
+
+struct pcc_keyinput {
+	struct acpi_hotkey      *hotkey;
+};
+
+/* method access functions */
+static int acpi_pcc_write_sset(struct pcc_acpi *pcc, int func, int val)
+{
+	union acpi_object in_objs[] = {
+		{ .integer.type  = ACPI_TYPE_INTEGER,
+		  .integer.value = func, },
+		{ .integer.type  = ACPI_TYPE_INTEGER,
+		  .integer.value = val, },
+	};
+	struct acpi_object_list params = {
+		.count   = ARRAY_SIZE(in_objs),
+		.pointer = in_objs,
+	};
+	acpi_status status = AE_OK;
+
+	status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SSET,
+				      &params, NULL);
+
+	return status == AE_OK;
+}
+
+static inline int acpi_pcc_get_sqty(struct acpi_device *device)
+{
+	unsigned long long s;
+	acpi_status status;
+
+	status = acpi_evaluate_integer(device->handle, METHOD_HKEY_SQTY,
+				       NULL, &s);
+	if (ACPI_SUCCESS(status))
+		return s;
+	else {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				  "evaluation error HKEY.SQTY\n"));
+		return -EINVAL;
+	}
+}
+
+static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc, u32 *sinf)
+{
+	acpi_status status;
+	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+	union acpi_object *hkey = NULL;
+	int i;
+
+	status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, 0,
+				      &buffer);
+	if (ACPI_FAILURE(status)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				  "evaluation error HKEY.SINF\n"));
+		return 0;
+	}
+
+	hkey = buffer.pointer;
+	if (!hkey || (hkey->type != ACPI_TYPE_PACKAGE)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid HKEY.SINF\n"));
+		goto end;
+	}
+
+	if (pcc->num_sifr < hkey->package.count) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				 "SQTY reports bad SINF length\n"));
+		status = AE_ERROR;
+		goto end;
+	}
+
+	for (i = 0; i < hkey->package.count; i++) {
+		union acpi_object *element = &(hkey->package.elements[i]);
+		if (likely(element->type == ACPI_TYPE_INTEGER)) {
+			sinf[i] = element->integer.value;
+		} else
+			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+					 "Invalid HKEY.SINF data\n"));
+	}
+	sinf[hkey->package.count] = -1;
+
+end:
+	kfree(buffer.pointer);
+	return status == AE_OK;
+}
+
+/* backlight API interface functions */
+
+/* This driver currently treats AC and DC brightness identical,
+ * since we don't need to invent an interface to the core ACPI
+ * logic to receive events in case a power supply is plugged in
+ * or removed */
+
+static int bl_get(struct backlight_device *bd)
+{
+	struct pcc_acpi *pcc = bl_get_data(bd);
+
+	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+		return -EIO;
+
+	return pcc->sinf[SINF_AC_CUR_BRIGHT];
+}
+
+static int bl_set_status(struct backlight_device *bd)
+{
+	struct pcc_acpi *pcc = bl_get_data(bd);
+	int bright = bd->props.brightness;
+	int rc;
+
+	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+		return -EIO;
+
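+	/* never program a level below the AC or DC minimum reported
+	 * by the firmware */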
+	if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT])
+		bright = pcc->sinf[SINF_AC_MIN_BRIGHT];
+
+	if (bright < pcc->sinf[SINF_DC_MIN_BRIGHT])
+		bright = pcc->sinf[SINF_DC_MIN_BRIGHT];
+
+	if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT] ||
+	    bright > pcc->sinf[SINF_AC_MAX_BRIGHT])
+		return -EINVAL;
+
+	rc = acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, bright);
+	if (rc < 0)
+		return rc;
+
+	return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
+}
+
+static struct backlight_ops pcc_backlight_ops = {
+	.get_brightness	= bl_get,
+	.update_status	= bl_set_status,
+};
+
+
+/* sysfs user interface functions */
+
+static ssize_t show_numbatt(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct acpi_device *acpi = to_acpi_device(dev);
+	struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+		return -EIO;
+
+	return sprintf(buf, "%u\n", pcc->sinf[SINF_NUM_BATTERIES]);
+}
+
+static ssize_t show_lcdtype(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct acpi_device *acpi = to_acpi_device(dev);
+	struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+		return -EIO;
+
+	return sprintf(buf, "%u\n", pcc->sinf[SINF_LCD_TYPE]);
+}
+
+static ssize_t show_mute(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct acpi_device *acpi = to_acpi_device(dev);
+	struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+		return -EIO;
+
+	return sprintf(buf, "%u\n", pcc->sinf[SINF_MUTE]);
+}
+
+static ssize_t show_sticky(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct acpi_device *acpi = to_acpi_device(dev);
+	struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+		return -EIO;
+
+	return sprintf(buf, "%u\n", pcc->sinf[SINF_STICKY_KEY]);
+}
+
+static ssize_t set_sticky(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct acpi_device *acpi = to_acpi_device(dev);
+	struct pcc_acpi *pcc = acpi_driver_data(acpi);
+	int val;
+
+	if (count && sscanf(buf, "%i", &val) == 1 &&
+	    (val == 0 || val == 1)) {
+		acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, val);
+		pcc->sticky_mode = val;
+	}
+
+	return count;
+}
+
+static DEVICE_ATTR(numbatt, S_IRUGO, show_numbatt, NULL);
+static DEVICE_ATTR(lcdtype, S_IRUGO, show_lcdtype, NULL);
+static DEVICE_ATTR(mute, S_IRUGO, show_mute, NULL);
+static DEVICE_ATTR(sticky_key, S_IRUGO | S_IWUSR, show_sticky, set_sticky);
+
+static struct attribute *pcc_sysfs_entries[] = {
+	&dev_attr_numbatt.attr,
+	&dev_attr_lcdtype.attr,
+	&dev_attr_mute.attr,
+	&dev_attr_sticky_key.attr,
+	NULL,
+};
+
+static struct attribute_group pcc_attr_group = {
+	.name	= NULL,		/* put in device directory */
+	.attrs	= pcc_sysfs_entries,
+};
+
+
+/* hotkey input device driver */
+
+static int pcc_getkeycode(struct input_dev *dev, int scancode, int *keycode)
+{
+	struct pcc_acpi *pcc = input_get_drvdata(dev);
+
+	if (scancode >= ARRAY_SIZE(pcc->keymap))
+		return -EINVAL;
+
+	*keycode = pcc->keymap[scancode];
+
+	return 0;
+}
+
+static int keymap_get_by_keycode(struct pcc_acpi *pcc, int keycode)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pcc->keymap); i++) {
+		if (pcc->keymap[i] == keycode)
+			return i+1;
+	}
+
+	return 0;
+}
+
+static int pcc_setkeycode(struct input_dev *dev, int scancode, int keycode)
+{
+	struct pcc_acpi *pcc = input_get_drvdata(dev);
+	int oldkeycode;
+
+	if (scancode >= ARRAY_SIZE(pcc->keymap))
+		return -EINVAL;
+
+	if (keycode < 0 || keycode > KEY_MAX)
+		return -EINVAL;
+
+	oldkeycode = pcc->keymap[scancode];
+	pcc->keymap[scancode] = keycode;
+
+	set_bit(keycode, dev->keybit);
+
+	if (!keymap_get_by_keycode(pcc, oldkeycode))
+		clear_bit(oldkeycode, dev->keybit);
+
+	return 0;
+}
+
+static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
+{
+	struct input_dev *hotk_input_dev = pcc->input_dev;
+	int rc;
+	int key_code, hkey_num;
+	unsigned long long result;
+
+	rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY,
+				   NULL, &result);
+	if (!ACPI_SUCCESS(rc)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				 "error getting hotkey status\n"));
+		return;
+	}
+
+	acpi_bus_generate_proc_event(pcc->device, HKEY_NOTIFY, result);
+
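+	/* the low nibble of HINF is the hotkey number; bit 7 tells
+	 * whether the key was pressed or released */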
+	hkey_num = result & 0xf;
+
+	if (hkey_num < 0 || hkey_num >= ARRAY_SIZE(pcc->keymap)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				  "hotkey number out of range: %d\n",
+				  hkey_num));
+		return;
+	}
+
+	key_code = pcc->keymap[hkey_num];
+
+	if (key_code != KEY_RESERVED) {
+		int pushed = (result & 0x80) ? TRUE : FALSE;
+
+		input_report_key(hotk_input_dev, key_code, pushed);
+		input_sync(hotk_input_dev);
+	}
+
+	return;
+}
+
+static void acpi_pcc_hotkey_notify(acpi_handle handle, u32 event, void *data)
+{
+	struct pcc_acpi *pcc = (struct pcc_acpi *) data;
+
+	switch (event) {
+	case HKEY_NOTIFY:
+		acpi_pcc_generate_keyinput(pcc);
+		break;
+	default:
+		/* nothing to do */
+		break;
+	}
+}
+
+static int acpi_pcc_init_input(struct pcc_acpi *pcc)
+{
+	int i, rc;
+
+	pcc->input_dev = input_allocate_device();
+	if (!pcc->input_dev) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				  "Couldn't allocate input device for hotkey"));
+		return -ENOMEM;
+	}
+
+	pcc->input_dev->evbit[0] = BIT(EV_KEY);
+
+	pcc->input_dev->name = ACPI_PCC_DRIVER_NAME;
+	pcc->input_dev->phys = ACPI_PCC_INPUT_PHYS;
+	pcc->input_dev->id.bustype = BUS_HOST;
+	pcc->input_dev->id.vendor = 0x0001;
+	pcc->input_dev->id.product = 0x0001;
+	pcc->input_dev->id.version = 0x0100;
+	pcc->input_dev->getkeycode = pcc_getkeycode;
+	pcc->input_dev->setkeycode = pcc_setkeycode;
+
+	/* load initial keymap */
+	memcpy(pcc->keymap, initial_keymap, sizeof(pcc->keymap));
+
+	for (i = 0; i < ARRAY_SIZE(pcc->keymap); i++)
+		__set_bit(pcc->keymap[i], pcc->input_dev->keybit);
+	__clear_bit(KEY_RESERVED, pcc->input_dev->keybit);
+
+	input_set_drvdata(pcc->input_dev, pcc);
+
+	rc = input_register_device(pcc->input_dev);
+	if (rc < 0)
+		input_free_device(pcc->input_dev);
+
+	return rc;
+}
+
+/* kernel module interface */
+
+static int acpi_pcc_hotkey_resume(struct acpi_device *device)
+{
+	struct pcc_acpi *pcc = acpi_driver_data(device);
+	acpi_status status = AE_OK;
+
+	if (device == NULL || pcc == NULL)
+		return -EINVAL;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Sticky mode restore: %d\n",
+			  pcc->sticky_mode));
+
+	status = acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode);
+
+	return status == AE_OK ? 0 : -EINVAL;
+}
+
+static int acpi_pcc_hotkey_add(struct acpi_device *device)
+{
+	acpi_status status;
+	struct pcc_acpi *pcc;
+	int num_sifr, result;
+
+	if (!device)
+		return -EINVAL;
+
+	num_sifr = acpi_pcc_get_sqty(device);
+
+	if (num_sifr > 255) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr too large"));
+		return -ENODEV;
+	}
+
+	pcc = kzalloc(sizeof(struct pcc_acpi), GFP_KERNEL);
+	if (!pcc) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				  "Couldn't allocate mem for pcc"));
+		return -ENOMEM;
+	}
+
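+	/* one extra slot: acpi_pcc_retrieve_biosdata() terminates the
+	 * array with -1 */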
+	pcc->sinf = kzalloc(sizeof(u32) * (num_sifr + 1), GFP_KERNEL);
+	if (!pcc->sinf) {
+		result = -ENOMEM;
+		goto out_hotkey;
+	}
+
+	pcc->device = device;
+	pcc->handle = device->handle;
+	pcc->num_sifr = num_sifr;
+	device->driver_data = pcc;
+	strcpy(acpi_device_name(device), ACPI_PCC_DEVICE_NAME);
+	strcpy(acpi_device_class(device), ACPI_PCC_CLASS);
+
+	result = acpi_pcc_init_input(pcc);
+	if (result) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				  "Error installing keyinput handler\n"));
+		goto out_sinf;
+	}
+
+	/* initialize hotkey input device */
+	status = acpi_install_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY,
+					     acpi_pcc_hotkey_notify, pcc);
+
+	if (ACPI_FAILURE(status)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				  "Error installing notify handler\n"));
+		result = -ENODEV;
+		goto out_input;
+	}
+
+	/* initialize backlight */
+	pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
+						   &pcc_backlight_ops);
+	if (IS_ERR(pcc->backlight))
+		goto out_notify;
+
+	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				 "Couldn't retrieve BIOS data\n"));
+		goto out_backlight;
+	}
+
+	/* read the initial brightness setting from the hardware */
+	pcc->backlight->props.max_brightness =
+					pcc->sinf[SINF_AC_MAX_BRIGHT];
+	pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
+
+	/* read the initial sticky key mode from the hardware */
+	pcc->sticky_mode = pcc->sinf[SINF_STICKY_KEY];
+
+	/* add sysfs attributes */
+	result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group);
+	if (result)
+		goto out_backlight;
+
+	return 0;
+
+out_backlight:
+	backlight_device_unregister(pcc->backlight);
+out_notify:
+	acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY,
+				   acpi_pcc_hotkey_notify);
+out_input:
+	input_unregister_device(pcc->input_dev);
+	/* no need to input_free_device() since the core input API
+	 * refcounts and frees the device */
+out_sinf:
+	kfree(pcc->sinf);
+out_hotkey:
+	kfree(pcc);
+
+	return result;
+}
+
+static int __init acpi_pcc_init(void)
+{
+	int result = 0;
+
+	if (acpi_disabled)
+		return -ENODEV;
+
+	result = acpi_bus_register_driver(&acpi_pcc_driver);
+	if (result < 0) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				  "Error registering hotkey driver\n"));
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type)
+{
+	struct pcc_acpi *pcc = acpi_driver_data(device);
+
+	if (!device || !pcc)
+		return -EINVAL;
+
+	sysfs_remove_group(&device->dev.kobj, &pcc_attr_group);
+
+	backlight_device_unregister(pcc->backlight);
+
+	acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY,
+				   acpi_pcc_hotkey_notify);
+
+	input_unregister_device(pcc->input_dev);
+	/* no need to input_free_device() since the core input API
+	 * refcounts and frees the device */
+
+	kfree(pcc->sinf);
+	kfree(pcc);
+
+	return 0;
+}
+
+static void __exit acpi_pcc_exit(void)
+{
+	acpi_bus_unregister_driver(&acpi_pcc_driver);
+}
+
+module_init(acpi_pcc_init);
+module_exit(acpi_pcc_exit);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
new file mode 100644
index 0000000..537959d
--- /dev/null
+++ b/drivers/platform/x86/sony-laptop.c
@@ -0,0 +1,2784 @@
+/*
+ * ACPI Sony Notebook Control Driver (SNC and SPIC)
+ *
+ * Copyright (C) 2004-2005 Stelian Pop <stelian@popies.net>
+ * Copyright (C) 2007 Mattia Dongili <malattia@linux.it>
+ *
+ * Parts of this driver inspired from asus_acpi.c and ibm_acpi.c
+ * which are copyrighted by their respective authors.
+ *
+ * The SNY6001 driver part is based on the sonypi driver which includes
+ * material from:
+ *
+ * Copyright (C) 2001-2005 Stelian Pop <stelian@popies.net>
+ *
+ * Copyright (C) 2005 Narayanan R S <nars@kadamba.org>
+ *
+ * Copyright (C) 2001-2002 Alcôve <www.alcove.com>
+ *
+ * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au>
+ *
+ * Copyright (C) 2001 Junichi Morita <jun1m@mars.dti.ne.jp>
+ *
+ * Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp>
+ *
+ * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com>
+ *
+ * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/types.h>
+#include <linux/backlight.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/dmi.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/kfifo.h>
+#include <linux/workqueue.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_bus.h>
+#include <asm/uaccess.h>
+#include <linux/sonypi.h>
+#include <linux/sony-laptop.h>
+#ifdef CONFIG_SONYPI_COMPAT
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#endif
+
+#define DRV_PFX			"sony-laptop: "
+#define dprintk(msg...)		do {			\
+	if (debug) printk(KERN_WARNING DRV_PFX  msg);	\
+} while (0)
+
+#define SONY_LAPTOP_DRIVER_VERSION	"0.6"
+
+#define SONY_NC_CLASS		"sony-nc"
+#define SONY_NC_HID		"SNY5001"
+#define SONY_NC_DRIVER_NAME	"Sony Notebook Control Driver"
+
+#define SONY_PIC_CLASS		"sony-pic"
+#define SONY_PIC_HID		"SNY6001"
+#define SONY_PIC_DRIVER_NAME	"Sony Programmable IO Control Driver"
+
+MODULE_AUTHOR("Stelian Pop, Mattia Dongili");
+MODULE_DESCRIPTION("Sony laptop extras driver (SPIC and SNC ACPI device)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SONY_LAPTOP_DRIVER_VERSION);
+
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "set this to 1 (and RTFM) if you want to help "
+		 "the development of this driver");
+
+static int no_spic;		/* = 0 */
+module_param(no_spic, int, 0444);
+MODULE_PARM_DESC(no_spic,
+		 "set this if you don't want to enable the SPIC device");
+
+static int compat;		/* = 0 */
+module_param(compat, int, 0444);
+MODULE_PARM_DESC(compat,
+		 "set this if you want to enable backward compatibility mode");
+
+static unsigned long mask = 0xffffffff;
+module_param(mask, ulong, 0644);
+MODULE_PARM_DESC(mask,
+		 "set this to the mask of events you want to enable (see doc)");
+
+static int camera;		/* = 0 */
+module_param(camera, int, 0444);
+MODULE_PARM_DESC(camera,
+		 "set this to 1 to enable Motion Eye camera controls "
+		 "(only use it if you have a C1VE or C1VN model)");
+
+#ifdef CONFIG_SONYPI_COMPAT
+static int minor = -1;
+module_param(minor, int, 0);
+MODULE_PARM_DESC(minor,
+		 "minor number of the misc device for the SPIC compatibility code, "
+		 "default is -1 (automatic)");
+#endif
+
+/*********** Input Devices ***********/
+
+#define SONY_LAPTOP_BUF_SIZE	128
+struct sony_laptop_input_s {
+	atomic_t		users;
+	struct input_dev	*jog_dev;
+	struct input_dev	*key_dev;
+	struct kfifo		*fifo;
+	spinlock_t		fifo_lock;
+	struct workqueue_struct	*wq;
+};
+static struct sony_laptop_input_s sony_laptop_input = {
+	.users = ATOMIC_INIT(0),
+};
+
+struct sony_laptop_keypress {
+	struct input_dev *dev;
+	int key;
+};
+
+/* Correspondence table between sonypi events
+ * and input layer indexes in the keymap
+ */
+static int sony_laptop_input_index[] = {
+	-1,	/*  0 no event */
+	-1,	/*  1 SONYPI_EVENT_JOGDIAL_DOWN */
+	-1,	/*  2 SONYPI_EVENT_JOGDIAL_UP */
+	-1,	/*  3 SONYPI_EVENT_JOGDIAL_DOWN_PRESSED */
+	-1,	/*  4 SONYPI_EVENT_JOGDIAL_UP_PRESSED */
+	-1,	/*  5 SONYPI_EVENT_JOGDIAL_PRESSED */
+	-1,	/*  6 SONYPI_EVENT_JOGDIAL_RELEASED */
+	 0,	/*  7 SONYPI_EVENT_CAPTURE_PRESSED */
+	 1,	/*  8 SONYPI_EVENT_CAPTURE_RELEASED */
+	 2,	/*  9 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */
+	 3,	/* 10 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */
+	 4,	/* 11 SONYPI_EVENT_FNKEY_ESC */
+	 5,	/* 12 SONYPI_EVENT_FNKEY_F1 */
+	 6,	/* 13 SONYPI_EVENT_FNKEY_F2 */
+	 7,	/* 14 SONYPI_EVENT_FNKEY_F3 */
+	 8,	/* 15 SONYPI_EVENT_FNKEY_F4 */
+	 9,	/* 16 SONYPI_EVENT_FNKEY_F5 */
+	10,	/* 17 SONYPI_EVENT_FNKEY_F6 */
+	11,	/* 18 SONYPI_EVENT_FNKEY_F7 */
+	12,	/* 19 SONYPI_EVENT_FNKEY_F8 */
+	13,	/* 20 SONYPI_EVENT_FNKEY_F9 */
+	14,	/* 21 SONYPI_EVENT_FNKEY_F10 */
+	15,	/* 22 SONYPI_EVENT_FNKEY_F11 */
+	16,	/* 23 SONYPI_EVENT_FNKEY_F12 */
+	17,	/* 24 SONYPI_EVENT_FNKEY_1 */
+	18,	/* 25 SONYPI_EVENT_FNKEY_2 */
+	19,	/* 26 SONYPI_EVENT_FNKEY_D */
+	20,	/* 27 SONYPI_EVENT_FNKEY_E */
+	21,	/* 28 SONYPI_EVENT_FNKEY_F */
+	22,	/* 29 SONYPI_EVENT_FNKEY_S */
+	23,	/* 30 SONYPI_EVENT_FNKEY_B */
+	24,	/* 31 SONYPI_EVENT_BLUETOOTH_PRESSED */
+	25,	/* 32 SONYPI_EVENT_PKEY_P1 */
+	26,	/* 33 SONYPI_EVENT_PKEY_P2 */
+	27,	/* 34 SONYPI_EVENT_PKEY_P3 */
+	28,	/* 35 SONYPI_EVENT_BACK_PRESSED */
+	-1,	/* 36 SONYPI_EVENT_LID_CLOSED */
+	-1,	/* 37 SONYPI_EVENT_LID_OPENED */
+	29,	/* 38 SONYPI_EVENT_BLUETOOTH_ON */
+	30,	/* 39 SONYPI_EVENT_BLUETOOTH_OFF */
+	31,	/* 40 SONYPI_EVENT_HELP_PRESSED */
+	32,	/* 41 SONYPI_EVENT_FNKEY_ONLY */
+	33,	/* 42 SONYPI_EVENT_JOGDIAL_FAST_DOWN */
+	34,	/* 43 SONYPI_EVENT_JOGDIAL_FAST_UP */
+	35,	/* 44 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */
+	36,	/* 45 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */
+	37,	/* 46 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */
+	38,	/* 47 SONYPI_EVENT_JOGDIAL_VFAST_UP */
+	39,	/* 48 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */
+	40,	/* 49 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */
+	41,	/* 50 SONYPI_EVENT_ZOOM_PRESSED */
+	42,	/* 51 SONYPI_EVENT_THUMBPHRASE_PRESSED */
+	43,	/* 52 SONYPI_EVENT_MEYE_FACE */
+	44,	/* 53 SONYPI_EVENT_MEYE_OPPOSITE */
+	45,	/* 54 SONYPI_EVENT_MEMORYSTICK_INSERT */
+	46,	/* 55 SONYPI_EVENT_MEMORYSTICK_EJECT */
+	-1,	/* 56 SONYPI_EVENT_ANYBUTTON_RELEASED */
+	-1,	/* 57 SONYPI_EVENT_BATTERY_INSERT */
+	-1,	/* 58 SONYPI_EVENT_BATTERY_REMOVE */
+	-1,	/* 59 SONYPI_EVENT_FNKEY_RELEASED */
+	47,	/* 60 SONYPI_EVENT_WIRELESS_ON */
+	48,	/* 61 SONYPI_EVENT_WIRELESS_OFF */
+	49,	/* 62 SONYPI_EVENT_ZOOM_IN_PRESSED */
+	50,	/* 63 SONYPI_EVENT_ZOOM_OUT_PRESSED */
+};
+
+static int sony_laptop_input_keycode_map[] = {
+	KEY_CAMERA,	/*  0 SONYPI_EVENT_CAPTURE_PRESSED */
+	KEY_RESERVED,	/*  1 SONYPI_EVENT_CAPTURE_RELEASED */
+	KEY_RESERVED,	/*  2 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */
+	KEY_RESERVED,	/*  3 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */
+	KEY_FN_ESC,	/*  4 SONYPI_EVENT_FNKEY_ESC */
+	KEY_FN_F1,	/*  5 SONYPI_EVENT_FNKEY_F1 */
+	KEY_FN_F2,	/*  6 SONYPI_EVENT_FNKEY_F2 */
+	KEY_FN_F3,	/*  7 SONYPI_EVENT_FNKEY_F3 */
+	KEY_FN_F4,	/*  8 SONYPI_EVENT_FNKEY_F4 */
+	KEY_FN_F5,	/*  9 SONYPI_EVENT_FNKEY_F5 */
+	KEY_FN_F6,	/* 10 SONYPI_EVENT_FNKEY_F6 */
+	KEY_FN_F7,	/* 11 SONYPI_EVENT_FNKEY_F7 */
+	KEY_FN_F8,	/* 12 SONYPI_EVENT_FNKEY_F8 */
+	KEY_FN_F9,	/* 13 SONYPI_EVENT_FNKEY_F9 */
+	KEY_FN_F10,	/* 14 SONYPI_EVENT_FNKEY_F10 */
+	KEY_FN_F11,	/* 15 SONYPI_EVENT_FNKEY_F11 */
+	KEY_FN_F12,	/* 16 SONYPI_EVENT_FNKEY_F12 */
+	KEY_FN_F1,	/* 17 SONYPI_EVENT_FNKEY_1 */
+	KEY_FN_F2,	/* 18 SONYPI_EVENT_FNKEY_2 */
+	KEY_FN_D,	/* 19 SONYPI_EVENT_FNKEY_D */
+	KEY_FN_E,	/* 20 SONYPI_EVENT_FNKEY_E */
+	KEY_FN_F,	/* 21 SONYPI_EVENT_FNKEY_F */
+	KEY_FN_S,	/* 22 SONYPI_EVENT_FNKEY_S */
+	KEY_FN_B,	/* 23 SONYPI_EVENT_FNKEY_B */
+	KEY_BLUETOOTH,	/* 24 SONYPI_EVENT_BLUETOOTH_PRESSED */
+	KEY_PROG1,	/* 25 SONYPI_EVENT_PKEY_P1 */
+	KEY_PROG2,	/* 26 SONYPI_EVENT_PKEY_P2 */
+	KEY_PROG3,	/* 27 SONYPI_EVENT_PKEY_P3 */
+	KEY_BACK,	/* 28 SONYPI_EVENT_BACK_PRESSED */
+	KEY_BLUETOOTH,	/* 29 SONYPI_EVENT_BLUETOOTH_ON */
+	KEY_BLUETOOTH,	/* 30 SONYPI_EVENT_BLUETOOTH_OFF */
+	KEY_HELP,	/* 31 SONYPI_EVENT_HELP_PRESSED */
+	KEY_FN,		/* 32 SONYPI_EVENT_FNKEY_ONLY */
+	KEY_RESERVED,	/* 33 SONYPI_EVENT_JOGDIAL_FAST_DOWN */
+	KEY_RESERVED,	/* 34 SONYPI_EVENT_JOGDIAL_FAST_UP */
+	KEY_RESERVED,	/* 35 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */
+	KEY_RESERVED,	/* 36 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */
+	KEY_RESERVED,	/* 37 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */
+	KEY_RESERVED,	/* 38 SONYPI_EVENT_JOGDIAL_VFAST_UP */
+	KEY_RESERVED,	/* 39 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */
+	KEY_RESERVED,	/* 40 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */
+	KEY_ZOOM,	/* 41 SONYPI_EVENT_ZOOM_PRESSED */
+	BTN_THUMB,	/* 42 SONYPI_EVENT_THUMBPHRASE_PRESSED */
+	KEY_RESERVED,	/* 43 SONYPI_EVENT_MEYE_FACE */
+	KEY_RESERVED,	/* 44 SONYPI_EVENT_MEYE_OPPOSITE */
+	KEY_RESERVED,	/* 45 SONYPI_EVENT_MEMORYSTICK_INSERT */
+	KEY_RESERVED,	/* 46 SONYPI_EVENT_MEMORYSTICK_EJECT */
+	KEY_WLAN,	/* 47 SONYPI_EVENT_WIRELESS_ON */
+	KEY_WLAN,	/* 48 SONYPI_EVENT_WIRELESS_OFF */
+	KEY_ZOOMIN,	/* 49 SONYPI_EVENT_ZOOM_IN_PRESSED */
+	KEY_ZOOMOUT	/* 50 SONYPI_EVENT_ZOOM_OUT_PRESSED */
+};
+
+/* release buttons after a short delay if pressed */
+static void do_sony_laptop_release_key(struct work_struct *work)
+{
+	struct sony_laptop_keypress kp;
+
+	while (kfifo_get(sony_laptop_input.fifo, (unsigned char *)&kp,
+			 sizeof(kp)) == sizeof(kp)) {
+		msleep(10);
+		input_report_key(kp.dev, kp.key, 0);
+		input_sync(kp.dev);
+	}
+}
+static DECLARE_WORK(sony_laptop_release_key_work,
+		do_sony_laptop_release_key);
+
+/* forward event to the input subsystem */
+static void sony_laptop_report_input_event(u8 event)
+{
+	struct input_dev *jog_dev = sony_laptop_input.jog_dev;
+	struct input_dev *key_dev = sony_laptop_input.key_dev;
+	struct sony_laptop_keypress kp = { NULL };
+
+	if (event == SONYPI_EVENT_FNKEY_RELEASED) {
+		/* Nothing, not all VAIOs generate this event */
+		return;
+	}
+
+	/* report events */
+	switch (event) {
+	/* jog_dev events */
+	case SONYPI_EVENT_JOGDIAL_UP:
+	case SONYPI_EVENT_JOGDIAL_UP_PRESSED:
+		input_report_rel(jog_dev, REL_WHEEL, 1);
+		input_sync(jog_dev);
+		return;
+
+	case SONYPI_EVENT_JOGDIAL_DOWN:
+	case SONYPI_EVENT_JOGDIAL_DOWN_PRESSED:
+		input_report_rel(jog_dev, REL_WHEEL, -1);
+		input_sync(jog_dev);
+		return;
+
+	/* key_dev events */
+	case SONYPI_EVENT_JOGDIAL_PRESSED:
+		kp.key = BTN_MIDDLE;
+		kp.dev = jog_dev;
+		break;
+
+	default:
+		if (event >= ARRAY_SIZE(sony_laptop_input_index)) {
+			dprintk("sony_laptop_report_input_event, event not known: %d\n", event);
+			break;
+		}
+		if (sony_laptop_input_index[event] != -1) {
+			kp.key = sony_laptop_input_keycode_map[sony_laptop_input_index[event]];
+			if (kp.key != KEY_UNKNOWN)
+				kp.dev = key_dev;
+		}
+		break;
+	}
+
+	if (kp.dev) {
+		input_report_key(kp.dev, kp.key, 1);
+		/* we emit the scancode so we can always remap the key */
+		input_event(kp.dev, EV_MSC, MSC_SCAN, event);
+		input_sync(kp.dev);
+		kfifo_put(sony_laptop_input.fifo,
+			  (unsigned char *)&kp, sizeof(kp));
+
+		if (!work_pending(&sony_laptop_release_key_work))
+			queue_work(sony_laptop_input.wq,
+					&sony_laptop_release_key_work);
+	} else
+		dprintk("unknown input event %.2x\n", event);
+}
+
+static int sony_laptop_setup_input(struct acpi_device *acpi_device)
+{
+	struct input_dev *jog_dev;
+	struct input_dev *key_dev;
+	int i;
+	int error;
+
+	/* don't run again if already initialized */
+	if (atomic_add_return(1, &sony_laptop_input.users) > 1)
+		return 0;
+
+	/* kfifo */
+	spin_lock_init(&sony_laptop_input.fifo_lock);
+	sony_laptop_input.fifo =
+		kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL,
+			    &sony_laptop_input.fifo_lock);
+	if (IS_ERR(sony_laptop_input.fifo)) {
+		printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n");
+		error = PTR_ERR(sony_laptop_input.fifo);
+		goto err_dec_users;
+	}
+
+	/* init workqueue */
+	sony_laptop_input.wq = create_singlethread_workqueue("sony-laptop");
+	if (!sony_laptop_input.wq) {
+		printk(KERN_ERR DRV_PFX
+				"Unable to create workqueue.\n");
+		error = -ENXIO;
+		goto err_free_kfifo;
+	}
+
+	/* input keys */
+	key_dev = input_allocate_device();
+	if (!key_dev) {
+		error = -ENOMEM;
+		goto err_destroy_wq;
+	}
+
+	key_dev->name = "Sony Vaio Keys";
+	key_dev->id.bustype = BUS_ISA;
+	key_dev->id.vendor = PCI_VENDOR_ID_SONY;
+	key_dev->dev.parent = &acpi_device->dev;
+
+	/* Initialize the Input Drivers: special keys */
+	set_bit(EV_KEY, key_dev->evbit);
+	set_bit(EV_MSC, key_dev->evbit);
+	set_bit(MSC_SCAN, key_dev->mscbit);
+	key_dev->keycodesize = sizeof(sony_laptop_input_keycode_map[0]);
+	key_dev->keycodemax = ARRAY_SIZE(sony_laptop_input_keycode_map);
+	key_dev->keycode = &sony_laptop_input_keycode_map;
+	for (i = 0; i < ARRAY_SIZE(sony_laptop_input_keycode_map); i++) {
+		if (sony_laptop_input_keycode_map[i] != KEY_RESERVED) {
+			set_bit(sony_laptop_input_keycode_map[i],
+				key_dev->keybit);
+		}
+	}
+
+	error = input_register_device(key_dev);
+	if (error)
+		goto err_free_keydev;
+
+	sony_laptop_input.key_dev = key_dev;
+
+	/* jogdial */
+	jog_dev = input_allocate_device();
+	if (!jog_dev) {
+		error = -ENOMEM;
+		goto err_unregister_keydev;
+	}
+
+	jog_dev->name = "Sony Vaio Jogdial";
+	jog_dev->id.bustype = BUS_ISA;
+	jog_dev->id.vendor = PCI_VENDOR_ID_SONY;
+	jog_dev->dev.parent = &acpi_device->dev;
+
+	jog_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
+	jog_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_MIDDLE);
+	jog_dev->relbit[0] = BIT_MASK(REL_WHEEL);
+
+	error = input_register_device(jog_dev);
+	if (error)
+		goto err_free_jogdev;
+
+	sony_laptop_input.jog_dev = jog_dev;
+
+	return 0;
+
+err_free_jogdev:
+	input_free_device(jog_dev);
+
+err_unregister_keydev:
+	input_unregister_device(key_dev);
+	/* to avoid kref underflow below at input_free_device */
+	key_dev = NULL;
+
+err_free_keydev:
+	input_free_device(key_dev);
+
+err_destroy_wq:
+	destroy_workqueue(sony_laptop_input.wq);
+
+err_free_kfifo:
+	kfifo_free(sony_laptop_input.fifo);
+
+err_dec_users:
+	atomic_dec(&sony_laptop_input.users);
+	return error;
+}
+
+static void sony_laptop_remove_input(void)
+{
+	/* cleanup only after the last user has gone */
+	if (!atomic_dec_and_test(&sony_laptop_input.users))
+		return;
+
+	/* flush workqueue first */
+	flush_workqueue(sony_laptop_input.wq);
+
+	/* destroy input devs */
+	input_unregister_device(sony_laptop_input.key_dev);
+	sony_laptop_input.key_dev = NULL;
+
+	if (sony_laptop_input.jog_dev) {
+		input_unregister_device(sony_laptop_input.jog_dev);
+		sony_laptop_input.jog_dev = NULL;
+	}
+
+	destroy_workqueue(sony_laptop_input.wq);
+	kfifo_free(sony_laptop_input.fifo);
+}
+
+/*********** Platform Device ***********/
+
+static atomic_t sony_pf_users = ATOMIC_INIT(0);
+static struct platform_driver sony_pf_driver = {
+	.driver = {
+		   .name = "sony-laptop",
+		   .owner = THIS_MODULE,
+		   }
+};
+static struct platform_device *sony_pf_device;
+
+static int sony_pf_add(void)
+{
+	int ret = 0;
+
+	/* don't run again if already initialized */
+	if (atomic_add_return(1, &sony_pf_users) > 1)
+		return 0;
+
+	ret = platform_driver_register(&sony_pf_driver);
+	if (ret)
+		goto out;
+
+	sony_pf_device = platform_device_alloc("sony-laptop", -1);
+	if (!sony_pf_device) {
+		ret = -ENOMEM;
+		goto out_platform_registered;
+	}
+
+	ret = platform_device_add(sony_pf_device);
+	if (ret)
+		goto out_platform_alloced;
+
+	return 0;
+
+out_platform_alloced:
+	platform_device_put(sony_pf_device);
+	sony_pf_device = NULL;
+out_platform_registered:
+	platform_driver_unregister(&sony_pf_driver);
+out:
+	atomic_dec(&sony_pf_users);
+	return ret;
+}
+
+static void sony_pf_remove(void)
+{
+	/* deregister only after the last user has gone */
+	if (!atomic_dec_and_test(&sony_pf_users))
+		return;
+
+	platform_device_del(sony_pf_device);
+	platform_device_put(sony_pf_device);
+	platform_driver_unregister(&sony_pf_driver);
+}
+
+/*********** SNC (SNY5001) Device ***********/
+
+/* the device uses 1-based values, while the backlight subsystem uses
+   0-based values */
+#define SONY_MAX_BRIGHTNESS	8
+
+#define SNC_VALIDATE_IN		0
+#define SNC_VALIDATE_OUT	1
+
+static ssize_t sony_nc_sysfs_show(struct device *, struct device_attribute *,
+			      char *);
+static ssize_t sony_nc_sysfs_store(struct device *, struct device_attribute *,
+			       const char *, size_t);
+static int boolean_validate(const int, const int);
+static int brightness_default_validate(const int, const int);
+
+struct sony_nc_value {
+	char *name;		/* name of the entry */
+	char **acpiget;		/* names of the ACPI get function */
+	char **acpiset;		/* names of the ACPI set function */
+	int (*validate)(const int, const int);	/* input/output validation */
+	int value;		/* current setting */
+	int valid;		/* Has ever been set */
+	int debug;		/* active only in debug mode ? */
+	struct device_attribute devattr;	/* sysfs attribute */
+};
+
+#define SNC_HANDLE_NAMES(_name, _values...) \
+	static char *snc_##_name[] = { _values, NULL }
+
+#define SNC_HANDLE(_name, _getters, _setters, _validate, _debug) \
+	{ \
+		.name		= __stringify(_name), \
+		.acpiget	= _getters, \
+		.acpiset	= _setters, \
+		.validate	= _validate, \
+		.debug		= _debug, \
+		.devattr	= __ATTR(_name, 0, sony_nc_sysfs_show, sony_nc_sysfs_store), \
+	}
+
+#define SNC_HANDLE_NULL	{ .name = NULL }
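+
+/*
+ * Example expansion: SNC_HANDLE_NAMES(cdpower_set, "SCDP", "CDPW") below
+ * defines static char *snc_cdpower_set[] = { "SCDP", "CDPW", NULL };
+ * SNC_HANDLE(cdpower, snc_cdpower_get, snc_cdpower_set, boolean_validate, 0)
+ * then builds a "cdpower" sysfs attribute wired to those ACPI methods.
+ */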
+
+SNC_HANDLE_NAMES(fnkey_get, "GHKE");
+
+SNC_HANDLE_NAMES(brightness_def_get, "GPBR");
+SNC_HANDLE_NAMES(brightness_def_set, "SPBR");
+
+SNC_HANDLE_NAMES(cdpower_get, "GCDP");
+SNC_HANDLE_NAMES(cdpower_set, "SCDP", "CDPW");
+
+SNC_HANDLE_NAMES(audiopower_get, "GAZP");
+SNC_HANDLE_NAMES(audiopower_set, "AZPW");
+
+SNC_HANDLE_NAMES(lanpower_get, "GLNP");
+SNC_HANDLE_NAMES(lanpower_set, "LNPW");
+
+SNC_HANDLE_NAMES(lidstate_get, "GLID");
+
+SNC_HANDLE_NAMES(indicatorlamp_get, "GILS");
+SNC_HANDLE_NAMES(indicatorlamp_set, "SILS");
+
+SNC_HANDLE_NAMES(gainbass_get, "GMGB");
+SNC_HANDLE_NAMES(gainbass_set, "CMGB");
+
+SNC_HANDLE_NAMES(PID_get, "GPID");
+
+SNC_HANDLE_NAMES(CTR_get, "GCTR");
+SNC_HANDLE_NAMES(CTR_set, "SCTR");
+
+SNC_HANDLE_NAMES(PCR_get, "GPCR");
+SNC_HANDLE_NAMES(PCR_set, "SPCR");
+
+SNC_HANDLE_NAMES(CMI_get, "GCMI");
+SNC_HANDLE_NAMES(CMI_set, "SCMI");
+
+static struct sony_nc_value sony_nc_values[] = {
+	SNC_HANDLE(brightness_default, snc_brightness_def_get,
+			snc_brightness_def_set, brightness_default_validate, 0),
+	SNC_HANDLE(fnkey, snc_fnkey_get, NULL, NULL, 0),
+	SNC_HANDLE(cdpower, snc_cdpower_get, snc_cdpower_set, boolean_validate, 0),
+	SNC_HANDLE(audiopower, snc_audiopower_get, snc_audiopower_set,
+			boolean_validate, 0),
+	SNC_HANDLE(lanpower, snc_lanpower_get, snc_lanpower_set,
+			boolean_validate, 1),
+	SNC_HANDLE(lidstate, snc_lidstate_get, NULL,
+			boolean_validate, 0),
+	SNC_HANDLE(indicatorlamp, snc_indicatorlamp_get, snc_indicatorlamp_set,
+			boolean_validate, 0),
+	SNC_HANDLE(gainbass, snc_gainbass_get, snc_gainbass_set,
+			boolean_validate, 0),
+	/* unknown methods */
+	SNC_HANDLE(PID, snc_PID_get, NULL, NULL, 1),
+	SNC_HANDLE(CTR, snc_CTR_get, snc_CTR_set, NULL, 1),
+	SNC_HANDLE(PCR, snc_PCR_get, snc_PCR_set, NULL, 1),
+	SNC_HANDLE(CMI, snc_CMI_get, snc_CMI_set, NULL, 1),
+	SNC_HANDLE_NULL
+};
+
+static acpi_handle sony_nc_acpi_handle;
+static struct acpi_device *sony_nc_acpi_device = NULL;
+
+/*
+ * acpi_evaluate_object wrappers
+ */
+static int acpi_callgetfunc(acpi_handle handle, char *name, int *result)
+{
+	struct acpi_buffer output;
+	union acpi_object out_obj;
+	acpi_status status;
+
+	output.length = sizeof(out_obj);
+	output.pointer = &out_obj;
+
+	status = acpi_evaluate_object(handle, name, NULL, &output);
+	if ((status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER)) {
+		*result = out_obj.integer.value;
+		return 0;
+	}
+
+	printk(KERN_WARNING DRV_PFX "acpi_callgetfunc failed\n");
+
+	return -1;
+}
+
+static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
+			    int *result)
+{
+	struct acpi_object_list params;
+	union acpi_object in_obj;
+	struct acpi_buffer output;
+	union acpi_object out_obj;
+	acpi_status status;
+
+	params.count = 1;
+	params.pointer = &in_obj;
+	in_obj.type = ACPI_TYPE_INTEGER;
+	in_obj.integer.value = value;
+
+	output.length = sizeof(out_obj);
+	output.pointer = &out_obj;
+
+	status = acpi_evaluate_object(handle, name, &params, &output);
+	if (status == AE_OK) {
+		if (result != NULL) {
+			if (out_obj.type != ACPI_TYPE_INTEGER) {
+				printk(KERN_WARNING DRV_PFX "acpi_evaluate_object bad "
+				       "return type\n");
+				return -1;
+			}
+			*result = out_obj.integer.value;
+		}
+		return 0;
+	}
+
+	printk(KERN_WARNING DRV_PFX "acpi_evaluate_object failed\n");
+
+	return -1;
+}
+
+/*
+ * sony_nc_values input/output validate functions
+ */
+
+/* brightness_default_validate:
+ *
+ * manipulate input/output values to keep consistency with the
+ * backlight framework for which brightness values are 0-based.
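+ * E.g. a hardware value of 1..SONY_MAX_BRIGHTNESS is reported to sysfs
+ * as 0..7, and a sysfs write of 0..7 is translated back to 1..8.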
+ */
+static int brightness_default_validate(const int direction, const int value)
+{
+	switch (direction) {
+	case SNC_VALIDATE_OUT:
+		return value - 1;
+	case SNC_VALIDATE_IN:
+		if (value >= 0 && value < SONY_MAX_BRIGHTNESS)
+			return value + 1;
+	}
+	return -EINVAL;
+}
+
+/* boolean_validate:
+ *
+ * on input validate boolean values 0/1, on output just pass the
+ * received value.
+ */
+static int boolean_validate(const int direction, const int value)
+{
+	if (direction == SNC_VALIDATE_IN) {
+		if (value != 0 && value != 1)
+			return -EINVAL;
+	}
+	return value;
+}
+
+/*
+ * Sysfs show/store common to all sony_nc_values
+ */
+static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *attr,
+			      char *buffer)
+{
+	int value;
+	struct sony_nc_value *item =
+	    container_of(attr, struct sony_nc_value, devattr);
+
+	if (!*item->acpiget)
+		return -EIO;
+
+	if (acpi_callgetfunc(sony_nc_acpi_handle, *item->acpiget, &value) < 0)
+		return -EIO;
+
+	if (item->validate)
+		value = item->validate(SNC_VALIDATE_OUT, value);
+
+	return snprintf(buffer, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t sony_nc_sysfs_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buffer, size_t count)
+{
+	int value;
+	struct sony_nc_value *item =
+	    container_of(attr, struct sony_nc_value, devattr);
+
+	if (!item->acpiset)
+		return -EIO;
+
+	if (count > 31)
+		return -EINVAL;
+
+	value = simple_strtoul(buffer, NULL, 10);
+
+	if (item->validate)
+		value = item->validate(SNC_VALIDATE_IN, value);
+
+	if (value < 0)
+		return value;
+
+	if (acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, value, NULL) < 0)
+		return -EIO;
+	item->value = value;
+	item->valid = 1;
+	return count;
+}
+
+
+/*
+ * Backlight device
+ */
+static int sony_backlight_update_status(struct backlight_device *bd)
+{
+	return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
+				bd->props.brightness + 1, NULL);
+}
+
+static int sony_backlight_get_brightness(struct backlight_device *bd)
+{
+	int value;
+
+	if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value))
+		return 0;
+	/* brightness levels are 1-based, while backlight ones are 0-based */
+	return value - 1;
+}
+
+static struct backlight_device *sony_backlight_device;
+static struct backlight_ops sony_backlight_ops = {
+	.update_status = sony_backlight_update_status,
+	.get_brightness = sony_backlight_get_brightness,
+};
+
+/*
+ * New SNC-only Vaios event mapping to driver known keys
+ */
+struct sony_nc_event {
+	u8	data;
+	u8	event;
+};
+
+static struct sony_nc_event *sony_nc_events;
+
+/* Vaio C* --maybe also FE*, N* and AR* ?-- special init sequence
+ * for Fn keys
+ */
+static int sony_nc_C_enable(const struct dmi_system_id *id)
+{
+	int result = 0;
+
+	printk(KERN_NOTICE DRV_PFX "detected %s\n", id->ident);
+
+	sony_nc_events = id->driver_data;
+
+	if (acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0x4, &result) < 0
+			|| acpi_callsetfunc(sony_nc_acpi_handle, "SN07", 0x2, &result) < 0
+			|| acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0x10, &result) < 0
+			|| acpi_callsetfunc(sony_nc_acpi_handle, "SN07", 0x0, &result) < 0
+			|| acpi_callsetfunc(sony_nc_acpi_handle, "SN03", 0x2, &result) < 0
+			|| acpi_callsetfunc(sony_nc_acpi_handle, "SN07", 0x101, &result) < 0) {
+		printk(KERN_WARNING DRV_PFX "failed to initialize SNC, some "
+				"functionality may be missing\n");
+		return 1;
+	}
+	return 0;
+}
+
+static struct sony_nc_event sony_C_events[] = {
+	{ 0x81, SONYPI_EVENT_FNKEY_F1 },
+	{ 0x01, SONYPI_EVENT_FNKEY_RELEASED },
+	{ 0x85, SONYPI_EVENT_FNKEY_F5 },
+	{ 0x05, SONYPI_EVENT_FNKEY_RELEASED },
+	{ 0x86, SONYPI_EVENT_FNKEY_F6 },
+	{ 0x06, SONYPI_EVENT_FNKEY_RELEASED },
+	{ 0x87, SONYPI_EVENT_FNKEY_F7 },
+	{ 0x07, SONYPI_EVENT_FNKEY_RELEASED },
+	{ 0x8A, SONYPI_EVENT_FNKEY_F10 },
+	{ 0x0A, SONYPI_EVENT_FNKEY_RELEASED },
+	{ 0x8C, SONYPI_EVENT_FNKEY_F12 },
+	{ 0x0C, SONYPI_EVENT_FNKEY_RELEASED },
+	{ 0, 0 },
+};
+
+/* SNC-only model map */
+static const struct dmi_system_id sony_nc_ids[] = {
+		{
+			.ident = "Sony Vaio FE Series",
+			.callback = sony_nc_C_enable,
+			.driver_data = sony_C_events,
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FE"),
+			},
+		},
+		{
+			.ident = "Sony Vaio FZ Series",
+			.callback = sony_nc_C_enable,
+			.driver_data = sony_C_events,
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ"),
+			},
+		},
+		{
+			.ident = "Sony Vaio C Series",
+			.callback = sony_nc_C_enable,
+			.driver_data = sony_C_events,
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "VGN-C"),
+			},
+		},
+		{
+			.ident = "Sony Vaio N Series",
+			.callback = sony_nc_C_enable,
+			.driver_data = sony_C_events,
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "VGN-N"),
+			},
+		},
+		{ }
+};
+
+/*
+ * ACPI callbacks
+ */
+static void sony_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+	struct sony_nc_event *evmap;
+	u32 ev = event;
+	int result;
+
+	if (ev == 0x92) {
+		/* read the key pressed from EC.GECR
+		 * A call to SN07 with 0x0202 will do it as well respecting
+		 * the current protocol on different OSes
+		 *
+		 * Note: the path for GECR may be
+		 *   \_SB.PCI0.LPCB.EC (C, FE, AR, N and friends)
+		 *   \_SB.PCI0.PIB.EC0 (VGN-FR notifications are sent directly, no GECR)
+		 *
+		 * TODO: we may want to do the same for the older GHKE -need
+		 *       dmi list- so this snippet may become one more callback.
+		 */
+		if (acpi_callsetfunc(handle, "SN07", 0x0202, &result) < 0)
+			dprintk("sony_acpi_notify, unable to decode event 0x%.2x\n", ev);
+		else
+			ev = result & 0xFF;
+	}
+
+	if (sony_nc_events)
+		for (evmap = sony_nc_events; evmap->event; evmap++) {
+			if (evmap->data == ev) {
+				ev = evmap->event;
+				break;
+			}
+		}
+
+	dprintk("sony_acpi_notify, event: 0x%.2x\n", ev);
+	sony_laptop_report_input_event(ev);
+	acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev);
+}
+
+static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
+				      void *context, void **return_value)
+{
+	struct acpi_device_info *info;
+	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+
+	if (ACPI_SUCCESS(acpi_get_object_info(handle, &buffer))) {
+		info = buffer.pointer;
+
+		printk(KERN_WARNING DRV_PFX "method: name: %4.4s, args %X\n",
+			(char *)&info->name, info->param_count);
+
+		kfree(buffer.pointer);
+	}
+
+	return AE_OK;
+}
+
+/*
+ * ACPI device
+ */
+static int sony_nc_resume(struct acpi_device *device)
+{
+	struct sony_nc_value *item;
+
+	for (item = sony_nc_values; item->name; item++) {
+		int ret;
+
+		if (!item->valid)
+			continue;
+		ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset,
+				       item->value, NULL);
+		if (ret < 0) {
+			printk("%s: %d\n", __func__, ret);
+			break;
+		}
+	}
+
+	/* set the last requested brightness level */
+	if (sony_backlight_device &&
+			sony_backlight_update_status(sony_backlight_device) < 0)
+		printk(KERN_WARNING DRV_PFX "unable to restore brightness level\n");
+
+	/* re-initialize models with specific requirements */
+	dmi_check_system(sony_nc_ids);
+
+	return 0;
+}
+
+static int sony_nc_add(struct acpi_device *device)
+{
+	acpi_status status;
+	int result = 0;
+	acpi_handle handle;
+	struct sony_nc_value *item;
+
+	printk(KERN_INFO DRV_PFX "%s v%s.\n",
+		SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
+
+	sony_nc_acpi_device = device;
+	strcpy(acpi_device_class(device), "sony/hotkey");
+
+	sony_nc_acpi_handle = device->handle;
+
+	/* read device status */
+	result = acpi_bus_get_status(device);
+	/* bail IFF the above call was successful and the device is not present */
+	if (!result && !device->status.present) {
+		dprintk("Device not present\n");
+		result = -ENODEV;
+		goto outwalk;
+	}
+
+	if (debug) {
+		status = acpi_walk_namespace(ACPI_TYPE_METHOD, sony_nc_acpi_handle,
+					     1, sony_walk_callback, NULL, NULL);
+		if (ACPI_FAILURE(status)) {
+			printk(KERN_WARNING DRV_PFX "unable to walk acpi resources\n");
+			result = -ENODEV;
+			goto outwalk;
+		}
+	}
+
+	/* try to _INI the device if such a method exists (ACPI spec 3.0,
+	 * section 6.5.1 is respected: device presence was checked above) */
+	if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, METHOD_NAME__INI, &handle))) {
+		dprintk("Invoking _INI\n");
+		if (ACPI_FAILURE(acpi_evaluate_object(sony_nc_acpi_handle, METHOD_NAME__INI,
+						NULL, NULL)))
+			dprintk("_INI Method failed\n");
+	}
+
+	/* setup input devices and helper fifo */
+	result = sony_laptop_setup_input(device);
+	if (result) {
+		printk(KERN_ERR DRV_PFX
+				"Unable to create input devices.\n");
+		goto outwalk;
+	}
+
+	status = acpi_install_notify_handler(sony_nc_acpi_handle,
+					     ACPI_DEVICE_NOTIFY,
+					     sony_acpi_notify, NULL);
+	if (ACPI_FAILURE(status)) {
+		printk(KERN_WARNING DRV_PFX "unable to install notify handler (%u)\n", status);
+		result = -ENODEV;
+		goto outinput;
+	}
+
+	if (acpi_video_backlight_support()) {
+		printk(KERN_INFO DRV_PFX "brightness ignored, must be "
+		       "controlled by ACPI video driver\n");
+	} else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
+						&handle))) {
+		sony_backlight_device = backlight_device_register("sony", NULL,
+								  NULL,
+								  &sony_backlight_ops);
+
+		if (IS_ERR(sony_backlight_device)) {
+			printk(KERN_WARNING DRV_PFX "unable to register backlight device\n");
+			sony_backlight_device = NULL;
+		} else {
+			sony_backlight_device->props.brightness =
+			    sony_backlight_get_brightness
+			    (sony_backlight_device);
+			sony_backlight_device->props.max_brightness =
+			    SONY_MAX_BRIGHTNESS - 1;
+		}
+
+	}
+
+	/* initialize models with specific requirements */
+	dmi_check_system(sony_nc_ids);
+
+	result = sony_pf_add();
+	if (result)
+		goto outbacklight;
+
+	/* create sony_pf sysfs attributes related to the SNC device */
+	for (item = sony_nc_values; item->name; ++item) {
+
+		if (!debug && item->debug)
+			continue;
+
+		/* find the available acpiget as described in the DSDT */
+		for (; item->acpiget && *item->acpiget; ++item->acpiget) {
+			if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
+							 *item->acpiget,
+							 &handle))) {
+				dprintk("Found %s getter: %s\n",
+						item->name, *item->acpiget);
+				item->devattr.attr.mode |= S_IRUGO;
+				break;
+			}
+		}
+
+		/* find the available acpiset as described in the DSDT */
+		for (; item->acpiset && *item->acpiset; ++item->acpiset) {
+			if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
+							 *item->acpiset,
+							 &handle))) {
+				dprintk("Found %s setter: %s\n",
+						item->name, *item->acpiset);
+				item->devattr.attr.mode |= S_IWUSR;
+				break;
+			}
+		}
+
+		if (item->devattr.attr.mode != 0) {
+			result =
+			    device_create_file(&sony_pf_device->dev,
+					       &item->devattr);
+			if (result)
+				goto out_sysfs;
+		}
+	}
+
+	return 0;
+
+out_sysfs:
+	for (item = sony_nc_values; item->name; ++item) {
+		device_remove_file(&sony_pf_device->dev, &item->devattr);
+	}
+	sony_pf_remove();
+
+outbacklight:
+	if (sony_backlight_device)
+		backlight_device_unregister(sony_backlight_device);
+
+	status = acpi_remove_notify_handler(sony_nc_acpi_handle,
+					    ACPI_DEVICE_NOTIFY,
+					    sony_acpi_notify);
+	if (ACPI_FAILURE(status))
+		printk(KERN_WARNING DRV_PFX "unable to remove notify handler\n");
+
+outinput:
+	sony_laptop_remove_input();
+
+outwalk:
+	return result;
+}
+
+static int sony_nc_remove(struct acpi_device *device, int type)
+{
+	acpi_status status;
+	struct sony_nc_value *item;
+
+	if (sony_backlight_device)
+		backlight_device_unregister(sony_backlight_device);
+
+	sony_nc_acpi_device = NULL;
+
+	status = acpi_remove_notify_handler(sony_nc_acpi_handle,
+					    ACPI_DEVICE_NOTIFY,
+					    sony_acpi_notify);
+	if (ACPI_FAILURE(status))
+		printk(KERN_WARNING DRV_PFX "unable to remove notify handler\n");
+
+	for (item = sony_nc_values; item->name; ++item) {
+		device_remove_file(&sony_pf_device->dev, &item->devattr);
+	}
+
+	sony_pf_remove();
+	sony_laptop_remove_input();
+	dprintk(SONY_NC_DRIVER_NAME " removed.\n");
+
+	return 0;
+}
+
+static const struct acpi_device_id sony_device_ids[] = {
+	{SONY_NC_HID, 0},
+	{SONY_PIC_HID, 0},
+	{"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, sony_device_ids);
+
+static const struct acpi_device_id sony_nc_device_ids[] = {
+	{SONY_NC_HID, 0},
+	{"", 0},
+};
+
+static struct acpi_driver sony_nc_driver = {
+	.name = SONY_NC_DRIVER_NAME,
+	.class = SONY_NC_CLASS,
+	.ids = sony_nc_device_ids,
+	.owner = THIS_MODULE,
+	.ops = {
+		.add = sony_nc_add,
+		.remove = sony_nc_remove,
+		.resume = sony_nc_resume,
+		},
+};
+
+/*********** SPIC (SNY6001) Device ***********/
+
+#define SONYPI_DEVICE_TYPE1	0x00000001
+#define SONYPI_DEVICE_TYPE2	0x00000002
+#define SONYPI_DEVICE_TYPE3	0x00000004
+#define SONYPI_DEVICE_TYPE4	0x00000008
+
+#define SONYPI_TYPE1_OFFSET	0x04
+#define SONYPI_TYPE2_OFFSET	0x12
+#define SONYPI_TYPE3_OFFSET	0x12
+#define SONYPI_TYPE4_OFFSET	0x12
+
+struct sony_pic_ioport {
+	struct acpi_resource_io	io1;
+	struct acpi_resource_io	io2;
+	struct list_head	list;
+};
+
+struct sony_pic_irq {
+	struct acpi_resource_irq	irq;
+	struct list_head		list;
+};
+
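+/*
+ * How these tables are used (see sony_pic_irq): an entry matches when all
+ * bits of 'data' are set in the byte read from the second io port (or from
+ * io1 + evport_offset) and its class is enabled in the driver 'mask'
+ * setting; 'events' then maps the raw event byte to a SONYPI_EVENT_* value.
+ */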
+struct sonypi_eventtypes {
+	u8			data;
+	unsigned long		mask;
+	struct sonypi_event	*events;
+};
+
+struct device_ctrl {
+	int				model;
+	int				(*handle_irq)(const u8, const u8);
+	u16				evport_offset;
+	u8				has_camera;
+	u8				has_bluetooth;
+	u8				has_wwan;
+	struct sonypi_eventtypes	*event_types;
+};
+
+struct sony_pic_dev {
+	struct device_ctrl	*control;
+	struct acpi_device	*acpi_dev;
+	struct sony_pic_irq	*cur_irq;
+	struct sony_pic_ioport	*cur_ioport;
+	struct list_head	interrupts;
+	struct list_head	ioports;
+	struct mutex		lock;
+	u8			camera_power;
+	u8			bluetooth_power;
+	u8			wwan_power;
+};
+
+static struct sony_pic_dev spic_dev = {
+	.interrupts	= LIST_HEAD_INIT(spic_dev.interrupts),
+	.ioports	= LIST_HEAD_INIT(spic_dev.ioports),
+};
+
+/* Event masks */
+#define SONYPI_JOGGER_MASK			0x00000001
+#define SONYPI_CAPTURE_MASK			0x00000002
+#define SONYPI_FNKEY_MASK			0x00000004
+#define SONYPI_BLUETOOTH_MASK			0x00000008
+#define SONYPI_PKEY_MASK			0x00000010
+#define SONYPI_BACK_MASK			0x00000020
+#define SONYPI_HELP_MASK			0x00000040
+#define SONYPI_LID_MASK				0x00000080
+#define SONYPI_ZOOM_MASK			0x00000100
+#define SONYPI_THUMBPHRASE_MASK			0x00000200
+#define SONYPI_MEYE_MASK			0x00000400
+#define SONYPI_MEMORYSTICK_MASK			0x00000800
+#define SONYPI_BATTERY_MASK			0x00001000
+#define SONYPI_WIRELESS_MASK			0x00002000
+
+struct sonypi_event {
+	u8	data;
+	u8	event;
+};
+
+/* The set of possible button release events */
+static struct sonypi_event sonypi_releaseev[] = {
+	{ 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED },
+	{ 0, 0 }
+};
+
+/* The set of possible jogger events  */
+static struct sonypi_event sonypi_joggerev[] = {
+	{ 0x1f, SONYPI_EVENT_JOGDIAL_UP },
+	{ 0x01, SONYPI_EVENT_JOGDIAL_DOWN },
+	{ 0x5f, SONYPI_EVENT_JOGDIAL_UP_PRESSED },
+	{ 0x41, SONYPI_EVENT_JOGDIAL_DOWN_PRESSED },
+	{ 0x1e, SONYPI_EVENT_JOGDIAL_FAST_UP },
+	{ 0x02, SONYPI_EVENT_JOGDIAL_FAST_DOWN },
+	{ 0x5e, SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED },
+	{ 0x42, SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED },
+	{ 0x1d, SONYPI_EVENT_JOGDIAL_VFAST_UP },
+	{ 0x03, SONYPI_EVENT_JOGDIAL_VFAST_DOWN },
+	{ 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED },
+	{ 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED },
+	{ 0x40, SONYPI_EVENT_JOGDIAL_PRESSED },
+	{ 0, 0 }
+};
+
+/* The set of possible capture button events */
+static struct sonypi_event sonypi_captureev[] = {
+	{ 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED },
+	{ 0x07, SONYPI_EVENT_CAPTURE_PRESSED },
+	{ 0x40, SONYPI_EVENT_CAPTURE_PRESSED },
+	{ 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED },
+	{ 0, 0 }
+};
+
+/* The set of possible fnkeys events */
+static struct sonypi_event sonypi_fnkeyev[] = {
+	{ 0x10, SONYPI_EVENT_FNKEY_ESC },
+	{ 0x11, SONYPI_EVENT_FNKEY_F1 },
+	{ 0x12, SONYPI_EVENT_FNKEY_F2 },
+	{ 0x13, SONYPI_EVENT_FNKEY_F3 },
+	{ 0x14, SONYPI_EVENT_FNKEY_F4 },
+	{ 0x15, SONYPI_EVENT_FNKEY_F5 },
+	{ 0x16, SONYPI_EVENT_FNKEY_F6 },
+	{ 0x17, SONYPI_EVENT_FNKEY_F7 },
+	{ 0x18, SONYPI_EVENT_FNKEY_F8 },
+	{ 0x19, SONYPI_EVENT_FNKEY_F9 },
+	{ 0x1a, SONYPI_EVENT_FNKEY_F10 },
+	{ 0x1b, SONYPI_EVENT_FNKEY_F11 },
+	{ 0x1c, SONYPI_EVENT_FNKEY_F12 },
+	{ 0x1f, SONYPI_EVENT_FNKEY_RELEASED },
+	{ 0x21, SONYPI_EVENT_FNKEY_1 },
+	{ 0x22, SONYPI_EVENT_FNKEY_2 },
+	{ 0x31, SONYPI_EVENT_FNKEY_D },
+	{ 0x32, SONYPI_EVENT_FNKEY_E },
+	{ 0x33, SONYPI_EVENT_FNKEY_F },
+	{ 0x34, SONYPI_EVENT_FNKEY_S },
+	{ 0x35, SONYPI_EVENT_FNKEY_B },
+	{ 0x36, SONYPI_EVENT_FNKEY_ONLY },
+	{ 0, 0 }
+};
+
+/* The set of possible program key events */
+static struct sonypi_event sonypi_pkeyev[] = {
+	{ 0x01, SONYPI_EVENT_PKEY_P1 },
+	{ 0x02, SONYPI_EVENT_PKEY_P2 },
+	{ 0x04, SONYPI_EVENT_PKEY_P3 },
+	{ 0, 0 }
+};
+
+/* The set of possible bluetooth events */
+static struct sonypi_event sonypi_blueev[] = {
+	{ 0x55, SONYPI_EVENT_BLUETOOTH_PRESSED },
+	{ 0x59, SONYPI_EVENT_BLUETOOTH_ON },
+	{ 0x5a, SONYPI_EVENT_BLUETOOTH_OFF },
+	{ 0, 0 }
+};
+
+/* The set of possible wireless events */
+static struct sonypi_event sonypi_wlessev[] = {
+	{ 0x59, SONYPI_EVENT_WIRELESS_ON },
+	{ 0x5a, SONYPI_EVENT_WIRELESS_OFF },
+	{ 0, 0 }
+};
+
+/* The set of possible back button events */
+static struct sonypi_event sonypi_backev[] = {
+	{ 0x20, SONYPI_EVENT_BACK_PRESSED },
+	{ 0, 0 }
+};
+
+/* The set of possible help button events */
+static struct sonypi_event sonypi_helpev[] = {
+	{ 0x3b, SONYPI_EVENT_HELP_PRESSED },
+	{ 0, 0 }
+};
+
+
+/* The set of possible lid events */
+static struct sonypi_event sonypi_lidev[] = {
+	{ 0x51, SONYPI_EVENT_LID_CLOSED },
+	{ 0x50, SONYPI_EVENT_LID_OPENED },
+	{ 0, 0 }
+};
+
+/* The set of possible zoom events */
+static struct sonypi_event sonypi_zoomev[] = {
+	{ 0x39, SONYPI_EVENT_ZOOM_PRESSED },
+	{ 0x10, SONYPI_EVENT_ZOOM_IN_PRESSED },
+	{ 0x20, SONYPI_EVENT_ZOOM_OUT_PRESSED },
+	{ 0, 0 }
+};
+
+/* The set of possible thumbphrase events */
+static struct sonypi_event sonypi_thumbphraseev[] = {
+	{ 0x3a, SONYPI_EVENT_THUMBPHRASE_PRESSED },
+	{ 0, 0 }
+};
+
+/* The set of possible motioneye camera events */
+static struct sonypi_event sonypi_meyeev[] = {
+	{ 0x00, SONYPI_EVENT_MEYE_FACE },
+	{ 0x01, SONYPI_EVENT_MEYE_OPPOSITE },
+	{ 0, 0 }
+};
+
+/* The set of possible memorystick events */
+static struct sonypi_event sonypi_memorystickev[] = {
+	{ 0x53, SONYPI_EVENT_MEMORYSTICK_INSERT },
+	{ 0x54, SONYPI_EVENT_MEMORYSTICK_EJECT },
+	{ 0, 0 }
+};
+
+/* The set of possible battery events */
+static struct sonypi_event sonypi_batteryev[] = {
+	{ 0x20, SONYPI_EVENT_BATTERY_INSERT },
+	{ 0x30, SONYPI_EVENT_BATTERY_REMOVE },
+	{ 0, 0 }
+};
+
+static struct sonypi_eventtypes type1_events[] = {
+	{ 0, 0xffffffff, sonypi_releaseev },
+	{ 0x70, SONYPI_MEYE_MASK, sonypi_meyeev },
+	{ 0x30, SONYPI_LID_MASK, sonypi_lidev },
+	{ 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev },
+	{ 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev },
+	{ 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+	{ 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
+	{ 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev },
+	{ 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+	{ 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev },
+	{ 0 },
+};
+static struct sonypi_eventtypes type2_events[] = {
+	{ 0, 0xffffffff, sonypi_releaseev },
+	{ 0x38, SONYPI_LID_MASK, sonypi_lidev },
+	{ 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev },
+	{ 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev },
+	{ 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+	{ 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
+	{ 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
+	{ 0x11, SONYPI_BACK_MASK, sonypi_backev },
+	{ 0x21, SONYPI_HELP_MASK, sonypi_helpev },
+	{ 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
+	{ 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
+	{ 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+	{ 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+	{ 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
+	{ 0 },
+};
+static struct sonypi_eventtypes type3_events[] = {
+	{ 0, 0xffffffff, sonypi_releaseev },
+	{ 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+	{ 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
+	{ 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+	{ 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+	{ 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
+	{ 0 },
+};
+static struct sonypi_eventtypes type4_events[] = {
+	{ 0, 0xffffffff, sonypi_releaseev },
+	{ 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+	{ 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
+	{ 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+	{ 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+	{ 0x05, SONYPI_PKEY_MASK, sonypi_pkeyev },
+	{ 0x05, SONYPI_ZOOM_MASK, sonypi_zoomev },
+	{ 0x05, SONYPI_CAPTURE_MASK, sonypi_captureev },
+	{ 0 },
+};
+
+/* low level spic calls */
+#define ITERATIONS_LONG		10000
+#define ITERATIONS_SHORT	10
+#define wait_on_command(command, iterations) {				\
+	unsigned int n = iterations;					\
+	while (--n && (command))					\
+		udelay(1);						\
+	if (!n)								\
+		dprintk("command failed at %s : %s (line %d)\n",	\
+				__FILE__, __func__, __LINE__);	\
+}
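+
+/*
+ * Typical use: wait_on_command(inb_p(port + 4) & 2, ITERATIONS_LONG)
+ * polls the condition with 1us delays until it clears or the iteration
+ * budget runs out, logging a debug message on timeout.
+ */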
+
+static u8 sony_pic_call1(u8 dev)
+{
+	u8 v1, v2;
+
+	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
+			ITERATIONS_LONG);
+	outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
+	v1 = inb_p(spic_dev.cur_ioport->io1.minimum + 4);
+	v2 = inb_p(spic_dev.cur_ioport->io1.minimum);
+	dprintk("sony_pic_call1(0x%.2x): 0x%.4x\n", dev, (v2 << 8) | v1);
+	return v2;
+}
+
+static u8 sony_pic_call2(u8 dev, u8 fn)
+{
+	u8 v1;
+
+	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
+			ITERATIONS_LONG);
+	outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
+	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
+			ITERATIONS_LONG);
+	outb(fn, spic_dev.cur_ioport->io1.minimum);
+	v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
+	dprintk("sony_pic_call2(0x%.2x - 0x%.2x): 0x%.4x\n", dev, fn, v1);
+	return v1;
+}
+
+static u8 sony_pic_call3(u8 dev, u8 fn, u8 v)
+{
+	u8 v1;
+
+	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
+	outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
+	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
+	outb(fn, spic_dev.cur_ioport->io1.minimum);
+	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
+	outb(v, spic_dev.cur_ioport->io1.minimum);
+	v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
+	dprintk("sony_pic_call3(0x%.2x - 0x%.2x - 0x%.2x): 0x%.4x\n",
+			dev, fn, v, v1);
+	return v1;
+}
+
+/*
+ * minidrivers for SPIC models
+ */
+static int type4_handle_irq(const u8 data_mask, const u8 ev)
+{
+	/*
+	 * On some Type4 models, 0x31 means we have to take some extra action
+	 * and wait for the next irq; the device will then generate a new irq
+	 * from which we can read new data:
+	 *  - 0x5c and 0x5f requires 0xA0
+	 *  - 0x61 requires 0xB3
+	 */
+	if (data_mask == 0x31) {
+		if (ev == 0x5c || ev == 0x5f)
+			sony_pic_call1(0xA0);
+		else if (ev == 0x61)
+			sony_pic_call1(0xB3);
+		return 0;
+	}
+	return 1;
+}
+
+static struct device_ctrl spic_types[] = {
+	{
+		.model = SONYPI_DEVICE_TYPE1,
+		.handle_irq = NULL,
+		.evport_offset = SONYPI_TYPE1_OFFSET,
+		.event_types = type1_events,
+	},
+	{
+		.model = SONYPI_DEVICE_TYPE2,
+		.handle_irq = NULL,
+		.evport_offset = SONYPI_TYPE2_OFFSET,
+		.event_types = type2_events,
+	},
+	{
+		.model = SONYPI_DEVICE_TYPE3,
+		.handle_irq = NULL,
+		.evport_offset = SONYPI_TYPE3_OFFSET,
+		.event_types = type3_events,
+	},
+	{
+		.model = SONYPI_DEVICE_TYPE4,
+		.handle_irq = type4_handle_irq,
+		.evport_offset = SONYPI_TYPE4_OFFSET,
+		.event_types = type4_events,
+	},
+};
+
+static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
+{
+	struct pci_dev *pcidev;
+
+	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+			PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
+	if (pcidev) {
+		dev->control = &spic_types[0];
+		goto out;
+	}
+
+	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+			PCI_DEVICE_ID_INTEL_ICH6_1, NULL);
+	if (pcidev) {
+		dev->control = &spic_types[2];
+		goto out;
+	}
+
+	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+			PCI_DEVICE_ID_INTEL_ICH7_1, NULL);
+	if (pcidev) {
+		dev->control = &spic_types[3];
+		goto out;
+	}
+
+	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+			PCI_DEVICE_ID_INTEL_ICH8_4, NULL);
+	if (pcidev) {
+		dev->control = &spic_types[3];
+		goto out;
+	}
+
+	/* default */
+	dev->control = &spic_types[1];
+
+out:
+	if (pcidev)
+		pci_dev_put(pcidev);
+
+	printk(KERN_INFO DRV_PFX "detected Type%d model\n",
+			dev->control->model == SONYPI_DEVICE_TYPE1 ? 1 :
+			dev->control->model == SONYPI_DEVICE_TYPE2 ? 2 :
+			dev->control->model == SONYPI_DEVICE_TYPE3 ? 3 : 4);
+}
+
+/* camera tests and poweron/poweroff */
+#define SONYPI_CAMERA_PICTURE		5
+#define SONYPI_CAMERA_CONTROL		0x10
+
+#define SONYPI_CAMERA_BRIGHTNESS		0
+#define SONYPI_CAMERA_CONTRAST			1
+#define SONYPI_CAMERA_HUE			2
+#define SONYPI_CAMERA_COLOR			3
+#define SONYPI_CAMERA_SHARPNESS			4
+
+#define SONYPI_CAMERA_EXPOSURE_MASK		0xC
+#define SONYPI_CAMERA_WHITE_BALANCE_MASK	0x3
+#define SONYPI_CAMERA_PICTURE_MODE_MASK		0x30
+#define SONYPI_CAMERA_MUTE_MASK			0x40
+
+/* the rest don't need a loop until not 0xff */
+#define SONYPI_CAMERA_AGC			6
+#define SONYPI_CAMERA_AGC_MASK			0x30
+#define SONYPI_CAMERA_SHUTTER_MASK 		0x7
+
+#define SONYPI_CAMERA_SHUTDOWN_REQUEST		7
+#define SONYPI_CAMERA_CONTROL			0x10
+
+#define SONYPI_CAMERA_STATUS 			7
+#define SONYPI_CAMERA_STATUS_READY 		0x2
+#define SONYPI_CAMERA_STATUS_POSITION		0x4
+
+#define SONYPI_DIRECTION_BACKWARDS 		0x4
+
+#define SONYPI_CAMERA_REVISION 			8
+#define SONYPI_CAMERA_ROMVERSION 		9
+
+static int __sony_pic_camera_ready(void)
+{
+	u8 v;
+
+	v = sony_pic_call2(0x8f, SONYPI_CAMERA_STATUS);
+	return (v != 0xff && (v & SONYPI_CAMERA_STATUS_READY));
+}
+
+static int __sony_pic_camera_off(void)
+{
+	if (!camera) {
+		printk(KERN_WARNING DRV_PFX "camera control not enabled\n");
+		return -ENODEV;
+	}
+
+	wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_PICTURE,
+				SONYPI_CAMERA_MUTE_MASK),
+			ITERATIONS_SHORT);
+
+	if (spic_dev.camera_power) {
+		sony_pic_call2(0x91, 0);
+		spic_dev.camera_power = 0;
+	}
+	return 0;
+}
+
+static int __sony_pic_camera_on(void)
+{
+	int i, j, x;
+
+	if (!camera) {
+		printk(KERN_WARNING DRV_PFX "camera control not enabled\n");
+		return -ENODEV;
+	}
+
+	if (spic_dev.camera_power)
+		return 0;
+
+	for (j = 5; j > 0; j--) {
+
+		for (x = 0; x < 100 && sony_pic_call2(0x91, 0x1); x++)
+			msleep(10);
+		sony_pic_call1(0x93);
+
+		for (i = 400; i > 0; i--) {
+			if (__sony_pic_camera_ready())
+				break;
+			msleep(10);
+		}
+		if (i)
+			break;
+	}
+
+	if (j == 0) {
+		printk(KERN_WARNING DRV_PFX "failed to power on camera\n");
+		return -ENODEV;
+	}
+
+	wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_CONTROL,
+				0x5a),
+			ITERATIONS_SHORT);
+
+	spic_dev.camera_power = 1;
+	return 0;
+}
+
+/* External camera command (exported to the motion eye v4l driver) */
+int sony_pic_camera_command(int command, u8 value)
+{
+	if (!camera)
+		return -EIO;
+
+	mutex_lock(&spic_dev.lock);
+
+	switch (command) {
+	case SONY_PIC_COMMAND_SETCAMERA:
+		if (value)
+			__sony_pic_camera_on();
+		else
+			__sony_pic_camera_off();
+		break;
+	case SONY_PIC_COMMAND_SETCAMERABRIGHTNESS:
+		wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_BRIGHTNESS, value),
+				ITERATIONS_SHORT);
+		break;
+	case SONY_PIC_COMMAND_SETCAMERACONTRAST:
+		wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_CONTRAST, value),
+				ITERATIONS_SHORT);
+		break;
+	case SONY_PIC_COMMAND_SETCAMERAHUE:
+		wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_HUE, value),
+				ITERATIONS_SHORT);
+		break;
+	case SONY_PIC_COMMAND_SETCAMERACOLOR:
+		wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_COLOR, value),
+				ITERATIONS_SHORT);
+		break;
+	case SONY_PIC_COMMAND_SETCAMERASHARPNESS:
+		wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_SHARPNESS, value),
+				ITERATIONS_SHORT);
+		break;
+	case SONY_PIC_COMMAND_SETCAMERAPICTURE:
+		wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_PICTURE, value),
+				ITERATIONS_SHORT);
+		break;
+	case SONY_PIC_COMMAND_SETCAMERAAGC:
+		wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_AGC, value),
+				ITERATIONS_SHORT);
+		break;
+	default:
+		printk(KERN_ERR DRV_PFX "sony_pic_camera_command invalid: %d\n",
+		       command);
+		break;
+	}
+	mutex_unlock(&spic_dev.lock);
+	return 0;
+}
+EXPORT_SYMBOL(sony_pic_camera_command);
+
+/* gprs/edge modem (SZ460N and SZ210P), thanks to Joshua Wise */
+static void sony_pic_set_wwanpower(u8 state)
+{
+	state = !!state;
+	mutex_lock(&spic_dev.lock);
+	if (spic_dev.wwan_power == state) {
+		mutex_unlock(&spic_dev.lock);
+		return;
+	}
+	sony_pic_call2(0xB0, state);
+	spic_dev.wwan_power = state;
+	mutex_unlock(&spic_dev.lock);
+}
+
+static ssize_t sony_pic_wwanpower_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buffer, size_t count)
+{
+	unsigned long value;
+	if (count > 31)
+		return -EINVAL;
+
+	value = simple_strtoul(buffer, NULL, 10);
+	sony_pic_set_wwanpower(value);
+
+	return count;
+}
+
+static ssize_t sony_pic_wwanpower_show(struct device *dev,
+		struct device_attribute *attr, char *buffer)
+{
+	ssize_t count;
+	mutex_lock(&spic_dev.lock);
+	count = snprintf(buffer, PAGE_SIZE, "%d\n", spic_dev.wwan_power);
+	mutex_unlock(&spic_dev.lock);
+	return count;
+}
+
+/* bluetooth subsystem power state */
+static void __sony_pic_set_bluetoothpower(u8 state)
+{
+	state = !!state;
+	if (spic_dev.bluetooth_power == state)
+		return;
+	sony_pic_call2(0x96, state);
+	sony_pic_call1(0x82);
+	spic_dev.bluetooth_power = state;
+}
+
+static ssize_t sony_pic_bluetoothpower_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buffer, size_t count)
+{
+	unsigned long value;
+	if (count > 31)
+		return -EINVAL;
+
+	value = simple_strtoul(buffer, NULL, 10);
+	mutex_lock(&spic_dev.lock);
+	__sony_pic_set_bluetoothpower(value);
+	mutex_unlock(&spic_dev.lock);
+
+	return count;
+}
+
+static ssize_t sony_pic_bluetoothpower_show(struct device *dev,
+		struct device_attribute *attr, char *buffer)
+{
+	ssize_t count = 0;
+	mutex_lock(&spic_dev.lock);
+	count = snprintf(buffer, PAGE_SIZE, "%d\n", spic_dev.bluetooth_power);
+	mutex_unlock(&spic_dev.lock);
+	return count;
+}
+
+/* fan speed */
+/* FAN0 information (reverse engineered from ACPI tables) */
+#define SONY_PIC_FAN0_STATUS	0x93
+static int sony_pic_set_fanspeed(unsigned long value)
+{
+	return ec_write(SONY_PIC_FAN0_STATUS, value);
+}
+
+static int sony_pic_get_fanspeed(u8 *value)
+{
+	return ec_read(SONY_PIC_FAN0_STATUS, value);
+}
+
+static ssize_t sony_pic_fanspeed_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buffer, size_t count)
+{
+	unsigned long value;
+	if (count > 31)
+		return -EINVAL;
+
+	value = simple_strtoul(buffer, NULL, 10);
+	if (sony_pic_set_fanspeed(value))
+		return -EIO;
+
+	return count;
+}
+
+static ssize_t sony_pic_fanspeed_show(struct device *dev,
+		struct device_attribute *attr, char *buffer)
+{
+	u8 value = 0;
+	if (sony_pic_get_fanspeed(&value))
+		return -EIO;
+
+	return snprintf(buffer, PAGE_SIZE, "%d\n", value);
+}
+
+#define SPIC_ATTR(_name, _mode)					\
+struct device_attribute spic_attr_##_name = __ATTR(_name,	\
+		_mode, sony_pic_## _name ##_show,		\
+		sony_pic_## _name ##_store)
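+
+/*
+ * e.g. SPIC_ATTR(fanspeed, 0644) declares spic_attr_fanspeed bound to
+ * sony_pic_fanspeed_show()/sony_pic_fanspeed_store().
+ */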
+
+static SPIC_ATTR(bluetoothpower, 0644);
+static SPIC_ATTR(wwanpower, 0644);
+static SPIC_ATTR(fanspeed, 0644);
+
+static struct attribute *spic_attributes[] = {
+	&spic_attr_bluetoothpower.attr,
+	&spic_attr_wwanpower.attr,
+	&spic_attr_fanspeed.attr,
+	NULL
+};
+
+static struct attribute_group spic_attribute_group = {
+	.attrs = spic_attributes
+};
+
+/******** SONYPI compatibility **********/
+#ifdef CONFIG_SONYPI_COMPAT
+
+/* battery / brightness / temperature  addresses */
+#define SONYPI_BAT_FLAGS	0x81
+#define SONYPI_LCD_LIGHT	0x96
+#define SONYPI_BAT1_PCTRM	0xa0
+#define SONYPI_BAT1_LEFT	0xa2
+#define SONYPI_BAT1_MAXRT	0xa4
+#define SONYPI_BAT2_PCTRM	0xa8
+#define SONYPI_BAT2_LEFT	0xaa
+#define SONYPI_BAT2_MAXRT	0xac
+#define SONYPI_BAT1_MAXTK	0xb0
+#define SONYPI_BAT1_FULL	0xb2
+#define SONYPI_BAT2_MAXTK	0xb8
+#define SONYPI_BAT2_FULL	0xba
+#define SONYPI_TEMP_STATUS	0xC1
+
+struct sonypi_compat_s {
+	struct fasync_struct	*fifo_async;
+	struct kfifo		*fifo;
+	spinlock_t		fifo_lock;
+	wait_queue_head_t	fifo_proc_list;
+	atomic_t		open_count;
+};
+static struct sonypi_compat_s sonypi_compat = {
+	.open_count = ATOMIC_INIT(0),
+};
+
+static int sonypi_misc_fasync(int fd, struct file *filp, int on)
+{
+	int retval;
+
+	retval = fasync_helper(fd, filp, on, &sonypi_compat.fifo_async);
+	if (retval < 0)
+		return retval;
+	return 0;
+}
+
+static int sonypi_misc_release(struct inode *inode, struct file *file)
+{
+	atomic_dec(&sonypi_compat.open_count);
+	return 0;
+}
+
+static int sonypi_misc_open(struct inode *inode, struct file *file)
+{
+	/* Flush input queue on first open */
+	lock_kernel();
+	if (atomic_inc_return(&sonypi_compat.open_count) == 1)
+		kfifo_reset(sonypi_compat.fifo);
+	unlock_kernel();
+	return 0;
+}
+
+static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
+				size_t count, loff_t *pos)
+{
+	ssize_t ret;
+	unsigned char c;
+
+	if ((kfifo_len(sonypi_compat.fifo) == 0) &&
+	    (file->f_flags & O_NONBLOCK))
+		return -EAGAIN;
+
+	ret = wait_event_interruptible(sonypi_compat.fifo_proc_list,
+				       kfifo_len(sonypi_compat.fifo) != 0);
+	if (ret)
+		return ret;
+
+	while (ret < count &&
+	       (kfifo_get(sonypi_compat.fifo, &c, sizeof(c)) == sizeof(c))) {
+		if (put_user(c, buf++))
+			return -EFAULT;
+		ret++;
+	}
+
+	if (ret > 0) {
+		struct inode *inode = file->f_path.dentry->d_inode;
+		inode->i_atime = current_fs_time(inode->i_sb);
+	}
+
+	return ret;
+}
+
+static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
+{
+	poll_wait(file, &sonypi_compat.fifo_proc_list, wait);
+	if (kfifo_len(sonypi_compat.fifo))
+		return POLLIN | POLLRDNORM;
+	return 0;
+}
+
+static int ec_read16(u8 addr, u16 *value)
+{
+	u8 val_lb, val_hb;
+	if (ec_read(addr, &val_lb))
+		return -1;
+	if (ec_read(addr + 1, &val_hb))
+		return -1;
+	*value = val_lb | (val_hb << 8);
+	return 0;
+}
+
+static int sonypi_misc_ioctl(struct inode *ip, struct file *fp,
+			     unsigned int cmd, unsigned long arg)
+{
+	int ret = 0;
+	void __user *argp = (void __user *)arg;
+	u8 val8;
+	u16 val16;
+	int value;
+
+	mutex_lock(&spic_dev.lock);
+	switch (cmd) {
+	case SONYPI_IOCGBRT:
+		if (sony_backlight_device == NULL) {
+			ret = -EIO;
+			break;
+		}
+		if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) {
+			ret = -EIO;
+			break;
+		}
+		val8 = ((value & 0xff) - 1) << 5;
+		if (copy_to_user(argp, &val8, sizeof(val8)))
+				ret = -EFAULT;
+		break;
+	case SONYPI_IOCSBRT:
+		if (sony_backlight_device == NULL) {
+			ret = -EIO;
+			break;
+		}
+		if (copy_from_user(&val8, argp, sizeof(val8))) {
+			ret = -EFAULT;
+			break;
+		}
+		if (acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
+				(val8 >> 5) + 1, NULL)) {
+			ret = -EIO;
+			break;
+		}
+		/* sync the backlight device status */
+		sony_backlight_device->props.brightness =
+		    sony_backlight_get_brightness(sony_backlight_device);
+		break;
+	case SONYPI_IOCGBAT1CAP:
+		if (ec_read16(SONYPI_BAT1_FULL, &val16)) {
+			ret = -EIO;
+			break;
+		}
+		if (copy_to_user(argp, &val16, sizeof(val16)))
+			ret = -EFAULT;
+		break;
+	case SONYPI_IOCGBAT1REM:
+		if (ec_read16(SONYPI_BAT1_LEFT, &val16)) {
+			ret = -EIO;
+			break;
+		}
+		if (copy_to_user(argp, &val16, sizeof(val16)))
+			ret = -EFAULT;
+		break;
+	case SONYPI_IOCGBAT2CAP:
+		if (ec_read16(SONYPI_BAT2_FULL, &val16)) {
+			ret = -EIO;
+			break;
+		}
+		if (copy_to_user(argp, &val16, sizeof(val16)))
+			ret = -EFAULT;
+		break;
+	case SONYPI_IOCGBAT2REM:
+		if (ec_read16(SONYPI_BAT2_LEFT, &val16)) {
+			ret = -EIO;
+			break;
+		}
+		if (copy_to_user(argp, &val16, sizeof(val16)))
+			ret = -EFAULT;
+		break;
+	case SONYPI_IOCGBATFLAGS:
+		if (ec_read(SONYPI_BAT_FLAGS, &val8)) {
+			ret = -EIO;
+			break;
+		}
+		val8 &= 0x07;
+		if (copy_to_user(argp, &val8, sizeof(val8)))
+			ret = -EFAULT;
+		break;
+	case SONYPI_IOCGBLUE:
+		val8 = spic_dev.bluetooth_power;
+		if (copy_to_user(argp, &val8, sizeof(val8)))
+			ret = -EFAULT;
+		break;
+	case SONYPI_IOCSBLUE:
+		if (copy_from_user(&val8, argp, sizeof(val8))) {
+			ret = -EFAULT;
+			break;
+		}
+		__sony_pic_set_bluetoothpower(val8);
+		break;
+	/* FAN Controls */
+	case SONYPI_IOCGFAN:
+		if (sony_pic_get_fanspeed(&val8)) {
+			ret = -EIO;
+			break;
+		}
+		if (copy_to_user(argp, &val8, sizeof(val8)))
+			ret = -EFAULT;
+		break;
+	case SONYPI_IOCSFAN:
+		if (copy_from_user(&val8, argp, sizeof(val8))) {
+			ret = -EFAULT;
+			break;
+		}
+		if (sony_pic_set_fanspeed(val8))
+			ret = -EIO;
+		break;
+	/* GET Temperature (useful under APM) */
+	case SONYPI_IOCGTEMP:
+		if (ec_read(SONYPI_TEMP_STATUS, &val8)) {
+			ret = -EIO;
+			break;
+		}
+		if (copy_to_user(argp, &val8, sizeof(val8)))
+			ret = -EFAULT;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	mutex_unlock(&spic_dev.lock);
+	return ret;
+}
+
+static const struct file_operations sonypi_misc_fops = {
+	.owner		= THIS_MODULE,
+	.read		= sonypi_misc_read,
+	.poll		= sonypi_misc_poll,
+	.open		= sonypi_misc_open,
+	.release	= sonypi_misc_release,
+	.fasync		= sonypi_misc_fasync,
+	.ioctl		= sonypi_misc_ioctl,
+};
+
+static struct miscdevice sonypi_misc_device = {
+	.minor		= MISC_DYNAMIC_MINOR,
+	.name		= "sonypi",
+	.fops		= &sonypi_misc_fops,
+};
+
+static void sonypi_compat_report_event(u8 event)
+{
+	kfifo_put(sonypi_compat.fifo, (unsigned char *)&event, sizeof(event));
+	kill_fasync(&sonypi_compat.fifo_async, SIGIO, POLL_IN);
+	wake_up_interruptible(&sonypi_compat.fifo_proc_list);
+}
+
+static int sonypi_compat_init(void)
+{
+	int error;
+
+	spin_lock_init(&sonypi_compat.fifo_lock);
+	sonypi_compat.fifo = kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL,
+					 &sonypi_compat.fifo_lock);
+	if (IS_ERR(sonypi_compat.fifo)) {
+		printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n");
+		return PTR_ERR(sonypi_compat.fifo);
+	}
+
+	init_waitqueue_head(&sonypi_compat.fifo_proc_list);
+
+	if (minor != -1)
+		sonypi_misc_device.minor = minor;
+	error = misc_register(&sonypi_misc_device);
+	if (error) {
+		printk(KERN_ERR DRV_PFX "misc_register failed\n");
+		goto err_free_kfifo;
+	}
+	if (minor == -1)
+		printk(KERN_INFO DRV_PFX "device allocated minor is %d\n",
+		       sonypi_misc_device.minor);
+
+	return 0;
+
+err_free_kfifo:
+	kfifo_free(sonypi_compat.fifo);
+	return error;
+}
+
+static void sonypi_compat_exit(void)
+{
+	misc_deregister(&sonypi_misc_device);
+	kfifo_free(sonypi_compat.fifo);
+}
+#else
+static int sonypi_compat_init(void) { return 0; }
+static void sonypi_compat_exit(void) { }
+static void sonypi_compat_report_event(u8 event) { }
+#endif /* CONFIG_SONYPI_COMPAT */
+
+/*
+ * ACPI callbacks
+ */
+static acpi_status
+sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
+{
+	u32 i;
+	struct sony_pic_dev *dev = (struct sony_pic_dev *)context;
+
+	switch (resource->type) {
+	case ACPI_RESOURCE_TYPE_START_DEPENDENT:
+		{
+			/* start IO enumeration */
+			struct sony_pic_ioport *ioport = kzalloc(sizeof(*ioport), GFP_KERNEL);
+			if (!ioport)
+				return AE_ERROR;
+
+			list_add(&ioport->list, &dev->ioports);
+			return AE_OK;
+		}
+
+	case ACPI_RESOURCE_TYPE_END_DEPENDENT:
+		/* end IO enumeration */
+		return AE_OK;
+
+	case ACPI_RESOURCE_TYPE_IRQ:
+		{
+			struct acpi_resource_irq *p = &resource->data.irq;
+			struct sony_pic_irq *interrupt = NULL;
+			if (!p || !p->interrupt_count) {
+				/*
+				 * IRQ descriptors may have no IRQ# bits set,
+				 * particularly those with _STA disabled
+				 */
+				dprintk("Blank IRQ resource\n");
+				return AE_OK;
+			}
+			for (i = 0; i < p->interrupt_count; i++) {
+				if (!p->interrupts[i]) {
+					printk(KERN_WARNING DRV_PFX
+							"Invalid IRQ %d\n",
+							p->interrupts[i]);
+					continue;
+				}
+				interrupt = kzalloc(sizeof(*interrupt),
+						GFP_KERNEL);
+				if (!interrupt)
+					return AE_ERROR;
+
+				list_add(&interrupt->list, &dev->interrupts);
+				interrupt->irq.triggering = p->triggering;
+				interrupt->irq.polarity = p->polarity;
+				interrupt->irq.sharable = p->sharable;
+				interrupt->irq.interrupt_count = 1;
+				interrupt->irq.interrupts[0] = p->interrupts[i];
+			}
+			return AE_OK;
+		}
+	case ACPI_RESOURCE_TYPE_IO:
+		{
+			struct acpi_resource_io *io = &resource->data.io;
+			struct sony_pic_ioport *ioport =
+				list_first_entry(&dev->ioports, struct sony_pic_ioport, list);
+			if (!io) {
+				dprintk("Blank IO resource\n");
+				return AE_OK;
+			}
+
+			if (!ioport->io1.minimum) {
+				memcpy(&ioport->io1, io, sizeof(*io));
+				dprintk("IO1 at 0x%.4x (0x%.2x)\n", ioport->io1.minimum,
+						ioport->io1.address_length);
+			}
+			else if (!ioport->io2.minimum) {
+				memcpy(&ioport->io2, io, sizeof(*io));
+				dprintk("IO2 at 0x%.4x (0x%.2x)\n", ioport->io2.minimum,
+						ioport->io2.address_length);
+			}
+			else {
+				printk(KERN_ERR DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n");
+				return AE_ERROR;
+			}
+			return AE_OK;
+		}
+	default:
+		dprintk("Resource %d is neither an IRQ nor an IO port\n",
+				resource->type);
+
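+		/* fall through; unknown resource types are skipped with AE_OK */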
+	case ACPI_RESOURCE_TYPE_END_TAG:
+		return AE_OK;
+	}
+	return AE_CTRL_TERMINATE;
+}
+
+static int sony_pic_possible_resources(struct acpi_device *device)
+{
+	int result = 0;
+	acpi_status status = AE_OK;
+
+	if (!device)
+		return -EINVAL;
+
+	/* get device status */
+	/* see acpi_pci_link_get_current acpi_pci_link_get_possible */
+	dprintk("Evaluating _STA\n");
+	result = acpi_bus_get_status(device);
+	if (result) {
+		printk(KERN_WARNING DRV_PFX "Unable to read status\n");
+		goto end;
+	}
+
+	if (!device->status.enabled)
+		dprintk("Device disabled\n");
+	else
+		dprintk("Device enabled\n");
+
+	/*
+	 * Query and parse 'method'
+	 */
+	dprintk("Evaluating %s\n", METHOD_NAME__PRS);
+	status = acpi_walk_resources(device->handle, METHOD_NAME__PRS,
+			sony_pic_read_possible_resource, &spic_dev);
+	if (ACPI_FAILURE(status)) {
+		printk(KERN_WARNING DRV_PFX
+				"Failure evaluating %s\n",
+				METHOD_NAME__PRS);
+		result = -ENODEV;
+	}
+end:
+	return result;
+}
+
+/*
+ *  Disable the spic device by calling its _DIS method
+ */
+static int sony_pic_disable(struct acpi_device *device)
+{
+	acpi_status ret = acpi_evaluate_object(device->handle, "_DIS", NULL,
+					       NULL);
+
+	if (ACPI_FAILURE(ret) && ret != AE_NOT_FOUND)
+		return -ENXIO;
+
+	dprintk("Device disabled\n");
+	return 0;
+}
+
+
+/*
+ *  Based on drivers/acpi/pci_link.c:acpi_pci_link_set
+ *
+ *  Call _SRS to set current resources
+ */
+static int sony_pic_enable(struct acpi_device *device,
+		struct sony_pic_ioport *ioport, struct sony_pic_irq *irq)
+{
+	acpi_status status;
+	int result = 0;
+	/* Type 1 resource layout is:
+	 *    IO
+	 *    IO
+	 *    IRQNoFlags
+	 *    End
+	 *
+	 * Type 2 and 3 resource layout is:
+	 *    IO
+	 *    IRQNoFlags
+	 *    End
+	 */
+	struct {
+		struct acpi_resource res1;
+		struct acpi_resource res2;
+		struct acpi_resource res3;
+		struct acpi_resource res4;
+	} *resource;
+	struct acpi_buffer buffer = { 0, NULL };
+
+	if (!ioport || !irq)
+		return -EINVAL;
+
+	/* init acpi_buffer */
+	resource = kzalloc(sizeof(*resource) + 1, GFP_KERNEL);
+	if (!resource)
+		return -ENOMEM;
+
+	buffer.length = sizeof(*resource) + 1;
+	buffer.pointer = resource;
+
+	/* setup Type 1 resources */
+	if (spic_dev.control->model == SONYPI_DEVICE_TYPE1) {
+
+		/* setup io resources */
+		resource->res1.type = ACPI_RESOURCE_TYPE_IO;
+		resource->res1.length = sizeof(struct acpi_resource);
+		memcpy(&resource->res1.data.io, &ioport->io1,
+				sizeof(struct acpi_resource_io));
+
+		resource->res2.type = ACPI_RESOURCE_TYPE_IO;
+		resource->res2.length = sizeof(struct acpi_resource);
+		memcpy(&resource->res2.data.io, &ioport->io2,
+				sizeof(struct acpi_resource_io));
+
+		/* setup irq resource */
+		resource->res3.type = ACPI_RESOURCE_TYPE_IRQ;
+		resource->res3.length = sizeof(struct acpi_resource);
+		memcpy(&resource->res3.data.irq, &irq->irq,
+				sizeof(struct acpi_resource_irq));
+		/* we requested a shared irq */
+		resource->res3.data.irq.sharable = ACPI_SHARED;
+
+		resource->res4.type = ACPI_RESOURCE_TYPE_END_TAG;
+
+	}
+	/* setup Type 2/3 resources */
+	else {
+		/* setup io resource */
+		resource->res1.type = ACPI_RESOURCE_TYPE_IO;
+		resource->res1.length = sizeof(struct acpi_resource);
+		memcpy(&resource->res1.data.io, &ioport->io1,
+				sizeof(struct acpi_resource_io));
+
+		/* setup irq resource */
+		resource->res2.type = ACPI_RESOURCE_TYPE_IRQ;
+		resource->res2.length = sizeof(struct acpi_resource);
+		memcpy(&resource->res2.data.irq, &irq->irq,
+				sizeof(struct acpi_resource_irq));
+		/* we requested a shared irq */
+		resource->res2.data.irq.sharable = ACPI_SHARED;
+
+		resource->res3.type = ACPI_RESOURCE_TYPE_END_TAG;
+	}
+
+	/* Attempt to set the resource */
+	dprintk("Evaluating _SRS\n");
+	status = acpi_set_current_resources(device->handle, &buffer);
+
+	/* check for total failure */
+	if (ACPI_FAILURE(status)) {
+		printk(KERN_ERR DRV_PFX "Error evaluating _SRS\n");
+		result = -ENODEV;
+		goto end;
+	}
+
+	/* Necessary device initialization calls (from sonypi) */
+	sony_pic_call1(0x82);
+	sony_pic_call2(0x81, 0xff);
+	sony_pic_call1(compat ? 0x92 : 0x82);
+
+end:
+	kfree(resource);
+	return result;
+}
+
+/*****************
+ *
+ * ISR: some event is available
+ *
+ *****************/
+static irqreturn_t sony_pic_irq(int irq, void *dev_id)
+{
+	int i, j;
+	u8 ev = 0;
+	u8 data_mask = 0;
+	u8 device_event = 0;
+
+	struct sony_pic_dev *dev = (struct sony_pic_dev *) dev_id;
+
+	ev = inb_p(dev->cur_ioport->io1.minimum);
+	if (dev->cur_ioport->io2.minimum)
+		data_mask = inb_p(dev->cur_ioport->io2.minimum);
+	else
+		data_mask = inb_p(dev->cur_ioport->io1.minimum +
+				dev->control->evport_offset);
+
+	dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
+			ev, data_mask, dev->cur_ioport->io1.minimum,
+			dev->control->evport_offset);
+
+	if (ev == 0x00 || ev == 0xff)
+		return IRQ_HANDLED;
+
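+	/* Decode the event through the per-model two-level table: first
+	 * match data_mask against an event_types group (if that group is
+	 * enabled in the event mask), then look up the raw event byte in
+	 * that group's event list. */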
+	for (i = 0; dev->control->event_types[i].mask; i++) {
+
+		if ((data_mask & dev->control->event_types[i].data) !=
+		    dev->control->event_types[i].data)
+			continue;
+
+		if (!(mask & dev->control->event_types[i].mask))
+			continue;
+
+		for (j = 0; dev->control->event_types[i].events[j].event; j++) {
+			if (ev == dev->control->event_types[i].events[j].data) {
+				device_event =
+					dev->control->
+						event_types[i].events[j].event;
+				goto found;
+			}
+		}
+	}
+	/* Still not able to decode the event; try to pass
+	 * it over to the minidriver
+	 */
+	if (dev->control->handle_irq &&
+			dev->control->handle_irq(data_mask, ev) == 0)
+		return IRQ_HANDLED;
+
+	dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
+			ev, data_mask, dev->cur_ioport->io1.minimum,
+			dev->control->evport_offset);
+	return IRQ_HANDLED;
+
+found:
+	sony_laptop_report_input_event(device_event);
+	acpi_bus_generate_proc_event(dev->acpi_dev, 1, device_event);
+	sonypi_compat_report_event(device_event);
+
+	return IRQ_HANDLED;
+}
+
+/*****************
+ *
+ *  ACPI driver
+ *
+ *****************/
+static int sony_pic_remove(struct acpi_device *device, int type)
+{
+	struct sony_pic_ioport *io, *tmp_io;
+	struct sony_pic_irq *irq, *tmp_irq;
+
+	if (sony_pic_disable(device)) {
+		printk(KERN_ERR DRV_PFX "Couldn't disable device.\n");
+		return -ENXIO;
+	}
+
+	free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev);
+	release_region(spic_dev.cur_ioport->io1.minimum,
+			spic_dev.cur_ioport->io1.address_length);
+	if (spic_dev.cur_ioport->io2.minimum)
+		release_region(spic_dev.cur_ioport->io2.minimum,
+				spic_dev.cur_ioport->io2.address_length);
+
+	sonypi_compat_exit();
+
+	sony_laptop_remove_input();
+
+	/* pf attrs */
+	sysfs_remove_group(&sony_pf_device->dev.kobj, &spic_attribute_group);
+	sony_pf_remove();
+
+	list_for_each_entry_safe(io, tmp_io, &spic_dev.ioports, list) {
+		list_del(&io->list);
+		kfree(io);
+	}
+	list_for_each_entry_safe(irq, tmp_irq, &spic_dev.interrupts, list) {
+		list_del(&irq->list);
+		kfree(irq);
+	}
+	spic_dev.cur_ioport = NULL;
+	spic_dev.cur_irq = NULL;
+
+	dprintk(SONY_PIC_DRIVER_NAME " removed.\n");
+	return 0;
+}
+
+static int sony_pic_add(struct acpi_device *device)
+{
+	int result;
+	struct sony_pic_ioport *io, *tmp_io;
+	struct sony_pic_irq *irq, *tmp_irq;
+
+	printk(KERN_INFO DRV_PFX "%s v%s.\n",
+		SONY_PIC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
+
+	spic_dev.acpi_dev = device;
+	strcpy(acpi_device_class(device), "sony/hotkey");
+	sony_pic_detect_device_type(&spic_dev);
+	mutex_init(&spic_dev.lock);
+
+	/* read _PRS resources */
+	result = sony_pic_possible_resources(device);
+	if (result) {
+		printk(KERN_ERR DRV_PFX
+				"Unable to read possible resources.\n");
+		goto err_free_resources;
+	}
+
+	/* setup input devices and helper fifo */
+	result = sony_laptop_setup_input(device);
+	if (result) {
+		printk(KERN_ERR DRV_PFX
+				"Unable to create input devices.\n");
+		goto err_free_resources;
+	}
+
+	if (sonypi_compat_init())
+		goto err_remove_input;
+
+	/* request io port */
+	list_for_each_entry_reverse(io, &spic_dev.ioports, list) {
+		if (request_region(io->io1.minimum, io->io1.address_length,
+					"Sony Programmable I/O Device")) {
+			dprintk("I/O port1: 0x%.4x (0x%.4x) + 0x%.2x\n",
+					io->io1.minimum, io->io1.maximum,
+					io->io1.address_length);
+			/* Type 1 devices have 2 ioports */
+			if (io->io2.minimum) {
+				if (request_region(io->io2.minimum,
+						io->io2.address_length,
+						"Sony Programmable I/O Device")) {
+					dprintk("I/O port2: 0x%.4x (0x%.4x) + 0x%.2x\n",
+							io->io2.minimum, io->io2.maximum,
+							io->io2.address_length);
+					spic_dev.cur_ioport = io;
+					break;
+				}
+				else {
+					dprintk("Unable to get I/O port2: "
+							"0x%.4x (0x%.4x) + 0x%.2x\n",
+							io->io2.minimum, io->io2.maximum,
+							io->io2.address_length);
+					release_region(io->io1.minimum,
+							io->io1.address_length);
+				}
+			}
+			else {
+				spic_dev.cur_ioport = io;
+				break;
+			}
+		}
+	}
+	if (!spic_dev.cur_ioport) {
+		printk(KERN_ERR DRV_PFX "Failed to request_region.\n");
+		result = -ENODEV;
+		goto err_remove_compat;
+	}
+
+	/* request IRQ */
+	list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) {
+		if (!request_irq(irq->irq.interrupts[0], sony_pic_irq,
+					IRQF_SHARED, "sony-laptop", &spic_dev)) {
+			dprintk("IRQ: %d - triggering: %d - "
+					"polarity: %d - shr: %d\n",
+					irq->irq.interrupts[0],
+					irq->irq.triggering,
+					irq->irq.polarity,
+					irq->irq.sharable);
+			spic_dev.cur_irq = irq;
+			break;
+		}
+	}
+	if (!spic_dev.cur_irq) {
+		printk(KERN_ERR DRV_PFX "Failed to request_irq.\n");
+		result = -ENODEV;
+		goto err_release_region;
+	}
+
+	/* set resource status _SRS */
+	result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq);
+	if (result) {
+		printk(KERN_ERR DRV_PFX "Couldn't enable device.\n");
+		goto err_free_irq;
+	}
+
+	spic_dev.bluetooth_power = -1;
+	/* create device attributes */
+	result = sony_pf_add();
+	if (result)
+		goto err_disable_device;
+
+	result = sysfs_create_group(&sony_pf_device->dev.kobj, &spic_attribute_group);
+	if (result)
+		goto err_remove_pf;
+
+	return 0;
+
+err_remove_pf:
+	sony_pf_remove();
+
+err_disable_device:
+	sony_pic_disable(device);
+
+err_free_irq:
+	free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev);
+
+err_release_region:
+	release_region(spic_dev.cur_ioport->io1.minimum,
+			spic_dev.cur_ioport->io1.address_length);
+	if (spic_dev.cur_ioport->io2.minimum)
+		release_region(spic_dev.cur_ioport->io2.minimum,
+				spic_dev.cur_ioport->io2.address_length);
+
+err_remove_compat:
+	sonypi_compat_exit();
+
+err_remove_input:
+	sony_laptop_remove_input();
+
+err_free_resources:
+	list_for_each_entry_safe(io, tmp_io, &spic_dev.ioports, list) {
+		list_del(&io->list);
+		kfree(io);
+	}
+	list_for_each_entry_safe(irq, tmp_irq, &spic_dev.interrupts, list) {
+		list_del(&irq->list);
+		kfree(irq);
+	}
+	spic_dev.cur_ioport = NULL;
+	spic_dev.cur_irq = NULL;
+
+	return result;
+}
+
+static int sony_pic_suspend(struct acpi_device *device, pm_message_t state)
+{
+	if (sony_pic_disable(device))
+		return -ENXIO;
+	return 0;
+}
+
+static int sony_pic_resume(struct acpi_device *device)
+{
+	sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq);
+	return 0;
+}
+
+static const struct acpi_device_id sony_pic_device_ids[] = {
+	{SONY_PIC_HID, 0},
+	{"", 0},
+};
+
+static struct acpi_driver sony_pic_driver = {
+	.name = SONY_PIC_DRIVER_NAME,
+	.class = SONY_PIC_CLASS,
+	.ids = sony_pic_device_ids,
+	.owner = THIS_MODULE,
+	.ops = {
+		.add = sony_pic_add,
+		.remove = sony_pic_remove,
+		.suspend = sony_pic_suspend,
+		.resume = sony_pic_resume,
+		},
+};
+
+static struct dmi_system_id __initdata sonypi_dmi_table[] = {
+	{
+		.ident = "Sony Vaio",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "PCG-"),
+		},
+	},
+	{
+		.ident = "Sony Vaio",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"),
+		},
+	},
+	{ }
+};
+
+static int __init sony_laptop_init(void)
+{
+	int result;
+
+	if (!no_spic && dmi_check_system(sonypi_dmi_table)) {
+		result = acpi_bus_register_driver(&sony_pic_driver);
+		if (result) {
+			printk(KERN_ERR DRV_PFX
+					"Unable to register SPIC driver.\n");
+			goto out;
+		}
+	}
+
+	result = acpi_bus_register_driver(&sony_nc_driver);
+	if (result) {
+		printk(KERN_ERR DRV_PFX "Unable to register SNC driver.\n");
+		goto out_unregister_pic;
+	}
+
+	return 0;
+
+out_unregister_pic:
+	if (!no_spic)
+		acpi_bus_unregister_driver(&sony_pic_driver);
+out:
+	return result;
+}
+
+static void __exit sony_laptop_exit(void)
+{
+	acpi_bus_unregister_driver(&sony_nc_driver);
+	if (!no_spic)
+		acpi_bus_unregister_driver(&sony_pic_driver);
+}
+
+module_init(sony_laptop_init);
+module_exit(sony_laptop_exit);
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
new file mode 100644
index 0000000..b4a4aa9
--- /dev/null
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -0,0 +1,289 @@
+/*
+ *  HP Compaq TC1100 Tablet WMI Extras Driver
+ *
+ *  Copyright (C) 2007 Carlos Corbacho <carlos@strangeworlds.co.uk>
+ *  Copyright (C) 2004 Jamey Hicks <jamey.hicks@hp.com>
+ *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or (at
+ *  your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <acpi/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/platform_device.h>
+
+#define GUID "C364AC71-36DB-495A-8494-B439D472A505"
+
+#define TC1100_INSTANCE_WIRELESS		1
+#define TC1100_INSTANCE_JOGDIAL		2
+
+#define TC1100_LOGPREFIX "tc1100-wmi: "
+#define TC1100_INFO KERN_INFO TC1100_LOGPREFIX
+
+MODULE_AUTHOR("Jamey Hicks, Carlos Corbacho");
+MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("wmi:C364AC71-36DB-495A-8494-B439D472A505");
+
+static int tc1100_probe(struct platform_device *device);
+static int tc1100_remove(struct platform_device *device);
+static int tc1100_suspend(struct platform_device *device, pm_message_t state);
+static int tc1100_resume(struct platform_device *device);
+
+static struct platform_driver tc1100_driver = {
+	.driver = {
+		.name = "tc1100-wmi",
+		.owner = THIS_MODULE,
+	},
+	.probe = tc1100_probe,
+	.remove = tc1100_remove,
+	.suspend = tc1100_suspend,
+	.resume = tc1100_resume,
+};
+
+static struct platform_device *tc1100_device;
+
+struct tc1100_data {
+	u32 wireless;
+	u32 jogdial;
+};
+
+static struct tc1100_data suspend_data;
+
+/* --------------------------------------------------------------------------
+				Device Management
+   -------------------------------------------------------------------------- */
+
+static int get_state(u32 *out, u8 instance)
+{
+	u32 tmp;
+	acpi_status status;
+	struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *obj;
+
+	if (!out)
+		return -EINVAL;
+
+	if (instance > 2)
+		return -ENODEV;
+
+	status = wmi_query_block(GUID, instance, &result);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	obj = (union acpi_object *) result.pointer;
+	if (obj && obj->type == ACPI_TYPE_BUFFER &&
+		obj->buffer.length == sizeof(u32)) {
+		tmp = *((u32 *) obj->buffer.pointer);
+	} else {
+		tmp = 0;
+	}
+
+	if (result.length > 0 && result.pointer)
+		kfree(result.pointer);
+
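+	/* Normalize the firmware value to a 0/1 state for sysfs: the
+	 * wireless instance reports 3 when the radio is enabled, the
+	 * jogdial instance reports 1. */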
+	switch (instance) {
+	case TC1100_INSTANCE_WIRELESS:
+		*out = (tmp == 3) ? 1 : 0;
+		return 0;
+	case TC1100_INSTANCE_JOGDIAL:
+		*out = (tmp == 1) ? 1 : 0;
+		return 0;
+	default:
+		return -ENODEV;
+	}
+}
+
+static int set_state(u32 *in, u8 instance)
+{
+	u32 value;
+	acpi_status status;
+	struct acpi_buffer input;
+
+	if (!in)
+		return -EINVAL;
+
+	if (instance > 2)
+		return -ENODEV;
+
+	switch (instance) {
+	case TC1100_INSTANCE_WIRELESS:
+		value = (*in) ? 1 : 2;
+		break;
+	case TC1100_INSTANCE_JOGDIAL:
+		value = (*in) ? 0 : 1;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	input.length = sizeof(u32);
+	input.pointer = &value;
+
+	status = wmi_set_block(GUID, instance, &input);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	return 0;
+}
+
+/* --------------------------------------------------------------------------
+				FS Interface (/sys)
+   -------------------------------------------------------------------------- */
+
+/*
+ * Macro generating the show/store handlers for a boolean sysfs attribute
+ */
+#define show_set_bool(value, instance) \
+static ssize_t \
+show_bool_##value(struct device *dev, struct device_attribute *attr, \
+	char *buf) \
+{ \
+	u32 result; \
+	int ret = get_state(&result, instance); \
+	if (!ret) \
+		return sprintf(buf, "%d\n", result); \
+	return sprintf(buf, "Read error\n"); \
+} \
+\
+static ssize_t \
+set_bool_##value(struct device *dev, struct device_attribute *attr, \
+	const char *buf, size_t count) \
+{ \
+	u32 tmp = simple_strtoul(buf, NULL, 10); \
+	int ret = set_state(&tmp, instance); \
+	if (ret) \
+		return -EINVAL; \
+	return count; \
+} \
+static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
+	show_bool_##value, set_bool_##value);
+
+show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
+show_set_bool(jogdial, TC1100_INSTANCE_JOGDIAL);
+
+static void remove_fs(void)
+{
+	device_remove_file(&tc1100_device->dev, &dev_attr_wireless);
+	device_remove_file(&tc1100_device->dev, &dev_attr_jogdial);
+}
+
+static int add_fs(void)
+{
+	int ret;
+
+	ret = device_create_file(&tc1100_device->dev, &dev_attr_wireless);
+	if (ret)
+		goto add_sysfs_error;
+
+	ret = device_create_file(&tc1100_device->dev, &dev_attr_jogdial);
+	if (ret)
+		goto add_sysfs_error;
+
+	return ret;
+
+add_sysfs_error:
+	remove_fs();
+	return ret;
+}
+
+/* --------------------------------------------------------------------------
+				Driver Model
+   -------------------------------------------------------------------------- */
+
+static int tc1100_probe(struct platform_device *device)
+{
+	int result = 0;
+
+	result = add_fs();
+	return result;
+}
+
+
+static int tc1100_remove(struct platform_device *device)
+{
+	remove_fs();
+	return 0;
+}
+
+static int tc1100_suspend(struct platform_device *dev, pm_message_t state)
+{
+	int ret;
+
+	ret = get_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);
+	if (ret)
+		return ret;
+
+	ret = get_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static int tc1100_resume(struct platform_device *dev)
+{
+	int ret;
+
+	ret = set_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);
+	if (ret)
+		return ret;
+
+	ret = set_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static int __init tc1100_init(void)
+{
+	int result = 0;
+
+	if (!wmi_has_guid(GUID))
+		return -ENODEV;
+
+	result = platform_driver_register(&tc1100_driver);
+	if (result)
+		return result;
+
+	tc1100_device = platform_device_alloc("tc1100-wmi", -1);
+	platform_device_add(tc1100_device);
+
+	printk(TC1100_INFO "HP Compaq TC1100 Tablet WMI Extras loaded\n");
+
+	return result;
+}
+
+static void __exit tc1100_exit(void)
+{
+	platform_device_del(tc1100_device);
+	platform_driver_unregister(&tc1100_driver);
+
+	printk(TC1100_INFO "HP Compaq TC1100 Tablet WMI Extras unloaded\n");
+}
+
+module_init(tc1100_init);
+module_exit(tc1100_exit);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
new file mode 100644
index 0000000..bcbc051
--- /dev/null
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -0,0 +1,7554 @@
+/*
+ *  thinkpad_acpi.c - ThinkPad ACPI Extras
+ *
+ *
+ *  Copyright (C) 2004-2005 Borislav Deianov <borislav@users.sf.net>
+ *  Copyright (C) 2006-2008 Henrique de Moraes Holschuh <hmh@hmh.eng.br>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ *  02110-1301, USA.
+ */
+
+#define TPACPI_VERSION "0.22"
+#define TPACPI_SYSFS_VERSION 0x020200
+
+/*
+ *  Changelog:
+ *  2007-10-20		changelog trimmed down
+ *
+ *  2007-03-27  0.14	renamed to thinkpad_acpi and moved to
+ *  			drivers/misc.
+ *
+ *  2006-11-22	0.13	new maintainer
+ *  			changelog now lives in git commit history, and will
+ *  			not be updated further in-file.
+ *
+ *  2005-03-17	0.11	support for 600e, 770x
+ *			    thanks to Jamie Lentin <lentinj@dial.pipex.com>
+ *
+ *  2005-01-16	0.9	use MODULE_VERSION
+ *			    thanks to Henrik Brix Andersen <brix@gentoo.org>
+ *			fix parameter passing on module loading
+ *			    thanks to Rusty Russell <rusty@rustcorp.com.au>
+ *			    thanks to Jim Radford <radford@blackbean.org>
+ *  2004-11-08	0.8	fix init error case, don't return from a macro
+ *			    thanks to Chris Wright <chrisw@osdl.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/delay.h>
+
+#include <linux/nvram.h>
+#include <linux/proc_fs.h>
+#include <linux/sysfs.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/platform_device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/input.h>
+#include <linux/leds.h>
+#include <linux/rfkill.h>
+#include <asm/uaccess.h>
+
+#include <linux/dmi.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+
+#include <acpi/acpi_drivers.h>
+
+#include <linux/pci_ids.h>
+
+
+/* ThinkPad CMOS commands */
+#define TP_CMOS_VOLUME_DOWN	0
+#define TP_CMOS_VOLUME_UP	1
+#define TP_CMOS_VOLUME_MUTE	2
+#define TP_CMOS_BRIGHTNESS_UP	4
+#define TP_CMOS_BRIGHTNESS_DOWN	5
+#define TP_CMOS_THINKLIGHT_ON	12
+#define TP_CMOS_THINKLIGHT_OFF	13
+
+/* NVRAM Addresses */
+enum tp_nvram_addr {
+	TP_NVRAM_ADDR_HK2		= 0x57,
+	TP_NVRAM_ADDR_THINKLIGHT	= 0x58,
+	TP_NVRAM_ADDR_VIDEO		= 0x59,
+	TP_NVRAM_ADDR_BRIGHTNESS	= 0x5e,
+	TP_NVRAM_ADDR_MIXER		= 0x60,
+};
+
+/* NVRAM bit masks */
+enum {
+	TP_NVRAM_MASK_HKT_THINKPAD	= 0x08,
+	TP_NVRAM_MASK_HKT_ZOOM		= 0x20,
+	TP_NVRAM_MASK_HKT_DISPLAY	= 0x40,
+	TP_NVRAM_MASK_HKT_HIBERNATE	= 0x80,
+	TP_NVRAM_MASK_THINKLIGHT	= 0x10,
+	TP_NVRAM_MASK_HKT_DISPEXPND	= 0x30,
+	TP_NVRAM_MASK_HKT_BRIGHTNESS	= 0x20,
+	TP_NVRAM_MASK_LEVEL_BRIGHTNESS	= 0x0f,
+	TP_NVRAM_POS_LEVEL_BRIGHTNESS	= 0,
+	TP_NVRAM_MASK_MUTE		= 0x40,
+	TP_NVRAM_MASK_HKT_VOLUME	= 0x80,
+	TP_NVRAM_MASK_LEVEL_VOLUME	= 0x0f,
+	TP_NVRAM_POS_LEVEL_VOLUME	= 0,
+};
+
+/* ACPI HIDs */
+#define TPACPI_ACPI_HKEY_HID		"IBM0068"
+
+/* Input IDs */
+#define TPACPI_HKEY_INPUT_PRODUCT	0x5054 /* "TP" */
+#define TPACPI_HKEY_INPUT_VERSION	0x4101
+
+/* ACPI \WGSV commands */
+enum {
+	TP_ACPI_WGSV_GET_STATE		= 0x01, /* Get state information */
+	TP_ACPI_WGSV_PWR_ON_ON_RESUME	= 0x02, /* Resume WWAN powered on */
+	TP_ACPI_WGSV_PWR_OFF_ON_RESUME	= 0x03,	/* Resume WWAN powered off */
+	TP_ACPI_WGSV_SAVE_STATE		= 0x04, /* Save state for S4/S5 */
+};
+
+/* TP_ACPI_WGSV_GET_STATE bits */
+enum {
+	TP_ACPI_WGSV_STATE_WWANEXIST	= 0x0001, /* WWAN hw available */
+	TP_ACPI_WGSV_STATE_WWANPWR	= 0x0002, /* WWAN radio enabled */
+	TP_ACPI_WGSV_STATE_WWANPWRRES	= 0x0004, /* WWAN state at resume */
+	TP_ACPI_WGSV_STATE_WWANBIOSOFF	= 0x0008, /* WWAN disabled in BIOS */
+	TP_ACPI_WGSV_STATE_BLTHEXIST	= 0x0001, /* BLTH hw available */
+	TP_ACPI_WGSV_STATE_BLTHPWR	= 0x0002, /* BLTH radio enabled */
+	TP_ACPI_WGSV_STATE_BLTHPWRRES	= 0x0004, /* BLTH state at resume */
+	TP_ACPI_WGSV_STATE_BLTHBIOSOFF	= 0x0008, /* BLTH disabled in BIOS */
+	TP_ACPI_WGSV_STATE_UWBEXIST	= 0x0010, /* UWB hw available */
+	TP_ACPI_WGSV_STATE_UWBPWR	= 0x0020, /* UWB radio enabled */
+};
+
+/****************************************************************************
+ * Main driver
+ */
+
+#define TPACPI_NAME "thinkpad"
+#define TPACPI_DESC "ThinkPad ACPI Extras"
+#define TPACPI_FILE TPACPI_NAME "_acpi"
+#define TPACPI_URL "http://ibm-acpi.sf.net/"
+#define TPACPI_MAIL "ibm-acpi-devel@lists.sourceforge.net"
+
+#define TPACPI_PROC_DIR "ibm"
+#define TPACPI_ACPI_EVENT_PREFIX "ibm"
+#define TPACPI_DRVR_NAME TPACPI_FILE
+#define TPACPI_DRVR_SHORTNAME "tpacpi"
+#define TPACPI_HWMON_DRVR_NAME TPACPI_NAME "_hwmon"
+
+#define TPACPI_NVRAM_KTHREAD_NAME "ktpacpi_nvramd"
+#define TPACPI_WORKQUEUE_NAME "ktpacpid"
+
+#define TPACPI_MAX_ACPI_ARGS 3
+
+/* rfkill switches */
+enum {
+	TPACPI_RFK_BLUETOOTH_SW_ID = 0,
+	TPACPI_RFK_WWAN_SW_ID,
+	TPACPI_RFK_UWB_SW_ID,
+};
+
+/* Debugging */
+#define TPACPI_LOG TPACPI_FILE ": "
+#define TPACPI_ALERT	KERN_ALERT  TPACPI_LOG
+#define TPACPI_CRIT	KERN_CRIT   TPACPI_LOG
+#define TPACPI_ERR	KERN_ERR    TPACPI_LOG
+#define TPACPI_NOTICE	KERN_NOTICE TPACPI_LOG
+#define TPACPI_INFO	KERN_INFO   TPACPI_LOG
+#define TPACPI_DEBUG	KERN_DEBUG  TPACPI_LOG
+
+#define TPACPI_DBG_ALL		0xffff
+#define TPACPI_DBG_INIT		0x0001
+#define TPACPI_DBG_EXIT		0x0002
+#define dbg_printk(a_dbg_level, format, arg...) \
+	do { if (dbg_level & a_dbg_level) \
+		printk(TPACPI_DEBUG "%s: " format, __func__ , ## arg); \
+	} while (0)
+#ifdef CONFIG_THINKPAD_ACPI_DEBUG
+#define vdbg_printk(a_dbg_level, format, arg...) \
+	dbg_printk(a_dbg_level, format, ## arg)
+static const char *str_supported(int is_supported);
+#else
+#define vdbg_printk(a_dbg_level, format, arg...)
+#endif
+
+#define onoff(status, bit) ((status) & (1 << (bit)) ? "on" : "off")
+#define enabled(status, bit) ((status) & (1 << (bit)) ? "enabled" : "disabled")
+#define strlencmp(a, b) (strncmp((a), (b), strlen(b)))
+
+
+/****************************************************************************
+ * Driver-wide structs and misc. variables
+ */
+
+struct ibm_struct;
+
+struct tp_acpi_drv_struct {
+	const struct acpi_device_id *hid;
+	struct acpi_driver *driver;
+
+	void (*notify) (struct ibm_struct *, u32);
+	acpi_handle *handle;
+	u32 type;
+	struct acpi_device *device;
+};
+
+struct ibm_struct {
+	char *name;
+
+	int (*read) (char *);
+	int (*write) (char *);
+	void (*exit) (void);
+	void (*resume) (void);
+	void (*suspend) (pm_message_t state);
+	void (*shutdown) (void);
+
+	struct list_head all_drivers;
+
+	struct tp_acpi_drv_struct *acpi;
+
+	struct {
+		u8 acpi_driver_registered:1;
+		u8 acpi_notify_installed:1;
+		u8 proc_created:1;
+		u8 init_called:1;
+		u8 experimental:1;
+	} flags;
+};
+
+struct ibm_init_struct {
+	char param[32];
+
+	int (*init) (struct ibm_init_struct *);
+	struct ibm_struct *data;
+};
+
+static struct {
+#ifdef CONFIG_THINKPAD_ACPI_BAY
+	u32 bay_status:1;
+	u32 bay_eject:1;
+	u32 bay_status2:1;
+	u32 bay_eject2:1;
+#endif
+	u32 bluetooth:1;
+	u32 hotkey:1;
+	u32 hotkey_mask:1;
+	u32 hotkey_wlsw:1;
+	u32 hotkey_tablet:1;
+	u32 light:1;
+	u32 light_status:1;
+	u32 bright_16levels:1;
+	u32 bright_acpimode:1;
+	u32 wan:1;
+	u32 uwb:1;
+	u32 fan_ctrl_status_undef:1;
+	u32 input_device_registered:1;
+	u32 platform_drv_registered:1;
+	u32 platform_drv_attrs_registered:1;
+	u32 sensors_pdrv_registered:1;
+	u32 sensors_pdrv_attrs_registered:1;
+	u32 sensors_pdev_attrs_registered:1;
+	u32 hotkey_poll_active:1;
+} tp_features;
+
+static struct {
+	u16 hotkey_mask_ff:1;
+	u16 bright_cmos_ec_unsync:1;
+} tp_warned;
+
+struct thinkpad_id_data {
+	unsigned int vendor;	/* ThinkPad vendor:
+				 * PCI_VENDOR_ID_IBM/PCI_VENDOR_ID_LENOVO */
+
+	char *bios_version_str;	/* Something like 1ZET51WW (1.03z) */
+	char *ec_version_str;	/* Something like 1ZHT51WW-1.04a */
+
+	u16 bios_model;		/* Big Endian, TP-1Y = 0x5931, 0 = unknown */
+	u16 ec_model;
+
+	char *model_str;	/* ThinkPad T43 */
+	char *nummodel_str;	/* 9384A9C for a 9384-A9C model */
+};
+static struct thinkpad_id_data thinkpad_id;
+
+static enum {
+	TPACPI_LIFE_INIT = 0,
+	TPACPI_LIFE_RUNNING,
+	TPACPI_LIFE_EXITING,
+} tpacpi_lifecycle;
+
+static int experimental;
+static u32 dbg_level;
+
+static struct workqueue_struct *tpacpi_wq;
+
+/* Special LED class that can defer work */
+struct tpacpi_led_classdev {
+	struct led_classdev led_classdev;
+	struct work_struct work;
+	enum led_brightness new_brightness;
+	unsigned int led;
+};
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+static int dbg_wlswemul;
+static int tpacpi_wlsw_emulstate;
+static int dbg_bluetoothemul;
+static int tpacpi_bluetooth_emulstate;
+static int dbg_wwanemul;
+static int tpacpi_wwan_emulstate;
+static int dbg_uwbemul;
+static int tpacpi_uwb_emulstate;
+#endif
+
+
+/****************************************************************************
+ ****************************************************************************
+ *
+ * ACPI Helpers and device model
+ *
+ ****************************************************************************
+ ****************************************************************************/
+
+/*************************************************************************
+ * ACPI basic handles
+ */
+
+static acpi_handle root_handle;
+
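+/* Declare an ACPI handle together with a list of candidate namespace
+ * paths; drv_acpi_handle_init() tries the paths in order and keeps the
+ * first one that resolves on the ThinkPad model at hand. */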
+#define TPACPI_HANDLE(object, parent, paths...)			\
+	static acpi_handle  object##_handle;			\
+	static acpi_handle *object##_parent = &parent##_handle;	\
+	static char        *object##_path;			\
+	static char        *object##_paths[] = { paths }
+
+TPACPI_HANDLE(ec, root, "\\_SB.PCI0.ISA.EC0",	/* 240, 240x */
+	   "\\_SB.PCI.ISA.EC",	/* 570 */
+	   "\\_SB.PCI0.ISA0.EC0",	/* 600e/x, 770e, 770x */
+	   "\\_SB.PCI0.ISA.EC",	/* A21e, A2xm/p, T20-22, X20-21 */
+	   "\\_SB.PCI0.AD4S.EC0",	/* i1400, R30 */
+	   "\\_SB.PCI0.ICH3.EC0",	/* R31 */
+	   "\\_SB.PCI0.LPC.EC",	/* all others */
+	   );
+
+TPACPI_HANDLE(ecrd, ec, "ECRD");	/* 570 */
+TPACPI_HANDLE(ecwr, ec, "ECWR");	/* 570 */
+
+TPACPI_HANDLE(cmos, root, "\\UCMS",	/* R50, R50e, R50p, R51, */
+					/* T4x, X31, X40 */
+	   "\\CMOS",		/* A3x, G4x, R32, T23, T30, X22-24, X30 */
+	   "\\CMS",		/* R40, R40e */
+	   );			/* all others */
+
+TPACPI_HANDLE(hkey, ec, "\\_SB.HKEY",	/* 600e/x, 770e, 770x */
+	   "^HKEY",		/* R30, R31 */
+	   "HKEY",		/* all others */
+	   );			/* 570 */
+
+TPACPI_HANDLE(vid, root, "\\_SB.PCI.AGP.VGA",	/* 570 */
+	   "\\_SB.PCI0.AGP0.VID0",	/* 600e/x, 770x */
+	   "\\_SB.PCI0.VID0",	/* 770e */
+	   "\\_SB.PCI0.VID",	/* A21e, G4x, R50e, X30, X40 */
+	   "\\_SB.PCI0.AGP.VID",	/* all others */
+	   );				/* R30, R31 */
+
+
+/*************************************************************************
+ * ACPI helpers
+ */
+
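+/* Evaluate an ACPI method using a tiny format string: an optional
+ * leading 'q' suppresses error messages, the next character selects
+ * the result type ('d' integer, 'v' void/none), and the remaining
+ * characters describe the arguments ('d' takes an int). Returns
+ * nonzero on success. */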
+static int acpi_evalf(acpi_handle handle,
+		      void *res, char *method, char *fmt, ...)
+{
+	char *fmt0 = fmt;
+	struct acpi_object_list params;
+	union acpi_object in_objs[TPACPI_MAX_ACPI_ARGS];
+	struct acpi_buffer result, *resultp;
+	union acpi_object out_obj;
+	acpi_status status;
+	va_list ap;
+	char res_type;
+	int success;
+	int quiet;
+
+	if (!*fmt) {
+		printk(TPACPI_ERR "acpi_evalf() called with empty format\n");
+		return 0;
+	}
+
+	if (*fmt == 'q') {
+		quiet = 1;
+		fmt++;
+	} else
+		quiet = 0;
+
+	res_type = *(fmt++);
+
+	params.count = 0;
+	params.pointer = &in_objs[0];
+
+	va_start(ap, fmt);
+	while (*fmt) {
+		char c = *(fmt++);
+		switch (c) {
+		case 'd':	/* int */
+			in_objs[params.count].integer.value = va_arg(ap, int);
+			in_objs[params.count++].type = ACPI_TYPE_INTEGER;
+			break;
+			/* add more types as needed */
+		default:
+			printk(TPACPI_ERR "acpi_evalf() called "
+			       "with invalid format character '%c'\n", c);
+			return 0;
+		}
+	}
+	va_end(ap);
+
+	if (res_type != 'v') {
+		result.length = sizeof(out_obj);
+		result.pointer = &out_obj;
+		resultp = &result;
+	} else
+		resultp = NULL;
+
+	status = acpi_evaluate_object(handle, method, &params, resultp);
+
+	switch (res_type) {
+	case 'd':		/* int */
+		if (res)
+			*(int *)res = out_obj.integer.value;
+		success = status == AE_OK && out_obj.type == ACPI_TYPE_INTEGER;
+		break;
+	case 'v':		/* void */
+		success = status == AE_OK;
+		break;
+		/* add more types as needed */
+	default:
+		printk(TPACPI_ERR "acpi_evalf() called "
+		       "with invalid format character '%c'\n", res_type);
+		return 0;
+	}
+
+	if (!success && !quiet)
+		printk(TPACPI_ERR "acpi_evalf(%s, %s, ...) failed: %d\n",
+		       method, fmt0, status);
+
+	return success;
+}
+
+static int acpi_ec_read(int i, u8 *p)
+{
+	int v;
+
+	if (ecrd_handle) {
+		if (!acpi_evalf(ecrd_handle, &v, NULL, "dd", i))
+			return 0;
+		*p = v;
+	} else {
+		if (ec_read(i, p) < 0)
+			return 0;
+	}
+
+	return 1;
+}
+
+static int acpi_ec_write(int i, u8 v)
+{
+	if (ecwr_handle) {
+		if (!acpi_evalf(ecwr_handle, NULL, NULL, "vdd", i, v))
+			return 0;
+	} else {
+		if (ec_write(i, v) < 0)
+			return 0;
+	}
+
+	return 1;
+}
+
+#if defined(CONFIG_THINKPAD_ACPI_DOCK) || defined(CONFIG_THINKPAD_ACPI_BAY)
+static int _sta(acpi_handle handle)
+{
+	int status;
+
+	if (!handle || !acpi_evalf(handle, &status, "_STA", "d"))
+		status = 0;
+
+	return status;
+}
+#endif
+
+static int issue_thinkpad_cmos_command(int cmos_cmd)
+{
+	if (!cmos_handle)
+		return -ENXIO;
+
+	if (!acpi_evalf(cmos_handle, NULL, NULL, "vd", cmos_cmd))
+		return -EIO;
+
+	return 0;
+}
+
+/*************************************************************************
+ * ACPI device model
+ */
+
+#define TPACPI_ACPIHANDLE_INIT(object) \
+	drv_acpi_handle_init(#object, &object##_handle, *object##_parent, \
+		object##_paths, ARRAY_SIZE(object##_paths), &object##_path)
+
+static void drv_acpi_handle_init(char *name,
+			   acpi_handle *handle, acpi_handle parent,
+			   char **paths, int num_paths, char **path)
+{
+	int i;
+	acpi_status status;
+
+	vdbg_printk(TPACPI_DBG_INIT, "trying to locate ACPI handle for %s\n",
+		name);
+
+	for (i = 0; i < num_paths; i++) {
+		status = acpi_get_handle(parent, paths[i], handle);
+		if (ACPI_SUCCESS(status)) {
+			*path = paths[i];
+			dbg_printk(TPACPI_DBG_INIT,
+				   "Found ACPI handle %s for %s\n",
+				   *path, name);
+			return;
+		}
+	}
+
+	vdbg_printk(TPACPI_DBG_INIT, "ACPI handle for %s not found\n",
+		    name);
+	*handle = NULL;
+}
+
+static void dispatch_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+	struct ibm_struct *ibm = data;
+
+	if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING)
+		return;
+
+	if (!ibm || !ibm->acpi || !ibm->acpi->notify)
+		return;
+
+	ibm->acpi->notify(ibm, event);
+}
+
+static int __init setup_acpi_notify(struct ibm_struct *ibm)
+{
+	acpi_status status;
+	int rc;
+
+	BUG_ON(!ibm->acpi);
+
+	if (!*ibm->acpi->handle)
+		return 0;
+
+	vdbg_printk(TPACPI_DBG_INIT,
+		"setting up ACPI notify for %s\n", ibm->name);
+
+	rc = acpi_bus_get_device(*ibm->acpi->handle, &ibm->acpi->device);
+	if (rc < 0) {
+		printk(TPACPI_ERR "acpi_bus_get_device(%s) failed: %d\n",
+			ibm->name, rc);
+		return -ENODEV;
+	}
+
+	ibm->acpi->device->driver_data = ibm;
+	sprintf(acpi_device_class(ibm->acpi->device), "%s/%s",
+		TPACPI_ACPI_EVENT_PREFIX,
+		ibm->name);
+
+	status = acpi_install_notify_handler(*ibm->acpi->handle,
+			ibm->acpi->type, dispatch_acpi_notify, ibm);
+	if (ACPI_FAILURE(status)) {
+		if (status == AE_ALREADY_EXISTS) {
+			printk(TPACPI_NOTICE
+			       "another device driver is already "
+			       "handling %s events\n", ibm->name);
+		} else {
+			printk(TPACPI_ERR
+			       "acpi_install_notify_handler(%s) failed: %d\n",
+			       ibm->name, status);
+		}
+		return -ENODEV;
+	}
+	ibm->flags.acpi_notify_installed = 1;
+	return 0;
+}
+
+static int __init tpacpi_device_add(struct acpi_device *device)
+{
+	return 0;
+}
+
+static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
+{
+	int rc;
+
+	dbg_printk(TPACPI_DBG_INIT,
+		"registering %s as an ACPI driver\n", ibm->name);
+
+	BUG_ON(!ibm->acpi);
+
+	ibm->acpi->driver = kzalloc(sizeof(struct acpi_driver), GFP_KERNEL);
+	if (!ibm->acpi->driver) {
+		printk(TPACPI_ERR
+		       "failed to allocate memory for ibm->acpi->driver\n");
+		return -ENOMEM;
+	}
+
+	sprintf(ibm->acpi->driver->name, "%s_%s", TPACPI_NAME, ibm->name);
+	ibm->acpi->driver->ids = ibm->acpi->hid;
+
+	ibm->acpi->driver->ops.add = &tpacpi_device_add;
+
+	rc = acpi_bus_register_driver(ibm->acpi->driver);
+	if (rc < 0) {
+		printk(TPACPI_ERR "acpi_bus_register_driver(%s) failed: %d\n",
+		       ibm->name, rc);
+		kfree(ibm->acpi->driver);
+		ibm->acpi->driver = NULL;
+	} else if (!rc)
+		ibm->flags.acpi_driver_registered = 1;
+
+	return rc;
+}
+
+
+/****************************************************************************
+ ****************************************************************************
+ *
+ * Procfs Helpers
+ *
+ ****************************************************************************
+ ****************************************************************************/
+
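+/* procfs read dispatcher: calls the subdriver's read() hook to fill
+ * the page, then applies the usual read_proc offset/count/eof
+ * bookkeeping. */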
+static int dispatch_procfs_read(char *page, char **start, off_t off,
+			int count, int *eof, void *data)
+{
+	struct ibm_struct *ibm = data;
+	int len;
+
+	if (!ibm || !ibm->read)
+		return -EINVAL;
+
+	len = ibm->read(page);
+	if (len < 0)
+		return len;
+
+	if (len <= off + count)
+		*eof = 1;
+	*start = page + off;
+	len -= off;
+	if (len > count)
+		len = count;
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+
+static int dispatch_procfs_write(struct file *file,
+			const char __user *userbuf,
+			unsigned long count, void *data)
+{
+	struct ibm_struct *ibm = data;
+	char *kernbuf;
+	int ret;
+
+	if (!ibm || !ibm->write)
+		return -EINVAL;
+
+	kernbuf = kmalloc(count + 2, GFP_KERNEL);
+	if (!kernbuf)
+		return -ENOMEM;
+
+	if (copy_from_user(kernbuf, userbuf, count)) {
+		kfree(kernbuf);
+		return -EFAULT;
+	}
+
+	kernbuf[count] = 0;
+	strcat(kernbuf, ",");
+	ret = ibm->write(kernbuf);
+	if (ret == 0)
+		ret = count;
+
+	kfree(kernbuf);
+
+	return ret;
+}
+
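+/* Commands written through procfs form a comma-separated list; return
+ * the next non-empty token and advance *cmds past it (the write
+ * dispatcher appends a trailing ',' so the last command terminates). */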
+static char *next_cmd(char **cmds)
+{
+	char *start = *cmds;
+	char *end;
+
+	while ((end = strchr(start, ',')) && end == start)
+		start = end + 1;
+
+	if (!end)
+		return NULL;
+
+	*end = 0;
+	*cmds = end + 1;
+	return start;
+}
+
+
+/****************************************************************************
+ ****************************************************************************
+ *
+ * Device model: input, hwmon and platform
+ *
+ ****************************************************************************
+ ****************************************************************************/
+
+static struct platform_device *tpacpi_pdev;
+static struct platform_device *tpacpi_sensors_pdev;
+static struct device *tpacpi_hwmon;
+static struct input_dev *tpacpi_inputdev;
+static struct mutex tpacpi_inputdev_send_mutex;
+static LIST_HEAD(tpacpi_all_drivers);
+
+static int tpacpi_suspend_handler(struct platform_device *pdev,
+				  pm_message_t state)
+{
+	struct ibm_struct *ibm, *itmp;
+
+	list_for_each_entry_safe(ibm, itmp,
+				 &tpacpi_all_drivers,
+				 all_drivers) {
+		if (ibm->suspend)
+			(ibm->suspend)(state);
+	}
+
+	return 0;
+}
+
+static int tpacpi_resume_handler(struct platform_device *pdev)
+{
+	struct ibm_struct *ibm, *itmp;
+
+	list_for_each_entry_safe(ibm, itmp,
+				 &tpacpi_all_drivers,
+				 all_drivers) {
+		if (ibm->resume)
+			(ibm->resume)();
+	}
+
+	return 0;
+}
+
+static void tpacpi_shutdown_handler(struct platform_device *pdev)
+{
+	struct ibm_struct *ibm, *itmp;
+
+	list_for_each_entry_safe(ibm, itmp,
+				 &tpacpi_all_drivers,
+				 all_drivers) {
+		if (ibm->shutdown)
+			(ibm->shutdown)();
+	}
+}
+
+static struct platform_driver tpacpi_pdriver = {
+	.driver = {
+		.name = TPACPI_DRVR_NAME,
+		.owner = THIS_MODULE,
+	},
+	.suspend = tpacpi_suspend_handler,
+	.resume = tpacpi_resume_handler,
+	.shutdown = tpacpi_shutdown_handler,
+};
+
+static struct platform_driver tpacpi_hwmon_pdriver = {
+	.driver = {
+		.name = TPACPI_HWMON_DRVR_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+/*************************************************************************
+ * sysfs support helpers
+ */
+
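+/* Helpers to build a sysfs attribute_group incrementally: the
+ * attribute pointer array lives right after the set (see struct
+ * attribute_set_obj), over-allocated by one entry so the list stays
+ * NULL-terminated. */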
+struct attribute_set {
+	unsigned int members, max_members;
+	struct attribute_group group;
+};
+
+struct attribute_set_obj {
+	struct attribute_set s;
+	struct attribute *a;
+} __attribute__((packed));
+
+static struct attribute_set *create_attr_set(unsigned int max_members,
+						const char *name)
+{
+	struct attribute_set_obj *sobj;
+
+	if (max_members == 0)
+		return NULL;
+
+	/* Allocates space for implicit NULL at the end too */
+	sobj = kzalloc(sizeof(struct attribute_set_obj) +
+		    max_members * sizeof(struct attribute *),
+		    GFP_KERNEL);
+	if (!sobj)
+		return NULL;
+	sobj->s.max_members = max_members;
+	sobj->s.group.attrs = &sobj->a;
+	sobj->s.group.name = name;
+
+	return &sobj->s;
+}
+
+#define destroy_attr_set(_set) \
+	kfree(_set);
+
+/* not thread-safe; use it from a single thread per set */
+static int add_to_attr_set(struct attribute_set *s, struct attribute *attr)
+{
+	if (!s || !attr)
+		return -EINVAL;
+
+	if (s->members >= s->max_members)
+		return -ENOMEM;
+
+	s->group.attrs[s->members] = attr;
+	s->members++;
+
+	return 0;
+}
+
+static int add_many_to_attr_set(struct attribute_set *s,
+			struct attribute **attr,
+			unsigned int count)
+{
+	int i, res;
+
+	for (i = 0; i < count; i++) {
+		res = add_to_attr_set(s, attr[i]);
+		if (res)
+			return res;
+	}
+
+	return 0;
+}
+
+static void delete_attr_set(struct attribute_set *s, struct kobject *kobj)
+{
+	sysfs_remove_group(kobj, &s->group);
+	destroy_attr_set(s);
+}
+
+#define register_attr_set_with_sysfs(_attr_set, _kobj) \
+	sysfs_create_group(_kobj, &_attr_set->group)
+
+static int parse_strtoul(const char *buf,
+		unsigned long max, unsigned long *value)
+{
+	char *endp;
+
+	while (*buf && isspace(*buf))
+		buf++;
+	*value = simple_strtoul(buf, &endp, 0);
+	while (*endp && isspace(*endp))
+		endp++;
+	if (*endp || *value > max)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void tpacpi_disable_brightness_delay(void)
+{
+	if (acpi_evalf(hkey_handle, NULL, "PWMS", "qvd", 0))
+		printk(TPACPI_NOTICE
+			"ACPI backlight control delay disabled\n");
+}
+
+static int __init tpacpi_query_bcl_levels(acpi_handle handle)
+{
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *obj;
+	int rc;
+
+	if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) {
+		obj = (union acpi_object *)buffer.pointer;
+		if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
+			printk(TPACPI_ERR "Unknown _BCL data, "
+			       "please report this to %s\n", TPACPI_MAIL);
+			rc = 0;
+		} else {
+			rc = obj->package.count;
+		}
+	} else {
+		return 0;
+	}
+
+	kfree(buffer.pointer);
+	return rc;
+}
+
+static acpi_status __init tpacpi_acpi_walk_find_bcl(acpi_handle handle,
+					u32 lvl, void *context, void **rv)
+{
+	char name[ACPI_PATH_SEGMENT_LENGTH];
+	struct acpi_buffer buffer = { sizeof(name), &name };
+
+	if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) &&
+	    !strncmp("_BCL", name, sizeof(name) - 1)) {
+		BUG_ON(!rv || !*rv);
+		**(int **)rv = tpacpi_query_bcl_levels(handle);
+		return AE_CTRL_TERMINATE;
+	} else {
+		return AE_OK;
+	}
+}
+
+/*
+ * Returns 0 (no ACPI _BCL or _BCL invalid), or size of brightness map
+ */
+static int __init tpacpi_check_std_acpi_brightness_support(void)
+{
+	int status;
+	int bcl_levels = 0;
+	void *bcl_ptr = &bcl_levels;
+
+	if (!vid_handle) {
+		TPACPI_ACPIHANDLE_INIT(vid);
+	}
+	if (!vid_handle)
+		return 0;
+
+	/*
+	 * Search for a _BCL method, and execute it.  This is safe on all
+	 * ThinkPads, and as a side-effect, _BCL will place a Lenovo Vista
+ * BIOS in ACPI backlight control mode.  We do NOT need to call
+ * _BCL on an enabled video device; any one will do for our
+ * purposes.
+	 */
+
+	status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3,
+				     tpacpi_acpi_walk_find_bcl, NULL,
+				     &bcl_ptr);
+
+	if (ACPI_SUCCESS(status) && bcl_levels > 2) {
+		tp_features.bright_acpimode = 1;
+		return (bcl_levels - 2);
+	}
+
+	return 0;
+}
+
+static int __init tpacpi_new_rfkill(const unsigned int id,
+			struct rfkill **rfk,
+			const enum rfkill_type rfktype,
+			const char *name,
+			const bool set_default,
+			int (*toggle_radio)(void *, enum rfkill_state),
+			int (*get_state)(void *, enum rfkill_state *))
+{
+	int res;
+	enum rfkill_state initial_state = RFKILL_STATE_SOFT_BLOCKED;
+
+	res = get_state(NULL, &initial_state);
+	if (res < 0) {
+		printk(TPACPI_ERR
+			"failed to read initial state for %s, error %d; "
+			"will turn radio off\n", name, res);
+	} else if (set_default) {
+		/* try to set the initial state as the default for the rfkill
+		 * type, since we ask the firmware to preserve it across S5 in
+		 * NVRAM */
+		rfkill_set_default(rfktype,
+				(initial_state == RFKILL_STATE_UNBLOCKED) ?
+					RFKILL_STATE_UNBLOCKED :
+					RFKILL_STATE_SOFT_BLOCKED);
+	}
+
+	*rfk = rfkill_allocate(&tpacpi_pdev->dev, rfktype);
+	if (!*rfk) {
+		printk(TPACPI_ERR
+			"failed to allocate memory for rfkill class\n");
+		return -ENOMEM;
+	}
+
+	(*rfk)->name = name;
+	(*rfk)->get_state = get_state;
+	(*rfk)->toggle_radio = toggle_radio;
+	(*rfk)->state = initial_state;
+
+	res = rfkill_register(*rfk);
+	if (res < 0) {
+		printk(TPACPI_ERR
+			"failed to register %s rfkill switch: %d\n",
+			name, res);
+		rfkill_free(*rfk);
+		*rfk = NULL;
+		return res;
+	}
+
+	return 0;
+}
+
+/*************************************************************************
+ * thinkpad-acpi driver attributes
+ */
+
+/* interface_version --------------------------------------------------- */
+static ssize_t tpacpi_driver_interface_version_show(
+				struct device_driver *drv,
+				char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%08x\n", TPACPI_SYSFS_VERSION);
+}
+
+static DRIVER_ATTR(interface_version, S_IRUGO,
+		tpacpi_driver_interface_version_show, NULL);
+
+/* debug_level --------------------------------------------------------- */
+static ssize_t tpacpi_driver_debug_show(struct device_driver *drv,
+						char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%04x\n", dbg_level);
+}
+
+static ssize_t tpacpi_driver_debug_store(struct device_driver *drv,
+						const char *buf, size_t count)
+{
+	unsigned long t;
+
+	if (parse_strtoul(buf, 0xffff, &t))
+		return -EINVAL;
+
+	dbg_level = t;
+
+	return count;
+}
+
+static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
+		tpacpi_driver_debug_show, tpacpi_driver_debug_store);
+
+/* version ------------------------------------------------------------- */
+static ssize_t tpacpi_driver_version_show(struct device_driver *drv,
+						char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s v%s\n",
+			TPACPI_DESC, TPACPI_VERSION);
+}
+
+static DRIVER_ATTR(version, S_IRUGO,
+		tpacpi_driver_version_show, NULL);
+
+/* --------------------------------------------------------------------- */
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+
+static void tpacpi_send_radiosw_update(void);
+
+/* wlsw_emulstate ------------------------------------------------------ */
+static ssize_t tpacpi_driver_wlsw_emulstate_show(struct device_driver *drv,
+						char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", !!tpacpi_wlsw_emulstate);
+}
+
+static ssize_t tpacpi_driver_wlsw_emulstate_store(struct device_driver *drv,
+						const char *buf, size_t count)
+{
+	unsigned long t;
+
+	if (parse_strtoul(buf, 1, &t))
+		return -EINVAL;
+
+	if (tpacpi_wlsw_emulstate != t) {
+		tpacpi_wlsw_emulstate = !!t;
+		tpacpi_send_radiosw_update();
+	} else
+		tpacpi_wlsw_emulstate = !!t;
+
+	return count;
+}
+
+static DRIVER_ATTR(wlsw_emulstate, S_IWUSR | S_IRUGO,
+		tpacpi_driver_wlsw_emulstate_show,
+		tpacpi_driver_wlsw_emulstate_store);
+
+/* bluetooth_emulstate ------------------------------------------------- */
+static ssize_t tpacpi_driver_bluetooth_emulstate_show(
+					struct device_driver *drv,
+					char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", !!tpacpi_bluetooth_emulstate);
+}
+
+static ssize_t tpacpi_driver_bluetooth_emulstate_store(
+					struct device_driver *drv,
+					const char *buf, size_t count)
+{
+	unsigned long t;
+
+	if (parse_strtoul(buf, 1, &t))
+		return -EINVAL;
+
+	tpacpi_bluetooth_emulstate = !!t;
+
+	return count;
+}
+
+static DRIVER_ATTR(bluetooth_emulstate, S_IWUSR | S_IRUGO,
+		tpacpi_driver_bluetooth_emulstate_show,
+		tpacpi_driver_bluetooth_emulstate_store);
+
+/* wwan_emulstate ------------------------------------------------- */
+static ssize_t tpacpi_driver_wwan_emulstate_show(
+					struct device_driver *drv,
+					char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", !!tpacpi_wwan_emulstate);
+}
+
+static ssize_t tpacpi_driver_wwan_emulstate_store(
+					struct device_driver *drv,
+					const char *buf, size_t count)
+{
+	unsigned long t;
+
+	if (parse_strtoul(buf, 1, &t))
+		return -EINVAL;
+
+	tpacpi_wwan_emulstate = !!t;
+
+	return count;
+}
+
+static DRIVER_ATTR(wwan_emulstate, S_IWUSR | S_IRUGO,
+		tpacpi_driver_wwan_emulstate_show,
+		tpacpi_driver_wwan_emulstate_store);
+
+/* uwb_emulstate ------------------------------------------------- */
+static ssize_t tpacpi_driver_uwb_emulstate_show(
+					struct device_driver *drv,
+					char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", !!tpacpi_uwb_emulstate);
+}
+
+static ssize_t tpacpi_driver_uwb_emulstate_store(
+					struct device_driver *drv,
+					const char *buf, size_t count)
+{
+	unsigned long t;
+
+	if (parse_strtoul(buf, 1, &t))
+		return -EINVAL;
+
+	tpacpi_uwb_emulstate = !!t;
+
+	return count;
+}
+
+static DRIVER_ATTR(uwb_emulstate, S_IWUSR | S_IRUGO,
+		tpacpi_driver_uwb_emulstate_show,
+		tpacpi_driver_uwb_emulstate_store);
+#endif
+
+/* --------------------------------------------------------------------- */
+
+static struct driver_attribute *tpacpi_driver_attributes[] = {
+	&driver_attr_debug_level, &driver_attr_version,
+	&driver_attr_interface_version,
+};
+
+static int __init tpacpi_create_driver_attributes(struct device_driver *drv)
+{
+	int i, res;
+
+	i = 0;
+	res = 0;
+	while (!res && i < ARRAY_SIZE(tpacpi_driver_attributes)) {
+		res = driver_create_file(drv, tpacpi_driver_attributes[i]);
+		i++;
+	}
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+	if (!res && dbg_wlswemul)
+		res = driver_create_file(drv, &driver_attr_wlsw_emulstate);
+	if (!res && dbg_bluetoothemul)
+		res = driver_create_file(drv, &driver_attr_bluetooth_emulstate);
+	if (!res && dbg_wwanemul)
+		res = driver_create_file(drv, &driver_attr_wwan_emulstate);
+	if (!res && dbg_uwbemul)
+		res = driver_create_file(drv, &driver_attr_uwb_emulstate);
+#endif
+
+	return res;
+}
+
+static void tpacpi_remove_driver_attributes(struct device_driver *drv)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tpacpi_driver_attributes); i++)
+		driver_remove_file(drv, tpacpi_driver_attributes[i]);
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+	driver_remove_file(drv, &driver_attr_wlsw_emulstate);
+	driver_remove_file(drv, &driver_attr_bluetooth_emulstate);
+	driver_remove_file(drv, &driver_attr_wwan_emulstate);
+	driver_remove_file(drv, &driver_attr_uwb_emulstate);
+#endif
+}
+
+/****************************************************************************
+ ****************************************************************************
+ *
+ * Subdrivers
+ *
+ ****************************************************************************
+ ****************************************************************************/
+
+/*************************************************************************
+ * thinkpad-acpi init subdriver
+ */
+
+static int __init thinkpad_acpi_driver_init(struct ibm_init_struct *iibm)
+{
+	printk(TPACPI_INFO "%s v%s\n", TPACPI_DESC, TPACPI_VERSION);
+	printk(TPACPI_INFO "%s\n", TPACPI_URL);
+
+	printk(TPACPI_INFO "ThinkPad BIOS %s, EC %s\n",
+		(thinkpad_id.bios_version_str) ?
+			thinkpad_id.bios_version_str : "unknown",
+		(thinkpad_id.ec_version_str) ?
+			thinkpad_id.ec_version_str : "unknown");
+
+	if (thinkpad_id.vendor && thinkpad_id.model_str)
+		printk(TPACPI_INFO "%s %s, model %s\n",
+			(thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ?
+				"IBM" : ((thinkpad_id.vendor ==
+						PCI_VENDOR_ID_LENOVO) ?
+					"Lenovo" : "Unknown vendor"),
+			thinkpad_id.model_str,
+			(thinkpad_id.nummodel_str) ?
+				thinkpad_id.nummodel_str : "unknown");
+
+	return 0;
+}
+
+static int thinkpad_acpi_driver_read(char *p)
+{
+	int len = 0;
+
+	len += sprintf(p + len, "driver:\t\t%s\n", TPACPI_DESC);
+	len += sprintf(p + len, "version:\t%s\n", TPACPI_VERSION);
+
+	return len;
+}
+
+static struct ibm_struct thinkpad_acpi_driver_data = {
+	.name = "driver",
+	.read = thinkpad_acpi_driver_read,
+};
+
+/*************************************************************************
+ * Hotkey subdriver
+ */
+
+enum {	/* hot key scan codes (derived from ACPI DSDT) */
+	TP_ACPI_HOTKEYSCAN_FNF1		= 0,
+	TP_ACPI_HOTKEYSCAN_FNF2,
+	TP_ACPI_HOTKEYSCAN_FNF3,
+	TP_ACPI_HOTKEYSCAN_FNF4,
+	TP_ACPI_HOTKEYSCAN_FNF5,
+	TP_ACPI_HOTKEYSCAN_FNF6,
+	TP_ACPI_HOTKEYSCAN_FNF7,
+	TP_ACPI_HOTKEYSCAN_FNF8,
+	TP_ACPI_HOTKEYSCAN_FNF9,
+	TP_ACPI_HOTKEYSCAN_FNF10,
+	TP_ACPI_HOTKEYSCAN_FNF11,
+	TP_ACPI_HOTKEYSCAN_FNF12,
+	TP_ACPI_HOTKEYSCAN_FNBACKSPACE,
+	TP_ACPI_HOTKEYSCAN_FNINSERT,
+	TP_ACPI_HOTKEYSCAN_FNDELETE,
+	TP_ACPI_HOTKEYSCAN_FNHOME,
+	TP_ACPI_HOTKEYSCAN_FNEND,
+	TP_ACPI_HOTKEYSCAN_FNPAGEUP,
+	TP_ACPI_HOTKEYSCAN_FNPAGEDOWN,
+	TP_ACPI_HOTKEYSCAN_FNSPACE,
+	TP_ACPI_HOTKEYSCAN_VOLUMEUP,
+	TP_ACPI_HOTKEYSCAN_VOLUMEDOWN,
+	TP_ACPI_HOTKEYSCAN_MUTE,
+	TP_ACPI_HOTKEYSCAN_THINKPAD,
+};
+
+enum {	/* Keys available through NVRAM polling */
+	TPACPI_HKEY_NVRAM_KNOWN_MASK = 0x00fb88c0U,
+	TPACPI_HKEY_NVRAM_GOOD_MASK  = 0x00fb8000U,
+};
+
+enum {	/* Positions of some of the keys in hotkey masks */
+	TP_ACPI_HKEY_DISPSWTCH_MASK	= 1 << TP_ACPI_HOTKEYSCAN_FNF7,
+	TP_ACPI_HKEY_DISPXPAND_MASK	= 1 << TP_ACPI_HOTKEYSCAN_FNF8,
+	TP_ACPI_HKEY_HIBERNATE_MASK	= 1 << TP_ACPI_HOTKEYSCAN_FNF12,
+	TP_ACPI_HKEY_BRGHTUP_MASK	= 1 << TP_ACPI_HOTKEYSCAN_FNHOME,
+	TP_ACPI_HKEY_BRGHTDWN_MASK	= 1 << TP_ACPI_HOTKEYSCAN_FNEND,
+	TP_ACPI_HKEY_THNKLGHT_MASK	= 1 << TP_ACPI_HOTKEYSCAN_FNPAGEUP,
+	TP_ACPI_HKEY_ZOOM_MASK		= 1 << TP_ACPI_HOTKEYSCAN_FNSPACE,
+	TP_ACPI_HKEY_VOLUP_MASK		= 1 << TP_ACPI_HOTKEYSCAN_VOLUMEUP,
+	TP_ACPI_HKEY_VOLDWN_MASK	= 1 << TP_ACPI_HOTKEYSCAN_VOLUMEDOWN,
+	TP_ACPI_HKEY_MUTE_MASK		= 1 << TP_ACPI_HOTKEYSCAN_MUTE,
+	TP_ACPI_HKEY_THINKPAD_MASK	= 1 << TP_ACPI_HOTKEYSCAN_THINKPAD,
+};
+
+enum {	/* NVRAM to ACPI HKEY group map */
+	TP_NVRAM_HKEY_GROUP_HK2		= TP_ACPI_HKEY_THINKPAD_MASK |
+					  TP_ACPI_HKEY_ZOOM_MASK |
+					  TP_ACPI_HKEY_DISPSWTCH_MASK |
+					  TP_ACPI_HKEY_HIBERNATE_MASK,
+	TP_NVRAM_HKEY_GROUP_BRIGHTNESS	= TP_ACPI_HKEY_BRGHTUP_MASK |
+					  TP_ACPI_HKEY_BRGHTDWN_MASK,
+	TP_NVRAM_HKEY_GROUP_VOLUME	= TP_ACPI_HKEY_VOLUP_MASK |
+					  TP_ACPI_HKEY_VOLDWN_MASK |
+					  TP_ACPI_HKEY_MUTE_MASK,
+};
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+struct tp_nvram_state {
+       u16 thinkpad_toggle:1;
+       u16 zoom_toggle:1;
+       u16 display_toggle:1;
+       u16 thinklight_toggle:1;
+       u16 hibernate_toggle:1;
+       u16 displayexp_toggle:1;
+       u16 display_state:1;
+       u16 brightness_toggle:1;
+       u16 volume_toggle:1;
+       u16 mute:1;
+
+       u8 brightness_level;
+       u8 volume_level;
+};
+
+static struct task_struct *tpacpi_hotkey_task;
+static u32 hotkey_source_mask;		/* bit mask 0=ACPI,1=NVRAM */
+static int hotkey_poll_freq = 10;	/* Hz */
+static struct mutex hotkey_thread_mutex;
+static struct mutex hotkey_thread_data_mutex;
+static unsigned int hotkey_config_change;
+
+#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+#define hotkey_source_mask 0U
+
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+static struct mutex hotkey_mutex;
+
+static enum {	/* Reasons for waking up */
+	TP_ACPI_WAKEUP_NONE = 0,	/* None or unknown */
+	TP_ACPI_WAKEUP_BAYEJ,		/* Bay ejection request */
+	TP_ACPI_WAKEUP_UNDOCK,		/* Undock request */
+} hotkey_wakeup_reason;
+
+static int hotkey_autosleep_ack;
+
+static int hotkey_orig_status;
+static u32 hotkey_orig_mask;
+static u32 hotkey_all_mask;
+static u32 hotkey_reserved_mask;
+static u32 hotkey_mask;
+
+static unsigned int hotkey_report_mode;
+
+static u16 *hotkey_keycode_map;
+
+static struct attribute_set *hotkey_dev_attributes;
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+#define HOTKEY_CONFIG_CRITICAL_START \
+	do { \
+		mutex_lock(&hotkey_thread_data_mutex); \
+		hotkey_config_change++; \
+	} while (0);
+#define HOTKEY_CONFIG_CRITICAL_END \
+	mutex_unlock(&hotkey_thread_data_mutex);
+#else
+#define HOTKEY_CONFIG_CRITICAL_START
+#define HOTKEY_CONFIG_CRITICAL_END
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+/* HKEY.MHKG() return bits */
+#define TP_HOTKEY_TABLET_MASK (1 << 3)
+
+static int hotkey_get_wlsw(int *status)
+{
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+	if (dbg_wlswemul) {
+		*status = !!tpacpi_wlsw_emulstate;
+		return 0;
+	}
+#endif
+	if (!acpi_evalf(hkey_handle, status, "WLSW", "d"))
+		return -EIO;
+	return 0;
+}
+
+static int hotkey_get_tablet_mode(int *status)
+{
+	int s;
+
+	if (!acpi_evalf(hkey_handle, &s, "MHKG", "d"))
+		return -EIO;
+
+	*status = ((s & TP_HOTKEY_TABLET_MASK) != 0);
+	return 0;
+}
+
+/*
+ * Call with hotkey_mutex held
+ */
+static int hotkey_mask_get(void)
+{
+	u32 m = 0;
+
+	if (tp_features.hotkey_mask) {
+		if (!acpi_evalf(hkey_handle, &m, "DHKN", "d"))
+			return -EIO;
+	}
+	hotkey_mask = m | (hotkey_source_mask & hotkey_mask);
+
+	return 0;
+}
+
+/*
+ * Call with hotkey_mutex held
+ */
+static int hotkey_mask_set(u32 mask)
+{
+	int i;
+	int rc = 0;
+
+	if (tp_features.hotkey_mask) {
+		if (!tp_warned.hotkey_mask_ff &&
+		    (mask == 0xffff || mask == 0xffffff ||
+		     mask == 0xffffffff)) {
+			tp_warned.hotkey_mask_ff = 1;
+			printk(TPACPI_NOTICE
+			       "setting the hotkey mask to 0x%08x is likely "
+			       "not the best way to go about it\n", mask);
+			printk(TPACPI_NOTICE
+			       "please consider using the driver defaults, "
+			       "and refer to up-to-date thinkpad-acpi "
+			       "documentation\n");
+		}
+
+		HOTKEY_CONFIG_CRITICAL_START
+		for (i = 0; i < 32; i++) {
+			u32 m = 1 << i;
+			/* enable in firmware mask only keys not in NVRAM
+			 * mode, but enable the key in the cached hotkey_mask
+			 * regardless of mode, or the key will end up
+			 * disabled by hotkey_mask_get() */
+			if (!acpi_evalf(hkey_handle,
+					NULL, "MHKM", "vdd", i + 1,
+					!!((mask & ~hotkey_source_mask) & m))) {
+				rc = -EIO;
+				break;
+			} else {
+				hotkey_mask = (hotkey_mask & ~m) | (mask & m);
+			}
+		}
+		HOTKEY_CONFIG_CRITICAL_END
+
+		/* hotkey_mask_get must be called unconditionally below */
+		if (!hotkey_mask_get() && !rc &&
+		    (hotkey_mask & ~hotkey_source_mask) !=
+		     (mask & ~hotkey_source_mask)) {
+			printk(TPACPI_NOTICE
+			       "requested hot key mask 0x%08x, but "
+			       "firmware forced it to 0x%08x\n",
+			       mask, hotkey_mask);
+		}
+	} else {
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+		HOTKEY_CONFIG_CRITICAL_START
+		hotkey_mask = mask & hotkey_source_mask;
+		HOTKEY_CONFIG_CRITICAL_END
+		hotkey_mask_get();
+		if (hotkey_mask != mask) {
+			printk(TPACPI_NOTICE
+			       "requested hot key mask 0x%08x, "
+			       "forced to 0x%08x (NVRAM poll mask is "
+			       "0x%08x): no firmware mask support\n",
+			       mask, hotkey_mask, hotkey_source_mask);
+		}
+#else
+		hotkey_mask_get();
+		rc = -ENXIO;
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+	}
+
+	return rc;
+}
+
+static int hotkey_status_get(int *status)
+{
+	if (!acpi_evalf(hkey_handle, status, "DHKC", "d"))
+		return -EIO;
+
+	return 0;
+}
+
+static int hotkey_status_set(int status)
+{
+	if (!acpi_evalf(hkey_handle, NULL, "MHKC", "vd", status))
+		return -EIO;
+
+	return 0;
+}
+
+static void tpacpi_input_send_tabletsw(void)
+{
+	int state;
+
+	if (tp_features.hotkey_tablet &&
+	    !hotkey_get_tablet_mode(&state)) {
+		mutex_lock(&tpacpi_inputdev_send_mutex);
+
+		input_report_switch(tpacpi_inputdev,
+				    SW_TABLET_MODE, !!state);
+		input_sync(tpacpi_inputdev);
+
+		mutex_unlock(&tpacpi_inputdev_send_mutex);
+	}
+}
+
+static void tpacpi_input_send_key(unsigned int scancode)
+{
+	unsigned int keycode;
+
+	keycode = hotkey_keycode_map[scancode];
+
+	if (keycode != KEY_RESERVED) {
+		mutex_lock(&tpacpi_inputdev_send_mutex);
+
+		input_report_key(tpacpi_inputdev, keycode, 1);
+		if (keycode == KEY_UNKNOWN)
+			input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
+				    scancode);
+		input_sync(tpacpi_inputdev);
+
+		input_report_key(tpacpi_inputdev, keycode, 0);
+		if (keycode == KEY_UNKNOWN)
+			input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
+				    scancode);
+		input_sync(tpacpi_inputdev);
+
+		mutex_unlock(&tpacpi_inputdev_send_mutex);
+	}
+}
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+static struct tp_acpi_drv_struct ibm_hotkey_acpidriver;
+
+static void tpacpi_hotkey_send_key(unsigned int scancode)
+{
+	tpacpi_input_send_key(scancode);
+	if (hotkey_report_mode < 2) {
+		acpi_bus_generate_proc_event(ibm_hotkey_acpidriver.device,
+						0x80, 0x1001 + scancode);
+	}
+}
+
+static void hotkey_read_nvram(struct tp_nvram_state *n, u32 m)
+{
+	u8 d;
+
+	if (m & TP_NVRAM_HKEY_GROUP_HK2) {
+		d = nvram_read_byte(TP_NVRAM_ADDR_HK2);
+		n->thinkpad_toggle = !!(d & TP_NVRAM_MASK_HKT_THINKPAD);
+		n->zoom_toggle = !!(d & TP_NVRAM_MASK_HKT_ZOOM);
+		n->display_toggle = !!(d & TP_NVRAM_MASK_HKT_DISPLAY);
+		n->hibernate_toggle = !!(d & TP_NVRAM_MASK_HKT_HIBERNATE);
+	}
+	if (m & TP_ACPI_HKEY_THNKLGHT_MASK) {
+		d = nvram_read_byte(TP_NVRAM_ADDR_THINKLIGHT);
+		n->thinklight_toggle = !!(d & TP_NVRAM_MASK_THINKLIGHT);
+	}
+	if (m & TP_ACPI_HKEY_DISPXPAND_MASK) {
+		d = nvram_read_byte(TP_NVRAM_ADDR_VIDEO);
+		n->displayexp_toggle =
+				!!(d & TP_NVRAM_MASK_HKT_DISPEXPND);
+	}
+	if (m & TP_NVRAM_HKEY_GROUP_BRIGHTNESS) {
+		d = nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS);
+		n->brightness_level = (d & TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
+				>> TP_NVRAM_POS_LEVEL_BRIGHTNESS;
+		n->brightness_toggle =
+				!!(d & TP_NVRAM_MASK_HKT_BRIGHTNESS);
+	}
+	if (m & TP_NVRAM_HKEY_GROUP_VOLUME) {
+		d = nvram_read_byte(TP_NVRAM_ADDR_MIXER);
+		n->volume_level = (d & TP_NVRAM_MASK_LEVEL_VOLUME)
+				>> TP_NVRAM_POS_LEVEL_VOLUME;
+		n->mute = !!(d & TP_NVRAM_MASK_MUTE);
+		n->volume_toggle = !!(d & TP_NVRAM_MASK_HKT_VOLUME);
+	}
+}
+
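+/* Both helpers only issue events for keys selected in the polling
+ * mask; TPACPI_COMPARE_KEY additionally requires the toggle state to
+ * have changed between the two NVRAM snapshots */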
+#define TPACPI_COMPARE_KEY(__scancode, __member) \
+	do { \
+		if ((mask & (1 << __scancode)) && \
+		    oldn->__member != newn->__member) \
+		tpacpi_hotkey_send_key(__scancode); \
+	} while (0)
+
+#define TPACPI_MAY_SEND_KEY(__scancode) \
+	do { if (mask & (1 << __scancode)) \
+		tpacpi_hotkey_send_key(__scancode); } while (0)
+
+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+					   struct tp_nvram_state *newn,
+					   u32 mask)
+{
+	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
+	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
+	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
+	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF12, hibernate_toggle);
+
+	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNPAGEUP, thinklight_toggle);
+
+	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF8, displayexp_toggle);
+
+	/* handle volume */
+	if (oldn->volume_toggle != newn->volume_toggle) {
+		if (oldn->mute != newn->mute) {
+			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
+		}
+		if (oldn->volume_level > newn->volume_level) {
+			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
+		} else if (oldn->volume_level < newn->volume_level) {
+			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+		} else if (oldn->mute == newn->mute) {
+			/* repeated key presses that didn't change state */
+			if (newn->mute) {
+				TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
+			} else if (newn->volume_level != 0) {
+				TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+			} else {
+				TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
+			}
+		}
+	}
+
+	/* handle brightness */
+	if (oldn->brightness_toggle != newn->brightness_toggle) {
+		if (oldn->brightness_level < newn->brightness_level) {
+			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+		} else if (oldn->brightness_level > newn->brightness_level) {
+			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
+		} else {
+			/* repeated key presses that didn't change state */
+			if (newn->brightness_level != 0) {
+				TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+			} else {
+				TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
+			}
+		}
+	}
+}
+
+#undef TPACPI_COMPARE_KEY
+#undef TPACPI_MAY_SEND_KEY
+
+static int hotkey_kthread(void *data)
+{
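+	/* s[] double-buffers NVRAM snapshots: si indexes the slot to
+	 * fill on the next poll, so holds the previous snapshot; the
+	 * two are swapped after every comparison */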
+	struct tp_nvram_state s[2];
+	u32 mask;
+	unsigned int si, so;
+	unsigned long t;
+	unsigned int change_detector, must_reset;
+
+	mutex_lock(&hotkey_thread_mutex);
+
+	if (tpacpi_lifecycle == TPACPI_LIFE_EXITING)
+		goto exit;
+
+	set_freezable();
+
+	so = 0;
+	si = 1;
+	t = 0;
+
+	/* Initial state for compares */
+	mutex_lock(&hotkey_thread_data_mutex);
+	change_detector = hotkey_config_change;
+	mask = hotkey_source_mask & hotkey_mask;
+	mutex_unlock(&hotkey_thread_data_mutex);
+	hotkey_read_nvram(&s[so], mask);
+
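+	/* poll every 1000/hotkey_poll_freq ms; if msleep_interruptible()
+	 * returns early (t > 0) and we were not frozen, go back to
+	 * sleep for the remaining time */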
+	while (!kthread_should_stop() && hotkey_poll_freq) {
+		if (t == 0)
+			t = 1000/hotkey_poll_freq;
+		t = msleep_interruptible(t);
+		if (unlikely(kthread_should_stop()))
+			break;
+		must_reset = try_to_freeze();
+		if (t > 0 && !must_reset)
+			continue;
+
+		mutex_lock(&hotkey_thread_data_mutex);
+		if (must_reset || hotkey_config_change != change_detector) {
+			/* forget old state on thaw or config change */
+			si = so;
+			t = 0;
+			change_detector = hotkey_config_change;
+		}
+		mask = hotkey_source_mask & hotkey_mask;
+		mutex_unlock(&hotkey_thread_data_mutex);
+
+		if (likely(mask)) {
+			hotkey_read_nvram(&s[si], mask);
+			if (likely(si != so)) {
+				hotkey_compare_and_issue_event(&s[so], &s[si],
+								mask);
+			}
+		}
+
+		so = si;
+		si ^= 1;
+	}
+
+exit:
+	mutex_unlock(&hotkey_thread_mutex);
+	return 0;
+}
+
+static void hotkey_poll_stop_sync(void)
+{
+	if (tpacpi_hotkey_task) {
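+		/* a frozen task cannot process kthread_stop(), so
+		 * thaw it before asking it to stop */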
+		if (frozen(tpacpi_hotkey_task) ||
+		    freezing(tpacpi_hotkey_task))
+			thaw_process(tpacpi_hotkey_task);
+
+		kthread_stop(tpacpi_hotkey_task);
+		tpacpi_hotkey_task = NULL;
+		mutex_lock(&hotkey_thread_mutex);
+		/* at this point, the thread did exit */
+		mutex_unlock(&hotkey_thread_mutex);
+	}
+}
+
+/* call with hotkey_mutex held */
+static void hotkey_poll_setup(int may_warn)
+{
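+	/* polling is only worthwhile if some enabled key needs NVRAM
+	 * polling, the poll frequency is non-zero, and there is a
+	 * consumer: the input device is open, or legacy reporting
+	 * (hotkey_report_mode < 2) is active */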
+	if ((hotkey_source_mask & hotkey_mask) != 0 &&
+	    hotkey_poll_freq > 0 &&
+	    (tpacpi_inputdev->users > 0 || hotkey_report_mode < 2)) {
+		if (!tpacpi_hotkey_task) {
+			tpacpi_hotkey_task = kthread_run(hotkey_kthread,
+					NULL, TPACPI_NVRAM_KTHREAD_NAME);
+			if (IS_ERR(tpacpi_hotkey_task)) {
+				tpacpi_hotkey_task = NULL;
+				printk(TPACPI_ERR
+				       "could not create kernel thread "
+				       "for hotkey polling\n");
+			}
+		}
+	} else {
+		hotkey_poll_stop_sync();
+		if (may_warn &&
+		    hotkey_source_mask != 0 && hotkey_poll_freq == 0) {
+			printk(TPACPI_NOTICE
+				"hot keys 0x%08x require polling, "
+				"which is currently disabled\n",
+				hotkey_source_mask);
+		}
+	}
+}
+
+static void hotkey_poll_setup_safe(int may_warn)
+{
+	mutex_lock(&hotkey_mutex);
+	hotkey_poll_setup(may_warn);
+	mutex_unlock(&hotkey_mutex);
+}
+
+#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+static void hotkey_poll_setup_safe(int __unused)
+{
+}
+
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+static int hotkey_inputdev_open(struct input_dev *dev)
+{
+	switch (tpacpi_lifecycle) {
+	case TPACPI_LIFE_INIT:
+		/*
+		 * hotkey_init will call hotkey_poll_setup_safe
+		 * at the appropriate moment
+		 */
+		return 0;
+	case TPACPI_LIFE_EXITING:
+		return -EBUSY;
+	case TPACPI_LIFE_RUNNING:
+		hotkey_poll_setup_safe(0);
+		return 0;
+	}
+
+	/* Should only happen if tpacpi_lifecycle is corrupt */
+	BUG();
+	return -EBUSY;
+}
+
+static void hotkey_inputdev_close(struct input_dev *dev)
+{
+	/* disable hotkey polling when possible */
+	if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING)
+		hotkey_poll_setup_safe(0);
+}
+
+/* sysfs hotkey enable ------------------------------------------------- */
+static ssize_t hotkey_enable_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	int res, status;
+
+	res = hotkey_status_get(&status);
+	if (res)
+		return res;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", status);
+}
+
+static ssize_t hotkey_enable_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	unsigned long t;
+	int res;
+
+	if (parse_strtoul(buf, 1, &t))
+		return -EINVAL;
+
+	res = hotkey_status_set(t);
+
+	return (res) ? res : count;
+}
+
+static struct device_attribute dev_attr_hotkey_enable =
+	__ATTR(hotkey_enable, S_IWUSR | S_IRUGO,
+		hotkey_enable_show, hotkey_enable_store);
+
+/* sysfs hotkey mask --------------------------------------------------- */
+static ssize_t hotkey_mask_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	int res;
+
+	if (mutex_lock_killable(&hotkey_mutex))
+		return -ERESTARTSYS;
+	res = hotkey_mask_get();
+	mutex_unlock(&hotkey_mutex);
+
+	return (res)?
+		res : snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_mask);
+}
+
+static ssize_t hotkey_mask_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	unsigned long t;
+	int res;
+
+	if (parse_strtoul(buf, 0xffffffffUL, &t))
+		return -EINVAL;
+
+	if (mutex_lock_killable(&hotkey_mutex))
+		return -ERESTARTSYS;
+
+	res = hotkey_mask_set(t);
+
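+	/* the set of keys that need NVRAM polling may have changed,
+	 * so start or stop the polling thread accordingly */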
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+	hotkey_poll_setup(1);
+#endif
+
+	mutex_unlock(&hotkey_mutex);
+
+	return (res) ? res : count;
+}
+
+static struct device_attribute dev_attr_hotkey_mask =
+	__ATTR(hotkey_mask, S_IWUSR | S_IRUGO,
+		hotkey_mask_show, hotkey_mask_store);
+
+/* sysfs hotkey bios_enabled ------------------------------------------- */
+static ssize_t hotkey_bios_enabled_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_orig_status);
+}
+
+static struct device_attribute dev_attr_hotkey_bios_enabled =
+	__ATTR(hotkey_bios_enabled, S_IRUGO, hotkey_bios_enabled_show, NULL);
+
+/* sysfs hotkey bios_mask ---------------------------------------------- */
+static ssize_t hotkey_bios_mask_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_orig_mask);
+}
+
+static struct device_attribute dev_attr_hotkey_bios_mask =
+	__ATTR(hotkey_bios_mask, S_IRUGO, hotkey_bios_mask_show, NULL);
+
+/* sysfs hotkey all_mask ----------------------------------------------- */
+static ssize_t hotkey_all_mask_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%08x\n",
+				hotkey_all_mask | hotkey_source_mask);
+}
+
+static struct device_attribute dev_attr_hotkey_all_mask =
+	__ATTR(hotkey_all_mask, S_IRUGO, hotkey_all_mask_show, NULL);
+
+/* sysfs hotkey recommended_mask --------------------------------------- */
+static ssize_t hotkey_recommended_mask_show(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%08x\n",
+			(hotkey_all_mask | hotkey_source_mask)
+			& ~hotkey_reserved_mask);
+}
+
+static struct device_attribute dev_attr_hotkey_recommended_mask =
+	__ATTR(hotkey_recommended_mask, S_IRUGO,
+		hotkey_recommended_mask_show, NULL);
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+
+/* sysfs hotkey hotkey_source_mask ------------------------------------- */
+static ssize_t hotkey_source_mask_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_source_mask);
+}
+
+static ssize_t hotkey_source_mask_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	unsigned long t;
+
+	if (parse_strtoul(buf, 0xffffffffUL, &t) ||
+		((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0))
+		return -EINVAL;
+
+	if (mutex_lock_killable(&hotkey_mutex))
+		return -ERESTARTSYS;
+
+	HOTKEY_CONFIG_CRITICAL_START
+	hotkey_source_mask = t;
+	HOTKEY_CONFIG_CRITICAL_END
+
+	hotkey_poll_setup(1);
+
+	mutex_unlock(&hotkey_mutex);
+
+	return count;
+}
+
+static struct device_attribute dev_attr_hotkey_source_mask =
+	__ATTR(hotkey_source_mask, S_IWUSR | S_IRUGO,
+		hotkey_source_mask_show, hotkey_source_mask_store);
+
+/* sysfs hotkey hotkey_poll_freq --------------------------------------- */
+static ssize_t hotkey_poll_freq_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_poll_freq);
+}
+
+static ssize_t hotkey_poll_freq_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	unsigned long t;
+
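+	/* accept 0..25 Hz; 0 disables NVRAM polling */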
+	if (parse_strtoul(buf, 25, &t))
+		return -EINVAL;
+
+	if (mutex_lock_killable(&hotkey_mutex))
+		return -ERESTARTSYS;
+
+	hotkey_poll_freq = t;
+
+	hotkey_poll_setup(1);
+	mutex_unlock(&hotkey_mutex);
+
+	return count;
+}
+
+static struct device_attribute dev_attr_hotkey_poll_freq =
+	__ATTR(hotkey_poll_freq, S_IWUSR | S_IRUGO,
+		hotkey_poll_freq_show, hotkey_poll_freq_store);
+
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+/* sysfs hotkey radio_sw (pollable) ------------------------------------ */
+static ssize_t hotkey_radio_sw_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	int res, s;
+	res = hotkey_get_wlsw(&s);
+	if (res < 0)
+		return res;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", !!s);
+}
+
+static struct device_attribute dev_attr_hotkey_radio_sw =
+	__ATTR(hotkey_radio_sw, S_IRUGO, hotkey_radio_sw_show, NULL);
+
+static void hotkey_radio_sw_notify_change(void)
+{
+	if (tp_features.hotkey_wlsw)
+		sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+			     "hotkey_radio_sw");
+}
+
+/* sysfs hotkey tablet mode (pollable) --------------------------------- */
+static ssize_t hotkey_tablet_mode_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	int res, s;
+	res = hotkey_get_tablet_mode(&s);
+	if (res < 0)
+		return res;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", !!s);
+}
+
+static struct device_attribute dev_attr_hotkey_tablet_mode =
+	__ATTR(hotkey_tablet_mode, S_IRUGO, hotkey_tablet_mode_show, NULL);
+
+static void hotkey_tablet_mode_notify_change(void)
+{
+	if (tp_features.hotkey_tablet)
+		sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+			     "hotkey_tablet_mode");
+}
+
+/* sysfs hotkey report_mode -------------------------------------------- */
+static ssize_t hotkey_report_mode_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		(hotkey_report_mode != 0) ? hotkey_report_mode : 1);
+}
+
+static struct device_attribute dev_attr_hotkey_report_mode =
+	__ATTR(hotkey_report_mode, S_IRUGO, hotkey_report_mode_show, NULL);
+
+/* sysfs wakeup reason (pollable) -------------------------------------- */
+static ssize_t hotkey_wakeup_reason_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason);
+}
+
+static struct device_attribute dev_attr_hotkey_wakeup_reason =
+	__ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL);
+
+static void hotkey_wakeup_reason_notify_change(void)
+{
+	if (tp_features.hotkey_mask)
+		sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+			     "wakeup_reason");
+}
+
+/* sysfs wakeup hotunplug_complete (pollable) -------------------------- */
+static ssize_t hotkey_wakeup_hotunplug_complete_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack);
+}
+
+static struct device_attribute dev_attr_hotkey_wakeup_hotunplug_complete =
+	__ATTR(wakeup_hotunplug_complete, S_IRUGO,
+	       hotkey_wakeup_hotunplug_complete_show, NULL);
+
+static void hotkey_wakeup_hotunplug_complete_notify_change(void)
+{
+	if (tp_features.hotkey_mask)
+		sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+			     "wakeup_hotunplug_complete");
+}
+
+/* --------------------------------------------------------------------- */
+
+static struct attribute *hotkey_attributes[] __initdata = {
+	&dev_attr_hotkey_enable.attr,
+	&dev_attr_hotkey_bios_enabled.attr,
+	&dev_attr_hotkey_report_mode.attr,
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+	&dev_attr_hotkey_mask.attr,
+	&dev_attr_hotkey_all_mask.attr,
+	&dev_attr_hotkey_recommended_mask.attr,
+	&dev_attr_hotkey_source_mask.attr,
+	&dev_attr_hotkey_poll_freq.attr,
+#endif
+};
+
+static struct attribute *hotkey_mask_attributes[] __initdata = {
+	&dev_attr_hotkey_bios_mask.attr,
+#ifndef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+	&dev_attr_hotkey_mask.attr,
+	&dev_attr_hotkey_all_mask.attr,
+	&dev_attr_hotkey_recommended_mask.attr,
+#endif
+	&dev_attr_hotkey_wakeup_reason.attr,
+	&dev_attr_hotkey_wakeup_hotunplug_complete.attr,
+};
+
+static void bluetooth_update_rfk(void);
+static void wan_update_rfk(void);
+static void uwb_update_rfk(void);
+static void tpacpi_send_radiosw_update(void)
+{
+	int wlsw;
+
+	/* Sync these BEFORE sending any rfkill events */
+	if (tp_features.bluetooth)
+		bluetooth_update_rfk();
+	if (tp_features.wan)
+		wan_update_rfk();
+	if (tp_features.uwb)
+		uwb_update_rfk();
+
+	if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw)) {
+		mutex_lock(&tpacpi_inputdev_send_mutex);
+
+		input_report_switch(tpacpi_inputdev,
+				    SW_RFKILL_ALL, !!wlsw);
+		input_sync(tpacpi_inputdev);
+
+		mutex_unlock(&tpacpi_inputdev_send_mutex);
+	}
+	hotkey_radio_sw_notify_change();
+}
+
+static void hotkey_exit(void)
+{
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+	hotkey_poll_stop_sync();
+#endif
+
+	if (hotkey_dev_attributes)
+		delete_attr_set(hotkey_dev_attributes, &tpacpi_pdev->dev.kobj);
+
+	kfree(hotkey_keycode_map);
+
+	if (tp_features.hotkey) {
+		dbg_printk(TPACPI_DBG_EXIT,
+			   "restoring original hot key mask\n");
+		/* no short-circuit boolean operator below! */
+		if ((hotkey_mask_set(hotkey_orig_mask) |
+		     hotkey_status_set(hotkey_orig_status)) != 0)
+			printk(TPACPI_ERR
+			       "failed to restore hot key mask "
+			       "to BIOS defaults\n");
+	}
+}
+
+static int __init hotkey_init(struct ibm_init_struct *iibm)
+{
+	/* Requirements for changing the default keymaps:
+	 *
+	 * 1. Many of the keys are mapped to KEY_RESERVED for very
+	 *    good reasons.  Do not change them unless you have deep
+	 *    knowledge on the IBM and Lenovo ThinkPad firmware for
+	 *    the various ThinkPad models.  The driver behaves
+	 *    differently for KEY_RESERVED: such keys have their
+	 *    hot key mask *unset* in mask_recommended, and also
+	 *    in the initial hot key mask programmed into the
+	 *    firmware at driver load time, which means the
+	 *    firmware may react very differently if you change
+	 *    them to something else;
+	 *
+	 * 2. You must be subscribed to the linux-thinkpad and
+	 *    ibm-acpi-devel mailing lists, and you should read the
+	 *    list archives since 2007 if you want to change the
+	 *    keymaps.  This requirement exists so that you will
+	 *    know the past history of problems with the thinkpad-
+	 *    acpi driver keymaps, and also that you will be
+	 *    listening to any bug reports;
+	 *
+	 * 3. Do not send thinkpad-acpi specific patches directly
+	 *    for merging, *ever*.  Send them to the linux-acpi
+	 *    mailing list for comments.  Merging is to be done only
+	 *    through acpi-test and the ACPI maintainer.
+	 *
+	 * If the above is too much to ask, don't change the keymap.
+	 * Ask the thinkpad-acpi maintainer to do it, instead.
+	 */
+	static u16 ibm_keycode_map[] __initdata = {
+		/* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
+		KEY_FN_F1,	KEY_FN_F2,	KEY_COFFEE,	KEY_SLEEP,
+		KEY_WLAN,	KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
+		KEY_FN_F9,	KEY_FN_F10,	KEY_FN_F11,	KEY_SUSPEND,
+
+		/* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
+		KEY_UNKNOWN,	/* 0x0C: FN+BACKSPACE */
+		KEY_UNKNOWN,	/* 0x0D: FN+INSERT */
+		KEY_UNKNOWN,	/* 0x0E: FN+DELETE */
+
+		/* brightness: firmware always reacts to them, unless
+		 * X.org did some tricks in the radeon BIOS scratch
+		 * registers of *some* models */
+		KEY_RESERVED,	/* 0x0F: FN+HOME (brightness up) */
+		KEY_RESERVED,	/* 0x10: FN+END (brightness down) */
+
+		/* Thinklight: firmware always reacts to it */
+		KEY_RESERVED,	/* 0x11: FN+PGUP (thinklight toggle) */
+
+		KEY_UNKNOWN,	/* 0x12: FN+PGDOWN */
+		KEY_ZOOM,	/* 0x13: FN+SPACE (zoom) */
+
+		/* Volume: firmware always reacts to it and reprograms
+		 * the built-in *extra* mixer.  Never map it to control
+		 * another mixer by default. */
+		KEY_RESERVED,	/* 0x14: VOLUME UP */
+		KEY_RESERVED,	/* 0x15: VOLUME DOWN */
+		KEY_RESERVED,	/* 0x16: MUTE */
+
+		KEY_VENDOR,	/* 0x17: Thinkpad/AccessIBM/Lenovo */
+
+		/* (assignments unknown, please report if found) */
+		KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+		KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+	};
+	static u16 lenovo_keycode_map[] __initdata = {
+		/* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
+		KEY_FN_F1,	KEY_COFFEE,	KEY_BATTERY,	KEY_SLEEP,
+		KEY_WLAN,	KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
+		KEY_FN_F9,	KEY_FN_F10,	KEY_FN_F11,	KEY_SUSPEND,
+
+		/* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
+		KEY_UNKNOWN,	/* 0x0C: FN+BACKSPACE */
+		KEY_UNKNOWN,	/* 0x0D: FN+INSERT */
+		KEY_UNKNOWN,	/* 0x0E: FN+DELETE */
+
+		/* These either have to go through ACPI video, or
+		 * behave as they do on the IBM ThinkPads, so never
+		 * enable them by default */
+		KEY_RESERVED,	/* 0x0F: FN+HOME (brightness up) */
+		KEY_RESERVED,	/* 0x10: FN+END (brightness down) */
+
+		KEY_RESERVED,	/* 0x11: FN+PGUP (thinklight toggle) */
+
+		KEY_UNKNOWN,	/* 0x12: FN+PGDOWN */
+		KEY_ZOOM,	/* 0x13: FN+SPACE (zoom) */
+
+		/* Volume: z60/z61, T60 (BIOS version?): firmware always
+		 * reacts to it and reprograms the built-in *extra* mixer.
+		 * Never map it to control another mixer by default.
+		 *
+		 * T60?, T61, R60?, R61: firmware and EC try to send
+		 * these over the regular keyboard, so these are no-ops,
+		 * but there are still weird bugs re. MUTE, so do not
+		 * change unless you get test reports from all Lenovo
+		 * models.  May cause the BIOS to interfere with the
+		 * HDA mixer.
+		 */
+		KEY_RESERVED,	/* 0x14: VOLUME UP */
+		KEY_RESERVED,	/* 0x15: VOLUME DOWN */
+		KEY_RESERVED,	/* 0x16: MUTE */
+
+		KEY_VENDOR,	/* 0x17: Thinkpad/AccessIBM/Lenovo */
+
+		/* (assignments unknown, please report if found) */
+		KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+		KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+	};
+
+#define TPACPI_HOTKEY_MAP_LEN		ARRAY_SIZE(ibm_keycode_map)
+#define TPACPI_HOTKEY_MAP_SIZE		sizeof(ibm_keycode_map)
+#define TPACPI_HOTKEY_MAP_TYPESIZE	sizeof(ibm_keycode_map[0])
+
+	int res, i;
+	int status;
+	int hkeyv;
+
+	vdbg_printk(TPACPI_DBG_INIT, "initializing hotkey subdriver\n");
+
+	BUG_ON(!tpacpi_inputdev);
+	BUG_ON(tpacpi_inputdev->open != NULL ||
+	       tpacpi_inputdev->close != NULL);
+
+	TPACPI_ACPIHANDLE_INIT(hkey);
+	mutex_init(&hotkey_mutex);
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+	mutex_init(&hotkey_thread_mutex);
+	mutex_init(&hotkey_thread_data_mutex);
+#endif
+
+	/* hotkey not supported on 570 */
+	tp_features.hotkey = hkey_handle != NULL;
+
+	vdbg_printk(TPACPI_DBG_INIT, "hotkeys are %s\n",
+		str_supported(tp_features.hotkey));
+
+	if (!tp_features.hotkey)
+		return 1;
+
+	tpacpi_disable_brightness_delay();
+
+	hotkey_dev_attributes = create_attr_set(13, NULL);
+	if (!hotkey_dev_attributes)
+		return -ENOMEM;
+	res = add_many_to_attr_set(hotkey_dev_attributes,
+			hotkey_attributes,
+			ARRAY_SIZE(hotkey_attributes));
+	if (res)
+		goto err_exit;
+
+	/* mask not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
+	   A30, R30, R31, T20-22, X20-21, X22-24.  Detected by checking
+	   for HKEY interface version 0x100 */
+	if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
+		if ((hkeyv >> 8) != 1) {
+			printk(TPACPI_ERR "unknown version of the "
+			       "HKEY interface: 0x%x\n", hkeyv);
+			printk(TPACPI_ERR "please report this to %s\n",
+			       TPACPI_MAIL);
+		} else {
+			/*
+			 * MHKV 0x100 in A31, R40, R40e,
+			 * T4x, X31, and later
+			 */
+			tp_features.hotkey_mask = 1;
+		}
+	}
+
+	vdbg_printk(TPACPI_DBG_INIT, "hotkey masks are %s\n",
+		str_supported(tp_features.hotkey_mask));
+
+	if (tp_features.hotkey_mask) {
+		if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
+				"MHKA", "qd")) {
+			printk(TPACPI_ERR
+			       "missing MHKA handler, "
+			       "please report this to %s\n",
+			       TPACPI_MAIL);
+			/* FN+F12, FN+F4, FN+F3 */
+			hotkey_all_mask = 0x080cU;
+		}
+	}
+
+	/* hotkey_source_mask *must* be zero for
+	 * the first hotkey_mask_get */
+	res = hotkey_status_get(&hotkey_orig_status);
+	if (res)
+		goto err_exit;
+
+	if (tp_features.hotkey_mask) {
+		res = hotkey_mask_get();
+		if (res)
+			goto err_exit;
+
+		hotkey_orig_mask = hotkey_mask;
+		res = add_many_to_attr_set(
+				hotkey_dev_attributes,
+				hotkey_mask_attributes,
+				ARRAY_SIZE(hotkey_mask_attributes));
+		if (res)
+			goto err_exit;
+	}
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
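+	/* poll through NVRAM only the hot keys the firmware mask
+	 * cannot report; without firmware mask support, poll all
+	 * NVRAM-known keys */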
+	if (tp_features.hotkey_mask) {
+		hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK
+					& ~hotkey_all_mask;
+	} else {
+		hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK;
+	}
+
+	vdbg_printk(TPACPI_DBG_INIT,
+		    "hotkey source mask 0x%08x, polling freq %d\n",
+		    hotkey_source_mask, hotkey_poll_freq);
+#endif
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+	if (dbg_wlswemul) {
+		tp_features.hotkey_wlsw = 1;
+		printk(TPACPI_INFO
+			"radio switch emulation enabled\n");
+	} else
+#endif
+	/* Not all thinkpads have a hardware radio switch */
+	if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
+		tp_features.hotkey_wlsw = 1;
+		printk(TPACPI_INFO
+			"radio switch found; radios are %s\n",
+			enabled(status, 0));
+	}
+	if (tp_features.hotkey_wlsw)
+		res = add_to_attr_set(hotkey_dev_attributes,
+				&dev_attr_hotkey_radio_sw.attr);
+
+	/* For X41t, X60t, X61t Tablets... */
+	if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) {
+		tp_features.hotkey_tablet = 1;
+		printk(TPACPI_INFO
+			"possible tablet mode switch found; "
+			"ThinkPad in %s mode\n",
+			(status & TP_HOTKEY_TABLET_MASK)?
+				"tablet" : "laptop");
+		res = add_to_attr_set(hotkey_dev_attributes,
+				&dev_attr_hotkey_tablet_mode.attr);
+	}
+
+	if (!res)
+		res = register_attr_set_with_sysfs(
+				hotkey_dev_attributes,
+				&tpacpi_pdev->dev.kobj);
+	if (res)
+		goto err_exit;
+
+	/* Set up key map */
+
+	hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE,
+					GFP_KERNEL);
+	if (!hotkey_keycode_map) {
+		printk(TPACPI_ERR
+			"failed to allocate memory for key map\n");
+		res = -ENOMEM;
+		goto err_exit;
+	}
+
+	if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO) {
+		dbg_printk(TPACPI_DBG_INIT,
+			   "using Lenovo default hot key map\n");
+		memcpy(hotkey_keycode_map, &lenovo_keycode_map,
+			TPACPI_HOTKEY_MAP_SIZE);
+	} else {
+		dbg_printk(TPACPI_DBG_INIT,
+			   "using IBM default hot key map\n");
+		memcpy(hotkey_keycode_map, &ibm_keycode_map,
+			TPACPI_HOTKEY_MAP_SIZE);
+	}
+
+	set_bit(EV_KEY, tpacpi_inputdev->evbit);
+	set_bit(EV_MSC, tpacpi_inputdev->evbit);
+	set_bit(MSC_SCAN, tpacpi_inputdev->mscbit);
+	tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE;
+	tpacpi_inputdev->keycodemax = TPACPI_HOTKEY_MAP_LEN;
+	tpacpi_inputdev->keycode = hotkey_keycode_map;
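+	/* keys mapped to KEY_RESERVED are never enabled: collect them
+	 * into hotkey_reserved_mask and leave them out of keybit */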
+	for (i = 0; i < TPACPI_HOTKEY_MAP_LEN; i++) {
+		if (hotkey_keycode_map[i] != KEY_RESERVED) {
+			set_bit(hotkey_keycode_map[i],
+				tpacpi_inputdev->keybit);
+		} else {
+			if (i < sizeof(hotkey_reserved_mask)*8)
+				hotkey_reserved_mask |= 1 << i;
+		}
+	}
+
+	if (tp_features.hotkey_wlsw) {
+		set_bit(EV_SW, tpacpi_inputdev->evbit);
+		set_bit(SW_RFKILL_ALL, tpacpi_inputdev->swbit);
+	}
+	if (tp_features.hotkey_tablet) {
+		set_bit(EV_SW, tpacpi_inputdev->evbit);
+		set_bit(SW_TABLET_MODE, tpacpi_inputdev->swbit);
+	}
+
+	/* Do not issue duplicate brightness change events to
+	 * userspace */
+	if (!tp_features.bright_acpimode)
+		/* update bright_acpimode... */
+		tpacpi_check_std_acpi_brightness_support();
+
+	if (tp_features.bright_acpimode) {
+		printk(TPACPI_INFO
+		       "This ThinkPad has standard ACPI backlight "
+		       "brightness control, supported by the ACPI "
+		       "video driver\n");
+		printk(TPACPI_NOTICE
+		       "Disabling thinkpad-acpi brightness events "
+		       "by default...\n");
+
+		/* The hotkey_reserved_mask change below is not
+		 * necessary while the keys are at KEY_RESERVED in the
+		 * default map, but better safe than sorry, leave it
+		 * here as a marker of what we have to do, especially
+		 * when we finally become able to set this at runtime
+		 * in response to X.org requests */
+		hotkey_reserved_mask |=
+			(1 << TP_ACPI_HOTKEYSCAN_FNHOME)
+			| (1 << TP_ACPI_HOTKEYSCAN_FNEND);
+	}
+
+	dbg_printk(TPACPI_DBG_INIT, "enabling hot key handling\n");
+	res = hotkey_status_set(1);
+	if (res) {
+		hotkey_exit();
+		return res;
+	}
+	res = hotkey_mask_set(((hotkey_all_mask | hotkey_source_mask)
+				& ~hotkey_reserved_mask)
+				| hotkey_orig_mask);
+	if (res < 0 && res != -ENXIO) {
+		hotkey_exit();
+		return res;
+	}
+
+	dbg_printk(TPACPI_DBG_INIT,
+			"legacy hot key reporting over procfs %s\n",
+			(hotkey_report_mode < 2) ?
+				"enabled" : "disabled");
+
+	tpacpi_inputdev->open = &hotkey_inputdev_open;
+	tpacpi_inputdev->close = &hotkey_inputdev_close;
+
+	hotkey_poll_setup_safe(1);
+	tpacpi_send_radiosw_update();
+	tpacpi_input_send_tabletsw();
+
+	return 0;
+
+err_exit:
+	delete_attr_set(hotkey_dev_attributes, &tpacpi_pdev->dev.kobj);
+	hotkey_dev_attributes = NULL;
+
+	return (res < 0)? res : 1;
+}
+
+static bool hotkey_notify_hotkey(const u32 hkey,
+				 bool *send_acpi_ev,
+				 bool *ignore_acpi_ev)
+{
+	/* 0x1000-0x1FFF: key presses */
+	unsigned int scancode = hkey & 0xfff;
+	*send_acpi_ev = true;
+	*ignore_acpi_ev = false;
+
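+	/* hot key presses arrive as 0x1001..0x1020; convert to a
+	 * 0-based scan code and report it through the input layer,
+	 * unless the key is handled by NVRAM polling (which would
+	 * otherwise cause duplicate events) */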
+	if (scancode > 0 && scancode < 0x21) {
+		scancode--;
+		if (!(hotkey_source_mask & (1 << scancode))) {
+			tpacpi_input_send_key(scancode);
+			*send_acpi_ev = false;
+		} else {
+			*ignore_acpi_ev = true;
+		}
+		return true;
+	}
+	return false;
+}
+
+static bool hotkey_notify_wakeup(const u32 hkey,
+				 bool *send_acpi_ev,
+				 bool *ignore_acpi_ev)
+{
+	/* 0x2000-0x2FFF: Wakeup reason */
+	*send_acpi_ev = true;
+	*ignore_acpi_ev = false;
+
+	switch (hkey) {
+	case 0x2304: /* suspend, undock */
+	case 0x2404: /* hibernation, undock */
+		hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK;
+		*ignore_acpi_ev = true;
+		break;
+
+	case 0x2305: /* suspend, bay eject */
+	case 0x2405: /* hibernation, bay eject */
+		hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ;
+		*ignore_acpi_ev = true;
+		break;
+
+	case 0x2313: /* Battery on critical low level (S3) */
+	case 0x2413: /* Battery on critical low level (S4) */
+		printk(TPACPI_ALERT
+			"EMERGENCY WAKEUP: battery almost empty\n");
+		/* how to auto-heal: */
+		/* 2313: woke up from S3, go to S4/S5 */
+		/* 2413: woke up from S4, go to S5 */
+		break;
+
+	default:
+		return false;
+	}
+
+	if (hotkey_wakeup_reason != TP_ACPI_WAKEUP_NONE) {
+		printk(TPACPI_INFO
+		       "woke up due to a hot-unplug "
+		       "request...\n");
+		hotkey_wakeup_reason_notify_change();
+	}
+	return true;
+}
+
+static bool hotkey_notify_usrevent(const u32 hkey,
+				 bool *send_acpi_ev,
+				 bool *ignore_acpi_ev)
+{
+	/* 0x5000-0x5FFF: human interface helpers */
+	*send_acpi_ev = true;
+	*ignore_acpi_ev = false;
+
+	switch (hkey) {
+	case 0x5010: /* Lenovo new BIOS: brightness changed */
+	case 0x500b: /* X61t: tablet pen inserted into bay */
+	case 0x500c: /* X61t: tablet pen removed from bay */
+		return true;
+
+	case 0x5009: /* X41t-X61t: swivel up (tablet mode) */
+	case 0x500a: /* X41t-X61t: swivel down (normal mode) */
+		tpacpi_input_send_tabletsw();
+		hotkey_tablet_mode_notify_change();
+		*send_acpi_ev = false;
+		return true;
+
+	case 0x5001:
+	case 0x5002:
+		/* LID switch events.  Do not propagate */
+		*ignore_acpi_ev = true;
+		return true;
+
+	default:
+		return false;
+	}
+}
+
+static bool hotkey_notify_thermal(const u32 hkey,
+				 bool *send_acpi_ev,
+				 bool *ignore_acpi_ev)
+{
+	/* 0x6000-0x6FFF: thermal alarms */
+	*send_acpi_ev = true;
+	*ignore_acpi_ev = false;
+
+	switch (hkey) {
+	case 0x6011:
+		printk(TPACPI_CRIT
+			"THERMAL ALARM: battery is too hot!\n");
+		/* recommended action: warn user through gui */
+		return true;
+	case 0x6012:
+		printk(TPACPI_ALERT
+			"THERMAL EMERGENCY: battery is extremely hot!\n");
+		/* recommended action: immediate sleep/hibernate */
+		return true;
+	case 0x6021:
+		printk(TPACPI_CRIT
+			"THERMAL ALARM: "
+			"a sensor reports something is too hot!\n");
+		/* recommended action: warn user through gui, that */
+		/* some internal component is too hot */
+		return true;
+	case 0x6022:
+		printk(TPACPI_ALERT
+			"THERMAL EMERGENCY: "
+			"a sensor reports something is extremely hot!\n");
+		/* recommended action: immediate sleep/hibernate */
+		return true;
+	case 0x6030:
+		printk(TPACPI_INFO
+			"EC reports that Thermal Table has changed\n");
+		/* recommended action: do nothing, we don't have
+		 * Lenovo ATM information */
+		return true;
+	default:
+		printk(TPACPI_ALERT
+			 "THERMAL ALERT: unknown thermal alarm received\n");
+		return false;
+	}
+}
+
+static void hotkey_notify(struct ibm_struct *ibm, u32 event)
+{
+	u32 hkey;
+	bool send_acpi_ev;
+	bool ignore_acpi_ev;
+	bool known_ev;
+
+	if (event != 0x80) {
+		printk(TPACPI_ERR
+		       "unknown HKEY notification event %d\n", event);
+		/* forward it to userspace, maybe it knows how to handle it */
+		acpi_bus_generate_netlink_event(
+					ibm->acpi->device->pnp.device_class,
+					dev_name(&ibm->acpi->device->dev),
+					event, 0);
+		return;
+	}
+
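+	/* drain the firmware event queue: each MHKP call returns one
+	 * pending event, or 0 once the queue is empty */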
+	while (1) {
+		if (!acpi_evalf(hkey_handle, &hkey, "MHKP", "d")) {
+			printk(TPACPI_ERR "failed to retrieve HKEY event\n");
+			return;
+		}
+
+		if (hkey == 0) {
+			/* queue empty */
+			return;
+		}
+
+		send_acpi_ev = true;
+		ignore_acpi_ev = false;
+
+		switch (hkey >> 12) {
+		case 1:
+			/* 0x1000-0x1FFF: key presses */
+			known_ev = hotkey_notify_hotkey(hkey, &send_acpi_ev,
+						 &ignore_acpi_ev);
+			break;
+		case 2:
+			/* 0x2000-0x2FFF: Wakeup reason */
+			known_ev = hotkey_notify_wakeup(hkey, &send_acpi_ev,
+						 &ignore_acpi_ev);
+			break;
+		case 3:
+			/* 0x3000-0x3FFF: bay-related wakeups */
+			if (hkey == 0x3003) {
+				hotkey_autosleep_ack = 1;
+				printk(TPACPI_INFO
+				       "bay ejected\n");
+				hotkey_wakeup_hotunplug_complete_notify_change();
+				known_ev = true;
+			} else {
+				known_ev = false;
+			}
+			break;
+		case 4:
+			/* 0x4000-0x4FFF: dock-related wakeups */
+			if (hkey == 0x4003) {
+				hotkey_autosleep_ack = 1;
+				printk(TPACPI_INFO
+				       "undocked\n");
+				hotkey_wakeup_hotunplug_complete_notify_change();
+				known_ev = true;
+			} else {
+				known_ev = false;
+			}
+			break;
+		case 5:
+			/* 0x5000-0x5FFF: human interface helpers */
+			known_ev = hotkey_notify_usrevent(hkey, &send_acpi_ev,
+						 &ignore_acpi_ev);
+			break;
+		case 6:
+			/* 0x6000-0x6FFF: thermal alarms */
+			known_ev = hotkey_notify_thermal(hkey, &send_acpi_ev,
+						 &ignore_acpi_ev);
+			break;
+		case 7:
+			/* 0x7000-0x7FFF: misc */
+			if (tp_features.hotkey_wlsw && hkey == 0x7000) {
+				tpacpi_send_radiosw_update();
+				send_acpi_ev = false;
+				known_ev = true;
+				break;
+			}
+			/* fallthrough to default */
+		default:
+			known_ev = false;
+		}
+		if (!known_ev) {
+			printk(TPACPI_NOTICE
+			       "unhandled HKEY event 0x%04x\n", hkey);
+			printk(TPACPI_NOTICE
+			       "please report the conditions when this "
+			       "event happened to %s\n", TPACPI_MAIL);
+		}
+
+		/* Legacy events */
+		if (!ignore_acpi_ev &&
+		    (send_acpi_ev || hotkey_report_mode < 2)) {
+			acpi_bus_generate_proc_event(ibm->acpi->device,
+						     event, hkey);
+		}
+
+		/* netlink events */
+		if (!ignore_acpi_ev && send_acpi_ev) {
+			acpi_bus_generate_netlink_event(
+					ibm->acpi->device->pnp.device_class,
+					dev_name(&ibm->acpi->device->dev),
+					event, hkey);
+		}
+	}
+}
+
+static void hotkey_suspend(pm_message_t state)
+{
+	/* Do these on suspend, we get the events on early resume! */
+	hotkey_wakeup_reason = TP_ACPI_WAKEUP_NONE;
+	hotkey_autosleep_ack = 0;
+}
+
+static void hotkey_resume(void)
+{
+	tpacpi_disable_brightness_delay();
+
+	if (hotkey_mask_get())
+		printk(TPACPI_ERR
+		       "error while trying to read hot key mask "
+		       "from firmware\n");
+	tpacpi_send_radiosw_update();
+	hotkey_tablet_mode_notify_change();
+	hotkey_wakeup_reason_notify_change();
+	hotkey_wakeup_hotunplug_complete_notify_change();
+	hotkey_poll_setup_safe(0);
+}
+
+/* procfs -------------------------------------------------------------- */
+static int hotkey_read(char *p)
+{
+	int res, status;
+	int len = 0;
+
+	if (!tp_features.hotkey) {
+		len += sprintf(p + len, "status:\t\tnot supported\n");
+		return len;
+	}
+
+	if (mutex_lock_killable(&hotkey_mutex))
+		return -ERESTARTSYS;
+	res = hotkey_status_get(&status);
+	if (!res)
+		res = hotkey_mask_get();
+	mutex_unlock(&hotkey_mutex);
+	if (res)
+		return res;
+
+	len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0));
+	if (tp_features.hotkey_mask) {
+		len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_mask);
+		len += sprintf(p + len,
+			       "commands:\tenable, disable, reset, <mask>\n");
+	} else {
+		len += sprintf(p + len, "mask:\t\tnot supported\n");
+		len += sprintf(p + len, "commands:\tenable, disable, reset\n");
+	}
+
+	return len;
+}
+
+static int hotkey_write(char *buf)
+{
+	int res, status;
+	u32 mask;
+	char *cmd;
+
+	if (!tp_features.hotkey)
+		return -ENODEV;
+
+	if (mutex_lock_killable(&hotkey_mutex))
+		return -ERESTARTSYS;
+
+	status = -1;
+	mask = hotkey_mask;
+
+	res = 0;
+	while ((cmd = next_cmd(&buf))) {
+		if (strlencmp(cmd, "enable") == 0) {
+			status = 1;
+		} else if (strlencmp(cmd, "disable") == 0) {
+			status = 0;
+		} else if (strlencmp(cmd, "reset") == 0) {
+			status = hotkey_orig_status;
+			mask = hotkey_orig_mask;
+		} else if (sscanf(cmd, "0x%x", &mask) == 1) {
+			/* mask set */
+		} else if (sscanf(cmd, "%x", &mask) == 1) {
+			/* mask set */
+		} else {
+			res = -EINVAL;
+			goto errexit;
+		}
+	}
+	if (status != -1)
+		res = hotkey_status_set(status);
+
+	if (!res && mask != hotkey_mask)
+		res = hotkey_mask_set(mask);
+
+errexit:
+	mutex_unlock(&hotkey_mutex);
+	return res;
+}
+
+static const struct acpi_device_id ibm_htk_device_ids[] = {
+	{TPACPI_ACPI_HKEY_HID, 0},
+	{"", 0},
+};
+
+static struct tp_acpi_drv_struct ibm_hotkey_acpidriver = {
+	.hid = ibm_htk_device_ids,
+	.notify = hotkey_notify,
+	.handle = &hkey_handle,
+	.type = ACPI_DEVICE_NOTIFY,
+};
+
+static struct ibm_struct hotkey_driver_data = {
+	.name = "hotkey",
+	.read = hotkey_read,
+	.write = hotkey_write,
+	.exit = hotkey_exit,
+	.resume = hotkey_resume,
+	.suspend = hotkey_suspend,
+	.acpi = &ibm_hotkey_acpidriver,
+};
+
+/*************************************************************************
+ * Bluetooth subdriver
+ */
+
+enum {
+	/* ACPI GBDC/SBDC bits */
+	TP_ACPI_BLUETOOTH_HWPRESENT	= 0x01,	/* Bluetooth hw available */
+	TP_ACPI_BLUETOOTH_RADIOSSW	= 0x02,	/* Bluetooth radio enabled */
+	TP_ACPI_BLUETOOTH_RESUMECTRL	= 0x04,	/* Bluetooth state at resume:
+						   off / last state */
+};
+
+enum {
+	/* ACPI \BLTH commands */
+	TP_ACPI_BLTH_GET_ULTRAPORT_ID	= 0x00, /* Get Ultraport BT ID */
+	TP_ACPI_BLTH_GET_PWR_ON_RESUME	= 0x01, /* Get power-on-resume state */
+	TP_ACPI_BLTH_PWR_ON_ON_RESUME	= 0x02, /* Resume powered on */
+	TP_ACPI_BLTH_PWR_OFF_ON_RESUME	= 0x03,	/* Resume powered off */
+	TP_ACPI_BLTH_SAVE_STATE		= 0x05, /* Save state for S4/S5 */
+};
+
+static struct rfkill *tpacpi_bluetooth_rfkill;
+
+static void bluetooth_suspend(pm_message_t state)
+{
+	/* Try to make sure radio will resume powered off */
+	acpi_evalf(NULL, NULL, "\\BLTH", "vd",
+		   TP_ACPI_BLTH_PWR_OFF_ON_RESUME);
+}
+
+static int bluetooth_get_radiosw(void)
+{
+	int status;
+
+	if (!tp_features.bluetooth)
+		return -ENODEV;
+
+	/* WLSW overrides bluetooth in firmware/hardware, reflect that */
+	if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status)
+		return RFKILL_STATE_HARD_BLOCKED;
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+	if (dbg_bluetoothemul)
+		return (tpacpi_bluetooth_emulstate) ?
+			RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
+#endif
+
+	if (!acpi_evalf(hkey_handle, &status, "GBDC", "d"))
+		return -EIO;
+
+	return ((status & TP_ACPI_BLUETOOTH_RADIOSSW) != 0) ?
+		RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
+}
+
+static void bluetooth_update_rfk(void)
+{
+	int status;
+
+	if (!tpacpi_bluetooth_rfkill)
+		return;
+
+	status = bluetooth_get_radiosw();
+	if (status < 0)
+		return;
+	rfkill_force_state(tpacpi_bluetooth_rfkill, status);
+}
+
+static int bluetooth_set_radiosw(int radio_on, int update_rfk)
+{
+	int status;
+
+	if (!tp_features.bluetooth)
+		return -ENODEV;
+
+	/* WLSW overrides bluetooth in firmware/hardware, but there is no
+	 * reason to risk weird behaviour. */
+	if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status
+	    && radio_on)
+		return -EPERM;
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+	if (dbg_bluetoothemul) {
+		tpacpi_bluetooth_emulstate = !!radio_on;
+		if (update_rfk)
+			bluetooth_update_rfk();
+		return 0;
+	}
+#endif
+
+	/* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */
+	if (radio_on)
+		status = TP_ACPI_BLUETOOTH_RADIOSSW;
+	else
+		status = 0;
+	if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
+		return -EIO;
+
+	if (update_rfk)
+		bluetooth_update_rfk();
+
+	return 0;
+}
+
+/* sysfs bluetooth enable ---------------------------------------------- */
+static ssize_t bluetooth_enable_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	int status;
+
+	status = bluetooth_get_radiosw();
+	if (status < 0)
+		return status;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			(status == RFKILL_STATE_UNBLOCKED) ? 1 : 0);
+}
+
+static ssize_t bluetooth_enable_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	unsigned long t;
+	int res;
+
+	if (parse_strtoul(buf, 1, &t))
+		return -EINVAL;
+
+	res = bluetooth_set_radiosw(t, 1);
+
+	return (res) ? res : count;
+}
+
+static struct device_attribute dev_attr_bluetooth_enable =
+	__ATTR(bluetooth_enable, S_IWUSR | S_IRUGO,
+		bluetooth_enable_show, bluetooth_enable_store);
+
+/* --------------------------------------------------------------------- */
+
+static struct attribute *bluetooth_attributes[] = {
+	&dev_attr_bluetooth_enable.attr,
+	NULL
+};
+
+static const struct attribute_group bluetooth_attr_group = {
+	.attrs = bluetooth_attributes,
+};
+
+static int tpacpi_bluetooth_rfk_get(void *data, enum rfkill_state *state)
+{
+	int bts = bluetooth_get_radiosw();
+
+	if (bts < 0)
+		return bts;
+
+	*state = bts;
+	return 0;
+}
+
+static int tpacpi_bluetooth_rfk_set(void *data, enum rfkill_state state)
+{
+	return bluetooth_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0);
+}
+
+static void bluetooth_shutdown(void)
+{
+	/* Order firmware to save current state to NVRAM */
+	if (!acpi_evalf(NULL, NULL, "\\BLTH", "vd",
+			TP_ACPI_BLTH_SAVE_STATE))
+		printk(TPACPI_NOTICE
+			"failed to save bluetooth state to NVRAM\n");
+}
+
+static void bluetooth_exit(void)
+{
+	bluetooth_shutdown();
+
+	if (tpacpi_bluetooth_rfkill)
+		rfkill_unregister(tpacpi_bluetooth_rfkill);
+
+	sysfs_remove_group(&tpacpi_pdev->dev.kobj,
+			&bluetooth_attr_group);
+}
+
+static int __init bluetooth_init(struct ibm_init_struct *iibm)
+{
+	int res;
+	int status = 0;
+
+	vdbg_printk(TPACPI_DBG_INIT, "initializing bluetooth subdriver\n");
+
+	TPACPI_ACPIHANDLE_INIT(hkey);
+
+	/* bluetooth not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
+	   G4x, R30, R31, R40e, R50e, T20-22, X20-21 */
+	tp_features.bluetooth = hkey_handle &&
+	    acpi_evalf(hkey_handle, &status, "GBDC", "qd");
+
+	vdbg_printk(TPACPI_DBG_INIT, "bluetooth is %s, status 0x%02x\n",
+		str_supported(tp_features.bluetooth),
+		status);
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+	if (dbg_bluetoothemul) {
+		tp_features.bluetooth = 1;
+		printk(TPACPI_INFO
+			"bluetooth switch emulation enabled\n");
+	} else
+#endif
+	if (tp_features.bluetooth &&
+	    !(status & TP_ACPI_BLUETOOTH_HWPRESENT)) {
+		/* no bluetooth hardware present in system */
+		tp_features.bluetooth = 0;
+		dbg_printk(TPACPI_DBG_INIT,
+			   "bluetooth hardware not installed\n");
+	}
+
+	if (!tp_features.bluetooth)
+		return 1;
+
+	res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
+				&bluetooth_attr_group);
+	if (res)
+		return res;
+
+	res = tpacpi_new_rfkill(TPACPI_RFK_BLUETOOTH_SW_ID,
+				&tpacpi_bluetooth_rfkill,
+				RFKILL_TYPE_BLUETOOTH,
+				"tpacpi_bluetooth_sw",
+				true,
+				tpacpi_bluetooth_rfk_set,
+				tpacpi_bluetooth_rfk_get);
+	if (res) {
+		bluetooth_exit();
+		return res;
+	}
+
+	return 0;
+}
+
+/* procfs -------------------------------------------------------------- */
+static int bluetooth_read(char *p)
+{
+	int len = 0;
+	int status = bluetooth_get_radiosw();
+
+	if (!tp_features.bluetooth)
+		len += sprintf(p + len, "status:\t\tnot supported\n");
+	else {
+		len += sprintf(p + len, "status:\t\t%s\n",
+				(status == RFKILL_STATE_UNBLOCKED) ?
+					"enabled" : "disabled");
+		len += sprintf(p + len, "commands:\tenable, disable\n");
+	}
+
+	return len;
+}
+
+static int bluetooth_write(char *buf)
+{
+	char *cmd;
+
+	if (!tp_features.bluetooth)
+		return -ENODEV;
+
+	while ((cmd = next_cmd(&buf))) {
+		if (strlencmp(cmd, "enable") == 0) {
+			bluetooth_set_radiosw(1, 1);
+		} else if (strlencmp(cmd, "disable") == 0) {
+			bluetooth_set_radiosw(0, 1);
+		} else
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct ibm_struct bluetooth_driver_data = {
+	.name = "bluetooth",
+	.read = bluetooth_read,
+	.write = bluetooth_write,
+	.exit = bluetooth_exit,
+	.suspend = bluetooth_suspend,
+	.shutdown = bluetooth_shutdown,
+};
+
+/*************************************************************************
+ * Wan subdriver
+ */
+
+enum {
+	/* ACPI GWAN/SWAN bits */
+	TP_ACPI_WANCARD_HWPRESENT	= 0x01,	/* Wan hw available */
+	TP_ACPI_WANCARD_RADIOSSW	= 0x02,	/* Wan radio enabled */
+	TP_ACPI_WANCARD_RESUMECTRL	= 0x04,	/* Wan state at resume:
+						   off / last state */
+};
+
+static struct rfkill *tpacpi_wan_rfkill;
+
+static void wan_suspend(pm_message_t state)
+{
+	/* Try to make sure radio will resume powered off */
+	acpi_evalf(NULL, NULL, "\\WGSV", "qvd",
+		   TP_ACPI_WGSV_PWR_OFF_ON_RESUME);
+}
+
+static int wan_get_radiosw(void)
+{
+	int status;
+
+	if (!tp_features.wan)
+		return -ENODEV;
+
+	/* WLSW overrides WWAN in firmware/hardware, reflect that */
+	if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status)
+		return RFKILL_STATE_HARD_BLOCKED;
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+	if (dbg_wwanemul)
+		return (tpacpi_wwan_emulstate) ?
+			RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
+#endif
+
+	if (!acpi_evalf(hkey_handle, &status, "GWAN", "d"))
+		return -EIO;
+
+	return ((status & TP_ACPI_WANCARD_RADIOSSW) != 0) ?
+		RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
+}
+
+static void wan_update_rfk(void)
+{
+	int status;
+
+	if (!tpacpi_wan_rfkill)
+		return;
+
+	status = wan_get_radiosw();
+	if (status < 0)
+		return;
+	rfkill_force_state(tpacpi_wan_rfkill, status);
+}
+
+static int wan_set_radiosw(int radio_on, int update_rfk)
+{
+	int status;
+
+	if (!tp_features.wan)
+		return -ENODEV;
+
+	/* WLSW overrides WWAN in firmware/hardware, but there is no
+	 * reason to risk weird behaviour. */
+	if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status
+	    && radio_on)
+		return -EPERM;
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+	if (dbg_wwanemul) {
+		tpacpi_wwan_emulstate = !!radio_on;
+		if (update_rfk)
+			wan_update_rfk();
+		return 0;
+	}
+#endif
+
+	/* We make sure to keep TP_ACPI_WANCARD_RESUMECTRL off */
+	if (radio_on)
+		status = TP_ACPI_WANCARD_RADIOSSW;
+	else
+		status = 0;
+	if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
+		return -EIO;
+
+	if (update_rfk)
+		wan_update_rfk();
+
+	return 0;
+}
+
+/* sysfs wan enable ---------------------------------------------------- */
+static ssize_t wan_enable_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	int status;
+
+	status = wan_get_radiosw();
+	if (status < 0)
+		return status;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			(status == RFKILL_STATE_UNBLOCKED) ? 1 : 0);
+}
+
+static ssize_t wan_enable_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	unsigned long t;
+	int res;
+
+	if (parse_strtoul(buf, 1, &t))
+		return -EINVAL;
+
+	res = wan_set_radiosw(t, 1);
+
+	return (res) ? res : count;
+}
+
+static struct device_attribute dev_attr_wan_enable =
+	__ATTR(wwan_enable, S_IWUSR | S_IRUGO,
+		wan_enable_show, wan_enable_store);
+
+/* --------------------------------------------------------------------- */
+
+static struct attribute *wan_attributes[] = {
+	&dev_attr_wan_enable.attr,
+	NULL
+};
+
+static const struct attribute_group wan_attr_group = {
+	.attrs = wan_attributes,
+};
+
+static int tpacpi_wan_rfk_get(void *data, enum rfkill_state *state)
+{
+	int wans = wan_get_radiosw();
+
+	if (wans < 0)
+		return wans;
+
+	*state = wans;
+	return 0;
+}
+
+static int tpacpi_wan_rfk_set(void *data, enum rfkill_state state)
+{
+	return wan_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0);
+}
+
+static void wan_shutdown(void)
+{
+	/* Order firmware to save current state to NVRAM */
+	if (!acpi_evalf(NULL, NULL, "\\WGSV", "vd",
+			TP_ACPI_WGSV_SAVE_STATE))
+		printk(TPACPI_NOTICE
+			"failed to save WWAN state to NVRAM\n");
+}
+
+static void wan_exit(void)
+{
+	wan_shutdown();
+
+	if (tpacpi_wan_rfkill)
+		rfkill_unregister(tpacpi_wan_rfkill);
+
+	sysfs_remove_group(&tpacpi_pdev->dev.kobj,
+		&wan_attr_group);
+}
+
+static int __init wan_init(struct ibm_init_struct *iibm)
+{
+	int res;
+	int status = 0;
+
+	vdbg_printk(TPACPI_DBG_INIT, "initializing wan subdriver\n");
+
+	TPACPI_ACPIHANDLE_INIT(hkey);
+
+	tp_features.wan = hkey_handle &&
+	    acpi_evalf(hkey_handle, &status, "GWAN", "qd");
+
+	vdbg_printk(TPACPI_DBG_INIT, "wan is %s, status 0x%02x\n",
+		str_supported(tp_features.wan),
+		status);
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+	if (dbg_wwanemul) {
+		tp_features.wan = 1;
+		printk(TPACPI_INFO
+			"wwan switch emulation enabled\n");
+	} else
+#endif
+	if (tp_features.wan &&
+	    !(status & TP_ACPI_WANCARD_HWPRESENT)) {
+		/* no wan hardware present in system */
+		tp_features.wan = 0;
+		dbg_printk(TPACPI_DBG_INIT,
+			   "wan hardware not installed\n");
+	}
+
+	if (!tp_features.wan)
+		return 1;
+
+	res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
+				&wan_attr_group);
+	if (res)
+		return res;
+
+	res = tpacpi_new_rfkill(TPACPI_RFK_WWAN_SW_ID,
+				&tpacpi_wan_rfkill,
+				RFKILL_TYPE_WWAN,
+				"tpacpi_wwan_sw",
+				true,
+				tpacpi_wan_rfk_set,
+				tpacpi_wan_rfk_get);
+	if (res) {
+		wan_exit();
+		return res;
+	}
+
+	return 0;
+}
+
+/* procfs -------------------------------------------------------------- */
+static int wan_read(char *p)
+{
+	int len = 0;
+	int status = wan_get_radiosw();
+
+	if (!tp_features.wan)
+		len += sprintf(p + len, "status:\t\tnot supported\n");
+	else {
+		len += sprintf(p + len, "status:\t\t%s\n",
+				(status == RFKILL_STATE_UNBLOCKED) ?
+					"enabled" : "disabled");
+		len += sprintf(p + len, "commands:\tenable, disable\n");
+	}
+
+	return len;
+}
+
+static int wan_write(char *buf)
+{
+	char *cmd;
+
+	if (!tp_features.wan)
+		return -ENODEV;
+
+	while ((cmd = next_cmd(&buf))) {
+		if (strlencmp(cmd, "enable") == 0) {
+			wan_set_radiosw(1, 1);
+		} else if (strlencmp(cmd, "disable") == 0) {
+			wan_set_radiosw(0, 1);
+		} else
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct ibm_struct wan_driver_data = {
+	.name = "wan",
+	.read = wan_read,
+	.write = wan_write,
+	.exit = wan_exit,
+	.suspend = wan_suspend,
+	.shutdown = wan_shutdown,
+};
+
+/*************************************************************************
+ * UWB subdriver
+ */
+
+enum {
+	/* ACPI GUWB/SUWB bits */
+	TP_ACPI_UWB_HWPRESENT	= 0x01,	/* UWB hw available */
+	TP_ACPI_UWB_RADIOSSW	= 0x02,	/* UWB radio enabled */
+};
+
+static struct rfkill *tpacpi_uwb_rfkill;
+
+static int uwb_get_radiosw(void)
+{
+	int status;
+
+	if (!tp_features.uwb)
+		return -ENODEV;
+
+	/* WLSW overrides UWB in firmware/hardware, reflect that */
+	if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status)
+		return RFKILL_STATE_HARD_BLOCKED;
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+	if (dbg_uwbemul)
+		return (tpacpi_uwb_emulstate) ?
+			RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
+#endif
+
+	if (!acpi_evalf(hkey_handle, &status, "GUWB", "d"))
+		return -EIO;
+
+	return ((status & TP_ACPI_UWB_RADIOSSW) != 0) ?
+		RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
+}
+
+static void uwb_update_rfk(void)
+{
+	int status;
+
+	if (!tpacpi_uwb_rfkill)
+		return;
+
+	status = uwb_get_radiosw();
+	if (status < 0)
+		return;
+	rfkill_force_state(tpacpi_uwb_rfkill, status);
+}
+
+static int uwb_set_radiosw(int radio_on, int update_rfk)
+{
+	int status;
+
+	if (!tp_features.uwb)
+		return -ENODEV;
+
+	/* WLSW overrides UWB in firmware/hardware, but there is no
+	 * reason to risk weird behaviour. */
+	if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status
+	    && radio_on)
+		return -EPERM;
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+	if (dbg_uwbemul) {
+		tpacpi_uwb_emulstate = !!radio_on;
+		if (update_rfk)
+			uwb_update_rfk();
+		return 0;
+	}
+#endif
+
+	status = (radio_on) ? TP_ACPI_UWB_RADIOSSW : 0;
+	if (!acpi_evalf(hkey_handle, NULL, "SUWB", "vd", status))
+		return -EIO;
+
+	if (update_rfk)
+		uwb_update_rfk();
+
+	return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+static int tpacpi_uwb_rfk_get(void *data, enum rfkill_state *state)
+{
+	int uwbs = uwb_get_radiosw();
+
+	if (uwbs < 0)
+		return uwbs;
+
+	*state = uwbs;
+	return 0;
+}
+
+static int tpacpi_uwb_rfk_set(void *data, enum rfkill_state state)
+{
+	return uwb_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0);
+}
+
+static void uwb_exit(void)
+{
+	if (tpacpi_uwb_rfkill)
+		rfkill_unregister(tpacpi_uwb_rfkill);
+}
+
+static int __init uwb_init(struct ibm_init_struct *iibm)
+{
+	int res;
+	int status = 0;
+
+	vdbg_printk(TPACPI_DBG_INIT, "initializing uwb subdriver\n");
+
+	TPACPI_ACPIHANDLE_INIT(hkey);
+
+	tp_features.uwb = hkey_handle &&
+	    acpi_evalf(hkey_handle, &status, "GUWB", "qd");
+
+	vdbg_printk(TPACPI_DBG_INIT, "uwb is %s, status 0x%02x\n",
+		str_supported(tp_features.uwb),
+		status);
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+	if (dbg_uwbemul) {
+		tp_features.uwb = 1;
+		printk(TPACPI_INFO
+			"uwb switch emulation enabled\n");
+	} else
+#endif
+	if (tp_features.uwb &&
+	    !(status & TP_ACPI_UWB_HWPRESENT)) {
+		/* no uwb hardware present in system */
+		tp_features.uwb = 0;
+		dbg_printk(TPACPI_DBG_INIT,
+			   "uwb hardware not installed\n");
+	}
+
+	if (!tp_features.uwb)
+		return 1;
+
+	res = tpacpi_new_rfkill(TPACPI_RFK_UWB_SW_ID,
+				&tpacpi_uwb_rfkill,
+				RFKILL_TYPE_UWB,
+				"tpacpi_uwb_sw",
+				false,
+				tpacpi_uwb_rfk_set,
+				tpacpi_uwb_rfk_get);
+
+	return res;
+}
+
+static struct ibm_struct uwb_driver_data = {
+	.name = "uwb",
+	.exit = uwb_exit,
+	.flags.experimental = 1,
+};
+
+/*************************************************************************
+ * Video subdriver
+ */
+
+#ifdef CONFIG_THINKPAD_ACPI_VIDEO
+
+enum video_access_mode {
+	TPACPI_VIDEO_NONE = 0,
+	TPACPI_VIDEO_570,	/* 570 */
+	TPACPI_VIDEO_770,	/* 600e/x, 770e, 770x */
+	TPACPI_VIDEO_NEW,	/* all others */
+};
+
+enum {	/* video status flags, based on VIDEO_570 */
+	TP_ACPI_VIDEO_S_LCD = 0x01,	/* LCD output enabled */
+	TP_ACPI_VIDEO_S_CRT = 0x02,	/* CRT output enabled */
+	TP_ACPI_VIDEO_S_DVI = 0x08,	/* DVI output enabled */
+};
+
+enum {  /* TPACPI_VIDEO_570 constants */
+	TP_ACPI_VIDEO_570_PHSCMD = 0x87,	/* unknown magic constant :( */
+	TP_ACPI_VIDEO_570_PHSMASK = 0x03,	/* PHS bits that map to
+						 * video_status_flags */
+	TP_ACPI_VIDEO_570_PHS2CMD = 0x8b,	/* unknown magic constant :( */
+	TP_ACPI_VIDEO_570_PHS2SET = 0x80,	/* unknown magic constant :( */
+};
+
+static enum video_access_mode video_supported;
+static int video_orig_autosw;
+
+static int video_autosw_get(void);
+static int video_autosw_set(int enable);
+
+TPACPI_HANDLE(vid2, root, "\\_SB.PCI0.AGPB.VID");	/* G41 */
+
+static int __init video_init(struct ibm_init_struct *iibm)
+{
+	int ivga;
+
+	vdbg_printk(TPACPI_DBG_INIT, "initializing video subdriver\n");
+
+	TPACPI_ACPIHANDLE_INIT(vid);
+	TPACPI_ACPIHANDLE_INIT(vid2);
+
+	if (vid2_handle && acpi_evalf(NULL, &ivga, "\\IVGA", "d") && ivga)
+		/* G41, assume IVGA doesn't change */
+		vid_handle = vid2_handle;
+
+	if (!vid_handle)
+		/* video switching not supported on R30, R31 */
+		video_supported = TPACPI_VIDEO_NONE;
+	else if (acpi_evalf(vid_handle, &video_orig_autosw, "SWIT", "qd"))
+		/* 570 */
+		video_supported = TPACPI_VIDEO_570;
+	else if (acpi_evalf(vid_handle, &video_orig_autosw, "^VADL", "qd"))
+		/* 600e/x, 770e, 770x */
+		video_supported = TPACPI_VIDEO_770;
+	else
+		/* all others */
+		video_supported = TPACPI_VIDEO_NEW;
+
+	vdbg_printk(TPACPI_DBG_INIT, "video is %s, mode %d\n",
+		str_supported(video_supported != TPACPI_VIDEO_NONE),
+		video_supported);
+
+	return (video_supported != TPACPI_VIDEO_NONE)? 0 : 1;
+}
+
+static void video_exit(void)
+{
+	dbg_printk(TPACPI_DBG_EXIT,
+		   "restoring original video autoswitch mode\n");
+	if (video_autosw_set(video_orig_autosw))
+		printk(TPACPI_ERR "error while trying to restore original "
+			"video autoswitch mode\n");
+}
+
+static int video_outputsw_get(void)
+{
+	int status = 0;
+	int i;
+
+	switch (video_supported) {
+	case TPACPI_VIDEO_570:
+		if (!acpi_evalf(NULL, &i, "\\_SB.PHS", "dd",
+				 TP_ACPI_VIDEO_570_PHSCMD))
+			return -EIO;
+		status = i & TP_ACPI_VIDEO_570_PHSMASK;
+		break;
+	case TPACPI_VIDEO_770:
+		if (!acpi_evalf(NULL, &i, "\\VCDL", "d"))
+			return -EIO;
+		if (i)
+			status |= TP_ACPI_VIDEO_S_LCD;
+		if (!acpi_evalf(NULL, &i, "\\VCDC", "d"))
+			return -EIO;
+		if (i)
+			status |= TP_ACPI_VIDEO_S_CRT;
+		break;
+	case TPACPI_VIDEO_NEW:
+		if (!acpi_evalf(NULL, NULL, "\\VUPS", "vd", 1) ||
+		    !acpi_evalf(NULL, &i, "\\VCDC", "d"))
+			return -EIO;
+		if (i)
+			status |= TP_ACPI_VIDEO_S_CRT;
+
+		if (!acpi_evalf(NULL, NULL, "\\VUPS", "vd", 0) ||
+		    !acpi_evalf(NULL, &i, "\\VCDL", "d"))
+			return -EIO;
+		if (i)
+			status |= TP_ACPI_VIDEO_S_LCD;
+		if (!acpi_evalf(NULL, &i, "\\VCDD", "d"))
+			return -EIO;
+		if (i)
+			status |= TP_ACPI_VIDEO_S_DVI;
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	return status;
+}
+
+static int video_outputsw_set(int status)
+{
+	int autosw;
+	int res = 0;
+
+	switch (video_supported) {
+	case TPACPI_VIDEO_570:
+		res = acpi_evalf(NULL, NULL,
+				 "\\_SB.PHS2", "vdd",
+				 TP_ACPI_VIDEO_570_PHS2CMD,
+				 status | TP_ACPI_VIDEO_570_PHS2SET);
+		break;
+	case TPACPI_VIDEO_770:
+		autosw = video_autosw_get();
+		if (autosw < 0)
+			return autosw;
+
+		res = video_autosw_set(1);
+		if (res)
+			return res;
+		res = acpi_evalf(vid_handle, NULL,
+				 "ASWT", "vdd", status * 0x100, 0);
+		if (!autosw && video_autosw_set(autosw)) {
+			printk(TPACPI_ERR
+			       "video auto-switch left enabled due to error\n");
+			return -EIO;
+		}
+		break;
+	case TPACPI_VIDEO_NEW:
+		res = acpi_evalf(NULL, NULL, "\\VUPS", "vd", 0x80) &&
+		      acpi_evalf(NULL, NULL, "\\VSDS", "vdd", status, 1);
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	return (res)? 0 : -EIO;
+}
+
+static int video_autosw_get(void)
+{
+	int autosw = 0;
+
+	switch (video_supported) {
+	case TPACPI_VIDEO_570:
+		if (!acpi_evalf(vid_handle, &autosw, "SWIT", "d"))
+			return -EIO;
+		break;
+	case TPACPI_VIDEO_770:
+	case TPACPI_VIDEO_NEW:
+		if (!acpi_evalf(vid_handle, &autosw, "^VDEE", "d"))
+			return -EIO;
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	return autosw & 1;
+}
+
+static int video_autosw_set(int enable)
+{
+	if (!acpi_evalf(vid_handle, NULL, "_DOS", "vd", (enable)? 1 : 0))
+		return -EIO;
+	return 0;
+}
+
+static int video_outputsw_cycle(void)
+{
+	int autosw = video_autosw_get();
+	int res;
+
+	if (autosw < 0)
+		return autosw;
+
+	switch (video_supported) {
+	case TPACPI_VIDEO_570:
+		res = video_autosw_set(1);
+		if (res)
+			return res;
+		res = acpi_evalf(ec_handle, NULL, "_Q16", "v");
+		break;
+	case TPACPI_VIDEO_770:
+	case TPACPI_VIDEO_NEW:
+		res = video_autosw_set(1);
+		if (res)
+			return res;
+		res = acpi_evalf(vid_handle, NULL, "VSWT", "v");
+		break;
+	default:
+		return -ENOSYS;
+	}
+	if (!autosw && video_autosw_set(autosw)) {
+		printk(TPACPI_ERR
+		       "video auto-switch left enabled due to error\n");
+		return -EIO;
+	}
+
+	return (res)? 0 : -EIO;
+}
+
+static int video_expand_toggle(void)
+{
+	switch (video_supported) {
+	case TPACPI_VIDEO_570:
+		return acpi_evalf(ec_handle, NULL, "_Q17", "v")?
+			0 : -EIO;
+	case TPACPI_VIDEO_770:
+		return acpi_evalf(vid_handle, NULL, "VEXP", "v")?
+			0 : -EIO;
+	case TPACPI_VIDEO_NEW:
+		return acpi_evalf(NULL, NULL, "\\VEXP", "v")?
+			0 : -EIO;
+	default:
+		return -ENOSYS;
+	}
+	/* not reached */
+}
+
+static int video_read(char *p)
+{
+	int status, autosw;
+	int len = 0;
+
+	if (video_supported == TPACPI_VIDEO_NONE) {
+		len += sprintf(p + len, "status:\t\tnot supported\n");
+		return len;
+	}
+
+	status = video_outputsw_get();
+	if (status < 0)
+		return status;
+
+	autosw = video_autosw_get();
+	if (autosw < 0)
+		return autosw;
+
+	len += sprintf(p + len, "status:\t\tsupported\n");
+	len += sprintf(p + len, "lcd:\t\t%s\n", enabled(status, 0));
+	len += sprintf(p + len, "crt:\t\t%s\n", enabled(status, 1));
+	if (video_supported == TPACPI_VIDEO_NEW)
+		len += sprintf(p + len, "dvi:\t\t%s\n", enabled(status, 3));
+	len += sprintf(p + len, "auto:\t\t%s\n", enabled(autosw, 0));
+	len += sprintf(p + len, "commands:\tlcd_enable, lcd_disable\n");
+	len += sprintf(p + len, "commands:\tcrt_enable, crt_disable\n");
+	if (video_supported == TPACPI_VIDEO_NEW)
+		len += sprintf(p + len, "commands:\tdvi_enable, dvi_disable\n");
+	len += sprintf(p + len, "commands:\tauto_enable, auto_disable\n");
+	len += sprintf(p + len, "commands:\tvideo_switch, expand_toggle\n");
+
+	return len;
+}
+
+static int video_write(char *buf)
+{
+	char *cmd;
+	int enable, disable, status;
+	int res;
+
+	if (video_supported == TPACPI_VIDEO_NONE)
+		return -ENODEV;
+
+	enable = 0;
+	disable = 0;
+
+	while ((cmd = next_cmd(&buf))) {
+		if (strlencmp(cmd, "lcd_enable") == 0) {
+			enable |= TP_ACPI_VIDEO_S_LCD;
+		} else if (strlencmp(cmd, "lcd_disable") == 0) {
+			disable |= TP_ACPI_VIDEO_S_LCD;
+		} else if (strlencmp(cmd, "crt_enable") == 0) {
+			enable |= TP_ACPI_VIDEO_S_CRT;
+		} else if (strlencmp(cmd, "crt_disable") == 0) {
+			disable |= TP_ACPI_VIDEO_S_CRT;
+		} else if (video_supported == TPACPI_VIDEO_NEW &&
+			   strlencmp(cmd, "dvi_enable") == 0) {
+			enable |= TP_ACPI_VIDEO_S_DVI;
+		} else if (video_supported == TPACPI_VIDEO_NEW &&
+			   strlencmp(cmd, "dvi_disable") == 0) {
+			disable |= TP_ACPI_VIDEO_S_DVI;
+		} else if (strlencmp(cmd, "auto_enable") == 0) {
+			res = video_autosw_set(1);
+			if (res)
+				return res;
+		} else if (strlencmp(cmd, "auto_disable") == 0) {
+			res = video_autosw_set(0);
+			if (res)
+				return res;
+		} else if (strlencmp(cmd, "video_switch") == 0) {
+			res = video_outputsw_cycle();
+			if (res)
+				return res;
+		} else if (strlencmp(cmd, "expand_toggle") == 0) {
+			res = video_expand_toggle();
+			if (res)
+				return res;
+		} else
+			return -EINVAL;
+	}
+
+	if (enable || disable) {
+		status = video_outputsw_get();
+		if (status < 0)
+			return status;
+		res = video_outputsw_set((status & ~disable) | enable);
+		if (res)
+			return res;
+	}
+
+	return 0;
+}
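
video_write() folds whitespace-separated commands such as "lcd_enable" or "crt_disable" into enable/disable masks and applies them over the current output status as (status & ~disable) | enable. As a rough userspace illustration (sketch only; the proc node path is an assumption, taken to be /proc/acpi/ibm/video, the usual thinkpad-acpi location):

/* Sketch: issue a video output command through the proc interface
 * implemented by video_write() above.  The path is an assumption. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/acpi/ibm/video", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("crt_enable\n", f);	/* enable CRT, leave other outputs alone */
	return fclose(f) ? 1 : 0;
}
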
+
+static struct ibm_struct video_driver_data = {
+	.name = "video",
+	.read = video_read,
+	.write = video_write,
+	.exit = video_exit,
+};
+
+#endif /* CONFIG_THINKPAD_ACPI_VIDEO */
+
+/*************************************************************************
+ * Light (thinklight) subdriver
+ */
+
+TPACPI_HANDLE(lght, root, "\\LGHT");	/* A21e, A2xm/p, T20-22, X20-21 */
+TPACPI_HANDLE(ledb, ec, "LEDB");		/* G4x */
+
+static int light_get_status(void)
+{
+	int status = 0;
+
+	if (tp_features.light_status) {
+		if (!acpi_evalf(ec_handle, &status, "KBLT", "d"))
+			return -EIO;
+		return (!!status);
+	}
+
+	return -ENXIO;
+}
+
+static int light_set_status(int status)
+{
+	int rc;
+
+	if (tp_features.light) {
+		if (cmos_handle) {
+			rc = acpi_evalf(cmos_handle, NULL, NULL, "vd",
+					(status)?
+						TP_CMOS_THINKLIGHT_ON :
+						TP_CMOS_THINKLIGHT_OFF);
+		} else {
+			rc = acpi_evalf(lght_handle, NULL, NULL, "vd",
+					(status)? 1 : 0);
+		}
+		return (rc)? 0 : -EIO;
+	}
+
+	return -ENXIO;
+}
+
+static void light_set_status_worker(struct work_struct *work)
+{
+	struct tpacpi_led_classdev *data =
+			container_of(work, struct tpacpi_led_classdev, work);
+
+	if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
+		light_set_status((data->new_brightness != LED_OFF));
+}
+
+static void light_sysfs_set(struct led_classdev *led_cdev,
+			enum led_brightness brightness)
+{
+	struct tpacpi_led_classdev *data =
+		container_of(led_cdev,
+			     struct tpacpi_led_classdev,
+			     led_classdev);
+	data->new_brightness = brightness;
+	queue_work(tpacpi_wq, &data->work);
+}
+
+static enum led_brightness light_sysfs_get(struct led_classdev *led_cdev)
+{
+	return (light_get_status() == 1)? LED_FULL : LED_OFF;
+}
+
+static struct tpacpi_led_classdev tpacpi_led_thinklight = {
+	.led_classdev = {
+		.name		= "tpacpi::thinklight",
+		.brightness_set	= &light_sysfs_set,
+		.brightness_get	= &light_sysfs_get,
+	}
+};
+
+static int __init light_init(struct ibm_init_struct *iibm)
+{
+	int rc;
+
+	vdbg_printk(TPACPI_DBG_INIT, "initializing light subdriver\n");
+
+	TPACPI_ACPIHANDLE_INIT(ledb);
+	TPACPI_ACPIHANDLE_INIT(lght);
+	TPACPI_ACPIHANDLE_INIT(cmos);
+	INIT_WORK(&tpacpi_led_thinklight.work, light_set_status_worker);
+
+	/* light not supported on 570, 600e/x, 770e, 770x, G4x, R30, R31 */
+	tp_features.light = (cmos_handle || lght_handle) && !ledb_handle;
+
+	if (tp_features.light)
+		/* light status not supported on
+		   570, 600e/x, 770e, 770x, G4x, R30, R31, R32, X20 */
+		tp_features.light_status =
+			acpi_evalf(ec_handle, NULL, "KBLT", "qv");
+
+	vdbg_printk(TPACPI_DBG_INIT, "light is %s, light status is %s\n",
+		str_supported(tp_features.light),
+		str_supported(tp_features.light_status));
+
+	if (!tp_features.light)
+		return 1;
+
+	rc = led_classdev_register(&tpacpi_pdev->dev,
+				   &tpacpi_led_thinklight.led_classdev);
+
+	if (rc < 0) {
+		tp_features.light = 0;
+		tp_features.light_status = 0;
+	} else  {
+		rc = 0;
+	}
+
+	return rc;
+}
+
+static void light_exit(void)
+{
+	led_classdev_unregister(&tpacpi_led_thinklight.led_classdev);
+	if (work_pending(&tpacpi_led_thinklight.work))
+		flush_workqueue(tpacpi_wq);
+}
+
+static int light_read(char *p)
+{
+	int len = 0;
+	int status;
+
+	if (!tp_features.light) {
+		len += sprintf(p + len, "status:\t\tnot supported\n");
+	} else if (!tp_features.light_status) {
+		len += sprintf(p + len, "status:\t\tunknown\n");
+		len += sprintf(p + len, "commands:\ton, off\n");
+	} else {
+		status = light_get_status();
+		if (status < 0)
+			return status;
+		len += sprintf(p + len, "status:\t\t%s\n", onoff(status, 0));
+		len += sprintf(p + len, "commands:\ton, off\n");
+	}
+
+	return len;
+}
+
+static int light_write(char *buf)
+{
+	char *cmd;
+	int newstatus = 0;
+
+	if (!tp_features.light)
+		return -ENODEV;
+
+	while ((cmd = next_cmd(&buf))) {
+		if (strlencmp(cmd, "on") == 0) {
+			newstatus = 1;
+		} else if (strlencmp(cmd, "off") == 0) {
+			newstatus = 0;
+		} else
+			return -EINVAL;
+	}
+
+	return light_set_status(newstatus);
+}
+
+static struct ibm_struct light_driver_data = {
+	.name = "light",
+	.read = light_read,
+	.write = light_write,
+	.exit = light_exit,
+};
+
+/*************************************************************************
+ * Dock subdriver
+ */
+
+#ifdef CONFIG_THINKPAD_ACPI_DOCK
+
+static void dock_notify(struct ibm_struct *ibm, u32 event);
+static int dock_read(char *p);
+static int dock_write(char *buf);
+
+TPACPI_HANDLE(dock, root, "\\_SB.GDCK",	/* X30, X31, X40 */
+	   "\\_SB.PCI0.DOCK",	/* 600e/x,770e,770x,A2xm/p,T20-22,X20-21 */
+	   "\\_SB.PCI0.PCI1.DOCK",	/* all others */
+	   "\\_SB.PCI.ISA.SLCE",	/* 570 */
+    );				/* A21e,G4x,R30,R31,R32,R40,R40e,R50e */
+
+/* don't list other alternatives as we install a notify handler on the 570 */
+TPACPI_HANDLE(pci, root, "\\_SB.PCI");	/* 570 */
+
+static const struct acpi_device_id ibm_pci_device_ids[] = {
+	{PCI_ROOT_HID_STRING, 0},
+	{"", 0},
+};
+
+static struct tp_acpi_drv_struct ibm_dock_acpidriver[2] = {
+	{
+	 .notify = dock_notify,
+	 .handle = &dock_handle,
+	 .type = ACPI_SYSTEM_NOTIFY,
+	},
+	{
+	/* THIS ONE MUST NEVER BE USED FOR DRIVER AUTOLOADING.
+	 * We just use it to get notifications of dock hotplug
+	 * in very old thinkpads */
+	 .hid = ibm_pci_device_ids,
+	 .notify = dock_notify,
+	 .handle = &pci_handle,
+	 .type = ACPI_SYSTEM_NOTIFY,
+	},
+};
+
+static struct ibm_struct dock_driver_data[2] = {
+	{
+	 .name = "dock",
+	 .read = dock_read,
+	 .write = dock_write,
+	 .acpi = &ibm_dock_acpidriver[0],
+	},
+	{
+	 .name = "dock",
+	 .acpi = &ibm_dock_acpidriver[1],
+	},
+};
+
+#define dock_docked() (_sta(dock_handle) & 1)
+
+static int __init dock_init(struct ibm_init_struct *iibm)
+{
+	vdbg_printk(TPACPI_DBG_INIT, "initializing dock subdriver\n");
+
+	TPACPI_ACPIHANDLE_INIT(dock);
+
+	vdbg_printk(TPACPI_DBG_INIT, "dock is %s\n",
+		str_supported(dock_handle != NULL));
+
+	return (dock_handle)? 0 : 1;
+}
+
+static int __init dock_init2(struct ibm_init_struct *iibm)
+{
+	int dock2_needed;
+
+	vdbg_printk(TPACPI_DBG_INIT, "initializing dock subdriver part 2\n");
+
+	if (dock_driver_data[0].flags.acpi_driver_registered &&
+	    dock_driver_data[0].flags.acpi_notify_installed) {
+		TPACPI_ACPIHANDLE_INIT(pci);
+		dock2_needed = (pci_handle != NULL);
+		vdbg_printk(TPACPI_DBG_INIT,
+			    "dock PCI handler for the TP 570 is %s\n",
+			    str_supported(dock2_needed));
+	} else {
+		vdbg_printk(TPACPI_DBG_INIT,
+		"dock subdriver part 2 not required\n");
+		dock2_needed = 0;
+	}
+
+	return (dock2_needed)? 0 : 1;
+}
+
+static void dock_notify(struct ibm_struct *ibm, u32 event)
+{
+	int docked = dock_docked();
+	int pci = ibm->acpi->hid && ibm->acpi->device &&
+		acpi_match_device_ids(ibm->acpi->device, ibm_pci_device_ids);
+	int data;
+
+	if (event == 1 && !pci)	/* 570 */
+		data = 1;	/* button */
+	else if (event == 1 && pci)	/* 570 */
+		data = 3;	/* dock */
+	else if (event == 3 && docked)
+		data = 1;	/* button */
+	else if (event == 3 && !docked)
+		data = 2;	/* undock */
+	else if (event == 0 && docked)
+		data = 3;	/* dock */
+	else {
+		printk(TPACPI_ERR "unknown dock event %d, status %d\n",
+		       event, _sta(dock_handle));
+		data = 0;	/* unknown */
+	}
+	acpi_bus_generate_proc_event(ibm->acpi->device, event, data);
+	acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
+					  dev_name(&ibm->acpi->device->dev),
+					  event, data);
+}
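
The data value passed to the proc and netlink event helpers above classifies the event: 1 for an eject-button press, 2 for an undock, 3 for a dock, and 0 when the event could not be classified. Named purely for illustration (the driver itself works with the raw integers):

/* Illustrative naming of the "data" values generated by dock_notify();
 * these symbols are not part of the driver. */
enum tpacpi_dock_event_data {
	TPACPI_DOCK_EVDATA_UNKNOWN = 0,	/* unrecognized event/state */
	TPACPI_DOCK_EVDATA_BUTTON  = 1,	/* eject button pressed */
	TPACPI_DOCK_EVDATA_UNDOCK  = 2,	/* undock */
	TPACPI_DOCK_EVDATA_DOCK    = 3,	/* dock */
};
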
+
+static int dock_read(char *p)
+{
+	int len = 0;
+	int docked = dock_docked();
+
+	if (!dock_handle)
+		len += sprintf(p + len, "status:\t\tnot supported\n");
+	else if (!docked)
+		len += sprintf(p + len, "status:\t\tundocked\n");
+	else {
+		len += sprintf(p + len, "status:\t\tdocked\n");
+		len += sprintf(p + len, "commands:\tdock, undock\n");
+	}
+
+	return len;
+}
+
+static int dock_write(char *buf)
+{
+	char *cmd;
+
+	if (!dock_docked())
+		return -ENODEV;
+
+	while ((cmd = next_cmd(&buf))) {
+		if (strlencmp(cmd, "undock") == 0) {
+			if (!acpi_evalf(dock_handle, NULL, "_DCK", "vd", 0) ||
+			    !acpi_evalf(dock_handle, NULL, "_EJ0", "vd", 1))
+				return -EIO;
+		} else if (strlencmp(cmd, "dock") == 0) {
+			if (!acpi_evalf(dock_handle, NULL, "_DCK", "vd", 1))
+				return -EIO;
+		} else
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+#endif /* CONFIG_THINKPAD_ACPI_DOCK */
+
+/*************************************************************************
+ * Bay subdriver
+ */
+
+#ifdef CONFIG_THINKPAD_ACPI_BAY
+
+TPACPI_HANDLE(bay, root, "\\_SB.PCI.IDE.SECN.MAST",	/* 570 */
+	   "\\_SB.PCI0.IDE0.IDES.IDSM",	/* 600e/x, 770e, 770x */
+	   "\\_SB.PCI0.SATA.SCND.MSTR",	/* T60, X60, Z60 */
+	   "\\_SB.PCI0.IDE0.SCND.MSTR",	/* all others */
+	   );				/* A21e, R30, R31 */
+TPACPI_HANDLE(bay_ej, bay, "_EJ3",	/* 600e/x, A2xm/p, A3x */
+	   "_EJ0",		/* all others */
+	   );			/* 570,A21e,G4x,R30,R31,R32,R40e,R50e */
+TPACPI_HANDLE(bay2, root, "\\_SB.PCI0.IDE0.PRIM.SLAV",	/* A3x, R32 */
+	   "\\_SB.PCI0.IDE0.IDEP.IDPS",	/* 600e/x, 770e, 770x */
+	   );				/* all others */
+TPACPI_HANDLE(bay2_ej, bay2, "_EJ3",	/* 600e/x, 770e, A3x */
+	   "_EJ0",			/* 770x */
+	   );				/* all others */
+
+static int __init bay_init(struct ibm_init_struct *iibm)
+{
+	vdbg_printk(TPACPI_DBG_INIT, "initializing bay subdriver\n");
+
+	TPACPI_ACPIHANDLE_INIT(bay);
+	if (bay_handle)
+		TPACPI_ACPIHANDLE_INIT(bay_ej);
+	TPACPI_ACPIHANDLE_INIT(bay2);
+	if (bay2_handle)
+		TPACPI_ACPIHANDLE_INIT(bay2_ej);
+
+	tp_features.bay_status = bay_handle &&
+		acpi_evalf(bay_handle, NULL, "_STA", "qv");
+	tp_features.bay_status2 = bay2_handle &&
+		acpi_evalf(bay2_handle, NULL, "_STA", "qv");
+
+	tp_features.bay_eject = bay_handle && bay_ej_handle &&
+		(strlencmp(bay_ej_path, "_EJ0") == 0 || experimental);
+	tp_features.bay_eject2 = bay2_handle && bay2_ej_handle &&
+		(strlencmp(bay2_ej_path, "_EJ0") == 0 || experimental);
+
+	vdbg_printk(TPACPI_DBG_INIT,
+		"bay 1: status %s, eject %s; bay 2: status %s, eject %s\n",
+		str_supported(tp_features.bay_status),
+		str_supported(tp_features.bay_eject),
+		str_supported(tp_features.bay_status2),
+		str_supported(tp_features.bay_eject2));
+
+	return (tp_features.bay_status || tp_features.bay_eject ||
+		tp_features.bay_status2 || tp_features.bay_eject2)? 0 : 1;
+}
+
+static void bay_notify(struct ibm_struct *ibm, u32 event)
+{
+	acpi_bus_generate_proc_event(ibm->acpi->device, event, 0);
+	acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
+					  dev_name(&ibm->acpi->device->dev),
+					  event, 0);
+}
+
+#define bay_occupied(b) (_sta(b##_handle) & 1)
+
+static int bay_read(char *p)
+{
+	int len = 0;
+	int occupied = bay_occupied(bay);
+	int occupied2 = bay_occupied(bay2);
+	int eject, eject2;
+
+	len += sprintf(p + len, "status:\t\t%s\n",
+		tp_features.bay_status ?
+			(occupied ? "occupied" : "unoccupied") :
+				"not supported");
+	if (tp_features.bay_status2)
+		len += sprintf(p + len, "status2:\t%s\n", occupied2 ?
+			       "occupied" : "unoccupied");
+
+	eject = tp_features.bay_eject && occupied;
+	eject2 = tp_features.bay_eject2 && occupied2;
+
+	if (eject && eject2)
+		len += sprintf(p + len, "commands:\teject, eject2\n");
+	else if (eject)
+		len += sprintf(p + len, "commands:\teject\n");
+	else if (eject2)
+		len += sprintf(p + len, "commands:\teject2\n");
+
+	return len;
+}
+
+static int bay_write(char *buf)
+{
+	char *cmd;
+
+	if (!tp_features.bay_eject && !tp_features.bay_eject2)
+		return -ENODEV;
+
+	while ((cmd = next_cmd(&buf))) {
+		if (tp_features.bay_eject && strlencmp(cmd, "eject") == 0) {
+			if (!acpi_evalf(bay_ej_handle, NULL, NULL, "vd", 1))
+				return -EIO;
+		} else if (tp_features.bay_eject2 &&
+			   strlencmp(cmd, "eject2") == 0) {
+			if (!acpi_evalf(bay2_ej_handle, NULL, NULL, "vd", 1))
+				return -EIO;
+		} else
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct tp_acpi_drv_struct ibm_bay_acpidriver = {
+	.notify = bay_notify,
+	.handle = &bay_handle,
+	.type = ACPI_SYSTEM_NOTIFY,
+};
+
+static struct ibm_struct bay_driver_data = {
+	.name = "bay",
+	.read = bay_read,
+	.write = bay_write,
+	.acpi = &ibm_bay_acpidriver,
+};
+
+#endif /* CONFIG_THINKPAD_ACPI_BAY */
+
+/*************************************************************************
+ * CMOS subdriver
+ */
+
+/* sysfs cmos_command -------------------------------------------------- */
+static ssize_t cmos_command_store(struct device *dev,
+			    struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	unsigned long cmos_cmd;
+	int res;
+
+	if (parse_strtoul(buf, 21, &cmos_cmd))
+		return -EINVAL;
+
+	res = issue_thinkpad_cmos_command(cmos_cmd);
+	return (res)? res : count;
+}
+
+static struct device_attribute dev_attr_cmos_command =
+	__ATTR(cmos_command, S_IWUSR, NULL, cmos_command_store);
+
+/* --------------------------------------------------------------------- */
+
+static int __init cmos_init(struct ibm_init_struct *iibm)
+{
+	int res;
+
+	vdbg_printk(TPACPI_DBG_INIT,
+		"initializing cmos commands subdriver\n");
+
+	TPACPI_ACPIHANDLE_INIT(cmos);
+
+	vdbg_printk(TPACPI_DBG_INIT, "cmos commands are %s\n",
+		str_supported(cmos_handle != NULL));
+
+	res = device_create_file(&tpacpi_pdev->dev, &dev_attr_cmos_command);
+	if (res)
+		return res;
+
+	return (cmos_handle)? 0 : 1;
+}
+
+static void cmos_exit(void)
+{
+	device_remove_file(&tpacpi_pdev->dev, &dev_attr_cmos_command);
+}
+
+static int cmos_read(char *p)
+{
+	int len = 0;
+
+	/* cmos not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
+	   R30, R31, T20-22, X20-21 */
+	if (!cmos_handle)
+		len += sprintf(p + len, "status:\t\tnot supported\n");
+	else {
+		len += sprintf(p + len, "status:\t\tsupported\n");
+		len += sprintf(p + len, "commands:\t<cmd> (<cmd> is 0-21)\n");
+	}
+
+	return len;
+}
+
+static int cmos_write(char *buf)
+{
+	char *cmd;
+	int cmos_cmd, res;
+
+	while ((cmd = next_cmd(&buf))) {
+		if (sscanf(cmd, "%u", &cmos_cmd) == 1 &&
+		    cmos_cmd >= 0 && cmos_cmd <= 21) {
+			/* cmos_cmd set */
+		} else
+			return -EINVAL;
+
+		res = issue_thinkpad_cmos_command(cmos_cmd);
+		if (res)
+			return res;
+	}
+
+	return 0;
+}
+
+static struct ibm_struct cmos_driver_data = {
+	.name = "cmos",
+	.read = cmos_read,
+	.write = cmos_write,
+	.exit = cmos_exit,
+};
+
+/*************************************************************************
+ * LED subdriver
+ */
+
+enum led_access_mode {
+	TPACPI_LED_NONE = 0,
+	TPACPI_LED_570,	/* 570 */
+	TPACPI_LED_OLD,	/* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20-21 */
+	TPACPI_LED_NEW,	/* all others */
+};
+
+enum {	/* For TPACPI_LED_OLD */
+	TPACPI_LED_EC_HLCL = 0x0c,	/* EC reg to get led to power on */
+	TPACPI_LED_EC_HLBL = 0x0d,	/* EC reg to blink a lit led */
+	TPACPI_LED_EC_HLMS = 0x0e,	/* EC reg to select led to command */
+};
+
+enum led_status_t {
+	TPACPI_LED_OFF = 0,
+	TPACPI_LED_ON,
+	TPACPI_LED_BLINK,
+};
+
+static enum led_access_mode led_supported;
+
+TPACPI_HANDLE(led, ec, "SLED",	/* 570 */
+	   "SYSL",		/* 600e/x, 770e, 770x, A21e, A2xm/p, */
+				/* T20-22, X20-21 */
+	   "LED",		/* all others */
+	   );			/* R30, R31 */
+
+#define TPACPI_LED_NUMLEDS 8
+static struct tpacpi_led_classdev *tpacpi_leds;
+static enum led_status_t tpacpi_led_state_cache[TPACPI_LED_NUMLEDS];
+static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
+	/* there's a limit of 19 chars + NULL before 2.6.26 */
+	"tpacpi::power",
+	"tpacpi:orange:batt",
+	"tpacpi:green:batt",
+	"tpacpi::dock_active",
+	"tpacpi::bay_active",
+	"tpacpi::dock_batt",
+	"tpacpi::unknown_led",
+	"tpacpi::standby",
+};
+
+static int led_get_status(const unsigned int led)
+{
+	int status;
+	enum led_status_t led_s;
+
+	switch (led_supported) {
+	case TPACPI_LED_570:
+		if (!acpi_evalf(ec_handle,
+				&status, "GLED", "dd", 1 << led))
+			return -EIO;
+		led_s = (status == 0)?
+				TPACPI_LED_OFF :
+				((status == 1)?
+					TPACPI_LED_ON :
+					TPACPI_LED_BLINK);
+		tpacpi_led_state_cache[led] = led_s;
+		return led_s;
+	default:
+		return -ENXIO;
+	}
+
+	/* not reached */
+}
+
+static int led_set_status(const unsigned int led,
+			  const enum led_status_t ledstatus)
+{
+	/* off, on, blink. Index is led_status_t */
+	static const unsigned int led_sled_arg1[] = { 0, 1, 3 };
+	static const unsigned int led_led_arg1[] = { 0, 0x80, 0xc0 };
+
+	int rc = 0;
+
+	switch (led_supported) {
+	case TPACPI_LED_570:
+		/* 570 */
+		if (led > 7)
+			return -EINVAL;
+		if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
+				(1 << led), led_sled_arg1[ledstatus]))
+			rc = -EIO;
+		break;
+	case TPACPI_LED_OLD:
+		/* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20 */
+		if (led > 7)
+			return -EINVAL;
+		rc = ec_write(TPACPI_LED_EC_HLMS, (1 << led));
+		if (rc >= 0)
+			rc = ec_write(TPACPI_LED_EC_HLBL,
+				      (ledstatus == TPACPI_LED_BLINK) << led);
+		if (rc >= 0)
+			rc = ec_write(TPACPI_LED_EC_HLCL,
+				      (ledstatus != TPACPI_LED_OFF) << led);
+		break;
+	case TPACPI_LED_NEW:
+		/* all others */
+		if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
+				led, led_led_arg1[ledstatus]))
+			rc = -EIO;
+		break;
+	default:
+		rc = -ENXIO;
+	}
+
+	if (!rc)
+		tpacpi_led_state_cache[led] = ledstatus;
+
+	return rc;
+}
+
+static void led_sysfs_set_status(unsigned int led,
+				 enum led_brightness brightness)
+{
+	led_set_status(led,
+			(brightness == LED_OFF) ?
+			TPACPI_LED_OFF :
+			(tpacpi_led_state_cache[led] == TPACPI_LED_BLINK) ?
+				TPACPI_LED_BLINK : TPACPI_LED_ON);
+}
+
+static void led_set_status_worker(struct work_struct *work)
+{
+	struct tpacpi_led_classdev *data =
+		container_of(work, struct tpacpi_led_classdev, work);
+
+	if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
+		led_sysfs_set_status(data->led, data->new_brightness);
+}
+
+static void led_sysfs_set(struct led_classdev *led_cdev,
+			enum led_brightness brightness)
+{
+	struct tpacpi_led_classdev *data = container_of(led_cdev,
+			     struct tpacpi_led_classdev, led_classdev);
+
+	data->new_brightness = brightness;
+	queue_work(tpacpi_wq, &data->work);
+}
+
+static int led_sysfs_blink_set(struct led_classdev *led_cdev,
+			unsigned long *delay_on, unsigned long *delay_off)
+{
+	struct tpacpi_led_classdev *data = container_of(led_cdev,
+			     struct tpacpi_led_classdev, led_classdev);
+
+	/* Can we choose the flash rate? */
+	if (*delay_on == 0 && *delay_off == 0) {
+		/* yes. set them to the hardware blink rate (1 Hz) */
+		*delay_on = 500; /* ms */
+		*delay_off = 500; /* ms */
+	} else if ((*delay_on != 500) || (*delay_off != 500))
+		return -EINVAL;
+
+	data->new_brightness = TPACPI_LED_BLINK;
+	queue_work(tpacpi_wq, &data->work);
+
+	return 0;
+}
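
led_sysfs_blink_set() only supports the fixed hardware blink rate: a 0/0 request is rewritten to 500 ms on / 500 ms off, and anything other than 500/500 is rejected. From userspace this means the stock LED "timer" trigger works as long as the delays are left at the driver-chosen defaults or set to exactly 500 ms. A sketch, assuming the standard LED class sysfs layout and the "tpacpi::standby" LED registered by led_init() below:

/* Sketch only: blink a thinkpad-acpi LED via the LED class timer
 * trigger.  Paths assume the standard /sys/class/leds layout. */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	const char *led = "/sys/class/leds/tpacpi::standby";
	char path[128];

	snprintf(path, sizeof(path), "%s/trigger", led);
	write_str(path, "timer");	/* routed to led_sysfs_blink_set() */
	snprintf(path, sizeof(path), "%s/delay_on", led);
	write_str(path, "500");		/* only 500/500 (or 0/0) is accepted */
	snprintf(path, sizeof(path), "%s/delay_off", led);
	write_str(path, "500");
	return 0;
}
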
+
+static enum led_brightness led_sysfs_get(struct led_classdev *led_cdev)
+{
+	int rc;
+
+	struct tpacpi_led_classdev *data = container_of(led_cdev,
+			     struct tpacpi_led_classdev, led_classdev);
+
+	rc = led_get_status(data->led);
+
+	if (rc == TPACPI_LED_OFF || rc < 0)
+		rc = LED_OFF;	/* no error handling in led class :( */
+	else
+		rc = LED_FULL;
+
+	return rc;
+}
+
+static void led_exit(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
+		if (tpacpi_leds[i].led_classdev.name)
+			led_classdev_unregister(&tpacpi_leds[i].led_classdev);
+	}
+
+	kfree(tpacpi_leds);
+}
+
+static int __init led_init(struct ibm_init_struct *iibm)
+{
+	unsigned int i;
+	int rc;
+
+	vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n");
+
+	TPACPI_ACPIHANDLE_INIT(led);
+
+	if (!led_handle)
+		/* led not supported on R30, R31 */
+		led_supported = TPACPI_LED_NONE;
+	else if (strlencmp(led_path, "SLED") == 0)
+		/* 570 */
+		led_supported = TPACPI_LED_570;
+	else if (strlencmp(led_path, "SYSL") == 0)
+		/* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20-21 */
+		led_supported = TPACPI_LED_OLD;
+	else
+		/* all others */
+		led_supported = TPACPI_LED_NEW;
+
+	vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n",
+		str_supported(led_supported), led_supported);
+
+	tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS,
+			      GFP_KERNEL);
+	if (!tpacpi_leds) {
+		printk(TPACPI_ERR "Out of memory for LED data\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
+		tpacpi_leds[i].led = i;
+
+		tpacpi_leds[i].led_classdev.brightness_set = &led_sysfs_set;
+		tpacpi_leds[i].led_classdev.blink_set = &led_sysfs_blink_set;
+		if (led_supported == TPACPI_LED_570)
+			tpacpi_leds[i].led_classdev.brightness_get =
+							&led_sysfs_get;
+
+		tpacpi_leds[i].led_classdev.name = tpacpi_led_names[i];
+
+		INIT_WORK(&tpacpi_leds[i].work, led_set_status_worker);
+
+		rc = led_classdev_register(&tpacpi_pdev->dev,
+					   &tpacpi_leds[i].led_classdev);
+		if (rc < 0) {
+			tpacpi_leds[i].led_classdev.name = NULL;
+			led_exit();
+			return rc;
+		}
+	}
+
+	return (led_supported != TPACPI_LED_NONE)? 0 : 1;
+}
+
+#define str_led_status(s) \
+	((s) == TPACPI_LED_OFF ? "off" : \
+		((s) == TPACPI_LED_ON ? "on" : "blinking"))
+
+static int led_read(char *p)
+{
+	int len = 0;
+
+	if (!led_supported) {
+		len += sprintf(p + len, "status:\t\tnot supported\n");
+		return len;
+	}
+	len += sprintf(p + len, "status:\t\tsupported\n");
+
+	if (led_supported == TPACPI_LED_570) {
+		/* 570 */
+		int i, status;
+		for (i = 0; i < 8; i++) {
+			status = led_get_status(i);
+			if (status < 0)
+				return -EIO;
+			len += sprintf(p + len, "%d:\t\t%s\n",
+				       i, str_led_status(status));
+		}
+	}
+
+	len += sprintf(p + len, "commands:\t"
+		       "<led> on, <led> off, <led> blink (<led> is 0-7)\n");
+
+	return len;
+}
+
+static int led_write(char *buf)
+{
+	char *cmd;
+	int led, rc;
+	enum led_status_t s;
+
+	if (!led_supported)
+		return -ENODEV;
+
+	while ((cmd = next_cmd(&buf))) {
+		if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 7)
+			return -EINVAL;
+
+		if (strstr(cmd, "off")) {
+			s = TPACPI_LED_OFF;
+		} else if (strstr(cmd, "on")) {
+			s = TPACPI_LED_ON;
+		} else if (strstr(cmd, "blink")) {
+			s = TPACPI_LED_BLINK;
+		} else {
+			return -EINVAL;
+		}
+
+		rc = led_set_status(led, s);
+		if (rc < 0)
+			return rc;
+	}
+
+	return 0;
+}
+
+static struct ibm_struct led_driver_data = {
+	.name = "led",
+	.read = led_read,
+	.write = led_write,
+	.exit = led_exit,
+};
+
+/*************************************************************************
+ * Beep subdriver
+ */
+
+TPACPI_HANDLE(beep, ec, "BEEP");	/* all except R30, R31 */
+
+static int __init beep_init(struct ibm_init_struct *iibm)
+{
+	vdbg_printk(TPACPI_DBG_INIT, "initializing beep subdriver\n");
+
+	TPACPI_ACPIHANDLE_INIT(beep);
+
+	vdbg_printk(TPACPI_DBG_INIT, "beep is %s\n",
+		str_supported(beep_handle != NULL));
+
+	return (beep_handle)? 0 : 1;
+}
+
+static int beep_read(char *p)
+{
+	int len = 0;
+
+	if (!beep_handle)
+		len += sprintf(p + len, "status:\t\tnot supported\n");
+	else {
+		len += sprintf(p + len, "status:\t\tsupported\n");
+		len += sprintf(p + len, "commands:\t<cmd> (<cmd> is 0-17)\n");
+	}
+
+	return len;
+}
+
+static int beep_write(char *buf)
+{
+	char *cmd;
+	int beep_cmd;
+
+	if (!beep_handle)
+		return -ENODEV;
+
+	while ((cmd = next_cmd(&buf))) {
+		if (sscanf(cmd, "%u", &beep_cmd) == 1 &&
+		    beep_cmd >= 0 && beep_cmd <= 17) {
+			/* beep_cmd set */
+		} else
+			return -EINVAL;
+		if (!acpi_evalf(beep_handle, NULL, NULL, "vdd", beep_cmd, 0))
+			return -EIO;
+	}
+
+	return 0;
+}
+
+static struct ibm_struct beep_driver_data = {
+	.name = "beep",
+	.read = beep_read,
+	.write = beep_write,
+};
+
+/*************************************************************************
+ * Thermal subdriver
+ */
+
+enum thermal_access_mode {
+	TPACPI_THERMAL_NONE = 0,	/* No thermal support */
+	TPACPI_THERMAL_ACPI_TMP07,	/* Use ACPI TMP0-7 */
+	TPACPI_THERMAL_ACPI_UPDT,	/* Use ACPI TMP0-7 with UPDT */
+	TPACPI_THERMAL_TPEC_8,		/* Use ACPI EC regs, 8 sensors */
+	TPACPI_THERMAL_TPEC_16,		/* Use ACPI EC regs, 16 sensors */
+};
+
+enum { /* TPACPI_THERMAL_TPEC_* */
+	TP_EC_THERMAL_TMP0 = 0x78,	/* ACPI EC regs TMP 0..7 */
+	TP_EC_THERMAL_TMP8 = 0xC0,	/* ACPI EC regs TMP 8..15 */
+	TP_EC_THERMAL_TMP_NA = -128,	/* ACPI EC sensor not available */
+};
+
+#define TPACPI_MAX_THERMAL_SENSORS 16	/* Max thermal sensors supported */
+struct ibm_thermal_sensors_struct {
+	s32 temp[TPACPI_MAX_THERMAL_SENSORS];
+};
+
+static enum thermal_access_mode thermal_read_mode;
+
+/* idx is zero-based */
+static int thermal_get_sensor(int idx, s32 *value)
+{
+	int t;
+	s8 tmp;
+	char tmpi[5];
+
+	t = TP_EC_THERMAL_TMP0;
+
+	switch (thermal_read_mode) {
+#if TPACPI_MAX_THERMAL_SENSORS >= 16
+	case TPACPI_THERMAL_TPEC_16:
+		if (idx >= 8 && idx <= 15) {
+			t = TP_EC_THERMAL_TMP8;
+			idx -= 8;
+		}
+		/* fallthrough */
+#endif
+	case TPACPI_THERMAL_TPEC_8:
+		if (idx <= 7) {
+			if (!acpi_ec_read(t + idx, &tmp))
+				return -EIO;
+			*value = tmp * 1000;
+			return 0;
+		}
+		break;
+
+	case TPACPI_THERMAL_ACPI_UPDT:
+		if (idx <= 7) {
+			snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
+			if (!acpi_evalf(ec_handle, NULL, "UPDT", "v"))
+				return -EIO;
+			if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
+				return -EIO;
+			*value = (t - 2732) * 100;
+			return 0;
+		}
+		break;
+
+	case TPACPI_THERMAL_ACPI_TMP07:
+		if (idx <= 7) {
+			snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
+			if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
+				return -EIO;
+			if (t > 127 || t < -127)
+				t = TP_EC_THERMAL_TMP_NA;
+			*value = t * 1000;
+			return 0;
+		}
+		break;
+
+	case TPACPI_THERMAL_NONE:
+	default:
+		return -ENOSYS;
+	}
+
+	return -EINVAL;
+}
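
The three firmware paths report temperature in different units: the EC registers and the TMP0..TMP7 methods return whole degrees Celsius, scaled to millidegrees with * 1000, while the UPDT-assisted path returns tenths of a kelvin, hence the (t - 2732) * 100 conversion. A minimal sketch of that last conversion (the helper name is illustrative, not part of the driver):

/* Sketch: decikelvin to millidegree Celsius, as done in the
 * TPACPI_THERMAL_ACPI_UPDT branch of thermal_get_sensor(). */
static inline int updt_decikelvin_to_millicelsius(int dk)
{
	return (dk - 2732) * 100;	/* e.g. 3182 -> 45000 (45 degrees C) */
}
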
+
+static int thermal_get_sensors(struct ibm_thermal_sensors_struct *s)
+{
+	int res, i;
+	int n;
+
+	n = 8;
+	i = 0;
+
+	if (!s)
+		return -EINVAL;
+
+	if (thermal_read_mode == TPACPI_THERMAL_TPEC_16)
+		n = 16;
+
+	for (i = 0 ; i < n; i++) {
+		res = thermal_get_sensor(i, &s->temp[i]);
+		if (res)
+			return res;
+	}
+
+	return n;
+}
+
+/* sysfs temp##_input -------------------------------------------------- */
+
+static ssize_t thermal_temp_input_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	struct sensor_device_attribute *sensor_attr =
+					to_sensor_dev_attr(attr);
+	int idx = sensor_attr->index;
+	s32 value;
+	int res;
+
+	res = thermal_get_sensor(idx, &value);
+	if (res)
+		return res;
+	if (value == TP_EC_THERMAL_TMP_NA * 1000)
+		return -ENXIO;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+#define THERMAL_SENSOR_ATTR_TEMP(_idxA, _idxB) \
+	 SENSOR_ATTR(temp##_idxA##_input, S_IRUGO, \
+		     thermal_temp_input_show, NULL, _idxB)
+
+static struct sensor_device_attribute sensor_dev_attr_thermal_temp_input[] = {
+	THERMAL_SENSOR_ATTR_TEMP(1, 0),
+	THERMAL_SENSOR_ATTR_TEMP(2, 1),
+	THERMAL_SENSOR_ATTR_TEMP(3, 2),
+	THERMAL_SENSOR_ATTR_TEMP(4, 3),
+	THERMAL_SENSOR_ATTR_TEMP(5, 4),
+	THERMAL_SENSOR_ATTR_TEMP(6, 5),
+	THERMAL_SENSOR_ATTR_TEMP(7, 6),
+	THERMAL_SENSOR_ATTR_TEMP(8, 7),
+	THERMAL_SENSOR_ATTR_TEMP(9, 8),
+	THERMAL_SENSOR_ATTR_TEMP(10, 9),
+	THERMAL_SENSOR_ATTR_TEMP(11, 10),
+	THERMAL_SENSOR_ATTR_TEMP(12, 11),
+	THERMAL_SENSOR_ATTR_TEMP(13, 12),
+	THERMAL_SENSOR_ATTR_TEMP(14, 13),
+	THERMAL_SENSOR_ATTR_TEMP(15, 14),
+	THERMAL_SENSOR_ATTR_TEMP(16, 15),
+};
+
+#define THERMAL_ATTRS(X) \
+	&sensor_dev_attr_thermal_temp_input[X].dev_attr.attr
+
+static struct attribute *thermal_temp_input_attr[] = {
+	THERMAL_ATTRS(8),
+	THERMAL_ATTRS(9),
+	THERMAL_ATTRS(10),
+	THERMAL_ATTRS(11),
+	THERMAL_ATTRS(12),
+	THERMAL_ATTRS(13),
+	THERMAL_ATTRS(14),
+	THERMAL_ATTRS(15),
+	THERMAL_ATTRS(0),
+	THERMAL_ATTRS(1),
+	THERMAL_ATTRS(2),
+	THERMAL_ATTRS(3),
+	THERMAL_ATTRS(4),
+	THERMAL_ATTRS(5),
+	THERMAL_ATTRS(6),
+	THERMAL_ATTRS(7),
+	NULL
+};
+
+static const struct attribute_group thermal_temp_input16_group = {
+	.attrs = thermal_temp_input_attr
+};
+
+static const struct attribute_group thermal_temp_input8_group = {
+	.attrs = &thermal_temp_input_attr[8]
+};
+
+#undef THERMAL_SENSOR_ATTR_TEMP
+#undef THERMAL_ATTRS
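
The ordering of thermal_temp_input_attr[] is deliberate: the entries for sensors 8..15 come first, so the full array backs the 16-sensor group while &thermal_temp_input_attr[8] starts at sensor 0 and, together with the trailing NULL, forms a complete 8-sensor group on its own. A stand-alone illustration of the same idiom (not driver code):

/* A NULL-terminated pointer array whose tail is itself a valid,
 * shorter NULL-terminated array -- the trick used above. */
#include <stdio.h>

static const char *all[] = {
	"temp9",  "temp10", "temp11", "temp12",
	"temp13", "temp14", "temp15", "temp16",
	"temp1",  "temp2",  "temp3",  "temp4",
	"temp5",  "temp6",  "temp7",  "temp8",
	NULL
};

int main(void)
{
	const char **eight = &all[8];	/* like &thermal_temp_input_attr[8] */

	while (*eight)
		puts(*eight++);		/* prints temp1 .. temp8 only */
	return 0;
}
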
+
+/* --------------------------------------------------------------------- */
+
+static int __init thermal_init(struct ibm_init_struct *iibm)
+{
+	u8 t, ta1, ta2;
+	int i;
+	int acpi_tmp7;
+	int res;
+
+	vdbg_printk(TPACPI_DBG_INIT, "initializing thermal subdriver\n");
+
+	acpi_tmp7 = acpi_evalf(ec_handle, NULL, "TMP7", "qv");
+
+	if (thinkpad_id.ec_model) {
+		/*
+		 * Direct EC access mode: sensors at registers
+		 * 0x78-0x7F, 0xC0-0xC7.  Registers return 0x00 for
+		 * non-implemented, thermal sensors return 0x80 when
+		 * not available
+		 */
+
+		ta1 = ta2 = 0;
+		for (i = 0; i < 8; i++) {
+			if (acpi_ec_read(TP_EC_THERMAL_TMP0 + i, &t)) {
+				ta1 |= t;
+			} else {
+				ta1 = 0;
+				break;
+			}
+			if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
+				ta2 |= t;
+			} else {
+				ta1 = 0;
+				break;
+			}
+		}
+		if (ta1 == 0) {
+			/* This is sheer paranoia, but we handle it anyway */
+			if (acpi_tmp7) {
+				printk(TPACPI_ERR
+				       "ThinkPad ACPI EC access misbehaving, "
+				       "falling back to ACPI TMPx access "
+				       "mode\n");
+				thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
+			} else {
+				printk(TPACPI_ERR
+				       "ThinkPad ACPI EC access misbehaving, "
+				       "disabling thermal sensors access\n");
+				thermal_read_mode = TPACPI_THERMAL_NONE;
+			}
+		} else {
+			thermal_read_mode =
+			    (ta2 != 0) ?
+			    TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
+		}
+	} else if (acpi_tmp7) {
+		if (acpi_evalf(ec_handle, NULL, "UPDT", "qv")) {
+			/* 600e/x, 770e, 770x */
+			thermal_read_mode = TPACPI_THERMAL_ACPI_UPDT;
+		} else {
+			/* Standard ACPI TMPx access, max 8 sensors */
+			thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
+		}
+	} else {
+		/* temperatures not supported on 570, G4x, R30, R31, R32 */
+		thermal_read_mode = TPACPI_THERMAL_NONE;
+	}
+
+	vdbg_printk(TPACPI_DBG_INIT, "thermal is %s, mode %d\n",
+		str_supported(thermal_read_mode != TPACPI_THERMAL_NONE),
+		thermal_read_mode);
+
+	switch (thermal_read_mode) {
+	case TPACPI_THERMAL_TPEC_16:
+		res = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
+				&thermal_temp_input16_group);
+		if (res)
+			return res;
+		break;
+	case TPACPI_THERMAL_TPEC_8:
+	case TPACPI_THERMAL_ACPI_TMP07:
+	case TPACPI_THERMAL_ACPI_UPDT:
+		res = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
+				&thermal_temp_input8_group);
+		if (res)
+			return res;
+		break;
+	case TPACPI_THERMAL_NONE:
+	default:
+		return 1;
+	}
+
+	return 0;
+}
+
+static void thermal_exit(void)
+{
+	switch (thermal_read_mode) {
+	case TPACPI_THERMAL_TPEC_16:
+		sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
+				   &thermal_temp_input16_group);
+		break;
+	case TPACPI_THERMAL_TPEC_8:
+	case TPACPI_THERMAL_ACPI_TMP07:
+	case TPACPI_THERMAL_ACPI_UPDT:
+		sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
+				   &thermal_temp_input8_group);
+		break;
+	case TPACPI_THERMAL_NONE:
+	default:
+		break;
+	}
+}
+
+static int thermal_read(char *p)
+{
+	int len = 0;
+	int n, i;
+	struct ibm_thermal_sensors_struct t;
+
+	n = thermal_get_sensors(&t);
+	if (unlikely(n < 0))
+		return n;
+
+	len += sprintf(p + len, "temperatures:\t");
+
+	if (n > 0) {
+		for (i = 0; i < (n - 1); i++)
+			len += sprintf(p + len, "%d ", t.temp[i] / 1000);
+		len += sprintf(p + len, "%d\n", t.temp[i] / 1000);
+	} else
+		len += sprintf(p + len, "not supported\n");
+
+	return len;
+}
+
+static struct ibm_struct thermal_driver_data = {
+	.name = "thermal",
+	.read = thermal_read,
+	.exit = thermal_exit,
+};
+
+/*************************************************************************
+ * EC Dump subdriver
+ */
+
+static u8 ecdump_regs[256];
+
+static int ecdump_read(char *p)
+{
+	int len = 0;
+	int i, j;
+	u8 v;
+
+	len += sprintf(p + len, "EC      "
+		       " +00 +01 +02 +03 +04 +05 +06 +07"
+		       " +08 +09 +0a +0b +0c +0d +0e +0f\n");
+	for (i = 0; i < 256; i += 16) {
+		len += sprintf(p + len, "EC 0x%02x:", i);
+		for (j = 0; j < 16; j++) {
+			if (!acpi_ec_read(i + j, &v))
+				break;
+			if (v != ecdump_regs[i + j])
+				len += sprintf(p + len, " *%02x", v);
+			else
+				len += sprintf(p + len, "  %02x", v);
+			ecdump_regs[i + j] = v;
+		}
+		len += sprintf(p + len, "\n");
+		if (j != 16)
+			break;
+	}
+
+	/* These are way too dangerous to advertise openly... */
+#if 0
+	len += sprintf(p + len, "commands:\t0x<offset> 0x<value>"
+		       " (<offset> is 00-ff, <value> is 00-ff)\n");
+	len += sprintf(p + len, "commands:\t0x<offset> <value>  "
+		       " (<offset> is 00-ff, <value> is 0-255)\n");
+#endif
+	return len;
+}
+
+static int ecdump_write(char *buf)
+{
+	char *cmd;
+	int i, v;
+
+	while ((cmd = next_cmd(&buf))) {
+		if (sscanf(cmd, "0x%x 0x%x", &i, &v) == 2) {
+			/* i and v set */
+		} else if (sscanf(cmd, "0x%x %u", &i, &v) == 2) {
+			/* i and v set */
+		} else
+			return -EINVAL;
+		if (i >= 0 && i < 256 && v >= 0 && v < 256) {
+			if (!acpi_ec_write(i, v))
+				return -EIO;
+		} else
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct ibm_struct ecdump_driver_data = {
+	.name = "ecdump",
+	.read = ecdump_read,
+	.write = ecdump_write,
+	.flags.experimental = 1,
+};
+
+/*************************************************************************
+ * Backlight/brightness subdriver
+ */
+
+#define TPACPI_BACKLIGHT_DEV_NAME "thinkpad_screen"
+
+enum {
+	TP_EC_BACKLIGHT = 0x31,
+
+	/* TP_EC_BACKLIGHT bitmasks */
+	TP_EC_BACKLIGHT_LVLMSK = 0x1F,
+	TP_EC_BACKLIGHT_CMDMSK = 0xE0,
+	TP_EC_BACKLIGHT_MAPSW = 0x20,
+};
+
+static struct backlight_device *ibm_backlight_device;
+static int brightness_mode;
+static unsigned int brightness_enable = 2; /* 2 = auto, 0 = no, 1 = yes */
+
+static struct mutex brightness_mutex;
+
+/*
+ * ThinkPads can read brightness from two places: EC 0x31, or
+ * CMOS NVRAM byte 0x5E, bits 0-3.
+ *
+ * EC 0x31 has the following layout
+ *   Bit 7: unknown function
+ *   Bit 6: unknown function
+ *   Bit 5: Z: honour scale changes, NZ: ignore scale changes
+ *   Bit 4: must be set to zero to avoid problems
+ *   Bit 3-0: backlight brightness level
+ *
+ * brightness_get_raw returns status data in the EC 0x31 layout
+ */
+static int brightness_get_raw(int *status)
+{
+	u8 lec = 0, lcmos = 0, level = 0;
+
+	if (brightness_mode & 1) {
+		if (!acpi_ec_read(TP_EC_BACKLIGHT, &lec))
+			return -EIO;
+		level = lec & TP_EC_BACKLIGHT_LVLMSK;
+	}
+	if (brightness_mode & 2) {
+		lcmos = (nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS)
+			 & TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
+			>> TP_NVRAM_POS_LEVEL_BRIGHTNESS;
+		lcmos &= (tp_features.bright_16levels)? 0x0f : 0x07;
+		level = lcmos;
+	}
+
+	if (brightness_mode == 3) {
+		*status = lec;	/* Prefer EC, CMOS is just a backing store */
+		lec &= TP_EC_BACKLIGHT_LVLMSK;
+		if (lec == lcmos)
+			tp_warned.bright_cmos_ec_unsync = 0;
+		else {
+			if (!tp_warned.bright_cmos_ec_unsync) {
+				printk(TPACPI_ERR
+					"CMOS NVRAM (%u) and EC (%u) do not "
+					"agree on display brightness level\n",
+					(unsigned int) lcmos,
+					(unsigned int) lec);
+				tp_warned.bright_cmos_ec_unsync = 1;
+			}
+			return -EIO;
+		}
+	} else {
+		*status = level;
+	}
+
+	return 0;
+}
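
brightness_mode is used as a bitmask here: bit 0 selects the EC 0x31 path, bit 1 the NVRAM path, and mode 3 reads both and requires them to agree (with the EC value preferred). For illustration, a raw EC 0x31 byte could be decoded per the layout documented above like this (hypothetical helper; it relies only on the TP_EC_BACKLIGHT_* constants defined earlier in this file):

/* Hypothetical decode of a raw EC 0x31 byte, following the layout
 * documented above brightness_get_raw(); not used by the driver. */
struct tp_backlight_ec31 {
	unsigned int level;		/* bits 3-0: brightness level */
	unsigned int ignore_scale;	/* bit 5 set: ignore scale changes */
};

static inline struct tp_backlight_ec31 tp_decode_ec31(u8 raw)
{
	struct tp_backlight_ec31 d = {
		.level        = raw & TP_EC_BACKLIGHT_LVLMSK,
		.ignore_scale = !!(raw & TP_EC_BACKLIGHT_MAPSW),
	};
	return d;
}
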
+
+/* May return EINTR which can always be mapped to ERESTARTSYS */
+static int brightness_set(int value)
+{
+	int cmos_cmd, inc, i, res;
+	int current_value;
+	int command_bits;
+
+	if (value > ((tp_features.bright_16levels)? 15 : 7) ||
+	    value < 0)
+		return -EINVAL;
+
+	res = mutex_lock_killable(&brightness_mutex);
+	if (res < 0)
+		return res;
+
+	res = brightness_get_raw(&current_value);
+	if (res < 0)
+		goto errout;
+
+	command_bits = current_value & TP_EC_BACKLIGHT_CMDMSK;
+	current_value &= TP_EC_BACKLIGHT_LVLMSK;
+
+	cmos_cmd = value > current_value ?
+			TP_CMOS_BRIGHTNESS_UP :
+			TP_CMOS_BRIGHTNESS_DOWN;
+	inc = (value > current_value)? 1 : -1;
+
+	res = 0;
+	for (i = current_value; i != value; i += inc) {
+		if ((brightness_mode & 2) &&
+		    issue_thinkpad_cmos_command(cmos_cmd)) {
+			res = -EIO;
+			goto errout;
+		}
+		if ((brightness_mode & 1) &&
+		    !acpi_ec_write(TP_EC_BACKLIGHT,
+				   (i + inc) | command_bits)) {
+			res = -EIO;
+			goto errout;
+		}
+	}
+
+errout:
+	mutex_unlock(&brightness_mutex);
+	return res;
+}
+
+/* sysfs backlight class ----------------------------------------------- */
+
+static int brightness_update_status(struct backlight_device *bd)
+{
+	/* it is the backlight class's job (caller) to handle
+	 * EINTR and other errors properly */
+	return brightness_set(
+		(bd->props.fb_blank == FB_BLANK_UNBLANK &&
+		 bd->props.power == FB_BLANK_UNBLANK) ?
+				bd->props.brightness : 0);
+}
+
+static int brightness_get(struct backlight_device *bd)
+{
+	int status, res;
+
+	res = brightness_get_raw(&status);
+	if (res < 0)
+		return 0; /* FIXME: teach backlight about error handling */
+
+	return status & TP_EC_BACKLIGHT_LVLMSK;
+}
+
+static struct backlight_ops ibm_backlight_data = {
+	.get_brightness = brightness_get,
+	.update_status  = brightness_update_status,
+};
+
+/* --------------------------------------------------------------------- */
+
+static int __init brightness_init(struct ibm_init_struct *iibm)
+{
+	int b;
+
+	vdbg_printk(TPACPI_DBG_INIT, "initializing brightness subdriver\n");
+
+	mutex_init(&brightness_mutex);
+
+	/*
+	 * We always attempt to detect acpi support, so as to switch
+	 * Lenovo Vista BIOS to ACPI brightness mode even if we are not
+	 * going to publish a backlight interface
+	 */
+	b = tpacpi_check_std_acpi_brightness_support();
+	if (b > 0) {
+
+		if (acpi_video_backlight_support()) {
+			if (brightness_enable > 1) {
+				printk(TPACPI_NOTICE
+				       "Standard ACPI backlight interface "
+				       "available, not loading native one.\n");
+				return 1;
+			} else if (brightness_enable == 1) {
+				printk(TPACPI_NOTICE
+				       "Backlight control force enabled, even if standard "
+				       "ACPI backlight interface is available\n");
+			}
+		} else {
+			if (brightness_enable > 1) {
+				printk(TPACPI_NOTICE
+				       "Standard ACPI backlight interface not "
+				       "available, thinkpad_acpi native "
+				       "brightness control enabled\n");
+			}
+		}
+	}
+
+	if (!brightness_enable) {
+		dbg_printk(TPACPI_DBG_INIT,
+			   "brightness support disabled by "
+			   "module parameter\n");
+		return 1;
+	}
+
+	if (b > 16) {
+		printk(TPACPI_ERR
+		       "Unsupported brightness interface, "
+		       "please contact %s\n", TPACPI_MAIL);
+		return 1;
+	}
+	if (b == 16)
+		tp_features.bright_16levels = 1;
+
+	if (!brightness_mode) {
+		if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO)
+			brightness_mode = 2;
+		else
+			brightness_mode = 3;
+
+		dbg_printk(TPACPI_DBG_INIT, "selected brightness_mode=%d\n",
+			brightness_mode);
+	}
+
+	if (brightness_mode > 3)
+		return -EINVAL;
+
+	if (brightness_get_raw(&b) < 0)
+		return 1;
+
+	if (tp_features.bright_16levels)
+		printk(TPACPI_INFO
+		       "detected a 16-level brightness capable ThinkPad\n");
+
+	ibm_backlight_device = backlight_device_register(
+					TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL,
+					&ibm_backlight_data);
+	if (IS_ERR(ibm_backlight_device)) {
+		printk(TPACPI_ERR "Could not register backlight device\n");
+		return PTR_ERR(ibm_backlight_device);
+	}
+	vdbg_printk(TPACPI_DBG_INIT, "brightness is supported\n");
+
+	ibm_backlight_device->props.max_brightness =
+				(tp_features.bright_16levels)? 15 : 7;
+	ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
+	backlight_update_status(ibm_backlight_device);
+
+	return 0;
+}
+
+static void brightness_exit(void)
+{
+	if (ibm_backlight_device) {
+		vdbg_printk(TPACPI_DBG_EXIT,
+			    "calling backlight_device_unregister()\n");
+		backlight_device_unregister(ibm_backlight_device);
+	}
+}
+
+static int brightness_read(char *p)
+{
+	int len = 0;
+	int level;
+
+	level = brightness_get(NULL);
+	if (level < 0) {
+		len += sprintf(p + len, "level:\t\tunreadable\n");
+	} else {
+		len += sprintf(p + len, "level:\t\t%d\n", level);
+		len += sprintf(p + len, "commands:\tup, down\n");
+		len += sprintf(p + len, "commands:\tlevel <level>"
+			       " (<level> is 0-%d)\n",
+			       (tp_features.bright_16levels) ? 15 : 7);
+	}
+
+	return len;
+}
+
+static int brightness_write(char *buf)
+{
+	int level;
+	int rc;
+	char *cmd;
+	int max_level = (tp_features.bright_16levels) ? 15 : 7;
+
+	level = brightness_get(NULL);
+	if (level < 0)
+		return level;
+
+	while ((cmd = next_cmd(&buf))) {
+		if (strlencmp(cmd, "up") == 0) {
+			if (level < max_level)
+				level++;
+		} else if (strlencmp(cmd, "down") == 0) {
+			if (level > 0)
+				level--;
+		} else if (sscanf(cmd, "level %d", &level) == 1 &&
+			   level >= 0 && level <= max_level) {
+			/* new level set */
+		} else
+			return -EINVAL;
+	}
+
+	/*
+	 * Now we know what the final level should be, so we try to set it.
+	 * Doing it this way makes the syscall restartable in case of EINTR
+	 */
+	rc = brightness_set(level);
+	return (rc == -EINTR)? -ERESTARTSYS : rc;
+}
+
+static struct ibm_struct brightness_driver_data = {
+	.name = "brightness",
+	.read = brightness_read,
+	.write = brightness_write,
+	.exit = brightness_exit,
+};
+
+/*************************************************************************
+ * Volume subdriver
+ */
+
+static int volume_offset = 0x30;
+
+static int volume_read(char *p)
+{
+	int len = 0;
+	u8 level;
+
+	if (!acpi_ec_read(volume_offset, &level)) {
+		len += sprintf(p + len, "level:\t\tunreadable\n");
+	} else {
+		len += sprintf(p + len, "level:\t\t%d\n", level & 0xf);
+		len += sprintf(p + len, "mute:\t\t%s\n", onoff(level, 6));
+		len += sprintf(p + len, "commands:\tup, down, mute\n");
+		len += sprintf(p + len, "commands:\tlevel <level>"
+			       " (<level> is 0-15)\n");
+	}
+
+	return len;
+}
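
The whole volume state lives in one EC byte at volume_offset: bits 3-0 hold the level (0-15) and bit 6 (0x40) is the mute flag, which is why volume_read() and volume_write() mask with 0xf and 0x40. A small decoding sketch (hypothetical helper, not in the driver):

/* Hypothetical decode of the EC volume byte used above:
 * bits 3-0 = level (0-15), bit 6 (0x40) = mute. */
static inline void tp_decode_volume(u8 raw, unsigned int *level, int *muted)
{
	*level = raw & 0x0f;
	*muted = !!(raw & 0x40);
}
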
+
+static int volume_write(char *buf)
+{
+	int cmos_cmd, inc, i;
+	u8 level, mute;
+	int new_level, new_mute;
+	char *cmd;
+
+	while ((cmd = next_cmd(&buf))) {
+		if (!acpi_ec_read(volume_offset, &level))
+			return -EIO;
+		new_mute = mute = level & 0x40;
+		new_level = level = level & 0xf;
+
+		if (strlencmp(cmd, "up") == 0) {
+			if (mute)
+				new_mute = 0;
+			else
+				new_level = level == 15 ? 15 : level + 1;
+		} else if (strlencmp(cmd, "down") == 0) {
+			if (mute)
+				new_mute = 0;
+			else
+				new_level = level == 0 ? 0 : level - 1;
+		} else if (sscanf(cmd, "level %d", &new_level) == 1 &&
+			   new_level >= 0 && new_level <= 15) {
+			/* new_level set */
+		} else if (strlencmp(cmd, "mute") == 0) {
+			new_mute = 0x40;
+		} else
+			return -EINVAL;
+
+		if (new_level != level) {
+			/* mute doesn't change */
+
+			cmos_cmd = (new_level > level) ?
+					TP_CMOS_VOLUME_UP : TP_CMOS_VOLUME_DOWN;
+			inc = new_level > level ? 1 : -1;
+
+			if (mute && (issue_thinkpad_cmos_command(cmos_cmd) ||
+				     !acpi_ec_write(volume_offset, level)))
+				return -EIO;
+
+			for (i = level; i != new_level; i += inc)
+				if (issue_thinkpad_cmos_command(cmos_cmd) ||
+				    !acpi_ec_write(volume_offset, i + inc))
+					return -EIO;
+
+			if (mute &&
+			    (issue_thinkpad_cmos_command(TP_CMOS_VOLUME_MUTE) ||
+			     !acpi_ec_write(volume_offset, new_level + mute))) {
+				return -EIO;
+			}
+		}
+
+		if (new_mute != mute) {
+			/* level doesn't change */
+
+			cmos_cmd = (new_mute) ?
+				   TP_CMOS_VOLUME_MUTE : TP_CMOS_VOLUME_UP;
+
+			if (issue_thinkpad_cmos_command(cmos_cmd) ||
+			    !acpi_ec_write(volume_offset, level + new_mute))
+				return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+static struct ibm_struct volume_driver_data = {
+	.name = "volume",
+	.read = volume_read,
+	.write = volume_write,
+};
+
+/*************************************************************************
+ * Fan subdriver
+ */
+
+/*
+ * FAN ACCESS MODES
+ *
+ * TPACPI_FAN_RD_ACPI_GFAN:
+ * 	ACPI GFAN method: returns fan level
+ *
+ * 	see TPACPI_FAN_WR_ACPI_SFAN
+ * 	EC 0x2f (HFSP) not available if GFAN exists
+ *
+ * TPACPI_FAN_WR_ACPI_SFAN:
+ * 	ACPI SFAN method: sets fan level, 0 (stop) to 7 (max)
+ *
+ * 	EC 0x2f (HFSP) might be available *for reading*, but do not use
+ * 	it for writing.
+ *
+ * TPACPI_FAN_WR_TPEC:
+ * 	ThinkPad EC register 0x2f (HFSP): fan control loop mode
+ * 	Supported on almost all ThinkPads
+ *
+ * 	Fan speed changes of any sort (including those caused by the
+ * 	disengaged mode) are usually done slowly by the firmware as the
+ * 	maximum amount of fan duty cycle change per second seems to be
+ * 	limited.
+ *
+ * 	Reading is not available if GFAN exists.
+ * 	Writing is not available if SFAN exists.
+ *
+ * 	Bits
+ *	 7	automatic mode engaged;
+ *  		(default operation mode of the ThinkPad)
+ * 		fan level is ignored in this mode.
+ *	 6	full speed mode (takes precedence over bit 7);
+ *		not available on all thinkpads.  May disable
+ *		the tachometer while the fan controller ramps up
+ *		the speed (which can take up to a few *minutes*).
+ *		Speeds up fan to 100% duty-cycle, which is far above
+ *		the standard RPM levels.  It is not impossible that
+ *		it could cause hardware damage.
+ *	5-3	unused in some models.  Extra bits for fan level
+ *		in others, but still useless as all values above
+ *		7 map to the same speed as level 7 in these models.
+ *	2-0	fan level (0..7 usually)
+ *			0x00 = stop
+ * 			0x07 = max (set when temperatures critical)
+ * 		Some ThinkPads may have other levels, see
+ * 		TPACPI_FAN_WR_ACPI_FANS (X31/X40/X41)
+ *
+ *	FIRMWARE BUG: on some models, EC 0x2f might not be initialized at
+ *	boot. Apparently the EC does not initialize it, so unless ACPI DSDT
+ *	does so, its initial value is meaningless (0x07).
+ *
+ *	For firmware bugs, refer to:
+ *	http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
+ *
+ * 	----
+ *
+ *	ThinkPad EC register 0x84 (LSB), 0x85 (MSB):
+ *	Main fan tachometer reading (in RPM)
+ *
+ *	This register is present on all ThinkPads with a new-style EC, and
+ *	it is known not to be present on the A21m/e, and T22, as there is
+ *	something else in offset 0x84 according to the ACPI DSDT.  Other
+ *	ThinkPads from this same time period (and earlier) probably lack the
+ *	tachometer as well.
+ *
+ *	Unfortunately a lot of ThinkPads with new-style ECs but whose firmware
+ *	was never fixed by IBM to report the EC firmware version string
+ *	probably support the tachometer (like the early X models), so
+ *	detecting it is quite hard.  We need more data to know for sure.
+ *
+ *	FIRMWARE BUG: always read 0x84 first, otherwise incorrect readings
+ *	might result.
+ *
+ *	FIRMWARE BUG: may go stale while the EC is switching to full speed
+ *	mode.
+ *
+ *	For firmware bugs, refer to:
+ *	http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
+ *
+ * TPACPI_FAN_WR_ACPI_FANS:
+ *	ThinkPad X31, X40, X41.  Not available in the X60.
+ *
+ *	FANS ACPI handle: takes three arguments: low speed, medium speed,
+ *	high speed.  ACPI DSDT seems to map these three speeds to levels
+ *	as follows: STOP LOW LOW MED MED HIGH HIGH HIGH HIGH
+ *	(this map is stored on FAN0..FAN8 as "0,1,1,2,2,3,3,3,3")
+ *
+ * 	The speeds are stored on handles
+ * 	(FANA:FAN9), (FANC:FANB), (FANE:FAND).
+ *
+ * 	There are three default speed sets, accessible as handles:
+ * 	FS1L,FS1M,FS1H; FS2L,FS2M,FS2H; FS3L,FS3M,FS3H
+ *
+ * 	ACPI DSDT switches which set is in use depending on various
+ * 	factors.
+ *
+ * 	TPACPI_FAN_WR_TPEC is also available and should be used to
+ * 	command the fan.  The X31/X40/X41 seems to have 8 fan levels,
+ * 	but the ACPI tables just mention level 7.
+ */
+
+enum {					/* Fan control constants */
+	fan_status_offset = 0x2f,	/* EC register 0x2f */
+	fan_rpm_offset = 0x84,		/* EC register 0x84: LSB, 0x85 MSB (RPM)
+					 * 0x84 must be read before 0x85 */
+
+	TP_EC_FAN_FULLSPEED = 0x40,	/* EC fan mode: full speed */
+	TP_EC_FAN_AUTO	    = 0x80,	/* EC fan mode: auto fan control */
+
+	TPACPI_FAN_LAST_LEVEL = 0x100,	/* Use cached last-seen fan level */
+};
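
In the HFSP register the mode bits share the byte with the level bits, so a command value is just an OR of the two; fan_set_level() below forces a minimum level of 7 (full-speed) or 4 (auto) as a safety net in case the EC ignores the mode bit. A minimal sketch of the composition:

/* Sketch: compose an HFSP command byte from a mode bit plus a
 * fallback level, mirroring fan_set_level() below.
 * Examples: tp_fan_hfsp_cmd(TP_EC_FAN_FULLSPEED, 7) == 0x47
 *           tp_fan_hfsp_cmd(TP_EC_FAN_AUTO, 4)      == 0x84 */
static inline u8 tp_fan_hfsp_cmd(u8 mode_bit, u8 level)
{
	return mode_bit | (level & 0x07);
}
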
+
+enum fan_status_access_mode {
+	TPACPI_FAN_NONE = 0,		/* No fan status or control */
+	TPACPI_FAN_RD_ACPI_GFAN,	/* Use ACPI GFAN */
+	TPACPI_FAN_RD_TPEC,		/* Use ACPI EC regs 0x2f, 0x84-0x85 */
+};
+
+enum fan_control_access_mode {
+	TPACPI_FAN_WR_NONE = 0,		/* No fan control */
+	TPACPI_FAN_WR_ACPI_SFAN,	/* Use ACPI SFAN */
+	TPACPI_FAN_WR_TPEC,		/* Use ACPI EC reg 0x2f */
+	TPACPI_FAN_WR_ACPI_FANS,	/* Use ACPI FANS and EC reg 0x2f */
+};
+
+enum fan_control_commands {
+	TPACPI_FAN_CMD_SPEED 	= 0x0001,	/* speed command */
+	TPACPI_FAN_CMD_LEVEL 	= 0x0002,	/* level command  */
+	TPACPI_FAN_CMD_ENABLE	= 0x0004,	/* enable/disable cmd,
+						 * and also watchdog cmd */
+};
+
+static int fan_control_allowed;
+
+static enum fan_status_access_mode fan_status_access_mode;
+static enum fan_control_access_mode fan_control_access_mode;
+static enum fan_control_commands fan_control_commands;
+
+static u8 fan_control_initial_status;
+static u8 fan_control_desired_level;
+static u8 fan_control_resume_level;
+static int fan_watchdog_maxinterval;
+
+static struct mutex fan_mutex;
+
+static void fan_watchdog_fire(struct work_struct *ignored);
+static DECLARE_DELAYED_WORK(fan_watchdog_task, fan_watchdog_fire);
+
+TPACPI_HANDLE(fans, ec, "FANS");	/* X31, X40, X41 */
+TPACPI_HANDLE(gfan, ec, "GFAN",	/* 570 */
+	   "\\FSPD",		/* 600e/x, 770e, 770x */
+	   );			/* all others */
+TPACPI_HANDLE(sfan, ec, "SFAN",	/* 570 */
+	   "JFNS",		/* 770x-JL */
+	   );			/* all others */
+
+/*
+ * Uninitialized HFSP quirk: ACPI DSDT and EC fail to initialize the
+ * HFSP register at boot, so it contains 0x07 but the ThinkPad could
+ * be in auto mode (0x80).
+ *
+ * This is corrected by any write to HFSP either by the driver, or
+ * by the firmware.
+ *
+ * We assume 0x07 really means auto mode while this quirk is active,
+ * as this is far more likely than the ThinkPad being in level 7,
+ * which is only used by the firmware during thermal emergencies.
+ */
+
+static void fan_quirk1_detect(void)
+{
+	/* In some ThinkPads, neither the EC nor the ACPI
+	 * DSDT initialize the HFSP register, and it ends up
+	 * being initially set to 0x07 when it *could* be
+	 * either 0x07 or 0x80.
+	 *
+	 * Enable for TP-1Y (T43), TP-78 (R51e),
+	 * TP-76 (R52), TP-70 (T43, R52), which are known
+	 * to be buggy. */
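+	/* The constants below are the first two bytes of the EC firmware
+	 * ID string, packed as in get_thinkpad_model_data(): e.g. 0x5931
+	 * is '1' | ('Y' << 8), i.e. an EC string beginning with "1Y"
+	 * (TP-1Y). */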
+	if (fan_control_initial_status == 0x07) {
+		switch (thinkpad_id.ec_model) {
+		case 0x5931: /* TP-1Y */
+		case 0x3837: /* TP-78 */
+		case 0x3637: /* TP-76 */
+		case 0x3037: /* TP-70 */
+			printk(TPACPI_NOTICE
+			       "fan_init: initial fan status is unknown, "
+			       "assuming it is in auto mode\n");
+			tp_features.fan_ctrl_status_undef = 1;
+			break;
+		}
+	}
+}
+
+static void fan_quirk1_handle(u8 *fan_status)
+{
+	if (unlikely(tp_features.fan_ctrl_status_undef)) {
+		if (*fan_status != fan_control_initial_status) {
+			/* something changed the HFSP register since
+			 * driver init time, so it is not undefined
+			 * anymore */
+			tp_features.fan_ctrl_status_undef = 0;
+		} else {
+			/* Return most likely status. In fact, it
+			 * might be the only possible status */
+			*fan_status = TP_EC_FAN_AUTO;
+		}
+	}
+}
+
+/*
+ * Call with fan_mutex held
+ */
+static void fan_update_desired_level(u8 status)
+{
+	if ((status &
+	     (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) == 0) {
+		if (status > 7)
+			fan_control_desired_level = 7;
+		else
+			fan_control_desired_level = status;
+	}
+}
+
+static int fan_get_status(u8 *status)
+{
+	u8 s;
+
+	/* TODO:
+	 * Add TPACPI_FAN_RD_ACPI_FANS ? */
+
+	switch (fan_status_access_mode) {
+	case TPACPI_FAN_RD_ACPI_GFAN:
+		/* 570, 600e/x, 770e, 770x */
+
+		if (unlikely(!acpi_evalf(gfan_handle, &s, NULL, "d")))
+			return -EIO;
+
+		if (likely(status))
+			*status = s & 0x07;
+
+		break;
+
+	case TPACPI_FAN_RD_TPEC:
+		/* all except 570, 600e/x, 770e, 770x */
+		if (unlikely(!acpi_ec_read(fan_status_offset, &s)))
+			return -EIO;
+
+		if (likely(status)) {
+			*status = s;
+			fan_quirk1_handle(status);
+		}
+
+		break;
+
+	default:
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+static int fan_get_status_safe(u8 *status)
+{
+	int rc;
+	u8 s;
+
+	if (mutex_lock_killable(&fan_mutex))
+		return -ERESTARTSYS;
+	rc = fan_get_status(&s);
+	if (!rc)
+		fan_update_desired_level(s);
+	mutex_unlock(&fan_mutex);
+
+	if (status)
+		*status = s;
+
+	return rc;
+}
+
+static int fan_get_speed(unsigned int *speed)
+{
+	u8 hi, lo;
+
+	switch (fan_status_access_mode) {
+	case TPACPI_FAN_RD_TPEC:
+		/* all except 570, 600e/x, 770e, 770x */
+		if (unlikely(!acpi_ec_read(fan_rpm_offset, &lo) ||
+			     !acpi_ec_read(fan_rpm_offset + 1, &hi)))
+			return -EIO;
+
+		if (likely(speed))
+			*speed = (hi << 8) | lo;
+
+		break;
+
+	default:
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+static int fan_set_level(int level)
+{
+	if (!fan_control_allowed)
+		return -EPERM;
+
+	switch (fan_control_access_mode) {
+	case TPACPI_FAN_WR_ACPI_SFAN:
+		if (level >= 0 && level <= 7) {
+			if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", level))
+				return -EIO;
+		} else
+			return -EINVAL;
+		break;
+
+	case TPACPI_FAN_WR_ACPI_FANS:
+	case TPACPI_FAN_WR_TPEC:
+		if (!(level & TP_EC_FAN_AUTO) &&
+		    !(level & TP_EC_FAN_FULLSPEED) &&
+		    ((level < 0) || (level > 7)))
+			return -EINVAL;
+
+		/* safety net should the EC not support AUTO
+		 * or FULLSPEED mode bits and just ignore them */
+		if (level & TP_EC_FAN_FULLSPEED)
+			level |= 7;	/* safety min speed 7 */
+		else if (level & TP_EC_FAN_AUTO)
+			level |= 4;	/* safety min speed 4 */
+
+		if (!acpi_ec_write(fan_status_offset, level))
+			return -EIO;
+		else
+			tp_features.fan_ctrl_status_undef = 0;
+		break;
+
+	default:
+		return -ENXIO;
+	}
+	return 0;
+}
+
+static int fan_set_level_safe(int level)
+{
+	int rc;
+
+	if (!fan_control_allowed)
+		return -EPERM;
+
+	if (mutex_lock_killable(&fan_mutex))
+		return -ERESTARTSYS;
+
+	if (level == TPACPI_FAN_LAST_LEVEL)
+		level = fan_control_desired_level;
+
+	rc = fan_set_level(level);
+	if (!rc)
+		fan_update_desired_level(level);
+
+	mutex_unlock(&fan_mutex);
+	return rc;
+}
+
+static int fan_set_enable(void)
+{
+	u8 s;
+	int rc;
+
+	if (!fan_control_allowed)
+		return -EPERM;
+
+	if (mutex_lock_killable(&fan_mutex))
+		return -ERESTARTSYS;
+
+	switch (fan_control_access_mode) {
+	case TPACPI_FAN_WR_ACPI_FANS:
+	case TPACPI_FAN_WR_TPEC:
+		rc = fan_get_status(&s);
+		if (rc < 0)
+			break;
+
+		/* Don't go out of emergency fan mode */
+		if (s != 7) {
+			s &= 0x07;
+			s |= TP_EC_FAN_AUTO | 4; /* min fan speed 4 */
+		}
+
+		if (!acpi_ec_write(fan_status_offset, s))
+			rc = -EIO;
+		else {
+			tp_features.fan_ctrl_status_undef = 0;
+			rc = 0;
+		}
+		break;
+
+	case TPACPI_FAN_WR_ACPI_SFAN:
+		rc = fan_get_status(&s);
+		if (rc < 0)
+			break;
+
+		s &= 0x07;
+
+		/* Set fan to at least level 4 */
+		s |= 4;
+
+		if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", s))
+			rc = -EIO;
+		else
+			rc = 0;
+		break;
+
+	default:
+		rc = -ENXIO;
+	}
+
+	mutex_unlock(&fan_mutex);
+	return rc;
+}
+
+static int fan_set_disable(void)
+{
+	int rc;
+
+	if (!fan_control_allowed)
+		return -EPERM;
+
+	if (mutex_lock_killable(&fan_mutex))
+		return -ERESTARTSYS;
+
+	rc = 0;
+	switch (fan_control_access_mode) {
+	case TPACPI_FAN_WR_ACPI_FANS:
+	case TPACPI_FAN_WR_TPEC:
+		if (!acpi_ec_write(fan_status_offset, 0x00))
+			rc = -EIO;
+		else {
+			fan_control_desired_level = 0;
+			tp_features.fan_ctrl_status_undef = 0;
+		}
+		break;
+
+	case TPACPI_FAN_WR_ACPI_SFAN:
+		if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", 0x00))
+			rc = -EIO;
+		else
+			fan_control_desired_level = 0;
+		break;
+
+	default:
+		rc = -ENXIO;
+	}
+
+
+	mutex_unlock(&fan_mutex);
+	return rc;
+}
+
+static int fan_set_speed(int speed)
+{
+	int rc;
+
+	if (!fan_control_allowed)
+		return -EPERM;
+
+	if (mutex_lock_killable(&fan_mutex))
+		return -ERESTARTSYS;
+
+	rc = 0;
+	switch (fan_control_access_mode) {
+	case TPACPI_FAN_WR_ACPI_FANS:
+		if (speed >= 0 && speed <= 65535) {
+			if (!acpi_evalf(fans_handle, NULL, NULL, "vddd",
+					speed, speed, speed))
+				rc = -EIO;
+		} else
+			rc = -EINVAL;
+		break;
+
+	default:
+		rc = -ENXIO;
+	}
+
+	mutex_unlock(&fan_mutex);
+	return rc;
+}
+
+static void fan_watchdog_reset(void)
+{
+	static int fan_watchdog_active;
+
+	if (fan_control_access_mode == TPACPI_FAN_WR_NONE)
+		return;
+
+	if (fan_watchdog_active)
+		cancel_delayed_work(&fan_watchdog_task);
+
+	if (fan_watchdog_maxinterval > 0 &&
+	    tpacpi_lifecycle != TPACPI_LIFE_EXITING) {
+		fan_watchdog_active = 1;
+		if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task,
+				msecs_to_jiffies(fan_watchdog_maxinterval
+						 * 1000))) {
+			printk(TPACPI_ERR
+			       "failed to queue the fan watchdog, "
+			       "watchdog will not trigger\n");
+		}
+	} else
+		fan_watchdog_active = 0;
+}
+
+static void fan_watchdog_fire(struct work_struct *ignored)
+{
+	int rc;
+
+	if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING)
+		return;
+
+	printk(TPACPI_NOTICE "fan watchdog: enabling fan\n");
+	rc = fan_set_enable();
+	if (rc < 0) {
+		printk(TPACPI_ERR "fan watchdog: error %d while enabling fan, "
+			"will try again later...\n", -rc);
+		/* reschedule for later */
+		fan_watchdog_reset();
+	}
+}
+
+/*
+ * SYSFS fan layout: hwmon compatible (device)
+ *
+ * pwm*_enable:
+ * 	0: "disengaged" mode
+ * 	1: manual mode
+ * 	2: native EC "auto" mode (recommended, hardware default)
+ *
+ * pwm*: set speed in manual mode, ignored otherwise.
+ * 	0 is level 0; 255 is level 7. Intermediate values are mapped by
+ * 	linear interpolation.
+ *
+ * fan*_input: tachometer reading, RPM
+ *
+ *
+ * SYSFS fan layout: extensions
+ *
+ * fan_watchdog (driver):
+ * 	fan watchdog interval in seconds, 0 disables (default), max 120
+ */
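+
+/*
+ * Scaling example, derived from the conversions used below: reading a
+ * fan at level 7 reports pwm1 = (7 * 255) / 7 = 255 and level 4 reports
+ * (4 * 255) / 7 = 145; writing pwm1 = 255 selects level (255 >> 5) & 7
+ * = 7, while any value below 32 selects level 0.
+ */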
+
+/* sysfs fan pwm1_enable ----------------------------------------------- */
+static ssize_t fan_pwm1_enable_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	int res, mode;
+	u8 status;
+
+	res = fan_get_status_safe(&status);
+	if (res)
+		return res;
+
+	if (status & TP_EC_FAN_FULLSPEED) {
+		mode = 0;
+	} else if (status & TP_EC_FAN_AUTO) {
+		mode = 2;
+	} else
+		mode = 1;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", mode);
+}
+
+static ssize_t fan_pwm1_enable_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	unsigned long t;
+	int res, level;
+
+	if (parse_strtoul(buf, 2, &t))
+		return -EINVAL;
+
+	switch (t) {
+	case 0:
+		level = TP_EC_FAN_FULLSPEED;
+		break;
+	case 1:
+		level = TPACPI_FAN_LAST_LEVEL;
+		break;
+	case 2:
+		level = TP_EC_FAN_AUTO;
+		break;
+	case 3:
+		/* reserved for software-controlled auto mode */
+		return -ENOSYS;
+	default:
+		return -EINVAL;
+	}
+
+	res = fan_set_level_safe(level);
+	if (res == -ENXIO)
+		return -EINVAL;
+	else if (res < 0)
+		return res;
+
+	fan_watchdog_reset();
+
+	return count;
+}
+
+static struct device_attribute dev_attr_fan_pwm1_enable =
+	__ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
+		fan_pwm1_enable_show, fan_pwm1_enable_store);
+
+/* sysfs fan pwm1 ------------------------------------------------------ */
+static ssize_t fan_pwm1_show(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	int res;
+	u8 status;
+
+	res = fan_get_status_safe(&status);
+	if (res)
+		return res;
+
+	if ((status &
+	     (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) != 0)
+		status = fan_control_desired_level;
+
+	if (status > 7)
+		status = 7;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", (status * 255) / 7);
+}
+
+static ssize_t fan_pwm1_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	unsigned long s;
+	int rc;
+	u8 status, newlevel;
+
+	if (parse_strtoul(buf, 255, &s))
+		return -EINVAL;
+
+	/* scale down from 0-255 to 0-7 */
+	newlevel = (s >> 5) & 0x07;
+
+	if (mutex_lock_killable(&fan_mutex))
+		return -ERESTARTSYS;
+
+	rc = fan_get_status(&status);
+	if (!rc && (status &
+		    (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) == 0) {
+		rc = fan_set_level(newlevel);
+		if (rc == -ENXIO)
+			rc = -EINVAL;
+		else if (!rc) {
+			fan_update_desired_level(newlevel);
+			fan_watchdog_reset();
+		}
+	}
+
+	mutex_unlock(&fan_mutex);
+	return (rc)? rc : count;
+}
+
+static struct device_attribute dev_attr_fan_pwm1 =
+	__ATTR(pwm1, S_IWUSR | S_IRUGO,
+		fan_pwm1_show, fan_pwm1_store);
+
+/* sysfs fan fan1_input ------------------------------------------------ */
+static ssize_t fan_fan1_input_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	int res;
+	unsigned int speed;
+
+	res = fan_get_speed(&speed);
+	if (res < 0)
+		return res;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", speed);
+}
+
+static struct device_attribute dev_attr_fan_fan1_input =
+	__ATTR(fan1_input, S_IRUGO,
+		fan_fan1_input_show, NULL);
+
+/* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */
+static ssize_t fan_fan_watchdog_show(struct device_driver *drv,
+				     char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", fan_watchdog_maxinterval);
+}
+
+static ssize_t fan_fan_watchdog_store(struct device_driver *drv,
+				      const char *buf, size_t count)
+{
+	unsigned long t;
+
+	if (parse_strtoul(buf, 120, &t))
+		return -EINVAL;
+
+	if (!fan_control_allowed)
+		return -EPERM;
+
+	fan_watchdog_maxinterval = t;
+	fan_watchdog_reset();
+
+	return count;
+}
+
+static DRIVER_ATTR(fan_watchdog, S_IWUSR | S_IRUGO,
+		fan_fan_watchdog_show, fan_fan_watchdog_store);
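+
+/*
+ * Usage example: writing "60" to the fan_watchdog attribute (exposed in
+ * the hwmon platform driver's sysfs directory) arms a 60 second watchdog
+ * that re-enables the fan if no further fan command is issued in time;
+ * writing "0" disables the watchdog.
+ */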
+
+/* --------------------------------------------------------------------- */
+static struct attribute *fan_attributes[] = {
+	&dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr,
+	&dev_attr_fan_fan1_input.attr,
+	NULL
+};
+
+static const struct attribute_group fan_attr_group = {
+	.attrs = fan_attributes,
+};
+
+static int __init fan_init(struct ibm_init_struct *iibm)
+{
+	int rc;
+
+	vdbg_printk(TPACPI_DBG_INIT, "initializing fan subdriver\n");
+
+	mutex_init(&fan_mutex);
+	fan_status_access_mode = TPACPI_FAN_NONE;
+	fan_control_access_mode = TPACPI_FAN_WR_NONE;
+	fan_control_commands = 0;
+	fan_watchdog_maxinterval = 0;
+	tp_features.fan_ctrl_status_undef = 0;
+	fan_control_desired_level = 7;
+
+	TPACPI_ACPIHANDLE_INIT(fans);
+	TPACPI_ACPIHANDLE_INIT(gfan);
+	TPACPI_ACPIHANDLE_INIT(sfan);
+
+	if (gfan_handle) {
+		/* 570, 600e/x, 770e, 770x */
+		fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
+	} else {
+		/* all other ThinkPads: note that even old-style
+		 * ThinkPad ECs support the fan control register */
+		if (likely(acpi_ec_read(fan_status_offset,
+					&fan_control_initial_status))) {
+			fan_status_access_mode = TPACPI_FAN_RD_TPEC;
+			fan_quirk1_detect();
+		} else {
+			printk(TPACPI_ERR
+			       "ThinkPad ACPI EC access misbehaving, "
+			       "fan status and control unavailable\n");
+			return 1;
+		}
+	}
+
+	if (sfan_handle) {
+		/* 570, 770x-JL */
+		fan_control_access_mode = TPACPI_FAN_WR_ACPI_SFAN;
+		fan_control_commands |=
+		    TPACPI_FAN_CMD_LEVEL | TPACPI_FAN_CMD_ENABLE;
+	} else {
+		if (!gfan_handle) {
+			/* gfan without sfan means no fan control */
+			/* all other models implement TP EC 0x2f control */
+
+			if (fans_handle) {
+				/* X31, X40, X41 */
+				fan_control_access_mode =
+				    TPACPI_FAN_WR_ACPI_FANS;
+				fan_control_commands |=
+				    TPACPI_FAN_CMD_SPEED |
+				    TPACPI_FAN_CMD_LEVEL |
+				    TPACPI_FAN_CMD_ENABLE;
+			} else {
+				fan_control_access_mode = TPACPI_FAN_WR_TPEC;
+				fan_control_commands |=
+				    TPACPI_FAN_CMD_LEVEL |
+				    TPACPI_FAN_CMD_ENABLE;
+			}
+		}
+	}
+
+	vdbg_printk(TPACPI_DBG_INIT, "fan is %s, modes %d, %d\n",
+		str_supported(fan_status_access_mode != TPACPI_FAN_NONE ||
+		  fan_control_access_mode != TPACPI_FAN_WR_NONE),
+		fan_status_access_mode, fan_control_access_mode);
+
+	/* fan control master switch */
+	if (!fan_control_allowed) {
+		fan_control_access_mode = TPACPI_FAN_WR_NONE;
+		fan_control_commands = 0;
+		dbg_printk(TPACPI_DBG_INIT,
+			   "fan control features disabled by parameter\n");
+	}
+
+	/* update fan_control_desired_level */
+	if (fan_status_access_mode != TPACPI_FAN_NONE)
+		fan_get_status_safe(NULL);
+
+	if (fan_status_access_mode != TPACPI_FAN_NONE ||
+	    fan_control_access_mode != TPACPI_FAN_WR_NONE) {
+		rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
+					 &fan_attr_group);
+		if (rc < 0)
+			return rc;
+
+		rc = driver_create_file(&tpacpi_hwmon_pdriver.driver,
+					&driver_attr_fan_watchdog);
+		if (rc < 0) {
+			sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
+					&fan_attr_group);
+			return rc;
+		}
+		return 0;
+	} else
+		return 1;
+}
+
+static void fan_exit(void)
+{
+	vdbg_printk(TPACPI_DBG_EXIT,
+		    "cancelling any pending fan watchdog tasks\n");
+
+	/* FIXME: can we really do this unconditionally? */
+	sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, &fan_attr_group);
+	driver_remove_file(&tpacpi_hwmon_pdriver.driver,
+			   &driver_attr_fan_watchdog);
+
+	cancel_delayed_work(&fan_watchdog_task);
+	flush_workqueue(tpacpi_wq);
+}
+
+static void fan_suspend(pm_message_t state)
+{
+	int rc;
+
+	if (!fan_control_allowed)
+		return;
+
+	/* Store fan status in cache */
+	fan_control_resume_level = 0;
+	rc = fan_get_status_safe(&fan_control_resume_level);
+	if (rc < 0)
+		printk(TPACPI_NOTICE
+			"failed to read fan level for later "
+			"restore during resume: %d\n", rc);
+
+	/* if it is undefined, don't attempt to restore it.
+	 * KEEP THIS LAST */
+	if (tp_features.fan_ctrl_status_undef)
+		fan_control_resume_level = 0;
+}
+
+static void fan_resume(void)
+{
+	u8 current_level = 7;
+	bool do_set = false;
+	int rc;
+
+	/* DSDT *always* updates status on resume */
+	tp_features.fan_ctrl_status_undef = 0;
+
+	if (!fan_control_allowed ||
+	    !fan_control_resume_level ||
+	    (fan_get_status_safe(&current_level) < 0))
+		return;
+
+	switch (fan_control_access_mode) {
+	case TPACPI_FAN_WR_ACPI_SFAN:
+		/* never decrease fan level */
+		do_set = (fan_control_resume_level > current_level);
+		break;
+	case TPACPI_FAN_WR_ACPI_FANS:
+	case TPACPI_FAN_WR_TPEC:
+		/* never decrease fan level, scale is:
+		 * TP_EC_FAN_FULLSPEED > 7 >= TP_EC_FAN_AUTO
+		 *
+		 * We expect the firmware to set either 7 or AUTO, but we
+		 * handle FULLSPEED out of paranoia.
+		 *
+		 * So, we can safely only restore FULLSPEED or 7, anything
+		 * else could slow the fan.  Restoring AUTO is useless; at
+		 * best it is exactly what the DSDT already set (AUTO is the
+		 * slowest setting in this scale).
+		 *
+		 * Always keep in mind that the DSDT *will* have set the
+		 * fans to what the vendor supposes is the best level.  We
+		 * muck with it only to speed the fan up.
+		 */
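+		/*
+		 * Example: if the pre-suspend level was 0x47
+		 * (TP_EC_FAN_FULLSPEED | 7) and the firmware left the fan
+		 * in plain auto mode on resume, the level is written back;
+		 * if the fan is already at FULLSPEED, nothing is written.
+		 */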
+		if (fan_control_resume_level != 7 &&
+		    !(fan_control_resume_level & TP_EC_FAN_FULLSPEED))
+			return;
+		else
+			do_set = !(current_level & TP_EC_FAN_FULLSPEED) &&
+				 (current_level != fan_control_resume_level);
+		break;
+	default:
+		return;
+	}
+	if (do_set) {
+		printk(TPACPI_NOTICE
+			"restoring fan level to 0x%02x\n",
+			fan_control_resume_level);
+		rc = fan_set_level_safe(fan_control_resume_level);
+		if (rc < 0)
+			printk(TPACPI_NOTICE
+				"failed to restore fan level: %d\n", rc);
+	}
+}
+
+static int fan_read(char *p)
+{
+	int len = 0;
+	int rc;
+	u8 status;
+	unsigned int speed = 0;
+
+	switch (fan_status_access_mode) {
+	case TPACPI_FAN_RD_ACPI_GFAN:
+		/* 570, 600e/x, 770e, 770x */
+		rc = fan_get_status_safe(&status);
+		if (rc < 0)
+			return rc;
+
+		len += sprintf(p + len, "status:\t\t%s\n"
+			       "level:\t\t%d\n",
+			       (status != 0) ? "enabled" : "disabled", status);
+		break;
+
+	case TPACPI_FAN_RD_TPEC:
+		/* all except 570, 600e/x, 770e, 770x */
+		rc = fan_get_status_safe(&status);
+		if (rc < 0)
+			return rc;
+
+		len += sprintf(p + len, "status:\t\t%s\n",
+			       (status != 0) ? "enabled" : "disabled");
+
+		rc = fan_get_speed(&speed);
+		if (rc < 0)
+			return rc;
+
+		len += sprintf(p + len, "speed:\t\t%d\n", speed);
+
+		if (status & TP_EC_FAN_FULLSPEED)
+			/* Disengaged mode takes precedence */
+			len += sprintf(p + len, "level:\t\tdisengaged\n");
+		else if (status & TP_EC_FAN_AUTO)
+			len += sprintf(p + len, "level:\t\tauto\n");
+		else
+			len += sprintf(p + len, "level:\t\t%d\n", status);
+		break;
+
+	case TPACPI_FAN_NONE:
+	default:
+		len += sprintf(p + len, "status:\t\tnot supported\n");
+	}
+
+	if (fan_control_commands & TPACPI_FAN_CMD_LEVEL) {
+		len += sprintf(p + len, "commands:\tlevel <level>");
+
+		switch (fan_control_access_mode) {
+		case TPACPI_FAN_WR_ACPI_SFAN:
+			len += sprintf(p + len, " (<level> is 0-7)\n");
+			break;
+
+		default:
+			len += sprintf(p + len, " (<level> is 0-7, "
+				       "auto, disengaged, full-speed)\n");
+			break;
+		}
+	}
+
+	if (fan_control_commands & TPACPI_FAN_CMD_ENABLE)
+		len += sprintf(p + len, "commands:\tenable, disable\n"
+			       "commands:\twatchdog <timeout> (<timeout> "
+			       "is 0 (off), 1-120 (seconds))\n");
+
+	if (fan_control_commands & TPACPI_FAN_CMD_SPEED)
+		len += sprintf(p + len, "commands:\tspeed <speed>"
+			       " (<speed> is 0-65535)\n");
+
+	return len;
+}
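+
+/*
+ * The procfs commands parsed below match the list printed by fan_read().
+ * For example, assuming the usual /proc/acpi/ibm location of this
+ * driver's procfs directory:
+ *
+ *	echo level auto > /proc/acpi/ibm/fan
+ *	echo watchdog 60 > /proc/acpi/ibm/fan
+ */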
+
+static int fan_write_cmd_level(const char *cmd, int *rc)
+{
+	int level;
+
+	if (strlencmp(cmd, "level auto") == 0)
+		level = TP_EC_FAN_AUTO;
+	else if ((strlencmp(cmd, "level disengaged") == 0) ||
+			(strlencmp(cmd, "level full-speed") == 0))
+		level = TP_EC_FAN_FULLSPEED;
+	else if (sscanf(cmd, "level %d", &level) != 1)
+		return 0;
+
+	*rc = fan_set_level_safe(level);
+	if (*rc == -ENXIO)
+		printk(TPACPI_ERR "level command accepted for unsupported "
+		       "access mode %d\n", fan_control_access_mode);
+
+	return 1;
+}
+
+static int fan_write_cmd_enable(const char *cmd, int *rc)
+{
+	if (strlencmp(cmd, "enable") != 0)
+		return 0;
+
+	*rc = fan_set_enable();
+	if (*rc == -ENXIO)
+		printk(TPACPI_ERR "enable command accepted for unsupported "
+		       "access mode %d\n", fan_control_access_mode);
+
+	return 1;
+}
+
+static int fan_write_cmd_disable(const char *cmd, int *rc)
+{
+	if (strlencmp(cmd, "disable") != 0)
+		return 0;
+
+	*rc = fan_set_disable();
+	if (*rc == -ENXIO)
+		printk(TPACPI_ERR "disable command accepted for unsupported "
+		       "access mode %d\n", fan_control_access_mode);
+
+	return 1;
+}
+
+static int fan_write_cmd_speed(const char *cmd, int *rc)
+{
+	int speed;
+
+	/* TODO:
+	 * Support speed <low> <medium> <high> ? */
+
+	if (sscanf(cmd, "speed %d", &speed) != 1)
+		return 0;
+
+	*rc = fan_set_speed(speed);
+	if (*rc == -ENXIO)
+		printk(TPACPI_ERR "speed command accepted for unsupported "
+		       "access mode %d\n", fan_control_access_mode);
+
+	return 1;
+}
+
+static int fan_write_cmd_watchdog(const char *cmd, int *rc)
+{
+	int interval;
+
+	if (sscanf(cmd, "watchdog %d", &interval) != 1)
+		return 0;
+
+	if (interval < 0 || interval > 120)
+		*rc = -EINVAL;
+	else
+		fan_watchdog_maxinterval = interval;
+
+	return 1;
+}
+
+static int fan_write(char *buf)
+{
+	char *cmd;
+	int rc = 0;
+
+	while (!rc && (cmd = next_cmd(&buf))) {
+		if (!((fan_control_commands & TPACPI_FAN_CMD_LEVEL) &&
+		      fan_write_cmd_level(cmd, &rc)) &&
+		    !((fan_control_commands & TPACPI_FAN_CMD_ENABLE) &&
+		      (fan_write_cmd_enable(cmd, &rc) ||
+		       fan_write_cmd_disable(cmd, &rc) ||
+		       fan_write_cmd_watchdog(cmd, &rc))) &&
+		    !((fan_control_commands & TPACPI_FAN_CMD_SPEED) &&
+		      fan_write_cmd_speed(cmd, &rc))
+		    )
+			rc = -EINVAL;
+		else if (!rc)
+			fan_watchdog_reset();
+	}
+
+	return rc;
+}
+
+static struct ibm_struct fan_driver_data = {
+	.name = "fan",
+	.read = fan_read,
+	.write = fan_write,
+	.exit = fan_exit,
+	.suspend = fan_suspend,
+	.resume = fan_resume,
+};
+
+/****************************************************************************
+ ****************************************************************************
+ *
+ * Infrastructure
+ *
+ ****************************************************************************
+ ****************************************************************************/
+
+/* sysfs name ---------------------------------------------------------- */
+static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME);
+}
+
+static struct device_attribute dev_attr_thinkpad_acpi_pdev_name =
+	__ATTR(name, S_IRUGO, thinkpad_acpi_pdev_name_show, NULL);
+
+/* --------------------------------------------------------------------- */
+
+/* /proc support */
+static struct proc_dir_entry *proc_dir;
+
+/*
+ * Module and infrastructure probe, init and exit handling
+ */
+
+static int force_load;
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUG
+static const char * __init str_supported(int is_supported)
+{
+	static char text_unsupported[] __initdata = "not supported";
+
+	return (is_supported)? &text_unsupported[4] : &text_unsupported[0];
+}
+#endif /* CONFIG_THINKPAD_ACPI_DEBUG */
+
+static void ibm_exit(struct ibm_struct *ibm)
+{
+	dbg_printk(TPACPI_DBG_EXIT, "removing %s\n", ibm->name);
+
+	list_del_init(&ibm->all_drivers);
+
+	if (ibm->flags.acpi_notify_installed) {
+		dbg_printk(TPACPI_DBG_EXIT,
+			"%s: acpi_remove_notify_handler\n", ibm->name);
+		BUG_ON(!ibm->acpi);
+		acpi_remove_notify_handler(*ibm->acpi->handle,
+					   ibm->acpi->type,
+					   dispatch_acpi_notify);
+		ibm->flags.acpi_notify_installed = 0;
+	}
+
+	if (ibm->flags.proc_created) {
+		dbg_printk(TPACPI_DBG_EXIT,
+			"%s: remove_proc_entry\n", ibm->name);
+		remove_proc_entry(ibm->name, proc_dir);
+		ibm->flags.proc_created = 0;
+	}
+
+	if (ibm->flags.acpi_driver_registered) {
+		dbg_printk(TPACPI_DBG_EXIT,
+			"%s: acpi_bus_unregister_driver\n", ibm->name);
+		BUG_ON(!ibm->acpi);
+		acpi_bus_unregister_driver(ibm->acpi->driver);
+		kfree(ibm->acpi->driver);
+		ibm->acpi->driver = NULL;
+		ibm->flags.acpi_driver_registered = 0;
+	}
+
+	if (ibm->flags.init_called && ibm->exit) {
+		ibm->exit();
+		ibm->flags.init_called = 0;
+	}
+
+	dbg_printk(TPACPI_DBG_INIT, "finished removing %s\n", ibm->name);
+}
+
+static int __init ibm_init(struct ibm_init_struct *iibm)
+{
+	int ret;
+	struct ibm_struct *ibm = iibm->data;
+	struct proc_dir_entry *entry;
+
+	BUG_ON(ibm == NULL);
+
+	INIT_LIST_HEAD(&ibm->all_drivers);
+
+	if (ibm->flags.experimental && !experimental)
+		return 0;
+
+	dbg_printk(TPACPI_DBG_INIT,
+		"probing for %s\n", ibm->name);
+
+	if (iibm->init) {
+		ret = iibm->init(iibm);
+		if (ret > 0)
+			return 0;	/* probe failed */
+		if (ret)
+			return ret;
+
+		ibm->flags.init_called = 1;
+	}
+
+	if (ibm->acpi) {
+		if (ibm->acpi->hid) {
+			ret = register_tpacpi_subdriver(ibm);
+			if (ret)
+				goto err_out;
+		}
+
+		if (ibm->acpi->notify) {
+			ret = setup_acpi_notify(ibm);
+			if (ret == -ENODEV) {
+				printk(TPACPI_NOTICE "disabling subdriver %s\n",
+					ibm->name);
+				ret = 0;
+				goto err_out;
+			}
+			if (ret < 0)
+				goto err_out;
+		}
+	}
+
+	dbg_printk(TPACPI_DBG_INIT,
+		"%s installed\n", ibm->name);
+
+	if (ibm->read) {
+		entry = create_proc_entry(ibm->name,
+					  S_IFREG | S_IRUGO | S_IWUSR,
+					  proc_dir);
+		if (!entry) {
+			printk(TPACPI_ERR "unable to create proc entry %s\n",
+			       ibm->name);
+			ret = -ENODEV;
+			goto err_out;
+		}
+		entry->owner = THIS_MODULE;
+		entry->data = ibm;
+		entry->read_proc = &dispatch_procfs_read;
+		if (ibm->write)
+			entry->write_proc = &dispatch_procfs_write;
+		ibm->flags.proc_created = 1;
+	}
+
+	list_add_tail(&ibm->all_drivers, &tpacpi_all_drivers);
+
+	return 0;
+
+err_out:
+	dbg_printk(TPACPI_DBG_INIT,
+		"%s: at error exit path with result %d\n",
+		ibm->name, ret);
+
+	ibm_exit(ibm);
+	return (ret < 0)? ret : 0;
+}
+
+/* Probing */
+
+/* returns 0 - probe ok, or < 0 - probe error.
+ * Probe ok doesn't mean thinkpad found.
+ * On error, kfree() cleanup on tp->* is not performed, caller must do it */
+static int __must_check __init get_thinkpad_model_data(
+						struct thinkpad_id_data *tp)
+{
+	const struct dmi_device *dev = NULL;
+	char ec_fw_string[18];
+	char const *s;
+
+	if (!tp)
+		return -EINVAL;
+
+	memset(tp, 0, sizeof(*tp));
+
+	if (dmi_name_in_vendors("IBM"))
+		tp->vendor = PCI_VENDOR_ID_IBM;
+	else if (dmi_name_in_vendors("LENOVO"))
+		tp->vendor = PCI_VENDOR_ID_LENOVO;
+	else
+		return 0;
+
+	s = dmi_get_system_info(DMI_BIOS_VERSION);
+	tp->bios_version_str = kstrdup(s, GFP_KERNEL);
+	if (s && !tp->bios_version_str)
+		return -ENOMEM;
+	if (!tp->bios_version_str)
+		return 0;
+	tp->bios_model = tp->bios_version_str[0]
+			 | (tp->bios_version_str[1] << 8);
+
+	/*
+	 * ThinkPad T23 or newer, A31 or newer, R50e or newer,
+	 * X32 or newer, all Z series;  Some models must have an
+	 * up-to-date BIOS or they will not be detected.
+	 *
+	 * See http://thinkwiki.org/wiki/List_of_DMI_IDs
+	 */
+	while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev))) {
+		if (sscanf(dev->name,
+			   "IBM ThinkPad Embedded Controller -[%17c",
+			   ec_fw_string) == 1) {
+			ec_fw_string[sizeof(ec_fw_string) - 1] = 0;
+			ec_fw_string[strcspn(ec_fw_string, " ]")] = 0;
+
+			tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL);
+			if (!tp->ec_version_str)
+				return -ENOMEM;
+			tp->ec_model = ec_fw_string[0]
+					| (ec_fw_string[1] << 8);
+			break;
+		}
+	}
+
+	s = dmi_get_system_info(DMI_PRODUCT_VERSION);
+	if (s && !strnicmp(s, "ThinkPad", 8)) {
+		tp->model_str = kstrdup(s, GFP_KERNEL);
+		if (!tp->model_str)
+			return -ENOMEM;
+	}
+
+	s = dmi_get_system_info(DMI_PRODUCT_NAME);
+	tp->nummodel_str = kstrdup(s, GFP_KERNEL);
+	if (s && !tp->nummodel_str)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int __init probe_for_thinkpad(void)
+{
+	int is_thinkpad;
+
+	if (acpi_disabled)
+		return -ENODEV;
+
+	/*
+	 * Non-ancient models have better DMI tagging, but very old models
+	 * don't.
+	 */
+	is_thinkpad = (thinkpad_id.model_str != NULL);
+
+	/* ec is required because many other handles are relative to it */
+	TPACPI_ACPIHANDLE_INIT(ec);
+	if (!ec_handle) {
+		if (is_thinkpad)
+			printk(TPACPI_ERR
+				"Not yet supported ThinkPad detected!\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Risks a regression on very old machines, but reduces potential
+	 * false positives a damn great deal
+	 */
+	if (!is_thinkpad)
+		is_thinkpad = (thinkpad_id.vendor == PCI_VENDOR_ID_IBM);
+
+	if (!is_thinkpad && !force_load)
+		return -ENODEV;
+
+	return 0;
+}
+
+
+/* Module init, exit, parameters */
+
+static struct ibm_init_struct ibms_init[] __initdata = {
+	{
+		.init = thinkpad_acpi_driver_init,
+		.data = &thinkpad_acpi_driver_data,
+	},
+	{
+		.init = hotkey_init,
+		.data = &hotkey_driver_data,
+	},
+	{
+		.init = bluetooth_init,
+		.data = &bluetooth_driver_data,
+	},
+	{
+		.init = wan_init,
+		.data = &wan_driver_data,
+	},
+	{
+		.init = uwb_init,
+		.data = &uwb_driver_data,
+	},
+#ifdef CONFIG_THINKPAD_ACPI_VIDEO
+	{
+		.init = video_init,
+		.data = &video_driver_data,
+	},
+#endif
+	{
+		.init = light_init,
+		.data = &light_driver_data,
+	},
+#ifdef CONFIG_THINKPAD_ACPI_DOCK
+	{
+		.init = dock_init,
+		.data = &dock_driver_data[0],
+	},
+	{
+		.init = dock_init2,
+		.data = &dock_driver_data[1],
+	},
+#endif
+#ifdef CONFIG_THINKPAD_ACPI_BAY
+	{
+		.init = bay_init,
+		.data = &bay_driver_data,
+	},
+#endif
+	{
+		.init = cmos_init,
+		.data = &cmos_driver_data,
+	},
+	{
+		.init = led_init,
+		.data = &led_driver_data,
+	},
+	{
+		.init = beep_init,
+		.data = &beep_driver_data,
+	},
+	{
+		.init = thermal_init,
+		.data = &thermal_driver_data,
+	},
+	{
+		.data = &ecdump_driver_data,
+	},
+	{
+		.init = brightness_init,
+		.data = &brightness_driver_data,
+	},
+	{
+		.data = &volume_driver_data,
+	},
+	{
+		.init = fan_init,
+		.data = &fan_driver_data,
+	},
+};
+
+static int __init set_ibm_param(const char *val, struct kernel_param *kp)
+{
+	unsigned int i;
+	struct ibm_struct *ibm;
+
+	if (!kp || !kp->name || !val)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(ibms_init); i++) {
+		ibm = ibms_init[i].data;
+		WARN_ON(ibm == NULL);
+
+		if (!ibm || !ibm->name)
+			continue;
+
+		if (strcmp(ibm->name, kp->name) == 0 && ibm->write) {
+			if (strlen(val) > sizeof(ibms_init[i].param) - 2)
+				return -ENOSPC;
+			strcpy(ibms_init[i].param, val);
+			strcat(ibms_init[i].param, ",");
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+module_param(experimental, int, 0);
+MODULE_PARM_DESC(experimental,
+		 "Enables experimental features when non-zero");
+
+module_param_named(debug, dbg_level, uint, 0);
+MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
+
+module_param(force_load, bool, 0);
+MODULE_PARM_DESC(force_load,
+		 "Attempts to load the driver even on a "
+		 "mis-identified ThinkPad when true");
+
+module_param_named(fan_control, fan_control_allowed, bool, 0);
+MODULE_PARM_DESC(fan_control,
+		 "Enables setting fan parameters features when true");
+
+module_param_named(brightness_mode, brightness_mode, int, 0);
+MODULE_PARM_DESC(brightness_mode,
+		 "Selects brightness control strategy: "
+		 "0=auto, 1=EC, 2=CMOS, 3=both");
+
+module_param(brightness_enable, uint, 0);
+MODULE_PARM_DESC(brightness_enable,
+		 "Enables backlight control when 1, disables when 0");
+
+module_param(hotkey_report_mode, uint, 0);
+MODULE_PARM_DESC(hotkey_report_mode,
+		 "used for backwards compatibility with userspace, "
+		 "see documentation");
+
+#define TPACPI_PARAM(feature) \
+	module_param_call(feature, set_ibm_param, NULL, NULL, 0); \
+	MODULE_PARM_DESC(feature, "Simulates thinkpad-acpi procfs command " \
+			 "at module load, see documentation")
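+
+/*
+ * For example, loading the module (usually named thinkpad_acpi) with
+ * fan="level auto" stores the string via set_ibm_param() and runs it as
+ * a procfs command once during initialization.
+ */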
+
+TPACPI_PARAM(hotkey);
+TPACPI_PARAM(bluetooth);
+TPACPI_PARAM(video);
+TPACPI_PARAM(light);
+#ifdef CONFIG_THINKPAD_ACPI_DOCK
+TPACPI_PARAM(dock);
+#endif
+#ifdef CONFIG_THINKPAD_ACPI_BAY
+TPACPI_PARAM(bay);
+#endif /* CONFIG_THINKPAD_ACPI_BAY */
+TPACPI_PARAM(cmos);
+TPACPI_PARAM(led);
+TPACPI_PARAM(beep);
+TPACPI_PARAM(ecdump);
+TPACPI_PARAM(brightness);
+TPACPI_PARAM(volume);
+TPACPI_PARAM(fan);
+
+#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
+module_param(dbg_wlswemul, uint, 0);
+MODULE_PARM_DESC(dbg_wlswemul, "Enables WLSW emulation");
+module_param_named(wlsw_state, tpacpi_wlsw_emulstate, bool, 0);
+MODULE_PARM_DESC(wlsw_state,
+		 "Initial state of the emulated WLSW switch");
+
+module_param(dbg_bluetoothemul, uint, 0);
+MODULE_PARM_DESC(dbg_bluetoothemul, "Enables bluetooth switch emulation");
+module_param_named(bluetooth_state, tpacpi_bluetooth_emulstate, bool, 0);
+MODULE_PARM_DESC(bluetooth_state,
+		 "Initial state of the emulated bluetooth switch");
+
+module_param(dbg_wwanemul, uint, 0);
+MODULE_PARM_DESC(dbg_wwanemul, "Enables WWAN switch emulation");
+module_param_named(wwan_state, tpacpi_wwan_emulstate, bool, 0);
+MODULE_PARM_DESC(wwan_state,
+		 "Initial state of the emulated WWAN switch");
+
+module_param(dbg_uwbemul, uint, 0);
+MODULE_PARM_DESC(dbg_uwbemul, "Enables UWB switch emulation");
+module_param_named(uwb_state, tpacpi_uwb_emulstate, bool, 0);
+MODULE_PARM_DESC(uwb_state,
+		 "Initial state of the emulated UWB switch");
+#endif
+
+static void thinkpad_acpi_module_exit(void)
+{
+	struct ibm_struct *ibm, *itmp;
+
+	tpacpi_lifecycle = TPACPI_LIFE_EXITING;
+
+	list_for_each_entry_safe_reverse(ibm, itmp,
+					 &tpacpi_all_drivers,
+					 all_drivers) {
+		ibm_exit(ibm);
+	}
+
+	dbg_printk(TPACPI_DBG_INIT, "finished subdriver exit path...\n");
+
+	if (tpacpi_inputdev) {
+		if (tp_features.input_device_registered)
+			input_unregister_device(tpacpi_inputdev);
+		else
+			input_free_device(tpacpi_inputdev);
+	}
+
+	if (tpacpi_hwmon)
+		hwmon_device_unregister(tpacpi_hwmon);
+
+	if (tp_features.sensors_pdev_attrs_registered)
+		device_remove_file(&tpacpi_sensors_pdev->dev,
+				   &dev_attr_thinkpad_acpi_pdev_name);
+	if (tpacpi_sensors_pdev)
+		platform_device_unregister(tpacpi_sensors_pdev);
+	if (tpacpi_pdev)
+		platform_device_unregister(tpacpi_pdev);
+
+	if (tp_features.sensors_pdrv_attrs_registered)
+		tpacpi_remove_driver_attributes(&tpacpi_hwmon_pdriver.driver);
+	if (tp_features.platform_drv_attrs_registered)
+		tpacpi_remove_driver_attributes(&tpacpi_pdriver.driver);
+
+	if (tp_features.sensors_pdrv_registered)
+		platform_driver_unregister(&tpacpi_hwmon_pdriver);
+
+	if (tp_features.platform_drv_registered)
+		platform_driver_unregister(&tpacpi_pdriver);
+
+	if (proc_dir)
+		remove_proc_entry(TPACPI_PROC_DIR, acpi_root_dir);
+
+	if (tpacpi_wq)
+		destroy_workqueue(tpacpi_wq);
+
+	kfree(thinkpad_id.bios_version_str);
+	kfree(thinkpad_id.ec_version_str);
+	kfree(thinkpad_id.model_str);
+}
+
+
+static int __init thinkpad_acpi_module_init(void)
+{
+	int ret, i;
+
+	tpacpi_lifecycle = TPACPI_LIFE_INIT;
+
+	/* Parameter checking */
+	if (hotkey_report_mode > 2)
+		return -EINVAL;
+
+	/* Driver-level probe */
+
+	ret = get_thinkpad_model_data(&thinkpad_id);
+	if (ret) {
+		printk(TPACPI_ERR
+			"unable to get DMI data: %d\n", ret);
+		thinkpad_acpi_module_exit();
+		return ret;
+	}
+	ret = probe_for_thinkpad();
+	if (ret) {
+		thinkpad_acpi_module_exit();
+		return ret;
+	}
+
+	/* Driver initialization */
+
+	TPACPI_ACPIHANDLE_INIT(ecrd);
+	TPACPI_ACPIHANDLE_INIT(ecwr);
+
+	tpacpi_wq = create_singlethread_workqueue(TPACPI_WORKQUEUE_NAME);
+	if (!tpacpi_wq) {
+		thinkpad_acpi_module_exit();
+		return -ENOMEM;
+	}
+
+	proc_dir = proc_mkdir(TPACPI_PROC_DIR, acpi_root_dir);
+	if (!proc_dir) {
+		printk(TPACPI_ERR
+		       "unable to create proc dir " TPACPI_PROC_DIR "\n");
+		thinkpad_acpi_module_exit();
+		return -ENODEV;
+	}
+	proc_dir->owner = THIS_MODULE;
+
+	ret = platform_driver_register(&tpacpi_pdriver);
+	if (ret) {
+		printk(TPACPI_ERR
+		       "unable to register main platform driver\n");
+		thinkpad_acpi_module_exit();
+		return ret;
+	}
+	tp_features.platform_drv_registered = 1;
+
+	ret = platform_driver_register(&tpacpi_hwmon_pdriver);
+	if (ret) {
+		printk(TPACPI_ERR
+		       "unable to register hwmon platform driver\n");
+		thinkpad_acpi_module_exit();
+		return ret;
+	}
+	tp_features.sensors_pdrv_registered = 1;
+
+	ret = tpacpi_create_driver_attributes(&tpacpi_pdriver.driver);
+	if (!ret) {
+		tp_features.platform_drv_attrs_registered = 1;
+		ret = tpacpi_create_driver_attributes(
+					&tpacpi_hwmon_pdriver.driver);
+	}
+	if (ret) {
+		printk(TPACPI_ERR
+		       "unable to create sysfs driver attributes\n");
+		thinkpad_acpi_module_exit();
+		return ret;
+	}
+	tp_features.sensors_pdrv_attrs_registered = 1;
+
+
+	/* Device initialization */
+	tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, -1,
+							NULL, 0);
+	if (IS_ERR(tpacpi_pdev)) {
+		ret = PTR_ERR(tpacpi_pdev);
+		tpacpi_pdev = NULL;
+		printk(TPACPI_ERR "unable to register platform device\n");
+		thinkpad_acpi_module_exit();
+		return ret;
+	}
+	tpacpi_sensors_pdev = platform_device_register_simple(
+						TPACPI_HWMON_DRVR_NAME,
+						-1, NULL, 0);
+	if (IS_ERR(tpacpi_sensors_pdev)) {
+		ret = PTR_ERR(tpacpi_sensors_pdev);
+		tpacpi_sensors_pdev = NULL;
+		printk(TPACPI_ERR
+		       "unable to register hwmon platform device\n");
+		thinkpad_acpi_module_exit();
+		return ret;
+	}
+	ret = device_create_file(&tpacpi_sensors_pdev->dev,
+				 &dev_attr_thinkpad_acpi_pdev_name);
+	if (ret) {
+		printk(TPACPI_ERR
+		       "unable to create sysfs hwmon device attributes\n");
+		thinkpad_acpi_module_exit();
+		return ret;
+	}
+	tp_features.sensors_pdev_attrs_registered = 1;
+	tpacpi_hwmon = hwmon_device_register(&tpacpi_sensors_pdev->dev);
+	if (IS_ERR(tpacpi_hwmon)) {
+		ret = PTR_ERR(tpacpi_hwmon);
+		tpacpi_hwmon = NULL;
+		printk(TPACPI_ERR "unable to register hwmon device\n");
+		thinkpad_acpi_module_exit();
+		return ret;
+	}
+	mutex_init(&tpacpi_inputdev_send_mutex);
+	tpacpi_inputdev = input_allocate_device();
+	if (!tpacpi_inputdev) {
+		printk(TPACPI_ERR "unable to allocate input device\n");
+		thinkpad_acpi_module_exit();
+		return -ENOMEM;
+	} else {
+		/* Prepare input device, but don't register */
+		tpacpi_inputdev->name = "ThinkPad Extra Buttons";
+		tpacpi_inputdev->phys = TPACPI_DRVR_NAME "/input0";
+		tpacpi_inputdev->id.bustype = BUS_HOST;
+		tpacpi_inputdev->id.vendor = (thinkpad_id.vendor) ?
+						thinkpad_id.vendor :
+						PCI_VENDOR_ID_IBM;
+		tpacpi_inputdev->id.product = TPACPI_HKEY_INPUT_PRODUCT;
+		tpacpi_inputdev->id.version = TPACPI_HKEY_INPUT_VERSION;
+	}
+	for (i = 0; i < ARRAY_SIZE(ibms_init); i++) {
+		ret = ibm_init(&ibms_init[i]);
+		if (ret >= 0 && *ibms_init[i].param)
+			ret = ibms_init[i].data->write(ibms_init[i].param);
+		if (ret < 0) {
+			thinkpad_acpi_module_exit();
+			return ret;
+		}
+	}
+	ret = input_register_device(tpacpi_inputdev);
+	if (ret < 0) {
+		printk(TPACPI_ERR "unable to register input device\n");
+		thinkpad_acpi_module_exit();
+		return ret;
+	} else {
+		tp_features.input_device_registered = 1;
+	}
+
+	tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
+	return 0;
+}
+
+/* Please remove this in year 2009 */
+MODULE_ALIAS("ibm_acpi");
+
+MODULE_ALIAS(TPACPI_DRVR_SHORTNAME);
+
+/*
+ * DMI matching for module autoloading
+ *
+ * See http://thinkwiki.org/wiki/List_of_DMI_IDs
+ * See http://thinkwiki.org/wiki/BIOS_Upgrade_Downloads
+ *
+ * Only models listed in thinkwiki will be supported, so add yours
+ * if it is not there yet.
+ */
+#define IBM_BIOS_MODULE_ALIAS(__type) \
+	MODULE_ALIAS("dmi:bvnIBM:bvr" __type "ET??WW")
+
+/* Non-ancient thinkpads */
+MODULE_ALIAS("dmi:bvnIBM:*:svnIBM:*:pvrThinkPad*:rvnIBM:*");
+MODULE_ALIAS("dmi:bvnLENOVO:*:svnLENOVO:*:pvrThinkPad*:rvnLENOVO:*");
+
+/* Ancient thinkpad BIOSes have to be identified by
+ * BIOS type or model number, and there are far fewer
+ * BIOS types than model numbers... */
+IBM_BIOS_MODULE_ALIAS("I[B,D,H,I,M,N,O,T,W,V,Y,Z]");
+IBM_BIOS_MODULE_ALIAS("1[0,3,6,8,A-G,I,K,M-P,S,T]");
+IBM_BIOS_MODULE_ALIAS("K[U,X-Z]");
+
+MODULE_AUTHOR("Borislav Deianov, Henrique de Moraes Holschuh");
+MODULE_DESCRIPTION(TPACPI_DESC);
+MODULE_VERSION(TPACPI_VERSION);
+MODULE_LICENSE("GPL");
+
+module_init(thinkpad_acpi_module_init);
+module_exit(thinkpad_acpi_module_exit);
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
similarity index 100%
rename from drivers/acpi/toshiba_acpi.c
rename to drivers/platform/x86/toshiba_acpi.c
diff --git a/drivers/acpi/wmi.c b/drivers/platform/x86/wmi.c
similarity index 100%
rename from drivers/acpi/wmi.c
rename to drivers/platform/x86/wmi.c
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 383e47c..2834846 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -23,7 +23,6 @@
 #include <linux/pnp.h>
 #include <linux/mod_devicetable.h>
 #include <acpi/acpi_bus.h>
-#include <acpi/actypes.h>
 
 #include "../base.h"
 #include "pnpacpi.h"
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 6684724..33da112 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -82,4 +82,10 @@
 	  Say Y here to enable support for batteries charger integrated into
 	  DA9030 PMIC.
 
+config CHARGER_PCF50633
+	tristate "NXP PCF50633 MBC"
+	depends on MFD_PCF50633
+	help
+	 Say Y to include support for NXP PCF50633 Main Battery Charger.
+
 endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index eebb155..2fcf41d 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -25,3 +25,4 @@
 obj-$(CONFIG_BATTERY_WM97XX)	+= wm97xx_battery.o
 obj-$(CONFIG_BATTERY_BQ27x00)	+= bq27x00_battery.o
 obj-$(CONFIG_BATTERY_DA9030)	+= da9030_battery.o
+obj-$(CONFIG_CHARGER_PCF50633)	+= pcf50633-charger.o
\ No newline at end of file
diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c
new file mode 100644
index 0000000..e988ec1
--- /dev/null
+++ b/drivers/power/pcf50633-charger.c
@@ -0,0 +1,358 @@
+/* NXP PCF50633 Main Battery Charger Driver
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * Author: Balaji Rao <balajirrao@openmoko.org>
+ * All rights reserved.
+ *
+ * Broken down from monstrous PCF50633 driver mainly by
+ * Harald Welte, Andy Green and Werner Almesberger
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+
+#include <linux/mfd/pcf50633/core.h>
+#include <linux/mfd/pcf50633/mbc.h>
+
+struct pcf50633_mbc {
+	struct pcf50633 *pcf;
+
+	int adapter_active;
+	int adapter_online;
+	int usb_active;
+	int usb_online;
+
+	struct power_supply usb;
+	struct power_supply adapter;
+};
+
+int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
+{
+	struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev);
+	int ret = 0;
+	u8 bits;
+
+	if (ma >= 1000)
+		bits = PCF50633_MBCC7_USB_1000mA;
+	else if (ma >= 500)
+		bits = PCF50633_MBCC7_USB_500mA;
+	else if (ma >= 100)
+		bits = PCF50633_MBCC7_USB_100mA;
+	else
+		bits = PCF50633_MBCC7_USB_SUSPEND;
+
+	ret = pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC7,
+					PCF50633_MBCC7_USB_MASK, bits);
+	if (ret)
+		dev_err(pcf->dev, "error setting usb curlim to %d mA\n", ma);
+	else
+		dev_info(pcf->dev, "usb curlim to %d mA\n", ma);
+
+	power_supply_changed(&mbc->usb);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pcf50633_mbc_usb_curlim_set);
+
+int pcf50633_mbc_get_status(struct pcf50633 *pcf)
+{
+	struct pcf50633_mbc *mbc  = platform_get_drvdata(pcf->mbc_pdev);
+	int status = 0;
+
+	if (mbc->usb_online)
+		status |= PCF50633_MBC_USB_ONLINE;
+	if (mbc->usb_active)
+		status |= PCF50633_MBC_USB_ACTIVE;
+	if (mbc->adapter_online)
+		status |= PCF50633_MBC_ADAPTER_ONLINE;
+	if (mbc->adapter_active)
+		status |= PCF50633_MBC_ADAPTER_ACTIVE;
+
+	return status;
+}
+EXPORT_SYMBOL_GPL(pcf50633_mbc_get_status);
+
+void pcf50633_mbc_set_status(struct pcf50633 *pcf, int what, int status)
+{
+	struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev);
+
+	if (what & PCF50633_MBC_USB_ONLINE)
+		mbc->usb_online = !!status;
+	if (what & PCF50633_MBC_USB_ACTIVE)
+		mbc->usb_active = !!status;
+	if (what & PCF50633_MBC_ADAPTER_ONLINE)
+		mbc->adapter_online = !!status;
+	if (what & PCF50633_MBC_ADAPTER_ACTIVE)
+		mbc->adapter_active = !!status;
+}
+EXPORT_SYMBOL_GPL(pcf50633_mbc_set_status);
+
+static ssize_t
+show_chgmode(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct pcf50633_mbc *mbc = dev_get_drvdata(dev);
+
+	u8 mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2);
+	u8 chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK);
+
+	return sprintf(buf, "%d\n", chgmod);
+}
+static DEVICE_ATTR(chgmode, S_IRUGO, show_chgmode, NULL);
+
+static ssize_t
+show_usblim(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct pcf50633_mbc *mbc = dev_get_drvdata(dev);
+	u8 usblim = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC7) &
+						PCF50633_MBCC7_USB_MASK;
+	unsigned int ma;
+
+	if (usblim == PCF50633_MBCC7_USB_1000mA)
+		ma = 1000;
+	else if (usblim == PCF50633_MBCC7_USB_500mA)
+		ma = 500;
+	else if (usblim == PCF50633_MBCC7_USB_100mA)
+		ma = 100;
+	else
+		ma = 0;
+
+	return sprintf(buf, "%u\n", ma);
+}
+
+static ssize_t set_usblim(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pcf50633_mbc *mbc = dev_get_drvdata(dev);
+	unsigned long ma;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &ma);
+	if (ret)
+		return -EINVAL;
+
+	pcf50633_mbc_usb_curlim_set(mbc->pcf, ma);
+
+	return count;
+}
+
+static DEVICE_ATTR(usb_curlim, S_IRUGO | S_IWUSR, show_usblim, set_usblim);
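+
+/*
+ * Usage example: writing "500" to the usb_curlim attribute of the MBC
+ * platform device calls pcf50633_mbc_usb_curlim_set(pcf, 500), which
+ * programs the 500mA USB current limit in MBCC7; values below 100
+ * select the suspend setting.
+ */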
+
+static struct attribute *pcf50633_mbc_sysfs_entries[] = {
+	&dev_attr_chgmode.attr,
+	&dev_attr_usb_curlim.attr,
+	NULL,
+};
+
+static struct attribute_group mbc_attr_group = {
+	.name	= NULL,			/* put in device directory */
+	.attrs	= pcf50633_mbc_sysfs_entries,
+};
+
+static void
+pcf50633_mbc_irq_handler(int irq, void *data)
+{
+	struct pcf50633_mbc *mbc = data;
+
+	/* USB */
+	if (irq == PCF50633_IRQ_USBINS) {
+		mbc->usb_online = 1;
+	} else if (irq == PCF50633_IRQ_USBREM) {
+		mbc->usb_online = 0;
+		mbc->usb_active = 0;
+		pcf50633_mbc_usb_curlim_set(mbc->pcf, 0);
+	}
+
+	/* Adapter */
+	if (irq == PCF50633_IRQ_ADPINS) {
+		mbc->adapter_online = 1;
+		mbc->adapter_active = 1;
+	} else if (irq == PCF50633_IRQ_ADPREM) {
+		mbc->adapter_online = 0;
+		mbc->adapter_active = 0;
+	}
+
+	if (irq == PCF50633_IRQ_BATFULL) {
+		mbc->usb_active = 0;
+		mbc->adapter_active = 0;
+	}
+
+	power_supply_changed(&mbc->usb);
+	power_supply_changed(&mbc->adapter);
+
+	if (mbc->pcf->pdata->mbc_event_callback)
+		mbc->pcf->pdata->mbc_event_callback(mbc->pcf, irq);
+}
+
+static int adapter_get_property(struct power_supply *psy,
+			enum power_supply_property psp,
+			union power_supply_propval *val)
+{
+	struct pcf50633_mbc *mbc =
+			container_of(psy, struct pcf50633_mbc, adapter);
+	int ret = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval =  mbc->adapter_online;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int usb_get_property(struct power_supply *psy,
+			enum power_supply_property psp,
+			union power_supply_propval *val)
+{
+	struct pcf50633_mbc *mbc = container_of(psy, struct pcf50633_mbc, usb);
+	int ret = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = mbc->usb_online;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static enum power_supply_property power_props[] = {
+	POWER_SUPPLY_PROP_ONLINE,
+};
+
+static const u8 mbc_irq_handlers[] = {
+	PCF50633_IRQ_ADPINS,
+	PCF50633_IRQ_ADPREM,
+	PCF50633_IRQ_USBINS,
+	PCF50633_IRQ_USBREM,
+	PCF50633_IRQ_BATFULL,
+	PCF50633_IRQ_CHGHALT,
+	PCF50633_IRQ_THLIMON,
+	PCF50633_IRQ_THLIMOFF,
+	PCF50633_IRQ_USBLIMON,
+	PCF50633_IRQ_USBLIMOFF,
+	PCF50633_IRQ_LOWSYS,
+	PCF50633_IRQ_LOWBAT,
+};
+
+static int __devinit pcf50633_mbc_probe(struct platform_device *pdev)
+{
+	struct pcf50633_mbc *mbc;
+	struct pcf50633_subdev_pdata *pdata = pdev->dev.platform_data;
+	int ret;
+	int i;
+	u8 mbcs1;
+
+	mbc = kzalloc(sizeof(*mbc), GFP_KERNEL);
+	if (!mbc)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, mbc);
+	mbc->pcf = pdata->pcf;
+
+	/* Set up IRQ handlers */
+	for (i = 0; i < ARRAY_SIZE(mbc_irq_handlers); i++)
+		pcf50633_register_irq(mbc->pcf, mbc_irq_handlers[i],
+					pcf50633_mbc_irq_handler, mbc);
+
+	/* Create power supplies */
+	mbc->adapter.name		= "adapter";
+	mbc->adapter.type		= POWER_SUPPLY_TYPE_MAINS;
+	mbc->adapter.properties		= power_props;
+	mbc->adapter.num_properties	= ARRAY_SIZE(power_props);
+	mbc->adapter.get_property	= &adapter_get_property;
+	mbc->adapter.supplied_to	= mbc->pcf->pdata->batteries;
+	mbc->adapter.num_supplicants	= mbc->pcf->pdata->num_batteries;
+
+	mbc->usb.name			= "usb";
+	mbc->usb.type			= POWER_SUPPLY_TYPE_USB;
+	mbc->usb.properties		= power_props;
+	mbc->usb.num_properties		= ARRAY_SIZE(power_props);
+	mbc->usb.get_property		= usb_get_property;
+	mbc->usb.supplied_to		= mbc->pcf->pdata->batteries;
+	mbc->usb.num_supplicants	= mbc->pcf->pdata->num_batteries;
+
+	ret = power_supply_register(&pdev->dev, &mbc->adapter);
+	if (ret) {
+		dev_err(mbc->pcf->dev, "failed to register adapter\n");
+		kfree(mbc);
+		return ret;
+	}
+
+	ret = power_supply_register(&pdev->dev, &mbc->usb);
+	if (ret) {
+		dev_err(mbc->pcf->dev, "failed to register usb\n");
+		power_supply_unregister(&mbc->adapter);
+		kfree(mbc);
+		return ret;
+	}
+
+	ret = sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group);
+	if (ret)
+		dev_err(mbc->pcf->dev, "failed to create sysfs entries\n");
+
+	mbcs1 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS1);
+	if (mbcs1 & PCF50633_MBCS1_USBPRES)
+		pcf50633_mbc_irq_handler(PCF50633_IRQ_USBINS, mbc);
+	if (mbcs1 & PCF50633_MBCS1_ADAPTPRES)
+		pcf50633_mbc_irq_handler(PCF50633_IRQ_ADPINS, mbc);
+
+	return 0;
+}
+
+static int __devexit pcf50633_mbc_remove(struct platform_device *pdev)
+{
+	struct pcf50633_mbc *mbc = platform_get_drvdata(pdev);
+	int i;
+
+	/* Remove IRQ handlers */
+	for (i = 0; i < ARRAY_SIZE(mbc_irq_handlers); i++)
+		pcf50633_free_irq(mbc->pcf, mbc_irq_handlers[i]);
+
+	power_supply_unregister(&mbc->usb);
+	power_supply_unregister(&mbc->adapter);
+
+	kfree(mbc);
+
+	return 0;
+}
+
+static struct platform_driver pcf50633_mbc_driver = {
+	.driver = {
+		.name = "pcf50633-mbc",
+	},
+	.probe = pcf50633_mbc_probe,
+	.remove = __devexit_p(pcf50633_mbc_remove),
+};
+
+static int __init pcf50633_mbc_init(void)
+{
+	return platform_driver_register(&pcf50633_mbc_driver);
+}
+module_init(pcf50633_mbc_init);
+
+static void __exit pcf50633_mbc_exit(void)
+{
+	platform_driver_unregister(&pcf50633_mbc_driver);
+}
+module_exit(pcf50633_mbc_exit);
+
+MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
+MODULE_DESCRIPTION("PCF50633 mbc driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pcf50633-mbc");
diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c
index 204158c..fe96793 100644
--- a/drivers/ps3/ps3-lpm.c
+++ b/drivers/ps3/ps3-lpm.c
@@ -732,7 +732,7 @@
 	case 8:
 		return pm_translate_signal_group_number_on_island8(subgroup);
 	default:
-		dev_dbg(sbd_core(), "%s:%u: island not found: %lu\n", __func__,
+		dev_dbg(sbd_core(), "%s:%u: island not found: %llu\n", __func__,
 			__LINE__, group);
 		BUG();
 		break;
@@ -765,7 +765,7 @@
 				 signal_select, attr1, attr2, attr3);
 	if (ret)
 		dev_err(sbd_core(),
-			"%s:%u: error:%d 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
+			"%s:%u: error:%d 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
 			__func__, __LINE__, ret, lv1_signal_group, bus_select,
 			signal_select, attr1, attr2, attr3);
 
@@ -908,7 +908,7 @@
 
 	lpm_priv->tb_count = tmp;
 
-	dev_dbg(sbd_core(), "%s:%u: tb_count %lu (%lxh)\n", __func__, __LINE__,
+	dev_dbg(sbd_core(), "%s:%u: tb_count %llu (%llxh)\n", __func__, __LINE__,
 		lpm_priv->tb_count, lpm_priv->tb_count);
 }
 EXPORT_SYMBOL_GPL(ps3_disable_pm);
@@ -938,7 +938,7 @@
 	if (offset >= lpm_priv->tb_count)
 		return 0;
 
-	count = min(count, lpm_priv->tb_count - offset);
+	count = min_t(u64, count, lpm_priv->tb_count - offset);
 
 	while (*bytes_copied < count) {
 		const unsigned long request = count - *bytes_copied;
@@ -993,7 +993,7 @@
 	if (offset >= lpm_priv->tb_count)
 		return 0;
 
-	count = min(count, lpm_priv->tb_count - offset);
+	count = min_t(u64, count, lpm_priv->tb_count - offset);
 
 	while (*bytes_copied < count) {
 		const unsigned long request = count - *bytes_copied;
@@ -1013,7 +1013,7 @@
 		result = copy_to_user(buf, lpm_priv->tb_cache, tmp);
 
 		if (result) {
-			dev_dbg(sbd_core(), "%s:%u: 0x%lx bytes at 0x%p\n",
+			dev_dbg(sbd_core(), "%s:%u: 0x%llx bytes at 0x%p\n",
 				__func__, __LINE__, tmp, buf);
 			dev_err(sbd_core(), "%s:%u: copy_to_user failed: %d\n",
 				__func__, __LINE__, result);
@@ -1148,8 +1148,8 @@
 	lpm_priv->shadow.group_control = PS3_LPM_SHADOW_REG_INIT;
 	lpm_priv->shadow.debug_bus_control = PS3_LPM_SHADOW_REG_INIT;
 
-	dev_dbg(sbd_core(), "%s:%u: lpm_id 0x%lx, outlet_id 0x%lx, "
-		"tb_size 0x%lx\n", __func__, __LINE__, lpm_priv->lpm_id,
+	dev_dbg(sbd_core(), "%s:%u: lpm_id 0x%llx, outlet_id 0x%llx, "
+		"tb_size 0x%llx\n", __func__, __LINE__, lpm_priv->lpm_id,
 		lpm_priv->outlet_id, tb_size);
 
 	return 0;
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index 90c097a..e4ad5ba 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -114,7 +114,7 @@
 static void __maybe_unused _dump_ports_bmp(
 	const struct ports_bmp *bmp, const char *func, int line)
 {
-	pr_debug("%s:%d: ports_bmp: %016lxh\n", func, line, bmp->status);
+	pr_debug("%s:%d: ports_bmp: %016llxh\n", func, line, bmp->status);
 }
 
 #define dump_port_params(_b) _dump_port_params(_b, __func__, __LINE__)
@@ -159,11 +159,13 @@
 	struct vuart_triggers *trig)
 {
 	int result;
-	unsigned long size;
-	unsigned long val;
+	u64 size;
+	u64 val;
+	u64 tx;
 
 	result = lv1_get_virtual_uart_param(dev->port_number,
-		PARAM_TX_TRIGGER, &trig->tx);
+		PARAM_TX_TRIGGER, &tx);
+	trig->tx = tx;
 
 	if (result) {
 		dev_dbg(&dev->core, "%s:%d: tx_trigger failed: %s\n",
@@ -201,7 +203,7 @@
 	unsigned int rx)
 {
 	int result;
-	unsigned long size;
+	u64 size;
 
 	result = lv1_set_virtual_uart_param(dev->port_number,
 		PARAM_TX_TRIGGER, tx);
@@ -248,7 +250,7 @@
 		dev_dbg(&dev->core, "%s:%d: rx_bytes failed: %s\n",
 			__func__, __LINE__, ps3_result(result));
 
-	dev_dbg(&dev->core, "%s:%d: %lxh\n", __func__, __LINE__,
+	dev_dbg(&dev->core, "%s:%d: %llxh\n", __func__, __LINE__,
 		*bytes_waiting);
 	return result;
 }
@@ -295,7 +297,7 @@
 
 	*status = tmp & priv->interrupt_mask;
 
-	dev_dbg(&dev->core, "%s:%d: m %lxh, s %lxh, m&s %lxh\n",
+	dev_dbg(&dev->core, "%s:%d: m %llxh, s %llxh, m&s %lxh\n",
 		__func__, __LINE__, priv->interrupt_mask, tmp, *status);
 
 	return result;
@@ -363,7 +365,7 @@
  */
 
 static int ps3_vuart_raw_write(struct ps3_system_bus_device *dev,
-	const void *buf, unsigned int bytes, unsigned long *bytes_written)
+	const void *buf, unsigned int bytes, u64 *bytes_written)
 {
 	int result;
 	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
@@ -379,7 +381,7 @@
 
 	priv->stats.bytes_written += *bytes_written;
 
-	dev_dbg(&dev->core, "%s:%d: wrote %lxh/%xh=>%lxh\n", __func__, __LINE__,
+	dev_dbg(&dev->core, "%s:%d: wrote %llxh/%xh=>%lxh\n", __func__, __LINE__,
 		*bytes_written, bytes, priv->stats.bytes_written);
 
 	return result;
@@ -393,7 +395,7 @@
  */
 
 static int ps3_vuart_raw_read(struct ps3_system_bus_device *dev, void *buf,
-	unsigned int bytes, unsigned long *bytes_read)
+	unsigned int bytes, u64 *bytes_read)
 {
 	int result;
 	struct ps3_vuart_port_priv *priv = to_port_priv(dev);
@@ -411,7 +413,7 @@
 
 	priv->stats.bytes_read += *bytes_read;
 
-	dev_dbg(&dev->core, "%s:%d: read %lxh/%xh=>%lxh\n", __func__, __LINE__,
+	dev_dbg(&dev->core, "%s:%d: read %llxh/%xh=>%lxh\n", __func__, __LINE__,
 		*bytes_read, bytes, priv->stats.bytes_read);
 
 	return result;
@@ -500,7 +502,7 @@
 	spin_lock_irqsave(&priv->tx_list.lock, flags);
 
 	if (list_empty(&priv->tx_list.head)) {
-		unsigned long bytes_written;
+		u64 bytes_written;
 
 		result = ps3_vuart_raw_write(dev, buf, bytes, &bytes_written);
 
@@ -592,7 +594,7 @@
 	list_add_tail(&lb->link, &priv->rx_list.head);
 	priv->rx_list.bytes_held += bytes;
 
-	dev_dbg(&dev->core, "%s:%d: buf_%lu: queued %lxh bytes\n",
+	dev_dbg(&dev->core, "%s:%d: buf_%lu: queued %llxh bytes\n",
 		__func__, __LINE__, lb->dbg_number, bytes);
 
 	*bytes_queued = bytes;
@@ -745,7 +747,7 @@
 
 	list_for_each_entry_safe(lb, n, &priv->tx_list.head, link) {
 
-		unsigned long bytes_written;
+		u64 bytes_written;
 
 		result = ps3_vuart_raw_write(dev, lb->head, lb->tail - lb->head,
 			&bytes_written);
@@ -762,7 +764,7 @@
 		if (bytes_written < lb->tail - lb->head) {
 			lb->head += bytes_written;
 			dev_dbg(&dev->core,
-				"%s:%d cleared buf_%lu, %lxh bytes\n",
+				"%s:%d cleared buf_%lu, %llxh bytes\n",
 				__func__, __LINE__, lb->dbg_number,
 				bytes_written);
 			goto port_full;
diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
index 55955f1..18066d5 100644
--- a/drivers/ps3/ps3stor_lib.c
+++ b/drivers/ps3/ps3stor_lib.c
@@ -70,7 +70,7 @@
 			 __func__, __LINE__, n);
 	dev->region_idx = __ffs(dev->accessible_regions);
 	dev_info(&dev->sbd.core,
-		 "First accessible region has index %u start %lu size %lu\n",
+		 "First accessible region has index %u start %llu size %llu\n",
 		 dev->region_idx, dev->regions[dev->region_idx].start,
 		 dev->regions[dev->region_idx].size);
 
@@ -220,7 +220,7 @@
 	const char *op = write ? "write" : "read";
 	int res;
 
-	dev_dbg(&dev->sbd.core, "%s:%u: %s %lu sectors starting at %lu\n",
+	dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
 		__func__, __LINE__, op, sectors, start_sector);
 
 	init_completion(&dev->done);
@@ -238,7 +238,7 @@
 
 	wait_for_completion(&dev->done);
 	if (dev->lv1_status) {
-		dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__,
+		dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
 			__LINE__, op, dev->lv1_status);
 		return dev->lv1_status;
 	}
@@ -268,7 +268,7 @@
 {
 	int res;
 
-	dev_dbg(&dev->sbd.core, "%s:%u: send device command 0x%lx\n", __func__,
+	dev_dbg(&dev->sbd.core, "%s:%u: send device command 0x%llx\n", __func__,
 		__LINE__, cmd);
 
 	init_completion(&dev->done);
@@ -277,19 +277,19 @@
 					      arg2, arg3, arg4, &dev->tag);
 	if (res) {
 		dev_err(&dev->sbd.core,
-			"%s:%u: send_device_command 0x%lx failed %d\n",
+			"%s:%u: send_device_command 0x%llx failed %d\n",
 			__func__, __LINE__, cmd, res);
 		return -1;
 	}
 
 	wait_for_completion(&dev->done);
 	if (dev->lv1_status) {
-		dev_dbg(&dev->sbd.core, "%s:%u: command 0x%lx failed 0x%lx\n",
+		dev_dbg(&dev->sbd.core, "%s:%u: command 0x%llx failed 0x%llx\n",
 			__func__, __LINE__, cmd, dev->lv1_status);
 		return dev->lv1_status;
 	}
 
-	dev_dbg(&dev->sbd.core, "%s:%u: command 0x%lx completed\n", __func__,
+	dev_dbg(&dev->sbd.core, "%s:%u: command 0x%llx completed\n", __func__,
 		__LINE__, cmd);
 
 	return 0;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 39360e2..e7e0cf1 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -73,4 +73,11 @@
 	  Say y here to support the BUCKs and LDOs regulators found on
 	  Dialog Semiconductor DA9030/DA9034 PMIC.
 
+config REGULATOR_PCF50633
+	tristate "PCF50633 regulator driver"
+	depends on MFD_PCF50633
+	help
+	  Say Y here to support the voltage regulators and converters
+	  on the PCF50633 PMIC.
+
 endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 254d40c..61b30c6 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -11,5 +11,6 @@
 obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o
 obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
 obj-$(CONFIG_REGULATOR_DA903X)	+= da903x.o
+obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
 
 ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 02a7744..f511a40 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -29,7 +29,7 @@
 static LIST_HEAD(regulator_list);
 static LIST_HEAD(regulator_map_list);
 
-/**
+/*
  * struct regulator_dev
  *
  * Voltage / Current regulator class device. One for each regulator.
@@ -56,7 +56,7 @@
 	void *reg_data;		/* regulator_dev data */
 };
 
-/**
+/*
  * struct regulator_map
  *
  * Used to provide symbolic supply names to devices.
@@ -79,7 +79,7 @@
 	int uA_load;
 	int min_uV;
 	int max_uV;
-	int enabled; /* client has called enabled */
+	int enabled; /* count of client enables */
 	char *supply_name;
 	struct device_attribute dev_attr;
 	struct regulator_dev *rdev;
@@ -174,6 +174,16 @@
 /* operating mode constraint check */
 static int regulator_check_mode(struct regulator_dev *rdev, int mode)
 {
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+	case REGULATOR_MODE_NORMAL:
+	case REGULATOR_MODE_IDLE:
+	case REGULATOR_MODE_STANDBY:
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	if (!rdev->constraints) {
 		printk(KERN_ERR "%s: no constraints for %s\n", __func__,
 		       rdev->desc->name);
@@ -232,6 +242,7 @@
 
 	return ret;
 }
+static DEVICE_ATTR(microvolts, 0444, regulator_uV_show, NULL);
 
 static ssize_t regulator_uA_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
@@ -240,6 +251,7 @@
 
 	return sprintf(buf, "%d\n", _regulator_get_current_limit(rdev));
 }
+static DEVICE_ATTR(microamps, 0444, regulator_uA_show, NULL);
 
 static ssize_t regulator_name_show(struct device *dev,
 			     struct device_attribute *attr, char *buf)
@@ -257,12 +269,8 @@
 	return sprintf(buf, "%s\n", name);
 }
 
-static ssize_t regulator_opmode_show(struct device *dev,
-				    struct device_attribute *attr, char *buf)
+static ssize_t regulator_print_opmode(char *buf, int mode)
 {
-	struct regulator_dev *rdev = dev_get_drvdata(dev);
-	int mode = _regulator_get_mode(rdev);
-
 	switch (mode) {
 	case REGULATOR_MODE_FAST:
 		return sprintf(buf, "fast\n");
@@ -276,12 +284,17 @@
 	return sprintf(buf, "unknown\n");
 }
 
-static ssize_t regulator_state_show(struct device *dev,
-				   struct device_attribute *attr, char *buf)
+static ssize_t regulator_opmode_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
 {
 	struct regulator_dev *rdev = dev_get_drvdata(dev);
-	int state = _regulator_is_enabled(rdev);
 
+	return regulator_print_opmode(buf, _regulator_get_mode(rdev));
+}
+static DEVICE_ATTR(opmode, 0444, regulator_opmode_show, NULL);
+
+static ssize_t regulator_print_state(char *buf, int state)
+{
 	if (state > 0)
 		return sprintf(buf, "enabled\n");
 	else if (state == 0)
@@ -290,6 +303,15 @@
 		return sprintf(buf, "unknown\n");
 }
 
+static ssize_t regulator_state_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct regulator_dev *rdev = dev_get_drvdata(dev);
+
+	return regulator_print_state(buf, _regulator_is_enabled(rdev));
+}
+static DEVICE_ATTR(state, 0444, regulator_state_show, NULL);
+
 static ssize_t regulator_min_uA_show(struct device *dev,
 				    struct device_attribute *attr, char *buf)
 {
@@ -300,6 +322,7 @@
 
 	return sprintf(buf, "%d\n", rdev->constraints->min_uA);
 }
+static DEVICE_ATTR(min_microamps, 0444, regulator_min_uA_show, NULL);
 
 static ssize_t regulator_max_uA_show(struct device *dev,
 				    struct device_attribute *attr, char *buf)
@@ -311,6 +334,7 @@
 
 	return sprintf(buf, "%d\n", rdev->constraints->max_uA);
 }
+static DEVICE_ATTR(max_microamps, 0444, regulator_max_uA_show, NULL);
 
 static ssize_t regulator_min_uV_show(struct device *dev,
 				    struct device_attribute *attr, char *buf)
@@ -322,6 +346,7 @@
 
 	return sprintf(buf, "%d\n", rdev->constraints->min_uV);
 }
+static DEVICE_ATTR(min_microvolts, 0444, regulator_min_uV_show, NULL);
 
 static ssize_t regulator_max_uV_show(struct device *dev,
 				    struct device_attribute *attr, char *buf)
@@ -333,6 +358,7 @@
 
 	return sprintf(buf, "%d\n", rdev->constraints->max_uV);
 }
+static DEVICE_ATTR(max_microvolts, 0444, regulator_max_uV_show, NULL);
 
 static ssize_t regulator_total_uA_show(struct device *dev,
 				      struct device_attribute *attr, char *buf)
@@ -347,6 +373,7 @@
 	mutex_unlock(&rdev->mutex);
 	return sprintf(buf, "%d\n", uA);
 }
+static DEVICE_ATTR(requested_microamps, 0444, regulator_total_uA_show, NULL);
 
 static ssize_t regulator_num_users_show(struct device *dev,
 				      struct device_attribute *attr, char *buf)
@@ -374,153 +401,106 @@
 {
 	struct regulator_dev *rdev = dev_get_drvdata(dev);
 
-	if (!rdev->constraints)
-		return sprintf(buf, "not defined\n");
 	return sprintf(buf, "%d\n", rdev->constraints->state_mem.uV);
 }
+static DEVICE_ATTR(suspend_mem_microvolts, 0444,
+		regulator_suspend_mem_uV_show, NULL);
 
 static ssize_t regulator_suspend_disk_uV_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
 	struct regulator_dev *rdev = dev_get_drvdata(dev);
 
-	if (!rdev->constraints)
-		return sprintf(buf, "not defined\n");
 	return sprintf(buf, "%d\n", rdev->constraints->state_disk.uV);
 }
+static DEVICE_ATTR(suspend_disk_microvolts, 0444,
+		regulator_suspend_disk_uV_show, NULL);
 
 static ssize_t regulator_suspend_standby_uV_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
 	struct regulator_dev *rdev = dev_get_drvdata(dev);
 
-	if (!rdev->constraints)
-		return sprintf(buf, "not defined\n");
 	return sprintf(buf, "%d\n", rdev->constraints->state_standby.uV);
 }
-
-static ssize_t suspend_opmode_show(struct regulator_dev *rdev,
-	unsigned int mode, char *buf)
-{
-	switch (mode) {
-	case REGULATOR_MODE_FAST:
-		return sprintf(buf, "fast\n");
-	case REGULATOR_MODE_NORMAL:
-		return sprintf(buf, "normal\n");
-	case REGULATOR_MODE_IDLE:
-		return sprintf(buf, "idle\n");
-	case REGULATOR_MODE_STANDBY:
-		return sprintf(buf, "standby\n");
-	}
-	return sprintf(buf, "unknown\n");
-}
+static DEVICE_ATTR(suspend_standby_microvolts, 0444,
+		regulator_suspend_standby_uV_show, NULL);
 
 static ssize_t regulator_suspend_mem_mode_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
 	struct regulator_dev *rdev = dev_get_drvdata(dev);
 
-	if (!rdev->constraints)
-		return sprintf(buf, "not defined\n");
-	return suspend_opmode_show(rdev,
-		rdev->constraints->state_mem.mode, buf);
+	return regulator_print_opmode(buf,
+		rdev->constraints->state_mem.mode);
 }
+static DEVICE_ATTR(suspend_mem_mode, 0444,
+		regulator_suspend_mem_mode_show, NULL);
 
 static ssize_t regulator_suspend_disk_mode_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
 	struct regulator_dev *rdev = dev_get_drvdata(dev);
 
-	if (!rdev->constraints)
-		return sprintf(buf, "not defined\n");
-	return suspend_opmode_show(rdev,
-		rdev->constraints->state_disk.mode, buf);
+	return regulator_print_opmode(buf,
+		rdev->constraints->state_disk.mode);
 }
+static DEVICE_ATTR(suspend_disk_mode, 0444,
+		regulator_suspend_disk_mode_show, NULL);
 
 static ssize_t regulator_suspend_standby_mode_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
 	struct regulator_dev *rdev = dev_get_drvdata(dev);
 
-	if (!rdev->constraints)
-		return sprintf(buf, "not defined\n");
-	return suspend_opmode_show(rdev,
-		rdev->constraints->state_standby.mode, buf);
+	return regulator_print_opmode(buf,
+		rdev->constraints->state_standby.mode);
 }
+static DEVICE_ATTR(suspend_standby_mode, 0444,
+		regulator_suspend_standby_mode_show, NULL);
 
 static ssize_t regulator_suspend_mem_state_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct regulator_dev *rdev = dev_get_drvdata(dev);
 
-	if (!rdev->constraints)
-		return sprintf(buf, "not defined\n");
-
-	if (rdev->constraints->state_mem.enabled)
-		return sprintf(buf, "enabled\n");
-	else
-		return sprintf(buf, "disabled\n");
+	return regulator_print_state(buf,
+			rdev->constraints->state_mem.enabled);
 }
+static DEVICE_ATTR(suspend_mem_state, 0444,
+		regulator_suspend_mem_state_show, NULL);
 
 static ssize_t regulator_suspend_disk_state_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct regulator_dev *rdev = dev_get_drvdata(dev);
 
-	if (!rdev->constraints)
-		return sprintf(buf, "not defined\n");
-
-	if (rdev->constraints->state_disk.enabled)
-		return sprintf(buf, "enabled\n");
-	else
-		return sprintf(buf, "disabled\n");
+	return regulator_print_state(buf,
+			rdev->constraints->state_disk.enabled);
 }
+static DEVICE_ATTR(suspend_disk_state, 0444,
+		regulator_suspend_disk_state_show, NULL);
 
 static ssize_t regulator_suspend_standby_state_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct regulator_dev *rdev = dev_get_drvdata(dev);
 
-	if (!rdev->constraints)
-		return sprintf(buf, "not defined\n");
-
-	if (rdev->constraints->state_standby.enabled)
-		return sprintf(buf, "enabled\n");
-	else
-		return sprintf(buf, "disabled\n");
+	return regulator_print_state(buf,
+			rdev->constraints->state_standby.enabled);
 }
+static DEVICE_ATTR(suspend_standby_state, 0444,
+		regulator_suspend_standby_state_show, NULL);
 
+
+/*
+ * These are the only attributes present for all regulators.
+ * Other attributes are a function of regulator functionality.
+ */
 static struct device_attribute regulator_dev_attrs[] = {
 	__ATTR(name, 0444, regulator_name_show, NULL),
-	__ATTR(microvolts, 0444, regulator_uV_show, NULL),
-	__ATTR(microamps, 0444, regulator_uA_show, NULL),
-	__ATTR(opmode, 0444, regulator_opmode_show, NULL),
-	__ATTR(state, 0444, regulator_state_show, NULL),
-	__ATTR(min_microvolts, 0444, regulator_min_uV_show, NULL),
-	__ATTR(min_microamps, 0444, regulator_min_uA_show, NULL),
-	__ATTR(max_microvolts, 0444, regulator_max_uV_show, NULL),
-	__ATTR(max_microamps, 0444, regulator_max_uA_show, NULL),
-	__ATTR(requested_microamps, 0444, regulator_total_uA_show, NULL),
 	__ATTR(num_users, 0444, regulator_num_users_show, NULL),
 	__ATTR(type, 0444, regulator_type_show, NULL),
-	__ATTR(suspend_mem_microvolts, 0444,
-		regulator_suspend_mem_uV_show, NULL),
-	__ATTR(suspend_disk_microvolts, 0444,
-		regulator_suspend_disk_uV_show, NULL),
-	__ATTR(suspend_standby_microvolts, 0444,
-		regulator_suspend_standby_uV_show, NULL),
-	__ATTR(suspend_mem_mode, 0444,
-		regulator_suspend_mem_mode_show, NULL),
-	__ATTR(suspend_disk_mode, 0444,
-		regulator_suspend_disk_mode_show, NULL),
-	__ATTR(suspend_standby_mode, 0444,
-		regulator_suspend_standby_mode_show, NULL),
-	__ATTR(suspend_mem_state, 0444,
-		regulator_suspend_mem_state_show, NULL),
-	__ATTR(suspend_disk_state, 0444,
-		regulator_suspend_disk_state_show, NULL),
-	__ATTR(suspend_standby_state, 0444,
-		regulator_suspend_standby_state_show, NULL),
 	__ATTR_NULL,
 };
 
@@ -675,7 +655,8 @@
 
 /**
  * set_machine_constraints - sets regulator constraints
- * @regulator: regulator source
+ * @rdev: regulator source
+ * @constraints: constraints to apply
  *
  * Allows platform initialisation code to define and constrain
  * regulator circuits e.g. valid voltage/current ranges, etc.  NOTE:
@@ -750,8 +731,8 @@
 
 /**
  * set_supply - set regulator supply regulator
- * @regulator: regulator name
- * @supply: supply regulator name
+ * @rdev: regulator name
+ * @supply_rdev: supply regulator name
  *
  * Called by platform initialisation code to set the supply regulator for this
  * regulator. This ensures that a regulators supply will also be enabled by the
@@ -778,9 +759,9 @@
 
 /**
  * set_consumer_device_supply: Bind a regulator to a symbolic supply
- * @regulator: regulator source
- * @dev:       device the supply applies to
- * @supply:    symbolic name for supply
+ * @rdev:         regulator source
+ * @consumer_dev: device the supply applies to
+ * @supply:       symbolic name for supply
  *
  * Allows platform initialisation code to map physical regulator
  * sources to symbolic names for supplies for use by devices.  Devices
@@ -795,6 +776,20 @@
 	if (supply == NULL)
 		return -EINVAL;
 
+	list_for_each_entry(node, &regulator_map_list, list) {
+		if (consumer_dev != node->dev)
+			continue;
+		if (strcmp(node->supply, supply) != 0)
+			continue;
+
+		dev_dbg(consumer_dev, "%s/%s is '%s' supply; fail %s/%s\n",
+				dev_name(&node->regulator->dev),
+				node->regulator->desc->name,
+				supply,
+				dev_name(&rdev->dev), rdev->desc->name);
+		return -EBUSY;
+	}
+
 	node = kmalloc(sizeof(struct regulator_map), GFP_KERNEL);
 	if (node == NULL)
 		return -ENOMEM;
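
For reference, a board-init sketch of the consumer-supply mapping this code serves; the consumer device (board_mmc_device) and the "Vcc" string are hypothetical placeholders, not taken from this patch:

#include <linux/regulator/machine.h>

static struct regulator_consumer_supply board_consumers[] = {
	{
		.dev	= &board_mmc_device.dev,	/* hypothetical consumer device */
		.supply	= "Vcc",			/* symbolic name the consumer requests */
	},
};

static struct regulator_init_data board_regulator_init = {
	.num_consumer_supplies	= ARRAY_SIZE(board_consumers),
	.consumer_supplies	= board_consumers,
	/* .constraints would normally be filled in here as well */
};

With the duplicate check added above, registering a second regulator that claims the same (device, "Vcc") pair now fails with -EBUSY instead of silently shadowing the first mapping.
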
@@ -963,16 +958,13 @@
 	if (regulator == NULL || IS_ERR(regulator))
 		return;
 
-	if (regulator->enabled) {
-		printk(KERN_WARNING "Releasing supply %s while enabled\n",
-		       regulator->supply_name);
-		WARN_ON(regulator->enabled);
-		regulator_disable(regulator);
-	}
-
 	mutex_lock(&regulator_list_mutex);
 	rdev = regulator->rdev;
 
+	if (WARN(regulator->enabled, "Releasing supply %s while enabled\n",
+			       regulator->supply_name))
+		_regulator_disable(rdev);
+
 	/* remove any sysfs entries */
 	if (regulator->dev) {
 		sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
@@ -1034,29 +1026,26 @@
  * regulator_enable - enable regulator output
  * @regulator: regulator source
  *
- * Enable the regulator output at the predefined voltage or current value.
+ * Request that the regulator be enabled with the regulator output at
+ * the predefined voltage or current value.  Calls to regulator_enable()
+ * must be balanced with calls to regulator_disable().
+ *
  * NOTE: the output value can be set by other drivers, boot loader or may be
  * hardwired in the regulator.
- * NOTE: calls to regulator_enable() must be balanced with calls to
- * regulator_disable().
  */
 int regulator_enable(struct regulator *regulator)
 {
-	int ret;
+	struct regulator_dev *rdev = regulator->rdev;
+	int ret = 0;
 
-	if (regulator->enabled) {
-		printk(KERN_CRIT "Regulator %s already enabled\n",
-		       regulator->supply_name);
-		WARN_ON(regulator->enabled);
-		return 0;
-	}
-
-	mutex_lock(&regulator->rdev->mutex);
-	regulator->enabled = 1;
-	ret = _regulator_enable(regulator->rdev);
-	if (ret != 0)
-		regulator->enabled = 0;
-	mutex_unlock(&regulator->rdev->mutex);
+	mutex_lock(&rdev->mutex);
+	if (regulator->enabled == 0)
+		ret = _regulator_enable(rdev);
+	else if (regulator->enabled < 0)
+		ret = -EIO;
+	if (ret == 0)
+		regulator->enabled++;
+	mutex_unlock(&rdev->mutex);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(regulator_enable);
@@ -1100,27 +1089,31 @@
  * regulator_disable - disable regulator output
  * @regulator: regulator source
  *
- * Disable the regulator output voltage or current.
- * NOTE: this will only disable the regulator output if no other consumer
- * devices have it enabled.
- * NOTE: calls to regulator_enable() must be balanced with calls to
+ * Disable the regulator output voltage or current.  Calls to
+ * regulator_enable() must be balanced with calls to
  * regulator_disable().
+ *
+ * NOTE: this will only disable the regulator output if no other consumer
+ * devices have it enabled, the regulator device supports disabling and
+ * machine constraints permit this operation.
  */
 int regulator_disable(struct regulator *regulator)
 {
-	int ret;
+	struct regulator_dev *rdev = regulator->rdev;
+	int ret = 0;
 
-	if (!regulator->enabled) {
-		printk(KERN_ERR "%s: not in use by this consumer\n",
-			__func__);
-		return 0;
-	}
-
-	mutex_lock(&regulator->rdev->mutex);
-	regulator->enabled = 0;
-	regulator->uA_load = 0;
-	ret = _regulator_disable(regulator->rdev);
-	mutex_unlock(&regulator->rdev->mutex);
+	mutex_lock(&rdev->mutex);
+	if (regulator->enabled == 1) {
+		ret = _regulator_disable(rdev);
+		if (ret == 0)
+			regulator->uA_load = 0;
+	} else if (WARN(regulator->enabled <= 0,
+			"unbalanced disables for supply %s\n",
+			regulator->supply_name))
+		ret = -EIO;
+	if (ret == 0)
+		regulator->enabled--;
+	mutex_unlock(&rdev->mutex);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(regulator_disable);
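
To illustrate the balanced enable/disable contract documented above, a minimal consumer-side sketch; the "vcc" supply name and the surrounding function are made up for illustration:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_consumer_power_on(struct device *dev)
{
	struct regulator *reg;
	int ret;

	reg = regulator_get(dev, "vcc");	/* "vcc" is a made-up supply name */
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	ret = regulator_enable(reg);		/* counts as one enable for this consumer */
	if (ret == 0) {
		/* ... use the hardware ... */
		regulator_disable(reg);		/* must balance the enable above */
	}

	regulator_put(reg);
	return ret;
}
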
@@ -1196,7 +1189,13 @@
  * regulator_is_enabled - is the regulator output enabled
  * @regulator: regulator source
  *
- * Returns zero for disabled otherwise return number of enable requests.
+ * Returns positive if the regulator driver backing the source/client
+ * has requested that the device be enabled, zero if it hasn't, else a
+ * negative errno code.
+ *
+ * Note that the device backing this regulator handle can have multiple
+ * users, so it might be enabled even if regulator_enable() was never
+ * called for this particular source.
  */
 int regulator_is_enabled(struct regulator *regulator)
 {
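
Building on the handle from the sketch above, a hedged example of interpreting regulator_is_enabled(), which reflects the shared device state rather than this consumer's own enable count:

static void example_report_state(struct device *dev, struct regulator *reg)
{
	int ret = regulator_is_enabled(reg);

	if (ret < 0)
		dev_warn(dev, "cannot read regulator state: %d\n", ret);
	else if (ret > 0)
		/* may be on because of another consumer, the boot loader, etc. */
		dev_info(dev, "supply is already enabled\n");
	else
		dev_info(dev, "supply is disabled\n");
}
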
@@ -1219,7 +1218,7 @@
  *
  * NOTE: If the regulator is shared between several devices then the lowest
  * request voltage that meets the system constraints will be used.
- * NOTE: Regulator system constraints must be set for this regulator before
+ * Regulator system constraints must be set for this regulator before
  * calling this function otherwise this call will fail.
  */
 int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
@@ -1493,7 +1492,8 @@
 	mode = rdev->desc->ops->get_optimum_mode(rdev,
 						 input_uV, output_uV,
 						 total_uA_load);
-	if (ret <= 0) {
+	ret = regulator_check_mode(rdev, mode);
+	if (ret < 0) {
 		printk(KERN_ERR "%s: failed to get optimum mode for %s @"
 			" %d uA %d -> %d uV\n", __func__, rdev->desc->name,
 			total_uA_load, input_uV, output_uV);
@@ -1501,7 +1501,7 @@
 	}
 
 	ret = rdev->desc->ops->set_mode(rdev, mode);
-	if (ret <= 0) {
+	if (ret < 0) {
 		printk(KERN_ERR "%s: failed to set optimum mode %x for %s\n",
 			__func__, mode, rdev->desc->name);
 		goto out;
@@ -1516,7 +1516,7 @@
 /**
  * regulator_register_notifier - register regulator event notifier
  * @regulator: regulator source
- * @notifier_block: notifier block
+ * @nb: notifier block
  *
  * Register notifier block to receive regulator events.
  */
@@ -1531,7 +1531,7 @@
 /**
  * regulator_unregister_notifier - unregister regulator event notifier
  * @regulator: regulator source
- * @notifier_block: notifier block
+ * @nb: notifier block
  *
  * Unregister regulator event notifier block.
  */
@@ -1697,9 +1697,9 @@
 
 /**
  * regulator_notifier_call_chain - call regulator event notifier
- * @regulator: regulator source
+ * @rdev: regulator source
  * @event: notifier block
- * @data:
+ * @data: callback-specific data.
  *
  * Called by regulator drivers to notify clients a regulator event has
  * occurred. We also notify regulator clients downstream.
@@ -1713,10 +1713,122 @@
 }
 EXPORT_SYMBOL_GPL(regulator_notifier_call_chain);
 
+/*
+ * To avoid cluttering sysfs (and memory) with useless state, only
+ * create attributes that can be meaningfully displayed.
+ */
+static int add_regulator_attributes(struct regulator_dev *rdev)
+{
+	struct device		*dev = &rdev->dev;
+	struct regulator_ops	*ops = rdev->desc->ops;
+	int			status = 0;
+
+	/* some attributes need specific methods to be displayed */
+	if (ops->get_voltage) {
+		status = device_create_file(dev, &dev_attr_microvolts);
+		if (status < 0)
+			return status;
+	}
+	if (ops->get_current_limit) {
+		status = device_create_file(dev, &dev_attr_microamps);
+		if (status < 0)
+			return status;
+	}
+	if (ops->get_mode) {
+		status = device_create_file(dev, &dev_attr_opmode);
+		if (status < 0)
+			return status;
+	}
+	if (ops->is_enabled) {
+		status = device_create_file(dev, &dev_attr_state);
+		if (status < 0)
+			return status;
+	}
+
+	/* some attributes are type-specific */
+	if (rdev->desc->type == REGULATOR_CURRENT) {
+		status = device_create_file(dev, &dev_attr_requested_microamps);
+		if (status < 0)
+			return status;
+	}
+
+	/* all the other attributes exist to support constraints;
+	 * don't show them if there are no constraints, or if the
+	 * relevant supporting methods are missing.
+	 */
+	if (!rdev->constraints)
+		return status;
+
+	/* constraints need specific supporting methods */
+	if (ops->set_voltage) {
+		status = device_create_file(dev, &dev_attr_min_microvolts);
+		if (status < 0)
+			return status;
+		status = device_create_file(dev, &dev_attr_max_microvolts);
+		if (status < 0)
+			return status;
+	}
+	if (ops->set_current_limit) {
+		status = device_create_file(dev, &dev_attr_min_microamps);
+		if (status < 0)
+			return status;
+		status = device_create_file(dev, &dev_attr_max_microamps);
+		if (status < 0)
+			return status;
+	}
+
+	/* suspend mode constraints need multiple supporting methods */
+	if (!(ops->set_suspend_enable && ops->set_suspend_disable))
+		return status;
+
+	status = device_create_file(dev, &dev_attr_suspend_standby_state);
+	if (status < 0)
+		return status;
+	status = device_create_file(dev, &dev_attr_suspend_mem_state);
+	if (status < 0)
+		return status;
+	status = device_create_file(dev, &dev_attr_suspend_disk_state);
+	if (status < 0)
+		return status;
+
+	if (ops->set_suspend_voltage) {
+		status = device_create_file(dev,
+				&dev_attr_suspend_standby_microvolts);
+		if (status < 0)
+			return status;
+		status = device_create_file(dev,
+				&dev_attr_suspend_mem_microvolts);
+		if (status < 0)
+			return status;
+		status = device_create_file(dev,
+				&dev_attr_suspend_disk_microvolts);
+		if (status < 0)
+			return status;
+	}
+
+	if (ops->set_suspend_mode) {
+		status = device_create_file(dev,
+				&dev_attr_suspend_standby_mode);
+		if (status < 0)
+			return status;
+		status = device_create_file(dev,
+				&dev_attr_suspend_mem_mode);
+		if (status < 0)
+			return status;
+		status = device_create_file(dev,
+				&dev_attr_suspend_disk_mode);
+		if (status < 0)
+			return status;
+	}
+
+	return status;
+}
+
 /**
  * regulator_register - register regulator
- * @regulator: regulator source
- * @reg_data: private regulator data
+ * @regulator_desc: regulator to register
+ * @dev: struct device for the regulator
+ * @driver_data: private regulator data
  *
  * Called by regulator drivers to register a regulator.
  * Returns 0 on success.
@@ -1761,45 +1873,37 @@
 	/* perform any regulator specific init */
 	if (init_data->regulator_init) {
 		ret = init_data->regulator_init(rdev->reg_data);
-		if (ret < 0) {
-			kfree(rdev);
-			rdev = ERR_PTR(ret);
-			goto out;
-		}
-	}
-
-	/* set regulator constraints */
-	ret = set_machine_constraints(rdev, &init_data->constraints);
-	if (ret < 0) {
-		kfree(rdev);
-		rdev = ERR_PTR(ret);
-		goto out;
+		if (ret < 0)
+			goto clean;
 	}
 
 	/* register with sysfs */
 	rdev->dev.class = &regulator_class;
 	rdev->dev.parent = dev;
-	snprintf(rdev->dev.bus_id, sizeof(rdev->dev.bus_id),
-		 "regulator.%d", atomic_inc_return(&regulator_no) - 1);
+	dev_set_name(&rdev->dev, "regulator.%d",
+		     atomic_inc_return(&regulator_no) - 1);
 	ret = device_register(&rdev->dev);
-	if (ret != 0) {
-		kfree(rdev);
-		rdev = ERR_PTR(ret);
-		goto out;
-	}
+	if (ret != 0)
+		goto clean;
 
 	dev_set_drvdata(&rdev->dev, rdev);
 
+	/* set regulator constraints */
+	ret = set_machine_constraints(rdev, &init_data->constraints);
+	if (ret < 0)
+		goto scrub;
+
+	/* add attributes supported by this regulator */
+	ret = add_regulator_attributes(rdev);
+	if (ret < 0)
+		goto scrub;
+
 	/* set supply regulator if it exists */
 	if (init_data->supply_regulator_dev) {
 		ret = set_supply(rdev,
 			dev_get_drvdata(init_data->supply_regulator_dev));
-		if (ret < 0) {
-			device_unregister(&rdev->dev);
-			kfree(rdev);
-			rdev = ERR_PTR(ret);
-			goto out;
-		}
+		if (ret < 0)
+			goto scrub;
 	}
 
 	/* add consumers devices */
@@ -1811,10 +1915,7 @@
 			for (--i; i >= 0; i--)
 				unset_consumer_device_supply(rdev,
 					init_data->consumer_supplies[i].dev);
-			device_unregister(&rdev->dev);
-			kfree(rdev);
-			rdev = ERR_PTR(ret);
-			goto out;
+			goto scrub;
 		}
 	}
 
@@ -1822,12 +1923,19 @@
 out:
 	mutex_unlock(&regulator_list_mutex);
 	return rdev;
+
+scrub:
+	device_unregister(&rdev->dev);
+clean:
+	kfree(rdev);
+	rdev = ERR_PTR(ret);
+	goto out;
 }
 EXPORT_SYMBOL_GPL(regulator_register);
 
 /**
  * regulator_unregister - unregister regulator
- * @regulator: regulator source
+ * @rdev: regulator to unregister
  *
  * Called by regulator drivers to unregister a regulator.
  */
@@ -1846,7 +1954,7 @@
 EXPORT_SYMBOL_GPL(regulator_unregister);
 
 /**
- * regulator_suspend_prepare: prepare regulators for system wide suspend
+ * regulator_suspend_prepare - prepare regulators for system wide suspend
  * @state: system suspend state
  *
 * Configure each regulator with its suspend operating parameters for state.
@@ -1882,7 +1990,7 @@
 
 /**
  * rdev_get_drvdata - get rdev regulator driver data
- * @regulator: regulator
+ * @rdev: regulator
  *
  * Get rdev regulator driver private data. This call can be used in the
  * regulator driver context.
@@ -1919,7 +2027,7 @@
 
 /**
  * regulator_get_id - get regulator ID
- * @regulator: regulator
+ * @rdev: regulator
  */
 int rdev_get_id(struct regulator_dev *rdev)
 {
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index 773b29c..fe77730 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -102,7 +102,7 @@
 	uint8_t val, mask;
 
 	if (check_range(info, min_uV, max_uV)) {
-		pr_err("invalid voltage range (%d, %d) uV", min_uV, max_uV);
+		pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
 		return -EINVAL;
 	}
 
@@ -159,7 +159,7 @@
 	if (ret)
 		return ret;
 
-	return reg_val & (1 << info->enable_bit);
+	return !!(reg_val & (1 << info->enable_bit));
 }
 
 /* DA9030 specific operations */
@@ -172,7 +172,7 @@
 	int ret;
 
 	if (check_range(info, min_uV, max_uV)) {
-		pr_err("invalid voltage range (%d, %d) uV", min_uV, max_uV);
+		pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
 		return -EINVAL;
 	}
 
@@ -199,7 +199,7 @@
 	int thresh;
 
 	if (check_range(info, min_uV, max_uV)) {
-		pr_err("invalid voltage range (%d, %d) uV", min_uV, max_uV);
+		pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
 		return -EINVAL;
 	}
 
@@ -248,7 +248,7 @@
 	int ret;
 
 	if (check_range(info, min_uV, max_uV)) {
-		pr_err("invalid voltage range (%d, %d) uV", min_uV, max_uV);
+		pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
 		return -EINVAL;
 	}
 
@@ -273,7 +273,7 @@
 	uint8_t val, mask;
 
 	if (check_range(info, min_uV, max_uV)) {
-		pr_err("invalid voltage range (%d, %d) uV", min_uV, max_uV);
+		pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
 		return -EINVAL;
 	}
 
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
new file mode 100644
index 0000000..4cc85ec
--- /dev/null
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -0,0 +1,329 @@
+/* NXP PCF50633 PMIC Driver
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * Author: Balaji Rao <balajirrao@openmoko.org>
+ * All rights reserved.
+ *
+ * Broken down from monstrous PCF50633 driver mainly by
+ * Harald Welte and Andy Green and Werner Almesberger
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/pcf50633/core.h>
+#include <linux/mfd/pcf50633/pmic.h>
+
+#define PCF50633_REGULATOR(_name, _id) 		\
+	{					\
+		.name = _name, 			\
+		.id = _id,			\
+		.ops = &pcf50633_regulator_ops,	\
+		.type = REGULATOR_VOLTAGE, 	\
+		.owner = THIS_MODULE, 		\
+	}
+
+static const u8 pcf50633_regulator_registers[PCF50633_NUM_REGULATORS] = {
+	[PCF50633_REGULATOR_AUTO]	= PCF50633_REG_AUTOOUT,
+	[PCF50633_REGULATOR_DOWN1]	= PCF50633_REG_DOWN1OUT,
+	[PCF50633_REGULATOR_DOWN2]	= PCF50633_REG_DOWN2OUT,
+	[PCF50633_REGULATOR_MEMLDO]	= PCF50633_REG_MEMLDOOUT,
+	[PCF50633_REGULATOR_LDO1]	= PCF50633_REG_LDO1OUT,
+	[PCF50633_REGULATOR_LDO2]	= PCF50633_REG_LDO2OUT,
+	[PCF50633_REGULATOR_LDO3]	= PCF50633_REG_LDO3OUT,
+	[PCF50633_REGULATOR_LDO4]	= PCF50633_REG_LDO4OUT,
+	[PCF50633_REGULATOR_LDO5]	= PCF50633_REG_LDO5OUT,
+	[PCF50633_REGULATOR_LDO6]	= PCF50633_REG_LDO6OUT,
+	[PCF50633_REGULATOR_HCLDO]	= PCF50633_REG_HCLDOOUT,
+};
+
+/* Bits from voltage value */
+static u8 auto_voltage_bits(unsigned int millivolts)
+{
+	if (millivolts < 1800)
+		return 0;
+	if (millivolts > 3800)
+		return 0xff;
+
+	millivolts -= 625;
+
+	return millivolts / 25;
+}
+
+static u8 down_voltage_bits(unsigned int millivolts)
+{
+	if (millivolts < 625)
+		return 0;
+	else if (millivolts > 3000)
+		return 0xff;
+
+	millivolts -= 625;
+
+	return millivolts / 25;
+}
+
+static u8 ldo_voltage_bits(unsigned int millivolts)
+{
+	if (millivolts < 900)
+		return 0;
+	else if (millivolts > 3600)
+		return 0x1f;
+
+	millivolts -= 900;
+	return millivolts / 100;
+}
+
+/* Obtain voltage value from bits */
+static unsigned int auto_voltage_value(u8 bits)
+{
+	if (bits < 0x2f)
+		return 0;
+
+	return 625 + (bits * 25);
+}
+
+static unsigned int down_voltage_value(u8 bits)
+{
+	return 625 + (bits * 25);
+}
+
+static unsigned int ldo_voltage_value(u8 bits)
+{
+	bits &= 0x1f;
+
+	return 900 + (bits * 100);
+}
+
+static int pcf50633_regulator_set_voltage(struct regulator_dev *rdev,
+						int min_uV, int max_uV)
+{
+	struct pcf50633 *pcf;
+	int regulator_id, millivolts;
+	u8 volt_bits, regnr;
+
+	pcf = rdev_get_drvdata(rdev);
+
+	regulator_id = rdev_get_id(rdev);
+	if (regulator_id >= PCF50633_NUM_REGULATORS)
+		return -EINVAL;
+
+	millivolts = min_uV / 1000;
+
+	regnr = pcf50633_regulator_registers[regulator_id];
+
+	switch (regulator_id) {
+	case PCF50633_REGULATOR_AUTO:
+		volt_bits = auto_voltage_bits(millivolts);
+		break;
+	case PCF50633_REGULATOR_DOWN1:
+		volt_bits = down_voltage_bits(millivolts);
+		break;
+	case PCF50633_REGULATOR_DOWN2:
+		volt_bits = down_voltage_bits(millivolts);
+		break;
+	case PCF50633_REGULATOR_LDO1:
+	case PCF50633_REGULATOR_LDO2:
+	case PCF50633_REGULATOR_LDO3:
+	case PCF50633_REGULATOR_LDO4:
+	case PCF50633_REGULATOR_LDO5:
+	case PCF50633_REGULATOR_LDO6:
+	case PCF50633_REGULATOR_HCLDO:
+		volt_bits = ldo_voltage_bits(millivolts);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return pcf50633_reg_write(pcf, regnr, volt_bits);
+}
+
+static int pcf50633_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct pcf50633 *pcf;
+	int regulator_id, millivolts, volt_bits;
+	u8 regnr;
+
+	pcf = rdev_get_drvdata(rdev);
+
+	regulator_id = rdev_get_id(rdev);
+	if (regulator_id >= PCF50633_NUM_REGULATORS)
+		return -EINVAL;
+
+	regnr = pcf50633_regulator_registers[regulator_id];
+
+	volt_bits = pcf50633_reg_read(pcf, regnr);
+	if (volt_bits < 0)
+		return -1;
+
+	switch (regulator_id) {
+	case PCF50633_REGULATOR_AUTO:
+		millivolts = auto_voltage_value(volt_bits);
+		break;
+	case PCF50633_REGULATOR_DOWN1:
+		millivolts = down_voltage_value(volt_bits);
+		break;
+	case PCF50633_REGULATOR_DOWN2:
+		millivolts = down_voltage_value(volt_bits);
+		break;
+	case PCF50633_REGULATOR_LDO1:
+	case PCF50633_REGULATOR_LDO2:
+	case PCF50633_REGULATOR_LDO3:
+	case PCF50633_REGULATOR_LDO4:
+	case PCF50633_REGULATOR_LDO5:
+	case PCF50633_REGULATOR_LDO6:
+	case PCF50633_REGULATOR_HCLDO:
+		millivolts = ldo_voltage_value(volt_bits);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return millivolts * 1000;
+}
+
+static int pcf50633_regulator_enable(struct regulator_dev *rdev)
+{
+	struct pcf50633 *pcf = rdev_get_drvdata(rdev);
+	int regulator_id;
+	u8 regnr;
+
+	regulator_id = rdev_get_id(rdev);
+	if (regulator_id >= PCF50633_NUM_REGULATORS)
+		return -EINVAL;
+
+	/* The *ENA register is always one after the *OUT register */
+	regnr = pcf50633_regulator_registers[regulator_id] + 1;
+
+	return pcf50633_reg_set_bit_mask(pcf, regnr, PCF50633_REGULATOR_ON,
+						       PCF50633_REGULATOR_ON);
+}
+
+static int pcf50633_regulator_disable(struct regulator_dev *rdev)
+{
+	struct pcf50633 *pcf = rdev_get_drvdata(rdev);
+	int regulator_id;
+	u8 regnr;
+
+	regulator_id = rdev_get_id(rdev);
+	if (regulator_id >= PCF50633_NUM_REGULATORS)
+		return -EINVAL;
+
+	/* the *ENA register is always one after the *OUT register */
+	regnr = pcf50633_regulator_registers[regulator_id] + 1;
+
+	return pcf50633_reg_set_bit_mask(pcf, regnr,
+					PCF50633_REGULATOR_ON, 0);
+}
+
+static int pcf50633_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct pcf50633 *pcf = rdev_get_drvdata(rdev);
+	int regulator_id;
+	u8 regnr;
+
+	regulator_id = rdev_get_id(rdev);
+	if (regulator_id >= PCF50633_NUM_REGULATORS)
+		return -EINVAL;
+
+	/* the *ENA register is always one after the *OUT register */
+	regnr = pcf50633_regulator_registers[regulator_id] + 1;
+
+	return pcf50633_reg_read(pcf, regnr) & PCF50633_REGULATOR_ON;
+}
+
+static struct regulator_ops pcf50633_regulator_ops = {
+	.set_voltage = pcf50633_regulator_set_voltage,
+	.get_voltage = pcf50633_regulator_get_voltage,
+	.enable = pcf50633_regulator_enable,
+	.disable = pcf50633_regulator_disable,
+	.is_enabled = pcf50633_regulator_is_enabled,
+};
+
+static struct regulator_desc regulators[] = {
+	[PCF50633_REGULATOR_AUTO] =
+		PCF50633_REGULATOR("auto", PCF50633_REGULATOR_AUTO),
+	[PCF50633_REGULATOR_DOWN1] =
+		PCF50633_REGULATOR("down1", PCF50633_REGULATOR_DOWN1),
+	[PCF50633_REGULATOR_DOWN2] =
+		PCF50633_REGULATOR("down2", PCF50633_REGULATOR_DOWN2),
+	[PCF50633_REGULATOR_LDO1] =
+		PCF50633_REGULATOR("ldo1", PCF50633_REGULATOR_LDO1),
+	[PCF50633_REGULATOR_LDO2] =
+		PCF50633_REGULATOR("ldo2", PCF50633_REGULATOR_LDO2),
+	[PCF50633_REGULATOR_LDO3] =
+		PCF50633_REGULATOR("ldo3", PCF50633_REGULATOR_LDO3),
+	[PCF50633_REGULATOR_LDO4] =
+		PCF50633_REGULATOR("ldo4", PCF50633_REGULATOR_LDO4),
+	[PCF50633_REGULATOR_LDO5] =
+		PCF50633_REGULATOR("ldo5", PCF50633_REGULATOR_LDO5),
+	[PCF50633_REGULATOR_LDO6] =
+		PCF50633_REGULATOR("ldo6", PCF50633_REGULATOR_LDO6),
+	[PCF50633_REGULATOR_HCLDO] =
+		PCF50633_REGULATOR("hcldo", PCF50633_REGULATOR_HCLDO),
+	[PCF50633_REGULATOR_MEMLDO] =
+		PCF50633_REGULATOR("memldo", PCF50633_REGULATOR_MEMLDO),
+};
+
+static int __devinit pcf50633_regulator_probe(struct platform_device *pdev)
+{
+	struct regulator_dev *rdev;
+	struct pcf50633 *pcf;
+
+	/* Already set by core driver */
+	pcf = platform_get_drvdata(pdev);
+
+	rdev = regulator_register(&regulators[pdev->id], &pdev->dev, pcf);
+	if (IS_ERR(rdev))
+		return PTR_ERR(rdev);
+
+	if (pcf->pdata->regulator_registered)
+		pcf->pdata->regulator_registered(pcf, pdev->id);
+
+	return 0;
+}
+
+static int __devexit pcf50633_regulator_remove(struct platform_device *pdev)
+{
+	struct regulator_dev *rdev = platform_get_drvdata(pdev);
+
+	regulator_unregister(rdev);
+
+	return 0;
+}
+
+static struct platform_driver pcf50633_regulator_driver = {
+	.driver = {
+		.name = "pcf50633-regltr",
+	},
+	.probe = pcf50633_regulator_probe,
+	.remove = __devexit_p(pcf50633_regulator_remove),
+};
+
+static int __init pcf50633_regulator_init(void)
+{
+	return platform_driver_register(&pcf50633_regulator_driver);
+}
+module_init(pcf50633_regulator_init);
+
+static void __exit pcf50633_regulator_exit(void)
+{
+	platform_driver_unregister(&pcf50633_regulator_driver);
+}
+module_exit(pcf50633_regulator_exit);
+
+MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
+MODULE_DESCRIPTION("PCF50633 regulator driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pcf50633-regulator");
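
For reference, the encoding used by the voltage helpers above works out as follows (values follow directly from the formulas in this file): a DOWN converter request of 1300 mV encodes as (1300 - 625) / 25 = 27 (0x1b), an LDO request of 3300 mV encodes as (3300 - 900) / 100 = 24 (0x18), and decoding reverses this, e.g. 625 + 27 * 25 = 1300 mV.
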
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index c68c496..7aa3524 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1412,6 +1412,97 @@
 }
 EXPORT_SYMBOL_GPL(wm8350_register_regulator);
 
+/**
+ * wm8350_register_led - Register a WM8350 LED output
+ *
+ * @param wm8350 The WM8350 device to configure.
+ * @param lednum LED device index to create.
+ * @param dcdc The DCDC to use for the LED.
+ * @param isink The ISINK to use for the LED.
+ * @param pdata Configuration for the LED.
+ *
+ * The WM8350 supports the use of an ISINK together with a DCDC to
+ * provide a power-efficient LED driver.  This function registers the
+ * regulators and instantiates the platform device for a LED.  The
+ * operating modes for the LED regulators must be configured using
+ * wm8350_isink_set_flash(), wm8350_dcdc25_set_mode() and
+ * wm8350_dcdc_set_slot() prior to calling this function.
+ */
+int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
+			struct wm8350_led_platform_data *pdata)
+{
+	struct wm8350_led *led;
+	struct platform_device *pdev;
+	int ret;
+
+	if (lednum >= ARRAY_SIZE(wm8350->pmic.led) || lednum < 0) {
+		dev_err(wm8350->dev, "Invalid LED index %d\n", lednum);
+		return -ENODEV;
+	}
+
+	led = &wm8350->pmic.led[lednum];
+
+	if (led->pdev) {
+		dev_err(wm8350->dev, "LED %d already allocated\n", lednum);
+		return -EINVAL;
+	}
+
+	pdev = platform_device_alloc("wm8350-led", lednum);
+	if (pdev == NULL) {
+		dev_err(wm8350->dev, "Failed to allocate LED %d\n", lednum);
+		return -ENOMEM;
+	}
+
+	led->isink_consumer.dev = &pdev->dev;
+	led->isink_consumer.supply = "led_isink";
+	led->isink_init.num_consumer_supplies = 1;
+	led->isink_init.consumer_supplies = &led->isink_consumer;
+	led->isink_init.constraints.min_uA = 0;
+	led->isink_init.constraints.max_uA = pdata->max_uA;
+	led->isink_init.constraints.valid_ops_mask = REGULATOR_CHANGE_CURRENT;
+	led->isink_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
+	ret = wm8350_register_regulator(wm8350, isink, &led->isink_init);
+	if (ret != 0) {
+		platform_device_put(pdev);
+		return ret;
+	}
+
+	led->dcdc_consumer.dev = &pdev->dev;
+	led->dcdc_consumer.supply = "led_vcc";
+	led->dcdc_init.num_consumer_supplies = 1;
+	led->dcdc_init.consumer_supplies = &led->dcdc_consumer;
+	led->dcdc_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
+	ret = wm8350_register_regulator(wm8350, dcdc, &led->dcdc_init);
+	if (ret != 0) {
+		platform_device_put(pdev);
+		return ret;
+	}
+
+	switch (isink) {
+	case WM8350_ISINK_A:
+		wm8350->pmic.isink_A_dcdc = dcdc;
+		break;
+	case WM8350_ISINK_B:
+		wm8350->pmic.isink_B_dcdc = dcdc;
+		break;
+	}
+
+	pdev->dev.platform_data = pdata;
+	pdev->dev.parent = wm8350->dev;
+	ret = platform_device_add(pdev);
+	if (ret != 0) {
+		dev_err(wm8350->dev, "Failed to register LED %d: %d\n",
+			lednum, ret);
+		platform_device_put(pdev);
+		return ret;
+	}
+
+	led->pdev = pdev;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(wm8350_register_led);
+
 static struct platform_driver wm8350_regulator_driver = {
 	.probe = wm8350_regulator_probe,
 	.remove = wm8350_regulator_remove,
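
A hedged board-code sketch of calling the new wm8350_register_led() helper; the LED index, the current limit and the WM8350_DCDC_5 constant are illustrative assumptions, and the ISINK/DCDC operating modes are assumed to be configured beforehand as the kerneldoc requires:

static struct wm8350_led_platform_data board_led_pdata = {
	.max_uA	= 20000,		/* illustrative current limit */
};

static int board_add_status_led(struct wm8350 *wm8350)
{
	/* wm8350_isink_set_flash(), wm8350_dcdc25_set_mode() and
	 * wm8350_dcdc_set_slot() are assumed to have been called already.
	 */
	return wm8350_register_led(wm8350, 0, WM8350_DCDC_5, WM8350_ISINK_A,
				   &board_led_pdata);
}
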
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 48b372e..56e23d4 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -289,7 +289,7 @@
 	},
 };
 
-static int __init wm8400_regulator_probe(struct platform_device *pdev)
+static int __devinit wm8400_regulator_probe(struct platform_device *pdev)
 {
 	struct regulator_dev *rdev;
 
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4ad831d..cced4d1 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -502,6 +502,13 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called "rtc-wm8350".
 
+config RTC_DRV_PCF50633
+	depends on MFD_PCF50633
+	tristate "NXP PCF50633 RTC"
+	help
+	  If you say yes here you get support for the RTC subsystem of the
+	  NXP PCF50633 used in embedded systems.
+
 comment "on-CPU RTC drivers"
 
 config RTC_DRV_OMAP
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 9a4340d..6e28021 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -74,3 +74,4 @@
 obj-$(CONFIG_RTC_DRV_VR41XX)	+= rtc-vr41xx.o
 obj-$(CONFIG_RTC_DRV_WM8350)	+= rtc-wm8350.o
 obj-$(CONFIG_RTC_DRV_X1205)	+= rtc-x1205.o
+obj-$(CONFIG_RTC_DRV_PCF50633)	+= rtc-pcf50633.o
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 162330b..7e5155e 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -86,13 +86,11 @@
 
 
 struct ds1307 {
-	u8			reg_addr;
 	u8			regs[11];
 	enum ds_type		type;
 	unsigned long		flags;
 #define HAS_NVRAM	0		/* bit 0 == sysfs file active */
 #define HAS_ALARM	1		/* bit 1 == irq claimed */
-	struct i2c_msg		msg[2];
 	struct i2c_client	*client;
 	struct rtc_device	*rtc;
 	struct work_struct	work;
@@ -204,13 +202,9 @@
 	int		tmp;
 
 	/* read the RTC date and time registers all at once */
-	ds1307->reg_addr = 0;
-	ds1307->msg[1].flags = I2C_M_RD;
-	ds1307->msg[1].len = 7;
-
-	tmp = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent),
-			ds1307->msg, 2);
-	if (tmp != 2) {
+	tmp = i2c_smbus_read_i2c_block_data(ds1307->client,
+		DS1307_REG_SECS, 7, ds1307->regs);
+	if (tmp != 7) {
 		dev_err(dev, "%s error %d\n", "read", tmp);
 		return -EIO;
 	}
@@ -257,7 +251,6 @@
 		t->tm_hour, t->tm_mday,
 		t->tm_mon, t->tm_year, t->tm_wday);
 
-	*buf++ = 0;		/* first register addr */
 	buf[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
 	buf[DS1307_REG_MIN] = bin2bcd(t->tm_min);
 	buf[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
@@ -282,23 +275,19 @@
 		break;
 	}
 
-	ds1307->msg[1].flags = 0;
-	ds1307->msg[1].len = 8;
-
 	dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
 		"write", buf[0], buf[1], buf[2], buf[3],
 		buf[4], buf[5], buf[6]);
 
-	result = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent),
-			&ds1307->msg[1], 1);
-	if (result != 1) {
-		dev_err(dev, "%s error %d\n", "write", tmp);
-		return -EIO;
+	result = i2c_smbus_write_i2c_block_data(ds1307->client, 0, 7, buf);
+	if (result < 0) {
+		dev_err(dev, "%s error %d\n", "write", result);
+		return result;
 	}
 	return 0;
 }
 
-static int ds1307_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t)
 {
 	struct i2c_client       *client = to_i2c_client(dev);
 	struct ds1307		*ds1307 = i2c_get_clientdata(client);
@@ -308,13 +297,9 @@
 		return -EINVAL;
 
 	/* read all ALARM1, ALARM2, and status registers at once */
-	ds1307->reg_addr = DS1339_REG_ALARM1_SECS;
-	ds1307->msg[1].flags = I2C_M_RD;
-	ds1307->msg[1].len = 9;
-
-	ret = i2c_transfer(to_i2c_adapter(client->dev.parent),
-			ds1307->msg, 2);
-	if (ret != 2) {
+	ret = i2c_smbus_read_i2c_block_data(client,
+			DS1339_REG_ALARM1_SECS, 9, ds1307->regs);
+	if (ret != 9) {
 		dev_err(dev, "%s error %d\n", "alarm read", ret);
 		return -EIO;
 	}
@@ -353,7 +338,7 @@
 	return 0;
 }
 
-static int ds1307_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
 {
 	struct i2c_client       *client = to_i2c_client(dev);
 	struct ds1307		*ds1307 = i2c_get_clientdata(client);
@@ -371,13 +356,9 @@
 		t->enabled, t->pending);
 
 	/* read current status of both alarms and the chip */
-	ds1307->reg_addr = DS1339_REG_ALARM1_SECS;
-	ds1307->msg[1].flags = I2C_M_RD;
-	ds1307->msg[1].len = 9;
-
-	ret = i2c_transfer(to_i2c_adapter(client->dev.parent),
-			ds1307->msg, 2);
-	if (ret != 2) {
+	ret = i2c_smbus_read_i2c_block_data(client,
+			DS1339_REG_ALARM1_SECS, 9, buf);
+	if (ret != 9) {
 		dev_err(dev, "%s error %d\n", "alarm write", ret);
 		return -EIO;
 	}
@@ -392,7 +373,6 @@
 			ds1307->regs[6], control, status);
 
 	/* set ALARM1, using 24 hour and day-of-month modes */
-	*buf++ = DS1339_REG_ALARM1_SECS;	/* first register addr */
 	buf[0] = bin2bcd(t->time.tm_sec);
 	buf[1] = bin2bcd(t->time.tm_min);
 	buf[2] = bin2bcd(t->time.tm_hour);
@@ -411,14 +391,11 @@
 	}
 	buf[8] = status & ~(DS1337_BIT_A1I | DS1337_BIT_A2I);
 
-	ds1307->msg[1].flags = 0;
-	ds1307->msg[1].len = 10;
-
-	ret = i2c_transfer(to_i2c_adapter(client->dev.parent),
-			&ds1307->msg[1], 1);
-	if (ret != 1) {
+	ret = i2c_smbus_write_i2c_block_data(client,
+			DS1339_REG_ALARM1_SECS, 9, buf);
+	if (ret < 0) {
 		dev_err(dev, "can't set alarm time\n");
-		return -EIO;
+		return ret;
 	}
 
 	return 0;
@@ -475,8 +452,8 @@
 static const struct rtc_class_ops ds13xx_rtc_ops = {
 	.read_time	= ds1307_get_time,
 	.set_time	= ds1307_set_time,
-	.read_alarm	= ds1307_read_alarm,
-	.set_alarm	= ds1307_set_alarm,
+	.read_alarm	= ds1337_read_alarm,
+	.set_alarm	= ds1337_set_alarm,
 	.ioctl		= ds1307_ioctl,
 };
 
@@ -490,7 +467,6 @@
 {
 	struct i2c_client	*client;
 	struct ds1307		*ds1307;
-	struct i2c_msg		msg[2];
 	int			result;
 
 	client = kobj_to_i2c_client(kobj);
@@ -503,24 +479,10 @@
 	if (unlikely(!count))
 		return count;
 
-	msg[0].addr = client->addr;
-	msg[0].flags = 0;
-	msg[0].len = 1;
-	msg[0].buf = buf;
-
-	buf[0] = 8 + off;
-
-	msg[1].addr = client->addr;
-	msg[1].flags = I2C_M_RD;
-	msg[1].len = count;
-	msg[1].buf = buf;
-
-	result = i2c_transfer(to_i2c_adapter(client->dev.parent), msg, 2);
-	if (result != 2) {
+	result = i2c_smbus_read_i2c_block_data(client, 8 + off, count, buf);
+	if (result < 0)
 		dev_err(&client->dev, "%s error %d\n", "nvram read", result);
-		return -EIO;
-	}
-	return count;
+	return result;
 }
 
 static ssize_t
@@ -528,8 +490,7 @@
 		char *buf, loff_t off, size_t count)
 {
 	struct i2c_client	*client;
-	u8			buffer[NVRAM_SIZE + 1];
-	int			ret;
+	int			result;
 
 	client = kobj_to_i2c_client(kobj);
 
@@ -540,11 +501,12 @@
 	if (unlikely(!count))
 		return count;
 
-	buffer[0] = 8 + off;
-	memcpy(buffer + 1, buf, count);
-
-	ret = i2c_master_send(client, buffer, count + 1);
-	return (ret < 0) ? ret : (ret - 1);
+	result = i2c_smbus_write_i2c_block_data(client, 8 + off, count, buf);
+	if (result < 0) {
+		dev_err(&client->dev, "%s error %d\n", "nvram write", result);
+		return result;
+	}
+	return count;
 }
 
 static struct bin_attribute nvram = {
@@ -571,9 +533,11 @@
 	const struct chip_desc	*chip = &chips[id->driver_data];
 	struct i2c_adapter	*adapter = to_i2c_adapter(client->dev.parent);
 	int			want_irq = false;
+	unsigned char		*buf;
 
 	if (!i2c_check_functionality(adapter,
-			I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
+			I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
+			I2C_FUNC_SMBUS_I2C_BLOCK))
 		return -EIO;
 
 	if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL)))
@@ -581,18 +545,8 @@
 
 	ds1307->client = client;
 	i2c_set_clientdata(client, ds1307);
-
-	ds1307->msg[0].addr = client->addr;
-	ds1307->msg[0].flags = 0;
-	ds1307->msg[0].len = 1;
-	ds1307->msg[0].buf = &ds1307->reg_addr;
-
-	ds1307->msg[1].addr = client->addr;
-	ds1307->msg[1].flags = I2C_M_RD;
-	ds1307->msg[1].len = sizeof(ds1307->regs);
-	ds1307->msg[1].buf = ds1307->regs;
-
 	ds1307->type = id->driver_data;
+	buf = ds1307->regs;
 
 	switch (ds1307->type) {
 	case ds_1337:
@@ -602,21 +556,15 @@
 			INIT_WORK(&ds1307->work, ds1307_work);
 			want_irq = true;
 		}
-
-		ds1307->reg_addr = DS1337_REG_CONTROL;
-		ds1307->msg[1].len = 2;
-
 		/* get registers that the "rtc" read below won't read... */
-		tmp = i2c_transfer(adapter, ds1307->msg, 2);
+		tmp = i2c_smbus_read_i2c_block_data(ds1307->client,
+				DS1337_REG_CONTROL, 2, buf);
 		if (tmp != 2) {
 			pr_debug("read error %d\n", tmp);
 			err = -EIO;
 			goto exit_free;
 		}
 
-		ds1307->reg_addr = 0;
-		ds1307->msg[1].len = sizeof(ds1307->regs);
-
 		/* oscillator off?  turn it on, so clock can tick. */
 		if (ds1307->regs[0] & DS1337_BIT_nEOSC)
 			ds1307->regs[0] &= ~DS1337_BIT_nEOSC;
@@ -647,9 +595,8 @@
 
 read_rtc:
 	/* read RTC registers */
-
-	tmp = i2c_transfer(adapter, ds1307->msg, 2);
-	if (tmp != 2) {
+	tmp = i2c_smbus_read_i2c_block_data(ds1307->client, 0, 8, buf);
+	if (tmp != 8) {
 		pr_debug("read error %d\n", tmp);
 		err = -EIO;
 		goto exit_free;
@@ -707,22 +654,6 @@
 		break;
 	}
 
-	tmp = ds1307->regs[DS1307_REG_SECS];
-	tmp = bcd2bin(tmp & 0x7f);
-	if (tmp > 60)
-		goto exit_bad;
-	tmp = bcd2bin(ds1307->regs[DS1307_REG_MIN] & 0x7f);
-	if (tmp > 60)
-		goto exit_bad;
-
-	tmp = bcd2bin(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
-	if (tmp == 0 || tmp > 31)
-		goto exit_bad;
-
-	tmp = bcd2bin(ds1307->regs[DS1307_REG_MONTH] & 0x1f);
-	if (tmp == 0 || tmp > 12)
-		goto exit_bad;
-
 	tmp = ds1307->regs[DS1307_REG_HOUR];
 	switch (ds1307->type) {
 	case ds_1340:
@@ -779,13 +710,6 @@
 
 	return 0;
 
-exit_bad:
-	dev_dbg(&client->dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
-			"bogus register",
-			ds1307->regs[0], ds1307->regs[1],
-			ds1307->regs[2], ds1307->regs[3],
-			ds1307->regs[4], ds1307->regs[5],
-			ds1307->regs[6]);
 exit_irq:
 	if (ds1307->rtc)
 		rtc_device_unregister(ds1307->rtc);
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 23a07fe..0b6b773 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -630,7 +630,7 @@
  static void __exit
 ds1511_rtc_exit(void)
 {
-	return platform_driver_unregister(&ds1511_rtc_driver);
+	platform_driver_unregister(&ds1511_rtc_driver);
 }
 
 module_init(ds1511_rtc_init);
diff --git a/drivers/rtc/rtc-parisc.c b/drivers/rtc/rtc-parisc.c
index 346d633..c6bfa6f 100644
--- a/drivers/rtc/rtc-parisc.c
+++ b/drivers/rtc/rtc-parisc.c
@@ -34,7 +34,8 @@
 static int parisc_set_time(struct device *dev, struct rtc_time *tm)
 {
 	struct parisc_rtc *p = dev_get_drvdata(dev);
-	unsigned long flags, ret;
+	unsigned long flags;
+	int ret;
 
 	spin_lock_irqsave(&p->lock, flags);
 	ret = set_rtc_time(tm);
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
new file mode 100644
index 0000000..f4dd87e
--- /dev/null
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -0,0 +1,344 @@
+/* NXP PCF50633 RTC Driver
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * Author: Balaji Rao <balajirrao@openmoko.org>
+ * All rights reserved.
+ *
+ * Broken down from monstrous PCF50633 driver mainly by
+ * Harald Welte, Andy Green and Werner Almesberger
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/err.h>
+
+#include <linux/mfd/pcf50633/core.h>
+
+#define PCF50633_REG_RTCSC	0x59 /* Second */
+#define PCF50633_REG_RTCMN	0x5a /* Minute */
+#define PCF50633_REG_RTCHR	0x5b /* Hour */
+#define PCF50633_REG_RTCWD	0x5c /* Weekday */
+#define PCF50633_REG_RTCDT	0x5d /* Day */
+#define PCF50633_REG_RTCMT	0x5e /* Month */
+#define PCF50633_REG_RTCYR	0x5f /* Year */
+#define PCF50633_REG_RTCSCA	0x60 /* Alarm Second */
+#define PCF50633_REG_RTCMNA	0x61 /* Alarm Minute */
+#define PCF50633_REG_RTCHRA	0x62 /* Alarm Hour */
+#define PCF50633_REG_RTCWDA	0x63 /* Alarm Weekday */
+#define PCF50633_REG_RTCDTA	0x64 /* Alarm Day */
+#define PCF50633_REG_RTCMTA	0x65 /* Alarm Month */
+#define PCF50633_REG_RTCYRA	0x66 /* Alarm Year */
+
+enum pcf50633_time_indexes {
+	PCF50633_TI_SEC,
+	PCF50633_TI_MIN,
+	PCF50633_TI_HOUR,
+	PCF50633_TI_WKDAY,
+	PCF50633_TI_DAY,
+	PCF50633_TI_MONTH,
+	PCF50633_TI_YEAR,
+	PCF50633_TI_EXTENT /* always last */
+};
+
+struct pcf50633_time {
+	u_int8_t time[PCF50633_TI_EXTENT];
+};
+
+struct pcf50633_rtc {
+	int alarm_enabled;
+	int second_enabled;
+
+	struct pcf50633 *pcf;
+	struct rtc_device *rtc_dev;
+};
+
+static void pcf2rtc_time(struct rtc_time *rtc, struct pcf50633_time *pcf)
+{
+	rtc->tm_sec = bcd2bin(pcf->time[PCF50633_TI_SEC]);
+	rtc->tm_min = bcd2bin(pcf->time[PCF50633_TI_MIN]);
+	rtc->tm_hour = bcd2bin(pcf->time[PCF50633_TI_HOUR]);
+	rtc->tm_wday = bcd2bin(pcf->time[PCF50633_TI_WKDAY]);
+	rtc->tm_mday = bcd2bin(pcf->time[PCF50633_TI_DAY]);
+	rtc->tm_mon = bcd2bin(pcf->time[PCF50633_TI_MONTH]);
+	rtc->tm_year = bcd2bin(pcf->time[PCF50633_TI_YEAR]) + 100;
+}
+
+static void rtc2pcf_time(struct pcf50633_time *pcf, struct rtc_time *rtc)
+{
+	pcf->time[PCF50633_TI_SEC] = bin2bcd(rtc->tm_sec);
+	pcf->time[PCF50633_TI_MIN] = bin2bcd(rtc->tm_min);
+	pcf->time[PCF50633_TI_HOUR] = bin2bcd(rtc->tm_hour);
+	pcf->time[PCF50633_TI_WKDAY] = bin2bcd(rtc->tm_wday);
+	pcf->time[PCF50633_TI_DAY] = bin2bcd(rtc->tm_mday);
+	pcf->time[PCF50633_TI_MONTH] = bin2bcd(rtc->tm_mon);
+	pcf->time[PCF50633_TI_YEAR] = bin2bcd(rtc->tm_year % 100);
+}
+
+static int
+pcf50633_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct pcf50633_rtc *rtc = dev_get_drvdata(dev);
+	int err;
+
+	if (enabled)
+		err = pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
+	else
+		err = pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM);
+
+	if (err < 0)
+		return err;
+
+	rtc->alarm_enabled = enabled;
+
+	return 0;
+}
+
+static int
+pcf50633_rtc_update_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct pcf50633_rtc *rtc = dev_get_drvdata(dev);
+	int err;
+
+	if (enabled)
+		err = pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_SECOND);
+	else
+		err = pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_SECOND);
+
+	if (err < 0)
+		return err;
+
+	rtc->second_enabled = enabled;
+
+	return 0;
+}
+
+static int pcf50633_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	struct pcf50633_rtc *rtc;
+	struct pcf50633_time pcf_tm;
+	int ret;
+
+	rtc = dev_get_drvdata(dev);
+
+	ret = pcf50633_read_block(rtc->pcf, PCF50633_REG_RTCSC,
+					    PCF50633_TI_EXTENT,
+					    &pcf_tm.time[0]);
+	if (ret != PCF50633_TI_EXTENT) {
+		dev_err(dev, "Failed to read time\n");
+		return -EIO;
+	}
+
+	dev_dbg(dev, "PCF_TIME: %02x.%02x.%02x %02x:%02x:%02x\n",
+		pcf_tm.time[PCF50633_TI_DAY],
+		pcf_tm.time[PCF50633_TI_MONTH],
+		pcf_tm.time[PCF50633_TI_YEAR],
+		pcf_tm.time[PCF50633_TI_HOUR],
+		pcf_tm.time[PCF50633_TI_MIN],
+		pcf_tm.time[PCF50633_TI_SEC]);
+
+	pcf2rtc_time(tm, &pcf_tm);
+
+	dev_dbg(dev, "RTC_TIME: %u.%u.%u %u:%u:%u\n",
+		tm->tm_mday, tm->tm_mon, tm->tm_year,
+		tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+	return rtc_valid_tm(tm);
+}
+
+static int pcf50633_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	struct pcf50633_rtc *rtc;
+	struct pcf50633_time pcf_tm;
+	int second_masked, alarm_masked, ret = 0;
+
+	rtc = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "RTC_TIME: %u.%u.%u %u:%u:%u\n",
+		tm->tm_mday, tm->tm_mon, tm->tm_year,
+		tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+	rtc2pcf_time(&pcf_tm, tm);
+
+	dev_dbg(dev, "PCF_TIME: %02x.%02x.%02x %02x:%02x:%02x\n",
+		pcf_tm.time[PCF50633_TI_DAY],
+		pcf_tm.time[PCF50633_TI_MONTH],
+		pcf_tm.time[PCF50633_TI_YEAR],
+		pcf_tm.time[PCF50633_TI_HOUR],
+		pcf_tm.time[PCF50633_TI_MIN],
+		pcf_tm.time[PCF50633_TI_SEC]);
+
+
+	second_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_SECOND);
+	alarm_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_ALARM);
+
+	if (!second_masked)
+		pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_SECOND);
+	if (!alarm_masked)
+		pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM);
+
+	/* Returns 0 on success */
+	ret = pcf50633_write_block(rtc->pcf, PCF50633_REG_RTCSC,
+					     PCF50633_TI_EXTENT,
+					     &pcf_tm.time[0]);
+
+	if (!second_masked)
+		pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_SECOND);
+	if (!alarm_masked)
+		pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
+
+	return ret;
+}
+
+static int pcf50633_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct pcf50633_rtc *rtc;
+	struct pcf50633_time pcf_tm;
+	int ret = 0;
+
+	rtc = dev_get_drvdata(dev);
+
+	alrm->enabled = rtc->alarm_enabled;
+
+	ret = pcf50633_read_block(rtc->pcf, PCF50633_REG_RTCSCA,
+				PCF50633_TI_EXTENT, &pcf_tm.time[0]);
+	if (ret != PCF50633_TI_EXTENT) {
+		dev_err(dev, "Failed to read alarm time\n");
+		return -EIO;
+	}
+
+	pcf2rtc_time(&alrm->time, &pcf_tm);
+
+	return rtc_valid_tm(&alrm->time);
+}
+
+static int pcf50633_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	struct pcf50633_rtc *rtc;
+	struct pcf50633_time pcf_tm;
+	int alarm_masked, ret = 0;
+
+	rtc = dev_get_drvdata(dev);
+
+	rtc2pcf_time(&pcf_tm, &alrm->time);
+
+	/* as mktime() does, ignore tm_wday */
+	pcf_tm.time[PCF50633_TI_WKDAY] = 7;
+
+	alarm_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_ALARM);
+
+	/* disable alarm interrupt */
+	if (!alarm_masked)
+		pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM);
+
+	/* Returns 0 on success */
+	ret = pcf50633_write_block(rtc->pcf, PCF50633_REG_RTCSCA,
+				PCF50633_TI_EXTENT, &pcf_tm.time[0]);
+
+	if (!alarm_masked)
+		pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
+
+	return ret;
+}
+
+static struct rtc_class_ops pcf50633_rtc_ops = {
+	.read_time		= pcf50633_rtc_read_time,
+	.set_time		= pcf50633_rtc_set_time,
+	.read_alarm		= pcf50633_rtc_read_alarm,
+	.set_alarm		= pcf50633_rtc_set_alarm,
+	.alarm_irq_enable	= pcf50633_rtc_alarm_irq_enable,
+	.update_irq_enable	= pcf50633_rtc_update_irq_enable,
+};
+
+static void pcf50633_rtc_irq(int irq, void *data)
+{
+	struct pcf50633_rtc *rtc = data;
+
+	switch (irq) {
+	case PCF50633_IRQ_ALARM:
+		rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
+		break;
+	case PCF50633_IRQ_SECOND:
+		rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF);
+		break;
+	}
+}
+
+static int __devinit pcf50633_rtc_probe(struct platform_device *pdev)
+{
+	struct pcf50633_subdev_pdata *pdata;
+	struct pcf50633_rtc *rtc;
+
+
+	rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
+	if (!rtc)
+		return -ENOMEM;
+
+	pdata = pdev->dev.platform_data;
+	rtc->pcf = pdata->pcf;
+	platform_set_drvdata(pdev, rtc);
+	rtc->rtc_dev = rtc_device_register("pcf50633-rtc", &pdev->dev,
+				&pcf50633_rtc_ops, THIS_MODULE);
+
+	if (IS_ERR(rtc->rtc_dev)) {
+		kfree(rtc);
+		return PTR_ERR(rtc->rtc_dev);
+	}
+
+	pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_ALARM,
+					pcf50633_rtc_irq, rtc);
+	pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_SECOND,
+					pcf50633_rtc_irq, rtc);
+
+	return 0;
+}
+
+static int __devexit pcf50633_rtc_remove(struct platform_device *pdev)
+{
+	struct pcf50633_rtc *rtc;
+
+	rtc = platform_get_drvdata(pdev);
+
+	pcf50633_free_irq(rtc->pcf, PCF50633_IRQ_ALARM);
+	pcf50633_free_irq(rtc->pcf, PCF50633_IRQ_SECOND);
+
+	rtc_device_unregister(rtc->rtc_dev);
+	kfree(rtc);
+
+	return 0;
+}
+
+static struct platform_driver pcf50633_rtc_driver = {
+	.driver = {
+		.name = "pcf50633-rtc",
+	},
+	.probe = pcf50633_rtc_probe,
+	.remove = __devexit_p(pcf50633_rtc_remove),
+};
+
+static int __init pcf50633_rtc_init(void)
+{
+	return platform_driver_register(&pcf50633_rtc_driver);
+}
+module_init(pcf50633_rtc_init);
+
+static void __exit pcf50633_rtc_exit(void)
+{
+	platform_driver_unregister(&pcf50633_rtc_driver);
+}
+module_exit(pcf50633_rtc_exit);
+
+MODULE_DESCRIPTION("PCF50633 RTC driver");
+MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
+MODULE_LICENSE("GPL");
+
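A minimal userspace sketch, not part of this patch, showing how the new pcf50633 RTC can be exercised through the generic rtc character device; the /dev/rtc1 node name is an assumption and depends on which RTC registers first.

/* Hypothetical check for the pcf50633 RTC via the rtc chardev ABI:
 * RTC_RD_TIME ends up in pcf50633_rtc_read_time(), RTC_AIE_ON in
 * pcf50633_rtc_alarm_irq_enable().  /dev/rtc1 is an assumed node name. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc1", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/rtc1");
		return 1;
	}
	if (ioctl(fd, RTC_RD_TIME, &tm) == 0)
		printf("%04d-%02d-%02d %02d:%02d:%02d\n",
		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		       tm.tm_hour, tm.tm_min, tm.tm_sec);
	if (ioctl(fd, RTC_AIE_ON, 0) < 0)
		perror("RTC_AIE_ON");
	close(fd);
	return 0;
}
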
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index cc7eb87..bd56a03 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -27,6 +27,8 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 
+#include <mach/hardware.h>
+
 #define TIMER_FREQ		CLOCK_TICK_RATE
 #define RTC_DEF_DIVIDER		(32768 - 1)
 #define RTC_DEF_TRIM		0
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index dc0b622..7d1547b 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -399,7 +399,7 @@
 
 static __exit void stk17ta8_exit(void)
 {
-	return platform_driver_unregister(&stk17ta8_rtc_driver);
+	platform_driver_unregister(&stk17ta8_rtc_driver);
 }
 
 module_init(stk17ta8_init);
diff --git a/drivers/rtc/rtc-twl4030.c b/drivers/rtc/rtc-twl4030.c
index 8ce5f74..ad35f76 100644
--- a/drivers/rtc/rtc-twl4030.c
+++ b/drivers/rtc/rtc-twl4030.c
@@ -120,7 +120,7 @@
 static unsigned char rtc_irq_bits;
 
 /*
- * Enable timer and/or alarm interrupts.
+ * Enable 1/second update and/or alarm interrupts.
  */
 static int set_rtc_irq_bit(unsigned char bit)
 {
@@ -128,6 +128,7 @@
 	int ret;
 
 	val = rtc_irq_bits | bit;
+	val &= ~BIT_RTC_INTERRUPTS_REG_EVERY_M;
 	ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
 	if (ret == 0)
 		rtc_irq_bits = val;
@@ -136,7 +137,7 @@
 }
 
 /*
- * Disable timer and/or alarm interrupts.
+ * Disable update and/or alarm interrupts.
  */
 static int mask_rtc_irq_bit(unsigned char bit)
 {
@@ -151,7 +152,7 @@
 	return ret;
 }
 
-static inline int twl4030_rtc_alarm_irq_set_state(int enabled)
+static int twl4030_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
 {
 	int ret;
 
@@ -163,7 +164,7 @@
 	return ret;
 }
 
-static inline int twl4030_rtc_irq_set_state(int enabled)
+static int twl4030_rtc_update_irq_enable(struct device *dev, unsigned enabled)
 {
 	int ret;
 
@@ -292,7 +293,7 @@
 	unsigned char alarm_data[ALL_TIME_REGS + 1];
 	int ret;
 
-	ret = twl4030_rtc_alarm_irq_set_state(0);
+	ret = twl4030_rtc_alarm_irq_enable(dev, 0);
 	if (ret)
 		goto out;
 
@@ -312,35 +313,11 @@
 	}
 
 	if (alm->enabled)
-		ret = twl4030_rtc_alarm_irq_set_state(1);
+		ret = twl4030_rtc_alarm_irq_enable(dev, 1);
 out:
 	return ret;
 }
 
-#ifdef	CONFIG_RTC_INTF_DEV
-
-static int twl4030_rtc_ioctl(struct device *dev, unsigned int cmd,
-			     unsigned long arg)
-{
-	switch (cmd) {
-	case RTC_AIE_OFF:
-		return twl4030_rtc_alarm_irq_set_state(0);
-	case RTC_AIE_ON:
-		return twl4030_rtc_alarm_irq_set_state(1);
-	case RTC_UIE_OFF:
-		return twl4030_rtc_irq_set_state(0);
-	case RTC_UIE_ON:
-		return twl4030_rtc_irq_set_state(1);
-
-	default:
-		return -ENOIOCTLCMD;
-	}
-}
-
-#else
-#define	twl4030_rtc_ioctl	NULL
-#endif
-
 static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
 {
 	unsigned long events = 0;
@@ -400,11 +377,12 @@
 }
 
 static struct rtc_class_ops twl4030_rtc_ops = {
-	.ioctl		= twl4030_rtc_ioctl,
 	.read_time	= twl4030_rtc_read_time,
 	.set_time	= twl4030_rtc_set_time,
 	.read_alarm	= twl4030_rtc_read_alarm,
 	.set_alarm	= twl4030_rtc_set_alarm,
+	.alarm_irq_enable = twl4030_rtc_alarm_irq_enable,
+	.update_irq_enable = twl4030_rtc_update_irq_enable,
 };
 
 /*----------------------------------------------------------------------*/
@@ -422,7 +400,7 @@
 	rtc = rtc_device_register(pdev->name,
 				  &pdev->dev, &twl4030_rtc_ops, THIS_MODULE);
 	if (IS_ERR(rtc)) {
-		ret = -EINVAL;
+		ret = PTR_ERR(rtc);
 		dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
 			PTR_ERR(rtc));
 		goto out0;
@@ -432,7 +410,6 @@
 	platform_set_drvdata(pdev, rtc);
 
 	ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
-
 	if (ret < 0)
 		goto out1;
 
@@ -475,7 +452,6 @@
 
 	return ret;
 
-
 out2:
 	free_irq(irq, rtc);
 out1:
@@ -506,8 +482,9 @@
 
 static void twl4030_rtc_shutdown(struct platform_device *pdev)
 {
-	mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M |
-			 BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+	/* mask timer interrupts, but leave alarm interrupts on to enable
+	   power-on when alarm is triggered */
+	mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
 }
 
 #ifdef CONFIG_PM
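
The twl4030 changes above drop the driver-private RTC_AIE_*/RTC_UIE_* ioctl handler in favour of the alarm_irq_enable/update_irq_enable class ops, the same pair the new pcf50633 driver implements. Roughly, and only as an illustration of the intent rather than the actual rtc-dev.c code, the generic layer can then route those ioctls itself:

/* Illustration only -- not the real RTC core code.  Shows how the generic
 * layer can map the old ioctl commands onto the new rtc_class_ops hooks,
 * so drivers no longer need a private .ioctl implementation. */
#include <linux/rtc.h>
#include <linux/errno.h>

static long example_rtc_irq_ioctl(struct rtc_device *rtc, unsigned int cmd)
{
	const struct rtc_class_ops *ops = rtc->ops;
	struct device *dev = rtc->dev.parent;

	switch (cmd) {
	case RTC_AIE_ON:
		return ops->alarm_irq_enable ? ops->alarm_irq_enable(dev, 1) : -EINVAL;
	case RTC_AIE_OFF:
		return ops->alarm_irq_enable ? ops->alarm_irq_enable(dev, 0) : -EINVAL;
	case RTC_UIE_ON:
		return ops->update_irq_enable ? ops->update_irq_enable(dev, 1) : -EINVAL;
	case RTC_UIE_OFF:
		return ops->update_irq_enable ? ops->update_irq_enable(dev, 0) : -EINVAL;
	}
	return -ENOIOCTLCMD;
}
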
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 570ae59..bd59149 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -336,6 +336,9 @@
 dasd_state_ready_to_online(struct dasd_device * device)
 {
 	int rc;
+	struct gendisk *disk;
+	struct disk_part_iter piter;
+	struct hd_struct *part;
 
 	if (device->discipline->ready_to_online) {
 		rc = device->discipline->ready_to_online(device);
@@ -343,8 +346,14 @@
 			return rc;
 	}
 	device->state = DASD_STATE_ONLINE;
-	if (device->block)
+	if (device->block) {
 		dasd_schedule_block_bh(device->block);
+		disk = device->block->bdev->bd_disk;
+		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+		while ((part = disk_part_iter_next(&piter)))
+			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
+		disk_part_iter_exit(&piter);
+	}
 	return 0;
 }
 
@@ -354,6 +363,9 @@
 static int dasd_state_online_to_ready(struct dasd_device *device)
 {
 	int rc;
+	struct gendisk *disk;
+	struct disk_part_iter piter;
+	struct hd_struct *part;
 
 	if (device->discipline->online_to_ready) {
 		rc = device->discipline->online_to_ready(device);
@@ -361,6 +373,13 @@
 			return rc;
 	}
 	device->state = DASD_STATE_READY;
+	if (device->block) {
+		disk = device->block->bdev->bd_disk;
+		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+		while ((part = disk_part_iter_next(&piter)))
+			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
+		disk_part_iter_exit(&piter);
+	}
 	return 0;
 }
 
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index b8f9c00..d82aad52 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2621,7 +2621,7 @@
 		}
 	}
 
-	/* double-check if current erp/cqr was successfull */
+	/* double-check if current erp/cqr was successful */
 	if ((cqr->irb.scsw.cmd.cstat == 0x00) &&
 	    (cqr->irb.scsw.cmd.dstat ==
 	     (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 2ef2573..300e28a 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -206,6 +206,8 @@
 			features |= DASD_FEATURE_USEDIAG;
 		else if (len == 6 && !strncmp(str, "erplog", 6))
 			features |= DASD_FEATURE_ERPLOG;
+		else if (len == 8 && !strncmp(str, "failfast", 8))
+			features |= DASD_FEATURE_FAILFAST;
 		else {
 			MESSAGE(KERN_WARNING,
 				"unsupported feature: %*s, "
@@ -667,6 +669,51 @@
  */
 
 /*
+ * failfast controls the behaviour if no path is available
+ */
+static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct dasd_devmap *devmap;
+	int ff_flag;
+
+	devmap = dasd_find_busid(dev->bus_id);
+	if (!IS_ERR(devmap))
+		ff_flag = (devmap->features & DASD_FEATURE_FAILFAST) != 0;
+	else
+		ff_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_FAILFAST) != 0;
+	return snprintf(buf, PAGE_SIZE, ff_flag ? "1\n" : "0\n");
+}
+
+static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr,
+	      const char *buf, size_t count)
+{
+	struct dasd_devmap *devmap;
+	int val;
+	char *endp;
+
+	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+
+	val = simple_strtoul(buf, &endp, 0);
+	if (((endp + 1) < (buf + count)) || (val > 1))
+		return -EINVAL;
+
+	spin_lock(&dasd_devmap_lock);
+	if (val)
+		devmap->features |= DASD_FEATURE_FAILFAST;
+	else
+		devmap->features &= ~DASD_FEATURE_FAILFAST;
+	if (devmap->device)
+		devmap->device->features = devmap->features;
+	spin_unlock(&dasd_devmap_lock);
+	return count;
+}
+
+static DEVICE_ATTR(failfast, 0644, dasd_ff_show, dasd_ff_store);
+
+/*
  * readonly controls the readonly status of a dasd
  */
 static ssize_t
@@ -1020,6 +1067,7 @@
 	&dev_attr_use_diag.attr,
 	&dev_attr_eer_enabled.attr,
 	&dev_attr_erplog.attr,
+	&dev_attr_failfast.attr,
 	NULL,
 };
 
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 7844461..ef2a569 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -544,7 +544,8 @@
 	}
 	cqr->retries = DIAG_MAX_RETRIES;
 	cqr->buildclk = get_clock();
-	if (blk_noretry_request(req))
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->startdev = memdev;
 	cqr->memdev = memdev;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bd2c52e..bdb8799 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1700,7 +1700,8 @@
 			recid++;
 		}
 	}
-	if (blk_noretry_request(req))
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->startdev = startdev;
 	cqr->memdev = startdev;
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 892e287..f8e05ce 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -535,8 +535,8 @@
 	    eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
 		kfree(eerb);
 		MESSAGE(KERN_WARNING, "can't open device since module "
-			"parameter eer_pages is smaller then 1 or"
-			" bigger then %d", (int)(INT_MAX / PAGE_SIZE));
+			"parameter eer_pages is smaller than 1 or"
+			" bigger than %d", (int)(INT_MAX / PAGE_SIZE));
 		unlock_kernel();
 		return -EINVAL;
 	}
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 7d442ae..f1d1760 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -355,7 +355,8 @@
 			recid++;
 		}
 	}
-	if (blk_noretry_request(req))
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->startdev = memdev;
 	cqr->memdev = memdev;
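
For reference, the failfast flag added in the dasd hunks above can be toggled per device at runtime through the sysfs attribute registered in dasd_devmap.c, and the same keyword is accepted in the dasd= feature list parsed there. A small sketch of the runtime toggle; the bus id 0.0.4711 is made up and the sysfs path assumes the usual ccw device layout:

/* Illustrative userspace toggle for the new DASD failfast attribute.
 * The device bus id (0.0.4711) is hypothetical. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/ccw/devices/0.0.4711/failfast", "w");

	if (!f) {
		perror("failfast attribute");
		return 1;
	}
	fputs("1\n", f);	/* handled by dasd_ff_store(); write "0" to clear */
	return fclose(f) ? 1 : 0;
}
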
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 05a1453..4a39084 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -199,7 +199,7 @@
 #define DASD_CQR_ERROR		0x82	/* request is completed with error */
 #define DASD_CQR_CLEAR_PENDING	0x83	/* request is clear pending */
 #define DASD_CQR_CLEARED	0x84	/* request was cleared */
-#define DASD_CQR_SUCCESS	0x85	/* request was successfull */
+#define DASD_CQR_SUCCESS	0x85	/* request was successful */
 
 
 /* per dasd_ccw_req flags */
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 6430338..0769ced 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -100,7 +100,7 @@
 
 config S390_TAPE_BLOCK
 	bool "Support for tape block devices"
-	depends on S390_TAPE
+	depends on S390_TAPE && BLOCK
 	help
 	  Select this option if you want to access your channel-attached tape
 	  devices using the block device interface.  This interface is similar
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 4005c44..71605a1 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -801,7 +801,7 @@
 static inline int
 tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request)
 {
-	DBF_EVENT(3, "Error Recovery successfull for %s\n",
+	DBF_EVENT(3, "Error Recovery successful for %s\n",
 		  tape_op_verbose[request->op]);
 	return tape_3590_done(device, request);
 }
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index aabbeb9..d8a2289 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -427,7 +427,7 @@
 			buffer = priv->buffer + sizeof(int);
 		}
 		/*
-		 * If the record is bigger then our buffer, we receive only
+		 * If the record is bigger than our buffer, we receive only
 		 * a part of it. We can get the rest later.
 		 */
 		if (iucv_data_count > NET_BUFFER_SIZE)
@@ -437,7 +437,7 @@
 					  0, buffer, iucv_data_count,
 					  &priv->residual_length);
 		spin_unlock_bh(&priv->priv_lock);
-		/* An rc of 5 indicates that the record was bigger then
+		/* An rc of 5 indicates that the record was bigger than
 		 * the buffer, which is OK for us. A 9 indicates that the
 		 * record was purged befor we could receive it.
 		 */
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 06b7182..659f8a7 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -379,7 +379,7 @@
 		if (ccode < 0) /* -EIO if msch gets a program check. */
 			return ccode;
 		switch (ccode) {
-		case 0: /* successfull */
+		case 0: /* successful */
 			if (stsch(sch->schid, &schib) ||
 			    !css_sch_is_valid(&schib))
 				return -ENODEV;
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index f8a3b69..da7afb0 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -169,6 +169,8 @@
 		 q->nr);
 	debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
 						debugfs_root, q, &debugfs_fops);
+	if (IS_ERR(debugfs_queues[i]))
+		debugfs_queues[i] = NULL;
 }
 
 void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 744f928..10cb0f8 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -114,7 +114,7 @@
  * @count: count of buffers to examine
  * @auto_ack: automatically acknowledge buffers
  *
- * Returns the number of successfull extracted equal buffer states.
+ * Returns the number of successfully extracted equal buffer states.
  * Stops processing if a state is different from the last buffers state.
  */
 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 591a2b3..07ab8a5 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -916,6 +916,21 @@
 	.get_drvinfo = qeth_core_get_drvinfo,
 };
 
+static const struct net_device_ops qeth_l2_netdev_ops = {
+	.ndo_open		= qeth_l2_open,
+	.ndo_stop		= qeth_l2_stop,
+	.ndo_get_stats		= qeth_get_stats,
+	.ndo_start_xmit		= qeth_l2_hard_start_xmit,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_multicast_list = qeth_l2_set_multicast_list,
+	.ndo_do_ioctl	   	= qeth_l2_do_ioctl,
+	.ndo_set_mac_address    = qeth_l2_set_mac_address,
+	.ndo_change_mtu	   	= qeth_change_mtu,
+	.ndo_vlan_rx_add_vid	= qeth_l2_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid   = qeth_l2_vlan_rx_kill_vid,
+	.ndo_tx_timeout	   	= qeth_tx_timeout,
+};
+
 static int qeth_l2_setup_netdev(struct qeth_card *card)
 {
 	switch (card->info.type) {
@@ -937,19 +952,9 @@
 		return -ENODEV;
 
 	card->dev->ml_priv = card;
-	card->dev->tx_timeout = &qeth_tx_timeout;
 	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
-	card->dev->open = qeth_l2_open;
-	card->dev->stop = qeth_l2_stop;
-	card->dev->hard_start_xmit = qeth_l2_hard_start_xmit;
-	card->dev->do_ioctl = qeth_l2_do_ioctl;
-	card->dev->get_stats = qeth_get_stats;
-	card->dev->change_mtu = qeth_change_mtu;
-	card->dev->set_multicast_list = qeth_l2_set_multicast_list;
-	card->dev->vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid;
-	card->dev->vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid;
-	card->dev->set_mac_address = qeth_l2_set_mac_address;
 	card->dev->mtu = card->info.initial_mtu;
+	card->dev->netdev_ops = &qeth_l2_netdev_ops;
 	if (card->info.type != QETH_CARD_TYPE_OSN)
 		SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
 	else
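
The qeth_l2 hunk above, like the qeth_l3 one that follows, is an instance of the tree-wide net_device_ops conversion: per-device callback pointers become one shared const ops table. A stripped-down sketch of the pattern, with made-up example_* names:

/* Minimal sketch of the net_device_ops conversion; all example_* symbols
 * are made up and do nothing useful. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_open(struct net_device *dev) { return 0; }
static int example_stop(struct net_device *dev) { return 0; }

static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_open	= example_open,
	.ndo_stop	= example_stop,
	.ndo_start_xmit	= example_start_xmit,
};

static void example_setup(struct net_device *dev)
{
	/* was: dev->open = ...; dev->stop = ...; dev->hard_start_xmit = ...; */
	dev->netdev_ops = &example_netdev_ops;
}
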
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 4693ee4e..3d04920 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1829,28 +1829,6 @@
 
 static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
-	struct net_device *vlandev;
-	struct qeth_card *card = dev->ml_priv;
-	struct in_device *in_dev;
-
-	if (card->info.type == QETH_CARD_TYPE_IQD)
-		return;
-
-	vlandev = vlan_group_get_device(card->vlangrp, vid);
-	vlandev->neigh_setup = qeth_l3_neigh_setup;
-
-	in_dev = in_dev_get(vlandev);
-#ifdef CONFIG_SYSCTL
-	neigh_sysctl_unregister(in_dev->arp_parms);
-#endif
-	neigh_parms_release(&arp_tbl, in_dev->arp_parms);
-
-	in_dev->arp_parms = neigh_parms_alloc(vlandev, &arp_tbl);
-#ifdef CONFIG_SYSCTL
-	neigh_sysctl_register(vlandev, in_dev->arp_parms, NET_IPV4,
-			      NET_IPV4_NEIGH, "ipv4", NULL, NULL);
-#endif
-	in_dev_put(in_dev);
 	return;
 }
 
@@ -2916,6 +2894,37 @@
 	return 0;
 }
 
+static const struct net_device_ops qeth_l3_netdev_ops = {
+	.ndo_open		= qeth_l3_open,
+	.ndo_stop		= qeth_l3_stop,
+	.ndo_get_stats		= qeth_get_stats,
+	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_multicast_list = qeth_l3_set_multicast_list,
+	.ndo_do_ioctl	   	= qeth_l3_do_ioctl,
+	.ndo_change_mtu	   	= qeth_change_mtu,
+	.ndo_vlan_rx_register	= qeth_l3_vlan_rx_register,
+	.ndo_vlan_rx_add_vid	= qeth_l3_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid   = qeth_l3_vlan_rx_kill_vid,
+	.ndo_tx_timeout	   	= qeth_tx_timeout,
+};
+
+static const struct net_device_ops qeth_l3_osa_netdev_ops = {
+	.ndo_open		= qeth_l3_open,
+	.ndo_stop		= qeth_l3_stop,
+	.ndo_get_stats		= qeth_get_stats,
+	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_multicast_list = qeth_l3_set_multicast_list,
+	.ndo_do_ioctl	   	= qeth_l3_do_ioctl,
+	.ndo_change_mtu	   	= qeth_change_mtu,
+	.ndo_vlan_rx_register	= qeth_l3_vlan_rx_register,
+	.ndo_vlan_rx_add_vid	= qeth_l3_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid   = qeth_l3_vlan_rx_kill_vid,
+	.ndo_tx_timeout	   	= qeth_tx_timeout,
+	.ndo_neigh_setup	= qeth_l3_neigh_setup,
+};
+
 static int qeth_l3_setup_netdev(struct qeth_card *card)
 {
 	if (card->info.type == QETH_CARD_TYPE_OSAE) {
@@ -2926,11 +2935,12 @@
 #endif
 			if (!card->dev)
 				return -ENODEV;
+			card->dev->netdev_ops = &qeth_l3_netdev_ops;
 		} else {
 			card->dev = alloc_etherdev(0);
 			if (!card->dev)
 				return -ENODEV;
-			card->dev->neigh_setup = qeth_l3_neigh_setup;
+			card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
 
 			/*IPv6 address autoconfiguration stuff*/
 			qeth_l3_get_unique_id(card);
@@ -2943,25 +2953,14 @@
 		if (!card->dev)
 			return -ENODEV;
 		card->dev->flags |= IFF_NOARP;
+		card->dev->netdev_ops = &qeth_l3_netdev_ops;
 		qeth_l3_iqd_read_initial_mac(card);
 	} else
 		return -ENODEV;
 
-	card->dev->hard_start_xmit = qeth_l3_hard_start_xmit;
 	card->dev->ml_priv = card;
-	card->dev->tx_timeout = &qeth_tx_timeout;
 	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
-	card->dev->open = qeth_l3_open;
-	card->dev->stop = qeth_l3_stop;
-	card->dev->do_ioctl = qeth_l3_do_ioctl;
-	card->dev->get_stats = qeth_get_stats;
-	card->dev->change_mtu = qeth_change_mtu;
-	card->dev->set_multicast_list = qeth_l3_set_multicast_list;
-	card->dev->vlan_rx_register = qeth_l3_vlan_rx_register;
-	card->dev->vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid;
-	card->dev->vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid;
 	card->dev->mtu = card->info.initial_mtu;
-	card->dev->set_mac_address = NULL;
 	SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
 	card->dev->features |=	NETIF_F_HW_VLAN_TX |
 				NETIF_F_HW_VLAN_RX |
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 2550af4..4431578 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -214,7 +214,7 @@
 
 	writeb(regs,  p->regs);
 
-	printk(KERN_INFO PFX "7-Segment Display%s at [%s:0x%lx] %s\n", 
+	printk(KERN_INFO PFX "7-Segment Display%s at [%s:0x%llx] %s\n",
 	       op->node->full_name,
 	       (regs & D7S_FLIP) ? " (FLIPPED)" : "",
 	       op->resource[0].start,
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index b732297..256c7be 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -884,6 +884,7 @@
 	tristate "IBM Virtual SCSI support"
 	depends on PPC_PSERIES || PPC_ISERIES
 	select SCSI_SRP_ATTRS
+	select VIOPATH if PPC_ISERIES
 	help
 	  This is the IBM POWER Virtual SCSI Client
 
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index 9e64b21..c889d84 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -318,7 +318,7 @@
 		return -ENOMEM;
 
 	p->dev = dev;
-	snprintf(p->name, sizeof(p->name), "D700(%s)", dev->bus_id);
+	snprintf(p->name, sizeof(p->name), "D700(%s)", dev_name(dev));
 	if (request_irq(irq, NCR_D700_intr, IRQF_SHARED, p->name, p)) {
 		printk(KERN_ERR "D700: request_irq failed\n");
 		kfree(p);
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 3c298c7..964769f 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -633,7 +633,7 @@
 		return FAILED;
 	}
 
-	/* Reset device is handled by the firmare, we fill in an SCB and
+	/* Reset device is handled by the firmware, we fill in an SCB and
 	   fire it at the controller, it does the rest */
 	scb->opcode = ORC_BUSDEVRST;
 	scb->target = target;
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 07d572f..37dd471 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -169,10 +169,8 @@
 	    continue;
 
 	instance = scsi_register (tpnt, sizeof (struct WD33C93_hostdata));
-	if (instance == NULL) {
-	    release_mem_region(address, 256);
-	    continue;
-	}
+	if (instance == NULL)
+	    goto release;
 	instance->base = ZTWO_VADDR(address);
 	instance->irq = IRQ_AMIGA_PORTS;
 	instance->unique_id = z->slotaddr;
@@ -183,10 +181,18 @@
 	HDATA(instance)->fast = 0;
 	HDATA(instance)->dma_mode = CTRL_DMA;
 	wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
-	request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI",
-		    instance);
+	if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI",
+			instance))
+	    goto unregister;
 	DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
 	num_a2091++;
+	continue;
+
+unregister:
+	scsi_unregister(instance);
+	wd33c93_release();
+release:
+	release_mem_region(address, 256);
     }
 
     return num_a2091;
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 2f60272..7507d8b 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2527,7 +2527,7 @@
 {
 	struct asc_board *boardp = shost_priv(s);
 
-	printk("Scsi_Host at addr 0x%p, device %s\n", s, boardp->dev->bus_id);
+	printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
 	printk(" host_busy %u, host_no %d, last_reset %d,\n",
 	       s->host_busy, s->host_no, (unsigned)s->last_reset);
 
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index d4640ef..78eb86f 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -189,7 +189,7 @@
 	asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
 	/* send a hard reset */
 	ASD_DPRINTK("sending %s reset to %s\n",
-		    reset_type ? "hard" : "soft", phy->dev.bus_id);
+		    reset_type ? "hard" : "soft", dev_name(&phy->dev));
 	res = sas_phy_reset(phy, reset_type);
 	if (res == TMF_RESP_FUNC_COMPLETE) {
 		/* wait for the maximum settle time */
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
index 1a41f04..08f3a09 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -11,6 +11,7 @@
  */
 
 #include <linux/skbuff.h>
+#include <linux/scatterlist.h>
 
 /* from cxgb3 LLD */
 #include "common.h"
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index ca73637..5d1bf7e 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -329,12 +329,16 @@
 		     (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
 					     : WD33C93_FS_12_15);
 
-	request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, "GVP11 SCSI",
-		    instance);
+	if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, "GVP11 SCSI",
+			instance))
+		goto unregister;
 	DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
 	num_gvp11++;
 	continue;
 
+unregister:
+	scsi_unregister(instance);
+	wd33c93_release();
 release:
 	release_mem_region(address, 256);
     }
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 3fdbb13..aa670a1 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -388,8 +388,7 @@
 		shost->dma_boundary = 0xffffffff;
 
 	device_initialize(&shost->shost_gendev);
-	snprintf(shost->shost_gendev.bus_id, BUS_ID_SIZE, "host%d",
-		shost->host_no);
+	dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
 #ifndef CONFIG_SYSFS_DEPRECATED
 	shost->shost_gendev.bus = &scsi_bus_type;
 #endif
@@ -398,8 +397,7 @@
 	device_initialize(&shost->shost_dev);
 	shost->shost_dev.parent = &shost->shost_gendev;
 	shost->shost_dev.class = &shost_class;
-	snprintf(shost->shost_dev.bus_id, BUS_ID_SIZE, "host%d",
-		 shost->host_no);
+	dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
 	shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;
 
 	shost->ehandler = kthread_run(scsi_error_handler, shost,
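
The bus_id removal seen here, and again in the ibmvfc, ipr, lasi700 and libsas hunks below, follows the same two-step recipe: format the name with dev_set_name() instead of snprintf'ing into dev->bus_id, and read it back with dev_name(). A tiny illustration with a made-up device:

/* Illustration only: the dev_set_name()/dev_name() idiom replacing direct
 * bus_id accesses.  example_register() and the name format are made up. */
#include <linux/device.h>

static void example_register(struct device *dev, int index)
{
	dev_set_name(dev, "example%d", index);	/* was: snprintf(dev->bus_id, ...) */
	dev_info(dev, "registered as %s\n", dev_name(dev));	/* was: dev->bus_id */
}
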
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 44f202f..91ef669d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -101,7 +101,7 @@
 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
-	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_NO_CONNECT, 1, 1, "network down" },
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
@@ -115,11 +115,11 @@
 
 	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
 	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
-	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ABORT, 0, 1, "invalid parameter" },
-	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ABORT, 0, 1, "missing parameter" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
 	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
-	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" },
-	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
 	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
 	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
 	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
@@ -933,7 +933,7 @@
 			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
 			break;
 		default:
-			ibmvfc_log(vhost, 3, "Unknown port speed: %ld Gbit\n",
+			ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
 				   vhost->login_buf->resp.link_speed / 100);
 			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 			break;
@@ -1145,10 +1145,10 @@
 	login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs);
 	strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
 	strncpy(login_info->device_name,
-		vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME);
+		dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
 
 	location = of_get_property(of_node, "ibm,loc-code", NULL);
-	location = location ? location : vhost->dev->bus_id;
+	location = location ? location : dev_name(vhost->dev);
 	strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
 }
 
@@ -2149,8 +2149,8 @@
 {
 	const char *desc = ibmvfc_get_ae_desc(crq->event);
 
-	ibmvfc_log(vhost, 3, "%s event received. scsi_id: %lx, wwpn: %lx,"
-		   " node_name: %lx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
+	ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx,"
+		   " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
 
 	switch (crq->event) {
 	case IBMVFC_AE_LINK_UP:
@@ -2184,7 +2184,7 @@
 		ibmvfc_link_down(vhost, IBMVFC_HALTED);
 		break;
 	default:
-		dev_err(vhost->dev, "Unknown async event received: %ld\n", crq->event);
+		dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
 		break;
 	};
 }
@@ -2261,13 +2261,13 @@
 	 * actually sent
 	 */
 	if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
-		dev_err(vhost->dev, "Returned correlation_token 0x%08lx is invalid!\n",
+		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
 			crq->ioba);
 		return;
 	}
 
 	if (unlikely(atomic_read(&evt->free))) {
-		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08lx!\n",
+		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
 			crq->ioba);
 		return;
 	}
@@ -3259,7 +3259,7 @@
 
 	tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL);
 	if (!tgt) {
-		dev_err(vhost->dev, "Target allocation failure for scsi id %08lx\n",
+		dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
 			scsi_id);
 		return -ENOMEM;
 	}
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index babdf3d..87dafd0 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -691,13 +691,13 @@
 #define DBG_CMD(CMD) do { if (ibmvfc_debug) CMD; } while (0)
 
 #define tgt_dbg(t, fmt, ...)			\
-	DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__))
+	DBG_CMD(dev_info((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__))
 
 #define tgt_info(t, fmt, ...)		\
-	dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
+	dev_info((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
 
 #define tgt_err(t, fmt, ...)		\
-	dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
+	dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
 
 #define ibmvfc_dbg(vhost, ...) \
 	DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 868d35e..74d07d1 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -89,6 +89,7 @@
 static int max_channel = 3;
 static int init_timeout = 5;
 static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
+static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
 
 static struct scsi_transport_template *ibmvscsi_transport_template;
 
@@ -1060,7 +1061,7 @@
 	}
 
 	sdev_printk(KERN_INFO, cmd->device,
-                    "aborting command. lun 0x%lx, tag 0x%lx\n",
+                    "aborting command. lun 0x%llx, tag 0x%llx\n",
 		    (((u64) lun) << 48), (u64) found_evt);
 
 	wait_for_completion(&evt->comp);
@@ -1081,7 +1082,7 @@
 	if (rsp_rc) {
 		if (printk_ratelimit())
 			sdev_printk(KERN_WARNING, cmd->device,
-				    "abort code %d for task tag 0x%lx\n",
+				    "abort code %d for task tag 0x%llx\n",
 				    rsp_rc, tsk_mgmt->task_tag);
 		return FAILED;
 	}
@@ -1101,12 +1102,12 @@
 
 	if (found_evt == NULL) {
 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-		sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%lx completed\n",
+		sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%llx completed\n",
 			    tsk_mgmt->task_tag);
 		return SUCCESS;
 	}
 
-	sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%lx\n",
+	sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%llx\n",
 		    tsk_mgmt->task_tag);
 
 	cmd->result = (DID_ABORT << 16);
@@ -1181,7 +1182,7 @@
 		return FAILED;
 	}
 
-	sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
+	sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%llx\n",
 		    (((u64) lun) << 48));
 
 	wait_for_completion(&evt->comp);
@@ -1202,7 +1203,7 @@
 	if (rsp_rc) {
 		if (printk_ratelimit())
 			sdev_printk(KERN_WARNING, cmd->device,
-				    "reset code %d for task tag 0x%lx\n",
+				    "reset code %d for task tag 0x%llx\n",
 				    rsp_rc, tsk_mgmt->task_tag);
 		return FAILED;
 	}
@@ -1633,7 +1634,7 @@
 static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
 {
 	/* iu_storage data allocated in initialize_event_pool */
-	unsigned long desired_io = max_requests * sizeof(union viosrp_iu);
+	unsigned long desired_io = max_events * sizeof(union viosrp_iu);
 
 	/* add io space for sg data */
 	desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *
@@ -1657,7 +1658,6 @@
 
 	vdev->dev.driver_data = NULL;
 
-	driver_template.can_queue = max_requests - 2;
 	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
 	if (!host) {
 		dev_err(&vdev->dev, "couldn't allocate host data\n");
@@ -1673,12 +1673,12 @@
 	atomic_set(&hostdata->request_limit, -1);
 	hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
 
-	rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_requests);
+	rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
 	if (rc != 0 && rc != H_RESOURCE) {
 		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
 		goto init_crq_failed;
 	}
-	if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
+	if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
 		dev_err(&vdev->dev, "couldn't initialize event pool\n");
 		goto init_pool_failed;
 	}
@@ -1730,7 +1730,7 @@
       add_host_failed:
 	release_event_pool(&hostdata->pool, hostdata);
       init_pool_failed:
-	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_requests);
+	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
       init_crq_failed:
 	scsi_host_put(host);
       scsi_host_alloc_failed:
@@ -1742,7 +1742,7 @@
 	struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
 	release_event_pool(&hostdata->pool, hostdata);
 	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
-					max_requests);
+					max_events);
 
 	srp_remove_host(hostdata->host);
 	scsi_remove_host(hostdata->host);
@@ -1779,6 +1779,10 @@
 {
 	int ret;
 
+	/* Reserve two extra events for error recovery requests */
+	driver_template.can_queue = max_requests;
+	max_events = max_requests + 2;
+
 	if (firmware_has_feature(FW_FEATURE_ISERIES))
 		ibmvscsi_ops = &iseriesvscsi_ops;
 	else if (firmware_has_feature(FW_FEATURE_VIO))
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 0edfb1f..0782900 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2184,7 +2184,7 @@
 		sizeof(struct ipr_dump_entry_header);
 	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
 	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
-	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
+	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
 	driver_dump->hdr.num_entries++;
 }
 
@@ -4912,7 +4912,7 @@
 	if (res && ipr_is_gata(res)) {
 		if (cmd == HDIO_GET_IDENTITY)
 			return -ENOTTY;
-		return ata_scsi_ioctl(sdev, cmd, arg);
+		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
 	}
 
 	return -EINVAL;
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 5945914..8f872f8 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1272,7 +1272,7 @@
 
 struct ipr_dump_location_entry {
 	struct ipr_dump_entry_header hdr;
-	u8 location[BUS_ID_SIZE];
+	u8 location[20];
 }__attribute__((packed));
 
 struct ipr_dump_trace_entry {
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index 3126824..4a4e695 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -103,8 +103,7 @@
 
 	hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
 	if (!hostdata) {
-		printk(KERN_ERR "%s: Failed to allocate host data\n",
-		       dev->dev.bus_id);
+		dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
 		return -ENOMEM;
 	}
 
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index a745f91..e7705d3 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -177,7 +177,6 @@
 			   struct iscsi_segment *segment, int recv,
 			   unsigned copied)
 {
-	static unsigned char padbuf[ISCSI_PAD_LEN];
 	struct scatterlist sg;
 	unsigned int pad;
 
@@ -233,7 +232,7 @@
 			debug_tcp("consume %d pad bytes\n", pad);
 			segment->total_size += pad;
 			segment->size = pad;
-			segment->data = padbuf;
+			segment->data = segment->padbuf;
 			return 0;
 		}
 	}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 709a6f7..facc5bfc 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -169,7 +169,7 @@
 		if (res) {
 			printk("sas: driver on pcidev %s cannot handle "
 			       "device %llx, error:%d\n",
-			       sas_ha->dev->bus_id,
+			       dev_name(sas_ha->dev),
 			       SAS_ADDR(dev->sas_addr), res);
 		}
 	}
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
index bf34a23..c17c250 100644
--- a/drivers/scsi/libsas/sas_dump.c
+++ b/drivers/scsi/libsas/sas_dump.c
@@ -56,7 +56,7 @@
 
 void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he)
 {
-	SAS_DPRINTK("ha %s: %s event\n", sas_ha->dev->bus_id,
+	SAS_DPRINTK("ha %s: %s event\n", dev_name(sas_ha->dev),
 		    sas_hae_str[he]);
 }
 
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index 16f9312..d110a36 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -199,8 +199,8 @@
 		break;
 
 	case SMP_DISCOVER:
-		req->data_len =- 16;
-		if (req->data_len < 0) {
+		req->data_len -= 16;
+		if ((int)req->data_len < 0) {
 			req->data_len = 0;
 			error = -EINVAL;
 			goto out;
@@ -215,8 +215,8 @@
 		break;
 
 	case SMP_REPORT_PHY_SATA:
-		req->data_len =- 16;
-		if (req->data_len < 0) {
+		req->data_len -= 16;
+		if ((int)req->data_len < 0) {
 			req->data_len = 0;
 			error = -EINVAL;
 			goto out;
@@ -238,8 +238,8 @@
 		break;
 
 	case SMP_PHY_CONTROL:
-		req->data_len =- 44;
-		if (req->data_len < 0) {
+		req->data_len -= 44;
+		if ((int)req->data_len < 0) {
 			req->data_len = 0;
 			error = -EINVAL;
 			goto out;
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 139935a..e6ac59c 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -113,7 +113,7 @@
 	sas_port_add_phy(port->port, phy->phy);
 
 	SAS_DPRINTK("%s added to %s, phy_mask:0x%x (%16llx)\n",
-		    phy->phy->dev.bus_id,port->port->dev.bus_id,
+		    dev_name(&phy->phy->dev), dev_name(&port->port->dev),
 		    port->phy_mask,
 		    SAS_ADDR(port->attached_sas_addr));
 
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 7448387..1c558d3 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -717,7 +717,7 @@
 	struct domain_device *dev = sdev_to_domain_dev(sdev);
 
 	if (dev_is_sata(dev))
-		return ata_scsi_ioctl(sdev, cmd, arg);
+		return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);
 
 	return -EINVAL;
 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 8c64494..311ed6d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1964,10 +1964,10 @@
 	uint32_t tmo;
 
 	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
-		/* For FAN, timeout should be greater then edtov */
+		/* For FAN, timeout should be greater than edtov */
 		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
 	} else {
-		/* Normal discovery timeout should be > then ELS/CT timeout
+		/* Normal discovery timeout should be greater than ELS/CT timeout
 		 * FC spec states we need 3 * ratov for CT requests
 		 */
 		tmo = ((phba->fc_ratov * 3) + 3);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 4c77038..6c86731 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1795,12 +1795,13 @@
 int
 lpfc_online(struct lpfc_hba *phba)
 {
-	struct lpfc_vport *vport = phba->pport;
+	struct lpfc_vport *vport;
 	struct lpfc_vport **vports;
 	int i;
 
 	if (!phba)
 		return 0;
+	vport = phba->pport;
 
 	if (!(vport->fc_flag & FC_OFFLINE_MODE))
 		return 0;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 01dfdc8..a36a120 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -420,7 +420,7 @@
 		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 					"0315 Ring %d issue: portCmdGet %d "
-					"is bigger then cmd ring %d\n",
+					"is bigger than cmd ring %d\n",
 					pring->ringno,
 					pring->local_getidx, max_cmd_idx);
 
@@ -1628,12 +1628,12 @@
 {
 	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
 	/*
-	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then
+	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
 	 * rsp ring <portRspMax>
 	 */
 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0312 Ring %d handler: portRspPut %d "
-			"is bigger then rsp ring %d\n",
+			"is bigger than rsp ring %d\n",
 			pring->ringno, le32_to_cpu(pgp->rspPutInx),
 			pring->numRiocb);
 
@@ -2083,12 +2083,12 @@
 	portRspPut = le32_to_cpu(pgp->rspPutInx);
 	if (portRspPut >= portRspMax) {
 		/*
-		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then
+		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
 		 * rsp ring <portRspMax>
 		 */
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0303 Ring %d handler: portRspPut %d "
-				"is bigger then rsp ring %d\n",
+				"is bigger than rsp ring %d\n",
 				pring->ringno, portRspPut, portRspMax);
 
 		phba->link_state = LPFC_HBA_ERROR;
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 7dc62de..9fdcd60 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1967,8 +1967,8 @@
 			scb->state |= aor;
 
 			/*
-			 * Check if this command has firmare owenership. If
-			 * yes, we cannot reset this command. Whenever, f/w
+			 * Check if this command has firmware ownership. If
+			 * yes, we cannot reset this command. Whenever f/w
 			 * completes this command, we will return appropriate
 			 * status from ISR.
 			 */
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
index 1dd70d7..23e5a87 100644
--- a/drivers/scsi/mvsas.c
+++ b/drivers/scsi/mvsas.c
@@ -2959,7 +2959,7 @@
 
 	/* enable auto port detection */
 	mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
-	msleep(100);
+	msleep(1100);
 	/* init and reset phys */
 	for (i = 0; i < mvi->chip->n_phy; i++) {
 		u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 165ff88..67cde01 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -114,7 +114,7 @@
     link->io.NumPorts1 = 0x20;
     link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
     link->io.IOAddrLines = 10;
-    link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+    link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
     link->irq.IRQInfo1 = IRQ_LEVEL_ID;
     link->conf.Attributes = CONF_ENABLE_IRQ;
     link->conf.IntType = INT_MEMORY_AND_IO;
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index ce48e2d..ca0dd33 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -290,11 +290,11 @@
 
 	if (tag != dev->tag)
 		dev_err(&dev->sbd.core,
-			"%s:%u: tag mismatch, got %lx, expected %lx\n",
+			"%s:%u: tag mismatch, got %llx, expected %llx\n",
 			__func__, __LINE__, tag, dev->tag);
 
 	if (res) {
-		dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%lx\n",
+		dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n",
 			__func__, __LINE__, res, status);
 		return IRQ_HANDLED;
 	}
@@ -364,7 +364,7 @@
 
 	if (dev->blk_size != CD_FRAMESIZE) {
 		dev_err(&dev->sbd.core,
-			"%s:%u: cannot handle block size %lu\n", __func__,
+			"%s:%u: cannot handle block size %llu\n", __func__,
 			__LINE__, dev->blk_size);
 		return -EINVAL;
 	}
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 8cb9240..df09820 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -128,7 +128,7 @@
 	- Integrate ql12160_set_target_parameters() with 1280 version
 	- Make qla1280_setup() non static
 	- Do not call qla1280_check_for_dead_scsi_bus() on every I/O request
-	  sent to the card - this command pauses the firmare!!!
+	  sent to the card - this command pauses the firmware!!!
     Rev  3.23.15 Beta March 19, 2002, Jes Sorensen
 	- Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions
 	- Remove a pile of pointless and confusing (srb_t **) and
@@ -659,7 +659,7 @@
 	/* The firmware interface is, um, interesting, in that the
 	 * actual firmware image on the chip is little endian, thus,
 	 * the process of taking that image to the CPU would end up
-	 * little endian.  However, the firmare interface requires it
+	 * little endian.  However, the firmware interface requires it
 	 * to be read a word (two bytes) at a time.
 	 *
 	 * The net result of this would be that the word (and
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index cd53627..c7acef5 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -303,7 +303,7 @@
 		else if (start == (ha->flt_region_boot * 4) ||
 		    start == (ha->flt_region_fw * 4))
 			valid = 1;
-		else if (IS_QLA25XX(ha) &&
+		else if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) &&
 		    start == (ha->flt_region_vpd_nvram * 4))
 		    valid = 1;
 		if (!valid) {
@@ -815,6 +815,21 @@
 	    ha->qla_stats.total_isp_aborts);
 }
 
+static ssize_t
+qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA81XX(ha))
+		return snprintf(buf, PAGE_SIZE, "\n");
+
+	return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x (%x)\n",
+	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
+	    ha->mpi_version[3], ha->mpi_capabilities);
+}
+
 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -839,6 +854,7 @@
 		   NULL);
 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
 		   NULL);
+static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
 
 struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_driver_version,
@@ -858,6 +874,7 @@
 	&dev_attr_optrom_fcode_version,
 	&dev_attr_optrom_fw_version,
 	&dev_attr_total_isp_aborts,
+	&dev_attr_mpi_version,
 	NULL,
 };
 
@@ -892,6 +909,9 @@
 	case PORT_SPEED_8GB:
 		speed = FC_PORTSPEED_8GBIT;
 		break;
+	case PORT_SPEED_10GB:
+		speed = FC_PORTSPEED_10GBIT;
+		break;
 	}
 	fc_host_speed(shost) = speed;
 }
@@ -1382,7 +1402,9 @@
 	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
 	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
 
-	if (IS_QLA25XX(ha))
+	if (IS_QLA81XX(ha))
+		speed = FC_PORTSPEED_10GBIT;
+	else if (IS_QLA25XX(ha))
 		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
 		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
 	else if (IS_QLA24XX_TYPE(ha))
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 1cf7777..34760f8 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -310,6 +310,76 @@
 		*buf++ = htons(RD_REG_WORD(dmp_reg++));
 }
 
+static inline void *
+qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
+{
+	if (!ha->eft)
+		return ptr;
+
+	memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
+	return ptr + ntohl(ha->fw_dump->eft_size);
+}
+
+static inline void *
+qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+	uint32_t cnt;
+	uint32_t *iter_reg;
+	struct qla2xxx_fce_chain *fcec = ptr;
+
+	if (!ha->fce)
+		return ptr;
+
+	*last_chain = &fcec->type;
+	fcec->type = __constant_htonl(DUMP_CHAIN_FCE);
+	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
+	    fce_calc_size(ha->fce_bufs));
+	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
+	fcec->addr_l = htonl(LSD(ha->fce_dma));
+	fcec->addr_h = htonl(MSD(ha->fce_dma));
+
+	iter_reg = fcec->eregs;
+	for (cnt = 0; cnt < 8; cnt++)
+		*iter_reg++ = htonl(ha->fce_mb[cnt]);
+
+	memcpy(iter_reg, ha->fce, ntohl(fcec->size));
+
+	return iter_reg;
+}
+
+static inline void *
+qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+	uint32_t cnt, que_idx;
+	uint8_t req_cnt, rsp_cnt, que_cnt;
+	struct qla2xxx_mq_chain *mq = ptr;
+	struct device_reg_25xxmq __iomem *reg;
+
+	if (!ha->mqenable)
+		return ptr;
+
+	mq = ptr;
+	*last_chain = &mq->type;
+	mq->type = __constant_htonl(DUMP_CHAIN_MQ);
+	mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
+
+	req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
+	rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
+	que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
+	mq->count = htonl(que_cnt);
+	for (cnt = 0; cnt < que_cnt; cnt++) {
+		reg = (struct device_reg_25xxmq *) ((void *)
+			ha->mqiobase + cnt * QLA_QUE_PAGE);
+		que_idx = cnt * 4;
+		mq->qregs[que_idx] = htonl(RD_REG_DWORD(&reg->req_q_in));
+		mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(&reg->req_q_out));
+		mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(&reg->rsp_q_in));
+		mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(&reg->rsp_q_out));
+	}
+
+	return ptr + sizeof(struct qla2xxx_mq_chain);
+}
+
 /**
  * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
  * @ha: HA context
@@ -913,8 +983,8 @@
 		goto qla24xx_fw_dump_failed_0;
 
 	nxt = qla2xxx_copy_queues(ha, nxt);
-	if (ha->eft)
-		memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size));
+
+	qla24xx_copy_eft(ha, nxt);
 
 qla24xx_fw_dump_failed_0:
 	if (rval != QLA_SUCCESS) {
@@ -942,19 +1012,14 @@
 	uint32_t	risc_address;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
-	struct device_reg_25xxmq __iomem *reg25;
 	uint32_t __iomem *dmp_reg;
 	uint32_t	*iter_reg;
 	uint16_t __iomem *mbx_reg;
 	unsigned long	flags;
 	struct qla25xx_fw_dump *fw;
 	uint32_t	ext_mem_cnt;
-	void		*nxt;
-	struct qla2xxx_fce_chain *fcec;
-	struct qla2xxx_mq_chain *mq = NULL;
-	uint32_t	qreg_size;
-	uint8_t		req_cnt, rsp_cnt, que_cnt;
-	uint32_t	que_idx;
+	void		*nxt, *nxt_chain;
+	uint32_t	*last_chain = NULL;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
 	risc_address = ext_mem_cnt = 0;
@@ -1001,28 +1066,6 @@
 	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
 	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
 
-	/* Multi queue registers */
-	if (ha->mqenable) {
-		qreg_size = sizeof(struct qla2xxx_mq_chain);
-		mq = kzalloc(qreg_size, GFP_KERNEL);
-		if (!mq)
-			goto qla25xx_fw_dump_failed_0;
-		req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
-		rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
-		que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
-		mq->count = htonl(que_cnt);
-		mq->chain_size = htonl(qreg_size);
-		mq->type = __constant_htonl(DUMP_CHAIN_MQ);
-		for (cnt = 0; cnt < que_cnt; cnt++) {
-			reg25 = (struct device_reg_25xxmq *) ((void *)
-				ha->mqiobase + cnt * QLA_QUE_PAGE);
-			que_idx = cnt * 4;
-			mq->qregs[que_idx] = htonl(reg25->req_q_in);
-			mq->qregs[que_idx+1] = htonl(reg25->req_q_out);
-			mq->qregs[que_idx+2] = htonl(reg25->rsp_q_in);
-			mq->qregs[que_idx+3] = htonl(reg25->rsp_q_out);
-		}
-	}
 	WRT_REG_DWORD(&reg->iobase_window, 0x00);
 	RD_REG_DWORD(&reg->iobase_window);
 
@@ -1240,6 +1283,10 @@
 	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
 	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
 
+	/* Multi queue registers */
+	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
+	    &last_chain);
+
 	rval = qla24xx_soft_reset(ha);
 	if (rval != QLA_SUCCESS)
 		goto qla25xx_fw_dump_failed_0;
@@ -1249,37 +1296,16 @@
 	if (rval != QLA_SUCCESS)
 		goto qla25xx_fw_dump_failed_0;
 
-	/* Fibre Channel Trace Buffer. */
 	nxt = qla2xxx_copy_queues(ha, nxt);
-	if (ha->eft)
-		memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size));
 
-	/* Fibre Channel Event Buffer. */
-	if (!ha->fce)
-		goto qla25xx_fw_dump_failed_0;
+	nxt = qla24xx_copy_eft(ha, nxt);
 
-	ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
-
-	if (ha->mqenable) {
-		nxt = nxt + ntohl(ha->fw_dump->eft_size);
-		memcpy(nxt, mq, qreg_size);
-		kfree(mq);
-		fcec = nxt + qreg_size;
-	} else {
-		fcec = nxt + ntohl(ha->fw_dump->eft_size);
+	/* Chain entries -- started with MQ. */
+	qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+	if (last_chain) {
+		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
 	}
-	fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST);
-	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
-	    fce_calc_size(ha->fce_bufs));
-	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
-	fcec->addr_l = htonl(LSD(ha->fce_dma));
-	fcec->addr_h = htonl(MSD(ha->fce_dma));
-
-	iter_reg = fcec->eregs;
-	for (cnt = 0; cnt < 8; cnt++)
-		*iter_reg++ = htonl(ha->fce_mb[cnt]);
-
-	memcpy(iter_reg, ha->fce, ntohl(fcec->size));
 
 qla25xx_fw_dump_failed_0:
 	if (rval != QLA_SUCCESS) {
@@ -1298,6 +1324,330 @@
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
+
+void
+qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+	int		rval;
+	uint32_t	cnt;
+	uint32_t	risc_address;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	uint32_t __iomem *dmp_reg;
+	uint32_t	*iter_reg;
+	uint16_t __iomem *mbx_reg;
+	unsigned long	flags;
+	struct qla81xx_fw_dump *fw;
+	uint32_t	ext_mem_cnt;
+	void		*nxt, *nxt_chain;
+	uint32_t	*last_chain = NULL;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+	risc_address = ext_mem_cnt = 0;
+	flags = 0;
+
+	if (!hardware_locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	if (!ha->fw_dump) {
+		qla_printk(KERN_WARNING, ha,
+		    "No buffer available for dump!!!\n");
+		goto qla81xx_fw_dump_failed;
+	}
+
+	if (ha->fw_dumped) {
+		qla_printk(KERN_WARNING, ha,
+		    "Firmware has been previously dumped (%p) -- ignoring "
+		    "request...\n", ha->fw_dump);
+		goto qla81xx_fw_dump_failed;
+	}
+	fw = &ha->fw_dump->isp.isp81;
+	qla2xxx_prep_dump(ha, ha->fw_dump);
+
+	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+
+	/* Pause RISC. */
+	rval = qla24xx_pause_risc(reg);
+	if (rval != QLA_SUCCESS)
+		goto qla81xx_fw_dump_failed_0;
+
+	/* Host/Risc registers. */
+	iter_reg = fw->host_risc_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7010, 16, iter_reg);
+
+	/* PCIe registers. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
+	RD_REG_DWORD(&reg->iobase_addr);
+	WRT_REG_DWORD(&reg->iobase_window, 0x01);
+	dmp_reg = &reg->iobase_c4;
+	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
+	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
+	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+	WRT_REG_DWORD(&reg->iobase_window, 0x00);
+	RD_REG_DWORD(&reg->iobase_window);
+
+	/* Host interface registers. */
+	dmp_reg = &reg->flash_addr;
+	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
+		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+
+	/* Disable interrupts. */
+	WRT_REG_DWORD(&reg->ictrl, 0);
+	RD_REG_DWORD(&reg->ictrl);
+
+	/* Shadow registers. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+	RD_REG_DWORD(&reg->iobase_addr);
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
+	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
+	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
+	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
+	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
+	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
+	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
+	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
+	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
+	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
+	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
+	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	/* RISC I/O register. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
+	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+	/* Mailbox registers. */
+	mbx_reg = &reg->mailbox0;
+	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
+		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+
+	/* Transfer sequence registers. */
+	iter_reg = fw->xseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
+
+	iter_reg = fw->xseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
+
+	/* Receive sequence registers. */
+	iter_reg = fw->rseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
+
+	iter_reg = fw->rseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
+	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
+
+	/* Auxiliary sequence registers. */
+	iter_reg = fw->aseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
+	qla24xx_read_window(reg, 0xB070, 16, iter_reg);
+
+	iter_reg = fw->aseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
+	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
+
+	/* Command DMA registers. */
+	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
+
+	/* Queues. */
+	iter_reg = fw->req0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+	iter_reg = fw->resp0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+	iter_reg = fw->req1_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+	/* Transmit DMA registers. */
+	iter_reg = fw->xmt0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
+
+	iter_reg = fw->xmt1_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
+
+	iter_reg = fw->xmt2_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
+
+	iter_reg = fw->xmt3_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
+
+	iter_reg = fw->xmt4_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
+
+	/* Receive DMA registers. */
+	iter_reg = fw->rcvt0_data_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
+
+	iter_reg = fw->rcvt1_data_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
+
+	/* RISC registers. */
+	iter_reg = fw->risc_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
+	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
+
+	/* Local memory controller registers. */
+	iter_reg = fw->lmc_reg;
+	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
+	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
+
+	/* Fibre Protocol Module registers. */
+	iter_reg = fw->fpm_hdw_reg;
+	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
+
+	/* Frame Buffer registers. */
+	iter_reg = fw->fb_hdw_reg;
+	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
+
+	/* Multi queue registers */
+	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
+	    &last_chain);
+
+	rval = qla24xx_soft_reset(ha);
+	if (rval != QLA_SUCCESS)
+		goto qla81xx_fw_dump_failed_0;
+
+	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
+	    &nxt);
+	if (rval != QLA_SUCCESS)
+		goto qla81xx_fw_dump_failed_0;
+
+	nxt = qla2xxx_copy_queues(ha, nxt);
+
+	nxt = qla24xx_copy_eft(ha, nxt);
+
+	/* Chain entries -- started with MQ. */
+	qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+	if (last_chain) {
+		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+	}
+
+qla81xx_fw_dump_failed_0:
+	if (rval != QLA_SUCCESS) {
+		qla_printk(KERN_WARNING, ha,
+		    "Failed to dump firmware (%x)!!!\n", rval);
+		ha->fw_dumped = 0;
+
+	} else {
+		qla_printk(KERN_INFO, ha,
+		    "Firmware dump saved to temp buffer (%ld/%p).\n",
+		    base_vha->host_no, ha->fw_dump);
+		ha->fw_dumped = 1;
+	}
+
+qla81xx_fw_dump_failed:
+	if (!hardware_locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
 /****************************************************************************/
 /*                         Driver Debug Functions.                          */
 /****************************************************************************/
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index c1794a7..f660dd7 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -251,6 +251,45 @@
 	uint32_t ext_mem[1];
 };
 
+struct qla81xx_fw_dump {
+	uint32_t host_status;
+	uint32_t host_risc_reg[32];
+	uint32_t pcie_regs[4];
+	uint32_t host_reg[32];
+	uint32_t shadow_reg[11];
+	uint32_t risc_io_reg;
+	uint16_t mailbox_reg[32];
+	uint32_t xseq_gp_reg[128];
+	uint32_t xseq_0_reg[48];
+	uint32_t xseq_1_reg[16];
+	uint32_t rseq_gp_reg[128];
+	uint32_t rseq_0_reg[32];
+	uint32_t rseq_1_reg[16];
+	uint32_t rseq_2_reg[16];
+	uint32_t aseq_gp_reg[128];
+	uint32_t aseq_0_reg[32];
+	uint32_t aseq_1_reg[16];
+	uint32_t aseq_2_reg[16];
+	uint32_t cmd_dma_reg[16];
+	uint32_t req0_dma_reg[15];
+	uint32_t resp0_dma_reg[15];
+	uint32_t req1_dma_reg[15];
+	uint32_t xmt0_dma_reg[32];
+	uint32_t xmt1_dma_reg[32];
+	uint32_t xmt2_dma_reg[32];
+	uint32_t xmt3_dma_reg[32];
+	uint32_t xmt4_dma_reg[32];
+	uint32_t xmt_data_dma_reg[16];
+	uint32_t rcvt0_data_dma_reg[32];
+	uint32_t rcvt1_data_dma_reg[32];
+	uint32_t risc_gp_reg[128];
+	uint32_t lmc_reg[128];
+	uint32_t fpm_hdw_reg[224];
+	uint32_t fb_hdw_reg[208];
+	uint32_t code_ram[0x2000];
+	uint32_t ext_mem[1];
+};
+
 #define EFT_NUM_BUFFERS		4
 #define EFT_BYTES_PER_BUFFER	0x4000
 #define EFT_SIZE		((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
@@ -313,5 +352,6 @@
 		struct qla2300_fw_dump isp23;
 		struct qla24xx_fw_dump isp24;
 		struct qla25xx_fw_dump isp25;
+		struct qla81xx_fw_dump isp81;
 	} isp;
 };
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a29c952..023ee77 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -187,7 +187,6 @@
  * SCSI Request Block
  */
 typedef struct srb {
-	struct scsi_qla_host *vha;	/* HA the SP is queued on */
 	struct req_que *que;
 	struct fc_port *fcport;
 
@@ -2136,7 +2135,6 @@
 /* Work events.  */
 enum qla_work_type {
 	QLA_EVT_AEN,
-	QLA_EVT_HWE_LOG,
 };
 
 
@@ -2151,10 +2149,6 @@
 			enum fc_host_event_code code;
 			u32 data;
 		} aen;
-		struct {
-			uint16_t code;
-			uint16_t d1, d2, d3;
-		} hwe;
 	} u;
 };
 
@@ -2309,6 +2303,7 @@
 #define PORT_SPEED_2GB  0x01
 #define PORT_SPEED_4GB  0x03
 #define PORT_SPEED_8GB  0x04
+#define PORT_SPEED_10GB	0x13
 	uint16_t	link_data_rate;         /* F/W operating speed */
 
 	uint8_t		current_topology;
@@ -2328,6 +2323,7 @@
 
 #define PCI_DEVICE_ID_QLOGIC_ISP2532    0x2532
 #define PCI_DEVICE_ID_QLOGIC_ISP8432    0x8432
+#define PCI_DEVICE_ID_QLOGIC_ISP8001	0x8001
 	uint32_t	device_type;
 #define DT_ISP2100                      BIT_0
 #define DT_ISP2200                      BIT_1
@@ -2342,7 +2338,8 @@
 #define DT_ISP5432                      BIT_10
 #define DT_ISP2532                      BIT_11
 #define DT_ISP8432                      BIT_12
-#define DT_ISP_LAST                     (DT_ISP8432 << 1)
+#define DT_ISP8001			BIT_13
+#define DT_ISP_LAST			(DT_ISP8001 << 1)
 
 #define DT_IIDMA                        BIT_26
 #define DT_FWI2                         BIT_27
@@ -2364,6 +2361,7 @@
 #define IS_QLA5432(ha)  (DT_MASK(ha) & DT_ISP5432)
 #define IS_QLA2532(ha)  (DT_MASK(ha) & DT_ISP2532)
 #define IS_QLA8432(ha)  (DT_MASK(ha) & DT_ISP8432)
+#define IS_QLA8001(ha)	(DT_MASK(ha) & DT_ISP8001)
 
 #define IS_QLA23XX(ha)  (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
 			IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2373,8 +2371,11 @@
 #define IS_QLA84XX(ha)  (IS_QLA8432(ha))
 #define IS_QLA24XX_TYPE(ha)     (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
 				IS_QLA84XX(ha))
+#define IS_QLA81XX(ha)		(IS_QLA8001(ha))
 #define IS_QLA2XXX_MIDTYPE(ha)	(IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
-				IS_QLA25XX(ha))
+				IS_QLA25XX(ha) || IS_QLA81XX(ha))
+#define IS_NOPOLLING_TYPE(ha)	((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
+				(ha)->flags.msix_enabled)
 
 #define IS_IIDMA_CAPABLE(ha)    ((ha)->device_type & DT_IIDMA)
 #define IS_FWI2_CAPABLE(ha)     ((ha)->device_type & DT_FWI2)
@@ -2472,6 +2473,9 @@
 	uint8_t		fw_seriallink_options[4];
 	uint16_t	fw_seriallink_options24[4];
 
+	uint8_t		mpi_version[4];
+	uint32_t	mpi_capabilities;
+
 	/* Firmware dump information. */
 	struct qla2xxx_fw_dump *fw_dump;
 	uint32_t	fw_dump_len;
@@ -2480,6 +2484,7 @@
 	dma_addr_t	eft_dma;
 	void		*eft;
 
+	uint32_t	chain_offset;
 	struct dentry *dfs_dir;
 	struct dentry *dfs_fce;
 	dma_addr_t	fce_dma;
@@ -2489,10 +2494,6 @@
 	uint64_t	fce_wr, fce_rd;
 	struct mutex	fce_mutex;
 
-	uint32_t	hw_event_start;
-	uint32_t	hw_event_ptr;
-	uint32_t	hw_event_pause_errors;
-
 	uint32_t	pci_attr;
 	uint16_t	chip_revision;
 
@@ -2522,6 +2523,12 @@
 	uint8_t 	fcode_revision[16];
 	uint32_t	fw_revision[4];
 
+	/* Offsets for flash/nvram access (set to ~0 if not used). */
+	uint32_t	flash_conf_off;
+	uint32_t	flash_data_off;
+	uint32_t	nvram_conf_off;
+	uint32_t	nvram_data_off;
+
 	uint32_t	fdt_wrt_disable;
 	uint32_t	fdt_erase_cmd;
 	uint32_t	fdt_block_size;
@@ -2533,7 +2540,6 @@
 	uint32_t        flt_region_boot;
 	uint32_t        flt_region_fw;
 	uint32_t        flt_region_vpd_nvram;
-	uint32_t        flt_region_hw_event;
 	uint32_t        flt_region_npiv_conf;
 
 	/* Needed for BEACON */
@@ -2737,6 +2743,7 @@
 #define OPTROM_SIZE_2322	0x100000
 #define OPTROM_SIZE_24XX	0x100000
 #define OPTROM_SIZE_25XX	0x200000
+#define OPTROM_SIZE_81XX	0x400000
 
 #include "qla_gbl.h"
 #include "qla_dbg.h"
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 0e366a1..c66036d 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -113,7 +113,8 @@
 qla2x00_dfs_setup(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
-	if (!IS_QLA25XX(ha))
+
+	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
 		goto out;
 	if (!ha->fce)
 		goto out;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index ee1f1e7..7abb045 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1215,9 +1215,10 @@
 
 struct qla_flt_location {
 	uint8_t sig[4];
-	uint32_t start_lo;
-	uint32_t start_hi;
-	uint16_t unused;
+	uint16_t start_lo;
+	uint16_t start_hi;
+	uint8_t version;
+	uint8_t unused[5];
 	uint16_t checksum;
 };
 
@@ -1390,4 +1391,291 @@
 
 	uint32_t reserved[12];
 };
+
+/* 81XX Support **************************************************************/
+
+#define MBA_DCBX_START		0x8016
+#define MBA_DCBX_COMPLETE	0x8030
+#define MBA_FCF_CONF_ERR	0x8031
+#define MBA_DCBX_PARAM_UPDATE	0x8032
+#define MBA_IDC_COMPLETE	0x8100
+#define MBA_IDC_NOTIFY		0x8101
+#define MBA_IDC_TIME_EXT	0x8102
+
+struct nvram_81xx {
+	/* NVRAM header. */
+	uint8_t id[4];
+	uint16_t nvram_version;
+	uint16_t reserved_0;
+
+	/* Firmware Initialization Control Block. */
+	uint16_t version;
+	uint16_t reserved_1;
+	uint16_t frame_payload_size;
+	uint16_t execution_throttle;
+	uint16_t exchange_count;
+	uint16_t reserved_2;
+
+	uint8_t port_name[WWN_SIZE];
+	uint8_t node_name[WWN_SIZE];
+
+	uint16_t login_retry_count;
+	uint16_t reserved_3;
+	uint16_t interrupt_delay_timer;
+	uint16_t login_timeout;
+
+	uint32_t firmware_options_1;
+	uint32_t firmware_options_2;
+	uint32_t firmware_options_3;
+
+	uint16_t reserved_4[4];
+
+	/* Offset 64. */
+	uint8_t enode_mac[6];
+	uint16_t reserved_5[5];
+
+	/* Offset 80. */
+	uint16_t reserved_6[24];
+
+	/* Offset 128. */
+	uint16_t reserved_7[64];
+
+	/*
+	 * BIT 0  = Enable spinup delay
+	 * BIT 1  = Disable BIOS
+	 * BIT 2  = Enable Memory Map BIOS
+	 * BIT 3  = Enable Selectable Boot
+	 * BIT 4  = Disable RISC code load
+	 * BIT 5  = Disable Serdes
+	 * BIT 6  = Opt boot mode
+	 * BIT 7  = Interrupt enable
+	 *
+	 * BIT 8  = EV Control enable
+	 * BIT 9  = Enable lip reset
+	 * BIT 10 = Enable lip full login
+	 * BIT 11 = Enable target reset
+	 * BIT 12 = Stop firmware
+	 * BIT 13 = Enable nodename option
+	 * BIT 14 = Default WWPN valid
+	 * BIT 15 = Enable alternate WWN
+	 *
+	 * BIT 16 = CLP LUN string
+	 * BIT 17 = CLP Target string
+	 * BIT 18 = CLP BIOS enable string
+	 * BIT 19 = CLP Serdes string
+	 * BIT 20 = CLP WWPN string
+	 * BIT 21 = CLP WWNN string
+	 * BIT 22 =
+	 * BIT 23 =
+	 * BIT 24 = Keep WWPN
+	 * BIT 25 = Temp WWPN
+	 * BIT 26-31 =
+	 */
+	uint32_t host_p;
+
+	uint8_t alternate_port_name[WWN_SIZE];
+	uint8_t alternate_node_name[WWN_SIZE];
+
+	uint8_t boot_port_name[WWN_SIZE];
+	uint16_t boot_lun_number;
+	uint16_t reserved_8;
+
+	uint8_t alt1_boot_port_name[WWN_SIZE];
+	uint16_t alt1_boot_lun_number;
+	uint16_t reserved_9;
+
+	uint8_t alt2_boot_port_name[WWN_SIZE];
+	uint16_t alt2_boot_lun_number;
+	uint16_t reserved_10;
+
+	uint8_t alt3_boot_port_name[WWN_SIZE];
+	uint16_t alt3_boot_lun_number;
+	uint16_t reserved_11;
+
+	/*
+	 * BIT 0 = Selective Login
+	 * BIT 1 = Alt-Boot Enable
+	 * BIT 2 = Reserved
+	 * BIT 3 = Boot Order List
+	 * BIT 4 = Reserved
+	 * BIT 5 = Selective LUN
+	 * BIT 6 = Reserved
+	 * BIT 7-31 =
+	 */
+	uint32_t efi_parameters;
+
+	uint8_t reset_delay;
+	uint8_t reserved_12;
+	uint16_t reserved_13;
+
+	uint16_t boot_id_number;
+	uint16_t reserved_14;
+
+	uint16_t max_luns_per_target;
+	uint16_t reserved_15;
+
+	uint16_t port_down_retry_count;
+	uint16_t link_down_timeout;
+
+	/* FCode parameters. */
+	uint16_t fcode_parameter;
+
+	uint16_t reserved_16[3];
+
+	/* Offset 352. */
+	uint8_t reserved_17[4];
+	uint16_t reserved_18[5];
+	uint8_t reserved_19[2];
+	uint16_t reserved_20[8];
+
+	/* Offset 384. */
+	uint8_t reserved_21[16];
+	uint16_t reserved_22[8];
+
+	/* Offset 416. */
+	uint16_t reserved_23[32];
+
+	/* Offset 480. */
+	uint8_t model_name[16];
+
+	/* Offset 496. */
+	uint16_t feature_mask_l;
+	uint16_t feature_mask_h;
+	uint16_t reserved_24[2];
+
+	uint16_t subsystem_vendor_id;
+	uint16_t subsystem_device_id;
+
+	uint32_t checksum;
+};
+
+/*
+ * ISP Initialization Control Block.
+ * Little endian except where noted.
+ */
+#define	ICB_VERSION 1
+struct init_cb_81xx {
+	uint16_t version;
+	uint16_t reserved_1;
+
+	uint16_t frame_payload_size;
+	uint16_t execution_throttle;
+	uint16_t exchange_count;
+
+	uint16_t reserved_2;
+
+	uint8_t port_name[WWN_SIZE];		/* Big endian. */
+	uint8_t node_name[WWN_SIZE];		/* Big endian. */
+
+	uint16_t response_q_inpointer;
+	uint16_t request_q_outpointer;
+
+	uint16_t login_retry_count;
+
+	uint16_t prio_request_q_outpointer;
+
+	uint16_t response_q_length;
+	uint16_t request_q_length;
+
+	uint16_t reserved_3;
+
+	uint16_t prio_request_q_length;
+
+	uint32_t request_q_address[2];
+	uint32_t response_q_address[2];
+	uint32_t prio_request_q_address[2];
+
+	uint8_t reserved_4[8];
+
+	uint16_t atio_q_inpointer;
+	uint16_t atio_q_length;
+	uint32_t atio_q_address[2];
+
+	uint16_t interrupt_delay_timer;		/* 100us increments. */
+	uint16_t login_timeout;
+
+	/*
+	 * BIT 0-3 = Reserved
+	 * BIT 4  = Enable Target Mode
+	 * BIT 5  = Disable Initiator Mode
+	 * BIT 6  = Reserved
+	 * BIT 7  = Reserved
+	 *
+	 * BIT 8-13 = Reserved
+	 * BIT 14 = Node Name Option
+	 * BIT 15-31 = Reserved
+	 */
+	uint32_t firmware_options_1;
+
+	/*
+	 * BIT 0  = Operation Mode bit 0
+	 * BIT 1  = Operation Mode bit 1
+	 * BIT 2  = Operation Mode bit 2
+	 * BIT 3  = Operation Mode bit 3
+	 * BIT 4-7 = Reserved
+	 *
+	 * BIT 8  = Enable Class 2
+	 * BIT 9  = Enable ACK0
+	 * BIT 10 = Reserved
+	 * BIT 11 = Enable FC-SP Security
+	 * BIT 12 = FC Tape Enable
+	 * BIT 13 = Reserved
+	 * BIT 14 = Enable Target PRLI Control
+	 * BIT 15-31 = Reserved
+	 */
+	uint32_t firmware_options_2;
+
+	/*
+	 * BIT 0-3 = Reserved
+	 * BIT 4  = FCP RSP Payload bit 0
+	 * BIT 5  = FCP RSP Payload bit 1
+	 * BIT 6  = Enable Receive Out-of-Order data frame handling
+	 * BIT 7  = Reserved
+	 *
+	 * BIT 8  = Reserved
+	 * BIT 9  = Enable Out-of-Order FCP_XFER_RDY relative offset handling
+	 * BIT 10-16 = Reserved
+	 * BIT 17 = Enable multiple FCFs
+	 * BIT 18-20 = MAC addressing mode
+	 * BIT 21-25 = Ethernet data rate
+	 * BIT 26 = Enable ethernet header rx IOCB for ATIO q
+	 * BIT 27 = Enable ethernet header rx IOCB for response q
+	 * BIT 28 = SPMA selection bit 0
+	 * BIT 29 = SPMA selection bit 1
+	 * BIT 30-31 = Reserved
+	 */
+	uint32_t firmware_options_3;
+
+	uint8_t  reserved_5[8];
+
+	uint8_t enode_mac[6];
+
+	uint8_t reserved_6[10];
+};
+
+struct mid_init_cb_81xx {
+	struct init_cb_81xx init_cb;
+
+	uint16_t count;
+	uint16_t options;
+
+	struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC];
+};
+
+#define FARX_ACCESS_FLASH_CONF_81XX	0x7FFD0000
+#define FARX_ACCESS_FLASH_DATA_81XX	0x7F800000
+
+/* 81XX Flash locations -- occupies second 2MB region. */
+#define FA_BOOT_CODE_ADDR_81	0x80000
+#define FA_RISC_CODE_ADDR_81	0xA0000
+#define FA_FW_AREA_ADDR_81	0xC0000
+#define FA_VPD_NVRAM_ADDR_81	0xD0000
+#define FA_FEATURE_ADDR_81	0xD4000
+#define FA_FLASH_DESCR_ADDR_81	0xD8000
+#define FA_FLASH_LAYOUT_ADDR_81	0xD8400
+#define FA_HW_EVENT0_ADDR_81	0xDC000
+#define FA_HW_EVENT1_ADDR_81	0xDC400
+#define FA_NPIV_CONF0_ADDR_81	0xD1000
+#define FA_NPIV_CONF1_ADDR_81	0xD2000
+
 #endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0011e31..ba49133 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -28,8 +28,10 @@
 extern void qla24xx_reset_adapter(struct scsi_qla_host *);
 extern int qla2x00_nvram_config(struct scsi_qla_host *);
 extern int qla24xx_nvram_config(struct scsi_qla_host *);
+extern int qla81xx_nvram_config(struct scsi_qla_host *);
 extern void qla2x00_update_fw_options(struct scsi_qla_host *);
 extern void qla24xx_update_fw_options(scsi_qla_host_t *);
+extern void qla81xx_update_fw_options(scsi_qla_host_t *);
 extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
 extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *);
 
@@ -69,8 +71,6 @@
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
 extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
     fc_host_event_code, u32);
-extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
-    uint16_t, uint16_t);
 
 extern void qla2x00_abort_fcport_cmds(fc_port_t *);
 extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
@@ -143,7 +143,7 @@
 
 extern void
 qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *,
-    uint16_t *, uint16_t *, uint16_t *, uint32_t *);
+    uint16_t *, uint16_t *, uint16_t *, uint32_t *, uint8_t *, uint32_t *);
 
 extern int
 qla2x00_get_fw_options(scsi_qla_host_t *, uint16_t *);
@@ -317,9 +317,6 @@
 extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *);
 extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
 
-extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
-    uint16_t, uint16_t);
-
 extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
 extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
 
@@ -332,6 +329,7 @@
 extern void qla2300_fw_dump(scsi_qla_host_t *, int);
 extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
 extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
 extern void qla2x00_dump_regs(scsi_qla_host_t *);
 extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
 
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 0a6f7297..557f58d 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1535,7 +1535,10 @@
 	eiter = (struct ct_fdmi_port_attr *) (entries + size);
 	eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
 	eiter->len = __constant_cpu_to_be16(4 + 4);
-	if (IS_QLA25XX(ha))
+	if (IS_QLA81XX(ha))
+		eiter->a.sup_speed = __constant_cpu_to_be32(
+		    FDMI_PORT_SPEED_10GB);
+	else if (IS_QLA25XX(ha))
 		eiter->a.sup_speed = __constant_cpu_to_be32(
 		    FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
 		    FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB);
@@ -1575,6 +1578,10 @@
 		eiter->a.cur_speed =
 		    __constant_cpu_to_be32(FDMI_PORT_SPEED_8GB);
 		break;
+	case PORT_SPEED_10GB:
+		eiter->a.cur_speed =
+		    __constant_cpu_to_be32(FDMI_PORT_SPEED_10GB);
+		break;
 	default:
 		eiter->a.cur_speed =
 		    __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 52ed56e..9ad4d09 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -552,10 +552,6 @@
 		d2 = RD_REG_DWORD(&reg->ctrl_status);
 		barrier();
 	}
-	if (cnt == 0 || hw_evt)
-		qla2xxx_hw_event_log(vha, HW_EVENT_RESET_ERR,
-		    RD_REG_WORD(&reg->mailbox1), RD_REG_WORD(&reg->mailbox2),
-		    RD_REG_WORD(&reg->mailbox3));
 
 	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
 	RD_REG_DWORD(&reg->hccr);
@@ -574,6 +570,9 @@
 	}
 
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	if (IS_NOPOLLING_TYPE(ha))
+		ha->isp_ops->enable_intrs(ha);
 }
 
 /**
@@ -779,16 +778,19 @@
 		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
 		    sizeof(uint16_t);
 	} else if (IS_FWI2_CAPABLE(ha)) {
-		fixed_size = IS_QLA25XX(ha) ?
-			offsetof(struct qla25xx_fw_dump, ext_mem) :
-			offsetof(struct qla24xx_fw_dump, ext_mem);
+		if (IS_QLA81XX(ha))
+			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
+		else if (IS_QLA25XX(ha))
+			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
+		else
+			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
 		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
 		    sizeof(uint32_t);
 		if (ha->mqenable)
 			mq_size = sizeof(struct qla2xxx_mq_chain);
 
 		/* Allocate memory for Fibre Channel Event Buffer. */
-		if (!IS_QLA25XX(ha))
+		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
 			goto try_eft;
 
 		tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
@@ -851,7 +853,9 @@
 
 	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
 	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
-	    mq_size + eft_size + fce_size;
+	    eft_size;
+	ha->chain_offset = dump_size;
+	dump_size += mq_size + fce_size;
 
 	ha->fw_dump = vmalloc(dump_size);
 	if (!ha->fw_dump) {
@@ -987,7 +991,8 @@
 				    &ha->fw_major_version,
 				    &ha->fw_minor_version,
 				    &ha->fw_subminor_version,
-				    &ha->fw_attributes, &ha->fw_memory_size);
+				    &ha->fw_attributes, &ha->fw_memory_size,
+				    ha->mpi_version, &ha->mpi_capabilities);
 				ha->flags.npiv_supported = 0;
 				if (IS_QLA2XXX_MIDTYPE(ha) &&
 					 (ha->fw_attributes & BIT_2)) {
@@ -1253,35 +1258,48 @@
 {
 	int	rval;
 	unsigned long flags = 0;
-	int cnt;
+	int cnt, que;
 	struct qla_hw_data *ha = vha->hw;
-	struct req_que *req = ha->req_q_map[0];
-	struct rsp_que *rsp = ha->rsp_q_map[0];
+	struct req_que *req;
+	struct rsp_que *rsp;
+	struct scsi_qla_host *vp;
 	struct mid_init_cb_24xx *mid_init_cb =
 	    (struct mid_init_cb_24xx *) ha->init_cb;
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
 	/* Clear outstanding commands array. */
-	for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
-		req->outstanding_cmds[cnt] = NULL;
+	for (que = 0; que < ha->max_queues; que++) {
+		req = ha->req_q_map[que];
+		if (!req)
+			continue;
+		for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+			req->outstanding_cmds[cnt] = NULL;
 
-	req->current_outstanding_cmd = 0;
+		req->current_outstanding_cmd = 0;
+
+		/* Initialize firmware. */
+		req->ring_ptr  = req->ring;
+		req->ring_index    = 0;
+		req->cnt      = req->length;
+	}
+
+	for (que = 0; que < ha->max_queues; que++) {
+		rsp = ha->rsp_q_map[que];
+		if (!rsp)
+			continue;
+		rsp->ring_ptr = rsp->ring;
+		rsp->ring_index    = 0;
+
+		/* Initialize response queue entries */
+		qla2x00_init_response_q_entries(rsp);
+	}
 
 	/* Clear RSCN queue. */
-	vha->rscn_in_ptr = 0;
-	vha->rscn_out_ptr = 0;
-
-	/* Initialize firmware. */
-	req->ring_ptr  = req->ring;
-	req->ring_index    = 0;
-	req->cnt      = req->length;
-	rsp->ring_ptr = rsp->ring;
-	rsp->ring_index    = 0;
-
-	/* Initialize response queue entries */
-	qla2x00_init_response_q_entries(rsp);
-
+	list_for_each_entry(vp, &ha->vp_list, list) {
+		vp->rscn_in_ptr = 0;
+		vp->rscn_out_ptr = 0;
+	}
 	ha->isp_ops->config_rings(vha);
 
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1665,10 +1683,6 @@
 		qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
 		    "invalid -- WWPN) defaults.\n");
 
-		if (chksum)
-			qla2xxx_hw_event_log(vha, HW_EVENT_NVRAM_CHKSUM_ERR, 0,
-			    MSW(chksum), LSW(chksum));
-
 		/*
 		 * Set default initialization control block.
 		 */
@@ -3211,8 +3225,8 @@
 	int rval = QLA_SUCCESS;
 	uint32_t wait_time;
 	struct qla_hw_data *ha = vha->hw;
-	struct req_que *req = ha->req_q_map[0];
-	struct rsp_que *rsp = ha->rsp_q_map[0];
+	struct req_que *req = ha->req_q_map[vha->req_ques[0]];
+	struct rsp_que *rsp = req->rsp;
 
 	atomic_set(&vha->loop_state, LOOP_UPDATE);
 	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -3491,6 +3505,7 @@
 		}
 		req = ha->req_q_map[i];
 		if (req) {
+			/* Re-initialize request queue. */
 			req->options &= ~BIT_0;
 			ret = qla25xx_init_req_que(base_vha, req, req->options);
 			if (ret != QLA_SUCCESS)
@@ -3499,7 +3514,7 @@
 						req->id));
 			else
 				DEBUG2_17(printk(KERN_WARNING
-					"%s Rsp que:%d inited\n", __func__,
+					"%s Req que:%d inited\n", __func__,
 						req->id));
 		}
 	}
@@ -4150,8 +4165,8 @@
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
 	struct qla_hw_data *ha = vha->hw;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
-	struct req_que *req = ha->req_q_map[0];
-	struct rsp_que *rsp = ha->rsp_q_map[0];
+	struct req_que *req = ha->req_q_map[vha->req_ques[0]];
+	struct rsp_que *rsp = req->rsp;
 
 	if (!vha->vp_idx)
 		return -EINVAL;
@@ -4255,3 +4270,269 @@
 	return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
 	    QLA_SUCCESS;
 }
+
+/* 81XX Support **************************************************************/
+
+int
+qla81xx_nvram_config(scsi_qla_host_t *vha)
+{
+	int   rval;
+	struct init_cb_81xx *icb;
+	struct nvram_81xx *nv;
+	uint32_t *dptr;
+	uint8_t  *dptr1, *dptr2;
+	uint32_t chksum;
+	uint16_t cnt;
+	struct qla_hw_data *ha = vha->hw;
+
+	rval = QLA_SUCCESS;
+	icb = (struct init_cb_81xx *)ha->init_cb;
+	nv = ha->nvram;
+
+	/* Determine NVRAM starting address. */
+	ha->nvram_size = sizeof(struct nvram_81xx);
+	ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
+	ha->vpd_size = FA_NVRAM_VPD_SIZE;
+	ha->vpd_base = FA_NVRAM_VPD0_ADDR;
+	if (PCI_FUNC(ha->pdev->devfn) & 1) {
+		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
+		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
+	}
+
+	/* Get VPD data into cache */
+	ha->vpd = ha->nvram + VPD_OFFSET;
+	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
+	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
+
+	/* Get NVRAM data into cache and calculate checksum. */
+	dptr = (uint32_t *)nv;
+	ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
+	    ha->nvram_size);
+	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
+		chksum += le32_to_cpu(*dptr++);
+
+	DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
+	DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
+
+	/* Bad NVRAM data, set default parameters. */
+	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
+	    || nv->id[3] != ' ' ||
+	    nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
+		/* Reset NVRAM data. */
+		qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
+		    "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
+		    le16_to_cpu(nv->nvram_version));
+		qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
+		    "invalid -- WWPN) defaults.\n");
+
+		/*
+		 * Set default initialization control block.
+		 */
+		memset(nv, 0, ha->nvram_size);
+		nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
+		nv->version = __constant_cpu_to_le16(ICB_VERSION);
+		nv->frame_payload_size = __constant_cpu_to_le16(2048);
+		nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
+		nv->exchange_count = __constant_cpu_to_le16(0);
+		nv->port_name[0] = 0x21;
+		nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn);
+		nv->port_name[2] = 0x00;
+		nv->port_name[3] = 0xe0;
+		nv->port_name[4] = 0x8b;
+		nv->port_name[5] = 0x1c;
+		nv->port_name[6] = 0x55;
+		nv->port_name[7] = 0x86;
+		nv->node_name[0] = 0x20;
+		nv->node_name[1] = 0x00;
+		nv->node_name[2] = 0x00;
+		nv->node_name[3] = 0xe0;
+		nv->node_name[4] = 0x8b;
+		nv->node_name[5] = 0x1c;
+		nv->node_name[6] = 0x55;
+		nv->node_name[7] = 0x86;
+		nv->login_retry_count = __constant_cpu_to_le16(8);
+		nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
+		nv->login_timeout = __constant_cpu_to_le16(0);
+		nv->firmware_options_1 =
+		    __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
+		nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
+		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+		nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
+		nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
+		nv->efi_parameters = __constant_cpu_to_le32(0);
+		nv->reset_delay = 5;
+		nv->max_luns_per_target = __constant_cpu_to_le16(128);
+		nv->port_down_retry_count = __constant_cpu_to_le16(30);
+		nv->link_down_timeout = __constant_cpu_to_le16(30);
+		nv->enode_mac[0] = 0x01;
+		nv->enode_mac[1] = 0x02;
+		nv->enode_mac[2] = 0x03;
+		nv->enode_mac[3] = 0x04;
+		nv->enode_mac[4] = 0x05;
+		nv->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn);
+
+		rval = 1;
+	}
+
+	/* Reset Initialization control block */
+	memset(icb, 0, sizeof(struct init_cb_81xx));
+
+	/* Copy 1st segment. */
+	dptr1 = (uint8_t *)icb;
+	dptr2 = (uint8_t *)&nv->version;
+	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
+	while (cnt--)
+		*dptr1++ = *dptr2++;
+
+	icb->login_retry_count = nv->login_retry_count;
+
+	/* Copy 2nd segment. */
+	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
+	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
+	cnt = (uint8_t *)&icb->reserved_5 -
+	    (uint8_t *)&icb->interrupt_delay_timer;
+	while (cnt--)
+		*dptr1++ = *dptr2++;
+
+	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
+	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
+	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
+		icb->enode_mac[0] = 0x01;
+		icb->enode_mac[1] = 0x02;
+		icb->enode_mac[2] = 0x03;
+		icb->enode_mac[3] = 0x04;
+		icb->enode_mac[4] = 0x05;
+		icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn);
+	}
+
+	/*
+	 * Setup driver NVRAM options.
+	 */
+	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
+	    "QLE81XX");
+
+	/* Use alternate WWN? */
+	if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
+		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
+		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
+	}
+
+	/* Prepare nodename */
+	if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
+		/*
+		 * Firmware will apply the following mask if the nodename was
+		 * not provided.
+		 */
+		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
+		icb->node_name[0] &= 0xF0;
+	}
+
+	/* Set host adapter parameters. */
+	ha->flags.disable_risc_code_load = 0;
+	ha->flags.enable_lip_reset = 0;
+	ha->flags.enable_lip_full_login =
+	    le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
+	ha->flags.enable_target_reset =
+	    le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
+	ha->flags.enable_led_scheme = 0;
+	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
+
+	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
+	    (BIT_6 | BIT_5 | BIT_4)) >> 4;
+
+	/* save HBA serial number */
+	ha->serial0 = icb->port_name[5];
+	ha->serial1 = icb->port_name[6];
+	ha->serial2 = icb->port_name[7];
+	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
+	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
+
+	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
+
+	ha->retry_count = le16_to_cpu(nv->login_retry_count);
+
+	/* Set minimum login_timeout to 4 seconds. */
+	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
+		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
+	if (le16_to_cpu(nv->login_timeout) < 4)
+		nv->login_timeout = __constant_cpu_to_le16(4);
+	ha->login_timeout = le16_to_cpu(nv->login_timeout);
+	icb->login_timeout = nv->login_timeout;
+
+	/* Set minimum RATOV to 100 tenths of a second. */
+	ha->r_a_tov = 100;
+
+	ha->loop_reset_delay = nv->reset_delay;
+
+	/* Link Down Timeout = 0:
+	 *
+	 * 	When Port Down timer expires we will start returning
+	 *	I/O's to OS with "DID_NO_CONNECT".
+	 *
+	 * Link Down Timeout != 0:
+	 *
+	 *	 The driver waits for the link to come up after link down
+	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
+	 */
+	if (le16_to_cpu(nv->link_down_timeout) == 0) {
+		ha->loop_down_abort_time =
+		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
+	} else {
+		ha->link_down_timeout =	le16_to_cpu(nv->link_down_timeout);
+		ha->loop_down_abort_time =
+		    (LOOP_DOWN_TIME - ha->link_down_timeout);
+	}
+
+	/* Need enough time to try and get the port back. */
+	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
+	if (qlport_down_retry)
+		ha->port_down_retry_count = qlport_down_retry;
+
+	/* Set login_retry_count */
+	ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
+	if (ha->port_down_retry_count ==
+	    le16_to_cpu(nv->port_down_retry_count) &&
+	    ha->port_down_retry_count > 3)
+		ha->login_retry_count = ha->port_down_retry_count;
+	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
+		ha->login_retry_count = ha->port_down_retry_count;
+	if (ql2xloginretrycount)
+		ha->login_retry_count = ql2xloginretrycount;
+
+	/* Enable ZIO. */
+	if (!vha->flags.init_done) {
+		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
+		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
+		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
+		    le16_to_cpu(icb->interrupt_delay_timer): 2;
+	}
+	icb->firmware_options_2 &= __constant_cpu_to_le32(
+	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
+	vha->flags.process_response_queue = 0;
+	if (ha->zio_mode != QLA_ZIO_DISABLED) {
+		ha->zio_mode = QLA_ZIO_MODE_6;
+
+		DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
+		    "(%d us).\n", vha->host_no, ha->zio_mode,
+		    ha->zio_timer * 100));
+		qla_printk(KERN_INFO, ha,
+		    "ZIO mode %d enabled; timer delay (%d us).\n",
+		    ha->zio_mode, ha->zio_timer * 100);
+
+		icb->firmware_options_2 |= cpu_to_le32(
+		    (uint32_t)ha->zio_mode);
+		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
+		vha->flags.process_response_queue = 1;
+	}
+
+	if (rval) {
+		DEBUG2_3(printk(KERN_WARNING
+		    "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
+	}
+	return (rval);
+}
+
+void
+qla81xx_update_fw_options(scsi_qla_host_t *ha)
+{
+}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 5bedc9d..2258152 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -173,7 +173,7 @@
 		return;
 	}
 
-	vha = sp->vha;
+	vha = sp->fcport->vha;
 	req = sp->que;
 
 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
@@ -234,7 +234,7 @@
 		return;
 	}
 
-	vha = sp->vha;
+	vha = sp->fcport->vha;
 	req = sp->que;
 
 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
@@ -294,7 +294,7 @@
 
 	/* Setup device pointers. */
 	ret = 0;
-	vha = sp->vha;
+	vha = sp->fcport->vha;
 	ha = vha->hw;
 	reg = &ha->iobase->isp;
 	cmd = sp->cmd;
@@ -353,7 +353,6 @@
 	/* Build command packet */
 	req->current_outstanding_cmd = handle;
 	req->outstanding_cmds[handle] = sp;
-	sp->vha = vha;
 	sp->que = req;
 	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
 	req->cnt -= req_cnt;
@@ -656,7 +655,7 @@
 		return;
 	}
 
-	vha = sp->vha;
+	vha = sp->fcport->vha;
 	req = sp->que;
 
 	/* Set transfer direction */
@@ -723,7 +722,7 @@
 	struct req_que *req = NULL;
 	struct rsp_que *rsp = NULL;
 	struct scsi_cmnd *cmd = sp->cmd;
-	struct scsi_qla_host *vha = sp->vha;
+	struct scsi_qla_host *vha = sp->fcport->vha;
 	struct qla_hw_data *ha = vha->hw;
 	uint16_t que_id;
 
@@ -791,7 +790,6 @@
 	/* Build command packet. */
 	req->current_outstanding_cmd = handle;
 	req->outstanding_cmds[handle] = sp;
-	sp->vha = vha;
 	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
 	req->cnt -= req_cnt;
 
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d5fb79a..789fc57 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -275,7 +275,7 @@
 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 {
 #define LS_UNKNOWN	2
-	static char	*link_speeds[5] = { "1", "2", "?", "4", "8" };
+	static char	*link_speeds[] = { "1", "2", "?", "4", "8", "10" };
 	char		*link_speed;
 	uint16_t	handle_cnt;
 	uint16_t	cnt;
@@ -288,6 +288,8 @@
 
 	/* Setup to process RIO completion. */
 	handle_cnt = 0;
+	if (IS_QLA81XX(ha))
+		goto skip_rio;
 	switch (mb[0]) {
 	case MBA_SCSI_COMPLETION:
 		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
@@ -339,7 +341,7 @@
 	default:
 		break;
 	}
-
+skip_rio:
 	switch (mb[0]) {
 	case MBA_SCSI_COMPLETION:	/* Fast Post */
 		if (!vha->flags.online)
@@ -362,7 +364,6 @@
 		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
 		    mb[1], mb[2], mb[3]);
 
-		qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
 		ha->isp_ops->fw_dump(vha, 1);
 
 		if (IS_FWI2_CAPABLE(ha)) {
@@ -387,7 +388,6 @@
 		    vha->host_no));
 		qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
 
-		qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		break;
 
@@ -396,7 +396,6 @@
 		    vha->host_no));
 		qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
 
-		qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		break;
 
@@ -436,6 +435,8 @@
 			link_speed = link_speeds[LS_UNKNOWN];
 			if (mb[1] < 5)
 				link_speed = link_speeds[mb[1]];
+			else if (mb[1] == 0x13)
+				link_speed = link_speeds[5];
 			ha->link_data_rate = mb[1];
 		}
 
@@ -495,12 +496,17 @@
 		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
 		break;
 
+	/* case MBA_DCBX_COMPLETE: */
 	case MBA_POINT_TO_POINT:	/* Point-to-Point */
 		if (IS_QLA2100(ha))
 			break;
 
-		DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
-		    vha->host_no));
+		if (IS_QLA81XX(ha))
+			DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
+			    "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
+		else
+			DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
+			    "received.\n", vha->host_no));
 
 		/*
 		 * Until there's a transition from loop down to loop up, treat
@@ -641,10 +647,7 @@
 
 	/* case MBA_RIO_RESPONSE: */
 	case MBA_ZIO_RESPONSE:
-		DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
-		    vha->host_no));
-		DEBUG(printk(KERN_INFO
-		    "scsi(%ld): [R|Z]IO update completion.\n",
+		DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
 		    vha->host_no));
 
 		if (IS_FWI2_CAPABLE(ha))
@@ -698,6 +701,35 @@
 		}
 		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
 		break;
+	case MBA_DCBX_START:
+		DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
+		    vha->host_no, mb[1], mb[2], mb[3]));
+		break;
+	case MBA_DCBX_PARAM_UPDATE:
+		DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
+		    "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
+		break;
+	case MBA_FCF_CONF_ERR:
+		DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
+		    "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
+		break;
+	case MBA_IDC_COMPLETE:
+		DEBUG2(printk("scsi(%ld): Inter-Driver Commucation "
+		    "Complete -- %04x %04x %04x\n", vha->host_no, mb[1], mb[2],
+		    mb[3]));
+		break;
+	case MBA_IDC_NOTIFY:
+		DEBUG2(printk("scsi(%ld): Inter-Driver Commucation "
+		    "Request Notification -- %04x %04x %04x\n", vha->host_no,
+		    mb[1], mb[2], mb[3]));
+		/**** Mailbox registers 4 - 7 valid!!! */
+		break;
+	case MBA_IDC_TIME_EXT:
+		DEBUG2(printk("scsi(%ld): Inter-Driver Commucation "
+		    "Time Extension -- %04x %04x %04x\n", vha->host_no, mb[1],
+		    mb[2], mb[3]));
+		/**** Mailbox registers 4 - 7 valid!!! */
+		break;
 	}
 
 	if (!vha->vp_idx && ha->num_vhosts)
@@ -1510,7 +1542,7 @@
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
-	if (!IS_QLA25XX(ha))
+	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
 		return;
 
 	rval = QLA_SUCCESS;
@@ -1590,12 +1622,6 @@
 			if (pci_channel_offline(ha->pdev))
 				break;
 
-			if (ha->hw_event_pause_errors == 0)
-				qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
-				    0, MSW(stat), LSW(stat));
-			else if (ha->hw_event_pause_errors < 0xffffffff)
-				ha->hw_event_pause_errors++;
-
 			hccr = RD_REG_DWORD(&reg->hccr);
 
 			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
@@ -1740,12 +1766,6 @@
 			if (pci_channel_offline(ha->pdev))
 				break;
 
-			if (ha->hw_event_pause_errors == 0)
-				qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
-				    0, MSW(stat), LSW(stat));
-			else if (ha->hw_event_pause_errors < 0xffffffff)
-				ha->hw_event_pause_errors++;
-
 			hccr = RD_REG_DWORD(&reg->hccr);
 
 			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
@@ -1944,7 +1964,8 @@
 	device_reg_t __iomem *reg = ha->iobase;
 
 	/* If possible, enable MSI-X. */
-	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
+	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
+	    !IS_QLA8432(ha) && !IS_QLA8001(ha))
 		goto skip_msix;
 
 	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
@@ -1979,7 +2000,8 @@
 	    "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
 skip_msix:
 
-	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
+	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
+	    !IS_QLA8001(ha))
 		goto skip_msi;
 
 	ret = pci_enable_msi(ha->pdev);
@@ -2000,6 +2022,12 @@
 	ha->flags.inta_enabled = 1;
 clear_risc_ints:
 
+	/*
+	 * FIXME: Noted that 8014s were being dropped during NK testing.
+	 * Timing deltas during MSI-X/INTa transitions?
+	 */
+	if (IS_QLA81XX(ha))
+		goto fail;
 	spin_lock_irq(&ha->hardware_lock);
 	if (IS_FWI2_CAPABLE(ha)) {
 		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
@@ -2044,7 +2072,7 @@
 		if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
 			sp = req->outstanding_cmds[pkt->handle];
 			if (sp)
-				vha = sp->vha;
+				vha = sp->fcport->vha;
 		}
 	}
 	if (!vha)
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index a99976f..db4df45 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -123,8 +123,7 @@
 
 	/* Wait for mbx cmd completion until timeout */
 
-	if (!abort_active && io_lock_on) {
-
+	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
 		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
 
 		if (IS_FWI2_CAPABLE(ha))
@@ -218,7 +217,7 @@
 	/* Clean up */
 	ha->mcp = NULL;
 
-	if (abort_active || !io_lock_on) {
+	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
 		DEBUG11(printk("%s(%ld): checking for additional resp "
 		    "interrupt.\n", __func__, base_vha->host_no));
 
@@ -412,7 +411,8 @@
  */
 void
 qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
-    uint16_t *subminor, uint16_t *attributes, uint32_t *memory)
+    uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
+    uint32_t *mpi_caps)
 {
 	int		rval;
 	mbx_cmd_t	mc;
@@ -423,6 +423,8 @@
 	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
 	mcp->out_mb = MBX_0;
 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	if (IS_QLA81XX(vha->hw))
+		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
 	mcp->flags = 0;
 	mcp->tov = MBX_TOV_SECONDS;
 	rval = qla2x00_mailbox_command(vha, mcp);
@@ -436,6 +438,13 @@
 		*memory = 0x1FFFF;			/* Defaults to 128KB. */
 	else
 		*memory = (mcp->mb[5] << 16) | mcp->mb[4];
+	if (IS_QLA81XX(vha->hw)) {
+		mpi[0] = mcp->mb[10] >> 8;
+		mpi[1] = mcp->mb[10] & 0xff;
+		mpi[2] = mcp->mb[11] >> 8;
+		mpi[3] = mcp->mb[11] & 0xff;
+		*mpi_caps = (mcp->mb[12] << 16) | mcp->mb[13];
+	}
 
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
@@ -568,7 +577,6 @@
 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
 {
 	int rval;
-	struct qla_hw_data *ha = vha->hw;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
@@ -595,14 +603,6 @@
 		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
 		    mcp->mb[7] != 0x2525)
 			rval = QLA_FUNCTION_FAILED;
-		if (rval == QLA_FUNCTION_FAILED) {
-			struct device_reg_24xx __iomem *reg =
-			    &ha->iobase->isp24;
-
-			qla2xxx_hw_event_log(vha, HW_EVENT_ISP_ERR, 0,
-			    LSW(RD_REG_DWORD(&reg->hccr)),
-			    LSW(RD_REG_DWORD(&reg->istatus)));
-		}
 	}
 
 	if (rval != QLA_SUCCESS) {
@@ -1363,7 +1363,13 @@
 
 	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
-	if (IS_FWI2_CAPABLE(vha->hw)) {
+	if (IS_QLA81XX(vha->hw)) {
+		/* Logout across all FCFs. */
+		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
+		mcp->mb[1] = BIT_1;
+		mcp->mb[2] = 0;
+		mcp->out_mb = MBX_2|MBX_1|MBX_0;
+	} else if (IS_FWI2_CAPABLE(vha->hw)) {
 		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
 		mcp->mb[1] = BIT_6;
 		mcp->mb[2] = 0;
@@ -1853,6 +1859,9 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
+	if (IS_QLA81XX(vha->hw))
+		return QLA_SUCCESS;
+
 	DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
 	    vha->host_no));
 
@@ -2512,7 +2521,7 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA25XX(vha->hw))
+	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
@@ -3155,7 +3164,7 @@
 	mcp->mb[7] = LSW(MSD(rsp->dma));
 	mcp->mb[5] = rsp->length;
 	mcp->mb[11] = rsp->vp_idx;
-	mcp->mb[14] = rsp->msix->vector;
+	mcp->mb[14] = rsp->msix->entry;
 	mcp->mb[13] = rsp->rid;
 
 	reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 386ffea..f53179c4 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -614,8 +614,10 @@
 	req->vp_idx = vp_idx;
 	req->qos = qos;
 
-	if (ha->rsp_q_map[rsp_que])
+	if (ha->rsp_q_map[rsp_que]) {
 		req->rsp = ha->rsp_q_map[rsp_que];
+		req->rsp->req = req;
+	}
 	/* Use alternate PCI bus number */
 	if (MSB(req->rid))
 		options |= BIT_4;
@@ -627,6 +629,7 @@
 	req->ring_index = 0;
 	req->cnt = req->length;
 	req->id = que_id;
+	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
 	mutex_unlock(&ha->vport_lock);
 
 	ret = qla25xx_init_req_que(base_vha, req, options);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8ea9277..cf32653 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -404,26 +404,9 @@
 qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
 {
 	struct qla_hw_data *ha = vha->hw;
-	sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
-	    ha->fw_minor_version,
-	    ha->fw_subminor_version);
 
-	if (ha->fw_attributes & BIT_0)
-		strcat(str, "[Class 2] ");
-	if (ha->fw_attributes & BIT_1)
-		strcat(str, "[IP] ");
-	if (ha->fw_attributes & BIT_2)
-		strcat(str, "[Multi-ID] ");
-	if (ha->fw_attributes & BIT_3)
-		strcat(str, "[SB-2] ");
-	if (ha->fw_attributes & BIT_4)
-		strcat(str, "[T10 CRC] ");
-	if (ha->fw_attributes & BIT_5)
-		strcat(str, "[VI] ");
-	if (ha->fw_attributes & BIT_10)
-		strcat(str, "[84XX] ");
-	if (ha->fw_attributes & BIT_13)
-		strcat(str, "[Experimental]");
+	sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
+	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
 	return str;
 }
 
@@ -438,7 +421,6 @@
 	if (!sp)
 		return sp;
 
-	sp->vha = vha;
 	sp->fcport = fcport;
 	sp->cmd = cmd;
 	sp->que = ha->req_q_map[0];
@@ -1176,13 +1158,13 @@
 	struct req_que *req;
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
-		req = ha->req_q_map[vha->req_ques[que]];
+	for (que = 0; que < ha->max_queues; que++) {
+		req = ha->req_q_map[que];
 		if (!req)
 			continue;
 		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
 			sp = req->outstanding_cmds[cnt];
-			if (sp && sp->vha == vha) {
+			if (sp && sp->fcport->vha == vha) {
 				req->outstanding_cmds[cnt] = NULL;
 				sp->cmd->result = res;
 				qla2x00_sp_compl(ha, sp);
@@ -1211,7 +1193,7 @@
 	scsi_qla_host_t *vha = shost_priv(sdev->host);
 	struct qla_hw_data *ha = vha->hw;
 	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
-	struct req_que *req = ha->req_q_map[0];
+	struct req_que *req = ha->req_q_map[vha->req_ques[0]];
 
 	if (sdev->tagged_supported)
 		scsi_activate_tcq(sdev, req->max_q_depth);
@@ -1329,6 +1311,8 @@
 	unsigned long flags = 0;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
+	if (IS_NOPOLLING_TYPE(ha))
+		return;
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	ha->interrupts_on = 0;
 	WRT_REG_DWORD(&reg->ictrl, 0);
@@ -1488,6 +1472,44 @@
 	.rd_req_reg		= qla24xx_rd_req_reg,
 };
 
+static struct isp_operations qla81xx_isp_ops = {
+	.pci_config		= qla25xx_pci_config,
+	.reset_chip		= qla24xx_reset_chip,
+	.chip_diag		= qla24xx_chip_diag,
+	.config_rings		= qla24xx_config_rings,
+	.reset_adapter		= qla24xx_reset_adapter,
+	.nvram_config		= qla81xx_nvram_config,
+	.update_fw_options	= qla81xx_update_fw_options,
+	.load_risc		= qla24xx_load_risc,
+	.pci_info_str		= qla24xx_pci_info_str,
+	.fw_version_str		= qla24xx_fw_version_str,
+	.intr_handler		= qla24xx_intr_handler,
+	.enable_intrs		= qla24xx_enable_intrs,
+	.disable_intrs		= qla24xx_disable_intrs,
+	.abort_command		= qla24xx_abort_command,
+	.target_reset		= qla24xx_abort_target,
+	.lun_reset		= qla24xx_lun_reset,
+	.fabric_login		= qla24xx_login_fabric,
+	.fabric_logout		= qla24xx_fabric_logout,
+	.calc_req_entries	= NULL,
+	.build_iocbs		= NULL,
+	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
+	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
+	.read_nvram		= qla25xx_read_nvram_data,
+	.write_nvram		= qla25xx_write_nvram_data,
+	.fw_dump		= qla81xx_fw_dump,
+	.beacon_on		= qla24xx_beacon_on,
+	.beacon_off		= qla24xx_beacon_off,
+	.beacon_blink		= qla24xx_beacon_blink,
+	.read_optrom		= qla25xx_read_optrom_data,
+	.write_optrom		= qla24xx_write_optrom_data,
+	.get_flash_version	= qla24xx_get_flash_version,
+	.start_scsi		= qla24xx_start_scsi,
+	.wrt_req_reg		= qla24xx_wrt_req_reg,
+	.wrt_rsp_reg		= qla24xx_wrt_rsp_reg,
+	.rd_req_reg		= qla24xx_rd_req_reg,
+};
+
 static inline void
 qla2x00_set_isp_flags(struct qla_hw_data *ha)
 {
@@ -1567,6 +1589,13 @@
 		ha->device_type |= DT_IIDMA;
 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
 		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP8001:
+		ha->device_type |= DT_ISP8001;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
 	}
 }
 
@@ -1629,7 +1658,7 @@
 
 	/* Determine queue resources */
 	ha->max_queues = 1;
-	if (ql2xmaxqueues <= 1 || !IS_QLA25XX(ha))
+	if (ql2xmaxqueues <= 1 || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
 		goto mqiobase_exit;
 	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
 			pci_resource_len(ha->pdev, 3));
@@ -1706,7 +1735,8 @@
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
-	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532) {
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001) {
 		bars = pci_select_bars(pdev, IORESOURCE_MEM);
 		sht = &qla24xx_driver_template;
 		mem_only = 1;
@@ -1760,6 +1790,10 @@
 		rsp_length = RESPONSE_ENTRY_CNT_2100;
 		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
 		ha->gid_list_info_size = 4;
+		ha->flash_conf_off = ~0;
+		ha->flash_data_off = ~0;
+		ha->nvram_conf_off = ~0;
+		ha->nvram_data_off = ~0;
 		ha->isp_ops = &qla2100_isp_ops;
 	} else if (IS_QLA2200(ha)) {
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1767,6 +1801,10 @@
 		rsp_length = RESPONSE_ENTRY_CNT_2100;
 		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
 		ha->gid_list_info_size = 4;
+		ha->flash_conf_off = ~0;
+		ha->flash_data_off = ~0;
+		ha->nvram_conf_off = ~0;
+		ha->nvram_data_off = ~0;
 		ha->isp_ops = &qla2100_isp_ops;
 	} else if (IS_QLA23XX(ha)) {
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1776,6 +1814,10 @@
 		ha->gid_list_info_size = 6;
 		if (IS_QLA2322(ha) || IS_QLA6322(ha))
 			ha->optrom_size = OPTROM_SIZE_2322;
+		ha->flash_conf_off = ~0;
+		ha->flash_data_off = ~0;
+		ha->nvram_conf_off = ~0;
+		ha->nvram_data_off = ~0;
 		ha->isp_ops = &qla2300_isp_ops;
 	} else if (IS_QLA24XX_TYPE(ha)) {
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1787,6 +1829,10 @@
 		ha->optrom_size = OPTROM_SIZE_24XX;
 		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
 		ha->isp_ops = &qla24xx_isp_ops;
+		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
 	} else if (IS_QLA25XX(ha)) {
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
 		req_length = REQUEST_ENTRY_CNT_24XX;
@@ -1797,6 +1843,23 @@
 		ha->optrom_size = OPTROM_SIZE_25XX;
 		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
 		ha->isp_ops = &qla25xx_isp_ops;
+		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
+	} else if (IS_QLA81XX(ha)) {
+		ha->mbx_count = MAILBOX_REGISTER_COUNT;
+		req_length = REQUEST_ENTRY_CNT_24XX;
+		rsp_length = RESPONSE_ENTRY_CNT_2300;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+		ha->gid_list_info_size = 8;
+		ha->optrom_size = OPTROM_SIZE_81XX;
+		ha->isp_ops = &qla81xx_isp_ops;
+		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
+		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
+		ha->nvram_conf_off = ~0;
+		ha->nvram_data_off = ~0;
 	}
 
 	mutex_init(&ha->vport_lock);
@@ -1935,7 +1998,6 @@
 	return 0;
 
 probe_failed:
-	qla2x00_free_que(ha, req, rsp);
 	qla2x00_free_device(base_vha);
 
 	scsi_host_put(base_vha->host);
@@ -2458,23 +2520,6 @@
 	return qla2x00_post_work(vha, e, 1);
 }
 
-int
-qla2x00_post_hwe_work(struct scsi_qla_host *vha, uint16_t code, uint16_t d1,
-    uint16_t d2, uint16_t d3)
-{
-	struct qla_work_evt *e;
-
-	e = qla2x00_alloc_work(vha, QLA_EVT_HWE_LOG, 1);
-	if (!e)
-		return QLA_FUNCTION_FAILED;
-
-	e->u.hwe.code = code;
-	e->u.hwe.d1 = d1;
-	e->u.hwe.d2 = d2;
-	e->u.hwe.d3 = d3;
-	return qla2x00_post_work(vha, e, 1);
-}
-
 static void
 qla2x00_do_work(struct scsi_qla_host *vha)
 {
@@ -2492,10 +2537,6 @@
 			fc_host_post_event(vha->host, fc_get_event_number(),
 			    e->u.aen.code, e->u.aen.data);
 			break;
-		case QLA_EVT_HWE_LOG:
-			qla2xxx_hw_event_log(vha, e->u.hwe.code, e->u.hwe.d1,
-			    e->u.hwe.d2, e->u.hwe.d3);
-			break;
 		}
 		if (e->flags & QLA_EVT_FLAG_FREE)
 			kfree(e);
@@ -2914,13 +2955,14 @@
 
 /* Firmware interface routines. */
 
-#define FW_BLOBS	6
+#define FW_BLOBS	7
 #define FW_ISP21XX	0
 #define FW_ISP22XX	1
 #define FW_ISP2300	2
 #define FW_ISP2322	3
 #define FW_ISP24XX	4
 #define FW_ISP25XX	5
+#define FW_ISP81XX	6
 
 #define FW_FILE_ISP21XX	"ql2100_fw.bin"
 #define FW_FILE_ISP22XX	"ql2200_fw.bin"
@@ -2928,6 +2970,7 @@
 #define FW_FILE_ISP2322	"ql2322_fw.bin"
 #define FW_FILE_ISP24XX	"ql2400_fw.bin"
 #define FW_FILE_ISP25XX	"ql2500_fw.bin"
+#define FW_FILE_ISP81XX	"ql8100_fw.bin"
 
 static DEFINE_MUTEX(qla_fw_lock);
 
@@ -2938,6 +2981,7 @@
 	{ .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
 	{ .name = FW_FILE_ISP24XX, },
 	{ .name = FW_FILE_ISP25XX, },
+	{ .name = FW_FILE_ISP81XX, },
 };
 
 struct fw_blob *
@@ -2959,6 +3003,8 @@
 		blob = &qla_fw_blobs[FW_ISP24XX];
 	} else if (IS_QLA25XX(ha)) {
 		blob = &qla_fw_blobs[FW_ISP25XX];
+	} else if (IS_QLA81XX(ha)) {
+		blob = &qla_fw_blobs[FW_ISP81XX];
 	}
 
 	mutex_lock(&qla_fw_lock);
@@ -3112,6 +3158,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
 	{ 0 },
 };
 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
@@ -3200,3 +3247,4 @@
 MODULE_FIRMWARE(FW_FILE_ISP2322);
 MODULE_FIRMWARE(FW_FILE_ISP24XX);
 MODULE_FIRMWARE(FW_FILE_ISP25XX);
+MODULE_FIRMWARE(FW_FILE_ISP81XX);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index c538ee1..303f8ee 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -425,27 +425,27 @@
 #define OPTROM_BURST_DWORDS	(OPTROM_BURST_SIZE / 4)
 
 static inline uint32_t
-flash_conf_to_access_addr(uint32_t faddr)
+flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr)
 {
-	return FARX_ACCESS_FLASH_CONF | faddr;
+	return ha->flash_conf_off | faddr;
 }
 
 static inline uint32_t
-flash_data_to_access_addr(uint32_t faddr)
+flash_data_addr(struct qla_hw_data *ha, uint32_t faddr)
 {
-	return FARX_ACCESS_FLASH_DATA | faddr;
+	return ha->flash_data_off | faddr;
 }
 
 static inline uint32_t
-nvram_conf_to_access_addr(uint32_t naddr)
+nvram_conf_addr(struct qla_hw_data *ha, uint32_t naddr)
 {
-	return FARX_ACCESS_NVRAM_CONF | naddr;
+	return ha->nvram_conf_off | naddr;
 }
 
 static inline uint32_t
-nvram_data_to_access_addr(uint32_t naddr)
+nvram_data_addr(struct qla_hw_data *ha, uint32_t naddr)
 {
-	return FARX_ACCESS_NVRAM_DATA | naddr;
+	return ha->nvram_data_off | naddr;
 }
 
 static uint32_t
@@ -481,10 +481,12 @@
     uint32_t dwords)
 {
 	uint32_t i;
+	struct qla_hw_data *ha = vha->hw;
+
 	/* Dword reads to flash. */
 	for (i = 0; i < dwords; i++, faddr++)
-		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw,
-		    flash_data_to_access_addr(faddr)));
+		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
+		    flash_data_addr(ha, faddr)));
 
 	return dwptr;
 }
@@ -518,7 +520,7 @@
 {
 	uint32_t ids;
 
-	ids = qla24xx_read_flash_dword(ha, flash_data_to_access_addr(0xd03ab));
+	ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x03ab));
 	*man_id = LSB(ids);
 	*flash_id = MSB(ids);
 
@@ -530,8 +532,7 @@
 		 * Example: ATMEL 0x00 01 45 1F
 		 * Extract MFG and Dev ID from last two bytes.
 		 */
-		ids = qla24xx_read_flash_dword(ha,
-		    flash_data_to_access_addr(0xd009f));
+		ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x009f));
 		*man_id = LSB(ids);
 		*flash_id = MSB(ids);
 	}
@@ -555,9 +556,13 @@
 
 	/* Begin with sane defaults. */
 	loc = locations[0];
-	*start = IS_QLA24XX_TYPE(ha) ? FA_FLASH_LAYOUT_ADDR_24:
-	    FA_FLASH_LAYOUT_ADDR;
-
+	*start = 0;
+	if (IS_QLA24XX_TYPE(ha))
+		*start = FA_FLASH_LAYOUT_ADDR_24;
+	else if (IS_QLA25XX(ha))
+		*start = FA_FLASH_LAYOUT_ADDR;
+	else if (IS_QLA81XX(ha))
+		*start = FA_FLASH_LAYOUT_ADDR_81;
 	/* Begin with first PCI expansion ROM header. */
 	buf = (uint8_t *)req->ring;
 	dcode = (uint32_t *)req->ring;
@@ -618,6 +623,22 @@
 qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
 {
 	const char *loc, *locations[] = { "DEF", "FLT" };
+	const uint32_t def_fw[] =
+		{ FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR_81 };
+	const uint32_t def_boot[] =
+		{ FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR_81 };
+	const uint32_t def_vpd_nvram[] =
+		{ FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR_81 };
+	const uint32_t def_fdt[] =
+		{ FA_FLASH_DESCR_ADDR_24, FA_FLASH_DESCR_ADDR,
+			FA_FLASH_DESCR_ADDR_81 };
+	const uint32_t def_npiv_conf0[] =
+		{ FA_NPIV_CONF0_ADDR_24, FA_NPIV_CONF0_ADDR,
+			FA_NPIV_CONF0_ADDR_81 };
+	const uint32_t def_npiv_conf1[] =
+		{ FA_NPIV_CONF1_ADDR_24, FA_NPIV_CONF1_ADDR,
+			FA_NPIV_CONF1_ADDR_81 };
+	uint32_t def;
 	uint16_t *wptr;
 	uint16_t cnt, chksum;
 	uint32_t start;
@@ -676,20 +697,12 @@
 		case FLT_REG_FDT:
 			ha->flt_region_fdt = start;
 			break;
-		case FLT_REG_HW_EVENT_0:
-			if (!PCI_FUNC(ha->pdev->devfn))
-				ha->flt_region_hw_event = start;
-			break;
-		case FLT_REG_HW_EVENT_1:
-			if (PCI_FUNC(ha->pdev->devfn))
-				ha->flt_region_hw_event = start;
-			break;
 		case FLT_REG_NPIV_CONF_0:
-			if (!PCI_FUNC(ha->pdev->devfn))
+			if (!(PCI_FUNC(ha->pdev->devfn) & 1))
 				ha->flt_region_npiv_conf = start;
 			break;
 		case FLT_REG_NPIV_CONF_1:
-			if (PCI_FUNC(ha->pdev->devfn))
+			if (PCI_FUNC(ha->pdev->devfn) & 1)
 				ha->flt_region_npiv_conf = start;
 			break;
 		}
@@ -699,22 +712,24 @@
 no_flash_data:
 	/* Use hardcoded defaults. */
 	loc = locations[0];
-	ha->flt_region_fw = FA_RISC_CODE_ADDR;
-	ha->flt_region_boot = FA_BOOT_CODE_ADDR;
-	ha->flt_region_vpd_nvram = FA_VPD_NVRAM_ADDR;
-	ha->flt_region_fdt = IS_QLA24XX_TYPE(ha) ? FA_FLASH_DESCR_ADDR_24:
-	    FA_FLASH_DESCR_ADDR;
-	ha->flt_region_hw_event = !PCI_FUNC(ha->pdev->devfn) ?
-	    FA_HW_EVENT0_ADDR: FA_HW_EVENT1_ADDR;
-	ha->flt_region_npiv_conf = !PCI_FUNC(ha->pdev->devfn) ?
-	    (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF0_ADDR_24: FA_NPIV_CONF0_ADDR):
-	    (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF1_ADDR_24: FA_NPIV_CONF1_ADDR);
+	def = 0;
+	if (IS_QLA24XX_TYPE(ha))
+		def = 0;
+	else if (IS_QLA25XX(ha))
+		def = 1;
+	else if (IS_QLA81XX(ha))
+		def = 2;
+	ha->flt_region_fw = def_fw[def];
+	ha->flt_region_boot = def_boot[def];
+	ha->flt_region_vpd_nvram = def_vpd_nvram[def];
+	ha->flt_region_fdt = def_fdt[def];
+	ha->flt_region_npiv_conf = !(PCI_FUNC(ha->pdev->devfn) & 1) ?
+	    def_npiv_conf0[def]: def_npiv_conf1[def];
 done:
 	DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
-	    "vpd_nvram=0x%x fdt=0x%x flt=0x%x hwe=0x%x npiv=0x%x.\n", loc,
+	    "vpd_nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x.\n", loc,
 	    ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram,
-	    ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_hw_event,
-	    ha->flt_region_npiv_conf));
+	    ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf));
 }
 
 static void
@@ -757,14 +772,14 @@
 	mid = le16_to_cpu(fdt->man_id);
 	fid = le16_to_cpu(fdt->id);
 	ha->fdt_wrt_disable = fdt->wrt_disable_bits;
-	ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd);
+	ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0300 | fdt->erase_cmd);
 	ha->fdt_block_size = le32_to_cpu(fdt->block_size);
 	if (fdt->unprotect_sec_cmd) {
-		ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0300 |
+		ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 |
 		    fdt->unprotect_sec_cmd);
 		ha->fdt_protect_sec_cmd = fdt->protect_sec_cmd ?
-		    flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd):
-		    flash_conf_to_access_addr(0x0336);
+		    flash_conf_addr(ha, 0x0300 | fdt->protect_sec_cmd):
+		    flash_conf_addr(ha, 0x0336);
 	}
 	goto done;
 no_flash_data:
@@ -773,7 +788,7 @@
 	mid = man_id;
 	fid = flash_id;
 	ha->fdt_wrt_disable = 0x9c;
-	ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8);
+	ha->fdt_erase_cmd = flash_conf_addr(ha, 0x03d8);
 	switch (man_id) {
 	case 0xbf: /* STT flash. */
 		if (flash_id == 0x8e)
@@ -782,16 +797,16 @@
 			ha->fdt_block_size = FLASH_BLK_SIZE_32K;
 
 		if (flash_id == 0x80)
-			ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0352);
+			ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0352);
 		break;
 	case 0x13: /* ST M25P80. */
 		ha->fdt_block_size = FLASH_BLK_SIZE_64K;
 		break;
 	case 0x1f: /* Atmel 26DF081A. */
 		ha->fdt_block_size = FLASH_BLK_SIZE_4K;
-		ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0320);
-		ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0339);
-		ha->fdt_protect_sec_cmd = flash_conf_to_access_addr(0x0336);
+		ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0320);
+		ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0339);
+		ha->fdt_protect_sec_cmd = flash_conf_addr(ha, 0x0336);
 		break;
 	default:
 		/* Default to 64 kb sector size. */
@@ -813,7 +828,7 @@
 	uint32_t flt_addr;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
+	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA81XX(ha))
 		return QLA_SUCCESS;
 
 	ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -838,7 +853,7 @@
 	struct qla_npiv_entry *entry;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
+	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA81XX(ha))
 		return;
 
 	ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
@@ -930,9 +945,9 @@
 		return;
 
 	/* Disable flash write-protection. */
-	qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0);
+	qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0);
 	/* Some flash parts need an additional zero-write to clear bits.*/
-	qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0);
+	qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0);
 }
 
 static void
@@ -945,11 +960,10 @@
 		goto skip_wrt_protect;
 
 	/* Enable flash write-protection and wait for completion. */
-	qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101),
+	qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101),
 	    ha->fdt_wrt_disable);
 	for (cnt = 300; cnt &&
-	    qla24xx_read_flash_dword(ha,
-		    flash_conf_to_access_addr(0x005)) & BIT_0;
+	    qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x005)) & BIT_0;
 	    cnt--) {
 		udelay(10);
 	}
@@ -977,7 +991,7 @@
 	ret = QLA_SUCCESS;
 
 	/* Prepare burst-capable write on supported ISPs. */
-	if (IS_QLA25XX(ha) && !(faddr & 0xfff) &&
+	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && !(faddr & 0xfff) &&
 	    dwords > OPTROM_BURST_DWORDS) {
 		optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
 		    &optrom_dma, GFP_KERNEL);
@@ -989,7 +1003,7 @@
 	}
 
 	rest_addr = (ha->fdt_block_size >> 2) - 1;
-	sec_mask = 0x80000 - (ha->fdt_block_size >> 2);
+	sec_mask = (ha->optrom_size >> 2) - (ha->fdt_block_size >> 2);
 
 	qla24xx_unprotect_flash(ha);
 
@@ -1024,13 +1038,13 @@
 				*s = cpu_to_le32(*d);
 
 			ret = qla2x00_load_ram(vha, optrom_dma,
-			    flash_data_to_access_addr(faddr),
+			    flash_data_addr(ha, faddr),
 			    OPTROM_BURST_DWORDS);
 			if (ret != QLA_SUCCESS) {
 				qla_printk(KERN_WARNING, ha,
 				    "Unable to burst-write optrom segment "
 				    "(%x/%x/%llx).\n", ret,
-				    flash_data_to_access_addr(faddr),
+				    flash_data_addr(ha, faddr),
 				    (unsigned long long)optrom_dma);
 				qla_printk(KERN_WARNING, ha,
 				    "Reverting to slow-write.\n");
@@ -1047,7 +1061,7 @@
 		}
 
 		ret = qla24xx_write_flash_dword(ha,
-		    flash_data_to_access_addr(faddr), cpu_to_le32(*dwptr));
+		    flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
 		if (ret != QLA_SUCCESS) {
 			DEBUG9(printk("%s(%ld) Unable to program flash "
 			    "address=%x data=%x.\n", __func__,
@@ -1098,12 +1112,13 @@
 {
 	uint32_t i;
 	uint32_t *dwptr;
+	struct qla_hw_data *ha = vha->hw;
 
 	/* Dword reads to flash. */
 	dwptr = (uint32_t *)buf;
 	for (i = 0; i < bytes >> 2; i++, naddr++)
-		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw,
-		    nvram_data_to_access_addr(naddr)));
+		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
+		    nvram_data_addr(ha, naddr)));
 
 	return buf;
 }
@@ -1160,17 +1175,14 @@
 	RD_REG_DWORD(&reg->ctrl_status);	/* PCI Posting. */
 
 	/* Disable NVRAM write-protection. */
-	qla24xx_write_flash_dword(ha, nvram_conf_to_access_addr(0x101),
-	    0);
-	qla24xx_write_flash_dword(ha, nvram_conf_to_access_addr(0x101),
-	    0);
+	qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
+	qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
 
 	/* Dword writes to flash. */
 	dwptr = (uint32_t *)buf;
 	for (i = 0; i < bytes >> 2; i++, naddr++, dwptr++) {
 		ret = qla24xx_write_flash_dword(ha,
-		    nvram_data_to_access_addr(naddr),
-		    cpu_to_le32(*dwptr));
+		    nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
 		if (ret != QLA_SUCCESS) {
 			DEBUG9(qla_printk("Unable to program nvram address=%x "
 			    "data=%x.\n", naddr, *dwptr));
@@ -1179,8 +1191,7 @@
 	}
 
 	/* Enable NVRAM write-protection. */
-	qla24xx_write_flash_dword(ha, nvram_conf_to_access_addr(0x101),
-	    0x8c);
+	qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0x8c);
 
 	/* Disable flash write. */
 	WRT_REG_DWORD(&reg->ctrl_status,
@@ -1202,8 +1213,7 @@
 	dwptr = (uint32_t *)buf;
 	for (i = 0; i < bytes >> 2; i++, naddr++)
 		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
-		    flash_data_to_access_addr(ha->flt_region_vpd_nvram |
-		    naddr)));
+		    flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr)));
 
 	return buf;
 }
@@ -2246,12 +2256,12 @@
 			burst = left;
 
 		rval = qla2x00_dump_ram(vha, optrom_dma,
-		    flash_data_to_access_addr(faddr), burst);
+		    flash_data_addr(ha, faddr), burst);
 		if (rval) {
 			qla_printk(KERN_WARNING, ha,
 			    "Unable to burst-read optrom segment "
 			    "(%x/%x/%llx).\n", rval,
-			    flash_data_to_access_addr(faddr),
+			    flash_data_addr(ha, faddr),
 			    (unsigned long long)optrom_dma);
 			qla_printk(KERN_WARNING, ha,
 			    "Reverting to slow-read.\n");
@@ -2648,108 +2658,3 @@
 
 	return 0;
 }
-
-static int
-qla2xxx_hw_event_store(scsi_qla_host_t *vha, uint32_t *fdata)
-{
-	uint32_t d[2], faddr;
-	struct qla_hw_data *ha = vha->hw;
-
-	/* Locate first empty entry. */
-	for (;;) {
-		if (ha->hw_event_ptr >=
-		    ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
-			DEBUG2(qla_printk(KERN_WARNING, ha,
-			    "HW event -- Log Full!\n"));
-			return QLA_MEMORY_ALLOC_FAILED;
-		}
-
-		qla24xx_read_flash_data(vha, d, ha->hw_event_ptr, 2);
-		faddr = flash_data_to_access_addr(ha->hw_event_ptr);
-		ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
-		if (d[0] == __constant_cpu_to_le32(0xffffffff) &&
-		    d[1] == __constant_cpu_to_le32(0xffffffff)) {
-			qla24xx_unprotect_flash(ha);
-
-			qla24xx_write_flash_dword(ha, faddr++,
-			    cpu_to_le32(jiffies));
-			qla24xx_write_flash_dword(ha, faddr++, 0);
-			qla24xx_write_flash_dword(ha, faddr++, *fdata++);
-			qla24xx_write_flash_dword(ha, faddr++, *fdata);
-
-			qla24xx_protect_flash(ha);
-			break;
-		}
-	}
-	return QLA_SUCCESS;
-}
-
-int
-qla2xxx_hw_event_log(scsi_qla_host_t *vha, uint16_t code, uint16_t d1,
-    uint16_t d2, uint16_t d3)
-{
-#define QMARK(a, b, c, d) \
-    cpu_to_le32(LSB(a) << 24 | LSB(b) << 16 | LSB(c) << 8 | LSB(d))
-	struct qla_hw_data *ha = vha->hw;
-	int rval;
-	uint32_t marker[2], fdata[4];
-
-	if (ha->flt_region_hw_event == 0)
-		return QLA_FUNCTION_FAILED;
-
-	DEBUG2(qla_printk(KERN_WARNING, ha,
-	    "HW event -- code=%x, d1=%x, d2=%x, d3=%x.\n", code, d1, d2, d3));
-
-	/* If marker not already found, locate or write.  */
-	if (!ha->flags.hw_event_marker_found) {
-		/* Create marker. */
-		marker[0] = QMARK('L', ha->fw_major_version,
-		    ha->fw_minor_version, ha->fw_subminor_version);
-		marker[1] = QMARK(QLA_DRIVER_MAJOR_VER, QLA_DRIVER_MINOR_VER,
-		    QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER);
-
-		/* Locate marker. */
-		ha->hw_event_ptr = ha->flt_region_hw_event;
-		for (;;) {
-			qla24xx_read_flash_data(vha, fdata, ha->hw_event_ptr,
-			    4);
-			if (fdata[0] == __constant_cpu_to_le32(0xffffffff) &&
-			    fdata[1] == __constant_cpu_to_le32(0xffffffff))
-				break;
-			ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
-			if (ha->hw_event_ptr >=
-			    ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
-				DEBUG2(qla_printk(KERN_WARNING, ha,
-				    "HW event -- Log Full!\n"));
-				return QLA_MEMORY_ALLOC_FAILED;
-			}
-			if (fdata[2] == marker[0] && fdata[3] == marker[1]) {
-				ha->flags.hw_event_marker_found = 1;
-				break;
-			}
-		}
-		/* No marker, write it. */
-		if (!ha->flags.hw_event_marker_found) {
-			rval = qla2xxx_hw_event_store(vha, marker);
-			if (rval != QLA_SUCCESS) {
-				DEBUG2(qla_printk(KERN_WARNING, ha,
-				    "HW event -- Failed marker write=%x.!\n",
-				    rval));
-				return rval;
-			}
-			ha->flags.hw_event_marker_found = 1;
-		}
-	}
-
-	/* Store error.  */
-	fdata[0] = cpu_to_le32(code << 16 | d1);
-	fdata[1] = cpu_to_le32(d2 << 16 | d3);
-	rval = qla2xxx_hw_event_store(vha, fdata);
-	if (rval != QLA_SUCCESS) {
-		DEBUG2(qla_printk(KERN_WARNING, ha,
-		    "HW event -- Failed error write=%x.!\n",
-		    rval));
-	}
-
-	return rval;
-}
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index be22f3a..808bab6 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.02.03-k1"
+#define QLA2XXX_VERSION      "8.03.00-k1"
 
 #define QLA_DRIVER_MAJOR_VER	8
-#define QLA_DRIVER_MINOR_VER	2
-#define QLA_DRIVER_PATCH_VER	3
+#define QLA_DRIVER_MINOR_VER	3
+#define QLA_DRIVER_PATCH_VER	0
 #define QLA_DRIVER_BETA_VER	0
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index c577d79..051b0f5 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -392,7 +392,7 @@
 		ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;
 	else
 		dev_info(&ha->pdev->dev, "WARNING!!!  You have less than %d "
-			   "firmare IOCBs available (%d).\n",
+			   "firmware IOCBs available (%d).\n",
 			   IOCB_HIWAT_CUSHION, ha->iocb_hiwat);
 
 	return QLA_SUCCESS;
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index 913a931..8e5c169 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -237,8 +237,7 @@
 	rc->dev.parent = get_device(component_dev);
 	rc->num = rd->component_count++;
 
-	snprintf(rc->dev.bus_id, sizeof(rc->dev.bus_id),
-		 "component-%d", rc->num);
+	dev_set_name(&rc->dev, "component-%d", rc->num);
 	list_add_tail(&rc->node, &rd->component_list);
 	rc->dev.class = &raid_class.class;
 	err = device_add(&rc->dev);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index f8b79d4..cbcd3f6 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -651,10 +651,6 @@
 	unsigned long timeout;
 	int rtn = 0;
 
-	/*
-	 * We will use a queued command if possible, otherwise we will
-	 * emulate the queuing and calling of completion function ourselves.
-	 */
 	atomic_inc(&cmd->device->iorequest_cnt);
 
 	/* check if the device is still usable */
@@ -1099,7 +1095,8 @@
  * Description: Looks up the scsi_device with the specified @lun for a given
  * @starget.  The returned scsi_device does not have an additional
  * reference.  You must hold the host's host_lock over this call and
- * any access to the returned scsi_device.
+ * any access to the returned scsi_device. A scsi_device in state
+ * SDEV_DEL is skipped.
  *
  * Note:  The only reason why drivers should use this is because
  * they need to access the device list in irq context.  Otherwise you
@@ -1111,6 +1108,8 @@
 	struct scsi_device *sdev;
 
 	list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
+		if (sdev->sdev_state == SDEV_DEL)
+			continue;
 		if (sdev->lun ==lun)
 			return sdev;
 	}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 27c633f..6eebd0b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2508,7 +2508,7 @@
 }
 
 static struct device pseudo_primary = {
-	.bus_id		= "pseudo_0",
+	.init_name	= "pseudo_0",
 	.release	= pseudo_0_release,
 };
 
@@ -2680,7 +2680,7 @@
         sdbg_host->dev.bus = &pseudo_lld_bus;
         sdbg_host->dev.parent = &pseudo_primary;
         sdbg_host->dev.release = &sdebug_release_adapter;
-        sprintf(sdbg_host->dev.bus_id, "adapter%d", scsi_debug_add_host);
+        dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
 
         error = device_register(&sdbg_host->dev);
 
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 4969e4e..099b545 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -224,6 +224,7 @@
 	{"SGI", "TP9100", "*", BLIST_REPORTLUN2},
 	{"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+	{"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
 	{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
 	{"SONY", "TSL", NULL, BLIST_FORCELUN},		/* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 381838e..ad6a137 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -124,34 +124,22 @@
 enum blk_eh_timer_return scsi_times_out(struct request *req)
 {
 	struct scsi_cmnd *scmd = req->special;
-	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
 	enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
 
 	scsi_log_completion(scmd, TIMEOUT_ERROR);
 
 	if (scmd->device->host->transportt->eh_timed_out)
-		eh_timed_out = scmd->device->host->transportt->eh_timed_out;
+		rtn = scmd->device->host->transportt->eh_timed_out(scmd);
 	else if (scmd->device->host->hostt->eh_timed_out)
-		eh_timed_out = scmd->device->host->hostt->eh_timed_out;
-	else
-		eh_timed_out = NULL;
+		rtn = scmd->device->host->hostt->eh_timed_out(scmd);
 
-	if (eh_timed_out) {
-		rtn = eh_timed_out(scmd);
-		switch (rtn) {
-		case BLK_EH_NOT_HANDLED:
-			break;
-		default:
-			return rtn;
-		}
-	}
-
-	if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
+	if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
+		     !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
 		scmd->result |= DID_TIME_OUT << 16;
-		return BLK_EH_HANDLED;
+		rtn = BLK_EH_HANDLED;
 	}
 
-	return BLK_EH_NOT_HANDLED;
+	return rtn;
 }
 
 /**
@@ -1650,7 +1638,7 @@
 	 * We use TASK_INTERRUPTIBLE so that the thread is not
 	 * counted against the load average as a running process.
 	 * We never actually get interrupted because kthread_run
-	 * disables singal delivery for the created thread.
+	 * disables signal delivery for the created thread.
 	 */
 	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 2ae4f8f..b98f763 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -167,10 +167,17 @@
 static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
 {
 	struct device *dev = scsi_get_device(sdev->host);
+	const char *name;
 
         if (!dev)
 		return -ENXIO;
-        return copy_to_user(arg, dev->bus_id, sizeof(dev->bus_id))? -EFAULT: 0;
+
+	name = dev_name(dev);
+
+	/* compatibility with the old ioctl, which only returned
+	 * 20 characters */
+        return copy_to_user(arg, name, min(strlen(name), (size_t)20))
+		? -EFAULT: 0;
 }
 
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f2f51e0..940dc32 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -91,26 +91,19 @@
 	scsi_put_command(cmd);
 }
 
-/*
- * Function:    scsi_queue_insert()
+/**
+ * __scsi_queue_insert - private queue insertion
+ * @cmd: The SCSI command being requeued
+ * @reason:  The reason for the requeue
+ * @unbusy: Whether the queue should be unbusied
  *
- * Purpose:     Insert a command in the midlevel queue.
- *
- * Arguments:   cmd    - command that we are adding to queue.
- *              reason - why we are inserting command to queue.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns:     Nothing.
- *
- * Notes:       We do this for one of two cases.  Either the host is busy
- *              and it cannot accept any more commands for the time being,
- *              or the device returned QUEUE_FULL and can accept no more
- *              commands.
- * Notes:       This could be called either from an interrupt context or a
- *              normal process context.
+ * This is a private queue insertion.  The public interface
+ * scsi_queue_insert() always assumes the queue should be unbusied
+ * because it's always called before the completion.  This function is
+ * for a requeue after completion, which should only occur in this
+ * file.
  */
-int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
+static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct scsi_device *device = cmd->device;
@@ -150,7 +143,8 @@
 	 * Decrement the counters, since these commands are no longer
 	 * active on the host/device.
 	 */
-	scsi_device_unbusy(device);
+	if (unbusy)
+		scsi_device_unbusy(device);
 
 	/*
 	 * Requeue this command.  It will go before all other commands
@@ -172,6 +166,29 @@
 	return 0;
 }
 
+/*
+ * Function:    scsi_queue_insert()
+ *
+ * Purpose:     Insert a command in the midlevel queue.
+ *
+ * Arguments:   cmd    - command that we are adding to queue.
+ *              reason - why we are inserting command to queue.
+ *
+ * Lock status: Assumed that lock is not held upon entry.
+ *
+ * Returns:     Nothing.
+ *
+ * Notes:       We do this for one of two cases.  Either the host is busy
+ *              and it cannot accept any more commands for the time being,
+ *              or the device returned QUEUE_FULL and can accept no more
+ *              commands.
+ * Notes:       This could be called either from an interrupt context or a
+ *              normal process context.
+ */
+int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
+{
+	return __scsi_queue_insert(cmd, reason, 1);
+}
 /**
  * scsi_execute - insert request and wait for the result
  * @sdev:	scsi device
@@ -684,6 +701,8 @@
 		scsi_run_queue(sdev->request_queue);
 }
 
+static void __scsi_release_buffers(struct scsi_cmnd *, int);
+
 /*
  * Function:    scsi_end_request()
  *
@@ -732,6 +751,7 @@
 				 * leftovers in the front of the
 				 * queue, and goose the queue again.
 				 */
+				scsi_release_buffers(cmd);
 				scsi_requeue_command(q, cmd);
 				cmd = NULL;
 			}
@@ -743,6 +763,7 @@
 	 * This will goose the queue request function at the end, so we don't
 	 * need to worry about launching another command.
 	 */
+	__scsi_release_buffers(cmd, 0);
 	scsi_next_command(cmd);
 	return NULL;
 }
@@ -798,6 +819,26 @@
 	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
 }
 
+static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
+{
+
+	if (cmd->sdb.table.nents)
+		scsi_free_sgtable(&cmd->sdb);
+
+	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
+
+	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
+		struct scsi_data_buffer *bidi_sdb =
+			cmd->request->next_rq->special;
+		scsi_free_sgtable(bidi_sdb);
+		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
+		cmd->request->next_rq->special = NULL;
+	}
+
+	if (scsi_prot_sg_count(cmd))
+		scsi_free_sgtable(cmd->prot_sdb);
+}
+
 /*
  * Function:    scsi_release_buffers()
  *
@@ -817,21 +858,7 @@
  */
 void scsi_release_buffers(struct scsi_cmnd *cmd)
 {
-	if (cmd->sdb.table.nents)
-		scsi_free_sgtable(&cmd->sdb);
-
-	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
-
-	if (scsi_bidi_cmnd(cmd)) {
-		struct scsi_data_buffer *bidi_sdb =
-			cmd->request->next_rq->special;
-		scsi_free_sgtable(bidi_sdb);
-		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
-		cmd->request->next_rq->special = NULL;
-	}
-
-	if (scsi_prot_sg_count(cmd))
-		scsi_free_sgtable(cmd->prot_sdb);
+	__scsi_release_buffers(cmd, 1);
 }
 EXPORT_SYMBOL(scsi_release_buffers);
 
@@ -945,7 +972,6 @@
 	}
 
 	BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
-	scsi_release_buffers(cmd);
 
 	/*
 	 * Next deal with any sectors which we were able to correctly
@@ -963,6 +989,8 @@
 		return;
 	this_count = blk_rq_bytes(req);
 
+	error = -EIO;
+
 	if (host_byte(result) == DID_RESET) {
 		/* Third party bus reset or reset for error recovery
 		 * reasons.  Just retry the command and see what
@@ -1004,13 +1032,18 @@
 				/* This will issue a new 6-byte command. */
 				cmd->device->use_10_for_rw = 0;
 				action = ACTION_REPREP;
+			} else if (sshdr.asc == 0x10) /* DIX */ {
+				description = "Host Data Integrity Failure";
+				action = ACTION_FAIL;
+				error = -EILSEQ;
 			} else
 				action = ACTION_FAIL;
 			break;
 		case ABORTED_COMMAND:
 			if (sshdr.asc == 0x10) { /* DIF */
+				description = "Target Data Integrity Failure";
 				action = ACTION_FAIL;
-				description = "Data Integrity Failure";
+				error = -EILSEQ;
 			} else
 				action = ACTION_RETRY;
 			break;
@@ -1029,6 +1062,10 @@
 				case 0x09: /* self test in progress */
 					action = ACTION_DELAYED_RETRY;
 					break;
+				default:
+					description = "Device not ready";
+					action = ACTION_FAIL;
+					break;
 				}
 			} else {
 				description = "Device not ready";
@@ -1052,9 +1089,10 @@
 	switch (action) {
 	case ACTION_FAIL:
 		/* Give up and fail the remainder of the request */
+		scsi_release_buffers(cmd);
 		if (!(req->cmd_flags & REQ_QUIET)) {
 			if (description)
-				scmd_printk(KERN_INFO, cmd, "%s",
+				scmd_printk(KERN_INFO, cmd, "%s\n",
 					    description);
 			scsi_print_result(cmd);
 			if (driver_byte(result) & DRIVER_SENSE)
@@ -1067,15 +1105,16 @@
 		/* Unprep the request and put it back at the head of the queue.
 		 * A new command will be prepared and issued.
 		 */
+		scsi_release_buffers(cmd);
 		scsi_requeue_command(q, cmd);
 		break;
 	case ACTION_RETRY:
 		/* Retry the same command immediately */
-		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
+		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
 		break;
 	case ACTION_DELAYED_RETRY:
 		/* Retry the same command after a delay */
-		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
+		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
 		break;
 	}
 }
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 18486b5..66505bb 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -32,6 +32,7 @@
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/spinlock.h>
+#include <linux/async.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -179,6 +180,8 @@
 	spin_unlock(&async_scan_lock);
 
 	kfree(data);
+	/* Synchronize async operations globally */
+	async_synchronize_full();
 	return 0;
 }
 
@@ -411,8 +414,7 @@
 	device_initialize(dev);
 	starget->reap_ref = 1;
 	dev->parent = get_device(parent);
-	sprintf(dev->bus_id, "target%d:%d:%d",
-		shost->host_no, channel, id);
+	dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
 #ifndef CONFIG_SYSFS_DEPRECATED
 	dev->bus = &scsi_bus_type;
 #endif
@@ -1021,7 +1023,7 @@
 		if (rescan || !scsi_device_created(sdev)) {
 			SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
 				"scsi scan: device exists on %s\n",
-				sdev->sdev_gendev.bus_id));
+				dev_name(&sdev->sdev_gendev)));
 			if (sdevp)
 				*sdevp = sdev;
 			else
@@ -1160,7 +1162,7 @@
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 
 	SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: Sequential scan of"
-				    "%s\n", starget->dev.bus_id));
+				    "%s\n", dev_name(&starget->dev)));
 
 	max_dev_lun = min(max_scsi_luns, shost->max_lun);
 	/*
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 93c28f3..da63802 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1079,16 +1079,14 @@
 	device_initialize(&sdev->sdev_gendev);
 	sdev->sdev_gendev.bus = &scsi_bus_type;
 	sdev->sdev_gendev.type = &scsi_dev_type;
-	sprintf(sdev->sdev_gendev.bus_id,"%d:%d:%d:%d",
-		sdev->host->host_no, sdev->channel, sdev->id,
-		sdev->lun);
-	
+	dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%d",
+		     sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
+
 	device_initialize(&sdev->sdev_dev);
 	sdev->sdev_dev.parent = &sdev->sdev_gendev;
 	sdev->sdev_dev.class = &sdev_class;
-	snprintf(sdev->sdev_dev.bus_id, BUS_ID_SIZE,
-		 "%d:%d:%d:%d", sdev->host->host_no,
-		 sdev->channel, sdev->id, sdev->lun);
+	dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%d",
+		     sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
 	sdev->scsi_level = starget->scsi_level;
 	transport_setup_device(&sdev->sdev_gendev);
 	spin_lock_irqsave(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 062304d..5f77417 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2407,8 +2407,12 @@
 	/*
 	 * Notify the driver that the rport is now dead. The LLDD will
 	 * also guarantee that any communication to the rport is terminated
+	 *
+	 * Avoid this call if we already called it when we preserved the
+	 * rport for the binding.
 	 */
-	if (i->f->dev_loss_tmo_callbk)
+	if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
+	    (i->f->dev_loss_tmo_callbk))
 		i->f->dev_loss_tmo_callbk(rport);
 
 	transport_remove_device(dev);
@@ -2486,8 +2490,8 @@
 	device_initialize(dev);			/* takes self reference */
 	dev->parent = get_device(&shost->shost_gendev); /* parent reference */
 	dev->release = fc_rport_dev_release;
-	sprintf(dev->bus_id, "rport-%d:%d-%d",
-		shost->host_no, channel, rport->number);
+	dev_set_name(dev, "rport-%d:%d-%d",
+		     shost->host_no, channel, rport->number);
 	transport_setup_device(dev);
 
 	error = device_add(dev);
@@ -2647,7 +2651,8 @@
 				spin_lock_irqsave(shost->host_lock, flags);
 
 				rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
-						  FC_RPORT_DEVLOSS_PENDING);
+						  FC_RPORT_DEVLOSS_PENDING |
+						  FC_RPORT_DEVLOSS_CALLBK_DONE);
 
 				/* if target, initiate a scan */
 				if (rport->scsi_target_id != -1) {
@@ -2944,6 +2949,7 @@
 	struct fc_rport *rport =
 		container_of(work, struct fc_rport, dev_loss_work.work);
 	struct Scsi_Host *shost = rport_to_shost(rport);
+	struct fc_internal *i = to_fc_internal(shost->transportt);
 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
 	unsigned long flags;
 
@@ -3011,6 +3017,7 @@
 	rport->roles = FC_PORT_ROLE_UNKNOWN;
 	rport->port_state = FC_PORTSTATE_NOTPRESENT;
 	rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
+	rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
 
 	/*
 	 * Pre-emptively kill I/O rather than waiting for the work queue
@@ -3046,8 +3053,18 @@
 	 * all attached scsi devices.
 	 */
 	fc_queue_work(shost, &rport->stgt_delete_work);
+
+	/*
+	 * Notify the driver that the rport is now dead. The LLDD will
+	 * also guarantee that any communication to the rport is terminated
+	 *
+	 * Note: we set the CALLBK_DONE flag above to correspond to this call.
+	 */
+	if (i->f->dev_loss_tmo_callbk)
+		i->f->dev_loss_tmo_callbk(rport);
 }
 
+
 /**
  * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target.
  * @work:	rport to terminate io on.
@@ -3164,8 +3181,8 @@
 	device_initialize(dev);			/* takes self reference */
 	dev->parent = get_device(pdev);		/* takes parent reference */
 	dev->release = fc_vport_dev_release;
-	sprintf(dev->bus_id, "vport-%d:%d-%d",
-		shost->host_no, channel, vport->number);
+	dev_set_name(dev, "vport-%d:%d-%d",
+		     shost->host_no, channel, vport->number);
 	transport_setup_device(dev);
 
 	error = device_add(dev);
@@ -3188,19 +3205,19 @@
 	 */
 	if (pdev != &shost->shost_gendev) {
 		error = sysfs_create_link(&shost->shost_gendev.kobj,
-				 &dev->kobj, dev->bus_id);
+				 &dev->kobj, dev_name(dev));
 		if (error)
 			printk(KERN_ERR
 				"%s: Cannot create vport symlinks for "
 				"%s, err=%d\n",
-				__func__, dev->bus_id, error);
+				__func__, dev_name(dev), error);
 	}
 	spin_lock_irqsave(shost->host_lock, flags);
 	vport->flags &= ~FC_VPORT_CREATING;
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
 	dev_printk(KERN_NOTICE, pdev,
-			"%s created via shost%d channel %d\n", dev->bus_id,
+			"%s created via shost%d channel %d\n", dev_name(dev),
 			shost->host_no, channel);
 
 	*ret_vport = vport;
@@ -3297,7 +3314,7 @@
 		return stat;
 
 	if (dev->parent != &shost->shost_gendev)
-		sysfs_remove_link(&shost->shost_gendev.kobj, dev->bus_id);
+		sysfs_remove_link(&shost->shost_gendev.kobj, dev_name(dev));
 	transport_remove_device(dev);
 	device_del(dev);
 	transport_destroy_device(dev);
@@ -3329,7 +3346,7 @@
 		dev_printk(KERN_ERR, vport->dev.parent,
 			"%s: %s could not be deleted created via "
 			"shost%d channel %d - error %d\n", __func__,
-			vport->dev.bus_id, vport->shost->host_no,
+			dev_name(&vport->dev), vport->shost->host_no,
 			vport->channel, stat);
 }
 
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 4a803eb..75c9297 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -187,8 +187,7 @@
 
 	ep->id = id;
 	ep->dev.class = &iscsi_endpoint_class;
-	snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%llu",
-		 (unsigned long long) id);
+	dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
 	err = device_register(&ep->dev);
         if (err)
                 goto free_ep;
@@ -724,8 +723,7 @@
 	}
 	session->target_id = id;
 
-	snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
-		 session->sid);
+	dev_set_name(&session->dev, "session%u", session->sid);
 	err = device_add(&session->dev);
 	if (err) {
 		iscsi_cls_session_printk(KERN_ERR, session,
@@ -898,8 +896,7 @@
 	if (!get_device(&session->dev))
 		goto free_conn;
 
-	snprintf(conn->dev.bus_id, BUS_ID_SIZE, "connection%d:%u",
-		 session->sid, cid);
+	dev_set_name(&conn->dev, "connection%d:%u", session->sid, cid);
 	conn->dev.parent = &session->dev;
 	conn->dev.release = iscsi_conn_release;
 	err = device_register(&conn->dev);
@@ -1816,7 +1813,7 @@
 		priv->t.create_work_queue = 1;
 
 	priv->dev.class = &iscsi_transport_class;
-	snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
+	dev_set_name(&priv->dev, "%s", tt->name);
 	err = device_register(&priv->dev);
 	if (err)
 		goto free_priv;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 3666093..50988cb 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -207,7 +207,7 @@
 	struct request_queue *q;
 	int error;
 	struct device *dev;
-	char namebuf[BUS_ID_SIZE];
+	char namebuf[20];
 	const char *name;
 	void (*release)(struct device *);
 
@@ -219,7 +219,7 @@
 	if (rphy) {
 		q = blk_init_queue(sas_non_host_smp_request, NULL);
 		dev = &rphy->dev;
-		name = dev->bus_id;
+		name = dev_name(dev);
 		release = NULL;
 	} else {
 		q = blk_init_queue(sas_host_smp_request, NULL);
@@ -629,10 +629,10 @@
 	INIT_LIST_HEAD(&phy->port_siblings);
 	if (scsi_is_sas_expander_device(parent)) {
 		struct sas_rphy *rphy = dev_to_rphy(parent);
-		sprintf(phy->dev.bus_id, "phy-%d:%d:%d", shost->host_no,
+		dev_set_name(&phy->dev, "phy-%d:%d:%d", shost->host_no,
 			rphy->scsi_target_id, number);
 	} else
-		sprintf(phy->dev.bus_id, "phy-%d:%d", shost->host_no, number);
+		dev_set_name(&phy->dev, "phy-%d:%d", shost->host_no, number);
 
 	transport_setup_device(&phy->dev);
 
@@ -770,7 +770,7 @@
 	int res;
 
 	res = sysfs_create_link(&port->dev.kobj, &phy->dev.kobj,
-				phy->dev.bus_id);
+				dev_name(&phy->dev));
 	if (res)
 		goto err;
 	res = sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port");
@@ -785,7 +785,7 @@
 static void sas_port_delete_link(struct sas_port *port,
 				 struct sas_phy *phy)
 {
-	sysfs_remove_link(&port->dev.kobj, phy->dev.bus_id);
+	sysfs_remove_link(&port->dev.kobj, dev_name(&phy->dev));
 	sysfs_remove_link(&phy->dev.kobj, "port");
 }
 
@@ -821,11 +821,11 @@
 
 	if (scsi_is_sas_expander_device(parent)) {
 		struct sas_rphy *rphy = dev_to_rphy(parent);
-		sprintf(port->dev.bus_id, "port-%d:%d:%d", shost->host_no,
-			rphy->scsi_target_id, port->port_identifier);
+		dev_set_name(&port->dev, "port-%d:%d:%d", shost->host_no,
+			     rphy->scsi_target_id, port->port_identifier);
 	} else
-		sprintf(port->dev.bus_id, "port-%d:%d", shost->host_no,
-			port->port_identifier);
+		dev_set_name(&port->dev, "port-%d:%d", shost->host_no,
+			     port->port_identifier);
 
 	transport_setup_device(&port->dev);
 
@@ -935,7 +935,7 @@
 	if (port->is_backlink) {
 		struct device *parent = port->dev.parent;
 
-		sysfs_remove_link(&port->dev.kobj, parent->bus_id);
+		sysfs_remove_link(&port->dev.kobj, dev_name(parent));
 		port->is_backlink = 0;
 	}
 
@@ -984,7 +984,8 @@
 		/* If this trips, you added a phy that was already
 		 * part of a different port */
 		if (unlikely(tmp != phy)) {
-			dev_printk(KERN_ERR, &port->dev, "trying to add phy %s fails: it's already part of another port\n", phy->dev.bus_id);
+			dev_printk(KERN_ERR, &port->dev, "trying to add phy %s fails: it's already part of another port\n",
+				   dev_name(&phy->dev));
 			BUG();
 		}
 	} else {
@@ -1023,7 +1024,7 @@
 		return;
 	port->is_backlink = 1;
 	res = sysfs_create_link(&port->dev.kobj, &parent->kobj,
-				parent->bus_id);
+				dev_name(parent));
 	if (res)
 		goto err;
 	return;
@@ -1367,11 +1368,12 @@
 	rdev->rphy.dev.release = sas_end_device_release;
 	if (scsi_is_sas_expander_device(parent->dev.parent)) {
 		struct sas_rphy *rphy = dev_to_rphy(parent->dev.parent);
-		sprintf(rdev->rphy.dev.bus_id, "end_device-%d:%d:%d",
-			shost->host_no, rphy->scsi_target_id, parent->port_identifier);
+		dev_set_name(&rdev->rphy.dev, "end_device-%d:%d:%d",
+			     shost->host_no, rphy->scsi_target_id,
+			     parent->port_identifier);
 	} else
-		sprintf(rdev->rphy.dev.bus_id, "end_device-%d:%d",
-			shost->host_no, parent->port_identifier);
+		dev_set_name(&rdev->rphy.dev, "end_device-%d:%d",
+			     shost->host_no, parent->port_identifier);
 	rdev->rphy.identify.device_type = SAS_END_DEVICE;
 	sas_rphy_initialize(&rdev->rphy);
 	transport_setup_device(&rdev->rphy.dev);
@@ -1411,8 +1413,8 @@
 	mutex_lock(&sas_host->lock);
 	rdev->rphy.scsi_target_id = sas_host->next_expander_id++;
 	mutex_unlock(&sas_host->lock);
-	sprintf(rdev->rphy.dev.bus_id, "expander-%d:%d",
-		shost->host_no, rdev->rphy.scsi_target_id);
+	dev_set_name(&rdev->rphy.dev, "expander-%d:%d",
+		     shost->host_no, rdev->rphy.scsi_target_id);
 	rdev->rphy.identify.device_type = type;
 	sas_rphy_initialize(&rdev->rphy);
 	transport_setup_device(&rdev->rphy.dev);
@@ -1445,7 +1447,7 @@
 	transport_add_device(&rphy->dev);
 	transport_configure_device(&rphy->dev);
 	if (sas_bsg_initialize(shost, rphy))
-		printk("fail to a bsg device %s\n", rphy->dev.bus_id);
+		printk("fail to a bsg device %s\n", dev_name(&rphy->dev));
 
 
 	mutex_lock(&sas_host->lock);
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 8a7af95..21a045e 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -212,7 +212,7 @@
 	rport->roles = ids->roles;
 
 	id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
-	sprintf(rport->dev.bus_id, "port-%d:%d", shost->host_no, id);
+	dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
 
 	transport_setup_device(&rport->dev);
 
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 62b28d5..d57566b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -48,6 +48,7 @@
 #include <linux/delay.h>
 #include <linux/mutex.h>
 #include <linux/string_helpers.h>
+#include <linux/async.h>
 #include <asm/uaccess.h>
 
 #include <scsi/scsi.h>
@@ -1802,6 +1803,71 @@
 	return 0;
 }
 
+/*
+ * The asynchronous part of sd_probe
+ */
+static void sd_probe_async(void *data, async_cookie_t cookie)
+{
+	struct scsi_disk *sdkp = data;
+	struct scsi_device *sdp;
+	struct gendisk *gd;
+	u32 index;
+	struct device *dev;
+
+	sdp = sdkp->device;
+	gd = sdkp->disk;
+	index = sdkp->index;
+	dev = &sdp->sdev_gendev;
+
+	if (!sdp->request_queue->rq_timeout) {
+		if (sdp->type != TYPE_MOD)
+			blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
+		else
+			blk_queue_rq_timeout(sdp->request_queue,
+					     SD_MOD_TIMEOUT);
+	}
+
+	device_initialize(&sdkp->dev);
+	sdkp->dev.parent = &sdp->sdev_gendev;
+	sdkp->dev.class = &sd_disk_class;
+	dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
+
+	if (device_add(&sdkp->dev))
+		goto out_free_index;
+
+	get_device(&sdp->sdev_gendev);
+
+	if (index < SD_MAX_DISKS) {
+		gd->major = sd_major((index & 0xf0) >> 4);
+		gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
+		gd->minors = SD_MINORS;
+	}
+	gd->fops = &sd_fops;
+	gd->private_data = &sdkp->driver;
+	gd->queue = sdkp->device->request_queue;
+
+	sd_revalidate_disk(gd);
+
+	blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
+
+	gd->driverfs_dev = &sdp->sdev_gendev;
+	gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS;
+	if (sdp->removable)
+		gd->flags |= GENHD_FL_REMOVABLE;
+
+	dev_set_drvdata(dev, sdkp);
+	add_disk(gd);
+	sd_dif_config_host(sdkp);
+
+	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
+		  sdp->removable ? "removable " : "");
+
+	return;
+
+ out_free_index:
+	ida_remove(&sd_index_ida, index);
+}
+
 /**
  *	sd_probe - called during driver initialization and whenever a
  *	new scsi device is attached to the system. It is called once
@@ -1865,48 +1931,7 @@
 	sdkp->openers = 0;
 	sdkp->previous_state = 1;
 
-	if (!sdp->request_queue->rq_timeout) {
-		if (sdp->type != TYPE_MOD)
-			blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
-		else
-			blk_queue_rq_timeout(sdp->request_queue,
-					     SD_MOD_TIMEOUT);
-	}
-
-	device_initialize(&sdkp->dev);
-	sdkp->dev.parent = &sdp->sdev_gendev;
-	sdkp->dev.class = &sd_disk_class;
-	strncpy(sdkp->dev.bus_id, sdp->sdev_gendev.bus_id, BUS_ID_SIZE);
-
-	if (device_add(&sdkp->dev))
-		goto out_free_index;
-
-	get_device(&sdp->sdev_gendev);
-
-	if (index < SD_MAX_DISKS) {
-		gd->major = sd_major((index & 0xf0) >> 4);
-		gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
-		gd->minors = SD_MINORS;
-	}
-	gd->fops = &sd_fops;
-	gd->private_data = &sdkp->driver;
-	gd->queue = sdkp->device->request_queue;
-
-	sd_revalidate_disk(gd);
-
-	blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
-
-	gd->driverfs_dev = &sdp->sdev_gendev;
-	gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS;
-	if (sdp->removable)
-		gd->flags |= GENHD_FL_REMOVABLE;
-
-	dev_set_drvdata(dev, sdkp);
-	add_disk(gd);
-	sd_dif_config_host(sdkp);
-
-	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
-		  sdp->removable ? "removable " : "");
+	async_schedule(sd_probe_async, sdkp);
 
 	return 0;
 
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 3ebb1f2..184dff4 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -142,7 +142,7 @@
 static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors)
 {
 	struct sd_dif_tuple *sdt = prot;
-	char *tag = tag_buf;
+	u8 *tag = tag_buf;
 	unsigned int i, j;
 
 	for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
@@ -154,7 +154,7 @@
 static void sd_dif_type1_get_tag(void *prot, void *tag_buf, unsigned int sectors)
 {
 	struct sd_dif_tuple *sdt = prot;
-	char *tag = tag_buf;
+	u8 *tag = tag_buf;
 	unsigned int i, j;
 
 	for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
@@ -256,7 +256,7 @@
 static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors)
 {
 	struct sd_dif_tuple *sdt = prot;
-	char *tag = tag_buf;
+	u8 *tag = tag_buf;
 	unsigned int i, j;
 
 	for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
@@ -269,7 +269,7 @@
 static void sd_dif_type3_get_tag(void *prot, void *tag_buf, unsigned int sectors)
 {
 	struct sd_dif_tuple *sdt = prot;
-	char *tag = tag_buf;
+	u8 *tag = tag_buf;
 	unsigned int i, j;
 
 	for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
@@ -374,7 +374,10 @@
 	else
 		csum_convert = 0;
 
+	BUG_ON(dif && (scmd->cmnd[0] == READ_6 || scmd->cmnd[0] == WRITE_6));
+
 	switch (scmd->cmnd[0]) {
+	case READ_6:
 	case READ_10:
 	case READ_12:
 	case READ_16:
@@ -390,6 +393,7 @@
 
 		break;
 
+	case WRITE_6:
 	case WRITE_10:
 	case WRITE_12:
 	case WRITE_16:
@@ -475,8 +479,9 @@
 
 error:
 	kunmap_atomic(sdt, KM_USER0);
-	sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u\n",
-		  __func__, virt, phys, be32_to_cpu(sdt->ref_tag));
+	sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n",
+		  __func__, virt, phys, be32_to_cpu(sdt->ref_tag),
+		  be16_to_cpu(sdt->app_tag));
 
 	return -EIO;
 }
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 7f0df29..e946e05 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -526,7 +526,7 @@
 	if (!scomp)
 		goto err_free;
 
-	edev = enclosure_register(cdev->parent, sdev->sdev_gendev.bus_id,
+	edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev),
 				  components, &ses_enclosure_callbacks);
 	if (IS_ERR(edev)) {
 		err = PTR_ERR(edev);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 5103855..8f0bd3f 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1669,6 +1669,8 @@
 		md->pages = req_schp->pages;
 		md->page_order = req_schp->page_order;
 		md->nr_entries = req_schp->k_use_sg;
+		md->offset = 0;
+		md->null_mapped = hp->dxferp ? 0 : 1;
 	}
 
 	if (iov_count)
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 31fe605..0807b26 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -297,7 +297,7 @@
 	return err;
 }
 
-static void __exit sgiwd93_remove(struct platform_device *pdev)
+static int __exit sgiwd93_remove(struct platform_device *pdev)
 {
 	struct Scsi_Host *host = platform_get_drvdata(pdev);
 	struct ip22_hostdata *hdata = (struct ip22_hostdata *) host->hostdata;
@@ -307,6 +307,7 @@
 	free_irq(pd->irq, host);
 	dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
 	scsi_host_put(host);
+	return 0;
 }
 
 static struct platform_driver sgiwd93_driver = {
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index d63d229..6dc8b84 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -102,7 +102,7 @@
 	struct NCR_700_Host_Parameters *hostdata =
 		kzalloc(sizeof(struct NCR_700_Host_Parameters),	GFP_KERNEL);
 
-	printk(KERN_NOTICE "sim710: %s\n", dev->bus_id);
+	printk(KERN_NOTICE "sim710: %s\n", dev_name(dev));
 	printk(KERN_NOTICE "sim710: irq = %d, clock = %d, base = 0x%lx, scsi_id = %d\n",
 	       irq, clock, base_addr, scsi_id);
 
@@ -305,7 +305,7 @@
 		scsi_id = ffs(val) - 1;
 
 		if(scsi_id > 7 || (val & ~(1<<scsi_id)) != 0) {
-			printk(KERN_ERR "sim710.c, EISA card %s has incorrect scsi_id, setting to 7\n", dev->bus_id);
+			printk(KERN_ERR "sim710.c, EISA card %s has incorrect scsi_id, setting to 7\n", dev_name(dev));
 			scsi_id = 7;
 		}
 	} else {
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 2bbef4c..77f0b2c 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -78,8 +78,7 @@
 	base = res->start;
 	hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
 	if (!hostdata) {
-		printk(KERN_ERR "%s: Failed to allocate host data\n",
-		       dev->dev.bus_id);
+		dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
 		return -ENOMEM;
 	}
 
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 7f3f317..c6f19ee 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -17,7 +17,7 @@
    Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
  */
 
-static const char *verstr = "20080504";
+static const char *verstr = "20081215";
 
 #include <linux/module.h>
 
@@ -182,18 +182,16 @@
 
 static int modes_defined;
 
-static struct st_buffer *new_tape_buffer(int, int, int);
 static int enlarge_buffer(struct st_buffer *, int, int);
 static void clear_buffer(struct st_buffer *);
 static void normalize_buffer(struct st_buffer *);
 static int append_to_buffer(const char __user *, struct st_buffer *, int);
 static int from_buffer(struct st_buffer *, char __user *, int);
 static void move_buffer_data(struct st_buffer *, int);
-static void buf_to_sg(struct st_buffer *, unsigned int);
 
-static int sgl_map_user_pages(struct scatterlist *, const unsigned int, 
+static int sgl_map_user_pages(struct st_buffer *, const unsigned int,
 			      unsigned long, size_t, int);
-static int sgl_unmap_user_pages(struct scatterlist *, const unsigned int, int);
+static int sgl_unmap_user_pages(struct st_buffer *, const unsigned int, int);
 
 static int st_probe(struct device *);
 static int st_remove(struct device *);
@@ -435,22 +433,6 @@
 	return (-EIO);
 }
 
-
-/* Wakeup from interrupt */
-static void st_sleep_done(void *data, char *sense, int result, int resid)
-{
-	struct st_request *SRpnt = data;
-	struct scsi_tape *STp = SRpnt->stp;
-
-	memcpy(SRpnt->sense, sense, SCSI_SENSE_BUFFERSIZE);
-	(STp->buffer)->cmdstat.midlevel_result = SRpnt->result = result;
-	(STp->buffer)->cmdstat.residual = resid;
-	DEB( STp->write_pending = 0; )
-
-	if (SRpnt->waiting)
-		complete(SRpnt->waiting);
-}
-
 static struct st_request *st_allocate_request(struct scsi_tape *stp)
 {
 	struct st_request *streq;
@@ -475,6 +457,63 @@
 	kfree(streq);
 }
 
+static void st_scsi_execute_end(struct request *req, int uptodate)
+{
+	struct st_request *SRpnt = req->end_io_data;
+	struct scsi_tape *STp = SRpnt->stp;
+
+	STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
+	STp->buffer->cmdstat.residual = req->data_len;
+
+	if (SRpnt->waiting)
+		complete(SRpnt->waiting);
+
+	blk_rq_unmap_user(SRpnt->bio);
+	__blk_put_request(req->q, req);
+}
+
+static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
+			   int data_direction, void *buffer, unsigned bufflen,
+			   int timeout, int retries)
+{
+	struct request *req;
+	struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
+	int err = 0;
+	int write = (data_direction == DMA_TO_DEVICE);
+
+	req = blk_get_request(SRpnt->stp->device->request_queue, write,
+			      GFP_KERNEL);
+	if (!req)
+		return DRIVER_ERROR << 24;
+
+	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	req->cmd_flags |= REQ_QUIET;
+
+	mdata->null_mapped = 1;
+
+	if (bufflen) {
+		err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen,
+				      GFP_KERNEL);
+		if (err) {
+			blk_put_request(req);
+			return DRIVER_ERROR << 24;
+		}
+	}
+
+	SRpnt->bio = req->bio;
+	req->cmd_len = COMMAND_SIZE(cmd[0]);
+	memset(req->cmd, 0, BLK_MAX_CDB);
+	memcpy(req->cmd, cmd, req->cmd_len);
+	req->sense = SRpnt->sense;
+	req->sense_len = 0;
+	req->timeout = timeout;
+	req->retries = retries;
+	req->end_io_data = SRpnt;
+
+	blk_execute_rq_nowait(req->q, NULL, req, 1, st_scsi_execute_end);
+	return 0;
+}
+
 /* Do the scsi command. Waits until command performed if do_wait is true.
    Otherwise write_behind_check() is used to check that the command
    has finished. */
@@ -483,6 +522,8 @@
 	   int bytes, int direction, int timeout, int retries, int do_wait)
 {
 	struct completion *waiting;
+	struct rq_map_data *mdata = &STp->buffer->map_data;
+	int ret;
 
 	/* if async, make sure there's no command outstanding */
 	if (!do_wait && ((STp->buffer)->last_SRpnt)) {
@@ -510,21 +551,27 @@
 	init_completion(waiting);
 	SRpnt->waiting = waiting;
 
-	if (!STp->buffer->do_dio)
-		buf_to_sg(STp->buffer, bytes);
+	if (STp->buffer->do_dio) {
+		mdata->nr_entries = STp->buffer->sg_segs;
+		mdata->pages = STp->buffer->mapped_pages;
+	} else {
+		mdata->nr_entries =
+			DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
+		STp->buffer->map_data.pages = STp->buffer->reserved_pages;
+		STp->buffer->map_data.offset = 0;
+	}
 
 	memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
 	STp->buffer->cmdstat.have_sense = 0;
 	STp->buffer->syscall_result = 0;
 
-	if (scsi_execute_async(STp->device, cmd, COMMAND_SIZE(cmd[0]), direction,
-			&((STp->buffer)->sg[0]), bytes, (STp->buffer)->sg_segs,
-			       timeout, retries, SRpnt, st_sleep_done, GFP_KERNEL)) {
+	ret = st_scsi_execute(SRpnt, cmd, direction, NULL, bytes, timeout,
+			      retries);
+	if (ret) {
 		/* could not allocate the buffer or request was too large */
 		(STp->buffer)->syscall_result = (-EBUSY);
 		(STp->buffer)->last_SRpnt = NULL;
-	}
-	else if (do_wait) {
+	} else if (do_wait) {
 		wait_for_completion(waiting);
 		SRpnt->waiting = NULL;
 		(STp->buffer)->syscall_result = st_chk_result(STp, SRpnt);
@@ -533,28 +580,6 @@
 	return SRpnt;
 }
 
-static int st_scsi_kern_execute(struct st_request *streq,
-				const unsigned char *cmd, int data_direction,
-				void *buffer, unsigned bufflen, int timeout,
-				int retries)
-{
-	struct scsi_tape *stp = streq->stp;
-	int ret, resid;
-
-	stp->buffer->cmdstat.have_sense = 0;
-	memcpy(streq->cmd, cmd, sizeof(streq->cmd));
-
-	ret = scsi_execute(stp->device, cmd, data_direction, buffer, bufflen,
-			   streq->sense, timeout, retries, 0, &resid);
-	if (driver_byte(ret) & DRIVER_ERROR)
-		return -EBUSY;
-
-	stp->buffer->cmdstat.midlevel_result = streq->result = ret;
-	stp->buffer->cmdstat.residual = resid;
-	stp->buffer->syscall_result = st_chk_result(stp, streq);
-
-	return 0;
-}
 
 /* Handle the write-behind checking (waits for completion). Returns -ENOSPC if
    write has been correct but EOM early warning reached, -EIO if write ended in
@@ -627,7 +652,6 @@
 {
 	struct st_request *SRpnt;
 	unsigned char cmd[MAX_COMMAND_SIZE];
-	int ret;
 
 	cmd[0] = SPACE;
 	cmd[1] = 0x01;		/* Space FileMarks */
@@ -641,26 +665,20 @@
         DEBC(printk(ST_DEB_MSG "%s: Stepping over filemark %s.\n",
 		   tape_name(STp), forward ? "forward" : "backward"));
 
-	SRpnt = st_allocate_request(STp);
+	SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
+			   STp->device->request_queue->rq_timeout,
+			   MAX_RETRIES, 1);
 	if (!SRpnt)
-		return STp->buffer->syscall_result;
+		return (STp->buffer)->syscall_result;
 
-	ret = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
-				   STp->device->request_queue->rq_timeout,
-				   MAX_RETRIES);
-	if (ret)
-		goto out;
-
-	ret = STp->buffer->syscall_result;
+	st_release_request(SRpnt);
+	SRpnt = NULL;
 
 	if ((STp->buffer)->cmdstat.midlevel_result != 0)
 		printk(KERN_ERR "%s: Stepping over filemark %s failed.\n",
 		   tape_name(STp), forward ? "forward" : "backward");
 
-out:
-	st_release_request(SRpnt);
-
-	return ret;
+	return (STp->buffer)->syscall_result;
 }
 
 
@@ -881,24 +899,21 @@
 	int attentions, waits, max_wait, scode;
 	int retval = CHKRES_READY, new_session = 0;
 	unsigned char cmd[MAX_COMMAND_SIZE];
-	struct st_request *SRpnt;
+	struct st_request *SRpnt = NULL;
 	struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
 
-	SRpnt = st_allocate_request(STp);
-	if (!SRpnt)
-		return STp->buffer->syscall_result;
-
 	max_wait = do_wait ? ST_BLOCK_SECONDS : 0;
 
 	for (attentions=waits=0; ; ) {
 		memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
 		cmd[0] = TEST_UNIT_READY;
+		SRpnt = st_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
+				   STp->long_timeout, MAX_READY_RETRIES, 1);
 
-		retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
-					      STp->long_timeout,
-					      MAX_READY_RETRIES);
-		if (retval)
+		if (!SRpnt) {
+			retval = (STp->buffer)->syscall_result;
 			break;
+		}
 
 		if (cmdstatp->have_sense) {
 
@@ -942,8 +957,8 @@
 		break;
 	}
 
-	st_release_request(SRpnt);
-
+	if (SRpnt != NULL)
+		st_release_request(SRpnt);
 	return retval;
 }
 
@@ -1020,24 +1035,17 @@
 		}
 	}
 
-	SRpnt = st_allocate_request(STp);
-	if (!SRpnt) {
-		retval = STp->buffer->syscall_result;
-		goto err_out;
-	}
-
 	if (STp->omit_blklims)
 		STp->min_block = STp->max_block = (-1);
 	else {
 		memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
 		cmd[0] = READ_BLOCK_LIMITS;
 
-		retval = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
-					      STp->buffer->b_data, 6,
-					      STp->device->request_queue->rq_timeout,
-					      MAX_READY_RETRIES);
-		if (retval) {
-			st_release_request(SRpnt);
+		SRpnt = st_do_scsi(SRpnt, STp, cmd, 6, DMA_FROM_DEVICE,
+				   STp->device->request_queue->rq_timeout,
+				   MAX_READY_RETRIES, 1);
+		if (!SRpnt) {
+			retval = (STp->buffer)->syscall_result;
 			goto err_out;
 		}
 
@@ -1061,12 +1069,11 @@
 	cmd[0] = MODE_SENSE;
 	cmd[4] = 12;
 
-	retval = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
-				      STp->buffer->b_data, 12,
-				      STp->device->request_queue->rq_timeout,
-				      MAX_READY_RETRIES);
-	if (retval) {
-		st_release_request(SRpnt);
+	SRpnt = st_do_scsi(SRpnt, STp, cmd, 12, DMA_FROM_DEVICE,
+			   STp->device->request_queue->rq_timeout,
+			   MAX_READY_RETRIES, 1);
+	if (!SRpnt) {
+		retval = (STp->buffer)->syscall_result;
 		goto err_out;
 	}
 
@@ -1296,17 +1303,11 @@
 		cmd[0] = WRITE_FILEMARKS;
 		cmd[4] = 1 + STp->two_fm;
 
-		SRpnt = st_allocate_request(STp);
+		SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
+				   STp->device->request_queue->rq_timeout,
+				   MAX_WRITE_RETRIES, 1);
 		if (!SRpnt) {
-			result = STp->buffer->syscall_result;
-			goto out;
-		}
-
-		result = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
-					      STp->device->request_queue->rq_timeout,
-					      MAX_WRITE_RETRIES);
-		if (result) {
-			st_release_request(SRpnt);
+			result = (STp->buffer)->syscall_result;
 			goto out;
 		}
 
@@ -1471,8 +1472,8 @@
 
 	if (i && ((unsigned long)buf & queue_dma_alignment(
 					STp->device->request_queue)) == 0) {
-		i = sgl_map_user_pages(&(STbp->sg[0]), STbp->use_sg,
-				      (unsigned long)buf, count, (is_read ? READ : WRITE));
+		i = sgl_map_user_pages(STbp, STbp->use_sg, (unsigned long)buf,
+				       count, (is_read ? READ : WRITE));
 		if (i > 0) {
 			STbp->do_dio = i;
 			STbp->buffer_bytes = 0;   /* can be used as transfer counter */
@@ -1480,7 +1481,6 @@
 		else
 			STbp->do_dio = 0;  /* fall back to buffering with any error */
 		STbp->sg_segs = STbp->do_dio;
-		STbp->frp_sg_current = 0;
 		DEB(
 		     if (STbp->do_dio) {
 			STp->nbr_dio++;
@@ -1526,7 +1526,7 @@
 
 	STbp = STp->buffer;
 	if (STbp->do_dio) {
-		sgl_unmap_user_pages(&(STbp->sg[0]), STbp->do_dio, is_read);
+		sgl_unmap_user_pages(STbp, STbp->do_dio, is_read);
 		STbp->do_dio = 0;
 		STbp->sg_segs = 0;
 	}
@@ -2372,7 +2372,6 @@
 {
 	unsigned char cmd[MAX_COMMAND_SIZE];
 	struct st_request *SRpnt;
-	int ret;
 
 	memset(cmd, 0, MAX_COMMAND_SIZE);
 	cmd[0] = MODE_SENSE;
@@ -2381,17 +2380,14 @@
 	cmd[2] = page;
 	cmd[4] = 255;
 
-	SRpnt = st_allocate_request(STp);
-	if (!SRpnt)
-		return STp->buffer->syscall_result;
+	SRpnt = st_do_scsi(NULL, STp, cmd, cmd[4], DMA_FROM_DEVICE,
+			   STp->device->request_queue->rq_timeout, 0, 1);
+	if (SRpnt == NULL)
+		return (STp->buffer)->syscall_result;
 
-	ret = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
-				   STp->buffer->b_data, cmd[4],
-				   STp->device->request_queue->rq_timeout,
-				   MAX_RETRIES);
 	st_release_request(SRpnt);
 
-	return ret ? : STp->buffer->syscall_result;
+	return STp->buffer->syscall_result;
 }
 
 
@@ -2399,9 +2395,10 @@
    in the buffer is correctly formatted. The long timeout is used if slow is non-zero. */
 static int write_mode_page(struct scsi_tape *STp, int page, int slow)
 {
-	int pgo, timeout, ret = 0;
+	int pgo;
 	unsigned char cmd[MAX_COMMAND_SIZE];
 	struct st_request *SRpnt;
+	int timeout;
 
 	memset(cmd, 0, MAX_COMMAND_SIZE);
 	cmd[0] = MODE_SELECT;
@@ -2415,21 +2412,16 @@
 	(STp->buffer)->b_data[MH_OFF_DEV_SPECIFIC] &= ~MH_BIT_WP;
 	(STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR;
 
-	SRpnt = st_allocate_request(STp);
-	if (!SRpnt)
-		return ret;
-
-	timeout = slow ? STp->long_timeout :
-		STp->device->request_queue->rq_timeout;
-
-	ret = st_scsi_kern_execute(SRpnt, cmd, DMA_TO_DEVICE,
-				   STp->buffer->b_data, cmd[4], timeout, 0);
-	if (!ret)
-		ret = STp->buffer->syscall_result;
+	timeout = slow ?
+		STp->long_timeout : STp->device->request_queue->rq_timeout;
+	SRpnt = st_do_scsi(NULL, STp, cmd, cmd[4], DMA_TO_DEVICE,
+			   timeout, 0, 1);
+	if (SRpnt == NULL)
+		return (STp->buffer)->syscall_result;
 
 	st_release_request(SRpnt);
 
-	return ret;
+	return STp->buffer->syscall_result;
 }
 
 
@@ -2547,16 +2539,13 @@
 		printk(ST_DEB_MSG "%s: Loading tape.\n", name);
 		);
 
-	SRpnt = st_allocate_request(STp);
+	SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
+			   timeout, MAX_RETRIES, 1);
 	if (!SRpnt)
-		return STp->buffer->syscall_result;
-
-	retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0, timeout,
-				      MAX_RETRIES);
-	if (retval)
-		goto out;
+		return (STp->buffer)->syscall_result;
 
 	retval = (STp->buffer)->syscall_result;
+	st_release_request(SRpnt);
 
 	if (!retval) {	/* SCSI command successful */
 
@@ -2575,8 +2564,6 @@
 		STps = &(STp->ps[STp->partition]);
 		STps->drv_file = STps->drv_block = (-1);
 	}
-out:
-	st_release_request(SRpnt);
 
 	return retval;
 }
@@ -2852,15 +2839,12 @@
 		return (-ENOSYS);
 	}
 
-	SRpnt = st_allocate_request(STp);
+	SRpnt = st_do_scsi(NULL, STp, cmd, datalen, direction,
+			   timeout, MAX_RETRIES, 1);
 	if (!SRpnt)
 		return (STp->buffer)->syscall_result;
 
-	ioctl_result = st_scsi_kern_execute(SRpnt, cmd, direction,
-					    STp->buffer->b_data, datalen,
-					    timeout, MAX_RETRIES);
-	if (!ioctl_result)
-		ioctl_result = (STp->buffer)->syscall_result;
+	ioctl_result = (STp->buffer)->syscall_result;
 
 	if (!ioctl_result) {	/* SCSI command successful */
 		st_release_request(SRpnt);
@@ -3022,17 +3006,11 @@
 		if (!logical && !STp->scsi2_logical)
 			scmd[1] = 1;
 	}
-
-	SRpnt = st_allocate_request(STp);
+	SRpnt = st_do_scsi(NULL, STp, scmd, 20, DMA_FROM_DEVICE,
+			   STp->device->request_queue->rq_timeout,
+			   MAX_READY_RETRIES, 1);
 	if (!SRpnt)
-		return STp->buffer->syscall_result;
-
-	result = st_scsi_kern_execute(SRpnt, scmd, DMA_FROM_DEVICE,
-				      STp->buffer->b_data, 20,
-				      STp->device->request_queue->rq_timeout,
-				      MAX_READY_RETRIES);
-	if (result)
-		goto out;
+		return (STp->buffer)->syscall_result;
 
 	if ((STp->buffer)->syscall_result != 0 ||
 	    (STp->device->scsi_level >= SCSI_2 &&
@@ -3060,7 +3038,6 @@
                 DEBC(printk(ST_DEB_MSG "%s: Got tape pos. blk %d part %d.\n", name,
                             *block, *partition));
 	}
-out:
 	st_release_request(SRpnt);
 	SRpnt = NULL;
 
@@ -3135,14 +3112,10 @@
 		timeout = STp->device->request_queue->rq_timeout;
 	}
 
-	SRpnt = st_allocate_request(STp);
+	SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE,
+			   timeout, MAX_READY_RETRIES, 1);
 	if (!SRpnt)
-		return STp->buffer->syscall_result;
-
-	result = st_scsi_kern_execute(SRpnt, scmd, DMA_NONE, NULL, 0,
-				      timeout, MAX_READY_RETRIES);
-	if (result)
-		goto out;
+		return (STp->buffer)->syscall_result;
 
 	STps->drv_block = STps->drv_file = (-1);
 	STps->eof = ST_NOEOF;
@@ -3167,7 +3140,7 @@
 			STps->drv_block = STps->drv_file = 0;
 		result = 0;
 	}
-out:
+
 	st_release_request(SRpnt);
 	SRpnt = NULL;
 
@@ -3696,38 +3669,34 @@
 
 /* Try to allocate a new tape buffer. Calling function must not hold
    dev_arr_lock. */
-static struct st_buffer *
- new_tape_buffer(int from_initialization, int need_dma, int max_sg)
+static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
 {
-	int i, got = 0;
-	gfp_t priority;
 	struct st_buffer *tb;
 
-	if (from_initialization)
-		priority = GFP_ATOMIC;
-	else
-		priority = GFP_KERNEL;
-
-	i = sizeof(struct st_buffer) + (max_sg - 1) * sizeof(struct scatterlist) +
-		max_sg * sizeof(struct st_buf_fragment);
-	tb = kzalloc(i, priority);
+	tb = kzalloc(sizeof(struct st_buffer), GFP_ATOMIC);
 	if (!tb) {
 		printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n");
 		return NULL;
 	}
-	tb->frp_segs = tb->orig_frp_segs = 0;
+	tb->frp_segs = 0;
 	tb->use_sg = max_sg;
-	tb->frp = (struct st_buf_fragment *)(&(tb->sg[0]) + max_sg);
-
 	tb->dma = need_dma;
-	tb->buffer_size = got;
-	sg_init_table(tb->sg, max_sg);
+	tb->buffer_size = 0;
+
+	tb->reserved_pages = kzalloc(max_sg * sizeof(struct page *),
+				     GFP_ATOMIC);
+	if (!tb->reserved_pages) {
+		kfree(tb);
+		return NULL;
+	}
 
 	return tb;
 }
 
 
 /* Try to allocate enough space in the tape buffer */
+#define ST_MAX_ORDER 6
+
 static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma)
 {
 	int segs, nbr, max_segs, b_size, order, got;
@@ -3747,33 +3716,45 @@
 	priority = GFP_KERNEL | __GFP_NOWARN;
 	if (need_dma)
 		priority |= GFP_DMA;
-	for (b_size = PAGE_SIZE, order=0; order <= 6 &&
-	     b_size < new_size - STbuffer->buffer_size;
-	     order++, b_size *= 2)
-		;  /* empty */
+
+	if (STbuffer->cleared)
+		priority |= __GFP_ZERO;
+
+	if (STbuffer->frp_segs) {
+		order = STbuffer->map_data.page_order;
+		b_size = PAGE_SIZE << order;
+	} else {
+		for (b_size = PAGE_SIZE, order = 0;
+		     order < ST_MAX_ORDER && b_size < new_size;
+		     order++, b_size *= 2)
+			;  /* empty */
+	}
+	if (max_segs * (PAGE_SIZE << order) < new_size) {
+		if (order == ST_MAX_ORDER)
+			return 0;
+		normalize_buffer(STbuffer);
+		return enlarge_buffer(STbuffer, new_size, need_dma);
+	}
 
 	for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size;
 	     segs < max_segs && got < new_size;) {
-		STbuffer->frp[segs].page = alloc_pages(priority, order);
-		if (STbuffer->frp[segs].page == NULL) {
-			if (new_size - got <= (max_segs - segs) * b_size / 2) {
-				b_size /= 2; /* Large enough for the rest of the buffers */
-				order--;
-				continue;
-			}
+		struct page *page;
+
+		page = alloc_pages(priority, order);
+		if (!page) {
 			DEB(STbuffer->buffer_size = got);
 			normalize_buffer(STbuffer);
 			return 0;
 		}
-		STbuffer->frp[segs].length = b_size;
+
 		STbuffer->frp_segs += 1;
 		got += b_size;
 		STbuffer->buffer_size = got;
-		if (STbuffer->cleared)
-			memset(page_address(STbuffer->frp[segs].page), 0, b_size);
+		STbuffer->reserved_pages[segs] = page;
 		segs++;
 	}
-	STbuffer->b_data = page_address(STbuffer->frp[0].page);
+	STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
+	STbuffer->map_data.page_order = order;
 
 	return 1;
 }
@@ -3785,7 +3766,8 @@
 	int i;
 
 	for (i=0; i < st_bp->frp_segs; i++)
-		memset(page_address(st_bp->frp[i].page), 0, st_bp->frp[i].length);
+		memset(page_address(st_bp->reserved_pages[i]), 0,
+		       PAGE_SIZE << st_bp->map_data.page_order);
 	st_bp->cleared = 1;
 }
 
@@ -3793,16 +3775,16 @@
 /* Release the extra buffer */
 static void normalize_buffer(struct st_buffer * STbuffer)
 {
-	int i, order;
+	int i, order = STbuffer->map_data.page_order;
 
-	for (i = STbuffer->orig_frp_segs; i < STbuffer->frp_segs; i++) {
-		order = get_order(STbuffer->frp[i].length);
-		__free_pages(STbuffer->frp[i].page, order);
-		STbuffer->buffer_size -= STbuffer->frp[i].length;
+	for (i = 0; i < STbuffer->frp_segs; i++) {
+		__free_pages(STbuffer->reserved_pages[i], order);
+		STbuffer->buffer_size -= (PAGE_SIZE << order);
 	}
-	STbuffer->frp_segs = STbuffer->orig_frp_segs;
-	STbuffer->frp_sg_current = 0;
+	STbuffer->frp_segs = 0;
 	STbuffer->sg_segs = 0;
+	STbuffer->map_data.page_order = 0;
+	STbuffer->map_data.offset = 0;
 }
 
 
@@ -3811,18 +3793,19 @@
 static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
 {
 	int i, cnt, res, offset;
+	int length = PAGE_SIZE << st_bp->map_data.page_order;
 
 	for (i = 0, offset = st_bp->buffer_bytes;
-	     i < st_bp->frp_segs && offset >= st_bp->frp[i].length; i++)
-		offset -= st_bp->frp[i].length;
+	     i < st_bp->frp_segs && offset >= length; i++)
+		offset -= length;
 	if (i == st_bp->frp_segs) {	/* Should never happen */
 		printk(KERN_WARNING "st: append_to_buffer offset overflow.\n");
 		return (-EIO);
 	}
 	for (; i < st_bp->frp_segs && do_count > 0; i++) {
-		cnt = st_bp->frp[i].length - offset < do_count ?
-		    st_bp->frp[i].length - offset : do_count;
-		res = copy_from_user(page_address(st_bp->frp[i].page) + offset, ubp, cnt);
+		struct page *page = st_bp->reserved_pages[i];
+		cnt = length - offset < do_count ? length - offset : do_count;
+		res = copy_from_user(page_address(page) + offset, ubp, cnt);
 		if (res)
 			return (-EFAULT);
 		do_count -= cnt;
@@ -3842,18 +3825,19 @@
 static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
 {
 	int i, cnt, res, offset;
+	int length = PAGE_SIZE << st_bp->map_data.page_order;
 
 	for (i = 0, offset = st_bp->read_pointer;
-	     i < st_bp->frp_segs && offset >= st_bp->frp[i].length; i++)
-		offset -= st_bp->frp[i].length;
+	     i < st_bp->frp_segs && offset >= length; i++)
+		offset -= length;
 	if (i == st_bp->frp_segs) {	/* Should never happen */
 		printk(KERN_WARNING "st: from_buffer offset overflow.\n");
 		return (-EIO);
 	}
 	for (; i < st_bp->frp_segs && do_count > 0; i++) {
-		cnt = st_bp->frp[i].length - offset < do_count ?
-		    st_bp->frp[i].length - offset : do_count;
-		res = copy_to_user(ubp, page_address(st_bp->frp[i].page) + offset, cnt);
+		struct page *page = st_bp->reserved_pages[i];
+		cnt = length - offset < do_count ? length - offset : do_count;
+		res = copy_to_user(ubp, page_address(page) + offset, cnt);
 		if (res)
 			return (-EFAULT);
 		do_count -= cnt;
@@ -3874,6 +3858,7 @@
 {
 	int src_seg, dst_seg, src_offset = 0, dst_offset;
 	int count, total;
+	int length = PAGE_SIZE << st_bp->map_data.page_order;
 
 	if (offset == 0)
 		return;
@@ -3881,24 +3866,26 @@
 	total=st_bp->buffer_bytes - offset;
 	for (src_seg=0; src_seg < st_bp->frp_segs; src_seg++) {
 		src_offset = offset;
-		if (src_offset < st_bp->frp[src_seg].length)
+		if (src_offset < length)
 			break;
-		offset -= st_bp->frp[src_seg].length;
+		offset -= length;
 	}
 
 	st_bp->buffer_bytes = st_bp->read_pointer = total;
 	for (dst_seg=dst_offset=0; total > 0; ) {
-		count = min(st_bp->frp[dst_seg].length - dst_offset,
-			    st_bp->frp[src_seg].length - src_offset);
-		memmove(page_address(st_bp->frp[dst_seg].page) + dst_offset,
-			page_address(st_bp->frp[src_seg].page) + src_offset, count);
+		struct page *dpage = st_bp->reserved_pages[dst_seg];
+		struct page *spage = st_bp->reserved_pages[src_seg];
+
+		count = min(length - dst_offset, length - src_offset);
+		memmove(page_address(dpage) + dst_offset,
+			page_address(spage) + src_offset, count);
 		src_offset += count;
-		if (src_offset >= st_bp->frp[src_seg].length) {
+		if (src_offset >= length) {
 			src_seg++;
 			src_offset = 0;
 		}
 		dst_offset += count;
-		if (dst_offset >= st_bp->frp[dst_seg].length) {
+		if (dst_offset >= length) {
 			dst_seg++;
 			dst_offset = 0;
 		}
@@ -3906,32 +3893,6 @@
 	}
 }
 
-
-/* Fill the s/g list up to the length required for this transfer */
-static void buf_to_sg(struct st_buffer *STbp, unsigned int length)
-{
-	int i;
-	unsigned int count;
-	struct scatterlist *sg;
-	struct st_buf_fragment *frp;
-
-	if (length == STbp->frp_sg_current)
-		return;   /* work already done */
-
-	sg = &(STbp->sg[0]);
-	frp = STbp->frp;
-	for (i=count=0; count < length; i++) {
-		if (length - count > frp[i].length)
-			sg_set_page(&sg[i], frp[i].page, frp[i].length, 0);
-		else
-			sg_set_page(&sg[i], frp[i].page, length - count, 0);
-		count += sg[i].length;
-	}
-	STbp->sg_segs = i;
-	STbp->frp_sg_current = length;
-}
-
-
 /* Validate the options from command line or module parameters */
 static void validate_options(void)
 {
@@ -4026,7 +3987,7 @@
 		SDp->request_queue->max_phys_segments);
 	if (st_max_sg_segs < i)
 		i = st_max_sg_segs;
-	buffer = new_tape_buffer(1, (SDp->host)->unchecked_isa_dma, i);
+	buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
 	if (buffer == NULL) {
 		printk(KERN_ERR
 		       "st: Can't allocate new tape buffer. Device not attached.\n");
@@ -4280,8 +4241,8 @@
 	tpnt->device = NULL;
 
 	if (tpnt->buffer) {
-		tpnt->buffer->orig_frp_segs = 0;
 		normalize_buffer(tpnt->buffer);
+		kfree(tpnt->buffer->reserved_pages);
 		kfree(tpnt->buffer);
 	}
 
@@ -4567,14 +4528,16 @@
 }
 
 /* The following functions may be useful for a larger audience. */
-static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages, 
-			      unsigned long uaddr, size_t count, int rw)
+static int sgl_map_user_pages(struct st_buffer *STbp,
+			      const unsigned int max_pages, unsigned long uaddr,
+			      size_t count, int rw)
 {
 	unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	unsigned long start = uaddr >> PAGE_SHIFT;
 	const int nr_pages = end - start;
 	int res, i, j;
 	struct page **pages;
+	struct rq_map_data *mdata = &STbp->map_data;
 
 	/* User attempted Overflow! */
 	if ((uaddr + count) < uaddr)
@@ -4616,24 +4579,11 @@
 		flush_dcache_page(pages[i]);
         }
 
-	/* Populate the scatter/gather list */
-	sg_set_page(&sgl[0], pages[0], 0, uaddr & ~PAGE_MASK);
-	if (nr_pages > 1) {
-		sgl[0].length = PAGE_SIZE - sgl[0].offset;
-		count -= sgl[0].length;
-		for (i=1; i < nr_pages ; i++) {
-			sg_set_page(&sgl[i], pages[i],
-				    count < PAGE_SIZE ? count : PAGE_SIZE, 0);;
-			count -= PAGE_SIZE;
-		}
-	}
-	else {
-		sgl[0].length = count;
-	}
+	mdata->offset = uaddr & ~PAGE_MASK;
+	mdata->page_order = 0;
+	STbp->mapped_pages = pages;
 
-	kfree(pages);
 	return nr_pages;
-
  out_unmap:
 	if (res > 0) {
 		for (j=0; j < res; j++)
@@ -4646,13 +4596,13 @@
 
 
 /* And unmap them... */
-static int sgl_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
-				int dirtied)
+static int sgl_unmap_user_pages(struct st_buffer *STbp,
+				const unsigned int nr_pages, int dirtied)
 {
 	int i;
 
 	for (i=0; i < nr_pages; i++) {
-		struct page *page = sg_page(&sgl[i]);
+		struct page *page = STbp->mapped_pages[i];
 
 		if (dirtied)
 			SetPageDirty(page);
@@ -4661,6 +4611,8 @@
 		 */
 		page_cache_release(page);
 	}
+	kfree(STbp->mapped_pages);
+	STbp->mapped_pages = NULL;
 
 	return 0;
 }
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index b92712f..544dc6b 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -29,6 +29,7 @@
 	int result;
 	struct scsi_tape *stp;
 	struct completion *waiting;
+	struct bio *bio;
 };
 
 /* The tape buffer descriptor. */
@@ -44,20 +45,13 @@
 	int syscall_result;
 	struct st_request *last_SRpnt;
 	struct st_cmdstatus cmdstat;
+	struct page **reserved_pages;
+	struct page **mapped_pages;
+	struct rq_map_data map_data;
 	unsigned char *b_data;
 	unsigned short use_sg;	/* zero or max number of s/g segments for this adapter */
 	unsigned short sg_segs;		/* number of segments in s/g list */
-	unsigned short orig_frp_segs;	/* number of segments allocated at first try */
 	unsigned short frp_segs;	/* number of buffer segments */
-	unsigned int frp_sg_current;	/* driver buffer length currently in s/g list */
-	struct st_buf_fragment *frp;	/* the allocated buffer fragment list */
-	struct scatterlist sg[1];	/* MUST BE last item */
-};
-
-/* The tape buffer fragment descriptor */
-struct st_buf_fragment {
-	struct page *page;
-	unsigned int length;
 };
 
 /* The tape mode definition */
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 3c4a300..a8d61a6 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -137,8 +137,8 @@
 		goto fail;
 
 	if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) {
-		printk(KERN_ERR "%s: irq problem with %d, detaching\n ",
-			dev->dev.bus_id, dev->irq);
+		dev_printk(KERN_ERR, dev, "irq problem with %d, detaching\n ",
+			   dev->irq);
 		goto fail;
 	}
 
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index daa0056..0d934bf 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2839,6 +2839,8 @@
 	p->flags        = port->flags;
 	p->mapbase      = port->mapbase;
 	p->private_data = port->private_data;
+	p->type		= port->type;
+	p->line		= port->line;
 
 	set_io_from_upio(p);
 	if (port->serial_in)
@@ -3123,7 +3125,7 @@
 	if (nr_uarts > UART_NR)
 		nr_uarts = UART_NR;
 
-	printk(KERN_INFO "Serial: 8250/16550 driver"
+	printk(KERN_INFO "Serial: 8250/16550 driver, "
 		"%d ports, IRQ sharing %sabled\n", nr_uarts,
 		share_irqs ? "en" : "dis");
 
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index c088146..2a36712 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -602,6 +602,10 @@
 	/* subdevice 0x00PS means <P> parallel, <S> serial */
 	unsigned int num_serial = dev->subsystem_device & 0xf;
 
+	if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
+			dev->subsystem_device == 0x0299)
+		return 0;
+
 	if (num_serial == 0)
 		return -ENODEV;
 	return num_serial;
@@ -3096,6 +3100,10 @@
 		0,
 		pbn_b0_8_115200 },
 
+	{	PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835,
+		PCI_VENDOR_ID_IBM, 0x0299,
+		0, 0, pbn_b0_bt_2_115200 },
+
 	/*
 	 * These entries match devices with class COMMUNICATION_SERIAL,
 	 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index fde7f9c..bbcfc26 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -270,6 +270,8 @@
 	{       "RSS0250",              0       },
 	/* SupraExpress 28.8 Data/Fax PnP modem */
 	{	"SUP1310",		0	},
+	/* SupraExpress 336i PnP Voice Modem */
+	{	"SUP1381",		0	},
 	/* SupraExpress 33.6 Data/Fax PnP modem */
 	{	"SUP1421",		0	},
 	/* SupraExpress 33.6 Data/Fax PnP modem */
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index b695ab3..3e525e3 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -457,7 +457,7 @@
 
 config SERIAL_SAMSUNG_UARTS
 	int
-	depends on SERIAL_SAMSUNG
+	depends on ARM && PLAT_S3C
 	default 2 if ARCH_S3C2400
 	default 4 if ARCH_S3C64XX || CPU_S3C2443
 	default 3
@@ -1320,13 +1320,30 @@
 config SERIAL_OF_PLATFORM
 	tristate "Serial port on Open Firmware platform bus"
 	depends on PPC_OF
-	depends on SERIAL_8250
+	depends on SERIAL_8250 || SERIAL_OF_PLATFORM_NWPSERIAL
 	help
 	  If you have a PowerPC based system that has serial ports
 	  on a platform specific bus, you should enable this option.
 	  Currently, only 8250 compatible ports are supported, but
 	  others can easily be added.
 
+config SERIAL_OF_PLATFORM_NWPSERIAL
+	tristate "NWP serial port driver"
+	depends on PPC_OF && PPC_DCR
+	select SERIAL_OF_PLATFORM
+	select SERIAL_CORE_CONSOLE
+	select SERIAL_CORE
+	help
+	  This driver supports the Cell network processor NWP serial
+	  device.
+
+config SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE
+	bool "Console on NWP serial port"
+	depends on SERIAL_OF_PLATFORM_NWPSERIAL=y
+	select SERIAL_CORE_CONSOLE
+	help
+	  Support for a console on the NWP serial ports.
+
 config SERIAL_QE
 	tristate "Freescale QUICC Engine serial port support"
 	depends on QUICC_ENGINE
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index dfe775a..8844c0a 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -72,6 +72,7 @@
 obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
 obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
 obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o
+obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
 obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
 obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
 obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index d5efd6c..89362d7 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -579,7 +579,7 @@
 	/* disable PDC transmit */
 	UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
 
-	if (!uart_circ_empty(xmit)) {
+	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
 		dma_sync_single_for_device(port->dev,
 					   pdc->dma_addr,
 					   pdc->dma_size,
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index 8b2c619..e642c22 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -1203,7 +1203,7 @@
 	unsigned long flags;
 
 	/* Disable output DMA channel for the serial port in question
-	 * ( set to something other then serialX)
+	 * ( set to something other than serialX)
 	 */
 	local_irq_save(flags);
 	DFLOW(DEBUG_LOG(info->line, "disable_txdma_channel %i\n", info->line));
@@ -1266,7 +1266,7 @@
 	unsigned long flags;
 
 	/* Disable input DMA channel for the serial port in question
-	 * ( set to something other then serialX)
+	 * ( set to something other than serialX)
 	 */
 	local_irq_save(flags);
 	if (info->line == 0) {
diff --git a/drivers/serial/jsm/jsm_neo.c b/drivers/serial/jsm/jsm_neo.c
index b7584ca..e6390d0 100644
--- a/drivers/serial/jsm/jsm_neo.c
+++ b/drivers/serial/jsm/jsm_neo.c
@@ -577,9 +577,6 @@
 	jsm_printk(MSIGS, INFO, &ch->ch_bd->pci_dev,
 			"neo_parse_modem: port: %d msignals: %x\n", ch->ch_portnum, msignals);
 
-	if (!ch)
-		return;
-
 	/* Scrub off lower bits. They signify delta's, which I don't care about */
 	/* Keep DDCD and DDSR though */
 	msignals &= 0xf8;
diff --git a/drivers/serial/nwpserial.c b/drivers/serial/nwpserial.c
new file mode 100644
index 0000000..32f3eaf
--- /dev/null
+++ b/drivers/serial/nwpserial.c
@@ -0,0 +1,475 @@
+/*
+ *  Serial port driver for an NWP UART device
+ *
+ *    Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/serial.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_core.h>
+#include <linux/tty.h>
+#include <linux/irqreturn.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/nwpserial.h>
+#include <asm/prom.h>
+#include <asm/dcr.h>
+
+#define NWPSERIAL_NR               2
+
+#define NWPSERIAL_STATUS_RXVALID 0x1
+#define NWPSERIAL_STATUS_TXFULL  0x2
+
+struct nwpserial_port {
+	struct uart_port port;
+	dcr_host_t dcr_host;
+	unsigned int ier;
+	unsigned int mcr;
+};
+
+static DEFINE_MUTEX(nwpserial_mutex);
+static struct nwpserial_port nwpserial_ports[NWPSERIAL_NR];
+
+static void wait_for_bits(struct nwpserial_port *up, int bits)
+{
+	unsigned int status, tmout = 10000;
+
+	/* Wait up to 10ms for the character(s) to be sent. */
+	do {
+		status = dcr_read(up->dcr_host, UART_LSR);
+
+		if (--tmout == 0)
+			break;
+		udelay(1);
+	} while ((status & bits) != bits);
+}
+
+#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE
+static void nwpserial_console_putchar(struct uart_port *port, int c)
+{
+	struct nwpserial_port *up;
+	up = container_of(port, struct nwpserial_port, port);
+	/* check if tx buffer is full */
+	wait_for_bits(up, UART_LSR_THRE);
+	dcr_write(up->dcr_host, UART_TX, c);
+	up->port.icount.tx++;
+}
+
+static void
+nwpserial_console_write(struct console *co, const char *s, unsigned int count)
+{
+	struct nwpserial_port *up = &nwpserial_ports[co->index];
+	unsigned long flags;
+	int locked = 1;
+
+	if (oops_in_progress)
+		locked = spin_trylock_irqsave(&up->port.lock, flags);
+	else
+		spin_lock_irqsave(&up->port.lock, flags);
+
+	/* save and disable interrupt */
+	up->ier = dcr_read(up->dcr_host, UART_IER);
+	dcr_write(up->dcr_host, UART_IER, up->ier & ~UART_IER_RDI);
+
+	uart_console_write(&up->port, s, count, nwpserial_console_putchar);
+
+	/* wait for transmitter to become empty */
+	while ((dcr_read(up->dcr_host, UART_LSR) & UART_LSR_THRE) == 0)
+		cpu_relax();
+
+	/* restore interrupt state */
+	dcr_write(up->dcr_host, UART_IER, up->ier);
+
+	if (locked)
+		spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static struct uart_driver nwpserial_reg;
+static struct console nwpserial_console = {
+	.name		= "ttySQ",
+	.write		= nwpserial_console_write,
+	.device		= uart_console_device,
+	.flags		= CON_PRINTBUFFER,
+	.index		= -1,
+	.data		= &nwpserial_reg,
+};
+#define NWPSERIAL_CONSOLE	(&nwpserial_console)
+#else
+#define NWPSERIAL_CONSOLE	NULL
+#endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */
+
+/**************************************************************************/
+
+static int nwpserial_request_port(struct uart_port *port)
+{
+	return 0;
+}
+
+static void nwpserial_release_port(struct uart_port *port)
+{
+	/* N/A */
+}
+
+static void nwpserial_config_port(struct uart_port *port, int flags)
+{
+	port->type = PORT_NWPSERIAL;
+}
+
+static irqreturn_t nwpserial_interrupt(int irq, void *dev_id)
+{
+	struct nwpserial_port *up = dev_id;
+	struct tty_struct *tty = up->port.info->port.tty;
+	irqreturn_t ret;
+	unsigned int iir;
+	unsigned char ch;
+
+	spin_lock(&up->port.lock);
+
+	/* check if the uart was the interrupt source. */
+	iir = dcr_read(up->dcr_host, UART_IIR);
+	if (!iir) {
+		ret = IRQ_NONE;
+		goto out;
+	}
+
+	do {
+		up->port.icount.rx++;
+		ch = dcr_read(up->dcr_host, UART_RX);
+		if (up->port.ignore_status_mask != NWPSERIAL_STATUS_RXVALID)
+			tty_insert_flip_char(tty, ch, TTY_NORMAL);
+	} while (dcr_read(up->dcr_host, UART_LSR) & UART_LSR_DR);
+
+	tty_flip_buffer_push(tty);
+	ret = IRQ_HANDLED;
+
+out:
+	spin_unlock(&up->port.lock);
+	return ret;
+}
+
+static int nwpserial_startup(struct uart_port *port)
+{
+	struct nwpserial_port *up;
+	int err;
+
+	up = container_of(port, struct nwpserial_port, port);
+
+	/* disable flow control by default */
+	up->mcr = dcr_read(up->dcr_host, UART_MCR) & ~UART_MCR_AFE;
+	dcr_write(up->dcr_host, UART_MCR, up->mcr);
+
+	/* register interrupt handler */
+	err = request_irq(up->port.irq, nwpserial_interrupt,
+			IRQF_SHARED, "nwpserial", up);
+	if (err)
+		return err;
+
+	/* enable interrupts */
+	up->ier = UART_IER_RDI;
+	dcr_write(up->dcr_host, UART_IER, up->ier);
+
+	/* enable receiving */
+	up->port.ignore_status_mask &= ~NWPSERIAL_STATUS_RXVALID;
+
+	return 0;
+}
+
+static void nwpserial_shutdown(struct uart_port *port)
+{
+	struct nwpserial_port *up;
+	up = container_of(port, struct nwpserial_port, port);
+
+	/* disable receiving */
+	up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID;
+
+	/* disable interrupts from this port */
+	up->ier = 0;
+	dcr_write(up->dcr_host, UART_IER, up->ier);
+
+	/* free irq */
+	free_irq(up->port.irq, port);
+}
+
+static int nwpserial_verify_port(struct uart_port *port,
+			struct serial_struct *ser)
+{
+	return -EINVAL;
+}
+
+static const char *nwpserial_type(struct uart_port *port)
+{
+	return port->type == PORT_NWPSERIAL ? "nwpserial" : NULL;
+}
+
+static void nwpserial_set_termios(struct uart_port *port,
+			struct ktermios *termios, struct ktermios *old)
+{
+	struct nwpserial_port *up;
+	up = container_of(port, struct nwpserial_port, port);
+
+	up->port.read_status_mask = NWPSERIAL_STATUS_RXVALID
+				| NWPSERIAL_STATUS_TXFULL;
+
+	up->port.ignore_status_mask = 0;
+	/* ignore all characters if CREAD is not set */
+	if ((termios->c_cflag & CREAD) == 0)
+		up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID;
+
+	/* Copy back the old hardware settings */
+	if (old)
+		tty_termios_copy_hw(termios, old);
+}
+
+static void nwpserial_break_ctl(struct uart_port *port, int ctl)
+{
+	/* N/A */
+}
+
+static void nwpserial_enable_ms(struct uart_port *port)
+{
+	/* N/A */
+}
+
+static void nwpserial_stop_rx(struct uart_port *port)
+{
+	struct nwpserial_port *up;
+	up = container_of(port, struct nwpserial_port, port);
+	/* don't forward any more data (like !CREAD) */
+	up->port.ignore_status_mask = NWPSERIAL_STATUS_RXVALID;
+}
+
+static void nwpserial_putchar(struct nwpserial_port *up, unsigned char c)
+{
+	/* check if tx buffer is full */
+	wait_for_bits(up, UART_LSR_THRE);
+	dcr_write(up->dcr_host, UART_TX, c);
+	up->port.icount.tx++;
+}
+
+static void nwpserial_start_tx(struct uart_port *port)
+{
+	struct nwpserial_port *up;
+	struct circ_buf *xmit;
+	up = container_of(port, struct nwpserial_port, port);
+	xmit  = &up->port.info->xmit;
+
+	if (port->x_char) {
+		nwpserial_putchar(up, up->port.x_char);
+		port->x_char = 0;
+	}
+
+	while (!(uart_circ_empty(xmit) || uart_tx_stopped(&up->port))) {
+		nwpserial_putchar(up, xmit->buf[xmit->tail]);
+		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
+	}
+}
+
+static unsigned int nwpserial_get_mctrl(struct uart_port *port)
+{
+	return 0;
+}
+
+static void nwpserial_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	/* N/A */
+}
+
+static void nwpserial_stop_tx(struct uart_port *port)
+{
+	/* N/A */
+}
+
+static unsigned int nwpserial_tx_empty(struct uart_port *port)
+{
+	struct nwpserial_port *up;
+	unsigned long flags;
+	int ret;
+	up = container_of(port, struct nwpserial_port, port);
+
+	spin_lock_irqsave(&up->port.lock, flags);
+	ret = dcr_read(up->dcr_host, UART_LSR);
+	spin_unlock_irqrestore(&up->port.lock, flags);
+
+	return ret & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+}
+
+static struct uart_ops nwpserial_pops = {
+	.tx_empty     = nwpserial_tx_empty,
+	.set_mctrl    = nwpserial_set_mctrl,
+	.get_mctrl    = nwpserial_get_mctrl,
+	.stop_tx      = nwpserial_stop_tx,
+	.start_tx     = nwpserial_start_tx,
+	.stop_rx      = nwpserial_stop_rx,
+	.enable_ms    = nwpserial_enable_ms,
+	.break_ctl    = nwpserial_break_ctl,
+	.startup      = nwpserial_startup,
+	.shutdown     = nwpserial_shutdown,
+	.set_termios  = nwpserial_set_termios,
+	.type         = nwpserial_type,
+	.release_port = nwpserial_release_port,
+	.request_port = nwpserial_request_port,
+	.config_port  = nwpserial_config_port,
+	.verify_port  = nwpserial_verify_port,
+};
+
+static struct uart_driver nwpserial_reg = {
+	.owner       = THIS_MODULE,
+	.driver_name = "nwpserial",
+	.dev_name    = "ttySQ",
+	.major       = TTY_MAJOR,
+	.minor       = 68,
+	.nr          = NWPSERIAL_NR,
+	.cons        = NWPSERIAL_CONSOLE,
+};
+
+int nwpserial_register_port(struct uart_port *port)
+{
+	struct nwpserial_port *up = NULL;
+	int ret = -1;
+	int i;
+	static int first = 1;
+	int dcr_len;
+	int dcr_base;
+	struct device_node *dn;
+
+	mutex_lock(&nwpserial_mutex);
+
+	dn = to_of_device(port->dev)->node;
+	if (dn == NULL)
+		goto out;
+
+	/* get dcr base. */
+	dcr_base = dcr_resource_start(dn, 0);
+
+	/* find matching entry */
+	for (i = 0; i < NWPSERIAL_NR; i++)
+		if (nwpserial_ports[i].port.iobase == dcr_base) {
+			up = &nwpserial_ports[i];
+			break;
+		}
+
+	/* we didn't find a matching entry, search for a free port */
+	if (up == NULL)
+		for (i = 0; i < NWPSERIAL_NR; i++)
+			if (nwpserial_ports[i].port.type == PORT_UNKNOWN &&
+				nwpserial_ports[i].port.iobase == 0) {
+				up = &nwpserial_ports[i];
+				break;
+			}
+
+	if (up == NULL) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (first)
+		uart_register_driver(&nwpserial_reg);
+	first = 0;
+
+	up->port.membase      = port->membase;
+	up->port.irq          = port->irq;
+	up->port.uartclk      = port->uartclk;
+	up->port.fifosize     = port->fifosize;
+	up->port.regshift     = port->regshift;
+	up->port.iotype       = port->iotype;
+	up->port.flags        = port->flags;
+	up->port.mapbase      = port->mapbase;
+	up->port.private_data = port->private_data;
+
+	if (port->dev)
+		up->port.dev = port->dev;
+
+	if (up->port.iobase != dcr_base) {
+		up->port.ops          = &nwpserial_pops;
+		up->port.fifosize     = 16;
+
+		spin_lock_init(&up->port.lock);
+
+		up->port.iobase = dcr_base;
+		dcr_len = dcr_resource_len(dn, 0);
+
+		up->dcr_host = dcr_map(dn, dcr_base, dcr_len);
+		if (!DCR_MAP_OK(up->dcr_host)) {
+			printk(KERN_ERR "Cannot map DCR resources for NWPSERIAL");
+			goto out;
+		}
+	}
+
+	ret = uart_add_one_port(&nwpserial_reg, &up->port);
+	if (ret == 0)
+		ret = up->port.line;
+
+out:
+	mutex_unlock(&nwpserial_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(nwpserial_register_port);
+
+void nwpserial_unregister_port(int line)
+{
+	struct nwpserial_port *up = &nwpserial_ports[line];
+	mutex_lock(&nwpserial_mutex);
+	uart_remove_one_port(&nwpserial_reg, &up->port);
+
+	up->port.type = PORT_UNKNOWN;
+
+	mutex_unlock(&nwpserial_mutex);
+}
+EXPORT_SYMBOL(nwpserial_unregister_port);
+
+#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE
+static int __init nwpserial_console_init(void)
+{
+	struct nwpserial_port *up = NULL;
+	struct device_node *dn;
+	const char *name;
+	int dcr_base;
+	int dcr_len;
+	int i;
+
+	/* search for a free port */
+	for (i = 0; i < NWPSERIAL_NR; i++)
+		if (nwpserial_ports[i].port.type == PORT_UNKNOWN) {
+			up = &nwpserial_ports[i];
+			break;
+		}
+
+	if (up == NULL)
+		return -1;
+
+	name = of_get_property(of_chosen, "linux,stdout-path", NULL);
+	if (name == NULL)
+		return -1;
+
+	dn = of_find_node_by_path(name);
+	if (!dn)
+		return -1;
+
+	spin_lock_init(&up->port.lock);
+	up->port.ops = &nwpserial_pops;
+	up->port.type = PORT_NWPSERIAL;
+	up->port.fifosize = 16;
+
+	dcr_base = dcr_resource_start(dn, 0);
+	dcr_len = dcr_resource_len(dn, 0);
+	up->port.iobase = dcr_base;
+
+	up->dcr_host = dcr_map(dn, dcr_base, dcr_len);
+	if (!DCR_MAP_OK(up->dcr_host)) {
+		printk("Cannot map DCR resources for SERIAL");
+		return -1;
+	}
+	register_console(&nwpserial_console);
+	return 0;
+}
+console_initcall(nwpserial_console_init);
+#endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 8fa0ff5..14f8fa9 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -14,6 +14,7 @@
 #include <linux/serial_core.h>
 #include <linux/serial_8250.h>
 #include <linux/of_platform.h>
+#include <linux/nwpserial.h>
 
 #include <asm/prom.h>
 
@@ -99,9 +100,16 @@
 		goto out;
 
 	switch (port_type) {
+#ifdef CONFIG_SERIAL_8250
 	case PORT_8250 ... PORT_MAX_8250:
 		ret = serial8250_register_port(&port);
 		break;
+#endif
+#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
+	case PORT_NWPSERIAL:
+		ret = nwpserial_register_port(&port);
+		break;
+#endif
 	default:
 		/* need to add code for these */
 	case PORT_UNKNOWN:
@@ -129,9 +137,16 @@
 {
 	struct of_serial_info *info = ofdev->dev.driver_data;
 	switch (info->type) {
+#ifdef CONFIG_SERIAL_8250
 	case PORT_8250 ... PORT_MAX_8250:
 		serial8250_unregister_port(info->line);
 		break;
+#endif
+#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
+	case PORT_NWPSERIAL:
+		nwpserial_unregister_port(info->line);
+		break;
+#endif
 	default:
 		/* need to add code for these */
 		break;
@@ -148,6 +163,11 @@
 	{ .type = "serial", .compatible = "ns16450",  .data = (void *)PORT_16450, },
 	{ .type = "serial", .compatible = "ns16550",  .data = (void *)PORT_16550, },
 	{ .type = "serial", .compatible = "ns16750",  .data = (void *)PORT_16750, },
+	{ .type = "serial", .compatible = "ns16850",  .data = (void *)PORT_16850, },
+#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
+	{ .type = "serial", .compatible = "ibm,qpace-nwp-serial",
+					.data = (void *)PORT_NWPSERIAL, },
+#endif
 	{ .type = "serial",			      .data = (void *)PORT_UNKNOWN, },
 	{ /* end of list */ },
 };
diff --git a/drivers/serial/pnx8xxx_uart.c b/drivers/serial/pnx8xxx_uart.c
index 22e30d2..1bb8f1b 100644
--- a/drivers/serial/pnx8xxx_uart.c
+++ b/drivers/serial/pnx8xxx_uart.c
@@ -187,7 +187,7 @@
 	status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) |
 		 ISTAT_TO_SM(serial_in(sport, PNX8XXX_ISTAT));
 	while (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFIFO)) {
-		ch = serial_in(sport, PNX8XXX_FIFO);
+		ch = serial_in(sport, PNX8XXX_FIFO) & 0xff;
 
 		sport->port.icount.rx++;
 
@@ -198,9 +198,16 @@
 		 * out of the main execution path
 		 */
 		if (status & (FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE |
-					PNX8XXX_UART_FIFO_RXPAR) |
+					PNX8XXX_UART_FIFO_RXPAR |
+					PNX8XXX_UART_FIFO_RXBRK) |
 			      ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN))) {
-			if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR))
+			if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXBRK)) {
+				status &= ~(FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
+					FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR));
+				sport->port.icount.brk++;
+				if (uart_handle_break(&sport->port))
+					goto ignore_char;
+			} else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR))
 				sport->port.icount.parity++;
 			else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE))
 				sport->port.icount.frame++;
@@ -284,14 +291,8 @@
 	/* Get the interrupts */
 	status  = serial_in(sport, PNX8XXX_ISTAT) & serial_in(sport, PNX8XXX_IEN);
 
-	/* Break signal received */
-	if (status & PNX8XXX_UART_INT_BREAK) {
-		sport->port.icount.brk++;
-		uart_handle_break(&sport->port);
-	}
-
-	/* Byte received */
-	if (status & PNX8XXX_UART_INT_RX)
+	/* Byte or break signal received */
+	if (status & (PNX8XXX_UART_INT_RX | PNX8XXX_UART_INT_BREAK))
 		pnx8xxx_rx_chars(sport);
 
 	/* TX holding register empty - transmit a byte */
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 5e39bac..56ff3e6 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -670,8 +670,7 @@
 	dev_dbg(controller, "new message %p submitted for %s\n",
 			msg, spi->dev.bus_id);
 
-	if (unlikely(list_empty(&msg->transfers)
-			|| !spi->max_speed_hz))
+	if (unlikely(list_empty(&msg->transfers)))
 		return -EINVAL;
 
 	if (as->stopping)
diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi_lm70llp.c
index af65267..568c781 100644
--- a/drivers/spi/spi_lm70llp.c
+++ b/drivers/spi/spi_lm70llp.c
@@ -1,5 +1,5 @@
 /*
- * spi_lm70llp.c - driver for lm70llp eval board for the LM70 sensor
+ * spi_lm70llp.c - driver for LM70EVAL-LLP board for the LM70 sensor
  *
  * Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com>
  *
@@ -40,8 +40,12 @@
  * master controller driver.  The hwmon/lm70 driver is a "SPI protocol
  * driver", layered on top of this one and usable without the lm70llp.
  *
+ * Datasheet and Schematic:
  * The LM70 is a temperature sensor chip from National Semiconductor; its
  * datasheet is available at http://www.national.com/pf/LM/LM70.html
+ * The schematic for this particular board (the LM70EVAL-LLP) is
+ * available (on page 4) here:
+ *  http://www.national.com/appinfo/tempsensors/files/LM70LLPEVALmanual.pdf
  *
  * Also see Documentation/spi/spi-lm70llp.  The SPI<->parport code here is
  * (heavily) based on spi-butterfly by David Brownell.
@@ -64,7 +68,7 @@
  *
  * Note that parport pin 13 actually gets inverted by the transistor
  * arrangement which lets either the parport or the LM70 drive the
- * SI/SO signal.
+ * SI/SO signal (see the schematic for details).
  */
 
 #define DRVNAME		"spi-lm70llp"
@@ -106,12 +110,16 @@
 static inline void deassertCS(struct spi_lm70llp *pp)
 {
 	u8 data = parport_read_data(pp->port);
+
+	data &= ~0x80;		/* pull D7/SI-out low while de-asserted */
 	parport_write_data(pp->port, data | nCS);
 }
 
 static inline void assertCS(struct spi_lm70llp *pp)
 {
 	u8 data = parport_read_data(pp->port);
+
+	data |= 0x80;		/* pull D7/SI-out high so lm70 drives SO-in */
 	parport_write_data(pp->port, data & ~nCS);
 }
 
@@ -184,22 +192,7 @@
  */
 static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits)
 {
-	static u32 sio=0;
-	static int first_time=1;
-
-	/* First time: perform SPI bitbang and return the LSB of
-	 * the result of the SPI call.
-	 */
-	if (first_time) {
-		sio = bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
-		first_time=0;
-		return (sio & 0x00ff);
-	}
-	/* Return the MSB of the result of the SPI call */
-	else {
-		first_time=1;
-		return (sio >> 8);
-	}
+	return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
 }
 
 static void spi_lm70llp_attach(struct parport *p)
@@ -293,10 +286,9 @@
 		status = -ENODEV;
 		goto out_bitbang_stop;
 	}
-	pp->spidev_lm70->bits_per_word = 16;
+	pp->spidev_lm70->bits_per_word = 8;
 
 	lm70llp = pp;
-
 	return;
 
 out_bitbang_stop:
@@ -326,7 +318,6 @@
 
 	/* power down */
 	parport_write_data(pp->port, 0);
-	msleep(10);
 
 	parport_release(pp->pd);
 	parport_unregister_device(pp->pd);
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 68d6f49..fe7e5f3 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -15,12 +15,15 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/of_spi.h>
+
 #include <linux/spi/spi.h>
 #include <linux/spi/spi_bitbang.h>
 #include <linux/io.h>
 
-#include <syslib/virtex_devices.h>
-
 #define XILINX_SPI_NAME "xilinx_spi"
 
 /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
@@ -144,23 +147,14 @@
 		struct spi_transfer *t)
 {
 	u8 bits_per_word;
-	u32 hz;
-	struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
 
 	bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
-	hz = (t) ? t->speed_hz : spi->max_speed_hz;
 	if (bits_per_word != 8) {
 		dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
 			__func__, bits_per_word);
 		return -EINVAL;
 	}
 
-	if (hz && xspi->speed_hz > hz) {
-		dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n",
-			__func__, hz);
-		return -EINVAL;
-	}
-
 	return 0;
 }
 
@@ -304,32 +298,38 @@
 	return IRQ_HANDLED;
 }
 
-static int __init xilinx_spi_probe(struct platform_device *dev)
+static int __init xilinx_spi_of_probe(struct of_device *ofdev,
+					const struct of_device_id *match)
 {
-	int ret = 0;
 	struct spi_master *master;
 	struct xilinx_spi *xspi;
-	struct xspi_platform_data *pdata;
-	struct resource *r;
+	struct resource r_irq_struct;
+	struct resource r_mem_struct;
+
+	struct resource *r_irq = &r_irq_struct;
+	struct resource *r_mem = &r_mem_struct;
+	int rc = 0;
+	const u32 *prop;
+	int len;
 
 	/* Get resources(memory, IRQ) associated with the device */
-	master = spi_alloc_master(&dev->dev, sizeof(struct xilinx_spi));
+	master = spi_alloc_master(&ofdev->dev, sizeof(struct xilinx_spi));
 
 	if (master == NULL) {
 		return -ENOMEM;
 	}
 
-	platform_set_drvdata(dev, master);
-	pdata = dev->dev.platform_data;
+	dev_set_drvdata(&ofdev->dev, master);
 
-	if (pdata == NULL) {
-		ret = -ENODEV;
+	rc = of_address_to_resource(ofdev->node, 0, r_mem);
+	if (rc) {
+		dev_warn(&ofdev->dev, "invalid address\n");
 		goto put_master;
 	}
 
-	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
-	if (r == NULL) {
-		ret = -ENODEV;
+	rc = of_irq_to_resource(ofdev->node, 0, r_irq);
+	if (rc == NO_IRQ) {
+		dev_warn(&ofdev->dev, "no IRQ found\n");
 		goto put_master;
 	}
 
@@ -341,47 +341,57 @@
 	xspi->bitbang.master->setup = xilinx_spi_setup;
 	init_completion(&xspi->done);
 
-	if (!request_mem_region(r->start,
-			r->end - r->start + 1, XILINX_SPI_NAME)) {
-		ret = -ENXIO;
+	xspi->irq = r_irq->start;
+
+	if (!request_mem_region(r_mem->start,
+			r_mem->end - r_mem->start + 1, XILINX_SPI_NAME)) {
+		rc = -ENXIO;
+		dev_warn(&ofdev->dev, "memory request failure\n");
 		goto put_master;
 	}
 
-	xspi->regs = ioremap(r->start, r->end - r->start + 1);
+	xspi->regs = ioremap(r_mem->start, r_mem->end - r_mem->start + 1);
 	if (xspi->regs == NULL) {
-		ret = -ENOMEM;
+		rc = -ENOMEM;
+		dev_warn(&ofdev->dev, "ioremap failure\n");
 		goto put_master;
 	}
+	xspi->irq = r_irq->start;
 
-	ret = platform_get_irq(dev, 0);
-	if (ret < 0) {
-		ret = -ENXIO;
-		goto unmap_io;
+	/* dynamic bus assignment */
+	master->bus_num = -1;
+
+	/* number of slave select bits is required */
+	prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
+	if (!prop || len < sizeof(*prop)) {
+		dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
+		goto put_master;
 	}
-	xspi->irq = ret;
-
-	master->bus_num = pdata->bus_num;
-	master->num_chipselect = pdata->num_chipselect;
-	xspi->speed_hz = pdata->speed_hz;
+	master->num_chipselect = *prop;
 
 	/* SPI controller initializations */
 	xspi_init_hw(xspi->regs);
 
 	/* Register for SPI Interrupt */
-	ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
-	if (ret != 0)
+	rc = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
+	if (rc != 0) {
+		dev_warn(&ofdev->dev, "irq request failure: %d\n", xspi->irq);
 		goto unmap_io;
+	}
 
-	ret = spi_bitbang_start(&xspi->bitbang);
-	if (ret != 0) {
-		dev_err(&dev->dev, "spi_bitbang_start FAILED\n");
+	rc = spi_bitbang_start(&xspi->bitbang);
+	if (rc != 0) {
+		dev_err(&ofdev->dev, "spi_bitbang_start FAILED\n");
 		goto free_irq;
 	}
 
-	dev_info(&dev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
-			r->start, (u32)xspi->regs, xspi->irq);
+	dev_info(&ofdev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
+			(unsigned int)r_mem->start, (u32)xspi->regs, xspi->irq);
 
-	return ret;
+	/* Add any subnodes on the SPI bus */
+	of_register_spi_devices(master, ofdev->node);
+
+	return rc;
 
 free_irq:
 	free_irq(xspi->irq, xspi);
@@ -389,21 +399,21 @@
 	iounmap(xspi->regs);
 put_master:
 	spi_master_put(master);
-	return ret;
+	return rc;
 }
 
-static int __devexit xilinx_spi_remove(struct platform_device *dev)
+static int __devexit xilinx_spi_remove(struct of_device *ofdev)
 {
 	struct xilinx_spi *xspi;
 	struct spi_master *master;
 
-	master = platform_get_drvdata(dev);
+	master = platform_get_drvdata(ofdev);
 	xspi = spi_master_get_devdata(master);
 
 	spi_bitbang_stop(&xspi->bitbang);
 	free_irq(xspi->irq, xspi);
 	iounmap(xspi->regs);
-	platform_set_drvdata(dev, 0);
+	dev_set_drvdata(&ofdev->dev, 0);
 	spi_master_put(xspi->bitbang.master);
 
 	return 0;
@@ -412,27 +422,42 @@
 /* work with hotplug and coldplug */
 MODULE_ALIAS("platform:" XILINX_SPI_NAME);
 
-static struct platform_driver xilinx_spi_driver = {
-	.probe	= xilinx_spi_probe,
-	.remove	= __devexit_p(xilinx_spi_remove),
+static int __exit xilinx_spi_of_remove(struct of_device *op)
+{
+	return xilinx_spi_remove(op);
+}
+
+static struct of_device_id xilinx_spi_of_match[] = {
+	{ .compatible = "xlnx,xps-spi-2.00.a", },
+	{ .compatible = "xlnx,xps-spi-2.00.b", },
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
+
+static struct of_platform_driver xilinx_spi_of_driver = {
+	.owner = THIS_MODULE,
+	.name = "xilinx-xps-spi",
+	.match_table = xilinx_spi_of_match,
+	.probe = xilinx_spi_of_probe,
+	.remove = __exit_p(xilinx_spi_of_remove),
 	.driver = {
-		.name = XILINX_SPI_NAME,
+		.name = "xilinx-xps-spi",
 		.owner = THIS_MODULE,
 	},
 };
 
 static int __init xilinx_spi_init(void)
 {
-	return platform_driver_register(&xilinx_spi_driver);
+	return of_register_platform_driver(&xilinx_spi_of_driver);
 }
 module_init(xilinx_spi_init);
 
 static void __exit xilinx_spi_exit(void)
 {
-	platform_driver_unregister(&xilinx_spi_driver);
+	of_unregister_platform_driver(&xilinx_spi_of_driver);
 }
 module_exit(xilinx_spi_exit);
-
 MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
 MODULE_DESCRIPTION("Xilinx SPI driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 06372b2..36a93b9 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -557,7 +557,7 @@
 		block_size = num_bytes - count;
 		if (block_size < 0) {
 			rt_printk("%s: %s: bug! block_size is negative\n",
-				__FILE__, __FUNCTION__);
+				__FILE__, __func__);
 			break;
 		}
 		if ((int)(async->munge_ptr + block_size -
diff --git a/drivers/staging/epl/Edrv8139.c b/drivers/staging/epl/Edrv8139.c
index 88ab4a4..296354a 100644
--- a/drivers/staging/epl/Edrv8139.c
+++ b/drivers/staging/epl/Edrv8139.c
@@ -391,19 +391,19 @@
 	// register PCI driver
 	iResult = pci_register_driver(&EdrvDriver);
 	if (iResult != 0) {
-		printk("%s pci_register_driver failed with %d\n", __FUNCTION__,
+		printk("%s pci_register_driver failed with %d\n", __func__,
 		       iResult);
 		Ret = kEplNoResource;
 		goto Exit;
 	}
 
 	if (EdrvInstance_l.m_pPciDev == NULL) {
-		printk("%s m_pPciDev=NULL\n", __FUNCTION__);
+		printk("%s m_pPciDev=NULL\n", __func__);
 		Ret = kEplNoResource;
 		goto Exit;
 	}
 	// read MAC address from controller
-	printk("%s local MAC = ", __FUNCTION__);
+	printk("%s local MAC = ", __func__);
 	for (iResult = 0; iResult < 6; iResult++) {
 		pEdrvInitParam_p->m_abMyMacAddr[iResult] =
 		    EDRV_REGB_READ((EDRV_REGDW_IDR0 + iResult));
@@ -434,7 +434,7 @@
 {
 
 	// unregister PCI driver
-	printk("%s calling pci_unregister_driver()\n", __FUNCTION__);
+	printk("%s calling pci_unregister_driver()\n", __func__);
 	pci_unregister_driver(&EdrvDriver);
 
 	return kEplSuccessful;
@@ -621,7 +621,7 @@
 		    EDRV_REGDW_READ((EDRV_REGDW_TSD0 +
 				     (EdrvInstance_l.m_uiCurTxDesc *
 				      sizeof(DWORD))));
-		printk("%s InvOp TSD%u = 0x%08lX", __FUNCTION__,
+		printk("%s InvOp TSD%u = 0x%08lX", __func__,
 		       EdrvInstance_l.m_uiCurTxDesc, dwTemp);
 		printk("  Cmd = 0x%02X\n",
 		       (WORD) EDRV_REGB_READ(EDRV_REGB_COMMAND));
@@ -646,7 +646,7 @@
 	dwTemp =
 	    EDRV_REGDW_READ((EDRV_REGDW_TSAD0 +
 			     (EdrvInstance_l.m_uiCurTxDesc * sizeof(DWORD))));
-//    printk("%s TSAD%u = 0x%08lX", __FUNCTION__, EdrvInstance_l.m_uiCurTxDesc, dwTemp);
+//    printk("%s TSAD%u = 0x%08lX", __func__, EdrvInstance_l.m_uiCurTxDesc, dwTemp);
 
 	// start transmission
 	EDRV_REGDW_WRITE((EDRV_REGDW_TSD0 +
@@ -786,7 +786,7 @@
 
 		if (EdrvInstance_l.m_pbTxBuf == NULL) {
 			printk("%s Tx buffers currently not allocated\n",
-			       __FUNCTION__);
+			       __func__);
 			goto Exit;
 		}
 		// read transmit status
@@ -842,7 +842,7 @@
 
 		if (EdrvInstance_l.m_pbRxBuf == NULL) {
 			printk("%s Rx buffers currently not allocated\n",
-			       __FUNCTION__);
+			       __func__);
 			goto Exit;
 		}
 		// read current offset in receive buffer
@@ -944,7 +944,7 @@
 	DWORD dwTemp;
 
 	if (EdrvInstance_l.m_pPciDev != NULL) {	// Edrv is already connected to a PCI device
-		printk("%s device %s discarded\n", __FUNCTION__,
+		printk("%s device %s discarded\n", __func__,
 		       pci_name(pPciDev));
 		iResult = -ENODEV;
 		goto Exit;
@@ -953,7 +953,7 @@
 	if (pPciDev->revision >= 0x20) {
 		printk
 		    ("%s device %s is an enhanced 8139C+ version, which is not supported\n",
-		     __FUNCTION__, pci_name(pPciDev));
+		     __func__, pci_name(pPciDev));
 		iResult = -ENODEV;
 		goto Exit;
 	}
@@ -961,7 +961,7 @@
 	EdrvInstance_l.m_pPciDev = pPciDev;
 
 	// enable device
-	printk("%s enable device\n", __FUNCTION__);
+	printk("%s enable device\n", __func__);
 	iResult = pci_enable_device(pPciDev);
 	if (iResult != 0) {
 		goto Exit;
@@ -972,13 +972,13 @@
 		goto Exit;
 	}
 
-	printk("%s request regions\n", __FUNCTION__);
+	printk("%s request regions\n", __func__);
 	iResult = pci_request_regions(pPciDev, DRV_NAME);
 	if (iResult != 0) {
 		goto Exit;
 	}
 
-	printk("%s ioremap\n", __FUNCTION__);
+	printk("%s ioremap\n", __func__);
 	EdrvInstance_l.m_pIoAddr =
 	    ioremap(pci_resource_start(pPciDev, 1),
 		    pci_resource_len(pPciDev, 1));
@@ -987,11 +987,11 @@
 		goto Exit;
 	}
 	// enable PCI busmaster
-	printk("%s enable busmaster\n", __FUNCTION__);
+	printk("%s enable busmaster\n", __func__);
 	pci_set_master(pPciDev);
 
 	// reset controller
-	printk("%s reset controller\n", __FUNCTION__);
+	printk("%s reset controller\n", __func__);
 	EDRV_REGB_WRITE(EDRV_REGB_COMMAND, EDRV_REGB_COMMAND_RST);
 
 	// wait until reset has finished
@@ -1008,20 +1008,20 @@
 	dwTemp = EDRV_REGDW_READ(EDRV_REGDW_TCR);
 	if (((dwTemp & EDRV_REGDW_TCR_VER_MASK) != EDRV_REGDW_TCR_VER_C)
 	    && ((dwTemp & EDRV_REGDW_TCR_VER_MASK) != EDRV_REGDW_TCR_VER_D)) {	// unsupported chip
-		printk("%s Unsupported chip! TCR = 0x%08lX\n", __FUNCTION__,
+		printk("%s Unsupported chip! TCR = 0x%08lX\n", __func__,
 		       dwTemp);
 		iResult = -ENODEV;
 		goto Exit;
 	}
 	// disable interrupts
-	printk("%s disable interrupts\n", __FUNCTION__);
+	printk("%s disable interrupts\n", __func__);
 	EDRV_REGW_WRITE(EDRV_REGW_INT_MASK, 0);
 	// acknowledge all pending interrupts
 	EDRV_REGW_WRITE(EDRV_REGW_INT_STATUS,
 			EDRV_REGW_READ(EDRV_REGW_INT_STATUS));
 
 	// install interrupt handler
-	printk("%s install interrupt handler\n", __FUNCTION__);
+	printk("%s install interrupt handler\n", __func__);
 	iResult =
 	    request_irq(pPciDev->irq, TgtEthIsr, IRQF_SHARED,
 			DRV_NAME /*pPciDev->dev.name */ , pPciDev);
@@ -1031,16 +1031,16 @@
 
 /*
     // unlock configuration registers
-    printk("%s unlock configuration registers\n", __FUNCTION__);
+    printk("%s unlock configuration registers\n", __func__);
     EDRV_REGB_WRITE(EDRV_REGB_CMD9346, EDRV_REGB_CMD9346_UNLOCK);
 
     // check if user specified a MAC address
-    printk("%s check specified MAC address\n", __FUNCTION__);
+    printk("%s check specified MAC address\n", __func__);
     for (iResult = 0; iResult < 6; iResult++)
     {
         if (EdrvInstance_l.m_InitParam.m_abMyMacAddr[iResult] != 0)
         {
-            printk("%s set local MAC address\n", __FUNCTION__);
+            printk("%s set local MAC address\n", __func__);
             // write this MAC address to controller
             EDRV_REGDW_WRITE(EDRV_REGDW_IDR0,
                 le32_to_cpu(*((DWORD*)&EdrvInstance_l.m_InitParam.m_abMyMacAddr[0])));
@@ -1059,7 +1059,7 @@
 */
 
 	// allocate buffers
-	printk("%s allocate buffers\n", __FUNCTION__);
+	printk("%s allocate buffers\n", __func__);
 	EdrvInstance_l.m_pbTxBuf =
 	    pci_alloc_consistent(pPciDev, EDRV_TX_BUFFER_SIZE,
 				 &EdrvInstance_l.m_pTxBufDma);
@@ -1076,7 +1076,7 @@
 		goto Exit;
 	}
 	// reset pointers for Tx buffers
-	printk("%s reset pointers fo Tx buffers\n", __FUNCTION__);
+	printk("%s reset pointers fo Tx buffers\n", __func__);
 	EDRV_REGDW_WRITE(EDRV_REGDW_TSAD0, 0);
 	dwTemp = EDRV_REGDW_READ(EDRV_REGDW_TSAD0);
 	EDRV_REGDW_WRITE(EDRV_REGDW_TSAD1, 0);
@@ -1090,11 +1090,11 @@
 	       (WORD) EDRV_REGB_READ(EDRV_REGB_COMMAND));
 
 	// set pointer for receive buffer in controller
-	printk("%s set pointer to Rx buffer\n", __FUNCTION__);
+	printk("%s set pointer to Rx buffer\n", __func__);
 	EDRV_REGDW_WRITE(EDRV_REGDW_RBSTART, EdrvInstance_l.m_pRxBufDma);
 
 	// enable transmitter and receiver
-	printk("%s enable Tx and Rx", __FUNCTION__);
+	printk("%s enable Tx and Rx", __func__);
 	EDRV_REGB_WRITE(EDRV_REGB_COMMAND,
 			(EDRV_REGB_COMMAND_RE | EDRV_REGB_COMMAND_TE));
 	printk("  Command = 0x%02X\n",
@@ -1104,12 +1104,12 @@
 	EDRV_REGDW_WRITE(EDRV_REGDW_MPC, 0);
 
 	// set transmit configuration register
-	printk("%s set Tx conf register", __FUNCTION__);
+	printk("%s set Tx conf register", __func__);
 	EDRV_REGDW_WRITE(EDRV_REGDW_TCR, EDRV_REGDW_TCR_DEF);
 	printk(" = 0x%08X\n", EDRV_REGDW_READ(EDRV_REGDW_TCR));
 
 	// set receive configuration register
-	printk("%s set Rx conf register", __FUNCTION__);
+	printk("%s set Rx conf register", __func__);
 	EDRV_REGDW_WRITE(EDRV_REGDW_RCR, EDRV_REGDW_RCR_DEF);
 	printk(" = 0x%08X\n", EDRV_REGDW_READ(EDRV_REGDW_RCR));
 
@@ -1121,7 +1121,7 @@
 
 /*
     // enable transmitter and receiver
-    printk("%s enable Tx and Rx", __FUNCTION__);
+    printk("%s enable Tx and Rx", __func__);
     EDRV_REGB_WRITE(EDRV_REGB_COMMAND, (EDRV_REGB_COMMAND_RE | EDRV_REGB_COMMAND_TE));
     printk("  Command = 0x%02X\n", (WORD) EDRV_REGB_READ(EDRV_REGB_COMMAND));
 */
@@ -1129,11 +1129,11 @@
 	EDRV_REGW_WRITE(EDRV_REGW_MULINT, 0);
 
 	// enable interrupts
-	printk("%s enable interrupts\n", __FUNCTION__);
+	printk("%s enable interrupts\n", __func__);
 	EDRV_REGW_WRITE(EDRV_REGW_INT_MASK, EDRV_REGW_INT_MASK_DEF);
 
       Exit:
-	printk("%s finished with %d\n", __FUNCTION__, iResult);
+	printk("%s finished with %d\n", __func__, iResult);
 	return iResult;
 }
 
diff --git a/drivers/staging/epl/EplSdoAsySequ.c b/drivers/staging/epl/EplSdoAsySequ.c
index 991c6be..6b6a997 100644
--- a/drivers/staging/epl/EplSdoAsySequ.c
+++ b/drivers/staging/epl/EplSdoAsySequ.c
@@ -876,7 +876,7 @@
 				{
 /*
                     PRINTF3("%s scon=%u rcon=%u\n",
-                            __FUNCTION__,
+                            __func__,
                             pRecFrame_p->m_le_bSendSeqNumCon,
                             pRecFrame_p->m_le_bRecSeqNumCon);
 */
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index 61d7c5d..6136e3f 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -239,7 +239,7 @@
 			goto exit;
 		} else {
 			dbg_info(&dev->intf->dev, "%s: nonzero status received: %d\n",
-				 __FUNCTION__, urb->status);
+				 __func__, urb->status);
 			goto resubmit; /* maybe we can recover */
 		}
 	}
@@ -261,7 +261,7 @@
 		if(dev->offline > 0 && dev->interrupt_in_buffer[1] != 0xff) { dev->offline = 0; }
 		if(dev->offline == 0 && dev->interrupt_in_buffer[1] == 0xff) { dev->offline = 1; }
 #endif
-		dbg_info(&dev->intf->dev, "%s: head, tail are %x, %x\n", __FUNCTION__,dev->ring_head,dev->ring_tail);
+		dbg_info(&dev->intf->dev, "%s: head, tail are %x, %x\n", __func__,dev->ring_head,dev->ring_tail);
 		next_ring_head = (dev->ring_head+1) % ring_buffer_size;
 
 		if (next_ring_head != dev->ring_tail) {
@@ -305,7 +305,7 @@
 			     urb->status == -ESHUTDOWN))
 		dbg_info(&dev->intf->dev,
 			 "%s - nonzero write interrupt status received: %d\n",
-			 __FUNCTION__, urb->status);
+			 __func__, urb->status);
 	atomic_dec(&dev->writes_pending);
 	dev->interrupt_out_busy = 0;
 	wake_up_interruptible(&dev->write_wait);
@@ -330,7 +330,7 @@
 
 	if (!interface) {
 		err("%s - error, can't find device for minor %d\n",
-		     __FUNCTION__, subminor);
+		     __func__, subminor);
 		retval = -ENODEV;
 		goto unlock_disconnect_exit;
 	}
@@ -514,7 +514,7 @@
 						 }
 						 dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size;
 						 c+=INPUT_CMD_SIZE;
-						 dbg_info(&dev->intf->dev, "%s: head, tail are %x, %x\n", __FUNCTION__,dev->ring_head,dev->ring_tail);
+						 dbg_info(&dev->intf->dev, "%s: head, tail are %x, %x\n", __func__,dev->ring_head,dev->ring_tail);
 	   }
 	   retval = c;
 
@@ -573,7 +573,7 @@
 	if (bytes_to_write < count)
 		dev_warn(&dev->intf->dev, "Write buffer overflow, %zd bytes dropped\n",count-bytes_to_write);
 
-	dbg_info(&dev->intf->dev, "%s: count = %zd, bytes_to_write = %zd\n", __FUNCTION__, count, bytes_to_write);
+	dbg_info(&dev->intf->dev, "%s: count = %zd, bytes_to_write = %zd\n", __func__, count, bytes_to_write);
 
 	if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) {
 		retval = -EFAULT;
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index 275faa7..79abb6b 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -335,7 +335,7 @@
 			goto exit;
 		} else {
 			dbg_info(&dev->intf->dev, "%s: nonzero status received: %d\n",
-				 __FUNCTION__, urb->status);
+				 __func__, urb->status);
 			goto resubmit; /* maybe we can recover */
 		}
 	}
@@ -345,7 +345,7 @@
 			 "Urb length was %d bytes!! Do something intelligent \n", urb->actual_length);
 	} else {
 		dbg_info(&dev->intf->dev, "%s: received: %02x%02x%02x%02x%02x%02x%02x%02x\n",
-			 __FUNCTION__, dev->interrupt_in_buffer[0],dev->interrupt_in_buffer[1],dev->interrupt_in_buffer[2],dev->interrupt_in_buffer[3],dev->interrupt_in_buffer[4],dev->interrupt_in_buffer[5],dev->interrupt_in_buffer[6],dev->interrupt_in_buffer[7]);
+			 __func__, dev->interrupt_in_buffer[0],dev->interrupt_in_buffer[1],dev->interrupt_in_buffer[2],dev->interrupt_in_buffer[3],dev->interrupt_in_buffer[4],dev->interrupt_in_buffer[5],dev->interrupt_in_buffer[6],dev->interrupt_in_buffer[7]);
 #if SUPPRESS_EXTRA_OFFLINE_EVENTS
 		if(dev->offline == 2 && dev->interrupt_in_buffer[1] == 0xff) { goto resubmit; }
 		if(dev->offline == 1 && dev->interrupt_in_buffer[1] == 0xff) { dev->offline = 2; goto resubmit; }
@@ -355,7 +355,7 @@
 		if(dev->offline == 0 && dev->interrupt_in_buffer[1] == 0xff) { dev->offline = 1; }
 
 #endif
-		dbg_info(&dev->intf->dev, "%s: head, tail are %x, %x\n", __FUNCTION__,dev->ring_head,dev->ring_tail);
+		dbg_info(&dev->intf->dev, "%s: head, tail are %x, %x\n", __func__,dev->ring_head,dev->ring_tail);
 
 		next_ring_head = (dev->ring_head+1) % ring_buffer_size;
 
@@ -399,7 +399,7 @@
 			     urb->status == -ESHUTDOWN))
 		dbg_info(&dev->intf->dev,
 			 "%s - nonzero write interrupt status received: %d\n",
-			 __FUNCTION__, urb->status);
+			 __func__, urb->status);
 
 	dev->interrupt_out_busy = 0;
 	wake_up_interruptible(&dev->write_wait);
@@ -424,7 +424,7 @@
 
 	if (!interface) {
 		err("%s - error, can't find device for minor %d\n",
-		     __FUNCTION__, subminor);
+		     __func__, subminor);
 		retval = -ENODEV;
 		goto unlock_disconnect_exit;
 	}
@@ -613,7 +613,7 @@
 	}
 
 	dbg_info(&dev->intf->dev, "%s: copying to userspace: %02x%02x%02x%02x%02x%02x%02x%02x\n",
-			 __FUNCTION__, (*dev->ring_buffer)[dev->ring_tail].cmd[0],(*dev->ring_buffer)[dev->ring_tail].cmd[1],(*dev->ring_buffer)[dev->ring_tail].cmd[2],(*dev->ring_buffer)[dev->ring_tail].cmd[3],(*dev->ring_buffer)[dev->ring_tail].cmd[4],(*dev->ring_buffer)[dev->ring_tail].cmd[5],(*dev->ring_buffer)[dev->ring_tail].cmd[6],(*dev->ring_buffer)[dev->ring_tail].cmd[7]);
+			 __func__, (*dev->ring_buffer)[dev->ring_tail].cmd[0],(*dev->ring_buffer)[dev->ring_tail].cmd[1],(*dev->ring_buffer)[dev->ring_tail].cmd[2],(*dev->ring_buffer)[dev->ring_tail].cmd[3],(*dev->ring_buffer)[dev->ring_tail].cmd[4],(*dev->ring_buffer)[dev->ring_tail].cmd[5],(*dev->ring_buffer)[dev->ring_tail].cmd[6],(*dev->ring_buffer)[dev->ring_tail].cmd[7]);
 
 #if BUFFERED_READS
 	   c = 0;
@@ -632,7 +632,7 @@
 			// FIXME the math is wrong for going in reverse, actually, as the midi spec doesn't allow signed chars
 
 	dbg_info(&dev->intf->dev, "%s: trying to compress: %02x%02x%02x%02x%02x %02x %02x %02x\n",
-			 __FUNCTION__, (*dev->ring_buffer)[dev->ring_tail].cmd[0],(*dev->ring_buffer)[dev->ring_tail].cmd[1],(*dev->ring_buffer)[dev->ring_tail].cmd[2],(*dev->ring_buffer)[dev->ring_tail].cmd[3],(*dev->ring_buffer)[dev->ring_tail].cmd[4],(*dev->ring_buffer)[dev->ring_tail].cmd[5],(*dev->ring_buffer)[dev->ring_tail].cmd[6],(*dev->ring_buffer)[dev->ring_tail].cmd[7]);
+			 __func__, (*dev->ring_buffer)[dev->ring_tail].cmd[0],(*dev->ring_buffer)[dev->ring_tail].cmd[1],(*dev->ring_buffer)[dev->ring_tail].cmd[2],(*dev->ring_buffer)[dev->ring_tail].cmd[3],(*dev->ring_buffer)[dev->ring_tail].cmd[4],(*dev->ring_buffer)[dev->ring_tail].cmd[5],(*dev->ring_buffer)[dev->ring_tail].cmd[6],(*dev->ring_buffer)[dev->ring_tail].cmd[7]);
 
 
 			if(((*dev->ring_buffer)[dev->ring_tail].cmd[6] != 0 &&
@@ -645,7 +645,7 @@
 				((*dev->ring_buffer)[dev->ring_tail].cmd[5] == (*dev->ring_buffer)[next_tail].cmd[5]))
  {
 	dbg_info(&dev->intf->dev, "%s: should compress: %02x%02x%02x%02x%02x%02x%02x%02x\n",
-			 __FUNCTION__, (*dev->ring_buffer)[dev->ring_tail].cmd[0],(*dev->ring_buffer)[dev->ring_tail].cmd[1],(*dev->ring_buffer)[dev->ring_tail].cmd[2],(*dev->ring_buffer)[dev->ring_tail].cmd[3],(*dev->ring_buffer)[dev->ring_tail].cmd[4],(*dev->ring_buffer)[dev->ring_tail].cmd[5],(*dev->ring_buffer)[dev->ring_tail].cmd[6],(*dev->ring_buffer)[dev->ring_tail].cmd[7]);
+			 __func__, (*dev->ring_buffer)[dev->ring_tail].cmd[0],(*dev->ring_buffer)[dev->ring_tail].cmd[1],(*dev->ring_buffer)[dev->ring_tail].cmd[2],(*dev->ring_buffer)[dev->ring_tail].cmd[3],(*dev->ring_buffer)[dev->ring_tail].cmd[4],(*dev->ring_buffer)[dev->ring_tail].cmd[5],(*dev->ring_buffer)[dev->ring_tail].cmd[6],(*dev->ring_buffer)[dev->ring_tail].cmd[7]);
 
 				newwheel += oldwheel;
 				if(oldwheel > 0 && !(newwheel > 0)) {
@@ -673,7 +673,7 @@
 
 		dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size;
 		c+=8;
-		dbg_info(&dev->intf->dev, "%s: head, tail are %x, %x\n", __FUNCTION__,dev->ring_head,dev->ring_tail);
+		dbg_info(&dev->intf->dev, "%s: head, tail are %x, %x\n", __func__,dev->ring_head,dev->ring_tail);
 	   }
 	   retval = c;
 
@@ -684,7 +684,7 @@
 	}
 
 	dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size;
-	dbg_info(&dev->intf->dev, "%s: head, tail are %x, %x\n", __FUNCTION__,dev->ring_head,dev->ring_tail);
+	dbg_info(&dev->intf->dev, "%s: head, tail are %x, %x\n", __func__,dev->ring_head,dev->ring_tail);
 
 	retval = 8;
 #endif /* BUFFERED_READS */
@@ -743,7 +743,7 @@
 	if (bytes_to_write < count)
 		dev_warn(&dev->intf->dev, "Write buffer overflow, %zd bytes dropped\n",count-bytes_to_write);
 
-	dbg_info(&dev->intf->dev, "%s: count = %zd, bytes_to_write = %zd\n", __FUNCTION__, count, bytes_to_write);
+	dbg_info(&dev->intf->dev, "%s: count = %zd, bytes_to_write = %zd\n", __func__, count, bytes_to_write);
 
 	if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) {
 		retval = -EFAULT;
diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
index 4f7237a..868edb6 100644
--- a/drivers/staging/go7007/go7007-v4l2.c
+++ b/drivers/staging/go7007/go7007-v4l2.c
@@ -1712,8 +1712,7 @@
 	page = alloc_page(GFP_USER | __GFP_DMA32);
 	if (!page)
 		return VM_FAULT_OOM;
-	clear_user_page(page_address(page), (unsigned long)vmf->virtual_address,
-			page);
+	clear_user_highpage(page, (unsigned long)vmf->virtual_address);
 	vmf->page = page;
 	return 0;
 }
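The go7007 change above works because clear_user_highpage() takes the struct page directly and performs any temporary kernel mapping itself, so the explicit page_address() lookup is no longer needed. Roughly what it expands to, hedged as a sketch against the generic include/linux/highmem.h implementation of this kernel generation (clear_user_highpage_like is an illustrative name):

	#include <linux/highmem.h>
	#include <linux/mm.h>

	static inline void clear_user_highpage_like(struct page *page,
						    unsigned long vaddr)
	{
		/* Map the page (an address lookup for lowmem, a real kmap
		 * slot for highmem), clear it using the user virtual address
		 * so virtually indexed caches stay coherent, then drop the
		 * temporary mapping again. */
		void *kaddr = kmap_atomic(page, KM_USER0);

		clear_user_page(kaddr, vaddr, page);
		kunmap_atomic(kaddr, KM_USER0);
	}
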
diff --git a/drivers/staging/meilhaus/me0600_ext_irq.c b/drivers/staging/meilhaus/me0600_ext_irq.c
index a449ab2..eba18ad 100644
--- a/drivers/staging/meilhaus/me0600_ext_irq.c
+++ b/drivers/staging/meilhaus/me0600_ext_irq.c
@@ -360,7 +360,7 @@
 	if (instance->lintno > 1) {
 		PERROR_CRITICAL
 		    ("%s():Wrong subdevice index=%d plx:irq_status_reg=0x%04X.\n",
-		     __FUNCTION__, instance->lintno, inl(instance->intcsr));
+		     __func__, instance->lintno, inl(instance->intcsr));
 		return IRQ_NONE;
 	}
 
@@ -384,7 +384,7 @@
 	} else {
 		PINFO
 		    ("%ld Shared interrupt. %s(): idx=0 plx:irq_status_reg=0x%04X\n",
-		     jiffies, __FUNCTION__, status);
+		     jiffies, __func__, status);
 		ret = IRQ_NONE;
 	}
 	spin_unlock(instance->intcsr_lock);
diff --git a/drivers/staging/meilhaus/me1400_ext_irq.c b/drivers/staging/meilhaus/me1400_ext_irq.c
index b8c2696..b4df7cc 100644
--- a/drivers/staging/meilhaus/me1400_ext_irq.c
+++ b/drivers/staging/meilhaus/me1400_ext_irq.c
@@ -349,7 +349,7 @@
 	    (PLX_LOCAL_INT1_STATE | PLX_LOCAL_INT1_EN | PLX_PCI_INT_EN)) {
 		spin_unlock(&instance->subdevice_lock);
 		PINFO("%ld Shared interrupt. %s(): irq_status_reg=0x%04X\n",
-		      jiffies, __FUNCTION__, status);
+		      jiffies, __func__, status);
 		return IRQ_NONE;
 	}
 
diff --git a/drivers/staging/meilhaus/me1600_ao.c b/drivers/staging/meilhaus/me1600_ao.c
index 6f26665..d127c6b 100644
--- a/drivers/staging/meilhaus/me1600_ao.c
+++ b/drivers/staging/meilhaus/me1600_ao.c
@@ -977,7 +977,7 @@
 	    container_of((void *)work, me1600_ao_subdevice_t, ao_control_task);
 #endif
 
-	PINFO("<%s: %ld> executed. idx=%d\n", __FUNCTION__, jiffies,
+	PINFO("<%s: %ld> executed. idx=%d\n", __func__, jiffies,
 	      instance->ao_idx);
 
 	if (!((instance->ao_regs_shadows)->trigger & instance->ao_idx)) {	// Output was triggerd.
@@ -1027,7 +1027,7 @@
 		queue_delayed_work(instance->me1600_workqueue,
 				   &instance->ao_control_task, 1);
 	} else {
-		PINFO("<%s> Ending control task.\n", __FUNCTION__);
+		PINFO("<%s> Ending control task.\n", __func__);
 	}
 
 }
diff --git a/drivers/staging/meilhaus/me4600_ai.c b/drivers/staging/meilhaus/me4600_ai.c
index 1a0de5d..0a8c9d7 100644
--- a/drivers/staging/meilhaus/me4600_ai.c
+++ b/drivers/staging/meilhaus/me4600_ai.c
@@ -2629,11 +2629,11 @@
 		if ((irq_status & (ME4600_IRQ_STATUS_BIT_AI_HF | ME4600_IRQ_STATUS_BIT_SC | ME4600_IRQ_STATUS_BIT_LE)) == ME4600_IRQ_STATUS_BIT_LE) {	//This is security check case. LE is unused. This should never ever happend.
 			PINFO
 			    ("%ld Shared interrupt. %s(): irq_status_reg=LE_IRQ\n",
-			     jiffies, __FUNCTION__);
+			     jiffies, __func__);
 		} else {
 			PINFO
 			    ("%ld Shared interrupt. %s(): irq_status_reg=0x%04X\n",
-			     jiffies, __FUNCTION__, irq_status);
+			     jiffies, __func__, irq_status);
 		}
 #endif
 		return IRQ_NONE;
@@ -3329,7 +3329,7 @@
 	instance =
 	    container_of((void *)work, me4600_ai_subdevice_t, ai_control_task);
 #endif
-	PINFO("<%s: %ld> executed.\n", __FUNCTION__, jiffies);
+	PINFO("<%s: %ld> executed.\n", __func__, jiffies);
 
 	status = inl(instance->status_reg);
 	PDEBUG_REG("status_reg inl(0x%lX+0x%lX)=0x%x\n", instance->reg_base,
@@ -3428,7 +3428,7 @@
 		queue_delayed_work(instance->me4600_workqueue,
 				   &instance->ai_control_task, 1);
 	} else {
-		PINFO("<%s> Ending control task.\n", __FUNCTION__);
+		PINFO("<%s> Ending control task.\n", __func__);
 	}
 
 }
diff --git a/drivers/staging/meilhaus/me4600_ao.c b/drivers/staging/meilhaus/me4600_ao.c
index 2c92e65..e2bec82 100644
--- a/drivers/staging/meilhaus/me4600_ao.c
+++ b/drivers/staging/meilhaus/me4600_ao.c
@@ -2294,7 +2294,7 @@
 	irq_status = inl(instance->irq_status_reg);
 	if (!(irq_status & (ME4600_IRQ_STATUS_BIT_AO_HF << instance->ao_idx))) {
 		PINFO("%ld Shared interrupt. %s(): ID=%d: status_reg=0x%04X\n",
-		      jiffies, __FUNCTION__, instance->ao_idx, irq_status);
+		      jiffies, __func__, instance->ao_idx, irq_status);
 		return IRQ_NONE;
 	}
 
@@ -3009,7 +3009,7 @@
 	instance =
 	    container_of((void *)work, me4600_ao_subdevice_t, ao_control_task);
 #endif
-	PINFO("<%s: %ld> executed. idx=%d\n", __FUNCTION__, jiffies,
+	PINFO("<%s: %ld> executed. idx=%d\n", __func__, jiffies,
 	      instance->ao_idx);
 
 	status = inl(instance->status_reg);
@@ -3316,7 +3316,7 @@
 		queue_delayed_work(instance->me4600_workqueue,
 				   &instance->ao_control_task, 1);
 	} else {
-		PINFO("<%s> Ending control task.\n", __FUNCTION__);
+		PINFO("<%s> Ending control task.\n", __func__);
 	}
 
 }
diff --git a/drivers/staging/meilhaus/me4600_ext_irq.c b/drivers/staging/meilhaus/me4600_ext_irq.c
index 8a10dce..adc1e1b 100644
--- a/drivers/staging/meilhaus/me4600_ext_irq.c
+++ b/drivers/staging/meilhaus/me4600_ext_irq.c
@@ -356,7 +356,7 @@
 	irq_status = inl(instance->irq_status_reg);
 	if (!(irq_status & ME4600_IRQ_STATUS_BIT_EX)) {
 		PINFO("%ld Shared interrupt. %s(): irq_status_reg=0x%04X\n",
-		      jiffies, __FUNCTION__, irq_status);
+		      jiffies, __func__, irq_status);
 		return IRQ_NONE;
 	}
 
diff --git a/drivers/staging/meilhaus/me6000_ao.c b/drivers/staging/meilhaus/me6000_ao.c
index 3f5ff6d..94f0123 100644
--- a/drivers/staging/meilhaus/me6000_ao.c
+++ b/drivers/staging/meilhaus/me6000_ao.c
@@ -863,7 +863,7 @@
 
 /// @note When flag 'ME_IO_SINGLE_TYPE_TRIG_SYNCHRONOUS' is set than output is triggered. ALWAYS!
 
-	PINFO("<%s> start mode= 0x%08x %s\n", __FUNCTION__, mode,
+	PINFO("<%s> start mode= 0x%08x %s\n", __func__, mode,
 	      (flags & ME_IO_SINGLE_TYPE_TRIG_SYNCHRONOUS) ? "SYNCHRONOUS" :
 	      "");
 	if (instance->fifo & ME6000_AO_HAS_FIFO) {	// FIFO - Continous mode
@@ -1663,7 +1663,7 @@
 
 	status = inl(instance->status_reg);
 	//Start state machine and interrupts
-	PINFO("<%s:%d> Start state machine.\n", __FUNCTION__, __LINE__);
+	PINFO("<%s:%d> Start state machine.\n", __func__, __LINE__);
 	ctrl &= ~(ME6000_AO_CTRL_BIT_STOP | ME6000_AO_CTRL_BIT_IMMEDIATE_STOP);
 	if (instance->start_mode == ME6000_AO_EXT_TRIG) {
 		PDEBUG("DIGITAL TRIGGER\n");
@@ -1671,7 +1671,7 @@
 	}
 	if (!(status & ME6000_AO_STATUS_BIT_HF)) {	//More than half!
 		if ((ctrl & ME6000_AO_CTRL_MODE_MASK) == ME6000_AO_MODE_CONTINUOUS) {	//Enable IRQ only when hardware_continous is set and FIFO is more than half
-			PINFO("<%s:%d> Start interrupts.\n", __FUNCTION__,
+			PINFO("<%s:%d> Start interrupts.\n", __func__,
 			      __LINE__);
 			ctrl |= ME6000_AO_CTRL_BIT_ENABLE_IRQ;
 		}
@@ -1682,7 +1682,7 @@
 	spin_unlock_irqrestore(&instance->subdevice_lock, cpu_flags);
 
 	//Trigger output
-	PINFO("<%s> start mode= 0x%x %s\n", __FUNCTION__, instance->start_mode,
+	PINFO("<%s> start mode= 0x%x %s\n", __func__, instance->start_mode,
 	      (flags & ME_IO_SINGLE_TYPE_TRIG_SYNCHRONOUS) ? "SYNCHRONOUS" :
 	      "");
 	if (flags & ME_IO_SINGLE_TYPE_TRIG_SYNCHRONOUS) {	//Trigger outputs
@@ -1777,7 +1777,7 @@
 		status = inl(instance->status_reg);
 		if (!(status & ME6000_AO_STATUS_BIT_HF)) {	//More than half!
 			spin_lock_irqsave(&instance->subdevice_lock, cpu_flags);
-			PINFO("<%s:%d> Start interrupts.\n", __FUNCTION__,
+			PINFO("<%s:%d> Start interrupts.\n", __func__,
 			      __LINE__);
 			ctrl = inl(instance->ctrl_reg);
 			ctrl |= ME6000_AO_CTRL_BIT_ENABLE_IRQ;
@@ -1819,7 +1819,7 @@
 				spin_lock_irqsave(&instance->subdevice_lock,
 						  cpu_flags);
 				PINFO("<%s:%d> Start interrupts.\n",
-				      __FUNCTION__, __LINE__);
+				      __func__, __LINE__);
 				ctrl = inl(instance->ctrl_reg);
 				ctrl |= ME6000_AO_CTRL_BIT_ENABLE_IRQ;
 				outl(ctrl, instance->ctrl_reg);
@@ -2346,7 +2346,7 @@
 	irq_status = inl(instance->irq_status_reg);
 	if (!(irq_status & (ME6000_IRQ_STATUS_BIT_AO_HF << instance->ao_idx))) {
 		PINFO("%ld Shared interrupt. %s(): ID=%d: status_reg=0x%04X\n",
-		      jiffies, __FUNCTION__, instance->ao_idx, irq_status);
+		      jiffies, __func__, instance->ao_idx, irq_status);
 		return IRQ_NONE;
 	}
 
@@ -2861,7 +2861,7 @@
 			}
 		}
 
-		PINFO("<%s> Wait for stop: %d\n", __FUNCTION__, i);
+		PINFO("<%s> Wait for stop: %d\n", __func__, i);
 
 		//Still working!
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -3132,7 +3132,7 @@
 	instance =
 	    container_of((void *)work, me6000_ao_subdevice_t, ao_control_task);
 #endif
-	PINFO("<%s: %ld> executed. idx=%d\n", __FUNCTION__, jiffies,
+	PINFO("<%s: %ld> executed. idx=%d\n", __func__, jiffies,
 	      instance->ao_idx);
 
 	status = inl(instance->status_reg);
@@ -3550,7 +3550,7 @@
 		queue_delayed_work(instance->me6000_workqueue,
 				   &instance->ao_control_task, 1);
 	} else {
-		PINFO("<%s> Ending control task.\n", __FUNCTION__);
+		PINFO("<%s> Ending control task.\n", __func__);
 	}
 
 }
diff --git a/drivers/staging/meilhaus/me8100_di.c b/drivers/staging/meilhaus/me8100_di.c
index 0f14637..971727c 100644
--- a/drivers/staging/meilhaus/me8100_di.c
+++ b/drivers/staging/meilhaus/me8100_di.c
@@ -536,7 +536,7 @@
 		     PLX_INTCSR_LOCAL_INT1_EN)) {
 			PINFO
 			    ("%ld Shared interrupt. %s(): idx=0 plx:irq_status_reg=0x%04X\n",
-			     jiffies, __FUNCTION__, icsr);
+			     jiffies, __func__, icsr);
 			return IRQ_NONE;
 		}
 	} else if (instance->di_idx == 1) {
@@ -547,11 +547,11 @@
 		     PLX_INTCSR_LOCAL_INT2_EN)) {
 			PINFO
 			    ("%ld Shared interrupt. %s(): idx=1 plx:irq_status_reg=0x%04X\n",
-			     jiffies, __FUNCTION__, icsr);
+			     jiffies, __func__, icsr);
 			return IRQ_NONE;
 		}
 	} else {
-		PERROR("%s():Wrong interrupt idx=%d csr=0x%X.\n", __FUNCTION__,
+		PERROR("%s():Wrong interrupt idx=%d csr=0x%X.\n", __func__,
 		       instance->di_idx, icsr);
 		return IRQ_NONE;
 	}
diff --git a/drivers/staging/meilhaus/me8200_di.c b/drivers/staging/meilhaus/me8200_di.c
index 0bb4567..27525bc 100644
--- a/drivers/staging/meilhaus/me8200_di.c
+++ b/drivers/staging/meilhaus/me8200_di.c
@@ -570,7 +570,7 @@
 	if (!irq_status) {
 		PINFO
 		    ("%ld Shared interrupt. %s(): idx=%d irq_status_reg=0x%04X\n",
-		     jiffies, __FUNCTION__, instance->di_idx, irq_status);
+		     jiffies, __func__, instance->di_idx, irq_status);
 		return IRQ_NONE;
 	}
 
@@ -658,7 +658,7 @@
 	if (!irq_status) {
 		PINFO
 		    ("%ld Shared interrupt. %s(): idx=%d irq_status_reg=0x%04X\n",
-		     jiffies, __FUNCTION__, instance->di_idx, irq_status);
+		     jiffies, __func__, instance->di_idx, irq_status);
 		return IRQ_NONE;
 	}
 
diff --git a/drivers/staging/meilhaus/me8200_do.c b/drivers/staging/meilhaus/me8200_do.c
index 5f4ba5d..d2bebd1 100644
--- a/drivers/staging/meilhaus/me8200_do.c
+++ b/drivers/staging/meilhaus/me8200_do.c
@@ -475,7 +475,7 @@
 	     (ME8200_DO_IRQ_STATUS_BIT_ACTIVE << instance->do_idx))) {
 		PINFO
 		    ("%ld Shared interrupt. %s(): idx=%d irq_status_reg=0x%04X\n",
-		     jiffies, __FUNCTION__, instance->do_idx, irq_status);
+		     jiffies, __func__, instance->do_idx, irq_status);
 		return IRQ_NONE;
 	}
 
diff --git a/drivers/staging/meilhaus/medebug.h b/drivers/staging/meilhaus/medebug.h
index 382d00f..dcfb97c 100644
--- a/drivers/staging/meilhaus/medebug.h
+++ b/drivers/staging/meilhaus/medebug.h
@@ -66,21 +66,21 @@
 
 #ifdef MEDEBUG_DEBUG
 # define PDEBUG(fmt, args...) \
-	printk(KERN_DEBUG"ME_DRV D: <%s> " fmt, __FUNCTION__, ##args)
+	printk(KERN_DEBUG"ME_DRV D: <%s> " fmt, __func__, ##args)
 #else
 # define PDEBUG(fmt, args...)
 #endif
 
 #ifdef MEDEBUG_DEBUG_LOCKS
 # define PDEBUG_LOCKS(fmt, args...) \
-	printk(KERN_DEBUG"ME_DRV L: <%s> " fmt, __FUNCTION__, ##args)
+	printk(KERN_DEBUG"ME_DRV L: <%s> " fmt, __func__, ##args)
 #else
 # define PDEBUG_LOCKS(fmt, args...)
 #endif
 
 #ifdef MEDEBUG_DEBUG_REG
 # define PDEBUG_REG(fmt, args...) \
-	printk(KERN_DEBUG"ME_DRV R: <%s:%d> REG:" fmt, __FUNCTION__, __LINE__, ##args)
+	printk(KERN_DEBUG"ME_DRV R: <%s:%d> REG:" fmt, __func__, __LINE__, ##args)
 #else
 # define PDEBUG_REG(fmt, args...)
 #endif
@@ -108,7 +108,7 @@
 
 //This debug is only to detect logical errors!
 # define PSECURITY(fmt, args...) \
-	printk(KERN_CRIT"ME_DRV SECURITY: <%s:%s:%i> " fmt, __FILE__, __FUNCTION__, __LINE__, ##args)
+	printk(KERN_CRIT"ME_DRV SECURITY: <%s:%s:%i> " fmt, __FILE__, __func__, __LINE__, ##args)
 //This debug is to keep track in customers' system
 # define PLOG(fmt, args...) \
 	printk(KERN_INFO"ME_DRV: " fmt, ##args)
@@ -116,7 +116,7 @@
 //This debug is to check new parts during development
 #ifdef MEDEBUG_DEVELOP
 # define PDEVELOP(fmt, args...) \
-	printk(KERN_CRIT"ME_DRV: <%s:%s:%i> " fmt, __FILE__, __FUNCTION__, __LINE__, ##args)
+	printk(KERN_CRIT"ME_DRV: <%s:%s:%i> " fmt, __FILE__, __func__, __LINE__, ##args)
 #else
 # define PDEVELOP(fmt, args...)
 #endif
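Most of the staging churn in this series is the same mechanical substitution seen in medebug.h above: __FUNCTION__ is a GCC-specific spelling, while __func__ is the predefined identifier standardized by C99, which is why checkpatch and the kernel coding style prefer it. A tiny standalone illustration (plain userspace C, not kernel code):

	#include <stdio.h>

	static void show(void)
	{
		printf("standard: %s\n", __func__);	/* prints "show" */
	#ifdef __GNUC__
		printf("gcc-only: %s\n", __FUNCTION__);	/* same string on gcc */
	#endif
	}

	int main(void)
	{
		show();
		return 0;
	}
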
diff --git a/drivers/staging/otus/80211core/cagg.c b/drivers/staging/otus/80211core/cagg.c
index fcfd01a..4942190 100644
--- a/drivers/staging/otus/80211core/cagg.c
+++ b/drivers/staging/otus/80211core/cagg.c
@@ -1933,7 +1933,7 @@
      */
 
     /* zm_msg2_agg(ZM_LV_0, "queue seq = ", seq_no);
-     * DbgPrint("%s:%s%lxh %s%lxh\n", __FUNCTION__, "queue seq=", seq_no,
+     * DbgPrint("%s:%s%lxh %s%lxh\n", __func__, "queue seq=", seq_no,
      *   "; seq_start=", tid_rx->seq_start);
      */
 
diff --git a/drivers/staging/otus/80211core/pub_zfw.h b/drivers/staging/otus/80211core/pub_zfw.h
index 01a2272..2474bb7 100644
--- a/drivers/staging/otus/80211core/pub_zfw.h
+++ b/drivers/staging/otus/80211core/pub_zfw.h
@@ -23,7 +23,7 @@
 /* Buffer management */
 #ifdef ZM_ENABLE_BUFFER_DEBUG
 extern zbuf_t* zfwBufAllocateWithContext(zdev_t* dev, u16_t len, u8_t *functionName, ULONG line);
-#define zfwBufAllocate(dev, len)  zfwBufAllocateWithContext(dev, len, (u8_t *)__FUNCTION__, __LINE__)
+#define zfwBufAllocate(dev, len)  zfwBufAllocateWithContext(dev, len, (u8_t *)__func__, __LINE__)
 #else
 extern zbuf_t* zfwBufAllocate(zdev_t* dev, u16_t len);
 #endif
diff --git a/drivers/staging/otus/80211core/struct.h b/drivers/staging/otus/80211core/struct.h
index 43631c6..17b5ce3 100644
--- a/drivers/staging/otus/80211core/struct.h
+++ b/drivers/staging/otus/80211core/struct.h
@@ -137,7 +137,7 @@
 
 #ifdef ZM_ENABLE_BUFFER_TRACE
 extern void zfwBufTrace(zdev_t* dev, zbuf_t *buf, u8_t *functionName);
-#define ZM_BUFFER_TRACE(dev, buf)       zfwBufTrace(dev, buf, __FUNCTION__);
+#define ZM_BUFFER_TRACE(dev, buf)       zfwBufTrace(dev, buf, __func__);
 #else
 #define ZM_BUFFER_TRACE(dev, buf)
 #endif
diff --git a/drivers/staging/otus/oal_marc.h b/drivers/staging/otus/oal_marc.h
index 438e4bc..2061116 100644
--- a/drivers/staging/otus/oal_marc.h
+++ b/drivers/staging/otus/oal_marc.h
@@ -106,14 +106,14 @@
 
 /***** Debug message *****/
 #if 0
-#define zm_debug_msg0(msg) printk("%s:%s\n", __FUNCTION__, msg);
-#define zm_debug_msg1(msg, val) printk("%s:%s%ld\n", __FUNCTION__, \
+#define zm_debug_msg0(msg) printk("%s:%s\n", __func__, msg);
+#define zm_debug_msg1(msg, val) printk("%s:%s%ld\n", __func__, \
         msg, (u32_t)val);
-#define zm_debug_msg2(msg, val) printk("%s:%s%lxh\n", __FUNCTION__, \
+#define zm_debug_msg2(msg, val) printk("%s:%s%lxh\n", __func__, \
         msg, (u32_t)val);
-#define zm_debug_msg_s(msg, val) printk("%s:%s%s\n", __FUNCTION__, \
+#define zm_debug_msg_s(msg, val) printk("%s:%s%s\n", __func__, \
         msg, val);
-#define zm_debug_msg_p(msg, val1, val2) printk("%s:%s%01ld.%02ld\n", __FUNCTION__, \
+#define zm_debug_msg_p(msg, val1, val2) printk("%s:%s%01ld.%02ld\n", __func__, \
         msg, (val1/val2), (((val1*100)/val2)%100));
 #define zm_dbg(S) printk S
 #else
@@ -127,7 +127,7 @@
 
 #define zm_assert(expr) if(!(expr)) {                           \
         printk( "Atheors Assertion failed! %s,%s,%s,line=%d\n",   \
-        #expr,__FILE__,__FUNCTION__,__LINE__);                  \
+        #expr,__FILE__,__func__,__LINE__);                  \
         }
 
 #define DbgPrint printk
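The zm_debug_msg* and zm_assert macros touched above expand to bare statements that end in their own semicolon (or to an unbraced if block), which is fragile when the macro call sits inside an unbraced if/else in the caller. The usual hardening, shown here only as a sketch and not as part of this patch, wraps each expansion in do { } while (0); the printk format is also tidied to match the unsigned long cast:

	#define zm_debug_msg1(msg, val)						\
		do {								\
			printk("%s:%s%lu\n", __func__, (msg),			\
			       (unsigned long)(val));				\
		} while (0)

	#define zm_assert(expr)							\
		do {								\
			if (!(expr))						\
				printk("Atheros assertion failed! %s,%s,%s,line=%d\n", \
				       #expr, __FILE__, __func__, __LINE__);	\
		} while (0)
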
diff --git a/drivers/staging/rt2860/2860_main_dev.c b/drivers/staging/rt2860/2860_main_dev.c
index 1e38f2d..08ca81f 100644
--- a/drivers/staging/rt2860/2860_main_dev.c
+++ b/drivers/staging/rt2860/2860_main_dev.c
@@ -1202,7 +1202,7 @@
 	UCHAR			bcn_idx = 0;
 
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s() : No valid Interface be found.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s() : No valid Interface be found.\n", __func__));
 		return;
 	}
 
diff --git a/drivers/staging/rt2860/common/ba_action.c b/drivers/staging/rt2860/common/ba_action.c
index 8247aeb..591d1e2 100644
--- a/drivers/staging/rt2860/common/ba_action.c
+++ b/drivers/staging/rt2860/common/ba_action.c
@@ -599,7 +599,7 @@
 
 		pBAEntry->ORIBATimer.TimerValue = 0;	//pFrame->TimeOutValue;
 
-		DBGPRINT(RT_DEBUG_TRACE,("%s : TXBAbitmap = %x, BAWinSize = %d, TimeOut = %ld\n", __FUNCTION__, pEntry->TXBAbitmap,
+		DBGPRINT(RT_DEBUG_TRACE,("%s : TXBAbitmap = %x, BAWinSize = %d, TimeOut = %ld\n", __func__, pEntry->TXBAbitmap,
 								 pBAEntry->BAWinSize, pBAEntry->ORIBATimer.TimerValue));
 
 		// SEND BAR ;
@@ -673,7 +673,7 @@
 		ba_refresh_reordering_mpdus(pAd, pBAEntry);
 	}
 
-	DBGPRINT(RT_DEBUG_TRACE,("%s(%ld): Idx = %d, BAWinSize(req %d) = %d\n", __FUNCTION__, pAd->BATable.numAsRecipient, Idx,
+	DBGPRINT(RT_DEBUG_TRACE,("%s(%ld): Idx = %d, BAWinSize(req %d) = %d\n", __func__, pAd->BATable.numAsRecipient, Idx,
 							 pFrame->BaParm.BufSize, BAWinSize));
 
 	// Start fill in parameters.
@@ -915,7 +915,7 @@
 		return;
 	}
 
-	DBGPRINT(RT_DEBUG_TRACE,("%s===>Wcid=%d.TID=%d \n", __FUNCTION__, Wcid, TID));
+	DBGPRINT(RT_DEBUG_TRACE,("%s===>Wcid=%d.TID=%d \n", __func__, Wcid, TID));
 
 	pBAEntry = &pAd->BATable.BAOriEntry[Idx];
 	DBGPRINT(RT_DEBUG_TRACE,("\t===>Idx = %ld, Wcid=%d.TID=%d, ORI_BA_Status = %d \n", Idx, Wcid, TID, pBAEntry->ORI_BA_Status));
@@ -974,7 +974,7 @@
 	if (Idx == 0)
 		return;
 
-	DBGPRINT(RT_DEBUG_TRACE,("%s===>Wcid=%d.TID=%d \n", __FUNCTION__, Wcid, TID));
+	DBGPRINT(RT_DEBUG_TRACE,("%s===>Wcid=%d.TID=%d \n", __func__, Wcid, TID));
 
 
 	pBAEntry = &pAd->BATable.BARecEntry[Idx];
@@ -1185,7 +1185,7 @@
 	PULONG      ptemp;
 	PMAC_TABLE_ENTRY	pMacEntry;
 
-	DBGPRINT(RT_DEBUG_TRACE, ("%s ==> (Wcid = %d)\n", __FUNCTION__, Elem->Wcid));
+	DBGPRINT(RT_DEBUG_TRACE, ("%s ==> (Wcid = %d)\n", __func__, Elem->Wcid));
 
 	//hex_dump("AddBAReq", Elem->Msg, Elem->MsgLen);
 
@@ -1269,7 +1269,7 @@
 	MiniportMMRequest(pAd, QID_AC_BE, pOutBuffer, FrameLen);
 	MlmeFreeMemory(pAd, pOutBuffer);
 
-	DBGPRINT(RT_DEBUG_TRACE, ("%s(%d): TID(%d), BufSize(%d) <== \n", __FUNCTION__, Elem->Wcid, ADDframe.BaParm.TID,
+	DBGPRINT(RT_DEBUG_TRACE, ("%s(%d): TID(%d), BufSize(%d) <== \n", __func__, Elem->Wcid, ADDframe.BaParm.TID,
 							  ADDframe.BaParm.BufSize));
 }
 
@@ -1288,7 +1288,7 @@
 	if (Elem->Wcid >= MAX_LEN_OF_MAC_TABLE)
 		return;
 
-	DBGPRINT(RT_DEBUG_TRACE, ("%s ==> Wcid(%d)\n", __FUNCTION__, Elem->Wcid));
+	DBGPRINT(RT_DEBUG_TRACE, ("%s ==> Wcid(%d)\n", __func__, Elem->Wcid));
 
 	//hex_dump("PeerAddBARspAction()", Elem->Msg, Elem->MsgLen);
 
@@ -1329,7 +1329,7 @@
 	//PUCHAR				pOutBuffer = NULL;
 	PFRAME_DELBA_REQ    pDelFrame = NULL;
 
-	DBGPRINT(RT_DEBUG_TRACE,("%s ==>\n", __FUNCTION__));
+	DBGPRINT(RT_DEBUG_TRACE,("%s ==>\n", __func__));
 	//DELBA Request from unknown peer, ignore this.
 	if (PeerDelBAActionSanity(pAd, Elem->Wcid, Elem->Msg, Elem->MsgLen))
 	{
@@ -1366,7 +1366,7 @@
 
 	TID = (UCHAR)pFrame->BARControl.TID;
 
-	DBGPRINT(RT_DEBUG_TRACE, ("%s(): BAR-Wcid(%ld), Tid (%d)\n", __FUNCTION__, Wcid, TID));
+	DBGPRINT(RT_DEBUG_TRACE, ("%s(): BAR-Wcid(%ld), Tid (%d)\n", __func__, Wcid, TID));
 	//hex_dump("BAR", (PCHAR) pFrame, MsgLen);
 	// Do nothing if the driver is starting halt state.
 	// This might happen when timer already been fired before cancel timer with mlmehalt
diff --git a/drivers/staging/rt2860/common/cmm_data.c b/drivers/staging/rt2860/common/cmm_data.c
index ac54901..b67b9eb 100644
--- a/drivers/staging/rt2860/common/cmm_data.c
+++ b/drivers/staging/rt2860/common/cmm_data.c
@@ -2787,7 +2787,7 @@
 		}
 		else
 		{
-			printk("\n%s: Impossible Wcid = %d !!!!!\n", __FUNCTION__, wcid);
+			printk("\n%s: Impossible Wcid = %d !!!!!\n", __func__, wcid);
 		}
 	}
 
diff --git a/drivers/staging/rt2860/common/cmm_data_2860.c b/drivers/staging/rt2860/common/cmm_data_2860.c
index 4f414ed..419e50c 100644
--- a/drivers/staging/rt2860/common/cmm_data_2860.c
+++ b/drivers/staging/rt2860/common/cmm_data_2860.c
@@ -1002,7 +1002,7 @@
         AutoWakeupCfg.field.AutoLeadTime = 5;
         RTMP_IO_WRITE32(pAd, AUTO_WAKEUP_CFG, AutoWakeupCfg.word);
         AsicSendCommandToMcu(pAd, 0x30, 0xff, 0xff, 0x00);   // send POWER-SAVE command to MCU. Timeout 40us.
-        DBGPRINT(RT_DEBUG_TRACE, ("<-- %s, TbttNumToNextWakeUp=%d \n", __FUNCTION__, TbttNumToNextWakeUp));
+        DBGPRINT(RT_DEBUG_TRACE, ("<-- %s, TbttNumToNextWakeUp=%d \n", __func__, TbttNumToNextWakeUp));
     }
     OPSTATUS_SET_FLAG(pAd, fOP_STATUS_DOZE);
 }
@@ -1115,7 +1115,7 @@
     if (!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_RADIO_OFF))
    		return;
 
-    DBGPRINT(RT_DEBUG_TRACE,("%s===>\n", __FUNCTION__));
+    DBGPRINT(RT_DEBUG_TRACE,("%s===>\n", __func__));
 
     if ((pAd->OpMode == OPMODE_AP) ||
         ((pAd->OpMode == OPMODE_STA) && (!OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_ADVANCE_POWER_SAVE_PCIE_DEVICE))))
@@ -1165,7 +1165,7 @@
     if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_RADIO_OFF))
     	return;
 
-    DBGPRINT(RT_DEBUG_TRACE,("%s===>\n", __FUNCTION__));
+    DBGPRINT(RT_DEBUG_TRACE,("%s===>\n", __func__));
 
 	// Set LED
 	RTMPSetLED(pAd, LED_RADIO_OFF);
diff --git a/drivers/staging/rt2860/common/cmm_info.c b/drivers/staging/rt2860/common/cmm_info.c
index 0aadf8a..dd92ac6 100644
--- a/drivers/staging/rt2860/common/cmm_info.c
+++ b/drivers/staging/rt2860/common/cmm_info.c
@@ -2039,7 +2039,7 @@
 	wrq->u.data.length = sizeof(RT_802_11_MAC_TABLE);
 	if (copy_to_user(wrq->u.data.pointer, &MacTab, wrq->u.data.length))
 	{
-		DBGPRINT(RT_DEBUG_TRACE, ("%s: copy_to_user() fail\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_TRACE, ("%s: copy_to_user() fail\n", __func__));
 	}
 
 	msg = (CHAR *) kmalloc(sizeof(CHAR)*(MAX_LEN_OF_MAC_TABLE*MAC_LINE_LEN), MEM_ALLOC_FLAG);
diff --git a/drivers/staging/rt2860/common/dfs.c b/drivers/staging/rt2860/common/dfs.c
index 23cf151..b09bba5 100644
--- a/drivers/staging/rt2860/common/dfs.c
+++ b/drivers/staging/rt2860/common/dfs.c
@@ -428,7 +428,7 @@
 
 	pAd->CommonCfg.RadarDetect.ChMovingTime = Value;
 
-	DBGPRINT(RT_DEBUG_TRACE, ("%s:: %d\n", __FUNCTION__,
+	DBGPRINT(RT_DEBUG_TRACE, ("%s:: %d\n", __func__,
 		pAd->CommonCfg.RadarDetect.ChMovingTime));
 
 	return TRUE;
@@ -444,7 +444,7 @@
 
 	pAd->CommonCfg.RadarDetect.LongPulseRadarTh = Value;
 
-	DBGPRINT(RT_DEBUG_TRACE, ("%s:: %d\n", __FUNCTION__,
+	DBGPRINT(RT_DEBUG_TRACE, ("%s:: %d\n", __func__,
 		pAd->CommonCfg.RadarDetect.LongPulseRadarTh));
 
 	return TRUE;
diff --git a/drivers/staging/rt2860/common/rtmp_init.c b/drivers/staging/rt2860/common/rtmp_init.c
index 84edfa5..563f2c5 100644
--- a/drivers/staging/rt2860/common/rtmp_init.c
+++ b/drivers/staging/rt2860/common/rtmp_init.c
@@ -2542,7 +2542,7 @@
 #ifdef BIN_IN_FILE
 #define NICLF_DEFAULT_USE()	\
 	flg_default_firm_use = TRUE; \
-	printk("%s - Use default firmware!\n", __FUNCTION__);
+	printk("%s - Use default firmware!\n", __func__);
 
 	NDIS_STATUS		Status = NDIS_STATUS_SUCCESS;
 	PUCHAR			src;
@@ -2557,7 +2557,7 @@
 	BOOLEAN			flg_default_firm_use = FALSE;
 
 
-	DBGPRINT(RT_DEBUG_TRACE, ("===> %s\n", __FUNCTION__));
+	DBGPRINT(RT_DEBUG_TRACE, ("===> %s\n", __func__));
 
 	/* init */
 	pFirmwareImage = NULL;
@@ -2580,7 +2580,7 @@
     if (pFirmwareImage == NULL)
 	{
 		/* allocate fail, use default firmware array in firmware.h */
-		printk("%s - Allocate memory fail!\n", __FUNCTION__);
+		printk("%s - Allocate memory fail!\n", __func__);
 		NICLF_DEFAULT_USE();
     }
 	else
@@ -2601,7 +2601,7 @@
 			if (IS_ERR(srcf))
 			{
 				printk("%s - Error %ld opening %s\n",
-					   __FUNCTION__, -PTR_ERR(srcf), src);
+					   __func__, -PTR_ERR(srcf), src);
 				NICLF_DEFAULT_USE();
 				break;
 			} /* End of if */
@@ -2609,7 +2609,7 @@
 			/* the object must have a read method */
 			if ((srcf->f_op == NULL) || (srcf->f_op->read == NULL))
 			{
-				printk("%s - %s does not have a write method\n", __FUNCTION__, src);
+				printk("%s - %s does not have a write method\n", __func__, src);
 				NICLF_DEFAULT_USE();
 				break;
 			} /* End of if */
@@ -2623,7 +2623,7 @@
 			if (FileLength != MAX_FIRMWARE_IMAGE_SIZE)
 			{
 				printk("%s: error file length (=%d) in RT2860AP.BIN\n",
-					   __FUNCTION__, FileLength);
+					   __func__, FileLength);
 				NICLF_DEFAULT_USE();
 				break;
 			}
@@ -2646,7 +2646,7 @@
 					/* CRC fail */
 					printk("%s: CRC = 0x%02x 0x%02x "
 						   "error, should be 0x%02x 0x%02x\n",
-						   __FUNCTION__,
+						   __func__,
 						   pFirmwareImage[MAX_FIRMWARE_IMAGE_SIZE-2],
 						   pFirmwareImage[MAX_FIRMWARE_IMAGE_SIZE-1],
 						   (UCHAR)(crc>>8), (UCHAR)(crc));
@@ -2665,7 +2665,7 @@
 											((FIRMWARE_MAJOR_VERSION << 8) +
 									  	 	 FIRMWARE_MINOR_VERSION))
 					{
-						printk("%s: firmware version too old!\n", __FUNCTION__);
+						printk("%s: firmware version too old!\n", __func__);
 						NICLF_DEFAULT_USE();
 						break;
 					} /* End of if */
@@ -2770,7 +2770,7 @@
 	} /* End of if */
 
     DBGPRINT(RT_DEBUG_TRACE,
-			 ("<=== %s (status=%d)\n", __FUNCTION__, Status));
+			 ("<=== %s (status=%d)\n", __func__, Status));
     return Status;
 } /* End of NICLoadFirmware */
 
diff --git a/drivers/staging/rt2860/common/spectrum.c b/drivers/staging/rt2860/common/spectrum.c
index 85e636a..0265a6d 100644
--- a/drivers/staging/rt2860/common/spectrum.c
+++ b/drivers/staging/rt2860/common/spectrum.c
@@ -49,7 +49,7 @@
 	if (pAd->CommonCfg.pMeasureReqTab)
 		NdisZeroMemory(pAd->CommonCfg.pMeasureReqTab, sizeof(MEASURE_REQ_TAB));
 	else
-		DBGPRINT(RT_DEBUG_ERROR, ("%s Fail to alloc memory for pAd->CommonCfg.pMeasureReqTab.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s Fail to alloc memory for pAd->CommonCfg.pMeasureReqTab.\n", __func__));
 
 	return;
 }
@@ -77,7 +77,7 @@
 
 	if (pTab == NULL)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab doesn't exist.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab doesn't exist.\n", __func__));
 		return NULL;
 	}
 
@@ -114,7 +114,7 @@
 
 	if(pTab == NULL)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab doesn't exist.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab doesn't exist.\n", __func__));
 		return NULL;
 	}
 
@@ -175,7 +175,7 @@
 		else
 		{
 			pEntry = NULL;
-			DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab tab full.\n", __FUNCTION__));
+			DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab tab full.\n", __func__));
 		}
 
 		// add this Neighbor entry into HASH table
@@ -210,7 +210,7 @@
 
 	if(pTab == NULL)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab doesn't exist.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab doesn't exist.\n", __func__));
 		return;
 	}
 
@@ -267,7 +267,7 @@
 	if (pAd->CommonCfg.pTpcReqTab)
 		NdisZeroMemory(pAd->CommonCfg.pTpcReqTab, sizeof(TPC_REQ_TAB));
 	else
-		DBGPRINT(RT_DEBUG_ERROR, ("%s Fail to alloc memory for pAd->CommonCfg.pTpcReqTab.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s Fail to alloc memory for pAd->CommonCfg.pTpcReqTab.\n", __func__));
 
 	return;
 }
@@ -295,7 +295,7 @@
 
 	if (pTab == NULL)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab doesn't exist.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab doesn't exist.\n", __func__));
 		return NULL;
 	}
 
@@ -333,7 +333,7 @@
 
 	if(pTab == NULL)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab doesn't exist.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab doesn't exist.\n", __func__));
 		return NULL;
 	}
 
@@ -394,7 +394,7 @@
 		else
 		{
 			pEntry = NULL;
-			DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab tab full.\n", __FUNCTION__));
+			DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab tab full.\n", __func__));
 		}
 
 		// add this Neighbor entry into HASH table
@@ -429,7 +429,7 @@
 
 	if(pTab == NULL)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab doesn't exist.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab doesn't exist.\n", __func__));
 		return;
 	}
 
@@ -782,7 +782,7 @@
 	NStatus = MlmeAllocateMemory(pAd, (PVOID)&pOutBuffer);  //Get an unused nonpaged memory
 	if(NStatus != NDIS_STATUS_SUCCESS)
 	{
-		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __func__));
 		return;
 	}
 	NdisMoveMemory(pOutBuffer, (PCHAR)&ActHdr, sizeof(HEADER_802_11));
@@ -844,7 +844,7 @@
 	NStatus = MlmeAllocateMemory(pAd, (PVOID)&pOutBuffer);  //Get an unused nonpaged memory
 	if(NStatus != NDIS_STATUS_SUCCESS)
 	{
-		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __func__));
 		return;
 	}
 	NdisMoveMemory(pOutBuffer, (PCHAR)&ActHdr, sizeof(HEADER_802_11));
@@ -898,7 +898,7 @@
 	NStatus = MlmeAllocateMemory(pAd, (PVOID)&pOutBuffer);  //Get an unused nonpaged memory
 	if(NStatus != NDIS_STATUS_SUCCESS)
 	{
-		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __func__));
 		return;
 	}
 	NdisMoveMemory(pOutBuffer, (PCHAR)&ActHdr, sizeof(HEADER_802_11));
@@ -950,7 +950,7 @@
 	NStatus = MlmeAllocateMemory(pAd, (PVOID)&pOutBuffer);  //Get an unused nonpaged memory
 	if(NStatus != NDIS_STATUS_SUCCESS)
 	{
-		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __func__));
 		return;
 	}
 	NdisMoveMemory(pOutBuffer, (PCHAR)&ActHdr, sizeof(HEADER_802_11));
@@ -1003,7 +1003,7 @@
 	NStatus = MlmeAllocateMemory(pAd, (PVOID)&pOutBuffer);  //Get an unused nonpaged memory
 	if(NStatus != NDIS_STATUS_SUCCESS)
 	{
-		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __func__));
 		return;
 	}
 	NdisMoveMemory(pOutBuffer, (PCHAR)&ActHdr, sizeof(HEADER_802_11));
@@ -1596,7 +1596,7 @@
 
 	if ((pMeasureReportInfo = kmalloc(sizeof(MEASURE_RPI_REPORT), GFP_ATOMIC)) == NULL)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s unable to alloc memory for measure report buffer (size=%d).\n", __FUNCTION__, sizeof(MEASURE_RPI_REPORT)));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s unable to alloc memory for measure report buffer (size=%d).\n", __func__, sizeof(MEASURE_RPI_REPORT)));
 		return;
 	}
 
@@ -1705,7 +1705,7 @@
 		{
 			TpcReqDelete(pAd, pEntry->DialogToken);
 			DBGPRINT(RT_DEBUG_TRACE, ("%s: DialogToken=%x, TxPwr=%d, LinkMargin=%d\n",
-				__FUNCTION__, DialogToken, TpcRepInfo.TxPwr, TpcRepInfo.LinkMargin));
+				__func__, DialogToken, TpcRepInfo.TxPwr, TpcRepInfo.LinkMargin));
 		}
 	}
 
@@ -1821,7 +1821,7 @@
 				MeasureReqType = simple_strtol(thisChar, 0, 16);
 				if (MeasureReqType > 3)
 				{
-					DBGPRINT(RT_DEBUG_ERROR, ("%s: unknow MeasureReqType(%d)\n", __FUNCTION__, MeasureReqType));
+					DBGPRINT(RT_DEBUG_ERROR, ("%s: unknow MeasureReqType(%d)\n", __func__, MeasureReqType));
 					return TRUE;
 				}
 				break;
@@ -1833,10 +1833,10 @@
 		ArgIdx++;
 	}
 
-	DBGPRINT(RT_DEBUG_TRACE, ("%s::Aid = %d, MeasureReqType=%d MeasureCh=%d\n", __FUNCTION__, Aid, MeasureReqType, MeasureCh));
+	DBGPRINT(RT_DEBUG_TRACE, ("%s::Aid = %d, MeasureReqType=%d MeasureCh=%d\n", __func__, Aid, MeasureReqType, MeasureCh));
 	if (!VALID_WCID(Aid))
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: unknow sta of Aid(%d)\n", __FUNCTION__, Aid));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: unknow sta of Aid(%d)\n", __func__, Aid));
 		return TRUE;
 	}
 
@@ -1861,10 +1861,10 @@
 
 	Aid = simple_strtol(arg, 0, 16);
 
-	DBGPRINT(RT_DEBUG_TRACE, ("%s::Aid = %d\n", __FUNCTION__, Aid));
+	DBGPRINT(RT_DEBUG_TRACE, ("%s::Aid = %d\n", __func__, Aid));
 	if (!VALID_WCID(Aid))
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: unknow sta of Aid(%d)\n", __FUNCTION__, Aid));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: unknow sta of Aid(%d)\n", __func__, Aid));
 		return TRUE;
 	}
 
diff --git a/drivers/staging/rt2860/rt_ate.c b/drivers/staging/rt2860/rt_ate.c
index 2f07db5..f3316ec 100644
--- a/drivers/staging/rt2860/rt_ate.c
+++ b/drivers/staging/rt2860/rt_ate.c
@@ -291,7 +291,7 @@
 				Bbp94 = BBPR94_DEFAULT;
 			}
 
-			ATEDBGPRINT(RT_DEBUG_TRACE, ("%s (TxPower=%d, R=%ld, BBP_R94=%d)\n", __FUNCTION__, TxPower, R, Bbp94));
+			ATEDBGPRINT(RT_DEBUG_TRACE, ("%s (TxPower=%d, R=%ld, BBP_R94=%d)\n", __func__, TxPower, R, Bbp94));
 		}
 		else// 5.5 GHz
 		{
@@ -318,7 +318,7 @@
 				R = (ULONG) TxPower;
 			}
 
-			ATEDBGPRINT(RT_DEBUG_TRACE, ("%s (TxPower=%d, R=%lu)\n", __FUNCTION__, TxPower, R));
+			ATEDBGPRINT(RT_DEBUG_TRACE, ("%s (TxPower=%d, R=%lu)\n", __func__, TxPower, R));
 		}
 
 		if (pAd->ate.Channel <= 14)
@@ -431,7 +431,7 @@
 		Bbp94 = BBPR94_DEFAULT;
 	}
 
-	ATEDBGPRINT(RT_DEBUG_TRACE, ("%s (TxPower=%d, R3=%ld, BBP_R94=%d)\n", __FUNCTION__, TxPower, R, Bbp94));
+	ATEDBGPRINT(RT_DEBUG_TRACE, ("%s (TxPower=%d, R3=%ld, BBP_R94=%d)\n", __func__, TxPower, R, Bbp94));
 
 		if (pAd->ate.Channel <= 14)
 		{
@@ -2098,7 +2098,7 @@
 	UINT32			FileLength = 0;
 	UINT32 			value = simple_strtol(arg, 0, 10);
 
-	ATEDBGPRINT(RT_DEBUG_ERROR, ("===> %s (value=%d)\n\n", __FUNCTION__, value));
+	ATEDBGPRINT(RT_DEBUG_ERROR, ("===> %s (value=%d)\n\n", __func__, value));
 
 	if (value > 0)
 	{
@@ -2122,14 +2122,14 @@
 
 			if (IS_ERR(srcf))
 			{
-				ate_print("%s - Error %ld opening %s\n", __FUNCTION__, -PTR_ERR(srcf), src);
+				ate_print("%s - Error %ld opening %s\n", __func__, -PTR_ERR(srcf), src);
 				break;
 			}
 
 			/* the object must have a read method */
 			if ((srcf->f_op == NULL) || (srcf->f_op->read == NULL))
 			{
-				ate_print("%s - %s does not have a read method\n", __FUNCTION__, src);
+				ate_print("%s - %s does not have a read method\n", __func__, src);
 				break;
 			}
 
@@ -2142,7 +2142,7 @@
 			if (FileLength != EEPROM_SIZE)
 			{
 				ate_print("%s: error file length (=%d) in e2p.bin\n",
-					   __FUNCTION__, FileLength);
+					   __func__, FileLength);
 				break;
 			}
 			else
@@ -2174,7 +2174,7 @@
 		current->fsuid = orgfsuid;
 		current->fsgid = orgfsgid;
 	}
-    ATEDBGPRINT(RT_DEBUG_ERROR, ("<=== %s (ret=%d)\n", __FUNCTION__, ret));
+    ATEDBGPRINT(RT_DEBUG_ERROR, ("<=== %s (ret=%d)\n", __func__, ret));
 
     return ret;
 
@@ -2187,12 +2187,12 @@
 	USHORT 			WriteEEPROM[(EEPROM_SIZE/2)];
 	struct iwreq	*wrq = (struct iwreq *)arg;
 
-	ATEDBGPRINT(RT_DEBUG_TRACE, ("===> %s (wrq->u.data.length = %d)\n\n", __FUNCTION__, wrq->u.data.length));
+	ATEDBGPRINT(RT_DEBUG_TRACE, ("===> %s (wrq->u.data.length = %d)\n\n", __func__, wrq->u.data.length));
 
 	if (wrq->u.data.length != EEPROM_SIZE)
 	{
 		ate_print("%s: error length (=%d) from host\n",
-			   __FUNCTION__, wrq->u.data.length);
+			   __func__, wrq->u.data.length);
 		return FALSE;
 	}
 	else/* (wrq->u.data.length == EEPROM_SIZE) */
@@ -2211,7 +2211,7 @@
 		} while(FALSE);
 		}
 
-    ATEDBGPRINT(RT_DEBUG_TRACE, ("<=== %s\n", __FUNCTION__));
+    ATEDBGPRINT(RT_DEBUG_TRACE, ("<=== %s\n", __func__));
 
     return TRUE;
 
@@ -3353,7 +3353,7 @@
 	if (pPacket == NULL)
 	{
 		pAd->ate.TxCount = 0;
-		ATEDBGPRINT(RT_DEBUG_TRACE, ("%s fail to alloc packet space.\n", __FUNCTION__));
+		ATEDBGPRINT(RT_DEBUG_TRACE, ("%s fail to alloc packet space.\n", __func__));
 		return -1;
 	}
 	pTxRing->Cell[TxIdx].pNextNdisPacket = pPacket;
@@ -3646,7 +3646,7 @@
 
 	Command_Id = ntohs(pRaCfg->command_id);
 
-	ATEDBGPRINT(RT_DEBUG_TRACE,("\n%s: Command_Id = 0x%04x !\n", __FUNCTION__, Command_Id));
+	ATEDBGPRINT(RT_DEBUG_TRACE,("\n%s: Command_Id = 0x%04x !\n", __func__, Command_Id));
 
 	switch (Command_Id)
 	{
@@ -5690,7 +5690,7 @@
 					pAd->ate.TxAntennaSel = 2;
 		            break;
 		        default:
-		            DBGPRINT(RT_DEBUG_TRACE, ("%s -- Sth. wrong!  : return FALSE; \n", __FUNCTION__));
+		            DBGPRINT(RT_DEBUG_TRACE, ("%s -- Sth. wrong!  : return FALSE; \n", __func__));
 		            return FALSE;
 		    }
 			break;/* case BBP_R1 */
@@ -5728,13 +5728,13 @@
 					pAd->ate.RxAntennaSel = 3;
 		            break;
 		        default:
-		            DBGPRINT(RT_DEBUG_ERROR, ("%s -- Impossible!  : return FALSE; \n", __FUNCTION__));
+		            DBGPRINT(RT_DEBUG_ERROR, ("%s -- Impossible!  : return FALSE; \n", __func__));
 		            return FALSE;
 		    }
 			break;/* case BBP_R3 */
 
         default:
-            DBGPRINT(RT_DEBUG_ERROR, ("%s -- Sth. wrong!  : return FALSE; \n", __FUNCTION__));
+            DBGPRINT(RT_DEBUG_ERROR, ("%s -- Sth. wrong!  : return FALSE; \n", __func__));
             return FALSE;
 
 	}
diff --git a/drivers/staging/rt2860/rt_linux.c b/drivers/staging/rt2860/rt_linux.c
index 374c174..f145009 100644
--- a/drivers/staging/rt2860/rt_linux.c
+++ b/drivers/staging/rt2860/rt_linux.c
@@ -406,7 +406,7 @@
  	skb_put(GET_OS_PKT_TYPE(pPacket), HeaderLen+DataLen);
 
 	RTMP_SET_PACKET_SOURCE(pPacket, PKTSRC_NDIS);
-//	printk("%s : pPacket = %p, len = %d\n", __FUNCTION__, pPacket, GET_OS_PKT_LEN(pPacket));
+//	printk("%s : pPacket = %p, len = %d\n", __func__, pPacket, GET_OS_PKT_LEN(pPacket));
 	*ppPacket = pPacket;
 	return NDIS_STATUS_SUCCESS;
 }
@@ -773,13 +773,13 @@
 
 	if (event_table_len == 0)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s : The type(%0x02x) is not valid.\n", __FUNCTION__, type));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s : The type(%0x02x) is not valid.\n", __func__, type));
 		return;
 	}
 
 	if (event >= event_table_len)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s : The event(%0x02x) is not valid.\n", __FUNCTION__, event));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s : The event(%0x02x) is not valid.\n", __func__, event));
 		return;
 	}
 
@@ -817,14 +817,14 @@
 		//send wireless event
 	    wireless_send_event(pAd->net_dev, IWEVCUSTOM, &wrqu, pBuf);
 
-		//DBGPRINT(RT_DEBUG_TRACE, ("%s : %s\n", __FUNCTION__, pBuf));
+		//DBGPRINT(RT_DEBUG_TRACE, ("%s : %s\n", __func__, pBuf));
 
 		kfree(pBuf);
 	}
 	else
-		DBGPRINT(RT_DEBUG_ERROR, ("%s : Can't allocate memory for wireless event.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s : Can't allocate memory for wireless event.\n", __func__));
 #else
-	DBGPRINT(RT_DEBUG_ERROR, ("%s : The Wireless Extension MUST be v15 or newer.\n", __FUNCTION__));
+	DBGPRINT(RT_DEBUG_ERROR, ("%s : The Wireless Extension MUST be v15 or newer.\n", __func__));
 #endif  /* WIRELESS_EXT >= 15 */
 }
 
@@ -848,13 +848,13 @@
     ASSERT(pRxBlk->pRxPacket);
     if (pRxBlk->DataSize < 10)
     {
-        DBGPRINT(RT_DEBUG_ERROR, ("%s : Size is too small! (%d)\n", __FUNCTION__, pRxBlk->DataSize));
+        DBGPRINT(RT_DEBUG_ERROR, ("%s : Size is too small! (%d)\n", __func__, pRxBlk->DataSize));
 		goto err_free_sk_buff;
     }
 
     if (pRxBlk->DataSize + sizeof(wlan_ng_prism2_header) > RX_BUFFER_AGGRESIZE)
     {
-        DBGPRINT(RT_DEBUG_ERROR, ("%s : Size is too large! (%d)\n", __FUNCTION__, pRxBlk->DataSize + sizeof(wlan_ng_prism2_header)));
+        DBGPRINT(RT_DEBUG_ERROR, ("%s : Size is too large! (%d)\n", __func__, pRxBlk->DataSize + sizeof(wlan_ng_prism2_header)));
 		goto err_free_sk_buff;
     }
 
@@ -910,7 +910,7 @@
 
     if (skb_headroom(pOSPkt) < (sizeof(wlan_ng_prism2_header)+ header_len)) {
         if (pskb_expand_head(pOSPkt, (sizeof(wlan_ng_prism2_header) + header_len), 0, GFP_ATOMIC)) {
-	        DBGPRINT(RT_DEBUG_ERROR, ("%s : Reallocate header size of sk_buff fail!\n", __FUNCTION__));
+	        DBGPRINT(RT_DEBUG_ERROR, ("%s : Reallocate header size of sk_buff fail!\n", __func__));
 			goto err_free_sk_buff;
 	    } //end if
     } //end if
diff --git a/drivers/staging/rt2860/rt_linux.h b/drivers/staging/rt2860/rt_linux.h
index 0cc7cf2..0fd58f5 100644
--- a/drivers/staging/rt2860/rt_linux.h
+++ b/drivers/staging/rt2860/rt_linux.h
@@ -131,7 +131,7 @@
 #define RT_MOD_INC_USE_COUNT() \
 	if (!try_module_get(THIS_MODULE)) \
 	{ \
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: cannot reserve module\n", __FUNCTION__)); \
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: cannot reserve module\n", __func__)); \
 		return -1; \
 	}
 
diff --git a/drivers/staging/rt2860/rt_profile.c b/drivers/staging/rt2860/rt_profile.c
index cd7ffc8..326a3cb 100644
--- a/drivers/staging/rt2860/rt_profile.c
+++ b/drivers/staging/rt2860/rt_profile.c
@@ -1024,7 +1024,7 @@
 								pAd->MlmeAux.SsidLen = pAd->CommonCfg.SsidLen;
 								NdisZeroMemory(pAd->MlmeAux.Ssid, NDIS_802_11_LENGTH_SSID);
 								NdisMoveMemory(pAd->MlmeAux.Ssid, tmpbuf, pAd->MlmeAux.SsidLen);
-								DBGPRINT(RT_DEBUG_TRACE, ("%s::(SSID=%s)\n", __FUNCTION__, tmpbuf));
+								DBGPRINT(RT_DEBUG_TRACE, ("%s::(SSID=%s)\n", __func__, tmpbuf));
 							}
 						}
 					}
@@ -1043,7 +1043,7 @@
 								pAd->StaCfg.BssType = BSS_INFRA;
 							// Reset Ralink supplicant to not use, it will be set to start when UI set PMK key
 							pAd->StaCfg.WpaState = SS_NOTUSE;
-							DBGPRINT(RT_DEBUG_TRACE, ("%s::(NetworkType=%d)\n", __FUNCTION__, pAd->StaCfg.BssType));
+							DBGPRINT(RT_DEBUG_TRACE, ("%s::(NetworkType=%d)\n", __func__, pAd->StaCfg.BssType));
 						}
 					}
 #endif // CONFIG_STA_SUPPORT //
@@ -1337,7 +1337,7 @@
 
 				                        pAd->StaCfg.PortSecured = WPA_802_1X_PORT_NOT_SECURED;
 
-							DBGPRINT(RT_DEBUG_TRACE, ("%s::(EncrypType=%d)\n", __FUNCTION__, pAd->StaCfg.WepStatus));
+							DBGPRINT(RT_DEBUG_TRACE, ("%s::(EncrypType=%d)\n", __func__, pAd->StaCfg.WepStatus));
 						}
 #endif // CONFIG_STA_SUPPORT //
 					}
@@ -1363,7 +1363,7 @@
 							pAd->StaCfg.OrigWepStatus 	= pAd->StaCfg.WepStatus;
 							pAd->StaCfg.bMixCipher 		= FALSE;
 
-							DBGPRINT(RT_DEBUG_TRACE, ("%s::(EncrypType=%d)\n", __FUNCTION__, pAd->StaCfg.WepStatus));
+							DBGPRINT(RT_DEBUG_TRACE, ("%s::(EncrypType=%d)\n", __func__, pAd->StaCfg.WepStatus));
 						}
 #endif // CONFIG_STA_SUPPORT //
 					}
@@ -1400,7 +1400,7 @@
 							else
 							{
 								err = 1;
-								DBGPRINT(RT_DEBUG_ERROR, ("%s::(WPAPSK key-string required 8 ~ 64 characters!)\n", __FUNCTION__));
+								DBGPRINT(RT_DEBUG_ERROR, ("%s::(WPAPSK key-string required 8 ~ 64 characters!)\n", __func__));
 							}
 
 							if (err == 0)
@@ -1416,7 +1416,7 @@
 									pAd->StaCfg.WpaState = SS_NOTUSE;
 								}
 
-								DBGPRINT(RT_DEBUG_TRACE, ("%s::(WPAPSK=%s)\n", __FUNCTION__, tmpbuf));
+								DBGPRINT(RT_DEBUG_TRACE, ("%s::(WPAPSK=%s)\n", __func__, tmpbuf));
 							}
 						}
 					}
diff --git a/drivers/staging/rt2860/rtmp_def.h b/drivers/staging/rt2860/rtmp_def.h
index bb6f37b..be98214 100644
--- a/drivers/staging/rt2860/rtmp_def.h
+++ b/drivers/staging/rt2860/rtmp_def.h
@@ -333,7 +333,7 @@
 /* sanity check for apidx */
 #define MBSS_MR_APIDX_SANITY_CHECK(apidx) \
     { if (apidx > MAX_MBSSID_NUM) { \
-          printk("%s> Error! apidx = %d > MAX_MBSSID_NUM!\n", __FUNCTION__, apidx); \
+          printk("%s> Error! apidx = %d > MAX_MBSSID_NUM!\n", __func__, apidx); \
 	  apidx = MAIN_MBSSID; } }
 
 #define VALID_WCID(_wcid)	((_wcid) > 0 && (_wcid) < MAX_LEN_OF_MAC_TABLE )
diff --git a/drivers/staging/rt2860/sta_ioctl.c b/drivers/staging/rt2860/sta_ioctl.c
index 9347d11..3ea2b2c 100644
--- a/drivers/staging/rt2860/sta_ioctl.c
+++ b/drivers/staging/rt2860/sta_ioctl.c
@@ -2200,7 +2200,7 @@
 			}
 			break;
         default:
-            DBGPRINT(RT_DEBUG_TRACE, ("%s - unknow subcmd = %d\n", __FUNCTION__, subcmd));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s - unknown subcmd = %d\n", __func__, subcmd));
             break;
     }
 
@@ -2219,7 +2219,7 @@
 	MLME_DISASSOC_REQ_STRUCT	DisAssocReq;
 	MLME_DEAUTH_REQ_STRUCT      DeAuthReq;
 
-	DBGPRINT(RT_DEBUG_TRACE, ("====> %s\n", __FUNCTION__));
+	DBGPRINT(RT_DEBUG_TRACE, ("====> %s\n", __func__));
 
 	if (pMlme == NULL)
 		return -EINVAL;
@@ -2228,7 +2228,7 @@
 	{
 #ifdef IW_MLME_DEAUTH
 		case IW_MLME_DEAUTH:
-			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - IW_MLME_DEAUTH\n", __FUNCTION__));
+			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - IW_MLME_DEAUTH\n", __func__));
 			COPY_MAC_ADDR(DeAuthReq.Addr, pAd->CommonCfg.Bssid);
 			DeAuthReq.Reason = pMlme->reason_code;
 			MsgElem.MsgLen = sizeof(MLME_DEAUTH_REQ_STRUCT);
@@ -2243,7 +2243,7 @@
 #endif // IW_MLME_DEAUTH //
 #ifdef IW_MLME_DISASSOC
 		case IW_MLME_DISASSOC:
-			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - IW_MLME_DISASSOC\n", __FUNCTION__));
+			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - IW_MLME_DISASSOC\n", __func__));
 			COPY_MAC_ADDR(DisAssocReq.Addr, pAd->CommonCfg.Bssid);
 			DisAssocReq.Reason =  pMlme->reason_code;
 
@@ -2257,7 +2257,7 @@
 			break;
 #endif // IW_MLME_DISASSOC //
 		default:
-			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - Unknow Command\n", __FUNCTION__));
+			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - Unknown Command\n", __func__));
 			break;
 	}
 
@@ -2290,7 +2290,7 @@
             else if (param->value == IW_AUTH_WPA_VERSION_WPA2)
                 pAdapter->StaCfg.AuthMode = Ndis802_11AuthModeWPA2PSK;
 
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_VERSION - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_VERSION - param->value = %d!\n", __func__, param->value));
             break;
     	case IW_AUTH_CIPHER_PAIRWISE:
             if (param->value == IW_AUTH_CIPHER_NONE)
@@ -2321,7 +2321,7 @@
                 pAdapter->StaCfg.OrigWepStatus = pAdapter->StaCfg.WepStatus;
                 pAdapter->StaCfg.PairCipher = Ndis802_11Encryption3Enabled;
             }
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_CIPHER_PAIRWISE - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_CIPHER_PAIRWISE - param->value = %d!\n", __func__, param->value));
             break;
     	case IW_AUTH_CIPHER_GROUP:
             if (param->value == IW_AUTH_CIPHER_NONE)
@@ -2341,7 +2341,7 @@
             {
                 pAdapter->StaCfg.GroupCipher = Ndis802_11Encryption3Enabled;
             }
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_CIPHER_GROUP - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_CIPHER_GROUP - param->value = %d!\n", __func__, param->value));
             break;
     	case IW_AUTH_KEY_MGMT:
             if (param->value == IW_AUTH_KEY_MGMT_802_1X)
@@ -2370,12 +2370,12 @@
             {
 				STA_PORT_SECURED(pAdapter);
             }
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_KEY_MGMT - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_KEY_MGMT - param->value = %d!\n", __func__, param->value));
             break;
     	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
             break;
     	case IW_AUTH_PRIVACY_INVOKED:
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_PRIVACY_INVOKED - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_PRIVACY_INVOKED - param->value = %d!\n", __func__, param->value));
     		break;
     	case IW_AUTH_DROP_UNENCRYPTED:
             if (param->value != 0)
@@ -2384,7 +2384,7 @@
 			{
 				STA_PORT_SECURED(pAdapter);
 			}
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_VERSION - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_VERSION - param->value = %d!\n", __func__, param->value));
     		break;
     	case IW_AUTH_80211_AUTH_ALG:
 			if (param->value & IW_AUTH_ALG_SHARED_KEY)
@@ -2397,10 +2397,10 @@
 			}
             else
 				return -EINVAL;
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_80211_AUTH_ALG - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_80211_AUTH_ALG - param->value = %d!\n", __func__, param->value));
 			break;
     	case IW_AUTH_WPA_ENABLED:
-    		DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_ENABLED - Driver supports WPA!(param->value = %d)\n", __FUNCTION__, param->value));
+    		DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_ENABLED - Driver supports WPA!(param->value = %d)\n", __func__, param->value));
     		break;
     	default:
     		return -EOPNOTSUPP;
@@ -2508,7 +2508,7 @@
 		pAdapter->SharedKey[BSS0][keyIdx].CipherAlg = CIPHER_NONE;
 		AsicRemoveSharedKeyEntry(pAdapter, 0, (UCHAR)keyIdx);
         NdisZeroMemory(&pAdapter->SharedKey[BSS0][keyIdx], sizeof(CIPHER_KEY));
-        DBGPRINT(RT_DEBUG_TRACE, ("%s::Remove all keys!(encoding->flags = %x)\n", __FUNCTION__, encoding->flags));
+        DBGPRINT(RT_DEBUG_TRACE, ("%s::Remove all keys!(encoding->flags = %x)\n", __func__, encoding->flags));
     }
 					else
     {
@@ -2520,15 +2520,15 @@
         if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
         {
             pAdapter->StaCfg.DefaultKeyId = keyIdx;
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::DefaultKeyId = %d\n", __FUNCTION__, pAdapter->StaCfg.DefaultKeyId));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::DefaultKeyId = %d\n", __func__, pAdapter->StaCfg.DefaultKeyId));
         }
 
         switch (alg) {
     		case IW_ENCODE_ALG_NONE:
-                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_NONE\n", __FUNCTION__));
+                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_NONE\n", __func__));
     			break;
     		case IW_ENCODE_ALG_WEP:
-                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_WEP - ext->key_len = %d, keyIdx = %d\n", __FUNCTION__, ext->key_len, keyIdx));
+                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_WEP - ext->key_len = %d, keyIdx = %d\n", __func__, ext->key_len, keyIdx));
     			if (ext->key_len == MAX_WEP_KEY_SIZE)
                 {
         			pAdapter->SharedKey[BSS0][keyIdx].KeyLen = MAX_WEP_KEY_SIZE;
@@ -2546,7 +2546,7 @@
 			    NdisMoveMemory(pAdapter->SharedKey[BSS0][keyIdx].Key, ext->key, ext->key_len);
     			break;
             case IW_ENCODE_ALG_TKIP:
-                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_TKIP - keyIdx = %d, ext->key_len = %d\n", __FUNCTION__, keyIdx, ext->key_len));
+                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_TKIP - keyIdx = %d, ext->key_len = %d\n", __func__, keyIdx, ext->key_len));
                 if (ext->key_len == 32)
                 {
                     if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
diff --git a/drivers/staging/rt2870/2870_main_dev.c b/drivers/staging/rt2870/2870_main_dev.c
index 91da4e2..04c764d 100644
--- a/drivers/staging/rt2870/2870_main_dev.c
+++ b/drivers/staging/rt2870/2870_main_dev.c
@@ -263,7 +263,7 @@
 	 * This is important in preemption kernels, which transfer the flow
 	 * of execution immediately upon a complete().
 	 */
-	DBGPRINT(RT_DEBUG_TRACE,( "<---%s\n",__FUNCTION__));
+	DBGPRINT(RT_DEBUG_TRACE,( "<---%s\n",__func__));
 
 	pObj->MLMEThr_pid = THREAD_PID_INIT_VALUE;
 
@@ -465,7 +465,7 @@
 	 * This is important in preemption kernels, which transfer the flow
 	 * of execution immediately upon a complete().
 	 */
-	DBGPRINT(RT_DEBUG_TRACE,( "<---%s\n",__FUNCTION__));
+	DBGPRINT(RT_DEBUG_TRACE,( "<---%s\n",__func__));
 
 	pObj->TimerQThr_pid = THREAD_PID_INIT_VALUE;
 
@@ -1258,7 +1258,7 @@
 
 	if (!(pAd->BulkInEpAddr && pAd->BulkOutEpAddr[0]))
 	{
-		printk("%s: Could not find both bulk-in and bulk-out endpoints\n", __FUNCTION__);
+		printk("%s: Could not find both bulk-in and bulk-out endpoints\n", __func__);
 		return FALSE;
 	}
 
diff --git a/drivers/staging/rt2870/common/2870_rtmp_init.c b/drivers/staging/rt2870/common/2870_rtmp_init.c
index 5d89a31..9f5143b 100644
--- a/drivers/staging/rt2870/common/2870_rtmp_init.c
+++ b/drivers/staging/rt2870/common/2870_rtmp_init.c
@@ -1087,7 +1087,7 @@
 	if (pRxWI->MPDUtotalByteCount > ThisFrameLen)
 	{
 		DBGPRINT(RT_DEBUG_ERROR, ("%s():pRxWIMPDUtotalByteCount(%d) large than RxDMALen(%ld)\n",
-									__FUNCTION__, pRxWI->MPDUtotalByteCount, ThisFrameLen));
+									__func__, pRxWI->MPDUtotalByteCount, ThisFrameLen));
 		goto label_null;
 	}
 #ifdef RT_BIG_ENDIAN
@@ -1098,7 +1098,7 @@
 	pSkb = dev_alloc_skb(ThisFrameLen);
 	if (pSkb == NULL)
 	{
-		DBGPRINT(RT_DEBUG_ERROR,("%s():Cannot Allocate sk buffer for this Bulk-In buffer!\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR,("%s():Cannot Allocate sk buffer for this Bulk-In buffer!\n", __func__));
 		goto label_null;
 	}
 
diff --git a/drivers/staging/rt2870/common/ba_action.c b/drivers/staging/rt2870/common/ba_action.c
index 95addb20b..d9f7381 100644
--- a/drivers/staging/rt2870/common/ba_action.c
+++ b/drivers/staging/rt2870/common/ba_action.c
@@ -595,7 +595,7 @@
 
 		pBAEntry->ORIBATimer.TimerValue = 0;	//pFrame->TimeOutValue;
 
-		DBGPRINT(RT_DEBUG_TRACE,("%s : TXBAbitmap = %x, BAWinSize = %d, TimeOut = %ld\n", __FUNCTION__, pEntry->TXBAbitmap,
+		DBGPRINT(RT_DEBUG_TRACE,("%s : TXBAbitmap = %x, BAWinSize = %d, TimeOut = %ld\n", __func__, pEntry->TXBAbitmap,
 								 pBAEntry->BAWinSize, pBAEntry->ORIBATimer.TimerValue));
 
 		// SEND BAR ;
@@ -669,7 +669,7 @@
 		ba_refresh_reordering_mpdus(pAd, pBAEntry);
 	}
 
-	DBGPRINT(RT_DEBUG_TRACE,("%s(%ld): Idx = %d, BAWinSize(req %d) = %d\n", __FUNCTION__, pAd->BATable.numAsRecipient, Idx,
+	DBGPRINT(RT_DEBUG_TRACE,("%s(%ld): Idx = %d, BAWinSize(req %d) = %d\n", __func__, pAd->BATable.numAsRecipient, Idx,
 							 pFrame->BaParm.BufSize, BAWinSize));
 
 	// Start fill in parameters.
@@ -911,7 +911,7 @@
 		return;
 	}
 
-	DBGPRINT(RT_DEBUG_TRACE,("%s===>Wcid=%d.TID=%d \n", __FUNCTION__, Wcid, TID));
+	DBGPRINT(RT_DEBUG_TRACE,("%s===>Wcid=%d.TID=%d \n", __func__, Wcid, TID));
 
 	pBAEntry = &pAd->BATable.BAOriEntry[Idx];
 	DBGPRINT(RT_DEBUG_TRACE,("\t===>Idx = %ld, Wcid=%d.TID=%d, ORI_BA_Status = %d \n", Idx, Wcid, TID, pBAEntry->ORI_BA_Status));
@@ -970,7 +970,7 @@
 	if (Idx == 0)
 		return;
 
-	DBGPRINT(RT_DEBUG_TRACE,("%s===>Wcid=%d.TID=%d \n", __FUNCTION__, Wcid, TID));
+	DBGPRINT(RT_DEBUG_TRACE,("%s===>Wcid=%d.TID=%d \n", __func__, Wcid, TID));
 
 
 	pBAEntry = &pAd->BATable.BARecEntry[Idx];
@@ -1181,7 +1181,7 @@
 	PULONG      ptemp;
 	PMAC_TABLE_ENTRY	pMacEntry;
 
-	DBGPRINT(RT_DEBUG_TRACE, ("%s ==> (Wcid = %d)\n", __FUNCTION__, Elem->Wcid));
+	DBGPRINT(RT_DEBUG_TRACE, ("%s ==> (Wcid = %d)\n", __func__, Elem->Wcid));
 
 	//hex_dump("AddBAReq", Elem->Msg, Elem->MsgLen);
 
@@ -1265,7 +1265,7 @@
 	MiniportMMRequest(pAd, QID_AC_BE, pOutBuffer, FrameLen);
 	MlmeFreeMemory(pAd, pOutBuffer);
 
-	DBGPRINT(RT_DEBUG_TRACE, ("%s(%d): TID(%d), BufSize(%d) <== \n", __FUNCTION__, Elem->Wcid, ADDframe.BaParm.TID,
+	DBGPRINT(RT_DEBUG_TRACE, ("%s(%d): TID(%d), BufSize(%d) <== \n", __func__, Elem->Wcid, ADDframe.BaParm.TID,
 							  ADDframe.BaParm.BufSize));
 }
 
@@ -1284,7 +1284,7 @@
 	if (Elem->Wcid >= MAX_LEN_OF_MAC_TABLE)
 		return;
 
-	DBGPRINT(RT_DEBUG_TRACE, ("%s ==> Wcid(%d)\n", __FUNCTION__, Elem->Wcid));
+	DBGPRINT(RT_DEBUG_TRACE, ("%s ==> Wcid(%d)\n", __func__, Elem->Wcid));
 
 	//hex_dump("PeerAddBARspAction()", Elem->Msg, Elem->MsgLen);
 
@@ -1325,7 +1325,7 @@
 	//PUCHAR				pOutBuffer = NULL;
 	PFRAME_DELBA_REQ    pDelFrame = NULL;
 
-	DBGPRINT(RT_DEBUG_TRACE,("%s ==>\n", __FUNCTION__));
+	DBGPRINT(RT_DEBUG_TRACE,("%s ==>\n", __func__));
 	//DELBA Request from unknown peer, ignore this.
 	if (PeerDelBAActionSanity(pAd, Elem->Wcid, Elem->Msg, Elem->MsgLen))
 	{
@@ -1362,7 +1362,7 @@
 
 	TID = (UCHAR)pFrame->BARControl.TID;
 
-	DBGPRINT(RT_DEBUG_TRACE, ("%s(): BAR-Wcid(%ld), Tid (%d)\n", __FUNCTION__, Wcid, TID));
+	DBGPRINT(RT_DEBUG_TRACE, ("%s(): BAR-Wcid(%ld), Tid (%d)\n", __func__, Wcid, TID));
 	//hex_dump("BAR", (PCHAR) pFrame, MsgLen);
 	// Do nothing if the driver is starting halt state.
 	// This might happen when timer already been fired before cancel timer with mlmehalt
diff --git a/drivers/staging/rt2870/common/cmm_data.c b/drivers/staging/rt2870/common/cmm_data.c
index 4477a8e..fd809ab 100644
--- a/drivers/staging/rt2870/common/cmm_data.c
+++ b/drivers/staging/rt2870/common/cmm_data.c
@@ -2009,7 +2009,7 @@
 		}
 		else
 		{
-			printk("\n%s: Impossible Wcid = %d !!!!!\n", __FUNCTION__, wcid);
+			printk("\n%s: Impossible Wcid = %d !!!!!\n", __func__, wcid);
 		}
 	}
 
diff --git a/drivers/staging/rt2870/common/cmm_info.c b/drivers/staging/rt2870/common/cmm_info.c
index 40792e1..47a1b1a 100644
--- a/drivers/staging/rt2870/common/cmm_info.c
+++ b/drivers/staging/rt2870/common/cmm_info.c
@@ -2331,7 +2331,7 @@
 	wrq->u.data.length = sizeof(RT_802_11_MAC_TABLE);
 	if (copy_to_user(wrq->u.data.pointer, &MacTab, wrq->u.data.length))
 	{
-		DBGPRINT(RT_DEBUG_TRACE, ("%s: copy_to_user() fail\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_TRACE, ("%s: copy_to_user() fail\n", __func__));
 	}
 
 	msg = (CHAR *) kmalloc(sizeof(CHAR)*(MAX_LEN_OF_MAC_TABLE*MAC_LINE_LEN), MEM_ALLOC_FLAG);
diff --git a/drivers/staging/rt2870/common/dfs.c b/drivers/staging/rt2870/common/dfs.c
index 23cf151..b09bba5 100644
--- a/drivers/staging/rt2870/common/dfs.c
+++ b/drivers/staging/rt2870/common/dfs.c
@@ -428,7 +428,7 @@
 
 	pAd->CommonCfg.RadarDetect.ChMovingTime = Value;
 
-	DBGPRINT(RT_DEBUG_TRACE, ("%s:: %d\n", __FUNCTION__,
+	DBGPRINT(RT_DEBUG_TRACE, ("%s:: %d\n", __func__,
 		pAd->CommonCfg.RadarDetect.ChMovingTime));
 
 	return TRUE;
@@ -444,7 +444,7 @@
 
 	pAd->CommonCfg.RadarDetect.LongPulseRadarTh = Value;
 
-	DBGPRINT(RT_DEBUG_TRACE, ("%s:: %d\n", __FUNCTION__,
+	DBGPRINT(RT_DEBUG_TRACE, ("%s:: %d\n", __func__,
 		pAd->CommonCfg.RadarDetect.LongPulseRadarTh));
 
 	return TRUE;
diff --git a/drivers/staging/rt2870/common/rtmp_init.c b/drivers/staging/rt2870/common/rtmp_init.c
index 87028fd..870a00d 100644
--- a/drivers/staging/rt2870/common/rtmp_init.c
+++ b/drivers/staging/rt2870/common/rtmp_init.c
@@ -2746,7 +2746,7 @@
 #ifdef BIN_IN_FILE
 #define NICLF_DEFAULT_USE()	\
 	flg_default_firm_use = TRUE; \
-	printk("%s - Use default firmware!\n", __FUNCTION__);
+	printk("%s - Use default firmware!\n", __func__);
 
 	NDIS_STATUS		Status = NDIS_STATUS_SUCCESS;
 	PUCHAR			src;
@@ -2761,7 +2761,7 @@
 	BOOLEAN			flg_default_firm_use = FALSE;
 
 
-	DBGPRINT(RT_DEBUG_TRACE, ("===> %s\n", __FUNCTION__));
+	DBGPRINT(RT_DEBUG_TRACE, ("===> %s\n", __func__));
 
 	/* init */
 	pFirmwareImage = NULL;
@@ -2784,7 +2784,7 @@
     if (pFirmwareImage == NULL)
 	{
 		/* allocate fail, use default firmware array in firmware.h */
-		printk("%s - Allocate memory fail!\n", __FUNCTION__);
+		printk("%s - Allocate memory fail!\n", __func__);
 		NICLF_DEFAULT_USE();
     }
 	else
@@ -2805,7 +2805,7 @@
 			if (IS_ERR(srcf))
 			{
 				printk("%s - Error %ld opening %s\n",
-					   __FUNCTION__, -PTR_ERR(srcf), src);
+					   __func__, -PTR_ERR(srcf), src);
 				NICLF_DEFAULT_USE();
 				break;
 			} /* End of if */
@@ -2813,7 +2813,7 @@
 			/* the object must have a read method */
 			if ((srcf->f_op == NULL) || (srcf->f_op->read == NULL))
 			{
-				printk("%s - %s does not have a write method\n", __FUNCTION__, src);
+				printk("%s - %s does not have a read method\n", __func__, src);
 				NICLF_DEFAULT_USE();
 				break;
 			} /* End of if */
@@ -2827,7 +2827,7 @@
 			if (FileLength != MAX_FIRMWARE_IMAGE_SIZE)
 			{
 				printk("%s: error file length (=%d) in RT2860AP.BIN\n",
-					   __FUNCTION__, FileLength);
+					   __func__, FileLength);
 				NICLF_DEFAULT_USE();
 				break;
 			}
@@ -2850,7 +2850,7 @@
 					/* CRC fail */
 					printk("%s: CRC = 0x%02x 0x%02x "
 						   "error, should be 0x%02x 0x%02x\n",
-						   __FUNCTION__,
+						   __func__,
 						   pFirmwareImage[MAX_FIRMWARE_IMAGE_SIZE-2],
 						   pFirmwareImage[MAX_FIRMWARE_IMAGE_SIZE-1],
 						   (UCHAR)(crc>>8), (UCHAR)(crc));
@@ -2869,7 +2869,7 @@
 											((FIRMWARE_MAJOR_VERSION << 8) +
 									  	 	 FIRMWARE_MINOR_VERSION))
 					{
-						printk("%s: firmware version too old!\n", __FUNCTION__);
+						printk("%s: firmware version too old!\n", __func__);
 						NICLF_DEFAULT_USE();
 						break;
 					} /* End of if */
@@ -3024,10 +3024,10 @@
 
 #if 0
     DBGPRINT(RT_DEBUG_TRACE,
-			 ("<=== %s (src=%s, status=%d)\n", __FUNCTION__, src, Status));
+			 ("<=== %s (src=%s, status=%d)\n", __func__, src, Status));
 #else
     DBGPRINT(RT_DEBUG_TRACE,
-			 ("<=== %s (status=%d)\n", __FUNCTION__, Status));
+			 ("<=== %s (status=%d)\n", __func__, Status));
 #endif
     return Status;
 } /* End of NICLoadFirmware */
diff --git a/drivers/staging/rt2870/common/rtusb_bulk.c b/drivers/staging/rt2870/common/rtusb_bulk.c
index 3c6ba1b..c46d916 100644
--- a/drivers/staging/rt2870/common/rtusb_bulk.c
+++ b/drivers/staging/rt2870/common/rtusb_bulk.c
@@ -329,7 +329,7 @@
 
 		if (pTxInfo->QSEL != FIFO_EDCA)
 		{
-			printk("%s(): ====> pTxInfo->QueueSel(%d)!= FIFO_EDCA!!!!\n", __FUNCTION__, pTxInfo->QSEL);
+			printk("%s(): ====> pTxInfo->QueueSel(%d)!= FIFO_EDCA!!!!\n", __func__, pTxInfo->QSEL);
 			printk("\tCWPos=%ld, NBPos=%ld, ENBPos=%ld, bCopy=%d!\n", pHTTXContext->CurWritePosition, pHTTXContext->NextBulkOutPosition, pHTTXContext->ENextBulkOutPosition, pHTTXContext->bCopySavePad);
 			hex_dump("Wrong QSel Pkt:", (PUCHAR)&pWirelessPkt[TmpBulkEndPos], (pHTTXContext->CurWritePosition - pHTTXContext->NextBulkOutPosition));
 		}
@@ -942,7 +942,7 @@
 	pTxInfo = (PTXINFO_STRUC)pMLMEContext->TransferBuffer;
 	if (pTxInfo->QSEL != FIFO_EDCA)
 	{
-			printk("%s(): ====> pTxInfo->QueueSel(%d)!= FIFO_EDCA!!!!\n", __FUNCTION__, pTxInfo->QSEL);
+			printk("%s(): ====> pTxInfo->QueueSel(%d)!= FIFO_EDCA!!!!\n", __func__, pTxInfo->QSEL);
 			printk("\tMLME_Index=%d!\n", Index);
 			hex_dump("Wrong QSel Pkt:", (PUCHAR)pMLMEContext->TransferBuffer, pTxInfo->USBDMATxPktLen);
 	}
diff --git a/drivers/staging/rt2870/common/spectrum.c b/drivers/staging/rt2870/common/spectrum.c
index abba840..43782ce 100644
--- a/drivers/staging/rt2870/common/spectrum.c
+++ b/drivers/staging/rt2870/common/spectrum.c
@@ -48,7 +48,7 @@
 	if (pAd->CommonCfg.pMeasureReqTab)
 		NdisZeroMemory(pAd->CommonCfg.pMeasureReqTab, sizeof(MEASURE_REQ_TAB));
 	else
-		DBGPRINT(RT_DEBUG_ERROR, ("%s Fail to alloc memory for pAd->CommonCfg.pMeasureReqTab.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s Fail to alloc memory for pAd->CommonCfg.pMeasureReqTab.\n", __func__));
 
 	return;
 }
@@ -76,7 +76,7 @@
 
 	if (pTab == NULL)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab doesn't exist.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab doesn't exist.\n", __func__));
 		return NULL;
 	}
 
@@ -113,7 +113,7 @@
 
 	if(pTab == NULL)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab doesn't exist.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab doesn't exist.\n", __func__));
 		return NULL;
 	}
 
@@ -174,7 +174,7 @@
 		else
 		{
 			pEntry = NULL;
-			DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab tab full.\n", __FUNCTION__));
+			DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab tab full.\n", __func__));
 		}
 
 		// add this Neighbor entry into HASH table
@@ -209,7 +209,7 @@
 
 	if(pTab == NULL)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab doesn't exist.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab doesn't exist.\n", __func__));
 		return;
 	}
 
@@ -266,7 +266,7 @@
 	if (pAd->CommonCfg.pTpcReqTab)
 		NdisZeroMemory(pAd->CommonCfg.pTpcReqTab, sizeof(TPC_REQ_TAB));
 	else
-		DBGPRINT(RT_DEBUG_ERROR, ("%s Fail to alloc memory for pAd->CommonCfg.pTpcReqTab.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s Fail to alloc memory for pAd->CommonCfg.pTpcReqTab.\n", __func__));
 
 	return;
 }
@@ -294,7 +294,7 @@
 
 	if (pTab == NULL)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab doesn't exist.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab doesn't exist.\n", __func__));
 		return NULL;
 	}
 
@@ -332,7 +332,7 @@
 
 	if(pTab == NULL)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab doesn't exist.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab doesn't exist.\n", __func__));
 		return NULL;
 	}
 
@@ -393,7 +393,7 @@
 		else
 		{
 			pEntry = NULL;
-			DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab tab full.\n", __FUNCTION__));
+			DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab tab full.\n", __func__));
 		}
 
 		// add this Neighbor entry into HASH table
@@ -428,7 +428,7 @@
 
 	if(pTab == NULL)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab doesn't exist.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab doesn't exist.\n", __func__));
 		return;
 	}
 
@@ -781,7 +781,7 @@
 	NStatus = MlmeAllocateMemory(pAd, (PVOID)&pOutBuffer);  //Get an unused nonpaged memory
 	if(NStatus != NDIS_STATUS_SUCCESS)
 	{
-		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __func__));
 		return;
 	}
 	NdisMoveMemory(pOutBuffer, (PCHAR)&ActHdr, sizeof(HEADER_802_11));
@@ -843,7 +843,7 @@
 	NStatus = MlmeAllocateMemory(pAd, (PVOID)&pOutBuffer);  //Get an unused nonpaged memory
 	if(NStatus != NDIS_STATUS_SUCCESS)
 	{
-		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __func__));
 		return;
 	}
 	NdisMoveMemory(pOutBuffer, (PCHAR)&ActHdr, sizeof(HEADER_802_11));
@@ -897,7 +897,7 @@
 	NStatus = MlmeAllocateMemory(pAd, (PVOID)&pOutBuffer);  //Get an unused nonpaged memory
 	if(NStatus != NDIS_STATUS_SUCCESS)
 	{
-		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __func__));
 		return;
 	}
 	NdisMoveMemory(pOutBuffer, (PCHAR)&ActHdr, sizeof(HEADER_802_11));
@@ -949,7 +949,7 @@
 	NStatus = MlmeAllocateMemory(pAd, (PVOID)&pOutBuffer);  //Get an unused nonpaged memory
 	if(NStatus != NDIS_STATUS_SUCCESS)
 	{
-		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __func__));
 		return;
 	}
 	NdisMoveMemory(pOutBuffer, (PCHAR)&ActHdr, sizeof(HEADER_802_11));
@@ -1002,7 +1002,7 @@
 	NStatus = MlmeAllocateMemory(pAd, (PVOID)&pOutBuffer);  //Get an unused nonpaged memory
 	if(NStatus != NDIS_STATUS_SUCCESS)
 	{
-		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __func__));
 		return;
 	}
 	NdisMoveMemory(pOutBuffer, (PCHAR)&ActHdr, sizeof(HEADER_802_11));
@@ -1595,7 +1595,7 @@
 
 	if ((pMeasureReportInfo = kmalloc(sizeof(MEASURE_RPI_REPORT), GFP_ATOMIC)) == NULL)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s unable to alloc memory for measure report buffer (size=%d).\n", __FUNCTION__, sizeof(MEASURE_RPI_REPORT)));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s unable to alloc memory for measure report buffer (size=%d).\n", __func__, sizeof(MEASURE_RPI_REPORT)));
 		return;
 	}
 
@@ -1704,7 +1704,7 @@
 		{
 			TpcReqDelete(pAd, pEntry->DialogToken);
 			DBGPRINT(RT_DEBUG_TRACE, ("%s: DialogToken=%x, TxPwr=%d, LinkMargin=%d\n",
-				__FUNCTION__, DialogToken, TpcRepInfo.TxPwr, TpcRepInfo.LinkMargin));
+				__func__, DialogToken, TpcRepInfo.TxPwr, TpcRepInfo.LinkMargin));
 		}
 	}
 
@@ -1820,7 +1820,7 @@
 				MeasureReqType = simple_strtol(thisChar, 0, 16);
 				if (MeasureReqType > 3)
 				{
-					DBGPRINT(RT_DEBUG_ERROR, ("%s: unknow MeasureReqType(%d)\n", __FUNCTION__, MeasureReqType));
+					DBGPRINT(RT_DEBUG_ERROR, ("%s: unknown MeasureReqType(%d)\n", __func__, MeasureReqType));
 					return TRUE;
 				}
 				break;
@@ -1832,10 +1832,10 @@
 		ArgIdx++;
 	}
 
-	DBGPRINT(RT_DEBUG_TRACE, ("%s::Aid = %d, MeasureReqType=%d MeasureCh=%d\n", __FUNCTION__, Aid, MeasureReqType, MeasureCh));
+	DBGPRINT(RT_DEBUG_TRACE, ("%s::Aid = %d, MeasureReqType=%d MeasureCh=%d\n", __func__, Aid, MeasureReqType, MeasureCh));
 	if (!VALID_WCID(Aid))
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: unknow sta of Aid(%d)\n", __FUNCTION__, Aid));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: unknown sta of Aid(%d)\n", __func__, Aid));
 		return TRUE;
 	}
 
@@ -1860,10 +1860,10 @@
 
 	Aid = simple_strtol(arg, 0, 16);
 
-	DBGPRINT(RT_DEBUG_TRACE, ("%s::Aid = %d\n", __FUNCTION__, Aid));
+	DBGPRINT(RT_DEBUG_TRACE, ("%s::Aid = %d\n", __func__, Aid));
 	if (!VALID_WCID(Aid))
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: unknow sta of Aid(%d)\n", __FUNCTION__, Aid));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: unknown sta of Aid(%d)\n", __func__, Aid));
 		return TRUE;
 	}
 
diff --git a/drivers/staging/rt2870/rt_ate.c b/drivers/staging/rt2870/rt_ate.c
index e99b3da..27c763ee 100644
--- a/drivers/staging/rt2870/rt_ate.c
+++ b/drivers/staging/rt2870/rt_ate.c
@@ -314,7 +314,7 @@
 				Bbp94 = BBPR94_DEFAULT;
 			}
 
-			ATEDBGPRINT(RT_DEBUG_TRACE, ("%s (TxPower=%d, R=%ld, BBP_R94=%d)\n", __FUNCTION__, TxPower, R, Bbp94));
+			ATEDBGPRINT(RT_DEBUG_TRACE, ("%s (TxPower=%d, R=%ld, BBP_R94=%d)\n", __func__, TxPower, R, Bbp94));
 		}
 		else// 5.5 GHz
 		{
@@ -341,7 +341,7 @@
 				R = (ULONG) TxPower;
 			}
 
-			ATEDBGPRINT(RT_DEBUG_TRACE, ("%s (TxPower=%d, R=%lu)\n", __FUNCTION__, TxPower, R));
+			ATEDBGPRINT(RT_DEBUG_TRACE, ("%s (TxPower=%d, R=%lu)\n", __func__, TxPower, R));
 		}
 
 		if (pAd->ate.Channel <= 14)
@@ -454,7 +454,7 @@
 		Bbp94 = BBPR94_DEFAULT;
 	}
 
-	ATEDBGPRINT(RT_DEBUG_TRACE, ("%s (TxPower=%d, R3=%ld, BBP_R94=%d)\n", __FUNCTION__, TxPower, R, Bbp94));
+	ATEDBGPRINT(RT_DEBUG_TRACE, ("%s (TxPower=%d, R3=%ld, BBP_R94=%d)\n", __func__, TxPower, R, Bbp94));
 
 		if (pAd->ate.Channel <= 14)
 		{
@@ -2261,7 +2261,7 @@
 	UINT32			FileLength = 0;
 	UINT32 			value = simple_strtol(arg, 0, 10);
 
-	ATEDBGPRINT(RT_DEBUG_ERROR, ("===> %s (value=%d)\n\n", __FUNCTION__, value));
+	ATEDBGPRINT(RT_DEBUG_ERROR, ("===> %s (value=%d)\n\n", __func__, value));
 
 	if (value > 0)
 	{
@@ -2285,14 +2285,14 @@
 
 			if (IS_ERR(srcf))
 			{
-				ate_print("%s - Error %ld opening %s\n", __FUNCTION__, -PTR_ERR(srcf), src);
+				ate_print("%s - Error %ld opening %s\n", __func__, -PTR_ERR(srcf), src);
 				break;
 			}
 
 			/* the object must have a read method */
 			if ((srcf->f_op == NULL) || (srcf->f_op->read == NULL))
 			{
-				ate_print("%s - %s does not have a read method\n", __FUNCTION__, src);
+				ate_print("%s - %s does not have a read method\n", __func__, src);
 				break;
 			}
 
@@ -2305,7 +2305,7 @@
 			if (FileLength != EEPROM_SIZE)
 			{
 				ate_print("%s: error file length (=%d) in e2p.bin\n",
-					   __FUNCTION__, FileLength);
+					   __func__, FileLength);
 				break;
 			}
 			else
@@ -2337,7 +2337,7 @@
 		current->fsuid = orgfsuid;
 		current->fsgid = orgfsgid;
 	}
-    ATEDBGPRINT(RT_DEBUG_ERROR, ("<=== %s (ret=%d)\n", __FUNCTION__, ret));
+    ATEDBGPRINT(RT_DEBUG_ERROR, ("<=== %s (ret=%d)\n", __func__, ret));
 
     return ret;
 
@@ -2350,12 +2350,12 @@
 	USHORT 			WriteEEPROM[(EEPROM_SIZE/2)];
 	struct iwreq	*wrq = (struct iwreq *)arg;
 
-	ATEDBGPRINT(RT_DEBUG_TRACE, ("===> %s (wrq->u.data.length = %d)\n\n", __FUNCTION__, wrq->u.data.length));
+	ATEDBGPRINT(RT_DEBUG_TRACE, ("===> %s (wrq->u.data.length = %d)\n\n", __func__, wrq->u.data.length));
 
 	if (wrq->u.data.length != EEPROM_SIZE)
 	{
 		ate_print("%s: error length (=%d) from host\n",
-			   __FUNCTION__, wrq->u.data.length);
+			   __func__, wrq->u.data.length);
 		return FALSE;
 	}
 	else/* (wrq->u.data.length == EEPROM_SIZE) */
@@ -2374,7 +2374,7 @@
 		} while(FALSE);
 		}
 
-    ATEDBGPRINT(RT_DEBUG_TRACE, ("<=== %s\n", __FUNCTION__));
+    ATEDBGPRINT(RT_DEBUG_TRACE, ("<=== %s\n", __func__));
 
     return TRUE;
 
@@ -4083,7 +4083,7 @@
 
 	Command_Id = ntohs(pRaCfg->command_id);
 
-	ATEDBGPRINT(RT_DEBUG_TRACE,("\n%s: Command_Id = 0x%04x !\n", __FUNCTION__, Command_Id));
+	ATEDBGPRINT(RT_DEBUG_TRACE,("\n%s: Command_Id = 0x%04x !\n", __func__, Command_Id));
 
 	switch (Command_Id)
 	{
@@ -6117,7 +6117,7 @@
 					pAd->ate.TxAntennaSel = 2;
 		            break;
 		        default:
-		            DBGPRINT(RT_DEBUG_TRACE, ("%s -- Sth. wrong!  : return FALSE; \n", __FUNCTION__));
+		            DBGPRINT(RT_DEBUG_TRACE, ("%s -- Sth. wrong!  : return FALSE; \n", __func__));
 		            return FALSE;
 		    }
 			break;/* case BBP_R1 */
@@ -6155,13 +6155,13 @@
 					pAd->ate.RxAntennaSel = 3;
 		            break;
 		        default:
-		            DBGPRINT(RT_DEBUG_ERROR, ("%s -- Impossible!  : return FALSE; \n", __FUNCTION__));
+		            DBGPRINT(RT_DEBUG_ERROR, ("%s -- Impossible!  : return FALSE; \n", __func__));
 		            return FALSE;
 		    }
 			break;/* case BBP_R3 */
 
         default:
-            DBGPRINT(RT_DEBUG_ERROR, ("%s -- Sth. wrong!  : return FALSE; \n", __FUNCTION__));
+            DBGPRINT(RT_DEBUG_ERROR, ("%s -- Sth. wrong!  : return FALSE; \n", __func__));
             return FALSE;
 
 	}
diff --git a/drivers/staging/rt2870/rt_linux.c b/drivers/staging/rt2870/rt_linux.c
index f2ea8eb..992e3d1 100644
--- a/drivers/staging/rt2870/rt_linux.c
+++ b/drivers/staging/rt2870/rt_linux.c
@@ -404,7 +404,7 @@
  	skb_put(GET_OS_PKT_TYPE(pPacket), HeaderLen+DataLen);
 
 	RTMP_SET_PACKET_SOURCE(pPacket, PKTSRC_NDIS);
-//	printk("%s : pPacket = %p, len = %d\n", __FUNCTION__, pPacket, GET_OS_PKT_LEN(pPacket));
+//	printk("%s : pPacket = %p, len = %d\n", __func__, pPacket, GET_OS_PKT_LEN(pPacket));
 	*ppPacket = pPacket;
 	return NDIS_STATUS_SUCCESS;
 }
@@ -814,13 +814,13 @@
 
 	if (event_table_len == 0)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s : The type(%0x02x) is not valid.\n", __FUNCTION__, type));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s : The type(%0x02x) is not valid.\n", __func__, type));
 		return;
 	}
 
 	if (event >= event_table_len)
 	{
-		DBGPRINT(RT_DEBUG_ERROR, ("%s : The event(%0x02x) is not valid.\n", __FUNCTION__, event));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s : The event(%0x02x) is not valid.\n", __func__, event));
 		return;
 	}
 
@@ -858,14 +858,14 @@
 		//send wireless event
 	    wireless_send_event(pAd->net_dev, IWEVCUSTOM, &wrqu, pBuf);
 
-		//DBGPRINT(RT_DEBUG_TRACE, ("%s : %s\n", __FUNCTION__, pBuf));
+		//DBGPRINT(RT_DEBUG_TRACE, ("%s : %s\n", __func__, pBuf));
 
 		kfree(pBuf);
 	}
 	else
-		DBGPRINT(RT_DEBUG_ERROR, ("%s : Can't allocate memory for wireless event.\n", __FUNCTION__));
+		DBGPRINT(RT_DEBUG_ERROR, ("%s : Can't allocate memory for wireless event.\n", __func__));
 #else
-	DBGPRINT(RT_DEBUG_ERROR, ("%s : The Wireless Extension MUST be v15 or newer.\n", __FUNCTION__));
+	DBGPRINT(RT_DEBUG_ERROR, ("%s : The Wireless Extension MUST be v15 or newer.\n", __func__));
 #endif  /* WIRELESS_EXT >= 15 */
 }
 
@@ -889,13 +889,13 @@
     ASSERT(pRxBlk->pRxPacket);
     if (pRxBlk->DataSize < 10)
     {
-        DBGPRINT(RT_DEBUG_ERROR, ("%s : Size is too small! (%d)\n", __FUNCTION__, pRxBlk->DataSize));
+        DBGPRINT(RT_DEBUG_ERROR, ("%s : Size is too small! (%d)\n", __func__, pRxBlk->DataSize));
 		goto err_free_sk_buff;
     }
 
     if (pRxBlk->DataSize + sizeof(wlan_ng_prism2_header) > RX_BUFFER_AGGRESIZE)
     {
-        DBGPRINT(RT_DEBUG_ERROR, ("%s : Size is too large! (%d)\n", __FUNCTION__, pRxBlk->DataSize + sizeof(wlan_ng_prism2_header)));
+        DBGPRINT(RT_DEBUG_ERROR, ("%s : Size is too large! (%d)\n", __func__, pRxBlk->DataSize + sizeof(wlan_ng_prism2_header)));
 		goto err_free_sk_buff;
     }
 
@@ -951,7 +951,7 @@
 
     if (skb_headroom(pOSPkt) < (sizeof(wlan_ng_prism2_header)+ header_len)) {
         if (pskb_expand_head(pOSPkt, (sizeof(wlan_ng_prism2_header) + header_len), 0, GFP_ATOMIC)) {
-	        DBGPRINT(RT_DEBUG_ERROR, ("%s : Reallocate header size of sk_buff fail!\n", __FUNCTION__));
+	        DBGPRINT(RT_DEBUG_ERROR, ("%s : Reallocate header size of sk_buff fail!\n", __func__));
 			goto err_free_sk_buff;
 	    } //end if
     } //end if
diff --git a/drivers/staging/rt2870/rt_linux.h b/drivers/staging/rt2870/rt_linux.h
index 8142975..859f9ce 100644
--- a/drivers/staging/rt2870/rt_linux.h
+++ b/drivers/staging/rt2870/rt_linux.h
@@ -124,7 +124,7 @@
 #define RT_MOD_INC_USE_COUNT() \
 	if (!try_module_get(THIS_MODULE)) \
 	{ \
-		DBGPRINT(RT_DEBUG_ERROR, ("%s: cannot reserve module\n", __FUNCTION__)); \
+		DBGPRINT(RT_DEBUG_ERROR, ("%s: cannot reserve module\n", __func__)); \
 		return -1; \
 	}
 
diff --git a/drivers/staging/rt2870/rt_main_dev.c b/drivers/staging/rt2870/rt_main_dev.c
index b990f8a..313ecea 100644
--- a/drivers/staging/rt2870/rt_main_dev.c
+++ b/drivers/staging/rt2870/rt_main_dev.c
@@ -1554,7 +1554,7 @@
 #if 0
 //	if ((pkt->data[0] & 0x1) == 0)
 	{
-		//hex_dump(__FUNCTION__, pkt->data, pkt->len);
+		//hex_dump(__func__, pkt->data, pkt->len);
 		printk("pPacket = %x\n", pPacket);
 	}
 #endif
diff --git a/drivers/staging/rt2870/rt_profile.c b/drivers/staging/rt2870/rt_profile.c
index c4474a6..467fea3 100644
--- a/drivers/staging/rt2870/rt_profile.c
+++ b/drivers/staging/rt2870/rt_profile.c
@@ -1024,7 +1024,7 @@
 								pAd->MlmeAux.SsidLen = pAd->CommonCfg.SsidLen;
 								NdisZeroMemory(pAd->MlmeAux.Ssid, NDIS_802_11_LENGTH_SSID);
 								NdisMoveMemory(pAd->MlmeAux.Ssid, tmpbuf, pAd->MlmeAux.SsidLen);
-								DBGPRINT(RT_DEBUG_TRACE, ("%s::(SSID=%s)\n", __FUNCTION__, tmpbuf));
+								DBGPRINT(RT_DEBUG_TRACE, ("%s::(SSID=%s)\n", __func__, tmpbuf));
 							}
 						}
 					}
@@ -1043,7 +1043,7 @@
 								pAd->StaCfg.BssType = BSS_INFRA;
 							// Reset Ralink supplicant to not use, it will be set to start when UI set PMK key
 							pAd->StaCfg.WpaState = SS_NOTUSE;
-							DBGPRINT(RT_DEBUG_TRACE, ("%s::(NetworkType=%d)\n", __FUNCTION__, pAd->StaCfg.BssType));
+							DBGPRINT(RT_DEBUG_TRACE, ("%s::(NetworkType=%d)\n", __func__, pAd->StaCfg.BssType));
 						}
 					}
 #endif // CONFIG_STA_SUPPORT //
@@ -1341,7 +1341,7 @@
 
 				                        pAd->StaCfg.PortSecured = WPA_802_1X_PORT_NOT_SECURED;
 
-							DBGPRINT(RT_DEBUG_TRACE, ("%s::(EncrypType=%d)\n", __FUNCTION__, pAd->StaCfg.WepStatus));
+							DBGPRINT(RT_DEBUG_TRACE, ("%s::(EncrypType=%d)\n", __func__, pAd->StaCfg.WepStatus));
 						}
 #endif // CONFIG_STA_SUPPORT //
 					}
@@ -1368,7 +1368,7 @@
 							pAd->StaCfg.bMixCipher 		= FALSE;
 
 							//RTMPMakeRSNIE(pAd, pAd->StaCfg.AuthMode, pAd->StaCfg.WepStatus, 0);
-							DBGPRINT(RT_DEBUG_TRACE, ("%s::(EncrypType=%d)\n", __FUNCTION__, pAd->StaCfg.WepStatus));
+							DBGPRINT(RT_DEBUG_TRACE, ("%s::(EncrypType=%d)\n", __func__, pAd->StaCfg.WepStatus));
 						}
 #endif // CONFIG_STA_SUPPORT //
 					}
@@ -1405,7 +1405,7 @@
 							else
 							{
 								err = 1;
-								DBGPRINT(RT_DEBUG_ERROR, ("%s::(WPAPSK key-string required 8 ~ 64 characters!)\n", __FUNCTION__));
+								DBGPRINT(RT_DEBUG_ERROR, ("%s::(WPAPSK key-string required 8 ~ 64 characters!)\n", __func__));
 							}
 
 							if (err == 0)
@@ -1436,7 +1436,7 @@
 									pAd->StaCfg.WpaState = SS_NOTUSE;
 								}
 
-								DBGPRINT(RT_DEBUG_TRACE, ("%s::(WPAPSK=%s)\n", __FUNCTION__, tmpbuf));
+								DBGPRINT(RT_DEBUG_TRACE, ("%s::(WPAPSK=%s)\n", __func__, tmpbuf));
 							}
 						}
 					}
diff --git a/drivers/staging/rt2870/rtmp_def.h b/drivers/staging/rt2870/rtmp_def.h
index 9b86325..c0f7561 100644
--- a/drivers/staging/rt2870/rtmp_def.h
+++ b/drivers/staging/rt2870/rtmp_def.h
@@ -342,7 +342,7 @@
 /* sanity check for apidx */
 #define MBSS_MR_APIDX_SANITY_CHECK(apidx) \
     { if (apidx > MAX_MBSSID_NUM) { \
-          printk("%s> Error! apidx = %d > MAX_MBSSID_NUM!\n", __FUNCTION__, apidx); \
+          printk("%s> Error! apidx = %d > MAX_MBSSID_NUM!\n", __func__, apidx); \
 	  apidx = MAIN_MBSSID; } }
 
 #define VALID_WCID(_wcid)	((_wcid) > 0 && (_wcid) < MAX_LEN_OF_MAC_TABLE )
diff --git a/drivers/staging/rt2870/sta_ioctl.c b/drivers/staging/rt2870/sta_ioctl.c
index 422f39f..91f0fab 100644
--- a/drivers/staging/rt2870/sta_ioctl.c
+++ b/drivers/staging/rt2870/sta_ioctl.c
@@ -2224,7 +2224,7 @@
 			wrq->length = strlen(extra) + 1; // 1: size of '\0'
 			break;
         default:
-            DBGPRINT(RT_DEBUG_TRACE, ("%s - unknow subcmd = %d\n", __FUNCTION__, subcmd));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s - unknown subcmd = %d\n", __func__, subcmd));
             break;
     }
 
@@ -2243,7 +2243,7 @@
 	MLME_DISASSOC_REQ_STRUCT	DisAssocReq;
 	MLME_DEAUTH_REQ_STRUCT      DeAuthReq;
 
-	DBGPRINT(RT_DEBUG_TRACE, ("====> %s\n", __FUNCTION__));
+	DBGPRINT(RT_DEBUG_TRACE, ("====> %s\n", __func__));
 
 	if (pMlme == NULL)
 		return -EINVAL;
@@ -2252,7 +2252,7 @@
 	{
 #ifdef IW_MLME_DEAUTH
 		case IW_MLME_DEAUTH:
-			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - IW_MLME_DEAUTH\n", __FUNCTION__));
+			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - IW_MLME_DEAUTH\n", __func__));
 			COPY_MAC_ADDR(DeAuthReq.Addr, pAd->CommonCfg.Bssid);
 			DeAuthReq.Reason = pMlme->reason_code;
 			MsgElem.MsgLen = sizeof(MLME_DEAUTH_REQ_STRUCT);
@@ -2267,7 +2267,7 @@
 #endif // IW_MLME_DEAUTH //
 #ifdef IW_MLME_DISASSOC
 		case IW_MLME_DISASSOC:
-			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - IW_MLME_DISASSOC\n", __FUNCTION__));
+			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - IW_MLME_DISASSOC\n", __func__));
 			COPY_MAC_ADDR(DisAssocReq.Addr, pAd->CommonCfg.Bssid);
 			DisAssocReq.Reason =  pMlme->reason_code;
 
@@ -2281,7 +2281,7 @@
 			break;
 #endif // IW_MLME_DISASSOC //
 		default:
-			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - Unknow Command\n", __FUNCTION__));
+			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - Unknow Command\n", __func__));
 			break;
 	}
 
@@ -2314,7 +2314,7 @@
             else if (param->value == IW_AUTH_WPA_VERSION_WPA2)
                 pAdapter->StaCfg.AuthMode = Ndis802_11AuthModeWPA2PSK;
 
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_VERSION - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_VERSION - param->value = %d!\n", __func__, param->value));
             break;
     	case IW_AUTH_CIPHER_PAIRWISE:
             if (param->value == IW_AUTH_CIPHER_NONE)
@@ -2345,7 +2345,7 @@
                 pAdapter->StaCfg.OrigWepStatus = pAdapter->StaCfg.WepStatus;
                 pAdapter->StaCfg.PairCipher = Ndis802_11Encryption3Enabled;
             }
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_CIPHER_PAIRWISE - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_CIPHER_PAIRWISE - param->value = %d!\n", __func__, param->value));
             break;
     	case IW_AUTH_CIPHER_GROUP:
             if (param->value == IW_AUTH_CIPHER_NONE)
@@ -2365,7 +2365,7 @@
             {
                 pAdapter->StaCfg.GroupCipher = Ndis802_11Encryption3Enabled;
             }
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_CIPHER_GROUP - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_CIPHER_GROUP - param->value = %d!\n", __func__, param->value));
             break;
     	case IW_AUTH_KEY_MGMT:
             if (param->value == IW_AUTH_KEY_MGMT_802_1X)
@@ -2395,7 +2395,7 @@
                 //pAdapter->StaCfg.PortSecured = WPA_802_1X_PORT_SECURED;
 				STA_PORT_SECURED(pAdapter);
             }
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_KEY_MGMT - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_KEY_MGMT - param->value = %d!\n", __func__, param->value));
             break;
     	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
             break;
@@ -2408,7 +2408,7 @@
                 pAdapter->StaCfg.PairCipher = Ndis802_11WEPDisabled;
         	    pAdapter->StaCfg.GroupCipher = Ndis802_11WEPDisabled;
             }*/
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_PRIVACY_INVOKED - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_PRIVACY_INVOKED - param->value = %d!\n", __func__, param->value));
     		break;
     	case IW_AUTH_DROP_UNENCRYPTED:
             if (param->value != 0)
@@ -2418,7 +2418,7 @@
                 //pAdapter->StaCfg.PortSecured = WPA_802_1X_PORT_SECURED;
 				STA_PORT_SECURED(pAdapter);
 			}
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_VERSION - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_VERSION - param->value = %d!\n", __func__, param->value));
     		break;
     	case IW_AUTH_80211_AUTH_ALG:
 			if (param->value & IW_AUTH_ALG_SHARED_KEY)
@@ -2431,10 +2431,10 @@
 			}
             else
 				return -EINVAL;
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_80211_AUTH_ALG - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_80211_AUTH_ALG - param->value = %d!\n", __func__, param->value));
 			break;
     	case IW_AUTH_WPA_ENABLED:
-    		DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_ENABLED - Driver supports WPA!(param->value = %d)\n", __FUNCTION__, param->value));
+    		DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_ENABLED - Driver supports WPA!(param->value = %d)\n", __func__, param->value));
     		break;
     	default:
     		return -EOPNOTSUPP;
@@ -2542,7 +2542,7 @@
 		pAdapter->SharedKey[BSS0][keyIdx].CipherAlg = CIPHER_NONE;
 		AsicRemoveSharedKeyEntry(pAdapter, 0, (UCHAR)keyIdx);
         NdisZeroMemory(&pAdapter->SharedKey[BSS0][keyIdx], sizeof(CIPHER_KEY));
-        DBGPRINT(RT_DEBUG_TRACE, ("%s::Remove all keys!(encoding->flags = %x)\n", __FUNCTION__, encoding->flags));
+        DBGPRINT(RT_DEBUG_TRACE, ("%s::Remove all keys!(encoding->flags = %x)\n", __func__, encoding->flags));
     }
 					else
     {
@@ -2554,15 +2554,15 @@
         if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
         {
             pAdapter->StaCfg.DefaultKeyId = keyIdx;
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::DefaultKeyId = %d\n", __FUNCTION__, pAdapter->StaCfg.DefaultKeyId));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::DefaultKeyId = %d\n", __func__, pAdapter->StaCfg.DefaultKeyId));
         }
 
         switch (alg) {
     		case IW_ENCODE_ALG_NONE:
-                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_NONE\n", __FUNCTION__));
+                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_NONE\n", __func__));
     			break;
     		case IW_ENCODE_ALG_WEP:
-                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_WEP - ext->key_len = %d, keyIdx = %d\n", __FUNCTION__, ext->key_len, keyIdx));
+                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_WEP - ext->key_len = %d, keyIdx = %d\n", __func__, ext->key_len, keyIdx));
     			if (ext->key_len == MAX_WEP_KEY_SIZE)
                 {
         			pAdapter->SharedKey[BSS0][keyIdx].KeyLen = MAX_WEP_KEY_SIZE;
@@ -2595,7 +2595,7 @@
 				}
     			break;
             case IW_ENCODE_ALG_TKIP:
-                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_TKIP - keyIdx = %d, ext->key_len = %d\n", __FUNCTION__, keyIdx, ext->key_len));
+                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_TKIP - keyIdx = %d, ext->key_len = %d\n", __func__, keyIdx, ext->key_len));
                 if (ext->key_len == 32)
                 {
                     if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
diff --git a/drivers/staging/rt2870/tmp60 b/drivers/staging/rt2870/tmp60
index 992096a..975e444 100644
--- a/drivers/staging/rt2870/tmp60
+++ b/drivers/staging/rt2870/tmp60
@@ -2224,7 +2224,7 @@
 			wrq->length = strlen(extra) + 1; // 1: size of '\0'
 			break;
         default:
-            DBGPRINT(RT_DEBUG_TRACE, ("%s - unknow subcmd = %d\n", __FUNCTION__, subcmd));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s - unknown subcmd = %d\n", __func__, subcmd));
             break;
     }
 
@@ -2243,7 +2243,7 @@
 	MLME_DISASSOC_REQ_STRUCT	DisAssocReq;
 	MLME_DEAUTH_REQ_STRUCT      DeAuthReq;
 
-	DBGPRINT(RT_DEBUG_TRACE, ("====> %s\n", __FUNCTION__));
+	DBGPRINT(RT_DEBUG_TRACE, ("====> %s\n", __func__));
 
 	if (pMlme == NULL)
 		return -EINVAL;
@@ -2252,7 +2252,7 @@
 	{
 #ifdef IW_MLME_DEAUTH
 		case IW_MLME_DEAUTH:
-			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - IW_MLME_DEAUTH\n", __FUNCTION__));
+			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - IW_MLME_DEAUTH\n", __func__));
 			COPY_MAC_ADDR(DeAuthReq.Addr, pAd->CommonCfg.Bssid);
 			DeAuthReq.Reason = pMlme->reason_code;
 			MsgElem.MsgLen = sizeof(MLME_DEAUTH_REQ_STRUCT);
@@ -2267,7 +2267,7 @@
 #endif // IW_MLME_DEAUTH //
 #ifdef IW_MLME_DISASSOC
 		case IW_MLME_DISASSOC:
-			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - IW_MLME_DISASSOC\n", __FUNCTION__));
+			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - IW_MLME_DISASSOC\n", __func__));
 			COPY_MAC_ADDR(DisAssocReq.Addr, pAd->CommonCfg.Bssid);
 			DisAssocReq.Reason =  pMlme->reason_code;
 
@@ -2281,7 +2281,7 @@
 			break;
 #endif // IW_MLME_DISASSOC //
 		default:
-			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - Unknow Command\n", __FUNCTION__));
+			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - Unknow Command\n", __func__));
 			break;
 	}
 
@@ -2314,7 +2314,7 @@
             else if (param->value == IW_AUTH_WPA_VERSION_WPA2)
                 pAdapter->StaCfg.AuthMode = Ndis802_11AuthModeWPA2PSK;
 
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_VERSION - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_VERSION - param->value = %d!\n", __func__, param->value));
             break;
     	case IW_AUTH_CIPHER_PAIRWISE:
             if (param->value == IW_AUTH_CIPHER_NONE)
@@ -2345,7 +2345,7 @@
                 pAdapter->StaCfg.OrigWepStatus = pAdapter->StaCfg.WepStatus;
                 pAdapter->StaCfg.PairCipher = Ndis802_11Encryption3Enabled;
             }
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_CIPHER_PAIRWISE - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_CIPHER_PAIRWISE - param->value = %d!\n", __func__, param->value));
             break;
     	case IW_AUTH_CIPHER_GROUP:
             if (param->value == IW_AUTH_CIPHER_NONE)
@@ -2365,7 +2365,7 @@
             {
                 pAdapter->StaCfg.GroupCipher = Ndis802_11Encryption3Enabled;
             }
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_CIPHER_GROUP - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_CIPHER_GROUP - param->value = %d!\n", __func__, param->value));
             break;
     	case IW_AUTH_KEY_MGMT:
             if (param->value == IW_AUTH_KEY_MGMT_802_1X)
@@ -2395,7 +2395,7 @@
                 //pAdapter->StaCfg.PortSecured = WPA_802_1X_PORT_SECURED;
 				STA_PORT_SECURED(pAdapter);
             }
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_KEY_MGMT - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_KEY_MGMT - param->value = %d!\n", __func__, param->value));
             break;
     	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
             break;
@@ -2408,7 +2408,7 @@
                 pAdapter->StaCfg.PairCipher = Ndis802_11WEPDisabled;
         	    pAdapter->StaCfg.GroupCipher = Ndis802_11WEPDisabled;
             }*/
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_PRIVACY_INVOKED - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_PRIVACY_INVOKED - param->value = %d!\n", __func__, param->value));
     		break;
     	case IW_AUTH_DROP_UNENCRYPTED:
             if (param->value != 0)
@@ -2418,7 +2418,7 @@
                 //pAdapter->StaCfg.PortSecured = WPA_802_1X_PORT_SECURED;
 				STA_PORT_SECURED(pAdapter);
 			}
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_VERSION - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_VERSION - param->value = %d!\n", __func__, param->value));
     		break;
     	case IW_AUTH_80211_AUTH_ALG:
 			if (param->value & IW_AUTH_ALG_SHARED_KEY)
@@ -2431,10 +2431,10 @@
 			}
             else
 				return -EINVAL;
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_80211_AUTH_ALG - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_80211_AUTH_ALG - param->value = %d!\n", __func__, param->value));
 			break;
     	case IW_AUTH_WPA_ENABLED:
-    		DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_ENABLED - Driver supports WPA!(param->value = %d)\n", __FUNCTION__, param->value));
+    		DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_ENABLED - Driver supports WPA!(param->value = %d)\n", __func__, param->value));
     		break;
     	default:
     		return -EOPNOTSUPP;
@@ -2542,7 +2542,7 @@
 		pAdapter->SharedKey[BSS0][keyIdx].CipherAlg = CIPHER_NONE;
 		AsicRemoveSharedKeyEntry(pAdapter, 0, (UCHAR)keyIdx);
         NdisZeroMemory(&pAdapter->SharedKey[BSS0][keyIdx], sizeof(CIPHER_KEY));
-        DBGPRINT(RT_DEBUG_TRACE, ("%s::Remove all keys!(encoding->flags = %x)\n", __FUNCTION__, encoding->flags));
+        DBGPRINT(RT_DEBUG_TRACE, ("%s::Remove all keys!(encoding->flags = %x)\n", __func__, encoding->flags));
     }
 					else
     {
@@ -2554,15 +2554,15 @@
         if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
         {
             pAdapter->StaCfg.DefaultKeyId = keyIdx;
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::DefaultKeyId = %d\n", __FUNCTION__, pAdapter->StaCfg.DefaultKeyId));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::DefaultKeyId = %d\n", __func__, pAdapter->StaCfg.DefaultKeyId));
         }
 
         switch (alg) {
     		case IW_ENCODE_ALG_NONE:
-                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_NONE\n", __FUNCTION__));
+                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_NONE\n", __func__));
     			break;
     		case IW_ENCODE_ALG_WEP:
-                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_WEP - ext->key_len = %d, keyIdx = %d\n", __FUNCTION__, ext->key_len, keyIdx));
+                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_WEP - ext->key_len = %d, keyIdx = %d\n", __func__, ext->key_len, keyIdx));
     			if (ext->key_len == MAX_WEP_KEY_SIZE)
                 {
         			pAdapter->SharedKey[BSS0][keyIdx].KeyLen = MAX_WEP_KEY_SIZE;
@@ -2580,7 +2580,7 @@
 			    NdisMoveMemory(pAdapter->SharedKey[BSS0][keyIdx].Key, ext->key, ext->key_len);
     			break;
             case IW_ENCODE_ALG_TKIP:
-                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_TKIP - keyIdx = %d, ext->key_len = %d\n", __FUNCTION__, keyIdx, ext->key_len));
+                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_TKIP - keyIdx = %d, ext->key_len = %d\n", __func__, keyIdx, ext->key_len));
                 if (ext->key_len == 32)
                 {
                     if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
diff --git a/drivers/staging/rt2870/tmp61 b/drivers/staging/rt2870/tmp61
index 992096a..975e444 100644
--- a/drivers/staging/rt2870/tmp61
+++ b/drivers/staging/rt2870/tmp61
@@ -2224,7 +2224,7 @@
 			wrq->length = strlen(extra) + 1; // 1: size of '\0'
 			break;
         default:
-            DBGPRINT(RT_DEBUG_TRACE, ("%s - unknow subcmd = %d\n", __FUNCTION__, subcmd));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s - unknow subcmd = %d\n", __func__, subcmd));
             break;
     }
 
@@ -2243,7 +2243,7 @@
 	MLME_DISASSOC_REQ_STRUCT	DisAssocReq;
 	MLME_DEAUTH_REQ_STRUCT      DeAuthReq;
 
-	DBGPRINT(RT_DEBUG_TRACE, ("====> %s\n", __FUNCTION__));
+	DBGPRINT(RT_DEBUG_TRACE, ("====> %s\n", __func__));
 
 	if (pMlme == NULL)
 		return -EINVAL;
@@ -2252,7 +2252,7 @@
 	{
 #ifdef IW_MLME_DEAUTH
 		case IW_MLME_DEAUTH:
-			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - IW_MLME_DEAUTH\n", __FUNCTION__));
+			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - IW_MLME_DEAUTH\n", __func__));
 			COPY_MAC_ADDR(DeAuthReq.Addr, pAd->CommonCfg.Bssid);
 			DeAuthReq.Reason = pMlme->reason_code;
 			MsgElem.MsgLen = sizeof(MLME_DEAUTH_REQ_STRUCT);
@@ -2267,7 +2267,7 @@
 #endif // IW_MLME_DEAUTH //
 #ifdef IW_MLME_DISASSOC
 		case IW_MLME_DISASSOC:
-			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - IW_MLME_DISASSOC\n", __FUNCTION__));
+			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - IW_MLME_DISASSOC\n", __func__));
 			COPY_MAC_ADDR(DisAssocReq.Addr, pAd->CommonCfg.Bssid);
 			DisAssocReq.Reason =  pMlme->reason_code;
 
@@ -2281,7 +2281,7 @@
 			break;
 #endif // IW_MLME_DISASSOC //
 		default:
-			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - Unknow Command\n", __FUNCTION__));
+			DBGPRINT(RT_DEBUG_TRACE, ("====> %s - Unknow Command\n", __func__));
 			break;
 	}
 
@@ -2314,7 +2314,7 @@
             else if (param->value == IW_AUTH_WPA_VERSION_WPA2)
                 pAdapter->StaCfg.AuthMode = Ndis802_11AuthModeWPA2PSK;
 
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_VERSION - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_VERSION - param->value = %d!\n", __func__, param->value));
             break;
     	case IW_AUTH_CIPHER_PAIRWISE:
             if (param->value == IW_AUTH_CIPHER_NONE)
@@ -2345,7 +2345,7 @@
                 pAdapter->StaCfg.OrigWepStatus = pAdapter->StaCfg.WepStatus;
                 pAdapter->StaCfg.PairCipher = Ndis802_11Encryption3Enabled;
             }
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_CIPHER_PAIRWISE - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_CIPHER_PAIRWISE - param->value = %d!\n", __func__, param->value));
             break;
     	case IW_AUTH_CIPHER_GROUP:
             if (param->value == IW_AUTH_CIPHER_NONE)
@@ -2365,7 +2365,7 @@
             {
                 pAdapter->StaCfg.GroupCipher = Ndis802_11Encryption3Enabled;
             }
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_CIPHER_GROUP - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_CIPHER_GROUP - param->value = %d!\n", __func__, param->value));
             break;
     	case IW_AUTH_KEY_MGMT:
             if (param->value == IW_AUTH_KEY_MGMT_802_1X)
@@ -2395,7 +2395,7 @@
                 //pAdapter->StaCfg.PortSecured = WPA_802_1X_PORT_SECURED;
 				STA_PORT_SECURED(pAdapter);
             }
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_KEY_MGMT - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_KEY_MGMT - param->value = %d!\n", __func__, param->value));
             break;
     	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
             break;
@@ -2408,7 +2408,7 @@
                 pAdapter->StaCfg.PairCipher = Ndis802_11WEPDisabled;
         	    pAdapter->StaCfg.GroupCipher = Ndis802_11WEPDisabled;
             }*/
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_PRIVACY_INVOKED - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_PRIVACY_INVOKED - param->value = %d!\n", __func__, param->value));
     		break;
     	case IW_AUTH_DROP_UNENCRYPTED:
             if (param->value != 0)
@@ -2418,7 +2418,7 @@
                 //pAdapter->StaCfg.PortSecured = WPA_802_1X_PORT_SECURED;
 				STA_PORT_SECURED(pAdapter);
 			}
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_VERSION - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_VERSION - param->value = %d!\n", __func__, param->value));
     		break;
     	case IW_AUTH_80211_AUTH_ALG:
 			if (param->value & IW_AUTH_ALG_SHARED_KEY)
@@ -2431,10 +2431,10 @@
 			}
             else
 				return -EINVAL;
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_80211_AUTH_ALG - param->value = %d!\n", __FUNCTION__, param->value));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_80211_AUTH_ALG - param->value = %d!\n", __func__, param->value));
 			break;
     	case IW_AUTH_WPA_ENABLED:
-    		DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_ENABLED - Driver supports WPA!(param->value = %d)\n", __FUNCTION__, param->value));
+    		DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_AUTH_WPA_ENABLED - Driver supports WPA!(param->value = %d)\n", __func__, param->value));
     		break;
     	default:
     		return -EOPNOTSUPP;
@@ -2542,7 +2542,7 @@
 		pAdapter->SharedKey[BSS0][keyIdx].CipherAlg = CIPHER_NONE;
 		AsicRemoveSharedKeyEntry(pAdapter, 0, (UCHAR)keyIdx);
         NdisZeroMemory(&pAdapter->SharedKey[BSS0][keyIdx], sizeof(CIPHER_KEY));
-        DBGPRINT(RT_DEBUG_TRACE, ("%s::Remove all keys!(encoding->flags = %x)\n", __FUNCTION__, encoding->flags));
+        DBGPRINT(RT_DEBUG_TRACE, ("%s::Remove all keys!(encoding->flags = %x)\n", __func__, encoding->flags));
     }
 					else
     {
@@ -2554,15 +2554,15 @@
         if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
         {
             pAdapter->StaCfg.DefaultKeyId = keyIdx;
-            DBGPRINT(RT_DEBUG_TRACE, ("%s::DefaultKeyId = %d\n", __FUNCTION__, pAdapter->StaCfg.DefaultKeyId));
+            DBGPRINT(RT_DEBUG_TRACE, ("%s::DefaultKeyId = %d\n", __func__, pAdapter->StaCfg.DefaultKeyId));
         }
 
         switch (alg) {
     		case IW_ENCODE_ALG_NONE:
-                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_NONE\n", __FUNCTION__));
+                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_NONE\n", __func__));
     			break;
     		case IW_ENCODE_ALG_WEP:
-                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_WEP - ext->key_len = %d, keyIdx = %d\n", __FUNCTION__, ext->key_len, keyIdx));
+                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_WEP - ext->key_len = %d, keyIdx = %d\n", __func__, ext->key_len, keyIdx));
     			if (ext->key_len == MAX_WEP_KEY_SIZE)
                 {
         			pAdapter->SharedKey[BSS0][keyIdx].KeyLen = MAX_WEP_KEY_SIZE;
@@ -2580,7 +2580,7 @@
 			    NdisMoveMemory(pAdapter->SharedKey[BSS0][keyIdx].Key, ext->key, ext->key_len);
     			break;
             case IW_ENCODE_ALG_TKIP:
-                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_TKIP - keyIdx = %d, ext->key_len = %d\n", __FUNCTION__, keyIdx, ext->key_len));
+                DBGPRINT(RT_DEBUG_TRACE, ("%s::IW_ENCODE_ALG_TKIP - keyIdx = %d, ext->key_len = %d\n", __func__, keyIdx, ext->key_len));
                 if (ext->key_len == 32)
                 {
                     if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
diff --git a/drivers/staging/rtl8187se/ieee80211.h b/drivers/staging/rtl8187se/ieee80211.h
index bf06abe..5833608 100644
--- a/drivers/staging/rtl8187se/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211.h
@@ -396,7 +396,7 @@
 #define IEEE80211_DEBUG(level, fmt, args...) \
 do { if (ieee80211_debug_level & (level)) \
   printk(KERN_DEBUG "ieee80211: %c %s " fmt, \
-         in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
+         in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
 #else
 #define IEEE80211_DEBUG(level, fmt, args...) do {} while (0)
 #endif	/* CONFIG_IEEE80211_DEBUG */
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
index bf06abe..5833608 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
@@ -396,7 +396,7 @@
 #define IEEE80211_DEBUG(level, fmt, args...) \
 do { if (ieee80211_debug_level & (level)) \
   printk(KERN_DEBUG "ieee80211: %c %s " fmt, \
-         in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
+         in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
 #else
 #define IEEE80211_DEBUG(level, fmt, args...) do {} while (0)
 #endif	/* CONFIG_IEEE80211_DEBUG */
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
index 7d890c3..08add38 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
@@ -490,7 +490,7 @@
 
 void ieee80211_ccmp_null(void)
 {
-//    printk("============>%s()\n", __FUNCTION__);
+//    printk("============>%s()\n", __func__);
 	return;
 }
 static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = {
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
index e560b1e..de97bbe 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
@@ -984,7 +984,7 @@
 
 void ieee80211_tkip_null(void)
 {
-//    printk("============>%s()\n", __FUNCTION__);
+//    printk("============>%s()\n", __func__);
         return;
 }
 
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
index f973dae..68a22b32 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
@@ -380,7 +380,7 @@
 
 void ieee80211_wep_null(void)
 {
-//	printk("============>%s()\n", __FUNCTION__);
+//	printk("============>%s()\n", __func__);
         return;
 }
 #if 0
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
index 79ec649..23519b3 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
@@ -1635,7 +1635,7 @@
 	memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len);
 	dst->rates_ex_len = src->rates_ex_len;
 	dst->HighestOperaRate= src->HighestOperaRate;
-	//printk("==========>in %s: src->ssid is %s,chan is %d\n",__FUNCTION__,src->ssid,src->channel);
+	//printk("==========>in %s: src->ssid is %s,chan is %d\n",__func__,src->ssid,src->channel);
 
 	//YJ,add,080819,for hidden ap
 	if(src->ssid_len > 0)
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index fcffee5..e5752f6 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -731,7 +731,7 @@
 	memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
 #endif
 //	printk("ieee80211_softmac_scan_wq ENABLE_IPS\n");
-//	printk("in %s\n",__FUNCTION__);
+//	printk("in %s\n",__func__);
 	down(&ieee->scan_sem);
 
 	do{
@@ -1785,7 +1785,7 @@
 		//If call dev_kfree_skb_any,a warning will ocur....
 		//KERNEL: assertion (!atomic_read(&skb->users)) failed at net/core/dev.c (1708)
 		//So ... 1204 by lawrence.
-		//printk("\nIn %s,line %d call kfree skb.",__FUNCTION__,__LINE__);
+		//printk("\nIn %s,line %d call kfree skb.",__func__,__LINE__);
 		//dev_kfree_skb_any(skb);//edit by thomas
 	}
 }
@@ -2432,7 +2432,7 @@
 	}
 
 	sleep = ieee80211_sta_ps_sleep(ieee,&th, &tl);
-//	printk("===>%s,%d[2 wake, 1 sleep, 0 do nothing], ieee->sta_sleep = %d\n",__FUNCTION__, sleep,ieee->sta_sleep);
+//	printk("===>%s,%d[2 wake, 1 sleep, 0 do nothing], ieee->sta_sleep = %d\n",__func__, sleep,ieee->sta_sleep);
 	/* 2 wake, 1 sleep, 0 do nothing */
 	if(sleep == 0)
 		goto out;
@@ -2510,7 +2510,7 @@
 		/* Null frame with PS bit set */
 		if(success){
 
-		//	printk("==================> %s::enter sleep state\n",__FUNCTION__);
+		//	printk("==================> %s::enter sleep state\n",__func__);
 			ieee->sta_sleep = 1;
 			ieee->enter_sleep_state(ieee->dev,ieee->ps_th,ieee->ps_tl);
 		}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
index 43b8aec..93af37e 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
@@ -31,7 +31,7 @@
 {
 	int ret;
 	struct iw_freq *fwrq = & wrqu->freq;
-//	printk("in %s\n",__FUNCTION__);
+//	printk("in %s\n",__func__);
 	down(&ieee->wx_sem);
 
 	if(ieee->iw_mode == IW_MODE_INFRA){
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
index c7d9f4f..6aad61e 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
@@ -663,7 +663,7 @@
                 ret = -EINVAL;
                 goto done;
         }
-//	printk("8-09-08-9=====>%s, alg name:%s\n",__FUNCTION__, alg);
+//	printk("8-09-08-9=====>%s, alg name:%s\n",__func__, alg);
 
 	 ops = ieee80211_get_crypto_ops(alg);
         if (ops == NULL) {
@@ -757,7 +757,7 @@
                                union iwreq_data *wrqu, char *extra)
 {
 	struct iw_mlme *mlme = (struct iw_mlme *) extra;
-//	printk("\ndkgadfslkdjgalskdf===============>%s(), cmd:%x\n", __FUNCTION__, mlme->cmd);
+//	printk("\ndkgadfslkdjgalskdf===============>%s(), cmd:%x\n", __func__, mlme->cmd);
 #if 1
 	switch (mlme->cmd) {
         case IW_MLME_DEAUTH:
@@ -830,7 +830,7 @@
 int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
 {
 #if 0
-	printk("====>%s()\n", __FUNCTION__);
+	printk("====>%s()\n", __func__);
 	{
 		int i;
 		for (i=0; i<len; i++)
@@ -866,7 +866,7 @@
 		ieee->wpa_ie = NULL;
 		ieee->wpa_ie_len = 0;
 	}
-//	printk("<=====out %s()\n", __FUNCTION__);
+//	printk("<=====out %s()\n", __func__);
 
 	return 0;
 
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 00f4df49..9453495 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -1407,12 +1407,12 @@
 
 	if((ch > 14) || (ch < 1))
 	{
-		printk("In %s: Invalid chnanel %d\n", __FUNCTION__, ch);
+		printk("In %s: Invalid chnanel %d\n", __func__, ch);
 		return;
 	}
 
 	priv->chan=ch;
-	//printk("in %s:channel is %d\n",__FUNCTION__,ch);
+	//printk("in %s:channel is %d\n",__func__,ch);
 	priv->rf_set_chan(dev,priv->chan);
 
 }
@@ -3656,7 +3656,7 @@
 void rtl8180_rq_tx_ack(struct net_device *dev){
 
 	struct r8180_priv *priv = ieee80211_priv(dev);
-//	printk("====================>%s\n",__FUNCTION__);
+//	printk("====================>%s\n",__func__);
 	write_nic_byte(dev,CONFIG4,read_nic_byte(dev,CONFIG4)|CONFIG4_PWRMGT);
 	priv->ack_tx_to_ieee = 1;
 }
@@ -5448,7 +5448,7 @@
 #endif
 
 //	printk("dev is %d\n",dev);
-//	printk("&*&(^*(&(&=========>%s()\n", __FUNCTION__);
+//	printk("&*&(^*(&(&=========>%s()\n", __func__);
 	rtl8180_hw_wakeup(dev);
 
 }
@@ -6318,12 +6318,12 @@
 //				printk("NumTxOkTotal is %d\n",priv->NumTxOkTotal++);
 			}
 #endif
-			//	printk("in function %s:curr_retry_count is %d\n",__FUNCTION__,((*head) & (0x000000ff)));
+			//	printk("in function %s:curr_retry_count is %d\n",__func__,((*head) & (0x000000ff)));
 		}
 		if(!error){
 			priv->NumTxOkBytesTotal += (*(head+3)) & (0x00000fff);
 		}
-//		printk("in function %s:curr_txokbyte_count is %d\n",__FUNCTION__,(*(head+3)) & (0x00000fff));
+//		printk("in function %s:curr_txokbyte_count is %d\n",__func__,(*(head+3)) & (0x00000fff));
 		*head = *head &~ (1<<31);
 
 		if((head - begin)/8 == priv->txringcount-1)
diff --git a/drivers/staging/rtl8187se/r8180_rtl8225z2.c b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
index 90a574d..4136b94 100644
--- a/drivers/staging/rtl8187se/r8180_rtl8225z2.c
+++ b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
@@ -1571,7 +1571,7 @@
 	//
 	// Turn off RF power.
 	//
-	//printk("=========>%s()\n", __FUNCTION__);
+	//printk("=========>%s()\n", __func__);
 	MgntActSet_RF_State(dev, eRfSleep, RF_CHANGE_BY_PS);
 	//mdelay(2);	//FIXME
 }
@@ -1580,7 +1580,7 @@
 	//
 	// Turn on RF power.
 	//
-	//printk("=========>%s()\n", __FUNCTION__);
+	//printk("=========>%s()\n", __func__);
 	MgntActSet_RF_State(dev, eRfOn, RF_CHANGE_BY_PS);
 }
 #endif
diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c
index c77abe5..8b3901d 100644
--- a/drivers/staging/rtl8187se/r8180_wx.c
+++ b/drivers/staging/rtl8187se/r8180_wx.c
@@ -1177,7 +1177,7 @@
 	//struct ieee80211_device *ieee = netdev_priv(dev);
 	int *val = (int *)extra;
 	int i;
-	printk("-----in fun %s\n", __FUNCTION__);
+	printk("-----in fun %s\n", __func__);
 
 	if(priv->ieee80211->bHwRadioOff)
 		return 0;
@@ -1235,7 +1235,7 @@
 
 	down(&priv->wx_sem);
 
-	printk("==============>%s(): forcerate is %d\n",__FUNCTION__,forcerate);
+	printk("==============>%s(): forcerate is %d\n",__func__,forcerate);
 	if((forcerate == 2) || (forcerate == 4) || (forcerate == 11) || (forcerate == 22) || (forcerate == 12) ||
 		(forcerate == 18) || (forcerate == 24) || (forcerate == 36) || (forcerate == 48) || (forcerate == 72) ||
 		(forcerate == 96) || (forcerate == 108))
@@ -1260,7 +1260,7 @@
 {
 
 	struct r8180_priv *priv = ieee80211_priv(dev);
-	//printk("===>%s()\n", __FUNCTION__);
+	//printk("===>%s()\n", __func__);
 
 	int ret=0;
 
@@ -1277,7 +1277,7 @@
                                         struct iw_request_info *info,
                                         struct iw_param *data, char *extra)
 {
-	//printk("====>%s()\n", __FUNCTION__);
+	//printk("====>%s()\n", __func__);
 	struct r8180_priv *priv = ieee80211_priv(dev);
 	int ret=0;
 
@@ -1294,7 +1294,7 @@
                                         struct iw_request_info *info,
                                         union iwreq_data *wrqu, char *extra)
 {
-	//printk("====>%s()\n", __FUNCTION__);
+	//printk("====>%s()\n", __func__);
 
 	int ret=0;
 	struct r8180_priv *priv = ieee80211_priv(dev);
@@ -1315,7 +1315,7 @@
                                         struct iw_request_info *info,
                                         struct iw_point *data, char *extra)
 {
-//	printk("====>%s(), len:%d\n", __FUNCTION__, data->length);
+//	printk("====>%s(), len:%d\n", __func__, data->length);
 	int ret=0;
         struct r8180_priv *priv = ieee80211_priv(dev);
 
@@ -1328,7 +1328,7 @@
         ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, data->length);
 #endif
         up(&priv->wx_sem);
-	//printk("<======%s(), ret:%d\n", __FUNCTION__, ret);
+	//printk("<======%s(), ret:%d\n", __func__, ret);
         return ret;
 
 
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 289d81a..83babb0 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -150,4 +150,6 @@
 
 source "drivers/usb/gadget/Kconfig"
 
+source "drivers/usb/otg/Kconfig"
+
 endif # USB_SUPPORT
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index d50a99f..00b47ea 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1275,7 +1275,7 @@
 	struct acm *acm = usb_get_intfdata(intf);
 	int cnt;
 
-	if (acm->dev->auto_pm) {
+	if (message.event & PM_EVENT_AUTO) {
 		int b;
 
 		spin_lock_irq(&acm->read_lock);
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 5a8ecc0..3771d6e 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -764,7 +764,8 @@
 
 	mutex_lock(&desc->plock);
 #ifdef CONFIG_PM
-	if (interface_to_usbdev(desc->intf)->auto_pm && test_bit(WDM_IN_USE, &desc->flags)) {
+	if ((message.event & PM_EVENT_AUTO) &&
+			test_bit(WDM_IN_USE, &desc->flags)) {
 		rv = -EBUSY;
 	} else {
 #endif
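
Both cdc-acm and cdc-wdm above stop peeking at the device's auto_pm flag and instead test the pm_message_t handed to their suspend() methods. A minimal sketch of the same pattern in a class driver, not part of this patch; the example_* names and the EXAMPLE_IN_USE bit are hypothetical, while PM_EVENT_AUTO and the callback signature are the real ones these hunks rely on:

    #include <linux/usb.h>

    struct example_dev {				/* hypothetical private data */
    	unsigned long flags;
    #define EXAMPLE_IN_USE	0
    };

    static int example_suspend(struct usb_interface *intf, pm_message_t message)
    {
    	struct example_dev *dev = usb_get_intfdata(intf);

    	/* Runtime (auto)suspend: back off instead of breaking active I/O. */
    	if ((message.event & PM_EVENT_AUTO) &&
    			test_bit(EXAMPLE_IN_USE, &dev->flags))
    		return -EBUSY;

    	/* System sleep, or an idle autosuspend: quiesce and accept it. */
    	return 0;
    }

Refusing only the PM_EVENT_AUTO case keeps system sleep working even while the interface is busy, which is exactly the distinction the old auto_pm field encoded.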
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 43a863c..0f5c05f 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -21,6 +21,7 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/kernel.h>
 #include <linux/fs.h>
 #include <linux/uaccess.h>
 #include <linux/kref.h>
@@ -482,7 +483,6 @@
 	int retval;
 	int actual;
 	unsigned long int n_bytes;
-	int n;
 	int remaining;
 	int done;
 	int this_part;
@@ -526,11 +526,8 @@
 			goto exit;
 		}
 
-		n_bytes = 12 + this_part;
-		if (this_part % 4)
-			n_bytes += 4 - this_part % 4;
-			for (n = 12 + this_part; n < n_bytes; n++)
-				buffer[n] = 0;
+		n_bytes = roundup(12 + this_part, 4);
+		memset(buffer + 12 + this_part, 0, n_bytes - (12 + this_part));
 
 		retval = usb_bulk_msg(data->usb_dev,
 				      usb_sndbulkpipe(data->usb_dev,
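
The usbtmc change above swaps an open-coded padding loop for roundup() from <linux/kernel.h> (hence the new include) plus a memset() of the pad bytes. A small userspace restatement of the same arithmetic, with roundup() redefined locally only for illustration:

    #include <stdio.h>
    #include <string.h>

    /* Same definition as the kernel's roundup() in <linux/kernel.h>. */
    #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

    int main(void)
    {
    	unsigned char buffer[32];
    	int this_part = 5;                     /* example payload length */
    	unsigned long n_bytes;

    	memset(buffer, 0xff, sizeof(buffer));  /* pretend stale data */

    	/* 12-byte header + payload, padded up to a 4-byte boundary. */
    	n_bytes = roundup(12 + this_part, 4);
    	memset(buffer + 12 + this_part, 0, n_bytes - (12 + this_part));

    	printf("payload %d -> %lu bytes on the wire, %lu pad byte(s)\n",
    	       this_part, n_bytes, n_bytes - (12 + this_part));
    	return 0;
    }

When 12 + this_part is already a multiple of 4, the memset() length is zero, matching the old loop's behavior while dropping the now-unused local n.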
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index aa79280..26fece1 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -981,9 +981,6 @@
 		return -EINVAL;
 	if (!uurb->buffer)
 		return -EINVAL;
-	if (uurb->signr != 0 && (uurb->signr < SIGRTMIN ||
-				 uurb->signr > SIGRTMAX))
-		return -EINVAL;
 	if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL &&
 	    (uurb->endpoint & ~USB_ENDPOINT_DIR_MASK) == 0)) {
 		ifnum = findintfep(ps->dev, uurb->endpoint);
@@ -1320,7 +1317,7 @@
 	if (__get_user(uptr, &uurb->buffer))
 		return -EFAULT;
 	kurb->buffer = compat_ptr(uptr);
-	if (__get_user(uptr, &uurb->buffer))
+	if (__get_user(uptr, &uurb->usercontext))
 		return -EFAULT;
 	kurb->usercontext = compat_ptr(uptr);
 
@@ -1401,8 +1398,6 @@
 
 	if (copy_from_user(&ds, arg, sizeof(ds)))
 		return -EFAULT;
-	if (ds.signr != 0 && (ds.signr < SIGRTMIN || ds.signr > SIGRTMAX))
-		return -EINVAL;
 	ps->discsignr = ds.signr;
 	ps->disccontext = ds.context;
 	return 0;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 8c08130..9876055 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -184,6 +184,20 @@
 	return 0;
 }
 
+/*
+ * Cancel any pending scheduled resets
+ *
+ * [see usb_queue_reset_device()]
+ *
+ * Called after unconfiguring / when releasing interfaces. See
+ * comments in __usb_queue_reset_device() regarding
+ * udev->reset_running.
+ */
+static void usb_cancel_queued_reset(struct usb_interface *iface)
+{
+	if (iface->reset_running == 0)
+		cancel_work_sync(&iface->reset_ws);
+}
 
 /* called from driver core with dev locked */
 static int usb_probe_interface(struct device *dev)
@@ -242,6 +256,7 @@
 			mark_quiesced(intf);
 			intf->needs_remote_wakeup = 0;
 			intf->condition = USB_INTERFACE_UNBOUND;
+			usb_cancel_queued_reset(intf);
 		} else
 			intf->condition = USB_INTERFACE_BOUND;
 
@@ -272,6 +287,7 @@
 		usb_disable_interface(udev, intf);
 
 	driver->disconnect(intf);
+	usb_cancel_queued_reset(intf);
 
 	/* Reset other interface state.
 	 * We cannot do a Set-Interface if the device is suspended or
@@ -279,9 +295,12 @@
 	 * altsetting means creating new endpoint device entries).
 	 * When either of these happens, defer the Set-Interface.
 	 */
-	if (intf->cur_altsetting->desc.bAlternateSetting == 0)
-		;	/* Already in altsetting 0 so skip Set-Interface */
-	else if (!error && intf->dev.power.status == DPM_ON)
+	if (intf->cur_altsetting->desc.bAlternateSetting == 0) {
+		/* Already in altsetting 0 so skip Set-Interface.
+		 * Just re-enable it without affecting the endpoint toggles.
+		 */
+		usb_enable_interface(udev, intf, false);
+	} else if (!error && intf->dev.power.status == DPM_ON)
 		usb_set_interface(udev, intf->altsetting[0].
 				desc.bInterfaceNumber, 0);
 	else
@@ -380,8 +399,10 @@
 	if (device_is_registered(dev)) {
 		iface->condition = USB_INTERFACE_UNBINDING;
 		device_release_driver(dev);
+	} else {
+		iface->condition = USB_INTERFACE_UNBOUND;
+		usb_cancel_queued_reset(iface);
 	}
-
 	dev->driver = NULL;
 	usb_set_intfdata(iface, NULL);
 
@@ -904,7 +925,7 @@
 }
 
 /* Caller has locked udev's pm_mutex */
-static int usb_resume_device(struct usb_device *udev)
+static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
 {
 	struct usb_device_driver	*udriver;
 	int				status = 0;
@@ -922,7 +943,7 @@
 		udev->reset_resume = 1;
 
 	udriver = to_usb_device_driver(udev->dev.driver);
-	status = udriver->resume(udev);
+	status = udriver->resume(udev, msg);
 
  done:
 	dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status);
@@ -942,7 +963,8 @@
 	if (udev->state == USB_STATE_NOTATTACHED || !is_active(intf))
 		goto done;
 
-	if (intf->condition == USB_INTERFACE_UNBOUND)	/* This can't happen */
+	/* This can happen; see usb_driver_release_interface() */
+	if (intf->condition == USB_INTERFACE_UNBOUND)
 		goto done;
 	driver = to_usb_driver(intf->dev.driver);
 
@@ -950,7 +972,7 @@
 		status = driver->suspend(intf, msg);
 		if (status == 0)
 			mark_quiesced(intf);
-		else if (!udev->auto_pm)
+		else if (!(msg.event & PM_EVENT_AUTO))
 			dev_err(&intf->dev, "%s error %d\n",
 					"suspend", status);
 	} else {
@@ -968,7 +990,7 @@
 
 /* Caller has locked intf's usb_device's pm_mutex */
 static int usb_resume_interface(struct usb_device *udev,
-		struct usb_interface *intf, int reset_resume)
+		struct usb_interface *intf, pm_message_t msg, int reset_resume)
 {
 	struct usb_driver	*driver;
 	int			status = 0;
@@ -1092,7 +1114,7 @@
 	if (reschedule) {
 		if (!timer_pending(&udev->autosuspend.timer)) {
 			queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
-				round_jiffies_relative(suspend_time - j));
+				round_jiffies_up_relative(suspend_time - j));
 		}
 		return -EAGAIN;
 	}
@@ -1119,10 +1141,9 @@
  * all the interfaces which were suspended are resumed so that they remain
  * in the same state as the device.
  *
- * If an autosuspend is in progress (@udev->auto_pm is set), the routine
- * checks first to make sure that neither the device itself or any of its
- * active interfaces is in use (pm_usage_cnt is greater than 0).  If they
- * are, the autosuspend fails.
+ * If an autosuspend is in progress the routine checks first to make sure
+ * that neither the device itself nor any of its active interfaces is in use
+ * (pm_usage_cnt is greater than 0).  If they are, the autosuspend fails.
  *
  * If the suspend succeeds, the routine recursively queues an autosuspend
  * request for @udev's parent device, thereby propagating the change up
@@ -1157,7 +1178,7 @@
 
 	udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
 
-	if (udev->auto_pm) {
+	if (msg.event & PM_EVENT_AUTO) {
 		status = autosuspend_check(udev, 0);
 		if (status < 0)
 			goto done;
@@ -1177,13 +1198,16 @@
 
 	/* If the suspend failed, resume interfaces that did get suspended */
 	if (status != 0) {
+		pm_message_t msg2;
+
+		msg2.event = msg.event ^ (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
 		while (--i >= 0) {
 			intf = udev->actconfig->interface[i];
-			usb_resume_interface(udev, intf, 0);
+			usb_resume_interface(udev, intf, msg2, 0);
 		}
 
 		/* Try another autosuspend when the interfaces aren't busy */
-		if (udev->auto_pm)
+		if (msg.event & PM_EVENT_AUTO)
 			autosuspend_check(udev, status == -EBUSY);
 
 	/* If the suspend succeeded then prevent any more URB submissions,
@@ -1213,6 +1237,7 @@
 /**
  * usb_resume_both - resume a USB device and its interfaces
  * @udev: the usb_device to resume
+ * @msg: Power Management message describing this state transition
  *
  * This is the central routine for resuming USB devices.  It calls the
  * the resume method for @udev and then calls the resume methods for all
@@ -1238,7 +1263,7 @@
  *
  * This routine can run only in process context.
  */
-static int usb_resume_both(struct usb_device *udev)
+static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
 {
 	int			status = 0;
 	int			i;
@@ -1254,14 +1279,15 @@
 
 	/* Propagate the resume up the tree, if necessary */
 	if (udev->state == USB_STATE_SUSPENDED) {
-		if (udev->auto_pm && udev->autoresume_disabled) {
+		if ((msg.event & PM_EVENT_AUTO) &&
+				udev->autoresume_disabled) {
 			status = -EPERM;
 			goto done;
 		}
 		if (parent) {
 			status = usb_autoresume_device(parent);
 			if (status == 0) {
-				status = usb_resume_device(udev);
+				status = usb_resume_device(udev, msg);
 				if (status || udev->state ==
 						USB_STATE_NOTATTACHED) {
 					usb_autosuspend_device(parent);
@@ -1284,15 +1310,16 @@
 			/* We can't progagate beyond the USB subsystem,
 			 * so if a root hub's controller is suspended
 			 * then we're stuck. */
-			status = usb_resume_device(udev);
+			status = usb_resume_device(udev, msg);
 		}
 	} else if (udev->reset_resume)
-		status = usb_resume_device(udev);
+		status = usb_resume_device(udev, msg);
 
 	if (status == 0 && udev->actconfig) {
 		for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
 			intf = udev->actconfig->interface[i];
-			usb_resume_interface(udev, intf, udev->reset_resume);
+			usb_resume_interface(udev, intf, msg,
+					udev->reset_resume);
 		}
 	}
 
@@ -1320,13 +1347,13 @@
 		udev->last_busy = jiffies;
 	if (inc_usage_cnt >= 0 && udev->pm_usage_cnt > 0) {
 		if (udev->state == USB_STATE_SUSPENDED)
-			status = usb_resume_both(udev);
+			status = usb_resume_both(udev, PMSG_AUTO_RESUME);
 		if (status != 0)
 			udev->pm_usage_cnt -= inc_usage_cnt;
 		else if (inc_usage_cnt)
 			udev->last_busy = jiffies;
 	} else if (inc_usage_cnt <= 0 && udev->pm_usage_cnt <= 0) {
-		status = usb_suspend_both(udev, PMSG_SUSPEND);
+		status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
 	}
 	usb_pm_unlock(udev);
 	return status;
@@ -1341,6 +1368,19 @@
 	usb_autopm_do_device(udev, 0);
 }
 
+/* usb_autoresume_work - callback routine to autoresume a USB device */
+void usb_autoresume_work(struct work_struct *work)
+{
+	struct usb_device *udev =
+		container_of(work, struct usb_device, autoresume);
+
+	/* Wake it up, let the drivers do their thing, and then put it
+	 * back to sleep.
+	 */
+	if (usb_autopm_do_device(udev, 1) == 0)
+		usb_autopm_do_device(udev, -1);
+}
+
 /**
  * usb_autosuspend_device - delayed autosuspend of a USB device and its interfaces
  * @udev: the usb_device to autosuspend
@@ -1437,13 +1477,14 @@
 		udev->last_busy = jiffies;
 		if (inc_usage_cnt >= 0 && intf->pm_usage_cnt > 0) {
 			if (udev->state == USB_STATE_SUSPENDED)
-				status = usb_resume_both(udev);
+				status = usb_resume_both(udev,
+						PMSG_AUTO_RESUME);
 			if (status != 0)
 				intf->pm_usage_cnt -= inc_usage_cnt;
 			else
 				udev->last_busy = jiffies;
 		} else if (inc_usage_cnt <= 0 && intf->pm_usage_cnt <= 0) {
-			status = usb_suspend_both(udev, PMSG_SUSPEND);
+			status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
 		}
 	}
 	usb_pm_unlock(udev);
@@ -1492,6 +1533,45 @@
 EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
 
 /**
+ * usb_autopm_put_interface_async - decrement a USB interface's PM-usage counter
+ * @intf: the usb_interface whose counter should be decremented
+ *
+ * This routine does essentially the same thing as
+ * usb_autopm_put_interface(): it decrements @intf's usage counter and
+ * queues a delayed autosuspend request if the counter is <= 0.  The
+ * difference is that it does not acquire the device's pm_mutex;
+ * callers must handle all synchronization issues themselves.
+ *
+ * Typically a driver would call this routine during an URB's completion
+ * handler, if no more URBs were pending.
+ *
+ * This routine can run in atomic context.
+ */
+void usb_autopm_put_interface_async(struct usb_interface *intf)
+{
+	struct usb_device	*udev = interface_to_usbdev(intf);
+	int			status = 0;
+
+	if (intf->condition == USB_INTERFACE_UNBOUND) {
+		status = -ENODEV;
+	} else {
+		udev->last_busy = jiffies;
+		--intf->pm_usage_cnt;
+		if (udev->autosuspend_disabled || udev->autosuspend_delay < 0)
+			status = -EPERM;
+		else if (intf->pm_usage_cnt <= 0 &&
+				!timer_pending(&udev->autosuspend.timer)) {
+			queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
+					round_jiffies_up_relative(
+						udev->autosuspend_delay));
+		}
+	}
+	dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
+			__func__, status, intf->pm_usage_cnt);
+}
+EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
+
+/**
  * usb_autopm_get_interface - increment a USB interface's PM-usage counter
  * @intf: the usb_interface whose counter should be incremented
  *
@@ -1537,6 +1617,37 @@
 EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
 
 /**
+ * usb_autopm_get_interface_async - increment a USB interface's PM-usage counter
+ * @intf: the usb_interface whose counter should be incremented
+ *
+ * This routine does much the same thing as
+ * usb_autopm_get_interface(): it increments @intf's usage counter and
+ * queues an autoresume request if the result is > 0.  The differences
+ * are that it does not acquire the device's pm_mutex (callers must
+ * handle all synchronization issues themselves), and it does not
+ * autoresume the device directly (it only queues a request).  After a
+ * successful call, the device will generally not yet be resumed.
+ *
+ * This routine can run in atomic context.
+ */
+int usb_autopm_get_interface_async(struct usb_interface *intf)
+{
+	struct usb_device	*udev = interface_to_usbdev(intf);
+	int			status = 0;
+
+	if (intf->condition == USB_INTERFACE_UNBOUND)
+		status = -ENODEV;
+	else if (udev->autoresume_disabled)
+		status = -EPERM;
+	else if (++intf->pm_usage_cnt > 0 && udev->state == USB_STATE_SUSPENDED)
+		queue_work(ksuspend_usb_wq, &udev->autoresume);
+	dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
+			__func__, status, intf->pm_usage_cnt);
+	return status;
+}
+EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async);
+
+/**
  * usb_autopm_set_interface - set a USB interface's autosuspend state
  * @intf: the usb_interface whose state should be set
  *
@@ -1563,6 +1674,9 @@
 void usb_autosuspend_work(struct work_struct *work)
 {}
 
+void usb_autoresume_work(struct work_struct *work)
+{}
+
 #endif /* CONFIG_USB_SUSPEND */
 
 /**
@@ -1595,6 +1709,7 @@
 /**
  * usb_external_resume_device - external resume of a USB device and its interfaces
  * @udev: the usb_device to resume
+ * @msg: Power Management message describing this state transition
  *
  * This routine handles external resume requests: ones not generated
  * internally by a USB driver (autoresume) but rather coming from the user
@@ -1603,13 +1718,13 @@
  *
  * The caller must hold @udev's device lock.
  */
-int usb_external_resume_device(struct usb_device *udev)
+int usb_external_resume_device(struct usb_device *udev, pm_message_t msg)
 {
 	int	status;
 
 	usb_pm_lock(udev);
 	udev->auto_pm = 0;
-	status = usb_resume_both(udev);
+	status = usb_resume_both(udev, msg);
 	udev->last_busy = jiffies;
 	usb_pm_unlock(udev);
 	if (status == 0)
@@ -1622,7 +1737,7 @@
 	return status;
 }
 
-int usb_suspend(struct device *dev, pm_message_t message)
+int usb_suspend(struct device *dev, pm_message_t msg)
 {
 	struct usb_device	*udev;
 
@@ -1641,10 +1756,10 @@
 	}
 
 	udev->skip_sys_resume = 0;
-	return usb_external_suspend_device(udev, message);
+	return usb_external_suspend_device(udev, msg);
 }
 
-int usb_resume(struct device *dev)
+int usb_resume(struct device *dev, pm_message_t msg)
 {
 	struct usb_device	*udev;
 
@@ -1656,7 +1771,7 @@
 	 */
 	if (udev->skip_sys_resume)
 		return 0;
-	return usb_external_resume_device(udev);
+	return usb_external_resume_device(udev, msg);
 }
 
 #endif /* CONFIG_PM */
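
The usb_autopm_get_interface_async() and usb_autopm_put_interface_async() helpers added above only adjust the usage counter and queue work, so they may be called where the synchronous variants cannot. A hedged sketch of the pairing their kerneldoc describes; every example_* name is hypothetical, and because the queued autoresume has not run yet, a submission right after the get may still fail until the device actually wakes:

    #include <linux/usb.h>

    static void example_write_complete(struct urb *urb)
    {
    	struct usb_interface *intf = urb->context;	/* assumes context == intf */

    	/* Completion handlers run in atomic context; only the _async
    	 * variant is safe here, since it merely queues the autosuspend. */
    	usb_autopm_put_interface_async(intf);
    }

    static int example_start_io(struct usb_interface *intf, struct urb *urb)
    {
    	int rc;

    	/* Also atomic-safe: queues an autoresume if the device is
    	 * suspended instead of resuming it synchronously. */
    	rc = usb_autopm_get_interface_async(intf);
    	if (rc < 0)
    		return rc;

    	rc = usb_submit_urb(urb, GFP_ATOMIC);
    	if (rc)
    		usb_autopm_put_interface_async(intf);
    	return rc;
    }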
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 946fae4..e1710f2 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -276,7 +276,7 @@
 	kfree(ep_dev);
 }
 
-int usb_create_ep_files(struct device *parent,
+int usb_create_ep_devs(struct device *parent,
 			struct usb_host_endpoint *endpoint,
 			struct usb_device *udev)
 {
@@ -340,7 +340,7 @@
 	return retval;
 }
 
-void usb_remove_ep_files(struct usb_host_endpoint *endpoint)
+void usb_remove_ep_devs(struct usb_host_endpoint *endpoint)
 {
 	struct ep_device *ep_dev = endpoint->ep_dev;
 
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 7e912f2..30ecac3 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -200,18 +200,18 @@
 	 * interfaces manually by doing a bus (or "global") suspend.
 	 */
 	if (!udev->parent)
-		rc = hcd_bus_suspend(udev);
+		rc = hcd_bus_suspend(udev, msg);
 
 	/* Non-root devices don't need to do anything for FREEZE or PRETHAW */
 	else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
 		rc = 0;
 	else
-		rc = usb_port_suspend(udev);
+		rc = usb_port_suspend(udev, msg);
 
 	return rc;
 }
 
-static int generic_resume(struct usb_device *udev)
+static int generic_resume(struct usb_device *udev, pm_message_t msg)
 {
 	int rc;
 
@@ -221,9 +221,9 @@
 	 * interfaces manually by doing a bus (or "global") resume.
 	 */
 	if (!udev->parent)
-		rc = hcd_bus_resume(udev);
+		rc = hcd_bus_resume(udev, msg);
 	else
-		rc = usb_port_resume(udev);
+		rc = usb_port_resume(udev, msg);
 	return rc;
 }
 
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 5b87ae7..507741e 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -128,6 +128,7 @@
 	}
 
 	pci_set_master(dev);
+	device_set_wakeup_enable(&dev->dev, 1);
 
 	retval = usb_add_hcd(hcd, dev->irq, IRQF_DISABLED | IRQF_SHARED);
 	if (retval != 0)
@@ -191,17 +192,15 @@
 /**
  * usb_hcd_pci_suspend - power management suspend of a PCI-based HCD
  * @dev: USB Host Controller being suspended
- * @message: semantics in flux
+ * @message: Power Management message describing this state transition
  *
- * Store this function in the HCD's struct pci_driver as suspend().
+ * Store this function in the HCD's struct pci_driver as .suspend.
  */
 int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t message)
 {
-	struct usb_hcd		*hcd;
+	struct usb_hcd		*hcd = pci_get_drvdata(dev);
 	int			retval = 0;
-	int			has_pci_pm;
-
-	hcd = pci_get_drvdata(dev);
+	int			wake, w;
 
 	/* Root hub suspend should have stopped all downstream traffic,
 	 * and all bus master traffic.  And done so for both the interface
@@ -212,8 +211,15 @@
 	 * otherwise the swsusp will save (and restore) garbage state.
 	 */
 	if (!(hcd->state == HC_STATE_SUSPENDED ||
-			hcd->state == HC_STATE_HALT))
-		return -EBUSY;
+			hcd->state == HC_STATE_HALT)) {
+		dev_warn(&dev->dev, "Root hub is not suspended\n");
+		retval = -EBUSY;
+		goto done;
+	}
+
+	/* We might already be suspended (runtime PM -- not yet written) */
+	if (dev->current_state != PCI_D0)
+		goto done;
 
 	if (hcd->driver->pci_suspend) {
 		retval = hcd->driver->pci_suspend(hcd, message);
@@ -221,49 +227,60 @@
 		if (retval)
 			goto done;
 	}
+
 	synchronize_irq(dev->irq);
 
-	/* FIXME until the generic PM interfaces change a lot more, this
-	 * can't use PCI D1 and D2 states.  For example, the confusion
-	 * between messages and states will need to vanish, and messages
-	 * will need to provide a target system state again.
-	 *
-	 * It'll be important to learn characteristics of the target state,
-	 * especially on embedded hardware where the HCD will often be in
-	 * charge of an external VBUS power supply and one or more clocks.
-	 * Some target system states will leave them active; others won't.
-	 * (With PCI, that's often handled by platform BIOS code.)
+	/* Don't fail on error to enable wakeup.  We rely on pci code
+	 * to reject requests the hardware can't implement, rather
+	 * than coding the same thing.
 	 */
-
-	/* even when the PCI layer rejects some of the PCI calls
-	 * below, HCs can try global suspend and reduce DMA traffic.
-	 * PM-sensitive HCDs may already have done this.
-	 */
-	has_pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);
+	wake = (hcd->state == HC_STATE_SUSPENDED &&
+			device_may_wakeup(&dev->dev));
+	w = pci_wake_from_d3(dev, wake);
+	if (w < 0)
+		wake = w;
+	dev_dbg(&dev->dev, "wakeup: %d\n", wake);
 
 	/* Downstream ports from this root hub should already be quiesced, so
 	 * there will be no DMA activity.  Now we can shut down the upstream
 	 * link (except maybe for PME# resume signaling) and enter some PCI
 	 * low power state, if the hardware allows.
 	 */
-	if (hcd->state == HC_STATE_SUSPENDED) {
+	pci_disable_device(dev);
+ done:
+	return retval;
+}
+EXPORT_SYMBOL_GPL(usb_hcd_pci_suspend);
 
-		/* no DMA or IRQs except when HC is active */
-		if (dev->current_state == PCI_D0) {
-			pci_save_state(dev);
-			pci_disable_device(dev);
-		}
+/**
+ * usb_hcd_pci_suspend_late - suspend a PCI-based HCD after IRQs are disabled
+ * @dev: USB Host Controller being suspended
+ * @message: Power Management message describing this state transition
+ *
+ * Store this function in the HCD's struct pci_driver as .suspend_late.
+ */
+int usb_hcd_pci_suspend_late(struct pci_dev *dev, pm_message_t message)
+{
+	int			retval = 0;
+	int			has_pci_pm;
 
-		if (message.event == PM_EVENT_FREEZE ||
-				message.event == PM_EVENT_PRETHAW) {
-			dev_dbg(hcd->self.controller, "--> no state change\n");
-			goto done;
-		}
+	/* We might already be suspended (runtime PM -- not yet written) */
+	if (dev->current_state != PCI_D0)
+		goto done;
 
-		if (!has_pci_pm) {
-			dev_dbg(hcd->self.controller, "--> PCI D0/legacy\n");
-			goto done;
-		}
+	pci_save_state(dev);
+
+	/* Don't change state if we don't need to */
+	if (message.event == PM_EVENT_FREEZE ||
+			message.event == PM_EVENT_PRETHAW) {
+		dev_dbg(&dev->dev, "--> no state change\n");
+		goto done;
+	}
+
+	has_pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);
+	if (!has_pci_pm) {
+		dev_dbg(&dev->dev, "--> PCI D0 legacy\n");
+	} else {
 
 		/* NOTE:  dev->current_state becomes nonzero only here, and
 		 * only for devices that support PCI PM.  Also, exiting
@@ -273,35 +290,16 @@
 		retval = pci_set_power_state(dev, PCI_D3hot);
 		suspend_report_result(pci_set_power_state, retval);
 		if (retval == 0) {
-			int wake = device_can_wakeup(&hcd->self.root_hub->dev);
-
-			wake = wake && device_may_wakeup(hcd->self.controller);
-
-			dev_dbg(hcd->self.controller, "--> PCI D3%s\n",
-					wake ? "/wakeup" : "");
-
-			/* Ignore these return values.  We rely on pci code to
-			 * reject requests the hardware can't implement, rather
-			 * than coding the same thing.
-			 */
-			(void) pci_enable_wake(dev, PCI_D3hot, wake);
-			(void) pci_enable_wake(dev, PCI_D3cold, wake);
+			dev_dbg(&dev->dev, "--> PCI D3\n");
 		} else {
 			dev_dbg(&dev->dev, "PCI D3 suspend fail, %d\n",
 					retval);
-			(void) usb_hcd_pci_resume(dev);
+			pci_restore_state(dev);
 		}
-
-	} else if (hcd->state != HC_STATE_HALT) {
-		dev_dbg(hcd->self.controller, "hcd state %d; not suspended\n",
-			hcd->state);
-		WARN_ON(1);
-		retval = -EINVAL;
 	}
 
-done:
-	if (retval == 0) {
 #ifdef CONFIG_PPC_PMAC
+	if (retval == 0) {
 		/* Disable ASIC clocks for USB */
 		if (machine_is(powermac)) {
 			struct device_node	*of_node;
@@ -311,30 +309,24 @@
 				pmac_call_feature(PMAC_FTR_USB_ENABLE,
 							of_node, 0, 0);
 		}
-#endif
 	}
+#endif
 
+ done:
 	return retval;
 }
-EXPORT_SYMBOL_GPL(usb_hcd_pci_suspend);
+EXPORT_SYMBOL_GPL(usb_hcd_pci_suspend_late);
 
 /**
- * usb_hcd_pci_resume - power management resume of a PCI-based HCD
+ * usb_hcd_pci_resume_early - resume a PCI-based HCD before IRQs are enabled
  * @dev: USB Host Controller being resumed
  *
- * Store this function in the HCD's struct pci_driver as resume().
+ * Store this function in the HCD's struct pci_driver as .resume_early.
  */
-int usb_hcd_pci_resume(struct pci_dev *dev)
+int usb_hcd_pci_resume_early(struct pci_dev *dev)
 {
-	struct usb_hcd		*hcd;
-	int			retval;
-
-	hcd = pci_get_drvdata(dev);
-	if (hcd->state != HC_STATE_SUSPENDED) {
-		dev_dbg(hcd->self.controller,
-				"can't resume, not suspended!\n");
-		return 0;
-	}
+	int		retval = 0;
+	pci_power_t	state = dev->current_state;
 
 #ifdef CONFIG_PPC_PMAC
 	/* Reenable ASIC clocks for USB */
@@ -352,7 +344,7 @@
 	 * calls "standby", "suspend to RAM", and so on).  There are also
 	 * dirty cases when swsusp fakes a suspend in "shutdown" mode.
 	 */
-	if (dev->current_state != PCI_D0) {
+	if (state != PCI_D0) {
 #ifdef	DEBUG
 		int	pci_pm;
 		u16	pmcr;
@@ -364,8 +356,7 @@
 			/* Clean case:  power to USB and to HC registers was
 			 * maintained; remote wakeup is easy.
 			 */
-			dev_dbg(hcd->self.controller, "resume from PCI D%d\n",
-					pmcr);
+			dev_dbg(&dev->dev, "resume from PCI D%d\n", pmcr);
 		} else {
 			/* Clean:  HC lost Vcc power, D0 uninitialized
 			 *   + Vaux may have preserved port and transceiver
@@ -376,32 +367,55 @@
 			 *   + after BIOS init
 			 *   + after Linux init (HCD statically linked)
 			 */
-			dev_dbg(hcd->self.controller,
-				"PCI D0, from previous PCI D%d\n",
-				dev->current_state);
+			dev_dbg(&dev->dev, "resume from previous PCI D%d\n",
+					state);
 		}
 #endif
-		/* yes, ignore these results too... */
-		(void) pci_enable_wake(dev, dev->current_state, 0);
-		(void) pci_enable_wake(dev, PCI_D3cold, 0);
+
+		retval = pci_set_power_state(dev, PCI_D0);
 	} else {
 		/* Same basic cases: clean (powered/not), dirty */
-		dev_dbg(hcd->self.controller, "PCI legacy resume\n");
+		dev_dbg(&dev->dev, "PCI legacy resume\n");
 	}
 
-	/* NOTE:  the PCI API itself is asymmetric here.  We don't need to
-	 * pci_set_power_state(PCI_D0) since that's part of re-enabling;
-	 * but that won't re-enable bus mastering.  Yet pci_disable_device()
-	 * explicitly disables bus mastering...
-	 */
+	if (retval < 0)
+		dev_err(&dev->dev, "can't resume: %d\n", retval);
+	else
+		pci_restore_state(dev);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(usb_hcd_pci_resume_early);
+
+/**
+ * usb_hcd_pci_resume - power management resume of a PCI-based HCD
+ * @dev: USB Host Controller being resumed
+ *
+ * Store this function in the HCD's struct pci_driver as .resume.
+ */
+int usb_hcd_pci_resume(struct pci_dev *dev)
+{
+	struct usb_hcd		*hcd;
+	int			retval;
+
+	hcd = pci_get_drvdata(dev);
+	if (hcd->state != HC_STATE_SUSPENDED) {
+		dev_dbg(hcd->self.controller,
+				"can't resume, not suspended!\n");
+		return 0;
+	}
+
 	retval = pci_enable_device(dev);
 	if (retval < 0) {
-		dev_err(hcd->self.controller,
-			"can't re-enable after resume, %d!\n", retval);
+		dev_err(&dev->dev, "can't re-enable after resume, %d!\n",
+				retval);
 		return retval;
 	}
+
 	pci_set_master(dev);
-	pci_restore_state(dev);
+
+	/* yes, ignore this result too... */
+	(void) pci_wake_from_d3(dev, 0);
 
 	clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
 
@@ -413,7 +427,6 @@
 			usb_hc_died(hcd);
 		}
 	}
-
 	return retval;
 }
 EXPORT_SYMBOL_GPL(usb_hcd_pci_resume);
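
With usb_hcd_pci_suspend() split into .suspend and .suspend_late halves, and the matching .resume_early added, the kerneldoc above spells out where each belongs in struct pci_driver. A sketch of the resulting glue registration, assuming a hypothetical example_pci_ids table; the usb_hcd_pci_* callbacks are the real exports from this file:

    static struct pci_driver example_hcd_pci_driver = {
    	.name		= "example_hcd",
    	.id_table	= example_pci_ids,		/* hypothetical ID table */
    	.probe		= usb_hcd_pci_probe,
    	.remove		= usb_hcd_pci_remove,
    #ifdef CONFIG_PM
    	.suspend	= usb_hcd_pci_suspend,		/* IRQs still enabled */
    	.suspend_late	= usb_hcd_pci_suspend_late,	/* after IRQs are disabled */
    	.resume_early	= usb_hcd_pci_resume_early,	/* before IRQs are re-enabled */
    	.resume		= usb_hcd_pci_resume,
    #endif
    };

Splitting the callbacks this way lets the D3 transition and state save happen after interrupts are off, while the root-hub quiesce check and wakeup arming still run with IRQs enabled.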
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index e1b4262..3c711db5 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1010,7 +1010,7 @@
 	spin_lock(&hcd_urb_list_lock);
 
 	/* Check that the URB isn't being killed */
-	if (unlikely(urb->reject)) {
+	if (unlikely(atomic_read(&urb->reject))) {
 		rc = -EPERM;
 		goto done;
 	}
@@ -1340,7 +1340,7 @@
 		INIT_LIST_HEAD(&urb->urb_list);
 		atomic_dec(&urb->use_count);
 		atomic_dec(&urb->dev->urbnum);
-		if (urb->reject)
+		if (atomic_read(&urb->reject))
 			wake_up(&usb_kill_urb_queue);
 		usb_put_urb(urb);
 	}
@@ -1444,7 +1444,7 @@
 	urb->status = status;
 	urb->complete (urb);
 	atomic_dec (&urb->use_count);
-	if (unlikely (urb->reject))
+	if (unlikely(atomic_read(&urb->reject)))
 		wake_up (&usb_kill_urb_queue);
 	usb_put_urb (urb);
 }
@@ -1573,14 +1573,14 @@
 
 #ifdef	CONFIG_PM
 
-int hcd_bus_suspend(struct usb_device *rhdev)
+int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
 {
 	struct usb_hcd	*hcd = container_of(rhdev->bus, struct usb_hcd, self);
 	int		status;
 	int		old_state = hcd->state;
 
 	dev_dbg(&rhdev->dev, "bus %s%s\n",
-			rhdev->auto_pm ? "auto-" : "", "suspend");
+			(msg.event & PM_EVENT_AUTO ? "auto-" : ""), "suspend");
 	if (!hcd->driver->bus_suspend) {
 		status = -ENOENT;
 	} else {
@@ -1598,14 +1598,14 @@
 	return status;
 }
 
-int hcd_bus_resume(struct usb_device *rhdev)
+int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
 {
 	struct usb_hcd	*hcd = container_of(rhdev->bus, struct usb_hcd, self);
 	int		status;
 	int		old_state = hcd->state;
 
 	dev_dbg(&rhdev->dev, "usb %s%s\n",
-			rhdev->auto_pm ? "auto-" : "", "resume");
+			(msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume");
 	if (!hcd->driver->bus_resume)
 		return -ENOENT;
 	if (hcd->state == HC_STATE_RUNNING)
@@ -1638,7 +1638,7 @@
 
 	usb_lock_device(udev);
 	usb_mark_last_busy(udev);
-	usb_external_resume_device(udev);
+	usb_external_resume_device(udev, PMSG_REMOTE_RESUME);
 	usb_unlock_device(udev);
 }
 
@@ -2028,7 +2028,7 @@
 
 /*-------------------------------------------------------------------------*/
 
-#if defined(CONFIG_USB_MON)
+#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
 
 struct usb_mon_operations *mon_ops;
 
@@ -2064,4 +2064,4 @@
 }
 EXPORT_SYMBOL_GPL (usb_mon_deregister);
 
-#endif /* CONFIG_USB_MON */
+#endif /* CONFIG_USB_MON || CONFIG_USB_MON_MODULE */
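
The hcd.c hunks above now read urb->reject through atomic_read(), which goes with urb->reject presumably becoming an atomic_t in hunks outside this excerpt, so the poison flag can be tested without a shared lock. A standalone C11 restatement of the pattern; fake_urb and the return values are illustrative only:

    #include <stdatomic.h>
    #include <stdio.h>

    struct fake_urb {
    	atomic_int reject;		/* stands in for the kernel's atomic_t */
    };

    static int submit(struct fake_urb *urb)
    {
    	if (atomic_load(&urb->reject))	/* kernel: atomic_read(&urb->reject) */
    		return -1;		/* kernel: -EPERM */
    	return 0;
    }

    int main(void)
    {
    	struct fake_urb u;

    	atomic_init(&u.reject, 0);
    	printf("before poison: %d\n", submit(&u));
    	atomic_store(&u.reject, 1);	/* kernel: set when killing/poisoning */
    	printf("after poison:  %d\n", submit(&u));
    	return 0;
    }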
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index 9465e70..572d2cf 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -16,6 +16,8 @@
  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#ifndef __USB_CORE_HCD_H
+#define __USB_CORE_HCD_H
 
 #ifdef __KERNEL__
 
@@ -254,7 +256,9 @@
 extern void usb_hcd_pci_remove(struct pci_dev *dev);
 
 #ifdef CONFIG_PM
-extern int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t state);
+extern int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t msg);
+extern int usb_hcd_pci_suspend_late(struct pci_dev *dev, pm_message_t msg);
+extern int usb_hcd_pci_resume_early(struct pci_dev *dev);
 extern int usb_hcd_pci_resume(struct pci_dev *dev);
 #endif /* CONFIG_PM */
 
@@ -386,8 +390,8 @@
 #ifdef CONFIG_PM
 extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
 extern void usb_root_hub_lost_power(struct usb_device *rhdev);
-extern int hcd_bus_suspend(struct usb_device *rhdev);
-extern int hcd_bus_resume(struct usb_device *rhdev);
+extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg);
+extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg);
 #else
 static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd)
 {
@@ -419,7 +423,7 @@
 
 /*-------------------------------------------------------------------------*/
 
-#if defined(CONFIG_USB_MON)
+#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
 
 struct usb_mon_operations {
 	void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
@@ -461,7 +465,7 @@
 static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
 		int status) {}
 
-#endif /* CONFIG_USB_MON */
+#endif /* CONFIG_USB_MON || CONFIG_USB_MON_MODULE */
 
 /*-------------------------------------------------------------------------*/
 
@@ -490,3 +494,5 @@
 extern unsigned long usb_hcds_loaded;
 
 #endif /* __KERNEL__ */
+
+#endif /* __USB_CORE_HCD_H */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b19cbfc..94d5ee2 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -107,7 +107,9 @@
 /* define initial 64-byte descriptor request timeout in milliseconds */
 static int initial_descriptor_timeout = USB_CTRL_GET_TIMEOUT;
 module_param(initial_descriptor_timeout, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(initial_descriptor_timeout, "initial 64-byte descriptor request timeout in milliseconds (default 5000 - 5.0 seconds)");
+MODULE_PARM_DESC(initial_descriptor_timeout,
+		"initial 64-byte descriptor request timeout in milliseconds "
+		"(default 5000 - 5.0 seconds)");
 
 /*
  * As of 2.6.10 we introduce a new USB device initialization scheme which
@@ -1136,8 +1138,8 @@
 	hdev = interface_to_usbdev(intf);
 
 	if (hdev->level == MAX_TOPO_LEVEL) {
-		dev_err(&intf->dev, "Unsupported bus topology: "
-				"hub nested too deep\n");
+		dev_err(&intf->dev,
+			"Unsupported bus topology: hub nested too deep\n");
 		return -E2BIG;
 	}
 
@@ -1374,8 +1376,9 @@
 		usb_autosuspend_device(udev->parent);
 	usb_pm_unlock(udev);
 
-	/* Stop any autosuspend requests already submitted */
-	cancel_rearming_delayed_work(&udev->autosuspend);
+	/* Stop any autosuspend or autoresume requests already submitted */
+	cancel_delayed_work_sync(&udev->autosuspend);
+	cancel_work_sync(&udev->autoresume);
 }
 
 #else
@@ -1434,17 +1437,12 @@
 	usb_disable_device(udev, 0);
 	usb_hcd_synchronize_unlinks(udev);
 
+	usb_remove_ep_devs(&udev->ep0);
 	usb_unlock_device(udev);
 
-	/* Remove the device-specific files from sysfs.  This must be
-	 * done with udev unlocked, because some of the attribute
-	 * routines try to acquire the device lock.
-	 */
-	usb_remove_sysfs_dev_files(udev);
-
 	/* Unregister the device.  The device driver is responsible
-	 * for removing the device files from usbfs and sysfs and for
-	 * de-configuring the device.
+	 * for de-configuring the device and invoking the remove-device
+	 * notifier chain (used by usbfs and possibly others).
 	 */
 	device_del(&udev->dev);
 
@@ -1476,8 +1474,8 @@
 	dev_info(&udev->dev, "New USB device found, idVendor=%04x, idProduct=%04x\n",
 		le16_to_cpu(udev->descriptor.idVendor),
 		le16_to_cpu(udev->descriptor.idProduct));
-	dev_info(&udev->dev, "New USB device strings: Mfr=%d, Product=%d, "
-		"SerialNumber=%d\n",
+	dev_info(&udev->dev,
+		"New USB device strings: Mfr=%d, Product=%d, SerialNumber=%d\n",
 		udev->descriptor.iManufacturer,
 		udev->descriptor.iProduct,
 		udev->descriptor.iSerialNumber);
@@ -1542,7 +1540,7 @@
 					 * customize to match your product.
 					 */
 					dev_info(&udev->dev,
-						"can't set HNP mode; %d\n",
+						"can't set HNP mode: %d\n",
 						err);
 					bus->b_hnp_enable = 0;
 				}
@@ -1556,7 +1554,7 @@
 		 * (Includes HNP test device.)
 		 */
 		if (udev->bus->b_hnp_enable || udev->bus->is_b_host) {
-			err = usb_port_suspend(udev);
+			err = usb_port_suspend(udev, PMSG_SUSPEND);
 			if (err < 0)
 				dev_dbg(&udev->dev, "HNP fail, %d\n", err);
 		}
@@ -1635,6 +1633,10 @@
 {
 	int err;
 
+	/* Increment the parent's count of unsuspended children */
+	if (udev->parent)
+		usb_autoresume_device(udev->parent);
+
 	usb_detect_quirks(udev);		/* Determine quirks */
 	err = usb_configure_device(udev);	/* detect & probe dev/intfs */
 	if (err < 0)
@@ -1643,13 +1645,12 @@
 	udev->dev.devt = MKDEV(USB_DEVICE_MAJOR,
 			(((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
 
-	/* Increment the parent's count of unsuspended children */
-	if (udev->parent)
-		usb_autoresume_device(udev->parent);
+	/* Tell the world! */
+	announce_device(udev);
 
 	/* Register the device.  The device driver is responsible
-	 * for adding the device files to sysfs and for configuring
-	 * the device.
+	 * for configuring the device and invoking the add-device
+	 * notifier chain (used by usbfs and possibly others).
 	 */
 	err = device_add(&udev->dev);
 	if (err) {
@@ -1657,15 +1658,12 @@
 		goto fail;
 	}
 
-	/* put device-specific files into sysfs */
-	usb_create_sysfs_dev_files(udev);
-
-	/* Tell the world! */
-	announce_device(udev);
+	(void) usb_create_ep_devs(&udev->dev, &udev->ep0, udev);
 	return err;
 
 fail:
 	usb_set_device_state(udev, USB_STATE_NOTATTACHED);
+	usb_stop_pm(udev);
 	return err;
 }
 
@@ -1982,7 +1980,7 @@
  *
  * Returns 0 on success, else negative errno.
  */
-int usb_port_suspend(struct usb_device *udev)
+int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 {
 	struct usb_hub	*hub = hdev_to_hub(udev->parent);
 	int		port1 = udev->portnum;
@@ -2021,7 +2019,7 @@
 	} else {
 		/* device has up to 10 msec to fully suspend */
 		dev_dbg(&udev->dev, "usb %ssuspend\n",
-				udev->auto_pm ? "auto-" : "");
+				(msg.event & PM_EVENT_AUTO ? "auto-" : ""));
 		usb_set_device_state(udev, USB_STATE_SUSPENDED);
 		msleep(10);
 	}
@@ -2045,8 +2043,8 @@
 	u16	devstatus;
 
 	/* caller owns the udev device lock */
-	dev_dbg(&udev->dev, "finish %sresume\n",
-			udev->reset_resume ? "reset-" : "");
+	dev_dbg(&udev->dev, "%s\n",
+		udev->reset_resume ? "finish reset-resume" : "finish resume");
 
 	/* usb ch9 identifies four variants of SUSPENDED, based on what
 	 * state the device resumes to.  Linux currently won't see the
@@ -2098,8 +2096,9 @@
 					NULL, 0,
 					USB_CTRL_SET_TIMEOUT);
 			if (status)
-				dev_dbg(&udev->dev, "disable remote "
-					"wakeup, status %d\n", status);
+				dev_dbg(&udev->dev,
+					"disable remote wakeup, status %d\n",
+					status);
 		}
 		status = 0;
 	}
@@ -2140,7 +2139,7 @@
  *
  * Returns 0 on success, else negative errno.
  */
-int usb_port_resume(struct usb_device *udev)
+int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 {
 	struct usb_hub	*hub = hdev_to_hub(udev->parent);
 	int		port1 = udev->portnum;
@@ -2165,7 +2164,7 @@
 	} else {
 		/* drive resume for at least 20 msec */
 		dev_dbg(&udev->dev, "usb %sresume\n",
-				udev->auto_pm ? "auto-" : "");
+				(msg.event & PM_EVENT_AUTO ? "auto-" : ""));
 		msleep(25);
 
 		/* Virtual root hubs can trigger on GET_PORT_STATUS to
@@ -2206,7 +2205,7 @@
 	if (udev->state == USB_STATE_SUSPENDED) {
 		dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
 		usb_mark_last_busy(udev);
-		status = usb_external_resume_device(udev);
+		status = usb_external_resume_device(udev, PMSG_REMOTE_RESUME);
 	}
 	return status;
 }
@@ -2215,14 +2214,14 @@
 
 /* When CONFIG_USB_SUSPEND isn't set, we never suspend or resume any ports. */
 
-int usb_port_suspend(struct usb_device *udev)
+int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 {
 	return 0;
 }
 
 /* However we may need to do a reset-resume */
 
-int usb_port_resume(struct usb_device *udev)
+int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 {
 	struct usb_hub	*hub = hdev_to_hub(udev->parent);
 	int		port1 = udev->portnum;
@@ -2262,7 +2261,7 @@
 
 		udev = hdev->children [port1-1];
 		if (udev && udev->can_submit) {
-			if (!hdev->auto_pm)
+			if (!(msg.event & PM_EVENT_AUTO))
 				dev_dbg(&intf->dev, "port %d nyet suspended\n",
 						port1);
 			return -EBUSY;
@@ -2385,7 +2384,7 @@
 {
 	usb_disable_endpoint(udev, 0 + USB_DIR_IN);
 	usb_disable_endpoint(udev, 0 + USB_DIR_OUT);
-	usb_enable_endpoint(udev, &udev->ep0);
+	usb_enable_endpoint(udev, &udev->ep0, true);
 }
 EXPORT_SYMBOL_GPL(usb_ep0_reinit);
 
@@ -2582,9 +2581,9 @@
 				goto fail;
 			}
 			if (r) {
-				dev_err(&udev->dev, "device descriptor "
-						"read/%s, error %d\n",
-						"64", r);
+				dev_err(&udev->dev,
+					"device descriptor read/64, error %d\n",
+					r);
 				retval = -EMSGSIZE;
 				continue;
 			}
@@ -2621,9 +2620,9 @@
 
 		retval = usb_get_device_descriptor(udev, 8);
 		if (retval < 8) {
-			dev_err(&udev->dev, "device descriptor "
-					"read/%s, error %d\n",
-					"8", retval);
+			dev_err(&udev->dev,
+					"device descriptor read/8, error %d\n",
+					retval);
 			if (retval >= 0)
 				retval = -EMSGSIZE;
 		} else {
@@ -2650,8 +2649,8 @@
   
 	retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE);
 	if (retval < (signed)sizeof(udev->descriptor)) {
-		dev_err(&udev->dev, "device descriptor read/%s, error %d\n",
-			"all", retval);
+		dev_err(&udev->dev, "device descriptor read/all, error %d\n",
+			retval);
 		if (retval >= 0)
 			retval = -ENOMSG;
 		goto fail;
@@ -2719,9 +2718,9 @@
 		else
 			delta = 8;
 		if (delta > hub->mA_per_port)
-			dev_warn(&udev->dev, "%dmA is over %umA budget "
-					"for port %d!\n",
-					delta, hub->mA_per_port, port1);
+			dev_warn(&udev->dev,
+				 "%dmA is over %umA budget for port %d!\n",
+				 delta, hub->mA_per_port, port1);
 		remaining -= delta;
 	}
 	if (remaining < 0) {
@@ -3517,3 +3516,46 @@
 	return ret;
 }
 EXPORT_SYMBOL_GPL(usb_reset_device);
+
+
+/**
+ * usb_queue_reset_device - Reset a USB device from an atomic context
+ * @iface: USB interface belonging to the device to reset
+ *
+ * This function can be used to reset a USB device from an atomic
+ * context, where usb_reset_device() won't work (as it blocks).
+ *
+ * Doing a reset via this method is functionally equivalent to calling
+ * usb_reset_device(), except for the fact that it is delayed to a
+ * workqueue. This means that drivers bound to the device's other
+ * interfaces may be unbound, as may usbfs users in user space.
+ *
+ * Corner cases:
+ *
+ * - Scheduling two resets at the same time from two different drivers
+ *   attached to two different interfaces of the same device is
+ *   possible; depending on how the driver attached to each interface
+ *   handles ->pre_reset(), the second reset might happen or not.
+ *
+ * - If a driver is unbound and it had a pending reset, the reset will
+ *   be cancelled.
+ *
+ * - This function can be called from within .probe() and .disconnect().
+ *   On return from .disconnect(), any pending resets will be
+ *   cancelled.
+ *
+ * There is no need to lock/unlock the @reset_ws as schedule_work()
+ * does its own locking.
+ *
+ * NOTE: We don't do any reference count tracking because it is not
+ *     needed. The lifecycle of the work_struct is tied to the
+ *     usb_interface. Before destroying the interface we cancel the
+ *     work_struct, so the fact that the work_struct is queued and/or
+ *     running means the interface (and thus, the device) exists and
+ *     is referenced.
+ */
+void usb_queue_reset_device(struct usb_interface *iface)
+{
+	schedule_work(&iface->reset_ws);
+}
+EXPORT_SYMBOL_GPL(usb_queue_reset_device);
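
As an illustration (a sketch, not part of the patch): a typical caller of usb_queue_reset_device() is a URB completion handler, which runs in atomic context and so cannot call usb_reset_device() directly. The driver-side names below (example_rx_complete, the use of urb->context to hold the interface) are hypothetical.

static void example_rx_complete(struct urb *urb)
{
	struct usb_interface *intf = urb->context;	/* hypothetical: intf stored as context */

	if (urb->status == -EPROTO || urb->status == -EILSEQ) {
		/* hardware looks wedged: queue a reset, we cannot block here */
		usb_queue_reset_device(intf);
		return;
	}
	/* ... normal processing, then resubmit ... */
	usb_submit_urb(urb, GFP_ATOMIC);
}
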
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 6d1048f..de51667 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -18,6 +18,8 @@
 #include "hcd.h"	/* for usbcore internals */
 #include "usb.h"
 
+static void cancel_async_set_config(struct usb_device *udev);
+
 struct api_context {
 	struct completion	done;
 	int			status;
@@ -139,9 +141,9 @@
 
 	dr->bRequestType = requesttype;
 	dr->bRequest = request;
-	dr->wValue = cpu_to_le16p(&value);
-	dr->wIndex = cpu_to_le16p(&index);
-	dr->wLength = cpu_to_le16p(&size);
+	dr->wValue = cpu_to_le16(value);
+	dr->wIndex = cpu_to_le16(index);
+	dr->wLength = cpu_to_le16(size);
 
 	/* dbg("usb_control_msg"); */
 
@@ -1004,6 +1006,34 @@
 }
 EXPORT_SYMBOL_GPL(usb_clear_halt);
 
+static int create_intf_ep_devs(struct usb_interface *intf)
+{
+	struct usb_device *udev = interface_to_usbdev(intf);
+	struct usb_host_interface *alt = intf->cur_altsetting;
+	int i;
+
+	if (intf->ep_devs_created || intf->unregistering)
+		return 0;
+
+	for (i = 0; i < alt->desc.bNumEndpoints; ++i)
+		(void) usb_create_ep_devs(&intf->dev, &alt->endpoint[i], udev);
+	intf->ep_devs_created = 1;
+	return 0;
+}
+
+static void remove_intf_ep_devs(struct usb_interface *intf)
+{
+	struct usb_host_interface *alt = intf->cur_altsetting;
+	int i;
+
+	if (!intf->ep_devs_created)
+		return;
+
+	for (i = 0; i < alt->desc.bNumEndpoints; ++i)
+		usb_remove_ep_devs(&alt->endpoint[i]);
+	intf->ep_devs_created = 0;
+}
+
 /**
  * usb_disable_endpoint -- Disable an endpoint by address
  * @dev: the device whose endpoint is being disabled
@@ -1092,7 +1122,7 @@
 			dev_dbg(&dev->dev, "unregistering interface %s\n",
 				dev_name(&interface->dev));
 			interface->unregistering = 1;
-			usb_remove_sysfs_intf_files(interface);
+			remove_intf_ep_devs(interface);
 			device_del(&interface->dev);
 		}
 
@@ -1113,22 +1143,26 @@
  * usb_enable_endpoint - Enable an endpoint for USB communications
  * @dev: the device whose interface is being enabled
  * @ep: the endpoint
+ * @reset_toggle: flag to set the endpoint's toggle back to 0
  *
- * Resets the endpoint toggle, and sets dev->ep_{in,out} pointers.
+ * Resets the endpoint toggle if asked, and sets dev->ep_{in,out} pointers.
  * For control endpoints, both the input and output sides are handled.
  */
-void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep)
+void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep,
+		bool reset_toggle)
 {
 	int epnum = usb_endpoint_num(&ep->desc);
 	int is_out = usb_endpoint_dir_out(&ep->desc);
 	int is_control = usb_endpoint_xfer_control(&ep->desc);
 
 	if (is_out || is_control) {
-		usb_settoggle(dev, epnum, 1, 0);
+		if (reset_toggle)
+			usb_settoggle(dev, epnum, 1, 0);
 		dev->ep_out[epnum] = ep;
 	}
 	if (!is_out || is_control) {
-		usb_settoggle(dev, epnum, 0, 0);
+		if (reset_toggle)
+			usb_settoggle(dev, epnum, 0, 0);
 		dev->ep_in[epnum] = ep;
 	}
 	ep->enabled = 1;
@@ -1138,17 +1172,18 @@
  * usb_enable_interface - Enable all the endpoints for an interface
  * @dev: the device whose interface is being enabled
  * @intf: pointer to the interface descriptor
+ * @reset_toggles: flag to set the endpoints' toggles back to 0
  *
  * Enables all the endpoints for the interface's current altsetting.
  */
-static void usb_enable_interface(struct usb_device *dev,
-				 struct usb_interface *intf)
+void usb_enable_interface(struct usb_device *dev,
+		struct usb_interface *intf, bool reset_toggles)
 {
 	struct usb_host_interface *alt = intf->cur_altsetting;
 	int i;
 
 	for (i = 0; i < alt->desc.bNumEndpoints; ++i)
-		usb_enable_endpoint(dev, &alt->endpoint[i]);
+		usb_enable_endpoint(dev, &alt->endpoint[i], reset_toggles);
 }
 
 /**
@@ -1235,8 +1270,10 @@
 	 */
 
 	/* prevent submissions using previous endpoint settings */
-	if (iface->cur_altsetting != alt)
+	if (iface->cur_altsetting != alt) {
+		remove_intf_ep_devs(iface);
 		usb_remove_sysfs_intf_files(iface);
+	}
 	usb_disable_interface(dev, iface);
 
 	iface->cur_altsetting = alt;
@@ -1271,10 +1308,11 @@
 	 * during the SETUP stage - hence EP0 toggles are "don't care" here.
 	 * (Likewise, EP0 never "halts" on well designed devices.)
 	 */
-	usb_enable_interface(dev, iface);
-	if (device_is_registered(&iface->dev))
+	usb_enable_interface(dev, iface, true);
+	if (device_is_registered(&iface->dev)) {
 		usb_create_sysfs_intf_files(iface);
-
+		create_intf_ep_devs(iface);
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(usb_set_interface);
@@ -1334,7 +1372,6 @@
 		struct usb_interface *intf = config->interface[i];
 		struct usb_host_interface *alt;
 
-		usb_remove_sysfs_intf_files(intf);
 		alt = usb_altnum_to_altsetting(intf, 0);
 
 		/* No altsetting 0?  We'll assume the first altsetting.
@@ -1345,10 +1382,16 @@
 		if (!alt)
 			alt = &intf->altsetting[0];
 
+		if (alt != intf->cur_altsetting) {
+			remove_intf_ep_devs(intf);
+			usb_remove_sysfs_intf_files(intf);
+		}
 		intf->cur_altsetting = alt;
-		usb_enable_interface(dev, intf);
-		if (device_is_registered(&intf->dev))
+		usb_enable_interface(dev, intf, true);
+		if (device_is_registered(&intf->dev)) {
 			usb_create_sysfs_intf_files(intf);
+			create_intf_ep_devs(intf);
+		}
 	}
 	return 0;
 }
@@ -1441,6 +1484,46 @@
 	return retval;
 }
 
+
+/*
+ * Internal function to queue a device reset
+ *
+ * This function is installed as the handler of the work_struct in
+ * 'struct usb_interface->reset_ws', which is set up by
+ * message.c:usb_set_configuration() when initializing each 'struct
+ * usb_interface'.
+ *
+ * It is safe to get the USB device without reference counts because
+ * the life cycle of @iface is bound to the life cycle of @udev. Then,
+ * this function will be run only if @iface is alive (and before
+ * freeing it any scheduled instances of it will have been cancelled).
+ *
+ * We need to set a flag (usb_dev->reset_running) because when we call
+ * the reset, the interfaces might be unbound. The current interface
+ * cannot try to remove the queued work as it would cause a deadlock
+ * (you cannot remove your work from within your executing
+ * workqueue). This flag signals that situation, so that
+ * usb_cancel_queued_reset() doesn't try to cancel the work.
+ *
+ * See usb_queue_reset_device() for more details
+ */
+void __usb_queue_reset_device(struct work_struct *ws)
+{
+	int rc;
+	struct usb_interface *iface =
+		container_of(ws, struct usb_interface, reset_ws);
+	struct usb_device *udev = interface_to_usbdev(iface);
+
+	rc = usb_lock_device_for_reset(udev, iface);
+	if (rc >= 0) {
+		iface->reset_running = 1;
+		usb_reset_device(udev);
+		iface->reset_running = 0;
+		usb_unlock_device(udev);
+	}
+}
+
+
 /*
  * usb_set_configuration - Makes a particular device setting be current
  * @dev: the device whose configuration is being updated
@@ -1560,6 +1643,9 @@
 	if (dev->state != USB_STATE_ADDRESS)
 		usb_disable_device(dev, 1);	/* Skip ep0 */
 
+	/* Get rid of pending async Set-Config requests for this device */
+	cancel_async_set_config(dev);
+
 	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
 			      USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
 			      NULL, 0, USB_CTRL_SET_TIMEOUT);
@@ -1604,13 +1690,14 @@
 			alt = &intf->altsetting[0];
 
 		intf->cur_altsetting = alt;
-		usb_enable_interface(dev, intf);
+		usb_enable_interface(dev, intf, true);
 		intf->dev.parent = &dev->dev;
 		intf->dev.driver = NULL;
 		intf->dev.bus = &usb_bus_type;
 		intf->dev.type = &usb_if_device_type;
 		intf->dev.groups = usb_interface_groups;
 		intf->dev.dma_mask = dev->dev.dma_mask;
+		INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
 		device_initialize(&intf->dev);
 		mark_quiesced(intf);
 		dev_set_name(&intf->dev, "%d-%s:%d.%d",
@@ -1641,17 +1728,21 @@
 				dev_name(&intf->dev), ret);
 			continue;
 		}
-		usb_create_sysfs_intf_files(intf);
+		create_intf_ep_devs(intf);
 	}
 
 	usb_autosuspend_device(dev);
 	return 0;
 }
 
+static LIST_HEAD(set_config_list);
+static DEFINE_SPINLOCK(set_config_lock);
+
 struct set_config_request {
 	struct usb_device	*udev;
 	int			config;
 	struct work_struct	work;
+	struct list_head	node;
 };
 
 /* Worker routine for usb_driver_set_configuration() */
@@ -1659,14 +1750,35 @@
 {
 	struct set_config_request *req =
 		container_of(work, struct set_config_request, work);
+	struct usb_device *udev = req->udev;
 
-	usb_lock_device(req->udev);
-	usb_set_configuration(req->udev, req->config);
-	usb_unlock_device(req->udev);
-	usb_put_dev(req->udev);
+	usb_lock_device(udev);
+	spin_lock(&set_config_lock);
+	list_del(&req->node);
+	spin_unlock(&set_config_lock);
+
+	if (req->config >= -1)		/* Is req still valid? */
+		usb_set_configuration(udev, req->config);
+	usb_unlock_device(udev);
+	usb_put_dev(udev);
 	kfree(req);
 }
 
+/* Cancel pending Set-Config requests for a device whose configuration
+ * was just changed
+ */
+static void cancel_async_set_config(struct usb_device *udev)
+{
+	struct set_config_request *req;
+
+	spin_lock(&set_config_lock);
+	list_for_each_entry(req, &set_config_list, node) {
+		if (req->udev == udev)
+			req->config = -999;	/* Mark as cancelled */
+	}
+	spin_unlock(&set_config_lock);
+}
+
 /**
  * usb_driver_set_configuration - Provide a way for drivers to change device configurations
  * @udev: the device whose configuration is being updated
@@ -1698,6 +1810,10 @@
 	req->config = config;
 	INIT_WORK(&req->work, driver_set_config_work);
 
+	spin_lock(&set_config_lock);
+	list_add(&req->node, &set_config_list);
+	spin_unlock(&set_config_lock);
+
 	usb_get_dev(udev);
 	schedule_work(&req->work);
 	return 0;
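
For context, a sketch (driver-side code, not part of the patch) of how the asynchronous path above is used: usb_driver_set_configuration() is meant for probe() paths, where the device lock is already held and a synchronous usb_set_configuration() would deadlock; the request is queued and handled by driver_set_config_work(). The driver name and the target configuration value are hypothetical.

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);

	if (udev->actconfig == NULL ||
	    udev->actconfig->desc.bConfigurationValue != 2) {
		/* ask for config 2 asynchronously; the worker takes the
		 * device lock itself once probe() has returned */
		usb_driver_set_configuration(udev, 2);
		return -ENODEV;		/* decline this binding; rebind later */
	}
	return 0;
}
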
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 4fb65fd..4cc2456 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -359,19 +359,19 @@
 			strncmp(buf, on_string, len) == 0) {
 		udev->autosuspend_disabled = 1;
 		udev->autoresume_disabled = 0;
-		rc = usb_external_resume_device(udev);
+		rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
 
 	} else if (len == sizeof auto_string - 1 &&
 			strncmp(buf, auto_string, len) == 0) {
 		udev->autosuspend_disabled = 0;
 		udev->autoresume_disabled = 0;
-		rc = usb_external_resume_device(udev);
+		rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
 
 	} else if (len == sizeof suspend_string - 1 &&
 			strncmp(buf, suspend_string, len) == 0) {
 		udev->autosuspend_disabled = 0;
 		udev->autoresume_disabled = 1;
-		rc = usb_external_suspend_device(udev, PMSG_SUSPEND);
+		rc = usb_external_suspend_device(udev, PMSG_USER_SUSPEND);
 
 	} else
 		rc = -EINVAL;
@@ -629,9 +629,6 @@
 	struct device *dev = &udev->dev;
 	int retval;
 
-	/* Unforunately these attributes cannot be created before
-	 * the uevent is broadcast.
-	 */
 	retval = device_create_bin_file(dev, &dev_bin_attr_descriptors);
 	if (retval)
 		goto error;
@@ -643,11 +640,7 @@
 	retval = add_power_attributes(dev);
 	if (retval)
 		goto error;
-
-	retval = usb_create_ep_files(dev, &udev->ep0, udev);
-	if (retval)
-		goto error;
-	return 0;
+	return retval;
 error:
 	usb_remove_sysfs_dev_files(udev);
 	return retval;
@@ -657,7 +650,6 @@
 {
 	struct device *dev = &udev->dev;
 
-	usb_remove_ep_files(&udev->ep0);
 	remove_power_attributes(dev);
 	remove_persist_attributes(dev);
 	device_remove_bin_file(dev, &dev_bin_attr_descriptors);
@@ -812,28 +804,6 @@
 	NULL
 };
 
-static inline void usb_create_intf_ep_files(struct usb_interface *intf,
-		struct usb_device *udev)
-{
-	struct usb_host_interface *iface_desc;
-	int i;
-
-	iface_desc = intf->cur_altsetting;
-	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i)
-		usb_create_ep_files(&intf->dev, &iface_desc->endpoint[i],
-				udev);
-}
-
-static inline void usb_remove_intf_ep_files(struct usb_interface *intf)
-{
-	struct usb_host_interface *iface_desc;
-	int i;
-
-	iface_desc = intf->cur_altsetting;
-	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i)
-		usb_remove_ep_files(&iface_desc->endpoint[i]);
-}
-
 int usb_create_sysfs_intf_files(struct usb_interface *intf)
 {
 	struct usb_device *udev = interface_to_usbdev(intf);
@@ -843,26 +813,19 @@
 	if (intf->sysfs_files_created || intf->unregistering)
 		return 0;
 
-	/* The interface string may be present in some altsettings
-	 * and missing in others.  Hence its attribute cannot be created
-	 * before the uevent is broadcast.
-	 */
 	if (alt->string == NULL)
 		alt->string = usb_cache_string(udev, alt->desc.iInterface);
 	if (alt->string)
 		retval = device_create_file(&intf->dev, &dev_attr_interface);
-	usb_create_intf_ep_files(intf, udev);
 	intf->sysfs_files_created = 1;
 	return 0;
 }
 
 void usb_remove_sysfs_intf_files(struct usb_interface *intf)
 {
-	struct device *dev = &intf->dev;
-
 	if (!intf->sysfs_files_created)
 		return;
-	usb_remove_intf_ep_files(intf);
-	device_remove_file(dev, &dev_attr_interface);
+
+	device_remove_file(&intf->dev, &dev_attr_interface);
 	intf->sysfs_files_created = 0;
 }
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 1f68af9..58bc5e3 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -10,7 +10,6 @@
 
 #define to_urb(d) container_of(d, struct urb, kref)
 
-static DEFINE_SPINLOCK(usb_reject_lock);
 
 static void urb_destroy(struct kref *kref)
 {
@@ -131,9 +130,7 @@
 	urb->anchor = anchor;
 
 	if (unlikely(anchor->poisoned)) {
-		spin_lock(&usb_reject_lock);
-		urb->reject++;
-		spin_unlock(&usb_reject_lock);
+		atomic_inc(&urb->reject);
 	}
 
 	spin_unlock_irqrestore(&anchor->lock, flags);
@@ -565,16 +562,12 @@
 	might_sleep();
 	if (!(urb && urb->dev && urb->ep))
 		return;
-	spin_lock_irq(&usb_reject_lock);
-	++urb->reject;
-	spin_unlock_irq(&usb_reject_lock);
+	atomic_inc(&urb->reject);
 
 	usb_hcd_unlink_urb(urb, -ENOENT);
 	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
 
-	spin_lock_irq(&usb_reject_lock);
-	--urb->reject;
-	spin_unlock_irq(&usb_reject_lock);
+	atomic_dec(&urb->reject);
 }
 EXPORT_SYMBOL_GPL(usb_kill_urb);
 
@@ -606,9 +599,7 @@
 	might_sleep();
 	if (!(urb && urb->dev && urb->ep))
 		return;
-	spin_lock_irq(&usb_reject_lock);
-	++urb->reject;
-	spin_unlock_irq(&usb_reject_lock);
+	atomic_inc(&urb->reject);
 
 	usb_hcd_unlink_urb(urb, -ENOENT);
 	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
@@ -617,14 +608,10 @@
 
 void usb_unpoison_urb(struct urb *urb)
 {
-	unsigned long flags;
-
 	if (!urb)
 		return;
 
-	spin_lock_irqsave(&usb_reject_lock, flags);
-	--urb->reject;
-	spin_unlock_irqrestore(&usb_reject_lock, flags);
+	atomic_dec(&urb->reject);
 }
 EXPORT_SYMBOL_GPL(usb_unpoison_urb);
 
@@ -692,6 +679,26 @@
 EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
 
 /**
+ * usb_unpoison_anchored_urbs - let an anchor be used successfully again
+ * @anchor: anchor the requests are bound to
+ *
+ * Reverses the effect of usb_poison_anchored_urbs();
+ * the anchor can be used normally after it returns.
+ */
+void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
+{
+	unsigned long flags;
+	struct urb *lazarus;
+
+	spin_lock_irqsave(&anchor->lock, flags);
+	list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
+		usb_unpoison_urb(lazarus);
+	}
+	anchor->poisoned = 0;
+	spin_unlock_irqrestore(&anchor->lock, flags);
+}
+EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);
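
A minimal usage sketch (driver-side code, not part of the patch): a driver that anchors its submitted URBs can poison the anchor on suspend so in-flight transfers are killed and new submissions are rejected, then unpoison it on resume. The private structure and callback names are hypothetical.

struct example_priv {
	struct usb_anchor submitted;	/* hypothetical per-device anchor */
};

static int example_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct example_priv *priv = usb_get_intfdata(intf);

	/* kill outstanding URBs and make further submissions fail */
	usb_poison_anchored_urbs(&priv->submitted);
	return 0;
}

static int example_resume(struct usb_interface *intf)
{
	struct example_priv *priv = usb_get_intfdata(intf);

	/* allow submissions again, then resubmit and re-anchor rx URBs */
	usb_unpoison_anchored_urbs(&priv->submitted);
	return 0;
}
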
+/**
  * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
  * @anchor: anchor the requests are bound to
  *
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 399e15f..dcfc072 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -253,7 +253,7 @@
 static void usb_dev_complete(struct device *dev)
 {
 	/* Currently used only for rebinding interfaces */
-	usb_resume(dev);	/* Implement eventually? */
+	usb_resume(dev, PMSG_RESUME);	/* Message event is meaningless */
 }
 
 static int usb_dev_suspend(struct device *dev)
@@ -263,7 +263,7 @@
 
 static int usb_dev_resume(struct device *dev)
 {
-	return usb_resume(dev);
+	return usb_resume(dev, PMSG_RESUME);
 }
 
 static int usb_dev_freeze(struct device *dev)
@@ -273,7 +273,7 @@
 
 static int usb_dev_thaw(struct device *dev)
 {
-	return usb_resume(dev);
+	return usb_resume(dev, PMSG_THAW);
 }
 
 static int usb_dev_poweroff(struct device *dev)
@@ -283,7 +283,7 @@
 
 static int usb_dev_restore(struct device *dev)
 {
-	return usb_resume(dev);
+	return usb_resume(dev, PMSG_RESTORE);
 }
 
 static struct dev_pm_ops usb_device_pm_ops = {
@@ -362,7 +362,7 @@
 	dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
 	dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT;
 	/* ep0 maxpacket comes later, from device descriptor */
-	usb_enable_endpoint(dev, &dev->ep0);
+	usb_enable_endpoint(dev, &dev->ep0, true);
 	dev->can_submit = 1;
 
 	/* Save readable and stable topology id, distinguishing devices
@@ -402,6 +402,7 @@
 #ifdef	CONFIG_PM
 	mutex_init(&dev->pm_mutex);
 	INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
+	INIT_WORK(&dev->autoresume, usb_autoresume_work);
 	dev->autosuspend_delay = usb_autosuspend_delay * HZ;
 	dev->connect_time = jiffies;
 	dev->active_duration = -jiffies;
@@ -513,10 +514,7 @@
  * disconnect; in some drivers (such as usb-storage) the disconnect()
  * or suspend() method will block waiting for a device reset to complete.
  *
- * Returns a negative error code for failure, otherwise 1 or 0 to indicate
- * that the device will or will not have to be unlocked.  (0 can be
- * returned when an interface is given and is BINDING, because in that
- * case the driver already owns the device lock.)
+ * Returns a negative error code for failure, otherwise 0.
  */
 int usb_lock_device_for_reset(struct usb_device *udev,
 			      const struct usb_interface *iface)
@@ -527,16 +525,9 @@
 		return -ENODEV;
 	if (udev->state == USB_STATE_SUSPENDED)
 		return -EHOSTUNREACH;
-	if (iface) {
-		switch (iface->condition) {
-		case USB_INTERFACE_BINDING:
-			return 0;
-		case USB_INTERFACE_BOUND:
-			break;
-		default:
-			return -EINTR;
-		}
-	}
+	if (iface && (iface->condition == USB_INTERFACE_UNBINDING ||
+			iface->condition == USB_INTERFACE_UNBOUND))
+		return -EINTR;
 
 	while (usb_trylock_device(udev) != 0) {
 
@@ -550,10 +541,11 @@
 			return -ENODEV;
 		if (udev->state == USB_STATE_SUSPENDED)
 			return -EHOSTUNREACH;
-		if (iface && iface->condition != USB_INTERFACE_BOUND)
+		if (iface && (iface->condition == USB_INTERFACE_UNBINDING ||
+				iface->condition == USB_INTERFACE_UNBOUND))
 			return -EINTR;
 	}
-	return 1;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(usb_lock_device_for_reset);
 
@@ -962,8 +954,12 @@
 }
 EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg);
 
-/* format to disable USB on kernel command line is: nousb */
-__module_param_call("", nousb, param_set_bool, param_get_bool, &nousb, 0444);
+/* To disable USB, kernel command line is 'nousb' not 'usbcore.nousb' */
+#ifdef MODULE
+module_param(nousb, bool, 0444);
+#else
+core_param(nousb, nousb, bool, 0444);
+#endif
 
 /*
  * for external read access to <nousb>
@@ -975,6 +971,37 @@
 EXPORT_SYMBOL_GPL(usb_disabled);
 
 /*
+ * Notifications of device and interface registration
+ */
+static int usb_bus_notify(struct notifier_block *nb, unsigned long action,
+		void *data)
+{
+	struct device *dev = data;
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		if (dev->type == &usb_device_type)
+			(void) usb_create_sysfs_dev_files(to_usb_device(dev));
+		else if (dev->type == &usb_if_device_type)
+			(void) usb_create_sysfs_intf_files(
+					to_usb_interface(dev));
+		break;
+
+	case BUS_NOTIFY_DEL_DEVICE:
+		if (dev->type == &usb_device_type)
+			usb_remove_sysfs_dev_files(to_usb_device(dev));
+		else if (dev->type == &usb_if_device_type)
+			usb_remove_sysfs_intf_files(to_usb_interface(dev));
+		break;
+	}
+	return 0;
+}
+
+static struct notifier_block usb_bus_nb = {
+	.notifier_call = usb_bus_notify,
+};
+
+/*
  * Init
  */
 static int __init usb_init(void)
@@ -991,6 +1018,9 @@
 	retval = bus_register(&usb_bus_type);
 	if (retval)
 		goto bus_register_failed;
+	retval = bus_register_notifier(&usb_bus_type, &usb_bus_nb);
+	if (retval)
+		goto bus_notifier_failed;
 	retval = usb_host_init();
 	if (retval)
 		goto host_init_failed;
@@ -1025,6 +1055,8 @@
 major_init_failed:
 	usb_host_cleanup();
 host_init_failed:
+	bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
+bus_notifier_failed:
 	bus_unregister(&usb_bus_type);
 bus_register_failed:
 	ksuspend_usb_cleanup();
@@ -1048,6 +1080,7 @@
 	usb_devio_cleanup();
 	usb_hub_cleanup();
 	usb_host_cleanup();
+	bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
 	bus_unregister(&usb_bus_type);
 	ksuspend_usb_cleanup();
 }
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 9a1a45a..3861778 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -1,16 +1,20 @@
+#include <linux/pm.h>
+
 /* Functions local to drivers/usb/core/ */
 
 extern int usb_create_sysfs_dev_files(struct usb_device *dev);
 extern void usb_remove_sysfs_dev_files(struct usb_device *dev);
 extern int usb_create_sysfs_intf_files(struct usb_interface *intf);
 extern void usb_remove_sysfs_intf_files(struct usb_interface *intf);
-extern int usb_create_ep_files(struct device *parent,
+extern int usb_create_ep_devs(struct device *parent,
 				struct usb_host_endpoint *endpoint,
 				struct usb_device *udev);
-extern void usb_remove_ep_files(struct usb_host_endpoint *endpoint);
+extern void usb_remove_ep_devs(struct usb_host_endpoint *endpoint);
 
 extern void usb_enable_endpoint(struct usb_device *dev,
-		struct usb_host_endpoint *ep);
+		struct usb_host_endpoint *ep, bool reset_toggle);
+extern void usb_enable_interface(struct usb_device *dev,
+		struct usb_interface *intf, bool reset_toggles);
 extern void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr);
 extern void usb_disable_interface(struct usb_device *dev,
 		struct usb_interface *intf);
@@ -42,14 +46,16 @@
 #ifdef	CONFIG_PM
 
 extern int usb_suspend(struct device *dev, pm_message_t msg);
-extern int usb_resume(struct device *dev);
+extern int usb_resume(struct device *dev, pm_message_t msg);
 
 extern void usb_autosuspend_work(struct work_struct *work);
-extern int usb_port_suspend(struct usb_device *dev);
-extern int usb_port_resume(struct usb_device *dev);
+extern void usb_autoresume_work(struct work_struct *work);
+extern int usb_port_suspend(struct usb_device *dev, pm_message_t msg);
+extern int usb_port_resume(struct usb_device *dev, pm_message_t msg);
 extern int usb_external_suspend_device(struct usb_device *udev,
 		pm_message_t msg);
-extern int usb_external_resume_device(struct usb_device *udev);
+extern int usb_external_resume_device(struct usb_device *udev,
+		pm_message_t msg);
 
 static inline void usb_pm_lock(struct usb_device *udev)
 {
@@ -63,12 +69,12 @@
 
 #else
 
-static inline int usb_port_suspend(struct usb_device *udev)
+static inline int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 {
 	return 0;
 }
 
-static inline int usb_port_resume(struct usb_device *udev)
+static inline int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 {
 	return 0;
 }
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index dd4cd5a..3219d13 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -297,13 +297,34 @@
 
 # musb builds in ../musb along with host support
 config USB_GADGET_MUSB_HDRC
-	boolean "Inventra HDRC USB Peripheral (TI, ...)"
+	boolean "Inventra HDRC USB Peripheral (TI, ADI, ...)"
 	depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
 	select USB_GADGET_DUALSPEED
 	select USB_GADGET_SELECTED
 	help
 	  This OTG-capable silicon IP is used in dual designs including
-	  the TI DaVinci, OMAP 243x, OMAP 343x, and TUSB 6010.
+	  the TI DaVinci, OMAP 243x, OMAP 343x, TUSB 6010, and ADI Blackfin.
+
+config USB_GADGET_IMX
+	boolean "Freescale IMX USB Peripheral Controller"
+	depends on ARCH_MX1
+	help
+	   Freescale's IMX series includes an integrated full speed
+	   USB 1.1 device controller.  The controller in the IMX series
+	   is register-compatible.
+
+	   It has six fixed-function endpoints, as well as endpoint
+	   zero (for control transfers).
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "imx_udc" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_IMX
+	tristate
+	depends on USB_GADGET_IMX
+	default USB_GADGET
+	select USB_GADGET_SELECTED
 
 config USB_GADGET_M66592
 	boolean "Renesas M66592 USB Peripheral Controller"
@@ -377,6 +398,24 @@
 	default USB_GADGET
 	select USB_GADGET_SELECTED
 
+config USB_GADGET_CI13XXX
+	boolean "MIPS USB CI13xxx"
+	depends on PCI
+	select USB_GADGET_DUALSPEED
+	help
+	  MIPS USB IP core family device controller.
+	  Currently it only supports IP part number CI13412.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "ci13xxx_udc" and force all
+	  gadget drivers to also be dynamically linked.
+
+config USB_CI13XXX
+	tristate
+	depends on USB_GADGET_CI13XXX
+	default USB_GADGET
+	select USB_GADGET_SELECTED
+
 config USB_GADGET_NET2280
 	boolean "NetChip 228x"
 	depends on PCI
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index bd4041b..39a51d7 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -10,6 +10,7 @@
 obj-$(CONFIG_USB_AMD5536UDC)	+= amd5536udc.o
 obj-$(CONFIG_USB_PXA25X)	+= pxa25x_udc.o
 obj-$(CONFIG_USB_PXA27X)	+= pxa27x_udc.o
+obj-$(CONFIG_USB_IMX)		+= imx_udc.o
 obj-$(CONFIG_USB_GOKU)		+= goku_udc.o
 obj-$(CONFIG_USB_OMAP)		+= omap_udc.o
 obj-$(CONFIG_USB_LH7A40X)	+= lh7a40x_udc.o
@@ -19,6 +20,7 @@
 obj-$(CONFIG_USB_FSL_USB2)	+= fsl_usb2_udc.o
 obj-$(CONFIG_USB_M66592)	+= m66592-udc.o
 obj-$(CONFIG_USB_FSL_QE)	+= fsl_qe_udc.o
+obj-$(CONFIG_USB_CI13XXX)	+= ci13xxx_udc.o
 
 #
 # USB gadget drivers
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
new file mode 100644
index 0000000..bebf911
--- /dev/null
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -0,0 +1,2830 @@
+/*
+ * ci13xxx_udc.c - MIPS USB IP core family device controller
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Description: MIPS USB IP core family device controller
+ *              Currently it only supports IP part number CI13412
+ *
+ * This driver is composed of several blocks:
+ * - HW:     hardware interface
+ * - DBG:    debug facilities (optional)
+ * - UTIL:   utilities
+ * - ISR:    interrupts handling
+ * - ENDPT:  endpoint operations (Gadget API)
+ * - GADGET: gadget operations (Gadget API)
+ * - BUS:    bus glue code, bus abstraction layer
+ * - PCI:    PCI core interface and PCI resources (interrupts, memory...)
+ *
+ * Compile Options
+ * - CONFIG_USB_GADGET_DEBUG_FILES: enable debug facilities
+ * - STALL_IN:  non-empty bulk-in pipes cannot be halted
+ *              if defined, mass storage compliance succeeds but with warnings
+ *              => case 4: Hi >  Dn
+ *              => case 5: Hi >  Di
+ *              => case 8: Hi <> Do
+ *              if undefined, usbtest 13 fails
+ * - TRACE:     enable function tracing (depends on DEBUG)
+ *
+ * Main Features
+ * - Chapter 9 & Mass Storage Compliance with Gadget File Storage
+ * - Chapter 9 Compliance with Gadget Zero (STALL_IN undefined)
+ * - Normal & LPM support
+ *
+ * USBTEST Report
+ * - OK: 0-12, 13 (STALL_IN defined) & 14
+ * - Not Supported: 15 & 16 (ISO)
+ *
+ * TODO List
+ * - OTG
+ * - Isochronous & Interrupt Traffic
+ * - Handle requests which spawn several TDs
+ * - GET_STATUS(device) - always reports 0
+ * - Gadget API (majority of optional features)
+ * - Suspend & Remote Wakeup
+ */
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "ci13xxx_udc.h"
+
+
+/******************************************************************************
+ * DEFINE
+ *****************************************************************************/
+/* ctrl register bank access */
+static DEFINE_SPINLOCK(udc_lock);
+
+/* driver name */
+#define UDC_DRIVER_NAME   "ci13xxx_udc"
+
+/* control endpoint description */
+static const struct usb_endpoint_descriptor
+ctrl_endpt_desc = {
+	.bLength         = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
+};
+
+/* UDC descriptor */
+static struct ci13xxx *_udc;
+
+/* Interrupt statistics */
+#define ISR_MASK   0x1F
+static struct {
+	u32 test;
+	u32 ui;
+	u32 uei;
+	u32 pci;
+	u32 uri;
+	u32 sli;
+	u32 none;
+	struct {
+		u32 cnt;
+		u32 buf[ISR_MASK+1];
+		u32 idx;
+	} hndl;
+} isr_statistics;
+
+/**
+ * ffs_nr: find first (least significant) bit set
+ * @x: the word to search
+ *
+ * This function returns bit number (instead of position)
+ */
+static int ffs_nr(u32 x)
+{
+	int n = ffs(x);
+
+	return n ? n-1 : 32;
+}
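
A worked example (not part of the patch), following directly from the definition above: ffs() numbers bits from 1, so ffs_nr() converts a single-bit or field mask into the shift needed to extract the field, and returns 32 for an empty mask.

	/* ffs(0x1) == 1  ->  ffs_nr(0x1) == 0
	 * ffs(0x8) == 4  ->  ffs_nr(0x8) == 3
	 * ffs(0x0) == 0  ->  ffs_nr(0x0) == 32
	 *
	 * typical use, as in hw_port_test_get() below:
	 *	mode = hw_cread(CAP_PORTSC, PORTSC_PTC) >> ffs_nr(PORTSC_PTC);
	 */
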
+
+/******************************************************************************
+ * HW block
+ *****************************************************************************/
+/* register bank descriptor */
+static struct {
+	unsigned      lpm;    /* is LPM? */
+	void __iomem *abs;    /* bus map offset */
+	void __iomem *cap;    /* bus map offset + CAP offset + CAP data */
+	size_t        size;   /* bank size */
+} hw_bank;
+
+/* UDC register map */
+#define ABS_CAPLENGTH       (0x100UL)
+#define ABS_HCCPARAMS       (0x108UL)
+#define ABS_DCCPARAMS       (0x124UL)
+#define ABS_TESTMODE        (hw_bank.lpm ? 0x0FCUL : 0x138UL)
+/* offset to CAPLENGTH (addr + data) */
+#define CAP_USBCMD          (0x000UL)
+#define CAP_USBSTS          (0x004UL)
+#define CAP_USBINTR         (0x008UL)
+#define CAP_DEVICEADDR      (0x014UL)
+#define CAP_ENDPTLISTADDR   (0x018UL)
+#define CAP_PORTSC          (0x044UL)
+#define CAP_DEVLC           (0x0B4UL)
+#define CAP_USBMODE         (hw_bank.lpm ? 0x0C8UL : 0x068UL)
+#define CAP_ENDPTSETUPSTAT  (hw_bank.lpm ? 0x0D8UL : 0x06CUL)
+#define CAP_ENDPTPRIME      (hw_bank.lpm ? 0x0DCUL : 0x070UL)
+#define CAP_ENDPTFLUSH      (hw_bank.lpm ? 0x0E0UL : 0x074UL)
+#define CAP_ENDPTSTAT       (hw_bank.lpm ? 0x0E4UL : 0x078UL)
+#define CAP_ENDPTCOMPLETE   (hw_bank.lpm ? 0x0E8UL : 0x07CUL)
+#define CAP_ENDPTCTRL       (hw_bank.lpm ? 0x0ECUL : 0x080UL)
+#define CAP_LAST            (hw_bank.lpm ? 0x12CUL : 0x0C0UL)
+
+/* maximum number of endpoints: valid only after hw_device_reset() */
+static unsigned hw_ep_max;
+
+/**
+ * hw_ep_bit: calculates the bit number
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns bit number
+ */
+static inline int hw_ep_bit(int num, int dir)
+{
+	return num + (dir ? 16 : 0);
+}
+
+/**
+ * hw_aread: reads from register bitfield
+ * @addr: address relative to bus map
+ * @mask: bitfield mask
+ *
+ * This function returns register bitfield data
+ */
+static u32 hw_aread(u32 addr, u32 mask)
+{
+	return ioread32(addr + hw_bank.abs) & mask;
+}
+
+/**
+ * hw_awrite: writes to register bitfield
+ * @addr: address relative to bus map
+ * @mask: bitfield mask
+ * @data: new data
+ */
+static void hw_awrite(u32 addr, u32 mask, u32 data)
+{
+	iowrite32(hw_aread(addr, ~mask) | (data & mask),
+		  addr + hw_bank.abs);
+}
+
+/**
+ * hw_cread: reads from register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ *
+ * This function returns register bitfield data
+ */
+static u32 hw_cread(u32 addr, u32 mask)
+{
+	return ioread32(addr + hw_bank.cap) & mask;
+}
+
+/**
+ * hw_cwrite: writes to register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ * @data: new data
+ */
+static void hw_cwrite(u32 addr, u32 mask, u32 data)
+{
+	iowrite32(hw_cread(addr, ~mask) | (data & mask),
+		  addr + hw_bank.cap);
+}
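
A short usage sketch (not part of the patch) of the read-modify-write semantics: only the bits selected by the mask change, while all other bits of the register are written back with their current values. Both calls below appear later in hw_device_state().

	hw_cwrite(CAP_USBCMD, USBCMD_RS, USBCMD_RS);	/* set Run/Stop only */
	hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);		/* clear Run/Stop only */
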
+
+/**
+ * hw_ctest_and_clear: tests & clears register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ *
+ * This function returns register bitfield data
+ */
+static u32 hw_ctest_and_clear(u32 addr, u32 mask)
+{
+	u32 reg = hw_cread(addr, mask);
+
+	iowrite32(reg, addr + hw_bank.cap);
+	return reg;
+}
+
+/**
+ * hw_ctest_and_write: tests & writes register bitfield
+ * @addr: address relative to CAP offset plus content
+ * @mask: bitfield mask
+ * @data: new data
+ *
+ * This function returns register bitfield data
+ */
+static u32 hw_ctest_and_write(u32 addr, u32 mask, u32 data)
+{
+	u32 reg = hw_cread(addr, ~0);
+
+	iowrite32((reg & ~mask) | (data & mask), addr + hw_bank.cap);
+	return (reg & mask) >> ffs_nr(mask);
+}
+
+/**
+ * hw_device_reset: resets chip (execute without interruption)
+ * @base: register base address
+ *
+ * This function returns an error code
+ */
+static int hw_device_reset(void __iomem *base)
+{
+	u32 reg;
+
+	/* bank is a module variable */
+	hw_bank.abs = base;
+
+	hw_bank.cap = hw_bank.abs;
+	hw_bank.cap += ABS_CAPLENGTH;
+	hw_bank.cap += ioread8(hw_bank.cap);
+
+	reg = hw_aread(ABS_HCCPARAMS, HCCPARAMS_LEN) >> ffs_nr(HCCPARAMS_LEN);
+	hw_bank.lpm  = reg;
+	hw_bank.size = hw_bank.cap - hw_bank.abs;
+	hw_bank.size += CAP_LAST;
+	hw_bank.size /= sizeof(u32);
+
+	/* should flush & stop before reset */
+	hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);
+	hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
+
+	hw_cwrite(CAP_USBCMD, USBCMD_RST, USBCMD_RST);
+	while (hw_cread(CAP_USBCMD, USBCMD_RST))
+		udelay(10);             /* not RTOS friendly */
+
+	/* USBMODE should be configured step by step */
+	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
+	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
+	hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM);  /* HW >= 2.3 */
+
+	if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) {
+		pr_err("cannot enter device mode");
+		pr_err("lpm = %i", hw_bank.lpm);
+		return -ENODEV;
+	}
+
+	reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
+	if (reg == 0 || reg > ENDPT_MAX)
+		return -ENODEV;
+
+	hw_ep_max = reg;   /* cache hw ENDPT_MAX */
+
+	/* setup lock mode ? */
+
+	/* ENDPTSETUPSTAT is '0' by default */
+
+	/* HCSPARAMS.bf.ppc SHOULD BE zero for device */
+
+	return 0;
+}
+
+/**
+ * hw_device_state: enables/disables interrupts & starts/stops device (execute
+ *                  without interruption)
+ * @dma: 0 => disable, !0 => enable and set dma engine
+ *
+ * This function returns an error code
+ */
+static int hw_device_state(u32 dma)
+{
+	if (dma) {
+		hw_cwrite(CAP_ENDPTLISTADDR, ~0, dma);
+		/* interrupt, error, port change, reset, sleep/suspend */
+		hw_cwrite(CAP_USBINTR, ~0,
+			     USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
+		hw_cwrite(CAP_USBCMD, USBCMD_RS, USBCMD_RS);
+	} else {
+		hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
+		hw_cwrite(CAP_USBINTR, ~0, 0);
+	}
+	return 0;
+}
+
+/**
+ * hw_ep_flush: flush endpoint fifo (execute without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns an error code
+ */
+static int hw_ep_flush(int num, int dir)
+{
+	int n = hw_ep_bit(num, dir);
+
+	do {
+		/* flush any pending transfer */
+		hw_cwrite(CAP_ENDPTFLUSH, BIT(n), BIT(n));
+		while (hw_cread(CAP_ENDPTFLUSH, BIT(n)))
+			cpu_relax();
+	} while (hw_cread(CAP_ENDPTSTAT, BIT(n)));
+
+	return 0;
+}
+
+/**
+ * hw_ep_disable: disables endpoint (execute without interruption)
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns an error code
+ */
+static int hw_ep_disable(int num, int dir)
+{
+	hw_ep_flush(num, dir);
+	hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32),
+		  dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
+	return 0;
+}
+
+/**
+ * hw_ep_enable: enables endpoint (execute without interruption)
+ * @num:  endpoint number
+ * @dir:  endpoint direction
+ * @type: endpoint type
+ *
+ * This function returns an error code
+ */
+static int hw_ep_enable(int num, int dir, int type)
+{
+	u32 mask, data;
+
+	if (dir) {
+		mask  = ENDPTCTRL_TXT;  /* type    */
+		data  = type << ffs_nr(mask);
+
+		mask |= ENDPTCTRL_TXS;  /* unstall */
+		mask |= ENDPTCTRL_TXR;  /* reset data toggle */
+		data |= ENDPTCTRL_TXR;
+		mask |= ENDPTCTRL_TXE;  /* enable  */
+		data |= ENDPTCTRL_TXE;
+	} else {
+		mask  = ENDPTCTRL_RXT;  /* type    */
+		data  = type << ffs_nr(mask);
+
+		mask |= ENDPTCTRL_RXS;  /* unstall */
+		mask |= ENDPTCTRL_RXR;  /* reset data toggle */
+		data |= ENDPTCTRL_RXR;
+		mask |= ENDPTCTRL_RXE;  /* enable  */
+		data |= ENDPTCTRL_RXE;
+	}
+	hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32), mask, data);
+	return 0;
+}
+
+/**
+ * hw_ep_get_halt: return endpoint halt status
+ * @num: endpoint number
+ * @dir: endpoint direction
+ *
+ * This function returns 1 if endpoint halted
+ */
+static int hw_ep_get_halt(int num, int dir)
+{
+	u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
+
+	return hw_cread(CAP_ENDPTCTRL + num * sizeof(u32), mask) ? 1 : 0;
+}
+
+/**
+ * hw_ep_is_primed: test if endpoint is primed (execute without interruption)
+ * @num:   endpoint number
+ * @dir:   endpoint direction
+ *
+ * This function returns true if endpoint primed
+ */
+static int hw_ep_is_primed(int num, int dir)
+{
+	u32 reg = hw_cread(CAP_ENDPTPRIME, ~0) | hw_cread(CAP_ENDPTSTAT, ~0);
+
+	return test_bit(hw_ep_bit(num, dir), (void *)&reg);
+}
+
+/**
+ * hw_test_and_clear_setup_status: test & clear setup status (execute without
+ *                                 interruption)
+ * @n: bit number (endpoint)
+ *
+ * This function returns setup status
+ */
+static int hw_test_and_clear_setup_status(int n)
+{
+	return hw_ctest_and_clear(CAP_ENDPTSETUPSTAT, BIT(n));
+}
+
+/**
+ * hw_ep_prime: primes endpoint (execute without interruption)
+ * @num:     endpoint number
+ * @dir:     endpoint direction
+ * @is_ctrl: true if control endpoint
+ *
+ * This function returns an error code
+ */
+static int hw_ep_prime(int num, int dir, int is_ctrl)
+{
+	int n = hw_ep_bit(num, dir);
+
+	/* the caller should flush first */
+	if (hw_ep_is_primed(num, dir))
+		return -EBUSY;
+
+	if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
+		return -EAGAIN;
+
+	hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));
+
+	while (hw_cread(CAP_ENDPTPRIME, BIT(n)))
+		cpu_relax();
+	if (is_ctrl && dir == RX  && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
+		return -EAGAIN;
+
+	/* status should be tested according to the manual, but it doesn't work */
+	return 0;
+}
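
A small sketch (not part of the patch) of the ordering the comment above asks for: flush before prime. The helper name and its single-argument form are hypothetical; for a non-control endpoint (is_ctrl == 0) only -EBUSY can come back.

static int example_reprime_out(int num)
{
	hw_ep_flush(num, RX);		/* the caller should flush first */
	return hw_ep_prime(num, RX, 0);	/* -EBUSY means it was already primed */
}
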
+
+/**
+ * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
+ *                 without interruption)
+ * @num:   endpoint number
+ * @dir:   endpoint direction
+ * @value: true => stall, false => unstall
+ *
+ * This function returns an error code
+ */
+static int hw_ep_set_halt(int num, int dir, int value)
+{
+	if (value != 0 && value != 1)
+		return -EINVAL;
+
+	do {
+		u32 addr = CAP_ENDPTCTRL + num * sizeof(u32);
+		u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
+		u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
+
+		/* data toggle - reserved for EP0 but it's in ESS */
+		hw_cwrite(addr, mask_xs|mask_xr, value ? mask_xs : mask_xr);
+
+	} while (value != hw_ep_get_halt(num, dir));
+
+	return 0;
+}
+
+/**
+ * hw_intr_clear: disables interrupt & clears interrupt status (execute without
+ *                interruption)
+ * @n: interrupt bit
+ *
+ * This function returns an error code
+ */
+static int hw_intr_clear(int n)
+{
+	if (n >= REG_BITS)
+		return -EINVAL;
+
+	hw_cwrite(CAP_USBINTR, BIT(n), 0);
+	hw_cwrite(CAP_USBSTS,  BIT(n), BIT(n));
+	return 0;
+}
+
+/**
+ * hw_intr_force: enables interrupt & forces interrupt status (execute without
+ *                interruption)
+ * @n: interrupt bit
+ *
+ * This function returns an error code
+ */
+static int hw_intr_force(int n)
+{
+	if (n >= REG_BITS)
+		return -EINVAL;
+
+	hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, TESTMODE_FORCE);
+	hw_cwrite(CAP_USBINTR,  BIT(n), BIT(n));
+	hw_cwrite(CAP_USBSTS,   BIT(n), BIT(n));
+	hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, 0);
+	return 0;
+}
+
+/**
+ * hw_port_is_high_speed: test if port is high speed
+ *
+ * This function returns true if high speed port
+ */
+static int hw_port_is_high_speed(void)
+{
+	return hw_bank.lpm ? hw_cread(CAP_DEVLC, DEVLC_PSPD) :
+		hw_cread(CAP_PORTSC, PORTSC_HSP);
+}
+
+/**
+ * hw_port_test_get: reads port test mode value
+ *
+ * This function returns port test mode value
+ */
+static u8 hw_port_test_get(void)
+{
+	return hw_cread(CAP_PORTSC, PORTSC_PTC) >> ffs_nr(PORTSC_PTC);
+}
+
+/**
+ * hw_port_test_set: writes port test mode (execute without interruption)
+ * @mode: new value
+ *
+ * This function returns an error code
+ */
+static int hw_port_test_set(u8 mode)
+{
+	const u8 TEST_MODE_MAX = 7;
+
+	if (mode > TEST_MODE_MAX)
+		return -EINVAL;
+
+	hw_cwrite(CAP_PORTSC, PORTSC_PTC, mode << ffs_nr(PORTSC_PTC));
+	return 0;
+}
+
+/**
+ * hw_read_intr_enable: returns interrupt enable register
+ *
+ * This function returns register data
+ */
+static u32 hw_read_intr_enable(void)
+{
+	return hw_cread(CAP_USBINTR, ~0);
+}
+
+/**
+ * hw_read_intr_status: returns interrupt status register
+ *
+ * This function returns register data
+ */
+static u32 hw_read_intr_status(void)
+{
+	return hw_cread(CAP_USBSTS, ~0);
+}
+
+/**
+ * hw_register_read: reads all device registers (execute without interruption)
+ * @buf:  destination buffer
+ * @size: buffer size
+ *
+ * This function returns number of registers read
+ */
+static size_t hw_register_read(u32 *buf, size_t size)
+{
+	unsigned i;
+
+	if (size > hw_bank.size)
+		size = hw_bank.size;
+
+	for (i = 0; i < size; i++)
+		buf[i] = hw_aread(i * sizeof(u32), ~0);
+
+	return size;
+}
+
+/**
+ * hw_register_write: writes to register
+ * @addr: register address
+ * @data: register value
+ *
+ * This function returns an error code
+ */
+static int hw_register_write(u16 addr, u32 data)
+{
+	/* align */
+	addr /= sizeof(u32);
+
+	if (addr >= hw_bank.size)
+		return -EINVAL;
+
+	/* align */
+	addr *= sizeof(u32);
+
+	hw_awrite(addr, ~0, data);
+	return 0;
+}
+
+/**
+ * hw_test_and_clear_complete: test & clear complete status (execute without
+ *                             interruption)
+ * @n: bit number (endpoint)
+ *
+ * This function returns complete status
+ */
+static int hw_test_and_clear_complete(int n)
+{
+	return hw_ctest_and_clear(CAP_ENDPTCOMPLETE, BIT(n));
+}
+
+/**
+ * hw_test_and_clear_intr_active: test & clear active interrupts (execute
+ *                                without interruption)
+ *
+ * This function returns active interrupts
+ */
+static u32 hw_test_and_clear_intr_active(void)
+{
+	u32 reg = hw_read_intr_status() & hw_read_intr_enable();
+
+	hw_cwrite(CAP_USBSTS, ~0, reg);
+	return reg;
+}
+
+/**
+ * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
+ *                                interruption)
+ *
+ * This function returns guard value
+ */
+static int hw_test_and_clear_setup_guard(void)
+{
+	return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, 0);
+}
+
+/**
+ * hw_test_and_set_setup_guard: test & set setup guard (execute without
+ *                              interruption)
+ *
+ * This function returns guard value
+ */
+static int hw_test_and_set_setup_guard(void)
+{
+	return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
+}
+
+/**
+ * hw_usb_set_address: configures USB address (execute without interruption)
+ * @value: new USB address
+ *
+ * This function returns an error code
+ */
+static int hw_usb_set_address(u8 value)
+{
+	/* advance */
+	hw_cwrite(CAP_DEVICEADDR, DEVICEADDR_USBADR | DEVICEADDR_USBADRA,
+		  value << ffs_nr(DEVICEADDR_USBADR) | DEVICEADDR_USBADRA);
+	return 0;
+}
+
+/**
+ * hw_usb_reset: restart device after a bus reset (execute without
+ *               interruption)
+ *
+ * This function returns an error code
+ */
+static int hw_usb_reset(void)
+{
+	hw_usb_set_address(0);
+
+	/* ESS flushes only at end?!? */
+	hw_cwrite(CAP_ENDPTFLUSH,    ~0, ~0);   /* flush all EPs */
+
+	/* clear setup token semaphores */
+	hw_cwrite(CAP_ENDPTSETUPSTAT, 0,  0);   /* writes its content */
+
+	/* clear complete status */
+	hw_cwrite(CAP_ENDPTCOMPLETE,  0,  0);   /* writes its content */
+
+	/* wait until all bits cleared */
+	while (hw_cread(CAP_ENDPTPRIME, ~0))
+		udelay(10);             /* not RTOS friendly */
+
+	/* reset all endpoints ? */
+
+	/* reset internal status and wait for further instructions
+	   no need to verify the port reset status (ESS does it) */
+
+	return 0;
+}
+
+/******************************************************************************
+ * DBG block
+ *****************************************************************************/
+/**
+ * show_device: prints information about device capabilities and status
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_device(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	struct usb_gadget *gadget = &udc->gadget;
+	int n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	n += scnprintf(buf + n, PAGE_SIZE - n, "speed             = %d\n",
+		       gadget->speed);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed      = %d\n",
+		       gadget->is_dualspeed);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg            = %d\n",
+		       gadget->is_otg);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral   = %d\n",
+		       gadget->is_a_peripheral);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "b_hnp_enable      = %d\n",
+		       gadget->b_hnp_enable);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "a_hnp_support     = %d\n",
+		       gadget->a_hnp_support);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "a_alt_hnp_support = %d\n",
+		       gadget->a_alt_hnp_support);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "name              = %s\n",
+		       (gadget->name ? gadget->name : ""));
+
+	return n;
+}
+static DEVICE_ATTR(device, S_IRUSR, show_device, NULL);
+
+/**
+ * show_driver: prints information about attached gadget (if any)
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_driver(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	struct usb_gadget_driver *driver = udc->driver;
+	int n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	if (driver == NULL)
+		return scnprintf(buf, PAGE_SIZE,
+				 "There is no gadget attached!\n");
+
+	n += scnprintf(buf + n, PAGE_SIZE - n, "function  = %s\n",
+		       (driver->function ? driver->function : ""));
+	n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n",
+		       driver->speed);
+
+	return n;
+}
+static DEVICE_ATTR(driver, S_IRUSR, show_driver, NULL);
+
+/* Maximum event message length */
+#define DBG_DATA_MSG   64UL
+
+/* Maximum event messages */
+#define DBG_DATA_MAX   128UL
+
+/* Event buffer descriptor */
+static struct {
+	char     (buf[DBG_DATA_MAX])[DBG_DATA_MSG];   /* buffer */
+	unsigned idx;   /* index */
+	unsigned tty;   /* print to console? */
+	rwlock_t lck;   /* lock */
+} dbg_data = {
+	.idx = 0,
+	.tty = 0,
+	.lck = __RW_LOCK_UNLOCKED(lck)
+};
+
+/**
+ * dbg_dec: decrements debug event index
+ * @idx: buffer index
+ */
+static void dbg_dec(unsigned *idx)
+{
+	*idx = (*idx - 1) & (DBG_DATA_MAX-1);
+}
+
+/**
+ * dbg_inc: increments debug event index
+ * @idx: buffer index
+ */
+static void dbg_inc(unsigned *idx)
+{
+	*idx = (*idx + 1) & (DBG_DATA_MAX-1);
+}
+
+/**
+ * dbg_print:  prints the common part of the event
+ * @addr:   endpoint address
+ * @name:   event name
+ * @status: status
+ * @extra:  extra information
+ */
+static void dbg_print(u8 addr, const char *name, int status, const char *extra)
+{
+	struct timeval tval;
+	unsigned int stamp;
+	unsigned long flags;
+
+	write_lock_irqsave(&dbg_data.lck, flags);
+
+	do_gettimeofday(&tval);
+	stamp = tval.tv_sec & 0xFFFF;	/* 2^32 = 4294967296. Limit to 4096s */
+	stamp = stamp * 1000000 + tval.tv_usec;
+
+	scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
+		  "%04X\t» %02X %-7.7s %4i «\t%s\n",
+		  stamp, addr, name, status, extra);
+
+	dbg_inc(&dbg_data.idx);
+
+	write_unlock_irqrestore(&dbg_data.lck, flags);
+
+	if (dbg_data.tty != 0)
+		pr_notice("%04X\t» %02X %-7.7s %4i «\t%s\n",
+			  stamp, addr, name, status, extra);
+}
+
+/**
+ * dbg_done: prints a DONE event
+ * @addr:   endpoint address
+ * @td:     transfer descriptor
+ * @status: status
+ */
+static void dbg_done(u8 addr, const u32 token, int status)
+{
+	char msg[DBG_DATA_MSG];
+
+	scnprintf(msg, sizeof(msg), "%d %02X",
+		  (int)(token & TD_TOTAL_BYTES) >> ffs_nr(TD_TOTAL_BYTES),
+		  (int)(token & TD_STATUS)      >> ffs_nr(TD_STATUS));
+	dbg_print(addr, "DONE", status, msg);
+}
+
+/**
+ * dbg_event: prints a generic event
+ * @addr:   endpoint address
+ * @name:   event name
+ * @status: status
+ */
+static void dbg_event(u8 addr, const char *name, int status)
+{
+	if (name != NULL)
+		dbg_print(addr, name, status, "");
+}
+
+/**
+ * dbg_queue: prints a QUEUE event
+ * @addr:   endpoint address
+ * @req:    USB request
+ * @status: status
+ */
+static void dbg_queue(u8 addr, const struct usb_request *req, int status)
+{
+	char msg[DBG_DATA_MSG];
+
+	if (req != NULL) {
+		scnprintf(msg, sizeof(msg),
+			  "%d %d", !req->no_interrupt, req->length);
+		dbg_print(addr, "QUEUE", status, msg);
+	}
+}
+
+/**
+ * dbg_setup: prints a SETUP event
+ * @addr: endpoint address
+ * @req:  setup request
+ */
+static void dbg_setup(u8 addr, const struct usb_ctrlrequest *req)
+{
+	char msg[DBG_DATA_MSG];
+
+	if (req != NULL) {
+		scnprintf(msg, sizeof(msg),
+			  "%02X %02X %04X %04X %d", req->bRequestType,
+			  req->bRequest, le16_to_cpu(req->wValue),
+			  le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength));
+		dbg_print(addr, "SETUP", 0, msg);
+	}
+}
+
+/**
+ * show_events: displays the event buffer
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_events(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	unsigned long flags;
+	unsigned i, j, n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	read_lock_irqsave(&dbg_data.lck, flags);
+
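+	/*
+	 * Walk the ring backwards from the newest entry to find how many
+	 * messages still fit in one page, then walk forwards again to copy
+	 * them out in chronological order.
+	 */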
+	i = dbg_data.idx;
+	for (dbg_dec(&i); i != dbg_data.idx; dbg_dec(&i)) {
+		n += strlen(dbg_data.buf[i]);
+		if (n >= PAGE_SIZE) {
+			n -= strlen(dbg_data.buf[i]);
+			break;
+		}
+	}
+	for (j = 0, dbg_inc(&i); j < n; dbg_inc(&i))
+		j += scnprintf(buf + j, PAGE_SIZE - j,
+			       "%s", dbg_data.buf[i]);
+
+	read_unlock_irqrestore(&dbg_data.lck, flags);
+
+	return n;
+}
+
+/**
+ * store_events: selects whether events are also printed to the console
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_events(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	unsigned tty;
+
+	dbg_trace("[%s] %p, %d\n", __func__, buf, count);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (sscanf(buf, "%u", &tty) != 1 || tty > 1) {
+		dev_err(dev, "<1|0>: enable|disable console log\n");
+		goto done;
+	}
+
+	dbg_data.tty = tty;
+	dev_info(dev, "tty = %u", dbg_data.tty);
+
+ done:
+	return count;
+}
+static DEVICE_ATTR(events, S_IRUSR | S_IWUSR, show_events, store_events);
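+
+/*
+ * Usage sketch (the sysfs path is illustrative; the attribute lives under
+ * the gadget device, wherever it is registered):
+ *
+ *   cat .../gadget/events        - dump the event ring buffer
+ *   echo 1 > .../gadget/events   - also mirror new events to the console
+ *   echo 0 > .../gadget/events   - console mirroring off (default)
+ */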
+
+/**
+ * show_inters: interrupt status, enable status and history
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_inters(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	u32 intr;
+	unsigned i, j, n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+
+	n += scnprintf(buf + n, PAGE_SIZE - n,
+		       "status = %08x\n", hw_read_intr_status());
+	n += scnprintf(buf + n, PAGE_SIZE - n,
+		       "enable = %08x\n", hw_read_intr_enable());
+
+	n += scnprintf(buf + n, PAGE_SIZE - n, "*test = %d\n",
+		       isr_statistics.test);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "» ui  = %d\n",
+		       isr_statistics.ui);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "» uei = %d\n",
+		       isr_statistics.uei);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "» pci = %d\n",
+		       isr_statistics.pci);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "» uri = %d\n",
+		       isr_statistics.uri);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "» sli = %d\n",
+		       isr_statistics.sli);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "*none = %d\n",
+		       isr_statistics.none);
+	n += scnprintf(buf + n, PAGE_SIZE - n, "*hndl = %d\n",
+		       isr_statistics.hndl.cnt);
+
+	for (i = isr_statistics.hndl.idx, j = 0; j <= ISR_MASK; j++, i++) {
+		i   &= ISR_MASK;
+		intr = isr_statistics.hndl.buf[i];
+
+		if (USBi_UI  & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "ui  ");
+		intr &= ~USBi_UI;
+		if (USBi_UEI & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "uei ");
+		intr &= ~USBi_UEI;
+		if (USBi_PCI & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "pci ");
+		intr &= ~USBi_PCI;
+		if (USBi_URI & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "uri ");
+		intr &= ~USBi_URI;
+		if (USBi_SLI & intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "sli ");
+		intr &= ~USBi_SLI;
+		if (intr)
+			n += scnprintf(buf + n, PAGE_SIZE - n, "??? ");
+		if (isr_statistics.hndl.buf[i])
+			n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
+	}
+
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return n;
+}
+
+/**
+ * store_inters: enable & force or disable individual interrupts
+ *                   (to be used for test purposes only)
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_inters(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned en, bit;
+
+	dbg_trace("[%s] %p, %d\n", __func__, buf, count);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (sscanf(buf, "%u %u", &en, &bit) != 2 || en > 1) {
+		dev_err(dev, "<1|0> <bit>: enable|disable interrupt");
+		goto done;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (en) {
+		if (hw_intr_force(bit))
+			dev_err(dev, "invalid bit number\n");
+		else
+			isr_statistics.test++;
+	} else {
+		if (hw_intr_clear(bit))
+			dev_err(dev, "invalid bit number\n");
+	}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+ done:
+	return count;
+}
+static DEVICE_ATTR(inters, S_IRUSR | S_IWUSR, show_inters, store_inters);
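+
+/*
+ * Usage sketch (illustrative path): "echo 1 <bit> > .../gadget/inters"
+ * forces the given interrupt, "echo 0 <bit> > .../gadget/inters" disables
+ * it; the bit number is validated by hw_intr_force()/hw_intr_clear().
+ */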
+
+/**
+ * show_port_test: reads port test mode
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_port_test(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned mode;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	mode = hw_port_test_get();
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return scnprintf(buf, PAGE_SIZE, "mode = %u\n", mode);
+}
+
+/**
+ * store_port_test: writes port test mode
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_port_test(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned mode;
+
+	dbg_trace("[%s] %p, %d\n", __func__, buf, count);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (sscanf(buf, "%u", &mode) != 1) {
+		dev_err(dev, "<mode>: set port test mode");
+		goto done;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (hw_port_test_set(mode))
+		dev_err(dev, "invalid mode\n");
+	spin_unlock_irqrestore(udc->lock, flags);
+
+ done:
+	return count;
+}
+static DEVICE_ATTR(port_test, S_IRUSR | S_IWUSR,
+		   show_port_test, store_port_test);
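+
+/*
+ * Usage sketch (illustrative path): "echo <mode> > .../gadget/port_test"
+ * selects a PORTSC test mode and "cat .../gadget/port_test" reads it back;
+ * hw_port_test_set() rejects invalid modes.
+ */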
+
+/**
+ * show_qheads: DMA contents of all queue heads
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_qheads(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	unsigned i, j, n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	for (i = 0; i < hw_ep_max; i++) {
+		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "EP=%02i: RX=%08X TX=%08X\n",
+			       i, (u32)mEp->qh[RX].dma, (u32)mEp->qh[TX].dma);
+		for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+				       " %04X:    %08X    %08X\n", j,
+				       *((u32 *)mEp->qh[RX].ptr + j),
+				       *((u32 *)mEp->qh[TX].ptr + j));
+		}
+	}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return n;
+}
+static DEVICE_ATTR(qheads, S_IRUSR, show_qheads, NULL);
+
+/**
+ * show_registers: dumps all registers
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_registers(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	u32 dump[512];
+	unsigned i, k, n = 0;
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	k = hw_register_read(dump, sizeof(dump)/sizeof(u32));
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	for (i = 0; i < k; i++) {
+		n += scnprintf(buf + n, PAGE_SIZE - n,
+			       "reg[0x%04X] = 0x%08X\n",
+			       i * (unsigned)sizeof(u32), dump[i]);
+	}
+
+	return n;
+}
+
+/**
+ * store_registers: writes value to register address
+ *
+ * Check "device.h" for details
+ */
+static ssize_t store_registers(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long addr, data, flags;
+
+	dbg_trace("[%s] %p, %d\n", __func__, buf, count);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		goto done;
+	}
+
+	if (sscanf(buf, "%li %li", &addr, &data) != 2) {
+		dev_err(dev, "<addr> <data>: write data to register address");
+		goto done;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (hw_register_write(addr, data))
+		dev_err(dev, "invalid address range\n");
+	spin_unlock_irqrestore(udc->lock, flags);
+
+ done:
+	return count;
+}
+static DEVICE_ATTR(registers, S_IRUSR | S_IWUSR,
+		   show_registers, store_registers);
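+
+/*
+ * Usage sketch (illustrative path): "cat .../gadget/registers" dumps the
+ * register file, and "echo '<addr> <data>' > .../gadget/registers" writes
+ * one register; both values may be given in decimal or 0x-prefixed hex,
+ * since they are parsed with "%li".
+ */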
+
+/**
+ * show_requests: DMA contents of all requests currently queued (all endpts)
+ *
+ * Check "device.h" for details
+ */
+static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+	unsigned long flags;
+	struct list_head   *ptr = NULL;
+	struct ci13xxx_req *req = NULL;
+	unsigned i, j, k, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
+
+	dbg_trace("[%s] %p\n", __func__, buf);
+	if (attr == NULL || buf == NULL) {
+		dev_err(dev, "[%s] EINVAL\n", __func__);
+		return 0;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+	for (i = 0; i < hw_ep_max; i++)
+		for (k = RX; k <= TX; k++)
+			list_for_each(ptr, &udc->ci13xxx_ep[i].qh[k].queue)
+			{
+				req = list_entry(ptr,
+						 struct ci13xxx_req, queue);
+
+				n += scnprintf(buf + n, PAGE_SIZE - n,
+					       "EP=%02i: TD=%08X %s\n",
+					       i, (u32)req->dma,
+					       ((k == RX) ? "RX" : "TX"));
+
+				for (j = 0; j < qSize; j++)
+					n += scnprintf(buf + n, PAGE_SIZE - n,
+						       " %04X:    %08X\n", j,
+						       *((u32 *)req->ptr + j));
+			}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	return n;
+}
+static DEVICE_ATTR(requests, S_IRUSR, show_requests, NULL);
+
+/**
+ * dbg_create_files: initializes the attribute interface
+ * @dev: device
+ *
+ * This function returns an error code
+ */
+__maybe_unused static int dbg_create_files(struct device *dev)
+{
+	int retval = 0;
+
+	if (dev == NULL)
+		return -EINVAL;
+	retval = device_create_file(dev, &dev_attr_device);
+	if (retval)
+		goto done;
+	retval = device_create_file(dev, &dev_attr_driver);
+	if (retval)
+		goto rm_device;
+	retval = device_create_file(dev, &dev_attr_events);
+	if (retval)
+		goto rm_driver;
+	retval = device_create_file(dev, &dev_attr_inters);
+	if (retval)
+		goto rm_events;
+	retval = device_create_file(dev, &dev_attr_port_test);
+	if (retval)
+		goto rm_inters;
+	retval = device_create_file(dev, &dev_attr_qheads);
+	if (retval)
+		goto rm_port_test;
+	retval = device_create_file(dev, &dev_attr_registers);
+	if (retval)
+		goto rm_qheads;
+	retval = device_create_file(dev, &dev_attr_requests);
+	if (retval)
+		goto rm_registers;
+	return 0;
+
+ rm_registers:
+	device_remove_file(dev, &dev_attr_registers);
+ rm_qheads:
+	device_remove_file(dev, &dev_attr_qheads);
+ rm_port_test:
+	device_remove_file(dev, &dev_attr_port_test);
+ rm_inters:
+	device_remove_file(dev, &dev_attr_inters);
+ rm_events:
+	device_remove_file(dev, &dev_attr_events);
+ rm_driver:
+	device_remove_file(dev, &dev_attr_driver);
+ rm_device:
+	device_remove_file(dev, &dev_attr_device);
+ done:
+	return retval;
+}
+
+/**
+ * dbg_remove_files: destroys the attribute interface
+ * @dev: device
+ *
+ * This function returns an error code
+ */
+__maybe_unused static int dbg_remove_files(struct device *dev)
+{
+	if (dev == NULL)
+		return -EINVAL;
+	device_remove_file(dev, &dev_attr_requests);
+	device_remove_file(dev, &dev_attr_registers);
+	device_remove_file(dev, &dev_attr_qheads);
+	device_remove_file(dev, &dev_attr_port_test);
+	device_remove_file(dev, &dev_attr_inters);
+	device_remove_file(dev, &dev_attr_events);
+	device_remove_file(dev, &dev_attr_driver);
+	device_remove_file(dev, &dev_attr_device);
+	return 0;
+}
+
+/******************************************************************************
+ * UTIL block
+ *****************************************************************************/
+/**
+ * _usb_addr: calculates endpoint address from direction & number
+ * @ep:  endpoint
+ */
+static inline u8 _usb_addr(struct ci13xxx_ep *ep)
+{
+	return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
+}
+
+/**
+ * _hardware_enqueue: configures a request at hardware level
+ * @mEp:  endpoint
+ * @mReq: request
+ *
+ * This function returns an error code
+ */
+static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+{
+	unsigned i;
+
+	trace("%p, %p", mEp, mReq);
+
+	/* don't queue twice */
+	if (mReq->req.status == -EALREADY)
+		return -EALREADY;
+
+	if (hw_ep_is_primed(mEp->num, mEp->dir))
+		return -EBUSY;
+
+	mReq->req.status = -EALREADY;
+
+	if (mReq->req.length && !mReq->req.dma) {
+		mReq->req.dma = \
+			dma_map_single(mEp->device, mReq->req.buf,
+				       mReq->req.length, mEp->dir ?
+				       DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		if (mReq->req.dma == 0)
+			return -ENOMEM;
+
+		mReq->map = 1;
+	}
+
+	/*
+	 * TD configuration
+	 * TODO - handle requests which span several TDs
+	 */
+	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
+	mReq->ptr->next    |= TD_TERMINATE;
+	mReq->ptr->token    = mReq->req.length << ffs_nr(TD_TOTAL_BYTES);
+	mReq->ptr->token   &= TD_TOTAL_BYTES;
+	mReq->ptr->token   |= TD_IOC;
+	mReq->ptr->token   |= TD_STATUS_ACTIVE;
+	mReq->ptr->page[0]  = mReq->req.dma;
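+	/* page[1..4] point at the following PAGE_SIZE chunks of the buffer */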
+	for (i = 1; i < 5; i++)
+		mReq->ptr->page[i] =
+			(mReq->req.dma + i * PAGE_SIZE) & ~TD_RESERVED_MASK;
+
+	/*
+	 *  QH configuration
+	 *  At this point we have guaranteed exclusive access to the qhead
+	 *  (endpt is not primed), so there is no need to use the tripwire
+	 */
+	mEp->qh[mEp->dir].ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */
+	mEp->qh[mEp->dir].ptr->td.token &= ~TD_STATUS;   /* clear status */
+	if (mReq->req.zero == 0)
+		mEp->qh[mEp->dir].ptr->cap |=  QH_ZLT;
+	else
+		mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT;
+
+	wmb();   /* synchronize before ep prime */
+
+	return hw_ep_prime(mEp->num, mEp->dir,
+			   mEp->type == USB_ENDPOINT_XFER_CONTROL);
+}
+
+/**
+ * _hardware_dequeue: handles a request at hardware level
+ * @mEp:  endpoint
+ * @mReq: request
+ *
+ * This function returns an error code
+ */
+static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
+{
+	trace("%p, %p", mEp, mReq);
+
+	if (mReq->req.status != -EALREADY)
+		return -EINVAL;
+
+	if (hw_ep_is_primed(mEp->num, mEp->dir))
+		hw_ep_flush(mEp->num, mEp->dir);
+
+	mReq->req.status = 0;
+
+	if (mReq->map) {
+		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
+				 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		mReq->req.dma = 0;
+		mReq->map     = 0;
+	}
+
+	mReq->req.status = mReq->ptr->token & TD_STATUS;
+	if      ((TD_STATUS_ACTIVE & mReq->req.status) != 0)
+		mReq->req.status = -ECONNRESET;
+	else if ((TD_STATUS_HALTED & mReq->req.status) != 0)
+		mReq->req.status = -1;
+	else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
+		mReq->req.status = -1;
+	else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
+		mReq->req.status = -1;
+
+	mReq->req.actual   = mReq->ptr->token & TD_TOTAL_BYTES;
+	mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
+	mReq->req.actual   = mReq->req.length - mReq->req.actual;
+	mReq->req.actual   = mReq->req.status ? 0 : mReq->req.actual;
+
+	return mReq->req.actual;
+}
+
+/**
+ * _ep_nuke: dequeues all endpoint requests
+ * @mEp: endpoint
+ *
+ * This function returns an error code
+ * Caller must hold lock
+ */
+static int _ep_nuke(struct ci13xxx_ep *mEp)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	trace("%p", mEp);
+
+	if (mEp == NULL)
+		return -EINVAL;
+
+	hw_ep_flush(mEp->num, mEp->dir);
+
+	while (!list_empty(&mEp->qh[mEp->dir].queue)) {
+
+		/* pop oldest request */
+		struct ci13xxx_req *mReq = \
+			list_entry(mEp->qh[mEp->dir].queue.next,
+				   struct ci13xxx_req, queue);
+		list_del_init(&mReq->queue);
+		mReq->req.status = -ESHUTDOWN;
+
+		if (!mReq->req.no_interrupt && mReq->req.complete != NULL) {
+			spin_unlock(mEp->lock);
+			mReq->req.complete(&mEp->ep, &mReq->req);
+			spin_lock(mEp->lock);
+		}
+	}
+	return 0;
+}
+
+/**
+ * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
+ * @gadget: gadget
+ *
+ * This function returns an error code
+ * Caller must hold lock
+ */
+static int _gadget_stop_activity(struct usb_gadget *gadget)
+__releases(udc->lock)
+__acquires(udc->lock)
+{
+	struct usb_ep *ep;
+	struct ci13xxx    *udc = container_of(gadget, struct ci13xxx, gadget);
+	struct ci13xxx_ep *mEp = container_of(gadget->ep0,
+					      struct ci13xxx_ep, ep);
+
+	trace("%p", gadget);
+
+	if (gadget == NULL)
+		return -EINVAL;
+
+	spin_unlock(udc->lock);
+
+	/* flush all endpoints */
+	gadget_for_each_ep(ep, gadget) {
+		usb_ep_fifo_flush(ep);
+	}
+	usb_ep_fifo_flush(gadget->ep0);
+
+	udc->driver->disconnect(gadget);
+
+	/* make sure to disable all endpoints */
+	gadget_for_each_ep(ep, gadget) {
+		usb_ep_disable(ep);
+	}
+	usb_ep_disable(gadget->ep0);
+
+	if (mEp->status != NULL) {
+		usb_ep_free_request(gadget->ep0, mEp->status);
+		mEp->status = NULL;
+	}
+
+	spin_lock(udc->lock);
+
+	return 0;
+}
+
+/******************************************************************************
+ * ISR block
+ *****************************************************************************/
+/**
+ * isr_reset_handler: USB reset interrupt handler
+ * @udc: UDC device
+ *
+ * This function resets the USB engine after a bus reset has occurred
+ */
+static void isr_reset_handler(struct ci13xxx *udc)
+__releases(udc->lock)
+__acquires(udc->lock)
+{
+	struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[0];
+	int retval;
+
+	trace("%p", udc);
+
+	if (udc == NULL) {
+		err("EINVAL");
+		return;
+	}
+
+	dbg_event(0xFF, "BUS RST", 0);
+
+	retval = _gadget_stop_activity(&udc->gadget);
+	if (retval)
+		goto done;
+
+	retval = hw_usb_reset();
+	if (retval)
+		goto done;
+
+	spin_unlock(udc->lock);
+	retval = usb_ep_enable(&mEp->ep, &ctrl_endpt_desc);
+	if (!retval) {
+		mEp->status = usb_ep_alloc_request(&mEp->ep, GFP_KERNEL);
+		if (mEp->status == NULL) {
+			usb_ep_disable(&mEp->ep);
+			retval = -ENOMEM;
+		}
+	}
+	spin_lock(udc->lock);
+
+ done:
+	if (retval)
+		err("error: %i", retval);
+}
+
+/**
+ * isr_get_status_complete: get_status request complete function
+ * @ep:  endpoint
+ * @req: request handled
+ *
+ * Caller must release lock
+ */
+static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	trace("%p, %p", ep, req);
+
+	if (ep == NULL || req == NULL) {
+		err("EINVAL");
+		return;
+	}
+
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+/**
+ * isr_get_status_response: get_status request response
+ * @ep:    endpoint
+ * @setup: setup request packet
+ *
+ * This function returns an error code
+ */
+static int isr_get_status_response(struct ci13xxx_ep *mEp,
+				   struct usb_ctrlrequest *setup)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	struct usb_request *req = NULL;
+	gfp_t gfp_flags = GFP_ATOMIC;
+	int dir, num, retval;
+
+	trace("%p, %p", mEp, setup);
+
+	if (mEp == NULL || setup == NULL)
+		return -EINVAL;
+
+	spin_unlock(mEp->lock);
+	req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
+	spin_lock(mEp->lock);
+	if (req == NULL)
+		return -ENOMEM;
+
+	req->complete = isr_get_status_complete;
+	req->length   = 2;
+	req->buf      = kzalloc(req->length, gfp_flags);
+	if (req->buf == NULL) {
+		retval = -ENOMEM;
+		goto err_free_req;
+	}
+
+	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+		/* TODO: D1 - Remote Wakeup; D0 - Self Powered */
+		retval = 0;
+	} else if ((setup->bRequestType & USB_RECIP_MASK) \
+		   == USB_RECIP_ENDPOINT) {
+		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
+			TX : RX;
+		num =  le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
+		*((u16 *)req->buf) = hw_ep_get_halt(num, dir);
+	}
+	/* else do nothing; reserved for future use */
+
+	spin_unlock(mEp->lock);
+	retval = usb_ep_queue(&mEp->ep, req, gfp_flags);
+	spin_lock(mEp->lock);
+	if (retval)
+		goto err_free_buf;
+
+	return 0;
+
+ err_free_buf:
+	kfree(req->buf);
+ err_free_req:
+	spin_unlock(mEp->lock);
+	usb_ep_free_request(&mEp->ep, req);
+	spin_lock(mEp->lock);
+	return retval;
+}
+
+/**
+ * isr_setup_status_phase: queues the status phase of a setup transaction
+ * @mEp: endpoint
+ *
+ * This function returns an error code
+ */
+static int isr_setup_status_phase(struct ci13xxx_ep *mEp)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	int retval;
+
+	trace("%p", mEp);
+
+	/* mEp is always valid & configured */
+
+	if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+		mEp->dir = (mEp->dir == TX) ? RX : TX;
+
+	mEp->status->no_interrupt = 1;
+
+	spin_unlock(mEp->lock);
+	retval = usb_ep_queue(&mEp->ep, mEp->status, GFP_ATOMIC);
+	spin_lock(mEp->lock);
+
+	return retval;
+}
+
+/**
+ * isr_tr_complete_low: transaction complete low level handler
+ * @mEp: endpoint
+ *
+ * This function returns an error code
+ * Caller must hold lock
+ */
+static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
+__releases(mEp->lock)
+__acquires(mEp->lock)
+{
+	struct ci13xxx_req *mReq;
+	int retval;
+
+	trace("%p", mEp);
+
+	if (list_empty(&mEp->qh[mEp->dir].queue))
+		return -EINVAL;
+
+	/* pop oldest request */
+	mReq = list_entry(mEp->qh[mEp->dir].queue.next,
+			  struct ci13xxx_req, queue);
+	list_del_init(&mReq->queue);
+
+	retval = _hardware_dequeue(mEp, mReq);
+	if (retval < 0) {
+		dbg_event(_usb_addr(mEp), "DONE", retval);
+		goto done;
+	}
+
+	dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
+
+	if (!mReq->req.no_interrupt && mReq->req.complete != NULL) {
+		spin_unlock(mEp->lock);
+		mReq->req.complete(&mEp->ep, &mReq->req);
+		spin_lock(mEp->lock);
+	}
+
+	if (!list_empty(&mEp->qh[mEp->dir].queue)) {
+		mReq = list_entry(mEp->qh[mEp->dir].queue.next,
+				  struct ci13xxx_req, queue);
+		_hardware_enqueue(mEp, mReq);
+	}
+
+ done:
+	return retval;
+}
+
+/**
+ * isr_tr_complete_handler: transaction complete interrupt handler
+ * @udc: UDC descriptor
+ *
+ * This function handles traffic events
+ */
+static void isr_tr_complete_handler(struct ci13xxx *udc)
+__releases(udc->lock)
+__acquires(udc->lock)
+{
+	unsigned i;
+
+	trace("%p", udc);
+
+	if (udc == NULL) {
+		err("EINVAL");
+		return;
+	}
+
+	for (i = 0; i < hw_ep_max; i++) {
+		struct ci13xxx_ep *mEp  = &udc->ci13xxx_ep[i];
+		int type, num, err = -EINVAL;
+		struct usb_ctrlrequest req;
+
+
+		if (mEp->desc == NULL)
+			continue;   /* not configured */
+
+		if ((mEp->dir == RX && hw_test_and_clear_complete(i)) ||
+		    (mEp->dir == TX && hw_test_and_clear_complete(i + 16))) {
+			err = isr_tr_complete_low(mEp);
+			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
+				if (err > 0)   /* needs status phase */
+					err = isr_setup_status_phase(mEp);
+				if (err < 0) {
+					dbg_event(_usb_addr(mEp),
+						  "ERROR", err);
+					spin_unlock(udc->lock);
+					if (usb_ep_set_halt(&mEp->ep))
+						err("error: ep_set_halt");
+					spin_lock(udc->lock);
+				}
+			}
+		}
+
+		if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
+		    !hw_test_and_clear_setup_status(i))
+			continue;
+
+		if (i != 0) {
+			warn("ctrl traffic received at endpoint");
+			continue;
+		}
+
+		/* read_setup_packet */
+		do {
+			hw_test_and_set_setup_guard();
+			memcpy(&req, &mEp->qh[RX].ptr->setup, sizeof(req));
+		} while (!hw_test_and_clear_setup_guard());
+
+		type = req.bRequestType;
+
+		mEp->dir = (type & USB_DIR_IN) ? TX : RX;
+
+		dbg_setup(_usb_addr(mEp), &req);
+
+		switch (req.bRequest) {
+		case USB_REQ_CLEAR_FEATURE:
+			if (type != (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+			    le16_to_cpu(req.wValue) != USB_ENDPOINT_HALT)
+				goto delegate;
+			if (req.wLength != 0)
+				break;
+			num  = le16_to_cpu(req.wIndex);
+			num &= USB_ENDPOINT_NUMBER_MASK;
+			if (!udc->ci13xxx_ep[num].wedge) {
+				spin_unlock(udc->lock);
+				err = usb_ep_clear_halt(
+					&udc->ci13xxx_ep[num].ep);
+				spin_lock(udc->lock);
+				if (err)
+					break;
+			}
+			err = isr_setup_status_phase(mEp);
+			break;
+		case USB_REQ_GET_STATUS:
+			if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
+			    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
+			    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
+				goto delegate;
+			if (le16_to_cpu(req.wLength) != 2 ||
+			    le16_to_cpu(req.wValue)  != 0)
+				break;
+			err = isr_get_status_response(mEp, &req);
+			break;
+		case USB_REQ_SET_ADDRESS:
+			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
+				goto delegate;
+			if (le16_to_cpu(req.wLength) != 0 ||
+			    le16_to_cpu(req.wIndex)  != 0)
+				break;
+			err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
+			if (err)
+				break;
+			err = isr_setup_status_phase(mEp);
+			break;
+		case USB_REQ_SET_FEATURE:
+			if (type != (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
+			    le16_to_cpu(req.wValue) != USB_ENDPOINT_HALT)
+				goto delegate;
+			if (req.wLength != 0)
+				break;
+			num  = le16_to_cpu(req.wIndex);
+			num &= USB_ENDPOINT_NUMBER_MASK;
+
+			spin_unlock(udc->lock);
+			err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
+			spin_lock(udc->lock);
+			if (err)
+				break;
+			err = isr_setup_status_phase(mEp);
+			break;
+		default:
+delegate:
+			if (req.wLength == 0)   /* no data phase */
+				mEp->dir = TX;
+
+			spin_unlock(udc->lock);
+			err = udc->driver->setup(&udc->gadget, &req);
+			spin_lock(udc->lock);
+			break;
+		}
+
+		if (err < 0) {
+			dbg_event(_usb_addr(mEp), "ERROR", err);
+
+			spin_unlock(udc->lock);
+			if (usb_ep_set_halt(&mEp->ep))
+				err("error: ep_set_halt");
+			spin_lock(udc->lock);
+		}
+	}
+}
+
+/******************************************************************************
+ * ENDPT block
+ *****************************************************************************/
+/**
+ * ep_enable: configure endpoint, making it usable
+ *
+ * Check usb_ep_enable() at "usb_gadget.h" for details
+ */
+static int ep_enable(struct usb_ep *ep,
+		     const struct usb_endpoint_descriptor *desc)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	int direction, retval = 0;
+	unsigned long flags;
+
+	trace("%p, %p", ep, desc);
+
+	if (ep == NULL || desc == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	/* only internal SW should enable ctrl endpts */
+
+	mEp->desc = desc;
+
+	if (!list_empty(&mEp->qh[mEp->dir].queue))
+		warn("enabling a non-empty endpoint!");
+
+	mEp->dir  = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? TX : RX;
+	mEp->num  =  desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+	mEp->type =  desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+	mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize);
+
+	direction = mEp->dir;
+	do {
+		dbg_event(_usb_addr(mEp), "ENABLE", 0);
+
+		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+			mEp->qh[mEp->dir].ptr->cap |=  QH_IOS;
+		else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
+			mEp->qh[mEp->dir].ptr->cap &= ~QH_MULT;
+		else
+			mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT;
+
+		mEp->qh[mEp->dir].ptr->cap |=
+			(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
+		mEp->qh[mEp->dir].ptr->td.next |= TD_TERMINATE;   /* needed? */
+
+		retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
+
+		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+			mEp->dir = (mEp->dir == TX) ? RX : TX;
+
+	} while (mEp->dir != direction);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return retval;
+}
+
+/**
+ * ep_disable: endpoint is no longer usable
+ *
+ * Check usb_ep_disable() at "usb_gadget.h" for details
+ */
+static int ep_disable(struct usb_ep *ep)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	int direction, retval = 0;
+	unsigned long flags;
+
+	trace("%p", ep);
+
+	if (ep == NULL)
+		return -EINVAL;
+	else if (mEp->desc == NULL)
+		return -EBUSY;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	/* only internal SW should disable ctrl endpts */
+
+	direction = mEp->dir;
+	do {
+		dbg_event(_usb_addr(mEp), "DISABLE", 0);
+
+		retval |= _ep_nuke(mEp);
+		retval |= hw_ep_disable(mEp->num, mEp->dir);
+
+		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+			mEp->dir = (mEp->dir == TX) ? RX : TX;
+
+	} while (mEp->dir != direction);
+
+	mEp->desc = NULL;
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return retval;
+}
+
+/**
+ * ep_alloc_request: allocate a request object to use with this endpoint
+ *
+ * Check usb_ep_alloc_request() at "usb_gadget.h" for details
+ */
+static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+	struct ci13xxx_ep  *mEp  = container_of(ep, struct ci13xxx_ep, ep);
+	struct ci13xxx_req *mReq = NULL;
+	unsigned long flags;
+
+	trace("%p, %i", ep, gfp_flags);
+
+	if (ep == NULL) {
+		err("EINVAL");
+		return NULL;
+	}
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
+	if (mReq != NULL) {
+		INIT_LIST_HEAD(&mReq->queue);
+
+		mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
+					   &mReq->dma);
+		if (mReq->ptr == NULL) {
+			kfree(mReq);
+			mReq = NULL;
+		}
+	}
+
+	dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+
+	return (mReq == NULL) ? NULL : &mReq->req;
+}
+
+/**
+ * ep_free_request: frees a request object
+ *
+ * Check usb_ep_free_request() at "usb_gadget.h" for details
+ */
+static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
+	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+	unsigned long flags;
+
+	trace("%p, %p", ep, req);
+
+	if (ep == NULL || req == NULL) {
+		err("EINVAL");
+		return;
+	} else if (!list_empty(&mReq->queue)) {
+		err("EBUSY");
+		return;
+	}
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	if (mReq->ptr)
+		dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
+	kfree(mReq);
+
+	dbg_event(_usb_addr(mEp), "FREE", 0);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+}
+
+/**
+ * ep_queue: queues (submits) an I/O request to an endpoint
+ *
+ * Check usb_ep_queue() at "usb_gadget.h" for details
+ */
+static int ep_queue(struct usb_ep *ep, struct usb_request *req,
+		    gfp_t __maybe_unused gfp_flags)
+{
+	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
+	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+	int retval = 0;
+	unsigned long flags;
+
+	trace("%p, %p, %X", ep, req, gfp_flags);
+
+	if (ep == NULL || req == NULL || mEp->desc == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	if (mEp->type == USB_ENDPOINT_XFER_CONTROL &&
+	    !list_empty(&mEp->qh[mEp->dir].queue)) {
+		_ep_nuke(mEp);
+		retval = -EOVERFLOW;
+		warn("endpoint ctrl %X nuked", _usb_addr(mEp));
+	}
+
+	/* first nuke then test link, e.g. previous status has not been sent */
+	if (!list_empty(&mReq->queue)) {
+		retval = -EBUSY;
+		err("request already in queue");
+		goto done;
+	}
+
+	if (req->length > (4 * PAGE_SIZE)) {
+		req->length = (4 * PAGE_SIZE);
+		retval = -EMSGSIZE;
+		warn("request length truncated");
+	}
+
+	dbg_queue(_usb_addr(mEp), req, retval);
+
+	/* push request */
+	mReq->req.status = -EINPROGRESS;
+	mReq->req.actual = 0;
+	list_add_tail(&mReq->queue, &mEp->qh[mEp->dir].queue);
+
+	retval = _hardware_enqueue(mEp, mReq);
+	if (retval == -EALREADY || retval == -EBUSY) {
+		dbg_event(_usb_addr(mEp), "QUEUE", retval);
+		retval = 0;
+	}
+
+ done:
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return retval;
+}
+
+/**
+ * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
+ *
+ * Check usb_ep_dequeue() at "usb_gadget.h" for details
+ */
+static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
+	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
+	unsigned long flags;
+
+	trace("%p, %p", ep, req);
+
+	if (ep == NULL || req == NULL || mEp->desc == NULL ||
+	    list_empty(&mReq->queue)  || list_empty(&mEp->qh[mEp->dir].queue))
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	dbg_event(_usb_addr(mEp), "DEQUEUE", 0);
+
+	if (mReq->req.status == -EALREADY)
+		_hardware_dequeue(mEp, mReq);
+
+	/* pop request */
+	list_del_init(&mReq->queue);
+	req->status = -ECONNRESET;
+
+	if (!mReq->req.no_interrupt && mReq->req.complete != NULL) {
+		spin_unlock(mEp->lock);
+		mReq->req.complete(&mEp->ep, &mReq->req);
+		spin_lock(mEp->lock);
+	}
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return 0;
+}
+
+/**
+ * ep_set_halt: sets the endpoint halt feature
+ *
+ * Check usb_ep_set_halt() at "usb_gadget.h" for details
+ */
+static int ep_set_halt(struct usb_ep *ep, int value)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	int direction, retval = 0;
+	unsigned long flags;
+
+	trace("%p, %i", ep, value);
+
+	if (ep == NULL || mEp->desc == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+#ifndef STALL_IN
+	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
+	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
+	    !list_empty(&mEp->qh[mEp->dir].queue)) {
+		spin_unlock_irqrestore(mEp->lock, flags);
+		return -EAGAIN;
+	}
+#endif
+
+	direction = mEp->dir;
+	do {
+		dbg_event(_usb_addr(mEp), "HALT", value);
+		retval |= hw_ep_set_halt(mEp->num, mEp->dir, value);
+
+		if (!value)
+			mEp->wedge = 0;
+
+		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+			mEp->dir = (mEp->dir == TX) ? RX : TX;
+
+	} while (mEp->dir != direction);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+	return retval;
+}
+
+/**
+ * ep_set_wedge: sets the halt feature and ignores clear requests
+ *
+ * Check usb_ep_set_wedge() at "usb_gadget.h" for details
+ */
+static int ep_set_wedge(struct usb_ep *ep)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	unsigned long flags;
+
+	trace("%p", ep);
+
+	if (ep == NULL || mEp->desc == NULL)
+		return -EINVAL;
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	dbg_event(_usb_addr(mEp), "WEDGE", 0);
+	mEp->wedge = 1;
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+
+	return usb_ep_set_halt(ep);
+}
+
+/**
+ * ep_fifo_flush: flushes contents of a fifo
+ *
+ * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
+ */
+static void ep_fifo_flush(struct usb_ep *ep)
+{
+	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
+	unsigned long flags;
+
+	trace("%p", ep);
+
+	if (ep == NULL) {
+		err("%02X: -EINVAL", _usb_addr(mEp));
+		return;
+	}
+
+	spin_lock_irqsave(mEp->lock, flags);
+
+	dbg_event(_usb_addr(mEp), "FFLUSH", 0);
+	hw_ep_flush(mEp->num, mEp->dir);
+
+	spin_unlock_irqrestore(mEp->lock, flags);
+}
+
+/**
+ * Endpoint-specific part of the API to the USB controller hardware
+ * Check "usb_gadget.h" for details
+ */
+static const struct usb_ep_ops usb_ep_ops = {
+	.enable	       = ep_enable,
+	.disable       = ep_disable,
+	.alloc_request = ep_alloc_request,
+	.free_request  = ep_free_request,
+	.queue	       = ep_queue,
+	.dequeue       = ep_dequeue,
+	.set_halt      = ep_set_halt,
+	.set_wedge     = ep_set_wedge,
+	.fifo_flush    = ep_fifo_flush,
+};
+
+/******************************************************************************
+ * GADGET block
+ *****************************************************************************/
+/**
+ * Device operations part of the API to the USB controller hardware,
+ * which don't involve endpoints (or i/o)
+ * Check  "usb_gadget.h" for details
+ */
+static const struct usb_gadget_ops usb_gadget_ops;
+
+/**
+ * usb_gadget_register_driver: register a gadget driver
+ *
+ * Check usb_gadget_register_driver() at "usb_gadget.h" for details
+ * Interrupts are enabled here
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+	struct ci13xxx *udc = _udc;
+	unsigned long i, k, flags;
+	int retval = -ENOMEM;
+
+	trace("%p", driver);
+
+	if (driver             == NULL ||
+	    driver->bind       == NULL ||
+	    driver->unbind     == NULL ||
+	    driver->setup      == NULL ||
+	    driver->disconnect == NULL ||
+	    driver->suspend    == NULL ||
+	    driver->resume     == NULL)
+		return -EINVAL;
+	else if (udc         == NULL)
+		return -ENODEV;
+	else if (udc->driver != NULL)
+		return -EBUSY;
+
+	/* alloc resources */
+	udc->qh_pool = dma_pool_create("ci13xxx_qh", &udc->gadget.dev,
+				       sizeof(struct ci13xxx_qh),
+				       64, PAGE_SIZE);
+	if (udc->qh_pool == NULL)
+		return -ENOMEM;
+
+	udc->td_pool = dma_pool_create("ci13xxx_td", &udc->gadget.dev,
+				       sizeof(struct ci13xxx_td),
+				       64, PAGE_SIZE);
+	if (udc->td_pool == NULL) {
+		dma_pool_destroy(udc->qh_pool);
+		udc->qh_pool = NULL;
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(udc->lock, flags);
+
+	info("hw_ep_max = %d", hw_ep_max);
+
+	udc->driver = driver;
+	udc->gadget.ops        = NULL;
+	udc->gadget.dev.driver = NULL;
+
+	retval = 0;
+	for (i = 0; i < hw_ep_max; i++) {
+		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+
+		scnprintf(mEp->name, sizeof(mEp->name), "ep%i", (int)i);
+
+		mEp->lock         = udc->lock;
+		mEp->device       = &udc->gadget.dev;
+		mEp->td_pool      = udc->td_pool;
+
+		mEp->ep.name      = mEp->name;
+		mEp->ep.ops       = &usb_ep_ops;
+		mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
+
+		/* this allocation cannot be random */
+		for (k = RX; k <= TX; k++) {
+			INIT_LIST_HEAD(&mEp->qh[k].queue);
+			mEp->qh[k].ptr = dma_pool_alloc(udc->qh_pool,
+							GFP_KERNEL,
+							&mEp->qh[k].dma);
+			if (mEp->qh[k].ptr == NULL)
+				retval = -ENOMEM;
+			else
+				memset(mEp->qh[k].ptr, 0,
+				       sizeof(*mEp->qh[k].ptr));
+		}
+		if (i == 0)
+			udc->gadget.ep0 = &mEp->ep;
+		else
+			list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
+	}
+	if (retval)
+		goto done;
+
+	/* bind gadget */
+	driver->driver.bus     = NULL;
+	udc->gadget.ops        = &usb_gadget_ops;
+	udc->gadget.dev.driver = &driver->driver;
+
+	spin_unlock_irqrestore(udc->lock, flags);
+	retval = driver->bind(&udc->gadget);                /* MAY SLEEP */
+	spin_lock_irqsave(udc->lock, flags);
+
+	if (retval) {
+		udc->gadget.ops        = NULL;
+		udc->gadget.dev.driver = NULL;
+		goto done;
+	}
+
+	retval = hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
+
+ done:
+	spin_unlock_irqrestore(udc->lock, flags);
+	if (retval)
+		usb_gadget_unregister_driver(driver);
+	return retval;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
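+
+/*
+ * Minimal registration sketch (hypothetical gadget driver names; note that
+ * this UDC requires every callback below to be non-NULL):
+ *
+ *	static struct usb_gadget_driver my_driver = {
+ *		.speed      = USB_SPEED_HIGH,
+ *		.function   = "my_function",
+ *		.bind       = my_bind,
+ *		.unbind     = my_unbind,
+ *		.setup      = my_setup,
+ *		.disconnect = my_disconnect,
+ *		.suspend    = my_suspend,
+ *		.resume     = my_resume,
+ *		.driver     = {
+ *			.name = "my_function",
+ *		},
+ *	};
+ *
+ *	retval = usb_gadget_register_driver(&my_driver);
+ */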
+
+/**
+ * usb_gadget_unregister_driver: unregister a gadget driver
+ *
+ * Check usb_gadget_unregister_driver() at "usb_gadget.h" for details
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+	struct ci13xxx *udc = _udc;
+	unsigned long i, k, flags;
+
+	trace("%p", driver);
+
+	if (driver             == NULL ||
+	    driver->bind       == NULL ||
+	    driver->unbind     == NULL ||
+	    driver->setup      == NULL ||
+	    driver->disconnect == NULL ||
+	    driver->suspend    == NULL ||
+	    driver->resume     == NULL ||
+	    driver             != udc->driver)
+		return -EINVAL;
+
+	spin_lock_irqsave(udc->lock, flags);
+
+	hw_device_state(0);
+
+	/* unbind gadget */
+	if (udc->gadget.ops != NULL) {
+		_gadget_stop_activity(&udc->gadget);
+
+		spin_unlock_irqrestore(udc->lock, flags);
+		driver->unbind(&udc->gadget);               /* MAY SLEEP */
+		spin_lock_irqsave(udc->lock, flags);
+
+		udc->gadget.ops        = NULL;
+		udc->gadget.dev.driver = NULL;
+	}
+
+	/* free resources */
+	for (i = 0; i < hw_ep_max; i++) {
+		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+
+		if (i == 0)
+			udc->gadget.ep0 = NULL;
+		else if (!list_empty(&mEp->ep.ep_list))
+			list_del_init(&mEp->ep.ep_list);
+
+		for (k = RX; k <= TX; k++)
+			if (mEp->qh[k].ptr != NULL)
+				dma_pool_free(udc->qh_pool,
+					      mEp->qh[k].ptr, mEp->qh[k].dma);
+	}
+
+	udc->driver = NULL;
+
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	if (udc->td_pool != NULL) {
+		dma_pool_destroy(udc->td_pool);
+		udc->td_pool = NULL;
+	}
+	if (udc->qh_pool != NULL) {
+		dma_pool_destroy(udc->qh_pool);
+		udc->qh_pool = NULL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+/******************************************************************************
+ * BUS block
+ *****************************************************************************/
+/**
+ * udc_irq: global interrupt handler
+ *
+ * This function returns IRQ_HANDLED if the IRQ has been handled
+ * It locks access to registers
+ */
+static irqreturn_t udc_irq(void)
+{
+	struct ci13xxx *udc = _udc;
+	irqreturn_t retval;
+	u32 intr;
+
+	trace();
+
+	if (udc == NULL) {
+		err("ENODEV");
+		return IRQ_HANDLED;
+	}
+
+	spin_lock(udc->lock);
+	intr = hw_test_and_clear_intr_active();
+	if (intr) {
+		isr_statistics.hndl.buf[isr_statistics.hndl.idx++] = intr;
+		isr_statistics.hndl.idx &= ISR_MASK;
+		isr_statistics.hndl.cnt++;
+
+		/* order defines priority - do NOT change it */
+		if (USBi_URI & intr) {
+			isr_statistics.uri++;
+			isr_reset_handler(udc);
+		}
+		if (USBi_PCI & intr) {
+			isr_statistics.pci++;
+			udc->gadget.speed = hw_port_is_high_speed() ?
+				USB_SPEED_HIGH : USB_SPEED_FULL;
+		}
+		if (USBi_UEI & intr)
+			isr_statistics.uei++;
+		if (USBi_UI  & intr) {
+			isr_statistics.ui++;
+			isr_tr_complete_handler(udc);
+		}
+		if (USBi_SLI & intr)
+			isr_statistics.sli++;
+		retval = IRQ_HANDLED;
+	} else {
+		isr_statistics.none++;
+		retval = IRQ_NONE;
+	}
+	spin_unlock(udc->lock);
+
+	return retval;
+}
+
+/**
+ * udc_release: driver release function
+ * @dev: device
+ *
+ * Currently does nothing
+ */
+static void udc_release(struct device *dev)
+{
+	trace("%p", dev);
+
+	if (dev == NULL)
+		err("EINVAL");
+}
+
+/**
+ * udc_probe: parent probe must call this to initialize UDC
+ * @dev:  parent device
+ * @regs: registers base address
+ * @name: driver name
+ *
+ * This function returns an error code
+ * No interrupts active, the IRQ has not been requested yet
+ * Kernel assumes 32-bit DMA operations by default, no need to dma_set_mask
+ */
+static int udc_probe(struct device *dev, void __iomem *regs, const char *name)
+{
+	struct ci13xxx *udc;
+	int retval = 0;
+
+	trace("%p, %p, %p", dev, regs, name);
+
+	if (dev == NULL || regs == NULL || name == NULL)
+		return -EINVAL;
+
+	udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL);
+	if (udc == NULL)
+		return -ENOMEM;
+
+	udc->lock = &udc_lock;
+
+	retval = hw_device_reset(regs);
+	if (retval)
+		goto done;
+
+	udc->gadget.ops          = NULL;
+	udc->gadget.speed        = USB_SPEED_UNKNOWN;
+	udc->gadget.is_dualspeed = 1;
+	udc->gadget.is_otg       = 0;
+	udc->gadget.name         = name;
+
+	INIT_LIST_HEAD(&udc->gadget.ep_list);
+	udc->gadget.ep0 = NULL;
+
+	strcpy(udc->gadget.dev.bus_id, "gadget");
+	udc->gadget.dev.dma_mask = dev->dma_mask;
+	udc->gadget.dev.parent   = dev;
+	udc->gadget.dev.release  = udc_release;
+
+	retval = device_register(&udc->gadget.dev);
+	if (retval)
+		goto done;
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	retval = dbg_create_files(&udc->gadget.dev);
+#endif
+	if (retval) {
+		device_unregister(&udc->gadget.dev);
+		goto done;
+	}
+
+	_udc = udc;
+	return retval;
+
+ done:
+	err("error = %i", retval);
+	kfree(udc);
+	_udc = NULL;
+	return retval;
+}
+
+/**
+ * udc_remove: parent remove must call this to remove UDC
+ *
+ * No interrupts active, the IRQ has been released
+ */
+static void udc_remove(void)
+{
+	struct ci13xxx *udc = _udc;
+
+	if (udc == NULL) {
+		err("EINVAL");
+		return;
+	}
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	dbg_remove_files(&udc->gadget.dev);
+#endif
+	device_unregister(&udc->gadget.dev);
+
+	kfree(udc);
+	_udc = NULL;
+}
+
+/******************************************************************************
+ * PCI block
+ *****************************************************************************/
+/**
+ * ci13xxx_pci_irq: interrupt handler
+ * @irq:  irq number
+ * @pdev: USB Device Controller interrupt source
+ *
+ * This function returns IRQ_HANDLED if the IRQ has been handled
+ * This is an ISR; don't trace here, use the attribute interface instead
+ */
+static irqreturn_t ci13xxx_pci_irq(int irq, void *pdev)
+{
+	if (irq == 0) {
+		dev_err(&((struct pci_dev *)pdev)->dev, "Invalid IRQ0 usage!");
+		return IRQ_HANDLED;
+	}
+	return udc_irq();
+}
+
+/**
+ * ci13xxx_pci_probe: PCI probe
+ * @pdev: USB device controller being probed
+ * @id:   PCI hotplug ID connecting controller to UDC framework
+ *
+ * This function returns an error code
+ * Allocates basic PCI resources for this USB device controller, and then
+ * invokes the udc_probe() method to start the UDC associated with it
+ */
+static int __devinit ci13xxx_pci_probe(struct pci_dev *pdev,
+				       const struct pci_device_id *id)
+{
+	void __iomem *regs = NULL;
+	int retval = 0;
+
+	if (id == NULL)
+		return -EINVAL;
+
+	retval = pci_enable_device(pdev);
+	if (retval)
+		goto done;
+
+	if (!pdev->irq) {
+		dev_err(&pdev->dev, "No IRQ, check BIOS/PCI setup!");
+		retval = -ENODEV;
+		goto disable_device;
+	}
+
+	retval = pci_request_regions(pdev, UDC_DRIVER_NAME);
+	if (retval)
+		goto disable_device;
+
+	/* BAR 0 holds all the registers */
+	regs = pci_iomap(pdev, 0, 0);
+	if (!regs) {
+		dev_err(&pdev->dev, "Error mapping memory!");
+		retval = -EFAULT;
+		goto release_regions;
+	}
+	pci_set_drvdata(pdev, (__force void *)regs);
+
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+
+	retval = udc_probe(&pdev->dev, regs, UDC_DRIVER_NAME);
+	if (retval)
+		goto iounmap;
+
+	/* our device does not have MSI capability */
+
+	retval = request_irq(pdev->irq, ci13xxx_pci_irq, IRQF_SHARED,
+			     UDC_DRIVER_NAME, pdev);
+	if (retval)
+		goto gadget_remove;
+
+	return 0;
+
+ gadget_remove:
+	udc_remove();
+ iounmap:
+	pci_iounmap(pdev, regs);
+ release_regions:
+	pci_release_regions(pdev);
+ disable_device:
+	pci_disable_device(pdev);
+ done:
+	return retval;
+}
+
+/**
+ * ci13xxx_pci_remove: PCI remove
+ * @pdev: USB Device Controller being removed
+ *
+ * Reverses the effect of ci13xxx_pci_probe(),
+ * first invoking udc_remove() and then releasing
+ * all PCI resources allocated for this USB device controller
+ */
+static void __devexit ci13xxx_pci_remove(struct pci_dev *pdev)
+{
+	free_irq(pdev->irq, pdev);
+	udc_remove();
+	pci_iounmap(pdev, (__force void __iomem *)pci_get_drvdata(pdev));
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+/**
+ * PCI device table
+ * PCI device structure
+ *
+ * Check "pci.h" for details
+ */
+static DEFINE_PCI_DEVICE_TABLE(ci13xxx_pci_id_table) = {
+	{ PCI_DEVICE(0x153F, 0x1004) },
+	{ PCI_DEVICE(0x153F, 0x1006) },
+	{ 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, ci13xxx_pci_id_table);
+
+static struct pci_driver ci13xxx_pci_driver = {
+	.name         =	UDC_DRIVER_NAME,
+	.id_table     =	ci13xxx_pci_id_table,
+	.probe        =	ci13xxx_pci_probe,
+	.remove       =	__devexit_p(ci13xxx_pci_remove),
+};
+
+/**
+ * ci13xxx_pci_init: module init
+ *
+ * Driver load
+ */
+static int __init ci13xxx_pci_init(void)
+{
+	return pci_register_driver(&ci13xxx_pci_driver);
+}
+module_init(ci13xxx_pci_init);
+
+/**
+ * ci13xxx_pci_exit: module exit
+ *
+ * Driver unload
+ */
+static void __exit ci13xxx_pci_exit(void)
+{
+	pci_unregister_driver(&ci13xxx_pci_driver);
+}
+module_exit(ci13xxx_pci_exit);
+
+MODULE_AUTHOR("MIPS - David Lopo <dlopo@chipidea.mips.com>");
+MODULE_DESCRIPTION("MIPS CI13XXX USB Peripheral Controller");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("June 2008");
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
new file mode 100644
index 0000000..4026e9c
--- /dev/null
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -0,0 +1,195 @@
+/*
+ * ci13xxx_udc.h - structures, registers and macros for the MIPS USB IP core
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Description: MIPS USB IP core family device controller
+ *              Structures, registers and logging macros
+ */
+
+#ifndef _CI13XXX_h_
+#define _CI13XXX_h_
+
+/******************************************************************************
+ * DEFINE
+ *****************************************************************************/
+#define ENDPT_MAX          (16)
+#define CTRL_PAYLOAD_MAX   (64)
+#define RX        (0)  /* similar to USB_DIR_OUT but can be used as an index */
+#define TX        (1)  /* similar to USB_DIR_IN  but can be used as an index */
+
+/******************************************************************************
+ * STRUCTURES
+ *****************************************************************************/
+/* DMA layout of transfer descriptors */
+struct ci13xxx_td {
+	/* 0 */
+	u32 next;
+#define TD_TERMINATE          BIT(0)
+	/* 1 */
+	u32 token;
+#define TD_STATUS             (0x00FFUL <<  0)
+#define TD_STATUS_TR_ERR      BIT(3)
+#define TD_STATUS_DT_ERR      BIT(5)
+#define TD_STATUS_HALTED      BIT(6)
+#define TD_STATUS_ACTIVE      BIT(7)
+#define TD_MULTO              (0x0003UL << 10)
+#define TD_IOC                BIT(15)
+#define TD_TOTAL_BYTES        (0x7FFFUL << 16)
+	/* 2 */
+	u32 page[5];
+#define TD_CURR_OFFSET        (0x0FFFUL <<  0)
+#define TD_FRAME_NUM          (0x07FFUL <<  0)
+#define TD_RESERVED_MASK      (0x0FFFUL <<  0)
+} __attribute__ ((packed));
+
+/* DMA layout of queue heads */
+struct ci13xxx_qh {
+	/* 0 */
+	u32 cap;
+#define QH_IOS                BIT(15)
+#define QH_MAX_PKT            (0x07FFUL << 16)
+#define QH_ZLT                BIT(29)
+#define QH_MULT               (0x0003UL << 30)
+	/* 1 */
+	u32 curr;
+	/* 2 - 8 */
+	struct ci13xxx_td        td;
+	/* 9 */
+	u32 RESERVED;
+	struct usb_ctrlrequest   setup;
+} __attribute__ ((packed));
+
+/* Extension of usb_request */
+struct ci13xxx_req {
+	struct usb_request   req;
+	unsigned             map;
+	struct list_head     queue;
+	struct ci13xxx_td   *ptr;
+	dma_addr_t           dma;
+};
+
+/* Extension of usb_ep */
+struct ci13xxx_ep {
+	struct usb_ep                          ep;
+	const struct usb_endpoint_descriptor  *desc;
+	u8                                     dir;
+	u8                                     num;
+	u8                                     type;
+	char                                   name[16];
+	struct {
+		struct list_head   queue;
+		struct ci13xxx_qh *ptr;
+		dma_addr_t         dma;
+	}                                      qh[2];
+	struct usb_request                    *status;
+	int                                    wedge;
+
+	/* global resources */
+	spinlock_t                            *lock;
+	struct device                         *device;
+	struct dma_pool                       *td_pool;
+};
+
+/* CI13XXX UDC descriptor & global resources */
+struct ci13xxx {
+	spinlock_t		  *lock;      /* ctrl register bank access */
+
+	struct dma_pool           *qh_pool;   /* DMA pool for queue heads */
+	struct dma_pool           *td_pool;   /* DMA pool for transfer descs */
+
+	struct usb_gadget          gadget;     /* USB slave device */
+	struct ci13xxx_ep          ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
+
+	struct usb_gadget_driver  *driver;     /* 3rd party gadget driver */
+};
+
+/******************************************************************************
+ * REGISTERS
+ *****************************************************************************/
+/* register size */
+#define REG_BITS   (32)
+
+/* HCCPARAMS */
+#define HCCPARAMS_LEN         BIT(17)
+
+/* DCCPARAMS */
+#define DCCPARAMS_DEN         (0x1F << 0)
+#define DCCPARAMS_DC          BIT(7)
+
+/* TESTMODE */
+#define TESTMODE_FORCE        BIT(0)
+
+/* USBCMD */
+#define USBCMD_RS             BIT(0)
+#define USBCMD_RST            BIT(1)
+#define USBCMD_SUTW           BIT(13)
+
+/* USBSTS & USBINTR */
+#define USBi_UI               BIT(0)
+#define USBi_UEI              BIT(1)
+#define USBi_PCI              BIT(2)
+#define USBi_URI              BIT(6)
+#define USBi_SLI              BIT(8)
+
+/* DEVICEADDR */
+#define DEVICEADDR_USBADRA    BIT(24)
+#define DEVICEADDR_USBADR     (0x7FUL << 25)
+
+/* PORTSC */
+#define PORTSC_SUSP           BIT(7)
+#define PORTSC_HSP            BIT(9)
+#define PORTSC_PTC            (0x0FUL << 16)
+
+/* DEVLC */
+#define DEVLC_PSPD            (0x03UL << 25)
+#define    DEVLC_PSPD_HS      (0x02UL << 25)
+
+/* USBMODE */
+#define USBMODE_CM            (0x03UL <<  0)
+#define    USBMODE_CM_IDLE    (0x00UL <<  0)
+#define    USBMODE_CM_DEVICE  (0x02UL <<  0)
+#define    USBMODE_CM_HOST    (0x03UL <<  0)
+#define USBMODE_SLOM          BIT(3)
+
+/* ENDPTCTRL */
+#define ENDPTCTRL_RXS         BIT(0)
+#define ENDPTCTRL_RXT         (0x03UL <<  2)
+#define ENDPTCTRL_RXR         BIT(6)         /* reserved for port 0 */
+#define ENDPTCTRL_RXE         BIT(7)
+#define ENDPTCTRL_TXS         BIT(16)
+#define ENDPTCTRL_TXT         (0x03UL << 18)
+#define ENDPTCTRL_TXR         BIT(22)        /* reserved for port 0 */
+#define ENDPTCTRL_TXE         BIT(23)
+
+/******************************************************************************
+ * LOGGING
+ *****************************************************************************/
+#define ci13xxx_printk(level, format, args...) \
+do { \
+	if (_udc == NULL) \
+		printk(level "[%s] " format "\n", __func__, ## args); \
+	else \
+		dev_printk(level, _udc->gadget.dev.parent, \
+			   "[%s] " format "\n", __func__, ## args); \
+} while (0)
+
+#define err(format, args...)    ci13xxx_printk(KERN_ERR, format, ## args)
+#define warn(format, args...)   ci13xxx_printk(KERN_WARNING, format, ## args)
+#define info(format, args...)   ci13xxx_printk(KERN_INFO, format, ## args)
+
+#ifdef TRACE
+#define trace(format, args...)      ci13xxx_printk(KERN_DEBUG, format, ## args)
+#define dbg_trace(format, args...)  dev_dbg(dev, format, ##args)
+#else
+#define trace(format, args...)      do {} while (0)
+#define dbg_trace(format, args...)  do {} while (0)
+#endif
+
+#endif	/* _CI13XXX_h_ */
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 9462e30..a36b117 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -161,7 +161,7 @@
 	/* report address */
 	desc->bEndpointAddress &= USB_DIR_IN;
 	if (isdigit (ep->name [2])) {
-		u8	num = simple_strtol (&ep->name [2], NULL, 10);
+		u8	num = simple_strtoul (&ep->name [2], NULL, 10);
 		desc->bEndpointAddress |= num;
 #ifdef	MANY_ENDPOINTS
 	} else if (desc->bEndpointAddress & USB_DIR_IN) {
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index d8fc9b3..c0916c7 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -279,6 +279,13 @@
 	return err;
 }
 
+static const struct net_device_ops pn_netdev_ops = {
+	.ndo_open	= pn_net_open,
+	.ndo_stop	= pn_net_close,
+	.ndo_start_xmit	= pn_net_xmit,
+	.ndo_change_mtu	= pn_net_mtu,
+};
+
 static void pn_net_setup(struct net_device *dev)
 {
 	dev->features		= 0;
@@ -290,12 +297,9 @@
 	dev->addr_len		= 1;
 	dev->tx_queue_len	= 1;
 
+	dev->netdev_ops		= &pn_netdev_ops;
 	dev->destructor		= free_netdev;
 	dev->header_ops		= &phonet_header_ops;
-	dev->open		= pn_net_open;
-	dev->stop		= pn_net_close;
-	dev->hard_start_xmit	= pn_net_xmit; /* mandatory */
-	dev->change_mtu		= pn_net_mtu;
 }
 
 /*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 2e71368..b10fa31 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -1,7 +1,7 @@
 /*
  * file_storage.c -- File-backed USB Storage Gadget, for USB development
  *
- * Copyright (C) 2003-2007 Alan Stern
+ * Copyright (C) 2003-2008 Alan Stern
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -38,16 +38,17 @@
 
 /*
  * The File-backed Storage Gadget acts as a USB Mass Storage device,
- * appearing to the host as a disk drive.  In addition to providing an
- * example of a genuinely useful gadget driver for a USB device, it also
- * illustrates a technique of double-buffering for increased throughput.
- * Last but not least, it gives an easy way to probe the behavior of the
- * Mass Storage drivers in a USB host.
+ * appearing to the host as a disk drive or as a CD-ROM drive.  In addition
+ * to providing an example of a genuinely useful gadget driver for a USB
+ * device, it also illustrates a technique of double-buffering for increased
+ * throughput.  Last but not least, it gives an easy way to probe the
+ * behavior of the Mass Storage drivers in a USB host.
  *
  * Backing storage is provided by a regular file or a block device, specified
  * by the "file" module parameter.  Access can be limited to read-only by
- * setting the optional "ro" module parameter.  The gadget will indicate that
- * it has removable media if the optional "removable" module parameter is set.
+ * setting the optional "ro" module parameter.  (For CD-ROM emulation,
+ * access is always read-only.)  The gadget will indicate that it has
+ * removable media if the optional "removable" module parameter is set.
  *
  * The gadget supports the Control-Bulk (CB), Control-Bulk-Interrupt (CBI),
  * and Bulk-Only (also known as Bulk-Bulk-Bulk or BBB) transports, selected
@@ -64,7 +65,12 @@
  * The default number of LUNs is taken from the number of "file" elements;
  * it is 1 if "file" is not given.  If "removable" is not set then a backing
  * file must be specified for each LUN.  If it is set, then an unspecified
- * or empty backing filename means the LUN's medium is not loaded.
+ * or empty backing filename means the LUN's medium is not loaded.  Ideally
+ * each LUN would be settable independently as a disk drive or a CD-ROM
+ * drive, but currently all LUNs have to be the same type.  The CD-ROM
+ * emulation includes a single data track and no audio tracks; hence there
+ * need be only one backing file per LUN.  Note also that the CD-ROM block
+ * length is set to 512 rather than the more common value 2048.
  *
  * Requirements are modest; only a bulk-in and a bulk-out endpoint are
  * needed (an interrupt-out endpoint is also needed for CBI).  The memory
@@ -91,6 +97,8 @@
  *					USB device controller (usually true),
  *					boolean to permit the driver to halt
  *					bulk endpoints
+ *	cdrom			Default false, boolean for whether to emulate
+ *					a CD-ROM drive
  *	transport=XXX		Default BBB, transport name (CB, CBI, or BBB)
  *	protocol=YYY		Default SCSI, protocol name (RBC, 8020 or
  *					ATAPI, QIC, UFI, 8070, or SCSI;
@@ -103,15 +111,16 @@
  *					PAGE_CACHE_SIZE)
  *
  * If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file", "ro",
- * "removable", "luns", and "stall" options are available; default values
- * are used for everything else.
+ * "removable", "luns", "stall", and "cdrom" options are available; default
+ * values are used for everything else.
  *
  * The pathnames of the backing files and the ro settings are available in
  * the attribute files "file" and "ro" in the lun<n> subdirectory of the
  * gadget's sysfs directory.  If the "removable" option is set, writing to
  * these files will simulate ejecting/loading the medium (writing an empty
  * line means eject) and adjusting a write-enable tab.  Changes to the ro
- * setting are not allowed when the medium is loaded.
+ * setting are not allowed when the medium is loaded or if CD-ROM emulation
+ * is being used.
  *
  * This gadget driver is heavily based on "Gadget Zero" by David Brownell.
  * The driver's SCSI command interface was based on the "Information
@@ -261,7 +270,7 @@
 
 #define DRIVER_DESC		"File-backed Storage Gadget"
 #define DRIVER_NAME		"g_file_storage"
-#define DRIVER_VERSION		"7 August 2007"
+#define DRIVER_VERSION		"20 November 2008"
 
 static const char longname[] = DRIVER_DESC;
 static const char shortname[] = DRIVER_NAME;
@@ -341,6 +350,7 @@
 
 	int		removable;
 	int		can_stall;
+	int		cdrom;
 
 	char		*transport_parm;
 	char		*protocol_parm;
@@ -359,6 +369,7 @@
 	.protocol_parm		= "SCSI",
 	.removable		= 0,
 	.can_stall		= 1,
+	.cdrom			= 0,
 	.vendor			= DRIVER_VENDOR_ID,
 	.product		= DRIVER_PRODUCT_ID,
 	.release		= 0xffff,	// Use controller chip type
@@ -382,6 +393,9 @@
 module_param_named(stall, mod_data.can_stall, bool, S_IRUGO);
 MODULE_PARM_DESC(stall, "false to prevent bulk stalls");
 
+module_param_named(cdrom, mod_data.cdrom, bool, S_IRUGO);
+MODULE_PARM_DESC(cdrom, "true to emulate cdrom instead of disk");
+
 
 /* In the non-TEST version, only the module parameters listed above
  * are available. */
@@ -411,6 +425,10 @@
 
 /*-------------------------------------------------------------------------*/
 
+/* SCSI device types */
+#define TYPE_DISK	0x00
+#define TYPE_CDROM	0x05
+
 /* USB protocol value = the transport method */
 #define USB_PR_CBI	0x00		// Control/Bulk/Interrupt
 #define USB_PR_CB	0x01		// Control/Bulk w/o interrupt
@@ -487,6 +505,8 @@
 #define SC_READ_12			0xa8
 #define SC_READ_CAPACITY		0x25
 #define SC_READ_FORMAT_CAPACITIES	0x23
+#define SC_READ_HEADER			0x44
+#define SC_READ_TOC			0x43
 #define SC_RELEASE			0x17
 #define SC_REQUEST_SENSE		0x03
 #define SC_RESERVE			0x16
@@ -2006,23 +2026,28 @@
 	u8	*buf = (u8 *) bh->buf;
 
 	static char vendor_id[] = "Linux   ";
-	static char product_id[] = "File-Stor Gadget";
+	static char product_disk_id[] = "File-Stor Gadget";
+	static char product_cdrom_id[] = "File-CD Gadget  ";
 
 	if (!fsg->curlun) {		// Unsupported LUNs are okay
 		fsg->bad_lun_okay = 1;
 		memset(buf, 0, 36);
 		buf[0] = 0x7f;		// Unsupported, no device-type
+		buf[4] = 31;		// Additional length
 		return 36;
 	}
 
-	memset(buf, 0, 8);	// Non-removable, direct-access device
+	memset(buf, 0, 8);
+	buf[0] = (mod_data.cdrom ? TYPE_CDROM : TYPE_DISK);
 	if (mod_data.removable)
 		buf[1] = 0x80;
 	buf[2] = 2;		// ANSI SCSI level 2
 	buf[3] = 2;		// SCSI-2 INQUIRY data format
 	buf[4] = 31;		// Additional length
 				// No special options
-	sprintf(buf + 8, "%-8s%-16s%04x", vendor_id, product_id,
+	sprintf(buf + 8, "%-8s%-16s%04x", vendor_id,
+			(mod_data.cdrom ? product_cdrom_id :
+				product_disk_id),
 			mod_data.release);
 	return 36;
 }
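Note: the 36-byte response assembled in do_inquiry() above follows the standard
SCSI-2/SPC INQUIRY layout; for reference, the fields the code fills in are (offsets per
the SCSI spec, descriptions added here):

	buf[0]       peripheral device type: 0x00 disk, 0x05 CD-ROM, 0x7f unsupported LUN
	buf[1]       bit 7 = RMB (removable medium)
	buf[2]       ANSI version (2 = SCSI-2)
	buf[3]       response data format (2 = SCSI-2 INQUIRY format)
	buf[4]       additional length = 31 (36 bytes total)
	buf[8..15]   vendor identification, space padded ("Linux   ")
	buf[16..31]  product identification, space padded
	buf[32..35]  product revision level (here the 4-digit release number)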
@@ -2101,6 +2126,75 @@
 }
 
 
+static void store_cdrom_address(u8 *dest, int msf, u32 addr)
+{
+	if (msf) {
+		/* Convert to Minutes-Seconds-Frames */
+		addr >>= 2;		/* Convert to 2048-byte frames */
+		addr += 2*75;		/* Lead-in occupies 2 seconds */
+		dest[3] = addr % 75;	/* Frames */
+		addr /= 75;
+		dest[2] = addr % 60;	/* Seconds */
+		addr /= 60;
+		dest[1] = addr;		/* Minutes */
+		dest[0] = 0;		/* Reserved */
+	} else {
+		/* Absolute sector */
+		put_be32(dest, addr);
+	}
+}
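/*
 * For illustration only (not from this patch): a standalone user-space sketch
 * of the same Minutes-Seconds-Frames conversion, handy for sanity-checking
 * the arithmetic (512-byte LBAs, 75 frames per second, 2-second lead-in).
 */
#include <stdio.h>

static void lba_to_msf(unsigned int lba, unsigned int msf[3])
{
	unsigned int frames = lba / 4 + 2 * 75;	/* 2048-byte frames + lead-in */

	msf[2] = frames % 75;			/* frames */
	msf[1] = (frames / 75) % 60;		/* seconds */
	msf[0] = frames / (75 * 60);		/* minutes */
}

int main(void)
{
	unsigned int msf[3];

	lba_to_msf(3000, msf);	/* 512-byte block 3000 = 2048-byte frame 750 */
	printf("%02u:%02u:%02u\n", msf[0], msf[1], msf[2]);	/* prints 00:12:00 */
	return 0;
}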
+
+static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct lun	*curlun = fsg->curlun;
+	int		msf = fsg->cmnd[1] & 0x02;
+	u32		lba = get_be32(&fsg->cmnd[2]);
+	u8		*buf = (u8 *) bh->buf;
+
+	if ((fsg->cmnd[1] & ~0x02) != 0) {		/* Mask away MSF */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	memset(buf, 0, 8);
+	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
+	store_cdrom_address(&buf[4], msf, lba);
+	return 8;
+}
+
+
+static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct lun	*curlun = fsg->curlun;
+	int		msf = fsg->cmnd[1] & 0x02;
+	int		start_track = fsg->cmnd[6];
+	u8		*buf = (u8 *) bh->buf;
+
+	if ((fsg->cmnd[1] & ~0x02) != 0 ||		/* Mask away MSF */
+			start_track > 1) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	memset(buf, 0, 20);
+	buf[1] = (20-2);		/* TOC data length */
+	buf[2] = 1;			/* First track number */
+	buf[3] = 1;			/* Last track number */
+	buf[5] = 0x16;			/* Data track, copying allowed */
+	buf[6] = 0x01;			/* Only track is number 1 */
+	store_cdrom_address(&buf[8], msf, 0);
+
+	buf[13] = 0x16;			/* Lead-out track is data */
+	buf[14] = 0xAA;			/* Lead-out track number */
+	store_cdrom_address(&buf[16], msf, curlun->num_sectors);
+	return 20;
+}
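/*
 * For illustration only (not from this patch): the 20 bytes built above are
 * parsed by the host as a TOC header plus two 8-byte track descriptors:
 *   bytes 0-1    TOC data length (18)
 *   bytes 2-3    first track = 1, last track = 1
 *   bytes 4-11   track 1 descriptor: ADR/CTRL 0x16 (data track, copy allowed),
 *                start address 00:02:00 MSF (LBA 0)
 *   bytes 12-19  lead-out descriptor (track 0xAA) at the end of the image
 */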
+
+
 static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
 {
 	struct lun	*curlun = fsg->curlun;
@@ -2848,6 +2942,26 @@
 			reply = do_read_capacity(fsg, bh);
 		break;
 
+	case SC_READ_HEADER:
+		if (!mod_data.cdrom)
+			goto unknown_cmnd;
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(3<<7) | (0x1f<<1), 1,
+				"READ HEADER")) == 0)
+			reply = do_read_header(fsg, bh);
+		break;
+
+	case SC_READ_TOC:
+		if (!mod_data.cdrom)
+			goto unknown_cmnd;
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(7<<6) | (1<<1), 1,
+				"READ TOC")) == 0)
+			reply = do_read_toc(fsg, bh);
+		break;
+
 	case SC_READ_FORMAT_CAPACITIES:
 		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
 		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
@@ -2933,6 +3047,7 @@
 		// Fall through
 
 	default:
+ unknown_cmnd:
 		fsg->data_size_from_cmnd = 0;
 		sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
 		if ((reply = check_command(fsg, fsg->cmnd_size,
@@ -3498,6 +3613,7 @@
 	struct inode			*inode = NULL;
 	loff_t				size;
 	loff_t				num_sectors;
+	loff_t				min_sectors;
 
 	/* R/W if we can, R/O if we must */
 	ro = curlun->ro;
@@ -3541,8 +3657,19 @@
 		rc = (int) size;
 		goto out;
 	}
-	num_sectors = size >> 9;	// File size in 512-byte sectors
-	if (num_sectors == 0) {
+	num_sectors = size >> 9;	// File size in 512-byte blocks
+	min_sectors = 1;
+	if (mod_data.cdrom) {
+		num_sectors &= ~3;	// Reduce to a multiple of 2048
+		min_sectors = 300*4;	// Smallest track is 300 frames
+		if (num_sectors >= 256*60*75*4) {
+			num_sectors = (256*60*75 - 1) * 4;
+			LINFO(curlun, "file too big: %s\n", filename);
+			LINFO(curlun, "using only first %d blocks\n",
+					(int) num_sectors);
+		}
+	}
+	if (num_sectors < min_sectors) {
 		LINFO(curlun, "file too small: %s\n", filename);
 		rc = -ETOOSMALL;
 		goto out;
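Note: the constants above put concrete bounds on a CD-ROM backing image; a quick
user-space sketch of the arithmetic, for illustration only:

	#include <stdio.h>

	int main(void)
	{
		/* smallest allowed track: 300 frames of 2048 bytes */
		unsigned long long min_bytes = 300ULL * 2048;

		/* MSF addressing runs out below 256 minutes: 256*60*75 frames */
		unsigned long long max_frames = 256ULL * 60 * 75 - 1;
		unsigned long long max_bytes  = max_frames * 2048;

		printf("min image: %llu bytes\n", min_bytes);	/* 614400 */
		printf("max image: %llu bytes\n", max_bytes);	/* 2359293952, about 2.2 GiB */
		return 0;
	}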
@@ -3845,9 +3972,12 @@
 		goto out;
 
 	if (mod_data.removable) {	// Enable the store_xxx attributes
-		dev_attr_ro.attr.mode = dev_attr_file.attr.mode = 0644;
-		dev_attr_ro.store = store_ro;
+		dev_attr_file.attr.mode = 0644;
 		dev_attr_file.store = store_file;
+		if (!mod_data.cdrom) {
+			dev_attr_ro.attr.mode = 0644;
+			dev_attr_ro.store = store_ro;
+		}
 	}
 
 	/* Find out how many LUNs there should be */
@@ -3872,6 +4002,8 @@
 	for (i = 0; i < fsg->nluns; ++i) {
 		curlun = &fsg->luns[i];
 		curlun->ro = mod_data.ro[i];
+		if (mod_data.cdrom)
+			curlun->ro = 1;
 		curlun->dev.release = lun_release;
 		curlun->dev.parent = &gadget->dev;
 		curlun->dev.driver = &fsg_driver.driver;
@@ -4031,9 +4163,9 @@
 			mod_data.protocol_name, mod_data.protocol_type);
 	DBG(fsg, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n",
 			mod_data.vendor, mod_data.product, mod_data.release);
-	DBG(fsg, "removable=%d, stall=%d, buflen=%u\n",
+	DBG(fsg, "removable=%d, stall=%d, cdrom=%d, buflen=%u\n",
 			mod_data.removable, mod_data.can_stall,
-			mod_data.buflen);
+			mod_data.cdrom, mod_data.buflen);
 	DBG(fsg, "I/O thread pid: %d\n", task_pid_nr(fsg->thread_task));
 
 	set_bit(REGISTERED, &fsg->atomic_bitflags);
@@ -4050,6 +4182,7 @@
 	fsg->state = FSG_STATE_TERMINATED;	// The thread is dead
 	fsg_unbind(gadget);
 	close_all_backing_files(fsg);
+	complete(&fsg->thread_notifier);
 	return rc;
 }
 
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index f402725..d6c5bcd 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -26,6 +26,7 @@
 #include <linux/ioport.h>
 #include <linux/types.h>
 #include <linux/errno.h>
+#include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/interrupt.h>
@@ -370,6 +371,9 @@
 	/* alloc multi-ram for BD rings and set the ep parameters */
 	tmp_addr = cpm_muram_alloc(sizeof(struct qe_bd) * (bdring_len +
 				USB_BDRING_LEN_TX), QE_ALIGNMENT_OF_BD);
+	if (IS_ERR_VALUE(tmp_addr))
+		return -ENOMEM;
+
 	out_be16(&epparam->rbase, (u16)tmp_addr);
 	out_be16(&epparam->tbase, (u16)(tmp_addr +
 				(sizeof(struct qe_bd) * bdring_len)));
@@ -689,7 +693,7 @@
 en_done1:
 	spin_unlock_irqrestore(&udc->lock, flags);
 en_done:
-	dev_dbg(udc->dev, "failed to initialize %s\n", ep->ep.name);
+	dev_err(udc->dev, "failed to initialize %s\n", ep->ep.name);
 	return -ENODEV;
 }
 
@@ -2408,6 +2412,8 @@
 	tmp_addr = cpm_muram_alloc((USB_MAX_ENDPOINTS *
 					sizeof(struct usb_ep_para)),
 					   USB_EP_PARA_ALIGNMENT);
+	if (IS_ERR_VALUE(tmp_addr))
+		goto cleanup;
 
 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
 		out_be16(&usbpram->epptr[i], (u16)tmp_addr);
@@ -2513,7 +2519,7 @@
 	/* Initialize the udc structure including QH member and other member */
 	udc_controller = qe_udc_config(ofdev);
 	if (!udc_controller) {
-		dev_dbg(&ofdev->dev, "udc_controll is NULL\n");
+		dev_err(&ofdev->dev, "failed to initialize\n");
 		return -ENOMEM;
 	}
 
@@ -2568,7 +2574,7 @@
 	/* create a buf for ZLP send, need to remain zeroed */
 	udc_controller->nullbuf = kzalloc(256, GFP_KERNEL);
 	if (udc_controller->nullbuf == NULL) {
-		dev_dbg(udc_controller->dev, "cannot alloc nullbuf\n");
+		dev_err(udc_controller->dev, "cannot alloc nullbuf\n");
 		ret = -ENOMEM;
 		goto err3;
 	}
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index 4e3107d..ec6d439 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -110,7 +110,6 @@
 #define gadget_is_at91(g)	0
 #endif
 
-/* status unclear */
 #ifdef CONFIG_USB_GADGET_IMX
 #define gadget_is_imx(g)	!strcmp("imx_udc", (g)->name)
 #else
@@ -158,6 +157,11 @@
 #define gadget_is_fsl_qe(g)	0
 #endif
 
+#ifdef CONFIG_USB_GADGET_CI13XXX
+#define gadget_is_ci13xxx(g)	(!strcmp("ci13xxx_udc", (g)->name))
+#else
+#define gadget_is_ci13xxx(g)	0
+#endif
 
 // CONFIG_USB_GADGET_SX2
 // CONFIG_USB_GADGET_AU1X00
@@ -225,6 +229,8 @@
 		return 0x21;
 	else if (gadget_is_fsl_qe(gadget))
 		return 0x22;
+	else if (gadget_is_ci13xxx(gadget))
+		return 0x23;
 	return -ENOENT;
 }
 
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 60aa048..63419c4 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1349,7 +1349,7 @@
 	int			retval;
 
 	if (!driver
-			|| driver->speed != USB_SPEED_FULL
+			|| driver->speed < USB_SPEED_FULL
 			|| !driver->bind
 			|| !driver->disconnect
 			|| !driver->setup)
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
new file mode 100644
index 0000000..cde8fdf
--- /dev/null
+++ b/drivers/usb/gadget/imx_udc.c
@@ -0,0 +1,1516 @@
+/*
+ *	drivers/usb/gadget/imx_udc.c
+ *
+ *	Copyright (C) 2005 Mike Lee (eemike@gmail.com)
+ *	Copyright (C) 2008 Darius Augulis <augulis.darius@gmail.com>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ *
+ *	This program is distributed in the hope that it will be useful,
+ *	but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *	GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include <mach/usb.h>
+#include <mach/hardware.h>
+
+#include "imx_udc.h"
+
+static const char driver_name[] = "imx_udc";
+static const char ep0name[] = "ep0";
+
+void ep0_chg_stat(const char *label, struct imx_udc_struct *imx_usb,
+							enum ep0_state stat);
+
+/*******************************************************************************
+ * IMX UDC hardware related functions
+ *******************************************************************************
+ */
+
+void imx_udc_enable(struct imx_udc_struct *imx_usb)
+{
+	int temp = __raw_readl(imx_usb->base + USB_CTRL);
+	__raw_writel(temp | CTRL_FE_ENA | CTRL_AFE_ENA, imx_usb->base + USB_CTRL);
+	imx_usb->gadget.speed = USB_SPEED_FULL;
+}
+
+void imx_udc_disable(struct imx_udc_struct *imx_usb)
+{
+	int temp = __raw_readl(imx_usb->base + USB_CTRL);
+
+	__raw_writel(temp & ~(CTRL_FE_ENA | CTRL_AFE_ENA),
+		 imx_usb->base + USB_CTRL);
+
+	ep0_chg_stat(__func__, imx_usb, EP0_IDLE);
+	imx_usb->gadget.speed = USB_SPEED_UNKNOWN;
+}
+
+void imx_udc_reset(struct imx_udc_struct *imx_usb)
+{
+	int temp = __raw_readl(imx_usb->base + USB_ENAB);
+
+	/* set RST bit */
+	__raw_writel(temp | ENAB_RST, imx_usb->base + USB_ENAB);
+
+	/* wait RST bit to clear */
+	do {} while (__raw_readl(imx_usb->base + USB_ENAB) & ENAB_RST);
+
+	/* wait CFG bit to assert */
+	do {} while (!(__raw_readl(imx_usb->base + USB_DADR) & DADR_CFG));
+
+	/* udc module is now ready */
+}
+
+void imx_udc_config(struct imx_udc_struct *imx_usb)
+{
+	u8 ep_conf[5];
+	u8 i, j, cfg;
+	struct imx_ep_struct *imx_ep;
+
+	/* wait CFG bit to assert */
+	do {} while (!(__raw_readl(imx_usb->base + USB_DADR) & DADR_CFG));
+
+	/* Download the endpoint buffer for endpoint 0. */
+	for (j = 0; j < 5; j++) {
+		i = (j == 2 ? imx_usb->imx_ep[0].fifosize : 0x00);
+		__raw_writeb(i, imx_usb->base + USB_DDAT);
+		do {} while (__raw_readl(imx_usb->base + USB_DADR) & DADR_BSY);
+	}
+
+	/* Download the endpoint buffers for endpoints 1-5.
+	 * We specify two configurations, one interface
+	 */
+	for (cfg = 1; cfg < 3; cfg++) {
+		for (i = 1; i < IMX_USB_NB_EP; i++) {
+			imx_ep = &imx_usb->imx_ep[i];
+			/* EP no | Config no */
+			ep_conf[0] = (i << 4) | (cfg << 2);
+			/* Type | Direction */
+			ep_conf[1] = (imx_ep->bmAttributes << 3) |
+					(EP_DIR(imx_ep) << 2);
+			/* Max packet size */
+			ep_conf[2] = imx_ep->fifosize;
+			/* TRXTYP */
+			ep_conf[3] = 0xC0;
+			/* FIFO no */
+			ep_conf[4] = i;
+
+			D_INI(imx_usb->dev,
+				"<%s> ep%d_conf[%d]:"
+				"[%02x-%02x-%02x-%02x-%02x]\n",
+				__func__, i, cfg,
+				ep_conf[0], ep_conf[1], ep_conf[2],
+				ep_conf[3], ep_conf[4]);
+
+			for (j = 0; j < 5; j++) {
+				__raw_writeb(ep_conf[j],
+					imx_usb->base + USB_DDAT);
+				do {} while (__raw_readl(imx_usb->base + USB_DADR)
+					& DADR_BSY);
+			}
+		}
+	}
+
+	/* wait CFG bit to clear */
+	do {} while (__raw_readl(imx_usb->base + USB_DADR) & DADR_CFG);
+}
+
+void imx_udc_init_irq(struct imx_udc_struct *imx_usb)
+{
+	int i;
+
+	/* Mask and clear all irqs */
+	__raw_writel(0xFFFFFFFF, imx_usb->base + USB_MASK);
+	__raw_writel(0xFFFFFFFF, imx_usb->base + USB_INTR);
+	for (i = 0; i < IMX_USB_NB_EP; i++) {
+		__raw_writel(0x1FF, imx_usb->base + USB_EP_MASK(i));
+		__raw_writel(0x1FF, imx_usb->base + USB_EP_INTR(i));
+	}
+
+	/* Enable USB irqs */
+	__raw_writel(INTR_MSOF | INTR_FRAME_MATCH, imx_usb->base + USB_MASK);
+
+	/* Enable EP0 irqs */
+	__raw_writel(0x1FF & ~(EPINTR_DEVREQ | EPINTR_MDEVREQ | EPINTR_EOT
+		| EPINTR_EOF | EPINTR_FIFO_EMPTY | EPINTR_FIFO_FULL),
+		imx_usb->base + USB_EP_MASK(0));
+}
+
+void imx_udc_init_ep(struct imx_udc_struct *imx_usb)
+{
+	int i, max, temp;
+	struct imx_ep_struct *imx_ep;
+	for (i = 0; i < IMX_USB_NB_EP; i++) {
+		imx_ep = &imx_usb->imx_ep[i];
+		switch (imx_ep->fifosize) {
+		case 8:
+			max = 0;
+			break;
+		case 16:
+			max = 1;
+			break;
+		case 32:
+			max = 2;
+			break;
+		case 64:
+			max = 3;
+			break;
+		default:
+			max = 1;
+			break;
+		}
+		temp = (EP_DIR(imx_ep) << 7) | (max << 5)
+			| (imx_ep->bmAttributes << 3);
+		__raw_writel(temp, imx_usb->base + USB_EP_STAT(i));
+		__raw_writel(temp | EPSTAT_FLUSH, imx_usb->base + USB_EP_STAT(i));
+		D_INI(imx_usb->dev, "<%s> ep%d_stat %08x\n", __func__, i,
+			__raw_readl(imx_usb->base + USB_EP_STAT(i)));
+	}
+}
+
+void imx_udc_init_fifo(struct imx_udc_struct *imx_usb)
+{
+	int i, temp;
+	struct imx_ep_struct *imx_ep;
+	for (i = 0; i < IMX_USB_NB_EP; i++) {
+		imx_ep = &imx_usb->imx_ep[i];
+
+		/* Fifo control */
+		temp = EP_DIR(imx_ep) ? 0x0B000000 : 0x0F000000;
+		__raw_writel(temp, imx_usb->base + USB_EP_FCTRL(i));
+		D_INI(imx_usb->dev, "<%s> ep%d_fctrl %08x\n", __func__, i,
+			__raw_readl(imx_usb->base + USB_EP_FCTRL(i)));
+
+		/* Fifo alarm */
+		temp = (i ? imx_ep->fifosize / 2 : 0);
+		__raw_writel(temp, imx_usb->base + USB_EP_FALRM(i));
+		D_INI(imx_usb->dev, "<%s> ep%d_falrm %08x\n", __func__, i,
+			__raw_readl(imx_usb->base + USB_EP_FALRM(i)));
+	}
+}
+
+static void imx_udc_init(struct imx_udc_struct *imx_usb)
+{
+	/* Reset UDC */
+	imx_udc_reset(imx_usb);
+
+	/* Download config to endpoint buffer */
+	imx_udc_config(imx_usb);
+
+	/* Set up interrupts */
+	imx_udc_init_irq(imx_usb);
+
+	/* Setup endpoints */
+	imx_udc_init_ep(imx_usb);
+
+	/* Setup fifos */
+	imx_udc_init_fifo(imx_usb);
+}
+
+void imx_ep_irq_enable(struct imx_ep_struct *imx_ep)
+{
+
+	int i = EP_NO(imx_ep);
+
+	__raw_writel(0x1FF, imx_ep->imx_usb->base + USB_EP_MASK(i));
+	__raw_writel(0x1FF, imx_ep->imx_usb->base + USB_EP_INTR(i));
+	__raw_writel(0x1FF & ~(EPINTR_EOT | EPINTR_EOF),
+		imx_ep->imx_usb->base + USB_EP_MASK(i));
+}
+
+void imx_ep_irq_disable(struct imx_ep_struct *imx_ep)
+{
+
+	int i = EP_NO(imx_ep);
+
+	__raw_writel(0x1FF, imx_ep->imx_usb->base + USB_EP_MASK(i));
+	__raw_writel(0x1FF, imx_ep->imx_usb->base + USB_EP_INTR(i));
+}
+
+int imx_ep_empty(struct imx_ep_struct *imx_ep)
+{
+	struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
+
+	return __raw_readl(imx_usb->base + USB_EP_FSTAT(EP_NO(imx_ep)))
+			& FSTAT_EMPTY;
+}
+
+unsigned imx_fifo_bcount(struct imx_ep_struct *imx_ep)
+{
+	struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
+
+	return (__raw_readl(imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)))
+			& EPSTAT_BCOUNT) >> 16;
+}
+
+void imx_flush(struct imx_ep_struct *imx_ep)
+{
+	struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
+
+	int temp = __raw_readl(imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
+	__raw_writel(temp | EPSTAT_FLUSH,
+		imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
+}
+
+void imx_ep_stall(struct imx_ep_struct *imx_ep)
+{
+	struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
+	int temp, i;
+
+	D_ERR(imx_usb->dev, "<%s> Forced stall on %s\n", __func__, imx_ep->ep.name);
+
+	imx_flush(imx_ep);
+
+	/* Special care for ep0 */
+	if (EP_NO(imx_ep)) {
+		temp = __raw_readl(imx_usb->base + USB_CTRL);
+		__raw_writel(temp | CTRL_CMDOVER | CTRL_CMDERROR, imx_usb->base + USB_CTRL);
+		do { } while (__raw_readl(imx_usb->base + USB_CTRL) & CTRL_CMDOVER);
+		temp = __raw_readl(imx_usb->base + USB_CTRL);
+		__raw_writel(temp & ~CTRL_CMDERROR, imx_usb->base + USB_CTRL);
+	} else {
+		temp = __raw_readl(imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
+		__raw_writel(temp | EPSTAT_STALL,
+			imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
+
+		for (i = 0; i < 100; i++) {
+			temp = __raw_readl(imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
+			if (!(temp & EPSTAT_STALL))
+				break;
+			udelay(20);
+		}
+		if (i == 100)
+			D_ERR(imx_usb->dev, "<%s> Unfinished stall on %s\n",
+				__func__, imx_ep->ep.name);
+	}
+}
+
+static int imx_udc_get_frame(struct usb_gadget *_gadget)
+{
+	struct imx_udc_struct *imx_usb = container_of(_gadget,
+			struct imx_udc_struct, gadget);
+
+	return __raw_readl(imx_usb->base + USB_FRAME) & 0x7FF;
+}
+
+static int imx_udc_wakeup(struct usb_gadget *_gadget)
+{
+	return 0;
+}
+
+/*******************************************************************************
+ * USB request control functions
+ *******************************************************************************
+ */
+
+static void ep_add_request(struct imx_ep_struct *imx_ep, struct imx_request *req)
+{
+	if (unlikely(!req))
+		return;
+
+	req->in_use = 1;
+	list_add_tail(&req->queue, &imx_ep->queue);
+}
+
+static void ep_del_request(struct imx_ep_struct *imx_ep, struct imx_request *req)
+{
+	if (unlikely(!req))
+		return;
+
+	list_del_init(&req->queue);
+	req->in_use = 0;
+}
+
+static void done(struct imx_ep_struct *imx_ep, struct imx_request *req, int status)
+{
+	ep_del_request(imx_ep, req);
+
+	if (likely(req->req.status == -EINPROGRESS))
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	if (status && status != -ESHUTDOWN)
+		D_ERR(imx_ep->imx_usb->dev,
+			"<%s> complete %s req %p stat %d len %u/%u\n", __func__,
+			imx_ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	req->req.complete(&imx_ep->ep, &req->req);
+}
+
+static void nuke(struct imx_ep_struct *imx_ep, int status)
+{
+	struct imx_request *req;
+
+	while (!list_empty(&imx_ep->queue)) {
+		req = list_entry(imx_ep->queue.next, struct imx_request, queue);
+		done(imx_ep, req, status);
+	}
+}
+
+/*******************************************************************************
+ * Data transfer over USB functions
+ *******************************************************************************
+ */
+static int read_packet(struct imx_ep_struct *imx_ep, struct imx_request *req)
+{
+	u8	*buf;
+	int	bytes_ep, bufferspace, count, i;
+
+	bytes_ep = imx_fifo_bcount(imx_ep);
+	bufferspace = req->req.length - req->req.actual;
+
+	buf = req->req.buf + req->req.actual;
+	prefetchw(buf);
+
+	if (unlikely(imx_ep_empty(imx_ep)))
+		count = 0;	/* zlp */
+	else
+		count = min(bytes_ep, bufferspace);
+
+	for (i = count; i > 0; i--)
+		*buf++ = __raw_readb(imx_ep->imx_usb->base
+						+ USB_EP_FDAT0(EP_NO(imx_ep)));
+	req->req.actual += count;
+
+	return count;
+}
+
+static int write_packet(struct imx_ep_struct *imx_ep, struct imx_request *req)
+{
+	u8	*buf;
+	int	length, count, temp;
+
+	buf = req->req.buf + req->req.actual;
+	prefetch(buf);
+
+	length = min(req->req.length - req->req.actual, (u32)imx_ep->fifosize);
+
+	if (imx_fifo_bcount(imx_ep) + length > imx_ep->fifosize) {
+		D_TRX(imx_ep->imx_usb->dev, "<%s> packet overfill %s fifo\n",
+			__func__, imx_ep->ep.name);
+		return -1;
+	}
+
+	req->req.actual += length;
+	count = length;
+
+	if (!count && req->req.zero) {	/* zlp */
+		temp = __raw_readl(imx_ep->imx_usb->base
+			+ USB_EP_STAT(EP_NO(imx_ep)));
+		__raw_writel(temp | EPSTAT_ZLPS, imx_ep->imx_usb->base
+			+ USB_EP_STAT(EP_NO(imx_ep)));
+		D_TRX(imx_ep->imx_usb->dev, "<%s> zero packet\n", __func__);
+		return 0;
+	}
+
+	while (count--) {
+		if (count == 0) {	/* last byte */
+			temp = __raw_readl(imx_ep->imx_usb->base
+				+ USB_EP_FCTRL(EP_NO(imx_ep)));
+			__raw_writel(temp | FCTRL_WFR, imx_ep->imx_usb->base
+				+ USB_EP_FCTRL(EP_NO(imx_ep)));
+		}
+		__raw_writeb(*buf++,
+			imx_ep->imx_usb->base + USB_EP_FDAT0(EP_NO(imx_ep)));
+	}
+
+	return length;
+}
+
+static int read_fifo(struct imx_ep_struct *imx_ep, struct imx_request *req)
+{
+	int 	bytes = 0,
+		count,
+		completed = 0;
+
+	while (__raw_readl(imx_ep->imx_usb->base + USB_EP_FSTAT(EP_NO(imx_ep)))
+		& FSTAT_FR) {
+			count = read_packet(imx_ep, req);
+			bytes += count;
+
+			completed = (count != imx_ep->fifosize);
+			if (completed || req->req.actual == req->req.length) {
+				completed = 1;
+				break;
+			}
+	}
+
+	if (completed || !req->req.length) {
+		done(imx_ep, req, 0);
+		D_REQ(imx_ep->imx_usb->dev, "<%s> %s req<%p> %s\n",
+			__func__, imx_ep->ep.name, req,
+			completed ? "completed" : "not completed");
+		if (!EP_NO(imx_ep))
+			ep0_chg_stat(__func__, imx_ep->imx_usb, EP0_IDLE);
+	}
+
+	D_TRX(imx_ep->imx_usb->dev, "<%s> bytes read: %d\n", __func__, bytes);
+
+	return completed;
+}
+
+static int write_fifo(struct imx_ep_struct *imx_ep, struct imx_request *req)
+{
+	int	bytes = 0,
+		count,
+		completed = 0;
+
+	while (!completed) {
+		count = write_packet(imx_ep, req);
+		if (count < 0)
+			break; /* busy */
+		bytes += count;
+
+		/* last packet "must be" short (or a zlp) */
+		completed = (count != imx_ep->fifosize);
+
+		if (unlikely(completed)) {
+			done(imx_ep, req, 0);
+			D_REQ(imx_ep->imx_usb->dev, "<%s> %s req<%p> %s\n",
+				__func__, imx_ep->ep.name, req,
+				completed ? "completed" : "not completed");
+			if (!EP_NO(imx_ep))
+				ep0_chg_stat(__func__, imx_ep->imx_usb, EP0_IDLE);
+		}
+	}
+
+	D_TRX(imx_ep->imx_usb->dev, "<%s> bytes sent: %d\n", __func__, bytes);
+
+	return completed;
+}
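/*
 * For illustration only (not from this patch): the "last packet must be short
 * (or a zlp)" rule above is standard USB bulk framing.  The host treats a
 * packet shorter than wMaxPacketSize as the end of a transfer, so a transfer
 * whose length is an exact multiple of the packet size needs an explicit
 * zero-length packet when the request asked for one.  Sketch of that decision:
 */
static inline int needs_zlp(unsigned int length, unsigned int maxpacket,
			    int zero_flag)
{
	return zero_flag && length != 0 && (length % maxpacket) == 0;
}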
+
+/*******************************************************************************
+ * Endpoint handlers
+ *******************************************************************************
+ */
+static int handle_ep(struct imx_ep_struct *imx_ep)
+{
+	struct imx_request *req;
+	int completed = 0;
+
+	do {
+		if (!list_empty(&imx_ep->queue))
+			req = list_entry(imx_ep->queue.next,
+				struct imx_request, queue);
+		else {
+			D_REQ(imx_ep->imx_usb->dev, "<%s> no request on %s\n",
+				__func__, imx_ep->ep.name);
+			return 0;
+		}
+
+		if (EP_DIR(imx_ep))	/* to host */
+			completed = write_fifo(imx_ep, req);
+		else			/* to device */
+			completed = read_fifo(imx_ep, req);
+
+		dump_ep_stat(__func__, imx_ep);
+
+	} while (completed);
+
+	return 0;
+}
+
+static int handle_ep0(struct imx_ep_struct *imx_ep)
+{
+	struct imx_request *req = NULL;
+	int ret = 0;
+
+	if (!list_empty(&imx_ep->queue))
+		req = list_entry(imx_ep->queue.next, struct imx_request, queue);
+
+	if (req) {
+		switch (imx_ep->imx_usb->ep0state) {
+
+		case EP0_IN_DATA_PHASE:			/* GET_DESCRIPTOR */
+			write_fifo(imx_ep, req);
+			break;
+		case EP0_OUT_DATA_PHASE:		/* SET_DESCRIPTOR */
+			read_fifo(imx_ep, req);
+			break;
+		default:
+			D_EP0(imx_ep->imx_usb->dev,
+				"<%s> ep0 i/o, odd state %d\n",
+				__func__, imx_ep->imx_usb->ep0state);
+			ep_del_request(imx_ep, req);
+			ret = -EL2HLT;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static void handle_ep0_devreq(struct imx_udc_struct *imx_usb)
+{
+	struct imx_ep_struct *imx_ep = &imx_usb->imx_ep[0];
+	union {
+		struct usb_ctrlrequest	r;
+		u8			raw[8];
+		u32			word[2];
+	} u;
+	int temp, i;
+
+	nuke(imx_ep, -EPROTO);
+
+	/* read SETUP packet */
+	for (i = 0; i < 2; i++) {
+		if (imx_ep_empty(imx_ep)) {
+			D_ERR(imx_usb->dev,
+				"<%s> no setup packet received\n", __func__);
+			goto stall;
+		}
+		u.word[i] = __raw_readl(imx_usb->base +	USB_EP_FDAT(EP_NO(imx_ep)));
+	}
+
+	temp = imx_ep_empty(imx_ep);
+	while (!imx_ep_empty(imx_ep)) {
+		i = __raw_readl(imx_usb->base +	USB_EP_FDAT(EP_NO(imx_ep)));
+		D_ERR(imx_usb->dev,
+			"<%s> unexpected extra bytes in setup packet: 0x%08x\n",
+			__func__, i);
+	}
+	if (!temp)
+		goto stall;
+
+	le16_to_cpus(&u.r.wValue);
+	le16_to_cpus(&u.r.wIndex);
+	le16_to_cpus(&u.r.wLength);
+
+	D_REQ(imx_usb->dev, "<%s> SETUP %02x.%02x v%04x i%04x l%04x\n",
+		__func__, u.r.bRequestType, u.r.bRequest,
+		u.r.wValue, u.r.wIndex, u.r.wLength);
+
+	if (imx_usb->set_config) {
+		/* NACK the host by using CMDOVER */
+		temp = __raw_readl(imx_usb->base + USB_CTRL);
+		__raw_writel(temp | CTRL_CMDOVER, imx_usb->base + USB_CTRL);
+
+		D_ERR(imx_usb->dev,
+			"<%s> set config req is pending, NACK the host\n",
+			__func__);
+		return;
+	}
+
+	if (u.r.bRequestType & USB_DIR_IN)
+		ep0_chg_stat(__func__, imx_usb, EP0_IN_DATA_PHASE);
+	else
+		ep0_chg_stat(__func__, imx_usb, EP0_OUT_DATA_PHASE);
+
+	i = imx_usb->driver->setup(&imx_usb->gadget, &u.r);
+	if (i < 0) {
+		D_ERR(imx_usb->dev, "<%s> device setup error %d\n",
+			__func__, i);
+		goto stall;
+	}
+
+	return;
+stall:
+	D_ERR(imx_usb->dev, "<%s> protocol STALL\n", __func__);
+	imx_ep_stall(imx_ep);
+	ep0_chg_stat(__func__, imx_usb, EP0_STALL);
+	return;
+}
+
+/*******************************************************************************
+ * USB gadget callback functions
+ *******************************************************************************
+ */
+
+static int imx_ep_enable(struct usb_ep *usb_ep,
+				const struct usb_endpoint_descriptor *desc)
+{
+	struct imx_ep_struct *imx_ep = container_of(usb_ep,
+						struct imx_ep_struct, ep);
+	struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
+	unsigned long flags;
+
+	if (!usb_ep
+		|| !desc
+		|| !EP_NO(imx_ep)
+		|| desc->bDescriptorType != USB_DT_ENDPOINT
+		|| imx_ep->bEndpointAddress != desc->bEndpointAddress) {
+			D_ERR(imx_usb->dev,
+				"<%s> bad ep or descriptor\n", __func__);
+			return -EINVAL;
+	}
+
+	if (imx_ep->bmAttributes != desc->bmAttributes) {
+		D_ERR(imx_usb->dev,
+			"<%s> %s type mismatch\n", __func__, usb_ep->name);
+		return -EINVAL;
+	}
+
+	if (imx_ep->fifosize < le16_to_cpu(desc->wMaxPacketSize)) {
+		D_ERR(imx_usb->dev,
+			"<%s> bad %s maxpacket\n", __func__, usb_ep->name);
+		return -ERANGE;
+	}
+
+	if (!imx_usb->driver || imx_usb->gadget.speed == USB_SPEED_UNKNOWN) {
+		D_ERR(imx_usb->dev, "<%s> bogus device state\n", __func__);
+		return -ESHUTDOWN;
+	}
+
+	local_irq_save(flags);
+
+	imx_ep->stopped = 0;
+	imx_flush(imx_ep);
+	imx_ep_irq_enable(imx_ep);
+
+	local_irq_restore(flags);
+
+	D_EPX(imx_usb->dev, "<%s> ENABLED %s\n", __func__, usb_ep->name);
+	return 0;
+}
+
+static int imx_ep_disable(struct usb_ep *usb_ep)
+{
+	struct imx_ep_struct *imx_ep = container_of(usb_ep,
+						struct imx_ep_struct, ep);
+	unsigned long flags;
+
+	if (!usb_ep || !EP_NO(imx_ep) || !list_empty(&imx_ep->queue)) {
+		D_ERR(imx_ep->imx_usb->dev, "<%s> %s can not be disabled\n",
+			__func__, usb_ep ? imx_ep->ep.name : NULL);
+		return -EINVAL;
+	}
+
+	local_irq_save(flags);
+
+	imx_ep->stopped = 1;
+	nuke(imx_ep, -ESHUTDOWN);
+	imx_flush(imx_ep);
+	imx_ep_irq_disable(imx_ep);
+
+	local_irq_restore(flags);
+
+	D_EPX(imx_ep->imx_usb->dev,
+		"<%s> DISABLED %s\n", __func__, usb_ep->name);
+	return 0;
+}
+
+static struct usb_request *imx_ep_alloc_request
+					(struct usb_ep *usb_ep, gfp_t gfp_flags)
+{
+	struct imx_request *req;
+
+	req = kzalloc(sizeof *req, gfp_flags);
+	if (!req || !usb_ep)
+		return NULL;
+
+	INIT_LIST_HEAD(&req->queue);
+	req->in_use = 0;
+
+	return &req->req;
+}
+
+static void imx_ep_free_request
+			(struct usb_ep *usb_ep, struct usb_request *usb_req)
+{
+	struct imx_request *req;
+
+	req = container_of(usb_req, struct imx_request, req);
+	WARN_ON(!list_empty(&req->queue));
+	kfree(req);
+}
+
+static int imx_ep_queue
+	(struct usb_ep *usb_ep, struct usb_request *usb_req, gfp_t gfp_flags)
+{
+	struct imx_ep_struct	*imx_ep;
+	struct imx_udc_struct	*imx_usb;
+	struct imx_request	*req;
+	unsigned long		flags;
+	int			ret = 0;
+
+	imx_ep = container_of(usb_ep, struct imx_ep_struct, ep);
+	imx_usb = imx_ep->imx_usb;
+	req = container_of(usb_req, struct imx_request, req);
+
+	/*
+	 * Special care for the i.MX UDC: ignore the enqueue that follows
+	 * a SET_CONFIGURATION from the host.  This assumes every gadget
+	 * driver replies to SET_CONFIGURATION with its next ep0 enqueue.
+	 */
+	if (imx_usb->set_config && !EP_NO(imx_ep)) {
+		imx_usb->set_config = 0;
+		D_EPX(imx_usb->dev,
+			"<%s> gadget reply set config\n", __func__);
+		return 0;
+	}
+
+	if (unlikely(!usb_req || !req || !usb_req->complete || !usb_req->buf)) {
+		D_ERR(imx_usb->dev, "<%s> bad params\n", __func__);
+		return -EINVAL;
+	}
+
+	if (unlikely(!usb_ep || !imx_ep)) {
+		D_ERR(imx_usb->dev, "<%s> bad ep\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!imx_usb->driver || imx_usb->gadget.speed == USB_SPEED_UNKNOWN) {
+		D_ERR(imx_usb->dev, "<%s> bogus device state\n", __func__);
+		return -ESHUTDOWN;
+	}
+
+	local_irq_save(flags);
+
+	/* Debug */
+	D_REQ(imx_usb->dev, "<%s> ep%d %s request for [%d] bytes\n",
+		__func__, EP_NO(imx_ep),
+		((!EP_NO(imx_ep) && imx_ep->imx_usb->ep0state == EP0_IN_DATA_PHASE)
+		|| (EP_NO(imx_ep) && EP_DIR(imx_ep))) ? "IN" : "OUT", usb_req->length);
+	dump_req(__func__, imx_ep, usb_req);
+
+	if (imx_ep->stopped) {
+		usb_req->status = -ESHUTDOWN;
+		ret = -ESHUTDOWN;
+		goto out;
+	}
+
+	if (req->in_use) {
+		D_ERR(imx_usb->dev,
+			"<%s> refusing to queue req %p (already queued)\n",
+			__func__, req);
+		goto out;
+	}
+
+	usb_req->status = -EINPROGRESS;
+	usb_req->actual = 0;
+
+	ep_add_request(imx_ep, req);
+
+	if (!EP_NO(imx_ep))
+		ret = handle_ep0(imx_ep);
+	else
+		ret = handle_ep(imx_ep);
+out:
+	local_irq_restore(flags);
+	return ret;
+}
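/*
 * For illustration only (not from this patch): the set_config special case at
 * the top of imx_ep_queue() assumes the usual gadget-driver pattern, i.e. that
 * setup() handles SET_CONFIGURATION and then queues a (typically zero-length)
 * status response on ep0.  A hypothetical fragment of such a setup() handler,
 * with invented names (example_setup, example_dev):
 */
static int example_setup(struct usb_gadget *gadget,
			 const struct usb_ctrlrequest *ctrl)
{
	struct usb_request *req = example_dev->ep0_req;	/* preallocated */

	if (ctrl->bRequest == USB_REQ_SET_CONFIGURATION) {
		/* ... apply the configuration ... */
		req->length = 0;
		req->zero = 0;
		/* this enqueue is the one imx_ep_queue() swallows above */
		return usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
	}
	return -EOPNOTSUPP;
}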
+
+static int imx_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
+{
+
+	struct imx_ep_struct *imx_ep = container_of
+					(usb_ep, struct imx_ep_struct, ep);
+	struct imx_request *req;
+	unsigned long flags;
+
+	if (unlikely(!usb_ep || !EP_NO(imx_ep))) {
+		D_ERR(imx_ep->imx_usb->dev, "<%s> bad ep\n", __func__);
+		return -EINVAL;
+	}
+
+	local_irq_save(flags);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry(req, &imx_ep->queue, queue) {
+		if (&req->req == usb_req)
+			break;
+	}
+	if (&req->req != usb_req) {
+		local_irq_restore(flags);
+		return -EINVAL;
+	}
+
+	done(imx_ep, req, -ECONNRESET);
+
+	local_irq_restore(flags);
+	return 0;
+}
+
+static int imx_ep_set_halt(struct usb_ep *usb_ep, int value)
+{
+	struct imx_ep_struct *imx_ep = container_of
+					(usb_ep, struct imx_ep_struct, ep);
+	unsigned long flags;
+
+	if (unlikely(!usb_ep || !EP_NO(imx_ep))) {
+		D_ERR(imx_ep->imx_usb->dev, "<%s> bad ep\n", __func__);
+		return -EINVAL;
+	}
+
+	local_irq_save(flags);
+
+	if ((imx_ep->bEndpointAddress & USB_DIR_IN)
+		&& !list_empty(&imx_ep->queue)) {
+			local_irq_restore(flags);
+			return -EAGAIN;
+	}
+
+	imx_ep_stall(imx_ep);
+
+	local_irq_restore(flags);
+
+	D_EPX(imx_ep->imx_usb->dev, "<%s> %s halt\n", __func__, usb_ep->name);
+	return 0;
+}
+
+static int imx_ep_fifo_status(struct usb_ep *usb_ep)
+{
+	struct imx_ep_struct *imx_ep = container_of
+					(usb_ep, struct imx_ep_struct, ep);
+
+	if (!usb_ep) {
+		D_ERR(imx_ep->imx_usb->dev, "<%s> bad ep\n", __func__);
+		return -ENODEV;
+	}
+
+	if (imx_ep->imx_usb->gadget.speed == USB_SPEED_UNKNOWN)
+		return 0;
+	else
+		return imx_fifo_bcount(imx_ep);
+}
+
+static void imx_ep_fifo_flush(struct usb_ep *usb_ep)
+{
+	struct imx_ep_struct *imx_ep = container_of
+					(usb_ep, struct imx_ep_struct, ep);
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	if (!usb_ep || !EP_NO(imx_ep) || !list_empty(&imx_ep->queue)) {
+		D_ERR(imx_ep->imx_usb->dev, "<%s> bad ep\n", __func__);
+		local_irq_restore(flags);
+		return;
+	}
+
+	/* toggle and halt bits stay unchanged */
+	imx_flush(imx_ep);
+
+	local_irq_restore(flags);
+}
+
+static struct usb_ep_ops imx_ep_ops = {
+	.enable		= imx_ep_enable,
+	.disable	= imx_ep_disable,
+
+	.alloc_request	= imx_ep_alloc_request,
+	.free_request	= imx_ep_free_request,
+
+	.queue		= imx_ep_queue,
+	.dequeue	= imx_ep_dequeue,
+
+	.set_halt	= imx_ep_set_halt,
+	.fifo_status	= imx_ep_fifo_status,
+	.fifo_flush	= imx_ep_fifo_flush,
+};
+
+/*******************************************************************************
+ * USB endpoint control functions
+ *******************************************************************************
+ */
+
+void ep0_chg_stat(const char *label,
+			struct imx_udc_struct *imx_usb, enum ep0_state stat)
+{
+	D_EP0(imx_usb->dev, "<%s> from %15s to %15s\n",
+		label, state_name[imx_usb->ep0state], state_name[stat]);
+
+	if (imx_usb->ep0state == stat)
+		return;
+
+	imx_usb->ep0state = stat;
+}
+
+static void usb_init_data(struct imx_udc_struct *imx_usb)
+{
+	struct imx_ep_struct *imx_ep;
+	u8 i;
+
+	/* device/ep0 records init */
+	INIT_LIST_HEAD(&imx_usb->gadget.ep_list);
+	INIT_LIST_HEAD(&imx_usb->gadget.ep0->ep_list);
+	ep0_chg_stat(__func__, imx_usb, EP0_IDLE);
+
+	/* basic endpoint records init */
+	for (i = 0; i < IMX_USB_NB_EP; i++) {
+		imx_ep = &imx_usb->imx_ep[i];
+
+		if (i) {
+			list_add_tail(&imx_ep->ep.ep_list,
+				&imx_usb->gadget.ep_list);
+			imx_ep->stopped = 1;
+		} else
+			imx_ep->stopped = 0;
+
+		INIT_LIST_HEAD(&imx_ep->queue);
+	}
+}
+
+static void udc_stop_activity(struct imx_udc_struct *imx_usb,
+					struct usb_gadget_driver *driver)
+{
+	struct imx_ep_struct *imx_ep;
+	int i;
+
+	if (imx_usb->gadget.speed == USB_SPEED_UNKNOWN)
+		driver = NULL;
+
+	/* prevent new request submissions, kill any outstanding requests  */
+	for (i = 1; i < IMX_USB_NB_EP; i++) {
+		imx_ep = &imx_usb->imx_ep[i];
+		imx_flush(imx_ep);
+		imx_ep->stopped = 1;
+		imx_ep_irq_disable(imx_ep);
+		nuke(imx_ep, -ESHUTDOWN);
+	}
+
+	imx_usb->cfg = 0;
+	imx_usb->intf = 0;
+	imx_usb->alt = 0;
+
+	if (driver)
+		driver->disconnect(&imx_usb->gadget);
+}
+
+/*******************************************************************************
+ * Interrupt handlers
+ *******************************************************************************
+ */
+
+static irqreturn_t imx_udc_irq(int irq, void *dev)
+{
+	struct imx_udc_struct *imx_usb = dev;
+	struct usb_ctrlrequest u;
+	int temp, cfg, intf, alt;
+	int intr = __raw_readl(imx_usb->base + USB_INTR);
+
+	if (intr & (INTR_WAKEUP | INTR_SUSPEND | INTR_RESUME | INTR_RESET_START
+			| INTR_RESET_STOP | INTR_CFG_CHG)) {
+				dump_intr(__func__, intr, imx_usb->dev);
+				dump_usb_stat(__func__, imx_usb);
+	}
+
+	if (!imx_usb->driver) {
+		/*imx_udc_disable(imx_usb);*/
+		goto end_irq;
+	}
+
+	if (intr & INTR_WAKEUP) {
+		if (imx_usb->gadget.speed == USB_SPEED_UNKNOWN
+			&& imx_usb->driver && imx_usb->driver->resume)
+				imx_usb->driver->resume(&imx_usb->gadget);
+		imx_usb->set_config = 0;
+		imx_usb->gadget.speed = USB_SPEED_FULL;
+	}
+
+	if (intr & INTR_SUSPEND) {
+		if (imx_usb->gadget.speed != USB_SPEED_UNKNOWN
+			&& imx_usb->driver && imx_usb->driver->suspend)
+				imx_usb->driver->suspend(&imx_usb->gadget);
+		imx_usb->set_config = 0;
+		imx_usb->gadget.speed = USB_SPEED_UNKNOWN;
+	}
+
+	if (intr & INTR_RESET_START) {
+		__raw_writel(intr, imx_usb->base + USB_INTR);
+		udc_stop_activity(imx_usb, imx_usb->driver);
+		imx_usb->set_config = 0;
+		imx_usb->gadget.speed = USB_SPEED_UNKNOWN;
+	}
+
+	if (intr & INTR_RESET_STOP)
+		imx_usb->gadget.speed = USB_SPEED_FULL;
+
+	if (intr & INTR_CFG_CHG) {
+		__raw_writel(INTR_CFG_CHG, imx_usb->base + USB_INTR);
+		temp = __raw_readl(imx_usb->base + USB_STAT);
+		cfg  = (temp & STAT_CFG) >> 5;
+		intf = (temp & STAT_INTF) >> 3;
+		alt  =  temp & STAT_ALTSET;
+
+		D_REQ(imx_usb->dev,
+			"<%s> orig config C=%d, I=%d, A=%d / "
+			"req config C=%d, I=%d, A=%d\n",
+			__func__, imx_usb->cfg, imx_usb->intf, imx_usb->alt,
+			cfg, intf, alt);
+
+		if (cfg != 1 && cfg != 2)
+			goto end_irq;
+
+		imx_usb->set_config = 0;
+
+		/* Config setup */
+		if (imx_usb->cfg != cfg) {
+			D_REQ(imx_usb->dev, "<%s> Change config start\n",__func__);
+			u.bRequest = USB_REQ_SET_CONFIGURATION;
+			u.bRequestType = USB_DIR_OUT |
+					USB_TYPE_STANDARD |
+					USB_RECIP_DEVICE;
+			u.wValue = cfg;
+			u.wIndex = 0;
+			u.wLength = 0;
+			imx_usb->cfg = cfg;
+			imx_usb->set_config = 1;
+			imx_usb->driver->setup(&imx_usb->gadget, &u);
+			imx_usb->set_config = 0;
+			D_REQ(imx_usb->dev, "<%s> Change config done\n",__func__);
+
+		}
+		if (imx_usb->intf != intf || imx_usb->alt != alt) {
+			D_REQ(imx_usb->dev, "<%s> Change interface start\n",__func__);
+			u.bRequest = USB_REQ_SET_INTERFACE;
+			u.bRequestType = USB_DIR_OUT |
+					  USB_TYPE_STANDARD |
+					  USB_RECIP_INTERFACE;
+			u.wValue = alt;
+			u.wIndex = intf;
+			u.wLength = 0;
+			imx_usb->intf = intf;
+			imx_usb->alt = alt;
+			imx_usb->set_config = 1;
+			imx_usb->driver->setup(&imx_usb->gadget, &u);
+			imx_usb->set_config = 0;
+			D_REQ(imx_usb->dev, "<%s> Change interface done\n",__func__);
+		}
+	}
+
+	if (intr & INTR_SOF) {
+		if (imx_usb->ep0state == EP0_IDLE) {
+			temp = __raw_readl(imx_usb->base + USB_CTRL);
+			__raw_writel(temp | CTRL_CMDOVER, imx_usb->base + USB_CTRL);
+		}
+	}
+
+end_irq:
+	__raw_writel(intr, imx_usb->base + USB_INTR);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t imx_udc_ctrl_irq(int irq, void *dev)
+{
+	struct imx_udc_struct *imx_usb = dev;
+	int intr = __raw_readl(imx_usb->base + USB_EP_INTR(0));
+
+	dump_ep_intr(__func__, 0, intr, imx_usb->dev);
+
+	if (!imx_usb->driver) {
+		__raw_writel(intr, imx_usb->base + USB_EP_INTR(0));
+		return IRQ_HANDLED;
+	}
+
+	/* DEVREQ IRQ has highest priority */
+	if (intr & (EPINTR_DEVREQ | EPINTR_MDEVREQ))
+		handle_ep0_devreq(imx_usb);
+	/* The i.MX seems to miss the EOF interrupt sometimes.
+	 * Therefore we monitor both EOF and FIFO_EMPTY interrupts
+	 * when transmitting, and both EOF and FIFO_FULL when
+	 * receiving data.
+	 */
+	else if (intr & (EPINTR_EOF | EPINTR_FIFO_EMPTY | EPINTR_FIFO_FULL))
+		handle_ep0(&imx_usb->imx_ep[0]);
+
+	__raw_writel(intr, imx_usb->base + USB_EP_INTR(0));
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t imx_udc_bulk_irq(int irq, void *dev)
+{
+	struct imx_udc_struct *imx_usb = dev;
+	struct imx_ep_struct *imx_ep = &imx_usb->imx_ep[irq - USBD_INT0];
+	int intr = __raw_readl(imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
+
+	dump_ep_intr(__func__, irq - USBD_INT0, intr, imx_usb->dev);
+
+	if (!imx_usb->driver) {
+		__raw_writel(intr, imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
+		return IRQ_HANDLED;
+	}
+
+	handle_ep(imx_ep);
+
+	__raw_writel(intr, imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
+
+	return IRQ_HANDLED;
+}
+
+static irq_handler_t intr_handler(int i)
+{
+	switch (i) {
+	case 0:
+		return imx_udc_ctrl_irq;
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+		return imx_udc_bulk_irq;
+	default:
+		return imx_udc_irq;
+	}
+}
+
+/*******************************************************************************
+ * Statically defined IMX UDC structure
+ *******************************************************************************
+ */
+
+static const struct usb_gadget_ops imx_udc_ops = {
+	.get_frame	 = imx_udc_get_frame,
+	.wakeup		 = imx_udc_wakeup,
+};
+
+static struct imx_udc_struct controller = {
+	.gadget = {
+		.ops		= &imx_udc_ops,
+		.ep0		= &controller.imx_ep[0].ep,
+		.name		= driver_name,
+		.dev = {
+			 .bus_id	= "gadget",
+		 },
+	},
+
+	.imx_ep[0] = {
+		.ep = {
+			.name		= ep0name,
+			.ops		= &imx_ep_ops,
+			.maxpacket	= 32,
+		},
+		.imx_usb		= &controller,
+		.fifosize		= 32,
+		.bEndpointAddress	= 0,
+		.bmAttributes		= USB_ENDPOINT_XFER_CONTROL,
+	 },
+	.imx_ep[1] = {
+		.ep = {
+			.name		= "ep1in-bulk",
+			.ops		= &imx_ep_ops,
+			.maxpacket	= 64,
+		},
+		.imx_usb		= &controller,
+		.fifosize		= 64,
+		.bEndpointAddress	= USB_DIR_IN | 1,
+		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
+	 },
+	.imx_ep[2] = {
+		.ep = {
+			.name		= "ep2out-bulk",
+			.ops		= &imx_ep_ops,
+			.maxpacket	= 64,
+		},
+		.imx_usb		= &controller,
+		.fifosize		= 64,
+		.bEndpointAddress	= USB_DIR_OUT | 2,
+		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
+	 },
+	.imx_ep[3] = {
+		.ep = {
+			.name		= "ep3out-bulk",
+			.ops		= &imx_ep_ops,
+			.maxpacket	= 32,
+		},
+		.imx_usb		= &controller,
+		.fifosize		= 32,
+		.bEndpointAddress 	= USB_DIR_OUT | 3,
+		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
+	 },
+	.imx_ep[4] = {
+		.ep = {
+			.name		= "ep4in-int",
+			.ops		= &imx_ep_ops,
+			.maxpacket	= 32,
+		 },
+		.imx_usb		= &controller,
+		.fifosize		= 32,
+		.bEndpointAddress 	= USB_DIR_IN | 4,
+		.bmAttributes		= USB_ENDPOINT_XFER_INT,
+	 },
+	.imx_ep[5] = {
+		.ep = {
+			.name		= "ep5out-int",
+			.ops		= &imx_ep_ops,
+			.maxpacket	= 32,
+		},
+		.imx_usb		= &controller,
+		.fifosize		= 32,
+		.bEndpointAddress 	= USB_DIR_OUT | 5,
+		.bmAttributes		= USB_ENDPOINT_XFER_INT,
+	 },
+};
+
+/*******************************************************************************
+ * USB gadget driver functions
+ *******************************************************************************
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+	struct imx_udc_struct *imx_usb = &controller;
+	int retval;
+
+	if (!driver
+		|| driver->speed < USB_SPEED_FULL
+		|| !driver->bind
+		|| !driver->disconnect
+		|| !driver->setup)
+			return -EINVAL;
+	if (!imx_usb)
+		return -ENODEV;
+	if (imx_usb->driver)
+		return -EBUSY;
+
+	/* first hook up the driver ... */
+	imx_usb->driver = driver;
+	imx_usb->gadget.dev.driver = &driver->driver;
+
+	retval = device_add(&imx_usb->gadget.dev);
+	if (retval)
+		goto fail;
+	retval = driver->bind(&imx_usb->gadget);
+	if (retval) {
+		D_ERR(imx_usb->dev, "<%s> bind to driver %s --> error %d\n",
+			__func__, driver->driver.name, retval);
+		device_del(&imx_usb->gadget.dev);
+
+		goto fail;
+	}
+
+	D_INI(imx_usb->dev, "<%s> registered gadget driver '%s'\n",
+		__func__, driver->driver.name);
+
+	imx_udc_enable(imx_usb);
+
+	return 0;
+fail:
+	imx_usb->driver = NULL;
+	imx_usb->gadget.dev.driver = NULL;
+	return retval;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+	struct imx_udc_struct *imx_usb = &controller;
+
+	if (!imx_usb)
+		return -ENODEV;
+	if (!driver || driver != imx_usb->driver || !driver->unbind)
+		return -EINVAL;
+
+	udc_stop_activity(imx_usb, driver);
+	imx_udc_disable(imx_usb);
+
+	driver->unbind(&imx_usb->gadget);
+	imx_usb->gadget.dev.driver = NULL;
+	imx_usb->driver = NULL;
+
+	device_del(&imx_usb->gadget.dev);
+
+	D_INI(imx_usb->dev, "<%s> unregistered gadget driver '%s'\n",
+		__func__, driver->driver.name);
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+/*******************************************************************************
+ * Module functions
+ *******************************************************************************
+ */
+
+static int __init imx_udc_probe(struct platform_device *pdev)
+{
+	struct imx_udc_struct *imx_usb = &controller;
+	struct resource *res;
+	struct imxusb_platform_data *pdata;
+	struct clk *clk;
+	void __iomem *base;
+	int ret = 0;
+	int i, res_size;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "can't get device resources\n");
+		return -ENODEV;
+	}
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev, "driver needs platform data\n");
+		return -ENODEV;
+	}
+
+	res_size = res->end - res->start + 1;
+	if (!request_mem_region(res->start, res_size, res->name)) {
+		dev_err(&pdev->dev, "can't allocate %d bytes at %d address\n",
+			res_size, res->start);
+		return -ENOMEM;
+	}
+
+	if (pdata->init) {
+		ret = pdata->init(&pdev->dev);
+		if (ret)
+			goto fail0;
+	}
+
+	base = ioremap(res->start, res_size);
+	if (!base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		ret = -EIO;
+		goto fail1;
+	}
+
+	clk = clk_get(NULL, "usbd_clk");
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		dev_err(&pdev->dev, "can't get USB clock\n");
+		goto fail2;
+	}
+	clk_enable(clk);
+
+	if (clk_get_rate(clk) != 48000000) {
+		D_INI(&pdev->dev,
+			"Bad USB clock (%d Hz), changing to 48000000 Hz\n",
+			(int)clk_get_rate(clk));
+		if (clk_set_rate(clk, 48000000)) {
+			dev_err(&pdev->dev,
+				"Unable to set correct USB clock (48MHz)\n");
+			ret = -EIO;
+			goto fail3;
+		}
+	}
+
+	for (i = 0; i < IMX_USB_NB_EP + 1; i++) {
+		imx_usb->usbd_int[i] = platform_get_irq(pdev, i);
+		if (imx_usb->usbd_int[i] < 0) {
+			dev_err(&pdev->dev, "can't get irq number\n");
+			ret = -ENODEV;
+			goto fail3;
+		}
+	}
+
+	for (i = 0; i < IMX_USB_NB_EP + 1; i++) {
+		ret = request_irq(imx_usb->usbd_int[i], intr_handler(i),
+				     IRQF_DISABLED, driver_name, imx_usb);
+		if (ret) {
+			dev_err(&pdev->dev, "can't get irq %i, err %d\n",
+				imx_usb->usbd_int[i], ret);
+			for (--i; i >= 0; i--)
+				free_irq(imx_usb->usbd_int[i], imx_usb);
+			goto fail3;
+		}
+	}
+
+	imx_usb->res = res;
+	imx_usb->base = base;
+	imx_usb->clk = clk;
+	imx_usb->dev = &pdev->dev;
+
+	device_initialize(&imx_usb->gadget.dev);
+
+	imx_usb->gadget.dev.parent = &pdev->dev;
+	imx_usb->gadget.dev.dma_mask = pdev->dev.dma_mask;
+
+	platform_set_drvdata(pdev, imx_usb);
+
+	usb_init_data(imx_usb);
+	imx_udc_init(imx_usb);
+
+	return 0;
+
+fail3:
+	clk_disable(clk);
+	clk_put(clk);
+fail2:
+	iounmap(base);
+fail1:
+	if (pdata->exit)
+		pdata->exit(&pdev->dev);
+fail0:
+	release_mem_region(res->start, res_size);
+	return ret;
+}
+
+static int __exit imx_udc_remove(struct platform_device *pdev)
+{
+	struct imx_udc_struct *imx_usb = platform_get_drvdata(pdev);
+	struct imxusb_platform_data *pdata = pdev->dev.platform_data;
+	int i;
+
+	imx_udc_disable(imx_usb);
+
+	for (i = 0; i < IMX_USB_NB_EP + 1; i++)
+		free_irq(imx_usb->usbd_int[i], imx_usb);
+
+	clk_disable(imx_usb->clk);
+	clk_put(imx_usb->clk);
+	iounmap(imx_usb->base);
+
+	release_mem_region(imx_usb->res->start,
+		imx_usb->res->end - imx_usb->res->start + 1);
+
+	if (pdata->exit)
+		pdata->exit(&pdev->dev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+/*----------------------------------------------------------------------------*/
+
+#ifdef	CONFIG_PM
+#define	imx_udc_suspend	NULL
+#define	imx_udc_resume	NULL
+#else
+#define	imx_udc_suspend	NULL
+#define	imx_udc_resume	NULL
+#endif
+
+/*----------------------------------------------------------------------------*/
+
+static struct platform_driver udc_driver = {
+	.driver		= {
+		.name	= driver_name,
+		.owner	= THIS_MODULE,
+	},
+	.remove		= __exit_p(imx_udc_remove),
+	.suspend	= imx_udc_suspend,
+	.resume		= imx_udc_resume,
+};
+
+static int __init udc_init(void)
+{
+	return platform_driver_probe(&udc_driver, imx_udc_probe);
+}
+module_init(udc_init);
+
+static void __exit udc_exit(void)
+{
+	platform_driver_unregister(&udc_driver);
+}
+module_exit(udc_exit);
+
+MODULE_DESCRIPTION("IMX USB Device Controller driver");
+MODULE_AUTHOR("Darius Augulis <augulis.darius@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx_udc");
diff --git a/drivers/usb/gadget/imx_udc.h b/drivers/usb/gadget/imx_udc.h
new file mode 100644
index 0000000..8500769
--- /dev/null
+++ b/drivers/usb/gadget/imx_udc.h
@@ -0,0 +1,344 @@
+/*
+ *	Copyright (C) 2005 Mike Lee(eemike@gmail.com)
+ *
+ *	This UDC driver is still under test and its code is based on pxa2xx_udc.h
+ *	Please use it at your own risk!
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ *
+ *	This program is distributed in the hope that it will be useful,
+ *	but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *	GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_USB_GADGET_IMX_H
+#define __LINUX_USB_GADGET_IMX_H
+
+#include <linux/types.h>
+
+/* Helper macros */
+#define EP_NO(ep)	((ep->bEndpointAddress) & ~USB_DIR_IN)	/* endpoint number */
+#define EP_DIR(ep)	((ep->bEndpointAddress) & USB_DIR_IN ? 1 : 0)	/* IN:1, OUT:0 */
+#define irq_to_ep(irq)	(((irq) >= USBD_INT0) && ((irq) <= USBD_INT6) ? ((irq) - USBD_INT0) : (USBD_INT6)) /* should not happen */
+#define ep_to_irq(ep)	(EP_NO((ep)) + USBD_INT0)
+#define IMX_USB_NB_EP	6
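/*
 * Worked example of the helpers above (USB_DIR_IN is 0x80 in <linux/usb/ch9.h>):
 *   bEndpointAddress 0x81 ("ep1in-bulk")  -> EP_NO() == 1, EP_DIR() == 1
 *   bEndpointAddress 0x02 ("ep2out-bulk") -> EP_NO() == 2, EP_DIR() == 0
 */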
+
+/* Driver structures */
+struct imx_request {
+	struct usb_request			req;
+	struct list_head			queue;
+	unsigned int				in_use;
+};
+
+enum ep0_state {
+	EP0_IDLE,
+	EP0_IN_DATA_PHASE,
+	EP0_OUT_DATA_PHASE,
+	EP0_CONFIG,
+	EP0_STALL,
+};
+
+struct imx_ep_struct {
+	struct usb_ep				ep;
+	struct imx_udc_struct			*imx_usb;
+	struct list_head			queue;
+	unsigned char				stopped;
+	unsigned char				fifosize;
+	unsigned char				bEndpointAddress;
+	unsigned char				bmAttributes;
+};
+
+struct imx_udc_struct {
+	struct usb_gadget			gadget;
+	struct usb_gadget_driver		*driver;
+	struct device				*dev;
+	struct imx_ep_struct			imx_ep[IMX_USB_NB_EP];
+	struct clk				*clk;
+	enum ep0_state				ep0state;
+	struct resource				*res;
+	void __iomem				*base;
+	unsigned char				set_config;
+	int					cfg,
+						intf,
+						alt,
+						usbd_int[7];
+};
+
+/* USB registers */
+#define  USB_FRAME		(0x00)	/* USB frame */
+#define  USB_SPEC		(0x04)	/* USB Spec */
+#define  USB_STAT		(0x08)	/* USB Status */
+#define  USB_CTRL		(0x0C)	/* USB Control */
+#define  USB_DADR		(0x10)	/* USB Desc RAM addr */
+#define  USB_DDAT		(0x14)	/* USB Desc RAM/EP buffer data */
+#define  USB_INTR		(0x18)	/* USB interrupt */
+#define  USB_MASK		(0x1C)	/* USB Mask */
+#define  USB_ENAB		(0x24)	/* USB Enable */
+#define  USB_EP_STAT(x)		(0x30 + (x*0x30)) /* USB status/control */
+#define  USB_EP_INTR(x)		(0x34 + (x*0x30)) /* USB interrupt */
+#define  USB_EP_MASK(x)		(0x38 + (x*0x30)) /* USB mask */
+#define  USB_EP_FDAT(x)		(0x3C + (x*0x30)) /* USB FIFO data */
+#define  USB_EP_FDAT0(x)	(0x3C + (x*0x30)) /* USB FIFO data */
+#define  USB_EP_FDAT1(x)	(0x3D + (x*0x30)) /* USB FIFO data */
+#define  USB_EP_FDAT2(x)	(0x3E + (x*0x30)) /* USB FIFO data */
+#define  USB_EP_FDAT3(x)	(0x3F + (x*0x30)) /* USB FIFO data */
+#define  USB_EP_FSTAT(x)	(0x40 + (x*0x30)) /* USB FIFO status */
+#define  USB_EP_FCTRL(x)	(0x44 + (x*0x30)) /* USB FIFO control */
+#define  USB_EP_LRFP(x)		(0x48 + (x*0x30)) /* USB last read frame pointer */
+#define  USB_EP_LWFP(x)		(0x4C + (x*0x30)) /* USB last write frame pointer */
+#define  USB_EP_FALRM(x)	(0x50 + (x*0x30)) /* USB FIFO alarm */
+#define  USB_EP_FRDP(x)		(0x54 + (x*0x30)) /* USB FIFO read pointer */
+#define  USB_EP_FWRP(x)		(0x58 + (x*0x30)) /* USB FIFO write pointer */
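+/*
+ * Each endpoint owns a 0x30-byte register window, so, for example, the
+ * status/control register of endpoint 2 sits at
+ * USB_EP_STAT(2) = 0x30 + 2 * 0x30 = 0x90 from the controller base.
+ */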
+/* USB Control Register Bit Fields.*/
+#define CTRL_CMDOVER		(1<<6)	/* UDC status */
+#define CTRL_CMDERROR		(1<<5)	/* UDC status */
+#define CTRL_FE_ENA		(1<<3)	/* Enable Front End logic */
+#define CTRL_UDC_RST		(1<<2)	/* UDC reset */
+#define CTRL_AFE_ENA		(1<<1)	/* Analog Front End enable */
+#define CTRL_RESUME		(1<<0)	/* UDC resume */
+/* USB Status Register Bit Fields.*/
+#define STAT_RST		(1<<8)
+#define STAT_SUSP		(1<<7)
+#define STAT_CFG		(3<<5)
+#define STAT_INTF		(3<<3)
+#define STAT_ALTSET		(7<<0)
+/* USB Interrupt Status/Mask Registers Bit fields */
+#define INTR_WAKEUP		(1<<31)	/* Wake up Interrupt */
+#define INTR_MSOF		(1<<7)	/* Missed Start of Frame */
+#define INTR_SOF		(1<<6)	/* Start of Frame */
+#define INTR_RESET_STOP		(1<<5)	/* Reset Signaling stop */
+#define INTR_RESET_START	(1<<4)	/* Reset Signaling start */
+#define INTR_RESUME		(1<<3)	/* Suspend to resume */
+#define INTR_SUSPEND		(1<<2)	/* Active to suspend */
+#define INTR_FRAME_MATCH	(1<<1)	/* Frame matched */
+#define INTR_CFG_CHG		(1<<0)	/* Configuration change occurred */
+/* USB Enable Register Bit Fields.*/
+#define ENAB_RST		(1<<31)	/* Reset USB modules */
+#define ENAB_ENAB		(1<<30)	/* Enable USB modules*/
+#define ENAB_SUSPEND		(1<<29)	/* Suspend USB modules */
+#define ENAB_ENDIAN		(1<<28)	/* Endian of USB modules */
+#define ENAB_PWRMD		(1<<0)	/* Power mode of USB modules */
+/* USB Descriptor Ram Address Register bit fields */
+#define DADR_CFG		(1<<31)	/* Configuration */
+#define DADR_BSY		(1<<30)	/* Busy status */
+#define DADR_DADR		(0x1FF)	/* Descriptor Ram Address */
+/* USB Descriptor RAM/Endpoint Buffer Data Register bit fields */
+#define DDAT_DDAT		(0xFF)	/* Descriptor Endpoint Buffer */
+/* USB Endpoint Status Register bit fields */
+#define EPSTAT_BCOUNT		(0x7F<<16)	/* Endpoint FIFO byte count */
+#define EPSTAT_SIP		(1<<8)	/* Endpoint setup in progress */
+#define EPSTAT_DIR		(1<<7)	/* Endpoint transfer direction */
+#define EPSTAT_MAX		(3<<5)	/* Endpoint Max packet size */
+#define EPSTAT_TYP		(3<<3)	/* Endpoint type */
+#define EPSTAT_ZLPS		(1<<2)	/* Send zero length packet */
+#define EPSTAT_FLUSH		(1<<1)	/* Endpoint FIFO Flush */
+#define EPSTAT_STALL		(1<<0)	/* Force stall */
+/* USB Endpoint FIFO Status Register bit fields */
+#define FSTAT_FRAME_STAT	(0xF<<24)	/* Frame status bit [0-3] */
+#define FSTAT_ERR		(1<<22)	/* FIFO error */
+#define FSTAT_UF		(1<<21)	/* FIFO underflow */
+#define FSTAT_OF		(1<<20)	/* FIFO overflow */
+#define FSTAT_FR		(1<<19)	/* FIFO frame ready */
+#define FSTAT_FULL		(1<<18)	/* FIFO full */
+#define FSTAT_ALRM		(1<<17)	/* FIFO alarm */
+#define FSTAT_EMPTY		(1<<16)	/* FIFO empty */
+/* USB Endpoint FIFO Control Register bit fields */
+#define FCTRL_WFR		(1<<29)	/* Write frame end */
+/* USB Endpoint Interrupt Status Register bit fields */
+#define EPINTR_FIFO_FULL	(1<<8)	/* fifo full */
+#define EPINTR_FIFO_EMPTY	(1<<7)	/* fifo empty */
+#define EPINTR_FIFO_ERROR	(1<<6)	/* fifo error */
+#define EPINTR_FIFO_HIGH	(1<<5)	/* fifo high */
+#define EPINTR_FIFO_LOW		(1<<4)	/* fifo low */
+#define EPINTR_MDEVREQ		(1<<3)	/* multi Device request */
+#define EPINTR_EOT		(1<<2)	/* fifo end of transfer */
+#define EPINTR_DEVREQ		(1<<1)	/* Device request */
+#define EPINTR_EOF		(1<<0)	/* fifo end of frame */
+
+/* Debug macros */
+#ifdef DEBUG
+
+/* #define DEBUG_REQ */
+/* #define DEBUG_TRX */
+/* #define DEBUG_INIT */
+/* #define DEBUG_EP0 */
+/* #define DEBUG_EPX */
+/* #define DEBUG_IRQ */
+/* #define DEBUG_EPIRQ */
+/* #define DEBUG_DUMP */
+#define DEBUG_ERR
+
+#ifdef DEBUG_REQ
+	#define D_REQ(dev, args...)	dev_dbg(dev, ## args)
+#else
+	#define D_REQ(dev, args...)	do {} while (0)
+#endif /* DEBUG_REQ */
+
+#ifdef DEBUG_TRX
+	#define D_TRX(dev, args...)	dev_dbg(dev, ## args)
+#else
+	#define D_TRX(dev, args...)	do {} while (0)
+#endif /* DEBUG_TRX */
+
+#ifdef DEBUG_INIT
+	#define D_INI(dev, args...)	dev_dbg(dev, ## args)
+#else
+	#define D_INI(dev, args...)	do {} while (0)
+#endif /* DEBUG_INIT */
+
+#ifdef DEBUG_EP0
+	static const char *state_name[] = {
+		"EP0_IDLE",
+		"EP0_IN_DATA_PHASE",
+		"EP0_OUT_DATA_PHASE",
+		"EP0_CONFIG",
+		"EP0_STALL"
+	};
+	#define D_EP0(dev, args...)	dev_dbg(dev, ## args)
+#else
+	#define D_EP0(dev, args...)	do {} while (0)
+#endif /* DEBUG_EP0 */
+
+#ifdef DEBUG_EPX
+	#define D_EPX(dev, args...)	dev_dbg(dev, ## args)
+#else
+	#define D_EPX(dev, args...)	do {} while (0)
+#endif /* DEBUG_EPX */
+
+#ifdef DEBUG_IRQ
+	static void dump_intr(const char *label, int irqreg, struct device *dev)
+	{
+		dev_dbg(dev, "<%s> USB_INTR=[%s%s%s%s%s%s%s%s%s]\n", label,
+			(irqreg & INTR_WAKEUP) ? " wake" : "",
+			(irqreg & INTR_MSOF) ? " msof" : "",
+			(irqreg & INTR_SOF) ? " sof" : "",
+			(irqreg & INTR_RESUME) ? " resume" : "",
+			(irqreg & INTR_SUSPEND) ? " suspend" : "",
+			(irqreg & INTR_RESET_STOP) ? " noreset" : "",
+			(irqreg & INTR_RESET_START) ? " reset" : "",
+			(irqreg & INTR_FRAME_MATCH) ? " fmatch" : "",
+			(irqreg & INTR_CFG_CHG) ? " config" : "");
+	}
+#else
+	#define dump_intr(x, y, z)		do {} while (0)
+#endif /* DEBUG_IRQ */
+
+#ifdef DEBUG_EPIRQ
+	static void dump_ep_intr(const char *label, int nr, int irqreg, struct device *dev)
+	{
+		dev_dbg(dev, "<%s> EP%d_INTR=[%s%s%s%s%s%s%s%s%s]\n", label, nr,
+			(irqreg & EPINTR_FIFO_FULL) ? " full" : "",
+			(irqreg & EPINTR_FIFO_EMPTY) ? " fempty" : "",
+			(irqreg & EPINTR_FIFO_ERROR) ? " ferr" : "",
+			(irqreg & EPINTR_FIFO_HIGH) ? " fhigh" : "",
+			(irqreg & EPINTR_FIFO_LOW) ? " flow" : "",
+			(irqreg & EPINTR_MDEVREQ) ? " mreq" : "",
+			(irqreg & EPINTR_EOF) ? " eof" : "",
+			(irqreg & EPINTR_DEVREQ) ? " devreq" : "",
+			(irqreg & EPINTR_EOT) ? " eot" : "");
+	}
+#else
+	#define dump_ep_intr(x, y, z, i)	do {} while (0)
+#endif /* DEBUG_EPIRQ */
+
+#ifdef DEBUG_DUMP
+	static void dump_usb_stat(const char *label, struct imx_udc_struct *imx_usb)
+	{
+		int temp = __raw_readl(imx_usb->base + USB_STAT);
+
+		dev_dbg(imx_usb->dev,
+			"<%s> USB_STAT=[%s%s CFG=%d, INTF=%d, ALTR=%d]\n", label,
+			(temp & STAT_RST) ? " reset" : "",
+			(temp & STAT_SUSP) ? " suspend" : "",
+			(temp & STAT_CFG) >> 5,
+			(temp & STAT_INTF) >> 3,
+			(temp & STAT_ALTSET));
+	}
+
+	static void dump_ep_stat(const char *label, struct imx_ep_struct *imx_ep)
+	{
+		int temp = __raw_readl(imx_ep->imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
+
+		dev_dbg(imx_ep->imx_usb->dev,
+			"<%s> EP%d_INTR=[%s%s%s%s%s%s%s%s%s]\n", label, EP_NO(imx_ep),
+			(temp & EPINTR_FIFO_FULL) ? " full" : "",
+			(temp & EPINTR_FIFO_EMPTY) ? " fempty" : "",
+			(temp & EPINTR_FIFO_ERROR) ? " ferr" : "",
+			(temp & EPINTR_FIFO_HIGH) ? " fhigh" : "",
+			(temp & EPINTR_FIFO_LOW) ? " flow" : "",
+			(temp & EPINTR_MDEVREQ) ? " mreq" : "",
+			(temp & EPINTR_EOF) ? " eof" : "",
+			(temp & EPINTR_DEVREQ) ? " devreq" : "",
+			(temp & EPINTR_EOT) ? " eot" : "");
+
+		temp = __raw_readl(imx_ep->imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
+
+		dev_dbg(imx_ep->imx_usb->dev,
+			"<%s> EP%d_STAT=[%s%s bcount=%d]\n", label, EP_NO(imx_ep),
+			(temp & EPSTAT_SIP) ? " sip" : "",
+			(temp & EPSTAT_STALL) ? " stall" : "",
+			(temp & EPSTAT_BCOUNT) >> 16);
+
+		temp = __raw_readl(imx_ep->imx_usb->base + USB_EP_FSTAT(EP_NO(imx_ep)));
+
+		dev_dbg(imx_ep->imx_usb->dev,
+			"<%s> EP%d_FSTAT=[%s%s%s%s%s%s%s]\n", label, EP_NO(imx_ep),
+			(temp & FSTAT_ERR) ? " ferr" : "",
+			(temp & FSTAT_UF) ? " funder" : "",
+			(temp & FSTAT_OF) ? " fover" : "",
+			(temp & FSTAT_FR) ? " fready" : "",
+			(temp & FSTAT_FULL) ? " ffull" : "",
+			(temp & FSTAT_ALRM) ? " falarm" : "",
+			(temp & FSTAT_EMPTY) ? " fempty" : "");
+	}
+
+	static void dump_req(const char *label, struct imx_ep_struct *imx_ep, struct usb_request *req)
+	{
+		int i;
+
+		if (!req || !req->buf) {
+			dev_dbg(imx_ep->imx_usb->dev, "<%s> req or req->buf is NULL\n", label);
+			return;
+		}
+
+		if ((!EP_NO(imx_ep) && imx_ep->imx_usb->ep0state == EP0_IN_DATA_PHASE)
+			|| (EP_NO(imx_ep) && EP_DIR(imx_ep))) {
+
+			dev_dbg(imx_ep->imx_usb->dev, "<%s> request dump <", label);
+			for (i = 0; i < req->length; i++)
+				printk("%02x-", *((u8 *)req->buf + i));
+			printk(">\n");
+		}
+	}
+
+#else
+	#define dump_ep_stat(x, y)		do {} while (0)
+	#define dump_usb_stat(x, y)		do {} while (0)
+	#define dump_req(x, y, z)		do {} while (0)
+#endif /* DEBUG_DUMP */
+
+#ifdef DEBUG_ERR
+	#define D_ERR(dev, args...)	dev_dbg(dev, ## args)
+#else
+	#define D_ERR(dev, args...)	do {} while (0)
+#endif
+
+#else
+	#define D_REQ(dev, args...)		do {} while (0)
+	#define D_TRX(dev, args...)		do {} while (0)
+	#define D_INI(dev, args...)		do {} while (0)
+	#define D_EP0(dev, args...)		do {} while (0)
+	#define D_EPX(dev, args...)		do {} while (0)
+	#define dump_ep_intr(x, y, z, i)	do {} while (0)
+	#define dump_intr(x, y, z)		do {} while (0)
+	#define dump_ep_stat(x, y)		do {} while (0)
+	#define dump_usb_stat(x, y)		do {} while (0)
+	#define dump_req(x, y, z)		do {} while (0)
+	#define D_ERR(dev, args...)		do {} while (0)
+#endif /* DEBUG */
+
+#endif /* __LINUX_USB_GADGET_IMX_H */
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 3a8879e..43dcf9e 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1546,8 +1546,6 @@
 {
 }
 
-#define resource_len(r) (((r)->end - (r)->start) + 1)
-
 static int __init m66592_probe(struct platform_device *pdev)
 {
 	struct resource *res;
@@ -1560,11 +1558,10 @@
 	int ret = 0;
 	int i;
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-			(char *)udc_name);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
 		ret = -ENODEV;
-		pr_err("platform_get_resource_byname error.\n");
+		pr_err("platform_get_resource error.\n");
 		goto clean_up;
 	}
 
@@ -1575,7 +1572,7 @@
 		goto clean_up;
 	}
 
-	reg = ioremap(res->start, resource_len(res));
+	reg = ioremap(res->start, resource_size(res));
 	if (reg == NULL) {
 		ret = -ENOMEM;
 		pr_err("ioremap error.\n");
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 8ae70de..12c6d83 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -669,7 +669,7 @@
 
 	/* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
 	wmb ();
-	td->dmacount = cpu_to_le32p (&dmacount);
+	td->dmacount = cpu_to_le32(dmacount);
 }
 
 static const u32 dmactl_default =
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 34e9e39..57d9641 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -3006,7 +3006,7 @@
 
 cleanup0:
 	if (xceiv)
-		put_device(xceiv->dev);
+		otg_put_transceiver(xceiv);
 
 	if (cpu_is_omap16xx() || cpu_is_omap24xx()) {
 		clk_disable(hhc_clk);
@@ -3034,7 +3034,7 @@
 
 	pullup_disable(udc);
 	if (udc->transceiver) {
-		put_device(udc->transceiver->dev);
+		otg_put_transceiver(udc->transceiver);
 		udc->transceiver = NULL;
 	}
 	omap_writew(0, UDC_SYSCON1);
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 697a0ca..9b36205 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -2198,7 +2198,7 @@
 	udc_disable(dev);
 	udc_reinit(dev);
 
-	dev->vbus = is_vbus_present();
+	dev->vbus = !!is_vbus_present();
 
 	/* irq setup after old hardware state is cleaned up */
 	retval = request_irq(irq, pxa25x_udc_irq,
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 65110d0..990f40f 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -430,7 +430,6 @@
 /**
  * pio_irq_disable - Disables irq generation for one endpoint
  * @ep: udc endpoint
- * @index: endpoint number
  */
 static void pio_irq_disable(struct pxa_ep *ep)
 {
@@ -586,7 +585,6 @@
  * inc_ep_stats_bytes - Update ep stats counts
  * @ep: physical endpoint
  * @count: bytes transfered on endpoint
- * @req: usb request
  * @is_in: ep direction (USB_DIR_IN or 0)
  */
 static void inc_ep_stats_bytes(struct pxa_ep *ep, int count, int is_in)
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index c7e2556..9a2b892 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -36,6 +36,7 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/gpio.h>
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
@@ -51,7 +52,6 @@
 #include <mach/irqs.h>
 
 #include <mach/hardware.h>
-#include <mach/regs-gpio.h>
 
 #include <plat/regs-udc.h>
 #include <plat/udc.h>
@@ -1510,11 +1510,7 @@
 
 	dprintk(DEBUG_NORMAL, "%s()\n", __func__);
 
-	/* some cpus cannot read from an line configured to IRQ! */
-	s3c2410_gpio_cfgpin(udc_info->vbus_pin, S3C2410_GPIO_INPUT);
-	value = s3c2410_gpio_getpin(udc_info->vbus_pin);
-	s3c2410_gpio_cfgpin(udc_info->vbus_pin, S3C2410_GPIO_SFN2);
-
+	value = gpio_get_value(udc_info->vbus_pin) ? 1 : 0;
 	if (udc_info->vbus_pin_inverted)
 		value = !value;
 
@@ -1802,7 +1798,7 @@
 	struct s3c2410_udc *udc = &memory;
 	struct device *dev = &pdev->dev;
 	int retval;
-	unsigned int irq;
+	int irq;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
@@ -1861,7 +1857,7 @@
 
 	/* irq setup after old hardware state is cleaned up */
 	retval = request_irq(IRQ_USBD, s3c2410_udc_irq,
-			IRQF_DISABLED, gadget_name, udc);
+			     IRQF_DISABLED, gadget_name, udc);
 
 	if (retval != 0) {
 		dev_err(dev, "cannot get irq %i, err %d\n", IRQ_USBD, retval);
@@ -1872,17 +1868,28 @@
 	dev_dbg(dev, "got irq %i\n", IRQ_USBD);
 
 	if (udc_info && udc_info->vbus_pin > 0) {
-		irq = s3c2410_gpio_getirq(udc_info->vbus_pin);
+		retval = gpio_request(udc_info->vbus_pin, "udc vbus");
+		if (retval < 0) {
+			dev_err(dev, "cannot claim vbus pin\n");
+			goto err_int;
+		}
+
+		irq = gpio_to_irq(udc_info->vbus_pin);
+		if (irq < 0) {
+			dev_err(dev, "no irq for gpio vbus pin\n");
+			goto err_gpio_claim;
+		}
+
 		retval = request_irq(irq, s3c2410_udc_vbus_irq,
 				     IRQF_DISABLED | IRQF_TRIGGER_RISING
 				     | IRQF_TRIGGER_FALLING | IRQF_SHARED,
 				     gadget_name, udc);
 
 		if (retval != 0) {
-			dev_err(dev, "can't get vbus irq %i, err %d\n",
+			dev_err(dev, "can't get vbus irq %d, err %d\n",
 				irq, retval);
 			retval = -EBUSY;
-			goto err_int;
+			goto err_gpio_claim;
 		}
 
 		dev_dbg(dev, "got irq %i\n", irq);
@@ -1902,6 +1909,9 @@
 
 	return 0;
 
+err_gpio_claim:
+	if (udc_info && udc_info->vbus_pin > 0)
+		gpio_free(udc_info->vbus_pin);
 err_int:
 	free_irq(IRQ_USBD, udc);
 err_map:
@@ -1927,7 +1937,7 @@
 	debugfs_remove(udc->regs_info);
 
 	if (udc_info && udc_info->vbus_pin > 0) {
-		irq = s3c2410_gpio_getirq(udc_info->vbus_pin);
+		irq = gpio_to_irq(udc_info->vbus_pin);
 		free_irq(irq, udc);
 	}
 
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index d9739d5..96d65ca 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -716,6 +716,14 @@
 
 static struct eth_dev *the_dev;
 
+static const struct net_device_ops eth_netdev_ops = {
+	.ndo_open		= eth_open,
+	.ndo_stop		= eth_stop,
+	.ndo_start_xmit		= eth_start_xmit,
+	.ndo_change_mtu		= ueth_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
 
 /**
  * gether_setup - initialize one ethernet-over-usb link
@@ -764,12 +772,8 @@
 	if (ethaddr)
 		memcpy(ethaddr, dev->host_mac, ETH_ALEN);
 
-	net->change_mtu = ueth_change_mtu;
-	net->hard_start_xmit = eth_start_xmit;
-	net->open = eth_open;
-	net->stop = eth_stop;
-	/* watchdog_timeo, tx_timeout ... */
-	/* set_multicast_list */
+	net->netdev_ops = &eth_netdev_ops;
+
 	SET_ETHTOOL_OPS(net, &ops);
 
 	/* two kinds of host-initiated state changes:
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index f3a75a9..2b476b6 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -96,6 +96,19 @@
 	  Enables support for the USB controller present on the PowerPC
 	  OpenFirmware platform bus.
 
+config USB_OXU210HP_HCD
+	tristate "OXU210HP HCD support"
+	depends on USB
+	---help---
+	  The OXU210HP is a USB host/OTG/device controller. Enable this
+	  option if your board has this chip. If unsure, say N.
+
+	  This driver does not support isochronous transfers and does not
+	  implement the OTG or USB device controller functions.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called oxu210hp-hcd.
+
 config USB_ISP116X_HCD
 	tristate "ISP116X HCD support"
 	depends on USB
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 23be222..e5f3f20 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -13,6 +13,7 @@
 obj-$(CONFIG_PCI)		+= pci-quirks.o
 
 obj-$(CONFIG_USB_EHCI_HCD)	+= ehci-hcd.o
+obj-$(CONFIG_USB_OXU210HP_HCD)	+= oxu210hp-hcd.o
 obj-$(CONFIG_USB_ISP116X_HCD)	+= isp116x-hcd.o
 obj-$(CONFIG_USB_OHCI_HCD)	+= ohci-hcd.o
 obj-$(CONFIG_USB_UHCI_HCD)	+= uhci-hcd.o
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 0cb53ca..7f4ace7 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -455,9 +455,7 @@
 				(scratch >> 16) & 0x7fff,
 				scratch,
 				td->urb);
-		if (temp < 0)
-			temp = 0;
-		else if (size < temp)
+		if (size < temp)
 			temp = size;
 		size -= temp;
 		next += temp;
@@ -466,9 +464,7 @@
 	}
 
 	temp = snprintf (next, size, "\n");
-	if (temp < 0)
-		temp = 0;
-	else if (size < temp)
+	if (size < temp)
 		temp = size;
 	size -= temp;
 	next += temp;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 218f966..97a53a4 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -194,6 +194,7 @@
 	u32			temp;
 	u32			power_okay;
 	int			i;
+	u8			resume_needed = 0;
 
 	if (time_before (jiffies, ehci->next_statechange))
 		msleep(5);
@@ -228,7 +229,9 @@
 
 	/* Some controller/firmware combinations need a delay during which
 	 * they set up the port statuses.  See Bugzilla #8190. */
-	mdelay(8);
+	spin_unlock_irq(&ehci->lock);
+	msleep(8);
+	spin_lock_irq(&ehci->lock);
 
 	/* manually resume the ports we suspended during bus_suspend() */
 	i = HCS_N_PORTS (ehci->hcs_params);
@@ -236,12 +239,21 @@
 		temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
 		temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
 		if (test_bit(i, &ehci->bus_suspended) &&
-				(temp & PORT_SUSPEND))
+				(temp & PORT_SUSPEND)) {
 			temp |= PORT_RESUME;
+			resume_needed = 1;
+		}
 		ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
 	}
+
+	/* msleep for 20ms only if code is trying to resume port */
+	if (resume_needed) {
+		spin_unlock_irq(&ehci->lock);
+		msleep(20);
+		spin_lock_irq(&ehci->lock);
+	}
+
 	i = HCS_N_PORTS (ehci->hcs_params);
-	mdelay (20);
 	while (i--) {
 		temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
 		if (test_bit(i, &ehci->bus_suspended) &&
@@ -422,8 +434,15 @@
 		port_status &= ~PORT_RWC_BITS;
 		ehci_writel(ehci, port_status, status_reg);
 
-	} else
+		/* ensure 440EPX ohci controller state is operational */
+		if (ehci->has_amcc_usb23)
+			set_ohci_hcfs(ehci, 1);
+	} else {
 		ehci_dbg (ehci, "port %d high speed\n", index + 1);
+		/* ensure 440EPx ohci controller state is suspended */
+		if (ehci->has_amcc_usb23)
+			set_ohci_hcfs(ehci, 0);
+	}
 
 	return port_status;
 }
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 36864f9..bdc6e86 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -219,15 +219,19 @@
 	/* Serial Bus Release Number is at PCI 0x60 offset */
 	pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
 
-	/* Workaround current PCI init glitch:  wakeup bits aren't
-	 * being set from PCI PM capability.
+	/* Keep this around for a while just in case some EHCI
+	 * implementation uses legacy PCI PM support.  This test
+	 * can be removed on 17 Dec 2009 if the dev_warn() hasn't
+	 * been triggered by then.
 	 */
 	if (!device_can_wakeup(&pdev->dev)) {
 		u16	port_wake;
 
 		pci_read_config_word(pdev, 0x62, &port_wake);
-		if (port_wake & 0x0001)
+		if (port_wake & 0x0001) {
+			dev_warn(&pdev->dev, "Enabling legacy PCI PM\n");
 			device_init_wakeup(&pdev->dev, 1);
+		}
 	}
 
 #ifdef	CONFIG_USB_SUSPEND
@@ -428,6 +432,8 @@
 
 #ifdef	CONFIG_PM
 	.suspend =	usb_hcd_pci_suspend,
+	.suspend_late =	usb_hcd_pci_suspend_late,
+	.resume_early =	usb_hcd_pci_resume_early,
 	.resume =	usb_hcd_pci_resume,
 #endif
 	.shutdown = 	usb_hcd_pci_shutdown,
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index b018dee..ef732b7 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -107,11 +107,13 @@
 {
 	struct device_node *dn = op->node;
 	struct usb_hcd *hcd;
-	struct ehci_hcd	*ehci;
+	struct ehci_hcd	*ehci = NULL;
 	struct resource res;
 	int irq;
 	int rv;
 
+	struct device_node *np;
+
 	if (usb_disabled())
 		return -ENODEV;
 
@@ -149,6 +151,20 @@
 	}
 
 	ehci = hcd_to_ehci(hcd);
+	np = of_find_compatible_node(NULL, NULL, "ibm,usb-ohci-440epx");
+	if (np != NULL) {
+		/* check whether we are really affected by the usb23 erratum */
+		if (!of_address_to_resource(np, 0, &res))
+			ehci->ohci_hcctrl_reg = ioremap(res.start +
+					OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN);
+		else
+			pr_debug(__FILE__ ": no ohci offset in fdt\n");
+		if (!ehci->ohci_hcctrl_reg) {
+			pr_debug(__FILE__ ": ioremap for ohci hcctrl failed\n");
+		} else {
+			ehci->has_amcc_usb23 = 1;
+		}
+	}
 
 	if (of_get_property(dn, "big-endian", NULL)) {
 		ehci->big_endian_mmio = 1;
@@ -181,6 +197,9 @@
 	irq_dispose_mapping(irq);
 err_irq:
 	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+
+	if (ehci->has_amcc_usb23)
+		iounmap(ehci->ohci_hcctrl_reg);
 err_rmr:
 	usb_put_hcd(hcd);
 
@@ -191,6 +210,11 @@
 static int ehci_hcd_ppc_of_remove(struct of_device *op)
 {
 	struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+	struct device_node *np;
+	struct resource res;
+
 	dev_set_drvdata(&op->dev, NULL);
 
 	dev_dbg(&op->dev, "stopping PPC-OF USB Controller\n");
@@ -201,6 +225,25 @@
 	irq_dispose_mapping(hcd->irq);
 	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
 
+	/* use request_mem_region to test if the ohci driver is loaded.  if so
+	 * ensure the ohci core is operational.
+	 */
+	if (ehci->has_amcc_usb23) {
+		np = of_find_compatible_node(NULL, NULL, "ibm,usb-ohci-440epx");
+		if (np != NULL) {
+			if (!of_address_to_resource(np, 0, &res))
+				if (!request_mem_region(res.start,
+							    0x4, hcd_name))
+					set_ohci_hcfs(ehci, 1);
+				else
+					release_mem_region(res.start, 0x4);
+			else
+				pr_debug(__FILE__ ": no ohci offset in fdt\n");
+			of_node_put(np);
+		}
+
+		iounmap(ehci->ohci_hcctrl_reg);
+	}
 	usb_put_hcd(hcd);
 
 	return 0;
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index c7d4b5a..fb7054cc 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -120,6 +120,16 @@
 	unsigned		has_fsl_port_bug:1; /* FreeScale */
 	unsigned		big_endian_mmio:1;
 	unsigned		big_endian_desc:1;
+	unsigned		has_amcc_usb23:1;
+
+	/* required for usb23 quirk */
+	#define OHCI_CTRL_HCFS          (3 << 6)
+	#define OHCI_USB_OPER           (2 << 6)
+	#define OHCI_USB_SUSPEND        (3 << 6)
+
+	#define OHCI_HCCTRL_OFFSET      0x4
+	#define OHCI_HCCTRL_LEN         0x4
+	__hc32			*ohci_hcctrl_reg;
 
 	u8			sbrn;		/* packed release number */
 
@@ -636,6 +646,30 @@
 #endif
 }
 
+/*
+ * On certain ppc-44x SoCs there is a HW issue that can only be worked around by
+ * explicitly switching the companion OHCI between its suspended and operational
+ * states. This function therefore only makes sense on that arch. The other
+ * common bits depend on the has_amcc_usb23 quirk flag.
+ */
+#ifdef CONFIG_44x
+static inline void set_ohci_hcfs(struct ehci_hcd *ehci, int operational)
+{
+	u32 hc_control;
+
+	hc_control = (readl_be(ehci->ohci_hcctrl_reg) & ~OHCI_CTRL_HCFS);
+	if (operational)
+		hc_control |= OHCI_USB_OPER;
+	else
+		hc_control |= OHCI_USB_SUSPEND;
+
+	writel_be(hc_control, ehci->ohci_hcctrl_reg);
+	(void) readl_be(ehci->ohci_hcctrl_reg);
+}
+#else
+static inline void set_ohci_hcfs(struct ehci_hcd *ehci, int operational)
+{ }
+#endif
+
 /*-------------------------------------------------------------------------*/
 
 /*
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 8017f1c..b899f1a 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -435,14 +435,13 @@
 
 	/*
 	 * PORT 1 Control register of the ISP1760 is the OTG control
-	 * register on ISP1761.
+	 * register on ISP1761. Since there is no OTG or device controller
+	 * support in this driver, we use port 1 as a "normal" USB host port on
+	 * both chips.
 	 */
-	if (!(priv->devflags & ISP1760_FLAG_ISP1761) &&
-	    !(priv->devflags & ISP1760_FLAG_PORT1_DIS)) {
-		isp1760_writel(PORT1_POWER | PORT1_INIT2,
-			       hcd->regs + HC_PORT1_CTRL);
-		mdelay(10);
-	}
+	isp1760_writel(PORT1_POWER | PORT1_INIT2,
+		       hcd->regs + HC_PORT1_CTRL);
+	mdelay(10);
 
 	priv->hcs_params = isp1760_readl(hcd->regs + HC_HCSPARAMS);
 
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h
index 4377277..a9daea5 100644
--- a/drivers/usb/host/isp1760-hcd.h
+++ b/drivers/usb/host/isp1760-hcd.h
@@ -135,7 +135,6 @@
  * indicate the most "atypical" case, so that a devflags of 0 is
  * a sane default configuration.
  */
-#define ISP1760_FLAG_PORT1_DIS		0x00000001 /* Port 1 disabled */
 #define ISP1760_FLAG_BUS_WIDTH_16	0x00000002 /* 16-bit data bus width */
 #define ISP1760_FLAG_OTG_EN		0x00000004 /* Port 1 supports OTG */
 #define ISP1760_FLAG_ANALOG_OC		0x00000008 /* Analog overcurrent */
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index b87ca7c..4cf7ca4 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -60,9 +60,6 @@
 	if (of_device_is_compatible(dp, "nxp,usb-isp1761"))
 		devflags |= ISP1760_FLAG_ISP1761;
 
-	if (of_get_property(dp, "port1-disable", NULL) != NULL)
-		devflags |= ISP1760_FLAG_PORT1_DIS;
-
 	/* Some systems wire up only 16 of the 32 data lines */
 	prop = of_get_property(dp, "bus-width", NULL);
 	if (prop && *prop == 16)
@@ -129,23 +126,23 @@
 #endif
 
 #ifdef CONFIG_PCI
-static u32 nxp_pci_io_base;
-static u32 iolength;
-static u32 pci_mem_phy0;
-static u32 length;
-static u8 __iomem *chip_addr;
-static u8 __iomem *iobase;
-
 static int __devinit isp1761_pci_probe(struct pci_dev *dev,
 		const struct pci_device_id *id)
 {
 	u8 latency, limit;
 	__u32 reg_data;
 	int retry_count;
-	int length;
-	int status = 1;
 	struct usb_hcd *hcd;
 	unsigned int devflags = 0;
+	int ret_status = 0;
+
+	resource_size_t pci_mem_phy0;
+	resource_size_t memlength;
+
+	u8 __iomem *chip_addr;
+	u8 __iomem *iobase;
+	resource_size_t nxp_pci_io_base;
+	resource_size_t iolength;
 
 	if (usb_disabled())
 		return -ENODEV;
@@ -168,26 +165,30 @@
 	iobase = ioremap_nocache(nxp_pci_io_base, iolength);
 	if (!iobase) {
 		printk(KERN_ERR "ioremap #1\n");
-		release_mem_region(nxp_pci_io_base, iolength);
-		return -ENOMEM;
+		ret_status = -ENOMEM;
+		goto cleanup1;
 	}
 	/* Grab the PLX PCI shared memory of the ISP 1761 we need  */
 	pci_mem_phy0 = pci_resource_start(dev, 3);
-	length = pci_resource_len(dev, 3);
-
-	if (length < 0xffff) {
-		printk(KERN_ERR "memory length for this resource is less than "
-				"required\n");
-		release_mem_region(nxp_pci_io_base, iolength);
-		iounmap(iobase);
-		return  -ENOMEM;
+	memlength = pci_resource_len(dev, 3);
+	if (memlength < 0xffff) {
+		printk(KERN_ERR "memory length for this resource is wrong\n");
+		ret_status = -ENOMEM;
+		goto cleanup2;
 	}
 
-	if (!request_mem_region(pci_mem_phy0, length, "ISP-PCI")) {
+	if (!request_mem_region(pci_mem_phy0, memlength, "ISP-PCI")) {
 		printk(KERN_ERR "host controller already in use\n");
-		release_mem_region(nxp_pci_io_base, iolength);
-		iounmap(iobase);
-		return -EBUSY;
+		ret_status = -EBUSY;
+		goto cleanup2;
+	}
+
+	/* map available memory */
+	chip_addr = ioremap_nocache(pci_mem_phy0, memlength);
+	if (!chip_addr) {
+		printk(KERN_ERR "Error ioremap failed\n");
+		ret_status = -ENOMEM;
+		goto cleanup3;
 	}
 
 	/* bad pci latencies can contribute to overruns */
@@ -210,39 +211,54 @@
 		 * */
 		writel(0xface, chip_addr + HC_SCRATCH_REG);
 		udelay(100);
-		reg_data = readl(chip_addr + HC_SCRATCH_REG);
+		reg_data = readl(chip_addr + HC_SCRATCH_REG) & 0x0000ffff;
 		retry_count--;
 	}
 
+	iounmap(chip_addr);
+
 	/* Host Controller presence is detected by writing to scratch register
 	 * and reading back and checking the contents are same or not
 	 */
 	if (reg_data != 0xFACE) {
 		dev_err(&dev->dev, "scratch register mismatch %x\n", reg_data);
-		goto clean;
+		ret_status = -ENOMEM;
+		goto cleanup3;
 	}
 
 	pci_set_master(dev);
 
-	status = readl(iobase + 0x68);
-	status |= 0x900;
-	writel(status, iobase + 0x68);
+	/* configure PLX PCI chip to pass interrupts */
+#define PLX_INT_CSR_REG 0x68
+	reg_data = readl(iobase + PLX_INT_CSR_REG);
+	reg_data |= 0x900;
+	writel(reg_data, iobase + PLX_INT_CSR_REG);
 
 	dev->dev.dma_mask = NULL;
-	hcd = isp1760_register(pci_mem_phy0, length, dev->irq,
+	hcd = isp1760_register(pci_mem_phy0, memlength, dev->irq,
 		IRQF_SHARED | IRQF_DISABLED, &dev->dev, dev_name(&dev->dev),
 		devflags);
-	if (!IS_ERR(hcd)) {
-		pci_set_drvdata(dev, hcd);
-		return 0;
+	if (IS_ERR(hcd)) {
+		ret_status = -ENODEV;
+		goto cleanup3;
 	}
-clean:
-	status = -ENODEV;
+
+	/* done with PLX IO access */
 	iounmap(iobase);
-	release_mem_region(pci_mem_phy0, length);
 	release_mem_region(nxp_pci_io_base, iolength);
-	return status;
+
+	pci_set_drvdata(dev, hcd);
+	return 0;
+
+cleanup3:
+	release_mem_region(pci_mem_phy0, memlength);
+cleanup2:
+	iounmap(iobase);
+cleanup1:
+	release_mem_region(nxp_pci_io_base, iolength);
+	return ret_status;
 }
+
 static void isp1761_pci_remove(struct pci_dev *dev)
 {
 	struct usb_hcd *hcd;
@@ -255,12 +271,6 @@
 	usb_put_hcd(hcd);
 
 	pci_disable_device(dev);
-
-	iounmap(iobase);
-	iounmap(chip_addr);
-
-	release_mem_region(nxp_pci_io_base, iolength);
-	release_mem_region(pci_mem_phy0, length);
 }
 
 static void isp1761_pci_shutdown(struct pci_dev *dev)
@@ -268,12 +278,16 @@
 	printk(KERN_ERR "ips1761_pci_shutdown\n");
 }
 
-static const struct pci_device_id isp1760_plx [] = { {
-	/* handle any USB 2.0 EHCI controller */
-	PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_OTHER << 8) | (0x06 << 16)), ~0),
-		.driver_data = 0,
-},
-{ /* end: all zeroes */ }
+static const struct pci_device_id isp1760_plx [] = {
+	{
+		.class          = PCI_CLASS_BRIDGE_OTHER << 8,
+		.class_mask     = ~0,
+		.vendor		= PCI_VENDOR_ID_PLX,
+		.device		= 0x5406,
+		.subvendor	= PCI_VENDOR_ID_PLX,
+		.subdevice	= 0x9054,
+	},
+	{ }
 };
 MODULE_DEVICE_TABLE(pci, isp1760_plx);
 
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 8aa3f45..65a9609 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -589,13 +589,15 @@
 		/* also: power/overcurrent flags in roothub.a */
 	}
 
-	/* Reset USB nearly "by the book".  RemoteWakeupConnected was
-	 * saved if boot firmware (BIOS/SMM/...) told us it's connected,
-	 * or if bus glue did the same (e.g. for PCI add-in cards with
-	 * PCI PM support).
+	/* Reset USB nearly "by the book".  RemoteWakeupConnected has
+	 * to be checked in case boot firmware (BIOS/SMM/...) has set up
+	 * wakeup in a way the bus isn't aware of (e.g., legacy PCI PM).
+	 * If the bus glue detected wakeup capability then it should
+	 * already be enabled.  Either way, if wakeup should be enabled
+	 * but isn't, we'll enable it now.
 	 */
 	if ((ohci->hc_control & OHCI_CTRL_RWC) != 0
-			&& !device_may_wakeup(hcd->self.controller))
+			&& !device_can_wakeup(hcd->self.controller))
 		device_init_wakeup(hcd->self.controller, 1);
 
 	switch (ohci->hc_control & OHCI_CTRL_HCFS) {
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index a9c2ae3..8b28ae7 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -355,9 +355,9 @@
 
 		/* RWC may not be set for add-in PCI cards, since boot
 		 * firmware probably ignored them.  This transfers PCI
-		 * PM wakeup capabilities (once the PCI layer is fixed).
+		 * PM wakeup capabilities.
 		 */
-		if (device_may_wakeup(&pdev->dev))
+		if (device_can_wakeup(&pdev->dev))
 			ohci->hc_control |= OHCI_CTRL_RWC;
 	}
 #endif /* CONFIG_PM */
@@ -487,6 +487,8 @@
 
 #ifdef	CONFIG_PM
 	.suspend =	usb_hcd_pci_suspend,
+	.suspend_late =	usb_hcd_pci_suspend_late,
+	.resume_early =	usb_hcd_pci_resume_early,
 	.resume =	usb_hcd_pci_resume,
 #endif
 
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index e306ca6..100bf3d 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -106,65 +106,34 @@
 
 static struct clk *usb_clk;
 
-static int isp1301_probe(struct i2c_adapter *adap);
-static int isp1301_detach(struct i2c_client *client);
-
 static const unsigned short normal_i2c[] =
     { ISP1301_I2C_ADDR, ISP1301_I2C_ADDR + 1, I2C_CLIENT_END };
-static const unsigned short dummy_i2c_addrlist[] = { I2C_CLIENT_END };
 
-static struct i2c_client_address_data addr_data = {
-	.normal_i2c = normal_i2c,
-	.probe = dummy_i2c_addrlist,
-	.ignore = dummy_i2c_addrlist,
+static int isp1301_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	return 0;
+}
+
+static int isp1301_remove(struct i2c_client *client)
+{
+	return 0;
+}
+
+const struct i2c_device_id isp1301_id[] = {
+	{ "isp1301_pnx", 0 },
+	{ }
 };
 
 struct i2c_driver isp1301_driver = {
 	.driver = {
 		.name = "isp1301_pnx",
 	},
-	.attach_adapter = isp1301_probe,
-	.detach_client = isp1301_detach,
+	.probe = isp1301_probe,
+	.remove = isp1301_remove,
+	.id_table = isp1301_id,
 };
 
-static int isp1301_attach(struct i2c_adapter *adap, int addr, int kind)
-{
-	struct i2c_client *c;
-	int err;
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return -ENOMEM;
-
-	strlcpy(c->name, "isp1301_pnx", I2C_NAME_SIZE);
-	c->flags = 0;
-	c->addr = addr;
-	c->adapter = adap;
-	c->driver = &isp1301_driver;
-
-	err = i2c_attach_client(c);
-	if (err) {
-		kfree(c);
-		return err;
-	}
-
-	isp1301_i2c_client = c;
-
-	return 0;
-}
-
-static int isp1301_probe(struct i2c_adapter *adap)
-{
-	return i2c_probe(adap, &addr_data, isp1301_attach);
-}
-
-static int isp1301_detach(struct i2c_client *client)
-{
-	i2c_detach_client(client);
-	kfree(isp1301_i2c_client);
-	return 0;
-}
-
 static void i2c_write(u8 buf, u8 subaddr)
 {
 	char tmpbuf[2];
@@ -328,6 +297,8 @@
 	struct usb_hcd *hcd = 0;
 	struct ohci_hcd *ohci;
 	const struct hc_driver *driver = &ohci_pnx4008_hc_driver;
+	struct i2c_adapter *i2c_adap;
+	struct i2c_board_info i2c_info;
 
 	int ret = 0, irq;
 
@@ -351,9 +322,20 @@
 
 	ret = i2c_add_driver(&isp1301_driver);
 	if (ret < 0) {
-		err("failed to connect I2C to ISP1301 USB Transceiver");
+		err("failed to add ISP1301 driver");
 		goto out;
 	}
+	i2c_adap = i2c_get_adapter(2);
+	memset(&i2c_info, 0, sizeof(struct i2c_board_info));
+	strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE);
+	isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info,
+						   normal_i2c);
+	i2c_put_adapter(i2c_adap);
+	if (!isp1301_i2c_client) {
+		err("failed to connect I2C to ISP1301 USB Transceiver");
+		ret = -ENODEV;
+		goto out_i2c_driver;
+	}
 
 	isp1301_configure();
 
@@ -429,6 +411,9 @@
 out2:
 	clk_put(usb_clk);
 out1:
+	i2c_unregister_client(isp1301_i2c_client);
+	isp1301_i2c_client = NULL;
+out_i2c_driver:
 	i2c_del_driver(&isp1301_driver);
 out:
 	return ret;
@@ -445,6 +430,8 @@
 	pnx4008_unset_usb_bits();
 	clk_disable(usb_clk);
 	clk_put(usb_clk);
+	i2c_unregister_client(isp1301_i2c_client);
+	isp1301_i2c_client = NULL;
 	i2c_del_driver(&isp1301_driver);
 
 	platform_set_drvdata(pdev, NULL);
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 7ac5326..68a3017 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -91,6 +91,7 @@
 
 	int rv;
 	int is_bigendian;
+	struct device_node *np;
 
 	if (usb_disabled())
 		return -ENODEV;
@@ -147,6 +148,30 @@
 	if (rv == 0)
 		return 0;
 
+	/* by now, the 440EPx is known to show the usb_23 erratum */
+	np = of_find_compatible_node(NULL, NULL, "ibm,usb-ehci-440epx");
+
+	/* Work around: at this point ohci_run has executed, the
+	 * controller is running and everything (the root ports, etc.) is
+	 * set up.  If the ehci driver is loaded, put the ohci core in
+	 * the suspended state.  The ehci driver will bring it out of the
+	 * suspended state when/if a non-high-speed USB device is
+	 * attached to the USB host port.  If the ehci driver is not
+	 * loaded, do nothing.  request_mem_region is used to test whether
+	 * the ehci driver is loaded.
+	 */
+	if (np != NULL) {
+		if (!of_address_to_resource(np, 0, &res)) {
+			if (!request_mem_region(res.start, 0x4, hcd_name)) {
+				writel_be((readl_be(&ohci->regs->control) |
+					OHCI_USB_SUSPEND), &ohci->regs->control);
+				(void) readl_be(&ohci->regs->control);
+			} else
+				release_mem_region(res.start, 0x4);
+		} else
+			pr_debug(__FILE__ ": cannot get ehci offset from fdt\n");
+	}
+
 	iounmap(hcd->regs);
 err_ioremap:
 	irq_dispose_mapping(irq);
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index f9f134a..8dabe8e 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -201,7 +201,7 @@
 	if (!cell)
 		return -EINVAL;
 
-	hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev->dev.bus_id);
+	hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev_name(&dev->dev));
 	if (!hcd) {
 		ret = -ENOMEM;
 		goto err_usb_create_hcd;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
new file mode 100644
index 0000000..75548f7
--- /dev/null
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -0,0 +1,3985 @@
+/*
+ * Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (c) 2008 Eurotech S.p.A. <info@eurtech.it>
+ *
+ * This code is *strongly* based on EHCI-HCD code by David Brownell since
+ * the chip is quasi-EHCI compatible.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
+#include <linux/usb.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+#include "../core/hcd.h"
+
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+
+#include "oxu210hp.h"
+
+#define DRIVER_VERSION "0.0.50"
+
+/*
+ * Main defines
+ */
+
+#define oxu_dbg(oxu, fmt, args...) \
+		dev_dbg(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
+#define oxu_err(oxu, fmt, args...) \
+		dev_err(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
+#define oxu_info(oxu, fmt, args...) \
+		dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
+
+static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
+{
+	return container_of((void *) oxu, struct usb_hcd, hcd_priv);
+}
+
+static inline struct oxu_hcd *hcd_to_oxu(struct usb_hcd *hcd)
+{
+	return (struct oxu_hcd *) (hcd->hcd_priv);
+}
+
+/*
+ * Debug stuff
+ */
+
+#undef OXU_URB_TRACE
+#undef OXU_VERBOSE_DEBUG
+
+#ifdef OXU_VERBOSE_DEBUG
+#define oxu_vdbg			oxu_dbg
+#else
+#define oxu_vdbg(oxu, fmt, args...)	/* Nop */
+#endif
+
+#ifdef DEBUG
+
+static int __attribute__((__unused__))
+dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
+{
+	return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
+		label, label[0] ? " " : "", status,
+		(status & STS_ASS) ? " Async" : "",
+		(status & STS_PSS) ? " Periodic" : "",
+		(status & STS_RECL) ? " Recl" : "",
+		(status & STS_HALT) ? " Halt" : "",
+		(status & STS_IAA) ? " IAA" : "",
+		(status & STS_FATAL) ? " FATAL" : "",
+		(status & STS_FLR) ? " FLR" : "",
+		(status & STS_PCD) ? " PCD" : "",
+		(status & STS_ERR) ? " ERR" : "",
+		(status & STS_INT) ? " INT" : ""
+		);
+}
+
+static int __attribute__((__unused__))
+dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
+{
+	return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
+		label, label[0] ? " " : "", enable,
+		(enable & STS_IAA) ? " IAA" : "",
+		(enable & STS_FATAL) ? " FATAL" : "",
+		(enable & STS_FLR) ? " FLR" : "",
+		(enable & STS_PCD) ? " PCD" : "",
+		(enable & STS_ERR) ? " ERR" : "",
+		(enable & STS_INT) ? " INT" : ""
+		);
+}
+
+static const char *const fls_strings[] =
+    { "1024", "512", "256", "??" };
+
+static int dbg_command_buf(char *buf, unsigned len,
+				const char *label, u32 command)
+{
+	return scnprintf(buf, len,
+		"%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
+		label, label[0] ? " " : "", command,
+		(command & CMD_PARK) ? "park" : "(park)",
+		CMD_PARK_CNT(command),
+		(command >> 16) & 0x3f,
+		(command & CMD_LRESET) ? " LReset" : "",
+		(command & CMD_IAAD) ? " IAAD" : "",
+		(command & CMD_ASE) ? " Async" : "",
+		(command & CMD_PSE) ? " Periodic" : "",
+		fls_strings[(command >> 2) & 0x3],
+		(command & CMD_RESET) ? " Reset" : "",
+		(command & CMD_RUN) ? "RUN" : "HALT"
+		);
+}
+
+static int dbg_port_buf(char *buf, unsigned len, const char *label,
+				int port, u32 status)
+{
+	char	*sig;
+
+	/* signaling state */
+	switch (status & (3 << 10)) {
+	case 0 << 10:
+		sig = "se0";
+		break;
+	case 1 << 10:
+		sig = "k";	/* low speed */
+		break;
+	case 2 << 10:
+		sig = "j";
+		break;
+	default:
+		sig = "?";
+		break;
+	}
+
+	return scnprintf(buf, len,
+		"%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
+		label, label[0] ? " " : "", port, status,
+		(status & PORT_POWER) ? " POWER" : "",
+		(status & PORT_OWNER) ? " OWNER" : "",
+		sig,
+		(status & PORT_RESET) ? " RESET" : "",
+		(status & PORT_SUSPEND) ? " SUSPEND" : "",
+		(status & PORT_RESUME) ? " RESUME" : "",
+		(status & PORT_OCC) ? " OCC" : "",
+		(status & PORT_OC) ? " OC" : "",
+		(status & PORT_PEC) ? " PEC" : "",
+		(status & PORT_PE) ? " PE" : "",
+		(status & PORT_CSC) ? " CSC" : "",
+		(status & PORT_CONNECT) ? " CONNECT" : ""
+	    );
+}
+
+#else
+
+static inline int __attribute__((__unused__))
+dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
+{ return 0; }
+
+static inline int __attribute__((__unused__))
+dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
+{ return 0; }
+
+static inline int __attribute__((__unused__))
+dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
+{ return 0; }
+
+static inline int __attribute__((__unused__))
+dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
+{ return 0; }
+
+#endif /* DEBUG */
+
+/* functions have the "wrong" filename when they're output... */
+#define dbg_status(oxu, label, status) { \
+	char _buf[80]; \
+	dbg_status_buf(_buf, sizeof _buf, label, status); \
+	oxu_dbg(oxu, "%s\n", _buf); \
+}
+
+#define dbg_cmd(oxu, label, command) { \
+	char _buf[80]; \
+	dbg_command_buf(_buf, sizeof _buf, label, command); \
+	oxu_dbg(oxu, "%s\n", _buf); \
+}
+
+#define dbg_port(oxu, label, port, status) { \
+	char _buf[80]; \
+	dbg_port_buf(_buf, sizeof _buf, label, port, status); \
+	oxu_dbg(oxu, "%s\n", _buf); \
+}
+
+/*
+ * Module parameters
+ */
+
+/* Initial IRQ latency: faster than hw default */
+static int log2_irq_thresh;			/* 0 to 6 */
+module_param(log2_irq_thresh, int, S_IRUGO);
+MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
+
+/* Initial park setting: slower than hw default */
+static unsigned park;
+module_param(park, uint, S_IRUGO);
+MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
+
+/* For flakey hardware, ignore overcurrent indicators */
+static int ignore_oc;
+module_param(ignore_oc, bool, S_IRUGO);
+MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications");
+
+
+static void ehci_work(struct oxu_hcd *oxu);
+static int oxu_hub_control(struct usb_hcd *hcd,
+				u16 typeReq, u16 wValue, u16 wIndex,
+				char *buf, u16 wLength);
+
+/*
+ * Local functions
+ */
+
+/* Low level read/write registers functions */
+static inline u32 oxu_readl(void *base, u32 reg)
+{
+	return readl(base + reg);
+}
+
+static inline void oxu_writel(void *base, u32 reg, u32 val)
+{
+	writel(val, base + reg);
+}
+
+static inline void timer_action_done(struct oxu_hcd *oxu,
+					enum ehci_timer_action action)
+{
+	clear_bit(action, &oxu->actions);
+}
+
+static inline void timer_action(struct oxu_hcd *oxu,
+					enum ehci_timer_action action)
+{
+	if (!test_and_set_bit(action, &oxu->actions)) {
+		unsigned long t;
+
+		switch (action) {
+		case TIMER_IAA_WATCHDOG:
+			t = EHCI_IAA_JIFFIES;
+			break;
+		case TIMER_IO_WATCHDOG:
+			t = EHCI_IO_JIFFIES;
+			break;
+		case TIMER_ASYNC_OFF:
+			t = EHCI_ASYNC_JIFFIES;
+			break;
+		case TIMER_ASYNC_SHRINK:
+		default:
+			t = EHCI_SHRINK_JIFFIES;
+			break;
+		}
+		t += jiffies;
+		/* all timings except IAA watchdog can be overridden.
+		 * async queue SHRINK often precedes IAA.  while it's ready
+		 * to go OFF neither can matter, and afterwards the IO
+		 * watchdog stops unless there's still periodic traffic.
+		 */
+		if (action != TIMER_IAA_WATCHDOG
+				&& t > oxu->watchdog.expires
+				&& timer_pending(&oxu->watchdog))
+			return;
+		mod_timer(&oxu->watchdog, t);
+	}
+}
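+
+/*
+ * For example, if the SHRINK action is not already pending,
+ * timer_action(oxu, TIMER_ASYNC_SHRINK) arms the watchdog EHCI_SHRINK_JIFFIES
+ * from now, unless the watchdog is already pending with an earlier deadline;
+ * TIMER_IAA_WATCHDOG requests, by contrast, always rearm the timer.
+ */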
+
+/*
+ * handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done).  There are two failure modes:  "usec" have passed (major
+ * hardware flakeout), or the register reads as all-ones (hardware removed).
+ *
+ * That last failure should only happen in cases like physical cardbus eject
+ * before driver shutdown. But it also seems to be caused by bugs in cardbus
+ * bridge shutdown:  shutting down the bridge before the devices using it.
+ */
+static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
+					u32 mask, u32 done, int usec)
+{
+	u32 result;
+
+	do {
+		result = readl(ptr);
+		if (result == ~(u32)0)		/* card removed */
+			return -ENODEV;
+		result &= mask;
+		if (result == done)
+			return 0;
+		udelay(1);
+		usec--;
+	} while (usec > 0);
+	return -ETIMEDOUT;
+}
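+
+/*
+ * Typical use is polling a status or command bit with a microsecond timeout;
+ * e.g. the halt path below waits up to 16 * 125 usec (sixteen 125-usec
+ * microframes, i.e. 2 ms) for STS_HALT to assert.
+ */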
+
+/* Force HC to halt state from unknown (EHCI spec section 2.3) */
+static int ehci_halt(struct oxu_hcd *oxu)
+{
+	u32	temp = readl(&oxu->regs->status);
+
+	/* disable any irqs left enabled by previous code */
+	writel(0, &oxu->regs->intr_enable);
+
+	if ((temp & STS_HALT) != 0)
+		return 0;
+
+	temp = readl(&oxu->regs->command);
+	temp &= ~CMD_RUN;
+	writel(temp, &oxu->regs->command);
+	return handshake(oxu, &oxu->regs->status,
+			  STS_HALT, STS_HALT, 16 * 125);
+}
+
+/* Put TDI/ARC silicon into EHCI mode */
+static void tdi_reset(struct oxu_hcd *oxu)
+{
+	u32 __iomem *reg_ptr;
+	u32 tmp;
+
+	reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68);
+	tmp = readl(reg_ptr);
+	tmp |= 0x3;
+	writel(tmp, reg_ptr);
+}
+
+/* Reset a non-running (STS_HALT == 1) controller */
+static int ehci_reset(struct oxu_hcd *oxu)
+{
+	int	retval;
+	u32	command = readl(&oxu->regs->command);
+
+	command |= CMD_RESET;
+	dbg_cmd(oxu, "reset", command);
+	writel(command, &oxu->regs->command);
+	oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+	oxu->next_statechange = jiffies;
+	retval = handshake(oxu, &oxu->regs->command,
+			    CMD_RESET, 0, 250 * 1000);
+
+	if (retval)
+		return retval;
+
+	tdi_reset(oxu);
+
+	return retval;
+}
+
+/* Idle the controller (from running) */
+static void ehci_quiesce(struct oxu_hcd *oxu)
+{
+	u32	temp;
+
+#ifdef DEBUG
+	if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
+		BUG();
+#endif
+
+	/* wait for any schedule enables/disables to take effect */
+	temp = readl(&oxu->regs->command) << 10;
+	temp &= STS_ASS | STS_PSS;
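+	/* (shifting the command register left by 10 lines CMD_ASE/CMD_PSE up
+	 * with the STS_ASS/STS_PSS status bits, so "temp" is the status value
+	 * expected once the schedules have settled) */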
+	if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
+				temp, 16 * 125) != 0) {
+		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+		return;
+	}
+
+	/* then disable anything that's still active */
+	temp = readl(&oxu->regs->command);
+	temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
+	writel(temp, &oxu->regs->command);
+
+	/* hardware can take 16 microframes to turn off ... */
+	if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
+				0, 16 * 125) != 0) {
+		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+		return;
+	}
+}
+
+static int check_reset_complete(struct oxu_hcd *oxu, int index,
+				u32 __iomem *status_reg, int port_status)
+{
+	if (!(port_status & PORT_CONNECT)) {
+		oxu->reset_done[index] = 0;
+		return port_status;
+	}
+
+	/* if reset finished and it's still not enabled -- handoff */
+	if (!(port_status & PORT_PE)) {
+		oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n",
+				index+1);
+		return port_status;
+	} else
+		oxu_dbg(oxu, "port %d high speed\n", index + 1);
+
+	return port_status;
+}
+
+static void ehci_hub_descriptor(struct oxu_hcd *oxu,
+				struct usb_hub_descriptor *desc)
+{
+	int ports = HCS_N_PORTS(oxu->hcs_params);
+	u16 temp;
+
+	desc->bDescriptorType = 0x29;
+	desc->bPwrOn2PwrGood = 10;	/* oxu 1.0, 2.3.9 says 20ms max */
+	desc->bHubContrCurrent = 0;
+
+	desc->bNbrPorts = ports;
+	temp = 1 + (ports / 8);
+	desc->bDescLength = 7 + 2 * temp;
+
+	/* two bitmaps:  ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+	memset(&desc->bitmap[0], 0, temp);
+	memset(&desc->bitmap[temp], 0xff, temp);
+
+	temp = 0x0008;			/* per-port overcurrent reporting */
+	if (HCS_PPC(oxu->hcs_params))
+		temp |= 0x0001;		/* per-port power control */
+	else
+		temp |= 0x0002;		/* no power switching */
+	desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
+}
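+
+/*
+ * For example, with a 3-port root hub the code above yields temp = 1 + 3/8 = 1
+ * bitmap byte per map and bDescLength = 7 + 2 * 1 = 9 bytes, matching the
+ * USB 2.0 hub descriptor layout for hubs with up to 7 ports.
+ */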
+
+
+/* Allocate an OXU210HP on-chip memory data buffer
+ *
+ * An on-chip memory data buffer is required for each OXU210HP USB transfer.
+ * Each transfer descriptor has one or more on-chip memory data buffers.
+ *
+ * Data buffers are allocated from a fixed-size pool of data blocks.
+ * To minimise fragmentation and give reasonable memory utilisation,
+ * data buffers are allocated with sizes that are power-of-2 multiples
+ * of the block size, starting on an address that is a multiple of the
+ * allocated size.
+ *
+ * FIXME: callers of this function require a buffer to be allocated for
+ * len=0. This is a waste of on-chip memory and should be fixed. Then this
+ * function should be changed to not allocate a buffer for len=0.
+ */
+static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
+{
+	int n_blocks;	/* minimum blocks needed to hold len */
+	int a_blocks;	/* blocks allocated */
+	int i, j;
+
+	/* Don't allocate bigger than supported */
+	if (len > BUFFER_SIZE * BUFFER_NUM) {
+		oxu_err(oxu, "buffer too big (%d)\n", len);
+		return -ENOMEM;
+	}
+
+	spin_lock(&oxu->mem_lock);
+
+	/* Number of blocks needed to hold len */
+	n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE;
+
+	/* Round the number of blocks up to the power of 2 */
+	for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1)
+		;
+
+	/* Find a suitable available data buffer */
+	for (i = 0; i < BUFFER_NUM;
+			i += max(a_blocks, (int)oxu->db_used[i])) {
+
+		/* Check all the required blocks are available */
+		for (j = 0; j < a_blocks; j++)
+			if (oxu->db_used[i + j])
+				break;
+
+		if (j != a_blocks)
+			continue;
+
+		/* Allocate blocks found! */
+		qtd->buffer = (void *) &oxu->mem->db_pool[i];
+		qtd->buffer_dma = virt_to_phys(qtd->buffer);
+
+		qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
+		oxu->db_used[i] = a_blocks;
+
+		spin_unlock(&oxu->mem_lock);
+
+		return 0;
+	}
+
+	/* Failed */
+
+	spin_unlock(&oxu->mem_lock);
+
+	return -ENOMEM;
+}
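+
+/*
+ * Worked example of the allocator above (block size for illustration only):
+ * assuming BUFFER_SIZE is 512, a 1200-byte transfer needs n_blocks = 3 and is
+ * rounded up to a_blocks = 4, so 2048 bytes are reserved at the first position
+ * the scan finds with four free blocks in a row.
+ */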
+
+static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
+{
+	int index;
+
+	spin_lock(&oxu->mem_lock);
+
+	index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
+							 / BUFFER_SIZE;
+	oxu->db_used[index] = 0;
+	qtd->qtd_buffer_len = 0;
+	qtd->buffer_dma = 0;
+	qtd->buffer = NULL;
+
+	spin_unlock(&oxu->mem_lock);
+
+	return;
+}
+
+static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
+{
+	memset(qtd, 0, sizeof *qtd);
+	qtd->qtd_dma = dma;
+	qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
+	qtd->hw_next = EHCI_LIST_END;
+	qtd->hw_alt_next = EHCI_LIST_END;
+	INIT_LIST_HEAD(&qtd->qtd_list);
+}
+
+static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
+{
+	int index;
+
+	if (qtd->buffer)
+		oxu_buf_free(oxu, qtd);
+
+	spin_lock(&oxu->mem_lock);
+
+	index = qtd - &oxu->mem->qtd_pool[0];
+	oxu->qtd_used[index] = 0;
+
+	spin_unlock(&oxu->mem_lock);
+
+	return;
+}
+
+static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
+{
+	int i;
+	struct ehci_qtd *qtd = NULL;
+
+	spin_lock(&oxu->mem_lock);
+
+	for (i = 0; i < QTD_NUM; i++)
+		if (!oxu->qtd_used[i])
+			break;
+
+	if (i < QTD_NUM) {
+		qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
+		memset(qtd, 0, sizeof *qtd);
+
+		qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
+		qtd->hw_next = EHCI_LIST_END;
+		qtd->hw_alt_next = EHCI_LIST_END;
+		INIT_LIST_HEAD(&qtd->qtd_list);
+
+		qtd->qtd_dma = virt_to_phys(qtd);
+
+		oxu->qtd_used[i] = 1;
+	}
+
+	spin_unlock(&oxu->mem_lock);
+
+	return qtd;
+}
+
+static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	int index;
+
+	spin_lock(&oxu->mem_lock);
+
+	index = qh - &oxu->mem->qh_pool[0];
+	oxu->qh_used[index] = 0;
+
+	spin_unlock(&oxu->mem_lock);
+
+	return;
+}
+
+static void qh_destroy(struct kref *kref)
+{
+	struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
+	struct oxu_hcd *oxu = qh->oxu;
+
+	/* clean qtds first, and know this is not linked */
+	if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
+		oxu_dbg(oxu, "unused qh not empty!\n");
+		BUG();
+	}
+	if (qh->dummy)
+		oxu_qtd_free(oxu, qh->dummy);
+	oxu_qh_free(oxu, qh);
+}
+
+static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
+{
+	int i;
+	struct ehci_qh *qh = NULL;
+
+	spin_lock(&oxu->mem_lock);
+
+	for (i = 0; i < QHEAD_NUM; i++)
+		if (!oxu->qh_used[i])
+			break;
+
+	if (i < QHEAD_NUM) {
+		qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];
+		memset(qh, 0, sizeof *qh);
+
+		kref_init(&qh->kref);
+		qh->oxu = oxu;
+		qh->qh_dma = virt_to_phys(qh);
+		INIT_LIST_HEAD(&qh->qtd_list);
+
+		/* dummy td enables safe urb queuing */
+		qh->dummy = ehci_qtd_alloc(oxu);
+		if (qh->dummy == NULL) {
+			oxu_dbg(oxu, "no dummy td\n");
+			oxu->qh_used[i] = 0;
+
+			return NULL;
+		}
+
+		oxu->qh_used[i] = 1;
+	}
+
+	spin_unlock(&oxu->mem_lock);
+
+	return qh;
+}
+
+/* to share a qh (cpu threads, or hc) */
+static inline struct ehci_qh *qh_get(struct ehci_qh *qh)
+{
+	kref_get(&qh->kref);
+	return qh;
+}
+
+static inline void qh_put(struct ehci_qh *qh)
+{
+	kref_put(&qh->kref, qh_destroy);
+}
+
+static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
+{
+	int index;
+
+	spin_lock(&oxu->mem_lock);
+
+	index = murb - &oxu->murb_pool[0];
+	oxu->murb_used[index] = 0;
+
+	spin_unlock(&oxu->mem_lock);
+
+	return;
+}
+
+static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
+{
+	int i;
+	struct oxu_murb *murb = NULL;
+
+	spin_lock(&oxu->mem_lock);
+
+	for (i = 0; i < MURB_NUM; i++)
+		if (!oxu->murb_used[i])
+			break;
+
+	if (i < MURB_NUM) {
+		murb = &(oxu->murb_pool)[i];
+
+		oxu->murb_used[i] = 1;
+	}
+
+	spin_unlock(&oxu->mem_lock);
+
+	return murb;
+}
+
+/* The queue heads and transfer descriptors are managed from pools tied
+ * to each of the "per device" structures.
+ * This is the initialisation and cleanup code.
+ */
+static void ehci_mem_cleanup(struct oxu_hcd *oxu)
+{
+	kfree(oxu->murb_pool);
+	oxu->murb_pool = NULL;
+
+	if (oxu->async)
+		qh_put(oxu->async);
+	oxu->async = NULL;
+
+	del_timer(&oxu->urb_timer);
+
+	oxu->periodic = NULL;
+
+	/* shadow periodic table */
+	kfree(oxu->pshadow);
+	oxu->pshadow = NULL;
+}
+
+/* Remember to add cleanup code (above) if you add anything here.
+ */
+static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags)
+{
+	int i;
+
+	for (i = 0; i < oxu->periodic_size; i++)
+		oxu->mem->frame_list[i] = EHCI_LIST_END;
+	for (i = 0; i < QHEAD_NUM; i++)
+		oxu->qh_used[i] = 0;
+	for (i = 0; i < QTD_NUM; i++)
+		oxu->qtd_used[i] = 0;
+
+	oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags);
+	if (!oxu->murb_pool)
+		goto fail;
+
+	for (i = 0; i < MURB_NUM; i++)
+		oxu->murb_used[i] = 0;
+
+	oxu->async = oxu_qh_alloc(oxu);
+	if (!oxu->async)
+		goto fail;
+
+	oxu->periodic = (__le32 *) &oxu->mem->frame_list;
+	oxu->periodic_dma = virt_to_phys(oxu->periodic);
+
+	for (i = 0; i < oxu->periodic_size; i++)
+		oxu->periodic[i] = EHCI_LIST_END;
+
+	/* software shadow of hardware table */
+	oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags);
+	if (oxu->pshadow != NULL)
+		return 0;
+
+fail:
+	oxu_dbg(oxu, "couldn't init memory\n");
+	ehci_mem_cleanup(oxu);
+	return -ENOMEM;
+}
+
+/* Fill a qtd, returning how much of the buffer we were able to queue up.
+ */
+static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
+				int token, int maxpacket)
+{
+	int i, count;
+	u64 addr = buf;
+
+	/* one buffer entry per 4K ... first might be short or unaligned */
+	qtd->hw_buf[0] = cpu_to_le32((u32)addr);
+	qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
+	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
+	if (likely(len < count))		/* ... iff needed */
+		count = len;
+	else {
+		buf +=  0x1000;
+		buf &= ~0x0fff;
+
+		/* per-qtd limit: from 16K to 20K (best alignment) */
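+		/* With five 4K buffer pointers one qtd can map 20KB when the
+		 * buffer is page aligned; an unaligned start page contributes
+		 * only its tail, so the guaranteed minimum is just over 16KB.
+		 */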
+		for (i = 1; count < len && i < 5; i++) {
+			addr = buf;
+			qtd->hw_buf[i] = cpu_to_le32((u32)addr);
+			qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
+			buf += 0x1000;
+			if ((count + 0x1000) < len)
+				count += 0x1000;
+			else
+				count = len;
+		}
+
+		/* short packets may only terminate transfers */
+		if (count != len)
+			count -= (count % maxpacket);
+	}
+	qtd->hw_token = cpu_to_le32((count << 16) | token);
+	qtd->length = count;
+
+	return count;
+}
+
+static inline void qh_update(struct oxu_hcd *oxu,
+				struct ehci_qh *qh, struct ehci_qtd *qtd)
+{
+	/* writes to an active overlay are unsafe */
+	BUG_ON(qh->qh_state != QH_STATE_IDLE);
+
+	qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
+	qh->hw_alt_next = EHCI_LIST_END;
+
+	/* Except for control endpoints, we make hardware maintain data
+	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
+	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
+	 * ever clear it.
+	 */
+	if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
+		unsigned	is_out, epnum;
+
+		is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
+		epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
+		if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
+			qh->hw_token &= ~__constant_cpu_to_le32(QTD_TOGGLE);
+			usb_settoggle(qh->dev, epnum, is_out, 1);
+		}
+	}
+
+	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
+	wmb();
+	qh->hw_token &= __constant_cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
+}
+
+/* If it weren't for a common silicon quirk (writing the dummy into the qh
+ * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
+ * recovery (including urb dequeue) would need software changes to a QH...
+ */
+static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	struct ehci_qtd *qtd;
+
+	if (list_empty(&qh->qtd_list))
+		qtd = qh->dummy;
+	else {
+		qtd = list_entry(qh->qtd_list.next,
+				struct ehci_qtd, qtd_list);
+		/* first qtd may already be partially processed */
+		if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
+			qtd = NULL;
+	}
+
+	if (qtd)
+		qh_update(oxu, qh, qtd);
+}
+
+static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb,
+				size_t length, u32 token)
+{
+	/* count IN/OUT bytes, not SETUP (even short packets) */
+	if (likely(QTD_PID(token) != 2))
+		urb->actual_length += length - QTD_LENGTH(token);
+
+	/* don't modify error codes */
+	if (unlikely(urb->status != -EINPROGRESS))
+		return;
+
+	/* force cleanup after short read; not always an error */
+	if (unlikely(IS_SHORT_READ(token)))
+		urb->status = -EREMOTEIO;
+
+	/* serious "can't proceed" faults reported by the hardware */
+	if (token & QTD_STS_HALT) {
+		if (token & QTD_STS_BABBLE) {
+			/* FIXME "must" disable babbling device's port too */
+			urb->status = -EOVERFLOW;
+		} else if (token & QTD_STS_MMF) {
+			/* fs/ls interrupt xfer missed the complete-split */
+			urb->status = -EPROTO;
+		} else if (token & QTD_STS_DBE) {
+			urb->status = (QTD_PID(token) == 1) /* IN ? */
+				? -ENOSR  /* hc couldn't read data */
+				: -ECOMM; /* hc couldn't write data */
+		} else if (token & QTD_STS_XACT) {
+			/* timeout, bad crc, wrong PID, etc; retried */
+			if (QTD_CERR(token))
+				urb->status = -EPIPE;
+			else {
+				oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n",
+					urb->dev->devpath,
+					usb_pipeendpoint(urb->pipe),
+					usb_pipein(urb->pipe) ? "in" : "out");
+				urb->status = -EPROTO;
+			}
+		/* CERR nonzero + no errors + halt --> stall */
+		} else if (QTD_CERR(token))
+			urb->status = -EPIPE;
+		else	/* unknown */
+			urb->status = -EPROTO;
+
+		oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
+			usb_pipedevice(urb->pipe),
+			usb_pipeendpoint(urb->pipe),
+			usb_pipein(urb->pipe) ? "in" : "out",
+			token, urb->status);
+	}
+}
+
+static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb)
+__releases(oxu->lock)
+__acquires(oxu->lock)
+{
+	if (likely(urb->hcpriv != NULL)) {
+		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;
+
+		/* S-mask in a QH means it's an interrupt urb */
+		if ((qh->hw_info2 & __constant_cpu_to_le32(QH_SMASK)) != 0) {
+
+			/* ... update hc-wide periodic stats (for usbfs) */
+			oxu_to_hcd(oxu)->self.bandwidth_int_reqs--;
+		}
+		qh_put(qh);
+	}
+
+	urb->hcpriv = NULL;
+	switch (urb->status) {
+	case -EINPROGRESS:		/* success */
+		urb->status = 0;
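+		/* fall through: a successful urb keeps the cleared status */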
+	default:			/* fault */
+		break;
+	case -EREMOTEIO:		/* fault or normal */
+		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
+			urb->status = 0;
+		break;
+	case -ECONNRESET:		/* canceled */
+	case -ENOENT:
+		break;
+	}
+
+#ifdef OXU_URB_TRACE
+	oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n",
+		__func__, urb->dev->devpath, urb,
+		usb_pipeendpoint(urb->pipe),
+		usb_pipein(urb->pipe) ? "in" : "out",
+		urb->status,
+		urb->actual_length, urb->transfer_buffer_length);
+#endif
+
+	/* complete() can reenter this HCD */
+	spin_unlock(&oxu->lock);
+	usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status);
+	spin_lock(&oxu->lock);
+}
+
+static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
+static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
+
+static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
+static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
+
+#define HALT_BIT __constant_cpu_to_le32(QTD_STS_HALT)
+
+/* Process and free completed qtds for a qh, returning URBs to drivers.
+ * Chases up to qh->hw_current.  Returns number of completions called,
+ * indicating how much "real" work we did.
+ */
+static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	struct ehci_qtd *last = NULL, *end = qh->dummy;
+	struct list_head *entry, *tmp;
+	int stopped;
+	unsigned count = 0;
+	int do_status = 0;
+	u8 state;
+	struct oxu_murb *murb = NULL;
+
+	if (unlikely(list_empty(&qh->qtd_list)))
+		return count;
+
+	/* completions (or tasks on other cpus) must never clobber HALT
+	 * till we've gone through and cleaned everything up, even when
+	 * they add urbs to this qh's queue or mark them for unlinking.
+	 *
+	 * NOTE:  unlinking expects to be done in queue order.
+	 */
+	state = qh->qh_state;
+	qh->qh_state = QH_STATE_COMPLETING;
+	stopped = (state == QH_STATE_IDLE);
+
+	/* remove de-activated QTDs from front of queue.
+	 * after faults (including short reads), cleanup this urb
+	 * then let the queue advance.
+	 * if queue is stopped, handles unlinks.
+	 */
+	list_for_each_safe(entry, tmp, &qh->qtd_list) {
+		struct ehci_qtd	*qtd;
+		struct urb *urb;
+		u32 token = 0;
+
+		qtd = list_entry(entry, struct ehci_qtd, qtd_list);
+		urb = qtd->urb;
+
+		/* Clean up any state from previous QTD ...*/
+		if (last) {
+			if (likely(last->urb != urb)) {
+				if (last->urb->complete == NULL) {
+					murb = (struct oxu_murb *) last->urb;
+					last->urb = murb->main;
+					if (murb->last) {
+						ehci_urb_done(oxu, last->urb);
+						count++;
+					}
+					oxu_murb_free(oxu, murb);
+				} else {
+					ehci_urb_done(oxu, last->urb);
+					count++;
+				}
+			}
+			oxu_qtd_free(oxu, last);
+			last = NULL;
+		}
+
+		/* ignore urbs submitted during completions we reported */
+		if (qtd == end)
+			break;
+
+		/* hardware copies qtd out of qh overlay */
+		rmb();
+		token = le32_to_cpu(qtd->hw_token);
+
+		/* always clean up qtds the hc de-activated */
+		if ((token & QTD_STS_ACTIVE) == 0) {
+
+			if ((token & QTD_STS_HALT) != 0) {
+				stopped = 1;
+
+			/* magic dummy for some short reads; qh won't advance.
+			 * that silicon quirk can kick in with this dummy too.
+			 */
+			} else if (IS_SHORT_READ(token) &&
+					!(qtd->hw_alt_next & EHCI_LIST_END)) {
+				stopped = 1;
+				goto halt;
+			}
+
+		/* stop scanning when we reach qtds the hc is using */
+		} else if (likely(!stopped &&
+				HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) {
+			break;
+
+		} else {
+			stopped = 1;
+
+			if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)))
+				urb->status = -ESHUTDOWN;
+
+			/* ignore active urbs unless some previous qtd
+			 * for the urb faulted (including short read) or
+			 * its urb was canceled.  we may patch qh or qtds.
+			 */
+			if (likely(urb->status == -EINPROGRESS))
+				continue;
+
+			/* issue status after short control reads */
+			if (unlikely(do_status != 0)
+					&& QTD_PID(token) == 0 /* OUT */) {
+				do_status = 0;
+				continue;
+			}
+
+			/* token in overlay may be most current */
+			if (state == QH_STATE_IDLE
+					&& cpu_to_le32(qtd->qtd_dma)
+						== qh->hw_current)
+				token = le32_to_cpu(qh->hw_token);
+
+			/* force halt for unlinked or blocked qh, so we'll
+			 * patch the qh later and so that completions can't
+			 * activate it while we "know" it's stopped.
+			 */
+			if ((HALT_BIT & qh->hw_token) == 0) {
+halt:
+				qh->hw_token |= HALT_BIT;
+				wmb();
+			}
+		}
+
+		/* Remove it from the queue */
+		qtd_copy_status(oxu, urb->complete ?
+					urb : ((struct oxu_murb *) urb)->main,
+				qtd->length, token);
+		if ((usb_pipein(qtd->urb->pipe)) &&
+				(NULL != qtd->transfer_buffer))
+			memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
+		do_status = (urb->status == -EREMOTEIO)
+				&& usb_pipecontrol(urb->pipe);
+
+		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
+			last = list_entry(qtd->qtd_list.prev,
+					struct ehci_qtd, qtd_list);
+			last->hw_next = qtd->hw_next;
+		}
+		list_del(&qtd->qtd_list);
+		last = qtd;
+	}
+
+	/* last urb's completion might still need calling */
+	if (likely(last != NULL)) {
+		if (last->urb->complete == NULL) {
+			murb = (struct oxu_murb *) last->urb;
+			last->urb = murb->main;
+			if (murb->last) {
+				ehci_urb_done(oxu, last->urb);
+				count++;
+			}
+			oxu_murb_free(oxu, murb);
+		} else {
+			ehci_urb_done(oxu, last->urb);
+			count++;
+		}
+		oxu_qtd_free(oxu, last);
+	}
+
+	/* restore original state; caller must unlink or relink */
+	qh->qh_state = state;
+
+	/* be sure the hardware's done with the qh before refreshing
+	 * it after fault cleanup, or recovering from silicon wrongly
+	 * overlaying the dummy qtd (which reduces DMA chatter).
+	 */
+	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
+		switch (state) {
+		case QH_STATE_IDLE:
+			qh_refresh(oxu, qh);
+			break;
+		case QH_STATE_LINKED:
+			/* should be rare for periodic transfers,
+			 * except maybe high bandwidth ...
+			 */
+			if ((__constant_cpu_to_le32(QH_SMASK)
+					& qh->hw_info2) != 0) {
+				intr_deschedule(oxu, qh);
+				(void) qh_schedule(oxu, qh);
+			} else
+				unlink_async(oxu, qh);
+			break;
+		/* otherwise, unlink already started */
+		}
+	}
+
+	return count;
+}
+
+/* High bandwidth multiplier, as encoded in highspeed endpoint descriptors */
+#define hb_mult(wMaxPacketSize)		(1 + (((wMaxPacketSize) >> 11) & 0x03))
+/* ... and packet size, for any kind of endpoint descriptor */
+#define max_packet(wMaxPacketSize)	((wMaxPacketSize) & 0x07ff)
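+/* In wMaxPacketSize, bits 10:0 hold the packet size; for high speed high
+ * bandwidth endpoints, bits 12:11 hold the number of additional transactions
+ * per microframe, hence the "1 +" in hb_mult().
+ */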
+
+/* Reverse of qh_urb_transaction: free a list of TDs.
+ * used for cleanup after errors, before HC sees an URB's TDs.
+ */
+static void qtd_list_free(struct oxu_hcd *oxu,
+				struct urb *urb, struct list_head *qtd_list)
+{
+	struct list_head *entry, *temp;
+
+	list_for_each_safe(entry, temp, qtd_list) {
+		struct ehci_qtd	*qtd;
+
+		qtd = list_entry(entry, struct ehci_qtd, qtd_list);
+		list_del(&qtd->qtd_list);
+		oxu_qtd_free(oxu, qtd);
+	}
+}
+
+/* Create a list of filled qtds for this URB; won't link into qh.
+ */
+static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
+						struct urb *urb,
+						struct list_head *head,
+						gfp_t flags)
+{
+	struct ehci_qtd	*qtd, *qtd_prev;
+	dma_addr_t buf;
+	int len, maxpacket;
+	int is_input;
+	u32 token;
+	void *transfer_buf = NULL;
+	int ret;
+
+	/*
+	 * URBs map to sequences of QTDs: one logical transaction
+	 */
+	qtd = ehci_qtd_alloc(oxu);
+	if (unlikely(!qtd))
+		return NULL;
+	list_add_tail(&qtd->qtd_list, head);
+	qtd->urb = urb;
+
+	token = QTD_STS_ACTIVE;
+	token |= (EHCI_TUNE_CERR << 10);
+	/* for split transactions, SplitXState initialized to zero */
+
+	len = urb->transfer_buffer_length;
+	is_input = usb_pipein(urb->pipe);
+	if (!urb->transfer_buffer && urb->transfer_buffer_length && is_input)
+		urb->transfer_buffer = phys_to_virt(urb->transfer_dma);
+
+	if (usb_pipecontrol(urb->pipe)) {
+		/* SETUP pid */
+		ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
+		if (ret)
+			goto cleanup;
+
+		qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest),
+				token | (2 /* "setup" */ << 8), 8);
+		memcpy(qtd->buffer, qtd->urb->setup_packet,
+				sizeof(struct usb_ctrlrequest));
+
+		/* ... and always at least one more pid */
+		token ^= QTD_TOGGLE;
+		qtd_prev = qtd;
+		qtd = ehci_qtd_alloc(oxu);
+		if (unlikely(!qtd))
+			goto cleanup;
+		qtd->urb = urb;
+		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
+		list_add_tail(&qtd->qtd_list, head);
+
+		/* for zero length DATA stages, STATUS is always IN */
+		if (len == 0)
+			token |= (1 /* "in" */ << 8);
+	}
+
+	/*
+	 * Data transfer stage: buffer setup
+	 */
+
+	ret = oxu_buf_alloc(oxu, qtd, len);
+	if (ret)
+		goto cleanup;
+
+	buf = qtd->buffer_dma;
+	transfer_buf = urb->transfer_buffer;
+
+	if (!is_input)
+		memcpy(qtd->buffer, qtd->urb->transfer_buffer, len);
+
+	if (is_input)
+		token |= (1 /* "in" */ << 8);
+	/* else it's already initted to "out" pid (0 << 8) */
+
+	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
+
+	/*
+	 * buffer gets wrapped in one or more qtds;
+	 * last one may be "short" (including zero len)
+	 * and may serve as a control status ack
+	 */
+	for (;;) {
+		int this_qtd_len;
+
+		this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket);
+		qtd->transfer_buffer = transfer_buf;
+		len -= this_qtd_len;
+		buf += this_qtd_len;
+		transfer_buf += this_qtd_len;
+		if (is_input)
+			qtd->hw_alt_next = oxu->async->hw_alt_next;
+
+		/* qh makes control packets use qtd toggle; maybe switch it */
+		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
+			token ^= QTD_TOGGLE;
+
+		if (likely(len <= 0))
+			break;
+
+		qtd_prev = qtd;
+		qtd = ehci_qtd_alloc(oxu);
+		if (unlikely(!qtd))
+			goto cleanup;
+		if (likely(len > 0)) {
+			ret = oxu_buf_alloc(oxu, qtd, len);
+			if (ret)
+				goto cleanup;
+		}
+		qtd->urb = urb;
+		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
+		list_add_tail(&qtd->qtd_list, head);
+	}
+
+	/* unless the bulk/interrupt caller wants a chance to clean
+	 * up after short reads, hc should advance qh past this urb
+	 */
+	if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
+				|| usb_pipecontrol(urb->pipe)))
+		qtd->hw_alt_next = EHCI_LIST_END;
+
+	/*
+	 * control requests may need a terminating data "status" ack;
+	 * bulk ones may need a terminating short packet (zero length).
+	 */
+	if (likely(urb->transfer_buffer_length != 0)) {
+		int	one_more = 0;
+
+		if (usb_pipecontrol(urb->pipe)) {
+			one_more = 1;
+			token ^= 0x0100;	/* "in" <--> "out"  */
+			token |= QTD_TOGGLE;	/* force DATA1 */
+		} else if (usb_pipebulk(urb->pipe)
+				&& (urb->transfer_flags & URB_ZERO_PACKET)
+				&& !(urb->transfer_buffer_length % maxpacket)) {
+			one_more = 1;
+		}
+		if (one_more) {
+			qtd_prev = qtd;
+			qtd = ehci_qtd_alloc(oxu);
+			if (unlikely(!qtd))
+				goto cleanup;
+			qtd->urb = urb;
+			qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
+			list_add_tail(&qtd->qtd_list, head);
+
+			/* never any data in such packets */
+			qtd_fill(qtd, 0, 0, token, 0);
+		}
+	}
+
+	/* by default, enable interrupt on urb completion */
+	qtd->hw_token |= __constant_cpu_to_le32(QTD_IOC);
+	return head;
+
+cleanup:
+	qtd_list_free(oxu, urb, head);
+	return NULL;
+}
+
+/* Each QH holds a qtd list; a QH is used for everything except iso.
+ *
+ * For interrupt urbs, the scheduler must set the microframe scheduling
+ * mask(s) each time the QH gets scheduled.  For highspeed, that's
+ * just one microframe in the s-mask.  For split interrupt transactions
+ * there are additional complications: c-mask, maybe FSTNs.
+ */
+static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
+				struct urb *urb, gfp_t flags)
+{
+	struct ehci_qh *qh = oxu_qh_alloc(oxu);
+	u32 info1 = 0, info2 = 0;
+	int is_input, type;
+	int maxp = 0;
+
+	if (!qh)
+		return qh;
+
+	/*
+	 * init endpoint/device data for this QH
+	 */
+	info1 |= usb_pipeendpoint(urb->pipe) << 8;
+	info1 |= usb_pipedevice(urb->pipe) << 0;
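+	/* info1/info2 become hw_info1/hw_info2, the queue head's endpoint
+	 * characteristics and endpoint capabilities dwords.
+	 */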
+
+	is_input = usb_pipein(urb->pipe);
+	type = usb_pipetype(urb->pipe);
+	maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+
+	/* Compute interrupt scheduling parameters just once, and save.
+	 * - allowing for high bandwidth, how many nsec/uframe are used?
+	 * - split transactions need a second CSPLIT uframe; same question
+	 * - splits also need a schedule gap (for full/low speed I/O)
+	 * - qh has a polling interval
+	 *
+	 * For control/bulk requests, the HC or TT handles these.
+	 */
+	if (type == PIPE_INTERRUPT) {
+		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
+								is_input, 0,
+				hb_mult(maxp) * max_packet(maxp)));
+		qh->start = NO_FRAME;
+
+		if (urb->dev->speed == USB_SPEED_HIGH) {
+			qh->c_usecs = 0;
+			qh->gap_uf = 0;
+
+			qh->period = urb->interval >> 3;
+			if (qh->period == 0 && urb->interval != 1) {
+				/* NOTE interval 2 or 4 uframes could work.
+				 * But interval 1 scheduling is simpler, and
+				 * includes high bandwidth.
+				 */
+				dbg("intr period %d uframes, NYET!",
+						urb->interval);
+				goto done;
+			}
+		} else {
+			struct usb_tt	*tt = urb->dev->tt;
+			int		think_time;
+
+			/* gap is f(FS/LS transfer times) */
+			qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
+					is_input, 0, maxp) / (125 * 1000);
+
+			/* FIXME this just approximates SPLIT/CSPLIT times */
+			if (is_input) {		/* SPLIT, gap, CSPLIT+DATA */
+				qh->c_usecs = qh->usecs + HS_USECS(0);
+				qh->usecs = HS_USECS(1);
+			} else {		/* SPLIT+DATA, gap, CSPLIT */
+				qh->usecs += HS_USECS(1);
+				qh->c_usecs = HS_USECS(0);
+			}
+
+			think_time = tt ? tt->think_time : 0;
+			qh->tt_usecs = NS_TO_US(think_time +
+					usb_calc_bus_time(urb->dev->speed,
+					is_input, 0, max_packet(maxp)));
+			qh->period = urb->interval;
+		}
+	}
+
+	/* support for tt scheduling, and access to toggles */
+	qh->dev = urb->dev;
+
+	/* using TT? */
+	switch (urb->dev->speed) {
+	case USB_SPEED_LOW:
+		info1 |= (1 << 12);	/* EPS "low" */
+		/* FALL THROUGH */
+
+	case USB_SPEED_FULL:
+		/* EPS 0 means "full" */
+		if (type != PIPE_INTERRUPT)
+			info1 |= (EHCI_TUNE_RL_TT << 28);
+		if (type == PIPE_CONTROL) {
+			info1 |= (1 << 27);	/* for TT */
+			info1 |= 1 << 14;	/* toggle from qtd */
+		}
+		info1 |= maxp << 16;
+
+		info2 |= (EHCI_TUNE_MULT_TT << 30);
+		info2 |= urb->dev->ttport << 23;
+
+		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */
+
+		break;
+
+	case USB_SPEED_HIGH:		/* no TT involved */
+		info1 |= (2 << 12);	/* EPS "high" */
+		if (type == PIPE_CONTROL) {
+			info1 |= (EHCI_TUNE_RL_HS << 28);
+			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
+			info1 |= 1 << 14;	/* toggle from qtd */
+			info2 |= (EHCI_TUNE_MULT_HS << 30);
+		} else if (type == PIPE_BULK) {
+			info1 |= (EHCI_TUNE_RL_HS << 28);
+			info1 |= 512 << 16;	/* usb2 fixed maxpacket */
+			info2 |= (EHCI_TUNE_MULT_HS << 30);
+		} else {		/* PIPE_INTERRUPT */
+			info1 |= max_packet(maxp) << 16;
+			info2 |= hb_mult(maxp) << 30;
+		}
+		break;
+	default:
+		dbg("bogus dev %p speed %d", urb->dev, urb->dev->speed);
+done:
+		qh_put(qh);
+		return NULL;
+	}
+
+	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */
+
+	/* init as live, toggle clear, advance to dummy */
+	qh->qh_state = QH_STATE_IDLE;
+	qh->hw_info1 = cpu_to_le32(info1);
+	qh->hw_info2 = cpu_to_le32(info2);
+	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
+	qh_refresh(oxu, qh);
+	return qh;
+}
+
+/* Move qh (and its qtds) onto async queue; maybe enable queue.
+ */
+static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	__le32 dma = QH_NEXT(qh->qh_dma);
+	struct ehci_qh *head;
+
+	/* (re)start the async schedule? */
+	head = oxu->async;
+	timer_action_done(oxu, TIMER_ASYNC_OFF);
+	if (!head->qh_next.qh) {
+		u32	cmd = readl(&oxu->regs->command);
+
+		if (!(cmd & CMD_ASE)) {
+			/* in case a clear of CMD_ASE didn't take yet */
+			(void)handshake(oxu, &oxu->regs->status,
+					STS_ASS, 0, 150);
+			cmd |= CMD_ASE | CMD_RUN;
+			writel(cmd, &oxu->regs->command);
+			oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
+			/* posted write need not be known to HC yet ... */
+		}
+	}
+
+	/* clear halt and/or toggle; and maybe recover from silicon quirk */
+	if (qh->qh_state == QH_STATE_IDLE)
+		qh_refresh(oxu, qh);
+
+	/* splice right after start */
+	qh->qh_next = head->qh_next;
+	qh->hw_next = head->hw_next;
+	wmb();
+
+	head->qh_next.qh = qh;
+	head->hw_next = dma;
+
+	qh->qh_state = QH_STATE_LINKED;
+	/* qtd completions reported later by interrupt */
+}
+
+#define	QH_ADDR_MASK	__constant_cpu_to_le32(0x7f)
+
+/*
+ * For control/bulk/interrupt, return QH with these TDs appended.
+ * Allocates and initializes the QH if necessary.
+ * Returns null if it can't allocate a QH it needs to.
+ * If the QH has TDs (urbs) already, that's great.
+ */
+static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu,
+				struct urb *urb, struct list_head *qtd_list,
+				int epnum, void	**ptr)
+{
+	struct ehci_qh *qh = NULL;
+
+	qh = (struct ehci_qh *) *ptr;
+	if (unlikely(qh == NULL)) {
+		/* can't sleep here, we have oxu->lock... */
+		qh = qh_make(oxu, urb, GFP_ATOMIC);
+		*ptr = qh;
+	}
+	if (likely(qh != NULL)) {
+		struct ehci_qtd	*qtd;
+
+		if (unlikely(list_empty(qtd_list)))
+			qtd = NULL;
+		else
+			qtd = list_entry(qtd_list->next, struct ehci_qtd,
+					qtd_list);
+
+		/* control qh may need patching ... */
+		if (unlikely(epnum == 0)) {
+
+			/* usb_reset_device() briefly reverts to address 0 */
+			if (usb_pipedevice(urb->pipe) == 0)
+				qh->hw_info1 &= ~QH_ADDR_MASK;
+		}
+
+		/* just one way to queue requests: swap with the dummy qtd.
+		 * only hc or qh_refresh() ever modify the overlay.
+		 */
+		if (likely(qtd != NULL)) {
+			struct ehci_qtd	*dummy;
+			dma_addr_t dma;
+			__le32 token;
+
+			/* to avoid racing the HC, use the dummy td instead of
+			 * the first td of our list (becomes new dummy).  both
+			 * tds stay deactivated until we're done, when the
+			 * HC is allowed to fetch the old dummy (4.10.2).
+			 */
+			token = qtd->hw_token;
+			qtd->hw_token = HALT_BIT;
+			wmb();
+			dummy = qh->dummy;
+
+			dma = dummy->qtd_dma;
+			*dummy = *qtd;
+			dummy->qtd_dma = dma;
+
+			list_del(&qtd->qtd_list);
+			list_add(&dummy->qtd_list, qtd_list);
+			list_splice(qtd_list, qh->qtd_list.prev);
+
+			ehci_qtd_init(qtd, qtd->qtd_dma);
+			qh->dummy = qtd;
+
+			/* hc must see the new dummy at list end */
+			dma = qtd->qtd_dma;
+			qtd = list_entry(qh->qtd_list.prev,
+					struct ehci_qtd, qtd_list);
+			qtd->hw_next = QTD_NEXT(dma);
+
+			/* let the hc process these next qtds */
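+			/* 0x80 is QTD_STS_ACTIVE: write the token with ACTIVE
+			 * cleared first, then set it after the barrier so the
+			 * HC only sees the dummy once it is fully written.
+			 */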
+			dummy->hw_token = (token & ~(0x80));
+			wmb();
+			dummy->hw_token = token;
+
+			urb->hcpriv = qh_get(qh);
+		}
+	}
+	return qh;
+}
+
+static int submit_async(struct oxu_hcd	*oxu, struct urb *urb,
+			struct list_head *qtd_list, gfp_t mem_flags)
+{
+	struct ehci_qtd	*qtd;
+	int epnum;
+	unsigned long flags;
+	struct ehci_qh *qh = NULL;
+	int rc = 0;
+
+	qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
+	epnum = urb->ep->desc.bEndpointAddress;
+
+#ifdef OXU_URB_TRACE
+	oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+		__func__, urb->dev->devpath, urb,
+		epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
+		urb->transfer_buffer_length,
+		qtd, urb->ep->hcpriv);
+#endif
+
+	spin_lock_irqsave(&oxu->lock, flags);
+	if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
+			       &oxu_to_hcd(oxu)->flags))) {
+		rc = -ESHUTDOWN;
+		goto done;
+	}
+
+	qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
+	if (unlikely(qh == NULL)) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	/* Control/bulk operations through TTs don't need scheduling,
+	 * the HC and TT handle it when the TT has a buffer ready.
+	 */
+	if (likely(qh->qh_state == QH_STATE_IDLE))
+		qh_link_async(oxu, qh_get(qh));
+done:
+	spin_unlock_irqrestore(&oxu->lock, flags);
+	if (unlikely(qh == NULL))
+		qtd_list_free(oxu, urb, qtd_list);
+	return rc;
+}
+
+/* The async qh for the qtds being reclaimed are now unlinked from the HC */
+
+static void end_unlink_async(struct oxu_hcd *oxu)
+{
+	struct ehci_qh *qh = oxu->reclaim;
+	struct ehci_qh *next;
+
+	timer_action_done(oxu, TIMER_IAA_WATCHDOG);
+
+	qh->qh_state = QH_STATE_IDLE;
+	qh->qh_next.qh = NULL;
+	qh_put(qh);			/* refcount from reclaim */
+
+	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
+	next = qh->reclaim;
+	oxu->reclaim = next;
+	oxu->reclaim_ready = 0;
+	qh->reclaim = NULL;
+
+	qh_completions(oxu, qh);
+
+	if (!list_empty(&qh->qtd_list)
+			&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
+		qh_link_async(oxu, qh);
+	else {
+		qh_put(qh);		/* refcount from async list */
+
+		/* it's not free to turn the async schedule on/off; leave it
+		 * active but idle for a while once it empties.
+		 */
+		if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)
+				&& oxu->async->qh_next.qh == NULL)
+			timer_action(oxu, TIMER_ASYNC_OFF);
+	}
+
+	if (next) {
+		oxu->reclaim = NULL;
+		start_unlink_async(oxu, next);
+	}
+}
+
+/* makes sure the async qh will become idle */
+/* caller must own oxu->lock */
+
+static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	int cmd = readl(&oxu->regs->command);
+	struct ehci_qh *prev;
+
+#ifdef DEBUG
+	assert_spin_locked(&oxu->lock);
+	if (oxu->reclaim || (qh->qh_state != QH_STATE_LINKED
+				&& qh->qh_state != QH_STATE_UNLINK_WAIT))
+		BUG();
+#endif
+
+	/* stop async schedule right now? */
+	if (unlikely(qh == oxu->async)) {
+		/* can't get here without STS_ASS set */
+		if (oxu_to_hcd(oxu)->state != HC_STATE_HALT
+				&& !oxu->reclaim) {
+			/* ... and CMD_IAAD clear */
+			writel(cmd & ~CMD_ASE, &oxu->regs->command);
+			wmb();
+			/* handshake later, if we need to */
+			timer_action_done(oxu, TIMER_ASYNC_OFF);
+		}
+		return;
+	}
+
+	qh->qh_state = QH_STATE_UNLINK;
+	oxu->reclaim = qh = qh_get(qh);
+
+	prev = oxu->async;
+	while (prev->qh_next.qh != qh)
+		prev = prev->qh_next.qh;
+
+	prev->hw_next = qh->hw_next;
+	prev->qh_next = qh->qh_next;
+	wmb();
+
+	if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) {
+		/* if (unlikely(qh->reclaim != 0))
+		 *	this will recurse, probably not much
+		 */
+		end_unlink_async(oxu);
+		return;
+	}
+
+	oxu->reclaim_ready = 0;
+	cmd |= CMD_IAAD;
+	writel(cmd, &oxu->regs->command);
+	(void) readl(&oxu->regs->command);
+	timer_action(oxu, TIMER_IAA_WATCHDOG);
+}
+
+static void scan_async(struct oxu_hcd *oxu)
+{
+	struct ehci_qh *qh;
+	enum ehci_timer_action action = TIMER_IO_WATCHDOG;
+
+	if (!++(oxu->stamp))
+		oxu->stamp++;
+	timer_action_done(oxu, TIMER_ASYNC_SHRINK);
+rescan:
+	qh = oxu->async->qh_next.qh;
+	if (likely(qh != NULL)) {
+		do {
+			/* clean any finished work for this qh */
+			if (!list_empty(&qh->qtd_list)
+					&& qh->stamp != oxu->stamp) {
+				int temp;
+
+				/* unlinks could happen here; completion
+				 * reporting drops the lock.  rescan using
+				 * the latest schedule, but don't rescan
+				 * qhs we already finished (no looping).
+				 */
+				qh = qh_get(qh);
+				qh->stamp = oxu->stamp;
+				temp = qh_completions(oxu, qh);
+				qh_put(qh);
+				if (temp != 0)
+					goto rescan;
+			}
+
+			/* unlink idle entries, reducing HC PCI usage as well
+			 * as HCD schedule-scanning costs.  delay for any qh
+			 * we just scanned, there's a not-unusual case that it
+			 * doesn't stay idle for long.
+			 * (plus, avoids some kind of re-activation race.)
+			 */
+			if (list_empty(&qh->qtd_list)) {
+				if (qh->stamp == oxu->stamp)
+					action = TIMER_ASYNC_SHRINK;
+				else if (!oxu->reclaim
+					    && qh->qh_state == QH_STATE_LINKED)
+					start_unlink_async(oxu, qh);
+			}
+
+			qh = qh->qh_next.qh;
+		} while (qh);
+	}
+	if (action == TIMER_ASYNC_SHRINK)
+		timer_action(oxu, TIMER_ASYNC_SHRINK);
+}
+
+/*
+ * periodic_next_shadow - return "next" pointer on shadow list
+ * @periodic: host pointer to qh/itd/sitd
+ * @tag: hardware tag for type of this record
+ */
+static union ehci_shadow *periodic_next_shadow(union ehci_shadow *periodic,
+						__le32 tag)
+{
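+	/* No iso (itd/sitd) support in this driver, so a QH is the only
+	 * record type we expect to find on the periodic shadow list.
+	 */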
+	switch (tag) {
+	default:
+	case Q_TYPE_QH:
+		return &periodic->qh->qh_next;
+	}
+}
+
+/* caller must hold oxu->lock */
+static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr)
+{
+	union ehci_shadow *prev_p = &oxu->pshadow[frame];
+	__le32 *hw_p = &oxu->periodic[frame];
+	union ehci_shadow here = *prev_p;
+
+	/* find predecessor of "ptr"; hw and shadow lists are in sync */
+	while (here.ptr && here.ptr != ptr) {
+		prev_p = periodic_next_shadow(prev_p, Q_NEXT_TYPE(*hw_p));
+		hw_p = here.hw_next;
+		here = *prev_p;
+	}
+	/* an interrupt entry (at list end) could have been shared */
+	if (!here.ptr)
+		return;
+
+	/* update shadow and hardware lists ... the old "next" pointers
+	 * from ptr may still be in use, the caller updates them.
+	 */
+	*prev_p = *periodic_next_shadow(&here, Q_NEXT_TYPE(*hw_p));
+	*hw_p = *here.hw_next;
+}
+
+/* how many of the uframe's 125 usecs are allocated? */
+static unsigned short periodic_usecs(struct oxu_hcd *oxu,
+					unsigned frame, unsigned uframe)
+{
+	__le32 *hw_p = &oxu->periodic[frame];
+	union ehci_shadow *q = &oxu->pshadow[frame];
+	unsigned usecs = 0;
+
+	while (q->ptr) {
+		switch (Q_NEXT_TYPE(*hw_p)) {
+		case Q_TYPE_QH:
+		default:
+			/* is it in the S-mask? */
+			if (q->qh->hw_info2 & cpu_to_le32(1 << uframe))
+				usecs += q->qh->usecs;
+			/* ... or C-mask? */
+			if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
+				usecs += q->qh->c_usecs;
+			hw_p = &q->qh->hw_next;
+			q = &q->qh->qh_next;
+			break;
+		}
+	}
+#ifdef DEBUG
+	if (usecs > 100)
+		oxu_err(oxu, "uframe %d sched overrun: %d usecs\n",
+						frame * 8 + uframe, usecs);
+#endif
+	return usecs;
+}
+
+static int enable_periodic(struct oxu_hcd *oxu)
+{
+	u32 cmd;
+	int status;
+
+	/* has clearing PSE taken effect yet?
+	 * takes effect only at frame boundaries...
+	 */
+	status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
+	if (status != 0) {
+		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+		return status;
+	}
+
+	cmd = readl(&oxu->regs->command) | CMD_PSE;
+	writel(cmd, &oxu->regs->command);
+	/* posted write ... PSS happens later */
+	oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
+
+	/* make sure ehci_work scans these */
+	oxu->next_uframe = readl(&oxu->regs->frame_index)
+		% (oxu->periodic_size << 3);
+	return 0;
+}
+
+static int disable_periodic(struct oxu_hcd *oxu)
+{
+	u32 cmd;
+	int status;
+
+	/* has setting PSE taken effect yet?
+	 * takes effect only at frame boundaries...
+	 */
+	status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
+	if (status != 0) {
+		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
+		return status;
+	}
+
+	cmd = readl(&oxu->regs->command) & ~CMD_PSE;
+	writel(cmd, &oxu->regs->command);
+	/* posted write ... */
+
+	oxu->next_uframe = -1;
+	return 0;
+}
+
+/* periodic schedule slots have iso tds (normal or split) first, then a
+ * sparse tree for active interrupt transfers.
+ *
+ * this just links in a qh; caller guarantees uframe masks are set right.
+ * no FSTN support (yet; oxu 0.96+)
+ */
+static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	unsigned i;
+	unsigned period = qh->period;
+
+	dev_dbg(&qh->dev->dev,
+		"link qh%d-%04x/%p start %d [%d/%d us]\n",
+		period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
+		qh, qh->start, qh->usecs, qh->c_usecs);
+
+	/* high bandwidth, or otherwise every microframe */
+	if (period == 0)
+		period = 1;
+
+	for (i = qh->start; i < oxu->periodic_size; i += period) {
+		union ehci_shadow	*prev = &oxu->pshadow[i];
+		__le32			*hw_p = &oxu->periodic[i];
+		union ehci_shadow	here = *prev;
+		__le32			type = 0;
+
+		/* skip the iso nodes at list head */
+		while (here.ptr) {
+			type = Q_NEXT_TYPE(*hw_p);
+			if (type == Q_TYPE_QH)
+				break;
+			prev = periodic_next_shadow(prev, type);
+			hw_p = &here.qh->hw_next;
+			here = *prev;
+		}
+
+		/* sorting each branch by period (slow-->fast)
+		 * enables sharing interior tree nodes
+		 */
+		while (here.ptr && qh != here.qh) {
+			if (qh->period > here.qh->period)
+				break;
+			prev = &here.qh->qh_next;
+			hw_p = &here.qh->hw_next;
+			here = *prev;
+		}
+		/* link in this qh, unless some earlier pass did that */
+		if (qh != here.qh) {
+			qh->qh_next = here;
+			if (here.qh)
+				qh->hw_next = *hw_p;
+			wmb();
+			prev->qh = qh;
+			*hw_p = QH_NEXT(qh->qh_dma);
+		}
+	}
+	qh->qh_state = QH_STATE_LINKED;
+	qh_get(qh);
+
+	/* update per-qh bandwidth for usbfs */
+	oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period
+		? ((qh->usecs + qh->c_usecs) / qh->period)
+		: (qh->usecs * 8);
+
+	/* maybe enable periodic schedule processing */
+	if (!oxu->periodic_sched++)
+		return enable_periodic(oxu);
+
+	return 0;
+}
+
+static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	unsigned i;
+	unsigned period;
+
+	/* FIXME:
+	 *   IF this isn't high speed
+	 *   and this qh is active in the current uframe
+	 *   (and overlay token SplitXstate is false?)
+	 * THEN
+	 *   qh->hw_info1 |= __constant_cpu_to_le32(1 << 7 "ignore");
+	 */
+
+	/* high bandwidth, or otherwise part of every microframe */
+	period = qh->period;
+	if (period == 0)
+		period = 1;
+
+	for (i = qh->start; i < oxu->periodic_size; i += period)
+		periodic_unlink(oxu, i, qh);
+
+	/* update per-qh bandwidth for usbfs */
+	oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period
+		? ((qh->usecs + qh->c_usecs) / qh->period)
+		: (qh->usecs * 8);
+
+	dev_dbg(&qh->dev->dev,
+		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
+		qh->period,
+		le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
+		qh, qh->start, qh->usecs, qh->c_usecs);
+
+	/* qh->qh_next still "live" to HC */
+	qh->qh_state = QH_STATE_UNLINK;
+	qh->qh_next.ptr = NULL;
+	qh_put(qh);
+
+	/* maybe turn off periodic schedule */
+	oxu->periodic_sched--;
+	if (!oxu->periodic_sched)
+		(void) disable_periodic(oxu);
+}
+
+static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	unsigned wait;
+
+	qh_unlink_periodic(oxu, qh);
+
+	/* simple/paranoid:  always delay, expecting the HC needs to read
+	 * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
+	 * expect khubd to clean up after any CSPLITs we won't issue.
+	 * active high speed queues may need bigger delays...
+	 */
+	if (list_empty(&qh->qtd_list)
+		|| (__constant_cpu_to_le32(QH_CMASK) & qh->hw_info2) != 0)
+		wait = 2;
+	else
+		wait = 55;	/* worst case: 3 * 1024 */
+
+	udelay(wait);
+	qh->qh_state = QH_STATE_IDLE;
+	qh->hw_next = EHCI_LIST_END;
+	wmb();
+}
+
+static int check_period(struct oxu_hcd *oxu,
+			unsigned frame, unsigned uframe,
+			unsigned period, unsigned usecs)
+{
+	int claimed;
+
+	/* complete split running into next frame?
+	 * given FSTN support, we could sometimes check...
+	 */
+	if (uframe >= 8)
+		return 0;
+
+	/*
+	 * 80% periodic == 100 usec/uframe available
+	 * convert "usecs we need" to "max already claimed"
+	 */
+	usecs = 100 - usecs;
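+	/* e.g. a transfer needing 30 usecs fits only in uframes where no
+	 * more than 70 usecs are already claimed
+	 */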
+
+	/* we "know" 2 and 4 uframe intervals were rejected; so
+	 * for period 0, check _every_ microframe in the schedule.
+	 */
+	if (unlikely(period == 0)) {
+		do {
+			for (uframe = 0; uframe < 7; uframe++) {
+				claimed = periodic_usecs(oxu, frame, uframe);
+				if (claimed > usecs)
+					return 0;
+			}
+		} while ((frame += 1) < oxu->periodic_size);
+
+	/* just check the specified uframe, at that period */
+	} else {
+		do {
+			claimed = periodic_usecs(oxu, frame, uframe);
+			if (claimed > usecs)
+				return 0;
+		} while ((frame += period) < oxu->periodic_size);
+	}
+
+	return 1;
+}
+
+static int check_intr_schedule(struct oxu_hcd	*oxu,
+				unsigned frame, unsigned uframe,
+				const struct ehci_qh *qh, __le32 *c_maskp)
+{
+	int retval = -ENOSPC;
+
+	if (qh->c_usecs && uframe >= 6)		/* FSTN territory? */
+		goto done;
+
+	if (!check_period(oxu, frame, uframe, qh->period, qh->usecs))
+		goto done;
+	if (!qh->c_usecs) {
+		retval = 0;
+		*c_maskp = 0;
+		goto done;
+	}
+
+done:
+	return retval;
+}
+
+/* "first fit" scheduling policy used the first time through,
+ * or when the previous schedule slot can't be re-used.
+ */
+static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	int		status;
+	unsigned	uframe;
+	__le32		c_mask;
+	unsigned	frame;		/* 0..(qh->period - 1), or NO_FRAME */
+
+	qh_refresh(oxu, qh);
+	qh->hw_next = EHCI_LIST_END;
+	frame = qh->start;
+
+	/* reuse the previous schedule slots, if we can */
+	if (frame < qh->period) {
+		uframe = ffs(le32_to_cpup(&qh->hw_info2) & QH_SMASK);
+		status = check_intr_schedule(oxu, frame, --uframe,
+				qh, &c_mask);
+	} else {
+		uframe = 0;
+		c_mask = 0;
+		status = -ENOSPC;
+	}
+
+	/* else scan the schedule to find a group of slots such that all
+	 * uframes have enough periodic bandwidth available.
+	 */
+	if (status) {
+		/* "normal" case, uframing flexible except with splits */
+		if (qh->period) {
+			frame = qh->period - 1;
+			do {
+				for (uframe = 0; uframe < 8; uframe++) {
+					status = check_intr_schedule(oxu,
+							frame, uframe, qh,
+							&c_mask);
+					if (status == 0)
+						break;
+				}
+			} while (status && frame--);
+
+		/* qh->period == 0 means every uframe */
+		} else {
+			frame = 0;
+			status = check_intr_schedule(oxu, 0, 0, qh, &c_mask);
+		}
+		if (status)
+			goto done;
+		qh->start = frame;
+
+		/* reset S-frame and (maybe) C-frame masks */
+		qh->hw_info2 &= __constant_cpu_to_le32(~(QH_CMASK | QH_SMASK));
+		qh->hw_info2 |= qh->period
+			? cpu_to_le32(1 << uframe)
+			: __constant_cpu_to_le32(QH_SMASK);
+		qh->hw_info2 |= c_mask;
+	} else
+		oxu_dbg(oxu, "reused qh %p schedule\n", qh);
+
+	/* stuff into the periodic schedule */
+	status = qh_link_periodic(oxu, qh);
+done:
+	return status;
+}
+
+static int intr_submit(struct oxu_hcd *oxu, struct urb *urb,
+			struct list_head *qtd_list, gfp_t mem_flags)
+{
+	unsigned epnum;
+	unsigned long flags;
+	struct ehci_qh *qh;
+	int status = 0;
+	struct list_head	empty;
+
+	/* get endpoint and transfer/schedule data */
+	epnum = urb->ep->desc.bEndpointAddress;
+
+	spin_lock_irqsave(&oxu->lock, flags);
+
+	if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
+			       &oxu_to_hcd(oxu)->flags))) {
+		status = -ESHUTDOWN;
+		goto done;
+	}
+
+	/* get qh and force any scheduling errors */
+	INIT_LIST_HEAD(&empty);
+	qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv);
+	if (qh == NULL) {
+		status = -ENOMEM;
+		goto done;
+	}
+	if (qh->qh_state == QH_STATE_IDLE) {
+		status = qh_schedule(oxu, qh);
+		if (status != 0)
+			goto done;
+	}
+
+	/* then queue the urb's tds to the qh */
+	qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
+	BUG_ON(qh == NULL);
+
+	/* ... update usbfs periodic stats */
+	oxu_to_hcd(oxu)->self.bandwidth_int_reqs++;
+
+done:
+	spin_unlock_irqrestore(&oxu->lock, flags);
+	if (status)
+		qtd_list_free(oxu, urb, qtd_list);
+
+	return status;
+}
+
+static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb,
+						gfp_t mem_flags)
+{
+	oxu_dbg(oxu, "iso support is missing!\n");
+	return -ENOSYS;
+}
+
+static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb,
+						gfp_t mem_flags)
+{
+	oxu_dbg(oxu, "split iso support is missing!\n");
+	return -ENOSYS;
+}
+
+static void scan_periodic(struct oxu_hcd *oxu)
+{
+	unsigned frame, clock, now_uframe, mod;
+	unsigned modified;
+
+	mod = oxu->periodic_size << 3;
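+	/* the frame index counts microframes: 8 uframes per 1 msec frame */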
+
+	/*
+	 * When running, scan from last scan point up to "now"
+	 * else clean up by scanning everything that's left.
+	 * Touches as few pages as possible:  cache-friendly.
+	 */
+	now_uframe = oxu->next_uframe;
+	if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
+		clock = readl(&oxu->regs->frame_index);
+	else
+		clock = now_uframe + mod - 1;
+	clock %= mod;
+
+	for (;;) {
+		union ehci_shadow	q, *q_p;
+		__le32			type, *hw_p;
+		unsigned		uframes;
+
+		/* don't scan past the live uframe */
+		frame = now_uframe >> 3;
+		if (frame == (clock >> 3))
+			uframes = now_uframe & 0x07;
+		else {
+			/* safe to scan the whole frame at once */
+			now_uframe |= 0x07;
+			uframes = 8;
+		}
+
+restart:
+		/* scan each element in frame's queue for completions */
+		q_p = &oxu->pshadow[frame];
+		hw_p = &oxu->periodic[frame];
+		q.ptr = q_p->ptr;
+		type = Q_NEXT_TYPE(*hw_p);
+		modified = 0;
+
+		while (q.ptr != NULL) {
+			union ehci_shadow temp;
+			int live;
+
+			live = HC_IS_RUNNING(oxu_to_hcd(oxu)->state);
+			switch (type) {
+			case Q_TYPE_QH:
+				/* handle any completions */
+				temp.qh = qh_get(q.qh);
+				type = Q_NEXT_TYPE(q.qh->hw_next);
+				q = q.qh->qh_next;
+				modified = qh_completions(oxu, temp.qh);
+				if (unlikely(list_empty(&temp.qh->qtd_list)))
+					intr_deschedule(oxu, temp.qh);
+				qh_put(temp.qh);
+				break;
+			default:
+				dbg("corrupt type %d frame %d shadow %p",
+					type, frame, q.ptr);
+				q.ptr = NULL;
+			}
+
+			/* assume completion callbacks modify the queue */
+			if (unlikely(modified))
+				goto restart;
+		}
+
+		/* Stop when we catch up to the HC */
+
+		/* FIXME:  this assumes we won't get lapped when
+		 * latencies climb; that should be rare, but...
+		 * detect it, and just go all the way around.
+		 * FLR might help detect this case, so long as latencies
+		 * don't exceed periodic_size msec (default 1.024 sec).
+		 */
+
+		/* FIXME: likewise assumes HC doesn't halt mid-scan */
+
+		if (now_uframe == clock) {
+			unsigned	now;
+
+			if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
+				break;
+			oxu->next_uframe = now_uframe;
+			now = readl(&oxu->regs->frame_index) % mod;
+			if (now_uframe == now)
+				break;
+
+			/* rescan the rest of this frame, then ... */
+			clock = now;
+		} else {
+			now_uframe++;
+			now_uframe %= mod;
+		}
+	}
+}
+
+/* On some systems, leaving remote wakeup enabled prevents system shutdown.
+ * The firmware seems to think that powering off is a wakeup event!
+ * This routine turns off remote wakeup and everything else, on all ports.
+ */
+static void ehci_turn_off_all_ports(struct oxu_hcd *oxu)
+{
+	int port = HCS_N_PORTS(oxu->hcs_params);
+
+	while (port--)
+		writel(PORT_RWC_BITS, &oxu->regs->port_status[port]);
+}
+
+static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
+{
+	unsigned port;
+
+	if (!HCS_PPC(oxu->hcs_params))
+		return;
+
+	oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
+	for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; )
+		(void) oxu_hub_control(oxu_to_hcd(oxu),
+				is_on ? SetPortFeature : ClearPortFeature,
+				USB_PORT_FEAT_POWER,
+				port--, NULL, 0);
+	msleep(20);
+}
+
+/* Called from some interrupts, timers, and so on.
+ * It calls driver completion functions, after dropping oxu->lock.
+ */
+static void ehci_work(struct oxu_hcd *oxu)
+{
+	timer_action_done(oxu, TIMER_IO_WATCHDOG);
+	if (oxu->reclaim_ready)
+		end_unlink_async(oxu);
+
+	/* another CPU may drop oxu->lock during a schedule scan while
+	 * it reports urb completions.  this flag guards against bogus
+	 * attempts at re-entrant schedule scanning.
+	 */
+	if (oxu->scanning)
+		return;
+	oxu->scanning = 1;
+	scan_async(oxu);
+	if (oxu->next_uframe != -1)
+		scan_periodic(oxu);
+	oxu->scanning = 0;
+
+	/* the IO watchdog guards against hardware or driver bugs that
+	 * misplace IRQs, and should let us run completely without IRQs.
+	 * such lossage has been observed on both VT6202 and VT8235.
+	 */
+	if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) &&
+			(oxu->async->qh_next.ptr != NULL ||
+			 oxu->periodic_sched != 0))
+		timer_action(oxu, TIMER_IO_WATCHDOG);
+}
+
+static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
+{
+	/* if we need to use IAA and it's busy, defer */
+	if (qh->qh_state == QH_STATE_LINKED
+			&& oxu->reclaim
+			&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) {
+		struct ehci_qh		*last;
+
+		for (last = oxu->reclaim;
+				last->reclaim;
+				last = last->reclaim)
+			continue;
+		qh->qh_state = QH_STATE_UNLINK_WAIT;
+		last->reclaim = qh;
+
+	/* bypass IAA if the hc can't care */
+	} else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim)
+		end_unlink_async(oxu);
+
+	/* something else might have unlinked the qh by now */
+	if (qh->qh_state == QH_STATE_LINKED)
+		start_unlink_async(oxu, qh);
+}
+
+/*
+ * USB host controller methods
+ */
+
+static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	u32 status, pcd_status = 0;
+	int bh;
+
+	spin_lock(&oxu->lock);
+
+	status = readl(&oxu->regs->status);
+
+	/* e.g. cardbus physical eject */
+	if (status == ~(u32) 0) {
+		oxu_dbg(oxu, "device removed\n");
+		goto dead;
+	}
+
+	status &= INTR_MASK;
+	if (!status) {			/* irq sharing? */
+		spin_unlock(&oxu->lock);
+		return IRQ_NONE;
+	}
+
+	/* clear (just) interrupts */
+	writel(status, &oxu->regs->status);
+	readl(&oxu->regs->command);	/* unblock posted write */
+	bh = 0;
+
+#ifdef OXU_VERBOSE_DEBUG
+	/* unrequested/ignored: Frame List Rollover */
+	dbg_status(oxu, "irq", status);
+#endif
+
+	/* INT, ERR, and IAA interrupt rates can be throttled */
+
+	/* normal [4.15.1.2] or error [4.15.1.1] completion */
+	if (likely((status & (STS_INT|STS_ERR)) != 0))
+		bh = 1;
+
+	/* complete the unlinking of some qh [4.15.2.3] */
+	if (status & STS_IAA) {
+		oxu->reclaim_ready = 1;
+		bh = 1;
+	}
+
+	/* remote wakeup [4.3.1] */
+	if (status & STS_PCD) {
+		unsigned i = HCS_N_PORTS(oxu->hcs_params);
+		pcd_status = status;
+
+		/* resume root hub? */
+		if (!(readl(&oxu->regs->command) & CMD_RUN))
+			usb_hcd_resume_root_hub(hcd);
+
+		while (i--) {
+			int pstatus = readl(&oxu->regs->port_status[i]);
+
+			if (pstatus & PORT_OWNER)
+				continue;
+			if (!(pstatus & PORT_RESUME)
+					|| oxu->reset_done[i] != 0)
+				continue;
+
+			/* start 20 msec resume signaling from this port,
+			 * and make khubd collect PORT_STAT_C_SUSPEND to
+			 * stop that signaling.
+			 */
+			oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
+			oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
+			mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
+		}
+	}
+
+	/* PCI errors [4.15.2.4] */
+	if (unlikely((status & STS_FATAL) != 0)) {
+		/* bogus "fatal" IRQs appear on some chips... why?  */
+		status = readl(&oxu->regs->status);
+		dbg_cmd(oxu, "fatal", readl(&oxu->regs->command));
+		dbg_status(oxu, "fatal", status);
+		if (status & STS_HALT) {
+			oxu_err(oxu, "fatal error\n");
+dead:
+			ehci_reset(oxu);
+			writel(0, &oxu->regs->configured_flag);
+			/* generic layer kills/unlinks all urbs, then
+			 * uses oxu_stop to clean up the rest
+			 */
+			bh = 1;
+		}
+	}
+
+	if (bh)
+		ehci_work(oxu);
+	spin_unlock(&oxu->lock);
+	if (pcd_status & STS_PCD)
+		usb_hcd_poll_rh_status(hcd);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t oxu_irq(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	int ret = IRQ_HANDLED;
+
+	u32 status = oxu_readl(hcd->regs, OXU_CHIPIRQSTATUS);
+	u32 enable = oxu_readl(hcd->regs, OXU_CHIPIRQEN_SET);
+
+	/* Disable all interrupts */
+	oxu_writel(hcd->regs, OXU_CHIPIRQEN_CLR, enable);
+
+	if ((oxu->is_otg && (status & OXU_USBOTGI)) ||
+		(!oxu->is_otg && (status & OXU_USBSPHI)))
+		oxu210_hcd_irq(hcd);
+	else
+		ret = IRQ_NONE;
+
+	/* Re-enable all interrupts */
+	oxu_writel(hcd->regs, OXU_CHIPIRQEN_SET, enable);
+
+	return ret;
+}
+
+static void oxu_watchdog(unsigned long param)
+{
+	struct oxu_hcd	*oxu = (struct oxu_hcd *) param;
+	unsigned long flags;
+
+	spin_lock_irqsave(&oxu->lock, flags);
+
+	/* lost IAA irqs wedge things badly; seen with a vt8235 */
+	if (oxu->reclaim) {
+		u32 status = readl(&oxu->regs->status);
+		if (status & STS_IAA) {
+			oxu_vdbg(oxu, "lost IAA\n");
+			writel(STS_IAA, &oxu->regs->status);
+			oxu->reclaim_ready = 1;
+		}
+	}
+
+	/* stop async processing after it's idled a bit */
+	if (test_bit(TIMER_ASYNC_OFF, &oxu->actions))
+		start_unlink_async(oxu, oxu->async);
+
+	/* oxu could run by timer, without IRQs ... */
+	ehci_work(oxu);
+
+	spin_unlock_irqrestore(&oxu->lock, flags);
+}
+
+/* One-time init, only for memory state.
+ */
+static int oxu_hcd_init(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	u32 temp;
+	int retval;
+	u32 hcc_params;
+
+	spin_lock_init(&oxu->lock);
+
+	init_timer(&oxu->watchdog);
+	oxu->watchdog.function = oxu_watchdog;
+	oxu->watchdog.data = (unsigned long) oxu;
+
+	/*
+	 * hw default: 1K periodic list heads, one per frame.
+	 * periodic_size can shrink by USBCMD update if hcc_params allows.
+	 */
+	oxu->periodic_size = DEFAULT_I_TDPS;
+	retval = ehci_mem_init(oxu, GFP_KERNEL);
+	if (retval < 0)
+		return retval;
+
+	/* controllers may cache some of the periodic schedule ... */
+	hcc_params = readl(&oxu->caps->hcc_params);
+	if (HCC_ISOC_CACHE(hcc_params))		/* full frame cache */
+		oxu->i_thresh = 8;
+	else					/* N microframes cached */
+		oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
+
+	oxu->reclaim = NULL;
+	oxu->reclaim_ready = 0;
+	oxu->next_uframe = -1;
+
+	/*
+	 * dedicate a qh for the async ring head, since we couldn't unlink
+	 * a 'real' qh without stopping the async schedule [4.8].  use it
+	 * as the 'reclamation list head' too.
+	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
+	 * from automatically advancing to the next td after short reads.
+	 */
+	oxu->async->qh_next.qh = NULL;
+	oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma);
+	oxu->async->hw_info1 = cpu_to_le32(QH_HEAD);
+	oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT);
+	oxu->async->hw_qtd_next = EHCI_LIST_END;
+	oxu->async->qh_state = QH_STATE_LINKED;
+	oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma);
+
+	/* clear interrupt enables, set irq latency */
+	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
+		log2_irq_thresh = 0;
+	temp = 1 << (16 + log2_irq_thresh);
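+	/* USBCMD bits 23:16 hold the interrupt threshold, in microframes */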
+	if (HCC_CANPARK(hcc_params)) {
+		/* HW default park == 3, on hardware that supports it (like
+		 * NVidia and ALI silicon), maximizes throughput on the async
+		 * schedule by avoiding QH fetches between transfers.
+		 *
+		 * With fast usb storage devices and NForce2, "park" seems to
+		 * cause problems: throughput reduction (!), data errors...
+		 */
+		if (park) {
+			park = min(park, (unsigned) 3);
+			temp |= CMD_PARK;
+			temp |= park << 8;
+		}
+		oxu_dbg(oxu, "park %d\n", park);
+	}
+	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+		/* periodic schedule size can be smaller than default */
+		temp &= ~(3 << 2);
+		temp |= (EHCI_TUNE_FLS << 2);
+	}
+	oxu->command = temp;
+
+	return 0;
+}
+
+/* Called during probe() after chip reset completes.
+ */
+static int oxu_reset(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	int ret;
+
+	spin_lock_init(&oxu->mem_lock);
+	INIT_LIST_HEAD(&oxu->urb_list);
+	oxu->urb_len = 0;
+
+	/* FIXME */
+	hcd->self.controller->dma_mask = 0UL;
+
+	if (oxu->is_otg) {
+		oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET;
+		oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET + \
+			HC_LENGTH(readl(&oxu->caps->hc_capbase));
+
+		oxu->mem = hcd->regs + OXU_SPH_MEM;
+	} else {
+		oxu->caps = hcd->regs + OXU_SPH_CAP_OFFSET;
+		oxu->regs = hcd->regs + OXU_SPH_CAP_OFFSET + \
+			HC_LENGTH(readl(&oxu->caps->hc_capbase));
+
+		oxu->mem = hcd->regs + OXU_OTG_MEM;
+	}
+
+	oxu->hcs_params = readl(&oxu->caps->hcs_params);
+	oxu->sbrn = 0x20;
+
+	ret = oxu_hcd_init(hcd);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int oxu_run(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	int retval;
+	u32 temp, hcc_params;
+
+	hcd->uses_new_polling = 1;
+	hcd->poll_rh = 0;
+
+	/* EHCI spec section 4.1 */
+	retval = ehci_reset(oxu);
+	if (retval != 0) {
+		ehci_mem_cleanup(oxu);
+		return retval;
+	}
+	writel(oxu->periodic_dma, &oxu->regs->frame_list);
+	writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
+
+	/* hcc_params controls whether oxu->regs->segment must (!!!)
+	 * be used; it constrains QH/ITD/SITD and QTD locations.
+	 * pci_pool consistent memory always uses segment zero.
+	 * streaming mappings for I/O buffers, like pci_map_single(),
+	 * can return segments above 4GB, if the device allows.
+	 *
+	 * NOTE:  the dma mask is visible through dma_supported(), so
+	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
+	 * Scsi_Host.highmem_io, and so forth.  It's readonly to all
+	 * host side drivers though.
+	 */
+	hcc_params = readl(&oxu->caps->hcc_params);
+	if (HCC_64BIT_ADDR(hcc_params))
+		writel(0, &oxu->regs->segment);
+
+	oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE |
+				CMD_ASE | CMD_RESET);
+	oxu->command |= CMD_RUN;
+	writel(oxu->command, &oxu->regs->command);
+	dbg_cmd(oxu, "init", oxu->command);
+
+	/*
+	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
+	 * are explicitly handed to companion controller(s), so no TT is
+	 * involved with the root hub.  (Except where one is integrated,
+	 * and there's no companion controller unless maybe for USB OTG.)
+	 */
+	hcd->state = HC_STATE_RUNNING;
+	writel(FLAG_CF, &oxu->regs->configured_flag);
+	readl(&oxu->regs->command);	/* unblock posted writes */
+
+	temp = HC_VERSION(readl(&oxu->caps->hc_capbase));
+	oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n",
+		((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f),
+		temp >> 8, temp & 0xff, DRIVER_VERSION,
+		ignore_oc ? ", overcurrent ignored" : "");
+
+	writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */
+
+	return 0;
+}
+
+static void oxu_stop(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+
+	/* Turn off port power on all root hub ports. */
+	ehci_port_power(oxu, 0);
+
+	/* no more interrupts ... */
+	del_timer_sync(&oxu->watchdog);
+
+	spin_lock_irq(&oxu->lock);
+	if (HC_IS_RUNNING(hcd->state))
+		ehci_quiesce(oxu);
+
+	ehci_reset(oxu);
+	writel(0, &oxu->regs->intr_enable);
+	spin_unlock_irq(&oxu->lock);
+
+	/* let companion controllers work when we aren't */
+	writel(0, &oxu->regs->configured_flag);
+
+	/* root hub is shut down separately (first, when possible) */
+	spin_lock_irq(&oxu->lock);
+	if (oxu->async)
+		ehci_work(oxu);
+	spin_unlock_irq(&oxu->lock);
+	ehci_mem_cleanup(oxu);
+
+	dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status));
+}
+
+/* Kick in for silicon on any bus (not just pci, etc).
+ * This forcibly disables dma and IRQs, helping kexec and other cases
+ * where the next system software may expect clean state.
+ */
+static void oxu_shutdown(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+
+	(void) ehci_halt(oxu);
+	ehci_turn_off_all_ports(oxu);
+
+	/* make BIOS/etc use companion controller during reboot */
+	writel(0, &oxu->regs->configured_flag);
+
+	/* unblock posted writes */
+	readl(&oxu->regs->configured_flag);
+}
+
+/* Non-error returns are a promise to giveback() the urb later;
+ * we drop ownership so the next owner (or urb unlink) can get it.
+ *
+ * urb + dev is in hcd.self.controller.urb_list
+ * we're queueing TDs onto software and hardware lists
+ *
+ * hcd-specific init for hcpriv hasn't been done yet
+ *
+ * NOTE:  control, bulk, and interrupt share the same code to append TDs
+ * to a (possibly active) QH, and the same QH scanning code.
+ */
+static int __oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+				gfp_t mem_flags)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	struct list_head qtd_list;
+
+	INIT_LIST_HEAD(&qtd_list);
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+	default:
+		if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
+			return -ENOMEM;
+		return submit_async(oxu, urb, &qtd_list, mem_flags);
+
+	case PIPE_INTERRUPT:
+		if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
+			return -ENOMEM;
+		return intr_submit(oxu, urb, &qtd_list, mem_flags);
+
+	case PIPE_ISOCHRONOUS:
+		if (urb->dev->speed == USB_SPEED_HIGH)
+			return itd_submit(oxu, urb, mem_flags);
+		else
+			return sitd_submit(oxu, urb, mem_flags);
+	}
+}
+
+/* This function breaks up bulk URBs with a large transfer buffer into
+ * 4096-byte micro URBs and processes them in sequence.
+ */
+static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+				gfp_t mem_flags)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	int num, rem;
+	int transfer_buffer_length;
+	void *transfer_buffer;
+	struct urb *murb;
+	int i, ret;
+
+	/* If not bulk pipe just enqueue the URB */
+	if (!usb_pipebulk(urb->pipe))
+		return __oxu_urb_enqueue(hcd, urb, mem_flags);
+
+	/* Otherwise we should verify the USB transfer buffer size! */
+	transfer_buffer = urb->transfer_buffer;
+	transfer_buffer_length = urb->transfer_buffer_length;
+
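+	/* The driver splits large bulk transfers into 4096-byte micro URBs,
+	 * presumably because that matches the on-chip buffer pool size
+	 * (BUFFER_NUM * BUFFER_SIZE in oxu210hp.h): num is the number of
+	 * chunks, rem the length of the final, possibly short, chunk.
+	 */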
+	num = urb->transfer_buffer_length / 4096;
+	rem = urb->transfer_buffer_length % 4096;
+	if (rem != 0)
+		num++;
+
+	/* If the URB fits in a single 4096-byte chunk, just enqueue it */
+	if (num == 1)
+		return __oxu_urb_enqueue(hcd, urb, mem_flags);
+
+	/* Ok, we have more work to do: split the URB into 4096-byte micro URBs */
+
+	for (i = 0; i < num - 1; i++) {
+		/* Get a free micro URB: poll until one becomes available */
+
+		do {
+			murb = (struct urb *) oxu_murb_alloc(oxu);
+			if (!murb)
+				schedule();
+		} while (!murb);
+
+		/* Copy the urb */
+		memcpy(murb, urb, sizeof(struct urb));
+
+		murb->transfer_buffer_length = 4096;
+		murb->transfer_buffer = transfer_buffer + i * 4096;
+
+		/* A NULL complete pointer encodes that this is a micro urb */
+		murb->complete = NULL;
+
+		((struct oxu_murb *) murb)->main = urb;
+		((struct oxu_murb *) murb)->last = 0;
+
+		/* Retry until the urb is accepted, so it is guaranteed to be
+		 * processed even when resources are temporarily exhausted.
+		 */
+		do {
+			ret  = __oxu_urb_enqueue(hcd, murb, mem_flags);
+			if (ret)
+				schedule();
+		} while (ret);
+	}
+
+	/* The last urb requires special handling */
+
+	/* Get a free micro URB: poll until one becomes available */
+	do {
+		murb = (struct urb *) oxu_murb_alloc(oxu);
+		if (!murb)
+			schedule();
+	} while (!murb);
+
+	/* Copy the urb */
+	memcpy(murb, urb, sizeof(struct urb));
+
+	murb->transfer_buffer_length = rem > 0 ? rem : 4096;
+	murb->transfer_buffer = transfer_buffer + (num - 1) * 4096;
+
+	/* A NULL complete pointer encodes that this is a micro urb */
+	murb->complete = NULL;
+
+	((struct oxu_murb *) murb)->main = urb;
+	((struct oxu_murb *) murb)->last = 1;
+
+	do {
+		ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
+		if (ret)
+			schedule();
+	} while (ret);
+
+	return ret;
+}
+
+/* Remove from hardware lists.
+ * Completions normally happen asynchronously
+ */
+static int oxu_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	struct ehci_qh *qh;
+	unsigned long flags;
+
+	spin_lock_irqsave(&oxu->lock, flags);
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+	default:
+		qh = (struct ehci_qh *) urb->hcpriv;
+		if (!qh)
+			break;
+		unlink_async(oxu, qh);
+		break;
+
+	case PIPE_INTERRUPT:
+		qh = (struct ehci_qh *) urb->hcpriv;
+		if (!qh)
+			break;
+		switch (qh->qh_state) {
+		case QH_STATE_LINKED:
+			intr_deschedule(oxu, qh);
+			/* FALL THROUGH */
+		case QH_STATE_IDLE:
+			qh_completions(oxu, qh);
+			break;
+		default:
+			oxu_dbg(oxu, "bogus qh %p state %d\n",
+					qh, qh->qh_state);
+			goto done;
+		}
+
+		/* reschedule QH iff another request is queued */
+		if (!list_empty(&qh->qtd_list)
+				&& HC_IS_RUNNING(hcd->state)) {
+			int status;
+
+			status = qh_schedule(oxu, qh);
+			spin_unlock_irqrestore(&oxu->lock, flags);
+
+			if (status != 0) {
+				/* shouldn't happen often, but ...
+				 * FIXME kill those tds' urbs
+				 */
+				err("can't reschedule qh %p, err %d",
+					qh, status);
+			}
+			return status;
+		}
+		break;
+	}
+done:
+	spin_unlock_irqrestore(&oxu->lock, flags);
+	return 0;
+}
+
+/* Bulk qh holds the data toggle */
+static void oxu_endpoint_disable(struct usb_hcd *hcd,
+					struct usb_host_endpoint *ep)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	unsigned long		flags;
+	struct ehci_qh		*qh, *tmp;
+
+	/* ASSERT:  any requests/urbs are being unlinked */
+	/* ASSERT:  nobody can be submitting urbs for this any more */
+
+rescan:
+	spin_lock_irqsave(&oxu->lock, flags);
+	qh = ep->hcpriv;
+	if (!qh)
+		goto done;
+
+	/* endpoints can be iso streams.  for now, we don't
+	 * accelerate iso completions ... so spin a while.
+	 */
+	if (qh->hw_info1 == 0) {
+		oxu_vdbg(oxu, "iso delay\n");
+		goto idle_timeout;
+	}
+
+	if (!HC_IS_RUNNING(hcd->state))
+		qh->qh_state = QH_STATE_IDLE;
+	switch (qh->qh_state) {
+	case QH_STATE_LINKED:
+		for (tmp = oxu->async->qh_next.qh;
+				tmp && tmp != qh;
+				tmp = tmp->qh_next.qh)
+			continue;
+		/* periodic qh self-unlinks on empty */
+		if (!tmp)
+			goto nogood;
+		unlink_async(oxu, qh);
+		/* FALL THROUGH */
+	case QH_STATE_UNLINK:		/* wait for hw to finish? */
+idle_timeout:
+		spin_unlock_irqrestore(&oxu->lock, flags);
+		schedule_timeout_uninterruptible(1);
+		goto rescan;
+	case QH_STATE_IDLE:		/* fully unlinked */
+		if (list_empty(&qh->qtd_list)) {
+			qh_put(qh);
+			break;
+		}
+		/* else FALL THROUGH */
+	default:
+nogood:
+		/* caller was supposed to have unlinked any requests;
+		 * that's not our job.  just leak this memory.
+		 */
+		oxu_err(oxu, "qh %p (#%02x) state %d%s\n",
+			qh, ep->desc.bEndpointAddress, qh->qh_state,
+			list_empty(&qh->qtd_list) ? "" : "(has tds)");
+		break;
+	}
+	ep->hcpriv = NULL;
+done:
+	spin_unlock_irqrestore(&oxu->lock, flags);
+	return;
+}
+
+static int oxu_get_frame(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+
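+	/* FRINDEX counts microframes (eight per 1 ms frame); shift right by
+	 * three to get the frame number within the periodic schedule.
+	 */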
+	return (readl(&oxu->regs->frame_index) >> 3) %
+		oxu->periodic_size;
+}
+
+/* Build "status change" packet (one or two bytes) from HC registers */
+static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	u32 temp, mask, status = 0;
+	int ports, i, retval = 1;
+	unsigned long flags;
+
+	/* if !USB_SUSPEND, root hub timers won't get shut down ... */
+	if (!HC_IS_RUNNING(hcd->state))
+		return 0;
+
+	/* init status to no-changes */
+	buf[0] = 0;
+	ports = HCS_N_PORTS(oxu->hcs_params);
+	if (ports > 7) {
+		buf[1] = 0;
+		retval++;
+	}
+
+	/* Some boards (mostly VIA?) report bogus overcurrent indications,
+	 * causing massive log spam unless we completely ignore them.  It
+	 * may be relevant that VIA VT8235 controllers, where PORT_POWER is
+	 * always set, seem to clear PORT_OCC and PORT_CSC when writing to
+	 * PORT_POWER; that's surprising, but maybe within-spec.
+	 */
+	if (!ignore_oc)
+		mask = PORT_CSC | PORT_PEC | PORT_OCC;
+	else
+		mask = PORT_CSC | PORT_PEC;
+
+	/* no hub change reports (bit 0) for now (power, ...) */
+
+	/* port N changes (bit N)? */
+	spin_lock_irqsave(&oxu->lock, flags);
+	for (i = 0; i < ports; i++) {
+		temp = readl(&oxu->regs->port_status[i]);
+
+		/*
+		 * Return status information even for ports with OWNER set.
+		 * Otherwise khubd wouldn't see the disconnect event when a
+		 * high-speed device is switched over to the companion
+		 * controller by the user.
+		 */
+
+		if (!(temp & PORT_CONNECT))
+			oxu->reset_done[i] = 0;
+		if ((temp & mask) != 0 || ((temp & PORT_RESUME) != 0 &&
+				time_after_eq(jiffies, oxu->reset_done[i]))) {
+			if (i < 7)
+				buf[0] |= 1 << (i + 1);
+			else
+				buf[1] |= 1 << (i - 7);
+			status = STS_PCD;
+		}
+	}
+	/* FIXME autosuspend idle root hubs */
+	spin_unlock_irqrestore(&oxu->lock, flags);
+	return status ? retval : 0;
+}
+
+/* Returns the speed of a device attached to a port on the root hub. */
+static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
+						unsigned int portsc)
+{
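+	/* On this controller, PORTSC bits 27:26 report the speed of the
+	 * attached device: 0 = full speed, 1 = low speed, 2 = high speed.
+	 */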
+	switch ((portsc >> 26) & 3) {
+	case 0:
+		return 0;
+	case 1:
+		return 1 << USB_PORT_FEAT_LOWSPEED;
+	case 2:
+	default:
+		return 1 << USB_PORT_FEAT_HIGHSPEED;
+	}
+}
+
+#define	PORT_WAKE_BITS	(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
+static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
+				u16 wValue, u16 wIndex, char *buf, u16 wLength)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	int ports = HCS_N_PORTS(oxu->hcs_params);
+	u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1];
+	u32 temp, status;
+	unsigned long	flags;
+	int retval = 0;
+	unsigned selector;
+
+	/*
+	 * FIXME:  support SetPortFeatures USB_PORT_FEAT_INDICATOR.
+	 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
+	 * (track current state ourselves) ... blink for diagnostics,
+	 * power, "this is the one", etc.  EHCI spec supports this.
+	 */
+
+	spin_lock_irqsave(&oxu->lock, flags);
+	switch (typeReq) {
+	case ClearHubFeature:
+		switch (wValue) {
+		case C_HUB_LOCAL_POWER:
+		case C_HUB_OVER_CURRENT:
+			/* no hub-wide feature/status flags */
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case ClearPortFeature:
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		temp = readl(status_reg);
+
+		/*
+		 * Even if OWNER is set, so the port is owned by the
+		 * companion controller, khubd needs to be able to clear
+		 * the port-change status bits (especially
+		 * USB_PORT_FEAT_C_CONNECTION).
+		 */
+
+		switch (wValue) {
+		case USB_PORT_FEAT_ENABLE:
+			writel(temp & ~PORT_PE, status_reg);
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+			writel((temp & ~PORT_RWC_BITS) | PORT_PEC, status_reg);
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			if (temp & PORT_RESET)
+				goto error;
+			if (temp & PORT_SUSPEND) {
+				if ((temp & PORT_PE) == 0)
+					goto error;
+				/* resume signaling for 20 msec */
+				temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
+				writel(temp | PORT_RESUME, status_reg);
+				oxu->reset_done[wIndex] = jiffies
+						+ msecs_to_jiffies(20);
+			}
+			break;
+		case USB_PORT_FEAT_C_SUSPEND:
+			/* we auto-clear this feature */
+			break;
+		case USB_PORT_FEAT_POWER:
+			if (HCS_PPC(oxu->hcs_params))
+				writel(temp & ~(PORT_RWC_BITS | PORT_POWER),
+					  status_reg);
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			writel((temp & ~PORT_RWC_BITS) | PORT_CSC, status_reg);
+			break;
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			writel((temp & ~PORT_RWC_BITS) | PORT_OCC, status_reg);
+			break;
+		case USB_PORT_FEAT_C_RESET:
+			/* GetPortStatus clears reset */
+			break;
+		default:
+			goto error;
+		}
+		readl(&oxu->regs->command);	/* unblock posted write */
+		break;
+	case GetHubDescriptor:
+		ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *)
+			buf);
+		break;
+	case GetHubStatus:
+		/* no hub-wide feature/status flags */
+		memset(buf, 0, 4);
+		break;
+	case GetPortStatus:
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		status = 0;
+		temp = readl(status_reg);
+
+		/* wPortChange bits */
+		if (temp & PORT_CSC)
+			status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+		if (temp & PORT_PEC)
+			status |= 1 << USB_PORT_FEAT_C_ENABLE;
+		if ((temp & PORT_OCC) && !ignore_oc)
+			status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
+
+		/* whoever resumes must GetPortStatus to complete it!! */
+		if (temp & PORT_RESUME) {
+
+			/* Remote Wakeup received? */
+			if (!oxu->reset_done[wIndex]) {
+				/* resume signaling for 20 msec */
+				oxu->reset_done[wIndex] = jiffies
+						+ msecs_to_jiffies(20);
+				/* check the port again */
+				mod_timer(&oxu_to_hcd(oxu)->rh_timer,
+						oxu->reset_done[wIndex]);
+			}
+
+			/* resume completed? */
+			else if (time_after_eq(jiffies,
+					oxu->reset_done[wIndex])) {
+				status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+				oxu->reset_done[wIndex] = 0;
+
+				/* stop resume signaling */
+				temp = readl(status_reg);
+				writel(temp & ~(PORT_RWC_BITS | PORT_RESUME),
+					status_reg);
+				retval = handshake(oxu, status_reg,
+					   PORT_RESUME, 0, 2000 /* 2msec */);
+				if (retval != 0) {
+					oxu_err(oxu,
+						"port %d resume error %d\n",
+						wIndex + 1, retval);
+					goto error;
+				}
+				temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
+			}
+		}
+
+		/* whoever resets must GetPortStatus to complete it!! */
+		if ((temp & PORT_RESET)
+				&& time_after_eq(jiffies,
+					oxu->reset_done[wIndex])) {
+			status |= 1 << USB_PORT_FEAT_C_RESET;
+			oxu->reset_done[wIndex] = 0;
+
+			/* force reset to complete */
+			writel(temp & ~(PORT_RWC_BITS | PORT_RESET),
+					status_reg);
+			/* REVISIT:  some hardware needs 550+ usec to clear
+			 * this bit; seems too long to spin routinely...
+			 */
+			retval = handshake(oxu, status_reg,
+					PORT_RESET, 0, 750);
+			if (retval != 0) {
+				oxu_err(oxu, "port %d reset error %d\n",
+					wIndex + 1, retval);
+				goto error;
+			}
+
+			/* see what we found out */
+			temp = check_reset_complete(oxu, wIndex, status_reg,
+					readl(status_reg));
+		}
+
+		/* transfer dedicated ports to the companion hc */
+		if ((temp & PORT_CONNECT) &&
+				test_bit(wIndex, &oxu->companion_ports)) {
+			temp &= ~PORT_RWC_BITS;
+			temp |= PORT_OWNER;
+			writel(temp, status_reg);
+			oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1);
+			temp = readl(status_reg);
+		}
+
+		/*
+		 * Even if OWNER is set, there's no harm letting khubd
+		 * see the wPortStatus values (they should all be 0 except
+		 * for PORT_POWER anyway).
+		 */
+
+		if (temp & PORT_CONNECT) {
+			status |= 1 << USB_PORT_FEAT_CONNECTION;
+			/* status may be from integrated TT */
+			status |= oxu_port_speed(oxu, temp);
+		}
+		if (temp & PORT_PE)
+			status |= 1 << USB_PORT_FEAT_ENABLE;
+		if (temp & (PORT_SUSPEND|PORT_RESUME))
+			status |= 1 << USB_PORT_FEAT_SUSPEND;
+		if (temp & PORT_OC)
+			status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
+		if (temp & PORT_RESET)
+			status |= 1 << USB_PORT_FEAT_RESET;
+		if (temp & PORT_POWER)
+			status |= 1 << USB_PORT_FEAT_POWER;
+
+#ifndef	OXU_VERBOSE_DEBUG
+	if (status & ~0xffff)	/* only if wPortChange is interesting */
+#endif
+		dbg_port(oxu, "GetStatus", wIndex + 1, temp);
+		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
+		break;
+	case SetHubFeature:
+		switch (wValue) {
+		case C_HUB_LOCAL_POWER:
+		case C_HUB_OVER_CURRENT:
+			/* no hub-wide feature/status flags */
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case SetPortFeature:
+		selector = wIndex >> 8;
+		wIndex &= 0xff;
+		if (!wIndex || wIndex > ports)
+			goto error;
+		wIndex--;
+		temp = readl(status_reg);
+		if (temp & PORT_OWNER)
+			break;
+
+		temp &= ~PORT_RWC_BITS;
+		switch (wValue) {
+		case USB_PORT_FEAT_SUSPEND:
+			if ((temp & PORT_PE) == 0
+					|| (temp & PORT_RESET) != 0)
+				goto error;
+			if (device_may_wakeup(&hcd->self.root_hub->dev))
+				temp |= PORT_WAKE_BITS;
+			writel(temp | PORT_SUSPEND, status_reg);
+			break;
+		case USB_PORT_FEAT_POWER:
+			if (HCS_PPC(oxu->hcs_params))
+				writel(temp | PORT_POWER, status_reg);
+			break;
+		case USB_PORT_FEAT_RESET:
+			if (temp & PORT_RESUME)
+				goto error;
+			/* line status bits may report this as low speed,
+			 * which can be fine if this root hub has a
+			 * transaction translator built in.
+			 */
+			oxu_vdbg(oxu, "port %d reset\n", wIndex + 1);
+			temp |= PORT_RESET;
+			temp &= ~PORT_PE;
+
+			/*
+			 * caller must wait, then call GetPortStatus
+			 * usb 2.0 spec says 50 ms resets on root
+			 */
+			oxu->reset_done[wIndex] = jiffies
+					+ msecs_to_jiffies(50);
+			writel(temp, status_reg);
+			break;
+
+		/* For downstream facing ports (these):  one hub port is put
+		 * into test mode according to USB2 11.24.2.13, then the hub
+		 * must be reset (which for root hub now means rmmod+modprobe,
+		 * or else system reboot).  See EHCI 2.3.9 and 4.14 for info
+		 * about the EHCI-specific stuff.
+		 */
+		case USB_PORT_FEAT_TEST:
+			if (!selector || selector > 5)
+				goto error;
+			ehci_quiesce(oxu);
+			ehci_halt(oxu);
+			temp |= selector << 16;
+			writel(temp, status_reg);
+			break;
+
+		default:
+			goto error;
+		}
+		readl(&oxu->regs->command);	/* unblock posted writes */
+		break;
+
+	default:
+error:
+		/* "stall" on error */
+		retval = -EPIPE;
+	}
+	spin_unlock_irqrestore(&oxu->lock, flags);
+	return retval;
+}
+
+#ifdef CONFIG_PM
+
+static int oxu_bus_suspend(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	int port;
+	int mask;
+
+	oxu_dbg(oxu, "suspend root hub\n");
+
+	if (time_before(jiffies, oxu->next_statechange))
+		msleep(5);
+
+	port = HCS_N_PORTS(oxu->hcs_params);
+	spin_lock_irq(&oxu->lock);
+
+	/* stop schedules, clean any completed work */
+	if (HC_IS_RUNNING(hcd->state)) {
+		ehci_quiesce(oxu);
+		hcd->state = HC_STATE_QUIESCING;
+	}
+	oxu->command = readl(&oxu->regs->command);
+	if (oxu->reclaim)
+		oxu->reclaim_ready = 1;
+	ehci_work(oxu);
+
+	/* Unlike other USB host controller types, EHCI doesn't have
+	 * any notion of "global" or bus-wide suspend.  The driver has
+	 * to manually suspend all the active unsuspended ports, and
+	 * then manually resume them in the bus_resume() routine.
+	 */
+	oxu->bus_suspended = 0;
+	while (port--) {
+		u32 __iomem *reg = &oxu->regs->port_status[port];
+		u32 t1 = readl(reg) & ~PORT_RWC_BITS;
+		u32 t2 = t1;
+
+		/* keep track of which ports we suspend */
+		if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) &&
+				!(t1 & PORT_SUSPEND)) {
+			t2 |= PORT_SUSPEND;
+			set_bit(port, &oxu->bus_suspended);
+		}
+
+		/* enable remote wakeup on all ports */
+		if (device_may_wakeup(&hcd->self.root_hub->dev))
+			t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E;
+		else
+			t2 &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E);
+
+		if (t1 != t2) {
+			oxu_vdbg(oxu, "port %d, %08x -> %08x\n",
+				port + 1, t1, t2);
+			writel(t2, reg);
+		}
+	}
+
+	/* turn off now-idle HC */
+	del_timer_sync(&oxu->watchdog);
+	ehci_halt(oxu);
+	hcd->state = HC_STATE_SUSPENDED;
+
+	/* allow remote wakeup */
+	mask = INTR_MASK;
+	if (!device_may_wakeup(&hcd->self.root_hub->dev))
+		mask &= ~STS_PCD;
+	writel(mask, &oxu->regs->intr_enable);
+	readl(&oxu->regs->intr_enable);
+
+	oxu->next_statechange = jiffies + msecs_to_jiffies(10);
+	spin_unlock_irq(&oxu->lock);
+	return 0;
+}
+
+/* Caller has locked the root hub, and should reset/reinit on error */
+static int oxu_bus_resume(struct usb_hcd *hcd)
+{
+	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
+	u32 temp;
+	int i;
+
+	if (time_before(jiffies, oxu->next_statechange))
+		msleep(5);
+	spin_lock_irq(&oxu->lock);
+
+	/* Ideally this is a real resume and no port's power
+	 * was lost.  (For PCI, that means Vaux was maintained.)  But we
+	 * could instead be restoring a swsusp snapshot -- so that BIOS was
+	 * the last user of the controller, not reset/pm hardware keeping
+	 * state we gave to it.
+	 */
+	temp = readl(&oxu->regs->intr_enable);
+	oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss");
+
+	/* at least some APM implementations will try to deliver
+	 * IRQs right away, so delay them until we're ready.
+	 */
+	writel(0, &oxu->regs->intr_enable);
+
+	/* re-init operational registers */
+	writel(0, &oxu->regs->segment);
+	writel(oxu->periodic_dma, &oxu->regs->frame_list);
+	writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
+
+	/* restore CMD_RUN, framelist size, and irq threshold */
+	writel(oxu->command, &oxu->regs->command);
+
+	/* Some controller/firmware combinations need a delay during which
+	 * they set up the port statuses.  See Bugzilla #8190. */
+	mdelay(8);
+
+	/* manually resume the ports we suspended during bus_suspend() */
+	i = HCS_N_PORTS(oxu->hcs_params);
+	while (i--) {
+		temp = readl(&oxu->regs->port_status[i]);
+		temp &= ~(PORT_RWC_BITS
+			| PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E);
+		if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
+			oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
+			temp |= PORT_RESUME;
+		}
+		writel(temp, &oxu->regs->port_status[i]);
+	}
+	i = HCS_N_PORTS(oxu->hcs_params);
+	mdelay(20);
+	while (i--) {
+		temp = readl(&oxu->regs->port_status[i]);
+		if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
+			temp &= ~(PORT_RWC_BITS | PORT_RESUME);
+			writel(temp, &oxu->regs->port_status[i]);
+			oxu_vdbg(oxu, "resumed port %d\n", i + 1);
+		}
+	}
+	(void) readl(&oxu->regs->command);
+
+	/* maybe re-activate the schedule(s) */
+	temp = 0;
+	if (oxu->async->qh_next.qh)
+		temp |= CMD_ASE;
+	if (oxu->periodic_sched)
+		temp |= CMD_PSE;
+	if (temp) {
+		oxu->command |= temp;
+		writel(oxu->command, &oxu->regs->command);
+	}
+
+	oxu->next_statechange = jiffies + msecs_to_jiffies(5);
+	hcd->state = HC_STATE_RUNNING;
+
+	/* Now we can safely re-enable irqs */
+	writel(INTR_MASK, &oxu->regs->intr_enable);
+
+	spin_unlock_irq(&oxu->lock);
+	return 0;
+}
+
+#else
+
+static int oxu_bus_suspend(struct usb_hcd *hcd)
+{
+	return 0;
+}
+
+static int oxu_bus_resume(struct usb_hcd *hcd)
+{
+	return 0;
+}
+
+#endif	/* CONFIG_PM */
+
+static const struct hc_driver oxu_hc_driver = {
+	.description =		"oxu210hp_hcd",
+	.product_desc =		"oxu210hp HCD",
+	.hcd_priv_size =	sizeof(struct oxu_hcd),
+
+	/*
+	 * Generic hardware linkage
+	 */
+	.irq =			oxu_irq,
+	.flags =		HCD_MEMORY | HCD_USB2,
+
+	/*
+	 * Basic lifecycle operations
+	 */
+	.reset =		oxu_reset,
+	.start =		oxu_run,
+	.stop =			oxu_stop,
+	.shutdown =		oxu_shutdown,
+
+	/*
+	 * Managing i/o requests and associated device resources
+	 */
+	.urb_enqueue =		oxu_urb_enqueue,
+	.urb_dequeue =		oxu_urb_dequeue,
+	.endpoint_disable =	oxu_endpoint_disable,
+
+	/*
+	 * Scheduling support
+	 */
+	.get_frame_number =	oxu_get_frame,
+
+	/*
+	 * Root hub support
+	 */
+	.hub_status_data =	oxu_hub_status_data,
+	.hub_control =		oxu_hub_control,
+	.bus_suspend =		oxu_bus_suspend,
+	.bus_resume =		oxu_bus_resume,
+};
+
+/*
+ * Module stuff
+ */
+
+static void oxu_configuration(struct platform_device *pdev, void *base)
+{
+	u32 tmp;
+
+	/* Initialize top level registers.
+	 * First write ever
+	 */
+	oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
+	oxu_writel(base, OXU_SOFTRESET, OXU_SRESET);
+	oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
+
+	tmp = oxu_readl(base, OXU_PIOBURSTREADCTRL);
+	oxu_writel(base, OXU_PIOBURSTREADCTRL, tmp | 0x0040);
+
+	oxu_writel(base, OXU_ASO, OXU_SPHPOEN | OXU_OVRCCURPUPDEN |
+					OXU_COMPARATOR | OXU_ASO_OP);
+
+	tmp = oxu_readl(base, OXU_CLKCTRL_SET);
+	oxu_writel(base, OXU_CLKCTRL_SET, tmp | OXU_SYSCLKEN | OXU_USBOTGCLKEN);
+
+	/* Clear all top-level interrupt enables */
+	oxu_writel(base, OXU_CHIPIRQEN_CLR, 0xff);
+
+	/* Clear all top interrupt status */
+	oxu_writel(base, OXU_CHIPIRQSTATUS, 0xff);
+
+	/* Enable all needed top-level interrupts except the OTG and SPH cores */
+	oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI);
+}
+
+static int oxu_verify_id(struct platform_device *pdev, void *base)
+{
+	u32 id;
+	static const char * const bo[] = {
+		"reserved",
+		"128-pin LQFP",
+		"84-pin TFBGA",
+		"reserved",
+	};
+
+	/* Read controller signature register to find a match */
+	id = oxu_readl(base, OXU_DEVICEID);
+	dev_info(&pdev->dev, "device ID %x\n", id);
+	if ((id & OXU_REV_MASK) != (OXU_REV_2100 << OXU_REV_SHIFT))
+		return -1;
+
+	dev_info(&pdev->dev, "found device %x %s (%04x:%04x)\n",
+		id >> OXU_REV_SHIFT,
+		bo[(id & OXU_BO_MASK) >> OXU_BO_SHIFT],
+		(id & OXU_MAJ_REV_MASK) >> OXU_MAJ_REV_SHIFT,
+		(id & OXU_MIN_REV_MASK) >> OXU_MIN_REV_SHIFT);
+
+	return 0;
+}
+
+static const struct hc_driver oxu_hc_driver;
+static struct usb_hcd *oxu_create(struct platform_device *pdev,
+				unsigned long memstart, unsigned long memlen,
+				void *base, int irq, int otg)
+{
+	struct device *dev = &pdev->dev;
+
+	struct usb_hcd *hcd;
+	struct oxu_hcd *oxu;
+	int ret;
+
+	/* Set endian mode and host mode */
+	oxu_writel(base + (otg ? OXU_OTG_CORE_OFFSET : OXU_SPH_CORE_OFFSET),
+				OXU_USBMODE,
+				OXU_CM_HOST_ONLY | OXU_ES_LITTLE | OXU_VBPS);
+
+	hcd = usb_create_hcd(&oxu_hc_driver, dev,
+				otg ? "oxu210hp_otg" : "oxu210hp_sph");
+	if (!hcd)
+		return ERR_PTR(-ENOMEM);
+
+	hcd->rsrc_start = memstart;
+	hcd->rsrc_len = memlen;
+	hcd->regs = base;
+	hcd->irq = irq;
+	hcd->state = HC_STATE_HALT;
+
+	oxu = hcd_to_oxu(hcd);
+	oxu->is_otg = otg;
+
+	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	return hcd;
+}
+
+static int oxu_init(struct platform_device *pdev,
+				unsigned long memstart, unsigned long memlen,
+				void *base, int irq)
+{
+	struct oxu_info *info = platform_get_drvdata(pdev);
+	struct usb_hcd *hcd;
+	int ret;
+
+	/* First time configuration at start up */
+	oxu_configuration(pdev, base);
+
+	ret = oxu_verify_id(pdev, base);
+	if (ret) {
+		dev_err(&pdev->dev, "no devices found!\n");
+		return -ENODEV;
+	}
+
+	/* Create the OTG controller */
+	hcd = oxu_create(pdev, memstart, memlen, base, irq, 1);
+	if (IS_ERR(hcd)) {
+		dev_err(&pdev->dev, "cannot create OTG controller!\n");
+		ret = PTR_ERR(hcd);
+		goto error_create_otg;
+	}
+	info->hcd[0] = hcd;
+
+	/* Create the SPH host controller */
+	hcd = oxu_create(pdev, memstart, memlen, base, irq, 0);
+	if (IS_ERR(hcd)) {
+		dev_err(&pdev->dev, "cannot create SPH controller!\n");
+		ret = PTR_ERR(hcd);
+		goto error_create_sph;
+	}
+	info->hcd[1] = hcd;
+
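+	/* Both HCDs are registered: enable the OTG and SPH core interrupts
+	 * (OXU_USBOTGI | OXU_USBSPHI) at the top level.
+	 */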
+	oxu_writel(base, OXU_CHIPIRQEN_SET,
+		oxu_readl(base, OXU_CHIPIRQEN_SET) | 3);
+
+	return 0;
+
+error_create_sph:
+	usb_remove_hcd(info->hcd[0]);
+	usb_put_hcd(info->hcd[0]);
+
+error_create_otg:
+	return ret;
+}
+
+static int oxu_drv_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	void *base;
+	unsigned long memstart, memlen;
+	int irq, ret;
+	struct oxu_info *info;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	/*
+	 * Get the platform resources
+	 */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(&pdev->dev,
+			"no IRQ! Check %s setup!\n", dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+	irq = res->start;
+	dev_dbg(&pdev->dev, "IRQ resource %d\n", irq);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no registers address! Check %s setup!\n",
+			dev_name(&pdev->dev));
+		return -ENODEV;
+	}
+	memstart = res->start;
+	memlen = resource_size(res);
+	dev_dbg(&pdev->dev, "MEM resource %lx-%lx\n", memstart, memlen);
+	if (!request_mem_region(memstart, memlen,
+				oxu_hc_driver.description)) {
+		dev_dbg(&pdev->dev, "memory area already in use\n");
+		return -EBUSY;
+	}
+
+	ret = set_irq_type(irq, IRQF_TRIGGER_FALLING);
+	if (ret) {
+		dev_err(&pdev->dev, "error setting irq type\n");
+		ret = -EFAULT;
+		goto error_set_irq_type;
+	}
+
+	base = ioremap(memstart, memlen);
+	if (!base) {
+		dev_dbg(&pdev->dev, "error mapping memory\n");
+		ret = -ENOMEM;
+		goto error_ioremap;
+	}
+
+	/* Allocate a driver data struct to hold useful info for both
+	 * SPH & OTG devices
+	 */
+	info = kzalloc(sizeof(struct oxu_info), GFP_KERNEL);
+	if (!info) {
+		dev_dbg(&pdev->dev, "error allocating memory\n");
+		ret = -ENOMEM;
+		goto error_alloc;
+	}
+	platform_set_drvdata(pdev, info);
+
+	ret = oxu_init(pdev, memstart, memlen, base, irq);
+	if (ret < 0) {
+		dev_dbg(&pdev->dev, "cannot init USB devices\n");
+		goto error_init;
+	}
+
+	dev_info(&pdev->dev, "devices enabled and running\n");
+
+	return 0;
+
+error_init:
+	kfree(info);
+	platform_set_drvdata(pdev, NULL);
+
+error_alloc:
+	iounmap(base);
+
+error_set_irq_type:
+error_ioremap:
+	release_mem_region(memstart, memlen);
+
+	dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);
+	return ret;
+}
+
+static void oxu_remove(struct platform_device *pdev, struct usb_hcd *hcd)
+{
+	usb_remove_hcd(hcd);
+	usb_put_hcd(hcd);
+}
+
+static int oxu_drv_remove(struct platform_device *pdev)
+{
+	struct oxu_info *info = platform_get_drvdata(pdev);
+	unsigned long memstart = info->hcd[0]->rsrc_start,
+			memlen = info->hcd[0]->rsrc_len;
+	void *base = info->hcd[0]->regs;
+
+	oxu_remove(pdev, info->hcd[0]);
+	oxu_remove(pdev, info->hcd[1]);
+
+	iounmap(base);
+	release_mem_region(memstart, memlen);
+
+	kfree(info);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static void oxu_drv_shutdown(struct platform_device *pdev)
+{
+	oxu_drv_remove(pdev);
+}
+
+#if 0
+/* FIXME: TODO */
+static int oxu_drv_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+	return 0;
+}
+
+static int oxu_drv_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+	return 0;
+}
+#else
+#define oxu_drv_suspend	NULL
+#define oxu_drv_resume	NULL
+#endif
+
+static struct platform_driver oxu_driver = {
+	.probe		= oxu_drv_probe,
+	.remove		= oxu_drv_remove,
+	.shutdown	= oxu_drv_shutdown,
+	.suspend	= oxu_drv_suspend,
+	.resume		= oxu_drv_resume,
+	.driver = {
+		.name = "oxu210hp-hcd",
+		.bus = &platform_bus_type
+	}
+};
+
+static int __init oxu_module_init(void)
+{
+	int retval = 0;
+
+	retval = platform_driver_register(&oxu_driver);
+	if (retval < 0)
+		return retval;
+
+	return retval;
+}
+
+static void __exit oxu_module_cleanup(void)
+{
+	platform_driver_unregister(&oxu_driver);
+}
+
+module_init(oxu_module_init);
+module_exit(oxu_module_cleanup);
+
+MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION);
+MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/oxu210hp.h b/drivers/usb/host/oxu210hp.h
new file mode 100644
index 0000000..8910e27
--- /dev/null
+++ b/drivers/usb/host/oxu210hp.h
@@ -0,0 +1,447 @@
+/*
+ * Host interface registers
+ */
+
+#define OXU_DEVICEID			0x00
+	#define OXU_REV_MASK		0xffff0000
+	#define OXU_REV_SHIFT		16
+	#define OXU_REV_2100		0x2100
+	#define OXU_BO_SHIFT		8
+	#define OXU_BO_MASK		(0x3 << OXU_BO_SHIFT)
+	#define OXU_MAJ_REV_SHIFT	4
+	#define OXU_MAJ_REV_MASK	(0xf << OXU_MAJ_REV_SHIFT)
+	#define OXU_MIN_REV_SHIFT	0
+	#define OXU_MIN_REV_MASK	(0xf << OXU_MIN_REV_SHIFT)
+#define OXU_HOSTIFCONFIG		0x04
+#define OXU_SOFTRESET			0x08
+	#define OXU_SRESET		(1 << 0)
+
+#define OXU_PIOBURSTREADCTRL		0x0C
+
+#define OXU_CHIPIRQSTATUS		0x10
+#define OXU_CHIPIRQEN_SET		0x14
+#define OXU_CHIPIRQEN_CLR		0x18
+	#define OXU_USBSPHLPWUI		0x00000080
+	#define OXU_USBOTGLPWUI		0x00000040
+	#define OXU_USBSPHI		0x00000002
+	#define OXU_USBOTGI		0x00000001
+
+#define OXU_CLKCTRL_SET			0x1C
+	#define OXU_SYSCLKEN		0x00000008
+	#define OXU_USBSPHCLKEN		0x00000002
+	#define OXU_USBOTGCLKEN		0x00000001
+
+#define OXU_ASO				0x68
+	#define OXU_SPHPOEN		0x00000100
+	#define OXU_OVRCCURPUPDEN	0x00000800
+	#define OXU_ASO_OP		(1 << 10)
+	#define OXU_COMPARATOR		0x000004000
+
+#define OXU_USBMODE			0x1A8
+	#define OXU_VBPS		0x00000020
+	#define OXU_ES_LITTLE		0x00000000
+	#define OXU_CM_HOST_ONLY	0x00000003
+
+/*
+ * Proper EHCI structs & defines
+ */
+
+/* Magic numbers that can affect system performance */
+#define EHCI_TUNE_CERR		3	/* 0-3 qtd retries; 0 == don't stop */
+#define EHCI_TUNE_RL_HS		4	/* nak throttle; see 4.9 */
+#define EHCI_TUNE_RL_TT		0
+#define EHCI_TUNE_MULT_HS	1	/* 1-3 transactions/uframe; 4.10.3 */
+#define EHCI_TUNE_MULT_TT	1
+#define EHCI_TUNE_FLS		2	/* (small) 256 frame schedule */
+
+struct oxu_hcd;
+
+/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
+
+/* Section 2.2 Host Controller Capability Registers */
+struct ehci_caps {
+	/* these fields are specified as 8 and 16 bit registers,
+	 * but some hosts can't perform 8 or 16 bit PCI accesses.
+	 */
+	u32		hc_capbase;
+#define HC_LENGTH(p)		(((p)>>00)&0x00ff)	/* bits 7:0 */
+#define HC_VERSION(p)		(((p)>>16)&0xffff)	/* bits 31:16 */
+	u32		hcs_params;     /* HCSPARAMS - offset 0x4 */
+#define HCS_DEBUG_PORT(p)	(((p)>>20)&0xf)	/* bits 23:20, debug port? */
+#define HCS_INDICATOR(p)	((p)&(1 << 16))	/* true: has port indicators */
+#define HCS_N_CC(p)		(((p)>>12)&0xf)	/* bits 15:12, #companion HCs */
+#define HCS_N_PCC(p)		(((p)>>8)&0xf)	/* bits 11:8, ports per CC */
+#define HCS_PORTROUTED(p)	((p)&(1 << 7))	/* true: port routing */
+#define HCS_PPC(p)		((p)&(1 << 4))	/* true: port power control */
+#define HCS_N_PORTS(p)		(((p)>>0)&0xf)	/* bits 3:0, ports on HC */
+
+	u32		hcc_params;      /* HCCPARAMS - offset 0x8 */
+#define HCC_EXT_CAPS(p)		(((p)>>8)&0xff)	/* for pci extended caps */
+#define HCC_ISOC_CACHE(p)       ((p)&(1 << 7))  /* true: can cache isoc frame */
+#define HCC_ISOC_THRES(p)       (((p)>>4)&0x7)  /* bits 6:4, uframes cached */
+#define HCC_CANPARK(p)		((p)&(1 << 2))  /* true: can park on async qh */
+#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1))  /* true: periodic_size changes*/
+#define HCC_64BIT_ADDR(p)       ((p)&(1))       /* true: can use 64-bit addr */
+	u8		portroute[8];	 /* nibbles for routing - offset 0xC */
+} __attribute__ ((packed));
+
+
+/* Section 2.3 Host Controller Operational Registers */
+struct ehci_regs {
+	/* USBCMD: offset 0x00 */
+	u32		command;
+/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
+#define CMD_PARK	(1<<11)		/* enable "park" on async qh */
+#define CMD_PARK_CNT(c)	(((c)>>8)&3)	/* how many transfers to park for */
+#define CMD_LRESET	(1<<7)		/* partial reset (no ports, etc) */
+#define CMD_IAAD	(1<<6)		/* "doorbell" interrupt async advance */
+#define CMD_ASE		(1<<5)		/* async schedule enable */
+#define CMD_PSE		(1<<4)		/* periodic schedule enable */
+/* 3:2 is periodic frame list size */
+#define CMD_RESET	(1<<1)		/* reset HC not bus */
+#define CMD_RUN		(1<<0)		/* start/stop HC */
+
+	/* USBSTS: offset 0x04 */
+	u32		status;
+#define STS_ASS		(1<<15)		/* Async Schedule Status */
+#define STS_PSS		(1<<14)		/* Periodic Schedule Status */
+#define STS_RECL	(1<<13)		/* Reclamation */
+#define STS_HALT	(1<<12)		/* Not running (any reason) */
+/* some bits reserved */
+	/* these STS_* flags are also intr_enable bits (USBINTR) */
+#define STS_IAA		(1<<5)		/* Interrupted on async advance */
+#define STS_FATAL	(1<<4)		/* such as some PCI access errors */
+#define STS_FLR		(1<<3)		/* frame list rolled over */
+#define STS_PCD		(1<<2)		/* port change detect */
+#define STS_ERR		(1<<1)		/* "error" completion (overflow, ...) */
+#define STS_INT		(1<<0)		/* "normal" completion (short, ...) */
+
+#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+	/* USBINTR: offset 0x08 */
+	u32		intr_enable;
+
+	/* FRINDEX: offset 0x0C */
+	u32		frame_index;	/* current microframe number */
+	/* CTRLDSSEGMENT: offset 0x10 */
+	u32		segment;	/* address bits 63:32 if needed */
+	/* PERIODICLISTBASE: offset 0x14 */
+	u32		frame_list;	/* points to periodic list */
+	/* ASYNCLISTADDR: offset 0x18 */
+	u32		async_next;	/* address of next async queue head */
+
+	u32		reserved[9];
+
+	/* CONFIGFLAG: offset 0x40 */
+	u32		configured_flag;
+#define FLAG_CF		(1<<0)		/* true: we'll support "high speed" */
+
+	/* PORTSC: offset 0x44 */
+	u32		port_status[0];	/* up to N_PORTS */
+/* 31:23 reserved */
+#define PORT_WKOC_E	(1<<22)		/* wake on overcurrent (enable) */
+#define PORT_WKDISC_E	(1<<21)		/* wake on disconnect (enable) */
+#define PORT_WKCONN_E	(1<<20)		/* wake on connect (enable) */
+/* 19:16 for port testing */
+#define PORT_LED_OFF	(0<<14)
+#define PORT_LED_AMBER	(1<<14)
+#define PORT_LED_GREEN	(2<<14)
+#define PORT_LED_MASK	(3<<14)
+#define PORT_OWNER	(1<<13)		/* true: companion hc owns this port */
+#define PORT_POWER	(1<<12)		/* true: has power (see PPC) */
+#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10))	/* USB 1.1 device */
+/* 11:10 for detecting lowspeed devices (reset vs release ownership) */
+/* 9 reserved */
+#define PORT_RESET	(1<<8)		/* reset port */
+#define PORT_SUSPEND	(1<<7)		/* suspend port */
+#define PORT_RESUME	(1<<6)		/* resume it */
+#define PORT_OCC	(1<<5)		/* over current change */
+#define PORT_OC		(1<<4)		/* over current active */
+#define PORT_PEC	(1<<3)		/* port enable change */
+#define PORT_PE		(1<<2)		/* port enable */
+#define PORT_CSC	(1<<1)		/* connect status change */
+#define PORT_CONNECT	(1<<0)		/* device connected */
+#define PORT_RWC_BITS   (PORT_CSC | PORT_PEC | PORT_OCC)
+} __attribute__ ((packed));
+
+/* Appendix C, Debug port ... intended for use with special "debug devices"
+ * that can help if there's no serial console.  (nonstandard enumeration.)
+ */
+struct ehci_dbg_port {
+	u32	control;
+#define DBGP_OWNER	(1<<30)
+#define DBGP_ENABLED	(1<<28)
+#define DBGP_DONE	(1<<16)
+#define DBGP_INUSE	(1<<10)
+#define DBGP_ERRCODE(x)	(((x)>>7)&0x07)
+#	define DBGP_ERR_BAD	1
+#	define DBGP_ERR_SIGNAL	2
+#define DBGP_ERROR	(1<<6)
+#define DBGP_GO		(1<<5)
+#define DBGP_OUT	(1<<4)
+#define DBGP_LEN(x)	(((x)>>0)&0x0f)
+	u32	pids;
+#define DBGP_PID_GET(x)		(((x)>>16)&0xff)
+#define DBGP_PID_SET(data, tok)	(((data)<<8)|(tok))
+	u32	data03;
+	u32	data47;
+	u32	address;
+#define DBGP_EPADDR(dev, ep)	(((dev)<<8)|(ep))
+} __attribute__ ((packed));
+
+
+#define	QTD_NEXT(dma)	cpu_to_le32((u32)dma)
+
+/*
+ * EHCI Specification 0.95 Section 3.5
+ * QTD: describe data transfer components (buffer, direction, ...)
+ * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
+ *
+ * These are associated only with "QH" (Queue Head) structures,
+ * used with control, bulk, and interrupt transfers.
+ */
+struct ehci_qtd {
+	/* first part defined by EHCI spec */
+	__le32			hw_next;		/* see EHCI 3.5.1 */
+	__le32			hw_alt_next;		/* see EHCI 3.5.2 */
+	__le32			hw_token;		/* see EHCI 3.5.3 */
+#define	QTD_TOGGLE	(1 << 31)	/* data toggle */
+#define	QTD_LENGTH(tok)	(((tok)>>16) & 0x7fff)
+#define	QTD_IOC		(1 << 15)	/* interrupt on complete */
+#define	QTD_CERR(tok)	(((tok)>>10) & 0x3)
+#define	QTD_PID(tok)	(((tok)>>8) & 0x3)
+#define	QTD_STS_ACTIVE	(1 << 7)	/* HC may execute this */
+#define	QTD_STS_HALT	(1 << 6)	/* halted on error */
+#define	QTD_STS_DBE	(1 << 5)	/* data buffer error (in HC) */
+#define	QTD_STS_BABBLE	(1 << 4)	/* device was babbling (qtd halted) */
+#define	QTD_STS_XACT	(1 << 3)	/* device gave illegal response */
+#define	QTD_STS_MMF	(1 << 2)	/* incomplete split transaction */
+#define	QTD_STS_STS	(1 << 1)	/* split transaction state */
+#define	QTD_STS_PING	(1 << 0)	/* issue PING? */
+	__le32			hw_buf[5];		/* see EHCI 3.5.4 */
+	__le32			hw_buf_hi[5];		/* Appendix B */
+
+	/* the rest is HCD-private */
+	dma_addr_t		qtd_dma;		/* qtd address */
+	struct list_head	qtd_list;		/* sw qtd list */
+	struct urb		*urb;			/* qtd's urb */
+	size_t			length;			/* length of buffer */
+
+	u32			qtd_buffer_len;
+	void			*buffer;
+	dma_addr_t		buffer_dma;
+	void			*transfer_buffer;
+	void			*transfer_dma;
+} __attribute__ ((aligned(32)));
+
+/* mask NakCnt+T in qh->hw_alt_next */
+#define QTD_MASK __constant_cpu_to_le32 (~0x1f)
+
+#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
+
+/* Type tag from {qh, itd, sitd, fstn}->hw_next */
+#define Q_NEXT_TYPE(dma) ((dma) & __constant_cpu_to_le32 (3 << 1))
+
+/* values for that type tag */
+#define Q_TYPE_QH	__constant_cpu_to_le32 (1 << 1)
+
+/* next async queue entry, or pointer to interrupt/periodic QH */
+#define	QH_NEXT(dma)	(cpu_to_le32(((u32)dma)&~0x01f)|Q_TYPE_QH)
+
+/* for periodic/async schedules and qtd lists, mark end of list */
+#define	EHCI_LIST_END	__constant_cpu_to_le32(1) /* "null pointer" to hw */
+
+/*
+ * Entries in periodic shadow table are pointers to one of four kinds
+ * of data structure.  That's dictated by the hardware; a type tag is
+ * encoded in the low bits of the hardware's periodic schedule.  Use
+ * Q_NEXT_TYPE to get the tag.
+ *
+ * For entries in the async schedule, the type tag always says "qh".
+ */
+union ehci_shadow {
+	struct ehci_qh		*qh;		/* Q_TYPE_QH */
+	__le32			*hw_next;	/* (all types) */
+	void			*ptr;
+};
+
+/*
+ * EHCI Specification 0.95 Section 3.6
+ * QH: describes control/bulk/interrupt endpoints
+ * See Fig 3-7 "Queue Head Structure Layout".
+ *
+ * These appear in both the async and (for interrupt) periodic schedules.
+ */
+
+struct ehci_qh {
+	/* first part defined by EHCI spec */
+	__le32			hw_next;	 /* see EHCI 3.6.1 */
+	__le32			hw_info1;	/* see EHCI 3.6.2 */
+#define	QH_HEAD		0x00008000
+	__le32			hw_info2;	/* see EHCI 3.6.2 */
+#define	QH_SMASK	0x000000ff
+#define	QH_CMASK	0x0000ff00
+#define	QH_HUBADDR	0x007f0000
+#define	QH_HUBPORT	0x3f800000
+#define	QH_MULT		0xc0000000
+	__le32			hw_current;	 /* qtd list - see EHCI 3.6.4 */
+
+	/* qtd overlay (hardware parts of a struct ehci_qtd) */
+	__le32			hw_qtd_next;
+	__le32			hw_alt_next;
+	__le32			hw_token;
+	__le32			hw_buf[5];
+	__le32			hw_buf_hi[5];
+
+	/* the rest is HCD-private */
+	dma_addr_t		qh_dma;		/* address of qh */
+	union ehci_shadow	qh_next;	/* ptr to qh; or periodic */
+	struct list_head	qtd_list;	/* sw qtd list */
+	struct ehci_qtd		*dummy;
+	struct ehci_qh		*reclaim;	/* next to reclaim */
+
+	struct oxu_hcd		*oxu;
+	struct kref		kref;
+	unsigned		stamp;
+
+	u8			qh_state;
+#define	QH_STATE_LINKED		1		/* HC sees this */
+#define	QH_STATE_UNLINK		2		/* HC may still see this */
+#define	QH_STATE_IDLE		3		/* HC doesn't see this */
+#define	QH_STATE_UNLINK_WAIT	4		/* LINKED and on reclaim q */
+#define	QH_STATE_COMPLETING	5		/* don't touch token.HALT */
+
+	/* periodic schedule info */
+	u8			usecs;		/* intr bandwidth */
+	u8			gap_uf;		/* uframes split/csplit gap */
+	u8			c_usecs;	/* ... split completion bw */
+	u16			tt_usecs;	/* tt downstream bandwidth */
+	unsigned short		period;		/* polling interval */
+	unsigned short		start;		/* where polling starts */
+#define NO_FRAME ((unsigned short)~0)			/* pick new start */
+	struct usb_device	*dev;		/* access to TT */
+} __attribute__ ((aligned(32)));
+
+/*
+ * Proper OXU210HP structs
+ */
+
+#define OXU_OTG_CORE_OFFSET	0x00400
+#define OXU_OTG_CAP_OFFSET	(OXU_OTG_CORE_OFFSET + 0x100)
+#define OXU_SPH_CORE_OFFSET	0x00800
+#define OXU_SPH_CAP_OFFSET	(OXU_SPH_CORE_OFFSET + 0x100)
+
+#define OXU_OTG_MEM		0xE000
+#define OXU_SPH_MEM		0x16000
+
+/* Only the number of elements and the element structure are specified here. */
+/* 2 host controllers are enabled - total size <= 28 kbytes */
+#define	DEFAULT_I_TDPS		1024
+#define QHEAD_NUM		16
+#define QTD_NUM			32
+#define SITD_NUM		8
+#define MURB_NUM		8
+
+#define BUFFER_NUM		8
+#define BUFFER_SIZE		512
+
+struct oxu_info {
+	struct usb_hcd *hcd[2];
+};
+
+struct oxu_buf {
+	u8			buffer[BUFFER_SIZE];
+} __attribute__ ((aligned(BUFFER_SIZE)));
+
+struct oxu_onchip_mem {
+	struct oxu_buf		db_pool[BUFFER_NUM];
+
+	u32			frame_list[DEFAULT_I_TDPS];
+	struct ehci_qh		qh_pool[QHEAD_NUM];
+	struct ehci_qtd		qtd_pool[QTD_NUM];
+} __attribute__ ((aligned(4 << 10)));
+
+#define	EHCI_MAX_ROOT_PORTS	15		/* see HCS_N_PORTS */
+
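+/* A micro URB carries one 4 KiB chunk of a larger bulk URB: 'main' points
+ * back at the original URB and 'last' marks the final chunk of the split.
+ */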
+struct oxu_murb {
+	struct urb		urb;
+	struct urb		*main;
+	u8			last;
+};
+
+struct oxu_hcd {				/* one per controller */
+	unsigned int		is_otg:1;
+
+	u8			qh_used[QHEAD_NUM];
+	u8			qtd_used[QTD_NUM];
+	u8			db_used[BUFFER_NUM];
+	u8			murb_used[MURB_NUM];
+
+	struct oxu_onchip_mem	__iomem *mem;
+	spinlock_t		mem_lock;
+
+	struct timer_list	urb_timer;
+
+	struct ehci_caps __iomem *caps;
+	struct ehci_regs __iomem *regs;
+
+	__u32			hcs_params;	/* cached register copy */
+	spinlock_t		lock;
+
+	/* async schedule support */
+	struct ehci_qh		*async;
+	struct ehci_qh		*reclaim;
+	unsigned		reclaim_ready:1;
+	unsigned		scanning:1;
+
+	/* periodic schedule support */
+	unsigned		periodic_size;
+	__le32			*periodic;	/* hw periodic table */
+	dma_addr_t		periodic_dma;
+	unsigned		i_thresh;	/* uframes HC might cache */
+
+	union ehci_shadow	*pshadow;	/* mirror hw periodic table */
+	int			next_uframe;	/* scan periodic, start here */
+	unsigned		periodic_sched;	/* periodic activity count */
+
+	/* per root hub port */
+	unsigned long		reset_done[EHCI_MAX_ROOT_PORTS];
+	/* bit vectors (one bit per port) */
+	unsigned long		bus_suspended;	/* which ports were
+						 * already suspended at the
+						 * start of a bus suspend
+						 */
+	unsigned long		companion_ports;/* which ports are dedicated
+						 * to the companion controller
+						 */
+
+	struct timer_list	watchdog;
+	unsigned long		actions;
+	unsigned		stamp;
+	unsigned long		next_statechange;
+	u32			command;
+
+	/* SILICON QUIRKS */
+	struct list_head	urb_list;	/* this is the head to urb
+						 * queue that didn't get enough
+						 * resources
+						 */
+	struct oxu_murb		*murb_pool;	/* murb per split big urb */
+	unsigned urb_len;
+
+	u8			sbrn;		/* packed release number */
+};
+
+#define EHCI_IAA_JIFFIES	(HZ/100)	/* arbitrary; ~10 msec */
+#define EHCI_IO_JIFFIES	 	(HZ/10)		/* io watchdog > irq_thresh */
+#define EHCI_ASYNC_JIFFIES      (HZ/20)		/* async idle timeout */
+#define EHCI_SHRINK_JIFFIES     (HZ/200)	/* async qh unlink delay */
+
+enum ehci_timer_action {
+	TIMER_IO_WATCHDOG,
+	TIMER_IAA_WATCHDOG,
+	TIMER_ASYNC_SHRINK,
+	TIMER_ASYNC_OFF,
+};
+
+#include <linux/oxu210hp.h>
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index ae6e70e..75b6984 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -172,9 +172,9 @@
 	if (!mmio_resource_enabled(pdev, 0))
 		return;
 
-	base = ioremap_nocache(pci_resource_start(pdev, 0),
-				     pci_resource_len(pdev, 0));
-	if (base == NULL) return;
+	base = pci_ioremap_bar(pdev, 0);
+	if (base == NULL)
+		return;
 
 /* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
 #ifndef __hppa__
@@ -221,9 +221,9 @@
 	if (!mmio_resource_enabled(pdev, 0))
 		return;
 
-	base = ioremap_nocache(pci_resource_start(pdev, 0),
-				pci_resource_len(pdev, 0));
-	if (base == NULL) return;
+	base = pci_ioremap_bar(pdev, 0);
+	if (base == NULL)
+		return;
 
 	cap_length = readb(base);
 	op_reg_base = base + cap_length;
@@ -271,7 +271,7 @@
 			/* if boot firmware now owns EHCI, spin till
 			 * it hands it over.
 			 */
-			msec = 5000;
+			msec = 1000;
 			while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
 				tried_handoff = 1;
 				msleep(10);
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index c21f14e..3190412 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -2275,7 +2275,6 @@
 	return 0;
 }
 
-#define resource_len(r) (((r)->end - (r)->start) + 1)
 static int __init r8a66597_probe(struct platform_device *pdev)
 {
 #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
@@ -2296,11 +2295,10 @@
 		goto clean_up;
 	}
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-					   (char *)hcd_name);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
 		ret = -ENODEV;
-		dev_err(&pdev->dev, "platform_get_resource_byname error.\n");
+		dev_err(&pdev->dev, "platform_get_resource error.\n");
 		goto clean_up;
 	}
 
@@ -2315,7 +2313,7 @@
 	irq = ires->start;
 	irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
 
-	reg = ioremap(res->start, resource_len(res));
+	reg = ioremap(res->start, resource_size(res));
 	if (reg == NULL) {
 		ret = -ENOMEM;
 		dev_err(&pdev->dev, "ioremap error.\n");
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index cf5e4cf..4e22106 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -942,6 +942,8 @@
 
 #ifdef	CONFIG_PM
 	.suspend =	usb_hcd_pci_suspend,
+	.suspend_late =	usb_hcd_pci_suspend_late,
+	.resume_early =	usb_hcd_pci_resume_early,
 	.resume =	usb_hcd_pci_resume,
 #endif	/* PM */
 };
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 885867a..4541dfc 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -350,17 +350,16 @@
 static int mts_scsi_host_reset(struct scsi_cmnd *srb)
 {
 	struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
-	int result, rc;
+	int result;
 
 	MTS_DEBUG_GOT_HERE();
 	mts_debug_dump(desc);
 
-	rc = usb_lock_device_for_reset(desc->usb_dev, desc->usb_intf);
-	if (rc < 0)
-		return FAILED;
-	result = usb_reset_device(desc->usb_dev);
-	if (rc)
+	result = usb_lock_device_for_reset(desc->usb_dev, desc->usb_intf);
+	if (result == 0) {
+		result = usb_reset_device(desc->usb_dev);
 		usb_unlock_device(desc->usb_dev);
+	}
 	return result ? FAILED : SUCCESS;
 }
 
diff --git a/drivers/usb/misc/berry_charge.c b/drivers/usb/misc/berry_charge.c
index 24e2dc3..c05a85b 100644
--- a/drivers/usb/misc/berry_charge.c
+++ b/drivers/usb/misc/berry_charge.c
@@ -123,6 +123,11 @@
 {
 	struct usb_device *udev = interface_to_usbdev(intf);
 
+	if (udev->bus_mA < 500) {
+		dbg(&udev->dev, "Not enough bus power available to charge\n");
+		return -ENODEV;
+	}
+
 	dbg(&udev->dev, "Power is set to %dmA\n",
 	    udev->actconfig->desc.bMaxPower * 2);
 
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c
index e762beb..879a980 100644
--- a/drivers/usb/misc/emi26.c
+++ b/drivers/usb/misc/emi26.c
@@ -160,7 +160,7 @@
 			err("%s - error loading firmware: error = %d", __func__, err);
 			goto wraperr;
 		}
-	} while (i > 0);
+	} while (rec);
 
 	/* Assert reset (stop the CPU in the EMI) */
 	err = emi26_set_reset(dev,1);
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 444c69c..5f1a19d 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -192,8 +192,6 @@
 {
 	struct urb		*urb;
 
-	if (bytes < 0)
-		return NULL;
 	urb = usb_alloc_urb (0, GFP_KERNEL);
 	if (!urb)
 		return urb;
diff --git a/drivers/usb/mon/Kconfig b/drivers/usb/mon/Kconfig
index deb9ddf..f28f350 100644
--- a/drivers/usb/mon/Kconfig
+++ b/drivers/usb/mon/Kconfig
@@ -3,14 +3,13 @@
 #
 
 config USB_MON
-	bool "USB Monitor"
-	depends on USB!=n
-	default y
+	tristate "USB Monitor"
+	depends on USB
+	default y if USB=y
+	default m if USB=m
 	help
-	  If you say Y here, a component which captures the USB traffic
+	  If you select this option, a component which captures the USB traffic
 	  between peripheral-specific drivers and HC drivers will be built.
 	  For more information, see <file:Documentation/usb/usbmon.txt>.
 
-	  This is somewhat experimental at this time, but it should be safe.
-
-	  If unsure, say Y.
+	  If unsure, say Y (if allowed), otherwise M.
diff --git a/drivers/usb/mon/Makefile b/drivers/usb/mon/Makefile
index 0f76ed5..c6516b5 100644
--- a/drivers/usb/mon/Makefile
+++ b/drivers/usb/mon/Makefile
@@ -4,5 +4,4 @@
 
 usbmon-objs	:= mon_main.o mon_stat.o mon_text.o mon_bin.o mon_dma.o
 
-# This does not use CONFIG_USB_MON because we want this to use a tristate.
-obj-$(CONFIG_USB)	+= usbmon.o
+obj-$(CONFIG_USB_MON)	+= usbmon.o
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 4b9542b..5af7379 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -11,7 +11,7 @@
 	depends on (USB || USB_GADGET) && HAVE_CLK
 	depends on !SUPERH
 	select TWL4030_USB if MACH_OMAP_3430SDP
-	tristate 'Inventra Highspeed Dual Role Controller (TI, ...)'
+	tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
 	help
 	  Say Y here if your system has a dual role high speed USB
 	  controller based on the Mentor Graphics silicon IP.  Then
@@ -22,6 +22,9 @@
 	  Texas Instruments parts using this IP include DaVinci 644x,
 	  OMAP 243x, OMAP 343x, and TUSB 6010.
 
+	  Analog Devices parts using this IP include Blackfin BF54x,
+	  BF525 and BF527.
+
 	  If you do not know what this is, please say N.
 
 	  To compile this driver as a module, choose M here; the
@@ -33,6 +36,8 @@
 	default y if ARCH_DAVINCI
 	default y if ARCH_OMAP2430
 	default y if ARCH_OMAP34XX
+	default y if (BF54x && !BF544)
+	default y if (BF52x && !BF522 && !BF523)
 
 comment "DaVinci 644x USB support"
 	depends on USB_MUSB_HDRC && ARCH_DAVINCI
@@ -43,6 +48,9 @@
 comment "OMAP 343x high speed USB support"
 	depends on USB_MUSB_HDRC && ARCH_OMAP34XX
 
+comment "Blackfin high speed USB support"
+	depends on USB_MUSB_HDRC && ((BF54x && !BF544) || (BF52x && !BF522 && !BF523))
+
 config USB_TUSB6010
 	boolean "TUSB 6010 support"
 	depends on USB_MUSB_HDRC && !USB_MUSB_SOC
@@ -142,7 +150,7 @@
 config USB_INVENTRA_DMA
 	bool
 	depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
-	default ARCH_OMAP2430 || ARCH_OMAP34XX
+	default ARCH_OMAP2430 || ARCH_OMAP34XX || BLACKFIN
 	help
 	  Enable DMA transfers using Mentor's engine.
 
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index b6af0d6..85710cc 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -22,6 +22,14 @@
 	musb_hdrc-objs	+= omap2430.o
 endif
 
+ifeq ($(CONFIG_BF54x),y)
+	musb_hdrc-objs	+= blackfin.o
+endif
+
+ifeq ($(CONFIG_BF52x),y)
+	musb_hdrc-objs	+= blackfin.o
+endif
+
 ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y)
 	musb_hdrc-objs		+= musb_gadget_ep0.o musb_gadget.o
 endif
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
new file mode 100644
index 0000000..7861348
--- /dev/null
+++ b/drivers/usb/musb/blackfin.c
@@ -0,0 +1,320 @@
+/*
+ * MUSB OTG controller driver for Blackfin Processors
+ *
+ * Copyright 2006-2008 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+
+#include "musb_core.h"
+#include "blackfin.h"
+
+/*
+ * Load an endpoint's FIFO
+ */
+void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
+{
+	void __iomem *fifo = hw_ep->fifo;
+	void __iomem *epio = hw_ep->regs;
+
+	prefetch((u8 *)src);
+
+	musb_writew(epio, MUSB_TXCOUNT, len);
+
+	DBG(4, "TX ep%d fifo %p count %d buf %p, epio %p\n",
+			hw_ep->epnum, fifo, len, src, epio);
+
+	dump_fifo_data(src, len);
+
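+	/* The FIFO is fed 16 bits at a time, so an odd byte count is rounded
+	 * up to a whole number of words; the outsw_8() variant handles a
+	 * source buffer that is not 16-bit aligned.
+	 */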
+	if (unlikely((unsigned long)src & 0x01))
+		outsw_8((unsigned long)fifo, src,
+			len & 0x01 ? (len >> 1) + 1 : len >> 1);
+	else
+		outsw((unsigned long)fifo, src,
+			len & 0x01 ? (len >> 1) + 1 : len >> 1);
+}
+
+/*
+ * Unload an endpoint's FIFO
+ */
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+{
+	void __iomem *fifo = hw_ep->fifo;
+	u8 epnum = hw_ep->epnum;
+	u16 dma_reg = 0;
+
+	DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+			'R', hw_ep->epnum, fifo, len, dst);
+
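+	/* On BF52x the FIFO is drained by the on-chip DMA engine: program the
+	 * destination address and count, start the channel, then busy-wait
+	 * for the per-endpoint DMA interrupt.  Other parts fall back to PIO.
+	 */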
+#ifdef CONFIG_BF52x
+	invalidate_dcache_range((unsigned int)dst,
+		(unsigned int)(dst + len));
+
+	/* Setup DMA address register */
+	dma_reg = (u16) ((u32) dst & 0xFFFF);
+	bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
+	SSYNC();
+
+	dma_reg = (u16) (((u32) dst >> 16) & 0xFFFF);
+	bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
+	SSYNC();
+
+	/* Setup DMA count register */
+	bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_LOW), len);
+	bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_HIGH), 0);
+	SSYNC();
+
+	/* Enable the DMA */
+	dma_reg = (epnum << 4) | DMA_ENA | INT_ENA;
+	bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg);
+	SSYNC();
+
+	/* Wait for the DMA to complete */
+	while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum)))
+		cpu_relax();
+
+	/* acknowledge dma interrupt */
+	bfin_write_USB_DMA_INTERRUPT(1 << epnum);
+	SSYNC();
+
+	/* Reset DMA */
+	bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), 0);
+	SSYNC();
+#else
+	if (unlikely((unsigned long)dst & 0x01))
+		insw_8((unsigned long)fifo, dst,
+			len & 0x01 ? (len >> 1) + 1 : len >> 1);
+	else
+		insw((unsigned long)fifo, dst,
+			len & 0x01 ? (len >> 1) + 1 : len >> 1);
+#endif
+
+	dump_fifo_data(dst, len);
+}
+
+static irqreturn_t blackfin_interrupt(int irq, void *__hci)
+{
+	unsigned long	flags;
+	irqreturn_t	retval = IRQ_NONE;
+	struct musb	*musb = __hci;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+	musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+	musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+	if (musb->int_usb || musb->int_tx || musb->int_rx) {
+		musb_writeb(musb->mregs, MUSB_INTRUSB, musb->int_usb);
+		musb_writew(musb->mregs, MUSB_INTRTX, musb->int_tx);
+		musb_writew(musb->mregs, MUSB_INTRRX, musb->int_rx);
+		retval = musb_interrupt(musb);
+	}
+
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	/* REVISIT we sometimes get spurious IRQs on g_ep0;
+	 * not clear why... seen on BF54x too.
+	 */
+	if (retval != IRQ_HANDLED)
+		DBG(5, "spurious?\n");
+
+	return IRQ_HANDLED;
+}
+
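+/*
+ * Host-side connection detection is done by polling: the timer below
+ * periodically sets the SESSION bit and then samples DEVCTL to see
+ * whether a B-device has been detected, driving VRSEL accordingly.
+ * (Presumably this works around the lack of a usable VBUS/ID interrupt
+ * on these parts.)
+ */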
+static void musb_conn_timer_handler(unsigned long _musb)
+{
+	struct musb *musb = (void *)_musb;
+	unsigned long flags;
+	u16 val;
+
+	spin_lock_irqsave(&musb->lock, flags);
+	switch (musb->xceiv.state) {
+	case OTG_STATE_A_IDLE:
+	case OTG_STATE_A_WAIT_BCON:
+		/* Start a new session */
+		val = musb_readw(musb->mregs, MUSB_DEVCTL);
+		val |= MUSB_DEVCTL_SESSION;
+		musb_writew(musb->mregs, MUSB_DEVCTL, val);
+
+		val = musb_readw(musb->mregs, MUSB_DEVCTL);
+		if (!(val & MUSB_DEVCTL_BDEVICE)) {
+			gpio_set_value(musb->config->gpio_vrsel, 1);
+			musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+		} else {
+			gpio_set_value(musb->config->gpio_vrsel, 0);
+
+			/* Ignore VBUSERROR and SUSPEND IRQ */
+			val = musb_readb(musb->mregs, MUSB_INTRUSBE);
+			val &= ~MUSB_INTR_VBUSERROR;
+			musb_writeb(musb->mregs, MUSB_INTRUSBE, val);
+
+			val = MUSB_INTR_SUSPEND | MUSB_INTR_VBUSERROR;
+			musb_writeb(musb->mregs, MUSB_INTRUSB, val);
+
+			val = MUSB_POWER_HSENAB;
+			musb_writeb(musb->mregs, MUSB_POWER, val);
+		}
+		mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
+		break;
+
+	default:
+		DBG(1, "%s state not handled\n", otg_state_string(musb));
+		break;
+	}
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	DBG(4, "state is %s\n", otg_state_string(musb));
+}
+
+void musb_platform_enable(struct musb *musb)
+{
+	if (is_host_enabled(musb)) {
+		mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
+		musb->a_wait_bcon = TIMER_DELAY;
+	}
+}
+
+void musb_platform_disable(struct musb *musb)
+{
+}
+
+static void bfin_vbus_power(struct musb *musb, int is_on, int sleeping)
+{
+}
+
+static void bfin_set_vbus(struct musb *musb, int is_on)
+{
+	if (is_on)
+		gpio_set_value(musb->config->gpio_vrsel, 1);
+	else
+		gpio_set_value(musb->config->gpio_vrsel, 0);
+
+	DBG(1, "VBUS %s, devctl %02x "
+		/* otg %3x conf %08x prcm %08x */ "\n",
+		otg_state_string(musb),
+		musb_readb(musb->mregs, MUSB_DEVCTL));
+}
+
+static int bfin_set_power(struct otg_transceiver *x, unsigned mA)
+{
+	return 0;
+}
+
+void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+{
+	if (is_host_enabled(musb))
+		mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
+}
+
+int musb_platform_get_vbus_status(struct musb *musb)
+{
+	return 0;
+}
+
+void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+{
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+
+	/*
+	 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
+	 * and OTG HOST modes, while rev 1.1 and greater require PE7 to
+	 * be low for DEVICE mode and high for HOST mode.  The pin is
+	 * initialized low here and driven high by the connection timer
+	 * once a host session starts.
+	 */
+
+	if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
+		printk(KERN_ERR "Failed to request USB_VRSEL GPIO_%d\n",
+			musb->config->gpio_vrsel);
+		return -ENODEV;
+	}
+	gpio_direction_output(musb->config->gpio_vrsel, 0);
+
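+	/*
+	 * Silicon anomaly workarounds: the ANOMALY_* flags come from the
+	 * Blackfin anomaly list and, when set, require fixed values to be
+	 * written to the analog PHY calibration/control registers.
+	 */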
+	if (ANOMALY_05000346) {
+		bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
+		SSYNC();
+	}
+
+	if (ANOMALY_05000347) {
+		bfin_write_USB_APHY_CNTRL(0x0);
+		SSYNC();
+	}
+
+	/* TODO
+	 * Set SIC-IVG register
+	 */
+
+	/* Configure PLL oscillator register */
+	bfin_write_USB_PLLOSC_CTRL(0x30a8);
+	SSYNC();
+
+	bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1);
+	SSYNC();
+
+	bfin_write_USB_EP_NI0_RXMAXP(64);
+	SSYNC();
+
+	bfin_write_USB_EP_NI0_TXMAXP(64);
+	SSYNC();
+
+	/* Route INTRUSB/INTR_RX/INTR_TX to USB_INT0 */
+	bfin_write_USB_GLOBINTR(0x7);
+	SSYNC();
+
+	bfin_write_USB_GLOBAL_CTL(GLOBAL_ENA | EP1_TX_ENA | EP2_TX_ENA |
+				EP3_TX_ENA | EP4_TX_ENA | EP5_TX_ENA |
+				EP6_TX_ENA | EP7_TX_ENA | EP1_RX_ENA |
+				EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA |
+				EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA);
+	SSYNC();
+
+	if (is_host_enabled(musb)) {
+		musb->board_set_vbus = bfin_set_vbus;
+		setup_timer(&musb_conn_timer,
+			musb_conn_timer_handler, (unsigned long) musb);
+	}
+	if (is_peripheral_enabled(musb))
+		musb->xceiv.set_power = bfin_set_power;
+
+	musb->isr = blackfin_interrupt;
+
+	return 0;
+}
+
+int musb_platform_suspend(struct musb *musb)
+{
+	return 0;
+}
+
+int musb_platform_resume(struct musb *musb)
+{
+	return 0;
+}
+
+int musb_platform_exit(struct musb *musb)
+{
+
+	bfin_vbus_power(musb, 0 /*off*/, 1);
+	gpio_free(musb->config->gpio_vrsel);
+	musb_platform_suspend(musb);
+
+	return 0;
+}
diff --git a/drivers/usb/musb/blackfin.h b/drivers/usb/musb/blackfin.h
new file mode 100644
index 0000000..a240c1e
--- /dev/null
+++ b/drivers/usb/musb/blackfin.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2007 by Analog Devices, Inc.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ */
+
+#ifndef __MUSB_BLACKFIN_H__
+#define __MUSB_BLACKFIN_H__
+
+/*
+ * Blackfin specific definitions
+ */
+
+#undef DUMP_FIFO_DATA
+#ifdef DUMP_FIFO_DATA
+static void dump_fifo_data(u8 *buf, u16 len)
+{
+	u8 *tmp = buf;
+	int i;
+
+	for (i = 0; i < len; i++) {
+		if (!(i % 16) && i)
+			pr_debug("\n");
+		pr_debug("%02x ", *tmp++);
+	}
+	pr_debug("\n");
+}
+#else
+#define dump_fifo_data(buf, len)	do {} while (0)
+#endif
+
+#ifdef CONFIG_BF52x
+
+#define USB_DMA_BASE		USB_DMA_INTERRUPT
+#define USB_DMAx_CTRL		0x04
+#define USB_DMAx_ADDR_LOW	0x08
+#define USB_DMAx_ADDR_HIGH	0x0C
+#define USB_DMAx_COUNT_LOW	0x10
+#define USB_DMAx_COUNT_HIGH	0x14
+
+#define USB_DMA_REG(ep, reg)	(USB_DMA_BASE + 0x20 * ep + reg)
+#endif
+
+/* Connection polling interval: 1 second */
+#define TIMER_DELAY	(1 * HZ)
+
+static struct timer_list musb_conn_timer;
+
+#endif	/* __MUSB_BLACKFIN_H__ */
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index dfb3bcb..0d566dc 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -32,9 +32,9 @@
 #include <linux/io.h>
 #include <linux/gpio.h>
 
-#include <asm/arch/hardware.h>
-#include <asm/arch/memory.h>
-#include <asm/arch/gpio.h>
+#include <mach/hardware.h>
+#include <mach/memory.h>
+#include <mach/gpio.h>
 #include <asm/mach-types.h>
 
 #include "musb_core.h"
@@ -364,6 +364,12 @@
 	return IRQ_HANDLED;
 }
 
+int musb_platform_set_mode(struct musb *musb, u8 mode)
+{
+	/* EVM can't do this (right?) */
+	return -EIO;
+}
+
 int __init musb_platform_init(struct musb *musb)
 {
 	void __iomem	*tibase = musb->ctrl_base;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 5280dba..6c7faac 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -148,7 +148,8 @@
 
 /*-------------------------------------------------------------------------*/
 
-#ifndef CONFIG_USB_TUSB6010
+#if !defined(CONFIG_USB_TUSB6010) && !defined(CONFIG_BLACKFIN)
+
 /*
  * Load an endpoint's FIFO
  */
@@ -1124,25 +1125,25 @@
 #endif
 	switch (cfg->style) {
 	case FIFO_TX:
-		musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
-		musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+		musb_write_txfifosz(mbase, c_size);
+		musb_write_txfifoadd(mbase, c_off);
 		hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
 		hw_ep->max_packet_sz_tx = maxpacket;
 		break;
 	case FIFO_RX:
-		musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
-		musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+		musb_write_rxfifosz(mbase, c_size);
+		musb_write_rxfifoadd(mbase, c_off);
 		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
 		hw_ep->max_packet_sz_rx = maxpacket;
 		break;
 	case FIFO_RXTX:
-		musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
-		musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+		musb_write_txfifosz(mbase, c_size);
+		musb_write_txfifoadd(mbase, c_off);
 		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
 		hw_ep->max_packet_sz_rx = maxpacket;
 
-		musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
-		musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+		musb_write_rxfifosz(mbase, c_size);
+		musb_write_rxfifoadd(mbase, c_off);
 		hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
 		hw_ep->max_packet_sz_tx = maxpacket;
 
@@ -1212,7 +1213,7 @@
 		if (epn >= musb->config->num_eps) {
 			pr_debug("%s: invalid ep %d\n",
 					musb_driver_name, epn);
-			continue;
+			return -EINVAL;
 		}
 		offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
 		if (offset < 0) {
@@ -1246,9 +1247,10 @@
  */
 static int __init ep_config_from_hw(struct musb *musb)
 {
-	u8 epnum = 0, reg;
+	u8 epnum = 0;
 	struct musb_hw_ep *hw_ep;
 	void *mbase = musb->mregs;
+	int ret = 0;
 
 	DBG(2, "<== static silicon ep config\n");
 
@@ -1258,26 +1260,9 @@
 		musb_ep_select(mbase, epnum);
 		hw_ep = musb->endpoints + epnum;
 
-		/* read from core using indexed model */
-		reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE);
-		if (!reg) {
-			/* 0's returned when no more endpoints */
+		ret = musb_read_fifosize(musb, hw_ep, epnum);
+		if (ret < 0)
 			break;
-		}
-		musb->nr_endpoints++;
-		musb->epmask |= (1 << epnum);
-
-		hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f);
-
-		/* shared TX/RX FIFO? */
-		if ((reg & 0xf0) == 0xf0) {
-			hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx;
-			hw_ep->is_shared_fifo = true;
-			continue;
-		} else {
-			hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4);
-			hw_ep->is_shared_fifo = false;
-		}
 
 		/* FIXME set up hw_ep->{rx,tx}_double_buffered */
 
@@ -1326,7 +1311,7 @@
 
 	/* log core options (read using indexed model) */
 	musb_ep_select(mbase, 0);
-	reg = musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
+	reg = musb_read_configdata(mbase);
 
 	strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
 	if (reg & MUSB_CONFIGDATA_DYNFIFO)
@@ -1391,7 +1376,7 @@
 	}
 
 	/* log release info */
-	hwvers = musb_readw(mbase, MUSB_HWVERS);
+	hwvers = musb_read_hwvers(mbase);
 	rev_major = (hwvers >> 10) & 0x1f;
 	rev_minor = hwvers & 0x3ff;
 	snprintf(aRevision, 32, "%d.%d%s", rev_major,
@@ -1400,8 +1385,7 @@
 			musb_driver_name, type, aRevision, aDate);
 
 	/* configure ep0 */
-	musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
-	musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
+	musb_configure_ep0(musb);
 
 	/* discover endpoint configuration */
 	musb->nr_endpoints = 1;
@@ -1445,7 +1429,7 @@
 
 		hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase;
 #ifdef CONFIG_USB_MUSB_HDRC_HCD
-		hw_ep->target_regs = MUSB_BUSCTL_OFFSET(i, 0) + mbase;
+		hw_ep->target_regs = musb_read_target_reg_base(i, mbase);
 		hw_ep->rx_reinit = 1;
 		hw_ep->tx_reinit = 1;
 #endif
@@ -1671,17 +1655,20 @@
 {
 	struct musb	*musb = dev_to_musb(dev);
 	unsigned long	flags;
+	int		status;
 
 	spin_lock_irqsave(&musb->lock, flags);
-	if (!strncmp(buf, "host", 4))
-		musb_platform_set_mode(musb, MUSB_HOST);
-	if (!strncmp(buf, "peripheral", 10))
-		musb_platform_set_mode(musb, MUSB_PERIPHERAL);
-	if (!strncmp(buf, "otg", 3))
-		musb_platform_set_mode(musb, MUSB_OTG);
+	if (sysfs_streq(buf, "host"))
+		status = musb_platform_set_mode(musb, MUSB_HOST);
+	else if (sysfs_streq(buf, "peripheral"))
+		status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
+	else if (sysfs_streq(buf, "otg"))
+		status = musb_platform_set_mode(musb, MUSB_OTG);
+	else
+		status = -EINVAL;
 	spin_unlock_irqrestore(&musb->lock, flags);
 
-	return n;
+	return (status == 0) ? n : status;
 }
 static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
 
@@ -1781,7 +1768,7 @@
 #ifdef CONFIG_USB_MUSB_HDRC_HCD
 	struct usb_hcd	*hcd;
 
-	hcd = usb_create_hcd(&musb_hc_driver, dev, dev->bus_id);
+	hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
 	if (!hcd)
 		return NULL;
 	/* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
@@ -1810,7 +1797,6 @@
 	for (epnum = 0, ep = musb->endpoints;
 			epnum < musb->config->num_eps;
 			epnum++, ep++) {
-
 		ep->musb = musb;
 		ep->epnum = epnum;
 	}
@@ -1838,7 +1824,7 @@
 	musb_gadget_cleanup(musb);
 #endif
 
-	if (musb->nIrq >= 0) {
+	if (musb->nIrq >= 0 && musb->irq_wake) {
 		disable_irq_wake(musb->nIrq);
 		free_irq(musb->nIrq, musb);
 	}
@@ -1984,15 +1970,19 @@
 	INIT_WORK(&musb->irq_work, musb_irq_work);
 
 	/* attach to the IRQ */
-	if (request_irq(nIrq, musb->isr, 0, dev->bus_id, musb)) {
+	if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) {
 		dev_err(dev, "request_irq %d failed!\n", nIrq);
 		status = -ENODEV;
 		goto fail2;
 	}
 	musb->nIrq = nIrq;
 /* FIXME this handles wakeup irqs wrong */
-	if (enable_irq_wake(nIrq) == 0)
+	if (enable_irq_wake(nIrq) == 0) {
+		musb->irq_wake = 1;
 		device_init_wakeup(dev, 1);
+	} else {
+		musb->irq_wake = 0;
+	}
 
 	pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n",
 			musb_driver_name,
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 8222725..630946a 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -191,7 +191,7 @@
  */
 
 #if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
-		|| defined(CONFIG_ARCH_OMAP3430)
+		|| defined(CONFIG_ARCH_OMAP3430) || defined(CONFIG_BLACKFIN)
 /* REVISIT indexed access seemed to
  * misbehave (on DaVinci) for at least peripheral IN ...
  */
@@ -359,6 +359,7 @@
 	struct otg_transceiver	xceiv;
 
 	int nIrq;
+	unsigned		irq_wake:1;
 
 	struct musb_hw_ep	 endpoints[MUSB_C_NUM_EPS];
 #define control_ep		endpoints
@@ -447,6 +448,70 @@
 }
 #endif
 
+#ifdef CONFIG_BLACKFIN
+static inline int musb_read_fifosize(struct musb *musb,
+		struct musb_hw_ep *hw_ep, u8 epnum)
+{
+	musb->nr_endpoints++;
+	musb->epmask |= (1 << epnum);
+
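+	/*
+	 * The FIFOSIZE register is not usable here, so packet limits are
+	 * hard-coded: endpoints numbered below 5 get 128-byte FIFOs, the
+	 * higher ones 1024 bytes (assumed fixed FIFO sizes on this core).
+	 */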
+	if (epnum < 5) {
+		hw_ep->max_packet_sz_tx = 128;
+		hw_ep->max_packet_sz_rx = 128;
+	} else {
+		hw_ep->max_packet_sz_tx = 1024;
+		hw_ep->max_packet_sz_rx = 1024;
+	}
+	hw_ep->is_shared_fifo = false;
+
+	return 0;
+}
+
+static inline void musb_configure_ep0(struct musb *musb)
+{
+	musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
+	musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
+	musb->endpoints[0].is_shared_fifo = true;
+}
+
+#else
+
+static inline int musb_read_fifosize(struct musb *musb,
+		struct musb_hw_ep *hw_ep, u8 epnum)
+{
+	u8 reg = 0;
+
+	/* read from core using indexed model */
+	reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE);
+	/* 0's returned when no more endpoints */
+	if (!reg)
+		return -ENODEV;
+
+	musb->nr_endpoints++;
+	musb->epmask |= (1 << epnum);
+
+	hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f);
+
+	/* shared TX/RX FIFO? */
+	if ((reg & 0xf0) == 0xf0) {
+		hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx;
+		hw_ep->is_shared_fifo = true;
+		return 0;
+	} else {
+		hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4);
+		hw_ep->is_shared_fifo = false;
+	}
+
+	return 0;
+}
+
+static inline void musb_configure_ep0(struct musb *musb)
+{
+	musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
+	musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
+}
+#endif /* CONFIG_BLACKFIN */
+
 
 /***************************** Glue it together *****************************/
 
@@ -467,16 +532,16 @@
 
 extern void musb_hnp_stop(struct musb *musb);
 
-extern void musb_platform_set_mode(struct musb *musb, u8 musb_mode);
+extern int musb_platform_set_mode(struct musb *musb, u8 musb_mode);
 
-#if defined(CONFIG_USB_TUSB6010) || \
+#if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN) || \
 	defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
 extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout);
 #else
 #define musb_platform_try_idle(x, y)		do {} while (0)
 #endif
 
-#ifdef CONFIG_USB_TUSB6010
+#if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN)
 extern int musb_platform_get_vbus_status(struct musb *musb);
 #else
 #define musb_platform_get_vbus_status(x)	0
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index d6a802c..6197dae 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1633,7 +1633,7 @@
 	musb->g.speed = USB_SPEED_UNKNOWN;
 
 	/* this "gadget" abstracts/virtualizes the controller */
-	strcpy(musb->g.dev.bus_id, "gadget");
+	dev_set_name(&musb->g.dev, "gadget");
 	musb->g.dev.parent = musb->controller;
 	musb->g.dev.dma_mask = musb->controller->dma_mask;
 	musb->g.dev.release = musb_gadget_release;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index cc64462..99fa612 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -112,18 +112,21 @@
 {
 	void __iomem	*epio = ep->regs;
 	u16		csr;
+	u16		lastcsr = 0;
 	int		retries = 1000;
 
 	csr = musb_readw(epio, MUSB_TXCSR);
 	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
-		DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
+		if (csr != lastcsr)
+			DBG(3, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
+		lastcsr = csr;
 		csr |= MUSB_TXCSR_FLUSHFIFO;
 		musb_writew(epio, MUSB_TXCSR, csr);
 		csr = musb_readw(epio, MUSB_TXCSR);
-		if (retries-- < 1) {
-			ERR("Could not flush host TX fifo: csr: %04x\n", csr);
+		if (WARN(retries-- < 1,
+				"Could not flush host TX%d fifo: csr: %04x\n",
+				ep->epnum, csr))
 			return;
-		}
 		mdelay(1);
 	}
 }
@@ -268,7 +271,7 @@
 __releases(musb->lock)
 __acquires(musb->lock)
 {
-	DBG(({ int level; switch (urb->status) {
+	DBG(({ int level; switch (status) {
 				case 0:
 					level = 4;
 					break;
@@ -283,8 +286,8 @@
 					level = 2;
 					break;
 				}; level; }),
-			"complete %p (%d), dev%d ep%d%s, %d/%d\n",
-			urb, urb->status,
+			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
+			urb, urb->complete, status,
 			usb_pipedevice(urb->pipe),
 			usb_pipeendpoint(urb->pipe),
 			usb_pipein(urb->pipe) ? "in" : "out",
@@ -593,12 +596,10 @@
 
 	/* target addr and (for multipoint) hub addr/port */
 	if (musb->is_multipoint) {
-		musb_writeb(ep->target_regs, MUSB_RXFUNCADDR,
-			qh->addr_reg);
-		musb_writeb(ep->target_regs, MUSB_RXHUBADDR,
-			qh->h_addr_reg);
-		musb_writeb(ep->target_regs, MUSB_RXHUBPORT,
-			qh->h_port_reg);
+		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
+		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
+		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);
+
 	} else
 		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
 
@@ -712,15 +713,9 @@
 
 		/* target addr and (for multipoint) hub addr/port */
 		if (musb->is_multipoint) {
-			musb_writeb(mbase,
-				MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR),
-				qh->addr_reg);
-			musb_writeb(mbase,
-				MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR),
-				qh->h_addr_reg);
-			musb_writeb(mbase,
-				MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT),
-				qh->h_port_reg);
+			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
+			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
+			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
 /* FIXME if !epnum, do the same for RX ... */
 		} else
 			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
@@ -988,8 +983,10 @@
 		if (fifo_count) {
 			fifo_dest = (u8 *) (urb->transfer_buffer
 					+ urb->actual_length);
-			DBG(3, "Sending %d bytes to %p\n",
-					fifo_count, fifo_dest);
+			DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
+					fifo_count,
+					(fifo_count == 1) ? "" : "s",
+					fifo_dest);
 			musb_write_fifo(hw_ep, fifo_count, fifo_dest);
 
 			urb->actual_length += fifo_count;
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 223f0a5..b06e9ef 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -39,7 +39,7 @@
 
 #if !defined(CONFIG_ARM) && !defined(CONFIG_SUPERH) \
 	&& !defined(CONFIG_AVR32) && !defined(CONFIG_PPC32) \
-	&& !defined(CONFIG_PPC64)
+	&& !defined(CONFIG_PPC64) && !defined(CONFIG_BLACKFIN)
 static inline void readsl(const void __iomem *addr, void *buf, int len)
 	{ insl((unsigned long)addr, buf, len); }
 static inline void readsw(const void __iomem *addr, void *buf, int len)
@@ -56,6 +56,8 @@
 
 #endif
 
+#ifndef CONFIG_BLACKFIN
+
 /* NOTE:  these offsets are all in bytes */
 
 static inline u16 musb_readw(const void __iomem *addr, unsigned offset)
@@ -114,4 +116,26 @@
 
 #endif	/* CONFIG_USB_TUSB6010 */
 
+#else
+
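+/*
+ * All MUSB register accesses on Blackfin go through the 16-bit MMR
+ * helpers; the 8-bit and 32-bit accessors below are emulated on top of
+ * 16-bit reads and writes (so musb_readl/musb_writel only move 16 bits).
+ */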
+static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+	{ return (u8) (bfin_read16(addr + offset)); }
+
+static inline u16 musb_readw(const void __iomem *addr, unsigned offset)
+	{ return bfin_read16(addr + offset); }
+
+static inline u32 musb_readl(const void __iomem *addr, unsigned offset)
+	{ return (u32) (bfin_read16(addr + offset)); }
+
+static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+	{ bfin_write16(addr + offset, (u16) data); }
+
+static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data)
+	{ bfin_write16(addr + offset, data); }
+
+static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
+	{ bfin_write16(addr + offset, (u16) data); }
+
+#endif /* CONFIG_BLACKFIN */
+
 #endif
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 9c22866..de3b2f1 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -38,97 +38,6 @@
 #define MUSB_EP0_FIFOSIZE	64	/* This is non-configurable */
 
 /*
- * Common USB registers
- */
-
-#define MUSB_FADDR		0x00	/* 8-bit */
-#define MUSB_POWER		0x01	/* 8-bit */
-
-#define MUSB_INTRTX		0x02	/* 16-bit */
-#define MUSB_INTRRX		0x04
-#define MUSB_INTRTXE		0x06
-#define MUSB_INTRRXE		0x08
-#define MUSB_INTRUSB		0x0A	/* 8 bit */
-#define MUSB_INTRUSBE		0x0B	/* 8 bit */
-#define MUSB_FRAME		0x0C
-#define MUSB_INDEX		0x0E	/* 8 bit */
-#define MUSB_TESTMODE		0x0F	/* 8 bit */
-
-/* Get offset for a given FIFO from musb->mregs */
-#ifdef	CONFIG_USB_TUSB6010
-#define MUSB_FIFO_OFFSET(epnum)	(0x200 + ((epnum) * 0x20))
-#else
-#define MUSB_FIFO_OFFSET(epnum)	(0x20 + ((epnum) * 4))
-#endif
-
-/*
- * Additional Control Registers
- */
-
-#define MUSB_DEVCTL		0x60	/* 8 bit */
-
-/* These are always controlled through the INDEX register */
-#define MUSB_TXFIFOSZ		0x62	/* 8-bit (see masks) */
-#define MUSB_RXFIFOSZ		0x63	/* 8-bit (see masks) */
-#define MUSB_TXFIFOADD		0x64	/* 16-bit offset shifted right 3 */
-#define MUSB_RXFIFOADD		0x66	/* 16-bit offset shifted right 3 */
-
-/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
-#define MUSB_HWVERS		0x6C	/* 8 bit */
-
-#define MUSB_EPINFO		0x78	/* 8 bit */
-#define MUSB_RAMINFO		0x79	/* 8 bit */
-#define MUSB_LINKINFO		0x7a	/* 8 bit */
-#define MUSB_VPLEN		0x7b	/* 8 bit */
-#define MUSB_HS_EOF1		0x7c	/* 8 bit */
-#define MUSB_FS_EOF1		0x7d	/* 8 bit */
-#define MUSB_LS_EOF1		0x7e	/* 8 bit */
-
-/* Offsets to endpoint registers */
-#define MUSB_TXMAXP		0x00
-#define MUSB_TXCSR		0x02
-#define MUSB_CSR0		MUSB_TXCSR	/* Re-used for EP0 */
-#define MUSB_RXMAXP		0x04
-#define MUSB_RXCSR		0x06
-#define MUSB_RXCOUNT		0x08
-#define MUSB_COUNT0		MUSB_RXCOUNT	/* Re-used for EP0 */
-#define MUSB_TXTYPE		0x0A
-#define MUSB_TYPE0		MUSB_TXTYPE	/* Re-used for EP0 */
-#define MUSB_TXINTERVAL		0x0B
-#define MUSB_NAKLIMIT0		MUSB_TXINTERVAL	/* Re-used for EP0 */
-#define MUSB_RXTYPE		0x0C
-#define MUSB_RXINTERVAL		0x0D
-#define MUSB_FIFOSIZE		0x0F
-#define MUSB_CONFIGDATA		MUSB_FIFOSIZE	/* Re-used for EP0 */
-
-/* Offsets to endpoint registers in indexed model (using INDEX register) */
-#define MUSB_INDEXED_OFFSET(_epnum, _offset)	\
-	(0x10 + (_offset))
-
-/* Offsets to endpoint registers in flat models */
-#define MUSB_FLAT_OFFSET(_epnum, _offset)	\
-	(0x100 + (0x10*(_epnum)) + (_offset))
-
-#ifdef CONFIG_USB_TUSB6010
-/* TUSB6010 EP0 configuration register is special */
-#define MUSB_TUSB_OFFSET(_epnum, _offset)	\
-	(0x10 + _offset)
-#include "tusb6010.h"		/* Needed "only" for TUSB_EP0_CONF */
-#endif
-
-/* "bus control"/target registers, for host side multipoint (external hubs) */
-#define MUSB_TXFUNCADDR		0x00
-#define MUSB_TXHUBADDR		0x02
-#define MUSB_TXHUBPORT		0x03
-
-#define MUSB_RXFUNCADDR		0x04
-#define MUSB_RXHUBADDR		0x06
-#define MUSB_RXHUBPORT		0x07
-
-#define MUSB_BUSCTL_OFFSET(_epnum, _offset) \
-	(0x80 + (8*(_epnum)) + (_offset))
-
-/*
  * MUSB Register bits
  */
 
@@ -228,7 +137,6 @@
 
 /* TXCSR in Peripheral and Host mode */
 #define MUSB_TXCSR_AUTOSET		0x8000
-#define MUSB_TXCSR_MODE			0x2000
 #define MUSB_TXCSR_DMAENAB		0x1000
 #define MUSB_TXCSR_FRCDATATOG		0x0800
 #define MUSB_TXCSR_DMAMODE		0x0400
@@ -297,4 +205,309 @@
 /* HUBADDR */
 #define MUSB_HUBADDR_MULTI_TT		0x80
 
+
+#ifndef CONFIG_BLACKFIN
+
+/*
+ * Common USB registers
+ */
+
+#define MUSB_FADDR		0x00	/* 8-bit */
+#define MUSB_POWER		0x01	/* 8-bit */
+
+#define MUSB_INTRTX		0x02	/* 16-bit */
+#define MUSB_INTRRX		0x04
+#define MUSB_INTRTXE		0x06
+#define MUSB_INTRRXE		0x08
+#define MUSB_INTRUSB		0x0A	/* 8 bit */
+#define MUSB_INTRUSBE		0x0B	/* 8 bit */
+#define MUSB_FRAME		0x0C
+#define MUSB_INDEX		0x0E	/* 8 bit */
+#define MUSB_TESTMODE		0x0F	/* 8 bit */
+
+/* Get offset for a given FIFO from musb->mregs */
+#ifdef	CONFIG_USB_TUSB6010
+#define MUSB_FIFO_OFFSET(epnum)	(0x200 + ((epnum) * 0x20))
+#else
+#define MUSB_FIFO_OFFSET(epnum)	(0x20 + ((epnum) * 4))
+#endif
+
+/*
+ * Additional Control Registers
+ */
+
+#define MUSB_DEVCTL		0x60	/* 8 bit */
+
+/* These are always controlled through the INDEX register */
+#define MUSB_TXFIFOSZ		0x62	/* 8-bit (see masks) */
+#define MUSB_RXFIFOSZ		0x63	/* 8-bit (see masks) */
+#define MUSB_TXFIFOADD		0x64	/* 16-bit offset shifted right 3 */
+#define MUSB_RXFIFOADD		0x66	/* 16-bit offset shifted right 3 */
+
+/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
+#define MUSB_HWVERS		0x6C	/* 8 bit */
+
+#define MUSB_EPINFO		0x78	/* 8 bit */
+#define MUSB_RAMINFO		0x79	/* 8 bit */
+#define MUSB_LINKINFO		0x7a	/* 8 bit */
+#define MUSB_VPLEN		0x7b	/* 8 bit */
+#define MUSB_HS_EOF1		0x7c	/* 8 bit */
+#define MUSB_FS_EOF1		0x7d	/* 8 bit */
+#define MUSB_LS_EOF1		0x7e	/* 8 bit */
+
+/* Offsets to endpoint registers */
+#define MUSB_TXMAXP		0x00
+#define MUSB_TXCSR		0x02
+#define MUSB_CSR0		MUSB_TXCSR	/* Re-used for EP0 */
+#define MUSB_RXMAXP		0x04
+#define MUSB_RXCSR		0x06
+#define MUSB_RXCOUNT		0x08
+#define MUSB_COUNT0		MUSB_RXCOUNT	/* Re-used for EP0 */
+#define MUSB_TXTYPE		0x0A
+#define MUSB_TYPE0		MUSB_TXTYPE	/* Re-used for EP0 */
+#define MUSB_TXINTERVAL		0x0B
+#define MUSB_NAKLIMIT0		MUSB_TXINTERVAL	/* Re-used for EP0 */
+#define MUSB_RXTYPE		0x0C
+#define MUSB_RXINTERVAL		0x0D
+#define MUSB_FIFOSIZE		0x0F
+#define MUSB_CONFIGDATA		MUSB_FIFOSIZE	/* Re-used for EP0 */
+
+/* Offsets to endpoint registers in indexed model (using INDEX register) */
+#define MUSB_INDEXED_OFFSET(_epnum, _offset)	\
+	(0x10 + (_offset))
+
+/* Offsets to endpoint registers in flat models */
+#define MUSB_FLAT_OFFSET(_epnum, _offset)	\
+	(0x100 + (0x10*(_epnum)) + (_offset))
+
+#ifdef CONFIG_USB_TUSB6010
+/* TUSB6010 EP0 configuration register is special */
+#define MUSB_TUSB_OFFSET(_epnum, _offset)	\
+	(0x10 + _offset)
+#include "tusb6010.h"		/* Needed "only" for TUSB_EP0_CONF */
+#endif
+
+#define MUSB_TXCSR_MODE			0x2000
+
+/* "bus control"/target registers, for host side multipoint (external hubs) */
+#define MUSB_TXFUNCADDR		0x00
+#define MUSB_TXHUBADDR		0x02
+#define MUSB_TXHUBPORT		0x03
+
+#define MUSB_RXFUNCADDR		0x04
+#define MUSB_RXHUBADDR		0x06
+#define MUSB_RXHUBPORT		0x07
+
+#define MUSB_BUSCTL_OFFSET(_epnum, _offset) \
+	(0x80 + (8*(_epnum)) + (_offset))
+
+static inline void musb_write_txfifosz(void __iomem *mbase, u8 c_size)
+{
+	musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
+}
+
+static inline void musb_write_txfifoadd(void __iomem *mbase, u16 c_off)
+{
+	musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+}
+
+static inline void musb_write_rxfifosz(void __iomem *mbase, u8 c_size)
+{
+	musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
+}
+
+static inline void  musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
+{
+	musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+}
+
+static inline u8 musb_read_configdata(void __iomem *mbase)
+{
+	return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
+}
+
+static inline u16 musb_read_hwvers(void __iomem *mbase)
+{
+	return musb_readw(mbase, MUSB_HWVERS);
+}
+
+static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase)
+{
+	return (MUSB_BUSCTL_OFFSET(i, 0) + mbase);
+}
+
+static inline void musb_write_rxfunaddr(void __iomem *ep_target_regs,
+		u8 qh_addr_reg)
+{
+	musb_writeb(ep_target_regs, MUSB_RXFUNCADDR, qh_addr_reg);
+}
+
+static inline void musb_write_rxhubaddr(void __iomem *ep_target_regs,
+		u8 qh_h_addr_reg)
+{
+	musb_writeb(ep_target_regs, MUSB_RXHUBADDR, qh_h_addr_reg);
+}
+
+static inline void musb_write_rxhubport(void __iomem *ep_target_regs,
+		u8 qh_h_port_reg)
+{
+	musb_writeb(ep_target_regs, MUSB_RXHUBPORT, qh_h_port_reg);
+}
+
+static inline void  musb_write_txfunaddr(void __iomem *mbase, u8 epnum,
+		u8 qh_addr_reg)
+{
+	musb_writeb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR),
+			qh_addr_reg);
+}
+
+static inline void  musb_write_txhubaddr(void __iomem *mbase, u8 epnum,
+		u8 qh_addr_reg)
+{
+	musb_writeb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR),
+			qh_addr_reg);
+}
+
+static inline void  musb_write_txhubport(void __iomem *mbase, u8 epnum,
+		u8 qh_h_port_reg)
+{
+	musb_writeb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT),
+			qh_h_port_reg);
+}
+
+#else /* CONFIG_BLACKFIN */
+
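+/*
+ * The Blackfin headers define the controller registers as absolute MMR
+ * addresses (USB_FADDR, USB_POWER, ...); USB_OFFSET() turns those back
+ * into offsets from the register base expected by the generic code.
+ */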
+#define USB_BASE		USB_FADDR
+#define USB_OFFSET(reg)		((reg) - USB_BASE)
+
+/*
+ * Common USB registers
+ */
+#define MUSB_FADDR		USB_OFFSET(USB_FADDR)	/* 8-bit */
+#define MUSB_POWER		USB_OFFSET(USB_POWER)	/* 8-bit */
+#define MUSB_INTRTX		USB_OFFSET(USB_INTRTX)	/* 16-bit */
+#define MUSB_INTRRX		USB_OFFSET(USB_INTRRX)
+#define MUSB_INTRTXE		USB_OFFSET(USB_INTRTXE)
+#define MUSB_INTRRXE		USB_OFFSET(USB_INTRRXE)
+#define MUSB_INTRUSB		USB_OFFSET(USB_INTRUSB)	/* 8 bit */
+#define MUSB_INTRUSBE		USB_OFFSET(USB_INTRUSBE)/* 8 bit */
+#define MUSB_FRAME		USB_OFFSET(USB_FRAME)
+#define MUSB_INDEX		USB_OFFSET(USB_INDEX)	/* 8 bit */
+#define MUSB_TESTMODE		USB_OFFSET(USB_TESTMODE)/* 8 bit */
+
+/* Get offset for a given FIFO from musb->mregs */
+#define MUSB_FIFO_OFFSET(epnum)	\
+	(USB_OFFSET(USB_EP0_FIFO) + ((epnum) * 8))
+
+/*
+ * Additional Control Registers
+ */
+
+#define MUSB_DEVCTL		USB_OFFSET(USB_OTG_DEV_CTL)	/* 8 bit */
+
+#define MUSB_LINKINFO		USB_OFFSET(USB_LINKINFO)/* 8 bit */
+#define MUSB_VPLEN		USB_OFFSET(USB_VPLEN)	/* 8 bit */
+#define MUSB_HS_EOF1		USB_OFFSET(USB_HS_EOF1)	/* 8 bit */
+#define MUSB_FS_EOF1		USB_OFFSET(USB_FS_EOF1)	/* 8 bit */
+#define MUSB_LS_EOF1		USB_OFFSET(USB_LS_EOF1)	/* 8 bit */
+
+/* Offsets to endpoint registers */
+#define MUSB_TXMAXP		0x00
+#define MUSB_TXCSR		0x04
+#define MUSB_CSR0		MUSB_TXCSR	/* Re-used for EP0 */
+#define MUSB_RXMAXP		0x08
+#define MUSB_RXCSR		0x0C
+#define MUSB_RXCOUNT		0x10
+#define MUSB_COUNT0		MUSB_RXCOUNT	/* Re-used for EP0 */
+#define MUSB_TXTYPE		0x14
+#define MUSB_TYPE0		MUSB_TXTYPE	/* Re-used for EP0 */
+#define MUSB_TXINTERVAL		0x18
+#define MUSB_NAKLIMIT0		MUSB_TXINTERVAL	/* Re-used for EP0 */
+#define MUSB_RXTYPE		0x1C
+#define MUSB_RXINTERVAL		0x20
+#define MUSB_TXCOUNT		0x28
+
+/* Offsets to endpoint registers in indexed model (using INDEX register) */
+#define MUSB_INDEXED_OFFSET(_epnum, _offset)	\
+	(0x40 + (_offset))
+
+/* Offsets to endpoint registers in flat models */
+#define MUSB_FLAT_OFFSET(_epnum, _offset)	\
+	(USB_OFFSET(USB_EP_NI0_TXMAXP) + (0x40 * (_epnum)) + (_offset))
+
+/* Not implemented - HW has separate Tx/Rx FIFOs */
+#define MUSB_TXCSR_MODE			0x0000
+
+/*
+ * Dummy stubs for the clk framework; they can be removed
+ * once Blackfin supports the clk framework.
+ */
+#define clk_get(dev, id)	NULL
+#define clk_put(clock)		do {} while (0)
+#define clk_enable(clock)	do {} while (0)
+#define clk_disable(clock)	do {} while (0)
+
+static inline void musb_write_txfifosz(void __iomem *mbase, u8 c_size)
+{
+}
+
+static inline void musb_write_txfifoadd(void __iomem *mbase, u16 c_off)
+{
+}
+
+static inline void musb_write_rxfifosz(void __iomem *mbase, u8 c_size)
+{
+}
+
+static inline void  musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
+{
+}
+
+static inline u8 musb_read_configdata(void __iomem *mbase)
+{
+	return 0;
+}
+
+static inline u16 musb_read_hwvers(void __iomem *mbase)
+{
+	return 0;
+}
+
+static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase)
+{
+	return NULL;
+}
+
+static inline void musb_write_rxfunaddr(void __iomem *ep_target_regs,
+		u8 qh_addr_reg)
+{
+}
+
+static inline void musb_write_rxhubaddr(void __iomem *ep_target_regs,
+		u8 qh_h_addr_reg)
+{
+}
+
+static inline void musb_write_rxhubport(void __iomem *ep_target_regs,
+		u8 qh_h_port_reg)
+{
+}
+
+static inline void  musb_write_txfunaddr(void __iomem *mbase, u8 epnum,
+		u8 qh_addr_reg)
+{
+}
+
+static inline void  musb_write_txhubaddr(void __iomem *mbase, u8 epnum,
+		u8 qh_addr_reg)
+{
+}
+
+static inline void  musb_write_txhubport(void __iomem *mbase, u8 epnum,
+		u8 qh_h_port_reg)
+{
+}
+
+#endif /* CONFIG_BLACKFIN */
+
 #endif	/* __MUSB_REGS_H__ */
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 8c734ef..8662e9e 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -34,58 +34,7 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include "musb_core.h"
-
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
-#include "omap2430.h"
-#endif
-
-#define MUSB_HSDMA_BASE		0x200
-#define MUSB_HSDMA_INTR		(MUSB_HSDMA_BASE + 0)
-#define MUSB_HSDMA_CONTROL		0x4
-#define MUSB_HSDMA_ADDRESS		0x8
-#define MUSB_HSDMA_COUNT		0xc
-
-#define MUSB_HSDMA_CHANNEL_OFFSET(_bchannel, _offset)		\
-		(MUSB_HSDMA_BASE + (_bchannel << 4) + _offset)
-
-/* control register (16-bit): */
-#define MUSB_HSDMA_ENABLE_SHIFT		0
-#define MUSB_HSDMA_TRANSMIT_SHIFT		1
-#define MUSB_HSDMA_MODE1_SHIFT		2
-#define MUSB_HSDMA_IRQENABLE_SHIFT		3
-#define MUSB_HSDMA_ENDPOINT_SHIFT		4
-#define MUSB_HSDMA_BUSERROR_SHIFT		8
-#define MUSB_HSDMA_BURSTMODE_SHIFT		9
-#define MUSB_HSDMA_BURSTMODE		(3 << MUSB_HSDMA_BURSTMODE_SHIFT)
-#define MUSB_HSDMA_BURSTMODE_UNSPEC	0
-#define MUSB_HSDMA_BURSTMODE_INCR4	1
-#define MUSB_HSDMA_BURSTMODE_INCR8	2
-#define MUSB_HSDMA_BURSTMODE_INCR16	3
-
-#define MUSB_HSDMA_CHANNELS		8
-
-struct musb_dma_controller;
-
-struct musb_dma_channel {
-	struct dma_channel		channel;
-	struct musb_dma_controller	*controller;
-	u32				start_addr;
-	u32				len;
-	u16				max_packet_sz;
-	u8				idx;
-	u8				epnum;
-	u8				transmit;
-};
-
-struct musb_dma_controller {
-	struct dma_controller		controller;
-	struct musb_dma_channel		channel[MUSB_HSDMA_CHANNELS];
-	void				*private_data;
-	void __iomem			*base;
-	u8				channel_count;
-	u8				used_channels;
-	u8				irq;
-};
+#include "musbhsdma.h"
 
 static int dma_controller_start(struct dma_controller *c)
 {
@@ -203,12 +152,8 @@
 				: 0);
 
 	/* address/count */
-	musb_writel(mbase,
-		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS),
-		dma_addr);
-	musb_writel(mbase,
-		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT),
-		len);
+	musb_write_hsdma_addr(mbase, bchannel, dma_addr);
+	musb_write_hsdma_count(mbase, bchannel, len);
 
 	/* control (this should start things) */
 	musb_writew(mbase,
@@ -279,13 +224,8 @@
 		musb_writew(mbase,
 			MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_CONTROL),
 			0);
-		musb_writel(mbase,
-			MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS),
-			0);
-		musb_writel(mbase,
-			MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT),
-			0);
-
+		musb_write_hsdma_addr(mbase, bchannel, 0);
+		musb_write_hsdma_count(mbase, bchannel, 0);
 		channel->status = MUSB_DMA_STATUS_FREE;
 	}
 
@@ -333,10 +273,8 @@
 			} else {
 				u8 devctl;
 
-				addr = musb_readl(mbase,
-						MUSB_HSDMA_CHANNEL_OFFSET(
-							bchannel,
-							MUSB_HSDMA_ADDRESS));
+				addr = musb_read_hsdma_addr(mbase,
+						bchannel);
 				channel->actual_len = addr
 					- musb_channel->start_addr;
 
@@ -375,6 +313,12 @@
 			}
 		}
 	}
+
+#ifdef CONFIG_BLACKFIN
+	/* Clear DMA interrupt flags */
+	musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
+#endif
+
 	retval = IRQ_HANDLED;
 done:
 	spin_unlock_irqrestore(&musb->lock, flags);
@@ -424,7 +368,7 @@
 	controller->controller.channel_abort = dma_channel_abort;
 
 	if (request_irq(irq, dma_controller_irq, IRQF_DISABLED,
-			musb->controller->bus_id, &controller->controller)) {
+			dev_name(musb->controller), &controller->controller)) {
 		dev_err(dev, "request_irq %d failed!\n", irq);
 		dma_controller_destroy(&controller->controller);
 
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
new file mode 100644
index 0000000..1299d92
--- /dev/null
+++ b/drivers/usb/musb/musbhsdma.h
@@ -0,0 +1,149 @@
+/*
+ * MUSB OTG driver - support for Mentor's DMA controller
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2007 by Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#include "omap2430.h"
+#endif
+
+#ifndef CONFIG_BLACKFIN
+
+#define MUSB_HSDMA_BASE		0x200
+#define MUSB_HSDMA_INTR		(MUSB_HSDMA_BASE + 0)
+#define MUSB_HSDMA_CONTROL		0x4
+#define MUSB_HSDMA_ADDRESS		0x8
+#define MUSB_HSDMA_COUNT		0xc
+
+#define MUSB_HSDMA_CHANNEL_OFFSET(_bchannel, _offset)		\
+		(MUSB_HSDMA_BASE + (_bchannel << 4) + _offset)
+
+#define musb_read_hsdma_addr(mbase, bchannel)	\
+	musb_readl(mbase,	\
+		   MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS))
+
+#define musb_write_hsdma_addr(mbase, bchannel, addr) \
+	musb_writel(mbase, \
+		    MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS), \
+		    addr)
+
+#define musb_write_hsdma_count(mbase, bchannel, len) \
+	musb_writel(mbase, \
+		    MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT), \
+		    len)
+#else
+
+#define MUSB_HSDMA_BASE		0x400
+#define MUSB_HSDMA_INTR		(MUSB_HSDMA_BASE + 0)
+#define MUSB_HSDMA_CONTROL		0x04
+#define MUSB_HSDMA_ADDR_LOW		0x08
+#define MUSB_HSDMA_ADDR_HIGH		0x0C
+#define MUSB_HSDMA_COUNT_LOW		0x10
+#define MUSB_HSDMA_COUNT_HIGH		0x14
+
+#define MUSB_HSDMA_CHANNEL_OFFSET(_bchannel, _offset)		\
+		(MUSB_HSDMA_BASE + (_bchannel * 0x20) + _offset)
+
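+/*
+ * DMA address and count are 32-bit quantities split across LOW/HIGH
+ * 16-bit registers on Blackfin; the helpers below recombine or split
+ * them so the common DMA code can keep using 32-bit values.
+ */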
+static inline u32 musb_read_hsdma_addr(void __iomem *mbase, u8 bchannel)
+{
+	u32 addr = musb_readw(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH));
+
+	addr = addr << 16;
+
+	addr |= musb_readw(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW));
+
+	return addr;
+}
+
+static inline void musb_write_hsdma_addr(void __iomem *mbase,
+				u8 bchannel, dma_addr_t dma_addr)
+{
+	musb_writew(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW),
+		((u16)((u32) dma_addr & 0xFFFF)));
+	musb_writew(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH),
+		((u16)(((u32) dma_addr >> 16) & 0xFFFF)));
+}
+
+static inline void musb_write_hsdma_count(void __iomem *mbase,
+				u8 bchannel, u32 len)
+{
+	musb_writew(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW),
+		((u16)((u32) len & 0xFFFF)));
+	musb_writew(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH),
+		((u16)(((u32) len >> 16) & 0xFFFF)));
+}
+
+#endif /* CONFIG_BLACKFIN */
+
+/* control register (16-bit): */
+#define MUSB_HSDMA_ENABLE_SHIFT		0
+#define MUSB_HSDMA_TRANSMIT_SHIFT	1
+#define MUSB_HSDMA_MODE1_SHIFT		2
+#define MUSB_HSDMA_IRQENABLE_SHIFT	3
+#define MUSB_HSDMA_ENDPOINT_SHIFT	4
+#define MUSB_HSDMA_BUSERROR_SHIFT	8
+#define MUSB_HSDMA_BURSTMODE_SHIFT	9
+#define MUSB_HSDMA_BURSTMODE		(3 << MUSB_HSDMA_BURSTMODE_SHIFT)
+#define MUSB_HSDMA_BURSTMODE_UNSPEC	0
+#define MUSB_HSDMA_BURSTMODE_INCR4	1
+#define MUSB_HSDMA_BURSTMODE_INCR8	2
+#define MUSB_HSDMA_BURSTMODE_INCR16	3
+
+#define MUSB_HSDMA_CHANNELS		8
+
+struct musb_dma_controller;
+
+struct musb_dma_channel {
+	struct dma_channel		channel;
+	struct musb_dma_controller	*controller;
+	u32				start_addr;
+	u32				len;
+	u16				max_packet_sz;
+	u8				idx;
+	u8				epnum;
+	u8				transmit;
+};
+
+struct musb_dma_controller {
+	struct dma_controller		controller;
+	struct musb_dma_channel		channel[MUSB_HSDMA_CHANNELS];
+	void				*private_data;
+	void __iomem			*base;
+	u8				channel_count;
+	u8				used_channels;
+	u8				irq;
+};
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index ce6c162..901dffd 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -58,10 +58,10 @@
 #endif
 	u8	devctl;
 
-	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-
 	spin_lock_irqsave(&musb->lock, flags);
 
+	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
 	switch (musb->xceiv.state) {
 	case OTG_STATE_A_WAIT_BCON:
 		devctl &= ~MUSB_DEVCTL_SESSION;
@@ -196,7 +196,7 @@
 
 static int musb_platform_resume(struct musb *musb);
 
-void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
 {
 	u8	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
 
@@ -204,15 +204,24 @@
 	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
 
 	switch (musb_mode) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
 	case MUSB_HOST:
 		otg_set_host(&musb->xceiv, musb->xceiv.host);
 		break;
+#endif
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
 	case MUSB_PERIPHERAL:
 		otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget);
 		break;
+#endif
+#ifdef CONFIG_USB_MUSB_OTG
 	case MUSB_OTG:
 		break;
+#endif
+	default:
+		return -EINVAL;
 	}
+	return 0;
 }
 
 int __init musb_platform_init(struct musb *musb)
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index ee8fca9..9e20fd0 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -598,7 +598,7 @@
  * and peripheral modes in non-OTG configurations by reconfiguring hardware
  * and then setting musb->board_mode. For now, only support OTG mode.
  */
-void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
 {
 	void __iomem	*tbase = musb->ctrl_base;
 	u32		otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf;
@@ -641,7 +641,8 @@
 #endif
 
 	default:
-		DBG(2, "Trying to set unknown mode %i\n", musb_mode);
+		DBG(2, "Trying to set mode %i\n", musb_mode);
+		return -EINVAL;
 	}
 
 	musb_writel(tbase, TUSB_PHY_OTG_CTRL,
@@ -655,6 +656,8 @@
 		!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS))
 			INFO("Cannot be peripheral with mini-A cable "
 			"otg_stat: %08x\n", otg_stat);
+
+	return 0;
 }
 
 static inline unsigned long
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
new file mode 100644
index 0000000..8e8dbdb
--- /dev/null
+++ b/drivers/usb/otg/Kconfig
@@ -0,0 +1,54 @@
+#
+# USB OTG infrastructure may be needed for peripheral-only, host-only,
+# or OTG-capable configurations when OTG transceivers or controllers
+# are used.
+#
+
+comment "OTG and related infrastructure"
+
+if USB || USB_GADGET
+
+config USB_OTG_UTILS
+	bool
+	help
+	  Select this to make sure the build includes objects from
+	  the OTG infrastructure directory.
+
+#
+# USB Transceiver Drivers
+#
+config USB_GPIO_VBUS
+	tristate "GPIO based peripheral-only VBUS sensing 'transceiver'"
+	depends on GENERIC_GPIO
+	select USB_OTG_UTILS
+	help
+	  Provides simple GPIO VBUS sensing for controllers with an
+	  internal transceiver via the otg_transceiver interface, and
+	  optionally control of a D+ pullup GPIO as well as a VBUS
+	  current limit regulator.
+
+config ISP1301_OMAP
+	tristate "Philips ISP1301 with OMAP OTG"
+	depends on I2C && ARCH_OMAP_OTG
+	select USB_OTG_UTILS
+	help
+	  If you say yes here you get support for the Philips ISP1301
+	  USB-On-The-Go transceiver working with the OMAP OTG controller.
+	  The ISP1301 is a full speed USB transceiver which is used in
+	  products including H2, H3, and H4 development boards for Texas
+	  Instruments OMAP processors.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called isp1301_omap.
+
+config TWL4030_USB
+	tristate "TWL4030 USB Transceiver Driver"
+	depends on TWL4030_CORE
+	select USB_OTG_UTILS
+	help
+	  Enable this to support the USB OTG transceiver on TWL4030
+	  family chips (including the TWL5030 and TPS659x0 devices).
+	  This transceiver supports high and full speed devices plus,
+	  in host mode, low speed.
+
+endif # USB || USB_GADGET
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
new file mode 100644
index 0000000..d73c7cf
--- /dev/null
+++ b/drivers/usb/otg/Makefile
@@ -0,0 +1,15 @@
+#
+# OTG infrastructure and transceiver drivers
+#
+
+# infrastructure
+obj-$(CONFIG_USB_OTG_UTILS)	+= otg.o
+
+# transceiver drivers
+obj-$(CONFIG_USB_GPIO_VBUS)	+= gpio_vbus.o
+obj-$(CONFIG_ISP1301_OMAP)	+= isp1301_omap.o
+obj-$(CONFIG_TWL4030_USB)	+= twl4030-usb.o
+
+ccflags-$(CONFIG_USB_DEBUG)	+= -DDEBUG
+ccflags-$(CONFIG_USB_GADGET_DEBUG) += -DDEBUG
+
diff --git a/drivers/usb/otg/gpio_vbus.c b/drivers/usb/otg/gpio_vbus.c
new file mode 100644
index 0000000..63a6036
--- /dev/null
+++ b/drivers/usb/otg/gpio_vbus.c
@@ -0,0 +1,335 @@
+/*
+ * gpio-vbus.c - simple GPIO VBUS sensing driver for B peripheral devices
+ *
+ * Copyright (c) 2008 Philipp Zabel <philipp.zabel@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+
+#include <linux/regulator/consumer.h>
+
+#include <linux/usb/gadget.h>
+#include <linux/usb/gpio_vbus.h>
+#include <linux/usb/otg.h>
+
+
+/*
+ * A simple GPIO VBUS sensing driver for B peripheral only devices
+ * with internal transceivers. It can control a D+ pullup GPIO and
+ * a regulator to limit the current drawn from VBUS.
+ *
+ * Needs to be loaded before the UDC driver that will use it.
+ */
+struct gpio_vbus_data {
+	struct otg_transceiver otg;
+	struct device          *dev;
+	struct regulator       *vbus_draw;
+	int			vbus_draw_enabled;
+	unsigned		mA;
+};
+
+
+/*
+ * This driver relies on "both edges" triggering.  VBUS has 100 msec to
+ * stabilize, so the peripheral controller driver may need to cope with
+ * some bouncing due to current surges (e.g. charging local capacitance)
+ * and contact chatter.
+ *
+ * REVISIT in desperate straits, toggling between rising and falling
+ * edges might be workable.
+ */
+#define VBUS_IRQ_FLAGS \
+	( IRQF_SAMPLE_RANDOM | IRQF_SHARED \
+	| IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING )
+
+
+/* interface to regulator framework */
+static void set_vbus_draw(struct gpio_vbus_data *gpio_vbus, unsigned mA)
+{
+	struct regulator *vbus_draw = gpio_vbus->vbus_draw;
+	int enabled;
+
+	if (!vbus_draw)
+		return;
+
+	enabled = gpio_vbus->vbus_draw_enabled;
+	if (mA) {
+		regulator_set_current_limit(vbus_draw, 0, 1000 * mA);
+		if (!enabled) {
+			regulator_enable(vbus_draw);
+			gpio_vbus->vbus_draw_enabled = 1;
+		}
+	} else {
+		if (enabled) {
+			regulator_disable(vbus_draw);
+			gpio_vbus->vbus_draw_enabled = 0;
+		}
+	}
+	gpio_vbus->mA = mA;
+}
+
+/* VBUS change IRQ handler */
+static irqreturn_t gpio_vbus_irq(int irq, void *data)
+{
+	struct platform_device *pdev = data;
+	struct gpio_vbus_mach_info *pdata = pdev->dev.platform_data;
+	struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
+	int gpio, vbus;
+
+	vbus = gpio_get_value(pdata->gpio_vbus);
+	if (pdata->gpio_vbus_inverted)
+		vbus = !vbus;
+
+	dev_dbg(&pdev->dev, "VBUS %s (gadget: %s)\n",
+		vbus ? "supplied" : "inactive",
+		gpio_vbus->otg.gadget ? gpio_vbus->otg.gadget->name : "none");
+
+	if (!gpio_vbus->otg.gadget)
+		return IRQ_HANDLED;
+
+	/* Peripheral controllers which manage the pullup themselves won't have
+	 * gpio_pullup configured here.  If it's configured here, we'll do what
+	 * isp1301_omap::b_peripheral() does and enable the pullup here... although
+	 * that may complicate usb_gadget_{,dis}connect() support.
+	 */
+	gpio = pdata->gpio_pullup;
+	if (vbus) {
+		gpio_vbus->otg.state = OTG_STATE_B_PERIPHERAL;
+		usb_gadget_vbus_connect(gpio_vbus->otg.gadget);
+
+		/* drawing a "unit load" is *always* OK, except for OTG */
+		set_vbus_draw(gpio_vbus, 100);
+
+		/* optionally enable D+ pullup */
+		if (gpio_is_valid(gpio))
+			gpio_set_value(gpio, !pdata->gpio_pullup_inverted);
+	} else {
+		/* optionally disable D+ pullup */
+		if (gpio_is_valid(gpio))
+			gpio_set_value(gpio, pdata->gpio_pullup_inverted);
+
+		set_vbus_draw(gpio_vbus, 0);
+
+		usb_gadget_vbus_disconnect(gpio_vbus->otg.gadget);
+		gpio_vbus->otg.state = OTG_STATE_B_IDLE;
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* OTG transceiver interface */
+
+/* bind/unbind the peripheral controller */
+static int gpio_vbus_set_peripheral(struct otg_transceiver *otg,
+				struct usb_gadget *gadget)
+{
+	struct gpio_vbus_data *gpio_vbus;
+	struct gpio_vbus_mach_info *pdata;
+	struct platform_device *pdev;
+	int gpio, irq;
+
+	gpio_vbus = container_of(otg, struct gpio_vbus_data, otg);
+	pdev = to_platform_device(gpio_vbus->dev);
+	pdata = gpio_vbus->dev->platform_data;
+	irq = gpio_to_irq(pdata->gpio_vbus);
+	gpio = pdata->gpio_pullup;
+
+	if (!gadget) {
+		dev_dbg(&pdev->dev, "unregistering gadget '%s'\n",
+			otg->gadget->name);
+
+		/* optionally disable D+ pullup */
+		if (gpio_is_valid(gpio))
+			gpio_set_value(gpio, pdata->gpio_pullup_inverted);
+
+		set_vbus_draw(gpio_vbus, 0);
+
+		usb_gadget_vbus_disconnect(otg->gadget);
+		otg->state = OTG_STATE_UNDEFINED;
+
+		otg->gadget = NULL;
+		return 0;
+	}
+
+	otg->gadget = gadget;
+	dev_dbg(&pdev->dev, "registered gadget '%s'\n", gadget->name);
+
+	/* initialize connection state */
+	gpio_vbus_irq(irq, pdev);
+	return 0;
+}
+
+/* effective for B devices, ignored for A-peripheral */
+static int gpio_vbus_set_power(struct otg_transceiver *otg, unsigned mA)
+{
+	struct gpio_vbus_data *gpio_vbus;
+
+	gpio_vbus = container_of(otg, struct gpio_vbus_data, otg);
+
+	if (otg->state == OTG_STATE_B_PERIPHERAL)
+		set_vbus_draw(gpio_vbus, mA);
+	return 0;
+}
+
+/* for non-OTG B devices: set/clear transceiver suspend mode */
+static int gpio_vbus_set_suspend(struct otg_transceiver *otg, int suspend)
+{
+	struct gpio_vbus_data *gpio_vbus;
+
+	gpio_vbus = container_of(otg, struct gpio_vbus_data, otg);
+
+	/* draw max 0 mA from vbus in suspend mode; or the previously
+	 * recorded amount of current if not suspended
+	 *
+	 * NOTE: high powered configs (mA > 100) may draw up to 2.5 mA
+	 * if they're wake-enabled ... we don't handle that yet.
+	 */
+	return gpio_vbus_set_power(otg, suspend ? 0 : gpio_vbus->mA);
+}
+
+/* platform driver interface */
+
+static int __init gpio_vbus_probe(struct platform_device *pdev)
+{
+	struct gpio_vbus_mach_info *pdata = pdev->dev.platform_data;
+	struct gpio_vbus_data *gpio_vbus;
+	struct resource *res;
+	int err, gpio, irq;
+
+	if (!pdata || !gpio_is_valid(pdata->gpio_vbus))
+		return -EINVAL;
+	gpio = pdata->gpio_vbus;
+
+	gpio_vbus = kzalloc(sizeof(struct gpio_vbus_data), GFP_KERNEL);
+	if (!gpio_vbus)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, gpio_vbus);
+	gpio_vbus->dev = &pdev->dev;
+	gpio_vbus->otg.label = "gpio-vbus";
+	gpio_vbus->otg.state = OTG_STATE_UNDEFINED;
+	gpio_vbus->otg.set_peripheral = gpio_vbus_set_peripheral;
+	gpio_vbus->otg.set_power = gpio_vbus_set_power;
+	gpio_vbus->otg.set_suspend = gpio_vbus_set_suspend;
+
+	err = gpio_request(gpio, "vbus_detect");
+	if (err) {
+		dev_err(&pdev->dev, "can't request vbus gpio %d, err: %d\n",
+			gpio, err);
+		goto err_gpio;
+	}
+	gpio_direction_input(gpio);
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res) {
+		irq = res->start;
+		res->flags &= IRQF_TRIGGER_MASK;
+		res->flags |= IRQF_SAMPLE_RANDOM | IRQF_SHARED;
+	} else
+		irq = gpio_to_irq(gpio);
+
+	/* if data line pullup is in use, initialize it to "not pulling up" */
+	gpio = pdata->gpio_pullup;
+	if (gpio_is_valid(gpio)) {
+		err = gpio_request(gpio, "udc_pullup");
+		if (err) {
+			dev_err(&pdev->dev,
+				"can't request pullup gpio %d, err: %d\n",
+				gpio, err);
+			gpio_free(pdata->gpio_vbus);
+			goto err_gpio;
+		}
+		gpio_direction_output(gpio, pdata->gpio_pullup_inverted);
+	}
+
+	err = request_irq(irq, gpio_vbus_irq, VBUS_IRQ_FLAGS,
+		"vbus_detect", pdev);
+	if (err) {
+		dev_err(&pdev->dev, "can't request irq %i, err: %d\n",
+			irq, err);
+		goto err_irq;
+	}
+
+	/* only active when a gadget is registered */
+	err = otg_set_transceiver(&gpio_vbus->otg);
+	if (err) {
+		dev_err(&pdev->dev, "can't register transceiver, err: %d\n",
+			err);
+		goto err_otg;
+	}
+
+	gpio_vbus->vbus_draw = regulator_get(&pdev->dev, "vbus_draw");
+	if (IS_ERR(gpio_vbus->vbus_draw)) {
+		dev_dbg(&pdev->dev, "can't get vbus_draw regulator, err: %ld\n",
+			PTR_ERR(gpio_vbus->vbus_draw));
+		gpio_vbus->vbus_draw = NULL;
+	}
+
+	return 0;
+err_otg:
+	free_irq(irq, pdev);
+err_irq:
+	if (gpio_is_valid(pdata->gpio_pullup))
+		gpio_free(pdata->gpio_pullup);
+	gpio_free(pdata->gpio_vbus);
+err_gpio:
+	platform_set_drvdata(pdev, NULL);
+	kfree(gpio_vbus);
+	return err;
+}
+
+static int __exit gpio_vbus_remove(struct platform_device *pdev)
+{
+	struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
+	struct gpio_vbus_mach_info *pdata = pdev->dev.platform_data;
+	int gpio = pdata->gpio_vbus;
+
+	regulator_put(gpio_vbus->vbus_draw);
+
+	otg_set_transceiver(NULL);
+
+	free_irq(gpio_to_irq(gpio), pdev);
+	if (gpio_is_valid(pdata->gpio_pullup))
+		gpio_free(pdata->gpio_pullup);
+	gpio_free(gpio);
+	platform_set_drvdata(pdev, NULL);
+	kfree(gpio_vbus);
+
+	return 0;
+}
+
+/* NOTE:  the gpio-vbus device may *NOT* be hotplugged */
+
+MODULE_ALIAS("platform:gpio-vbus");
+
+static struct platform_driver gpio_vbus_driver = {
+	.driver = {
+		.name  = "gpio-vbus",
+		.owner = THIS_MODULE,
+	},
+	.remove  = __exit_p(gpio_vbus_remove),
+};
+
+static int __init gpio_vbus_init(void)
+{
+	return platform_driver_probe(&gpio_vbus_driver, gpio_vbus_probe);
+}
+module_init(gpio_vbus_init);
+
+static void __exit gpio_vbus_exit(void)
+{
+	platform_driver_unregister(&gpio_vbus_driver);
+}
+module_exit(gpio_vbus_exit);
+
+MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver");
+MODULE_AUTHOR("Philipp Zabel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
similarity index 100%
rename from drivers/i2c/chips/isp1301_omap.c
rename to drivers/usb/otg/isp1301_omap.c
diff --git a/drivers/usb/otg/otg.c b/drivers/usb/otg/otg.c
new file mode 100644
index 0000000..ff318fa
--- /dev/null
+++ b/drivers/usb/otg/otg.c
@@ -0,0 +1,65 @@
+/*
+ * otg.c -- USB OTG utility code
+ *
+ * Copyright (C) 2004 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+#include <linux/usb/otg.h>
+
+static struct otg_transceiver *xceiv;
+
+/**
+ * otg_get_transceiver - find the (single) OTG transceiver
+ *
+ * Returns the transceiver driver, after getting a refcount to it; or
+ * null if there is no such transceiver.  The caller is responsible for
+ * calling otg_put_transceiver() to release that count.
+ *
+ * For use by USB host and peripheral drivers.
+ */
+struct otg_transceiver *otg_get_transceiver(void)
+{
+	if (xceiv)
+		get_device(xceiv->dev);
+	return xceiv;
+}
+EXPORT_SYMBOL(otg_get_transceiver);
+
+/**
+ * otg_put_transceiver - release the (single) OTG transceiver
+ * @x: the transceiver returned by otg_get_transceiver()
+ *
+ * Releases a refcount the caller received from otg_get_transceiver().
+ *
+ * For use by USB host and peripheral drivers.
+ */
+void otg_put_transceiver(struct otg_transceiver *x)
+{
+	put_device(x->dev);
+}
+EXPORT_SYMBOL(otg_put_transceiver);
+
+/**
+ * otg_set_transceiver - declare the (single) OTG transceiver
+ * @x: the USB OTG transceiver to be used; or NULL
+ *
+ * This call is exclusively for use by transceiver drivers, which
+ * coordinate the activities of drivers for host and peripheral
+ * controllers, and in some cases for VBUS current regulation.
+ */
+int otg_set_transceiver(struct otg_transceiver *x)
+{
+	if (xceiv && x)
+		return -EBUSY;
+	xceiv = x;
+	return 0;
+}
+EXPORT_SYMBOL(otg_set_transceiver);
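
As a usage illustration (not part of this patch), a peripheral controller driver would look the transceiver up, bind its gadget through the set_peripheral() method implemented by transceiver drivers such as the ones below, and drop the reference when done. A sketch with hypothetical my_udc_* names:

	static struct otg_transceiver *transceiver;

	static int my_udc_bind(struct usb_gadget *gadget)
	{
		transceiver = otg_get_transceiver();
		if (!transceiver)
			return -ENODEV;
		return transceiver->set_peripheral(transceiver, gadget);
	}

	static void my_udc_unbind(void)
	{
		transceiver->set_peripheral(transceiver, NULL);
		otg_put_transceiver(transceiver);
		transceiver = NULL;
	}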
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
new file mode 100644
index 0000000..416e441
--- /dev/null
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -0,0 +1,721 @@
+/*
+ * twl4030_usb - TWL4030 USB transceiver, talking to OMAP OTG controller
+ *
+ * Copyright (C) 2004-2007 Texas Instruments
+ * Copyright (C) 2008 Nokia Corporation
+ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Current status:
+ *	- HS USB ULPI mode works.
+ *	- 3-pin mode support may be added in future.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/usb/otg.h>
+#include <linux/i2c/twl4030.h>
+
+
+/* Register defines */
+
+#define VENDOR_ID_LO			0x00
+#define VENDOR_ID_HI			0x01
+#define PRODUCT_ID_LO			0x02
+#define PRODUCT_ID_HI			0x03
+
+#define FUNC_CTRL			0x04
+#define FUNC_CTRL_SET			0x05
+#define FUNC_CTRL_CLR			0x06
+#define FUNC_CTRL_SUSPENDM		(1 << 6)
+#define FUNC_CTRL_RESET			(1 << 5)
+#define FUNC_CTRL_OPMODE_MASK		(3 << 3) /* bits 3 and 4 */
+#define FUNC_CTRL_OPMODE_NORMAL		(0 << 3)
+#define FUNC_CTRL_OPMODE_NONDRIVING	(1 << 3)
+#define FUNC_CTRL_OPMODE_DISABLE_BIT_NRZI	(2 << 3)
+#define FUNC_CTRL_TERMSELECT		(1 << 2)
+#define FUNC_CTRL_XCVRSELECT_MASK	(3 << 0) /* bits 0 and 1 */
+#define FUNC_CTRL_XCVRSELECT_HS		(0 << 0)
+#define FUNC_CTRL_XCVRSELECT_FS		(1 << 0)
+#define FUNC_CTRL_XCVRSELECT_LS		(2 << 0)
+#define FUNC_CTRL_XCVRSELECT_FS4LS	(3 << 0)
+
+#define IFC_CTRL			0x07
+#define IFC_CTRL_SET			0x08
+#define IFC_CTRL_CLR			0x09
+#define IFC_CTRL_INTERFACE_PROTECT_DISABLE	(1 << 7)
+#define IFC_CTRL_AUTORESUME		(1 << 4)
+#define IFC_CTRL_CLOCKSUSPENDM		(1 << 3)
+#define IFC_CTRL_CARKITMODE		(1 << 2)
+#define IFC_CTRL_FSLSSERIALMODE_3PIN	(1 << 1)
+
+#define TWL4030_OTG_CTRL		0x0A
+#define TWL4030_OTG_CTRL_SET		0x0B
+#define TWL4030_OTG_CTRL_CLR		0x0C
+#define TWL4030_OTG_CTRL_DRVVBUS	(1 << 5)
+#define TWL4030_OTG_CTRL_CHRGVBUS	(1 << 4)
+#define TWL4030_OTG_CTRL_DISCHRGVBUS	(1 << 3)
+#define TWL4030_OTG_CTRL_DMPULLDOWN	(1 << 2)
+#define TWL4030_OTG_CTRL_DPPULLDOWN	(1 << 1)
+#define TWL4030_OTG_CTRL_IDPULLUP	(1 << 0)
+
+#define USB_INT_EN_RISE			0x0D
+#define USB_INT_EN_RISE_SET		0x0E
+#define USB_INT_EN_RISE_CLR		0x0F
+#define USB_INT_EN_FALL			0x10
+#define USB_INT_EN_FALL_SET		0x11
+#define USB_INT_EN_FALL_CLR		0x12
+#define USB_INT_STS			0x13
+#define USB_INT_LATCH			0x14
+#define USB_INT_IDGND			(1 << 4)
+#define USB_INT_SESSEND			(1 << 3)
+#define USB_INT_SESSVALID		(1 << 2)
+#define USB_INT_VBUSVALID		(1 << 1)
+#define USB_INT_HOSTDISCONNECT		(1 << 0)
+
+#define CARKIT_CTRL			0x19
+#define CARKIT_CTRL_SET			0x1A
+#define CARKIT_CTRL_CLR			0x1B
+#define CARKIT_CTRL_MICEN		(1 << 6)
+#define CARKIT_CTRL_SPKRIGHTEN		(1 << 5)
+#define CARKIT_CTRL_SPKLEFTEN		(1 << 4)
+#define CARKIT_CTRL_RXDEN		(1 << 3)
+#define CARKIT_CTRL_TXDEN		(1 << 2)
+#define CARKIT_CTRL_IDGNDDRV		(1 << 1)
+#define CARKIT_CTRL_CARKITPWR		(1 << 0)
+#define CARKIT_PLS_CTRL			0x22
+#define CARKIT_PLS_CTRL_SET		0x23
+#define CARKIT_PLS_CTRL_CLR		0x24
+#define CARKIT_PLS_CTRL_SPKRRIGHT_BIASEN	(1 << 3)
+#define CARKIT_PLS_CTRL_SPKRLEFT_BIASEN	(1 << 2)
+#define CARKIT_PLS_CTRL_RXPLSEN		(1 << 1)
+#define CARKIT_PLS_CTRL_TXPLSEN		(1 << 0)
+
+#define MCPC_CTRL			0x30
+#define MCPC_CTRL_SET			0x31
+#define MCPC_CTRL_CLR			0x32
+#define MCPC_CTRL_RTSOL			(1 << 7)
+#define MCPC_CTRL_EXTSWR		(1 << 6)
+#define MCPC_CTRL_EXTSWC		(1 << 5)
+#define MCPC_CTRL_VOICESW		(1 << 4)
+#define MCPC_CTRL_OUT64K		(1 << 3)
+#define MCPC_CTRL_RTSCTSSW		(1 << 2)
+#define MCPC_CTRL_HS_UART		(1 << 0)
+
+#define MCPC_IO_CTRL			0x33
+#define MCPC_IO_CTRL_SET		0x34
+#define MCPC_IO_CTRL_CLR		0x35
+#define MCPC_IO_CTRL_MICBIASEN		(1 << 5)
+#define MCPC_IO_CTRL_CTS_NPU		(1 << 4)
+#define MCPC_IO_CTRL_RXD_PU		(1 << 3)
+#define MCPC_IO_CTRL_TXDTYP		(1 << 2)
+#define MCPC_IO_CTRL_CTSTYP		(1 << 1)
+#define MCPC_IO_CTRL_RTSTYP		(1 << 0)
+
+#define MCPC_CTRL2			0x36
+#define MCPC_CTRL2_SET			0x37
+#define MCPC_CTRL2_CLR			0x38
+#define MCPC_CTRL2_MCPC_CK_EN		(1 << 0)
+
+#define OTHER_FUNC_CTRL			0x80
+#define OTHER_FUNC_CTRL_SET		0x81
+#define OTHER_FUNC_CTRL_CLR		0x82
+#define OTHER_FUNC_CTRL_BDIS_ACON_EN	(1 << 4)
+#define OTHER_FUNC_CTRL_FIVEWIRE_MODE	(1 << 2)
+
+#define OTHER_IFC_CTRL			0x83
+#define OTHER_IFC_CTRL_SET		0x84
+#define OTHER_IFC_CTRL_CLR		0x85
+#define OTHER_IFC_CTRL_OE_INT_EN	(1 << 6)
+#define OTHER_IFC_CTRL_CEA2011_MODE	(1 << 5)
+#define OTHER_IFC_CTRL_FSLSSERIALMODE_4PIN	(1 << 4)
+#define OTHER_IFC_CTRL_HIZ_ULPI_60MHZ_OUT	(1 << 3)
+#define OTHER_IFC_CTRL_HIZ_ULPI		(1 << 2)
+#define OTHER_IFC_CTRL_ALT_INT_REROUTE	(1 << 0)
+
+#define OTHER_INT_EN_RISE		0x86
+#define OTHER_INT_EN_RISE_SET		0x87
+#define OTHER_INT_EN_RISE_CLR		0x88
+#define OTHER_INT_EN_FALL		0x89
+#define OTHER_INT_EN_FALL_SET		0x8A
+#define OTHER_INT_EN_FALL_CLR		0x8B
+#define OTHER_INT_STS			0x8C
+#define OTHER_INT_LATCH			0x8D
+#define OTHER_INT_VB_SESS_VLD		(1 << 7)
+#define OTHER_INT_DM_HI			(1 << 6) /* not valid for "latch" reg */
+#define OTHER_INT_DP_HI			(1 << 5) /* not valid for "latch" reg */
+#define OTHER_INT_BDIS_ACON		(1 << 3) /* not valid for "fall" regs */
+#define OTHER_INT_MANU			(1 << 1)
+#define OTHER_INT_ABNORMAL_STRESS	(1 << 0)
+
+#define ID_STATUS			0x96
+#define ID_RES_FLOAT			(1 << 4)
+#define ID_RES_440K			(1 << 3)
+#define ID_RES_200K			(1 << 2)
+#define ID_RES_102K			(1 << 1)
+#define ID_RES_GND			(1 << 0)
+
+#define POWER_CTRL			0xAC
+#define POWER_CTRL_SET			0xAD
+#define POWER_CTRL_CLR			0xAE
+#define POWER_CTRL_OTG_ENAB		(1 << 5)
+
+#define OTHER_IFC_CTRL2			0xAF
+#define OTHER_IFC_CTRL2_SET		0xB0
+#define OTHER_IFC_CTRL2_CLR		0xB1
+#define OTHER_IFC_CTRL2_ULPI_STP_LOW	(1 << 4)
+#define OTHER_IFC_CTRL2_ULPI_TXEN_POL	(1 << 3)
+#define OTHER_IFC_CTRL2_ULPI_4PIN_2430	(1 << 2)
+#define OTHER_IFC_CTRL2_USB_INT_OUTSEL_MASK	(3 << 0) /* bits 0 and 1 */
+#define OTHER_IFC_CTRL2_USB_INT_OUTSEL_INT1N	(0 << 0)
+#define OTHER_IFC_CTRL2_USB_INT_OUTSEL_INT2N	(1 << 0)
+
+#define REG_CTRL_EN			0xB2
+#define REG_CTRL_EN_SET			0xB3
+#define REG_CTRL_EN_CLR			0xB4
+#define REG_CTRL_ERROR			0xB5
+#define ULPI_I2C_CONFLICT_INTEN		(1 << 0)
+
+#define OTHER_FUNC_CTRL2		0xB8
+#define OTHER_FUNC_CTRL2_SET		0xB9
+#define OTHER_FUNC_CTRL2_CLR		0xBA
+#define OTHER_FUNC_CTRL2_VBAT_TIMER_EN	(1 << 0)
+
+/* following registers do not have separate _clr and _set registers */
+#define VBUS_DEBOUNCE			0xC0
+#define ID_DEBOUNCE			0xC1
+#define VBAT_TIMER			0xD3
+#define PHY_PWR_CTRL			0xFD
+#define PHY_PWR_PHYPWD			(1 << 0)
+#define PHY_CLK_CTRL			0xFE
+#define PHY_CLK_CTRL_CLOCKGATING_EN	(1 << 2)
+#define PHY_CLK_CTRL_CLK32K_EN		(1 << 1)
+#define REQ_PHY_DPLL_CLK		(1 << 0)
+#define PHY_CLK_CTRL_STS		0xFF
+#define PHY_DPLL_CLK			(1 << 0)
+
+/* In module TWL4030_MODULE_PM_MASTER */
+#define PROTECT_KEY			0x0E
+
+/* In module TWL4030_MODULE_PM_RECEIVER */
+#define VUSB_DEDICATED1			0x7D
+#define VUSB_DEDICATED2			0x7E
+#define VUSB1V5_DEV_GRP			0x71
+#define VUSB1V5_TYPE			0x72
+#define VUSB1V5_REMAP			0x73
+#define VUSB1V8_DEV_GRP			0x74
+#define VUSB1V8_TYPE			0x75
+#define VUSB1V8_REMAP			0x76
+#define VUSB3V1_DEV_GRP			0x77
+#define VUSB3V1_TYPE			0x78
+#define VUSB3V1_REMAP			0x79
+
+/* In module TWL4030_MODULE_INTBR */
+#define PMBR1				0x0D
+#define GPIO_USB_4PIN_ULPI_2430C	(3 << 0)
+
+
+
+enum linkstat {
+	USB_LINK_UNKNOWN = 0,
+	USB_LINK_NONE,
+	USB_LINK_VBUS,
+	USB_LINK_ID,
+};
+
+struct twl4030_usb {
+	struct otg_transceiver	otg;
+	struct device		*dev;
+
+	/* for vbus reporting with irqs disabled */
+	spinlock_t		lock;
+
+	/* pin configuration */
+	enum twl4030_usb_mode	usb_mode;
+
+	int			irq;
+	u8			linkstat;
+	u8			asleep;
+	bool			irq_enabled;
+};
+
+/* internal define on top of container_of */
+#define xceiv_to_twl(x)		container_of((x), struct twl4030_usb, otg)
+
+/*-------------------------------------------------------------------------*/
+
+static int twl4030_i2c_write_u8_verify(struct twl4030_usb *twl,
+		u8 module, u8 data, u8 address)
+{
+	u8 check;
+
+	if ((twl4030_i2c_write_u8(module, data, address) >= 0) &&
+	    (twl4030_i2c_read_u8(module, &check, address) >= 0) &&
+						(check == data))
+		return 0;
+	dev_dbg(twl->dev, "Write%d[%d,0x%x] wrote %02x but read %02x\n",
+			1, module, address, data, check);
+
+	/* Failed once: Try again */
+	if ((twl4030_i2c_write_u8(module, data, address) >= 0) &&
+	    (twl4030_i2c_read_u8(module, &check, address) >= 0) &&
+						(check == data))
+		return 0;
+	dev_dbg(twl->dev, "Write%d[%d,0x%x] wrote %02x but read %02x\n",
+			2, module, address, data, check);
+
+	/* Failed again: Return error */
+	return -EBUSY;
+}
+
+#define twl4030_usb_write_verify(twl, address, data)	\
+	twl4030_i2c_write_u8_verify(twl, TWL4030_MODULE_USB, (data), (address))
+
+static inline int twl4030_usb_write(struct twl4030_usb *twl,
+		u8 address, u8 data)
+{
+	int ret = 0;
+
+	ret = twl4030_i2c_write_u8(TWL4030_MODULE_USB, data, address);
+	if (ret < 0)
+		dev_dbg(twl->dev,
+			"TWL4030:USB:Write[0x%x] Error %d\n", address, ret);
+	return ret;
+}
+
+static inline int twl4030_readb(struct twl4030_usb *twl, u8 module, u8 address)
+{
+	u8 data;
+	int ret = 0;
+
+	ret = twl4030_i2c_read_u8(module, &data, address);
+	if (ret >= 0)
+		ret = data;
+	else
+		dev_dbg(twl->dev,
+			"TWL4030:readb[0x%x,0x%x] Error %d\n",
+					module, address, ret);
+
+	return ret;
+}
+
+static inline int twl4030_usb_read(struct twl4030_usb *twl, u8 address)
+{
+	return twl4030_readb(twl, TWL4030_MODULE_USB, address);
+}
+
+/*-------------------------------------------------------------------------*/
+
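+/* Most of the ULPI-style registers defined above have their "set" register
+ * at reg + 1 and their "clear" register at reg + 2; the two helpers below
+ * rely on that layout.
+ */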
+static inline int
+twl4030_usb_set_bits(struct twl4030_usb *twl, u8 reg, u8 bits)
+{
+	return twl4030_usb_write(twl, reg + 1, bits);
+}
+
+static inline int
+twl4030_usb_clear_bits(struct twl4030_usb *twl, u8 reg, u8 bits)
+{
+	return twl4030_usb_write(twl, reg + 2, bits);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl)
+{
+	int	status;
+	int	linkstat = USB_LINK_UNKNOWN;
+
+	/* STS_HW_CONDITIONS */
+	status = twl4030_readb(twl, TWL4030_MODULE_PM_MASTER, 0x0f);
+	if (status < 0)
+		dev_err(twl->dev, "USB link status err %d\n", status);
+	else if (status & BIT(7))
+		linkstat = USB_LINK_VBUS;
+	else if (status & BIT(2))
+		linkstat = USB_LINK_ID;
+	else
+		linkstat = USB_LINK_NONE;
+
+	dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
+			status, status, linkstat);
+
+	/* REVISIT this assumes host and peripheral controllers
+	 * are registered, and that both are active...
+	 */
+
+	spin_lock_irq(&twl->lock);
+	twl->linkstat = linkstat;
+	if (linkstat == USB_LINK_ID) {
+		twl->otg.default_a = true;
+		twl->otg.state = OTG_STATE_A_IDLE;
+	} else {
+		twl->otg.default_a = false;
+		twl->otg.state = OTG_STATE_B_IDLE;
+	}
+	spin_unlock_irq(&twl->lock);
+
+	return linkstat;
+}
+
+static void twl4030_usb_set_mode(struct twl4030_usb *twl, int mode)
+{
+	twl->usb_mode = mode;
+
+	switch (mode) {
+	case T2_USB_MODE_ULPI:
+		twl4030_usb_clear_bits(twl, IFC_CTRL, IFC_CTRL_CARKITMODE);
+		twl4030_usb_set_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
+		twl4030_usb_clear_bits(twl, FUNC_CTRL,
+					FUNC_CTRL_XCVRSELECT_MASK |
+					FUNC_CTRL_OPMODE_MASK);
+		break;
+	case -1:
+		/* FIXME: power on defaults */
+		break;
+	default:
+		dev_err(twl->dev, "unsupported T2 transceiver mode %d\n",
+				mode);
+		break;
+	}
+}
+
+static void twl4030_i2c_access(struct twl4030_usb *twl, int on)
+{
+	unsigned long timeout;
+	int val = twl4030_usb_read(twl, PHY_CLK_CTRL);
+
+	if (val >= 0) {
+		if (on) {
+			/* enable DPLL to access PHY registers over I2C */
+			val |= REQ_PHY_DPLL_CLK;
+			WARN_ON(twl4030_usb_write_verify(twl, PHY_CLK_CTRL,
+						(u8)val) < 0);
+
+			timeout = jiffies + HZ;
+			while (!(twl4030_usb_read(twl, PHY_CLK_CTRL_STS) &
+							PHY_DPLL_CLK)
+				&& time_before(jiffies, timeout))
+					udelay(10);
+			if (!(twl4030_usb_read(twl, PHY_CLK_CTRL_STS) &
+							PHY_DPLL_CLK))
+				dev_err(twl->dev, "Timeout setting T2 HSUSB "
+						"PHY DPLL clock\n");
+		} else {
+			/* let ULPI control the DPLL clock */
+			val &= ~REQ_PHY_DPLL_CLK;
+			WARN_ON(twl4030_usb_write_verify(twl, PHY_CLK_CTRL,
+						(u8)val) < 0);
+		}
+	}
+}
+
+static void twl4030_phy_power(struct twl4030_usb *twl, int on)
+{
+	u8 pwr;
+
+	pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
+	if (on) {
+		pwr &= ~PHY_PWR_PHYPWD;
+		WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+		twl4030_usb_write(twl, PHY_CLK_CTRL,
+				  twl4030_usb_read(twl, PHY_CLK_CTRL) |
+					(PHY_CLK_CTRL_CLOCKGATING_EN |
+						PHY_CLK_CTRL_CLK32K_EN));
+	} else  {
+		pwr |= PHY_PWR_PHYPWD;
+		WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
+	}
+}
+
+static void twl4030_phy_suspend(struct twl4030_usb *twl, int controller_off)
+{
+	if (twl->asleep)
+		return;
+
+	twl4030_phy_power(twl, 0);
+	twl->asleep = 1;
+}
+
+static void twl4030_phy_resume(struct twl4030_usb *twl)
+{
+	if (!twl->asleep)
+		return;
+
+	twl4030_phy_power(twl, 1);
+	twl4030_i2c_access(twl, 1);
+	twl4030_usb_set_mode(twl, twl->usb_mode);
+	if (twl->usb_mode == T2_USB_MODE_ULPI)
+		twl4030_i2c_access(twl, 0);
+	twl->asleep = 0;
+}
+
+static void twl4030_usb_ldo_init(struct twl4030_usb *twl)
+{
+	/* Enable writing to power configuration registers */
+	twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY);
+	twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY);
+
+	/* put VUSB3V1 LDO in active state */
+	twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);
+
+	/* input to VUSB3V1 LDO is from VBAT, not VBUS */
+	twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1);
+
+	/* turn on 3.1V regulator */
+	twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x20, VUSB3V1_DEV_GRP);
+	twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_TYPE);
+
+	/* turn on 1.5V regulator */
+	twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x20, VUSB1V5_DEV_GRP);
+	twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_TYPE);
+
+	/* turn on 1.8V regulator */
+	twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x20, VUSB1V8_DEV_GRP);
+	twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_TYPE);
+
+	/* disable access to power configuration registers */
+	twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, PROTECT_KEY);
+}
+
+static ssize_t twl4030_usb_vbus_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct twl4030_usb *twl = dev_get_drvdata(dev);
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&twl->lock, flags);
+	ret = sprintf(buf, "%s\n",
+			(twl->linkstat == USB_LINK_VBUS) ? "on" : "off");
+	spin_unlock_irqrestore(&twl->lock, flags);
+
+	return ret;
+}
+static DEVICE_ATTR(vbus, 0444, twl4030_usb_vbus_show, NULL);
+
+static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
+{
+	struct twl4030_usb *twl = _twl;
+	int status;
+
+#ifdef CONFIG_LOCKDEP
+	/* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
+	 * we don't want and can't tolerate.  Although it might be
+	 * friendlier not to borrow this thread context...
+	 */
+	local_irq_enable();
+#endif
+
+	status = twl4030_usb_linkstat(twl);
+	if (status != USB_LINK_UNKNOWN) {
+
+		/* FIXME add a set_power() method so that B-devices can
+		 * configure the charger appropriately.  It's not always
+		 * correct to consume VBUS power, and how much current to
+		 * consume is a function of the USB configuration chosen
+		 * by the host.
+		 *
+		 * REVISIT usb_gadget_vbus_connect(...) as needed, ditto
+		 * its disconnect() sibling, when changing to/from the
+		 * USB_LINK_VBUS state.  musb_hdrc won't care until it
+		 * starts to handle softconnect right.
+		 */
+		twl4030charger_usb_en(status == USB_LINK_VBUS);
+
+		if (status == USB_LINK_NONE)
+			twl4030_phy_suspend(twl, 0);
+		else
+			twl4030_phy_resume(twl);
+	}
+	sysfs_notify(&twl->dev->kobj, NULL, "vbus");
+
+	return IRQ_HANDLED;
+}
+
+static int twl4030_set_suspend(struct otg_transceiver *x, int suspend)
+{
+	struct twl4030_usb *twl = xceiv_to_twl(x);
+
+	if (suspend)
+		twl4030_phy_suspend(twl, 1);
+	else
+		twl4030_phy_resume(twl);
+
+	return 0;
+}
+
+static int twl4030_set_peripheral(struct otg_transceiver *x,
+		struct usb_gadget *gadget)
+{
+	struct twl4030_usb *twl;
+
+	if (!x)
+		return -ENODEV;
+
+	twl = xceiv_to_twl(x);
+	twl->otg.gadget = gadget;
+	if (!gadget)
+		twl->otg.state = OTG_STATE_UNDEFINED;
+
+	return 0;
+}
+
+static int twl4030_set_host(struct otg_transceiver *x, struct usb_bus *host)
+{
+	struct twl4030_usb *twl;
+
+	if (!x)
+		return -ENODEV;
+
+	twl = xceiv_to_twl(x);
+	twl->otg.host = host;
+	if (!host)
+		twl->otg.state = OTG_STATE_UNDEFINED;
+
+	return 0;
+}
+
+static int __init twl4030_usb_probe(struct platform_device *pdev)
+{
+	struct twl4030_usb_data *pdata = pdev->dev.platform_data;
+	struct twl4030_usb	*twl;
+	int			status;
+
+	if (!pdata) {
+		dev_dbg(&pdev->dev, "platform_data not available\n");
+		return -EINVAL;
+	}
+
+	twl = kzalloc(sizeof *twl, GFP_KERNEL);
+	if (!twl)
+		return -ENOMEM;
+
+	twl->dev		= &pdev->dev;
+	twl->irq		= platform_get_irq(pdev, 0);
+	twl->otg.dev		= twl->dev;
+	twl->otg.label		= "twl4030";
+	twl->otg.set_host	= twl4030_set_host;
+	twl->otg.set_peripheral	= twl4030_set_peripheral;
+	twl->otg.set_suspend	= twl4030_set_suspend;
+	twl->usb_mode		= pdata->usb_mode;
+	twl->asleep		= 1;
+
+	/* init spinlock for workqueue */
+	spin_lock_init(&twl->lock);
+
+	twl4030_usb_ldo_init(twl);
+	otg_set_transceiver(&twl->otg);
+
+	platform_set_drvdata(pdev, twl);
+	if (device_create_file(&pdev->dev, &dev_attr_vbus))
+		dev_warn(&pdev->dev, "could not create sysfs file\n");
+
+	/* Our job is to use irqs and status from the power module
+	 * to keep the transceiver disabled when nothing's connected.
+	 *
+	 * FIXME we actually shouldn't start enabling it until the
+	 * USB controller drivers have said they're ready, by calling
+	 * set_host() and/or set_peripheral() ... OTG_capable boards
+	 * need both handles, otherwise just one suffices.
+	 */
+	twl->irq_enabled = true;
+	status = request_irq(twl->irq, twl4030_usb_irq,
+			IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+			"twl4030_usb", twl);
+	if (status < 0) {
+		dev_dbg(&pdev->dev, "can't get IRQ %d, err %d\n",
+			twl->irq, status);
+		kfree(twl);
+		return status;
+	}
+
+	/* The IRQ handler just handles changes from the previous states
+	 * of the ID and VBUS pins ... in probe() we must initialize that
+	 * previous state.  The easy way:  fake an IRQ.
+	 *
+	 * REVISIT:  a real IRQ might have happened already, if PREEMPT is
+	 * enabled.  Else the IRQ may not yet be configured or enabled,
+	 * because of scheduling delays.
+	 */
+	twl4030_usb_irq(twl->irq, twl);
+
+	dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
+	return 0;
+}
+
+static int __exit twl4030_usb_remove(struct platform_device *pdev)
+{
+	struct twl4030_usb *twl = platform_get_drvdata(pdev);
+	int val;
+
+	free_irq(twl->irq, twl);
+	device_remove_file(twl->dev, &dev_attr_vbus);
+
+	/* set transceiver mode to power on defaults */
+	twl4030_usb_set_mode(twl, -1);
+
+	/* autogate 60MHz ULPI clock,
+	 * clear dpll clock request for i2c access,
+	 * disable 32KHz
+	 */
+	val = twl4030_usb_read(twl, PHY_CLK_CTRL);
+	if (val >= 0) {
+		val |= PHY_CLK_CTRL_CLOCKGATING_EN;
+		val &= ~(PHY_CLK_CTRL_CLK32K_EN | REQ_PHY_DPLL_CLK);
+		twl4030_usb_write(twl, PHY_CLK_CTRL, (u8)val);
+	}
+
+	/* disable complete OTG block */
+	twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
+
+	twl4030_phy_power(twl, 0);
+
+	kfree(twl);
+
+	return 0;
+}
+
+static struct platform_driver twl4030_usb_driver = {
+	.probe		= twl4030_usb_probe,
+	.remove		= __exit_p(twl4030_usb_remove),
+	.driver		= {
+		.name	= "twl4030_usb",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init twl4030_usb_init(void)
+{
+	return platform_driver_register(&twl4030_usb_driver);
+}
+subsys_initcall(twl4030_usb_init);
+
+static void __exit twl4030_usb_exit(void)
+{
+	platform_driver_unregister(&twl4030_usb_driver);
+}
+module_exit(twl4030_usb_exit);
+
+MODULE_ALIAS("platform:twl4030_usb");
+MODULE_AUTHOR("Texas Instruments, Inc, Nokia Corporation");
+MODULE_DESCRIPTION("TWL4030 USB transceiver driver");
+MODULE_LICENSE("GPL");
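
For context (not part of this patch), the twl4030 core instantiates the "twl4030_usb" platform device; its platform data selects the pin mode checked in twl4030_usb_set_mode(). A hedged sketch, assuming struct twl4030_usb_data carries the usb_mode field used above:

	static struct twl4030_usb_data board_usb_data = {
		.usb_mode	= T2_USB_MODE_ULPI,	/* HS ULPI, the mode supported here */
	};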
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 70338f4..b361f05 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -496,6 +496,14 @@
 	bool "USB Secure Encapsulated Driver - Padded"
 	depends on USB_SERIAL_SAFE
 
+config USB_SERIAL_SIEMENS_MPI
+	tristate "USB Siemens MPI driver"
+	help
+	  Say M here if you want to use a Siemens USB/MPI adapter.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called siemens_mpi.
+
 config USB_SERIAL_SIERRAWIRELESS
 	tristate "USB Sierra Wireless Driver"
 	help
@@ -565,6 +573,15 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called omninet.
 
+config USB_SERIAL_OPTICON
+	tristate "USB Opticon Barcode driver (serial mode)"
+	help
+	  Say Y here if you want to use an Opticon USB Barcode device
+	  in serial emulation mode.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called opticon.
+
 config USB_SERIAL_DEBUG
 	tristate "USB Debugging Device"
 	help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 6047f81..b75be91 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -41,10 +41,12 @@
 obj-$(CONFIG_USB_SERIAL_MOTOROLA)		+= moto_modem.o
 obj-$(CONFIG_USB_SERIAL_NAVMAN)			+= navman.o
 obj-$(CONFIG_USB_SERIAL_OMNINET)		+= omninet.o
+obj-$(CONFIG_USB_SERIAL_OPTICON)		+= opticon.o
 obj-$(CONFIG_USB_SERIAL_OPTION)			+= option.o
 obj-$(CONFIG_USB_SERIAL_OTI6858)		+= oti6858.o
 obj-$(CONFIG_USB_SERIAL_PL2303)			+= pl2303.o
 obj-$(CONFIG_USB_SERIAL_SAFE)			+= safe_serial.o
+obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI)		+= siemens_mpi.o
 obj-$(CONFIG_USB_SERIAL_SIERRAWIRELESS)		+= sierra.o
 obj-$(CONFIG_USB_SERIAL_SPCP8X5)		+= spcp8x5.o
 obj-$(CONFIG_USB_SERIAL_TI)			+= ti_usb_3410_5052.o
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 69f84f0..38ba4ea 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -635,8 +635,7 @@
 
 	spin_lock_irqsave(&oob_priv->dp_port_lock, flags);
 	while (count > 0) {
-		while (oob_port->write_urb->status == -EINPROGRESS
-			|| oob_priv->dp_write_urb_in_use) {
+		while (oob_priv->dp_write_urb_in_use) {
 			cond_wait_interruptible_timeout_irqrestore(
 				&oob_port->write_wait, DIGI_RETRY_TIMEOUT,
 				&oob_priv->dp_port_lock, flags);
@@ -699,9 +698,8 @@
 
 	spin_lock_irqsave(&priv->dp_port_lock, flags);
 	while (count > 0 && ret == 0) {
-		while ((port->write_urb->status == -EINPROGRESS
-				|| priv->dp_write_urb_in_use)
-					&& time_before(jiffies, timeout)) {
+		while (priv->dp_write_urb_in_use &&
+		       time_before(jiffies, timeout)) {
 			cond_wait_interruptible_timeout_irqrestore(
 				&port->write_wait, DIGI_RETRY_TIMEOUT,
 				&priv->dp_port_lock, flags);
@@ -779,8 +777,7 @@
 	spin_lock_irqsave(&oob_priv->dp_port_lock, flags);
 	spin_lock(&port_priv->dp_port_lock);
 
-	while (oob_port->write_urb->status == -EINPROGRESS ||
-					oob_priv->dp_write_urb_in_use) {
+	while (oob_priv->dp_write_urb_in_use) {
 		spin_unlock(&port_priv->dp_port_lock);
 		cond_wait_interruptible_timeout_irqrestore(
 			&oob_port->write_wait, DIGI_RETRY_TIMEOUT,
@@ -1168,12 +1165,10 @@
 
 	/* be sure only one write proceeds at a time */
 	/* there are races on the port private buffer */
-	/* and races to check write_urb->status */
 	spin_lock_irqsave(&priv->dp_port_lock, flags);
 
 	/* wait for urb status clear to submit another urb */
-	if (port->write_urb->status == -EINPROGRESS ||
-					priv->dp_write_urb_in_use) {
+	if (priv->dp_write_urb_in_use) {
 		/* buffer data if count is 1 (probably put_char) if possible */
 		if (count == 1 && priv->dp_out_buf_len < DIGI_OUT_BUF_SIZE) {
 			priv->dp_out_buf[priv->dp_out_buf_len++] = *buf;
@@ -1236,7 +1231,7 @@
 	int ret = 0;
 	int status = urb->status;
 
-	dbg("digi_write_bulk_callback: TOP, urb->status=%d", status);
+	dbg("digi_write_bulk_callback: TOP, status=%d", status);
 
 	/* port and serial sanity check */
 	if (port == NULL || (priv = usb_get_serial_port_data(port)) == NULL) {
@@ -1266,8 +1261,7 @@
 	/* try to send any buffered data on this port, if it is open */
 	spin_lock(&priv->dp_port_lock);
 	priv->dp_write_urb_in_use = 0;
-	if (port->port.count && port->write_urb->status != -EINPROGRESS
-	    && priv->dp_out_buf_len > 0) {
+	if (port->port.count && priv->dp_out_buf_len > 0) {
 		*((unsigned char *)(port->write_urb->transfer_buffer))
 			= (unsigned char)DIGI_CMD_SEND_DATA;
 		*((unsigned char *)(port->write_urb->transfer_buffer) + 1)
@@ -1305,8 +1299,7 @@
 
 	spin_lock_irqsave(&priv->dp_port_lock, flags);
 
-	if (port->write_urb->status == -EINPROGRESS ||
-					priv->dp_write_urb_in_use)
+	if (priv->dp_write_urb_in_use)
 		room = 0;
 	else
 		room = port->bulk_out_size - 2 - priv->dp_out_buf_len;
@@ -1322,8 +1315,7 @@
 	struct usb_serial_port *port = tty->driver_data;
 	struct digi_port *priv = usb_get_serial_port_data(port);
 
-	if (port->write_urb->status == -EINPROGRESS
-	    || priv->dp_write_urb_in_use) {
+	if (priv->dp_write_urb_in_use) {
 		dbg("digi_chars_in_buffer: port=%d, chars=%d",
 			priv->dp_port_num, port->bulk_out_size - 2);
 		/* return(port->bulk_out_size - 2); */
@@ -1702,7 +1694,7 @@
 	/* short/multiple packet check */
 	if (urb->actual_length != len + 2) {
 		dev_err(&port->dev, "%s: INCOMPLETE OR MULTIPLE PACKET, "
-			"urb->status=%d, port=%d, opcode=%d, len=%d, "
+			"status=%d, port=%d, opcode=%d, len=%d, "
 			"actual_length=%d, status=%d\n", __func__, status,
 			priv->dp_port_num, opcode, len, urb->actual_length,
 			port_status);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ef6cfa5..c70a8f6 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -2030,7 +2030,7 @@
 			spin_unlock_irqrestore(&priv->rx_lock, flags);
 			dbg("%s - deferring remainder until unthrottled",
 					__func__);
-			return;
+			goto out;
 		}
 		spin_unlock_irqrestore(&priv->rx_lock, flags);
 		/* if the port is closed stop trying to read */
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 8e6a66e..a26a0e2 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1056,7 +1056,7 @@
 
 		if (status) {
 			dbg("%s - nonzero write bulk status received: %d",
-			    __func__, urb->status);
+			    __func__, status);
 			spin_lock_irqsave(&garmin_data_p->lock, flags);
 			garmin_data_p->flags |= CLEAR_HALT_REQUIRED;
 			spin_unlock_irqrestore(&garmin_data_p->lock, flags);
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 3ac59a8..f530032 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -473,7 +473,7 @@
 
 
 
-static int usb_ipw_init(void)
+static int __init usb_ipw_init(void)
 {
 	int retval;
 
@@ -490,7 +490,7 @@
 	return 0;
 }
 
-static void usb_ipw_exit(void)
+static void __exit usb_ipw_exit(void)
 {
 	usb_deregister(&usb_ipw_driver);
 	usb_serial_deregister(&ipw_device);
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index e320972..2314c6a 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -190,10 +190,12 @@
 {
 	struct usb_serial_port *port = urb->context;
 	int result;
+	int status = urb->status;
+
 	dbg("%s - enter", __func__);
 
-	if (urb->status) {
-		dbg("%s - urb->status = %d", __func__, urb->status);
+	if (status) {
+		dbg("%s - status = %d", __func__, status);
 		/* error stop all */
 		return;
 	}
@@ -245,10 +247,12 @@
 	struct usb_serial_port *port = urb->context;
 	struct iuu_private *priv = usb_get_serial_port_data(port);
 	u8 *st;
+	int status = urb->status;
+
 	dbg("%s - enter", __func__);
 
-	if (urb->status) {
-		dbg("%s - urb->status = %d", __func__, urb->status);
+	if (status) {
+		dbg("%s - status = %d", __func__, status);
 		/* error stop all */
 		return;
 	}
@@ -274,9 +278,9 @@
 {
 	struct usb_serial_port *port = urb->context;
 	int result;
-	dbg("%s - enter", __func__);
+	int status = urb->status;
 
-	dbg("%s - urb->status = %d", __func__, urb->status);
+	dbg("%s - status = %d", __func__, status);
 	usb_fill_bulk_urb(port->read_urb, port->serial->dev,
 			  usb_rcvbulkpipe(port->serial->dev,
 					  port->bulk_in_endpointAddress),
@@ -618,11 +622,12 @@
 	struct usb_serial_port *port = urb->context;
 	unsigned char *data = urb->transfer_buffer;
 	struct tty_struct *tty;
-	dbg("%s - urb->status = %d", __func__, urb->status);
+	int status = urb->status;
 
-	if (urb->status) {
-		dbg("%s - urb->status = %d", __func__, urb->status);
-		if (urb->status == -EPROTO) {
+	dbg("%s - status = %d", __func__, status);
+
+	if (status) {
+		if (status == -EPROTO) {
 			/* reschedule needed */
 		}
 		return;
@@ -695,7 +700,7 @@
 	struct usb_serial_port *port = urb->context;
 	struct iuu_private *priv = usb_get_serial_port_data(port);
 	unsigned long flags;
-	int status;
+	int status = urb->status;
 	int error = 0;
 	int len = 0;
 	unsigned char *data = urb->transfer_buffer;
@@ -703,8 +708,8 @@
 
 	dbg("%s - enter", __func__);
 
-	if (urb->status) {
-		dbg("%s - urb->status = %d", __func__, urb->status);
+	if (status) {
+		dbg("%s - status = %d", __func__, status);
 		/* error stop all */
 		return;
 	}
@@ -782,12 +787,11 @@
 {
 	struct usb_serial_port *port = urb->context;
 	int result;
-	dbg("%s - enter", __func__);
+	int status = urb->status;
 
-	dbg("%s - urb->status = %d", __func__, urb->status);
+	dbg("%s - status = %d", __func__, status);
 
-	if (urb->status) {
-		dbg("%s - urb->status = %d", __func__, urb->status);
+	if (status) {
 		/* error stop all */
 		return;
 	}
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 96a8c77..2c20e88 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -214,6 +214,7 @@
 	spinlock_t pool_lock;
 	struct urb *write_urb_pool[NUM_URBS];
 	char busy[NUM_URBS];
+	bool read_urb_busy;
 };
 
 
@@ -679,26 +680,30 @@
 	struct tty_struct *tty;
 	int status = urb->status;
 
-	if (status) {
-		dbg("nonzero read bulk status received: %d", status);
-		return;
-	}
-
 	mos7840_port = urb->context;
 	if (!mos7840_port) {
 		dbg("%s", "NULL mos7840_port pointer \n");
+		return;
+	}
+
+	if (status) {
+		dbg("nonzero read bulk status received: %d", status);
+		mos7840_port->read_urb_busy = false;
 		return;
 	}
 
 	port = (struct usb_serial_port *)mos7840_port->port;
 	if (mos7840_port_paranoia_check(port, __func__)) {
 		dbg("%s", "Port Paranoia failed \n");
+		mos7840_port->read_urb_busy = false;
 		return;
 	}
 
 	serial = mos7840_get_usb_serial(port, __func__);
 	if (!serial) {
 		dbg("%s\n", "Bad serial pointer ");
+		mos7840_port->read_urb_busy = false;
 		return;
 	}
 
@@ -725,17 +730,19 @@
 
 	if (!mos7840_port->read_urb) {
 		dbg("%s", "URB KILLED !!!\n");
+		mos7840_port->read_urb_busy = false;
 		return;
 	}
 
 
 	mos7840_port->read_urb->dev = serial->dev;
 
+	mos7840_port->read_urb_busy = true;
 	retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
 
 	if (retval) {
-		dbg(" usb_submit_urb(read bulk) failed, retval = %d",
-		 retval);
+		dbg("usb_submit_urb(read bulk) failed, retval = %d", retval);
+		mos7840_port->read_urb_busy = false;
 	}
 }
 
@@ -1055,10 +1062,12 @@
 
 	dbg("mos7840_open: bulkin endpoint is %d\n",
 	    port->bulk_in_endpointAddress);
+	mos7840_port->read_urb_busy = true;
 	response = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
 	if (response) {
 		dev_err(&port->dev, "%s - Error %d submitting control urb\n",
 			__func__, response);
+		mos7840_port->read_urb_busy = false;
 	}
 
 	/* initialize our wait queues */
@@ -1227,6 +1236,7 @@
 		if (mos7840_port->read_urb) {
 			dbg("%s", "Shutdown bulk read\n");
 			usb_kill_urb(mos7840_port->read_urb);
+			mos7840_port->read_urb_busy = false;
 		}
 		if ((&mos7840_port->control_urb)) {
 			dbg("%s", "Shutdown control read\n");
@@ -2043,14 +2053,14 @@
 	Data = 0x0c;
 	mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
 
-	if (mos7840_port->read_urb->status != -EINPROGRESS) {
+	if (mos7840_port->read_urb_busy == false) {
 		mos7840_port->read_urb->dev = serial->dev;
-
+		mos7840_port->read_urb_busy = true;
 		status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
-
 		if (status) {
-			dbg(" usb_submit_urb(read bulk) failed, status = %d",
+			dbg("usb_submit_urb(read bulk) failed, status = %d",
 			    status);
+			mos7840_port->read_urb_busy = false;
 		}
 	}
 	wake_up(&mos7840_port->delta_msr_wait);
@@ -2117,12 +2127,14 @@
 		return;
 	}
 
-	if (mos7840_port->read_urb->status != -EINPROGRESS) {
+	if (mos7840_port->read_urb_busy == false) {
 		mos7840_port->read_urb->dev = serial->dev;
+		mos7840_port->read_urb_busy = true;
 		status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
 		if (status) {
-			dbg(" usb_submit_urb(read bulk) failed, status = %d",
+			dbg("usb_submit_urb(read bulk) failed, status = %d",
 			    status);
+			mos7840_port->read_urb_busy = false;
 		}
 	}
 	return;
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
new file mode 100644
index 0000000..cea326f
--- /dev/null
+++ b/drivers/usb/serial/opticon.c
@@ -0,0 +1,358 @@
+/*
+ * Opticon USB barcode to serial driver
+ *
+ * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (C) 2008 Novell Inc.
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License version
+ *	2 as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+#include <linux/uaccess.h>
+
+static int debug;
+
+static struct usb_device_id id_table[] = {
+	{ USB_DEVICE(0x065a, 0x0009) },
+	{ },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+/* This structure holds all of the individual device information */
+struct opticon_private {
+	struct usb_device *udev;
+	struct usb_serial *serial;
+	struct usb_serial_port *port;
+	unsigned char *bulk_in_buffer;
+	struct urb *bulk_read_urb;
+	int buffer_size;
+	u8 bulk_address;
+	spinlock_t lock;	/* protects the following flags */
+	bool throttled;
+	bool actually_throttled;
+	bool rts;
+};
+
+static void opticon_bulk_callback(struct urb *urb)
+{
+	struct opticon_private *priv = urb->context;
+	unsigned char *data = urb->transfer_buffer;
+	struct usb_serial_port *port = priv->port;
+	int status = urb->status;
+	struct tty_struct *tty;
+	int result;
+	int available_room = 0;
+	int data_length;
+
+	dbg("%s - port %d", __func__, port->number);
+
+	switch (status) {
+	case 0:
+		/* success */
+		break;
+	case -ECONNRESET:
+	case -ENOENT:
+	case -ESHUTDOWN:
+		/* this urb is terminated, clean up */
+		dbg("%s - urb shutting down with status: %d",
+		    __func__, status);
+		return;
+	default:
+		dbg("%s - nonzero urb status received: %d",
+		    __func__, status);
+		goto exit;
+	}
+
+	usb_serial_debug_data(debug, &port->dev, __func__, urb->actual_length,
+			      data);
+
+	if (urb->actual_length > 2) {
+		data_length = urb->actual_length - 2;
+
+		/*
+		 * Data from the device comes with a 2 byte header:
+		 *
+		 * <0x00><0x00>data...
+		 * 	This is real data to be sent to the tty layer
+		 * <0x00><0x01>level
+		 * 	This is an RTS level change, the third byte is the RTS
+		 * 	value (0 for low, 1 for high).
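+		 * For example (hypothetical bytes): 00 00 31 32 33 carries the
+		 * payload "123" for the tty, while 00 01 01 reports that the
+		 * device raised RTS.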
+		 */
+		if ((data[0] == 0x00) && (data[1] == 0x00)) {
+			/* real data, send it to the tty layer */
+			tty = tty_port_tty_get(&port->port);
+			if (tty) {
+				available_room = tty_buffer_request_room(tty,
+								data_length);
+				if (available_room) {
+					tty_insert_flip_string(tty, data + 2,
+							       available_room);
+					tty_flip_buffer_push(tty);
+				}
+				tty_kref_put(tty);
+			}
+		} else {
+			if ((data[0] == 0x00) && (data[1] == 0x01)) {
+				if (data[2] == 0x00)
+					priv->rts = false;
+				else
+					priv->rts = true;
+				/* FIXME change the RTS level */
+			} else {
+			dev_dbg(&priv->udev->dev,
+				"Unknown data packet received from the device:"
+				" %2x %2x\n",
+				data[0], data[1]);
+			}
+		}
+	} else {
+		dev_dbg(&priv->udev->dev,
+			"Improper amount of data received from the device, "
+			"%d bytes", urb->actual_length);
+	}
+
+exit:
+	spin_lock(&priv->lock);
+
+	/* Keep reading unless we have been throttled */
+	if (!priv->throttled) {
+		usb_fill_bulk_urb(priv->bulk_read_urb, priv->udev,
+				  usb_rcvbulkpipe(priv->udev,
+						  priv->bulk_address),
+				  priv->bulk_in_buffer, priv->buffer_size,
+				  opticon_bulk_callback, priv);
+		result = usb_submit_urb(priv->bulk_read_urb, GFP_ATOMIC);
+		if (result)
+			dev_err(&port->dev,
+			    "%s - failed resubmitting read urb, error %d\n",
+							__func__, result);
+	} else
+		priv->actually_throttled = true;
+	spin_unlock(&priv->lock);
+}
+
+static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port,
+			struct file *filp)
+{
+	struct opticon_private *priv = usb_get_serial_data(port->serial);
+	unsigned long flags;
+	int result = 0;
+
+	dbg("%s - port %d", __func__, port->number);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->throttled = false;
+	priv->actually_throttled = false;
+	priv->port = port;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/*
+	 * Force low_latency on so that our tty_push actually forces the data
+	 * through, otherwise it is scheduled, and with high data rates (like
+	 * with OHCI) data can get lost.
+	 */
+	if (tty)
+		tty->low_latency = 1;
+
+	/* Start reading from the device */
+	usb_fill_bulk_urb(priv->bulk_read_urb, priv->udev,
+			  usb_rcvbulkpipe(priv->udev,
+					  priv->bulk_address),
+			  priv->bulk_in_buffer, priv->buffer_size,
+			  opticon_bulk_callback, priv);
+	result = usb_submit_urb(priv->bulk_read_urb, GFP_KERNEL);
+	if (result)
+		dev_err(&port->dev,
+			"%s - failed submitting read urb, error %d\n",
+			__func__, result);
+	return result;
+}
+
+static void opticon_close(struct tty_struct *tty, struct usb_serial_port *port,
+			  struct file *filp)
+{
+	struct opticon_private *priv = usb_get_serial_data(port->serial);
+
+	dbg("%s - port %d", __func__, port->number);
+
+	/* shutdown our urbs */
+	usb_kill_urb(priv->bulk_read_urb);
+}
+
+static void opticon_throttle(struct tty_struct *tty)
+{
+	struct usb_serial_port *port = tty->driver_data;
+	struct opticon_private *priv = usb_get_serial_data(port->serial);
+	unsigned long flags;
+
+	dbg("%s - port %d", __func__, port->number);
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->throttled = true;
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+
+static void opticon_unthrottle(struct tty_struct *tty)
+{
+	struct usb_serial_port *port = tty->driver_data;
+	struct opticon_private *priv = usb_get_serial_data(port->serial);
+	unsigned long flags;
+	int result;
+
+	dbg("%s - port %d", __func__, port->number);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->throttled = false;
+	priv->actually_throttled = false;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	priv->bulk_read_urb->dev = port->serial->dev;
+	result = usb_submit_urb(priv->bulk_read_urb, GFP_ATOMIC);
+	if (result)
+		dev_err(&port->dev,
+			"%s - failed submitting read urb, error %d\n",
+							__func__, result);
+}
+
+static int opticon_startup(struct usb_serial *serial)
+{
+	struct opticon_private *priv;
+	struct usb_host_interface *intf;
+	int i;
+	int retval = -ENOMEM;
+	bool bulk_in_found = false;
+
+	/* create our private serial structure */
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (priv == NULL) {
+		dev_err(&serial->dev->dev, "%s - Out of memory\n", __func__);
+		return -ENOMEM;
+	}
+	spin_lock_init(&priv->lock);
+	priv->serial = serial;
+	priv->port = serial->port[0];
+	priv->udev = serial->dev;
+
+	/* find our bulk endpoint */
+	intf = serial->interface->altsetting;
+	for (i = 0; i < intf->desc.bNumEndpoints; ++i) {
+		struct usb_endpoint_descriptor *endpoint;
+
+		endpoint = &intf->endpoint[i].desc;
+		if (!usb_endpoint_is_bulk_in(endpoint))
+			continue;
+
+		priv->bulk_read_urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!priv->bulk_read_urb) {
+			dev_err(&priv->udev->dev, "out of memory\n");
+			goto error;
+		}
+
+		priv->buffer_size = le16_to_cpu(endpoint->wMaxPacketSize) * 2;
+		priv->bulk_in_buffer = kmalloc(priv->buffer_size, GFP_KERNEL);
+		if (!priv->bulk_in_buffer) {
+			dev_err(&priv->udev->dev, "out of memory\n");
+			goto error;
+		}
+
+		priv->bulk_address = endpoint->bEndpointAddress;
+
+		/* set up our bulk urb */
+		usb_fill_bulk_urb(priv->bulk_read_urb, priv->udev,
+				  usb_rcvbulkpipe(priv->udev,
+						  endpoint->bEndpointAddress),
+				  priv->bulk_in_buffer, priv->buffer_size,
+				  opticon_bulk_callback, priv);
+
+		bulk_in_found = true;
+		break;
+	}
+
+	if (!bulk_in_found) {
+		dev_err(&priv->udev->dev,
+			"Error - the proper endpoints were not found!\n");
+		goto error;
+	}
+
+	usb_set_serial_data(serial, priv);
+	return 0;
+
+error:
+	usb_free_urb(priv->bulk_read_urb);
+	kfree(priv->bulk_in_buffer);
+	kfree(priv);
+	return retval;
+}
+
+static void opticon_shutdown(struct usb_serial *serial)
+{
+	struct opticon_private *priv = usb_get_serial_data(serial);
+
+	dbg("%s", __func__);
+
+	usb_kill_urb(priv->bulk_read_urb);
+	usb_free_urb(priv->bulk_read_urb);
+	kfree(priv->bulk_in_buffer);
+	kfree(priv);
+	usb_set_serial_data(serial, NULL);
+}
+
+
+static struct usb_driver opticon_driver = {
+	.name =		"opticon",
+	.probe =	usb_serial_probe,
+	.disconnect =	usb_serial_disconnect,
+	.id_table =	id_table,
+	.no_dynamic_id = 	1,
+};
+
+static struct usb_serial_driver opticon_device = {
+	.driver = {
+		.owner =	THIS_MODULE,
+		.name =		"opticon",
+	},
+	.id_table =		id_table,
+	.usb_driver = 		&opticon_driver,
+	.num_ports =		1,
+	.attach =		opticon_startup,
+	.open =			opticon_open,
+	.close =		opticon_close,
+	.shutdown =		opticon_shutdown,
+	.throttle = 		opticon_throttle,
+	.unthrottle =		opticon_unthrottle,
+};
+
+static int __init opticon_init(void)
+{
+	int retval;
+
+	retval = usb_serial_register(&opticon_device);
+	if (retval)
+		return retval;
+	retval = usb_register(&opticon_driver);
+	if (retval)
+		usb_serial_deregister(&opticon_device);
+	return retval;
+}
+
+static void __exit opticon_exit(void)
+{
+	usb_deregister(&opticon_driver);
+	usb_serial_deregister(&opticon_device);
+}
+
+module_init(opticon_init);
+module_exit(opticon_exit);
+MODULE_LICENSE("GPL");
+
+module_param(debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug enabled or not");
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 809697b..5ed1834 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -522,9 +522,9 @@
 /* per port private data */
 
 #define N_IN_URB 4
-#define N_OUT_URB 1
+#define N_OUT_URB 4
 #define IN_BUFLEN 4096
-#define OUT_BUFLEN 128
+#define OUT_BUFLEN 4096
 
 struct option_port_private {
 	/* Input endpoints and buffer for this port */
@@ -654,10 +654,6 @@
 			usb_unlink_urb(this_urb);
 			continue;
 		}
-		if (this_urb->status != 0)
-			dbg("usb_write %p failed (err=%d)",
-				this_urb, this_urb->status);
-
 		dbg("%s: endpoint %d buf %d", __func__,
 			usb_pipeendpoint(this_urb->pipe), i);
 
@@ -669,8 +665,7 @@
 		err = usb_submit_urb(this_urb, GFP_ATOMIC);
 		if (err) {
 			dbg("usb_submit_urb %p (write bulk) failed "
-				"(%d, has %d)", this_urb,
-				err, this_urb->status);
+				"(%d)", this_urb, err);
 			clear_bit(i, &portdata->out_busy);
 			continue;
 		}
diff --git a/drivers/usb/serial/siemens_mpi.c b/drivers/usb/serial/siemens_mpi.c
new file mode 100644
index 0000000..951ea0c
--- /dev/null
+++ b/drivers/usb/serial/siemens_mpi.c
@@ -0,0 +1,77 @@
+/*
+ * Siemens USB-MPI Serial USB driver
+ *
+ * Copyright (C) 2005 Thomas Hergenhahn <thomas.hergenhahn@suse.de>
+ * Copyright (C) 2005,2008 Greg Kroah-Hartman <gregkh@suse.de>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License version
+ *	2 as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+
+/* Version Information */
+#define DRIVER_VERSION "Version 0.1 09/26/2005"
+#define DRIVER_AUTHOR "Thomas Hergenhahn@web.de http://libnodave.sf.net"
+#define DRIVER_DESC "Driver for Siemens USB/MPI adapter"
+
+
+static struct usb_device_id id_table[] = {
+	/* Vendor and product id for 6ES7-972-0CB20-0XA0 */
+	{ USB_DEVICE(0x908, 0x0004) },
+	{ },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver siemens_usb_mpi_driver = {
+	.name =		"siemens_mpi",
+	.probe =	usb_serial_probe,
+	.disconnect =	usb_serial_disconnect,
+	.id_table =	id_table,
+};
+
+static struct usb_serial_driver siemens_usb_mpi_device = {
+	.driver = {
+		.owner =	THIS_MODULE,
+		.name =		"siemens_mpi",
+	},
+	.id_table =		id_table,
+	.num_ports =		1,
+};
+
+static int __init siemens_usb_mpi_init(void)
+{
+	int retval;
+
+	retval = usb_serial_register(&siemens_usb_mpi_device);
+	if (retval)
+		goto failed_usb_serial_register;
+	retval = usb_register(&siemens_usb_mpi_driver);
+	if (retval)
+		goto failed_usb_register;
+	printk(KERN_INFO DRIVER_DESC "\n");
+	printk(KERN_INFO DRIVER_VERSION " " DRIVER_AUTHOR "\n");
+	return retval;
+failed_usb_register:
+	usb_serial_deregister(&siemens_usb_mpi_device);
+failed_usb_serial_register:
+	return retval;
+}
+
+static void __exit siemens_usb_mpi_exit(void)
+{
+	usb_deregister(&siemens_usb_mpi_driver);
+	usb_serial_deregister(&siemens_usb_mpi_device);
+}
+
+module_init(siemens_usb_mpi_init);
+module_exit(siemens_usb_mpi_exit);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index a65bc2b..5e7528c 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -709,21 +709,20 @@
 	unsigned char *data = urb->transfer_buffer;
 	unsigned long flags;
 	int i;
-	int result;
-	u8 status = 0;
+	int result = urb->status;
+	u8 status;
 	char tty_flag;
 
-	dev_dbg(&port->dev, "start, urb->status = %d, "
-		"urb->actual_length = %d\n,", urb->status, urb->actual_length);
+	dev_dbg(&port->dev, "start, result = %d, urb->actual_length = %d\n,",
+		result, urb->actual_length);
 
 	/* check the urb status */
-	if (urb->status) {
+	if (result) {
 		if (!port->port.count)
 			return;
-		if (urb->status == -EPROTO) {
+		if (result == -EPROTO) {
 			/* spcp8x5 mysteriously fails with -EPROTO */
 			/* reschedule the read */
-			urb->status = 0;
 			urb->dev = port->serial->dev;
 			result = usb_submit_urb(urb , GFP_ATOMIC);
 			if (result)
@@ -833,8 +832,9 @@
 	struct usb_serial_port *port = urb->context;
 	struct spcp8x5_private *priv = usb_get_serial_port_data(port);
 	int result;
+	int status = urb->status;
 
-	switch (urb->status) {
+	switch (status) {
 	case 0:
 		/* success */
 		break;
@@ -843,14 +843,14 @@
 	case -ESHUTDOWN:
 		/* this urb is terminated, clean up */
 		dev_dbg(&port->dev, "urb shutting down with status: %d\n",
-			urb->status);
+			status);
 		priv->write_urb_in_use = 0;
 		return;
 	default:
 		/* error in the urb, so we have to resubmit it */
 		dbg("%s - Overflow in write", __func__);
 		dbg("%s - nonzero write bulk status received: %d",
-			__func__, urb->status);
+			__func__, status);
 		port->write_urb->transfer_buffer_length = 1;
 		port->write_urb->dev = port->serial->dev;
 		result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 01d0c70..3cf41df 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -145,7 +145,7 @@
 static int ti_write_byte(struct ti_device *tdev, unsigned long addr,
 	__u8 mask, __u8 byte);
 
-static int ti_download_firmware(struct ti_device *tdev, int type);
+static int ti_download_firmware(struct ti_device *tdev);
 
 /* circular buffer */
 static struct circ_buf *ti_buf_alloc(void);
@@ -176,9 +176,14 @@
 /* the array dimension is the number of default entries plus */
 /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
 /* null entry */
-static struct usb_device_id ti_id_table_3410[1+TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_3410[7+TI_EXTRA_VID_PID_COUNT+1] = {
 	{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
 	{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
+	{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
+	{ USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_NO_FW_PRODUCT_ID) },
+	{ USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) },
+	{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) },
+	{ USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) },
 };
 
 static struct usb_device_id ti_id_table_5052[4+TI_EXTRA_VID_PID_COUNT+1] = {
@@ -188,9 +193,14 @@
 	{ USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
 };
 
-static struct usb_device_id ti_id_table_combined[] = {
+static struct usb_device_id ti_id_table_combined[6+2*TI_EXTRA_VID_PID_COUNT+1] = {
 	{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
 	{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
+	{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
+	{ USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_NO_FW_PRODUCT_ID) },
+	{ USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) },
+	{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) },
+	{ USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) },
 	{ USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) },
 	{ USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
 	{ USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
@@ -272,6 +282,9 @@
 
 MODULE_FIRMWARE("ti_3410.fw");
 MODULE_FIRMWARE("ti_5052.fw");
+MODULE_FIRMWARE("mts_cdma.fw");
+MODULE_FIRMWARE("mts_gsm.fw");
+MODULE_FIRMWARE("mts_edge.fw");
 
 module_param(debug, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes");
@@ -304,21 +317,28 @@
 
 static int __init ti_init(void)
 {
-	int i, j;
+	int i, j, c;
 	int ret;
 
 	/* insert extra vendor and product ids */
+	c = ARRAY_SIZE(ti_id_table_combined) - 2 * TI_EXTRA_VID_PID_COUNT - 1;
 	j = ARRAY_SIZE(ti_id_table_3410) - TI_EXTRA_VID_PID_COUNT - 1;
-	for (i = 0; i < min(vendor_3410_count, product_3410_count); i++, j++) {
+	for (i = 0; i < min(vendor_3410_count, product_3410_count); i++, j++, c++) {
 		ti_id_table_3410[j].idVendor = vendor_3410[i];
 		ti_id_table_3410[j].idProduct = product_3410[i];
 		ti_id_table_3410[j].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
+		ti_id_table_combined[c].idVendor = vendor_3410[i];
+		ti_id_table_combined[c].idProduct = product_3410[i];
+		ti_id_table_combined[c].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
 	}
 	j = ARRAY_SIZE(ti_id_table_5052) - TI_EXTRA_VID_PID_COUNT - 1;
-	for (i = 0; i < min(vendor_5052_count, product_5052_count); i++, j++) {
+	for (i = 0; i < min(vendor_5052_count, product_5052_count); i++, j++, c++) {
 		ti_id_table_5052[j].idVendor = vendor_5052[i];
 		ti_id_table_5052[j].idProduct = product_5052[i];
 		ti_id_table_5052[j].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
+		ti_id_table_combined[c].idVendor = vendor_5052[i];
+		ti_id_table_combined[c].idProduct = product_5052[i];
+		ti_id_table_combined[c].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
 	}
 
 	ret = usb_serial_register(&ti_1port_device);
@@ -390,11 +410,7 @@
 
 	/* if we have only 1 configuration, download firmware */
 	if (dev->descriptor.bNumConfigurations == 1) {
-		if (tdev->td_is_3410)
-			status = ti_download_firmware(tdev, 3410);
-		else
-			status = ti_download_firmware(tdev, 5052);
-		if (status)
+		if ((status = ti_download_firmware(tdev)) != 0)
 			goto free_tdev;
 
 		/* 3410 must be reset, 5052 resets itself */
@@ -1671,9 +1687,9 @@
 	return status;
 }
 
-static int ti_download_firmware(struct ti_device *tdev, int type)
+static int ti_download_firmware(struct ti_device *tdev)
 {
-	int status = -ENOMEM;
+	int status;
 	int buffer_size;
 	__u8 *buffer;
 	struct usb_device *dev = tdev->td_serial->dev;
@@ -1681,9 +1697,34 @@
 		tdev->td_serial->port[0]->bulk_out_endpointAddress);
 	const struct firmware *fw_p;
 	char buf[32];
-	sprintf(buf, "ti_usb-%d.bin", type);
 
-	if (request_firmware(&fw_p, buf, &dev->dev)) {
+	/* try ID specific firmware first, then try generic firmware */
+	sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor,
+	    dev->descriptor.idProduct);
+	if ((status = request_firmware(&fw_p, buf, &dev->dev)) != 0) {
+		buf[0] = '\0';
+		if (dev->descriptor.idVendor == MTS_VENDOR_ID) {
+			switch (dev->descriptor.idProduct) {
+			case MTS_CDMA_PRODUCT_ID:
+				strcpy(buf, "mts_cdma.fw");
+				break;
+			case MTS_GSM_PRODUCT_ID:
+				strcpy(buf, "mts_gsm.fw");
+				break;
+			case MTS_EDGE_PRODUCT_ID:
+				strcpy(buf, "mts_edge.fw");
+				break;
+			}
+		}
+		if (buf[0] == '\0') {
+			if (tdev->td_is_3410)
+				strcpy(buf, "ti_3410.fw");
+			else
+				strcpy(buf, "ti_5052.fw");
+		}
+		status = request_firmware(&fw_p, buf, &dev->dev);
+	}
+	if (status) {
 		dev_err(&dev->dev, "%s - firmware not found\n", __func__);
 		return -ENOENT;
 	}
@@ -1699,6 +1740,8 @@
 		memset(buffer + fw_p->size, 0xff, buffer_size - fw_p->size);
 		status = ti_do_download(dev, pipe, buffer, fw_p->size);
 		kfree(buffer);
+	} else {
+		status = -ENOMEM;
 	}
 	release_firmware(fw_p);
 	if (status) {
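
With the lookup order above, a Multi-Tech CDMA modem (vendor 0x06E0, product 0xF110) is first probed for a device-specific image named ti_usb-v06e0-pf110.fw and, if that is absent, falls back to mts_cdma.fw.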
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index b5541bf..7e4752f 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -34,6 +34,14 @@
 #define TI_5052_EEPROM_PRODUCT_ID	0x505A	/* EEPROM, no firmware */
 #define TI_5052_FIRMWARE_PRODUCT_ID	0x505F	/* firmware is running */
 
+/* Multi-Tech vendor and product ids */
+#define MTS_VENDOR_ID			0x06E0
+#define MTS_GSM_NO_FW_PRODUCT_ID	0xF108
+#define MTS_CDMA_NO_FW_PRODUCT_ID	0xF109
+#define MTS_CDMA_PRODUCT_ID		0xF110
+#define MTS_GSM_PRODUCT_ID		0xF111
+#define MTS_EDGE_PRODUCT_ID		0xF112
+
 /* Commands */
 #define TI_GET_VERSION			0x01
 #define TI_GET_PORT_STATUS		0x02
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 080ade2..cfcfd5a 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -511,9 +511,6 @@
 
 	dbg("%s - port %d", __func__, port->number);
 
-	if (!port)
-		return;
-
 	tty = tty_port_tty_get(&port->port);
 	if (!tty)
 		return;
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index fc5d995..6c9cbb5 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -31,7 +31,7 @@
 	.no_dynamic_id = 	1,
 };
 
-int usb_debug_open(struct tty_struct *tty, struct usb_serial_port *port,
+static int usb_debug_open(struct tty_struct *tty, struct usb_serial_port *port,
 							struct file *filp)
 {
 	port->bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE;
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index c68b738..9df6887 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -61,13 +61,6 @@
 	  - CyQ've CQ8060A CDRW drive
 	  - Planex eXtreme Drive RX-25HU USB-IDE cable (not model RX-25U)
 
-config USB_STORAGE_DPCM
-	bool "Microtech/ZiO! CompactFlash/SmartMedia support"
-	depends on USB_STORAGE
-	help
-	  Say Y here to support the Microtech/ZiO! CompactFlash reader.
-	  There is a web page at <http://www.ziocorp.com/products/>.
-
 config USB_STORAGE_USBAT
 	bool "USBAT/USBAT02-based storage support"
 	depends on USB_STORAGE
@@ -90,12 +83,12 @@
 	  - Sandisk ImageMate SDDR-05b
 
 config USB_STORAGE_SDDR09
-	bool "SanDisk SDDR-09 (and other SmartMedia) support"
+	bool "SanDisk SDDR-09 (and other SmartMedia, including DPCM) support"
 	depends on USB_STORAGE
 	help
 	  Say Y here to include additional code to support the Sandisk SDDR-09
 	  SmartMedia reader in the USB Mass Storage driver.
-	  Also works for the Microtech Zio! SmartMedia reader.
+	  Also works for the Microtech Zio! CompactFlash/SmartMedia reader.
 
 config USB_STORAGE_SDDR55
 	bool "SanDisk SDDR-55 SmartMedia support"
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile
index 7f8beb5..b320693 100644
--- a/drivers/usb/storage/Makefile
+++ b/drivers/usb/storage/Makefile
@@ -14,7 +14,6 @@
 usb-storage-obj-$(CONFIG_USB_STORAGE_SDDR09)	+= sddr09.o
 usb-storage-obj-$(CONFIG_USB_STORAGE_SDDR55)	+= sddr55.o
 usb-storage-obj-$(CONFIG_USB_STORAGE_FREECOM)	+= freecom.o
-usb-storage-obj-$(CONFIG_USB_STORAGE_DPCM)	+= dpcm.o
 usb-storage-obj-$(CONFIG_USB_STORAGE_ISD200)	+= isd200.o
 usb-storage-obj-$(CONFIG_USB_STORAGE_DATAFAB)	+= datafab.o
 usb-storage-obj-$(CONFIG_USB_STORAGE_JUMPSHOT)	+= jumpshot.o
@@ -24,7 +23,7 @@
 usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o
 
 usb-storage-objs :=	scsiglue.o protocol.o transport.o usb.o \
-			initializers.o sierra_ms.o $(usb-storage-obj-y)
+			initializers.o sierra_ms.o option_ms.o $(usb-storage-obj-y)
 
 ifneq ($(CONFIG_USB_LIBUSUAL),)
 	obj-$(CONFIG_USB)	+= libusual.o
diff --git a/drivers/usb/storage/dpcm.c b/drivers/usb/storage/dpcm.c
deleted file mode 100644
index 9399234..0000000
--- a/drivers/usb/storage/dpcm.c
+++ /dev/null
@@ -1,86 +0,0 @@
-/* Driver for Microtech DPCM-USB CompactFlash/SmartMedia reader
- *
- * DPCM driver v0.1:
- *
- * First release
- *
- * Current development and maintenance by:
- *   (c) 2000 Brian Webb (webbb@earthlink.net)
- *
- * This device contains both a CompactFlash card reader, which
- * uses the Control/Bulk w/o Interrupt protocol and
- * a SmartMedia card reader that uses the same protocol
- * as the SDDR09.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-
-#include "usb.h"
-#include "transport.h"
-#include "protocol.h"
-#include "debug.h"
-#include "dpcm.h"
-#include "sddr09.h"
-
-/*
- * Transport for the Microtech DPCM-USB
- *
- */
-int dpcm_transport(struct scsi_cmnd *srb, struct us_data *us)
-{
-	int ret;
-
-	if (srb == NULL)
-		return USB_STOR_TRANSPORT_ERROR;
-
-	US_DEBUGP("dpcm_transport: LUN=%d\n", srb->device->lun);
-
-	switch (srb->device->lun) {
-		case 0:
-
-			/*
-			 * LUN 0 corresponds to the CompactFlash card reader.
-			 */
-			ret = usb_stor_CB_transport(srb, us);
-			break;
-
-#ifdef CONFIG_USB_STORAGE_SDDR09
-		case 1:
-
-			/*
-			 * LUN 1 corresponds to the SmartMedia card reader.
-			 */
-
-			/*
-			 * Set the LUN to 0 (just in case).
-			 */
-			srb->device->lun = 0; us->srb->device->lun = 0;
-			ret = sddr09_transport(srb, us);
-			srb->device->lun = 1; us->srb->device->lun = 1;
-			break;
-
-#endif
-
-		default:
-			US_DEBUGP("dpcm_transport: Invalid LUN %d\n", srb->device->lun);
-			ret = USB_STOR_TRANSPORT_ERROR;
-			break;
-	}
-	return ret;
-}
diff --git a/drivers/usb/storage/dpcm.h b/drivers/usb/storage/dpcm.h
deleted file mode 100644
index e7b7b0f..0000000
--- a/drivers/usb/storage/dpcm.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Driver for Microtech DPCM-USB CompactFlash/SmartMedia reader
- *
- * DPCM driver v0.1:
- *
- * First release
- *
- * Current development and maintenance by:
- *   (c) 2000 Brian Webb (webbb@earthlink.net)
- *
- * See dpcm.c for more explanation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef _MICROTECH_DPCM_USB_H
-#define _MICROTECH_DPCM_USB_H
-
-extern int dpcm_transport(struct scsi_cmnd *srb, struct us_data *us);
-
-#endif
diff --git a/drivers/usb/storage/libusual.c b/drivers/usb/storage/libusual.c
index d617e8a..f970b27 100644
--- a/drivers/usb/storage/libusual.c
+++ b/drivers/usb/storage/libusual.c
@@ -46,6 +46,12 @@
 { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin,bcdDeviceMax), \
   .driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
 
+#define COMPLIANT_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
+		    vendorName, productName, useProtocol, useTransport, \
+		    initFunction, flags) \
+{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
+  .driver_info = (flags) }
+
 #define USUAL_DEV(useProto, useTrans, useType) \
 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \
   .driver_info = ((useType)<<24) }
@@ -57,6 +63,7 @@
 
 #undef USUAL_DEV
 #undef UNUSUAL_DEV
+#undef COMPLIANT_DEV
 
 MODULE_DEVICE_TABLE(usb, storage_usb_ids);
 EXPORT_SYMBOL_GPL(storage_usb_ids);
diff --git a/drivers/usb/storage/option_ms.c b/drivers/usb/storage/option_ms.c
new file mode 100644
index 0000000..353f922
--- /dev/null
+++ b/drivers/usb/storage/option_ms.c
@@ -0,0 +1,147 @@
+/*
+ * Driver for Option High Speed Mobile Devices.
+ *
+ *   (c) 2008 Dan Williams <dcbw@redhat.com>
+ *
+ * Inspiration taken from sierra_ms.c by Kevin Lloyd <klloyd@sierrawireless.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/usb.h>
+
+#include "usb.h"
+#include "transport.h"
+#include "option_ms.h"
+#include "debug.h"
+
+#define ZCD_FORCE_MODEM			0x01
+#define ZCD_ALLOW_MS 			0x02
+
+static unsigned int option_zero_cd = ZCD_FORCE_MODEM;
+module_param(option_zero_cd, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(option_zero_cd, "ZeroCD mode (1=Force Modem (default),"
+		 " 2=Allow CD-Rom)");
+
+#define RESPONSE_LEN 1024
+
+static int option_rezero(struct us_data *us, int ep_in, int ep_out)
+{
+	const unsigned char rezero_msg[] = {
+	  0x55, 0x53, 0x42, 0x43, 0x78, 0x56, 0x34, 0x12,
+	  0x01, 0x00, 0x00, 0x00, 0x80, 0x00, 0x06, 0x01,
+	  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+	};
+	char *buffer;
+	int result;
+
+	US_DEBUGP("Option MS: %s", "DEVICE MODE SWITCH\n");
+
+	buffer = kzalloc(RESPONSE_LEN, GFP_KERNEL);
+	if (buffer == NULL)
+		return USB_STOR_TRANSPORT_ERROR;
+
+	memcpy(buffer, rezero_msg, sizeof (rezero_msg));
+	result = usb_stor_bulk_transfer_buf(us,
+			usb_sndbulkpipe(us->pusb_dev, ep_out),
+			buffer, sizeof (rezero_msg), NULL);
+	if (result != USB_STOR_XFER_GOOD) {
+		result = USB_STOR_XFER_ERROR;
+		goto out;
+	}
+
+	/* Some of the devices need to be asked for a response, but we don't
+	 * care what that response is.
+	 */
+	result = usb_stor_bulk_transfer_buf(us,
+			usb_rcvbulkpipe(us->pusb_dev, ep_in),
+			buffer, RESPONSE_LEN, NULL);
+	result = USB_STOR_XFER_GOOD;
+
+out:
+	kfree(buffer);
+	return result;
+}
+
+int option_ms_init(struct us_data *us)
+{
+	struct usb_device *udev;
+	struct usb_interface *intf;
+	struct usb_host_interface *iface_desc;
+	struct usb_endpoint_descriptor *endpoint = NULL;
+	u8 ep_in = 0, ep_out = 0;
+	int ep_in_size = 0, ep_out_size = 0;
+	int i, result;
+
+	udev = us->pusb_dev;
+	intf = us->pusb_intf;
+
+	/* Ensure it's really a ZeroCD device; devices that are already
+	 * in modem mode return 0xFF for class, subclass, and protocol.
+	 */
+	if (udev->descriptor.bDeviceClass != 0 ||
+	    udev->descriptor.bDeviceSubClass != 0 ||
+	    udev->descriptor.bDeviceProtocol != 0)
+		return USB_STOR_TRANSPORT_GOOD;
+
+	US_DEBUGP("Option MS: option_ms_init called\n");
+
+	/* Find the right mass storage interface */
+	iface_desc = intf->cur_altsetting;
+	if (iface_desc->desc.bInterfaceClass != 0x8 ||
+	    iface_desc->desc.bInterfaceSubClass != 0x6 ||
+	    iface_desc->desc.bInterfaceProtocol != 0x50) {
+		US_DEBUGP("Option MS: mass storage interface not found, no action "
+		          "required\n");
+		return USB_STOR_TRANSPORT_GOOD;
+	}
+
+	/* Find the mass storage bulk endpoints */
+	for (i = 0; i < iface_desc->desc.bNumEndpoints && (!ep_in_size || !ep_out_size); ++i) {
+		endpoint = &iface_desc->endpoint[i].desc;
+
+		if (usb_endpoint_is_bulk_in(endpoint)) {
+			ep_in = usb_endpoint_num(endpoint);
+			ep_in_size = le16_to_cpu(endpoint->wMaxPacketSize);
+		} else if (usb_endpoint_is_bulk_out(endpoint)) {
+			ep_out = usb_endpoint_num(endpoint);
+			ep_out_size = le16_to_cpu(endpoint->wMaxPacketSize);
+		}
+	}
+
+	/* Can't find the mass storage endpoints */
+	if (!ep_in_size || !ep_out_size) {
+		US_DEBUGP("Option MS: mass storage endpoints not found, no action "
+		          "required\n");
+		return USB_STOR_TRANSPORT_GOOD;
+	}
+
+	/* Force Modem mode */
+	if (option_zero_cd == ZCD_FORCE_MODEM) {
+		US_DEBUGP("Option MS: %s", "Forcing Modem Mode\n");
+		result = option_rezero(us, ep_in, ep_out);
+		if (result != USB_STOR_XFER_GOOD)
+			US_DEBUGP("Option MS: Failed to switch to modem mode.\n");
+		return -EIO;
+	} else if (option_zero_cd == ZCD_ALLOW_MS) {
+		/* Allow Mass Storage mode (keep CD-Rom) */
+		US_DEBUGP("Option MS: %s", "Allowing Mass Storage Mode if device"
+		          " requests it\n");
+	}
+
+	return USB_STOR_TRANSPORT_GOOD;
+}
+
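Since option_ms.o is linked straight into usb-storage (see the Makefile change above), the switching behaviour should be tunable through the module parameter declared here; as a sketch, assuming the parameter keeps the name given to module_param():

	modprobe usb-storage option_zero_cd=2

would leave the ZeroCD virtual CD-ROM exposed, while the default value of 1 forces modem mode and makes option_ms_init() return -EIO so usb-storage does not claim the device.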
diff --git a/drivers/usb/storage/option_ms.h b/drivers/usb/storage/option_ms.h
new file mode 100644
index 0000000..b6e448c
--- /dev/null
+++ b/drivers/usb/storage/option_ms.h
@@ -0,0 +1,4 @@
+#ifndef _OPTION_MS_H_
+#define _OPTION_MS_H_
+extern int option_ms_init(struct us_data *us);
+#endif
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index 3b3357e..be441d8 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -56,9 +56,9 @@
  * Protocol routines
  ***********************************************************************/
 
-void usb_stor_qic157_command(struct scsi_cmnd *srb, struct us_data *us)
+void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
 {
-	/* Pad the ATAPI command with zeros 
+	/* Pad the SCSI command with zeros out to 12 bytes
 	 *
 	 * NOTE: This only works because a scsi_cmnd struct field contains
 	 * a unsigned char cmnd[16], so we know we have storage available
@@ -73,26 +73,6 @@
 	usb_stor_invoke_transport(srb, us);
 }
 
-void usb_stor_ATAPI_command(struct scsi_cmnd *srb, struct us_data *us)
-{
-	/* Pad the ATAPI command with zeros 
-	 *
-	 * NOTE: This only works because a scsi_cmnd struct field contains
-	 * a unsigned char cmnd[16], so we know we have storage available
-	 */
-
-	/* Pad the ATAPI command with zeros */
-	for (; srb->cmd_len<12; srb->cmd_len++)
-		srb->cmnd[srb->cmd_len] = 0;
-
-	/* set command length to 12 bytes */
-	srb->cmd_len = 12;
-
-	/* send the command to the transport layer */
-	usb_stor_invoke_transport(srb, us);
-}
-
-
 void usb_stor_ufi_command(struct scsi_cmnd *srb, struct us_data *us)
 {
 	/* fix some commands -- this is a form of mode translation
diff --git a/drivers/usb/storage/protocol.h b/drivers/usb/storage/protocol.h
index 487056f..ffc3e2a 100644
--- a/drivers/usb/storage/protocol.h
+++ b/drivers/usb/storage/protocol.h
@@ -40,8 +40,7 @@
 #define _PROTOCOL_H_
 
 /* Protocol handling routines */
-extern void usb_stor_ATAPI_command(struct scsi_cmnd*, struct us_data*);
-extern void usb_stor_qic157_command(struct scsi_cmnd*, struct us_data*);
+extern void usb_stor_pad12_command(struct scsi_cmnd*, struct us_data*);
 extern void usb_stor_ufi_command(struct scsi_cmnd*, struct us_data*);
 extern void usb_stor_transparent_scsi_command(struct scsi_cmnd*,
 		struct us_data*);
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 09779f6..2a42b86 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -59,6 +59,13 @@
 #include "transport.h"
 #include "protocol.h"
 
+/* Vendor IDs for companies that seem to include the READ CAPACITY bug
+ * in all their devices
+ */
+#define VENDOR_ID_NOKIA		0x0421
+#define VENDOR_ID_NIKON		0x04b0
+#define VENDOR_ID_MOTOROLA	0x22b8
+
 /***********************************************************************
  * Host functions 
  ***********************************************************************/
@@ -129,11 +136,35 @@
 					      max_sectors);
 	}
 
+	/* Some USB host controllers can't do DMA; they have to use PIO.
+	 * They indicate this by setting their dma_mask to NULL.  For
+	 * such controllers we need to make sure the block layer sets
+	 * up bounce buffers in addressable memory.
+	 */
+	if (!us->pusb_dev->bus->controller->dma_mask)
+		blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_HIGH);
+
 	/* We can't put these settings in slave_alloc() because that gets
 	 * called before the device type is known.  Consequently these
 	 * settings can't be overridden via the scsi devinfo mechanism. */
 	if (sdev->type == TYPE_DISK) {
 
+		/* Some vendors seem to put the READ CAPACITY bug into
+		 * all their devices -- primarily makers of cell phones
+		 * and digital cameras.  Since these devices always use
+		 * flash media and can be expected to have an even number
+		 * of sectors, we will always enable the CAPACITY_HEURISTICS
+		 * flag unless told otherwise. */
+		switch (le16_to_cpu(us->pusb_dev->descriptor.idVendor)) {
+		case VENDOR_ID_NOKIA:
+		case VENDOR_ID_NIKON:
+		case VENDOR_ID_MOTOROLA:
+			if (!(us->fflags & (US_FL_FIX_CAPACITY |
+					US_FL_CAPACITY_OK)))
+				us->fflags |= US_FL_CAPACITY_HEURISTICS;
+			break;
+		}
+
 		/* Disk-type devices use MODE SENSE(6) if the protocol
 		 * (SubClass) is Transparent SCSI, otherwise they use
 		 * MODE SENSE(10). */
@@ -170,6 +201,10 @@
 		if (us->fflags & US_FL_CAPACITY_HEURISTICS)
 			sdev->guess_capacity = 1;
 
+		/* assume SPC3 or later devices support sense size > 18 */
+		if (sdev->scsi_level > SCSI_SPC_2)
+			us->fflags |= US_FL_SANE_SENSE;
+
 		/* Some devices report a SCSI revision level above 2 but are
 		 * unable to handle the REPORT LUNS command (for which
 		 * support is mandatory at level 3).  Since we already have
@@ -196,6 +231,14 @@
 		 * sector in a larger then 1 sector read, since the performance
 		 * impact is negible we set this flag for all USB disks */
 		sdev->last_sector_bug = 1;
+
+		/* Enable last-sector hacks for single-target devices using
+		 * the Bulk-only transport, unless we already know the
+		 * capacity will be decremented or is correct. */
+		if (!(us->fflags & (US_FL_FIX_CAPACITY | US_FL_CAPACITY_OK |
+					US_FL_SCM_MULT_TARG)) &&
+				us->protocol == US_PR_BULK)
+			us->use_last_sector_hacks = 1;
 	} else {
 
 		/* Non-disk-type devices don't need to blacklist any pages
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index c5a54b8..531ae5c 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -45,6 +45,7 @@
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
 
 #include "usb.h"
 #include "transport.h"
@@ -1446,6 +1447,48 @@
 }
 
 /*
+ * Transport for the Microtech DPCM-USB
+ */
+int dpcm_transport(struct scsi_cmnd *srb, struct us_data *us)
+{
+	int ret;
+
+	US_DEBUGP("dpcm_transport: LUN=%d\n", srb->device->lun);
+
+	switch (srb->device->lun) {
+	case 0:
+
+		/*
+		 * LUN 0 corresponds to the CompactFlash card reader.
+		 */
+		ret = usb_stor_CB_transport(srb, us);
+		break;
+
+	case 1:
+
+		/*
+		 * LUN 1 corresponds to the SmartMedia card reader.
+		 */
+
+		/*
+		 * Set the LUN to 0 (just in case).
+		 */
+		srb->device->lun = 0;
+		ret = sddr09_transport(srb, us);
+		srb->device->lun = 1;
+		break;
+
+	default:
+		US_DEBUGP("dpcm_transport: Invalid LUN %d\n",
+				srb->device->lun);
+		ret = USB_STOR_TRANSPORT_ERROR;
+		break;
+	}
+	return ret;
+}
+
+
+/*
  * Transport for the Sandisk SDDR-09
  */
 int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us)
diff --git a/drivers/usb/storage/sddr09.h b/drivers/usb/storage/sddr09.h
index e50033a..b701172 100644
--- a/drivers/usb/storage/sddr09.h
+++ b/drivers/usb/storage/sddr09.h
@@ -28,8 +28,11 @@
 /* Sandisk SDDR-09 stuff */
 
 extern int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us);
-
-extern int usb_stor_sddr09_dpcm_init(struct us_data *us);
 extern int usb_stor_sddr09_init(struct us_data *us);
 
+/* Microtech DPCM-USB stuff */
+
+extern int dpcm_transport(struct scsi_cmnd *srb, struct us_data *us);
+extern int usb_stor_sddr09_dpcm_init(struct us_data *us);
+
 #endif
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 79108d5..1d5438e 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -57,6 +57,9 @@
 #include "scsiglue.h"
 #include "debug.h"
 
+#include <linux/blkdev.h>
+#include "../../scsi/sd.h"
+
 
 /***********************************************************************
  * Data transfer routines
@@ -511,6 +514,110 @@
  * Transport routines
  ***********************************************************************/
 
+/* So many devices report their capacity incorrectly that this routine
+ * was written to counteract some of the resulting problems.
+ */
+static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
+{
+	struct gendisk *disk;
+	struct scsi_disk *sdkp;
+	u32 sector;
+
+	/* To report "Medium Error: Record Not Found" */
+	static unsigned char record_not_found[18] = {
+		[0]	= 0x70,			/* current error */
+		[2]	= MEDIUM_ERROR,		/* = 0x03 */
+		[7]	= 0x0a,			/* additional length */
+		[12]	= 0x14			/* Record Not Found */
+	};
+
+	/* If last-sector problems can't occur, whether because the
+	 * capacity was already decremented or because the device is
+	 * known to report the correct capacity, then we don't need
+	 * to do anything.
+	 */
+	if (!us->use_last_sector_hacks)
+		return;
+
+	/* Was this command a READ(10) or a WRITE(10)? */
+	if (srb->cmnd[0] != READ_10 && srb->cmnd[0] != WRITE_10)
+		goto done;
+
+	/* Did this command access the last sector? */
+	sector = (srb->cmnd[2] << 24) | (srb->cmnd[3] << 16) |
+			(srb->cmnd[4] << 8) | (srb->cmnd[5]);
+	disk = srb->request->rq_disk;
+	if (!disk)
+		goto done;
+	sdkp = scsi_disk(disk);
+	if (!sdkp)
+		goto done;
+	if (sector + 1 != sdkp->capacity)
+		goto done;
+
+	if (srb->result == SAM_STAT_GOOD && scsi_get_resid(srb) == 0) {
+
+		/* The command succeeded.  If the capacity is odd
+		 * (i.e., if the sector number is even) then the
+		 * "always-even" heuristic would be wrong for this
+		 * device.  Issue a WARN() so that the kerneloops.org
+		 * project will be notified and we will then know to
+		 * mark the device with a CAPACITY_OK flag.  Hopefully
+		 * this will occur for only a few devices.
+		 *
+		 * Use the sign of us->last_sector_hacks to tell whether
+		 * the warning has already been issued; we don't need
+		 * more than one warning per device.
+		 */
+		if (!(sector & 1) && us->use_last_sector_hacks > 0) {
+			unsigned vid = le16_to_cpu(
+					us->pusb_dev->descriptor.idVendor);
+			unsigned pid = le16_to_cpu(
+					us->pusb_dev->descriptor.idProduct);
+			unsigned rev = le16_to_cpu(
+					us->pusb_dev->descriptor.bcdDevice);
+
+			WARN(1, "%s: Successful last sector success at %u, "
+					"device %04x:%04x:%04x\n",
+					sdkp->disk->disk_name, sector,
+					vid, pid, rev);
+			us->use_last_sector_hacks = -1;
+		}
+
+	} else {
+		/* The command failed.  Allow up to 3 retries in case this
+		 * is some normal sort of failure.  After that, assume the
+		 * capacity is wrong and we're trying to access the sector
+		 * beyond the end.  Replace the result code and sense data
+		 * with values that will cause the SCSI core to fail the
+		 * command immediately, instead of going into an infinite
+		 * (or even just a very long) retry loop.
+		 */
+		if (++us->last_sector_retries < 3)
+			return;
+		srb->result = SAM_STAT_CHECK_CONDITION;
+		memcpy(srb->sense_buffer, record_not_found,
+				sizeof(record_not_found));
+
+		/* In theory we might want to issue a WARN() here if the
+		 * capacity is even, since it could indicate the device
+		 * has the READ CAPACITY bug _and_ the real capacity is
+		 * odd.  But it could also indicate that the device
+		 * simply can't access its last sector, a failure mode
+		 * which is surprisingly common.  So no warning.
+		 */
+	}
+
+ done:
+	/* Don't reset the retry counter for TEST UNIT READY commands,
+	 * because they get issued after device resets which might be
+	 * caused by a failed last-sector access.
+	 */
+	if (srb->cmnd[0] != TEST_UNIT_READY)
+		us->last_sector_retries = 0;
+}
+
 /* Invoke the transport and basic error-handling/recovery methods
  *
  * This is used by the protocol layers to actually send the message to
@@ -544,6 +651,7 @@
 	/* if the transport provided its own sense data, don't auto-sense */
 	if (result == USB_STOR_TRANSPORT_NO_SENSE) {
 		srb->result = SAM_STAT_CHECK_CONDITION;
+		last_sector_hacks(us, srb);
 		return;
 	}
 
@@ -579,6 +687,20 @@
 	}
 
 	/*
+	 * Determine if this device is SAT by seeing if the
+	 * command executed successfully.  Otherwise we'll have
+	 * to wait for at least one CHECK_CONDITION to determine
+	 * SANE_SENSE support
+	 */
+	if ((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) &&
+	    result == USB_STOR_TRANSPORT_GOOD &&
+	    !(us->fflags & US_FL_SANE_SENSE) &&
+	    !(srb->cmnd[2] & 0x20)) {
+		US_DEBUGP("-- SAT supported, increasing auto-sense\n");
+		us->fflags |= US_FL_SANE_SENSE;
+	}
+
+	/*
 	 * A short transfer on a command where we don't expect it
 	 * is unusual, but it doesn't mean we need to auto-sense.
 	 */
@@ -595,10 +717,15 @@
 	if (need_auto_sense) {
 		int temp_result;
 		struct scsi_eh_save ses;
+		int sense_size = US_SENSE_SIZE;
+
+		/* device supports and needs bigger sense buffer */
+		if (us->fflags & US_FL_SANE_SENSE)
+			sense_size = ~0;
 
 		US_DEBUGP("Issuing auto-REQUEST_SENSE\n");
 
-		scsi_eh_prep_cmnd(srb, &ses, NULL, 0, US_SENSE_SIZE);
+		scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sense_size);
 
 		/* FIXME: we must do the protocol translation here */
 		if (us->subclass == US_SC_RBC || us->subclass == US_SC_SCSI ||
@@ -632,6 +759,25 @@
 			return;
 		}
 
+		/* If the sense data returned is larger than 18 bytes then we
+		 * assume this device supports requesting more in the future.
+		 * The response code must be 70h through 73h inclusive.
+		 */
+		if (srb->sense_buffer[7] > (US_SENSE_SIZE - 8) &&
+		    !(us->fflags & US_FL_SANE_SENSE) &&
+		    (srb->sense_buffer[0] & 0x7C) == 0x70) {
+			US_DEBUGP("-- SANE_SENSE support enabled\n");
+			us->fflags |= US_FL_SANE_SENSE;
+
+			/* Indicate to the user that we truncated their sense
+			 * because we didn't know it supported larger sense.
+			 */
+			US_DEBUGP("-- Sense data truncated to %i from %i\n",
+			          US_SENSE_SIZE,
+			          srb->sense_buffer[7] + 8);
+			srb->sense_buffer[7] = (US_SENSE_SIZE - 8);
+		}
+
 		US_DEBUGP("-- Result from auto-sense is %d\n", temp_result);
 		US_DEBUGP("-- code: 0x%x, key: 0x%x, ASC: 0x%x, ASCQ: 0x%x\n",
 			  srb->sense_buffer[0],
@@ -667,6 +813,7 @@
 			scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow)
 		srb->result = (DID_ERROR << 16) | (SUGGEST_RETRY << 24);
 
+	last_sector_hacks(us, srb);
 	return;
 
 	/* Error and abort processing: try to resynchronize with the device
@@ -694,6 +841,7 @@
 		us->transport_reset(us);
 	}
 	clear_bit(US_FLIDX_RESETTING, &us->dflags);
+	last_sector_hacks(us, srb);
 }
 
 /* Stop the current URB transfer */
@@ -718,10 +866,10 @@
 }
 
 /*
- * Control/Bulk/Interrupt transport
+ * Control/Bulk and Control/Bulk/Interrupt transport
  */
 
-int usb_stor_CBI_transport(struct scsi_cmnd *srb, struct us_data *us)
+int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
 {
 	unsigned int transfer_length = scsi_bufflen(srb);
 	unsigned int pipe = 0;
@@ -763,6 +911,13 @@
 	}
 
 	/* STATUS STAGE */
+
+	/* NOTE: CB does not have a status stage.  Silly, I know.  So
+	 * we have to catch this at a higher level.
+	 */
+	if (us->protocol != US_PR_CBI)
+		return USB_STOR_TRANSPORT_GOOD;
+
 	result = usb_stor_intr_transfer(us, us->iobuf, 2);
 	US_DEBUGP("Got interrupt data (0x%x, 0x%x)\n", 
 			us->iobuf[0], us->iobuf[1]);
@@ -817,56 +972,6 @@
 }
 
 /*
- * Control/Bulk transport
- */
-int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
-{
-	unsigned int transfer_length = scsi_bufflen(srb);
-	int result;
-
-	/* COMMAND STAGE */
-	/* let's send the command via the control pipe */
-	result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
-				      US_CBI_ADSC, 
-				      USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, 
-				      us->ifnum, srb->cmnd, srb->cmd_len);
-
-	/* check the return code for the command */
-	US_DEBUGP("Call to usb_stor_ctrl_transfer() returned %d\n", result);
-
-	/* if we stalled the command, it means command failed */
-	if (result == USB_STOR_XFER_STALLED) {
-		return USB_STOR_TRANSPORT_FAILED;
-	}
-
-	/* Uh oh... serious problem here */
-	if (result != USB_STOR_XFER_GOOD) {
-		return USB_STOR_TRANSPORT_ERROR;
-	}
-
-	/* DATA STAGE */
-	/* transfer the data payload for this command, if one exists*/
-	if (transfer_length) {
-		unsigned int pipe = srb->sc_data_direction == DMA_FROM_DEVICE ? 
-				us->recv_bulk_pipe : us->send_bulk_pipe;
-		result = usb_stor_bulk_srb(us, pipe, srb);
-		US_DEBUGP("CB data stage result is 0x%x\n", result);
-
-		/* if we stalled the data transfer it means command failed */
-		if (result == USB_STOR_XFER_STALLED)
-			return USB_STOR_TRANSPORT_FAILED;
-		if (result > USB_STOR_XFER_STALLED)
-			return USB_STOR_TRANSPORT_ERROR;
-	}
-
-	/* STATUS STAGE */
-	/* NOTE: CB does not have a status stage.  Silly, I know.  So
-	 * we have to catch this at a higher level.
-	 */
-	return USB_STOR_TRANSPORT_GOOD;
-}
-
-/*
  * Bulk only transport
  */
 
@@ -1173,10 +1278,9 @@
  */
 int usb_stor_port_reset(struct us_data *us)
 {
-	int result, rc_lock;
+	int result;
 
-	result = rc_lock =
-		usb_lock_device_for_reset(us->pusb_dev, us->pusb_intf);
+	result = usb_lock_device_for_reset(us->pusb_dev, us->pusb_intf);
 	if (result < 0)
 		US_DEBUGP("unable to lock device for reset: %d\n", result);
 	else {
@@ -1189,8 +1293,7 @@
 			US_DEBUGP("usb_reset_device returns %d\n",
 					result);
 		}
-		if (rc_lock)
-			usb_unlock_device(us->pusb_dev);
+		usb_unlock_device(us->pusb_dev);
 	}
 	return result;
 }
diff --git a/drivers/usb/storage/transport.h b/drivers/usb/storage/transport.h
index e70b881..242ff5e 100644
--- a/drivers/usb/storage/transport.h
+++ b/drivers/usb/storage/transport.h
@@ -113,8 +113,6 @@
 
 #define US_CBI_ADSC		0
 
-extern int usb_stor_CBI_transport(struct scsi_cmnd *, struct us_data*);
-
 extern int usb_stor_CB_transport(struct scsi_cmnd *, struct us_data*);
 extern int usb_stor_CB_reset(struct us_data*);
 
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index bfcc1fe..a7f9513 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -27,7 +27,8 @@
 
 /* IMPORTANT NOTE: This file must be included in another file which does
  * the following thing for it to work:
- * The macro UNUSUAL_DEV() must be defined before this file is included
+ * The UNUSUAL_DEV, COMPLIANT_DEV, and USUAL_DEV macros must be defined
+ * before this file is included.
  */
 
 /* If you edit this file, please try to keep it sorted first by VendorID,
@@ -46,6 +47,12 @@
  * <usb-storage@lists.one-eyed-alien.net>
  */
 
+/* Note: If you add an entry only in order to set the CAPACITY_OK flag,
+ * use the COMPLIANT_DEV macro instead of UNUSUAL_DEV.  This is
+ * because such entries mark devices which actually work correctly,
+ * as opposed to devices that do something strangely or wrongly.
+ */
+
 /* patch submitted by Vivian Bregier <Vivian.Bregier@imag.fr>
  */
 UNUSUAL_DEV(  0x03eb, 0x2002, 0x0100, 0x0100,
@@ -85,6 +92,13 @@
 		US_SC_8070, US_PR_USBAT, init_usbat_cd, 0),
 #endif
 
+/* Reported by Ben Efros <ben@pc-doctor.com> */
+UNUSUAL_DEV(  0x03f0, 0x070c, 0x0000, 0x0000,
+		"HP",
+		"Personal Media Drive",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_SANE_SENSE ),
+
 /* Reported by Grant Grundler <grundler@parisc-linux.org>
  * HP r707 camera in "Disk" mode with 2.00.23 or 2.00.24 firmware.
  */
@@ -160,34 +174,6 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_MAX_SECTORS_64 ),
 
-/* Reported by Filip Joelsson <filip@blueturtle.nu> */
-UNUSUAL_DEV(  0x0421, 0x005d, 0x0001, 0x0600,
-		"Nokia",
-		"Nokia 3110c",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY ),
-
-/* Reported by Ozan Sener <themgzzy@gmail.com> */
-UNUSUAL_DEV(  0x0421, 0x0060, 0x0551, 0x0551,
-		"Nokia",
-		"3500c",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY ),
-
-/* Reported by CSECSY Laszlo <boobaa@frugalware.org> */
-UNUSUAL_DEV(  0x0421, 0x0063, 0x0001, 0x0601,
-		"Nokia",
-		"Nokia 3109c",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY ),
-
-/* Patch for Nokia 5310 capacity */
-UNUSUAL_DEV(  0x0421, 0x006a, 0x0000, 0x0701,
-		"Nokia",
-		"5310",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY ),
-
 /* Reported by Mario Rettig <mariorettig@web.de> */
 UNUSUAL_DEV(  0x0421, 0x042e, 0x0100, 0x0100,
 		"Nokia",
@@ -253,35 +239,6 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_MAX_SECTORS_64 ),
 
-/* Reported by Cedric Godin <cedric@belbone.be> */
-UNUSUAL_DEV(  0x0421, 0x04b9, 0x0500, 0x0551,
-		"Nokia",
-		"5300",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY ),
-
-/* Reported by Richard Nauber <RichardNauber@web.de> */
-UNUSUAL_DEV(  0x0421, 0x04fa, 0x0550, 0x0660,
-		"Nokia",
-		"6300",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY ),
-
-/* Patch for Nokia 5310 capacity */
-UNUSUAL_DEV(  0x0421, 0x006a, 0x0000, 0x0591,
-	"Nokia",
-	"5310",
-	US_SC_DEVICE, US_PR_DEVICE, NULL,
-	US_FL_FIX_CAPACITY ),
-
-/* Submitted by Ricky Wong Yung Fei <evilbladewarrior@gmail.com> */
-/* Nokia 7610 Supernova - Too many sectors reported in usb storage mode */
-UNUSUAL_DEV(  0x0421, 0x00f5, 0x0000, 0x0470,
-	"Nokia",
-	"7610 Supernova",
-	US_SC_DEVICE, US_PR_DEVICE, NULL,
-	US_FL_FIX_CAPACITY ),
-
 /* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */
 UNUSUAL_DEV(  0x0424, 0x0fdc, 0x0210, 0x0210,
 		"SMSC",
@@ -289,11 +246,17 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_SINGLE_LUN ),
 
-#ifdef CONFIG_USB_STORAGE_DPCM
+#ifdef CONFIG_USB_STORAGE_SDDR09
 UNUSUAL_DEV(  0x0436, 0x0005, 0x0100, 0x0100,
 		"Microtech",
 		"CameraMate (DPCM_USB)",
  		US_SC_SCSI, US_PR_DPCM_USB, NULL, 0 ),
+#else
+UNUSUAL_DEV(  0x0436, 0x0005, 0x0100, 0x0100,
+		"Microtech",
+		"CameraMate",
+		US_SC_SCSI, US_PR_CB, NULL,
+		US_FL_SINGLE_LUN ),
 #endif
 
 /* Patch submitted by Daniel Drake <dsd@gentoo.org>
@@ -388,98 +351,6 @@
 		"DVD-CAM DZ-MV100A Camcorder",
 		US_SC_SCSI, US_PR_CB, NULL, US_FL_SINGLE_LUN),
 
-/* Patch for Nikon coolpix 2000
- * Submitted by Fabien Cosse <fabien.cosse@wanadoo.fr>*/
-UNUSUAL_DEV(  0x04b0, 0x0301, 0x0010, 0x0010,
-		"NIKON",
-		"NIKON DSC E2000",
-		US_SC_DEVICE, US_PR_DEVICE,NULL,
-		US_FL_NOT_LOCKABLE ),
-
-/* Reported by Stefan de Konink <skinkie@xs4all.nl> */
-UNUSUAL_DEV(  0x04b0, 0x0401, 0x0200, 0x0200,
-		"NIKON",
-		"NIKON DSC D100",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY),
-
-/* Reported by Tobias Kunze Briseno <t-linux@fictive.com> */
-UNUSUAL_DEV(  0x04b0, 0x0403, 0x0200, 0x0200,
-		"NIKON",
-		"NIKON DSC D2H",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY),
-
-/* Reported by Milinevsky Dmitry <niam.niam@gmail.com> */
-UNUSUAL_DEV(  0x04b0, 0x0409, 0x0100, 0x0100,
-		"NIKON",
-		"NIKON DSC D50",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY),
-
-/* Reported by Andreas Bockhold <andreas@bockionline.de> */
-UNUSUAL_DEV(  0x04b0, 0x0405, 0x0100, 0x0100,
-		"NIKON",
-		"NIKON DSC D70",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY),
-
-/* Reported by Jamie Kitson <jamie@staberinde.fsnet.co.uk> */
-UNUSUAL_DEV(  0x04b0, 0x040d, 0x0100, 0x0100,
-		"NIKON",
-		"NIKON DSC D70s",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY),
-
-/* Reported by Graber and Mike Pagano <mpagano-kernel@mpagano.com> */
-UNUSUAL_DEV(  0x04b0, 0x040f, 0x0100, 0x0200,
-		"NIKON",
-		"NIKON DSC D200",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY),
-
-/* Reported by Emil Larsson <emil@swip.net> */
-UNUSUAL_DEV(  0x04b0, 0x0411, 0x0100, 0x0111,
-		"NIKON",
-		"NIKON DSC D80",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY),
-
-/* Reported by Ortwin Glueck <odi@odi.ch> */
-UNUSUAL_DEV(  0x04b0, 0x0413, 0x0110, 0x0111,
-		"NIKON",
-		"NIKON DSC D40",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY),
-
-/* Reported by Paul Check <paul@openstreet.com> */
-UNUSUAL_DEV(  0x04b0, 0x0415, 0x0100, 0x0100,
-		"NIKON",
-		"NIKON DSC D2Xs",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY),
-
-/* Reported by Shan Destromp (shansan@gmail.com) */
-UNUSUAL_DEV(  0x04b0, 0x0417, 0x0100, 0x0100,
-		"NIKON",
-		"NIKON DSC D40X",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY),
-
-/* Reported by paul ready <lxtwin@homecall.co.uk> */
-UNUSUAL_DEV(  0x04b0, 0x0419, 0x0100, 0x0200,
-		"NIKON",
-		"NIKON DSC D300",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY),
-
-/* Reported by Doug Maxey (dwm@austin.ibm.com) */
-UNUSUAL_DEV(  0x04b3, 0x4001, 0x0110, 0x0110,
-		"IBM",
-		"IBM RSA2",
-		US_SC_DEVICE, US_PR_CB, NULL,
-		US_FL_MAX_SECTORS_MIN),
-
 /* BENQ DC5330
  * Reported by Manuel Fombuena <mfombuena@ya.com> and
  * Frank Copeland <fjc@thingy.apana.org.au> */
@@ -489,6 +360,21 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_IGNORE_RESIDUE ),
 
+/* Patch for Nikon coolpix 2000
+ * Submitted by Fabien Cosse <fabien.cosse@wanadoo.fr>*/
+UNUSUAL_DEV(  0x04b0, 0x0301, 0x0010, 0x0010,
+		"NIKON",
+		"NIKON DSC E2000",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_NOT_LOCKABLE ),
+
+/* Reported by Doug Maxey (dwm@austin.ibm.com) */
+UNUSUAL_DEV(  0x04b3, 0x4001, 0x0110, 0x0110,
+		"IBM",
+		"IBM RSA2",
+		US_SC_DEVICE, US_PR_CB, NULL,
+		US_FL_MAX_SECTORS_MIN),
+
 #ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB
 /* CY7C68300 : support atacb */
 UNUSUAL_DEV(  0x04b4, 0x6830, 0x0000, 0x9999,
@@ -594,6 +480,12 @@
 		"eUSB SmartMedia / CompactFlash Adapter",
 		US_SC_SCSI, US_PR_DPCM_USB, usb_stor_sddr09_dpcm_init,
 		0),
+#else
+UNUSUAL_DEV(  0x04e6, 0x0005, 0x0100, 0x0208,
+		"SCM Microsystems",
+		"eUSB CompactFlash Adapter",
+		US_SC_SCSI, US_PR_CB, NULL,
+		US_FL_SINGLE_LUN),
 #endif
 
 /* Reported by Markus Demleitner <msdemlei@cl.uni-heidelberg.de> */
@@ -685,6 +577,13 @@
 		US_SC_8070, US_PR_DEVICE, NULL,
 		US_FL_FIX_INQUIRY ),
 
+/* Added by Alan Stern <stern@rowland.harvard.edu> */
+COMPLIANT_DEV(0x0525, 0xa4a5, 0x0000, 0x9999,
+		"Linux",
+		"File-backed Storage Gadget",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_CAPACITY_OK ),
+
 /* Yakumo Mega Image 37
  * Submitted by Stephan Fuhrmann <atomenergie@t-online.de> */
 UNUSUAL_DEV(  0x052b, 0x1801, 0x0100, 0x0100,
@@ -807,15 +706,15 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_FIX_INQUIRY ),
 
-/* Submitted by Mike Alborn <malborn@deandra.homeip.net> */
-UNUSUAL_DEV(  0x054c, 0x016a, 0x0000, 0x9999,
+/* Submitted by Frank Engel <frankie@cse.unsw.edu.au> */
+UNUSUAL_DEV(  0x054c, 0x0099, 0x0000, 0x9999,
 		"Sony",
 		"PEG Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_FIX_INQUIRY ),
-		
-/* Submitted by Frank Engel <frankie@cse.unsw.edu.au> */
-UNUSUAL_DEV(  0x054c, 0x0099, 0x0000, 0x9999,
+
+/* Submitted by Mike Alborn <malborn@deandra.homeip.net> */
+UNUSUAL_DEV(  0x054c, 0x016a, 0x0000, 0x9999,
 		"Sony",
 		"PEG Mass Storage",
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
@@ -966,6 +865,18 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_FIX_CAPACITY ),
 
+/* Reported by Dan Williams <dcbw@redhat.com>
+ * Option N.V. mobile broadband modems
+ * Ignore driver CD mode and force into modem mode by default.
+ */
+
+/* Globetrotter HSDPA; mass storage shows up as Qualcomm for vendor */
+UNUSUAL_DEV(  0x05c6, 0x1000, 0x0000, 0x9999,
+		"Option N.V.",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, option_ms_init,
+		0),
+
 #ifdef CONFIG_USB_STORAGE_JUMPSHOT
 UNUSUAL_DEV(  0x05dc, 0x0001, 0x0000, 0x0001,
 		"Lexar",
@@ -1004,6 +915,13 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_GO_SLOW | US_FL_MAX_SECTORS_64 ),
 
+/* Reported by Ben Efros <ben@pc-doctor.com> */
+UNUSUAL_DEV(  0x05e3, 0x0723, 0x9451, 0x9451,
+		"Genesys Logic",
+		"USB to SATA",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_SANE_SENSE ),
+
 /* Reported by Hanno Boeck <hanno@gmx.de>
  * Taken from the Lycoris Kernel */
 UNUSUAL_DEV(  0x0636, 0x0003, 0x0000, 0x9999,
@@ -1040,7 +958,7 @@
 		US_FL_FIX_CAPACITY | US_FL_GO_SLOW ),
 
 /* Reported by Alex Butcher <alex.butcher@assursys.co.uk> */
-UNUSUAL_DEV( 0x067b, 0x3507, 0x0001, 0x0001,
+UNUSUAL_DEV( 0x067b, 0x3507, 0x0001, 0x0101,
 		"Prolific Technology Inc.",
 		"ATAPI-6 Bridge Controller",
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
@@ -1161,11 +1079,17 @@
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init,
 		US_FL_SCM_MULT_TARG ),
 
-#ifdef CONFIG_USB_STORAGE_DPCM
+#ifdef CONFIG_USB_STORAGE_SDDR09
 UNUSUAL_DEV(  0x07af, 0x0006, 0x0100, 0x0100,
 		"Microtech",
 		"CameraMate (DPCM_USB)",
  		US_SC_SCSI, US_PR_DPCM_USB, NULL, 0 ),
+#else
+UNUSUAL_DEV(  0x07af, 0x0006, 0x0100, 0x0100,
+		"Microtech",
+		"CameraMate",
+		US_SC_SCSI, US_PR_CB, NULL,
+		US_FL_SINGLE_LUN ),
 #endif
 
 #ifdef CONFIG_USB_STORAGE_ALAUDA
@@ -1320,6 +1244,13 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_FIX_CAPACITY),
 
+/* Reported and patched by Nguyen Anh Quynh <aquynh@gmail.com> */
+UNUSUAL_DEV( 0x0840, 0x0084, 0x0001, 0x0001,
+		"Argosy",
+		"Storage",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_FIX_CAPACITY),
+
 /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
  * Flag will support Bulk devices which use a standards-violating 32-byte
  * Command Block Wrapper. Here, the "DC2MEGA" cameras (several brands) with
@@ -1343,17 +1274,6 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_NOT_LOCKABLE),
 
-/* Andrew Lunn <andrew@lunn.ch>
- * PanDigital Digital Picture Frame. Does not like ALLOW_MEDIUM_REMOVAL
- * on LUN 4.
- * Note: Vend:Prod clash with "Ltd Maxell WS30 Slim Digital Camera"
-*/
-UNUSUAL_DEV(  0x0851, 0x1543, 0x0200, 0x0200,
-		"PanDigital",
-		"Photo Frame",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_NOT_LOCKABLE),
-
 /* Submitted by Jan De Luyck <lkml@kcore.org> */
 UNUSUAL_DEV(  0x08bd, 0x1100, 0x0000, 0x0000,
 		"CITIZEN",
@@ -1425,6 +1345,13 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_FIX_INQUIRY ),
 
+/* Reported by Jaak Ristioja <Ristioja@gmail.com> */
+UNUSUAL_DEV( 0x0a17, 0x006e, 0x0100, 0x0100,
+		"Pentax",
+		"K10D",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_FIX_CAPACITY ),
+
 /* These are virtual windows driver CDs, which the zd1211rw driver
  * automatically converts into WLAN devices. */
 UNUSUAL_DEV( 0x0ace, 0x2011, 0x0101, 0x0101,
@@ -1439,6 +1366,18 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_IGNORE_DEVICE ),
 
+/* Reported by Dan Williams <dcbw@redhat.com>
+ * Option N.V. mobile broadband modems
+ * Ignore driver CD mode and force into modem mode by default.
+ */
+
+/* iCON 225 */
+UNUSUAL_DEV(  0x0af0, 0x6971, 0x0000, 0x9999,
+		"Option N.V.",
+		"Mass Storage",
+		US_SC_DEVICE, US_PR_DEVICE, option_ms_init,
+		0),
+
 /* Reported by F. Aben <f.aben@option.com>
  * This device (wrongly) has a vendor-specific device descriptor.
  * The entry is needed so usb-storage can bind to it's mass-storage
@@ -1449,6 +1388,13 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		0 ),
 
+/* Reported by Ben Efros <ben@pc-doctor.com> */
+UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
+		"Seagate",
+		"FreeAgent Pro",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_SANE_SENSE ),
+
 #ifdef CONFIG_USB_STORAGE_ISD200
 UNUSUAL_DEV(  0x0bf6, 0xa001, 0x0100, 0x0110,
 		"ATI",
@@ -1472,6 +1418,22 @@
 		US_FL_SINGLE_LUN ),
 #endif
 
+UNUSUAL_DEV(  0x0d49, 0x7310, 0x0000, 0x9999,
+		"Maxtor",
+		"USB to SATA",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_SANE_SENSE),
+
+/*
+ * Pete Zaitcev <zaitcev@yahoo.com>, bz#164688.
+ * The device blatantly ignores LUN and returns 1 in GetMaxLUN.
+ */
+UNUSUAL_DEV( 0x0c45, 0x1060, 0x0100, 0x0100,
+		"Unknown",
+		"Unknown",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_SINGLE_LUN ),
+
 /* Submitted by: Nick Sillik <n.sillik@temple.edu>
  * Needed for OneTouch extension to usb-storage
  *
@@ -1489,16 +1451,6 @@
 			0),
 #endif
 
-/*
- * Pete Zaitcev <zaitcev@yahoo.com>, bz#164688.
- * The device blatantly ignores LUN and returns 1 in GetMaxLUN.
- */
-UNUSUAL_DEV( 0x0c45, 0x1060, 0x0100, 0x0100,
-		"Unknown",
-		"Unknown",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_SINGLE_LUN ),
-
 /* Submitted by Joris Struyve <joris@struyve.be> */
 UNUSUAL_DEV( 0x0d96, 0x410a, 0x0001, 0xffff,
 		"Medion",
@@ -1516,6 +1468,13 @@
 		"JD 5200 z3",
 		US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY),
 
+/* Reported by  Jason Johnston <killean@shaw.ca> */
+UNUSUAL_DEV(  0x0dc4, 0x0073, 0x0000, 0x0000,
+		"Macpower Technology Co.LTD.",
+		"USB 2.0 3.5\" DEVICE",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_FIX_CAPACITY),
+
 /* Reported by Lubomir Blaha <tritol@trilogic.cz>
  * I _REALLY_ don't know what 3rd, 4th number and all defines mean, but this
  * works for me. Can anybody correct these values? (I able to test corrected
@@ -1638,13 +1597,6 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ),
 
-/* Reported by Ricardo Barberis <ricardo@dattatec.com> */
-UNUSUAL_DEV(  0x0fce, 0xe092, 0x0000, 0x0000,
-		"Sony Ericsson",
-		"P1i",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_IGNORE_RESIDUE ),
-
 /* Reported by Emmanuel Vasilakis <evas@forthnet.gr> */
 UNUSUAL_DEV(  0x0fce, 0xe031, 0x0000, 0x0000,
 		"Sony Ericsson",
@@ -1652,6 +1604,13 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ),
 
+/* Reported by Ricardo Barberis <ricardo@dattatec.com> */
+UNUSUAL_DEV(  0x0fce, 0xe092, 0x0000, 0x0000,
+		"Sony Ericsson",
+		"P1i",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_IGNORE_RESIDUE ),
+
 /* Reported by Kevin Cernekee <kpc-usbdev@gelato.uiuc.edu>
  * Tested on hardware version 1.10.
  * Entry is needed only for the initializer function override.
@@ -1664,6 +1623,12 @@
 		US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init,
 		0 ),
 
+UNUSUAL_DEV(  0x1058, 0x0704, 0x0000, 0x9999,
+		"Western Digital",
+		"External HDD",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_SANE_SENSE),
+
 /* Reported by Fabio Venturi <f.venturi@tdnet.it>
  * The device reports a vendor-specific bDeviceClass.
  */
@@ -2053,10 +2018,10 @@
  * JMicron responds to USN and several other SCSI ioctls with a
  * residue that causes subsequent I/O requests to fail.  */
 UNUSUAL_DEV(  0x152d, 0x2329, 0x0100, 0x0100,
-	        "JMicron",
-	        "USB to ATA/ATAPI Bridge",
-	        US_SC_DEVICE, US_PR_DEVICE, NULL,
-	        US_FL_IGNORE_RESIDUE ),
+		"JMicron",
+		"USB to ATA/ATAPI Bridge",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
 
 /* Reported by Robert Schedel <r.schedel@yahoo.de>
  * Note: this is a 'super top' device like the above 14cd/6600 device */
@@ -2086,27 +2051,6 @@
 		US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ),
 
 /*
- * Patch by Pete Zaitcev <zaitcev@redhat.com>
- * Report by Mark Patton. Red Hat bz#208928.
- * Added support for rev 0x0002 (Motorola ROKR W5)
- * by Javier Smaldone <javier@smaldone.com.ar>
- */
-UNUSUAL_DEV(  0x22b8, 0x4810, 0x0001, 0x0002,
-		"Motorola",
-		"RAZR V3i/ROKR W5",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY),
-
-/*
- * Patch by Jost Diederichs <jost@qdusa.com>
- */
-UNUSUAL_DEV(0x22b8, 0x6410, 0x0001, 0x9999,
-		"Motorola Inc.",
-		"Motorola Phone (RAZRV3xx)",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_FIX_CAPACITY),
-
-/*
  * Patch by Constantin Baranov <const@tltsu.ru>
  * Report by Andreas Koenecke.
  * Motorola ROKR Z6.
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 27016fd..4becf49 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -75,9 +75,6 @@
 #ifdef CONFIG_USB_STORAGE_SDDR55
 #include "sddr55.h"
 #endif
-#ifdef CONFIG_USB_STORAGE_DPCM
-#include "dpcm.h"
-#endif
 #ifdef CONFIG_USB_STORAGE_FREECOM
 #include "freecom.h"
 #endif
@@ -103,6 +100,7 @@
 #include "cypress_atacb.h"
 #endif
 #include "sierra_ms.h"
+#include "option_ms.h"
 
 /* Some informational data */
 MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>");
@@ -113,6 +111,10 @@
 module_param(delay_use, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device");
 
+static char quirks[128];
+module_param_string(quirks, quirks, sizeof(quirks), S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");
+
 
 /*
  * The entries in this table correspond, line for line,
@@ -126,6 +128,8 @@
 { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin,bcdDeviceMax), \
   .driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
 
+#define COMPLIANT_DEV	UNUSUAL_DEV
+
 #define USUAL_DEV(useProto, useTrans, useType) \
 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \
   .driver_info = (USB_US_TYPE_STOR<<24) }
@@ -134,6 +138,7 @@
 
 #	include "unusual_devs.h"
 #undef UNUSUAL_DEV
+#undef COMPLIANT_DEV
 #undef USUAL_DEV
 	/* Terminating entry */
 	{ }
@@ -164,6 +169,8 @@
 	.initFunction = init_function,	\
 }
 
+#define COMPLIANT_DEV	UNUSUAL_DEV
+
 #define USUAL_DEV(use_protocol, use_transport, use_type) \
 { \
 	.useProtocol = use_protocol,	\
@@ -173,6 +180,7 @@
 static struct us_unusual_dev us_unusual_dev_list[] = {
 #	include "unusual_devs.h" 
 #	undef UNUSUAL_DEV
+#	undef COMPLIANT_DEV
 #	undef USUAL_DEV
 
 	/* Terminating entry */
@@ -464,15 +472,85 @@
 		US_DEBUGP("I/O buffer allocation failed\n");
 		return -ENOMEM;
 	}
-
-	us->sensebuf = kmalloc(US_SENSE_SIZE, GFP_KERNEL);
-	if (!us->sensebuf) {
-		US_DEBUGP("Sense buffer allocation failed\n");
-		return -ENOMEM;
-	}
 	return 0;
 }
 
+/* Works only for digits and letters, but small and fast */
+#define TOLOWER(x) ((x) | 0x20)
+
+/* Adjust device flags based on the "quirks=" module parameter */
+static void adjust_quirks(struct us_data *us)
+{
+	char *p;
+	u16 vid = le16_to_cpu(us->pusb_dev->descriptor.idVendor);
+	u16 pid = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
+	unsigned f = 0;
+	unsigned int mask = (US_FL_SANE_SENSE | US_FL_FIX_CAPACITY |
+			US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE |
+			US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
+			US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
+			US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT);
+
+	p = quirks;
+	while (*p) {
+		/* Each entry consists of VID:PID:flags */
+		if (vid == simple_strtoul(p, &p, 16) &&
+				*p == ':' &&
+				pid == simple_strtoul(p+1, &p, 16) &&
+				*p == ':')
+			break;
+
+		/* Move forward to the next entry */
+		while (*p) {
+			if (*p++ == ',')
+				break;
+		}
+	}
+	if (!*p)	/* No match */
+		return;
+
+	/* Collect the flags */
+	while (*++p && *p != ',') {
+		switch (TOLOWER(*p)) {
+		case 'a':
+			f |= US_FL_SANE_SENSE;
+			break;
+		case 'c':
+			f |= US_FL_FIX_CAPACITY;
+			break;
+		case 'h':
+			f |= US_FL_CAPACITY_HEURISTICS;
+			break;
+		case 'i':
+			f |= US_FL_IGNORE_DEVICE;
+			break;
+		case 'l':
+			f |= US_FL_NOT_LOCKABLE;
+			break;
+		case 'm':
+			f |= US_FL_MAX_SECTORS_64;
+			break;
+		case 'o':
+			f |= US_FL_CAPACITY_OK;
+			break;
+		case 'r':
+			f |= US_FL_IGNORE_RESIDUE;
+			break;
+		case 's':
+			f |= US_FL_SINGLE_LUN;
+			break;
+		case 'w':
+			f |= US_FL_NO_WP_DETECT;
+			break;
+		/* Ignore unrecognized flag characters */
+		}
+	}
+	us->fflags = (us->fflags & ~mask) | f;
+	dev_info(&us->pusb_intf->dev, "Quirks match for "
+			"vid %04x pid %04x: %x\n",
+			vid, pid, f);
+}
+
 /* Find an unusual_dev descriptor (always succeeds in the current code) */
 static struct us_unusual_dev *find_unusual(const struct usb_device_id *id)
 {
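As a usage sketch for the new parameter (the vendor/product IDs below are hypothetical): each entry is a VID:PID:flags triple, entries are comma-separated, and the flag letters are the ones handled in adjust_quirks(), so

	modprobe usb-storage quirks=0123:4567:s,89ab:cdef:i

would force single-LUN operation on the first device and ignore the second entirely. For a matching entry, every flag in the mask above is cleared first and replaced by exactly the letters supplied.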
@@ -497,6 +575,7 @@
 			idesc->bInterfaceProtocol :
 			unusual_dev->useTransport;
 	us->fflags = USB_US_ORIG_FLAGS(id->driver_info);
+	adjust_quirks(us);
 
 	if (us->fflags & US_FL_IGNORE_DEVICE) {
 		printk(KERN_INFO USB_STORAGE "device ignored\n");
@@ -562,7 +641,7 @@
 
 	case US_PR_CBI:
 		us->transport_name = "Control/Bulk/Interrupt";
-		us->transport = usb_stor_CBI_transport;
+		us->transport = usb_stor_CB_transport;
 		us->transport_reset = usb_stor_CB_reset;
 		us->max_lun = 7;
 		break;
@@ -675,19 +754,19 @@
 
 	case US_SC_8020:
 		us->protocol_name = "8020i";
-		us->proto_handler = usb_stor_ATAPI_command;
+		us->proto_handler = usb_stor_pad12_command;
 		us->max_lun = 0;
 		break;
 
 	case US_SC_QIC:
 		us->protocol_name = "QIC-157";
-		us->proto_handler = usb_stor_qic157_command;
+		us->proto_handler = usb_stor_pad12_command;
 		us->max_lun = 0;
 		break;
 
 	case US_SC_8070:
 		us->protocol_name = "8070i";
-		us->proto_handler = usb_stor_ATAPI_command;
+		us->proto_handler = usb_stor_pad12_command;
 		us->max_lun = 0;
 		break;
 
@@ -840,8 +919,6 @@
 {
 	US_DEBUGP("-- %s\n", __func__);
 
-	kfree(us->sensebuf);
-
 	/* Free the device-related DMA-mapped buffers */
 	if (us->cr)
 		usb_buffer_free(us->pusb_dev, sizeof(*us->cr), us->cr,
@@ -1064,6 +1141,7 @@
 static int __init usb_stor_init(void)
 {
 	int retval;
+
 	printk(KERN_INFO "Initializing USB Mass Storage driver...\n");
 
 	/* register the driver, return usb_register return code if error */
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index a4ad73b..65e674e 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -138,7 +138,6 @@
 	struct usb_ctrlrequest	*cr;		 /* control requests	 */
 	struct usb_sg_request	current_sg;	 /* scatter-gather req.  */
 	unsigned char		*iobuf;		 /* I/O buffer		 */
-	unsigned char		*sensebuf;	 /* sense data buffer	 */
 	dma_addr_t		cr_dma;		 /* buffer DMA addresses */
 	dma_addr_t		iobuf_dma;
 	struct task_struct	*ctl_thread;	 /* the control thread   */
@@ -155,6 +154,10 @@
 #ifdef CONFIG_PM
 	pm_hook			suspend_resume_hook;
 #endif
+
+	/* hacks for READ CAPACITY bug handling */
+	int			use_last_sector_hacks;
+	int			last_sector_retries;
 };
 
 /* Convert between us_data and the corresponding Scsi_Host */
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c
index 407a9fc..9fe4246 100644
--- a/drivers/usb/wusbcore/rh.c
+++ b/drivers/usb/wusbcore/rh.c
@@ -329,7 +329,7 @@
 static int wusbhc_rh_get_port_status(struct wusbhc *wusbhc, u16 port_idx,
 				     u32 *_buf, u16 wLength)
 {
-	u16 *buf = (u16 *) _buf;
+	__le16 *buf = (__le16 *)_buf;
 
 	if (port_idx > wusbhc->ports_max)
 		return -EINVAL;
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index 686795e..c7080d4 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -387,7 +387,7 @@
 		goto error_create;
 	}
 
-	/* setup the fops and upload the firmare */
+	/* setup the fops and upload the firmware */
 	i1480->pre_fw_name = "i1480-pre-phy-0.0.bin";
 	i1480->mac_fw_name = "i1480-usb-0.0.bin";
 	i1480->mac_fw_name_deprecate = "ptc-0.0.bin";
diff --git a/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
index 5f1b2951..3421d33 100644
--- a/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
+++ b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
@@ -221,7 +221,6 @@
 	struct net_device *net_dev;
 
 	spinlock_t lock;
-	struct net_device_stats stats;
 
 	/* RX context handling */
 	struct sk_buff *rx_skb;
@@ -271,7 +270,6 @@
 extern int i1480u_hard_start_xmit(struct sk_buff *, struct net_device *);
 extern void i1480u_tx_timeout(struct net_device *);
 extern int i1480u_set_config(struct net_device *, struct ifmap *);
-extern struct net_device_stats *i1480u_get_stats(struct net_device *);
 extern int i1480u_change_mtu(struct net_device *, int);
 extern void i1480u_uwb_notifs_cb(void *, struct uwb_dev *, enum uwb_notifs);
 
diff --git a/drivers/uwb/i1480/i1480u-wlp/lc.c b/drivers/uwb/i1480/i1480u-wlp/lc.c
index 049c05d..f272dfe 100644
--- a/drivers/uwb/i1480/i1480u-wlp/lc.c
+++ b/drivers/uwb/i1480/i1480u-wlp/lc.c
@@ -181,6 +181,15 @@
 }
 #endif
 
+static const struct net_device_ops i1480u_netdev_ops = {
+	.ndo_open	= i1480u_open,
+	.ndo_stop 	= i1480u_stop,
+	.ndo_start_xmit = i1480u_hard_start_xmit,
+	.ndo_tx_timeout = i1480u_tx_timeout,
+	.ndo_set_config = i1480u_set_config,
+	.ndo_change_mtu = i1480u_change_mtu,
+};
+
 static
 int i1480u_add(struct i1480u *i1480u, struct usb_interface *iface)
 {
@@ -235,13 +244,7 @@
 	net_dev->features |= NETIF_F_HIGHDMA;
 	net_dev->watchdog_timeo = 5*HZ;		/* FIXME: a better default? */
 
-	net_dev->open = i1480u_open;
-	net_dev->stop = i1480u_stop;
-	net_dev->hard_start_xmit = i1480u_hard_start_xmit;
-	net_dev->tx_timeout = i1480u_tx_timeout;
-	net_dev->get_stats = i1480u_get_stats;
-	net_dev->set_config = i1480u_set_config;
-	net_dev->change_mtu = i1480u_change_mtu;
+	net_dev->netdev_ops = &i1480u_netdev_ops;
 
 #ifdef i1480u_FLOW_CONTROL
 	/* Notification endpoint setup (submitted when we open the device) */
diff --git a/drivers/uwb/i1480/i1480u-wlp/netdev.c b/drivers/uwb/i1480/i1480u-wlp/netdev.c
index e3873ff..7305553 100644
--- a/drivers/uwb/i1480/i1480u-wlp/netdev.c
+++ b/drivers/uwb/i1480/i1480u-wlp/netdev.c
@@ -262,15 +262,6 @@
 	return 0;
 }
 
-
-/** Report statistics */
-struct net_device_stats *i1480u_get_stats(struct net_device *net_dev)
-{
-	struct i1480u *i1480u = netdev_priv(net_dev);
-	return &i1480u->stats;
-}
-
-
 /**
  *
  * Change the interface config--we probably don't have to do anything.
diff --git a/drivers/uwb/i1480/i1480u-wlp/rx.c b/drivers/uwb/i1480/i1480u-wlp/rx.c
index 34f4cf9..25a2758 100644
--- a/drivers/uwb/i1480/i1480u-wlp/rx.c
+++ b/drivers/uwb/i1480/i1480u-wlp/rx.c
@@ -167,7 +167,7 @@
 do {							\
 	if (printk_ratelimit())				\
 		dev_err(&i1480u->usb_iface->dev, msg);	\
-	i1480u->stats.rx_dropped++;			\
+	i1480u->net_dev->stats.rx_dropped++;			\
 } while (0)
 
 
@@ -193,10 +193,8 @@
 	if (!should_parse)
 		goto out;
 	i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
-	i1480u->stats.rx_packets++;
-	i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size;
-	net_dev->last_rx = jiffies;
-	/* FIXME: flow control: check netif_rx() retval */
+	net_dev->stats.rx_packets++;
+	net_dev->stats.rx_bytes += i1480u->rx_untd_pkt_size;
 
 	netif_rx(i1480u->rx_skb);		/* deliver */
 out:
diff --git a/drivers/uwb/i1480/i1480u-wlp/tx.c b/drivers/uwb/i1480/i1480u-wlp/tx.c
index 39032cc..26bacc0 100644
--- a/drivers/uwb/i1480/i1480u-wlp/tx.c
+++ b/drivers/uwb/i1480/i1480u-wlp/tx.c
@@ -117,8 +117,8 @@
 	switch (urb->status) {
 	case 0:
 		spin_lock_irqsave(&i1480u->lock, flags);
-		i1480u->stats.tx_packets++;
-		i1480u->stats.tx_bytes += urb->actual_length;
+		net_dev->stats.tx_packets++;
+		net_dev->stats.tx_bytes += urb->actual_length;
 		spin_unlock_irqrestore(&i1480u->lock, flags);
 		break;
 	case -ECONNRESET:	/* Not an error, but a controlled situation; */
@@ -530,7 +530,7 @@
 	return NETDEV_TX_OK;
 error:
 	dev_kfree_skb_any(skb);
-	i1480u->stats.tx_dropped++;
+	net_dev->stats.tx_dropped++;
 out:
 	return NETDEV_TX_OK;
 }
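Together with the header and netdev.c hunks, the rx.c and tx.c changes drop the driver-private struct net_device_stats: counters are bumped in the net_device's embedded stats block instead, and the get_stats hook can go away because the core falls back to &dev->stats when no hook is provided. A hedged sketch of the idiom (function names are illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Successful receive: account in the embedded stats block. */
static void example_rx_done(struct net_device *dev, struct sk_buff *skb)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	netif_rx(skb);
}

/* Transmit error path: no private counters, no ndo_get_stats needed. */
static void example_tx_drop(struct net_device *dev, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	dev->stats.tx_dropped++;
}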
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index e39e33e..be2b657 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -28,7 +28,7 @@
 obj-$(CONFIG_FB_DEFERRED_IO)   += fb_defio.o
 
 # Hardware specific drivers go first
-obj-$(CONFIG_FB_AMIGA)            += amifb.o c2p.o
+obj-$(CONFIG_FB_AMIGA)            += amifb.o c2p_planar.o
 obj-$(CONFIG_FB_ARC)              += arcfb.o
 obj-$(CONFIG_FB_CLPS711X)         += clps711xfb.o
 obj-$(CONFIG_FB_CYBER2000)        += cyber2000fb.o
@@ -72,7 +72,7 @@
 obj-$(CONFIG_FB_LEO)              += leo.o sbuslib.o
 obj-$(CONFIG_FB_SGIVW)            += sgivwfb.o
 obj-$(CONFIG_FB_ACORN)            += acornfb.o
-obj-$(CONFIG_FB_ATARI)            += atafb.o c2p.o atafb_mfb.o \
+obj-$(CONFIG_FB_ATARI)            += atafb.o c2p_iplan2.o atafb_mfb.o \
                                      atafb_iplan2p2.o atafb_iplan2p4.o atafb_iplan2p8.o
 obj-$(CONFIG_FB_MAC)              += macfb.o
 obj-$(CONFIG_FB_HECUBA)           += hecubafb.o
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 2ac52fd..4e046fe 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -24,6 +24,7 @@
 #include <linux/amba/bus.h>
 #include <linux/amba/clcd.h>
 #include <linux/clk.h>
+#include <linux/hardirq.h>
 
 #include <asm/sizes.h>
 
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index b8e9a86..100f236 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -2159,9 +2159,9 @@
 			src += pitch;
 		}
 	} else {
-		c2p(info->screen_base, image->data, dx, dy, width, height,
-		    par->next_line, par->next_plane, image->width,
-		    info->var.bits_per_pixel);
+		c2p_planar(info->screen_base, image->data, dx, dy, width,
+			   height, par->next_line, par->next_plane,
+			   image->width, info->var.bits_per_pixel);
 	}
 }
 
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 77eb8b3..8058572 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -122,7 +122,6 @@
 	void *screen_base;
 	int yres_virtual;
 	u_long next_line;
-	u_long next_plane;
 #if defined ATAFB_TT || defined ATAFB_STE
 	union {
 		struct {
@@ -149,6 +148,7 @@
 			short mono;
 			short ste_mode;
 			short bpp;
+			u32 pseudo_palette[16];
 		} falcon;
 #endif
 		/* Nothing needed for external mode */
@@ -614,7 +614,7 @@
 	fix->xpanstep = 0;
 	fix->ypanstep = 1;
 	fix->ywrapstep = 0;
-	fix->line_length = 0;
+	fix->line_length = par->next_line;
 	fix->accel = FB_ACCEL_ATARIBLITT;
 	return 0;
 }
@@ -691,6 +691,7 @@
 		return -EINVAL;
 	par->yres_virtual = yres_virtual;
 	par->screen_base = screen_base + var->yoffset * linelen;
+	par->next_line = linelen;
 	return 0;
 }
 
@@ -884,10 +885,6 @@
 /* Default hsync timing [mon_type] in picoseconds */
 static long h_syncs[4] = { 3000000, 4875000, 4000000, 4875000 };
 
-#ifdef FBCON_HAS_CFB16
-static u16 fbcon_cfb16_cmap[16];
-#endif
-
 static inline int hxx_prescale(struct falcon_hw *hw)
 {
 	return hw->ste_mode ? 16
@@ -918,7 +915,7 @@
 		fix->visual = FB_VISUAL_TRUECOLOR;
 		fix->xpanstep = 2;
 	}
-	fix->line_length = 0;
+	fix->line_length = par->next_line;
 	fix->accel = FB_ACCEL_ATARIBLITT;
 	return 0;
 }
@@ -1394,14 +1391,7 @@
 	par->screen_base = screen_base + var->yoffset * linelen;
 	par->hw.falcon.xoffset = 0;
 
-	// FIXME!!! sort of works, no crash
-	//par->next_line = linelen;
-	//par->next_plane = yres_virtual * linelen;
 	par->next_line = linelen;
-	par->next_plane = 2;
-	// crashes
-	//par->next_plane = linelen;
-	//par->next_line  = yres_virtual * linelen;
 
 	return 0;
 }
@@ -1735,10 +1725,10 @@
 			(((red & 0xe000) >> 13) | ((red & 0x1000) >> 12) << 8) |
 			(((green & 0xe000) >> 13) | ((green & 0x1000) >> 12) << 4) |
 			((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12);
-#ifdef FBCON_HAS_CFB16
-		fbcon_cfb16_cmap[regno] = ((red & 0xf800) |
-					   ((green & 0xfc00) >> 5) |
-					   ((blue & 0xf800) >> 11));
+#ifdef ATAFB_FALCON
+		((u32 *)info->pseudo_palette)[regno] = ((red & 0xf800) |
+						       ((green & 0xfc00) >> 5) |
+						       ((blue & 0xf800) >> 11));
 #endif
 	}
 	return 0;
@@ -1852,7 +1842,7 @@
 		fix->ypanstep = 0;
 	}
 	fix->ywrapstep = 0;
-	fix->line_length = 0;
+	fix->line_length = par->next_line;
 	fix->accel = FB_ACCEL_ATARIBLITT;
 	return 0;
 }
@@ -1910,6 +1900,7 @@
 		return -EINVAL;
 	par->yres_virtual = yres_virtual;
 	par->screen_base = screen_base + var->yoffset * linelen;
+	par->next_line = linelen;
 	return 0;
 }
 
@@ -2169,7 +2160,7 @@
 	fix->xpanstep = 0;
 	fix->ypanstep = 0;
 	fix->ywrapstep = 0;
-	fix->line_length = 0;
+	fix->line_length = par->next_line;
 	return 0;
 }
 
@@ -2184,6 +2175,8 @@
 	    var->xoffset > 0 ||
 	    var->yoffset > 0)
 		return -EINVAL;
+
+	par->next_line = external_xres_virtual * external_depth / 8;
 	return 0;
 }
 
@@ -2443,42 +2436,6 @@
 	atafb_get_fix(&info->fix, info);
 
 	info->screen_base = (void *)info->fix.smem_start;
-
-	switch (info->fix.type) {
-	case FB_TYPE_INTERLEAVED_PLANES:
-		switch (info->var.bits_per_pixel) {
-		case 2:
-			// display->dispsw = &fbcon_iplan2p2;
-			break;
-		case 4:
-			// display->dispsw = &fbcon_iplan2p4;
-			break;
-		case 8:
-			// display->dispsw = &fbcon_iplan2p8;
-			break;
-		}
-		break;
-	case FB_TYPE_PACKED_PIXELS:
-		switch (info->var.bits_per_pixel) {
-#ifdef FBCON_HAS_MFB
-		case 1:
-			// display->dispsw = &fbcon_mfb;
-			break;
-#endif
-#ifdef FBCON_HAS_CFB8
-		case 8:
-			// display->dispsw = &fbcon_cfb8;
-			break;
-#endif
-#ifdef FBCON_HAS_CFB16
-		case 16:
-			// display->dispsw = &fbcon_cfb16;
-			// display->dispsw_data = fbcon_cfb16_cmap;
-			break;
-#endif
-		}
-		break;
-	}
 }
 
 static int atafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
@@ -2549,6 +2506,13 @@
 	if (!rect->width || !rect->height)
 		return;
 
+#ifdef ATAFB_FALCON
+	if (info->var.bits_per_pixel == 16) {
+		cfb_fillrect(info, rect);
+		return;
+	}
+#endif
+
 	/*
 	 * We could use hardware clipping but on many cards you get around
 	 * hardware clipping by writing to framebuffer directly.
@@ -2583,6 +2547,13 @@
 	u32 dx, dy, sx, sy, width, height;
 	int rev_copy = 0;
 
+#ifdef ATAFB_FALCON
+	if (info->var.bits_per_pixel == 16) {
+		cfb_copyarea(info, area);
+		return;
+	}
+#endif
+
 	/* clip the destination */
 	x2 = area->dx + area->width;
 	y2 = area->dy + area->height;
@@ -2632,6 +2603,13 @@
 	const char *src;
 	u32 dx, dy, width, height, pitch;
 
+#ifdef ATAFB_FALCON
+	if (info->var.bits_per_pixel == 16) {
+		cfb_imageblit(info, image);
+		return;
+	}
+#endif
+
 	/*
 	 * We could use hardware clipping but on many cards you get around
 	 * hardware clipping by writing to framebuffer directly like we are
@@ -2676,10 +2654,9 @@
 			src += pitch;
 		}
 	} else {
-		// only used for logo; broken
-		c2p(info->screen_base, image->data, dx, dy, width, height,
-		    par->next_line, par->next_plane, image->width,
-		    info->var.bits_per_pixel);
+		c2p_iplan2(info->screen_base, image->data, dx, dy, width,
+			   height, par->next_line, image->width,
+			   info->var.bits_per_pixel);
 	}
 }
 
@@ -3098,8 +3075,7 @@
 
 int __init atafb_init(void)
 {
-	int pad;
-	int detected_mode;
+	int pad, detected_mode, error;
 	unsigned int defmode = 0;
 	unsigned long mem_req;
 
@@ -3139,8 +3115,12 @@
 			printk("atafb_init: initializing Falcon hw\n");
 			fbhw = &falcon_switch;
 			atafb_ops.fb_setcolreg = &falcon_setcolreg;
-			request_irq(IRQ_AUTO_4, falcon_vbl_switcher, IRQ_TYPE_PRIO,
-			            "framebuffer/modeswitch", falcon_vbl_switcher);
+			error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher,
+					    IRQ_TYPE_PRIO,
+					    "framebuffer/modeswitch",
+					    falcon_vbl_switcher);
+			if (error)
+				return error;
 			defmode = DEFMODE_F30;
 			break;
 		}
@@ -3225,6 +3205,10 @@
 	// tries to read from HW which may not be initialized yet
 	// so set sane var first, then call atafb_set_par
 	atafb_get_var(&fb_info.var, &fb_info);
+
+#ifdef ATAFB_FALCON
+	fb_info.pseudo_palette = current_par.hw.falcon.pseudo_palette;
+#endif
 	fb_info.flags = FBINFO_FLAG_DEFAULT;
 
 	if (!fb_find_mode(&fb_info.var, &fb_info, mode_option, atafb_modedb,
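Beyond reporting line_length and wiring up the Falcon pseudo_palette, the atafb init path above now propagates the request_irq() result instead of ignoring it. The general shape of that check, sketched for a hypothetical interrupt line (EXAMPLE_IRQ and the names are not real constants from this patch):

#include <linux/interrupt.h>
#include <linux/init.h>

static irqreturn_t example_vbl_handler(int irq, void *dev_id)
{
	/* acknowledge/handle the interrupt here */
	return IRQ_HANDLED;
}

static int __init example_init(void)
{
	int error;

	error = request_irq(EXAMPLE_IRQ, example_vbl_handler, 0,
			    "example/modeswitch", NULL);
	if (error)
		return error;	/* pass -EBUSY, -EINVAL, ... up to the caller */

	/* ... continue with the rest of the initialisation ... */
	return 0;
}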
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 4a4dd9a..72facb9 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -52,11 +52,11 @@
 	  then say y to include a power driver for it.
 
 config LCD_TDO24M
-	tristate "Toppoly TDO24M LCD Panels support"
+	tristate "Toppoly TDO24M  and TDO35S LCD Panels support"
 	depends on LCD_CLASS_DEVICE && SPI_MASTER
 	default n
 	help
-	  If you have a Toppoly TDO24M series LCD panel, say y here to
+	  If you have a Toppoly TDO24M/TDO35S series LCD panel, say y here to
 	  include the support for it.
 
 config LCD_VGG2432A4
@@ -123,17 +123,14 @@
 	  To compile this driver as a module, choose M here: the module will be
 	  called atmel-pwm-bl.
 
-config BACKLIGHT_CORGI
-	tristate "Generic (aka Sharp Corgi) Backlight Driver (DEPRECATED)"
+config BACKLIGHT_GENERIC
+	tristate "Generic (aka Sharp Corgi) Backlight Driver"
 	depends on BACKLIGHT_CLASS_DEVICE
-	default n
+	default y
 	help
 	  Say y to enable the generic platform backlight driver previously
 	  known as the Corgi backlight driver. If you have a Sharp Zaurus
-	  SL-C7xx, SL-Cxx00 or SL-6000x say y. Most users can say n.
-
-	  Note: this driver is marked as deprecated, try enable SPI and
-	  use the new corgi_lcd driver with integrated backlight control
+	  SL-C7xx, SL-Cxx00 or SL-6000x say y.
 
 config BACKLIGHT_LOCOMO
 	tristate "Sharp LOCOMO LCD/Backlight Driver"
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 103427d..363b3cb 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -11,7 +11,7 @@
 
 obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
 obj-$(CONFIG_BACKLIGHT_ATMEL_PWM)    += atmel-pwm-bl.o
-obj-$(CONFIG_BACKLIGHT_CORGI)	+= corgi_bl.o
+obj-$(CONFIG_BACKLIGHT_GENERIC)	+= generic_bl.o
 obj-$(CONFIG_BACKLIGHT_HP680)	+= hp680_bl.o
 obj-$(CONFIG_BACKLIGHT_LOCOMO)	+= locomolcd.o
 obj-$(CONFIG_BACKLIGHT_OMAP1)	+= omap1_bl.o
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 0664fc0..157057c 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -40,6 +40,10 @@
 		if (!bd->ops->check_fb ||
 		    bd->ops->check_fb(evdata->info)) {
 			bd->props.fb_blank = *(int *)evdata->data;
+			if (bd->props.fb_blank == FB_BLANK_UNBLANK)
+				bd->props.state &= ~BL_CORE_FBBLANK;
+			else
+				bd->props.state |= BL_CORE_FBBLANK;
 			backlight_update_status(bd);
 		}
 	mutex_unlock(&bd->ops_lock);
@@ -80,20 +84,18 @@
 static ssize_t backlight_store_power(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
-	int rc = -ENXIO;
-	char *endp;
+	int rc;
 	struct backlight_device *bd = to_backlight_device(dev);
-	int power = simple_strtoul(buf, &endp, 0);
-	size_t size = endp - buf;
+	unsigned long power;
 
-	if (*endp && isspace(*endp))
-		size++;
-	if (size != count)
-		return -EINVAL;
+	rc = strict_strtoul(buf, 0, &power);
+	if (rc)
+		return rc;
 
+	rc = -ENXIO;
 	mutex_lock(&bd->ops_lock);
 	if (bd->ops) {
-		pr_debug("backlight: set power to %d\n", power);
+		pr_debug("backlight: set power to %lu\n", power);
 		if (bd->props.power != power) {
 			bd->props.power = power;
 			backlight_update_status(bd);
@@ -116,28 +118,25 @@
 static ssize_t backlight_store_brightness(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
-	int rc = -ENXIO;
-	char *endp;
+	int rc;
 	struct backlight_device *bd = to_backlight_device(dev);
-	int brightness = simple_strtoul(buf, &endp, 0);
-	size_t size = endp - buf;
+	unsigned long brightness;
 
-	if (*endp && isspace(*endp))
-		size++;
-	if (size != count)
-		return -EINVAL;
+	rc = strict_strtoul(buf, 0, &brightness);
+	if (rc)
+		return rc;
+
+	rc = -ENXIO;
 
 	mutex_lock(&bd->ops_lock);
 	if (bd->ops) {
 		if (brightness > bd->props.max_brightness)
 			rc = -EINVAL;
 		else {
-			pr_debug("backlight: set brightness to %d\n",
+			pr_debug("backlight: set brightness to %lu\n",
 				 brightness);
-			if (bd->props.brightness != brightness) {
-				bd->props.brightness = brightness;
-				backlight_update_status(bd);
-			}
+			bd->props.brightness = brightness;
+			backlight_update_status(bd);
 			rc = count;
 		}
 	}
@@ -170,6 +169,34 @@
 
 static struct class *backlight_class;
 
+static int backlight_suspend(struct device *dev, pm_message_t state)
+{
+	struct backlight_device *bd = to_backlight_device(dev);
+
+	if (bd->ops->options & BL_CORE_SUSPENDRESUME) {
+		mutex_lock(&bd->ops_lock);
+		bd->props.state |= BL_CORE_SUSPENDED;
+		backlight_update_status(bd);
+		mutex_unlock(&bd->ops_lock);
+	}
+
+	return 0;
+}
+
+static int backlight_resume(struct device *dev)
+{
+	struct backlight_device *bd = to_backlight_device(dev);
+
+	if (bd->ops->options & BL_CORE_SUSPENDRESUME) {
+		mutex_lock(&bd->ops_lock);
+		bd->props.state &= ~BL_CORE_SUSPENDED;
+		backlight_update_status(bd);
+		mutex_unlock(&bd->ops_lock);
+	}
+
+	return 0;
+}
+
 static void bl_device_release(struct device *dev)
 {
 	struct backlight_device *bd = to_backlight_device(dev);
@@ -286,6 +313,8 @@
 	}
 
 	backlight_class->dev_attrs = bl_device_attributes;
+	backlight_class->suspend = backlight_suspend;
+	backlight_class->resume = backlight_resume;
 	return 0;
 }
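The store methods above switch from hand-rolled simple_strtoul() parsing to strict_strtoul(), which rejects anything but a clean number (a trailing newline is tolerated) and hands back an error code directly. A hedged sketch of the resulting shape for a generic sysfs store method (attribute and names are hypothetical):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_store_level(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long level;
	int rc;

	rc = strict_strtoul(buf, 0, &level);
	if (rc)
		return rc;		/* typically -EINVAL on malformed input */

	/* ... apply 'level' to the hardware under the proper lock ... */

	return count;			/* the whole buffer was consumed */
}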
 
diff --git a/drivers/video/backlight/corgi_bl.c b/drivers/video/backlight/corgi_bl.c
deleted file mode 100644
index 4d4d037..0000000
--- a/drivers/video/backlight/corgi_bl.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- *  Backlight Driver for Sharp Zaurus Handhelds (various models)
- *
- *  Copyright (c) 2004-2006 Richard Purdie
- *
- *  Based on Sharp's 2.4 Backlight Driver
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2 as
- *  published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/fb.h>
-#include <linux/backlight.h>
-
-static int corgibl_intensity;
-static struct backlight_properties corgibl_data;
-static struct backlight_device *corgi_backlight_device;
-static struct generic_bl_info *bl_machinfo;
-
-static unsigned long corgibl_flags;
-#define CORGIBL_SUSPENDED     0x01
-#define CORGIBL_BATTLOW       0x02
-
-static int corgibl_send_intensity(struct backlight_device *bd)
-{
-	int intensity = bd->props.brightness;
-
-	if (bd->props.power != FB_BLANK_UNBLANK)
-		intensity = 0;
-	if (bd->props.fb_blank != FB_BLANK_UNBLANK)
-		intensity = 0;
-	if (corgibl_flags & CORGIBL_SUSPENDED)
-		intensity = 0;
-	if (corgibl_flags & CORGIBL_BATTLOW)
-		intensity &= bl_machinfo->limit_mask;
-
-	bl_machinfo->set_bl_intensity(intensity);
-
-	corgibl_intensity = intensity;
-
-	if (bl_machinfo->kick_battery)
-		bl_machinfo->kick_battery();
-
-	return 0;
-}
-
-#ifdef CONFIG_PM
-static int corgibl_suspend(struct platform_device *pdev, pm_message_t state)
-{
-	struct backlight_device *bd = platform_get_drvdata(pdev);
-
-	corgibl_flags |= CORGIBL_SUSPENDED;
-	backlight_update_status(bd);
-	return 0;
-}
-
-static int corgibl_resume(struct platform_device *pdev)
-{
-	struct backlight_device *bd = platform_get_drvdata(pdev);
-
-	corgibl_flags &= ~CORGIBL_SUSPENDED;
-	backlight_update_status(bd);
-	return 0;
-}
-#else
-#define corgibl_suspend	NULL
-#define corgibl_resume	NULL
-#endif
-
-static int corgibl_get_intensity(struct backlight_device *bd)
-{
-	return corgibl_intensity;
-}
-
-/*
- * Called when the battery is low to limit the backlight intensity.
- * If limit==0 clear any limit, otherwise limit the intensity
- */
-void corgibl_limit_intensity(int limit)
-{
-	if (limit)
-		corgibl_flags |= CORGIBL_BATTLOW;
-	else
-		corgibl_flags &= ~CORGIBL_BATTLOW;
-	backlight_update_status(corgi_backlight_device);
-}
-EXPORT_SYMBOL(corgibl_limit_intensity);
-
-
-static struct backlight_ops corgibl_ops = {
-	.get_brightness = corgibl_get_intensity,
-	.update_status  = corgibl_send_intensity,
-};
-
-static int corgibl_probe(struct platform_device *pdev)
-{
-	struct generic_bl_info *machinfo = pdev->dev.platform_data;
-	const char *name = "generic-bl";
-
-	bl_machinfo = machinfo;
-	if (!machinfo->limit_mask)
-		machinfo->limit_mask = -1;
-
-	if (machinfo->name)
-		name = machinfo->name;
-
-	corgi_backlight_device = backlight_device_register (name,
-		&pdev->dev, NULL, &corgibl_ops);
-	if (IS_ERR (corgi_backlight_device))
-		return PTR_ERR (corgi_backlight_device);
-
-	platform_set_drvdata(pdev, corgi_backlight_device);
-
-	corgi_backlight_device->props.max_brightness = machinfo->max_intensity;
-	corgi_backlight_device->props.power = FB_BLANK_UNBLANK;
-	corgi_backlight_device->props.brightness = machinfo->default_intensity;
-	backlight_update_status(corgi_backlight_device);
-
-	printk("Corgi Backlight Driver Initialized.\n");
-	return 0;
-}
-
-static int corgibl_remove(struct platform_device *pdev)
-{
-	struct backlight_device *bd = platform_get_drvdata(pdev);
-
-	corgibl_data.power = 0;
-	corgibl_data.brightness = 0;
-	backlight_update_status(bd);
-
-	backlight_device_unregister(bd);
-
-	printk("Corgi Backlight Driver Unloaded\n");
-	return 0;
-}
-
-static struct platform_driver corgibl_driver = {
-	.probe		= corgibl_probe,
-	.remove		= corgibl_remove,
-	.suspend	= corgibl_suspend,
-	.resume		= corgibl_resume,
-	.driver		= {
-		.name	= "generic-bl",
-	},
-};
-
-static int __init corgibl_init(void)
-{
-	return platform_driver_register(&corgibl_driver);
-}
-
-static void __exit corgibl_exit(void)
-{
-	platform_driver_unregister(&corgibl_driver);
-}
-
-module_init(corgibl_init);
-module_exit(corgibl_exit);
-
-MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
-MODULE_DESCRIPTION("Corgi Backlight Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index 26add88..b9fe62b 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -259,22 +259,18 @@
 {
 	int ret = platform_driver_register(&cr_backlight_driver);
 
-	if (!ret) {
-		crp = platform_device_alloc("cr_backlight", -1);
-		if (!crp)
-			return -ENOMEM;
+	if (ret)
+		return ret;
 
-		ret = platform_device_add(crp);
-
-		if (ret) {
-			platform_device_put(crp);
-			platform_driver_unregister(&cr_backlight_driver);
-		}
+	crp = platform_device_register_simple("cr_backlight", -1, NULL, 0);
+	if (IS_ERR(crp)) {
+		platform_driver_unregister(&cr_backlight_driver);
+		return PTR_ERR(crp);
 	}
 
 	printk("Carillo Ranch Backlight Driver Initialized.\n");
 
-	return ret;
+	return 0;
 }
 
 static void __exit cr_backlight_exit(void)
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
new file mode 100644
index 0000000..6d27f62
--- /dev/null
+++ b/drivers/video/backlight/generic_bl.c
@@ -0,0 +1,147 @@
+/*
+ *  Generic Backlight Driver
+ *
+ *  Copyright (c) 2004-2008 Richard Purdie
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+
+static int genericbl_intensity;
+static struct backlight_device *generic_backlight_device;
+static struct generic_bl_info *bl_machinfo;
+
+/* Flag to signal when the battery is low */
+#define GENERICBL_BATTLOW       BL_CORE_DRIVER1
+
+static int genericbl_send_intensity(struct backlight_device *bd)
+{
+	int intensity = bd->props.brightness;
+
+	if (bd->props.power != FB_BLANK_UNBLANK)
+		intensity = 0;
+	if (bd->props.state & BL_CORE_FBBLANK)
+		intensity = 0;
+	if (bd->props.state & BL_CORE_SUSPENDED)
+		intensity = 0;
+	if (bd->props.state & GENERICBL_BATTLOW)
+		intensity &= bl_machinfo->limit_mask;
+
+	bl_machinfo->set_bl_intensity(intensity);
+
+	genericbl_intensity = intensity;
+
+	if (bl_machinfo->kick_battery)
+		bl_machinfo->kick_battery();
+
+	return 0;
+}
+
+static int genericbl_get_intensity(struct backlight_device *bd)
+{
+	return genericbl_intensity;
+}
+
+/*
+ * Called when the battery is low to limit the backlight intensity.
+ * If limit==0 clear any limit, otherwise limit the intensity
+ */
+void corgibl_limit_intensity(int limit)
+{
+	struct backlight_device *bd = generic_backlight_device;
+
+	mutex_lock(&bd->ops_lock);
+	if (limit)
+		bd->props.state |= GENERICBL_BATTLOW;
+	else
+		bd->props.state &= ~GENERICBL_BATTLOW;
+	backlight_update_status(generic_backlight_device);
+	mutex_unlock(&bd->ops_lock);
+}
+EXPORT_SYMBOL(corgibl_limit_intensity);
+
+static struct backlight_ops genericbl_ops = {
+	.options = BL_CORE_SUSPENDRESUME,
+	.get_brightness = genericbl_get_intensity,
+	.update_status  = genericbl_send_intensity,
+};
+
+static int genericbl_probe(struct platform_device *pdev)
+{
+	struct generic_bl_info *machinfo = pdev->dev.platform_data;
+	const char *name = "generic-bl";
+	struct backlight_device *bd;
+
+	bl_machinfo = machinfo;
+	if (!machinfo->limit_mask)
+		machinfo->limit_mask = -1;
+
+	if (machinfo->name)
+		name = machinfo->name;
+
+	bd = backlight_device_register (name,
+		&pdev->dev, NULL, &genericbl_ops);
+	if (IS_ERR (bd))
+		return PTR_ERR (bd);
+
+	platform_set_drvdata(pdev, bd);
+
+	bd->props.max_brightness = machinfo->max_intensity;
+	bd->props.power = FB_BLANK_UNBLANK;
+	bd->props.brightness = machinfo->default_intensity;
+	backlight_update_status(bd);
+
+	generic_backlight_device = bd;
+
+	printk("Generic Backlight Driver Initialized.\n");
+	return 0;
+}
+
+static int genericbl_remove(struct platform_device *pdev)
+{
+	struct backlight_device *bd = platform_get_drvdata(pdev);
+
+	bd->props.power = 0;
+	bd->props.brightness = 0;
+	backlight_update_status(bd);
+
+	backlight_device_unregister(bd);
+
+	printk("Generic Backlight Driver Unloaded\n");
+	return 0;
+}
+
+static struct platform_driver genericbl_driver = {
+	.probe		= genericbl_probe,
+	.remove		= genericbl_remove,
+	.driver		= {
+		.name	= "generic-bl",
+	},
+};
+
+static int __init genericbl_init(void)
+{
+	return platform_driver_register(&genericbl_driver);
+}
+
+static void __exit genericbl_exit(void)
+{
+	platform_driver_unregister(&genericbl_driver);
+}
+
+module_init(genericbl_init);
+module_exit(genericbl_exit);
+
+MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
+MODULE_DESCRIPTION("Generic Backlight Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index d4cfed0..5be55a2 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -151,19 +151,15 @@
 	int ret;
 
 	ret = platform_driver_register(&hp680bl_driver);
-	if (!ret) {
-		hp680bl_device = platform_device_alloc("hp680-bl", -1);
-		if (!hp680bl_device)
-			return -ENOMEM;
-
-		ret = platform_device_add(hp680bl_device);
-
-		if (ret) {
-			platform_device_put(hp680bl_device);
-			platform_driver_unregister(&hp680bl_driver);
-		}
+	if (ret)
+		return ret;
+	hp680bl_device = platform_device_register_simple("hp680-bl", -1,
+							NULL, 0);
+	if (IS_ERR(hp680bl_device)) {
+		platform_driver_unregister(&hp680bl_driver);
+		return PTR_ERR(hp680bl_device);
 	}
-	return ret;
+	return 0;
 }
 
 static void __exit hp680bl_exit(void)
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 06964af..65864c5 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -70,6 +70,7 @@
 }
 
 static struct backlight_ops mbp_ops = {
+	.options = BL_CORE_SUSPENDRESUME,
 	.get_brightness = mbp_get_intensity,
 	.update_status  = mbp_send_intensity,
 };
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 15fb4d58..9edaf24f 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -119,20 +119,16 @@
 {
 	int ret = platform_driver_register(&progearbl_driver);
 
-	if (!ret) {
-		progearbl_device = platform_device_alloc("progear-bl", -1);
-		if (!progearbl_device)
-			return -ENOMEM;
-
-		ret = platform_device_add(progearbl_device);
-
-		if (ret) {
-			platform_device_put(progearbl_device);
-			platform_driver_unregister(&progearbl_driver);
-		}
+	if (ret)
+		return ret;
+	progearbl_device = platform_device_register_simple("progear-bl", -1,
+								NULL, 0);
+	if (IS_ERR(progearbl_device)) {
+		platform_driver_unregister(&progearbl_driver);
+		return PTR_ERR(progearbl_device);
 	}
 
-	return ret;
+	return 0;
 }
 
 static void __exit progearbl_exit(void)
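cr_bllcd, hp680_bl and progear_bl all replace the open-coded platform_device_alloc()/platform_device_add() pair with platform_device_register_simple(), which allocates, names and registers the device in one call and returns an ERR_PTR() on failure. The resulting init idiom, sketched for a hypothetical driver:

#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/init.h>

static struct platform_driver example_driver = {
	.driver = {
		.name = "example-bl",
	},
};

static struct platform_device *example_device;

static int __init example_init(void)
{
	int ret = platform_driver_register(&example_driver);

	if (ret)
		return ret;

	example_device = platform_device_register_simple("example-bl", -1,
							 NULL, 0);
	if (IS_ERR(example_device)) {
		/* undo the driver registration before bailing out */
		platform_driver_unregister(&example_driver);
		return PTR_ERR(example_device);
	}
	return 0;
}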
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 8427669..1dae7f8 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/spi/spi.h>
+#include <linux/spi/tdo24m.h>
 #include <linux/fb.h>
 #include <linux/lcd.h>
 
@@ -31,6 +32,9 @@
 	struct spi_transfer	xfer;
 	uint8_t			*buf;
 
+	int (*adj_mode)(struct tdo24m *lcd, int mode);
+	int color_invert;
+
 	int			power;
 	int			mode;
 };
@@ -66,7 +70,7 @@
 	CMD_NULL,
 };
 
-static uint32_t lcd_vga_pass_through[] = {
+static uint32_t lcd_vga_pass_through_tdo24m[] = {
 	CMD1(0xB0, 0x16),
 	CMD1(0xBC, 0x80),
 	CMD1(0xE1, 0x00),
@@ -75,7 +79,7 @@
 	CMD_NULL,
 };
 
-static uint32_t lcd_qvga_pass_through[] = {
+static uint32_t lcd_qvga_pass_through_tdo24m[] = {
 	CMD1(0xB0, 0x16),
 	CMD1(0xBC, 0x81),
 	CMD1(0xE1, 0x00),
@@ -84,7 +88,7 @@
 	CMD_NULL,
 };
 
-static uint32_t lcd_vga_transfer[] = {
+static uint32_t lcd_vga_transfer_tdo24m[] = {
 	CMD1(0xcf, 0x02), 	/* Blanking period control (1) */
 	CMD2(0xd0, 0x08, 0x04),	/* Blanking period control (2) */
 	CMD1(0xd1, 0x01),	/* CKV timing control on/off */
@@ -110,6 +114,35 @@
 	CMD_NULL,
 };
 
+static uint32_t lcd_vga_pass_through_tdo35s[] = {
+	CMD1(0xB0, 0x16),
+	CMD1(0xBC, 0x80),
+	CMD1(0xE1, 0x00),
+	CMD1(0x3B, 0x00),
+	CMD_NULL,
+};
+
+static uint32_t lcd_qvga_pass_through_tdo35s[] = {
+	CMD1(0xB0, 0x16),
+	CMD1(0xBC, 0x81),
+	CMD1(0xE1, 0x00),
+	CMD1(0x3B, 0x22),
+	CMD_NULL,
+};
+
+static uint32_t lcd_vga_transfer_tdo35s[] = {
+	CMD1(0xcf, 0x02), 	/* Blanking period control (1) */
+	CMD2(0xd0, 0x08, 0x04),	/* Blanking period control (2) */
+	CMD1(0xd1, 0x01),	/* CKV timing control on/off */
+	CMD2(0xd2, 0x00, 0x1e),	/* CKV 1,2 timing control */
+	CMD2(0xd3, 0x14, 0x28),	/* OEV timing control */
+	CMD2(0xd4, 0x28, 0x64),	/* ASW timing control (1) */
+	CMD1(0xd5, 0x28),	/* ASW timing control (2) */
+	CMD0(0x21),		/* Invert for normally black display */
+	CMD0(0x29),		/* Display on */
+	CMD_NULL,
+};
+
 static uint32_t lcd_panel_config[] = {
 	CMD2(0xb8, 0xff, 0xf9),	/* Output control */
 	CMD0(0x11),		/* sleep out */
@@ -148,6 +181,8 @@
 	int nparams, err = 0;
 
 	for (; *p != CMD_NULL; p++) {
+		if (!lcd->color_invert && *p == CMD0(0x21))
+			continue;
 
 		nparams = (*p >> 30) & 0x3;
 
@@ -184,12 +219,33 @@
 {
 	switch (mode) {
 	case MODE_VGA:
-		tdo24m_writes(lcd, lcd_vga_pass_through);
+		tdo24m_writes(lcd, lcd_vga_pass_through_tdo24m);
 		tdo24m_writes(lcd, lcd_panel_config);
-		tdo24m_writes(lcd, lcd_vga_transfer);
+		tdo24m_writes(lcd, lcd_vga_transfer_tdo24m);
 		break;
 	case MODE_QVGA:
-		tdo24m_writes(lcd, lcd_qvga_pass_through);
+		tdo24m_writes(lcd, lcd_qvga_pass_through_tdo24m);
+		tdo24m_writes(lcd, lcd_panel_config);
+		tdo24m_writes(lcd, lcd_qvga_transfer);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	lcd->mode = mode;
+	return 0;
+}
+
+static int tdo35s_adj_mode(struct tdo24m *lcd, int mode)
+{
+	switch (mode) {
+	case MODE_VGA:
+		tdo24m_writes(lcd, lcd_vga_pass_through_tdo35s);
+		tdo24m_writes(lcd, lcd_panel_config);
+		tdo24m_writes(lcd, lcd_vga_transfer_tdo35s);
+		break;
+	case MODE_QVGA:
+		tdo24m_writes(lcd, lcd_qvga_pass_through_tdo35s);
 		tdo24m_writes(lcd, lcd_panel_config);
 		tdo24m_writes(lcd, lcd_qvga_transfer);
 		break;
@@ -213,7 +269,7 @@
 	if (err)
 		goto out;
 
-	err = tdo24m_adj_mode(lcd, lcd->mode);
+	err = lcd->adj_mode(lcd, lcd->mode);
 out:
 	return err;
 }
@@ -262,7 +318,7 @@
 	if (lcd->mode == mode)
 		return 0;
 
-	return tdo24m_adj_mode(lcd, mode);
+	return lcd->adj_mode(lcd, mode);
 }
 
 static struct lcd_ops tdo24m_ops = {
@@ -276,8 +332,16 @@
 	struct tdo24m *lcd;
 	struct spi_message *m;
 	struct spi_transfer *x;
+	struct tdo24m_platform_data *pdata;
+	enum tdo24m_model model;
 	int err;
 
+	pdata = spi->dev.platform_data;
+	if (pdata)
+		model = pdata->model;
+	else
+		model = TDO24M;
+
 	spi->bits_per_word = 8;
 	spi->mode = SPI_MODE_3;
 	err = spi_setup(spi);
@@ -306,6 +370,20 @@
 	x->tx_buf = &lcd->buf[0];
 	spi_message_add_tail(x, m);
 
+	switch (model) {
+	case TDO24M:
+		lcd->color_invert = 1;
+		lcd->adj_mode = tdo24m_adj_mode;
+		break;
+	case TDO35S:
+		lcd->adj_mode = tdo35s_adj_mode;
+		lcd->color_invert = 0;
+		break;
+	default:
+		dev_err(&spi->dev, "Unsupported model");
+		goto out_free;
+	}
+
 	lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev,
 					lcd, &tdo24m_ops);
 	if (IS_ERR(lcd->lcd_dev)) {
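Which init sequence gets used is chosen through the new struct tdo24m_platform_data from <linux/spi/tdo24m.h>; without platform data the driver falls back to the TDO24M tables. A hedged sketch of how a board might request the TDO35S sequence from its SPI board info (bus number, chip select and clock rate are made up, and the "tdo24m" modalias is assumed to match the driver name):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/tdo24m.h>

static struct tdo24m_platform_data board_tdo24m_info = {
	.model	= TDO35S,
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "tdo24m",
		.max_speed_hz	= 1000000,
		.bus_num	= 1,
		.chip_select	= 0,
		.platform_data	= &board_tdo24m_info,
	},
};

static int __init board_spi_init(void)
{
	return spi_register_board_info(board_spi_devices,
				       ARRAY_SIZE(board_spi_devices));
}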
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 57a2664..b7fbc75 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -39,6 +39,7 @@
 	struct i2c_client *i2c;
 
 	int lcd_power;
+	bool is_vga;
 };
 
 static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data)
@@ -81,8 +82,12 @@
 static void tosa_lcd_tg_on(struct tosa_lcd_data *data)
 {
 	struct spi_device *spi = data->spi;
-	const int value = TG_REG0_COLOR | TG_REG0_UD | TG_REG0_LR;
-	tosa_tg_send(spi, TG_PNLCTL, value | TG_REG0_VQV); /* this depends on mode */
+	int value = TG_REG0_COLOR | TG_REG0_UD | TG_REG0_LR;
+
+	if (data->is_vga)
+		value |= TG_REG0_VQV;
+
+	tosa_tg_send(spi, TG_PNLCTL, value);
 
 	/* TG LCD pannel power up */
 	tosa_tg_send(spi, TG_PINICTL,0x4);
@@ -142,9 +147,25 @@
 	return data->lcd_power;
 }
 
+static int tosa_lcd_set_mode(struct lcd_device *lcd, struct fb_videomode *mode)
+{
+	struct tosa_lcd_data *data = lcd_get_data(lcd);
+
+	if (mode->xres == 320 || mode->yres == 320)
+		data->is_vga = false;
+	else
+		data->is_vga = true;
+
+	if (POWER_IS_ON(data->lcd_power))
+		tosa_lcd_tg_on(data);
+
+	return 0;
+}
+
 static struct lcd_ops tosa_lcd_ops = {
 	.set_power = tosa_lcd_set_power,
 	.get_power = tosa_lcd_get_power,
+	.set_mode = tosa_lcd_set_mode,
 };
 
 static int __devinit tosa_lcd_probe(struct spi_device *spi)
@@ -156,6 +177,8 @@
 	if (!data)
 		return -ENOMEM;
 
+	data->is_vga = true; /* default to VGA mode */
+
 	/*
 	 * bits_per_word cannot be configured in platform data
 	 */
diff --git a/drivers/video/backlight/vgg2432a4.c b/drivers/video/backlight/vgg2432a4.c
index 593c768..8e653b8 100644
--- a/drivers/video/backlight/vgg2432a4.c
+++ b/drivers/video/backlight/vgg2432a4.c
@@ -137,7 +137,7 @@
 
 	ili9320_write(lcd, ILI9320_RGB_IF1, cfg->rgb_if1);
 	ili9320_write(lcd, ILI9320_FRAMEMAKER, 0x0);
-	ili9320_write(lcd, ILI9320_RGB_IF2, ILI9320_RGBIF2_DPL);
+	ili9320_write(lcd, ILI9320_RGB_IF2, cfg->rgb_if2);
 
 	ret = ili9320_write_regs(lcd, vgg_init1, ARRAY_SIZE(vgg_init1));
 	if (ret != 0)
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 7644ed2..37e60b1 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -335,7 +335,20 @@
 				   struct fb_info *info)
 {
 
-	if (var->bits_per_pixel != LCD_BPP) {
+	switch (var->bits_per_pixel) {
+	case 24:/* TRUECOLOUR, 16m */
+		var->red.offset = 16;
+		var->green.offset = 8;
+		var->blue.offset = 0;
+		var->red.length = var->green.length = var->blue.length = 8;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		var->transp.msb_right = 0;
+		var->red.msb_right = 0;
+		var->green.msb_right = 0;
+		var->blue.msb_right = 0;
+		break;
+	default:
 		pr_debug("%s: depth not supported: %u BPP\n", __func__,
 			 var->bits_per_pixel);
 		return -EINVAL;
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index a9b3ada..2a423d3 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -254,7 +254,20 @@
 				   struct fb_info *info)
 {
 
-	if (var->bits_per_pixel != LCD_BPP) {
+	switch (var->bits_per_pixel) {
+	case 24:/* TRUECOLOUR, 16m */
+		var->red.offset = 0;
+		var->green.offset = 8;
+		var->blue.offset = 16;
+		var->red.length = var->green.length = var->blue.length = 8;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		var->transp.msb_right = 0;
+		var->red.msb_right = 0;
+		var->green.msb_right = 0;
+		var->blue.msb_right = 0;
+		break;
+	default:
 		pr_debug("%s: depth not supported: %u BPP\n", __func__,
 			 var->bits_per_pixel);
 		return -EINVAL;
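Both Blackfin LCD drivers now accept 24-bit truecolour and publish the component offsets in fb_var_screeninfo (note the two panels use opposite red/blue positions). A pixel is then packed by shifting each 8-bit component to its offset; a hedged helper sketch for the bfin-t350mcqb layout above:

#include <linux/types.h>

/* Pack an 8:8:8 pixel for a var with red.offset = 0, green.offset = 8,
 * blue.offset = 16, as set up by the check_var hunk above. */
static u32 example_pack_rgb888(u8 r, u8 g, u8 b)
{
	return ((u32)r << 0) | ((u32)g << 8) | ((u32)b << 16);
}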
diff --git a/drivers/video/c2p.c b/drivers/video/c2p.c
deleted file mode 100644
index 376bc07..0000000
--- a/drivers/video/c2p.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- *  Fast C2P (Chunky-to-Planar) Conversion
- *
- *  Copyright (C) 2003 Geert Uytterhoeven
- *
- *  NOTES:
- *    - This code was inspired by Scout's C2P tutorial
- *    - It assumes to run on a big endian system
- *
- *  This file is subject to the terms and conditions of the GNU General Public
- *  License. See the file COPYING in the main directory of this archive
- *  for more details.
- */
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include "c2p.h"
-
-
-    /*
-     *  Basic transpose step
-     */
-
-#define _transp(d, i1, i2, shift, mask)			\
-    do {						\
-	u32 t = (d[i1] ^ (d[i2] >> shift)) & mask;	\
-	d[i1] ^= t;					\
-	d[i2] ^= t << shift;				\
-    } while (0)
-
-static inline u32 get_mask(int n)
-{
-    switch (n) {
-	case 1:
-	    return 0x55555555;
-	    break;
-
-	case 2:
-	    return 0x33333333;
-	    break;
-
-	case 4:
-	    return 0x0f0f0f0f;
-	    break;
-
-	case 8:
-	    return 0x00ff00ff;
-	    break;
-
-	case 16:
-	    return 0x0000ffff;
-	    break;
-    }
-    return 0;
-}
-
-#define transp_nx1(d, n)				\
-    do {						\
-	u32 mask = get_mask(n);				\
-	/* First block */				\
-	_transp(d, 0, 1, n, mask);			\
-	/* Second block */				\
-	_transp(d, 2, 3, n, mask);			\
-	/* Third block */				\
-	_transp(d, 4, 5, n, mask);			\
-	/* Fourth block */				\
-	_transp(d, 6, 7, n, mask);			\
-    } while (0)
-
-#define transp_nx2(d, n)				\
-    do {						\
-	u32 mask = get_mask(n);				\
-	/* First block */				\
-	_transp(d, 0, 2, n, mask);			\
-	_transp(d, 1, 3, n, mask);			\
-	/* Second block */				\
-	_transp(d, 4, 6, n, mask);			\
-	_transp(d, 5, 7, n, mask);			\
-    } while (0)
-
-#define transp_nx4(d, n)				\
-    do {						\
-	u32 mask = get_mask(n);				\
-	_transp(d, 0, 4, n, mask);			\
-	_transp(d, 1, 5, n, mask);			\
-	_transp(d, 2, 6, n, mask);			\
-	_transp(d, 3, 7, n, mask);			\
-    } while (0)
-
-#define transp(d, n, m)	transp_nx ## m(d, n)
-
-
-    /*
-     *  Perform a full C2P step on 32 8-bit pixels, stored in 8 32-bit words
-     *  containing
-     *    - 32 8-bit chunky pixels on input
-     *    - permuted planar data on output
-     */
-
-static void c2p_8bpp(u32 d[8])
-{
-    transp(d, 16, 4);
-    transp(d, 8, 2);
-    transp(d, 4, 1);
-    transp(d, 2, 4);
-    transp(d, 1, 2);
-}
-
-
-    /*
-     *  Array containing the permution indices of the planar data after c2p
-     */
-
-static const int perm_c2p_8bpp[8] = { 7, 5, 3, 1, 6, 4, 2, 0 };
-
-
-    /*
-     *  Compose two values, using a bitmask as decision value
-     *  This is equivalent to (a & mask) | (b & ~mask)
-     */
-
-static inline unsigned long comp(unsigned long a, unsigned long b,
-				 unsigned long mask)
-{
-	return ((a ^ b) & mask) ^ b;
-}
-
-
-    /*
-     *  Store a full block of planar data after c2p conversion
-     */
-
-static inline void store_planar(char *dst, u32 dst_inc, u32 bpp, u32 d[8])
-{
-    int i;
-
-    for (i = 0; i < bpp; i++, dst += dst_inc)
-	*(u32 *)dst = d[perm_c2p_8bpp[i]];
-}
-
-
-    /*
-     *  Store a partial block of planar data after c2p conversion
-     */
-
-static inline void store_planar_masked(char *dst, u32 dst_inc, u32 bpp,
-				       u32 d[8], u32 mask)
-{
-    int i;
-
-    for (i = 0; i < bpp; i++, dst += dst_inc)
-	*(u32 *)dst = comp(d[perm_c2p_8bpp[i]], *(u32 *)dst, mask);
-}
-
-
-    /*
-     *  c2p - Copy 8-bit chunky image data to a planar frame buffer
-     *  @dst: Starting address of the planar frame buffer
-     *  @dx: Horizontal destination offset (in pixels)
-     *  @dy: Vertical destination offset (in pixels)
-     *  @width: Image width (in pixels)
-     *  @height: Image height (in pixels)
-     *  @dst_nextline: Frame buffer offset to the next line (in bytes)
-     *  @dst_nextplane: Frame buffer offset to the next plane (in bytes)
-     *  @src_nextline: Image offset to the next line (in bytes)
-     *  @bpp: Bits per pixel of the planar frame buffer (1-8)
-     */
-
-void c2p(u8 *dst, const u8 *src, u32 dx, u32 dy, u32 width, u32 height,
-	 u32 dst_nextline, u32 dst_nextplane, u32 src_nextline, u32 bpp)
-{
-    int dst_idx;
-    u32 d[8], first, last, w;
-    const u8 *c;
-    u8 *p;
-
-    dst += dy*dst_nextline+(dx & ~31);
-    dst_idx = dx % 32;
-    first = ~0UL >> dst_idx;
-    last = ~(~0UL >> ((dst_idx+width) % 32));
-    while (height--) {
-	c = src;
-	p = dst;
-	w = width;
-	if (dst_idx+width <= 32) {
-	    /* Single destination word */
-	    first &= last;
-	    memset(d, 0, sizeof(d));
-	    memcpy((u8 *)d+dst_idx, c, width);
-	    c += width;
-	    c2p_8bpp(d);
-	    store_planar_masked(p, dst_nextplane, bpp, d, first);
-	    p += 4;
-	} else {
-	    /* Multiple destination words */
-	    w = width;
-	    /* Leading bits */
-	    if (dst_idx) {
-		w = 32 - dst_idx;
-		memset(d, 0, dst_idx);
-		memcpy((u8 *)d+dst_idx, c, w);
-		c += w;
-		c2p_8bpp(d);
-		store_planar_masked(p, dst_nextplane, bpp, d, first);
-		p += 4;
-		w = width-w;
-	    }
-	    /* Main chunk */
-	    while (w >= 32) {
-		memcpy(d, c, 32);
-		c += 32;
-		c2p_8bpp(d);
-		store_planar(p, dst_nextplane, bpp, d);
-		p += 4;
-		w -= 32;
-	    }
-	    /* Trailing bits */
-	    w %= 32;
-	    if (w > 0) {
-		memcpy(d, c, w);
-		memset((u8 *)d+w, 0, 32-w);
-		c2p_8bpp(d);
-		store_planar_masked(p, dst_nextplane, bpp, d, last);
-	    }
-	}
-	src += src_nextline;
-	dst += dst_nextline;
-    }
-}
-EXPORT_SYMBOL_GPL(c2p);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/c2p.h b/drivers/video/c2p.h
index c77cbf1..6c38d40 100644
--- a/drivers/video/c2p.h
+++ b/drivers/video/c2p.h
@@ -1,7 +1,7 @@
 /*
  *  Fast C2P (Chunky-to-Planar) Conversion
  *
- *  Copyright (C) 2003 Geert Uytterhoeven
+ *  Copyright (C) 2003-2008 Geert Uytterhoeven
  *
  *  This file is subject to the terms and conditions of the GNU General Public
  *  License. See the file COPYING in the main directory of this archive
@@ -10,7 +10,10 @@
 
 #include <linux/types.h>
 
-extern void c2p(u8 *dst, const u8 *src, u32 dx, u32 dy, u32 width, u32 height,
-		u32 dst_nextline, u32 dst_nextplane, u32 src_nextline,
-		u32 bpp);
+extern void c2p_planar(void *dst, const void *src, u32 dx, u32 dy, u32 width,
+		       u32 height, u32 dst_nextline, u32 dst_nextplane,
+		       u32 src_nextline, u32 bpp);
 
+extern void c2p_iplan2(void *dst, const void *src, u32 dx, u32 dy, u32 width,
+		       u32 height, u32 dst_nextline, u32 src_nextline,
+		       u32 bpp);
diff --git a/drivers/video/c2p_core.h b/drivers/video/c2p_core.h
new file mode 100644
index 0000000..e1035a8
--- /dev/null
+++ b/drivers/video/c2p_core.h
@@ -0,0 +1,153 @@
+/*
+ *  Fast C2P (Chunky-to-Planar) Conversion
+ *
+ *  Copyright (C) 2003-2008 Geert Uytterhoeven
+ *
+ *  NOTES:
+ *    - This code was inspired by Scout's C2P tutorial
+ *    - It assumes to run on a big endian system
+ *
+ *  This file is subject to the terms and conditions of the GNU General Public
+ *  License. See the file COPYING in the main directory of this archive
+ *  for more details.
+ */
+
+
+    /*
+     *  Basic transpose step
+     */
+
+static inline void _transp(u32 d[], unsigned int i1, unsigned int i2,
+			   unsigned int shift, u32 mask)
+{
+	u32 t = (d[i1] ^ (d[i2] >> shift)) & mask;
+
+	d[i1] ^= t;
+	d[i2] ^= t << shift;
+}
+
+
+extern void c2p_unsupported(void);
+
+static inline u32 get_mask(unsigned int n)
+{
+	switch (n) {
+	case 1:
+		return 0x55555555;
+
+	case 2:
+		return 0x33333333;
+
+	case 4:
+		return 0x0f0f0f0f;
+
+	case 8:
+		return 0x00ff00ff;
+
+	case 16:
+		return 0x0000ffff;
+	}
+
+	c2p_unsupported();
+	return 0;
+}
+
+
+    /*
+     *  Transpose operations on 8 32-bit words
+     */
+
+static inline void transp8(u32 d[], unsigned int n, unsigned int m)
+{
+	u32 mask = get_mask(n);
+
+	switch (m) {
+	case 1:
+		/* First n x 1 block */
+		_transp(d, 0, 1, n, mask);
+		/* Second n x 1 block */
+		_transp(d, 2, 3, n, mask);
+		/* Third n x 1 block */
+		_transp(d, 4, 5, n, mask);
+		/* Fourth n x 1 block */
+		_transp(d, 6, 7, n, mask);
+		return;
+
+	case 2:
+		/* First n x 2 block */
+		_transp(d, 0, 2, n, mask);
+		_transp(d, 1, 3, n, mask);
+		/* Second n x 2 block */
+		_transp(d, 4, 6, n, mask);
+		_transp(d, 5, 7, n, mask);
+		return;
+
+	case 4:
+		/* Single n x 4 block */
+		_transp(d, 0, 4, n, mask);
+		_transp(d, 1, 5, n, mask);
+		_transp(d, 2, 6, n, mask);
+		_transp(d, 3, 7, n, mask);
+		return;
+	}
+
+	c2p_unsupported();
+}
+
+
+    /*
+     *  Transpose operations on 4 32-bit words
+     */
+
+static inline void transp4(u32 d[], unsigned int n, unsigned int m)
+{
+	u32 mask = get_mask(n);
+
+	switch (m) {
+	case 1:
+		/* First n x 1 block */
+		_transp(d, 0, 1, n, mask);
+		/* Second n x 1 block */
+		_transp(d, 2, 3, n, mask);
+		return;
+
+	case 2:
+		/* Single n x 2 block */
+		_transp(d, 0, 2, n, mask);
+		_transp(d, 1, 3, n, mask);
+		return;
+	}
+
+	c2p_unsupported();
+}
+
+
+    /*
+     *  Transpose operations on 4 32-bit words (reverse order)
+     */
+
+static inline void transp4x(u32 d[], unsigned int n, unsigned int m)
+{
+	u32 mask = get_mask(n);
+
+	switch (m) {
+	case 2:
+		/* Single n x 2 block */
+		_transp(d, 2, 0, n, mask);
+		_transp(d, 3, 1, n, mask);
+		return;
+	}
+
+	c2p_unsupported();
+}
+
+
+    /*
+     *  Compose two values, using a bitmask as decision value
+     *  This is equivalent to (a & mask) | (b & ~mask)
+     */
+
+static inline u32 comp(u32 a, u32 b, u32 mask)
+{
+	return ((a ^ b) & mask) ^ b;
+}
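comp() is the classic branch-free merge under a bitmask: ((a ^ b) & mask) ^ b yields a's bits where mask is 1 and b's bits where mask is 0, using three operations instead of the four needed by (a & mask) | (b & ~mask). A small userspace self-check of that identity:

#include <assert.h>
#include <stdint.h>

static uint32_t comp(uint32_t a, uint32_t b, uint32_t mask)
{
	return ((a ^ b) & mask) ^ b;
}

int main(void)
{
	uint32_t a = 0x12345678, b = 0xcafebabe, mask = 0x00ff00ff;

	/* equivalent to the straightforward formulation */
	assert(comp(a, b, mask) == ((a & mask) | (b & ~mask)));
	return 0;
}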
diff --git a/drivers/video/c2p_iplan2.c b/drivers/video/c2p_iplan2.c
new file mode 100644
index 0000000..19156dc
--- /dev/null
+++ b/drivers/video/c2p_iplan2.c
@@ -0,0 +1,153 @@
+/*
+ *  Fast C2P (Chunky-to-Planar) Conversion
+ *
+ *  Copyright (C) 2003-2008 Geert Uytterhoeven
+ *
+ *  This file is subject to the terms and conditions of the GNU General Public
+ *  License. See the file COPYING in the main directory of this archive
+ *  for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <asm/unaligned.h>
+
+#include "c2p.h"
+#include "c2p_core.h"
+
+
+    /*
+     *  Perform a full C2P step on 16 8-bit pixels, stored in 4 32-bit words
+     *  containing
+     *    - 16 8-bit chunky pixels on input
+     *    - permutated planar data (2 planes per 32-bit word) on output
+     */
+
+static void c2p_16x8(u32 d[4])
+{
+	transp4(d, 8, 2);
+	transp4(d, 1, 2);
+	transp4x(d, 16, 2);
+	transp4x(d, 2, 2);
+	transp4(d, 4, 1);
+}
+
+
+    /*
+     *  Array containing the permutation indices of the planar data after c2p
+     */
+
+static const int perm_c2p_16x8[4] = { 1, 3, 0, 2 };
+
+
+    /*
+     *  Store a full block of iplan2 data after c2p conversion
+     */
+
+static inline void store_iplan2(void *dst, u32 bpp, u32 d[4])
+{
+	int i;
+
+	for (i = 0; i < bpp/2; i++, dst += 4)
+		put_unaligned_be32(d[perm_c2p_16x8[i]], dst);
+}
+
+
+    /*
+     *  Store a partial block of iplan2 data after c2p conversion
+     */
+
+static inline void store_iplan2_masked(void *dst, u32 bpp, u32 d[4], u32 mask)
+{
+	int i;
+
+	for (i = 0; i < bpp/2; i++, dst += 4)
+		put_unaligned_be32(comp(d[perm_c2p_16x8[i]],
+					get_unaligned_be32(dst), mask),
+				   dst);
+}
+
+
+    /*
+     *  c2p_iplan2 - Copy 8-bit chunky image data to an interleaved planar
+     *  frame buffer with 2 bytes of interleave
+     *  @dst: Starting address of the planar frame buffer
+     *  @dx: Horizontal destination offset (in pixels)
+     *  @dy: Vertical destination offset (in pixels)
+     *  @width: Image width (in pixels)
+     *  @height: Image height (in pixels)
+     *  @dst_nextline: Frame buffer offset to the next line (in bytes)
+     *  @src_nextline: Image offset to the next line (in bytes)
+     *  @bpp: Bits per pixel of the planar frame buffer (2, 4, or 8)
+     */
+
+void c2p_iplan2(void *dst, const void *src, u32 dx, u32 dy, u32 width,
+		u32 height, u32 dst_nextline, u32 src_nextline, u32 bpp)
+{
+	union {
+		u8 pixels[16];
+		u32 words[4];
+	} d;
+	u32 dst_idx, first, last, w;
+	const u8 *c;
+	void *p;
+
+	dst += dy*dst_nextline+(dx & ~15)*bpp;
+	dst_idx = dx % 16;
+	first = 0xffffU >> dst_idx;
+	first |= first << 16;
+	last = 0xffffU ^ (0xffffU >> ((dst_idx+width) % 16));
+	last |= last << 16;
+	while (height--) {
+		c = src;
+		p = dst;
+		w = width;
+		if (dst_idx+width <= 16) {
+			/* Single destination word */
+			first &= last;
+			memset(d.pixels, 0, sizeof(d));
+			memcpy(d.pixels+dst_idx, c, width);
+			c += width;
+			c2p_16x8(d.words);
+			store_iplan2_masked(p, bpp, d.words, first);
+			p += bpp*2;
+		} else {
+			/* Multiple destination words */
+			w = width;
+			/* Leading bits */
+			if (dst_idx) {
+				w = 16 - dst_idx;
+				memset(d.pixels, 0, dst_idx);
+				memcpy(d.pixels+dst_idx, c, w);
+				c += w;
+				c2p_16x8(d.words);
+				store_iplan2_masked(p, bpp, d.words, first);
+				p += bpp*2;
+				w = width-w;
+			}
+			/* Main chunk */
+			while (w >= 16) {
+				memcpy(d.pixels, c, 16);
+				c += 16;
+				c2p_16x8(d.words);
+				store_iplan2(p, bpp, d.words);
+				p += bpp*2;
+				w -= 16;
+			}
+			/* Trailing bits */
+			w %= 16;
+			if (w > 0) {
+				memcpy(d.pixels, c, w);
+				memset(d.pixels+w, 0, 16-w);
+				c2p_16x8(d.words);
+				store_iplan2_masked(p, bpp, d.words, last);
+			}
+		}
+		src += src_nextline;
+		dst += dst_nextline;
+	}
+}
+EXPORT_SYMBOL_GPL(c2p_iplan2);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/c2p_planar.c b/drivers/video/c2p_planar.c
new file mode 100644
index 0000000..ec7ac85
--- /dev/null
+++ b/drivers/video/c2p_planar.c
@@ -0,0 +1,156 @@
+/*
+ *  Fast C2P (Chunky-to-Planar) Conversion
+ *
+ *  Copyright (C) 2003-2008 Geert Uytterhoeven
+ *
+ *  This file is subject to the terms and conditions of the GNU General Public
+ *  License. See the file COPYING in the main directory of this archive
+ *  for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <asm/unaligned.h>
+
+#include "c2p.h"
+#include "c2p_core.h"
+
+
+    /*
+     *  Perform a full C2P step on 32 8-bit pixels, stored in 8 32-bit words
+     *  containing
+     *    - 32 8-bit chunky pixels on input
+     *    - permutated planar data (1 plane per 32-bit word) on output
+     */
+
+static void c2p_32x8(u32 d[8])
+{
+	transp8(d, 16, 4);
+	transp8(d, 8, 2);
+	transp8(d, 4, 1);
+	transp8(d, 2, 4);
+	transp8(d, 1, 2);
+}
+
+
+    /*
+     *  Array containing the permutation indices of the planar data after c2p
+     */
+
+static const int perm_c2p_32x8[8] = { 7, 5, 3, 1, 6, 4, 2, 0 };
+
+
+    /*
+     *  Store a full block of planar data after c2p conversion
+     */
+
+static inline void store_planar(void *dst, u32 dst_inc, u32 bpp, u32 d[8])
+{
+	int i;
+
+	for (i = 0; i < bpp; i++, dst += dst_inc)
+		put_unaligned_be32(d[perm_c2p_32x8[i]], dst);
+}
+
+
+    /*
+     *  Store a partial block of planar data after c2p conversion
+     */
+
+static inline void store_planar_masked(void *dst, u32 dst_inc, u32 bpp,
+				       u32 d[8], u32 mask)
+{
+	int i;
+
+	for (i = 0; i < bpp; i++, dst += dst_inc)
+		put_unaligned_be32(comp(d[perm_c2p_32x8[i]],
+					get_unaligned_be32(dst), mask),
+				   dst);
+}
+
+
+    /*
+     *  c2p_planar - Copy 8-bit chunky image data to a planar frame buffer
+     *  @dst: Starting address of the planar frame buffer
+     *  @dx: Horizontal destination offset (in pixels)
+     *  @dy: Vertical destination offset (in pixels)
+     *  @width: Image width (in pixels)
+     *  @height: Image height (in pixels)
+     *  @dst_nextline: Frame buffer offset to the next line (in bytes)
+     *  @dst_nextplane: Frame buffer offset to the next plane (in bytes)
+     *  @src_nextline: Image offset to the next line (in bytes)
+     *  @bpp: Bits per pixel of the planar frame buffer (1-8)
+     */
+
+void c2p_planar(void *dst, const void *src, u32 dx, u32 dy, u32 width,
+		u32 height, u32 dst_nextline, u32 dst_nextplane,
+		u32 src_nextline, u32 bpp)
+{
+	union {
+		u8 pixels[32];
+		u32 words[8];
+	} d;
+	u32 dst_idx, first, last, w;
+	const u8 *c;
+	void *p;
+
+	dst += dy*dst_nextline+(dx & ~31);
+	dst_idx = dx % 32;
+	first = 0xffffffffU >> dst_idx;
+	last = ~(0xffffffffU >> ((dst_idx+width) % 32));
+	while (height--) {
+		c = src;
+		p = dst;
+		w = width;
+		if (dst_idx+width <= 32) {
+			/* Single destination word */
+			first &= last;
+			memset(d.pixels, 0, sizeof(d));
+			memcpy(d.pixels+dst_idx, c, width);
+			c += width;
+			c2p_32x8(d.words);
+			store_planar_masked(p, dst_nextplane, bpp, d.words,
+					    first);
+			p += 4;
+		} else {
+			/* Multiple destination words */
+			w = width;
+			/* Leading bits */
+			if (dst_idx) {
+				w = 32 - dst_idx;
+				memset(d.pixels, 0, dst_idx);
+				memcpy(d.pixels+dst_idx, c, w);
+				c += w;
+				c2p_32x8(d.words);
+				store_planar_masked(p, dst_nextplane, bpp,
+						    d.words, first);
+				p += 4;
+				w = width-w;
+			}
+			/* Main chunk */
+			while (w >= 32) {
+				memcpy(d.pixels, c, 32);
+				c += 32;
+				c2p_32x8(d.words);
+				store_planar(p, dst_nextplane, bpp, d.words);
+				p += 4;
+				w -= 32;
+			}
+			/* Trailing bits */
+			w %= 32;
+			if (w > 0) {
+				memcpy(d.pixels, c, w);
+				memset(d.pixels+w, 0, 32-w);
+				c2p_32x8(d.words);
+				store_planar_masked(p, dst_nextplane, bpp,
+						    d.words, last);
+			}
+		}
+		src += src_nextline;
+		dst += dst_nextline;
+	}
+}
+EXPORT_SYMBOL_GPL(c2p_planar);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 4bcff81..1657b96 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -78,13 +78,6 @@
 #include <asm/fb.h>
 #include <asm/irq.h>
 #include <asm/system.h>
-#ifdef CONFIG_ATARI
-#include <asm/atariints.h>
-#endif
-#if defined(__mc68000__)
-#include <asm/machdep.h>
-#include <asm/setup.h>
-#endif
 
 #include "fbcon.h"
 
@@ -155,9 +148,6 @@
 
 #define CURSOR_DRAW_DELAY		(1)
 
-/* # VBL ints between cursor state changes */
-#define ATARI_CURSOR_BLINK_RATE		(42)
-
 static int vbl_cursor_cnt;
 static int fbcon_cursor_noblink;
 
@@ -403,20 +393,6 @@
 	release_console_sem();
 }
 
-#ifdef CONFIG_ATARI
-static int cursor_blink_rate;
-static irqreturn_t fb_vbl_handler(int irq, void *dev_id)
-{
-	struct fb_info *info = dev_id;
-
-	if (vbl_cursor_cnt && --vbl_cursor_cnt == 0) {
-		schedule_work(&info->queue);	
-		vbl_cursor_cnt = cursor_blink_rate; 
-	}
-	return IRQ_HANDLED;
-}
-#endif
-	
 static void cursor_timer_handler(unsigned long dev_addr)
 {
 	struct fb_info *info = (struct fb_info *) dev_addr;
@@ -1017,15 +993,6 @@
 		info->var.yres,
 		info->var.bits_per_pixel);
 
-#ifdef CONFIG_ATARI
-	if (MACH_IS_ATARI) {
-		cursor_blink_rate = ATARI_CURSOR_BLINK_RATE;
-		(void)request_irq(IRQ_AUTO_4, fb_vbl_handler,
-				IRQ_TYPE_PRIO, "framebuffer vbl",
-				info);
-	}
-#endif /* CONFIG_ATARI */
-
 	fbcon_add_cursor_timer(info);
 	fbcon_has_exited = 0;
 	return display_desc;
@@ -3454,11 +3421,6 @@
 	if (fbcon_has_exited)
 		return;
 
-#ifdef CONFIG_ATARI
-	if (MACH_IS_ATARI)
-		free_irq(IRQ_AUTO_4, fb_vbl_handler);
-#endif
-
 	kfree((void *)softback_buf);
 	softback_buf = 0UL;
 
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index e621072..d012edd 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1332,7 +1332,7 @@
 		c->vc_y = screen_info.orig_y;
 	}
 
-	/* We can't copy in more then the size of the video buffer,
+	/* We can't copy in more than the size of the video buffer,
 	 * or we'll be copying in VGA BIOS */
 
 	if (!vga_is_gfx)
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 38ac805..87f826e 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -1006,7 +1006,7 @@
 			__func__, status);
 		return -ENXIO;
 	}
-	dev_dbg(dev, "video:%p ioif:%lx lpar:%lx size:%lx\n",
+	dev_dbg(dev, "video:%p ioif:%lx lpar:%llx size:%lx\n",
 		ps3fb_videomemory.address, GPU_IOIF, xdr_lpar,
 		ps3fb_videomemory.size);
 
@@ -1133,7 +1133,7 @@
 			__func__, status);
 		goto err;
 	}
-	dev_dbg(&dev->core, "ddr:lpar:0x%lx\n", ddr_lpar);
+	dev_dbg(&dev->core, "ddr:lpar:0x%llx\n", ddr_lpar);
 
 	status = lv1_gpu_context_allocate(ps3fb.memory_handle, 0,
 					  &ps3fb.context_handle,
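The format-string fixups appear to follow the switch of u64 to unsigned long long on powerpc, which makes %llx the matching specifier for the lpar addresses. The portable pattern when a printed value's type may vary across architectures is an explicit cast, as in this hedged sketch:

#include <linux/device.h>
#include <linux/types.h>

static void example_show_lpar(struct device *dev, u64 lpar)
{
	/* the cast keeps %llx correct regardless of how u64 is typedef'd */
	dev_dbg(dev, "lpar:0x%llx\n", (unsigned long long)lpar);
}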
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 9061682..96d2f8e 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -34,6 +34,12 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called ds2482.
 
+config W1_MASTER_MXC
+	tristate "Freescale MXC 1-wire busmaster"
+	depends on W1 && ARCH_MXC
+	help
+	  Say Y here to enable the MXC 1-wire host.
+
 config W1_MASTER_DS1WM
 	tristate "Maxim DS1WM 1-wire busmaster"
 	depends on W1 && ARM && HAVE_CLK
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile
index bc4714a..c5a3e96 100644
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -5,6 +5,8 @@
 obj-$(CONFIG_W1_MASTER_MATROX)		+= matrox_w1.o
 obj-$(CONFIG_W1_MASTER_DS2490)		+= ds2490.o
 obj-$(CONFIG_W1_MASTER_DS2482)		+= ds2482.o
+obj-$(CONFIG_W1_MASTER_MXC)		+= mxc_w1.o
+
 obj-$(CONFIG_W1_MASTER_DS1WM)		+= ds1wm.o
 obj-$(CONFIG_W1_MASTER_GPIO)		+= w1-gpio.o
 obj-$(CONFIG_HDQ_MASTER_OMAP)		+= omap_hdq.o
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
new file mode 100644
index 0000000..b9d74d0
--- /dev/null
+++ b/drivers/w1/masters/mxc_w1.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2005-2008 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Luotao Fu, kernel@pengutronix.de
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+#include "../w1_log.h"
+
+/* According to the mx27 datasheet the reset procedure should take up to about
+ * 1350us. We set the timeout to 500*100us = 50ms to be on the safe side. */
+#define MXC_W1_RESET_TIMEOUT 500
+
+/*
+ * MXC W1 Register offsets
+ */
+#define MXC_W1_CONTROL          0x00
+#define MXC_W1_TIME_DIVIDER     0x02
+#define MXC_W1_RESET            0x04
+#define MXC_W1_COMMAND          0x06
+#define MXC_W1_TXRX             0x08
+#define MXC_W1_INTERRUPT        0x0A
+#define MXC_W1_INTERRUPT_EN     0x0C
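+
+/*
+ * Register usage, as driven by the code below: setting bit 7 of the
+ * control register starts a reset/presence-detect sequence and the bit
+ * self-clears when the sequence completes; setting bit 5 sends a
+ * write-0 slot, bit 4 a write-1/read slot, and bit 3 holds the data
+ * bit sampled during a read slot.
+ */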
+
+struct mxc_w1_device {
+	void __iomem *regs;
+	unsigned int clkdiv;
+	struct clk *clk;
+	struct w1_bus_master bus_master;
+};
+
+/*
+ * Low-level routine to issue a reset pulse on the 1-Wire bus and wait
+ * for it to complete.
+ */
+static u8 mxc_w1_ds2_reset_bus(void *data)
+{
+	u8 reg_val;
+	unsigned int timeout_cnt = 0;
+	struct mxc_w1_device *dev = data;
+
+	__raw_writeb(0x80, (dev->regs + MXC_W1_CONTROL));
+
+	while (1) {
+		reg_val = __raw_readb(dev->regs + MXC_W1_CONTROL);
+
+		if (((reg_val >> 7) & 0x1) == 0 ||
+		    timeout_cnt > MXC_W1_RESET_TIMEOUT)
+			break;
+		else
+			timeout_cnt++;
+
+		udelay(100);
+	}
+	return (reg_val >> 7) & 0x1;
+}
+
+/*
+ * Low-level routine to read/write a bit on the 1-Wire bus.  A bit value
+ * of 0 generates a write-0 slot; otherwise a write-1/read slot is
+ * generated and the sampled bit is returned.
+ */
+static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
+{
+	struct mxc_w1_device *mdev = data;
+	void __iomem *ctrl_addr = mdev->regs + MXC_W1_CONTROL;
+	unsigned int timeout_cnt = 400; /* Takes max. 120us according to
+					 * datasheet.
+					 */
+
+	__raw_writeb((1 << (5 - bit)), ctrl_addr);
+
+	while (timeout_cnt--) {
+		if (!((__raw_readb(ctrl_addr) >> (5 - bit)) & 0x1))
+			break;
+
+		udelay(1);
+	}
+
+	return ((__raw_readb(ctrl_addr)) >> 3) & 0x1;
+}
+
+static int __init mxc_w1_probe(struct platform_device *pdev)
+{
+	struct mxc_w1_device *mdev;
+	struct resource *res;
+	int err = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	mdev = kzalloc(sizeof(struct mxc_w1_device), GFP_KERNEL);
+	if (!mdev)
+		return -ENOMEM;
+
+	mdev->clk = clk_get(&pdev->dev, "owire_clk");
+	if (IS_ERR(mdev->clk)) {
+		err = PTR_ERR(mdev->clk);
+		goto failed_clk;
+	}
+
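+	/*
+	 * The 1-wire cell runs from a divided-down bus clock; the time
+	 * divider register is programmed to bring the input clock down to
+	 * roughly 1 MHz (divider = input clock in MHz, minus one).
+	 */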
+	mdev->clkdiv = (clk_get_rate(mdev->clk) / 1000000) - 1;
+
+	res = request_mem_region(res->start, resource_size(res),
+				"mxc_w1");
+	if (!res) {
+		err = -EBUSY;
+		goto failed_req;
+	}
+
+	mdev->regs = ioremap(res->start, resource_size(res));
+	if (!mdev->regs) {
+		printk(KERN_ERR "Cannot map frame buffer registers\n");
+		goto failed_ioremap;
+	}
+
+	clk_enable(mdev->clk);
+	__raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER);
+
+	mdev->bus_master.data = mdev;
+	mdev->bus_master.reset_bus = mxc_w1_ds2_reset_bus;
+	mdev->bus_master.touch_bit = mxc_w1_ds2_touch_bit;
+
+	err = w1_add_master_device(&mdev->bus_master);
+
+	if (err)
+		goto failed_add;
+
+	platform_set_drvdata(pdev, mdev);
+	return 0;
+
+failed_add:
+	iounmap(mdev->regs);
+failed_ioremap:
+	release_mem_region(res->start, resource_size(res));
+failed_req:
+	clk_put(mdev->clk);
+failed_clk:
+	kfree(mdev);
+	return err;
+}
+
+/*
+ * disassociate the w1 device from the driver
+ */
+static int mxc_w1_remove(struct platform_device *pdev)
+{
+	struct mxc_w1_device *mdev = platform_get_drvdata(pdev);
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	w1_remove_master_device(&mdev->bus_master);
+
+	iounmap(mdev->regs);
+	release_mem_region(res->start, resource_size(res));
+	clk_disable(mdev->clk);
+	clk_put(mdev->clk);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver mxc_w1_driver = {
+	.driver = {
+		   .name = "mxc_w1",
+	},
+	.probe = mxc_w1_probe,
+	.remove = mxc_w1_remove,
+};
+
+static int __init mxc_w1_init(void)
+{
+	return platform_driver_register(&mxc_w1_driver);
+}
+
+static void __exit mxc_w1_exit(void)
+{
+	platform_driver_unregister(&mxc_w1_driver);
+}
+
+module_init(mxc_w1_init);
+module_exit(mxc_w1_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale Semiconductors Inc");
+MODULE_DESCRIPTION("Driver for One-Wire on MXC");
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index 97304bd..d8a9709 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -210,6 +210,7 @@
 int w1_reset_bus(struct w1_master *);
 u8 w1_calc_crc8(u8 *, int);
 void w1_write_block(struct w1_master *, const u8 *, int);
+void w1_touch_block(struct w1_master *, u8 *, int);
 u8 w1_read_block(struct w1_master *, u8 *, int);
 int w1_reset_select_slave(struct w1_slave *sl);
 void w1_next_pullup(struct w1_master *, int);
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 5139c25..442bd8b 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -238,7 +238,6 @@
  * @param dev     the master device
  * @param buf     pointer to the data to write
  * @param len     the number of bytes to write
- * @return        the byte read
  */
 void w1_write_block(struct w1_master *dev, const u8 *buf, int len)
 {
@@ -256,6 +255,31 @@
 EXPORT_SYMBOL_GPL(w1_write_block);
 
 /**
+ * Touches a series of bytes: each byte is written out bit by bit and
+ * replaced in place with the data read back from the bus.
+ *
+ * @param dev     the master device
+ * @param buf     pointer to the data to write; overwritten with the bytes read
+ * @param len     the number of bytes to touch
+ */
+void w1_touch_block(struct w1_master *dev, u8 *buf, int len)
+{
+	int i, j;
+	u8 tmp;
+
+	for (i = 0; i < len; ++i) {
+		tmp = 0;
+		for (j = 0; j < 8; ++j) {
+			if (j == 7)
+				w1_pre_write(dev);
+			tmp |= w1_touch_bit(dev, (buf[i] >> j) & 0x1) << j;
+		}
+
+		buf[i] = tmp;
+	}
+}
+EXPORT_SYMBOL_GPL(w1_touch_block);
+
+/**
  * Reads a series of bytes.
  *
  * @param dev     the master device
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 65c5ebd..fdf7285 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -47,21 +47,56 @@
 	cn_netlink_send(m, 0, GFP_KERNEL);
 }
 
-static int w1_process_command_master(struct w1_master *dev, struct cn_msg *msg,
-		struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd)
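+/*
+ * Append one discovered slave id to the netlink reply being built up in
+ * dev->priv.  If the current message has no room left, send it (with a
+ * non-zero ack to mark it as an intermediate fragment) and start a new,
+ * empty one.
+ */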
+static void w1_send_slave(struct w1_master *dev, u64 rn)
 {
-	dev_dbg(&dev->dev, "%s: %s: cmd=%02x, len=%u.\n",
-		__func__, dev->name, cmd->cmd, cmd->len);
+	struct cn_msg *msg = dev->priv;
+	struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1);
+	struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
+	int avail;
 
-	if (cmd->cmd != W1_CMD_SEARCH && cmd->cmd != W1_CMD_ALARM_SEARCH)
-		return -EINVAL;
+	avail = dev->priv_size - cmd->len;
 
-	w1_search_process(dev, (cmd->cmd == W1_CMD_ALARM_SEARCH)?W1_ALARM_SEARCH:W1_SEARCH);
+	if (avail > 8) {
+		u64 *data = (void *)(cmd + 1) + cmd->len;
+
+		*data = rn;
+		cmd->len += 8;
+		hdr->len += 8;
+		msg->len += 8;
+		return;
+	}
+
+	msg->ack++;
+	cn_netlink_send(msg, 0, GFP_KERNEL);
+
+	msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd);
+	hdr->len = sizeof(struct w1_netlink_cmd);
+	cmd->len = 0;
+}
+
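+/*
+ * Handle W1_CMD_SEARCH/W1_CMD_ALARM_SEARCH: run the search with
+ * w1_send_slave() as the per-device callback, then send the final
+ * (possibly empty) message with ack == 0 to mark the end of the reply.
+ */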
+static int w1_process_search_command(struct w1_master *dev, struct cn_msg *msg,
+		unsigned int avail)
+{
+	struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1);
+	struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
+	int search_type = (cmd->cmd == W1_CMD_ALARM_SEARCH) ?
+		W1_ALARM_SEARCH : W1_SEARCH;
+
+	dev->priv = msg;
+	dev->priv_size = avail;
+
+	w1_search_devices(dev, search_type, w1_send_slave);
+
+	msg->ack = 0;
+	cn_netlink_send(msg, 0, GFP_KERNEL);
+
+	dev->priv = NULL;
+	dev->priv_size = 0;
+
 	return 0;
 }
 
-static int w1_send_read_reply(struct w1_slave *sl, struct cn_msg *msg,
-		struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd)
+static int w1_send_read_reply(struct cn_msg *msg, struct w1_netlink_msg *hdr,
+		struct w1_netlink_cmd *cmd)
 {
 	void *data;
 	struct w1_netlink_msg *h;
@@ -85,7 +120,8 @@
 	memcpy(c, cmd, sizeof(struct w1_netlink_cmd));
 
 	cm->ack = msg->seq+1;
-	cm->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd) + cmd->len;
+	cm->len = sizeof(struct w1_netlink_msg) +
+		sizeof(struct w1_netlink_cmd) + cmd->len;
 
 	h->len = sizeof(struct w1_netlink_cmd) + cmd->len;
 
@@ -98,36 +134,178 @@
 	return err;
 }
 
-static int w1_process_command_slave(struct w1_slave *sl, struct cn_msg *msg,
+static int w1_process_command_io(struct w1_master *dev, struct cn_msg *msg,
 		struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd)
 {
 	int err = 0;
 
-	dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n",
-		__func__, sl->reg_num.family, (unsigned long long)sl->reg_num.id, sl->reg_num.crc,
-		cmd->cmd, cmd->len);
-
 	switch (cmd->cmd) {
-		case W1_CMD_READ:
-			w1_read_block(sl->master, cmd->data, cmd->len);
-			w1_send_read_reply(sl, msg, hdr, cmd);
-			break;
-		case W1_CMD_WRITE:
-			w1_write_block(sl->master, cmd->data, cmd->len);
-			break;
-		case W1_CMD_SEARCH:
-		case W1_CMD_ALARM_SEARCH:
-			w1_search_process(sl->master,
-					(cmd->cmd == W1_CMD_ALARM_SEARCH)?W1_ALARM_SEARCH:W1_SEARCH);
-			break;
-		default:
-			err = -1;
-			break;
+	case W1_CMD_TOUCH:
+		w1_touch_block(dev, cmd->data, cmd->len);
+		w1_send_read_reply(msg, hdr, cmd);
+		break;
+	case W1_CMD_READ:
+		w1_read_block(dev, cmd->data, cmd->len);
+		w1_send_read_reply(msg, hdr, cmd);
+		break;
+	case W1_CMD_WRITE:
+		w1_write_block(dev, cmd->data, cmd->len);
+		break;
+	default:
+		err = -EINVAL;
+		break;
 	}
 
 	return err;
 }
 
+static int w1_process_command_master(struct w1_master *dev, struct cn_msg *req_msg,
+		struct w1_netlink_msg *req_hdr, struct w1_netlink_cmd *req_cmd)
+{
+	int err = -EINVAL;
+	struct cn_msg *msg;
+	struct w1_netlink_msg *hdr;
+	struct w1_netlink_cmd *cmd;
+
+	msg = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	msg->id = req_msg->id;
+	msg->seq = req_msg->seq;
+	msg->ack = 0;
+	msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd);
+
+	hdr = (struct w1_netlink_msg *)(msg + 1);
+	cmd = (struct w1_netlink_cmd *)(hdr + 1);
+
+	hdr->type = W1_MASTER_CMD;
+	hdr->id = req_hdr->id;
+	hdr->len = sizeof(struct w1_netlink_cmd);
+
+	cmd->cmd = req_cmd->cmd;
+	cmd->len = 0;
+
+	switch (cmd->cmd) {
+	case W1_CMD_SEARCH:
+	case W1_CMD_ALARM_SEARCH:
+		err = w1_process_search_command(dev, msg,
+				PAGE_SIZE - msg->len - sizeof(struct cn_msg));
+		break;
+	case W1_CMD_READ:
+	case W1_CMD_WRITE:
+	case W1_CMD_TOUCH:
+		err = w1_process_command_io(dev, req_msg, req_hdr, req_cmd);
+		break;
+	case W1_CMD_RESET:
+		err = w1_reset_bus(dev);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	kfree(msg);
+	return err;
+}
+
+static int w1_process_command_slave(struct w1_slave *sl, struct cn_msg *msg,
+		struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd)
+{
+	dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n",
+		__func__, sl->reg_num.family, (unsigned long long)sl->reg_num.id,
+		sl->reg_num.crc, cmd->cmd, cmd->len);
+
+	return w1_process_command_io(sl->master, msg, hdr, cmd);
+}
+
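+/*
+ * W1_LIST_MASTERS: reply with the ids of every registered bus master,
+ * packed as 32-bit values.  If the ids do not fit into a single netlink
+ * message, intermediate messages are sent with a non-zero ack and the
+ * final one with ack == 0.
+ */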
+static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mcmd)
+{
+	struct w1_master *m;
+	struct cn_msg *cn;
+	struct w1_netlink_msg *w;
+	u32 *id;
+
+	if (mcmd->type != W1_LIST_MASTERS) {
+		printk(KERN_NOTICE "%s: msg: %x.%x, wrong type: %u, len: %u.\n",
+			__func__, msg->id.idx, msg->id.val, mcmd->type, mcmd->len);
+		return -EPROTO;
+	}
+
+	cn = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!cn)
+		return -ENOMEM;
+
+	cn->id.idx = CN_W1_IDX;
+	cn->id.val = CN_W1_VAL;
+
+	cn->seq = msg->seq;
+	cn->ack = 1;
+	cn->len = sizeof(struct w1_netlink_msg);
+	w = (struct w1_netlink_msg *)(cn + 1);
+
+	w->type = W1_LIST_MASTERS;
+	w->status = 0;
+	w->len = 0;
+	id = (u32 *)(w + 1);
+
+	mutex_lock(&w1_mlock);
+	list_for_each_entry(m, &w1_masters, w1_master_entry) {
+		if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) {
+			cn_netlink_send(cn, 0, GFP_KERNEL);
+			cn->ack++;
+			cn->len = sizeof(struct w1_netlink_msg);
+			w->len = 0;
+			id = (u32 *)(w + 1);
+		}
+
+		*id = m->id;
+		w->len += sizeof(*id);
+		cn->len += sizeof(*id);
+		id++;
+	}
+	cn->ack = 0;
+	cn_netlink_send(cn, 0, GFP_KERNEL);
+	mutex_unlock(&w1_mlock);
+
+	kfree(cn);
+	return 0;
+}
+
+static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rmsg,
+		struct w1_netlink_cmd *rcmd, int error)
+{
+	struct cn_msg *cmsg;
+	struct w1_netlink_msg *msg;
+	struct w1_netlink_cmd *cmd;
+
+	cmsg = kzalloc(sizeof(*msg) + sizeof(*cmd) + sizeof(*cmsg), GFP_KERNEL);
+	if (!cmsg)
+		return -ENOMEM;
+
+	msg = (struct w1_netlink_msg *)(cmsg + 1);
+	cmd = (struct w1_netlink_cmd *)(msg + 1);
+
+	memcpy(cmsg, rcmsg, sizeof(*cmsg));
+	cmsg->len = sizeof(*msg);
+
+	memcpy(msg, rmsg, sizeof(*msg));
+	msg->len = 0;
+	msg->status = (short)-error;
+
+	if (rcmd) {
+		memcpy(cmd, rcmd, sizeof(*cmd));
+		cmd->len = 0;
+		msg->len += sizeof(*cmd);
+		cmsg->len += sizeof(*cmd);
+	}
+
+	error = cn_netlink_send(cmsg, 0, GFP_KERNEL);
+	kfree(cmsg);
+
+	return error;
+}
+
 static void w1_cn_callback(void *data)
 {
 	struct cn_msg *msg = data;
@@ -144,6 +322,7 @@
 
 		dev = NULL;
 		sl = NULL;
+		cmd = NULL;
 
 		memcpy(&id, m->id.id, sizeof(id));
 #if 0
@@ -155,15 +334,15 @@
 			break;
 		}
 
-		if (!mlen)
-			goto out_cont;
-
 		if (m->type == W1_MASTER_CMD) {
 			dev = w1_search_master_id(m->id.mst.id);
 		} else if (m->type == W1_SLAVE_CMD) {
 			sl = w1_search_slave(&id);
 			if (sl)
 				dev = sl->master;
+		} else {
+			err = w1_process_command_root(msg, m);
+			goto out_cont;
 		}
 
 		if (!dev) {
@@ -171,6 +350,10 @@
 			goto out_cont;
 		}
 
+		err = 0;
+		if (!mlen)
+			goto out_cont;
+
 		mutex_lock(&dev->mutex);
 
 		if (sl && w1_reset_select_slave(sl)) {
@@ -187,9 +370,12 @@
 			}
 
 			if (sl)
-				w1_process_command_slave(sl, msg, m, cmd);
+				err = w1_process_command_slave(sl, msg, m, cmd);
 			else
-				w1_process_command_master(dev, msg, m, cmd);
+				err = w1_process_command_master(dev, msg, m, cmd);
+
+			w1_netlink_send_error(msg, m, cmd, err);
+			err = 0;
 
 			cmd_data += cmd->len + sizeof(struct w1_netlink_cmd);
 			mlen -= cmd->len + sizeof(struct w1_netlink_cmd);
@@ -200,6 +386,8 @@
 			atomic_dec(&sl->refcnt);
 		mutex_unlock(&dev->mutex);
 out_cont:
+		if (!cmd || err)
+			w1_netlink_send_error(msg, m, cmd, err);
 		msg->len -= sizeof(struct w1_netlink_msg) + m->len;
 		m = (struct w1_netlink_msg *)(((u8 *)m) + sizeof(struct w1_netlink_msg) + m->len);
 
@@ -209,11 +397,6 @@
 		if (err == -ENODEV)
 			err = 0;
 	}
-#if 0
-	if (err) {
-		printk("%s: malformed message. Dropping.\n", __func__);
-	}
-#endif
 }
 
 int w1_init_netlink(void)
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h
index 56122b9..27e950f 100644
--- a/drivers/w1/w1_netlink.h
+++ b/drivers/w1/w1_netlink.h
@@ -34,12 +34,13 @@
 	W1_MASTER_REMOVE,
 	W1_MASTER_CMD,
 	W1_SLAVE_CMD,
+	W1_LIST_MASTERS,
 };
 
 struct w1_netlink_msg
 {
 	__u8				type;
-	__u8				reserved;
+	__u8				status;
 	__u16				len;
 	union {
 		__u8			id[8];
@@ -51,10 +52,15 @@
 	__u8				data[0];
 };
 
-#define W1_CMD_READ		0x0
-#define W1_CMD_WRITE		0x1
-#define W1_CMD_SEARCH		0x2
-#define W1_CMD_ALARM_SEARCH	0x3
+enum w1_commands {
+	W1_CMD_READ = 0,
+	W1_CMD_WRITE,
+	W1_CMD_SEARCH,
+	W1_CMD_ALARM_SEARCH,
+	W1_CMD_TOUCH,
+	W1_CMD_RESET,
+	W1_CMD_MAX,
+};
 
 struct w1_netlink_cmd
 {
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ec68c74..3efa12f 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -770,6 +770,12 @@
 
 # POWERPC Architecture
 
+config GEF_WDT
+	tristate "GE Fanuc Watchdog Timer"
+	depends on GEF_SBC610
+	---help---
+	  Watchdog timer found in a number of GE Fanuc single board computers.
+
 config MPC5200_WDT
 	tristate "MPC5200 Watchdog Timer"
 	depends on PPC_MPC52xx
@@ -790,6 +796,14 @@
 	tristate "MV64X60 (Marvell Discovery) Watchdog Timer"
 	depends on MV64X60
 
+config PIKA_WDT
+	tristate "PIKA FPGA Watchdog"
+	depends on WARP
+	default y
+	help
+	  This enables the watchdog in the PIKA FPGA. Currently used on
+	  the Warp platform.
+
 config BOOKE_WDT
 	bool "PowerPC Book-E Watchdog Timer"
 	depends on BOOKE || 4xx
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index c19b866..806b3eb 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -111,9 +111,11 @@
 # PARISC Architecture
 
 # POWERPC Architecture
+obj-$(CONFIG_GEF_WDT) += gef_wdt.o
 obj-$(CONFIG_MPC5200_WDT) += mpc5200_wdt.o
 obj-$(CONFIG_8xxx_WDT) += mpc8xxx_wdt.o
 obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o
+obj-$(CONFIG_PIKA_WDT) += pika_wdt.o
 obj-$(CONFIG_BOOKE_WDT) += booke_wdt.o
 
 # PPC64 Architecture
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
new file mode 100644
index 0000000..f0c2b7a
--- /dev/null
+++ b/drivers/watchdog/gef_wdt.c
@@ -0,0 +1,330 @@
+/*
+ * GE Fanuc watchdog userspace interface
+ *
+ * Author:  Martyn Welch <martyn.welch@gefanuc.com>
+ *
+ * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * Based on: mv64x60_wdt.c (MV64X60 watchdog userspace interface)
+ *   Author: James Chapman <jchapman@katalix.com>
+ */
+
+/* TODO:
+ * This driver does not provide support for the hardware's capability of
+ * sending an interrupt at a programmable threshold.
+ *
+ * This driver currently supports only one watchdog, although the hardware
+ * provides two. One could therefore be configured as a process-based
+ * watchdog (via /dev/watchdog) and the second (using the interrupt
+ * capability) as a kernel-based watchdog.
+ */
+
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <sysdev/fsl_soc.h>
+
+/*
+ * The watchdog configuration register contains a pair of 2-bit fields,
+ *   1.  a reload field, bits 27-26, which triggers a reload of
+ *       the countdown register, and
+ *   2.  an enable field, bits 25-24, which toggles between
+ *       enabling and disabling the watchdog timer.
+ * Bit 31 is a read-only field which indicates whether the
+ * watchdog timer is currently enabled.
+ *
+ * The low 24 bits contain the timer reload value.
+ */
+#define GEF_WDC_ENABLE_SHIFT	24
+#define GEF_WDC_SERVICE_SHIFT	26
+#define GEF_WDC_ENABLED_SHIFT	31
+
+#define GEF_WDC_ENABLED_TRUE	1
+#define GEF_WDC_ENABLED_FALSE	0
+
+/* Flags bits */
+#define GEF_WDOG_FLAG_OPENED	0
+
+static unsigned long wdt_flags;
+static int wdt_status;
+static void __iomem *gef_wdt_regs;
+static int gef_wdt_timeout;
+static int gef_wdt_count;
+static unsigned int bus_clk;
+static char expect_close;
+static DEFINE_SPINLOCK(gef_wdt_spinlock);
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+	__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+
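+/*
+ * Write the sequence 1, then 2 into the selected 2-bit field of the
+ * watchdog configuration register (keeping the reload count in the low
+ * 24 bits).  The sequence is only issued when the current enabled state
+ * matches enabled_predicate; returns 1 if it was issued, 0 otherwise.
+ */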
+static int gef_wdt_toggle_wdc(int enabled_predicate, int field_shift)
+{
+	u32 data;
+	u32 enabled;
+	int ret = 0;
+
+	spin_lock(&gef_wdt_spinlock);
+	data = ioread32be(gef_wdt_regs);
+	enabled = (data >> GEF_WDC_ENABLED_SHIFT) & 1;
+
+	/* only toggle the requested field if enabled state matches predicate */
+	if ((enabled ^ enabled_predicate) == 0) {
+		/* We write a 1, then a 2 -- to the appropriate field */
+		data = (1 << field_shift) | gef_wdt_count;
+		iowrite32be(data, gef_wdt_regs);
+
+		data = (2 << field_shift) | gef_wdt_count;
+		iowrite32be(data, gef_wdt_regs);
+		ret = 1;
+	}
+	spin_unlock(&gef_wdt_spinlock);
+
+	return ret;
+}
+
+static void gef_wdt_service(void)
+{
+	gef_wdt_toggle_wdc(GEF_WDC_ENABLED_TRUE,
+		GEF_WDC_SERVICE_SHIFT);
+}
+
+static void gef_wdt_handler_enable(void)
+{
+	if (gef_wdt_toggle_wdc(GEF_WDC_ENABLED_FALSE,
+				   GEF_WDC_ENABLE_SHIFT)) {
+		gef_wdt_service();
+		printk(KERN_NOTICE "gef_wdt: watchdog activated\n");
+	}
+}
+
+static void gef_wdt_handler_disable(void)
+{
+	if (gef_wdt_toggle_wdc(GEF_WDC_ENABLED_TRUE,
+				   GEF_WDC_ENABLE_SHIFT))
+		printk(KERN_NOTICE "gef_wdt: watchdog deactivated\n");
+}
+
+static void gef_wdt_set_timeout(unsigned int timeout)
+{
+	/* maximum bus cycle count is 0xFFFFFFFF */
+	if (timeout > 0xFFFFFFFF / bus_clk)
+		timeout = 0xFFFFFFFF / bus_clk;
+
+	/* The counter register holds only the upper 24 bits of the
+	 * bus-cycle count, so shift the count down by 8 */
+	gef_wdt_count = (timeout * bus_clk) >> 8;
+	gef_wdt_timeout = timeout;
+}
+
+
+static ssize_t gef_wdt_write(struct file *file, const char __user *data,
+				 size_t len, loff_t *ppos)
+{
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			expect_close = 0;
+
+			for (i = 0; i != len; i++) {
+				char c;
+				if (get_user(c, data + i))
+					return -EFAULT;
+				if (c == 'V')
+					expect_close = 42;
+			}
+		}
+		gef_wdt_service();
+	}
+
+	return len;
+}
+
+static long gef_wdt_ioctl(struct file *file, unsigned int cmd,
+							unsigned long arg)
+{
+	int timeout;
+	int options;
+	void __user *argp = (void __user *)arg;
+	static struct watchdog_info info = {
+		.options =	WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE |
+				WDIOF_KEEPALIVEPING,
+		.firmware_version = 0,
+		.identity = "GE Fanuc watchdog",
+	};
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		if (copy_to_user(argp, &info, sizeof(info)))
+			return -EFAULT;
+		break;
+
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		if (put_user(wdt_status, (int __user *)argp))
+			return -EFAULT;
+		wdt_status &= ~WDIOF_KEEPALIVEPING;
+		break;
+
+	case WDIOC_SETOPTIONS:
+		if (get_user(options, (int __user *)argp))
+			return -EFAULT;
+
+		if (options & WDIOS_DISABLECARD)
+			gef_wdt_handler_disable();
+
+		if (options & WDIOS_ENABLECARD)
+			gef_wdt_handler_enable();
+		break;
+
+	case WDIOC_KEEPALIVE:
+		gef_wdt_service();
+		wdt_status |= WDIOF_KEEPALIVEPING;
+		break;
+
+	case WDIOC_SETTIMEOUT:
+		if (get_user(timeout, (int __user *)argp))
+			return -EFAULT;
+		gef_wdt_set_timeout(timeout);
+		/* Fall through */
+
+	case WDIOC_GETTIMEOUT:
+		if (put_user(gef_wdt_timeout, (int __user *)argp))
+			return -EFAULT;
+		break;
+
+	default:
+		return -ENOTTY;
+	}
+
+	return 0;
+}
+
+static int gef_wdt_open(struct inode *inode, struct file *file)
+{
+	if (test_and_set_bit(GEF_WDOG_FLAG_OPENED, &wdt_flags))
+		return -EBUSY;
+
+	if (nowayout)
+		__module_get(THIS_MODULE);
+
+	gef_wdt_handler_enable();
+
+	return nonseekable_open(inode, file);
+}
+
+static int gef_wdt_release(struct inode *inode, struct file *file)
+{
+	if (expect_close == 42)
+		gef_wdt_handler_disable();
+	else {
+		printk(KERN_CRIT
+		       "gef_wdt: unexpected close, not stopping timer!\n");
+		gef_wdt_service();
+	}
+	expect_close = 0;
+
+	clear_bit(GEF_WDOG_FLAG_OPENED, &wdt_flags);
+
+	return 0;
+}
+
+static const struct file_operations gef_wdt_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.write = gef_wdt_write,
+	.unlocked_ioctl = gef_wdt_ioctl,
+	.open = gef_wdt_open,
+	.release = gef_wdt_release,
+};
+
+static struct miscdevice gef_wdt_miscdev = {
+	.minor = WATCHDOG_MINOR,
+	.name = "watchdog",
+	.fops = &gef_wdt_fops,
+};
+
+
+static int __devinit gef_wdt_probe(struct of_device *dev,
+	const struct of_device_id *match)
+{
+	int timeout = 10;
+	u32 freq;
+
+	bus_clk = 133; /* in MHz */
+
+	freq = fsl_get_sys_freq();
+	if (freq > 0)
+		bus_clk = freq;
+
+	/* Map devices registers into memory */
+	gef_wdt_regs = of_iomap(dev->node, 0);
+	if (gef_wdt_regs == NULL)
+		return -ENOMEM;
+
+	gef_wdt_set_timeout(timeout);
+
+	gef_wdt_handler_disable();	/* in case timer was already running */
+
+	return misc_register(&gef_wdt_miscdev);
+}
+
+static int __devexit gef_wdt_remove(struct of_device *dev)
+{
+	misc_deregister(&gef_wdt_miscdev);
+
+	gef_wdt_handler_disable();
+
+	iounmap(gef_wdt_regs);
+
+	return 0;
+}
+
+static const struct of_device_id gef_wdt_ids[] = {
+	{
+		.compatible = "gef,fpga-wdt",
+	},
+	{},
+};
+
+static struct of_platform_driver gef_wdt_driver = {
+	.owner		= THIS_MODULE,
+	.name		= "gef_wdt",
+	.match_table	= gef_wdt_ids,
+	.probe		= gef_wdt_probe,
+	.remove		= __devexit_p(gef_wdt_remove),
+};
+
+static int __init gef_wdt_init(void)
+{
+	printk(KERN_INFO "GE Fanuc watchdog driver\n");
+	return of_register_platform_driver(&gef_wdt_driver);
+}
+
+static void __exit gef_wdt_exit(void)
+{
+	of_unregister_platform_driver(&gef_wdt_driver);
+}
+
+module_init(gef_wdt_init);
+module_exit(gef_wdt_exit);
+
+MODULE_AUTHOR("Martyn Welch <martyn.welch@gefanuc.com>");
+MODULE_DESCRIPTION("GE Fanuc watchdog driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform: gef_wdt");
diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c
new file mode 100644
index 0000000..2d22e99
--- /dev/null
+++ b/drivers/watchdog/pika_wdt.c
@@ -0,0 +1,301 @@
+/*
+ * PIKA FPGA based Watchdog Timer
+ *
+ * Copyright (c) 2008 PIKA Technologies
+ *   Sean MacLennan <smaclennan@pikatech.com>
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/reboot.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+
+#define DRV_NAME "PIKA-WDT"
+#define PFX DRV_NAME ": "
+
+/* Hardware timeout in seconds */
+#define WDT_HW_TIMEOUT 2
+
+/* Timer heartbeat (500ms) */
+#define WDT_TIMEOUT	(HZ/2)
+
+/* User land timeout */
+#define WDT_HEARTBEAT 15
+static int heartbeat = WDT_HEARTBEAT;
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. "
+	"(default = " __MODULE_STRING(WDT_HEARTBEAT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+	"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
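+/*
+ * The FPGA watchdog is run with a short hardware timeout (WDT_HW_TIMEOUT
+ * seconds), so a kernel timer (pikawdt_ping) re-arms it every 500ms for
+ * as long as userspace has pinged the driver within the last 'heartbeat'
+ * seconds (or the device has never been opened and nowayout is not set).
+ */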
+static struct {
+	void __iomem *fpga;
+	unsigned long next_heartbeat;	/* the next_heartbeat for the timer */
+	unsigned long open;
+	char expect_close;
+	int bootstatus;
+	struct timer_list timer;	/* The timer that pings the watchdog */
+} pikawdt_private;
+
+static struct watchdog_info ident = {
+	.identity	= DRV_NAME,
+	.options	= WDIOF_CARDRESET |
+			  WDIOF_SETTIMEOUT |
+			  WDIOF_KEEPALIVEPING |
+			  WDIOF_MAGICCLOSE,
+};
+
+/*
+ * Reload the watchdog timer.  (ie, pat the watchdog)
+ */
+static inline void pikawdt_reset(void)
+{
+	/* -- FPGA: Reset Control Register (32bit R/W) (Offset: 0x14) --
+	 * Bit 7,    WTCHDG_EN: When set to 1, the watchdog timer is enabled.
+	 *           Once enabled, it cannot be disabled. The watchdog can be
+	 *           kicked by performing any write access to the reset
+	 *           control register (this register).
+	 * Bit 8-11, WTCHDG_TIMEOUT_SEC: Sets the watchdog timeout value in
+	 *           seconds. Valid ranges are 1 to 15 seconds. The value can
+	 *           be modified dynamically.
+	 */
+	unsigned reset = in_be32(pikawdt_private.fpga + 0x14);
+	/* enable, with the hardware timeout set to WDT_HW_TIMEOUT seconds */
+	reset |= (1 << 7) + (WDT_HW_TIMEOUT << 8);
+	out_be32(pikawdt_private.fpga + 0x14, reset);
+}
+
+/*
+ * Timer tick
+ */
+static void pikawdt_ping(unsigned long data)
+{
+	if (time_before(jiffies, pikawdt_private.next_heartbeat) ||
+			(!nowayout && !pikawdt_private.open)) {
+		pikawdt_reset();
+		mod_timer(&pikawdt_private.timer, jiffies + WDT_TIMEOUT);
+	} else
+		printk(KERN_CRIT PFX "I will reset your machine !\n");
+}
+
+
+static void pikawdt_keepalive(void)
+{
+	pikawdt_private.next_heartbeat = jiffies + heartbeat * HZ;
+}
+
+static void pikawdt_start(void)
+{
+	pikawdt_keepalive();
+	mod_timer(&pikawdt_private.timer, jiffies + WDT_TIMEOUT);
+}
+
+/*
+ * Watchdog device is opened, and watchdog starts running.
+ */
+static int pikawdt_open(struct inode *inode, struct file *file)
+{
+	/* /dev/watchdog can only be opened once */
+	if (test_and_set_bit(0, &pikawdt_private.open))
+		return -EBUSY;
+
+	pikawdt_start();
+
+	return nonseekable_open(inode, file);
+}
+
+/*
+ * Close the watchdog device.
+ */
+static int pikawdt_release(struct inode *inode, struct file *file)
+{
+	/* stop internal ping */
+	if (!pikawdt_private.expect_close)
+		del_timer(&pikawdt_private.timer);
+
+	clear_bit(0, &pikawdt_private.open);
+	pikawdt_private.expect_close = 0;
+	return 0;
+}
+
+/*
+ * Pat the watchdog whenever device is written to.
+ */
+static ssize_t pikawdt_write(struct file *file, const char __user *data,
+			     size_t len, loff_t *ppos)
+{
+	if (!len)
+		return 0;
+
+	/* Scan for magic character */
+	if (!nowayout) {
+		size_t i;
+
+		pikawdt_private.expect_close = 0;
+
+		for (i = 0; i < len; i++) {
+			char c;
+			if (get_user(c, data + i))
+				return -EFAULT;
+			if (c == 'V') {
+				pikawdt_private.expect_close = 42;
+				break;
+			}
+		}
+	}
+
+	pikawdt_keepalive();
+
+	return len;
+}
+
+/*
+ * Handle commands from user-space.
+ */
+static long pikawdt_ioctl(struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	int __user *p = argp;
+	int new_value;
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
+
+	case WDIOC_GETSTATUS:
+		return put_user(0, p);
+
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(pikawdt_private.bootstatus, p);
+
+	case WDIOC_KEEPALIVE:
+		pikawdt_keepalive();
+		return 0;
+
+	case WDIOC_SETTIMEOUT:
+		if (get_user(new_value, p))
+			return -EFAULT;
+
+		heartbeat = new_value;
+		pikawdt_keepalive();
+
+		return put_user(new_value, p);  /* return current value */
+
+	case WDIOC_GETTIMEOUT:
+		return put_user(heartbeat, p);
+	}
+	return -ENOTTY;
+}
+
+
+static const struct file_operations pikawdt_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.open		= pikawdt_open,
+	.release	= pikawdt_release,
+	.write		= pikawdt_write,
+	.unlocked_ioctl	= pikawdt_ioctl,
+};
+
+static struct miscdevice pikawdt_miscdev = {
+	.minor	= WATCHDOG_MINOR,
+	.name	= "watchdog",
+	.fops	= &pikawdt_fops,
+};
+
+static int __init pikawdt_init(void)
+{
+	struct device_node *np;
+	void __iomem *fpga;
+	static u32 post1;
+	int ret;
+
+	np = of_find_compatible_node(NULL, NULL, "pika,fpga");
+	if (np == NULL) {
+		printk(KERN_ERR PFX "Unable to find fpga.\n");
+		return -ENOENT;
+	}
+
+	pikawdt_private.fpga = of_iomap(np, 0);
+	of_node_put(np);
+	if (pikawdt_private.fpga == NULL) {
+		printk(KERN_ERR PFX "Unable to map fpga.\n");
+		return -ENOMEM;
+	}
+
+	ident.firmware_version = in_be32(pikawdt_private.fpga + 0x1c) & 0xffff;
+
+	/* POST information is in the sd area. */
+	np = of_find_compatible_node(NULL, NULL, "pika,fpga-sd");
+	if (np == NULL) {
+		printk(KERN_ERR PFX "Unable to find fpga-sd.\n");
+		ret = -ENOENT;
+		goto out;
+	}
+
+	fpga = of_iomap(np, 0);
+	of_node_put(np);
+	if (fpga == NULL) {
+		printk(KERN_ERR PFX "Unable to map fpga-sd.\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* -- FPGA: POST Test Results Register 1 (32bit R/W) (Offset: 0x4040) --
+	 * Bit 31,   WDOG: Set to 1 when the last reset was caused by a watchdog
+	 *           timeout.
+	 */
+	post1 = in_be32(fpga + 0x40);
+	if (post1 & 0x80000000)
+		pikawdt_private.bootstatus = WDIOF_CARDRESET;
+
+	iounmap(fpga);
+
+	setup_timer(&pikawdt_private.timer, pikawdt_ping, 0);
+
+	ret = misc_register(&pikawdt_miscdev);
+	if (ret) {
+		printk(KERN_ERR PFX "Unable to register miscdev.\n");
+		goto out;
+	}
+
+	printk(KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n",
+							heartbeat, nowayout);
+	return 0;
+
+out:
+	iounmap(pikawdt_private.fpga);
+	return ret;
+}
+
+static void __exit pikawdt_exit(void)
+{
+	misc_deregister(&pikawdt_miscdev);
+
+	iounmap(pikawdt_private.fpga);
+}
+
+module_init(pikawdt_init);
+module_exit(pikawdt_exit);
+
+MODULE_AUTHOR("Sean MacLennan <smaclennan@pikatech.com>");
+MODULE_DESCRIPTION("PIKA FPGA based Watchdog Timer");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+
diff --git a/drivers/watchdog/wm8350_wdt.c b/drivers/watchdog/wm8350_wdt.c
index 2bc0d4d..a2d2e8e 100644
--- a/drivers/watchdog/wm8350_wdt.c
+++ b/drivers/watchdog/wm8350_wdt.c
@@ -279,7 +279,7 @@
 	.fops = &wm8350_wdt_fops,
 };
 
-static int wm8350_wdt_probe(struct platform_device *pdev)
+static int __devinit wm8350_wdt_probe(struct platform_device *pdev)
 {
 	struct wm8350 *wm8350 = platform_get_drvdata(pdev);
 
@@ -296,7 +296,7 @@
 	return misc_register(&wm8350_wdt_miscdev);
 }
 
-static int __exit wm8350_wdt_remove(struct platform_device *pdev)
+static int __devexit wm8350_wdt_remove(struct platform_device *pdev)
 {
 	misc_deregister(&wm8350_wdt_miscdev);
 
@@ -305,7 +305,7 @@
 
 static struct platform_driver wm8350_wdt_driver = {
 	.probe = wm8350_wdt_probe,
-	.remove = wm8350_wdt_remove,
+	.remove = __devexit_p(wm8350_wdt_remove),
 	.driver = {
 		.name = "wm8350-wdt",
 	},
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 4b75a16..526187c 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -17,3 +17,27 @@
 	  is not accidentally visible to other domains.  Is it more
 	  secure, but slightly less efficient.
 	  If in doubt, say yes.
+
+config XENFS
+	tristate "Xen filesystem"
+	depends on XEN
+	default y
+	help
+	  The xen filesystem provides a way for domains to share
+	  information with each other and with the hypervisor.
+	  For example, by reading and writing the "xenbus" file, guests
+	  may pass arbitrary information to the initial domain.
+	  If in doubt, say yes.
+
+config XEN_COMPAT_XENFS
+       bool "Create compatibility mount point /proc/xen"
+       depends on XENFS
+       default y
+       help
+         The old xenstore userspace tools expect to find "xenbus"
+         under /proc/xen, but "xenbus" is now found at the root of the
+         xenfs filesystem.  Selecting this causes the kernel to create
+         the compatibility mount point /proc/xen if it is running on
+         a Xen platform.
+         If in doubt, say yes.
+
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index d2a8fdf..ff8accc 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,5 +1,7 @@
 obj-y	+= grant-table.o features.o events.o manage.o
 obj-y	+= xenbus/
+
 obj-$(CONFIG_HOTPLUG_CPU)	+= cpu_hotplug.o
 obj-$(CONFIG_XEN_XENCOMM)	+= xencomm.o
 obj-$(CONFIG_XEN_BALLOON)	+= balloon.o
+obj-$(CONFIG_XENFS)		+= xenfs/
\ No newline at end of file
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 9678b3e..92a1ef8 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -136,7 +136,6 @@
 /**
  * xenbus_switch_state
  * @dev: xenbus device
- * @xbt: transaction handle
  * @state: new state
  *
  * Advertise in the store a change of the given driver to the given new_state.
@@ -267,7 +266,7 @@
  * @fmt: error message format
  *
  * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
- * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly
+ * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
  * closedown of this driver and its peer.
  */
 
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index b2a0318..773d1cf 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -40,6 +40,7 @@
 #include <linux/ctype.h>
 #include <linux/fcntl.h>
 #include <linux/mm.h>
+#include <linux/proc_fs.h>
 #include <linux/notifier.h>
 #include <linux/kthread.h>
 #include <linux/mutex.h>
@@ -55,7 +56,10 @@
 #include "xenbus_comms.h"
 #include "xenbus_probe.h"
 
+
 int xen_store_evtchn;
+EXPORT_SYMBOL(xen_store_evtchn);
+
 struct xenstore_domain_interface *xen_store_interface;
 static unsigned long xen_store_mfn;
 
@@ -166,6 +170,9 @@
 	return read_otherend_details(xendev, "backend-id", "backend");
 }
 
+static struct device_attribute xenbus_dev_attrs[] = {
+	__ATTR_NULL
+};
 
 /* Bus type for frontend drivers. */
 static struct xen_bus_type xenbus_frontend = {
@@ -174,12 +181,13 @@
 	.get_bus_id = frontend_bus_id,
 	.probe = xenbus_probe_frontend,
 	.bus = {
-		.name     = "xen",
-		.match    = xenbus_match,
-		.uevent   = xenbus_uevent,
-		.probe    = xenbus_dev_probe,
-		.remove   = xenbus_dev_remove,
-		.shutdown = xenbus_dev_shutdown,
+		.name      = "xen",
+		.match     = xenbus_match,
+		.uevent    = xenbus_uevent,
+		.probe     = xenbus_dev_probe,
+		.remove    = xenbus_dev_remove,
+		.shutdown  = xenbus_dev_shutdown,
+		.dev_attrs = xenbus_dev_attrs,
 	},
 };
 
@@ -852,6 +860,14 @@
 	if (!xen_initial_domain())
 		xenbus_probe(NULL);
 
+#ifdef CONFIG_XEN_COMPAT_XENFS
+	/*
+	 * Create xenfs mountpoint in /proc for compatibility with
+	 * utilities that expect to find "xenbus" under "/proc/xen".
+	 */
+	proc_mkdir("xen", NULL);
+#endif
+
 	return 0;
 
   out_unreg_back:
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 7f2f91c..e325eab 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -184,6 +184,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL(xenbus_dev_request_and_reply);
 
 /* Send message to xs, get kmalloc'ed reply.  ERR_PTR() on error. */
 static void *xs_talkv(struct xenbus_transaction t,
diff --git a/drivers/xen/xenfs/Makefile b/drivers/xen/xenfs/Makefile
new file mode 100644
index 0000000..25275c3
--- /dev/null
+++ b/drivers/xen/xenfs/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_XENFS) += xenfs.o
+
+xenfs-objs = super.o xenbus.o
\ No newline at end of file
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
new file mode 100644
index 0000000..515741a
--- /dev/null
+++ b/drivers/xen/xenfs/super.c
@@ -0,0 +1,64 @@
+/*
+ *  xenfs - a filesystem for passing info between a domain and
+ *  the hypervisor.
+ *
+ * 2008-10-07  Alex Zeffertt    Replaced /proc/xen/xenbus with xenfs filesystem
+ *                              and /proc/xen compatibility mount point.
+ *                              Turned xenfs into a loadable module.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/magic.h>
+
+#include "xenfs.h"
+
+#include <asm/xen/hypervisor.h>
+
+MODULE_DESCRIPTION("Xen filesystem");
+MODULE_LICENSE("GPL");
+
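+/*
+ * xenfs currently exposes a single file, "xenbus", which gives userspace
+ * access to the kernel's connection to xenstore.
+ */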
+static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	static struct tree_descr xenfs_files[] = {
+		[2] = {"xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR},
+		{""},
+	};
+
+	return simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files);
+}
+
+static int xenfs_get_sb(struct file_system_type *fs_type,
+			int flags, const char *dev_name,
+			void *data, struct vfsmount *mnt)
+{
+	return get_sb_single(fs_type, flags, data, xenfs_fill_super, mnt);
+}
+
+static struct file_system_type xenfs_type = {
+	.owner =	THIS_MODULE,
+	.name =		"xenfs",
+	.get_sb =	xenfs_get_sb,
+	.kill_sb =	kill_litter_super,
+};
+
+static int __init xenfs_init(void)
+{
+	if (xen_pv_domain())
+		return register_filesystem(&xenfs_type);
+
+	printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n");
+	return 0;
+}
+
+static void __exit xenfs_exit(void)
+{
+	if (xen_pv_domain())
+		unregister_filesystem(&xenfs_type);
+}
+
+module_init(xenfs_init);
+module_exit(xenfs_exit);
+
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
new file mode 100644
index 0000000..875a4c5
--- /dev/null
+++ b/drivers/xen/xenfs/xenbus.c
@@ -0,0 +1,593 @@
+/*
+ * Driver giving user-space access to the kernel's xenbus connection
+ * to xenstore.
+ *
+ * Copyright (c) 2005, Christian Limpach
+ * Copyright (c) 2005, Rusty Russell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Changes:
+ * 2008-10-07  Alex Zeffertt    Replaced /proc/xen/xenbus with xenfs filesystem
+ *                              and /proc/xen compatibility mount point.
+ *                              Turned xenfs into a loadable module.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/uio.h>
+#include <linux/notifier.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+#include <linux/string.h>
+
+#include "xenfs.h"
+#include "../xenbus/xenbus_comms.h"
+
+#include <xen/xenbus.h>
+#include <asm/xen/hypervisor.h>
+
+/*
+ * An element of a list of outstanding transactions, for which we're
+ * still waiting a reply.
+ */
+struct xenbus_transaction_holder {
+	struct list_head list;
+	struct xenbus_transaction handle;
+};
+
+/*
+ * A buffer of data on the queue.
+ */
+struct read_buffer {
+	struct list_head list;
+	unsigned int cons;
+	unsigned int len;
+	char msg[];
+};
+
+struct xenbus_file_priv {
+	/*
+	 * msgbuffer_mutex is held while partial requests are built up
+	 * and complete requests are acted on.  It therefore protects
+	 * the "transactions" and "watches" lists, and the partial
+	 * request length and buffer.
+	 *
+	 * reply_mutex protects the reply being built up to return to
+	 * usermode.  It nests inside msgbuffer_mutex but may be held
+	 * alone during a watch callback.
+	 */
+	struct mutex msgbuffer_mutex;
+
+	/* In-progress transactions */
+	struct list_head transactions;
+
+	/* Active watches. */
+	struct list_head watches;
+
+	/* Partial request. */
+	unsigned int len;
+	union {
+		struct xsd_sockmsg msg;
+		char buffer[PAGE_SIZE];
+	} u;
+
+	/* Response queue. */
+	struct mutex reply_mutex;
+	struct list_head read_buffers;
+	wait_queue_head_t read_waitq;
+
+};
+
+/* Read out any raw xenbus messages queued up. */
+static ssize_t xenbus_file_read(struct file *filp,
+			       char __user *ubuf,
+			       size_t len, loff_t *ppos)
+{
+	struct xenbus_file_priv *u = filp->private_data;
+	struct read_buffer *rb;
+	unsigned i;
+	int ret;
+
+	mutex_lock(&u->reply_mutex);
+	while (list_empty(&u->read_buffers)) {
+		mutex_unlock(&u->reply_mutex);
+		ret = wait_event_interruptible(u->read_waitq,
+					       !list_empty(&u->read_buffers));
+		if (ret)
+			return ret;
+		mutex_lock(&u->reply_mutex);
+	}
+
+	rb = list_entry(u->read_buffers.next, struct read_buffer, list);
+	i = 0;
+	while (i < len) {
+		unsigned sz = min((unsigned)len - i, rb->len - rb->cons);
+
+		ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
+
+		i += sz - ret;
+		rb->cons += sz - ret;
+
+		if (ret != 0) {
+			if (i == 0)
+				i = -EFAULT;
+			goto out;
+		}
+
+		/* Clear out buffer if it has been consumed */
+		if (rb->cons == rb->len) {
+			list_del(&rb->list);
+			kfree(rb);
+			if (list_empty(&u->read_buffers))
+				break;
+			rb = list_entry(u->read_buffers.next,
+					struct read_buffer, list);
+		}
+	}
+
+out:
+	mutex_unlock(&u->reply_mutex);
+	return i;
+}
+
+/*
+ * Add a buffer to the queue.  Caller must hold the appropriate lock
+ * if the queue is not local.  (Commonly the caller will build up
+ * multiple queued buffers on a temporary local list, and then add it
+ * to the appropriate list under lock once all the buffers have been
+ * successfully allocated.)
+ */
+static int queue_reply(struct list_head *queue, const void *data, size_t len)
+{
+	struct read_buffer *rb;
+
+	if (len == 0)
+		return 0;
+
+	rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
+	if (rb == NULL)
+		return -ENOMEM;
+
+	rb->cons = 0;
+	rb->len = len;
+
+	memcpy(rb->msg, data, len);
+
+	list_add_tail(&rb->list, queue);
+	return 0;
+}
+
+/*
+ * Free all the read_buffers on a list.
+ * Caller must have sole reference to list.
+ */
+static void queue_cleanup(struct list_head *list)
+{
+	struct read_buffer *rb;
+
+	while (!list_empty(list)) {
+		rb = list_entry(list->next, struct read_buffer, list);
+		list_del(list->next);
+		kfree(rb);
+	}
+}
+
+struct watch_adapter {
+	struct list_head list;
+	struct xenbus_watch watch;
+	struct xenbus_file_priv *dev_data;
+	char *token;
+};
+
+static void free_watch_adapter(struct watch_adapter *watch)
+{
+	kfree(watch->watch.node);
+	kfree(watch->token);
+	kfree(watch);
+}
+
+static struct watch_adapter *alloc_watch_adapter(const char *path,
+						 const char *token)
+{
+	struct watch_adapter *watch;
+
+	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
+	if (watch == NULL)
+		goto out_fail;
+
+	watch->watch.node = kstrdup(path, GFP_KERNEL);
+	if (watch->watch.node == NULL)
+		goto out_free;
+
+	watch->token = kstrdup(token, GFP_KERNEL);
+	if (watch->token == NULL)
+		goto out_free;
+
+	return watch;
+
+out_free:
+	free_watch_adapter(watch);
+
+out_fail:
+	return NULL;
+}
+
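+/*
+ * Watch callback: package the fired path, the token the user registered
+ * with, and any extra data into an XS_WATCH_EVENT message and queue it
+ * on the owning file's read queue.
+ */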
+static void watch_fired(struct xenbus_watch *watch,
+			const char **vec,
+			unsigned int len)
+{
+	struct watch_adapter *adap;
+	struct xsd_sockmsg hdr;
+	const char *path, *token;
+	int path_len, tok_len, body_len, data_len = 0;
+	int ret;
+	LIST_HEAD(staging_q);
+
+	adap = container_of(watch, struct watch_adapter, watch);
+
+	path = vec[XS_WATCH_PATH];
+	token = adap->token;
+
+	path_len = strlen(path) + 1;
+	tok_len = strlen(token) + 1;
+	if (len > 2)
+		data_len = vec[len] - vec[2] + 1;
+	body_len = path_len + tok_len + data_len;
+
+	hdr.type = XS_WATCH_EVENT;
+	hdr.len = body_len;
+
+	mutex_lock(&adap->dev_data->reply_mutex);
+
+	ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
+	if (!ret)
+		ret = queue_reply(&staging_q, path, path_len);
+	if (!ret)
+		ret = queue_reply(&staging_q, token, tok_len);
+	if (!ret && len > 2)
+		ret = queue_reply(&staging_q, vec[2], data_len);
+
+	if (!ret) {
+		/* success: pass reply list onto watcher */
+		list_splice_tail(&staging_q, &adap->dev_data->read_buffers);
+		wake_up(&adap->dev_data->read_waitq);
+	} else
+		queue_cleanup(&staging_q);
+
+	mutex_unlock(&adap->dev_data->reply_mutex);
+}
+
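+/*
+ * Forward a complete xenstore request to xenstored via
+ * xenbus_dev_request_and_reply(), keep track of transaction start/end so
+ * anything left open can be cleaned up on release, and queue the reply
+ * for the next read() on this file.
+ */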
+static int xenbus_write_transaction(unsigned msg_type,
+				    struct xenbus_file_priv *u)
+{
+	int rc = 0, ret;
+	void *reply;
+	struct xenbus_transaction_holder *trans = NULL;
+	LIST_HEAD(staging_q);
+
+	if (msg_type == XS_TRANSACTION_START) {
+		trans = kmalloc(sizeof(*trans), GFP_KERNEL);
+		if (!trans) {
+			rc = -ENOMEM;
+			goto out;
+		}
+	}
+
+	reply = xenbus_dev_request_and_reply(&u->u.msg);
+	if (IS_ERR(reply)) {
+		kfree(trans);
+		rc = PTR_ERR(reply);
+		goto out;
+	}
+
+	if (msg_type == XS_TRANSACTION_START) {
+		trans->handle.id = simple_strtoul(reply, NULL, 0);
+
+		list_add(&trans->list, &u->transactions);
+	} else if (msg_type == XS_TRANSACTION_END) {
+		list_for_each_entry(trans, &u->transactions, list)
+			if (trans->handle.id == u->u.msg.tx_id)
+				break;
+		BUG_ON(&trans->list == &u->transactions);
+		list_del(&trans->list);
+
+		kfree(trans);
+	}
+
+	mutex_lock(&u->reply_mutex);
+	ret = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
+	if (!ret)
+		ret = queue_reply(&staging_q, reply, u->u.msg.len);
+	if (!ret) {
+		list_splice_tail(&staging_q, &u->read_buffers);
+		wake_up(&u->read_waitq);
+	} else {
+		queue_cleanup(&staging_q);
+		rc = ret;
+	}
+	mutex_unlock(&u->reply_mutex);
+
+	kfree(reply);
+
+out:
+	return rc;
+}
+
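+/*
+ * XS_WATCH/XS_UNWATCH are handled in the kernel rather than forwarded:
+ * (un)register a xenbus watch whose callback (watch_fired) queues
+ * XS_WATCH_EVENT messages on this file's read queue, then synthesize an
+ * "OK" reply for userspace.
+ */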
+static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
+{
+	struct watch_adapter *watch, *tmp_watch;
+	char *path, *token;
+	int err, rc;
+	LIST_HEAD(staging_q);
+
+	path = u->u.buffer + sizeof(u->u.msg);
+	token = memchr(path, 0, u->u.msg.len);
+	if (token == NULL) {
+		rc = -EILSEQ;
+		goto out;
+	}
+	token++;
+
+	if (msg_type == XS_WATCH) {
+		watch = alloc_watch_adapter(path, token);
+		if (watch == NULL) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		watch->watch.callback = watch_fired;
+		watch->dev_data = u;
+
+		err = register_xenbus_watch(&watch->watch);
+		if (err) {
+			free_watch_adapter(watch);
+			rc = err;
+			goto out;
+		}
+		list_add(&watch->list, &u->watches);
+	} else {
+		list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
+			if (!strcmp(watch->token, token) &&
+			    !strcmp(watch->watch.node, path)) {
+				unregister_xenbus_watch(&watch->watch);
+				list_del(&watch->list);
+				free_watch_adapter(watch);
+				break;
+			}
+		}
+	}
+
+	/* Success.  Synthesize a reply to say all is OK. */
+	{
+		struct {
+			struct xsd_sockmsg hdr;
+			char body[3];
+		} __packed reply = {
+			{
+				.type = msg_type,
+				.len = sizeof(reply.body)
+			},
+			"OK"
+		};
+
+		mutex_lock(&u->reply_mutex);
+		rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
+		mutex_unlock(&u->reply_mutex);
+	}
+
+out:
+	return rc;
+}
+
+static ssize_t xenbus_file_write(struct file *filp,
+				const char __user *ubuf,
+				size_t len, loff_t *ppos)
+{
+	struct xenbus_file_priv *u = filp->private_data;
+	uint32_t msg_type;
+	int rc = len;
+	int ret;
+	LIST_HEAD(staging_q);
+
+	/*
+	 * We're expecting usermode to be writing properly formed
+	 * xenbus messages.  If they write an incomplete message we
+	 * buffer it up.  Once it is complete, we act on it.
+	 */
+
+	/*
+	 * Make sure concurrent writers can't stomp all over each
+	 * other's messages and make a mess of our partial message
+	 * buffer.  We don't make any attempt to stop multiple
+	 * writers from making a mess of each other's incomplete
+	 * messages; we're just trying to guarantee our own internal
+	 * consistency and make sure that single writes are handled
+	 * atomically.
+	 */
+	mutex_lock(&u->msgbuffer_mutex);
+
+	/* Get this out of the way early to avoid confusion */
+	if (len == 0)
+		goto out;
+
+	/* Can't write a xenbus message larger than we can buffer */
+	if ((len + u->len) > sizeof(u->u.buffer)) {
+		/* On error, dump existing buffer */
+		u->len = 0;
+		rc = -EINVAL;
+		goto out;
+	}
+
+	ret = copy_from_user(u->u.buffer + u->len, ubuf, len);
+
+	if (ret == len) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	/* Deal with a partial copy. */
+	len -= ret;
+	rc = len;
+
+	u->len += len;
+
+	/* Return if we haven't got a full message yet */
+	if (u->len < sizeof(u->u.msg))
+		goto out;	/* not even the header yet */
+
+	/* If we're expecting a message that's larger than we can
+	   possibly send, dump what we have and return an error. */
+	if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) {
+		rc = -E2BIG;
+		u->len = 0;
+		goto out;
+	}
+
+	if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
+		goto out;	/* incomplete data portion */
+
+	/*
+	 * OK, now we have a complete message.  Do something with it.
+	 */
+
+	msg_type = u->u.msg.type;
+
+	switch (msg_type) {
+	case XS_TRANSACTION_START:
+	case XS_TRANSACTION_END:
+	case XS_DIRECTORY:
+	case XS_READ:
+	case XS_GET_PERMS:
+	case XS_RELEASE:
+	case XS_GET_DOMAIN_PATH:
+	case XS_WRITE:
+	case XS_MKDIR:
+	case XS_RM:
+	case XS_SET_PERMS:
+		/* Send out a transaction */
+		ret = xenbus_write_transaction(msg_type, u);
+		break;
+
+	case XS_WATCH:
+	case XS_UNWATCH:
+		/* (Un)Ask for some path to be watched for changes */
+		ret = xenbus_write_watch(msg_type, u);
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	if (ret != 0)
+		rc = ret;
+
+	/* Buffered message consumed */
+	u->len = 0;
+
+ out:
+	mutex_unlock(&u->msgbuffer_mutex);
+	return rc;
+}
+
+static int xenbus_file_open(struct inode *inode, struct file *filp)
+{
+	struct xenbus_file_priv *u;
+
+	if (xen_store_evtchn == 0)
+		return -ENOENT;
+
+	nonseekable_open(inode, filp);
+
+	u = kzalloc(sizeof(*u), GFP_KERNEL);
+	if (u == NULL)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&u->transactions);
+	INIT_LIST_HEAD(&u->watches);
+	INIT_LIST_HEAD(&u->read_buffers);
+	init_waitqueue_head(&u->read_waitq);
+
+	mutex_init(&u->reply_mutex);
+	mutex_init(&u->msgbuffer_mutex);
+
+	filp->private_data = u;
+
+	return 0;
+}
+
+static int xenbus_file_release(struct inode *inode, struct file *filp)
+{
+	struct xenbus_file_priv *u = filp->private_data;
+	struct xenbus_transaction_holder *trans, *tmp;
+	struct watch_adapter *watch, *tmp_watch;
+
+	/*
+	 * No need for locking here because there are no other users,
+	 * by definition.
+	 */
+
+	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
+		xenbus_transaction_end(trans->handle, 1);
+		list_del(&trans->list);
+		kfree(trans);
+	}
+
+	list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
+		unregister_xenbus_watch(&watch->watch);
+		list_del(&watch->list);
+		free_watch_adapter(watch);
+	}
+
+	kfree(u);
+
+	return 0;
+}
+
+static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
+{
+	struct xenbus_file_priv *u = file->private_data;
+
+	poll_wait(file, &u->read_waitq, wait);
+	if (!list_empty(&u->read_buffers))
+		return POLLIN | POLLRDNORM;
+	return 0;
+}
+
+const struct file_operations xenbus_file_ops = {
+	.read = xenbus_file_read,
+	.write = xenbus_file_write,
+	.open = xenbus_file_open,
+	.release = xenbus_file_release,
+	.poll = xenbus_file_poll,
+};
diff --git a/drivers/xen/xenfs/xenfs.h b/drivers/xen/xenfs/xenfs.h
new file mode 100644
index 0000000..51f08b2
--- /dev/null
+++ b/drivers/xen/xenfs/xenfs.h
@@ -0,0 +1,6 @@
+#ifndef _XENFS_XENBUS_H
+#define _XENFS_XENBUS_H
+
+extern const struct file_operations xenbus_file_ops;
+
+#endif	/* _XENFS_XENBUS_H */
diff --git a/drivers/zorro/.gitignore b/drivers/zorro/.gitignore
new file mode 100644
index 0000000..34f980b
--- /dev/null
+++ b/drivers/zorro/.gitignore
@@ -0,0 +1,2 @@
+devlist.h
+gen-devlist
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index 5290552..1d2a772e 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -77,17 +77,21 @@
 	.read = zorro_read_config,
 };
 
-void zorro_create_sysfs_dev_files(struct zorro_dev *z)
+int zorro_create_sysfs_dev_files(struct zorro_dev *z)
 {
 	struct device *dev = &z->dev;
+	int error;
 
 	/* current configuration's attributes */
-	device_create_file(dev, &dev_attr_id);
-	device_create_file(dev, &dev_attr_type);
-	device_create_file(dev, &dev_attr_serial);
-	device_create_file(dev, &dev_attr_slotaddr);
-	device_create_file(dev, &dev_attr_slotsize);
-	device_create_file(dev, &dev_attr_resource);
-	sysfs_create_bin_file(&dev->kobj, &zorro_config_attr);
+	if ((error = device_create_file(dev, &dev_attr_id)) ||
+	    (error = device_create_file(dev, &dev_attr_type)) ||
+	    (error = device_create_file(dev, &dev_attr_serial)) ||
+	    (error = device_create_file(dev, &dev_attr_slotaddr)) ||
+	    (error = device_create_file(dev, &dev_attr_slotsize)) ||
+	    (error = device_create_file(dev, &dev_attr_resource)) ||
+	    (error = sysfs_create_bin_file(&dev->kobj, &zorro_config_attr)))
+		return error;
+
+	return 0;
 }
 
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index dff16d9..a1585d6 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -130,6 +130,7 @@
 {
     struct zorro_dev *z;
     unsigned int i;
+    int error;
 
     if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO))
 	return 0;
@@ -140,7 +141,11 @@
     /* Initialize the Zorro bus */
     INIT_LIST_HEAD(&zorro_bus.devices);
     strcpy(zorro_bus.dev.bus_id, "zorro");
-    device_register(&zorro_bus.dev);
+    error = device_register(&zorro_bus.dev);
+    if (error) {
+	pr_err("Zorro: Error registering zorro_bus\n");
+	return error;
+    }
 
     /* Request the resources */
     zorro_bus.num_resources = AMIGAHW_PRESENT(ZORRO3) ? 4 : 2;
@@ -160,15 +165,19 @@
 	zorro_name_device(z);
 	z->resource.name = z->name;
 	if (request_resource(zorro_find_parent_resource(z), &z->resource))
-	    printk(KERN_ERR "Zorro: Address space collision on device %s "
-		   "[%lx:%lx]\n",
-		   z->name, (unsigned long)zorro_resource_start(z),
-		   (unsigned long)zorro_resource_end(z));
+	    pr_err("Zorro: Address space collision on device %s %pR\n",
+		   z->name, &z->resource);
 	sprintf(z->dev.bus_id, "%02x", i);
 	z->dev.parent = &zorro_bus.dev;
 	z->dev.bus = &zorro_bus_type;
-	device_register(&z->dev);
-	zorro_create_sysfs_dev_files(z);
+	error = device_register(&z->dev);
+	if (error) {
+	    pr_err("Zorro: Error registering device %s\n", z->name);
+	    continue;
+	}
+	error = zorro_create_sysfs_dev_files(z);
+	if (error)
+	    dev_err(&z->dev, "Error creating sysfs files\n");
     }
 
     /* Mark all available Zorro II memory */
diff --git a/drivers/zorro/zorro.h b/drivers/zorro/zorro.h
index 5c91ada..b682d5c 100644
--- a/drivers/zorro/zorro.h
+++ b/drivers/zorro/zorro.h
@@ -1,4 +1,4 @@
 
 extern void zorro_name_device(struct zorro_dev *z);
-extern void zorro_create_sysfs_dev_files(struct zorro_dev *z);
+extern int zorro_create_sysfs_dev_files(struct zorro_dev *z);
 
diff --git a/firmware/.gitignore b/firmware/.gitignore
index d9c6901..f89a21f 100644
--- a/firmware/.gitignore
+++ b/firmware/.gitignore
@@ -3,4 +3,3 @@
 *.bin
 *.csp
 *.dsp
-ihex2fw
diff --git a/firmware/Makefile b/firmware/Makefile
index d872b79..466106f 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -37,6 +37,8 @@
 				   cxgb3/t3c_psram-1.1.0.bin \
 				   cxgb3/t3fw-7.0.0.bin
 fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin
+fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \
+			     e100/d102e_ucode.bin
 fw-shipped-$(CONFIG_SMCTR) += tr_smctr.bin
 fw-shipped-$(CONFIG_SND_KORG1212) += korg/k1212.dsp
 fw-shipped-$(CONFIG_SND_MAESTRO3) += ess/maestro3_assp_kernel.fw \
@@ -76,7 +78,8 @@
 	keyspan/usa28.fw keyspan/usa28xa.fw keyspan/usa28xb.fw		\
 	keyspan/usa28x.fw keyspan/usa49w.fw keyspan/usa49wlc.fw
 endif
-fw-shipped-$(CONFIG_USB_SERIAL_TI) += ti_3410.fw ti_5052.fw
+fw-shipped-$(CONFIG_USB_SERIAL_TI) += ti_3410.fw ti_5052.fw \
+				      mts_cdma.fw mts_gsm.fw mts_edge.fw
 fw-shipped-$(CONFIG_USB_SERIAL_EDGEPORT) += edgeport/boot.fw edgeport/boot2.fw \
 					    edgeport/down.fw edgeport/down2.fw
 fw-shipped-$(CONFIG_USB_SERIAL_EDGEPORT_TI) += edgeport/down3.bin
@@ -99,10 +102,10 @@
       cmd_ihex  = $(OBJCOPY) -Iihex -Obinary $< $@
 
 quiet_cmd_ihex2fw  = IHEX2FW $@
-      cmd_ihex2fw  = $(objtree)/$(obj)/ihex2fw $< $@
+      cmd_ihex2fw  = $(objtree)/scripts/ihex2fw $< $@
 
 quiet_cmd_h16tofw  = H16TOFW $@
-      cmd_h16tofw  = $(objtree)/$(obj)/ihex2fw -w $< $@
+      cmd_h16tofw  = $(objtree)/scripts/ihex2fw -w $< $@
 
 quiet_cmd_fwbin = MK_FW   $@
       cmd_fwbin = FWNAME="$(patsubst firmware/%.gen.S,%,$@)";		     \
@@ -165,11 +168,11 @@
 # is actually meaningful, because the firmware has to be loaded in a certain
 # order rather than as a single binary blob. Thus, we convert them into our
 # more compact binary representation of ihex records (<linux/ihex.h>)
-$(obj)/%.fw: $(obj)/%.HEX $(obj)/ihex2fw | $(objtree)/$(obj)/$$(dir %)
+$(obj)/%.fw: $(obj)/%.HEX  | $(objtree)/$(obj)/$$(dir %)
 	$(call cmd,ihex2fw)
 
 # .H16 is our own modified form of Intel HEX, with 16-bit length for records.
-$(obj)/%.fw: $(obj)/%.H16 $(obj)/ihex2fw | $(objtree)/$(obj)/$$(dir %)
+$(obj)/%.fw: $(obj)/%.H16 | $(objtree)/$(obj)/$$(dir %)
 	$(call cmd,h16tofw)
 
 $(firmware-dirs):
@@ -186,5 +189,3 @@
 # Without this, built-in.o won't be created when it's empty, and the
 # final vmlinux link will fail.
 obj-n := dummy
-
-hostprogs-y := ihex2fw
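
The Makefile hunk above (and the comment it touches) is about why the .HEX and .H16 inputs are not flattened into a single blob: ihex2fw, now invoked from scripts/, preserves the individual records, and drivers walk them through the <linux/ihex.h> helpers so the load ordering survives. A minimal kernel-side sketch of that consumer loop; the firmware name and device pointer are placeholders, while request_ihex_firmware(), ihex_next_binrec() and release_firmware() are the helpers declared for this format:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/ihex.h>

/* Hypothetical loader walking the binary ihex records emitted by ihex2fw. */
static int example_load_ihex(struct device *dev)
{
	const struct firmware *fw;
	const struct ihex_binrec *rec;
	int err;

	/* "example/blob.fw" is a placeholder, not a file added by this patch. */
	err = request_ihex_firmware(&fw, "example/blob.fw", dev);
	if (err)
		return err;

	/* Each record keeps its own load address and length, in order. */
	for (rec = (const struct ihex_binrec *)fw->data; rec;
	     rec = ihex_next_binrec(rec)) {
		dev_dbg(dev, "record: addr 0x%08x, len %u\n",
			be32_to_cpu(rec->addr), be16_to_cpu(rec->len));
		/* ... hand rec->data to the device at rec->addr ... */
	}

	release_firmware(fw);
	return 0;
}
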
diff --git a/firmware/WHENCE b/firmware/WHENCE
index 1bb2cf4..524113f 100644
--- a/firmware/WHENCE
+++ b/firmware/WHENCE
@@ -191,7 +191,7 @@
 
 --------------------------------------------------------------------------
 
-Driver: tu_usb_3410_5052 -- USB TI 3410/5052 serial device
+Driver: ti_usb_3410_5052 -- USB TI 3410/5052 serial device
 
 File: ti_3410.fw
 Info: firmware 9/10/04 FW3410_Special_StartWdogOnStartPort
@@ -206,6 +206,20 @@
 
 --------------------------------------------------------------------------
 
+Driver: ti_usb_3410_5052 -- Multi-Tech USB cell modems
+
+File: mts_cdma.fw
+File: mts_gsm.fw
+File: mts_edge.fw
+
+Licence: "all firmware components are redistributable in binary form"
+         per support@multitech.com
+	 Copyright (C) 2005 Multi-Tech Systems, Inc.
+
+Found in hex form in ftp://ftp.multitech.com/wireless/wireless_linux.zip
+
+--------------------------------------------------------------------------
+
 Driver: whiteheat -- USB ConnectTech WhiteHEAT serial device
 
 File: whiteheat.fw
@@ -360,6 +374,18 @@
 
 --------------------------------------------------------------------------
 
+Driver: e100 -- Intel PRO/100 Ethernet NIC
+
+File: e100/d101m_ucode.bin
+File: e100/d101s_ucode.bin
+File: e100/d102e_ucode.bin
+
+Licence: Unknown
+
+Found in hex form in kernel source.
+
+--------------------------------------------------------------------------
+
 Driver: acenic -- Alteon AceNIC Gigabit Ethernet card
 
 File: acenic/tg1.bin
diff --git a/firmware/e100/d101m_ucode.bin.ihex b/firmware/e100/d101m_ucode.bin.ihex
new file mode 100644
index 0000000..12971ed
--- /dev/null
+++ b/firmware/e100/d101m_ucode.bin.ihex
@@ -0,0 +1,38 @@
+:10000000150255003704FFFFFFFFFFFF8907A70612
+:10001000FFFFFFFFFFFF580501000C001213100047
+:1000200008000C00160238009C001000564020000A
+:10003000CC802300560038009C0010000B4C24009C
+:1000400000080000184812003804380000000000C2
+:1000500000001400550538000080300062061000D2
+:100060006105100008040E006148130002000C0036
+:10007000933010000080300024061000610510004D
+:1000800008040E00610810007E000C00212C2200E4
+:1000900002000C00933010007A0C380000000800B9
+:1000A000903010007A0C38000000000000000000C2
+:1000B00000000000000000009C0010002D4C2400F7
+:1000C000040001000010040037043A00104004004E
+:1000D0008A07380000000000990010007A6C2000A8
+:1000E0009C001000484C24002408130001000C0060
+:1000F00013121000750C260000100400040001000B
+:100100002608130006000C00A806220026C91300CA
+:1001100013131000A80638000000000000000000C3
+:1001200000000000000000000000000000000000CF
+:10013000000000000000000000060800101B100076
+:10014000040005002608100010121000340C3800BE
+:1001500000000000000000005B1521009900100065
+:10016000596520009C0010005945240036081300F2
+:1001700000000C00620C220001000C00131B100098
+:100180000E9C22000E0C21000E6C22000E6C210031
+:100190000EFC22000E5C21000E4C2100550538009B
+:1001A0000400010000100400678C27000008040010
+:1001B0000081010037043A002608130001000C00FA
+:1001C00059052200131310005905380000000000E3
+:1001D000000000000000000000000000000000001F
+:1001E00000000000000000000000000031081300C3
+:1001F0000B0910001348120080FF0C00AB0626000C
+:100200000010040004000100A806380000000000EF
+:0B02100000000000000000004E417ED6
+:00000001FF
+/********************************************************/
+/*  Micro code for 8086:1229 Rev 8                      */
+/********************************************************/
diff --git a/firmware/e100/d101s_ucode.bin.ihex b/firmware/e100/d101s_ucode.bin.ihex
new file mode 100644
index 0000000..102c7fe
--- /dev/null
+++ b/firmware/e100/d101s_ucode.bin.ihex
@@ -0,0 +1,38 @@
+:10000000420255007E04FFFFFFFFFFFF1808FF06B6
+:10001000FFFFFFFFFFFFA60501000C0012131000F9
+:1000200008000C00430238009C00100056402000DD
+:10003000D0802300560038009C0010008B4F240015
+:1000400000080000184812007F043800000000007B
+:1000500000001400A30538000080300010061000D6
+:100060006105100008040E006148130002000C0036
+:10007000933010000080300024061000610510004D
+:1000800008040E00610810007E000C00A12F220061
+:1000900002000C0093301000900F380000000800A0
+:1000A00090301000900F38000000000000000000A9
+:1000B00000000000000000009C001000AD4F240074
+:1000C00004000100001004007E043A001040040007
+:1000D000190838000000000099001000FD6F200092
+:1000E0009A001000FDAF20009C001000C84F2400B3
+:1000F0002408130001000C0013121000F70F260053
+:1001000000100400040001002608130006000C0083
+:100110000007220026C9130013131000000738003F
+:1001200000000000000000000000000000000000CF
+:10013000000000000000000000060800101B100076
+:10014000040005002608100010121000B60F380039
+:100150000000000000000000A91521009900100017
+:10016000A76520009A001000A7A520009C001000A1
+:10017000A74524003608130000000C00E40F2200FD
+:1001800001000C00131B10008E9F22008E0F210017
+:100190008E6F22008E6F21008EFF22008E5F210065
+:1001A0008E4F2100A3053800040001000010040058
+:1001B000E98F270000080400008101007E043A0056
+:1001C0002608130001000C00A705220013131000DD
+:1001D000A70538000000000000000000000000003B
+:1001E000000000000000000000000000000000000F
+:1001F00000000000310813000B0910001348120022
+:1002000080FF0C000307260000100400040001001A
+:0B02100000073800000000004E438093
+:00000001FF
+/********************************************************/
+/*  Micro code for 8086:1229 Rev 9                      */
+/********************************************************/
diff --git a/firmware/e100/d102e_ucode.bin.ihex b/firmware/e100/d102e_ucode.bin.ihex
new file mode 100644
index 0000000..9e806da
--- /dev/null
+++ b/firmware/e100/d102e_ucode.bin.ihex
@@ -0,0 +1,38 @@
+:100000008F027D00F904420E850CED14E914FA14F8
+:10001000360EF70EFF1FFF1FB914E00000000000AE
+:100020000000000000000000BD14E000000000001F
+:100030000000000000000000D514E00000000000F7
+:1000400000000000000000000000000000000000B0
+:100050000000000000000000C114E00000000000EB
+:100060000000000000000000000000000000000090
+:100070000000000000000000000000000000000080
+:100080000000000000000000000000000000000070
+:100090000000000000000000C814E00000000000A4
+:1000A000000000000000000000062000EE14E00048
+:1000B000000000000000000080FF3000460E9400A9
+:1000C0000082030000201000430EE000000000004A
+:1000D000000000000000000006003000FB14E000FB
+:1000E0000000000000000000000000000000000010
+:1000F0000000000000000000000000000000000000
+:1001000000000000000000000000000000000000EF
+:100110000000000000000000416E90003C0E8000D6
+:10012000390EE00000000000FD6E9000FD0E900012
+:10013000F80EE000000000000000000000000000D9
+:1001400000000000000000000000000000000000AF
+:10015000000000000000000000000000000000009F
+:10016000000000000000000000000000000000008F
+:10017000000000000000000000000000000000007F
+:10018000000000000000000000000000000000006F
+:10019000000000000000000000000000000000005F
+:1001A000000000000000000000000000000000004F
+:1001B000000000000000000000000000000000003F
+:1001C000000000000000000000000000000000002F
+:1001D000000000000000000000000000000000001F
+:1001E000000000000000000000000000000000000F
+:1001F00000000000000000000000000000000000FF
+:1002000000000000000000000000000000000000EE
+:0B02100000000000000000002A362E55
+:00000001FF
+/********************************************************/
+/*  Micro code for the 8086:1229 Rev F/10               */
+/********************************************************/
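
The three e100 microcode images above are shipped as ihex text and converted to plain e100/*.bin binaries at build time (see the CONFIG_E100 line added to firmware/Makefile). A minimal sketch of the consuming side, under the assumption that the driver fetches the blob through the standard firmware loader; the function name, error handling and log messages here are illustrative, not lifted from the e100 driver:

#include <linux/device.h>
#include <linux/firmware.h>

#define EXAMPLE_UCODE "e100/d101m_ucode.bin"	/* one of the files added above */

static int example_get_ucode(struct device *dev)
{
	const struct firmware *fw;
	int err;

	/* Looked up under /lib/firmware, or taken from the copy linked into
	 * the kernel image when the firmware is built in. */
	err = request_firmware(&fw, EXAMPLE_UCODE, dev);
	if (err) {
		dev_err(dev, "failed to load %s: %d\n", EXAMPLE_UCODE, err);
		return err;
	}

	dev_info(dev, "%s: %zu bytes of microcode\n", EXAMPLE_UCODE, fw->size);
	/* ... copy fw->data into the controller's microcode area ... */

	release_firmware(fw);
	return 0;
}
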
diff --git a/firmware/mts_cdma.fw.ihex b/firmware/mts_cdma.fw.ihex
new file mode 100644
index 0000000..f6ad0cb
--- /dev/null
+++ b/firmware/mts_cdma.fw.ihex
@@ -0,0 +1,867 @@
+:1000000014360002001E021AF9FFFFFFFFFF023341
+:100010001DFFFFFFFFFFFFFFFFFFFFFFFFFF02339B
+:10002000C87581CE90FDE88583A012353CEC4D600B
+:100030007378AB8003760018B89CFA787F800376DB
+:100040000018B865FA78208003760018B820FA788E
+:10005000208003760018B81FFA90FDDDAE83AF82D2
+:1000600090FBF81200AA6005E4F0A380F690FDE88A
+:10007000A88290FDE8A982E8696005E4F20880F7AB
+:100080009001081200B390010C1200B390011012FD
+:1000900000B39001141200D190011A1200D1900106
+:1000A000201200D175D00012341A020126EF6582A9
+:1000B0007003EE658322E493F8740193F97402935C
+:1000C000FE740393F5828E83E869700122E493F64F
+:1000D000A30880F4E493FC740193FD740293FE740E
+:1000E0000393FF740493F8740593F58288831200D8
+:1000F000AA700122E493A3A883A9828C838D82F045
+:10010000A3AC83AD828883898280E32121049B8014
+:1001100080049BACAE049BFDE8049D049DFBF304AE
+:10012000A2049DFBF30502050280FED0F030F00929
+:1001300020F303F68010F7800D30F10920F303F26D
+:100140008004F38001F020F404FCD0E0CC22CCC089
+:10015000E0120163020154BC0005D0F0ACF022C3F0
+:1001600013DCFC02012ABF0009ED258275F001F8BD
+:10017000E622BF010FED2582F582EE3583F583750A
+:10018000F004E022ED258275F002F8E222D083D05F
+:1001900082F5F0C3E493A3C5F095F0C0E0C3D0F0BE
+:1001A000E493A395F04012A3A3C3E5F033500205F6
+:1001B000832582F58250020583740193C0E0E493A5
+:1001C000C0E022D083D082F5F0E4937009740193EB
+:1001D0007004A3A3800C74029365F06005A3A3A32D
+:1001E00080E7740193C0E0E493C0E022120264024D
+:1001F00001FB1202B80201FB1202DC0201FB30E03B
+:100200000720E302E622E72230E10720E302E222B0
+:10021000E32230E202E022E493221202DC02022313
+:100220001202B8020223ABF012022DCBC5F0CB2292
+:1002300030E01020E306E6F5F008E622E7F5F009E5
+:10024000E7192230E11020E306E2F5F008E222E3AC
+:10025000F5F009E3192230E206E0F5F0A3E022E42C
+:1002600093F5F074019322BB0003740922BB0107CC
+:1002700089828A83740422BB020789828A8374106C
+:1002800022740A22020284BB0007E92582F8740165
+:1002900022BB010DE92582F582EA3583F5837404DA
+:1002A00022BB020DE92582F582EA3583F5837410BD
+:1002B00022E92582F87402220202B8BF0005EDF897
+:1002C000740122BF01078D828E83740422BF02074E
+:1002D0008D828E83741022EDF87402220202DCBF3C
+:1002E0000007ED2582F8740122BF010DED2582F58E
+:1002F00082EE3583F583740422BF020DED2582F56D
+:1003000082EE3583F583741022ED2582F874022283
+:10031000020310C0E0120264020328C0E01202B817
+:10032000020328C0E01202DC02032830E00B20E3C5
+:1003300004D0E0F622D0E0F72230E10B20E304D035
+:10034000E0F222D0E0F322D0E0F022C9CDC9CACE3B
+:10035000CACBCFCB12035BEDF9EEFAEFFB22BB0069
+:100360002FBF000AFAEDF8E7F60809DAFA22BF0112
+:10037000128D828E83F802037809A3E7F0D8FA225F
+:10038000020383FAEDF8E7F20809DAFA2202038D94
+:10039000BB014DBF001489828A83F9EDF802039FE7
+:1003A00008A3E0F6D9FA220203B0BF01228D828EA3
+:1003B00083FB08C9C582C9CAC583CAE0A3C9C5826F
+:1003C000C9CAC583CAF0A3DBEAD8E8220203D38DE9
+:1003D000828E83F9EDF8E0F208A3D9FA220203DD58
+:1003E000BB024DBF001289828A83F9EDF80203EF48
+:1003F00008A3E493F6D9F922BF01238D828E83FBF3
+:1004000008C9C582C9CAC583CAE493A3C9C582C93C
+:10041000CAC583CAF0A3DBE9D8E722020422898295
+:100420008A83F9EDF8E493F208A3D9F922020433A0
+:10043000BF000DFAEDF8E3F60809DAFA2202043DEE
+:10044000BF01128D828E83F802044A09A3E3F0D81B
+:10045000FA22020455FAEDF8E3F20809DAFA220268
+:10046000045FE6FB08E6FA08E6F904F618700106F0
+:1004700022E6FF08E6FE08E6FD22EFF0A3EEF0A379
+:10048000EDF022EBF0A3EAF0A3E9F022E0FFA3E015
+:10049000FEA3E0FD22E0FBA3E0FAA3E0F9220000C6
+:1004A00000000000000502006105710026059800AB
+:1004B000330A0900610A750066154400610CF900F1
+:1004C0006109A9006109E000610DC000610BF10044
+:1004D000610A1C00610A510061173C0033174F008C
+:1004E000341E1400431EBF0044202C0044201A0078
+:1004F000471EE600471F8B004D1FDC004F1F080002
+:100500005832A800617CCC7DFF121CC52290FFFCF4
+:10051000E020E72DC2AFAE59AF58755A20E55A1406
+:10052000C55A6019E4FE7F05EE4FCE24FFCECF34CE
+:10053000FFCF6007E490FF92F080ED80E08E598F4E
+:10054000582212050A7D077CB71232C47D0F7C6EDB
+:100550001232DE789D7A06E4F608DAFC7A06120595
+:10056000CD7C03120E55122168E4FEFF7C0F12327F
+:100570004DD2A822123138E490FC38F090FFF0E020
+:1005800030E408740190FC39F08005E490FC39F007
+:100590007D0A7C001225461231BB2212313890FCB4
+:1005A00039E014700E90FFF0E04410F07C0012254A
+:1005B000DF801990FC39E0700E90FFF0E054EFF00E
+:1005C0007C001225DF80057C171225DF1231BB224B
+:1005D00090FFF0E054ABF090FFF0E04420F0228C6C
+:1005E000378D367882EDF608ECF6EDFEECFD7F01F6
+:1005F0009000051201F57880F67882E6FD08E6FCA9
+:10060000EDFEECFD7F019000041201F5540FFC7D1E
+:100610008012176D7880E6700DAD3AAE39AF38E4D0
+:100620001203187C082290FFF0E054FEF090FFF0D7
+:10063000E054FDF0801E7882E6FD08E6FCEDFEEC5D
+:10064000FD7F0190000812021725E0440190FFF39E
+:10065000F00206D97882E6FD08E6FCEDFEECFD7FAF
+:100660000190000612021754FE90FFF3F0802B78E1
+:1006700082E6FD08E6FCEDFEECFD7F01900008122D
+:100680000217FAEB90FFF1F01208C8400DAD3AAE38
+:1006900039AF38E41203187C18227882E6FD08E6A8
+:1006A000FCEDFEECFD7F0190000812021790FFF1B7
+:1006B000F01208C8400DAD3AAE39AF38E412031855
+:1006C0007C18227882E6FD08E6FCEDFEECFD7F0159
+:1006D000900006120217440190FFF3F07883E6249D
+:1006E00003F618E63400F67880E624FE500990FF01
+:1006F000F0E054FDF0800790FFF0E04402F0E49059
+:10070000FFF1F0788176007880E624FFFCE434FF86
+:10071000FD7881E67F00FEECD39EEF6480CD64809F
+:100720009D402F1208AD400F7881E6AD3AAE39AF4B
+:10073000381203187C182290FFF2E0FC788286833E
+:10074000088682ECF0788106A37882A68308A682C8
+:1007500080B51208AD400F7881E6AD3AAE39AF38BA
+:100760001203187C182290FFF2E0FC78828683083E
+:100770008682ECF07880E6AD3AAE39AF38120318D5
+:100780007C00228C378D367882EDF608ECF6EDFE93
+:10079000ECFD7F019000051201F57881F67882E684
+:1007A000FD08E6FCEDFEECFD7F019000041201F572
+:1007B000540FFC7D8112176D7881E670037C08224E
+:1007C00090FFF0E054FEF090FFF0E054FDF0801B4D
+:1007D0007882E6FD08E6FCEDFEECFD7F0190000866
+:1007E00012021725E090FFF3F0805B7882E6FD08A7
+:1007F000E6FCEDFEECFD7F0190000612021754FEB0
+:1008000090FFF3F080217882E6FD08E6FCEDFEEC37
+:10081000FD7F01900008120217FAEB90FFF1F01231
+:1008200008C840037C18227882E6FD08E6FCEDFE4D
+:10083000ECFD7F0190000812021790FFF1F0120802
+:10084000C840037C18227883E6240AF618E63400B0
+:10085000F6788076007881E624FFFCE434FFFD78AA
+:1008600080E67F00FEECD39EEF6480CD64809D40E7
+:100870002178828683088682E090FFF1F01208C812
+:1008800040037C1822788006788306E618700106FB
+:1008900080C390FFF0E04401F0788286830886826E
+:1008A000E090FFF1F01208C840037C18227C00227F
+:1008B00090FFF0E020E71290FFF0E030E50990FFB4
+:1008C000F0E04420F0C32280E7D32290FFF0E02044
+:1008D000E31290FFF0E030E50990FFF0E04420F0F3
+:1008E000C32280E7D3228C428D417C00ED54F0FD81
+:1008F000EC7003ED64307005753E038003753E04B3
+:10090000AC3E120F72758300858340E541540FF5AC
+:100910003FE5407004E53F64037035E53E24FD7516
+:10092000F00AA42402F582E434FCF583E030E60505
+:100930001210598019E53E249DF8E654FBF678A97B
+:10094000E62405F58218E63400F583740FF080592B
+:10095000E5407004E53F64047048E53E24FD75F011
+:100960000AA42402F582E434FCF583E030E507AC08
+:1009700042AD41121C5AE54230E21578ADE630E056
+:100980000F78ADE630E109E4FF04FE7C0412324D3D
+:1009900078A9E62406F58218E63400F583740FF092
+:1009A0008007E4FC7DEE121C5AC203221231381279
+:1009B0000F7278A9E62406F58218E63400F583E084
+:1009C00090FC38F078A9E62405F58218E63400F5A5
+:1009D00083E090FC39F0C2037D027C0012254612B0
+:1009E00031BB221231387895ECF6EC249DF8E630D4
+:1009F000E1077C131225DF800F90FC39E0FD78952C
+:100A0000E6FC1213EF1225DF1231BB2212313878C7
+:100A100095ECF67D00120F121225DF1231BB221267
+:100A200031387895ECF6EC249DF8E630E2077C133B
+:100A30001225DF801B7895E6249DF8E620E1077CEF
+:100A4000121225DF800A7895E6FC1214131225DFB6
+:100A50001231BB221231387895ECF6EC249DF8E681
+:100A600020E2077C111225DF800A7895E6FC12153A
+:100A7000141225DF1231BB221231387895ECF612B0
+:100A80000F7278A9E62409F58218E63400F583E0B0
+:100A900090FC3FF078A9E6240AF58218E63400F5C8
+:100AA00083E090FC40F078A9E62403F58218E63450
+:100AB00000F583E0FC78A9E62404F58218E634000A
+:100AC000F583E0F56278A9E62402F58218E63400A1
+:100AD000F583E0F5638C61E4EC333354017895F6EB
+:100AE0006008E56230E1037895067895E690FC4170
+:100AF000F078A7E62402F58218E63400F583E0FDDD
+:100B0000A3E0540CFCED54E68C65F564E56130E53A
+:100B100003436501E56220E50EE561547F7008E559
+:100B20006120E703436502E56130E303436510E5B7
+:100B30006130E203436520E561540360034365408F
+:100B4000E56130E103436580E56130E4034364011E
+:100B5000E56130E603436408E56220E40EE5615494
+:100B60007F7008E56120E7034364105365FB53641D
+:100B7000F9AD64E56590FC3ACDF0A3CDF0E56330C6
+:100B8000E30DE5635430C4540F90FC3DF08005E460
+:100B900090FC3DF0E563540390FC3CF0E5635404A5
+:100BA000C31390FC3EF090FC3CE0700E7D357EFC63
+:100BB0007F01740190000912014B78A9E62408F521
+:100BC0008218E63400F583E07C00FD78A9E624076E
+:100BD000F58218E63400F583E07F004CFEEF4D907F
+:100BE000FC38F0A3CEF0CEC2037D0A7C001225466D
+:100BF0001231BB221231387895ECF6789A760108DA
+:100C000076FC0876387897760C789A12046E120281
+:100C10001D7898CBF6CB08F67F00EF24EA401FE45E
+:100C2000EF25E090357EFD93CD04937899667003AF
+:100C3000ED186670067897760080030F80DC789652
+:100C4000EFF6789A12046E9000021202177898CB91
+:100C5000F6CB08F65404CB54064B60047897760B19
+:100C60007899E630E313789A12046E900005120129
+:100C7000F524FB50047897760D7899E654C07D00F2
+:100C800064C04D70047897760B789A12046E9000C9
+:100C9000041201F524FC50047897760F789A120418
+:100CA0006E9000061201F524FD50047897760E78B8
+:100CB0009A12046E9000091201F524FD50047897F1
+:100CC000760A7897E6702A7895E6FC120F72789A81
+:100CD00012046E78A7E6F978A6E6FA7B01740A7822
+:100CE00000120348C2037895E6FC1211157897ECC0
+:100CF000F67897E6FC1225DF1231BB2212313878E4
+:100D000095ECF6120F727895E624FD75F00AA4248E
+:100D100014F582E434FCF583AC82AD8378A6868337
+:100D2000088682ECF9EDFA7B0A78011203B0C2035F
+:100D30007895E6FC1211151231BB228D2B8C2AED11
+:100D400060407527017529487528FFE52A24FDFCB8
+:100D5000E434FFFDEC7C0325E0CD33CDDCF9FCE58C
+:100D6000292CF529E5283DF528AD29AE28AF2774B3
+:100D7000809000061203207480900002120320125B
+:100D80000FC5E52B14603B7527017529087528FFF1
+:100D9000E52A24FDFCE434FFFDEC7C0325E0CD33A3
+:100DA000CDDCF9FCE5292CF529E5283DF528AD2910
+:100DB000AE28AF27E4900006120320E49000021250
+:100DC0000320221231387895ECF6EC249DF8E630B9
+:100DD000E2097895E6FC121514D2007895E6FC122B
+:100DE0000F727896760090FC39E030E704789676BA
+:100DF000017896E6FD7895E6FC120D38C2033000C6
+:100E0000077895E6FC1214137C001225DF1231BB23
+:100E10002278A9E62404F58218E63400F583E0443C
+:100E200001F078A9E62404F58218E63400F583E0A1
+:100E300030E00280ED78A9E6240BF58218E6340054
+:100E4000F583E054F8F078A9E62402F58218E63438
+:100E500000F583E04480F022C2038C58120F7278B0
+:100E6000A6868308868279AF7A357B0A78011203D9
+:100E7000FE120E0EAC587D02120D38C203AC581291
+:100E80001115228D538E528F518C50120F72754F47
+:100E90000078A9E62405F58218E63400F583E02001
+:100EA000E41FE54F24F64019054FC2037C181232A7
+:100EB000FB90FF93E04401F0B2B3AC50120F72808C
+:100EC000D078A9E62405F58218E63400F583E02001
+:100ED000E405C2037C022278A9E62405F58218E61F
+:100EE0003400F583E0540F601678A9E62405F582F6
+:100EF00018E63400F583E0540FF0C2037C01227839
+:100F0000A88683088682E0AD53AE52AF5112031813
+:100F1000C2037C00228D318C30121514E531600F34
+:100F2000E530B4030A7C0112250E7C8112250EAC3B
+:100F300030120F72E531601A78AA8683088682E043
+:100F400054E7F0A3A3A3A3E054E7F0AC307D021272
+:100F50000D3878A6868308868279B97A357B0A7837
+:100F6000011203FEC203E530249DF8E654FDF6AC01
+:100F700030121115228C2630030512329A80F87C2B
+:100F80000A1231ADD203E52624FD78A3F670077866
+:100F9000AA76FF0876E078A3E67D007C0425E0CD04
+:100FA00033CDDCF9FC24A078A9F6ED34FF18F678EF
+:100FB000A3E675F00AA42400FCE434FCFD78A6ED59
+:100FC000F608ECF61232462278A9E62402F58218D9
+:100FD000E63400F583E030E72278A9E62402F582C2
+:100FE00018E63400F583E0547FF078A9E62402F592
+:100FF0008218E63400F583E04480F02278AA8683E4
+:10100000088682E0547FF0AD83E5822404FCE43D51
+:101010008C82F583E0547FF078A9E6240BF58218E2
+:10102000E63400F583E054F8F078ABE62401F5826D
+:1010300018E63400F583E04403F078ABE62405F5C8
+:101040008218E63400F583E04403F078A9E624052D
+:10105000F58218E63400F583740FF02278AA8683AF
+:10106000088682E0543FF0AD83E5822404FCE43D31
+:101070008C82F583E0543FF078A3E624A4F8E6FCE4
+:1010800078ABE62401F58218E63400F583ECF078BD
+:10109000A3E624A4F8E6FC78ABE62405F58218E67E
+:1010A0003400F583ECF078A9E6240BF58218E634D9
+:1010B00000F583E054FB4402F52678A7E62402F508
+:1010C0008218E63400F583E030E50343260178A971
+:1010D000E62405F58218E63400F583E030E00312DB
+:1010E0000FC5E526FC78A9E6240BF58218E6340046
+:1010F000F583ECF078A9E62405F58218E63400F5CE
+:1011000083740FF078AA8683088682E04480F0A377
+:10111000A3A3A3E04480F0228C2A120F7278A7E6E2
+:101120002408F58218E63400F583E0FC78A9E6246B
+:101130000AF58218E63400F583ECF078A7E6240778
+:10114000F58218E63400F583E0FC78A9E62409F579
+:101150008218E63400F583ECF078A6868308868250
+:10116000E0FDA3E0FCEDFE78A9E62408F58218E690
+:101170003400F583EEF0ECFE78A9E62407F582183A
+:10118000E63400F583EEF08C298D28C3EC9405ED50
+:10119000940C400575277C8033D3E5299401E5281C
+:1011A0009403400575273C8023D3E5299481E528E5
+:1011B000940140057527188013D3E5299460E5282C
+:1011C0009400400575270C8003752708AF27E4EFCE
+:1011D000547C4483FF8F27E527FC78ABE62401F598
+:1011E0008218E63400F583ECF0E527FC78ABE624C2
+:1011F00005F58218E63400F583ECF0E527FC78A3CA
+:10120000E624A4F8ECF678A9E62402F58218E63480
+:1012100000F583E0F52778A7E62402F58218E63486
+:1012200000F583A3E030E3175327C778A7E624052A
+:10123000F58218E63400F583E09035AA93422778CA
+:10124000A7E62402F58218E63400F583E030E705CE
+:1012500043274080035327BF5327FB78A7E6240684
+:10126000F58218E63400F583E06003432704532732
+:10127000FC78A7E62404F58218E63400F583E04202
+:1012800027432780E527FC78A9E62402F58218E6A3
+:101290003400F583ECF078A9E62404F58218E634EE
+:1012A00000F583E0F52778A7E62402F58218E634F6
+:1012B00000F583A3E030E1055327DF8003432720B7
+:1012C00078A7E62402F58218E63400F583E030E4DE
+:1012D000055327EF800343271078A7E62409F582FA
+:1012E00018E63400F583E0B40203432702E527FC47
+:1012F00078A9E62404F58218E63400F583ECF0784A
+:10130000A9E62403F58218E63400F583E0F5277892
+:10131000A7E62409F58218E63400F583E07005534A
+:10132000277F800343278078A7E62402F58218E60A
+:101330003400F583A3E030E00543272080035327E2
+:10134000DF78A7E62402F58218E63400F583E03062
+:10135000E30543274080035327BF78A7E62402F51F
+:101360008218E63400F583E030E00543271080035F
+:101370005327EF78A7E62402F58218E63400F583B8
+:10138000A3E030E40543270880035327F778A7E656
+:101390002402F58218E63400F583A3E030E5054326
+:1013A000270480035327FB78A7E62402F58218E67A
+:1013B0003400F583A3E030E605432701800353277B
+:1013C000FE78A7E62402F58218E63400F583A3E050
+:1013D00030E70543270280035327FDE527FC78A962
+:1013E000E62403F58218E63400F583ECF0C2037CB2
+:1013F00000228D278C26ED54031460037C1022E517
+:1014000027547C24FC40037C0B22E526249DF8E62F
+:101410004402F67C00228C30120F72E530249DF8D5
+:10142000E620E24FAC307D02120D38E53024FE4458
+:1014300028FC78AA8683088682ECF0AF83E58224B4
+:1014400004FEE43FFFEC8E828F83F07C038C2CE55E
+:101450002CFC78ABE62401F58218E63400F583EC29
+:10146000F0E52CFC78ABE62405F58218E63400F5AF
+:1014700083ECF0752D01752F48752EFFE53024FDA6
+:10148000FCE434FFFDEC7C0325E0CD33CDDCF9FC3E
+:10149000E52F2CF52FE52E3DF52E78ABE62404F54F
+:1014A0008218E63400F583E054E7F52CAD2FAE2E1C
+:1014B000AF2DE4900002120320E4900006120320F6
+:1014C0001201EF30E503432C10E52CFC78ABE62449
+:1014D00004F58218E63400F583ECF012105978A96F
+:1014E000E62406F58218E63400F583E0C203FCE545
+:1014F00030249DF8E64404F68C2CE530540FC45497
+:10150000F07E00FFEEEF44047D00FFEC4EFCED4F5B
+:10151000FD121CC57C00228C2F120F72120FF9785D
+:10152000AA8683088682E05408F0A3A3A3A3E0540C
+:1015300008F0AC2F7D02120D38C203E52F249DF870
+:10154000E654FBF67C00221231387896ECF6EC2457
+:101550009DF8E630E10A7D007C131225461231BB6E
+:101560007896E6249DF8E64401F67896E6FC120F9C
+:10157000727896E624FD75F00AA42414F582E4340A
+:10158000FCF58378A6E6FA08E6F97B0A78011203EF
+:10159000B078A6868308868279B97A357B0A780185
+:1015A0001203FE120FC5C2037896E6FC12111578DD
+:1015B00095ECF6EC600A7D007C081225461231BBE2
+:1015C0007896E6FC120F7278A9E62404F58218E6F4
+:1015D0003400F583E0441054DFFC78A9E62404F5D8
+:1015E0008218E63400F583ECF07895ECF6C2037CC3
+:1015F000C81232FB7896E6FC120F7278A9E6240432
+:10160000F58218E63400F583E054EFF0C2037CC89D
+:101610001232FB7896E6FC120F7278A9E62404F5E4
+:101620008218E63400F583E04410F0C2037CC8124F
+:1016300032FB7896E6FC120F7278A9E62404F58254
+:1016400018E63400F583E04420F0C2037CF0123247
+:10165000FB7896E6FC120F7278A9E62405F582184D
+:10166000E63400F583E030E415C2037896E64410D2
+:101670007F00FE7C0712324D1231BB02173B78A966
+:10168000E62404F58218E63400F583E054CFF0C276
+:10169000037CC81232FB7896E6FC120F7278A9E63A
+:1016A0002404F58218E63400F583E04430F0C203E8
+:1016B0007CF01232FB7896E6FC120F7278A9E624D1
+:1016C00005F58218E63400F583E030E414C20378AF
+:1016D00096E644107F00FE7C0712324D1231BB802B
+:1016E0005D78A9E62404F58218E63400F583E05419
+:1016F000EFF078A9E62404F58218E63400F583E0DB
+:1017000054DFF07896E624FD75F00AA42414F582DF
+:10171000E434FCF583AC82AD8378A68683088682A8
+:10172000ECF9EDFA7B0A78011203B0C2037896E671
+:10173000FC1211157D007C0B1225461231BB2212C2
+:101740003138E490FC39F07D027C001225461231DC
+:10175000BB221231387C001225DF1231BB22743CCF
+:1017600090FBE0F0743E90FBE0F0E490FC28F02267
+:101770008D358C34ECB401028003D340028028B450
+:1017800002028003D34008A835E625E0F68018B4AD
+:1017900004028003D3400AA835E625E025E0F68060
+:1017A00006A83576008000228C3C8D3BEDFEECFDDA
+:1017B0007F0175660675670090FC29120477120197
+:1017C000EFB480028006D3500302186E90FC2912F9
+:1017D00004899000031201F554F0B430028003D361
+:1017E000405F90FC29120489900008120217FAFD4C
+:1017F000EBFE7F0190FC2C120477EECD9035C3FCFC
+:10180000E493FF740193FEF9EFFA7B01EAFFE9FE2E
+:10181000ECC39EED9F40259035C5E493FD74019384
+:10182000FCEDFEECFD7F01EECDFC90FC2EE0D39CA8
+:1018300090FC2DE09D5005756680803312198C80D8
+:101840002EB460028003D3400BAC3CAD3B1207804A
+:101850008C66801BB41003B34010C3B42003B340A4
+:1018600009C3B440028003D34000756681800080C4
+:1018700075B481028003D3406B90FC2912048990D7
+:1018800000031201F554F0B430028003D3401D90E0
+:10189000FC29120489900008120217FAFDEBFE7F62
+:1018A0000190FC2F1204771218F68036B460028083
+:1018B00003D34013753A67E4F539F538AC3CAD3BDA
+:1018C0001205DC8C66801BB41003B34010C3B42037
+:1018D00003B34009C3B440028003D340007566815E
+:1018E000800080028000E566FC90FC29120489ECEF
+:1018F000900002120320AC672290FC291204899008
+:1019000000041201F5600474018001E4A2E0920178
+:1019100090FC29120489ED2403FD50010E90FC2C4B
+:1019200012047790FC291204899000051201F5F544
+:10193000679000041201F5540FFC7D6712176DE5E6
+:10194000677004756608227566007884760078846E
+:10195000E6C39567503890FC2F1204891201EFFC02
+:1019600090FC2C120489EC12031830010E90FC310B
+:10197000E004F090FC307003E004F078840690FC02
+:101980002EE004F090FC2D7003E004F080C0229063
+:10199000FC2AE0FDA3E0FCEDFEECFD7F01ED240A56
+:1019A000FD50010E90FC3212047790FC291204893C
+:1019B0009000041201F5540FB401028003D34017C4
+:1019C00090FC321204890DED70010E90FC2F120470
+:1019D0007778887601804EB402028003D340199054
+:1019E000FC32120489ED2402FD50010E90FC2F12EE
+:1019F000047778887602802DB404028003D34019DE
+:101A000090FC32120489ED2404FD50010E90FC2F4D
+:101A100012047778887604800CB400028003D340E7
+:101A2000007566082290FC291204899000051201B5
+:101A3000F5F567788576007885E6C39567400302FB
+:101A40001AF4788676007886E6C378889650769081
+:101A5000FC2C1204891201EFFC90FC321204921249
+:101A600001E9F45CFC1201E9F890FC2F120489E80A
+:101A7000C0E01201EFC8D0E0C8584CFC90FC2C121A
+:101A80000489EC1203187887ECF690FC31E004F03E
+:101A900090FC307003E004F009E970010A90FC3218
+:101AA00012048090FC291204899000041201F53080
+:101AB000E40E90FC2EE004F090FC2D7003E004F0A6
+:101AC00078860680817888E6FDE4FEFFEECDFC9006
+:101AD000FC31E02CF090FC30E03DF07888E6FDE44D
+:101AE000FEFFEECDFC90FC34E02CF090FC33E03DAA
+:101AF000F0788506021A347566002222C0E0C0F034
+:101B0000C082C083C0D0E8C0E0E9C0E0EAC0E0EB3A
+:101B1000C0E0ECC0E0EDC0E0EEC0E0EFC0E090FF60
+:101B200092E01201C01B47301B47321B56381B681E
+:101B30003A1B7A3E1B92441B86461B9E501BE0526A
+:101B40001BBF541C015600001C2290FF92E07F0036
+:101B5000FE7C0112324D021C32E4FF04FE7C0312B3
+:101B6000324D742090FFFEF0021C32E4FF04FE7C34
+:101B70000212324D744090FFFEF0021C32E4FF046A
+:101B8000FE7C0412324D021C32E4FF04FE7C05127E
+:101B9000324D021C32E4FF04FE7C0612324D021C60
+:101BA0003290FFA5E07D0090FBF8CDF0A3CDF09042
+:101BB000FBF9E0FCF58390FBF8E04433FD121CC513
+:101BC000807390FFB5E07D0090FBFACDF0A3CDF0DF
+:101BD00090FBFBE0FCF58390FBFAE04443FD121C14
+:101BE000C5805290FFA6E07D0090FBFCCDF0A3CD18
+:101BF000F090FBFDE0FCF58390FBFCE04434FD122B
+:101C00001CC5803190FFB6E07D0090FBFECDF0A3B7
+:101C1000CDF090FBFFE0FCF58390FBFEE04444FD3B
+:101C2000121CC5801090FF92E07D00FCED44AAFDDF
+:101C3000121CC58000E490FF92F0D0E0FFD0E0FEDF
+:101C4000D0E0FDD0E0FCD0E0FBD0E0FAD0E0F9D06D
+:101C5000E0F8D0D0D083D082D0F0D0E0320581053A
+:101C60008105810581A881181818EDF608ECF69019
+:101C7000FF5AE020E70280F790FF59E07D00A8813D
+:101C800018CDF6CD08F67D03A881E618FCE6CC2534
+:101C9000E0CC33CCDDF9CCF6CC08F6A88118E644CC
+:101CA000F8F6A881181818E6FD08E6FCA881188641
+:101CB00083088682EDF0A3ECF0740290FF5AF015D1
+:101CC0008115811581158122E5812405F581E4A81E
+:101CD0008118F6A88118181818EDF608ECF690FB94
+:101CE000F5E024F85003021DE6E4A8811818F6A8D0
+:101CF0008118E6FEA88118181818E6FD08E6FC7F92
+:101D000000EF24F8404DE4EF25E0247DF582E43433
+:101D1000FCF583E0FBA3E06C7003FAEB6D700974D3
+:101D200001A8811818F6802BE4EF25E0247DF582C8
+:101D3000E434FCF5837A00E054F0CCF8CCCDF9CD56
+:101D4000FB7800E954F0F9EA687002EB6970010E63
+:101D50000F80AEA88118EEF6A88118181818EDF6B5
+:101D600008ECF6A881EFF6A8811818E67079A8812A
+:101D700018E624F74071A88118181818E6540FA81F
+:101D800081F664046017A881E664036010A88118D6
+:101D9000181818E6FD08E6FC121C5A804A7C0A1244
+:101DA00031ADA88118181818E6FD08E6FC90FBF480
+:101DB000E025E0247DF582E434FCF583EDF0A3EC2E
+:101DC000F090FBF4E0FFE4EF045407FF90FBF4F025
+:101DD00090FBF5E004F012324690FBF6E07008E468
+:101DE000FEFF7C0F12324D802790FBF7E004F05489
+:101DF0003F701D90FBF7E044FE7D00FC90FBF4E09B
+:101E000025E0247DF582E434FCF583EDF0A3ECF0CD
+:101E1000E58124FBF58122788B7600788C7600743E
+:101E20000190FBF6F012313890FBF5E060577C0A28
+:101E30001231AD90FBF3E025E0247DF582E434FC23
+:101E4000F583E0FDA3E0FC90FBF3E025E0247DF5C5
+:101E500082E434FCF583E4F0A3F090FBF3E0FFE4CC
+:101E6000EF045407FF90FBF3F090FBF5E014F078DB
+:101E700089EDF608ECF61232467889E6FD08E6FCB4
+:101E80001208E380A312329A90FF93E04401F0B26B
+:101E9000B3788B06B60011788B7600788CE6F40464
+:101EA00004A2E092B4788CF6021E25E490FBF6F0D2
+:101EB00090FBF5E07D00FCED44CFFD121C5A123181
+:101EC000BB22123138E5706449456F601590FF837D
+:101ED000E0540F7D00D39570ED956F5005122F8162
+:101EE00080031230511231BB22123138E57064493F
+:101EF000456F600512308B800E90FF80E04408F043
+:101F000090FF83E0547FF01231BB221231388C54A1
+:101F1000EC54F0B41015756A357569FC756801E507
+:101F20006A2403F56AE5693400F569E4F557F55666
+:101F3000E556C394015027E554540FFCAD6AAE69D1
+:101F4000AF68120E808C55EC60028012056AE56A5B
+:101F5000700205690557E5577002055680D2E554B1
+:101F6000540F249DF8E654FEF6E554540F7F00FE0E
+:101F70007C1212324DE5551470097D007C09122542
+:101F8000468007AD577C001225461231BB22123124
+:101F90003890FFFCE04402F090FF00E030E713903F
+:101FA000FF83E04480F0436D8090FFFCE04401F04B
+:101FB000801190FF82E04408F0536D7F90FFFCE0B9
+:101FC00054FEF090FF81E04480F01225F990FFFE6E
+:101FD000E04405F090FFFCE054FDF01231BB22120A
+:101FE00031387C011232FB78ADE64402F674FEFC17
+:101FF00004FD121CC590FF5AE030E70280F7E4F5BB
+:102000004E754D10AC4EAD4DE54E154E7002154D52
+:10201000EC4D600280EE4387011231BB2212313851
+:102020007C021231C778ADE654FDF61231BB2212A4
+:10203000313878ADE630E02C78ADE630E12678AD89
+:10204000E6FCF58318E644F0FD121C5A90FFFCE014
+:102050004420F07C021232FB78ADE654FDF6741A8F
+:1020600090FFFEF078ADE6FCF58318E644F1FD1232
+:102070001C5A1231BB22756D0090FFFFE0600343D4
+:102080006D01756E00E4F56CF56BE4F56F757049E4
+:10209000748490FF82F0748490FF80F0748090FFCD
+:1020A00058F0748090FF5AF0AD46AF457E00EE24A4
+:1020B000FE5003022142E4EE75F007A4247FF5826E
+:1020C000E434F8F583E0FFE4EF5480FDE4EF540FCF
+:1020D00014FFED6038E4EF75F008A42448F582E4BD
+:1020E00034FFF5837490F0E4EF75F008A4244AF50A
+:1020F00082E434FFF5837480F0E4EF75F008A424E3
+:102100004EF582E434FFF5837480F08034E4EF759B
+:10211000F008A42408F582E434FFF5837490F0E419
+:10212000EF75F008A4240AF582E434FFF583E4F0A7
+:10213000E4EF75F008A4240EF582E434FFF583E49F
+:10214000F00E0220AB8D468E448F45747F90FFFDCC
+:10215000F0749090FFFCF0228C58EC24F65006E5C9
+:10216000582437FC22E5582430FC22D2B0122543F3
+:10217000EC700302227E755C03AE5B7F00E55C15AC
+:102180005C6480247F5035EF2400F582E434FBF555
+:1021900083E0FE24FE501EEF7D00FCE4FB7474C35C
+:1021A0009CFAEB9DFBEE7D00FCEAC39CED6480CBCA
+:1021B00064809B50028005EF2EFF80C18E5B8F5A9A
+:1021C000E55C6480247F500302227EE55A248E5011
+:1021D0000302227E855A5D755B00AE5AAF5B903577
+:1021E000EEE493F55CE55C155C6480247F5018EEAA
+:1021F0002400F582E434FBF583E0FCEF9035EE93A8
+:102200006C70040E0F80DE8E5A8F5BE55C64802458
+:102210007F406E755E017560E8755FFFE55D2402C5
+:10222000F55A755C07E55C334057AD60AE5FAF5E55
+:10223000E55CF5823395E0F5831201F5C4540FFC9B
+:10224000122155E55A2400F582E434FBF583ECF0C5
+:10225000055A055AAD60AE5FAF5EE55CF582339519
+:10226000E0F5831201F5540FFC122155E55A2400C4
+:10227000F582E434FBF583ECF0055A055A155C80D1
+:10228000A4740290F851F090F86B79C77A357B27E7
+:1022900078011203FE756A357569FC756801E49072
+:1022A000FF83F0748090FF81F0755902E55975F055
+:1022B00007A4247FF582E434F8F583E0788FF6FCF8
+:1022C000540F14FC788FECF6E55975F007A42481BF
+:1022D000F582E434F8F583E0789276FD0876E8FC40
+:1022E000788FE675F008A42448F582E434FFF5837E
+:1022F000E4F0788FE675F008A4244FF582E434FF0B
+:10230000F583ECF07892E6FF08E67E03CFC313CFA7
+:1023100013DEF9FE788FE675F008A42449F582E40F
+:1023200034FFF583EEF0788FE675F008A4244AF5C3
+:1023300082E434FFF5837480F07890ECF67D0078C9
+:1023400093E62CF618E63DF67892E6FD08E67C0367
+:10235000CDC313CD13DCF9FC788FE675F008A42407
+:102360004DF582E434FFF583ECF0788FE675F008E4
+:10237000A4244EF582E434FFF583E4F07892E6FD80
+:1023800008E6FC788FE6FF7E00EE24FE5003022470
+:10239000FDE4EE75F007A4247FF582E434F8F583BC
+:1023A000E0FFE4EF5480FAE4EF540F14FFE4EE751D
+:1023B000F007A42481F582E434F8F583E07890F600
+:1023C000E4EE1313548024F0F8E434FDF9E8FCE95A
+:1023D000FD8A5AEA700302246AE4EF75F008A42427
+:1023E00048F582E434FFF583E4F07890E6FAE4EF10
+:1023F00075F008A4244FF582E434FFF583EAF0ED8C
+:10240000FBEC7A03CBC313CB13DAF9FAE4EF75F0E4
+:1024100008A42449F582E434FFF583EAF07890E6D5
+:102420007B00FAEC2AFCED3BFDFBEC7A03CBC313FB
+:10243000CB13DAF9FAE4EF75F008A4244DF582E441
+:1024400034FFF583EAF0E4EF75F008A4244AF5823E
+:10245000E434FFF5837480F0E4EF75F008A4244EB3
+:10246000F582E434FFF5837480F00224F9E4EF751B
+:10247000F008A42408F582E434FFF583E4F07890B2
+:10248000E6FAE4EF75F008A4240FF582E434FFF5D2
+:1024900083EAF0EDFBEC7A03CBC313CB13DAF9FA42
+:1024A000E4EF75F008A42409F582E434FFF583EA2B
+:1024B000F07890E67B00FAEC2AFCED3BFDFBEC7A31
+:1024C00003CBC313CB13DAF9FAE4EF75F008A424B5
+:1024D0000DF582E434FFF583EAF0E4EF75F008A42B
+:1024E000240AF582E434FFF583E4F0E4EF75F008A4
+:1024F000A4240EF582E434FFF583E4F00E02238673
+:102500008E597892EDF608ECF6788FEFF61220737C
+:10251000228C26EC30E718E526540F1475F008A439
+:102520002448F582E434FFF583E054DFF08016E5BB
+:1025300026540F1475F008A42408F582E434FFF53E
+:1025400083E054DFF0227C0022EC90FC37F08C24F6
+:10255000ED2403F5257D00D39572ED95714003853B
+:102560007225E52524B75009752503740290FC37C0
+:10257000F0AC2512307622E4F56CF56B12257D2245
+:1025800090FC35E06573600E740490FC37F0E4F560
+:102590006B756C0380467D73E4FEFF79357AFC7BB6
+:1025A0000174057800120348E56C2403F56CE56BB3
+:1025B0003400F56BE56CD39572E56B95714006853B
+:1025C000726C85716BD3E56C9448E56B9400400C9C
+:1025D000740290FC37F0E4F56B756C03AC6C123050
+:1025E0007622EC90FC37F0E4F56CF56B8C32EC6005
+:1025F0000512306780057C001230762290FF93E050
+:102600004401F0B2B390FF04E0F54A90FF06E0FD0C
+:10261000A3E0ED7D00FC7D00FC90FF06E0FFA3E061
+:102620007E00FFE4FEEC4EFCED4FFDC3EC9448ED64
+:102630009400502290FF06E0FDA3E0ED7D00FC7DBC
+:1026400000FC90FF06E0FFA3E07E00FFE4FEEC4EFE
+:10265000FCED4FFD8004E4FD7C488C728D7190FF91
+:1026600002E0FDA3E0ED7D00FC7D00FC90FF02E0B8
+:10267000FFA3E07E00FFE4FEEC4EF54CED4FF54B82
+:10268000756A357569FC7568017D357EFC7F017959
+:1026900073E4FAFB74057800120348754900E549B4
+:1026A00024FE4019AD6AAE69AF68E412031805490B
+:1026B0000DED70010E8D6A8E698F6880E1756A3547
+:1026C0007569FC75680190FF00E05460B4000280F9
+:1026D00006D35003022CBFE54A540FF549E54A548E
+:1026E00080A2E0920290FF01E012018A000B2CBA56
+:1026F000270528232CBA292F2CBA2A122A462BADBB
+:102700002BB02BF02C632C91E56D30E70EE54C459A
+:102710004B7008E572640245716003022CBC90FFA7
+:1027200000E0541FB400028003D34029E54A60034F
+:10273000022820AD6AAE69AF68740112031878AD43
+:10274000E630E00BAD6AAE69AF6874021203187C24
+:102750000212307622B401028003D3401BE56D20C3
+:10276000E107E54A6003022820E54A24FE500302FF
+:1027700028207C0212307622B402028006D3500355
+:1027800002281EE56D20E10DE54A6009E54A6480F6
+:102790006003022820AC4A1230FD4003022820E5E5
+:1027A00049702530021190FF80E05408AD6AAE698F
+:1027B000AF68120318800F90FF82E05408AD6AAE34
+:1027C00069AF68120318803D154930021DE549754F
+:1027D000F008A42448F582E434FFF583E05408AD02
+:1027E0006AAE69AF68120318801BE54975F008A44A
+:1027F0002408F582E434FFF583E05408AD6AAE693D
+:10280000AF68120318AD6AAE69AF681201EF600BD2
+:10281000AD6AAE69AF6874011203187C021230769B
+:10282000228000022CBCE56D20E706E57245716050
+:1028300003022CBC90FF00E0541FB400028003D3BD
+:10284000401AE54C14454B7004E54A600302292CFC
+:1028500078ADE654FEF67C0012307622B401028098
+:1028600003D3402AE56D20E108E56D20E00302294D
+:102870002CE56D30E004E54A700BE56D30E109E5CB
+:102880004A24FE500302292C7C0012307622B40226
+:10289000028006D3500302292AE54C454B6003020F
+:1028A000292CAC4A1230FD400302292CE56D20E1B1
+:1028B00007E56D20E0028077E56D30E006E54960D0
+:1028C00002806CE549700F90FF82E054F7F090FFB2
+:1028D00080E054F7F022E549B401028003D34009B7
+:1028E0007D017C03120F128011B402028003D340D9
+:1028F000097D017C04120F1280001549300215E594
+:102900004975F008A42448F582E434FFF583E054C7
+:10291000F7F08013E54975F008A42408F582E43443
+:10292000FFF583E054F7F07C00123076228000023D
+:102930002CBCE56D20E706E57245716003022CBCF6
+:1029400090FF00E0541FB400028003D3401AE54C0E
+:1029500014454B7004E54A6003022A0F78ADE64443
+:1029600001F67C0012307622B401028003D34029A4
+:10297000E56D20E108E56D20E003022A0FE56D30EA
+:10298000E004E549700BE56D30E108E54924FE50AF
+:1029900002807F7C0012307622B402028003D34092
+:1029A0006FE54C454B60028069AC4A1230FD400235
+:1029B0008060E56D20E107E56D20E0028054E54987
+:1029C000701430020990FF80E04408F0800790FF07
+:1029D00082E04408F022E56D30E1331549300215FC
+:1029E000E54975F008A42448F582E434FFF583E056
+:1029F0004408F08013E54975F008A42408F582E442
+:102A000034FFF583E04408F07C0012307622800227
+:102A10008000022CBCE56D20E712E5724571700C58
+:102A2000E54A700890FF00E0541F6003022CBCE5EB
+:102A30004C90FFFFF090FFFFE06005436D018003C5
+:102A4000536DFE7C0012307622E56D30E70EE572A4
+:102A50004571600890FF00E0541F6003022CBCAD7C
+:102A60004BE54CED7D00FC7D00FCBD0002800302C7
+:102A70002BA8B401028003D34032E54A7005E54C2F
+:102A8000FC6003022BAA756A407569F8756801D36A
+:102A9000E5729412E57194004006E4FD7C12800416
+:102AA000AC72AD718C708D6F12308B22B4020280CB
+:102AB00003D34059E54A6003022BAAE54CFC70277A
+:102AC000756A527569F8756801D3E5729419E571F4
+:102AD00094004006E4FD7C198004AC72AD718C70EA
+:102AE0008D6F12308B8025756A6B7569F87568017A
+:102AF000D3E5729427E57194004006E4FD7C2780BD
+:102B000004AC72AD718C708D6F12308B22B40302E5
+:102B10008006D35003022BA8E54CF549700F90FFB7
+:102B200004E0FDA3E04D6003022BAA801890FB0295
+:102B3000E0FDA3E0FC90FF05E06C700790FF04E06F
+:102B40006D60028068E4F570F56F7F00E54914C59B
+:102B500049600FEF2400F582E434FBF583E02FFF9A
+:102B600080EA8F4AE54A2400F582E434FBF583E0ED
+:102B70007D00D39572ED95714006AC72AD71800FFA
+:102B8000E54A2400F582E434FBF583E07D00FC8C0B
+:102B9000708D6FE54A2400FCE434FBFDFEECFD7F04
+:102BA000018D6A8E698F6812308B228000022CBCE6
+:102BB000022CBCE56D30E719E5721445717012E521
+:102BC0004A700EE54C454B700890FF00E0541F60C2
+:102BD00003022CBCE56D20E008E56D20E103022C2A
+:102BE000BC756A6EE4F569F568E4F56F04F570127A
+:102BF000308B22E56D20E727E57245717021E54AAB
+:102C0000701DE54C6402454B600DE54C14454B606E
+:102C100006E54C454B700890FF00E0541F6003022E
+:102C20002CBCE56D20E008E56D20E103022CBC859D
+:102C30004C6EE56E700A436D01536DFDD2B080207D
+:102C4000E56E64026007E56E1460028072536DFEEB
+:102C5000436D02E56E64026005E56E147002C2B059
+:102C60007C0012307622E56D30E71AE5721445716A
+:102C70007013E54A700FE54C454B700990FF00E07A
+:102C8000541F1460028038E56D20E10280317C0120
+:102C900012307622E56D20E715E5724571700FE57B
+:102CA0004C454B700990FF00E0541F146002800FE8
+:102CB000E56D20E10280087C00123076228000025F
+:102CC0002F7DB440028006D35003022F7390FF0182
+:102CD000E090FC35F0E54A90FC36F0E490FC37F0EB
+:102CE000E56A2403F56AE5693400F569AD4BE54C06
+:102CF000856A82856983CDF0A3CDF090FF01E01253
+:102D000001C02D2A012D50022D7A032DA4042DF28D
+:102D1000052E2F062E55072E7B082EA7092ECD0B2C
+:102D20002EF30C2F02802F028100002F60E56D2012
+:102D3000E7067C051225DF227D767E357F02793815
+:102D40007AFC7B01740878001203487D087C00122D
+:102D5000254622E56D20E7067C051225DF22E54A9F
+:102D6000B403004010B40500500BE54A7F00FE7C20
+:102D70001012324D227D007C0712254622E56D207F
+:102D8000E7067C051225DF22E54AB403004010B4B3
+:102D90000500500BE54A7F00FE7C1112324D227D6A
+:102DA000007C0712254622E56D20E7067C051225EA
+:102DB000DF22E54AB405028003D3400AE4FF04FEA3
+:102DC0007C0A12324D22B401028003D3400AE4FF90
+:102DD00004FE7C0812324D22B403004010B40500FA
+:102DE000500BE54A7F00FE7C1312324D227D007CA1
+:102DF0000712254622E56D20E734D3E5729448E5B5
+:102E00007194005006E572457170067C021225DF50
+:102E100022E54AB40103B3400BC3B403004009B434
+:102E200006005004123123227C071225DF221225CE
+:102E30007D22E56D20E71DE54AB403004010B4058E
+:102E400000500BE54A7F00FE7C1612324D227C07B3
+:102E50001225DF2212257D22E56D20E71DE54AB40B
+:102E600003004010B40500500BE54A7F00FE7C19BA
+:102E700012324D227C071225DF2212257D22E56DBC
+:102E800020E723748190FF93F0E54AB403004010DB
+:102E9000B40500500BE54A7F00FE7C1712324D222C
+:102EA0007C071225DF2212257D22E56D20E71DE536
+:102EB0004AB403004010B40500500BE54A7F00FE01
+:102EC0007C1812324D227C071225DF2212257D222A
+:102ED000E56D20E71DE54AB403004010B40500503D
+:102EE0000BE54A7F00FE7C1512324D227C0712252D
+:102EF000DF2212257D22E56D20E7067C071225DF03
+:102F00002212257D22E56D30E72090FF00E0541F5E
+:102F1000701090FF01E0B480051225748003122523
+:102F20007D227D007C051225462290FF00E0541F83
+:102F300060067C051225DF22D3E5729448E5719482
+:102F400000500BC3E5729407E571940050067C03B2
+:102F50001225DF22E54AB40504123123227C071230
+:102F600025DF22E56D30E7087D007C05122546222D
+:102F70007C051225DF22B420028003D340008000AC
+:102F80001230512275430090FF83E0540FD39543D4
+:102F90004024E54324F0F582E434FEF583E0AD6A95
+:102FA000AE69AF6812031805430DED70010E8D6A0E
+:102FB0008E698F6880D1E5437D00FCC3E5709CF588
+:102FC00070E56F9DF56FE570456F6006E490FF83D7
+:102FD000F02290FF82E04408F0E4F56F75704990AC
+:102FE000FC35E0B405028003D3404090FC36E0F5A8
+:102FF00043B405028003D3400AE4FF04FE7C0B12B5
+:10300000324D22B401028003D3400AE4FF04FE7C67
+:103010000912324D22B403004010B40500500BE5F4
+:10302000437F00FE7C1412324D2222B480004023E4
+:10303000B48200501E7C357DFC1217A57D008C6C7F
+:103040008D6B90FC37E0600512305180057C0012DA
+:103050003076222290FF83E0547FF090FF82E0449C
+:1030600008F090FF80E04408F02290FF82E04408DE
+:10307000F090FF80E04408F0228C237D008C708D5E
+:103080006F756A357569FC75680112308B2290FF87
+:1030900083E0547FF0E5706449456F700122C3E519
+:1030A000709408E56F94004015752108E5217D00B6
+:1030B000FCC3E5709CF570E56F9DF56F8009857028
+:1030C00021E4F56F757049752200E522C395215002
+:1030D00026AD6AAE69AF681201EFFCE52224F8F56F
+:1030E00082E434FEF583ECF005220DED70010E8DC7
+:1030F0006A8E698F6880D3E521547F90FF81F0222A
+:103100008C487F00EF24FD4019E4EF75F007A424FC
+:103110007FF582E434F8F583E065487002D3220F2E
+:1031200080E28F47C32285727085716F90FF82E0C5
+:1031300054F7F090FF83E0547FF022C000C001C03C
+:1031400002C006C007E5782408F8860653067F7C8F
+:10315000FF1231AD7C007D00E57B6046FF90FD9560
+:10316000E0547F6E700FC083C082A3E0FDA3E0FC3B
+:10317000A3157B8007A3A3A3DFE68026DF06D0820A
+:10318000D083801EE0F8A3E0F9A3E0FAD082D083D8
+:10319000E8F0A3E9F0A3EAF0A3C083C082A3A3A34D
+:1031A00080DA123246D007D006D002D001D00022F9
+:1031B00085A87A75A888EC70027C3F8C7922E57826
+:1031C0002408F8760012329A80FBC000C001C002C9
+:1031D000C006C007AE047CFF1231ADE57B6042FF44
+:1031E00090FD95E0547F6E700BC083C082A3A3A3B3
+:1031F000157B8007A3A3A3DFEA8026DF06D082D059
+:103200008380D8E0F8A3E0F9A3E0FAD082D083E885
+:10321000F0A3E9F0A3EAF0A3C083C082A3A3A38034
+:10322000DA7808087918097C01E6547F6E70067612
+:10323000007700800608090CBC08EE123246D00761
+:10324000D006D002D001D00022757900857AA8225C
+:10325000C0F0C082C083C3E57B24E8500512329AD7
+:1032600080F4EC6031903575E493C39C4028C00431
+:103270007CFF1231ADD004430480E57B75F003A4DC
+:103280002495F582E434FDF583ECF0EFA3F0EEA392
+:10329000F0057B123246D083D082D0F022C0047C6D
+:1032A00020D28CD28DD504FDD0042275A80075885B
+:1032B0000075B80075F00075D000E4F8900000F6D5
+:1032C00008B800FB020000C3ED940250047D037CAB
+:1032D000E8ECF4FCEDF4FD0CBC00010D8C7F8D7E60
+:1032E00022C3EC94BCED940250047D077CD0ECF436
+:1032F000FCEDF4FD0CBC00010D8C7D8D7C22EC708E
+:103300000122C000E5782418F8A604E5782408F81E
+:10331000C6547FF6E630E703D0002212329A80F4DA
+:10332000C28C857C8C857D8AD28CC0E0C0D0C0F0F8
+:10333000C082C083C000C001C002C003C004C00579
+:10334000C006C007121AF8E5782408F8E66024E5FC
+:10335000782410F8A681E57875F021A4248DF582F3
+:10336000E434FCF58378AEE58104C398F9E6F0080F
+:10337000A3D9FA74082578F8057808E65480700C0B
+:10338000E578B407F3780875780080EFE5782410C5
+:10339000F88681E57875F021A4248DF582E434FC6B
+:1033A000F58378AEE58104C398F9E0F608A3D9FA6D
+:1033B000D007D006D005D004D003D002D001D00071
+:1033C000D083D082D0F0D0D0D0E032C0E0C0D0C026
+:1033D00000C001C002C28E857E8D857F8BD28E7823
+:1033E0001979097A07E77004A600800BE6600816D1
+:1033F000E67004E74480F70809DAEAE57960131417
+:10340000F579700EE5782408F87600123246D28CF1
+:10341000D28DD002D001D000D0D0D0E0327581ADB5
+:10342000742A90FF93F0757F30757EF8757D607516
+:103430007CF012053F1234CE12175B90FF93E044EC
+:1034400001F0B2B31234F81232A880DA22C0007C44
+:1034500001EC2408F8E660090CBC08F512329A80E9
+:10346000EED00022C0F0C082C083C000C006C007FA
+:10347000ED2410F876BCED75F021A4248DF582E4DE
+:1034800034FCF583C082C083A3A3E4780DF0A3D8F5
+:10349000FCEC547F75F002A42441F582E5F034354C
+:1034A000F583E493FE740193F5828E83E493FE74B6
+:1034B0000193FFD083D082EFF0A3EEF0ED2408F863
+:1034C000EC4480F6D007D006D000D083D082D0F074
+:1034D00022757800757B007A08791878087600776D
+:1034E000000809DAF8E478087480447FF67401442F
+:1034F00010F58975B808D2ABD2A9227581ADD28EEC
+:10350000D28CD2AFE57B6032FF90FD95E0548060B5
+:103510002478087908E0547FFA7B00E6547FB502EE
+:10352000027BFF08D9F5EB700CEAF012344AAD04C7
+:10353000AC02123461A3A3A3DFD212329A80C57CFD
+:10354000017D002204FE04F204F604EA04E604E22B
+:1035500004EE04FA04A604AA04D604DA04A204A21F
+:1035600004A204DE04BE04B604BA04B204CA04C64B
+:1035700004C204CE04D204AE1901030022004802A2
+:1035800000480E301420C81AD0180A0C0506020391
+:1035900001020001CE0181010000C0008000600036
+:1035A0003000180010000800040002000100081894
+:1035B00028380C05100A0200000000000301100A60
+:1035C000020000000000FBE0FBF2090227000102FC
+:1035D00000A0FA0904000003FF00000007058102B3
+:1035E00040000007050102400000070583030200B8
+:1035F00001220354005500530042003300340031CF
+:1036000000300020002000200020002000200020AA
+:073610000020000000000093
+:00000001FF
diff --git a/firmware/mts_edge.fw.ihex b/firmware/mts_edge.fw.ihex
new file mode 100644
index 0000000..d14ebd6
--- /dev/null
+++ b/firmware/mts_edge.fw.ihex
@@ -0,0 +1,881 @@
+:10000000F0360002001E021AFBFFFFFFFFFF023363
+:10001000F9FFFFFFFFFFFFFFFFFFFFFFFFFF0234BE
+:10002000A47581D490FDE88583A0123618EC4D604C
+:100030007378AF8003760018B8A0FA787F800376D3
+:100040000018B865FA78208003760018B820FA788E
+:10005000208003760018B81FFA90FDDDAE83AF82D2
+:1000600090FBF81200AA6005E4F0A380F690FDE88A
+:10007000A88290FDE8A982E8696005E4F20880F7AB
+:100080009001081200B390010C1200B390011012FD
+:1000900000B39001141200D190011A1200D1900106
+:1000A000201200D175D0001234F6020126EF6582CD
+:1000B0007003EE658322E493F8740193F97402935C
+:1000C000FE740393F5828E83E869700122E493F64F
+:1000D000A30880F4E493FC740193FD740293FE740E
+:1000E0000393FF740493F8740593F58288831200D8
+:1000F000AA700122E493A3A883A9828C838D82F045
+:10010000A3AC83AD828883898280E32121049B8014
+:1001100080049BB0B4049BFDE8049F049FFBF304A0
+:10012000A4049FFBF30504050480FED0F030F00921
+:1001300020F303F68010F7800D30F10920F303F26D
+:100140008004F38001F020F404FCD0E0CC22CCC089
+:10015000E0120163020154BC0005D0F0ACF022C3F0
+:1001600013DCFC02012ABF0009ED258275F001F8BD
+:10017000E622BF010FED2582F582EE3583F583750A
+:10018000F004E022ED258275F002F8E222D083D05F
+:1001900082F5F0C3E493A3C5F095F0C0E0C3D0F0BE
+:1001A000E493A395F04012A3A3C3E5F033500205F6
+:1001B000832582F58250020583740193C0E0E493A5
+:1001C000C0E022D083D082F5F0E4937009740193EB
+:1001D0007004A3A3800C74029365F06005A3A3A32D
+:1001E00080E7740193C0E0E493C0E022120264024D
+:1001F00001FB1202B80201FB1202DC0201FB30E03B
+:100200000720E302E622E72230E10720E302E222B0
+:10021000E32230E202E022E493221202DC02022313
+:100220001202B8020223ABF012022DCBC5F0CB2292
+:1002300030E01020E306E6F5F008E622E7F5F009E5
+:10024000E7192230E11020E306E2F5F008E222E3AC
+:10025000F5F009E3192230E206E0F5F0A3E022E42C
+:1002600093F5F074019322BB0003740922BB0107CC
+:1002700089828A83740422BB020789828A8374106C
+:1002800022740A22020284BB0007E92582F8740165
+:1002900022BB010DE92582F582EA3583F5837404DA
+:1002A00022BB020DE92582F582EA3583F5837410BD
+:1002B00022E92582F87402220202B8BF0005EDF897
+:1002C000740122BF01078D828E83740422BF02074E
+:1002D0008D828E83741022EDF87402220202DCBF3C
+:1002E0000007ED2582F8740122BF010DED2582F58E
+:1002F00082EE3583F583740422BF020DED2582F56D
+:1003000082EE3583F583741022ED2582F874022283
+:10031000020310C0E0120264020328C0E01202B817
+:10032000020328C0E01202DC02032830E00B20E3C5
+:1003300004D0E0F622D0E0F72230E10B20E304D035
+:10034000E0F222D0E0F322D0E0F022C9CDC9CACE3B
+:10035000CACBCFCB12035BEDF9EEFAEFFB22BB0069
+:100360002FBF000AFAEDF8E7F60809DAFA22BF0112
+:10037000128D828E83F802037809A3E7F0D8FA225F
+:10038000020383FAEDF8E7F20809DAFA2202038D94
+:10039000BB014DBF001489828A83F9EDF802039FE7
+:1003A00008A3E0F6D9FA220203B0BF01228D828EA3
+:1003B00083FB08C9C582C9CAC583CAE0A3C9C5826F
+:1003C000C9CAC583CAF0A3DBEAD8E8220203D38DE9
+:1003D000828E83F9EDF8E0F208A3D9FA220203DD58
+:1003E000BB024DBF001289828A83F9EDF80203EF48
+:1003F00008A3E493F6D9F922BF01238D828E83FBF3
+:1004000008C9C582C9CAC583CAE493A3C9C582C93C
+:10041000CAC583CAF0A3DBE9D8E722020422898295
+:100420008A83F9EDF8E493F208A3D9F922020433A0
+:10043000BF000DFAEDF8E3F60809DAFA2202043DEE
+:10044000BF01128D828E83F802044A09A3E3F0D81B
+:10045000FA22020455FAEDF8E3F20809DAFA220268
+:10046000045FE6FB08E6FA08E6F904F618700106F0
+:1004700022E6FF08E6FE08E6FD22EFF0A3EEF0A379
+:10048000EDF022EBF0A3EAF0A3E9F022E0FFA3E015
+:10049000FEA3E0FD22E0FBA3E0FAA3E0F9220000C6
+:1004A000000000000000000504006105730026053F
+:1004B0009A00330A0B00610A770066154600610C4A
+:1004C000FB006109AB006109E200610DC200610B34
+:1004D000F300610A1E00610A530061173E003317E2
+:1004E0005100341E1600431EBC0044202900442045
+:1004F0001700471EE300471F88004D1FD9004F1FFC
+:10050000050058338400617CCC7DFF121CC722900B
+:10051000FFFCE020E72DC2AFAE59AF58755A20E579
+:100520005A14C55A6019E4FE7F05EE4FCE24FFCE63
+:10053000CF34FFCF6007E490FF92F080ED80E08E33
+:10054000598F582212050C7D077CB71233A07D0FFE
+:100550007C6E1233BA78A17A06E4F608DAFC7A06E1
+:100560001205CF7C03120E57122165E4FEFF7C0FAB
+:10057000123329D2A822123214E490FC38F090FFF2
+:10058000F0E030E408740190FC39F08005E490FC60
+:1005900039F07D0A7C00122547123297221232145C
+:1005A00090FC39E014700E90FFF0E04410F07C00F5
+:1005B0001225E0801990FC39E0700E90FFF0E054B5
+:1005C000EFF07C001225E080057C171225E0123246
+:1005D000972290FFF0E054ABF090FFF0E04420F061
+:1005E000228C378D367882EDF608ECF6EDFEECFDC8
+:1005F0007F019000051201F57880F67882E6FD080B
+:10060000E6FCEDFEECFD7F019000041201F5540FB5
+:10061000FC7D8012176F7880E6700DAD3AAE39AF71
+:1006200038E41203187C082290FFF0E054FEF090AA
+:10063000FFF0E054FDF0801E7882E6FD08E6FCED58
+:10064000FEECFD7F0190000812021725E0440190A6
+:10065000FFF3F00206DB7882E6FD08E6FCEDFEEC37
+:10066000FD7F0190000612021754FE90FFF3F08008
+:100670002B7882E6FD08E6FCEDFEECFD7F019000A4
+:1006800008120217FAEB90FFF1F01208CA400DAD04
+:100690003AAE39AF38E41203187C18227882E6FDAE
+:1006A00008E6FCEDFEECFD7F0190000812021790B9
+:1006B000FFF1F01208CA400DAD3AAE39AF38E4127E
+:1006C00003187C18227882E6FD08E6FCEDFEECFDBE
+:1006D0007F01900006120217440190FFF3F0788327
+:1006E000E62403F618E63400F67880E624FE500986
+:1006F00090FFF0E054FDF0800790FFF0E04402F03E
+:10070000E490FFF1F0788176007880E624FFFCE445
+:1007100034FFFD7881E67F00FEECD39EEF6480CD50
+:1007200064809D402F1208AF400F7881E6AD3AAE4D
+:1007300039AF381203187C182290FFF2E0FC78825F
+:100740008683088682ECF0788106A37882A68308E7
+:10075000A68280B51208AF400F7881E6AD3AAE3977
+:10076000AF381203187C182290FFF2E0FC788286E2
+:1007700083088682ECF07880E6AD3AAE39AF381265
+:1007800003187C00228C378D367882EDF608ECF663
+:10079000EDFEECFD7F019000051201F57881F67801
+:1007A00082E6FD08E6FCEDFEECFD7F019000041200
+:1007B00001F5540FFC7D8112176F7881E670037C80
+:1007C000082290FFF0E054FEF090FFF0E054FDF0BE
+:1007D000801B7882E6FD08E6FCEDFEECFD7F0190D3
+:1007E000000812021725E090FFF3F0805B7882E6A4
+:1007F000FD08E6FCEDFEECFD7F01900006120217FD
+:1008000054FE90FFF3F080217882E6FD08E6FCEDCF
+:10081000FEECFD7F01900008120217FAEB90FFF149
+:10082000F01208CA40037C18227882E6FD08E6FC34
+:10083000EDFEECFD7F0190000812021790FFF1F031
+:100840001208CA40037C18227883E6240AF618E6C8
+:100850003400F6788076007881E624FFFCE434FFEB
+:10086000FD7880E67F00FEECD39EEF6480CD64804F
+:100870009D402178828683088682E090FFF1F01205
+:1008800008CA40037C1822788006788306E6187030
+:10089000010680C390FFF0E04401F078828683086F
+:1008A0008682E090FFF1F01208CA40037C18227C97
+:1008B000002290FFF0E020E71290FFF0E030E50921
+:1008C00090FFF0E04420F0C32280E7D32290FFF0B5
+:1008D000E020E31290FFF0E030E50990FFF0E04403
+:1008E00020F0C32280E7D3228C428D417C00ED545E
+:1008F000F0FDEC7003ED64307005753E0380037508
+:100900003E04AC3E120F74758300858340E541546C
+:100910000FF53FE5407004E53F64037035E53E2484
+:10092000FD75F00AA42402F582E434FCF583E0307E
+:10093000E60512105B8019E53E24A1F8E654FBF6AB
+:1009400078ADE62405F58218E63400F583740FF0DF
+:100950008059E5407004E53F64047048E53E24FD9D
+:1009600075F00AA42402F582E434FCF583E030E556
+:1009700007AC42AD41121C5CE54230E21578B1E6AD
+:1009800030E00F78B1E630E109E4FF04FE7C0412A8
+:10099000332978ADE62406F58218E63400F5837431
+:1009A0000FF08007E4FC7DEE121C5CC203221232C1
+:1009B00014120F7478ADE62406F58218E63400F5BB
+:1009C00083E090FC38F078ADE62405F58218E63433
+:1009D00000F583E090FC39F0C2037D027C00122513
+:1009E00047123297221232147899ECF6EC24A1F8CF
+:1009F000E630E1077C131225E0800F90FC39E0FD22
+:100A00007899E6FC1213F11225E012329722123285
+:100A1000147899ECF67D00120F141225E01232972B
+:100A2000221232147899ECF6EC24A1F8E630E207B1
+:100A30007C131225E0801B7899E624A1F8E620E1DA
+:100A4000077C121225E0800A7899E6FC1214151230
+:100A500025E0123297221232147899ECF6EC24A198
+:100A6000F8E620E2077C111225E0800A7899E6FC7E
+:100A70001215161225E0123297221232147899ECD0
+:100A8000F6120F7478ADE62409F58218E63400F505
+:100A900083E090FC3FF078ADE6240AF58218E63456
+:100AA00000F583E090FC40F078ADE62403F5821871
+:100AB000E63400F583E0FC78ADE62404F58218E620
+:100AC0003400F583E0F56278ADE62402F58218E69D
+:100AD0003400F583E0F5638C61E4EC333354017842
+:100AE00099F66008E56230E1037899067899E69016
+:100AF000FC41F078ABE62402F58218E63400F58379
+:100B0000E0FDA3E0540CFCED54E68C65F564E56172
+:100B100030E503436501E56220E50EE561547F7031
+:100B200008E56120E703436502E56130E3034365BF
+:100B300010E56130E203436520E56154036003433F
+:100B40006540E56130E103436580E56130E40343DE
+:100B50006401E56130E603436408E56220E40EE5E4
+:100B600061547F7008E56120E7034364105365FB1F
+:100B70005364F9AD64E56590FC3ACDF0A3CDF0E5A2
+:100B80006330E30DE5635430C4540F90FC3DF080B6
+:100B900005E490FC3DF0E563540390FC3CF0E56314
+:100BA0005404C31390FC3EF090FC3CE0700E7D3585
+:100BB0007EFC7F01740190000912014B78ADE624A0
+:100BC00008F58218E63400F583E07C00FD78ADE698
+:100BD0002407F58218E63400F583E07F004CFEEF31
+:100BE0004D90FC38F0A3CEF0CEC2037D0A7C0012FB
+:100BF0002547123297221232147899ECF6789E76B5
+:100C0000010876FC087638789B760C789E12046E84
+:100C100012021D789CCBF6CB08F67F00EF24EA4049
+:100C20001FE4EF25E090365AFD93CD0493789D663E
+:100C30007003ED18667006789B760080030F80DCE9
+:100C4000789AEFF6789E12046E90000212021778DE
+:100C50009CCBF6CB08F65404CB54064B6004789B2F
+:100C6000760B789DE630E313789E12046E900005B3
+:100C70001201F524FB5004789B760D789DE654C054
+:100C80007D0064C04D7004789B760B789E12046ED4
+:100C90009000041201F524FC5004789B760F789E96
+:100CA00012046E9000061201F524FD5004789B7624
+:100CB0000E789E12046E9000091201F524FD500476
+:100CC000789B760A789BE6702A7899E6FC120F7476
+:100CD000789E12046E78ABE6F978AAE6FA7B017486
+:100CE0000A7800120348C2037899E6FC12111778BB
+:100CF0009BECF6789BE6FC1225E01232972212322A
+:100D0000147899ECF6120F747899E624FD75F00AC0
+:100D1000A42414F582E434FCF583AC82AD8378AA74
+:100D20008683088682ECF9EDFA7B0A78011203B01B
+:100D3000C2037899E6FC121117123297228D2B8C80
+:100D40002AED60407527017529487528FFE52A249A
+:100D5000FDFCE434FFFDEC7C0325E0CD33CDDCF974
+:100D6000FCE5292CF529E5283DF528AD29AE28AF6D
+:100D700027748090000612032074809000021203F2
+:100D800020120FC7E52B14603B75270175290875E4
+:100D900028FFE52A24FDFCE434FFFDEC7C0325E07C
+:100DA000CD33CDDCF9FCE5292CF529E5283DF528E6
+:100DB000AD29AE28AF27E4900006120320E490008E
+:100DC00002120320221232147899ECF6EC24A1F8D6
+:100DD000E630E2097899E6FC121516D2007899E619
+:100DE000FC120F74789A760090FC39E030E70478B2
+:100DF0009A7601789AE6FD7899E6FC120D3AC203DC
+:100E00003000077899E6FC1214157C001225E012D8
+:100E100032972278ADE62404F58218E63400F58393
+:100E2000E04401F078ADE62404F58218E63400F5DC
+:100E300083E030E00280ED78ADE6240BF58218E621
+:100E40003400F583E054F8F078ADE62402F582181A
+:100E5000E63400F583E04480F022C2038C58120F80
+:100E60007478AA8683088682798B7A367B0A780121
+:100E70001203FE120E10AC587D02120D3AC203ACE2
+:100E800058121117228D538E528F518C50120F749D
+:100E9000754F0078ADE62405F58218E63400F58339
+:100EA000E020E41FE54F24F64019054FC2037C18EB
+:100EB0001233D790FF93E04401F0B2B3AC50120F5D
+:100EC0007480D078ADE62405F58218E63400F58309
+:100ED000E020E405C2037C022278ADE62405F58219
+:100EE00018E63400F583E0540F601678ADE624056B
+:100EF000F58218E63400F583E0540FF0C2037C015C
+:100F00002278AC8683088682E0AD53AE52AF511290
+:100F10000318C2037C00228D318C30121516E53186
+:100F2000600FE530B4030A7C0112250F7C81122585
+:100F30000FAC30120F74E531601A78AE86830886E4
+:100F400082E054E7F0A3A3A3A3E054E7F0AC307D24
+:100F500002120D3A78AA868308868279957A367BC2
+:100F60000A78011203FEC203E53024A1F8E654FD1D
+:100F7000F6AC30121117228C26300305123376801E
+:100F8000F87C0A123289D203E52624FD78A7F67090
+:100F90000778AE76FF0876E078A7E67D007C04252A
+:100FA000E0CD33CDDCF9FC24A078ADF6ED34FF18AC
+:100FB000F678A7E675F00AA42400FCE434FCFD787A
+:100FC000AAEDF608ECF61233222278ADE62402F5FB
+:100FD0008218E63400F583E030E72278ADE624029B
+:100FE000F58218E63400F583E0547FF078ADE6240E
+:100FF00002F58218E63400F583E04480F02278AEF2
+:101000008683088682E0547FF0AD83E5822404FC69
+:10101000E43D8C82F583E0547FF078ADE6240BF557
+:101020008218E63400F583E054F8F078AFE6240146
+:10103000F58218E63400F583E04403F078AFE62447
+:1010400005F58218E63400F583E04403F078ADE658
+:101050002405F58218E63400F583740FF02278AE8B
+:101060008683088682E0543FF0AD83E5822404FC49
+:10107000E43D8C82F583E0543FF078A7E624A8F89D
+:10108000E6FC78AFE62401F58218E63400F583EC3F
+:10109000F078A7E624A8F8E6FC78AFE62405F58208
+:1010A00018E63400F583ECF078ADE6240BF58218F1
+:1010B000E63400F583E054FB4402F52678ABE624E1
+:1010C00002F58218E63400F583E030E5034326019B
+:1010D00078ADE62405F58218E63400F583E030E0CB
+:1010E00003120FC7E526FC78ADE6240BF58218E65F
+:1010F0003400F583ECF078ADE62405F58218E6348B
+:1011000000F583740FF078AE8683088682E0448011
+:10111000F0A3A3A3A3E04480F0228C2A120F7478DA
+:10112000ABE62408F58218E63400F583E0FC78ADE0
+:10113000E6240AF58218E63400F583ECF078ABE695
+:101140002407F58218E63400F583E0FC78ADE62448
+:1011500009F58218E63400F583ECF078AA86830856
+:101160008682E0FDA3E0FCEDFE78ADE62408F58282
+:1011700018E63400F583EEF0ECFE78ADE62407F5D2
+:101180008218E63400F583EEF08C298D28C3EC94A8
+:1011900005ED940C400575277C8033D3E529940137
+:1011A000E5289403400575273C8023D3E5299481E5
+:1011B000E528940140057527188013D3E52994602C
+:1011C000E5289400400575270C8003752708AF2794
+:1011D000E4EF547C4483FF8F27E527FC78AFE624B7
+:1011E00001F58218E63400F583ECF0E527FC78AFD2
+:1011F000E62405F58218E63400F583ECF0E527FCDB
+:1012000078A7E624A8F8ECF678ADE62402F5821873
+:10121000E63400F583E0F52778ABE62402F5821882
+:10122000E63400F583A3E030E3175327C778ABE635
+:101230002405F58218E63400F583E0903686934263
+:101240002778ABE62402F58218E63400F583E03017
+:10125000E70543274080035327BF5327FB78ABE6BE
+:101260002406F58218E63400F583E0600343270482
+:101270005327FC78ABE62404F58218E63400F583A6
+:10128000E04227432780E527FC78ADE62402F5827B
+:1012900018E63400F583ECF078ADE62404F5821806
+:1012A000E63400F583E0F52778ABE62402F58218F2
+:1012B000E63400F583A3E030E1055327DF800343E4
+:1012C000272078ABE62402F58218E63400F583E0A7
+:1012D00030E4055327EF800343271078ABE6240959
+:1012E000F58218E63400F583E0B40203432702E5F3
+:1012F00027FC78ADE62404F58218E63400F583EC8B
+:10130000F078ADE62403F58218E63400F583E0F5C5
+:101310002778ABE62409F58218E63400F583E070FF
+:101320000553277F800343278078ABE62402F582AC
+:1013300018E63400F583A3E030E00543272080035E
+:101340005327DF78ABE62402F58218E63400F583F4
+:10135000E030E30543274080035327BF78ABE62402
+:1013600002F58218E63400F583E030E005432710EB
+:1013700080035327EF78ABE62402F58218E63400A9
+:10138000F583A3E030E40543270880035327F7786B
+:10139000ABE62402F58218E63400F583A3E030E5DD
+:1013A0000543270480035327FB78ABE62402F5822C
+:1013B00018E63400F583A3E030E6054327018003F7
+:1013C0005327FE78ABE62402F58218E63400F58355
+:1013D000A3E030E70543270280035327FDE527FC00
+:1013E00078ADE62403F58218E63400F583ECF0C20C
+:1013F000037C00228D278C26ED54031460037C109F
+:1014000022E527547C24FC40037C0B22E52624A102
+:10141000F8E64402F67C00228C30120F74E530248A
+:10142000A1F8E620E24FAC307D02120D3AE53024FF
+:10143000FE4428FC78AE8683088682ECF0AF83E514
+:10144000822404FEE43FFFEC8E828F83F07C038CC9
+:101450002CE52CFC78AFE62401F58218E63400F583
+:1014600083ECF0E52CFC78AFE62405F58218E63431
+:1014700000F583ECF0752D01752F48752EFFE530D2
+:1014800024FDFCE434FFFDEC7C0325E0CD33CDDC12
+:10149000F9FCE52F2CF52FE52E3DF52E78AFE6244F
+:1014A00004F58218E63400F583E054E7F52CAD2FFF
+:1014B000AE2EAF2DE4900002120320E4900006123D
+:1014C00003201201EF30E503432C10E52CFC78AF2C
+:1014D000E62404F58218E63400F583ECF012105B84
+:1014E00078ADE62406F58218E63400F583E0C20301
+:1014F000FCE53024A1F8E64404F68C2CE530540FCA
+:10150000C454F07E00FFEEEF44047D00FFEC4EFC7F
+:10151000ED4FFD121CC77C00228C2F120F74120F8E
+:10152000FB78AE8683088682E05408F0A3A3A3A3C9
+:10153000E05408F0AC2F7D02120D3AC203E52F24CF
+:10154000A1F8E654FBF67C0022123214789AECF6ED
+:10155000EC24A1F8E630E10A7D007C131225471245
+:101560003297789AE624A1F8E64401F6789AE6FCE8
+:10157000120F74789AE624FD75F00AA42414F582FB
+:10158000E434FCF58378AAE6FA08E6F97B0A7801E8
+:101590001203B078AA868308868279957A367B0A08
+:1015A00078011203FE120FC7C203789AE6FC1211EB
+:1015B000177899ECF6EC600A7D007C08122547123A
+:1015C0003297789AE6FC120F7478ADE62404F5821F
+:1015D00018E63400F583E0441054DFFC78ADE624CF
+:1015E00004F58218E63400F583ECF07899ECF6C245
+:1015F000037CC81233D7789AE6FC120F7478ADE6F4
+:101600002404F58218E63400F583E054EFF0C203B9
+:101610007CC81233D7789AE6FC120F7478ADE624B2
+:1016200004F58218E63400F583E04410F0C2037C30
+:10163000C81233D7789AE6FC120F7478ADE624040A
+:10164000F58218E63400F583E04420F0C2037CF014
+:101650001233D7789AE6FC120F7478ADE62405F5BC
+:101660008218E63400F583E030E415C203789AE688
+:1016700044107F00FE7C0712332912329702173D77
+:1016800078ADE62404F58218E63400F583E054CF03
+:10169000F0C2037CC81233D7789AE6FC120F747834
+:1016A000ADE62404F58218E63400F583E04430F01A
+:1016B000C2037CF01233D7789AE6FC120F7478AD2F
+:1016C000E62405F58218E63400F583E030E414C220
+:1016D00003789AE644107F00FE7C07123329123209
+:1016E00097805D78ADE62404F58218E63400F58332
+:1016F000E054EFF078ADE62404F58218E63400F506
+:1017000083E054DFF0789AE624FD75F00AA42414EF
+:10171000F582E434FCF583AC82AD8378AA86830835
+:101720008682ECF9EDFA7B0A78011203B0C20378E5
+:101730009AE6FC1211177D007C0B12254712329796
+:1017400022123214E490FC39F07D027C001225470D
+:10175000123297221232147C001225E012329722A4
+:10176000743C90FBE0F0743E90FBE0F0E490FC28C9
+:10177000F0228D358C34ECB401028003D34002801A
+:1017800028B402028003D34008A835E625E0F6809D
+:1017900018B404028003D3400AA835E625E025E00A
+:1017A000F68006A83576008000228C3C8D3BEDFE4D
+:1017B000ECFD7F0175660675670090FC29120477C1
+:1017C0001201EFB480028006D3500302187090FC1F
+:1017D000291204899000031201F554F0B4300280FC
+:1017E00003D3405F90FC291204899000081202176D
+:1017F000FAFDEBFE7F0190FC2C120477EECD9036C3
+:101800009FFCE493FF740193FEF9EFFA7B01EAFF7A
+:10181000E9FEECC39EED9F40259036A1E493FD7454
+:101820000193FCEDFEECFD7F01EECDFC90FC2EE083
+:10183000D39C90FC2DE09D50057566808033121975
+:101840008E802EB460028003D3400BAC3CAD3B12C3
+:1018500007828C66801BB41003B34010C3B420030E
+:10186000B34009C3B440028003D340007566818051
+:10187000008075B481028003D3406B90FC29120470
+:10188000899000031201F554F0B430028003D34074
+:101890001D90FC29120489900008120217FAFDEB32
+:1018A000FE7F0190FC2F1204771218F88036B46086
+:1018B000028003D34013753A67E4F539F538AC3C40
+:1018C000AD3B1205DE8C66801BB41003B34010C321
+:1018D000B42003B34009C3B440028003D340007571
+:1018E0006681800080028000E566FC90FC2912047D
+:1018F00089EC900002120320AC672290FC291204AC
+:10190000899000041201F5600474018001E4A2E0F2
+:10191000920190FC29120489ED2403FD50010E90E0
+:10192000FC2C12047790FC29120489900005120106
+:10193000F5F5679000041201F5540FFC7D6712174E
+:101940006FE5677004756608227566007884760016
+:101950007884E6C39567503890FC2F1204891201F1
+:10196000EFFC90FC2C120489EC12031830010E904D
+:10197000FC31E004F090FC307003E004F078840661
+:1019800090FC2EE004F090FC2D7003E004F080C089
+:101990002290FC2AE0FDA3E0FCEDFEECFD7F01EDD2
+:1019A000240AFD50010E90FC3212047790FC29129B
+:1019B00004899000041201F5540FB401028003D38E
+:1019C000401790FC321204890DED70010E90FC2F2F
+:1019D00012047778887601804EB402028003D340E7
+:1019E0001990FC32120489ED2402FD50010E90FC86
+:1019F0002F12047778887602802DB404028003D3F6
+:101A0000401990FC32120489ED2404FD50010E901F
+:101A1000FC2F12047778887604800CB400028003CF
+:101A2000D340007566082290FC29120489900005B5
+:101A30001201F5F567788576007885E6C3956740ED
+:101A400003021AF6788676007886E6C37888965080
+:101A50007690FC2C1204891201EFFC90FC321204E7
+:101A6000921201E9F45CFC1201E9F890FC2F1204D7
+:101A700089E8C0E01201EFC8D0E0C8584CFC90FCE7
+:101A80002C120489EC1203187887ECF690FC31E0F4
+:101A900004F090FC307003E004F009E970010A9052
+:101AA000FC3212048090FC29120489900004120177
+:101AB000F530E40E90FC2EE004F090FC2D7003E075
+:101AC00004F078860680817888E6FDE4FEFFEECD9E
+:101AD000FC90FC31E02CF090FC30E03DF07888E6A2
+:101AE000FDE4FEFFEECDFC90FC34E02CF090FC33E6
+:101AF000E03DF0788506021A367566002222C0E0C5
+:101B0000C0F0C082C083C0D0E8C0E0E9C0E0EAC055
+:101B1000E0EBC0E0ECC0E0EDC0E0EEC0E0EFC0E024
+:101B200090FF92E01201C01B49301B49321B58380C
+:101B30001B6A3A1B7C3E1B94441B88461BA0501B0F
+:101B4000E2521BC1541C035600001C2490FF92E07B
+:101B50007F00FE7C01123329021C34E4FF04FE7C6A
+:101B600003123329742090FFFEF0021C34E4FF04BA
+:101B7000FE7C02123329744090FFFEF0021C34E414
+:101B8000FF04FE7C04123329021C34E4FF04FE7CB3
+:101B900005123329021C34E4FF04FE7C06123329AB
+:101BA000021C3490FFA5E07D0090FBF8CDF0A3CDA2
+:101BB000F090FBF9E0FCF58390FBF8E04433FD1274
+:101BC0001CC7807390FFB5E07D0090FBFACDF0A3B9
+:101BD000CDF090FBFBE0FCF58390FBFAE04443FD85
+:101BE000121CC7805290FFA6E07D0090FBFCCDF058
+:101BF000A3CDF090FBFDE0FCF58390FBFCE04434CA
+:101C0000FD121CC7803190FFB6E07D0090FBFECD39
+:101C1000F0A3CDF090FBFFE0FCF58390FBFEE044E9
+:101C200044FD121CC7801090FF92E07D00FCED4443
+:101C3000AAFD121CC78000E490FF92F0D0E0FFD014
+:101C4000E0FED0E0FDD0E0FCD0E0FBD0E0FAD0E058
+:101C5000F9D0E0F8D0D0D083D082D0F0D0E03205F7
+:101C600081058105810581A881181818EDF608EC19
+:101C7000F690FF5AE020E70280F790FF59E07D00E0
+:101C8000A88118CDF6CD08F67D03A881E618FCE6FC
+:101C9000CC25E0CC33CCDDF9CCF6CC08F6A8811805
+:101CA000E644F8F6A881181818E6FD08E6FCA881B5
+:101CB000188683088682EDF0A3ECF0740290FF5A38
+:101CC000F0158115811581158122E5812405F581A5
+:101CD000E4A88118F6A88118181818EDF608ECF693
+:101CE00090FBF5E024F85003021DE8E4A8811818E1
+:101CF000F6A88118E6FEA88118181818E6FD08E66F
+:101D0000FC7F00EF24F8404DE4EF25E0247DF582D0
+:101D1000E434FCF583E0FBA3E06C7003FAEB6D7038
+:101D2000097401A8811818F6802BE4EF25E0247DC2
+:101D3000F582E434FCF5837A00E054F0CCF8CCCDA5
+:101D4000F9CDFB7800E954F0F9EA687002EB6970AC
+:101D5000010E0F80AEA88118EEF6A8811818181889
+:101D6000EDF608ECF6A881EFF6A8811818E6707970
+:101D7000A88118E624F74071A88118181818E654AD
+:101D80000FA881F664046017A881E664036010A8B8
+:101D90008118181818E6FD08E6FC121C5C804A7CC5
+:101DA0000A123289A88118181818E6FD08E6FC9076
+:101DB000FBF4E025E0247DF582E434FCF583EDF0CE
+:101DC000A3ECF090FBF4E0FFE4EF045407FF90FB7A
+:101DD000F4F090FBF5E004F012332290FBF6E07093
+:101DE00008E4FEFF7C0F123329802790FBF7E00404
+:101DF000F0543F701D90FBF7E044FE7D00FC90FB2B
+:101E0000F4E025E0247DF582E434FCF583EDF0A3D5
+:101E1000ECF0E58124FBF58122788B7600788C76D6
+:101E200000740190FBF6F012321490FBF5E060575D
+:101E30007C0A12328990FBF3E025E0247DF582E4F0
+:101E400034FCF583E0FDA3E0FC90FBF3E025E02407
+:101E50007DF582E434FCF583E4F0A3F090FBF3E03D
+:101E6000FFE4EF045407FF90FBF3F090FBF5E01460
+:101E7000F07889EDF608ECF61233227889E6FD0851
+:101E8000E6FC1208E580A312337690FF93E044014C
+:101E9000F0B2B3788B06B6000D788B7600788CE6BE
+:101EA000F40404788CF68082E490FBF6F090FBF565
+:101EB000E07D00FCED44CFFD121C5C123297221233
+:101EC0003214E5706449456F601590FF83E0540F4C
+:101ED0007D00D39570ED956F500512305D80031233
+:101EE000312D12329722123214E5706449456F6029
+:101EF00005123167800E90FF80E04408F090FF8368
+:101F0000E0547FF0123297221232148C54EC54F0C9
+:101F1000B41015756A357569FC756801E56A2403A6
+:101F2000F56AE5693400F569E4F557F556E556C3F9
+:101F300094015027E554540FFCAD6AAE69AF6812A6
+:101F40000E828C55EC60028012056AE56A7002050B
+:101F5000690557E5577002055680D2E554540F24A1
+:101F6000A1F8E654FEF6E554540F7F00FE7C1212F1
+:101F70003329E5551470097D007C09122547800737
+:101F8000AD577C001225471232972212321490FF6F
+:101F9000FCE04402F090FF00E030E71390FF83E0A4
+:101FA0004480F0436D8090FFFCE04401F08011908C
+:101FB000FF82E04408F0536D7F90FFFCE054FEF098
+:101FC00090FF81E04480F01225FA90FFFEE0440586
+:101FD000F090FFFCE054FDF0123297221232147C94
+:101FE000011233D778B1E64402F674FEFC04FD1208
+:101FF0001CC790FF5AE030E70280F7E4F54E754DBC
+:1020000010AC4EAD4DE54E154E7002154DEC4D60C9
+:102010000280EE438701123297221232147C0212A0
+:1020200032A378B1E654FDF61232972212321478B8
+:10203000B1E630E02C78B1E630E12678B1E6FCF587
+:102040008318E644F0FD121C5C90FFFCE04420F095
+:102050007C021233D778B1E654FDF6741A90FFFE75
+:10206000F078B1E6FCF58318E644F1FD121C5C1231
+:10207000329722756D0090FFFFE06003436D01759C
+:102080006E00E4F56CF56BE4F56F7570497484903F
+:10209000FF82F0748490FF80F0748090FF58F07499
+:1020A0008090FF5AF0AD46AF457E00EE24FE50030F
+:1020B00002213FE4EE75F007A4247FF582E434F8B2
+:1020C000F583E0FFE4EF5480FDE4EF540F14FFEDDF
+:1020D0006038E4EF75F008A42448F582E434FFF595
+:1020E000837490F0E4EF75F008A4244AF582E43498
+:1020F000FFF5837480F0E4EF75F008A4244EF582B8
+:10210000E434FFF5837480F08034E4EF75F008A4C4
+:102110002408F582E434FFF5837490F0E4EF75F061
+:1021200008A4240AF582E434FFF583E4F0E4EF75B3
+:10213000F008A4240EF582E434FFF583E4F00E02E7
+:1021400020A88D468E448F45747F90FFFDF07490DB
+:1021500090FFFCF0228C58EC24F65006E55824370A
+:10216000FC22E5582430FC22D2B0D2B1C2B41225F0
+:1021700044EC700302227F755C03AE5B7F00E55C7C
+:10218000155C6480247F5035EF2400F582E434FB35
+:10219000F583E0FE24FE501EEF7D00FCE4FB74742A
+:1021A000C39CFAEB9DFBEE7D00FCEAC39CED6480D2
+:1021B000CB64809B50028005EF2EFF80C18E5B8F29
+:1021C0005AE55C6480247F500302227FE55A248E06
+:1021D000500302227F855A5D755B00AE5AAF5B905B
+:1021E00036CAE493F55CE55C155C6480247F501886
+:1021F000EE2400F582E434FBF583E0FCEF9036CA70
+:10220000936C70040E0F80DE8E5A8F5BE55C6480E9
+:10221000247F406E755E017560E8755FFFE55D24A3
+:1022200002F55A755C07E55C334057AD60AE5FAFB1
+:102230005EE55CF5823395E0F5831201F5C4540F39
+:10224000FC122152E55A2400F582E434FBF583ECBC
+:10225000F0055A055AAD60AE5FAF5EE55CF58233BE
+:1022600095E0F5831201F5540FFC122152E55A2432
+:1022700000F582E434FBF583ECF0055A055A155C51
+:1022800080A4740290F851F090F86B79A37A367BB1
+:102290002778011203FE756A357569FC756801E4DB
+:1022A00090FF83F0748090FF81F0755902E55975B5
+:1022B000F007A4247FF582E434F8F583E07893F600
+:1022C000FC540F14FC7893ECF6E55975F007A42440
+:1022D00081F582E434F8F583E0789676FD0876E8B7
+:1022E000FC7893E675F008A42448F582E434FFF501
+:1022F00083E4F07893E675F008A4244FF582E43483
+:10230000FFF583ECF07896E6FF08E67E03CFC31373
+:10231000CF13DEF9FE7893E675F008A42449F58220
+:10232000E434FFF583EEF07893E675F008A4244AD0
+:10233000F582E434FFF5837480F07894ECF67D0048
+:102340007897E62CF618E63DF67896E6FD08E67CEA
+:1023500003CDC313CD13DCF9FC7893E675F008A424
+:10236000244DF582E434FFF583ECF07893E675F0C4
+:1023700008A4244EF582E434FFF583E4F07896E671
+:10238000FD08E6FC7893E6FF7E00EE24FE50030293
+:1023900024FEE4EE75F007A4247FF582E434F8F51A
+:1023A00083E0FFE4EF5480FAE4EF540F14FFE4EE0F
+:1023B00075F007A42481F582E434F8F583E078947D
+:1023C000F6E4EE1313548024F0F8E434FDF9E8FC4D
+:1023D000E9FD8A5AEA700302246BE4EF75F008A461
+:1023E0002448F582E434FFF583E4F07894E6FAE4D7
+:1023F000EF75F008A4244FF582E434FFF583EAF08A
+:10240000EDFBEC7A03CBC313CB13DAF9FAE4EF75E7
+:10241000F008A42449F582E434FFF583EAF07894C7
+:10242000E67B00FAEC2AFCED3BFDFBEC7A03CBC328
+:1024300013CB13DAF9FAE4EF75F008A4244DF58212
+:10244000E434FFF583EAF0E4EF75F008A4244AF5DC
+:1024500082E434FFF5837480F0E4EF75F008A4247F
+:102460004EF582E434FFF5837480F00224FAE4EF41
+:1024700075F008A42408F582E434FFF583E4F078CD
+:1024800094E6FAE4EF75F008A4240FF582E434FF33
+:10249000F583EAF0EDFBEC7A03CBC313CB13DAF947
+:1024A000FAE4EF75F008A42409F582E434FFF5831B
+:1024B000EAF07894E67B00FAEC2AFCED3BFDFBECBD
+:1024C0007A03CBC313CB13DAF9FAE4EF75F008A45F
+:1024D000240DF582E434FFF583EAF0E4EF75F008AB
+:1024E000A4240AF582E434FFF583E4F0E4EF75F008
+:1024F00008A4240EF582E434FFF583E4F00E0223F1
+:10250000878E597896EDF608ECF67893EFF6122060
+:1025100070228C26EC30E718E526540F1475F0086D
+:10252000A42448F582E434FFF583E054DFF08016FC
+:10253000E526540F1475F008A42408F582E434FF4E
+:10254000F583E054DFF0227C0022EC90FC37F08C25
+:1025500024ED2403F5257D00D39572ED957140039C
+:10256000857225E52524B75009752503740290FC72
+:1025700037F0AC2512315222E4F56CF56B12257E52
+:102580002290FC35E06573600E740490FC37F0E433
+:10259000F56B756C0380467D73E4FEFF79357AFC3C
+:1025A0007B0174057800120348E56C2403F56CE5A3
+:1025B0006B3400F56BE56CD39572E56B9571400655
+:1025C00085726C85716BD3E56C9448E56B94004023
+:1025D0000C740290FC37F0E4F56B756C03AC6C1274
+:1025E000315222EC90FC37F0E4F56CF56B8C32EC58
+:1025F000600512314380057C001231522290FF9316
+:10260000E04401F0B2B390FF04E0F54A90FF06E029
+:10261000FDA3E0ED7D00FC7D00FC90FF06E0FFA344
+:10262000E07E00FFE4FEEC4EFCED4FFDC3EC944871
+:10263000ED9400502290FF06E0FDA3E0ED7D00FC4C
+:102640007D00FC90FF06E0FFA3E07E00FFE4FEECCF
+:102650004EFCED4FFD8004E4FD7C488C728D719042
+:10266000FF02E0FDA3E0ED7D00FC7D00FC90FF0299
+:10267000E0FFA3E07E00FFE4FEEC4EF54CED4FF5ED
+:102680004B756A357569FC7568017D357EFC7F0187
+:102690007973E4FAFB74057800120348754900E584
+:1026A0004924FE4019AD6AAE69AF68E4120318050B
+:1026B000490DED70010E8D6A8E698F6880E1756A33
+:1026C000357569FC75680178B3E614184660030235
+:1026D00027927890E6FF08E6FE788EE4F608F6C3C7
+:1026E000788FE6940218E69400501DE4FEFFC3EED6
+:1026F00094E8EF940350070EBE00010F80F0788F2E
+:1027000006E61870010680D77890EFF608EEF6D24C
+:10271000B47890E6FF08E6FE788EE4F608F6C37813
+:102720008FE6941E18E69400501DE4FEFFC3EE945D
+:10273000E8EF940350070EBE00010F80F0788F067B
+:10274000E61870010680D77890EFF608EEF6C2B171
+:102750007890E6FF08E6FE788EE4F608F6C3788FF8
+:10276000E6943A18E69400501DE4FEFFC3EE94E8A8
+:10277000EF940350070EBE00010F80F0788F06E63D
+:102780001870010680D77890EFF608EEF6D2B1788F
+:10279000B2E4F608F690FF00E05460B40002800650
+:1027A000D35003022D9BE54A540FF549E54A548066
+:1027B000A2E0920290FF01E012018A000B2D962701
+:1027C000D428F22D9629FE2D962AE12B152C7C2C4F
+:1027D0007F2CBF2D3F2D6DE56D30E70EE54C454B51
+:1027E0007008E572640245716003022D9890FF0045
+:1027F000E0541FB400028003D34029E54A6003027D
+:1028000028EFAD6AAE69AF68740112031878B1E6BB
+:1028100030E00BAD6AAE69AF6874021203187C0237
+:1028200012315222B401028003D3401BE56D20E136
+:1028300007E54A60030228EFE54A24FE5003022818
+:10284000EF7C0212315222B402028006D3500302FE
+:1028500028EDE56D20E10DE54A6009E54A648060F8
+:10286000030228EFAC4A1231D940030228EFE549B0
+:10287000702530021190FF80E05408AD6AAE69AF58
+:1028800068120318800F90FF82E05408AD6AAE69A9
+:10289000AF68120318803D154930021DE54975F0F7
+:1028A00008A42448F582E434FFF583E05408AD6AB7
+:1028B000AE69AF68120318801BE54975F008A424BF
+:1028C00008F582E434FFF583E05408AD6AAE69AFE1
+:1028D00068120318AD6AAE69AF681201EF600BAD04
+:1028E0006AAE69AF6874011203187C021231522279
+:1028F0008000022D98E56D20E706E57245716003C2
+:10290000022D9890FF00E0541FB400028003D340D2
+:102910001AE54C14454B7004E54A60030229FB7824
+:10292000B1E654FEF67C0012315222B4010280035B
+:10293000D3402AE56D20E108E56D20E0030229FB84
+:10294000E56D30E004E54A700BE56D30E109E54ADC
+:1029500024FE50030229FB7C0012315222B40202F1
+:102960008006D350030229F9E54C454B6003022948
+:10297000FBAC4A1231D940030229FBE56D20E10787
+:10298000E56D20E0028077E56D30E006E549600204
+:10299000806CE549700F90FF82E054F7F090FF8063
+:1029A000E054F7F022E549B401028003D340097DE9
+:1029B000017C03120F148011B402028003D340097A
+:1029C0007D017C04120F1480001549300215E54981
+:1029D00075F008A42448F582E434FFF583E054F749
+:1029E000F08013E54975F008A42408F582E434FF6B
+:1029F000F583E054F7F07C00123152228000022D62
+:102A000098E56D20E706E57245716003022D989008
+:102A1000FF00E0541FB400028003D3401AE54C14B9
+:102A2000454B7004E54A6003022ADE78B1E64401B2
+:102A3000F67C0012315222B401028003D34029E512
+:102A40006D20E108E56D20E003022ADEE56D30E04F
+:102A500004E549700BE56D30E108E54924FE5002BC
+:102A6000807F7C0012315222B402028003D3406F77
+:102A7000E54C454B60028069AC4A1231D940028076
+:102A800060E56D20E107E56D20E0028054E54970C6
+:102A90001430020990FF80E04408F0800790FF8224
+:102AA000E04408F022E56D30E1331549300215E5C8
+:102AB0004975F008A42448F582E434FFF583E04426
+:102AC00008F08013E54975F008A42408F582E43481
+:102AD000FFF583E04408F07C00123152228002802E
+:102AE00000022D98E56D20E712E5724571700CE546
+:102AF0004A700890FF00E0541F6003022D98E54CD7
+:102B000090FFFFF090FFFFE06005436D01800353ED
+:102B10006DFE7C0012315222E56D30E70EE5724504
+:102B200071600890FF00E0541F6003022D98AD4BC8
+:102B3000E54CED7D00FC7D00FCBD00028003022C15
+:102B400077B401028003D34032E54A7005E54CFCBE
+:102B50006003022C79756A407569F8756801D3E5E0
+:102B6000729412E57194004006E4FD7C128004AC7E
+:102B700072AD718C708D6F12316722B402028003C6
+:102B8000D34059E54A6003022C79E54CFC70277567
+:102B90006A527569F8756801D3E5729419E5719404
+:102BA000004006E4FD7C198004AC72AD718C708D20
+:102BB0006F1231678025756A6B7569F8756801D386
+:102BC000E5729427E57194004006E4FD7C278004BB
+:102BD000AC72AD718C708D6F12316722B4030280BC
+:102BE00006D35003022C77E54CF549700F90FF0493
+:102BF000E0FDA3E04D6003022C79801890FB02E019
+:102C0000FDA3E0FC90FF05E06C700790FF04E06D11
+:102C100060028068E4F570F56F7F00E54914C549EE
+:102C2000600FEF2400F582E434FBF583E02FFF8092
+:102C3000EA8F4AE54A2400F582E434FBF583E07D1F
+:102C400000D39572ED95714006AC72AD71800FE5C1
+:102C50004A2400F582E434FBF583E07D00FC8C70AF
+:102C60008D6FE54A2400FCE434FBFDFEECFD7F01A2
+:102C70008D6A8E698F68123167228000022D98025A
+:102C80002D98E56D30E719E5721445717012E54A2B
+:102C9000700EE54C454B700890FF00E0541F600338
+:102CA000022D98E56D20E008E56D20E103022D98E6
+:102CB000756A6EE4F569F568E4F56F04F570123134
+:102CC0006722E56D20E727E57245717021E54A70BE
+:102CD0001DE54C6402454B600DE54C14454B600608
+:102CE000E54C454B700890FF00E0541F6003022D37
+:102CF00098E56D20E008E56D20E103022D98854CF4
+:102D00006EE56E7010436D01536DFDD2B078B2E484
+:102D1000F608F68027E56E64026007E56E1460022F
+:102D20008079536DFE436D02E56E64026005E56EC9
+:102D300014700978B2E4F60804F6C2B07C001231CF
+:102D40005222E56D30E71AE5721445717013E54AB9
+:102D5000700FE54C454B700990FF00E0541F146064
+:102D6000028038E56D20E10280317C01123152226F
+:102D7000E56D20E715E5724571700FE54C454B7028
+:102D80000990FF00E0541F146002800FE56D20E100
+:102D90000280087C00123152228000023059B44077
+:102DA000028006D3500302304F90FF01E090FC35C3
+:102DB000F0E54A90FC36F0E490FC37F0E56A240335
+:102DC000F56AE5693400F569AD4BE54C856A8285A5
+:102DD0006983CDF0A3CDF090FF01E01201C02E0673
+:102DE000012E2C022E56032E80042ECE052F0B060C
+:102DF0002F31072F57082F83092FA90B2FCF0C2F07
+:102E0000DE802FDE810000303CE56D20E7067C058A
+:102E10001225E0227D527E367F0279387AFC7B01D2
+:102E2000740878001203487D087C0012254722E5CB
+:102E30006D20E7067C051225E022E54AB403004038
+:102E400010B40500500BE54A7F00FE7C10123329B8
+:102E5000227D007C0712254722E56D20E7067C05D0
+:102E60001225E022E54AB403004010B40500500BDF
+:102E7000E54A7F00FE7C11123329227D007C071277
+:102E8000254722E56D20E7067C051225E022E54A6C
+:102E9000B405028003D3400AE4FF04FE7C0A123327
+:102EA0002922B401028003D3400AE4FF04FE7C0817
+:102EB00012332922B403004010B40500500BE54A38
+:102EC0007F00FE7C13123329227D007C07122547E8
+:102ED00022E56D20E734D3E5729448E57194005003
+:102EE00006E572457170067C021225E022E54AB4BF
+:102EF0000103B3400BC3B403004009B406005004FF
+:102F00001231FF227C071225E02212257E22E56D78
+:102F100020E71DE54AB403004010B40500500BE55E
+:102F20004A7F00FE7C16123329227C071225E022FC
+:102F300012257E22E56D20E71DE54AB4030040100E
+:102F4000B40500500BE54A7F00FE7C19123329229C
+:102F50007C071225E02212257E22E56D20E72374EE
+:102F60008190FF93F0E54AB403004010B40500508F
+:102F70000BE54A7F00FE7C17123329227C071225BD
+:102F8000E02212257E22E56D20E71DE54AB403000C
+:102F90004010B40500500BE54A7F00FE7C18123348
+:102FA00029227C071225E02212257E22E56D20E7EA
+:102FB0001DE54AB403004010B40500500BE54A7FFC
+:102FC00000FE7C15123329227C071225E0221225EF
+:102FD0007E22E56D20E7067C071225E02212257E81
+:102FE00022E56D30E72090FF00E0541F701090FF45
+:102FF00001E0B48005122575800312257E227D0034
+:103000007C051225472290FF00E0541F60067C05D6
+:103010001225E022D3E5729448E5719400500BC369
+:10302000E5729407E571940050067C031225E022B6
+:10303000E54AB405041231FF227C071225E022E59F
+:103040006D30E7087D007C05122547227C0512259E
+:10305000E022B420028003D34000800012312D22F0
+:1030600075430090FF83E0540FD395434024E5431C
+:1030700024F0F582E434FEF583E0AD6AAE69AF6812
+:1030800012031805430DED70010E8D6A8E698F686D
+:1030900080D1E5437D00FCC3E5709CF570E56F9D34
+:1030A000F56FE570456F6006E490FF83F02290FFB6
+:1030B00082E04408F0E4F56F75704990FC35E0B4A7
+:1030C00005028003D3404090FC36E0F543B405028E
+:1030D0008003D3400AE4FF04FE7C0B12332922B4A0
+:1030E00001028003D3400AE4FF04FE7C0912332965
+:1030F00022B403004010B40500500BE5437F00FEEE
+:103100007C141233292222B480004023B482005060
+:103110001E7C357DFC1217A77D008C6C8D6B90FC9E
+:1031200037E0600512312D80057C001231522222D9
+:1031300090FF83E0547FF090FF82E04408F090FF1E
+:1031400080E04408F02290FF82E04408F090FF8085
+:10315000E04408F0228C237D008C708D6F756A35F9
+:103160007569FC7568011231672290FF83E0547F16
+:10317000F0E5706449456F700122C3E5709408E57D
+:103180006F94004015752108E5217D00FCC3E570B2
+:103190009CF570E56F9DF56F8009857021E4F56FF2
+:1031A000757049752200E522C395215026AD6AAE9F
+:1031B00069AF681201EFFCE52224F8F582E434FEE1
+:1031C000F583ECF005220DED70010E8D6A8E698F8E
+:1031D0006880D3E521547F90FF81F0228C487F00E6
+:1031E000EF24FD4019E4EF75F007A4247FF582E495
+:1031F00034F8F583E065487002D3220F80E28F47F0
+:10320000C32285727085716F90FF82E054F7F09051
+:10321000FF83E0547FF022C000C001C002C006C09E
+:1032200007E5782408F8860653067F7CFF1232896A
+:103230007C007D00E57B6046FF90FD95E0547F6E4D
+:10324000700FC083C082A3E0FDA3E0FCA3157B80C8
+:1032500007A3A3A3DFE68026DF06D082D083801EEB
+:10326000E0F8A3E0F9A3E0FAD082D083E8F0A3E984
+:10327000F0A3EAF0A3C083C082A3A3A380DA123331
+:1032800022D007D006D002D001D0002285A87A75BE
+:10329000A888EC70027C3F8C7922E5782408F876C7
+:1032A0000012337680FBC000C001C002C006C00718
+:1032B000AE047CFF123289E57B6042FF90FD95E011
+:1032C000547F6E700BC083C082A3A3A3157B8007BD
+:1032D000A3A3A3DFEA8026DF06D082D08380D8E0D4
+:1032E000F8A3E0F9A3E0FAD082D083E8F0A3E9F0F4
+:1032F000A3EAF0A3C083C082A3A3A380DA7808085E
+:103300007918097C01E6547F6E700676007700809C
+:103310000608090CBC08EE123322D007D006D002F2
+:10332000D001D00022757900857AA822C0F0C08231
+:10333000C083C3E57B24E8500512337680F4EC604B
+:1033400031903651E493C39C4028C0047CFF123274
+:1033500089D004430480E57B75F003A42495F582AD
+:10336000E434FDF583ECF0EFA3F0EEA3F0057B125F
+:103370003322D083D082D0F022C0047C20D28CD2E1
+:103380008DD504FDD0042275A80075880075B8009D
+:1033900075F00075D000E4F8900000F608B800FB66
+:1033A000020000C3ED940250047D037CE8ECF4FCC1
+:1033B000EDF4FD0CBC00010D8C7F8D7E22C3EC94DE
+:1033C000BCED940250047D077CD0ECF4FCEDF4FDE0
+:1033D0000CBC00010D8C7D8D7C22EC700122C000A4
+:1033E000E5782418F8A604E5782408F8C6547FF692
+:1033F000E630E703D0002212337680F4C28C857C5D
+:103400008C857D8AD28CC0E0C0D0C0F0C082C083E1
+:10341000C000C001C002C003C004C005C006C00790
+:10342000121AFAE5782408F8E66024E5782410F802
+:10343000A681E57875F021A4248DF582E434FCF5AD
+:103440008378B4E58104C398F9E6F008A3D9FA7447
+:10345000082578F8057808E65480700CE578B407FC
+:10346000F3780875780080EFE5782410F88681E518
+:103470007875F021A4248DF582E434FCF58378B4CA
+:10348000E58104C398F9E0F608A3D9FAD007D0067D
+:10349000D005D004D003D002D001D000D083D08298
+:1034A000D0F0D0D0D0E032C0E0C0D0C000C001C069
+:1034B00002C28E857E8D857F8BD28E781979097AAE
+:1034C00007E77004A600800BE6600816E67004E7C4
+:1034D0004480F70809DAEAE579601314F579700E8B
+:1034E000E5782408F87600123322D28CD28DD002EF
+:1034F000D001D000D0D0D0E0327581B3742A90FFD3
+:1035000093F0757F30757EF8757D60757CF01205DF
+:10351000411235AA12175D90FF93E04401F0B2B357
+:103520001235D412338480DA22C0007C01EC2408E6
+:10353000F8E660090CBC08F512337680EED0002264
+:10354000C0F0C082C083C000C006C007ED2410F8E0
+:1035500076C2ED75F021A4248DF582E434FCF58368
+:10356000C082C083A3A3E4780DF0A3D8FCEC547F01
+:1035700075F002A4241DF582E5F03436F583E4935A
+:10358000FE740193F5828E83E493FE740193FFD061
+:1035900083D082EFF0A3EEF0ED2408F8EC4480F63F
+:1035A000D007D006D000D083D082D0F0227578002A
+:1035B000757B007A0879187808760077000809DAB0
+:1035C000F8E478087480447FF674014410F5897536
+:1035D000B808D2ABD2A9227581B3D28ED28CD2AF29
+:1035E000E57B6032FF90FD95E05480602478087997
+:1035F00008E0547FFA7B00E6547FB502027BFF08A7
+:10360000D9F5EB700CEAF0123526AD04AC02123598
+:103610003DA3A3A3DFD212337680C57C017D0022B7
+:10362000050004F404F804EC04E804E404F004FCE9
+:1036300004A804AC04D804DC04A404A404A404E096
+:1036400004C004B804BC04B404CC04C804C404D04A
+:1036500004D404B0190103002200480200480E30CF
+:103660001420C81AD0180A0C050602030102000132
+:10367000CE0181010000C000800060003000180011
+:1036800010000800040002000100081828380C058A
+:10369000100A0200000000000301100A02000000EE
+:1036A0000000FBE0FBF209022700010200A0FA097A
+:1036B00004000003FF00000007058102400000072E
+:1036C00005010240000007058303020001220354A4
+:1036D0000055005300420033003400310030002018
+:1036E00000200020002000200020002000200000FA
+:0336F000000000D7
+:00000001FF
diff --git a/firmware/mts_gsm.fw.ihex b/firmware/mts_gsm.fw.ihex
new file mode 100644
index 0000000..f6ad0cb
--- /dev/null
+++ b/firmware/mts_gsm.fw.ihex
@@ -0,0 +1,867 @@
+:1000000014360002001E021AF9FFFFFFFFFF023341
+:100010001DFFFFFFFFFFFFFFFFFFFFFFFFFF02339B
+:10002000C87581CE90FDE88583A012353CEC4D600B
+:100030007378AB8003760018B89CFA787F800376DB
+:100040000018B865FA78208003760018B820FA788E
+:10005000208003760018B81FFA90FDDDAE83AF82D2
+:1000600090FBF81200AA6005E4F0A380F690FDE88A
+:10007000A88290FDE8A982E8696005E4F20880F7AB
+:100080009001081200B390010C1200B390011012FD
+:1000900000B39001141200D190011A1200D1900106
+:1000A000201200D175D00012341A020126EF6582A9
+:1000B0007003EE658322E493F8740193F97402935C
+:1000C000FE740393F5828E83E869700122E493F64F
+:1000D000A30880F4E493FC740193FD740293FE740E
+:1000E0000393FF740493F8740593F58288831200D8
+:1000F000AA700122E493A3A883A9828C838D82F045
+:10010000A3AC83AD828883898280E32121049B8014
+:1001100080049BACAE049BFDE8049D049DFBF304AE
+:10012000A2049DFBF30502050280FED0F030F00929
+:1001300020F303F68010F7800D30F10920F303F26D
+:100140008004F38001F020F404FCD0E0CC22CCC089
+:10015000E0120163020154BC0005D0F0ACF022C3F0
+:1001600013DCFC02012ABF0009ED258275F001F8BD
+:10017000E622BF010FED2582F582EE3583F583750A
+:10018000F004E022ED258275F002F8E222D083D05F
+:1001900082F5F0C3E493A3C5F095F0C0E0C3D0F0BE
+:1001A000E493A395F04012A3A3C3E5F033500205F6
+:1001B000832582F58250020583740193C0E0E493A5
+:1001C000C0E022D083D082F5F0E4937009740193EB
+:1001D0007004A3A3800C74029365F06005A3A3A32D
+:1001E00080E7740193C0E0E493C0E022120264024D
+:1001F00001FB1202B80201FB1202DC0201FB30E03B
+:100200000720E302E622E72230E10720E302E222B0
+:10021000E32230E202E022E493221202DC02022313
+:100220001202B8020223ABF012022DCBC5F0CB2292
+:1002300030E01020E306E6F5F008E622E7F5F009E5
+:10024000E7192230E11020E306E2F5F008E222E3AC
+:10025000F5F009E3192230E206E0F5F0A3E022E42C
+:1002600093F5F074019322BB0003740922BB0107CC
+:1002700089828A83740422BB020789828A8374106C
+:1002800022740A22020284BB0007E92582F8740165
+:1002900022BB010DE92582F582EA3583F5837404DA
+:1002A00022BB020DE92582F582EA3583F5837410BD
+:1002B00022E92582F87402220202B8BF0005EDF897
+:1002C000740122BF01078D828E83740422BF02074E
+:1002D0008D828E83741022EDF87402220202DCBF3C
+:1002E0000007ED2582F8740122BF010DED2582F58E
+:1002F00082EE3583F583740422BF020DED2582F56D
+:1003000082EE3583F583741022ED2582F874022283
+:10031000020310C0E0120264020328C0E01202B817
+:10032000020328C0E01202DC02032830E00B20E3C5
+:1003300004D0E0F622D0E0F72230E10B20E304D035
+:10034000E0F222D0E0F322D0E0F022C9CDC9CACE3B
+:10035000CACBCFCB12035BEDF9EEFAEFFB22BB0069
+:100360002FBF000AFAEDF8E7F60809DAFA22BF0112
+:10037000128D828E83F802037809A3E7F0D8FA225F
+:10038000020383FAEDF8E7F20809DAFA2202038D94
+:10039000BB014DBF001489828A83F9EDF802039FE7
+:1003A00008A3E0F6D9FA220203B0BF01228D828EA3
+:1003B00083FB08C9C582C9CAC583CAE0A3C9C5826F
+:1003C000C9CAC583CAF0A3DBEAD8E8220203D38DE9
+:1003D000828E83F9EDF8E0F208A3D9FA220203DD58
+:1003E000BB024DBF001289828A83F9EDF80203EF48
+:1003F00008A3E493F6D9F922BF01238D828E83FBF3
+:1004000008C9C582C9CAC583CAE493A3C9C582C93C
+:10041000CAC583CAF0A3DBE9D8E722020422898295
+:100420008A83F9EDF8E493F208A3D9F922020433A0
+:10043000BF000DFAEDF8E3F60809DAFA2202043DEE
+:10044000BF01128D828E83F802044A09A3E3F0D81B
+:10045000FA22020455FAEDF8E3F20809DAFA220268
+:10046000045FE6FB08E6FA08E6F904F618700106F0
+:1004700022E6FF08E6FE08E6FD22EFF0A3EEF0A379
+:10048000EDF022EBF0A3EAF0A3E9F022E0FFA3E015
+:10049000FEA3E0FD22E0FBA3E0FAA3E0F9220000C6
+:1004A00000000000000502006105710026059800AB
+:1004B000330A0900610A750066154400610CF900F1
+:1004C0006109A9006109E000610DC000610BF10044
+:1004D000610A1C00610A510061173C0033174F008C
+:1004E000341E1400431EBF0044202C0044201A0078
+:1004F000471EE600471F8B004D1FDC004F1F080002
+:100500005832A800617CCC7DFF121CC52290FFFCF4
+:10051000E020E72DC2AFAE59AF58755A20E55A1406
+:10052000C55A6019E4FE7F05EE4FCE24FFCECF34CE
+:10053000FFCF6007E490FF92F080ED80E08E598F4E
+:10054000582212050A7D077CB71232C47D0F7C6EDB
+:100550001232DE789D7A06E4F608DAFC7A06120595
+:10056000CD7C03120E55122168E4FEFF7C0F12327F
+:100570004DD2A822123138E490FC38F090FFF0E020
+:1005800030E408740190FC39F08005E490FC39F007
+:100590007D0A7C001225461231BB2212313890FCB4
+:1005A00039E014700E90FFF0E04410F07C0012254A
+:1005B000DF801990FC39E0700E90FFF0E054EFF00E
+:1005C0007C001225DF80057C171225DF1231BB224B
+:1005D00090FFF0E054ABF090FFF0E04420F0228C6C
+:1005E000378D367882EDF608ECF6EDFEECFD7F01F6
+:1005F0009000051201F57880F67882E6FD08E6FCA9
+:10060000EDFEECFD7F019000041201F5540FFC7D1E
+:100610008012176D7880E6700DAD3AAE39AF38E4D0
+:100620001203187C082290FFF0E054FEF090FFF0D7
+:10063000E054FDF0801E7882E6FD08E6FCEDFEEC5D
+:10064000FD7F0190000812021725E0440190FFF39E
+:10065000F00206D97882E6FD08E6FCEDFEECFD7FAF
+:100660000190000612021754FE90FFF3F0802B78E1
+:1006700082E6FD08E6FCEDFEECFD7F01900008122D
+:100680000217FAEB90FFF1F01208C8400DAD3AAE38
+:1006900039AF38E41203187C18227882E6FD08E6A8
+:1006A000FCEDFEECFD7F0190000812021790FFF1B7
+:1006B000F01208C8400DAD3AAE39AF38E412031855
+:1006C0007C18227882E6FD08E6FCEDFEECFD7F0159
+:1006D000900006120217440190FFF3F07883E6249D
+:1006E00003F618E63400F67880E624FE500990FF01
+:1006F000F0E054FDF0800790FFF0E04402F0E49059
+:10070000FFF1F0788176007880E624FFFCE434FF86
+:10071000FD7881E67F00FEECD39EEF6480CD64809F
+:100720009D402F1208AD400F7881E6AD3AAE39AF4B
+:10073000381203187C182290FFF2E0FC788286833E
+:10074000088682ECF0788106A37882A68308A682C8
+:1007500080B51208AD400F7881E6AD3AAE39AF38BA
+:100760001203187C182290FFF2E0FC78828683083E
+:100770008682ECF07880E6AD3AAE39AF38120318D5
+:100780007C00228C378D367882EDF608ECF6EDFE93
+:10079000ECFD7F019000051201F57881F67882E684
+:1007A000FD08E6FCEDFEECFD7F019000041201F572
+:1007B000540FFC7D8112176D7881E670037C08224E
+:1007C00090FFF0E054FEF090FFF0E054FDF0801B4D
+:1007D0007882E6FD08E6FCEDFEECFD7F0190000866
+:1007E00012021725E090FFF3F0805B7882E6FD08A7
+:1007F000E6FCEDFEECFD7F0190000612021754FEB0
+:1008000090FFF3F080217882E6FD08E6FCEDFEEC37
+:10081000FD7F01900008120217FAEB90FFF1F01231
+:1008200008C840037C18227882E6FD08E6FCEDFE4D
+:10083000ECFD7F0190000812021790FFF1F0120802
+:10084000C840037C18227883E6240AF618E63400B0
+:10085000F6788076007881E624FFFCE434FFFD78AA
+:1008600080E67F00FEECD39EEF6480CD64809D40E7
+:100870002178828683088682E090FFF1F01208C812
+:1008800040037C1822788006788306E618700106FB
+:1008900080C390FFF0E04401F0788286830886826E
+:1008A000E090FFF1F01208C840037C18227C00227F
+:1008B00090FFF0E020E71290FFF0E030E50990FFB4
+:1008C000F0E04420F0C32280E7D32290FFF0E02044
+:1008D000E31290FFF0E030E50990FFF0E04420F0F3
+:1008E000C32280E7D3228C428D417C00ED54F0FD81
+:1008F000EC7003ED64307005753E038003753E04B3
+:10090000AC3E120F72758300858340E541540FF5AC
+:100910003FE5407004E53F64037035E53E24FD7516
+:10092000F00AA42402F582E434FCF583E030E60505
+:100930001210598019E53E249DF8E654FBF678A97B
+:10094000E62405F58218E63400F583740FF080592B
+:10095000E5407004E53F64047048E53E24FD75F011
+:100960000AA42402F582E434FCF583E030E507AC08
+:1009700042AD41121C5AE54230E21578ADE630E056
+:100980000F78ADE630E109E4FF04FE7C0412324D3D
+:1009900078A9E62406F58218E63400F583740FF092
+:1009A0008007E4FC7DEE121C5AC203221231381279
+:1009B0000F7278A9E62406F58218E63400F583E084
+:1009C00090FC38F078A9E62405F58218E63400F5A5
+:1009D00083E090FC39F0C2037D027C0012254612B0
+:1009E00031BB221231387895ECF6EC249DF8E630D4
+:1009F000E1077C131225DF800F90FC39E0FD78952C
+:100A0000E6FC1213EF1225DF1231BB2212313878C7
+:100A100095ECF67D00120F121225DF1231BB221267
+:100A200031387895ECF6EC249DF8E630E2077C133B
+:100A30001225DF801B7895E6249DF8E620E1077CEF
+:100A4000121225DF800A7895E6FC1214131225DFB6
+:100A50001231BB221231387895ECF6EC249DF8E681
+:100A600020E2077C111225DF800A7895E6FC12153A
+:100A7000141225DF1231BB221231387895ECF612B0
+:100A80000F7278A9E62409F58218E63400F583E0B0
+:100A900090FC3FF078A9E6240AF58218E63400F5C8
+:100AA00083E090FC40F078A9E62403F58218E63450
+:100AB00000F583E0FC78A9E62404F58218E634000A
+:100AC000F583E0F56278A9E62402F58218E63400A1
+:100AD000F583E0F5638C61E4EC333354017895F6EB
+:100AE0006008E56230E1037895067895E690FC4170
+:100AF000F078A7E62402F58218E63400F583E0FDDD
+:100B0000A3E0540CFCED54E68C65F564E56130E53A
+:100B100003436501E56220E50EE561547F7008E559
+:100B20006120E703436502E56130E303436510E5B7
+:100B30006130E203436520E561540360034365408F
+:100B4000E56130E103436580E56130E4034364011E
+:100B5000E56130E603436408E56220E40EE5615494
+:100B60007F7008E56120E7034364105365FB53641D
+:100B7000F9AD64E56590FC3ACDF0A3CDF0E56330C6
+:100B8000E30DE5635430C4540F90FC3DF08005E460
+:100B900090FC3DF0E563540390FC3CF0E5635404A5
+:100BA000C31390FC3EF090FC3CE0700E7D357EFC63
+:100BB0007F01740190000912014B78A9E62408F521
+:100BC0008218E63400F583E07C00FD78A9E624076E
+:100BD000F58218E63400F583E07F004CFEEF4D907F
+:100BE000FC38F0A3CEF0CEC2037D0A7C001225466D
+:100BF0001231BB221231387895ECF6789A760108DA
+:100C000076FC0876387897760C789A12046E120281
+:100C10001D7898CBF6CB08F67F00EF24EA401FE45E
+:100C2000EF25E090357EFD93CD04937899667003AF
+:100C3000ED186670067897760080030F80DC789652
+:100C4000EFF6789A12046E9000021202177898CB91
+:100C5000F6CB08F65404CB54064B60047897760B19
+:100C60007899E630E313789A12046E900005120129
+:100C7000F524FB50047897760D7899E654C07D00F2
+:100C800064C04D70047897760B789A12046E9000C9
+:100C9000041201F524FC50047897760F789A120418
+:100CA0006E9000061201F524FD50047897760E78B8
+:100CB0009A12046E9000091201F524FD50047897F1
+:100CC000760A7897E6702A7895E6FC120F72789A81
+:100CD00012046E78A7E6F978A6E6FA7B01740A7822
+:100CE00000120348C2037895E6FC1211157897ECC0
+:100CF000F67897E6FC1225DF1231BB2212313878E4
+:100D000095ECF6120F727895E624FD75F00AA4248E
+:100D100014F582E434FCF583AC82AD8378A6868337
+:100D2000088682ECF9EDFA7B0A78011203B0C2035F
+:100D30007895E6FC1211151231BB228D2B8C2AED11
+:100D400060407527017529487528FFE52A24FDFCB8
+:100D5000E434FFFDEC7C0325E0CD33CDDCF9FCE58C
+:100D6000292CF529E5283DF528AD29AE28AF2774B3
+:100D7000809000061203207480900002120320125B
+:100D80000FC5E52B14603B7527017529087528FFF1
+:100D9000E52A24FDFCE434FFFDEC7C0325E0CD33A3
+:100DA000CDDCF9FCE5292CF529E5283DF528AD2910
+:100DB000AE28AF27E4900006120320E49000021250
+:100DC0000320221231387895ECF6EC249DF8E630B9
+:100DD000E2097895E6FC121514D2007895E6FC122B
+:100DE0000F727896760090FC39E030E704789676BA
+:100DF000017896E6FD7895E6FC120D38C2033000C6
+:100E0000077895E6FC1214137C001225DF1231BB23
+:100E10002278A9E62404F58218E63400F583E0443C
+:100E200001F078A9E62404F58218E63400F583E0A1
+:100E300030E00280ED78A9E6240BF58218E6340054
+:100E4000F583E054F8F078A9E62402F58218E63438
+:100E500000F583E04480F022C2038C58120F7278B0
+:100E6000A6868308868279AF7A357B0A78011203D9
+:100E7000FE120E0EAC587D02120D38C203AC581291
+:100E80001115228D538E528F518C50120F72754F47
+:100E90000078A9E62405F58218E63400F583E02001
+:100EA000E41FE54F24F64019054FC2037C181232A7
+:100EB000FB90FF93E04401F0B2B3AC50120F72808C
+:100EC000D078A9E62405F58218E63400F583E02001
+:100ED000E405C2037C022278A9E62405F58218E61F
+:100EE0003400F583E0540F601678A9E62405F582F6
+:100EF00018E63400F583E0540FF0C2037C01227839
+:100F0000A88683088682E0AD53AE52AF5112031813
+:100F1000C2037C00228D318C30121514E531600F34
+:100F2000E530B4030A7C0112250E7C8112250EAC3B
+:100F300030120F72E531601A78AA8683088682E043
+:100F400054E7F0A3A3A3A3E054E7F0AC307D021272
+:100F50000D3878A6868308868279B97A357B0A7837
+:100F6000011203FEC203E530249DF8E654FDF6AC01
+:100F700030121115228C2630030512329A80F87C2B
+:100F80000A1231ADD203E52624FD78A3F670077866
+:100F9000AA76FF0876E078A3E67D007C0425E0CD04
+:100FA00033CDDCF9FC24A078A9F6ED34FF18F678EF
+:100FB000A3E675F00AA42400FCE434FCFD78A6ED59
+:100FC000F608ECF61232462278A9E62402F58218D9
+:100FD000E63400F583E030E72278A9E62402F582C2
+:100FE00018E63400F583E0547FF078A9E62402F592
+:100FF0008218E63400F583E04480F02278AA8683E4
+:10100000088682E0547FF0AD83E5822404FCE43D51
+:101010008C82F583E0547FF078A9E6240BF58218E2
+:10102000E63400F583E054F8F078ABE62401F5826D
+:1010300018E63400F583E04403F078ABE62405F5C8
+:101040008218E63400F583E04403F078A9E624052D
+:10105000F58218E63400F583740FF02278AA8683AF
+:10106000088682E0543FF0AD83E5822404FCE43D31
+:101070008C82F583E0543FF078A3E624A4F8E6FCE4
+:1010800078ABE62401F58218E63400F583ECF078BD
+:10109000A3E624A4F8E6FC78ABE62405F58218E67E
+:1010A0003400F583ECF078A9E6240BF58218E634D9
+:1010B00000F583E054FB4402F52678A7E62402F508
+:1010C0008218E63400F583E030E50343260178A971
+:1010D000E62405F58218E63400F583E030E00312DB
+:1010E0000FC5E526FC78A9E6240BF58218E6340046
+:1010F000F583ECF078A9E62405F58218E63400F5CE
+:1011000083740FF078AA8683088682E04480F0A377
+:10111000A3A3A3E04480F0228C2A120F7278A7E6E2
+:101120002408F58218E63400F583E0FC78A9E6246B
+:101130000AF58218E63400F583ECF078A7E6240778
+:10114000F58218E63400F583E0FC78A9E62409F579
+:101150008218E63400F583ECF078A6868308868250
+:10116000E0FDA3E0FCEDFE78A9E62408F58218E690
+:101170003400F583EEF0ECFE78A9E62407F582183A
+:10118000E63400F583EEF08C298D28C3EC9405ED50
+:10119000940C400575277C8033D3E5299401E5281C
+:1011A0009403400575273C8023D3E5299481E528E5
+:1011B000940140057527188013D3E5299460E5282C
+:1011C0009400400575270C8003752708AF27E4EFCE
+:1011D000547C4483FF8F27E527FC78ABE62401F598
+:1011E0008218E63400F583ECF0E527FC78ABE624C2
+:1011F00005F58218E63400F583ECF0E527FC78A3CA
+:10120000E624A4F8ECF678A9E62402F58218E63480
+:1012100000F583E0F52778A7E62402F58218E63486
+:1012200000F583A3E030E3175327C778A7E624052A
+:10123000F58218E63400F583E09035AA93422778CA
+:10124000A7E62402F58218E63400F583E030E705CE
+:1012500043274080035327BF5327FB78A7E6240684
+:10126000F58218E63400F583E06003432704532732
+:10127000FC78A7E62404F58218E63400F583E04202
+:1012800027432780E527FC78A9E62402F58218E6A3
+:101290003400F583ECF078A9E62404F58218E634EE
+:1012A00000F583E0F52778A7E62402F58218E634F6
+:1012B00000F583A3E030E1055327DF8003432720B7
+:1012C00078A7E62402F58218E63400F583E030E4DE
+:1012D000055327EF800343271078A7E62409F582FA
+:1012E00018E63400F583E0B40203432702E527FC47
+:1012F00078A9E62404F58218E63400F583ECF0784A
+:10130000A9E62403F58218E63400F583E0F5277892
+:10131000A7E62409F58218E63400F583E07005534A
+:10132000277F800343278078A7E62402F58218E60A
+:101330003400F583A3E030E00543272080035327E2
+:10134000DF78A7E62402F58218E63400F583E03062
+:10135000E30543274080035327BF78A7E62402F51F
+:101360008218E63400F583E030E00543271080035F
+:101370005327EF78A7E62402F58218E63400F583B8
+:10138000A3E030E40543270880035327F778A7E656
+:101390002402F58218E63400F583A3E030E5054326
+:1013A000270480035327FB78A7E62402F58218E67A
+:1013B0003400F583A3E030E605432701800353277B
+:1013C000FE78A7E62402F58218E63400F583A3E050
+:1013D00030E70543270280035327FDE527FC78A962
+:1013E000E62403F58218E63400F583ECF0C2037CB2
+:1013F00000228D278C26ED54031460037C1022E517
+:1014000027547C24FC40037C0B22E526249DF8E62F
+:101410004402F67C00228C30120F72E530249DF8D5
+:10142000E620E24FAC307D02120D38E53024FE4458
+:1014300028FC78AA8683088682ECF0AF83E58224B4
+:1014400004FEE43FFFEC8E828F83F07C038C2CE55E
+:101450002CFC78ABE62401F58218E63400F583EC29
+:10146000F0E52CFC78ABE62405F58218E63400F5AF
+:1014700083ECF0752D01752F48752EFFE53024FDA6
+:10148000FCE434FFFDEC7C0325E0CD33CDDCF9FC3E
+:10149000E52F2CF52FE52E3DF52E78ABE62404F54F
+:1014A0008218E63400F583E054E7F52CAD2FAE2E1C
+:1014B000AF2DE4900002120320E4900006120320F6
+:1014C0001201EF30E503432C10E52CFC78ABE62449
+:1014D00004F58218E63400F583ECF012105978A96F
+:1014E000E62406F58218E63400F583E0C203FCE545
+:1014F00030249DF8E64404F68C2CE530540FC45497
+:10150000F07E00FFEEEF44047D00FFEC4EFCED4F5B
+:10151000FD121CC57C00228C2F120F72120FF9785D
+:10152000AA8683088682E05408F0A3A3A3A3E0540C
+:1015300008F0AC2F7D02120D38C203E52F249DF870
+:10154000E654FBF67C00221231387896ECF6EC2457
+:101550009DF8E630E10A7D007C131225461231BB6E
+:101560007896E6249DF8E64401F67896E6FC120F9C
+:10157000727896E624FD75F00AA42414F582E4340A
+:10158000FCF58378A6E6FA08E6F97B0A78011203EF
+:10159000B078A6868308868279B97A357B0A780185
+:1015A0001203FE120FC5C2037896E6FC12111578DD
+:1015B00095ECF6EC600A7D007C081225461231BBE2
+:1015C0007896E6FC120F7278A9E62404F58218E6F4
+:1015D0003400F583E0441054DFFC78A9E62404F5D8
+:1015E0008218E63400F583ECF07895ECF6C2037CC3
+:1015F000C81232FB7896E6FC120F7278A9E6240432
+:10160000F58218E63400F583E054EFF0C2037CC89D
+:101610001232FB7896E6FC120F7278A9E62404F5E4
+:101620008218E63400F583E04410F0C2037CC8124F
+:1016300032FB7896E6FC120F7278A9E62404F58254
+:1016400018E63400F583E04420F0C2037CF0123247
+:10165000FB7896E6FC120F7278A9E62405F582184D
+:10166000E63400F583E030E415C2037896E64410D2
+:101670007F00FE7C0712324D1231BB02173B78A966
+:10168000E62404F58218E63400F583E054CFF0C276
+:10169000037CC81232FB7896E6FC120F7278A9E63A
+:1016A0002404F58218E63400F583E04430F0C203E8
+:1016B0007CF01232FB7896E6FC120F7278A9E624D1
+:1016C00005F58218E63400F583E030E414C20378AF
+:1016D00096E644107F00FE7C0712324D1231BB802B
+:1016E0005D78A9E62404F58218E63400F583E05419
+:1016F000EFF078A9E62404F58218E63400F583E0DB
+:1017000054DFF07896E624FD75F00AA42414F582DF
+:10171000E434FCF583AC82AD8378A68683088682A8
+:10172000ECF9EDFA7B0A78011203B0C2037896E671
+:10173000FC1211157D007C0B1225461231BB2212C2
+:101740003138E490FC39F07D027C001225461231DC
+:10175000BB221231387C001225DF1231BB22743CCF
+:1017600090FBE0F0743E90FBE0F0E490FC28F02267
+:101770008D358C34ECB401028003D340028028B450
+:1017800002028003D34008A835E625E0F68018B4AD
+:1017900004028003D3400AA835E625E025E0F68060
+:1017A00006A83576008000228C3C8D3BEDFEECFDDA
+:1017B0007F0175660675670090FC29120477120197
+:1017C000EFB480028006D3500302186E90FC2912F9
+:1017D00004899000031201F554F0B430028003D361
+:1017E000405F90FC29120489900008120217FAFD4C
+:1017F000EBFE7F0190FC2C120477EECD9035C3FCFC
+:10180000E493FF740193FEF9EFFA7B01EAFFE9FE2E
+:10181000ECC39EED9F40259035C5E493FD74019384
+:10182000FCEDFEECFD7F01EECDFC90FC2EE0D39CA8
+:1018300090FC2DE09D5005756680803312198C80D8
+:101840002EB460028003D3400BAC3CAD3B1207804A
+:101850008C66801BB41003B34010C3B42003B340A4
+:1018600009C3B440028003D34000756681800080C4
+:1018700075B481028003D3406B90FC2912048990D7
+:1018800000031201F554F0B430028003D3401D90E0
+:10189000FC29120489900008120217FAFDEBFE7F62
+:1018A0000190FC2F1204771218F68036B460028083
+:1018B00003D34013753A67E4F539F538AC3CAD3BDA
+:1018C0001205DC8C66801BB41003B34010C3B42037
+:1018D00003B34009C3B440028003D340007566815E
+:1018E000800080028000E566FC90FC29120489ECEF
+:1018F000900002120320AC672290FC291204899008
+:1019000000041201F5600474018001E4A2E0920178
+:1019100090FC29120489ED2403FD50010E90FC2C4B
+:1019200012047790FC291204899000051201F5F544
+:10193000679000041201F5540FFC7D6712176DE5E6
+:10194000677004756608227566007884760078846E
+:10195000E6C39567503890FC2F1204891201EFFC02
+:1019600090FC2C120489EC12031830010E90FC310B
+:10197000E004F090FC307003E004F078840690FC02
+:101980002EE004F090FC2D7003E004F080C0229063
+:10199000FC2AE0FDA3E0FCEDFEECFD7F01ED240A56
+:1019A000FD50010E90FC3212047790FC291204893C
+:1019B0009000041201F5540FB401028003D34017C4
+:1019C00090FC321204890DED70010E90FC2F120470
+:1019D0007778887601804EB402028003D340199054
+:1019E000FC32120489ED2402FD50010E90FC2F12EE
+:1019F000047778887602802DB404028003D34019DE
+:101A000090FC32120489ED2404FD50010E90FC2F4D
+:101A100012047778887604800CB400028003D340E7
+:101A2000007566082290FC291204899000051201B5
+:101A3000F5F567788576007885E6C39567400302FB
+:101A40001AF4788676007886E6C378889650769081
+:101A5000FC2C1204891201EFFC90FC321204921249
+:101A600001E9F45CFC1201E9F890FC2F120489E80A
+:101A7000C0E01201EFC8D0E0C8584CFC90FC2C121A
+:101A80000489EC1203187887ECF690FC31E004F03E
+:101A900090FC307003E004F009E970010A90FC3218
+:101AA00012048090FC291204899000041201F53080
+:101AB000E40E90FC2EE004F090FC2D7003E004F0A6
+:101AC00078860680817888E6FDE4FEFFEECDFC9006
+:101AD000FC31E02CF090FC30E03DF07888E6FDE44D
+:101AE000FEFFEECDFC90FC34E02CF090FC33E03DAA
+:101AF000F0788506021A347566002222C0E0C0F034
+:101B0000C082C083C0D0E8C0E0E9C0E0EAC0E0EB3A
+:101B1000C0E0ECC0E0EDC0E0EEC0E0EFC0E090FF60
+:101B200092E01201C01B47301B47321B56381B681E
+:101B30003A1B7A3E1B92441B86461B9E501BE0526A
+:101B40001BBF541C015600001C2290FF92E07F0036
+:101B5000FE7C0112324D021C32E4FF04FE7C0312B3
+:101B6000324D742090FFFEF0021C32E4FF04FE7C34
+:101B70000212324D744090FFFEF0021C32E4FF046A
+:101B8000FE7C0412324D021C32E4FF04FE7C05127E
+:101B9000324D021C32E4FF04FE7C0612324D021C60
+:101BA0003290FFA5E07D0090FBF8CDF0A3CDF09042
+:101BB000FBF9E0FCF58390FBF8E04433FD121CC513
+:101BC000807390FFB5E07D0090FBFACDF0A3CDF0DF
+:101BD00090FBFBE0FCF58390FBFAE04443FD121C14
+:101BE000C5805290FFA6E07D0090FBFCCDF0A3CD18
+:101BF000F090FBFDE0FCF58390FBFCE04434FD122B
+:101C00001CC5803190FFB6E07D0090FBFECDF0A3B7
+:101C1000CDF090FBFFE0FCF58390FBFEE04444FD3B
+:101C2000121CC5801090FF92E07D00FCED44AAFDDF
+:101C3000121CC58000E490FF92F0D0E0FFD0E0FEDF
+:101C4000D0E0FDD0E0FCD0E0FBD0E0FAD0E0F9D06D
+:101C5000E0F8D0D0D083D082D0F0D0E0320581053A
+:101C60008105810581A881181818EDF608ECF69019
+:101C7000FF5AE020E70280F790FF59E07D00A8813D
+:101C800018CDF6CD08F67D03A881E618FCE6CC2534
+:101C9000E0CC33CCDDF9CCF6CC08F6A88118E644CC
+:101CA000F8F6A881181818E6FD08E6FCA881188641
+:101CB00083088682EDF0A3ECF0740290FF5AF015D1
+:101CC0008115811581158122E5812405F581E4A81E
+:101CD0008118F6A88118181818EDF608ECF690FB94
+:101CE000F5E024F85003021DE6E4A8811818F6A8D0
+:101CF0008118E6FEA88118181818E6FD08E6FC7F92
+:101D000000EF24F8404DE4EF25E0247DF582E43433
+:101D1000FCF583E0FBA3E06C7003FAEB6D700974D3
+:101D200001A8811818F6802BE4EF25E0247DF582C8
+:101D3000E434FCF5837A00E054F0CCF8CCCDF9CD56
+:101D4000FB7800E954F0F9EA687002EB6970010E63
+:101D50000F80AEA88118EEF6A88118181818EDF6B5
+:101D600008ECF6A881EFF6A8811818E67079A8812A
+:101D700018E624F74071A88118181818E6540FA81F
+:101D800081F664046017A881E664036010A88118D6
+:101D9000181818E6FD08E6FC121C5A804A7C0A1244
+:101DA00031ADA88118181818E6FD08E6FC90FBF480
+:101DB000E025E0247DF582E434FCF583EDF0A3EC2E
+:101DC000F090FBF4E0FFE4EF045407FF90FBF4F025
+:101DD00090FBF5E004F012324690FBF6E07008E468
+:101DE000FEFF7C0F12324D802790FBF7E004F05489
+:101DF0003F701D90FBF7E044FE7D00FC90FBF4E09B
+:101E000025E0247DF582E434FCF583EDF0A3ECF0CD
+:101E1000E58124FBF58122788B7600788C7600743E
+:101E20000190FBF6F012313890FBF5E060577C0A28
+:101E30001231AD90FBF3E025E0247DF582E434FC23
+:101E4000F583E0FDA3E0FC90FBF3E025E0247DF5C5
+:101E500082E434FCF583E4F0A3F090FBF3E0FFE4CC
+:101E6000EF045407FF90FBF3F090FBF5E014F078DB
+:101E700089EDF608ECF61232467889E6FD08E6FCB4
+:101E80001208E380A312329A90FF93E04401F0B26B
+:101E9000B3788B06B60011788B7600788CE6F40464
+:101EA00004A2E092B4788CF6021E25E490FBF6F0D2
+:101EB00090FBF5E07D00FCED44CFFD121C5A123181
+:101EC000BB22123138E5706449456F601590FF837D
+:101ED000E0540F7D00D39570ED956F5005122F8162
+:101EE00080031230511231BB22123138E57064493F
+:101EF000456F600512308B800E90FF80E04408F043
+:101F000090FF83E0547FF01231BB221231388C54A1
+:101F1000EC54F0B41015756A357569FC756801E507
+:101F20006A2403F56AE5693400F569E4F557F55666
+:101F3000E556C394015027E554540FFCAD6AAE69D1
+:101F4000AF68120E808C55EC60028012056AE56A5B
+:101F5000700205690557E5577002055680D2E554B1
+:101F6000540F249DF8E654FEF6E554540F7F00FE0E
+:101F70007C1212324DE5551470097D007C09122542
+:101F8000468007AD577C001225461231BB22123124
+:101F90003890FFFCE04402F090FF00E030E713903F
+:101FA000FF83E04480F0436D8090FFFCE04401F04B
+:101FB000801190FF82E04408F0536D7F90FFFCE0B9
+:101FC00054FEF090FF81E04480F01225F990FFFE6E
+:101FD000E04405F090FFFCE054FDF01231BB22120A
+:101FE00031387C011232FB78ADE64402F674FEFC17
+:101FF00004FD121CC590FF5AE030E70280F7E4F5BB
+:102000004E754D10AC4EAD4DE54E154E7002154D52
+:10201000EC4D600280EE4387011231BB2212313851
+:102020007C021231C778ADE654FDF61231BB2212A4
+:10203000313878ADE630E02C78ADE630E12678AD89
+:10204000E6FCF58318E644F0FD121C5A90FFFCE014
+:102050004420F07C021232FB78ADE654FDF6741A8F
+:1020600090FFFEF078ADE6FCF58318E644F1FD1232
+:102070001C5A1231BB22756D0090FFFFE0600343D4
+:102080006D01756E00E4F56CF56BE4F56F757049E4
+:10209000748490FF82F0748490FF80F0748090FFCD
+:1020A00058F0748090FF5AF0AD46AF457E00EE24A4
+:1020B000FE5003022142E4EE75F007A4247FF5826E
+:1020C000E434F8F583E0FFE4EF5480FDE4EF540FCF
+:1020D00014FFED6038E4EF75F008A42448F582E4BD
+:1020E00034FFF5837490F0E4EF75F008A4244AF50A
+:1020F00082E434FFF5837480F0E4EF75F008A424E3
+:102100004EF582E434FFF5837480F08034E4EF759B
+:10211000F008A42408F582E434FFF5837490F0E419
+:10212000EF75F008A4240AF582E434FFF583E4F0A7
+:10213000E4EF75F008A4240EF582E434FFF583E49F
+:10214000F00E0220AB8D468E448F45747F90FFFDCC
+:10215000F0749090FFFCF0228C58EC24F65006E5C9
+:10216000582437FC22E5582430FC22D2B0122543F3
+:10217000EC700302227E755C03AE5B7F00E55C15AC
+:102180005C6480247F5035EF2400F582E434FBF555
+:1021900083E0FE24FE501EEF7D00FCE4FB7474C35C
+:1021A0009CFAEB9DFBEE7D00FCEAC39CED6480CBCA
+:1021B00064809B50028005EF2EFF80C18E5B8F5A9A
+:1021C000E55C6480247F500302227EE55A248E5011
+:1021D0000302227E855A5D755B00AE5AAF5B903577
+:1021E000EEE493F55CE55C155C6480247F5018EEAA
+:1021F0002400F582E434FBF583E0FCEF9035EE93A8
+:102200006C70040E0F80DE8E5A8F5BE55C64802458
+:102210007F406E755E017560E8755FFFE55D2402C5
+:10222000F55A755C07E55C334057AD60AE5FAF5E55
+:10223000E55CF5823395E0F5831201F5C4540FFC9B
+:10224000122155E55A2400F582E434FBF583ECF0C5
+:10225000055A055AAD60AE5FAF5EE55CF582339519
+:10226000E0F5831201F5540FFC122155E55A2400C4
+:10227000F582E434FBF583ECF0055A055A155C80D1
+:10228000A4740290F851F090F86B79C77A357B27E7
+:1022900078011203FE756A357569FC756801E49072
+:1022A000FF83F0748090FF81F0755902E55975F055
+:1022B00007A4247FF582E434F8F583E0788FF6FCF8
+:1022C000540F14FC788FECF6E55975F007A42481BF
+:1022D000F582E434F8F583E0789276FD0876E8FC40
+:1022E000788FE675F008A42448F582E434FFF5837E
+:1022F000E4F0788FE675F008A4244FF582E434FF0B
+:10230000F583ECF07892E6FF08E67E03CFC313CFA7
+:1023100013DEF9FE788FE675F008A42449F582E40F
+:1023200034FFF583EEF0788FE675F008A4244AF5C3
+:1023300082E434FFF5837480F07890ECF67D0078C9
+:1023400093E62CF618E63DF67892E6FD08E67C0367
+:10235000CDC313CD13DCF9FC788FE675F008A42407
+:102360004DF582E434FFF583ECF0788FE675F008E4
+:10237000A4244EF582E434FFF583E4F07892E6FD80
+:1023800008E6FC788FE6FF7E00EE24FE5003022470
+:10239000FDE4EE75F007A4247FF582E434F8F583BC
+:1023A000E0FFE4EF5480FAE4EF540F14FFE4EE751D
+:1023B000F007A42481F582E434F8F583E07890F600
+:1023C000E4EE1313548024F0F8E434FDF9E8FCE95A
+:1023D000FD8A5AEA700302246AE4EF75F008A42427
+:1023E00048F582E434FFF583E4F07890E6FAE4EF10
+:1023F00075F008A4244FF582E434FFF583EAF0ED8C
+:10240000FBEC7A03CBC313CB13DAF9FAE4EF75F0E4
+:1024100008A42449F582E434FFF583EAF07890E6D5
+:102420007B00FAEC2AFCED3BFDFBEC7A03CBC313FB
+:10243000CB13DAF9FAE4EF75F008A4244DF582E441
+:1024400034FFF583EAF0E4EF75F008A4244AF5823E
+:10245000E434FFF5837480F0E4EF75F008A4244EB3
+:10246000F582E434FFF5837480F00224F9E4EF751B
+:10247000F008A42408F582E434FFF583E4F07890B2
+:10248000E6FAE4EF75F008A4240FF582E434FFF5D2
+:1024900083EAF0EDFBEC7A03CBC313CB13DAF9FA42
+:1024A000E4EF75F008A42409F582E434FFF583EA2B
+:1024B000F07890E67B00FAEC2AFCED3BFDFBEC7A31
+:1024C00003CBC313CB13DAF9FAE4EF75F008A424B5
+:1024D0000DF582E434FFF583EAF0E4EF75F008A42B
+:1024E000240AF582E434FFF583E4F0E4EF75F008A4
+:1024F000A4240EF582E434FFF583E4F00E02238673
+:102500008E597892EDF608ECF6788FEFF61220737C
+:10251000228C26EC30E718E526540F1475F008A439
+:102520002448F582E434FFF583E054DFF08016E5BB
+:1025300026540F1475F008A42408F582E434FFF53E
+:1025400083E054DFF0227C0022EC90FC37F08C24F6
+:10255000ED2403F5257D00D39572ED95714003853B
+:102560007225E52524B75009752503740290FC37C0
+:10257000F0AC2512307622E4F56CF56B12257D2245
+:1025800090FC35E06573600E740490FC37F0E4F560
+:102590006B756C0380467D73E4FEFF79357AFC7BB6
+:1025A0000174057800120348E56C2403F56CE56BB3
+:1025B0003400F56BE56CD39572E56B95714006853B
+:1025C000726C85716BD3E56C9448E56B9400400C9C
+:1025D000740290FC37F0E4F56B756C03AC6C123050
+:1025E0007622EC90FC37F0E4F56CF56B8C32EC6005
+:1025F0000512306780057C001230762290FF93E050
+:102600004401F0B2B390FF04E0F54A90FF06E0FD0C
+:10261000A3E0ED7D00FC7D00FC90FF06E0FFA3E061
+:102620007E00FFE4FEEC4EFCED4FFDC3EC9448ED64
+:102630009400502290FF06E0FDA3E0ED7D00FC7DBC
+:1026400000FC90FF06E0FFA3E07E00FFE4FEEC4EFE
+:10265000FCED4FFD8004E4FD7C488C728D7190FF91
+:1026600002E0FDA3E0ED7D00FC7D00FC90FF02E0B8
+:10267000FFA3E07E00FFE4FEEC4EF54CED4FF54B82
+:10268000756A357569FC7568017D357EFC7F017959
+:1026900073E4FAFB74057800120348754900E549B4
+:1026A00024FE4019AD6AAE69AF68E412031805490B
+:1026B0000DED70010E8D6A8E698F6880E1756A3547
+:1026C0007569FC75680190FF00E05460B4000280F9
+:1026D00006D35003022CBFE54A540FF549E54A548E
+:1026E00080A2E0920290FF01E012018A000B2CBA56
+:1026F000270528232CBA292F2CBA2A122A462BADBB
+:102700002BB02BF02C632C91E56D30E70EE54C459A
+:102710004B7008E572640245716003022CBC90FFA7
+:1027200000E0541FB400028003D34029E54A60034F
+:10273000022820AD6AAE69AF68740112031878AD43
+:10274000E630E00BAD6AAE69AF6874021203187C24
+:102750000212307622B401028003D3401BE56D20C3
+:10276000E107E54A6003022820E54A24FE500302FF
+:1027700028207C0212307622B402028006D3500355
+:1027800002281EE56D20E10DE54A6009E54A6480F6
+:102790006003022820AC4A1230FD4003022820E5E5
+:1027A00049702530021190FF80E05408AD6AAE698F
+:1027B000AF68120318800F90FF82E05408AD6AAE34
+:1027C00069AF68120318803D154930021DE549754F
+:1027D000F008A42448F582E434FFF583E05408AD02
+:1027E0006AAE69AF68120318801BE54975F008A44A
+:1027F0002408F582E434FFF583E05408AD6AAE693D
+:10280000AF68120318AD6AAE69AF681201EF600BD2
+:10281000AD6AAE69AF6874011203187C021230769B
+:10282000228000022CBCE56D20E706E57245716050
+:1028300003022CBC90FF00E0541FB400028003D3BD
+:10284000401AE54C14454B7004E54A600302292CFC
+:1028500078ADE654FEF67C0012307622B401028098
+:1028600003D3402AE56D20E108E56D20E00302294D
+:102870002CE56D30E004E54A700BE56D30E109E5CB
+:102880004A24FE500302292C7C0012307622B40226
+:10289000028006D3500302292AE54C454B6003020F
+:1028A000292CAC4A1230FD400302292CE56D20E1B1
+:1028B00007E56D20E0028077E56D30E006E54960D0
+:1028C00002806CE549700F90FF82E054F7F090FFB2
+:1028D00080E054F7F022E549B401028003D34009B7
+:1028E0007D017C03120F128011B402028003D340D9
+:1028F000097D017C04120F1280001549300215E594
+:102900004975F008A42448F582E434FFF583E054C7
+:10291000F7F08013E54975F008A42408F582E43443
+:10292000FFF583E054F7F07C00123076228000023D
+:102930002CBCE56D20E706E57245716003022CBCF6
+:1029400090FF00E0541FB400028003D3401AE54C0E
+:1029500014454B7004E54A6003022A0F78ADE64443
+:1029600001F67C0012307622B401028003D34029A4
+:10297000E56D20E108E56D20E003022A0FE56D30EA
+:10298000E004E549700BE56D30E108E54924FE50AF
+:1029900002807F7C0012307622B402028003D34092
+:1029A0006FE54C454B60028069AC4A1230FD400235
+:1029B0008060E56D20E107E56D20E0028054E54987
+:1029C000701430020990FF80E04408F0800790FF07
+:1029D00082E04408F022E56D30E1331549300215FC
+:1029E000E54975F008A42448F582E434FFF583E056
+:1029F0004408F08013E54975F008A42408F582E442
+:102A000034FFF583E04408F07C0012307622800227
+:102A10008000022CBCE56D20E712E5724571700C58
+:102A2000E54A700890FF00E0541F6003022CBCE5EB
+:102A30004C90FFFFF090FFFFE06005436D018003C5
+:102A4000536DFE7C0012307622E56D30E70EE572A4
+:102A50004571600890FF00E0541F6003022CBCAD7C
+:102A60004BE54CED7D00FC7D00FCBD0002800302C7
+:102A70002BA8B401028003D34032E54A7005E54C2F
+:102A8000FC6003022BAA756A407569F8756801D36A
+:102A9000E5729412E57194004006E4FD7C12800416
+:102AA000AC72AD718C708D6F12308B22B4020280CB
+:102AB00003D34059E54A6003022BAAE54CFC70277A
+:102AC000756A527569F8756801D3E5729419E571F4
+:102AD00094004006E4FD7C198004AC72AD718C70EA
+:102AE0008D6F12308B8025756A6B7569F87568017A
+:102AF000D3E5729427E57194004006E4FD7C2780BD
+:102B000004AC72AD718C708D6F12308B22B40302E5
+:102B10008006D35003022BA8E54CF549700F90FFB7
+:102B200004E0FDA3E04D6003022BAA801890FB0295
+:102B3000E0FDA3E0FC90FF05E06C700790FF04E06F
+:102B40006D60028068E4F570F56F7F00E54914C59B
+:102B500049600FEF2400F582E434FBF583E02FFF9A
+:102B600080EA8F4AE54A2400F582E434FBF583E0ED
+:102B70007D00D39572ED95714006AC72AD71800FFA
+:102B8000E54A2400F582E434FBF583E07D00FC8C0B
+:102B9000708D6FE54A2400FCE434FBFDFEECFD7F04
+:102BA000018D6A8E698F6812308B228000022CBCE6
+:102BB000022CBCE56D30E719E5721445717012E521
+:102BC0004A700EE54C454B700890FF00E0541F60C2
+:102BD00003022CBCE56D20E008E56D20E103022C2A
+:102BE000BC756A6EE4F569F568E4F56F04F570127A
+:102BF000308B22E56D20E727E57245717021E54AAB
+:102C0000701DE54C6402454B600DE54C14454B606E
+:102C100006E54C454B700890FF00E0541F6003022E
+:102C20002CBCE56D20E008E56D20E103022CBC859D
+:102C30004C6EE56E700A436D01536DFDD2B080207D
+:102C4000E56E64026007E56E1460028072536DFEEB
+:102C5000436D02E56E64026005E56E147002C2B059
+:102C60007C0012307622E56D30E71AE5721445716A
+:102C70007013E54A700FE54C454B700990FF00E07A
+:102C8000541F1460028038E56D20E10280317C0120
+:102C900012307622E56D20E715E5724571700FE57B
+:102CA0004C454B700990FF00E0541F146002800FE8
+:102CB000E56D20E10280087C00123076228000025F
+:102CC0002F7DB440028006D35003022F7390FF0182
+:102CD000E090FC35F0E54A90FC36F0E490FC37F0EB
+:102CE000E56A2403F56AE5693400F569AD4BE54C06
+:102CF000856A82856983CDF0A3CDF090FF01E01253
+:102D000001C02D2A012D50022D7A032DA4042DF28D
+:102D1000052E2F062E55072E7B082EA7092ECD0B2C
+:102D20002EF30C2F02802F028100002F60E56D2012
+:102D3000E7067C051225DF227D767E357F02793815
+:102D40007AFC7B01740878001203487D087C00122D
+:102D5000254622E56D20E7067C051225DF22E54A9F
+:102D6000B403004010B40500500BE54A7F00FE7C20
+:102D70001012324D227D007C0712254622E56D207F
+:102D8000E7067C051225DF22E54AB403004010B4B3
+:102D90000500500BE54A7F00FE7C1112324D227D6A
+:102DA000007C0712254622E56D20E7067C051225EA
+:102DB000DF22E54AB405028003D3400AE4FF04FEA3
+:102DC0007C0A12324D22B401028003D3400AE4FF90
+:102DD00004FE7C0812324D22B403004010B40500FA
+:102DE000500BE54A7F00FE7C1312324D227D007CA1
+:102DF0000712254622E56D20E734D3E5729448E5B5
+:102E00007194005006E572457170067C021225DF50
+:102E100022E54AB40103B3400BC3B403004009B434
+:102E200006005004123123227C071225DF221225CE
+:102E30007D22E56D20E71DE54AB403004010B4058E
+:102E400000500BE54A7F00FE7C1612324D227C07B3
+:102E50001225DF2212257D22E56D20E71DE54AB40B
+:102E600003004010B40500500BE54A7F00FE7C19BA
+:102E700012324D227C071225DF2212257D22E56DBC
+:102E800020E723748190FF93F0E54AB403004010DB
+:102E9000B40500500BE54A7F00FE7C1712324D222C
+:102EA0007C071225DF2212257D22E56D20E71DE536
+:102EB0004AB403004010B40500500BE54A7F00FE01
+:102EC0007C1812324D227C071225DF2212257D222A
+:102ED000E56D20E71DE54AB403004010B40500503D
+:102EE0000BE54A7F00FE7C1512324D227C0712252D
+:102EF000DF2212257D22E56D20E7067C071225DF03
+:102F00002212257D22E56D30E72090FF00E0541F5E
+:102F1000701090FF01E0B480051225748003122523
+:102F20007D227D007C051225462290FF00E0541F83
+:102F300060067C051225DF22D3E5729448E5719482
+:102F400000500BC3E5729407E571940050067C03B2
+:102F50001225DF22E54AB40504123123227C071230
+:102F600025DF22E56D30E7087D007C05122546222D
+:102F70007C051225DF22B420028003D340008000AC
+:102F80001230512275430090FF83E0540FD39543D4
+:102F90004024E54324F0F582E434FEF583E0AD6A95
+:102FA000AE69AF6812031805430DED70010E8D6A0E
+:102FB0008E698F6880D1E5437D00FCC3E5709CF588
+:102FC00070E56F9DF56FE570456F6006E490FF83D7
+:102FD000F02290FF82E04408F0E4F56F75704990AC
+:102FE000FC35E0B405028003D3404090FC36E0F5A8
+:102FF00043B405028003D3400AE4FF04FE7C0B12B5
+:10300000324D22B401028003D3400AE4FF04FE7C67
+:103010000912324D22B403004010B40500500BE5F4
+:10302000437F00FE7C1412324D2222B480004023E4
+:10303000B48200501E7C357DFC1217A57D008C6C7F
+:103040008D6B90FC37E0600512305180057C0012DA
+:103050003076222290FF83E0547FF090FF82E0449C
+:1030600008F090FF80E04408F02290FF82E04408DE
+:10307000F090FF80E04408F0228C237D008C708D5E
+:103080006F756A357569FC75680112308B2290FF87
+:1030900083E0547FF0E5706449456F700122C3E519
+:1030A000709408E56F94004015752108E5217D00B6
+:1030B000FCC3E5709CF570E56F9DF56F8009857028
+:1030C00021E4F56F757049752200E522C395215002
+:1030D00026AD6AAE69AF681201EFFCE52224F8F56F
+:1030E00082E434FEF583ECF005220DED70010E8DC7
+:1030F0006A8E698F6880D3E521547F90FF81F0222A
+:103100008C487F00EF24FD4019E4EF75F007A424FC
+:103110007FF582E434F8F583E065487002D3220F2E
+:1031200080E28F47C32285727085716F90FF82E0C5
+:1031300054F7F090FF83E0547FF022C000C001C03C
+:1031400002C006C007E5782408F8860653067F7C8F
+:10315000FF1231AD7C007D00E57B6046FF90FD9560
+:10316000E0547F6E700FC083C082A3E0FDA3E0FC3B
+:10317000A3157B8007A3A3A3DFE68026DF06D0820A
+:10318000D083801EE0F8A3E0F9A3E0FAD082D083D8
+:10319000E8F0A3E9F0A3EAF0A3C083C082A3A3A34D
+:1031A00080DA123246D007D006D002D001D00022F9
+:1031B00085A87A75A888EC70027C3F8C7922E57826
+:1031C0002408F8760012329A80FBC000C001C002C9
+:1031D000C006C007AE047CFF1231ADE57B6042FF44
+:1031E00090FD95E0547F6E700BC083C082A3A3A3B3
+:1031F000157B8007A3A3A3DFEA8026DF06D082D059
+:103200008380D8E0F8A3E0F9A3E0FAD082D083E885
+:10321000F0A3E9F0A3EAF0A3C083C082A3A3A38034
+:10322000DA7808087918097C01E6547F6E70067612
+:10323000007700800608090CBC08EE123246D00761
+:10324000D006D002D001D00022757900857AA8225C
+:10325000C0F0C082C083C3E57B24E8500512329AD7
+:1032600080F4EC6031903575E493C39C4028C00431
+:103270007CFF1231ADD004430480E57B75F003A4DC
+:103280002495F582E434FDF583ECF0EFA3F0EEA392
+:10329000F0057B123246D083D082D0F022C0047C6D
+:1032A00020D28CD28DD504FDD0042275A80075885B
+:1032B0000075B80075F00075D000E4F8900000F6D5
+:1032C00008B800FB020000C3ED940250047D037CAB
+:1032D000E8ECF4FCEDF4FD0CBC00010D8C7F8D7E60
+:1032E00022C3EC94BCED940250047D077CD0ECF436
+:1032F000FCEDF4FD0CBC00010D8C7D8D7C22EC708E
+:103300000122C000E5782418F8A604E5782408F81E
+:10331000C6547FF6E630E703D0002212329A80F4DA
+:10332000C28C857C8C857D8AD28CC0E0C0D0C0F0F8
+:10333000C082C083C000C001C002C003C004C00579
+:10334000C006C007121AF8E5782408F8E66024E5FC
+:10335000782410F8A681E57875F021A4248DF582F3
+:10336000E434FCF58378AEE58104C398F9E6F0080F
+:10337000A3D9FA74082578F8057808E65480700C0B
+:10338000E578B407F3780875780080EFE5782410C5
+:10339000F88681E57875F021A4248DF582E434FC6B
+:1033A000F58378AEE58104C398F9E0F608A3D9FA6D
+:1033B000D007D006D005D004D003D002D001D00071
+:1033C000D083D082D0F0D0D0D0E032C0E0C0D0C026
+:1033D00000C001C002C28E857E8D857F8BD28E7823
+:1033E0001979097A07E77004A600800BE6600816D1
+:1033F000E67004E74480F70809DAEAE57960131417
+:10340000F579700EE5782408F87600123246D28CF1
+:10341000D28DD002D001D000D0D0D0E0327581ADB5
+:10342000742A90FF93F0757F30757EF8757D607516
+:103430007CF012053F1234CE12175B90FF93E044EC
+:1034400001F0B2B31234F81232A880DA22C0007C44
+:1034500001EC2408F8E660090CBC08F512329A80E9
+:10346000EED00022C0F0C082C083C000C006C007FA
+:10347000ED2410F876BCED75F021A4248DF582E4DE
+:1034800034FCF583C082C083A3A3E4780DF0A3D8F5
+:10349000FCEC547F75F002A42441F582E5F034354C
+:1034A000F583E493FE740193F5828E83E493FE74B6
+:1034B0000193FFD083D082EFF0A3EEF0ED2408F863
+:1034C000EC4480F6D007D006D000D083D082D0F074
+:1034D00022757800757B007A08791878087600776D
+:1034E000000809DAF8E478087480447FF67401442F
+:1034F00010F58975B808D2ABD2A9227581ADD28EEC
+:10350000D28CD2AFE57B6032FF90FD95E0548060B5
+:103510002478087908E0547FFA7B00E6547FB502EE
+:10352000027BFF08D9F5EB700CEAF012344AAD04C7
+:10353000AC02123461A3A3A3DFD212329A80C57CFD
+:10354000017D002204FE04F204F604EA04E604E22B
+:1035500004EE04FA04A604AA04D604DA04A204A21F
+:1035600004A204DE04BE04B604BA04B204CA04C64B
+:1035700004C204CE04D204AE1901030022004802A2
+:1035800000480E301420C81AD0180A0C0506020391
+:1035900001020001CE0181010000C0008000600036
+:1035A0003000180010000800040002000100081894
+:1035B00028380C05100A0200000000000301100A60
+:1035C000020000000000FBE0FBF2090227000102FC
+:1035D00000A0FA0904000003FF00000007058102B3
+:1035E00040000007050102400000070583030200B8
+:1035F00001220354005500530042003300340031CF
+:1036000000300020002000200020002000200020AA
+:073610000020000000000093
+:00000001FF
diff --git a/fs/Kconfig b/fs/Kconfig
index 3288358..51307b0 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -269,6 +269,25 @@
 	  Posix Access Control Lists (ACLs) support permissions for users and
 	  groups beyond the owner/group/world scheme.
 
+config BTRFS_FS
+	tristate "Btrfs filesystem (EXPERIMENTAL) Unstable disk format"
+	depends on EXPERIMENTAL
+	select LIBCRC32C
+	select ZLIB_INFLATE
+	select ZLIB_DEFLATE
+	help
+	  Btrfs is a new filesystem with extents, writable snapshotting,
+	  support for multiple devices and many more features.
+
+	  Btrfs is highly experimental, and THE DISK FORMAT IS NOT YET
+	  FINALIZED.  You should say N here unless you are interested in
+	  testing Btrfs with non-critical data.
+
+	  To compile this file system support as a module, choose M here. The
+	  module will be called btrfs.
+
+	  If unsure, say N.
+
 endif # BLOCK
 
 source "fs/notify/Kconfig"
@@ -913,6 +932,58 @@
 
 	  If unsure, say N.
 
+config SQUASHFS
+	tristate "SquashFS 4.0 - Squashed file system support"
+	depends on BLOCK
+	select ZLIB_INFLATE
+	help
+	  Saying Y here includes support for SquashFS 4.0 (a Compressed
+	  Read-Only File System).  Squashfs is a highly compressed read-only
+	  filesystem for Linux.  It uses zlib compression to compress
+	  files, inodes and directories.  Inodes in the system are very small
+	  and all blocks are packed to minimise data overhead.  Block sizes
+	  greater than 4K are supported up to a maximum of 1 Mbyte (default
+	  block size 128K).  SquashFS 4.0 supports 64-bit filesystems and files
+	  (larger than 4GB), full uid/gid information, hard links and
+	  timestamps.
+
+	  Squashfs is intended for general read-only filesystem use, for
+	  archival use (i.e. in cases where a .tar.gz file may be used), and in
+	  embedded systems where low overhead is needed.  Further information
+	  and tools are available from http://squashfs.sourceforge.net.
+
+	  If you want to compile this as a module ( = code which can be
+	  inserted in and removed from the running kernel whenever you want),
+	  say M here and read <file:Documentation/modules.txt>.  The module
+	  will be called squashfs.  Note that the root file system (the one
+	  containing the directory /) cannot be compiled as a module.
+
+	  If unsure, say N.
+
+config SQUASHFS_EMBEDDED
+	bool "Additional option for memory-constrained systems"
+	depends on SQUASHFS
+	default n
+	help
+	  Saying Y here allows you to specify the fragment cache size.
+
+	  If unsure, say N.
+
+config SQUASHFS_FRAGMENT_CACHE_SIZE
+	int "Number of fragments cached" if SQUASHFS_EMBEDDED
+	depends on SQUASHFS
+	default "3"
+	help
+	  By default SquashFS caches the last 3 fragments read from
+	  the filesystem.  Increasing this amount may mean SquashFS
+	  has to re-read fragments less often from disk, at the expense
+	  of extra system memory.  Decreasing this amount will mean
+	  SquashFS uses less memory at the expense of extra reads from disk.
+
+	  Note there must be at least one cached fragment.  Anything
+	  much more than three will probably not make much difference.
+
 config VXFS_FS
 	tristate "FreeVxFS file system support (VERITAS VxFS(TM) compatible)"
 	depends on BLOCK
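For reference, a minimal configuration fragment exercising the options added above might look like the following (a sketch only: both filesystems built as modules, SQUASHFS_EMBEDDED left off so the fragment cache keeps its default of 3; the selected LIBCRC32C and ZLIB symbols are pulled in automatically):

	CONFIG_BTRFS_FS=m
	CONFIG_SQUASHFS=m
	# CONFIG_SQUASHFS_EMBEDDED is not set
	CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
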
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index ce9fb3f..bb4cc5b 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -43,7 +43,7 @@
 config CORE_DUMP_DEFAULT_ELF_HEADERS
 	bool "Write ELF core dumps with partial segments"
 	default n
-	depends on BINFMT_ELF
+	depends on BINFMT_ELF && ELF_CORE
 	help
 	  ELF core dump files describe each memory mapping of the crashed
 	  process, and can contain or omit the memory contents of each one.
diff --git a/fs/Makefile b/fs/Makefile
index c830611..38bc735 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -74,6 +74,7 @@
 obj-$(CONFIG_JBD2)		+= jbd2/
 obj-$(CONFIG_EXT2_FS)		+= ext2/
 obj-$(CONFIG_CRAMFS)		+= cramfs/
+obj-$(CONFIG_SQUASHFS)		+= squashfs/
 obj-y				+= ramfs/
 obj-$(CONFIG_HUGETLBFS)		+= hugetlbfs/
 obj-$(CONFIG_CODA_FS)		+= coda/
@@ -119,4 +120,5 @@
 obj-$(CONFIG_HPPFS)		+= hppfs/
 obj-$(CONFIG_DEBUG_FS)		+= debugfs/
 obj-$(CONFIG_OCFS2_FS)		+= ocfs2/
+obj-$(CONFIG_BTRFS_FS)		+= btrfs/
 obj-$(CONFIG_GFS2_FS)           += gfs2/
diff --git a/fs/aio.c b/fs/aio.c
index d6f89d3..8fa77e2 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1270,7 +1270,7 @@
  *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
  *	implemented.
  */
-asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
+SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
 {
 	struct kioctx *ioctx = NULL;
 	unsigned long ctx;
@@ -1308,7 +1308,7 @@
  *	implemented.  May fail with -EFAULT if the context pointed to
  *	is invalid.
  */
-asmlinkage long sys_io_destroy(aio_context_t ctx)
+SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 {
 	struct kioctx *ioctx = lookup_ioctx(ctx);
 	if (likely(NULL != ioctx)) {
@@ -1662,8 +1662,8 @@
  *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
  *	fail with -ENOSYS if not implemented.
  */
-asmlinkage long sys_io_submit(aio_context_t ctx_id, long nr,
-			      struct iocb __user * __user *iocbpp)
+SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
+		struct iocb __user * __user *, iocbpp)
 {
 	struct kioctx *ctx;
 	long ret = 0;
@@ -1737,8 +1737,8 @@
  *	invalid.  May fail with -EAGAIN if the iocb specified was not
  *	cancelled.  Will fail with -ENOSYS if not implemented.
  */
-asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
-			      struct io_event __user *result)
+SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
+		struct io_event __user *, result)
 {
 	int (*cancel)(struct kiocb *iocb, struct io_event *res);
 	struct kioctx *ctx;
@@ -1799,11 +1799,11 @@
  *	will be updated if not NULL and the operation blocks.  Will fail
  *	with -ENOSYS if not implemented.
  */
-asmlinkage long sys_io_getevents(aio_context_t ctx_id,
-				 long min_nr,
-				 long nr,
-				 struct io_event __user *events,
-				 struct timespec __user *timeout)
+SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
+		long, min_nr,
+		long, nr,
+		struct io_event __user *, events,
+		struct timespec __user *, timeout)
 {
 	struct kioctx *ioctx = lookup_ioctx(ctx_id);
 	long ret = -EINVAL;
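The conversions above replace open-coded sys_* definitions with the SYSCALL_DEFINEn() macros from <linux/syscalls.h>.  Roughly speaking (a simplified sketch, not the literal macro expansion), on architectures that select HAVE_SYSCALL_WRAPPERS the macro emits a wrapper that receives every argument as a long and casts it back to the declared type, so narrower values coming from userspace are extended consistently before the real body runs:

	/*
	 * Approximate shape of what SYSCALL_DEFINE2(io_setup, ...) expands
	 * to on a wrapper-using architecture; names and details simplified.
	 */
	static inline long SYSC_io_setup(unsigned nr_events,
					 aio_context_t __user *ctxp);

	asmlinkage long SyS_io_setup(long nr_events, long ctxp)
	{
		/* every argument arrives as a long and is cast back down */
		return SYSC_io_setup((unsigned) nr_events,
				     (aio_context_t __user *) ctxp);
	}

	static inline long SYSC_io_setup(unsigned nr_events,
					 aio_context_t __user *ctxp)
	{
		/* ... the original function body shown above ... */
	}
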
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index c41fa2a..e3ff2b9 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -152,8 +152,10 @@
 	elf_addr_t __user *sp;
 	elf_addr_t __user *u_platform;
 	elf_addr_t __user *u_base_platform;
+	elf_addr_t __user *u_rand_bytes;
 	const char *k_platform = ELF_PLATFORM;
 	const char *k_base_platform = ELF_BASE_PLATFORM;
+	unsigned char k_rand_bytes[16];
 	int items;
 	elf_addr_t *elf_info;
 	int ei_index = 0;
@@ -196,6 +198,15 @@
 			return -EFAULT;
 	}
 
+	/*
+	 * Generate 16 random bytes for userspace PRNG seeding.
+	 */
+	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
+	u_rand_bytes = (elf_addr_t __user *)
+		       STACK_ALLOC(p, sizeof(k_rand_bytes));
+	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
+		return -EFAULT;
+
 	/* Create the ELF interpreter info */
 	elf_info = (elf_addr_t *)current->mm->saved_auxv;
 	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
@@ -228,6 +239,7 @@
 	NEW_AUX_ENT(AT_GID, cred->gid);
 	NEW_AUX_ENT(AT_EGID, cred->egid);
  	NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
+	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
 	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
 	if (k_platform) {
 		NEW_AUX_ENT(AT_PLATFORM,
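The new AT_RANDOM entry hands userspace a pointer to the 16 kernel-generated bytes placed on the stack above; the C library typically consumes them for stack-protector canaries and pointer mangling.  As a rough illustration of reading the value from an ordinary program (using getauxval(), a glibc interface that postdates this patch):

	#include <stdio.h>
	#include <sys/auxv.h>	/* getauxval(); AT_RANDOM comes via <elf.h> */

	int main(void)
	{
		/* AT_RANDOM points at 16 random bytes supplied by the kernel */
		const unsigned char *r =
			(const unsigned char *) getauxval(AT_RANDOM);
		int i;

		if (!r)
			return 1;
		for (i = 0; i < 16; i++)
			printf("%02x", r[i]);
		putchar('\n');
		return 0;
	}
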
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index aa5b432..f3e72c5 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -168,9 +168,6 @@
 	struct elf_fdpic_params exec_params, interp_params;
 	struct elf_phdr *phdr;
 	unsigned long stack_size, entryaddr;
-#ifndef CONFIG_MMU
-	unsigned long fullsize;
-#endif
 #ifdef ELF_FDPIC_PLAT_INIT
 	unsigned long dynaddr;
 #endif
@@ -390,11 +387,6 @@
 		goto error_kill;
 	}
 
-	/* expand the stack mapping to use up the entire allocation granule */
-	fullsize = kobjsize((char *) current->mm->start_brk);
-	if (!IS_ERR_VALUE(do_mremap(current->mm->start_brk, stack_size,
-				    fullsize, 0, 0)))
-		stack_size = fullsize;
 	up_write(&current->mm->mmap_sem);
 
 	current->mm->brk = current->mm->start_brk;
@@ -1567,11 +1559,9 @@
 static int elf_fdpic_dump_segments(struct file *file, size_t *size,
 			   unsigned long *limit, unsigned long mm_flags)
 {
-	struct vm_list_struct *vml;
+	struct vm_area_struct *vma;
 
-	for (vml = current->mm->context.vmlist; vml; vml = vml->next) {
-	struct vm_area_struct *vma = vml->vma;
-
+	for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
 		if (!maydump(vma, mm_flags))
 			continue;
 
@@ -1617,9 +1607,6 @@
 	elf_fpxregset_t *xfpu = NULL;
 #endif
 	int thread_status_size = 0;
-#ifndef CONFIG_MMU
-	struct vm_list_struct *vml;
-#endif
 	elf_addr_t *auxv;
 	unsigned long mm_flags;
 
@@ -1685,13 +1672,7 @@
 	fill_prstatus(prstatus, current, signr);
 	elf_core_copy_regs(&prstatus->pr_reg, regs);
 
-#ifdef CONFIG_MMU
 	segs = current->mm->map_count;
-#else
-	segs = 0;
-	for (vml = current->mm->context.vmlist; vml; vml = vml->next)
-	    segs++;
-#endif
 #ifdef ELF_CORE_EXTRA_PHDRS
 	segs += ELF_CORE_EXTRA_PHDRS;
 #endif
@@ -1766,20 +1747,10 @@
 	mm_flags = current->mm->flags;
 
 	/* write program headers for segments dump */
-	for (
-#ifdef CONFIG_MMU
-		vma = current->mm->mmap; vma; vma = vma->vm_next
-#else
-			vml = current->mm->context.vmlist; vml; vml = vml->next
-#endif
-	     ) {
+	for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
 		struct elf_phdr phdr;
 		size_t sz;
 
-#ifndef CONFIG_MMU
-		vma = vml->vma;
-#endif
-
 		sz = vma->vm_end - vma->vm_start;
 
 		phdr.p_type = PT_LOAD;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 7bbd5c6..5cebf0b 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -417,8 +417,8 @@
 	unsigned long textpos = 0, datapos = 0, result;
 	unsigned long realdatastart = 0;
 	unsigned long text_len, data_len, bss_len, stack_len, flags;
-	unsigned long len, reallen, memp = 0;
-	unsigned long extra, rlim;
+	unsigned long len, memp = 0;
+	unsigned long memp_size, extra, rlim;
 	unsigned long *reloc = 0, *rp;
 	struct inode *inode;
 	int i, rev, relocs = 0;
@@ -543,17 +543,10 @@
 		}
 
 		len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
+		len = PAGE_ALIGN(len);
 		down_write(&current->mm->mmap_sem);
 		realdatastart = do_mmap(0, 0, len,
 			PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);
-		/* Remap to use all availabe slack region space */
-		if (realdatastart && (realdatastart < (unsigned long)-4096)) {
-			reallen = kobjsize((void *)realdatastart);
-			if (reallen > len) {
-				realdatastart = do_mremap(realdatastart, len,
-					reallen, MREMAP_FIXED, realdatastart);
-			}
-		}
 		up_write(&current->mm->mmap_sem);
 
 		if (realdatastart == 0 || realdatastart >= (unsigned long)-4096) {
@@ -591,21 +584,14 @@
 
 		reloc = (unsigned long *) (datapos+(ntohl(hdr->reloc_start)-text_len));
 		memp = realdatastart;
-
+		memp_size = len;
 	} else {
 
 		len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
+		len = PAGE_ALIGN(len);
 		down_write(&current->mm->mmap_sem);
 		textpos = do_mmap(0, 0, len,
 			PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
-		/* Remap to use all availabe slack region space */
-		if (textpos && (textpos < (unsigned long) -4096)) {
-			reallen = kobjsize((void *)textpos);
-			if (reallen > len) {
-				textpos = do_mremap(textpos, len, reallen,
-					MREMAP_FIXED, textpos);
-			}
-		}
 		up_write(&current->mm->mmap_sem);
 
 		if (!textpos  || textpos >= (unsigned long) -4096) {
@@ -622,7 +608,7 @@
 		reloc = (unsigned long *) (textpos + ntohl(hdr->reloc_start) +
 				MAX_SHARED_LIBS * sizeof(unsigned long));
 		memp = textpos;
-
+		memp_size = len;
 #ifdef CONFIG_BINFMT_ZFLAT
 		/*
 		 * load it all in and treat it like a RAM load from now on
@@ -680,10 +666,12 @@
 		 * set up the brk stuff, uses any slack left in data/bss/stack
 		 * allocation.  We put the brk after the bss (between the bss
 		 * and stack) like other platforms.
+		 * Userspace code relies on the stack pointer starting out at
+		 * an address right at the end of a page.
 		 */
 		current->mm->start_brk = datapos + data_len + bss_len;
 		current->mm->brk = (current->mm->start_brk + 3) & ~3;
-		current->mm->context.end_brk = memp + kobjsize((void *) memp) - stack_len;
+		current->mm->context.end_brk = memp + memp_size - stack_len;
 	}
 
 	if (flags & FLAT_FLAG_KTRACE)
@@ -790,8 +778,8 @@
 
 	/* zero the BSS,  BRK and stack areas */
 	memset((void*)(datapos + data_len), 0, bss_len + 
-			(memp + kobjsize((void *) memp) - stack_len -	/* end brk */
-			libinfo->lib_list[id].start_brk) +		/* start brk */
+			(memp + memp_size - stack_len -		/* end brk */
+			libinfo->lib_list[id].start_brk) +	/* start brk */
 			stack_len);
 
 	return 0;
diff --git a/fs/bio.c b/fs/bio.c
index 711cee1..062299a 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -788,6 +788,7 @@
 	int i, ret;
 	int nr_pages = 0;
 	unsigned int len = 0;
+	unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;
 
 	for (i = 0; i < iov_count; i++) {
 		unsigned long uaddr;
@@ -814,35 +815,42 @@
 	bio->bi_rw |= (!write_to_vm << BIO_RW);
 
 	ret = 0;
-	i = 0;
-	while (len) {
-		unsigned int bytes;
 
-		if (map_data)
-			bytes = 1U << (PAGE_SHIFT + map_data->page_order);
-		else
-			bytes = PAGE_SIZE;
+	if (map_data) {
+		nr_pages = 1 << map_data->page_order;
+		i = map_data->offset / PAGE_SIZE;
+	}
+	while (len) {
+		unsigned int bytes = PAGE_SIZE;
+
+		bytes -= offset;
 
 		if (bytes > len)
 			bytes = len;
 
 		if (map_data) {
-			if (i == map_data->nr_entries) {
+			if (i == map_data->nr_entries * nr_pages) {
 				ret = -ENOMEM;
 				break;
 			}
-			page = map_data->pages[i++];
-		} else
+
+			page = map_data->pages[i / nr_pages];
+			page += (i % nr_pages);
+
+			i++;
+		} else {
 			page = alloc_page(q->bounce_gfp | gfp_mask);
-		if (!page) {
-			ret = -ENOMEM;
-			break;
+			if (!page) {
+				ret = -ENOMEM;
+				break;
+			}
 		}
 
-		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
+		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
 			break;
 
 		len -= bytes;
+		offset = 0;
 	}
 
 	if (ret)
@@ -851,7 +859,7 @@
 	/*
 	 * success
 	 */
-	if (!write_to_vm) {
+	if (!write_to_vm && (!map_data || !map_data->null_mapped)) {
 		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0);
 		if (ret)
 			goto cleanup;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b957717..b3c1eff 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -285,6 +285,8 @@
 	INIT_LIST_HEAD(&bdev->bd_holder_list);
 #endif
 	inode_init_once(&ei->vfs_inode);
+	/* Initialize mutex for freeze. */
+	mutex_init(&bdev->bd_fsfreeze_mutex);
 }
 
 static inline void __bd_forget(struct inode *inode)
@@ -1005,6 +1007,7 @@
 	}
 
 	lock_kernel();
+ restart:
 
 	ret = -ENXIO;
 	disk = get_gendisk(bdev->bd_dev, &partno);
@@ -1025,6 +1028,19 @@
 
 			if (disk->fops->open) {
 				ret = disk->fops->open(bdev, mode);
+				if (ret == -ERESTARTSYS) {
+					/* Lost a race with 'disk' being
+					 * deleted, try again.
+					 * See md.c
+					 */
+					disk_put_part(bdev->bd_part);
+					bdev->bd_part = NULL;
+					module_put(disk->fops->owner);
+					put_disk(disk);
+					bdev->bd_disk = NULL;
+					mutex_unlock(&bdev->bd_mutex);
+					goto restart;
+				}
 				if (ret)
 					goto out_clear;
 			}
@@ -1220,6 +1236,20 @@
 	return blkdev_ioctl(bdev, mode, cmd, arg);
 }
 
+/*
+ * Try to release a page associated with a block device when the system
+ * is under memory pressure.
+ */
+static int blkdev_releasepage(struct page *page, gfp_t wait)
+{
+	struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;
+
+	if (super && super->s_op->bdev_try_to_free_page)
+		return super->s_op->bdev_try_to_free_page(super, page, wait);
+
+	return try_to_free_buffers(page);
+}
+
 static const struct address_space_operations def_blk_aops = {
 	.readpage	= blkdev_readpage,
 	.writepage	= blkdev_writepage,
@@ -1227,6 +1257,7 @@
 	.write_begin	= blkdev_write_begin,
 	.write_end	= blkdev_write_end,
 	.writepages	= generic_writepages,
+	.releasepage	= blkdev_releasepage,
 	.direct_IO	= blkdev_direct_IO,
 };
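blkdev_releasepage() above defers the decision to the filesystem mounted on the block device through the new bdev_try_to_free_page super operation (and relies on that filesystem having set bd_super on the device).  A rough sketch of how a filesystem might hook it; everything named example_* is hypothetical, while the callback signature and try_to_free_buffers() follow the code above:

	#include <linux/fs.h>
	#include <linux/buffer_head.h>

	/* hypothetical predicate: is the page still pinned by the fs? */
	static int example_page_is_busy(struct super_block *sb,
					struct page *page)
	{
		return 0;	/* placeholder: a real fs checks its own state */
	}

	static int example_bdev_try_to_free_page(struct super_block *sb,
						 struct page *page, gfp_t gfp)
	{
		/* refuse while the filesystem still needs the buffers ... */
		if (example_page_is_busy(sb, page))
			return 0;
		/* ... otherwise fall back to generic buffer-head reclaim */
		return try_to_free_buffers(page);
	}

	static const struct super_operations example_super_ops = {
		/* ... the usual operations ... */
		.bdev_try_to_free_page	= example_bdev_try_to_free_page,
	};
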
 
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
new file mode 100644
index 0000000..d2cf5a5
--- /dev/null
+++ b/fs/btrfs/Makefile
@@ -0,0 +1,25 @@
+ifneq ($(KERNELRELEASE),)
+# kbuild part of makefile
+
+obj-$(CONFIG_BTRFS_FS) := btrfs.o
+btrfs-y := super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
+	   file-item.o inode-item.o inode-map.o disk-io.o \
+	   transaction.o inode.o file.o tree-defrag.o \
+	   extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
+	   extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
+	   ref-cache.o export.o tree-log.o acl.o free-space-cache.o zlib.o \
+	   compression.o
+else
+
+# Normal Makefile
+
+KERNELDIR := /lib/modules/`uname -r`/build
+all:
+	$(MAKE) -C $(KERNELDIR) M=`pwd` CONFIG_BTRFS_FS=m modules
+
+modules_install:
+	$(MAKE) -C $(KERNELDIR) M=`pwd` modules_install
+clean:
+	$(MAKE) -C $(KERNELDIR) M=`pwd` clean
+
+endif
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
new file mode 100644
index 0000000..1d53b62
--- /dev/null
+++ b/fs/btrfs/acl.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright (C) 2007 Red Hat.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/xattr.h>
+#include <linux/posix_acl_xattr.h>
+#include <linux/posix_acl.h>
+#include <linux/sched.h>
+
+#include "ctree.h"
+#include "btrfs_inode.h"
+#include "xattr.h"
+
+#ifdef CONFIG_FS_POSIX_ACL
+
+static void btrfs_update_cached_acl(struct inode *inode,
+				    struct posix_acl **p_acl,
+				    struct posix_acl *acl)
+{
+	spin_lock(&inode->i_lock);
+	if (*p_acl && *p_acl != BTRFS_ACL_NOT_CACHED)
+		posix_acl_release(*p_acl);
+	*p_acl = posix_acl_dup(acl);
+	spin_unlock(&inode->i_lock);
+}
+
+static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
+{
+	int size;
+	const char *name;
+	char *value = NULL;
+	struct posix_acl *acl = NULL, **p_acl;
+
+	switch (type) {
+	case ACL_TYPE_ACCESS:
+		name = POSIX_ACL_XATTR_ACCESS;
+		p_acl = &BTRFS_I(inode)->i_acl;
+		break;
+	case ACL_TYPE_DEFAULT:
+		name = POSIX_ACL_XATTR_DEFAULT;
+		p_acl = &BTRFS_I(inode)->i_default_acl;
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	spin_lock(&inode->i_lock);
+	if (*p_acl != BTRFS_ACL_NOT_CACHED)
+		acl = posix_acl_dup(*p_acl);
+	spin_unlock(&inode->i_lock);
+
+	if (acl)
+		return acl;
+
+
+	size = __btrfs_getxattr(inode, name, "", 0);
+	if (size > 0) {
+		value = kzalloc(size, GFP_NOFS);
+		if (!value)
+			return ERR_PTR(-ENOMEM);
+		size = __btrfs_getxattr(inode, name, value, size);
+		if (size > 0) {
+			acl = posix_acl_from_xattr(value, size);
+			btrfs_update_cached_acl(inode, p_acl, acl);
+		}
+		kfree(value);
+	} else if (size == -ENOENT) {
+		acl = NULL;
+		btrfs_update_cached_acl(inode, p_acl, acl);
+	}
+
+	return acl;
+}
+
+static int btrfs_xattr_get_acl(struct inode *inode, int type,
+			       void *value, size_t size)
+{
+	struct posix_acl *acl;
+	int ret = 0;
+
+	acl = btrfs_get_acl(inode, type);
+
+	if (IS_ERR(acl))
+		return PTR_ERR(acl);
+	if (acl == NULL)
+		return -ENODATA;
+	ret = posix_acl_to_xattr(acl, value, size);
+	posix_acl_release(acl);
+
+	return ret;
+}
+
+/*
+ * Needs to be called with fs_mutex held
+ */
+static int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+	int ret, size = 0;
+	const char *name;
+	struct posix_acl **p_acl;
+	char *value = NULL;
+	mode_t mode;
+
+	if (acl) {
+		ret = posix_acl_valid(acl);
+		if (ret < 0)
+			return ret;
+		ret = 0;
+	}
+
+	switch (type) {
+	case ACL_TYPE_ACCESS:
+		mode = inode->i_mode;
+		ret = posix_acl_equiv_mode(acl, &mode);
+		if (ret < 0)
+			return ret;
+		ret = 0;
+		inode->i_mode = mode;
+		name = POSIX_ACL_XATTR_ACCESS;
+		p_acl = &BTRFS_I(inode)->i_acl;
+		break;
+	case ACL_TYPE_DEFAULT:
+		if (!S_ISDIR(inode->i_mode))
+			return acl ? -EINVAL : 0;
+		name = POSIX_ACL_XATTR_DEFAULT;
+		p_acl = &BTRFS_I(inode)->i_default_acl;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (acl) {
+		size = posix_acl_xattr_size(acl->a_count);
+		value = kmalloc(size, GFP_NOFS);
+		if (!value) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		ret = posix_acl_to_xattr(acl, value, size);
+		if (ret < 0)
+			goto out;
+	}
+
+	ret = __btrfs_setxattr(inode, name, value, size, 0);
+
+out:
+	kfree(value);
+
+	if (!ret)
+		btrfs_update_cached_acl(inode, p_acl, acl);
+
+	return ret;
+}
+
+static int btrfs_xattr_set_acl(struct inode *inode, int type,
+			       const void *value, size_t size)
+{
+	int ret = 0;
+	struct posix_acl *acl = NULL;
+
+	if (value) {
+		acl = posix_acl_from_xattr(value, size);
+		if (acl == NULL) {
+			value = NULL;
+			size = 0;
+		} else if (IS_ERR(acl)) {
+			return PTR_ERR(acl);
+		}
+	}
+
+	ret = btrfs_set_acl(inode, acl, type);
+
+	posix_acl_release(acl);
+
+	return ret;
+}
+
+
+static int btrfs_xattr_acl_access_get(struct inode *inode, const char *name,
+				      void *value, size_t size)
+{
+	return btrfs_xattr_get_acl(inode, ACL_TYPE_ACCESS, value, size);
+}
+
+static int btrfs_xattr_acl_access_set(struct inode *inode, const char *name,
+				      const void *value, size_t size, int flags)
+{
+	return btrfs_xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size);
+}
+
+static int btrfs_xattr_acl_default_get(struct inode *inode, const char *name,
+				       void *value, size_t size)
+{
+	return btrfs_xattr_get_acl(inode, ACL_TYPE_DEFAULT, value, size);
+}
+
+static int btrfs_xattr_acl_default_set(struct inode *inode, const char *name,
+			       const void *value, size_t size, int flags)
+{
+	return btrfs_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
+}
+
+int btrfs_check_acl(struct inode *inode, int mask)
+{
+	struct posix_acl *acl;
+	int error = -EAGAIN;
+
+	acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
+
+	if (IS_ERR(acl))
+		return PTR_ERR(acl);
+	if (acl) {
+		error = posix_acl_permission(inode, acl, mask);
+		posix_acl_release(acl);
+	}
+
+	return error;
+}
+
+/*
+ * btrfs_init_acl is already generally called under fs_mutex, so the locking
+ * stuff has been fixed to work with that.  If the locking stuff changes, we
+ * need to re-evaluate the acl locking stuff.
+ */
+int btrfs_init_acl(struct inode *inode, struct inode *dir)
+{
+	struct posix_acl *acl = NULL;
+	int ret = 0;
+
+	/* this happens with subvols */
+	if (!dir)
+		return 0;
+
+	if (!S_ISLNK(inode->i_mode)) {
+		if (IS_POSIXACL(dir)) {
+			acl = btrfs_get_acl(dir, ACL_TYPE_DEFAULT);
+			if (IS_ERR(acl))
+				return PTR_ERR(acl);
+		}
+
+		if (!acl)
+			inode->i_mode &= ~current->fs->umask;
+	}
+
+	if (IS_POSIXACL(dir) && acl) {
+		struct posix_acl *clone;
+		mode_t mode;
+
+		if (S_ISDIR(inode->i_mode)) {
+			ret = btrfs_set_acl(inode, acl, ACL_TYPE_DEFAULT);
+			if (ret)
+				goto failed;
+		}
+		clone = posix_acl_clone(acl, GFP_NOFS);
+		ret = -ENOMEM;
+		if (!clone)
+			goto failed;
+
+		mode = inode->i_mode;
+		ret = posix_acl_create_masq(clone, &mode);
+		if (ret >= 0) {
+			inode->i_mode = mode;
+			if (ret > 0) {
+				/* we need an acl */
+				ret = btrfs_set_acl(inode, clone,
+						    ACL_TYPE_ACCESS);
+			}
+		}
+	}
+failed:
+	posix_acl_release(acl);
+
+	return ret;
+}
+
+int btrfs_acl_chmod(struct inode *inode)
+{
+	struct posix_acl *acl, *clone;
+	int ret = 0;
+
+	if (S_ISLNK(inode->i_mode))
+		return -EOPNOTSUPP;
+
+	if (!IS_POSIXACL(inode))
+		return 0;
+
+	acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
+	if (IS_ERR(acl) || !acl)
+		return PTR_ERR(acl);
+
+	clone = posix_acl_clone(acl, GFP_KERNEL);
+	posix_acl_release(acl);
+	if (!clone)
+		return -ENOMEM;
+
+	ret = posix_acl_chmod_masq(clone, inode->i_mode);
+	if (!ret)
+		ret = btrfs_set_acl(inode, clone, ACL_TYPE_ACCESS);
+
+	posix_acl_release(clone);
+
+	return ret;
+}
+
+struct xattr_handler btrfs_xattr_acl_default_handler = {
+	.prefix = POSIX_ACL_XATTR_DEFAULT,
+	.get	= btrfs_xattr_acl_default_get,
+	.set	= btrfs_xattr_acl_default_set,
+};
+
+struct xattr_handler btrfs_xattr_acl_access_handler = {
+	.prefix = POSIX_ACL_XATTR_ACCESS,
+	.get	= btrfs_xattr_acl_access_get,
+	.set	= btrfs_xattr_acl_access_set,
+};
+
+#else /* CONFIG_FS_POSIX_ACL */
+
+int btrfs_acl_chmod(struct inode *inode)
+{
+	return 0;
+}
+
+int btrfs_init_acl(struct inode *inode, struct inode *dir)
+{
+	return 0;
+}
+
+int btrfs_check_acl(struct inode *inode, int mask)
+{
+	return 0;
+}
+
+#endif /* CONFIG_FS_POSIX_ACL */
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
new file mode 100644
index 0000000..8e2fec0
--- /dev/null
+++ b/fs/btrfs/async-thread.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/version.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/freezer.h>
+#include "async-thread.h"
+
+#define WORK_QUEUED_BIT 0
+#define WORK_DONE_BIT 1
+#define WORK_ORDER_DONE_BIT 2
+
+/*
+ * container for the kthread task pointer and the list of pending work.
+ * One of these is allocated per thread.
+ */
+struct btrfs_worker_thread {
+	/* pool we belong to */
+	struct btrfs_workers *workers;
+
+	/* list of struct btrfs_work that are waiting for service */
+	struct list_head pending;
+
+	/* list of worker threads from struct btrfs_workers */
+	struct list_head worker_list;
+
+	/* kthread */
+	struct task_struct *task;
+
+	/* number of things on the pending list */
+	atomic_t num_pending;
+
+	unsigned long sequence;
+
+	/* protects the pending list. */
+	spinlock_t lock;
+
+	/* set to non-zero when this thread is already awake and kicking */
+	int working;
+
+	/* are we currently idle */
+	int idle;
+};
+
+/*
+ * helper function to move a thread onto the idle list after it
+ * has finished some requests.
+ */
+static void check_idle_worker(struct btrfs_worker_thread *worker)
+{
+	if (!worker->idle && atomic_read(&worker->num_pending) <
+	    worker->workers->idle_thresh / 2) {
+		unsigned long flags;
+		spin_lock_irqsave(&worker->workers->lock, flags);
+		worker->idle = 1;
+		list_move(&worker->worker_list, &worker->workers->idle_list);
+		spin_unlock_irqrestore(&worker->workers->lock, flags);
+	}
+}
+
+/*
+ * helper function to move a thread off the idle list after new
+ * pending work is added.
+ */
+static void check_busy_worker(struct btrfs_worker_thread *worker)
+{
+	if (worker->idle && atomic_read(&worker->num_pending) >=
+	    worker->workers->idle_thresh) {
+		unsigned long flags;
+		spin_lock_irqsave(&worker->workers->lock, flags);
+		worker->idle = 0;
+		list_move_tail(&worker->worker_list,
+			       &worker->workers->worker_list);
+		spin_unlock_irqrestore(&worker->workers->lock, flags);
+	}
+}
+
+static noinline int run_ordered_completions(struct btrfs_workers *workers,
+					    struct btrfs_work *work)
+{
+	unsigned long flags;
+
+	if (!workers->ordered)
+		return 0;
+
+	set_bit(WORK_DONE_BIT, &work->flags);
+
+	spin_lock_irqsave(&workers->lock, flags);
+
+	while (!list_empty(&workers->order_list)) {
+		work = list_entry(workers->order_list.next,
+				  struct btrfs_work, order_list);
+
+		if (!test_bit(WORK_DONE_BIT, &work->flags))
+			break;
+
+		/* we are going to call the ordered done function, but
+		 * we leave the work item on the list as a barrier so
+		 * that later work items that are done don't have their
+		 * functions called before this one returns
+		 */
+		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
+			break;
+
+		spin_unlock_irqrestore(&workers->lock, flags);
+
+		work->ordered_func(work);
+
+		/* now take the lock again and call the freeing code */
+		spin_lock_irqsave(&workers->lock, flags);
+		list_del(&work->order_list);
+		work->ordered_free(work);
+	}
+
+	spin_unlock_irqrestore(&workers->lock, flags);
+	return 0;
+}
+
+/*
+ * main loop for servicing work items
+ */
+static int worker_loop(void *arg)
+{
+	struct btrfs_worker_thread *worker = arg;
+	struct list_head *cur;
+	struct btrfs_work *work;
+	do {
+		spin_lock_irq(&worker->lock);
+		while (!list_empty(&worker->pending)) {
+			cur = worker->pending.next;
+			work = list_entry(cur, struct btrfs_work, list);
+			list_del(&work->list);
+			clear_bit(WORK_QUEUED_BIT, &work->flags);
+
+			work->worker = worker;
+			spin_unlock_irq(&worker->lock);
+
+			work->func(work);
+
+			atomic_dec(&worker->num_pending);
+			/*
+			 * unless this is an ordered work queue,
+			 * 'work' was probably freed by func above.
+			 */
+			run_ordered_completions(worker->workers, work);
+
+			spin_lock_irq(&worker->lock);
+			check_idle_worker(worker);
+
+		}
+		worker->working = 0;
+		if (freezing(current)) {
+			refrigerator();
+		} else {
+			set_current_state(TASK_INTERRUPTIBLE);
+			spin_unlock_irq(&worker->lock);
+			if (!kthread_should_stop())
+				schedule();
+			__set_current_state(TASK_RUNNING);
+		}
+	} while (!kthread_should_stop());
+	return 0;
+}
+
+/*
+ * this will wait for all the worker threads to shutdown
+ */
+int btrfs_stop_workers(struct btrfs_workers *workers)
+{
+	struct list_head *cur;
+	struct btrfs_worker_thread *worker;
+
+	list_splice_init(&workers->idle_list, &workers->worker_list);
+	while (!list_empty(&workers->worker_list)) {
+		cur = workers->worker_list.next;
+		worker = list_entry(cur, struct btrfs_worker_thread,
+				    worker_list);
+		kthread_stop(worker->task);
+		list_del(&worker->worker_list);
+		kfree(worker);
+	}
+	return 0;
+}
+
+/*
+ * simple init on struct btrfs_workers
+ */
+void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
+{
+	workers->num_workers = 0;
+	INIT_LIST_HEAD(&workers->worker_list);
+	INIT_LIST_HEAD(&workers->idle_list);
+	INIT_LIST_HEAD(&workers->order_list);
+	spin_lock_init(&workers->lock);
+	workers->max_workers = max;
+	workers->idle_thresh = 32;
+	workers->name = name;
+	workers->ordered = 0;
+}
+
+/*
+ * starts new worker threads.  This does not enforce the max worker
+ * count in case you need to temporarily go past it.
+ */
+int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+{
+	struct btrfs_worker_thread *worker;
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < num_workers; i++) {
+		worker = kzalloc(sizeof(*worker), GFP_NOFS);
+		if (!worker) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		INIT_LIST_HEAD(&worker->pending);
+		INIT_LIST_HEAD(&worker->worker_list);
+		spin_lock_init(&worker->lock);
+		atomic_set(&worker->num_pending, 0);
+		worker->task = kthread_run(worker_loop, worker,
+					   "btrfs-%s-%d", workers->name,
+					   workers->num_workers + i);
+		worker->workers = workers;
+		if (IS_ERR(worker->task)) {
+			kfree(worker);
+			ret = PTR_ERR(worker->task);
+			goto fail;
+		}
+
+		spin_lock_irq(&workers->lock);
+		list_add_tail(&worker->worker_list, &workers->idle_list);
+		worker->idle = 1;
+		workers->num_workers++;
+		spin_unlock_irq(&workers->lock);
+	}
+	return 0;
+fail:
+	btrfs_stop_workers(workers);
+	return ret;
+}
+
+/*
+ * run through the list and find a worker thread that doesn't have a lot
+ * to do right now.  This can return null if we aren't yet at the thread
+ * count limit and all of the threads are busy.
+ */
+static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
+{
+	struct btrfs_worker_thread *worker;
+	struct list_head *next;
+	int enforce_min = workers->num_workers < workers->max_workers;
+
+	/*
+	 * if we find an idle thread, don't move it to the end of the
+	 * idle list.  This improves the chance that the next submission
+	 * will reuse the same thread, and maybe catch it while it is still
+	 * working
+	 */
+	if (!list_empty(&workers->idle_list)) {
+		next = workers->idle_list.next;
+		worker = list_entry(next, struct btrfs_worker_thread,
+				    worker_list);
+		return worker;
+	}
+	if (enforce_min || list_empty(&workers->worker_list))
+		return NULL;
+
+	/*
+	 * if we pick a busy task, move the task to the end of the list.
+	 * hopefully this will keep things somewhat evenly balanced.
+	 * Do the move in batches based on the sequence number.  This groups
+	 * requests submitted at roughly the same time onto the same worker.
+	 */
+	next = workers->worker_list.next;
+	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
+	atomic_inc(&worker->num_pending);
+	worker->sequence++;
+
+	if (worker->sequence % workers->idle_thresh == 0)
+		list_move_tail(next, &workers->worker_list);
+	return worker;
+}
+
+/*
+ * selects a worker thread to take the next job.  This will either find
+ * an idle worker, start a new worker up to the max count, or just return
+ * one of the existing busy workers.
+ */
+static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
+{
+	struct btrfs_worker_thread *worker;
+	unsigned long flags;
+
+again:
+	spin_lock_irqsave(&workers->lock, flags);
+	worker = next_worker(workers);
+	spin_unlock_irqrestore(&workers->lock, flags);
+
+	if (!worker) {
+		spin_lock_irqsave(&workers->lock, flags);
+		if (workers->num_workers >= workers->max_workers) {
+			struct list_head *fallback = NULL;
+			/*
+			 * we have failed to find any workers, just
+			 * we have failed to find an available worker, so
+			 * forcibly fall back to one that already exists
+			if (!list_empty(&workers->worker_list))
+				fallback = workers->worker_list.next;
+			if (!list_empty(&workers->idle_list))
+				fallback = workers->idle_list.next;
+			BUG_ON(!fallback);
+			worker = list_entry(fallback,
+				  struct btrfs_worker_thread, worker_list);
+			spin_unlock_irqrestore(&workers->lock, flags);
+		} else {
+			spin_unlock_irqrestore(&workers->lock, flags);
+			/* we're below the limit, start another worker */
+			btrfs_start_workers(workers, 1);
+			goto again;
+		}
+	}
+	return worker;
+}
+
+/*
+ * btrfs_requeue_work just puts the work item back on the tail of the list
+ * it was taken from.  It is intended for use with long running work functions
+ * that make some progress and want to give the cpu up for others.
+ */
+int btrfs_requeue_work(struct btrfs_work *work)
+{
+	struct btrfs_worker_thread *worker = work->worker;
+	unsigned long flags;
+
+	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
+		goto out;
+
+	spin_lock_irqsave(&worker->lock, flags);
+	atomic_inc(&worker->num_pending);
+	list_add_tail(&work->list, &worker->pending);
+
+	/* by definition we're busy, take ourselves off the idle
+	 * list
+	 */
+	if (worker->idle) {
+		spin_lock_irqsave(&worker->workers->lock, flags);
+		worker->idle = 0;
+		list_move_tail(&worker->worker_list,
+			       &worker->workers->worker_list);
+		spin_unlock_irqrestore(&worker->workers->lock, flags);
+	}
+
+	spin_unlock_irqrestore(&worker->lock, flags);
+
+out:
+	return 0;
+}
+
+/*
+ * places a struct btrfs_work into the pending queue of one of the kthreads
+ */
+int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
+{
+	struct btrfs_worker_thread *worker;
+	unsigned long flags;
+	int wake = 0;
+
+	/* don't requeue something already on a list */
+	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
+		goto out;
+
+	worker = find_worker(workers);
+	if (workers->ordered) {
+		spin_lock_irqsave(&workers->lock, flags);
+		list_add_tail(&work->order_list, &workers->order_list);
+		spin_unlock_irqrestore(&workers->lock, flags);
+	} else {
+		INIT_LIST_HEAD(&work->order_list);
+	}
+
+	spin_lock_irqsave(&worker->lock, flags);
+	atomic_inc(&worker->num_pending);
+	check_busy_worker(worker);
+	list_add_tail(&work->list, &worker->pending);
+
+	/*
+	 * avoid calling into wake_up_process if this thread has already
+	 * been kicked
+	 */
+	if (!worker->working)
+		wake = 1;
+	worker->working = 1;
+
+	spin_unlock_irqrestore(&worker->lock, flags);
+
+	if (wake)
+		wake_up_process(worker->task);
+out:
+	return 0;
+}
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
new file mode 100644
index 0000000..31be4ed
--- /dev/null
+++ b/fs/btrfs/async-thread.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __BTRFS_ASYNC_THREAD_
+#define __BTRFS_ASYNC_THREAD_
+
+struct btrfs_worker_thread;
+
+/*
+ * This is similar to a workqueue, but it is meant to spread the operations
+ * across all available cpus instead of just the CPU that was used to
+ * queue the work.  There is also some batching introduced to try and
+ * cut down on context switches.
+ *
+ * By default threads are added on demand up to 2 * the number of cpus.
+ * Changing struct btrfs_workers->max_workers is one way to prevent
+ * demand creation of kthreads.
+ *
+ * the basic model of these worker threads is to embed a btrfs_work
+ * structure in your own data struct, and use container_of in a
+ * work function to get back to your data struct.
+ */
+struct btrfs_work {
+	/*
+	 * func should be set to the function you want called
+	 * your work struct is passed as the only arg
+	 *
+	 * ordered_func must be set for work sent to an ordered work queue,
+	 * and it is called to complete a given work item in the same
+	 * order they were sent to the queue.
+	 */
+	void (*func)(struct btrfs_work *work);
+	void (*ordered_func)(struct btrfs_work *work);
+	void (*ordered_free)(struct btrfs_work *work);
+
+	/*
+	 * flags should be set to zero.  It is used to make sure the
+	 * struct is only inserted once into the list.
+	 */
+	unsigned long flags;
+
+	/* don't touch these */
+	struct btrfs_worker_thread *worker;
+	struct list_head list;
+	struct list_head order_list;
+};
+
+struct btrfs_workers {
+	/* current number of running workers */
+	int num_workers;
+
+	/* max number of workers allowed.  changed by btrfs_start_workers */
+	int max_workers;
+
+	/* once a worker has this many requests or fewer, it is idle */
+	int idle_thresh;
+
+	/* force completions in the order they were queued */
+	int ordered;
+
+	/* list with all the worker threads.  The workers on the idle list
+	 * may be actively servicing jobs, but they haven't yet hit the
+	 * idle thresh limit above.
+	 */
+	struct list_head worker_list;
+	struct list_head idle_list;
+
+	/*
+	 * when operating in ordered mode, this maintains the list
+	 * of work items waiting for completion
+	 */
+	struct list_head order_list;
+
+	/* lock for finding the next worker thread to queue on */
+	spinlock_t lock;
+
+	/* extra name for this worker, used in the kthread's name */
+	char *name;
+};
+
+int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
+int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
+int btrfs_stop_workers(struct btrfs_workers *workers);
+void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max);
+int btrfs_requeue_work(struct btrfs_work *work);
+#endif
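As the comments above describe, callers embed a struct btrfs_work in their own structure and recover it with container_of() in the work function.  A minimal usage sketch follows; struct my_job and the my_* names are made up for illustration, and GFP_NOFS matches how the pool itself allocates:

	#include <linux/types.h>
	#include <linux/slab.h>
	#include "async-thread.h"

	struct my_job {
		struct btrfs_work work;		/* embedded work item */
		u64 bytenr;			/* example payload */
	};

	static void my_job_func(struct btrfs_work *work)
	{
		struct my_job *job = container_of(work, struct my_job, work);

		/* ... process job->bytenr ... */
		kfree(job);			/* the work item owns itself */
	}

	static int my_submit(struct btrfs_workers *workers, u64 bytenr)
	{
		struct my_job *job = kzalloc(sizeof(*job), GFP_NOFS);

		if (!job)
			return -ENOMEM;
		job->bytenr = bytenr;
		job->work.func = my_job_func;	/* flags left zero by kzalloc */
		return btrfs_queue_worker(workers, &job->work);
	}

	/* the pool itself is set up once, e.g.:
	 *	btrfs_init_workers(&workers, "my", 4);
	 *	btrfs_start_workers(&workers, 1);
	 */
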
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
new file mode 100644
index 0000000..a8c9693
--- /dev/null
+++ b/fs/btrfs/btrfs_inode.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __BTRFS_I__
+#define __BTRFS_I__
+
+#include "extent_map.h"
+#include "extent_io.h"
+#include "ordered-data.h"
+
+/* in memory btrfs inode */
+struct btrfs_inode {
+	/* which subvolume this inode belongs to */
+	struct btrfs_root *root;
+
+	/* key used to find this inode on disk.  This is used by the code
+	 * to read in roots of subvolumes
+	 */
+	struct btrfs_key location;
+
+	/* the extent_tree has caches of all the extent mappings to disk */
+	struct extent_map_tree extent_tree;
+
+	/* the io_tree does range state (DIRTY, LOCKED etc) */
+	struct extent_io_tree io_tree;
+
+	/* special utility tree used to record which mirrors have already been
+	 * tried when checksums fail for a given block
+	 */
+	struct extent_io_tree io_failure_tree;
+
+	/* held while inserting or deleting extents from files */
+	struct mutex extent_mutex;
+
+	/* held while logging the inode in tree-log.c */
+	struct mutex log_mutex;
+
+	/* used to order data wrt metadata */
+	struct btrfs_ordered_inode_tree ordered_tree;
+
+	/* standard acl pointers */
+	struct posix_acl *i_acl;
+	struct posix_acl *i_default_acl;
+
+	/* for keeping track of orphaned inodes */
+	struct list_head i_orphan;
+
+	/* list of all the delalloc inodes in the FS.  There are times we need
+	 * to write all the delalloc pages to disk, and this list is used
+	 * to walk them all.
+	 */
+	struct list_head delalloc_inodes;
+
+	/* full 64 bit generation number, struct vfs_inode doesn't have a big
+	 * enough field for this.
+	 */
+	u64 generation;
+
+	/* sequence number for NFS changes */
+	u64 sequence;
+
+	/*
+	 * transid of the trans_handle that last modified this inode
+	 */
+	u64 last_trans;
+	/*
+	 * transid that last logged this inode
+	 */
+	u64 logged_trans;
+
+	/*
+	 * trans that last made a change that should be fully fsync'd.  This
+	 * gets reset to zero each time the inode is logged
+	 */
+	u64 log_dirty_trans;
+
+	/* total number of bytes pending delalloc, used by stat to calc the
+	 * real block usage of the file
+	 */
+	u64 delalloc_bytes;
+
+	/*
+	 * the size of the file stored in the metadata on disk.  data=ordered
+	 * means the in-memory i_size might be larger than the size on disk
+	 * because not all the blocks are written yet.
+	 */
+	u64 disk_i_size;
+
+	/* flags field from the on disk inode */
+	u32 flags;
+
+	/*
+	 * if this is a directory then index_cnt is the counter for the index
+	 * number for new files that are created
+	 */
+	u64 index_cnt;
+
+	/* the start of block group preferred for allocations. */
+	u64 block_group;
+
+	struct inode vfs_inode;
+};
+
+static inline struct btrfs_inode *BTRFS_I(struct inode *inode)
+{
+	return container_of(inode, struct btrfs_inode, vfs_inode);
+}
+
+static inline void btrfs_i_size_write(struct inode *inode, u64 size)
+{
+	inode->i_size = size;
+	BTRFS_I(inode)->disk_i_size = size;
+}
+
+
+#endif
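
A small illustrative sketch (not from this patch): since struct btrfs_inode
embeds the VFS inode, BTRFS_I() is a simple container_of lookup; the
hypothetical helper below reads the on-disk size tracked next to it:

static u64 example_disk_size(struct inode *inode)
{
	struct btrfs_inode *bi = BTRFS_I(inode);

	/* disk_i_size can lag i_size while ordered data is still in flight */
	return bi->disk_i_size;
}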
diff --git a/fs/btrfs/compat.h b/fs/btrfs/compat.h
new file mode 100644
index 0000000..7c4503e
--- /dev/null
+++ b/fs/btrfs/compat.h
@@ -0,0 +1,7 @@
+#ifndef _COMPAT_H_
+#define _COMPAT_H_
+
+#define btrfs_drop_nlink(inode) drop_nlink(inode)
+#define btrfs_inc_nlink(inode)	inc_nlink(inode)
+
+#endif /* _COMPAT_H_ */
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
new file mode 100644
index 0000000..ee848d8
--- /dev/null
+++ b/fs/btrfs/compression.c
@@ -0,0 +1,709 @@
+/*
+ * Copyright (C) 2008 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/buffer_head.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+#include <linux/backing-dev.h>
+#include <linux/mpage.h>
+#include <linux/swap.h>
+#include <linux/writeback.h>
+#include <linux/bit_spinlock.h>
+#include <linux/version.h>
+#include <linux/pagevec.h>
+#include "compat.h"
+#include "ctree.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "btrfs_inode.h"
+#include "volumes.h"
+#include "ordered-data.h"
+#include "compression.h"
+#include "extent_io.h"
+#include "extent_map.h"
+
+struct compressed_bio {
+	/* number of bios pending for this compressed extent */
+	atomic_t pending_bios;
+
+	/* the pages with the compressed data on them */
+	struct page **compressed_pages;
+
+	/* inode that owns this data */
+	struct inode *inode;
+
+	/* starting offset in the inode for our pages */
+	u64 start;
+
+	/* number of bytes in the inode we're working on */
+	unsigned long len;
+
+	/* number of bytes on disk */
+	unsigned long compressed_len;
+
+	/* number of compressed pages in the array */
+	unsigned long nr_pages;
+
+	/* IO errors */
+	int errors;
+	int mirror_num;
+
+	/* for reads, this is the bio we are copying the data into */
+	struct bio *orig_bio;
+
+	/*
+	 * the start of a variable length array of checksums only
+	 * used by reads
+	 */
+	u32 sums;
+};
+
+static inline int compressed_bio_size(struct btrfs_root *root,
+				      unsigned long disk_size)
+{
+	u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);
+	return sizeof(struct compressed_bio) +
+		((disk_size + root->sectorsize - 1) / root->sectorsize) *
+		csum_size;
+}
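
A hedged sketch of how this sizing is used, assuming 4K sectors and the
default 4-byte crc32c checksum, so a 16K compressed extent gets 16 bytes of
checksum space appended after the struct; the helper name is illustrative:

static struct compressed_bio *example_alloc_cb(struct btrfs_root *root)
{
	/* four sectors worth of checksums follow the struct, at cb->sums */
	return kmalloc(compressed_bio_size(root, 16384), GFP_NOFS);
}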
+
+static struct bio *compressed_bio_alloc(struct block_device *bdev,
+					u64 first_byte, gfp_t gfp_flags)
+{
+	struct bio *bio;
+	int nr_vecs;
+
+	nr_vecs = bio_get_nr_vecs(bdev);
+	bio = bio_alloc(gfp_flags, nr_vecs);
+
+	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
+		while (!bio && (nr_vecs /= 2))
+			bio = bio_alloc(gfp_flags, nr_vecs);
+	}
+
+	if (bio) {
+		bio->bi_size = 0;
+		bio->bi_bdev = bdev;
+		bio->bi_sector = first_byte >> 9;
+	}
+	return bio;
+}
+
+static int check_compressed_csum(struct inode *inode,
+				 struct compressed_bio *cb,
+				 u64 disk_start)
+{
+	int ret;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct page *page;
+	unsigned long i;
+	char *kaddr;
+	u32 csum;
+	u32 *cb_sum = &cb->sums;
+
+	if (btrfs_test_flag(inode, NODATASUM))
+		return 0;
+
+	for (i = 0; i < cb->nr_pages; i++) {
+		page = cb->compressed_pages[i];
+		csum = ~(u32)0;
+
+		kaddr = kmap_atomic(page, KM_USER0);
+		csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE);
+		btrfs_csum_final(csum, (char *)&csum);
+		kunmap_atomic(kaddr, KM_USER0);
+
+		if (csum != *cb_sum) {
+			printk(KERN_INFO "btrfs csum failed ino %lu "
+			       "extent %llu csum %u "
+			       "wanted %u mirror %d\n", inode->i_ino,
+			       (unsigned long long)disk_start,
+			       csum, *cb_sum, cb->mirror_num);
+			ret = -EIO;
+			goto fail;
+		}
+		cb_sum++;
+
+	}
+	ret = 0;
+fail:
+	return ret;
+}
+
+/* when we finish reading compressed pages from the disk, we
+ * decompress them and then run the bio end_io routines on the
+ * decompressed pages (in the inode address space).
+ *
+ * This allows the checksumming and other IO error handling routines
+ * to work normally.
+ *
+ * The compressed pages are freed here, and it must be run
+ * in process context.
+ */
+static void end_compressed_bio_read(struct bio *bio, int err)
+{
+	struct extent_io_tree *tree;
+	struct compressed_bio *cb = bio->bi_private;
+	struct inode *inode;
+	struct page *page;
+	unsigned long index;
+	int ret;
+
+	if (err)
+		cb->errors = 1;
+
+	/* if there are more bios still pending for this compressed
+	 * extent, just exit
+	 */
+	if (!atomic_dec_and_test(&cb->pending_bios))
+		goto out;
+
+	inode = cb->inode;
+	ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9);
+	if (ret)
+		goto csum_failed;
+
+	/* ok, we're the last bio for this extent, let's start
+	 * the decompression.
+	 */
+	tree = &BTRFS_I(inode)->io_tree;
+	ret = btrfs_zlib_decompress_biovec(cb->compressed_pages,
+					cb->start,
+					cb->orig_bio->bi_io_vec,
+					cb->orig_bio->bi_vcnt,
+					cb->compressed_len);
+csum_failed:
+	if (ret)
+		cb->errors = 1;
+
+	/* release the compressed pages */
+	for (index = 0; index < cb->nr_pages; index++) {
+		page = cb->compressed_pages[index];
+		page->mapping = NULL;
+		page_cache_release(page);
+	}
+
+	/* do io completion on the original bio */
+	if (cb->errors) {
+		bio_io_error(cb->orig_bio);
+	} else {
+		int bio_index = 0;
+		struct bio_vec *bvec = cb->orig_bio->bi_io_vec;
+
+		/*
+		 * we have verified the checksum already, set page
+		 * checked so the end_io handlers know about it
+		 */
+		while (bio_index < cb->orig_bio->bi_vcnt) {
+			SetPageChecked(bvec->bv_page);
+			bvec++;
+			bio_index++;
+		}
+		bio_endio(cb->orig_bio, 0);
+	}
+
+	/* finally free the cb struct */
+	kfree(cb->compressed_pages);
+	kfree(cb);
+out:
+	bio_put(bio);
+}
+
+/*
+ * Clear the writeback bits on all of the file
+ * pages for a compressed write
+ */
+static noinline int end_compressed_writeback(struct inode *inode, u64 start,
+					     unsigned long ram_size)
+{
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
+	struct page *pages[16];
+	unsigned long nr_pages = end_index - index + 1;
+	int i;
+	int ret;
+
+	while (nr_pages > 0) {
+		ret = find_get_pages_contig(inode->i_mapping, index,
+				     min_t(unsigned long,
+				     nr_pages, ARRAY_SIZE(pages)), pages);
+		if (ret == 0) {
+			nr_pages -= 1;
+			index += 1;
+			continue;
+		}
+		for (i = 0; i < ret; i++) {
+			end_page_writeback(pages[i]);
+			page_cache_release(pages[i]);
+		}
+		nr_pages -= ret;
+		index += ret;
+	}
+	/* the inode may be gone now */
+	return 0;
+}
+
+/*
+ * do the cleanup once all the compressed pages hit the disk.
+ * This will clear writeback on the file pages and free the compressed
+ * pages.
+ *
+ * This also calls the writeback end hooks for the file pages so that
+ * metadata and checksums can be updated in the file.
+ */
+static void end_compressed_bio_write(struct bio *bio, int err)
+{
+	struct extent_io_tree *tree;
+	struct compressed_bio *cb = bio->bi_private;
+	struct inode *inode;
+	struct page *page;
+	unsigned long index;
+
+	if (err)
+		cb->errors = 1;
+
+	/* if there are more bios still pending for this compressed
+	 * extent, just exit
+	 */
+	if (!atomic_dec_and_test(&cb->pending_bios))
+		goto out;
+
+	/* ok, we're the last bio for this extent, step one is to
+	 * call back into the FS and do all the end_io operations
+	 */
+	inode = cb->inode;
+	tree = &BTRFS_I(inode)->io_tree;
+	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
+	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
+					 cb->start,
+					 cb->start + cb->len - 1,
+					 NULL, 1);
+	cb->compressed_pages[0]->mapping = NULL;
+
+	end_compressed_writeback(inode, cb->start, cb->len);
+	/* note, our inode could be gone now */
+
+	/*
+	 * release the compressed pages, these came from alloc_page and
+	 * are not attached to the inode at all
+	 */
+	for (index = 0; index < cb->nr_pages; index++) {
+		page = cb->compressed_pages[index];
+		page->mapping = NULL;
+		page_cache_release(page);
+	}
+
+	/* finally free the cb struct */
+	kfree(cb->compressed_pages);
+	kfree(cb);
+out:
+	bio_put(bio);
+}
+
+/*
+ * worker function to build and submit bios for previously compressed pages.
+ * The corresponding pages in the inode should be marked for writeback
+ * and the compressed pages should have a reference on them for dropping
+ * when the IO is complete.
+ *
+ * This also checksums the file bytes and gets things ready for
+ * the end io hooks.
+ */
+int btrfs_submit_compressed_write(struct inode *inode, u64 start,
+				 unsigned long len, u64 disk_start,
+				 unsigned long compressed_len,
+				 struct page **compressed_pages,
+				 unsigned long nr_pages)
+{
+	struct bio *bio = NULL;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct compressed_bio *cb;
+	unsigned long bytes_left;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	int page_index = 0;
+	struct page *page;
+	u64 first_byte = disk_start;
+	struct block_device *bdev;
+	int ret;
+
+	WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
+	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+	atomic_set(&cb->pending_bios, 0);
+	cb->errors = 0;
+	cb->inode = inode;
+	cb->start = start;
+	cb->len = len;
+	cb->mirror_num = 0;
+	cb->compressed_pages = compressed_pages;
+	cb->compressed_len = compressed_len;
+	cb->orig_bio = NULL;
+	cb->nr_pages = nr_pages;
+
+	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+
+	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
+	bio->bi_private = cb;
+	bio->bi_end_io = end_compressed_bio_write;
+	atomic_inc(&cb->pending_bios);
+
+	/* create and submit bios for the compressed pages */
+	bytes_left = compressed_len;
+	for (page_index = 0; page_index < cb->nr_pages; page_index++) {
+		page = compressed_pages[page_index];
+		page->mapping = inode->i_mapping;
+		if (bio->bi_size)
+			ret = io_tree->ops->merge_bio_hook(page, 0,
+							   PAGE_CACHE_SIZE,
+							   bio, 0);
+		else
+			ret = 0;
+
+		page->mapping = NULL;
+		if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
+		    PAGE_CACHE_SIZE) {
+			bio_get(bio);
+
+			/*
+			 * inc the count before we submit the bio so
+			 * we know the end IO handler won't happen before
+			 * we inc the count.  Otherwise, the cb might get
+			 * freed before we're done setting it up
+			 */
+			atomic_inc(&cb->pending_bios);
+			ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
+			BUG_ON(ret);
+
+			ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
+			BUG_ON(ret);
+
+			ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
+			BUG_ON(ret);
+
+			bio_put(bio);
+
+			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
+			bio->bi_private = cb;
+			bio->bi_end_io = end_compressed_bio_write;
+			bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
+		}
+		if (bytes_left < PAGE_CACHE_SIZE) {
+			printk(KERN_WARNING "bytes left %lu compress len %lu nr %lu\n",
+			       bytes_left, cb->compressed_len, cb->nr_pages);
+		}
+		bytes_left -= PAGE_CACHE_SIZE;
+		first_byte += PAGE_CACHE_SIZE;
+		cond_resched();
+	}
+	bio_get(bio);
+
+	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
+	BUG_ON(ret);
+
+	ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
+	BUG_ON(ret);
+
+	ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
+	BUG_ON(ret);
+
+	bio_put(bio);
+	return 0;
+}
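
A sketch of the reference-counting pattern used in the loop above (the helper
is hypothetical and submit_bio stands in for btrfs_map_bio): the
compressed_bio is pinned before every submit so its end_io handler cannot
free it while the next bio is still being built; only the final
atomic_dec_and_test in the handler tears it down.

static void example_submit_one(struct compressed_bio *cb, struct bio *bio)
{
	atomic_inc(&cb->pending_bios);	/* end_io drops this count */
	submit_bio(WRITE, bio);		/* stand-in for btrfs_map_bio() */
}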
+
+static noinline int add_ra_bio_pages(struct inode *inode,
+				     u64 compressed_end,
+				     struct compressed_bio *cb)
+{
+	unsigned long end_index;
+	unsigned long page_index;
+	u64 last_offset;
+	u64 isize = i_size_read(inode);
+	int ret;
+	struct page *page;
+	unsigned long nr_pages = 0;
+	struct extent_map *em;
+	struct address_space *mapping = inode->i_mapping;
+	struct pagevec pvec;
+	struct extent_map_tree *em_tree;
+	struct extent_io_tree *tree;
+	u64 end;
+	int misses = 0;
+
+	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
+	last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
+	em_tree = &BTRFS_I(inode)->extent_tree;
+	tree = &BTRFS_I(inode)->io_tree;
+
+	if (isize == 0)
+		return 0;
+
+	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+
+	pagevec_init(&pvec, 0);
+	while (last_offset < compressed_end) {
+		page_index = last_offset >> PAGE_CACHE_SHIFT;
+
+		if (page_index > end_index)
+			break;
+
+		rcu_read_lock();
+		page = radix_tree_lookup(&mapping->page_tree, page_index);
+		rcu_read_unlock();
+		if (page) {
+			misses++;
+			if (misses > 4)
+				break;
+			goto next;
+		}
+
+		page = alloc_page(mapping_gfp_mask(mapping) | GFP_NOFS);
+		if (!page)
+			break;
+
+		page->index = page_index;
+		/*
+		 * what we want to do here is call add_to_page_cache_lru,
+		 * but that isn't exported, so we reproduce it here
+		 */
+		if (add_to_page_cache(page, mapping,
+				      page->index, GFP_NOFS)) {
+			page_cache_release(page);
+			goto next;
+		}
+
+		/* open coding of lru_cache_add, also not exported */
+		page_cache_get(page);
+		if (!pagevec_add(&pvec, page))
+			__pagevec_lru_add_file(&pvec);
+
+		end = last_offset + PAGE_CACHE_SIZE - 1;
+		/*
+		 * at this point, we have a locked page in the page cache
+		 * for these bytes in the file.  But, we have to make
+		 * sure they map to this compressed extent on disk.
+		 */
+		set_page_extent_mapped(page);
+		lock_extent(tree, last_offset, end, GFP_NOFS);
+		spin_lock(&em_tree->lock);
+		em = lookup_extent_mapping(em_tree, last_offset,
+					   PAGE_CACHE_SIZE);
+		spin_unlock(&em_tree->lock);
+
+		if (!em || last_offset < em->start ||
+		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
+		    (em->block_start >> 9) != cb->orig_bio->bi_sector) {
+			free_extent_map(em);
+			unlock_extent(tree, last_offset, end, GFP_NOFS);
+			unlock_page(page);
+			page_cache_release(page);
+			break;
+		}
+		free_extent_map(em);
+
+		if (page->index == end_index) {
+			char *userpage;
+			size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);
+
+			if (zero_offset) {
+				int zeros;
+				zeros = PAGE_CACHE_SIZE - zero_offset;
+				userpage = kmap_atomic(page, KM_USER0);
+				memset(userpage + zero_offset, 0, zeros);
+				flush_dcache_page(page);
+				kunmap_atomic(userpage, KM_USER0);
+			}
+		}
+
+		ret = bio_add_page(cb->orig_bio, page,
+				   PAGE_CACHE_SIZE, 0);
+
+		if (ret == PAGE_CACHE_SIZE) {
+			nr_pages++;
+			page_cache_release(page);
+		} else {
+			unlock_extent(tree, last_offset, end, GFP_NOFS);
+			unlock_page(page);
+			page_cache_release(page);
+			break;
+		}
+next:
+		last_offset += PAGE_CACHE_SIZE;
+	}
+	if (pagevec_count(&pvec))
+		__pagevec_lru_add_file(&pvec);
+	return 0;
+}
+
+/*
+ * for a compressed read, the bio we get passed has all the inode pages
+ * in it.  We don't actually do IO on those pages but allocate new ones
+ * to hold the compressed pages on disk.
+ *
+ * bio->bi_sector points to the compressed extent on disk
+ * bio->bi_io_vec points to all of the inode pages
+ * bio->bi_vcnt is a count of pages
+ *
+ * After the compressed pages are read, we copy the bytes into the
+ * bio we were passed and then run its end_io callbacks
+ */
+int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
+				 int mirror_num, unsigned long bio_flags)
+{
+	struct extent_io_tree *tree;
+	struct extent_map_tree *em_tree;
+	struct compressed_bio *cb;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
+	unsigned long compressed_len;
+	unsigned long nr_pages;
+	unsigned long page_index;
+	struct page *page;
+	struct block_device *bdev;
+	struct bio *comp_bio;
+	u64 cur_disk_byte = (u64)bio->bi_sector << 9;
+	u64 em_len;
+	u64 em_start;
+	struct extent_map *em;
+	int ret;
+	u32 *sums;
+
+	tree = &BTRFS_I(inode)->io_tree;
+	em_tree = &BTRFS_I(inode)->extent_tree;
+
+	/* we need the actual starting offset of this extent in the file */
+	spin_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree,
+				   page_offset(bio->bi_io_vec->bv_page),
+				   PAGE_CACHE_SIZE);
+	spin_unlock(&em_tree->lock);
+
+	compressed_len = em->block_len;
+	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+	atomic_set(&cb->pending_bios, 0);
+	cb->errors = 0;
+	cb->inode = inode;
+	cb->mirror_num = mirror_num;
+	sums = &cb->sums;
+
+	cb->start = em->orig_start;
+	em_len = em->len;
+	em_start = em->start;
+
+	free_extent_map(em);
+	em = NULL;
+
+	cb->len = uncompressed_len;
+	cb->compressed_len = compressed_len;
+	cb->orig_bio = bio;
+
+	nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
+				 PAGE_CACHE_SIZE;
+	cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages,
+				       GFP_NOFS);
+	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+
+	for (page_index = 0; page_index < nr_pages; page_index++) {
+		cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
+							      __GFP_HIGHMEM);
+	}
+	cb->nr_pages = nr_pages;
+
+	add_ra_bio_pages(inode, em_start + em_len, cb);
+
+	/* include any pages we added in add_ra_bio_pages */
+	uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
+	cb->len = uncompressed_len;
+
+	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
+	comp_bio->bi_private = cb;
+	comp_bio->bi_end_io = end_compressed_bio_read;
+	atomic_inc(&cb->pending_bios);
+
+	for (page_index = 0; page_index < nr_pages; page_index++) {
+		page = cb->compressed_pages[page_index];
+		page->mapping = inode->i_mapping;
+		page->index = em_start >> PAGE_CACHE_SHIFT;
+
+		if (comp_bio->bi_size)
+			ret = tree->ops->merge_bio_hook(page, 0,
+							PAGE_CACHE_SIZE,
+							comp_bio, 0);
+		else
+			ret = 0;
+
+		page->mapping = NULL;
+		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
+		    PAGE_CACHE_SIZE) {
+			bio_get(comp_bio);
+
+			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
+			BUG_ON(ret);
+
+			/*
+			 * inc the count before we submit the bio so
+			 * we know the end IO handler won't happen before
+			 * we inc the count.  Otherwise, the cb might get
+			 * freed before we're done setting it up
+			 */
+			atomic_inc(&cb->pending_bios);
+
+			if (!btrfs_test_flag(inode, NODATASUM)) {
+				btrfs_lookup_bio_sums(root, inode, comp_bio,
+						      sums);
+			}
+			sums += (comp_bio->bi_size + root->sectorsize - 1) /
+				root->sectorsize;
+
+			ret = btrfs_map_bio(root, READ, comp_bio,
+					    mirror_num, 0);
+			BUG_ON(ret);
+
+			bio_put(comp_bio);
+
+			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
+							GFP_NOFS);
+			comp_bio->bi_private = cb;
+			comp_bio->bi_end_io = end_compressed_bio_read;
+
+			bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
+		}
+		cur_disk_byte += PAGE_CACHE_SIZE;
+	}
+	bio_get(comp_bio);
+
+	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
+	BUG_ON(ret);
+
+	if (!btrfs_test_flag(inode, NODATASUM))
+		btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
+
+	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
+	BUG_ON(ret);
+
+	bio_put(comp_bio);
+	return 0;
+}
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
new file mode 100644
index 0000000..421f5b4
--- /dev/null
+++ b/fs/btrfs/compression.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2008 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __BTRFS_COMPRESSION_
+#define __BTRFS_COMPRESSION_
+
+int btrfs_zlib_decompress(unsigned char *data_in,
+			  struct page *dest_page,
+			  unsigned long start_byte,
+			  size_t srclen, size_t destlen);
+int btrfs_zlib_compress_pages(struct address_space *mapping,
+			      u64 start, unsigned long len,
+			      struct page **pages,
+			      unsigned long nr_dest_pages,
+			      unsigned long *out_pages,
+			      unsigned long *total_in,
+			      unsigned long *total_out,
+			      unsigned long max_out);
+int btrfs_zlib_decompress_biovec(struct page **pages_in,
+			      u64 disk_start,
+			      struct bio_vec *bvec,
+			      int vcnt,
+			      size_t srclen);
+void btrfs_zlib_exit(void);
+int btrfs_submit_compressed_write(struct inode *inode, u64 start,
+				  unsigned long len, u64 disk_start,
+				  unsigned long compressed_len,
+				  struct page **compressed_pages,
+				  unsigned long nr_pages);
+int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
+				 int mirror_num, unsigned long bio_flags);
+#endif
diff --git a/fs/btrfs/crc32c.h b/fs/btrfs/crc32c.h
new file mode 100644
index 0000000..6e1b3de
--- /dev/null
+++ b/fs/btrfs/crc32c.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2008 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __BTRFS_CRC32C__
+#define __BTRFS_CRC32C__
+#include <linux/crc32c.h>
+
+/*
+ * this file used to do more for selecting the HW version of crc32c;
+ * perhaps it will again one day.
+ */
+#define btrfs_crc32c(seed, data, length) crc32c(seed, data, length)
+#endif
+
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
new file mode 100644
index 0000000..9e46c07
--- /dev/null
+++ b/fs/btrfs/ctree.c
@@ -0,0 +1,3953 @@
+/*
+ * Copyright (C) 2007,2008 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include "ctree.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "print-tree.h"
+#include "locking.h"
+
+static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
+		      *root, struct btrfs_path *path, int level);
+static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
+		      *root, struct btrfs_key *ins_key,
+		      struct btrfs_path *path, int data_size, int extend);
+static int push_node_left(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root, struct extent_buffer *dst,
+			  struct extent_buffer *src, int empty);
+static int balance_node_right(struct btrfs_trans_handle *trans,
+			      struct btrfs_root *root,
+			      struct extent_buffer *dst_buf,
+			      struct extent_buffer *src_buf);
+static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+		   struct btrfs_path *path, int level, int slot);
+
+inline void btrfs_init_path(struct btrfs_path *p)
+{
+	memset(p, 0, sizeof(*p));
+}
+
+struct btrfs_path *btrfs_alloc_path(void)
+{
+	struct btrfs_path *path;
+	path = kmem_cache_alloc(btrfs_path_cachep, GFP_NOFS);
+	if (path) {
+		btrfs_init_path(path);
+		path->reada = 1;
+	}
+	return path;
+}
+
+/* this also releases the path */
+void btrfs_free_path(struct btrfs_path *p)
+{
+	btrfs_release_path(NULL, p);
+	kmem_cache_free(btrfs_path_cachep, p);
+}
+
+/*
+ * path release drops references on the extent buffers in the path
+ * and it drops any locks held by this path
+ *
+ * It is safe to call this on paths that have no locks or extent buffers held.
+ */
+noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
+{
+	int i;
+
+	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
+		p->slots[i] = 0;
+		if (!p->nodes[i])
+			continue;
+		if (p->locks[i]) {
+			btrfs_tree_unlock(p->nodes[i]);
+			p->locks[i] = 0;
+		}
+		free_extent_buffer(p->nodes[i]);
+		p->nodes[i] = NULL;
+	}
+}
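
The helpers above imply the usual path lifecycle sketched below (error
handling is elided and the search call is only indicative):

static void example_path_use(struct btrfs_root *root)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();	/* zeroed, readahead enabled */
	if (!path)
		return;
	/* ... btrfs_search_slot(NULL, root, &key, path, 0, 0); ... */
	btrfs_release_path(root, path);	/* drop locks and buffer refs */
	btrfs_free_path(path);		/* releases again (harmless), then frees */
}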
+
+/*
+ * safely gets a reference on the root node of a tree.  A lock
+ * is not taken, so a concurrent writer may put a different node
+ * at the root of the tree.  See btrfs_lock_root_node for the
+ * looping required.
+ *
+ * The extent buffer returned by this has a reference taken, so
+ * it won't disappear.  It may stop being the root of the tree
+ * at any time because there are no locks held.
+ */
+struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
+{
+	struct extent_buffer *eb;
+	spin_lock(&root->node_lock);
+	eb = root->node;
+	extent_buffer_get(eb);
+	spin_unlock(&root->node_lock);
+	return eb;
+}
+
+/* loop around taking references on and locking the root node of the
+ * tree until you end up with a lock on the root.  A locked buffer
+ * is returned, with a reference held.
+ */
+struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
+{
+	struct extent_buffer *eb;
+
+	while (1) {
+		eb = btrfs_root_node(root);
+		btrfs_tree_lock(eb);
+
+		spin_lock(&root->node_lock);
+		if (eb == root->node) {
+			spin_unlock(&root->node_lock);
+			break;
+		}
+		spin_unlock(&root->node_lock);
+
+		btrfs_tree_unlock(eb);
+		free_extent_buffer(eb);
+	}
+	return eb;
+}
+
+/* cow-only roots (everything that is not a reference counted cow subvolume)
+ * just get put onto a simple dirty list.  transaction.c walks this list to
+ * make sure they get properly updated on disk.
+ */
+static void add_root_to_dirty_list(struct btrfs_root *root)
+{
+	if (root->track_dirty && list_empty(&root->dirty_list)) {
+		list_add(&root->dirty_list,
+			 &root->fs_info->dirty_cowonly_roots);
+	}
+}
+
+/*
+ * used by snapshot creation to make a copy of a root for a tree with
+ * a given objectid.  The buffer with the new root node is returned in
+ * cow_ret, and this func returns zero on success or a negative error code.
+ */
+int btrfs_copy_root(struct btrfs_trans_handle *trans,
+		      struct btrfs_root *root,
+		      struct extent_buffer *buf,
+		      struct extent_buffer **cow_ret, u64 new_root_objectid)
+{
+	struct extent_buffer *cow;
+	u32 nritems;
+	int ret = 0;
+	int level;
+	struct btrfs_root *new_root;
+
+	new_root = kmalloc(sizeof(*new_root), GFP_NOFS);
+	if (!new_root)
+		return -ENOMEM;
+
+	memcpy(new_root, root, sizeof(*new_root));
+	new_root->root_key.objectid = new_root_objectid;
+
+	WARN_ON(root->ref_cows && trans->transid !=
+		root->fs_info->running_transaction->transid);
+	WARN_ON(root->ref_cows && trans->transid != root->last_trans);
+
+	level = btrfs_header_level(buf);
+	nritems = btrfs_header_nritems(buf);
+
+	cow = btrfs_alloc_free_block(trans, new_root, buf->len, 0,
+				     new_root_objectid, trans->transid,
+				     level, buf->start, 0);
+	if (IS_ERR(cow)) {
+		kfree(new_root);
+		return PTR_ERR(cow);
+	}
+
+	copy_extent_buffer(cow, buf, 0, 0, cow->len);
+	btrfs_set_header_bytenr(cow, cow->start);
+	btrfs_set_header_generation(cow, trans->transid);
+	btrfs_set_header_owner(cow, new_root_objectid);
+	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN);
+
+	write_extent_buffer(cow, root->fs_info->fsid,
+			    (unsigned long)btrfs_header_fsid(cow),
+			    BTRFS_FSID_SIZE);
+
+	WARN_ON(btrfs_header_generation(buf) > trans->transid);
+	ret = btrfs_inc_ref(trans, new_root, buf, cow, NULL);
+	kfree(new_root);
+
+	if (ret)
+		return ret;
+
+	btrfs_mark_buffer_dirty(cow);
+	*cow_ret = cow;
+	return 0;
+}
+
+/*
+ * does the dirty work in cow of a single block.  The parent block (if
+ * supplied) is updated to point to the new cow copy.  The new buffer is marked
+ * dirty and returned locked.  If you modify the block it needs to be marked
+ * dirty again.
+ *
+ * search_start -- an allocation hint for the new block
+ *
+ * empty_size -- a hint that you plan on doing more cow.  This is the size in
+ * bytes the allocator should try to find free next to the block it returns.
+ * This is just a hint and may be ignored by the allocator.
+ *
+ * prealloc_dest -- if you have already reserved a destination for the cow,
+ * this uses that block instead of allocating a new one.
+ * btrfs_alloc_reserved_extent is used to finish the allocation.
+ */
+static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root,
+			     struct extent_buffer *buf,
+			     struct extent_buffer *parent, int parent_slot,
+			     struct extent_buffer **cow_ret,
+			     u64 search_start, u64 empty_size,
+			     u64 prealloc_dest)
+{
+	u64 parent_start;
+	struct extent_buffer *cow;
+	u32 nritems;
+	int ret = 0;
+	int level;
+	int unlock_orig = 0;
+
+	if (*cow_ret == buf)
+		unlock_orig = 1;
+
+	WARN_ON(!btrfs_tree_locked(buf));
+
+	if (parent)
+		parent_start = parent->start;
+	else
+		parent_start = 0;
+
+	WARN_ON(root->ref_cows && trans->transid !=
+		root->fs_info->running_transaction->transid);
+	WARN_ON(root->ref_cows && trans->transid != root->last_trans);
+
+	level = btrfs_header_level(buf);
+	nritems = btrfs_header_nritems(buf);
+
+	if (prealloc_dest) {
+		struct btrfs_key ins;
+
+		ins.objectid = prealloc_dest;
+		ins.offset = buf->len;
+		ins.type = BTRFS_EXTENT_ITEM_KEY;
+
+		ret = btrfs_alloc_reserved_extent(trans, root, parent_start,
+						  root->root_key.objectid,
+						  trans->transid, level, &ins);
+		BUG_ON(ret);
+		cow = btrfs_init_new_buffer(trans, root, prealloc_dest,
+					    buf->len);
+	} else {
+		cow = btrfs_alloc_free_block(trans, root, buf->len,
+					     parent_start,
+					     root->root_key.objectid,
+					     trans->transid, level,
+					     search_start, empty_size);
+	}
+	if (IS_ERR(cow))
+		return PTR_ERR(cow);
+
+	copy_extent_buffer(cow, buf, 0, 0, cow->len);
+	btrfs_set_header_bytenr(cow, cow->start);
+	btrfs_set_header_generation(cow, trans->transid);
+	btrfs_set_header_owner(cow, root->root_key.objectid);
+	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN);
+
+	write_extent_buffer(cow, root->fs_info->fsid,
+			    (unsigned long)btrfs_header_fsid(cow),
+			    BTRFS_FSID_SIZE);
+
+	WARN_ON(btrfs_header_generation(buf) > trans->transid);
+	if (btrfs_header_generation(buf) != trans->transid) {
+		u32 nr_extents;
+		ret = btrfs_inc_ref(trans, root, buf, cow, &nr_extents);
+		if (ret)
+			return ret;
+
+		ret = btrfs_cache_ref(trans, root, buf, nr_extents);
+		WARN_ON(ret);
+	} else if (btrfs_header_owner(buf) == BTRFS_TREE_RELOC_OBJECTID) {
+		/*
+		 * There are only two places that can drop reference to
+		 * tree blocks owned by living reloc trees, one is here,
+		 * the other place is btrfs_drop_subtree. In both places,
+		 * we check reference count while tree block is locked.
+		 * Furthermore, if reference count is one, it won't get
+		 * increased by someone else.
+		 */
+		u32 refs;
+		ret = btrfs_lookup_extent_ref(trans, root, buf->start,
+					      buf->len, &refs);
+		BUG_ON(ret);
+		if (refs == 1) {
+			ret = btrfs_update_ref(trans, root, buf, cow,
+					       0, nritems);
+			clean_tree_block(trans, root, buf);
+		} else {
+			ret = btrfs_inc_ref(trans, root, buf, cow, NULL);
+		}
+		BUG_ON(ret);
+	} else {
+		ret = btrfs_update_ref(trans, root, buf, cow, 0, nritems);
+		if (ret)
+			return ret;
+		clean_tree_block(trans, root, buf);
+	}
+
+	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
+		ret = btrfs_reloc_tree_cache_ref(trans, root, cow, buf->start);
+		WARN_ON(ret);
+	}
+
+	if (buf == root->node) {
+		WARN_ON(parent && parent != buf);
+
+		spin_lock(&root->node_lock);
+		root->node = cow;
+		extent_buffer_get(cow);
+		spin_unlock(&root->node_lock);
+
+		if (buf != root->commit_root) {
+			btrfs_free_extent(trans, root, buf->start,
+					  buf->len, buf->start,
+					  root->root_key.objectid,
+					  btrfs_header_generation(buf),
+					  level, 1);
+		}
+		free_extent_buffer(buf);
+		add_root_to_dirty_list(root);
+	} else {
+		btrfs_set_node_blockptr(parent, parent_slot,
+					cow->start);
+		WARN_ON(trans->transid == 0);
+		btrfs_set_node_ptr_generation(parent, parent_slot,
+					      trans->transid);
+		btrfs_mark_buffer_dirty(parent);
+		WARN_ON(btrfs_header_generation(parent) != trans->transid);
+		btrfs_free_extent(trans, root, buf->start, buf->len,
+				  parent_start, btrfs_header_owner(parent),
+				  btrfs_header_generation(parent), level, 1);
+	}
+	if (unlock_orig)
+		btrfs_tree_unlock(buf);
+	free_extent_buffer(buf);
+	btrfs_mark_buffer_dirty(cow);
+	*cow_ret = cow;
+	return 0;
+}
+
+/*
+ * cows a single block, see __btrfs_cow_block for the real work.
+ * This version of it has extra checks so that a block isn't cow'd more than
+ * once per transaction, as long as it hasn't been written yet
+ */
+noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
+		    struct btrfs_root *root, struct extent_buffer *buf,
+		    struct extent_buffer *parent, int parent_slot,
+		    struct extent_buffer **cow_ret, u64 prealloc_dest)
+{
+	u64 search_start;
+	int ret;
+
+	if (trans->transaction != root->fs_info->running_transaction) {
+		printk(KERN_CRIT "trans %llu running %llu\n",
+		       (unsigned long long)trans->transid,
+		       (unsigned long long)
+		       root->fs_info->running_transaction->transid);
+		WARN_ON(1);
+	}
+	if (trans->transid != root->fs_info->generation) {
+		printk(KERN_CRIT "trans %llu running %llu\n",
+		       (unsigned long long)trans->transid,
+		       (unsigned long long)root->fs_info->generation);
+		WARN_ON(1);
+	}
+
+	spin_lock(&root->fs_info->hash_lock);
+	if (btrfs_header_generation(buf) == trans->transid &&
+	    btrfs_header_owner(buf) == root->root_key.objectid &&
+	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
+		*cow_ret = buf;
+		spin_unlock(&root->fs_info->hash_lock);
+		WARN_ON(prealloc_dest);
+		return 0;
+	}
+	spin_unlock(&root->fs_info->hash_lock);
+	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
+	ret = __btrfs_cow_block(trans, root, buf, parent,
+				 parent_slot, cow_ret, search_start, 0,
+				 prealloc_dest);
+	return ret;
+}
+
+/*
+ * helper function for defrag to decide if two blocks pointed to by a
+ * node are actually close by
+ */
+static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
+{
+	if (blocknr < other && other - (blocknr + blocksize) < 32768)
+		return 1;
+	if (blocknr > other && blocknr - (other + blocksize) < 32768)
+		return 1;
+	return 0;
+}
+
+/*
+ * compare two keys in a memcmp fashion
+ */
+static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
+{
+	struct btrfs_key k1;
+
+	btrfs_disk_key_to_cpu(&k1, disk);
+
+	if (k1.objectid > k2->objectid)
+		return 1;
+	if (k1.objectid < k2->objectid)
+		return -1;
+	if (k1.type > k2->type)
+		return 1;
+	if (k1.type < k2->type)
+		return -1;
+	if (k1.offset > k2->offset)
+		return 1;
+	if (k1.offset < k2->offset)
+		return -1;
+	return 0;
+}
+
+/*
+ * same as comp_keys, only with two struct btrfs_key arguments
+ */
+static int comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
+{
+	if (k1->objectid > k2->objectid)
+		return 1;
+	if (k1->objectid < k2->objectid)
+		return -1;
+	if (k1->type > k2->type)
+		return 1;
+	if (k1->type < k2->type)
+		return -1;
+	if (k1->offset > k2->offset)
+		return 1;
+	if (k1->offset < k2->offset)
+		return -1;
+	return 0;
+}
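
As a quick illustration (the values are made up), keys order by objectid,
then type, then offset:

static int example_key_order(void)
{
	struct btrfs_key a = { .objectid = 256, .type = 1, .offset = 0 };
	struct btrfs_key b = { .objectid = 256, .type = 2, .offset = 0 };

	/* objectids match, so the smaller type sorts first: returns -1 */
	return comp_cpu_keys(&a, &b);
}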
+
+/*
+ * this is used by the defrag code to go through all the
+ * leaves pointed to by a node and reallocate them so that
+ * disk order is close to key order
+ */
+int btrfs_realloc_node(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *root, struct extent_buffer *parent,
+		       int start_slot, int cache_only, u64 *last_ret,
+		       struct btrfs_key *progress)
+{
+	struct extent_buffer *cur;
+	u64 blocknr;
+	u64 gen;
+	u64 search_start = *last_ret;
+	u64 last_block = 0;
+	u64 other;
+	u32 parent_nritems;
+	int end_slot;
+	int i;
+	int err = 0;
+	int parent_level;
+	int uptodate;
+	u32 blocksize;
+	int progress_passed = 0;
+	struct btrfs_disk_key disk_key;
+
+	parent_level = btrfs_header_level(parent);
+	if (cache_only && parent_level != 1)
+		return 0;
+
+	if (trans->transaction != root->fs_info->running_transaction)
+		WARN_ON(1);
+	if (trans->transid != root->fs_info->generation)
+		WARN_ON(1);
+
+	parent_nritems = btrfs_header_nritems(parent);
+	blocksize = btrfs_level_size(root, parent_level - 1);
+	end_slot = parent_nritems;
+
+	if (parent_nritems == 1)
+		return 0;
+
+	for (i = start_slot; i < end_slot; i++) {
+		int close = 1;
+
+		if (!parent->map_token) {
+			map_extent_buffer(parent,
+					btrfs_node_key_ptr_offset(i),
+					sizeof(struct btrfs_key_ptr),
+					&parent->map_token, &parent->kaddr,
+					&parent->map_start, &parent->map_len,
+					KM_USER1);
+		}
+		btrfs_node_key(parent, &disk_key, i);
+		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
+			continue;
+
+		progress_passed = 1;
+		blocknr = btrfs_node_blockptr(parent, i);
+		gen = btrfs_node_ptr_generation(parent, i);
+		if (last_block == 0)
+			last_block = blocknr;
+
+		if (i > 0) {
+			other = btrfs_node_blockptr(parent, i - 1);
+			close = close_blocks(blocknr, other, blocksize);
+		}
+		if (!close && i < end_slot - 2) {
+			other = btrfs_node_blockptr(parent, i + 1);
+			close = close_blocks(blocknr, other, blocksize);
+		}
+		if (close) {
+			last_block = blocknr;
+			continue;
+		}
+		if (parent->map_token) {
+			unmap_extent_buffer(parent, parent->map_token,
+					    KM_USER1);
+			parent->map_token = NULL;
+		}
+
+		cur = btrfs_find_tree_block(root, blocknr, blocksize);
+		if (cur)
+			uptodate = btrfs_buffer_uptodate(cur, gen);
+		else
+			uptodate = 0;
+		if (!cur || !uptodate) {
+			if (cache_only) {
+				free_extent_buffer(cur);
+				continue;
+			}
+			if (!cur) {
+				cur = read_tree_block(root, blocknr,
+							 blocksize, gen);
+			} else if (!uptodate) {
+				btrfs_read_buffer(cur, gen);
+			}
+		}
+		if (search_start == 0)
+			search_start = last_block;
+
+		btrfs_tree_lock(cur);
+		err = __btrfs_cow_block(trans, root, cur, parent, i,
+					&cur, search_start,
+					min(16 * blocksize,
+					    (end_slot - i) * blocksize), 0);
+		if (err) {
+			btrfs_tree_unlock(cur);
+			free_extent_buffer(cur);
+			break;
+		}
+		search_start = cur->start;
+		last_block = cur->start;
+		*last_ret = search_start;
+		btrfs_tree_unlock(cur);
+		free_extent_buffer(cur);
+	}
+	if (parent->map_token) {
+		unmap_extent_buffer(parent, parent->map_token,
+				    KM_USER1);
+		parent->map_token = NULL;
+	}
+	return err;
+}
+
+/*
+ * The leaf data grows from end-to-front in the node.
+ * This returns the offset of the start of the last item,
+ * which is the end of the leaf data stack.
+ */
+static inline unsigned int leaf_data_end(struct btrfs_root *root,
+					 struct extent_buffer *leaf)
+{
+	u32 nr = btrfs_header_nritems(leaf);
+	if (nr == 0)
+		return BTRFS_LEAF_DATA_SIZE(root);
+	return btrfs_item_offset_nr(leaf, nr - 1);
+}
+
+/*
+ * extra debugging checks to make sure all the items in a node are
+ * well formed and in the proper order
+ */
+static int check_node(struct btrfs_root *root, struct btrfs_path *path,
+		      int level)
+{
+	struct extent_buffer *parent = NULL;
+	struct extent_buffer *node = path->nodes[level];
+	struct btrfs_disk_key parent_key;
+	struct btrfs_disk_key node_key;
+	int parent_slot;
+	int slot;
+	struct btrfs_key cpukey;
+	u32 nritems = btrfs_header_nritems(node);
+
+	if (path->nodes[level + 1])
+		parent = path->nodes[level + 1];
+
+	slot = path->slots[level];
+	BUG_ON(nritems == 0);
+	if (parent) {
+		parent_slot = path->slots[level + 1];
+		btrfs_node_key(parent, &parent_key, parent_slot);
+		btrfs_node_key(node, &node_key, 0);
+		BUG_ON(memcmp(&parent_key, &node_key,
+			      sizeof(struct btrfs_disk_key)));
+		BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
+		       btrfs_header_bytenr(node));
+	}
+	BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
+	if (slot != 0) {
+		btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
+		btrfs_node_key(node, &node_key, slot);
+		BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
+	}
+	if (slot < nritems - 1) {
+		btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
+		btrfs_node_key(node, &node_key, slot);
+		BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
+	}
+	return 0;
+}
+
+/*
+ * extra checking to make sure all the items in a leaf are
+ * well formed and in the proper order
+ */
+static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
+		      int level)
+{
+	struct extent_buffer *leaf = path->nodes[level];
+	struct extent_buffer *parent = NULL;
+	int parent_slot;
+	struct btrfs_key cpukey;
+	struct btrfs_disk_key parent_key;
+	struct btrfs_disk_key leaf_key;
+	int slot = path->slots[0];
+
+	u32 nritems = btrfs_header_nritems(leaf);
+
+	if (path->nodes[level + 1])
+		parent = path->nodes[level + 1];
+
+	if (nritems == 0)
+		return 0;
+
+	if (parent) {
+		parent_slot = path->slots[level + 1];
+		btrfs_node_key(parent, &parent_key, parent_slot);
+		btrfs_item_key(leaf, &leaf_key, 0);
+
+		BUG_ON(memcmp(&parent_key, &leaf_key,
+		       sizeof(struct btrfs_disk_key)));
+		BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
+		       btrfs_header_bytenr(leaf));
+	}
+	if (slot != 0 && slot < nritems - 1) {
+		btrfs_item_key(leaf, &leaf_key, slot);
+		btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
+		if (comp_keys(&leaf_key, &cpukey) <= 0) {
+			btrfs_print_leaf(root, leaf);
+			printk(KERN_CRIT "slot %d offset bad key\n", slot);
+			BUG_ON(1);
+		}
+		if (btrfs_item_offset_nr(leaf, slot - 1) !=
+		       btrfs_item_end_nr(leaf, slot)) {
+			btrfs_print_leaf(root, leaf);
+			printk(KERN_CRIT "slot %d offset bad\n", slot);
+			BUG_ON(1);
+		}
+	}
+	if (slot < nritems - 1) {
+		btrfs_item_key(leaf, &leaf_key, slot);
+		btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
+		BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
+		if (btrfs_item_offset_nr(leaf, slot) !=
+			btrfs_item_end_nr(leaf, slot + 1)) {
+			btrfs_print_leaf(root, leaf);
+			printk(KERN_CRIT "slot %d offset bad\n", slot);
+			BUG_ON(1);
+		}
+	}
+	BUG_ON(btrfs_item_offset_nr(leaf, 0) +
+	       btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
+	return 0;
+}
+
+static noinline int check_block(struct btrfs_root *root,
+				struct btrfs_path *path, int level)
+{
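+	/* the node/leaf sanity checks below are compiled out for now */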
+	return 0;
+	if (level == 0)
+		return check_leaf(root, path, level);
+	return check_node(root, path, level);
+}
+
+/*
+ * search for key in the extent_buffer.  The items start at offset p,
+ * and they are item_size apart.  There are 'max' items in p.
+ *
+ * the slot in the array is returned via slot, and it points to
+ * the place where you would insert key if it is not found in
+ * the array.
+ *
+ * slot may point to max if the key is bigger than all of the keys
+ */
+static noinline int generic_bin_search(struct extent_buffer *eb,
+				       unsigned long p,
+				       int item_size, struct btrfs_key *key,
+				       int max, int *slot)
+{
+	int low = 0;
+	int high = max;
+	int mid;
+	int ret;
+	struct btrfs_disk_key *tmp = NULL;
+	struct btrfs_disk_key unaligned;
+	unsigned long offset;
+	char *map_token = NULL;
+	char *kaddr = NULL;
+	unsigned long map_start = 0;
+	unsigned long map_len = 0;
+	int err;
+
+	while (low < high) {
+		mid = (low + high) / 2;
+		offset = p + mid * item_size;
+
+		if (!map_token || offset < map_start ||
+		    (offset + sizeof(struct btrfs_disk_key)) >
+		    map_start + map_len) {
+			if (map_token) {
+				unmap_extent_buffer(eb, map_token, KM_USER0);
+				map_token = NULL;
+			}
+
+			err = map_private_extent_buffer(eb, offset,
+						sizeof(struct btrfs_disk_key),
+						&map_token, &kaddr,
+						&map_start, &map_len, KM_USER0);
+
+			if (!err) {
+				tmp = (struct btrfs_disk_key *)(kaddr + offset -
+							map_start);
+			} else {
+				read_extent_buffer(eb, &unaligned,
+						   offset, sizeof(unaligned));
+				tmp = &unaligned;
+			}
+
+		} else {
+			tmp = (struct btrfs_disk_key *)(kaddr + offset -
+							map_start);
+		}
+		ret = comp_keys(tmp, key);
+
+		if (ret < 0)
+			low = mid + 1;
+		else if (ret > 0)
+			high = mid;
+		else {
+			*slot = mid;
+			if (map_token)
+				unmap_extent_buffer(eb, map_token, KM_USER0);
+			return 0;
+		}
+	}
+	*slot = low;
+	if (map_token)
+		unmap_extent_buffer(eb, map_token, KM_USER0);
+	return 1;
+}
+
+/*
+ * simple bin_search frontend that does the right thing for
+ * leaves vs nodes
+ */
+static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
+		      int level, int *slot)
+{
+	if (level == 0) {
+		return generic_bin_search(eb,
+					  offsetof(struct btrfs_leaf, items),
+					  sizeof(struct btrfs_item),
+					  key, btrfs_header_nritems(eb),
+					  slot);
+	} else {
+		return generic_bin_search(eb,
+					  offsetof(struct btrfs_node, ptrs),
+					  sizeof(struct btrfs_key_ptr),
+					  key, btrfs_header_nritems(eb),
+					  slot);
+	}
+	return -1;
+}
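
A hedged sketch of the common caller pattern for interior nodes (the helper
is hypothetical): on a miss, the child pointer to follow sits just before
the returned insertion point, so non-zero slots get decremented, mirroring
what the tree search code does with this result.

static int example_pick_child_slot(struct extent_buffer *node,
				   struct btrfs_key *key)
{
	int slot;
	int ret = bin_search(node, key, btrfs_header_level(node), &slot);

	if (ret && slot > 0)
		slot--;		/* follow the pointer that covers 'key' */
	return slot;
}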
+
+/* given a node and slot number, this reads the block it points to.  The
+ * extent buffer is returned with a reference taken (but unlocked).
+ * NULL is returned on error.
+ */
+static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
+				   struct extent_buffer *parent, int slot)
+{
+	int level = btrfs_header_level(parent);
+	if (slot < 0)
+		return NULL;
+	if (slot >= btrfs_header_nritems(parent))
+		return NULL;
+
+	BUG_ON(level == 0);
+
+	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
+		       btrfs_level_size(root, level - 1),
+		       btrfs_node_ptr_generation(parent, slot));
+}
+
+/*
+ * node level balancing, used to make sure nodes are in proper order for
+ * item deletion.  We balance from the top down, so we have to make sure
+ * that a deletion won't leave a node completely empty later on.
+ */
+static noinline int balance_level(struct btrfs_trans_handle *trans,
+			 struct btrfs_root *root,
+			 struct btrfs_path *path, int level)
+{
+	struct extent_buffer *right = NULL;
+	struct extent_buffer *mid;
+	struct extent_buffer *left = NULL;
+	struct extent_buffer *parent = NULL;
+	int ret = 0;
+	int wret;
+	int pslot;
+	int orig_slot = path->slots[level];
+	int err_on_enospc = 0;
+	u64 orig_ptr;
+
+	if (level == 0)
+		return 0;
+
+	mid = path->nodes[level];
+	WARN_ON(!path->locks[level]);
+	WARN_ON(btrfs_header_generation(mid) != trans->transid);
+
+	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
+
+	if (level < BTRFS_MAX_LEVEL - 1)
+		parent = path->nodes[level + 1];
+	pslot = path->slots[level + 1];
+
+	/*
+	 * deal with the case where there is only one pointer in the root
+	 * by promoting the node below to a root
+	 */
+	if (!parent) {
+		struct extent_buffer *child;
+
+		if (btrfs_header_nritems(mid) != 1)
+			return 0;
+
+		/* promote the child to a root */
+		child = read_node_slot(root, mid, 0);
+		BUG_ON(!child);
+		btrfs_tree_lock(child);
+		ret = btrfs_cow_block(trans, root, child, mid, 0, &child, 0);
+		BUG_ON(ret);
+
+		spin_lock(&root->node_lock);
+		root->node = child;
+		spin_unlock(&root->node_lock);
+
+		ret = btrfs_update_extent_ref(trans, root, child->start,
+					      mid->start, child->start,
+					      root->root_key.objectid,
+					      trans->transid, level - 1);
+		BUG_ON(ret);
+
+		add_root_to_dirty_list(root);
+		btrfs_tree_unlock(child);
+		path->locks[level] = 0;
+		path->nodes[level] = NULL;
+		clean_tree_block(trans, root, mid);
+		btrfs_tree_unlock(mid);
+		/* once for the path */
+		free_extent_buffer(mid);
+		ret = btrfs_free_extent(trans, root, mid->start, mid->len,
+					mid->start, root->root_key.objectid,
+					btrfs_header_generation(mid),
+					level, 1);
+		/* once for the root ptr */
+		free_extent_buffer(mid);
+		return ret;
+	}
+	if (btrfs_header_nritems(mid) >
+	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
+		return 0;
+
+	if (btrfs_header_nritems(mid) < 2)
+		err_on_enospc = 1;
+
+	left = read_node_slot(root, parent, pslot - 1);
+	if (left) {
+		btrfs_tree_lock(left);
+		wret = btrfs_cow_block(trans, root, left,
+				       parent, pslot - 1, &left, 0);
+		if (wret) {
+			ret = wret;
+			goto enospc;
+		}
+	}
+	right = read_node_slot(root, parent, pslot + 1);
+	if (right) {
+		btrfs_tree_lock(right);
+		wret = btrfs_cow_block(trans, root, right,
+				       parent, pslot + 1, &right, 0);
+		if (wret) {
+			ret = wret;
+			goto enospc;
+		}
+	}
+
+	/* first, try to make some room in the middle buffer */
+	if (left) {
+		orig_slot += btrfs_header_nritems(left);
+		wret = push_node_left(trans, root, left, mid, 1);
+		if (wret < 0)
+			ret = wret;
+		if (btrfs_header_nritems(mid) < 2)
+			err_on_enospc = 1;
+	}
+
+	/*
+	 * then try to empty the right most buffer into the middle
+	 */
+	if (right) {
+		wret = push_node_left(trans, root, mid, right, 1);
+		if (wret < 0 && wret != -ENOSPC)
+			ret = wret;
+		if (btrfs_header_nritems(right) == 0) {
+			u64 bytenr = right->start;
+			u64 generation = btrfs_header_generation(parent);
+			u32 blocksize = right->len;
+
+			clean_tree_block(trans, root, right);
+			btrfs_tree_unlock(right);
+			free_extent_buffer(right);
+			right = NULL;
+			wret = del_ptr(trans, root, path, level + 1, pslot +
+				       1);
+			if (wret)
+				ret = wret;
+			wret = btrfs_free_extent(trans, root, bytenr,
+						 blocksize, parent->start,
+						 btrfs_header_owner(parent),
+						 generation, level, 1);
+			if (wret)
+				ret = wret;
+		} else {
+			struct btrfs_disk_key right_key;
+			btrfs_node_key(right, &right_key, 0);
+			btrfs_set_node_key(parent, &right_key, pslot + 1);
+			btrfs_mark_buffer_dirty(parent);
+		}
+	}
+	if (btrfs_header_nritems(mid) == 1) {
+		/*
+		 * we're not allowed to leave a node with one item in the
+		 * tree during a delete.  A deletion from lower in the tree
+		 * could try to delete the only pointer in this node.
+		 * So, pull some keys from the left.
+		 * There has to be a left pointer at this point because
+		 * otherwise we would have pulled some pointers from the
+		 * right
+		 */
+		BUG_ON(!left);
+		wret = balance_node_right(trans, root, mid, left);
+		if (wret < 0) {
+			ret = wret;
+			goto enospc;
+		}
+		if (wret == 1) {
+			wret = push_node_left(trans, root, left, mid, 1);
+			if (wret < 0)
+				ret = wret;
+		}
+		BUG_ON(wret == 1);
+	}
+	if (btrfs_header_nritems(mid) == 0) {
+		/* we've managed to empty the middle node, drop it */
+		u64 root_gen = btrfs_header_generation(parent);
+		u64 bytenr = mid->start;
+		u32 blocksize = mid->len;
+
+		clean_tree_block(trans, root, mid);
+		btrfs_tree_unlock(mid);
+		free_extent_buffer(mid);
+		mid = NULL;
+		wret = del_ptr(trans, root, path, level + 1, pslot);
+		if (wret)
+			ret = wret;
+		wret = btrfs_free_extent(trans, root, bytenr, blocksize,
+					 parent->start,
+					 btrfs_header_owner(parent),
+					 root_gen, level, 1);
+		if (wret)
+			ret = wret;
+	} else {
+		/* update the parent key to reflect our changes */
+		struct btrfs_disk_key mid_key;
+		btrfs_node_key(mid, &mid_key, 0);
+		btrfs_set_node_key(parent, &mid_key, pslot);
+		btrfs_mark_buffer_dirty(parent);
+	}
+
+	/* update the path */
+	if (left) {
+		if (btrfs_header_nritems(left) > orig_slot) {
+			extent_buffer_get(left);
+			/* left was locked after cow */
+			path->nodes[level] = left;
+			path->slots[level + 1] -= 1;
+			path->slots[level] = orig_slot;
+			if (mid) {
+				btrfs_tree_unlock(mid);
+				free_extent_buffer(mid);
+			}
+		} else {
+			orig_slot -= btrfs_header_nritems(left);
+			path->slots[level] = orig_slot;
+		}
+	}
+	/* double check we haven't messed things up */
+	check_block(root, path, level);
+	if (orig_ptr !=
+	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
+		BUG();
+enospc:
+	if (right) {
+		btrfs_tree_unlock(right);
+		free_extent_buffer(right);
+	}
+	if (left) {
+		if (path->nodes[level] != left)
+			btrfs_tree_unlock(left);
+		free_extent_buffer(left);
+	}
+	return ret;
+}
+
+/* Node balancing for insertion.  Here we only split or push nodes around
+ * when they are completely full.  This is also done top down, so we
+ * have to be pessimistic.
+ */
+static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
+					  struct btrfs_root *root,
+					  struct btrfs_path *path, int level)
+{
+	struct extent_buffer *right = NULL;
+	struct extent_buffer *mid;
+	struct extent_buffer *left = NULL;
+	struct extent_buffer *parent = NULL;
+	int ret = 0;
+	int wret;
+	int pslot;
+	int orig_slot = path->slots[level];
+	u64 orig_ptr;
+
+	if (level == 0)
+		return 1;
+
+	mid = path->nodes[level];
+	WARN_ON(btrfs_header_generation(mid) != trans->transid);
+	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
+
+	if (level < BTRFS_MAX_LEVEL - 1)
+		parent = path->nodes[level + 1];
+	pslot = path->slots[level + 1];
+
+	if (!parent)
+		return 1;
+
+	left = read_node_slot(root, parent, pslot - 1);
+
+	/* first, try to make some room in the middle buffer */
+	if (left) {
+		u32 left_nr;
+
+		btrfs_tree_lock(left);
+		left_nr = btrfs_header_nritems(left);
+		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
+			wret = 1;
+		} else {
+			ret = btrfs_cow_block(trans, root, left, parent,
+					      pslot - 1, &left, 0);
+			if (ret)
+				wret = 1;
+			else {
+				wret = push_node_left(trans, root,
+						      left, mid, 0);
+			}
+		}
+		if (wret < 0)
+			ret = wret;
+		if (wret == 0) {
+			struct btrfs_disk_key disk_key;
+			orig_slot += left_nr;
+			btrfs_node_key(mid, &disk_key, 0);
+			btrfs_set_node_key(parent, &disk_key, pslot);
+			btrfs_mark_buffer_dirty(parent);
+			if (btrfs_header_nritems(left) > orig_slot) {
+				path->nodes[level] = left;
+				path->slots[level + 1] -= 1;
+				path->slots[level] = orig_slot;
+				btrfs_tree_unlock(mid);
+				free_extent_buffer(mid);
+			} else {
+				orig_slot -= btrfs_header_nritems(left);
+				path->slots[level] = orig_slot;
+				btrfs_tree_unlock(left);
+				free_extent_buffer(left);
+			}
+			return 0;
+		}
+		btrfs_tree_unlock(left);
+		free_extent_buffer(left);
+	}
+	right = read_node_slot(root, parent, pslot + 1);
+
+	/*
+	 * then try to empty the right most buffer into the middle
+	 */
+	if (right) {
+		u32 right_nr;
+		btrfs_tree_lock(right);
+		right_nr = btrfs_header_nritems(right);
+		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
+			wret = 1;
+		} else {
+			ret = btrfs_cow_block(trans, root, right,
+					      parent, pslot + 1,
+					      &right, 0);
+			if (ret)
+				wret = 1;
+			else {
+				wret = balance_node_right(trans, root,
+							  right, mid);
+			}
+		}
+		if (wret < 0)
+			ret = wret;
+		if (wret == 0) {
+			struct btrfs_disk_key disk_key;
+
+			btrfs_node_key(right, &disk_key, 0);
+			btrfs_set_node_key(parent, &disk_key, pslot + 1);
+			btrfs_mark_buffer_dirty(parent);
+
+			if (btrfs_header_nritems(mid) <= orig_slot) {
+				path->nodes[level] = right;
+				path->slots[level + 1] += 1;
+				path->slots[level] = orig_slot -
+					btrfs_header_nritems(mid);
+				btrfs_tree_unlock(mid);
+				free_extent_buffer(mid);
+			} else {
+				btrfs_tree_unlock(right);
+				free_extent_buffer(right);
+			}
+			return 0;
+		}
+		btrfs_tree_unlock(right);
+		free_extent_buffer(right);
+	}
+	return 1;
+}
+
+/*
+ * readahead one full node of leaves, finding things that are close
+ * to the block in 'slot', and triggering ra on them.
+ */
+static noinline void reada_for_search(struct btrfs_root *root,
+				      struct btrfs_path *path,
+				      int level, int slot, u64 objectid)
+{
+	struct extent_buffer *node;
+	struct btrfs_disk_key disk_key;
+	u32 nritems;
+	u64 search;
+	u64 lowest_read;
+	u64 highest_read;
+	u64 nread = 0;
+	int direction = path->reada;
+	struct extent_buffer *eb;
+	u32 nr;
+	u32 blocksize;
+	u32 nscan = 0;
+
+	if (level != 1)
+		return;
+
+	if (!path->nodes[level])
+		return;
+
+	node = path->nodes[level];
+
+	search = btrfs_node_blockptr(node, slot);
+	blocksize = btrfs_level_size(root, level - 1);
+	eb = btrfs_find_tree_block(root, search, blocksize);
+	if (eb) {
+		free_extent_buffer(eb);
+		return;
+	}
+
+	highest_read = search;
+	lowest_read = search;
+
+	nritems = btrfs_header_nritems(node);
+	nr = slot;
+	while (1) {
+		if (direction < 0) {
+			if (nr == 0)
+				break;
+			nr--;
+		} else if (direction > 0) {
+			nr++;
+			if (nr >= nritems)
+				break;
+		}
+		if (path->reada < 0 && objectid) {
+			btrfs_node_key(node, &disk_key, nr);
+			if (btrfs_disk_key_objectid(&disk_key) != objectid)
+				break;
+		}
+		search = btrfs_node_blockptr(node, nr);
+		if ((search >= lowest_read && search <= highest_read) ||
+		    (search < lowest_read && lowest_read - search <= 16384) ||
+		    (search > highest_read && search - highest_read <= 16384)) {
+			readahead_tree_block(root, search, blocksize,
+				     btrfs_node_ptr_generation(node, nr));
+			nread += blocksize;
+		}
+		nscan++;
+		if (path->reada < 2 && (nread > (64 * 1024) || nscan > 32))
+			break;
+
+		if (nread > (256 * 1024) || nscan > 128)
+			break;
+
+		if (search < lowest_read)
+			lowest_read = search;
+		if (search > highest_read)
+			highest_read = search;
+	}
+}
+
+/*
+ * when we walk down the tree, it is usually safe to unlock the higher layers
+ * in the tree.  The exceptions are when our path goes through slot 0, because
+ * operations on the tree might require changing key pointers higher up in the
+ * tree.
+ *
+ * callers might also have set path->keep_locks, which tells this code to keep
+ * the lock if the path points to the last slot in the block.  This is part of
+ * walking through the tree, and selecting the next slot in the higher block.
+ *
+ * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
+ * if lowest_unlock is 1, level 0 won't be unlocked
+ */
+static noinline void unlock_up(struct btrfs_path *path, int level,
+			       int lowest_unlock)
+{
+	int i;
+	int skip_level = level;
+	int no_skips = 0;
+	struct extent_buffer *t;
+
+	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
+		if (!path->nodes[i])
+			break;
+		if (!path->locks[i])
+			break;
+		if (!no_skips && path->slots[i] == 0) {
+			skip_level = i + 1;
+			continue;
+		}
+		if (!no_skips && path->keep_locks) {
+			u32 nritems;
+			t = path->nodes[i];
+			nritems = btrfs_header_nritems(t);
+			if (nritems < 1 || path->slots[i] >= nritems - 1) {
+				skip_level = i + 1;
+				continue;
+			}
+		}
+		if (skip_level < i && i >= lowest_unlock)
+			no_skips = 1;
+
+		t = path->nodes[i];
+		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
+			btrfs_tree_unlock(t);
+			path->locks[i] = 0;
+		}
+	}
+}
+
+/*
+ * look for key in the tree.  path is filled in with nodes along the way
+ * if key is found, we return zero and you can find the item in the leaf
+ * level of the path (level 0)
+ *
+ * If the key isn't found, the path points to the slot where it should
+ * be inserted, and 1 is returned.  If there are other errors during the
+ * search a negative error number is returned.
+ *
+ * if ins_len > 0, nodes and leaves will be split as we walk down the
+ * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
+ * possible)
+ */
+int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
+		      *root, struct btrfs_key *key, struct btrfs_path *p, int
+		      ins_len, int cow)
+{
+	struct extent_buffer *b;
+	struct extent_buffer *tmp;
+	int slot;
+	int ret;
+	int level;
+	int should_reada = p->reada;
+	int lowest_unlock = 1;
+	int blocksize;
+	u8 lowest_level = 0;
+	u64 blocknr;
+	u64 gen;
+	struct btrfs_key prealloc_block;
+
+	lowest_level = p->lowest_level;
+	WARN_ON(lowest_level && ins_len > 0);
+	WARN_ON(p->nodes[0] != NULL);
+
+	if (ins_len < 0)
+		lowest_unlock = 2;
+
+	prealloc_block.objectid = 0;
+
+again:
+	if (p->skip_locking)
+		b = btrfs_root_node(root);
+	else
+		b = btrfs_lock_root_node(root);
+
+	while (b) {
+		level = btrfs_header_level(b);
+
+		/*
+		 * setup the path here so we can release it under lock
+		 * contention with the cow code
+		 */
+		p->nodes[level] = b;
+		if (!p->skip_locking)
+			p->locks[level] = 1;
+
+		if (cow) {
+			int wret;
+
+			/* is a cow on this block not required? */
+			spin_lock(&root->fs_info->hash_lock);
+			if (btrfs_header_generation(b) == trans->transid &&
+			    btrfs_header_owner(b) == root->root_key.objectid &&
+			    !btrfs_header_flag(b, BTRFS_HEADER_FLAG_WRITTEN)) {
+				spin_unlock(&root->fs_info->hash_lock);
+				goto cow_done;
+			}
+			spin_unlock(&root->fs_info->hash_lock);
+
+			/* ok, we have to cow, is our old prealloc the right
+			 * size?
+			 */
+			if (prealloc_block.objectid &&
+			    prealloc_block.offset != b->len) {
+				btrfs_free_reserved_extent(root,
+					   prealloc_block.objectid,
+					   prealloc_block.offset);
+				prealloc_block.objectid = 0;
+			}
+
+			/*
+			 * for higher level blocks, try not to allocate blocks
+			 * with the block and the parent locks held.
+			 */
+			if (level > 1 && !prealloc_block.objectid &&
+			    btrfs_path_lock_waiting(p, level)) {
+				u32 size = b->len;
+				u64 hint = b->start;
+
+				btrfs_release_path(root, p);
+				ret = btrfs_reserve_extent(trans, root,
+							   size, size, 0,
+							   hint, (u64)-1,
+							   &prealloc_block, 0);
+				BUG_ON(ret);
+				goto again;
+			}
+
+			wret = btrfs_cow_block(trans, root, b,
+					       p->nodes[level + 1],
+					       p->slots[level + 1],
+					       &b, prealloc_block.objectid);
+			prealloc_block.objectid = 0;
+			if (wret) {
+				free_extent_buffer(b);
+				ret = wret;
+				goto done;
+			}
+		}
+cow_done:
+		BUG_ON(!cow && ins_len);
+		if (level != btrfs_header_level(b))
+			WARN_ON(1);
+		level = btrfs_header_level(b);
+
+		p->nodes[level] = b;
+		if (!p->skip_locking)
+			p->locks[level] = 1;
+
+		ret = check_block(root, p, level);
+		if (ret) {
+			ret = -1;
+			goto done;
+		}
+
+		ret = bin_search(b, key, level, &slot);
+		if (level != 0) {
+			if (ret && slot > 0)
+				slot -= 1;
+			p->slots[level] = slot;
+			if ((p->search_for_split || ins_len > 0) &&
+			    btrfs_header_nritems(b) >=
+			    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
+				int sret = split_node(trans, root, p, level);
+				BUG_ON(sret > 0);
+				if (sret) {
+					ret = sret;
+					goto done;
+				}
+				b = p->nodes[level];
+				slot = p->slots[level];
+			} else if (ins_len < 0) {
+				int sret = balance_level(trans, root, p,
+							 level);
+				if (sret) {
+					ret = sret;
+					goto done;
+				}
+				b = p->nodes[level];
+				if (!b) {
+					btrfs_release_path(NULL, p);
+					goto again;
+				}
+				slot = p->slots[level];
+				BUG_ON(btrfs_header_nritems(b) == 1);
+			}
+			unlock_up(p, level, lowest_unlock);
+
+			/* this is only true while dropping a snapshot */
+			if (level == lowest_level) {
+				ret = 0;
+				goto done;
+			}
+
+			blocknr = btrfs_node_blockptr(b, slot);
+			gen = btrfs_node_ptr_generation(b, slot);
+			blocksize = btrfs_level_size(root, level - 1);
+
+			tmp = btrfs_find_tree_block(root, blocknr, blocksize);
+			if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
+				b = tmp;
+			} else {
+				/*
+				 * reduce lock contention at high levels
+				 * of the btree by dropping locks before
+				 * we read.
+				 */
+				if (level > 1) {
+					btrfs_release_path(NULL, p);
+					if (tmp)
+						free_extent_buffer(tmp);
+					if (should_reada)
+						reada_for_search(root, p,
+								 level, slot,
+								 key->objectid);
+
+					tmp = read_tree_block(root, blocknr,
+							 blocksize, gen);
+					if (tmp)
+						free_extent_buffer(tmp);
+					goto again;
+				} else {
+					if (tmp)
+						free_extent_buffer(tmp);
+					if (should_reada)
+						reada_for_search(root, p,
+								 level, slot,
+								 key->objectid);
+					b = read_node_slot(root, b, slot);
+				}
+			}
+			if (!p->skip_locking)
+				btrfs_tree_lock(b);
+		} else {
+			p->slots[level] = slot;
+			if (ins_len > 0 &&
+			    btrfs_leaf_free_space(root, b) < ins_len) {
+				int sret = split_leaf(trans, root, key,
+						      p, ins_len, ret == 0);
+				BUG_ON(sret > 0);
+				if (sret) {
+					ret = sret;
+					goto done;
+				}
+			}
+			if (!p->search_for_split)
+				unlock_up(p, level, lowest_unlock);
+			goto done;
+		}
+	}
+	ret = 1;
+done:
+	if (prealloc_block.objectid) {
+		btrfs_free_reserved_extent(root,
+			   prealloc_block.objectid,
+			   prealloc_block.offset);
+	}
+
+	return ret;
+}
+
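+/*
+ * Illustrative sketch: roughly how a caller might do a read-only lookup with
+ * btrfs_search_slot().  The helper name btrfs_example_lookup() and its
+ * arguments are hypothetical and only here to show the calling convention.
+ */
+static int __maybe_unused btrfs_example_lookup(struct btrfs_root *root,
+					       u64 objectid, u8 type,
+					       u64 offset)
+{
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	int ret;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	key.objectid = objectid;
+	key.type = type;
+	key.offset = offset;
+
+	/* NULL trans, ins_len 0, cow 0: nothing in the tree is modified */
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret == 0) {
+		/* found: the item lives at path->nodes[0], path->slots[0] */
+	} else if (ret == 1) {
+		/* not found: the path points at the insertion slot instead */
+	}
+	btrfs_free_path(path);
+	return ret;
+}
+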
+int btrfs_merge_path(struct btrfs_trans_handle *trans,
+		     struct btrfs_root *root,
+		     struct btrfs_key *node_keys,
+		     u64 *nodes, int lowest_level)
+{
+	struct extent_buffer *eb;
+	struct extent_buffer *parent;
+	struct btrfs_key key;
+	u64 bytenr;
+	u64 generation;
+	u32 blocksize;
+	int level;
+	int slot;
+	int key_match;
+	int ret;
+
+	eb = btrfs_lock_root_node(root);
+	ret = btrfs_cow_block(trans, root, eb, NULL, 0, &eb, 0);
+	BUG_ON(ret);
+
+	parent = eb;
+	while (1) {
+		level = btrfs_header_level(parent);
+		if (level == 0 || level <= lowest_level)
+			break;
+
+		ret = bin_search(parent, &node_keys[lowest_level], level,
+				 &slot);
+		if (ret && slot > 0)
+			slot--;
+
+		bytenr = btrfs_node_blockptr(parent, slot);
+		if (nodes[level - 1] == bytenr)
+			break;
+
+		blocksize = btrfs_level_size(root, level - 1);
+		generation = btrfs_node_ptr_generation(parent, slot);
+		btrfs_node_key_to_cpu(eb, &key, slot);
+		key_match = !memcmp(&key, &node_keys[level - 1], sizeof(key));
+
+		if (generation == trans->transid) {
+			eb = read_tree_block(root, bytenr, blocksize,
+					     generation);
+			btrfs_tree_lock(eb);
+		}
+
+		/*
+		 * if node keys match and node pointer hasn't been modified
+		 * in the running transaction, we can merge the path. for
+		 * blocks owned by reloc trees, the node pointer check is
+		 * skipped, this is because these blocks are fully controlled
+		 * by the space balance code, no one else can modify them.
+		 */
+		if (!nodes[level - 1] || !key_match ||
+		    (generation == trans->transid &&
+		     btrfs_header_owner(eb) != BTRFS_TREE_RELOC_OBJECTID)) {
+			if (level == 1 || level == lowest_level + 1) {
+				if (generation == trans->transid) {
+					btrfs_tree_unlock(eb);
+					free_extent_buffer(eb);
+				}
+				break;
+			}
+
+			if (generation != trans->transid) {
+				eb = read_tree_block(root, bytenr, blocksize,
+						generation);
+				btrfs_tree_lock(eb);
+			}
+
+			ret = btrfs_cow_block(trans, root, eb, parent, slot,
+					      &eb, 0);
+			BUG_ON(ret);
+
+			if (root->root_key.objectid ==
+			    BTRFS_TREE_RELOC_OBJECTID) {
+				if (!nodes[level - 1]) {
+					nodes[level - 1] = eb->start;
+					memcpy(&node_keys[level - 1], &key,
+					       sizeof(node_keys[0]));
+				} else {
+					WARN_ON(1);
+				}
+			}
+
+			btrfs_tree_unlock(parent);
+			free_extent_buffer(parent);
+			parent = eb;
+			continue;
+		}
+
+		btrfs_set_node_blockptr(parent, slot, nodes[level - 1]);
+		btrfs_set_node_ptr_generation(parent, slot, trans->transid);
+		btrfs_mark_buffer_dirty(parent);
+
+		ret = btrfs_inc_extent_ref(trans, root,
+					nodes[level - 1],
+					blocksize, parent->start,
+					btrfs_header_owner(parent),
+					btrfs_header_generation(parent),
+					level - 1);
+		BUG_ON(ret);
+
+		/*
+		 * If the block was created in the running transaction,
+		 * it's possible this is the last reference to it, so we
+		 * should drop the subtree.
+		 */
+		if (generation == trans->transid) {
+			ret = btrfs_drop_subtree(trans, root, eb, parent);
+			BUG_ON(ret);
+			btrfs_tree_unlock(eb);
+			free_extent_buffer(eb);
+		} else {
+			ret = btrfs_free_extent(trans, root, bytenr,
+					blocksize, parent->start,
+					btrfs_header_owner(parent),
+					btrfs_header_generation(parent),
+					level - 1, 1);
+			BUG_ON(ret);
+		}
+		break;
+	}
+	btrfs_tree_unlock(parent);
+	free_extent_buffer(parent);
+	return 0;
+}
+
+/*
+ * adjust the pointers going up the tree, starting at level
+ * making sure the right key of each node points to 'key'.
+ * This is used after shifting pointers to the left, so it stops
+ * fixing up pointers when a given leaf/node is not in slot 0 of the
+ * higher levels
+ *
+ * If this fails to write a tree block, it returns -1, but continues
+ * fixing up the blocks in ram so the tree is consistent.
+ */
+static int fixup_low_keys(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root, struct btrfs_path *path,
+			  struct btrfs_disk_key *key, int level)
+{
+	int i;
+	int ret = 0;
+	struct extent_buffer *t;
+
+	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
+		int tslot = path->slots[i];
+		if (!path->nodes[i])
+			break;
+		t = path->nodes[i];
+		btrfs_set_node_key(t, key, tslot);
+		btrfs_mark_buffer_dirty(path->nodes[i]);
+		if (tslot != 0)
+			break;
+	}
+	return ret;
+}
+
+/*
+ * update item key.
+ *
+ * This function isn't completely safe. It's the caller's responsibility
+ * that the new key won't break the order
+ */
+int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root, struct btrfs_path *path,
+			    struct btrfs_key *new_key)
+{
+	struct btrfs_disk_key disk_key;
+	struct extent_buffer *eb;
+	int slot;
+
+	eb = path->nodes[0];
+	slot = path->slots[0];
+	if (slot > 0) {
+		btrfs_item_key(eb, &disk_key, slot - 1);
+		if (comp_keys(&disk_key, new_key) >= 0)
+			return -1;
+	}
+	if (slot < btrfs_header_nritems(eb) - 1) {
+		btrfs_item_key(eb, &disk_key, slot + 1);
+		if (comp_keys(&disk_key, new_key) <= 0)
+			return -1;
+	}
+
+	btrfs_cpu_key_to_disk(&disk_key, new_key);
+	btrfs_set_item_key(eb, &disk_key, slot);
+	btrfs_mark_buffer_dirty(eb);
+	if (slot == 0)
+		fixup_low_keys(trans, root, path, &disk_key, 1);
+	return 0;
+}
+
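+/*
+ * Illustrative example with made-up keys: if the neighbouring items carry
+ * keys (257 EXTENT_DATA 0) and (257 EXTENT_DATA 8192), rewriting the item
+ * between them to (257 EXTENT_DATA 4096) keeps the leaf sorted and succeeds,
+ * while (257 EXTENT_DATA 12288) would sort after the next item and is
+ * rejected with -1.
+ */
+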
+/*
+ * try to push data from one node into the next node left in the
+ * tree.
+ *
+ * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
+ * error, and > 0 if there was no room in the left hand block.
+ */
+static int push_node_left(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root, struct extent_buffer *dst,
+			  struct extent_buffer *src, int empty)
+{
+	int push_items = 0;
+	int src_nritems;
+	int dst_nritems;
+	int ret = 0;
+
+	src_nritems = btrfs_header_nritems(src);
+	dst_nritems = btrfs_header_nritems(dst);
+	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
+	WARN_ON(btrfs_header_generation(src) != trans->transid);
+	WARN_ON(btrfs_header_generation(dst) != trans->transid);
+
+	if (!empty && src_nritems <= 8)
+		return 1;
+
+	if (push_items <= 0)
+		return 1;
+
+	if (empty) {
+		push_items = min(src_nritems, push_items);
+		if (push_items < src_nritems) {
+			/* leave at least 8 pointers in the node if
+			 * we aren't going to empty it
+			 */
+			if (src_nritems - push_items < 8) {
+				if (push_items <= 8)
+					return 1;
+				push_items -= 8;
+			}
+		}
+	} else
+		push_items = min(src_nritems - 8, push_items);
+
+	copy_extent_buffer(dst, src,
+			   btrfs_node_key_ptr_offset(dst_nritems),
+			   btrfs_node_key_ptr_offset(0),
+			   push_items * sizeof(struct btrfs_key_ptr));
+
+	if (push_items < src_nritems) {
+		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
+				      btrfs_node_key_ptr_offset(push_items),
+				      (src_nritems - push_items) *
+				      sizeof(struct btrfs_key_ptr));
+	}
+	btrfs_set_header_nritems(src, src_nritems - push_items);
+	btrfs_set_header_nritems(dst, dst_nritems + push_items);
+	btrfs_mark_buffer_dirty(src);
+	btrfs_mark_buffer_dirty(dst);
+
+	ret = btrfs_update_ref(trans, root, src, dst, dst_nritems, push_items);
+	BUG_ON(ret);
+
+	return ret;
+}
+
+/*
+ * try to push data from one node into the next node right in the
+ * tree.
+ *
+ * returns 0 if some ptrs were pushed, < 0 if there was some horrible
+ * error, and > 0 if there was no room in the right hand block.
+ *
+ * this will only push up to 1/2 the contents of the left node over
+ */
+static int balance_node_right(struct btrfs_trans_handle *trans,
+			      struct btrfs_root *root,
+			      struct extent_buffer *dst,
+			      struct extent_buffer *src)
+{
+	int push_items = 0;
+	int max_push;
+	int src_nritems;
+	int dst_nritems;
+	int ret = 0;
+
+	WARN_ON(btrfs_header_generation(src) != trans->transid);
+	WARN_ON(btrfs_header_generation(dst) != trans->transid);
+
+	src_nritems = btrfs_header_nritems(src);
+	dst_nritems = btrfs_header_nritems(dst);
+	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
+	if (push_items <= 0)
+		return 1;
+
+	if (src_nritems < 4)
+		return 1;
+
+	max_push = src_nritems / 2 + 1;
+	/* don't try to empty the node */
+	if (max_push >= src_nritems)
+		return 1;
+
+	if (max_push < push_items)
+		push_items = max_push;
+
+	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
+				      btrfs_node_key_ptr_offset(0),
+				      (dst_nritems) *
+				      sizeof(struct btrfs_key_ptr));
+
+	copy_extent_buffer(dst, src,
+			   btrfs_node_key_ptr_offset(0),
+			   btrfs_node_key_ptr_offset(src_nritems - push_items),
+			   push_items * sizeof(struct btrfs_key_ptr));
+
+	btrfs_set_header_nritems(src, src_nritems - push_items);
+	btrfs_set_header_nritems(dst, dst_nritems + push_items);
+
+	btrfs_mark_buffer_dirty(src);
+	btrfs_mark_buffer_dirty(dst);
+
+	ret = btrfs_update_ref(trans, root, src, dst, 0, push_items);
+	BUG_ON(ret);
+
+	return ret;
+}
+
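+/*
+ * Worked example for the right push above, with illustrative numbers: if dst
+ * has room for 5 more pointers and src holds 12, push_items starts at 5 and
+ * max_push is 12 / 2 + 1 = 7, so 5 pointers move.  The existing dst pointers
+ * shift toward the back and the last 5 pointers of src are copied into the
+ * front of dst.
+ */
+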
+/*
+ * helper function to insert a new root level in the tree.
+ * A new node is allocated, and a single item is inserted to
+ * point to the existing root
+ *
+ * returns zero on success or < 0 on failure.
+ */
+static noinline int insert_new_root(struct btrfs_trans_handle *trans,
+			   struct btrfs_root *root,
+			   struct btrfs_path *path, int level)
+{
+	u64 lower_gen;
+	struct extent_buffer *lower;
+	struct extent_buffer *c;
+	struct extent_buffer *old;
+	struct btrfs_disk_key lower_key;
+	int ret;
+
+	BUG_ON(path->nodes[level]);
+	BUG_ON(path->nodes[level-1] != root->node);
+
+	lower = path->nodes[level-1];
+	if (level == 1)
+		btrfs_item_key(lower, &lower_key, 0);
+	else
+		btrfs_node_key(lower, &lower_key, 0);
+
+	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
+				   root->root_key.objectid, trans->transid,
+				   level, root->node->start, 0);
+	if (IS_ERR(c))
+		return PTR_ERR(c);
+
+	memset_extent_buffer(c, 0, 0, root->nodesize);
+	btrfs_set_header_nritems(c, 1);
+	btrfs_set_header_level(c, level);
+	btrfs_set_header_bytenr(c, c->start);
+	btrfs_set_header_generation(c, trans->transid);
+	btrfs_set_header_owner(c, root->root_key.objectid);
+
+	write_extent_buffer(c, root->fs_info->fsid,
+			    (unsigned long)btrfs_header_fsid(c),
+			    BTRFS_FSID_SIZE);
+
+	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
+			    (unsigned long)btrfs_header_chunk_tree_uuid(c),
+			    BTRFS_UUID_SIZE);
+
+	btrfs_set_node_key(c, &lower_key, 0);
+	btrfs_set_node_blockptr(c, 0, lower->start);
+	lower_gen = btrfs_header_generation(lower);
+	WARN_ON(lower_gen != trans->transid);
+
+	btrfs_set_node_ptr_generation(c, 0, lower_gen);
+
+	btrfs_mark_buffer_dirty(c);
+
+	spin_lock(&root->node_lock);
+	old = root->node;
+	root->node = c;
+	spin_unlock(&root->node_lock);
+
+	ret = btrfs_update_extent_ref(trans, root, lower->start,
+				      lower->start, c->start,
+				      root->root_key.objectid,
+				      trans->transid, level - 1);
+	BUG_ON(ret);
+
+	/* the super has an extra ref to root->node */
+	free_extent_buffer(old);
+
+	add_root_to_dirty_list(root);
+	extent_buffer_get(c);
+	path->nodes[level] = c;
+	path->locks[level] = 1;
+	path->slots[level] = 0;
+	return 0;
+}
+
+/*
+ * worker function to insert a single pointer in a node.
+ * the node should have enough room for the pointer already
+ *
+ * slot and level indicate where you want the key to go, and
+ * blocknr is the block the key points to.
+ *
+ * returns zero on success and < 0 on any error
+ */
+static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
+		      *root, struct btrfs_path *path, struct btrfs_disk_key
+		      *key, u64 bytenr, int slot, int level)
+{
+	struct extent_buffer *lower;
+	int nritems;
+
+	BUG_ON(!path->nodes[level]);
+	lower = path->nodes[level];
+	nritems = btrfs_header_nritems(lower);
+	if (slot > nritems)
+		BUG();
+	if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
+		BUG();
+	if (slot != nritems) {
+		memmove_extent_buffer(lower,
+			      btrfs_node_key_ptr_offset(slot + 1),
+			      btrfs_node_key_ptr_offset(slot),
+			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
+	}
+	btrfs_set_node_key(lower, key, slot);
+	btrfs_set_node_blockptr(lower, slot, bytenr);
+	WARN_ON(trans->transid == 0);
+	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
+	btrfs_set_header_nritems(lower, nritems + 1);
+	btrfs_mark_buffer_dirty(lower);
+	return 0;
+}
+
+/*
+ * split the node at the specified level in path in two.
+ * The path is corrected to point to the appropriate node after the split
+ *
+ * Before splitting this tries to make some room in the node by pushing
+ * left and right, if either one works, it returns right away.
+ *
+ * returns 0 on success and < 0 on failure
+ */
+static noinline int split_node(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root,
+			       struct btrfs_path *path, int level)
+{
+	struct extent_buffer *c;
+	struct extent_buffer *split;
+	struct btrfs_disk_key disk_key;
+	int mid;
+	int ret;
+	int wret;
+	u32 c_nritems;
+
+	c = path->nodes[level];
+	WARN_ON(btrfs_header_generation(c) != trans->transid);
+	if (c == root->node) {
+		/* trying to split the root, let's make a new one */
+		ret = insert_new_root(trans, root, path, level + 1);
+		if (ret)
+			return ret;
+	} else {
+		ret = push_nodes_for_insert(trans, root, path, level);
+		c = path->nodes[level];
+		if (!ret && btrfs_header_nritems(c) <
+		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
+			return 0;
+		if (ret < 0)
+			return ret;
+	}
+
+	c_nritems = btrfs_header_nritems(c);
+
+	split = btrfs_alloc_free_block(trans, root, root->nodesize,
+					path->nodes[level + 1]->start,
+					root->root_key.objectid,
+					trans->transid, level, c->start, 0);
+	if (IS_ERR(split))
+		return PTR_ERR(split);
+
+	btrfs_set_header_flags(split, btrfs_header_flags(c));
+	btrfs_set_header_level(split, btrfs_header_level(c));
+	btrfs_set_header_bytenr(split, split->start);
+	btrfs_set_header_generation(split, trans->transid);
+	btrfs_set_header_owner(split, root->root_key.objectid);
+	btrfs_set_header_flags(split, 0);
+	write_extent_buffer(split, root->fs_info->fsid,
+			    (unsigned long)btrfs_header_fsid(split),
+			    BTRFS_FSID_SIZE);
+	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
+			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
+			    BTRFS_UUID_SIZE);
+
+	mid = (c_nritems + 1) / 2;
+
+	copy_extent_buffer(split, c,
+			   btrfs_node_key_ptr_offset(0),
+			   btrfs_node_key_ptr_offset(mid),
+			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
+	btrfs_set_header_nritems(split, c_nritems - mid);
+	btrfs_set_header_nritems(c, mid);
+	ret = 0;
+
+	btrfs_mark_buffer_dirty(c);
+	btrfs_mark_buffer_dirty(split);
+
+	btrfs_node_key(split, &disk_key, 0);
+	wret = insert_ptr(trans, root, path, &disk_key, split->start,
+			  path->slots[level + 1] + 1,
+			  level + 1);
+	if (wret)
+		ret = wret;
+
+	ret = btrfs_update_ref(trans, root, c, split, 0, c_nritems - mid);
+	BUG_ON(ret);
+
+	if (path->slots[level] >= mid) {
+		path->slots[level] -= mid;
+		btrfs_tree_unlock(c);
+		free_extent_buffer(c);
+		path->nodes[level] = split;
+		path->slots[level + 1] += 1;
+	} else {
+		btrfs_tree_unlock(split);
+		free_extent_buffer(split);
+	}
+	return ret;
+}
+
+/*
+ * how many bytes are required to store the items in a leaf.  start
+ * and nr indicate which items in the leaf to check.  This totals up the
+ * space used both by the item structs and the item data
+ */
+static int leaf_space_used(struct extent_buffer *l, int start, int nr)
+{
+	int data_len;
+	int nritems = btrfs_header_nritems(l);
+	int end = min(nritems, start + nr) - 1;
+
+	if (!nr)
+		return 0;
+	data_len = btrfs_item_end_nr(l, start);
+	data_len = data_len - btrfs_item_offset_nr(l, end);
+	data_len += sizeof(struct btrfs_item) * nr;
+	WARN_ON(data_len < 0);
+	return data_len;
+}
+
+/*
+ * The space between the end of the leaf items and
+ * the start of the leaf data.  IOW, how much room
+ * the leaf has left for both items and data
+ */
+noinline int btrfs_leaf_free_space(struct btrfs_root *root,
+				   struct extent_buffer *leaf)
+{
+	int nritems = btrfs_header_nritems(leaf);
+	int ret;
+	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
+	if (ret < 0) {
+		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
+		       "used %d nritems %d\n",
+		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
+		       leaf_space_used(leaf, 0, nritems), nritems);
+	}
+	return ret;
+}
+
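+/*
+ * Worked example with illustrative numbers: a leaf holding two items that
+ * carry 100 and 200 bytes of data uses 2 * sizeof(struct btrfs_item) + 300
+ * bytes, so btrfs_leaf_free_space() returns BTRFS_LEAF_DATA_SIZE(root) minus
+ * that total.  The item structs grow from the front of the leaf data area and
+ * the item data grows from the back, so the free space is the gap between
+ * them.
+ */
+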
+/*
+ * push some data in the path leaf to the right, trying to free up at
+ * least data_size bytes.  returns zero if the push worked, nonzero otherwise
+ *
+ * returns 1 if the push failed because the other node didn't have enough
+ * room, 0 if everything worked out and < 0 if there were major errors.
+ */
+static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
+			   *root, struct btrfs_path *path, int data_size,
+			   int empty)
+{
+	struct extent_buffer *left = path->nodes[0];
+	struct extent_buffer *right;
+	struct extent_buffer *upper;
+	struct btrfs_disk_key disk_key;
+	int slot;
+	u32 i;
+	int free_space;
+	int push_space = 0;
+	int push_items = 0;
+	struct btrfs_item *item;
+	u32 left_nritems;
+	u32 nr;
+	u32 right_nritems;
+	u32 data_end;
+	u32 this_item_size;
+	int ret;
+
+	slot = path->slots[1];
+	if (!path->nodes[1])
+		return 1;
+
+	upper = path->nodes[1];
+	if (slot >= btrfs_header_nritems(upper) - 1)
+		return 1;
+
+	WARN_ON(!btrfs_tree_locked(path->nodes[1]));
+
+	right = read_node_slot(root, upper, slot + 1);
+	btrfs_tree_lock(right);
+	free_space = btrfs_leaf_free_space(root, right);
+	if (free_space < data_size)
+		goto out_unlock;
+
+	/* cow and double check */
+	ret = btrfs_cow_block(trans, root, right, upper,
+			      slot + 1, &right, 0);
+	if (ret)
+		goto out_unlock;
+
+	free_space = btrfs_leaf_free_space(root, right);
+	if (free_space < data_size)
+		goto out_unlock;
+
+	left_nritems = btrfs_header_nritems(left);
+	if (left_nritems == 0)
+		goto out_unlock;
+
+	if (empty)
+		nr = 0;
+	else
+		nr = 1;
+
+	if (path->slots[0] >= left_nritems)
+		push_space += data_size;
+
+	i = left_nritems - 1;
+	while (i >= nr) {
+		item = btrfs_item_nr(left, i);
+
+		if (!empty && push_items > 0) {
+			if (path->slots[0] > i)
+				break;
+			if (path->slots[0] == i) {
+				int space = btrfs_leaf_free_space(root, left);
+				if (space + push_space * 2 > free_space)
+					break;
+			}
+		}
+
+		if (path->slots[0] == i)
+			push_space += data_size;
+
+		if (!left->map_token) {
+			map_extent_buffer(left, (unsigned long)item,
+					sizeof(struct btrfs_item),
+					&left->map_token, &left->kaddr,
+					&left->map_start, &left->map_len,
+					KM_USER1);
+		}
+
+		this_item_size = btrfs_item_size(left, item);
+		if (this_item_size + sizeof(*item) + push_space > free_space)
+			break;
+
+		push_items++;
+		push_space += this_item_size + sizeof(*item);
+		if (i == 0)
+			break;
+		i--;
+	}
+	if (left->map_token) {
+		unmap_extent_buffer(left, left->map_token, KM_USER1);
+		left->map_token = NULL;
+	}
+
+	if (push_items == 0)
+		goto out_unlock;
+
+	if (!empty && push_items == left_nritems)
+		WARN_ON(1);
+
+	/* push left to right */
+	right_nritems = btrfs_header_nritems(right);
+
+	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
+	push_space -= leaf_data_end(root, left);
+
+	/* make room in the right data area */
+	data_end = leaf_data_end(root, right);
+	memmove_extent_buffer(right,
+			      btrfs_leaf_data(right) + data_end - push_space,
+			      btrfs_leaf_data(right) + data_end,
+			      BTRFS_LEAF_DATA_SIZE(root) - data_end);
+
+	/* copy from the left data area */
+	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
+		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
+		     btrfs_leaf_data(left) + leaf_data_end(root, left),
+		     push_space);
+
+	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
+			      btrfs_item_nr_offset(0),
+			      right_nritems * sizeof(struct btrfs_item));
+
+	/* copy the items from left to right */
+	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
+		   btrfs_item_nr_offset(left_nritems - push_items),
+		   push_items * sizeof(struct btrfs_item));
+
+	/* update the item pointers */
+	right_nritems += push_items;
+	btrfs_set_header_nritems(right, right_nritems);
+	push_space = BTRFS_LEAF_DATA_SIZE(root);
+	for (i = 0; i < right_nritems; i++) {
+		item = btrfs_item_nr(right, i);
+		if (!right->map_token) {
+			map_extent_buffer(right, (unsigned long)item,
+					sizeof(struct btrfs_item),
+					&right->map_token, &right->kaddr,
+					&right->map_start, &right->map_len,
+					KM_USER1);
+		}
+		push_space -= btrfs_item_size(right, item);
+		btrfs_set_item_offset(right, item, push_space);
+	}
+
+	if (right->map_token) {
+		unmap_extent_buffer(right, right->map_token, KM_USER1);
+		right->map_token = NULL;
+	}
+	left_nritems -= push_items;
+	btrfs_set_header_nritems(left, left_nritems);
+
+	if (left_nritems)
+		btrfs_mark_buffer_dirty(left);
+	btrfs_mark_buffer_dirty(right);
+
+	ret = btrfs_update_ref(trans, root, left, right, 0, push_items);
+	BUG_ON(ret);
+
+	btrfs_item_key(right, &disk_key, 0);
+	btrfs_set_node_key(upper, &disk_key, slot + 1);
+	btrfs_mark_buffer_dirty(upper);
+
+	/* then fixup the leaf pointer in the path */
+	if (path->slots[0] >= left_nritems) {
+		path->slots[0] -= left_nritems;
+		if (btrfs_header_nritems(path->nodes[0]) == 0)
+			clean_tree_block(trans, root, path->nodes[0]);
+		btrfs_tree_unlock(path->nodes[0]);
+		free_extent_buffer(path->nodes[0]);
+		path->nodes[0] = right;
+		path->slots[1] += 1;
+	} else {
+		btrfs_tree_unlock(right);
+		free_extent_buffer(right);
+	}
+	return 0;
+
+out_unlock:
+	btrfs_tree_unlock(right);
+	free_extent_buffer(right);
+	return 1;
+}
+
+/*
+ * push some data in the path leaf to the left, trying to free up at
+ * least data_size bytes.  returns zero if the push worked, nonzero otherwise
+ */
+static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
+			  *root, struct btrfs_path *path, int data_size,
+			  int empty)
+{
+	struct btrfs_disk_key disk_key;
+	struct extent_buffer *right = path->nodes[0];
+	struct extent_buffer *left;
+	int slot;
+	int i;
+	int free_space;
+	int push_space = 0;
+	int push_items = 0;
+	struct btrfs_item *item;
+	u32 old_left_nritems;
+	u32 right_nritems;
+	u32 nr;
+	int ret = 0;
+	int wret;
+	u32 this_item_size;
+	u32 old_left_item_size;
+
+	slot = path->slots[1];
+	if (slot == 0)
+		return 1;
+	if (!path->nodes[1])
+		return 1;
+
+	right_nritems = btrfs_header_nritems(right);
+	if (right_nritems == 0)
+		return 1;
+
+	WARN_ON(!btrfs_tree_locked(path->nodes[1]));
+
+	left = read_node_slot(root, path->nodes[1], slot - 1);
+	btrfs_tree_lock(left);
+	free_space = btrfs_leaf_free_space(root, left);
+	if (free_space < data_size) {
+		ret = 1;
+		goto out;
+	}
+
+	/* cow and double check */
+	ret = btrfs_cow_block(trans, root, left,
+			      path->nodes[1], slot - 1, &left, 0);
+	if (ret) {
+		/* we hit -ENOSPC, but it isn't fatal here */
+		ret = 1;
+		goto out;
+	}
+
+	free_space = btrfs_leaf_free_space(root, left);
+	if (free_space < data_size) {
+		ret = 1;
+		goto out;
+	}
+
+	if (empty)
+		nr = right_nritems;
+	else
+		nr = right_nritems - 1;
+
+	for (i = 0; i < nr; i++) {
+		item = btrfs_item_nr(right, i);
+		if (!right->map_token) {
+			map_extent_buffer(right, (unsigned long)item,
+					sizeof(struct btrfs_item),
+					&right->map_token, &right->kaddr,
+					&right->map_start, &right->map_len,
+					KM_USER1);
+		}
+
+		if (!empty && push_items > 0) {
+			if (path->slots[0] < i)
+				break;
+			if (path->slots[0] == i) {
+				int space = btrfs_leaf_free_space(root, right);
+				if (space + push_space * 2 > free_space)
+					break;
+			}
+		}
+
+		if (path->slots[0] == i)
+			push_space += data_size;
+
+		this_item_size = btrfs_item_size(right, item);
+		if (this_item_size + sizeof(*item) + push_space > free_space)
+			break;
+
+		push_items++;
+		push_space += this_item_size + sizeof(*item);
+	}
+
+	if (right->map_token) {
+		unmap_extent_buffer(right, right->map_token, KM_USER1);
+		right->map_token = NULL;
+	}
+
+	if (push_items == 0) {
+		ret = 1;
+		goto out;
+	}
+	if (!empty && push_items == btrfs_header_nritems(right))
+		WARN_ON(1);
+
+	/* push data from right to left */
+	copy_extent_buffer(left, right,
+			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
+			   btrfs_item_nr_offset(0),
+			   push_items * sizeof(struct btrfs_item));
+
+	push_space = BTRFS_LEAF_DATA_SIZE(root) -
+		     btrfs_item_offset_nr(right, push_items - 1);
+
+	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
+		     leaf_data_end(root, left) - push_space,
+		     btrfs_leaf_data(right) +
+		     btrfs_item_offset_nr(right, push_items - 1),
+		     push_space);
+	old_left_nritems = btrfs_header_nritems(left);
+	BUG_ON(old_left_nritems <= 0);
+
+	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
+	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
+		u32 ioff;
+
+		item = btrfs_item_nr(left, i);
+		if (!left->map_token) {
+			map_extent_buffer(left, (unsigned long)item,
+					sizeof(struct btrfs_item),
+					&left->map_token, &left->kaddr,
+					&left->map_start, &left->map_len,
+					KM_USER1);
+		}
+
+		ioff = btrfs_item_offset(left, item);
+		btrfs_set_item_offset(left, item,
+		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
+	}
+	btrfs_set_header_nritems(left, old_left_nritems + push_items);
+	if (left->map_token) {
+		unmap_extent_buffer(left, left->map_token, KM_USER1);
+		left->map_token = NULL;
+	}
+
+	/* fixup right node */
+	if (push_items > right_nritems) {
+		printk(KERN_CRIT "push items %d nr %u\n", push_items,
+		       right_nritems);
+		WARN_ON(1);
+	}
+
+	if (push_items < right_nritems) {
+		push_space = btrfs_item_offset_nr(right, push_items - 1) -
+						  leaf_data_end(root, right);
+		memmove_extent_buffer(right, btrfs_leaf_data(right) +
+				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
+				      btrfs_leaf_data(right) +
+				      leaf_data_end(root, right), push_space);
+
+		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
+			      btrfs_item_nr_offset(push_items),
+			     (btrfs_header_nritems(right) - push_items) *
+			     sizeof(struct btrfs_item));
+	}
+	right_nritems -= push_items;
+	btrfs_set_header_nritems(right, right_nritems);
+	push_space = BTRFS_LEAF_DATA_SIZE(root);
+	for (i = 0; i < right_nritems; i++) {
+		item = btrfs_item_nr(right, i);
+
+		if (!right->map_token) {
+			map_extent_buffer(right, (unsigned long)item,
+					sizeof(struct btrfs_item),
+					&right->map_token, &right->kaddr,
+					&right->map_start, &right->map_len,
+					KM_USER1);
+		}
+
+		push_space = push_space - btrfs_item_size(right, item);
+		btrfs_set_item_offset(right, item, push_space);
+	}
+	if (right->map_token) {
+		unmap_extent_buffer(right, right->map_token, KM_USER1);
+		right->map_token = NULL;
+	}
+
+	btrfs_mark_buffer_dirty(left);
+	if (right_nritems)
+		btrfs_mark_buffer_dirty(right);
+
+	ret = btrfs_update_ref(trans, root, right, left,
+			       old_left_nritems, push_items);
+	BUG_ON(ret);
+
+	btrfs_item_key(right, &disk_key, 0);
+	wret = fixup_low_keys(trans, root, path, &disk_key, 1);
+	if (wret)
+		ret = wret;
+
+	/* then fixup the leaf pointer in the path */
+	if (path->slots[0] < push_items) {
+		path->slots[0] += old_left_nritems;
+		if (btrfs_header_nritems(path->nodes[0]) == 0)
+			clean_tree_block(trans, root, path->nodes[0]);
+		btrfs_tree_unlock(path->nodes[0]);
+		free_extent_buffer(path->nodes[0]);
+		path->nodes[0] = left;
+		path->slots[1] -= 1;
+	} else {
+		btrfs_tree_unlock(left);
+		free_extent_buffer(left);
+		path->slots[0] -= push_items;
+	}
+	BUG_ON(path->slots[0] < 0);
+	return ret;
+out:
+	btrfs_tree_unlock(left);
+	free_extent_buffer(left);
+	return ret;
+}
+
+/*
+ * split the path's leaf in two, making sure there is at least data_size
+ * available for the resulting leaf level of the path.
+ *
+ * returns 0 if all went well and < 0 on failure.
+ */
+static noinline int split_leaf(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root,
+			       struct btrfs_key *ins_key,
+			       struct btrfs_path *path, int data_size,
+			       int extend)
+{
+	struct extent_buffer *l;
+	u32 nritems;
+	int mid;
+	int slot;
+	struct extent_buffer *right;
+	int data_copy_size;
+	int rt_data_off;
+	int i;
+	int ret = 0;
+	int wret;
+	int double_split;
+	int num_doubles = 0;
+	struct btrfs_disk_key disk_key;
+
+	/* first try to make some room by pushing left and right */
+	if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
+		wret = push_leaf_right(trans, root, path, data_size, 0);
+		if (wret < 0)
+			return wret;
+		if (wret) {
+			wret = push_leaf_left(trans, root, path, data_size, 0);
+			if (wret < 0)
+				return wret;
+		}
+		l = path->nodes[0];
+
+		/* did the pushes work? */
+		if (btrfs_leaf_free_space(root, l) >= data_size)
+			return 0;
+	}
+
+	if (!path->nodes[1]) {
+		ret = insert_new_root(trans, root, path, 1);
+		if (ret)
+			return ret;
+	}
+again:
+	double_split = 0;
+	l = path->nodes[0];
+	slot = path->slots[0];
+	nritems = btrfs_header_nritems(l);
+	mid = (nritems + 1) / 2;
+
+	right = btrfs_alloc_free_block(trans, root, root->leafsize,
+					path->nodes[1]->start,
+					root->root_key.objectid,
+					trans->transid, 0, l->start, 0);
+	if (IS_ERR(right)) {
+		BUG_ON(1);
+		return PTR_ERR(right);
+	}
+
+	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
+	btrfs_set_header_bytenr(right, right->start);
+	btrfs_set_header_generation(right, trans->transid);
+	btrfs_set_header_owner(right, root->root_key.objectid);
+	btrfs_set_header_level(right, 0);
+	write_extent_buffer(right, root->fs_info->fsid,
+			    (unsigned long)btrfs_header_fsid(right),
+			    BTRFS_FSID_SIZE);
+
+	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
+			    (unsigned long)btrfs_header_chunk_tree_uuid(right),
+			    BTRFS_UUID_SIZE);
+	if (mid <= slot) {
+		if (nritems == 1 ||
+		    leaf_space_used(l, mid, nritems - mid) + data_size >
+			BTRFS_LEAF_DATA_SIZE(root)) {
+			if (slot >= nritems) {
+				btrfs_cpu_key_to_disk(&disk_key, ins_key);
+				btrfs_set_header_nritems(right, 0);
+				wret = insert_ptr(trans, root, path,
+						  &disk_key, right->start,
+						  path->slots[1] + 1, 1);
+				if (wret)
+					ret = wret;
+
+				btrfs_tree_unlock(path->nodes[0]);
+				free_extent_buffer(path->nodes[0]);
+				path->nodes[0] = right;
+				path->slots[0] = 0;
+				path->slots[1] += 1;
+				btrfs_mark_buffer_dirty(right);
+				return ret;
+			}
+			mid = slot;
+			if (mid != nritems &&
+			    leaf_space_used(l, mid, nritems - mid) +
+			    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
+				double_split = 1;
+			}
+		}
+	} else {
+		if (leaf_space_used(l, 0, mid) + data_size >
+			BTRFS_LEAF_DATA_SIZE(root)) {
+			if (!extend && data_size && slot == 0) {
+				btrfs_cpu_key_to_disk(&disk_key, ins_key);
+				btrfs_set_header_nritems(right, 0);
+				wret = insert_ptr(trans, root, path,
+						  &disk_key,
+						  right->start,
+						  path->slots[1], 1);
+				if (wret)
+					ret = wret;
+				btrfs_tree_unlock(path->nodes[0]);
+				free_extent_buffer(path->nodes[0]);
+				path->nodes[0] = right;
+				path->slots[0] = 0;
+				if (path->slots[1] == 0) {
+					wret = fixup_low_keys(trans, root,
+						      path, &disk_key, 1);
+					if (wret)
+						ret = wret;
+				}
+				btrfs_mark_buffer_dirty(right);
+				return ret;
+			} else if ((extend || !data_size) && slot == 0) {
+				mid = 1;
+			} else {
+				mid = slot;
+				if (mid != nritems &&
+				    leaf_space_used(l, mid, nritems - mid) +
+				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
+					double_split = 1;
+				}
+			}
+		}
+	}
+	nritems = nritems - mid;
+	btrfs_set_header_nritems(right, nritems);
+	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
+
+	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
+			   btrfs_item_nr_offset(mid),
+			   nritems * sizeof(struct btrfs_item));
+
+	copy_extent_buffer(right, l,
+		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
+		     data_copy_size, btrfs_leaf_data(l) +
+		     leaf_data_end(root, l), data_copy_size);
+
+	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
+		      btrfs_item_end_nr(l, mid);
+
+	for (i = 0; i < nritems; i++) {
+		struct btrfs_item *item = btrfs_item_nr(right, i);
+		u32 ioff;
+
+		if (!right->map_token) {
+			map_extent_buffer(right, (unsigned long)item,
+					sizeof(struct btrfs_item),
+					&right->map_token, &right->kaddr,
+					&right->map_start, &right->map_len,
+					KM_USER1);
+		}
+
+		ioff = btrfs_item_offset(right, item);
+		btrfs_set_item_offset(right, item, ioff + rt_data_off);
+	}
+
+	if (right->map_token) {
+		unmap_extent_buffer(right, right->map_token, KM_USER1);
+		right->map_token = NULL;
+	}
+
+	btrfs_set_header_nritems(l, mid);
+	ret = 0;
+	btrfs_item_key(right, &disk_key, 0);
+	wret = insert_ptr(trans, root, path, &disk_key, right->start,
+			  path->slots[1] + 1, 1);
+	if (wret)
+		ret = wret;
+
+	btrfs_mark_buffer_dirty(right);
+	btrfs_mark_buffer_dirty(l);
+	BUG_ON(path->slots[0] != slot);
+
+	ret = btrfs_update_ref(trans, root, l, right, 0, nritems);
+	BUG_ON(ret);
+
+	if (mid <= slot) {
+		btrfs_tree_unlock(path->nodes[0]);
+		free_extent_buffer(path->nodes[0]);
+		path->nodes[0] = right;
+		path->slots[0] -= mid;
+		path->slots[1] += 1;
+	} else {
+		btrfs_tree_unlock(right);
+		free_extent_buffer(right);
+	}
+
+	BUG_ON(path->slots[0] < 0);
+
+	if (double_split) {
+		BUG_ON(num_doubles != 0);
+		num_doubles++;
+		goto again;
+	}
+	return ret;
+}
+
+/*
+ * This function splits a single item into two items,
+ * giving 'new_key' to the new item and splitting the
+ * old one at split_offset (from the start of the item).
+ *
+ * The path may be released by this operation.  After
+ * the split, the path is pointing to the old item.  The
+ * new item is going to be in the same node as the old one.
+ *
+ * Note, the item being split must be small enough to live alone on
+ * a tree block with room for one extra struct btrfs_item
+ *
+ * This allows us to split the item in place, keeping a lock on the
+ * leaf the entire time.
+ */
+int btrfs_split_item(struct btrfs_trans_handle *trans,
+		     struct btrfs_root *root,
+		     struct btrfs_path *path,
+		     struct btrfs_key *new_key,
+		     unsigned long split_offset)
+{
+	u32 item_size;
+	struct extent_buffer *leaf;
+	struct btrfs_key orig_key;
+	struct btrfs_item *item;
+	struct btrfs_item *new_item;
+	int ret = 0;
+	int slot;
+	u32 nritems;
+	u32 orig_offset;
+	struct btrfs_disk_key disk_key;
+	char *buf;
+
+	leaf = path->nodes[0];
+	btrfs_item_key_to_cpu(leaf, &orig_key, path->slots[0]);
+	if (btrfs_leaf_free_space(root, leaf) >= sizeof(struct btrfs_item))
+		goto split;
+
+	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+	btrfs_release_path(root, path);
+
+	path->search_for_split = 1;
+	path->keep_locks = 1;
+
+	ret = btrfs_search_slot(trans, root, &orig_key, path, 0, 1);
+	path->search_for_split = 0;
+
+	/* if our item isn't there or got smaller, return now */
+	if (ret != 0 || item_size != btrfs_item_size_nr(path->nodes[0],
+							path->slots[0])) {
+		path->keep_locks = 0;
+		return -EAGAIN;
+	}
+
+	ret = split_leaf(trans, root, &orig_key, path,
+			 sizeof(struct btrfs_item), 1);
+	path->keep_locks = 0;
+	BUG_ON(ret);
+
+	leaf = path->nodes[0];
+	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
+
+split:
+	item = btrfs_item_nr(leaf, path->slots[0]);
+	orig_offset = btrfs_item_offset(leaf, item);
+	item_size = btrfs_item_size(leaf, item);
+
+	buf = kmalloc(item_size, GFP_NOFS);
+	if (!buf)
+		return -ENOMEM;
+	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
+			    path->slots[0]), item_size);
+	slot = path->slots[0] + 1;
+	leaf = path->nodes[0];
+
+	nritems = btrfs_header_nritems(leaf);
+
+	if (slot != nritems) {
+		/* shift the items */
+		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
+			      btrfs_item_nr_offset(slot),
+			      (nritems - slot) * sizeof(struct btrfs_item));
+
+	}
+
+	btrfs_cpu_key_to_disk(&disk_key, new_key);
+	btrfs_set_item_key(leaf, &disk_key, slot);
+
+	new_item = btrfs_item_nr(leaf, slot);
+
+	btrfs_set_item_offset(leaf, new_item, orig_offset);
+	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
+
+	btrfs_set_item_offset(leaf, item,
+			      orig_offset + item_size - split_offset);
+	btrfs_set_item_size(leaf, item, split_offset);
+
+	btrfs_set_header_nritems(leaf, nritems + 1);
+
+	/* write the data for the start of the original item */
+	write_extent_buffer(leaf, buf,
+			    btrfs_item_ptr_offset(leaf, path->slots[0]),
+			    split_offset);
+
+	/* write the data for the new item */
+	write_extent_buffer(leaf, buf + split_offset,
+			    btrfs_item_ptr_offset(leaf, slot),
+			    item_size - split_offset);
+	btrfs_mark_buffer_dirty(leaf);
+
+	ret = 0;
+	if (btrfs_leaf_free_space(root, leaf) < 0) {
+		btrfs_print_leaf(root, leaf);
+		BUG();
+	}
+	kfree(buf);
+	return ret;
+}
+
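+/*
+ * Worked example for the split above, with illustrative numbers: an item
+ * whose 100 bytes of data sit at leaf offset 3000, split with split_offset
+ * 40, becomes two items.  The original key keeps the first 40 bytes, now at
+ * offset 3060, and the new item carrying 'new_key' holds the remaining 60
+ * bytes at offset 3000.
+ */
+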
+/*
+ * make the item pointed to by the path smaller.  new_size indicates
+ * how small to make it, and from_end tells us if we just chop bytes
+ * off the end of the item or if we shift the item to chop bytes off
+ * the front.
+ */
+int btrfs_truncate_item(struct btrfs_trans_handle *trans,
+			struct btrfs_root *root,
+			struct btrfs_path *path,
+			u32 new_size, int from_end)
+{
+	int ret = 0;
+	int slot;
+	int slot_orig;
+	struct extent_buffer *leaf;
+	struct btrfs_item *item;
+	u32 nritems;
+	unsigned int data_end;
+	unsigned int old_data_start;
+	unsigned int old_size;
+	unsigned int size_diff;
+	int i;
+
+	slot_orig = path->slots[0];
+	leaf = path->nodes[0];
+	slot = path->slots[0];
+
+	old_size = btrfs_item_size_nr(leaf, slot);
+	if (old_size == new_size)
+		return 0;
+
+	nritems = btrfs_header_nritems(leaf);
+	data_end = leaf_data_end(root, leaf);
+
+	old_data_start = btrfs_item_offset_nr(leaf, slot);
+
+	size_diff = old_size - new_size;
+
+	BUG_ON(slot < 0);
+	BUG_ON(slot >= nritems);
+
+	/*
+	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
+	 */
+	/* first correct the data pointers */
+	for (i = slot; i < nritems; i++) {
+		u32 ioff;
+		item = btrfs_item_nr(leaf, i);
+
+		if (!leaf->map_token) {
+			map_extent_buffer(leaf, (unsigned long)item,
+					sizeof(struct btrfs_item),
+					&leaf->map_token, &leaf->kaddr,
+					&leaf->map_start, &leaf->map_len,
+					KM_USER1);
+		}
+
+		ioff = btrfs_item_offset(leaf, item);
+		btrfs_set_item_offset(leaf, item, ioff + size_diff);
+	}
+
+	if (leaf->map_token) {
+		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
+		leaf->map_token = NULL;
+	}
+
+	/* shift the data */
+	if (from_end) {
+		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
+			      data_end + size_diff, btrfs_leaf_data(leaf) +
+			      data_end, old_data_start + new_size - data_end);
+	} else {
+		struct btrfs_disk_key disk_key;
+		u64 offset;
+
+		btrfs_item_key(leaf, &disk_key, slot);
+
+		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
+			unsigned long ptr;
+			struct btrfs_file_extent_item *fi;
+
+			fi = btrfs_item_ptr(leaf, slot,
+					    struct btrfs_file_extent_item);
+			fi = (struct btrfs_file_extent_item *)(
+			     (unsigned long)fi - size_diff);
+
+			if (btrfs_file_extent_type(leaf, fi) ==
+			    BTRFS_FILE_EXTENT_INLINE) {
+				ptr = btrfs_item_ptr_offset(leaf, slot);
+				memmove_extent_buffer(leaf, ptr,
+				      (unsigned long)fi,
+				      offsetof(struct btrfs_file_extent_item,
+						 disk_bytenr));
+			}
+		}
+
+		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
+			      data_end + size_diff, btrfs_leaf_data(leaf) +
+			      data_end, old_data_start - data_end);
+
+		offset = btrfs_disk_key_offset(&disk_key);
+		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
+		btrfs_set_item_key(leaf, &disk_key, slot);
+		if (slot == 0)
+			fixup_low_keys(trans, root, path, &disk_key, 1);
+	}
+
+	item = btrfs_item_nr(leaf, slot);
+	btrfs_set_item_size(leaf, item, new_size);
+	btrfs_mark_buffer_dirty(leaf);
+
+	ret = 0;
+	if (btrfs_leaf_free_space(root, leaf) < 0) {
+		btrfs_print_leaf(root, leaf);
+		BUG();
+	}
+	return ret;
+}
+
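+/*
+ * Worked example for the truncate above, with illustrative numbers: a 100
+ * byte item shrunk to new_size 60 with from_end set keeps its first 60 bytes
+ * and discards the rest.  With from_end clear the first 40 bytes are chopped
+ * off instead, the surviving tail stays where it was, and the key's offset
+ * member is bumped by the 40 bytes removed.
+ */
+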
+/*
+ * make the item pointed to by the path bigger, data_size is the new size.
+ */
+int btrfs_extend_item(struct btrfs_trans_handle *trans,
+		      struct btrfs_root *root, struct btrfs_path *path,
+		      u32 data_size)
+{
+	int ret = 0;
+	int slot;
+	int slot_orig;
+	struct extent_buffer *leaf;
+	struct btrfs_item *item;
+	u32 nritems;
+	unsigned int data_end;
+	unsigned int old_data;
+	unsigned int old_size;
+	int i;
+
+	slot_orig = path->slots[0];
+	leaf = path->nodes[0];
+
+	nritems = btrfs_header_nritems(leaf);
+	data_end = leaf_data_end(root, leaf);
+
+	if (btrfs_leaf_free_space(root, leaf) < data_size) {
+		btrfs_print_leaf(root, leaf);
+		BUG();
+	}
+	slot = path->slots[0];
+	old_data = btrfs_item_end_nr(leaf, slot);
+
+	BUG_ON(slot < 0);
+	if (slot >= nritems) {
+		btrfs_print_leaf(root, leaf);
+		printk(KERN_CRIT "slot %d too large, nritems %d\n",
+		       slot, nritems);
+		BUG_ON(1);
+	}
+
+	/*
+	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
+	 */
+	/* first correct the data pointers */
+	for (i = slot; i < nritems; i++) {
+		u32 ioff;
+		item = btrfs_item_nr(leaf, i);
+
+		if (!leaf->map_token) {
+			map_extent_buffer(leaf, (unsigned long)item,
+					sizeof(struct btrfs_item),
+					&leaf->map_token, &leaf->kaddr,
+					&leaf->map_start, &leaf->map_len,
+					KM_USER1);
+		}
+		ioff = btrfs_item_offset(leaf, item);
+		btrfs_set_item_offset(leaf, item, ioff - data_size);
+	}
+
+	if (leaf->map_token) {
+		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
+		leaf->map_token = NULL;
+	}
+
+	/* shift the data */
+	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
+		      data_end - data_size, btrfs_leaf_data(leaf) +
+		      data_end, old_data - data_end);
+
+	data_end = old_data;
+	old_size = btrfs_item_size_nr(leaf, slot);
+	item = btrfs_item_nr(leaf, slot);
+	btrfs_set_item_size(leaf, item, old_size + data_size);
+	btrfs_mark_buffer_dirty(leaf);
+
+	ret = 0;
+	if (btrfs_leaf_free_space(root, leaf) < 0) {
+		btrfs_print_leaf(root, leaf);
+		BUG();
+	}
+	return ret;
+}
+
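+/*
+ * Worked example for the extend above, with illustrative numbers: an item
+ * whose data occupies offsets 3000..3099 grown by data_size 50 ends up at
+ * offset 2950 with size 150.  All item data at lower offsets slides down by
+ * 50 bytes and the 50 new bytes appear at the end of the item, ready for the
+ * caller to fill in.
+ */
+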
+/*
+ * Given a key and some data, insert items into the tree.
+ * This does all the path init required, making room in the tree if needed.
+ * Returns the number of keys that were inserted.
+ */
+int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root,
+			    struct btrfs_path *path,
+			    struct btrfs_key *cpu_key, u32 *data_size,
+			    int nr)
+{
+	struct extent_buffer *leaf;
+	struct btrfs_item *item;
+	int ret = 0;
+	int slot;
+	int i;
+	u32 nritems;
+	u32 total_data = 0;
+	u32 total_size = 0;
+	unsigned int data_end;
+	struct btrfs_disk_key disk_key;
+	struct btrfs_key found_key;
+
+	for (i = 0; i < nr; i++) {
+		if (total_size + data_size[i] + sizeof(struct btrfs_item) >
+		    BTRFS_LEAF_DATA_SIZE(root)) {
+			nr = i;
+			break;
+		}
+		total_data += data_size[i];
+		total_size += data_size[i] + sizeof(struct btrfs_item);
+	}
+	BUG_ON(nr == 0);
+
+	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
+	if (ret == 0)
+		return -EEXIST;
+	if (ret < 0)
+		goto out;
+
+	leaf = path->nodes[0];
+
+	nritems = btrfs_header_nritems(leaf);
+	data_end = leaf_data_end(root, leaf);
+
+	if (btrfs_leaf_free_space(root, leaf) < total_size) {
+		for (i = nr - 1; i >= 0; i--) {
+			total_data -= data_size[i];
+			total_size -= data_size[i] + sizeof(struct btrfs_item);
+			if (total_size < btrfs_leaf_free_space(root, leaf))
+				break;
+		}
+		nr = i;
+	}
+
+	slot = path->slots[0];
+	BUG_ON(slot < 0);
+
+	if (slot != nritems) {
+		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
+
+		item = btrfs_item_nr(leaf, slot);
+		btrfs_item_key_to_cpu(leaf, &found_key, slot);
+
+		/* figure out how many keys we can insert in here */
+		total_data = data_size[0];
+		for (i = 1; i < nr; i++) {
+			if (comp_cpu_keys(&found_key, cpu_key + i) <= 0)
+				break;
+			total_data += data_size[i];
+		}
+		nr = i;
+
+		if (old_data < data_end) {
+			btrfs_print_leaf(root, leaf);
+			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
+			       slot, old_data, data_end);
+			BUG_ON(1);
+		}
+		/*
+		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
+		 */
+		/* first correct the data pointers */
+		WARN_ON(leaf->map_token);
+		for (i = slot; i < nritems; i++) {
+			u32 ioff;
+
+			item = btrfs_item_nr(leaf, i);
+			if (!leaf->map_token) {
+				map_extent_buffer(leaf, (unsigned long)item,
+					sizeof(struct btrfs_item),
+					&leaf->map_token, &leaf->kaddr,
+					&leaf->map_start, &leaf->map_len,
+					KM_USER1);
+			}
+
+			ioff = btrfs_item_offset(leaf, item);
+			btrfs_set_item_offset(leaf, item, ioff - total_data);
+		}
+		if (leaf->map_token) {
+			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
+			leaf->map_token = NULL;
+		}
+
+		/* shift the items */
+		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
+			      btrfs_item_nr_offset(slot),
+			      (nritems - slot) * sizeof(struct btrfs_item));
+
+		/* shift the data */
+		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
+			      data_end - total_data, btrfs_leaf_data(leaf) +
+			      data_end, old_data - data_end);
+		data_end = old_data;
+	} else {
+		/*
+		 * this sucks but it has to be done: if we are inserting at
+		 * the end of the leaf we only insert 1 of the items, since we
+		 * have no way of knowing what's on the next leaf and we'd have
+		 * to drop our current locks to figure it out
+		 */
+		nr = 1;
+	}
+
+	/* setup the item for the new data */
+	for (i = 0; i < nr; i++) {
+		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
+		btrfs_set_item_key(leaf, &disk_key, slot + i);
+		item = btrfs_item_nr(leaf, slot + i);
+		btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
+		data_end -= data_size[i];
+		btrfs_set_item_size(leaf, item, data_size[i]);
+	}
+	btrfs_set_header_nritems(leaf, nritems + nr);
+	btrfs_mark_buffer_dirty(leaf);
+
+	ret = 0;
+	if (slot == 0) {
+		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
+		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
+	}
+
+	if (btrfs_leaf_free_space(root, leaf) < 0) {
+		btrfs_print_leaf(root, leaf);
+		BUG();
+	}
+out:
+	if (!ret)
+		ret = nr;
+	return ret;
+}
+
+/*
+ * Given a key and some data, insert items into the tree.
+ * This does all the path init required, making room in the tree if needed.
+ */
+int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root,
+			    struct btrfs_path *path,
+			    struct btrfs_key *cpu_key, u32 *data_size,
+			    int nr)
+{
+	struct extent_buffer *leaf;
+	struct btrfs_item *item;
+	int ret = 0;
+	int slot;
+	int slot_orig;
+	int i;
+	u32 nritems;
+	u32 total_size = 0;
+	u32 total_data = 0;
+	unsigned int data_end;
+	struct btrfs_disk_key disk_key;
+
+	for (i = 0; i < nr; i++)
+		total_data += data_size[i];
+
+	total_size = total_data + (nr * sizeof(struct btrfs_item));
+	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
+	if (ret == 0)
+		return -EEXIST;
+	if (ret < 0)
+		goto out;
+
+	slot_orig = path->slots[0];
+	leaf = path->nodes[0];
+
+	nritems = btrfs_header_nritems(leaf);
+	data_end = leaf_data_end(root, leaf);
+
+	if (btrfs_leaf_free_space(root, leaf) < total_size) {
+		btrfs_print_leaf(root, leaf);
+		printk(KERN_CRIT "not enough freespace need %u have %d\n",
+		       total_size, btrfs_leaf_free_space(root, leaf));
+		BUG();
+	}
+
+	slot = path->slots[0];
+	BUG_ON(slot < 0);
+
+	if (slot != nritems) {
+		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
+
+		if (old_data < data_end) {
+			btrfs_print_leaf(root, leaf);
+			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
+			       slot, old_data, data_end);
+			BUG_ON(1);
+		}
+		/*
+		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
+		 */
+		/* first correct the data pointers */
+		WARN_ON(leaf->map_token);
+		for (i = slot; i < nritems; i++) {
+			u32 ioff;
+
+			item = btrfs_item_nr(leaf, i);
+			if (!leaf->map_token) {
+				map_extent_buffer(leaf, (unsigned long)item,
+					sizeof(struct btrfs_item),
+					&leaf->map_token, &leaf->kaddr,
+					&leaf->map_start, &leaf->map_len,
+					KM_USER1);
+			}
+
+			ioff = btrfs_item_offset(leaf, item);
+			btrfs_set_item_offset(leaf, item, ioff - total_data);
+		}
+		if (leaf->map_token) {
+			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
+			leaf->map_token = NULL;
+		}
+
+		/* shift the items */
+		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
+			      btrfs_item_nr_offset(slot),
+			      (nritems - slot) * sizeof(struct btrfs_item));
+
+		/* shift the data */
+		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
+			      data_end - total_data, btrfs_leaf_data(leaf) +
+			      data_end, old_data - data_end);
+		data_end = old_data;
+	}
+
+	/* setup the item for the new data */
+	for (i = 0; i < nr; i++) {
+		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
+		btrfs_set_item_key(leaf, &disk_key, slot + i);
+		item = btrfs_item_nr(leaf, slot + i);
+		btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
+		data_end -= data_size[i];
+		btrfs_set_item_size(leaf, item, data_size[i]);
+	}
+	btrfs_set_header_nritems(leaf, nritems + nr);
+	btrfs_mark_buffer_dirty(leaf);
+
+	ret = 0;
+	if (slot == 0) {
+		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
+		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
+	}
+
+	if (btrfs_leaf_free_space(root, leaf) < 0) {
+		btrfs_print_leaf(root, leaf);
+		BUG();
+	}
+out:
+	return ret;
+}
+
+/*
+ * Given a key and some data, insert an item into the tree.
+ * This does all the path init required, making room in the tree if needed.
+ */
+int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
+		      *root, struct btrfs_key *cpu_key, void *data, u32
+		      data_size)
+{
+	int ret = 0;
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	unsigned long ptr;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
+	if (!ret) {
+		leaf = path->nodes[0];
+		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+		write_extent_buffer(leaf, data, ptr, data_size);
+		btrfs_mark_buffer_dirty(leaf);
+	}
+	btrfs_free_path(path);
+	return ret;
+}
+
+/*
+ * delete the pointer from a given node.
+ *
+ * the tree should have been previously balanced so the deletion does not
+ * empty a node.
+ */
+static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+		   struct btrfs_path *path, int level, int slot)
+{
+	struct extent_buffer *parent = path->nodes[level];
+	u32 nritems;
+	int ret = 0;
+	int wret;
+
+	nritems = btrfs_header_nritems(parent);
+	if (slot != nritems - 1) {
+		memmove_extent_buffer(parent,
+			      btrfs_node_key_ptr_offset(slot),
+			      btrfs_node_key_ptr_offset(slot + 1),
+			      sizeof(struct btrfs_key_ptr) *
+			      (nritems - slot - 1));
+	}
+	nritems--;
+	btrfs_set_header_nritems(parent, nritems);
+	if (nritems == 0 && parent == root->node) {
+		BUG_ON(btrfs_header_level(root->node) != 1);
+		/* just turn the root into a leaf and break */
+		btrfs_set_header_level(root->node, 0);
+	} else if (slot == 0) {
+		struct btrfs_disk_key disk_key;
+
+		btrfs_node_key(parent, &disk_key, 0);
+		wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
+		if (wret)
+			ret = wret;
+	}
+	btrfs_mark_buffer_dirty(parent);
+	return ret;
+}
+
+/*
+ * a helper function to delete the leaf pointed to by path->slots[1] and
+ * path->nodes[1].  bytenr is the node block pointer, but since the callers
+ * already know it, it is faster to have them pass it down than to
+ * read it out of the node again.
+ *
+ * This deletes the pointer in path->nodes[1] and frees the leaf
+ * block extent.  zero is returned if it all worked out, < 0 otherwise.
+ *
+ * The path must have already been setup for deleting the leaf, including
+ * all the proper balancing.  path->nodes[1] must be locked.
+ */
+noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root,
+			    struct btrfs_path *path, u64 bytenr)
+{
+	int ret;
+	u64 root_gen = btrfs_header_generation(path->nodes[1]);
+
+	ret = del_ptr(trans, root, path, 1, path->slots[1]);
+	if (ret)
+		return ret;
+
+	ret = btrfs_free_extent(trans, root, bytenr,
+				btrfs_level_size(root, 0),
+				path->nodes[1]->start,
+				btrfs_header_owner(path->nodes[1]),
+				root_gen, 0, 1);
+	return ret;
+}
+
+/*
+ * delete the item at the leaf level in path.  If that empties
+ * the leaf, remove it from the tree
+ */
+int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+		    struct btrfs_path *path, int slot, int nr)
+{
+	struct extent_buffer *leaf;
+	struct btrfs_item *item;
+	int last_off;
+	int dsize = 0;
+	int ret = 0;
+	int wret;
+	int i;
+	u32 nritems;
+
+	leaf = path->nodes[0];
+	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
+
+	for (i = 0; i < nr; i++)
+		dsize += btrfs_item_size_nr(leaf, slot + i);
+
+	nritems = btrfs_header_nritems(leaf);
+
+	if (slot + nr != nritems) {
+		int data_end = leaf_data_end(root, leaf);
+
+		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
+			      data_end + dsize,
+			      btrfs_leaf_data(leaf) + data_end,
+			      last_off - data_end);
+
+		for (i = slot + nr; i < nritems; i++) {
+			u32 ioff;
+
+			item = btrfs_item_nr(leaf, i);
+			if (!leaf->map_token) {
+				map_extent_buffer(leaf, (unsigned long)item,
+					sizeof(struct btrfs_item),
+					&leaf->map_token, &leaf->kaddr,
+					&leaf->map_start, &leaf->map_len,
+					KM_USER1);
+			}
+			ioff = btrfs_item_offset(leaf, item);
+			btrfs_set_item_offset(leaf, item, ioff + dsize);
+		}
+
+		if (leaf->map_token) {
+			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
+			leaf->map_token = NULL;
+		}
+
+		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
+			      btrfs_item_nr_offset(slot + nr),
+			      sizeof(struct btrfs_item) *
+			      (nritems - slot - nr));
+	}
+	btrfs_set_header_nritems(leaf, nritems - nr);
+	nritems -= nr;
+
+	/* delete the leaf if we've emptied it */
+	if (nritems == 0) {
+		if (leaf == root->node) {
+			btrfs_set_header_level(leaf, 0);
+		} else {
+			ret = btrfs_del_leaf(trans, root, path, leaf->start);
+			BUG_ON(ret);
+		}
+	} else {
+		int used = leaf_space_used(leaf, 0, nritems);
+		if (slot == 0) {
+			struct btrfs_disk_key disk_key;
+
+			btrfs_item_key(leaf, &disk_key, 0);
+			wret = fixup_low_keys(trans, root, path,
+					      &disk_key, 1);
+			if (wret)
+				ret = wret;
+		}
+
+		/* delete the leaf if it is mostly empty */
+		if (used < BTRFS_LEAF_DATA_SIZE(root) / 4) {
+			/* push_leaf_left fixes the path.
+			 * make sure the path still points to our leaf
+			 * for possible call to del_ptr below
+			 */
+			slot = path->slots[1];
+			extent_buffer_get(leaf);
+
+			wret = push_leaf_left(trans, root, path, 1, 1);
+			if (wret < 0 && wret != -ENOSPC)
+				ret = wret;
+
+			if (path->nodes[0] == leaf &&
+			    btrfs_header_nritems(leaf)) {
+				wret = push_leaf_right(trans, root, path, 1, 1);
+				if (wret < 0 && wret != -ENOSPC)
+					ret = wret;
+			}
+
+			if (btrfs_header_nritems(leaf) == 0) {
+				path->slots[1] = slot;
+				ret = btrfs_del_leaf(trans, root, path,
+						     leaf->start);
+				BUG_ON(ret);
+				free_extent_buffer(leaf);
+			} else {
+				/* if we're still in the path, make sure
+				 * we're dirty.  Otherwise, one of the
+				 * push_leaf functions must have already
+				 * dirtied this buffer
+				 */
+				if (path->nodes[0] == leaf)
+					btrfs_mark_buffer_dirty(leaf);
+				free_extent_buffer(leaf);
+			}
+		} else {
+			btrfs_mark_buffer_dirty(leaf);
+		}
+	}
+	return ret;
+}
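+
+/*
+ * Illustrative sketch (not a caller copied from this patch): to remove a
+ * single item, a caller typically looks the key up first and then deletes
+ * the slot the search left in the path.  The negative ins_len passed to
+ * btrfs_search_slot as a "this is a deletion" hint is an assumption about
+ * that API, not something shown in this file.
+ *
+ *	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ *	if (ret == 0)
+ *		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
+ *	btrfs_release_path(root, path);
+ */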
+
+/*
+ * search the tree again to find a leaf with lesser keys
+ * returns 0 if it found something or 1 if there are no lesser leaves.
+ * returns < 0 on io errors.
+ *
+ * This may release the path, and so you may lose any locks held at the
+ * time you call it.
+ */
+int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
+{
+	struct btrfs_key key;
+	struct btrfs_disk_key found_key;
+	int ret;
+
+	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
+
+	if (key.offset > 0)
+		key.offset--;
+	else if (key.type > 0)
+		key.type--;
+	else if (key.objectid > 0)
+		key.objectid--;
+	else
+		return 1;
+
+	btrfs_release_path(root, path);
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		return ret;
+	btrfs_item_key(path->nodes[0], &found_key, 0);
+	ret = comp_keys(&found_key, &key);
+	if (ret < 0)
+		return 0;
+	return 1;
+}
+
+/*
+ * A helper function to walk down the tree starting at min_key, and looking
+ * for nodes or leaves that are either in cache or have a minimum
+ * transaction id.  This is used by the btree defrag code, and tree logging
+ *
+ * This does not cow, but it does stuff the starting key it finds back
+ * into min_key, so you can call btrfs_search_slot with cow=1 on the
+ * key and get a writable path.
+ *
+ * This does lock as it descends, and path->keep_locks should be set
+ * to 1 by the caller.
+ *
+ * This honors path->lowest_level to prevent descent past a given level
+ * of the tree.
+ *
+ * min_trans indicates the oldest transaction that you are interested
+ * in walking through.  Any nodes or leaves older than min_trans are
+ * skipped over (without reading them).
+ *
+ * returns zero if something useful was found, < 0 on error and 1 if there
+ * was nothing in the tree that matched the search criteria.
+ */
+int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
+			 struct btrfs_key *max_key,
+			 struct btrfs_path *path, int cache_only,
+			 u64 min_trans)
+{
+	struct extent_buffer *cur;
+	struct btrfs_key found_key;
+	int slot;
+	int sret;
+	u32 nritems;
+	int level;
+	int ret = 1;
+
+	WARN_ON(!path->keep_locks);
+again:
+	cur = btrfs_lock_root_node(root);
+	level = btrfs_header_level(cur);
+	WARN_ON(path->nodes[level]);
+	path->nodes[level] = cur;
+	path->locks[level] = 1;
+
+	if (btrfs_header_generation(cur) < min_trans) {
+		ret = 1;
+		goto out;
+	}
+	while (1) {
+		nritems = btrfs_header_nritems(cur);
+		level = btrfs_header_level(cur);
+		sret = bin_search(cur, min_key, level, &slot);
+
+		/* at the lowest level, we're done, setup the path and exit */
+		if (level == path->lowest_level) {
+			if (slot >= nritems)
+				goto find_next_key;
+			ret = 0;
+			path->slots[level] = slot;
+			btrfs_item_key_to_cpu(cur, &found_key, slot);
+			goto out;
+		}
+		if (sret && slot > 0)
+			slot--;
+		/*
+		 * check this node pointer against the cache_only and
+		 * min_trans parameters.  If it isn't in cache or is too
+		 * old, skip to the next one.
+		 */
+		while (slot < nritems) {
+			u64 blockptr;
+			u64 gen;
+			struct extent_buffer *tmp;
+			struct btrfs_disk_key disk_key;
+
+			blockptr = btrfs_node_blockptr(cur, slot);
+			gen = btrfs_node_ptr_generation(cur, slot);
+			if (gen < min_trans) {
+				slot++;
+				continue;
+			}
+			if (!cache_only)
+				break;
+
+			if (max_key) {
+				btrfs_node_key(cur, &disk_key, slot);
+				if (comp_keys(&disk_key, max_key) >= 0) {
+					ret = 1;
+					goto out;
+				}
+			}
+
+			tmp = btrfs_find_tree_block(root, blockptr,
+					    btrfs_level_size(root, level - 1));
+
+			if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
+				free_extent_buffer(tmp);
+				break;
+			}
+			if (tmp)
+				free_extent_buffer(tmp);
+			slot++;
+		}
+find_next_key:
+		/*
+		 * we didn't find a candidate key in this node, walk forward
+		 * and find another one
+		 */
+		if (slot >= nritems) {
+			path->slots[level] = slot;
+			sret = btrfs_find_next_key(root, path, min_key, level,
+						  cache_only, min_trans);
+			if (sret == 0) {
+				btrfs_release_path(root, path);
+				goto again;
+			} else {
+				goto out;
+			}
+		}
+		/* save our key for returning back */
+		btrfs_node_key_to_cpu(cur, &found_key, slot);
+		path->slots[level] = slot;
+		if (level == path->lowest_level) {
+			ret = 0;
+			unlock_up(path, level, 1);
+			goto out;
+		}
+		cur = read_node_slot(root, cur, slot);
+
+		btrfs_tree_lock(cur);
+		path->locks[level - 1] = 1;
+		path->nodes[level - 1] = cur;
+		unlock_up(path, level, 1);
+	}
+out:
+	if (ret == 0)
+		memcpy(min_key, &found_key, sizeof(found_key));
+	return ret;
+}
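+
+/*
+ * Illustrative use of btrfs_search_forward (a sketch only): walk everything
+ * newer than min_trans, restarting from the key the function stuffs back
+ * into min_key.  The key advancement at the bottom is the caller's job and
+ * is heavily simplified here.
+ *
+ *	path->keep_locks = 1;
+ *	while (1) {
+ *		ret = btrfs_search_forward(root, &min_key, &max_key,
+ *					   path, 0, min_trans);
+ *		if (ret)
+ *			break;
+ *		... process path->nodes[path->lowest_level] ...
+ *		btrfs_release_path(root, path);
+ *		min_key.offset++;
+ *	}
+ */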
+
+/*
+ * this is similar to btrfs_next_leaf, but does not try to preserve
+ * and fixup the path.  It looks for and returns the next key in the
+ * tree based on the current path and the cache_only and min_trans
+ * parameters.
+ *
+ * 0 is returned if another key is found, < 0 if there are any errors
+ * and 1 is returned if there are no higher keys in the tree
+ *
+ * path->keep_locks should be set to 1 on the search made before
+ * calling this function.
+ */
+int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
+			struct btrfs_key *key, int lowest_level,
+			int cache_only, u64 min_trans)
+{
+	int level = lowest_level;
+	int slot;
+	struct extent_buffer *c;
+
+	WARN_ON(!path->keep_locks);
+	while (level < BTRFS_MAX_LEVEL) {
+		if (!path->nodes[level])
+			return 1;
+
+		slot = path->slots[level] + 1;
+		c = path->nodes[level];
+next:
+		if (slot >= btrfs_header_nritems(c)) {
+			level++;
+			if (level == BTRFS_MAX_LEVEL)
+				return 1;
+			continue;
+		}
+		if (level == 0)
+			btrfs_item_key_to_cpu(c, key, slot);
+		else {
+			u64 blockptr = btrfs_node_blockptr(c, slot);
+			u64 gen = btrfs_node_ptr_generation(c, slot);
+
+			if (cache_only) {
+				struct extent_buffer *cur;
+				cur = btrfs_find_tree_block(root, blockptr,
+					    btrfs_level_size(root, level - 1));
+				if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
+					slot++;
+					if (cur)
+						free_extent_buffer(cur);
+					goto next;
+				}
+				free_extent_buffer(cur);
+			}
+			if (gen < min_trans) {
+				slot++;
+				goto next;
+			}
+			btrfs_node_key_to_cpu(c, key, slot);
+		}
+		return 0;
+	}
+	return 1;
+}
+
+/*
+ * search the tree again to find a leaf with greater keys
+ * returns 0 if it found something or 1 if there are no greater leaves.
+ * returns < 0 on io errors.
+ */
+int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
+{
+	int slot;
+	int level = 1;
+	struct extent_buffer *c;
+	struct extent_buffer *next = NULL;
+	struct btrfs_key key;
+	u32 nritems;
+	int ret;
+
+	nritems = btrfs_header_nritems(path->nodes[0]);
+	if (nritems == 0)
+		return 1;
+
+	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
+
+	btrfs_release_path(root, path);
+	path->keep_locks = 1;
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	path->keep_locks = 0;
+
+	if (ret < 0)
+		return ret;
+
+	nritems = btrfs_header_nritems(path->nodes[0]);
+	/*
+	 * by releasing the path above we dropped all our locks.  A balance
+	 * could have added more items next to the key that used to be
+	 * at the very end of the block.  So, check again here and
+	 * advance the path if there are now more items available.
+	 */
+	if (nritems > 0 && path->slots[0] < nritems - 1) {
+		path->slots[0]++;
+		goto done;
+	}
+
+	while (level < BTRFS_MAX_LEVEL) {
+		if (!path->nodes[level])
+			return 1;
+
+		slot = path->slots[level] + 1;
+		c = path->nodes[level];
+		if (slot >= btrfs_header_nritems(c)) {
+			level++;
+			if (level == BTRFS_MAX_LEVEL)
+				return 1;
+			continue;
+		}
+
+		if (next) {
+			btrfs_tree_unlock(next);
+			free_extent_buffer(next);
+		}
+
+		if (level == 1 && (path->locks[1] || path->skip_locking) &&
+		    path->reada)
+			reada_for_search(root, path, level, slot, 0);
+
+		next = read_node_slot(root, c, slot);
+		if (!path->skip_locking) {
+			WARN_ON(!btrfs_tree_locked(c));
+			btrfs_tree_lock(next);
+		}
+		break;
+	}
+	path->slots[level] = slot;
+	while (1) {
+		level--;
+		c = path->nodes[level];
+		if (path->locks[level])
+			btrfs_tree_unlock(c);
+		free_extent_buffer(c);
+		path->nodes[level] = next;
+		path->slots[level] = 0;
+		if (!path->skip_locking)
+			path->locks[level] = 1;
+		if (!level)
+			break;
+		if (level == 1 && path->locks[1] && path->reada)
+			reada_for_search(root, path, level, slot, 0);
+		next = read_node_slot(root, next, 0);
+		if (!path->skip_locking) {
+			WARN_ON(!btrfs_tree_locked(path->nodes[level]));
+			btrfs_tree_lock(next);
+		}
+	}
+done:
+	unlock_up(path, 0, 1);
+	return 0;
+}
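+
+/*
+ * Illustrative iteration pattern (a sketch, not lifted from a real caller):
+ * visit every item at or after 'key', hopping to the next leaf whenever the
+ * current one runs out of items.
+ *
+ *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ *	while (ret >= 0) {
+ *		leaf = path->nodes[0];
+ *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+ *			ret = btrfs_next_leaf(root, path);
+ *			if (ret)
+ *				break;
+ *			continue;
+ *		}
+ *		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+ *		... use the item, then path->slots[0]++ ...
+ *	}
+ */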
+
+/*
+ * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
+ * searching until it gets past min_objectid or finds an item of 'type'
+ *
+ * returns 0 if something is found, 1 if nothing was found and < 0 on error
+ */
+int btrfs_previous_item(struct btrfs_root *root,
+			struct btrfs_path *path, u64 min_objectid,
+			int type)
+{
+	struct btrfs_key found_key;
+	struct extent_buffer *leaf;
+	u32 nritems;
+	int ret;
+
+	while (1) {
+		if (path->slots[0] == 0) {
+			ret = btrfs_prev_leaf(root, path);
+			if (ret != 0)
+				return ret;
+		} else {
+			path->slots[0]--;
+		}
+		leaf = path->nodes[0];
+		nritems = btrfs_header_nritems(leaf);
+		if (nritems == 0)
+			return 1;
+		if (path->slots[0] == nritems)
+			path->slots[0]--;
+
+		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+		if (found_key.type == type)
+			return 0;
+		if (found_key.objectid < min_objectid)
+			break;
+		if (found_key.objectid == min_objectid &&
+		    found_key.type < type)
+			break;
+	}
+	return 1;
+}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
new file mode 100644
index 0000000..eee060f
--- /dev/null
+++ b/fs/btrfs/ctree.h
@@ -0,0 +1,2129 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __BTRFS_CTREE__
+#define __BTRFS_CTREE__
+
+#include <linux/version.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/fs.h>
+#include <linux/completion.h>
+#include <linux/backing-dev.h>
+#include <linux/wait.h>
+#include <asm/kmap_types.h>
+#include "extent_io.h"
+#include "extent_map.h"
+#include "async-thread.h"
+
+struct btrfs_trans_handle;
+struct btrfs_transaction;
+extern struct kmem_cache *btrfs_trans_handle_cachep;
+extern struct kmem_cache *btrfs_transaction_cachep;
+extern struct kmem_cache *btrfs_bit_radix_cachep;
+extern struct kmem_cache *btrfs_path_cachep;
+struct btrfs_ordered_sum;
+
+#define BTRFS_MAGIC "_BHRfS_M"
+
+#define BTRFS_ACL_NOT_CACHED    ((void *)-1)
+
+#ifdef CONFIG_LOCKDEP
+# define BTRFS_MAX_LEVEL 7
+#else
+# define BTRFS_MAX_LEVEL 8
+#endif
+
+/* holds pointers to all of the tree roots */
+#define BTRFS_ROOT_TREE_OBJECTID 1ULL
+
+/* stores information about which extents are in use, and reference counts */
+#define BTRFS_EXTENT_TREE_OBJECTID 2ULL
+
+/*
+ * chunk tree stores translations from logical -> physical block numbering
+ * the super block points to the chunk tree
+ */
+#define BTRFS_CHUNK_TREE_OBJECTID 3ULL
+
+/*
+ * stores information about which areas of a given device are in use.
+ * one per device.  The tree of tree roots points to the device tree
+ */
+#define BTRFS_DEV_TREE_OBJECTID 4ULL
+
+/* one per subvolume, storing files and directories */
+#define BTRFS_FS_TREE_OBJECTID 5ULL
+
+/* directory objectid inside the root tree */
+#define BTRFS_ROOT_TREE_DIR_OBJECTID 6ULL
+
+/* holds checksums of all the data extents */
+#define BTRFS_CSUM_TREE_OBJECTID 7ULL
+
+/* orhpan objectid for tracking unlinked/truncated files */
+#define BTRFS_ORPHAN_OBJECTID -5ULL
+
+/* does write ahead logging to speed up fsyncs */
+#define BTRFS_TREE_LOG_OBJECTID -6ULL
+#define BTRFS_TREE_LOG_FIXUP_OBJECTID -7ULL
+
+/* for space balancing */
+#define BTRFS_TREE_RELOC_OBJECTID -8ULL
+#define BTRFS_DATA_RELOC_TREE_OBJECTID -9ULL
+
+/*
+ * extent checksums all have this objectid
+ * this allows them to share the logging tree
+ * for fsyncs
+ */
+#define BTRFS_EXTENT_CSUM_OBJECTID -10ULL
+
+/* dummy objectid represents multiple objectids */
+#define BTRFS_MULTIPLE_OBJECTIDS -255ULL
+
+/*
+ * All files have objectids in this range.
+ */
+#define BTRFS_FIRST_FREE_OBJECTID 256ULL
+#define BTRFS_LAST_FREE_OBJECTID -256ULL
+#define BTRFS_FIRST_CHUNK_TREE_OBJECTID 256ULL
+
+
+/*
+ * the device items go into the chunk tree.  The key is in the form
+ * [ 1 BTRFS_DEV_ITEM_KEY device_id ]
+ */
+#define BTRFS_DEV_ITEMS_OBJECTID 1ULL
+
+/*
+ * we can actually store much bigger names, but let's not confuse the rest
+ * of linux
+ */
+#define BTRFS_NAME_LEN 255
+
+/* 32 bytes in various csum fields */
+#define BTRFS_CSUM_SIZE 32
+
+/* csum types */
+#define BTRFS_CSUM_TYPE_CRC32	0
+
+static int btrfs_csum_sizes[] = { 4, 0 };
+
+/* four bytes for CRC32 */
+#define BTRFS_EMPTY_DIR_SIZE 0
+
+#define BTRFS_FT_UNKNOWN	0
+#define BTRFS_FT_REG_FILE	1
+#define BTRFS_FT_DIR		2
+#define BTRFS_FT_CHRDEV		3
+#define BTRFS_FT_BLKDEV		4
+#define BTRFS_FT_FIFO		5
+#define BTRFS_FT_SOCK		6
+#define BTRFS_FT_SYMLINK	7
+#define BTRFS_FT_XATTR		8
+#define BTRFS_FT_MAX		9
+
+/*
+ * the key defines the order in the tree, and so it also defines (optimal)
+ * block layout.  objectid corresponds to the inode number.  The type
+ * field tells us things about the object, and is a kind of stream selector.
+ * So for a given inode, keys with a type of 1 might refer to the inode
+ * data, type 2 may point to file data in the btree and type 3
+ * may point to extents.
+ *
+ * offset is the starting byte offset for this key in the stream.
+ *
+ * btrfs_disk_key is in disk byte order.  struct btrfs_key is always
+ * in cpu native order.  Otherwise they are identical and their sizes
+ * should be the same (ie both packed)
+ */
+struct btrfs_disk_key {
+	__le64 objectid;
+	u8 type;
+	__le64 offset;
+} __attribute__ ((__packed__));
+
+struct btrfs_key {
+	u64 objectid;
+	u8 type;
+	u64 offset;
+} __attribute__ ((__packed__));
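+
+/*
+ * For illustration (this just restates the ordering implied above): keys
+ * compare by objectid first, then type, then offset.  So for inode 256,
+ * (256, BTRFS_INODE_ITEM_KEY, 0) sorts ahead of
+ * (256, BTRFS_EXTENT_DATA_KEY, file_offset), which keeps the inode item and
+ * all of that file's extent records clustered together in the leaves.
+ */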
+
+struct btrfs_mapping_tree {
+	struct extent_map_tree map_tree;
+};
+
+#define BTRFS_UUID_SIZE 16
+struct btrfs_dev_item {
+	/* the internal btrfs device id */
+	__le64 devid;
+
+	/* size of the device */
+	__le64 total_bytes;
+
+	/* bytes used */
+	__le64 bytes_used;
+
+	/* optimal io alignment for this device */
+	__le32 io_align;
+
+	/* optimal io width for this device */
+	__le32 io_width;
+
+	/* minimal io size for this device */
+	__le32 sector_size;
+
+	/* type and info about this device */
+	__le64 type;
+
+	/* expected generation for this device */
+	__le64 generation;
+
+	/*
+	 * starting byte of this partition on the device,
+	 * to allow for stripe alignment in the future
+	 */
+	__le64 start_offset;
+
+	/* grouping information for allocation decisions */
+	__le32 dev_group;
+
+	/* seek speed 0-100 where 100 is fastest */
+	u8 seek_speed;
+
+	/* bandwidth 0-100 where 100 is fastest */
+	u8 bandwidth;
+
+	/* btrfs generated uuid for this device */
+	u8 uuid[BTRFS_UUID_SIZE];
+
+	/* uuid of FS who owns this device */
+	u8 fsid[BTRFS_UUID_SIZE];
+} __attribute__ ((__packed__));
+
+struct btrfs_stripe {
+	__le64 devid;
+	__le64 offset;
+	u8 dev_uuid[BTRFS_UUID_SIZE];
+} __attribute__ ((__packed__));
+
+struct btrfs_chunk {
+	/* size of this chunk in bytes */
+	__le64 length;
+
+	/* objectid of the root referencing this chunk */
+	__le64 owner;
+
+	__le64 stripe_len;
+	__le64 type;
+
+	/* optimal io alignment for this chunk */
+	__le32 io_align;
+
+	/* optimal io width for this chunk */
+	__le32 io_width;
+
+	/* minimal io size for this chunk */
+	__le32 sector_size;
+
+	/* 2^16 stripes is quite a lot, a second limit is the size of a single
+	 * item in the btree
+	 */
+	__le16 num_stripes;
+
+	/* sub stripes only matter for raid10 */
+	__le16 sub_stripes;
+	struct btrfs_stripe stripe;
+	/* additional stripes go here */
+} __attribute__ ((__packed__));
+
+static inline unsigned long btrfs_chunk_item_size(int num_stripes)
+{
+	BUG_ON(num_stripes == 0);
+	return sizeof(struct btrfs_chunk) +
+		sizeof(struct btrfs_stripe) * (num_stripes - 1);
+}
+
+#define BTRFS_FSID_SIZE 16
+#define BTRFS_HEADER_FLAG_WRITTEN (1 << 0)
+
+/*
+ * every tree block (leaf or node) starts with this header.
+ */
+struct btrfs_header {
+	/* these first four must match the super block */
+	u8 csum[BTRFS_CSUM_SIZE];
+	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
+	__le64 bytenr; /* which block this node is supposed to live in */
+	__le64 flags;
+
+	/* allowed to be different from the super from here on down */
+	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
+	__le64 generation;
+	__le64 owner;
+	__le32 nritems;
+	u8 level;
+} __attribute__ ((__packed__));
+
+#define BTRFS_NODEPTRS_PER_BLOCK(r) (((r)->nodesize - \
+				      sizeof(struct btrfs_header)) / \
+				     sizeof(struct btrfs_key_ptr))
+#define __BTRFS_LEAF_DATA_SIZE(bs) ((bs) - sizeof(struct btrfs_header))
+#define BTRFS_LEAF_DATA_SIZE(r) (__BTRFS_LEAF_DATA_SIZE(r->leafsize))
+#define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \
+					sizeof(struct btrfs_item) - \
+					sizeof(struct btrfs_file_extent_item))
+
+#define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32)
+
+/*
+ * this is a very generous portion of the super block, giving us
+ * room to translate 14 chunks with 3 stripes each.
+ */
+#define BTRFS_SYSTEM_CHUNK_ARRAY_SIZE 2048
+#define BTRFS_LABEL_SIZE 256
+
+/*
+ * the super block basically lists the main trees of the FS
+ * it currently lacks any block count etc etc
+ */
+struct btrfs_super_block {
+	u8 csum[BTRFS_CSUM_SIZE];
+	/* the first 4 fields must match struct btrfs_header */
+	u8 fsid[BTRFS_FSID_SIZE];    /* FS specific uuid */
+	__le64 bytenr; /* this block number */
+	__le64 flags;
+
+	/* allowed to be different from the btrfs_header from here on down */
+	__le64 magic;
+	__le64 generation;
+	__le64 root;
+	__le64 chunk_root;
+	__le64 log_root;
+
+	/* this will help find the new super based on the log root */
+	__le64 log_root_transid;
+	__le64 total_bytes;
+	__le64 bytes_used;
+	__le64 root_dir_objectid;
+	__le64 num_devices;
+	__le32 sectorsize;
+	__le32 nodesize;
+	__le32 leafsize;
+	__le32 stripesize;
+	__le32 sys_chunk_array_size;
+	__le64 chunk_root_generation;
+	__le64 compat_flags;
+	__le64 compat_ro_flags;
+	__le64 incompat_flags;
+	__le16 csum_type;
+	u8 root_level;
+	u8 chunk_root_level;
+	u8 log_root_level;
+	struct btrfs_dev_item dev_item;
+
+	char label[BTRFS_LABEL_SIZE];
+
+	/* future expansion */
+	__le64 reserved[32];
+	u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE];
+} __attribute__ ((__packed__));
+
+/*
+ * Compat flags that we support.  If any incompat flags are set other than the
+ * ones specified below then we will fail to mount
+ */
+#define BTRFS_FEATURE_COMPAT_SUPP	0x0
+#define BTRFS_FEATURE_COMPAT_RO_SUPP	0x0
+#define BTRFS_FEATURE_INCOMPAT_SUPP	0x0
+
+/*
+ * A leaf is full of items. offset and size tell us where to find
+ * the item in the leaf (relative to the start of the data area)
+ */
+struct btrfs_item {
+	struct btrfs_disk_key key;
+	__le32 offset;
+	__le32 size;
+} __attribute__ ((__packed__));
+
+/*
+ * leaves have an item area and a data area:
+ * [item0, item1....itemN] [free space] [dataN...data1, data0]
+ *
+ * The data is separate from the items to get the keys closer together
+ * during searches.
+ */
+struct btrfs_leaf {
+	struct btrfs_header header;
+	struct btrfs_item items[];
+} __attribute__ ((__packed__));
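+
+/*
+ * A worked example of the layout above (the 4096 is illustrative only):
+ * with a 4096 byte data area, the first item stored gets
+ * offset = 4096 - size0, so its payload ends flush against the end of the
+ * data area; the next gets offset = 4096 - size0 - size1, and so on.
+ * Offsets are relative to the start of the data area, an item's data ends
+ * at offset + size, and the free space is the gap between the last
+ * struct btrfs_item and the lowest data offset.
+ */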
+
+/*
+ * all non-leaf blocks are nodes, they hold only keys and pointers to
+ * other blocks
+ */
+struct btrfs_key_ptr {
+	struct btrfs_disk_key key;
+	__le64 blockptr;
+	__le64 generation;
+} __attribute__ ((__packed__));
+
+struct btrfs_node {
+	struct btrfs_header header;
+	struct btrfs_key_ptr ptrs[];
+} __attribute__ ((__packed__));
+
+/*
+ * btrfs_paths remember the path taken from the root down to the leaf.
+ * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
+ * to any other levels that are present.
+ *
+ * The slots array records the index of the item or block pointer
+ * used while walking the tree.
+ */
+struct btrfs_path {
+	struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
+	int slots[BTRFS_MAX_LEVEL];
+	/* if there is real range locking, this locks field will change */
+	int locks[BTRFS_MAX_LEVEL];
+	int reada;
+	/* keep some upper locks as we walk down */
+	int keep_locks;
+	int skip_locking;
+	int lowest_level;
+
+	/*
+	 * set by btrfs_split_item, tells search_slot to keep all locks
+	 * and to force calls to keep space in the nodes
+	 */
+	int search_for_split;
+};
+
+/*
+ * items in the extent btree are used to record the objectid of the
+ * owner of the block and the number of references
+ */
+struct btrfs_extent_item {
+	__le32 refs;
+} __attribute__ ((__packed__));
+
+struct btrfs_extent_ref {
+	__le64 root;
+	__le64 generation;
+	__le64 objectid;
+	__le32 num_refs;
+} __attribute__ ((__packed__));
+
+/* dev extents record free space on individual devices.  The owner
+ * field points back to the chunk allocation mapping tree that allocated
+ * the extent.  The chunk tree uuid field is a way to double check the owner
+ */
+struct btrfs_dev_extent {
+	__le64 chunk_tree;
+	__le64 chunk_objectid;
+	__le64 chunk_offset;
+	__le64 length;
+	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
+} __attribute__ ((__packed__));
+
+struct btrfs_inode_ref {
+	__le64 index;
+	__le16 name_len;
+	/* name goes here */
+} __attribute__ ((__packed__));
+
+struct btrfs_timespec {
+	__le64 sec;
+	__le32 nsec;
+} __attribute__ ((__packed__));
+
+typedef enum {
+	BTRFS_COMPRESS_NONE = 0,
+	BTRFS_COMPRESS_ZLIB = 1,
+	BTRFS_COMPRESS_LAST = 2,
+} btrfs_compression_type;
+
+/* we don't understand any encryption methods right now */
+typedef enum {
+	BTRFS_ENCRYPTION_NONE = 0,
+	BTRFS_ENCRYPTION_LAST = 1,
+} btrfs_encryption_type;
+
+struct btrfs_inode_item {
+	/* nfs style generation number */
+	__le64 generation;
+	/* transid that last touched this inode */
+	__le64 transid;
+	__le64 size;
+	__le64 nbytes;
+	__le64 block_group;
+	__le32 nlink;
+	__le32 uid;
+	__le32 gid;
+	__le32 mode;
+	__le64 rdev;
+	__le64 flags;
+
+	/* modification sequence number for NFS */
+	__le64 sequence;
+
+	/*
+	 * a little future expansion, for more than this we can
+	 * just grow the inode item and version it
+	 */
+	__le64 reserved[4];
+	struct btrfs_timespec atime;
+	struct btrfs_timespec ctime;
+	struct btrfs_timespec mtime;
+	struct btrfs_timespec otime;
+} __attribute__ ((__packed__));
+
+struct btrfs_dir_log_item {
+	__le64 end;
+} __attribute__ ((__packed__));
+
+struct btrfs_dir_item {
+	struct btrfs_disk_key location;
+	__le64 transid;
+	__le16 data_len;
+	__le16 name_len;
+	u8 type;
+} __attribute__ ((__packed__));
+
+struct btrfs_root_item {
+	struct btrfs_inode_item inode;
+	__le64 generation;
+	__le64 root_dirid;
+	__le64 bytenr;
+	__le64 byte_limit;
+	__le64 bytes_used;
+	__le64 last_snapshot;
+	__le64 flags;
+	__le32 refs;
+	struct btrfs_disk_key drop_progress;
+	u8 drop_level;
+	u8 level;
+} __attribute__ ((__packed__));
+
+/*
+ * this is used for both forward and backward root refs
+ */
+struct btrfs_root_ref {
+	__le64 dirid;
+	__le64 sequence;
+	__le16 name_len;
+} __attribute__ ((__packed__));
+
+#define BTRFS_FILE_EXTENT_INLINE 0
+#define BTRFS_FILE_EXTENT_REG 1
+#define BTRFS_FILE_EXTENT_PREALLOC 2
+
+struct btrfs_file_extent_item {
+	/*
+	 * transaction id that created this extent
+	 */
+	__le64 generation;
+	/*
+	 * max number of bytes to hold this extent in ram
+	 * when we split a compressed extent we can't know how big
+	 * each of the resulting pieces will be.  So, this is
+	 * an upper limit on the size of the extent in ram instead of
+	 * an exact limit.
+	 */
+	__le64 ram_bytes;
+
+	/*
+	 * 32 bits for the various ways we might encode the data,
+	 * including compression and encryption.  If any of these
+	 * are set to something a given disk format doesn't understand
+	 * it is treated like an incompat flag for reading and writing,
+	 * but not for stat.
+	 */
+	u8 compression;
+	u8 encryption;
+	__le16 other_encoding; /* spare for later use */
+
+	/* are we inline data or a real extent? */
+	u8 type;
+
+	/*
+	 * disk space consumed by the extent, checksum blocks are included
+	 * in these numbers
+	 */
+	__le64 disk_bytenr;
+	__le64 disk_num_bytes;
+	/*
+	 * the logical offset in file blocks (no csums)
+	 * this extent record is for.  This allows a file extent to point
+	 * into the middle of an existing extent on disk, sharing it
+	 * between two snapshots (useful if some bytes in the middle of the
+	 * extent have changed)
+	 */
+	__le64 offset;
+	/*
+	 * the logical number of file blocks (no csums included).  This
+	 * always reflects the size uncompressed and without encoding.
+	 */
+	__le64 num_bytes;
+
+} __attribute__ ((__packed__));
+
+struct btrfs_csum_item {
+	u8 csum;
+} __attribute__ ((__packed__));
+
+/* different types of block groups (and chunks) */
+#define BTRFS_BLOCK_GROUP_DATA     (1 << 0)
+#define BTRFS_BLOCK_GROUP_SYSTEM   (1 << 1)
+#define BTRFS_BLOCK_GROUP_METADATA (1 << 2)
+#define BTRFS_BLOCK_GROUP_RAID0    (1 << 3)
+#define BTRFS_BLOCK_GROUP_RAID1    (1 << 4)
+#define BTRFS_BLOCK_GROUP_DUP	   (1 << 5)
+#define BTRFS_BLOCK_GROUP_RAID10   (1 << 6)
+
+struct btrfs_block_group_item {
+	__le64 used;
+	__le64 chunk_objectid;
+	__le64 flags;
+} __attribute__ ((__packed__));
+
+struct btrfs_space_info {
+	u64 flags;
+	u64 total_bytes;
+	u64 bytes_used;
+	u64 bytes_pinned;
+	u64 bytes_reserved;
+	u64 bytes_readonly;
+	int full;
+	int force_alloc;
+	struct list_head list;
+
+	/* for block groups in our same type */
+	struct list_head block_groups;
+	spinlock_t lock;
+	struct rw_semaphore groups_sem;
+};
+
+struct btrfs_free_space {
+	struct rb_node bytes_index;
+	struct rb_node offset_index;
+	u64 offset;
+	u64 bytes;
+};
+
+struct btrfs_block_group_cache {
+	struct btrfs_key key;
+	struct btrfs_block_group_item item;
+	spinlock_t lock;
+	struct mutex alloc_mutex;
+	struct mutex cache_mutex;
+	u64 pinned;
+	u64 reserved;
+	u64 flags;
+	int cached;
+	int ro;
+	int dirty;
+
+	struct btrfs_space_info *space_info;
+
+	/* free space cache stuff */
+	struct rb_root free_space_bytes;
+	struct rb_root free_space_offset;
+
+	/* block group cache stuff */
+	struct rb_node cache_node;
+
+	/* for block groups in the same raid type */
+	struct list_head list;
+
+	/* usage count */
+	atomic_t count;
+};
+
+struct btrfs_leaf_ref_tree {
+	struct rb_root root;
+	struct list_head list;
+	spinlock_t lock;
+};
+
+struct btrfs_device;
+struct btrfs_fs_devices;
+struct btrfs_fs_info {
+	u8 fsid[BTRFS_FSID_SIZE];
+	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
+	struct btrfs_root *extent_root;
+	struct btrfs_root *tree_root;
+	struct btrfs_root *chunk_root;
+	struct btrfs_root *dev_root;
+	struct btrfs_root *fs_root;
+	struct btrfs_root *csum_root;
+
+	/* the log root tree is a directory of all the other log roots */
+	struct btrfs_root *log_root_tree;
+	struct radix_tree_root fs_roots_radix;
+
+	/* block group cache stuff */
+	spinlock_t block_group_cache_lock;
+	struct rb_root block_group_cache_tree;
+
+	struct extent_io_tree pinned_extents;
+	struct extent_io_tree pending_del;
+	struct extent_io_tree extent_ins;
+
+	/* logical->physical extent mapping */
+	struct btrfs_mapping_tree mapping_tree;
+
+	u64 generation;
+	u64 last_trans_committed;
+	u64 last_trans_new_blockgroup;
+	u64 open_ioctl_trans;
+	unsigned long mount_opt;
+	u64 max_extent;
+	u64 max_inline;
+	u64 alloc_start;
+	struct btrfs_transaction *running_transaction;
+	wait_queue_head_t transaction_throttle;
+	wait_queue_head_t transaction_wait;
+
+	wait_queue_head_t async_submit_wait;
+	wait_queue_head_t tree_log_wait;
+
+	struct btrfs_super_block super_copy;
+	struct btrfs_super_block super_for_commit;
+	struct block_device *__bdev;
+	struct super_block *sb;
+	struct inode *btree_inode;
+	struct backing_dev_info bdi;
+	spinlock_t hash_lock;
+	struct mutex trans_mutex;
+	struct mutex tree_log_mutex;
+	struct mutex transaction_kthread_mutex;
+	struct mutex cleaner_mutex;
+	struct mutex extent_ins_mutex;
+	struct mutex pinned_mutex;
+	struct mutex chunk_mutex;
+	struct mutex drop_mutex;
+	struct mutex volume_mutex;
+	struct mutex tree_reloc_mutex;
+	struct list_head trans_list;
+	struct list_head hashers;
+	struct list_head dead_roots;
+
+	atomic_t nr_async_submits;
+	atomic_t async_submit_draining;
+	atomic_t nr_async_bios;
+	atomic_t async_delalloc_pages;
+	atomic_t tree_log_writers;
+	atomic_t tree_log_commit;
+	unsigned long tree_log_batch;
+	u64 tree_log_transid;
+
+	/*
+	 * this is used by the balancing code to wait for all the pending
+	 * ordered extents
+	 */
+	spinlock_t ordered_extent_lock;
+	struct list_head ordered_extents;
+	struct list_head delalloc_inodes;
+
+	/*
+	 * there is a pool of worker threads for checksumming during writes
+	 * and a pool for checksumming after reads.  This is because readers
+	 * can run with FS locks held, and the writers may be waiting for
+	 * those locks.  We don't want ordering in the pending list to cause
+	 * deadlocks, and so the two are serviced separately.
+	 *
+	 * A third pool does submit_bio to avoid deadlocking with the other
+	 * two
+	 */
+	struct btrfs_workers workers;
+	struct btrfs_workers delalloc_workers;
+	struct btrfs_workers endio_workers;
+	struct btrfs_workers endio_meta_workers;
+	struct btrfs_workers endio_meta_write_workers;
+	struct btrfs_workers endio_write_workers;
+	struct btrfs_workers submit_workers;
+	/*
+	 * fixup workers take dirty pages that didn't properly go through
+	 * the cow mechanism and make them safe to write.  It happens
+	 * for the sys_munmap function call path
+	 */
+	struct btrfs_workers fixup_workers;
+	struct task_struct *transaction_kthread;
+	struct task_struct *cleaner_kthread;
+	int thread_pool_size;
+
+	/* tree relocation related fields */
+	struct list_head dead_reloc_roots;
+	struct btrfs_leaf_ref_tree reloc_ref_tree;
+	struct btrfs_leaf_ref_tree shared_ref_tree;
+
+	struct kobject super_kobj;
+	struct completion kobj_unregister;
+	int do_barriers;
+	int closing;
+	int log_root_recovering;
+	atomic_t throttles;
+	atomic_t throttle_gen;
+
+	u64 total_pinned;
+	struct list_head dirty_cowonly_roots;
+
+	struct btrfs_fs_devices *fs_devices;
+	struct list_head space_info;
+	spinlock_t delalloc_lock;
+	spinlock_t new_trans_lock;
+	u64 delalloc_bytes;
+	u64 last_alloc;
+	u64 last_data_alloc;
+
+	spinlock_t ref_cache_lock;
+	u64 total_ref_cache_size;
+
+	u64 avail_data_alloc_bits;
+	u64 avail_metadata_alloc_bits;
+	u64 avail_system_alloc_bits;
+	u64 data_alloc_profile;
+	u64 metadata_alloc_profile;
+	u64 system_alloc_profile;
+
+	void *bdev_holder;
+};
+
+/*
+ * in ram representation of the tree.  extent_root is used for all allocations
+ * and for the extent tree extent_root root.
+ */
+struct btrfs_dirty_root;
+struct btrfs_root {
+	struct extent_buffer *node;
+
+	/* the node lock is held while changing the node pointer */
+	spinlock_t node_lock;
+
+	struct extent_buffer *commit_root;
+	struct btrfs_leaf_ref_tree *ref_tree;
+	struct btrfs_leaf_ref_tree ref_tree_struct;
+	struct btrfs_dirty_root *dirty_root;
+	struct btrfs_root *log_root;
+	struct btrfs_root *reloc_root;
+
+	struct btrfs_root_item root_item;
+	struct btrfs_key root_key;
+	struct btrfs_fs_info *fs_info;
+	struct extent_io_tree dirty_log_pages;
+
+	struct kobject root_kobj;
+	struct completion kobj_unregister;
+	struct mutex objectid_mutex;
+	struct mutex log_mutex;
+
+	u64 objectid;
+	u64 last_trans;
+
+	/* data allocations are done in sectorsize units */
+	u32 sectorsize;
+
+	/* node allocations are done in nodesize units */
+	u32 nodesize;
+
+	/* leaf allocations are done in leafsize units */
+	u32 leafsize;
+
+	u32 stripesize;
+
+	u32 type;
+	u64 highest_inode;
+	u64 last_inode_alloc;
+	int ref_cows;
+	int track_dirty;
+	u64 defrag_trans_start;
+	struct btrfs_key defrag_progress;
+	struct btrfs_key defrag_max;
+	int defrag_running;
+	int defrag_level;
+	char *name;
+	int in_sysfs;
+
+	/* the dirty list is only used by non-reference counted roots */
+	struct list_head dirty_list;
+
+	spinlock_t list_lock;
+	struct list_head dead_list;
+	struct list_head orphan_list;
+
+	/*
+	 * right now this just gets used so that a root has its own devid
+	 * for stat.  It may be used for more later
+	 */
+	struct super_block anon_super;
+};
+
+/*
+ * inode items have the data typically returned from stat and store other
+ * info about object characteristics.  There is one for every file and dir in
+ * the FS
+ */
+#define BTRFS_INODE_ITEM_KEY		1
+#define BTRFS_INODE_REF_KEY		12
+#define BTRFS_XATTR_ITEM_KEY		24
+#define BTRFS_ORPHAN_ITEM_KEY		48
+/* reserve 2-15 close to the inode for later flexibility */
+
+/*
+ * dir items are the name -> inode pointers in a directory.  There is one
+ * for every name in a directory.
+ */
+#define BTRFS_DIR_LOG_ITEM_KEY  60
+#define BTRFS_DIR_LOG_INDEX_KEY 72
+#define BTRFS_DIR_ITEM_KEY	84
+#define BTRFS_DIR_INDEX_KEY	96
+/*
+ * extent data is for file data
+ */
+#define BTRFS_EXTENT_DATA_KEY	108
+
+/*
+ * extent csums are stored in a separate tree and hold csums for
+ * an entire extent on disk.
+ */
+#define BTRFS_EXTENT_CSUM_KEY	128
+
+/*
+ * root items point to tree roots.  These are typically in the root
+ * tree used by the super block to find all the other trees
+ */
+#define BTRFS_ROOT_ITEM_KEY	132
+
+/*
+ * root backrefs tie subvols and snapshots to the directory entries that
+ * reference them
+ */
+#define BTRFS_ROOT_BACKREF_KEY	144
+
+/*
+ * root refs make a fast index for listing all of the snapshots and
+ * subvolumes referenced by a given root.  They point directly to the
+ * directory item in the root that references the subvol
+ */
+#define BTRFS_ROOT_REF_KEY	156
+
+/*
+ * extent items are in the extent map tree.  These record which blocks
+ * are used, and how many references there are to each block
+ */
+#define BTRFS_EXTENT_ITEM_KEY	168
+#define BTRFS_EXTENT_REF_KEY	180
+
+/*
+ * block groups give us hints into the extent allocation trees.  Which
+ * blocks are free etc etc
+ */
+#define BTRFS_BLOCK_GROUP_ITEM_KEY 192
+
+#define BTRFS_DEV_EXTENT_KEY	204
+#define BTRFS_DEV_ITEM_KEY	216
+#define BTRFS_CHUNK_ITEM_KEY	228
+
+/*
+ * string items are for debugging.  They just store a short string of
+ * data in the FS
+ */
+#define BTRFS_STRING_ITEM_KEY	253
+
+#define BTRFS_MOUNT_NODATASUM		(1 << 0)
+#define BTRFS_MOUNT_NODATACOW		(1 << 1)
+#define BTRFS_MOUNT_NOBARRIER		(1 << 2)
+#define BTRFS_MOUNT_SSD			(1 << 3)
+#define BTRFS_MOUNT_DEGRADED		(1 << 4)
+#define BTRFS_MOUNT_COMPRESS		(1 << 5)
+
+#define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
+#define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
+#define btrfs_test_opt(root, opt)	((root)->fs_info->mount_opt & \
+					 BTRFS_MOUNT_##opt)
+/*
+ * Inode flags
+ */
+#define BTRFS_INODE_NODATASUM		(1 << 0)
+#define BTRFS_INODE_NODATACOW		(1 << 1)
+#define BTRFS_INODE_READONLY		(1 << 2)
+#define BTRFS_INODE_NOCOMPRESS		(1 << 3)
+#define BTRFS_INODE_PREALLOC		(1 << 4)
+#define btrfs_clear_flag(inode, flag)	(BTRFS_I(inode)->flags &= \
+					 ~BTRFS_INODE_##flag)
+#define btrfs_set_flag(inode, flag)	(BTRFS_I(inode)->flags |= \
+					 BTRFS_INODE_##flag)
+#define btrfs_test_flag(inode, flag)	(BTRFS_I(inode)->flags & \
+					 BTRFS_INODE_##flag)
+/* some macros to generate set/get funcs for the struct fields.  This
+ * assumes there is a lefoo_to_cpu for every type, so let's make a simple
+ * one for u8:
+ */
+#define le8_to_cpu(v) (v)
+#define cpu_to_le8(v) (v)
+#define __le8 u8
+
+#define read_eb_member(eb, ptr, type, member, result) (			\
+	read_extent_buffer(eb, (char *)(result),			\
+			   ((unsigned long)(ptr)) +			\
+			    offsetof(type, member),			\
+			   sizeof(((type *)0)->member)))
+
+#define write_eb_member(eb, ptr, type, member, result) (		\
+	write_extent_buffer(eb, (char *)(result),			\
+			   ((unsigned long)(ptr)) +			\
+			    offsetof(type, member),			\
+			   sizeof(((type *)0)->member)))
+
+#ifndef BTRFS_SETGET_FUNCS
+#define BTRFS_SETGET_FUNCS(name, type, member, bits)			\
+u##bits btrfs_##name(struct extent_buffer *eb, type *s);		\
+void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val);
+#endif
+
+#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits)		\
+static inline u##bits btrfs_##name(struct extent_buffer *eb)		\
+{									\
+	type *p = kmap_atomic(eb->first_page, KM_USER0);		\
+	u##bits res = le##bits##_to_cpu(p->member);			\
+	kunmap_atomic(p, KM_USER0);					\
+	return res;							\
+}									\
+static inline void btrfs_set_##name(struct extent_buffer *eb,		\
+				    u##bits val)			\
+{									\
+	type *p = kmap_atomic(eb->first_page, KM_USER0);		\
+	p->member = cpu_to_le##bits(val);				\
+	kunmap_atomic(p, KM_USER0);					\
+}
+
+#define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits)		\
+static inline u##bits btrfs_##name(type *s)				\
+{									\
+	return le##bits##_to_cpu(s->member);				\
+}									\
+static inline void btrfs_set_##name(type *s, u##bits val)		\
+{									\
+	s->member = cpu_to_le##bits(val);				\
+}
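+
+/*
+ * As an example of what the macros above generate (nothing new, just the
+ * expansion spelled out):
+ *
+ *	BTRFS_SETGET_STACK_FUNCS(stack_device_type, struct btrfs_dev_item,
+ *				 type, 64);
+ *
+ * gives btrfs_stack_device_type(struct btrfs_dev_item *s), which returns
+ * the __le64 'type' member in cpu order, plus the matching
+ * btrfs_set_stack_device_type() setter.  BTRFS_SETGET_FUNCS only declares
+ * its extent-buffer based pair here; the #ifndef guard lets the bodies be
+ * generated elsewhere by redefining the macro.
+ */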
+
+BTRFS_SETGET_FUNCS(device_type, struct btrfs_dev_item, type, 64);
+BTRFS_SETGET_FUNCS(device_total_bytes, struct btrfs_dev_item, total_bytes, 64);
+BTRFS_SETGET_FUNCS(device_bytes_used, struct btrfs_dev_item, bytes_used, 64);
+BTRFS_SETGET_FUNCS(device_io_align, struct btrfs_dev_item, io_align, 32);
+BTRFS_SETGET_FUNCS(device_io_width, struct btrfs_dev_item, io_width, 32);
+BTRFS_SETGET_FUNCS(device_start_offset, struct btrfs_dev_item,
+		   start_offset, 64);
+BTRFS_SETGET_FUNCS(device_sector_size, struct btrfs_dev_item, sector_size, 32);
+BTRFS_SETGET_FUNCS(device_id, struct btrfs_dev_item, devid, 64);
+BTRFS_SETGET_FUNCS(device_group, struct btrfs_dev_item, dev_group, 32);
+BTRFS_SETGET_FUNCS(device_seek_speed, struct btrfs_dev_item, seek_speed, 8);
+BTRFS_SETGET_FUNCS(device_bandwidth, struct btrfs_dev_item, bandwidth, 8);
+BTRFS_SETGET_FUNCS(device_generation, struct btrfs_dev_item, generation, 64);
+
+BTRFS_SETGET_STACK_FUNCS(stack_device_type, struct btrfs_dev_item, type, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_device_total_bytes, struct btrfs_dev_item,
+			 total_bytes, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_device_bytes_used, struct btrfs_dev_item,
+			 bytes_used, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_device_io_align, struct btrfs_dev_item,
+			 io_align, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_device_io_width, struct btrfs_dev_item,
+			 io_width, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_device_sector_size, struct btrfs_dev_item,
+			 sector_size, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_device_id, struct btrfs_dev_item, devid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_device_group, struct btrfs_dev_item,
+			 dev_group, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_device_seek_speed, struct btrfs_dev_item,
+			 seek_speed, 8);
+BTRFS_SETGET_STACK_FUNCS(stack_device_bandwidth, struct btrfs_dev_item,
+			 bandwidth, 8);
+BTRFS_SETGET_STACK_FUNCS(stack_device_generation, struct btrfs_dev_item,
+			 generation, 64);
+
+static inline char *btrfs_device_uuid(struct btrfs_dev_item *d)
+{
+	return (char *)d + offsetof(struct btrfs_dev_item, uuid);
+}
+
+static inline char *btrfs_device_fsid(struct btrfs_dev_item *d)
+{
+	return (char *)d + offsetof(struct btrfs_dev_item, fsid);
+}
+
+BTRFS_SETGET_FUNCS(chunk_length, struct btrfs_chunk, length, 64);
+BTRFS_SETGET_FUNCS(chunk_owner, struct btrfs_chunk, owner, 64);
+BTRFS_SETGET_FUNCS(chunk_stripe_len, struct btrfs_chunk, stripe_len, 64);
+BTRFS_SETGET_FUNCS(chunk_io_align, struct btrfs_chunk, io_align, 32);
+BTRFS_SETGET_FUNCS(chunk_io_width, struct btrfs_chunk, io_width, 32);
+BTRFS_SETGET_FUNCS(chunk_sector_size, struct btrfs_chunk, sector_size, 32);
+BTRFS_SETGET_FUNCS(chunk_type, struct btrfs_chunk, type, 64);
+BTRFS_SETGET_FUNCS(chunk_num_stripes, struct btrfs_chunk, num_stripes, 16);
+BTRFS_SETGET_FUNCS(chunk_sub_stripes, struct btrfs_chunk, sub_stripes, 16);
+BTRFS_SETGET_FUNCS(stripe_devid, struct btrfs_stripe, devid, 64);
+BTRFS_SETGET_FUNCS(stripe_offset, struct btrfs_stripe, offset, 64);
+
+static inline char *btrfs_stripe_dev_uuid(struct btrfs_stripe *s)
+{
+	return (char *)s + offsetof(struct btrfs_stripe, dev_uuid);
+}
+
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_length, struct btrfs_chunk, length, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_owner, struct btrfs_chunk, owner, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_stripe_len, struct btrfs_chunk,
+			 stripe_len, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_align, struct btrfs_chunk,
+			 io_align, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_width, struct btrfs_chunk,
+			 io_width, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_sector_size, struct btrfs_chunk,
+			 sector_size, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_type, struct btrfs_chunk, type, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_num_stripes, struct btrfs_chunk,
+			 num_stripes, 16);
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_sub_stripes, struct btrfs_chunk,
+			 sub_stripes, 16);
+BTRFS_SETGET_STACK_FUNCS(stack_stripe_devid, struct btrfs_stripe, devid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_stripe_offset, struct btrfs_stripe, offset, 64);
+
+static inline struct btrfs_stripe *btrfs_stripe_nr(struct btrfs_chunk *c,
+						   int nr)
+{
+	unsigned long offset = (unsigned long)c;
+	offset += offsetof(struct btrfs_chunk, stripe);
+	offset += nr * sizeof(struct btrfs_stripe);
+	return (struct btrfs_stripe *)offset;
+}
+
+static inline char *btrfs_stripe_dev_uuid_nr(struct btrfs_chunk *c, int nr)
+{
+	return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr));
+}
+
+static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb,
+					 struct btrfs_chunk *c, int nr)
+{
+	return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr));
+}
+
+static inline void btrfs_set_stripe_offset_nr(struct extent_buffer *eb,
+					     struct btrfs_chunk *c, int nr,
+					     u64 val)
+{
+	btrfs_set_stripe_offset(eb, btrfs_stripe_nr(c, nr), val);
+}
+
+static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb,
+					 struct btrfs_chunk *c, int nr)
+{
+	return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr));
+}
+
+static inline void btrfs_set_stripe_devid_nr(struct extent_buffer *eb,
+					     struct btrfs_chunk *c, int nr,
+					     u64 val)
+{
+	btrfs_set_stripe_devid(eb, btrfs_stripe_nr(c, nr), val);
+}
+
+/* struct btrfs_block_group_item */
+BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item,
+			 used, 64);
+BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item,
+			 used, 64);
+BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid,
+			struct btrfs_block_group_item, chunk_objectid, 64);
+
+BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid,
+		   struct btrfs_block_group_item, chunk_objectid, 64);
+BTRFS_SETGET_FUNCS(disk_block_group_flags,
+		   struct btrfs_block_group_item, flags, 64);
+BTRFS_SETGET_STACK_FUNCS(block_group_flags,
+			struct btrfs_block_group_item, flags, 64);
+
+/* struct btrfs_inode_ref */
+BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16);
+BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64);
+
+/* struct btrfs_inode_item */
+BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item, generation, 64);
+BTRFS_SETGET_FUNCS(inode_sequence, struct btrfs_inode_item, sequence, 64);
+BTRFS_SETGET_FUNCS(inode_transid, struct btrfs_inode_item, transid, 64);
+BTRFS_SETGET_FUNCS(inode_size, struct btrfs_inode_item, size, 64);
+BTRFS_SETGET_FUNCS(inode_nbytes, struct btrfs_inode_item, nbytes, 64);
+BTRFS_SETGET_FUNCS(inode_block_group, struct btrfs_inode_item, block_group, 64);
+BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32);
+BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32);
+BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32);
+BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32);
+BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64);
+BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64);
+
+static inline struct btrfs_timespec *
+btrfs_inode_atime(struct btrfs_inode_item *inode_item)
+{
+	unsigned long ptr = (unsigned long)inode_item;
+	ptr += offsetof(struct btrfs_inode_item, atime);
+	return (struct btrfs_timespec *)ptr;
+}
+
+static inline struct btrfs_timespec *
+btrfs_inode_mtime(struct btrfs_inode_item *inode_item)
+{
+	unsigned long ptr = (unsigned long)inode_item;
+	ptr += offsetof(struct btrfs_inode_item, mtime);
+	return (struct btrfs_timespec *)ptr;
+}
+
+static inline struct btrfs_timespec *
+btrfs_inode_ctime(struct btrfs_inode_item *inode_item)
+{
+	unsigned long ptr = (unsigned long)inode_item;
+	ptr += offsetof(struct btrfs_inode_item, ctime);
+	return (struct btrfs_timespec *)ptr;
+}
+
+static inline struct btrfs_timespec *
+btrfs_inode_otime(struct btrfs_inode_item *inode_item)
+{
+	unsigned long ptr = (unsigned long)inode_item;
+	ptr += offsetof(struct btrfs_inode_item, otime);
+	return (struct btrfs_timespec *)ptr;
+}
+
+BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64);
+BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
+
+/* struct btrfs_dev_extent */
+BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent,
+		   chunk_tree, 64);
+BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent,
+		   chunk_objectid, 64);
+BTRFS_SETGET_FUNCS(dev_extent_chunk_offset, struct btrfs_dev_extent,
+		   chunk_offset, 64);
+BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64);
+
+static inline u8 *btrfs_dev_extent_chunk_tree_uuid(struct btrfs_dev_extent *dev)
+{
+	unsigned long ptr = offsetof(struct btrfs_dev_extent, chunk_tree_uuid);
+	return (u8 *)((unsigned long)dev + ptr);
+}
+
+/* struct btrfs_extent_ref */
+BTRFS_SETGET_FUNCS(ref_root, struct btrfs_extent_ref, root, 64);
+BTRFS_SETGET_FUNCS(ref_generation, struct btrfs_extent_ref, generation, 64);
+BTRFS_SETGET_FUNCS(ref_objectid, struct btrfs_extent_ref, objectid, 64);
+BTRFS_SETGET_FUNCS(ref_num_refs, struct btrfs_extent_ref, num_refs, 32);
+
+BTRFS_SETGET_STACK_FUNCS(stack_ref_root, struct btrfs_extent_ref, root, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_ref_generation, struct btrfs_extent_ref,
+			 generation, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_ref_objectid, struct btrfs_extent_ref,
+			 objectid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_ref_num_refs, struct btrfs_extent_ref,
+			 num_refs, 32);
+
+/* struct btrfs_extent_item */
+BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_extent_refs, struct btrfs_extent_item,
+			 refs, 32);
+
+/* struct btrfs_node */
+BTRFS_SETGET_FUNCS(key_blockptr, struct btrfs_key_ptr, blockptr, 64);
+BTRFS_SETGET_FUNCS(key_generation, struct btrfs_key_ptr, generation, 64);
+
+static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr)
+{
+	unsigned long ptr;
+	ptr = offsetof(struct btrfs_node, ptrs) +
+		sizeof(struct btrfs_key_ptr) * nr;
+	return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr);
+}
+
+static inline void btrfs_set_node_blockptr(struct extent_buffer *eb,
+					   int nr, u64 val)
+{
+	unsigned long ptr;
+	ptr = offsetof(struct btrfs_node, ptrs) +
+		sizeof(struct btrfs_key_ptr) * nr;
+	btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val);
+}
+
+static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr)
+{
+	unsigned long ptr;
+	ptr = offsetof(struct btrfs_node, ptrs) +
+		sizeof(struct btrfs_key_ptr) * nr;
+	return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr);
+}
+
+static inline void btrfs_set_node_ptr_generation(struct extent_buffer *eb,
+						 int nr, u64 val)
+{
+	unsigned long ptr;
+	ptr = offsetof(struct btrfs_node, ptrs) +
+		sizeof(struct btrfs_key_ptr) * nr;
+	btrfs_set_key_generation(eb, (struct btrfs_key_ptr *)ptr, val);
+}
+
+static inline unsigned long btrfs_node_key_ptr_offset(int nr)
+{
+	return offsetof(struct btrfs_node, ptrs) +
+		sizeof(struct btrfs_key_ptr) * nr;
+}
+
+void btrfs_node_key(struct extent_buffer *eb,
+		    struct btrfs_disk_key *disk_key, int nr);
+
+static inline void btrfs_set_node_key(struct extent_buffer *eb,
+				      struct btrfs_disk_key *disk_key, int nr)
+{
+	unsigned long ptr;
+	ptr = btrfs_node_key_ptr_offset(nr);
+	write_eb_member(eb, (struct btrfs_key_ptr *)ptr,
+		       struct btrfs_key_ptr, key, disk_key);
+}
+
+/* struct btrfs_item */
+BTRFS_SETGET_FUNCS(item_offset, struct btrfs_item, offset, 32);
+BTRFS_SETGET_FUNCS(item_size, struct btrfs_item, size, 32);
+
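+/*
+ * Item headers live at the front of the leaf; btrfs_item_nr() and the *_nr()
+ * helpers below deal in byte offsets within the leaf cast to struct pointers,
+ * which are only ever dereferenced indirectly through the extent buffer
+ * accessors.
+ */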
+static inline unsigned long btrfs_item_nr_offset(int nr)
+{
+	return offsetof(struct btrfs_leaf, items) +
+		sizeof(struct btrfs_item) * nr;
+}
+
+static inline struct btrfs_item *btrfs_item_nr(struct extent_buffer *eb,
+					       int nr)
+{
+	return (struct btrfs_item *)btrfs_item_nr_offset(nr);
+}
+
+static inline u32 btrfs_item_end(struct extent_buffer *eb,
+				 struct btrfs_item *item)
+{
+	return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item);
+}
+
+static inline u32 btrfs_item_end_nr(struct extent_buffer *eb, int nr)
+{
+	return btrfs_item_end(eb, btrfs_item_nr(eb, nr));
+}
+
+static inline u32 btrfs_item_offset_nr(struct extent_buffer *eb, int nr)
+{
+	return btrfs_item_offset(eb, btrfs_item_nr(eb, nr));
+}
+
+static inline u32 btrfs_item_size_nr(struct extent_buffer *eb, int nr)
+{
+	return btrfs_item_size(eb, btrfs_item_nr(eb, nr));
+}
+
+static inline void btrfs_item_key(struct extent_buffer *eb,
+			   struct btrfs_disk_key *disk_key, int nr)
+{
+	struct btrfs_item *item = btrfs_item_nr(eb, nr);
+	read_eb_member(eb, item, struct btrfs_item, key, disk_key);
+}
+
+static inline void btrfs_set_item_key(struct extent_buffer *eb,
+			       struct btrfs_disk_key *disk_key, int nr)
+{
+	struct btrfs_item *item = btrfs_item_nr(eb, nr);
+	write_eb_member(eb, item, struct btrfs_item, key, disk_key);
+}
+
+BTRFS_SETGET_FUNCS(dir_log_end, struct btrfs_dir_log_item, end, 64);
+
+/* struct btrfs_root_ref */
+BTRFS_SETGET_FUNCS(root_ref_dirid, struct btrfs_root_ref, dirid, 64);
+BTRFS_SETGET_FUNCS(root_ref_sequence, struct btrfs_root_ref, sequence, 64);
+BTRFS_SETGET_FUNCS(root_ref_name_len, struct btrfs_root_ref, name_len, 16);
+
+/* struct btrfs_dir_item */
+BTRFS_SETGET_FUNCS(dir_data_len, struct btrfs_dir_item, data_len, 16);
+BTRFS_SETGET_FUNCS(dir_type, struct btrfs_dir_item, type, 8);
+BTRFS_SETGET_FUNCS(dir_name_len, struct btrfs_dir_item, name_len, 16);
+BTRFS_SETGET_FUNCS(dir_transid, struct btrfs_dir_item, transid, 64);
+
+static inline void btrfs_dir_item_key(struct extent_buffer *eb,
+				      struct btrfs_dir_item *item,
+				      struct btrfs_disk_key *key)
+{
+	read_eb_member(eb, item, struct btrfs_dir_item, location, key);
+}
+
+static inline void btrfs_set_dir_item_key(struct extent_buffer *eb,
+					  struct btrfs_dir_item *item,
+					  struct btrfs_disk_key *key)
+{
+	write_eb_member(eb, item, struct btrfs_dir_item, location, key);
+}
+
+/* struct btrfs_disk_key */
+BTRFS_SETGET_STACK_FUNCS(disk_key_objectid, struct btrfs_disk_key,
+			 objectid, 64);
+BTRFS_SETGET_STACK_FUNCS(disk_key_offset, struct btrfs_disk_key, offset, 64);
+BTRFS_SETGET_STACK_FUNCS(disk_key_type, struct btrfs_disk_key, type, 8);
+
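+/*
+ * keys are little-endian on disk (struct btrfs_disk_key) and kept in CPU
+ * byte order in memory (struct btrfs_key); these helpers convert between
+ * the two forms.
+ */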
+static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu,
+					 struct btrfs_disk_key *disk)
+{
+	cpu->offset = le64_to_cpu(disk->offset);
+	cpu->type = disk->type;
+	cpu->objectid = le64_to_cpu(disk->objectid);
+}
+
+static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk,
+					 struct btrfs_key *cpu)
+{
+	disk->offset = cpu_to_le64(cpu->offset);
+	disk->type = cpu->type;
+	disk->objectid = cpu_to_le64(cpu->objectid);
+}
+
+static inline void btrfs_node_key_to_cpu(struct extent_buffer *eb,
+				  struct btrfs_key *key, int nr)
+{
+	struct btrfs_disk_key disk_key;
+	btrfs_node_key(eb, &disk_key, nr);
+	btrfs_disk_key_to_cpu(key, &disk_key);
+}
+
+static inline void btrfs_item_key_to_cpu(struct extent_buffer *eb,
+				  struct btrfs_key *key, int nr)
+{
+	struct btrfs_disk_key disk_key;
+	btrfs_item_key(eb, &disk_key, nr);
+	btrfs_disk_key_to_cpu(key, &disk_key);
+}
+
+static inline void btrfs_dir_item_key_to_cpu(struct extent_buffer *eb,
+				      struct btrfs_dir_item *item,
+				      struct btrfs_key *key)
+{
+	struct btrfs_disk_key disk_key;
+	btrfs_dir_item_key(eb, item, &disk_key);
+	btrfs_disk_key_to_cpu(key, &disk_key);
+}
+
+static inline u8 btrfs_key_type(struct btrfs_key *key)
+{
+	return key->type;
+}
+
+static inline void btrfs_set_key_type(struct btrfs_key *key, u8 val)
+{
+	key->type = val;
+}
+
+/* struct btrfs_header */
+BTRFS_SETGET_HEADER_FUNCS(header_bytenr, struct btrfs_header, bytenr, 64);
+BTRFS_SETGET_HEADER_FUNCS(header_generation, struct btrfs_header,
+			  generation, 64);
+BTRFS_SETGET_HEADER_FUNCS(header_owner, struct btrfs_header, owner, 64);
+BTRFS_SETGET_HEADER_FUNCS(header_nritems, struct btrfs_header, nritems, 32);
+BTRFS_SETGET_HEADER_FUNCS(header_flags, struct btrfs_header, flags, 64);
+BTRFS_SETGET_HEADER_FUNCS(header_level, struct btrfs_header, level, 8);
+
+static inline int btrfs_header_flag(struct extent_buffer *eb, u64 flag)
+{
+	return (btrfs_header_flags(eb) & flag) == flag;
+}
+
+static inline int btrfs_set_header_flag(struct extent_buffer *eb, u64 flag)
+{
+	u64 flags = btrfs_header_flags(eb);
+	btrfs_set_header_flags(eb, flags | flag);
+	return (flags & flag) == flag;
+}
+
+static inline int btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag)
+{
+	u64 flags = btrfs_header_flags(eb);
+	btrfs_set_header_flags(eb, flags & ~flag);
+	return (flags & flag) == flag;
+}
+
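+/*
+ * the fsid/uuid/csum helpers below return the offset of the field inside
+ * the block, cast to a pointer; callers hand the result to
+ * read_extent_buffer()/write_extent_buffer() instead of dereferencing it.
+ */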
+static inline u8 *btrfs_header_fsid(struct extent_buffer *eb)
+{
+	unsigned long ptr = offsetof(struct btrfs_header, fsid);
+	return (u8 *)ptr;
+}
+
+static inline u8 *btrfs_header_chunk_tree_uuid(struct extent_buffer *eb)
+{
+	unsigned long ptr = offsetof(struct btrfs_header, chunk_tree_uuid);
+	return (u8 *)ptr;
+}
+
+static inline u8 *btrfs_super_fsid(struct extent_buffer *eb)
+{
+	unsigned long ptr = offsetof(struct btrfs_super_block, fsid);
+	return (u8 *)ptr;
+}
+
+static inline u8 *btrfs_header_csum(struct extent_buffer *eb)
+{
+	unsigned long ptr = offsetof(struct btrfs_header, csum);
+	return (u8 *)ptr;
+}
+
+static inline struct btrfs_node *btrfs_buffer_node(struct extent_buffer *eb)
+{
+	return NULL;
+}
+
+static inline struct btrfs_leaf *btrfs_buffer_leaf(struct extent_buffer *eb)
+{
+	return NULL;
+}
+
+static inline struct btrfs_header *btrfs_buffer_header(struct extent_buffer *eb)
+{
+	return NULL;
+}
+
+static inline int btrfs_is_leaf(struct extent_buffer *eb)
+{
+	return btrfs_header_level(eb) == 0;
+}
+
+/* struct btrfs_root_item */
+BTRFS_SETGET_FUNCS(disk_root_generation, struct btrfs_root_item,
+		   generation, 64);
+BTRFS_SETGET_FUNCS(disk_root_refs, struct btrfs_root_item, refs, 32);
+BTRFS_SETGET_FUNCS(disk_root_bytenr, struct btrfs_root_item, bytenr, 64);
+BTRFS_SETGET_FUNCS(disk_root_level, struct btrfs_root_item, level, 8);
+
+BTRFS_SETGET_STACK_FUNCS(root_generation, struct btrfs_root_item,
+			 generation, 64);
+BTRFS_SETGET_STACK_FUNCS(root_bytenr, struct btrfs_root_item, bytenr, 64);
+BTRFS_SETGET_STACK_FUNCS(root_level, struct btrfs_root_item, level, 8);
+BTRFS_SETGET_STACK_FUNCS(root_dirid, struct btrfs_root_item, root_dirid, 64);
+BTRFS_SETGET_STACK_FUNCS(root_refs, struct btrfs_root_item, refs, 32);
+BTRFS_SETGET_STACK_FUNCS(root_flags, struct btrfs_root_item, flags, 64);
+BTRFS_SETGET_STACK_FUNCS(root_used, struct btrfs_root_item, bytes_used, 64);
+BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64);
+BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item,
+			 last_snapshot, 64);
+
+/* struct btrfs_super_block */
+
+BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64);
+BTRFS_SETGET_STACK_FUNCS(super_flags, struct btrfs_super_block, flags, 64);
+BTRFS_SETGET_STACK_FUNCS(super_generation, struct btrfs_super_block,
+			 generation, 64);
+BTRFS_SETGET_STACK_FUNCS(super_root, struct btrfs_super_block, root, 64);
+BTRFS_SETGET_STACK_FUNCS(super_sys_array_size,
+			 struct btrfs_super_block, sys_chunk_array_size, 32);
+BTRFS_SETGET_STACK_FUNCS(super_chunk_root_generation,
+			 struct btrfs_super_block, chunk_root_generation, 64);
+BTRFS_SETGET_STACK_FUNCS(super_root_level, struct btrfs_super_block,
+			 root_level, 8);
+BTRFS_SETGET_STACK_FUNCS(super_chunk_root, struct btrfs_super_block,
+			 chunk_root, 64);
+BTRFS_SETGET_STACK_FUNCS(super_chunk_root_level, struct btrfs_super_block,
+			 chunk_root_level, 8);
+BTRFS_SETGET_STACK_FUNCS(super_log_root, struct btrfs_super_block,
+			 log_root, 64);
+BTRFS_SETGET_STACK_FUNCS(super_log_root_transid, struct btrfs_super_block,
+			 log_root_transid, 64);
+BTRFS_SETGET_STACK_FUNCS(super_log_root_level, struct btrfs_super_block,
+			 log_root_level, 8);
+BTRFS_SETGET_STACK_FUNCS(super_total_bytes, struct btrfs_super_block,
+			 total_bytes, 64);
+BTRFS_SETGET_STACK_FUNCS(super_bytes_used, struct btrfs_super_block,
+			 bytes_used, 64);
+BTRFS_SETGET_STACK_FUNCS(super_sectorsize, struct btrfs_super_block,
+			 sectorsize, 32);
+BTRFS_SETGET_STACK_FUNCS(super_nodesize, struct btrfs_super_block,
+			 nodesize, 32);
+BTRFS_SETGET_STACK_FUNCS(super_leafsize, struct btrfs_super_block,
+			 leafsize, 32);
+BTRFS_SETGET_STACK_FUNCS(super_stripesize, struct btrfs_super_block,
+			 stripesize, 32);
+BTRFS_SETGET_STACK_FUNCS(super_root_dir, struct btrfs_super_block,
+			 root_dir_objectid, 64);
+BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block,
+			 num_devices, 64);
+BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block,
+			 compat_flags, 64);
+BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block,
+			 compat_ro_flags, 64);
+BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block,
+			 incompat_flags, 64);
+BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block,
+			 csum_type, 16);
+
+static inline int btrfs_super_csum_size(struct btrfs_super_block *s)
+{
+	int t = btrfs_super_csum_type(s);
+	BUG_ON(t >= ARRAY_SIZE(btrfs_csum_sizes));
+	return btrfs_csum_sizes[t];
+}
+
+static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
+{
+	return offsetof(struct btrfs_leaf, items);
+}
+
+/* struct btrfs_file_extent_item */
+BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8);
+
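+/*
+ * inline extents store the file data directly in the item, starting where
+ * the disk_bytenr field would otherwise be; these helpers give the start of
+ * that inline data and the item size needed for 'datasize' bytes of it.
+ */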
+static inline unsigned long
+btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e)
+{
+	unsigned long offset = (unsigned long)e;
+	offset += offsetof(struct btrfs_file_extent_item, disk_bytenr);
+	return offset;
+}
+
+static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize)
+{
+	return offsetof(struct btrfs_file_extent_item, disk_bytenr) + datasize;
+}
+
+BTRFS_SETGET_FUNCS(file_extent_disk_bytenr, struct btrfs_file_extent_item,
+		   disk_bytenr, 64);
+BTRFS_SETGET_FUNCS(file_extent_generation, struct btrfs_file_extent_item,
+		   generation, 64);
+BTRFS_SETGET_FUNCS(file_extent_disk_num_bytes, struct btrfs_file_extent_item,
+		   disk_num_bytes, 64);
+BTRFS_SETGET_FUNCS(file_extent_offset, struct btrfs_file_extent_item,
+		  offset, 64);
+BTRFS_SETGET_FUNCS(file_extent_num_bytes, struct btrfs_file_extent_item,
+		   num_bytes, 64);
+BTRFS_SETGET_FUNCS(file_extent_ram_bytes, struct btrfs_file_extent_item,
+		   ram_bytes, 64);
+BTRFS_SETGET_FUNCS(file_extent_compression, struct btrfs_file_extent_item,
+		   compression, 8);
+BTRFS_SETGET_FUNCS(file_extent_encryption, struct btrfs_file_extent_item,
+		   encryption, 8);
+BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item,
+		   other_encoding, 16);
+
+/*
+ * this returns the number of file bytes represented by the inline item.
+ * If an item is compressed, this is the uncompressed size.
+ */
+static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb,
+					       struct btrfs_file_extent_item *e)
+{
+	return btrfs_file_extent_ram_bytes(eb, e);
+}
+
+/*
+ * this returns the number of bytes used by the item on disk, minus the
+ * size of any extent headers.  If a file is compressed on disk, this is
+ * the compressed size
+ */
+static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb,
+						    struct btrfs_item *e)
+{
+	unsigned long offset;
+	offset = offsetof(struct btrfs_file_extent_item, disk_bytenr);
+	return btrfs_item_size(eb, e) - offset;
+}
+
+static inline struct btrfs_root *btrfs_sb(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+static inline int btrfs_set_root_name(struct btrfs_root *root,
+				      const char *name, int len)
+{
+	/* if we already have a name just free it */
+	kfree(root->name);
+
+	root->name = kmalloc(len+1, GFP_KERNEL);
+	if (!root->name)
+		return -ENOMEM;
+
+	memcpy(root->name, name, len);
+	root->name[len] = '\0';
+
+	return 0;
+}
+
+static inline u32 btrfs_level_size(struct btrfs_root *root, int level)
+{
+	if (level == 0)
+		return root->leafsize;
+	return root->nodesize;
+}
+
+/* helper function to cast into the data area of the leaf. */
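+/*
+ * e.g. di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); the result
+ * is a leaf offset and is read/written through the extent buffer accessors.
+ */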
+#define btrfs_item_ptr(leaf, slot, type) \
+	((type *)(btrfs_leaf_data(leaf) + \
+	btrfs_item_offset_nr(leaf, slot)))
+
+#define btrfs_item_ptr_offset(leaf, slot) \
+	((unsigned long)(btrfs_leaf_data(leaf) + \
+	btrfs_item_offset_nr(leaf, slot)))
+
+static inline struct dentry *fdentry(struct file *file)
+{
+	return file->f_path.dentry;
+}
+
+/* extent-tree.c */
+int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len);
+int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root, u64 bytenr,
+			    u64 num_bytes, u32 *refs);
+int btrfs_update_pinned_extents(struct btrfs_root *root,
+				u64 bytenr, u64 num, int pin);
+int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
+			struct btrfs_root *root, struct extent_buffer *leaf);
+int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root, u64 objectid, u64 bytenr);
+int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
+			 struct btrfs_root *root);
+int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy);
+struct btrfs_block_group_cache *btrfs_lookup_block_group(
+						 struct btrfs_fs_info *info,
+						 u64 bytenr);
+u64 btrfs_find_block_group(struct btrfs_root *root,
+			   u64 search_start, u64 search_hint, int owner);
+struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
+					     struct btrfs_root *root,
+					     u32 blocksize, u64 parent,
+					     u64 root_objectid,
+					     u64 ref_generation,
+					     int level,
+					     u64 hint,
+					     u64 empty_size);
+struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
+					    struct btrfs_root *root,
+					    u64 bytenr, u32 blocksize);
+int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *root,
+		       u64 num_bytes, u64 parent, u64 min_bytes,
+		       u64 root_objectid, u64 ref_generation,
+		       u64 owner, u64 empty_size, u64 hint_byte,
+		       u64 search_end, struct btrfs_key *ins, u64 data);
+int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root, u64 parent,
+				u64 root_objectid, u64 ref_generation,
+				u64 owner, struct btrfs_key *ins);
+int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root, u64 parent,
+				u64 root_objectid, u64 ref_generation,
+				u64 owner, struct btrfs_key *ins);
+int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
+				  struct btrfs_root *root,
+				  u64 num_bytes, u64 min_alloc_size,
+				  u64 empty_size, u64 hint_byte,
+				  u64 search_end, struct btrfs_key *ins,
+				  u64 data);
+int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+		  struct extent_buffer *orig_buf, struct extent_buffer *buf,
+		  u32 *nr_extents);
+int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+		    struct extent_buffer *buf, u32 nr_extents);
+int btrfs_update_ref(struct btrfs_trans_handle *trans,
+		     struct btrfs_root *root, struct extent_buffer *orig_buf,
+		     struct extent_buffer *buf, int start_slot, int nr);
+int btrfs_free_extent(struct btrfs_trans_handle *trans,
+		      struct btrfs_root *root,
+		      u64 bytenr, u64 num_bytes, u64 parent,
+		      u64 root_objectid, u64 ref_generation,
+		      u64 owner_objectid, int pin);
+int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
+int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root,
+			       struct extent_io_tree *unpin);
+int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+			 struct btrfs_root *root,
+			 u64 bytenr, u64 num_bytes, u64 parent,
+			 u64 root_objectid, u64 ref_generation,
+			 u64 owner_objectid);
+int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root, u64 bytenr,
+			    u64 orig_parent, u64 parent,
+			    u64 root_objectid, u64 ref_generation,
+			    u64 owner_objectid);
+int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
+				    struct btrfs_root *root);
+int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr);
+int btrfs_free_block_groups(struct btrfs_fs_info *info);
+int btrfs_read_block_groups(struct btrfs_root *root);
+int btrfs_make_block_group(struct btrfs_trans_handle *trans,
+			   struct btrfs_root *root, u64 bytes_used,
+			   u64 type, u64 chunk_objectid, u64 chunk_offset,
+			   u64 size);
+int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root, u64 group_start);
+int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start);
+int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root);
+int btrfs_drop_dead_reloc_roots(struct btrfs_root *root);
+int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root,
+			       struct extent_buffer *buf, u64 orig_start);
+int btrfs_add_dead_reloc_root(struct btrfs_root *root);
+int btrfs_cleanup_reloc_trees(struct btrfs_root *root);
+int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len);
+u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
+/* ctree.c */
+int btrfs_previous_item(struct btrfs_root *root,
+			struct btrfs_path *path, u64 min_objectid,
+			int type);
+int btrfs_merge_path(struct btrfs_trans_handle *trans,
+		     struct btrfs_root *root,
+		     struct btrfs_key *node_keys,
+		     u64 *nodes, int lowest_level);
+int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root, struct btrfs_path *path,
+			    struct btrfs_key *new_key);
+struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
+struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
+int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
+			struct btrfs_key *key, int lowest_level,
+			int cache_only, u64 min_trans);
+int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
+			 struct btrfs_key *max_key,
+			 struct btrfs_path *path, int cache_only,
+			 u64 min_trans);
+int btrfs_cow_block(struct btrfs_trans_handle *trans,
+		    struct btrfs_root *root, struct extent_buffer *buf,
+		    struct extent_buffer *parent, int parent_slot,
+		    struct extent_buffer **cow_ret, u64 prealloc_dest);
+int btrfs_copy_root(struct btrfs_trans_handle *trans,
+		      struct btrfs_root *root,
+		      struct extent_buffer *buf,
+		      struct extent_buffer **cow_ret, u64 new_root_objectid);
+int btrfs_extend_item(struct btrfs_trans_handle *trans, struct btrfs_root
+		      *root, struct btrfs_path *path, u32 data_size);
+int btrfs_truncate_item(struct btrfs_trans_handle *trans,
+			struct btrfs_root *root,
+			struct btrfs_path *path,
+			u32 new_size, int from_end);
+int btrfs_split_item(struct btrfs_trans_handle *trans,
+		     struct btrfs_root *root,
+		     struct btrfs_path *path,
+		     struct btrfs_key *new_key,
+		     unsigned long split_offset);
+int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
+		      *root, struct btrfs_key *key, struct btrfs_path *p, int
+		      ins_len, int cow);
+int btrfs_realloc_node(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *root, struct extent_buffer *parent,
+		       int start_slot, int cache_only, u64 *last_ret,
+		       struct btrfs_key *progress);
+void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p);
+struct btrfs_path *btrfs_alloc_path(void);
+void btrfs_free_path(struct btrfs_path *p);
+void btrfs_init_path(struct btrfs_path *p);
+int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+		   struct btrfs_path *path, int slot, int nr);
+int btrfs_del_leaf(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root,
+			    struct btrfs_path *path, u64 bytenr);
+static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *root,
+				 struct btrfs_path *path)
+{
+	return btrfs_del_items(trans, root, path, path->slots[0], 1);
+}
+
+int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
+		      *root, struct btrfs_key *key, void *data, u32 data_size);
+int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root,
+			    struct btrfs_path *path,
+			    struct btrfs_key *cpu_key, u32 *data_size,
+			    int nr);
+int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root,
+			     struct btrfs_path *path,
+			     struct btrfs_key *cpu_key, u32 *data_size, int nr);
+
+static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
+					  struct btrfs_root *root,
+					  struct btrfs_path *path,
+					  struct btrfs_key *key,
+					  u32 data_size)
+{
+	return btrfs_insert_empty_items(trans, root, path, key, &data_size, 1);
+}
+
+int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
+int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
+int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
+int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
+			*root);
+int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
+			struct btrfs_root *root,
+			struct extent_buffer *node,
+			struct extent_buffer *parent);
+/* root-item.c */
+int btrfs_find_root_ref(struct btrfs_root *tree_root,
+		   struct btrfs_path *path,
+		   u64 root_id, u64 ref_id);
+int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *tree_root,
+		       u64 root_id, u8 type, u64 ref_id,
+		       u64 dirid, u64 sequence,
+		       const char *name, int name_len);
+int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+		   struct btrfs_key *key);
+int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root
+		      *root, struct btrfs_key *key, struct btrfs_root_item
+		      *item);
+int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
+		      *root, struct btrfs_key *key, struct btrfs_root_item
+		      *item);
+int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
+			 btrfs_root_item *item, struct btrfs_key *key);
+int btrfs_search_root(struct btrfs_root *root, u64 search_start,
+		      u64 *found_objectid);
+int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid,
+			  struct btrfs_root *latest_root);
+/* dir-item.c */
+int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root, const char *name,
+			  int name_len, u64 dir,
+			  struct btrfs_key *location, u8 type, u64 index);
+struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
+					     struct btrfs_root *root,
+					     struct btrfs_path *path, u64 dir,
+					     const char *name, int name_len,
+					     int mod);
+struct btrfs_dir_item *
+btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root,
+			    struct btrfs_path *path, u64 dir,
+			    u64 objectid, const char *name, int name_len,
+			    int mod);
+struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+			      struct btrfs_path *path,
+			      const char *name, int name_len);
+int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
+			      struct btrfs_root *root,
+			      struct btrfs_path *path,
+			      struct btrfs_dir_item *di);
+int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root, const char *name,
+			    u16 name_len, const void *data, u16 data_len,
+			    u64 dir);
+struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
+					  struct btrfs_root *root,
+					  struct btrfs_path *path, u64 dir,
+					  const char *name, u16 name_len,
+					  int mod);
+
+/* orphan.c */
+int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root, u64 offset);
+int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root, u64 offset);
+
+/* inode-map.c */
+int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *fs_root,
+			     u64 dirid, u64 *objectid);
+int btrfs_find_highest_inode(struct btrfs_root *fs_root, u64 *objectid);
+
+/* inode-item.c */
+int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+			   struct btrfs_root *root,
+			   const char *name, int name_len,
+			   u64 inode_objectid, u64 ref_objectid, u64 index);
+int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
+			   struct btrfs_root *root,
+			   const char *name, int name_len,
+			   u64 inode_objectid, u64 ref_objectid, u64 *index);
+int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root,
+			     struct btrfs_path *path, u64 objectid);
+int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
+		       *root, struct btrfs_path *path,
+		       struct btrfs_key *location, int mod);
+
+/* file-item.c */
+int btrfs_del_csums(struct btrfs_trans_handle *trans,
+		    struct btrfs_root *root, u64 bytenr, u64 len);
+int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
+			  struct bio *bio, u32 *dst);
+int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root,
+			     u64 objectid, u64 pos,
+			     u64 disk_offset, u64 disk_num_bytes,
+			     u64 num_bytes, u64 offset, u64 ram_bytes,
+			     u8 compression, u8 encryption, u16 other_encoding);
+int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root,
+			     struct btrfs_path *path, u64 objectid,
+			     u64 bytenr, int mod);
+int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
+			   struct btrfs_root *root,
+			   struct btrfs_ordered_sum *sums);
+int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
+		       struct bio *bio, u64 file_start, int contig);
+int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode,
+			  u64 start, unsigned long len);
+struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
+					  struct btrfs_root *root,
+					  struct btrfs_path *path,
+					  u64 bytenr, int cow);
+int btrfs_csum_truncate(struct btrfs_trans_handle *trans,
+			struct btrfs_root *root, struct btrfs_path *path,
+			u64 isize);
+int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start,
+			     u64 end, struct list_head *list);
+/* inode.c */
+
+/* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */
+#if defined(ClearPageFsMisc) && !defined(ClearPageChecked)
+#define ClearPageChecked ClearPageFsMisc
+#define SetPageChecked SetPageFsMisc
+#define PageChecked PageFsMisc
+#endif
+
+struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
+int btrfs_set_inode_index(struct inode *dir, u64 *index);
+int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *root,
+		       struct inode *dir, struct inode *inode,
+		       const char *name, int name_len);
+int btrfs_add_link(struct btrfs_trans_handle *trans,
+		   struct inode *parent_inode, struct inode *inode,
+		   const char *name, int name_len, int add_backref, u64 index);
+int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root,
+			       struct inode *inode, u64 new_size,
+			       u32 min_type);
+
+int btrfs_start_delalloc_inodes(struct btrfs_root *root);
+int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end);
+int btrfs_writepages(struct address_space *mapping,
+		     struct writeback_control *wbc);
+int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *new_root, struct dentry *dentry,
+			     u64 new_dirid, u64 alloc_hint);
+int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
+			 size_t size, struct bio *bio, unsigned long bio_flags);
+
+unsigned long btrfs_force_ra(struct address_space *mapping,
+			      struct file_ra_state *ra, struct file *file,
+			      pgoff_t offset, pgoff_t last_index);
+int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
+			   int for_del);
+int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page);
+int btrfs_readpage(struct file *file, struct page *page);
+void btrfs_delete_inode(struct inode *inode);
+void btrfs_put_inode(struct inode *inode);
+void btrfs_read_locked_inode(struct inode *inode);
+int btrfs_write_inode(struct inode *inode, int wait);
+void btrfs_dirty_inode(struct inode *inode);
+struct inode *btrfs_alloc_inode(struct super_block *sb);
+void btrfs_destroy_inode(struct inode *inode);
+int btrfs_init_cachep(void);
+void btrfs_destroy_cachep(void);
+long btrfs_ioctl_trans_end(struct file *file);
+struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
+			    struct btrfs_root *root, int wait);
+struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
+				struct btrfs_root *root);
+struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
+			 struct btrfs_root *root, int *is_new);
+int btrfs_commit_write(struct file *file, struct page *page,
+		       unsigned from, unsigned to);
+struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
+				    size_t page_offset, u64 start, u64 end,
+				    int create);
+int btrfs_update_inode(struct btrfs_trans_handle *trans,
+			      struct btrfs_root *root,
+			      struct inode *inode);
+int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
+int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
+void btrfs_orphan_cleanup(struct btrfs_root *root);
+int btrfs_cont_expand(struct inode *inode, loff_t size);
+
+/* ioctl.c */
+long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+/* file.c */
+int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync);
+int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
+			    int skip_pinned);
+int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
+extern struct file_operations btrfs_file_operations;
+int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *root, struct inode *inode,
+		       u64 start, u64 end, u64 inline_limit, u64 *hint_block);
+int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+			      struct btrfs_root *root,
+			      struct inode *inode, u64 start, u64 end);
+int btrfs_release_file(struct inode *inode, struct file *file);
+
+/* tree-defrag.c */
+int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
+			struct btrfs_root *root, int cache_only);
+
+/* sysfs.c */
+int btrfs_init_sysfs(void);
+void btrfs_exit_sysfs(void);
+int btrfs_sysfs_add_super(struct btrfs_fs_info *fs);
+int btrfs_sysfs_add_root(struct btrfs_root *root);
+void btrfs_sysfs_del_root(struct btrfs_root *root);
+void btrfs_sysfs_del_super(struct btrfs_fs_info *root);
+
+/* xattr.c */
+ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
+
+/* super.c */
+u64 btrfs_parse_size(char *str);
+int btrfs_parse_options(struct btrfs_root *root, char *options);
+int btrfs_sync_fs(struct super_block *sb, int wait);
+
+/* acl.c */
+int btrfs_check_acl(struct inode *inode, int mask);
+int btrfs_init_acl(struct inode *inode, struct inode *dir);
+int btrfs_acl_chmod(struct inode *inode);
+
+/* free-space-cache.c */
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+			 u64 bytenr, u64 size);
+int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
+			      u64 offset, u64 bytes);
+int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+			    u64 bytenr, u64 size);
+int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
+				 u64 offset, u64 bytes);
+void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
+				   *block_group);
+struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
+					       *block_group, u64 offset,
+					       u64 bytes);
+void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
+			   u64 bytes);
+u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group);
+#endif
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
new file mode 100644
index 0000000..926a0b2
--- /dev/null
+++ b/fs/btrfs/dir-item.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include "ctree.h"
+#include "disk-io.h"
+#include "hash.h"
+#include "transaction.h"
+
+/*
+ * insert a name into a directory, doing overflow properly if there is a hash
+ * collision.  data_size indicates how big the item inserted should be.  On
+ * success a struct btrfs_dir_item pointer is returned, otherwise it is
+ * an ERR_PTR.
+ *
+ * The name is not copied into the dir item, you have to do that yourself.
+ */
+static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
+						   *trans,
+						   struct btrfs_root *root,
+						   struct btrfs_path *path,
+						   struct btrfs_key *cpu_key,
+						   u32 data_size,
+						   const char *name,
+						   int name_len)
+{
+	int ret;
+	char *ptr;
+	struct btrfs_item *item;
+	struct extent_buffer *leaf;
+
+	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
+	if (ret == -EEXIST) {
+		struct btrfs_dir_item *di;
+		di = btrfs_match_dir_item_name(root, path, name, name_len);
+		if (di)
+			return ERR_PTR(-EEXIST);
+		ret = btrfs_extend_item(trans, root, path, data_size);
+		WARN_ON(ret > 0);
+	}
+	if (ret < 0)
+		return ERR_PTR(ret);
+	WARN_ON(ret > 0);
+	leaf = path->nodes[0];
+	item = btrfs_item_nr(leaf, path->slots[0]);
+	ptr = btrfs_item_ptr(leaf, path->slots[0], char);
+	BUG_ON(data_size > btrfs_item_size(leaf, item));
+	ptr += btrfs_item_size(leaf, item) - data_size;
+	return (struct btrfs_dir_item *)ptr;
+}
+
+/*
+ * xattrs work a lot like directories, this inserts an xattr item
+ * into the tree
+ */
+int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root, const char *name,
+			    u16 name_len, const void *data, u16 data_len,
+			    u64 dir)
+{
+	int ret = 0;
+	struct btrfs_path *path;
+	struct btrfs_dir_item *dir_item;
+	unsigned long name_ptr, data_ptr;
+	struct btrfs_key key, location;
+	struct btrfs_disk_key disk_key;
+	struct extent_buffer *leaf;
+	u32 data_size;
+
+	key.objectid = dir;
+	btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
+	key.offset = btrfs_name_hash(name, name_len);
+	if (name_len + data_len + sizeof(struct btrfs_dir_item) >
+	    BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item))
+		return -ENOSPC;
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	data_size = sizeof(*dir_item) + name_len + data_len;
+	dir_item = insert_with_overflow(trans, root, path, &key, data_size,
+					name, name_len);
+	/*
+	 * FIXME: at some point we should handle xattrs that are larger than
+	 * what we can fit in our leaf.  We set location to an all-zero key
+	 * because we aren't pointing at anything else; that will change if
+	 * we store the xattr data in a separate inode.
+	 */
+	BUG_ON(IS_ERR(dir_item));
+	memset(&location, 0, sizeof(location));
+
+	leaf = path->nodes[0];
+	btrfs_cpu_key_to_disk(&disk_key, &location);
+	btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
+	btrfs_set_dir_type(leaf, dir_item, BTRFS_FT_XATTR);
+	btrfs_set_dir_name_len(leaf, dir_item, name_len);
+	btrfs_set_dir_transid(leaf, dir_item, trans->transid);
+	btrfs_set_dir_data_len(leaf, dir_item, data_len);
+	name_ptr = (unsigned long)(dir_item + 1);
+	data_ptr = (unsigned long)((char *)name_ptr + name_len);
+
+	write_extent_buffer(leaf, name, name_ptr, name_len);
+	write_extent_buffer(leaf, data, data_ptr, data_len);
+	btrfs_mark_buffer_dirty(path->nodes[0]);
+
+	btrfs_free_path(path);
+	return ret;
+}
+
+/*
+ * insert a directory item in the tree, doing all the magic for
+ * both indexes. 'dir' indicates which objectid to insert it into,
+ * 'location' is the key to stuff into the directory item, 'type' is the
+ * type of the inode we're pointing to, and 'index' is the sequence number
+ * to use for the second index (if one is created).
+ */
+int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
+			  *root, const char *name, int name_len, u64 dir,
+			  struct btrfs_key *location, u8 type, u64 index)
+{
+	int ret = 0;
+	int ret2 = 0;
+	struct btrfs_path *path;
+	struct btrfs_dir_item *dir_item;
+	struct extent_buffer *leaf;
+	unsigned long name_ptr;
+	struct btrfs_key key;
+	struct btrfs_disk_key disk_key;
+	u32 data_size;
+
+	key.objectid = dir;
+	btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
+	key.offset = btrfs_name_hash(name, name_len);
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+	data_size = sizeof(*dir_item) + name_len;
+	dir_item = insert_with_overflow(trans, root, path, &key, data_size,
+					name, name_len);
+	if (IS_ERR(dir_item)) {
+		ret = PTR_ERR(dir_item);
+		if (ret == -EEXIST)
+			goto second_insert;
+		goto out;
+	}
+
+	leaf = path->nodes[0];
+	btrfs_cpu_key_to_disk(&disk_key, location);
+	btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
+	btrfs_set_dir_type(leaf, dir_item, type);
+	btrfs_set_dir_data_len(leaf, dir_item, 0);
+	btrfs_set_dir_name_len(leaf, dir_item, name_len);
+	btrfs_set_dir_transid(leaf, dir_item, trans->transid);
+	name_ptr = (unsigned long)(dir_item + 1);
+
+	write_extent_buffer(leaf, name, name_ptr, name_len);
+	btrfs_mark_buffer_dirty(leaf);
+
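+	/*
+	 * each directory entry gets two items: the DIR_ITEM above, keyed by
+	 * the name hash, and a DIR_INDEX item keyed by a sequence number,
+	 * which readdir uses to walk the directory in a stable order.  The
+	 * tree root only gets the name-hashed item.
+	 */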
+second_insert:
+	/* FIXME, use some real flag for selecting the extra index */
+	if (root == root->fs_info->tree_root) {
+		ret = 0;
+		goto out;
+	}
+	btrfs_release_path(root, path);
+
+	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
+	key.offset = index;
+	dir_item = insert_with_overflow(trans, root, path, &key, data_size,
+					name, name_len);
+	if (IS_ERR(dir_item)) {
+		ret2 = PTR_ERR(dir_item);
+		goto out;
+	}
+	leaf = path->nodes[0];
+	btrfs_cpu_key_to_disk(&disk_key, location);
+	btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
+	btrfs_set_dir_type(leaf, dir_item, type);
+	btrfs_set_dir_data_len(leaf, dir_item, 0);
+	btrfs_set_dir_name_len(leaf, dir_item, name_len);
+	btrfs_set_dir_transid(leaf, dir_item, trans->transid);
+	name_ptr = (unsigned long)(dir_item + 1);
+	write_extent_buffer(leaf, name, name_ptr, name_len);
+	btrfs_mark_buffer_dirty(leaf);
+out:
+	btrfs_free_path(path);
+	if (ret)
+		return ret;
+	if (ret2)
+		return ret2;
+	return 0;
+}
+
+/*
+ * look up a directory item based on name.  'dir' is the objectid
+ * we're searching in, and 'mod' indicates whether the caller plans to
+ * delete the item (mod < 0) or modify it in place (mod > 0).
+ */
+struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
+					     struct btrfs_root *root,
+					     struct btrfs_path *path, u64 dir,
+					     const char *name, int name_len,
+					     int mod)
+{
+	int ret;
+	struct btrfs_key key;
+	int ins_len = mod < 0 ? -1 : 0;
+	int cow = mod != 0;
+	struct btrfs_key found_key;
+	struct extent_buffer *leaf;
+
+	key.objectid = dir;
+	btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
+
+	key.offset = btrfs_name_hash(name, name_len);
+
+	ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
+	if (ret < 0)
+		return ERR_PTR(ret);
+	if (ret > 0) {
+		if (path->slots[0] == 0)
+			return NULL;
+		path->slots[0]--;
+	}
+
+	leaf = path->nodes[0];
+	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+
+	if (found_key.objectid != dir ||
+	    btrfs_key_type(&found_key) != BTRFS_DIR_ITEM_KEY ||
+	    found_key.offset != key.offset)
+		return NULL;
+
+	return btrfs_match_dir_item_name(root, path, name, name_len);
+}
+
+/*
+ * look up a directory item based on its index.  'dir' is the objectid
+ * we're searching in, and 'mod' indicates whether the caller plans to
+ * delete the item (mod < 0) or modify it in place (mod > 0).
+ *
+ * The name is used to make sure the index really points to the entry the
+ * caller was looking for.
+ */
+struct btrfs_dir_item *
+btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root,
+			    struct btrfs_path *path, u64 dir,
+			    u64 objectid, const char *name, int name_len,
+			    int mod)
+{
+	int ret;
+	struct btrfs_key key;
+	int ins_len = mod < 0 ? -1 : 0;
+	int cow = mod != 0;
+
+	key.objectid = dir;
+	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
+	key.offset = objectid;
+
+	ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
+	if (ret < 0)
+		return ERR_PTR(ret);
+	if (ret > 0)
+		return ERR_PTR(-ENOENT);
+	return btrfs_match_dir_item_name(root, path, name, name_len);
+}
+
+struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
+					  struct btrfs_root *root,
+					  struct btrfs_path *path, u64 dir,
+					  const char *name, u16 name_len,
+					  int mod)
+{
+	int ret;
+	struct btrfs_key key;
+	int ins_len = mod < 0 ? -1 : 0;
+	int cow = mod != 0;
+	struct btrfs_key found_key;
+	struct extent_buffer *leaf;
+
+	key.objectid = dir;
+	btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
+	key.offset = btrfs_name_hash(name, name_len);
+	ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
+	if (ret < 0)
+		return ERR_PTR(ret);
+	if (ret > 0) {
+		if (path->slots[0] == 0)
+			return NULL;
+		path->slots[0]--;
+	}
+
+	leaf = path->nodes[0];
+	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+
+	if (found_key.objectid != dir ||
+	    btrfs_key_type(&found_key) != BTRFS_XATTR_ITEM_KEY ||
+	    found_key.offset != key.offset)
+		return NULL;
+
+	return btrfs_match_dir_item_name(root, path, name, name_len);
+}
+
+/*
+ * helper function to look at the directory item pointed to by 'path'.
+ * It walks through all the entries packed into the dir item and returns
+ * the one matching a specific name, or NULL if there is no match.
+ */
+struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+			      struct btrfs_path *path,
+			      const char *name, int name_len)
+{
+	struct btrfs_dir_item *dir_item;
+	unsigned long name_ptr;
+	u32 total_len;
+	u32 cur = 0;
+	u32 this_len;
+	struct extent_buffer *leaf;
+
+	leaf = path->nodes[0];
+	dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
+	total_len = btrfs_item_size_nr(leaf, path->slots[0]);
+	while (cur < total_len) {
+		this_len = sizeof(*dir_item) +
+			btrfs_dir_name_len(leaf, dir_item) +
+			btrfs_dir_data_len(leaf, dir_item);
+		name_ptr = (unsigned long)(dir_item + 1);
+
+		if (btrfs_dir_name_len(leaf, dir_item) == name_len &&
+		    memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)
+			return dir_item;
+
+		cur += this_len;
+		dir_item = (struct btrfs_dir_item *)((char *)dir_item +
+						     this_len);
+	}
+	return NULL;
+}
+
+/*
+ * given a pointer into a directory item, delete it.  This
+ * handles items that have more than one entry in them.
+ */
+int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
+			      struct btrfs_root *root,
+			      struct btrfs_path *path,
+			      struct btrfs_dir_item *di)
+{
+	struct extent_buffer *leaf;
+	u32 sub_item_len;
+	u32 item_len;
+	int ret = 0;
+
+	leaf = path->nodes[0];
+	sub_item_len = sizeof(*di) + btrfs_dir_name_len(leaf, di) +
+		btrfs_dir_data_len(leaf, di);
+	item_len = btrfs_item_size_nr(leaf, path->slots[0]);
+	if (sub_item_len == item_len) {
+		ret = btrfs_del_item(trans, root, path);
+	} else {
+		/*
+		 * the item holds more than one name: slide the entries that
+		 * follow it down over the deleted one and shrink the item.
+		 */
+		unsigned long ptr = (unsigned long)di;
+		unsigned long start;
+
+		start = btrfs_item_ptr_offset(leaf, path->slots[0]);
+		memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
+			item_len - (ptr + sub_item_len - start));
+		ret = btrfs_truncate_item(trans, root, path,
+					  item_len - sub_item_len, 1);
+	}
+	return ret;
+}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
new file mode 100644
index 0000000..81a3138
--- /dev/null
+++ b/fs/btrfs/disk-io.c
@@ -0,0 +1,2343 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+#include <linux/swap.h>
+#include <linux/radix-tree.h>
+#include <linux/writeback.h>
+#include <linux/buffer_head.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include "compat.h"
+#include "crc32c.h"
+#include "ctree.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "btrfs_inode.h"
+#include "volumes.h"
+#include "print-tree.h"
+#include "async-thread.h"
+#include "locking.h"
+#include "ref-cache.h"
+#include "tree-log.h"
+
+static struct extent_io_ops btree_extent_io_ops;
+static void end_workqueue_fn(struct btrfs_work *work);
+
+/*
+ * end_io_wq structs are used to do processing in task context when an IO is
+ * complete.  This is used during reads to verify checksums, and it is used
+ * by writes to insert metadata for new file extents after IO is complete.
+ */
+struct end_io_wq {
+	struct bio *bio;
+	bio_end_io_t *end_io;
+	void *private;
+	struct btrfs_fs_info *info;
+	int error;
+	int metadata;
+	struct list_head list;
+	struct btrfs_work work;
+};
+
+/*
+ * async submit bios are used to offload expensive checksumming
+ * onto the worker threads.  They checksum file and metadata bios
+ * just before they are sent down the IO stack.
+ */
+struct async_submit_bio {
+	struct inode *inode;
+	struct bio *bio;
+	struct list_head list;
+	extent_submit_bio_hook_t *submit_bio_start;
+	extent_submit_bio_hook_t *submit_bio_done;
+	int rw;
+	int mirror_num;
+	unsigned long bio_flags;
+	struct btrfs_work work;
+};
+
+/*
+ * extents on the btree inode are pretty simple, there's one extent
+ * that covers the entire device
+ */
+static struct extent_map *btree_get_extent(struct inode *inode,
+		struct page *page, size_t page_offset, u64 start, u64 len,
+		int create)
+{
+	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	struct extent_map *em;
+	int ret;
+
+	spin_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree, start, len);
+	if (em) {
+		em->bdev =
+			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+		spin_unlock(&em_tree->lock);
+		goto out;
+	}
+	spin_unlock(&em_tree->lock);
+
+	em = alloc_extent_map(GFP_NOFS);
+	if (!em) {
+		em = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+	em->start = 0;
+	em->len = (u64)-1;
+	em->block_len = (u64)-1;
+	em->block_start = 0;
+	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+
+	spin_lock(&em_tree->lock);
+	ret = add_extent_mapping(em_tree, em);
+	if (ret == -EEXIST) {
+		u64 failed_start = em->start;
+		u64 failed_len = em->len;
+
+		free_extent_map(em);
+		em = lookup_extent_mapping(em_tree, start, len);
+		if (em) {
+			ret = 0;
+		} else {
+			em = lookup_extent_mapping(em_tree, failed_start,
+						   failed_len);
+			ret = -EIO;
+		}
+	} else if (ret) {
+		free_extent_map(em);
+		em = NULL;
+	}
+	spin_unlock(&em_tree->lock);
+
+	if (ret)
+		em = ERR_PTR(ret);
+out:
+	return em;
+}
+
+u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
+{
+	return btrfs_crc32c(seed, data, len);
+}
+
+void btrfs_csum_final(u32 crc, char *result)
+{
+	*(__le32 *)result = ~cpu_to_le32(crc);
+}
+
+/*
+ * compute the csum for a btree block, and either verify it or write it
+ * into the csum field of the block.
+ */
+static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
+			   int verify)
+{
+	u16 csum_size =
+		btrfs_super_csum_size(&root->fs_info->super_copy);
+	char *result = NULL;
+	unsigned long len;
+	unsigned long cur_len;
+	unsigned long offset = BTRFS_CSUM_SIZE;
+	char *map_token = NULL;
+	char *kaddr;
+	unsigned long map_start;
+	unsigned long map_len;
+	int err;
+	u32 crc = ~(u32)0;
+	unsigned long inline_result;
+
+	len = buf->len - offset;
+	while (len > 0) {
+		err = map_private_extent_buffer(buf, offset, 32,
+					&map_token, &kaddr,
+					&map_start, &map_len, KM_USER0);
+		if (err)
+			return 1;
+		cur_len = min(len, map_len - (offset - map_start));
+		crc = btrfs_csum_data(root, kaddr + offset - map_start,
+				      crc, cur_len);
+		len -= cur_len;
+		offset += cur_len;
+		unmap_extent_buffer(buf, map_token, KM_USER0);
+	}
+	if (csum_size > sizeof(inline_result)) {
+		result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
+		if (!result)
+			return 1;
+	} else {
+		result = (char *)&inline_result;
+	}
+
+	btrfs_csum_final(crc, result);
+
+	if (verify) {
+		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
+			u32 val;
+			u32 found = 0;
+			memcpy(&found, result, csum_size);
+
+			read_extent_buffer(buf, &val, 0, csum_size);
+			printk(KERN_INFO "btrfs: %s checksum verify failed "
+			       "on %llu wanted %X found %X level %d\n",
+			       root->fs_info->sb->s_id,
+			       (unsigned long long)buf->start, val, found,
+			       btrfs_header_level(buf));
+			if (result != (char *)&inline_result)
+				kfree(result);
+			return 1;
+		}
+	} else {
+		write_extent_buffer(buf, result, 0, csum_size);
+	}
+	if (result != (char *)&inline_result)
+		kfree(result);
+	return 0;
+}
+
+/*
+ * we can't consider a given block up to date unless the transid of the
+ * block matches the transid in the parent node's pointer.  This is how we
+ * detect blocks that either didn't get written at all or got written
+ * in the wrong place.
+ */
+static int verify_parent_transid(struct extent_io_tree *io_tree,
+				 struct extent_buffer *eb, u64 parent_transid)
+{
+	int ret;
+
+	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
+		return 0;
+
+	lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
+	if (extent_buffer_uptodate(io_tree, eb) &&
+	    btrfs_header_generation(eb) == parent_transid) {
+		ret = 0;
+		goto out;
+	}
+	printk(KERN_ERR "parent transid verify failed on %llu wanted %llu "
+	       "found %llu\n",
+	       (unsigned long long)eb->start,
+	       (unsigned long long)parent_transid,
+	       (unsigned long long)btrfs_header_generation(eb));
+	ret = 1;
+	clear_extent_buffer_uptodate(io_tree, eb);
+out:
+	unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
+		      GFP_NOFS);
+	return ret;
+}
+
+/*
+ * helper to read a given tree block, doing retries as required when
+ * the checksums don't match and we have alternate mirrors to try.
+ */
+static int btree_read_extent_buffer_pages(struct btrfs_root *root,
+					  struct extent_buffer *eb,
+					  u64 start, u64 parent_transid)
+{
+	struct extent_io_tree *io_tree;
+	int ret;
+	int num_copies = 0;
+	int mirror_num = 0;
+
+	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
+	while (1) {
+		ret = read_extent_buffer_pages(io_tree, eb, start, 1,
+					       btree_get_extent, mirror_num);
+		if (!ret &&
+		    !verify_parent_transid(io_tree, eb, parent_transid))
+			return ret;
+
+		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
+					      eb->start, eb->len);
+		if (num_copies == 1)
+			return ret;
+
+		mirror_num++;
+		if (mirror_num > num_copies)
+			return ret;
+	}
+	return -EIO;
+}
+
+/*
+ * checksum a dirty tree block before IO.  This has extra checks to make sure
+ * we only fill in the checksum field in the first page of a multi-page block
+ */
+
+static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
+{
+	struct extent_io_tree *tree;
+	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 found_start;
+	int found_level;
+	unsigned long len;
+	struct extent_buffer *eb;
+	int ret;
+
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
+
+	if (page->private == EXTENT_PAGE_PRIVATE)
+		goto out;
+	if (!page->private)
+		goto out;
+	len = page->private >> 2;
+	WARN_ON(len == 0);
+
+	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
+	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
+					     btrfs_header_generation(eb));
+	BUG_ON(ret);
+	found_start = btrfs_header_bytenr(eb);
+	if (found_start != start) {
+		WARN_ON(1);
+		goto err;
+	}
+	if (eb->first_page != page) {
+		WARN_ON(1);
+		goto err;
+	}
+	if (!PageUptodate(page)) {
+		WARN_ON(1);
+		goto err;
+	}
+	found_level = btrfs_header_level(eb);
+
+	csum_tree_block(root, eb, 0);
+err:
+	free_extent_buffer(eb);
+out:
+	return 0;
+}
+
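+/*
+ * make sure a tree block belongs to this filesystem by comparing the fsid
+ * in its header against the fsid of the mounted devices (including any
+ * seed devices).  Returns 0 on a match and 1 otherwise.
+ */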
+static int check_tree_block_fsid(struct btrfs_root *root,
+				 struct extent_buffer *eb)
+{
+	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+	u8 fsid[BTRFS_UUID_SIZE];
+	int ret = 1;
+
+	read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
+			   BTRFS_FSID_SIZE);
+	while (fs_devices) {
+		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
+			ret = 0;
+			break;
+		}
+		fs_devices = fs_devices->seed;
+	}
+	return ret;
+}
+
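+/*
+ * read completion hook for btree pages: verify that the block header records
+ * the bytenr we expected, that the block belongs to this fs, and that the
+ * checksum is valid before the buffer is trusted.
+ */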
+static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
+			       struct extent_state *state)
+{
+	struct extent_io_tree *tree;
+	u64 found_start;
+	int found_level;
+	unsigned long len;
+	struct extent_buffer *eb;
+	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
+	int ret = 0;
+
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
+	if (page->private == EXTENT_PAGE_PRIVATE)
+		goto out;
+	if (!page->private)
+		goto out;
+
+	len = page->private >> 2;
+	WARN_ON(len == 0);
+
+	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
+
+	found_start = btrfs_header_bytenr(eb);
+	if (found_start != start) {
+		printk(KERN_INFO "btrfs bad tree block start %llu %llu\n",
+		       (unsigned long long)found_start,
+		       (unsigned long long)eb->start);
+		ret = -EIO;
+		goto err;
+	}
+	if (eb->first_page != page) {
+		printk(KERN_INFO "btrfs bad first page %lu %lu\n",
+		       eb->first_page->index, page->index);
+		WARN_ON(1);
+		ret = -EIO;
+		goto err;
+	}
+	if (check_tree_block_fsid(root, eb)) {
+		printk(KERN_INFO "btrfs bad fsid on block %llu\n",
+		       (unsigned long long)eb->start);
+		ret = -EIO;
+		goto err;
+	}
+	found_level = btrfs_header_level(eb);
+
+	ret = csum_tree_block(root, eb, 1);
+	if (ret)
+		ret = -EIO;
+
+	end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
+	end = eb->start + end - 1;
+err:
+	free_extent_buffer(eb);
+out:
+	return ret;
+}
+
+static void end_workqueue_bio(struct bio *bio, int err)
+{
+	struct end_io_wq *end_io_wq = bio->bi_private;
+	struct btrfs_fs_info *fs_info;
+
+	fs_info = end_io_wq->info;
+	end_io_wq->error = err;
+	end_io_wq->work.func = end_workqueue_fn;
+	end_io_wq->work.flags = 0;
+
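+	/* hand the completion off to the right worker pool: writes and
+	 * metadata each have dedicated helper threads
+	 */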
+	if (bio->bi_rw & (1 << BIO_RW)) {
+		if (end_io_wq->metadata)
+			btrfs_queue_worker(&fs_info->endio_meta_write_workers,
+					   &end_io_wq->work);
+		else
+			btrfs_queue_worker(&fs_info->endio_write_workers,
+					   &end_io_wq->work);
+	} else {
+		if (end_io_wq->metadata)
+			btrfs_queue_worker(&fs_info->endio_meta_workers,
+					   &end_io_wq->work);
+		else
+			btrfs_queue_worker(&fs_info->endio_workers,
+					   &end_io_wq->work);
+	}
+}
+
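+/*
+ * replace a bio's end_io callback so that completion is handed off to
+ * the btrfs worker threads instead of running at interrupt time
+ */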
+int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
+			int metadata)
+{
+	struct end_io_wq *end_io_wq;
+	end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
+	if (!end_io_wq)
+		return -ENOMEM;
+
+	end_io_wq->private = bio->bi_private;
+	end_io_wq->end_io = bio->bi_end_io;
+	end_io_wq->info = info;
+	end_io_wq->error = 0;
+	end_io_wq->bio = bio;
+	end_io_wq->metadata = metadata;
+
+	bio->bi_private = end_io_wq;
+	bio->bi_end_io = end_workqueue_bio;
+	return 0;
+}
+
+unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
+{
+	unsigned long limit = min_t(unsigned long,
+				    info->workers.max_workers,
+				    info->fs_devices->open_devices);
+	return 256 * limit;
+}
+
+int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
+{
+	return atomic_read(&info->nr_async_bios) >
+		btrfs_async_submit_limit(info);
+}
+
+static void run_one_async_start(struct btrfs_work *work)
+{
+	struct btrfs_fs_info *fs_info;
+	struct async_submit_bio *async;
+
+	async = container_of(work, struct async_submit_bio, work);
+	fs_info = BTRFS_I(async->inode)->root->fs_info;
+	async->submit_bio_start(async->inode, async->rw, async->bio,
+			       async->mirror_num, async->bio_flags);
+}
+
+static void run_one_async_done(struct btrfs_work *work)
+{
+	struct btrfs_fs_info *fs_info;
+	struct async_submit_bio *async;
+	int limit;
+
+	async = container_of(work, struct async_submit_bio, work);
+	fs_info = BTRFS_I(async->inode)->root->fs_info;
+
+	limit = btrfs_async_submit_limit(fs_info);
+	limit = limit * 2 / 3;
+
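+	/* wake up anyone waiting on async_submit_wait once the number of
+	 * pending async submits drops below two thirds of the limit
+	 */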
+	atomic_dec(&fs_info->nr_async_submits);
+
+	if (atomic_read(&fs_info->nr_async_submits) < limit &&
+	    waitqueue_active(&fs_info->async_submit_wait))
+		wake_up(&fs_info->async_submit_wait);
+
+	async->submit_bio_done(async->inode, async->rw, async->bio,
+			       async->mirror_num, async->bio_flags);
+}
+
+static void run_one_async_free(struct btrfs_work *work)
+{
+	struct async_submit_bio *async;
+
+	async = container_of(work, struct async_submit_bio, work);
+	kfree(async);
+}
+
+int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
+			int rw, struct bio *bio, int mirror_num,
+			unsigned long bio_flags,
+			extent_submit_bio_hook_t *submit_bio_start,
+			extent_submit_bio_hook_t *submit_bio_done)
+{
+	struct async_submit_bio *async;
+
+	async = kmalloc(sizeof(*async), GFP_NOFS);
+	if (!async)
+		return -ENOMEM;
+
+	async->inode = inode;
+	async->rw = rw;
+	async->bio = bio;
+	async->mirror_num = mirror_num;
+	async->submit_bio_start = submit_bio_start;
+	async->submit_bio_done = submit_bio_done;
+
+	async->work.func = run_one_async_start;
+	async->work.ordered_func = run_one_async_done;
+	async->work.ordered_free = run_one_async_free;
+
+	async->work.flags = 0;
+	async->bio_flags = bio_flags;
+
+	atomic_inc(&fs_info->nr_async_submits);
+	btrfs_queue_worker(&fs_info->workers, &async->work);
+#if 0
+	int limit = btrfs_async_submit_limit(fs_info);
+	if (atomic_read(&fs_info->nr_async_submits) > limit) {
+		wait_event_timeout(fs_info->async_submit_wait,
+			   (atomic_read(&fs_info->nr_async_submits) < limit),
+			   HZ/10);
+
+		wait_event_timeout(fs_info->async_submit_wait,
+			   (atomic_read(&fs_info->nr_async_bios) < limit),
+			   HZ/10);
+	}
+#endif
+	while (atomic_read(&fs_info->async_submit_draining) &&
+	      atomic_read(&fs_info->nr_async_submits)) {
+		wait_event(fs_info->async_submit_wait,
+			   (atomic_read(&fs_info->nr_async_submits) == 0));
+	}
+
+	return 0;
+}
+
+static int btree_csum_one_bio(struct bio *bio)
+{
+	struct bio_vec *bvec = bio->bi_io_vec;
+	int bio_index = 0;
+	struct btrfs_root *root;
+
+	WARN_ON(bio->bi_vcnt <= 0);
+	while (bio_index < bio->bi_vcnt) {
+		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
+		csum_dirty_buffer(root, bvec->bv_page);
+		bio_index++;
+		bvec++;
+	}
+	return 0;
+}
+
+static int __btree_submit_bio_start(struct inode *inode, int rw,
+				    struct bio *bio, int mirror_num,
+				    unsigned long bio_flags)
+{
+	/*
+	 * when we're called for a write, we're already in the async
+	 * submission context.  Just checksum the dirty tree blocks in
+	 * this bio; the done hook below calls btrfs_map_bio.
+	 */
+	btree_csum_one_bio(bio);
+	return 0;
+}
+
+static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
+				 int mirror_num, unsigned long bio_flags)
+{
+	/*
+	 * when we're called for a write, we're already in the async
+	 * submission context.  Just jump into btrfs_map_bio
+	 */
+	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
+}
+
+static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
+				 int mirror_num, unsigned long bio_flags)
+{
+	int ret;
+
+	ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
+					  bio, 1);
+	BUG_ON(ret);
+
+	if (!(rw & (1 << BIO_RW))) {
+		/*
+		 * called for a read, do the setup so that checksum validation
+		 * can happen in the async kernel threads
+		 */
+		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
+				     mirror_num, 0);
+	}
+	/*
+	 * kthread helpers are used to submit writes so that checksumming
+	 * can happen in parallel across all CPUs
+	 */
+	return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
+				   inode, rw, bio, mirror_num, 0,
+				   __btree_submit_bio_start,
+				   __btree_submit_bio_done);
+}
+
+static int btree_writepage(struct page *page, struct writeback_control *wbc)
+{
+	struct extent_io_tree *tree;
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
+
+	if (current->flags & PF_MEMALLOC) {
+		redirty_page_for_writepage(wbc, page);
+		unlock_page(page);
+		return 0;
+	}
+	return extent_write_full_page(tree, page, btree_get_extent, wbc);
+}
+
+static int btree_writepages(struct address_space *mapping,
+			    struct writeback_control *wbc)
+{
+	struct extent_io_tree *tree;
+	tree = &BTRFS_I(mapping->host)->io_tree;
+	if (wbc->sync_mode == WB_SYNC_NONE) {
+		u64 num_dirty;
+		u64 start = 0;
+		unsigned long thresh = 32 * 1024 * 1024;
+
+		if (wbc->for_kupdate)
+			return 0;
+
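+		/* for background writeback, don't bother starting IO until
+		 * at least 32MB of btree pages are dirty
+		 */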
+		num_dirty = count_range_bits(tree, &start, (u64)-1,
+					     thresh, EXTENT_DIRTY);
+		if (num_dirty < thresh)
+			return 0;
+	}
+	return extent_writepages(tree, mapping, btree_get_extent, wbc);
+}
+
+static int btree_readpage(struct file *file, struct page *page)
+{
+	struct extent_io_tree *tree;
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
+	return extent_read_full_page(tree, page, btree_get_extent);
+}
+
+static int btree_releasepage(struct page *page, gfp_t gfp_flags)
+{
+	struct extent_io_tree *tree;
+	struct extent_map_tree *map;
+	int ret;
+
+	if (PageWriteback(page) || PageDirty(page))
+		return 0;
+
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
+	map = &BTRFS_I(page->mapping->host)->extent_tree;
+
+	ret = try_release_extent_state(map, tree, page, gfp_flags);
+	if (!ret)
+		return 0;
+
+	ret = try_release_extent_buffer(tree, page);
+	if (ret == 1) {
+		ClearPagePrivate(page);
+		set_page_private(page, 0);
+		page_cache_release(page);
+	}
+
+	return ret;
+}
+
+static void btree_invalidatepage(struct page *page, unsigned long offset)
+{
+	struct extent_io_tree *tree;
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
+	extent_invalidatepage(tree, page, offset);
+	btree_releasepage(page, GFP_NOFS);
+	if (PagePrivate(page)) {
+		printk(KERN_WARNING "btrfs warning page private not zero "
+		       "on page %llu\n", (unsigned long long)page_offset(page));
+		ClearPagePrivate(page);
+		set_page_private(page, 0);
+		page_cache_release(page);
+	}
+}
+
+#if 0
+static int btree_writepage(struct page *page, struct writeback_control *wbc)
+{
+	struct buffer_head *bh;
+	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
+	struct buffer_head *head;
+	if (!page_has_buffers(page)) {
+		create_empty_buffers(page, root->fs_info->sb->s_blocksize,
+					(1 << BH_Dirty)|(1 << BH_Uptodate));
+	}
+	head = page_buffers(page);
+	bh = head;
+	do {
+		if (buffer_dirty(bh))
+			csum_tree_block(root, bh, 0);
+		bh = bh->b_this_page;
+	} while (bh != head);
+	return block_write_full_page(page, btree_get_block, wbc);
+}
+#endif
+
+static struct address_space_operations btree_aops = {
+	.readpage	= btree_readpage,
+	.writepage	= btree_writepage,
+	.writepages	= btree_writepages,
+	.releasepage	= btree_releasepage,
+	.invalidatepage = btree_invalidatepage,
+	.sync_page	= block_sync_page,
+};
+
+int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
+			 u64 parent_transid)
+{
+	struct extent_buffer *buf = NULL;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	int ret = 0;
+
+	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+	if (!buf)
+		return 0;
+	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
+				 buf, 0, 0, btree_get_extent, 0);
+	free_extent_buffer(buf);
+	return ret;
+}
+
+struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
+					    u64 bytenr, u32 blocksize)
+{
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct extent_buffer *eb;
+	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
+				bytenr, blocksize, GFP_NOFS);
+	return eb;
+}
+
+struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
+						 u64 bytenr, u32 blocksize)
+{
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct extent_buffer *eb;
+
+	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
+				 bytenr, blocksize, NULL, GFP_NOFS);
+	return eb;
+}
+
+
+int btrfs_write_tree_block(struct extent_buffer *buf)
+{
+	return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start,
+				      buf->start + buf->len - 1, WB_SYNC_ALL);
+}
+
+int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
+{
+	return btrfs_wait_on_page_writeback_range(buf->first_page->mapping,
+				  buf->start, buf->start + buf->len - 1);
+}
+
+struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
+				      u32 blocksize, u64 parent_transid)
+{
+	struct extent_buffer *buf = NULL;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct extent_io_tree *io_tree;
+	int ret;
+
+	io_tree = &BTRFS_I(btree_inode)->io_tree;
+
+	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+	if (!buf)
+		return NULL;
+
+	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
+
+	if (ret == 0)
+		buf->flags |= EXTENT_UPTODATE;
+	else
+		WARN_ON(1);
+	return buf;
+
+}
+
+int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+		     struct extent_buffer *buf)
+{
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	if (btrfs_header_generation(buf) ==
+	    root->fs_info->running_transaction->transid) {
+		WARN_ON(!btrfs_tree_locked(buf));
+		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
+					  buf);
+	}
+	return 0;
+}
+
+static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
+			u32 stripesize, struct btrfs_root *root,
+			struct btrfs_fs_info *fs_info,
+			u64 objectid)
+{
+	root->node = NULL;
+	root->commit_root = NULL;
+	root->ref_tree = NULL;
+	root->sectorsize = sectorsize;
+	root->nodesize = nodesize;
+	root->leafsize = leafsize;
+	root->stripesize = stripesize;
+	root->ref_cows = 0;
+	root->track_dirty = 0;
+
+	root->fs_info = fs_info;
+	root->objectid = objectid;
+	root->last_trans = 0;
+	root->highest_inode = 0;
+	root->last_inode_alloc = 0;
+	root->name = NULL;
+	root->in_sysfs = 0;
+
+	INIT_LIST_HEAD(&root->dirty_list);
+	INIT_LIST_HEAD(&root->orphan_list);
+	INIT_LIST_HEAD(&root->dead_list);
+	spin_lock_init(&root->node_lock);
+	spin_lock_init(&root->list_lock);
+	mutex_init(&root->objectid_mutex);
+	mutex_init(&root->log_mutex);
+	extent_io_tree_init(&root->dirty_log_pages,
+			     fs_info->btree_inode->i_mapping, GFP_NOFS);
+
+	btrfs_leaf_ref_tree_init(&root->ref_tree_struct);
+	root->ref_tree = &root->ref_tree_struct;
+
+	memset(&root->root_key, 0, sizeof(root->root_key));
+	memset(&root->root_item, 0, sizeof(root->root_item));
+	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
+	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
+	root->defrag_trans_start = fs_info->generation;
+	init_completion(&root->kobj_unregister);
+	root->defrag_running = 0;
+	root->defrag_level = 0;
+	root->root_key.objectid = objectid;
+	root->anon_super.s_root = NULL;
+	root->anon_super.s_dev = 0;
+	INIT_LIST_HEAD(&root->anon_super.s_list);
+	INIT_LIST_HEAD(&root->anon_super.s_instances);
+	init_rwsem(&root->anon_super.s_umount);
+
+	return 0;
+}
+
+static int find_and_setup_root(struct btrfs_root *tree_root,
+			       struct btrfs_fs_info *fs_info,
+			       u64 objectid,
+			       struct btrfs_root *root)
+{
+	int ret;
+	u32 blocksize;
+	u64 generation;
+
+	__setup_root(tree_root->nodesize, tree_root->leafsize,
+		     tree_root->sectorsize, tree_root->stripesize,
+		     root, fs_info, objectid);
+	ret = btrfs_find_last_root(tree_root, objectid,
+				   &root->root_item, &root->root_key);
+	BUG_ON(ret);
+
+	generation = btrfs_root_generation(&root->root_item);
+	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
+	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
+				     blocksize, generation);
+	BUG_ON(!root->node);
+	return 0;
+}
+
+int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
+			     struct btrfs_fs_info *fs_info)
+{
+	struct extent_buffer *eb;
+	struct btrfs_root *log_root_tree = fs_info->log_root_tree;
+	u64 start = 0;
+	u64 end = 0;
+	int ret;
+
+	if (!log_root_tree)
+		return 0;
+
+	while (1) {
+		ret = find_first_extent_bit(&log_root_tree->dirty_log_pages,
+				    0, &start, &end, EXTENT_DIRTY);
+		if (ret)
+			break;
+
+		clear_extent_dirty(&log_root_tree->dirty_log_pages,
+				   start, end, GFP_NOFS);
+	}
+	eb = fs_info->log_root_tree->node;
+
+	WARN_ON(btrfs_header_level(eb) != 0);
+	WARN_ON(btrfs_header_nritems(eb) != 0);
+
+	ret = btrfs_free_reserved_extent(fs_info->tree_root,
+				eb->start, eb->len);
+	BUG_ON(ret);
+
+	free_extent_buffer(eb);
+	kfree(fs_info->log_root_tree);
+	fs_info->log_root_tree = NULL;
+	return 0;
+}
+
+int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
+			     struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_root *root;
+	struct btrfs_root *tree_root = fs_info->tree_root;
+
+	root = kzalloc(sizeof(*root), GFP_NOFS);
+	if (!root)
+		return -ENOMEM;
+
+	__setup_root(tree_root->nodesize, tree_root->leafsize,
+		     tree_root->sectorsize, tree_root->stripesize,
+		     root, fs_info, BTRFS_TREE_LOG_OBJECTID);
+
+	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
+	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
+	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
+	root->ref_cows = 0;
+
+	root->node = btrfs_alloc_free_block(trans, root, root->leafsize,
+					    0, BTRFS_TREE_LOG_OBJECTID,
+					    trans->transid, 0, 0, 0);
+
+	btrfs_set_header_nritems(root->node, 0);
+	btrfs_set_header_level(root->node, 0);
+	btrfs_set_header_bytenr(root->node, root->node->start);
+	btrfs_set_header_generation(root->node, trans->transid);
+	btrfs_set_header_owner(root->node, BTRFS_TREE_LOG_OBJECTID);
+
+	write_extent_buffer(root->node, root->fs_info->fsid,
+			    (unsigned long)btrfs_header_fsid(root->node),
+			    BTRFS_FSID_SIZE);
+	btrfs_mark_buffer_dirty(root->node);
+	btrfs_tree_unlock(root->node);
+	fs_info->log_root_tree = root;
+	return 0;
+}
+
+struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
+					       struct btrfs_key *location)
+{
+	struct btrfs_root *root;
+	struct btrfs_fs_info *fs_info = tree_root->fs_info;
+	struct btrfs_path *path;
+	struct extent_buffer *l;
+	u64 highest_inode;
+	u64 generation;
+	u32 blocksize;
+	int ret = 0;
+
+	root = kzalloc(sizeof(*root), GFP_NOFS);
+	if (!root)
+		return ERR_PTR(-ENOMEM);
+	if (location->offset == (u64)-1) {
+		ret = find_and_setup_root(tree_root, fs_info,
+					  location->objectid, root);
+		if (ret) {
+			kfree(root);
+			return ERR_PTR(ret);
+		}
+		goto insert;
+	}
+
+	__setup_root(tree_root->nodesize, tree_root->leafsize,
+		     tree_root->sectorsize, tree_root->stripesize,
+		     root, fs_info, location->objectid);
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
+	if (ret != 0) {
+		if (ret > 0)
+			ret = -ENOENT;
+		goto out;
+	}
+	l = path->nodes[0];
+	read_extent_buffer(l, &root->root_item,
+	       btrfs_item_ptr_offset(l, path->slots[0]),
+	       sizeof(root->root_item));
+	memcpy(&root->root_key, location, sizeof(*location));
+	ret = 0;
+out:
+	btrfs_release_path(root, path);
+	btrfs_free_path(path);
+	if (ret) {
+		kfree(root);
+		return ERR_PTR(ret);
+	}
+	generation = btrfs_root_generation(&root->root_item);
+	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
+	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
+				     blocksize, generation);
+	BUG_ON(!root->node);
+insert:
+	if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
+		root->ref_cows = 1;
+		ret = btrfs_find_highest_inode(root, &highest_inode);
+		if (ret == 0) {
+			root->highest_inode = highest_inode;
+			root->last_inode_alloc = highest_inode;
+		}
+	}
+	return root;
+}
+
+struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+					u64 root_objectid)
+{
+	struct btrfs_root *root;
+
+	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
+		return fs_info->tree_root;
+	if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
+		return fs_info->extent_root;
+
+	root = radix_tree_lookup(&fs_info->fs_roots_radix,
+				 (unsigned long)root_objectid);
+	return root;
+}
+
+struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
+					      struct btrfs_key *location)
+{
+	struct btrfs_root *root;
+	int ret;
+
+	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
+		return fs_info->tree_root;
+	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
+		return fs_info->extent_root;
+	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
+		return fs_info->chunk_root;
+	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
+		return fs_info->dev_root;
+	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
+		return fs_info->csum_root;
+
+	root = radix_tree_lookup(&fs_info->fs_roots_radix,
+				 (unsigned long)location->objectid);
+	if (root)
+		return root;
+
+	root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
+	if (IS_ERR(root))
+		return root;
+
+	set_anon_super(&root->anon_super, NULL);
+
+	ret = radix_tree_insert(&fs_info->fs_roots_radix,
+				(unsigned long)root->root_key.objectid,
+				root);
+	if (ret) {
+		free_extent_buffer(root->node);
+		kfree(root);
+		return ERR_PTR(ret);
+	}
+	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
+		ret = btrfs_find_dead_roots(fs_info->tree_root,
+					    root->root_key.objectid, root);
+		BUG_ON(ret);
+		btrfs_orphan_cleanup(root);
+	}
+	return root;
+}
+
+struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
+				      struct btrfs_key *location,
+				      const char *name, int namelen)
+{
+	struct btrfs_root *root;
+	int ret;
+
+	root = btrfs_read_fs_root_no_name(fs_info, location);
+	if (!root)
+		return NULL;
+
+	if (root->in_sysfs)
+		return root;
+
+	ret = btrfs_set_root_name(root, name, namelen);
+	if (ret) {
+		free_extent_buffer(root->node);
+		kfree(root);
+		return ERR_PTR(ret);
+	}
+#if 0
+	ret = btrfs_sysfs_add_root(root);
+	if (ret) {
+		free_extent_buffer(root->node);
+		kfree(root->name);
+		kfree(root);
+		return ERR_PTR(ret);
+	}
+#endif
+	root->in_sysfs = 1;
+	return root;
+}
+
+static int btrfs_congested_fn(void *congested_data, int bdi_bits)
+{
+	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
+	int ret = 0;
+	struct list_head *cur;
+	struct btrfs_device *device;
+	struct backing_dev_info *bdi;
+#if 0
+	if ((bdi_bits & (1 << BDI_write_congested)) &&
+	    btrfs_congested_async(info, 0))
+		return 1;
+#endif
+	list_for_each(cur, &info->fs_devices->devices) {
+		device = list_entry(cur, struct btrfs_device, dev_list);
+		if (!device->bdev)
+			continue;
+		bdi = blk_get_backing_dev_info(device->bdev);
+		if (bdi && bdi_congested(bdi, bdi_bits)) {
+			ret = 1;
+			break;
+		}
+	}
+	return ret;
+}
+
+/*
+ * this unplugs every device on the box, and it is only used when page
+ * is null
+ */
+static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
+{
+	struct list_head *cur;
+	struct btrfs_device *device;
+	struct btrfs_fs_info *info;
+
+	info = (struct btrfs_fs_info *)bdi->unplug_io_data;
+	list_for_each(cur, &info->fs_devices->devices) {
+		device = list_entry(cur, struct btrfs_device, dev_list);
+		if (!device->bdev)
+			continue;
+
+		bdi = blk_get_backing_dev_info(device->bdev);
+		if (bdi->unplug_io_fn)
+			bdi->unplug_io_fn(bdi, page);
+	}
+}
+
+static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
+{
+	struct inode *inode;
+	struct extent_map_tree *em_tree;
+	struct extent_map *em;
+	struct address_space *mapping;
+	u64 offset;
+
+	/* the generic O_DIRECT read code does this */
+	if (1 || !page) {
+		__unplug_io_fn(bdi, page);
+		return;
+	}
+
+	/*
+	 * page->mapping may change at any time.  Get a consistent copy
+	 * and use that for everything below
+	 */
+	smp_mb();
+	mapping = page->mapping;
+	if (!mapping)
+		return;
+
+	inode = mapping->host;
+
+	/*
+	 * don't do the expensive searching for a small number of
+	 * devices
+	 */
+	if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
+		__unplug_io_fn(bdi, page);
+		return;
+	}
+
+	offset = page_offset(page);
+
+	em_tree = &BTRFS_I(inode)->extent_tree;
+	spin_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
+	spin_unlock(&em_tree->lock);
+	if (!em) {
+		__unplug_io_fn(bdi, page);
+		return;
+	}
+
+	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+		free_extent_map(em);
+		__unplug_io_fn(bdi, page);
+		return;
+	}
+	offset = offset - em->start;
+	btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
+			  em->block_start + offset, page);
+	free_extent_map(em);
+}
+
+static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
+{
+	bdi_init(bdi);
+	bdi->ra_pages	= default_backing_dev_info.ra_pages;
+	bdi->state		= 0;
+	bdi->capabilities	= default_backing_dev_info.capabilities;
+	bdi->unplug_io_fn	= btrfs_unplug_io_fn;
+	bdi->unplug_io_data	= info;
+	bdi->congested_fn	= btrfs_congested_fn;
+	bdi->congested_data	= info;
+	return 0;
+}
+
+static int bio_ready_for_csum(struct bio *bio)
+{
+	u64 length = 0;
+	u64 buf_len = 0;
+	u64 start = 0;
+	struct page *page;
+	struct extent_io_tree *io_tree = NULL;
+	struct btrfs_fs_info *info = NULL;
+	struct bio_vec *bvec;
+	int i;
+	int ret;
+
+	bio_for_each_segment(bvec, bio, i) {
+		page = bvec->bv_page;
+		if (page->private == EXTENT_PAGE_PRIVATE) {
+			length += bvec->bv_len;
+			continue;
+		}
+		if (!page->private) {
+			length += bvec->bv_len;
+			continue;
+		}
+		length = bvec->bv_len;
+		buf_len = page->private >> 2;
+		start = page_offset(page) + bvec->bv_offset;
+		io_tree = &BTRFS_I(page->mapping->host)->io_tree;
+		info = BTRFS_I(page->mapping->host)->root->fs_info;
+	}
+	/* are we fully contained in this bio? */
+	if (buf_len <= length)
+		return 1;
+
+	ret = extent_range_uptodate(io_tree, start + length,
+				    start + buf_len - 1);
+	if (ret == 1)
+		return ret;
+	return ret;
+}
+
+/*
+ * called by the kthread helper functions to finally call the bio end_io
+ * functions.  This is where read checksum verification actually happens
+ */
+static void end_workqueue_fn(struct btrfs_work *work)
+{
+	struct bio *bio;
+	struct end_io_wq *end_io_wq;
+	struct btrfs_fs_info *fs_info;
+	int error;
+
+	end_io_wq = container_of(work, struct end_io_wq, work);
+	bio = end_io_wq->bio;
+	fs_info = end_io_wq->info;
+
+	/* metadata bio reads are special because the whole tree block must
+	 * be checksummed at once.  This makes sure the entire block is in
+	 * ram and up to date before trying to verify things.  For
+	 * blocksize <= pagesize, it is basically a noop
+	 */
+	if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata &&
+	    !bio_ready_for_csum(bio)) {
+		btrfs_queue_worker(&fs_info->endio_meta_workers,
+				   &end_io_wq->work);
+		return;
+	}
+	error = end_io_wq->error;
+	bio->bi_private = end_io_wq->private;
+	bio->bi_end_io = end_io_wq->end_io;
+	kfree(end_io_wq);
+	bio_endio(bio, error);
+}
+
+static int cleaner_kthread(void *arg)
+{
+	struct btrfs_root *root = arg;
+
+	do {
+		smp_mb();
+		if (root->fs_info->closing)
+			break;
+
+		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
+		mutex_lock(&root->fs_info->cleaner_mutex);
+		btrfs_clean_old_snapshots(root);
+		mutex_unlock(&root->fs_info->cleaner_mutex);
+
+		if (freezing(current)) {
+			refrigerator();
+		} else {
+			smp_mb();
+			if (root->fs_info->closing)
+				break;
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule();
+			__set_current_state(TASK_RUNNING);
+		}
+	} while (!kthread_should_stop());
+	return 0;
+}
+
+static int transaction_kthread(void *arg)
+{
+	struct btrfs_root *root = arg;
+	struct btrfs_trans_handle *trans;
+	struct btrfs_transaction *cur;
+	unsigned long now;
+	unsigned long delay;
+	int ret;
+
+	do {
+		smp_mb();
+		if (root->fs_info->closing)
+			break;
+
+		delay = HZ * 30;
+		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
+		mutex_lock(&root->fs_info->transaction_kthread_mutex);
+
+		if (root->fs_info->total_ref_cache_size > 20 * 1024 * 1024) {
+			printk(KERN_INFO "btrfs: total reference cache "
+			       "size %llu\n",
+			       root->fs_info->total_ref_cache_size);
+		}
+
+		mutex_lock(&root->fs_info->trans_mutex);
+		cur = root->fs_info->running_transaction;
+		if (!cur) {
+			mutex_unlock(&root->fs_info->trans_mutex);
+			goto sleep;
+		}
+
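+		/* only force a commit once the running transaction is at
+		 * least 30 seconds old; otherwise recheck in 5 seconds
+		 */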
+		now = get_seconds();
+		if (now < cur->start_time || now - cur->start_time < 30) {
+			mutex_unlock(&root->fs_info->trans_mutex);
+			delay = HZ * 5;
+			goto sleep;
+		}
+		mutex_unlock(&root->fs_info->trans_mutex);
+		trans = btrfs_start_transaction(root, 1);
+		ret = btrfs_commit_transaction(trans, root);
+sleep:
+		wake_up_process(root->fs_info->cleaner_kthread);
+		mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+
+		if (freezing(current)) {
+			refrigerator();
+		} else {
+			if (root->fs_info->closing)
+				break;
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(delay);
+			__set_current_state(TASK_RUNNING);
+		}
+	} while (!kthread_should_stop());
+	return 0;
+}
+
+struct btrfs_root *open_ctree(struct super_block *sb,
+			      struct btrfs_fs_devices *fs_devices,
+			      char *options)
+{
+	u32 sectorsize;
+	u32 nodesize;
+	u32 leafsize;
+	u32 blocksize;
+	u32 stripesize;
+	u64 generation;
+	u64 features;
+	struct btrfs_key location;
+	struct buffer_head *bh;
+	struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
+						 GFP_NOFS);
+	struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
+						 GFP_NOFS);
+	struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root),
+					       GFP_NOFS);
+	struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
+						GFP_NOFS);
+	struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
+						GFP_NOFS);
+	struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
+					      GFP_NOFS);
+	struct btrfs_root *log_tree_root;
+
+	int ret;
+	int err = -EINVAL;
+
+	struct btrfs_super_block *disk_super;
+
+	if (!extent_root || !tree_root || !fs_info ||
+	    !chunk_root || !dev_root || !csum_root) {
+		err = -ENOMEM;
+		goto fail;
+	}
+	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
+	INIT_LIST_HEAD(&fs_info->trans_list);
+	INIT_LIST_HEAD(&fs_info->dead_roots);
+	INIT_LIST_HEAD(&fs_info->hashers);
+	INIT_LIST_HEAD(&fs_info->delalloc_inodes);
+	spin_lock_init(&fs_info->hash_lock);
+	spin_lock_init(&fs_info->delalloc_lock);
+	spin_lock_init(&fs_info->new_trans_lock);
+	spin_lock_init(&fs_info->ref_cache_lock);
+
+	init_completion(&fs_info->kobj_unregister);
+	fs_info->tree_root = tree_root;
+	fs_info->extent_root = extent_root;
+	fs_info->csum_root = csum_root;
+	fs_info->chunk_root = chunk_root;
+	fs_info->dev_root = dev_root;
+	fs_info->fs_devices = fs_devices;
+	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
+	INIT_LIST_HEAD(&fs_info->space_info);
+	btrfs_mapping_init(&fs_info->mapping_tree);
+	atomic_set(&fs_info->nr_async_submits, 0);
+	atomic_set(&fs_info->async_delalloc_pages, 0);
+	atomic_set(&fs_info->async_submit_draining, 0);
+	atomic_set(&fs_info->nr_async_bios, 0);
+	atomic_set(&fs_info->throttles, 0);
+	atomic_set(&fs_info->throttle_gen, 0);
+	fs_info->sb = sb;
+	fs_info->max_extent = (u64)-1;
+	fs_info->max_inline = 8192 * 1024;
+	setup_bdi(fs_info, &fs_info->bdi);
+	fs_info->btree_inode = new_inode(sb);
+	fs_info->btree_inode->i_ino = 1;
+	fs_info->btree_inode->i_nlink = 1;
+
+	fs_info->thread_pool_size = min_t(unsigned long,
+					  num_online_cpus() + 2, 8);
+
+	INIT_LIST_HEAD(&fs_info->ordered_extents);
+	spin_lock_init(&fs_info->ordered_extent_lock);
+
+	sb->s_blocksize = 4096;
+	sb->s_blocksize_bits = blksize_bits(4096);
+
+	/*
+	 * we set the i_size on the btree inode to the max possible offset.
+	 * the real end of the address space is determined by all of
+	 * the devices in the system
+	 */
+	fs_info->btree_inode->i_size = OFFSET_MAX;
+	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
+	fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
+
+	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
+			     fs_info->btree_inode->i_mapping,
+			     GFP_NOFS);
+	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
+			     GFP_NOFS);
+
+	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
+
+	spin_lock_init(&fs_info->block_group_cache_lock);
+	fs_info->block_group_cache_tree.rb_node = NULL;
+
+	extent_io_tree_init(&fs_info->pinned_extents,
+			     fs_info->btree_inode->i_mapping, GFP_NOFS);
+	extent_io_tree_init(&fs_info->pending_del,
+			     fs_info->btree_inode->i_mapping, GFP_NOFS);
+	extent_io_tree_init(&fs_info->extent_ins,
+			     fs_info->btree_inode->i_mapping, GFP_NOFS);
+	fs_info->do_barriers = 1;
+
+	INIT_LIST_HEAD(&fs_info->dead_reloc_roots);
+	btrfs_leaf_ref_tree_init(&fs_info->reloc_ref_tree);
+	btrfs_leaf_ref_tree_init(&fs_info->shared_ref_tree);
+
+	BTRFS_I(fs_info->btree_inode)->root = tree_root;
+	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
+	       sizeof(struct btrfs_key));
+	insert_inode_hash(fs_info->btree_inode);
+
+	mutex_init(&fs_info->trans_mutex);
+	mutex_init(&fs_info->tree_log_mutex);
+	mutex_init(&fs_info->drop_mutex);
+	mutex_init(&fs_info->extent_ins_mutex);
+	mutex_init(&fs_info->pinned_mutex);
+	mutex_init(&fs_info->chunk_mutex);
+	mutex_init(&fs_info->transaction_kthread_mutex);
+	mutex_init(&fs_info->cleaner_mutex);
+	mutex_init(&fs_info->volume_mutex);
+	mutex_init(&fs_info->tree_reloc_mutex);
+	init_waitqueue_head(&fs_info->transaction_throttle);
+	init_waitqueue_head(&fs_info->transaction_wait);
+	init_waitqueue_head(&fs_info->async_submit_wait);
+	init_waitqueue_head(&fs_info->tree_log_wait);
+	atomic_set(&fs_info->tree_log_commit, 0);
+	atomic_set(&fs_info->tree_log_writers, 0);
+	fs_info->tree_log_transid = 0;
+
+	__setup_root(4096, 4096, 4096, 4096, tree_root,
+		     fs_info, BTRFS_ROOT_TREE_OBJECTID);
+
+
+	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
+	if (!bh)
+		goto fail_iput;
+
+	memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
+	memcpy(&fs_info->super_for_commit, &fs_info->super_copy,
+	       sizeof(fs_info->super_for_commit));
+	brelse(bh);
+
+	memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);
+
+	disk_super = &fs_info->super_copy;
+	if (!btrfs_super_root(disk_super))
+		goto fail_iput;
+
+	ret = btrfs_parse_options(tree_root, options);
+	if (ret) {
+		err = ret;
+		goto fail_iput;
+	}
+
+	features = btrfs_super_incompat_flags(disk_super) &
+		~BTRFS_FEATURE_INCOMPAT_SUPP;
+	if (features) {
+		printk(KERN_ERR "BTRFS: couldn't mount because of "
+		       "unsupported optional features (%Lx).\n",
+		       features);
+		err = -EINVAL;
+		goto fail_iput;
+	}
+
+	features = btrfs_super_compat_ro_flags(disk_super) &
+		~BTRFS_FEATURE_COMPAT_RO_SUPP;
+	if (!(sb->s_flags & MS_RDONLY) && features) {
+		printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
+		       "unsupported option features (%Lx).\n",
+		       features);
+		err = -EINVAL;
+		goto fail_iput;
+	}
+
+	/*
+	 * we need to start all the end_io workers up front because the
+	 * queue work function gets called at interrupt time, and so it
+	 * cannot dynamically grow.
+	 */
+	btrfs_init_workers(&fs_info->workers, "worker",
+			   fs_info->thread_pool_size);
+
+	btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
+			   fs_info->thread_pool_size);
+
+	btrfs_init_workers(&fs_info->submit_workers, "submit",
+			   min_t(u64, fs_devices->num_devices,
+			   fs_info->thread_pool_size));
+
+	/* a higher idle thresh on the submit workers makes it much more
+	 * likely that bios will be sent down in a sane order to the
+	 * devices
+	 */
+	fs_info->submit_workers.idle_thresh = 64;
+
+	fs_info->workers.idle_thresh = 16;
+	fs_info->workers.ordered = 1;
+
+	fs_info->delalloc_workers.idle_thresh = 2;
+	fs_info->delalloc_workers.ordered = 1;
+
+	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
+	btrfs_init_workers(&fs_info->endio_workers, "endio",
+			   fs_info->thread_pool_size);
+	btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
+			   fs_info->thread_pool_size);
+	btrfs_init_workers(&fs_info->endio_meta_write_workers,
+			   "endio-meta-write", fs_info->thread_pool_size);
+	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
+			   fs_info->thread_pool_size);
+
+	/*
+	 * endios are largely parallel and should have a very
+	 * low idle thresh
+	 */
+	fs_info->endio_workers.idle_thresh = 4;
+	fs_info->endio_write_workers.idle_thresh = 64;
+	fs_info->endio_meta_write_workers.idle_thresh = 64;
+
+	btrfs_start_workers(&fs_info->workers, 1);
+	btrfs_start_workers(&fs_info->submit_workers, 1);
+	btrfs_start_workers(&fs_info->delalloc_workers, 1);
+	btrfs_start_workers(&fs_info->fixup_workers, 1);
+	btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
+	btrfs_start_workers(&fs_info->endio_meta_workers,
+			    fs_info->thread_pool_size);
+	btrfs_start_workers(&fs_info->endio_meta_write_workers,
+			    fs_info->thread_pool_size);
+	btrfs_start_workers(&fs_info->endio_write_workers,
+			    fs_info->thread_pool_size);
+
+	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
+	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
+				    4 * 1024 * 1024 / PAGE_CACHE_SIZE);
+
+	nodesize = btrfs_super_nodesize(disk_super);
+	leafsize = btrfs_super_leafsize(disk_super);
+	sectorsize = btrfs_super_sectorsize(disk_super);
+	stripesize = btrfs_super_stripesize(disk_super);
+	tree_root->nodesize = nodesize;
+	tree_root->leafsize = leafsize;
+	tree_root->sectorsize = sectorsize;
+	tree_root->stripesize = stripesize;
+
+	sb->s_blocksize = sectorsize;
+	sb->s_blocksize_bits = blksize_bits(sectorsize);
+
+	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
+		    sizeof(disk_super->magic))) {
+		printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
+		goto fail_sb_buffer;
+	}
+
+	mutex_lock(&fs_info->chunk_mutex);
+	ret = btrfs_read_sys_array(tree_root);
+	mutex_unlock(&fs_info->chunk_mutex);
+	if (ret) {
+		printk(KERN_WARNING "btrfs: failed to read the system "
+		       "array on %s\n", sb->s_id);
+		goto fail_sys_array;
+	}
+
+	blocksize = btrfs_level_size(tree_root,
+				     btrfs_super_chunk_root_level(disk_super));
+	generation = btrfs_super_chunk_root_generation(disk_super);
+
+	__setup_root(nodesize, leafsize, sectorsize, stripesize,
+		     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
+
+	chunk_root->node = read_tree_block(chunk_root,
+					   btrfs_super_chunk_root(disk_super),
+					   blocksize, generation);
+	BUG_ON(!chunk_root->node);
+
+	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
+	   (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
+	   BTRFS_UUID_SIZE);
+
+	mutex_lock(&fs_info->chunk_mutex);
+	ret = btrfs_read_chunk_tree(chunk_root);
+	mutex_unlock(&fs_info->chunk_mutex);
+	if (ret) {
+		printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
+		       sb->s_id);
+		goto fail_chunk_root;
+	}
+
+	btrfs_close_extra_devices(fs_devices);
+
+	blocksize = btrfs_level_size(tree_root,
+				     btrfs_super_root_level(disk_super));
+	generation = btrfs_super_generation(disk_super);
+
+	tree_root->node = read_tree_block(tree_root,
+					  btrfs_super_root(disk_super),
+					  blocksize, generation);
+	if (!tree_root->node)
+		goto fail_chunk_root;
+
+
+	ret = find_and_setup_root(tree_root, fs_info,
+				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
+	if (ret)
+		goto fail_tree_root;
+	extent_root->track_dirty = 1;
+
+	ret = find_and_setup_root(tree_root, fs_info,
+				  BTRFS_DEV_TREE_OBJECTID, dev_root);
+	dev_root->track_dirty = 1;
+
+	if (ret)
+		goto fail_extent_root;
+
+	ret = find_and_setup_root(tree_root, fs_info,
+				  BTRFS_CSUM_TREE_OBJECTID, csum_root);
+	if (ret)
+		goto fail_extent_root;
+
+	csum_root->track_dirty = 1;
+
+	btrfs_read_block_groups(extent_root);
+
+	fs_info->generation = generation;
+	fs_info->last_trans_committed = generation;
+	fs_info->data_alloc_profile = (u64)-1;
+	fs_info->metadata_alloc_profile = (u64)-1;
+	fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
+	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
+					       "btrfs-cleaner");
+	if (!fs_info->cleaner_kthread)
+		goto fail_csum_root;
+
+	fs_info->transaction_kthread = kthread_run(transaction_kthread,
+						   tree_root,
+						   "btrfs-transaction");
+	if (!fs_info->transaction_kthread)
+		goto fail_cleaner;
+
+	if (btrfs_super_log_root(disk_super) != 0) {
+		u64 bytenr = btrfs_super_log_root(disk_super);
+
+		if (fs_devices->rw_devices == 0) {
+			printk(KERN_WARNING "Btrfs log replay required "
+			       "on RO media\n");
+			err = -EIO;
+			goto fail_trans_kthread;
+		}
+		blocksize =
+		     btrfs_level_size(tree_root,
+				      btrfs_super_log_root_level(disk_super));
+
+		log_tree_root = kzalloc(sizeof(struct btrfs_root),
+						      GFP_NOFS);
+
+		__setup_root(nodesize, leafsize, sectorsize, stripesize,
+			     log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
+
+		log_tree_root->node = read_tree_block(tree_root, bytenr,
+						      blocksize,
+						      generation + 1);
+		ret = btrfs_recover_log_trees(log_tree_root);
+		BUG_ON(ret);
+
+		if (sb->s_flags & MS_RDONLY) {
+			ret =  btrfs_commit_super(tree_root);
+			BUG_ON(ret);
+		}
+	}
+
+	if (!(sb->s_flags & MS_RDONLY)) {
+		ret = btrfs_cleanup_reloc_trees(tree_root);
+		BUG_ON(ret);
+	}
+
+	location.objectid = BTRFS_FS_TREE_OBJECTID;
+	location.type = BTRFS_ROOT_ITEM_KEY;
+	location.offset = (u64)-1;
+
+	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
+	if (!fs_info->fs_root)
+		goto fail_trans_kthread;
+	return tree_root;
+
+fail_trans_kthread:
+	kthread_stop(fs_info->transaction_kthread);
+fail_cleaner:
+	kthread_stop(fs_info->cleaner_kthread);
+
+	/*
+	 * make sure we're done with the btree inode before we stop our
+	 * kthreads
+	 */
+	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
+	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
+
+fail_csum_root:
+	free_extent_buffer(csum_root->node);
+fail_extent_root:
+	free_extent_buffer(extent_root->node);
+fail_tree_root:
+	free_extent_buffer(tree_root->node);
+fail_chunk_root:
+	free_extent_buffer(chunk_root->node);
+fail_sys_array:
+	free_extent_buffer(dev_root->node);
+fail_sb_buffer:
+	btrfs_stop_workers(&fs_info->fixup_workers);
+	btrfs_stop_workers(&fs_info->delalloc_workers);
+	btrfs_stop_workers(&fs_info->workers);
+	btrfs_stop_workers(&fs_info->endio_workers);
+	btrfs_stop_workers(&fs_info->endio_meta_workers);
+	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
+	btrfs_stop_workers(&fs_info->endio_write_workers);
+	btrfs_stop_workers(&fs_info->submit_workers);
+fail_iput:
+	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
+	iput(fs_info->btree_inode);
+fail:
+	btrfs_close_devices(fs_info->fs_devices);
+	btrfs_mapping_tree_free(&fs_info->mapping_tree);
+
+	kfree(extent_root);
+	kfree(tree_root);
+	bdi_destroy(&fs_info->bdi);
+	kfree(fs_info);
+	kfree(chunk_root);
+	kfree(dev_root);
+	kfree(csum_root);
+	return ERR_PTR(err);
+}
+
+static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
+{
+	char b[BDEVNAME_SIZE];
+
+	if (uptodate) {
+		set_buffer_uptodate(bh);
+	} else {
+		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
+			printk(KERN_WARNING "lost page write due to "
+					"I/O error on %s\n",
+				       bdevname(bh->b_bdev, b));
+		}
+		/* note, we don't set_buffer_write_io_error because we have
+		 * our own ways of dealing with the IO errors
+		 */
+		clear_buffer_uptodate(bh);
+	}
+	unlock_buffer(bh);
+	put_bh(bh);
+}
+
+struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
+{
+	struct buffer_head *bh;
+	struct buffer_head *latest = NULL;
+	struct btrfs_super_block *super;
+	int i;
+	u64 transid = 0;
+	u64 bytenr;
+
+	/* we would like to check all the supers, but that would make
+	 * a btrfs mount succeed after a mkfs from a different FS.
+	 * So, we need to add a special mount option to scan for
+	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
+	 */
+	for (i = 0; i < 1; i++) {
+		bytenr = btrfs_sb_offset(i);
+		if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
+			break;
+		bh = __bread(bdev, bytenr / 4096, 4096);
+		if (!bh)
+			continue;
+
+		super = (struct btrfs_super_block *)bh->b_data;
+		if (btrfs_super_bytenr(super) != bytenr ||
+		    strncmp((char *)(&super->magic), BTRFS_MAGIC,
+			    sizeof(super->magic))) {
+			brelse(bh);
+			continue;
+		}
+
+		if (!latest || btrfs_super_generation(super) > transid) {
+			brelse(latest);
+			latest = bh;
+			transid = btrfs_super_generation(super);
+		} else {
+			brelse(bh);
+		}
+	}
+	return latest;
+}
+
+static int write_dev_supers(struct btrfs_device *device,
+			    struct btrfs_super_block *sb,
+			    int do_barriers, int wait, int max_mirrors)
+{
+	struct buffer_head *bh;
+	int i;
+	int ret;
+	int errors = 0;
+	u32 crc;
+	u64 bytenr;
+	int last_barrier = 0;
+
+	if (max_mirrors == 0)
+		max_mirrors = BTRFS_SUPER_MIRROR_MAX;
+
+	/* make sure only the last submit_bh does a barrier */
+	if (do_barriers) {
+		for (i = 0; i < max_mirrors; i++) {
+			bytenr = btrfs_sb_offset(i);
+			if (bytenr + BTRFS_SUPER_INFO_SIZE >=
+			    device->total_bytes)
+				break;
+			last_barrier = i;
+		}
+	}
+
+	for (i = 0; i < max_mirrors; i++) {
+		bytenr = btrfs_sb_offset(i);
+		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
+			break;
+
+		if (wait) {
+			bh = __find_get_block(device->bdev, bytenr / 4096,
+					      BTRFS_SUPER_INFO_SIZE);
+			BUG_ON(!bh);
+			brelse(bh);
+			wait_on_buffer(bh);
+			if (buffer_uptodate(bh)) {
+				brelse(bh);
+				continue;
+			}
+		} else {
+			btrfs_set_super_bytenr(sb, bytenr);
+
+			crc = ~(u32)0;
+			crc = btrfs_csum_data(NULL, (char *)sb +
+					      BTRFS_CSUM_SIZE, crc,
+					      BTRFS_SUPER_INFO_SIZE -
+					      BTRFS_CSUM_SIZE);
+			btrfs_csum_final(crc, sb->csum);
+
+			bh = __getblk(device->bdev, bytenr / 4096,
+				      BTRFS_SUPER_INFO_SIZE);
+			memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
+
+			set_buffer_uptodate(bh);
+			get_bh(bh);
+			lock_buffer(bh);
+			bh->b_end_io = btrfs_end_buffer_write_sync;
+		}
+
+		if (i == last_barrier && do_barriers && device->barriers) {
+			ret = submit_bh(WRITE_BARRIER, bh);
+			if (ret == -EOPNOTSUPP) {
+				printk("btrfs: disabling barriers on dev %s\n",
+				       device->name);
+				set_buffer_uptodate(bh);
+				device->barriers = 0;
+				get_bh(bh);
+				lock_buffer(bh);
+				ret = submit_bh(WRITE, bh);
+			}
+		} else {
+			ret = submit_bh(WRITE, bh);
+		}
+
+		if (!ret && wait) {
+			wait_on_buffer(bh);
+			if (!buffer_uptodate(bh))
+				errors++;
+		} else if (ret) {
+			errors++;
+		}
+		if (wait)
+			brelse(bh);
+	}
+	return errors < i ? 0 : -1;
+}
+
+int write_all_supers(struct btrfs_root *root, int max_mirrors)
+{
+	struct list_head *cur;
+	struct list_head *head = &root->fs_info->fs_devices->devices;
+	struct btrfs_device *dev;
+	struct btrfs_super_block *sb;
+	struct btrfs_dev_item *dev_item;
+	int ret;
+	int do_barriers;
+	int max_errors;
+	int total_errors = 0;
+	u64 flags;
+
+	max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
+	do_barriers = !btrfs_test_opt(root, NOBARRIER);
+
+	sb = &root->fs_info->super_for_commit;
+	dev_item = &sb->dev_item;
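+	/* first pass: stamp each device's dev_item into the super copy
+	 * and submit the writes without waiting for them
+	 */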
+	list_for_each(cur, head) {
+		dev = list_entry(cur, struct btrfs_device, dev_list);
+		if (!dev->bdev) {
+			total_errors++;
+			continue;
+		}
+		if (!dev->in_fs_metadata || !dev->writeable)
+			continue;
+
+		btrfs_set_stack_device_generation(dev_item, 0);
+		btrfs_set_stack_device_type(dev_item, dev->type);
+		btrfs_set_stack_device_id(dev_item, dev->devid);
+		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
+		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
+		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
+		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
+		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
+		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
+		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
+
+		flags = btrfs_super_flags(sb);
+		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
+
+		ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
+		if (ret)
+			total_errors++;
+	}
+	if (total_errors > max_errors) {
+		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
+		       total_errors);
+		BUG();
+	}
+
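+	/* second pass: wait for the super block writes submitted above */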
+	total_errors = 0;
+	list_for_each(cur, head) {
+		dev = list_entry(cur, struct btrfs_device, dev_list);
+		if (!dev->bdev)
+			continue;
+		if (!dev->in_fs_metadata || !dev->writeable)
+			continue;
+
+		ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
+		if (ret)
+			total_errors++;
+	}
+	if (total_errors > max_errors) {
+		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
+		       total_errors);
+		BUG();
+	}
+	return 0;
+}
+
+int write_ctree_super(struct btrfs_trans_handle *trans,
+		      struct btrfs_root *root, int max_mirrors)
+{
+	int ret;
+
+	ret = write_all_supers(root, max_mirrors);
+	return ret;
+}
+
+int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
+{
+	radix_tree_delete(&fs_info->fs_roots_radix,
+			  (unsigned long)root->root_key.objectid);
+	if (root->anon_super.s_dev) {
+		down_write(&root->anon_super.s_umount);
+		kill_anon_super(&root->anon_super);
+	}
+	if (root->node)
+		free_extent_buffer(root->node);
+	if (root->commit_root)
+		free_extent_buffer(root->commit_root);
+	kfree(root->name);
+	kfree(root);
+	return 0;
+}
+
+static int del_fs_roots(struct btrfs_fs_info *fs_info)
+{
+	int ret;
+	struct btrfs_root *gang[8];
+	int i;
+
+	while (1) {
+		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+					     (void **)gang, 0,
+					     ARRAY_SIZE(gang));
+		if (!ret)
+			break;
+		for (i = 0; i < ret; i++)
+			btrfs_free_fs_root(fs_info, gang[i]);
+	}
+	return 0;
+}
+
+int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
+{
+	u64 root_objectid = 0;
+	struct btrfs_root *gang[8];
+	int i;
+	int ret;
+
+	while (1) {
+		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+					     (void **)gang, root_objectid,
+					     ARRAY_SIZE(gang));
+		if (!ret)
+			break;
+		for (i = 0; i < ret; i++) {
+			root_objectid = gang[i]->root_key.objectid;
+			ret = btrfs_find_dead_roots(fs_info->tree_root,
+						    root_objectid, gang[i]);
+			BUG_ON(ret);
+			btrfs_orphan_cleanup(gang[i]);
+		}
+		root_objectid++;
+	}
+	return 0;
+}
+
+int btrfs_commit_super(struct btrfs_root *root)
+{
+	struct btrfs_trans_handle *trans;
+	int ret;
+
+	mutex_lock(&root->fs_info->cleaner_mutex);
+	btrfs_clean_old_snapshots(root);
+	mutex_unlock(&root->fs_info->cleaner_mutex);
+	trans = btrfs_start_transaction(root, 1);
+	ret = btrfs_commit_transaction(trans, root);
+	BUG_ON(ret);
+	/* run commit again to drop the original snapshot */
+	trans = btrfs_start_transaction(root, 1);
+	btrfs_commit_transaction(trans, root);
+	ret = btrfs_write_and_wait_transaction(NULL, root);
+	BUG_ON(ret);
+
+	ret = write_ctree_super(NULL, root, 0);
+	return ret;
+}
+
+int close_ctree(struct btrfs_root *root)
+{
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	int ret;
+
+	fs_info->closing = 1;
+	smp_mb();
+
+	kthread_stop(root->fs_info->transaction_kthread);
+	kthread_stop(root->fs_info->cleaner_kthread);
+
+	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
+		ret =  btrfs_commit_super(root);
+		if (ret)
+			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
+	}
+
+	if (fs_info->delalloc_bytes) {
+		printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
+		       fs_info->delalloc_bytes);
+	}
+	if (fs_info->total_ref_cache_size) {
+		printk(KERN_INFO "btrfs: at umount reference cache size %llu\n",
+		       (unsigned long long)fs_info->total_ref_cache_size);
+	}
+
+	if (fs_info->extent_root->node)
+		free_extent_buffer(fs_info->extent_root->node);
+
+	if (fs_info->tree_root->node)
+		free_extent_buffer(fs_info->tree_root->node);
+
+	if (root->fs_info->chunk_root->node)
+		free_extent_buffer(root->fs_info->chunk_root->node);
+
+	if (root->fs_info->dev_root->node)
+		free_extent_buffer(root->fs_info->dev_root->node);
+
+	if (root->fs_info->csum_root->node)
+		free_extent_buffer(root->fs_info->csum_root->node);
+
+	btrfs_free_block_groups(root->fs_info);
+
+	del_fs_roots(fs_info);
+
+	iput(fs_info->btree_inode);
+
+	btrfs_stop_workers(&fs_info->fixup_workers);
+	btrfs_stop_workers(&fs_info->delalloc_workers);
+	btrfs_stop_workers(&fs_info->workers);
+	btrfs_stop_workers(&fs_info->endio_workers);
+	btrfs_stop_workers(&fs_info->endio_meta_workers);
+	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
+	btrfs_stop_workers(&fs_info->endio_write_workers);
+	btrfs_stop_workers(&fs_info->submit_workers);
+
+#if 0
+	while (!list_empty(&fs_info->hashers)) {
+		struct btrfs_hasher *hasher;
+		hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
+				    hashers);
+		list_del(&hasher->hashers);
+		crypto_free_hash(&fs_info->hash_tfm);
+		kfree(hasher);
+	}
+#endif
+	btrfs_close_devices(fs_info->fs_devices);
+	btrfs_mapping_tree_free(&fs_info->mapping_tree);
+
+	bdi_destroy(&fs_info->bdi);
+
+	kfree(fs_info->extent_root);
+	kfree(fs_info->tree_root);
+	kfree(fs_info->chunk_root);
+	kfree(fs_info->dev_root);
+	kfree(fs_info->csum_root);
+	return 0;
+}
+
+int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
+{
+	int ret;
+	struct inode *btree_inode = buf->first_page->mapping->host;
+
+	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
+	if (!ret)
+		return ret;
+
+	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
+				    parent_transid);
+	return !ret;
+}
+
+int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
+{
+	struct inode *btree_inode = buf->first_page->mapping->host;
+	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
+					  buf);
+}
+
+void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
+{
+	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
+	u64 transid = btrfs_header_generation(buf);
+	struct inode *btree_inode = root->fs_info->btree_inode;
+
+	WARN_ON(!btrfs_tree_locked(buf));
+	if (transid != root->fs_info->generation) {
+		printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
+		       "found %llu running %llu\n",
+			(unsigned long long)buf->start,
+			(unsigned long long)transid,
+			(unsigned long long)root->fs_info->generation);
+		WARN_ON(1);
+	}
+	set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
+}
+
+void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
+{
+	/*
+	 * looks as though older kernels can get into trouble with
+	 * this code; they end up stuck in balance_dirty_pages forever
+	 */
+	struct extent_io_tree *tree;
+	u64 num_dirty;
+	u64 start = 0;
+	unsigned long thresh = 32 * 1024 * 1024;
+	tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
+
+	if (current_is_pdflush() || current->flags & PF_MEMALLOC)
+		return;
+
+	num_dirty = count_range_bits(tree, &start, (u64)-1,
+				     thresh, EXTENT_DIRTY);
+	if (num_dirty > thresh) {
+		balance_dirty_pages_ratelimited_nr(
+				   root->fs_info->btree_inode->i_mapping, 1);
+	}
+	return;
+}
+
+int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
+{
+	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
+	int ret;
+	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
+	if (ret == 0)
+		buf->flags |= EXTENT_UPTODATE;
+	return ret;
+}
+
+int btree_lock_page_hook(struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	struct extent_buffer *eb;
+	unsigned long len;
+	u64 bytenr = page_offset(page);
+
+	if (page->private == EXTENT_PAGE_PRIVATE)
+		goto out;
+
+	len = page->private >> 2;
+	eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
+	if (!eb)
+		goto out;
+
+	btrfs_tree_lock(eb);
+	spin_lock(&root->fs_info->hash_lock);
+	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
+	spin_unlock(&root->fs_info->hash_lock);
+	btrfs_tree_unlock(eb);
+	free_extent_buffer(eb);
+out:
+	lock_page(page);
+	return 0;
+}
+
+static struct extent_io_ops btree_extent_io_ops = {
+	.write_cache_pages_lock_hook = btree_lock_page_hook,
+	.readpage_end_io_hook = btree_readpage_end_io_hook,
+	.submit_bio_hook = btree_submit_bio_hook,
+	/* note we're sharing with inode.c for the merge bio hook */
+	.merge_bio_hook = btrfs_merge_bio_hook,
+};
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
new file mode 100644
index 0000000..c0ff404
--- /dev/null
+++ b/fs/btrfs/disk-io.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __DISKIO__
+#define __DISKIO__
+
+#define BTRFS_SUPER_INFO_OFFSET (64 * 1024)
+#define BTRFS_SUPER_INFO_SIZE 4096
+
+#define BTRFS_SUPER_MIRROR_MAX	 3
+#define BTRFS_SUPER_MIRROR_SHIFT 12
+
+static inline u64 btrfs_sb_offset(int mirror)
+{
+	u64 start = 16 * 1024;
+	if (mirror)
+		return start << (BTRFS_SUPER_MIRROR_SHIFT * mirror);
+	return BTRFS_SUPER_INFO_OFFSET;
+}
+
+struct btrfs_device;
+struct btrfs_fs_devices;
+
+struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
+				      u32 blocksize, u64 parent_transid);
+int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
+			 u64 parent_transid);
+struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
+						   u64 bytenr, u32 blocksize);
+int clean_tree_block(struct btrfs_trans_handle *trans,
+		     struct btrfs_root *root, struct extent_buffer *buf);
+struct btrfs_root *open_ctree(struct super_block *sb,
+			      struct btrfs_fs_devices *fs_devices,
+			      char *options);
+int close_ctree(struct btrfs_root *root);
+int write_ctree_super(struct btrfs_trans_handle *trans,
+		      struct btrfs_root *root, int max_mirrors);
+struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
+int btrfs_commit_super(struct btrfs_root *root);
+struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
+					    u64 bytenr, u32 blocksize);
+struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+					u64 root_objectid);
+struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
+				      struct btrfs_key *location,
+				      const char *name, int namelen);
+struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
+					       struct btrfs_key *location);
+struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
+					      struct btrfs_key *location);
+int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
+int btrfs_insert_dev_radix(struct btrfs_root *root,
+			   struct block_device *bdev,
+			   u64 device_id,
+			   u64 block_start,
+			   u64 num_blocks);
+void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
+int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
+void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
+int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
+int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
+int wait_on_tree_block_writeback(struct btrfs_root *root,
+				 struct extent_buffer *buf);
+int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
+u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len);
+void btrfs_csum_final(u32 crc, char *result);
+int btrfs_open_device(struct btrfs_device *dev);
+int btrfs_verify_block_csum(struct btrfs_root *root,
+			    struct extent_buffer *buf);
+int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
+			int metadata);
+int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
+			int rw, struct bio *bio, int mirror_num,
+			unsigned long bio_flags,
+			extent_submit_bio_hook_t *submit_bio_start,
+			extent_submit_bio_hook_t *submit_bio_done);
+
+int btrfs_congested_async(struct btrfs_fs_info *info, int iodone);
+unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
+int btrfs_write_tree_block(struct extent_buffer *buf);
+int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
+int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
+			     struct btrfs_fs_info *fs_info);
+int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
+			     struct btrfs_fs_info *fs_info);
+int btree_lock_page_hook(struct page *page);
+#endif
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
new file mode 100644
index 0000000..85315d2
--- /dev/null
+++ b/fs/btrfs/export.c
@@ -0,0 +1,203 @@
+#include <linux/fs.h>
+#include <linux/types.h>
+#include "ctree.h"
+#include "disk-io.h"
+#include "btrfs_inode.h"
+#include "print-tree.h"
+#include "export.h"
+#include "compat.h"
+
+#define BTRFS_FID_SIZE_NON_CONNECTABLE (offsetof(struct btrfs_fid, \
+						 parent_objectid) / 4)
+#define BTRFS_FID_SIZE_CONNECTABLE (offsetof(struct btrfs_fid, \
+					     parent_root_objectid) / 4)
+#define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid) / 4)
+
+static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
+			   int connectable)
+{
+	struct btrfs_fid *fid = (struct btrfs_fid *)fh;
+	struct inode *inode = dentry->d_inode;
+	int len = *max_len;
+	int type;
+
+	if ((len < BTRFS_FID_SIZE_NON_CONNECTABLE) ||
+	    (connectable && len < BTRFS_FID_SIZE_CONNECTABLE))
+		return 255;
+
+	len  = BTRFS_FID_SIZE_NON_CONNECTABLE;
+	type = FILEID_BTRFS_WITHOUT_PARENT;
+
+	fid->objectid = BTRFS_I(inode)->location.objectid;
+	fid->root_objectid = BTRFS_I(inode)->root->objectid;
+	fid->gen = inode->i_generation;
+
+	if (connectable && !S_ISDIR(inode->i_mode)) {
+		struct inode *parent;
+		u64 parent_root_id;
+
+		spin_lock(&dentry->d_lock);
+
+		parent = dentry->d_parent->d_inode;
+		fid->parent_objectid = BTRFS_I(parent)->location.objectid;
+		fid->parent_gen = parent->i_generation;
+		parent_root_id = BTRFS_I(parent)->root->objectid;
+
+		spin_unlock(&dentry->d_lock);
+
+		if (parent_root_id != fid->root_objectid) {
+			fid->parent_root_objectid = parent_root_id;
+			len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
+			type = FILEID_BTRFS_WITH_PARENT_ROOT;
+		} else {
+			len = BTRFS_FID_SIZE_CONNECTABLE;
+			type = FILEID_BTRFS_WITH_PARENT;
+		}
+	}
+
+	*max_len = len;
+	return type;
+}
+
+static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
+				       u64 root_objectid, u32 generation)
+{
+	struct btrfs_root *root;
+	struct inode *inode;
+	struct btrfs_key key;
+
+	key.objectid = root_objectid;
+	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
+	key.offset = (u64)-1;
+
+	root = btrfs_read_fs_root_no_name(btrfs_sb(sb)->fs_info, &key);
+	if (IS_ERR(root))
+		return ERR_CAST(root);
+
+	key.objectid = objectid;
+	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+	key.offset = 0;
+
+	inode = btrfs_iget(sb, &key, root, NULL);
+	if (IS_ERR(inode))
+		return (void *)inode;
+
+	if (generation != inode->i_generation) {
+		iput(inode);
+		return ERR_PTR(-ESTALE);
+	}
+
+	return d_obtain_alias(inode);
+}
+
+static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh,
+					 int fh_len, int fh_type)
+{
+	struct btrfs_fid *fid = (struct btrfs_fid *) fh;
+	u64 objectid, root_objectid;
+	u32 generation;
+
+	if (fh_type == FILEID_BTRFS_WITH_PARENT) {
+		if (fh_len !=  BTRFS_FID_SIZE_CONNECTABLE)
+			return NULL;
+		root_objectid = fid->root_objectid;
+	} else if (fh_type == FILEID_BTRFS_WITH_PARENT_ROOT) {
+		if (fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT)
+			return NULL;
+		root_objectid = fid->parent_root_objectid;
+	} else
+		return NULL;
+
+	objectid = fid->parent_objectid;
+	generation = fid->parent_gen;
+
+	return btrfs_get_dentry(sb, objectid, root_objectid, generation);
+}
+
+static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
+					 int fh_len, int fh_type)
+{
+	struct btrfs_fid *fid = (struct btrfs_fid *) fh;
+	u64 objectid, root_objectid;
+	u32 generation;
+
+	if ((fh_type != FILEID_BTRFS_WITH_PARENT ||
+	     fh_len != BTRFS_FID_SIZE_CONNECTABLE) &&
+	    (fh_type != FILEID_BTRFS_WITH_PARENT_ROOT ||
+	     fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) &&
+	    (fh_type != FILEID_BTRFS_WITHOUT_PARENT ||
+	     fh_len != BTRFS_FID_SIZE_NON_CONNECTABLE))
+		return NULL;
+
+	objectid = fid->objectid;
+	root_objectid = fid->root_objectid;
+	generation = fid->gen;
+
+	return btrfs_get_dentry(sb, objectid, root_objectid, generation);
+}
+
+static struct dentry *btrfs_get_parent(struct dentry *child)
+{
+	struct inode *dir = child->d_inode;
+	struct btrfs_root *root = BTRFS_I(dir)->root;
+	struct btrfs_key key;
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	int slot;
+	u64 objectid;
+	int ret;
+
+	path = btrfs_alloc_path();
+
+	key.objectid = dir->i_ino;
+	btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
+	key.offset = (u64)-1;
+
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0) {
+		/* Error */
+		btrfs_free_path(path);
+		return ERR_PTR(ret);
+	}
+	leaf = path->nodes[0];
+	slot = path->slots[0];
+	if (ret) {
+		/*
+		 * btrfs_search_slot() returns the slot where we'd want to
+		 * insert a backref for parent inode #0xFFFFFFFFFFFFFFFF.
+		 * The _real_ backref, telling us what the parent inode
+		 * _actually_ is, will be in the slot _before_ the one
+		 * that btrfs_search_slot() returns.
+		 */
+		if (!slot) {
+			/* Unless there is _no_ key in the tree before... */
+			btrfs_free_path(path);
+			return ERR_PTR(-EIO);
+		}
+		slot--;
+	}
+
+	btrfs_item_key_to_cpu(leaf, &key, slot);
+	btrfs_free_path(path);
+
+	if (key.objectid != dir->i_ino || key.type != BTRFS_INODE_REF_KEY)
+		return ERR_PTR(-EINVAL);
+
+	objectid = key.offset;
+
+	/* If we are already at the root of a subvol, return the real root */
+	if (objectid == dir->i_ino)
+		return dget(dir->i_sb->s_root);
+
+	/* Build a new key for the inode item */
+	key.objectid = objectid;
+	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+	key.offset = 0;
+
+	return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
+}
+
+const struct export_operations btrfs_export_ops = {
+	.encode_fh	= btrfs_encode_fh,
+	.fh_to_dentry	= btrfs_fh_to_dentry,
+	.fh_to_parent	= btrfs_fh_to_parent,
+	.get_parent	= btrfs_get_parent,
+};
diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h
new file mode 100644
index 0000000..074348a
--- /dev/null
+++ b/fs/btrfs/export.h
@@ -0,0 +1,19 @@
+#ifndef BTRFS_EXPORT_H
+#define BTRFS_EXPORT_H
+
+#include <linux/exportfs.h>
+
+extern const struct export_operations btrfs_export_ops;
+
+struct btrfs_fid {
+	u64 objectid;
+	u64 root_objectid;
+	u32 gen;
+
+	u64 parent_objectid;
+	u32 parent_gen;
+
+	u64 parent_root_objectid;
+} __attribute__ ((packed));
+
+#endif
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
new file mode 100644
index 0000000..293da65
--- /dev/null
+++ b/fs/btrfs/extent-tree.c
@@ -0,0 +1,5986 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/writeback.h>
+#include <linux/blkdev.h>
+#include <linux/version.h>
+#include "compat.h"
+#include "hash.h"
+#include "crc32c.h"
+#include "ctree.h"
+#include "disk-io.h"
+#include "print-tree.h"
+#include "transaction.h"
+#include "volumes.h"
+#include "locking.h"
+#include "ref-cache.h"
+#include "compat.h"
+
+#define PENDING_EXTENT_INSERT 0
+#define PENDING_EXTENT_DELETE 1
+#define PENDING_BACKREF_UPDATE 2
+
+struct pending_extent_op {
+	int type;
+	u64 bytenr;
+	u64 num_bytes;
+	u64 parent;
+	u64 orig_parent;
+	u64 generation;
+	u64 orig_generation;
+	int level;
+	struct list_head list;
+	int del;
+};
+
+static int finish_current_insert(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *extent_root, int all);
+static int del_pending_extents(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *extent_root, int all);
+static int pin_down_bytes(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root,
+			  u64 bytenr, u64 num_bytes, int is_data);
+static int update_block_group(struct btrfs_trans_handle *trans,
+			      struct btrfs_root *root,
+			      u64 bytenr, u64 num_bytes, int alloc,
+			      int mark_free);
+
+static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
+{
+	return (cache->flags & bits) == bits;
+}
+
+/*
+ * this adds the block group to the fs_info rb tree for the block group
+ * cache
+ */
+static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
+				struct btrfs_block_group_cache *block_group)
+{
+	struct rb_node **p;
+	struct rb_node *parent = NULL;
+	struct btrfs_block_group_cache *cache;
+
+	spin_lock(&info->block_group_cache_lock);
+	p = &info->block_group_cache_tree.rb_node;
+
+	while (*p) {
+		parent = *p;
+		cache = rb_entry(parent, struct btrfs_block_group_cache,
+				 cache_node);
+		if (block_group->key.objectid < cache->key.objectid) {
+			p = &(*p)->rb_left;
+		} else if (block_group->key.objectid > cache->key.objectid) {
+			p = &(*p)->rb_right;
+		} else {
+			spin_unlock(&info->block_group_cache_lock);
+			return -EEXIST;
+		}
+	}
+
+	rb_link_node(&block_group->cache_node, parent, p);
+	rb_insert_color(&block_group->cache_node,
+			&info->block_group_cache_tree);
+	spin_unlock(&info->block_group_cache_lock);
+
+	return 0;
+}
+
+/*
+ * This will return the block group at or after bytenr if contains is 0, else
+ * it will return the block group that contains the bytenr
+ */
+static struct btrfs_block_group_cache *
+block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
+			      int contains)
+{
+	struct btrfs_block_group_cache *cache, *ret = NULL;
+	struct rb_node *n;
+	u64 end, start;
+
+	spin_lock(&info->block_group_cache_lock);
+	n = info->block_group_cache_tree.rb_node;
+
+	while (n) {
+		cache = rb_entry(n, struct btrfs_block_group_cache,
+				 cache_node);
+		end = cache->key.objectid + cache->key.offset - 1;
+		start = cache->key.objectid;
+
+		if (bytenr < start) {
+			if (!contains && (!ret || start < ret->key.objectid))
+				ret = cache;
+			n = n->rb_left;
+		} else if (bytenr > start) {
+			if (contains && bytenr <= end) {
+				ret = cache;
+				break;
+			}
+			n = n->rb_right;
+		} else {
+			ret = cache;
+			break;
+		}
+	}
+	if (ret)
+		atomic_inc(&ret->count);
+	spin_unlock(&info->block_group_cache_lock);
+
+	return ret;
+}
+
+/*
+ * this is only called by cache_block_group; since we could have freed extents,
+ * we need to check the pinned_extents for any extents that can't be used yet,
+ * since their free space will be released as soon as the transaction commits.
+ */
+static int add_new_free_space(struct btrfs_block_group_cache *block_group,
+			      struct btrfs_fs_info *info, u64 start, u64 end)
+{
+	u64 extent_start, extent_end, size;
+	int ret;
+
+	mutex_lock(&info->pinned_mutex);
+	while (start < end) {
+		ret = find_first_extent_bit(&info->pinned_extents, start,
+					    &extent_start, &extent_end,
+					    EXTENT_DIRTY);
+		if (ret)
+			break;
+
+		if (extent_start == start) {
+			start = extent_end + 1;
+		} else if (extent_start > start && extent_start < end) {
+			size = extent_start - start;
+			ret = btrfs_add_free_space(block_group, start,
+						   size);
+			BUG_ON(ret);
+			start = extent_end + 1;
+		} else {
+			break;
+		}
+	}
+
+	if (start < end) {
+		size = end - start;
+		ret = btrfs_add_free_space(block_group, start, size);
+		BUG_ON(ret);
+	}
+	mutex_unlock(&info->pinned_mutex);
+
+	return 0;
+}
+
+static int remove_sb_from_cache(struct btrfs_root *root,
+				struct btrfs_block_group_cache *cache)
+{
+	u64 bytenr;
+	u64 *logical;
+	int stripe_len;
+	int i, nr, ret;
+
+	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+		bytenr = btrfs_sb_offset(i);
+		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
+				       cache->key.objectid, bytenr, 0,
+				       &logical, &nr, &stripe_len);
+		BUG_ON(ret);
+		while (nr--) {
+			btrfs_remove_free_space(cache, logical[nr],
+						stripe_len);
+		}
+		kfree(logical);
+	}
+	return 0;
+}
+
+static int cache_block_group(struct btrfs_root *root,
+			     struct btrfs_block_group_cache *block_group)
+{
+	struct btrfs_path *path;
+	int ret = 0;
+	struct btrfs_key key;
+	struct extent_buffer *leaf;
+	int slot;
+	u64 last;
+
+	if (!block_group)
+		return 0;
+
+	root = root->fs_info->extent_root;
+
+	if (block_group->cached)
+		return 0;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	path->reada = 2;
+	/*
+	 * we get into deadlocks with paths held by callers of this function.
+	 * since the alloc_mutex is protecting things right now, just
+	 * skip the locking here
+	 */
+	path->skip_locking = 1;
+	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
+	key.objectid = last;
+	key.offset = 0;
+	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto err;
+
+	while (1) {
+		leaf = path->nodes[0];
+		slot = path->slots[0];
+		if (slot >= btrfs_header_nritems(leaf)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0)
+				goto err;
+			if (ret == 0)
+				continue;
+			else
+				break;
+		}
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+		if (key.objectid < block_group->key.objectid)
+			goto next;
+
+		if (key.objectid >= block_group->key.objectid +
+		    block_group->key.offset)
+			break;
+
+		if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
+			add_new_free_space(block_group, root->fs_info, last,
+					   key.objectid);
+
+			last = key.objectid + key.offset;
+		}
+next:
+		path->slots[0]++;
+	}
+
+	add_new_free_space(block_group, root->fs_info, last,
+			   block_group->key.objectid +
+			   block_group->key.offset);
+
+	remove_sb_from_cache(root, block_group);
+	block_group->cached = 1;
+	ret = 0;
+err:
+	btrfs_free_path(path);
+	return ret;
+}
+
+/*
+ * return the block group that starts at or after bytenr
+ */
+static struct btrfs_block_group_cache *
+btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
+{
+	struct btrfs_block_group_cache *cache;
+
+	cache = block_group_cache_tree_search(info, bytenr, 0);
+
+	return cache;
+}
+
+/*
+ * return the block group that contains the given bytenr
+ */
+struct btrfs_block_group_cache *btrfs_lookup_block_group(
+						 struct btrfs_fs_info *info,
+						 u64 bytenr)
+{
+	struct btrfs_block_group_cache *cache;
+
+	cache = block_group_cache_tree_search(info, bytenr, 1);
+
+	return cache;
+}
+
+static inline void put_block_group(struct btrfs_block_group_cache *cache)
+{
+	if (atomic_dec_and_test(&cache->count))
+		kfree(cache);
+}
+
+static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
+						  u64 flags)
+{
+	struct list_head *head = &info->space_info;
+	struct list_head *cur;
+	struct btrfs_space_info *found;
+	list_for_each(cur, head) {
+		found = list_entry(cur, struct btrfs_space_info, list);
+		if (found->flags == flags)
+			return found;
+	}
+	return NULL;
+}
+
+static u64 div_factor(u64 num, int factor)
+{
+	if (factor == 10)
+		return num;
+	num *= factor;
+	do_div(num, 10);
+	return num;
+}
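That is, div_factor(num, factor) computes num * factor / 10. A small illustration of how the search below uses it (variable names here are only for the example):

	/* on the first pass a group is a candidate while its usage stays
	 * under 90% of its length (factor == 9) */
	u64 limit = div_factor(cache->key.offset, 9);
	int candidate = used + cache->pinned + cache->reserved < limit;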
+
+u64 btrfs_find_block_group(struct btrfs_root *root,
+			   u64 search_start, u64 search_hint, int owner)
+{
+	struct btrfs_block_group_cache *cache;
+	u64 used;
+	u64 last = max(search_hint, search_start);
+	u64 group_start = 0;
+	int full_search = 0;
+	int factor = 9;
+	int wrapped = 0;
+again:
+	while (1) {
+		cache = btrfs_lookup_first_block_group(root->fs_info, last);
+		if (!cache)
+			break;
+
+		spin_lock(&cache->lock);
+		last = cache->key.objectid + cache->key.offset;
+		used = btrfs_block_group_used(&cache->item);
+
+		if ((full_search || !cache->ro) &&
+		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
+			if (used + cache->pinned + cache->reserved <
+			    div_factor(cache->key.offset, factor)) {
+				group_start = cache->key.objectid;
+				spin_unlock(&cache->lock);
+				put_block_group(cache);
+				goto found;
+			}
+		}
+		spin_unlock(&cache->lock);
+		put_block_group(cache);
+		cond_resched();
+	}
+	if (!wrapped) {
+		last = search_start;
+		wrapped = 1;
+		goto again;
+	}
+	if (!full_search && factor < 10) {
+		last = search_start;
+		full_search = 1;
+		factor = 10;
+		goto again;
+	}
+found:
+	return group_start;
+}
+
+/* simple helper to search for an existing extent at a given offset */
+int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
+{
+	int ret;
+	struct btrfs_key key;
+	struct btrfs_path *path;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+	key.objectid = start;
+	key.offset = len;
+	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
+	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
+				0, 0);
+	btrfs_free_path(path);
+	return ret;
+}
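The return value is passed straight through from btrfs_search_slot(); a hedged sketch of the expected caller pattern:

	/* 0: an EXTENT_ITEM at (start, len) exists; > 0: not found;
	 * < 0: error from the tree search itself */
	ret = btrfs_lookup_extent(root, start, len);
	if (ret < 0)
		return ret;
	found = (ret == 0);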
+
+/*
+ * Back reference rules.  Back refs have three main goals:
+ *
+ * 1) differentiate between all holders of references to an extent so that
+ *    when a reference is dropped we can make sure it was a valid reference
+ *    before freeing the extent.
+ *
+ * 2) Provide enough information to quickly find the holders of an extent
+ *    if we notice a given block is corrupted or bad.
+ *
+ * 3) Make it easy to migrate blocks for FS shrinking or storage pool
+ *    maintenance.  This is actually the same as #2, but with a slightly
+ *    different use case.
+ *
+ * File extents can be referenced by:
+ *
+ * - multiple snapshots, subvolumes, or different generations in one subvol
+ * - different files inside a single subvolume
+ * - different offsets inside a file (bookend extents in file.c)
+ *
+ * The extent ref structure has fields for:
+ *
+ * - Objectid of the subvolume root
+ * - Generation number of the tree holding the reference
+ * - objectid of the file holding the reference
+ * - number of references held by the parent node (always 1 for tree blocks)
+ *
+ * A btree leaf may hold multiple references to a file extent. In most cases,
+ * these references are from the same file and the corresponding offsets inside
+ * the file are close together.
+ *
+ * When a file extent is allocated the fields are filled in:
+ *     (root_key.objectid, trans->transid, inode objectid, 1)
+ *
+ * When a leaf is cow'd new references are added for every file extent found
+ * in the leaf.  It looks similar to the create case, but trans->transid will
+ * be different when the block is cow'd.
+ *
+ *     (root_key.objectid, trans->transid, inode objectid,
+ *      number of references in the leaf)
+ *
+ * When a file extent is removed either during snapshot deletion or
+ * file truncation, we find the corresponding back reference and check
+ * the following fields:
+ *
+ *     (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
+ *      inode objectid)
+ *
+ * Btree extents can be referenced by:
+ *
+ * - Different subvolumes
+ * - Different generations of the same subvolume
+ *
+ * When a tree block is created, back references are inserted:
+ *
+ * (root->root_key.objectid, trans->transid, level, 1)
+ *
+ * When a tree block is cow'd, new back references are added for all the
+ * blocks it points to. If the tree block isn't in a reference counted root,
+ * the old back references are removed. These new back references are of
+ * the form (trans->transid will have increased since creation):
+ *
+ * (root->root_key.objectid, trans->transid, level, 1)
+ *
+ * When a backref is being deleted, the following fields are checked:
+ *
+ * if backref was for a tree root:
+ *     (btrfs_header_owner(itself), btrfs_header_generation(itself), level)
+ * else
+ *     (btrfs_header_owner(parent), btrfs_header_generation(parent), level)
+ *
+ * Back Reference Key composing:
+ *
+ * The key objectid corresponds to the first byte in the extent, the key
+ * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first
+ * byte of the parent extent. If an extent is a tree root, the key offset
+ * is set to the key objectid.
+ */
+
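To make the key composition above concrete, a minimal sketch (the helper name is hypothetical and only restates the rules from the comment):

	static void example_backref_key(struct btrfs_key *key, u64 bytenr,
					u64 parent, int is_tree_root)
	{
		key->objectid = bytenr;		/* first byte of the extent */
		key->type = BTRFS_EXTENT_REF_KEY;
		/* a tree root uses its own start as the offset */
		key->offset = is_tree_root ? bytenr : parent;
	}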
+static noinline int lookup_extent_backref(struct btrfs_trans_handle *trans,
+					  struct btrfs_root *root,
+					  struct btrfs_path *path,
+					  u64 bytenr, u64 parent,
+					  u64 ref_root, u64 ref_generation,
+					  u64 owner_objectid, int del)
+{
+	struct btrfs_key key;
+	struct btrfs_extent_ref *ref;
+	struct extent_buffer *leaf;
+	u64 ref_objectid;
+	int ret;
+
+	key.objectid = bytenr;
+	key.type = BTRFS_EXTENT_REF_KEY;
+	key.offset = parent;
+
+	ret = btrfs_search_slot(trans, root, &key, path, del ? -1 : 0, 1);
+	if (ret < 0)
+		goto out;
+	if (ret > 0) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	leaf = path->nodes[0];
+	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
+	ref_objectid = btrfs_ref_objectid(leaf, ref);
+	if (btrfs_ref_root(leaf, ref) != ref_root ||
+	    btrfs_ref_generation(leaf, ref) != ref_generation ||
+	    (ref_objectid != owner_objectid &&
+	     ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
+		ret = -EIO;
+		WARN_ON(1);
+		goto out;
+	}
+	ret = 0;
+out:
+	return ret;
+}
+
+/*
+ * updates all the backrefs that are pending on update_list for the
+ * extent_root
+ */
+static noinline int update_backrefs(struct btrfs_trans_handle *trans,
+				    struct btrfs_root *extent_root,
+				    struct btrfs_path *path,
+				    struct list_head *update_list)
+{
+	struct btrfs_key key;
+	struct btrfs_extent_ref *ref;
+	struct btrfs_fs_info *info = extent_root->fs_info;
+	struct pending_extent_op *op;
+	struct extent_buffer *leaf;
+	int ret = 0;
+	struct list_head *cur = update_list->next;
+	u64 ref_objectid;
+	u64 ref_root = extent_root->root_key.objectid;
+
+	op = list_entry(cur, struct pending_extent_op, list);
+
+search:
+	key.objectid = op->bytenr;
+	key.type = BTRFS_EXTENT_REF_KEY;
+	key.offset = op->orig_parent;
+
+	ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1);
+	BUG_ON(ret);
+
+	leaf = path->nodes[0];
+
+loop:
+	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
+
+	ref_objectid = btrfs_ref_objectid(leaf, ref);
+
+	if (btrfs_ref_root(leaf, ref) != ref_root ||
+	    btrfs_ref_generation(leaf, ref) != op->orig_generation ||
+	    (ref_objectid != op->level &&
+	     ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
+		printk(KERN_ERR "btrfs couldn't find %llu, parent %llu, "
+		       "root %llu, owner %u\n",
+		       (unsigned long long)op->bytenr,
+		       (unsigned long long)op->orig_parent,
+		       (unsigned long long)ref_root, op->level);
+		btrfs_print_leaf(extent_root, leaf);
+		BUG();
+	}
+
+	key.objectid = op->bytenr;
+	key.offset = op->parent;
+	key.type = BTRFS_EXTENT_REF_KEY;
+	ret = btrfs_set_item_key_safe(trans, extent_root, path, &key);
+	BUG_ON(ret);
+	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
+	btrfs_set_ref_generation(leaf, ref, op->generation);
+
+	cur = cur->next;
+
+	list_del_init(&op->list);
+	unlock_extent(&info->extent_ins, op->bytenr,
+		      op->bytenr + op->num_bytes - 1, GFP_NOFS);
+	kfree(op);
+
+	if (cur == update_list) {
+		btrfs_mark_buffer_dirty(path->nodes[0]);
+		btrfs_release_path(extent_root, path);
+		goto out;
+	}
+
+	op = list_entry(cur, struct pending_extent_op, list);
+
+	path->slots[0]++;
+	while (path->slots[0] < btrfs_header_nritems(leaf)) {
+		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+		if (key.objectid == op->bytenr &&
+		    key.type == BTRFS_EXTENT_REF_KEY)
+			goto loop;
+		path->slots[0]++;
+	}
+
+	btrfs_mark_buffer_dirty(path->nodes[0]);
+	btrfs_release_path(extent_root, path);
+	goto search;
+
+out:
+	return 0;
+}
+
+static noinline int insert_extents(struct btrfs_trans_handle *trans,
+				   struct btrfs_root *extent_root,
+				   struct btrfs_path *path,
+				   struct list_head *insert_list, int nr)
+{
+	struct btrfs_key *keys;
+	u32 *data_size;
+	struct pending_extent_op *op;
+	struct extent_buffer *leaf;
+	struct list_head *cur = insert_list->next;
+	struct btrfs_fs_info *info = extent_root->fs_info;
+	u64 ref_root = extent_root->root_key.objectid;
+	int i = 0, last = 0, ret;
+	int total = nr * 2;
+
+	if (!nr)
+		return 0;
+
+	keys = kzalloc(total * sizeof(struct btrfs_key), GFP_NOFS);
+	if (!keys)
+		return -ENOMEM;
+
+	data_size = kzalloc(total * sizeof(u32), GFP_NOFS);
+	if (!data_size) {
+		kfree(keys);
+		return -ENOMEM;
+	}
+
+	list_for_each_entry(op, insert_list, list) {
+		keys[i].objectid = op->bytenr;
+		keys[i].offset = op->num_bytes;
+		keys[i].type = BTRFS_EXTENT_ITEM_KEY;
+		data_size[i] = sizeof(struct btrfs_extent_item);
+		i++;
+
+		keys[i].objectid = op->bytenr;
+		keys[i].offset = op->parent;
+		keys[i].type = BTRFS_EXTENT_REF_KEY;
+		data_size[i] = sizeof(struct btrfs_extent_ref);
+		i++;
+	}
+
+	op = list_entry(cur, struct pending_extent_op, list);
+	i = 0;
+	while (i < total) {
+		int c;
+		ret = btrfs_insert_some_items(trans, extent_root, path,
+					      keys+i, data_size+i, total-i);
+		BUG_ON(ret < 0);
+
+		if (last && ret > 1)
+			BUG();
+
+		leaf = path->nodes[0];
+		for (c = 0; c < ret; c++) {
+			int ref_first = keys[i].type == BTRFS_EXTENT_REF_KEY;
+
+			/*
+			 * if the first item we inserted was a backref, then
+			 * the EXTENT_ITEM will be the odd c's, else it will
+			 * be the even c's
+			 */
+			if ((ref_first && (c % 2)) ||
+			    (!ref_first && !(c % 2))) {
+				struct btrfs_extent_item *itm;
+
+				itm = btrfs_item_ptr(leaf, path->slots[0] + c,
+						     struct btrfs_extent_item);
+				btrfs_set_extent_refs(path->nodes[0], itm, 1);
+				op->del++;
+			} else {
+				struct btrfs_extent_ref *ref;
+
+				ref = btrfs_item_ptr(leaf, path->slots[0] + c,
+						     struct btrfs_extent_ref);
+				btrfs_set_ref_root(leaf, ref, ref_root);
+				btrfs_set_ref_generation(leaf, ref,
+							 op->generation);
+				btrfs_set_ref_objectid(leaf, ref, op->level);
+				btrfs_set_ref_num_refs(leaf, ref, 1);
+				op->del++;
+			}
+
+			/*
+			 * using del to see when it's ok to free up the
+			 * pending_extent_op.  In the case where we insert the
+			 * last item on the list in order to help do batching
+			 * we need to not free the extent op until we actually
+			 * insert the extent_item
+			 */
+			if (op->del == 2) {
+				unlock_extent(&info->extent_ins, op->bytenr,
+					      op->bytenr + op->num_bytes - 1,
+					      GFP_NOFS);
+				cur = cur->next;
+				list_del_init(&op->list);
+				kfree(op);
+				if (cur != insert_list)
+					op = list_entry(cur,
+						struct pending_extent_op,
+						list);
+			}
+		}
+		btrfs_mark_buffer_dirty(leaf);
+		btrfs_release_path(extent_root, path);
+
+		/*
+		 * Ok, backrefs and items usually go right next to each other,
+		 * but if we could only insert 1 item that means that we
+		 * inserted on the end of a leaf, and we have no idea what may
+		 * be on the next leaf so we just play it safe.  In order to
+		 * try and help this case we insert the last thing on our
+		 * insert list so hopefully it will end up being the last
+		 * thing on the leaf and everything else will be before it,
+		 * which will let us insert a whole bunch of items at the same
+		 * time.
+		 */
+		if (ret == 1 && !last && (i + ret < total)) {
+			/*
+			 * last: where we will pick up the next time around
+			 * i: our current key to insert, will be total - 1
+			 * cur: the current op we are screwing with
+			 * op: duh
+			 */
+			last = i + ret;
+			i = total - 1;
+			cur = insert_list->prev;
+			op = list_entry(cur, struct pending_extent_op, list);
+		} else if (last) {
+			/*
+			 * ok we successfully inserted the last item on the
+			 * list, lets reset everything
+			 *
+			 * i: our current key to insert, so where we left off
+			 *    last time
+			 * last: done with this
+			 * cur: the op we are messing with
+			 * op: duh
+			 * total: since we inserted the last key, we need to
+			 *        decrement total so we don't overflow
+			 */
+			i = last;
+			last = 0;
+			total--;
+			if (i < total) {
+				cur = insert_list->next;
+				op = list_entry(cur, struct pending_extent_op,
+						list);
+			}
+		} else {
+			i += ret;
+		}
+
+		cond_resched();
+	}
+	ret = 0;
+	kfree(keys);
+	kfree(data_size);
+	return ret;
+}
+
+static noinline int insert_extent_backref(struct btrfs_trans_handle *trans,
+					  struct btrfs_root *root,
+					  struct btrfs_path *path,
+					  u64 bytenr, u64 parent,
+					  u64 ref_root, u64 ref_generation,
+					  u64 owner_objectid)
+{
+	struct btrfs_key key;
+	struct extent_buffer *leaf;
+	struct btrfs_extent_ref *ref;
+	u32 num_refs;
+	int ret;
+
+	key.objectid = bytenr;
+	key.type = BTRFS_EXTENT_REF_KEY;
+	key.offset = parent;
+
+	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*ref));
+	if (ret == 0) {
+		leaf = path->nodes[0];
+		ref = btrfs_item_ptr(leaf, path->slots[0],
+				     struct btrfs_extent_ref);
+		btrfs_set_ref_root(leaf, ref, ref_root);
+		btrfs_set_ref_generation(leaf, ref, ref_generation);
+		btrfs_set_ref_objectid(leaf, ref, owner_objectid);
+		btrfs_set_ref_num_refs(leaf, ref, 1);
+	} else if (ret == -EEXIST) {
+		u64 existing_owner;
+		BUG_ON(owner_objectid < BTRFS_FIRST_FREE_OBJECTID);
+		leaf = path->nodes[0];
+		ref = btrfs_item_ptr(leaf, path->slots[0],
+				     struct btrfs_extent_ref);
+		if (btrfs_ref_root(leaf, ref) != ref_root ||
+		    btrfs_ref_generation(leaf, ref) != ref_generation) {
+			ret = -EIO;
+			WARN_ON(1);
+			goto out;
+		}
+
+		num_refs = btrfs_ref_num_refs(leaf, ref);
+		BUG_ON(num_refs == 0);
+		btrfs_set_ref_num_refs(leaf, ref, num_refs + 1);
+
+		existing_owner = btrfs_ref_objectid(leaf, ref);
+		if (existing_owner != owner_objectid &&
+		    existing_owner != BTRFS_MULTIPLE_OBJECTIDS) {
+			btrfs_set_ref_objectid(leaf, ref,
+					BTRFS_MULTIPLE_OBJECTIDS);
+		}
+		ret = 0;
+	} else {
+		goto out;
+	}
+	btrfs_mark_buffer_dirty(path->nodes[0]);
+out:
+	btrfs_release_path(root, path);
+	return ret;
+}
+
+static noinline int remove_extent_backref(struct btrfs_trans_handle *trans,
+					  struct btrfs_root *root,
+					  struct btrfs_path *path)
+{
+	struct extent_buffer *leaf;
+	struct btrfs_extent_ref *ref;
+	u32 num_refs;
+	int ret = 0;
+
+	leaf = path->nodes[0];
+	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
+	num_refs = btrfs_ref_num_refs(leaf, ref);
+	BUG_ON(num_refs == 0);
+	num_refs -= 1;
+	if (num_refs == 0) {
+		ret = btrfs_del_item(trans, root, path);
+	} else {
+		btrfs_set_ref_num_refs(leaf, ref, num_refs);
+		btrfs_mark_buffer_dirty(leaf);
+	}
+	btrfs_release_path(root, path);
+	return ret;
+}
+
+#ifdef BIO_RW_DISCARD
+static void btrfs_issue_discard(struct block_device *bdev,
+				u64 start, u64 len)
+{
+	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
+}
+#endif
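The >> 9 above converts byte offsets into the 512-byte sectors that blkdev_issue_discard() expects; for example (illustrative only):

	/* discard 1 MiB starting at byte offset 64 MiB:
	 * sectors 131072 .. 133119 on the underlying device */
	btrfs_issue_discard(bdev, 64ULL << 20, 1 << 20);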
+
+static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
+				u64 num_bytes)
+{
+#ifdef BIO_RW_DISCARD
+	int ret;
+	u64 map_length = num_bytes;
+	struct btrfs_multi_bio *multi = NULL;
+
+	/* Tell the block device(s) that the sectors can be discarded */
+	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
+			      bytenr, &map_length, &multi, 0);
+	if (!ret) {
+		struct btrfs_bio_stripe *stripe = multi->stripes;
+		int i;
+
+		if (map_length > num_bytes)
+			map_length = num_bytes;
+
+		for (i = 0; i < multi->num_stripes; i++, stripe++) {
+			btrfs_issue_discard(stripe->dev->bdev,
+					    stripe->physical,
+					    map_length);
+		}
+		kfree(multi);
+	}
+
+	return ret;
+#else
+	return 0;
+#endif
+}
+
+static noinline int free_extents(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *extent_root,
+				 struct list_head *del_list)
+{
+	struct btrfs_fs_info *info = extent_root->fs_info;
+	struct btrfs_path *path;
+	struct btrfs_key key, found_key;
+	struct extent_buffer *leaf;
+	struct list_head *cur;
+	struct pending_extent_op *op;
+	struct btrfs_extent_item *ei;
+	int ret, num_to_del, extent_slot = 0, found_extent = 0;
+	u32 refs;
+	u64 bytes_freed = 0;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+	path->reada = 1;
+
+search:
+	/* search for the backref for the current ref we want to delete */
+	cur = del_list->next;
+	op = list_entry(cur, struct pending_extent_op, list);
+	ret = lookup_extent_backref(trans, extent_root, path, op->bytenr,
+				    op->orig_parent,
+				    extent_root->root_key.objectid,
+				    op->orig_generation, op->level, 1);
+	if (ret) {
+		printk(KERN_ERR "btrfs unable to find backref byte nr %llu "
+		       "root %llu gen %llu owner %u\n",
+		       (unsigned long long)op->bytenr,
+		       (unsigned long long)extent_root->root_key.objectid,
+		       (unsigned long long)op->orig_generation, op->level);
+		btrfs_print_leaf(extent_root, path->nodes[0]);
+		WARN_ON(1);
+		goto out;
+	}
+
+	extent_slot = path->slots[0];
+	num_to_del = 1;
+	found_extent = 0;
+
+	/*
+	 * if we aren't the first item on the leaf we can move back one and see
+	 * if our ref is right next to our extent item
+	 */
+	if (likely(extent_slot)) {
+		extent_slot--;
+		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
+				      extent_slot);
+		if (found_key.objectid == op->bytenr &&
+		    found_key.type == BTRFS_EXTENT_ITEM_KEY &&
+		    found_key.offset == op->num_bytes) {
+			num_to_del++;
+			found_extent = 1;
+		}
+	}
+
+	/*
+	 * if we didn't find the extent we need to delete the backref and then
+	 * search for the extent item key so we can update its ref count
+	 */
+	if (!found_extent) {
+		key.objectid = op->bytenr;
+		key.type = BTRFS_EXTENT_ITEM_KEY;
+		key.offset = op->num_bytes;
+
+		ret = remove_extent_backref(trans, extent_root, path);
+		BUG_ON(ret);
+		btrfs_release_path(extent_root, path);
+		ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
+		BUG_ON(ret);
+		extent_slot = path->slots[0];
+	}
+
+	/* this is where we update the ref count for the extent */
+	leaf = path->nodes[0];
+	ei = btrfs_item_ptr(leaf, extent_slot, struct btrfs_extent_item);
+	refs = btrfs_extent_refs(leaf, ei);
+	BUG_ON(refs == 0);
+	refs--;
+	btrfs_set_extent_refs(leaf, ei, refs);
+
+	btrfs_mark_buffer_dirty(leaf);
+
+	/*
+	 * This extent needs deleting.  The reason cur_slot is extent_slot +
+	 * num_to_del is because extent_slot points to the slot where the extent
+	 * is, and if the backref was not right next to the extent we will be
+	 * deleting at least 1 item, and will want to start searching at the
+	 * slot directly next to extent_slot.  However if we did find the
+	 * backref next to the extent item then we will be deleting at least 2
+	 * items and will want to start searching directly after the ref slot
+	 */
+	if (!refs) {
+		struct list_head *pos, *n, *end;
+		int cur_slot = extent_slot+num_to_del;
+		u64 super_used;
+		u64 root_used;
+
+		path->slots[0] = extent_slot;
+		bytes_freed = op->num_bytes;
+
+		mutex_lock(&info->pinned_mutex);
+		ret = pin_down_bytes(trans, extent_root, op->bytenr,
+				     op->num_bytes, op->level >=
+				     BTRFS_FIRST_FREE_OBJECTID);
+		mutex_unlock(&info->pinned_mutex);
+		BUG_ON(ret < 0);
+		op->del = ret;
+
+		/*
+		 * we need to see if we can delete multiple things at once, so
+		 * start looping through the list of extents we are wanting to
+		 * delete and see if their extent/backrefs are right next to
+		 * each other and the extents only have 1 ref
+		 */
+		for (pos = cur->next; pos != del_list; pos = pos->next) {
+			struct pending_extent_op *tmp;
+
+			tmp = list_entry(pos, struct pending_extent_op, list);
+
+			/* we only want to delete extent+ref at this stage */
+			if (cur_slot >= btrfs_header_nritems(leaf) - 1)
+				break;
+
+			btrfs_item_key_to_cpu(leaf, &found_key, cur_slot);
+			if (found_key.objectid != tmp->bytenr ||
+			    found_key.type != BTRFS_EXTENT_ITEM_KEY ||
+			    found_key.offset != tmp->num_bytes)
+				break;
+
+			/* check to make sure this extent only has one ref */
+			ei = btrfs_item_ptr(leaf, cur_slot,
+					    struct btrfs_extent_item);
+			if (btrfs_extent_refs(leaf, ei) != 1)
+				break;
+
+			btrfs_item_key_to_cpu(leaf, &found_key, cur_slot+1);
+			if (found_key.objectid != tmp->bytenr ||
+			    found_key.type != BTRFS_EXTENT_REF_KEY ||
+			    found_key.offset != tmp->orig_parent)
+				break;
+
+			/*
+			 * the ref is right next to the extent, we can set the
+			 * ref count to 0 since we will delete them both now
+			 */
+			btrfs_set_extent_refs(leaf, ei, 0);
+
+			/* pin down the bytes for this extent */
+			mutex_lock(&info->pinned_mutex);
+			ret = pin_down_bytes(trans, extent_root, tmp->bytenr,
+					     tmp->num_bytes, tmp->level >=
+					     BTRFS_FIRST_FREE_OBJECTID);
+			mutex_unlock(&info->pinned_mutex);
+			BUG_ON(ret < 0);
+
+			/*
+			 * use the del field to tell if we need to go ahead and
+			 * free up the extent when we delete the item or not.
+			 */
+			tmp->del = ret;
+			bytes_freed += tmp->num_bytes;
+
+			num_to_del += 2;
+			cur_slot += 2;
+		}
+		end = pos;
+
+		/* update the free space counters */
+		spin_lock(&info->delalloc_lock);
+		super_used = btrfs_super_bytes_used(&info->super_copy);
+		btrfs_set_super_bytes_used(&info->super_copy,
+					   super_used - bytes_freed);
+
+		root_used = btrfs_root_used(&extent_root->root_item);
+		btrfs_set_root_used(&extent_root->root_item,
+				    root_used - bytes_freed);
+		spin_unlock(&info->delalloc_lock);
+
+		/* delete the items */
+		ret = btrfs_del_items(trans, extent_root, path,
+				      path->slots[0], num_to_del);
+		BUG_ON(ret);
+
+		/*
+		 * loop through the extents we deleted and do the cleanup work
+		 * on them
+		 */
+		for (pos = cur, n = pos->next; pos != end;
+		     pos = n, n = pos->next) {
+			struct pending_extent_op *tmp;
+			tmp = list_entry(pos, struct pending_extent_op, list);
+
+			/*
+			 * remember tmp->del tells us whether or not we pinned
+			 * down the extent
+			 */
+			ret = update_block_group(trans, extent_root,
+						 tmp->bytenr, tmp->num_bytes, 0,
+						 tmp->del);
+			BUG_ON(ret);
+
+			list_del_init(&tmp->list);
+			unlock_extent(&info->extent_ins, tmp->bytenr,
+				      tmp->bytenr + tmp->num_bytes - 1,
+				      GFP_NOFS);
+			kfree(tmp);
+		}
+	} else if (refs && found_extent) {
+		/*
+		 * the ref and extent were right next to each other, but the
+		 * extent still has a ref, so just free the backref and keep
+		 * going
+		 */
+		ret = remove_extent_backref(trans, extent_root, path);
+		BUG_ON(ret);
+
+		list_del_init(&op->list);
+		unlock_extent(&info->extent_ins, op->bytenr,
+			      op->bytenr + op->num_bytes - 1, GFP_NOFS);
+		kfree(op);
+	} else {
+		/*
+		 * the extent has multiple refs and the backref we were looking
+		 * for was not right next to it, so just unlock and go next,
+		 * we're good to go
+		 */
+		list_del_init(&op->list);
+		unlock_extent(&info->extent_ins, op->bytenr,
+			      op->bytenr + op->num_bytes - 1, GFP_NOFS);
+		kfree(op);
+	}
+
+	btrfs_release_path(extent_root, path);
+	if (!list_empty(del_list))
+		goto search;
+
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
+				     struct btrfs_root *root, u64 bytenr,
+				     u64 orig_parent, u64 parent,
+				     u64 orig_root, u64 ref_root,
+				     u64 orig_generation, u64 ref_generation,
+				     u64 owner_objectid)
+{
+	int ret;
+	struct btrfs_root *extent_root = root->fs_info->extent_root;
+	struct btrfs_path *path;
+
+	if (root == root->fs_info->extent_root) {
+		struct pending_extent_op *extent_op;
+		u64 num_bytes;
+
+		BUG_ON(owner_objectid >= BTRFS_MAX_LEVEL);
+		num_bytes = btrfs_level_size(root, (int)owner_objectid);
+		mutex_lock(&root->fs_info->extent_ins_mutex);
+		if (test_range_bit(&root->fs_info->extent_ins, bytenr,
+				bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
+			u64 priv;
+			ret = get_state_private(&root->fs_info->extent_ins,
+						bytenr, &priv);
+			BUG_ON(ret);
+			extent_op = (struct pending_extent_op *)
+							(unsigned long)priv;
+			BUG_ON(extent_op->parent != orig_parent);
+			BUG_ON(extent_op->generation != orig_generation);
+
+			extent_op->parent = parent;
+			extent_op->generation = ref_generation;
+		} else {
+			extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
+			BUG_ON(!extent_op);
+
+			extent_op->type = PENDING_BACKREF_UPDATE;
+			extent_op->bytenr = bytenr;
+			extent_op->num_bytes = num_bytes;
+			extent_op->parent = parent;
+			extent_op->orig_parent = orig_parent;
+			extent_op->generation = ref_generation;
+			extent_op->orig_generation = orig_generation;
+			extent_op->level = (int)owner_objectid;
+			INIT_LIST_HEAD(&extent_op->list);
+			extent_op->del = 0;
+
+			set_extent_bits(&root->fs_info->extent_ins,
+					bytenr, bytenr + num_bytes - 1,
+					EXTENT_WRITEBACK, GFP_NOFS);
+			set_state_private(&root->fs_info->extent_ins,
+					  bytenr, (unsigned long)extent_op);
+		}
+		mutex_unlock(&root->fs_info->extent_ins_mutex);
+		return 0;
+	}
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+	ret = lookup_extent_backref(trans, extent_root, path,
+				    bytenr, orig_parent, orig_root,
+				    orig_generation, owner_objectid, 1);
+	if (ret)
+		goto out;
+	ret = remove_extent_backref(trans, extent_root, path);
+	if (ret)
+		goto out;
+	ret = insert_extent_backref(trans, extent_root, path, bytenr,
+				    parent, ref_root, ref_generation,
+				    owner_objectid);
+	BUG_ON(ret);
+	finish_current_insert(trans, extent_root, 0);
+	del_pending_extents(trans, extent_root, 0);
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root, u64 bytenr,
+			    u64 orig_parent, u64 parent,
+			    u64 ref_root, u64 ref_generation,
+			    u64 owner_objectid)
+{
+	int ret;
+	if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
+	    owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
+		return 0;
+	ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_parent,
+					parent, ref_root, ref_root,
+					ref_generation, ref_generation,
+					owner_objectid);
+	return ret;
+}
+
+static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+				  struct btrfs_root *root, u64 bytenr,
+				  u64 orig_parent, u64 parent,
+				  u64 orig_root, u64 ref_root,
+				  u64 orig_generation, u64 ref_generation,
+				  u64 owner_objectid)
+{
+	struct btrfs_path *path;
+	int ret;
+	struct btrfs_key key;
+	struct extent_buffer *l;
+	struct btrfs_extent_item *item;
+	u32 refs;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	path->reada = 1;
+	key.objectid = bytenr;
+	key.type = BTRFS_EXTENT_ITEM_KEY;
+	key.offset = (u64)-1;
+
+	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
+				0, 1);
+	if (ret < 0)
+		return ret;
+	BUG_ON(ret == 0 || path->slots[0] == 0);
+
+	path->slots[0]--;
+	l = path->nodes[0];
+
+	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
+	if (key.objectid != bytenr) {
+		btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]);
+		printk(KERN_ERR "btrfs wanted %llu found %llu\n",
+		       (unsigned long long)bytenr,
+		       (unsigned long long)key.objectid);
+		BUG();
+	}
+	BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);
+
+	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
+	refs = btrfs_extent_refs(l, item);
+	btrfs_set_extent_refs(l, item, refs + 1);
+	btrfs_mark_buffer_dirty(path->nodes[0]);
+
+	btrfs_release_path(root->fs_info->extent_root, path);
+
+	path->reada = 1;
+	ret = insert_extent_backref(trans, root->fs_info->extent_root,
+				    path, bytenr, parent,
+				    ref_root, ref_generation,
+				    owner_objectid);
+	BUG_ON(ret);
+	finish_current_insert(trans, root->fs_info->extent_root, 0);
+	del_pending_extents(trans, root->fs_info->extent_root, 0);
+
+	btrfs_free_path(path);
+	return 0;
+}
+
+int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+			 struct btrfs_root *root,
+			 u64 bytenr, u64 num_bytes, u64 parent,
+			 u64 ref_root, u64 ref_generation,
+			 u64 owner_objectid)
+{
+	int ret;
+	if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
+	    owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
+		return 0;
+	ret = __btrfs_inc_extent_ref(trans, root, bytenr, 0, parent,
+				     0, ref_root, 0, ref_generation,
+				     owner_objectid);
+	return ret;
+}
+
+int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
+			 struct btrfs_root *root)
+{
+	finish_current_insert(trans, root->fs_info->extent_root, 1);
+	del_pending_extents(trans, root->fs_info->extent_root, 1);
+	return 0;
+}
+
+int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root, u64 bytenr,
+			    u64 num_bytes, u32 *refs)
+{
+	struct btrfs_path *path;
+	int ret;
+	struct btrfs_key key;
+	struct extent_buffer *l;
+	struct btrfs_extent_item *item;
+
+	WARN_ON(num_bytes < root->sectorsize);
+	path = btrfs_alloc_path();
+	path->reada = 1;
+	key.objectid = bytenr;
+	key.offset = num_bytes;
+	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
+	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
+				0, 0);
+	if (ret < 0)
+		goto out;
+	if (ret != 0) {
+		btrfs_print_leaf(root, path->nodes[0]);
+		printk(KERN_INFO "btrfs failed to find block number %llu\n",
+		       (unsigned long long)bytenr);
+		BUG();
+	}
+	l = path->nodes[0];
+	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
+	*refs = btrfs_extent_refs(l, item);
+out:
+	btrfs_free_path(path);
+	return 0;
+}
+
+int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root, u64 objectid, u64 bytenr)
+{
+	struct btrfs_root *extent_root = root->fs_info->extent_root;
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	struct btrfs_extent_ref *ref_item;
+	struct btrfs_key key;
+	struct btrfs_key found_key;
+	u64 ref_root;
+	u64 last_snapshot;
+	u32 nritems;
+	int ret;
+
+	key.objectid = bytenr;
+	key.offset = (u64)-1;
+	key.type = BTRFS_EXTENT_ITEM_KEY;
+
+	path = btrfs_alloc_path();
+	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+	BUG_ON(ret == 0);
+
+	ret = -ENOENT;
+	if (path->slots[0] == 0)
+		goto out;
+
+	path->slots[0]--;
+	leaf = path->nodes[0];
+	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+
+	if (found_key.objectid != bytenr ||
+	    found_key.type != BTRFS_EXTENT_ITEM_KEY)
+		goto out;
+
+	last_snapshot = btrfs_root_last_snapshot(&root->root_item);
+	while (1) {
+		leaf = path->nodes[0];
+		nritems = btrfs_header_nritems(leaf);
+		if (path->slots[0] >= nritems) {
+			ret = btrfs_next_leaf(extent_root, path);
+			if (ret < 0)
+				goto out;
+			if (ret == 0)
+				continue;
+			break;
+		}
+		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+		if (found_key.objectid != bytenr)
+			break;
+
+		if (found_key.type != BTRFS_EXTENT_REF_KEY) {
+			path->slots[0]++;
+			continue;
+		}
+
+		ref_item = btrfs_item_ptr(leaf, path->slots[0],
+					  struct btrfs_extent_ref);
+		ref_root = btrfs_ref_root(leaf, ref_item);
+		if ((ref_root != root->root_key.objectid &&
+		     ref_root != BTRFS_TREE_LOG_OBJECTID) ||
+		     objectid != btrfs_ref_objectid(leaf, ref_item)) {
+			ret = 1;
+			goto out;
+		}
+		if (btrfs_ref_generation(leaf, ref_item) <= last_snapshot) {
+			ret = 1;
+			goto out;
+		}
+
+		path->slots[0]++;
+	}
+	ret = 0;
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+		    struct extent_buffer *buf, u32 nr_extents)
+{
+	struct btrfs_key key;
+	struct btrfs_file_extent_item *fi;
+	u64 root_gen;
+	u32 nritems;
+	int i;
+	int level;
+	int ret = 0;
+	int shared = 0;
+
+	if (!root->ref_cows)
+		return 0;
+
+	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
+		shared = 0;
+		root_gen = root->root_key.offset;
+	} else {
+		shared = 1;
+		root_gen = trans->transid - 1;
+	}
+
+	level = btrfs_header_level(buf);
+	nritems = btrfs_header_nritems(buf);
+
+	if (level == 0) {
+		struct btrfs_leaf_ref *ref;
+		struct btrfs_extent_info *info;
+
+		ref = btrfs_alloc_leaf_ref(root, nr_extents);
+		if (!ref) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		ref->root_gen = root_gen;
+		ref->bytenr = buf->start;
+		ref->owner = btrfs_header_owner(buf);
+		ref->generation = btrfs_header_generation(buf);
+		ref->nritems = nr_extents;
+		info = ref->extents;
+
+		for (i = 0; nr_extents > 0 && i < nritems; i++) {
+			u64 disk_bytenr;
+			btrfs_item_key_to_cpu(buf, &key, i);
+			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
+				continue;
+			fi = btrfs_item_ptr(buf, i,
+					    struct btrfs_file_extent_item);
+			if (btrfs_file_extent_type(buf, fi) ==
+			    BTRFS_FILE_EXTENT_INLINE)
+				continue;
+			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
+			if (disk_bytenr == 0)
+				continue;
+
+			info->bytenr = disk_bytenr;
+			info->num_bytes =
+				btrfs_file_extent_disk_num_bytes(buf, fi);
+			info->objectid = key.objectid;
+			info->offset = key.offset;
+			info++;
+		}
+
+		ret = btrfs_add_leaf_ref(root, ref, shared);
+		if (ret == -EEXIST && shared) {
+			struct btrfs_leaf_ref *old;
+			old = btrfs_lookup_leaf_ref(root, ref->bytenr);
+			BUG_ON(!old);
+			btrfs_remove_leaf_ref(root, old);
+			btrfs_free_leaf_ref(root, old);
+			ret = btrfs_add_leaf_ref(root, ref, shared);
+		}
+		WARN_ON(ret);
+		btrfs_free_leaf_ref(root, ref);
+	}
+out:
+	return ret;
+}
+
+int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+		  struct extent_buffer *orig_buf, struct extent_buffer *buf,
+		  u32 *nr_extents)
+{
+	u64 bytenr;
+	u64 ref_root;
+	u64 orig_root;
+	u64 ref_generation;
+	u64 orig_generation;
+	u32 nritems;
+	u32 nr_file_extents = 0;
+	struct btrfs_key key;
+	struct btrfs_file_extent_item *fi;
+	int i;
+	int level;
+	int ret = 0;
+	int faili = 0;
+	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
+			    u64, u64, u64, u64, u64, u64, u64, u64);
+
+	ref_root = btrfs_header_owner(buf);
+	ref_generation = btrfs_header_generation(buf);
+	orig_root = btrfs_header_owner(orig_buf);
+	orig_generation = btrfs_header_generation(orig_buf);
+
+	nritems = btrfs_header_nritems(buf);
+	level = btrfs_header_level(buf);
+
+	if (root->ref_cows) {
+		process_func = __btrfs_inc_extent_ref;
+	} else {
+		if (level == 0 &&
+		    root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
+			goto out;
+		if (level != 0 &&
+		    root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
+			goto out;
+		process_func = __btrfs_update_extent_ref;
+	}
+
+	for (i = 0; i < nritems; i++) {
+		cond_resched();
+		if (level == 0) {
+			btrfs_item_key_to_cpu(buf, &key, i);
+			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
+				continue;
+			fi = btrfs_item_ptr(buf, i,
+					    struct btrfs_file_extent_item);
+			if (btrfs_file_extent_type(buf, fi) ==
+			    BTRFS_FILE_EXTENT_INLINE)
+				continue;
+			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
+			if (bytenr == 0)
+				continue;
+
+			nr_file_extents++;
+
+			ret = process_func(trans, root, bytenr,
+					   orig_buf->start, buf->start,
+					   orig_root, ref_root,
+					   orig_generation, ref_generation,
+					   key.objectid);
+
+			if (ret) {
+				faili = i;
+				WARN_ON(1);
+				goto fail;
+			}
+		} else {
+			bytenr = btrfs_node_blockptr(buf, i);
+			ret = process_func(trans, root, bytenr,
+					   orig_buf->start, buf->start,
+					   orig_root, ref_root,
+					   orig_generation, ref_generation,
+					   level - 1);
+			if (ret) {
+				faili = i;
+				WARN_ON(1);
+				goto fail;
+			}
+		}
+	}
+out:
+	if (nr_extents) {
+		if (level == 0)
+			*nr_extents = nr_file_extents;
+		else
+			*nr_extents = nritems;
+	}
+	return 0;
+fail:
+	WARN_ON(1);
+	return ret;
+}
+
+int btrfs_update_ref(struct btrfs_trans_handle *trans,
+		     struct btrfs_root *root, struct extent_buffer *orig_buf,
+		     struct extent_buffer *buf, int start_slot, int nr)
+
+{
+	u64 bytenr;
+	u64 ref_root;
+	u64 orig_root;
+	u64 ref_generation;
+	u64 orig_generation;
+	struct btrfs_key key;
+	struct btrfs_file_extent_item *fi;
+	int i;
+	int ret;
+	int slot;
+	int level;
+
+	BUG_ON(start_slot < 0);
+	BUG_ON(start_slot + nr > btrfs_header_nritems(buf));
+
+	ref_root = btrfs_header_owner(buf);
+	ref_generation = btrfs_header_generation(buf);
+	orig_root = btrfs_header_owner(orig_buf);
+	orig_generation = btrfs_header_generation(orig_buf);
+	level = btrfs_header_level(buf);
+
+	if (!root->ref_cows) {
+		if (level == 0 &&
+		    root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
+			return 0;
+		if (level != 0 &&
+		    root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
+			return 0;
+	}
+
+	for (i = 0, slot = start_slot; i < nr; i++, slot++) {
+		cond_resched();
+		if (level == 0) {
+			btrfs_item_key_to_cpu(buf, &key, slot);
+			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
+				continue;
+			fi = btrfs_item_ptr(buf, slot,
+					    struct btrfs_file_extent_item);
+			if (btrfs_file_extent_type(buf, fi) ==
+			    BTRFS_FILE_EXTENT_INLINE)
+				continue;
+			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
+			if (bytenr == 0)
+				continue;
+			ret = __btrfs_update_extent_ref(trans, root, bytenr,
+					    orig_buf->start, buf->start,
+					    orig_root, ref_root,
+					    orig_generation, ref_generation,
+					    key.objectid);
+			if (ret)
+				goto fail;
+		} else {
+			bytenr = btrfs_node_blockptr(buf, slot);
+			ret = __btrfs_update_extent_ref(trans, root, bytenr,
+					    orig_buf->start, buf->start,
+					    orig_root, ref_root,
+					    orig_generation, ref_generation,
+					    level - 1);
+			if (ret)
+				goto fail;
+		}
+	}
+	return 0;
+fail:
+	WARN_ON(1);
+	return -1;
+}
+
+static int write_one_cache_group(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *root,
+				 struct btrfs_path *path,
+				 struct btrfs_block_group_cache *cache)
+{
+	int ret;
+	int pending_ret;
+	struct btrfs_root *extent_root = root->fs_info->extent_root;
+	unsigned long bi;
+	struct extent_buffer *leaf;
+
+	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
+	if (ret < 0)
+		goto fail;
+	BUG_ON(ret);
+
+	leaf = path->nodes[0];
+	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
+	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
+	btrfs_mark_buffer_dirty(leaf);
+	btrfs_release_path(extent_root, path);
+fail:
+	finish_current_insert(trans, extent_root, 0);
+	pending_ret = del_pending_extents(trans, extent_root, 0);
+	if (ret)
+		return ret;
+	if (pending_ret)
+		return pending_ret;
+	return 0;
+
+}
+
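+/*
+ * walk the block group cache rbtree and write out every block group
+ * that has been marked dirty
+ */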
+int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
+				   struct btrfs_root *root)
+{
+	struct btrfs_block_group_cache *cache, *entry;
+	struct rb_node *n;
+	int err = 0;
+	int werr = 0;
+	struct btrfs_path *path;
+	u64 last = 0;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	while (1) {
+		cache = NULL;
+		spin_lock(&root->fs_info->block_group_cache_lock);
+		for (n = rb_first(&root->fs_info->block_group_cache_tree);
+		     n; n = rb_next(n)) {
+			entry = rb_entry(n, struct btrfs_block_group_cache,
+					 cache_node);
+			if (entry->dirty) {
+				cache = entry;
+				break;
+			}
+		}
+		spin_unlock(&root->fs_info->block_group_cache_lock);
+
+		if (!cache)
+			break;
+
+		cache->dirty = 0;
+		last += cache->key.offset;
+
+		err = write_one_cache_group(trans, root,
+					    path, cache);
+		/*
+		 * if we fail to write the cache group, we want
+		 * to keep it marked dirty in hopes that a later
+		 * write will work
+		 */
+		if (err) {
+			werr = err;
+			continue;
+		}
+	}
+	btrfs_free_path(path);
+	return werr;
+}
+
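+/*
+ * return 1 if the block group containing 'bytenr' is read-only or
+ * cannot be found, 0 otherwise
+ */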
+int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
+{
+	struct btrfs_block_group_cache *block_group;
+	int readonly = 0;
+
+	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
+	if (!block_group || block_group->ro)
+		readonly = 1;
+	if (block_group)
+		put_block_group(block_group);
+	return readonly;
+}
+
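+/*
+ * add 'total_bytes' and 'bytes_used' to the space info matching 'flags',
+ * creating a new space info if one does not exist yet
+ */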
+static int update_space_info(struct btrfs_fs_info *info, u64 flags,
+			     u64 total_bytes, u64 bytes_used,
+			     struct btrfs_space_info **space_info)
+{
+	struct btrfs_space_info *found;
+
+	found = __find_space_info(info, flags);
+	if (found) {
+		spin_lock(&found->lock);
+		found->total_bytes += total_bytes;
+		found->bytes_used += bytes_used;
+		found->full = 0;
+		spin_unlock(&found->lock);
+		*space_info = found;
+		return 0;
+	}
+	found = kzalloc(sizeof(*found), GFP_NOFS);
+	if (!found)
+		return -ENOMEM;
+
+	list_add(&found->list, &info->space_info);
+	INIT_LIST_HEAD(&found->block_groups);
+	init_rwsem(&found->groups_sem);
+	spin_lock_init(&found->lock);
+	found->flags = flags;
+	found->total_bytes = total_bytes;
+	found->bytes_used = bytes_used;
+	found->bytes_pinned = 0;
+	found->bytes_reserved = 0;
+	found->bytes_readonly = 0;
+	found->full = 0;
+	found->force_alloc = 0;
+	*space_info = found;
+	return 0;
+}
+
+static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
+{
+	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
+				   BTRFS_BLOCK_GROUP_RAID1 |
+				   BTRFS_BLOCK_GROUP_RAID10 |
+				   BTRFS_BLOCK_GROUP_DUP);
+	if (extra_flags) {
+		if (flags & BTRFS_BLOCK_GROUP_DATA)
+			fs_info->avail_data_alloc_bits |= extra_flags;
+		if (flags & BTRFS_BLOCK_GROUP_METADATA)
+			fs_info->avail_metadata_alloc_bits |= extra_flags;
+		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+			fs_info->avail_system_alloc_bits |= extra_flags;
+	}
+}
+
+static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
+{
+	spin_lock(&cache->space_info->lock);
+	spin_lock(&cache->lock);
+	if (!cache->ro) {
+		cache->space_info->bytes_readonly += cache->key.offset -
+					btrfs_block_group_used(&cache->item);
+		cache->ro = 1;
+	}
+	spin_unlock(&cache->lock);
+	spin_unlock(&cache->space_info->lock);
+}
+
+u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
+{
+	u64 num_devices = root->fs_info->fs_devices->rw_devices;
+
+	if (num_devices == 1)
+		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
+	if (num_devices < 4)
+		flags &= ~BTRFS_BLOCK_GROUP_RAID10;
+
+	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
+	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
+		      BTRFS_BLOCK_GROUP_RAID10))) {
+		flags &= ~BTRFS_BLOCK_GROUP_DUP;
+	}
+
+	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
+	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
+		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
+	}
+
+	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
+	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
+	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
+	     (flags & BTRFS_BLOCK_GROUP_DUP)))
+		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
+	return flags;
+}
+
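+/*
+ * allocate a new chunk for the given allocation profile if the matching
+ * space info is close to full or 'force' is set
+ */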
+static int do_chunk_alloc(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *extent_root, u64 alloc_bytes,
+			  u64 flags, int force)
+{
+	struct btrfs_space_info *space_info;
+	u64 thresh;
+	int ret = 0;
+
+	mutex_lock(&extent_root->fs_info->chunk_mutex);
+
+	flags = btrfs_reduce_alloc_profile(extent_root, flags);
+
+	space_info = __find_space_info(extent_root->fs_info, flags);
+	if (!space_info) {
+		ret = update_space_info(extent_root->fs_info, flags,
+					0, 0, &space_info);
+		BUG_ON(ret);
+	}
+	BUG_ON(!space_info);
+
+	spin_lock(&space_info->lock);
+	if (space_info->force_alloc) {
+		force = 1;
+		space_info->force_alloc = 0;
+	}
+	if (space_info->full) {
+		spin_unlock(&space_info->lock);
+		goto out;
+	}
+
+	thresh = space_info->total_bytes - space_info->bytes_readonly;
+	thresh = div_factor(thresh, 6);
+	if (!force &&
+	   (space_info->bytes_used + space_info->bytes_pinned +
+	    space_info->bytes_reserved + alloc_bytes) < thresh) {
+		spin_unlock(&space_info->lock);
+		goto out;
+	}
+	spin_unlock(&space_info->lock);
+
+	ret = btrfs_alloc_chunk(trans, extent_root, flags);
+	if (ret)
+		space_info->full = 1;
+out:
+	mutex_unlock(&extent_root->fs_info->chunk_mutex);
+	return ret;
+}
+
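+/*
+ * adjust the used byte counts of the block groups covering
+ * [bytenr, bytenr + num_bytes) after an allocation or a free.  When
+ * freeing with 'mark_free' set, the range is discarded and returned to
+ * the free space cache.
+ */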
+static int update_block_group(struct btrfs_trans_handle *trans,
+			      struct btrfs_root *root,
+			      u64 bytenr, u64 num_bytes, int alloc,
+			      int mark_free)
+{
+	struct btrfs_block_group_cache *cache;
+	struct btrfs_fs_info *info = root->fs_info;
+	u64 total = num_bytes;
+	u64 old_val;
+	u64 byte_in_group;
+
+	while (total) {
+		cache = btrfs_lookup_block_group(info, bytenr);
+		if (!cache)
+			return -1;
+		byte_in_group = bytenr - cache->key.objectid;
+		WARN_ON(byte_in_group > cache->key.offset);
+
+		spin_lock(&cache->space_info->lock);
+		spin_lock(&cache->lock);
+		cache->dirty = 1;
+		old_val = btrfs_block_group_used(&cache->item);
+		num_bytes = min(total, cache->key.offset - byte_in_group);
+		if (alloc) {
+			old_val += num_bytes;
+			cache->space_info->bytes_used += num_bytes;
+			if (cache->ro)
+				cache->space_info->bytes_readonly -= num_bytes;
+			btrfs_set_block_group_used(&cache->item, old_val);
+			spin_unlock(&cache->lock);
+			spin_unlock(&cache->space_info->lock);
+		} else {
+			old_val -= num_bytes;
+			cache->space_info->bytes_used -= num_bytes;
+			if (cache->ro)
+				cache->space_info->bytes_readonly += num_bytes;
+			btrfs_set_block_group_used(&cache->item, old_val);
+			spin_unlock(&cache->lock);
+			spin_unlock(&cache->space_info->lock);
+			if (mark_free) {
+				int ret;
+
+				ret = btrfs_discard_extent(root, bytenr,
+							   num_bytes);
+				WARN_ON(ret);
+
+				ret = btrfs_add_free_space(cache, bytenr,
+							   num_bytes);
+				WARN_ON(ret);
+			}
+		}
+		put_block_group(cache);
+		total -= num_bytes;
+		bytenr += num_bytes;
+	}
+	return 0;
+}
+
+static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
+{
+	struct btrfs_block_group_cache *cache;
+	u64 bytenr;
+
+	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
+	if (!cache)
+		return 0;
+
+	bytenr = cache->key.objectid;
+	put_block_group(cache);
+
+	return bytenr;
+}
+
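+/*
+ * pin or unpin the range [bytenr, bytenr + num) and update the pinned
+ * byte counts of the affected block groups and space infos
+ */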
+int btrfs_update_pinned_extents(struct btrfs_root *root,
+				u64 bytenr, u64 num, int pin)
+{
+	u64 len;
+	struct btrfs_block_group_cache *cache;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+
+	WARN_ON(!mutex_is_locked(&root->fs_info->pinned_mutex));
+	if (pin) {
+		set_extent_dirty(&fs_info->pinned_extents,
+				bytenr, bytenr + num - 1, GFP_NOFS);
+	} else {
+		clear_extent_dirty(&fs_info->pinned_extents,
+				bytenr, bytenr + num - 1, GFP_NOFS);
+	}
+	while (num > 0) {
+		cache = btrfs_lookup_block_group(fs_info, bytenr);
+		BUG_ON(!cache);
+		len = min(num, cache->key.offset -
+			  (bytenr - cache->key.objectid));
+		if (pin) {
+			spin_lock(&cache->space_info->lock);
+			spin_lock(&cache->lock);
+			cache->pinned += len;
+			cache->space_info->bytes_pinned += len;
+			spin_unlock(&cache->lock);
+			spin_unlock(&cache->space_info->lock);
+			fs_info->total_pinned += len;
+		} else {
+			spin_lock(&cache->space_info->lock);
+			spin_lock(&cache->lock);
+			cache->pinned -= len;
+			cache->space_info->bytes_pinned -= len;
+			spin_unlock(&cache->lock);
+			spin_unlock(&cache->space_info->lock);
+			fs_info->total_pinned -= len;
+			if (cache->cached)
+				btrfs_add_free_space(cache, bytenr, len);
+		}
+		put_block_group(cache);
+		bytenr += len;
+		num -= len;
+	}
+	return 0;
+}
+
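+/*
+ * adjust the reserved byte counts of the block groups covering
+ * [bytenr, bytenr + num)
+ */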
+static int update_reserved_extents(struct btrfs_root *root,
+				   u64 bytenr, u64 num, int reserve)
+{
+	u64 len;
+	struct btrfs_block_group_cache *cache;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+
+	while (num > 0) {
+		cache = btrfs_lookup_block_group(fs_info, bytenr);
+		BUG_ON(!cache);
+		len = min(num, cache->key.offset -
+			  (bytenr - cache->key.objectid));
+
+		spin_lock(&cache->space_info->lock);
+		spin_lock(&cache->lock);
+		if (reserve) {
+			cache->reserved += len;
+			cache->space_info->bytes_reserved += len;
+		} else {
+			cache->reserved -= len;
+			cache->space_info->bytes_reserved -= len;
+		}
+		spin_unlock(&cache->lock);
+		spin_unlock(&cache->space_info->lock);
+		put_block_group(cache);
+		bytenr += len;
+		num -= len;
+	}
+	return 0;
+}
+
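+/*
+ * copy every range currently marked dirty in the pinned extent tree
+ * into 'copy'
+ */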
+int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
+{
+	u64 last = 0;
+	u64 start;
+	u64 end;
+	struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
+	int ret;
+
+	mutex_lock(&root->fs_info->pinned_mutex);
+	while (1) {
+		ret = find_first_extent_bit(pinned_extents, last,
+					    &start, &end, EXTENT_DIRTY);
+		if (ret)
+			break;
+		set_extent_dirty(copy, start, end, GFP_NOFS);
+		last = end + 1;
+	}
+	mutex_unlock(&root->fs_info->pinned_mutex);
+	return 0;
+}
+
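+/*
+ * discard and unpin every extent range recorded in 'unpin'
+ */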
+int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root,
+			       struct extent_io_tree *unpin)
+{
+	u64 start;
+	u64 end;
+	int ret;
+
+	mutex_lock(&root->fs_info->pinned_mutex);
+	while (1) {
+		ret = find_first_extent_bit(unpin, 0, &start, &end,
+					    EXTENT_DIRTY);
+		if (ret)
+			break;
+
+		ret = btrfs_discard_extent(root, start, end + 1 - start);
+
+		btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
+		clear_extent_dirty(unpin, start, end, GFP_NOFS);
+
+		if (need_resched()) {
+			mutex_unlock(&root->fs_info->pinned_mutex);
+			cond_resched();
+			mutex_lock(&root->fs_info->pinned_mutex);
+		}
+	}
+	mutex_unlock(&root->fs_info->pinned_mutex);
+	return ret;
+}
+
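+/*
+ * process the pending extent insertions and backref updates that were
+ * queued up in the extent_ins tree
+ */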
+static int finish_current_insert(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *extent_root, int all)
+{
+	u64 start;
+	u64 end;
+	u64 priv;
+	u64 search = 0;
+	u64 skipped = 0;
+	struct btrfs_fs_info *info = extent_root->fs_info;
+	struct btrfs_path *path;
+	struct pending_extent_op *extent_op, *tmp;
+	struct list_head insert_list, update_list;
+	int ret;
+	int num_inserts = 0, max_inserts;
+
+	path = btrfs_alloc_path();
+	INIT_LIST_HEAD(&insert_list);
+	INIT_LIST_HEAD(&update_list);
+
+	max_inserts = extent_root->leafsize /
+		(2 * sizeof(struct btrfs_key) + 2 * sizeof(struct btrfs_item) +
+		 sizeof(struct btrfs_extent_ref) +
+		 sizeof(struct btrfs_extent_item));
+again:
+	mutex_lock(&info->extent_ins_mutex);
+	while (1) {
+		ret = find_first_extent_bit(&info->extent_ins, search, &start,
+					    &end, EXTENT_WRITEBACK);
+		if (ret) {
+			if (skipped && all && !num_inserts) {
+				skipped = 0;
+				search = 0;
+				continue;
+			}
+			mutex_unlock(&info->extent_ins_mutex);
+			break;
+		}
+
+		ret = try_lock_extent(&info->extent_ins, start, end, GFP_NOFS);
+		if (!ret) {
+			skipped = 1;
+			search = end + 1;
+			if (need_resched()) {
+				mutex_unlock(&info->extent_ins_mutex);
+				cond_resched();
+				mutex_lock(&info->extent_ins_mutex);
+			}
+			continue;
+		}
+
+		ret = get_state_private(&info->extent_ins, start, &priv);
+		BUG_ON(ret);
+		extent_op = (struct pending_extent_op *)(unsigned long) priv;
+
+		if (extent_op->type == PENDING_EXTENT_INSERT) {
+			num_inserts++;
+			list_add_tail(&extent_op->list, &insert_list);
+			search = end + 1;
+			if (num_inserts == max_inserts) {
+				mutex_unlock(&info->extent_ins_mutex);
+				break;
+			}
+		} else if (extent_op->type == PENDING_BACKREF_UPDATE) {
+			list_add_tail(&extent_op->list, &update_list);
+			search = end + 1;
+		} else {
+			BUG();
+		}
+	}
+
+	/*
+	 * process the update list, clear the writeback bit for it, and if
+	 * somebody marked this thing for deletion then just unlock it and be
+	 * done, the free_extents will handle it
+	 */
+	mutex_lock(&info->extent_ins_mutex);
+	list_for_each_entry_safe(extent_op, tmp, &update_list, list) {
+		clear_extent_bits(&info->extent_ins, extent_op->bytenr,
+				  extent_op->bytenr + extent_op->num_bytes - 1,
+				  EXTENT_WRITEBACK, GFP_NOFS);
+		if (extent_op->del) {
+			list_del_init(&extent_op->list);
+			unlock_extent(&info->extent_ins, extent_op->bytenr,
+				      extent_op->bytenr + extent_op->num_bytes
+				      - 1, GFP_NOFS);
+			kfree(extent_op);
+		}
+	}
+	mutex_unlock(&info->extent_ins_mutex);
+
+	/*
+	 * still have things left on the update list, go ahead and update
+	 * everything
+	 */
+	if (!list_empty(&update_list)) {
+		ret = update_backrefs(trans, extent_root, path, &update_list);
+		BUG_ON(ret);
+	}
+
+	/*
+	 * if no inserts need to be done, but we skipped some extents and we
+	 * need to make sure everything is cleaned then reset everything and
+	 * go back to the beginning
+	 */
+	if (!num_inserts && all && skipped) {
+		search = 0;
+		skipped = 0;
+		INIT_LIST_HEAD(&update_list);
+		INIT_LIST_HEAD(&insert_list);
+		goto again;
+	} else if (!num_inserts) {
+		goto out;
+	}
+
+	/*
+	 * process the insert extents list.  Again if we are deleting this
+	 * extent, then just unlock it, pin down the bytes if need be, and be
+	 * done with it.  Saves us from having to actually insert the extent
+	 * into the tree and then subsequently come along and delete it
+	 */
+	mutex_lock(&info->extent_ins_mutex);
+	list_for_each_entry_safe(extent_op, tmp, &insert_list, list) {
+		clear_extent_bits(&info->extent_ins, extent_op->bytenr,
+				  extent_op->bytenr + extent_op->num_bytes - 1,
+				  EXTENT_WRITEBACK, GFP_NOFS);
+		if (extent_op->del) {
+			u64 used;
+			list_del_init(&extent_op->list);
+			unlock_extent(&info->extent_ins, extent_op->bytenr,
+				      extent_op->bytenr + extent_op->num_bytes
+				      - 1, GFP_NOFS);
+
+			mutex_lock(&extent_root->fs_info->pinned_mutex);
+			ret = pin_down_bytes(trans, extent_root,
+					     extent_op->bytenr,
+					     extent_op->num_bytes, 0);
+			mutex_unlock(&extent_root->fs_info->pinned_mutex);
+
+			spin_lock(&info->delalloc_lock);
+			used = btrfs_super_bytes_used(&info->super_copy);
+			btrfs_set_super_bytes_used(&info->super_copy,
+					used - extent_op->num_bytes);
+			used = btrfs_root_used(&extent_root->root_item);
+			btrfs_set_root_used(&extent_root->root_item,
+					used - extent_op->num_bytes);
+			spin_unlock(&info->delalloc_lock);
+
+			ret = update_block_group(trans, extent_root,
+						 extent_op->bytenr,
+						 extent_op->num_bytes,
+						 0, ret > 0);
+			BUG_ON(ret);
+			kfree(extent_op);
+			num_inserts--;
+		}
+	}
+	mutex_unlock(&info->extent_ins_mutex);
+
+	ret = insert_extents(trans, extent_root, path, &insert_list,
+			     num_inserts);
+	BUG_ON(ret);
+
+	/*
+	 * if we broke out of the loop in order to insert stuff because we hit
+	 * the maximum number of inserts at a time we can handle, then loop
+	 * back and pick up where we left off
+	 */
+	if (num_inserts == max_inserts) {
+		INIT_LIST_HEAD(&insert_list);
+		INIT_LIST_HEAD(&update_list);
+		num_inserts = 0;
+		goto again;
+	}
+
+	/*
+	 * again, if we need to make absolutely sure there are no more pending
+	 * extent operations left and we know that we skipped some, go back to
+	 * the beginning and do it all again
+	 */
+	if (all && skipped) {
+		INIT_LIST_HEAD(&insert_list);
+		INIT_LIST_HEAD(&update_list);
+		search = 0;
+		skipped = 0;
+		num_inserts = 0;
+		goto again;
+	}
+out:
+	btrfs_free_path(path);
+	return 0;
+}
+
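+/*
+ * pin down the given range.  If it covers an unwritten tree block from
+ * the current transaction, the block is cleaned so it can be reused and
+ * 1 is returned instead of pinning.
+ */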
+static int pin_down_bytes(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root,
+			  u64 bytenr, u64 num_bytes, int is_data)
+{
+	int err = 0;
+	struct extent_buffer *buf;
+
+	if (is_data)
+		goto pinit;
+
+	buf = btrfs_find_tree_block(root, bytenr, num_bytes);
+	if (!buf)
+		goto pinit;
+
+	/* we can reuse a block if it hasn't been written
+	 * and it is from this transaction.  We can't
+	 * reuse anything from the tree log root because
+	 * it has tiny sub-transactions.
+	 */
+	if (btrfs_buffer_uptodate(buf, 0) &&
+	    btrfs_try_tree_lock(buf)) {
+		u64 header_owner = btrfs_header_owner(buf);
+		u64 header_transid = btrfs_header_generation(buf);
+		if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
+		    header_owner != BTRFS_TREE_RELOC_OBJECTID &&
+		    header_transid == trans->transid &&
+		    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
+			clean_tree_block(NULL, root, buf);
+			btrfs_tree_unlock(buf);
+			free_extent_buffer(buf);
+			return 1;
+		}
+		btrfs_tree_unlock(buf);
+	}
+	free_extent_buffer(buf);
+pinit:
+	btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
+
+	BUG_ON(err < 0);
+	return 0;
+}
+
+/*
+ * remove an extent from the root, returns 0 on success
+ */
+static int __free_extent(struct btrfs_trans_handle *trans,
+			 struct btrfs_root *root,
+			 u64 bytenr, u64 num_bytes, u64 parent,
+			 u64 root_objectid, u64 ref_generation,
+			 u64 owner_objectid, int pin, int mark_free)
+{
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct btrfs_fs_info *info = root->fs_info;
+	struct btrfs_root *extent_root = info->extent_root;
+	struct extent_buffer *leaf;
+	int ret;
+	int extent_slot = 0;
+	int found_extent = 0;
+	int num_to_del = 1;
+	struct btrfs_extent_item *ei;
+	u32 refs;
+
+	key.objectid = bytenr;
+	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
+	key.offset = num_bytes;
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	path->reada = 1;
+	ret = lookup_extent_backref(trans, extent_root, path,
+				    bytenr, parent, root_objectid,
+				    ref_generation, owner_objectid, 1);
+	if (ret == 0) {
+		struct btrfs_key found_key;
+		extent_slot = path->slots[0];
+		while (extent_slot > 0) {
+			extent_slot--;
+			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
+					      extent_slot);
+			if (found_key.objectid != bytenr)
+				break;
+			if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
+			    found_key.offset == num_bytes) {
+				found_extent = 1;
+				break;
+			}
+			if (path->slots[0] - extent_slot > 5)
+				break;
+		}
+		if (!found_extent) {
+			ret = remove_extent_backref(trans, extent_root, path);
+			BUG_ON(ret);
+			btrfs_release_path(extent_root, path);
+			ret = btrfs_search_slot(trans, extent_root,
+						&key, path, -1, 1);
+			if (ret) {
+				printk(KERN_ERR "umm, got %d back from search"
+				       ", was looking for %llu\n", ret,
+				       (unsigned long long)bytenr);
+				btrfs_print_leaf(extent_root, path->nodes[0]);
+			}
+			BUG_ON(ret);
+			extent_slot = path->slots[0];
+		}
+	} else {
+		btrfs_print_leaf(extent_root, path->nodes[0]);
+		WARN_ON(1);
+		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
+		       "root %llu gen %llu owner %llu\n",
+		       (unsigned long long)bytenr,
+		       (unsigned long long)root_objectid,
+		       (unsigned long long)ref_generation,
+		       (unsigned long long)owner_objectid);
+	}
+
+	leaf = path->nodes[0];
+	ei = btrfs_item_ptr(leaf, extent_slot,
+			    struct btrfs_extent_item);
+	refs = btrfs_extent_refs(leaf, ei);
+	BUG_ON(refs == 0);
+	refs -= 1;
+	btrfs_set_extent_refs(leaf, ei, refs);
+
+	btrfs_mark_buffer_dirty(leaf);
+
+	if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
+		struct btrfs_extent_ref *ref;
+		ref = btrfs_item_ptr(leaf, path->slots[0],
+				     struct btrfs_extent_ref);
+		BUG_ON(btrfs_ref_num_refs(leaf, ref) != 1);
+		/* if the back ref and the extent are next to each other
+		 * they get deleted below in one shot
+		 */
+		path->slots[0] = extent_slot;
+		num_to_del = 2;
+	} else if (found_extent) {
+		/* otherwise delete the extent back ref */
+		ret = remove_extent_backref(trans, extent_root, path);
+		BUG_ON(ret);
+		/* if refs are 0, we need to setup the path for deletion */
+		if (refs == 0) {
+			btrfs_release_path(extent_root, path);
+			ret = btrfs_search_slot(trans, extent_root, &key, path,
+						-1, 1);
+			BUG_ON(ret);
+		}
+	}
+
+	if (refs == 0) {
+		u64 super_used;
+		u64 root_used;
+
+		if (pin) {
+			mutex_lock(&root->fs_info->pinned_mutex);
+			ret = pin_down_bytes(trans, root, bytenr, num_bytes,
+				owner_objectid >= BTRFS_FIRST_FREE_OBJECTID);
+			mutex_unlock(&root->fs_info->pinned_mutex);
+			if (ret > 0)
+				mark_free = 1;
+			BUG_ON(ret < 0);
+		}
+		/* block accounting for super block */
+		spin_lock(&info->delalloc_lock);
+		super_used = btrfs_super_bytes_used(&info->super_copy);
+		btrfs_set_super_bytes_used(&info->super_copy,
+					   super_used - num_bytes);
+
+		/* block accounting for root item */
+		root_used = btrfs_root_used(&root->root_item);
+		btrfs_set_root_used(&root->root_item,
+					   root_used - num_bytes);
+		spin_unlock(&info->delalloc_lock);
+		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
+				      num_to_del);
+		BUG_ON(ret);
+		btrfs_release_path(extent_root, path);
+
+		if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
+			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
+			BUG_ON(ret);
+		}
+
+		ret = update_block_group(trans, root, bytenr, num_bytes, 0,
+					 mark_free);
+		BUG_ON(ret);
+	}
+	btrfs_free_path(path);
+	finish_current_insert(trans, extent_root, 0);
+	return ret;
+}
+
+/*
+ * find all the blocks marked as pending in the radix tree and remove
+ * them from the extent map
+ */
+static int del_pending_extents(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *extent_root, int all)
+{
+	int ret;
+	int err = 0;
+	u64 start;
+	u64 end;
+	u64 priv;
+	u64 search = 0;
+	int nr = 0, skipped = 0;
+	struct extent_io_tree *pending_del;
+	struct extent_io_tree *extent_ins;
+	struct pending_extent_op *extent_op;
+	struct btrfs_fs_info *info = extent_root->fs_info;
+	struct list_head delete_list;
+
+	INIT_LIST_HEAD(&delete_list);
+	extent_ins = &extent_root->fs_info->extent_ins;
+	pending_del = &extent_root->fs_info->pending_del;
+
+again:
+	mutex_lock(&info->extent_ins_mutex);
+	while (1) {
+		ret = find_first_extent_bit(pending_del, search, &start, &end,
+					    EXTENT_WRITEBACK);
+		if (ret) {
+			if (all && skipped && !nr) {
+				search = 0;
+				continue;
+			}
+			mutex_unlock(&info->extent_ins_mutex);
+			break;
+		}
+
+		ret = try_lock_extent(extent_ins, start, end, GFP_NOFS);
+		if (!ret) {
+			search = end+1;
+			skipped = 1;
+
+			if (need_resched()) {
+				mutex_unlock(&info->extent_ins_mutex);
+				cond_resched();
+				mutex_lock(&info->extent_ins_mutex);
+			}
+
+			continue;
+		}
+		BUG_ON(ret < 0);
+
+		ret = get_state_private(pending_del, start, &priv);
+		BUG_ON(ret);
+		extent_op = (struct pending_extent_op *)(unsigned long)priv;
+
+		clear_extent_bits(pending_del, start, end, EXTENT_WRITEBACK,
+				  GFP_NOFS);
+		if (!test_range_bit(extent_ins, start, end,
+				    EXTENT_WRITEBACK, 0)) {
+			list_add_tail(&extent_op->list, &delete_list);
+			nr++;
+		} else {
+			kfree(extent_op);
+
+			ret = get_state_private(&info->extent_ins, start,
+						&priv);
+			BUG_ON(ret);
+			extent_op = (struct pending_extent_op *)
+						(unsigned long)priv;
+
+			clear_extent_bits(&info->extent_ins, start, end,
+					  EXTENT_WRITEBACK, GFP_NOFS);
+
+			if (extent_op->type == PENDING_BACKREF_UPDATE) {
+				list_add_tail(&extent_op->list, &delete_list);
+				search = end + 1;
+				nr++;
+				continue;
+			}
+
+			mutex_lock(&extent_root->fs_info->pinned_mutex);
+			ret = pin_down_bytes(trans, extent_root, start,
+					     end + 1 - start, 0);
+			mutex_unlock(&extent_root->fs_info->pinned_mutex);
+
+			ret = update_block_group(trans, extent_root, start,
+						end + 1 - start, 0, ret > 0);
+
+			unlock_extent(extent_ins, start, end, GFP_NOFS);
+			BUG_ON(ret);
+			kfree(extent_op);
+		}
+		if (ret)
+			err = ret;
+
+		search = end + 1;
+
+		if (need_resched()) {
+			mutex_unlock(&info->extent_ins_mutex);
+			cond_resched();
+			mutex_lock(&info->extent_ins_mutex);
+		}
+	}
+
+	if (nr) {
+		ret = free_extents(trans, extent_root, &delete_list);
+		BUG_ON(ret);
+	}
+
+	if (all && skipped) {
+		INIT_LIST_HEAD(&delete_list);
+		search = 0;
+		nr = 0;
+		goto again;
+	}
+
+	return err;
+}
+
+/*
+ * remove an extent from the root, returns 0 on success
+ */
+static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root,
+			       u64 bytenr, u64 num_bytes, u64 parent,
+			       u64 root_objectid, u64 ref_generation,
+			       u64 owner_objectid, int pin)
+{
+	struct btrfs_root *extent_root = root->fs_info->extent_root;
+	int pending_ret;
+	int ret;
+
+	WARN_ON(num_bytes < root->sectorsize);
+	if (root == extent_root) {
+		struct pending_extent_op *extent_op = NULL;
+
+		mutex_lock(&root->fs_info->extent_ins_mutex);
+		if (test_range_bit(&root->fs_info->extent_ins, bytenr,
+				bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
+			u64 priv;
+			ret = get_state_private(&root->fs_info->extent_ins,
+						bytenr, &priv);
+			BUG_ON(ret);
+			extent_op = (struct pending_extent_op *)
+						(unsigned long)priv;
+
+			extent_op->del = 1;
+			if (extent_op->type == PENDING_EXTENT_INSERT) {
+				mutex_unlock(&root->fs_info->extent_ins_mutex);
+				return 0;
+			}
+		}
+
+		if (extent_op) {
+			ref_generation = extent_op->orig_generation;
+			parent = extent_op->orig_parent;
+		}
+
+		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
+		BUG_ON(!extent_op);
+
+		extent_op->type = PENDING_EXTENT_DELETE;
+		extent_op->bytenr = bytenr;
+		extent_op->num_bytes = num_bytes;
+		extent_op->parent = parent;
+		extent_op->orig_parent = parent;
+		extent_op->generation = ref_generation;
+		extent_op->orig_generation = ref_generation;
+		extent_op->level = (int)owner_objectid;
+		INIT_LIST_HEAD(&extent_op->list);
+		extent_op->del = 0;
+
+		set_extent_bits(&root->fs_info->pending_del,
+				bytenr, bytenr + num_bytes - 1,
+				EXTENT_WRITEBACK, GFP_NOFS);
+		set_state_private(&root->fs_info->pending_del,
+				  bytenr, (unsigned long)extent_op);
+		mutex_unlock(&root->fs_info->extent_ins_mutex);
+		return 0;
+	}
+	/* if metadata always pin */
+	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
+		if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
+			struct btrfs_block_group_cache *cache;
+
+			/* btrfs_free_reserved_extent */
+			cache = btrfs_lookup_block_group(root->fs_info, bytenr);
+			BUG_ON(!cache);
+			btrfs_add_free_space(cache, bytenr, num_bytes);
+			put_block_group(cache);
+			update_reserved_extents(root, bytenr, num_bytes, 0);
+			return 0;
+		}
+		pin = 1;
+	}
+
+	/* if data pin when any transaction has committed this */
+	if (ref_generation != trans->transid)
+		pin = 1;
+
+	ret = __free_extent(trans, root, bytenr, num_bytes, parent,
+			    root_objectid, ref_generation,
+			    owner_objectid, pin, pin == 0);
+
+	finish_current_insert(trans, root->fs_info->extent_root, 0);
+	pending_ret = del_pending_extents(trans, root->fs_info->extent_root, 0);
+	return ret ? ret : pending_ret;
+}
+
+int btrfs_free_extent(struct btrfs_trans_handle *trans,
+		      struct btrfs_root *root,
+		      u64 bytenr, u64 num_bytes, u64 parent,
+		      u64 root_objectid, u64 ref_generation,
+		      u64 owner_objectid, int pin)
+{
+	int ret;
+
+	ret = __btrfs_free_extent(trans, root, bytenr, num_bytes, parent,
+				  root_objectid, ref_generation,
+				  owner_objectid, pin);
+	return ret;
+}
+
+static u64 stripe_align(struct btrfs_root *root, u64 val)
+{
+	u64 mask = ((u64)root->stripesize - 1);
+	u64 ret = (val + mask) & ~mask;
+	return ret;
+}
+
+/*
+ * walks the btree of allocated extents and finds a hole of a given size.
+ * The key ins is changed to record the hole:
+ * ins->objectid == block start
+ * ins->flags = BTRFS_EXTENT_ITEM_KEY
+ * ins->offset == number of blocks
+ * Any available blocks before search_start are skipped.
+ */
+static noinline int find_free_extent(struct btrfs_trans_handle *trans,
+				     struct btrfs_root *orig_root,
+				     u64 num_bytes, u64 empty_size,
+				     u64 search_start, u64 search_end,
+				     u64 hint_byte, struct btrfs_key *ins,
+				     u64 exclude_start, u64 exclude_nr,
+				     int data)
+{
+	int ret = 0;
+	struct btrfs_root *root = orig_root->fs_info->extent_root;
+	u64 total_needed = num_bytes;
+	u64 *last_ptr = NULL;
+	u64 last_wanted = 0;
+	struct btrfs_block_group_cache *block_group = NULL;
+	int chunk_alloc_done = 0;
+	int empty_cluster = 2 * 1024 * 1024;
+	int allowed_chunk_alloc = 0;
+	struct list_head *head = NULL, *cur = NULL;
+	int loop = 0;
+	int extra_loop = 0;
+	struct btrfs_space_info *space_info;
+
+	WARN_ON(num_bytes < root->sectorsize);
+	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
+	ins->objectid = 0;
+	ins->offset = 0;
+
+	if (orig_root->ref_cows || empty_size)
+		allowed_chunk_alloc = 1;
+
+	if (data & BTRFS_BLOCK_GROUP_METADATA) {
+		last_ptr = &root->fs_info->last_alloc;
+		empty_cluster = 64 * 1024;
+	}
+
+	if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
+		last_ptr = &root->fs_info->last_data_alloc;
+
+	if (last_ptr) {
+		if (*last_ptr) {
+			hint_byte = *last_ptr;
+			last_wanted = *last_ptr;
+		} else
+			empty_size += empty_cluster;
+	} else {
+		empty_cluster = 0;
+	}
+	search_start = max(search_start, first_logical_byte(root, 0));
+	search_start = max(search_start, hint_byte);
+
+	if (last_wanted && search_start != last_wanted) {
+		last_wanted = 0;
+		empty_size += empty_cluster;
+	}
+
+	total_needed += empty_size;
+	block_group = btrfs_lookup_block_group(root->fs_info, search_start);
+	if (!block_group)
+		block_group = btrfs_lookup_first_block_group(root->fs_info,
+							     search_start);
+	space_info = __find_space_info(root->fs_info, data);
+
+	down_read(&space_info->groups_sem);
+	while (1) {
+		struct btrfs_free_space *free_space;
+		/*
+		 * the only way this happens is if our hint points to a block
+		 * group that's not of the proper type; while looping this
+		 * should never happen
+		 */
+		if (empty_size)
+			extra_loop = 1;
+
+		if (!block_group)
+			goto new_group_no_lock;
+
+		if (unlikely(!block_group->cached)) {
+			mutex_lock(&block_group->cache_mutex);
+			ret = cache_block_group(root, block_group);
+			mutex_unlock(&block_group->cache_mutex);
+			if (ret)
+				break;
+		}
+
+		mutex_lock(&block_group->alloc_mutex);
+		if (unlikely(!block_group_bits(block_group, data)))
+			goto new_group;
+
+		if (unlikely(block_group->ro))
+			goto new_group;
+
+		free_space = btrfs_find_free_space(block_group, search_start,
+						   total_needed);
+		if (free_space) {
+			u64 start = block_group->key.objectid;
+			u64 end = block_group->key.objectid +
+				block_group->key.offset;
+
+			search_start = stripe_align(root, free_space->offset);
+
+			/* move on to the next group */
+			if (search_start + num_bytes >= search_end)
+				goto new_group;
+
+			/* move on to the next group */
+			if (search_start + num_bytes > end)
+				goto new_group;
+
+			if (last_wanted && search_start != last_wanted) {
+				total_needed += empty_cluster;
+				empty_size += empty_cluster;
+				last_wanted = 0;
+				/*
+				 * if search_start is still in this block group
+				 * then we just re-search this block group
+				 */
+				if (search_start >= start &&
+				    search_start < end) {
+					mutex_unlock(&block_group->alloc_mutex);
+					continue;
+				}
+
+				/* else we go to the next block group */
+				goto new_group;
+			}
+
+			if (exclude_nr > 0 &&
+			    (search_start + num_bytes > exclude_start &&
+			     search_start < exclude_start + exclude_nr)) {
+				search_start = exclude_start + exclude_nr;
+				/*
+				 * if search_start is still in this block group
+				 * then we just re-search this block group
+				 */
+				if (search_start >= start &&
+				    search_start < end) {
+					mutex_unlock(&block_group->alloc_mutex);
+					last_wanted = 0;
+					continue;
+				}
+
+				/* else we go to the next block group */
+				goto new_group;
+			}
+
+			ins->objectid = search_start;
+			ins->offset = num_bytes;
+
+			btrfs_remove_free_space_lock(block_group, search_start,
+						     num_bytes);
+			/* we are all good, lets return */
+			mutex_unlock(&block_group->alloc_mutex);
+			break;
+		}
+new_group:
+		mutex_unlock(&block_group->alloc_mutex);
+		put_block_group(block_group);
+		block_group = NULL;
+new_group_no_lock:
+		/* don't try to compare new allocations against the
+		 * last allocation any more
+		 */
+		last_wanted = 0;
+
+		/*
+		 * Here's how this works.
+		 * loop == 0: we were searching a block group via a hint
+		 *		and didn't find anything, so we start at
+		 *		the head of the block groups and keep searching
+		 * loop == 1: we're searching through all of the block groups
+		 *		if we hit the head again we have searched
+		 *		all of the block groups for this space and we
+		 *		need to try to allocate; if we can't, error out.
+		 * loop == 2: we allocated more space and are looping through
+		 *		all of the block groups again.
+		 */
+		if (loop == 0) {
+			head = &space_info->block_groups;
+			cur = head->next;
+			loop++;
+		} else if (loop == 1 && cur == head) {
+			int keep_going;
+
+			/* at this point we give up on the empty_size
+			 * allocations and just try to allocate the min
+			 * space.
+			 *
+			 * The extra_loop field was set if an empty_size
+			 * allocation was attempted above, and if this
+			 * is set we need to try the loop again without
+			 * the additional empty_size.
+			 */
+			total_needed -= empty_size;
+			empty_size = 0;
+			keep_going = extra_loop;
+			loop++;
+
+			if (allowed_chunk_alloc && !chunk_alloc_done) {
+				up_read(&space_info->groups_sem);
+				ret = do_chunk_alloc(trans, root, num_bytes +
+						     2 * 1024 * 1024, data, 1);
+				down_read(&space_info->groups_sem);
+				if (ret < 0)
+					goto loop_check;
+				head = &space_info->block_groups;
+				/*
+				 * we've allocated a new chunk, keep
+				 * trying
+				 */
+				keep_going = 1;
+				chunk_alloc_done = 1;
+			} else if (!allowed_chunk_alloc) {
+				space_info->force_alloc = 1;
+			}
+loop_check:
+			if (keep_going) {
+				cur = head->next;
+				extra_loop = 0;
+			} else {
+				break;
+			}
+		} else if (cur == head) {
+			break;
+		}
+
+		block_group = list_entry(cur, struct btrfs_block_group_cache,
+					 list);
+		atomic_inc(&block_group->count);
+
+		search_start = block_group->key.objectid;
+		cur = cur->next;
+	}
+
+	/* we found what we needed */
+	if (ins->objectid) {
+		if (!(data & BTRFS_BLOCK_GROUP_DATA))
+			trans->block_group = block_group->key.objectid;
+
+		if (last_ptr)
+			*last_ptr = ins->objectid + ins->offset;
+		ret = 0;
+	} else if (!ret) {
+		printk(KERN_ERR "btrfs searching for %llu bytes, "
+		       "num_bytes %llu, loop %d, allowed_alloc %d\n",
+		       (unsigned long long)total_needed,
+		       (unsigned long long)num_bytes,
+		       loop, allowed_chunk_alloc);
+		ret = -ENOSPC;
+	}
+	if (block_group)
+		put_block_group(block_group);
+
+	up_read(&space_info->groups_sem);
+	return ret;
+}
+
+static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
+{
+	struct btrfs_block_group_cache *cache;
+	struct list_head *l;
+
+	printk(KERN_INFO "space_info has %llu free, is %sfull\n",
+	       (unsigned long long)(info->total_bytes - info->bytes_used -
+				    info->bytes_pinned - info->bytes_reserved),
+	       (info->full) ? "" : "not ");
+
+	down_read(&info->groups_sem);
+	list_for_each(l, &info->block_groups) {
+		cache = list_entry(l, struct btrfs_block_group_cache, list);
+		spin_lock(&cache->lock);
+		printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
+		       "%llu pinned %llu reserved\n",
+		       (unsigned long long)cache->key.objectid,
+		       (unsigned long long)cache->key.offset,
+		       (unsigned long long)btrfs_block_group_used(&cache->item),
+		       (unsigned long long)cache->pinned,
+		       (unsigned long long)cache->reserved);
+		btrfs_dump_free_space(cache, bytes);
+		spin_unlock(&cache->lock);
+	}
+	up_read(&info->groups_sem);
+}
+
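+/*
+ * reserve an extent of at least 'min_alloc_size' bytes, allocating new
+ * chunks if needed.  The result is returned through 'ins'.
+ */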
+static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
+				  struct btrfs_root *root,
+				  u64 num_bytes, u64 min_alloc_size,
+				  u64 empty_size, u64 hint_byte,
+				  u64 search_end, struct btrfs_key *ins,
+				  u64 data)
+{
+	int ret;
+	u64 search_start = 0;
+	u64 alloc_profile;
+	struct btrfs_fs_info *info = root->fs_info;
+
+	if (data) {
+		alloc_profile = info->avail_data_alloc_bits &
+			info->data_alloc_profile;
+		data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
+	} else if (root == root->fs_info->chunk_root) {
+		alloc_profile = info->avail_system_alloc_bits &
+			info->system_alloc_profile;
+		data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
+	} else {
+		alloc_profile = info->avail_metadata_alloc_bits &
+			info->metadata_alloc_profile;
+		data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
+	}
+again:
+	data = btrfs_reduce_alloc_profile(root, data);
+	/*
+	 * the only place that sets empty_size is btrfs_realloc_node, which
+	 * is not called recursively on allocations
+	 */
+	if (empty_size || root->ref_cows) {
+		if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
+			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
+				     2 * 1024 * 1024,
+				     BTRFS_BLOCK_GROUP_METADATA |
+				     (info->metadata_alloc_profile &
+				      info->avail_metadata_alloc_bits), 0);
+		}
+		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
+				     num_bytes + 2 * 1024 * 1024, data, 0);
+	}
+
+	WARN_ON(num_bytes < root->sectorsize);
+	ret = find_free_extent(trans, root, num_bytes, empty_size,
+			       search_start, search_end, hint_byte, ins,
+			       trans->alloc_exclude_start,
+			       trans->alloc_exclude_nr, data);
+
+	if (ret == -ENOSPC && num_bytes > min_alloc_size) {
+		num_bytes = num_bytes >> 1;
+		num_bytes = num_bytes & ~(root->sectorsize - 1);
+		num_bytes = max(num_bytes, min_alloc_size);
+		do_chunk_alloc(trans, root->fs_info->extent_root,
+			       num_bytes, data, 1);
+		goto again;
+	}
+	if (ret) {
+		struct btrfs_space_info *sinfo;
+
+		sinfo = __find_space_info(root->fs_info, data);
+		printk(KERN_ERR "btrfs allocation failed flags %llu, "
+		       "wanted %llu\n", (unsigned long long)data,
+		       (unsigned long long)num_bytes);
+		dump_space_info(sinfo, num_bytes);
+		BUG();
+	}
+
+	return ret;
+}
+
+int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
+{
+	struct btrfs_block_group_cache *cache;
+	int ret = 0;
+
+	cache = btrfs_lookup_block_group(root->fs_info, start);
+	if (!cache) {
+		printk(KERN_ERR "Unable to find block group for %llu\n",
+		       (unsigned long long)start);
+		return -ENOSPC;
+	}
+
+	ret = btrfs_discard_extent(root, start, len);
+
+	btrfs_add_free_space(cache, start, len);
+	put_block_group(cache);
+	update_reserved_extents(root, start, len, 0);
+
+	return ret;
+}
+
+int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
+				  struct btrfs_root *root,
+				  u64 num_bytes, u64 min_alloc_size,
+				  u64 empty_size, u64 hint_byte,
+				  u64 search_end, struct btrfs_key *ins,
+				  u64 data)
+{
+	int ret;
+	ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
+				     empty_size, hint_byte, search_end, ins,
+				     data);
+	update_reserved_extents(root, ins->objectid, ins->offset, 1);
+	return ret;
+}
+
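+/*
+ * insert the extent item and initial backref for a reserved extent into
+ * the extent tree, or queue the insert when allocating from the extent
+ * tree itself
+ */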
+static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
+					 struct btrfs_root *root, u64 parent,
+					 u64 root_objectid, u64 ref_generation,
+					 u64 owner, struct btrfs_key *ins)
+{
+	int ret;
+	int pending_ret;
+	u64 super_used;
+	u64 root_used;
+	u64 num_bytes = ins->offset;
+	u32 sizes[2];
+	struct btrfs_fs_info *info = root->fs_info;
+	struct btrfs_root *extent_root = info->extent_root;
+	struct btrfs_extent_item *extent_item;
+	struct btrfs_extent_ref *ref;
+	struct btrfs_path *path;
+	struct btrfs_key keys[2];
+
+	if (parent == 0)
+		parent = ins->objectid;
+
+	/* block accounting for super block */
+	spin_lock(&info->delalloc_lock);
+	super_used = btrfs_super_bytes_used(&info->super_copy);
+	btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
+
+	/* block accounting for root item */
+	root_used = btrfs_root_used(&root->root_item);
+	btrfs_set_root_used(&root->root_item, root_used + num_bytes);
+	spin_unlock(&info->delalloc_lock);
+
+	if (root == extent_root) {
+		struct pending_extent_op *extent_op;
+
+		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
+		BUG_ON(!extent_op);
+
+		extent_op->type = PENDING_EXTENT_INSERT;
+		extent_op->bytenr = ins->objectid;
+		extent_op->num_bytes = ins->offset;
+		extent_op->parent = parent;
+		extent_op->orig_parent = 0;
+		extent_op->generation = ref_generation;
+		extent_op->orig_generation = 0;
+		extent_op->level = (int)owner;
+		INIT_LIST_HEAD(&extent_op->list);
+		extent_op->del = 0;
+
+		mutex_lock(&root->fs_info->extent_ins_mutex);
+		set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
+				ins->objectid + ins->offset - 1,
+				EXTENT_WRITEBACK, GFP_NOFS);
+		set_state_private(&root->fs_info->extent_ins,
+				  ins->objectid, (unsigned long)extent_op);
+		mutex_unlock(&root->fs_info->extent_ins_mutex);
+		goto update_block;
+	}
+
+	memcpy(&keys[0], ins, sizeof(*ins));
+	keys[1].objectid = ins->objectid;
+	keys[1].type = BTRFS_EXTENT_REF_KEY;
+	keys[1].offset = parent;
+	sizes[0] = sizeof(*extent_item);
+	sizes[1] = sizeof(*ref);
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+
+	ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
+				       sizes, 2);
+	BUG_ON(ret);
+
+	extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+				     struct btrfs_extent_item);
+	btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
+	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
+			     struct btrfs_extent_ref);
+
+	btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
+	btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
+	btrfs_set_ref_objectid(path->nodes[0], ref, owner);
+	btrfs_set_ref_num_refs(path->nodes[0], ref, 1);
+
+	btrfs_mark_buffer_dirty(path->nodes[0]);
+
+	trans->alloc_exclude_start = 0;
+	trans->alloc_exclude_nr = 0;
+	btrfs_free_path(path);
+	finish_current_insert(trans, extent_root, 0);
+	pending_ret = del_pending_extents(trans, extent_root, 0);
+
+	if (ret)
+		goto out;
+	if (pending_ret) {
+		ret = pending_ret;
+		goto out;
+	}
+
+update_block:
+	ret = update_block_group(trans, root, ins->objectid,
+				 ins->offset, 1, 0);
+	if (ret) {
+		printk(KERN_ERR "btrfs update block group failed for %llu "
+		       "%llu\n", (unsigned long long)ins->objectid,
+		       (unsigned long long)ins->offset);
+		BUG();
+	}
+out:
+	return ret;
+}
+
+int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root, u64 parent,
+				u64 root_objectid, u64 ref_generation,
+				u64 owner, struct btrfs_key *ins)
+{
+	int ret;
+
+	if (root_objectid == BTRFS_TREE_LOG_OBJECTID)
+		return 0;
+	ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
+					    ref_generation, owner, ins);
+	update_reserved_extents(root, ins->objectid, ins->offset, 0);
+	return ret;
+}
+
+/*
+ * this is used by the tree logging recovery code.  It records that
+ * an extent has been allocated and makes sure to clear the free
+ * space cache bits as well
+ */
+int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root, u64 parent,
+				u64 root_objectid, u64 ref_generation,
+				u64 owner, struct btrfs_key *ins)
+{
+	int ret;
+	struct btrfs_block_group_cache *block_group;
+
+	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
+	mutex_lock(&block_group->cache_mutex);
+	cache_block_group(root, block_group);
+	mutex_unlock(&block_group->cache_mutex);
+
+	ret = btrfs_remove_free_space(block_group, ins->objectid,
+				      ins->offset);
+	BUG_ON(ret);
+	put_block_group(block_group);
+	ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
+					    ref_generation, owner, ins);
+	return ret;
+}
+
+/*
+ * finds a free extent and does all the dirty work required for allocation.
+ * The key for the allocated extent is returned through ins.
+ *
+ * returns 0 if everything worked, non-zero otherwise.
+ */
+int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *root,
+		       u64 num_bytes, u64 parent, u64 min_alloc_size,
+		       u64 root_objectid, u64 ref_generation,
+		       u64 owner_objectid, u64 empty_size, u64 hint_byte,
+		       u64 search_end, struct btrfs_key *ins, u64 data)
+{
+	int ret;
+
+	ret = __btrfs_reserve_extent(trans, root, num_bytes,
+				     min_alloc_size, empty_size, hint_byte,
+				     search_end, ins, data);
+	BUG_ON(ret);
+	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
+		ret = __btrfs_alloc_reserved_extent(trans, root, parent,
+					root_objectid, ref_generation,
+					owner_objectid, ins);
+		BUG_ON(ret);
+
+	} else {
+		update_reserved_extents(root, ins->objectid, ins->offset, 1);
+	}
+	return ret;
+}
+
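+/*
+ * initialize a freshly allocated tree block: set its generation, mark it
+ * uptodate and add it to the transaction's (or log tree's) dirty pages
+ */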
+struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
+					    struct btrfs_root *root,
+					    u64 bytenr, u32 blocksize)
+{
+	struct extent_buffer *buf;
+
+	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+	btrfs_set_header_generation(buf, trans->transid);
+	btrfs_tree_lock(buf);
+	clean_tree_block(trans, root, buf);
+	btrfs_set_buffer_uptodate(buf);
+	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
+		set_extent_dirty(&root->dirty_log_pages, buf->start,
+			 buf->start + buf->len - 1, GFP_NOFS);
+	} else {
+		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
+			 buf->start + buf->len - 1, GFP_NOFS);
+	}
+	trans->blocks_used++;
+	return buf;
+}
+
+/*
+ * helper function to allocate a block for a given tree
+ * returns the tree buffer or an ERR_PTR on failure.
+ */
+struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
+					     struct btrfs_root *root,
+					     u32 blocksize, u64 parent,
+					     u64 root_objectid,
+					     u64 ref_generation,
+					     int level,
+					     u64 hint,
+					     u64 empty_size)
+{
+	struct btrfs_key ins;
+	int ret;
+	struct extent_buffer *buf;
+
+	ret = btrfs_alloc_extent(trans, root, blocksize, parent, blocksize,
+				 root_objectid, ref_generation, level,
+				 empty_size, hint, (u64)-1, &ins, 0);
+	if (ret) {
+		BUG_ON(ret > 0);
+		return ERR_PTR(ret);
+	}
+
+	buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
+	return buf;
+}
+
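+/*
+ * drop the reference held by 'leaf' on each file extent it points to
+ */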
+int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
+			struct btrfs_root *root, struct extent_buffer *leaf)
+{
+	u64 leaf_owner;
+	u64 leaf_generation;
+	struct btrfs_key key;
+	struct btrfs_file_extent_item *fi;
+	int i;
+	int nritems;
+	int ret;
+
+	BUG_ON(!btrfs_is_leaf(leaf));
+	nritems = btrfs_header_nritems(leaf);
+	leaf_owner = btrfs_header_owner(leaf);
+	leaf_generation = btrfs_header_generation(leaf);
+
+	for (i = 0; i < nritems; i++) {
+		u64 disk_bytenr;
+		cond_resched();
+
+		btrfs_item_key_to_cpu(leaf, &key, i);
+		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
+			continue;
+		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
+		if (btrfs_file_extent_type(leaf, fi) ==
+		    BTRFS_FILE_EXTENT_INLINE)
+			continue;
+		/*
+		 * FIXME make sure to insert a trans record that
+		 * repeats the snapshot del on crash
+		 */
+		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+		if (disk_bytenr == 0)
+			continue;
+
+		ret = __btrfs_free_extent(trans, root, disk_bytenr,
+				btrfs_file_extent_disk_num_bytes(leaf, fi),
+				leaf->start, leaf_owner, leaf_generation,
+				key.objectid, 0);
+		BUG_ON(ret);
+
+		atomic_inc(&root->fs_info->throttle_gen);
+		wake_up(&root->fs_info->transaction_throttle);
+		cond_resched();
+	}
+	return 0;
+}
+
+static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
+					struct btrfs_root *root,
+					struct btrfs_leaf_ref *ref)
+{
+	int i;
+	int ret;
+	struct btrfs_extent_info *info = ref->extents;
+
+	for (i = 0; i < ref->nritems; i++) {
+		ret = __btrfs_free_extent(trans, root, info->bytenr,
+					  info->num_bytes, ref->bytenr,
+					  ref->owner, ref->generation,
+					  info->objectid, 0);
+
+		atomic_inc(&root->fs_info->throttle_gen);
+		wake_up(&root->fs_info->transaction_throttle);
+		cond_resched();
+
+		BUG_ON(ret);
+		info++;
+	}
+
+	return 0;
+}
+
+static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start,
+				     u64 len, u32 *refs)
+{
+	int ret;
+
+	ret = btrfs_lookup_extent_ref(NULL, root, start, len, refs);
+	BUG_ON(ret);
+
+#if 0 /* some debugging code in case we see problems here */
+	/* if the refs count is one, it won't get increased again.  But
+	 * if the ref count is > 1, someone may be decreasing it at
+	 * the same time we are.
+	 */
+	if (*refs != 1) {
+		struct extent_buffer *eb = NULL;
+		eb = btrfs_find_create_tree_block(root, start, len);
+		if (eb)
+			btrfs_tree_lock(eb);
+
+		mutex_lock(&root->fs_info->alloc_mutex);
+		ret = lookup_extent_ref(NULL, root, start, len, refs);
+		BUG_ON(ret);
+		mutex_unlock(&root->fs_info->alloc_mutex);
+
+		if (eb) {
+			btrfs_tree_unlock(eb);
+			free_extent_buffer(eb);
+		}
+		if (*refs == 1) {
+			printk(KERN_ERR "btrfs block %llu went down to one "
+			       "during drop_snap\n", (unsigned long long)start);
+		}
+
+	}
+#endif
+
+	cond_resched();
+	return ret;
+}
+
+/*
+ * helper function for drop_snapshot, this walks down the tree dropping ref
+ * counts as it goes.
+ */
+static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
+				   struct btrfs_root *root,
+				   struct btrfs_path *path, int *level)
+{
+	u64 root_owner;
+	u64 root_gen;
+	u64 bytenr;
+	u64 ptr_gen;
+	struct extent_buffer *next;
+	struct extent_buffer *cur;
+	struct extent_buffer *parent;
+	struct btrfs_leaf_ref *ref;
+	u32 blocksize;
+	int ret;
+	u32 refs;
+
+	WARN_ON(*level < 0);
+	WARN_ON(*level >= BTRFS_MAX_LEVEL);
+	ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
+				path->nodes[*level]->len, &refs);
+	BUG_ON(ret);
+	if (refs > 1)
+		goto out;
+
+	/*
+	 * walk down to the last node level and free all the leaves
+	 */
+	while (*level >= 0) {
+		WARN_ON(*level < 0);
+		WARN_ON(*level >= BTRFS_MAX_LEVEL);
+		cur = path->nodes[*level];
+
+		if (btrfs_header_level(cur) != *level)
+			WARN_ON(1);
+
+		if (path->slots[*level] >=
+		    btrfs_header_nritems(cur))
+			break;
+		if (*level == 0) {
+			ret = btrfs_drop_leaf_ref(trans, root, cur);
+			BUG_ON(ret);
+			break;
+		}
+		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
+		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
+		blocksize = btrfs_level_size(root, *level - 1);
+
+		ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
+		BUG_ON(ret);
+		if (refs != 1) {
+			parent = path->nodes[*level];
+			root_owner = btrfs_header_owner(parent);
+			root_gen = btrfs_header_generation(parent);
+			path->slots[*level]++;
+
+			ret = __btrfs_free_extent(trans, root, bytenr,
+						blocksize, parent->start,
+						root_owner, root_gen,
+						*level - 1, 1);
+			BUG_ON(ret);
+
+			atomic_inc(&root->fs_info->throttle_gen);
+			wake_up(&root->fs_info->transaction_throttle);
+			cond_resched();
+
+			continue;
+		}
+		/*
+		 * at this point, we have a single ref, and since the
+		 * only place referencing this extent is a dead root
+		 * the reference count should never go higher.
+		 * So, we don't need to check it again
+		 */
+		if (*level == 1) {
+			ref = btrfs_lookup_leaf_ref(root, bytenr);
+			if (ref && ref->generation != ptr_gen) {
+				btrfs_free_leaf_ref(root, ref);
+				ref = NULL;
+			}
+			if (ref) {
+				ret = cache_drop_leaf_ref(trans, root, ref);
+				BUG_ON(ret);
+				btrfs_remove_leaf_ref(root, ref);
+				btrfs_free_leaf_ref(root, ref);
+				*level = 0;
+				break;
+			}
+		}
+		next = btrfs_find_tree_block(root, bytenr, blocksize);
+		if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
+			free_extent_buffer(next);
+
+			next = read_tree_block(root, bytenr, blocksize,
+					       ptr_gen);
+			cond_resched();
+#if 0
+			/*
+			 * this is a debugging check and can go away
+			 * the ref should never go all the way down to 1
+			 * at this point
+			 */
+			ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
+						&refs);
+			BUG_ON(ret);
+			WARN_ON(refs != 1);
+#endif
+		}
+		WARN_ON(*level <= 0);
+		if (path->nodes[*level-1])
+			free_extent_buffer(path->nodes[*level-1]);
+		path->nodes[*level-1] = next;
+		*level = btrfs_header_level(next);
+		path->slots[*level] = 0;
+		cond_resched();
+	}
+out:
+	WARN_ON(*level < 0);
+	WARN_ON(*level >= BTRFS_MAX_LEVEL);
+
+	if (path->nodes[*level] == root->node) {
+		parent = path->nodes[*level];
+		bytenr = path->nodes[*level]->start;
+	} else {
+		parent = path->nodes[*level + 1];
+		bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
+	}
+
+	blocksize = btrfs_level_size(root, *level);
+	root_owner = btrfs_header_owner(parent);
+	root_gen = btrfs_header_generation(parent);
+
+	ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
+				  parent->start, root_owner, root_gen,
+				  *level, 1);
+	free_extent_buffer(path->nodes[*level]);
+	path->nodes[*level] = NULL;
+	*level += 1;
+	BUG_ON(ret);
+
+	cond_resched();
+	return 0;
+}
+
+/*
+ * helper function for drop_subtree, this function is similar to
+ * walk_down_tree. The main difference is that it checks reference
+ * counts while tree blocks are locked.
+ */
+static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
+				      struct btrfs_root *root,
+				      struct btrfs_path *path, int *level)
+{
+	struct extent_buffer *next;
+	struct extent_buffer *cur;
+	struct extent_buffer *parent;
+	u64 bytenr;
+	u64 ptr_gen;
+	u32 blocksize;
+	u32 refs;
+	int ret;
+
+	cur = path->nodes[*level];
+	ret = btrfs_lookup_extent_ref(trans, root, cur->start, cur->len,
+				      &refs);
+	BUG_ON(ret);
+	if (refs > 1)
+		goto out;
+
+	while (*level >= 0) {
+		cur = path->nodes[*level];
+		if (*level == 0) {
+			ret = btrfs_drop_leaf_ref(trans, root, cur);
+			BUG_ON(ret);
+			clean_tree_block(trans, root, cur);
+			break;
+		}
+		if (path->slots[*level] >= btrfs_header_nritems(cur)) {
+			clean_tree_block(trans, root, cur);
+			break;
+		}
+
+		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
+		blocksize = btrfs_level_size(root, *level - 1);
+		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
+
+		next = read_tree_block(root, bytenr, blocksize, ptr_gen);
+		btrfs_tree_lock(next);
+
+		ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
+					      &refs);
+		BUG_ON(ret);
+		if (refs > 1) {
+			parent = path->nodes[*level];
+			ret = btrfs_free_extent(trans, root, bytenr,
+					blocksize, parent->start,
+					btrfs_header_owner(parent),
+					btrfs_header_generation(parent),
+					*level - 1, 1);
+			BUG_ON(ret);
+			path->slots[*level]++;
+			btrfs_tree_unlock(next);
+			free_extent_buffer(next);
+			continue;
+		}
+
+		*level = btrfs_header_level(next);
+		path->nodes[*level] = next;
+		path->slots[*level] = 0;
+		path->locks[*level] = 1;
+		cond_resched();
+	}
+out:
+	parent = path->nodes[*level + 1];
+	bytenr = path->nodes[*level]->start;
+	blocksize = path->nodes[*level]->len;
+
+	ret = btrfs_free_extent(trans, root, bytenr, blocksize,
+			parent->start, btrfs_header_owner(parent),
+			btrfs_header_generation(parent), *level, 1);
+	BUG_ON(ret);
+
+	if (path->locks[*level]) {
+		btrfs_tree_unlock(path->nodes[*level]);
+		path->locks[*level] = 0;
+	}
+	free_extent_buffer(path->nodes[*level]);
+	path->nodes[*level] = NULL;
+	*level += 1;
+	cond_resched();
+	return 0;
+}
+
+/*
+ * helper for dropping snapshots.  This walks back up the tree in the path
+ * to find the first node higher up where we haven't yet gone through
+ * all the slots
+ */
+static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *root,
+				 struct btrfs_path *path,
+				 int *level, int max_level)
+{
+	u64 root_owner;
+	u64 root_gen;
+	struct btrfs_root_item *root_item = &root->root_item;
+	int i;
+	int slot;
+	int ret;
+
+	for (i = *level; i < max_level && path->nodes[i]; i++) {
+		slot = path->slots[i];
+		if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
+			struct extent_buffer *node;
+			struct btrfs_disk_key disk_key;
+			node = path->nodes[i];
+			path->slots[i]++;
+			*level = i;
+			WARN_ON(*level == 0);
+			btrfs_node_key(node, &disk_key, path->slots[i]);
+			memcpy(&root_item->drop_progress,
+			       &disk_key, sizeof(disk_key));
+			root_item->drop_level = i;
+			return 0;
+		} else {
+			struct extent_buffer *parent;
+			if (path->nodes[*level] == root->node)
+				parent = path->nodes[*level];
+			else
+				parent = path->nodes[*level + 1];
+
+			root_owner = btrfs_header_owner(parent);
+			root_gen = btrfs_header_generation(parent);
+
+			clean_tree_block(trans, root, path->nodes[*level]);
+			ret = btrfs_free_extent(trans, root,
+						path->nodes[*level]->start,
+						path->nodes[*level]->len,
+						parent->start, root_owner,
+						root_gen, *level, 1);
+			BUG_ON(ret);
+			if (path->locks[*level]) {
+				btrfs_tree_unlock(path->nodes[*level]);
+				path->locks[*level] = 0;
+			}
+			free_extent_buffer(path->nodes[*level]);
+			path->nodes[*level] = NULL;
+			*level = i + 1;
+		}
+	}
+	return 1;
+}
+
+/*
+ * drop the reference count on the tree rooted at 'root'.  This traverses
+ * the tree freeing any blocks that have a ref count of zero after being
+ * decremented.
+ */
+int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
+			*root)
+{
+	int ret = 0;
+	int wret;
+	int level;
+	struct btrfs_path *path;
+	int i;
+	int orig_level;
+	struct btrfs_root_item *root_item = &root->root_item;
+
+	WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+
+	level = btrfs_header_level(root->node);
+	orig_level = level;
+	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
+		path->nodes[level] = root->node;
+		extent_buffer_get(root->node);
+		path->slots[level] = 0;
+	} else {
+		struct btrfs_key key;
+		struct btrfs_disk_key found_key;
+		struct extent_buffer *node;
+
+		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
+		level = root_item->drop_level;
+		path->lowest_level = level;
+		wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+		if (wret < 0) {
+			ret = wret;
+			goto out;
+		}
+		node = path->nodes[level];
+		btrfs_node_key(node, &found_key, path->slots[level]);
+		WARN_ON(memcmp(&found_key, &root_item->drop_progress,
+			       sizeof(found_key)));
+		/*
+		 * unlock our path; this is safe because only this
+		 * function is allowed to delete this snapshot
+		 */
+		for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
+			if (path->nodes[i] && path->locks[i]) {
+				path->locks[i] = 0;
+				btrfs_tree_unlock(path->nodes[i]);
+			}
+		}
+	}
+	while (1) {
+		wret = walk_down_tree(trans, root, path, &level);
+		if (wret > 0)
+			break;
+		if (wret < 0)
+			ret = wret;
+
+		wret = walk_up_tree(trans, root, path, &level,
+				    BTRFS_MAX_LEVEL);
+		if (wret > 0)
+			break;
+		if (wret < 0)
+			ret = wret;
+		if (trans->transaction->in_commit) {
+			ret = -EAGAIN;
+			break;
+		}
+		atomic_inc(&root->fs_info->throttle_gen);
+		wake_up(&root->fs_info->transaction_throttle);
+	}
+	for (i = 0; i <= orig_level; i++) {
+		if (path->nodes[i]) {
+			free_extent_buffer(path->nodes[i]);
+			path->nodes[i] = NULL;
+		}
+	}
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
+			struct btrfs_root *root,
+			struct extent_buffer *node,
+			struct extent_buffer *parent)
+{
+	struct btrfs_path *path;
+	int level;
+	int parent_level;
+	int ret = 0;
+	int wret;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+
+	BUG_ON(!btrfs_tree_locked(parent));
+	parent_level = btrfs_header_level(parent);
+	extent_buffer_get(parent);
+	path->nodes[parent_level] = parent;
+	path->slots[parent_level] = btrfs_header_nritems(parent);
+
+	BUG_ON(!btrfs_tree_locked(node));
+	level = btrfs_header_level(node);
+	extent_buffer_get(node);
+	path->nodes[level] = node;
+	path->slots[level] = 0;
+
+	while (1) {
+		wret = walk_down_subtree(trans, root, path, &level);
+		if (wret < 0)
+			ret = wret;
+		if (wret != 0)
+			break;
+
+		wret = walk_up_tree(trans, root, path, &level, parent_level);
+		if (wret < 0)
+			ret = wret;
+		if (wret != 0)
+			break;
+	}
+
+	btrfs_free_path(path);
+	return ret;
+}
+
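+/*
+ * limit readahead so it never extends past the last page we care about
+ */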
+static unsigned long calc_ra(unsigned long start, unsigned long last,
+			     unsigned long nr)
+{
+	return min(last, start + nr - 1);
+}
+
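+/*
+ * read in every page in the given range of the reloc inode and redirty
+ * it, so the data gets written out again by the normal delalloc path
+ */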
+static noinline int relocate_inode_pages(struct inode *inode, u64 start,
+					 u64 len)
+{
+	u64 page_start;
+	u64 page_end;
+	unsigned long first_index;
+	unsigned long last_index;
+	unsigned long i;
+	struct page *page;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	struct file_ra_state *ra;
+	struct btrfs_ordered_extent *ordered;
+	unsigned int total_read = 0;
+	unsigned int total_dirty = 0;
+	int ret = 0;
+
+	ra = kzalloc(sizeof(*ra), GFP_NOFS);
+	if (!ra)
+		return -ENOMEM;
+
+	mutex_lock(&inode->i_mutex);
+	first_index = start >> PAGE_CACHE_SHIFT;
+	last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
+
+	/* make sure the dirty trick played by the caller works */
+	ret = invalidate_inode_pages2_range(inode->i_mapping,
+					    first_index, last_index);
+	if (ret)
+		goto out_unlock;
+
+	file_ra_state_init(ra, inode->i_mapping);
+
+	for (i = first_index ; i <= last_index; i++) {
+		if (total_read % ra->ra_pages == 0) {
+			btrfs_force_ra(inode->i_mapping, ra, NULL, i,
+				       calc_ra(i, last_index, ra->ra_pages));
+		}
+		total_read++;
+again:
+		if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
+			BUG_ON(1);
+		page = grab_cache_page(inode->i_mapping, i);
+		if (!page) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
+		if (!PageUptodate(page)) {
+			btrfs_readpage(NULL, page);
+			lock_page(page);
+			if (!PageUptodate(page)) {
+				unlock_page(page);
+				page_cache_release(page);
+				ret = -EIO;
+				goto out_unlock;
+			}
+		}
+		wait_on_page_writeback(page);
+
+		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
+		page_end = page_start + PAGE_CACHE_SIZE - 1;
+		lock_extent(io_tree, page_start, page_end, GFP_NOFS);
+
+		ordered = btrfs_lookup_ordered_extent(inode, page_start);
+		if (ordered) {
+			unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+			unlock_page(page);
+			page_cache_release(page);
+			btrfs_start_ordered_extent(inode, ordered, 1);
+			btrfs_put_ordered_extent(ordered);
+			goto again;
+		}
+		set_page_extent_mapped(page);
+
+		if (i == first_index)
+			set_extent_bits(io_tree, page_start, page_end,
+					EXTENT_BOUNDARY, GFP_NOFS);
+		btrfs_set_extent_delalloc(inode, page_start, page_end);
+
+		set_page_dirty(page);
+		total_dirty++;
+
+		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+		unlock_page(page);
+		page_cache_release(page);
+	}
+
+out_unlock:
+	kfree(ra);
+	mutex_unlock(&inode->i_mutex);
+	balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
+	return ret;
+}
+
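+/*
+ * map the old data extent into the reloc inode with a pinned extent
+ * mapping, then read and redirty the corresponding pages so the data
+ * is copied out to a new location
+ */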
+static noinline int relocate_data_extent(struct inode *reloc_inode,
+					 struct btrfs_key *extent_key,
+					 u64 offset)
+{
+	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
+	struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
+	struct extent_map *em;
+	u64 start = extent_key->objectid - offset;
+	u64 end = start + extent_key->offset - 1;
+
+	em = alloc_extent_map(GFP_NOFS);
+	BUG_ON(!em || IS_ERR(em));
+
+	em->start = start;
+	em->len = extent_key->offset;
+	em->block_len = extent_key->offset;
+	em->block_start = extent_key->objectid;
+	em->bdev = root->fs_info->fs_devices->latest_bdev;
+	set_bit(EXTENT_FLAG_PINNED, &em->flags);
+
+	/* set up the extent map to cheat btrfs_readpage */
+	lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
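+	/*
+	 * if an overlapping mapping is already cached, drop it and retry
+	 * until our pinned mapping is inserted
+	 */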
+	while (1) {
+		int ret;
+		spin_lock(&em_tree->lock);
+		ret = add_extent_mapping(em_tree, em);
+		spin_unlock(&em_tree->lock);
+		if (ret != -EEXIST) {
+			free_extent_map(em);
+			break;
+		}
+		btrfs_drop_extent_cache(reloc_inode, start, end, 0);
+	}
+	unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
+
+	return relocate_inode_pages(reloc_inode, start, extent_key->offset);
+}
+
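+/*
+ * describes one path of back references from the extent being relocated
+ * up to the root of a tree that references it.  nodes[] holds the bytenr
+ * of the tree block at each level, node_keys[] and new_nodes[] track the
+ * matching blocks in the reloc tree.
+ */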
+struct btrfs_ref_path {
+	u64 extent_start;
+	u64 nodes[BTRFS_MAX_LEVEL];
+	u64 root_objectid;
+	u64 root_generation;
+	u64 owner_objectid;
+	u32 num_refs;
+	int lowest_level;
+	int current_level;
+	int shared_level;
+
+	struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
+	u64 new_nodes[BTRFS_MAX_LEVEL];
+};
+
+struct disk_extent {
+	u64 ram_bytes;
+	u64 disk_bytenr;
+	u64 disk_num_bytes;
+	u64 offset;
+	u64 num_bytes;
+	u8 compression;
+	u8 encryption;
+	u16 other_encoding;
+};
+
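+/*
+ * trees that are only COWed, never reference counted or snapshotted
+ */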
+static int is_cowonly_root(u64 root_objectid)
+{
+	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
+	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
+	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
+	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
+	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
+	    root_objectid == BTRFS_CSUM_TREE_OBJECTID)
+		return 1;
+	return 0;
+}
+
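+/*
+ * find the next path of back references leading from ref_path->extent_start
+ * to the root of some tree.  The walk goes up through the backref items
+ * level by level; returns 0 when a path to a tree root is found, 1 when
+ * all paths have been visited and < 0 on error.
+ */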
+static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
+				    struct btrfs_root *extent_root,
+				    struct btrfs_ref_path *ref_path,
+				    int first_time)
+{
+	struct extent_buffer *leaf;
+	struct btrfs_path *path;
+	struct btrfs_extent_ref *ref;
+	struct btrfs_key key;
+	struct btrfs_key found_key;
+	u64 bytenr;
+	u32 nritems;
+	int level;
+	int ret = 1;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	if (first_time) {
+		ref_path->lowest_level = -1;
+		ref_path->current_level = -1;
+		ref_path->shared_level = -1;
+		goto walk_up;
+	}
+walk_down:
+	level = ref_path->current_level - 1;
+	while (level >= -1) {
+		u64 parent;
+		if (level < ref_path->lowest_level)
+			break;
+
+		if (level >= 0)
+			bytenr = ref_path->nodes[level];
+		else
+			bytenr = ref_path->extent_start;
+		BUG_ON(bytenr == 0);
+
+		parent = ref_path->nodes[level + 1];
+		ref_path->nodes[level + 1] = 0;
+		ref_path->current_level = level;
+		BUG_ON(parent == 0);
+
+		key.objectid = bytenr;
+		key.offset = parent + 1;
+		key.type = BTRFS_EXTENT_REF_KEY;
+
+		ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
+		if (ret < 0)
+			goto out;
+		BUG_ON(ret == 0);
+
+		leaf = path->nodes[0];
+		nritems = btrfs_header_nritems(leaf);
+		if (path->slots[0] >= nritems) {
+			ret = btrfs_next_leaf(extent_root, path);
+			if (ret < 0)
+				goto out;
+			if (ret > 0)
+				goto next;
+			leaf = path->nodes[0];
+		}
+
+		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+		if (found_key.objectid == bytenr &&
+		    found_key.type == BTRFS_EXTENT_REF_KEY) {
+			if (level < ref_path->shared_level)
+				ref_path->shared_level = level;
+			goto found;
+		}
+next:
+		level--;
+		btrfs_release_path(extent_root, path);
+		cond_resched();
+	}
+	/* reached lowest level */
+	ret = 1;
+	goto out;
+walk_up:
+	level = ref_path->current_level;
+	while (level < BTRFS_MAX_LEVEL - 1) {
+		u64 ref_objectid;
+
+		if (level >= 0)
+			bytenr = ref_path->nodes[level];
+		else
+			bytenr = ref_path->extent_start;
+
+		BUG_ON(bytenr == 0);
+
+		key.objectid = bytenr;
+		key.offset = 0;
+		key.type = BTRFS_EXTENT_REF_KEY;
+
+		ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
+		if (ret < 0)
+			goto out;
+
+		leaf = path->nodes[0];
+		nritems = btrfs_header_nritems(leaf);
+		if (path->slots[0] >= nritems) {
+			ret = btrfs_next_leaf(extent_root, path);
+			if (ret < 0)
+				goto out;
+			if (ret > 0) {
+				/* the extent was freed by someone */
+				if (ref_path->lowest_level == level)
+					goto out;
+				btrfs_release_path(extent_root, path);
+				goto walk_down;
+			}
+			leaf = path->nodes[0];
+		}
+
+		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+		if (found_key.objectid != bytenr ||
+				found_key.type != BTRFS_EXTENT_REF_KEY) {
+			/* the extent was freed by someone */
+			if (ref_path->lowest_level == level) {
+				ret = 1;
+				goto out;
+			}
+			btrfs_release_path(extent_root, path);
+			goto walk_down;
+		}
+found:
+		ref = btrfs_item_ptr(leaf, path->slots[0],
+				struct btrfs_extent_ref);
+		ref_objectid = btrfs_ref_objectid(leaf, ref);
+		if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
+			if (first_time) {
+				level = (int)ref_objectid;
+				BUG_ON(level >= BTRFS_MAX_LEVEL);
+				ref_path->lowest_level = level;
+				ref_path->current_level = level;
+				ref_path->nodes[level] = bytenr;
+			} else {
+				WARN_ON(ref_objectid != level);
+			}
+		} else {
+			WARN_ON(level != -1);
+		}
+		first_time = 0;
+
+		if (ref_path->lowest_level == level) {
+			ref_path->owner_objectid = ref_objectid;
+			ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
+		}
+
+		/*
+		 * the block is a tree root or the block isn't in a
+		 * reference counted tree.
+		 */
+		if (found_key.objectid == found_key.offset ||
+		    is_cowonly_root(btrfs_ref_root(leaf, ref))) {
+			ref_path->root_objectid = btrfs_ref_root(leaf, ref);
+			ref_path->root_generation =
+				btrfs_ref_generation(leaf, ref);
+			if (level < 0) {
+				/* special reference from the tree log */
+				ref_path->nodes[0] = found_key.offset;
+				ref_path->current_level = 0;
+			}
+			ret = 0;
+			goto out;
+		}
+
+		level++;
+		BUG_ON(ref_path->nodes[level] != 0);
+		ref_path->nodes[level] = found_key.offset;
+		ref_path->current_level = level;
+
+		/*
+		 * the reference was created in the running transaction,
+		 * no need to continue walking up.
+		 */
+		if (btrfs_ref_generation(leaf, ref) == trans->transid) {
+			ref_path->root_objectid = btrfs_ref_root(leaf, ref);
+			ref_path->root_generation =
+				btrfs_ref_generation(leaf, ref);
+			ret = 0;
+			goto out;
+		}
+
+		btrfs_release_path(extent_root, path);
+		cond_resched();
+	}
+	/* reached max tree level, but no tree root found. */
+	BUG();
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
+				struct btrfs_root *extent_root,
+				struct btrfs_ref_path *ref_path,
+				u64 extent_start)
+{
+	memset(ref_path, 0, sizeof(*ref_path));
+	ref_path->extent_start = extent_start;
+
+	return __next_ref_path(trans, extent_root, ref_path, 1);
+}
+
+static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *extent_root,
+			       struct btrfs_ref_path *ref_path)
+{
+	return __next_ref_path(trans, extent_root, ref_path, 0);
+}
+
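+/*
+ * look up the file extents in the reloc inode that cover the data of the
+ * extent being relocated and return them as an array of disk_extent.
+ * With no_fragment set, fail (return 1) if the data is no longer covered
+ * by a single extent.
+ */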
+static noinline int get_new_locations(struct inode *reloc_inode,
+				      struct btrfs_key *extent_key,
+				      u64 offset, int no_fragment,
+				      struct disk_extent **extents,
+				      int *nr_extents)
+{
+	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
+	struct btrfs_path *path;
+	struct btrfs_file_extent_item *fi;
+	struct extent_buffer *leaf;
+	struct disk_extent *exts = *extents;
+	struct btrfs_key found_key;
+	u64 cur_pos;
+	u64 last_byte;
+	u32 nritems;
+	int nr = 0;
+	int max = *nr_extents;
+	int ret;
+
+	WARN_ON(!no_fragment && *extents);
+	if (!exts) {
+		max = 1;
+		exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
+		if (!exts)
+			return -ENOMEM;
+	}
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+
+	cur_pos = extent_key->objectid - offset;
+	last_byte = extent_key->objectid + extent_key->offset;
+	ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
+				       cur_pos, 0);
+	if (ret < 0)
+		goto out;
+	if (ret > 0) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	while (1) {
+		leaf = path->nodes[0];
+		nritems = btrfs_header_nritems(leaf);
+		if (path->slots[0] >= nritems) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0)
+				goto out;
+			if (ret > 0)
+				break;
+			leaf = path->nodes[0];
+		}
+
+		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+		if (found_key.offset != cur_pos ||
+		    found_key.type != BTRFS_EXTENT_DATA_KEY ||
+		    found_key.objectid != reloc_inode->i_ino)
+			break;
+
+		fi = btrfs_item_ptr(leaf, path->slots[0],
+				    struct btrfs_file_extent_item);
+		if (btrfs_file_extent_type(leaf, fi) !=
+		    BTRFS_FILE_EXTENT_REG ||
+		    btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
+			break;
+
+		if (nr == max) {
+			struct disk_extent *old = exts;
+			max *= 2;
+			exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
+			if (!exts) {
+				exts = old;
+				ret = -ENOMEM;
+				goto out;
+			}
+			memcpy(exts, old, sizeof(*exts) * nr);
+			if (old != *extents)
+				kfree(old);
+		}
+
+		exts[nr].disk_bytenr =
+			btrfs_file_extent_disk_bytenr(leaf, fi);
+		exts[nr].disk_num_bytes =
+			btrfs_file_extent_disk_num_bytes(leaf, fi);
+		exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
+		exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
+		exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
+		exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
+		exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
+		exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
+									   fi);
+		BUG_ON(exts[nr].offset > 0);
+		BUG_ON(exts[nr].compression || exts[nr].encryption);
+		BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
+
+		cur_pos += exts[nr].num_bytes;
+		nr++;
+
+		if (cur_pos + offset >= last_byte)
+			break;
+
+		if (no_fragment) {
+			ret = 1;
+			goto out;
+		}
+		path->slots[0]++;
+	}
+
+	BUG_ON(cur_pos + offset > last_byte);
+	if (cur_pos + offset < last_byte) {
+		ret = -ENOENT;
+		goto out;
+	}
+	ret = 0;
+out:
+	btrfs_free_path(path);
+	if (ret) {
+		if (exts != *extents)
+			kfree(exts);
+	} else {
+		*extents = exts;
+		*nr_extents = nr;
+	}
+	return ret;
+}
+
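+/*
+ * walk the file extent items in a tree that reference the extent being
+ * relocated and update them in place to point at the new extent,
+ * adjusting extent refs and locking the affected file range against
+ * ordered IO while the switch is made
+ */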
+static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
+					struct btrfs_root *root,
+					struct btrfs_path *path,
+					struct btrfs_key *extent_key,
+					struct btrfs_key *leaf_key,
+					struct btrfs_ref_path *ref_path,
+					struct disk_extent *new_extents,
+					int nr_extents)
+{
+	struct extent_buffer *leaf;
+	struct btrfs_file_extent_item *fi;
+	struct inode *inode = NULL;
+	struct btrfs_key key;
+	u64 lock_start = 0;
+	u64 lock_end = 0;
+	u64 num_bytes;
+	u64 ext_offset;
+	u64 first_pos;
+	u32 nritems;
+	int nr_scanned = 0;
+	int extent_locked = 0;
+	int extent_type;
+	int ret;
+
+	memcpy(&key, leaf_key, sizeof(key));
+	first_pos = INT_LIMIT(loff_t) - extent_key->offset;
+	if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
+		if (key.objectid < ref_path->owner_objectid ||
+		    (key.objectid == ref_path->owner_objectid &&
+		     key.type < BTRFS_EXTENT_DATA_KEY)) {
+			key.objectid = ref_path->owner_objectid;
+			key.type = BTRFS_EXTENT_DATA_KEY;
+			key.offset = 0;
+		}
+	}
+
+	while (1) {
+		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+		if (ret < 0)
+			goto out;
+
+		leaf = path->nodes[0];
+		nritems = btrfs_header_nritems(leaf);
+next:
+		if (extent_locked && ret > 0) {
+			/*
+			 * the file extent item was modified by someone
+			 * before the extent got locked.
+			 */
+			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
+				      lock_end, GFP_NOFS);
+			extent_locked = 0;
+		}
+
+		if (path->slots[0] >= nritems) {
+			if (++nr_scanned > 2)
+				break;
+
+			BUG_ON(extent_locked);
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0)
+				goto out;
+			if (ret > 0)
+				break;
+			leaf = path->nodes[0];
+			nritems = btrfs_header_nritems(leaf);
+		}
+
+		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+
+		if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
+			if ((key.objectid > ref_path->owner_objectid) ||
+			    (key.objectid == ref_path->owner_objectid &&
+			     key.type > BTRFS_EXTENT_DATA_KEY) ||
+			    (key.offset >= first_pos + extent_key->offset))
+				break;
+		}
+
+		if (inode && key.objectid != inode->i_ino) {
+			BUG_ON(extent_locked);
+			btrfs_release_path(root, path);
+			mutex_unlock(&inode->i_mutex);
+			iput(inode);
+			inode = NULL;
+			continue;
+		}
+
+		if (key.type != BTRFS_EXTENT_DATA_KEY) {
+			path->slots[0]++;
+			ret = 1;
+			goto next;
+		}
+		fi = btrfs_item_ptr(leaf, path->slots[0],
+				    struct btrfs_file_extent_item);
+		extent_type = btrfs_file_extent_type(leaf, fi);
+		if ((extent_type != BTRFS_FILE_EXTENT_REG &&
+		     extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
+		    (btrfs_file_extent_disk_bytenr(leaf, fi) !=
+		     extent_key->objectid)) {
+			path->slots[0]++;
+			ret = 1;
+			goto next;
+		}
+
+		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
+		ext_offset = btrfs_file_extent_offset(leaf, fi);
+
+		if (first_pos > key.offset - ext_offset)
+			first_pos = key.offset - ext_offset;
+
+		if (!extent_locked) {
+			lock_start = key.offset;
+			lock_end = lock_start + num_bytes - 1;
+		} else {
+			if (lock_start > key.offset ||
+			    lock_end + 1 < key.offset + num_bytes) {
+				unlock_extent(&BTRFS_I(inode)->io_tree,
+					      lock_start, lock_end, GFP_NOFS);
+				extent_locked = 0;
+			}
+		}
+
+		if (!inode) {
+			btrfs_release_path(root, path);
+
+			inode = btrfs_iget_locked(root->fs_info->sb,
+						  key.objectid, root);
+			if (inode->i_state & I_NEW) {
+				BTRFS_I(inode)->root = root;
+				BTRFS_I(inode)->location.objectid =
+					key.objectid;
+				BTRFS_I(inode)->location.type =
+					BTRFS_INODE_ITEM_KEY;
+				BTRFS_I(inode)->location.offset = 0;
+				btrfs_read_locked_inode(inode);
+				unlock_new_inode(inode);
+			}
+			/*
+			 * some code calls btrfs_commit_transaction while
+			 * holding the i_mutex, so we can't use mutex_lock
+			 * here.
+			 */
+			if (is_bad_inode(inode) ||
+			    !mutex_trylock(&inode->i_mutex)) {
+				iput(inode);
+				inode = NULL;
+				key.offset = (u64)-1;
+				goto skip;
+			}
+		}
+
+		if (!extent_locked) {
+			struct btrfs_ordered_extent *ordered;
+
+			btrfs_release_path(root, path);
+
+			lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
+				    lock_end, GFP_NOFS);
+			ordered = btrfs_lookup_first_ordered_extent(inode,
+								    lock_end);
+			if (ordered &&
+			    ordered->file_offset <= lock_end &&
+			    ordered->file_offset + ordered->len > lock_start) {
+				unlock_extent(&BTRFS_I(inode)->io_tree,
+					      lock_start, lock_end, GFP_NOFS);
+				btrfs_start_ordered_extent(inode, ordered, 1);
+				btrfs_put_ordered_extent(ordered);
+				key.offset += num_bytes;
+				goto skip;
+			}
+			if (ordered)
+				btrfs_put_ordered_extent(ordered);
+
+			extent_locked = 1;
+			continue;
+		}
+
+		if (nr_extents == 1) {
+			/* update extent pointer in place */
+			btrfs_set_file_extent_disk_bytenr(leaf, fi,
+						new_extents[0].disk_bytenr);
+			btrfs_set_file_extent_disk_num_bytes(leaf, fi,
+						new_extents[0].disk_num_bytes);
+			btrfs_mark_buffer_dirty(leaf);
+
+			btrfs_drop_extent_cache(inode, key.offset,
+						key.offset + num_bytes - 1, 0);
+
+			ret = btrfs_inc_extent_ref(trans, root,
+						new_extents[0].disk_bytenr,
+						new_extents[0].disk_num_bytes,
+						leaf->start,
+						root->root_key.objectid,
+						trans->transid,
+						key.objectid);
+			BUG_ON(ret);
+
+			ret = btrfs_free_extent(trans, root,
+						extent_key->objectid,
+						extent_key->offset,
+						leaf->start,
+						btrfs_header_owner(leaf),
+						btrfs_header_generation(leaf),
+						key.objectid, 0);
+			BUG_ON(ret);
+
+			btrfs_release_path(root, path);
+			key.offset += num_bytes;
+		} else {
+			BUG_ON(1);
+#if 0
+			u64 alloc_hint;
+			u64 extent_len;
+			int i;
+			/*
+			 * drop the old extent pointer first, then insert the
+			 * new pointers one by one
+			 */
+			btrfs_release_path(root, path);
+			ret = btrfs_drop_extents(trans, root, inode, key.offset,
+						 key.offset + num_bytes,
+						 key.offset, &alloc_hint);
+			BUG_ON(ret);
+
+			for (i = 0; i < nr_extents; i++) {
+				if (ext_offset >= new_extents[i].num_bytes) {
+					ext_offset -= new_extents[i].num_bytes;
+					continue;
+				}
+				extent_len = min(new_extents[i].num_bytes -
+						 ext_offset, num_bytes);
+
+				ret = btrfs_insert_empty_item(trans, root,
+							      path, &key,
+							      sizeof(*fi));
+				BUG_ON(ret);
+
+				leaf = path->nodes[0];
+				fi = btrfs_item_ptr(leaf, path->slots[0],
+						struct btrfs_file_extent_item);
+				btrfs_set_file_extent_generation(leaf, fi,
+							trans->transid);
+				btrfs_set_file_extent_type(leaf, fi,
+							BTRFS_FILE_EXTENT_REG);
+				btrfs_set_file_extent_disk_bytenr(leaf, fi,
+						new_extents[i].disk_bytenr);
+				btrfs_set_file_extent_disk_num_bytes(leaf, fi,
+						new_extents[i].disk_num_bytes);
+				btrfs_set_file_extent_ram_bytes(leaf, fi,
+						new_extents[i].ram_bytes);
+
+				btrfs_set_file_extent_compression(leaf, fi,
+						new_extents[i].compression);
+				btrfs_set_file_extent_encryption(leaf, fi,
+						new_extents[i].encryption);
+				btrfs_set_file_extent_other_encoding(leaf, fi,
+						new_extents[i].other_encoding);
+
+				btrfs_set_file_extent_num_bytes(leaf, fi,
+							extent_len);
+				ext_offset += new_extents[i].offset;
+				btrfs_set_file_extent_offset(leaf, fi,
+							ext_offset);
+				btrfs_mark_buffer_dirty(leaf);
+
+				btrfs_drop_extent_cache(inode, key.offset,
+						key.offset + extent_len - 1, 0);
+
+				ret = btrfs_inc_extent_ref(trans, root,
+						new_extents[i].disk_bytenr,
+						new_extents[i].disk_num_bytes,
+						leaf->start,
+						root->root_key.objectid,
+						trans->transid, key.objectid);
+				BUG_ON(ret);
+				btrfs_release_path(root, path);
+
+				inode_add_bytes(inode, extent_len);
+
+				ext_offset = 0;
+				num_bytes -= extent_len;
+				key.offset += extent_len;
+
+				if (num_bytes == 0)
+					break;
+			}
+			BUG_ON(i >= nr_extents);
+#endif
+		}
+
+		if (extent_locked) {
+			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
+				      lock_end, GFP_NOFS);
+			extent_locked = 0;
+		}
+skip:
+		if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
+		    key.offset >= first_pos + extent_key->offset)
+			break;
+
+		cond_resched();
+	}
+	ret = 0;
+out:
+	btrfs_release_path(root, path);
+	if (inode) {
+		mutex_unlock(&inode->i_mutex);
+		if (extent_locked) {
+			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
+				      lock_end, GFP_NOFS);
+		}
+		iput(inode);
+	}
+	return ret;
+}
+
+int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root,
+			       struct extent_buffer *buf, u64 orig_start)
+{
+	int level;
+	int ret;
+
+	BUG_ON(btrfs_header_generation(buf) != trans->transid);
+	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+
+	level = btrfs_header_level(buf);
+	if (level == 0) {
+		struct btrfs_leaf_ref *ref;
+		struct btrfs_leaf_ref *orig_ref;
+
+		orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
+		if (!orig_ref)
+			return -ENOENT;
+
+		ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
+		if (!ref) {
+			btrfs_free_leaf_ref(root, orig_ref);
+			return -ENOMEM;
+		}
+
+		ref->nritems = orig_ref->nritems;
+		memcpy(ref->extents, orig_ref->extents,
+			sizeof(ref->extents[0]) * ref->nritems);
+
+		btrfs_free_leaf_ref(root, orig_ref);
+
+		ref->root_gen = trans->transid;
+		ref->bytenr = buf->start;
+		ref->owner = btrfs_header_owner(buf);
+		ref->generation = btrfs_header_generation(buf);
+		ret = btrfs_add_leaf_ref(root, ref, 0);
+		WARN_ON(ret);
+		btrfs_free_leaf_ref(root, ref);
+	}
+	return 0;
+}
+
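+/*
+ * drop cached extent mappings for every regular file extent in 'leaf',
+ * looking up the owning inodes in 'target_root'
+ */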
+static noinline int invalidate_extent_cache(struct btrfs_root *root,
+					struct extent_buffer *leaf,
+					struct btrfs_block_group_cache *group,
+					struct btrfs_root *target_root)
+{
+	struct btrfs_key key;
+	struct inode *inode = NULL;
+	struct btrfs_file_extent_item *fi;
+	u64 num_bytes;
+	u64 skip_objectid = 0;
+	u32 nritems;
+	u32 i;
+
+	nritems = btrfs_header_nritems(leaf);
+	for (i = 0; i < nritems; i++) {
+		btrfs_item_key_to_cpu(leaf, &key, i);
+		if (key.objectid == skip_objectid ||
+		    key.type != BTRFS_EXTENT_DATA_KEY)
+			continue;
+		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
+		if (btrfs_file_extent_type(leaf, fi) ==
+		    BTRFS_FILE_EXTENT_INLINE)
+			continue;
+		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
+			continue;
+		if (!inode || inode->i_ino != key.objectid) {
+			iput(inode);
+			inode = btrfs_ilookup(target_root->fs_info->sb,
+					      key.objectid, target_root, 1);
+		}
+		if (!inode) {
+			skip_objectid = key.objectid;
+			continue;
+		}
+		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
+
+		lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
+			    key.offset + num_bytes - 1, GFP_NOFS);
+		btrfs_drop_extent_cache(inode, key.offset,
+					key.offset + num_bytes - 1, 1);
+		unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
+			      key.offset + num_bytes - 1, GFP_NOFS);
+		cond_resched();
+	}
+	iput(inode);
+	return 0;
+}
+
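+/*
+ * for each file extent in 'leaf' that falls inside the block group being
+ * relocated, look up its new location through the reloc inode and rewrite
+ * the extent pointer (and the cached leaf ref) to point there
+ */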
+static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
+					struct btrfs_root *root,
+					struct extent_buffer *leaf,
+					struct btrfs_block_group_cache *group,
+					struct inode *reloc_inode)
+{
+	struct btrfs_key key;
+	struct btrfs_key extent_key;
+	struct btrfs_file_extent_item *fi;
+	struct btrfs_leaf_ref *ref;
+	struct disk_extent *new_extent;
+	u64 bytenr;
+	u64 num_bytes;
+	u32 nritems;
+	u32 i;
+	int ext_index;
+	int nr_extent;
+	int ret;
+
+	new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
+	BUG_ON(!new_extent);
+
+	ref = btrfs_lookup_leaf_ref(root, leaf->start);
+	BUG_ON(!ref);
+
+	ext_index = -1;
+	nritems = btrfs_header_nritems(leaf);
+	for (i = 0; i < nritems; i++) {
+		btrfs_item_key_to_cpu(leaf, &key, i);
+		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
+			continue;
+		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
+		if (btrfs_file_extent_type(leaf, fi) ==
+		    BTRFS_FILE_EXTENT_INLINE)
+			continue;
+		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
+		if (bytenr == 0)
+			continue;
+
+		ext_index++;
+		if (bytenr >= group->key.objectid + group->key.offset ||
+		    bytenr + num_bytes <= group->key.objectid)
+			continue;
+
+		extent_key.objectid = bytenr;
+		extent_key.offset = num_bytes;
+		extent_key.type = BTRFS_EXTENT_ITEM_KEY;
+		nr_extent = 1;
+		ret = get_new_locations(reloc_inode, &extent_key,
+					group->key.objectid, 1,
+					&new_extent, &nr_extent);
+		if (ret > 0)
+			continue;
+		BUG_ON(ret < 0);
+
+		BUG_ON(ref->extents[ext_index].bytenr != bytenr);
+		BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
+		ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
+		ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
+
+		btrfs_set_file_extent_disk_bytenr(leaf, fi,
+						new_extent->disk_bytenr);
+		btrfs_set_file_extent_disk_num_bytes(leaf, fi,
+						new_extent->disk_num_bytes);
+		btrfs_mark_buffer_dirty(leaf);
+
+		ret = btrfs_inc_extent_ref(trans, root,
+					new_extent->disk_bytenr,
+					new_extent->disk_num_bytes,
+					leaf->start,
+					root->root_key.objectid,
+					trans->transid, key.objectid);
+		BUG_ON(ret);
+		ret = btrfs_free_extent(trans, root,
+					bytenr, num_bytes, leaf->start,
+					btrfs_header_owner(leaf),
+					btrfs_header_generation(leaf),
+					key.objectid, 0);
+		BUG_ON(ret);
+		cond_resched();
+	}
+	kfree(new_extent);
+	BUG_ON(ext_index + 1 != ref->nritems);
+	btrfs_free_leaf_ref(root, ref);
+	return 0;
+}
+
+int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root)
+{
+	struct btrfs_root *reloc_root;
+	int ret;
+
+	if (root->reloc_root) {
+		reloc_root = root->reloc_root;
+		root->reloc_root = NULL;
+		list_add(&reloc_root->dead_list,
+			 &root->fs_info->dead_reloc_roots);
+
+		btrfs_set_root_bytenr(&reloc_root->root_item,
+				      reloc_root->node->start);
+		btrfs_set_root_level(&reloc_root->root_item,
+				     btrfs_header_level(reloc_root->node));
+		memset(&reloc_root->root_item.drop_progress, 0,
+			sizeof(struct btrfs_disk_key));
+		reloc_root->root_item.drop_level = 0;
+
+		ret = btrfs_update_root(trans, root->fs_info->tree_root,
+					&reloc_root->root_key,
+					&reloc_root->root_item);
+		BUG_ON(ret);
+	}
+	return 0;
+}
+
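+/*
+ * drop every reloc root on the dead list.  btrfs_drop_snapshot returns
+ * -EAGAIN when a transaction commit is in progress, so the drop is
+ * retried across multiple transactions.
+ */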
+int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *reloc_root;
+	struct btrfs_root *prev_root = NULL;
+	struct list_head dead_roots;
+	int ret;
+	unsigned long nr;
+
+	INIT_LIST_HEAD(&dead_roots);
+	list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
+
+	while (!list_empty(&dead_roots)) {
+		reloc_root = list_entry(dead_roots.prev,
+					struct btrfs_root, dead_list);
+		list_del_init(&reloc_root->dead_list);
+
+		BUG_ON(reloc_root->commit_root != NULL);
+		while (1) {
+			trans = btrfs_join_transaction(root, 1);
+			BUG_ON(!trans);
+
+			mutex_lock(&root->fs_info->drop_mutex);
+			ret = btrfs_drop_snapshot(trans, reloc_root);
+			if (ret != -EAGAIN)
+				break;
+			mutex_unlock(&root->fs_info->drop_mutex);
+
+			nr = trans->blocks_used;
+			ret = btrfs_end_transaction(trans, root);
+			BUG_ON(ret);
+			btrfs_btree_balance_dirty(root, nr);
+		}
+
+		free_extent_buffer(reloc_root->node);
+
+		ret = btrfs_del_root(trans, root->fs_info->tree_root,
+				     &reloc_root->root_key);
+		BUG_ON(ret);
+		mutex_unlock(&root->fs_info->drop_mutex);
+
+		nr = trans->blocks_used;
+		ret = btrfs_end_transaction(trans, root);
+		BUG_ON(ret);
+		btrfs_btree_balance_dirty(root, nr);
+
+		kfree(prev_root);
+		prev_root = reloc_root;
+	}
+	if (prev_root) {
+		btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
+		kfree(prev_root);
+	}
+	return 0;
+}
+
+int btrfs_add_dead_reloc_root(struct btrfs_root *root)
+{
+	list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
+	return 0;
+}
+
+int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
+{
+	struct btrfs_root *reloc_root;
+	struct btrfs_trans_handle *trans;
+	struct btrfs_key location;
+	int found;
+	int ret;
+
+	mutex_lock(&root->fs_info->tree_reloc_mutex);
+	ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
+	BUG_ON(ret);
+	found = !list_empty(&root->fs_info->dead_reloc_roots);
+	mutex_unlock(&root->fs_info->tree_reloc_mutex);
+
+	if (found) {
+		trans = btrfs_start_transaction(root, 1);
+		BUG_ON(!trans);
+		ret = btrfs_commit_transaction(trans, root);
+		BUG_ON(ret);
+	}
+
+	location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
+	location.offset = (u64)-1;
+	location.type = BTRFS_ROOT_ITEM_KEY;
+
+	reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
+	BUG_ON(!reloc_root);
+	btrfs_orphan_cleanup(reloc_root);
+	return 0;
+}
+
+static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
+				    struct btrfs_root *root)
+{
+	struct btrfs_root *reloc_root;
+	struct extent_buffer *eb;
+	struct btrfs_root_item *root_item;
+	struct btrfs_key root_key;
+	int ret;
+
+	BUG_ON(!root->ref_cows);
+	if (root->reloc_root)
+		return 0;
+
+	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
+	BUG_ON(!root_item);
+
+	ret = btrfs_copy_root(trans, root, root->commit_root,
+			      &eb, BTRFS_TREE_RELOC_OBJECTID);
+	BUG_ON(ret);
+
+	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
+	root_key.offset = root->root_key.objectid;
+	root_key.type = BTRFS_ROOT_ITEM_KEY;
+
+	memcpy(root_item, &root->root_item, sizeof(*root_item));
+	btrfs_set_root_refs(root_item, 0);
+	btrfs_set_root_bytenr(root_item, eb->start);
+	btrfs_set_root_level(root_item, btrfs_header_level(eb));
+	btrfs_set_root_generation(root_item, trans->transid);
+
+	btrfs_tree_unlock(eb);
+	free_extent_buffer(eb);
+
+	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
+				&root_key, root_item);
+	BUG_ON(ret);
+	kfree(root_item);
+
+	reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
+						 &root_key);
+	BUG_ON(!reloc_root);
+	reloc_root->last_trans = trans->transid;
+	reloc_root->commit_root = NULL;
+	reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
+
+	root->reloc_root = reloc_root;
+	return 0;
+}
+
+/*
+ * Core function of space balance.
+ *
+ * The idea is to use reloc trees to relocate tree blocks in reference
+ * counted roots. There is one reloc tree for each subvol, and all
+ * reloc trees share the same root key objectid. Reloc trees are
+ * snapshots of the latest committed roots of subvols (root->commit_root).
+ *
+ * To relocate a tree block referenced by a subvol, there are two steps:
+ * COW the block through the subvol's reloc tree, then update the block
+ * pointer in the subvol to point to the new block. Since all reloc trees
+ * share the same root key objectid, special handling for tree blocks
+ * owned by them is easy. Once a tree block has been COWed in one reloc
+ * tree, the resulting new block can be used directly when the same block
+ * needs to be COWed again through another reloc tree. In this way,
+ * relocated tree blocks are shared between reloc trees, so they are also
+ * shared between subvols.
+ */
+static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
+				      struct btrfs_root *root,
+				      struct btrfs_path *path,
+				      struct btrfs_key *first_key,
+				      struct btrfs_ref_path *ref_path,
+				      struct btrfs_block_group_cache *group,
+				      struct inode *reloc_inode)
+{
+	struct btrfs_root *reloc_root;
+	struct extent_buffer *eb = NULL;
+	struct btrfs_key *keys;
+	u64 *nodes;
+	int level;
+	int shared_level;
+	int lowest_level = 0;
+	int ret;
+
+	if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
+		lowest_level = ref_path->owner_objectid;
+
+	if (!root->ref_cows) {
+		path->lowest_level = lowest_level;
+		ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
+		BUG_ON(ret < 0);
+		path->lowest_level = 0;
+		btrfs_release_path(root, path);
+		return 0;
+	}
+
+	mutex_lock(&root->fs_info->tree_reloc_mutex);
+	ret = init_reloc_tree(trans, root);
+	BUG_ON(ret);
+	reloc_root = root->reloc_root;
+
+	shared_level = ref_path->shared_level;
+	ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
+
+	keys = ref_path->node_keys;
+	nodes = ref_path->new_nodes;
+	memset(&keys[shared_level + 1], 0,
+	       sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
+	memset(&nodes[shared_level + 1], 0,
+	       sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
+
+	if (nodes[lowest_level] == 0) {
+		path->lowest_level = lowest_level;
+		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
+					0, 1);
+		BUG_ON(ret);
+		for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
+			eb = path->nodes[level];
+			if (!eb || eb == reloc_root->node)
+				break;
+			nodes[level] = eb->start;
+			if (level == 0)
+				btrfs_item_key_to_cpu(eb, &keys[level], 0);
+			else
+				btrfs_node_key_to_cpu(eb, &keys[level], 0);
+		}
+		if (nodes[0] &&
+		    ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
+			eb = path->nodes[0];
+			ret = replace_extents_in_leaf(trans, reloc_root, eb,
+						      group, reloc_inode);
+			BUG_ON(ret);
+		}
+		btrfs_release_path(reloc_root, path);
+	} else {
+		ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
+				       lowest_level);
+		BUG_ON(ret);
+	}
+
+	/*
+	 * replace tree blocks in the fs tree with tree blocks in
+	 * the reloc tree.
+	 */
+	ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
+	BUG_ON(ret < 0);
+
+	if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
+		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
+					0, 0);
+		BUG_ON(ret);
+		extent_buffer_get(path->nodes[0]);
+		eb = path->nodes[0];
+		btrfs_release_path(reloc_root, path);
+		ret = invalidate_extent_cache(reloc_root, eb, group, root);
+		BUG_ON(ret);
+		free_extent_buffer(eb);
+	}
+
+	mutex_unlock(&root->fs_info->tree_reloc_mutex);
+	path->lowest_level = 0;
+	return 0;
+}
+
+static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
+					struct btrfs_root *root,
+					struct btrfs_path *path,
+					struct btrfs_key *first_key,
+					struct btrfs_ref_path *ref_path)
+{
+	int ret;
+
+	ret = relocate_one_path(trans, root, path, first_key,
+				ref_path, NULL, NULL);
+	BUG_ON(ret);
+
+	if (root == root->fs_info->extent_root)
+		btrfs_extent_post_op(trans, root);
+
+	return 0;
+}
+
+static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
+				    struct btrfs_root *extent_root,
+				    struct btrfs_path *path,
+				    struct btrfs_key *extent_key)
+{
+	int ret;
+
+	ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
+	if (ret)
+		goto out;
+	ret = btrfs_del_item(trans, extent_root, path);
+out:
+	btrfs_release_path(extent_root, path);
+	return ret;
+}
+
+static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
+						struct btrfs_ref_path *ref_path)
+{
+	struct btrfs_key root_key;
+
+	root_key.objectid = ref_path->root_objectid;
+	root_key.type = BTRFS_ROOT_ITEM_KEY;
+	if (is_cowonly_root(ref_path->root_objectid))
+		root_key.offset = 0;
+	else
+		root_key.offset = (u64)-1;
+
+	return btrfs_read_fs_root_no_name(fs_info, &root_key);
+}
+
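+/*
+ * relocate all references to a single extent.  On pass 0 data extents are
+ * copied into the reloc inode; on later passes the references are updated
+ * to point at the new copy, either through the reloc trees or via the
+ * fallback replace_one_extent path.  Tree blocks are relocated through
+ * their reloc trees.
+ */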
+static noinline int relocate_one_extent(struct btrfs_root *extent_root,
+					struct btrfs_path *path,
+					struct btrfs_key *extent_key,
+					struct btrfs_block_group_cache *group,
+					struct inode *reloc_inode, int pass)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *found_root;
+	struct btrfs_ref_path *ref_path = NULL;
+	struct disk_extent *new_extents = NULL;
+	int nr_extents = 0;
+	int loops;
+	int ret;
+	int level;
+	struct btrfs_key first_key;
+	u64 prev_block = 0;
+
+
+	trans = btrfs_start_transaction(extent_root, 1);
+	BUG_ON(!trans);
+
+	if (extent_key->objectid == 0) {
+		ret = del_extent_zero(trans, extent_root, path, extent_key);
+		goto out;
+	}
+
+	ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
+	if (!ref_path) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	for (loops = 0; ; loops++) {
+		if (loops == 0) {
+			ret = btrfs_first_ref_path(trans, extent_root, ref_path,
+						   extent_key->objectid);
+		} else {
+			ret = btrfs_next_ref_path(trans, extent_root, ref_path);
+		}
+		if (ret < 0)
+			goto out;
+		if (ret > 0)
+			break;
+
+		if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
+		    ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
+			continue;
+
+		found_root = read_ref_root(extent_root->fs_info, ref_path);
+		BUG_ON(!found_root);
+		/*
+		 * for reference counted trees, only process reference paths
+		 * rooted at the latest committed root.
+		 */
+		if (found_root->ref_cows &&
+		    ref_path->root_generation != found_root->root_key.offset)
+			continue;
+
+		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
+			if (pass == 0) {
+				/*
+				 * copy data extents to new locations
+				 */
+				u64 group_start = group->key.objectid;
+				ret = relocate_data_extent(reloc_inode,
+							   extent_key,
+							   group_start);
+				if (ret < 0)
+					goto out;
+				break;
+			}
+			level = 0;
+		} else {
+			level = ref_path->owner_objectid;
+		}
+
+		if (prev_block != ref_path->nodes[level]) {
+			struct extent_buffer *eb;
+			u64 block_start = ref_path->nodes[level];
+			u64 block_size = btrfs_level_size(found_root, level);
+
+			eb = read_tree_block(found_root, block_start,
+					     block_size, 0);
+			btrfs_tree_lock(eb);
+			BUG_ON(level != btrfs_header_level(eb));
+
+			if (level == 0)
+				btrfs_item_key_to_cpu(eb, &first_key, 0);
+			else
+				btrfs_node_key_to_cpu(eb, &first_key, 0);
+
+			btrfs_tree_unlock(eb);
+			free_extent_buffer(eb);
+			prev_block = block_start;
+		}
+
+		btrfs_record_root_in_trans(found_root);
+		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
+			/*
+			 * try to update data extent references while
+			 * keeping metadata shared between snapshots.
+			 */
+			if (pass == 1) {
+				ret = relocate_one_path(trans, found_root,
+						path, &first_key, ref_path,
+						group, reloc_inode);
+				if (ret < 0)
+					goto out;
+				continue;
+			}
+			/*
+			 * use the fallback method to process the remaining
+			 * references.
+			 */
+			if (!new_extents) {
+				u64 group_start = group->key.objectid;
+				new_extents = kmalloc(sizeof(*new_extents),
+						      GFP_NOFS);
+				if (!new_extents) {
+					ret = -ENOMEM;
+					goto out;
+				}
+				nr_extents = 1;
+				ret = get_new_locations(reloc_inode,
+							extent_key,
+							group_start, 1,
+							&new_extents,
+							&nr_extents);
+				if (ret)
+					goto out;
+			}
+			ret = replace_one_extent(trans, found_root,
+						path, extent_key,
+						&first_key, ref_path,
+						new_extents, nr_extents);
+		} else {
+			ret = relocate_tree_block(trans, found_root, path,
+						  &first_key, ref_path);
+		}
+		if (ret < 0)
+			goto out;
+	}
+	ret = 0;
+out:
+	btrfs_end_transaction(trans, extent_root);
+	kfree(new_extents);
+	kfree(ref_path);
+	return ret;
+}
+
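+/*
+ * pick the raid profile to use for chunks allocated while relocating:
+ * with a single rw device, raid0 drops to single and raid1/raid10 drop
+ * to dup; with multiple devices, dup is upgraded to raid1 and single
+ * to raid0
+ */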
+static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
+{
+	u64 num_devices;
+	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
+		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
+
+	num_devices = root->fs_info->fs_devices->rw_devices;
+	if (num_devices == 1) {
+		stripped |= BTRFS_BLOCK_GROUP_DUP;
+		stripped = flags & ~stripped;
+
+		/* turn raid0 into single device chunks */
+		if (flags & BTRFS_BLOCK_GROUP_RAID0)
+			return stripped;
+
+		/* turn mirroring into duplication */
+		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
+			     BTRFS_BLOCK_GROUP_RAID10))
+			return stripped | BTRFS_BLOCK_GROUP_DUP;
+		return flags;
+	} else {
+		/* they already had raid on here, just return */
+		if (flags & stripped)
+			return flags;
+
+		stripped |= BTRFS_BLOCK_GROUP_DUP;
+		stripped = flags & ~stripped;
+
+		/* switch duplicated blocks with raid1 */
+		if (flags & BTRFS_BLOCK_GROUP_DUP)
+			return stripped | BTRFS_BLOCK_GROUP_RAID1;
+
+		/* turn single device chunks into raid0 */
+		return stripped | BTRFS_BLOCK_GROUP_RAID0;
+	}
+	return flags;
+}
+
+static int __alloc_chunk_for_shrink(struct btrfs_root *root,
+		     struct btrfs_block_group_cache *shrink_block_group,
+		     int force)
+{
+	struct btrfs_trans_handle *trans;
+	u64 new_alloc_flags;
+	u64 calc;
+
+	spin_lock(&shrink_block_group->lock);
+	if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
+		spin_unlock(&shrink_block_group->lock);
+
+		trans = btrfs_start_transaction(root, 1);
+		spin_lock(&shrink_block_group->lock);
+
+		new_alloc_flags = update_block_group_flags(root,
+						   shrink_block_group->flags);
+		if (new_alloc_flags != shrink_block_group->flags) {
+			calc =
+			     btrfs_block_group_used(&shrink_block_group->item);
+		} else {
+			calc = shrink_block_group->key.offset;
+		}
+		spin_unlock(&shrink_block_group->lock);
+
+		do_chunk_alloc(trans, root->fs_info->extent_root,
+			       calc + 2 * 1024 * 1024, new_alloc_flags, force);
+
+		btrfs_end_transaction(trans, root);
+	} else
+		spin_unlock(&shrink_block_group->lock);
+	return 0;
+}
+
+static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *root,
+				 u64 objectid, u64 size)
+{
+	struct btrfs_path *path;
+	struct btrfs_inode_item *item;
+	struct extent_buffer *leaf;
+	int ret;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
+	if (ret)
+		goto out;
+
+	leaf = path->nodes[0];
+	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
+	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
+	btrfs_set_inode_generation(leaf, item, 1);
+	btrfs_set_inode_size(leaf, item, size);
+	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
+	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
+	btrfs_mark_buffer_dirty(leaf);
+	btrfs_release_path(root, path);
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
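+/*
+ * create an inode in the data reloc tree that is as large as the block
+ * group being relocated.  Relocated data is staged through this inode,
+ * and it is put on the orphan list so it gets deleted when relocation
+ * is finished (or after a crash).
+ */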
+static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
+					struct btrfs_block_group_cache *group)
+{
+	struct inode *inode = NULL;
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *root;
+	struct btrfs_key root_key;
+	u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
+	int err = 0;
+
+	root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
+	root_key.type = BTRFS_ROOT_ITEM_KEY;
+	root_key.offset = (u64)-1;
+	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
+	if (IS_ERR(root))
+		return ERR_CAST(root);
+
+	trans = btrfs_start_transaction(root, 1);
+	BUG_ON(!trans);
+
+	err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
+	if (err)
+		goto out;
+
+	err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
+	BUG_ON(err);
+
+	err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
+				       group->key.offset, 0, group->key.offset,
+				       0, 0, 0);
+	BUG_ON(err);
+
+	inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
+	if (inode->i_state & I_NEW) {
+		BTRFS_I(inode)->root = root;
+		BTRFS_I(inode)->location.objectid = objectid;
+		BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
+		BTRFS_I(inode)->location.offset = 0;
+		btrfs_read_locked_inode(inode);
+		unlock_new_inode(inode);
+		BUG_ON(is_bad_inode(inode));
+	} else {
+		BUG_ON(1);
+	}
+	BTRFS_I(inode)->index_cnt = group->key.objectid;
+
+	err = btrfs_orphan_add(trans, inode);
+out:
+	btrfs_end_transaction(trans, root);
+	if (err) {
+		if (inode)
+			iput(inode);
+		inode = ERR_PTR(err);
+	}
+	return inode;
+}
+
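+/*
+ * checksums for data written through the reloc inode already exist for
+ * the old disk location (file_pos + index_cnt).  Look them up and rewrite
+ * their bytenrs so they apply to the newly allocated extent before adding
+ * them to the ordered extent.
+ */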
+int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
+{
+
+	struct btrfs_ordered_sum *sums;
+	struct btrfs_sector_sum *sector_sum;
+	struct btrfs_ordered_extent *ordered;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct list_head list;
+	size_t offset;
+	int ret;
+	u64 disk_bytenr;
+
+	INIT_LIST_HEAD(&list);
+
+	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
+	BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
+
+	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
+	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
+				       disk_bytenr + len - 1, &list);
+
+	while (!list_empty(&list)) {
+		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
+		list_del_init(&sums->list);
+
+		sector_sum = sums->sums;
+		sums->bytenr = ordered->start;
+
+		offset = 0;
+		while (offset < sums->len) {
+			sector_sum->bytenr += ordered->start - disk_bytenr;
+			sector_sum++;
+			offset += root->sectorsize;
+		}
+
+		btrfs_add_ordered_sum(inode, ordered, sums);
+	}
+	btrfs_put_ordered_extent(ordered);
+	return 0;
+}
+
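+/*
+ * move every extent out of the given block group: mark the group
+ * read-only, stage data through a reloc inode and repeatedly walk the
+ * extent tree relocating references until no used extents remain
+ */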
+int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_path *path;
+	struct btrfs_fs_info *info = root->fs_info;
+	struct extent_buffer *leaf;
+	struct inode *reloc_inode;
+	struct btrfs_block_group_cache *block_group;
+	struct btrfs_key key;
+	u64 skipped;
+	u64 cur_byte;
+	u64 total_found;
+	u32 nritems;
+	int ret;
+	int progress;
+	int pass = 0;
+
+	root = root->fs_info->extent_root;
+
+	block_group = btrfs_lookup_block_group(info, group_start);
+	BUG_ON(!block_group);
+
+	printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
+	       (unsigned long long)block_group->key.objectid,
+	       (unsigned long long)block_group->flags);
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+
+	reloc_inode = create_reloc_inode(info, block_group);
+	BUG_ON(IS_ERR(reloc_inode));
+
+	__alloc_chunk_for_shrink(root, block_group, 1);
+	set_block_group_readonly(block_group);
+
+	btrfs_start_delalloc_inodes(info->tree_root);
+	btrfs_wait_ordered_extents(info->tree_root, 0);
+again:
+	skipped = 0;
+	total_found = 0;
+	progress = 0;
+	key.objectid = block_group->key.objectid;
+	key.offset = 0;
+	key.type = 0;
+	cur_byte = key.objectid;
+
+	trans = btrfs_start_transaction(info->tree_root, 1);
+	btrfs_commit_transaction(trans, info->tree_root);
+
+	mutex_lock(&root->fs_info->cleaner_mutex);
+	btrfs_clean_old_snapshots(info->tree_root);
+	btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
+	mutex_unlock(&root->fs_info->cleaner_mutex);
+
+	while (1) {
+		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+		if (ret < 0)
+			goto out;
+next:
+		leaf = path->nodes[0];
+		nritems = btrfs_header_nritems(leaf);
+		if (path->slots[0] >= nritems) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0)
+				goto out;
+			if (ret == 1) {
+				ret = 0;
+				break;
+			}
+			leaf = path->nodes[0];
+			nritems = btrfs_header_nritems(leaf);
+		}
+
+		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+
+		if (key.objectid >= block_group->key.objectid +
+		    block_group->key.offset)
+			break;
+
+		if (progress && need_resched()) {
+			btrfs_release_path(root, path);
+			cond_resched();
+			progress = 0;
+			continue;
+		}
+		progress = 1;
+
+		if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
+		    key.objectid + key.offset <= cur_byte) {
+			path->slots[0]++;
+			goto next;
+		}
+
+		total_found++;
+		cur_byte = key.objectid + key.offset;
+		btrfs_release_path(root, path);
+
+		__alloc_chunk_for_shrink(root, block_group, 0);
+		ret = relocate_one_extent(root, path, &key, block_group,
+					  reloc_inode, pass);
+		BUG_ON(ret < 0);
+		if (ret > 0)
+			skipped++;
+
+		key.objectid = cur_byte;
+		key.type = 0;
+		key.offset = 0;
+	}
+
+	btrfs_release_path(root, path);
+
+	if (pass == 0) {
+		btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
+		invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
+	}
+
+	if (total_found > 0) {
+		printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
+		       (unsigned long long)total_found, pass);
+		pass++;
+		if (total_found == skipped && pass > 2) {
+			iput(reloc_inode);
+			reloc_inode = create_reloc_inode(info, block_group);
+			pass = 0;
+		}
+		goto again;
+	}
+
+	/* delete reloc_inode */
+	iput(reloc_inode);
+
+	/* unpin extents in this range */
+	trans = btrfs_start_transaction(info->tree_root, 1);
+	btrfs_commit_transaction(trans, info->tree_root);
+
+	spin_lock(&block_group->lock);
+	WARN_ON(block_group->pinned > 0);
+	WARN_ON(block_group->reserved > 0);
+	WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
+	spin_unlock(&block_group->lock);
+	put_block_group(block_group);
+	ret = 0;
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+static int find_first_block_group(struct btrfs_root *root,
+		struct btrfs_path *path, struct btrfs_key *key)
+{
+	int ret = 0;
+	struct btrfs_key found_key;
+	struct extent_buffer *leaf;
+	int slot;
+
+	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+
+	while (1) {
+		slot = path->slots[0];
+		leaf = path->nodes[0];
+		if (slot >= btrfs_header_nritems(leaf)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret == 0)
+				continue;
+			if (ret < 0)
+				goto out;
+			break;
+		}
+		btrfs_item_key_to_cpu(leaf, &found_key, slot);
+
+		if (found_key.objectid >= key->objectid &&
+		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
+			ret = 0;
+			goto out;
+		}
+		path->slots[0]++;
+	}
+	ret = -ENOENT;
+out:
+	return ret;
+}
+
+int btrfs_free_block_groups(struct btrfs_fs_info *info)
+{
+	struct btrfs_block_group_cache *block_group;
+	struct rb_node *n;
+
+	spin_lock(&info->block_group_cache_lock);
+	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
+		block_group = rb_entry(n, struct btrfs_block_group_cache,
+				       cache_node);
+		rb_erase(&block_group->cache_node,
+			 &info->block_group_cache_tree);
+		spin_unlock(&info->block_group_cache_lock);
+
+		btrfs_remove_free_space_cache(block_group);
+		down_write(&block_group->space_info->groups_sem);
+		list_del(&block_group->list);
+		up_write(&block_group->space_info->groups_sem);
+
+		WARN_ON(atomic_read(&block_group->count) != 1);
+		kfree(block_group);
+
+		spin_lock(&info->block_group_cache_lock);
+	}
+	spin_unlock(&info->block_group_cache_lock);
+	return 0;
+}
+
+int btrfs_read_block_groups(struct btrfs_root *root)
+{
+	struct btrfs_path *path;
+	int ret;
+	struct btrfs_block_group_cache *cache;
+	struct btrfs_fs_info *info = root->fs_info;
+	struct btrfs_space_info *space_info;
+	struct btrfs_key key;
+	struct btrfs_key found_key;
+	struct extent_buffer *leaf;
+
+	root = info->extent_root;
+	key.objectid = 0;
+	key.offset = 0;
+	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	while (1) {
+		ret = find_first_block_group(root, path, &key);
+		if (ret > 0) {
+			ret = 0;
+			goto error;
+		}
+		if (ret != 0)
+			goto error;
+
+		leaf = path->nodes[0];
+		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+		cache = kzalloc(sizeof(*cache), GFP_NOFS);
+		if (!cache) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		atomic_set(&cache->count, 1);
+		spin_lock_init(&cache->lock);
+		mutex_init(&cache->alloc_mutex);
+		mutex_init(&cache->cache_mutex);
+		INIT_LIST_HEAD(&cache->list);
+		read_extent_buffer(leaf, &cache->item,
+				   btrfs_item_ptr_offset(leaf, path->slots[0]),
+				   sizeof(cache->item));
+		memcpy(&cache->key, &found_key, sizeof(found_key));
+
+		key.objectid = found_key.objectid + found_key.offset;
+		btrfs_release_path(root, path);
+		cache->flags = btrfs_block_group_flags(&cache->item);
+
+		ret = update_space_info(info, cache->flags, found_key.offset,
+					btrfs_block_group_used(&cache->item),
+					&space_info);
+		BUG_ON(ret);
+		cache->space_info = space_info;
+		down_write(&space_info->groups_sem);
+		list_add_tail(&cache->list, &space_info->block_groups);
+		up_write(&space_info->groups_sem);
+
+		ret = btrfs_add_block_group_cache(root->fs_info, cache);
+		BUG_ON(ret);
+
+		set_avail_alloc_bits(root->fs_info, cache->flags);
+		if (btrfs_chunk_readonly(root, cache->key.objectid))
+			set_block_group_readonly(cache);
+	}
+	ret = 0;
+error:
+	btrfs_free_path(path);
+	return ret;
+}
+
+int btrfs_make_block_group(struct btrfs_trans_handle *trans,
+			   struct btrfs_root *root, u64 bytes_used,
+			   u64 type, u64 chunk_objectid, u64 chunk_offset,
+			   u64 size)
+{
+	int ret;
+	struct btrfs_root *extent_root;
+	struct btrfs_block_group_cache *cache;
+
+	extent_root = root->fs_info->extent_root;
+
+	root->fs_info->last_trans_new_blockgroup = trans->transid;
+
+	cache = kzalloc(sizeof(*cache), GFP_NOFS);
+	if (!cache)
+		return -ENOMEM;
+
+	cache->key.objectid = chunk_offset;
+	cache->key.offset = size;
+	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+	atomic_set(&cache->count, 1);
+	spin_lock_init(&cache->lock);
+	mutex_init(&cache->alloc_mutex);
+	mutex_init(&cache->cache_mutex);
+	INIT_LIST_HEAD(&cache->list);
+
+	btrfs_set_block_group_used(&cache->item, bytes_used);
+	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
+	cache->flags = type;
+	btrfs_set_block_group_flags(&cache->item, type);
+
+	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
+				&cache->space_info);
+	BUG_ON(ret);
+	down_write(&cache->space_info->groups_sem);
+	list_add_tail(&cache->list, &cache->space_info->block_groups);
+	up_write(&cache->space_info->groups_sem);
+
+	ret = btrfs_add_block_group_cache(root->fs_info, cache);
+	BUG_ON(ret);
+
+	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
+				sizeof(cache->item));
+	BUG_ON(ret);
+
+	finish_current_insert(trans, extent_root, 0);
+	ret = del_pending_extents(trans, extent_root, 0);
+	BUG_ON(ret);
+	set_avail_alloc_bits(extent_root->fs_info, type);
+
+	return 0;
+}
+
+int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root, u64 group_start)
+{
+	struct btrfs_path *path;
+	struct btrfs_block_group_cache *block_group;
+	struct btrfs_key key;
+	int ret;
+
+	root = root->fs_info->extent_root;
+
+	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
+	BUG_ON(!block_group);
+	BUG_ON(!block_group->ro);
+
+	memcpy(&key, &block_group->key, sizeof(key));
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+
+	btrfs_remove_free_space_cache(block_group);
+	rb_erase(&block_group->cache_node,
+		 &root->fs_info->block_group_cache_tree);
+	down_write(&block_group->space_info->groups_sem);
+	list_del(&block_group->list);
+	up_write(&block_group->space_info->groups_sem);
+
+	spin_lock(&block_group->space_info->lock);
+	block_group->space_info->total_bytes -= block_group->key.offset;
+	block_group->space_info->bytes_readonly -= block_group->key.offset;
+	spin_unlock(&block_group->space_info->lock);
+	block_group->space_info->full = 0;
+
+	put_block_group(block_group);
+	put_block_group(block_group);
+
+	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+	if (ret > 0)
+		ret = -EIO;
+	if (ret < 0)
+		goto out;
+
+	ret = btrfs_del_item(trans, root, path);
+out:
+	btrfs_free_path(path);
+	return ret;
+}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
new file mode 100644
index 0000000..e086d40
--- /dev/null
+++ b/fs/btrfs/extent_io.c
@@ -0,0 +1,3717 @@
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/bio.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/pagemap.h>
+#include <linux/page-flags.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/swap.h>
+#include <linux/version.h>
+#include <linux/writeback.h>
+#include <linux/pagevec.h>
+#include "extent_io.h"
+#include "extent_map.h"
+#include "compat.h"
+#include "ctree.h"
+#include "btrfs_inode.h"
+
+/* temporary define until extent_map moves out of btrfs */
+struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
+				       unsigned long extra_flags,
+				       void (*ctor)(void *, struct kmem_cache *,
+						    unsigned long));
+
+static struct kmem_cache *extent_state_cache;
+static struct kmem_cache *extent_buffer_cache;
+
+static LIST_HEAD(buffers);
+static LIST_HEAD(states);
+
+#define LEAK_DEBUG 0
+#ifdef LEAK_DEBUG
+static DEFINE_SPINLOCK(leak_lock);
+#endif
+
+#define BUFFER_LRU_MAX 64
+
+struct tree_entry {
+	u64 start;
+	u64 end;
+	struct rb_node rb_node;
+};
+
+struct extent_page_data {
+	struct bio *bio;
+	struct extent_io_tree *tree;
+	get_extent_t *get_extent;
+
+	/* tells writepage not to lock the state bits for this range
+	 * it still does the unlocking
+	 */
+	int extent_locked;
+};
+
+int __init extent_io_init(void)
+{
+	extent_state_cache = btrfs_cache_create("extent_state",
+					    sizeof(struct extent_state), 0,
+					    NULL);
+	if (!extent_state_cache)
+		return -ENOMEM;
+
+	extent_buffer_cache = btrfs_cache_create("extent_buffers",
+					    sizeof(struct extent_buffer), 0,
+					    NULL);
+	if (!extent_buffer_cache)
+		goto free_state_cache;
+	return 0;
+
+free_state_cache:
+	kmem_cache_destroy(extent_state_cache);
+	return -ENOMEM;
+}
+
+void extent_io_exit(void)
+{
+	struct extent_state *state;
+	struct extent_buffer *eb;
+
+	while (!list_empty(&states)) {
+		state = list_entry(states.next, struct extent_state, leak_list);
+		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
+		       "state %lu in tree %p refs %d\n",
+		       (unsigned long long)state->start,
+		       (unsigned long long)state->end,
+		       state->state, state->tree, atomic_read(&state->refs));
+		list_del(&state->leak_list);
+		kmem_cache_free(extent_state_cache, state);
+
+	}
+
+	while (!list_empty(&buffers)) {
+		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
+		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
+		       "refs %d\n", (unsigned long long)eb->start,
+		       eb->len, atomic_read(&eb->refs));
+		list_del(&eb->leak_list);
+		kmem_cache_free(extent_buffer_cache, eb);
+	}
+	if (extent_state_cache)
+		kmem_cache_destroy(extent_state_cache);
+	if (extent_buffer_cache)
+		kmem_cache_destroy(extent_buffer_cache);
+}
+
+void extent_io_tree_init(struct extent_io_tree *tree,
+			  struct address_space *mapping, gfp_t mask)
+{
+	tree->state.rb_node = NULL;
+	tree->buffer.rb_node = NULL;
+	tree->ops = NULL;
+	tree->dirty_bytes = 0;
+	spin_lock_init(&tree->lock);
+	spin_lock_init(&tree->buffer_lock);
+	tree->mapping = mapping;
+}
+
+static struct extent_state *alloc_extent_state(gfp_t mask)
+{
+	struct extent_state *state;
+#ifdef LEAK_DEBUG
+	unsigned long flags;
+#endif
+
+	state = kmem_cache_alloc(extent_state_cache, mask);
+	if (!state)
+		return state;
+	state->state = 0;
+	state->private = 0;
+	state->tree = NULL;
+#ifdef LEAK_DEBUG
+	spin_lock_irqsave(&leak_lock, flags);
+	list_add(&state->leak_list, &states);
+	spin_unlock_irqrestore(&leak_lock, flags);
+#endif
+	atomic_set(&state->refs, 1);
+	init_waitqueue_head(&state->wq);
+	return state;
+}
+
+static void free_extent_state(struct extent_state *state)
+{
+	if (!state)
+		return;
+	if (atomic_dec_and_test(&state->refs)) {
+#ifdef LEAK_DEBUG
+		unsigned long flags;
+#endif
+		WARN_ON(state->tree);
+#ifdef LEAK_DEBUG
+		spin_lock_irqsave(&leak_lock, flags);
+		list_del(&state->leak_list);
+		spin_unlock_irqrestore(&leak_lock, flags);
+#endif
+		kmem_cache_free(extent_state_cache, state);
+	}
+}
+
+static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
+				   struct rb_node *node)
+{
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct tree_entry *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct tree_entry, rb_node);
+
+		if (offset < entry->start)
+			p = &(*p)->rb_left;
+		else if (offset > entry->end)
+			p = &(*p)->rb_right;
+		else
+			return parent;
+	}
+
+	entry = rb_entry(node, struct tree_entry, rb_node);
+	rb_link_node(node, parent, p);
+	rb_insert_color(node, root);
+	return NULL;
+}
+
+static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
+				     struct rb_node **prev_ret,
+				     struct rb_node **next_ret)
+{
+	struct rb_root *root = &tree->state;
+	struct rb_node *n = root->rb_node;
+	struct rb_node *prev = NULL;
+	struct rb_node *orig_prev = NULL;
+	struct tree_entry *entry;
+	struct tree_entry *prev_entry = NULL;
+
+	while (n) {
+		entry = rb_entry(n, struct tree_entry, rb_node);
+		prev = n;
+		prev_entry = entry;
+
+		if (offset < entry->start)
+			n = n->rb_left;
+		else if (offset > entry->end)
+			n = n->rb_right;
+		else
+			return n;
+	}
+
+	if (prev_ret) {
+		orig_prev = prev;
+		while (prev && offset > prev_entry->end) {
+			prev = rb_next(prev);
+			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
+		}
+		*prev_ret = prev;
+		prev = orig_prev;
+	}
+
+	if (next_ret) {
+		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
+		while (prev && offset < prev_entry->start) {
+			prev = rb_prev(prev);
+			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
+		}
+		*next_ret = prev;
+	}
+	return NULL;
+}
+
+static inline struct rb_node *tree_search(struct extent_io_tree *tree,
+					  u64 offset)
+{
+	struct rb_node *prev = NULL;
+	struct rb_node *ret;
+
+	ret = __etree_search(tree, offset, &prev, NULL);
+	if (!ret)
+		return prev;
+	return ret;
+}
+
+static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
+					  u64 offset, struct rb_node *node)
+{
+	struct rb_root *root = &tree->buffer;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct extent_buffer *eb;
+
+	while (*p) {
+		parent = *p;
+		eb = rb_entry(parent, struct extent_buffer, rb_node);
+
+		if (offset < eb->start)
+			p = &(*p)->rb_left;
+		else if (offset > eb->start)
+			p = &(*p)->rb_right;
+		else
+			return eb;
+	}
+
+	rb_link_node(node, parent, p);
+	rb_insert_color(node, root);
+	return NULL;
+}
+
+static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
+					   u64 offset)
+{
+	struct rb_root *root = &tree->buffer;
+	struct rb_node *n = root->rb_node;
+	struct extent_buffer *eb;
+
+	while (n) {
+		eb = rb_entry(n, struct extent_buffer, rb_node);
+		if (offset < eb->start)
+			n = n->rb_left;
+		else if (offset > eb->start)
+			n = n->rb_right;
+		else
+			return eb;
+	}
+	return NULL;
+}
+
+/*
+ * utility function to look for merge candidates inside a given range.
+ * Any extents with matching state are merged together into a single
+ * extent in the tree.  Extents with EXTENT_IOBITS (locked or writeback)
+ * in their state field are not merged because the end_io handlers need to
+ * be able to do operations on them without sleeping (or doing
+ * allocations/splits).
+ *
+ * This should be called with the tree lock held.
+ */
+static int merge_state(struct extent_io_tree *tree,
+		       struct extent_state *state)
+{
+	struct extent_state *other;
+	struct rb_node *other_node;
+
+	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
+		return 0;
+
+	other_node = rb_prev(&state->rb_node);
+	if (other_node) {
+		other = rb_entry(other_node, struct extent_state, rb_node);
+		if (other->end == state->start - 1 &&
+		    other->state == state->state) {
+			state->start = other->start;
+			other->tree = NULL;
+			rb_erase(&other->rb_node, &tree->state);
+			free_extent_state(other);
+		}
+	}
+	other_node = rb_next(&state->rb_node);
+	if (other_node) {
+		other = rb_entry(other_node, struct extent_state, rb_node);
+		if (other->start == state->end + 1 &&
+		    other->state == state->state) {
+			other->start = state->start;
+			state->tree = NULL;
+			rb_erase(&state->rb_node, &tree->state);
+			free_extent_state(state);
+		}
+	}
+	return 0;
+}
+
+static void set_state_cb(struct extent_io_tree *tree,
+			 struct extent_state *state,
+			 unsigned long bits)
+{
+	if (tree->ops && tree->ops->set_bit_hook) {
+		tree->ops->set_bit_hook(tree->mapping->host, state->start,
+					state->end, state->state, bits);
+	}
+}
+
+static void clear_state_cb(struct extent_io_tree *tree,
+			   struct extent_state *state,
+			   unsigned long bits)
+{
+	if (tree->ops && tree->ops->clear_bit_hook) {
+		tree->ops->clear_bit_hook(tree->mapping->host, state->start,
+					  state->end, state->state, bits);
+	}
+}
+
+/*
+ * insert an extent_state struct into the tree.  'bits' are set on the
+ * struct before it is inserted.
+ *
+ * This may return -EEXIST if the extent is already there, in which case the
+ * state struct is freed.
+ *
+ * The tree lock is not taken internally.  This is a utility function and
+ * probably isn't what you want to call (see set/clear_extent_bit).
+ */
+static int insert_state(struct extent_io_tree *tree,
+			struct extent_state *state, u64 start, u64 end,
+			int bits)
+{
+	struct rb_node *node;
+
+	if (end < start) {
+		printk(KERN_ERR "btrfs end < start %llu %llu\n",
+		       (unsigned long long)end,
+		       (unsigned long long)start);
+		WARN_ON(1);
+	}
+	if (bits & EXTENT_DIRTY)
+		tree->dirty_bytes += end - start + 1;
+	set_state_cb(tree, state, bits);
+	state->state |= bits;
+	state->start = start;
+	state->end = end;
+	node = tree_insert(&tree->state, end, &state->rb_node);
+	if (node) {
+		struct extent_state *found;
+		found = rb_entry(node, struct extent_state, rb_node);
+		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
+		       "%llu %llu\n", (unsigned long long)found->start,
+		       (unsigned long long)found->end,
+		       (unsigned long long)start, (unsigned long long)end);
+		free_extent_state(state);
+		return -EEXIST;
+	}
+	state->tree = tree;
+	merge_state(tree, state);
+	return 0;
+}
+
+/*
+ * split a given extent state struct in two, inserting the preallocated
+ * struct 'prealloc' as the newly created second half.  'split' indicates an
+ * offset inside 'orig' where it should be split.
+ *
+ * Before calling,
+ * the tree has 'orig' at [orig->start, orig->end].  After calling, there
+ * are two extent state structs in the tree:
+ * prealloc: [orig->start, split - 1]
+ * orig: [ split, orig->end ]
+ *
+ * The tree locks are not taken by this function. They need to be held
+ * by the caller.
+ */
+static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
+		       struct extent_state *prealloc, u64 split)
+{
+	struct rb_node *node;
+	prealloc->start = orig->start;
+	prealloc->end = split - 1;
+	prealloc->state = orig->state;
+	orig->start = split;
+
+	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
+	if (node) {
+		struct extent_state *found;
+		found = rb_entry(node, struct extent_state, rb_node);
+		free_extent_state(prealloc);
+		return -EEXIST;
+	}
+	prealloc->tree = tree;
+	return 0;
+}
+
+/*
+ * utility function to clear some bits in an extent state struct.
+ * it will optionally wake up anyone waiting on this state (wake == 1), or
+ * forcibly remove the state from the tree (delete == 1).
+ *
+ * If no bits are set on the state struct after clearing things, the
+ * struct is freed and removed from the tree
+ */
+static int clear_state_bit(struct extent_io_tree *tree,
+			    struct extent_state *state, int bits, int wake,
+			    int delete)
+{
+	int ret = state->state & bits;
+
+	if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
+		u64 range = state->end - state->start + 1;
+		WARN_ON(range > tree->dirty_bytes);
+		tree->dirty_bytes -= range;
+	}
+	clear_state_cb(tree, state, bits);
+	state->state &= ~bits;
+	if (wake)
+		wake_up(&state->wq);
+	if (delete || state->state == 0) {
+		if (state->tree) {
+			clear_state_cb(tree, state, state->state);
+			rb_erase(&state->rb_node, &tree->state);
+			state->tree = NULL;
+			free_extent_state(state);
+		} else {
+			WARN_ON(1);
+		}
+	} else {
+		merge_state(tree, state);
+	}
+	return ret;
+}
+
+/*
+ * clear some bits on a range in the tree.  This may require splitting
+ * or inserting elements in the tree, so the gfp mask is used to
+ * indicate which allocations or sleeping are allowed.
+ *
+ * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
+ * the given range from the tree regardless of state (ie for truncate).
+ *
+ * the range [start, end] is inclusive.
+ *
+ * This takes the tree lock, and returns < 0 on error, > 0 if any of the
+ * bits were already set, or zero if none of the bits were already set.
+ */
+int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+		     int bits, int wake, int delete, gfp_t mask)
+{
+	struct extent_state *state;
+	struct extent_state *prealloc = NULL;
+	struct rb_node *node;
+	int err;
+	int set = 0;
+
+again:
+	if (!prealloc && (mask & __GFP_WAIT)) {
+		prealloc = alloc_extent_state(mask);
+		if (!prealloc)
+			return -ENOMEM;
+	}
+
+	spin_lock(&tree->lock);
+	/*
+	 * this search will find the extents that end after
+	 * our range starts
+	 */
+	node = tree_search(tree, start);
+	if (!node)
+		goto out;
+	state = rb_entry(node, struct extent_state, rb_node);
+	if (state->start > end)
+		goto out;
+	WARN_ON(state->end < start);
+
+	/*
+	 *     | ---- desired range ---- |
+	 *  | state | or
+	 *  | ------------- state -------------- |
+	 *
+	 * We need to split the extent we found, and may flip
+	 * bits on second half.
+	 *
+	 * If the extent we found extends past our range, we
+	 * just split and search again.  It'll get split again
+	 * the next time though.
+	 *
+	 * If the extent we found is inside our range, we clear
+	 * the desired bit on it.
+	 */
+
+	if (state->start < start) {
+		if (!prealloc)
+			prealloc = alloc_extent_state(GFP_ATOMIC);
+		err = split_state(tree, state, prealloc, start);
+		BUG_ON(err == -EEXIST);
+		prealloc = NULL;
+		if (err)
+			goto out;
+		if (state->end <= end) {
+			start = state->end + 1;
+			set |= clear_state_bit(tree, state, bits,
+					wake, delete);
+		} else {
+			start = state->start;
+		}
+		goto search_again;
+	}
+	/*
+	 * | ---- desired range ---- |
+	 *                        | state |
+	 * We need to split the extent, and clear the bit
+	 * on the first half
+	 */
+	if (state->start <= end && state->end > end) {
+		if (!prealloc)
+			prealloc = alloc_extent_state(GFP_ATOMIC);
+		err = split_state(tree, state, prealloc, end + 1);
+		BUG_ON(err == -EEXIST);
+
+		if (wake)
+			wake_up(&state->wq);
+		set |= clear_state_bit(tree, prealloc, bits,
+				       wake, delete);
+		prealloc = NULL;
+		goto out;
+	}
+
+	start = state->end + 1;
+	set |= clear_state_bit(tree, state, bits, wake, delete);
+	goto search_again;
+
+out:
+	spin_unlock(&tree->lock);
+	if (prealloc)
+		free_extent_state(prealloc);
+
+	return set;
+
+search_again:
+	if (start > end)
+		goto out;
+	spin_unlock(&tree->lock);
+	if (mask & __GFP_WAIT)
+		cond_resched();
+	goto again;
+}
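+
+/*
+ * For illustration only: a typical caller clears a couple of bits over an
+ * inclusive byte range and lets the tree merge or free whatever becomes
+ * empty, much like the clear_extent_dirty() wrapper further down:
+ *
+ *	ret = clear_extent_bit(tree, start, start + len - 1,
+ *			       EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, GFP_NOFS);
+ *
+ * Passing wake == 1 also kicks anyone sleeping in wait_extent_bit(), and
+ * delete == 1 drops the range from the tree regardless of remaining bits.
+ */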
+
+static int wait_on_state(struct extent_io_tree *tree,
+			 struct extent_state *state)
+		__releases(tree->lock)
+		__acquires(tree->lock)
+{
+	DEFINE_WAIT(wait);
+	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
+	spin_unlock(&tree->lock);
+	schedule();
+	spin_lock(&tree->lock);
+	finish_wait(&state->wq, &wait);
+	return 0;
+}
+
+/*
+ * waits for one or more bits to clear on a range in the state tree.
+ * The range [start, end] is inclusive.
+ * The tree lock is taken by this function
+ */
+int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
+{
+	struct extent_state *state;
+	struct rb_node *node;
+
+	spin_lock(&tree->lock);
+again:
+	while (1) {
+		/*
+		 * this search will find all the extents that end after
+		 * our range starts
+		 */
+		node = tree_search(tree, start);
+		if (!node)
+			break;
+
+		state = rb_entry(node, struct extent_state, rb_node);
+
+		if (state->start > end)
+			goto out;
+
+		if (state->state & bits) {
+			start = state->start;
+			atomic_inc(&state->refs);
+			wait_on_state(tree, state);
+			free_extent_state(state);
+			goto again;
+		}
+		start = state->end + 1;
+
+		if (start > end)
+			break;
+
+		if (need_resched()) {
+			spin_unlock(&tree->lock);
+			cond_resched();
+			spin_lock(&tree->lock);
+		}
+	}
+out:
+	spin_unlock(&tree->lock);
+	return 0;
+}
+
+static void set_state_bits(struct extent_io_tree *tree,
+			   struct extent_state *state,
+			   int bits)
+{
+	if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
+		u64 range = state->end - state->start + 1;
+		tree->dirty_bytes += range;
+	}
+	set_state_cb(tree, state, bits);
+	state->state |= bits;
+}
+
+/*
+ * set some bits on a range in the tree.  This may require allocations
+ * or sleeping, so the gfp mask is used to indicate what is allowed.
+ *
+ * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
+ * range already has the desired bits set.  The start of the existing
+ * range is returned in failed_start in this case.
+ *
+ * [start, end] is inclusive
+ * This takes the tree lock.
+ */
+static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+			  int bits, int exclusive, u64 *failed_start,
+			  gfp_t mask)
+{
+	struct extent_state *state;
+	struct extent_state *prealloc = NULL;
+	struct rb_node *node;
+	int err = 0;
+	int set;
+	u64 last_start;
+	u64 last_end;
+again:
+	if (!prealloc && (mask & __GFP_WAIT)) {
+		prealloc = alloc_extent_state(mask);
+		if (!prealloc)
+			return -ENOMEM;
+	}
+
+	spin_lock(&tree->lock);
+	/*
+	 * this search will find all the extents that end after
+	 * our range starts.
+	 */
+	node = tree_search(tree, start);
+	if (!node) {
+		err = insert_state(tree, prealloc, start, end, bits);
+		prealloc = NULL;
+		BUG_ON(err == -EEXIST);
+		goto out;
+	}
+
+	state = rb_entry(node, struct extent_state, rb_node);
+	last_start = state->start;
+	last_end = state->end;
+
+	/*
+	 * | ---- desired range ---- |
+	 * | state |
+	 *
+	 * Just lock what we found and keep going
+	 */
+	if (state->start == start && state->end <= end) {
+		set = state->state & bits;
+		if (set && exclusive) {
+			*failed_start = state->start;
+			err = -EEXIST;
+			goto out;
+		}
+		set_state_bits(tree, state, bits);
+		start = state->end + 1;
+		merge_state(tree, state);
+		goto search_again;
+	}
+
+	/*
+	 *     | ---- desired range ---- |
+	 * | state |
+	 *   or
+	 * | ------------- state -------------- |
+	 *
+	 * We need to split the extent we found, and may flip bits on
+	 * second half.
+	 *
+	 * If the extent we found extends past our
+	 * range, we just split and search again.  It'll get split
+	 * again the next time though.
+	 *
+	 * If the extent we found is inside our range, we set the
+	 * desired bit on it.
+	 */
+	if (state->start < start) {
+		set = state->state & bits;
+		if (exclusive && set) {
+			*failed_start = start;
+			err = -EEXIST;
+			goto out;
+		}
+		err = split_state(tree, state, prealloc, start);
+		BUG_ON(err == -EEXIST);
+		prealloc = NULL;
+		if (err)
+			goto out;
+		if (state->end <= end) {
+			set_state_bits(tree, state, bits);
+			start = state->end + 1;
+			merge_state(tree, state);
+		} else {
+			start = state->start;
+		}
+		goto search_again;
+	}
+	/*
+	 * | ---- desired range ---- |
+	 *     | state | or               | state |
+	 *
+	 * There's a hole, we need to insert something in it and
+	 * ignore the extent we found.
+	 */
+	if (state->start > start) {
+		u64 this_end;
+		if (end < last_start)
+			this_end = end;
+		else
+			this_end = last_start - 1;
+		err = insert_state(tree, prealloc, start, this_end,
+				   bits);
+		prealloc = NULL;
+		BUG_ON(err == -EEXIST);
+		if (err)
+			goto out;
+		start = this_end + 1;
+		goto search_again;
+	}
+	/*
+	 * | ---- desired range ---- |
+	 *                        | state |
+	 * We need to split the extent, and set the bit
+	 * on the first half
+	 */
+	if (state->start <= end && state->end > end) {
+		set = state->state & bits;
+		if (exclusive && set) {
+			*failed_start = start;
+			err = -EEXIST;
+			goto out;
+		}
+		err = split_state(tree, state, prealloc, end + 1);
+		BUG_ON(err == -EEXIST);
+
+		set_state_bits(tree, prealloc, bits);
+		merge_state(tree, prealloc);
+		prealloc = NULL;
+		goto out;
+	}
+
+	goto search_again;
+
+out:
+	spin_unlock(&tree->lock);
+	if (prealloc)
+		free_extent_state(prealloc);
+
+	return err;
+
+search_again:
+	if (start > end)
+		goto out;
+	spin_unlock(&tree->lock);
+	if (mask & __GFP_WAIT)
+		cond_resched();
+	goto again;
+}
+
+/* wrappers around set/clear extent bit */
+int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
+		     gfp_t mask)
+{
+	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
+			      mask);
+}
+
+int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
+		       gfp_t mask)
+{
+	return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
+}
+
+int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+		    int bits, gfp_t mask)
+{
+	return set_extent_bit(tree, start, end, bits, 0, NULL,
+			      mask);
+}
+
+int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+		      int bits, gfp_t mask)
+{
+	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
+}
+
+int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
+		     gfp_t mask)
+{
+	return set_extent_bit(tree, start, end,
+			      EXTENT_DELALLOC | EXTENT_DIRTY,
+			      0, NULL, mask);
+}
+
+int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
+		       gfp_t mask)
+{
+	return clear_extent_bit(tree, start, end,
+				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
+}
+
+int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
+			 gfp_t mask)
+{
+	return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
+}
+
+int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
+		     gfp_t mask)
+{
+	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
+			      mask);
+}
+
+static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
+		       gfp_t mask)
+{
+	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
+}
+
+int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
+			gfp_t mask)
+{
+	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
+			      mask);
+}
+
+static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
+				 u64 end, gfp_t mask)
+{
+	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
+}
+
+static int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
+			 gfp_t mask)
+{
+	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
+			      0, NULL, mask);
+}
+
+static int clear_extent_writeback(struct extent_io_tree *tree, u64 start,
+				  u64 end, gfp_t mask)
+{
+	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
+}
+
+int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
+{
+	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
+}
+
+/*
+ * either insert or lock the state struct between start and end.  Use mask
+ * to tell us whether waiting is desired.
+ */
+int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+{
+	int err;
+	u64 failed_start;
+	while (1) {
+		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
+				     &failed_start, mask);
+		if (err == -EEXIST && (mask & __GFP_WAIT)) {
+			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
+			start = failed_start;
+		} else {
+			break;
+		}
+		WARN_ON(start > end);
+	}
+	return err;
+}
+
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+		    gfp_t mask)
+{
+	int err;
+	u64 failed_start;
+
+	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
+			     &failed_start, mask);
+	if (err == -EEXIST) {
+		if (failed_start > start)
+			clear_extent_bit(tree, start, failed_start - 1,
+					 EXTENT_LOCKED, 1, 0, mask);
+		return 0;
+	}
+	return 1;
+}
+
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+		  gfp_t mask)
+{
+	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
+}
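+
+/*
+ * A minimal usage sketch (ranges are inclusive, so end is start + len - 1),
+ * matching the pattern the readpage/writepage paths below follow:
+ *
+ *	lock_extent(tree, start, end, GFP_NOFS);
+ *	... look up or modify the mapping for [start, end] ...
+ *	unlock_extent(tree, start, end, GFP_NOFS);
+ *
+ * try_lock_extent() is the non-blocking variant: it returns 1 on success
+ * and 0 if some part of the range was already locked.
+ */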
+
+/*
+ * helper function to set pages and extents in the tree dirty
+ */
+int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
+{
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	struct page *page;
+
+	while (index <= end_index) {
+		page = find_get_page(tree->mapping, index);
+		BUG_ON(!page);
+		__set_page_dirty_nobuffers(page);
+		page_cache_release(page);
+		index++;
+	}
+	set_extent_dirty(tree, start, end, GFP_NOFS);
+	return 0;
+}
+
+/*
+ * helper function to set both pages and extents in the tree writeback
+ */
+static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
+{
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	struct page *page;
+
+	while (index <= end_index) {
+		page = find_get_page(tree->mapping, index);
+		BUG_ON(!page);
+		set_page_writeback(page);
+		page_cache_release(page);
+		index++;
+	}
+	set_extent_writeback(tree, start, end, GFP_NOFS);
+	return 0;
+}
+
+/*
+ * find the first offset in the io tree with 'bits' set. zero is
+ * returned if we find something, and *start_ret and *end_ret are
+ * set to reflect the state struct that was found.
+ *
+ * If nothing was found, 1 is returned, < 0 on error
+ */
+int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
+			  u64 *start_ret, u64 *end_ret, int bits)
+{
+	struct rb_node *node;
+	struct extent_state *state;
+	int ret = 1;
+
+	spin_lock(&tree->lock);
+	/*
+	 * this search will find all the extents that end after
+	 * our range starts.
+	 */
+	node = tree_search(tree, start);
+	if (!node)
+		goto out;
+
+	while (1) {
+		state = rb_entry(node, struct extent_state, rb_node);
+		if (state->end >= start && (state->state & bits)) {
+			*start_ret = state->start;
+			*end_ret = state->end;
+			ret = 0;
+			break;
+		}
+		node = rb_next(node);
+		if (!node)
+			break;
+	}
+out:
+	spin_unlock(&tree->lock);
+	return ret;
+}
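+
+/*
+ * Illustrative scan of every range carrying a bit, assuming the usual
+ * "advance past what was just returned" pattern (not taken from a real
+ * caller in this file):
+ *
+ *	u64 found_start, found_end, cur = 0;
+ *
+ *	while (!find_first_extent_bit(tree, cur, &found_start,
+ *				      &found_end, EXTENT_DIRTY)) {
+ *		... process [found_start, found_end] ...
+ *		cur = found_end + 1;
+ *	}
+ */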
+
+/* find the first state struct with 'bits' set after 'start', and
+ * return it.  tree->lock must be held.  NULL will be returned if
+ * nothing was found after 'start'
+ */
+struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
+						 u64 start, int bits)
+{
+	struct rb_node *node;
+	struct extent_state *state;
+
+	/*
+	 * this search will find all the extents that end after
+	 * our range starts.
+	 */
+	node = tree_search(tree, start);
+	if (!node)
+		goto out;
+
+	while (1) {
+		state = rb_entry(node, struct extent_state, rb_node);
+		if (state->end >= start && (state->state & bits))
+			return state;
+
+		node = rb_next(node);
+		if (!node)
+			break;
+	}
+out:
+	return NULL;
+}
+
+/*
+ * find a contiguous range of bytes in the file marked as delalloc, not
+ * more than 'max_bytes'.  start and end are used to return the range.
+ *
+ * 1 is returned if we find something, 0 if nothing was in the tree
+ */
+static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
+					u64 *start, u64 *end, u64 max_bytes)
+{
+	struct rb_node *node;
+	struct extent_state *state;
+	u64 cur_start = *start;
+	u64 found = 0;
+	u64 total_bytes = 0;
+
+	spin_lock(&tree->lock);
+
+	/*
+	 * this search will find all the extents that end after
+	 * our range starts.
+	 */
+	node = tree_search(tree, cur_start);
+	if (!node) {
+		if (!found)
+			*end = (u64)-1;
+		goto out;
+	}
+
+	while (1) {
+		state = rb_entry(node, struct extent_state, rb_node);
+		if (found && (state->start != cur_start ||
+			      (state->state & EXTENT_BOUNDARY))) {
+			goto out;
+		}
+		if (!(state->state & EXTENT_DELALLOC)) {
+			if (!found)
+				*end = state->end;
+			goto out;
+		}
+		if (!found)
+			*start = state->start;
+		found++;
+		*end = state->end;
+		cur_start = state->end + 1;
+		node = rb_next(node);
+		if (!node)
+			break;
+		total_bytes += state->end - state->start + 1;
+		if (total_bytes >= max_bytes)
+			break;
+	}
+out:
+	spin_unlock(&tree->lock);
+	return found;
+}
+
+static noinline int __unlock_for_delalloc(struct inode *inode,
+					  struct page *locked_page,
+					  u64 start, u64 end)
+{
+	int ret;
+	struct page *pages[16];
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long nr_pages = end_index - index + 1;
+	int i;
+
+	if (index == locked_page->index && end_index == index)
+		return 0;
+
+	while (nr_pages > 0) {
+		ret = find_get_pages_contig(inode->i_mapping, index,
+				     min_t(unsigned long, nr_pages,
+				     ARRAY_SIZE(pages)), pages);
+		for (i = 0; i < ret; i++) {
+			if (pages[i] != locked_page)
+				unlock_page(pages[i]);
+			page_cache_release(pages[i]);
+		}
+		nr_pages -= ret;
+		index += ret;
+		cond_resched();
+	}
+	return 0;
+}
+
+static noinline int lock_delalloc_pages(struct inode *inode,
+					struct page *locked_page,
+					u64 delalloc_start,
+					u64 delalloc_end)
+{
+	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
+	unsigned long start_index = index;
+	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
+	unsigned long pages_locked = 0;
+	struct page *pages[16];
+	unsigned long nrpages;
+	int ret;
+	int i;
+
+	/* the caller is responsible for locking the start index */
+	if (index == locked_page->index && index == end_index)
+		return 0;
+
+	/* skip the page at the start index */
+	nrpages = end_index - index + 1;
+	while (nrpages > 0) {
+		ret = find_get_pages_contig(inode->i_mapping, index,
+				     min_t(unsigned long,
+				     nrpages, ARRAY_SIZE(pages)), pages);
+		if (ret == 0) {
+			ret = -EAGAIN;
+			goto done;
+		}
+		/* now we have an array of pages, lock them all */
+		for (i = 0; i < ret; i++) {
+			/*
+			 * the caller is taking responsibility for
+			 * locked_page
+			 */
+			if (pages[i] != locked_page) {
+				lock_page(pages[i]);
+				if (!PageDirty(pages[i]) ||
+				    pages[i]->mapping != inode->i_mapping) {
+					ret = -EAGAIN;
+					unlock_page(pages[i]);
+					page_cache_release(pages[i]);
+					goto done;
+				}
+			}
+			page_cache_release(pages[i]);
+			pages_locked++;
+		}
+		nrpages -= ret;
+		index += ret;
+		cond_resched();
+	}
+	ret = 0;
+done:
+	if (ret && pages_locked) {
+		__unlock_for_delalloc(inode, locked_page,
+			      delalloc_start,
+			      ((u64)(start_index + pages_locked - 1)) <<
+			      PAGE_CACHE_SHIFT);
+	}
+	return ret;
+}
+
+/*
+ * find a contiguous range of bytes in the file marked as delalloc, not
+ * more than 'max_bytes'.  start and end are used to return the range.
+ *
+ * 1 is returned if we find something, 0 if nothing was in the tree
+ */
+static noinline u64 find_lock_delalloc_range(struct inode *inode,
+					     struct extent_io_tree *tree,
+					     struct page *locked_page,
+					     u64 *start, u64 *end,
+					     u64 max_bytes)
+{
+	u64 delalloc_start;
+	u64 delalloc_end;
+	u64 found;
+	int ret;
+	int loops = 0;
+
+again:
+	/* step one, find a bunch of delalloc bytes starting at start */
+	delalloc_start = *start;
+	delalloc_end = 0;
+	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
+				    max_bytes);
+	if (!found || delalloc_end <= *start) {
+		*start = delalloc_start;
+		*end = delalloc_end;
+		return found;
+	}
+
+	/*
+	 * start comes from the offset of locked_page.  We have to lock
+	 * pages in order, so we can't process delalloc bytes before
+	 * locked_page
+	 */
+	if (delalloc_start < *start)
+		delalloc_start = *start;
+
+	/*
+	 * make sure to limit the number of pages we try to lock down
+	 * if we're looping.
+	 */
+	if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
+		delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
+
+	/* step two, lock all the pages after the page that has start */
+	ret = lock_delalloc_pages(inode, locked_page,
+				  delalloc_start, delalloc_end);
+	if (ret == -EAGAIN) {
+		/* some of the pages are gone, let's avoid looping by
+		 * shortening the size of the delalloc range we're searching
+		 */
+		if (!loops) {
+			unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
+			max_bytes = PAGE_CACHE_SIZE - offset;
+			loops = 1;
+			goto again;
+		} else {
+			found = 0;
+			goto out_failed;
+		}
+	}
+	BUG_ON(ret);
+
+	/* step three, lock the state bits for the whole range */
+	lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
+
+	/* then test to make sure it is all still delalloc */
+	ret = test_range_bit(tree, delalloc_start, delalloc_end,
+			     EXTENT_DELALLOC, 1);
+	if (!ret) {
+		unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
+		__unlock_for_delalloc(inode, locked_page,
+			      delalloc_start, delalloc_end);
+		cond_resched();
+		goto again;
+	}
+	*start = delalloc_start;
+	*end = delalloc_end;
+out_failed:
+	return found;
+}
+
+int extent_clear_unlock_delalloc(struct inode *inode,
+				struct extent_io_tree *tree,
+				u64 start, u64 end, struct page *locked_page,
+				int unlock_pages,
+				int clear_unlock,
+				int clear_delalloc, int clear_dirty,
+				int set_writeback,
+				int end_writeback)
+{
+	int ret;
+	struct page *pages[16];
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long nr_pages = end_index - index + 1;
+	int i;
+	int clear_bits = 0;
+
+	if (clear_unlock)
+		clear_bits |= EXTENT_LOCKED;
+	if (clear_dirty)
+		clear_bits |= EXTENT_DIRTY;
+
+	if (clear_delalloc)
+		clear_bits |= EXTENT_DELALLOC;
+
+	clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
+	if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
+		return 0;
+
+	while (nr_pages > 0) {
+		ret = find_get_pages_contig(inode->i_mapping, index,
+				     min_t(unsigned long,
+				     nr_pages, ARRAY_SIZE(pages)), pages);
+		for (i = 0; i < ret; i++) {
+			if (pages[i] == locked_page) {
+				page_cache_release(pages[i]);
+				continue;
+			}
+			if (clear_dirty)
+				clear_page_dirty_for_io(pages[i]);
+			if (set_writeback)
+				set_page_writeback(pages[i]);
+			if (end_writeback)
+				end_page_writeback(pages[i]);
+			if (unlock_pages)
+				unlock_page(pages[i]);
+			page_cache_release(pages[i]);
+		}
+		nr_pages -= ret;
+		index += ret;
+		cond_resched();
+	}
+	return 0;
+}
+
+/*
+ * count the number of bytes in the tree that have the given bit(s)
+ * set.  This can be fairly slow, except for EXTENT_DIRTY which is
+ * cached.  The total number found is returned.
+ */
+u64 count_range_bits(struct extent_io_tree *tree,
+		     u64 *start, u64 search_end, u64 max_bytes,
+		     unsigned long bits)
+{
+	struct rb_node *node;
+	struct extent_state *state;
+	u64 cur_start = *start;
+	u64 total_bytes = 0;
+	int found = 0;
+
+	if (search_end <= cur_start) {
+		WARN_ON(1);
+		return 0;
+	}
+
+	spin_lock(&tree->lock);
+	if (cur_start == 0 && bits == EXTENT_DIRTY) {
+		total_bytes = tree->dirty_bytes;
+		goto out;
+	}
+	/*
+	 * this search will find all the extents that end after
+	 * our range starts.
+	 */
+	node = tree_search(tree, cur_start);
+	if (!node)
+		goto out;
+
+	while (1) {
+		state = rb_entry(node, struct extent_state, rb_node);
+		if (state->start > search_end)
+			break;
+		if (state->end >= cur_start && (state->state & bits)) {
+			total_bytes += min(search_end, state->end) + 1 -
+				       max(cur_start, state->start);
+			if (total_bytes >= max_bytes)
+				break;
+			if (!found) {
+				*start = state->start;
+				found = 1;
+			}
+		}
+		node = rb_next(node);
+		if (!node)
+			break;
+	}
+out:
+	spin_unlock(&tree->lock);
+	return total_bytes;
+}
+
+#if 0
+/*
+ * helper function to lock both pages and extents in the tree.
+ * pages must be locked first.
+ */
+static int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
+{
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	struct page *page;
+	int err;
+
+	while (index <= end_index) {
+		page = grab_cache_page(tree->mapping, index);
+		if (!page) {
+			err = -ENOMEM;
+			goto failed;
+		}
+		if (IS_ERR(page)) {
+			err = PTR_ERR(page);
+			goto failed;
+		}
+		index++;
+	}
+	lock_extent(tree, start, end, GFP_NOFS);
+	return 0;
+
+failed:
+	/*
+	 * we failed above in getting the page at 'index', so we undo here
+	 * up to but not including the page at 'index'
+	 */
+	end_index = index;
+	index = start >> PAGE_CACHE_SHIFT;
+	while (index < end_index) {
+		page = find_get_page(tree->mapping, index);
+		unlock_page(page);
+		page_cache_release(page);
+		index++;
+	}
+	return err;
+}
+
+/*
+ * helper function to unlock both pages and extents in the tree.
+ */
+static int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
+{
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	struct page *page;
+
+	while (index <= end_index) {
+		page = find_get_page(tree->mapping, index);
+		unlock_page(page);
+		page_cache_release(page);
+		index++;
+	}
+	unlock_extent(tree, start, end, GFP_NOFS);
+	return 0;
+}
+#endif
+
+/*
+ * set the private field for a given byte offset in the tree.  If there isn't
+ * an extent_state starting exactly at that offset, -ENOENT is returned and
+ * nothing is changed.
+ */
+int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
+{
+	struct rb_node *node;
+	struct extent_state *state;
+	int ret = 0;
+
+	spin_lock(&tree->lock);
+	/*
+	 * this search will find all the extents that end after
+	 * our range starts.
+	 */
+	node = tree_search(tree, start);
+	if (!node) {
+		ret = -ENOENT;
+		goto out;
+	}
+	state = rb_entry(node, struct extent_state, rb_node);
+	if (state->start != start) {
+		ret = -ENOENT;
+		goto out;
+	}
+	state->private = private;
+out:
+	spin_unlock(&tree->lock);
+	return ret;
+}
+
+int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
+{
+	struct rb_node *node;
+	struct extent_state *state;
+	int ret = 0;
+
+	spin_lock(&tree->lock);
+	/*
+	 * this search will find all the extents that end after
+	 * our range starts.
+	 */
+	node = tree_search(tree, start);
+	if (!node) {
+		ret = -ENOENT;
+		goto out;
+	}
+	state = rb_entry(node, struct extent_state, rb_node);
+	if (state->start != start) {
+		ret = -ENOENT;
+		goto out;
+	}
+	*private = state->private;
+out:
+	spin_unlock(&tree->lock);
+	return ret;
+}
+
+/*
+ * searches a range in the state tree for a given mask.
+ * If 'filled' == 1, this returns 1 only if every extent in the range
+ * has the bits set.  Otherwise, 1 is returned if any bit in the
+ * range is found set.
+ */
+int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
+		   int bits, int filled)
+{
+	struct extent_state *state = NULL;
+	struct rb_node *node;
+	int bitset = 0;
+
+	spin_lock(&tree->lock);
+	node = tree_search(tree, start);
+	while (node && start <= end) {
+		state = rb_entry(node, struct extent_state, rb_node);
+
+		if (filled && state->start > start) {
+			bitset = 0;
+			break;
+		}
+
+		if (state->start > end)
+			break;
+
+		if (state->state & bits) {
+			bitset = 1;
+			if (!filled)
+				break;
+		} else if (filled) {
+			bitset = 0;
+			break;
+		}
+		start = state->end + 1;
+		if (start > end)
+			break;
+		node = rb_next(node);
+		if (!node) {
+			if (filled)
+				bitset = 0;
+			break;
+		}
+	}
+	spin_unlock(&tree->lock);
+	return bitset;
+}
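+
+/*
+ * Example, mirroring the page helpers below: with filled == 1 the whole
+ * range must carry the bit, with filled == 0 a single byte is enough.
+ *
+ *	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
+ *		SetPageUptodate(page);		(every byte is uptodate)
+ *	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
+ *		unlock_page(page);		(no byte is locked any more)
+ */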
+
+/*
+ * helper function to set a given page up to date if all the
+ * extents in the tree for that page are up to date
+ */
+static int check_page_uptodate(struct extent_io_tree *tree,
+			       struct page *page)
+{
+	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 end = start + PAGE_CACHE_SIZE - 1;
+	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
+		SetPageUptodate(page);
+	return 0;
+}
+
+/*
+ * helper function to unlock a page if all the extents in the tree
+ * for that page are unlocked
+ */
+static int check_page_locked(struct extent_io_tree *tree,
+			     struct page *page)
+{
+	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 end = start + PAGE_CACHE_SIZE - 1;
+	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
+		unlock_page(page);
+	return 0;
+}
+
+/*
+ * helper function to end page writeback if all the extents
+ * in the tree for that page are done with writeback
+ */
+static int check_page_writeback(struct extent_io_tree *tree,
+			     struct page *page)
+{
+	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 end = start + PAGE_CACHE_SIZE - 1;
+	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
+		end_page_writeback(page);
+	return 0;
+}
+
+/* lots and lots of room for performance fixes in the end_bio funcs */
+
+/*
+ * after a writepage IO is done, we need to:
+ * clear the uptodate bits on error
+ * clear the writeback bits in the extent tree for this IO
+ * end_page_writeback if the page has no more pending IO
+ *
+ * Scheduling is not allowed, so the extent state tree is expected
+ * to have one and only one object corresponding to this IO.
+ */
+static void end_bio_extent_writepage(struct bio *bio, int err)
+{
+	int uptodate = err == 0;
+	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct extent_io_tree *tree;
+	u64 start;
+	u64 end;
+	int whole_page;
+	int ret;
+
+	do {
+		struct page *page = bvec->bv_page;
+		tree = &BTRFS_I(page->mapping->host)->io_tree;
+
+		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
+			 bvec->bv_offset;
+		end = start + bvec->bv_len - 1;
+
+		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
+			whole_page = 1;
+		else
+			whole_page = 0;
+
+		if (--bvec >= bio->bi_io_vec)
+			prefetchw(&bvec->bv_page->flags);
+		if (tree->ops && tree->ops->writepage_end_io_hook) {
+			ret = tree->ops->writepage_end_io_hook(page, start,
+						       end, NULL, uptodate);
+			if (ret)
+				uptodate = 0;
+		}
+
+		if (!uptodate && tree->ops &&
+		    tree->ops->writepage_io_failed_hook) {
+			ret = tree->ops->writepage_io_failed_hook(bio, page,
+							 start, end, NULL);
+			if (ret == 0) {
+				uptodate = (err == 0);
+				continue;
+			}
+		}
+
+		if (!uptodate) {
+			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
+			ClearPageUptodate(page);
+			SetPageError(page);
+		}
+
+		clear_extent_writeback(tree, start, end, GFP_ATOMIC);
+
+		if (whole_page)
+			end_page_writeback(page);
+		else
+			check_page_writeback(tree, page);
+	} while (bvec >= bio->bi_io_vec);
+
+	bio_put(bio);
+}
+
+/*
+ * after a readpage IO is done, we need to:
+ * clear the uptodate bits on error
+ * set the uptodate bits if things worked
+ * set the page up to date if all extents in the tree are uptodate
+ * clear the lock bit in the extent tree
+ * unlock the page if there are no other extents locked for it
+ *
+ * Scheduling is not allowed, so the extent state tree is expected
+ * to have one and only one object corresponding to this IO.
+ */
+static void end_bio_extent_readpage(struct bio *bio, int err)
+{
+	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct extent_io_tree *tree;
+	u64 start;
+	u64 end;
+	int whole_page;
+	int ret;
+
+	if (err)
+		uptodate = 0;
+
+	do {
+		struct page *page = bvec->bv_page;
+		tree = &BTRFS_I(page->mapping->host)->io_tree;
+
+		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
+			bvec->bv_offset;
+		end = start + bvec->bv_len - 1;
+
+		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
+			whole_page = 1;
+		else
+			whole_page = 0;
+
+		if (--bvec >= bio->bi_io_vec)
+			prefetchw(&bvec->bv_page->flags);
+
+		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
+			ret = tree->ops->readpage_end_io_hook(page, start, end,
+							      NULL);
+			if (ret)
+				uptodate = 0;
+		}
+		if (!uptodate && tree->ops &&
+		    tree->ops->readpage_io_failed_hook) {
+			ret = tree->ops->readpage_io_failed_hook(bio, page,
+							 start, end, NULL);
+			if (ret == 0) {
+				uptodate =
+					test_bit(BIO_UPTODATE, &bio->bi_flags);
+				if (err)
+					uptodate = 0;
+				continue;
+			}
+		}
+
+		if (uptodate) {
+			set_extent_uptodate(tree, start, end,
+					    GFP_ATOMIC);
+		}
+		unlock_extent(tree, start, end, GFP_ATOMIC);
+
+		if (whole_page) {
+			if (uptodate) {
+				SetPageUptodate(page);
+			} else {
+				ClearPageUptodate(page);
+				SetPageError(page);
+			}
+			unlock_page(page);
+		} else {
+			if (uptodate) {
+				check_page_uptodate(tree, page);
+			} else {
+				ClearPageUptodate(page);
+				SetPageError(page);
+			}
+			check_page_locked(tree, page);
+		}
+	} while (bvec >= bio->bi_io_vec);
+
+	bio_put(bio);
+}
+
+/*
+ * IO done from prepare_write is pretty simple: we just unlock
+ * the structs in the extent tree when done, and set the uptodate bits
+ * as appropriate.
+ */
+static void end_bio_extent_preparewrite(struct bio *bio, int err)
+{
+	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct extent_io_tree *tree;
+	u64 start;
+	u64 end;
+
+	do {
+		struct page *page = bvec->bv_page;
+		tree = &BTRFS_I(page->mapping->host)->io_tree;
+
+		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
+			bvec->bv_offset;
+		end = start + bvec->bv_len - 1;
+
+		if (--bvec >= bio->bi_io_vec)
+			prefetchw(&bvec->bv_page->flags);
+
+		if (uptodate) {
+			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
+		} else {
+			ClearPageUptodate(page);
+			SetPageError(page);
+		}
+
+		unlock_extent(tree, start, end, GFP_ATOMIC);
+
+	} while (bvec >= bio->bi_io_vec);
+
+	bio_put(bio);
+}
+
+static struct bio *
+extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
+		 gfp_t gfp_flags)
+{
+	struct bio *bio;
+
+	bio = bio_alloc(gfp_flags, nr_vecs);
+
+	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
+		while (!bio && (nr_vecs /= 2))
+			bio = bio_alloc(gfp_flags, nr_vecs);
+	}
+
+	if (bio) {
+		bio->bi_size = 0;
+		bio->bi_bdev = bdev;
+		bio->bi_sector = first_sector;
+	}
+	return bio;
+}
+
+static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
+			  unsigned long bio_flags)
+{
+	int ret = 0;
+	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct page *page = bvec->bv_page;
+	struct extent_io_tree *tree = bio->bi_private;
+	u64 start;
+	u64 end;
+
+	start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
+	end = start + bvec->bv_len - 1;
+
+	bio->bi_private = NULL;
+
+	bio_get(bio);
+
+	if (tree->ops && tree->ops->submit_bio_hook)
+		tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
+					   mirror_num, bio_flags);
+	else
+		submit_bio(rw, bio);
+	if (bio_flagged(bio, BIO_EOPNOTSUPP))
+		ret = -EOPNOTSUPP;
+	bio_put(bio);
+	return ret;
+}
+
+static int submit_extent_page(int rw, struct extent_io_tree *tree,
+			      struct page *page, sector_t sector,
+			      size_t size, unsigned long offset,
+			      struct block_device *bdev,
+			      struct bio **bio_ret,
+			      unsigned long max_pages,
+			      bio_end_io_t end_io_func,
+			      int mirror_num,
+			      unsigned long prev_bio_flags,
+			      unsigned long bio_flags)
+{
+	int ret = 0;
+	struct bio *bio;
+	int nr;
+	int contig = 0;
+	int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
+	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
+	size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
+
+	if (bio_ret && *bio_ret) {
+		bio = *bio_ret;
+		if (old_compressed)
+			contig = bio->bi_sector == sector;
+		else
+			contig = bio->bi_sector + (bio->bi_size >> 9) ==
+				sector;
+
+		if (prev_bio_flags != bio_flags || !contig ||
+		    (tree->ops && tree->ops->merge_bio_hook &&
+		     tree->ops->merge_bio_hook(page, offset, page_size, bio,
+					       bio_flags)) ||
+		    bio_add_page(bio, page, page_size, offset) < page_size) {
+			ret = submit_one_bio(rw, bio, mirror_num,
+					     prev_bio_flags);
+			bio = NULL;
+		} else {
+			return 0;
+		}
+	}
+	if (this_compressed)
+		nr = BIO_MAX_PAGES;
+	else
+		nr = bio_get_nr_vecs(bdev);
+
+	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
+
+	bio_add_page(bio, page, page_size, offset);
+	bio->bi_end_io = end_io_func;
+	bio->bi_private = tree;
+
+	if (bio_ret)
+		*bio_ret = bio;
+	else
+		ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
+
+	return ret;
+}
+
+void set_page_extent_mapped(struct page *page)
+{
+	if (!PagePrivate(page)) {
+		SetPagePrivate(page);
+		page_cache_get(page);
+		set_page_private(page, EXTENT_PAGE_PRIVATE);
+	}
+}
+
+static void set_page_extent_head(struct page *page, unsigned long len)
+{
+	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
+}
+
+/*
+ * basic readpage implementation.  Locked extent state structs are inserted
+ * into the tree; they are removed when the IO is done (by the end_io
+ * handlers).
+ */
+static int __extent_read_full_page(struct extent_io_tree *tree,
+				   struct page *page,
+				   get_extent_t *get_extent,
+				   struct bio **bio, int mirror_num,
+				   unsigned long *bio_flags)
+{
+	struct inode *inode = page->mapping->host;
+	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 page_end = start + PAGE_CACHE_SIZE - 1;
+	u64 end;
+	u64 cur = start;
+	u64 extent_offset;
+	u64 last_byte = i_size_read(inode);
+	u64 block_start;
+	u64 cur_end;
+	sector_t sector;
+	struct extent_map *em;
+	struct block_device *bdev;
+	int ret;
+	int nr = 0;
+	size_t page_offset = 0;
+	size_t iosize;
+	size_t disk_io_size;
+	size_t blocksize = inode->i_sb->s_blocksize;
+	unsigned long this_bio_flag = 0;
+
+	set_page_extent_mapped(page);
+
+	end = page_end;
+	lock_extent(tree, start, end, GFP_NOFS);
+
+	if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
+		char *userpage;
+		size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
+
+		if (zero_offset) {
+			iosize = PAGE_CACHE_SIZE - zero_offset;
+			userpage = kmap_atomic(page, KM_USER0);
+			memset(userpage + zero_offset, 0, iosize);
+			flush_dcache_page(page);
+			kunmap_atomic(userpage, KM_USER0);
+		}
+	}
+	while (cur <= end) {
+		if (cur >= last_byte) {
+			char *userpage;
+			iosize = PAGE_CACHE_SIZE - page_offset;
+			userpage = kmap_atomic(page, KM_USER0);
+			memset(userpage + page_offset, 0, iosize);
+			flush_dcache_page(page);
+			kunmap_atomic(userpage, KM_USER0);
+			set_extent_uptodate(tree, cur, cur + iosize - 1,
+					    GFP_NOFS);
+			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+			break;
+		}
+		em = get_extent(inode, page, page_offset, cur,
+				end - cur + 1, 0);
+		if (IS_ERR(em) || !em) {
+			SetPageError(page);
+			unlock_extent(tree, cur, end, GFP_NOFS);
+			break;
+		}
+		extent_offset = cur - em->start;
+		BUG_ON(extent_map_end(em) <= cur);
+		BUG_ON(end < cur);
+
+		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
+			this_bio_flag = EXTENT_BIO_COMPRESSED;
+
+		iosize = min(extent_map_end(em) - cur, end - cur + 1);
+		cur_end = min(extent_map_end(em) - 1, end);
+		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
+		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
+			disk_io_size = em->block_len;
+			sector = em->block_start >> 9;
+		} else {
+			sector = (em->block_start + extent_offset) >> 9;
+			disk_io_size = iosize;
+		}
+		bdev = em->bdev;
+		block_start = em->block_start;
+		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+			block_start = EXTENT_MAP_HOLE;
+		free_extent_map(em);
+		em = NULL;
+
+		/* we've found a hole, just zero and go on */
+		if (block_start == EXTENT_MAP_HOLE) {
+			char *userpage;
+			userpage = kmap_atomic(page, KM_USER0);
+			memset(userpage + page_offset, 0, iosize);
+			flush_dcache_page(page);
+			kunmap_atomic(userpage, KM_USER0);
+
+			set_extent_uptodate(tree, cur, cur + iosize - 1,
+					    GFP_NOFS);
+			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+			cur = cur + iosize;
+			page_offset += iosize;
+			continue;
+		}
+		/* the get_extent function already copied into the page */
+		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
+			check_page_uptodate(tree, page);
+			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+			cur = cur + iosize;
+			page_offset += iosize;
+			continue;
+		}
+		/* we have an inline extent but it didn't get marked up
+		 * to date.  Error out
+		 */
+		if (block_start == EXTENT_MAP_INLINE) {
+			SetPageError(page);
+			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+			cur = cur + iosize;
+			page_offset += iosize;
+			continue;
+		}
+
+		ret = 0;
+		if (tree->ops && tree->ops->readpage_io_hook) {
+			ret = tree->ops->readpage_io_hook(page, cur,
+							  cur + iosize - 1);
+		}
+		if (!ret) {
+			unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
+			pnr -= page->index;
+			ret = submit_extent_page(READ, tree, page,
+					 sector, disk_io_size, page_offset,
+					 bdev, bio, pnr,
+					 end_bio_extent_readpage, mirror_num,
+					 *bio_flags,
+					 this_bio_flag);
+			nr++;
+			*bio_flags = this_bio_flag;
+		}
+		if (ret)
+			SetPageError(page);
+		cur = cur + iosize;
+		page_offset += iosize;
+	}
+	if (!nr) {
+		if (!PageError(page))
+			SetPageUptodate(page);
+		unlock_page(page);
+	}
+	return 0;
+}
+
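+/*
+ * read a single page into the page cache, using the provided get_extent
+ * callback to map file offsets to disk, and submit any bio built up along
+ * the way
+ */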
+int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
+			    get_extent_t *get_extent)
+{
+	struct bio *bio = NULL;
+	unsigned long bio_flags = 0;
+	int ret;
+
+	ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
+				      &bio_flags);
+	if (bio)
+		submit_one_bio(READ, bio, 0, bio_flags);
+	return ret;
+}
+
+/*
+ * the writepage semantics are similar to regular writepage.  extent
+ * records are inserted to lock ranges in the tree, and as dirty areas
+ * are found, they are marked writeback.  Then the lock bits are removed
+ * and the end_io handler clears the writeback ranges
+ */
+static int __extent_writepage(struct page *page, struct writeback_control *wbc,
+			      void *data)
+{
+	struct inode *inode = page->mapping->host;
+	struct extent_page_data *epd = data;
+	struct extent_io_tree *tree = epd->tree;
+	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 delalloc_start;
+	u64 page_end = start + PAGE_CACHE_SIZE - 1;
+	u64 end;
+	u64 cur = start;
+	u64 extent_offset;
+	u64 last_byte = i_size_read(inode);
+	u64 block_start;
+	u64 iosize;
+	u64 unlock_start;
+	sector_t sector;
+	struct extent_map *em;
+	struct block_device *bdev;
+	int ret;
+	int nr = 0;
+	size_t pg_offset = 0;
+	size_t blocksize;
+	loff_t i_size = i_size_read(inode);
+	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
+	u64 nr_delalloc;
+	u64 delalloc_end;
+	int page_started;
+	int compressed;
+	unsigned long nr_written = 0;
+
+	WARN_ON(!PageLocked(page));
+	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
+	if (page->index > end_index ||
+	   (page->index == end_index && !pg_offset)) {
+		page->mapping->a_ops->invalidatepage(page, 0);
+		unlock_page(page);
+		return 0;
+	}
+
+	if (page->index == end_index) {
+		char *userpage;
+
+		userpage = kmap_atomic(page, KM_USER0);
+		memset(userpage + pg_offset, 0,
+		       PAGE_CACHE_SIZE - pg_offset);
+		kunmap_atomic(userpage, KM_USER0);
+		flush_dcache_page(page);
+	}
+	pg_offset = 0;
+
+	set_page_extent_mapped(page);
+
+	delalloc_start = start;
+	delalloc_end = 0;
+	page_started = 0;
+	if (!epd->extent_locked) {
+		while (delalloc_end < page_end) {
+			nr_delalloc = find_lock_delalloc_range(inode, tree,
+						       page,
+						       &delalloc_start,
+						       &delalloc_end,
+						       128 * 1024 * 1024);
+			if (nr_delalloc == 0) {
+				delalloc_start = delalloc_end + 1;
+				continue;
+			}
+			tree->ops->fill_delalloc(inode, page, delalloc_start,
+						 delalloc_end, &page_started,
+						 &nr_written);
+			delalloc_start = delalloc_end + 1;
+		}
+
+		/* did the fill delalloc function already unlock and start
+		 * the IO?
+		 */
+		if (page_started) {
+			ret = 0;
+			goto update_nr_written;
+		}
+	}
+	lock_extent(tree, start, page_end, GFP_NOFS);
+
+	unlock_start = start;
+
+	if (tree->ops && tree->ops->writepage_start_hook) {
+		ret = tree->ops->writepage_start_hook(page, start,
+						      page_end);
+		if (ret == -EAGAIN) {
+			unlock_extent(tree, start, page_end, GFP_NOFS);
+			redirty_page_for_writepage(wbc, page);
+			unlock_page(page);
+			ret = 0;
+			goto update_nr_written;
+		}
+	}
+
+	nr_written++;
+
+	end = page_end;
+	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0))
+		printk(KERN_ERR "btrfs delalloc bits after lock_extent\n");
+
+	if (last_byte <= start) {
+		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
+		unlock_extent(tree, start, page_end, GFP_NOFS);
+		if (tree->ops && tree->ops->writepage_end_io_hook)
+			tree->ops->writepage_end_io_hook(page, start,
+							 page_end, NULL, 1);
+		unlock_start = page_end + 1;
+		goto done;
+	}
+
+	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
+	blocksize = inode->i_sb->s_blocksize;
+
+	while (cur <= end) {
+		if (cur >= last_byte) {
+			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
+			unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
+			if (tree->ops && tree->ops->writepage_end_io_hook)
+				tree->ops->writepage_end_io_hook(page, cur,
+							 page_end, NULL, 1);
+			unlock_start = page_end + 1;
+			break;
+		}
+		em = epd->get_extent(inode, page, pg_offset, cur,
+				     end - cur + 1, 1);
+		if (IS_ERR(em) || !em) {
+			SetPageError(page);
+			break;
+		}
+
+		extent_offset = cur - em->start;
+		BUG_ON(extent_map_end(em) <= cur);
+		BUG_ON(end < cur);
+		iosize = min(extent_map_end(em) - cur, end - cur + 1);
+		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
+		sector = (em->block_start + extent_offset) >> 9;
+		bdev = em->bdev;
+		block_start = em->block_start;
+		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
+		free_extent_map(em);
+		em = NULL;
+
+		/*
+		 * compressed and inline extents are written through other
+		 * paths in the FS
+		 */
+		if (compressed || block_start == EXTENT_MAP_HOLE ||
+		    block_start == EXTENT_MAP_INLINE) {
+			clear_extent_dirty(tree, cur,
+					   cur + iosize - 1, GFP_NOFS);
+
+			unlock_extent(tree, unlock_start, cur + iosize - 1,
+				      GFP_NOFS);
+
+			/*
+			 * end_io notification does not happen here for
+			 * compressed extents
+			 */
+			if (!compressed && tree->ops &&
+			    tree->ops->writepage_end_io_hook)
+				tree->ops->writepage_end_io_hook(page, cur,
+							 cur + iosize - 1,
+							 NULL, 1);
+			else if (compressed) {
+				/* we don't want to end_page_writeback on
+				 * a compressed extent.  this happens
+				 * elsewhere
+				 */
+				nr++;
+			}
+
+			cur += iosize;
+			pg_offset += iosize;
+			unlock_start = cur;
+			continue;
+		}
+		/* leave this out until we have a page_mkwrite call */
+		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
+				   EXTENT_DIRTY, 0)) {
+			cur = cur + iosize;
+			pg_offset += iosize;
+			continue;
+		}
+
+		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
+		if (tree->ops && tree->ops->writepage_io_hook) {
+			ret = tree->ops->writepage_io_hook(page, cur,
+						cur + iosize - 1);
+		} else {
+			ret = 0;
+		}
+		if (ret) {
+			SetPageError(page);
+		} else {
+			unsigned long max_nr = end_index + 1;
+
+			set_range_writeback(tree, cur, cur + iosize - 1);
+			if (!PageWriteback(page)) {
+				printk(KERN_ERR "btrfs warning page %lu not "
+				       "writeback, cur %llu end %llu\n",
+				       page->index, (unsigned long long)cur,
+				       (unsigned long long)end);
+			}
+
+			ret = submit_extent_page(WRITE, tree, page, sector,
+						 iosize, pg_offset, bdev,
+						 &epd->bio, max_nr,
+						 end_bio_extent_writepage,
+						 0, 0, 0);
+			if (ret)
+				SetPageError(page);
+		}
+		cur = cur + iosize;
+		pg_offset += iosize;
+		nr++;
+	}
+done:
+	if (nr == 0) {
+		/* make sure the mapping tag for page dirty gets cleared */
+		set_page_writeback(page);
+		end_page_writeback(page);
+	}
+	if (unlock_start <= page_end)
+		unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
+	unlock_page(page);
+
+update_nr_written:
+	wbc->nr_to_write -= nr_written;
+	if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
+	    wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
+		page->mapping->writeback_index = page->index + nr_written;
+	return 0;
+}
+
+/**
+ * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
+ * @tree: the extent_io_tree associated with the pages
+ * @mapping: address space structure to write
+ * @wbc: subtract the number of written pages from *@wbc->nr_to_write
+ * @writepage: function called for each page
+ * @data: data passed to writepage function
+ * @flush_fn: function used to flush any bio built up so far before waiting
+ *            on a page under writeback
+ *
+ * If a page is already under I/O, extent_write_cache_pages() skips it, even
+ * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
+ * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
+ * and msync() need to guarantee that all the data which was dirty at the time
+ * the call was made gets new I/O started against it.  If wbc->sync_mode is
+ * WB_SYNC_ALL then we were called for data integrity and we must wait for
+ * existing IO to complete.
+ */
+static int extent_write_cache_pages(struct extent_io_tree *tree,
+			     struct address_space *mapping,
+			     struct writeback_control *wbc,
+			     writepage_t writepage, void *data,
+			     void (*flush_fn)(void *))
+{
+	struct backing_dev_info *bdi = mapping->backing_dev_info;
+	int ret = 0;
+	int done = 0;
+	struct pagevec pvec;
+	int nr_pages;
+	pgoff_t index;
+	pgoff_t end;		/* Inclusive */
+	int scanned = 0;
+	int range_whole = 0;
+
+	if (wbc->nonblocking && bdi_write_congested(bdi)) {
+		wbc->encountered_congestion = 1;
+		return 0;
+	}
+
+	pagevec_init(&pvec, 0);
+	if (wbc->range_cyclic) {
+		index = mapping->writeback_index; /* Start from prev offset */
+		end = -1;
+	} else {
+		index = wbc->range_start >> PAGE_CACHE_SHIFT;
+		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+			range_whole = 1;
+		scanned = 1;
+	}
+retry:
+	while (!done && (index <= end) &&
+	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+			      PAGECACHE_TAG_DIRTY, min(end - index,
+				  (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+		unsigned i;
+
+		scanned = 1;
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
+
+			/*
+			 * At this point we hold neither mapping->tree_lock nor
+			 * lock on the page itself: the page may be truncated or
+			 * invalidated (changing page->mapping to NULL), or even
+			 * swizzled back from swapper_space to tmpfs file
+			 * mapping
+			 */
+			if (tree->ops && tree->ops->write_cache_pages_lock_hook)
+				tree->ops->write_cache_pages_lock_hook(page);
+			else
+				lock_page(page);
+
+			if (unlikely(page->mapping != mapping)) {
+				unlock_page(page);
+				continue;
+			}
+
+			if (!wbc->range_cyclic && page->index > end) {
+				done = 1;
+				unlock_page(page);
+				continue;
+			}
+
+			if (wbc->sync_mode != WB_SYNC_NONE) {
+				if (PageWriteback(page))
+					flush_fn(data);
+				wait_on_page_writeback(page);
+			}
+
+			if (PageWriteback(page) ||
+			    !clear_page_dirty_for_io(page)) {
+				unlock_page(page);
+				continue;
+			}
+
+			ret = (*writepage)(page, wbc, data);
+
+			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
+				unlock_page(page);
+				ret = 0;
+			}
+			if (ret || wbc->nr_to_write <= 0)
+				done = 1;
+			if (wbc->nonblocking && bdi_write_congested(bdi)) {
+				wbc->encountered_congestion = 1;
+				done = 1;
+			}
+		}
+		pagevec_release(&pvec);
+		cond_resched();
+	}
+	if (!scanned && !done) {
+		/*
+		 * We hit the last page and there is more work to be done: wrap
+		 * back to the start of the file
+		 */
+		scanned = 1;
+		index = 0;
+		goto retry;
+	}
+	return ret;
+}
+
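+/* submit any write bio the writepage helpers have accumulated so far */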
+static noinline void flush_write_bio(void *data)
+{
+	struct extent_page_data *epd = data;
+	if (epd->bio) {
+		submit_one_bio(WRITE, epd->bio, 0, 0);
+		epd->bio = NULL;
+	}
+}
+
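+/*
+ * write out the given page and then opportunistically push out up to 64
+ * of the dirty pages that follow it, so the resulting IO can be batched
+ */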
+int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
+			  get_extent_t *get_extent,
+			  struct writeback_control *wbc)
+{
+	int ret;
+	struct address_space *mapping = page->mapping;
+	struct extent_page_data epd = {
+		.bio = NULL,
+		.tree = tree,
+		.get_extent = get_extent,
+		.extent_locked = 0,
+	};
+	struct writeback_control wbc_writepages = {
+		.bdi		= wbc->bdi,
+		.sync_mode	= WB_SYNC_NONE,
+		.older_than_this = NULL,
+		.nr_to_write	= 64,
+		.range_start	= page_offset(page) + PAGE_CACHE_SIZE,
+		.range_end	= (loff_t)-1,
+	};
+
+
+	ret = __extent_writepage(page, wbc, &epd);
+
+	extent_write_cache_pages(tree, mapping, &wbc_writepages,
+				 __extent_writepage, &epd, flush_write_bio);
+	if (epd.bio)
+		submit_one_bio(WRITE, epd.bio, 0, 0);
+	return ret;
+}
+
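+/*
+ * write out a range of pages whose extent state is already locked by the
+ * caller; each page is sent through __extent_writepage directly
+ */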
+int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
+			      u64 start, u64 end, get_extent_t *get_extent,
+			      int mode)
+{
+	int ret = 0;
+	struct address_space *mapping = inode->i_mapping;
+	struct page *page;
+	unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
+		PAGE_CACHE_SHIFT;
+
+	struct extent_page_data epd = {
+		.bio = NULL,
+		.tree = tree,
+		.get_extent = get_extent,
+		.extent_locked = 1,
+	};
+	struct writeback_control wbc_writepages = {
+		.bdi		= inode->i_mapping->backing_dev_info,
+		.sync_mode	= mode,
+		.older_than_this = NULL,
+		.nr_to_write	= nr_pages * 2,
+		.range_start	= start,
+		.range_end	= end + 1,
+	};
+
+	while (start <= end) {
+		page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+		if (clear_page_dirty_for_io(page))
+			ret = __extent_writepage(page, &wbc_writepages, &epd);
+		else {
+			if (tree->ops && tree->ops->writepage_end_io_hook)
+				tree->ops->writepage_end_io_hook(page, start,
+						 start + PAGE_CACHE_SIZE - 1,
+						 NULL, 1);
+			unlock_page(page);
+		}
+		page_cache_release(page);
+		start += PAGE_CACHE_SIZE;
+	}
+
+	if (epd.bio)
+		submit_one_bio(WRITE, epd.bio, 0, 0);
+	return ret;
+}
+
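+/* writepages helper: walk the dirty pages of the mapping and write them */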
+int extent_writepages(struct extent_io_tree *tree,
+		      struct address_space *mapping,
+		      get_extent_t *get_extent,
+		      struct writeback_control *wbc)
+{
+	int ret = 0;
+	struct extent_page_data epd = {
+		.bio = NULL,
+		.tree = tree,
+		.get_extent = get_extent,
+		.extent_locked = 0,
+	};
+
+	ret = extent_write_cache_pages(tree, mapping, wbc,
+				       __extent_writepage, &epd,
+				       flush_write_bio);
+	if (epd.bio)
+		submit_one_bio(WRITE, epd.bio, 0, 0);
+	return ret;
+}
+
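+/*
+ * readpages helper: add each page to the page cache and read it via
+ * __extent_read_full_page, collecting the reads into a single bio where
+ * possible
+ */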
+int extent_readpages(struct extent_io_tree *tree,
+		     struct address_space *mapping,
+		     struct list_head *pages, unsigned nr_pages,
+		     get_extent_t get_extent)
+{
+	struct bio *bio = NULL;
+	unsigned page_idx;
+	struct pagevec pvec;
+	unsigned long bio_flags = 0;
+
+	pagevec_init(&pvec, 0);
+	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
+		struct page *page = list_entry(pages->prev, struct page, lru);
+
+		prefetchw(&page->flags);
+		list_del(&page->lru);
+		/*
+		 * what we want to do here is call add_to_page_cache_lru,
+		 * but that isn't exported, so we reproduce it here
+		 */
+		if (!add_to_page_cache(page, mapping,
+					page->index, GFP_KERNEL)) {
+
+			/* open coding of lru_cache_add, also not exported */
+			page_cache_get(page);
+			if (!pagevec_add(&pvec, page))
+				__pagevec_lru_add_file(&pvec);
+			__extent_read_full_page(tree, page, get_extent,
+						&bio, 0, &bio_flags);
+		}
+		page_cache_release(page);
+	}
+	if (pagevec_count(&pvec))
+		__pagevec_lru_add_file(&pvec);
+	BUG_ON(!list_empty(pages));
+	if (bio)
+		submit_one_bio(READ, bio, 0, bio_flags);
+	return 0;
+}
+
+/*
+ * basic invalidatepage code, this waits on any locked or writeback
+ * ranges corresponding to the page, and then deletes any extent state
+ * records from the tree
+ */
+int extent_invalidatepage(struct extent_io_tree *tree,
+			  struct page *page, unsigned long offset)
+{
+	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
+	u64 end = start + PAGE_CACHE_SIZE - 1;
+	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
+
+	start += (offset + blocksize - 1) & ~(blocksize - 1);
+	if (start > end)
+		return 0;
+
+	lock_extent(tree, start, end, GFP_NOFS);
+	wait_on_extent_writeback(tree, start, end);
+	clear_extent_bit(tree, start, end,
+			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
+			 1, 1, GFP_NOFS);
+	return 0;
+}
+
+/*
+ * simple commit_write call, set_range_dirty is used to mark both
+ * the pages and the extent records as dirty
+ */
+int extent_commit_write(struct extent_io_tree *tree,
+			struct inode *inode, struct page *page,
+			unsigned from, unsigned to)
+{
+	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+
+	set_page_extent_mapped(page);
+	set_page_dirty(page);
+
+	if (pos > inode->i_size) {
+		i_size_write(inode, pos);
+		mark_inode_dirty(inode);
+	}
+	return 0;
+}
+
+int extent_prepare_write(struct extent_io_tree *tree,
+			 struct inode *inode, struct page *page,
+			 unsigned from, unsigned to, get_extent_t *get_extent)
+{
+	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
+	u64 block_start;
+	u64 orig_block_start;
+	u64 block_end;
+	u64 cur_end;
+	struct extent_map *em;
+	unsigned blocksize = 1 << inode->i_blkbits;
+	size_t page_offset = 0;
+	size_t block_off_start;
+	size_t block_off_end;
+	int err = 0;
+	int iocount = 0;
+	int ret = 0;
+	int isnew;
+
+	set_page_extent_mapped(page);
+
+	block_start = (page_start + from) & ~((u64)blocksize - 1);
+	block_end = (page_start + to - 1) | (blocksize - 1);
+	orig_block_start = block_start;
+
+	lock_extent(tree, page_start, page_end, GFP_NOFS);
+	while (block_start <= block_end) {
+		em = get_extent(inode, page, page_offset, block_start,
+				block_end - block_start + 1, 1);
+		if (IS_ERR(em) || !em)
+			goto err;
+
+		cur_end = min(block_end, extent_map_end(em) - 1);
+		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
+		block_off_end = block_off_start + blocksize;
+		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
+
+		if (!PageUptodate(page) && isnew &&
+		    (block_off_end > to || block_off_start < from)) {
+			void *kaddr;
+
+			kaddr = kmap_atomic(page, KM_USER0);
+			if (block_off_end > to)
+				memset(kaddr + to, 0, block_off_end - to);
+			if (block_off_start < from)
+				memset(kaddr + block_off_start, 0,
+				       from - block_off_start);
+			flush_dcache_page(page);
+			kunmap_atomic(kaddr, KM_USER0);
+		}
+		if ((em->block_start != EXTENT_MAP_HOLE &&
+		     em->block_start != EXTENT_MAP_INLINE) &&
+		    !isnew && !PageUptodate(page) &&
+		    (block_off_end > to || block_off_start < from) &&
+		    !test_range_bit(tree, block_start, cur_end,
+				    EXTENT_UPTODATE, 1)) {
+			u64 sector;
+			u64 extent_offset = block_start - em->start;
+			size_t iosize;
+			sector = (em->block_start + extent_offset) >> 9;
+			iosize = (cur_end - block_start + blocksize) &
+				~((u64)blocksize - 1);
+			/*
+			 * we've already got the extent locked, but we
+			 * need to split the state such that our end_bio
+			 * handler can clear the lock.
+			 */
+			set_extent_bit(tree, block_start,
+				       block_start + iosize - 1,
+				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
+			ret = submit_extent_page(READ, tree, page,
+					 sector, iosize, page_offset, em->bdev,
+					 NULL, 1,
+					 end_bio_extent_preparewrite, 0,
+					 0, 0);
+			iocount++;
+			block_start = block_start + iosize;
+		} else {
+			set_extent_uptodate(tree, block_start, cur_end,
+					    GFP_NOFS);
+			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
+			block_start = cur_end + 1;
+		}
+		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
+		free_extent_map(em);
+	}
+	if (iocount) {
+		wait_extent_bit(tree, orig_block_start,
+				block_end, EXTENT_LOCKED);
+	}
+	check_page_uptodate(tree, page);
+err:
+	/* FIXME, zero out newly allocated blocks on error */
+	return err;
+}
+
+/*
+ * a helper for releasepage, this tests for areas of the page that
+ * are locked or under IO and drops the related state bits if it is safe
+ * to drop the page.
+ */
+int try_release_extent_state(struct extent_map_tree *map,
+			     struct extent_io_tree *tree, struct page *page,
+			     gfp_t mask)
+{
+	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 end = start + PAGE_CACHE_SIZE - 1;
+	int ret = 1;
+
+	if (test_range_bit(tree, start, end,
+			   EXTENT_IOBITS | EXTENT_ORDERED, 0))
+		ret = 0;
+	else {
+		if ((mask & GFP_NOFS) == GFP_NOFS)
+			mask = GFP_NOFS;
+		clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
+				 1, 1, mask);
+	}
+	return ret;
+}
+
+/*
+ * a helper for releasepage.  As long as there are no locked extents
+ * in the range corresponding to the page, both state records and extent
+ * map records are removed
+ */
+int try_release_extent_mapping(struct extent_map_tree *map,
+			       struct extent_io_tree *tree, struct page *page,
+			       gfp_t mask)
+{
+	struct extent_map *em;
+	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 end = start + PAGE_CACHE_SIZE - 1;
+
+	if ((mask & __GFP_WAIT) &&
+	    page->mapping->host->i_size > 16 * 1024 * 1024) {
+		u64 len;
+		while (start <= end) {
+			len = end - start + 1;
+			spin_lock(&map->lock);
+			em = lookup_extent_mapping(map, start, len);
+			if (!em || IS_ERR(em)) {
+				spin_unlock(&map->lock);
+				break;
+			}
+			if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
+			    em->start != start) {
+				spin_unlock(&map->lock);
+				free_extent_map(em);
+				break;
+			}
+			if (!test_range_bit(tree, em->start,
+					    extent_map_end(em) - 1,
+					    EXTENT_LOCKED | EXTENT_WRITEBACK |
+					    EXTENT_ORDERED,
+					    0)) {
+				remove_extent_mapping(map, em);
+				/* once for the rb tree */
+				free_extent_map(em);
+			}
+			start = extent_map_end(em);
+			spin_unlock(&map->lock);
+
+			/* once for us */
+			free_extent_map(em);
+		}
+	}
+	return try_release_extent_state(map, tree, page, mask);
+}
+
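+/*
+ * bmap helper: map a logical block to a sector on disk using get_extent;
+ * special extents (holes, inline data) map to sector 0
+ */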
+sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
+		get_extent_t *get_extent)
+{
+	struct inode *inode = mapping->host;
+	u64 start = iblock << inode->i_blkbits;
+	sector_t sector = 0;
+	size_t blksize = (1 << inode->i_blkbits);
+	struct extent_map *em;
+
+	lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
+		    GFP_NOFS);
+	em = get_extent(inode, NULL, 0, start, blksize, 0);
+	unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
+		      GFP_NOFS);
+	if (!em || IS_ERR(em))
+		return 0;
+
+	if (em->block_start > EXTENT_MAP_LAST_BYTE)
+		goto out;
+
+	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
+out:
+	free_extent_map(em);
+	return sector;
+}
+
+static inline struct page *extent_buffer_page(struct extent_buffer *eb,
+					      unsigned long i)
+{
+	struct page *p;
+	struct address_space *mapping;
+
+	if (i == 0)
+		return eb->first_page;
+	i += eb->start >> PAGE_CACHE_SHIFT;
+	mapping = eb->first_page->mapping;
+	if (!mapping)
+		return NULL;
+
+	/*
+	 * extent_buffer_page is only called after pinning the page
+	 * by increasing the reference count.  So we know the page must
+	 * be in the radix tree.
+	 */
+	rcu_read_lock();
+	p = radix_tree_lookup(&mapping->page_tree, i);
+	rcu_read_unlock();
+
+	return p;
+}
+
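+/* number of pages covered by the extent buffer range [start, start + len) */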
+static inline unsigned long num_extent_pages(u64 start, u64 len)
+{
+	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+		(start >> PAGE_CACHE_SHIFT);
+}
+
+static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
+						   u64 start,
+						   unsigned long len,
+						   gfp_t mask)
+{
+	struct extent_buffer *eb = NULL;
+#ifdef LEAK_DEBUG
+	unsigned long flags;
+#endif
+
+	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
+	if (!eb)
+		return NULL;
+	eb->start = start;
+	eb->len = len;
+	mutex_init(&eb->mutex);
+#ifdef LEAK_DEBUG
+	spin_lock_irqsave(&leak_lock, flags);
+	list_add(&eb->leak_list, &buffers);
+	spin_unlock_irqrestore(&leak_lock, flags);
+#endif
+	atomic_set(&eb->refs, 1);
+
+	return eb;
+}
+
+static void __free_extent_buffer(struct extent_buffer *eb)
+{
+#ifdef LEAK_DEBUG
+	unsigned long flags;
+	spin_lock_irqsave(&leak_lock, flags);
+	list_del(&eb->leak_list);
+	spin_unlock_irqrestore(&leak_lock, flags);
+#endif
+	kmem_cache_free(extent_buffer_cache, eb);
+}
+
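+/*
+ * return an existing extent buffer for this range or allocate a new one,
+ * pinning all of the backing pages in the page cache.  The buffer is
+ * returned with a reference held for the caller.
+ */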
+struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
+					  u64 start, unsigned long len,
+					  struct page *page0,
+					  gfp_t mask)
+{
+	unsigned long num_pages = num_extent_pages(start, len);
+	unsigned long i;
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	struct extent_buffer *eb;
+	struct extent_buffer *exists = NULL;
+	struct page *p;
+	struct address_space *mapping = tree->mapping;
+	int uptodate = 1;
+
+	spin_lock(&tree->buffer_lock);
+	eb = buffer_search(tree, start);
+	if (eb) {
+		atomic_inc(&eb->refs);
+		spin_unlock(&tree->buffer_lock);
+		mark_page_accessed(eb->first_page);
+		return eb;
+	}
+	spin_unlock(&tree->buffer_lock);
+
+	eb = __alloc_extent_buffer(tree, start, len, mask);
+	if (!eb)
+		return NULL;
+
+	if (page0) {
+		eb->first_page = page0;
+		i = 1;
+		index++;
+		page_cache_get(page0);
+		mark_page_accessed(page0);
+		set_page_extent_mapped(page0);
+		set_page_extent_head(page0, len);
+		uptodate = PageUptodate(page0);
+	} else {
+		i = 0;
+	}
+	for (; i < num_pages; i++, index++) {
+		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
+		if (!p) {
+			WARN_ON(1);
+			goto free_eb;
+		}
+		set_page_extent_mapped(p);
+		mark_page_accessed(p);
+		if (i == 0) {
+			eb->first_page = p;
+			set_page_extent_head(p, len);
+		} else {
+			set_page_private(p, EXTENT_PAGE_PRIVATE);
+		}
+		if (!PageUptodate(p))
+			uptodate = 0;
+		unlock_page(p);
+	}
+	if (uptodate)
+		eb->flags |= EXTENT_UPTODATE;
+	eb->flags |= EXTENT_BUFFER_FILLED;
+
+	spin_lock(&tree->buffer_lock);
+	exists = buffer_tree_insert(tree, start, &eb->rb_node);
+	if (exists) {
+		/* add one reference for the caller */
+		atomic_inc(&exists->refs);
+		spin_unlock(&tree->buffer_lock);
+		goto free_eb;
+	}
+	spin_unlock(&tree->buffer_lock);
+
+	/* add one reference for the tree */
+	atomic_inc(&eb->refs);
+	return eb;
+
+free_eb:
+	if (!atomic_dec_and_test(&eb->refs))
+		return exists;
+	for (index = 1; index < i; index++)
+		page_cache_release(extent_buffer_page(eb, index));
+	page_cache_release(extent_buffer_page(eb, 0));
+	__free_extent_buffer(eb);
+	return exists;
+}
+
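+/* look up an existing extent buffer and take a reference on it */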
+struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
+					 u64 start, unsigned long len,
+					  gfp_t mask)
+{
+	struct extent_buffer *eb;
+
+	spin_lock(&tree->buffer_lock);
+	eb = buffer_search(tree, start);
+	if (eb)
+		atomic_inc(&eb->refs);
+	spin_unlock(&tree->buffer_lock);
+
+	if (eb)
+		mark_page_accessed(eb->first_page);
+
+	return eb;
+}
+
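+/*
+ * drop a reference on the extent buffer.  The buffer and its pages are
+ * actually released through try_release_extent_buffer(), so the count is
+ * not expected to hit zero here.
+ */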
+void free_extent_buffer(struct extent_buffer *eb)
+{
+	if (!eb)
+		return;
+
+	if (!atomic_dec_and_test(&eb->refs))
+		return;
+
+	WARN_ON(1);
+}
+
+int clear_extent_buffer_dirty(struct extent_io_tree *tree,
+			      struct extent_buffer *eb)
+{
+	int set;
+	unsigned long i;
+	unsigned long num_pages;
+	struct page *page;
+
+	u64 start = eb->start;
+	u64 end = start + eb->len - 1;
+
+	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
+	num_pages = num_extent_pages(eb->start, eb->len);
+
+	for (i = 0; i < num_pages; i++) {
+		page = extent_buffer_page(eb, i);
+		if (!set && !PageDirty(page))
+			continue;
+
+		lock_page(page);
+		if (i == 0)
+			set_page_extent_head(page, eb->len);
+		else
+			set_page_private(page, EXTENT_PAGE_PRIVATE);
+
+		/*
+		 * if we're on the last page or the first page and the
+		 * block isn't aligned on a page boundary, do extra checks
+		 * to make sure we don't clean a page that is partially dirty
+		 */
+		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
+		    ((i == num_pages - 1) &&
+		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
+			start = (u64)page->index << PAGE_CACHE_SHIFT;
+			end  = start + PAGE_CACHE_SIZE - 1;
+			if (test_range_bit(tree, start, end,
+					   EXTENT_DIRTY, 0)) {
+				unlock_page(page);
+				continue;
+			}
+		}
+		clear_page_dirty_for_io(page);
+		spin_lock_irq(&page->mapping->tree_lock);
+		if (!PageDirty(page)) {
+			radix_tree_tag_clear(&page->mapping->page_tree,
+						page_index(page),
+						PAGECACHE_TAG_DIRTY);
+		}
+		spin_unlock_irq(&page->mapping->tree_lock);
+		unlock_page(page);
+	}
+	return 0;
+}
+
+int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
+				    struct extent_buffer *eb)
+{
+	return wait_on_extent_writeback(tree, eb->start,
+					eb->start + eb->len - 1);
+}
+
+int set_extent_buffer_dirty(struct extent_io_tree *tree,
+			     struct extent_buffer *eb)
+{
+	unsigned long i;
+	unsigned long num_pages;
+
+	num_pages = num_extent_pages(eb->start, eb->len);
+	for (i = 0; i < num_pages; i++) {
+		struct page *page = extent_buffer_page(eb, i);
+		/* writepage may need to do something special for the
+		 * first page, so we have to make sure page->private is
+		 * properly set.  releasepage may drop page->private
+		 * on us if the page isn't already dirty.
+		 */
+		lock_page(page);
+		if (i == 0) {
+			set_page_extent_head(page, eb->len);
+		} else if (PagePrivate(page) &&
+			   page->private != EXTENT_PAGE_PRIVATE) {
+			set_page_extent_mapped(page);
+		}
+		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
+		set_extent_dirty(tree, page_offset(page),
+				 page_offset(page) + PAGE_CACHE_SIZE - 1,
+				 GFP_NOFS);
+		unlock_page(page);
+	}
+	return 0;
+}
+
+int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
+				struct extent_buffer *eb)
+{
+	unsigned long i;
+	struct page *page;
+	unsigned long num_pages;
+
+	num_pages = num_extent_pages(eb->start, eb->len);
+	eb->flags &= ~EXTENT_UPTODATE;
+
+	clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
+			      GFP_NOFS);
+	for (i = 0; i < num_pages; i++) {
+		page = extent_buffer_page(eb, i);
+		if (page)
+			ClearPageUptodate(page);
+	}
+	return 0;
+}
+
+int set_extent_buffer_uptodate(struct extent_io_tree *tree,
+				struct extent_buffer *eb)
+{
+	unsigned long i;
+	struct page *page;
+	unsigned long num_pages;
+
+	num_pages = num_extent_pages(eb->start, eb->len);
+
+	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
+			    GFP_NOFS);
+	for (i = 0; i < num_pages; i++) {
+		page = extent_buffer_page(eb, i);
+		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
+		    ((i == num_pages - 1) &&
+		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
+			check_page_uptodate(tree, page);
+			continue;
+		}
+		SetPageUptodate(page);
+	}
+	return 0;
+}
+
+int extent_range_uptodate(struct extent_io_tree *tree,
+			  u64 start, u64 end)
+{
+	struct page *page;
+	int ret;
+	int pg_uptodate = 1;
+	int uptodate;
+	unsigned long index;
+
+	ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
+	if (ret)
+		return 1;
+	while (start <= end) {
+		index = start >> PAGE_CACHE_SHIFT;
+		page = find_get_page(tree->mapping, index);
+		uptodate = PageUptodate(page);
+		page_cache_release(page);
+		if (!uptodate) {
+			pg_uptodate = 0;
+			break;
+		}
+		start += PAGE_CACHE_SIZE;
+	}
+	return pg_uptodate;
+}
+
+int extent_buffer_uptodate(struct extent_io_tree *tree,
+			   struct extent_buffer *eb)
+{
+	int ret = 0;
+	unsigned long num_pages;
+	unsigned long i;
+	struct page *page;
+	int pg_uptodate = 1;
+
+	if (eb->flags & EXTENT_UPTODATE)
+		return 1;
+
+	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+			   EXTENT_UPTODATE, 1);
+	if (ret)
+		return ret;
+
+	num_pages = num_extent_pages(eb->start, eb->len);
+	for (i = 0; i < num_pages; i++) {
+		page = extent_buffer_page(eb, i);
+		if (!PageUptodate(page)) {
+			pg_uptodate = 0;
+			break;
+		}
+	}
+	return pg_uptodate;
+}
+
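+/*
+ * read in any pages of the extent buffer that are not already uptodate,
+ * optionally waiting for the IO to complete before returning
+ */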
+int read_extent_buffer_pages(struct extent_io_tree *tree,
+			     struct extent_buffer *eb,
+			     u64 start, int wait,
+			     get_extent_t *get_extent, int mirror_num)
+{
+	unsigned long i;
+	unsigned long start_i;
+	struct page *page;
+	int err;
+	int ret = 0;
+	int locked_pages = 0;
+	int all_uptodate = 1;
+	int inc_all_pages = 0;
+	unsigned long num_pages;
+	struct bio *bio = NULL;
+	unsigned long bio_flags = 0;
+
+	if (eb->flags & EXTENT_UPTODATE)
+		return 0;
+
+	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+			   EXTENT_UPTODATE, 1)) {
+		return 0;
+	}
+
+	if (start) {
+		WARN_ON(start < eb->start);
+		start_i = (start >> PAGE_CACHE_SHIFT) -
+			(eb->start >> PAGE_CACHE_SHIFT);
+	} else {
+		start_i = 0;
+	}
+
+	num_pages = num_extent_pages(eb->start, eb->len);
+	for (i = start_i; i < num_pages; i++) {
+		page = extent_buffer_page(eb, i);
+		if (!wait) {
+			if (!trylock_page(page))
+				goto unlock_exit;
+		} else {
+			lock_page(page);
+		}
+		locked_pages++;
+		if (!PageUptodate(page))
+			all_uptodate = 0;
+	}
+	if (all_uptodate) {
+		if (start_i == 0)
+			eb->flags |= EXTENT_UPTODATE;
+		goto unlock_exit;
+	}
+
+	for (i = start_i; i < num_pages; i++) {
+		page = extent_buffer_page(eb, i);
+		if (inc_all_pages)
+			page_cache_get(page);
+		if (!PageUptodate(page)) {
+			if (start_i == 0)
+				inc_all_pages = 1;
+			ClearPageError(page);
+			err = __extent_read_full_page(tree, page,
+						      get_extent, &bio,
+						      mirror_num, &bio_flags);
+			if (err)
+				ret = err;
+		} else {
+			unlock_page(page);
+		}
+	}
+
+	if (bio)
+		submit_one_bio(READ, bio, mirror_num, bio_flags);
+
+	if (ret || !wait)
+		return ret;
+
+	for (i = start_i; i < num_pages; i++) {
+		page = extent_buffer_page(eb, i);
+		wait_on_page_locked(page);
+		if (!PageUptodate(page))
+			ret = -EIO;
+	}
+
+	if (!ret)
+		eb->flags |= EXTENT_UPTODATE;
+	return ret;
+
+unlock_exit:
+	i = start_i;
+	while (locked_pages > 0) {
+		page = extent_buffer_page(eb, i);
+		i++;
+		unlock_page(page);
+		locked_pages--;
+	}
+	return ret;
+}
+
+void read_extent_buffer(struct extent_buffer *eb, void *dstv,
+			unsigned long start,
+			unsigned long len)
+{
+	size_t cur;
+	size_t offset;
+	struct page *page;
+	char *kaddr;
+	char *dst = (char *)dstv;
+	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+
+	WARN_ON(start > eb->len);
+	WARN_ON(start + len > eb->start + eb->len);
+
+	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
+
+	while (len > 0) {
+		page = extent_buffer_page(eb, i);
+
+		cur = min(len, (PAGE_CACHE_SIZE - offset));
+		kaddr = kmap_atomic(page, KM_USER1);
+		memcpy(dst, kaddr + offset, cur);
+		kunmap_atomic(kaddr, KM_USER1);
+
+		dst += cur;
+		len -= cur;
+		offset = 0;
+		i++;
+	}
+}
+
+int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
+			       unsigned long min_len, char **token, char **map,
+			       unsigned long *map_start,
+			       unsigned long *map_len, int km)
+{
+	size_t offset = start & (PAGE_CACHE_SIZE - 1);
+	char *kaddr;
+	struct page *p;
+	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	unsigned long end_i = (start_offset + start + min_len - 1) >>
+		PAGE_CACHE_SHIFT;
+
+	if (i != end_i)
+		return -EINVAL;
+
+	if (i == 0) {
+		offset = start_offset;
+		*map_start = 0;
+	} else {
+		offset = 0;
+		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
+	}
+
+	if (start + min_len > eb->len) {
+		printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
+		       "wanted %lu %lu\n", (unsigned long long)eb->start,
+		       eb->len, start, min_len);
+		WARN_ON(1);
+	}
+
+	p = extent_buffer_page(eb, i);
+	kaddr = kmap_atomic(p, km);
+	*token = kaddr;
+	*map = kaddr + offset;
+	*map_len = PAGE_CACHE_SIZE - offset;
+	return 0;
+}
+
+int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
+		      unsigned long min_len,
+		      char **token, char **map,
+		      unsigned long *map_start,
+		      unsigned long *map_len, int km)
+{
+	int err;
+	int save = 0;
+	if (eb->map_token) {
+		unmap_extent_buffer(eb, eb->map_token, km);
+		eb->map_token = NULL;
+		save = 1;
+		WARN_ON(!mutex_is_locked(&eb->mutex));
+	}
+	err = map_private_extent_buffer(eb, start, min_len, token, map,
+				       map_start, map_len, km);
+	if (!err && save) {
+		eb->map_token = *token;
+		eb->kaddr = *map;
+		eb->map_start = *map_start;
+		eb->map_len = *map_len;
+	}
+	return err;
+}
+
+void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
+{
+	kunmap_atomic(token, km);
+}
+
+int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
+			  unsigned long start,
+			  unsigned long len)
+{
+	size_t cur;
+	size_t offset;
+	struct page *page;
+	char *kaddr;
+	char *ptr = (char *)ptrv;
+	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	int ret = 0;
+
+	WARN_ON(start > eb->len);
+	WARN_ON(start + len > eb->start + eb->len);
+
+	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
+
+	while (len > 0) {
+		page = extent_buffer_page(eb, i);
+
+		cur = min(len, (PAGE_CACHE_SIZE - offset));
+
+		kaddr = kmap_atomic(page, KM_USER0);
+		ret = memcmp(ptr, kaddr + offset, cur);
+		kunmap_atomic(kaddr, KM_USER0);
+		if (ret)
+			break;
+
+		ptr += cur;
+		len -= cur;
+		offset = 0;
+		i++;
+	}
+	return ret;
+}
+
+void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
+			 unsigned long start, unsigned long len)
+{
+	size_t cur;
+	size_t offset;
+	struct page *page;
+	char *kaddr;
+	char *src = (char *)srcv;
+	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+
+	WARN_ON(start > eb->len);
+	WARN_ON(start + len > eb->start + eb->len);
+
+	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
+
+	while (len > 0) {
+		page = extent_buffer_page(eb, i);
+		WARN_ON(!PageUptodate(page));
+
+		cur = min(len, PAGE_CACHE_SIZE - offset);
+		kaddr = kmap_atomic(page, KM_USER1);
+		memcpy(kaddr + offset, src, cur);
+		kunmap_atomic(kaddr, KM_USER1);
+
+		src += cur;
+		len -= cur;
+		offset = 0;
+		i++;
+	}
+}
+
+void memset_extent_buffer(struct extent_buffer *eb, char c,
+			  unsigned long start, unsigned long len)
+{
+	size_t cur;
+	size_t offset;
+	struct page *page;
+	char *kaddr;
+	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+
+	WARN_ON(start > eb->len);
+	WARN_ON(start + len > eb->start + eb->len);
+
+	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
+
+	while (len > 0) {
+		page = extent_buffer_page(eb, i);
+		WARN_ON(!PageUptodate(page));
+
+		cur = min(len, PAGE_CACHE_SIZE - offset);
+		kaddr = kmap_atomic(page, KM_USER0);
+		memset(kaddr + offset, c, cur);
+		kunmap_atomic(kaddr, KM_USER0);
+
+		len -= cur;
+		offset = 0;
+		i++;
+	}
+}
+
+void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
+			unsigned long dst_offset, unsigned long src_offset,
+			unsigned long len)
+{
+	u64 dst_len = dst->len;
+	size_t cur;
+	size_t offset;
+	struct page *page;
+	char *kaddr;
+	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
+
+	WARN_ON(src->len != dst_len);
+
+	offset = (start_offset + dst_offset) &
+		((unsigned long)PAGE_CACHE_SIZE - 1);
+
+	while (len > 0) {
+		page = extent_buffer_page(dst, i);
+		WARN_ON(!PageUptodate(page));
+
+		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
+
+		kaddr = kmap_atomic(page, KM_USER0);
+		read_extent_buffer(src, kaddr + offset, src_offset, cur);
+		kunmap_atomic(kaddr, KM_USER0);
+
+		src_offset += cur;
+		len -= cur;
+		offset = 0;
+		i++;
+	}
+}
+
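+/*
+ * copy len bytes within or between pages, walking backwards so that
+ * overlapping ranges behave like memmove
+ */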
+static void move_pages(struct page *dst_page, struct page *src_page,
+		       unsigned long dst_off, unsigned long src_off,
+		       unsigned long len)
+{
+	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
+	if (dst_page == src_page) {
+		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
+	} else {
+		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
+		char *p = dst_kaddr + dst_off + len;
+		char *s = src_kaddr + src_off + len;
+
+		while (len--)
+			*--p = *--s;
+
+		kunmap_atomic(src_kaddr, KM_USER1);
+	}
+	kunmap_atomic(dst_kaddr, KM_USER0);
+}
+
+static void copy_pages(struct page *dst_page, struct page *src_page,
+		       unsigned long dst_off, unsigned long src_off,
+		       unsigned long len)
+{
+	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
+	char *src_kaddr;
+
+	if (dst_page != src_page)
+		src_kaddr = kmap_atomic(src_page, KM_USER1);
+	else
+		src_kaddr = dst_kaddr;
+
+	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
+	kunmap_atomic(dst_kaddr, KM_USER0);
+	if (dst_page != src_page)
+		kunmap_atomic(src_kaddr, KM_USER1);
+}
+
+void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
+			   unsigned long src_offset, unsigned long len)
+{
+	size_t cur;
+	size_t dst_off_in_page;
+	size_t src_off_in_page;
+	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long dst_i;
+	unsigned long src_i;
+
+	if (src_offset + len > dst->len) {
+		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
+		       "len %lu dst len %lu\n", src_offset, len, dst->len);
+		BUG_ON(1);
+	}
+	if (dst_offset + len > dst->len) {
+		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
+		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
+		BUG_ON(1);
+	}
+
+	while (len > 0) {
+		dst_off_in_page = (start_offset + dst_offset) &
+			((unsigned long)PAGE_CACHE_SIZE - 1);
+		src_off_in_page = (start_offset + src_offset) &
+			((unsigned long)PAGE_CACHE_SIZE - 1);
+
+		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
+		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
+
+		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
+					       src_off_in_page));
+		cur = min_t(unsigned long, cur,
+			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
+
+		copy_pages(extent_buffer_page(dst, dst_i),
+			   extent_buffer_page(dst, src_i),
+			   dst_off_in_page, src_off_in_page, cur);
+
+		src_offset += cur;
+		dst_offset += cur;
+		len -= cur;
+	}
+}
+
+void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
+			   unsigned long src_offset, unsigned long len)
+{
+	size_t cur;
+	size_t dst_off_in_page;
+	size_t src_off_in_page;
+	unsigned long dst_end = dst_offset + len - 1;
+	unsigned long src_end = src_offset + len - 1;
+	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long dst_i;
+	unsigned long src_i;
+
+	if (src_offset + len > dst->len) {
+		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
+		       "len %lu len %lu\n", src_offset, len, dst->len);
+		BUG_ON(1);
+	}
+	if (dst_offset + len > dst->len) {
+		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
+		       "len %lu len %lu\n", dst_offset, len, dst->len);
+		BUG_ON(1);
+	}
+	if (dst_offset < src_offset) {
+		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
+		return;
+	}
+	while (len > 0) {
+		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
+		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
+
+		dst_off_in_page = (start_offset + dst_end) &
+			((unsigned long)PAGE_CACHE_SIZE - 1);
+		src_off_in_page = (start_offset + src_end) &
+			((unsigned long)PAGE_CACHE_SIZE - 1);
+
+		cur = min_t(unsigned long, len, src_off_in_page + 1);
+		cur = min(cur, dst_off_in_page + 1);
+		move_pages(extent_buffer_page(dst, dst_i),
+			   extent_buffer_page(dst, src_i),
+			   dst_off_in_page - cur + 1,
+			   src_off_in_page - cur + 1, cur);
+
+		dst_end -= cur;
+		src_end -= cur;
+		len -= cur;
+	}
+}
+
+int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
+{
+	u64 start = page_offset(page);
+	struct extent_buffer *eb;
+	int ret = 1;
+	unsigned long i;
+	unsigned long num_pages;
+
+	spin_lock(&tree->buffer_lock);
+	eb = buffer_search(tree, start);
+	if (!eb)
+		goto out;
+
+	if (atomic_read(&eb->refs) > 1) {
+		ret = 0;
+		goto out;
+	}
+	/* at this point we can safely release the extent buffer */
+	num_pages = num_extent_pages(eb->start, eb->len);
+	for (i = 0; i < num_pages; i++)
+		page_cache_release(extent_buffer_page(eb, i));
+	rb_erase(&eb->rb_node, &tree->buffer);
+	__free_extent_buffer(eb);
+out:
+	spin_unlock(&tree->buffer_lock);
+	return ret;
+}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
new file mode 100644
index 0000000..c5b483a
--- /dev/null
+++ b/fs/btrfs/extent_io.h
@@ -0,0 +1,269 @@
+#ifndef __EXTENTIO__
+#define __EXTENTIO__
+
+#include <linux/rbtree.h>
+
+/* bits for the extent state */
+#define EXTENT_DIRTY 1
+#define EXTENT_WRITEBACK (1 << 1)
+#define EXTENT_UPTODATE (1 << 2)
+#define EXTENT_LOCKED (1 << 3)
+#define EXTENT_NEW (1 << 4)
+#define EXTENT_DELALLOC (1 << 5)
+#define EXTENT_DEFRAG (1 << 6)
+#define EXTENT_DEFRAG_DONE (1 << 7)
+#define EXTENT_BUFFER_FILLED (1 << 8)
+#define EXTENT_ORDERED (1 << 9)
+#define EXTENT_ORDERED_METADATA (1 << 10)
+#define EXTENT_BOUNDARY (1 << 11)
+#define EXTENT_NODATASUM (1 << 12)
+#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
+
+/* flags for bio submission */
+#define EXTENT_BIO_COMPRESSED 1
+
+/*
+ * page->private values.  Every page that is controlled by the extent
+ * map has page->private set to one.
+ */
+#define EXTENT_PAGE_PRIVATE 1
+#define EXTENT_PAGE_PRIVATE_FIRST_PAGE 3
+
+struct extent_state;
+
+typedef	int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
+				       struct bio *bio, int mirror_num,
+				       unsigned long bio_flags);
+struct extent_io_ops {
+	int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
+			     u64 start, u64 end, int *page_started,
+			     unsigned long *nr_written);
+	int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
+	int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
+	extent_submit_bio_hook_t *submit_bio_hook;
+	int (*merge_bio_hook)(struct page *page, unsigned long offset,
+			      size_t size, struct bio *bio,
+			      unsigned long bio_flags);
+	int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
+	int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
+				       u64 start, u64 end,
+				       struct extent_state *state);
+	int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
+					u64 start, u64 end,
+				       struct extent_state *state);
+	int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
+				    struct extent_state *state);
+	int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
+				      struct extent_state *state, int uptodate);
+	int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
+			    unsigned long old, unsigned long bits);
+	int (*clear_bit_hook)(struct inode *inode, u64 start, u64 end,
+			    unsigned long old, unsigned long bits);
+	int (*write_cache_pages_lock_hook)(struct page *page);
+};
+
+struct extent_io_tree {
+	struct rb_root state;
+	struct rb_root buffer;
+	struct address_space *mapping;
+	u64 dirty_bytes;
+	spinlock_t lock;
+	spinlock_t buffer_lock;
+	struct extent_io_ops *ops;
+};
+
+struct extent_state {
+	u64 start;
+	u64 end; /* inclusive */
+	struct rb_node rb_node;
+	struct extent_io_tree *tree;
+	wait_queue_head_t wq;
+	atomic_t refs;
+	unsigned long state;
+
+	/* for use by the FS */
+	u64 private;
+
+	struct list_head leak_list;
+};
+
+struct extent_buffer {
+	u64 start;
+	unsigned long len;
+	char *map_token;
+	char *kaddr;
+	unsigned long map_start;
+	unsigned long map_len;
+	struct page *first_page;
+	atomic_t refs;
+	int flags;
+	struct list_head leak_list;
+	struct rb_node rb_node;
+	struct mutex mutex;
+};
+
+struct extent_map_tree;
+
+static inline struct extent_state *extent_state_next(struct extent_state *state)
+{
+	struct rb_node *node;
+	node = rb_next(&state->rb_node);
+	if (!node)
+		return NULL;
+	return rb_entry(node, struct extent_state, rb_node);
+}
+
+typedef struct extent_map *(get_extent_t)(struct inode *inode,
+					  struct page *page,
+					  size_t page_offset,
+					  u64 start, u64 len,
+					  int create);
+
+void extent_io_tree_init(struct extent_io_tree *tree,
+			  struct address_space *mapping, gfp_t mask);
+int try_release_extent_mapping(struct extent_map_tree *map,
+			       struct extent_io_tree *tree, struct page *page,
+			       gfp_t mask);
+int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page);
+int try_release_extent_state(struct extent_map_tree *map,
+			     struct extent_io_tree *tree, struct page *page,
+			     gfp_t mask);
+int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+		    gfp_t mask);
+int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
+			  get_extent_t *get_extent);
+int __init extent_io_init(void);
+void extent_io_exit(void);
+
+u64 count_range_bits(struct extent_io_tree *tree,
+		     u64 *start, u64 search_end,
+		     u64 max_bytes, unsigned long bits);
+
+int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
+		   int bits, int filled);
+int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+		      int bits, gfp_t mask);
+int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+		     int bits, int wake, int delete, gfp_t mask);
+int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+		    int bits, gfp_t mask);
+int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
+			gfp_t mask);
+int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
+		   gfp_t mask);
+int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
+		     gfp_t mask);
+int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
+		       gfp_t mask);
+int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
+		       gfp_t mask);
+int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start,
+				  u64 end, gfp_t mask);
+int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
+		     gfp_t mask);
+int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
+		     gfp_t mask);
+int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
+			  u64 *start_ret, u64 *end_ret, int bits);
+struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
+						 u64 start, int bits);
+int extent_invalidatepage(struct extent_io_tree *tree,
+			  struct page *page, unsigned long offset);
+int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
+			  get_extent_t *get_extent,
+			  struct writeback_control *wbc);
+int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
+			      u64 start, u64 end, get_extent_t *get_extent,
+			      int mode);
+int extent_writepages(struct extent_io_tree *tree,
+		      struct address_space *mapping,
+		      get_extent_t *get_extent,
+		      struct writeback_control *wbc);
+int extent_readpages(struct extent_io_tree *tree,
+		     struct address_space *mapping,
+		     struct list_head *pages, unsigned nr_pages,
+		     get_extent_t get_extent);
+int extent_prepare_write(struct extent_io_tree *tree,
+			 struct inode *inode, struct page *page,
+			 unsigned from, unsigned to, get_extent_t *get_extent);
+int extent_commit_write(struct extent_io_tree *tree,
+			struct inode *inode, struct page *page,
+			unsigned from, unsigned to);
+sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
+		get_extent_t *get_extent);
+int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end);
+int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
+int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
+void set_page_extent_mapped(struct page *page);
+
+struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
+					  u64 start, unsigned long len,
+					  struct page *page0,
+					  gfp_t mask);
+struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
+					 u64 start, unsigned long len,
+					  gfp_t mask);
+void free_extent_buffer(struct extent_buffer *eb);
+int read_extent_buffer_pages(struct extent_io_tree *tree,
+			     struct extent_buffer *eb, u64 start, int wait,
+			     get_extent_t *get_extent, int mirror_num);
+
+static inline void extent_buffer_get(struct extent_buffer *eb)
+{
+	atomic_inc(&eb->refs);
+}
+
+int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
+			  unsigned long start,
+			  unsigned long len);
+void read_extent_buffer(struct extent_buffer *eb, void *dst,
+			unsigned long start,
+			unsigned long len);
+void write_extent_buffer(struct extent_buffer *eb, const void *src,
+			 unsigned long start, unsigned long len);
+void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
+			unsigned long dst_offset, unsigned long src_offset,
+			unsigned long len);
+void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
+			   unsigned long src_offset, unsigned long len);
+void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
+			   unsigned long src_offset, unsigned long len);
+void memset_extent_buffer(struct extent_buffer *eb, char c,
+			  unsigned long start, unsigned long len);
+int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
+				    struct extent_buffer *eb);
+int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end);
+int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
+int clear_extent_buffer_dirty(struct extent_io_tree *tree,
+			      struct extent_buffer *eb);
+int set_extent_buffer_dirty(struct extent_io_tree *tree,
+			     struct extent_buffer *eb);
+int set_extent_buffer_uptodate(struct extent_io_tree *tree,
+			       struct extent_buffer *eb);
+int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
+				struct extent_buffer *eb);
+int extent_buffer_uptodate(struct extent_io_tree *tree,
+			   struct extent_buffer *eb);
+int map_extent_buffer(struct extent_buffer *eb, unsigned long offset,
+		      unsigned long min_len, char **token, char **map,
+		      unsigned long *map_start,
+		      unsigned long *map_len, int km);
+int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
+		      unsigned long min_len, char **token, char **map,
+		      unsigned long *map_start,
+		      unsigned long *map_len, int km);
+void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km);
+int release_extent_buffer_tail_pages(struct extent_buffer *eb);
+int extent_range_uptodate(struct extent_io_tree *tree,
+			  u64 start, u64 end);
+int extent_clear_unlock_delalloc(struct inode *inode,
+				struct extent_io_tree *tree,
+				u64 start, u64 end, struct page *locked_page,
+				int unlock_page,
+				int clear_unlock,
+				int clear_delalloc, int clear_dirty,
+				int set_writeback,
+				int end_writeback);
+#endif
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
new file mode 100644
index 0000000..4a83e33
--- /dev/null
+++ b/fs/btrfs/extent_map.c
@@ -0,0 +1,351 @@
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/version.h>
+#include <linux/hardirq.h>
+#include "extent_map.h"
+
+/* temporary define until extent_map moves out of btrfs */
+struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
+				       unsigned long extra_flags,
+				       void (*ctor)(void *, struct kmem_cache *,
+						    unsigned long));
+
+static struct kmem_cache *extent_map_cache;
+
+int __init extent_map_init(void)
+{
+	extent_map_cache = btrfs_cache_create("extent_map",
+					    sizeof(struct extent_map), 0,
+					    NULL);
+	if (!extent_map_cache)
+		return -ENOMEM;
+	return 0;
+}
+
+void extent_map_exit(void)
+{
+	if (extent_map_cache)
+		kmem_cache_destroy(extent_map_cache);
+}
+
+/**
+ * extent_map_tree_init - initialize extent map tree
+ * @tree:		tree to initialize
+ * @mask:		flags for memory allocations during tree operations
+ *
+ * Initialize the extent tree @tree.  Should be called for each new inode
+ * or other user of the extent_map interface.
+ */
+void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
+{
+	tree->map.rb_node = NULL;
+	spin_lock_init(&tree->lock);
+}
+EXPORT_SYMBOL(extent_map_tree_init);
+
+/**
+ * alloc_extent_map - allocate new extent map structure
+ * @mask:	memory allocation flags
+ *
+ * Allocate a new extent_map structure.  The new structure is
+ * returned with a reference count of one and needs to be
+ * freed using free_extent_map()
+ */
+struct extent_map *alloc_extent_map(gfp_t mask)
+{
+	struct extent_map *em;
+	em = kmem_cache_alloc(extent_map_cache, mask);
+	if (!em || IS_ERR(em))
+		return em;
+	em->in_tree = 0;
+	em->flags = 0;
+	atomic_set(&em->refs, 1);
+	return em;
+}
+EXPORT_SYMBOL(alloc_extent_map);
+
+/**
+ * free_extent_map - drop reference count of an extent_map
+ * @em:		extent map being released
+ *
+ * Drops the reference count on @em by one and frees the structure
+ * if it hits zero.
+ */
+void free_extent_map(struct extent_map *em)
+{
+	if (!em)
+		return;
+	WARN_ON(atomic_read(&em->refs) == 0);
+	if (atomic_dec_and_test(&em->refs)) {
+		WARN_ON(em->in_tree);
+		kmem_cache_free(extent_map_cache, em);
+	}
+}
+EXPORT_SYMBOL(free_extent_map);
+
+static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
+				   struct rb_node *node)
+{
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct extent_map *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct extent_map, rb_node);
+
+		WARN_ON(!entry->in_tree);
+
+		if (offset < entry->start)
+			p = &(*p)->rb_left;
+		else if (offset >= extent_map_end(entry))
+			p = &(*p)->rb_right;
+		else
+			return parent;
+	}
+
+	entry = rb_entry(node, struct extent_map, rb_node);
+	entry->in_tree = 1;
+	rb_link_node(node, parent, p);
+	rb_insert_color(node, root);
+	return NULL;
+}
+
+/*
+ * search through the tree for an extent_map with a given offset.  If
+ * it can't be found, try to find some neighboring extents
+ */
+static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
+				     struct rb_node **prev_ret,
+				     struct rb_node **next_ret)
+{
+	struct rb_node *n = root->rb_node;
+	struct rb_node *prev = NULL;
+	struct rb_node *orig_prev = NULL;
+	struct extent_map *entry;
+	struct extent_map *prev_entry = NULL;
+
+	while (n) {
+		entry = rb_entry(n, struct extent_map, rb_node);
+		prev = n;
+		prev_entry = entry;
+
+		WARN_ON(!entry->in_tree);
+
+		if (offset < entry->start)
+			n = n->rb_left;
+		else if (offset >= extent_map_end(entry))
+			n = n->rb_right;
+		else
+			return n;
+	}
+
+	if (prev_ret) {
+		orig_prev = prev;
+		while (prev && offset >= extent_map_end(prev_entry)) {
+			prev = rb_next(prev);
+			prev_entry = rb_entry(prev, struct extent_map, rb_node);
+		}
+		*prev_ret = prev;
+		prev = orig_prev;
+	}
+
+	if (next_ret) {
+		prev_entry = rb_entry(prev, struct extent_map, rb_node);
+		while (prev && offset < prev_entry->start) {
+			prev = rb_prev(prev);
+			prev_entry = rb_entry(prev, struct extent_map, rb_node);
+		}
+		*next_ret = prev;
+	}
+	return NULL;
+}
+
+/*
+ * look for an offset in the tree, and if it can't be found, return
+ * the first offset we can find smaller than 'offset'.
+ */
+static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
+{
+	struct rb_node *prev;
+	struct rb_node *ret;
+	ret = __tree_search(root, offset, &prev, NULL);
+	if (!ret)
+		return prev;
+	return ret;
+}
+
+/* check to see if two extent_map structs are adjacent and safe to merge */
+static int mergable_maps(struct extent_map *prev, struct extent_map *next)
+{
+	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
+		return 0;
+
+	/*
+	 * don't merge compressed extents, we need to know their
+	 * actual size
+	 */
+	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
+		return 0;
+
+	if (extent_map_end(prev) == next->start &&
+	    prev->flags == next->flags &&
+	    prev->bdev == next->bdev &&
+	    ((next->block_start == EXTENT_MAP_HOLE &&
+	      prev->block_start == EXTENT_MAP_HOLE) ||
+	     (next->block_start == EXTENT_MAP_INLINE &&
+	      prev->block_start == EXTENT_MAP_INLINE) ||
+	     (next->block_start == EXTENT_MAP_DELALLOC &&
+	      prev->block_start == EXTENT_MAP_DELALLOC) ||
+	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
+	      next->block_start == extent_map_block_end(prev)))) {
+		return 1;
+	}
+	return 0;
+}
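+
+/*
+ * For illustration, a pair of mappings that mergable_maps() would accept
+ * (the numbers are made up): two unpinned, uncompressed mappings with the
+ * same flags and bdev merge when they are contiguous both in the file and
+ * on disk, e.g.
+ *
+ *	prev: start=0,    len=4096, block_start=1048576, block_len=4096
+ *	next: start=4096, len=4096, block_start=1052672, block_len=4096
+ *
+ * since extent_map_end(prev) == next->start and
+ * extent_map_block_end(prev) == next->block_start.  Two adjacent holes
+ * (block_start == EXTENT_MAP_HOLE) merge as well; pinned or compressed
+ * mappings never do.
+ */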
+
+/**
+ * add_extent_mapping - add new extent map to the extent tree
+ * @tree:	tree to insert new map in
+ * @em:		map to insert
+ *
+ * Insert @em into @tree or perform a simple forward/backward merge with
+ * existing mappings.  The extent_map struct passed in will be inserted
+ * into the tree directly, with an additional reference taken, or a
+ * reference dropped if the merge attempt was successful.
+ */
+int add_extent_mapping(struct extent_map_tree *tree,
+		       struct extent_map *em)
+{
+	int ret = 0;
+	struct extent_map *merge = NULL;
+	struct rb_node *rb;
+	struct extent_map *exist;
+
+	exist = lookup_extent_mapping(tree, em->start, em->len);
+	if (exist) {
+		free_extent_map(exist);
+		ret = -EEXIST;
+		goto out;
+	}
+	assert_spin_locked(&tree->lock);
+	rb = tree_insert(&tree->map, em->start, &em->rb_node);
+	if (rb) {
+		ret = -EEXIST;
+		free_extent_map(merge);
+		goto out;
+	}
+	atomic_inc(&em->refs);
+	if (em->start != 0) {
+		rb = rb_prev(&em->rb_node);
+		if (rb)
+			merge = rb_entry(rb, struct extent_map, rb_node);
+		if (rb && mergable_maps(merge, em)) {
+			em->start = merge->start;
+			em->len += merge->len;
+			em->block_len += merge->block_len;
+			em->block_start = merge->block_start;
+			merge->in_tree = 0;
+			rb_erase(&merge->rb_node, &tree->map);
+			free_extent_map(merge);
+		}
+	}
+	rb = rb_next(&em->rb_node);
+	if (rb)
+		merge = rb_entry(rb, struct extent_map, rb_node);
+	if (rb && mergable_maps(em, merge)) {
+		em->len += merge->len;
+		em->block_len += merge->len;
+		rb_erase(&merge->rb_node, &tree->map);
+		merge->in_tree = 0;
+		free_extent_map(merge);
+	}
+out:
+	return ret;
+}
+EXPORT_SYMBOL(add_extent_mapping);
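+
+/*
+ * Minimal usage sketch for add_extent_mapping() (the variable names and
+ * field values here are only placeholders):
+ *
+ *	struct extent_map *em = alloc_extent_map(GFP_NOFS);
+ *
+ *	em->start = 0;
+ *	em->len = 4096;
+ *	em->block_start = EXTENT_MAP_HOLE;
+ *	em->block_len = 4096;
+ *	em->bdev = NULL;
+ *
+ *	spin_lock(&em_tree->lock);
+ *	ret = add_extent_mapping(em_tree, em);
+ *	spin_unlock(&em_tree->lock);
+ *	free_extent_map(em);
+ *
+ * The final free_extent_map() only drops the caller's reference; the tree
+ * keeps the reference taken during the insert (ret is -EEXIST if the range
+ * was already mapped).
+ */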
+
+/* simple helper to do math around the end of an extent, handling wrap */
+static u64 range_end(u64 start, u64 len)
+{
+	if (start + len < start)
+		return (u64)-1;
+	return start + len;
+}
+
+/**
+ * lookup_extent_mapping - lookup extent_map
+ * @tree:	tree to lookup in
+ * @start:	byte offset to start the search
+ * @len:	length of the lookup range
+ *
+ * Find and return the first extent_map struct in @tree that intersects the
+ * [start, start + len) range.  There may be additional objects in the tree that
+ * intersect, so check the object returned carefully to make sure that no
+ * additional lookups are needed.
+ */
+struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
+					 u64 start, u64 len)
+{
+	struct extent_map *em;
+	struct rb_node *rb_node;
+	struct rb_node *prev = NULL;
+	struct rb_node *next = NULL;
+	u64 end = range_end(start, len);
+
+	assert_spin_locked(&tree->lock);
+	rb_node = __tree_search(&tree->map, start, &prev, &next);
+	if (!rb_node && prev) {
+		em = rb_entry(prev, struct extent_map, rb_node);
+		if (end > em->start && start < extent_map_end(em))
+			goto found;
+	}
+	if (!rb_node && next) {
+		em = rb_entry(next, struct extent_map, rb_node);
+		if (end > em->start && start < extent_map_end(em))
+			goto found;
+	}
+	if (!rb_node) {
+		em = NULL;
+		goto out;
+	}
+	if (IS_ERR(rb_node)) {
+		em = ERR_PTR(PTR_ERR(rb_node));
+		goto out;
+	}
+	em = rb_entry(rb_node, struct extent_map, rb_node);
+	if (end > em->start && start < extent_map_end(em))
+		goto found;
+
+	em = NULL;
+	goto out;
+
+found:
+	atomic_inc(&em->refs);
+out:
+	return em;
+}
+EXPORT_SYMBOL(lookup_extent_mapping);
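+
+/*
+ * Minimal lookup sketch (variable names are placeholders): find the mapping
+ * covering a file offset and drop the reference when done.  The disk byte
+ * computation only holds for uncompressed, on-disk mappings.
+ *
+ *	spin_lock(&em_tree->lock);
+ *	em = lookup_extent_mapping(em_tree, offset, 4096);
+ *	spin_unlock(&em_tree->lock);
+ *	if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
+ *		disk_bytenr = em->block_start + (offset - em->start);
+ *	free_extent_map(em);
+ */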
+
+/**
+ * remove_extent_mapping - removes an extent_map from the extent tree
+ * @tree:	extent tree to remove from
+ * @em:		extent map being removed
+ *
+ * Removes @em from @tree.  No reference counts are dropped, and no checks
+ * are done to see if the range is in use.
+ */
+int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
+{
+	int ret = 0;
+
+	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
+	assert_spin_locked(&tree->lock);
+	rb_erase(&em->rb_node, &tree->map);
+	em->in_tree = 0;
+	return ret;
+}
+EXPORT_SYMBOL(remove_extent_mapping);
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
new file mode 100644
index 0000000..fb6eeef
--- /dev/null
+++ b/fs/btrfs/extent_map.h
@@ -0,0 +1,62 @@
+#ifndef __EXTENTMAP__
+#define __EXTENTMAP__
+
+#include <linux/rbtree.h>
+
+#define EXTENT_MAP_LAST_BYTE (u64)-4
+#define EXTENT_MAP_HOLE (u64)-3
+#define EXTENT_MAP_INLINE (u64)-2
+#define EXTENT_MAP_DELALLOC (u64)-1
+
+/* bits for the flags field */
+#define EXTENT_FLAG_PINNED 0 /* this entry not yet on disk, don't free it */
+#define EXTENT_FLAG_COMPRESSED 1
+#define EXTENT_FLAG_VACANCY 2 /* no file extent item found */
+#define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */
+
+struct extent_map {
+	struct rb_node rb_node;
+
+	/* all of these are in bytes */
+	u64 start;
+	u64 len;
+	u64 orig_start;
+	u64 block_start;
+	u64 block_len;
+	unsigned long flags;
+	struct block_device *bdev;
+	atomic_t refs;
+	int in_tree;
+};
+
+struct extent_map_tree {
+	struct rb_root map;
+	spinlock_t lock;
+};
+
+static inline u64 extent_map_end(struct extent_map *em)
+{
+	if (em->start + em->len < em->start)
+		return (u64)-1;
+	return em->start + em->len;
+}
+
+static inline u64 extent_map_block_end(struct extent_map *em)
+{
+	if (em->block_start + em->block_len < em->block_start)
+		return (u64)-1;
+	return em->block_start + em->block_len;
+}
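+
+/*
+ * e.g. a mapping with start == 4096 and len == 8192 reports
+ * extent_map_end() == 12288; a length that would wrap past the top of the
+ * u64 space clamps to (u64)-1 instead of overflowing.  extent_map_block_end()
+ * does the same arithmetic on the disk-space side.
+ */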
+
+void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask);
+struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
+					 u64 start, u64 len);
+int add_extent_mapping(struct extent_map_tree *tree,
+		       struct extent_map *em);
+int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
+
+struct extent_map *alloc_extent_map(gfp_t mask);
+void free_extent_map(struct extent_map *em);
+int __init extent_map_init(void);
+void extent_map_exit(void);
+#endif
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
new file mode 100644
index 0000000..9646524
--- /dev/null
+++ b/fs/btrfs/file-item.c
@@ -0,0 +1,831 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/bio.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include "ctree.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "print-tree.h"
+
+#define MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \
+				   sizeof(struct btrfs_item) * 2) / \
+				  size) - 1))
+
+#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
+				   sizeof(struct btrfs_ordered_sum)) / \
+				   sizeof(struct btrfs_sector_sum) * \
+				   (r)->sectorsize - (r)->sectorsize)
+
+int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root,
+			     u64 objectid, u64 pos,
+			     u64 disk_offset, u64 disk_num_bytes,
+			     u64 num_bytes, u64 offset, u64 ram_bytes,
+			     u8 compression, u8 encryption, u16 other_encoding)
+{
+	int ret = 0;
+	struct btrfs_file_extent_item *item;
+	struct btrfs_key file_key;
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+	file_key.objectid = objectid;
+	file_key.offset = pos;
+	btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);
+
+	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
+				      sizeof(*item));
+	if (ret < 0)
+		goto out;
+	BUG_ON(ret);
+	leaf = path->nodes[0];
+	item = btrfs_item_ptr(leaf, path->slots[0],
+			      struct btrfs_file_extent_item);
+	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
+	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
+	btrfs_set_file_extent_offset(leaf, item, offset);
+	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
+	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
+	btrfs_set_file_extent_generation(leaf, item, trans->transid);
+	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
+	btrfs_set_file_extent_compression(leaf, item, compression);
+	btrfs_set_file_extent_encryption(leaf, item, encryption);
+	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);
+
+	btrfs_mark_buffer_dirty(leaf);
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
+					  struct btrfs_root *root,
+					  struct btrfs_path *path,
+					  u64 bytenr, int cow)
+{
+	int ret;
+	struct btrfs_key file_key;
+	struct btrfs_key found_key;
+	struct btrfs_csum_item *item;
+	struct extent_buffer *leaf;
+	u64 csum_offset = 0;
+	u16 csum_size =
+		btrfs_super_csum_size(&root->fs_info->super_copy);
+	int csums_in_item;
+
+	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+	file_key.offset = bytenr;
+	btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY);
+	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
+	if (ret < 0)
+		goto fail;
+	leaf = path->nodes[0];
+	if (ret > 0) {
+		ret = 1;
+		if (path->slots[0] == 0)
+			goto fail;
+		path->slots[0]--;
+		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+		if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY)
+			goto fail;
+
+		csum_offset = (bytenr - found_key.offset) >>
+				root->fs_info->sb->s_blocksize_bits;
+		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
+		csums_in_item /= csum_size;
+
+		if (csum_offset >= csums_in_item) {
+			ret = -EFBIG;
+			goto fail;
+		}
+	}
+	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
+	item = (struct btrfs_csum_item *)((unsigned char *)item +
+					  csum_offset * csum_size);
+	return item;
+fail:
+	if (ret > 0)
+		ret = -ENOENT;
+	return ERR_PTR(ret);
+}
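+
+/*
+ * The offset arithmetic above, worked through with made-up numbers: with
+ * 4K blocks (s_blocksize_bits == 12) and 4-byte crc32c checksums
+ * (csum_size == 4), looking up bytenr 40960 when the item found starts at
+ * key.offset 32768 gives csum_offset = (40960 - 32768) >> 12 = 2, so the
+ * wanted checksum is the third csum_size-sized slot inside that item.
+ */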
+
+
+int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root,
+			     struct btrfs_path *path, u64 objectid,
+			     u64 offset, int mod)
+{
+	int ret;
+	struct btrfs_key file_key;
+	int ins_len = mod < 0 ? -1 : 0;
+	int cow = mod != 0;
+
+	file_key.objectid = objectid;
+	file_key.offset = offset;
+	btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);
+	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
+	return ret;
+}
+
+
+int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
+			  struct bio *bio, u32 *dst)
+{
+	u32 sum;
+	struct bio_vec *bvec = bio->bi_io_vec;
+	int bio_index = 0;
+	u64 offset;
+	u64 item_start_offset = 0;
+	u64 item_last_offset = 0;
+	u64 disk_bytenr;
+	u32 diff;
+	u16 csum_size =
+		btrfs_super_csum_size(&root->fs_info->super_copy);
+	int ret;
+	struct btrfs_path *path;
+	struct btrfs_csum_item *item = NULL;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+
+	path = btrfs_alloc_path();
+	if (bio->bi_size > PAGE_CACHE_SIZE * 8)
+		path->reada = 2;
+
+	WARN_ON(bio->bi_vcnt <= 0);
+
+	disk_bytenr = (u64)bio->bi_sector << 9;
+	while (bio_index < bio->bi_vcnt) {
+		offset = page_offset(bvec->bv_page) + bvec->bv_offset;
+		ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum);
+		if (ret == 0)
+			goto found;
+
+		if (!item || disk_bytenr < item_start_offset ||
+		    disk_bytenr >= item_last_offset) {
+			struct btrfs_key found_key;
+			u32 item_size;
+
+			if (item)
+				btrfs_release_path(root, path);
+			item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
+						 path, disk_bytenr, 0);
+			if (IS_ERR(item)) {
+				ret = PTR_ERR(item);
+				if (ret == -ENOENT || ret == -EFBIG)
+					ret = 0;
+				sum = 0;
+				if (BTRFS_I(inode)->root->root_key.objectid ==
+				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
+					set_extent_bits(io_tree, offset,
+						offset + bvec->bv_len - 1,
+						EXTENT_NODATASUM, GFP_NOFS);
+				} else {
+					printk(KERN_INFO "btrfs no csum found "
+					       "for inode %lu start %llu\n",
+					       inode->i_ino,
+					       (unsigned long long)offset);
+				}
+				item = NULL;
+				btrfs_release_path(root, path);
+				goto found;
+			}
+			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
+					      path->slots[0]);
+
+			item_start_offset = found_key.offset;
+			item_size = btrfs_item_size_nr(path->nodes[0],
+						       path->slots[0]);
+			item_last_offset = item_start_offset +
+				(item_size / csum_size) *
+				root->sectorsize;
+			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+					      struct btrfs_csum_item);
+		}
+		/*
+		 * this byte range must be able to fit inside
+		 * a single leaf so it will also fit inside a u32
+		 */
+		diff = disk_bytenr - item_start_offset;
+		diff = diff / root->sectorsize;
+		diff = diff * csum_size;
+
+		read_extent_buffer(path->nodes[0], &sum,
+				   ((unsigned long)item) + diff,
+				   csum_size);
+found:
+		if (dst)
+			*dst++ = sum;
+		else
+			set_state_private(io_tree, offset, sum);
+		disk_bytenr += bvec->bv_len;
+		bio_index++;
+		bvec++;
+	}
+	btrfs_free_path(path);
+	return 0;
+}
+
+int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
+			     struct list_head *list)
+{
+	struct btrfs_key key;
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	struct btrfs_ordered_sum *sums;
+	struct btrfs_sector_sum *sector_sum;
+	struct btrfs_csum_item *item;
+	unsigned long offset;
+	int ret;
+	size_t size;
+	u64 csum_end;
+	u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+
+	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+	key.offset = start;
+	key.type = BTRFS_EXTENT_CSUM_KEY;
+
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto fail;
+	if (ret > 0 && path->slots[0] > 0) {
+		leaf = path->nodes[0];
+		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
+		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
+		    key.type == BTRFS_EXTENT_CSUM_KEY) {
+			offset = (start - key.offset) >>
+				 root->fs_info->sb->s_blocksize_bits;
+			if (offset * csum_size <
+			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
+				path->slots[0]--;
+		}
+	}
+
+	while (start <= end) {
+		leaf = path->nodes[0];
+		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0)
+				goto fail;
+			if (ret > 0)
+				break;
+			leaf = path->nodes[0];
+		}
+
+		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
+		    key.type != BTRFS_EXTENT_CSUM_KEY)
+			break;
+
+		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+		if (key.offset > end)
+			break;
+
+		if (key.offset > start)
+			start = key.offset;
+
+		size = btrfs_item_size_nr(leaf, path->slots[0]);
+		csum_end = key.offset + (size / csum_size) * root->sectorsize;
+		if (csum_end <= start) {
+			path->slots[0]++;
+			continue;
+		}
+
+		csum_end = min(csum_end, end + 1);
+		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+				      struct btrfs_csum_item);
+		while (start < csum_end) {
+			size = min_t(size_t, csum_end - start,
+					MAX_ORDERED_SUM_BYTES(root));
+			sums = kzalloc(btrfs_ordered_sum_size(root, size),
+					GFP_NOFS);
+			BUG_ON(!sums);
+
+			sector_sum = sums->sums;
+			sums->bytenr = start;
+			sums->len = size;
+
+			offset = (start - key.offset) >>
+				root->fs_info->sb->s_blocksize_bits;
+			offset *= csum_size;
+
+			while (size > 0) {
+				read_extent_buffer(path->nodes[0],
+						&sector_sum->sum,
+						((unsigned long)item) +
+						offset, csum_size);
+				sector_sum->bytenr = start;
+
+				size -= root->sectorsize;
+				start += root->sectorsize;
+				offset += csum_size;
+				sector_sum++;
+			}
+			list_add_tail(&sums->list, list);
+		}
+		path->slots[0]++;
+	}
+	ret = 0;
+fail:
+	btrfs_free_path(path);
+	return ret;
+}
+
+int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
+		       struct bio *bio, u64 file_start, int contig)
+{
+	struct btrfs_ordered_sum *sums;
+	struct btrfs_sector_sum *sector_sum;
+	struct btrfs_ordered_extent *ordered;
+	char *data;
+	struct bio_vec *bvec = bio->bi_io_vec;
+	int bio_index = 0;
+	unsigned long total_bytes = 0;
+	unsigned long this_sum_bytes = 0;
+	u64 offset;
+	u64 disk_bytenr;
+
+	WARN_ON(bio->bi_vcnt <= 0);
+	sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS);
+	if (!sums)
+		return -ENOMEM;
+
+	sector_sum = sums->sums;
+	disk_bytenr = (u64)bio->bi_sector << 9;
+	sums->len = bio->bi_size;
+	INIT_LIST_HEAD(&sums->list);
+
+	if (contig)
+		offset = file_start;
+	else
+		offset = page_offset(bvec->bv_page) + bvec->bv_offset;
+
+	ordered = btrfs_lookup_ordered_extent(inode, offset);
+	BUG_ON(!ordered);
+	sums->bytenr = ordered->start;
+
+	while (bio_index < bio->bi_vcnt) {
+		if (!contig)
+			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
+
+		if (!contig && (offset >= ordered->file_offset + ordered->len ||
+		    offset < ordered->file_offset)) {
+			unsigned long bytes_left;
+			sums->len = this_sum_bytes;
+			this_sum_bytes = 0;
+			btrfs_add_ordered_sum(inode, ordered, sums);
+			btrfs_put_ordered_extent(ordered);
+
+			bytes_left = bio->bi_size - total_bytes;
+
+			sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
+				       GFP_NOFS);
+			BUG_ON(!sums);
+			sector_sum = sums->sums;
+			sums->len = bytes_left;
+			ordered = btrfs_lookup_ordered_extent(inode, offset);
+			BUG_ON(!ordered);
+			sums->bytenr = ordered->start;
+		}
+
+		data = kmap_atomic(bvec->bv_page, KM_USER0);
+		sector_sum->sum = ~(u32)0;
+		sector_sum->sum = btrfs_csum_data(root,
+						  data + bvec->bv_offset,
+						  sector_sum->sum,
+						  bvec->bv_len);
+		kunmap_atomic(data, KM_USER0);
+		btrfs_csum_final(sector_sum->sum,
+				 (char *)&sector_sum->sum);
+		sector_sum->bytenr = disk_bytenr;
+
+		sector_sum++;
+		bio_index++;
+		total_bytes += bvec->bv_len;
+		this_sum_bytes += bvec->bv_len;
+		disk_bytenr += bvec->bv_len;
+		offset += bvec->bv_len;
+		bvec++;
+	}
+	this_sum_bytes = 0;
+	btrfs_add_ordered_sum(inode, ordered, sums);
+	btrfs_put_ordered_extent(ordered);
+	return 0;
+}
+
+/*
+ * helper function for csum removal, this expects the
+ * key to describe the csum pointed to by the path, and it expects
+ * the csum to overlap the range [bytenr, bytenr + len)
+ *
+ * The csum should not be entirely contained in the range and the
+ * range should not be entirely contained in the csum.
+ *
+ * This calls btrfs_truncate_item with the correct args based on the
+ * overlap, and fixes up the key as required.
+ */
+static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
+				      struct btrfs_root *root,
+				      struct btrfs_path *path,
+				      struct btrfs_key *key,
+				      u64 bytenr, u64 len)
+{
+	struct extent_buffer *leaf;
+	u16 csum_size =
+		btrfs_super_csum_size(&root->fs_info->super_copy);
+	u64 csum_end;
+	u64 end_byte = bytenr + len;
+	u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;
+	int ret;
+
+	leaf = path->nodes[0];
+	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
+	csum_end <<= root->fs_info->sb->s_blocksize_bits;
+	csum_end += key->offset;
+
+	if (key->offset < bytenr && csum_end <= end_byte) {
+		/*
+		 *         [ bytenr - len ]
+		 *         [   ]
+		 *   [csum     ]
+		 *   A simple truncate off the end of the item
+		 */
+		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
+		new_size *= csum_size;
+		ret = btrfs_truncate_item(trans, root, path, new_size, 1);
+		BUG_ON(ret);
+	} else if (key->offset >= bytenr && csum_end > end_byte &&
+		   end_byte > key->offset) {
+		/*
+		 *         [ bytenr - len ]
+		 *                 [ ]
+		 *                 [csum     ]
+		 * we need to truncate from the beginning of the csum
+		 */
+		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
+		new_size *= csum_size;
+
+		ret = btrfs_truncate_item(trans, root, path, new_size, 0);
+		BUG_ON(ret);
+
+		key->offset = end_byte;
+		ret = btrfs_set_item_key_safe(trans, root, path, key);
+		BUG_ON(ret);
+	} else {
+		BUG();
+	}
+	return 0;
+}
+
+/*
+ * deletes the csum items from the csum tree for a given
+ * range of bytes.
+ */
+int btrfs_del_csums(struct btrfs_trans_handle *trans,
+		    struct btrfs_root *root, u64 bytenr, u64 len)
+{
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	u64 end_byte = bytenr + len;
+	u64 csum_end;
+	struct extent_buffer *leaf;
+	int ret;
+	u16 csum_size =
+		btrfs_super_csum_size(&root->fs_info->super_copy);
+	int blocksize_bits = root->fs_info->sb->s_blocksize_bits;
+
+	root = root->fs_info->csum_root;
+
+	path = btrfs_alloc_path();
+
+	while (1) {
+		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+		key.offset = end_byte - 1;
+		key.type = BTRFS_EXTENT_CSUM_KEY;
+
+		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+		if (ret > 0) {
+			if (path->slots[0] == 0)
+				goto out;
+			path->slots[0]--;
+		}
+		leaf = path->nodes[0];
+		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+
+		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
+		    key.type != BTRFS_EXTENT_CSUM_KEY) {
+			break;
+		}
+
+		if (key.offset >= end_byte)
+			break;
+
+		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
+		csum_end <<= blocksize_bits;
+		csum_end += key.offset;
+
+		/* this csum ends before we start, we're done */
+		if (csum_end <= bytenr)
+			break;
+
+		/* delete the entire item, it is inside our range */
+		if (key.offset >= bytenr && csum_end <= end_byte) {
+			ret = btrfs_del_item(trans, root, path);
+			BUG_ON(ret);
+			if (key.offset == bytenr)
+				break;
+		} else if (key.offset < bytenr && csum_end > end_byte) {
+			unsigned long offset;
+			unsigned long shift_len;
+			unsigned long item_offset;
+			/*
+			 *        [ bytenr - len ]
+			 *     [csum                ]
+			 *
+			 * Our bytes are in the middle of the csum,
+			 * we need to split this item and insert a new one.
+			 *
+			 * But we can't drop the path because the
+			 * csum could change, get removed, extended etc.
+			 *
+			 * The trick here is the max size of a csum item leaves
+			 * enough room in the tree block for a single
+			 * item header.  So, we split the item in place,
+			 * adding a new header pointing to the existing
+			 * bytes.  Then we loop around again and we have
+			 * a nicely formed csum item that we can neatly
+			 * truncate.
+			 */
+			offset = (bytenr - key.offset) >> blocksize_bits;
+			offset *= csum_size;
+
+			shift_len = (len >> blocksize_bits) * csum_size;
+
+			item_offset = btrfs_item_ptr_offset(leaf,
+							    path->slots[0]);
+
+			memset_extent_buffer(leaf, 0, item_offset + offset,
+					     shift_len);
+			key.offset = bytenr;
+
+			/*
+			 * btrfs_split_item returns -EAGAIN when the
+			 * item changed size or key
+			 */
+			ret = btrfs_split_item(trans, root, path, &key, offset);
+			BUG_ON(ret && ret != -EAGAIN);
+
+			key.offset = end_byte - 1;
+		} else {
+			ret = truncate_one_csum(trans, root, path,
+						&key, bytenr, len);
+			BUG_ON(ret);
+			if (key.offset < bytenr)
+				break;
+		}
+		btrfs_release_path(root, path);
+	}
+out:
+	btrfs_free_path(path);
+	return 0;
+}
+
+int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
+			   struct btrfs_root *root,
+			   struct btrfs_ordered_sum *sums)
+{
+	u64 bytenr;
+	int ret;
+	struct btrfs_key file_key;
+	struct btrfs_key found_key;
+	u64 next_offset;
+	u64 total_bytes = 0;
+	int found_next;
+	struct btrfs_path *path;
+	struct btrfs_csum_item *item;
+	struct btrfs_csum_item *item_end;
+	struct extent_buffer *leaf = NULL;
+	u64 csum_offset;
+	struct btrfs_sector_sum *sector_sum;
+	u32 nritems;
+	u32 ins_size;
+	char *eb_map;
+	char *eb_token;
+	unsigned long map_len;
+	unsigned long map_start;
+	u16 csum_size =
+		btrfs_super_csum_size(&root->fs_info->super_copy);
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+	sector_sum = sums->sums;
+again:
+	next_offset = (u64)-1;
+	found_next = 0;
+	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+	file_key.offset = sector_sum->bytenr;
+	bytenr = sector_sum->bytenr;
+	btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY);
+
+	item = btrfs_lookup_csum(trans, root, path, sector_sum->bytenr, 1);
+	if (!IS_ERR(item)) {
+		leaf = path->nodes[0];
+		ret = 0;
+		goto found;
+	}
+	ret = PTR_ERR(item);
+	if (ret == -EFBIG) {
+		u32 item_size;
+		/* we found one, but it isn't big enough yet */
+		leaf = path->nodes[0];
+		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+		if ((item_size / csum_size) >=
+		    MAX_CSUM_ITEMS(root, csum_size)) {
+			/* already at max size, make a new one */
+			goto insert;
+		}
+	} else {
+		int slot = path->slots[0] + 1;
+		/* we didn't find a csum item, insert one */
+		nritems = btrfs_header_nritems(path->nodes[0]);
+		if (path->slots[0] >= nritems - 1) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret == 1)
+				found_next = 1;
+			if (ret != 0)
+				goto insert;
+			slot = 0;
+		}
+		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
+		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
+		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
+			found_next = 1;
+			goto insert;
+		}
+		next_offset = found_key.offset;
+		found_next = 1;
+		goto insert;
+	}
+
+	/*
+	 * at this point, we know the tree has an item, but it isn't big
+	 * enough yet to put our csum in.  Grow it
+	 */
+	btrfs_release_path(root, path);
+	ret = btrfs_search_slot(trans, root, &file_key, path,
+				csum_size, 1);
+	if (ret < 0)
+		goto fail_unlock;
+
+	if (ret > 0) {
+		if (path->slots[0] == 0)
+			goto insert;
+		path->slots[0]--;
+	}
+
+	leaf = path->nodes[0];
+	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+	csum_offset = (bytenr - found_key.offset) >>
+			root->fs_info->sb->s_blocksize_bits;
+
+	if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY ||
+	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
+	    csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
+		goto insert;
+	}
+
+	if (csum_offset >= btrfs_item_size_nr(leaf, path->slots[0]) /
+	    csum_size) {
+		u32 diff = (csum_offset + 1) * csum_size;
+
+		/*
+		 * is the item big enough already?  we dropped our lock
+		 * before and need to recheck
+		 */
+		if (diff < btrfs_item_size_nr(leaf, path->slots[0]))
+			goto csum;
+
+		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
+		if (diff != csum_size)
+			goto insert;
+
+		ret = btrfs_extend_item(trans, root, path, diff);
+		BUG_ON(ret);
+		goto csum;
+	}
+
+insert:
+	btrfs_release_path(root, path);
+	csum_offset = 0;
+	if (found_next) {
+		u64 tmp = total_bytes + root->sectorsize;
+		u64 next_sector = sector_sum->bytenr;
+		struct btrfs_sector_sum *next = sector_sum + 1;
+
+		while (tmp < sums->len) {
+			if (next_sector + root->sectorsize != next->bytenr)
+				break;
+			tmp += root->sectorsize;
+			next_sector = next->bytenr;
+			next++;
+		}
+		tmp = min(tmp, next_offset - file_key.offset);
+		tmp >>= root->fs_info->sb->s_blocksize_bits;
+		tmp = max((u64)1, tmp);
+		tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
+		ins_size = csum_size * tmp;
+	} else {
+		ins_size = csum_size;
+	}
+	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
+				      ins_size);
+	if (ret < 0)
+		goto fail_unlock;
+	if (ret != 0) {
+		WARN_ON(1);
+		goto fail_unlock;
+	}
+csum:
+	leaf = path->nodes[0];
+	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
+	ret = 0;
+	item = (struct btrfs_csum_item *)((unsigned char *)item +
+					  csum_offset * csum_size);
+found:
+	item_end = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
+	item_end = (struct btrfs_csum_item *)((unsigned char *)item_end +
+				      btrfs_item_size_nr(leaf, path->slots[0]));
+	eb_token = NULL;
+	cond_resched();
+next_sector:
+
+	if (!eb_token ||
+	   (unsigned long)item + csum_size >= map_start + map_len) {
+		int err;
+
+		if (eb_token)
+			unmap_extent_buffer(leaf, eb_token, KM_USER1);
+		eb_token = NULL;
+		err = map_private_extent_buffer(leaf, (unsigned long)item,
+						csum_size,
+						&eb_token, &eb_map,
+						&map_start, &map_len, KM_USER1);
+		if (err)
+			eb_token = NULL;
+	}
+	if (eb_token) {
+		memcpy(eb_token + ((unsigned long)item & (PAGE_CACHE_SIZE - 1)),
+		       &sector_sum->sum, csum_size);
+	} else {
+		write_extent_buffer(leaf, &sector_sum->sum,
+				    (unsigned long)item, csum_size);
+	}
+
+	total_bytes += root->sectorsize;
+	sector_sum++;
+	if (total_bytes < sums->len) {
+		item = (struct btrfs_csum_item *)((char *)item +
+						  csum_size);
+		if (item < item_end && bytenr + PAGE_CACHE_SIZE ==
+		    sector_sum->bytenr) {
+			bytenr = sector_sum->bytenr;
+			goto next_sector;
+		}
+	}
+	if (eb_token) {
+		unmap_extent_buffer(leaf, eb_token, KM_USER1);
+		eb_token = NULL;
+	}
+	btrfs_mark_buffer_dirty(path->nodes[0]);
+	cond_resched();
+	if (total_bytes < sums->len) {
+		btrfs_release_path(root, path);
+		goto again;
+	}
+out:
+	btrfs_free_path(path);
+	return ret;
+
+fail_unlock:
+	goto out;
+}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
new file mode 100644
index 0000000..9026833
--- /dev/null
+++ b/fs/btrfs/file.c
@@ -0,0 +1,1288 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+#include <linux/backing-dev.h>
+#include <linux/mpage.h>
+#include <linux/swap.h>
+#include <linux/writeback.h>
+#include <linux/statfs.h>
+#include <linux/compat.h>
+#include <linux/version.h>
+#include "ctree.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "btrfs_inode.h"
+#include "ioctl.h"
+#include "print-tree.h"
+#include "tree-log.h"
+#include "locking.h"
+#include "compat.h"
+
+
+/* simple helper to fault in pages and copy.  This should go away
+ * and be replaced with calls into generic code.
+ */
+static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
+					 int write_bytes,
+					 struct page **prepared_pages,
+					 const char __user *buf)
+{
+	long page_fault = 0;
+	int i;
+	int offset = pos & (PAGE_CACHE_SIZE - 1);
+
+	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
+		size_t count = min_t(size_t,
+				     PAGE_CACHE_SIZE - offset, write_bytes);
+		struct page *page = prepared_pages[i];
+		fault_in_pages_readable(buf, count);
+
+		/* Copy data from userspace to the current page */
+		kmap(page);
+		page_fault = __copy_from_user(page_address(page) + offset,
+					      buf, count);
+		/* Flush processor's dcache for this page */
+		flush_dcache_page(page);
+		kunmap(page);
+		buf += count;
+		write_bytes -= count;
+
+		if (page_fault)
+			break;
+	}
+	return page_fault ? -EFAULT : 0;
+}
+
+/*
+ * unlocks pages after btrfs_file_write is done with them
+ */
+static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
+{
+	size_t i;
+	for (i = 0; i < num_pages; i++) {
+		if (!pages[i])
+			break;
+		/* page checked is some magic around finding pages that
+		 * have been modified without going through btrfs_set_page_dirty;
+		 * clear it here
+		 */
+		ClearPageChecked(pages[i]);
+		unlock_page(pages[i]);
+		mark_page_accessed(pages[i]);
+		page_cache_release(pages[i]);
+	}
+}
+
+/*
+ * after copy_from_user, pages need to be dirtied and we need to make
+ * sure holes are created between the current EOF and the start of
+ * any next extents (if required).
+ *
+ * this also makes the decision about creating an inline extent vs
+ * doing real data extents, marking pages dirty and delalloc as required.
+ */
+static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
+				   struct btrfs_root *root,
+				   struct file *file,
+				   struct page **pages,
+				   size_t num_pages,
+				   loff_t pos,
+				   size_t write_bytes)
+{
+	int err = 0;
+	int i;
+	struct inode *inode = fdentry(file)->d_inode;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	u64 hint_byte;
+	u64 num_bytes;
+	u64 start_pos;
+	u64 end_of_last_block;
+	u64 end_pos = pos + write_bytes;
+	loff_t isize = i_size_read(inode);
+
+	start_pos = pos & ~((u64)root->sectorsize - 1);
+	num_bytes = (write_bytes + pos - start_pos +
+		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
+
+	end_of_last_block = start_pos + num_bytes - 1;
+
+	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
+	trans = btrfs_join_transaction(root, 1);
+	if (!trans) {
+		err = -ENOMEM;
+		goto out_unlock;
+	}
+	btrfs_set_trans_block_group(trans, inode);
+	hint_byte = 0;
+
+	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
+
+	/* check for reserved extents on each page; we don't want
+	 * to reset the delalloc bit on things that already have
+	 * extents reserved.
+	 */
+	btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
+	for (i = 0; i < num_pages; i++) {
+		struct page *p = pages[i];
+		SetPageUptodate(p);
+		ClearPageChecked(p);
+		set_page_dirty(p);
+	}
+	if (end_pos > isize) {
+		i_size_write(inode, end_pos);
+		btrfs_update_inode(trans, root, inode);
+	}
+	err = btrfs_end_transaction(trans, root);
+out_unlock:
+	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
+	return err;
+}
+
+/*
+ * this drops all the extents in the cache that intersect the range
+ * [start, end].  Existing extents are split as required.
+ */
+int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
+			    int skip_pinned)
+{
+	struct extent_map *em;
+	struct extent_map *split = NULL;
+	struct extent_map *split2 = NULL;
+	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	u64 len = end - start + 1;
+	int ret;
+	int testend = 1;
+	unsigned long flags;
+	int compressed = 0;
+
+	WARN_ON(end < start);
+	if (end == (u64)-1) {
+		len = (u64)-1;
+		testend = 0;
+	}
+	while (1) {
+		if (!split)
+			split = alloc_extent_map(GFP_NOFS);
+		if (!split2)
+			split2 = alloc_extent_map(GFP_NOFS);
+
+		spin_lock(&em_tree->lock);
+		em = lookup_extent_mapping(em_tree, start, len);
+		if (!em) {
+			spin_unlock(&em_tree->lock);
+			break;
+		}
+		flags = em->flags;
+		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
+			spin_unlock(&em_tree->lock);
+			if (em->start <= start &&
+			    (!testend || em->start + em->len >= start + len)) {
+				free_extent_map(em);
+				break;
+			}
+			if (start < em->start) {
+				len = em->start - start;
+			} else {
+				len = start + len - (em->start + em->len);
+				start = em->start + em->len;
+			}
+			free_extent_map(em);
+			continue;
+		}
+		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
+		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+		remove_extent_mapping(em_tree, em);
+
+		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
+		    em->start < start) {
+			split->start = em->start;
+			split->len = start - em->start;
+			split->orig_start = em->orig_start;
+			split->block_start = em->block_start;
+
+			if (compressed)
+				split->block_len = em->block_len;
+			else
+				split->block_len = split->len;
+
+			split->bdev = em->bdev;
+			split->flags = flags;
+			ret = add_extent_mapping(em_tree, split);
+			BUG_ON(ret);
+			free_extent_map(split);
+			split = split2;
+			split2 = NULL;
+		}
+		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
+		    testend && em->start + em->len > start + len) {
+			u64 diff = start + len - em->start;
+
+			split->start = start + len;
+			split->len = em->start + em->len - (start + len);
+			split->bdev = em->bdev;
+			split->flags = flags;
+
+			if (compressed) {
+				split->block_len = em->block_len;
+				split->block_start = em->block_start;
+				split->orig_start = em->orig_start;
+			} else {
+				split->block_len = split->len;
+				split->block_start = em->block_start + diff;
+				split->orig_start = split->start;
+			}
+
+			ret = add_extent_mapping(em_tree, split);
+			BUG_ON(ret);
+			free_extent_map(split);
+			split = NULL;
+		}
+		spin_unlock(&em_tree->lock);
+
+		/* once for us */
+		free_extent_map(em);
+		/* once for the tree */
+		free_extent_map(em);
+	}
+	if (split)
+		free_extent_map(split);
+	if (split2)
+		free_extent_map(split2);
+	return 0;
+}
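+
+/*
+ * For illustration: dropping the range [4096, 8191] from a cached mapping
+ * that covers [0, 16383] removes the original extent_map and re-inserts two
+ * split pieces, [0, 4095] and [8192, 16383].  The second piece gets its
+ * block_start shifted by the same 8192 bytes unless the extent is
+ * compressed; compressed extents keep the original block_start/block_len
+ * because they can only be read back as a whole.
+ */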
+
+int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
+{
+	return 0;
+#if 0
+	struct btrfs_path *path;
+	struct btrfs_key found_key;
+	struct extent_buffer *leaf;
+	struct btrfs_file_extent_item *extent;
+	u64 last_offset = 0;
+	int nritems;
+	int slot;
+	int found_type;
+	int ret;
+	int err = 0;
+	u64 extent_end = 0;
+
+	path = btrfs_alloc_path();
+	ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
+				       last_offset, 0);
+	while (1) {
+		nritems = btrfs_header_nritems(path->nodes[0]);
+		if (path->slots[0] >= nritems) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret)
+				goto out;
+			nritems = btrfs_header_nritems(path->nodes[0]);
+		}
+		slot = path->slots[0];
+		leaf = path->nodes[0];
+		btrfs_item_key_to_cpu(leaf, &found_key, slot);
+		if (found_key.objectid != inode->i_ino)
+			break;
+		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
+			goto out;
+
+		if (found_key.offset < last_offset) {
+			WARN_ON(1);
+			btrfs_print_leaf(root, leaf);
+			printk(KERN_ERR "inode %lu found offset %llu "
+			       "expected %llu\n", inode->i_ino,
+			       (unsigned long long)found_key.offset,
+			       (unsigned long long)last_offset);
+			err = 1;
+			goto out;
+		}
+		extent = btrfs_item_ptr(leaf, slot,
+					struct btrfs_file_extent_item);
+		found_type = btrfs_file_extent_type(leaf, extent);
+		if (found_type == BTRFS_FILE_EXTENT_REG) {
+			extent_end = found_key.offset +
+			     btrfs_file_extent_num_bytes(leaf, extent);
+		} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+			struct btrfs_item *item;
+			item = btrfs_item_nr(leaf, slot);
+			extent_end = found_key.offset +
+			     btrfs_file_extent_inline_len(leaf, extent);
+			extent_end = (extent_end + root->sectorsize - 1) &
+				~((u64)root->sectorsize - 1);
+		}
+		last_offset = extent_end;
+		path->slots[0]++;
+	}
+	if (0 && last_offset < inode->i_size) {
+		WARN_ON(1);
+		btrfs_print_leaf(root, leaf);
+		printk(KERN_ERR "inode %lu found offset %llu size %llu\n",
+		       inode->i_ino, (unsigned long long)last_offset,
+		       (unsigned long long)inode->i_size);
+		err = 1;
+
+	}
+out:
+	btrfs_free_path(path);
+	return err;
+#endif
+}
+
+/*
+ * this is very complex, but the basic idea is to drop all extents
+ * in the range start - end.  hint_byte is filled in with a byte offset
+ * that would be a good hint to the block allocator for this file.
+ *
+ * If an extent intersects the range but is not entirely inside the range
+ * it is either truncated or split.  Anything entirely inside the range
+ * is deleted from the tree.
+ *
+ * inline_limit is used to tell this code which offsets in the file to keep
+ * if they contain inline extents.
+ */
+noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *root, struct inode *inode,
+		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
+{
+	u64 extent_end = 0;
+	u64 locked_end = end;
+	u64 search_start = start;
+	u64 leaf_start;
+	u64 ram_bytes = 0;
+	u64 orig_parent = 0;
+	u64 disk_bytenr = 0;
+	u8 compression;
+	u8 encryption;
+	u16 other_encoding = 0;
+	u64 root_gen;
+	u64 root_owner;
+	struct extent_buffer *leaf;
+	struct btrfs_file_extent_item *extent;
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct btrfs_file_extent_item old;
+	int keep;
+	int slot;
+	int bookend;
+	int found_type = 0;
+	int found_extent;
+	int found_inline;
+	int recow;
+	int ret;
+
+	inline_limit = 0;
+	btrfs_drop_extent_cache(inode, start, end - 1, 0);
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+	while (1) {
+		recow = 0;
+		btrfs_release_path(root, path);
+		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+					       search_start, -1);
+		if (ret < 0)
+			goto out;
+		if (ret > 0) {
+			if (path->slots[0] == 0) {
+				ret = 0;
+				goto out;
+			}
+			path->slots[0]--;
+		}
+next_slot:
+		keep = 0;
+		bookend = 0;
+		found_extent = 0;
+		found_inline = 0;
+		leaf_start = 0;
+		root_gen = 0;
+		root_owner = 0;
+		compression = 0;
+		encryption = 0;
+		extent = NULL;
+		leaf = path->nodes[0];
+		slot = path->slots[0];
+		ret = 0;
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
+		    key.offset >= end) {
+			goto out;
+		}
+		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
+		    key.objectid != inode->i_ino) {
+			goto out;
+		}
+		if (recow) {
+			search_start = max(key.offset, start);
+			continue;
+		}
+		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
+			extent = btrfs_item_ptr(leaf, slot,
+						struct btrfs_file_extent_item);
+			found_type = btrfs_file_extent_type(leaf, extent);
+			compression = btrfs_file_extent_compression(leaf,
+								    extent);
+			encryption = btrfs_file_extent_encryption(leaf,
+								  extent);
+			other_encoding = btrfs_file_extent_other_encoding(leaf,
+								  extent);
+			if (found_type == BTRFS_FILE_EXTENT_REG ||
+			    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+				extent_end =
+				     btrfs_file_extent_disk_bytenr(leaf,
+								   extent);
+				if (extent_end)
+					*hint_byte = extent_end;
+
+				extent_end = key.offset +
+				     btrfs_file_extent_num_bytes(leaf, extent);
+				ram_bytes = btrfs_file_extent_ram_bytes(leaf,
+								extent);
+				found_extent = 1;
+			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+				found_inline = 1;
+				extent_end = key.offset +
+				     btrfs_file_extent_inline_len(leaf, extent);
+			}
+		} else {
+			extent_end = search_start;
+		}
+
+		/* we found nothing we can drop */
+		if ((!found_extent && !found_inline) ||
+		    search_start >= extent_end) {
+			int nextret;
+			u32 nritems;
+			nritems = btrfs_header_nritems(leaf);
+			if (slot >= nritems - 1) {
+				nextret = btrfs_next_leaf(root, path);
+				if (nextret)
+					goto out;
+				recow = 1;
+			} else {
+				path->slots[0]++;
+			}
+			goto next_slot;
+		}
+
+		if (end <= extent_end && start >= key.offset && found_inline)
+			*hint_byte = EXTENT_MAP_INLINE;
+
+		if (found_extent) {
+			read_extent_buffer(leaf, &old, (unsigned long)extent,
+					   sizeof(old));
+			root_gen = btrfs_header_generation(leaf);
+			root_owner = btrfs_header_owner(leaf);
+			leaf_start = leaf->start;
+		}
+
+		if (end < extent_end && end >= key.offset) {
+			bookend = 1;
+			if (found_inline && start <= key.offset)
+				keep = 1;
+		}
+
+		if (bookend && found_extent) {
+			if (locked_end < extent_end) {
+				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
+						locked_end, extent_end - 1,
+						GFP_NOFS);
+				if (!ret) {
+					btrfs_release_path(root, path);
+					lock_extent(&BTRFS_I(inode)->io_tree,
+						locked_end, extent_end - 1,
+						GFP_NOFS);
+					locked_end = extent_end;
+					continue;
+				}
+				locked_end = extent_end;
+			}
+			orig_parent = path->nodes[0]->start;
+			disk_bytenr = le64_to_cpu(old.disk_bytenr);
+			if (disk_bytenr != 0) {
+				ret = btrfs_inc_extent_ref(trans, root,
+					   disk_bytenr,
+					   le64_to_cpu(old.disk_num_bytes),
+					   orig_parent, root->root_key.objectid,
+					   trans->transid, inode->i_ino);
+				BUG_ON(ret);
+			}
+		}
+
+		if (found_inline) {
+			u64 mask = root->sectorsize - 1;
+			search_start = (extent_end + mask) & ~mask;
+		} else
+			search_start = extent_end;
+
+		/* truncate existing extent */
+		if (start > key.offset) {
+			u64 new_num;
+			u64 old_num;
+			keep = 1;
+			WARN_ON(start & (root->sectorsize - 1));
+			if (found_extent) {
+				new_num = start - key.offset;
+				old_num = btrfs_file_extent_num_bytes(leaf,
+								      extent);
+				*hint_byte =
+					btrfs_file_extent_disk_bytenr(leaf,
+								      extent);
+				if (btrfs_file_extent_disk_bytenr(leaf,
+								  extent)) {
+					inode_sub_bytes(inode, old_num -
+							new_num);
+				}
+				btrfs_set_file_extent_num_bytes(leaf,
+							extent, new_num);
+				btrfs_mark_buffer_dirty(leaf);
+			} else if (key.offset < inline_limit &&
+				   (end > extent_end) &&
+				   (inline_limit < extent_end)) {
+				u32 new_size;
+				new_size = btrfs_file_extent_calc_inline_size(
+						   inline_limit - key.offset);
+				inode_sub_bytes(inode, extent_end -
+						inline_limit);
+				btrfs_set_file_extent_ram_bytes(leaf, extent,
+							new_size);
+				if (!compression && !encryption) {
+					btrfs_truncate_item(trans, root, path,
+							    new_size, 1);
+				}
+			}
+		}
+		/* delete the entire extent */
+		if (!keep) {
+			if (found_inline)
+				inode_sub_bytes(inode, extent_end -
+						key.offset);
+			ret = btrfs_del_item(trans, root, path);
+			/* TODO update progress marker and return */
+			BUG_ON(ret);
+			extent = NULL;
+			btrfs_release_path(root, path);
+			/* the extent will be freed later */
+		}
+		if (bookend && found_inline && start <= key.offset) {
+			u32 new_size;
+			new_size = btrfs_file_extent_calc_inline_size(
+						   extent_end - end);
+			inode_sub_bytes(inode, end - key.offset);
+			btrfs_set_file_extent_ram_bytes(leaf, extent,
+							new_size);
+			if (!compression && !encryption)
+				ret = btrfs_truncate_item(trans, root, path,
+							  new_size, 0);
+			BUG_ON(ret);
+		}
+		/* create bookend, splitting the extent in two */
+		if (bookend && found_extent) {
+			struct btrfs_key ins;
+			ins.objectid = inode->i_ino;
+			ins.offset = end;
+			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
+
+			btrfs_release_path(root, path);
+			ret = btrfs_insert_empty_item(trans, root, path, &ins,
+						      sizeof(*extent));
+			BUG_ON(ret);
+
+			leaf = path->nodes[0];
+			extent = btrfs_item_ptr(leaf, path->slots[0],
+						struct btrfs_file_extent_item);
+			write_extent_buffer(leaf, &old,
+					    (unsigned long)extent, sizeof(old));
+
+			btrfs_set_file_extent_compression(leaf, extent,
+							  compression);
+			btrfs_set_file_extent_encryption(leaf, extent,
+							 encryption);
+			btrfs_set_file_extent_other_encoding(leaf, extent,
+							     other_encoding);
+			btrfs_set_file_extent_offset(leaf, extent,
+				    le64_to_cpu(old.offset) + end - key.offset);
+			WARN_ON(le64_to_cpu(old.num_bytes) <
+				(extent_end - end));
+			btrfs_set_file_extent_num_bytes(leaf, extent,
+							extent_end - end);
+
+			/*
+			 * set the ram bytes to the size of the full extent
+			 * before splitting.  This is a worst case flag,
+			 * but it's the best we can do because we don't know
+			 * how splitting affects compression
+			 */
+			btrfs_set_file_extent_ram_bytes(leaf, extent,
+							ram_bytes);
+			btrfs_set_file_extent_type(leaf, extent, found_type);
+
+			btrfs_mark_buffer_dirty(path->nodes[0]);
+
+			if (disk_bytenr != 0) {
+				ret = btrfs_update_extent_ref(trans, root,
+						disk_bytenr, orig_parent,
+						leaf->start,
+						root->root_key.objectid,
+						trans->transid, ins.objectid);
+
+				BUG_ON(ret);
+			}
+			btrfs_release_path(root, path);
+			if (disk_bytenr != 0)
+				inode_add_bytes(inode, extent_end - end);
+		}
+
+		if (found_extent && !keep) {
+			u64 old_disk_bytenr = le64_to_cpu(old.disk_bytenr);
+
+			if (old_disk_bytenr != 0) {
+				inode_sub_bytes(inode,
+						le64_to_cpu(old.num_bytes));
+				ret = btrfs_free_extent(trans, root,
+						old_disk_bytenr,
+						le64_to_cpu(old.disk_num_bytes),
+						leaf_start, root_owner,
+						root_gen, key.objectid, 0);
+				BUG_ON(ret);
+				*hint_byte = old_disk_bytenr;
+			}
+		}
+
+		if (search_start >= end) {
+			ret = 0;
+			goto out;
+		}
+	}
+out:
+	btrfs_free_path(path);
+	if (locked_end > end) {
+		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
+			      GFP_NOFS);
+	}
+	btrfs_check_file(root, inode);
+	return ret;
+}
+
+static int extent_mergeable(struct extent_buffer *leaf, int slot,
+			    u64 objectid, u64 bytenr, u64 *start, u64 *end)
+{
+	struct btrfs_file_extent_item *fi;
+	struct btrfs_key key;
+	u64 extent_end;
+
+	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
+		return 0;
+
+	btrfs_item_key_to_cpu(leaf, &key, slot);
+	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
+		return 0;
+
+	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
+	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
+	    btrfs_file_extent_compression(leaf, fi) ||
+	    btrfs_file_extent_encryption(leaf, fi) ||
+	    btrfs_file_extent_other_encoding(leaf, fi))
+		return 0;
+
+	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
+	if ((*start && *start != key.offset) || (*end && *end != extent_end))
+		return 0;
+
+	*start = key.offset;
+	*end = extent_end;
+	return 1;
+}
+
+/*
+ * Mark extent in the range start - end as written.
+ *
+ * This changes extent type from 'pre-allocated' to 'regular'. If only
+ * part of extent is marked as written, the extent will be split into
+ * two or three.
+ */
+int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+			      struct btrfs_root *root,
+			      struct inode *inode, u64 start, u64 end)
+{
+	struct extent_buffer *leaf;
+	struct btrfs_path *path;
+	struct btrfs_file_extent_item *fi;
+	struct btrfs_key key;
+	u64 bytenr;
+	u64 num_bytes;
+	u64 extent_end;
+	u64 extent_offset;
+	u64 other_start;
+	u64 other_end;
+	u64 split = start;
+	u64 locked_end = end;
+	u64 orig_parent;
+	int extent_type;
+	int split_end = 1;
+	int ret;
+
+	btrfs_drop_extent_cache(inode, start, end - 1, 0);
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+again:
+	key.objectid = inode->i_ino;
+	key.type = BTRFS_EXTENT_DATA_KEY;
+	if (split == start)
+		key.offset = split;
+	else
+		key.offset = split - 1;
+
+	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+	if (ret > 0 && path->slots[0] > 0)
+		path->slots[0]--;
+
+	leaf = path->nodes[0];
+	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+	BUG_ON(key.objectid != inode->i_ino ||
+	       key.type != BTRFS_EXTENT_DATA_KEY);
+	fi = btrfs_item_ptr(leaf, path->slots[0],
+			    struct btrfs_file_extent_item);
+	extent_type = btrfs_file_extent_type(leaf, fi);
+	BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
+	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
+	BUG_ON(key.offset > start || extent_end < end);
+
+	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
+	extent_offset = btrfs_file_extent_offset(leaf, fi);
+
+	if (key.offset == start)
+		split = end;
+
+	if (key.offset == start && extent_end == end) {
+		int del_nr = 0;
+		int del_slot = 0;
+		u64 leaf_owner = btrfs_header_owner(leaf);
+		u64 leaf_gen = btrfs_header_generation(leaf);
+		other_start = end;
+		other_end = 0;
+		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
+				     bytenr, &other_start, &other_end)) {
+			extent_end = other_end;
+			del_slot = path->slots[0] + 1;
+			del_nr++;
+			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+						leaf->start, leaf_owner,
+						leaf_gen, inode->i_ino, 0);
+			BUG_ON(ret);
+		}
+		other_start = 0;
+		other_end = start;
+		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
+				     bytenr, &other_start, &other_end)) {
+			key.offset = other_start;
+			del_slot = path->slots[0];
+			del_nr++;
+			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+						leaf->start, leaf_owner,
+						leaf_gen, inode->i_ino, 0);
+			BUG_ON(ret);
+		}
+		split_end = 0;
+		if (del_nr == 0) {
+			btrfs_set_file_extent_type(leaf, fi,
+						   BTRFS_FILE_EXTENT_REG);
+			goto done;
+		}
+
+		fi = btrfs_item_ptr(leaf, del_slot - 1,
+				    struct btrfs_file_extent_item);
+		btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
+		btrfs_set_file_extent_num_bytes(leaf, fi,
+						extent_end - key.offset);
+		btrfs_mark_buffer_dirty(leaf);
+
+		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+		BUG_ON(ret);
+		goto done;
+	} else if (split == start) {
+		if (locked_end < extent_end) {
+			ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
+					locked_end, extent_end - 1, GFP_NOFS);
+			if (!ret) {
+				btrfs_release_path(root, path);
+				lock_extent(&BTRFS_I(inode)->io_tree,
+					locked_end, extent_end - 1, GFP_NOFS);
+				locked_end = extent_end;
+				goto again;
+			}
+			locked_end = extent_end;
+		}
+		btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
+		extent_offset += split - key.offset;
+	} else  {
+		BUG_ON(key.offset != start);
+		btrfs_set_file_extent_offset(leaf, fi, extent_offset +
+					     split - key.offset);
+		btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
+		key.offset = split;
+		btrfs_set_item_key_safe(trans, root, path, &key);
+		extent_end = split;
+	}
+
+	if (extent_end == end) {
+		split_end = 0;
+		extent_type = BTRFS_FILE_EXTENT_REG;
+	}
+	if (extent_end == end && split == start) {
+		other_start = end;
+		other_end = 0;
+		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
+				     bytenr, &other_start, &other_end)) {
+			path->slots[0]++;
+			fi = btrfs_item_ptr(leaf, path->slots[0],
+					    struct btrfs_file_extent_item);
+			key.offset = split;
+			btrfs_set_item_key_safe(trans, root, path, &key);
+			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
+			btrfs_set_file_extent_num_bytes(leaf, fi,
+							other_end - split);
+			goto done;
+		}
+	}
+	if (extent_end == end && split == end) {
+		other_start = 0;
+		other_end = start;
+		if (extent_mergeable(leaf, path->slots[0] - 1 , inode->i_ino,
+				     bytenr, &other_start, &other_end)) {
+			path->slots[0]--;
+			fi = btrfs_item_ptr(leaf, path->slots[0],
+					    struct btrfs_file_extent_item);
+			btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
+							other_start);
+			goto done;
+		}
+	}
+
+	btrfs_mark_buffer_dirty(leaf);
+
+	orig_parent = leaf->start;
+	ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
+				   orig_parent, root->root_key.objectid,
+				   trans->transid, inode->i_ino);
+	BUG_ON(ret);
+	btrfs_release_path(root, path);
+
+	key.offset = start;
+	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));
+	BUG_ON(ret);
+
+	leaf = path->nodes[0];
+	fi = btrfs_item_ptr(leaf, path->slots[0],
+			    struct btrfs_file_extent_item);
+	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+	btrfs_set_file_extent_type(leaf, fi, extent_type);
+	btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
+	btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
+	btrfs_set_file_extent_offset(leaf, fi, extent_offset);
+	btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
+	btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
+	btrfs_set_file_extent_compression(leaf, fi, 0);
+	btrfs_set_file_extent_encryption(leaf, fi, 0);
+	btrfs_set_file_extent_other_encoding(leaf, fi, 0);
+
+	if (orig_parent != leaf->start) {
+		ret = btrfs_update_extent_ref(trans, root, bytenr,
+					      orig_parent, leaf->start,
+					      root->root_key.objectid,
+					      trans->transid, inode->i_ino);
+		BUG_ON(ret);
+	}
+done:
+	btrfs_mark_buffer_dirty(leaf);
+	btrfs_release_path(root, path);
+	if (split_end && split == start) {
+		split = end;
+		goto again;
+	}
+	if (locked_end > end) {
+		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
+			      GFP_NOFS);
+	}
+	btrfs_free_path(path);
+	return 0;
+}
+
+/*
+ * this gets pages into the page cache and locks them down; it also properly
+ * waits for data=ordered extents to finish before allowing the pages to be
+ * modified.
+ */
+static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
+			 struct page **pages, size_t num_pages,
+			 loff_t pos, unsigned long first_index,
+			 unsigned long last_index, size_t write_bytes)
+{
+	int i;
+	unsigned long index = pos >> PAGE_CACHE_SHIFT;
+	struct inode *inode = fdentry(file)->d_inode;
+	int err = 0;
+	u64 start_pos;
+	u64 last_pos;
+
+	start_pos = pos & ~((u64)root->sectorsize - 1);
+	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
+
+	if (start_pos > inode->i_size) {
+		err = btrfs_cont_expand(inode, start_pos);
+		if (err)
+			return err;
+	}
+
+	memset(pages, 0, num_pages * sizeof(struct page *));
+again:
+	for (i = 0; i < num_pages; i++) {
+		pages[i] = grab_cache_page(inode->i_mapping, index + i);
+		if (!pages[i]) {
+			err = -ENOMEM;
+			BUG_ON(1);
+		}
+		wait_on_page_writeback(pages[i]);
+	}
+	if (start_pos < inode->i_size) {
+		struct btrfs_ordered_extent *ordered;
+		lock_extent(&BTRFS_I(inode)->io_tree,
+			    start_pos, last_pos - 1, GFP_NOFS);
+		ordered = btrfs_lookup_first_ordered_extent(inode,
+							    last_pos - 1);
+		if (ordered &&
+		    ordered->file_offset + ordered->len > start_pos &&
+		    ordered->file_offset < last_pos) {
+			btrfs_put_ordered_extent(ordered);
+			unlock_extent(&BTRFS_I(inode)->io_tree,
+				      start_pos, last_pos - 1, GFP_NOFS);
+			for (i = 0; i < num_pages; i++) {
+				unlock_page(pages[i]);
+				page_cache_release(pages[i]);
+			}
+			btrfs_wait_ordered_range(inode, start_pos,
+						 last_pos - start_pos);
+			goto again;
+		}
+		if (ordered)
+			btrfs_put_ordered_extent(ordered);
+
+		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
+				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
+				  GFP_NOFS);
+		unlock_extent(&BTRFS_I(inode)->io_tree,
+			      start_pos, last_pos - 1, GFP_NOFS);
+	}
+	for (i = 0; i < num_pages; i++) {
+		clear_page_dirty_for_io(pages[i]);
+		set_page_extent_mapped(pages[i]);
+		WARN_ON(!PageLocked(pages[i]));
+	}
+	return 0;
+}
+
+static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	loff_t pos;
+	loff_t start_pos;
+	ssize_t num_written = 0;
+	ssize_t err = 0;
+	int ret = 0;
+	struct inode *inode = fdentry(file)->d_inode;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct page **pages = NULL;
+	int nrptrs;
+	struct page *pinned[2];
+	unsigned long first_index;
+	unsigned long last_index;
+	int will_write;
+
+	will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
+		      (file->f_flags & O_DIRECT));
+
+	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
+		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
+	pinned[0] = NULL;
+	pinned[1] = NULL;
+
+	pos = *ppos;
+	start_pos = pos;
+
+	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+	current->backing_dev_info = inode->i_mapping->backing_dev_info;
+	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
+	if (err)
+		goto out_nolock;
+	if (count == 0)
+		goto out_nolock;
+
+	err = file_remove_suid(file);
+	if (err)
+		goto out_nolock;
+	file_update_time(file);
+
+	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
+
+	mutex_lock(&inode->i_mutex);
+	BTRFS_I(inode)->sequence++;
+	first_index = pos >> PAGE_CACHE_SHIFT;
+	last_index = (pos + count) >> PAGE_CACHE_SHIFT;
+
+	/*
+	 * there are lots of better ways to do this, but this code
+	 * makes sure the first and last page in the file range are
+	 * up to date and ready for cow
+	 */
+	if ((pos & (PAGE_CACHE_SIZE - 1))) {
+		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
+		if (!PageUptodate(pinned[0])) {
+			ret = btrfs_readpage(NULL, pinned[0]);
+			BUG_ON(ret);
+			wait_on_page_locked(pinned[0]);
+		} else {
+			unlock_page(pinned[0]);
+		}
+	}
+	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
+		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
+		if (!PageUptodate(pinned[1])) {
+			ret = btrfs_readpage(NULL, pinned[1]);
+			BUG_ON(ret);
+			wait_on_page_locked(pinned[1]);
+		} else {
+			unlock_page(pinned[1]);
+		}
+	}
+
+	while (count > 0) {
+		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
+		size_t write_bytes = min(count, nrptrs *
+					(size_t)PAGE_CACHE_SIZE -
+					 offset);
+		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
+					PAGE_CACHE_SHIFT;
+
+		WARN_ON(num_pages > nrptrs);
+		memset(pages, 0, sizeof(struct page *) * nrptrs);
+
+		ret = btrfs_check_free_space(root, write_bytes, 0);
+		if (ret)
+			goto out;
+
+		ret = prepare_pages(root, file, pages, num_pages,
+				    pos, first_index, last_index,
+				    write_bytes);
+		if (ret)
+			goto out;
+
+		ret = btrfs_copy_from_user(pos, num_pages,
+					   write_bytes, pages, buf);
+		if (ret) {
+			btrfs_drop_pages(pages, num_pages);
+			goto out;
+		}
+
+		ret = dirty_and_release_pages(NULL, root, file, pages,
+					      num_pages, pos, write_bytes);
+		btrfs_drop_pages(pages, num_pages);
+		if (ret)
+			goto out;
+
+		if (will_write) {
+			btrfs_fdatawrite_range(inode->i_mapping, pos,
+					       pos + write_bytes - 1,
+					       WB_SYNC_NONE);
+		} else {
+			balance_dirty_pages_ratelimited_nr(inode->i_mapping,
+							   num_pages);
+			if (num_pages <
+			    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
+				btrfs_btree_balance_dirty(root, 1);
+			btrfs_throttle(root);
+		}
+
+		buf += write_bytes;
+		count -= write_bytes;
+		pos += write_bytes;
+		num_written += write_bytes;
+
+		cond_resched();
+	}
+out:
+	mutex_unlock(&inode->i_mutex);
+
+out_nolock:
+	kfree(pages);
+	if (pinned[0])
+		page_cache_release(pinned[0]);
+	if (pinned[1])
+		page_cache_release(pinned[1]);
+	*ppos = pos;
+
+	if (num_written > 0 && will_write) {
+		struct btrfs_trans_handle *trans;
+
+		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
+		if (err)
+			num_written = err;
+
+		if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
+			trans = btrfs_start_transaction(root, 1);
+			ret = btrfs_log_dentry_safe(trans, root,
+						    file->f_dentry);
+			if (ret == 0) {
+				btrfs_sync_log(trans, root);
+				btrfs_end_transaction(trans, root);
+			} else {
+				btrfs_commit_transaction(trans, root);
+			}
+		}
+		if (file->f_flags & O_DIRECT) {
+			invalidate_mapping_pages(inode->i_mapping,
+			      start_pos >> PAGE_CACHE_SHIFT,
+			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
+		}
+	}
+	current->backing_dev_info = NULL;
+	return num_written ? num_written : err;
+}
+
+int btrfs_release_file(struct inode *inode, struct file *filp)
+{
+	if (filp->private_data)
+		btrfs_ioctl_trans_end(filp);
+	return 0;
+}
+
+/*
+ * fsync call for both files and directories.  This logs the inode into
+ * the tree log instead of forcing full commits whenever possible.
+ *
+ * It needs to call filemap_fdatawait so that all ordered extent updates
+ * in the metadata btree are up to date for copying to the log.
+ *
+ * It drops the inode mutex before doing the tree log commit.  This is an
+ * important optimization for directories because holding the mutex prevents
+ * new operations on the dir while we write to disk.
+ */
+int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
+{
+	struct inode *inode = dentry->d_inode;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	int ret = 0;
+	struct btrfs_trans_handle *trans;
+
+	/*
+	 * check the transaction that last modified this inode
+	 * and see if it's already been committed
+	 */
+	if (!BTRFS_I(inode)->last_trans)
+		goto out;
+
+	mutex_lock(&root->fs_info->trans_mutex);
+	if (BTRFS_I(inode)->last_trans <=
+	    root->fs_info->last_trans_committed) {
+		BTRFS_I(inode)->last_trans = 0;
+		mutex_unlock(&root->fs_info->trans_mutex);
+		goto out;
+	}
+	mutex_unlock(&root->fs_info->trans_mutex);
+
+	root->fs_info->tree_log_batch++;
+	filemap_fdatawrite(inode->i_mapping);
+	btrfs_wait_ordered_range(inode, 0, (u64)-1);
+	root->fs_info->tree_log_batch++;
+
+	/*
+	 * ok we haven't committed the transaction yet, let's do a commit
+	 */
+	if (file->private_data)
+		btrfs_ioctl_trans_end(file);
+
+	trans = btrfs_start_transaction(root, 1);
+	if (!trans) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = btrfs_log_dentry_safe(trans, root, file->f_dentry);
+	if (ret < 0)
+		goto out;
+
+	/* we've logged all the items and now have a consistent
+	 * version of the file in the log.  It is possible that
+	 * someone will come in and modify the file, but that's
+	 * fine because the log is consistent on disk, and we
+	 * have references to all of the file's extents
+	 *
+	 * It is possible that someone will come in and log the
+	 * file again, but that will end up using the synchronization
+	 * inside btrfs_sync_log to keep things safe.
+	 */
+	mutex_unlock(&file->f_dentry->d_inode->i_mutex);
+
+	if (ret > 0) {
+		ret = btrfs_commit_transaction(trans, root);
+	} else {
+		btrfs_sync_log(trans, root);
+		ret = btrfs_end_transaction(trans, root);
+	}
+	mutex_lock(&file->f_dentry->d_inode->i_mutex);
+out:
+	return ret > 0 ? EIO : ret;
+}
+
+static struct vm_operations_struct btrfs_file_vm_ops = {
+	.fault		= filemap_fault,
+	.page_mkwrite	= btrfs_page_mkwrite,
+};
+
+static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
+{
+	vma->vm_ops = &btrfs_file_vm_ops;
+	file_accessed(filp);
+	return 0;
+}
+
+struct file_operations btrfs_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= do_sync_read,
+	.aio_read       = generic_file_aio_read,
+	.splice_read	= generic_file_splice_read,
+	.write		= btrfs_file_write,
+	.mmap		= btrfs_file_mmap,
+	.open		= generic_file_open,
+	.release	= btrfs_release_file,
+	.fsync		= btrfs_sync_file,
+	.unlocked_ioctl	= btrfs_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= btrfs_ioctl,
+#endif
+};
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
new file mode 100644
index 0000000..d1e5f0e
--- /dev/null
+++ b/fs/btrfs/free-space-cache.c
@@ -0,0 +1,495 @@
+/*
+ * Copyright (C) 2008 Red Hat.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include "ctree.h"
+
+static int tree_insert_offset(struct rb_root *root, u64 offset,
+			      struct rb_node *node)
+{
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct btrfs_free_space *info;
+
+	while (*p) {
+		parent = *p;
+		info = rb_entry(parent, struct btrfs_free_space, offset_index);
+
+		if (offset < info->offset)
+			p = &(*p)->rb_left;
+		else if (offset > info->offset)
+			p = &(*p)->rb_right;
+		else
+			return -EEXIST;
+	}
+
+	rb_link_node(node, parent, p);
+	rb_insert_color(node, root);
+
+	return 0;
+}
+
+static int tree_insert_bytes(struct rb_root *root, u64 bytes,
+			     struct rb_node *node)
+{
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct btrfs_free_space *info;
+
+	while (*p) {
+		parent = *p;
+		info = rb_entry(parent, struct btrfs_free_space, bytes_index);
+
+		if (bytes < info->bytes)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+
+	rb_link_node(node, parent, p);
+	rb_insert_color(node, root);
+
+	return 0;
+}
+
+/*
+ * searches the tree for the given offset.  If contains is set we will return
+ * the free space that contains the given offset.  If contains is not set we
+ * will return the free space that starts at or after the given offset and is
+ * at least bytes long.
+ */
+static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
+						   u64 offset, u64 bytes,
+						   int contains)
+{
+	struct rb_node *n = root->rb_node;
+	struct btrfs_free_space *entry, *ret = NULL;
+
+	while (n) {
+		entry = rb_entry(n, struct btrfs_free_space, offset_index);
+
+		if (offset < entry->offset) {
+			if (!contains &&
+			    (!ret || entry->offset < ret->offset) &&
+			    (bytes <= entry->bytes))
+				ret = entry;
+			n = n->rb_left;
+		} else if (offset > entry->offset) {
+			if ((entry->offset + entry->bytes - 1) >= offset &&
+			    bytes <= entry->bytes) {
+				ret = entry;
+				break;
+			}
+			n = n->rb_right;
+		} else {
+			if (bytes > entry->bytes) {
+				n = n->rb_right;
+				continue;
+			}
+			ret = entry;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * return a chunk at least bytes in size, as close to offset as we can get.
+ */
+static struct btrfs_free_space *tree_search_bytes(struct rb_root *root,
+						  u64 offset, u64 bytes)
+{
+	struct rb_node *n = root->rb_node;
+	struct btrfs_free_space *entry, *ret = NULL;
+
+	while (n) {
+		entry = rb_entry(n, struct btrfs_free_space, bytes_index);
+
+		if (bytes < entry->bytes) {
+			/*
+			 * We prefer to get a hole size as close to the size we
+			 * are asking for so we don't take small slivers out of
+			 * huge holes, but we also want to get as close to the
+			 * offset as possible so we don't have a whole lot of
+			 * fragmentation.
+			 */
+			if (offset <= entry->offset) {
+				if (!ret)
+					ret = entry;
+				else if (entry->bytes < ret->bytes)
+					ret = entry;
+				else if (entry->offset < ret->offset)
+					ret = entry;
+			}
+			n = n->rb_left;
+		} else if (bytes > entry->bytes) {
+			n = n->rb_right;
+		} else {
+			/*
+			 * Ok we may have multiple chunks of the wanted size,
+			 * so we don't want to take the first one we find, we
+			 * want to take the one closest to our given offset, so
+			 * keep searching just in case there's a better match.
+			 */
+			n = n->rb_right;
+			if (offset > entry->offset)
+				continue;
+			else if (!ret || entry->offset < ret->offset)
+				ret = entry;
+		}
+	}
+
+	return ret;
+}
+
+static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+			      struct btrfs_free_space *info)
+{
+	rb_erase(&info->offset_index, &block_group->free_space_offset);
+	rb_erase(&info->bytes_index, &block_group->free_space_bytes);
+}
+
+static int link_free_space(struct btrfs_block_group_cache *block_group,
+			   struct btrfs_free_space *info)
+{
+	int ret = 0;
+
+
+	ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
+				 &info->offset_index);
+	if (ret)
+		return ret;
+
+	ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes,
+				&info->bytes_index);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+				  u64 offset, u64 bytes)
+{
+	struct btrfs_free_space *right_info;
+	struct btrfs_free_space *left_info;
+	struct btrfs_free_space *info = NULL;
+	struct btrfs_free_space *alloc_info;
+	int ret = 0;
+
+	alloc_info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+	if (!alloc_info)
+		return -ENOMEM;
+
+	/*
+	 * first we want to see if there is free space adjacent to the range we
+	 * are adding; if there is, remove that struct and add a new one to
+	 * cover the entire range
+	 */
+	right_info = tree_search_offset(&block_group->free_space_offset,
+					offset+bytes, 0, 1);
+	left_info = tree_search_offset(&block_group->free_space_offset,
+				       offset-1, 0, 1);
+
+	if (right_info && right_info->offset == offset+bytes) {
+		unlink_free_space(block_group, right_info);
+		info = right_info;
+		info->offset = offset;
+		info->bytes += bytes;
+	} else if (right_info && right_info->offset != offset+bytes) {
+		printk(KERN_ERR "btrfs adding space in the middle of an "
+		       "existing free space area. existing: "
+		       "offset=%llu, bytes=%llu. new: offset=%llu, "
+		       "bytes=%llu\n", (unsigned long long)right_info->offset,
+		       (unsigned long long)right_info->bytes,
+		       (unsigned long long)offset,
+		       (unsigned long long)bytes);
+		BUG();
+	}
+
+	if (left_info) {
+		unlink_free_space(block_group, left_info);
+
+		if (unlikely((left_info->offset + left_info->bytes) !=
+			     offset)) {
+			printk(KERN_ERR "btrfs free space to the left "
+			       "of new free space isn't "
+			       "quite right. existing: offset=%llu, "
+			       "bytes=%llu. new: offset=%llu, bytes=%llu\n",
+			       (unsigned long long)left_info->offset,
+			       (unsigned long long)left_info->bytes,
+			       (unsigned long long)offset,
+			       (unsigned long long)bytes);
+			BUG();
+		}
+
+		if (info) {
+			info->offset = left_info->offset;
+			info->bytes += left_info->bytes;
+			kfree(left_info);
+		} else {
+			info = left_info;
+			info->bytes += bytes;
+		}
+	}
+
+	if (info) {
+		ret = link_free_space(block_group, info);
+		if (!ret)
+			info = NULL;
+		goto out;
+	}
+
+	info = alloc_info;
+	alloc_info = NULL;
+	info->offset = offset;
+	info->bytes = bytes;
+
+	ret = link_free_space(block_group, info);
+	if (ret)
+		kfree(info);
+out:
+	if (ret) {
+		printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
+		if (ret == -EEXIST)
+			BUG();
+	}
+
+	kfree(alloc_info);
+
+	return ret;
+}
+
+static int
+__btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+			  u64 offset, u64 bytes)
+{
+	struct btrfs_free_space *info;
+	int ret = 0;
+
+	info = tree_search_offset(&block_group->free_space_offset, offset, 0,
+				  1);
+
+	if (info && info->offset == offset) {
+		if (info->bytes < bytes) {
+			printk(KERN_ERR "Found free space at %llu, size %llu, "
+			       "trying to use %llu\n",
+			       (unsigned long long)info->offset,
+			       (unsigned long long)info->bytes,
+			       (unsigned long long)bytes);
+			WARN_ON(1);
+			ret = -EINVAL;
+			goto out;
+		}
+		unlink_free_space(block_group, info);
+
+		if (info->bytes == bytes) {
+			kfree(info);
+			goto out;
+		}
+
+		info->offset += bytes;
+		info->bytes -= bytes;
+
+		ret = link_free_space(block_group, info);
+		BUG_ON(ret);
+	} else if (info && info->offset < offset &&
+		   info->offset + info->bytes >= offset + bytes) {
+		u64 old_start = info->offset;
+		/*
+		 * we're freeing space in the middle of the info,
+		 * this can happen during tree log replay
+		 *
+		 * first unlink the old info and then
+		 * insert it again after the hole we're creating
+		 */
+		unlink_free_space(block_group, info);
+		if (offset + bytes < info->offset + info->bytes) {
+			u64 old_end = info->offset + info->bytes;
+
+			info->offset = offset + bytes;
+			info->bytes = old_end - info->offset;
+			ret = link_free_space(block_group, info);
+			BUG_ON(ret);
+		} else {
+			/* the hole we're creating ends at the end
+			 * of the info struct, just free the info
+			 */
+			kfree(info);
+		}
+
+		/* step two, insert a new info struct to cover anything
+		 * before the hole
+		 */
+		ret = __btrfs_add_free_space(block_group, old_start,
+					     offset - old_start);
+		BUG_ON(ret);
+	} else {
+		WARN_ON(1);
+	}
+out:
+	return ret;
+}
+
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+			 u64 offset, u64 bytes)
+{
+	int ret;
+	struct btrfs_free_space *sp;
+
+	mutex_lock(&block_group->alloc_mutex);
+	ret = __btrfs_add_free_space(block_group, offset, bytes);
+	sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
+	BUG_ON(!sp);
+	mutex_unlock(&block_group->alloc_mutex);
+
+	return ret;
+}
+
+int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
+			      u64 offset, u64 bytes)
+{
+	int ret;
+	struct btrfs_free_space *sp;
+
+	ret = __btrfs_add_free_space(block_group, offset, bytes);
+	sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
+	BUG_ON(!sp);
+
+	return ret;
+}
+
+int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+			    u64 offset, u64 bytes)
+{
+	int ret = 0;
+
+	mutex_lock(&block_group->alloc_mutex);
+	ret = __btrfs_remove_free_space(block_group, offset, bytes);
+	mutex_unlock(&block_group->alloc_mutex);
+
+	return ret;
+}
+
+int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
+				 u64 offset, u64 bytes)
+{
+	int ret;
+
+	ret = __btrfs_remove_free_space(block_group, offset, bytes);
+
+	return ret;
+}
+
+void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
+			   u64 bytes)
+{
+	struct btrfs_free_space *info;
+	struct rb_node *n;
+	int count = 0;
+
+	for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
+		info = rb_entry(n, struct btrfs_free_space, offset_index);
+		if (info->bytes >= bytes)
+			count++;
+	}
+	printk(KERN_INFO "%d blocks of free space at or bigger than %llu bytes\n",
+	       count, (unsigned long long)bytes);
+}
+
+u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
+{
+	struct btrfs_free_space *info;
+	struct rb_node *n;
+	u64 ret = 0;
+
+	for (n = rb_first(&block_group->free_space_offset); n;
+	     n = rb_next(n)) {
+		info = rb_entry(n, struct btrfs_free_space, offset_index);
+		ret += info->bytes;
+	}
+
+	return ret;
+}
+
+void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
+{
+	struct btrfs_free_space *info;
+	struct rb_node *node;
+
+	mutex_lock(&block_group->alloc_mutex);
+	while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
+		info = rb_entry(node, struct btrfs_free_space, bytes_index);
+		unlink_free_space(block_group, info);
+		kfree(info);
+		if (need_resched()) {
+			mutex_unlock(&block_group->alloc_mutex);
+			cond_resched();
+			mutex_lock(&block_group->alloc_mutex);
+		}
+	}
+	mutex_unlock(&block_group->alloc_mutex);
+}
+
+#if 0
+static struct btrfs_free_space *btrfs_find_free_space_offset(struct
+						      btrfs_block_group_cache
+						      *block_group, u64 offset,
+						      u64 bytes)
+{
+	struct btrfs_free_space *ret;
+
+	mutex_lock(&block_group->alloc_mutex);
+	ret = tree_search_offset(&block_group->free_space_offset, offset,
+				 bytes, 0);
+	mutex_unlock(&block_group->alloc_mutex);
+
+	return ret;
+}
+
+static struct btrfs_free_space *btrfs_find_free_space_bytes(struct
+						     btrfs_block_group_cache
+						     *block_group, u64 offset,
+						     u64 bytes)
+{
+	struct btrfs_free_space *ret;
+
+	mutex_lock(&block_group->alloc_mutex);
+
+	ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes);
+	mutex_unlock(&block_group->alloc_mutex);
+
+	return ret;
+}
+#endif
+
+struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
+					       *block_group, u64 offset,
+					       u64 bytes)
+{
+	struct btrfs_free_space *ret = NULL;
+
+	ret = tree_search_offset(&block_group->free_space_offset, offset,
+				 bytes, 0);
+	if (!ret)
+		ret = tree_search_bytes(&block_group->free_space_bytes,
+					offset, bytes);
+
+	return ret;
+}
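+
+#if 0
+/*
+ * Illustrative sketch only, not part of the original patch: a rough example
+ * of how the two rb-tree indexes above are meant to be used together.  A
+ * caller records a freed range, later asks for a chunk of at least
+ * 'wanted_bytes' near 'wanted_offset', and removes the space it actually
+ * allocates.  The block group pointer is assumed to come from the extent
+ * allocator; the function name is made up for this example.
+ */
+static u64 example_alloc_from_cache(struct btrfs_block_group_cache *block_group,
+				    u64 freed_offset, u64 freed_bytes,
+				    u64 wanted_offset, u64 wanted_bytes)
+{
+	struct btrfs_free_space *entry;
+	u64 found = 0;
+
+	/* record a freed range in both the offset and bytes indexes */
+	btrfs_add_free_space(block_group, freed_offset, freed_bytes);
+
+	/* search by offset first, falling back to a best-fit by size */
+	entry = btrfs_find_free_space(block_group, wanted_offset, wanted_bytes);
+	if (entry) {
+		found = entry->offset;
+		/* carve the allocation back out of the cached free range */
+		btrfs_remove_free_space(block_group, entry->offset,
+					wanted_bytes);
+	}
+	return found;
+}
+#endif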
diff --git a/fs/btrfs/hash.h b/fs/btrfs/hash.h
new file mode 100644
index 0000000..2a020b2
--- /dev/null
+++ b/fs/btrfs/hash.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __HASH__
+#define __HASH__
+
+#include "crc32c.h"
+static inline u64 btrfs_name_hash(const char *name, int len)
+{
+	return btrfs_crc32c((u32)~1, name, len);
+}
+#endif
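+
+#if 0
+/*
+ * Illustrative sketch only, not part of the original patch: directory items
+ * are keyed by the crc32c hash of the entry name, so a lookup builds its key
+ * roughly like this.  struct btrfs_key, btrfs_set_key_type and
+ * BTRFS_DIR_ITEM_KEY are assumed to come from ctree.h; the function name is
+ * made up for this example.
+ */
+static void example_dir_item_key(struct btrfs_key *key, u64 dir_ino,
+				 const char *name, int name_len)
+{
+	key->objectid = dir_ino;
+	btrfs_set_key_type(key, BTRFS_DIR_ITEM_KEY);
+	/* the name hash becomes the key offset */
+	key->offset = btrfs_name_hash(name, name_len);
+}
+#endif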
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
new file mode 100644
index 0000000..3d46fa1
--- /dev/null
+++ b/fs/btrfs/inode-item.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include "ctree.h"
+#include "disk-io.h"
+#include "transaction.h"
+
+static int find_name_in_backref(struct btrfs_path *path, const char *name,
+			 int name_len, struct btrfs_inode_ref **ref_ret)
+{
+	struct extent_buffer *leaf;
+	struct btrfs_inode_ref *ref;
+	unsigned long ptr;
+	unsigned long name_ptr;
+	u32 item_size;
+	u32 cur_offset = 0;
+	int len;
+
+	leaf = path->nodes[0];
+	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+	while (cur_offset < item_size) {
+		ref = (struct btrfs_inode_ref *)(ptr + cur_offset);
+		len = btrfs_inode_ref_name_len(leaf, ref);
+		name_ptr = (unsigned long)(ref + 1);
+		cur_offset += len + sizeof(*ref);
+		if (len != name_len)
+			continue;
+		if (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0) {
+			*ref_ret = ref;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
+			   struct btrfs_root *root,
+			   const char *name, int name_len,
+			   u64 inode_objectid, u64 ref_objectid, u64 *index)
+{
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct btrfs_inode_ref *ref;
+	struct extent_buffer *leaf;
+	unsigned long ptr;
+	unsigned long item_start;
+	u32 item_size;
+	u32 sub_item_len;
+	int ret;
+	int del_len = name_len + sizeof(*ref);
+
+	key.objectid = inode_objectid;
+	key.offset = ref_objectid;
+	btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+	if (ret > 0) {
+		ret = -ENOENT;
+		goto out;
+	} else if (ret < 0) {
+		goto out;
+	}
+	if (!find_name_in_backref(path, name, name_len, &ref)) {
+		ret = -ENOENT;
+		goto out;
+	}
+	leaf = path->nodes[0];
+	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+
+	if (index)
+		*index = btrfs_inode_ref_index(leaf, ref);
+
+	if (del_len == item_size) {
+		ret = btrfs_del_item(trans, root, path);
+		goto out;
+	}
+	ptr = (unsigned long)ref;
+	sub_item_len = name_len + sizeof(*ref);
+	item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
+	memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
+			      item_size - (ptr + sub_item_len - item_start));
+	ret = btrfs_truncate_item(trans, root, path,
+				  item_size - sub_item_len, 1);
+	BUG_ON(ret);
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
+			   struct btrfs_root *root,
+			   const char *name, int name_len,
+			   u64 inode_objectid, u64 ref_objectid, u64 index)
+{
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct btrfs_inode_ref *ref;
+	unsigned long ptr;
+	int ret;
+	int ins_len = name_len + sizeof(*ref);
+
+	key.objectid = inode_objectid;
+	key.offset = ref_objectid;
+	btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	ret = btrfs_insert_empty_item(trans, root, path, &key,
+				      ins_len);
+	if (ret == -EEXIST) {
+		u32 old_size;
+
+		if (find_name_in_backref(path, name, name_len, &ref))
+			goto out;
+
+		old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
+		ret = btrfs_extend_item(trans, root, path, ins_len);
+		BUG_ON(ret);
+		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+				     struct btrfs_inode_ref);
+		ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
+		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
+		btrfs_set_inode_ref_index(path->nodes[0], ref, index);
+		ptr = (unsigned long)(ref + 1);
+		ret = 0;
+	} else if (ret < 0) {
+		goto out;
+	} else {
+		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+				     struct btrfs_inode_ref);
+		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
+		btrfs_set_inode_ref_index(path->nodes[0], ref, index);
+		ptr = (unsigned long)(ref + 1);
+	}
+	write_extent_buffer(path->nodes[0], name, ptr, name_len);
+	btrfs_mark_buffer_dirty(path->nodes[0]);
+
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root,
+			     struct btrfs_path *path, u64 objectid)
+{
+	struct btrfs_key key;
+	int ret;
+	key.objectid = objectid;
+	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+	key.offset = 0;
+
+	ret = btrfs_insert_empty_item(trans, root, path, &key,
+				      sizeof(struct btrfs_inode_item));
+	if (ret == 0 && objectid > root->highest_inode)
+		root->highest_inode = objectid;
+	return ret;
+}
+
+int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
+		       *root, struct btrfs_path *path,
+		       struct btrfs_key *location, int mod)
+{
+	int ins_len = mod < 0 ? -1 : 0;
+	int cow = mod != 0;
+	int ret;
+	int slot;
+	struct extent_buffer *leaf;
+	struct btrfs_key found_key;
+
+	ret = btrfs_search_slot(trans, root, location, path, ins_len, cow);
+	if (ret > 0 && btrfs_key_type(location) == BTRFS_ROOT_ITEM_KEY &&
+	    location->offset == (u64)-1 && path->slots[0] != 0) {
+		slot = path->slots[0] - 1;
+		leaf = path->nodes[0];
+		btrfs_item_key_to_cpu(leaf, &found_key, slot);
+		if (found_key.objectid == location->objectid &&
+		    btrfs_key_type(&found_key) == btrfs_key_type(location)) {
+			path->slots[0]--;
+			return 0;
+		}
+	}
+	return ret;
+}
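+
+#if 0
+/*
+ * Illustrative sketch only, not part of the original patch: a link operation
+ * adds a back reference from the inode to its parent directory, and unlink
+ * removes it again, recovering the directory index number stored in the ref.
+ * The names, objectids and transaction handle are assumed to come from the
+ * caller; the function name is made up for this example.
+ */
+static int example_link_unlink_refs(struct btrfs_trans_handle *trans,
+				    struct btrfs_root *root,
+				    const char *name, int name_len,
+				    u64 inode_objectid, u64 dir_objectid,
+				    u64 dir_index)
+{
+	u64 found_index;
+	int ret;
+
+	/* link: record "name lives in dir_objectid at dir_index" */
+	ret = btrfs_insert_inode_ref(trans, root, name, name_len,
+				     inode_objectid, dir_objectid, dir_index);
+	if (ret)
+		return ret;
+
+	/* unlink: remove the same back reference and read back its index */
+	return btrfs_del_inode_ref(trans, root, name, name_len,
+				   inode_objectid, dir_objectid, &found_index);
+}
+#endif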
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
new file mode 100644
index 0000000..2aa7987
--- /dev/null
+++ b/fs/btrfs/inode-map.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include "ctree.h"
+#include "disk-io.h"
+#include "transaction.h"
+
+int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid)
+{
+	struct btrfs_path *path;
+	int ret;
+	struct extent_buffer *l;
+	struct btrfs_key search_key;
+	struct btrfs_key found_key;
+	int slot;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+
+	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
+	search_key.type = -1;
+	search_key.offset = (u64)-1;
+	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
+	if (ret < 0)
+		goto error;
+	BUG_ON(ret == 0);
+	if (path->slots[0] > 0) {
+		slot = path->slots[0] - 1;
+		l = path->nodes[0];
+		btrfs_item_key_to_cpu(l, &found_key, slot);
+		*objectid = found_key.objectid;
+	} else {
+		*objectid = BTRFS_FIRST_FREE_OBJECTID;
+	}
+	ret = 0;
+error:
+	btrfs_free_path(path);
+	return ret;
+}
+
+/*
+ * walks the btree of allocated inodes and finds a hole.
+ */
+int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root,
+			     u64 dirid, u64 *objectid)
+{
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	int ret;
+	int slot = 0;
+	u64 last_ino = 0;
+	int start_found;
+	struct extent_buffer *l;
+	struct btrfs_key search_key;
+	u64 search_start = dirid;
+
+	mutex_lock(&root->objectid_mutex);
+	if (root->last_inode_alloc >= BTRFS_FIRST_FREE_OBJECTID &&
+	    root->last_inode_alloc < BTRFS_LAST_FREE_OBJECTID) {
+		*objectid = ++root->last_inode_alloc;
+		mutex_unlock(&root->objectid_mutex);
+		return 0;
+	}
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+	search_start = max(search_start, BTRFS_FIRST_FREE_OBJECTID);
+	search_key.objectid = search_start;
+	search_key.type = 0;
+	search_key.offset = 0;
+
+	btrfs_init_path(path);
+	start_found = 0;
+	ret = btrfs_search_slot(trans, root, &search_key, path, 0, 0);
+	if (ret < 0)
+		goto error;
+
+	while (1) {
+		l = path->nodes[0];
+		slot = path->slots[0];
+		if (slot >= btrfs_header_nritems(l)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret == 0)
+				continue;
+			if (ret < 0)
+				goto error;
+			if (!start_found) {
+				*objectid = search_start;
+				start_found = 1;
+				goto found;
+			}
+			*objectid = last_ino > search_start ?
+				last_ino : search_start;
+			goto found;
+		}
+		btrfs_item_key_to_cpu(l, &key, slot);
+		if (key.objectid >= search_start) {
+			if (start_found) {
+				if (last_ino < search_start)
+					last_ino = search_start;
+				if (key.objectid > last_ino) {
+					*objectid = last_ino;
+					goto found;
+				}
+			} else if (key.objectid > search_start) {
+				*objectid = search_start;
+				goto found;
+			}
+		}
+		if (key.objectid >= BTRFS_LAST_FREE_OBJECTID)
+			break;
+
+		start_found = 1;
+		last_ino = key.objectid + 1;
+		path->slots[0]++;
+	}
+	BUG_ON(1);
+found:
+	btrfs_release_path(root, path);
+	btrfs_free_path(path);
+	BUG_ON(*objectid < search_start);
+	mutex_unlock(&root->objectid_mutex);
+	return 0;
+error:
+	btrfs_release_path(root, path);
+	btrfs_free_path(path);
+	mutex_unlock(&root->objectid_mutex);
+	return ret;
+}
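+
+#if 0
+/*
+ * Illustrative sketch only, not part of the original patch: how a new inode
+ * number is typically obtained and turned into an empty inode item.  The
+ * dirid of the parent directory is used as a search hint, and error handling
+ * is reduced to the bare minimum; the function name is made up for this
+ * example.
+ */
+static int example_new_inode_item(struct btrfs_trans_handle *trans,
+				  struct btrfs_root *root, u64 dirid)
+{
+	struct btrfs_path *path;
+	u64 objectid;
+	int ret;
+
+	ret = btrfs_find_free_objectid(trans, root, dirid, &objectid);
+	if (ret)
+		return ret;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	/* reserve the BTRFS_INODE_ITEM_KEY slot for the new inode */
+	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
+	btrfs_free_path(path);
+	return ret;
+}
+#endif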
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
new file mode 100644
index 0000000..8adfe05
--- /dev/null
+++ b/fs/btrfs/inode.c
@@ -0,0 +1,5035 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/buffer_head.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+#include <linux/backing-dev.h>
+#include <linux/mpage.h>
+#include <linux/swap.h>
+#include <linux/writeback.h>
+#include <linux/statfs.h>
+#include <linux/compat.h>
+#include <linux/bit_spinlock.h>
+#include <linux/version.h>
+#include <linux/xattr.h>
+#include <linux/posix_acl.h>
+#include <linux/falloc.h>
+#include "compat.h"
+#include "ctree.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "btrfs_inode.h"
+#include "ioctl.h"
+#include "print-tree.h"
+#include "volumes.h"
+#include "ordered-data.h"
+#include "xattr.h"
+#include "tree-log.h"
+#include "ref-cache.h"
+#include "compression.h"
+
+struct btrfs_iget_args {
+	u64 ino;
+	struct btrfs_root *root;
+};
+
+static struct inode_operations btrfs_dir_inode_operations;
+static struct inode_operations btrfs_symlink_inode_operations;
+static struct inode_operations btrfs_dir_ro_inode_operations;
+static struct inode_operations btrfs_special_inode_operations;
+static struct inode_operations btrfs_file_inode_operations;
+static struct address_space_operations btrfs_aops;
+static struct address_space_operations btrfs_symlink_aops;
+static struct file_operations btrfs_dir_file_operations;
+static struct extent_io_ops btrfs_extent_io_ops;
+
+static struct kmem_cache *btrfs_inode_cachep;
+struct kmem_cache *btrfs_trans_handle_cachep;
+struct kmem_cache *btrfs_transaction_cachep;
+struct kmem_cache *btrfs_bit_radix_cachep;
+struct kmem_cache *btrfs_path_cachep;
+
+#define S_SHIFT 12
+static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
+	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
+	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
+	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
+	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
+	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
+	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
+	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
+};
+
+static void btrfs_truncate(struct inode *inode);
+static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
+static noinline int cow_file_range(struct inode *inode,
+				   struct page *locked_page,
+				   u64 start, u64 end, int *page_started,
+				   unsigned long *nr_written, int unlock);
+
+/*
+ * a very lame attempt at stopping writes when the FS is 85% full.  There
+ * are countless ways this is incorrect, but it is better than nothing.
+ */
+int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
+			   int for_del)
+{
+	u64 total;
+	u64 used;
+	u64 thresh;
+	int ret = 0;
+
+	spin_lock(&root->fs_info->delalloc_lock);
+	total = btrfs_super_total_bytes(&root->fs_info->super_copy);
+	used = btrfs_super_bytes_used(&root->fs_info->super_copy);
+	if (for_del)
+		thresh = total * 90;
+	else
+		thresh = total * 85;
+
+	do_div(thresh, 100);
+
+	if (used + root->fs_info->delalloc_bytes + num_required > thresh)
+		ret = -ENOSPC;
+	spin_unlock(&root->fs_info->delalloc_lock);
+	return ret;
+}
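+
+/*
+ * Worked example (illustrative, not part of the original patch): on a 100GB
+ * filesystem a normal write uses thresh = 100GB * 85 / 100 = 85GB, so the
+ * write is refused with -ENOSPC once used + delalloc_bytes + num_required
+ * would exceed 85GB.  Deletions get the more generous 90% threshold so that
+ * freeing space is still possible on a nearly full filesystem.
+ */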
+
+/*
+ * this does all the hard work for inserting an inline extent into
+ * the btree.  The caller should have done a btrfs_drop_extents so that
+ * no overlapping inline items exist in the btree
+ */
+static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root, struct inode *inode,
+				u64 start, size_t size, size_t compressed_size,
+				struct page **compressed_pages)
+{
+	struct btrfs_key key;
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	struct page *page = NULL;
+	char *kaddr;
+	unsigned long ptr;
+	struct btrfs_file_extent_item *ei;
+	int err = 0;
+	int ret;
+	size_t cur_size = size;
+	size_t datasize;
+	unsigned long offset;
+	int use_compress = 0;
+
+	if (compressed_size && compressed_pages) {
+		use_compress = 1;
+		cur_size = compressed_size;
+	}
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	btrfs_set_trans_block_group(trans, inode);
+
+	key.objectid = inode->i_ino;
+	key.offset = start;
+	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
+	datasize = btrfs_file_extent_calc_inline_size(cur_size);
+
+	inode_add_bytes(inode, size);
+	ret = btrfs_insert_empty_item(trans, root, path, &key,
+				      datasize);
+	BUG_ON(ret);
+	if (ret) {
+		err = ret;
+		goto fail;
+	}
+	leaf = path->nodes[0];
+	ei = btrfs_item_ptr(leaf, path->slots[0],
+			    struct btrfs_file_extent_item);
+	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
+	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
+	btrfs_set_file_extent_encryption(leaf, ei, 0);
+	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
+	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
+	ptr = btrfs_file_extent_inline_start(ei);
+
+	if (use_compress) {
+		struct page *cpage;
+		int i = 0;
+		while (compressed_size > 0) {
+			cpage = compressed_pages[i];
+			cur_size = min_t(unsigned long, compressed_size,
+				       PAGE_CACHE_SIZE);
+
+			kaddr = kmap(cpage);
+			write_extent_buffer(leaf, kaddr, ptr, cur_size);
+			kunmap(cpage);
+
+			i++;
+			ptr += cur_size;
+			compressed_size -= cur_size;
+		}
+		btrfs_set_file_extent_compression(leaf, ei,
+						  BTRFS_COMPRESS_ZLIB);
+	} else {
+		page = find_get_page(inode->i_mapping,
+				     start >> PAGE_CACHE_SHIFT);
+		btrfs_set_file_extent_compression(leaf, ei, 0);
+		kaddr = kmap_atomic(page, KM_USER0);
+		offset = start & (PAGE_CACHE_SIZE - 1);
+		write_extent_buffer(leaf, kaddr + offset, ptr, size);
+		kunmap_atomic(kaddr, KM_USER0);
+		page_cache_release(page);
+	}
+	btrfs_mark_buffer_dirty(leaf);
+	btrfs_free_path(path);
+
+	BTRFS_I(inode)->disk_i_size = inode->i_size;
+	btrfs_update_inode(trans, root, inode);
+	return 0;
+fail:
+	btrfs_free_path(path);
+	return err;
+}
+
+
+/*
+ * conditionally insert an inline extent into the file.  This
+ * does the checks required to make sure the data is small enough
+ * to fit as an inline extent.
+ */
+static int cow_file_range_inline(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *root,
+				 struct inode *inode, u64 start, u64 end,
+				 size_t compressed_size,
+				 struct page **compressed_pages)
+{
+	u64 isize = i_size_read(inode);
+	u64 actual_end = min(end + 1, isize);
+	u64 inline_len = actual_end - start;
+	u64 aligned_end = (end + root->sectorsize - 1) &
+			~((u64)root->sectorsize - 1);
+	u64 hint_byte;
+	u64 data_len = inline_len;
+	int ret;
+
+	if (compressed_size)
+		data_len = compressed_size;
+
+	if (start > 0 ||
+	    actual_end >= PAGE_CACHE_SIZE ||
+	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
+	    (!compressed_size &&
+	    (actual_end & (root->sectorsize - 1)) == 0) ||
+	    end + 1 < isize ||
+	    data_len > root->fs_info->max_inline) {
+		return 1;
+	}
+
+	ret = btrfs_drop_extents(trans, root, inode, start,
+				 aligned_end, start, &hint_byte);
+	BUG_ON(ret);
+
+	if (isize > actual_end)
+		inline_len = min_t(u64, isize, actual_end);
+	ret = insert_inline_extent(trans, root, inode, start,
+				   inline_len, compressed_size,
+				   compressed_pages);
+	BUG_ON(ret);
+	btrfs_drop_extent_cache(inode, start, aligned_end, 0);
+	return 0;
+}
+
+struct async_extent {
+	u64 start;
+	u64 ram_size;
+	u64 compressed_size;
+	struct page **pages;
+	unsigned long nr_pages;
+	struct list_head list;
+};
+
+struct async_cow {
+	struct inode *inode;
+	struct btrfs_root *root;
+	struct page *locked_page;
+	u64 start;
+	u64 end;
+	struct list_head extents;
+	struct btrfs_work work;
+};
+
+static noinline int add_async_extent(struct async_cow *cow,
+				     u64 start, u64 ram_size,
+				     u64 compressed_size,
+				     struct page **pages,
+				     unsigned long nr_pages)
+{
+	struct async_extent *async_extent;
+
+	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
+	async_extent->start = start;
+	async_extent->ram_size = ram_size;
+	async_extent->compressed_size = compressed_size;
+	async_extent->pages = pages;
+	async_extent->nr_pages = nr_pages;
+	list_add_tail(&async_extent->list, &cow->extents);
+	return 0;
+}
+
+/*
+ * we create compressed extents in two phases.  The first
+ * phase compresses a range of pages that have already been
+ * locked (both pages and state bits are locked).
+ *
+ * This is done inside an ordered work queue, and the compression
+ * is spread across many cpus.  The actual IO submission is step
+ * two, and the ordered work queue takes care of making sure that
+ * happens in the same order things were put onto the queue by
+ * writepages and friends.
+ *
+ * If this code finds it can't get good compression, it puts an
+ * entry onto the work queue to write the uncompressed bytes.  This
+ * makes sure that both compressed inodes and uncompressed inodes
+ * are written in the same order that pdflush sent them down.
+ */
+static noinline int compress_file_range(struct inode *inode,
+					struct page *locked_page,
+					u64 start, u64 end,
+					struct async_cow *async_cow,
+					int *num_added)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_trans_handle *trans;
+	u64 num_bytes;
+	u64 orig_start;
+	u64 disk_num_bytes;
+	u64 blocksize = root->sectorsize;
+	u64 actual_end;
+	u64 isize = i_size_read(inode);
+	int ret = 0;
+	struct page **pages = NULL;
+	unsigned long nr_pages;
+	unsigned long nr_pages_ret = 0;
+	unsigned long total_compressed = 0;
+	unsigned long total_in = 0;
+	unsigned long max_compressed = 128 * 1024;
+	unsigned long max_uncompressed = 128 * 1024;
+	int i;
+	int will_compress;
+
+	orig_start = start;
+
+	actual_end = min_t(u64, isize, end + 1);
+again:
+	will_compress = 0;
+	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
+	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
+
+	total_compressed = actual_end - start;
+
+	/* we want to make sure that the amount of ram required to uncompress
+	 * an extent is reasonable, so we limit the total size in ram
+	 * of a compressed extent to 128k.  This is a crucial number
+	 * because it also controls how easily we can spread reads across
+	 * cpus for decompression.
+	 *
+	 * We also want to make sure the amount of IO required to do
+	 * a random read is reasonably small, so we limit the size of
+	 * a compressed extent to 128k.
+	 */
+	total_compressed = min(total_compressed, max_uncompressed);
+	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
+	num_bytes = max(blocksize,  num_bytes);
+	disk_num_bytes = num_bytes;
+	total_in = 0;
+	ret = 0;
+
+	/*
+	 * we do compression for mount -o compress and when the
+	 * inode has not been flagged as nocompress.  This flag can
+	 * change at any time if we discover bad compression ratios.
+	 */
+	if (!btrfs_test_flag(inode, NOCOMPRESS) &&
+	    btrfs_test_opt(root, COMPRESS)) {
+		WARN_ON(pages);
+		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
+
+		ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
+						total_compressed, pages,
+						nr_pages, &nr_pages_ret,
+						&total_in,
+						&total_compressed,
+						max_compressed);
+
+		if (!ret) {
+			unsigned long offset = total_compressed &
+				(PAGE_CACHE_SIZE - 1);
+			struct page *page = pages[nr_pages_ret - 1];
+			char *kaddr;
+
+			/* zero the tail end of the last page, we might be
+			 * sending it down to disk
+			 */
+			if (offset) {
+				kaddr = kmap_atomic(page, KM_USER0);
+				memset(kaddr + offset, 0,
+				       PAGE_CACHE_SIZE - offset);
+				kunmap_atomic(kaddr, KM_USER0);
+			}
+			will_compress = 1;
+		}
+	}
+	if (start == 0) {
+		trans = btrfs_join_transaction(root, 1);
+		BUG_ON(!trans);
+		btrfs_set_trans_block_group(trans, inode);
+
+		/* let's try to make an inline extent */
+		if (ret || total_in < (actual_end - start)) {
+			/* we didn't compress the entire range, try
+			 * to make an uncompressed inline extent.
+			 */
+			ret = cow_file_range_inline(trans, root, inode,
+						    start, end, 0, NULL);
+		} else {
+			/* try making a compressed inline extent */
+			ret = cow_file_range_inline(trans, root, inode,
+						    start, end,
+						    total_compressed, pages);
+		}
+		btrfs_end_transaction(trans, root);
+		if (ret == 0) {
+			/*
+			 * inline extent creation worked, we don't need
+			 * to create any more async work items.  Unlock
+			 * and free up our temp pages.
+			 */
+			extent_clear_unlock_delalloc(inode,
+						     &BTRFS_I(inode)->io_tree,
+						     start, end, NULL, 1, 0,
+						     0, 1, 1, 1);
+			ret = 0;
+			goto free_pages_out;
+		}
+	}
+
+	if (will_compress) {
+		/*
+		 * we aren't doing an inline extent, so round the compressed
+		 * size up to a block size boundary so that the allocator does
+		 * sane things
+		 */
+		total_compressed = (total_compressed + blocksize - 1) &
+			~(blocksize - 1);
+
+		/*
+		 * one last check to make sure the compression is really a
+		 * win, compare the page count read with the blocks on disk
+		 */
+		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
+			~(PAGE_CACHE_SIZE - 1);
+		if (total_compressed >= total_in) {
+			will_compress = 0;
+		} else {
+			disk_num_bytes = total_compressed;
+			num_bytes = total_in;
+		}
+	}
+	if (!will_compress && pages) {
+		/*
+		 * the compression code ran but failed to make things smaller,
+		 * free any pages it allocated and our page pointer array
+		 */
+		for (i = 0; i < nr_pages_ret; i++) {
+			WARN_ON(pages[i]->mapping);
+			page_cache_release(pages[i]);
+		}
+		kfree(pages);
+		pages = NULL;
+		total_compressed = 0;
+		nr_pages_ret = 0;
+
+		/* flag the file so we don't compress in the future */
+		btrfs_set_flag(inode, NOCOMPRESS);
+	}
+	if (will_compress) {
+		*num_added += 1;
+
+		/* the async work queues will take care of doing actual
+		 * allocation on disk for these compressed pages,
+		 * and will submit them to the elevator.
+		 */
+		add_async_extent(async_cow, start, num_bytes,
+				 total_compressed, pages, nr_pages_ret);
+
+		if (start + num_bytes < end && start + num_bytes < actual_end) {
+			start += num_bytes;
+			pages = NULL;
+			cond_resched();
+			goto again;
+		}
+	} else {
+		/*
+		 * No compression, but we still need to write the pages in
+		 * the file we've been given so far.  Redirty the locked
+		 * page if it corresponds to our extent and set things up
+		 * for the async work queue to run cow_file_range to do
+		 * the normal delalloc dance
+		 */
+		if (page_offset(locked_page) >= start &&
+		    page_offset(locked_page) <= end) {
+			__set_page_dirty_nobuffers(locked_page);
+			/* unlocked later on in the async handlers */
+		}
+		add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
+		*num_added += 1;
+	}
+
+out:
+	return 0;
+
+free_pages_out:
+	for (i = 0; i < nr_pages_ret; i++) {
+		WARN_ON(pages[i]->mapping);
+		page_cache_release(pages[i]);
+	}
+	kfree(pages);
+
+	goto out;
+}
+
+/*
+ * phase two of compressed writeback.  This is the ordered portion
+ * of the code, which only gets called in the order the work was
+ * queued.  We walk all the async extents created by compress_file_range
+ * and send them down to the disk.
+ */
+static noinline int submit_compressed_extents(struct inode *inode,
+					      struct async_cow *async_cow)
+{
+	struct async_extent *async_extent;
+	u64 alloc_hint = 0;
+	struct btrfs_trans_handle *trans;
+	struct btrfs_key ins;
+	struct extent_map *em;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	struct extent_io_tree *io_tree;
+	int ret;
+
+	if (list_empty(&async_cow->extents))
+		return 0;
+
+	trans = btrfs_join_transaction(root, 1);
+
+	while (!list_empty(&async_cow->extents)) {
+		async_extent = list_entry(async_cow->extents.next,
+					  struct async_extent, list);
+		list_del(&async_extent->list);
+
+		io_tree = &BTRFS_I(inode)->io_tree;
+
+		/* did the compression code fall back to uncompressed IO? */
+		if (!async_extent->pages) {
+			int page_started = 0;
+			unsigned long nr_written = 0;
+
+			lock_extent(io_tree, async_extent->start,
+				    async_extent->start +
+				    async_extent->ram_size - 1, GFP_NOFS);
+
+			/* allocate blocks */
+			cow_file_range(inode, async_cow->locked_page,
+				       async_extent->start,
+				       async_extent->start +
+				       async_extent->ram_size - 1,
+				       &page_started, &nr_written, 0);
+
+			/*
+			 * if page_started, cow_file_range inserted an
+			 * inline extent and took care of all the unlocking
+			 * and IO for us.  Otherwise, we need to submit
+			 * all those pages down to the drive.
+			 */
+			if (!page_started)
+				extent_write_locked_range(io_tree,
+						  inode, async_extent->start,
+						  async_extent->start +
+						  async_extent->ram_size - 1,
+						  btrfs_get_extent,
+						  WB_SYNC_ALL);
+			kfree(async_extent);
+			cond_resched();
+			continue;
+		}
+
+		lock_extent(io_tree, async_extent->start,
+			    async_extent->start + async_extent->ram_size - 1,
+			    GFP_NOFS);
+		/*
+		 * here we're doing allocation and writeback of the
+		 * compressed pages
+		 */
+		btrfs_drop_extent_cache(inode, async_extent->start,
+					async_extent->start +
+					async_extent->ram_size - 1, 0);
+
+		ret = btrfs_reserve_extent(trans, root,
+					   async_extent->compressed_size,
+					   async_extent->compressed_size,
+					   0, alloc_hint,
+					   (u64)-1, &ins, 1);
+		BUG_ON(ret);
+		em = alloc_extent_map(GFP_NOFS);
+		em->start = async_extent->start;
+		em->len = async_extent->ram_size;
+		em->orig_start = em->start;
+
+		em->block_start = ins.objectid;
+		em->block_len = ins.offset;
+		em->bdev = root->fs_info->fs_devices->latest_bdev;
+		set_bit(EXTENT_FLAG_PINNED, &em->flags);
+		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
+
+		while (1) {
+			spin_lock(&em_tree->lock);
+			ret = add_extent_mapping(em_tree, em);
+			spin_unlock(&em_tree->lock);
+			if (ret != -EEXIST) {
+				free_extent_map(em);
+				break;
+			}
+			btrfs_drop_extent_cache(inode, async_extent->start,
+						async_extent->start +
+						async_extent->ram_size - 1, 0);
+		}
+
+		ret = btrfs_add_ordered_extent(inode, async_extent->start,
+					       ins.objectid,
+					       async_extent->ram_size,
+					       ins.offset,
+					       BTRFS_ORDERED_COMPRESSED);
+		BUG_ON(ret);
+
+		btrfs_end_transaction(trans, root);
+
+		/*
+		 * clear dirty, set writeback and unlock the pages.
+		 */
+		extent_clear_unlock_delalloc(inode,
+					     &BTRFS_I(inode)->io_tree,
+					     async_extent->start,
+					     async_extent->start +
+					     async_extent->ram_size - 1,
+					     NULL, 1, 1, 0, 1, 1, 0);
+
+		ret = btrfs_submit_compressed_write(inode,
+				    async_extent->start,
+				    async_extent->ram_size,
+				    ins.objectid,
+				    ins.offset, async_extent->pages,
+				    async_extent->nr_pages);
+
+		BUG_ON(ret);
+		trans = btrfs_join_transaction(root, 1);
+		alloc_hint = ins.objectid + ins.offset;
+		kfree(async_extent);
+		cond_resched();
+	}
+
+	btrfs_end_transaction(trans, root);
+	return 0;
+}
+
+/*
+ * when extent_io.c finds a delayed allocation range in the file,
+ * the callbacks end up in this code.  The basic idea is to
+ * allocate extents on disk for the range, and create ordered data structs
+ * in ram to track those extents.
+ *
+ * locked_page is the page that writepage had locked already.  We use
+ * it to make sure we don't do extra locks or unlocks.
+ *
+ * *page_started is set to one if we unlock locked_page and do everything
+ * required to start IO on it.  It may be clean and already done with
+ * IO when we return.
+ */
+static noinline int cow_file_range(struct inode *inode,
+				   struct page *locked_page,
+				   u64 start, u64 end, int *page_started,
+				   unsigned long *nr_written,
+				   int unlock)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_trans_handle *trans;
+	u64 alloc_hint = 0;
+	u64 num_bytes;
+	unsigned long ram_size;
+	u64 disk_num_bytes;
+	u64 cur_alloc_size;
+	u64 blocksize = root->sectorsize;
+	u64 actual_end;
+	u64 isize = i_size_read(inode);
+	struct btrfs_key ins;
+	struct extent_map *em;
+	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	int ret = 0;
+
+	trans = btrfs_join_transaction(root, 1);
+	BUG_ON(!trans);
+	btrfs_set_trans_block_group(trans, inode);
+
+	actual_end = min_t(u64, isize, end + 1);
+
+	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
+	num_bytes = max(blocksize, num_bytes);
+	disk_num_bytes = num_bytes;
+	ret = 0;
+
+	if (start == 0) {
+		/* lets try to make an inline extent */
+		ret = cow_file_range_inline(trans, root, inode,
+					    start, end, 0, NULL);
+		if (ret == 0) {
+			extent_clear_unlock_delalloc(inode,
+						     &BTRFS_I(inode)->io_tree,
+						     start, end, NULL, 1, 1,
+						     1, 1, 1, 1);
+			*nr_written = *nr_written +
+			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
+			*page_started = 1;
+			ret = 0;
+			goto out;
+		}
+	}
+
+	BUG_ON(disk_num_bytes >
+	       btrfs_super_total_bytes(&root->fs_info->super_copy));
+
+	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
+
+	while (disk_num_bytes > 0) {
+		cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
+		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
+					   root->sectorsize, 0, alloc_hint,
+					   (u64)-1, &ins, 1);
+		BUG_ON(ret);
+
+		em = alloc_extent_map(GFP_NOFS);
+		em->start = start;
+		em->orig_start = em->start;
+
+		ram_size = ins.offset;
+		em->len = ins.offset;
+
+		em->block_start = ins.objectid;
+		em->block_len = ins.offset;
+		em->bdev = root->fs_info->fs_devices->latest_bdev;
+		set_bit(EXTENT_FLAG_PINNED, &em->flags);
+
+		while (1) {
+			spin_lock(&em_tree->lock);
+			ret = add_extent_mapping(em_tree, em);
+			spin_unlock(&em_tree->lock);
+			if (ret != -EEXIST) {
+				free_extent_map(em);
+				break;
+			}
+			btrfs_drop_extent_cache(inode, start,
+						start + ram_size - 1, 0);
+		}
+
+		cur_alloc_size = ins.offset;
+		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
+					       ram_size, cur_alloc_size, 0);
+		BUG_ON(ret);
+
+		if (root->root_key.objectid ==
+		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
+			ret = btrfs_reloc_clone_csums(inode, start,
+						      cur_alloc_size);
+			BUG_ON(ret);
+		}
+
+		if (disk_num_bytes < cur_alloc_size)
+			break;
+
+		/* we're not doing compressed IO, don't unlock the first
+		 * page (which the caller expects to stay locked), don't
+		 * clear any dirty bits and don't set any writeback bits
+		 */
+		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
+					     start, start + ram_size - 1,
+					     locked_page, unlock, 1,
+					     1, 0, 0, 0);
+		disk_num_bytes -= cur_alloc_size;
+		num_bytes -= cur_alloc_size;
+		alloc_hint = ins.objectid + ins.offset;
+		start += cur_alloc_size;
+	}
+out:
+	ret = 0;
+	btrfs_end_transaction(trans, root);
+
+	return ret;
+}
+
+/*
+ * work queue callback to start compression on a file and its pages
+ */
+static noinline void async_cow_start(struct btrfs_work *work)
+{
+	struct async_cow *async_cow;
+	int num_added = 0;
+	async_cow = container_of(work, struct async_cow, work);
+
+	compress_file_range(async_cow->inode, async_cow->locked_page,
+			    async_cow->start, async_cow->end, async_cow,
+			    &num_added);
+	if (num_added == 0)
+		async_cow->inode = NULL;
+}
+
+/*
+ * work queue callback to submit previously compressed pages
+ */
+static noinline void async_cow_submit(struct btrfs_work *work)
+{
+	struct async_cow *async_cow;
+	struct btrfs_root *root;
+	unsigned long nr_pages;
+
+	async_cow = container_of(work, struct async_cow, work);
+
+	root = async_cow->root;
+	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
+		PAGE_CACHE_SHIFT;
+
+	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
+
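+	/*
+	 * once the outstanding async delalloc pages drop below the threshold,
+	 * wake up anyone throttled on async_submit_wait so more work can be
+	 * queued
+	 */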
+	if (atomic_read(&root->fs_info->async_delalloc_pages) <
+	    5 * 1024 * 1024 &&
+	    waitqueue_active(&root->fs_info->async_submit_wait))
+		wake_up(&root->fs_info->async_submit_wait);
+
+	if (async_cow->inode)
+		submit_compressed_extents(async_cow->inode, async_cow);
+}
+
+static noinline void async_cow_free(struct btrfs_work *work)
+{
+	struct async_cow *async_cow;
+	async_cow = container_of(work, struct async_cow, work);
+	kfree(async_cow);
+}
+
+static int cow_file_range_async(struct inode *inode, struct page *locked_page,
+				u64 start, u64 end, int *page_started,
+				unsigned long *nr_written)
+{
+	struct async_cow *async_cow;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	unsigned long nr_pages;
+	u64 cur_end;
+	int limit = 10 * 1024 * 1024;
+
+	if (!btrfs_test_opt(root, COMPRESS)) {
+		return cow_file_range(inode, locked_page, start, end,
+				      page_started, nr_written, 1);
+	}
+
+	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
+			 EXTENT_DELALLOC, 1, 0, GFP_NOFS);
+	while (start < end) {
+		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
+		async_cow->inode = inode;
+		async_cow->root = root;
+		async_cow->locked_page = locked_page;
+		async_cow->start = start;
+
+		if (btrfs_test_flag(inode, NOCOMPRESS))
+			cur_end = end;
+		else
+			cur_end = min(end, start + 512 * 1024 - 1);
+
+		async_cow->end = cur_end;
+		INIT_LIST_HEAD(&async_cow->extents);
+
+		async_cow->work.func = async_cow_start;
+		async_cow->work.ordered_func = async_cow_submit;
+		async_cow->work.ordered_free = async_cow_free;
+		async_cow->work.flags = 0;
+
+		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
+			PAGE_CACHE_SHIFT;
+		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
+
+		btrfs_queue_worker(&root->fs_info->delalloc_workers,
+				   &async_cow->work);
+
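+		/*
+		 * throttle: don't queue more async work until the number of
+		 * outstanding async delalloc pages drops back under the limit
+		 */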
+		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
+			wait_event(root->fs_info->async_submit_wait,
+			   (atomic_read(&root->fs_info->async_delalloc_pages) <
+			    limit));
+		}
+
+		while (atomic_read(&root->fs_info->async_submit_draining) &&
+		      atomic_read(&root->fs_info->async_delalloc_pages)) {
+			wait_event(root->fs_info->async_submit_wait,
+			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
+			   0));
+		}
+
+		*nr_written += nr_pages;
+		start = cur_end + 1;
+	}
+	*page_started = 1;
+	return 0;
+}
+
+static noinline int csum_exist_in_range(struct btrfs_root *root,
+					u64 bytenr, u64 num_bytes)
+{
+	int ret;
+	struct btrfs_ordered_sum *sums;
+	LIST_HEAD(list);
+
+	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
+				       bytenr + num_bytes - 1, &list);
+	if (ret == 0 && list_empty(&list))
+		return 0;
+
+	while (!list_empty(&list)) {
+		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
+		list_del(&sums->list);
+		kfree(sums);
+	}
+	return 1;
+}
+
+/*
+ * when nocow writeback is in use, the callbacks end up in this code.  This
+ * checks for snapshots or COW copies of the extents that exist in the file,
+ * and COWs the file as required.
+ *
+ * If no cow copies or snapshots exist, we write directly to the existing
+ * blocks on disk
+ */
+static int run_delalloc_nocow(struct inode *inode, struct page *locked_page,
+			      u64 start, u64 end, int *page_started, int force,
+			      unsigned long *nr_written)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_trans_handle *trans;
+	struct extent_buffer *leaf;
+	struct btrfs_path *path;
+	struct btrfs_file_extent_item *fi;
+	struct btrfs_key found_key;
+	u64 cow_start;
+	u64 cur_offset;
+	u64 extent_end;
+	u64 disk_bytenr;
+	u64 num_bytes;
+	int extent_type;
+	int ret;
+	int type;
+	int nocow;
+	int check_prev = 1;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+	trans = btrfs_join_transaction(root, 1);
+	BUG_ON(!trans);
+
+	cow_start = (u64)-1;
+	cur_offset = start;
+	while (1) {
+		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+					       cur_offset, 0);
+		BUG_ON(ret < 0);
+		if (ret > 0 && path->slots[0] > 0 && check_prev) {
+			leaf = path->nodes[0];
+			btrfs_item_key_to_cpu(leaf, &found_key,
+					      path->slots[0] - 1);
+			if (found_key.objectid == inode->i_ino &&
+			    found_key.type == BTRFS_EXTENT_DATA_KEY)
+				path->slots[0]--;
+		}
+		check_prev = 0;
+next_slot:
+		leaf = path->nodes[0];
+		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0)
+				BUG_ON(1);
+			if (ret > 0)
+				break;
+			leaf = path->nodes[0];
+		}
+
+		nocow = 0;
+		disk_bytenr = 0;
+		num_bytes = 0;
+		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+
+		if (found_key.objectid > inode->i_ino ||
+		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
+		    found_key.offset > end)
+			break;
+
+		if (found_key.offset > cur_offset) {
+			extent_end = found_key.offset;
+			goto out_check;
+		}
+
+		fi = btrfs_item_ptr(leaf, path->slots[0],
+				    struct btrfs_file_extent_item);
+		extent_type = btrfs_file_extent_type(leaf, fi);
+
+		if (extent_type == BTRFS_FILE_EXTENT_REG ||
+		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
+			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+			extent_end = found_key.offset +
+				btrfs_file_extent_num_bytes(leaf, fi);
+			if (extent_end <= start) {
+				path->slots[0]++;
+				goto next_slot;
+			}
+			if (disk_bytenr == 0)
+				goto out_check;
+			if (btrfs_file_extent_compression(leaf, fi) ||
+			    btrfs_file_extent_encryption(leaf, fi) ||
+			    btrfs_file_extent_other_encoding(leaf, fi))
+				goto out_check;
+			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
+				goto out_check;
+			if (btrfs_extent_readonly(root, disk_bytenr))
+				goto out_check;
+			if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
+						  disk_bytenr))
+				goto out_check;
+			disk_bytenr += btrfs_file_extent_offset(leaf, fi);
+			disk_bytenr += cur_offset - found_key.offset;
+			num_bytes = min(end + 1, extent_end) - cur_offset;
+			/*
+			 * force cow if csum exists in the range.
+			 * this ensures that the csums for a given extent
+			 * are either valid or do not exist.
+			 */
+			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
+				goto out_check;
+			nocow = 1;
+		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
+			extent_end = found_key.offset +
+				btrfs_file_extent_inline_len(leaf, fi);
+			extent_end = ALIGN(extent_end, root->sectorsize);
+		} else {
+			BUG_ON(1);
+		}
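+		/*
+		 * out_check: ranges that can't be written in place are folded
+		 * into the pending cow range starting at cow_start; nocow
+		 * ranges are written over the existing extent below
+		 */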
+out_check:
+		if (extent_end <= start) {
+			path->slots[0]++;
+			goto next_slot;
+		}
+		if (!nocow) {
+			if (cow_start == (u64)-1)
+				cow_start = cur_offset;
+			cur_offset = extent_end;
+			if (cur_offset > end)
+				break;
+			path->slots[0]++;
+			goto next_slot;
+		}
+
+		btrfs_release_path(root, path);
+		if (cow_start != (u64)-1) {
+			ret = cow_file_range(inode, locked_page, cow_start,
+					found_key.offset - 1, page_started,
+					nr_written, 1);
+			BUG_ON(ret);
+			cow_start = (u64)-1;
+		}
+
+		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
+			struct extent_map *em;
+			struct extent_map_tree *em_tree;
+			em_tree = &BTRFS_I(inode)->extent_tree;
+			em = alloc_extent_map(GFP_NOFS);
+			em->start = cur_offset;
+			em->orig_start = em->start;
+			em->len = num_bytes;
+			em->block_len = num_bytes;
+			em->block_start = disk_bytenr;
+			em->bdev = root->fs_info->fs_devices->latest_bdev;
+			set_bit(EXTENT_FLAG_PINNED, &em->flags);
+			while (1) {
+				spin_lock(&em_tree->lock);
+				ret = add_extent_mapping(em_tree, em);
+				spin_unlock(&em_tree->lock);
+				if (ret != -EEXIST) {
+					free_extent_map(em);
+					break;
+				}
+				btrfs_drop_extent_cache(inode, em->start,
+						em->start + em->len - 1, 0);
+			}
+			type = BTRFS_ORDERED_PREALLOC;
+		} else {
+			type = BTRFS_ORDERED_NOCOW;
+		}
+
+		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
+					       num_bytes, num_bytes, type);
+		BUG_ON(ret);
+
+		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
+					cur_offset, cur_offset + num_bytes - 1,
+					locked_page, 1, 1, 1, 0, 0, 0);
+		cur_offset = extent_end;
+		if (cur_offset > end)
+			break;
+	}
+	btrfs_release_path(root, path);
+
+	if (cur_offset <= end && cow_start == (u64)-1)
+		cow_start = cur_offset;
+	if (cow_start != (u64)-1) {
+		ret = cow_file_range(inode, locked_page, cow_start, end,
+				     page_started, nr_written, 1);
+		BUG_ON(ret);
+	}
+
+	ret = btrfs_end_transaction(trans, root);
+	BUG_ON(ret);
+	btrfs_free_path(path);
+	return 0;
+}
+
+/*
+ * extent_io.c callback to do delayed allocation processing
+ */
+static int run_delalloc_range(struct inode *inode, struct page *locked_page,
+			      u64 start, u64 end, int *page_started,
+			      unsigned long *nr_written)
+{
+	int ret;
+
+	if (btrfs_test_flag(inode, NODATACOW))
+		ret = run_delalloc_nocow(inode, locked_page, start, end,
+					 page_started, 1, nr_written);
+	else if (btrfs_test_flag(inode, PREALLOC))
+		ret = run_delalloc_nocow(inode, locked_page, start, end,
+					 page_started, 0, nr_written);
+	else
+		ret = cow_file_range_async(inode, locked_page, start, end,
+					   page_started, nr_written);
+
+	return ret;
+}
+
+/*
+ * extent_io.c set_bit_hook, used to track delayed allocation
+ * bytes in this file, and to maintain the list of inodes that
+ * have pending delalloc work to be done.
+ */
+static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
+		       unsigned long old, unsigned long bits)
+{
+	/*
+	 * set_bit and clear_bit hooks normally require _irqsave/restore
+	 * but in this case, we are only testing for the DELALLOC
+	 * bit, which is only set or cleared with irqs on
+	 */
+	if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
+		struct btrfs_root *root = BTRFS_I(inode)->root;
+		spin_lock(&root->fs_info->delalloc_lock);
+		BTRFS_I(inode)->delalloc_bytes += end - start + 1;
+		root->fs_info->delalloc_bytes += end - start + 1;
+		if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
+			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
+				      &root->fs_info->delalloc_inodes);
+		}
+		spin_unlock(&root->fs_info->delalloc_lock);
+	}
+	return 0;
+}
+
+/*
+ * extent_io.c clear_bit_hook, see set_bit_hook for why
+ */
+static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
+			 unsigned long old, unsigned long bits)
+{
+	/*
+	 * set_bit and clear_bit hooks normally require _irqsave/restore
+	 * but in this case, we are only testing for the DELALLOC
+	 * bit, which is only set or cleared with irqs on
+	 */
+	if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
+		struct btrfs_root *root = BTRFS_I(inode)->root;
+
+		spin_lock(&root->fs_info->delalloc_lock);
+		if (end - start + 1 > root->fs_info->delalloc_bytes) {
+			printk(KERN_INFO "btrfs warning: delalloc account "
+			       "%llu %llu\n",
+			       (unsigned long long)end - start + 1,
+			       (unsigned long long)
+			       root->fs_info->delalloc_bytes);
+			root->fs_info->delalloc_bytes = 0;
+			BTRFS_I(inode)->delalloc_bytes = 0;
+		} else {
+			root->fs_info->delalloc_bytes -= end - start + 1;
+			BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
+		}
+		if (BTRFS_I(inode)->delalloc_bytes == 0 &&
+		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
+			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
+		}
+		spin_unlock(&root->fs_info->delalloc_lock);
+	}
+	return 0;
+}
+
+/*
+ * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
+ * we don't create bios that span stripes or chunks
+ */
+int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
+			 size_t size, struct bio *bio,
+			 unsigned long bio_flags)
+{
+	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
+	struct btrfs_mapping_tree *map_tree;
+	u64 logical = (u64)bio->bi_sector << 9;
+	u64 length = 0;
+	u64 map_length;
+	int ret;
+
+	if (bio_flags & EXTENT_BIO_COMPRESSED)
+		return 0;
+
+	length = bio->bi_size;
+	map_tree = &root->fs_info->mapping_tree;
+	map_length = length;
+	ret = btrfs_map_block(map_tree, READ, logical,
+			      &map_length, NULL, 0);
+
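+	/*
+	 * if adding this page would make the bio extend past the contiguous
+	 * chunk mapping, tell the caller to start a new bio instead
+	 */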
+	if (map_length < length + size)
+		return 1;
+	return 0;
+}
+
+/*
+ * in order to insert checksums into the metadata in large chunks,
+ * we wait until bio submission time.   All the pages in the bio are
+ * checksummed and sums are attached onto the ordered extent record.
+ *
+ * At IO completion time the csums attached to the ordered extent record
+ * are inserted into the btree
+ */
+static int __btrfs_submit_bio_start(struct inode *inode, int rw,
+				    struct bio *bio, int mirror_num,
+				    unsigned long bio_flags)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	int ret = 0;
+
+	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
+	BUG_ON(ret);
+	return 0;
+}
+
+/*
+ * in order to insert checksums into the metadata in large chunks,
+ * we wait until bio submission time.   All the pages in the bio are
+ * checksummed and sums are attached onto the ordered extent record.
+ *
+ * At IO completion time the csums attached to the ordered extent record
+ * are inserted into the btree
+ */
+static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
+			  int mirror_num, unsigned long bio_flags)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
+}
+
+/*
+ * extent_io.c submission hook. This does the right thing for csum calculation
+ * on write, or reading the csums from the tree before a read
+ */
+static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
+			  int mirror_num, unsigned long bio_flags)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	int ret = 0;
+	int skip_sum;
+
+	skip_sum = btrfs_test_flag(inode, NODATASUM);
+
+	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
+	BUG_ON(ret);
+
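+	/*
+	 * reads: hand compressed extents to the compressed read code,
+	 * otherwise look up the csums so the end_io hook can verify the data
+	 */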
+	if (!(rw & (1 << BIO_RW))) {
+		if (bio_flags & EXTENT_BIO_COMPRESSED) {
+			return btrfs_submit_compressed_read(inode, bio,
+						    mirror_num, bio_flags);
+		} else if (!skip_sum)
+			btrfs_lookup_bio_sums(root, inode, bio, NULL);
+		goto mapit;
+	} else if (!skip_sum) {
+		/* csum items have already been cloned */
+		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
+			goto mapit;
+		/* we're doing a write, do the async checksumming */
+		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
+				   inode, rw, bio, mirror_num,
+				   bio_flags, __btrfs_submit_bio_start,
+				   __btrfs_submit_bio_done);
+	}
+
+mapit:
+	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
+}
+
+/*
+ * given a list of ordered sums, record them in the inode.  This happens
+ * at IO completion time based on sums calculated at bio submission time.
+ */
+static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
+			     struct inode *inode, u64 file_offset,
+			     struct list_head *list)
+{
+	struct list_head *cur;
+	struct btrfs_ordered_sum *sum;
+
+	btrfs_set_trans_block_group(trans, inode);
+	list_for_each(cur, list) {
+		sum = list_entry(cur, struct btrfs_ordered_sum, list);
+		btrfs_csum_file_blocks(trans,
+		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
+	}
+	return 0;
+}
+
+int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
+{
+	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
+		WARN_ON(1);
+	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
+				   GFP_NOFS);
+}
+
+/* see btrfs_writepage_start_hook for details on why this is required */
+struct btrfs_writepage_fixup {
+	struct page *page;
+	struct btrfs_work work;
+};
+
+static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
+{
+	struct btrfs_writepage_fixup *fixup;
+	struct btrfs_ordered_extent *ordered;
+	struct page *page;
+	struct inode *inode;
+	u64 page_start;
+	u64 page_end;
+
+	fixup = container_of(work, struct btrfs_writepage_fixup, work);
+	page = fixup->page;
+again:
+	lock_page(page);
+	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
+		ClearPageChecked(page);
+		goto out_page;
+	}
+
+	inode = page->mapping->host;
+	page_start = page_offset(page);
+	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
+
+	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
+
+	/* already ordered? We're done */
+	if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
+			     EXTENT_ORDERED, 0)) {
+		goto out;
+	}
+
+	ordered = btrfs_lookup_ordered_extent(inode, page_start);
+	if (ordered) {
+		unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
+			      page_end, GFP_NOFS);
+		unlock_page(page);
+		btrfs_start_ordered_extent(inode, ordered, 1);
+		goto again;
+	}
+
+	btrfs_set_extent_delalloc(inode, page_start, page_end);
+	ClearPageChecked(page);
+out:
+	unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
+out_page:
+	unlock_page(page);
+	page_cache_release(page);
+}
+
+/*
+ * There are a few paths in the higher layers of the kernel that directly
+ * set the page dirty bit without asking the filesystem if it is a
+ * good idea.  This causes problems because we want to make sure COW
+ * properly happens and the data=ordered rules are followed.
+ *
+ * In our case any range that doesn't have the ORDERED bit set
+ * hasn't been properly set up for IO.  We kick off an async process
+ * to fix it up.  The async helper will wait for ordered extents, set
+ * the delalloc bit and make it safe to write the page.
+ */
+static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
+{
+	struct inode *inode = page->mapping->host;
+	struct btrfs_writepage_fixup *fixup;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	int ret;
+
+	ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
+			     EXTENT_ORDERED, 0);
+	if (ret)
+		return 0;
+
+	if (PageChecked(page))
+		return -EAGAIN;
+
+	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
+	if (!fixup)
+		return -EAGAIN;
+
+	SetPageChecked(page);
+	page_cache_get(page);
+	fixup->work.func = btrfs_writepage_fixup_worker;
+	fixup->page = page;
+	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
+	return -EAGAIN;
+}
+
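+/*
+ * insert a file extent item for space that has already been allocated:
+ * drop any existing extents in the range, fill in the new extent data
+ * item, and record the allocation in the extent tree.
+ */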
+static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
+				       struct inode *inode, u64 file_pos,
+				       u64 disk_bytenr, u64 disk_num_bytes,
+				       u64 num_bytes, u64 ram_bytes,
+				       u8 compression, u8 encryption,
+				       u16 other_encoding, int extent_type)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_file_extent_item *fi;
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	struct btrfs_key ins;
+	u64 hint;
+	int ret;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+
+	ret = btrfs_drop_extents(trans, root, inode, file_pos,
+				 file_pos + num_bytes, file_pos, &hint);
+	BUG_ON(ret);
+
+	ins.objectid = inode->i_ino;
+	ins.offset = file_pos;
+	ins.type = BTRFS_EXTENT_DATA_KEY;
+	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
+	BUG_ON(ret);
+	leaf = path->nodes[0];
+	fi = btrfs_item_ptr(leaf, path->slots[0],
+			    struct btrfs_file_extent_item);
+	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
+	btrfs_set_file_extent_type(leaf, fi, extent_type);
+	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
+	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
+	btrfs_set_file_extent_offset(leaf, fi, 0);
+	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
+	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
+	btrfs_set_file_extent_compression(leaf, fi, compression);
+	btrfs_set_file_extent_encryption(leaf, fi, encryption);
+	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
+	btrfs_mark_buffer_dirty(leaf);
+
+	inode_add_bytes(inode, num_bytes);
+	btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);
+
+	ins.objectid = disk_bytenr;
+	ins.offset = disk_num_bytes;
+	ins.type = BTRFS_EXTENT_ITEM_KEY;
+	ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
+					  root->root_key.objectid,
+					  trans->transid, inode->i_ino, &ins);
+	BUG_ON(ret);
+
+	btrfs_free_path(path);
+	return 0;
+}
+
+/* as ordered data IO finishes, this gets called so we can finish
+ * an ordered extent if the range of bytes in the file it covers are
+ * fully written.
+ */
+static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_trans_handle *trans;
+	struct btrfs_ordered_extent *ordered_extent;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	int compressed = 0;
+	int ret;
+
+	ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
+	if (!ret)
+		return 0;
+
+	trans = btrfs_join_transaction(root, 1);
+
+	ordered_extent = btrfs_lookup_ordered_extent(inode, start);
+	BUG_ON(!ordered_extent);
+	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
+		goto nocow;
+
+	lock_extent(io_tree, ordered_extent->file_offset,
+		    ordered_extent->file_offset + ordered_extent->len - 1,
+		    GFP_NOFS);
+
+	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
+		compressed = 1;
+	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
+		BUG_ON(compressed);
+		ret = btrfs_mark_extent_written(trans, root, inode,
+						ordered_extent->file_offset,
+						ordered_extent->file_offset +
+						ordered_extent->len);
+		BUG_ON(ret);
+	} else {
+		ret = insert_reserved_file_extent(trans, inode,
+						ordered_extent->file_offset,
+						ordered_extent->start,
+						ordered_extent->disk_len,
+						ordered_extent->len,
+						ordered_extent->len,
+						compressed, 0, 0,
+						BTRFS_FILE_EXTENT_REG);
+		BUG_ON(ret);
+	}
+	unlock_extent(io_tree, ordered_extent->file_offset,
+		    ordered_extent->file_offset + ordered_extent->len - 1,
+		    GFP_NOFS);
+nocow:
+	add_pending_csums(trans, inode, ordered_extent->file_offset,
+			  &ordered_extent->list);
+
+	mutex_lock(&BTRFS_I(inode)->extent_mutex);
+	btrfs_ordered_update_i_size(inode, ordered_extent);
+	btrfs_update_inode(trans, root, inode);
+	btrfs_remove_ordered_extent(inode, ordered_extent);
+	mutex_unlock(&BTRFS_I(inode)->extent_mutex);
+
+	/* once for us */
+	btrfs_put_ordered_extent(ordered_extent);
+	/* once for the tree */
+	btrfs_put_ordered_extent(ordered_extent);
+
+	btrfs_end_transaction(trans, root);
+	return 0;
+}
+
+static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
+				struct extent_state *state, int uptodate)
+{
+	return btrfs_finish_ordered_io(page->mapping->host, start, end);
+}
+
+/*
+ * When IO fails, either with EIO or because csum verification fails, we
+ * try other mirrors that might have a good copy of the data.  This
+ * io_failure_record is used to record state as we go through all the
+ * mirrors.  If another mirror has good data, the page is set up to date
+ * and things continue.  If a good mirror can't be found, the original
+ * bio end_io callback is called to indicate things have failed.
+ */
+struct io_failure_record {
+	struct page *page;
+	u64 start;
+	u64 len;
+	u64 logical;
+	unsigned long bio_flags;
+	int last_mirror;
+};
+
+static int btrfs_io_failed_hook(struct bio *failed_bio,
+			 struct page *page, u64 start, u64 end,
+			 struct extent_state *state)
+{
+	struct io_failure_record *failrec = NULL;
+	u64 private;
+	struct extent_map *em;
+	struct inode *inode = page->mapping->host;
+	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
+	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	struct bio *bio;
+	int num_copies;
+	int ret;
+	int rw;
+	u64 logical;
+
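+	/*
+	 * the failure tree stores a pointer to the io_failure_record as the
+	 * private value for the range; if there is no entry yet, this is the
+	 * first failure for this range
+	 */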
+	ret = get_state_private(failure_tree, start, &private);
+	if (ret) {
+		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
+		if (!failrec)
+			return -ENOMEM;
+		failrec->start = start;
+		failrec->len = end - start + 1;
+		failrec->last_mirror = 0;
+		failrec->bio_flags = 0;
+
+		spin_lock(&em_tree->lock);
+		em = lookup_extent_mapping(em_tree, start, failrec->len);
+		if (em && (em->start > start || em->start + em->len < start)) {
+			free_extent_map(em);
+			em = NULL;
+		}
+		spin_unlock(&em_tree->lock);
+
+		if (!em || IS_ERR(em)) {
+			kfree(failrec);
+			return -EIO;
+		}
+		logical = start - em->start;
+		logical = em->block_start + logical;
+		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
+			logical = em->block_start;
+			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
+		}
+		failrec->logical = logical;
+		free_extent_map(em);
+		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
+				EXTENT_DIRTY, GFP_NOFS);
+		set_state_private(failure_tree, start,
+				 (u64)(unsigned long)failrec);
+	} else {
+		failrec = (struct io_failure_record *)(unsigned long)private;
+	}
+	num_copies = btrfs_num_copies(
+			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
+			      failrec->logical, failrec->len);
+	failrec->last_mirror++;
+	if (!state) {
+		spin_lock(&BTRFS_I(inode)->io_tree.lock);
+		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
+						    failrec->start,
+						    EXTENT_LOCKED);
+		if (state && state->start != failrec->start)
+			state = NULL;
+		spin_unlock(&BTRFS_I(inode)->io_tree.lock);
+	}
+	if (!state || failrec->last_mirror > num_copies) {
+		set_state_private(failure_tree, failrec->start, 0);
+		clear_extent_bits(failure_tree, failrec->start,
+				  failrec->start + failrec->len - 1,
+				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+		kfree(failrec);
+		return -EIO;
+	}
+	bio = bio_alloc(GFP_NOFS, 1);
+	bio->bi_private = state;
+	bio->bi_end_io = failed_bio->bi_end_io;
+	bio->bi_sector = failrec->logical >> 9;
+	bio->bi_bdev = failed_bio->bi_bdev;
+	bio->bi_size = 0;
+
+	bio_add_page(bio, page, failrec->len, start - page_offset(page));
+	if (failed_bio->bi_rw & (1 << BIO_RW))
+		rw = WRITE;
+	else
+		rw = READ;
+
+	BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
+						      failrec->last_mirror,
+						      failrec->bio_flags);
+	return 0;
+}
+
+/*
+ * each time an IO finishes, we do a fast check in the IO failure tree
+ * to see if we need to process or clean up an io_failure_record
+ */
+static int btrfs_clean_io_failures(struct inode *inode, u64 start)
+{
+	u64 private;
+	u64 private_failure;
+	struct io_failure_record *failure;
+	int ret;
+
+	private = 0;
+	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
+			     (u64)-1, 1, EXTENT_DIRTY)) {
+		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
+					start, &private_failure);
+		if (ret == 0) {
+			failure = (struct io_failure_record *)(unsigned long)
+				   private_failure;
+			set_state_private(&BTRFS_I(inode)->io_failure_tree,
+					  failure->start, 0);
+			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
+					  failure->start,
+					  failure->start + failure->len - 1,
+					  EXTENT_DIRTY | EXTENT_LOCKED,
+					  GFP_NOFS);
+			kfree(failure);
+		}
+	}
+	return 0;
+}
+
+/*
+ * when reads are done, we need to check csums to verify the data is correct.
+ * If there's a match, we allow the bio to finish.  If not, we go through
+ * the io_failure_record routines to find good copies
+ */
+static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
+			       struct extent_state *state)
+{
+	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
+	struct inode *inode = page->mapping->host;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	char *kaddr;
+	u64 private = ~(u32)0;
+	int ret;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	u32 csum = ~(u32)0;
+
+	if (PageChecked(page)) {
+		ClearPageChecked(page);
+		goto good;
+	}
+	if (btrfs_test_flag(inode, NODATASUM))
+		return 0;
+
+	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
+	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
+		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
+				  GFP_NOFS);
+		return 0;
+	}
+
+	if (state && state->start == start) {
+		private = state->private;
+		ret = 0;
+	} else {
+		ret = get_state_private(io_tree, start, &private);
+	}
+	kaddr = kmap_atomic(page, KM_USER0);
+	if (ret)
+		goto zeroit;
+
+	csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
+	btrfs_csum_final(csum, (char *)&csum);
+	if (csum != private)
+		goto zeroit;
+
+	kunmap_atomic(kaddr, KM_USER0);
+good:
+	/* if the io failure tree for this inode is non-empty,
+	 * check to see if we've recovered from a failed IO
+	 */
+	btrfs_clean_io_failures(inode, start);
+	return 0;
+
+zeroit:
+	printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
+	       "private %llu\n", page->mapping->host->i_ino,
+	       (unsigned long long)start, csum,
+	       (unsigned long long)private);
+	memset(kaddr + offset, 1, end - start + 1);
+	flush_dcache_page(page);
+	kunmap_atomic(kaddr, KM_USER0);
+	if (private == 0)
+		return 0;
+	return -EIO;
+}
+
+/*
+ * This creates an orphan entry for the given inode in case something goes
+ * wrong in the middle of an unlink/truncate.
+ */
+int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	int ret = 0;
+
+	spin_lock(&root->list_lock);
+
+	/* already on the orphan list, we're good */
+	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
+		spin_unlock(&root->list_lock);
+		return 0;
+	}
+
+	list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
+
+	spin_unlock(&root->list_lock);
+
+	/*
+	 * insert an orphan item to track this unlinked/truncated file
+	 */
+	ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
+
+	return ret;
+}
+
+/*
+ * We have done the truncate/delete so we can go ahead and remove the orphan
+ * item for this particular inode.
+ */
+int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	int ret = 0;
+
+	spin_lock(&root->list_lock);
+
+	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
+		spin_unlock(&root->list_lock);
+		return 0;
+	}
+
+	list_del_init(&BTRFS_I(inode)->i_orphan);
+	if (!trans) {
+		spin_unlock(&root->list_lock);
+		return 0;
+	}
+
+	spin_unlock(&root->list_lock);
+
+	ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
+
+	return ret;
+}
+
+/*
+ * this cleans up any orphans that may be left on the list from the last use
+ * of this root.
+ */
+void btrfs_orphan_cleanup(struct btrfs_root *root)
+{
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	struct btrfs_item *item;
+	struct btrfs_key key, found_key;
+	struct btrfs_trans_handle *trans;
+	struct inode *inode;
+	int ret = 0, nr_unlink = 0, nr_truncate = 0;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return;
+	path->reada = -1;
+
+	key.objectid = BTRFS_ORPHAN_OBJECTID;
+	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
+	key.offset = (u64)-1;
+
+
+	while (1) {
+		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+		if (ret < 0) {
+			printk(KERN_ERR "Error searching slot for orphan: %d"
+			       "\n", ret);
+			break;
+		}
+
+		/*
+		 * ret == 0 means we found what we were searching for, which
+		 * is weird, but possible, so only adjust the path if we didn't
+		 * find the key and see if we have stuff that matches
+		 */
+		if (ret > 0) {
+			if (path->slots[0] == 0)
+				break;
+			path->slots[0]--;
+		}
+
+		/* pull out the item */
+		leaf = path->nodes[0];
+		item = btrfs_item_nr(leaf, path->slots[0]);
+		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+
+		/* make sure the item matches what we want */
+		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
+			break;
+		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
+			break;
+
+		/* release the path since we're done with it */
+		btrfs_release_path(root, path);
+
+		/*
+		 * this is where we are basically btrfs_lookup, without the
+		 * crossing root thing.  we store the inode number in the
+		 * offset of the orphan item.
+		 */
+		inode = btrfs_iget_locked(root->fs_info->sb,
+					  found_key.offset, root);
+		if (!inode)
+			break;
+
+		if (inode->i_state & I_NEW) {
+			BTRFS_I(inode)->root = root;
+
+			/* have to set the location manually */
+			BTRFS_I(inode)->location.objectid = inode->i_ino;
+			BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
+			BTRFS_I(inode)->location.offset = 0;
+
+			btrfs_read_locked_inode(inode);
+			unlock_new_inode(inode);
+		}
+
+		/*
+		 * add this inode to the orphan list so btrfs_orphan_del does
+		 * the proper thing when we hit it
+		 */
+		spin_lock(&root->list_lock);
+		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
+		spin_unlock(&root->list_lock);
+
+		/*
+		 * if this is a bad inode, it means we actually succeeded in
+		 * removing the inode, but not the orphan record, which means
+		 * we need to delete the orphan item manually since iput will
+		 * just do a destroy_inode
+		 */
+		if (is_bad_inode(inode)) {
+			trans = btrfs_start_transaction(root, 1);
+			btrfs_orphan_del(trans, inode);
+			btrfs_end_transaction(trans, root);
+			iput(inode);
+			continue;
+		}
+
+		/* if we have links, this was a truncate, let's do that */
+		if (inode->i_nlink) {
+			nr_truncate++;
+			btrfs_truncate(inode);
+		} else {
+			nr_unlink++;
+		}
+
+		/* this will do delete_inode and everything for us */
+		iput(inode);
+	}
+
+	if (nr_unlink)
+		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
+	if (nr_truncate)
+		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
+
+	btrfs_free_path(path);
+}
+
+/*
+ * read an inode from the btree into the in-memory inode
+ */
+void btrfs_read_locked_inode(struct inode *inode)
+{
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	struct btrfs_inode_item *inode_item;
+	struct btrfs_timespec *tspec;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_key location;
+	u64 alloc_group_block;
+	u32 rdev;
+	int ret;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
+
+	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
+	if (ret)
+		goto make_bad;
+
+	leaf = path->nodes[0];
+	inode_item = btrfs_item_ptr(leaf, path->slots[0],
+				    struct btrfs_inode_item);
+
+	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
+	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
+	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
+	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
+	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
+
+	tspec = btrfs_inode_atime(inode_item);
+	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
+	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
+
+	tspec = btrfs_inode_mtime(inode_item);
+	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
+	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
+
+	tspec = btrfs_inode_ctime(inode_item);
+	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
+	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
+
+	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
+	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
+	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
+	inode->i_generation = BTRFS_I(inode)->generation;
+	inode->i_rdev = 0;
+	rdev = btrfs_inode_rdev(leaf, inode_item);
+
+	BTRFS_I(inode)->index_cnt = (u64)-1;
+	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
+
+	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
+	BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
+						alloc_group_block, 0);
+	btrfs_free_path(path);
+	inode_item = NULL;
+
+	switch (inode->i_mode & S_IFMT) {
+	case S_IFREG:
+		inode->i_mapping->a_ops = &btrfs_aops;
+		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
+		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+		inode->i_fop = &btrfs_file_operations;
+		inode->i_op = &btrfs_file_inode_operations;
+		break;
+	case S_IFDIR:
+		inode->i_fop = &btrfs_dir_file_operations;
+		if (root == root->fs_info->tree_root)
+			inode->i_op = &btrfs_dir_ro_inode_operations;
+		else
+			inode->i_op = &btrfs_dir_inode_operations;
+		break;
+	case S_IFLNK:
+		inode->i_op = &btrfs_symlink_inode_operations;
+		inode->i_mapping->a_ops = &btrfs_symlink_aops;
+		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
+		break;
+	default:
+		init_special_inode(inode, inode->i_mode, rdev);
+		break;
+	}
+	return;
+
+make_bad:
+	btrfs_free_path(path);
+	make_bad_inode(inode);
+}
+
+/*
+ * given a leaf and an inode, copy the inode fields into the leaf
+ */
+static void fill_inode_item(struct btrfs_trans_handle *trans,
+			    struct extent_buffer *leaf,
+			    struct btrfs_inode_item *item,
+			    struct inode *inode)
+{
+	btrfs_set_inode_uid(leaf, item, inode->i_uid);
+	btrfs_set_inode_gid(leaf, item, inode->i_gid);
+	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
+	btrfs_set_inode_mode(leaf, item, inode->i_mode);
+	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
+
+	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
+			       inode->i_atime.tv_sec);
+	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
+				inode->i_atime.tv_nsec);
+
+	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
+			       inode->i_mtime.tv_sec);
+	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
+				inode->i_mtime.tv_nsec);
+
+	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
+			       inode->i_ctime.tv_sec);
+	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
+				inode->i_ctime.tv_nsec);
+
+	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
+	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
+	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
+	btrfs_set_inode_transid(leaf, item, trans->transid);
+	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
+	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
+	btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
+}
+
+/*
+ * copy everything in the in-memory inode into the btree.
+ */
+noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root, struct inode *inode)
+{
+	struct btrfs_inode_item *inode_item;
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	int ret;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+	ret = btrfs_lookup_inode(trans, root, path,
+				 &BTRFS_I(inode)->location, 1);
+	if (ret) {
+		if (ret > 0)
+			ret = -ENOENT;
+		goto failed;
+	}
+
+	leaf = path->nodes[0];
+	inode_item = btrfs_item_ptr(leaf, path->slots[0],
+				  struct btrfs_inode_item);
+
+	fill_inode_item(trans, leaf, inode_item, inode);
+	btrfs_mark_buffer_dirty(leaf);
+	btrfs_set_inode_last_trans(trans, inode);
+	ret = 0;
+failed:
+	btrfs_free_path(path);
+	return ret;
+}
+
+
+/*
+ * unlink helper that gets used here in inode.c and in the tree logging
+ * recovery code.  It removes a link in a directory with a given name, and
+ * also drops the back refs from the inode to the directory
+ */
+int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *root,
+		       struct inode *dir, struct inode *inode,
+		       const char *name, int name_len)
+{
+	struct btrfs_path *path;
+	int ret = 0;
+	struct extent_buffer *leaf;
+	struct btrfs_dir_item *di;
+	struct btrfs_key key;
+	u64 index;
+
+	path = btrfs_alloc_path();
+	if (!path) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+				    name, name_len, -1);
+	if (IS_ERR(di)) {
+		ret = PTR_ERR(di);
+		goto err;
+	}
+	if (!di) {
+		ret = -ENOENT;
+		goto err;
+	}
+	leaf = path->nodes[0];
+	btrfs_dir_item_key_to_cpu(leaf, di, &key);
+	ret = btrfs_delete_one_dir_name(trans, root, path, di);
+	if (ret)
+		goto err;
+	btrfs_release_path(root, path);
+
+	ret = btrfs_del_inode_ref(trans, root, name, name_len,
+				  inode->i_ino,
+				  dir->i_ino, &index);
+	if (ret) {
+		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
+		       "inode %lu parent %lu\n", name_len, name,
+		       inode->i_ino, dir->i_ino);
+		goto err;
+	}
+
+	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
+					 index, name, name_len, -1);
+	if (IS_ERR(di)) {
+		ret = PTR_ERR(di);
+		goto err;
+	}
+	if (!di) {
+		ret = -ENOENT;
+		goto err;
+	}
+	ret = btrfs_delete_one_dir_name(trans, root, path, di);
+	btrfs_release_path(root, path);
+
+	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
+					 inode, dir->i_ino);
+	BUG_ON(ret != 0 && ret != -ENOENT);
+	if (ret != -ENOENT)
+		BTRFS_I(dir)->log_dirty_trans = trans->transid;
+
+	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
+					   dir, index);
+	BUG_ON(ret);
+err:
+	btrfs_free_path(path);
+	if (ret)
+		goto out;
+
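+	/*
+	 * directory i_size counts each name twice, once for the dir item and
+	 * once for the dir index item, so back out name_len * 2
+	 */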
+	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
+	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+	btrfs_update_inode(trans, root, dir);
+	btrfs_drop_nlink(inode);
+	ret = btrfs_update_inode(trans, root, inode);
+	dir->i_sb->s_dirt = 1;
+out:
+	return ret;
+}
+
+static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct btrfs_root *root;
+	struct btrfs_trans_handle *trans;
+	struct inode *inode = dentry->d_inode;
+	int ret;
+	unsigned long nr = 0;
+
+	root = BTRFS_I(dir)->root;
+
+	ret = btrfs_check_free_space(root, 1, 1);
+	if (ret)
+		goto fail;
+
+	trans = btrfs_start_transaction(root, 1);
+
+	btrfs_set_trans_block_group(trans, dir);
+	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
+				 dentry->d_name.name, dentry->d_name.len);
+
+	if (inode->i_nlink == 0)
+		ret = btrfs_orphan_add(trans, inode);
+
+	nr = trans->blocks_used;
+
+	btrfs_end_transaction_throttle(trans, root);
+fail:
+	btrfs_btree_balance_dirty(root, nr);
+	return ret;
+}
+
+static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+	int err = 0;
+	int ret;
+	struct btrfs_root *root = BTRFS_I(dir)->root;
+	struct btrfs_trans_handle *trans;
+	unsigned long nr = 0;
+
+	/*
+	 * the FIRST_FREE_OBJECTID check makes sure we don't try to rmdir
+	 * the root of a subvolume or snapshot
+	 */
+	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
+	    inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+		return -ENOTEMPTY;
+	}
+
+	ret = btrfs_check_free_space(root, 1, 1);
+	if (ret)
+		goto fail;
+
+	trans = btrfs_start_transaction(root, 1);
+	btrfs_set_trans_block_group(trans, dir);
+
+	err = btrfs_orphan_add(trans, inode);
+	if (err)
+		goto fail_trans;
+
+	/* now the directory is empty */
+	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
+				 dentry->d_name.name, dentry->d_name.len);
+	if (!err)
+		btrfs_i_size_write(inode, 0);
+
+fail_trans:
+	nr = trans->blocks_used;
+	ret = btrfs_end_transaction_throttle(trans, root);
+fail:
+	btrfs_btree_balance_dirty(root, nr);
+
+	if (ret && !err)
+		err = ret;
+	return err;
+}
+
+#if 0
+/*
+ * when truncating bytes in a file, it is possible to avoid reading
+ * the leaves that contain only checksum items.  This can be the
+ * majority of the IO required to delete a large file, but it must
+ * be done carefully.
+ *
+ * The keys in the level just above the leaves are checked to make sure
+ * the lowest key in a given leaf is a csum key, and starts at an offset
+ * after the new size.
+ *
+ * Then the key for the next leaf is checked to make sure it also has
+ * a checksum item for the same file.  If it does, we know our target leaf
+ * contains only checksum items, and it can be safely freed without reading
+ * it.
+ *
+ * This is just an optimization targeted at large files.  It may do
+ * nothing.  It will return 0 unless things went badly.
+ */
+static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
+				     struct btrfs_root *root,
+				     struct btrfs_path *path,
+				     struct inode *inode, u64 new_size)
+{
+	struct btrfs_key key;
+	int ret;
+	int nritems;
+	struct btrfs_key found_key;
+	struct btrfs_key other_key;
+	struct btrfs_leaf_ref *ref;
+	u64 leaf_gen;
+	u64 leaf_start;
+
+	path->lowest_level = 1;
+	key.objectid = inode->i_ino;
+	key.type = BTRFS_CSUM_ITEM_KEY;
+	key.offset = new_size;
+again:
+	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+	if (ret < 0)
+		goto out;
+
+	if (path->nodes[1] == NULL) {
+		ret = 0;
+		goto out;
+	}
+	ret = 0;
+	btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
+	nritems = btrfs_header_nritems(path->nodes[1]);
+
+	if (!nritems)
+		goto out;
+
+	if (path->slots[1] >= nritems)
+		goto next_node;
+
+	/* did we find a key greater than anything we want to delete? */
+	if (found_key.objectid > inode->i_ino ||
+	   (found_key.objectid == inode->i_ino && found_key.type > key.type))
+		goto out;
+
+	/* we check the next key in the node to make sure the leaf contains
+	 * only checksum items.  This comparison doesn't work if our
+	 * leaf is the last one in the node
+	 */
+	if (path->slots[1] + 1 >= nritems) {
+next_node:
+		/* search forward from the last key in the node, this
+		 * will bring us into the next node in the tree
+		 */
+		btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
+
+		/* unlikely, but we inc below, so check to be safe */
+		if (found_key.offset == (u64)-1)
+			goto out;
+
+		/* search_forward needs a path with locks held, do the
+		 * search again for the original key.  It is possible
+		 * this will race with a balance and return a path that
+		 * we could modify, but this drop is just an optimization
+		 * and is allowed to miss some leaves.
+		 */
+		btrfs_release_path(root, path);
+		found_key.offset++;
+
+		/* setup a max key for search_forward */
+		other_key.offset = (u64)-1;
+		other_key.type = key.type;
+		other_key.objectid = key.objectid;
+
+		path->keep_locks = 1;
+		ret = btrfs_search_forward(root, &found_key, &other_key,
+					   path, 0, 0);
+		path->keep_locks = 0;
+		if (ret || found_key.objectid != key.objectid ||
+		    found_key.type != key.type) {
+			ret = 0;
+			goto out;
+		}
+
+		key.offset = found_key.offset;
+		btrfs_release_path(root, path);
+		cond_resched();
+		goto again;
+	}
+
+	/* we know there's one more slot after us in the tree,
+	 * read that key so we can verify it is also a checksum item
+	 */
+	btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
+
+	if (found_key.objectid < inode->i_ino)
+		goto next_key;
+
+	if (found_key.type != key.type || found_key.offset < new_size)
+		goto next_key;
+
+	/*
+	 * if the key for the next leaf isn't a csum key from this objectid,
+	 * we can't be sure there aren't good items inside this leaf.
+	 * Bail out
+	 */
+	if (other_key.objectid != inode->i_ino || other_key.type != key.type)
+		goto out;
+
+	leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
+	leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
+	/*
+	 * it is safe to delete this leaf, it contains only
+	 * csum items from this inode at an offset >= new_size
+	 */
+	ret = btrfs_del_leaf(trans, root, path, leaf_start);
+	BUG_ON(ret);
+
+	if (root->ref_cows && leaf_gen < trans->transid) {
+		ref = btrfs_alloc_leaf_ref(root, 0);
+		if (ref) {
+			ref->root_gen = root->root_key.offset;
+			ref->bytenr = leaf_start;
+			ref->owner = 0;
+			ref->generation = leaf_gen;
+			ref->nritems = 0;
+
+			ret = btrfs_add_leaf_ref(root, ref, 0);
+			WARN_ON(ret);
+			btrfs_free_leaf_ref(root, ref);
+		} else {
+			WARN_ON(1);
+		}
+	}
+next_key:
+	btrfs_release_path(root, path);
+
+	if (other_key.objectid == inode->i_ino &&
+	    other_key.type == key.type && other_key.offset > key.offset) {
+		key.offset = other_key.offset;
+		cond_resched();
+		goto again;
+	}
+	ret = 0;
+out:
+	/* fixup any changes we've made to the path */
+	path->lowest_level = 0;
+	path->keep_locks = 0;
+	btrfs_release_path(root, path);
+	return ret;
+}
+
+#endif
+
+/*
+ * this can truncate away extent items, csum items and directory items.
+ * It starts at a high offset and removes keys until it can't find
+ * any higher than new_size
+ *
+ * csum items that cross the new i_size are truncated to the new size
+ * as well.
+ *
+ * min_type is the minimum key type to truncate down to.  If set to 0, this
+ * will kill all the items on this inode, including the INODE_ITEM_KEY.
+ */
+noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+					struct btrfs_root *root,
+					struct inode *inode,
+					u64 new_size, u32 min_type)
+{
+	int ret;
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct btrfs_key found_key;
+	u32 found_type;
+	struct extent_buffer *leaf;
+	struct btrfs_file_extent_item *fi;
+	u64 extent_start = 0;
+	u64 extent_num_bytes = 0;
+	u64 item_end = 0;
+	u64 root_gen = 0;
+	u64 root_owner = 0;
+	int found_extent;
+	int del_item;
+	int pending_del_nr = 0;
+	int pending_del_slot = 0;
+	int extent_type = -1;
+	int encoding;
+	u64 mask = root->sectorsize - 1;
+
+	if (root->ref_cows)
+		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
+	path = btrfs_alloc_path();
+	path->reada = -1;
+	BUG_ON(!path);
+
+	/* FIXME, add redo link to tree so we don't leak on crash */
+	key.objectid = inode->i_ino;
+	key.offset = (u64)-1;
+	key.type = (u8)-1;
+
+	btrfs_init_path(path);
+
+search_again:
+	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+	if (ret < 0)
+		goto error;
+
+	if (ret > 0) {
+		/* there are no items in the tree for us to truncate, we're
+		 * done
+		 */
+		if (path->slots[0] == 0) {
+			ret = 0;
+			goto error;
+		}
+		path->slots[0]--;
+	}
+
+	while (1) {
+		fi = NULL;
+		leaf = path->nodes[0];
+		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+		found_type = btrfs_key_type(&found_key);
+		encoding = 0;
+
+		if (found_key.objectid != inode->i_ino)
+			break;
+
+		if (found_type < min_type)
+			break;
+
+		item_end = found_key.offset;
+		if (found_type == BTRFS_EXTENT_DATA_KEY) {
+			fi = btrfs_item_ptr(leaf, path->slots[0],
+					    struct btrfs_file_extent_item);
+			extent_type = btrfs_file_extent_type(leaf, fi);
+			encoding = btrfs_file_extent_compression(leaf, fi);
+			encoding |= btrfs_file_extent_encryption(leaf, fi);
+			encoding |= btrfs_file_extent_other_encoding(leaf, fi);
+
+			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
+				item_end +=
+				    btrfs_file_extent_num_bytes(leaf, fi);
+			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
+				item_end += btrfs_file_extent_inline_len(leaf,
+									 fi);
+			}
+			item_end--;
+		}
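+		/*
+		 * this item ends before the new size, so it must be kept;
+		 * step the search key down to the next lower item type and
+		 * continue
+		 */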
+		if (item_end < new_size) {
+			if (found_type == BTRFS_DIR_ITEM_KEY)
+				found_type = BTRFS_INODE_ITEM_KEY;
+			else if (found_type == BTRFS_EXTENT_ITEM_KEY)
+				found_type = BTRFS_EXTENT_DATA_KEY;
+			else if (found_type == BTRFS_EXTENT_DATA_KEY)
+				found_type = BTRFS_XATTR_ITEM_KEY;
+			else if (found_type == BTRFS_XATTR_ITEM_KEY)
+				found_type = BTRFS_INODE_REF_KEY;
+			else if (found_type)
+				found_type--;
+			else
+				break;
+			btrfs_set_key_type(&key, found_type);
+			goto next;
+		}
+		if (found_key.offset >= new_size)
+			del_item = 1;
+		else
+			del_item = 0;
+		found_extent = 0;
+
+		/* FIXME, shrink the extent if the ref count is only 1 */
+		if (found_type != BTRFS_EXTENT_DATA_KEY)
+			goto delete;
+
+		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
+			u64 num_dec;
+			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
+			if (!del_item && !encoding) {
+				u64 orig_num_bytes =
+					btrfs_file_extent_num_bytes(leaf, fi);
+				extent_num_bytes = new_size -
+					found_key.offset + root->sectorsize - 1;
+				extent_num_bytes = extent_num_bytes &
+					~((u64)root->sectorsize - 1);
+				btrfs_set_file_extent_num_bytes(leaf, fi,
+							 extent_num_bytes);
+				num_dec = (orig_num_bytes -
+					   extent_num_bytes);
+				if (root->ref_cows && extent_start != 0)
+					inode_sub_bytes(inode, num_dec);
+				btrfs_mark_buffer_dirty(leaf);
+			} else {
+				extent_num_bytes =
+					btrfs_file_extent_disk_num_bytes(leaf,
+									 fi);
+				/* FIXME blocksize != 4096 */
+				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
+				if (extent_start != 0) {
+					found_extent = 1;
+					if (root->ref_cows)
+						inode_sub_bytes(inode, num_dec);
+				}
+				root_gen = btrfs_header_generation(leaf);
+				root_owner = btrfs_header_owner(leaf);
+			}
+		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
+			/*
+			 * we can't truncate inline items that have had
+			 * special encodings
+			 */
+			if (!del_item &&
+			    btrfs_file_extent_compression(leaf, fi) == 0 &&
+			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
+			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
+				u32 size = new_size - found_key.offset;
+
+				if (root->ref_cows) {
+					inode_sub_bytes(inode, item_end + 1 -
+							new_size);
+				}
+				size =
+				    btrfs_file_extent_calc_inline_size(size);
+				ret = btrfs_truncate_item(trans, root, path,
+							  size, 1);
+				BUG_ON(ret);
+			} else if (root->ref_cows) {
+				inode_sub_bytes(inode, item_end + 1 -
+						found_key.offset);
+			}
+		}
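+		/*
+		 * deletions are batched: contiguous slots accumulate in
+		 * pending_del_slot/pending_del_nr and are removed together
+		 * with one btrfs_del_items call
+		 */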
+delete:
+		if (del_item) {
+			if (!pending_del_nr) {
+				/* no pending yet, add ourselves */
+				pending_del_slot = path->slots[0];
+				pending_del_nr = 1;
+			} else if (pending_del_nr &&
+				   path->slots[0] + 1 == pending_del_slot) {
+				/* hop on the pending chunk */
+				pending_del_nr++;
+				pending_del_slot = path->slots[0];
+			} else {
+				BUG();
+			}
+		} else {
+			break;
+		}
+		if (found_extent) {
+			ret = btrfs_free_extent(trans, root, extent_start,
+						extent_num_bytes,
+						leaf->start, root_owner,
+						root_gen, inode->i_ino, 0);
+			BUG_ON(ret);
+		}
+next:
+		if (path->slots[0] == 0) {
+			if (pending_del_nr)
+				goto del_pending;
+			btrfs_release_path(root, path);
+			goto search_again;
+		}
+
+		path->slots[0]--;
+		if (pending_del_nr &&
+		    path->slots[0] + 1 != pending_del_slot) {
+			struct btrfs_key debug;
+del_pending:
+			btrfs_item_key_to_cpu(path->nodes[0], &debug,
+					      pending_del_slot);
+			ret = btrfs_del_items(trans, root, path,
+					      pending_del_slot,
+					      pending_del_nr);
+			BUG_ON(ret);
+			pending_del_nr = 0;
+			btrfs_release_path(root, path);
+			goto search_again;
+		}
+	}
+	ret = 0;
+error:
+	if (pending_del_nr) {
+		ret = btrfs_del_items(trans, root, path, pending_del_slot,
+				      pending_del_nr);
+	}
+	btrfs_free_path(path);
+	inode->i_sb->s_dirt = 1;
+	return ret;
+}
+
+/*
+ * taken from block_truncate_page, but does COW as it zeros out
+ * any bytes left in the last page in the file.
+ */
+static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
+{
+	struct inode *inode = mapping->host;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	struct btrfs_ordered_extent *ordered;
+	char *kaddr;
+	u32 blocksize = root->sectorsize;
+	pgoff_t index = from >> PAGE_CACHE_SHIFT;
+	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	struct page *page;
+	int ret = 0;
+	u64 page_start;
+	u64 page_end;
+
+	if ((offset & (blocksize - 1)) == 0)
+		goto out;
+
+	ret = -ENOMEM;
+again:
+	page = grab_cache_page(mapping, index);
+	if (!page)
+		goto out;
+
+	page_start = page_offset(page);
+	page_end = page_start + PAGE_CACHE_SIZE - 1;
+
+	if (!PageUptodate(page)) {
+		ret = btrfs_readpage(NULL, page);
+		lock_page(page);
+		if (page->mapping != mapping) {
+			unlock_page(page);
+			page_cache_release(page);
+			goto again;
+		}
+		if (!PageUptodate(page)) {
+			ret = -EIO;
+			goto out_unlock;
+		}
+	}
+	wait_on_page_writeback(page);
+
+	lock_extent(io_tree, page_start, page_end, GFP_NOFS);
+	set_page_extent_mapped(page);
+
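+	/*
+	 * we can't set the delalloc bits while an ordered extent is still
+	 * outstanding for this page.  Drop our locks, wait for it to
+	 * finish and try again
+	 */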
+	ordered = btrfs_lookup_ordered_extent(inode, page_start);
+	if (ordered) {
+		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+		unlock_page(page);
+		page_cache_release(page);
+		btrfs_start_ordered_extent(inode, ordered, 1);
+		btrfs_put_ordered_extent(ordered);
+		goto again;
+	}
+
+	btrfs_set_extent_delalloc(inode, page_start, page_end);
+	ret = 0;
+	if (offset != PAGE_CACHE_SIZE) {
+		kaddr = kmap(page);
+		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
+		flush_dcache_page(page);
+		kunmap(page);
+	}
+	ClearPageChecked(page);
+	set_page_dirty(page);
+	unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+
+out_unlock:
+	unlock_page(page);
+	page_cache_release(page);
+out:
+	return ret;
+}
+
+int btrfs_cont_expand(struct inode *inode, loff_t size)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	struct extent_map *em;
+	u64 mask = root->sectorsize - 1;
+	u64 hole_start = (inode->i_size + mask) & ~mask;
+	u64 block_end = (size + mask) & ~mask;
+	u64 last_byte;
+	u64 cur_offset;
+	u64 hole_size;
+	int err;
+
+	if (size <= hole_start)
+		return 0;
+
+	err = btrfs_check_free_space(root, 1, 0);
+	if (err)
+		return err;
+
+	btrfs_truncate_page(inode->i_mapping, inode->i_size);
+
+	while (1) {
+		struct btrfs_ordered_extent *ordered;
+		btrfs_wait_ordered_range(inode, hole_start,
+					 block_end - hole_start);
+		lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
+		if (!ordered)
+			break;
+		unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+		btrfs_put_ordered_extent(ordered);
+	}
+
+	trans = btrfs_start_transaction(root, 1);
+	btrfs_set_trans_block_group(trans, inode);
+
+	cur_offset = hole_start;
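+	/*
+	 * walk the range being expanded and insert file extent items with
+	 * a disk bytenr of zero for any vacant regions, so the hole is
+	 * recorded explicitly in the tree
+	 */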
+	while (1) {
+		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
+				block_end - cur_offset, 0);
+		BUG_ON(IS_ERR(em) || !em);
+		last_byte = min(extent_map_end(em), block_end);
+		last_byte = (last_byte + mask) & ~mask;
+		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
+			u64 hint_byte = 0;
+			hole_size = last_byte - cur_offset;
+			err = btrfs_drop_extents(trans, root, inode,
+						 cur_offset,
+						 cur_offset + hole_size,
+						 cur_offset, &hint_byte);
+			if (err)
+				break;
+			err = btrfs_insert_file_extent(trans, root,
+					inode->i_ino, cur_offset, 0,
+					0, hole_size, 0, hole_size,
+					0, 0, 0);
+			btrfs_drop_extent_cache(inode, hole_start,
+					last_byte - 1, 0);
+		}
+		free_extent_map(em);
+		cur_offset = last_byte;
+		if (err || cur_offset >= block_end)
+			break;
+	}
+
+	btrfs_end_transaction(trans, root);
+	unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+	return err;
+}
+
+static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+	struct inode *inode = dentry->d_inode;
+	int err;
+
+	err = inode_change_ok(inode, attr);
+	if (err)
+		return err;
+
+	if (S_ISREG(inode->i_mode) &&
+	    attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
+		err = btrfs_cont_expand(inode, attr->ia_size);
+		if (err)
+			return err;
+	}
+
+	err = inode_setattr(inode, attr);
+
+	if (!err && ((attr->ia_valid & ATTR_MODE)))
+		err = btrfs_acl_chmod(inode);
+	return err;
+}
+
+void btrfs_delete_inode(struct inode *inode)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	unsigned long nr;
+	int ret;
+
+	truncate_inode_pages(&inode->i_data, 0);
+	if (is_bad_inode(inode)) {
+		btrfs_orphan_del(NULL, inode);
+		goto no_delete;
+	}
+	btrfs_wait_ordered_range(inode, 0, (u64)-1);
+
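+	/*
+	 * set the in-memory size to zero and drop every item the inode
+	 * owns from the tree before removing its orphan record
+	 */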
+	btrfs_i_size_write(inode, 0);
+	trans = btrfs_join_transaction(root, 1);
+
+	btrfs_set_trans_block_group(trans, inode);
+	ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
+	if (ret) {
+		btrfs_orphan_del(NULL, inode);
+		goto no_delete_lock;
+	}
+
+	btrfs_orphan_del(trans, inode);
+
+	nr = trans->blocks_used;
+	clear_inode(inode);
+
+	btrfs_end_transaction(trans, root);
+	btrfs_btree_balance_dirty(root, nr);
+	return;
+
+no_delete_lock:
+	nr = trans->blocks_used;
+	btrfs_end_transaction(trans, root);
+	btrfs_btree_balance_dirty(root, nr);
+no_delete:
+	clear_inode(inode);
+}
+
+/*
+ * this returns the key found in the dir entry in the location pointer.
+ * If no dir entries were found, location->objectid is 0.
+ */
+static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
+			       struct btrfs_key *location)
+{
+	const char *name = dentry->d_name.name;
+	int namelen = dentry->d_name.len;
+	struct btrfs_dir_item *di;
+	struct btrfs_path *path;
+	struct btrfs_root *root = BTRFS_I(dir)->root;
+	int ret = 0;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+
+	di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
+				    namelen, 0);
+	if (IS_ERR(di))
+		ret = PTR_ERR(di);
+
+	if (!di || IS_ERR(di))
+		goto out_err;
+
+	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
+out:
+	btrfs_free_path(path);
+	return ret;
+out_err:
+	location->objectid = 0;
+	goto out;
+}
+
+/*
+ * when we hit a tree root in a directory, the btrfs part of the inode
+ * needs to be changed to reflect the root directory of the tree root.  This
+ * is kind of like crossing a mount point.
+ */
+static int fixup_tree_root_location(struct btrfs_root *root,
+			     struct btrfs_key *location,
+			     struct btrfs_root **sub_root,
+			     struct dentry *dentry)
+{
+	struct btrfs_root_item *ri;
+
+	if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
+		return 0;
+	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
+		return 0;
+
+	*sub_root = btrfs_read_fs_root(root->fs_info, location,
+					dentry->d_name.name,
+					dentry->d_name.len);
+	if (IS_ERR(*sub_root))
+		return PTR_ERR(*sub_root);
+
+	ri = &(*sub_root)->root_item;
+	location->objectid = btrfs_root_dirid(ri);
+	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
+	location->offset = 0;
+
+	return 0;
+}
+
+static noinline void init_btrfs_i(struct inode *inode)
+{
+	struct btrfs_inode *bi = BTRFS_I(inode);
+
+	bi->i_acl = NULL;
+	bi->i_default_acl = NULL;
+
+	bi->generation = 0;
+	bi->sequence = 0;
+	bi->last_trans = 0;
+	bi->logged_trans = 0;
+	bi->delalloc_bytes = 0;
+	bi->disk_i_size = 0;
+	bi->flags = 0;
+	bi->index_cnt = (u64)-1;
+	bi->log_dirty_trans = 0;
+	extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
+	extent_io_tree_init(&BTRFS_I(inode)->io_tree,
+			     inode->i_mapping, GFP_NOFS);
+	extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
+			     inode->i_mapping, GFP_NOFS);
+	INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
+	btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
+	mutex_init(&BTRFS_I(inode)->extent_mutex);
+	mutex_init(&BTRFS_I(inode)->log_mutex);
+}
+
+static int btrfs_init_locked_inode(struct inode *inode, void *p)
+{
+	struct btrfs_iget_args *args = p;
+	inode->i_ino = args->ino;
+	init_btrfs_i(inode);
+	BTRFS_I(inode)->root = args->root;
+	return 0;
+}
+
+static int btrfs_find_actor(struct inode *inode, void *opaque)
+{
+	struct btrfs_iget_args *args = opaque;
+	return args->ino == inode->i_ino &&
+		args->root == BTRFS_I(inode)->root;
+}
+
+struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
+			    struct btrfs_root *root, int wait)
+{
+	struct inode *inode;
+	struct btrfs_iget_args args;
+	args.ino = objectid;
+	args.root = root;
+
+	if (wait) {
+		inode = ilookup5(s, objectid, btrfs_find_actor,
+				 (void *)&args);
+	} else {
+		inode = ilookup5_nowait(s, objectid, btrfs_find_actor,
+					(void *)&args);
+	}
+	return inode;
+}
+
+struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
+				struct btrfs_root *root)
+{
+	struct inode *inode;
+	struct btrfs_iget_args args;
+	args.ino = objectid;
+	args.root = root;
+
+	inode = iget5_locked(s, objectid, btrfs_find_actor,
+			     btrfs_init_locked_inode,
+			     (void *)&args);
+	return inode;
+}
+
+/* Get an inode object given its location and corresponding root.
+ * Sets *is_new to 1 if the inode was read from disk, 0 if it was
+ * already cached.
+ */
+struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
+			 struct btrfs_root *root, int *is_new)
+{
+	struct inode *inode;
+
+	inode = btrfs_iget_locked(s, location->objectid, root);
+	if (!inode)
+		return ERR_PTR(-EACCES);
+
+	if (inode->i_state & I_NEW) {
+		BTRFS_I(inode)->root = root;
+		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
+		btrfs_read_locked_inode(inode);
+		unlock_new_inode(inode);
+		if (is_new)
+			*is_new = 1;
+	} else {
+		if (is_new)
+			*is_new = 0;
+	}
+
+	return inode;
+}
+
+struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode;
+	struct btrfs_inode *bi = BTRFS_I(dir);
+	struct btrfs_root *root = bi->root;
+	struct btrfs_root *sub_root = root;
+	struct btrfs_key location;
+	int ret, new;
+
+	if (dentry->d_name.len > BTRFS_NAME_LEN)
+		return ERR_PTR(-ENAMETOOLONG);
+
+	ret = btrfs_inode_by_name(dir, dentry, &location);
+
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	inode = NULL;
+	if (location.objectid) {
+		ret = fixup_tree_root_location(root, &location, &sub_root,
+						dentry);
+		if (ret < 0)
+			return ERR_PTR(ret);
+		if (ret > 0)
+			return ERR_PTR(-ENOENT);
+		inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
+		if (IS_ERR(inode))
+			return ERR_CAST(inode);
+	}
+	return inode;
+}
+
+static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
+				   struct nameidata *nd)
+{
+	struct inode *inode;
+
+	if (dentry->d_name.len > BTRFS_NAME_LEN)
+		return ERR_PTR(-ENAMETOOLONG);
+
+	inode = btrfs_lookup_dentry(dir, dentry);
+	if (IS_ERR(inode))
+		return ERR_CAST(inode);
+
+	return d_splice_alias(inode, dentry);
+}
+
+static unsigned char btrfs_filetype_table[] = {
+	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+};
+
+static int btrfs_real_readdir(struct file *filp, void *dirent,
+			      filldir_t filldir)
+{
+	struct inode *inode = filp->f_dentry->d_inode;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_item *item;
+	struct btrfs_dir_item *di;
+	struct btrfs_key key;
+	struct btrfs_key found_key;
+	struct btrfs_path *path;
+	int ret;
+	u32 nritems;
+	struct extent_buffer *leaf;
+	int slot;
+	int advance;
+	unsigned char d_type;
+	int over = 0;
+	u32 di_cur;
+	u32 di_total;
+	u32 di_len;
+	int key_type = BTRFS_DIR_INDEX_KEY;
+	char tmp_name[32];
+	char *name_ptr;
+	int name_len;
+
+	/* FIXME, use a real flag for deciding about the key type */
+	if (root->fs_info->tree_root == root)
+		key_type = BTRFS_DIR_ITEM_KEY;
+
+	/* special case for "." */
+	if (filp->f_pos == 0) {
+		over = filldir(dirent, ".", 1,
+			       1, inode->i_ino,
+			       DT_DIR);
+		if (over)
+			return 0;
+		filp->f_pos = 1;
+	}
+	/* special case for .., just use the back ref */
+	if (filp->f_pos == 1) {
+		u64 pino = parent_ino(filp->f_path.dentry);
+		over = filldir(dirent, "..", 2,
+			       2, pino, DT_DIR);
+		if (over)
+			return 0;
+		filp->f_pos = 2;
+	}
+	path = btrfs_alloc_path();
+	path->reada = 2;
+
+	btrfs_set_key_type(&key, key_type);
+	key.offset = filp->f_pos;
+	key.objectid = inode->i_ino;
+
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto err;
+	advance = 0;
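+	/*
+	 * walk forward through the directory items, moving to the next
+	 * leaf whenever we run off the end of the current one
+	 */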
+
+	while (1) {
+		leaf = path->nodes[0];
+		nritems = btrfs_header_nritems(leaf);
+		slot = path->slots[0];
+		if (advance || slot >= nritems) {
+			if (slot >= nritems - 1) {
+				ret = btrfs_next_leaf(root, path);
+				if (ret)
+					break;
+				leaf = path->nodes[0];
+				nritems = btrfs_header_nritems(leaf);
+				slot = path->slots[0];
+			} else {
+				slot++;
+				path->slots[0]++;
+			}
+		}
+
+		advance = 1;
+		item = btrfs_item_nr(leaf, slot);
+		btrfs_item_key_to_cpu(leaf, &found_key, slot);
+
+		if (found_key.objectid != key.objectid)
+			break;
+		if (btrfs_key_type(&found_key) != key_type)
+			break;
+		if (found_key.offset < filp->f_pos)
+			continue;
+
+		filp->f_pos = found_key.offset;
+
+		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
+		di_cur = 0;
+		di_total = btrfs_item_size(leaf, item);
+
+		while (di_cur < di_total) {
+			struct btrfs_key location;
+
+			name_len = btrfs_dir_name_len(leaf, di);
+			if (name_len <= sizeof(tmp_name)) {
+				name_ptr = tmp_name;
+			} else {
+				name_ptr = kmalloc(name_len, GFP_NOFS);
+				if (!name_ptr) {
+					ret = -ENOMEM;
+					goto err;
+				}
+			}
+			read_extent_buffer(leaf, name_ptr,
+					   (unsigned long)(di + 1), name_len);
+
+			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
+			btrfs_dir_item_key_to_cpu(leaf, di, &location);
+
+			/* is this a reference to our own snapshot? If so
+			 * skip it
+			 */
+			if (location.type == BTRFS_ROOT_ITEM_KEY &&
+			    location.objectid == root->root_key.objectid) {
+				over = 0;
+				goto skip;
+			}
+			over = filldir(dirent, name_ptr, name_len,
+				       found_key.offset, location.objectid,
+				       d_type);
+
+skip:
+			if (name_ptr != tmp_name)
+				kfree(name_ptr);
+
+			if (over)
+				goto nopos;
+			di_len = btrfs_dir_name_len(leaf, di) +
+				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
+			di_cur += di_len;
+			di = (struct btrfs_dir_item *)((char *)di + di_len);
+		}
+	}
+
+	/* Reached end of directory/root. Bump pos past the last item. */
+	if (key_type == BTRFS_DIR_INDEX_KEY)
+		filp->f_pos = INT_LIMIT(typeof(filp->f_pos));
+	else
+		filp->f_pos++;
+nopos:
+	ret = 0;
+err:
+	btrfs_free_path(path);
+	return ret;
+}
+
+int btrfs_write_inode(struct inode *inode, int wait)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_trans_handle *trans;
+	int ret = 0;
+
+	if (root->fs_info->btree_inode == inode)
+		return 0;
+
+	if (wait) {
+		trans = btrfs_join_transaction(root, 1);
+		btrfs_set_trans_block_group(trans, inode);
+		ret = btrfs_commit_transaction(trans, root);
+	}
+	return ret;
+}
+
+/*
+ * This is somewhat expensive, updating the tree every time the
+ * inode changes.  But, it is most likely to find the inode in cache.
+ * FIXME, needs more benchmarking...there are no reasons other than performance
+ * to keep or drop this code.
+ */
+void btrfs_dirty_inode(struct inode *inode)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_trans_handle *trans;
+
+	trans = btrfs_join_transaction(root, 1);
+	btrfs_set_trans_block_group(trans, inode);
+	btrfs_update_inode(trans, root, inode);
+	btrfs_end_transaction(trans, root);
+}
+
+/*
+ * find the highest existing sequence number in a directory
+ * and then set the in-memory index_cnt variable to reflect
+ * free sequence numbers
+ */
+static int btrfs_set_inode_index_count(struct inode *inode)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_key key, found_key;
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	int ret;
+
+	key.objectid = inode->i_ino;
+	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
+	key.offset = (u64)-1;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+	/* FIXME: we should be able to handle this */
+	if (ret == 0)
+		goto out;
+	ret = 0;
+
+	/*
+	 * MAGIC NUMBER EXPLANATION:
+	 * since we search a directory based on f_pos, we have to start at 2:
+	 * '.' and '..' have f_pos of 0 and 1 respectively, so everything
+	 * else has to start at 2
+	 */
+	if (path->slots[0] == 0) {
+		BTRFS_I(inode)->index_cnt = 2;
+		goto out;
+	}
+
+	path->slots[0]--;
+
+	leaf = path->nodes[0];
+	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+
+	if (found_key.objectid != inode->i_ino ||
+	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
+		BTRFS_I(inode)->index_cnt = 2;
+		goto out;
+	}
+
+	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+/*
+ * helper to find a free sequence number in a given directory.  The current
+ * code is very simple; later versions will do smarter things in the btree
+ */
+int btrfs_set_inode_index(struct inode *dir, u64 *index)
+{
+	int ret = 0;
+
+	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
+		ret = btrfs_set_inode_index_count(dir);
+		if (ret)
+			return ret;
+	}
+
+	*index = BTRFS_I(dir)->index_cnt;
+	BTRFS_I(dir)->index_cnt++;
+
+	return ret;
+}
+
+static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
+				     struct btrfs_root *root,
+				     struct inode *dir,
+				     const char *name, int name_len,
+				     u64 ref_objectid, u64 objectid,
+				     u64 alloc_hint, int mode, u64 *index)
+{
+	struct inode *inode;
+	struct btrfs_inode_item *inode_item;
+	struct btrfs_key *location;
+	struct btrfs_path *path;
+	struct btrfs_inode_ref *ref;
+	struct btrfs_key key[2];
+	u32 sizes[2];
+	unsigned long ptr;
+	int ret;
+	int owner;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+
+	inode = new_inode(root->fs_info->sb);
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+
+	if (dir) {
+		ret = btrfs_set_inode_index(dir, index);
+		if (ret)
+			return ERR_PTR(ret);
+	}
+	/*
+	 * index_cnt is ignored for everything but a dir,
+	 * btrfs_set_inode_index_count has an explanation for the magic
+	 * number
+	 */
+	init_btrfs_i(inode);
+	BTRFS_I(inode)->index_cnt = 2;
+	BTRFS_I(inode)->root = root;
+	BTRFS_I(inode)->generation = trans->transid;
+
+	if (mode & S_IFDIR)
+		owner = 0;
+	else
+		owner = 1;
+	BTRFS_I(inode)->block_group =
+			btrfs_find_block_group(root, 0, alloc_hint, owner);
+	if ((mode & S_IFREG)) {
+		if (btrfs_test_opt(root, NODATASUM))
+			btrfs_set_flag(inode, NODATASUM);
+		if (btrfs_test_opt(root, NODATACOW))
+			btrfs_set_flag(inode, NODATACOW);
+	}
+
+	key[0].objectid = objectid;
+	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
+	key[0].offset = 0;
+
+	key[1].objectid = objectid;
+	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
+	key[1].offset = ref_objectid;
+
+	sizes[0] = sizeof(struct btrfs_inode_item);
+	sizes[1] = name_len + sizeof(*ref);
+
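+	/*
+	 * insert the inode item and the inode ref (which records the name
+	 * and parent directory) in a single batch
+	 */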
+	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
+	if (ret != 0)
+		goto fail;
+
+	if (objectid > root->highest_inode)
+		root->highest_inode = objectid;
+
+	inode->i_uid = current_fsuid();
+	inode->i_gid = current_fsgid();
+	inode->i_mode = mode;
+	inode->i_ino = objectid;
+	inode_set_bytes(inode, 0);
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+				  struct btrfs_inode_item);
+	fill_inode_item(trans, path->nodes[0], inode_item, inode);
+
+	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
+			     struct btrfs_inode_ref);
+	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
+	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
+	ptr = (unsigned long)(ref + 1);
+	write_extent_buffer(path->nodes[0], name, ptr, name_len);
+
+	btrfs_mark_buffer_dirty(path->nodes[0]);
+	btrfs_free_path(path);
+
+	location = &BTRFS_I(inode)->location;
+	location->objectid = objectid;
+	location->offset = 0;
+	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
+
+	insert_inode_hash(inode);
+	return inode;
+fail:
+	if (dir)
+		BTRFS_I(dir)->index_cnt--;
+	btrfs_free_path(path);
+	return ERR_PTR(ret);
+}
+
+static inline u8 btrfs_inode_type(struct inode *inode)
+{
+	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
+}
+
+/*
+ * utility function to add 'inode' into 'parent_inode' with
+ * a given name and a given sequence number.
+ * if 'add_backref' is true, also insert a backref from the
+ * inode to the parent directory.
+ */
+int btrfs_add_link(struct btrfs_trans_handle *trans,
+		   struct inode *parent_inode, struct inode *inode,
+		   const char *name, int name_len, int add_backref, u64 index)
+{
+	int ret;
+	struct btrfs_key key;
+	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
+
+	key.objectid = inode->i_ino;
+	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+	key.offset = 0;
+
+	ret = btrfs_insert_dir_item(trans, root, name, name_len,
+				    parent_inode->i_ino,
+				    &key, btrfs_inode_type(inode),
+				    index);
+	if (ret == 0) {
+		if (add_backref) {
+			ret = btrfs_insert_inode_ref(trans, root,
+						     name, name_len,
+						     inode->i_ino,
+						     parent_inode->i_ino,
+						     index);
+		}
+		btrfs_i_size_write(parent_inode, parent_inode->i_size +
+				   name_len * 2);
+		parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
+		ret = btrfs_update_inode(trans, root, parent_inode);
+	}
+	return ret;
+}
+
+static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
+			    struct dentry *dentry, struct inode *inode,
+			    int backref, u64 index)
+{
+	int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
+				 inode, dentry->d_name.name,
+				 dentry->d_name.len, backref, index);
+	if (!err) {
+		d_instantiate(dentry, inode);
+		return 0;
+	}
+	if (err > 0)
+		err = -EEXIST;
+	return err;
+}
+
+static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
+			int mode, dev_t rdev)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *root = BTRFS_I(dir)->root;
+	struct inode *inode = NULL;
+	int err;
+	int drop_inode = 0;
+	u64 objectid;
+	unsigned long nr = 0;
+	u64 index = 0;
+
+	if (!new_valid_dev(rdev))
+		return -EINVAL;
+
+	err = btrfs_check_free_space(root, 1, 0);
+	if (err)
+		goto fail;
+
+	trans = btrfs_start_transaction(root, 1);
+	btrfs_set_trans_block_group(trans, dir);
+
+	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
+	if (err) {
+		err = -ENOSPC;
+		goto out_unlock;
+	}
+
+	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
+				dentry->d_name.len,
+				dentry->d_parent->d_inode->i_ino, objectid,
+				BTRFS_I(dir)->block_group, mode, &index);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out_unlock;
+
+	err = btrfs_init_acl(inode, dir);
+	if (err) {
+		drop_inode = 1;
+		goto out_unlock;
+	}
+
+	btrfs_set_trans_block_group(trans, inode);
+	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
+	if (err)
+		drop_inode = 1;
+	else {
+		inode->i_op = &btrfs_special_inode_operations;
+		init_special_inode(inode, inode->i_mode, rdev);
+		btrfs_update_inode(trans, root, inode);
+	}
+	dir->i_sb->s_dirt = 1;
+	btrfs_update_inode_block_group(trans, inode);
+	btrfs_update_inode_block_group(trans, dir);
+out_unlock:
+	nr = trans->blocks_used;
+	btrfs_end_transaction_throttle(trans, root);
+fail:
+	if (drop_inode) {
+		inode_dec_link_count(inode);
+		iput(inode);
+	}
+	btrfs_btree_balance_dirty(root, nr);
+	return err;
+}
+
+static int btrfs_create(struct inode *dir, struct dentry *dentry,
+			int mode, struct nameidata *nd)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *root = BTRFS_I(dir)->root;
+	struct inode *inode = NULL;
+	int err;
+	int drop_inode = 0;
+	unsigned long nr = 0;
+	u64 objectid;
+	u64 index = 0;
+
+	err = btrfs_check_free_space(root, 1, 0);
+	if (err)
+		goto fail;
+	trans = btrfs_start_transaction(root, 1);
+	btrfs_set_trans_block_group(trans, dir);
+
+	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
+	if (err) {
+		err = -ENOSPC;
+		goto out_unlock;
+	}
+
+	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
+				dentry->d_name.len,
+				dentry->d_parent->d_inode->i_ino,
+				objectid, BTRFS_I(dir)->block_group, mode,
+				&index);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out_unlock;
+
+	err = btrfs_init_acl(inode, dir);
+	if (err) {
+		drop_inode = 1;
+		goto out_unlock;
+	}
+
+	btrfs_set_trans_block_group(trans, inode);
+	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
+	if (err)
+		drop_inode = 1;
+	else {
+		inode->i_mapping->a_ops = &btrfs_aops;
+		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
+		inode->i_fop = &btrfs_file_operations;
+		inode->i_op = &btrfs_file_inode_operations;
+		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+	}
+	dir->i_sb->s_dirt = 1;
+	btrfs_update_inode_block_group(trans, inode);
+	btrfs_update_inode_block_group(trans, dir);
+out_unlock:
+	nr = trans->blocks_used;
+	btrfs_end_transaction_throttle(trans, root);
+fail:
+	if (drop_inode) {
+		inode_dec_link_count(inode);
+		iput(inode);
+	}
+	btrfs_btree_balance_dirty(root, nr);
+	return err;
+}
+
+static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
+		      struct dentry *dentry)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *root = BTRFS_I(dir)->root;
+	struct inode *inode = old_dentry->d_inode;
+	u64 index;
+	unsigned long nr = 0;
+	int err;
+	int drop_inode = 0;
+
+	if (inode->i_nlink == 0)
+		return -ENOENT;
+
+	btrfs_inc_nlink(inode);
+	err = btrfs_check_free_space(root, 1, 0);
+	if (err)
+		goto fail;
+	err = btrfs_set_inode_index(dir, &index);
+	if (err)
+		goto fail;
+
+	trans = btrfs_start_transaction(root, 1);
+
+	btrfs_set_trans_block_group(trans, dir);
+	atomic_inc(&inode->i_count);
+
+	err = btrfs_add_nondir(trans, dentry, inode, 1, index);
+
+	if (err)
+		drop_inode = 1;
+
+	dir->i_sb->s_dirt = 1;
+	btrfs_update_inode_block_group(trans, dir);
+	err = btrfs_update_inode(trans, root, inode);
+
+	if (err)
+		drop_inode = 1;
+
+	nr = trans->blocks_used;
+	btrfs_end_transaction_throttle(trans, root);
+fail:
+	if (drop_inode) {
+		inode_dec_link_count(inode);
+		iput(inode);
+	}
+	btrfs_btree_balance_dirty(root, nr);
+	return err;
+}
+
+static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+	struct inode *inode = NULL;
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *root = BTRFS_I(dir)->root;
+	int err = 0;
+	int drop_on_err = 0;
+	u64 objectid = 0;
+	u64 index = 0;
+	unsigned long nr = 1;
+
+	err = btrfs_check_free_space(root, 1, 0);
+	if (err)
+		goto out_unlock;
+
+	trans = btrfs_start_transaction(root, 1);
+	btrfs_set_trans_block_group(trans, dir);
+
+	if (IS_ERR(trans)) {
+		err = PTR_ERR(trans);
+		goto out_unlock;
+	}
+
+	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
+	if (err) {
+		err = -ENOSPC;
+		goto out_unlock;
+	}
+
+	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
+				dentry->d_name.len,
+				dentry->d_parent->d_inode->i_ino, objectid,
+				BTRFS_I(dir)->block_group, S_IFDIR | mode,
+				&index);
+	if (IS_ERR(inode)) {
+		err = PTR_ERR(inode);
+		goto out_fail;
+	}
+
+	drop_on_err = 1;
+
+	err = btrfs_init_acl(inode, dir);
+	if (err)
+		goto out_fail;
+
+	inode->i_op = &btrfs_dir_inode_operations;
+	inode->i_fop = &btrfs_dir_file_operations;
+	btrfs_set_trans_block_group(trans, inode);
+
+	btrfs_i_size_write(inode, 0);
+	err = btrfs_update_inode(trans, root, inode);
+	if (err)
+		goto out_fail;
+
+	err = btrfs_add_link(trans, dentry->d_parent->d_inode,
+				 inode, dentry->d_name.name,
+				 dentry->d_name.len, 0, index);
+	if (err)
+		goto out_fail;
+
+	d_instantiate(dentry, inode);
+	drop_on_err = 0;
+	dir->i_sb->s_dirt = 1;
+	btrfs_update_inode_block_group(trans, inode);
+	btrfs_update_inode_block_group(trans, dir);
+
+out_fail:
+	nr = trans->blocks_used;
+	btrfs_end_transaction_throttle(trans, root);
+
+out_unlock:
+	if (drop_on_err)
+		iput(inode);
+	btrfs_btree_balance_dirty(root, nr);
+	return err;
+}
+
+/* helper for btrfs_get_extent.  Given an existing extent in the tree,
+ * and an extent that you want to insert, deal with overlap and insert
+ * the new extent into the tree.
+ */
+static int merge_extent_mapping(struct extent_map_tree *em_tree,
+				struct extent_map *existing,
+				struct extent_map *em,
+				u64 map_start, u64 map_len)
+{
+	u64 start_diff;
+
+	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
+	start_diff = map_start - em->start;
+	em->start = map_start;
+	em->len = map_len;
+	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
+	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
+		em->block_start += start_diff;
+		em->block_len -= start_diff;
+	}
+	return add_extent_mapping(em_tree, em);
+}
+
+static noinline int uncompress_inline(struct btrfs_path *path,
+				      struct inode *inode, struct page *page,
+				      size_t pg_offset, u64 extent_offset,
+				      struct btrfs_file_extent_item *item)
+{
+	int ret;
+	struct extent_buffer *leaf = path->nodes[0];
+	char *tmp;
+	size_t max_size;
+	unsigned long inline_size;
+	unsigned long ptr;
+
+	WARN_ON(pg_offset != 0);
+	max_size = btrfs_file_extent_ram_bytes(leaf, item);
+	inline_size = btrfs_file_extent_inline_item_len(leaf,
+					btrfs_item_nr(leaf, path->slots[0]));
+	tmp = kmalloc(inline_size, GFP_NOFS);
+	ptr = btrfs_file_extent_inline_start(item);
+
+	read_extent_buffer(leaf, tmp, ptr, inline_size);
+
+	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
+	ret = btrfs_zlib_decompress(tmp, page, extent_offset,
+				    inline_size, max_size);
+	if (ret) {
+		char *kaddr = kmap_atomic(page, KM_USER0);
+		unsigned long copy_size = min_t(u64,
+				  PAGE_CACHE_SIZE - pg_offset,
+				  max_size - extent_offset);
+		memset(kaddr + pg_offset, 0, copy_size);
+		kunmap_atomic(kaddr, KM_USER0);
+	}
+	kfree(tmp);
+	return 0;
+}
+
+/*
+ * a bit scary, this does extent mapping from logical file offset to the disk.
+ * the ugly parts come from merging extents from the disk with the in-ram
+ * representation.  This gets more complex because of the data=ordered code,
+ * where the in-ram extents might be locked pending data=ordered completion.
+ *
+ * This also copies inline extents directly into the page.
+ */
+
+struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
+				    size_t pg_offset, u64 start, u64 len,
+				    int create)
+{
+	int ret;
+	int err = 0;
+	u64 bytenr;
+	u64 extent_start = 0;
+	u64 extent_end = 0;
+	u64 objectid = inode->i_ino;
+	u32 found_type;
+	struct btrfs_path *path = NULL;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_file_extent_item *item;
+	struct extent_buffer *leaf;
+	struct btrfs_key found_key;
+	struct extent_map *em = NULL;
+	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	struct btrfs_trans_handle *trans = NULL;
+	int compressed;
+
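+	/* check the in-memory extent map cache before going to the btree */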
+again:
+	spin_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree, start, len);
+	if (em)
+		em->bdev = root->fs_info->fs_devices->latest_bdev;
+	spin_unlock(&em_tree->lock);
+
+	if (em) {
+		if (em->start > start || em->start + em->len <= start)
+			free_extent_map(em);
+		else if (em->block_start == EXTENT_MAP_INLINE && page)
+			free_extent_map(em);
+		else
+			goto out;
+	}
+	em = alloc_extent_map(GFP_NOFS);
+	if (!em) {
+		err = -ENOMEM;
+		goto out;
+	}
+	em->bdev = root->fs_info->fs_devices->latest_bdev;
+	em->start = EXTENT_MAP_HOLE;
+	em->orig_start = EXTENT_MAP_HOLE;
+	em->len = (u64)-1;
+	em->block_len = (u64)-1;
+
+	if (!path) {
+		path = btrfs_alloc_path();
+		BUG_ON(!path);
+	}
+
+	ret = btrfs_lookup_file_extent(trans, root, path,
+				       objectid, start, trans != NULL);
+	if (ret < 0) {
+		err = ret;
+		goto out;
+	}
+
+	if (ret != 0) {
+		if (path->slots[0] == 0)
+			goto not_found;
+		path->slots[0]--;
+	}
+
+	leaf = path->nodes[0];
+	item = btrfs_item_ptr(leaf, path->slots[0],
+			      struct btrfs_file_extent_item);
+	/* are we inside the extent that was found? */
+	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+	found_type = btrfs_key_type(&found_key);
+	if (found_key.objectid != objectid ||
+	    found_type != BTRFS_EXTENT_DATA_KEY) {
+		goto not_found;
+	}
+
+	found_type = btrfs_file_extent_type(leaf, item);
+	extent_start = found_key.offset;
+	compressed = btrfs_file_extent_compression(leaf, item);
+	if (found_type == BTRFS_FILE_EXTENT_REG ||
+	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+		extent_end = extent_start +
+		       btrfs_file_extent_num_bytes(leaf, item);
+	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+		size_t size;
+		size = btrfs_file_extent_inline_len(leaf, item);
+		extent_end = (extent_start + size + root->sectorsize - 1) &
+			~((u64)root->sectorsize - 1);
+	}
+
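+	/*
+	 * the extent we found ends before the start we were asked for.
+	 * Look at the next item and, if there is a gap before it, return
+	 * a hole that covers the gap
+	 */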
+	if (start >= extent_end) {
+		path->slots[0]++;
+		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0) {
+				err = ret;
+				goto out;
+			}
+			if (ret > 0)
+				goto not_found;
+			leaf = path->nodes[0];
+		}
+		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+		if (found_key.objectid != objectid ||
+		    found_key.type != BTRFS_EXTENT_DATA_KEY)
+			goto not_found;
+		if (start + len <= found_key.offset)
+			goto not_found;
+		em->start = start;
+		em->len = found_key.offset - start;
+		goto not_found_em;
+	}
+
+	if (found_type == BTRFS_FILE_EXTENT_REG ||
+	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+		em->start = extent_start;
+		em->len = extent_end - extent_start;
+		em->orig_start = extent_start -
+				 btrfs_file_extent_offset(leaf, item);
+		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
+		if (bytenr == 0) {
+			em->block_start = EXTENT_MAP_HOLE;
+			goto insert;
+		}
+		if (compressed) {
+			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
+			em->block_start = bytenr;
+			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
+									 item);
+		} else {
+			bytenr += btrfs_file_extent_offset(leaf, item);
+			em->block_start = bytenr;
+			em->block_len = em->len;
+			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
+				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
+		}
+		goto insert;
+	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+		unsigned long ptr;
+		char *map;
+		size_t size;
+		size_t extent_offset;
+		size_t copy_size;
+
+		em->block_start = EXTENT_MAP_INLINE;
+		if (!page || create) {
+			em->start = extent_start;
+			em->len = extent_end - extent_start;
+			goto out;
+		}
+
+		size = btrfs_file_extent_inline_len(leaf, item);
+		extent_offset = page_offset(page) + pg_offset - extent_start;
+		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
+				size - extent_offset);
+		em->start = extent_start + extent_offset;
+		em->len = (copy_size + root->sectorsize - 1) &
+			~((u64)root->sectorsize - 1);
+		em->orig_start = EXTENT_MAP_INLINE;
+		if (compressed)
+			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
+		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
+		if (create == 0 && !PageUptodate(page)) {
+			if (btrfs_file_extent_compression(leaf, item) ==
+			    BTRFS_COMPRESS_ZLIB) {
+				ret = uncompress_inline(path, inode, page,
+							pg_offset,
+							extent_offset, item);
+				BUG_ON(ret);
+			} else {
+				map = kmap(page);
+				read_extent_buffer(leaf, map + pg_offset, ptr,
+						   copy_size);
+				kunmap(page);
+			}
+			flush_dcache_page(page);
+		} else if (create && PageUptodate(page)) {
+			if (!trans) {
+				kunmap(page);
+				free_extent_map(em);
+				em = NULL;
+				btrfs_release_path(root, path);
+				trans = btrfs_join_transaction(root, 1);
+				goto again;
+			}
+			map = kmap(page);
+			write_extent_buffer(leaf, map + pg_offset, ptr,
+					    copy_size);
+			kunmap(page);
+			btrfs_mark_buffer_dirty(leaf);
+		}
+		set_extent_uptodate(io_tree, em->start,
+				    extent_map_end(em) - 1, GFP_NOFS);
+		goto insert;
+	} else {
+		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
+		WARN_ON(1);
+	}
+not_found:
+	em->start = start;
+	em->len = len;
+not_found_em:
+	em->block_start = EXTENT_MAP_HOLE;
+	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
+insert:
+	btrfs_release_path(root, path);
+	if (em->start > start || extent_map_end(em) <= start) {
+		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
+		       "[%llu %llu]\n", (unsigned long long)em->start,
+		       (unsigned long long)em->len,
+		       (unsigned long long)start,
+		       (unsigned long long)len);
+		err = -EIO;
+		goto out;
+	}
+
+	err = 0;
+	spin_lock(&em_tree->lock);
+	ret = add_extent_mapping(em_tree, em);
+	/* it is possible that someone inserted the extent into the tree
+	 * while we had the lock dropped.  It is also possible that
+	 * an overlapping map exists in the tree
+	 */
+	if (ret == -EEXIST) {
+		struct extent_map *existing;
+
+		ret = 0;
+
+		existing = lookup_extent_mapping(em_tree, start, len);
+		if (existing && (existing->start > start ||
+		    existing->start + existing->len <= start)) {
+			free_extent_map(existing);
+			existing = NULL;
+		}
+		if (!existing) {
+			existing = lookup_extent_mapping(em_tree, em->start,
+							 em->len);
+			if (existing) {
+				err = merge_extent_mapping(em_tree, existing,
+							   em, start,
+							   root->sectorsize);
+				free_extent_map(existing);
+				if (err) {
+					free_extent_map(em);
+					em = NULL;
+				}
+			} else {
+				err = -EIO;
+				free_extent_map(em);
+				em = NULL;
+			}
+		} else {
+			free_extent_map(em);
+			em = existing;
+			err = 0;
+		}
+	}
+	spin_unlock(&em_tree->lock);
+out:
+	if (path)
+		btrfs_free_path(path);
+	if (trans) {
+		ret = btrfs_end_transaction(trans, root);
+		if (!err)
+			err = ret;
+	}
+	if (err) {
+		free_extent_map(em);
+		WARN_ON(1);
+		return ERR_PTR(err);
+	}
+	return em;
+}
+
+static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
+			const struct iovec *iov, loff_t offset,
+			unsigned long nr_segs)
+{
+	return -EINVAL;
+}
+
+static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
+{
+	return extent_bmap(mapping, iblock, btrfs_get_extent);
+}
+
+int btrfs_readpage(struct file *file, struct page *page)
+{
+	struct extent_io_tree *tree;
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
+	return extent_read_full_page(tree, page, btrfs_get_extent);
+}
+
+static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	struct extent_io_tree *tree;
+
+	if (current->flags & PF_MEMALLOC) {
+		redirty_page_for_writepage(wbc, page);
+		unlock_page(page);
+		return 0;
+	}
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
+	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
+}
+
+int btrfs_writepages(struct address_space *mapping,
+		     struct writeback_control *wbc)
+{
+	struct extent_io_tree *tree;
+
+	tree = &BTRFS_I(mapping->host)->io_tree;
+	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
+}
+
+static int
+btrfs_readpages(struct file *file, struct address_space *mapping,
+		struct list_head *pages, unsigned nr_pages)
+{
+	struct extent_io_tree *tree;
+	tree = &BTRFS_I(mapping->host)->io_tree;
+	return extent_readpages(tree, mapping, pages, nr_pages,
+				btrfs_get_extent);
+}
+static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
+{
+	struct extent_io_tree *tree;
+	struct extent_map_tree *map;
+	int ret;
+
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
+	map = &BTRFS_I(page->mapping->host)->extent_tree;
+	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
+	if (ret == 1) {
+		ClearPagePrivate(page);
+		set_page_private(page, 0);
+		page_cache_release(page);
+	}
+	return ret;
+}
+
+static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
+{
+	if (PageWriteback(page) || PageDirty(page))
+		return 0;
+	return __btrfs_releasepage(page, gfp_flags);
+}
+
+static void btrfs_invalidatepage(struct page *page, unsigned long offset)
+{
+	struct extent_io_tree *tree;
+	struct btrfs_ordered_extent *ordered;
+	u64 page_start = page_offset(page);
+	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
+
+	wait_on_page_writeback(page);
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
+	if (offset) {
+		btrfs_releasepage(page, GFP_NOFS);
+		return;
+	}
+
+	lock_extent(tree, page_start, page_end, GFP_NOFS);
+	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
+					   page_offset(page));
+	if (ordered) {
+		/*
+		 * IO on this page will never be started, so we need
+		 * to account for any ordered extents now
+		 */
+		clear_extent_bit(tree, page_start, page_end,
+				 EXTENT_DIRTY | EXTENT_DELALLOC |
+				 EXTENT_LOCKED, 1, 0, GFP_NOFS);
+		btrfs_finish_ordered_io(page->mapping->host,
+					page_start, page_end);
+		btrfs_put_ordered_extent(ordered);
+		lock_extent(tree, page_start, page_end, GFP_NOFS);
+	}
+	clear_extent_bit(tree, page_start, page_end,
+		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
+		 EXTENT_ORDERED,
+		 1, 1, GFP_NOFS);
+	__btrfs_releasepage(page, GFP_NOFS);
+
+	ClearPageChecked(page);
+	if (PagePrivate(page)) {
+		ClearPagePrivate(page);
+		set_page_private(page, 0);
+		page_cache_release(page);
+	}
+}
+
+/*
+ * btrfs_page_mkwrite() is not allowed to change the file size as it gets
+ * called from a page fault handler when a page is first dirtied. Hence we must
+ * be careful to check for EOF conditions here. We set the page up correctly
+ * for a written page which means we get ENOSPC checking when writing into
+ * holes and correct delalloc and unwritten extent mapping on filesystems that
+ * support these features.
+ *
+ * We are not allowed to take the i_mutex here so we have to play games to
+ * protect against truncate races as the page could now be beyond EOF.  Because
+ * vmtruncate() writes the inode size before removing pages, once we have the
+ * page lock we can determine safely if the page is beyond EOF. If it is not
+ * beyond EOF, then the page is guaranteed safe against truncation until we
+ * unlock the page.
+ */
+int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+{
+	struct inode *inode = fdentry(vma->vm_file)->d_inode;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	struct btrfs_ordered_extent *ordered;
+	char *kaddr;
+	unsigned long zero_start;
+	loff_t size;
+	int ret;
+	u64 page_start;
+	u64 page_end;
+
+	ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
+	if (ret)
+		goto out;
+
+	ret = -EINVAL;
+again:
+	lock_page(page);
+	size = i_size_read(inode);
+	page_start = page_offset(page);
+	page_end = page_start + PAGE_CACHE_SIZE - 1;
+
+	if ((page->mapping != inode->i_mapping) ||
+	    (page_start >= size)) {
+		/* page got truncated out from underneath us */
+		goto out_unlock;
+	}
+	wait_on_page_writeback(page);
+
+	lock_extent(io_tree, page_start, page_end, GFP_NOFS);
+	set_page_extent_mapped(page);
+
+	/*
+	 * we can't set the delalloc bits if there are pending ordered
+	 * extents.  Drop our locks and wait for them to finish
+	 */
+	ordered = btrfs_lookup_ordered_extent(inode, page_start);
+	if (ordered) {
+		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+		unlock_page(page);
+		btrfs_start_ordered_extent(inode, ordered, 1);
+		btrfs_put_ordered_extent(ordered);
+		goto again;
+	}
+
+	btrfs_set_extent_delalloc(inode, page_start, page_end);
+	ret = 0;
+
+	/* page is wholly or partially inside EOF */
+	if (page_start + PAGE_CACHE_SIZE > size)
+		zero_start = size & ~PAGE_CACHE_MASK;
+	else
+		zero_start = PAGE_CACHE_SIZE;
+
+	if (zero_start != PAGE_CACHE_SIZE) {
+		kaddr = kmap(page);
+		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
+		flush_dcache_page(page);
+		kunmap(page);
+	}
+	ClearPageChecked(page);
+	set_page_dirty(page);
+	unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+
+out_unlock:
+	unlock_page(page);
+out:
+	return ret;
+}
+
+static void btrfs_truncate(struct inode *inode)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	int ret;
+	struct btrfs_trans_handle *trans;
+	unsigned long nr;
+	u64 mask = root->sectorsize - 1;
+
+	if (!S_ISREG(inode->i_mode))
+		return;
+	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+		return;
+
+	btrfs_truncate_page(inode->i_mapping, inode->i_size);
+	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
+
+	trans = btrfs_start_transaction(root, 1);
+	btrfs_set_trans_block_group(trans, inode);
+	btrfs_i_size_write(inode, inode->i_size);
+
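+	/*
+	 * add the inode to the orphan list first so a crash part way
+	 * through the truncate can be cleaned up on the next mount
+	 */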
+	ret = btrfs_orphan_add(trans, inode);
+	if (ret)
+		goto out;
+	/* FIXME, add redo link to tree so we don't leak on crash */
+	ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
+				      BTRFS_EXTENT_DATA_KEY);
+	btrfs_update_inode(trans, root, inode);
+
+	ret = btrfs_orphan_del(trans, inode);
+	BUG_ON(ret);
+
+out:
+	nr = trans->blocks_used;
+	ret = btrfs_end_transaction_throttle(trans, root);
+	BUG_ON(ret);
+	btrfs_btree_balance_dirty(root, nr);
+}
+
+/*
+ * create a new subvolume directory/inode (helper for the ioctl).
+ */
+int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *new_root, struct dentry *dentry,
+			     u64 new_dirid, u64 alloc_hint)
+{
+	struct inode *inode;
+	int error;
+	u64 index = 0;
+
+	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
+				new_dirid, alloc_hint, S_IFDIR | 0700, &index);
+	if (IS_ERR(inode))
+		return PTR_ERR(inode);
+	inode->i_op = &btrfs_dir_inode_operations;
+	inode->i_fop = &btrfs_dir_file_operations;
+
+	inode->i_nlink = 1;
+	btrfs_i_size_write(inode, 0);
+
+	error = btrfs_update_inode(trans, new_root, inode);
+	if (error)
+		return error;
+
+	d_instantiate(dentry, inode);
+	return 0;
+}
+
+/* helper function for file defrag and space balancing.  This
+ * forces readahead on a given range of bytes in an inode
+ */
+unsigned long btrfs_force_ra(struct address_space *mapping,
+			      struct file_ra_state *ra, struct file *file,
+			      pgoff_t offset, pgoff_t last_index)
+{
+	pgoff_t req_size = last_index - offset + 1;
+
+	page_cache_sync_readahead(mapping, ra, file, offset, req_size);
+	return offset + req_size;
+}
+
+struct inode *btrfs_alloc_inode(struct super_block *sb)
+{
+	struct btrfs_inode *ei;
+
+	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
+	if (!ei)
+		return NULL;
+	ei->last_trans = 0;
+	ei->logged_trans = 0;
+	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
+	ei->i_acl = BTRFS_ACL_NOT_CACHED;
+	ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
+	INIT_LIST_HEAD(&ei->i_orphan);
+	return &ei->vfs_inode;
+}
+
+void btrfs_destroy_inode(struct inode *inode)
+{
+	struct btrfs_ordered_extent *ordered;
+	WARN_ON(!list_empty(&inode->i_dentry));
+	WARN_ON(inode->i_data.nrpages);
+
+	if (BTRFS_I(inode)->i_acl &&
+	    BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
+		posix_acl_release(BTRFS_I(inode)->i_acl);
+	if (BTRFS_I(inode)->i_default_acl &&
+	    BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
+		posix_acl_release(BTRFS_I(inode)->i_default_acl);
+
+	spin_lock(&BTRFS_I(inode)->root->list_lock);
+	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
+		printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan"
+		       " list\n", inode->i_ino);
+		dump_stack();
+	}
+	spin_unlock(&BTRFS_I(inode)->root->list_lock);
+
+	while (1) {
+		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
+		if (!ordered)
+			break;
+		else {
+			printk(KERN_ERR "btrfs found ordered "
+			       "extent %llu %llu on inode cleanup\n",
+			       (unsigned long long)ordered->file_offset,
+			       (unsigned long long)ordered->len);
+			btrfs_remove_ordered_extent(inode, ordered);
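+			/*
+			 * one put drops the reference taken by the lookup
+			 * above, the other drops the reference the ordered
+			 * tree was still holding
+			 */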
+			btrfs_put_ordered_extent(ordered);
+			btrfs_put_ordered_extent(ordered);
+		}
+	}
+	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
+	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
+}
+
+static void init_once(void *foo)
+{
+	struct btrfs_inode *ei = (struct btrfs_inode *) foo;
+
+	inode_init_once(&ei->vfs_inode);
+}
+
+void btrfs_destroy_cachep(void)
+{
+	if (btrfs_inode_cachep)
+		kmem_cache_destroy(btrfs_inode_cachep);
+	if (btrfs_trans_handle_cachep)
+		kmem_cache_destroy(btrfs_trans_handle_cachep);
+	if (btrfs_transaction_cachep)
+		kmem_cache_destroy(btrfs_transaction_cachep);
+	if (btrfs_bit_radix_cachep)
+		kmem_cache_destroy(btrfs_bit_radix_cachep);
+	if (btrfs_path_cachep)
+		kmem_cache_destroy(btrfs_path_cachep);
+}
+
+struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
+				       unsigned long extra_flags,
+				       void (*ctor)(void *))
+{
+	return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
+				 SLAB_MEM_SPREAD | extra_flags), ctor);
+}
+
+int btrfs_init_cachep(void)
+{
+	btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
+					  sizeof(struct btrfs_inode),
+					  0, init_once);
+	if (!btrfs_inode_cachep)
+		goto fail;
+	btrfs_trans_handle_cachep =
+			btrfs_cache_create("btrfs_trans_handle_cache",
+					   sizeof(struct btrfs_trans_handle),
+					   0, NULL);
+	if (!btrfs_trans_handle_cachep)
+		goto fail;
+	btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
+					     sizeof(struct btrfs_transaction),
+					     0, NULL);
+	if (!btrfs_transaction_cachep)
+		goto fail;
+	btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
+					 sizeof(struct btrfs_path),
+					 0, NULL);
+	if (!btrfs_path_cachep)
+		goto fail;
+	btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
+					      SLAB_DESTROY_BY_RCU, NULL);
+	if (!btrfs_bit_radix_cachep)
+		goto fail;
+	return 0;
+fail:
+	btrfs_destroy_cachep();
+	return -ENOMEM;
+}
+
+static int btrfs_getattr(struct vfsmount *mnt,
+			 struct dentry *dentry, struct kstat *stat)
+{
+	struct inode *inode = dentry->d_inode;
+	generic_fillattr(inode, stat);
+	stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
+	stat->blksize = PAGE_CACHE_SIZE;
+	stat->blocks = (inode_get_bytes(inode) +
+			BTRFS_I(inode)->delalloc_bytes) >> 9;
+	return 0;
+}
+
+static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+			   struct inode *new_dir, struct dentry *new_dentry)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *root = BTRFS_I(old_dir)->root;
+	struct inode *new_inode = new_dentry->d_inode;
+	struct inode *old_inode = old_dentry->d_inode;
+	struct timespec ctime = CURRENT_TIME;
+	u64 index = 0;
+	int ret;
+
+	/* we're not allowed to rename between subvolumes */
+	if (BTRFS_I(old_inode)->root->root_key.objectid !=
+	    BTRFS_I(new_dir)->root->root_key.objectid)
+		return -EXDEV;
+
+	if (S_ISDIR(old_inode->i_mode) && new_inode &&
+	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
+		return -ENOTEMPTY;
+	}
+
+	/* to rename a snapshot or subvolume, we need to juggle the
+	 * backrefs.  This isn't coded yet
+	 */
+	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+		return -EXDEV;
+
+	ret = btrfs_check_free_space(root, 1, 0);
+	if (ret)
+		goto out_unlock;
+
+	trans = btrfs_start_transaction(root, 1);
+
+	btrfs_set_trans_block_group(trans, new_dir);
+
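+	/*
+	 * btrfs_unlink_inode will drop the link count, so bump it here
+	 * to keep it balanced while the entry is moved
+	 */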
+	btrfs_inc_nlink(old_dentry->d_inode);
+	old_dir->i_ctime = old_dir->i_mtime = ctime;
+	new_dir->i_ctime = new_dir->i_mtime = ctime;
+	old_inode->i_ctime = ctime;
+
+	ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
+				 old_dentry->d_name.name,
+				 old_dentry->d_name.len);
+	if (ret)
+		goto out_fail;
+
+	if (new_inode) {
+		new_inode->i_ctime = CURRENT_TIME;
+		ret = btrfs_unlink_inode(trans, root, new_dir,
+					 new_dentry->d_inode,
+					 new_dentry->d_name.name,
+					 new_dentry->d_name.len);
+		if (ret)
+			goto out_fail;
+		if (new_inode->i_nlink == 0) {
+			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
+			if (ret)
+				goto out_fail;
+		}
+
+	}
+	ret = btrfs_set_inode_index(new_dir, &index);
+	if (ret)
+		goto out_fail;
+
+	ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
+			     old_inode, new_dentry->d_name.name,
+			     new_dentry->d_name.len, 1, index);
+	if (ret)
+		goto out_fail;
+
+out_fail:
+	btrfs_end_transaction_throttle(trans, root);
+out_unlock:
+	return ret;
+}
+
+/*
+ * some fairly slow code that needs optimization. This walks the list
+ * of all the inodes with pending delalloc and forces them to disk.
+ */
+int btrfs_start_delalloc_inodes(struct btrfs_root *root)
+{
+	struct list_head *head = &root->fs_info->delalloc_inodes;
+	struct btrfs_inode *binode;
+	struct inode *inode;
+
+	if (root->fs_info->sb->s_flags & MS_RDONLY)
+		return -EROFS;
+
+	spin_lock(&root->fs_info->delalloc_lock);
+	while (!list_empty(head)) {
+		binode = list_entry(head->next, struct btrfs_inode,
+				    delalloc_inodes);
+		inode = igrab(&binode->vfs_inode);
+		if (!inode)
+			list_del_init(&binode->delalloc_inodes);
+		spin_unlock(&root->fs_info->delalloc_lock);
+		if (inode) {
+			filemap_flush(inode->i_mapping);
+			iput(inode);
+		}
+		cond_resched();
+		spin_lock(&root->fs_info->delalloc_lock);
+	}
+	spin_unlock(&root->fs_info->delalloc_lock);
+
+	/* the filemap_flush will queue IO into the worker threads, but
+	 * we have to make sure the IO is actually started and that
+	 * ordered extents get created before we return
+	 */
+	atomic_inc(&root->fs_info->async_submit_draining);
+	while (atomic_read(&root->fs_info->nr_async_submits) ||
+	      atomic_read(&root->fs_info->async_delalloc_pages)) {
+		wait_event(root->fs_info->async_submit_wait,
+		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
+		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
+	}
+	atomic_dec(&root->fs_info->async_submit_draining);
+	return 0;
+}
+
+static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
+			 const char *symname)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *root = BTRFS_I(dir)->root;
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct inode *inode = NULL;
+	int err;
+	int drop_inode = 0;
+	u64 objectid;
+	u64 index = 0;
+	int name_len;
+	int datasize;
+	unsigned long ptr;
+	struct btrfs_file_extent_item *ei;
+	struct extent_buffer *leaf;
+	unsigned long nr = 0;
+
+	name_len = strlen(symname) + 1;
+	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
+		return -ENAMETOOLONG;
+
+	err = btrfs_check_free_space(root, 1, 0);
+	if (err)
+		goto out_fail;
+
+	trans = btrfs_start_transaction(root, 1);
+	btrfs_set_trans_block_group(trans, dir);
+
+	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
+	if (err) {
+		err = -ENOSPC;
+		goto out_unlock;
+	}
+
+	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
+				dentry->d_name.len,
+				dentry->d_parent->d_inode->i_ino, objectid,
+				BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
+				&index);
+	err = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out_unlock;
+
+	err = btrfs_init_acl(inode, dir);
+	if (err) {
+		drop_inode = 1;
+		goto out_unlock;
+	}
+
+	btrfs_set_trans_block_group(trans, inode);
+	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
+	if (err)
+		drop_inode = 1;
+	else {
+		inode->i_mapping->a_ops = &btrfs_aops;
+		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
+		inode->i_fop = &btrfs_file_operations;
+		inode->i_op = &btrfs_file_inode_operations;
+		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+	}
+	dir->i_sb->s_dirt = 1;
+	btrfs_update_inode_block_group(trans, inode);
+	btrfs_update_inode_block_group(trans, dir);
+	if (drop_inode)
+		goto out_unlock;
+
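+	/*
+	 * the symlink target is stored as an inline file extent item,
+	 * insert it into the tree by hand
+	 */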
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+	key.objectid = inode->i_ino;
+	key.offset = 0;
+	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
+	datasize = btrfs_file_extent_calc_inline_size(name_len);
+	err = btrfs_insert_empty_item(trans, root, path, &key,
+				      datasize);
+	if (err) {
+		drop_inode = 1;
+		goto out_unlock;
+	}
+	leaf = path->nodes[0];
+	ei = btrfs_item_ptr(leaf, path->slots[0],
+			    struct btrfs_file_extent_item);
+	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
+	btrfs_set_file_extent_type(leaf, ei,
+				   BTRFS_FILE_EXTENT_INLINE);
+	btrfs_set_file_extent_encryption(leaf, ei, 0);
+	btrfs_set_file_extent_compression(leaf, ei, 0);
+	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
+	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
+
+	ptr = btrfs_file_extent_inline_start(ei);
+	write_extent_buffer(leaf, symname, ptr, name_len);
+	btrfs_mark_buffer_dirty(leaf);
+	btrfs_free_path(path);
+
+	inode->i_op = &btrfs_symlink_inode_operations;
+	inode->i_mapping->a_ops = &btrfs_symlink_aops;
+	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
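+	/*
+	 * note: the inline extent written above stores all name_len bytes,
+	 * including the trailing NUL, but i_size is the target length
+	 * without it
+	 */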
+	inode_set_bytes(inode, name_len);
+	btrfs_i_size_write(inode, name_len - 1);
+	err = btrfs_update_inode(trans, root, inode);
+	if (err)
+		drop_inode = 1;
+
+out_unlock:
+	nr = trans->blocks_used;
+	btrfs_end_transaction_throttle(trans, root);
+out_fail:
+	if (drop_inode) {
+		inode_dec_link_count(inode);
+		iput(inode);
+	}
+	btrfs_btree_balance_dirty(root, nr);
+	return err;
+}
+
+static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
+			       u64 alloc_hint, int mode)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_key ins;
+	u64 alloc_size;
+	u64 cur_offset = start;
+	u64 num_bytes = end - start;
+	int ret = 0;
+
+	trans = btrfs_join_transaction(root, 1);
+	BUG_ON(!trans);
+	btrfs_set_trans_block_group(trans, inode);
+
+	while (num_bytes > 0) {
+		alloc_size = min(num_bytes, root->fs_info->max_extent);
+		ret = btrfs_reserve_extent(trans, root, alloc_size,
+					   root->sectorsize, 0, alloc_hint,
+					   (u64)-1, &ins, 1);
+		if (ret) {
+			WARN_ON(1);
+			goto out;
+		}
+		ret = insert_reserved_file_extent(trans, inode,
+						  cur_offset, ins.objectid,
+						  ins.offset, ins.offset,
+						  ins.offset, 0, 0, 0,
+						  BTRFS_FILE_EXTENT_PREALLOC);
+		BUG_ON(ret);
+		num_bytes -= ins.offset;
+		cur_offset += ins.offset;
+		alloc_hint = ins.objectid + ins.offset;
+	}
+out:
+	if (cur_offset > start) {
+		inode->i_ctime = CURRENT_TIME;
+		btrfs_set_flag(inode, PREALLOC);
+		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+		    cur_offset > i_size_read(inode))
+			btrfs_i_size_write(inode, cur_offset);
+		ret = btrfs_update_inode(trans, root, inode);
+		BUG_ON(ret);
+	}
+
+	btrfs_end_transaction(trans, root);
+	return ret;
+}
+
+static long btrfs_fallocate(struct inode *inode, int mode,
+			    loff_t offset, loff_t len)
+{
+	u64 cur_offset;
+	u64 last_byte;
+	u64 alloc_start;
+	u64 alloc_end;
+	u64 alloc_hint = 0;
+	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
+	struct extent_map *em;
+	int ret;
+
+	alloc_start = offset & ~mask;
+	alloc_end = (offset + len + mask) & ~mask;
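+	/*
+	 * e.g. with 4K sectors, offset 5000 and len 3000 become the aligned
+	 * range [4096, 8192) (illustrative numbers only)
+	 */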
+
+	mutex_lock(&inode->i_mutex);
+	if (alloc_start > inode->i_size) {
+		ret = btrfs_cont_expand(inode, alloc_start);
+		if (ret)
+			goto out;
+	}
+
+	while (1) {
+		struct btrfs_ordered_extent *ordered;
+		lock_extent(&BTRFS_I(inode)->io_tree, alloc_start,
+			    alloc_end - 1, GFP_NOFS);
+		ordered = btrfs_lookup_first_ordered_extent(inode,
+							    alloc_end - 1);
+		if (ordered &&
+		    ordered->file_offset + ordered->len > alloc_start &&
+		    ordered->file_offset < alloc_end) {
+			btrfs_put_ordered_extent(ordered);
+			unlock_extent(&BTRFS_I(inode)->io_tree,
+				      alloc_start, alloc_end - 1, GFP_NOFS);
+			btrfs_wait_ordered_range(inode, alloc_start,
+						 alloc_end - alloc_start);
+		} else {
+			if (ordered)
+				btrfs_put_ordered_extent(ordered);
+			break;
+		}
+	}
+
+	cur_offset = alloc_start;
+	while (1) {
+		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
+				      alloc_end - cur_offset, 0);
+		BUG_ON(IS_ERR(em) || !em);
+		last_byte = min(extent_map_end(em), alloc_end);
+		last_byte = (last_byte + mask) & ~mask;
+		if (em->block_start == EXTENT_MAP_HOLE) {
+			ret = prealloc_file_range(inode, cur_offset,
+					last_byte, alloc_hint, mode);
+			if (ret < 0) {
+				free_extent_map(em);
+				break;
+			}
+		}
+		if (em->block_start <= EXTENT_MAP_LAST_BYTE)
+			alloc_hint = em->block_start;
+		free_extent_map(em);
+
+		cur_offset = last_byte;
+		if (cur_offset >= alloc_end) {
+			ret = 0;
+			break;
+		}
+	}
+	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, alloc_end - 1,
+		      GFP_NOFS);
+out:
+	mutex_unlock(&inode->i_mutex);
+	return ret;
+}
+
+static int btrfs_set_page_dirty(struct page *page)
+{
+	return __set_page_dirty_nobuffers(page);
+}
+
+static int btrfs_permission(struct inode *inode, int mask)
+{
+	if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
+		return -EACCES;
+	return generic_permission(inode, mask, btrfs_check_acl);
+}
+
+static struct inode_operations btrfs_dir_inode_operations = {
+	.getattr	= btrfs_getattr,
+	.lookup		= btrfs_lookup,
+	.create		= btrfs_create,
+	.unlink		= btrfs_unlink,
+	.link		= btrfs_link,
+	.mkdir		= btrfs_mkdir,
+	.rmdir		= btrfs_rmdir,
+	.rename		= btrfs_rename,
+	.symlink	= btrfs_symlink,
+	.setattr	= btrfs_setattr,
+	.mknod		= btrfs_mknod,
+	.setxattr	= btrfs_setxattr,
+	.getxattr	= btrfs_getxattr,
+	.listxattr	= btrfs_listxattr,
+	.removexattr	= btrfs_removexattr,
+	.permission	= btrfs_permission,
+};
+static struct inode_operations btrfs_dir_ro_inode_operations = {
+	.lookup		= btrfs_lookup,
+	.permission	= btrfs_permission,
+};
+static struct file_operations btrfs_dir_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read		= generic_read_dir,
+	.readdir	= btrfs_real_readdir,
+	.unlocked_ioctl	= btrfs_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= btrfs_ioctl,
+#endif
+	.release        = btrfs_release_file,
+	.fsync		= btrfs_sync_file,
+};
+
+static struct extent_io_ops btrfs_extent_io_ops = {
+	.fill_delalloc = run_delalloc_range,
+	.submit_bio_hook = btrfs_submit_bio_hook,
+	.merge_bio_hook = btrfs_merge_bio_hook,
+	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
+	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
+	.writepage_start_hook = btrfs_writepage_start_hook,
+	.readpage_io_failed_hook = btrfs_io_failed_hook,
+	.set_bit_hook = btrfs_set_bit_hook,
+	.clear_bit_hook = btrfs_clear_bit_hook,
+};
+
+static struct address_space_operations btrfs_aops = {
+	.readpage	= btrfs_readpage,
+	.writepage	= btrfs_writepage,
+	.writepages	= btrfs_writepages,
+	.readpages	= btrfs_readpages,
+	.sync_page	= block_sync_page,
+	.bmap		= btrfs_bmap,
+	.direct_IO	= btrfs_direct_IO,
+	.invalidatepage = btrfs_invalidatepage,
+	.releasepage	= btrfs_releasepage,
+	.set_page_dirty	= btrfs_set_page_dirty,
+};
+
+static struct address_space_operations btrfs_symlink_aops = {
+	.readpage	= btrfs_readpage,
+	.writepage	= btrfs_writepage,
+	.invalidatepage = btrfs_invalidatepage,
+	.releasepage	= btrfs_releasepage,
+};
+
+static struct inode_operations btrfs_file_inode_operations = {
+	.truncate	= btrfs_truncate,
+	.getattr	= btrfs_getattr,
+	.setattr	= btrfs_setattr,
+	.setxattr	= btrfs_setxattr,
+	.getxattr	= btrfs_getxattr,
+	.listxattr      = btrfs_listxattr,
+	.removexattr	= btrfs_removexattr,
+	.permission	= btrfs_permission,
+	.fallocate	= btrfs_fallocate,
+};
+static struct inode_operations btrfs_special_inode_operations = {
+	.getattr	= btrfs_getattr,
+	.setattr	= btrfs_setattr,
+	.permission	= btrfs_permission,
+	.setxattr	= btrfs_setxattr,
+	.getxattr	= btrfs_getxattr,
+	.listxattr	= btrfs_listxattr,
+	.removexattr	= btrfs_removexattr,
+};
+static struct inode_operations btrfs_symlink_inode_operations = {
+	.readlink	= generic_readlink,
+	.follow_link	= page_follow_link_light,
+	.put_link	= page_put_link,
+	.permission	= btrfs_permission,
+};
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
new file mode 100644
index 0000000..c2aa33e
--- /dev/null
+++ b/fs/btrfs/ioctl.c
@@ -0,0 +1,1132 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bio.h>
+#include <linux/buffer_head.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/fsnotify.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+#include <linux/backing-dev.h>
+#include <linux/mount.h>
+#include <linux/mpage.h>
+#include <linux/namei.h>
+#include <linux/swap.h>
+#include <linux/writeback.h>
+#include <linux/statfs.h>
+#include <linux/compat.h>
+#include <linux/bit_spinlock.h>
+#include <linux/security.h>
+#include <linux/version.h>
+#include <linux/xattr.h>
+#include <linux/vmalloc.h>
+#include "compat.h"
+#include "ctree.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "btrfs_inode.h"
+#include "ioctl.h"
+#include "print-tree.h"
+#include "volumes.h"
+#include "locking.h"
+
+
+
+static noinline int create_subvol(struct btrfs_root *root,
+				  struct dentry *dentry,
+				  char *name, int namelen)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_key key;
+	struct btrfs_root_item root_item;
+	struct btrfs_inode_item *inode_item;
+	struct extent_buffer *leaf;
+	struct btrfs_root *new_root = root;
+	struct inode *dir;
+	int ret;
+	int err;
+	u64 objectid;
+	u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
+	u64 index = 0;
+	unsigned long nr = 1;
+
+	ret = btrfs_check_free_space(root, 1, 0);
+	if (ret)
+		goto fail_commit;
+
+	trans = btrfs_start_transaction(root, 1);
+	BUG_ON(!trans);
+
+	ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
+				       0, &objectid);
+	if (ret)
+		goto fail;
+
+	leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
+				      objectid, trans->transid, 0, 0, 0);
+	if (IS_ERR(leaf)) {
+		ret = PTR_ERR(leaf);
+		goto fail;
+	}
+
+	btrfs_set_header_nritems(leaf, 0);
+	btrfs_set_header_level(leaf, 0);
+	btrfs_set_header_bytenr(leaf, leaf->start);
+	btrfs_set_header_generation(leaf, trans->transid);
+	btrfs_set_header_owner(leaf, objectid);
+
+	write_extent_buffer(leaf, root->fs_info->fsid,
+			    (unsigned long)btrfs_header_fsid(leaf),
+			    BTRFS_FSID_SIZE);
+	btrfs_mark_buffer_dirty(leaf);
+
+	inode_item = &root_item.inode;
+	memset(inode_item, 0, sizeof(*inode_item));
+	inode_item->generation = cpu_to_le64(1);
+	inode_item->size = cpu_to_le64(3);
+	inode_item->nlink = cpu_to_le32(1);
+	inode_item->nbytes = cpu_to_le64(root->leafsize);
+	inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
+
+	btrfs_set_root_bytenr(&root_item, leaf->start);
+	btrfs_set_root_generation(&root_item, trans->transid);
+	btrfs_set_root_level(&root_item, 0);
+	btrfs_set_root_refs(&root_item, 1);
+	btrfs_set_root_used(&root_item, 0);
+	btrfs_set_root_last_snapshot(&root_item, 0);
+
+	memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
+	root_item.drop_level = 0;
+
+	btrfs_tree_unlock(leaf);
+	free_extent_buffer(leaf);
+	leaf = NULL;
+
+	btrfs_set_root_dirid(&root_item, new_dirid);
+
+	key.objectid = objectid;
+	key.offset = 1;
+	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
+	ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
+				&root_item);
+	if (ret)
+		goto fail;
+
+	/*
+	 * insert the directory item
+	 */
+	key.offset = (u64)-1;
+	dir = dentry->d_parent->d_inode;
+	ret = btrfs_set_inode_index(dir, &index);
+	BUG_ON(ret);
+
+	ret = btrfs_insert_dir_item(trans, root,
+				    name, namelen, dir->i_ino, &key,
+				    BTRFS_FT_DIR, index);
+	if (ret)
+		goto fail;
+
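+	/*
+	 * a directory's i_size counts each name twice: once for the dir
+	 * item and once for the dir index item
+	 */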
+	btrfs_i_size_write(dir, dir->i_size + namelen * 2);
+	ret = btrfs_update_inode(trans, root, dir);
+	BUG_ON(ret);
+
+	/* add the backref first */
+	ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
+				 objectid, BTRFS_ROOT_BACKREF_KEY,
+				 root->root_key.objectid,
+				 dir->i_ino, index, name, namelen);
+
+	BUG_ON(ret);
+
+	/* now add the forward ref */
+	ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
+				 root->root_key.objectid, BTRFS_ROOT_REF_KEY,
+				 objectid,
+				 dir->i_ino, index, name, namelen);
+
+	BUG_ON(ret);
+
+	ret = btrfs_commit_transaction(trans, root);
+	if (ret)
+		goto fail_commit;
+
+	new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
+	BUG_ON(!new_root);
+
+	trans = btrfs_start_transaction(new_root, 1);
+	BUG_ON(!trans);
+
+	ret = btrfs_create_subvol_root(trans, new_root, dentry, new_dirid,
+				       BTRFS_I(dir)->block_group);
+	if (ret)
+		goto fail;
+
+fail:
+	nr = trans->blocks_used;
+	err = btrfs_commit_transaction(trans, new_root);
+	if (err && !ret)
+		ret = err;
+fail_commit:
+	btrfs_btree_balance_dirty(root, nr);
+	return ret;
+}
+
+static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
+			   char *name, int namelen)
+{
+	struct btrfs_pending_snapshot *pending_snapshot;
+	struct btrfs_trans_handle *trans;
+	int ret = 0;
+	int err;
+	unsigned long nr = 0;
+
+	if (!root->ref_cows)
+		return -EINVAL;
+
+	ret = btrfs_check_free_space(root, 1, 0);
+	if (ret)
+		goto fail_unlock;
+
+	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
+	if (!pending_snapshot) {
+		ret = -ENOMEM;
+		goto fail_unlock;
+	}
+	pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS);
+	if (!pending_snapshot->name) {
+		ret = -ENOMEM;
+		kfree(pending_snapshot);
+		goto fail_unlock;
+	}
+	memcpy(pending_snapshot->name, name, namelen);
+	pending_snapshot->name[namelen] = '\0';
+	pending_snapshot->dentry = dentry;
+	trans = btrfs_start_transaction(root, 1);
+	BUG_ON(!trans);
+	pending_snapshot->root = root;
+	list_add(&pending_snapshot->list,
+		 &trans->transaction->pending_snapshots);
+	err = btrfs_commit_transaction(trans, root);
+
+fail_unlock:
+	btrfs_btree_balance_dirty(root, nr);
+	return ret;
+}
+
+/* copy of may_create in fs/namei.c() */
+static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
+{
+	if (child->d_inode)
+		return -EEXIST;
+	if (IS_DEADDIR(dir))
+		return -ENOENT;
+	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
+}
+
+/*
+ * Create a new subvolume below @parent.  This is largely modeled after
+ * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
+ * inside this filesystem so it's quite a bit simpler.
+ */
+static noinline int btrfs_mksubvol(struct path *parent, char *name,
+				   int mode, int namelen,
+				   struct btrfs_root *snap_src)
+{
+	struct dentry *dentry;
+	int error;
+
+	mutex_lock_nested(&parent->dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+
+	dentry = lookup_one_len(name, parent->dentry, namelen);
+	error = PTR_ERR(dentry);
+	if (IS_ERR(dentry))
+		goto out_unlock;
+
+	error = -EEXIST;
+	if (dentry->d_inode)
+		goto out_dput;
+
+	if (!IS_POSIXACL(parent->dentry->d_inode))
+		mode &= ~current->fs->umask;
+
+	error = mnt_want_write(parent->mnt);
+	if (error)
+		goto out_dput;
+
+	error = btrfs_may_create(parent->dentry->d_inode, dentry);
+	if (error)
+		goto out_drop_write;
+
+	/*
+	 * Actually perform the low-level subvolume creation after all
+	 * this VFS fuss.
+	 *
+	 * Eventually we want to pass in an inode under which we create this
+	 * subvolume, but for now all are under the filesystem root.
+	 *
+	 * Also we should pass on the mode eventually to allow creating new
+	 * subvolume with specific mode bits.
+	 */
+	if (snap_src) {
+		struct dentry *dir = dentry->d_parent;
+		struct dentry *test = dir->d_parent;
+		struct btrfs_path *path = btrfs_alloc_path();
+		int ret;
+		u64 test_oid;
+		u64 parent_oid = BTRFS_I(dir->d_inode)->root->root_key.objectid;
+
+		test_oid = snap_src->root_key.objectid;
+
+		ret = btrfs_find_root_ref(snap_src->fs_info->tree_root,
+					  path, parent_oid, test_oid);
+		if (ret == 0)
+			goto create;
+		btrfs_release_path(snap_src->fs_info->tree_root, path);
+
+		/* we need to make sure we aren't creating a directory loop
+		 * by taking a snapshot of something that has our current
+		 * subvol in its directory tree.  So, this loops through
+		 * the dentries and checks the forward refs for each subvolume
+		 * to see if it references the subvolume where we are
+		 * placing this new snapshot.
+		 */
+		while (1) {
+			if (!test ||
+			    dir == snap_src->fs_info->sb->s_root ||
+			    test == snap_src->fs_info->sb->s_root ||
+			    test->d_inode->i_sb != snap_src->fs_info->sb) {
+				break;
+			}
+			if (S_ISLNK(test->d_inode->i_mode)) {
+				printk(KERN_INFO "Btrfs symlink in snapshot "
+				       "path, failed\n");
+				error = -EMLINK;
+				btrfs_free_path(path);
+				goto out_drop_write;
+			}
+			test_oid =
+				BTRFS_I(test->d_inode)->root->root_key.objectid;
+			ret = btrfs_find_root_ref(snap_src->fs_info->tree_root,
+				  path, test_oid, parent_oid);
+			if (ret == 0) {
+				printk(KERN_INFO "Btrfs snapshot creation "
+				       "failed, looping\n");
+				error = -EMLINK;
+				btrfs_free_path(path);
+				goto out_drop_write;
+			}
+			btrfs_release_path(snap_src->fs_info->tree_root, path);
+			test = test->d_parent;
+		}
+create:
+		btrfs_free_path(path);
+		error = create_snapshot(snap_src, dentry, name, namelen);
+	} else {
+		error = create_subvol(BTRFS_I(parent->dentry->d_inode)->root,
+				      dentry, name, namelen);
+	}
+	if (error)
+		goto out_drop_write;
+
+	fsnotify_mkdir(parent->dentry->d_inode, dentry);
+out_drop_write:
+	mnt_drop_write(parent->mnt);
+out_dput:
+	dput(dentry);
+out_unlock:
+	mutex_unlock(&parent->dentry->d_inode->i_mutex);
+	return error;
+}
+
+
+static int btrfs_defrag_file(struct file *file)
+{
+	struct inode *inode = fdentry(file)->d_inode;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	struct btrfs_ordered_extent *ordered;
+	struct page *page;
+	unsigned long last_index;
+	unsigned long ra_pages = root->fs_info->bdi.ra_pages;
+	unsigned long total_read = 0;
+	u64 page_start;
+	u64 page_end;
+	unsigned long i;
+	int ret;
+
+	ret = btrfs_check_free_space(root, inode->i_size, 0);
+	if (ret)
+		return -ENOSPC;
+
+	mutex_lock(&inode->i_mutex);
+	last_index = inode->i_size >> PAGE_CACHE_SHIFT;
+	for (i = 0; i <= last_index; i++) {
+		if (total_read % ra_pages == 0) {
+			btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i,
+				       min(last_index, i + ra_pages - 1));
+		}
+		total_read++;
+again:
+		page = grab_cache_page(inode->i_mapping, i);
+		if (!page)
+			goto out_unlock;
+		if (!PageUptodate(page)) {
+			btrfs_readpage(NULL, page);
+			lock_page(page);
+			if (!PageUptodate(page)) {
+				unlock_page(page);
+				page_cache_release(page);
+				goto out_unlock;
+			}
+		}
+
+		wait_on_page_writeback(page);
+
+		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
+		page_end = page_start + PAGE_CACHE_SIZE - 1;
+		lock_extent(io_tree, page_start, page_end, GFP_NOFS);
+
+		ordered = btrfs_lookup_ordered_extent(inode, page_start);
+		if (ordered) {
+			unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+			unlock_page(page);
+			page_cache_release(page);
+			btrfs_start_ordered_extent(inode, ordered, 1);
+			btrfs_put_ordered_extent(ordered);
+			goto again;
+		}
+		set_page_extent_mapped(page);
+
+		/*
+		 * this makes sure page_mkwrite is called on the
+		 * page if it is dirtied again later
+		 */
+		clear_page_dirty_for_io(page);
+
+		btrfs_set_extent_delalloc(inode, page_start, page_end);
+
+		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+		set_page_dirty(page);
+		unlock_page(page);
+		page_cache_release(page);
+		balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
+	}
+
+out_unlock:
+	mutex_unlock(&inode->i_mutex);
+	return 0;
+}
+
+/*
+ * Called inside transaction, so use GFP_NOFS
+ */
+
+static int btrfs_ioctl_resize(struct btrfs_root *root, void __user *arg)
+{
+	u64 new_size;
+	u64 old_size;
+	u64 devid = 1;
+	struct btrfs_ioctl_vol_args *vol_args;
+	struct btrfs_trans_handle *trans;
+	struct btrfs_device *device = NULL;
+	char *sizestr;
+	char *devstr = NULL;
+	int ret = 0;
+	int namelen;
+	int mod = 0;
+
+	if (root->fs_info->sb->s_flags & MS_RDONLY)
+		return -EROFS;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
+
+	if (!vol_args)
+		return -ENOMEM;
+
+	if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
+	namelen = strlen(vol_args->name);
+
+	mutex_lock(&root->fs_info->volume_mutex);
+	sizestr = vol_args->name;
+	devstr = strchr(sizestr, ':');
+	if (devstr) {
+		char *end;
+		sizestr = devstr + 1;
+		*devstr = '\0';
+		devstr = vol_args->name;
+		devid = simple_strtoull(devstr, &end, 10);
+		printk(KERN_INFO "resizing devid %llu\n", devid);
+	}
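+	/*
+	 * the argument is "[devid:][+|-]size" or "[devid:]max"; e.g. a
+	 * hypothetical "2:-4g" shrinks devid 2 by 4GB (the size part is
+	 * parsed below)
+	 */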
+	device = btrfs_find_device(root, devid, NULL, NULL);
+	if (!device) {
+		printk(KERN_INFO "resizer unable to find device %llu\n", devid);
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+	if (!strcmp(sizestr, "max"))
+		new_size = device->bdev->bd_inode->i_size;
+	else {
+		if (sizestr[0] == '-') {
+			mod = -1;
+			sizestr++;
+		} else if (sizestr[0] == '+') {
+			mod = 1;
+			sizestr++;
+		}
+		new_size = btrfs_parse_size(sizestr);
+		if (new_size == 0) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+	}
+
+	old_size = device->total_bytes;
+
+	if (mod < 0) {
+		if (new_size > old_size) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+		new_size = old_size - new_size;
+	} else if (mod > 0) {
+		new_size = old_size + new_size;
+	}
+
+	if (new_size < 256 * 1024 * 1024) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+	if (new_size > device->bdev->bd_inode->i_size) {
+		ret = -EFBIG;
+		goto out_unlock;
+	}
+
+	do_div(new_size, root->sectorsize);
+	new_size *= root->sectorsize;
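+	/* the two lines above round new_size down to a sector boundary */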
+
+	printk(KERN_INFO "new size for %s is %llu\n",
+		device->name, (unsigned long long)new_size);
+
+	if (new_size > old_size) {
+		trans = btrfs_start_transaction(root, 1);
+		ret = btrfs_grow_device(trans, device, new_size);
+		btrfs_commit_transaction(trans, root);
+	} else {
+		ret = btrfs_shrink_device(device, new_size);
+	}
+
+out_unlock:
+	mutex_unlock(&root->fs_info->volume_mutex);
+out:
+	kfree(vol_args);
+	return ret;
+}
+
+static noinline int btrfs_ioctl_snap_create(struct file *file,
+					    void __user *arg, int subvol)
+{
+	struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+	struct btrfs_ioctl_vol_args *vol_args;
+	struct btrfs_dir_item *di;
+	struct btrfs_path *path;
+	struct file *src_file;
+	u64 root_dirid;
+	int namelen;
+	int ret = 0;
+
+	if (root->fs_info->sb->s_flags & MS_RDONLY)
+		return -EROFS;
+
+	vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
+
+	if (!vol_args)
+		return -ENOMEM;
+
+	if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
+	namelen = strlen(vol_args->name);
+	if (strchr(vol_args->name, '/')) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	path = btrfs_alloc_path();
+	if (!path) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	root_dirid = root->fs_info->sb->s_root->d_inode->i_ino;
+	di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root,
+			    path, root_dirid,
+			    vol_args->name, namelen, 0);
+	btrfs_free_path(path);
+
+	if (di && !IS_ERR(di)) {
+		ret = -EEXIST;
+		goto out;
+	}
+
+	if (IS_ERR(di)) {
+		ret = PTR_ERR(di);
+		goto out;
+	}
+
+	if (subvol) {
+		ret = btrfs_mksubvol(&file->f_path, vol_args->name,
+				     file->f_path.dentry->d_inode->i_mode,
+				     namelen, NULL);
+	} else {
+		struct inode *src_inode;
+		src_file = fget(vol_args->fd);
+		if (!src_file) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		src_inode = src_file->f_path.dentry->d_inode;
+		if (src_inode->i_sb != file->f_path.dentry->d_inode->i_sb) {
+			printk(KERN_INFO "btrfs: Snapshot src from "
+			       "another FS\n");
+			ret = -EINVAL;
+			fput(src_file);
+			goto out;
+		}
+		ret = btrfs_mksubvol(&file->f_path, vol_args->name,
+			     file->f_path.dentry->d_inode->i_mode,
+			     namelen, BTRFS_I(src_inode)->root);
+		fput(src_file);
+	}
+
+out:
+	kfree(vol_args);
+	return ret;
+}
+
+static int btrfs_ioctl_defrag(struct file *file)
+{
+	struct inode *inode = fdentry(file)->d_inode;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	int ret;
+
+	ret = mnt_want_write(file->f_path.mnt);
+	if (ret)
+		return ret;
+
+	switch (inode->i_mode & S_IFMT) {
+	case S_IFDIR:
+		if (!capable(CAP_SYS_ADMIN)) {
+			ret = -EPERM;
+			goto out;
+		}
+		btrfs_defrag_root(root, 0);
+		btrfs_defrag_root(root->fs_info->extent_root, 0);
+		break;
+	case S_IFREG:
+		if (!(file->f_mode & FMODE_WRITE)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		btrfs_defrag_file(file);
+		break;
+	}
+out:
+	mnt_drop_write(file->f_path.mnt);
+	return ret;
+}
+
+static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
+{
+	struct btrfs_ioctl_vol_args *vol_args;
+	int ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
+
+	if (!vol_args)
+		return -ENOMEM;
+
+	if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
+		ret = -EFAULT;
+		goto out;
+	}
+	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
+	ret = btrfs_init_new_device(root, vol_args->name);
+
+out:
+	kfree(vol_args);
+	return ret;
+}
+
+static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
+{
+	struct btrfs_ioctl_vol_args *vol_args;
+	int ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (root->fs_info->sb->s_flags & MS_RDONLY)
+		return -EROFS;
+
+	vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
+
+	if (!vol_args)
+		return -ENOMEM;
+
+	if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
+		ret = -EFAULT;
+		goto out;
+	}
+	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
+	ret = btrfs_rm_device(root, vol_args->name);
+
+out:
+	kfree(vol_args);
+	return ret;
+}
+
+static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
+		u64 off, u64 olen, u64 destoff)
+{
+	struct inode *inode = fdentry(file)->d_inode;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct file *src_file;
+	struct inode *src;
+	struct btrfs_trans_handle *trans;
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	char *buf;
+	struct btrfs_key key;
+	u32 nritems;
+	int slot;
+	int ret;
+	u64 len = olen;
+	u64 bs = root->fs_info->sb->s_blocksize;
+	u64 hint_byte;
+
+	/*
+	 * TODO:
+	 * - split compressed inline extents.  annoying: we need to
+	 *   decompress into destination's address_space (the file offset
+	 *   may change, so source mapping won't do), then recompress (or
+	 *   otherwise reinsert) a subrange.
+	 * - allow ranges within the same file to be cloned (provided
+	 *   they don't overlap)?
+	 */
+
+	/* the destination must be opened for writing */
+	if (!(file->f_mode & FMODE_WRITE))
+		return -EINVAL;
+
+	ret = mnt_want_write(file->f_path.mnt);
+	if (ret)
+		return ret;
+
+	src_file = fget(srcfd);
+	if (!src_file) {
+		ret = -EBADF;
+		goto out_drop_write;
+	}
+	src = src_file->f_dentry->d_inode;
+
+	ret = -EINVAL;
+	if (src == inode)
+		goto out_fput;
+
+	ret = -EISDIR;
+	if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
+		goto out_fput;
+
+	ret = -EXDEV;
+	if (src->i_sb != inode->i_sb || BTRFS_I(src)->root != root)
+		goto out_fput;
+
+	ret = -ENOMEM;
+	buf = vmalloc(btrfs_level_size(root, 0));
+	if (!buf)
+		goto out_fput;
+
+	path = btrfs_alloc_path();
+	if (!path) {
+		vfree(buf);
+		goto out_fput;
+	}
+	path->reada = 2;
+
+	if (inode < src) {
+		mutex_lock(&inode->i_mutex);
+		mutex_lock(&src->i_mutex);
+	} else {
+		mutex_lock(&src->i_mutex);
+		mutex_lock(&inode->i_mutex);
+	}
+
+	/* determine range to clone */
+	ret = -EINVAL;
+	if (off >= src->i_size || off + len > src->i_size)
+		goto out_unlock;
+	if (len == 0)
+		olen = len = src->i_size - off;
+	/* if we extend to eof, continue to block boundary */
+	if (off + len == src->i_size)
+		len = ((src->i_size + bs-1) & ~(bs-1)) - off;
+
+	/* verify the end result is block aligned */
+	if ((off & (bs-1)) ||
+	    ((off + len) & (bs-1)))
+		goto out_unlock;
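+	/*
+	 * e.g. with a 4K block size, cloning off=0 len=6000 from an 8000
+	 * byte file fails with -EINVAL, while len=8000 (the whole file) is
+	 * rounded up to 8192 above and allowed (illustrative numbers only)
+	 */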
+
+	/* do any pending delalloc/csum calc on src, one way or
+	 * another, and lock file content
+	 */
+	while (1) {
+		struct btrfs_ordered_extent *ordered;
+		lock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
+		ordered = btrfs_lookup_first_ordered_extent(inode, off+len);
+		if (BTRFS_I(src)->delalloc_bytes == 0 && !ordered)
+			break;
+		unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
+		if (ordered)
+			btrfs_put_ordered_extent(ordered);
+		btrfs_wait_ordered_range(src, off, off+len);
+	}
+
+	trans = btrfs_start_transaction(root, 1);
+	BUG_ON(!trans);
+
+	/* punch hole in destination first */
+	btrfs_drop_extents(trans, root, inode, off, off+len, 0, &hint_byte);
+
+	/* clone data */
+	key.objectid = src->i_ino;
+	key.type = BTRFS_EXTENT_DATA_KEY;
+	key.offset = 0;
+
+	while (1) {
+		/*
+		 * note the key will change type as we walk through the
+		 * tree.
+		 */
+		ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
+		if (ret < 0)
+			goto out;
+
+		nritems = btrfs_header_nritems(path->nodes[0]);
+		if (path->slots[0] >= nritems) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0)
+				goto out;
+			if (ret > 0)
+				break;
+			nritems = btrfs_header_nritems(path->nodes[0]);
+		}
+		leaf = path->nodes[0];
+		slot = path->slots[0];
+
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
+		    key.objectid != src->i_ino)
+			break;
+
+		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
+			struct btrfs_file_extent_item *extent;
+			int type;
+			u32 size;
+			struct btrfs_key new_key;
+			u64 disko = 0, diskl = 0;
+			u64 datao = 0, datal = 0;
+			u8 comp;
+
+			size = btrfs_item_size_nr(leaf, slot);
+			read_extent_buffer(leaf, buf,
+					   btrfs_item_ptr_offset(leaf, slot),
+					   size);
+
+			extent = btrfs_item_ptr(leaf, slot,
+						struct btrfs_file_extent_item);
+			comp = btrfs_file_extent_compression(leaf, extent);
+			type = btrfs_file_extent_type(leaf, extent);
+			if (type == BTRFS_FILE_EXTENT_REG) {
+				disko = btrfs_file_extent_disk_bytenr(leaf,
+								      extent);
+				diskl = btrfs_file_extent_disk_num_bytes(leaf,
+								 extent);
+				datao = btrfs_file_extent_offset(leaf, extent);
+				datal = btrfs_file_extent_num_bytes(leaf,
+								    extent);
+			} else if (type == BTRFS_FILE_EXTENT_INLINE) {
+				/* take upper bound, may be compressed */
+				datal = btrfs_file_extent_ram_bytes(leaf,
+								    extent);
+			}
+			btrfs_release_path(root, path);
+
+			if (key.offset + datal < off ||
+			    key.offset >= off+len)
+				goto next;
+
+			memcpy(&new_key, &key, sizeof(new_key));
+			new_key.objectid = inode->i_ino;
+			new_key.offset = key.offset + destoff - off;
+
+			if (type == BTRFS_FILE_EXTENT_REG) {
+				ret = btrfs_insert_empty_item(trans, root, path,
+							      &new_key, size);
+				if (ret)
+					goto out;
+
+				leaf = path->nodes[0];
+				slot = path->slots[0];
+				write_extent_buffer(leaf, buf,
+					    btrfs_item_ptr_offset(leaf, slot),
+					    size);
+
+				extent = btrfs_item_ptr(leaf, slot,
+						struct btrfs_file_extent_item);
+
+				if (off > key.offset) {
+					datao += off - key.offset;
+					datal -= off - key.offset;
+				}
+				if (key.offset + datal > off + len)
+					datal = off + len - key.offset;
+				/* disko == 0 means it's a hole */
+				if (!disko)
+					datao = 0;
+
+				btrfs_set_file_extent_offset(leaf, extent,
+							     datao);
+				btrfs_set_file_extent_num_bytes(leaf, extent,
+								datal);
+				if (disko) {
+					inode_add_bytes(inode, datal);
+					ret = btrfs_inc_extent_ref(trans, root,
+						   disko, diskl, leaf->start,
+						   root->root_key.objectid,
+						   trans->transid,
+						   inode->i_ino);
+					BUG_ON(ret);
+				}
+			} else if (type == BTRFS_FILE_EXTENT_INLINE) {
+				u64 skip = 0;
+				u64 trim = 0;
+				if (off > key.offset) {
+					skip = off - key.offset;
+					new_key.offset += skip;
+				}
+
+				if (key.offset + datal > off+len)
+					trim = key.offset + datal - (off+len);
+
+				if (comp && (skip || trim)) {
+					ret = -EINVAL;
+					goto out;
+				}
+				size -= skip + trim;
+				datal -= skip + trim;
+				ret = btrfs_insert_empty_item(trans, root, path,
+							      &new_key, size);
+				if (ret)
+					goto out;
+
+				if (skip) {
+					u32 start =
+					  btrfs_file_extent_calc_inline_size(0);
+					memmove(buf+start, buf+start+skip,
+						datal);
+				}
+
+				leaf = path->nodes[0];
+				slot = path->slots[0];
+				write_extent_buffer(leaf, buf,
+					    btrfs_item_ptr_offset(leaf, slot),
+					    size);
+				inode_add_bytes(inode, datal);
+			}
+
+			btrfs_mark_buffer_dirty(leaf);
+		}
+
+next:
+		btrfs_release_path(root, path);
+		key.offset++;
+	}
+	ret = 0;
+out:
+	btrfs_release_path(root, path);
+	if (ret == 0) {
+		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		if (destoff + olen > inode->i_size)
+			btrfs_i_size_write(inode, destoff + olen);
+		BTRFS_I(inode)->flags = BTRFS_I(src)->flags;
+		ret = btrfs_update_inode(trans, root, inode);
+	}
+	btrfs_end_transaction(trans, root);
+	unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
+	if (ret)
+		vmtruncate(inode, 0);
+out_unlock:
+	mutex_unlock(&src->i_mutex);
+	mutex_unlock(&inode->i_mutex);
+	vfree(buf);
+	btrfs_free_path(path);
+out_fput:
+	fput(src_file);
+out_drop_write:
+	mnt_drop_write(file->f_path.mnt);
+	return ret;
+}
+
+static long btrfs_ioctl_clone_range(struct file *file, void __user *argp)
+{
+	struct btrfs_ioctl_clone_range_args args;
+
+	if (copy_from_user(&args, argp, sizeof(args)))
+		return -EFAULT;
+	return btrfs_ioctl_clone(file, args.src_fd, args.src_offset,
+				 args.src_length, args.dest_offset);
+}
+
+/*
+ * there are many ways the trans_start and trans_end ioctls can lead
+ * to deadlocks.  They should only be used by applications that
+ * basically own the machine, and have a very in-depth understanding
+ * of all the possible deadlocks and enospc problems.
+ */
+static long btrfs_ioctl_trans_start(struct file *file)
+{
+	struct inode *inode = fdentry(file)->d_inode;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_trans_handle *trans;
+	int ret = 0;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (file->private_data) {
+		ret = -EINPROGRESS;
+		goto out;
+	}
+
+	ret = mnt_want_write(file->f_path.mnt);
+	if (ret)
+		goto out;
+
+	mutex_lock(&root->fs_info->trans_mutex);
+	root->fs_info->open_ioctl_trans++;
+	mutex_unlock(&root->fs_info->trans_mutex);
+
+	trans = btrfs_start_ioctl_transaction(root, 0);
+	if (trans)
+		file->private_data = trans;
+	else
+		ret = -ENOMEM;
+	/*printk(KERN_INFO "btrfs_ioctl_trans_start on %p\n", file);*/
+out:
+	return ret;
+}
+
+/*
+ * there are many ways the trans_start and trans_end ioctls can lead
+ * to deadlocks.  They should only be used by applications that
+ * basically own the machine, and have a very in-depth understanding
+ * of all the possible deadlocks and enospc problems.
+ */
+long btrfs_ioctl_trans_end(struct file *file)
+{
+	struct inode *inode = fdentry(file)->d_inode;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_trans_handle *trans;
+	int ret = 0;
+
+	trans = file->private_data;
+	if (!trans) {
+		ret = -EINVAL;
+		goto out;
+	}
+	btrfs_end_transaction(trans, root);
+	file->private_data = NULL;
+
+	mutex_lock(&root->fs_info->trans_mutex);
+	root->fs_info->open_ioctl_trans--;
+	mutex_unlock(&root->fs_info->trans_mutex);
+
+	mnt_drop_write(file->f_path.mnt);
+
+out:
+	return ret;
+}
+
+long btrfs_ioctl(struct file *file, unsigned int
+		cmd, unsigned long arg)
+{
+	struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+	void __user *argp = (void __user *)arg;
+
+	switch (cmd) {
+	case BTRFS_IOC_SNAP_CREATE:
+		return btrfs_ioctl_snap_create(file, argp, 0);
+	case BTRFS_IOC_SUBVOL_CREATE:
+		return btrfs_ioctl_snap_create(file, argp, 1);
+	case BTRFS_IOC_DEFRAG:
+		return btrfs_ioctl_defrag(file);
+	case BTRFS_IOC_RESIZE:
+		return btrfs_ioctl_resize(root, argp);
+	case BTRFS_IOC_ADD_DEV:
+		return btrfs_ioctl_add_dev(root, argp);
+	case BTRFS_IOC_RM_DEV:
+		return btrfs_ioctl_rm_dev(root, argp);
+	case BTRFS_IOC_BALANCE:
+		return btrfs_balance(root->fs_info->dev_root);
+	case BTRFS_IOC_CLONE:
+		return btrfs_ioctl_clone(file, arg, 0, 0, 0);
+	case BTRFS_IOC_CLONE_RANGE:
+		return btrfs_ioctl_clone_range(file, argp);
+	case BTRFS_IOC_TRANS_START:
+		return btrfs_ioctl_trans_start(file);
+	case BTRFS_IOC_TRANS_END:
+		return btrfs_ioctl_trans_end(file);
+	case BTRFS_IOC_SYNC:
+		btrfs_sync_fs(file->f_dentry->d_sb, 1);
+		return 0;
+	}
+
+	return -ENOTTY;
+}
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
new file mode 100644
index 0000000..b320b10
--- /dev/null
+++ b/fs/btrfs/ioctl.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __IOCTL_
+#define __IOCTL_
+#include <linux/ioctl.h>
+
+#define BTRFS_IOCTL_MAGIC 0x94
+#define BTRFS_VOL_NAME_MAX 255
+#define BTRFS_PATH_NAME_MAX 4087
+
+/* the size of this struct should be 4k */
+struct btrfs_ioctl_vol_args {
+	__s64 fd;
+	char name[BTRFS_PATH_NAME_MAX + 1];
+};
+
+struct btrfs_ioctl_clone_range_args {
+	__s64 src_fd;
+	__u64 src_offset, src_length;
+	__u64 dest_offset;
+};
+
+#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
+				   struct btrfs_ioctl_vol_args)
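+/*
+ * hypothetical userspace sketch (illustrative only): snapshot the
+ * subvolume open as src_fd under the directory the ioctl is issued on:
+ *
+ *	struct btrfs_ioctl_vol_args args = { .fd = src_fd };
+ *	strcpy(args.name, "snap1");
+ *	ioctl(destdir_fd, BTRFS_IOC_SNAP_CREATE, &args);
+ */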
+#define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
+				   struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_RESIZE _IOW(BTRFS_IOCTL_MAGIC, 3, \
+				   struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_SCAN_DEV _IOW(BTRFS_IOCTL_MAGIC, 4, \
+				   struct btrfs_ioctl_vol_args)
+/* trans start and trans end are dangerous, and only for
+ * use by applications that know how to avoid the
+ * resulting deadlocks
+ */
+#define BTRFS_IOC_TRANS_START  _IO(BTRFS_IOCTL_MAGIC, 6)
+#define BTRFS_IOC_TRANS_END    _IO(BTRFS_IOCTL_MAGIC, 7)
+#define BTRFS_IOC_SYNC         _IO(BTRFS_IOCTL_MAGIC, 8)
+
+#define BTRFS_IOC_CLONE        _IOW(BTRFS_IOCTL_MAGIC, 9, int)
+#define BTRFS_IOC_ADD_DEV _IOW(BTRFS_IOCTL_MAGIC, 10, \
+				   struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_RM_DEV _IOW(BTRFS_IOCTL_MAGIC, 11, \
+				   struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_BALANCE _IOW(BTRFS_IOCTL_MAGIC, 12, \
+				   struct btrfs_ioctl_vol_args)
+
+#define BTRFS_IOC_CLONE_RANGE _IOW(BTRFS_IOCTL_MAGIC, 13, \
+				  struct btrfs_ioctl_clone_range_args)
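+/*
+ * hypothetical userspace sketch (illustrative only): reflink all of
+ * src_fd into dest_fd; src_length == 0 means "to the end of the source
+ * file", and offsets/length must be block aligned (see
+ * btrfs_ioctl_clone()):
+ *
+ *	struct btrfs_ioctl_clone_range_args a = {
+ *		.src_fd = src_fd, .src_offset = 0,
+ *		.src_length = 0, .dest_offset = 0,
+ *	};
+ *	ioctl(dest_fd, BTRFS_IOC_CLONE_RANGE, &a);
+ */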
+
+#define BTRFS_IOC_SUBVOL_CREATE _IOW(BTRFS_IOCTL_MAGIC, 14, \
+				   struct btrfs_ioctl_vol_args)
+
+#endif
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
new file mode 100644
index 0000000..39bae77
--- /dev/null
+++ b/fs/btrfs/locking.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2008 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+#include <linux/sched.h>
+#include <linux/gfp.h>
+#include <linux/pagemap.h>
+#include <linux/spinlock.h>
+#include <linux/page-flags.h>
+#include <asm/bug.h>
+#include "ctree.h"
+#include "extent_io.h"
+#include "locking.h"
+
+/*
+ * locks the per-buffer mutex in an extent buffer.  This uses adaptive locks
+ * and the spin is not tuned very extensively.  The spinning does make a big
+ * difference in almost every workload, but spinning for the right amount of
+ * time needs some help.
+ *
+ * In general, we want to spin as long as the lock holder is doing btree
+ * searches, and we should give up if they are in more expensive code.
+ */
+
+int btrfs_tree_lock(struct extent_buffer *eb)
+{
+	int i;
+
+	if (mutex_trylock(&eb->mutex))
+		return 0;
+	for (i = 0; i < 512; i++) {
+		cpu_relax();
+		if (mutex_trylock(&eb->mutex))
+			return 0;
+	}
+	cpu_relax();
+	mutex_lock_nested(&eb->mutex, BTRFS_MAX_LEVEL - btrfs_header_level(eb));
+	return 0;
+}
+
+int btrfs_try_tree_lock(struct extent_buffer *eb)
+{
+	return mutex_trylock(&eb->mutex);
+}
+
+int btrfs_tree_unlock(struct extent_buffer *eb)
+{
+	mutex_unlock(&eb->mutex);
+	return 0;
+}
+
+int btrfs_tree_locked(struct extent_buffer *eb)
+{
+	return mutex_is_locked(&eb->mutex);
+}
+
+/*
+ * btrfs_search_slot uses this to decide if it should drop its locks
+ * before doing something expensive like allocating free blocks for cow.
+ */
+int btrfs_path_lock_waiting(struct btrfs_path *path, int level)
+{
+	int i;
+	struct extent_buffer *eb;
+	for (i = level; i <= level + 1 && i < BTRFS_MAX_LEVEL; i++) {
+		eb = path->nodes[i];
+		if (!eb)
+			break;
+		smp_mb();
+		if (!list_empty(&eb->mutex.wait_list))
+			return 1;
+	}
+	return 0;
+}
+
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
new file mode 100644
index 0000000..bc1faef
--- /dev/null
+++ b/fs/btrfs/locking.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2008 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __BTRFS_LOCKING_
+#define __BTRFS_LOCKING_
+
+int btrfs_tree_lock(struct extent_buffer *eb);
+int btrfs_tree_unlock(struct extent_buffer *eb);
+int btrfs_tree_locked(struct extent_buffer *eb);
+int btrfs_try_tree_lock(struct extent_buffer *eb);
+int btrfs_path_lock_waiting(struct btrfs_path *path, int level);
+#endif
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
new file mode 100644
index 0000000..a209401
--- /dev/null
+++ b/fs/btrfs/ordered-data.c
@@ -0,0 +1,730 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/writeback.h>
+#include <linux/pagevec.h>
+#include "ctree.h"
+#include "transaction.h"
+#include "btrfs_inode.h"
+#include "extent_io.h"
+
+static u64 entry_end(struct btrfs_ordered_extent *entry)
+{
+	if (entry->file_offset + entry->len < entry->file_offset)
+		return (u64)-1;
+	return entry->file_offset + entry->len;
+}
+
+/* returns NULL if the insertion worked, otherwise it returns the existing
+ * node that the new entry overlaps
+ */
+static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
+				   struct rb_node *node)
+{
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct btrfs_ordered_extent *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
+
+		if (file_offset < entry->file_offset)
+			p = &(*p)->rb_left;
+		else if (file_offset >= entry_end(entry))
+			p = &(*p)->rb_right;
+		else
+			return parent;
+	}
+
+	rb_link_node(node, parent, p);
+	rb_insert_color(node, root);
+	return NULL;
+}
+
+/*
+ * look for a given offset in the tree, and if it can't be found return the
+ * first lesser offset
+ */
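+/*
+ * for example (illustrative): with ordered extents covering [0, 4K) and
+ * [8K, 12K), a search for offset 6K returns NULL and sets *prev_ret to
+ * the [0, 4K) entry
+ */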
+static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
+				     struct rb_node **prev_ret)
+{
+	struct rb_node *n = root->rb_node;
+	struct rb_node *prev = NULL;
+	struct rb_node *test;
+	struct btrfs_ordered_extent *entry;
+	struct btrfs_ordered_extent *prev_entry = NULL;
+
+	while (n) {
+		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
+		prev = n;
+		prev_entry = entry;
+
+		if (file_offset < entry->file_offset)
+			n = n->rb_left;
+		else if (file_offset >= entry_end(entry))
+			n = n->rb_right;
+		else
+			return n;
+	}
+	if (!prev_ret)
+		return NULL;
+
+	while (prev && file_offset >= entry_end(prev_entry)) {
+		test = rb_next(prev);
+		if (!test)
+			break;
+		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
+				      rb_node);
+		if (file_offset < entry_end(prev_entry))
+			break;
+
+		prev = test;
+	}
+	if (prev)
+		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
+				      rb_node);
+	while (prev && file_offset < entry_end(prev_entry)) {
+		test = rb_prev(prev);
+		if (!test)
+			break;
+		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
+				      rb_node);
+		prev = test;
+	}
+	*prev_ret = prev;
+	return NULL;
+}
+
+/*
+ * helper to check if a given offset is inside a given entry
+ */
+static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
+{
+	if (file_offset < entry->file_offset ||
+	    entry->file_offset + entry->len <= file_offset)
+		return 0;
+	return 1;
+}
+
+/*
+ * find the first ordered struct that has this offset, otherwise
+ * the first one less than this offset
+ */
+static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
+					  u64 file_offset)
+{
+	struct rb_root *root = &tree->tree;
+	struct rb_node *prev;
+	struct rb_node *ret;
+	struct btrfs_ordered_extent *entry;
+
+	if (tree->last) {
+		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
+				 rb_node);
+		if (offset_in_entry(entry, file_offset))
+			return tree->last;
+	}
+	ret = __tree_search(root, file_offset, &prev);
+	if (!ret)
+		ret = prev;
+	if (ret)
+		tree->last = ret;
+	return ret;
+}
+
+/* allocate and add a new ordered_extent into the per-inode tree.
+ * file_offset is the logical offset in the file
+ *
+ * start is the disk block number of an extent already reserved in the
+ * extent allocation tree
+ *
+ * len is the length of the extent
+ *
+ * This also sets the EXTENT_ORDERED bit on the range in the inode.
+ *
+ * The tree is given a single reference on the ordered extent that was
+ * inserted.
+ */
+int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
+			     u64 start, u64 len, u64 disk_len, int type)
+{
+	struct btrfs_ordered_inode_tree *tree;
+	struct rb_node *node;
+	struct btrfs_ordered_extent *entry;
+
+	tree = &BTRFS_I(inode)->ordered_tree;
+	entry = kzalloc(sizeof(*entry), GFP_NOFS);
+	if (!entry)
+		return -ENOMEM;
+
+	mutex_lock(&tree->mutex);
+	entry->file_offset = file_offset;
+	entry->start = start;
+	entry->len = len;
+	entry->disk_len = disk_len;
+	entry->inode = inode;
+	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
+		set_bit(type, &entry->flags);
+
+	/* one ref for the tree */
+	atomic_set(&entry->refs, 1);
+	init_waitqueue_head(&entry->wait);
+	INIT_LIST_HEAD(&entry->list);
+	INIT_LIST_HEAD(&entry->root_extent_list);
+
+	node = tree_insert(&tree->tree, file_offset,
+			   &entry->rb_node);
+	BUG_ON(node);
+
+	set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset,
+			   entry_end(entry) - 1, GFP_NOFS);
+
+	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
+	list_add_tail(&entry->root_extent_list,
+		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
+	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
+
+	mutex_unlock(&tree->mutex);
+	BUG_ON(node);
+	return 0;
+}
+
+/*
+ * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
+ * when an ordered extent is finished.  If the list covers more than one
+ * ordered extent, it is split across multiples.
+ */
+int btrfs_add_ordered_sum(struct inode *inode,
+			  struct btrfs_ordered_extent *entry,
+			  struct btrfs_ordered_sum *sum)
+{
+	struct btrfs_ordered_inode_tree *tree;
+
+	tree = &BTRFS_I(inode)->ordered_tree;
+	mutex_lock(&tree->mutex);
+	list_add_tail(&sum->list, &entry->list);
+	mutex_unlock(&tree->mutex);
+	return 0;
+}
+
+/*
+ * this is used to account for finished IO across a given range
+ * of the file.  The IO should not span ordered extents.  If
+ * a given ordered_extent is completely done, 1 is returned, otherwise
+ * 0.
+ *
+ * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
+ * to make sure this function only returns 1 once for a given ordered extent.
+ */
+int btrfs_dec_test_ordered_pending(struct inode *inode,
+				   u64 file_offset, u64 io_size)
+{
+	struct btrfs_ordered_inode_tree *tree;
+	struct rb_node *node;
+	struct btrfs_ordered_extent *entry;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	int ret;
+
+	tree = &BTRFS_I(inode)->ordered_tree;
+	mutex_lock(&tree->mutex);
+	clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1,
+			     GFP_NOFS);
+	node = tree_search(tree, file_offset);
+	if (!node) {
+		ret = 1;
+		goto out;
+	}
+
+	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+	if (!offset_in_entry(entry, file_offset)) {
+		ret = 1;
+		goto out;
+	}
+
+	ret = test_range_bit(io_tree, entry->file_offset,
+			     entry->file_offset + entry->len - 1,
+			     EXTENT_ORDERED, 0);
+	if (ret == 0)
+		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
+out:
+	mutex_unlock(&tree->mutex);
+	return ret == 0;
+}
+
+/*
+ * used to drop a reference on an ordered extent.  This will free
+ * the extent if the last reference is dropped
+ */
+int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
+{
+	struct list_head *cur;
+	struct btrfs_ordered_sum *sum;
+
+	if (atomic_dec_and_test(&entry->refs)) {
+		while (!list_empty(&entry->list)) {
+			cur = entry->list.next;
+			sum = list_entry(cur, struct btrfs_ordered_sum, list);
+			list_del(&sum->list);
+			kfree(sum);
+		}
+		kfree(entry);
+	}
+	return 0;
+}
+
+/*
+ * remove an ordered extent from the tree.  No references are dropped,
+ * but anyone waiting on this extent is woken up.
+ */
+int btrfs_remove_ordered_extent(struct inode *inode,
+				struct btrfs_ordered_extent *entry)
+{
+	struct btrfs_ordered_inode_tree *tree;
+	struct rb_node *node;
+
+	tree = &BTRFS_I(inode)->ordered_tree;
+	mutex_lock(&tree->mutex);
+	node = &entry->rb_node;
+	rb_erase(node, &tree->tree);
+	tree->last = NULL;
+	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
+
+	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
+	list_del_init(&entry->root_extent_list);
+	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
+
+	mutex_unlock(&tree->mutex);
+	wake_up(&entry->wait);
+	return 0;
+}
+
+/*
+ * wait for all the ordered extents in a root.  This is done when balancing
+ * space between drives.
+ */
+int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only)
+{
+	struct list_head splice;
+	struct list_head *cur;
+	struct btrfs_ordered_extent *ordered;
+	struct inode *inode;
+
+	INIT_LIST_HEAD(&splice);
+
+	spin_lock(&root->fs_info->ordered_extent_lock);
+	list_splice_init(&root->fs_info->ordered_extents, &splice);
+	while (!list_empty(&splice)) {
+		cur = splice.next;
+		ordered = list_entry(cur, struct btrfs_ordered_extent,
+				     root_extent_list);
+		if (nocow_only &&
+		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
+		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
+			list_move(&ordered->root_extent_list,
+				  &root->fs_info->ordered_extents);
+			cond_resched_lock(&root->fs_info->ordered_extent_lock);
+			continue;
+		}
+
+		list_del_init(&ordered->root_extent_list);
+		atomic_inc(&ordered->refs);
+
+		/*
+		 * the inode may be getting freed (in sys_unlink path).
+		 */
+		inode = igrab(ordered->inode);
+
+		spin_unlock(&root->fs_info->ordered_extent_lock);
+
+		if (inode) {
+			btrfs_start_ordered_extent(inode, ordered, 1);
+			btrfs_put_ordered_extent(ordered);
+			iput(inode);
+		} else {
+			btrfs_put_ordered_extent(ordered);
+		}
+
+		spin_lock(&root->fs_info->ordered_extent_lock);
+	}
+	spin_unlock(&root->fs_info->ordered_extent_lock);
+	return 0;
+}
+
+/*
+ * Used to start IO or wait for a given ordered extent to finish.
+ *
+ * If wait is one, this effectively waits on page writeback for all the pages
+ * in the extent, and it waits on the io completion code to insert
+ * metadata into the btree corresponding to the extent
+ */
+void btrfs_start_ordered_extent(struct inode *inode,
+				       struct btrfs_ordered_extent *entry,
+				       int wait)
+{
+	u64 start = entry->file_offset;
+	u64 end = start + entry->len - 1;
+
+	/*
+	 * pages in the range can be dirty, clean or writeback.  We
+	 * start IO on any dirty ones so the wait doesn't stall waiting
+	 * for pdflush to find them
+	 */
+	btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_ALL);
+	if (wait) {
+		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
+						 &entry->flags));
+	}
+}
+
+/*
+ * Used to wait on ordered extents across a large range of bytes.
+ */
+int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
+{
+	u64 end;
+	u64 orig_end;
+	u64 wait_end;
+	struct btrfs_ordered_extent *ordered;
+
+	if (start + len < start) {
+		orig_end = INT_LIMIT(loff_t);
+	} else {
+		orig_end = start + len - 1;
+		if (orig_end > INT_LIMIT(loff_t))
+			orig_end = INT_LIMIT(loff_t);
+	}
+	wait_end = orig_end;
+again:
+	/* start IO across the range first to instantiate any delalloc
+	 * extents
+	 */
+	btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_NONE);
+
+	/* The compression code will leave pages locked but return from
+	 * writepage without setting the page writeback.  Starting again
+	 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
+	 */
+	btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL);
+
+	btrfs_wait_on_page_writeback_range(inode->i_mapping,
+					   start >> PAGE_CACHE_SHIFT,
+					   orig_end >> PAGE_CACHE_SHIFT);
+
+	end = orig_end;
+	while (1) {
+		ordered = btrfs_lookup_first_ordered_extent(inode, end);
+		if (!ordered)
+			break;
+		if (ordered->file_offset > orig_end) {
+			btrfs_put_ordered_extent(ordered);
+			break;
+		}
+		if (ordered->file_offset + ordered->len < start) {
+			btrfs_put_ordered_extent(ordered);
+			break;
+		}
+		btrfs_start_ordered_extent(inode, ordered, 1);
+		end = ordered->file_offset;
+		btrfs_put_ordered_extent(ordered);
+		if (end == 0 || end == start)
+			break;
+		end--;
+	}
+	if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
+			   EXTENT_ORDERED | EXTENT_DELALLOC, 0)) {
+		schedule_timeout(1);
+		goto again;
+	}
+	return 0;
+}
+
+/*
+ * find an ordered extent corresponding to file_offset.  return NULL if
+ * nothing is found, otherwise take a reference on the extent and return it
+ */
+struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
+							 u64 file_offset)
+{
+	struct btrfs_ordered_inode_tree *tree;
+	struct rb_node *node;
+	struct btrfs_ordered_extent *entry = NULL;
+
+	tree = &BTRFS_I(inode)->ordered_tree;
+	mutex_lock(&tree->mutex);
+	node = tree_search(tree, file_offset);
+	if (!node)
+		goto out;
+
+	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+	if (!offset_in_entry(entry, file_offset))
+		entry = NULL;
+	if (entry)
+		atomic_inc(&entry->refs);
+out:
+	mutex_unlock(&tree->mutex);
+	return entry;
+}
+
+/*
+ * lookup and return any extent before 'file_offset'.  NULL is returned
+ * if none is found
+ */
+struct btrfs_ordered_extent *
+btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
+{
+	struct btrfs_ordered_inode_tree *tree;
+	struct rb_node *node;
+	struct btrfs_ordered_extent *entry = NULL;
+
+	tree = &BTRFS_I(inode)->ordered_tree;
+	mutex_lock(&tree->mutex);
+	node = tree_search(tree, file_offset);
+	if (!node)
+		goto out;
+
+	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+	atomic_inc(&entry->refs);
+out:
+	mutex_unlock(&tree->mutex);
+	return entry;
+}
+
+/*
+ * After an extent is done, call this to conditionally update the on disk
+ * i_size.  i_size is updated to cover any fully written part of the file.
+ */
+int btrfs_ordered_update_i_size(struct inode *inode,
+				struct btrfs_ordered_extent *ordered)
+{
+	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	u64 disk_i_size;
+	u64 new_i_size;
+	u64 i_size_test;
+	struct rb_node *node;
+	struct btrfs_ordered_extent *test;
+
+	mutex_lock(&tree->mutex);
+	disk_i_size = BTRFS_I(inode)->disk_i_size;
+
+	/*
+	 * if the disk i_size is already at the inode->i_size, or
+	 * this ordered extent is inside the disk i_size, we're done
+	 */
+	if (disk_i_size >= inode->i_size ||
+	    ordered->file_offset + ordered->len <= disk_i_size) {
+		goto out;
+	}
+
+	/*
+	 * we can't update the disk_i_size if there are delalloc bytes
+	 * between disk_i_size and this ordered extent
+	 */
+	if (test_range_bit(io_tree, disk_i_size,
+			   ordered->file_offset + ordered->len - 1,
+			   EXTENT_DELALLOC, 0)) {
+		goto out;
+	}
+	/*
+	 * walk backward from this ordered extent to disk_i_size.
+	 * if we find an ordered extent then we can't update disk i_size
+	 * yet
+	 */
+	node = &ordered->rb_node;
+	while (1) {
+		node = rb_prev(node);
+		if (!node)
+			break;
+		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+		if (test->file_offset + test->len <= disk_i_size)
+			break;
+		if (test->file_offset >= inode->i_size)
+			break;
+		if (test->file_offset >= disk_i_size)
+			goto out;
+	}
+	new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode));
+
+	/*
+	 * at this point, we know we can safely update i_size to at least
+	 * the offset from this ordered extent.  But, we need to
+	 * walk forward and see if ios from higher up in the file have
+	 * finished.
+	 */
+	node = rb_next(&ordered->rb_node);
+	i_size_test = 0;
+	if (node) {
+		/*
+		 * do we have an area where IO might have finished
+		 * between our ordered extent and the next one.
+		 */
+		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+		if (test->file_offset > entry_end(ordered))
+			i_size_test = test->file_offset;
+	} else {
+		i_size_test = i_size_read(inode);
+	}
+
+	/*
+	 * i_size_test is the end of a region after this ordered
+	 * extent where there are no ordered extents.  As long as there
+	 * are no delalloc bytes in this area, it is safe to update
+	 * disk_i_size to the end of the region.
+	 */
+	if (i_size_test > entry_end(ordered) &&
+	    !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
+			   EXTENT_DELALLOC, 0)) {
+		new_i_size = min_t(u64, i_size_test, i_size_read(inode));
+	}
+	BTRFS_I(inode)->disk_i_size = new_i_size;
+out:
+	mutex_unlock(&tree->mutex);
+	return 0;
+}
+
+/*
+ * search the ordered extents for one corresponding to 'offset' and
+ * try to find a checksum.  This is used because we allow pages to
+ * be reclaimed before their checksum is actually put into the btree
+ */
+int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
+			   u32 *sum)
+{
+	struct btrfs_ordered_sum *ordered_sum;
+	struct btrfs_sector_sum *sector_sums;
+	struct btrfs_ordered_extent *ordered;
+	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
+	struct list_head *cur;
+	unsigned long num_sectors;
+	unsigned long i;
+	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
+	int ret = 1;
+
+	ordered = btrfs_lookup_ordered_extent(inode, offset);
+	if (!ordered)
+		return 1;
+
+	mutex_lock(&tree->mutex);
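+	/* scan the checksums attached to this extent for disk_bytenr */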
+	list_for_each_prev(cur, &ordered->list) {
+		ordered_sum = list_entry(cur, struct btrfs_ordered_sum, list);
+		if (disk_bytenr >= ordered_sum->bytenr) {
+			num_sectors = ordered_sum->len / sectorsize;
+			sector_sums = ordered_sum->sums;
+			for (i = 0; i < num_sectors; i++) {
+				if (sector_sums[i].bytenr == disk_bytenr) {
+					*sum = sector_sums[i].sum;
+					ret = 0;
+					goto out;
+				}
+			}
+		}
+	}
+out:
+	mutex_unlock(&tree->mutex);
+	btrfs_put_ordered_extent(ordered);
+	return ret;
+}
+
+
+/**
+ * taken from mm/filemap.c because it isn't exported
+ *
+ * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
+ * @mapping:	address space structure to write
+ * @start:	offset in bytes where the range starts
+ * @end:	offset in bytes where the range ends (inclusive)
+ * @sync_mode:	enable synchronous operation
+ *
+ * Start writeback against all of a mapping's dirty pages that lie
+ * within the byte offsets <start, end> inclusive.
+ *
+ * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
+ * opposed to a regular memory cleansing writeback.  The difference between
+ * these two operations is that if a dirty page/buffer is encountered, it must
+ * be waited upon, and not just skipped over.
+ */
+int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
+			   loff_t end, int sync_mode)
+{
+	struct writeback_control wbc = {
+		.sync_mode = sync_mode,
+		.nr_to_write = mapping->nrpages * 2,
+		.range_start = start,
+		.range_end = end,
+		.for_writepages = 1,
+	};
+	return btrfs_writepages(mapping, &wbc);
+}
+
+/**
+ * taken from mm/filemap.c because it isn't exported
+ *
+ * wait_on_page_writeback_range - wait for writeback to complete
+ * @mapping:	target address_space
+ * @start:	beginning page index
+ * @end:	ending page index
+ *
+ * Wait for writeback to complete against pages indexed by start->end
+ * inclusive
+ */
+int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
+				       pgoff_t start, pgoff_t end)
+{
+	struct pagevec pvec;
+	int nr_pages;
+	int ret = 0;
+	pgoff_t index;
+
+	if (end < start)
+		return 0;
+
+	pagevec_init(&pvec, 0);
+	index = start;
+	while ((index <= end) &&
+			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+			PAGECACHE_TAG_WRITEBACK,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
+		unsigned i;
+
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
+
+			/* until radix tree lookup accepts end_index */
+			if (page->index > end)
+				continue;
+
+			wait_on_page_writeback(page);
+			if (PageError(page))
+				ret = -EIO;
+		}
+		pagevec_release(&pvec);
+		cond_resched();
+	}
+
+	/* Check for outstanding write errors */
+	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+		ret = -ENOSPC;
+	if (test_and_clear_bit(AS_EIO, &mapping->flags))
+		ret = -EIO;
+
+	return ret;
+}
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
new file mode 100644
index 0000000..ab66d5e
--- /dev/null
+++ b/fs/btrfs/ordered-data.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __BTRFS_ORDERED_DATA__
+#define __BTRFS_ORDERED_DATA__
+
+/* one of these per inode */
+struct btrfs_ordered_inode_tree {
+	struct mutex mutex;
+	struct rb_root tree;
+	struct rb_node *last;
+};
+
+/*
+ * these are used to collect checksums done just before bios submission.
+ * They are attached via a list into the ordered extent, and
+ * checksum items are inserted into the tree after all the blocks in
+ * the ordered extent are on disk
+ */
+struct btrfs_sector_sum {
+	/* bytenr on disk */
+	u64 bytenr;
+	u32 sum;
+};
+
+struct btrfs_ordered_sum {
+	/* bytenr is the start of this extent on disk */
+	u64 bytenr;
+
+	/*
+	 * this is the length in bytes covered by the sums array below.
+	 */
+	unsigned long len;
+	struct list_head list;
+	/* last field is a variable length array of btrfs_sector_sums */
+	struct btrfs_sector_sum sums[];
+};
+
+/*
+ * bits for the flags field:
+ *
+ * BTRFS_ORDERED_IO_DONE is set when all of the blocks are written.
+ * It is used to make sure metadata is inserted into the tree only once
+ * per extent.
+ *
+ * BTRFS_ORDERED_COMPLETE is set when the extent is removed from the
+ * rbtree, just before waking any waiters.  It is used to indicate the
+ * IO is done and any metadata is inserted into the tree.
+ */
+#define BTRFS_ORDERED_IO_DONE 0 /* set when all the pages are written */
+
+#define BTRFS_ORDERED_COMPLETE 1 /* set when removed from the tree */
+
+#define BTRFS_ORDERED_NOCOW 2 /* set when we want to write in place */
+
+#define BTRFS_ORDERED_COMPRESSED 3 /* writing a compressed extent */
+
+#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */
+
+struct btrfs_ordered_extent {
+	/* logical offset in the file */
+	u64 file_offset;
+
+	/* disk byte number */
+	u64 start;
+
+	/* ram length of the extent in bytes */
+	u64 len;
+
+	/* extent length on disk */
+	u64 disk_len;
+
+	/* flags (described above) */
+	unsigned long flags;
+
+	/* reference count */
+	atomic_t refs;
+
+	/* the inode we belong to */
+	struct inode *inode;
+
+	/* list of checksums for insertion when the extent io is done */
+	struct list_head list;
+
+	/* used to wait for the BTRFS_ORDERED_COMPLETE bit */
+	wait_queue_head_t wait;
+
+	/* our friendly rbtree entry */
+	struct rb_node rb_node;
+
+	/* a per root list of all the pending ordered extents */
+	struct list_head root_extent_list;
+};
+
+
+/*
+ * calculates the total size you need to allocate for an ordered sum
+ * structure spanning 'bytes' in the file
+ */
+static inline int btrfs_ordered_sum_size(struct btrfs_root *root,
+					 unsigned long bytes)
+{
+	unsigned long num_sectors = (bytes + root->sectorsize - 1) /
+		root->sectorsize;
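+	/* one extra sector_sum of slack beyond the rounded-up count */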
+	num_sectors++;
+	return sizeof(struct btrfs_ordered_sum) +
+		num_sectors * sizeof(struct btrfs_sector_sum);
+}
+
+static inline void
+btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
+{
+	mutex_init(&t->mutex);
+	t->tree.rb_node = NULL;
+	t->last = NULL;
+}
+
+int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
+int btrfs_remove_ordered_extent(struct inode *inode,
+				struct btrfs_ordered_extent *entry);
+int btrfs_dec_test_ordered_pending(struct inode *inode,
+				       u64 file_offset, u64 io_size);
+int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
+			     u64 start, u64 len, u64 disk_len, int type);
+int btrfs_add_ordered_sum(struct inode *inode,
+			  struct btrfs_ordered_extent *entry,
+			  struct btrfs_ordered_sum *sum);
+struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
+							 u64 file_offset);
+void btrfs_start_ordered_extent(struct inode *inode,
+				struct btrfs_ordered_extent *entry, int wait);
+int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
+struct btrfs_ordered_extent *
+btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset);
+int btrfs_ordered_update_i_size(struct inode *inode,
+				struct btrfs_ordered_extent *ordered);
+int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
+			   u32 *sum);
+int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
+				       pgoff_t start, pgoff_t end);
+int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
+			   loff_t end, int sync_mode);
+int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only);
+#endif
diff --git a/fs/btrfs/orphan.c b/fs/btrfs/orphan.c
new file mode 100644
index 0000000..3c0d52a
--- /dev/null
+++ b/fs/btrfs/orphan.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2008 Red Hat.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include "ctree.h"
+#include "disk-io.h"
+
+int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root, u64 offset)
+{
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	int ret = 0;
+
+	key.objectid = BTRFS_ORPHAN_OBJECTID;
+	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
+	key.offset = offset;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
+
+	btrfs_free_path(path);
+	return ret;
+}
+
+int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root, u64 offset)
+{
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	int ret = 0;
+
+	key.objectid = BTRFS_ORPHAN_OBJECTID;
+	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
+	key.offset = offset;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+	if (ret)
+		goto out;
+
+	ret = btrfs_del_item(trans, root, path);
+
+out:
+	btrfs_free_path(path);
+	return ret;
+}
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
new file mode 100644
index 0000000..5f8f218
--- /dev/null
+++ b/fs/btrfs/print-tree.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include "ctree.h"
+#include "disk-io.h"
+#include "print-tree.h"
+
+static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk)
+{
+	int num_stripes = btrfs_chunk_num_stripes(eb, chunk);
+	int i;
+	printk(KERN_INFO "\t\tchunk length %llu owner %llu type %llu "
+	       "num_stripes %d\n",
+	       (unsigned long long)btrfs_chunk_length(eb, chunk),
+	       (unsigned long long)btrfs_chunk_owner(eb, chunk),
+	       (unsigned long long)btrfs_chunk_type(eb, chunk),
+	       num_stripes);
+	for (i = 0 ; i < num_stripes ; i++) {
+		printk(KERN_INFO "\t\t\tstripe %d devid %llu offset %llu\n", i,
+		      (unsigned long long)btrfs_stripe_devid_nr(eb, chunk, i),
+		      (unsigned long long)btrfs_stripe_offset_nr(eb, chunk, i));
+	}
+}
+
+static void print_dev_item(struct extent_buffer *eb,
+			   struct btrfs_dev_item *dev_item)
+{
+	printk(KERN_INFO "\t\tdev item devid %llu "
+	       "total_bytes %llu bytes used %llu\n",
+	       (unsigned long long)btrfs_device_id(eb, dev_item),
+	       (unsigned long long)btrfs_device_total_bytes(eb, dev_item),
+	       (unsigned long long)btrfs_device_bytes_used(eb, dev_item));
+}
+void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
+{
+	int i;
+	u32 nr = btrfs_header_nritems(l);
+	struct btrfs_item *item;
+	struct btrfs_extent_item *ei;
+	struct btrfs_root_item *ri;
+	struct btrfs_dir_item *di;
+	struct btrfs_inode_item *ii;
+	struct btrfs_block_group_item *bi;
+	struct btrfs_file_extent_item *fi;
+	struct btrfs_key key;
+	struct btrfs_key found_key;
+	struct btrfs_extent_ref *ref;
+	struct btrfs_dev_extent *dev_extent;
+	u32 type;
+
+	printk(KERN_INFO "leaf %llu total ptrs %d free space %d\n",
+		(unsigned long long)btrfs_header_bytenr(l), nr,
+		btrfs_leaf_free_space(root, l));
+	for (i = 0; i < nr; i++) {
+		item = btrfs_item_nr(l, i);
+		btrfs_item_key_to_cpu(l, &key, i);
+		type = btrfs_key_type(&key);
+		printk(KERN_INFO "\titem %d key (%llu %x %llu) itemoff %d "
+		       "itemsize %d\n",
+			i,
+			(unsigned long long)key.objectid, type,
+			(unsigned long long)key.offset,
+			btrfs_item_offset(l, item), btrfs_item_size(l, item));
+		switch (type) {
+		case BTRFS_INODE_ITEM_KEY:
+			ii = btrfs_item_ptr(l, i, struct btrfs_inode_item);
+			printk(KERN_INFO "\t\tinode generation %llu size %llu "
+			       "mode %o\n",
+			       (unsigned long long)
+			       btrfs_inode_generation(l, ii),
+			      (unsigned long long)btrfs_inode_size(l, ii),
+			       btrfs_inode_mode(l, ii));
+			break;
+		case BTRFS_DIR_ITEM_KEY:
+			di = btrfs_item_ptr(l, i, struct btrfs_dir_item);
+			btrfs_dir_item_key_to_cpu(l, di, &found_key);
+			printk(KERN_INFO "\t\tdir oid %llu type %u\n",
+				(unsigned long long)found_key.objectid,
+				btrfs_dir_type(l, di));
+			break;
+		case BTRFS_ROOT_ITEM_KEY:
+			ri = btrfs_item_ptr(l, i, struct btrfs_root_item);
+			printk(KERN_INFO "\t\troot data bytenr %llu refs %u\n",
+				(unsigned long long)
+				btrfs_disk_root_bytenr(l, ri),
+				btrfs_disk_root_refs(l, ri));
+			break;
+		case BTRFS_EXTENT_ITEM_KEY:
+			ei = btrfs_item_ptr(l, i, struct btrfs_extent_item);
+			printk(KERN_INFO "\t\textent data refs %u\n",
+				btrfs_extent_refs(l, ei));
+			break;
+		case BTRFS_EXTENT_REF_KEY:
+			ref = btrfs_item_ptr(l, i, struct btrfs_extent_ref);
+			printk(KERN_INFO "\t\textent back ref root %llu "
+			       "gen %llu owner %llu num_refs %lu\n",
+			       (unsigned long long)btrfs_ref_root(l, ref),
+			       (unsigned long long)btrfs_ref_generation(l, ref),
+			       (unsigned long long)btrfs_ref_objectid(l, ref),
+			       (unsigned long)btrfs_ref_num_refs(l, ref));
+			break;
+
+		case BTRFS_EXTENT_DATA_KEY:
+			fi = btrfs_item_ptr(l, i,
+					    struct btrfs_file_extent_item);
+			if (btrfs_file_extent_type(l, fi) ==
+			    BTRFS_FILE_EXTENT_INLINE) {
+				printk(KERN_INFO "\t\tinline extent data "
+				       "size %u\n",
+				       btrfs_file_extent_inline_len(l, fi));
+				break;
+			}
+			printk(KERN_INFO "\t\textent data disk bytenr %llu "
+			       "nr %llu\n",
+			       (unsigned long long)
+			       btrfs_file_extent_disk_bytenr(l, fi),
+			       (unsigned long long)
+			       btrfs_file_extent_disk_num_bytes(l, fi));
+			printk(KERN_INFO "\t\textent data offset %llu "
+			       "nr %llu ram %llu\n",
+			       (unsigned long long)
+			       btrfs_file_extent_offset(l, fi),
+			       (unsigned long long)
+			       btrfs_file_extent_num_bytes(l, fi),
+			       (unsigned long long)
+			       btrfs_file_extent_ram_bytes(l, fi));
+			break;
+		case BTRFS_BLOCK_GROUP_ITEM_KEY:
+			bi = btrfs_item_ptr(l, i,
+					    struct btrfs_block_group_item);
+			printk(KERN_INFO "\t\tblock group used %llu\n",
+			       (unsigned long long)
+			       btrfs_disk_block_group_used(l, bi));
+			break;
+		case BTRFS_CHUNK_ITEM_KEY:
+			print_chunk(l, btrfs_item_ptr(l, i,
+						      struct btrfs_chunk));
+			break;
+		case BTRFS_DEV_ITEM_KEY:
+			print_dev_item(l, btrfs_item_ptr(l, i,
+					struct btrfs_dev_item));
+			break;
+		case BTRFS_DEV_EXTENT_KEY:
+			dev_extent = btrfs_item_ptr(l, i,
+						    struct btrfs_dev_extent);
+			printk(KERN_INFO "\t\tdev extent chunk_tree %llu\n"
+			       "\t\tchunk objectid %llu chunk offset %llu "
+			       "length %llu\n",
+			       (unsigned long long)
+			       btrfs_dev_extent_chunk_tree(l, dev_extent),
+			       (unsigned long long)
+			       btrfs_dev_extent_chunk_objectid(l, dev_extent),
+			       (unsigned long long)
+			       btrfs_dev_extent_chunk_offset(l, dev_extent),
+			       (unsigned long long)
+			       btrfs_dev_extent_length(l, dev_extent));
+			break;
+		}
+	}
+}
+
+void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
+{
+	int i;
+	u32 nr;
+	struct btrfs_key key;
+	int level;
+
+	if (!c)
+		return;
+	nr = btrfs_header_nritems(c);
+	level = btrfs_header_level(c);
+	if (level == 0) {
+		btrfs_print_leaf(root, c);
+		return;
+	}
+	printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n",
+	       (unsigned long long)btrfs_header_bytenr(c),
+	       btrfs_header_level(c), nr,
+	       (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
+	for (i = 0; i < nr; i++) {
+		btrfs_node_key_to_cpu(c, &key, i);
+		printk(KERN_INFO "\tkey %d (%llu %u %llu) block %llu\n",
+		       i,
+		       (unsigned long long)key.objectid,
+		       key.type,
+		       (unsigned long long)key.offset,
+		       (unsigned long long)btrfs_node_blockptr(c, i));
+	}
+	for (i = 0; i < nr; i++) {
+		struct extent_buffer *next = read_tree_block(root,
+					btrfs_node_blockptr(c, i),
+					btrfs_level_size(root, level - 1),
+					btrfs_node_ptr_generation(c, i));
+		if (btrfs_is_leaf(next) &&
+		    btrfs_header_level(c) != 1)
+			BUG();
+		if (btrfs_header_level(next) !=
+			btrfs_header_level(c) - 1)
+			BUG();
+		btrfs_print_tree(root, next);
+		free_extent_buffer(next);
+	}
+}
diff --git a/fs/btrfs/print-tree.h b/fs/btrfs/print-tree.h
new file mode 100644
index 0000000..da75efe
--- /dev/null
+++ b/fs/btrfs/print-tree.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __PRINT_TREE_
+#define __PRINT_TREE_
+void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l);
+void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *t);
+#endif
diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c
new file mode 100644
index 0000000..6f0acc4
--- /dev/null
+++ b/fs/btrfs/ref-cache.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2008 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include "ctree.h"
+#include "ref-cache.h"
+#include "transaction.h"
+
+/*
+ * leaf refs are used to cache the information about which extents
+ * a given leaf has references on.  This allows us to process that leaf
+ * in btrfs_drop_snapshot without needing to read it back from disk.
+ */
+
+/*
+ * kmalloc a leaf reference struct and update the counters for the
+ * total ref cache size
+ */
+struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
+					    int nr_extents)
+{
+	struct btrfs_leaf_ref *ref;
+	size_t size = btrfs_leaf_ref_size(nr_extents);
+
+	ref = kmalloc(size, GFP_NOFS);
+	if (ref) {
+		spin_lock(&root->fs_info->ref_cache_lock);
+		root->fs_info->total_ref_cache_size += size;
+		spin_unlock(&root->fs_info->ref_cache_lock);
+
+		memset(ref, 0, sizeof(*ref));
+		atomic_set(&ref->usage, 1);
+		INIT_LIST_HEAD(&ref->list);
+	}
+	return ref;
+}
+
+/*
+ * free a leaf reference struct and update the counters for the
+ * total ref cache size
+ */
+void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
+{
+	if (!ref)
+		return;
+	WARN_ON(atomic_read(&ref->usage) == 0);
+	if (atomic_dec_and_test(&ref->usage)) {
+		size_t size = btrfs_leaf_ref_size(ref->nritems);
+
+		BUG_ON(ref->in_tree);
+		kfree(ref);
+
+		spin_lock(&root->fs_info->ref_cache_lock);
+		root->fs_info->total_ref_cache_size -= size;
+		spin_unlock(&root->fs_info->ref_cache_lock);
+	}
+}
+
+static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
+				   struct rb_node *node)
+{
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct btrfs_leaf_ref *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node);
+
+		if (bytenr < entry->bytenr)
+			p = &(*p)->rb_left;
+		else if (bytenr > entry->bytenr)
+			p = &(*p)->rb_right;
+		else
+			return parent;
+	}
+
+	entry = rb_entry(node, struct btrfs_leaf_ref, rb_node);
+	rb_link_node(node, parent, p);
+	rb_insert_color(node, root);
+	return NULL;
+}
+
+static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
+{
+	struct rb_node *n = root->rb_node;
+	struct btrfs_leaf_ref *entry;
+
+	while (n) {
+		entry = rb_entry(n, struct btrfs_leaf_ref, rb_node);
+		WARN_ON(!entry->in_tree);
+
+		if (bytenr < entry->bytenr)
+			n = n->rb_left;
+		else if (bytenr > entry->bytenr)
+			n = n->rb_right;
+		else
+			return n;
+	}
+	return NULL;
+}
+
+int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
+			   int shared)
+{
+	struct btrfs_leaf_ref *ref = NULL;
+	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
+
+	if (shared)
+		tree = &root->fs_info->shared_ref_tree;
+	if (!tree)
+		return 0;
+
+	spin_lock(&tree->lock);
+	while (!list_empty(&tree->list)) {
+		ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
+		BUG_ON(ref->tree != tree);
+		if (ref->root_gen > max_root_gen)
+			break;
+		if (!xchg(&ref->in_tree, 0)) {
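+			/* already cleared; being removed elsewhere */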
+			cond_resched_lock(&tree->lock);
+			continue;
+		}
+
+		rb_erase(&ref->rb_node, &tree->root);
+		list_del_init(&ref->list);
+
+		spin_unlock(&tree->lock);
+		btrfs_free_leaf_ref(root, ref);
+		cond_resched();
+		spin_lock(&tree->lock);
+	}
+	spin_unlock(&tree->lock);
+	return 0;
+}
+
+/*
+ * find the leaf ref for a given extent.  This returns the ref struct with
+ * a usage reference incremented
+ */
+struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
+					     u64 bytenr)
+{
+	struct rb_node *rb;
+	struct btrfs_leaf_ref *ref = NULL;
+	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
+again:
+	if (tree) {
+		spin_lock(&tree->lock);
+		rb = tree_search(&tree->root, bytenr);
+		if (rb)
+			ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
+		if (ref)
+			atomic_inc(&ref->usage);
+		spin_unlock(&tree->lock);
+		if (ref)
+			return ref;
+	}
+	if (tree != &root->fs_info->shared_ref_tree) {
+		tree = &root->fs_info->shared_ref_tree;
+		goto again;
+	}
+	return NULL;
+}
+
+/*
+ * add a fully filled in leaf ref struct
+ * remove all the refs older than a given root generation
+ */
+int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
+		       int shared)
+{
+	int ret = 0;
+	struct rb_node *rb;
+	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
+
+	if (shared)
+		tree = &root->fs_info->shared_ref_tree;
+
+	spin_lock(&tree->lock);
+	rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node);
+	if (rb) {
+		ret = -EEXIST;
+	} else {
+		atomic_inc(&ref->usage);
+		ref->tree = tree;
+		ref->in_tree = 1;
+		list_add_tail(&ref->list, &tree->list);
+	}
+	spin_unlock(&tree->lock);
+	return ret;
+}
+
+/*
+ * remove a single leaf ref from the tree.  This drops the ref held by the tree
+ * only
+ */
+int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
+{
+	struct btrfs_leaf_ref_tree *tree;
+
+	if (!xchg(&ref->in_tree, 0))
+		return 0;
+
+	tree = ref->tree;
+	spin_lock(&tree->lock);
+
+	rb_erase(&ref->rb_node, &tree->root);
+	list_del_init(&ref->list);
+
+	spin_unlock(&tree->lock);
+
+	btrfs_free_leaf_ref(root, ref);
+	return 0;
+}
diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h
new file mode 100644
index 0000000..16f3183
--- /dev/null
+++ b/fs/btrfs/ref-cache.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2008 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+#ifndef __REFCACHE__
+#define __REFCACHE__
+
+struct btrfs_extent_info {
+	/* bytenr and num_bytes find the extent in the extent allocation tree */
+	u64 bytenr;
+	u64 num_bytes;
+
+	/* objectid and offset find the back reference for the file */
+	u64 objectid;
+	u64 offset;
+};
+
+struct btrfs_leaf_ref {
+	struct rb_node rb_node;
+	struct btrfs_leaf_ref_tree *tree;
+	int in_tree;
+	atomic_t usage;
+
+	u64 root_gen;
+	u64 bytenr;
+	u64 owner;
+	u64 generation;
+	int nritems;
+
+	struct list_head list;
+	struct btrfs_extent_info extents[];
+};
+
+static inline size_t btrfs_leaf_ref_size(int nr_extents)
+{
+	return sizeof(struct btrfs_leaf_ref) +
+	       sizeof(struct btrfs_extent_info) * nr_extents;
+}
+
+static inline void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree)
+{
+	tree->root.rb_node = NULL;
+	INIT_LIST_HEAD(&tree->list);
+	spin_lock_init(&tree->lock);
+}
+
+static inline int btrfs_leaf_ref_tree_empty(struct btrfs_leaf_ref_tree *tree)
+{
+	return RB_EMPTY_ROOT(&tree->root);
+}
+
+void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree);
+struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
+					    int nr_extents);
+void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
+struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
+					     u64 bytenr);
+int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
+		       int shared);
+int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
+			   int shared);
+int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
+
+#endif
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
new file mode 100644
index 0000000..b48650d
--- /dev/null
+++ b/fs/btrfs/root-tree.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include "ctree.h"
+#include "transaction.h"
+#include "disk-io.h"
+#include "print-tree.h"
+
+/*
+ *  search forward for a root, starting with objectid 'search_start'
+ *  if a root key is found, the objectid we find is filled into 'found_objectid'
+ *  and 0 is returned.  < 0 is returned on error, 1 if there is nothing
+ *  left in the tree.
+ */
+int btrfs_search_root(struct btrfs_root *root, u64 search_start,
+		      u64 *found_objectid)
+{
+	struct btrfs_path *path;
+	struct btrfs_key search_key;
+	int ret;
+
+	root = root->fs_info->tree_root;
+	search_key.objectid = search_start;
+	search_key.type = (u8)-1;
+	search_key.offset = (u64)-1;
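+	/* largest type and offset, so we land just past search_start */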
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+again:
+	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+	if (ret == 0) {
+		ret = 1;
+		goto out;
+	}
+	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
+		ret = btrfs_next_leaf(root, path);
+		if (ret)
+			goto out;
+	}
+	btrfs_item_key_to_cpu(path->nodes[0], &search_key, path->slots[0]);
+	if (search_key.type != BTRFS_ROOT_ITEM_KEY) {
+		search_key.offset++;
+		btrfs_release_path(root, path);
+		goto again;
+	}
+	ret = 0;
+	*found_objectid = search_key.objectid;
+
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+/*
+ * lookup the root with the highest offset for a given objectid.  The key we do
+ * find is copied into 'key'.  If we find something return 0, otherwise 1, < 0
+ * on error.
+ */
+int btrfs_find_last_root(struct btrfs_root *root, u64 objectid,
+			struct btrfs_root_item *item, struct btrfs_key *key)
+{
+	struct btrfs_path *path;
+	struct btrfs_key search_key;
+	struct btrfs_key found_key;
+	struct extent_buffer *l;
+	int ret;
+	int slot;
+
+	search_key.objectid = objectid;
+	search_key.type = BTRFS_ROOT_ITEM_KEY;
+	search_key.offset = (u64)-1;
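+	/* largest offset sorts last, so we step back one slot below */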
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+
+	BUG_ON(ret == 0);
+	l = path->nodes[0];
+	BUG_ON(path->slots[0] == 0);
+	slot = path->slots[0] - 1;
+	btrfs_item_key_to_cpu(l, &found_key, slot);
+	if (found_key.objectid != objectid) {
+		ret = 1;
+		goto out;
+	}
+	read_extent_buffer(l, item, btrfs_item_ptr_offset(l, slot),
+			   sizeof(*item));
+	memcpy(key, &found_key, sizeof(found_key));
+	ret = 0;
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+/*
+ * copy the data in 'item' into the btree
+ */
+int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
+		      *root, struct btrfs_key *key, struct btrfs_root_item
+		      *item)
+{
+	struct btrfs_path *path;
+	struct extent_buffer *l;
+	int ret;
+	int slot;
+	unsigned long ptr;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+	ret = btrfs_search_slot(trans, root, key, path, 0, 1);
+	if (ret < 0)
+		goto out;
+
+	if (ret != 0) {
+		btrfs_print_leaf(root, path->nodes[0]);
+		printk(KERN_CRIT "unable to update root key %llu %u %llu\n",
+		       (unsigned long long)key->objectid, key->type,
+		       (unsigned long long)key->offset);
+		BUG_ON(1);
+	}
+
+	l = path->nodes[0];
+	slot = path->slots[0];
+	ptr = btrfs_item_ptr_offset(l, slot);
+	write_extent_buffer(l, item, ptr, sizeof(*item));
+	btrfs_mark_buffer_dirty(path->nodes[0]);
+out:
+	btrfs_release_path(root, path);
+	btrfs_free_path(path);
+	return ret;
+}
+
+int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root
+		      *root, struct btrfs_key *key, struct btrfs_root_item
+		      *item)
+{
+	int ret;
+	ret = btrfs_insert_item(trans, root, key, item, sizeof(*item));
+	return ret;
+}
+
+/*
+ * at mount time we want to find all the old transaction snapshots that were in
+ * the process of being deleted if we crashed.  This is any root item with an
+ * offset lower than the latest root.  They need to be queued for deletion to
+ * finish what was happening when we crashed.
+ */
+int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid,
+			  struct btrfs_root *latest)
+{
+	struct btrfs_root *dead_root;
+	struct btrfs_item *item;
+	struct btrfs_root_item *ri;
+	struct btrfs_key key;
+	struct btrfs_key found_key;
+	struct btrfs_path *path;
+	int ret;
+	u32 nritems;
+	struct extent_buffer *leaf;
+	int slot;
+
+	key.objectid = objectid;
+	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
+	key.offset = 0;
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+again:
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto err;
+	while (1) {
+		leaf = path->nodes[0];
+		nritems = btrfs_header_nritems(leaf);
+		slot = path->slots[0];
+		if (slot >= nritems) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret)
+				break;
+			leaf = path->nodes[0];
+			nritems = btrfs_header_nritems(leaf);
+			slot = path->slots[0];
+		}
+		item = btrfs_item_nr(leaf, slot);
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+		if (btrfs_key_type(&key) != BTRFS_ROOT_ITEM_KEY)
+			goto next;
+
+		if (key.objectid < objectid)
+			goto next;
+
+		if (key.objectid > objectid)
+			break;
+
+		ri = btrfs_item_ptr(leaf, slot, struct btrfs_root_item);
+		if (btrfs_disk_root_refs(leaf, ri) != 0)
+			goto next;
+
+		memcpy(&found_key, &key, sizeof(key));
+		key.offset++;
+		btrfs_release_path(root, path);
+		dead_root =
+			btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
+						    &found_key);
+		if (IS_ERR(dead_root)) {
+			ret = PTR_ERR(dead_root);
+			goto err;
+		}
+
+		if (objectid == BTRFS_TREE_RELOC_OBJECTID)
+			ret = btrfs_add_dead_reloc_root(dead_root);
+		else
+			ret = btrfs_add_dead_root(dead_root, latest);
+		if (ret)
+			goto err;
+		goto again;
+next:
+		slot++;
+		path->slots[0]++;
+	}
+	ret = 0;
+err:
+	btrfs_free_path(path);
+	return ret;
+}
+
+/* drop the root item for 'key' from 'root' */
+int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+		   struct btrfs_key *key)
+{
+	struct btrfs_path *path;
+	int ret;
+	u32 refs;
+	struct btrfs_root_item *ri;
+	struct extent_buffer *leaf;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
+	if (ret < 0)
+		goto out;
+
+	BUG_ON(ret != 0);
+	leaf = path->nodes[0];
+	ri = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_item);
+
+	refs = btrfs_disk_root_refs(leaf, ri);
+	BUG_ON(refs != 0);
+	ret = btrfs_del_item(trans, root, path);
+out:
+	btrfs_release_path(root, path);
+	btrfs_free_path(path);
+	return ret;
+}
+
+#if 0 /* this will get used when snapshot deletion is implemented */
+int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *tree_root,
+		       u64 root_id, u8 type, u64 ref_id)
+{
+	struct btrfs_key key;
+	int ret;
+	struct btrfs_path *path;
+
+	path = btrfs_alloc_path();
+
+	key.objectid = root_id;
+	key.type = type;
+	key.offset = ref_id;
+
+	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
+	BUG_ON(ret);
+
+	ret = btrfs_del_item(trans, tree_root, path);
+	BUG_ON(ret);
+
+	btrfs_free_path(path);
+	return ret;
+}
+#endif
+
+int btrfs_find_root_ref(struct btrfs_root *tree_root,
+		   struct btrfs_path *path,
+		   u64 root_id, u64 ref_id)
+{
+	struct btrfs_key key;
+	int ret;
+
+	key.objectid = root_id;
+	key.type = BTRFS_ROOT_REF_KEY;
+	key.offset = ref_id;
+
+	ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
+	return ret;
+}
+
+/*
+ * add a btrfs_root_ref item.  type is either BTRFS_ROOT_REF_KEY
+ * or BTRFS_ROOT_BACKREF_KEY.
+ *
+ * The dirid, sequence, name and name_len refer to the directory entry
+ * that is referencing the root.
+ *
+ * For a forward ref, the root_id is the id of the tree referencing
+ * the root and ref_id is the id of the subvol or snapshot.
+ *
+ * For a back ref the root_id is the id of the subvol or snapshot and
+ * ref_id is the id of the tree referencing it.
+ */
+int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *tree_root,
+		       u64 root_id, u8 type, u64 ref_id,
+		       u64 dirid, u64 sequence,
+		       const char *name, int name_len)
+{
+	struct btrfs_key key;
+	int ret;
+	struct btrfs_path *path;
+	struct btrfs_root_ref *ref;
+	struct extent_buffer *leaf;
+	unsigned long ptr;
+
+	path = btrfs_alloc_path();
+
+	key.objectid = root_id;
+	key.type = type;
+	key.offset = ref_id;
+
+	ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
+				      sizeof(*ref) + name_len);
+	BUG_ON(ret);
+
+	leaf = path->nodes[0];
+	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
+	btrfs_set_root_ref_dirid(leaf, ref, dirid);
+	btrfs_set_root_ref_sequence(leaf, ref, sequence);
+	btrfs_set_root_ref_name_len(leaf, ref, name_len);
+	ptr = (unsigned long)(ref + 1);
+	write_extent_buffer(leaf, name, ptr, name_len);
+	btrfs_mark_buffer_dirty(leaf);
+
+	btrfs_free_path(path);
+	return ret;
+}
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
new file mode 100644
index 0000000..c0f7eca
--- /dev/null
+++ b/fs/btrfs/struct-funcs.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/highmem.h>
+
+/* this is some deeply nasty code.  ctree.h has a different
+ * definition for this BTRFS_SETGET_FUNCS macro, behind a #ifndef
+ *
+ * The end result is that anyone who #includes ctree.h gets a
+ * declaration for the btrfs_set_foo functions and btrfs_foo functions
+ *
+ * This file declares the macros and then #includes ctree.h, which results
+ * in cpp creating the function here based on the template below.
+ *
+ * These setget functions do all the extent_buffer related mapping
+ * required to efficiently read and write specific fields in the extent
+ * buffers.  Every pointer to metadata items in btrfs is really just
+ * an unsigned long offset into the extent buffer which has been
+ * cast to a specific type.  This gives us all the gcc type checking.
+ *
+ * The extent buffer api is used to do all the kmapping and page
+ * spanning work required to get extent buffers in highmem and have
+ * a metadata blocksize different from the page size.
+ *
+ * The macro starts with a simple function prototype declaration so that
+ * sparse won't complain about it being static.
+ */
+
+#define BTRFS_SETGET_FUNCS(name, type, member, bits)			\
+u##bits btrfs_##name(struct extent_buffer *eb, type *s);		\
+void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val);	\
+u##bits btrfs_##name(struct extent_buffer *eb,				\
+				   type *s)				\
+{									\
+	unsigned long part_offset = (unsigned long)s;			\
+	unsigned long offset = part_offset + offsetof(type, member);	\
+	type *p;							\
+	/* ugly, but we want the fast path here */			\
+	if (eb->map_token && offset >= eb->map_start &&			\
+	    offset + sizeof(((type *)0)->member) <= eb->map_start +	\
+	    eb->map_len) {						\
+		p = (type *)(eb->kaddr + part_offset - eb->map_start);	\
+		return le##bits##_to_cpu(p->member);			\
+	}								\
+	{								\
+		int err;						\
+		char *map_token;					\
+		char *kaddr;						\
+		int unmap_on_exit = (eb->map_token == NULL);		\
+		unsigned long map_start;				\
+		unsigned long map_len;					\
+		u##bits res;						\
+		err = map_extent_buffer(eb, offset,			\
+				sizeof(((type *)0)->member),		\
+				&map_token, &kaddr,			\
+				&map_start, &map_len, KM_USER1);	\
+		if (err) {						\
+			__le##bits leres;				\
+			read_eb_member(eb, s, type, member, &leres);	\
+			return le##bits##_to_cpu(leres);		\
+		}							\
+		p = (type *)(kaddr + part_offset - map_start);		\
+		res = le##bits##_to_cpu(p->member);			\
+		if (unmap_on_exit)					\
+			unmap_extent_buffer(eb, map_token, KM_USER1);	\
+		return res;						\
+	}								\
+}									\
+void btrfs_set_##name(struct extent_buffer *eb,				\
+				    type *s, u##bits val)		\
+{									\
+	unsigned long part_offset = (unsigned long)s;			\
+	unsigned long offset = part_offset + offsetof(type, member);	\
+	type *p;							\
+	/* ugly, but we want the fast path here */			\
+	if (eb->map_token && offset >= eb->map_start &&			\
+	    offset + sizeof(((type *)0)->member) <= eb->map_start +	\
+	    eb->map_len) {						\
+		p = (type *)(eb->kaddr + part_offset - eb->map_start);	\
+		p->member = cpu_to_le##bits(val);			\
+		return;							\
+	}								\
+	{								\
+		int err;						\
+		char *map_token;					\
+		char *kaddr;						\
+		int unmap_on_exit = (eb->map_token == NULL);		\
+		unsigned long map_start;				\
+		unsigned long map_len;					\
+		err = map_extent_buffer(eb, offset,			\
+				sizeof(((type *)0)->member),		\
+				&map_token, &kaddr,			\
+				&map_start, &map_len, KM_USER1);	\
+		if (err) {						\
+			__le##bits val2;				\
+			val2 = cpu_to_le##bits(val);			\
+			write_eb_member(eb, s, type, member, &val2);	\
+			return;						\
+		}							\
+		p = (type *)(kaddr + part_offset - map_start);		\
+		p->member = cpu_to_le##bits(val);			\
+		if (unmap_on_exit)					\
+			unmap_extent_buffer(eb, map_token, KM_USER1);	\
+	}								\
+}
+
+#include "ctree.h"
+
+void btrfs_node_key(struct extent_buffer *eb,
+		    struct btrfs_disk_key *disk_key, int nr)
+{
+	unsigned long ptr = btrfs_node_key_ptr_offset(nr);
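+	/* fast path: the key is inside the mapped part of the eb */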
+	if (eb->map_token && ptr >= eb->map_start &&
+	    ptr + sizeof(*disk_key) <= eb->map_start + eb->map_len) {
+		memcpy(disk_key, eb->kaddr + ptr - eb->map_start,
+			sizeof(*disk_key));
+		return;
+	} else if (eb->map_token) {
+		unmap_extent_buffer(eb, eb->map_token, KM_USER1);
+		eb->map_token = NULL;
+	}
+	read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
+		       struct btrfs_key_ptr, key, disk_key);
+}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
new file mode 100644
index 0000000..db9fb3b
--- /dev/null
+++ b/fs/btrfs/super.c
@@ -0,0 +1,723 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/module.h>
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+#include <linux/backing-dev.h>
+#include <linux/mount.h>
+#include <linux/mpage.h>
+#include <linux/swap.h>
+#include <linux/writeback.h>
+#include <linux/statfs.h>
+#include <linux/compat.h>
+#include <linux/parser.h>
+#include <linux/ctype.h>
+#include <linux/namei.h>
+#include <linux/miscdevice.h>
+#include <linux/version.h>
+#include <linux/magic.h>
+#include "compat.h"
+#include "ctree.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "btrfs_inode.h"
+#include "ioctl.h"
+#include "print-tree.h"
+#include "xattr.h"
+#include "volumes.h"
+#include "version.h"
+#include "export.h"
+#include "compression.h"
+
+
+static struct super_operations btrfs_super_ops;
+
+static void btrfs_put_super(struct super_block *sb)
+{
+	struct btrfs_root *root = btrfs_sb(sb);
+	int ret;
+
+	ret = close_ctree(root);
+	sb->s_fs_info = NULL;
+}
+
+enum {
+	Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow,
+	Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier,
+	Opt_ssd, Opt_thread_pool, Opt_noacl, Opt_compress, Opt_err,
+};
+
+static match_table_t tokens = {
+	{Opt_degraded, "degraded"},
+	{Opt_subvol, "subvol=%s"},
+	{Opt_device, "device=%s"},
+	{Opt_nodatasum, "nodatasum"},
+	{Opt_nodatacow, "nodatacow"},
+	{Opt_nobarrier, "nobarrier"},
+	{Opt_max_extent, "max_extent=%s"},
+	{Opt_max_inline, "max_inline=%s"},
+	{Opt_alloc_start, "alloc_start=%s"},
+	{Opt_thread_pool, "thread_pool=%d"},
+	{Opt_compress, "compress"},
+	{Opt_ssd, "ssd"},
+	{Opt_noacl, "noacl"},
+	{Opt_err, NULL},
+};
+
+u64 btrfs_parse_size(char *str)
+{
+	u64 res;
+	int mult = 1;
+	char *end;
+	char last;
+
+	res = simple_strtoul(str, &end, 10);
+
+	last = end[0];
+	if (isalpha(last)) {
+		last = tolower(last);
+		switch (last) {
+		case 'g':
+			mult *= 1024;
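+			/* fall through */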
+		case 'm':
+			mult *= 1024;
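+			/* fall through */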
+		case 'k':
+			mult *= 1024;
+		}
+		res = res * mult;
+	}
+	return res;
+}
+
+/*
+ * Regular mount options parser.  Everything that is needed only when
+ * reading in a new superblock is parsed here.
+ */
+int btrfs_parse_options(struct btrfs_root *root, char *options)
+{
+	struct btrfs_fs_info *info = root->fs_info;
+	substring_t args[MAX_OPT_ARGS];
+	char *p, *num;
+	int intarg;
+
+	if (!options)
+		return 0;
+
+	/*
+	 * strsep changes the string, duplicate it because parse_options
+	 * gets called twice
+	 */
+	options = kstrdup(options, GFP_NOFS);
+	if (!options)
+		return -ENOMEM;
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		int token;
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_degraded:
+			printk(KERN_INFO "btrfs: allowing degraded mounts\n");
+			btrfs_set_opt(info->mount_opt, DEGRADED);
+			break;
+		case Opt_subvol:
+		case Opt_device:
+			/*
+			 * These are parsed by btrfs_parse_early_options
+			 * and can be happily ignored here.
+			 */
+			break;
+		case Opt_nodatasum:
+			printk(KERN_INFO "btrfs: setting nodatasum\n");
+			btrfs_set_opt(info->mount_opt, NODATASUM);
+			break;
+		case Opt_nodatacow:
+			printk(KERN_INFO "btrfs: setting nodatacow\n");
+			btrfs_set_opt(info->mount_opt, NODATACOW);
+			btrfs_set_opt(info->mount_opt, NODATASUM);
+			break;
+		case Opt_compress:
+			printk(KERN_INFO "btrfs: use compression\n");
+			btrfs_set_opt(info->mount_opt, COMPRESS);
+			break;
+		case Opt_ssd:
+			printk(KERN_INFO "btrfs: use ssd allocation scheme\n");
+			btrfs_set_opt(info->mount_opt, SSD);
+			break;
+		case Opt_nobarrier:
+			printk(KERN_INFO "btrfs: turning off barriers\n");
+			btrfs_set_opt(info->mount_opt, NOBARRIER);
+			break;
+		case Opt_thread_pool:
+			intarg = 0;
+			match_int(&args[0], &intarg);
+			if (intarg) {
+				info->thread_pool_size = intarg;
+				printk(KERN_INFO "btrfs: thread pool %d\n",
+				       info->thread_pool_size);
+			}
+			break;
+		case Opt_max_extent:
+			num = match_strdup(&args[0]);
+			if (num) {
+				info->max_extent = btrfs_parse_size(num);
+				kfree(num);
+
+				info->max_extent = max_t(u64,
+					info->max_extent, root->sectorsize);
+				printk(KERN_INFO "btrfs: max_extent at %llu\n",
+				       info->max_extent);
+			}
+			break;
+		case Opt_max_inline:
+			num = match_strdup(&args[0]);
+			if (num) {
+				info->max_inline = btrfs_parse_size(num);
+				kfree(num);
+
+				if (info->max_inline) {
+					info->max_inline = max_t(u64,
+						info->max_inline,
+						root->sectorsize);
+				}
+				printk(KERN_INFO "btrfs: max_inline at %llu\n",
+					info->max_inline);
+			}
+			break;
+		case Opt_alloc_start:
+			num = match_strdup(&args[0]);
+			if (num) {
+				info->alloc_start = btrfs_parse_size(num);
+				kfree(num);
+				printk(KERN_INFO
+					"btrfs: allocations start at %llu\n",
+					info->alloc_start);
+			}
+			break;
+		case Opt_noacl:
+			root->fs_info->sb->s_flags &= ~MS_POSIXACL;
+			break;
+		default:
+			break;
+		}
+	}
+	kfree(options);
+	return 0;
+}
+
+/*
+ * Parse mount options that are required early in the mount process.
+ *
+ * All other options will be parsed much later in the mount process and
+ * only when we need to allocate a new super block.
+ */
+static int btrfs_parse_early_options(const char *options, fmode_t flags,
+		void *holder, char **subvol_name,
+		struct btrfs_fs_devices **fs_devices)
+{
+	substring_t args[MAX_OPT_ARGS];
+	char *opts, *p;
+	int error = 0;
+
+	if (!options)
+		goto out;
+
+	/*
+	 * strsep changes the string, duplicate it because parse_options
+	 * gets called twice
+	 */
+	opts = kstrdup(options, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	while ((p = strsep(&opts, ",")) != NULL) {
+		int token;
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_subvol:
+			*subvol_name = match_strdup(&args[0]);
+			break;
+		case Opt_device:
+			error = btrfs_scan_one_device(match_strdup(&args[0]),
+					flags, holder, fs_devices);
+			if (error)
+				goto out_free_opts;
+			break;
+		default:
+			break;
+		}
+	}
+
+ out_free_opts:
+	kfree(opts);
+ out:
+	/*
+	 * If no subvolume name is specified we use the default one.  Allocate
+	 * a copy of the string "." here so that code later in the
+	 * mount path doesn't care if it's the default volume or another one.
+	 */
+	if (!*subvol_name) {
+		*subvol_name = kstrdup(".", GFP_KERNEL);
+		if (!*subvol_name)
+			return -ENOMEM;
+	}
+	return error;
+}
+
+static int btrfs_fill_super(struct super_block *sb,
+			    struct btrfs_fs_devices *fs_devices,
+			    void *data, int silent)
+{
+	struct inode *inode;
+	struct dentry *root_dentry;
+	struct btrfs_super_block *disk_super;
+	struct btrfs_root *tree_root;
+	struct btrfs_inode *bi;
+	int err;
+
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
+	sb->s_magic = BTRFS_SUPER_MAGIC;
+	sb->s_op = &btrfs_super_ops;
+	sb->s_export_op = &btrfs_export_ops;
+	sb->s_xattr = btrfs_xattr_handlers;
+	sb->s_time_gran = 1;
+	sb->s_flags |= MS_POSIXACL;
+
+	tree_root = open_ctree(sb, fs_devices, (char *)data);
+
+	if (IS_ERR(tree_root)) {
+		printk(KERN_ERR "btrfs: open_ctree failed\n");
+		return PTR_ERR(tree_root);
+	}
+	sb->s_fs_info = tree_root;
+	disk_super = &tree_root->fs_info->super_copy;
+	inode = btrfs_iget_locked(sb, BTRFS_FIRST_FREE_OBJECTID,
+				  tree_root->fs_info->fs_root);
+	if (!inode) {
+		err = -ENOMEM;
+		goto fail_close;
+	}
+
+	bi = BTRFS_I(inode);
+	bi->location.objectid = inode->i_ino;
+	bi->location.offset = 0;
+	bi->root = tree_root->fs_info->fs_root;
+
+	btrfs_set_key_type(&bi->location, BTRFS_INODE_ITEM_KEY);
+	if (inode->i_state & I_NEW) {
+		btrfs_read_locked_inode(inode);
+		unlock_new_inode(inode);
+	}
+
+	root_dentry = d_alloc_root(inode);
+	if (!root_dentry) {
+		iput(inode);
+		err = -ENOMEM;
+		goto fail_close;
+	}
+#if 0
+	/* this does the super kobj at the same time */
+	err = btrfs_sysfs_add_super(tree_root->fs_info);
+	if (err)
+		goto fail_close;
+#endif
+
+	sb->s_root = root_dentry;
+
+	save_mount_options(sb, data);
+	return 0;
+
+fail_close:
+	close_ctree(tree_root);
+	return err;
+}
+
+int btrfs_sync_fs(struct super_block *sb, int wait)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *root;
+	int ret;
+	root = btrfs_sb(sb);
+
+	if (sb->s_flags & MS_RDONLY)
+		return 0;
+
+	sb->s_dirt = 0;
+	if (!wait) {
+		filemap_flush(root->fs_info->btree_inode->i_mapping);
+		return 0;
+	}
+
+	btrfs_start_delalloc_inodes(root);
+	btrfs_wait_ordered_extents(root, 0);
+
+	btrfs_clean_old_snapshots(root);
+	trans = btrfs_start_transaction(root, 1);
+	ret = btrfs_commit_transaction(trans, root);
+	sb->s_dirt = 0;
+	return ret;
+}
+
+static void btrfs_write_super(struct super_block *sb)
+{
+	sb->s_dirt = 0;
+}
+
+static int btrfs_test_super(struct super_block *s, void *data)
+{
+	struct btrfs_fs_devices *test_fs_devices = data;
+	struct btrfs_root *root = btrfs_sb(s);
+
+	return root->fs_info->fs_devices == test_fs_devices;
+}
+
+/*
+ * Find a superblock for the given device / mount point.
+ *
+ * Note:  This is based on get_sb_bdev from fs/super.c with a few additions
+ *	  for multiple device setup.  Make sure to keep it in sync.
+ */
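+/*
+ * Rough flow, as implemented below: parse the early options (subvol= and
+ * device=), scan dev_name so the multi-device list is complete, open the
+ * device set, then sget() keyed on the fs_devices pointer so a second
+ * mount of the same filesystem reuses the existing superblock instead of
+ * filling a new one.  Finally the requested subvolume (default ".") is
+ * looked up under the root dentry.
+ */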
+static int btrfs_get_sb(struct file_system_type *fs_type, int flags,
+		const char *dev_name, void *data, struct vfsmount *mnt)
+{
+	char *subvol_name = NULL;
+	struct block_device *bdev = NULL;
+	struct super_block *s;
+	struct dentry *root;
+	struct btrfs_fs_devices *fs_devices = NULL;
+	fmode_t mode = FMODE_READ;
+	int error = 0;
+
+	if (!(flags & MS_RDONLY))
+		mode |= FMODE_WRITE;
+
+	error = btrfs_parse_early_options(data, mode, fs_type,
+					  &subvol_name, &fs_devices);
+	if (error)
+		return error;
+
+	error = btrfs_scan_one_device(dev_name, mode, fs_type, &fs_devices);
+	if (error)
+		goto error_free_subvol_name;
+
+	error = btrfs_open_devices(fs_devices, mode, fs_type);
+	if (error)
+		goto error_free_subvol_name;
+
+	if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
+		error = -EACCES;
+		goto error_close_devices;
+	}
+
+	bdev = fs_devices->latest_bdev;
+	s = sget(fs_type, btrfs_test_super, set_anon_super, fs_devices);
+	if (IS_ERR(s))
+		goto error_s;
+
+	if (s->s_root) {
+		if ((flags ^ s->s_flags) & MS_RDONLY) {
+			up_write(&s->s_umount);
+			deactivate_super(s);
+			error = -EBUSY;
+			goto error_close_devices;
+		}
+
+		btrfs_close_devices(fs_devices);
+	} else {
+		char b[BDEVNAME_SIZE];
+
+		s->s_flags = flags;
+		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
+		error = btrfs_fill_super(s, fs_devices, data,
+					 flags & MS_SILENT ? 1 : 0);
+		if (error) {
+			up_write(&s->s_umount);
+			deactivate_super(s);
+			goto error_free_subvol_name;
+		}
+
+		btrfs_sb(s)->fs_info->bdev_holder = fs_type;
+		s->s_flags |= MS_ACTIVE;
+	}
+
+	if (!strcmp(subvol_name, "."))
+		root = dget(s->s_root);
+	else {
+		mutex_lock(&s->s_root->d_inode->i_mutex);
+		root = lookup_one_len(subvol_name, s->s_root,
+				      strlen(subvol_name));
+		mutex_unlock(&s->s_root->d_inode->i_mutex);
+
+		if (IS_ERR(root)) {
+			up_write(&s->s_umount);
+			deactivate_super(s);
+			error = PTR_ERR(root);
+			goto error_free_subvol_name;
+		}
+		if (!root->d_inode) {
+			dput(root);
+			up_write(&s->s_umount);
+			deactivate_super(s);
+			error = -ENXIO;
+			goto error_free_subvol_name;
+		}
+	}
+
+	mnt->mnt_sb = s;
+	mnt->mnt_root = root;
+
+	kfree(subvol_name);
+	return 0;
+
+error_s:
+	error = PTR_ERR(s);
+error_close_devices:
+	btrfs_close_devices(fs_devices);
+error_free_subvol_name:
+	kfree(subvol_name);
+	return error;
+}
+
+static int btrfs_remount(struct super_block *sb, int *flags, char *data)
+{
+	struct btrfs_root *root = btrfs_sb(sb);
+	int ret;
+
+	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
+		return 0;
+
+	if (*flags & MS_RDONLY) {
+		sb->s_flags |= MS_RDONLY;
+
+		ret =  btrfs_commit_super(root);
+		WARN_ON(ret);
+	} else {
+		if (root->fs_info->fs_devices->rw_devices == 0)
+			return -EACCES;
+
+		if (btrfs_super_log_root(&root->fs_info->super_copy) != 0)
+			return -EINVAL;
+
+		ret = btrfs_cleanup_reloc_trees(root);
+		WARN_ON(ret);
+
+		ret = btrfs_cleanup_fs_roots(root->fs_info);
+		WARN_ON(ret);
+
+		sb->s_flags &= ~MS_RDONLY;
+	}
+
+	return 0;
+}
+
+static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+	struct btrfs_root *root = btrfs_sb(dentry->d_sb);
+	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
+	int bits = dentry->d_sb->s_blocksize_bits;
+	__be32 *fsid = (__be32 *)root->fs_info->fsid;
+
+	buf->f_namelen = BTRFS_NAME_LEN;
+	buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits;
+	buf->f_bfree = buf->f_blocks -
+		(btrfs_super_bytes_used(disk_super) >> bits);
+	buf->f_bavail = buf->f_bfree;
+	buf->f_bsize = dentry->d_sb->s_blocksize;
+	buf->f_type = BTRFS_SUPER_MAGIC;
+
+	/* We treat it as constant endianness (it doesn't matter _which_)
+	   because we want the fsid to come out the same whether mounted
+	   on a big-endian or little-endian host */
+	buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
+	buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
+	/* Mask in the root object ID too, to disambiguate subvols */
+	buf->f_fsid.val[0] ^= BTRFS_I(dentry->d_inode)->root->objectid >> 32;
+	buf->f_fsid.val[1] ^= BTRFS_I(dentry->d_inode)->root->objectid;
+
+	return 0;
+}
+
+static struct file_system_type btrfs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "btrfs",
+	.get_sb		= btrfs_get_sb,
+	.kill_sb	= kill_anon_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+/*
+ * used by btrfsctl to scan devices when no FS is mounted
+ */
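+/*
+ * Illustrative use: btrfsctl opens the btrfs-control misc device registered
+ * in btrfs_interface_init() below and issues BTRFS_IOC_SCAN_DEV with a
+ * device path, so multi-device filesystems are assembled before mount time.
+ */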
+static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	struct btrfs_ioctl_vol_args *vol;
+	struct btrfs_fs_devices *fs_devices;
+	int ret = -ENOTTY;
+	int len;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	vol = kmalloc(sizeof(*vol), GFP_KERNEL);
+	if (!vol)
+		return -ENOMEM;
+	if (copy_from_user(vol, (void __user *)arg, sizeof(*vol))) {
+		ret = -EFAULT;
+		goto out;
+	}
+	len = strnlen(vol->name, BTRFS_PATH_NAME_MAX);
+
+	switch (cmd) {
+	case BTRFS_IOC_SCAN_DEV:
+		ret = btrfs_scan_one_device(vol->name, FMODE_READ,
+					    &btrfs_fs_type, &fs_devices);
+		break;
+	}
+out:
+	kfree(vol);
+	return ret;
+}
+
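+/*
+ * Freezing only parks the background work: holding these two mutexes is
+ * meant to keep the transaction kthread and the snapshot cleaner from
+ * starting anything new until btrfs_unfreeze() releases them.
+ */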
+static int btrfs_freeze(struct super_block *sb)
+{
+	struct btrfs_root *root = btrfs_sb(sb);
+	mutex_lock(&root->fs_info->transaction_kthread_mutex);
+	mutex_lock(&root->fs_info->cleaner_mutex);
+	return 0;
+}
+
+static int btrfs_unfreeze(struct super_block *sb)
+{
+	struct btrfs_root *root = btrfs_sb(sb);
+	mutex_unlock(&root->fs_info->cleaner_mutex);
+	mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+	return 0;
+}
+
+static struct super_operations btrfs_super_ops = {
+	.delete_inode	= btrfs_delete_inode,
+	.put_super	= btrfs_put_super,
+	.write_super	= btrfs_write_super,
+	.sync_fs	= btrfs_sync_fs,
+	.show_options	= generic_show_options,
+	.write_inode	= btrfs_write_inode,
+	.dirty_inode	= btrfs_dirty_inode,
+	.alloc_inode	= btrfs_alloc_inode,
+	.destroy_inode	= btrfs_destroy_inode,
+	.statfs		= btrfs_statfs,
+	.remount_fs	= btrfs_remount,
+	.freeze_fs	= btrfs_freeze,
+	.unfreeze_fs	= btrfs_unfreeze,
+};
+
+static const struct file_operations btrfs_ctl_fops = {
+	.unlocked_ioctl	 = btrfs_control_ioctl,
+	.compat_ioctl = btrfs_control_ioctl,
+	.owner	 = THIS_MODULE,
+};
+
+static struct miscdevice btrfs_misc = {
+	.minor		= MISC_DYNAMIC_MINOR,
+	.name		= "btrfs-control",
+	.fops		= &btrfs_ctl_fops
+};
+
+static int btrfs_interface_init(void)
+{
+	return misc_register(&btrfs_misc);
+}
+
+static void btrfs_interface_exit(void)
+{
+	if (misc_deregister(&btrfs_misc) < 0)
+		printk(KERN_INFO "misc_deregister failed for control device\n");
+}
+
+static int __init init_btrfs_fs(void)
+{
+	int err;
+
+	err = btrfs_init_sysfs();
+	if (err)
+		return err;
+
+	err = btrfs_init_cachep();
+	if (err)
+		goto free_sysfs;
+
+	err = extent_io_init();
+	if (err)
+		goto free_cachep;
+
+	err = extent_map_init();
+	if (err)
+		goto free_extent_io;
+
+	err = btrfs_interface_init();
+	if (err)
+		goto free_extent_map;
+
+	err = register_filesystem(&btrfs_fs_type);
+	if (err)
+		goto unregister_ioctl;
+
+	printk(KERN_INFO "%s loaded\n", BTRFS_BUILD_VERSION);
+	return 0;
+
+unregister_ioctl:
+	btrfs_interface_exit();
+free_extent_map:
+	extent_map_exit();
+free_extent_io:
+	extent_io_exit();
+free_cachep:
+	btrfs_destroy_cachep();
+free_sysfs:
+	btrfs_exit_sysfs();
+	return err;
+}
+
+static void __exit exit_btrfs_fs(void)
+{
+	btrfs_destroy_cachep();
+	extent_map_exit();
+	extent_io_exit();
+	btrfs_interface_exit();
+	unregister_filesystem(&btrfs_fs_type);
+	btrfs_exit_sysfs();
+	btrfs_cleanup_fs_uuids();
+	btrfs_zlib_exit();
+}
+
+module_init(init_btrfs_fs)
+module_exit(exit_btrfs_fs)
+
+MODULE_LICENSE("GPL");
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
new file mode 100644
index 0000000..a240b6f
--- /dev/null
+++ b/fs/btrfs/sysfs.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/buffer_head.h>
+#include <linux/module.h>
+#include <linux/kobject.h>
+
+#include "ctree.h"
+#include "disk-io.h"
+#include "transaction.h"
+
+static ssize_t root_blocks_used_show(struct btrfs_root *root, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+		(unsigned long long)btrfs_root_used(&root->root_item));
+}
+
+static ssize_t root_block_limit_show(struct btrfs_root *root, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+		(unsigned long long)btrfs_root_limit(&root->root_item));
+}
+
+static ssize_t super_blocks_used_show(struct btrfs_fs_info *fs, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+		(unsigned long long)btrfs_super_bytes_used(&fs->super_copy));
+}
+
+static ssize_t super_total_blocks_show(struct btrfs_fs_info *fs, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+		(unsigned long long)btrfs_super_total_bytes(&fs->super_copy));
+}
+
+static ssize_t super_blocksize_show(struct btrfs_fs_info *fs, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+		(unsigned long long)btrfs_super_sectorsize(&fs->super_copy));
+}
+
+/* this is for root attrs (subvols/snapshots) */
+struct btrfs_root_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct btrfs_root *, char *);
+	ssize_t (*store)(struct btrfs_root *, const char *, size_t);
+};
+
+#define ROOT_ATTR(name, mode, show, store) \
+static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, \
+							      show, store)
+
+ROOT_ATTR(blocks_used,	0444,	root_blocks_used_show,	NULL);
+ROOT_ATTR(block_limit,	0644,	root_block_limit_show,	NULL);
+
+static struct attribute *btrfs_root_attrs[] = {
+	&btrfs_root_attr_blocks_used.attr,
+	&btrfs_root_attr_block_limit.attr,
+	NULL,
+};
+
+/* this is for super attrs (actual full fs) */
+struct btrfs_super_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct btrfs_fs_info *, char *);
+	ssize_t (*store)(struct btrfs_fs_info *, const char *, size_t);
+};
+
+#define SUPER_ATTR(name, mode, show, store) \
+static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, \
+								show, store)
+
+SUPER_ATTR(blocks_used,		0444,	super_blocks_used_show,		NULL);
+SUPER_ATTR(total_blocks,	0444,	super_total_blocks_show,	NULL);
+SUPER_ATTR(blocksize,		0444,	super_blocksize_show,		NULL);
+
+static struct attribute *btrfs_super_attrs[] = {
+	&btrfs_super_attr_blocks_used.attr,
+	&btrfs_super_attr_total_blocks.attr,
+	&btrfs_super_attr_blocksize.attr,
+	NULL,
+};
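+
+/*
+ * With the kset created in btrfs_init_sysfs() these show up roughly as
+ * /sys/fs/btrfs/<s_id>/{blocks_used,total_blocks,blocksize} plus, for each
+ * root registered with btrfs_sysfs_add_root(), a <root name> subdirectory
+ * holding blocks_used and block_limit.  Note the per-super registration is
+ * currently compiled out in super.c.
+ */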
+
+static ssize_t btrfs_super_attr_show(struct kobject *kobj,
+				    struct attribute *attr, char *buf)
+{
+	struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
+						super_kobj);
+	struct btrfs_super_attr *a = container_of(attr,
+						  struct btrfs_super_attr,
+						  attr);
+
+	return a->show ? a->show(fs, buf) : 0;
+}
+
+static ssize_t btrfs_super_attr_store(struct kobject *kobj,
+				     struct attribute *attr,
+				     const char *buf, size_t len)
+{
+	struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
+						super_kobj);
+	struct btrfs_super_attr *a = container_of(attr,
+						  struct btrfs_super_attr,
+						  attr);
+
+	return a->store ? a->store(fs, buf, len) : 0;
+}
+
+static ssize_t btrfs_root_attr_show(struct kobject *kobj,
+				    struct attribute *attr, char *buf)
+{
+	struct btrfs_root *root = container_of(kobj, struct btrfs_root,
+						root_kobj);
+	struct btrfs_root_attr *a = container_of(attr,
+						 struct btrfs_root_attr,
+						 attr);
+
+	return a->show ? a->show(root, buf) : 0;
+}
+
+static ssize_t btrfs_root_attr_store(struct kobject *kobj,
+				     struct attribute *attr,
+				     const char *buf, size_t len)
+{
+	struct btrfs_root *root = container_of(kobj, struct btrfs_root,
+						root_kobj);
+	struct btrfs_root_attr *a = container_of(attr,
+						 struct btrfs_root_attr,
+						 attr);
+	return a->store ? a->store(root, buf, len) : 0;
+}
+
+static void btrfs_super_release(struct kobject *kobj)
+{
+	struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
+						super_kobj);
+	complete(&fs->kobj_unregister);
+}
+
+static void btrfs_root_release(struct kobject *kobj)
+{
+	struct btrfs_root *root = container_of(kobj, struct btrfs_root,
+						root_kobj);
+	complete(&root->kobj_unregister);
+}
+
+static struct sysfs_ops btrfs_super_attr_ops = {
+	.show	= btrfs_super_attr_show,
+	.store	= btrfs_super_attr_store,
+};
+
+static struct sysfs_ops btrfs_root_attr_ops = {
+	.show	= btrfs_root_attr_show,
+	.store	= btrfs_root_attr_store,
+};
+
+static struct kobj_type btrfs_root_ktype = {
+	.default_attrs	= btrfs_root_attrs,
+	.sysfs_ops	= &btrfs_root_attr_ops,
+	.release	= btrfs_root_release,
+};
+
+static struct kobj_type btrfs_super_ktype = {
+	.default_attrs	= btrfs_super_attrs,
+	.sysfs_ops	= &btrfs_super_attr_ops,
+	.release	= btrfs_super_release,
+};
+
+/* /sys/fs/btrfs/ entry */
+static struct kset *btrfs_kset;
+
+int btrfs_sysfs_add_super(struct btrfs_fs_info *fs)
+{
+	int error;
+	char *name;
+	char c;
+	int len = strlen(fs->sb->s_id) + 1;
+	int i;
+
+	name = kmalloc(len, GFP_NOFS);
+	if (!name) {
+		error = -ENOMEM;
+		goto fail;
+	}
+
+	for (i = 0; i < len; i++) {
+		c = fs->sb->s_id[i];
+		if (c == '/' || c == '\\')
+			c = '!';
+		name[i] = c;
+	}
+	name[len - 1] = '\0';
+
+	fs->super_kobj.kset = btrfs_kset;
+	error = kobject_init_and_add(&fs->super_kobj, &btrfs_super_ktype,
+				     NULL, "%s", name);
+	kfree(name);
+	if (error)
+		goto fail;
+
+	return 0;
+
+fail:
+	printk(KERN_ERR "btrfs: sysfs creation for super failed\n");
+	return error;
+}
+
+int btrfs_sysfs_add_root(struct btrfs_root *root)
+{
+	int error;
+
+	error = kobject_init_and_add(&root->root_kobj, &btrfs_root_ktype,
+				     &root->fs_info->super_kobj,
+				     "%s", root->name);
+	if (error)
+		goto fail;
+
+	return 0;
+
+fail:
+	printk(KERN_ERR "btrfs: sysfs creation for root failed\n");
+	return error;
+}
+
+void btrfs_sysfs_del_root(struct btrfs_root *root)
+{
+	kobject_put(&root->root_kobj);
+	wait_for_completion(&root->kobj_unregister);
+}
+
+void btrfs_sysfs_del_super(struct btrfs_fs_info *fs)
+{
+	kobject_put(&fs->super_kobj);
+	wait_for_completion(&fs->kobj_unregister);
+}
+
+int btrfs_init_sysfs(void)
+{
+	btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj);
+	if (!btrfs_kset)
+		return -ENOMEM;
+	return 0;
+}
+
+void btrfs_exit_sysfs(void)
+{
+	kset_unregister(btrfs_kset);
+}
+
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
new file mode 100644
index 0000000..8a08f94
--- /dev/null
+++ b/fs/btrfs/transaction.c
@@ -0,0 +1,1097 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/writeback.h>
+#include <linux/pagemap.h>
+#include <linux/blkdev.h>
+#include "ctree.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "locking.h"
+#include "ref-cache.h"
+#include "tree-log.h"
+
+#define BTRFS_ROOT_TRANS_TAG 0
+
+static noinline void put_transaction(struct btrfs_transaction *transaction)
+{
+	WARN_ON(transaction->use_count == 0);
+	transaction->use_count--;
+	if (transaction->use_count == 0) {
+		list_del_init(&transaction->list);
+		memset(transaction, 0, sizeof(*transaction));
+		kmem_cache_free(btrfs_transaction_cachep, transaction);
+	}
+}
+
+/*
+ * either allocate a new transaction or hop into the existing one
+ */
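+/* callers hold fs_info->trans_mutex across this call (see start_transaction) */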
+static noinline int join_transaction(struct btrfs_root *root)
+{
+	struct btrfs_transaction *cur_trans;
+	cur_trans = root->fs_info->running_transaction;
+	if (!cur_trans) {
+		cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
+					     GFP_NOFS);
+		BUG_ON(!cur_trans);
+		root->fs_info->generation++;
+		root->fs_info->last_alloc = 0;
+		root->fs_info->last_data_alloc = 0;
+		cur_trans->num_writers = 1;
+		cur_trans->num_joined = 0;
+		cur_trans->transid = root->fs_info->generation;
+		init_waitqueue_head(&cur_trans->writer_wait);
+		init_waitqueue_head(&cur_trans->commit_wait);
+		cur_trans->in_commit = 0;
+		cur_trans->blocked = 0;
+		cur_trans->use_count = 1;
+		cur_trans->commit_done = 0;
+		cur_trans->start_time = get_seconds();
+		INIT_LIST_HEAD(&cur_trans->pending_snapshots);
+		list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
+		extent_io_tree_init(&cur_trans->dirty_pages,
+				     root->fs_info->btree_inode->i_mapping,
+				     GFP_NOFS);
+		spin_lock(&root->fs_info->new_trans_lock);
+		root->fs_info->running_transaction = cur_trans;
+		spin_unlock(&root->fs_info->new_trans_lock);
+	} else {
+		cur_trans->num_writers++;
+		cur_trans->num_joined++;
+	}
+
+	return 0;
+}
+
+/*
+ * this does all the record keeping required to make sure that a reference
+ * counted root is properly recorded in a given transaction.  This is required
+ * to make sure the old root from before we joined the transaction is deleted
+ * when the transaction commits
+ */
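+/*
+ * The radix tree tag set here is what add_dirty_roots() scans at commit time
+ * to find every reference counted root that was modified in the transaction.
+ */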
+noinline int btrfs_record_root_in_trans(struct btrfs_root *root)
+{
+	struct btrfs_dirty_root *dirty;
+	u64 running_trans_id = root->fs_info->running_transaction->transid;
+	if (root->ref_cows && root->last_trans < running_trans_id) {
+		WARN_ON(root == root->fs_info->extent_root);
+		if (root->root_item.refs != 0) {
+			radix_tree_tag_set(&root->fs_info->fs_roots_radix,
+				   (unsigned long)root->root_key.objectid,
+				   BTRFS_ROOT_TRANS_TAG);
+
+			dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
+			BUG_ON(!dirty);
+			dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS);
+			BUG_ON(!dirty->root);
+			dirty->latest_root = root;
+			INIT_LIST_HEAD(&dirty->list);
+
+			root->commit_root = btrfs_root_node(root);
+
+			memcpy(dirty->root, root, sizeof(*root));
+			spin_lock_init(&dirty->root->node_lock);
+			spin_lock_init(&dirty->root->list_lock);
+			mutex_init(&dirty->root->objectid_mutex);
+			mutex_init(&dirty->root->log_mutex);
+			INIT_LIST_HEAD(&dirty->root->dead_list);
+			dirty->root->node = root->commit_root;
+			dirty->root->commit_root = NULL;
+
+			spin_lock(&root->list_lock);
+			list_add(&dirty->root->dead_list, &root->dead_list);
+			spin_unlock(&root->list_lock);
+
+			root->dirty_root = dirty;
+		} else {
+			WARN_ON(1);
+		}
+		root->last_trans = running_trans_id;
+	}
+	return 0;
+}
+
+/* wait for commit against the current transaction to become unblocked
+ * when this is done, it is safe to start a new transaction, but the current
+ * transaction might not be fully on disk.
+ */
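+/* the loop below drops trans_mutex around schedule() and retakes it after */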
+static void wait_current_trans(struct btrfs_root *root)
+{
+	struct btrfs_transaction *cur_trans;
+
+	cur_trans = root->fs_info->running_transaction;
+	if (cur_trans && cur_trans->blocked) {
+		DEFINE_WAIT(wait);
+		cur_trans->use_count++;
+		while (1) {
+			prepare_to_wait(&root->fs_info->transaction_wait, &wait,
+					TASK_UNINTERRUPTIBLE);
+			if (cur_trans->blocked) {
+				mutex_unlock(&root->fs_info->trans_mutex);
+				schedule();
+				mutex_lock(&root->fs_info->trans_mutex);
+				finish_wait(&root->fs_info->transaction_wait,
+					    &wait);
+			} else {
+				finish_wait(&root->fs_info->transaction_wait,
+					    &wait);
+				break;
+			}
+		}
+		put_transaction(cur_trans);
+	}
+}
+
+static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
+					     int num_blocks, int wait)
+{
+	struct btrfs_trans_handle *h =
+		kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
+	int ret;
+
+	mutex_lock(&root->fs_info->trans_mutex);
+	if (!root->fs_info->log_root_recovering &&
+	    ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2))
+		wait_current_trans(root);
+	ret = join_transaction(root);
+	BUG_ON(ret);
+
+	btrfs_record_root_in_trans(root);
+	h->transid = root->fs_info->running_transaction->transid;
+	h->transaction = root->fs_info->running_transaction;
+	h->blocks_reserved = num_blocks;
+	h->blocks_used = 0;
+	h->block_group = 0;
+	h->alloc_exclude_nr = 0;
+	h->alloc_exclude_start = 0;
+	root->fs_info->running_transaction->use_count++;
+	mutex_unlock(&root->fs_info->trans_mutex);
+	return h;
+}
+
+struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
+						   int num_blocks)
+{
+	return start_transaction(root, num_blocks, 1);
+}
+struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
+						   int num_blocks)
+{
+	return start_transaction(root, num_blocks, 0);
+}
+
+struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
+							 int num_blocks)
+{
+	return start_transaction(r, num_blocks, 2);
+}
+
+/* wait for a transaction commit to be fully complete */
+static noinline int wait_for_commit(struct btrfs_root *root,
+				    struct btrfs_transaction *commit)
+{
+	DEFINE_WAIT(wait);
+	mutex_lock(&root->fs_info->trans_mutex);
+	while (!commit->commit_done) {
+		prepare_to_wait(&commit->commit_wait, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (commit->commit_done)
+			break;
+		mutex_unlock(&root->fs_info->trans_mutex);
+		schedule();
+		mutex_lock(&root->fs_info->trans_mutex);
+	}
+	mutex_unlock(&root->fs_info->trans_mutex);
+	finish_wait(&commit->commit_wait, &wait);
+	return 0;
+}
+
+/*
+ * rate limit against the drop_snapshot code.  This helps to slow down new
+ * operations if the drop_snapshot code isn't able to keep up.
+ */
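+/*
+ * The harder_count checks below re-enter the wait up to 2, 10 or 20 times
+ * depending on how big the ref cache has grown (1MB, 5MB and 10MB limits).
+ */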
+static void throttle_on_drops(struct btrfs_root *root)
+{
+	struct btrfs_fs_info *info = root->fs_info;
+	int harder_count = 0;
+
+harder:
+	if (atomic_read(&info->throttles)) {
+		DEFINE_WAIT(wait);
+		int thr;
+		thr = atomic_read(&info->throttle_gen);
+
+		do {
+			prepare_to_wait(&info->transaction_throttle,
+					&wait, TASK_UNINTERRUPTIBLE);
+			if (!atomic_read(&info->throttles)) {
+				finish_wait(&info->transaction_throttle, &wait);
+				break;
+			}
+			schedule();
+			finish_wait(&info->transaction_throttle, &wait);
+		} while (thr == atomic_read(&info->throttle_gen));
+		harder_count++;
+
+		if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
+		    harder_count < 2)
+			goto harder;
+
+		if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
+		    harder_count < 10)
+			goto harder;
+
+		if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
+		    harder_count < 20)
+			goto harder;
+	}
+}
+
+void btrfs_throttle(struct btrfs_root *root)
+{
+	mutex_lock(&root->fs_info->trans_mutex);
+	if (!root->fs_info->open_ioctl_trans)
+		wait_current_trans(root);
+	mutex_unlock(&root->fs_info->trans_mutex);
+
+	throttle_on_drops(root);
+}
+
+static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root, int throttle)
+{
+	struct btrfs_transaction *cur_trans;
+	struct btrfs_fs_info *info = root->fs_info;
+
+	mutex_lock(&info->trans_mutex);
+	cur_trans = info->running_transaction;
+	WARN_ON(cur_trans != trans->transaction);
+	WARN_ON(cur_trans->num_writers < 1);
+	cur_trans->num_writers--;
+
+	if (waitqueue_active(&cur_trans->writer_wait))
+		wake_up(&cur_trans->writer_wait);
+	put_transaction(cur_trans);
+	mutex_unlock(&info->trans_mutex);
+	memset(trans, 0, sizeof(*trans));
+	kmem_cache_free(btrfs_trans_handle_cachep, trans);
+
+	if (throttle)
+		throttle_on_drops(root);
+
+	return 0;
+}
+
+int btrfs_end_transaction(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root)
+{
+	return __btrfs_end_transaction(trans, root, 0);
+}
+
+int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
+				   struct btrfs_root *root)
+{
+	return __btrfs_end_transaction(trans, root, 1);
+}
+
+/*
+ * when btree blocks are allocated, they have some corresponding bits set for
+ * them in one of two extent_io trees.  This is used to make sure all of
+ * those extents are on disk for transaction or log commit
+ */
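+/*
+ * Two passes below: the first walks the EXTENT_DIRTY ranges and writes every
+ * dirty btree page, the second clears the dirty bits, writes anything still
+ * dirty and waits for all the writeback to finish.
+ */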
+int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
+					struct extent_io_tree *dirty_pages)
+{
+	int ret;
+	int err = 0;
+	int werr = 0;
+	struct page *page;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	u64 start = 0;
+	u64 end;
+	unsigned long index;
+
+	while (1) {
+		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
+					    EXTENT_DIRTY);
+		if (ret)
+			break;
+		while (start <= end) {
+			cond_resched();
+
+			index = start >> PAGE_CACHE_SHIFT;
+			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
+			page = find_get_page(btree_inode->i_mapping, index);
+			if (!page)
+				continue;
+
+			btree_lock_page_hook(page);
+			if (!page->mapping) {
+				unlock_page(page);
+				page_cache_release(page);
+				continue;
+			}
+
+			if (PageWriteback(page)) {
+				if (PageDirty(page))
+					wait_on_page_writeback(page);
+				else {
+					unlock_page(page);
+					page_cache_release(page);
+					continue;
+				}
+			}
+			err = write_one_page(page, 0);
+			if (err)
+				werr = err;
+			page_cache_release(page);
+		}
+	}
+	while (1) {
+		ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
+					    EXTENT_DIRTY);
+		if (ret)
+			break;
+
+		clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
+		while (start <= end) {
+			index = start >> PAGE_CACHE_SHIFT;
+			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
+			page = find_get_page(btree_inode->i_mapping, index);
+			if (!page)
+				continue;
+			if (PageDirty(page)) {
+				btree_lock_page_hook(page);
+				wait_on_page_writeback(page);
+				err = write_one_page(page, 0);
+				if (err)
+					werr = err;
+			}
+			wait_on_page_writeback(page);
+			page_cache_release(page);
+			cond_resched();
+		}
+	}
+	if (err)
+		werr = err;
+	return werr;
+}
+
+int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
+				     struct btrfs_root *root)
+{
+	if (!trans || !trans->transaction) {
+		struct inode *btree_inode;
+		btree_inode = root->fs_info->btree_inode;
+		return filemap_write_and_wait(btree_inode->i_mapping);
+	}
+	return btrfs_write_and_wait_marked_extents(root,
+					   &trans->transaction->dirty_pages);
+}
+
+/*
+ * this is used to update the root pointer in the tree of tree roots.
+ *
+ * But, in the case of the extent allocation tree, updating the root
+ * pointer may allocate blocks which may change the root of the extent
+ * allocation tree.
+ *
+ * So, this loops and repeats and makes sure the cowonly root didn't
+ * change while the root pointer was being updated in the metadata.
+ */
+static int update_cowonly_root(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root)
+{
+	int ret;
+	u64 old_root_bytenr;
+	struct btrfs_root *tree_root = root->fs_info->tree_root;
+
+	btrfs_extent_post_op(trans, root);
+	btrfs_write_dirty_block_groups(trans, root);
+	btrfs_extent_post_op(trans, root);
+
+	while (1) {
+		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
+		if (old_root_bytenr == root->node->start)
+			break;
+		btrfs_set_root_bytenr(&root->root_item,
+				       root->node->start);
+		btrfs_set_root_level(&root->root_item,
+				     btrfs_header_level(root->node));
+		btrfs_set_root_generation(&root->root_item, trans->transid);
+
+		btrfs_extent_post_op(trans, root);
+
+		ret = btrfs_update_root(trans, tree_root,
+					&root->root_key,
+					&root->root_item);
+		BUG_ON(ret);
+		btrfs_write_dirty_block_groups(trans, root);
+		btrfs_extent_post_op(trans, root);
+	}
+	return 0;
+}
+
+/*
+ * update all the cowonly tree roots on disk
+ */
+int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root)
+{
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct list_head *next;
+	struct extent_buffer *eb;
+
+	btrfs_extent_post_op(trans, fs_info->tree_root);
+
+	eb = btrfs_lock_root_node(fs_info->tree_root);
+	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb, 0);
+	btrfs_tree_unlock(eb);
+	free_extent_buffer(eb);
+
+	btrfs_extent_post_op(trans, fs_info->tree_root);
+
+	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
+		next = fs_info->dirty_cowonly_roots.next;
+		list_del_init(next);
+		root = list_entry(next, struct btrfs_root, dirty_list);
+
+		update_cowonly_root(trans, root);
+	}
+	return 0;
+}
+
+/*
+ * dead roots are old snapshots that need to be deleted.  This allocates
+ * a dirty root struct and adds it into the list of dead roots that need to
+ * be deleted
+ */
+int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest)
+{
+	struct btrfs_dirty_root *dirty;
+
+	dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
+	if (!dirty)
+		return -ENOMEM;
+	dirty->root = root;
+	dirty->latest_root = latest;
+
+	mutex_lock(&root->fs_info->trans_mutex);
+	list_add(&dirty->list, &latest->fs_info->dead_roots);
+	mutex_unlock(&root->fs_info->trans_mutex);
+	return 0;
+}
+
+/*
+ * at transaction commit time we need to schedule the old roots for
+ * deletion via btrfs_drop_snapshot.  This runs through all the
+ * reference counted roots that were modified in the current
+ * transaction and puts them into the drop list
+ */
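+/*
+ * Roots are found via the BTRFS_ROOT_TRANS_TAG set in
+ * btrfs_record_root_in_trans().  A root whose commit_root still matches the
+ * current node only has its item updated on disk; everything else is
+ * re-inserted at a new key offset and the old copy is queued on the drop
+ * list passed in by the caller.
+ */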
+static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
+				    struct radix_tree_root *radix,
+				    struct list_head *list)
+{
+	struct btrfs_dirty_root *dirty;
+	struct btrfs_root *gang[8];
+	struct btrfs_root *root;
+	int i;
+	int ret;
+	int err = 0;
+	u32 refs;
+
+	while (1) {
+		ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
+						 ARRAY_SIZE(gang),
+						 BTRFS_ROOT_TRANS_TAG);
+		if (ret == 0)
+			break;
+		for (i = 0; i < ret; i++) {
+			root = gang[i];
+			radix_tree_tag_clear(radix,
+				     (unsigned long)root->root_key.objectid,
+				     BTRFS_ROOT_TRANS_TAG);
+
+			BUG_ON(!root->ref_tree);
+			dirty = root->dirty_root;
+
+			btrfs_free_log(trans, root);
+			btrfs_free_reloc_root(trans, root);
+
+			if (root->commit_root == root->node) {
+				WARN_ON(root->node->start !=
+					btrfs_root_bytenr(&root->root_item));
+
+				free_extent_buffer(root->commit_root);
+				root->commit_root = NULL;
+				root->dirty_root = NULL;
+
+				spin_lock(&root->list_lock);
+				list_del_init(&dirty->root->dead_list);
+				spin_unlock(&root->list_lock);
+
+				kfree(dirty->root);
+				kfree(dirty);
+
+				/* make sure to update the root on disk
+				 * so we get any updates to the block used
+				 * counts
+				 */
+				err = btrfs_update_root(trans,
+						root->fs_info->tree_root,
+						&root->root_key,
+						&root->root_item);
+				continue;
+			}
+
+			memset(&root->root_item.drop_progress, 0,
+			       sizeof(struct btrfs_disk_key));
+			root->root_item.drop_level = 0;
+			root->commit_root = NULL;
+			root->dirty_root = NULL;
+			root->root_key.offset = root->fs_info->generation;
+			btrfs_set_root_bytenr(&root->root_item,
+					      root->node->start);
+			btrfs_set_root_level(&root->root_item,
+					     btrfs_header_level(root->node));
+			btrfs_set_root_generation(&root->root_item,
+						  root->root_key.offset);
+
+			err = btrfs_insert_root(trans, root->fs_info->tree_root,
+						&root->root_key,
+						&root->root_item);
+			if (err)
+				break;
+
+			refs = btrfs_root_refs(&dirty->root->root_item);
+			btrfs_set_root_refs(&dirty->root->root_item, refs - 1);
+			err = btrfs_update_root(trans, root->fs_info->tree_root,
+						&dirty->root->root_key,
+						&dirty->root->root_item);
+
+			BUG_ON(err);
+			if (refs == 1) {
+				list_add(&dirty->list, list);
+			} else {
+				WARN_ON(1);
+				free_extent_buffer(dirty->root->node);
+				kfree(dirty->root);
+				kfree(dirty);
+			}
+		}
+	}
+	return err;
+}
+
+/*
+ * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
+ * otherwise every leaf in the btree is read and defragged.
+ */
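+/*
+ * btrfs_defrag_leaves() returns -EAGAIN when it has made partial progress,
+ * so the loop below keeps restarting small transactions until the whole
+ * tree has been walked or the filesystem starts closing.
+ */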
+int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
+{
+	struct btrfs_fs_info *info = root->fs_info;
+	int ret;
+	struct btrfs_trans_handle *trans;
+	unsigned long nr;
+
+	smp_mb();
+	if (root->defrag_running)
+		return 0;
+	trans = btrfs_start_transaction(root, 1);
+	while (1) {
+		root->defrag_running = 1;
+		ret = btrfs_defrag_leaves(trans, root, cacheonly);
+		nr = trans->blocks_used;
+		btrfs_end_transaction(trans, root);
+		btrfs_btree_balance_dirty(info->tree_root, nr);
+		cond_resched();
+
+		trans = btrfs_start_transaction(root, 1);
+		if (root->fs_info->closing || ret != -EAGAIN)
+			break;
+	}
+	root->defrag_running = 0;
+	smp_mb();
+	btrfs_end_transaction(trans, root);
+	return 0;
+}
+
+/*
+ * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
+ * all of them
+ */
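+/*
+ * Each root is torn down in small steps: btrfs_drop_snapshot() returns
+ * -EAGAIN whenever it wants the caller to end the current transaction and
+ * start a new one, which keeps any single transaction from growing without
+ * bound while a large snapshot is deleted.
+ */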
+static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
+				     struct list_head *list)
+{
+	struct btrfs_dirty_root *dirty;
+	struct btrfs_trans_handle *trans;
+	unsigned long nr;
+	u64 num_bytes;
+	u64 bytes_used;
+	u64 max_useless;
+	int ret = 0;
+	int err;
+
+	while (!list_empty(list)) {
+		struct btrfs_root *root;
+
+		dirty = list_entry(list->prev, struct btrfs_dirty_root, list);
+		list_del_init(&dirty->list);
+
+		num_bytes = btrfs_root_used(&dirty->root->root_item);
+		root = dirty->latest_root;
+		atomic_inc(&root->fs_info->throttles);
+
+		while (1) {
+			trans = btrfs_start_transaction(tree_root, 1);
+			mutex_lock(&root->fs_info->drop_mutex);
+			ret = btrfs_drop_snapshot(trans, dirty->root);
+			if (ret != -EAGAIN)
+				break;
+			mutex_unlock(&root->fs_info->drop_mutex);
+
+			err = btrfs_update_root(trans,
+					tree_root,
+					&dirty->root->root_key,
+					&dirty->root->root_item);
+			if (err)
+				ret = err;
+			nr = trans->blocks_used;
+			ret = btrfs_end_transaction(trans, tree_root);
+			BUG_ON(ret);
+
+			btrfs_btree_balance_dirty(tree_root, nr);
+			cond_resched();
+		}
+		BUG_ON(ret);
+		atomic_dec(&root->fs_info->throttles);
+		wake_up(&root->fs_info->transaction_throttle);
+
+		num_bytes -= btrfs_root_used(&dirty->root->root_item);
+		bytes_used = btrfs_root_used(&root->root_item);
+		if (num_bytes) {
+			btrfs_record_root_in_trans(root);
+			btrfs_set_root_used(&root->root_item,
+					    bytes_used - num_bytes);
+		}
+
+		ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
+		if (ret) {
+			BUG();
+			break;
+		}
+		mutex_unlock(&root->fs_info->drop_mutex);
+
+		spin_lock(&root->list_lock);
+		list_del_init(&dirty->root->dead_list);
+		if (!list_empty(&root->dead_list)) {
+			struct btrfs_root *oldest;
+			oldest = list_entry(root->dead_list.prev,
+					    struct btrfs_root, dead_list);
+			max_useless = oldest->root_key.offset - 1;
+		} else {
+			max_useless = root->root_key.offset - 1;
+		}
+		spin_unlock(&root->list_lock);
+
+		nr = trans->blocks_used;
+		ret = btrfs_end_transaction(trans, tree_root);
+		BUG_ON(ret);
+
+		ret = btrfs_remove_leaf_refs(root, max_useless, 0);
+		BUG_ON(ret);
+
+		free_extent_buffer(dirty->root->node);
+		kfree(dirty->root);
+		kfree(dirty);
+
+		btrfs_btree_balance_dirty(tree_root, nr);
+		cond_resched();
+	}
+	return ret;
+}
+
+/*
+ * new snapshots need to be created at a very specific time in the
+ * transaction commit.  This does the actual creation
+ */
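+/*
+ * The snapshot itself is only a copy of the source root node made with
+ * btrfs_copy_root() and inserted into the tree of tree roots under a fresh
+ * objectid; the directory entry and the root backrefs are added later by
+ * finish_pending_snapshot(), outside the critical section of the commit.
+ */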
+static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
+				   struct btrfs_fs_info *fs_info,
+				   struct btrfs_pending_snapshot *pending)
+{
+	struct btrfs_key key;
+	struct btrfs_root_item *new_root_item;
+	struct btrfs_root *tree_root = fs_info->tree_root;
+	struct btrfs_root *root = pending->root;
+	struct extent_buffer *tmp;
+	struct extent_buffer *old;
+	int ret;
+	u64 objectid;
+
+	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
+	if (!new_root_item) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
+	if (ret)
+		goto fail;
+
+	btrfs_record_root_in_trans(root);
+	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
+	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
+
+	key.objectid = objectid;
+	key.offset = trans->transid;
+	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
+
+	old = btrfs_lock_root_node(root);
+	btrfs_cow_block(trans, root, old, NULL, 0, &old, 0);
+
+	btrfs_copy_root(trans, root, old, &tmp, objectid);
+	btrfs_tree_unlock(old);
+	free_extent_buffer(old);
+
+	btrfs_set_root_bytenr(new_root_item, tmp->start);
+	btrfs_set_root_level(new_root_item, btrfs_header_level(tmp));
+	btrfs_set_root_generation(new_root_item, trans->transid);
+	ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
+				new_root_item);
+	btrfs_tree_unlock(tmp);
+	free_extent_buffer(tmp);
+	if (ret)
+		goto fail;
+
+	key.offset = (u64)-1;
+	memcpy(&pending->root_key, &key, sizeof(key));
+fail:
+	kfree(new_root_item);
+	return ret;
+}
+
+static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info,
+				   struct btrfs_pending_snapshot *pending)
+{
+	int ret;
+	int namelen;
+	u64 index = 0;
+	struct btrfs_trans_handle *trans;
+	struct inode *parent_inode;
+	struct inode *inode;
+	struct btrfs_root *parent_root;
+
+	parent_inode = pending->dentry->d_parent->d_inode;
+	parent_root = BTRFS_I(parent_inode)->root;
+	trans = btrfs_join_transaction(parent_root, 1);
+
+	/*
+	 * insert the directory item
+	 */
+	namelen = strlen(pending->name);
+	ret = btrfs_set_inode_index(parent_inode, &index);
+	ret = btrfs_insert_dir_item(trans, parent_root,
+			    pending->name, namelen,
+			    parent_inode->i_ino,
+			    &pending->root_key, BTRFS_FT_DIR, index);
+
+	if (ret)
+		goto fail;
+
+	btrfs_i_size_write(parent_inode, parent_inode->i_size + namelen * 2);
+	ret = btrfs_update_inode(trans, parent_root, parent_inode);
+	BUG_ON(ret);
+
+	/* add the backref first */
+	ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
+				 pending->root_key.objectid,
+				 BTRFS_ROOT_BACKREF_KEY,
+				 parent_root->root_key.objectid,
+				 parent_inode->i_ino, index, pending->name,
+				 namelen);
+
+	BUG_ON(ret);
+
+	/* now add the forward ref */
+	ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
+				 parent_root->root_key.objectid,
+				 BTRFS_ROOT_REF_KEY,
+				 pending->root_key.objectid,
+				 parent_inode->i_ino, index, pending->name,
+				 namelen);
+
+	inode = btrfs_lookup_dentry(parent_inode, pending->dentry);
+	d_instantiate(pending->dentry, inode);
+fail:
+	btrfs_end_transaction(trans, fs_info->fs_root);
+	return ret;
+}
+
+/*
+ * create all the snapshots we've scheduled for creation
+ */
+static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
+					     struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_pending_snapshot *pending;
+	struct list_head *head = &trans->transaction->pending_snapshots;
+	struct list_head *cur;
+	int ret;
+
+	list_for_each(cur, head) {
+		pending = list_entry(cur, struct btrfs_pending_snapshot, list);
+		ret = create_pending_snapshot(trans, fs_info, pending);
+		BUG_ON(ret);
+	}
+	return 0;
+}
+
+static noinline int finish_pending_snapshots(struct btrfs_trans_handle *trans,
+					     struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_pending_snapshot *pending;
+	struct list_head *head = &trans->transaction->pending_snapshots;
+	int ret;
+
+	while (!list_empty(head)) {
+		pending = list_entry(head->next,
+				     struct btrfs_pending_snapshot, list);
+		ret = finish_pending_snapshot(fs_info, pending);
+		BUG_ON(ret);
+		list_del(&pending->list);
+		kfree(pending->name);
+		kfree(pending);
+	}
+	return 0;
+}
+
+int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root)
+{
+	unsigned long joined = 0;
+	unsigned long timeout = 1;
+	struct btrfs_transaction *cur_trans;
+	struct btrfs_transaction *prev_trans = NULL;
+	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
+	struct list_head dirty_fs_roots;
+	struct extent_io_tree *pinned_copy;
+	DEFINE_WAIT(wait);
+	int ret;
+
+	INIT_LIST_HEAD(&dirty_fs_roots);
+	mutex_lock(&root->fs_info->trans_mutex);
+	if (trans->transaction->in_commit) {
+		cur_trans = trans->transaction;
+		trans->transaction->use_count++;
+		mutex_unlock(&root->fs_info->trans_mutex);
+		btrfs_end_transaction(trans, root);
+
+		ret = wait_for_commit(root, cur_trans);
+		BUG_ON(ret);
+
+		mutex_lock(&root->fs_info->trans_mutex);
+		put_transaction(cur_trans);
+		mutex_unlock(&root->fs_info->trans_mutex);
+
+		return 0;
+	}
+
+	pinned_copy = kmalloc(sizeof(*pinned_copy), GFP_NOFS);
+	if (!pinned_copy)
+		return -ENOMEM;
+
+	extent_io_tree_init(pinned_copy,
+			     root->fs_info->btree_inode->i_mapping, GFP_NOFS);
+
+	trans->transaction->in_commit = 1;
+	trans->transaction->blocked = 1;
+	cur_trans = trans->transaction;
+	if (cur_trans->list.prev != &root->fs_info->trans_list) {
+		prev_trans = list_entry(cur_trans->list.prev,
+					struct btrfs_transaction, list);
+		if (!prev_trans->commit_done) {
+			prev_trans->use_count++;
+			mutex_unlock(&root->fs_info->trans_mutex);
+
+			wait_for_commit(root, prev_trans);
+
+			mutex_lock(&root->fs_info->trans_mutex);
+			put_transaction(prev_trans);
+		}
+	}
+
+	do {
+		int snap_pending = 0;
+		joined = cur_trans->num_joined;
+		if (!list_empty(&trans->transaction->pending_snapshots))
+			snap_pending = 1;
+
+		WARN_ON(cur_trans != trans->transaction);
+		prepare_to_wait(&cur_trans->writer_wait, &wait,
+				TASK_UNINTERRUPTIBLE);
+
+		if (cur_trans->num_writers > 1)
+			timeout = MAX_SCHEDULE_TIMEOUT;
+		else
+			timeout = 1;
+
+		mutex_unlock(&root->fs_info->trans_mutex);
+
+		if (snap_pending) {
+			ret = btrfs_wait_ordered_extents(root, 1);
+			BUG_ON(ret);
+		}
+
+		schedule_timeout(timeout);
+
+		mutex_lock(&root->fs_info->trans_mutex);
+		finish_wait(&cur_trans->writer_wait, &wait);
+	} while (cur_trans->num_writers > 1 ||
+		 (cur_trans->num_joined != joined));
+
+	ret = create_pending_snapshots(trans, root->fs_info);
+	BUG_ON(ret);
+
+	WARN_ON(cur_trans != trans->transaction);
+
+	/* btrfs_commit_tree_roots is responsible for getting the
+	 * various roots consistent with each other.  Every pointer
+	 * in the tree of tree roots has to point to the most up to date
+	 * root for every subvolume and other tree.  So, we have to keep
+	 * the tree logging code from jumping in and changing any
+	 * of the trees.
+	 *
+	 * At this point in the commit, there can't be any tree-log
+	 * writers, but a little lower down we drop the trans mutex
+	 * and let new people in.  By holding the tree_log_mutex
+	 * from now until after the super is written, we avoid races
+	 * with the tree-log code.
+	 */
+	mutex_lock(&root->fs_info->tree_log_mutex);
+	/*
+	 * keep tree reloc code from adding new reloc trees
+	 */
+	mutex_lock(&root->fs_info->tree_reloc_mutex);
+
+
+	ret = add_dirty_roots(trans, &root->fs_info->fs_roots_radix,
+			      &dirty_fs_roots);
+	BUG_ON(ret);
+
+	/* add_dirty_roots gets rid of all the tree log roots, so it is now
+	 * safe to free the root of the tree of log roots
+	 */
+	btrfs_free_log_root_tree(trans, root->fs_info);
+
+	ret = btrfs_commit_tree_roots(trans, root);
+	BUG_ON(ret);
+
+	cur_trans = root->fs_info->running_transaction;
+	spin_lock(&root->fs_info->new_trans_lock);
+	root->fs_info->running_transaction = NULL;
+	spin_unlock(&root->fs_info->new_trans_lock);
+	btrfs_set_super_generation(&root->fs_info->super_copy,
+				   cur_trans->transid);
+	btrfs_set_super_root(&root->fs_info->super_copy,
+			     root->fs_info->tree_root->node->start);
+	btrfs_set_super_root_level(&root->fs_info->super_copy,
+			   btrfs_header_level(root->fs_info->tree_root->node));
+
+	btrfs_set_super_chunk_root(&root->fs_info->super_copy,
+				   chunk_root->node->start);
+	btrfs_set_super_chunk_root_level(&root->fs_info->super_copy,
+					 btrfs_header_level(chunk_root->node));
+	btrfs_set_super_chunk_root_generation(&root->fs_info->super_copy,
+				btrfs_header_generation(chunk_root->node));
+
+	if (!root->fs_info->log_root_recovering) {
+		btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
+		btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
+	}
+
+	memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
+	       sizeof(root->fs_info->super_copy));
+
+	btrfs_copy_pinned(root, pinned_copy);
+
+	trans->transaction->blocked = 0;
+	wake_up(&root->fs_info->transaction_throttle);
+	wake_up(&root->fs_info->transaction_wait);
+
+	mutex_unlock(&root->fs_info->trans_mutex);
+	ret = btrfs_write_and_wait_transaction(trans, root);
+	BUG_ON(ret);
+	write_ctree_super(trans, root, 0);
+
+	/*
+	 * the super is written, we can safely allow the tree-loggers
+	 * to go about their business
+	 */
+	mutex_unlock(&root->fs_info->tree_log_mutex);
+
+	btrfs_finish_extent_commit(trans, root, pinned_copy);
+	kfree(pinned_copy);
+
+	btrfs_drop_dead_reloc_roots(root);
+	mutex_unlock(&root->fs_info->tree_reloc_mutex);
+
+	/* do the directory inserts of any pending snapshot creations */
+	finish_pending_snapshots(trans, root->fs_info);
+
+	mutex_lock(&root->fs_info->trans_mutex);
+
+	cur_trans->commit_done = 1;
+	root->fs_info->last_trans_committed = cur_trans->transid;
+	wake_up(&cur_trans->commit_wait);
+
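+	/*
+	 * two puts: one for the reference this handle took in
+	 * start_transaction() and one for the transaction's base reference
+	 * from join_transaction(); any waiters still hold their own
+	 * references, so the structure is only freed once they drop them.
+	 */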
+	put_transaction(cur_trans);
+	put_transaction(cur_trans);
+
+	list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);
+	if (root->fs_info->closing)
+		list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);
+
+	mutex_unlock(&root->fs_info->trans_mutex);
+
+	kmem_cache_free(btrfs_trans_handle_cachep, trans);
+
+	if (root->fs_info->closing)
+		drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
+	return ret;
+}
+
+/*
+ * interface function to delete all the snapshots we have scheduled for deletion
+ */
+int btrfs_clean_old_snapshots(struct btrfs_root *root)
+{
+	struct list_head dirty_roots;
+	INIT_LIST_HEAD(&dirty_roots);
+again:
+	mutex_lock(&root->fs_info->trans_mutex);
+	list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
+	mutex_unlock(&root->fs_info->trans_mutex);
+
+	if (!list_empty(&dirty_roots)) {
+		drop_dirty_roots(root, &dirty_roots);
+		goto again;
+	}
+	return 0;
+}
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
new file mode 100644
index 0000000..ea29211
--- /dev/null
+++ b/fs/btrfs/transaction.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __BTRFS_TRANSACTION__
+#define __BTRFS_TRANSACTION__
+#include "btrfs_inode.h"
+
+struct btrfs_transaction {
+	u64 transid;
+	unsigned long num_writers;
+	unsigned long num_joined;
+	int in_commit;
+	int use_count;
+	int commit_done;
+	int blocked;
+	struct list_head list;
+	struct extent_io_tree dirty_pages;
+	unsigned long start_time;
+	wait_queue_head_t writer_wait;
+	wait_queue_head_t commit_wait;
+	struct list_head pending_snapshots;
+};
+
+struct btrfs_trans_handle {
+	u64 transid;
+	unsigned long blocks_reserved;
+	unsigned long blocks_used;
+	struct btrfs_transaction *transaction;
+	u64 block_group;
+	u64 alloc_exclude_start;
+	u64 alloc_exclude_nr;
+};
+
+struct btrfs_pending_snapshot {
+	struct dentry *dentry;
+	struct btrfs_root *root;
+	char *name;
+	struct btrfs_key root_key;
+	struct list_head list;
+};
+
+struct btrfs_dirty_root {
+	struct list_head list;
+	struct btrfs_root *root;
+	struct btrfs_root *latest_root;
+};
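+
+/*
+ * Rough lifecycle: btrfs_start_transaction() hands out a btrfs_trans_handle
+ * tied to the running btrfs_transaction; snapshot creation queues a
+ * btrfs_pending_snapshot on that transaction, and old roots scheduled for
+ * deletion travel as btrfs_dirty_root entries on fs_info->dead_roots until
+ * btrfs_clean_old_snapshots() drops them.
+ */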
+
+static inline void btrfs_set_trans_block_group(struct btrfs_trans_handle *trans,
+					       struct inode *inode)
+{
+	trans->block_group = BTRFS_I(inode)->block_group;
+}
+
+static inline void btrfs_update_inode_block_group(
+					  struct btrfs_trans_handle *trans,
+					  struct inode *inode)
+{
+	BTRFS_I(inode)->block_group = trans->block_group;
+}
+
+static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
+					      struct inode *inode)
+{
+	BTRFS_I(inode)->last_trans = trans->transaction->transid;
+}
+
+int btrfs_end_transaction(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
+						   int num_blocks);
+struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
+						   int num_blocks);
+struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
+						   int num_blocks);
+int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
+				     struct btrfs_root *root);
+int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root);
+
+int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest);
+int btrfs_defrag_root(struct btrfs_root *root, int cacheonly);
+int btrfs_clean_old_snapshots(struct btrfs_root *root);
+int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root);
+int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
+				   struct btrfs_root *root);
+void btrfs_throttle(struct btrfs_root *root);
+int btrfs_record_root_in_trans(struct btrfs_root *root);
+int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
+					struct extent_io_tree *dirty_pages);
+#endif
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
new file mode 100644
index 0000000..3e8358c
--- /dev/null
+++ b/fs/btrfs/tree-defrag.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include "ctree.h"
+#include "disk-io.h"
+#include "print-tree.h"
+#include "transaction.h"
+#include "locking.h"
+
+/* defrag all the leaves in a given btree.  If cache_only == 1, don't read
+ * things from disk, otherwise read all the leaves and try to get key order to
+ * better reflect disk order
+ */
+
+int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
+			struct btrfs_root *root, int cache_only)
+{
+	struct btrfs_path *path = NULL;
+	struct btrfs_key key;
+	int ret = 0;
+	int wret;
+	int level;
+	int orig_level;
+	int is_extent = 0;
+	int next_key_ret = 0;
+	u64 last_ret = 0;
+	u64 min_trans = 0;
+
+	if (cache_only)
+		goto out;
+
+	if (root->fs_info->extent_root == root) {
+		/*
+		 * there's recursion here right now in the tree locking,
+		 * we can't defrag the extent root without deadlock
+		 */
+		goto out;
+	}
+
+	if (root->ref_cows == 0 && !is_extent)
+		goto out;
+
+	if (btrfs_test_opt(root, SSD))
+		goto out;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	level = btrfs_header_level(root->node);
+	orig_level = level;
+
+	if (level == 0)
+		goto out;
+
+	if (root->defrag_progress.objectid == 0) {
+		struct extent_buffer *root_node;
+		u32 nritems;
+
+		root_node = btrfs_lock_root_node(root);
+		nritems = btrfs_header_nritems(root_node);
+		root->defrag_max.objectid = 0;
+		/* from above we know this is not a leaf */
+		btrfs_node_key_to_cpu(root_node, &root->defrag_max,
+				      nritems - 1);
+		btrfs_tree_unlock(root_node);
+		free_extent_buffer(root_node);
+		memset(&key, 0, sizeof(key));
+	} else {
+		memcpy(&key, &root->defrag_progress, sizeof(key));
+	}
+
+	path->keep_locks = 1;
+	if (cache_only)
+		min_trans = root->defrag_trans_start;
+
+	ret = btrfs_search_forward(root, &key, NULL, path,
+				   cache_only, min_trans);
+	if (ret < 0)
+		goto out;
+	if (ret > 0) {
+		ret = 0;
+		goto out;
+	}
+	btrfs_release_path(root, path);
+	wret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+
+	if (wret < 0) {
+		ret = wret;
+		goto out;
+	}
+	if (!path->nodes[1]) {
+		ret = 0;
+		goto out;
+	}
+	path->slots[1] = btrfs_header_nritems(path->nodes[1]);
+	next_key_ret = btrfs_find_next_key(root, path, &key, 1, cache_only,
+					   min_trans);
+	ret = btrfs_realloc_node(trans, root,
+				 path->nodes[1], 0,
+				 cache_only, &last_ret,
+				 &root->defrag_progress);
+	WARN_ON(ret && ret != -EAGAIN);
+	if (next_key_ret == 0) {
+		memcpy(&root->defrag_progress, &key, sizeof(key));
+		ret = -EAGAIN;
+	}
+
+	btrfs_release_path(root, path);
+	if (is_extent)
+		btrfs_extent_post_op(trans, root);
+out:
+	if (path)
+		btrfs_free_path(path);
+	if (ret == -EAGAIN) {
+		if (root->defrag_max.objectid > root->defrag_progress.objectid)
+			goto done;
+		if (root->defrag_max.type > root->defrag_progress.type)
+			goto done;
+		if (root->defrag_max.offset > root->defrag_progress.offset)
+			goto done;
+		ret = 0;
+	}
+done:
+	if (ret != -EAGAIN) {
+		memset(&root->defrag_progress, 0,
+		       sizeof(root->defrag_progress));
+		root->defrag_trans_start = trans->transid;
+	}
+	return ret;
+}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
new file mode 100644
index 0000000..d81cda2
--- /dev/null
+++ b/fs/btrfs/tree-log.c
@@ -0,0 +1,2898 @@
+/*
+ * Copyright (C) 2008 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include "ctree.h"
+#include "transaction.h"
+#include "disk-io.h"
+#include "locking.h"
+#include "print-tree.h"
+#include "compat.h"
+#include "tree-log.h"
+
+/* magic values for the inode_only field in btrfs_log_inode:
+ *
+ * LOG_INODE_ALL means to log everything
+ * LOG_INODE_EXISTS means to log just enough to recreate the inode
+ * during log replay
+ */
+#define LOG_INODE_ALL 0
+#define LOG_INODE_EXISTS 1
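+
+/*
+ * LOG_INODE_ALL is what an fsync of the inode itself asks for;
+ * LOG_INODE_EXISTS is the cheaper mode used when an inode (a parent
+ * directory, for instance) only has to be present for replay to succeed.
+ */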
+
+/*
+ * stages for the tree walking.  The first
+ * stage (0) is to only pin down the blocks we find
+ * the second stage (1) is to make sure that all the inodes
+ * we find in the log are created in the subvolume.
+ *
+ * The last stage is to deal with directories and links and extents
+ * and all the other fun semantics
+ */
+#define LOG_WALK_PIN_ONLY 0
+#define LOG_WALK_REPLAY_INODES 1
+#define LOG_WALK_REPLAY_ALL 2
+
+static int __btrfs_log_inode(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root, struct inode *inode,
+			     int inode_only);
+static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root,
+			     struct btrfs_path *path, u64 objectid);
+
+/*
+ * tree logging is a special write ahead log used to make sure that
+ * fsyncs and O_SYNCs can happen without doing full tree commits.
+ *
+ * Full tree commits are expensive because they require commonly
+ * modified blocks to be recowed, creating many dirty pages in the
+ * extent tree and a 4x-6x higher write load than ext3.
+ *
+ * Instead of doing a tree commit on every fsync, we use the
+ * key ranges and transaction ids to find items for a given file or directory
+ * that have changed in this transaction.  Those items are copied into
+ * a special tree (one per subvolume root), that tree is written to disk
+ * and then the fsync is considered complete.
+ *
+ * After a crash, items are copied out of the log-tree back into the
+ * subvolume tree.  Any file data extents found are recorded in the extent
+ * allocation tree, and the log-tree freed.
+ *
+ * The log tree is read three times: once to pin down all the extents it is
+ * using in ram, once to create all the inodes logged in the tree
+ * and once to do all the other items.
+ */
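+
+/*
+ * Those three passes correspond (roughly) to the LOG_WALK_PIN_ONLY,
+ * LOG_WALK_REPLAY_INODES and LOG_WALK_REPLAY_ALL stages defined above.
+ */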
+
+/*
+ * btrfs_add_log_tree adds a new per-subvolume log tree into the
+ * tree of log tree roots.  This must be called with a tree log transaction
+ * running (see start_log_trans).
+ */
+static int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
+		      struct btrfs_root *root)
+{
+	struct btrfs_key key;
+	struct btrfs_root_item root_item;
+	struct btrfs_inode_item *inode_item;
+	struct extent_buffer *leaf;
+	struct btrfs_root *new_root = root;
+	int ret;
+	u64 objectid = root->root_key.objectid;
+
+	leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
+				      BTRFS_TREE_LOG_OBJECTID,
+				      trans->transid, 0, 0, 0);
+	if (IS_ERR(leaf)) {
+		ret = PTR_ERR(leaf);
+		return ret;
+	}
+
+	btrfs_set_header_nritems(leaf, 0);
+	btrfs_set_header_level(leaf, 0);
+	btrfs_set_header_bytenr(leaf, leaf->start);
+	btrfs_set_header_generation(leaf, trans->transid);
+	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
+
+	write_extent_buffer(leaf, root->fs_info->fsid,
+			    (unsigned long)btrfs_header_fsid(leaf),
+			    BTRFS_FSID_SIZE);
+	btrfs_mark_buffer_dirty(leaf);
+
+	inode_item = &root_item.inode;
+	memset(inode_item, 0, sizeof(*inode_item));
+	inode_item->generation = cpu_to_le64(1);
+	inode_item->size = cpu_to_le64(3);
+	inode_item->nlink = cpu_to_le32(1);
+	inode_item->nbytes = cpu_to_le64(root->leafsize);
+	inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
+
+	btrfs_set_root_bytenr(&root_item, leaf->start);
+	btrfs_set_root_generation(&root_item, trans->transid);
+	btrfs_set_root_level(&root_item, 0);
+	btrfs_set_root_refs(&root_item, 0);
+	btrfs_set_root_used(&root_item, 0);
+
+	memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
+	root_item.drop_level = 0;
+
+	btrfs_tree_unlock(leaf);
+	free_extent_buffer(leaf);
+	leaf = NULL;
+
+	btrfs_set_root_dirid(&root_item, 0);
+
+	key.objectid = BTRFS_TREE_LOG_OBJECTID;
+	key.offset = objectid;
+	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
+	ret = btrfs_insert_root(trans, root->fs_info->log_root_tree, &key,
+				&root_item);
+	if (ret)
+		goto fail;
+
+	new_root = btrfs_read_fs_root_no_radix(root->fs_info->log_root_tree,
+					       &key);
+	BUG_ON(!new_root);
+
+	WARN_ON(root->log_root);
+	root->log_root = new_root;
+
+	/*
+	 * log trees do not get reference counted because they go away
+	 * before a real commit is actually done.  They do store pointers
+	 * to file data extents, and those reference counts still get
+	 * updated (along with back refs to the log tree).
+	 */
+	new_root->ref_cows = 0;
+	new_root->last_trans = trans->transid;
+
+	/*
+	 * we need to make sure the root block for this new tree
+	 * is marked as dirty in the dirty_log_pages tree.  This
+	 * is how it gets flushed down to disk at tree log commit time.
+	 *
+	 * the tree logging mutex keeps others from coming in and changing
+	 * the new_root->node, so we can safely access it here
+	 */
+	set_extent_dirty(&new_root->dirty_log_pages, new_root->node->start,
+			 new_root->node->start + new_root->node->len - 1,
+			 GFP_NOFS);
+
+fail:
+	return ret;
+}
+
+/*
+ * start a sub transaction and setup the log tree
+ * this increments the log tree writer count to make the people
+ * syncing the tree wait for us to finish
+ */
+static int start_log_trans(struct btrfs_trans_handle *trans,
+			   struct btrfs_root *root)
+{
+	int ret;
+	mutex_lock(&root->fs_info->tree_log_mutex);
+	if (!root->fs_info->log_root_tree) {
+		ret = btrfs_init_log_root_tree(trans, root->fs_info);
+		BUG_ON(ret);
+	}
+	if (!root->log_root) {
+		ret = btrfs_add_log_tree(trans, root);
+		BUG_ON(ret);
+	}
+	atomic_inc(&root->fs_info->tree_log_writers);
+	root->fs_info->tree_log_batch++;
+	mutex_unlock(&root->fs_info->tree_log_mutex);
+	return 0;
+}
+
+/*
+ * returns 0 if there was a log transaction running and we were able
+ * to join, or returns -ENOENT if there was no transaction
+ * in progress
+ */
+static int join_running_log_trans(struct btrfs_root *root)
+{
+	int ret = -ENOENT;
+
+	smp_mb();
+	if (!root->log_root)
+		return -ENOENT;
+
+	mutex_lock(&root->fs_info->tree_log_mutex);
+	if (root->log_root) {
+		ret = 0;
+		atomic_inc(&root->fs_info->tree_log_writers);
+		root->fs_info->tree_log_batch++;
+	}
+	mutex_unlock(&root->fs_info->tree_log_mutex);
+	return ret;
+}
+
+/*
+ * indicate we're done making changes to the log tree
+ * and wake up anyone waiting to do a sync
+ */
+static int end_log_trans(struct btrfs_root *root)
+{
+	atomic_dec(&root->fs_info->tree_log_writers);
+	smp_mb();
+	if (waitqueue_active(&root->fs_info->tree_log_wait))
+		wake_up(&root->fs_info->tree_log_wait);
+	return 0;
+}
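+
+/*
+ * Illustrative usage (no new code path): callers that modify the log
+ * bracket their work like
+ *
+ *	ret = join_running_log_trans(root);
+ *	if (ret == 0) {
+ *		... add or delete items in root->log_root ...
+ *		end_log_trans(root);
+ *	}
+ *
+ * btrfs_sync_log() below waits for tree_log_writers to reach zero
+ * before writing the log blocks out.
+ */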
+
+
+/*
+ * the walk control struct is used to pass state down the chain when
+ * processing the log tree.  The stage field tells us which part
+ * of the log tree processing we are currently doing.  The others
+ * are state fields used for that specific part
+ */
+struct walk_control {
+	/* should we free the extent on disk when done?  This is used
+	 * at transaction commit time while freeing a log tree
+	 */
+	int free;
+
+	/* should we write out the extent buffer?  This is used
+	 * while flushing the log tree to disk during a sync
+	 */
+	int write;
+
+	/* should we wait for the extent buffer io to finish?  Also used
+	 * while flushing the log tree to disk for a sync
+	 */
+	int wait;
+
+	/* pin only walk, we record which extents on disk belong to the
+	 * log trees
+	 */
+	int pin;
+
+	/* what stage of the replay code we're currently in */
+	int stage;
+
+	/* the root we are currently replaying */
+	struct btrfs_root *replay_dest;
+
+	/* the trans handle for the current replay */
+	struct btrfs_trans_handle *trans;
+
+	/* the function that gets used to process blocks we find in the
+	 * tree.  Note the extent_buffer might not be up to date when it is
+	 * passed in, and it must be checked or read if you need the data
+	 * inside it
+	 */
+	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
+			    struct walk_control *wc, u64 gen);
+};
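+
+/*
+ * As a rough example, the free-at-commit pass in btrfs_free_log() later
+ * in this file drives the walk with
+ *
+ *	struct walk_control wc = {
+ *		.free = 1,
+ *		.process_func = process_one_buffer,
+ *	};
+ *	walk_log_tree(trans, log, &wc);
+ *
+ * while log replay uses replay_one_buffer() as the process_func and
+ * consults the stage field to decide what to copy.
+ */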
+
+/*
+ * process_func used to pin down extents, write them or wait on them
+ */
+static int process_one_buffer(struct btrfs_root *log,
+			      struct extent_buffer *eb,
+			      struct walk_control *wc, u64 gen)
+{
+	if (wc->pin) {
+		mutex_lock(&log->fs_info->pinned_mutex);
+		btrfs_update_pinned_extents(log->fs_info->extent_root,
+					    eb->start, eb->len, 1);
+		mutex_unlock(&log->fs_info->pinned_mutex);
+	}
+
+	if (btrfs_buffer_uptodate(eb, gen)) {
+		if (wc->write)
+			btrfs_write_tree_block(eb);
+		if (wc->wait)
+			btrfs_wait_tree_block_writeback(eb);
+	}
+	return 0;
+}
+
+/*
+ * Item overwrite used by replay and tree logging.  eb, slot and key all refer
+ * to the src data we are copying out.
+ *
+ * root is the tree we are copying into, and path is a scratch
+ * path for use in this function (it should be released on entry and
+ * will be released on exit).
+ *
+ * If the key is already in the destination tree the existing item is
+ * overwritten.  If the existing item isn't big enough, it is extended.
+ * If it is too large, it is truncated.
+ *
+ * If the key isn't in the destination yet, a new item is inserted.
+ */
+static noinline int overwrite_item(struct btrfs_trans_handle *trans,
+				   struct btrfs_root *root,
+				   struct btrfs_path *path,
+				   struct extent_buffer *eb, int slot,
+				   struct btrfs_key *key)
+{
+	int ret;
+	u32 item_size;
+	u64 saved_i_size = 0;
+	int save_old_i_size = 0;
+	unsigned long src_ptr;
+	unsigned long dst_ptr;
+	int overwrite_root = 0;
+
+	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
+		overwrite_root = 1;
+
+	item_size = btrfs_item_size_nr(eb, slot);
+	src_ptr = btrfs_item_ptr_offset(eb, slot);
+
+	/* look for the key in the destination tree */
+	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
+	if (ret == 0) {
+		char *src_copy;
+		char *dst_copy;
+		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
+						  path->slots[0]);
+		if (dst_size != item_size)
+			goto insert;
+
+		if (item_size == 0) {
+			btrfs_release_path(root, path);
+			return 0;
+		}
+		dst_copy = kmalloc(item_size, GFP_NOFS);
+		src_copy = kmalloc(item_size, GFP_NOFS);
+
+		read_extent_buffer(eb, src_copy, src_ptr, item_size);
+
+		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
+		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
+				   item_size);
+		ret = memcmp(dst_copy, src_copy, item_size);
+
+		kfree(dst_copy);
+		kfree(src_copy);
+		/*
+		 * they have the same contents, just return, this saves
+		 * us from cowing blocks in the destination tree and doing
+		 * extra writes that may not have been done by a previous
+		 * sync
+		 */
+		if (ret == 0) {
+			btrfs_release_path(root, path);
+			return 0;
+		}
+
+	}
+insert:
+	btrfs_release_path(root, path);
+	/* try to insert the key into the destination tree */
+	ret = btrfs_insert_empty_item(trans, root, path,
+				      key, item_size);
+
+	/* make sure any existing item is the correct size */
+	if (ret == -EEXIST) {
+		u32 found_size;
+		found_size = btrfs_item_size_nr(path->nodes[0],
+						path->slots[0]);
+		if (found_size > item_size) {
+			btrfs_truncate_item(trans, root, path, item_size, 1);
+		} else if (found_size < item_size) {
+			ret = btrfs_extend_item(trans, root, path,
+						item_size - found_size);
+			BUG_ON(ret);
+		}
+	} else if (ret) {
+		BUG();
+	}
+	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
+					path->slots[0]);
+
+	/* don't overwrite an existing inode if the generation number
+	 * was logged as zero.  This is done when the tree logging code
+	 * is just logging an inode to make sure it exists after recovery.
+	 *
+	 * Also, don't overwrite i_size on directories during replay.
+	 * log replay inserts and removes directory items based on the
+	 * state of the tree found in the subvolume, and i_size is modified
+	 * as it goes
+	 */
+	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
+		struct btrfs_inode_item *src_item;
+		struct btrfs_inode_item *dst_item;
+
+		src_item = (struct btrfs_inode_item *)src_ptr;
+		dst_item = (struct btrfs_inode_item *)dst_ptr;
+
+		if (btrfs_inode_generation(eb, src_item) == 0)
+			goto no_copy;
+
+		if (overwrite_root &&
+		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
+		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
+			save_old_i_size = 1;
+			saved_i_size = btrfs_inode_size(path->nodes[0],
+							dst_item);
+		}
+	}
+
+	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
+			   src_ptr, item_size);
+
+	if (save_old_i_size) {
+		struct btrfs_inode_item *dst_item;
+		dst_item = (struct btrfs_inode_item *)dst_ptr;
+		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
+	}
+
+	/* make sure the generation is filled in */
+	if (key->type == BTRFS_INODE_ITEM_KEY) {
+		struct btrfs_inode_item *dst_item;
+		dst_item = (struct btrfs_inode_item *)dst_ptr;
+		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
+			btrfs_set_inode_generation(path->nodes[0], dst_item,
+						   trans->transid);
+		}
+	}
+no_copy:
+	btrfs_mark_buffer_dirty(path->nodes[0]);
+	btrfs_release_path(root, path);
+	return 0;
+}
+
+/*
+ * simple helper to read an inode off the disk from a given root
+ * This can only be called for subvolume roots and not for the log
+ */
+static noinline struct inode *read_one_inode(struct btrfs_root *root,
+					     u64 objectid)
+{
+	struct inode *inode;
+	inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
+	if (inode->i_state & I_NEW) {
+		BTRFS_I(inode)->root = root;
+		BTRFS_I(inode)->location.objectid = objectid;
+		BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
+		BTRFS_I(inode)->location.offset = 0;
+		btrfs_read_locked_inode(inode);
+		unlock_new_inode(inode);
+
+	}
+	if (is_bad_inode(inode)) {
+		iput(inode);
+		inode = NULL;
+	}
+	return inode;
+}
+
+/* replays a single extent in 'eb' at 'slot' with 'key' into the
+ * subvolume 'root'.  path is released on entry and should be released
+ * on exit.
+ *
+ * extents in the log tree have not been allocated out of the extent
+ * tree yet.  So, this completes the allocation, taking a reference
+ * as required if the extent already exists or creating a new extent
+ * if it isn't in the extent allocation tree yet.
+ *
+ * The extent is inserted into the file, dropping any existing extents
+ * from the file that overlap the new one.
+ */
+static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
+				      struct btrfs_root *root,
+				      struct btrfs_path *path,
+				      struct extent_buffer *eb, int slot,
+				      struct btrfs_key *key)
+{
+	int found_type;
+	u64 mask = root->sectorsize - 1;
+	u64 extent_end;
+	u64 alloc_hint;
+	u64 start = key->offset;
+	u64 saved_nbytes;
+	struct btrfs_file_extent_item *item;
+	struct inode *inode = NULL;
+	unsigned long size;
+	int ret = 0;
+
+	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+	found_type = btrfs_file_extent_type(eb, item);
+
+	if (found_type == BTRFS_FILE_EXTENT_REG ||
+	    found_type == BTRFS_FILE_EXTENT_PREALLOC)
+		extent_end = start + btrfs_file_extent_num_bytes(eb, item);
+	else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+		size = btrfs_file_extent_inline_len(eb, item);
+		extent_end = (start + size + mask) & ~mask;
+	} else {
+		ret = 0;
+		goto out;
+	}
+
+	inode = read_one_inode(root, key->objectid);
+	if (!inode) {
+		ret = -EIO;
+		goto out;
+	}
+
+	/*
+	 * first check to see if we already have this extent in the
+	 * file.  This must be done before the btrfs_drop_extents run
+	 * so we don't try to drop this extent.
+	 */
+	ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+				       start, 0);
+
+	if (ret == 0 &&
+	    (found_type == BTRFS_FILE_EXTENT_REG ||
+	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
+		struct btrfs_file_extent_item cmp1;
+		struct btrfs_file_extent_item cmp2;
+		struct btrfs_file_extent_item *existing;
+		struct extent_buffer *leaf;
+
+		leaf = path->nodes[0];
+		existing = btrfs_item_ptr(leaf, path->slots[0],
+					  struct btrfs_file_extent_item);
+
+		read_extent_buffer(eb, &cmp1, (unsigned long)item,
+				   sizeof(cmp1));
+		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
+				   sizeof(cmp2));
+
+		/*
+		 * we already have a pointer to this exact extent,
+		 * we don't have to do anything
+		 */
+		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
+			btrfs_release_path(root, path);
+			goto out;
+		}
+	}
+	btrfs_release_path(root, path);
+
+	saved_nbytes = inode_get_bytes(inode);
+	/* drop any overlapping extents */
+	ret = btrfs_drop_extents(trans, root, inode,
+			 start, extent_end, start, &alloc_hint);
+	BUG_ON(ret);
+
+	if (found_type == BTRFS_FILE_EXTENT_REG ||
+	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+		unsigned long dest_offset;
+		struct btrfs_key ins;
+
+		ret = btrfs_insert_empty_item(trans, root, path, key,
+					      sizeof(*item));
+		BUG_ON(ret);
+		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
+						    path->slots[0]);
+		copy_extent_buffer(path->nodes[0], eb, dest_offset,
+				(unsigned long)item,  sizeof(*item));
+
+		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
+		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
+		ins.type = BTRFS_EXTENT_ITEM_KEY;
+
+		if (ins.objectid > 0) {
+			u64 csum_start;
+			u64 csum_end;
+			LIST_HEAD(ordered_sums);
+			/*
+			 * is this extent already allocated in the extent
+			 * allocation tree?  If so, just add a reference
+			 */
+			ret = btrfs_lookup_extent(root, ins.objectid,
+						ins.offset);
+			if (ret == 0) {
+				ret = btrfs_inc_extent_ref(trans, root,
+						ins.objectid, ins.offset,
+						path->nodes[0]->start,
+						root->root_key.objectid,
+						trans->transid, key->objectid);
+			} else {
+				/*
+				 * insert the extent pointer in the extent
+				 * allocation tree
+				 */
+				ret = btrfs_alloc_logged_extent(trans, root,
+						path->nodes[0]->start,
+						root->root_key.objectid,
+						trans->transid, key->objectid,
+						&ins);
+				BUG_ON(ret);
+			}
+			btrfs_release_path(root, path);
+
+			if (btrfs_file_extent_compression(eb, item)) {
+				csum_start = ins.objectid;
+				csum_end = csum_start + ins.offset;
+			} else {
+				csum_start = ins.objectid +
+					btrfs_file_extent_offset(eb, item);
+				csum_end = csum_start +
+					btrfs_file_extent_num_bytes(eb, item);
+			}
+
+			ret = btrfs_lookup_csums_range(root->log_root,
+						csum_start, csum_end - 1,
+						&ordered_sums);
+			BUG_ON(ret);
+			while (!list_empty(&ordered_sums)) {
+				struct btrfs_ordered_sum *sums;
+				sums = list_entry(ordered_sums.next,
+						struct btrfs_ordered_sum,
+						list);
+				ret = btrfs_csum_file_blocks(trans,
+						root->fs_info->csum_root,
+						sums);
+				BUG_ON(ret);
+				list_del(&sums->list);
+				kfree(sums);
+			}
+		} else {
+			btrfs_release_path(root, path);
+		}
+	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+		/* inline extents are easy, we just overwrite them */
+		ret = overwrite_item(trans, root, path, eb, slot, key);
+		BUG_ON(ret);
+	}
+
+	inode_set_bytes(inode, saved_nbytes);
+	btrfs_update_inode(trans, root, inode);
+out:
+	if (inode)
+		iput(inode);
+	return ret;
+}
+
+/*
+ * when cleaning up conflicts between the directory names in the
+ * subvolume, directory names in the log and directory names in the
+ * inode back references, we may have to unlink inodes from directories.
+ *
+ * This is a helper function to do the unlink of a specific directory
+ * item
+ */
+static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
+				      struct btrfs_root *root,
+				      struct btrfs_path *path,
+				      struct inode *dir,
+				      struct btrfs_dir_item *di)
+{
+	struct inode *inode;
+	char *name;
+	int name_len;
+	struct extent_buffer *leaf;
+	struct btrfs_key location;
+	int ret;
+
+	leaf = path->nodes[0];
+
+	btrfs_dir_item_key_to_cpu(leaf, di, &location);
+	name_len = btrfs_dir_name_len(leaf, di);
+	name = kmalloc(name_len, GFP_NOFS);
+	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
+	btrfs_release_path(root, path);
+
+	inode = read_one_inode(root, location.objectid);
+	BUG_ON(!inode);
+
+	ret = link_to_fixup_dir(trans, root, path, location.objectid);
+	BUG_ON(ret);
+	ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
+	BUG_ON(ret);
+	kfree(name);
+
+	iput(inode);
+	return ret;
+}
+
+/*
+ * helper function to see if a given name and sequence number found
+ * in an inode back reference are already in a directory and correctly
+ * point to this inode
+ */
+static noinline int inode_in_dir(struct btrfs_root *root,
+				 struct btrfs_path *path,
+				 u64 dirid, u64 objectid, u64 index,
+				 const char *name, int name_len)
+{
+	struct btrfs_dir_item *di;
+	struct btrfs_key location;
+	int match = 0;
+
+	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
+					 index, name, name_len, 0);
+	if (di && !IS_ERR(di)) {
+		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
+		if (location.objectid != objectid)
+			goto out;
+	} else
+		goto out;
+	btrfs_release_path(root, path);
+
+	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
+	if (di && !IS_ERR(di)) {
+		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
+		if (location.objectid != objectid)
+			goto out;
+	} else
+		goto out;
+	match = 1;
+out:
+	btrfs_release_path(root, path);
+	return match;
+}
+
+/*
+ * helper function to check a log tree for a named back reference in
+ * an inode.  This is used to decide if a back reference that is
+ * found in the subvolume conflicts with what we find in the log.
+ *
+ * inode backreferences may have multiple refs in a single item,
+ * during replay we process one reference at a time, and we don't
+ * want to delete valid links to a file from the subvolume if that
+ * link is also in the log.
+ */
+static noinline int backref_in_log(struct btrfs_root *log,
+				   struct btrfs_key *key,
+				   char *name, int namelen)
+{
+	struct btrfs_path *path;
+	struct btrfs_inode_ref *ref;
+	unsigned long ptr;
+	unsigned long ptr_end;
+	unsigned long name_ptr;
+	int found_name_len;
+	int item_size;
+	int ret;
+	int match = 0;
+
+	path = btrfs_alloc_path();
+	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
+	if (ret != 0)
+		goto out;
+
+	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
+	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
+	ptr_end = ptr + item_size;
+	while (ptr < ptr_end) {
+		ref = (struct btrfs_inode_ref *)ptr;
+		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
+		if (found_name_len == namelen) {
+			name_ptr = (unsigned long)(ref + 1);
+			ret = memcmp_extent_buffer(path->nodes[0], name,
+						   name_ptr, namelen);
+			if (ret == 0) {
+				match = 1;
+				goto out;
+			}
+		}
+		ptr = (unsigned long)(ref + 1) + found_name_len;
+	}
+out:
+	btrfs_free_path(path);
+	return match;
+}
+
+
+/*
+ * replay one inode back reference item found in the log tree.
+ * eb, slot and key refer to the buffer and key found in the log tree.
+ * root is the destination we are replaying into, and path is for temp
+ * use by this function.  (it should be released on return).
+ */
+static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+				  struct btrfs_root *root,
+				  struct btrfs_root *log,
+				  struct btrfs_path *path,
+				  struct extent_buffer *eb, int slot,
+				  struct btrfs_key *key)
+{
+	struct inode *dir;
+	int ret;
+	struct btrfs_key location;
+	struct btrfs_inode_ref *ref;
+	struct btrfs_dir_item *di;
+	struct inode *inode;
+	char *name;
+	int namelen;
+	unsigned long ref_ptr;
+	unsigned long ref_end;
+
+	location.objectid = key->objectid;
+	location.type = BTRFS_INODE_ITEM_KEY;
+	location.offset = 0;
+
+	/*
+	 * it is possible that we didn't log all the parent directories
+	 * for a given inode.  If we don't find the dir, just don't
+	 * copy the back ref in.  The link count fixup code will take
+	 * care of the rest
+	 */
+	dir = read_one_inode(root, key->offset);
+	if (!dir)
+		return -ENOENT;
+
+	inode = read_one_inode(root, key->objectid);
+	BUG_ON(!inode);
+
+	ref_ptr = btrfs_item_ptr_offset(eb, slot);
+	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
+
+again:
+	ref = (struct btrfs_inode_ref *)ref_ptr;
+
+	namelen = btrfs_inode_ref_name_len(eb, ref);
+	name = kmalloc(namelen, GFP_NOFS);
+	BUG_ON(!name);
+
+	read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen);
+
+	/* if we already have a perfect match, we're done */
+	if (inode_in_dir(root, path, dir->i_ino, inode->i_ino,
+			 btrfs_inode_ref_index(eb, ref),
+			 name, namelen)) {
+		goto out;
+	}
+
+	/*
+	 * look for a conflicting back reference in the metadata.
+	 * if we find one we have to unlink that name of the file
+	 * before we add our new link.  Later on, we overwrite any
+	 * existing back reference, and we don't want to create
+	 * dangling pointers in the directory.
+	 */
+conflict_again:
+	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
+	if (ret == 0) {
+		char *victim_name;
+		int victim_name_len;
+		struct btrfs_inode_ref *victim_ref;
+		unsigned long ptr;
+		unsigned long ptr_end;
+		struct extent_buffer *leaf = path->nodes[0];
+
+		/* are we trying to overwrite a back ref for the root directory
+		 * if so, just jump out, we're done
+		 */
+		if (key->objectid == key->offset)
+			goto out_nowrite;
+
+		/* check all the names in this back reference to see
+		 * if they are in the log.  if so, we allow them to stay
+		 * otherwise they must be unlinked as a conflict
+		 */
+		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
+		while (ptr < ptr_end) {
+			victim_ref = (struct btrfs_inode_ref *)ptr;
+			victim_name_len = btrfs_inode_ref_name_len(leaf,
+								   victim_ref);
+			victim_name = kmalloc(victim_name_len, GFP_NOFS);
+			BUG_ON(!victim_name);
+
+			read_extent_buffer(leaf, victim_name,
+					   (unsigned long)(victim_ref + 1),
+					   victim_name_len);
+
+			if (!backref_in_log(log, key, victim_name,
+					    victim_name_len)) {
+				btrfs_inc_nlink(inode);
+				btrfs_release_path(root, path);
+				ret = btrfs_unlink_inode(trans, root, dir,
+							 inode, victim_name,
+							 victim_name_len);
+				kfree(victim_name);
+				btrfs_release_path(root, path);
+				goto conflict_again;
+			}
+			kfree(victim_name);
+			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
+		}
+		BUG_ON(ret);
+	}
+	btrfs_release_path(root, path);
+
+	/* look for a conflicting sequence number */
+	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
+					 btrfs_inode_ref_index(eb, ref),
+					 name, namelen, 0);
+	if (di && !IS_ERR(di)) {
+		ret = drop_one_dir_item(trans, root, path, dir, di);
+		BUG_ON(ret);
+	}
+	btrfs_release_path(root, path);
+
+
+	/* look for a conflicting name */
+	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+				   name, namelen, 0);
+	if (di && !IS_ERR(di)) {
+		ret = drop_one_dir_item(trans, root, path, dir, di);
+		BUG_ON(ret);
+	}
+	btrfs_release_path(root, path);
+
+	/* insert our name */
+	ret = btrfs_add_link(trans, dir, inode, name, namelen, 0,
+			     btrfs_inode_ref_index(eb, ref));
+	BUG_ON(ret);
+
+	btrfs_update_inode(trans, root, inode);
+
+out:
+	ref_ptr = (unsigned long)(ref + 1) + namelen;
+	kfree(name);
+	if (ref_ptr < ref_end)
+		goto again;
+
+	/* finally write the back reference in the inode */
+	ret = overwrite_item(trans, root, path, eb, slot, key);
+	BUG_ON(ret);
+
+out_nowrite:
+	btrfs_release_path(root, path);
+	iput(dir);
+	iput(inode);
+	return 0;
+}
+
+/*
+ * There are a few corners where the link count of the file can't
+ * be properly maintained during replay.  So, instead of adding
+ * lots of complexity to the log code, we just scan the backrefs
+ * for any file that has been through replay.
+ *
+ * The scan will update the link count on the inode to reflect the
+ * number of back refs found.  If it goes down to zero, the iput
+ * will free the inode.
+ */
+static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
+					   struct btrfs_root *root,
+					   struct inode *inode)
+{
+	struct btrfs_path *path;
+	int ret;
+	struct btrfs_key key;
+	u64 nlink = 0;
+	unsigned long ptr;
+	unsigned long ptr_end;
+	int name_len;
+
+	key.objectid = inode->i_ino;
+	key.type = BTRFS_INODE_REF_KEY;
+	key.offset = (u64)-1;
+
+	path = btrfs_alloc_path();
+
+	while (1) {
+		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+		if (ret < 0)
+			break;
+		if (ret > 0) {
+			if (path->slots[0] == 0)
+				break;
+			path->slots[0]--;
+		}
+		btrfs_item_key_to_cpu(path->nodes[0], &key,
+				      path->slots[0]);
+		if (key.objectid != inode->i_ino ||
+		    key.type != BTRFS_INODE_REF_KEY)
+			break;
+		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
+		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
+						   path->slots[0]);
+		while (ptr < ptr_end) {
+			struct btrfs_inode_ref *ref;
+
+			ref = (struct btrfs_inode_ref *)ptr;
+			name_len = btrfs_inode_ref_name_len(path->nodes[0],
+							    ref);
+			ptr = (unsigned long)(ref + 1) + name_len;
+			nlink++;
+		}
+
+		if (key.offset == 0)
+			break;
+		key.offset--;
+		btrfs_release_path(root, path);
+	}
+	btrfs_free_path(path);
+	if (nlink != inode->i_nlink) {
+		inode->i_nlink = nlink;
+		btrfs_update_inode(trans, root, inode);
+	}
+	BTRFS_I(inode)->index_cnt = (u64)-1;
+
+	return 0;
+}
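+
+/*
+ * For example (hypothetical): an inode that ends replay with a single
+ * BTRFS_INODE_REF_KEY name in the subvolume, but whose in-memory
+ * i_nlink still says 2, is corrected to i_nlink == 1 here.  If no back
+ * refs are found at all, i_nlink goes to zero and the final iput()
+ * frees the inode.
+ */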
+
+static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
+					    struct btrfs_root *root,
+					    struct btrfs_path *path)
+{
+	int ret;
+	struct btrfs_key key;
+	struct inode *inode;
+
+	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
+	key.type = BTRFS_ORPHAN_ITEM_KEY;
+	key.offset = (u64)-1;
+	while (1) {
+		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+		if (ret < 0)
+			break;
+
+		if (ret == 1) {
+			if (path->slots[0] == 0)
+				break;
+			path->slots[0]--;
+		}
+
+		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
+		    key.type != BTRFS_ORPHAN_ITEM_KEY)
+			break;
+
+		ret = btrfs_del_item(trans, root, path);
+		BUG_ON(ret);
+
+		btrfs_release_path(root, path);
+		inode = read_one_inode(root, key.offset);
+		BUG_ON(!inode);
+
+		ret = fixup_inode_link_count(trans, root, inode);
+		BUG_ON(ret);
+
+		iput(inode);
+
+		if (key.offset == 0)
+			break;
+		key.offset--;
+	}
+	btrfs_release_path(root, path);
+	return 0;
+}
+
+
+/*
+ * record a given inode in the fixup dir so we can check its link
+ * count when replay is done.  The link count is incremented here
+ * so the inode won't go away until we check it
+ */
+static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
+				      struct btrfs_root *root,
+				      struct btrfs_path *path,
+				      u64 objectid)
+{
+	struct btrfs_key key;
+	int ret = 0;
+	struct inode *inode;
+
+	inode = read_one_inode(root, objectid);
+	BUG_ON(!inode);
+
+	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
+	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
+	key.offset = objectid;
+
+	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
+
+	btrfs_release_path(root, path);
+	if (ret == 0) {
+		btrfs_inc_nlink(inode);
+		btrfs_update_inode(trans, root, inode);
+	} else if (ret == -EEXIST) {
+		ret = 0;
+	} else {
+		BUG();
+	}
+	iput(inode);
+
+	return ret;
+}
+
+/*
+ * when replaying the log for a directory, we only insert names
+ * for inodes that actually exist.  This means an fsync on a directory
+ * does not implicitly fsync all the new files in it
+ */
+static noinline int insert_one_name(struct btrfs_trans_handle *trans,
+				    struct btrfs_root *root,
+				    struct btrfs_path *path,
+				    u64 dirid, u64 index,
+				    char *name, int name_len, u8 type,
+				    struct btrfs_key *location)
+{
+	struct inode *inode;
+	struct inode *dir;
+	int ret;
+
+	inode = read_one_inode(root, location->objectid);
+	if (!inode)
+		return -ENOENT;
+
+	dir = read_one_inode(root, dirid);
+	if (!dir) {
+		iput(inode);
+		return -EIO;
+	}
+	ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
+
+	/* FIXME, put inode into FIXUP list */
+
+	iput(inode);
+	iput(dir);
+	return ret;
+}
+
+/*
+ * take a single entry in a log directory item and replay it into
+ * the subvolume.
+ *
+ * if a conflicting item exists in the subdirectory already,
+ * the inode it points to is unlinked and put into the link count
+ * fix up tree.
+ *
+ * If a name from the log points to a file or directory that does
+ * not exist in the FS, it is skipped.  fsyncs on directories
+ * do not force down inodes inside that directory, just changes to the
+ * names or unlinks in a directory.
+ */
+static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+				    struct btrfs_root *root,
+				    struct btrfs_path *path,
+				    struct extent_buffer *eb,
+				    struct btrfs_dir_item *di,
+				    struct btrfs_key *key)
+{
+	char *name;
+	int name_len;
+	struct btrfs_dir_item *dst_di;
+	struct btrfs_key found_key;
+	struct btrfs_key log_key;
+	struct inode *dir;
+	u8 log_type;
+	int exists;
+	int ret;
+
+	dir = read_one_inode(root, key->objectid);
+	BUG_ON(!dir);
+
+	name_len = btrfs_dir_name_len(eb, di);
+	name = kmalloc(name_len, GFP_NOFS);
+	log_type = btrfs_dir_type(eb, di);
+	read_extent_buffer(eb, name, (unsigned long)(di + 1),
+		   name_len);
+
+	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
+	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
+	if (exists == 0)
+		exists = 1;
+	else
+		exists = 0;
+	btrfs_release_path(root, path);
+
+	if (key->type == BTRFS_DIR_ITEM_KEY) {
+		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
+				       name, name_len, 1);
+	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
+		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
+						     key->objectid,
+						     key->offset, name,
+						     name_len, 1);
+	} else {
+		BUG();
+	}
+	if (!dst_di || IS_ERR(dst_di)) {
+		/* we need a sequence number to insert, so we only
+		 * do inserts for the BTRFS_DIR_INDEX_KEY types
+		 */
+		if (key->type != BTRFS_DIR_INDEX_KEY)
+			goto out;
+		goto insert;
+	}
+
+	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
+	/* the existing item matches the logged item */
+	if (found_key.objectid == log_key.objectid &&
+	    found_key.type == log_key.type &&
+	    found_key.offset == log_key.offset &&
+	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
+		goto out;
+	}
+
+	/*
+	 * don't drop the conflicting directory entry if the inode
+	 * for the new entry doesn't exist
+	 */
+	if (!exists)
+		goto out;
+
+	ret = drop_one_dir_item(trans, root, path, dir, dst_di);
+	BUG_ON(ret);
+
+	if (key->type == BTRFS_DIR_INDEX_KEY)
+		goto insert;
+out:
+	btrfs_release_path(root, path);
+	kfree(name);
+	iput(dir);
+	return 0;
+
+insert:
+	btrfs_release_path(root, path);
+	ret = insert_one_name(trans, root, path, key->objectid, key->offset,
+			      name, name_len, log_type, &log_key);
+
+	if (ret && ret != -ENOENT)
+		BUG();
+	goto out;
+}
+
+/*
+ * find all the names in a directory item and reconcile them into
+ * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
+ * one name in a directory item, but the same code gets used for
+ * both directory index types
+ */
+static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
+					struct btrfs_root *root,
+					struct btrfs_path *path,
+					struct extent_buffer *eb, int slot,
+					struct btrfs_key *key)
+{
+	int ret;
+	u32 item_size = btrfs_item_size_nr(eb, slot);
+	struct btrfs_dir_item *di;
+	int name_len;
+	unsigned long ptr;
+	unsigned long ptr_end;
+
+	ptr = btrfs_item_ptr_offset(eb, slot);
+	ptr_end = ptr + item_size;
+	while (ptr < ptr_end) {
+		di = (struct btrfs_dir_item *)ptr;
+		name_len = btrfs_dir_name_len(eb, di);
+		ret = replay_one_name(trans, root, path, eb, di, key);
+		BUG_ON(ret);
+		ptr = (unsigned long)(di + 1);
+		ptr += name_len;
+	}
+	return 0;
+}
+
+/*
+ * directory replay has two parts.  There are the standard directory
+ * items in the log copied from the subvolume, and range items
+ * created in the log while the subvolume was logged.
+ *
+ * The range items tell us which parts of the key space the log
+ * is authoritative for.  During replay, if a key in the subvolume
+ * directory is in a logged range item, but not actually in the log,
+ * that means it was deleted from the directory before the fsync
+ * and should be removed.
+ */
+static noinline int find_dir_range(struct btrfs_root *root,
+				   struct btrfs_path *path,
+				   u64 dirid, int key_type,
+				   u64 *start_ret, u64 *end_ret)
+{
+	struct btrfs_key key;
+	u64 found_end;
+	struct btrfs_dir_log_item *item;
+	int ret;
+	int nritems;
+
+	if (*start_ret == (u64)-1)
+		return 1;
+
+	key.objectid = dirid;
+	key.type = key_type;
+	key.offset = *start_ret;
+
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+	if (ret > 0) {
+		if (path->slots[0] == 0)
+			goto out;
+		path->slots[0]--;
+	}
+	if (ret != 0)
+		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+
+	if (key.type != key_type || key.objectid != dirid) {
+		ret = 1;
+		goto next;
+	}
+	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+			      struct btrfs_dir_log_item);
+	found_end = btrfs_dir_log_end(path->nodes[0], item);
+
+	if (*start_ret >= key.offset && *start_ret <= found_end) {
+		ret = 0;
+		*start_ret = key.offset;
+		*end_ret = found_end;
+		goto out;
+	}
+	ret = 1;
+next:
+	/* check the next slot in the tree to see if it is a valid item */
+	nritems = btrfs_header_nritems(path->nodes[0]);
+	if (path->slots[0] >= nritems) {
+		ret = btrfs_next_leaf(root, path);
+		if (ret)
+			goto out;
+	} else {
+		path->slots[0]++;
+	}
+
+	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+
+	if (key.type != key_type || key.objectid != dirid) {
+		ret = 1;
+		goto out;
+	}
+	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+			      struct btrfs_dir_log_item);
+	found_end = btrfs_dir_log_end(path->nodes[0], item);
+	*start_ret = key.offset;
+	*end_ret = found_end;
+	ret = 0;
+out:
+	btrfs_release_path(root, path);
+	return ret;
+}
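+
+/*
+ * A worked example (hypothetical numbers): a BTRFS_DIR_LOG_INDEX_KEY
+ * item with offset 3 and dir_log_end 9 makes the log authoritative for
+ * index keys 3 through 9 of that directory.  An index entry found in
+ * the subvolume at, say, offset 5 with no matching name in the log was
+ * deleted before the fsync, and check_item_in_log() below unlinks it.
+ */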
+
+/*
+ * this looks for a given directory item in the log.  If the directory
+ * item is not in the log, the item is removed and the inode it points
+ * to is unlinked
+ */
+static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
+				      struct btrfs_root *root,
+				      struct btrfs_root *log,
+				      struct btrfs_path *path,
+				      struct btrfs_path *log_path,
+				      struct inode *dir,
+				      struct btrfs_key *dir_key)
+{
+	int ret;
+	struct extent_buffer *eb;
+	int slot;
+	u32 item_size;
+	struct btrfs_dir_item *di;
+	struct btrfs_dir_item *log_di;
+	int name_len;
+	unsigned long ptr;
+	unsigned long ptr_end;
+	char *name;
+	struct inode *inode;
+	struct btrfs_key location;
+
+again:
+	eb = path->nodes[0];
+	slot = path->slots[0];
+	item_size = btrfs_item_size_nr(eb, slot);
+	ptr = btrfs_item_ptr_offset(eb, slot);
+	ptr_end = ptr + item_size;
+	while (ptr < ptr_end) {
+		di = (struct btrfs_dir_item *)ptr;
+		name_len = btrfs_dir_name_len(eb, di);
+		name = kmalloc(name_len, GFP_NOFS);
+		if (!name) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		read_extent_buffer(eb, name, (unsigned long)(di + 1),
+				  name_len);
+		log_di = NULL;
+		if (dir_key->type == BTRFS_DIR_ITEM_KEY) {
+			log_di = btrfs_lookup_dir_item(trans, log, log_path,
+						       dir_key->objectid,
+						       name, name_len, 0);
+		} else if (dir_key->type == BTRFS_DIR_INDEX_KEY) {
+			log_di = btrfs_lookup_dir_index_item(trans, log,
+						     log_path,
+						     dir_key->objectid,
+						     dir_key->offset,
+						     name, name_len, 0);
+		}
+		if (!log_di || IS_ERR(log_di)) {
+			btrfs_dir_item_key_to_cpu(eb, di, &location);
+			btrfs_release_path(root, path);
+			btrfs_release_path(log, log_path);
+			inode = read_one_inode(root, location.objectid);
+			BUG_ON(!inode);
+
+			ret = link_to_fixup_dir(trans, root,
+						path, location.objectid);
+			BUG_ON(ret);
+			btrfs_inc_nlink(inode);
+			ret = btrfs_unlink_inode(trans, root, dir, inode,
+						 name, name_len);
+			BUG_ON(ret);
+			kfree(name);
+			iput(inode);
+
+			/* there might still be more names under this key
+			 * check and repeat if required
+			 */
+			ret = btrfs_search_slot(NULL, root, dir_key, path,
+						0, 0);
+			if (ret == 0)
+				goto again;
+			ret = 0;
+			goto out;
+		}
+		btrfs_release_path(log, log_path);
+		kfree(name);
+
+		ptr = (unsigned long)(di + 1);
+		ptr += name_len;
+	}
+	ret = 0;
+out:
+	btrfs_release_path(root, path);
+	btrfs_release_path(log, log_path);
+	return ret;
+}
+
+/*
+ * deletion replay happens before we copy any new directory items
+ * out of the log or out of backreferences from inodes.  It
+ * scans the log to find ranges of keys that the log is authoritative for,
+ * and then scans the directory to find items in those ranges that are
+ * not present in the log.
+ *
+ * Anything we don't find in the log is unlinked and removed from the
+ * directory.
+ */
+static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
+				       struct btrfs_root *root,
+				       struct btrfs_root *log,
+				       struct btrfs_path *path,
+				       u64 dirid)
+{
+	u64 range_start;
+	u64 range_end;
+	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
+	int ret = 0;
+	struct btrfs_key dir_key;
+	struct btrfs_key found_key;
+	struct btrfs_path *log_path;
+	struct inode *dir;
+
+	dir_key.objectid = dirid;
+	dir_key.type = BTRFS_DIR_ITEM_KEY;
+	log_path = btrfs_alloc_path();
+	if (!log_path)
+		return -ENOMEM;
+
+	dir = read_one_inode(root, dirid);
+	/* it isn't an error if the inode isn't there, that can happen
+	 * because we replay the deletes before we copy in the inode item
+	 * from the log
+	 */
+	if (!dir) {
+		btrfs_free_path(log_path);
+		return 0;
+	}
+again:
+	range_start = 0;
+	range_end = 0;
+	while (1) {
+		ret = find_dir_range(log, path, dirid, key_type,
+				     &range_start, &range_end);
+		if (ret != 0)
+			break;
+
+		dir_key.offset = range_start;
+		while (1) {
+			int nritems;
+			ret = btrfs_search_slot(NULL, root, &dir_key, path,
+						0, 0);
+			if (ret < 0)
+				goto out;
+
+			nritems = btrfs_header_nritems(path->nodes[0]);
+			if (path->slots[0] >= nritems) {
+				ret = btrfs_next_leaf(root, path);
+				if (ret)
+					break;
+			}
+			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
+					      path->slots[0]);
+			if (found_key.objectid != dirid ||
+			    found_key.type != dir_key.type)
+				goto next_type;
+
+			if (found_key.offset > range_end)
+				break;
+
+			ret = check_item_in_log(trans, root, log, path,
+						log_path, dir, &found_key);
+			BUG_ON(ret);
+			if (found_key.offset == (u64)-1)
+				break;
+			dir_key.offset = found_key.offset + 1;
+		}
+		btrfs_release_path(root, path);
+		if (range_end == (u64)-1)
+			break;
+		range_start = range_end + 1;
+	}
+
+next_type:
+	ret = 0;
+	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
+		key_type = BTRFS_DIR_LOG_INDEX_KEY;
+		dir_key.type = BTRFS_DIR_INDEX_KEY;
+		btrfs_release_path(root, path);
+		goto again;
+	}
+out:
+	btrfs_release_path(root, path);
+	btrfs_free_path(log_path);
+	iput(dir);
+	return ret;
+}
+
+/*
+ * the process_func used to replay items from the log tree.  This
+ * gets called in two different stages.  The first stage just looks
+ * for inodes and makes sure they are all copied into the subvolume.
+ *
+ * The second stage copies all the other item types from the log into
+ * the subvolume.  The two stage approach is slower, but gets rid of
+ * lots of complexity around inodes referencing other inodes that exist
+ * only in the log (references come from either directory items or inode
+ * back refs).
+ */
+static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
+			     struct walk_control *wc, u64 gen)
+{
+	int nritems;
+	struct btrfs_path *path;
+	struct btrfs_root *root = wc->replay_dest;
+	struct btrfs_key key;
+	u32 item_size;
+	int level;
+	int i;
+	int ret;
+
+	btrfs_read_buffer(eb, gen);
+
+	level = btrfs_header_level(eb);
+
+	if (level != 0)
+		return 0;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+
+	nritems = btrfs_header_nritems(eb);
+	for (i = 0; i < nritems; i++) {
+		btrfs_item_key_to_cpu(eb, &key, i);
+		item_size = btrfs_item_size_nr(eb, i);
+
+		/* inode keys are done during the first stage */
+		if (key.type == BTRFS_INODE_ITEM_KEY &&
+		    wc->stage == LOG_WALK_REPLAY_INODES) {
+			struct inode *inode;
+			struct btrfs_inode_item *inode_item;
+			u32 mode;
+
+			inode_item = btrfs_item_ptr(eb, i,
+					    struct btrfs_inode_item);
+			mode = btrfs_inode_mode(eb, inode_item);
+			if (S_ISDIR(mode)) {
+				ret = replay_dir_deletes(wc->trans,
+					 root, log, path, key.objectid);
+				BUG_ON(ret);
+			}
+			ret = overwrite_item(wc->trans, root, path,
+					     eb, i, &key);
+			BUG_ON(ret);
+
+			/* for regular files, truncate away
+			 * extents past the new EOF
+			 */
+			if (S_ISREG(mode)) {
+				inode = read_one_inode(root,
+						       key.objectid);
+				BUG_ON(!inode);
+
+				ret = btrfs_truncate_inode_items(wc->trans,
+					root, inode, inode->i_size,
+					BTRFS_EXTENT_DATA_KEY);
+				BUG_ON(ret);
+				iput(inode);
+			}
+			ret = link_to_fixup_dir(wc->trans, root,
+						path, key.objectid);
+			BUG_ON(ret);
+		}
+		if (wc->stage < LOG_WALK_REPLAY_ALL)
+			continue;
+
+		/* these keys are simply copied */
+		if (key.type == BTRFS_XATTR_ITEM_KEY) {
+			ret = overwrite_item(wc->trans, root, path,
+					     eb, i, &key);
+			BUG_ON(ret);
+		} else if (key.type == BTRFS_INODE_REF_KEY) {
+			ret = add_inode_ref(wc->trans, root, log, path,
+					    eb, i, &key);
+			BUG_ON(ret && ret != -ENOENT);
+		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
+			ret = replay_one_extent(wc->trans, root, path,
+						eb, i, &key);
+			BUG_ON(ret);
+		} else if (key.type == BTRFS_DIR_ITEM_KEY ||
+			   key.type == BTRFS_DIR_INDEX_KEY) {
+			ret = replay_one_dir_item(wc->trans, root, path,
+						  eb, i, &key);
+			BUG_ON(ret);
+		}
+	}
+	btrfs_free_path(path);
+	return 0;
+}
+
+static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
+				   struct btrfs_root *root,
+				   struct btrfs_path *path, int *level,
+				   struct walk_control *wc)
+{
+	u64 root_owner;
+	u64 root_gen;
+	u64 bytenr;
+	u64 ptr_gen;
+	struct extent_buffer *next;
+	struct extent_buffer *cur;
+	struct extent_buffer *parent;
+	u32 blocksize;
+	int ret = 0;
+
+	WARN_ON(*level < 0);
+	WARN_ON(*level >= BTRFS_MAX_LEVEL);
+
+	while (*level > 0) {
+		WARN_ON(*level < 0);
+		WARN_ON(*level >= BTRFS_MAX_LEVEL);
+		cur = path->nodes[*level];
+
+		if (btrfs_header_level(cur) != *level)
+			WARN_ON(1);
+
+		if (path->slots[*level] >=
+		    btrfs_header_nritems(cur))
+			break;
+
+		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
+		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
+		blocksize = btrfs_level_size(root, *level - 1);
+
+		parent = path->nodes[*level];
+		root_owner = btrfs_header_owner(parent);
+		root_gen = btrfs_header_generation(parent);
+
+		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
+
+		wc->process_func(root, next, wc, ptr_gen);
+
+		if (*level == 1) {
+			path->slots[*level]++;
+			if (wc->free) {
+				btrfs_read_buffer(next, ptr_gen);
+
+				btrfs_tree_lock(next);
+				clean_tree_block(trans, root, next);
+				btrfs_wait_tree_block_writeback(next);
+				btrfs_tree_unlock(next);
+
+				ret = btrfs_drop_leaf_ref(trans, root, next);
+				BUG_ON(ret);
+
+				WARN_ON(root_owner !=
+					BTRFS_TREE_LOG_OBJECTID);
+				ret = btrfs_free_reserved_extent(root,
+							 bytenr, blocksize);
+				BUG_ON(ret);
+			}
+			free_extent_buffer(next);
+			continue;
+		}
+		btrfs_read_buffer(next, ptr_gen);
+
+		WARN_ON(*level <= 0);
+		if (path->nodes[*level-1])
+			free_extent_buffer(path->nodes[*level-1]);
+		path->nodes[*level-1] = next;
+		*level = btrfs_header_level(next);
+		path->slots[*level] = 0;
+		cond_resched();
+	}
+	WARN_ON(*level < 0);
+	WARN_ON(*level >= BTRFS_MAX_LEVEL);
+
+	if (path->nodes[*level] == root->node)
+		parent = path->nodes[*level];
+	else
+		parent = path->nodes[*level + 1];
+
+	bytenr = path->nodes[*level]->start;
+
+	blocksize = btrfs_level_size(root, *level);
+	root_owner = btrfs_header_owner(parent);
+	root_gen = btrfs_header_generation(parent);
+
+	wc->process_func(root, path->nodes[*level], wc,
+			 btrfs_header_generation(path->nodes[*level]));
+
+	if (wc->free) {
+		next = path->nodes[*level];
+		btrfs_tree_lock(next);
+		clean_tree_block(trans, root, next);
+		btrfs_wait_tree_block_writeback(next);
+		btrfs_tree_unlock(next);
+
+		if (*level == 0) {
+			ret = btrfs_drop_leaf_ref(trans, root, next);
+			BUG_ON(ret);
+		}
+		WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
+		ret = btrfs_free_reserved_extent(root, bytenr, blocksize);
+		BUG_ON(ret);
+	}
+	free_extent_buffer(path->nodes[*level]);
+	path->nodes[*level] = NULL;
+	*level += 1;
+
+	cond_resched();
+	return 0;
+}
+
+static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *root,
+				 struct btrfs_path *path, int *level,
+				 struct walk_control *wc)
+{
+	u64 root_owner;
+	u64 root_gen;
+	int i;
+	int slot;
+	int ret;
+
+	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
+		slot = path->slots[i];
+		if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
+			struct extent_buffer *node;
+			node = path->nodes[i];
+			path->slots[i]++;
+			*level = i;
+			WARN_ON(*level == 0);
+			return 0;
+		} else {
+			struct extent_buffer *parent;
+			if (path->nodes[*level] == root->node)
+				parent = path->nodes[*level];
+			else
+				parent = path->nodes[*level + 1];
+
+			root_owner = btrfs_header_owner(parent);
+			root_gen = btrfs_header_generation(parent);
+			wc->process_func(root, path->nodes[*level], wc,
+				 btrfs_header_generation(path->nodes[*level]));
+			if (wc->free) {
+				struct extent_buffer *next;
+
+				next = path->nodes[*level];
+
+				btrfs_tree_lock(next);
+				clean_tree_block(trans, root, next);
+				btrfs_wait_tree_block_writeback(next);
+				btrfs_tree_unlock(next);
+
+				if (*level == 0) {
+					ret = btrfs_drop_leaf_ref(trans, root,
+								  next);
+					BUG_ON(ret);
+				}
+
+				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
+				ret = btrfs_free_reserved_extent(root,
+						path->nodes[*level]->start,
+						path->nodes[*level]->len);
+				BUG_ON(ret);
+			}
+			free_extent_buffer(path->nodes[*level]);
+			path->nodes[*level] = NULL;
+			*level = i + 1;
+		}
+	}
+	return 1;
+}
+
+/*
+ * walk the log tree rooted at 'log', calling wc->process_func on each
+ * block found.  If wc->free is set, the blocks are also cleaned up and
+ * their reserved extents freed as we go.
+ */
+static int walk_log_tree(struct btrfs_trans_handle *trans,
+			 struct btrfs_root *log, struct walk_control *wc)
+{
+	int ret = 0;
+	int wret;
+	int level;
+	struct btrfs_path *path;
+	int i;
+	int orig_level;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+
+	level = btrfs_header_level(log->node);
+	orig_level = level;
+	path->nodes[level] = log->node;
+	extent_buffer_get(log->node);
+	path->slots[level] = 0;
+
+	while (1) {
+		wret = walk_down_log_tree(trans, log, path, &level, wc);
+		if (wret > 0)
+			break;
+		if (wret < 0)
+			ret = wret;
+
+		wret = walk_up_log_tree(trans, log, path, &level, wc);
+		if (wret > 0)
+			break;
+		if (wret < 0)
+			ret = wret;
+	}
+
+	/* was the root node processed? if not, catch it here */
+	if (path->nodes[orig_level]) {
+		wc->process_func(log, path->nodes[orig_level], wc,
+			 btrfs_header_generation(path->nodes[orig_level]));
+		if (wc->free) {
+			struct extent_buffer *next;
+
+			next = path->nodes[orig_level];
+
+			btrfs_tree_lock(next);
+			clean_tree_block(trans, log, next);
+			btrfs_wait_tree_block_writeback(next);
+			btrfs_tree_unlock(next);
+
+			if (orig_level == 0) {
+				ret = btrfs_drop_leaf_ref(trans, log,
+							  next);
+				BUG_ON(ret);
+			}
+			WARN_ON(log->root_key.objectid !=
+				BTRFS_TREE_LOG_OBJECTID);
+			ret = btrfs_free_reserved_extent(log, next->start,
+							 next->len);
+			BUG_ON(ret);
+		}
+	}
+
+	for (i = 0; i <= orig_level; i++) {
+		if (path->nodes[i]) {
+			free_extent_buffer(path->nodes[i]);
+			path->nodes[i] = NULL;
+		}
+	}
+	btrfs_free_path(path);
+	if (wc->free)
+		free_extent_buffer(log->node);
+	return ret;
+}
+
+static int wait_log_commit(struct btrfs_root *log)
+{
+	DEFINE_WAIT(wait);
+	u64 transid = log->fs_info->tree_log_transid;
+
+	do {
+		prepare_to_wait(&log->fs_info->tree_log_wait, &wait,
+				TASK_UNINTERRUPTIBLE);
+		mutex_unlock(&log->fs_info->tree_log_mutex);
+		if (atomic_read(&log->fs_info->tree_log_commit))
+			schedule();
+		finish_wait(&log->fs_info->tree_log_wait, &wait);
+		mutex_lock(&log->fs_info->tree_log_mutex);
+	} while (transid == log->fs_info->tree_log_transid &&
+		atomic_read(&log->fs_info->tree_log_commit));
+	return 0;
+}
+
+/*
+ * btrfs_sync_log sends a given tree log down to the disk and
+ * updates the super blocks to record it.  When this call is done,
+ * you know that any inodes previously logged are safely on disk
+ */
+int btrfs_sync_log(struct btrfs_trans_handle *trans,
+		   struct btrfs_root *root)
+{
+	int ret;
+	unsigned long batch;
+	struct btrfs_root *log = root->log_root;
+
+	mutex_lock(&log->fs_info->tree_log_mutex);
+	if (atomic_read(&log->fs_info->tree_log_commit)) {
+		wait_log_commit(log);
+		goto out;
+	}
+	atomic_set(&log->fs_info->tree_log_commit, 1);
+
+	while (1) {
+		batch = log->fs_info->tree_log_batch;
+		mutex_unlock(&log->fs_info->tree_log_mutex);
+		schedule_timeout_uninterruptible(1);
+		mutex_lock(&log->fs_info->tree_log_mutex);
+
+		while (atomic_read(&log->fs_info->tree_log_writers)) {
+			DEFINE_WAIT(wait);
+			prepare_to_wait(&log->fs_info->tree_log_wait, &wait,
+					TASK_UNINTERRUPTIBLE);
+			mutex_unlock(&log->fs_info->tree_log_mutex);
+			if (atomic_read(&log->fs_info->tree_log_writers))
+				schedule();
+			mutex_lock(&log->fs_info->tree_log_mutex);
+			finish_wait(&log->fs_info->tree_log_wait, &wait);
+		}
+		if (batch == log->fs_info->tree_log_batch)
+			break;
+	}
+
+	ret = btrfs_write_and_wait_marked_extents(log, &log->dirty_log_pages);
+	BUG_ON(ret);
+	ret = btrfs_write_and_wait_marked_extents(root->fs_info->log_root_tree,
+			       &root->fs_info->log_root_tree->dirty_log_pages);
+	BUG_ON(ret);
+
+	btrfs_set_super_log_root(&root->fs_info->super_for_commit,
+				 log->fs_info->log_root_tree->node->start);
+	btrfs_set_super_log_root_level(&root->fs_info->super_for_commit,
+		       btrfs_header_level(log->fs_info->log_root_tree->node));
+
+	write_ctree_super(trans, log->fs_info->tree_root, 2);
+	log->fs_info->tree_log_transid++;
+	log->fs_info->tree_log_batch = 0;
+	atomic_set(&log->fs_info->tree_log_commit, 0);
+	smp_mb();
+	if (waitqueue_active(&log->fs_info->tree_log_wait))
+		wake_up(&log->fs_info->tree_log_wait);
+out:
+	mutex_unlock(&log->fs_info->tree_log_mutex);
+	return 0;
+}
+
+/*
+ * free all the extents used by the tree log.  This should be called
+ * at commit time of the full transaction
+ */
+int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
+{
+	int ret;
+	struct btrfs_root *log;
+	u64 start;
+	u64 end;
+	struct walk_control wc = {
+		.free = 1,
+		.process_func = process_one_buffer
+	};
+
+	if (!root->log_root || root->fs_info->log_root_recovering)
+		return 0;
+
+	log = root->log_root;
+	ret = walk_log_tree(trans, log, &wc);
+	BUG_ON(ret);
+
+	while (1) {
+		ret = find_first_extent_bit(&log->dirty_log_pages,
+				    0, &start, &end, EXTENT_DIRTY);
+		if (ret)
+			break;
+
+		clear_extent_dirty(&log->dirty_log_pages,
+				   start, end, GFP_NOFS);
+	}
+
+	log = root->log_root;
+	ret = btrfs_del_root(trans, root->fs_info->log_root_tree,
+			     &log->root_key);
+	BUG_ON(ret);
+	root->log_root = NULL;
+	kfree(log);
+	return 0;
+}
+
+/*
+ * helper function to update the item for a given subvolume's log root
+ * in the tree of log roots
+ */
+static int update_log_root(struct btrfs_trans_handle *trans,
+			   struct btrfs_root *log)
+{
+	u64 bytenr = btrfs_root_bytenr(&log->root_item);
+	int ret;
+
+	if (log->node->start == bytenr)
+		return 0;
+
+	btrfs_set_root_bytenr(&log->root_item, log->node->start);
+	btrfs_set_root_generation(&log->root_item, trans->transid);
+	btrfs_set_root_level(&log->root_item, btrfs_header_level(log->node));
+	ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
+				&log->root_key, &log->root_item);
+	BUG_ON(ret);
+	return ret;
+}
+
+/*
+ * If both a file and directory are logged, and unlinks or renames are
+ * mixed in, we have a few interesting corners:
+ *
+ * create file X in dir Y
+ * link file X to X.link in dir Y
+ * fsync file X
+ * unlink file X but leave X.link
+ * fsync dir Y
+ *
+ * After a crash we would expect only X.link to exist.  But file X
+ * didn't get fsync'd again so the log has back refs for X and X.link.
+ *
+ * We solve this by removing directory entries and inode backrefs from the
+ * log when a file that was logged in the current transaction is
+ * unlinked.  Any later fsync will include the updated log entries, and
+ * we'll be able to reconstruct the proper directory items from backrefs.
+ *
+ * This optimization allows us to avoid relogging the entire inode
+ * or the entire directory.
+ */
+int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *root,
+				 const char *name, int name_len,
+				 struct inode *dir, u64 index)
+{
+	struct btrfs_root *log;
+	struct btrfs_dir_item *di;
+	struct btrfs_path *path;
+	int ret;
+	int bytes_del = 0;
+
+	if (BTRFS_I(dir)->logged_trans < trans->transid)
+		return 0;
+
+	ret = join_running_log_trans(root);
+	if (ret)
+		return 0;
+
+	mutex_lock(&BTRFS_I(dir)->log_mutex);
+
+	log = root->log_root;
+	path = btrfs_alloc_path();
+	di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
+				   name, name_len, -1);
+	if (di && !IS_ERR(di)) {
+		ret = btrfs_delete_one_dir_name(trans, log, path, di);
+		bytes_del += name_len;
+		BUG_ON(ret);
+	}
+	btrfs_release_path(log, path);
+	di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino,
+					 index, name, name_len, -1);
+	if (di && !IS_ERR(di)) {
+		ret = btrfs_delete_one_dir_name(trans, log, path, di);
+		bytes_del += name_len;
+		BUG_ON(ret);
+	}
+
+	/* update the directory size in the log to reflect the names
+	 * we have removed
+	 */
+	if (bytes_del) {
+		struct btrfs_key key;
+
+		key.objectid = dir->i_ino;
+		key.offset = 0;
+		key.type = BTRFS_INODE_ITEM_KEY;
+		btrfs_release_path(log, path);
+
+		ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
+		if (ret == 0) {
+			struct btrfs_inode_item *item;
+			u64 i_size;
+
+			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+					      struct btrfs_inode_item);
+			i_size = btrfs_inode_size(path->nodes[0], item);
+			if (i_size > bytes_del)
+				i_size -= bytes_del;
+			else
+				i_size = 0;
+			btrfs_set_inode_size(path->nodes[0], item, i_size);
+			btrfs_mark_buffer_dirty(path->nodes[0]);
+		} else
+			ret = 0;
+		btrfs_release_path(log, path);
+	}
+
+	btrfs_free_path(path);
+	mutex_unlock(&BTRFS_I(dir)->log_mutex);
+	end_log_trans(root);
+
+	return 0;
+}
+
+/* see comments for btrfs_del_dir_entries_in_log */
+int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root,
+			       const char *name, int name_len,
+			       struct inode *inode, u64 dirid)
+{
+	struct btrfs_root *log;
+	u64 index;
+	int ret;
+
+	if (BTRFS_I(inode)->logged_trans < trans->transid)
+		return 0;
+
+	ret = join_running_log_trans(root);
+	if (ret)
+		return 0;
+	log = root->log_root;
+	mutex_lock(&BTRFS_I(inode)->log_mutex);
+
+	ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino,
+				  dirid, &index);
+	mutex_unlock(&BTRFS_I(inode)->log_mutex);
+	end_log_trans(root);
+
+	return ret;
+}
+
+/*
+ * creates a range item in the log for 'dirid'.  first_offset and
+ * last_offset tell us which parts of the key space the log should
+ * be considered authoritative for.
+ */
+static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
+				       struct btrfs_root *log,
+				       struct btrfs_path *path,
+				       int key_type, u64 dirid,
+				       u64 first_offset, u64 last_offset)
+{
+	int ret;
+	struct btrfs_key key;
+	struct btrfs_dir_log_item *item;
+
+	key.objectid = dirid;
+	key.offset = first_offset;
+	if (key_type == BTRFS_DIR_ITEM_KEY)
+		key.type = BTRFS_DIR_LOG_ITEM_KEY;
+	else
+		key.type = BTRFS_DIR_LOG_INDEX_KEY;
+	ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
+	BUG_ON(ret);
+
+	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+			      struct btrfs_dir_log_item);
+	btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
+	btrfs_mark_buffer_dirty(path->nodes[0]);
+	btrfs_release_path(log, path);
+	return 0;
+}
+
+/*
+ * log all the items included in the current transaction for a given
+ * directory.  This also creates the range items in the log tree required
+ * to replay anything deleted before the fsync
+ */
+static noinline int log_dir_items(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root, struct inode *inode,
+			  struct btrfs_path *path,
+			  struct btrfs_path *dst_path, int key_type,
+			  u64 min_offset, u64 *last_offset_ret)
+{
+	struct btrfs_key min_key;
+	struct btrfs_key max_key;
+	struct btrfs_root *log = root->log_root;
+	struct extent_buffer *src;
+	int ret;
+	int i;
+	int nritems;
+	u64 first_offset = min_offset;
+	u64 last_offset = (u64)-1;
+
+	log = root->log_root;
+	max_key.objectid = inode->i_ino;
+	max_key.offset = (u64)-1;
+	max_key.type = key_type;
+
+	min_key.objectid = inode->i_ino;
+	min_key.type = key_type;
+	min_key.offset = min_offset;
+
+	path->keep_locks = 1;
+
+	ret = btrfs_search_forward(root, &min_key, &max_key,
+				   path, 0, trans->transid);
+
+	/*
+	 * we didn't find anything from this transaction, see if there
+	 * is anything at all
+	 */
+	if (ret != 0 || min_key.objectid != inode->i_ino ||
+	    min_key.type != key_type) {
+		min_key.objectid = inode->i_ino;
+		min_key.type = key_type;
+		min_key.offset = (u64)-1;
+		btrfs_release_path(root, path);
+		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
+		if (ret < 0) {
+			btrfs_release_path(root, path);
+			return ret;
+		}
+		ret = btrfs_previous_item(root, path, inode->i_ino, key_type);
+
+		/* if ret == 0 there are items for this type,
+		 * create a range to tell us the last key of this type.
+		 * otherwise, there are no items in this directory after
+		 * *min_offset, and we create a range to indicate that.
+		 */
+		if (ret == 0) {
+			struct btrfs_key tmp;
+			btrfs_item_key_to_cpu(path->nodes[0], &tmp,
+					      path->slots[0]);
+			if (key_type == tmp.type)
+				first_offset = max(min_offset, tmp.offset) + 1;
+		}
+		goto done;
+	}
+
+	/* go backward to find any previous key */
+	ret = btrfs_previous_item(root, path, inode->i_ino, key_type);
+	if (ret == 0) {
+		struct btrfs_key tmp;
+		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
+		if (key_type == tmp.type) {
+			first_offset = tmp.offset;
+			ret = overwrite_item(trans, log, dst_path,
+					     path->nodes[0], path->slots[0],
+					     &tmp);
+		}
+	}
+	btrfs_release_path(root, path);
+
+	/* find the first key from this transaction again */
+	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
+	if (ret != 0) {
+		WARN_ON(1);
+		goto done;
+	}
+
+	/*
+	 * we have a block from this transaction, log every item in it
+	 * from our directory
+	 */
+	while (1) {
+		struct btrfs_key tmp;
+		src = path->nodes[0];
+		nritems = btrfs_header_nritems(src);
+		for (i = path->slots[0]; i < nritems; i++) {
+			btrfs_item_key_to_cpu(src, &min_key, i);
+
+			if (min_key.objectid != inode->i_ino ||
+			    min_key.type != key_type)
+				goto done;
+			ret = overwrite_item(trans, log, dst_path, src, i,
+					     &min_key);
+			BUG_ON(ret);
+		}
+		path->slots[0] = nritems;
+
+		/*
+		 * look ahead to the next item and see if it is also
+		 * from this directory and from this transaction
+		 */
+		ret = btrfs_next_leaf(root, path);
+		if (ret == 1) {
+			last_offset = (u64)-1;
+			goto done;
+		}
+		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
+		if (tmp.objectid != inode->i_ino || tmp.type != key_type) {
+			last_offset = (u64)-1;
+			goto done;
+		}
+		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
+			ret = overwrite_item(trans, log, dst_path,
+					     path->nodes[0], path->slots[0],
+					     &tmp);
+
+			BUG_ON(ret);
+			last_offset = tmp.offset;
+			goto done;
+		}
+	}
+done:
+	*last_offset_ret = last_offset;
+	btrfs_release_path(root, path);
+	btrfs_release_path(log, dst_path);
+
+	/* insert the log range keys to indicate where the log is valid */
+	ret = insert_dir_log_key(trans, log, path, key_type, inode->i_ino,
+				 first_offset, last_offset);
+	BUG_ON(ret);
+	return 0;
+}
+
+/*
+ * Logging directories is very similar to logging inodes; we find all the items
+ * from the current transaction and write them to the log.
+ *
+ * The recovery code scans the directory in the subvolume, and if it finds a
+ * key in the range logged that is not present in the log tree, then it means
+ * that dir entry was unlinked during the transaction.
+ *
+ * In order for that scan to work, we must include one key smaller than
+ * the smallest logged by this transaction and one key larger than the largest
+ * key logged by this transaction.
+ */
+static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root, struct inode *inode,
+			  struct btrfs_path *path,
+			  struct btrfs_path *dst_path)
+{
+	u64 min_key;
+	u64 max_key;
+	int ret;
+	int key_type = BTRFS_DIR_ITEM_KEY;
+
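+	/* log the DIR_ITEM key space first, then repeat for DIR_INDEX */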
+again:
+	min_key = 0;
+	max_key = 0;
+	while (1) {
+		ret = log_dir_items(trans, root, inode, path,
+				    dst_path, key_type, min_key,
+				    &max_key);
+		BUG_ON(ret);
+		if (max_key == (u64)-1)
+			break;
+		min_key = max_key + 1;
+	}
+
+	if (key_type == BTRFS_DIR_ITEM_KEY) {
+		key_type = BTRFS_DIR_INDEX_KEY;
+		goto again;
+	}
+	return 0;
+}
+
+/*
+ * a helper function to drop items from the log before we relog an
+ * inode.  max_key_type indicates the highest item type to remove.
+ * This cannot be run for file data extents because it does not
+ * free the extents they point to.
+ */
+static int drop_objectid_items(struct btrfs_trans_handle *trans,
+				  struct btrfs_root *log,
+				  struct btrfs_path *path,
+				  u64 objectid, int max_key_type)
+{
+	int ret;
+	struct btrfs_key key;
+	struct btrfs_key found_key;
+
+	key.objectid = objectid;
+	key.type = max_key_type;
+	key.offset = (u64)-1;
+
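+	/* start from the largest possible key for this objectid and walk
+	 * backwards, deleting items until the objectid no longer matches
+	 */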
+	while (1) {
+		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
+
+		if (ret != 1)
+			break;
+
+		if (path->slots[0] == 0)
+			break;
+
+		path->slots[0]--;
+		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
+				      path->slots[0]);
+
+		if (found_key.objectid != objectid)
+			break;
+
+		ret = btrfs_del_item(trans, log, path);
+		BUG_ON(ret);
+		btrfs_release_path(log, path);
+	}
+	btrfs_release_path(log, path);
+	return 0;
+}
+
+static noinline int copy_items(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *log,
+			       struct btrfs_path *dst_path,
+			       struct extent_buffer *src,
+			       int start_slot, int nr, int inode_only)
+{
+	unsigned long src_offset;
+	unsigned long dst_offset;
+	struct btrfs_file_extent_item *extent;
+	struct btrfs_inode_item *inode_item;
+	int ret;
+	struct btrfs_key *ins_keys;
+	u32 *ins_sizes;
+	char *ins_data;
+	int i;
+	struct list_head ordered_sums;
+
+	INIT_LIST_HEAD(&ordered_sums);
+
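+	/* one allocation holds the nr item sizes (u32s) followed by the
+	 * nr keys that btrfs_insert_empty_items() will consume
+	 */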
+	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
+			   nr * sizeof(u32), GFP_NOFS);
+	ins_sizes = (u32 *)ins_data;
+	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
+
+	for (i = 0; i < nr; i++) {
+		ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
+		btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
+	}
+	ret = btrfs_insert_empty_items(trans, log, dst_path,
+				       ins_keys, ins_sizes, nr);
+	BUG_ON(ret);
+
+	for (i = 0; i < nr; i++) {
+		dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
+						   dst_path->slots[0]);
+
+		src_offset = btrfs_item_ptr_offset(src, start_slot + i);
+
+		copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
+				   src_offset, ins_sizes[i]);
+
+		if (inode_only == LOG_INODE_EXISTS &&
+		    ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
+			inode_item = btrfs_item_ptr(dst_path->nodes[0],
+						    dst_path->slots[0],
+						    struct btrfs_inode_item);
+			btrfs_set_inode_size(dst_path->nodes[0], inode_item, 0);
+
+			/* set the generation to zero so the recovery code
+			 * can tell the difference between logging
+			 * just to say 'this inode exists' and logging
+			 * to say 'update this inode with these values'
+			 */
+			btrfs_set_inode_generation(dst_path->nodes[0],
+						   inode_item, 0);
+		}
+		/* take a reference on file data extents so that truncates
+		 * or deletes of this inode don't have to relog the inode
+		 * again
+		 */
+		if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY) {
+			int found_type;
+			extent = btrfs_item_ptr(src, start_slot + i,
+						struct btrfs_file_extent_item);
+
+			found_type = btrfs_file_extent_type(src, extent);
+			if (found_type == BTRFS_FILE_EXTENT_REG ||
+			    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+				u64 ds = btrfs_file_extent_disk_bytenr(src,
+								   extent);
+				u64 dl = btrfs_file_extent_disk_num_bytes(src,
+								      extent);
+				u64 cs = btrfs_file_extent_offset(src, extent);
+				u64 cl = btrfs_file_extent_num_bytes(src,
+								     extent);
+				if (btrfs_file_extent_compression(src,
+								  extent)) {
+					cs = 0;
+					cl = dl;
+				}
+				/* ds == 0 is a hole */
+				if (ds != 0) {
+					ret = btrfs_inc_extent_ref(trans, log,
+						   ds, dl,
+						   dst_path->nodes[0]->start,
+						   BTRFS_TREE_LOG_OBJECTID,
+						   trans->transid,
+						   ins_keys[i].objectid);
+					BUG_ON(ret);
+					ret = btrfs_lookup_csums_range(
+						   log->fs_info->csum_root,
+						   ds + cs, ds + cs + cl - 1,
+						   &ordered_sums);
+					BUG_ON(ret);
+				}
+			}
+		}
+		dst_path->slots[0]++;
+	}
+
+	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
+	btrfs_release_path(log, dst_path);
+	kfree(ins_data);
+
+	/*
+	 * we have to do this after the loop above: the csum items land in
+	 * the log tree too, and inserting them earlier would modify the
+	 * log tree while dst_path still pointed into it.
+	 */
+	while (!list_empty(&ordered_sums)) {
+		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
+						   struct btrfs_ordered_sum,
+						   list);
+		ret = btrfs_csum_file_blocks(trans, log, sums);
+		BUG_ON(ret);
+		list_del(&sums->list);
+		kfree(sums);
+	}
+	return 0;
+}
+
+/* log a single inode in the tree log.
+ * At least one parent directory for this inode must exist in the tree
+ * or be logged already.
+ *
+ * Any items from this inode changed by the current transaction are copied
+ * to the log tree.  An extra reference is taken on any extents in this
+ * file, allowing us to avoid a whole pile of corner cases around logging
+ * blocks that have been removed from the tree.
+ *
+ * See LOG_INODE_ALL and related defines for a description of what inode_only
+ * does.
+ *
+ * This handles both files and directories.
+ */
+static int __btrfs_log_inode(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root, struct inode *inode,
+			     int inode_only)
+{
+	struct btrfs_path *path;
+	struct btrfs_path *dst_path;
+	struct btrfs_key min_key;
+	struct btrfs_key max_key;
+	struct btrfs_root *log = root->log_root;
+	struct extent_buffer *src = NULL;
+	u32 size;
+	int ret;
+	int nritems;
+	int ins_start_slot = 0;
+	int ins_nr;
+
+	log = root->log_root;
+
+	path = btrfs_alloc_path();
+	dst_path = btrfs_alloc_path();
+
+	min_key.objectid = inode->i_ino;
+	min_key.type = BTRFS_INODE_ITEM_KEY;
+	min_key.offset = 0;
+
+	max_key.objectid = inode->i_ino;
+	if (inode_only == LOG_INODE_EXISTS || S_ISDIR(inode->i_mode))
+		max_key.type = BTRFS_XATTR_ITEM_KEY;
+	else
+		max_key.type = (u8)-1;
+	max_key.offset = (u64)-1;
+
+	/*
+	 * if this inode has already been logged and we're in inode_only
+	 * mode, we don't want to delete the things that have already
+	 * been written to the log.
+	 *
+	 * But, if the inode has been through an inode_only log,
+	 * the logged_trans field is not set.  This allows us to catch
+	 * any new names for this inode in the backrefs by logging it
+	 * again
+	 */
+	if (inode_only == LOG_INODE_EXISTS &&
+	    BTRFS_I(inode)->logged_trans == trans->transid) {
+		btrfs_free_path(path);
+		btrfs_free_path(dst_path);
+		goto out;
+	}
+	mutex_lock(&BTRFS_I(inode)->log_mutex);
+
+	/*
+	 * a brute force approach to making sure we get the most uptodate
+	 * copies of everything.
+	 */
+	if (S_ISDIR(inode->i_mode)) {
+		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
+
+		if (inode_only == LOG_INODE_EXISTS)
+			max_key_type = BTRFS_XATTR_ITEM_KEY;
+		ret = drop_objectid_items(trans, log, path,
+					  inode->i_ino, max_key_type);
+	} else {
+		ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
+	}
+	BUG_ON(ret);
+	path->keep_locks = 1;
+
+	while (1) {
+		ins_nr = 0;
+		ret = btrfs_search_forward(root, &min_key, &max_key,
+					   path, 0, trans->transid);
+		if (ret != 0)
+			break;
+again:
+		/* note, ins_nr might be > 0 here, cleanup outside the loop */
+		if (min_key.objectid != inode->i_ino)
+			break;
+		if (min_key.type > max_key.type)
+			break;
+
+		src = path->nodes[0];
+		size = btrfs_item_size_nr(src, path->slots[0]);
+		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
+			ins_nr++;
+			goto next_slot;
+		} else if (!ins_nr) {
+			ins_start_slot = path->slots[0];
+			ins_nr = 1;
+			goto next_slot;
+		}
+
+		ret = copy_items(trans, log, dst_path, src, ins_start_slot,
+				 ins_nr, inode_only);
+		BUG_ON(ret);
+		ins_nr = 1;
+		ins_start_slot = path->slots[0];
+next_slot:
+
+		nritems = btrfs_header_nritems(path->nodes[0]);
+		path->slots[0]++;
+		if (path->slots[0] < nritems) {
+			btrfs_item_key_to_cpu(path->nodes[0], &min_key,
+					      path->slots[0]);
+			goto again;
+		}
+		if (ins_nr) {
+			ret = copy_items(trans, log, dst_path, src,
+					 ins_start_slot,
+					 ins_nr, inode_only);
+			BUG_ON(ret);
+			ins_nr = 0;
+		}
+		btrfs_release_path(root, path);
+
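+		/* bump min_key past the last key we copied so the next
+		 * btrfs_search_forward() call makes progress
+		 */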
+		if (min_key.offset < (u64)-1)
+			min_key.offset++;
+		else if (min_key.type < (u8)-1)
+			min_key.type++;
+		else if (min_key.objectid < (u64)-1)
+			min_key.objectid++;
+		else
+			break;
+	}
+	if (ins_nr) {
+		ret = copy_items(trans, log, dst_path, src,
+				 ins_start_slot,
+				 ins_nr, inode_only);
+		BUG_ON(ret);
+		ins_nr = 0;
+	}
+	WARN_ON(ins_nr);
+	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
+		btrfs_release_path(root, path);
+		btrfs_release_path(log, dst_path);
+		BTRFS_I(inode)->log_dirty_trans = 0;
+		ret = log_directory_changes(trans, root, inode, path, dst_path);
+		BUG_ON(ret);
+	}
+	BTRFS_I(inode)->logged_trans = trans->transid;
+	mutex_unlock(&BTRFS_I(inode)->log_mutex);
+
+	btrfs_free_path(path);
+	btrfs_free_path(dst_path);
+
+	mutex_lock(&root->fs_info->tree_log_mutex);
+	ret = update_log_root(trans, log);
+	BUG_ON(ret);
+	mutex_unlock(&root->fs_info->tree_log_mutex);
+out:
+	return 0;
+}
+
+int btrfs_log_inode(struct btrfs_trans_handle *trans,
+		    struct btrfs_root *root, struct inode *inode,
+		    int inode_only)
+{
+	int ret;
+
+	start_log_trans(trans, root);
+	ret = __btrfs_log_inode(trans, root, inode, inode_only);
+	end_log_trans(root);
+	return ret;
+}
+
+/*
+ * helper function around btrfs_log_inode to make sure newly created
+ * parent directories also end up in the log.  A minimal inode and backref
+ * only logging is done of any parent directories that are older than
+ * the last committed transaction
+ */
+int btrfs_log_dentry(struct btrfs_trans_handle *trans,
+		    struct btrfs_root *root, struct dentry *dentry)
+{
+	int inode_only = LOG_INODE_ALL;
+	struct super_block *sb;
+	int ret;
+
+	start_log_trans(trans, root);
+	sb = dentry->d_inode->i_sb;
+	while (1) {
+		ret = __btrfs_log_inode(trans, root, dentry->d_inode,
+					inode_only);
+		BUG_ON(ret);
+		inode_only = LOG_INODE_EXISTS;
+
+		dentry = dentry->d_parent;
+		if (!dentry || !dentry->d_inode || sb != dentry->d_inode->i_sb)
+			break;
+
+		if (BTRFS_I(dentry->d_inode)->generation <=
+		    root->fs_info->last_trans_committed)
+			break;
+	}
+	end_log_trans(root);
+	return 0;
+}
+
+/*
+ * it is not safe to log dentry if the chunk root has added new
+ * chunks.  This returns 0 if the dentry was logged, and 1 otherwise.
+ * If this returns 1, you must commit the transaction to safely get your
+ * data on disk.
+ */
+int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root, struct dentry *dentry)
+{
+	u64 gen;
+	gen = root->fs_info->last_trans_new_blockgroup;
+	if (gen > root->fs_info->last_trans_committed)
+		return 1;
+	else
+		return btrfs_log_dentry(trans, root, dentry);
+}
+
+/*
+ * should be called during mount to recover and replay any log trees
+ * from the FS
+ */
+int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
+{
+	int ret;
+	struct btrfs_path *path;
+	struct btrfs_trans_handle *trans;
+	struct btrfs_key key;
+	struct btrfs_key found_key;
+	struct btrfs_key tmp_key;
+	struct btrfs_root *log;
+	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
+	u64 highest_inode;
+	struct walk_control wc = {
+		.process_func = process_one_buffer,
+		.stage = 0,
+	};
+
+	fs_info->log_root_recovering = 1;
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+
+	trans = btrfs_start_transaction(fs_info->tree_root, 1);
+
+	wc.trans = trans;
+	wc.pin = 1;
+
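+	/* first walk the log root tree itself with wc.pin set, so the
+	 * blocks it references are pinned before any replay is attempted
+	 */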
+	walk_log_tree(trans, log_root_tree, &wc);
+
+again:
+	key.objectid = BTRFS_TREE_LOG_OBJECTID;
+	key.offset = (u64)-1;
+	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
+
+	while (1) {
+		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
+		if (ret < 0)
+			break;
+		if (ret > 0) {
+			if (path->slots[0] == 0)
+				break;
+			path->slots[0]--;
+		}
+		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
+				      path->slots[0]);
+		btrfs_release_path(log_root_tree, path);
+		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
+			break;
+
+		log = btrfs_read_fs_root_no_radix(log_root_tree,
+						  &found_key);
+		BUG_ON(!log);
+
+		tmp_key.objectid = found_key.offset;
+		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
+		tmp_key.offset = (u64)-1;
+
+		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
+		BUG_ON(!wc.replay_dest);
+
+		wc.replay_dest->log_root = log;
+		btrfs_record_root_in_trans(wc.replay_dest);
+		ret = walk_log_tree(trans, log, &wc);
+		BUG_ON(ret);
+
+		if (wc.stage == LOG_WALK_REPLAY_ALL) {
+			ret = fixup_inode_link_counts(trans, wc.replay_dest,
+						      path);
+			BUG_ON(ret);
+		}
+		ret = btrfs_find_highest_inode(wc.replay_dest, &highest_inode);
+		if (ret == 0) {
+			wc.replay_dest->highest_inode = highest_inode;
+			wc.replay_dest->last_inode_alloc = highest_inode;
+		}
+
+		key.offset = found_key.offset - 1;
+		wc.replay_dest->log_root = NULL;
+		free_extent_buffer(log->node);
+		kfree(log);
+
+		if (found_key.offset == 0)
+			break;
+	}
+	btrfs_release_path(log_root_tree, path);
+
+	/* step one is to pin it all, step two is to replay just inodes */
+	if (wc.pin) {
+		wc.pin = 0;
+		wc.process_func = replay_one_buffer;
+		wc.stage = LOG_WALK_REPLAY_INODES;
+		goto again;
+	}
+	/* step three is to replay everything */
+	if (wc.stage < LOG_WALK_REPLAY_ALL) {
+		wc.stage++;
+		goto again;
+	}
+
+	btrfs_free_path(path);
+
+	free_extent_buffer(log_root_tree->node);
+	log_root_tree->log_root = NULL;
+	fs_info->log_root_recovering = 0;
+
+	/* step 4: commit the transaction, which also unpins the blocks */
+	btrfs_commit_transaction(trans, fs_info->tree_root);
+
+	kfree(log_root_tree);
+	return 0;
+}
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
new file mode 100644
index 0000000..b9409b3
--- /dev/null
+++ b/fs/btrfs/tree-log.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2008 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __TREE_LOG_
+#define __TREE_LOG_
+
+int btrfs_sync_log(struct btrfs_trans_handle *trans,
+		   struct btrfs_root *root);
+int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root);
+int btrfs_log_dentry(struct btrfs_trans_handle *trans,
+		    struct btrfs_root *root, struct dentry *dentry);
+int btrfs_recover_log_trees(struct btrfs_root *tree_root);
+int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
+			  struct btrfs_root *root, struct dentry *dentry);
+int btrfs_log_inode(struct btrfs_trans_handle *trans,
+		    struct btrfs_root *root, struct inode *inode,
+		    int inode_only);
+int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *root,
+				 const char *name, int name_len,
+				 struct inode *dir, u64 index);
+int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root,
+			       const char *name, int name_len,
+			       struct inode *inode, u64 dirid);
+#endif
diff --git a/fs/btrfs/version.h b/fs/btrfs/version.h
new file mode 100644
index 0000000..9bf3946
--- /dev/null
+++ b/fs/btrfs/version.h
@@ -0,0 +1,4 @@
+#ifndef __BTRFS_VERSION_H
+#define __BTRFS_VERSION_H
+#define BTRFS_BUILD_VERSION "Btrfs"
+#endif
diff --git a/fs/btrfs/version.sh b/fs/btrfs/version.sh
new file mode 100644
index 0000000..1ca1952
--- /dev/null
+++ b/fs/btrfs/version.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# determine-version -- report a useful version for releases
+#
+# Copyright 2008, Aron Griffis <agriffis@n01se.net>
+# Copyright 2008, Oracle
+# Released under the GNU GPLv2
+
+v="v0.16"
+
+which git &> /dev/null
+if [ $? == 0 ]; then
+    git branch >& /dev/null
+    if [ $? == 0 ]; then
+	    if head=`git rev-parse --verify HEAD 2>/dev/null`; then
+		if tag=`git describe --tags 2>/dev/null`; then
+		    v="$tag"
+		fi
+
+		# Are there uncommitted changes?
+		git update-index --refresh --unmerged > /dev/null
+		if git diff-index --name-only HEAD | \
+		    grep -v "^scripts/package" \
+		    | read dummy; then
+		    v="$v"-dirty
+		fi
+	    fi
+    fi
+fi
+
+echo "#ifndef __BUILD_VERSION" > .build-version.h
+echo "#define __BUILD_VERSION" >> .build-version.h
+echo "#define BTRFS_BUILD_VERSION \"Btrfs $v\"" >> .build-version.h
+echo "#endif" >> .build-version.h
+
+diff -q version.h .build-version.h >& /dev/null
+
+if [ $? == 0 ]; then
+    rm .build-version.h
+    exit 0
+fi
+
+mv .build-version.h version.h
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
new file mode 100644
index 0000000..3451e1c
--- /dev/null
+++ b/fs/btrfs/volumes.c
@@ -0,0 +1,3219 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+#include <linux/sched.h>
+#include <linux/bio.h>
+#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
+#include <linux/random.h>
+#include <linux/version.h>
+#include <asm/div64.h>
+#include "compat.h"
+#include "ctree.h"
+#include "extent_map.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "print-tree.h"
+#include "volumes.h"
+#include "async-thread.h"
+
+struct map_lookup {
+	u64 type;
+	int io_align;
+	int io_width;
+	int stripe_len;
+	int sector_size;
+	int num_stripes;
+	int sub_stripes;
+	struct btrfs_bio_stripe stripes[];
+};
+
+static int init_first_rw_device(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root,
+				struct btrfs_device *device);
+static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
+
+#define map_lookup_size(n) (sizeof(struct map_lookup) + \
+			    (sizeof(struct btrfs_bio_stripe) * (n)))
+
+static DEFINE_MUTEX(uuid_mutex);
+static LIST_HEAD(fs_uuids);
+
+void btrfs_lock_volumes(void)
+{
+	mutex_lock(&uuid_mutex);
+}
+
+void btrfs_unlock_volumes(void)
+{
+	mutex_unlock(&uuid_mutex);
+}
+
+static void lock_chunks(struct btrfs_root *root)
+{
+	mutex_lock(&root->fs_info->chunk_mutex);
+}
+
+static void unlock_chunks(struct btrfs_root *root)
+{
+	mutex_unlock(&root->fs_info->chunk_mutex);
+}
+
+static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
+{
+	struct btrfs_device *device;
+	WARN_ON(fs_devices->opened);
+	while (!list_empty(&fs_devices->devices)) {
+		device = list_entry(fs_devices->devices.next,
+				    struct btrfs_device, dev_list);
+		list_del(&device->dev_list);
+		kfree(device->name);
+		kfree(device);
+	}
+	kfree(fs_devices);
+}
+
+int btrfs_cleanup_fs_uuids(void)
+{
+	struct btrfs_fs_devices *fs_devices;
+
+	while (!list_empty(&fs_uuids)) {
+		fs_devices = list_entry(fs_uuids.next,
+					struct btrfs_fs_devices, list);
+		list_del(&fs_devices->list);
+		free_fs_devices(fs_devices);
+	}
+	return 0;
+}
+
+static noinline struct btrfs_device *__find_device(struct list_head *head,
+						   u64 devid, u8 *uuid)
+{
+	struct btrfs_device *dev;
+	struct list_head *cur;
+
+	list_for_each(cur, head) {
+		dev = list_entry(cur, struct btrfs_device, dev_list);
+		if (dev->devid == devid &&
+		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
+			return dev;
+		}
+	}
+	return NULL;
+}
+
+static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
+{
+	struct list_head *cur;
+	struct btrfs_fs_devices *fs_devices;
+
+	list_for_each(cur, &fs_uuids) {
+		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
+		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
+			return fs_devices;
+	}
+	return NULL;
+}
+
+/*
+ * we try to collect pending bios for a device so we don't get a large
+ * number of procs sending bios down to the same device.  This greatly
+ * improves the scheduler's ability to collect and merge the bios.
+ *
+ * But, it also turns into a long list of bios to process and that is sure
+ * to eventually make the worker thread block.  The solution here is to
+ * make some progress and then put this work struct back at the end of
+ * the list if the block device is congested.  This way, multiple devices
+ * can make progress from a single worker thread.
+ */
+static noinline int run_scheduled_bios(struct btrfs_device *device)
+{
+	struct bio *pending;
+	struct backing_dev_info *bdi;
+	struct btrfs_fs_info *fs_info;
+	struct bio *tail;
+	struct bio *cur;
+	int again = 0;
+	unsigned long num_run = 0;
+	unsigned long limit;
+
+	bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
+	fs_info = device->dev_root->fs_info;
+	limit = btrfs_async_submit_limit(fs_info);
+	limit = limit * 2 / 3;
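+	/* async submitters throttled against this limit are woken below
+	 * once the in-flight bio count drops under two thirds of it
+	 */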
+
+loop:
+	spin_lock(&device->io_lock);
+
+	/* take all the bios off the list at once and process them
+	 * later on (without the lock held).  But, remember the
+	 * tail and other pointers so the bios can be properly reinserted
+	 * into the list if we hit congestion
+	 */
+	pending = device->pending_bios;
+	tail = device->pending_bio_tail;
+	WARN_ON(pending && !tail);
+	device->pending_bios = NULL;
+	device->pending_bio_tail = NULL;
+
+	/*
+	 * if pending was null this time around, no bios need processing
+	 * at all and we can stop.  Otherwise it'll loop back up again
+	 * and do an additional check so no bios are missed.
+	 *
+	 * device->running_pending is used to synchronize with the
+	 * schedule_bio code.
+	 */
+	if (pending) {
+		again = 1;
+		device->running_pending = 1;
+	} else {
+		again = 0;
+		device->running_pending = 0;
+	}
+	spin_unlock(&device->io_lock);
+
+	while (pending) {
+		cur = pending;
+		pending = pending->bi_next;
+		cur->bi_next = NULL;
+		atomic_dec(&fs_info->nr_async_bios);
+
+		if (atomic_read(&fs_info->nr_async_bios) < limit &&
+		    waitqueue_active(&fs_info->async_submit_wait))
+			wake_up(&fs_info->async_submit_wait);
+
+		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
+		bio_get(cur);
+		submit_bio(cur->bi_rw, cur);
+		bio_put(cur);
+		num_run++;
+
+		/*
+		 * we made progress, there is more work to do and the bdi
+		 * is now congested.  Back off and let other work structs
+		 * run instead
+		 */
+		if (pending && bdi_write_congested(bdi) &&
+		    fs_info->fs_devices->open_devices > 1) {
+			struct bio *old_head;
+
+			spin_lock(&device->io_lock);
+
+			old_head = device->pending_bios;
+			device->pending_bios = pending;
+			if (device->pending_bio_tail)
+				tail->bi_next = old_head;
+			else
+				device->pending_bio_tail = tail;
+			device->running_pending = 0;
+
+			spin_unlock(&device->io_lock);
+			btrfs_requeue_work(&device->work);
+			goto done;
+		}
+	}
+	if (again)
+		goto loop;
+done:
+	return 0;
+}
+
+static void pending_bios_fn(struct btrfs_work *work)
+{
+	struct btrfs_device *device;
+
+	device = container_of(work, struct btrfs_device, work);
+	run_scheduled_bios(device);
+}
+
+static noinline int device_list_add(const char *path,
+			   struct btrfs_super_block *disk_super,
+			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
+{
+	struct btrfs_device *device;
+	struct btrfs_fs_devices *fs_devices;
+	u64 found_transid = btrfs_super_generation(disk_super);
+
+	fs_devices = find_fsid(disk_super->fsid);
+	if (!fs_devices) {
+		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
+		if (!fs_devices)
+			return -ENOMEM;
+		INIT_LIST_HEAD(&fs_devices->devices);
+		INIT_LIST_HEAD(&fs_devices->alloc_list);
+		list_add(&fs_devices->list, &fs_uuids);
+		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
+		fs_devices->latest_devid = devid;
+		fs_devices->latest_trans = found_transid;
+		device = NULL;
+	} else {
+		device = __find_device(&fs_devices->devices, devid,
+				       disk_super->dev_item.uuid);
+	}
+	if (!device) {
+		if (fs_devices->opened)
+			return -EBUSY;
+
+		device = kzalloc(sizeof(*device), GFP_NOFS);
+		if (!device) {
+			/* we can safely leave the fs_devices entry around */
+			return -ENOMEM;
+		}
+		device->devid = devid;
+		device->work.func = pending_bios_fn;
+		memcpy(device->uuid, disk_super->dev_item.uuid,
+		       BTRFS_UUID_SIZE);
+		device->barriers = 1;
+		spin_lock_init(&device->io_lock);
+		device->name = kstrdup(path, GFP_NOFS);
+		if (!device->name) {
+			kfree(device);
+			return -ENOMEM;
+		}
+		INIT_LIST_HEAD(&device->dev_alloc_list);
+		list_add(&device->dev_list, &fs_devices->devices);
+		device->fs_devices = fs_devices;
+		fs_devices->num_devices++;
+	}
+
+	if (found_transid > fs_devices->latest_trans) {
+		fs_devices->latest_devid = devid;
+		fs_devices->latest_trans = found_transid;
+	}
+	*fs_devices_ret = fs_devices;
+	return 0;
+}
+
+static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
+{
+	struct btrfs_fs_devices *fs_devices;
+	struct btrfs_device *device;
+	struct btrfs_device *orig_dev;
+
+	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
+	if (!fs_devices)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&fs_devices->devices);
+	INIT_LIST_HEAD(&fs_devices->alloc_list);
+	INIT_LIST_HEAD(&fs_devices->list);
+	fs_devices->latest_devid = orig->latest_devid;
+	fs_devices->latest_trans = orig->latest_trans;
+	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
+
+	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
+		device = kzalloc(sizeof(*device), GFP_NOFS);
+		if (!device)
+			goto error;
+
+		device->name = kstrdup(orig_dev->name, GFP_NOFS);
+		if (!device->name)
+			goto error;
+
+		device->devid = orig_dev->devid;
+		device->work.func = pending_bios_fn;
+		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
+		device->barriers = 1;
+		spin_lock_init(&device->io_lock);
+		INIT_LIST_HEAD(&device->dev_list);
+		INIT_LIST_HEAD(&device->dev_alloc_list);
+
+		list_add(&device->dev_list, &fs_devices->devices);
+		device->fs_devices = fs_devices;
+		fs_devices->num_devices++;
+	}
+	return fs_devices;
+error:
+	free_fs_devices(fs_devices);
+	return ERR_PTR(-ENOMEM);
+}
+
+int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
+{
+	struct list_head *tmp;
+	struct list_head *cur;
+	struct btrfs_device *device;
+
+	mutex_lock(&uuid_mutex);
+again:
+	list_for_each_safe(cur, tmp, &fs_devices->devices) {
+		device = list_entry(cur, struct btrfs_device, dev_list);
+		if (device->in_fs_metadata)
+			continue;
+
+		if (device->bdev) {
+			close_bdev_exclusive(device->bdev, device->mode);
+			device->bdev = NULL;
+			fs_devices->open_devices--;
+		}
+		if (device->writeable) {
+			list_del_init(&device->dev_alloc_list);
+			device->writeable = 0;
+			fs_devices->rw_devices--;
+		}
+		list_del_init(&device->dev_list);
+		fs_devices->num_devices--;
+		kfree(device->name);
+		kfree(device);
+	}
+
+	if (fs_devices->seed) {
+		fs_devices = fs_devices->seed;
+		goto again;
+	}
+
+	mutex_unlock(&uuid_mutex);
+	return 0;
+}
+
+static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
+{
+	struct list_head *cur;
+	struct btrfs_device *device;
+
+	if (--fs_devices->opened > 0)
+		return 0;
+
+	list_for_each(cur, &fs_devices->devices) {
+		device = list_entry(cur, struct btrfs_device, dev_list);
+		if (device->bdev) {
+			close_bdev_exclusive(device->bdev, device->mode);
+			fs_devices->open_devices--;
+		}
+		if (device->writeable) {
+			list_del_init(&device->dev_alloc_list);
+			fs_devices->rw_devices--;
+		}
+
+		device->bdev = NULL;
+		device->writeable = 0;
+		device->in_fs_metadata = 0;
+	}
+	WARN_ON(fs_devices->open_devices);
+	WARN_ON(fs_devices->rw_devices);
+	fs_devices->opened = 0;
+	fs_devices->seeding = 0;
+
+	return 0;
+}
+
+int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
+{
+	struct btrfs_fs_devices *seed_devices = NULL;
+	int ret;
+
+	mutex_lock(&uuid_mutex);
+	ret = __btrfs_close_devices(fs_devices);
+	if (!fs_devices->opened) {
+		seed_devices = fs_devices->seed;
+		fs_devices->seed = NULL;
+	}
+	mutex_unlock(&uuid_mutex);
+
+	while (seed_devices) {
+		fs_devices = seed_devices;
+		seed_devices = fs_devices->seed;
+		__btrfs_close_devices(fs_devices);
+		free_fs_devices(fs_devices);
+	}
+	return ret;
+}
+
+static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
+				fmode_t flags, void *holder)
+{
+	struct block_device *bdev;
+	struct list_head *head = &fs_devices->devices;
+	struct list_head *cur;
+	struct btrfs_device *device;
+	struct block_device *latest_bdev = NULL;
+	struct buffer_head *bh;
+	struct btrfs_super_block *disk_super;
+	u64 latest_devid = 0;
+	u64 latest_transid = 0;
+	u64 devid;
+	int seeding = 1;
+	int ret = 0;
+
+	list_for_each(cur, head) {
+		device = list_entry(cur, struct btrfs_device, dev_list);
+		if (device->bdev)
+			continue;
+		if (!device->name)
+			continue;
+
+		bdev = open_bdev_exclusive(device->name, flags, holder);
+		if (IS_ERR(bdev)) {
+			printk(KERN_INFO "open %s failed\n", device->name);
+			goto error;
+		}
+		set_blocksize(bdev, 4096);
+
+		bh = btrfs_read_dev_super(bdev);
+		if (!bh)
+			goto error_close;
+
+		disk_super = (struct btrfs_super_block *)bh->b_data;
+		devid = le64_to_cpu(disk_super->dev_item.devid);
+		if (devid != device->devid)
+			goto error_brelse;
+
+		if (memcmp(device->uuid, disk_super->dev_item.uuid,
+			   BTRFS_UUID_SIZE))
+			goto error_brelse;
+
+		device->generation = btrfs_super_generation(disk_super);
+		if (!latest_transid || device->generation > latest_transid) {
+			latest_devid = devid;
+			latest_transid = device->generation;
+			latest_bdev = bdev;
+		}
+
+		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
+			device->writeable = 0;
+		} else {
+			device->writeable = !bdev_read_only(bdev);
+			seeding = 0;
+		}
+
+		device->bdev = bdev;
+		device->in_fs_metadata = 0;
+		device->mode = flags;
+
+		fs_devices->open_devices++;
+		if (device->writeable) {
+			fs_devices->rw_devices++;
+			list_add(&device->dev_alloc_list,
+				 &fs_devices->alloc_list);
+		}
+		continue;
+
+error_brelse:
+		brelse(bh);
+error_close:
+		close_bdev_exclusive(bdev, FMODE_READ);
+error:
+		continue;
+	}
+	if (fs_devices->open_devices == 0) {
+		ret = -EIO;
+		goto out;
+	}
+	fs_devices->seeding = seeding;
+	fs_devices->opened = 1;
+	fs_devices->latest_bdev = latest_bdev;
+	fs_devices->latest_devid = latest_devid;
+	fs_devices->latest_trans = latest_transid;
+	fs_devices->total_rw_bytes = 0;
+out:
+	return ret;
+}
+
+int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
+		       fmode_t flags, void *holder)
+{
+	int ret;
+
+	mutex_lock(&uuid_mutex);
+	if (fs_devices->opened) {
+		fs_devices->opened++;
+		ret = 0;
+	} else {
+		ret = __btrfs_open_devices(fs_devices, flags, holder);
+	}
+	mutex_unlock(&uuid_mutex);
+	return ret;
+}
+
+int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
+			  struct btrfs_fs_devices **fs_devices_ret)
+{
+	struct btrfs_super_block *disk_super;
+	struct block_device *bdev;
+	struct buffer_head *bh;
+	int ret;
+	u64 devid;
+	u64 transid;
+
+	mutex_lock(&uuid_mutex);
+
+	bdev = open_bdev_exclusive(path, flags, holder);
+
+	if (IS_ERR(bdev)) {
+		ret = PTR_ERR(bdev);
+		goto error;
+	}
+
+	ret = set_blocksize(bdev, 4096);
+	if (ret)
+		goto error_close;
+	bh = btrfs_read_dev_super(bdev);
+	if (!bh) {
+		ret = -EIO;
+		goto error_close;
+	}
+	disk_super = (struct btrfs_super_block *)bh->b_data;
+	devid = le64_to_cpu(disk_super->dev_item.devid);
+	transid = btrfs_super_generation(disk_super);
+	if (disk_super->label[0])
+		printk(KERN_INFO "device label %s ", disk_super->label);
+	else {
+		/* FIXME, make a real uuid parser */
+		printk(KERN_INFO "device fsid %llx-%llx ",
+		       *(unsigned long long *)disk_super->fsid,
+		       *(unsigned long long *)(disk_super->fsid + 8));
+	}
+	printk(KERN_INFO "devid %llu transid %llu %s\n",
+	       (unsigned long long)devid, (unsigned long long)transid, path);
+	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
+
+	brelse(bh);
+error_close:
+	close_bdev_exclusive(bdev, flags);
+error:
+	mutex_unlock(&uuid_mutex);
+	return ret;
+}
+
+/*
+ * this uses a pretty simple search; the expectation is that it is
+ * called very infrequently and that a given device has a small number
+ * of extents
+ */
+static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
+					 struct btrfs_device *device,
+					 u64 num_bytes, u64 *start)
+{
+	struct btrfs_key key;
+	struct btrfs_root *root = device->dev_root;
+	struct btrfs_dev_extent *dev_extent = NULL;
+	struct btrfs_path *path;
+	u64 hole_size = 0;
+	u64 last_byte = 0;
+	u64 search_start = 0;
+	u64 search_end = device->total_bytes;
+	int ret;
+	int slot = 0;
+	int start_found;
+	struct extent_buffer *l;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+	path->reada = 2;
+	start_found = 0;
+
+	/* FIXME use last free of some kind */
+
+	/* we don't want to overwrite the superblock on the drive,
+	 * so we make sure to start at an offset of at least 1MB
+	 */
+	search_start = max((u64)1024 * 1024, search_start);
+
+	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
+		search_start = max(root->fs_info->alloc_start, search_start);
+
+	key.objectid = device->devid;
+	key.offset = search_start;
+	key.type = BTRFS_DEV_EXTENT_KEY;
+	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto error;
+	ret = btrfs_previous_item(root, path, 0, key.type);
+	if (ret < 0)
+		goto error;
+	l = path->nodes[0];
+	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
+	while (1) {
+		l = path->nodes[0];
+		slot = path->slots[0];
+		if (slot >= btrfs_header_nritems(l)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret == 0)
+				continue;
+			if (ret < 0)
+				goto error;
+no_more_items:
+			if (!start_found) {
+				if (search_start >= search_end) {
+					ret = -ENOSPC;
+					goto error;
+				}
+				*start = search_start;
+				start_found = 1;
+				goto check_pending;
+			}
+			*start = last_byte > search_start ?
+				last_byte : search_start;
+			if (search_end <= *start) {
+				ret = -ENOSPC;
+				goto error;
+			}
+			goto check_pending;
+		}
+		btrfs_item_key_to_cpu(l, &key, slot);
+
+		if (key.objectid < device->devid)
+			goto next;
+
+		if (key.objectid > device->devid)
+			goto no_more_items;
+
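+		/* any gap between the end of the previous extent and this
+		 * key's offset is a candidate hole for the new extent
+		 */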
+		if (key.offset >= search_start && key.offset > last_byte &&
+		    start_found) {
+			if (last_byte < search_start)
+				last_byte = search_start;
+			hole_size = key.offset - last_byte;
+			if (key.offset > last_byte &&
+			    hole_size >= num_bytes) {
+				*start = last_byte;
+				goto check_pending;
+			}
+		}
+		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
+			goto next;
+
+		start_found = 1;
+		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
+		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
+next:
+		path->slots[0]++;
+		cond_resched();
+	}
+check_pending:
+	/* we have to make sure we didn't find an extent that has already
+	 * been allocated by the map tree or the original allocation
+	 */
+	BUG_ON(*start < search_start);
+
+	if (*start + num_bytes > search_end) {
+		ret = -ENOSPC;
+		goto error;
+	}
+	/* check for pending inserts here */
+	ret = 0;
+
+error:
+	btrfs_free_path(path);
+	return ret;
+}
+
+static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
+			  struct btrfs_device *device,
+			  u64 start)
+{
+	int ret;
+	struct btrfs_path *path;
+	struct btrfs_root *root = device->dev_root;
+	struct btrfs_key key;
+	struct btrfs_key found_key;
+	struct extent_buffer *leaf = NULL;
+	struct btrfs_dev_extent *extent = NULL;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	key.objectid = device->devid;
+	key.offset = start;
+	key.type = BTRFS_DEV_EXTENT_KEY;
+
+	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+	if (ret > 0) {
+		ret = btrfs_previous_item(root, path, key.objectid,
+					  BTRFS_DEV_EXTENT_KEY);
+		BUG_ON(ret);
+		leaf = path->nodes[0];
+		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+		extent = btrfs_item_ptr(leaf, path->slots[0],
+					struct btrfs_dev_extent);
+		BUG_ON(found_key.offset > start || found_key.offset +
+		       btrfs_dev_extent_length(leaf, extent) < start);
+		ret = 0;
+	} else if (ret == 0) {
+		leaf = path->nodes[0];
+		extent = btrfs_item_ptr(leaf, path->slots[0],
+					struct btrfs_dev_extent);
+	}
+	BUG_ON(ret);
+
+	if (device->bytes_used > 0)
+		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
+	ret = btrfs_del_item(trans, root, path);
+	BUG_ON(ret);
+
+	btrfs_free_path(path);
+	return ret;
+}
+
+int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
+			   struct btrfs_device *device,
+			   u64 chunk_tree, u64 chunk_objectid,
+			   u64 chunk_offset, u64 start, u64 num_bytes)
+{
+	int ret;
+	struct btrfs_path *path;
+	struct btrfs_root *root = device->dev_root;
+	struct btrfs_dev_extent *extent;
+	struct extent_buffer *leaf;
+	struct btrfs_key key;
+
+	WARN_ON(!device->in_fs_metadata);
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	key.objectid = device->devid;
+	key.offset = start;
+	key.type = BTRFS_DEV_EXTENT_KEY;
+	ret = btrfs_insert_empty_item(trans, root, path, &key,
+				      sizeof(*extent));
+	BUG_ON(ret);
+
+	leaf = path->nodes[0];
+	extent = btrfs_item_ptr(leaf, path->slots[0],
+				struct btrfs_dev_extent);
+	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
+	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
+	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
+
+	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
+		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
+		    BTRFS_UUID_SIZE);
+
+	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
+	btrfs_mark_buffer_dirty(leaf);
+	btrfs_free_path(path);
+	return ret;
+}
+
+static noinline int find_next_chunk(struct btrfs_root *root,
+				    u64 objectid, u64 *offset)
+{
+	struct btrfs_path *path;
+	int ret;
+	struct btrfs_key key;
+	struct btrfs_chunk *chunk;
+	struct btrfs_key found_key;
+
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+
+	key.objectid = objectid;
+	key.offset = (u64)-1;
+	key.type = BTRFS_CHUNK_ITEM_KEY;
+
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto error;
+
+	BUG_ON(ret == 0);
+
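+	/* offset (u64)-1 can never match exactly; step back to find the
+	 * last chunk item that precedes it, if there is one
+	 */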
+	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
+	if (ret) {
+		*offset = 0;
+	} else {
+		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
+				      path->slots[0]);
+		if (found_key.objectid != objectid)
+			*offset = 0;
+		else {
+			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
+					       struct btrfs_chunk);
+			*offset = found_key.offset +
+				btrfs_chunk_length(path->nodes[0], chunk);
+		}
+	}
+	ret = 0;
+error:
+	btrfs_free_path(path);
+	return ret;
+}
+
+static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
+{
+	int ret;
+	struct btrfs_key key;
+	struct btrfs_key found_key;
+	struct btrfs_path *path;
+
+	root = root->fs_info->chunk_root;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
+	key.type = BTRFS_DEV_ITEM_KEY;
+	key.offset = (u64)-1;
+
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto error;
+
+	BUG_ON(ret == 0);
+
+	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
+				  BTRFS_DEV_ITEM_KEY);
+	if (ret) {
+		*objectid = 1;
+	} else {
+		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
+				      path->slots[0]);
+		*objectid = found_key.offset + 1;
+	}
+	ret = 0;
+error:
+	btrfs_free_path(path);
+	return ret;
+}
+
+/*
+ * the device information is stored in the chunk root
+ * the btrfs_device struct should be fully filled in
+ */
+int btrfs_add_device(struct btrfs_trans_handle *trans,
+		     struct btrfs_root *root,
+		     struct btrfs_device *device)
+{
+	int ret;
+	struct btrfs_path *path;
+	struct btrfs_dev_item *dev_item;
+	struct extent_buffer *leaf;
+	struct btrfs_key key;
+	unsigned long ptr;
+
+	root = root->fs_info->chunk_root;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
+	key.type = BTRFS_DEV_ITEM_KEY;
+	key.offset = device->devid;
+
+	ret = btrfs_insert_empty_item(trans, root, path, &key,
+				      sizeof(*dev_item));
+	if (ret)
+		goto out;
+
+	leaf = path->nodes[0];
+	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
+
+	btrfs_set_device_id(leaf, dev_item, device->devid);
+	btrfs_set_device_generation(leaf, dev_item, 0);
+	btrfs_set_device_type(leaf, dev_item, device->type);
+	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
+	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
+	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
+	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
+	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
+	btrfs_set_device_group(leaf, dev_item, 0);
+	btrfs_set_device_seek_speed(leaf, dev_item, 0);
+	btrfs_set_device_bandwidth(leaf, dev_item, 0);
+	btrfs_set_device_start_offset(leaf, dev_item, 0);
+
+	ptr = (unsigned long)btrfs_device_uuid(dev_item);
+	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
+	ptr = (unsigned long)btrfs_device_fsid(dev_item);
+	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
+	btrfs_mark_buffer_dirty(leaf);
+
+	ret = 0;
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+static int btrfs_rm_dev_item(struct btrfs_root *root,
+			     struct btrfs_device *device)
+{
+	int ret;
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct btrfs_trans_handle *trans;
+
+	root = root->fs_info->chunk_root;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	trans = btrfs_start_transaction(root, 1);
+	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
+	key.type = BTRFS_DEV_ITEM_KEY;
+	key.offset = device->devid;
+	lock_chunks(root);
+
+	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+	if (ret < 0)
+		goto out;
+
+	if (ret > 0) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	ret = btrfs_del_item(trans, root, path);
+	if (ret)
+		goto out;
+out:
+	btrfs_free_path(path);
+	unlock_chunks(root);
+	btrfs_commit_transaction(trans, root);
+	return ret;
+}
+
+int btrfs_rm_device(struct btrfs_root *root, char *device_path)
+{
+	struct btrfs_device *device;
+	struct btrfs_device *next_device;
+	struct block_device *bdev;
+	struct buffer_head *bh = NULL;
+	struct btrfs_super_block *disk_super;
+	u64 all_avail;
+	u64 devid;
+	u64 num_devices;
+	u8 *dev_uuid;
+	int ret = 0;
+
+	mutex_lock(&uuid_mutex);
+	mutex_lock(&root->fs_info->volume_mutex);
+
+	all_avail = root->fs_info->avail_data_alloc_bits |
+		root->fs_info->avail_system_alloc_bits |
+		root->fs_info->avail_metadata_alloc_bits;
+
+	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
+	    root->fs_info->fs_devices->rw_devices <= 4) {
+		printk(KERN_ERR "btrfs: unable to go below four devices "
+		       "on raid10\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
+	    root->fs_info->fs_devices->rw_devices <= 2) {
+		printk(KERN_ERR "btrfs: unable to go below two "
+		       "devices on raid1\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (strcmp(device_path, "missing") == 0) {
+		struct list_head *cur;
+		struct list_head *devices;
+		struct btrfs_device *tmp;
+
+		device = NULL;
+		devices = &root->fs_info->fs_devices->devices;
+		list_for_each(cur, devices) {
+			tmp = list_entry(cur, struct btrfs_device, dev_list);
+			if (tmp->in_fs_metadata && !tmp->bdev) {
+				device = tmp;
+				break;
+			}
+		}
+		bdev = NULL;
+		bh = NULL;
+		disk_super = NULL;
+		if (!device) {
+			printk(KERN_ERR "btrfs: no missing devices found to "
+			       "remove\n");
+			goto out;
+		}
+	} else {
+		bdev = open_bdev_exclusive(device_path, FMODE_READ,
+				      root->fs_info->bdev_holder);
+		if (IS_ERR(bdev)) {
+			ret = PTR_ERR(bdev);
+			goto out;
+		}
+
+		set_blocksize(bdev, 4096);
+		bh = btrfs_read_dev_super(bdev);
+		if (!bh) {
+			ret = -EIO;
+			goto error_close;
+		}
+		disk_super = (struct btrfs_super_block *)bh->b_data;
+		devid = le64_to_cpu(disk_super->dev_item.devid);
+		dev_uuid = disk_super->dev_item.uuid;
+		device = btrfs_find_device(root, devid, dev_uuid,
+					   disk_super->fsid);
+		if (!device) {
+			ret = -ENOENT;
+			goto error_brelse;
+		}
+	}
+
+	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
+		printk(KERN_ERR "btrfs: unable to remove the only writeable "
+		       "device\n");
+		ret = -EINVAL;
+		goto error_brelse;
+	}
+
+	if (device->writeable) {
+		list_del_init(&device->dev_alloc_list);
+		root->fs_info->fs_devices->rw_devices--;
+	}
+
+	ret = btrfs_shrink_device(device, 0);
+	if (ret)
+		goto error_brelse;
+
+	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
+	if (ret)
+		goto error_brelse;
+
+	device->in_fs_metadata = 0;
+	list_del_init(&device->dev_list);
+	device->fs_devices->num_devices--;
+
+	next_device = list_entry(root->fs_info->fs_devices->devices.next,
+				 struct btrfs_device, dev_list);
+	if (device->bdev == root->fs_info->sb->s_bdev)
+		root->fs_info->sb->s_bdev = next_device->bdev;
+	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
+		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
+
+	if (device->bdev) {
+		close_bdev_exclusive(device->bdev, device->mode);
+		device->bdev = NULL;
+		device->fs_devices->open_devices--;
+	}
+
+	num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
+	btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);
+
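+	/* if that was the last open device in its fs_devices (normally a
+	 * fully removed seed), unlink it from the seed chain and free it
+	 */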
+	if (device->fs_devices->open_devices == 0) {
+		struct btrfs_fs_devices *fs_devices;
+		fs_devices = root->fs_info->fs_devices;
+		while (fs_devices) {
+			if (fs_devices->seed == device->fs_devices)
+				break;
+			fs_devices = fs_devices->seed;
+		}
+		fs_devices->seed = device->fs_devices->seed;
+		device->fs_devices->seed = NULL;
+		__btrfs_close_devices(device->fs_devices);
+		free_fs_devices(device->fs_devices);
+	}
+
+	/*
+	 * at this point, the device is zero sized.  We want to
+	 * remove it from the devices list and zero out the old super
+	 */
+	if (device->writeable) {
+		/* make sure this device isn't detected as part of
+		 * the FS anymore
+		 */
+		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
+		set_buffer_dirty(bh);
+		sync_dirty_buffer(bh);
+	}
+
+	kfree(device->name);
+	kfree(device);
+	ret = 0;
+
+error_brelse:
+	brelse(bh);
+error_close:
+	if (bdev)
+		close_bdev_exclusive(bdev, FMODE_READ);
+out:
+	mutex_unlock(&root->fs_info->volume_mutex);
+	mutex_unlock(&uuid_mutex);
+	return ret;
+}
+
+/*
+ * does all the dirty work required for changing the file system's UUID.
+ */
+static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root)
+{
+	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+	struct btrfs_fs_devices *old_devices;
+	struct btrfs_fs_devices *seed_devices;
+	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
+	struct btrfs_device *device;
+	u64 super_flags;
+
+	BUG_ON(!mutex_is_locked(&uuid_mutex));
+	if (!fs_devices->seeding)
+		return -EINVAL;
+
+	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
+	if (!seed_devices)
+		return -ENOMEM;
+
+	old_devices = clone_fs_devices(fs_devices);
+	if (IS_ERR(old_devices)) {
+		kfree(seed_devices);
+		return PTR_ERR(old_devices);
+	}
+
+	list_add(&old_devices->list, &fs_uuids);
+
+	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
+	seed_devices->opened = 1;
+	INIT_LIST_HEAD(&seed_devices->devices);
+	INIT_LIST_HEAD(&seed_devices->alloc_list);
+	list_splice_init(&fs_devices->devices, &seed_devices->devices);
+	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
+	list_for_each_entry(device, &seed_devices->devices, dev_list) {
+		device->fs_devices = seed_devices;
+	}
+
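+	/* the existing devices now belong to the read-only seed copy;
+	 * fs_devices starts over empty and is given a fresh fsid below
+	 */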
+	fs_devices->seeding = 0;
+	fs_devices->num_devices = 0;
+	fs_devices->open_devices = 0;
+	fs_devices->seed = seed_devices;
+
+	generate_random_uuid(fs_devices->fsid);
+	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
+	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
+	super_flags = btrfs_super_flags(disk_super) &
+		      ~BTRFS_SUPER_FLAG_SEEDING;
+	btrfs_set_super_flags(disk_super, super_flags);
+
+	return 0;
+}
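+
+/*
+ * After the sprout above, all of the original (seed) devices sit on the
+ * newly allocated seed_devices list, the mounted filesystem carries a
+ * freshly generated fsid, and the old fsid stays registered in fs_uuids
+ * through the cloned old_devices entry.
+ */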
+
+/*
+ * store the expected generation for seed devices in device items.
+ */
+static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root)
+{
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	struct btrfs_dev_item *dev_item;
+	struct btrfs_device *device;
+	struct btrfs_key key;
+	u8 fs_uuid[BTRFS_UUID_SIZE];
+	u8 dev_uuid[BTRFS_UUID_SIZE];
+	u64 devid;
+	int ret;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	root = root->fs_info->chunk_root;
+	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
+	key.offset = 0;
+	key.type = BTRFS_DEV_ITEM_KEY;
+
+	while (1) {
+		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+		if (ret < 0)
+			goto error;
+
+		leaf = path->nodes[0];
+next_slot:
+		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret > 0)
+				break;
+			if (ret < 0)
+				goto error;
+			leaf = path->nodes[0];
+			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+			btrfs_release_path(root, path);
+			continue;
+		}
+
+		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
+		    key.type != BTRFS_DEV_ITEM_KEY)
+			break;
+
+		dev_item = btrfs_item_ptr(leaf, path->slots[0],
+					  struct btrfs_dev_item);
+		devid = btrfs_device_id(leaf, dev_item);
+		read_extent_buffer(leaf, dev_uuid,
+				   (unsigned long)btrfs_device_uuid(dev_item),
+				   BTRFS_UUID_SIZE);
+		read_extent_buffer(leaf, fs_uuid,
+				   (unsigned long)btrfs_device_fsid(dev_item),
+				   BTRFS_UUID_SIZE);
+		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
+		BUG_ON(!device);
+
+		if (device->fs_devices->seeding) {
+			btrfs_set_device_generation(leaf, dev_item,
+						    device->generation);
+			btrfs_mark_buffer_dirty(leaf);
+		}
+
+		path->slots[0]++;
+		goto next_slot;
+	}
+	ret = 0;
+error:
+	btrfs_free_path(path);
+	return ret;
+}
+
+int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_device *device;
+	struct block_device *bdev;
+	struct list_head *cur;
+	struct list_head *devices;
+	struct super_block *sb = root->fs_info->sb;
+	u64 total_bytes;
+	int seeding_dev = 0;
+	int ret = 0;
+
+	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
+		return -EINVAL;
+
+	bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
+	if (IS_ERR(bdev))
+		return PTR_ERR(bdev);
+
+	if (root->fs_info->fs_devices->seeding) {
+		seeding_dev = 1;
+		down_write(&sb->s_umount);
+		mutex_lock(&uuid_mutex);
+	}
+
+	filemap_write_and_wait(bdev->bd_inode->i_mapping);
+	mutex_lock(&root->fs_info->volume_mutex);
+
+	devices = &root->fs_info->fs_devices->devices;
+	list_for_each(cur, devices) {
+		device = list_entry(cur, struct btrfs_device, dev_list);
+		if (device->bdev == bdev) {
+			ret = -EEXIST;
+			goto error;
+		}
+	}
+
+	device = kzalloc(sizeof(*device), GFP_NOFS);
+	if (!device) {
+		/* we can safely leave the fs_devices entry around */
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	device->name = kstrdup(device_path, GFP_NOFS);
+	if (!device->name) {
+		kfree(device);
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ret = find_next_devid(root, &device->devid);
+	if (ret) {
+		kfree(device);
+		goto error;
+	}
+
+	trans = btrfs_start_transaction(root, 1);
+	lock_chunks(root);
+
+	device->barriers = 1;
+	device->writeable = 1;
+	device->work.func = pending_bios_fn;
+	generate_random_uuid(device->uuid);
+	spin_lock_init(&device->io_lock);
+	device->generation = trans->transid;
+	device->io_width = root->sectorsize;
+	device->io_align = root->sectorsize;
+	device->sector_size = root->sectorsize;
+	device->total_bytes = i_size_read(bdev->bd_inode);
+	device->dev_root = root->fs_info->dev_root;
+	device->bdev = bdev;
+	device->in_fs_metadata = 1;
+	device->mode = 0;
+	set_blocksize(device->bdev, 4096);
+
+	if (seeding_dev) {
+		sb->s_flags &= ~MS_RDONLY;
+		ret = btrfs_prepare_sprout(trans, root);
+		BUG_ON(ret);
+	}
+
+	device->fs_devices = root->fs_info->fs_devices;
+	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
+	list_add(&device->dev_alloc_list,
+		 &root->fs_info->fs_devices->alloc_list);
+	root->fs_info->fs_devices->num_devices++;
+	root->fs_info->fs_devices->open_devices++;
+	root->fs_info->fs_devices->rw_devices++;
+	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
+
+	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
+	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
+				    total_bytes + device->total_bytes);
+
+	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
+	btrfs_set_super_num_devices(&root->fs_info->super_copy,
+				    total_bytes + 1);
+
+	if (seeding_dev) {
+		ret = init_first_rw_device(trans, root, device);
+		BUG_ON(ret);
+		ret = btrfs_finish_sprout(trans, root);
+		BUG_ON(ret);
+	} else {
+		ret = btrfs_add_device(trans, root, device);
+	}
+
+	unlock_chunks(root);
+	btrfs_commit_transaction(trans, root);
+
+	if (seeding_dev) {
+		mutex_unlock(&uuid_mutex);
+		up_write(&sb->s_umount);
+
+		ret = btrfs_relocate_sys_chunks(root);
+		BUG_ON(ret);
+	}
+out:
+	mutex_unlock(&root->fs_info->volume_mutex);
+	return ret;
+error:
+	close_bdev_exclusive(bdev, 0);
+	if (seeding_dev) {
+		mutex_unlock(&uuid_mutex);
+		up_write(&sb->s_umount);
+	}
+	goto out;
+}
+
+static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
+					struct btrfs_device *device)
+{
+	int ret;
+	struct btrfs_path *path;
+	struct btrfs_root *root;
+	struct btrfs_dev_item *dev_item;
+	struct extent_buffer *leaf;
+	struct btrfs_key key;
+
+	root = device->dev_root->fs_info->chunk_root;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
+	key.type = BTRFS_DEV_ITEM_KEY;
+	key.offset = device->devid;
+
+	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+	if (ret < 0)
+		goto out;
+
+	if (ret > 0) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	leaf = path->nodes[0];
+	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
+
+	btrfs_set_device_id(leaf, dev_item, device->devid);
+	btrfs_set_device_type(leaf, dev_item, device->type);
+	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
+	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
+	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
+	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
+	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
+	btrfs_mark_buffer_dirty(leaf);
+
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
+		      struct btrfs_device *device, u64 new_size)
+{
+	struct btrfs_super_block *super_copy =
+		&device->dev_root->fs_info->super_copy;
+	u64 old_total = btrfs_super_total_bytes(super_copy);
+	u64 diff = new_size - device->total_bytes;
+
+	if (!device->writeable)
+		return -EACCES;
+	if (new_size <= device->total_bytes)
+		return -EINVAL;
+
+	btrfs_set_super_total_bytes(super_copy, old_total + diff);
+	device->fs_devices->total_rw_bytes += diff;
+
+	device->total_bytes = new_size;
+	return btrfs_update_device(trans, device);
+}
+
+int btrfs_grow_device(struct btrfs_trans_handle *trans,
+		      struct btrfs_device *device, u64 new_size)
+{
+	int ret;
+	lock_chunks(device->dev_root);
+	ret = __btrfs_grow_device(trans, device, new_size);
+	unlock_chunks(device->dev_root);
+	return ret;
+}
+
+static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root,
+			    u64 chunk_tree, u64 chunk_objectid,
+			    u64 chunk_offset)
+{
+	int ret;
+	struct btrfs_path *path;
+	struct btrfs_key key;
+
+	root = root->fs_info->chunk_root;
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	key.objectid = chunk_objectid;
+	key.offset = chunk_offset;
+	key.type = BTRFS_CHUNK_ITEM_KEY;
+
+	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+	BUG_ON(ret);
+
+	ret = btrfs_del_item(trans, root, path);
+	BUG_ON(ret);
+
+	btrfs_free_path(path);
+	return 0;
+}
+
+static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
+			       u64 chunk_offset)
+{
+	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
+	struct btrfs_disk_key *disk_key;
+	struct btrfs_chunk *chunk;
+	u8 *ptr;
+	int ret = 0;
+	u32 num_stripes;
+	u32 array_size;
+	u32 len = 0;
+	u32 cur;
+	struct btrfs_key key;
+
+	array_size = btrfs_super_sys_array_size(super_copy);
+
+	ptr = super_copy->sys_chunk_array;
+	cur = 0;
+
+	while (cur < array_size) {
+		disk_key = (struct btrfs_disk_key *)ptr;
+		btrfs_disk_key_to_cpu(&key, disk_key);
+
+		len = sizeof(*disk_key);
+
+		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
+			chunk = (struct btrfs_chunk *)(ptr + len);
+			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
+			len += btrfs_chunk_item_size(num_stripes);
+		} else {
+			ret = -EIO;
+			break;
+		}
+		if (key.objectid == chunk_objectid &&
+		    key.offset == chunk_offset) {
+			memmove(ptr, ptr + len, array_size - (cur + len));
+			array_size -= len;
+			btrfs_set_super_sys_array_size(super_copy, array_size);
+		} else {
+			ptr += len;
+			cur += len;
+		}
+	}
+	return ret;
+}
+
+static int btrfs_relocate_chunk(struct btrfs_root *root,
+			 u64 chunk_tree, u64 chunk_objectid,
+			 u64 chunk_offset)
+{
+	struct extent_map_tree *em_tree;
+	struct btrfs_root *extent_root;
+	struct btrfs_trans_handle *trans;
+	struct extent_map *em;
+	struct map_lookup *map;
+	int ret;
+	int i;
+
+	printk(KERN_INFO "btrfs relocating chunk %llu\n",
+	       (unsigned long long)chunk_offset);
+	root = root->fs_info->chunk_root;
+	extent_root = root->fs_info->extent_root;
+	em_tree = &root->fs_info->mapping_tree.map_tree;
+
+	/* step one, relocate all the extents inside this chunk */
+	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
+	BUG_ON(ret);
+
+	trans = btrfs_start_transaction(root, 1);
+	BUG_ON(!trans);
+
+	lock_chunks(root);
+
+	/*
+	 * step two, delete the device extents and the
+	 * chunk tree entries
+	 */
+	spin_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
+	spin_unlock(&em_tree->lock);
+
+	BUG_ON(em->start > chunk_offset ||
+	       em->start + em->len < chunk_offset);
+	map = (struct map_lookup *)em->bdev;
+
+	for (i = 0; i < map->num_stripes; i++) {
+		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
+					    map->stripes[i].physical);
+		BUG_ON(ret);
+
+		if (map->stripes[i].dev) {
+			ret = btrfs_update_device(trans, map->stripes[i].dev);
+			BUG_ON(ret);
+		}
+	}
+	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
+			       chunk_offset);
+
+	BUG_ON(ret);
+
+	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
+		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
+		BUG_ON(ret);
+	}
+
+	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
+	BUG_ON(ret);
+
+	spin_lock(&em_tree->lock);
+	remove_extent_mapping(em_tree, em);
+	spin_unlock(&em_tree->lock);
+
+	kfree(map);
+	em->bdev = NULL;
+
+	/* once for the tree */
+	free_extent_map(em);
+	/* once for us */
+	free_extent_map(em);
+
+	unlock_chunks(root);
+	btrfs_end_transaction(trans, root);
+	return 0;
+}
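+
+/*
+ * In short, relocating a chunk is two steps: btrfs_relocate_block_group()
+ * migrates the live data out of the chunk, and then, under lock_chunks(),
+ * the per-device extents, the chunk item, the sys_chunk_array entry (for
+ * SYSTEM chunks), the block group and the extent mapping are all removed.
+ */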
+
+static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
+{
+	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	struct btrfs_chunk *chunk;
+	struct btrfs_key key;
+	struct btrfs_key found_key;
+	u64 chunk_tree = chunk_root->root_key.objectid;
+	u64 chunk_type;
+	int ret;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
+	key.offset = (u64)-1;
+	key.type = BTRFS_CHUNK_ITEM_KEY;
+
+	while (1) {
+		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
+		if (ret < 0)
+			goto error;
+		BUG_ON(ret == 0);
+
+		ret = btrfs_previous_item(chunk_root, path, key.objectid,
+					  key.type);
+		if (ret < 0)
+			goto error;
+		if (ret > 0)
+			break;
+
+		leaf = path->nodes[0];
+		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+
+		chunk = btrfs_item_ptr(leaf, path->slots[0],
+				       struct btrfs_chunk);
+		chunk_type = btrfs_chunk_type(leaf, chunk);
+		btrfs_release_path(chunk_root, path);
+
+		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
+			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
+						   found_key.objectid,
+						   found_key.offset);
+			BUG_ON(ret);
+		}
+
+		if (found_key.offset == 0)
+			break;
+		key.offset = found_key.offset - 1;
+	}
+	ret = 0;
+error:
+	btrfs_free_path(path);
+	return ret;
+}
+
+static u64 div_factor(u64 num, int factor)
+{
+	if (factor == 10)
+		return num;
+	num *= factor;
+	do_div(num, 10);
+	return num;
+}
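+
+/*
+ * Example: div_factor(num, factor) computes num * factor / 10, so
+ * div_factor(x, 1) is roughly 10% of x and div_factor(x, 10) is x itself.
+ * __btrfs_alloc_chunk() uses it below to cap a chunk at 10% of the
+ * writeable space, and btrfs_balance() uses it to pick size_to_free.
+ */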
+
+int btrfs_balance(struct btrfs_root *dev_root)
+{
+	int ret;
+	struct list_head *cur;
+	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
+	struct btrfs_device *device;
+	u64 old_size;
+	u64 size_to_free;
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct btrfs_chunk *chunk;
+	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
+	struct btrfs_trans_handle *trans;
+	struct btrfs_key found_key;
+
+	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
+		return -EROFS;
+
+	mutex_lock(&dev_root->fs_info->volume_mutex);
+	dev_root = dev_root->fs_info->dev_root;
+
+	/* step one, make some room on all the devices */
+	list_for_each(cur, devices) {
+		device = list_entry(cur, struct btrfs_device, dev_list);
+		old_size = device->total_bytes;
+		size_to_free = div_factor(old_size, 1);
+		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
+		if (!device->writeable ||
+		    device->total_bytes - device->bytes_used > size_to_free)
+			continue;
+
+		ret = btrfs_shrink_device(device, old_size - size_to_free);
+		BUG_ON(ret);
+
+		trans = btrfs_start_transaction(dev_root, 1);
+		BUG_ON(!trans);
+
+		ret = btrfs_grow_device(trans, device, old_size);
+		BUG_ON(ret);
+
+		btrfs_end_transaction(trans, dev_root);
+	}
+
+	/* step two, relocate all the chunks */
+	path = btrfs_alloc_path();
+	BUG_ON(!path);
+
+	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
+	key.offset = (u64)-1;
+	key.type = BTRFS_CHUNK_ITEM_KEY;
+
+	while (1) {
+		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
+		if (ret < 0)
+			goto error;
+
+		/*
+		 * this shouldn't happen, it means the last relocate
+		 * failed
+		 */
+		if (ret == 0)
+			break;
+
+		ret = btrfs_previous_item(chunk_root, path, 0,
+					  BTRFS_CHUNK_ITEM_KEY);
+		if (ret)
+			break;
+
+		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
+				      path->slots[0]);
+		if (found_key.objectid != key.objectid)
+			break;
+
+		chunk = btrfs_item_ptr(path->nodes[0],
+				       path->slots[0],
+				       struct btrfs_chunk);
+		key.offset = found_key.offset;
+		/* chunk zero is special */
+		if (key.offset == 0)
+			break;
+
+		btrfs_release_path(chunk_root, path);
+		ret = btrfs_relocate_chunk(chunk_root,
+					   chunk_root->root_key.objectid,
+					   found_key.objectid,
+					   found_key.offset);
+		BUG_ON(ret);
+	}
+	ret = 0;
+error:
+	btrfs_free_path(path);
+	mutex_unlock(&dev_root->fs_info->volume_mutex);
+	return ret;
+}
+
+/*
+ * shrinking a device means finding all of the device extents past
+ * the new size, and then following the back refs to the chunks.
+ * The chunk relocation code actually frees the device extent.
+ */
+int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *root = device->dev_root;
+	struct btrfs_dev_extent *dev_extent = NULL;
+	struct btrfs_path *path;
+	u64 length;
+	u64 chunk_tree;
+	u64 chunk_objectid;
+	u64 chunk_offset;
+	int ret;
+	int slot;
+	struct extent_buffer *l;
+	struct btrfs_key key;
+	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
+	u64 old_total = btrfs_super_total_bytes(super_copy);
+	u64 diff = device->total_bytes - new_size;
+
+	if (new_size >= device->total_bytes)
+		return -EINVAL;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	trans = btrfs_start_transaction(root, 1);
+	if (!trans) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	path->reada = 2;
+
+	lock_chunks(root);
+
+	device->total_bytes = new_size;
+	if (device->writeable)
+		device->fs_devices->total_rw_bytes -= diff;
+	ret = btrfs_update_device(trans, device);
+	if (ret) {
+		unlock_chunks(root);
+		btrfs_end_transaction(trans, root);
+		goto done;
+	}
+	WARN_ON(diff > old_total);
+	btrfs_set_super_total_bytes(super_copy, old_total - diff);
+	unlock_chunks(root);
+	btrfs_end_transaction(trans, root);
+
+	key.objectid = device->devid;
+	key.offset = (u64)-1;
+	key.type = BTRFS_DEV_EXTENT_KEY;
+
+	while (1) {
+		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+		if (ret < 0)
+			goto done;
+
+		ret = btrfs_previous_item(root, path, 0, key.type);
+		if (ret < 0)
+			goto done;
+		if (ret) {
+			ret = 0;
+			goto done;
+		}
+
+		l = path->nodes[0];
+		slot = path->slots[0];
+		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
+
+		if (key.objectid != device->devid)
+			goto done;
+
+		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
+		length = btrfs_dev_extent_length(l, dev_extent);
+
+		if (key.offset + length <= new_size)
+			goto done;
+
+		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
+		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
+		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
+		btrfs_release_path(root, path);
+
+		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
+					   chunk_offset);
+		if (ret)
+			goto done;
+	}
+
+done:
+	btrfs_free_path(path);
+	return ret;
+}
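+
+/*
+ * A sketch of the shrink above: total_bytes and the superblock total are
+ * reduced up front, then the device's dev extents are walked from the end
+ * of the device downward and every chunk owning an extent that ends past
+ * new_size is relocated away via btrfs_relocate_chunk().
+ */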
+
+static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
+			   struct btrfs_root *root,
+			   struct btrfs_key *key,
+			   struct btrfs_chunk *chunk, int item_size)
+{
+	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
+	struct btrfs_disk_key disk_key;
+	u32 array_size;
+	u8 *ptr;
+
+	array_size = btrfs_super_sys_array_size(super_copy);
+	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
+		return -EFBIG;
+
+	ptr = super_copy->sys_chunk_array + array_size;
+	btrfs_cpu_key_to_disk(&disk_key, key);
+	memcpy(ptr, &disk_key, sizeof(disk_key));
+	ptr += sizeof(disk_key);
+	memcpy(ptr, chunk, item_size);
+	item_size += sizeof(disk_key);
+	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
+	return 0;
+}
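+
+/*
+ * sys_chunk_array is a packed sequence of (struct btrfs_disk_key,
+ * struct btrfs_chunk + stripes) pairs, so one entry takes
+ * sizeof(struct btrfs_disk_key) + btrfs_chunk_item_size(num_stripes)
+ * bytes.  btrfs_del_sys_chunk() above and btrfs_read_sys_array() below
+ * walk the array with exactly this layout.
+ */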
+
+static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
+					int num_stripes, int sub_stripes)
+{
+	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
+		return calc_size;
+	else if (type & BTRFS_BLOCK_GROUP_RAID10)
+		return calc_size * (num_stripes / sub_stripes);
+	else
+		return calc_size * num_stripes;
+}
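+
+/*
+ * Worked example: with calc_size = 1GB per stripe and num_stripes = 4, a
+ * RAID0 chunk maps 4GB of logical space, a RAID1 or DUP chunk maps 1GB
+ * (each stripe holds a full copy), and a RAID10 chunk with sub_stripes = 2
+ * maps 4GB / 2 = 2GB.
+ */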
+
+static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *extent_root,
+			       struct map_lookup **map_ret,
+			       u64 *num_bytes, u64 *stripe_size,
+			       u64 start, u64 type)
+{
+	struct btrfs_fs_info *info = extent_root->fs_info;
+	struct btrfs_device *device = NULL;
+	struct btrfs_fs_devices *fs_devices = info->fs_devices;
+	struct list_head *cur;
+	struct map_lookup *map = NULL;
+	struct extent_map_tree *em_tree;
+	struct extent_map *em;
+	struct list_head private_devs;
+	int min_stripe_size = 1 * 1024 * 1024;
+	u64 calc_size = 1024 * 1024 * 1024;
+	u64 max_chunk_size = calc_size;
+	u64 min_free;
+	u64 avail;
+	u64 max_avail = 0;
+	u64 dev_offset;
+	int num_stripes = 1;
+	int min_stripes = 1;
+	int sub_stripes = 0;
+	int looped = 0;
+	int ret;
+	int index;
+	int stripe_len = 64 * 1024;
+
+	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
+	    (type & BTRFS_BLOCK_GROUP_DUP)) {
+		WARN_ON(1);
+		type &= ~BTRFS_BLOCK_GROUP_DUP;
+	}
+	if (list_empty(&fs_devices->alloc_list))
+		return -ENOSPC;
+
+	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
+		num_stripes = fs_devices->rw_devices;
+		min_stripes = 2;
+	}
+	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
+		num_stripes = 2;
+		min_stripes = 2;
+	}
+	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
+		num_stripes = min_t(u64, 2, fs_devices->rw_devices);
+		if (num_stripes < 2)
+			return -ENOSPC;
+		min_stripes = 2;
+	}
+	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
+		num_stripes = fs_devices->rw_devices;
+		if (num_stripes < 4)
+			return -ENOSPC;
+		num_stripes &= ~(u32)1;
+		sub_stripes = 2;
+		min_stripes = 4;
+	}
+
+	if (type & BTRFS_BLOCK_GROUP_DATA) {
+		max_chunk_size = 10 * calc_size;
+		min_stripe_size = 64 * 1024 * 1024;
+	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
+		max_chunk_size = 4 * calc_size;
+		min_stripe_size = 32 * 1024 * 1024;
+	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
+		calc_size = 8 * 1024 * 1024;
+		max_chunk_size = calc_size * 2;
+		min_stripe_size = 1 * 1024 * 1024;
+	}
+
+	/* we don't want a chunk larger than 10% of writeable space */
+	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
+			     max_chunk_size);
+
+again:
+	if (!map || map->num_stripes != num_stripes) {
+		kfree(map);
+		map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
+		if (!map)
+			return -ENOMEM;
+		map->num_stripes = num_stripes;
+	}
+
+	if (calc_size * num_stripes > max_chunk_size) {
+		calc_size = max_chunk_size;
+		do_div(calc_size, num_stripes);
+		do_div(calc_size, stripe_len);
+		calc_size *= stripe_len;
+	}
+	/* we don't want tiny stripes */
+	calc_size = max_t(u64, min_stripe_size, calc_size);
+
+	do_div(calc_size, stripe_len);
+	calc_size *= stripe_len;
+
+	cur = fs_devices->alloc_list.next;
+	index = 0;
+
+	if (type & BTRFS_BLOCK_GROUP_DUP)
+		min_free = calc_size * 2;
+	else
+		min_free = calc_size;
+
+	/*
+	 * we add 1MB because we never use the first 1MB of the device, unless
+	 * we've looped, in which case we are likely allocating the maximum
+	 * amount of space left already
+	 */
+	if (!looped)
+		min_free += 1024 * 1024;
+
+	INIT_LIST_HEAD(&private_devs);
+	while (index < num_stripes) {
+		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
+		BUG_ON(!device->writeable);
+		if (device->total_bytes > device->bytes_used)
+			avail = device->total_bytes - device->bytes_used;
+		else
+			avail = 0;
+		cur = cur->next;
+
+		if (device->in_fs_metadata && avail >= min_free) {
+			ret = find_free_dev_extent(trans, device,
+						   min_free, &dev_offset);
+			if (ret == 0) {
+				list_move_tail(&device->dev_alloc_list,
+					       &private_devs);
+				map->stripes[index].dev = device;
+				map->stripes[index].physical = dev_offset;
+				index++;
+				if (type & BTRFS_BLOCK_GROUP_DUP) {
+					map->stripes[index].dev = device;
+					map->stripes[index].physical =
+						dev_offset + calc_size;
+					index++;
+				}
+			}
+		} else if (device->in_fs_metadata && avail > max_avail)
+			max_avail = avail;
+		if (cur == &fs_devices->alloc_list)
+			break;
+	}
+	list_splice(&private_devs, &fs_devices->alloc_list);
+	if (index < num_stripes) {
+		if (index >= min_stripes) {
+			num_stripes = index;
+			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
+				num_stripes /= sub_stripes;
+				num_stripes *= sub_stripes;
+			}
+			looped = 1;
+			goto again;
+		}
+		if (!looped && max_avail > 0) {
+			looped = 1;
+			calc_size = max_avail;
+			goto again;
+		}
+		kfree(map);
+		return -ENOSPC;
+	}
+	map->sector_size = extent_root->sectorsize;
+	map->stripe_len = stripe_len;
+	map->io_align = stripe_len;
+	map->io_width = stripe_len;
+	map->type = type;
+	map->num_stripes = num_stripes;
+	map->sub_stripes = sub_stripes;
+
+	*map_ret = map;
+	*stripe_size = calc_size;
+	*num_bytes = chunk_bytes_by_type(type, calc_size,
+					 num_stripes, sub_stripes);
+
+	em = alloc_extent_map(GFP_NOFS);
+	if (!em) {
+		kfree(map);
+		return -ENOMEM;
+	}
+	em->bdev = (struct block_device *)map;
+	em->start = start;
+	em->len = *num_bytes;
+	em->block_start = 0;
+	em->block_len = em->len;
+
+	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
+	spin_lock(&em_tree->lock);
+	ret = add_extent_mapping(em_tree, em);
+	spin_unlock(&em_tree->lock);
+	BUG_ON(ret);
+	free_extent_map(em);
+
+	ret = btrfs_make_block_group(trans, extent_root, 0, type,
+				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
+				     start, *num_bytes);
+	BUG_ON(ret);
+
+	index = 0;
+	while (index < map->num_stripes) {
+		device = map->stripes[index].dev;
+		dev_offset = map->stripes[index].physical;
+
+		ret = btrfs_alloc_dev_extent(trans, device,
+				info->chunk_root->root_key.objectid,
+				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
+				start, dev_offset, calc_size);
+		BUG_ON(ret);
+		index++;
+	}
+
+	return 0;
+}
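+
+/*
+ * Example of the sizing loop above: for a RAID1 metadata chunk on two
+ * writeable devices, calc_size starts at 1GB and max_chunk_size at 4GB
+ * (further capped to 10% of total_rw_bytes).  Each device must then offer
+ * a free extent of calc_size plus 1MB on the first pass; if one cannot,
+ * calc_size drops to max_avail and the allocation is retried.
+ */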
+
+static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
+				struct btrfs_root *extent_root,
+				struct map_lookup *map, u64 chunk_offset,
+				u64 chunk_size, u64 stripe_size)
+{
+	u64 dev_offset;
+	struct btrfs_key key;
+	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
+	struct btrfs_device *device;
+	struct btrfs_chunk *chunk;
+	struct btrfs_stripe *stripe;
+	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
+	int index = 0;
+	int ret;
+
+	chunk = kzalloc(item_size, GFP_NOFS);
+	if (!chunk)
+		return -ENOMEM;
+
+	index = 0;
+	while (index < map->num_stripes) {
+		device = map->stripes[index].dev;
+		device->bytes_used += stripe_size;
+		ret = btrfs_update_device(trans, device);
+		BUG_ON(ret);
+		index++;
+	}
+
+	index = 0;
+	stripe = &chunk->stripe;
+	while (index < map->num_stripes) {
+		device = map->stripes[index].dev;
+		dev_offset = map->stripes[index].physical;
+
+		btrfs_set_stack_stripe_devid(stripe, device->devid);
+		btrfs_set_stack_stripe_offset(stripe, dev_offset);
+		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
+		stripe++;
+		index++;
+	}
+
+	btrfs_set_stack_chunk_length(chunk, chunk_size);
+	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
+	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
+	btrfs_set_stack_chunk_type(chunk, map->type);
+	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
+	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
+	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
+	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
+	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
+
+	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
+	key.type = BTRFS_CHUNK_ITEM_KEY;
+	key.offset = chunk_offset;
+
+	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
+	BUG_ON(ret);
+
+	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
+		ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
+					     item_size);
+		BUG_ON(ret);
+	}
+	kfree(chunk);
+	return 0;
+}
+
+/*
+ * Chunk allocation falls into two parts. The first part does the work
+ * that makes the newly allocated chunk usable, but does not modify the
+ * chunk tree. The second part does the work that requires modifying
+ * the chunk tree. This division is important for the
+ * bootstrap process of adding storage to a seed btrfs.
+ */
+int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+		      struct btrfs_root *extent_root, u64 type)
+{
+	u64 chunk_offset;
+	u64 chunk_size;
+	u64 stripe_size;
+	struct map_lookup *map;
+	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
+	int ret;
+
+	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
+			      &chunk_offset);
+	if (ret)
+		return ret;
+
+	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
+				  &stripe_size, chunk_offset, type);
+	if (ret)
+		return ret;
+
+	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
+				   chunk_size, stripe_size);
+	BUG_ON(ret);
+	return 0;
+}
+
+static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
+					 struct btrfs_root *root,
+					 struct btrfs_device *device)
+{
+	u64 chunk_offset;
+	u64 sys_chunk_offset;
+	u64 chunk_size;
+	u64 sys_chunk_size;
+	u64 stripe_size;
+	u64 sys_stripe_size;
+	u64 alloc_profile;
+	struct map_lookup *map;
+	struct map_lookup *sys_map;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_root *extent_root = fs_info->extent_root;
+	int ret;
+
+	ret = find_next_chunk(fs_info->chunk_root,
+			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
+	BUG_ON(ret);
+
+	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
+			(fs_info->metadata_alloc_profile &
+			 fs_info->avail_metadata_alloc_bits);
+	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
+
+	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
+				  &stripe_size, chunk_offset, alloc_profile);
+	BUG_ON(ret);
+
+	sys_chunk_offset = chunk_offset + chunk_size;
+
+	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
+			(fs_info->system_alloc_profile &
+			 fs_info->avail_system_alloc_bits);
+	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
+
+	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
+				  &sys_chunk_size, &sys_stripe_size,
+				  sys_chunk_offset, alloc_profile);
+	BUG_ON(ret);
+
+	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
+	BUG_ON(ret);
+
+	/*
+	 * Modifying the chunk tree requires allocating new blocks from
+	 * both the system block group and the metadata block group, so
+	 * operations that modify the chunk tree can only be done after
+	 * both block groups have been created.
+	 */
+	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
+				   chunk_size, stripe_size);
+	BUG_ON(ret);
+
+	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
+				   sys_chunk_offset, sys_chunk_size,
+				   sys_stripe_size);
+	BUG_ON(ret);
+	return 0;
+}
+
+int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
+{
+	struct extent_map *em;
+	struct map_lookup *map;
+	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+	int readonly = 0;
+	int i;
+
+	spin_lock(&map_tree->map_tree.lock);
+	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
+	spin_unlock(&map_tree->map_tree.lock);
+	if (!em)
+		return 1;
+
+	map = (struct map_lookup *)em->bdev;
+	for (i = 0; i < map->num_stripes; i++) {
+		if (!map->stripes[i].dev->writeable) {
+			readonly = 1;
+			break;
+		}
+	}
+	free_extent_map(em);
+	return readonly;
+}
+
+void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
+{
+	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
+}
+
+void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
+{
+	struct extent_map *em;
+
+	while (1) {
+		spin_lock(&tree->map_tree.lock);
+		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
+		if (em)
+			remove_extent_mapping(&tree->map_tree, em);
+		spin_unlock(&tree->map_tree.lock);
+		if (!em)
+			break;
+		kfree(em->bdev);
+		/* once for us */
+		free_extent_map(em);
+		/* once for the tree */
+		free_extent_map(em);
+	}
+}
+
+int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
+{
+	struct extent_map *em;
+	struct map_lookup *map;
+	struct extent_map_tree *em_tree = &map_tree->map_tree;
+	int ret;
+
+	spin_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree, logical, len);
+	spin_unlock(&em_tree->lock);
+	BUG_ON(!em);
+
+	BUG_ON(em->start > logical || em->start + em->len < logical);
+	map = (struct map_lookup *)em->bdev;
+	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
+		ret = map->num_stripes;
+	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
+		ret = map->sub_stripes;
+	else
+		ret = 1;
+	free_extent_map(em);
+	return ret;
+}
+
+static int find_live_mirror(struct map_lookup *map, int first, int num,
+			    int optimal)
+{
+	int i;
+	if (map->stripes[optimal].dev->bdev)
+		return optimal;
+	for (i = first; i < first + num; i++) {
+		if (map->stripes[i].dev->bdev)
+			return i;
+	}
+	/* we couldn't find one that doesn't fail.  Just return something
+	 * and the io error handling code will clean up eventually
+	 */
+	return optimal;
+}
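+
+/*
+ * Example: for RAID10 the caller passes first = the start of the
+ * sub-stripe group and num = sub_stripes, so the search for a live bdev
+ * stays within the copies of the same data and never wanders into a
+ * different stripe group.
+ */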
+
+static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
+			     u64 logical, u64 *length,
+			     struct btrfs_multi_bio **multi_ret,
+			     int mirror_num, struct page *unplug_page)
+{
+	struct extent_map *em;
+	struct map_lookup *map;
+	struct extent_map_tree *em_tree = &map_tree->map_tree;
+	u64 offset;
+	u64 stripe_offset;
+	u64 stripe_nr;
+	int stripes_allocated = 8;
+	int stripes_required = 1;
+	int stripe_index;
+	int i;
+	int num_stripes;
+	int max_errors = 0;
+	struct btrfs_multi_bio *multi = NULL;
+
+	if (multi_ret && !(rw & (1 << BIO_RW)))
+		stripes_allocated = 1;
+again:
+	if (multi_ret) {
+		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
+				GFP_NOFS);
+		if (!multi)
+			return -ENOMEM;
+
+		atomic_set(&multi->error, 0);
+	}
+
+	spin_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree, logical, *length);
+	spin_unlock(&em_tree->lock);
+
+	if (!em && unplug_page)
+		return 0;
+
+	if (!em) {
+		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
+		       (unsigned long long)logical,
+		       (unsigned long long)*length);
+		BUG();
+	}
+
+	BUG_ON(em->start > logical || em->start + em->len < logical);
+	map = (struct map_lookup *)em->bdev;
+	offset = logical - em->start;
+
+	if (mirror_num > map->num_stripes)
+		mirror_num = 0;
+
+	/* if our multi bio struct is too small, back off and try again */
+	if (rw & (1 << BIO_RW)) {
+		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
+				 BTRFS_BLOCK_GROUP_DUP)) {
+			stripes_required = map->num_stripes;
+			max_errors = 1;
+		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
+			stripes_required = map->sub_stripes;
+			max_errors = 1;
+		}
+	}
+	if (multi_ret && rw == WRITE &&
+	    stripes_allocated < stripes_required) {
+		stripes_allocated = map->num_stripes;
+		free_extent_map(em);
+		kfree(multi);
+		goto again;
+	}
+	stripe_nr = offset;
+	/*
+	 * stripe_nr counts the total number of stripes we have to stride
+	 * to get to this block
+	 */
+	do_div(stripe_nr, map->stripe_len);
+
+	stripe_offset = stripe_nr * map->stripe_len;
+	BUG_ON(offset < stripe_offset);
+
+	/* stripe_offset is the offset of this block in its stripe */
+	stripe_offset = offset - stripe_offset;
+
+	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
+			 BTRFS_BLOCK_GROUP_RAID10 |
+			 BTRFS_BLOCK_GROUP_DUP)) {
+		/* we limit the length of each bio to what fits in a stripe */
+		*length = min_t(u64, em->len - offset,
+			      map->stripe_len - stripe_offset);
+	} else {
+		*length = em->len - offset;
+	}
+
+	if (!multi_ret && !unplug_page)
+		goto out;
+
+	num_stripes = 1;
+	stripe_index = 0;
+	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
+		if (unplug_page || (rw & (1 << BIO_RW)))
+			num_stripes = map->num_stripes;
+		else if (mirror_num)
+			stripe_index = mirror_num - 1;
+		else {
+			stripe_index = find_live_mirror(map, 0,
+					    map->num_stripes,
+					    current->pid % map->num_stripes);
+		}
+
+	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
+		if (rw & (1 << BIO_RW))
+			num_stripes = map->num_stripes;
+		else if (mirror_num)
+			stripe_index = mirror_num - 1;
+
+	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
+		int factor = map->num_stripes / map->sub_stripes;
+
+		stripe_index = do_div(stripe_nr, factor);
+		stripe_index *= map->sub_stripes;
+
+		if (unplug_page || (rw & (1 << BIO_RW)))
+			num_stripes = map->sub_stripes;
+		else if (mirror_num)
+			stripe_index += mirror_num - 1;
+		else {
+			stripe_index = find_live_mirror(map, stripe_index,
+					      map->sub_stripes, stripe_index +
+					      current->pid % map->sub_stripes);
+		}
+	} else {
+		/*
+		 * after this do_div call, stripe_nr is the number of stripes
+		 * on this device we have to walk to find the data, and
+		 * stripe_index is the number of our device in the stripe array
+		 */
+		stripe_index = do_div(stripe_nr, map->num_stripes);
+	}
+	BUG_ON(stripe_index >= map->num_stripes);
+
+	for (i = 0; i < num_stripes; i++) {
+		if (unplug_page) {
+			struct btrfs_device *device;
+			struct backing_dev_info *bdi;
+
+			device = map->stripes[stripe_index].dev;
+			if (device->bdev) {
+				bdi = blk_get_backing_dev_info(device->bdev);
+				if (bdi->unplug_io_fn)
+					bdi->unplug_io_fn(bdi, unplug_page);
+			}
+		} else {
+			multi->stripes[i].physical =
+				map->stripes[stripe_index].physical +
+				stripe_offset + stripe_nr * map->stripe_len;
+			multi->stripes[i].dev = map->stripes[stripe_index].dev;
+		}
+		stripe_index++;
+	}
+	if (multi_ret) {
+		*multi_ret = multi;
+		multi->num_stripes = num_stripes;
+		multi->max_errors = max_errors;
+	}
+out:
+	free_extent_map(em);
+	return 0;
+}
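+
+/*
+ * A sketch of the stripe arithmetic above, for a RAID0 chunk with
+ * stripe_len = 64K and num_stripes = 3, mapping offset 200K into the
+ * chunk: stripe_nr = 200K / 64K = 3 and stripe_offset = 200K - 192K = 8K,
+ * then stripe_index = 3 % 3 = 0 and stripe_nr becomes 1, so the io goes to
+ * stripes[0] at physical + 1 * 64K + 8K, with *length clamped to 56K so it
+ * never crosses a stripe boundary.
+ */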
+
+int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
+		      u64 logical, u64 *length,
+		      struct btrfs_multi_bio **multi_ret, int mirror_num)
+{
+	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
+				 mirror_num, NULL);
+}
+
+int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
+		     u64 chunk_start, u64 physical, u64 devid,
+		     u64 **logical, int *naddrs, int *stripe_len)
+{
+	struct extent_map_tree *em_tree = &map_tree->map_tree;
+	struct extent_map *em;
+	struct map_lookup *map;
+	u64 *buf;
+	u64 bytenr;
+	u64 length;
+	u64 stripe_nr;
+	int i, j, nr = 0;
+
+	spin_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree, chunk_start, 1);
+	spin_unlock(&em_tree->lock);
+
+	BUG_ON(!em || em->start != chunk_start);
+	map = (struct map_lookup *)em->bdev;
+
+	length = em->len;
+	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
+		do_div(length, map->num_stripes / map->sub_stripes);
+	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
+		do_div(length, map->num_stripes);
+
+	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
+	BUG_ON(!buf);
+
+	for (i = 0; i < map->num_stripes; i++) {
+		if (devid && map->stripes[i].dev->devid != devid)
+			continue;
+		if (map->stripes[i].physical > physical ||
+		    map->stripes[i].physical + length <= physical)
+			continue;
+
+		stripe_nr = physical - map->stripes[i].physical;
+		do_div(stripe_nr, map->stripe_len);
+
+		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
+			stripe_nr = stripe_nr * map->num_stripes + i;
+			do_div(stripe_nr, map->sub_stripes);
+		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
+			stripe_nr = stripe_nr * map->num_stripes + i;
+		}
+		bytenr = chunk_start + stripe_nr * map->stripe_len;
+		WARN_ON(nr >= map->num_stripes);
+		for (j = 0; j < nr; j++) {
+			if (buf[j] == bytenr)
+				break;
+		}
+		if (j == nr) {
+			WARN_ON(nr >= map->num_stripes);
+			buf[nr++] = bytenr;
+		}
+	}
+
+	for (i = 0; i > nr; i++) {
+		struct btrfs_multi_bio *multi;
+		struct btrfs_bio_stripe *stripe;
+		int ret;
+
+		length = 1;
+		ret = btrfs_map_block(map_tree, WRITE, buf[i],
+				      &length, &multi, 0);
+		BUG_ON(ret);
+
+		stripe = multi->stripes;
+		for (j = 0; j < multi->num_stripes; j++) {
+			if (stripe->physical >= physical &&
+			    physical < stripe->physical + length)
+				break;
+		}
+		BUG_ON(j >= multi->num_stripes);
+		kfree(multi);
+	}
+
+	*logical = buf;
+	*naddrs = nr;
+	*stripe_len = map->stripe_len;
+
+	free_extent_map(em);
+	return 0;
+}
+
+int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
+		      u64 logical, struct page *page)
+{
+	u64 length = PAGE_CACHE_SIZE;
+	return __btrfs_map_block(map_tree, READ, logical, &length,
+				 NULL, 0, page);
+}
+
+static void end_bio_multi_stripe(struct bio *bio, int err)
+{
+	struct btrfs_multi_bio *multi = bio->bi_private;
+	int is_orig_bio = 0;
+
+	if (err)
+		atomic_inc(&multi->error);
+
+	if (bio == multi->orig_bio)
+		is_orig_bio = 1;
+
+	if (atomic_dec_and_test(&multi->stripes_pending)) {
+		if (!is_orig_bio) {
+			bio_put(bio);
+			bio = multi->orig_bio;
+		}
+		bio->bi_private = multi->private;
+		bio->bi_end_io = multi->end_io;
+		/* only send an error to the higher layers if it is
+		 * beyond the tolerance of the multi-bio
+		 */
+		if (atomic_read(&multi->error) > multi->max_errors) {
+			err = -EIO;
+		} else if (err) {
+			/*
+			 * this bio is actually up to date, we didn't
+			 * go over the max number of errors
+			 */
+			set_bit(BIO_UPTODATE, &bio->bi_flags);
+			err = 0;
+		}
+		kfree(multi);
+
+		bio_endio(bio, err);
+	} else if (!is_orig_bio) {
+		bio_put(bio);
+	}
+}
+
+struct async_sched {
+	struct bio *bio;
+	int rw;
+	struct btrfs_fs_info *info;
+	struct btrfs_work work;
+};
+
+/*
+ * see run_scheduled_bios for a description of why bios are collected for
+ * async submit.
+ *
+ * This will add one bio to the pending list for a device and make sure
+ * the work struct is scheduled.
+ */
+static noinline int schedule_bio(struct btrfs_root *root,
+				 struct btrfs_device *device,
+				 int rw, struct bio *bio)
+{
+	int should_queue = 1;
+
+	/* don't bother with additional async steps for reads, right now */
+	if (!(rw & (1 << BIO_RW))) {
+		bio_get(bio);
+		submit_bio(rw, bio);
+		bio_put(bio);
+		return 0;
+	}
+
+	/*
+	 * nr_async_bios allows us to reliably return congestion to the
+	 * higher layers.  Otherwise, the async bio makes it appear we have
+	 * made progress against dirty pages when we've really just put it
+	 * on a queue for later
+	 */
+	atomic_inc(&root->fs_info->nr_async_bios);
+	WARN_ON(bio->bi_next);
+	bio->bi_next = NULL;
+	bio->bi_rw |= rw;
+
+	spin_lock(&device->io_lock);
+
+	if (device->pending_bio_tail)
+		device->pending_bio_tail->bi_next = bio;
+
+	device->pending_bio_tail = bio;
+	if (!device->pending_bios)
+		device->pending_bios = bio;
+	if (device->running_pending)
+		should_queue = 0;
+
+	spin_unlock(&device->io_lock);
+
+	if (should_queue)
+		btrfs_queue_worker(&root->fs_info->submit_workers,
+				   &device->work);
+	return 0;
+}
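+
+/*
+ * Net effect of the above: reads are submitted immediately, while writes
+ * are chained through bi_next onto device->pending_bios and drained later
+ * by the submit_workers thread (see run_scheduled_bios), so writes from
+ * many contexts are batched per device.
+ */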
+
+int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
+		  int mirror_num, int async_submit)
+{
+	struct btrfs_mapping_tree *map_tree;
+	struct btrfs_device *dev;
+	struct bio *first_bio = bio;
+	u64 logical = (u64)bio->bi_sector << 9;
+	u64 length = 0;
+	u64 map_length;
+	struct btrfs_multi_bio *multi = NULL;
+	int ret;
+	int dev_nr = 0;
+	int total_devs = 1;
+
+	length = bio->bi_size;
+	map_tree = &root->fs_info->mapping_tree;
+	map_length = length;
+
+	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
+			      mirror_num);
+	BUG_ON(ret);
+
+	total_devs = multi->num_stripes;
+	if (map_length < length) {
+		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
+		       "len %llu\n", (unsigned long long)logical,
+		       (unsigned long long)length,
+		       (unsigned long long)map_length);
+		BUG();
+	}
+	multi->end_io = first_bio->bi_end_io;
+	multi->private = first_bio->bi_private;
+	multi->orig_bio = first_bio;
+	atomic_set(&multi->stripes_pending, multi->num_stripes);
+
+	while (dev_nr < total_devs) {
+		if (total_devs > 1) {
+			if (dev_nr < total_devs - 1) {
+				bio = bio_clone(first_bio, GFP_NOFS);
+				BUG_ON(!bio);
+			} else {
+				bio = first_bio;
+			}
+			bio->bi_private = multi;
+			bio->bi_end_io = end_bio_multi_stripe;
+		}
+		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
+		dev = multi->stripes[dev_nr].dev;
+		BUG_ON(rw == WRITE && !dev->writeable);
+		if (dev && dev->bdev) {
+			bio->bi_bdev = dev->bdev;
+			if (async_submit)
+				schedule_bio(root, dev, rw, bio);
+			else
+				submit_bio(rw, bio);
+		} else {
+			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
+			bio->bi_sector = logical >> 9;
+			bio_endio(bio, -EIO);
+		}
+		dev_nr++;
+	}
+	if (total_devs == 1)
+		kfree(multi);
+	return 0;
+}
+
+struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
+				       u8 *uuid, u8 *fsid)
+{
+	struct btrfs_device *device;
+	struct btrfs_fs_devices *cur_devices;
+
+	cur_devices = root->fs_info->fs_devices;
+	while (cur_devices) {
+		if (!fsid ||
+		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
+			device = __find_device(&cur_devices->devices,
+					       devid, uuid);
+			if (device)
+				return device;
+		}
+		cur_devices = cur_devices->seed;
+	}
+	return NULL;
+}
+
+static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
+					    u64 devid, u8 *dev_uuid)
+{
+	struct btrfs_device *device;
+	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+
+	device = kzalloc(sizeof(*device), GFP_NOFS);
+	if (!device)
+		return NULL;
+	list_add(&device->dev_list,
+		 &fs_devices->devices);
+	device->barriers = 1;
+	device->dev_root = root->fs_info->dev_root;
+	device->devid = devid;
+	device->work.func = pending_bios_fn;
+	device->fs_devices = fs_devices;
+	fs_devices->num_devices++;
+	spin_lock_init(&device->io_lock);
+	INIT_LIST_HEAD(&device->dev_alloc_list);
+	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
+	return device;
+}
+
+static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
+			  struct extent_buffer *leaf,
+			  struct btrfs_chunk *chunk)
+{
+	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+	struct map_lookup *map;
+	struct extent_map *em;
+	u64 logical;
+	u64 length;
+	u64 devid;
+	u8 uuid[BTRFS_UUID_SIZE];
+	int num_stripes;
+	int ret;
+	int i;
+
+	logical = key->offset;
+	length = btrfs_chunk_length(leaf, chunk);
+
+	spin_lock(&map_tree->map_tree.lock);
+	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
+	spin_unlock(&map_tree->map_tree.lock);
+
+	/* already mapped? */
+	if (em && em->start <= logical && em->start + em->len > logical) {
+		free_extent_map(em);
+		return 0;
+	} else if (em) {
+		free_extent_map(em);
+	}
+
+	em = alloc_extent_map(GFP_NOFS);
+	if (!em)
+		return -ENOMEM;
+	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
+	if (!map) {
+		free_extent_map(em);
+		return -ENOMEM;
+	}
+
+	em->bdev = (struct block_device *)map;
+	em->start = logical;
+	em->len = length;
+	em->block_start = 0;
+	em->block_len = em->len;
+
+	map->num_stripes = num_stripes;
+	map->io_width = btrfs_chunk_io_width(leaf, chunk);
+	map->io_align = btrfs_chunk_io_align(leaf, chunk);
+	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
+	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+	map->type = btrfs_chunk_type(leaf, chunk);
+	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+	for (i = 0; i < num_stripes; i++) {
+		map->stripes[i].physical =
+			btrfs_stripe_offset_nr(leaf, chunk, i);
+		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
+		read_extent_buffer(leaf, uuid, (unsigned long)
+				   btrfs_stripe_dev_uuid_nr(chunk, i),
+				   BTRFS_UUID_SIZE);
+		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
+							NULL);
+		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
+			kfree(map);
+			free_extent_map(em);
+			return -EIO;
+		}
+		if (!map->stripes[i].dev) {
+			map->stripes[i].dev =
+				add_missing_dev(root, devid, uuid);
+			if (!map->stripes[i].dev) {
+				kfree(map);
+				free_extent_map(em);
+				return -EIO;
+			}
+		}
+		map->stripes[i].dev->in_fs_metadata = 1;
+	}
+
+	spin_lock(&map_tree->map_tree.lock);
+	ret = add_extent_mapping(&map_tree->map_tree, em);
+	spin_unlock(&map_tree->map_tree.lock);
+	BUG_ON(ret);
+	free_extent_map(em);
+
+	return 0;
+}
+
+static int fill_device_from_item(struct extent_buffer *leaf,
+				 struct btrfs_dev_item *dev_item,
+				 struct btrfs_device *device)
+{
+	unsigned long ptr;
+
+	device->devid = btrfs_device_id(leaf, dev_item);
+	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
+	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
+	device->type = btrfs_device_type(leaf, dev_item);
+	device->io_align = btrfs_device_io_align(leaf, dev_item);
+	device->io_width = btrfs_device_io_width(leaf, dev_item);
+	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
+
+	ptr = (unsigned long)btrfs_device_uuid(dev_item);
+	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
+
+	return 0;
+}
+
+static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
+{
+	struct btrfs_fs_devices *fs_devices;
+	int ret;
+
+	mutex_lock(&uuid_mutex);
+
+	fs_devices = root->fs_info->fs_devices->seed;
+	while (fs_devices) {
+		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
+			ret = 0;
+			goto out;
+		}
+		fs_devices = fs_devices->seed;
+	}
+
+	fs_devices = find_fsid(fsid);
+	if (!fs_devices) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	fs_devices = clone_fs_devices(fs_devices);
+	if (IS_ERR(fs_devices)) {
+		ret = PTR_ERR(fs_devices);
+		goto out;
+	}
+
+	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
+				   root->fs_info->bdev_holder);
+	if (ret)
+		goto out;
+
+	if (!fs_devices->seeding) {
+		__btrfs_close_devices(fs_devices);
+		free_fs_devices(fs_devices);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	fs_devices->seed = root->fs_info->fs_devices->seed;
+	root->fs_info->fs_devices->seed = fs_devices;
+out:
+	mutex_unlock(&uuid_mutex);
+	return ret;
+}
+
+static int read_one_dev(struct btrfs_root *root,
+			struct extent_buffer *leaf,
+			struct btrfs_dev_item *dev_item)
+{
+	struct btrfs_device *device;
+	u64 devid;
+	int ret;
+	u8 fs_uuid[BTRFS_UUID_SIZE];
+	u8 dev_uuid[BTRFS_UUID_SIZE];
+
+	devid = btrfs_device_id(leaf, dev_item);
+	read_extent_buffer(leaf, dev_uuid,
+			   (unsigned long)btrfs_device_uuid(dev_item),
+			   BTRFS_UUID_SIZE);
+	read_extent_buffer(leaf, fs_uuid,
+			   (unsigned long)btrfs_device_fsid(dev_item),
+			   BTRFS_UUID_SIZE);
+
+	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
+		ret = open_seed_devices(root, fs_uuid);
+		if (ret && !btrfs_test_opt(root, DEGRADED))
+			return ret;
+	}
+
+	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
+	if (!device || !device->bdev) {
+		if (!btrfs_test_opt(root, DEGRADED))
+			return -EIO;
+
+		if (!device) {
+			printk(KERN_WARNING "warning devid %llu missing\n",
+			       (unsigned long long)devid);
+			device = add_missing_dev(root, devid, dev_uuid);
+			if (!device)
+				return -ENOMEM;
+		}
+	}
+
+	if (device->fs_devices != root->fs_info->fs_devices) {
+		BUG_ON(device->writeable);
+		if (device->generation !=
+		    btrfs_device_generation(leaf, dev_item))
+			return -EINVAL;
+	}
+
+	fill_device_from_item(leaf, dev_item, device);
+	device->dev_root = root->fs_info->dev_root;
+	device->in_fs_metadata = 1;
+	if (device->writeable)
+		device->fs_devices->total_rw_bytes += device->total_bytes;
+	ret = 0;
+	return ret;
+}
+
+int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
+{
+	struct btrfs_dev_item *dev_item;
+
+	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
+						     dev_item);
+	return read_one_dev(root, buf, dev_item);
+}
+
+int btrfs_read_sys_array(struct btrfs_root *root)
+{
+	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
+	struct extent_buffer *sb;
+	struct btrfs_disk_key *disk_key;
+	struct btrfs_chunk *chunk;
+	u8 *ptr;
+	unsigned long sb_ptr;
+	int ret = 0;
+	u32 num_stripes;
+	u32 array_size;
+	u32 len = 0;
+	u32 cur;
+	struct btrfs_key key;
+
+	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
+					  BTRFS_SUPER_INFO_SIZE);
+	if (!sb)
+		return -ENOMEM;
+	btrfs_set_buffer_uptodate(sb);
+	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
+	array_size = btrfs_super_sys_array_size(super_copy);
+
+	ptr = super_copy->sys_chunk_array;
+	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
+	cur = 0;
+
+	while (cur < array_size) {
+		disk_key = (struct btrfs_disk_key *)ptr;
+		btrfs_disk_key_to_cpu(&key, disk_key);
+
+		len = sizeof(*disk_key);
+		ptr += len;
+		sb_ptr += len;
+		cur += len;
+
+		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
+			chunk = (struct btrfs_chunk *)sb_ptr;
+			ret = read_one_chunk(root, &key, sb, chunk);
+			if (ret)
+				break;
+			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
+			len = btrfs_chunk_item_size(num_stripes);
+		} else {
+			ret = -EIO;
+			break;
+		}
+		ptr += len;
+		sb_ptr += len;
+		cur += len;
+	}
+	free_extent_buffer(sb);
+	return ret;
+}
+
+int btrfs_read_chunk_tree(struct btrfs_root *root)
+{
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	struct btrfs_key key;
+	struct btrfs_key found_key;
+	int ret;
+	int slot;
+
+	root = root->fs_info->chunk_root;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	/* first we search for all of the device items, and then we
+	 * read in all of the chunk items.  This way we can create chunk
+	 * mappings that reference all of the devices that are found
+	 */
+	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
+	key.offset = 0;
+	key.type = 0;
+again:
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	while (1) {
+		leaf = path->nodes[0];
+		slot = path->slots[0];
+		if (slot >= btrfs_header_nritems(leaf)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret == 0)
+				continue;
+			if (ret < 0)
+				goto error;
+			break;
+		}
+		btrfs_item_key_to_cpu(leaf, &found_key, slot);
+		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
+			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
+				break;
+			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
+				struct btrfs_dev_item *dev_item;
+				dev_item = btrfs_item_ptr(leaf, slot,
+						  struct btrfs_dev_item);
+				ret = read_one_dev(root, leaf, dev_item);
+				if (ret)
+					goto error;
+			}
+		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
+			struct btrfs_chunk *chunk;
+			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
+			ret = read_one_chunk(root, &found_key, leaf, chunk);
+			if (ret)
+				goto error;
+		}
+		path->slots[0]++;
+	}
+	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
+		key.objectid = 0;
+		btrfs_release_path(root, path);
+		goto again;
+	}
+	ret = 0;
+error:
+	btrfs_free_path(path);
+	return ret;
+}
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
new file mode 100644
index 0000000..86c44e9
--- /dev/null
+++ b/fs/btrfs/volumes.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __BTRFS_VOLUMES_
+#define __BTRFS_VOLUMES_
+
+#include <linux/bio.h>
+#include "async-thread.h"
+
+struct buffer_head;
+struct btrfs_device {
+	struct list_head dev_list;
+	struct list_head dev_alloc_list;
+	struct btrfs_fs_devices *fs_devices;
+	struct btrfs_root *dev_root;
+	struct bio *pending_bios;
+	struct bio *pending_bio_tail;
+	int running_pending;
+	u64 generation;
+
+	int barriers;
+	int writeable;
+	int in_fs_metadata;
+
+	spinlock_t io_lock;
+
+	struct block_device *bdev;
+
+	/* the mode sent to open_bdev_exclusive */
+	fmode_t mode;
+
+	char *name;
+
+	/* the internal btrfs device id */
+	u64 devid;
+
+	/* size of the device */
+	u64 total_bytes;
+
+	/* bytes used */
+	u64 bytes_used;
+
+	/* optimal io alignment for this device */
+	u32 io_align;
+
+	/* optimal io width for this device */
+	u32 io_width;
+
+	/* minimal io size for this device */
+	u32 sector_size;
+
+	/* type and info about this device */
+	u64 type;
+
+	/* physical drive uuid (or lvm uuid) */
+	u8 uuid[BTRFS_UUID_SIZE];
+
+	struct btrfs_work work;
+};
+
+struct btrfs_fs_devices {
+	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
+
+	/* the device with this id has the most recent copy of the super */
+	u64 latest_devid;
+	u64 latest_trans;
+	u64 num_devices;
+	u64 open_devices;
+	u64 rw_devices;
+	u64 total_rw_bytes;
+	struct block_device *latest_bdev;
+	/* all of the devices in the FS */
+	struct list_head devices;
+
+	/* devices not currently being allocated */
+	struct list_head alloc_list;
+	struct list_head list;
+
+	struct btrfs_fs_devices *seed;
+	int seeding;
+
+	int opened;
+};
+
+struct btrfs_bio_stripe {
+	struct btrfs_device *dev;
+	u64 physical;
+};
+
+struct btrfs_multi_bio {
+	atomic_t stripes_pending;
+	bio_end_io_t *end_io;
+	struct bio *orig_bio;
+	void *private;
+	atomic_t error;
+	int max_errors;
+	int num_stripes;
+	struct btrfs_bio_stripe stripes[];
+};
+
+#define btrfs_multi_bio_size(n) (sizeof(struct btrfs_multi_bio) + \
+			    (sizeof(struct btrfs_bio_stripe) * (n)))
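+
+/*
+ * The stripes[] array is allocated inline with the structure:
+ * btrfs_multi_bio_size(n) is the header plus room for n btrfs_bio_stripe
+ * entries, which is how __btrfs_map_block() sizes its allocation.
+ */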
+
+int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
+			   struct btrfs_device *device,
+			   u64 chunk_tree, u64 chunk_objectid,
+			   u64 chunk_offset, u64 start, u64 num_bytes);
+int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
+		    u64 logical, u64 *length,
+		    struct btrfs_multi_bio **multi_ret, int mirror_num);
+int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
+		     u64 chunk_start, u64 physical, u64 devid,
+		     u64 **logical, int *naddrs, int *stripe_len);
+int btrfs_read_sys_array(struct btrfs_root *root);
+int btrfs_read_chunk_tree(struct btrfs_root *root);
+int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+		      struct btrfs_root *extent_root, u64 type);
+void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
+void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
+int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
+		  int mirror_num, int async_submit);
+int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf);
+int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
+		       fmode_t flags, void *holder);
+int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
+			  struct btrfs_fs_devices **fs_devices_ret);
+int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
+int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices);
+int btrfs_add_device(struct btrfs_trans_handle *trans,
+		     struct btrfs_root *root,
+		     struct btrfs_device *device);
+int btrfs_rm_device(struct btrfs_root *root, char *device_path);
+int btrfs_cleanup_fs_uuids(void);
+int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len);
+int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
+		      u64 logical, struct page *page);
+int btrfs_grow_device(struct btrfs_trans_handle *trans,
+		      struct btrfs_device *device, u64 new_size);
+struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
+				       u8 *uuid, u8 *fsid);
+int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
+int btrfs_init_new_device(struct btrfs_root *root, char *path);
+int btrfs_balance(struct btrfs_root *dev_root);
+void btrfs_unlock_volumes(void);
+void btrfs_lock_volumes(void);
+int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
+#endif
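
For reference, struct btrfs_multi_bio above, with its trailing stripes[] array and the btrfs_multi_bio_size() helper, is how btrfs_map_block() reports which physical stripes back a logical byte range. A minimal sketch of a hypothetical in-kernel caller follows, using only the declarations in this header; the READ flag, the printk() and the kfree() of the returned mapping are assumptions about the calling convention, not something this header guarantees.

/* Illustrative sketch only: resolve a logical byte range to its
 * physical stripes and walk the copies. */
static int sketch_map_and_walk(struct btrfs_mapping_tree *map_tree,
			       u64 logical, u64 len)
{
	struct btrfs_multi_bio *multi = NULL;
	int ret, i;

	ret = btrfs_map_block(map_tree, READ, logical, &len, &multi, 0);
	if (ret)
		return ret;

	for (i = 0; i < multi->num_stripes; i++)
		printk(KERN_INFO "copy %d: devid %llu physical %llu\n", i,
		       (unsigned long long)multi->stripes[i].dev->devid,
		       (unsigned long long)multi->stripes[i].physical);

	kfree(multi);	/* assumed: btrfs_map_block() kmallocs the mapping */
	return 0;
}
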
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
new file mode 100644
index 0000000..7f332e2
--- /dev/null
+++ b/fs/btrfs/xattr.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2007 Red Hat.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/rwsem.h>
+#include <linux/xattr.h>
+#include "ctree.h"
+#include "btrfs_inode.h"
+#include "transaction.h"
+#include "xattr.h"
+#include "disk-io.h"
+
+
+ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
+				void *buffer, size_t size)
+{
+	struct btrfs_dir_item *di;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	int ret = 0;
+	unsigned long data_ptr;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	/* lookup the xattr by name */
+	di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name,
+				strlen(name), 0);
+	if (!di || IS_ERR(di)) {
+		ret = -ENODATA;
+		goto out;
+	}
+
+	leaf = path->nodes[0];
+	/* if size is 0, that means we want the size of the attr */
+	if (!size) {
+		ret = btrfs_dir_data_len(leaf, di);
+		goto out;
+	}
+
+	/* now get the data out of our dir_item */
+	if (btrfs_dir_data_len(leaf, di) > size) {
+		ret = -ERANGE;
+		goto out;
+	}
+	data_ptr = (unsigned long)((char *)(di + 1) +
+				   btrfs_dir_name_len(leaf, di));
+	read_extent_buffer(leaf, buffer, data_ptr,
+			   btrfs_dir_data_len(leaf, di));
+	ret = btrfs_dir_data_len(leaf, di);
+
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+int __btrfs_setxattr(struct inode *inode, const char *name,
+			    const void *value, size_t size, int flags)
+{
+	struct btrfs_dir_item *di;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_trans_handle *trans;
+	struct btrfs_path *path;
+	int ret = 0, mod = 0;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	trans = btrfs_start_transaction(root, 1);
+	btrfs_set_trans_block_group(trans, inode);
+
+	/* first let's see if we already have this xattr */
+	di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name,
+				strlen(name), -1);
+	if (IS_ERR(di)) {
+		ret = PTR_ERR(di);
+		goto out;
+	}
+
+	/* ok we already have this xattr, let's remove it */
+	if (di) {
+		/* the xattr exists and the caller asked for create-only, so exit */
+		if (flags & XATTR_CREATE) {
+			ret = -EEXIST;
+			goto out;
+		}
+
+		ret = btrfs_delete_one_dir_name(trans, root, path, di);
+		if (ret)
+			goto out;
+		btrfs_release_path(root, path);
+
+		/* if we don't have a value then we are removing the xattr */
+		if (!value) {
+			mod = 1;
+			goto out;
+		}
+	} else {
+		btrfs_release_path(root, path);
+
+		if (flags & XATTR_REPLACE) {
+			/* we couldn't find the attr to replace */
+			ret = -ENODATA;
+			goto out;
+		}
+	}
+
+	/* ok we have to create a completely new xattr */
+	ret = btrfs_insert_xattr_item(trans, root, name, strlen(name),
+				      value, size, inode->i_ino);
+	if (ret)
+		goto out;
+	mod = 1;
+
+out:
+	if (mod) {
+		inode->i_ctime = CURRENT_TIME;
+		ret = btrfs_update_inode(trans, root, inode);
+	}
+
+	btrfs_end_transaction(trans, root);
+	btrfs_free_path(path);
+	return ret;
+}
+
+ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
+{
+	struct btrfs_key key, found_key;
+	struct inode *inode = dentry->d_inode;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_path *path;
+	struct btrfs_item *item;
+	struct extent_buffer *leaf;
+	struct btrfs_dir_item *di;
+	int ret = 0, slot, advance;
+	size_t total_size = 0, size_left = size;
+	unsigned long name_ptr;
+	size_t name_len;
+	u32 nritems;
+
+	/*
+	 * ok we want all objects associated with this id.
+	 * NOTE: we set key.offset = 0; because we want to start with the
+	 * first xattr that we find and walk forward
+	 */
+	key.objectid = inode->i_ino;
+	btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
+	key.offset = 0;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+	path->reada = 2;
+
+	/* search for our xattrs */
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto err;
+	ret = 0;
+	advance = 0;
+	while (1) {
+		leaf = path->nodes[0];
+		nritems = btrfs_header_nritems(leaf);
+		slot = path->slots[0];
+
+		/* this is where we start walking through the path */
+		if (advance || slot >= nritems) {
+			/*
+			 * if we've reached the last slot in this leaf we need
+			 * to go to the next leaf and reset everything
+			 */
+			if (slot >= nritems-1) {
+				ret = btrfs_next_leaf(root, path);
+				if (ret)
+					break;
+				leaf = path->nodes[0];
+				nritems = btrfs_header_nritems(leaf);
+				slot = path->slots[0];
+			} else {
+				/*
+				 * just walking through the slots on this leaf
+				 */
+				slot++;
+				path->slots[0]++;
+			}
+		}
+		advance = 1;
+
+		item = btrfs_item_nr(leaf, slot);
+		btrfs_item_key_to_cpu(leaf, &found_key, slot);
+
+		/* check to make sure this item is what we want */
+		if (found_key.objectid != key.objectid)
+			break;
+		if (btrfs_key_type(&found_key) != BTRFS_XATTR_ITEM_KEY)
+			break;
+
+		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
+
+		name_len = btrfs_dir_name_len(leaf, di);
+		total_size += name_len + 1;
+
+		/* we are just looking for how big our buffer needs to be */
+		if (!size)
+			continue;
+
+		if (!buffer || (name_len + 1) > size_left) {
+			ret = -ERANGE;
+			goto err;
+		}
+
+		name_ptr = (unsigned long)(di + 1);
+		read_extent_buffer(leaf, buffer, name_ptr, name_len);
+		buffer[name_len] = '\0';
+
+		size_left -= name_len + 1;
+		buffer += name_len + 1;
+	}
+	ret = total_size;
+
+err:
+	btrfs_free_path(path);
+
+	return ret;
+}
+
+/*
+ * List of handlers for synthetic system.* attributes.  All real ondisk
+ * attributes are handled directly.
+ */
+struct xattr_handler *btrfs_xattr_handlers[] = {
+#ifdef CONFIG_FS_POSIX_ACL
+	&btrfs_xattr_acl_access_handler,
+	&btrfs_xattr_acl_default_handler,
+#endif
+	NULL,
+};
+
+/*
+ * Check if the attribute is in a supported namespace.
+ *
+ * This is applied after the check for the synthetic attributes in the system
+ * namespace.
+ */
+static bool btrfs_is_valid_xattr(const char *name)
+{
+	return !strncmp(name, XATTR_SECURITY_PREFIX,
+			XATTR_SECURITY_PREFIX_LEN) ||
+	       !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
+	       !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
+	       !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
+}
+
+ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
+		       void *buffer, size_t size)
+{
+	/*
+	 * If this is a request for a synthetic attribute in the system.*
+	 * namespace use the generic infrastructure to resolve a handler
+	 * for it via sb->s_xattr.
+	 */
+	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+		return generic_getxattr(dentry, name, buffer, size);
+
+	if (!btrfs_is_valid_xattr(name))
+		return -EOPNOTSUPP;
+	return __btrfs_getxattr(dentry->d_inode, name, buffer, size);
+}
+
+int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+		   size_t size, int flags)
+{
+	/*
+	 * If this is a request for a synthetic attribute in the system.*
+	 * namespace use the generic infrastructure to resolve a handler
+	 * for it via sb->s_xattr.
+	 */
+	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+		return generic_setxattr(dentry, name, value, size, flags);
+
+	if (!btrfs_is_valid_xattr(name))
+		return -EOPNOTSUPP;
+
+	if (size == 0)
+		value = "";  /* empty EA, do not remove */
+	return __btrfs_setxattr(dentry->d_inode, name, value, size, flags);
+}
+
+int btrfs_removexattr(struct dentry *dentry, const char *name)
+{
+	/*
+	 * If this is a request for a synthetic attribute in the system.*
+	 * namespace use the generic infrastructure to resolve a handler
+	 * for it via sb->s_xattr.
+	 */
+	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+		return generic_removexattr(dentry, name);
+
+	if (!btrfs_is_valid_xattr(name))
+		return -EOPNOTSUPP;
+	return __btrfs_setxattr(dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
+}
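
btrfs_is_valid_xattr() above accepts only the security., system., trusted. and user. prefixes, and anything else is rejected with -EOPNOTSUPP, so from userspace the new handlers are exercised through the ordinary xattr syscalls. A small illustrative userspace program is sketched below; the path and attribute name are invented.

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	char buf[64];
	ssize_t n;

	/* "user." is a supported namespace, so this reaches
	 * __btrfs_setxattr(); an unknown prefix such as "foo."
	 * would fail with EOPNOTSUPP. */
	if (setxattr("/mnt/btrfs/file", "user.comment",
		     "hello", strlen("hello"), 0) != 0)
		perror("setxattr");

	n = getxattr("/mnt/btrfs/file", "user.comment", buf, sizeof(buf));
	if (n >= 0)
		printf("user.comment = %.*s\n", (int)n, buf);
	else
		perror("getxattr");
	return 0;
}
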
diff --git a/fs/btrfs/xattr.h b/fs/btrfs/xattr.h
new file mode 100644
index 0000000..5b1d08f
--- /dev/null
+++ b/fs/btrfs/xattr.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2007 Red Hat.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __XATTR__
+#define __XATTR__
+
+#include <linux/xattr.h>
+
+extern struct xattr_handler btrfs_xattr_acl_access_handler;
+extern struct xattr_handler btrfs_xattr_acl_default_handler;
+extern struct xattr_handler *btrfs_xattr_handlers[];
+
+extern ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
+		void *buffer, size_t size);
+extern int __btrfs_setxattr(struct inode *inode, const char *name,
+		const void *value, size_t size, int flags);
+
+extern ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
+		void *buffer, size_t size);
+extern int btrfs_setxattr(struct dentry *dentry, const char *name,
+		const void *value, size_t size, int flags);
+extern int btrfs_removexattr(struct dentry *dentry, const char *name);
+
+#endif /* __XATTR__ */
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
new file mode 100644
index 0000000..ecfbce8
--- /dev/null
+++ b/fs/btrfs/zlib.c
@@ -0,0 +1,632 @@
+/*
+ * Copyright (C) 2008 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * Based on jffs2 zlib code:
+ * Copyright © 2001-2007 Red Hat, Inc.
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/zlib.h>
+#include <linux/zutil.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/bio.h>
+#include "compression.h"
+
+/* Plan: call deflate() with avail_in == *sourcelen,
+	avail_out = *dstlen - 12 and flush == Z_FINISH.
+	If it doesn't manage to finish,	call it again with
+	avail_in == 0 and avail_out set to the remaining 12
+	bytes for it to clean up.
+   Q: Is 12 bytes sufficient?
+*/
+#define STREAM_END_SPACE 12
+
+struct workspace {
+	z_stream inf_strm;
+	z_stream def_strm;
+	char *buf;
+	struct list_head list;
+};
+
+static LIST_HEAD(idle_workspace);
+static DEFINE_SPINLOCK(workspace_lock);
+static unsigned long num_workspace;
+static atomic_t alloc_workspace = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(workspace_wait);
+
+/*
+ * this finds an available zlib workspace or allocates a new one
+ * NULL or an ERR_PTR is returned if things go bad.
+ */
+static struct workspace *find_zlib_workspace(void)
+{
+	struct workspace *workspace;
+	int ret;
+	int cpus = num_online_cpus();
+
+again:
+	spin_lock(&workspace_lock);
+	if (!list_empty(&idle_workspace)) {
+		workspace = list_entry(idle_workspace.next, struct workspace,
+				       list);
+		list_del(&workspace->list);
+		num_workspace--;
+		spin_unlock(&workspace_lock);
+		return workspace;
+
+	}
+	spin_unlock(&workspace_lock);
+	if (atomic_read(&alloc_workspace) > cpus) {
+		DEFINE_WAIT(wait);
+		prepare_to_wait(&workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
+		if (atomic_read(&alloc_workspace) > cpus)
+			schedule();
+		finish_wait(&workspace_wait, &wait);
+		goto again;
+	}
+	atomic_inc(&alloc_workspace);
+	workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
+	if (!workspace) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize());
+	if (!workspace->def_strm.workspace) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
+	if (!workspace->inf_strm.workspace) {
+		ret = -ENOMEM;
+		goto fail_inflate;
+	}
+	workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+	if (!workspace->buf) {
+		ret = -ENOMEM;
+		goto fail_kmalloc;
+	}
+	return workspace;
+
+fail_kmalloc:
+	vfree(workspace->inf_strm.workspace);
+fail_inflate:
+	vfree(workspace->def_strm.workspace);
+fail:
+	kfree(workspace);
+	atomic_dec(&alloc_workspace);
+	wake_up(&workspace_wait);
+	return ERR_PTR(ret);
+}
+
+/*
+ * put a workspace struct back on the list or free it if we have enough
+ * idle ones sitting around
+ */
+static int free_workspace(struct workspace *workspace)
+{
+	spin_lock(&workspace_lock);
+	if (num_workspace < num_online_cpus()) {
+		list_add_tail(&workspace->list, &idle_workspace);
+		num_workspace++;
+		spin_unlock(&workspace_lock);
+		if (waitqueue_active(&workspace_wait))
+			wake_up(&workspace_wait);
+		return 0;
+	}
+	spin_unlock(&workspace_lock);
+	vfree(workspace->def_strm.workspace);
+	vfree(workspace->inf_strm.workspace);
+	kfree(workspace->buf);
+	kfree(workspace);
+
+	atomic_dec(&alloc_workspace);
+	if (waitqueue_active(&workspace_wait))
+		wake_up(&workspace_wait);
+	return 0;
+}
+
+/*
+ * cleanup function for module exit
+ */
+static void free_workspaces(void)
+{
+	struct workspace *workspace;
+	while (!list_empty(&idle_workspace)) {
+		workspace = list_entry(idle_workspace.next, struct workspace,
+				       list);
+		list_del(&workspace->list);
+		vfree(workspace->def_strm.workspace);
+		vfree(workspace->inf_strm.workspace);
+		kfree(workspace->buf);
+		kfree(workspace);
+		atomic_dec(&alloc_workspace);
+	}
+}
+
+/*
+ * given an address space and start/len, compress the bytes.
+ *
+ * pages are allocated to hold the compressed result and stored
+ * in 'pages'
+ *
+ * out_pages is used to return the number of pages allocated.  There
+ * may be pages allocated even if we return an error
+ *
+ * total_in is used to return the number of bytes actually read.  It
+ * may be smaller than len if we had to exit early because we
+ * ran out of room in the pages array or because we crossed the
+ * max_out threshold.
+ *
+ * total_out is used to return the total number of compressed bytes
+ *
+ * max_out tells us the max number of bytes that we're allowed to
+ * stuff into pages
+ */
+int btrfs_zlib_compress_pages(struct address_space *mapping,
+			      u64 start, unsigned long len,
+			      struct page **pages,
+			      unsigned long nr_dest_pages,
+			      unsigned long *out_pages,
+			      unsigned long *total_in,
+			      unsigned long *total_out,
+			      unsigned long max_out)
+{
+	int ret;
+	struct workspace *workspace;
+	char *data_in;
+	char *cpage_out;
+	int nr_pages = 0;
+	struct page *in_page = NULL;
+	struct page *out_page = NULL;
+	int out_written = 0;
+	int in_read = 0;
+	unsigned long bytes_left;
+
+	*out_pages = 0;
+	*total_out = 0;
+	*total_in = 0;
+
+	workspace = find_zlib_workspace();
+	if (!workspace)
+		return -1;
+
+	if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) {
+		printk(KERN_WARNING "deflateInit failed\n");
+		ret = -1;
+		goto out;
+	}
+
+	workspace->def_strm.total_in = 0;
+	workspace->def_strm.total_out = 0;
+
+	in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+	data_in = kmap(in_page);
+
+	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+	cpage_out = kmap(out_page);
+	pages[0] = out_page;
+	nr_pages = 1;
+
+	workspace->def_strm.next_in = data_in;
+	workspace->def_strm.next_out = cpage_out;
+	workspace->def_strm.avail_out = PAGE_CACHE_SIZE;
+	workspace->def_strm.avail_in = min(len, PAGE_CACHE_SIZE);
+
+	out_written = 0;
+	in_read = 0;
+
+	while (workspace->def_strm.total_in < len) {
+		ret = zlib_deflate(&workspace->def_strm, Z_SYNC_FLUSH);
+		if (ret != Z_OK) {
+			printk(KERN_DEBUG "btrfs deflate in loop returned %d\n",
+			       ret);
+			zlib_deflateEnd(&workspace->def_strm);
+			ret = -1;
+			goto out;
+		}
+
+		/* we're making it bigger, give up */
+		if (workspace->def_strm.total_in > 8192 &&
+		    workspace->def_strm.total_in <
+		    workspace->def_strm.total_out) {
+			ret = -1;
+			goto out;
+		}
+		/* we need another page for writing out.  Test this
+		 * before the total_in so we will pull in a new page for
+		 * the stream end if required
+		 */
+		if (workspace->def_strm.avail_out == 0) {
+			kunmap(out_page);
+			if (nr_pages == nr_dest_pages) {
+				out_page = NULL;
+				ret = -1;
+				goto out;
+			}
+			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+			cpage_out = kmap(out_page);
+			pages[nr_pages] = out_page;
+			nr_pages++;
+			workspace->def_strm.avail_out = PAGE_CACHE_SIZE;
+			workspace->def_strm.next_out = cpage_out;
+		}
+		/* we're all done */
+		if (workspace->def_strm.total_in >= len)
+			break;
+
+		/* we've read in a full page, get a new one */
+		if (workspace->def_strm.avail_in == 0) {
+			if (workspace->def_strm.total_out > max_out)
+				break;
+
+			bytes_left = len - workspace->def_strm.total_in;
+			kunmap(in_page);
+			page_cache_release(in_page);
+
+			start += PAGE_CACHE_SIZE;
+			in_page = find_get_page(mapping,
+						start >> PAGE_CACHE_SHIFT);
+			data_in = kmap(in_page);
+			workspace->def_strm.avail_in = min(bytes_left,
+							   PAGE_CACHE_SIZE);
+			workspace->def_strm.next_in = data_in;
+		}
+	}
+	workspace->def_strm.avail_in = 0;
+	ret = zlib_deflate(&workspace->def_strm, Z_FINISH);
+	zlib_deflateEnd(&workspace->def_strm);
+
+	if (ret != Z_STREAM_END) {
+		ret = -1;
+		goto out;
+	}
+
+	if (workspace->def_strm.total_out >= workspace->def_strm.total_in) {
+		ret = -1;
+		goto out;
+	}
+
+	ret = 0;
+	*total_out = workspace->def_strm.total_out;
+	*total_in = workspace->def_strm.total_in;
+out:
+	*out_pages = nr_pages;
+	if (out_page)
+		kunmap(out_page);
+
+	if (in_page) {
+		kunmap(in_page);
+		page_cache_release(in_page);
+	}
+	free_workspace(workspace);
+	return ret;
+}
+
+/*
+ * pages_in is an array of pages with compressed data.
+ *
+ * disk_start is the starting logical offset of this array in the file
+ *
+ * bvec is a bio_vec of pages from the file that we want to decompress into
+ *
+ * vcnt is the count of pages in the biovec
+ *
+ * srclen is the number of bytes in pages_in
+ *
+ * The basic idea is that we have a bio that was created by readpages.
+ * The pages in the bio are for the uncompressed data, and they may not
+ * be contiguous.  They all correspond to the range of bytes covered by
+ * the compressed extent.
+ */
+int btrfs_zlib_decompress_biovec(struct page **pages_in,
+			      u64 disk_start,
+			      struct bio_vec *bvec,
+			      int vcnt,
+			      size_t srclen)
+{
+	int ret = 0;
+	int wbits = MAX_WBITS;
+	struct workspace *workspace;
+	char *data_in;
+	size_t total_out = 0;
+	unsigned long page_bytes_left;
+	unsigned long page_in_index = 0;
+	unsigned long page_out_index = 0;
+	struct page *page_out;
+	unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) /
+					PAGE_CACHE_SIZE;
+	unsigned long buf_start;
+	unsigned long buf_offset;
+	unsigned long bytes;
+	unsigned long working_bytes;
+	unsigned long pg_offset;
+	unsigned long start_byte;
+	unsigned long current_buf_start;
+	char *kaddr;
+
+	workspace = find_zlib_workspace();
+	if (!workspace)
+		return -ENOMEM;
+
+	data_in = kmap(pages_in[page_in_index]);
+	workspace->inf_strm.next_in = data_in;
+	workspace->inf_strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE);
+	workspace->inf_strm.total_in = 0;
+
+	workspace->inf_strm.total_out = 0;
+	workspace->inf_strm.next_out = workspace->buf;
+	workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
+	page_out = bvec[page_out_index].bv_page;
+	page_bytes_left = PAGE_CACHE_SIZE;
+	pg_offset = 0;
+
+	/* If it's deflate, and it's got no preset dictionary, then
+	   we can tell zlib to skip the adler32 check. */
+	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
+	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
+	    !(((data_in[0]<<8) + data_in[1]) % 31)) {
+
+		wbits = -((data_in[0] >> 4) + 8);
+		workspace->inf_strm.next_in += 2;
+		workspace->inf_strm.avail_in -= 2;
+	}
+
+	if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) {
+		printk(KERN_WARNING "inflateInit failed\n");
+		ret = -1;
+		goto out;
+	}
+	while (workspace->inf_strm.total_in < srclen) {
+		ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH);
+		if (ret != Z_OK && ret != Z_STREAM_END)
+			break;
+		/*
+		 * buf_start is the byte offset, in the decompressed stream,
+		 * at which the data in our workspace buffer starts
+		 */
+		buf_start = total_out;
+
+		/* total_out is the offset just past the end of the
+		 * data in the workspace buffer */
+		total_out = workspace->inf_strm.total_out;
+
+		working_bytes = total_out - buf_start;
+
+		/*
+		 * start_byte is the offset of the first byte of the page we're
+		 * currently copying into, relative to the start of the
+		 * decompressed data.
+		 */
+		start_byte = page_offset(page_out) - disk_start;
+
+		if (working_bytes == 0) {
+			/* we didn't make progress in this inflate
+			 * call, we're done
+			 */
+			if (ret != Z_STREAM_END)
+				ret = -1;
+			break;
+		}
+
+		/* we haven't yet hit data corresponding to this page */
+		if (total_out <= start_byte)
+			goto next;
+
+		/*
+		 * the start of the data we care about is offset into
+		 * the middle of our working buffer
+		 */
+		if (total_out > start_byte && buf_start < start_byte) {
+			buf_offset = start_byte - buf_start;
+			working_bytes -= buf_offset;
+		} else {
+			buf_offset = 0;
+		}
+		current_buf_start = buf_start;
+
+		/* copy bytes from the working buffer into the pages */
+		while (working_bytes > 0) {
+			bytes = min(PAGE_CACHE_SIZE - pg_offset,
+				    PAGE_CACHE_SIZE - buf_offset);
+			bytes = min(bytes, working_bytes);
+			kaddr = kmap_atomic(page_out, KM_USER0);
+			memcpy(kaddr + pg_offset, workspace->buf + buf_offset,
+			       bytes);
+			kunmap_atomic(kaddr, KM_USER0);
+			flush_dcache_page(page_out);
+
+			pg_offset += bytes;
+			page_bytes_left -= bytes;
+			buf_offset += bytes;
+			working_bytes -= bytes;
+			current_buf_start += bytes;
+
+			/* check if we need to pick another page */
+			if (page_bytes_left == 0) {
+				page_out_index++;
+				if (page_out_index >= vcnt) {
+					ret = 0;
+					goto done;
+				}
+
+				page_out = bvec[page_out_index].bv_page;
+				pg_offset = 0;
+				page_bytes_left = PAGE_CACHE_SIZE;
+				start_byte = page_offset(page_out) - disk_start;
+
+				/*
+				 * make sure our new page is covered by this
+				 * working buffer
+				 */
+				if (total_out <= start_byte)
+					goto next;
+
+				/* the next page in the biovec might not
+				 * be adjacent to the last page, but it
+				 * might still be found inside this working
+				 * buffer.  bump our offset pointer
+				 */
+				if (total_out > start_byte &&
+				    current_buf_start < start_byte) {
+					buf_offset = start_byte - buf_start;
+					working_bytes = total_out - start_byte;
+					current_buf_start = buf_start +
+						buf_offset;
+				}
+			}
+		}
+next:
+		workspace->inf_strm.next_out = workspace->buf;
+		workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
+
+		if (workspace->inf_strm.avail_in == 0) {
+			unsigned long tmp;
+			kunmap(pages_in[page_in_index]);
+			page_in_index++;
+			if (page_in_index >= total_pages_in) {
+				data_in = NULL;
+				break;
+			}
+			data_in = kmap(pages_in[page_in_index]);
+			workspace->inf_strm.next_in = data_in;
+			tmp = srclen - workspace->inf_strm.total_in;
+			workspace->inf_strm.avail_in = min(tmp,
+							   PAGE_CACHE_SIZE);
+		}
+	}
+	if (ret != Z_STREAM_END)
+		ret = -1;
+	else
+		ret = 0;
+done:
+	zlib_inflateEnd(&workspace->inf_strm);
+	if (data_in)
+		kunmap(pages_in[page_in_index]);
+out:
+	free_workspace(workspace);
+	return ret;
+}
+
+/*
+ * a less complex decompression routine.  Our compressed data fits in a
+ * single page, and we want to read a single page out of it.
+ * start_byte tells us the offset into the decompressed data we're interested in
+ */
+int btrfs_zlib_decompress(unsigned char *data_in,
+			  struct page *dest_page,
+			  unsigned long start_byte,
+			  size_t srclen, size_t destlen)
+{
+	int ret = 0;
+	int wbits = MAX_WBITS;
+	struct workspace *workspace;
+	unsigned long bytes_left = destlen;
+	unsigned long total_out = 0;
+	char *kaddr;
+
+	if (destlen > PAGE_CACHE_SIZE)
+		return -ENOMEM;
+
+	workspace = find_zlib_workspace();
+	if (!workspace)
+		return -ENOMEM;
+
+	workspace->inf_strm.next_in = data_in;
+	workspace->inf_strm.avail_in = srclen;
+	workspace->inf_strm.total_in = 0;
+
+	workspace->inf_strm.next_out = workspace->buf;
+	workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
+	workspace->inf_strm.total_out = 0;
+	/* If it's deflate, and it's got no preset dictionary, then
+	   we can tell zlib to skip the adler32 check. */
+	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
+	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
+	    !(((data_in[0]<<8) + data_in[1]) % 31)) {
+
+		wbits = -((data_in[0] >> 4) + 8);
+		workspace->inf_strm.next_in += 2;
+		workspace->inf_strm.avail_in -= 2;
+	}
+
+	if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) {
+		printk(KERN_WARNING "inflateInit failed\n");
+		ret = -1;
+		goto out;
+	}
+
+	while (bytes_left > 0) {
+		unsigned long buf_start;
+		unsigned long buf_offset;
+		unsigned long bytes;
+		unsigned long pg_offset = 0;
+
+		ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH);
+		if (ret != Z_OK && ret != Z_STREAM_END)
+			break;
+
+		buf_start = total_out;
+		total_out = workspace->inf_strm.total_out;
+
+		if (total_out == buf_start) {
+			ret = -1;
+			break;
+		}
+
+		if (total_out <= start_byte)
+			goto next;
+
+		if (total_out > start_byte && buf_start < start_byte)
+			buf_offset = start_byte - buf_start;
+		else
+			buf_offset = 0;
+
+		bytes = min(PAGE_CACHE_SIZE - pg_offset,
+			    PAGE_CACHE_SIZE - buf_offset);
+		bytes = min(bytes, bytes_left);
+
+		kaddr = kmap_atomic(dest_page, KM_USER0);
+		memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
+		kunmap_atomic(kaddr, KM_USER0);
+
+		pg_offset += bytes;
+		bytes_left -= bytes;
+next:
+		workspace->inf_strm.next_out = workspace->buf;
+		workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
+	}
+
+	if (ret != Z_STREAM_END && bytes_left != 0)
+		ret = -1;
+	else
+		ret = 0;
+
+	zlib_inflateEnd(&workspace->inf_strm);
+out:
+	free_workspace(workspace);
+	return ret;
+}
+
+void btrfs_zlib_exit(void)
+{
+    free_workspaces();
+}
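
Both decompression paths above peek at the first two bytes of the stream: a valid zlib header with the deflate method and no preset dictionary lets the code strip the 2-byte header and inflate the remainder as a raw deflate stream (negative window bits), skipping the adler32 check. A userspace zlib sketch of the same header test and raw-inflate setup is shown below; buffer sizes and the sample data are arbitrary, and 0x20 stands in for the PRESET_DICT flag bit.

#include <stdio.h>
#include <zlib.h>

int main(void)
{
	unsigned char src[] = "btrfs zlib header detection example";
	unsigned char comp[256], out[256];
	z_stream def = {0}, inf = {0};
	uLong clen;
	int wbits = MAX_WBITS;

	deflateInit(&def, 3);			/* same level the kernel code uses */
	def.next_in = src;   def.avail_in = sizeof(src);
	def.next_out = comp; def.avail_out = sizeof(comp);
	deflate(&def, Z_FINISH);
	clen = def.total_out;
	deflateEnd(&def);

	inf.next_in = comp;
	inf.avail_in = clen;
	/* same test as above: zlib header present, deflate method,
	 * no preset dictionary -> drop the header, use raw deflate */
	if (clen > 2 && !(comp[1] & 0x20) &&
	    (comp[0] & 0x0f) == Z_DEFLATED &&
	    !(((comp[0] << 8) + comp[1]) % 31)) {
		wbits = -((comp[0] >> 4) + 8);
		inf.next_in += 2;
		inf.avail_in -= 2;
	}
	inflateInit2(&inf, wbits);
	inf.next_out = out; inf.avail_out = sizeof(out);
	inflate(&inf, Z_FINISH);
	inflateEnd(&inf);

	printf("round trip: %s\n", out);
	return 0;
}
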
diff --git a/fs/buffer.c b/fs/buffer.c
index c26da78..b58208f 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -203,10 +203,25 @@
  * happen on bdev until thaw_bdev() is called.
  * If a superblock is found on this device, we take the s_umount semaphore
  * on it to make sure nobody unmounts until the snapshot creation is done.
+ * The reference counter (bd_fsfreeze_count) guarantees that only the last
+ * unfreeze process can actually unfreeze the frozen filesystem when multiple
+ * freeze requests arrive simultaneously. It is incremented in freeze_bdev()
+ * and decremented in thaw_bdev(); only when it reaches 0 does thaw_bdev()
+ * actually unfreeze.
  */
 struct super_block *freeze_bdev(struct block_device *bdev)
 {
 	struct super_block *sb;
+	int error = 0;
+
+	mutex_lock(&bdev->bd_fsfreeze_mutex);
+	if (bdev->bd_fsfreeze_count > 0) {
+		bdev->bd_fsfreeze_count++;
+		sb = get_super(bdev);
+		mutex_unlock(&bdev->bd_fsfreeze_mutex);
+		return sb;
+	}
+	bdev->bd_fsfreeze_count++;
 
 	down(&bdev->bd_mount_sem);
 	sb = get_super(bdev);
@@ -221,11 +236,24 @@
 
 		sync_blockdev(sb->s_bdev);
 
-		if (sb->s_op->write_super_lockfs)
-			sb->s_op->write_super_lockfs(sb);
+		if (sb->s_op->freeze_fs) {
+			error = sb->s_op->freeze_fs(sb);
+			if (error) {
+				printk(KERN_ERR
+					"VFS:Filesystem freeze failed\n");
+				sb->s_frozen = SB_UNFROZEN;
+				drop_super(sb);
+				up(&bdev->bd_mount_sem);
+				bdev->bd_fsfreeze_count--;
+				mutex_unlock(&bdev->bd_fsfreeze_mutex);
+				return ERR_PTR(error);
+			}
+		}
 	}
 
 	sync_blockdev(bdev);
+	mutex_unlock(&bdev->bd_fsfreeze_mutex);
+
 	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
 }
 EXPORT_SYMBOL(freeze_bdev);
@@ -237,20 +265,48 @@
  *
  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
  */
-void thaw_bdev(struct block_device *bdev, struct super_block *sb)
+int thaw_bdev(struct block_device *bdev, struct super_block *sb)
 {
+	int error = 0;
+
+	mutex_lock(&bdev->bd_fsfreeze_mutex);
+	if (!bdev->bd_fsfreeze_count) {
+		mutex_unlock(&bdev->bd_fsfreeze_mutex);
+		return -EINVAL;
+	}
+
+	bdev->bd_fsfreeze_count--;
+	if (bdev->bd_fsfreeze_count > 0) {
+		if (sb)
+			drop_super(sb);
+		mutex_unlock(&bdev->bd_fsfreeze_mutex);
+		return 0;
+	}
+
 	if (sb) {
 		BUG_ON(sb->s_bdev != bdev);
-
-		if (sb->s_op->unlockfs)
-			sb->s_op->unlockfs(sb);
-		sb->s_frozen = SB_UNFROZEN;
-		smp_wmb();
-		wake_up(&sb->s_wait_unfrozen);
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (sb->s_op->unfreeze_fs) {
+				error = sb->s_op->unfreeze_fs(sb);
+				if (error) {
+					printk(KERN_ERR
+						"VFS:Filesystem thaw failed\n");
+					sb->s_frozen = SB_FREEZE_TRANS;
+					bdev->bd_fsfreeze_count++;
+					mutex_unlock(&bdev->bd_fsfreeze_mutex);
+					return error;
+				}
+			}
+			sb->s_frozen = SB_UNFROZEN;
+			smp_wmb();
+			wake_up(&sb->s_wait_unfrozen);
+		}
 		drop_super(sb);
 	}
 
 	up(&bdev->bd_mount_sem);
+	mutex_unlock(&bdev->bd_fsfreeze_mutex);
+	return 0;
 }
 EXPORT_SYMBOL(thaw_bdev);
 
@@ -3187,7 +3243,7 @@
  * Use of bdflush() is deprecated and will be removed in a future kernel.
  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
  */
-asmlinkage long sys_bdflush(int func, long data)
+SYSCALL_DEFINE2(bdflush, int, func, long, data)
 {
 	static int msg_count;
 
diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c
index 81b7771..43c96ce 100644
--- a/fs/coda/sysctl.c
+++ b/fs/coda/sysctl.c
@@ -11,7 +11,9 @@
 
 #include "coda_int.h"
 
+#ifdef CONFIG_SYSCTL
 static struct ctl_table_header *fs_table_header;
+#endif
 
 static ctl_table coda_table[] = {
 	{
@@ -41,6 +43,7 @@
 	{}
 };
 
+#ifdef CONFIG_SYSCTL
 static ctl_table fs_table[] = {
 	{
 		.ctl_name	= CTL_UNNUMBERED,
@@ -50,7 +53,7 @@
 	},
 	{}
 };
-
+#endif
 
 void coda_sysctl_init(void)
 {
diff --git a/fs/compat.c b/fs/compat.c
index 30f2faa..65a070e 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1709,7 +1709,7 @@
 }
 
 #ifdef HAVE_SET_RESTORE_SIGMASK
-asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp,
+static long do_compat_pselect(int n, compat_ulong_t __user *inp,
 	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
 	struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
 	compat_size_t sigsetsize)
@@ -1775,8 +1775,8 @@
 				(compat_size_t __user *)(sig+sizeof(up))))
 			return -EFAULT;
 	}
-	return compat_sys_pselect7(n, inp, outp, exp, tsp, compat_ptr(up),
-					sigsetsize);
+	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
+				 sigsetsize);
 }
 
 asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
diff --git a/fs/dcache.c b/fs/dcache.c
index e88c23b..937df0f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1567,10 +1567,6 @@
 	spin_unlock(&dcache_lock);
 }
 
-#define do_switch(x,y) do { \
-	__typeof__ (x) __tmp = x; \
-	x = y; y = __tmp; } while (0)
-
 /*
  * When switching names, the actual string doesn't strictly have to
  * be preserved in the target - because we're dropping the target
@@ -1589,7 +1585,7 @@
 			/*
 			 * Both external: swap the pointers
 			 */
-			do_switch(target->d_name.name, dentry->d_name.name);
+			swap(target->d_name.name, dentry->d_name.name);
 		} else {
 			/*
 			 * dentry:internal, target:external.  Steal target's
@@ -1620,7 +1616,7 @@
 			return;
 		}
 	}
-	do_switch(dentry->d_name.len, target->d_name.len);
+	swap(dentry->d_name.len, target->d_name.len);
 }
 
 /*
@@ -1680,7 +1676,7 @@
 
 	/* Switch the names.. */
 	switch_names(dentry, target);
-	do_switch(dentry->d_name.hash, target->d_name.hash);
+	swap(dentry->d_name.hash, target->d_name.hash);
 
 	/* ... and switch the parents */
 	if (IS_ROOT(dentry)) {
@@ -1688,7 +1684,7 @@
 		target->d_parent = target;
 		INIT_LIST_HEAD(&target->d_u.d_child);
 	} else {
-		do_switch(dentry->d_parent, target->d_parent);
+		swap(dentry->d_parent, target->d_parent);
 
 		/* And add them back to the (new) parent lists */
 		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
@@ -1789,7 +1785,7 @@
 	struct dentry *dparent, *aparent;
 
 	switch_names(dentry, anon);
-	do_switch(dentry->d_name.hash, anon->d_name.hash);
+	swap(dentry->d_name.hash, anon->d_name.hash);
 
 	dparent = dentry->d_parent;
 	aparent = anon->d_parent;
@@ -2096,7 +2092,7 @@
  *		return NULL;
  *	}
  */
-asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
+SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
 {
 	int error;
 	struct path pwd, root;
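
The dcache change above drops the local do_switch() macro in favour of the generic swap() helper from <linux/kernel.h>, which performs the same typeof-based three-step exchange. A sketch of that pattern, not necessarily the exact in-tree definition:

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

/* e.g. swap(dentry->d_name.hash, target->d_name.hash); */
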
diff --git a/fs/dcookies.c b/fs/dcookies.c
index 180e9fe..a21cabd 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -145,7 +145,7 @@
 /* And here is where the userspace process can look up the cookie value
  * to retrieve the path.
  */
-asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len)
+SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len)
 {
 	unsigned long cookie = (unsigned long)cookie64;
 	int err = -EINVAL;
@@ -198,7 +198,13 @@
 	mutex_unlock(&dcookie_mutex);
 	return err;
 }
-
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_lookup_dcookie(u64 cookie64, long buf, long len)
+{
+	return SYSC_lookup_dcookie(cookie64, (char __user *) buf, (size_t) len);
+}
+SYSCALL_ALIAS(sys_lookup_dcookie, SyS_lookup_dcookie);
+#endif
 
 static int dcookie_init(void)
 {
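
sys_lookup_dcookie takes a 64-bit first argument, so it uses the bare SYSCALL_DEFINE()/SYSCALL_ALIAS() form and spells out the SyS_ wrapper by hand, while fixed-arity conversions elsewhere in this patch (bdflush, getcwd) let SYSCALL_DEFINEn() generate the same shape. Roughly, with CONFIG_HAVE_SYSCALL_WRAPPERS enabled, SYSCALL_DEFINE2(bdflush, int, func, long, data) expands along the following lines; this is an approximation, not the literal macro output.

static long SYSC_bdflush(int func, long data);

/* register arguments arrive as longs; the wrapper narrows or
 * sign-extends them before calling the typed body */
asmlinkage long SyS_bdflush(long func, long data)
{
	return SYSC_bdflush((int) func, (long) data);
}
SYSCALL_ALIAS(sys_bdflush, SyS_bdflush);

static long SYSC_bdflush(int func, long data)
{
	/* original function body goes here */
	return 0;
}
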
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 159a5ef..33a9012 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -294,6 +294,38 @@
 }
 EXPORT_SYMBOL_GPL(debugfs_create_x32);
 
+
+static int debugfs_size_t_set(void *data, u64 val)
+{
+	*(size_t *)data = val;
+	return 0;
+}
+static int debugfs_size_t_get(void *data, u64 *val)
+{
+	*val = *(size_t *)data;
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_size_t, debugfs_size_t_get, debugfs_size_t_set,
+			"%llu\n");	/* %llu and %zu are more or less the same */
+
+/**
+ * debugfs_create_size_t - create a debugfs file that is used to read and write a size_t value
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file.  This should be a
+ *          directory dentry if set.  If this parameter is %NULL, then the
+ *          file will be created in the root of the debugfs filesystem.
+ * @value: a pointer to the variable that the file should read from and
+ *         write to.
+ */
+struct dentry *debugfs_create_size_t(const char *name, mode_t mode,
+				     struct dentry *parent, size_t *value)
+{
+	return debugfs_create_file(name, mode, parent, value, &fops_size_t);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_size_t);
+
+
 static ssize_t read_file_bool(struct file *file, char __user *user_buf,
 			      size_t count, loff_t *ppos)
 {
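
debugfs_create_size_t() added above mirrors the existing u8/u16/u32/x32 helpers: a DEFINE_SIMPLE_ATTRIBUTE fops pair wrapped around debugfs_create_file(). A minimal sketch of a module exposing one size_t knob is given below; the directory, file and variable names are invented.

#include <linux/module.h>
#include <linux/debugfs.h>

static size_t my_threshold = 4096;
static struct dentry *my_dir;

static int __init my_debugfs_init(void)
{
	my_dir = debugfs_create_dir("my_driver", NULL);
	if (!my_dir)
		return -ENOMEM;
	/* exposes /sys/kernel/debug/my_driver/threshold, read/write */
	debugfs_create_size_t("threshold", 0644, my_dir, &my_threshold);
	return 0;
}

static void __exit my_debugfs_exit(void)
{
	debugfs_remove_recursive(my_dir);
}

module_init(my_debugfs_init);
module_exit(my_debugfs_exit);
MODULE_LICENSE("GPL");
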
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 2f107d1..1d1d274 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -1,7 +1,7 @@
 /******************************************************************************
 *******************************************************************************
 **
-**  Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2005-2009 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -25,19 +25,6 @@
 
 static struct dentry *dlm_root;
 
-struct rsb_iter {
-	int entry;
-	int format;
-	int header;
-	struct dlm_ls *ls;
-	struct list_head *next;
-	struct dlm_rsb *rsb;
-};
-
-/*
- * dump all rsb's in the lockspace hash table
- */
-
 static char *print_lockmode(int mode)
 {
 	switch (mode) {
@@ -60,13 +47,13 @@
 	}
 }
 
-static void print_format1_lock(struct seq_file *s, struct dlm_lkb *lkb,
-			       struct dlm_rsb *res)
+static int print_format1_lock(struct seq_file *s, struct dlm_lkb *lkb,
+			      struct dlm_rsb *res)
 {
 	seq_printf(s, "%08x %s", lkb->lkb_id, print_lockmode(lkb->lkb_grmode));
 
-	if (lkb->lkb_status == DLM_LKSTS_CONVERT
-	    || lkb->lkb_status == DLM_LKSTS_WAITING)
+	if (lkb->lkb_status == DLM_LKSTS_CONVERT ||
+	    lkb->lkb_status == DLM_LKSTS_WAITING)
 		seq_printf(s, " (%s)", print_lockmode(lkb->lkb_rqmode));
 
 	if (lkb->lkb_nodeid) {
@@ -80,33 +67,42 @@
 	if (lkb->lkb_wait_type)
 		seq_printf(s, " wait_type: %d", lkb->lkb_wait_type);
 
-	seq_printf(s, "\n");
+	return seq_printf(s, "\n");
 }
 
 static int print_format1(struct dlm_rsb *res, struct seq_file *s)
 {
 	struct dlm_lkb *lkb;
 	int i, lvblen = res->res_ls->ls_lvblen, recover_list, root_list;
+	int rv;
 
 	lock_rsb(res);
 
-	seq_printf(s, "\nResource %p Name (len=%d) \"", res, res->res_length);
+	rv = seq_printf(s, "\nResource %p Name (len=%d) \"",
+			res, res->res_length);
+	if (rv)
+		goto out;
+
 	for (i = 0; i < res->res_length; i++) {
 		if (isprint(res->res_name[i]))
 			seq_printf(s, "%c", res->res_name[i]);
 		else
 			seq_printf(s, "%c", '.');
 	}
+
 	if (res->res_nodeid > 0)
-		seq_printf(s, "\"  \nLocal Copy, Master is node %d\n",
-			   res->res_nodeid);
+		rv = seq_printf(s, "\"  \nLocal Copy, Master is node %d\n",
+				res->res_nodeid);
 	else if (res->res_nodeid == 0)
-		seq_printf(s, "\"  \nMaster Copy\n");
+		rv = seq_printf(s, "\"  \nMaster Copy\n");
 	else if (res->res_nodeid == -1)
-		seq_printf(s, "\"  \nLooking up master (lkid %x)\n",
-			   res->res_first_lkid);
+		rv = seq_printf(s, "\"  \nLooking up master (lkid %x)\n",
+			   	res->res_first_lkid);
 	else
-		seq_printf(s, "\"  \nInvalid master %d\n", res->res_nodeid);
+		rv = seq_printf(s, "\"  \nInvalid master %d\n",
+				res->res_nodeid);
+	if (rv)
+		goto out;
 
 	/* Print the LVB: */
 	if (res->res_lvbptr) {
@@ -119,52 +115,66 @@
 		}
 		if (rsb_flag(res, RSB_VALNOTVALID))
 			seq_printf(s, " (INVALID)");
-		seq_printf(s, "\n");
+		rv = seq_printf(s, "\n");
+		if (rv)
+			goto out;
 	}
 
 	root_list = !list_empty(&res->res_root_list);
 	recover_list = !list_empty(&res->res_recover_list);
 
 	if (root_list || recover_list) {
-		seq_printf(s, "Recovery: root %d recover %d flags %lx "
-			   "count %d\n", root_list, recover_list,
-			   res->res_flags, res->res_recover_locks_count);
+		rv = seq_printf(s, "Recovery: root %d recover %d flags %lx "
+				"count %d\n", root_list, recover_list,
+			   	res->res_flags, res->res_recover_locks_count);
+		if (rv)
+			goto out;
 	}
 
 	/* Print the locks attached to this resource */
 	seq_printf(s, "Granted Queue\n");
-	list_for_each_entry(lkb, &res->res_grantqueue, lkb_statequeue)
-		print_format1_lock(s, lkb, res);
+	list_for_each_entry(lkb, &res->res_grantqueue, lkb_statequeue) {
+		rv = print_format1_lock(s, lkb, res);
+		if (rv)
+			goto out;
+	}
 
 	seq_printf(s, "Conversion Queue\n");
-	list_for_each_entry(lkb, &res->res_convertqueue, lkb_statequeue)
-		print_format1_lock(s, lkb, res);
+	list_for_each_entry(lkb, &res->res_convertqueue, lkb_statequeue) {
+		rv = print_format1_lock(s, lkb, res);
+		if (rv)
+			goto out;
+	}
 
 	seq_printf(s, "Waiting Queue\n");
-	list_for_each_entry(lkb, &res->res_waitqueue, lkb_statequeue)
-		print_format1_lock(s, lkb, res);
+	list_for_each_entry(lkb, &res->res_waitqueue, lkb_statequeue) {
+		rv = print_format1_lock(s, lkb, res);
+		if (rv)
+			goto out;
+	}
 
 	if (list_empty(&res->res_lookup))
 		goto out;
 
 	seq_printf(s, "Lookup Queue\n");
 	list_for_each_entry(lkb, &res->res_lookup, lkb_rsb_lookup) {
-		seq_printf(s, "%08x %s", lkb->lkb_id,
-			   print_lockmode(lkb->lkb_rqmode));
+		rv = seq_printf(s, "%08x %s", lkb->lkb_id,
+				print_lockmode(lkb->lkb_rqmode));
 		if (lkb->lkb_wait_type)
 			seq_printf(s, " wait_type: %d", lkb->lkb_wait_type);
-		seq_printf(s, "\n");
+		rv = seq_printf(s, "\n");
 	}
  out:
 	unlock_rsb(res);
-	return 0;
+	return rv;
 }
 
-static void print_format2_lock(struct seq_file *s, struct dlm_lkb *lkb,
-			       struct dlm_rsb *r)
+static int print_format2_lock(struct seq_file *s, struct dlm_lkb *lkb,
+			      struct dlm_rsb *r)
 {
 	u64 xid = 0;
 	u64 us;
+	int rv;
 
 	if (lkb->lkb_flags & DLM_IFL_USER) {
 		if (lkb->lkb_ua)
@@ -177,69 +187,82 @@
 	/* id nodeid remid pid xid exflags flags sts grmode rqmode time_us
 	   r_nodeid r_len r_name */
 
-	seq_printf(s, "%x %d %x %u %llu %x %x %d %d %d %llu %u %d \"%s\"\n",
-		   lkb->lkb_id,
-		   lkb->lkb_nodeid,
-		   lkb->lkb_remid,
-		   lkb->lkb_ownpid,
-		   (unsigned long long)xid,
-		   lkb->lkb_exflags,
-		   lkb->lkb_flags,
-		   lkb->lkb_status,
-		   lkb->lkb_grmode,
-		   lkb->lkb_rqmode,
-		   (unsigned long long)us,
-		   r->res_nodeid,
-		   r->res_length,
-		   r->res_name);
+	rv = seq_printf(s, "%x %d %x %u %llu %x %x %d %d %d %llu %u %d \"%s\"\n",
+			lkb->lkb_id,
+			lkb->lkb_nodeid,
+			lkb->lkb_remid,
+			lkb->lkb_ownpid,
+			(unsigned long long)xid,
+			lkb->lkb_exflags,
+			lkb->lkb_flags,
+			lkb->lkb_status,
+			lkb->lkb_grmode,
+			lkb->lkb_rqmode,
+			(unsigned long long)us,
+			r->res_nodeid,
+			r->res_length,
+			r->res_name);
+	return rv;
 }
 
 static int print_format2(struct dlm_rsb *r, struct seq_file *s)
 {
 	struct dlm_lkb *lkb;
+	int rv = 0;
 
 	lock_rsb(r);
 
-	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
-		print_format2_lock(s, lkb, r);
+	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
+		rv = print_format2_lock(s, lkb, r);
+		if (rv)
+			goto out;
+	}
 
-	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
-		print_format2_lock(s, lkb, r);
+	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
+		rv = print_format2_lock(s, lkb, r);
+		if (rv)
+			goto out;
+	}
 
-	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
-		print_format2_lock(s, lkb, r);
-
+	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue) {
+		rv = print_format2_lock(s, lkb, r);
+		if (rv)
+			goto out;
+	}
+ out:
 	unlock_rsb(r);
-	return 0;
+	return rv;
 }
 
-static void print_format3_lock(struct seq_file *s, struct dlm_lkb *lkb,
-			       int rsb_lookup)
+static int print_format3_lock(struct seq_file *s, struct dlm_lkb *lkb,
+			      int rsb_lookup)
 {
 	u64 xid = 0;
+	int rv;
 
 	if (lkb->lkb_flags & DLM_IFL_USER) {
 		if (lkb->lkb_ua)
 			xid = lkb->lkb_ua->xid;
 	}
 
-	seq_printf(s, "lkb %x %d %x %u %llu %x %x %d %d %d %d %d %d %u %llu %llu\n",
-		   lkb->lkb_id,
-		   lkb->lkb_nodeid,
-		   lkb->lkb_remid,
-		   lkb->lkb_ownpid,
-		   (unsigned long long)xid,
-		   lkb->lkb_exflags,
-		   lkb->lkb_flags,
-		   lkb->lkb_status,
-		   lkb->lkb_grmode,
-		   lkb->lkb_rqmode,
-		   lkb->lkb_highbast,
-		   rsb_lookup,
-		   lkb->lkb_wait_type,
-		   lkb->lkb_lvbseq,
-		   (unsigned long long)ktime_to_ns(lkb->lkb_timestamp),
-		   (unsigned long long)ktime_to_ns(lkb->lkb_time_bast));
+	rv = seq_printf(s, "lkb %x %d %x %u %llu %x %x %d %d %d %d %d %d %u %llu %llu\n",
+			lkb->lkb_id,
+			lkb->lkb_nodeid,
+			lkb->lkb_remid,
+			lkb->lkb_ownpid,
+			(unsigned long long)xid,
+			lkb->lkb_exflags,
+			lkb->lkb_flags,
+			lkb->lkb_status,
+			lkb->lkb_grmode,
+			lkb->lkb_rqmode,
+			lkb->lkb_highbast,
+			rsb_lookup,
+			lkb->lkb_wait_type,
+			lkb->lkb_lvbseq,
+			(unsigned long long)ktime_to_ns(lkb->lkb_timestamp),
+			(unsigned long long)ktime_to_ns(lkb->lkb_time_bast));
+	return rv;
 }
 
 static int print_format3(struct dlm_rsb *r, struct seq_file *s)
@@ -247,18 +270,21 @@
 	struct dlm_lkb *lkb;
 	int i, lvblen = r->res_ls->ls_lvblen;
 	int print_name = 1;
+	int rv;
 
 	lock_rsb(r);
 
-	seq_printf(s, "rsb %p %d %x %lx %d %d %u %d ",
-		   r,
-		   r->res_nodeid,
-		   r->res_first_lkid,
-		   r->res_flags,
-		   !list_empty(&r->res_root_list),
-		   !list_empty(&r->res_recover_list),
-		   r->res_recover_locks_count,
-		   r->res_length);
+	rv = seq_printf(s, "rsb %p %d %x %lx %d %d %u %d ",
+			r,
+			r->res_nodeid,
+			r->res_first_lkid,
+			r->res_flags,
+			!list_empty(&r->res_root_list),
+			!list_empty(&r->res_recover_list),
+			r->res_recover_locks_count,
+			r->res_length);
+	if (rv)
+		goto out;
 
 	for (i = 0; i < r->res_length; i++) {
 		if (!isascii(r->res_name[i]) || !isprint(r->res_name[i]))
@@ -273,7 +299,9 @@
 		else
 			seq_printf(s, " %02x", (unsigned char)r->res_name[i]);
 	}
-	seq_printf(s, "\n");
+	rv = seq_printf(s, "\n");
+	if (rv)
+		goto out;
 
 	if (!r->res_lvbptr)
 		goto do_locks;
@@ -282,344 +310,294 @@
 
 	for (i = 0; i < lvblen; i++)
 		seq_printf(s, " %02x", (unsigned char)r->res_lvbptr[i]);
-	seq_printf(s, "\n");
+	rv = seq_printf(s, "\n");
+	if (rv)
+		goto out;
 
  do_locks:
-	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
-		print_format3_lock(s, lkb, 0);
+	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
+		rv = print_format3_lock(s, lkb, 0);
+		if (rv)
+			goto out;
+	}
 
-	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
-		print_format3_lock(s, lkb, 0);
+	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
+		rv = print_format3_lock(s, lkb, 0);
+		if (rv)
+			goto out;
+	}
 
-	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
-		print_format3_lock(s, lkb, 0);
+	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue) {
+		rv = print_format3_lock(s, lkb, 0);
+		if (rv)
+			goto out;
+	}
 
-	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
-		print_format3_lock(s, lkb, 1);
-
+	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup) {
+		rv = print_format3_lock(s, lkb, 1);
+		if (rv)
+			goto out;
+	}
+ out:
 	unlock_rsb(r);
-	return 0;
+	return rv;
 }
 
-static int rsb_iter_next(struct rsb_iter *ri)
+struct rsbtbl_iter {
+	struct dlm_rsb *rsb;
+	unsigned bucket;
+	int format;
+	int header;
+};
+
+/* seq_printf returns -1 if the buffer is full, and 0 otherwise.
+   If the buffer is full, seq_printf can be called again, but it
+   does nothing and just returns -1.  So, these printing routines
+   periodically check the return value to avoid wasting too much time
+   trying to print to a full buffer. */
+
+static int table_seq_show(struct seq_file *seq, void *iter_ptr)
 {
-	struct dlm_ls *ls = ri->ls;
-	int i;
-
-	if (!ri->next) {
- top:
-		/* Find the next non-empty hash bucket */
-		for (i = ri->entry; i < ls->ls_rsbtbl_size; i++) {
-			read_lock(&ls->ls_rsbtbl[i].lock);
-			if (!list_empty(&ls->ls_rsbtbl[i].list)) {
-				ri->next = ls->ls_rsbtbl[i].list.next;
-				ri->rsb = list_entry(ri->next, struct dlm_rsb,
-							res_hashchain);
-				dlm_hold_rsb(ri->rsb);
-				read_unlock(&ls->ls_rsbtbl[i].lock);
-				break;
-			}
-			read_unlock(&ls->ls_rsbtbl[i].lock);
-		}
-		ri->entry = i;
-
-		if (ri->entry >= ls->ls_rsbtbl_size)
-			return 1;
-	} else {
-		struct dlm_rsb *old = ri->rsb;
-		i = ri->entry;
-		read_lock(&ls->ls_rsbtbl[i].lock);
-		ri->next = ri->next->next;
-		if (ri->next->next == ls->ls_rsbtbl[i].list.next) {
-			/* End of list - move to next bucket */
-			ri->next = NULL;
-			ri->entry++;
-			read_unlock(&ls->ls_rsbtbl[i].lock);
-			dlm_put_rsb(old);
-			goto top;
-		}
-		ri->rsb = list_entry(ri->next, struct dlm_rsb, res_hashchain);
-		dlm_hold_rsb(ri->rsb);
-		read_unlock(&ls->ls_rsbtbl[i].lock);
-		dlm_put_rsb(old);
-	}
-
-	return 0;
-}
-
-static void rsb_iter_free(struct rsb_iter *ri)
-{
-	kfree(ri);
-}
-
-static struct rsb_iter *rsb_iter_init(struct dlm_ls *ls)
-{
-	struct rsb_iter *ri;
-
-	ri = kzalloc(sizeof *ri, GFP_KERNEL);
-	if (!ri)
-		return NULL;
-
-	ri->ls = ls;
-	ri->entry = 0;
-	ri->next = NULL;
-	ri->format = 1;
-
-	if (rsb_iter_next(ri)) {
-		rsb_iter_free(ri);
-		return NULL;
-	}
-
-	return ri;
-}
-
-static void *rsb_seq_start(struct seq_file *file, loff_t *pos)
-{
-	struct rsb_iter *ri;
-	loff_t n = *pos;
-
-	ri = rsb_iter_init(file->private);
-	if (!ri)
-		return NULL;
-
-	while (n--) {
-		if (rsb_iter_next(ri)) {
-			rsb_iter_free(ri);
-			return NULL;
-		}
-	}
-
-	return ri;
-}
-
-static void *rsb_seq_next(struct seq_file *file, void *iter_ptr, loff_t *pos)
-{
-	struct rsb_iter *ri = iter_ptr;
-
-	(*pos)++;
-
-	if (rsb_iter_next(ri)) {
-		rsb_iter_free(ri);
-		return NULL;
-	}
-
-	return ri;
-}
-
-static void rsb_seq_stop(struct seq_file *file, void *iter_ptr)
-{
-	/* nothing for now */
-}
-
-static int rsb_seq_show(struct seq_file *file, void *iter_ptr)
-{
-	struct rsb_iter *ri = iter_ptr;
+	struct rsbtbl_iter *ri = iter_ptr;
+	int rv = 0;
 
 	switch (ri->format) {
 	case 1:
-		print_format1(ri->rsb, file);
+		rv = print_format1(ri->rsb, seq);
 		break;
 	case 2:
 		if (ri->header) {
-			seq_printf(file, "id nodeid remid pid xid exflags "
-					 "flags sts grmode rqmode time_ms "
-					 "r_nodeid r_len r_name\n");
+			seq_printf(seq, "id nodeid remid pid xid exflags "
+					"flags sts grmode rqmode time_ms "
+					"r_nodeid r_len r_name\n");
 			ri->header = 0;
 		}
-		print_format2(ri->rsb, file);
+		rv = print_format2(ri->rsb, seq);
 		break;
 	case 3:
 		if (ri->header) {
-			seq_printf(file, "version rsb 1.1 lvb 1.1 lkb 1.1\n");
+			seq_printf(seq, "version rsb 1.1 lvb 1.1 lkb 1.1\n");
 			ri->header = 0;
 		}
-		print_format3(ri->rsb, file);
+		rv = print_format3(ri->rsb, seq);
 		break;
 	}
 
-	return 0;
+	return rv;
 }
 
-static struct seq_operations rsb_seq_ops = {
-	.start = rsb_seq_start,
-	.next  = rsb_seq_next,
-	.stop  = rsb_seq_stop,
-	.show  = rsb_seq_show,
+static struct seq_operations format1_seq_ops;
+static struct seq_operations format2_seq_ops;
+static struct seq_operations format3_seq_ops;
+
+static void *table_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct dlm_ls *ls = seq->private;
+	struct rsbtbl_iter *ri;
+	struct dlm_rsb *r;
+	loff_t n = *pos;
+	unsigned bucket, entry;
+
+	bucket = n >> 32;
+	entry = n & ((1LL << 32) - 1);
+
+	if (bucket >= ls->ls_rsbtbl_size)
+		return NULL;
+
+	ri = kzalloc(sizeof(struct rsbtbl_iter), GFP_KERNEL);
+	if (!ri)
+		return NULL;
+	if (n == 0)
+		ri->header = 1;
+	if (seq->op == &format1_seq_ops)
+		ri->format = 1;
+	if (seq->op == &format2_seq_ops)
+		ri->format = 2;
+	if (seq->op == &format3_seq_ops)
+		ri->format = 3;
+
+	spin_lock(&ls->ls_rsbtbl[bucket].lock);
+	if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
+		list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list,
+				    res_hashchain) {
+			if (!entry--) {
+				dlm_hold_rsb(r);
+				ri->rsb = r;
+				ri->bucket = bucket;
+				spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+				return ri;
+			}
+		}
+	}
+	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+
+	/*
+	 * move to the first rsb in the next non-empty bucket
+	 */
+
+	/* zero the entry */
+	n &= ~((1LL << 32) - 1);
+
+	while (1) {
+		bucket++;
+		n += 1LL << 32;
+
+		if (bucket >= ls->ls_rsbtbl_size) {
+			kfree(ri);
+			return NULL;
+		}
+
+		spin_lock(&ls->ls_rsbtbl[bucket].lock);
+		if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
+			r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
+					     struct dlm_rsb, res_hashchain);
+			dlm_hold_rsb(r);
+			ri->rsb = r;
+			ri->bucket = bucket;
+			spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+			*pos = n;
+			return ri;
+		}
+		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+	}
+}
+
+static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
+{
+	struct dlm_ls *ls = seq->private;
+	struct rsbtbl_iter *ri = iter_ptr;
+	struct list_head *next;
+	struct dlm_rsb *r, *rp;
+	loff_t n = *pos;
+	unsigned bucket;
+
+	bucket = n >> 32;
+
+	/*
+	 * move to the next rsb in the same bucket
+	 */
+
+	spin_lock(&ls->ls_rsbtbl[bucket].lock);
+	rp = ri->rsb;
+	next = rp->res_hashchain.next;
+
+	if (next != &ls->ls_rsbtbl[bucket].list) {
+		r = list_entry(next, struct dlm_rsb, res_hashchain);
+		dlm_hold_rsb(r);
+		ri->rsb = r;
+		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+		dlm_put_rsb(rp);
+		++*pos;
+		return ri;
+	}
+	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+	dlm_put_rsb(rp);
+
+	/*
+	 * move to the first rsb in the next non-empty bucket
+	 */
+
+	/* zero the entry */
+	n &= ~((1LL << 32) - 1);
+
+	while (1) {
+		bucket++;
+		n += 1LL << 32;
+
+		if (bucket >= ls->ls_rsbtbl_size) {
+			kfree(ri);
+			return NULL;
+		}
+
+		spin_lock(&ls->ls_rsbtbl[bucket].lock);
+		if (!list_empty(&ls->ls_rsbtbl[bucket].list)) {
+			r = list_first_entry(&ls->ls_rsbtbl[bucket].list,
+					     struct dlm_rsb, res_hashchain);
+			dlm_hold_rsb(r);
+			ri->rsb = r;
+			ri->bucket = bucket;
+			spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+			*pos = n;
+			return ri;
+		}
+		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+	}
+}
+
+static void table_seq_stop(struct seq_file *seq, void *iter_ptr)
+{
+	struct rsbtbl_iter *ri = iter_ptr;
+
+	if (ri) {
+		dlm_put_rsb(ri->rsb);
+		kfree(ri);
+	}
+}
+
+static struct seq_operations format1_seq_ops = {
+	.start = table_seq_start,
+	.next  = table_seq_next,
+	.stop  = table_seq_stop,
+	.show  = table_seq_show,
 };
 
-static int rsb_open(struct inode *inode, struct file *file)
+static struct seq_operations format2_seq_ops = {
+	.start = table_seq_start,
+	.next  = table_seq_next,
+	.stop  = table_seq_stop,
+	.show  = table_seq_show,
+};
+
+static struct seq_operations format3_seq_ops = {
+	.start = table_seq_start,
+	.next  = table_seq_next,
+	.stop  = table_seq_stop,
+	.show  = table_seq_show,
+};
+
+static const struct file_operations format1_fops;
+static const struct file_operations format2_fops;
+static const struct file_operations format3_fops;
+
+static int table_open(struct inode *inode, struct file *file)
 {
 	struct seq_file *seq;
-	int ret;
+	int ret = -1;
 
-	ret = seq_open(file, &rsb_seq_ops);
+	if (file->f_op == &format1_fops)
+		ret = seq_open(file, &format1_seq_ops);
+	else if (file->f_op == &format2_fops)
+		ret = seq_open(file, &format2_seq_ops);
+	else if (file->f_op == &format3_fops)
+		ret = seq_open(file, &format3_seq_ops);
+
 	if (ret)
 		return ret;
 
 	seq = file->private_data;
-	seq->private = inode->i_private;
-
+	seq->private = inode->i_private; /* the dlm_ls */
 	return 0;
 }
 
-static const struct file_operations rsb_fops = {
+static const struct file_operations format1_fops = {
 	.owner   = THIS_MODULE,
-	.open    = rsb_open,
+	.open    = table_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
 	.release = seq_release
 };
 
-/*
- * Dump state in compact per-lock listing
- */
-
-static struct rsb_iter *locks_iter_init(struct dlm_ls *ls, loff_t *pos)
-{
-	struct rsb_iter *ri;
-
-	ri = kzalloc(sizeof *ri, GFP_KERNEL);
-	if (!ri)
-		return NULL;
-
-	ri->ls = ls;
-	ri->entry = 0;
-	ri->next = NULL;
-	ri->format = 2;
-
-	if (*pos == 0)
-		ri->header = 1;
-
-	if (rsb_iter_next(ri)) {
-		rsb_iter_free(ri);
-		return NULL;
-	}
-
-	return ri;
-}
-
-static void *locks_seq_start(struct seq_file *file, loff_t *pos)
-{
-	struct rsb_iter *ri;
-	loff_t n = *pos;
-
-	ri = locks_iter_init(file->private, pos);
-	if (!ri)
-		return NULL;
-
-	while (n--) {
-		if (rsb_iter_next(ri)) {
-			rsb_iter_free(ri);
-			return NULL;
-		}
-	}
-
-	return ri;
-}
-
-static struct seq_operations locks_seq_ops = {
-	.start = locks_seq_start,
-	.next  = rsb_seq_next,
-	.stop  = rsb_seq_stop,
-	.show  = rsb_seq_show,
-};
-
-static int locks_open(struct inode *inode, struct file *file)
-{
-	struct seq_file *seq;
-	int ret;
-
-	ret = seq_open(file, &locks_seq_ops);
-	if (ret)
-		return ret;
-
-	seq = file->private_data;
-	seq->private = inode->i_private;
-
-	return 0;
-}
-
-static const struct file_operations locks_fops = {
+static const struct file_operations format2_fops = {
 	.owner   = THIS_MODULE,
-	.open    = locks_open,
+	.open    = table_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
 	.release = seq_release
 };
 
-/*
- * Dump all rsb/lvb/lkb state in compact listing, more complete than _locks
- * This can replace both formats 1 and 2 eventually.
- */
-
-static struct rsb_iter *all_iter_init(struct dlm_ls *ls, loff_t *pos)
-{
-	struct rsb_iter *ri;
-
-	ri = kzalloc(sizeof *ri, GFP_KERNEL);
-	if (!ri)
-		return NULL;
-
-	ri->ls = ls;
-	ri->entry = 0;
-	ri->next = NULL;
-	ri->format = 3;
-
-	if (*pos == 0)
-		ri->header = 1;
-
-	if (rsb_iter_next(ri)) {
-		rsb_iter_free(ri);
-		return NULL;
-	}
-
-	return ri;
-}
-
-static void *all_seq_start(struct seq_file *file, loff_t *pos)
-{
-	struct rsb_iter *ri;
-	loff_t n = *pos;
-
-	ri = all_iter_init(file->private, pos);
-	if (!ri)
-		return NULL;
-
-	while (n--) {
-		if (rsb_iter_next(ri)) {
-			rsb_iter_free(ri);
-			return NULL;
-		}
-	}
-
-	return ri;
-}
-
-static struct seq_operations all_seq_ops = {
-	.start = all_seq_start,
-	.next  = rsb_seq_next,
-	.stop  = rsb_seq_stop,
-	.show  = rsb_seq_show,
-};
-
-static int all_open(struct inode *inode, struct file *file)
-{
-	struct seq_file *seq;
-	int ret;
-
-	ret = seq_open(file, &all_seq_ops);
-	if (ret)
-		return ret;
-
-	seq = file->private_data;
-	seq->private = inode->i_private;
-
-	return 0;
-}
-
-static const struct file_operations all_fops = {
+static const struct file_operations format3_fops = {
 	.owner   = THIS_MODULE,
-	.open    = all_open,
+	.open    = table_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
 	.release = seq_release
@@ -689,7 +667,7 @@
 						      S_IFREG | S_IRUGO,
 						      dlm_root,
 						      ls,
-						      &rsb_fops);
+						      &format1_fops);
 	if (!ls->ls_debug_rsb_dentry)
 		goto fail;
 
@@ -702,7 +680,7 @@
 							S_IFREG | S_IRUGO,
 							dlm_root,
 							ls,
-							&locks_fops);
+							&format2_fops);
 	if (!ls->ls_debug_locks_dentry)
 		goto fail;
 
@@ -715,7 +693,7 @@
 						      S_IFREG | S_IRUGO,
 						      dlm_root,
 						      ls,
-						      &all_fops);
+						      &format3_fops);
 	if (!ls->ls_debug_all_dentry)
 		goto fail;
 
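Note on the new debugfs iterators above: table_seq_start()/table_seq_next() pack two cursors into the single 64-bit seq_file position, with the rsb hash-table bucket in the upper 32 bits and the per-bucket entry index in the lower 32 bits, which is why the code masks with ~((1LL << 32) - 1) and steps buckets with n += 1LL << 32. A minimal stand-alone sketch of that encoding (the pos_* helper names are illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Illustrative helpers: pack/unpack the 64-bit seq_file position the way
 * the dlm debugfs iterator does (bucket high 32 bits, entry low 32 bits). */
static uint64_t pos_make(uint32_t bucket, uint32_t entry)
{
	return ((uint64_t)bucket << 32) | entry;
}

static uint32_t pos_bucket(uint64_t pos) { return pos >> 32; }
static uint32_t pos_entry(uint64_t pos)  { return pos & 0xffffffffULL; }

int main(void)
{
	uint64_t pos = pos_make(3, 17);

	/* Advance to the next bucket: clear the entry part, bump the bucket,
	 * mirroring "n &= ~((1LL << 32) - 1); n += 1LL << 32;" in the patch. */
	pos &= ~(((uint64_t)1 << 32) - 1);
	pos += (uint64_t)1 << 32;

	printf("bucket=%u entry=%u\n",
	       (unsigned)pos_bucket(pos), (unsigned)pos_entry(pos));
	return 0;
}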
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index ef2f1e3..076e86f 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -105,7 +105,7 @@
 struct dlm_rsbtable {
 	struct list_head	list;
 	struct list_head	toss;
-	rwlock_t		lock;
+	spinlock_t		lock;
 };
 
 struct dlm_lkbtable {
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 6cfe65b..01e7d39 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -412,9 +412,9 @@
 		      unsigned int flags, struct dlm_rsb **r_ret)
 {
 	int error;
-	write_lock(&ls->ls_rsbtbl[b].lock);
+	spin_lock(&ls->ls_rsbtbl[b].lock);
 	error = _search_rsb(ls, name, len, b, flags, r_ret);
-	write_unlock(&ls->ls_rsbtbl[b].lock);
+	spin_unlock(&ls->ls_rsbtbl[b].lock);
 	return error;
 }
 
@@ -478,16 +478,16 @@
 		r->res_nodeid = nodeid;
 	}
 
-	write_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 	error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
 	if (!error) {
-		write_unlock(&ls->ls_rsbtbl[bucket].lock);
+		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 		dlm_free_rsb(r);
 		r = tmp;
 		goto out;
 	}
 	list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
-	write_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 	error = 0;
  out:
 	*r_ret = r;
@@ -530,9 +530,9 @@
 	struct dlm_ls *ls = r->res_ls;
 	uint32_t bucket = r->res_bucket;
 
-	write_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 	kref_put(&r->res_ref, toss_rsb);
-	write_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 }
 
 void dlm_put_rsb(struct dlm_rsb *r)
@@ -967,7 +967,7 @@
 
 	for (;;) {
 		found = 0;
-		write_lock(&ls->ls_rsbtbl[b].lock);
+		spin_lock(&ls->ls_rsbtbl[b].lock);
 		list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
 					    res_hashchain) {
 			if (!time_after_eq(jiffies, r->res_toss_time +
@@ -978,20 +978,20 @@
 		}
 
 		if (!found) {
-			write_unlock(&ls->ls_rsbtbl[b].lock);
+			spin_unlock(&ls->ls_rsbtbl[b].lock);
 			break;
 		}
 
 		if (kref_put(&r->res_ref, kill_rsb)) {
 			list_del(&r->res_hashchain);
-			write_unlock(&ls->ls_rsbtbl[b].lock);
+			spin_unlock(&ls->ls_rsbtbl[b].lock);
 
 			if (is_master(r))
 				dir_remove(r);
 			dlm_free_rsb(r);
 			count++;
 		} else {
-			write_unlock(&ls->ls_rsbtbl[b].lock);
+			spin_unlock(&ls->ls_rsbtbl[b].lock);
 			log_error(ls, "tossed rsb in use %s", r->res_name);
 		}
 	}
@@ -4224,7 +4224,7 @@
 {
 	struct dlm_rsb *r, *r_ret = NULL;
 
-	read_lock(&ls->ls_rsbtbl[bucket].lock);
+	spin_lock(&ls->ls_rsbtbl[bucket].lock);
 	list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
 		if (!rsb_flag(r, RSB_LOCKS_PURGED))
 			continue;
@@ -4233,7 +4233,7 @@
 		r_ret = r;
 		break;
 	}
-	read_unlock(&ls->ls_rsbtbl[bucket].lock);
+	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
 	return r_ret;
 }
 
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 8d86b79..aa32e5f 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -464,7 +464,7 @@
 	for (i = 0; i < size; i++) {
 		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
 		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
-		rwlock_init(&ls->ls_rsbtbl[i].lock);
+		spin_lock_init(&ls->ls_rsbtbl[i].lock);
 	}
 
 	size = dlm_config.ci_lkbtbl_size;
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index 80aba5b..eda43f3 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -726,7 +726,7 @@
 	}
 
 	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
-		read_lock(&ls->ls_rsbtbl[i].lock);
+		spin_lock(&ls->ls_rsbtbl[i].lock);
 		list_for_each_entry(r, &ls->ls_rsbtbl[i].list, res_hashchain) {
 			list_add(&r->res_root_list, &ls->ls_root_list);
 			dlm_hold_rsb(r);
@@ -737,7 +737,7 @@
 		   but no other recovery steps should do anything with them. */
 
 		if (dlm_no_directory(ls)) {
-			read_unlock(&ls->ls_rsbtbl[i].lock);
+			spin_unlock(&ls->ls_rsbtbl[i].lock);
 			continue;
 		}
 
@@ -745,7 +745,7 @@
 			list_add(&r->res_root_list, &ls->ls_root_list);
 			dlm_hold_rsb(r);
 		}
-		read_unlock(&ls->ls_rsbtbl[i].lock);
+		spin_unlock(&ls->ls_rsbtbl[i].lock);
 	}
  out:
 	up_write(&ls->ls_root_sem);
@@ -775,7 +775,7 @@
 	int i;
 
 	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
-		write_lock(&ls->ls_rsbtbl[i].lock);
+		spin_lock(&ls->ls_rsbtbl[i].lock);
 		list_for_each_entry_safe(r, safe, &ls->ls_rsbtbl[i].toss,
 					 res_hashchain) {
 			if (dlm_no_directory(ls) || !is_master(r)) {
@@ -783,7 +783,7 @@
 				dlm_free_rsb(r);
 			}
 		}
-		write_unlock(&ls->ls_rsbtbl[i].lock);
+		spin_unlock(&ls->ls_rsbtbl[i].lock);
 	}
 }
 
diff --git a/fs/dquot.c b/fs/dquot.c
index 61bfff6..48c0571 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -2090,10 +2090,12 @@
 	}
 	if (di->dqb_valid & QIF_BTIME) {
 		dm->dqb_btime = di->dqb_btime;
+		check_blim = 1;
 		__set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
 	}
 	if (di->dqb_valid & QIF_ITIME) {
 		dm->dqb_itime = di->dqb_itime;
+		check_ilim = 1;
 		__set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
 	}
 
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 08bf558..5de2c2d 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -198,7 +198,7 @@
 	return file;
 }
 
-asmlinkage long sys_eventfd2(unsigned int count, int flags)
+SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
 {
 	int fd;
 	struct eventfd_ctx *ctx;
@@ -228,8 +228,7 @@
 	return fd;
 }
 
-asmlinkage long sys_eventfd(unsigned int count)
+SYSCALL_DEFINE1(eventfd, unsigned int, count)
 {
 	return sys_eventfd2(count, 0);
 }
-
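The asmlinkage-to-SYSCALL_DEFINEn conversions in this and the following files split each parameter into a type/name pair so the build can generate the sys_* entry point plus the per-architecture wrapper stubs (for example, forcing correct sign extension of 32-bit arguments on 64-bit architectures) and tracing metadata. The sketch below is a deliberate simplification of that idea, not the kernel's actual macro; the SKETCH_ names are hypothetical:

/* Illustrative simplification only: the real SYSCALL_DEFINE2() in
 * <linux/syscalls.h> also emits per-architecture wrappers and metadata. */
#define SKETCH_SYSCALL_DEFINE2(name, t1, a1, t2, a2) \
	long sketch_sys_##name(t1 a1, t2 a2)

/* Expands to: long sketch_sys_eventfd2(unsigned int count, int flags) */
SKETCH_SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	return (long)count + flags;	/* placeholder body */
}

int main(void)
{
	/* Just demonstrates the call shape produced by the macro expansion. */
	return sketch_sys_eventfd2(1, 0) == 1 ? 0 : 1;
}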
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 96355d5..ba2f9ec 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1110,7 +1110,7 @@
 /*
  * Open an eventpoll file descriptor.
  */
-asmlinkage long sys_epoll_create1(int flags)
+SYSCALL_DEFINE1(epoll_create1, int, flags)
 {
 	int error, fd = -1;
 	struct eventpoll *ep;
@@ -1150,7 +1150,7 @@
 	return fd;
 }
 
-asmlinkage long sys_epoll_create(int size)
+SYSCALL_DEFINE1(epoll_create, int, size)
 {
 	if (size < 0)
 		return -EINVAL;
@@ -1163,8 +1163,8 @@
  * the eventpoll file that enables the insertion/removal/change of
  * file descriptors inside the interest set.
  */
-asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
-			      struct epoll_event __user *event)
+SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+		struct epoll_event __user *, event)
 {
 	int error;
 	struct file *file, *tfile;
@@ -1261,8 +1261,8 @@
  * Implement the event wait interface for the eventpoll file. It is the kernel
  * part of the user space epoll_wait(2).
  */
-asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
-			       int maxevents, int timeout)
+SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
+		int, maxevents, int, timeout)
 {
 	int error;
 	struct file *file;
@@ -1319,9 +1319,9 @@
  * Implement the event wait interface for the eventpoll file. It is the kernel
  * part of the user space epoll_pwait(2).
  */
-asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
-		int maxevents, int timeout, const sigset_t __user *sigmask,
-		size_t sigsetsize)
+SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
+		int, maxevents, int, timeout, const sigset_t __user *, sigmask,
+		size_t, sigsetsize)
 {
 	int error;
 	sigset_t ksigmask, sigsaved;
diff --git a/fs/exec.c b/fs/exec.c
index 71a6efe..0dd60a0 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -99,7 +99,7 @@
  *
  * Also note that we take the address to load from from the file itself.
  */
-asmlinkage long sys_uselib(const char __user * library)
+SYSCALL_DEFINE1(uselib, const char __user *, library)
 {
 	struct file *file;
 	struct nameidata nd;
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 9a0fc40..2999d72 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -95,10 +95,13 @@
 		mark_inode_dirty(dir);
 	}
 
-	if (IS_DIRSYNC(dir))
+	if (IS_DIRSYNC(dir)) {
 		err = write_one_page(page, 1);
-	else
+		if (!err)
+			err = ext2_sync_inode(dir);
+	} else {
 		unlock_page(page);
+	}
 
 	return err;
 }
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index c454d5d..66321a8 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -565,12 +565,8 @@
 	inode->i_blocks = 0;
 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
 	memset(ei->i_data, 0, sizeof(ei->i_data));
-	ei->i_flags = EXT2_I(dir)->i_flags & ~EXT2_BTREE_FL;
-	if (S_ISLNK(mode))
-		ei->i_flags &= ~(EXT2_IMMUTABLE_FL|EXT2_APPEND_FL);
-	/* dirsync is only applied to directories */
-	if (!S_ISDIR(mode))
-		ei->i_flags &= ~EXT2_DIRSYNC_FL;
+	ei->i_flags =
+		ext2_mask_flags(mode, EXT2_I(dir)->i_flags & EXT2_FL_INHERITED);
 	ei->i_faddr = 0;
 	ei->i_frag_no = 0;
 	ei->i_frag_size = 0;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 02b39a5..23fff2f 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -498,8 +498,6 @@
  * ext2_splice_branch - splice the allocated branch onto inode.
  * @inode: owner
  * @block: (logical) number of block we are adding
- * @chain: chain of indirect blocks (with a missing link - see
- *	ext2_alloc_branch)
  * @where: location of missing link
  * @num:   number of indirect blocks we are adding
  * @blks:  number of direct blocks we are adding
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index de876fa..7cb4bad 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -50,8 +50,7 @@
 			goto setflags_out;
 		}
 
-		if (!S_ISDIR(inode->i_mode))
-			flags &= ~EXT2_DIRSYNC_FL;
+		flags = ext2_mask_flags(inode->i_mode, flags);
 
 		mutex_lock(&inode->i_mutex);
 		/* Is it quota file? Do not allow user to mess with it */
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 647cd88..da8bdea 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -132,6 +132,7 @@
 	percpu_counter_destroy(&sbi->s_dirs_counter);
 	brelse (sbi->s_sbh);
 	sb->s_fs_info = NULL;
+	kfree(sbi->s_blockgroup_lock);
 	kfree(sbi);
 
 	return;
@@ -756,6 +757,13 @@
 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
 	if (!sbi)
 		return -ENOMEM;
+
+	sbi->s_blockgroup_lock =
+		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
+	if (!sbi->s_blockgroup_lock) {
+		kfree(sbi);
+		return -ENOMEM;
+	}
 	sb->s_fs_info = sbi;
 	sbi->s_sb_block = sb_block;
 
@@ -983,7 +991,7 @@
 		printk ("EXT2-fs: not enough memory\n");
 		goto failed_mount;
 	}
-	bgl_lock_init(&sbi->s_blockgroup_lock);
+	bgl_lock_init(sbi->s_blockgroup_lock);
 	sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
 	if (!sbi->s_debts) {
 		printk ("EXT2-fs: not enough memory\n");
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
index c30e149..7d215b4 100644
--- a/fs/ext3/hash.c
+++ b/fs/ext3/hash.c
@@ -35,23 +35,71 @@
 
 
 /* The old legacy hash */
-static __u32 dx_hack_hash (const char *name, int len)
+static __u32 dx_hack_hash_unsigned(const char *name, int len)
 {
-	__u32 hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
-	while (len--) {
-		__u32 hash = hash1 + (hash0 ^ (*name++ * 7152373));
+	__u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
+	const unsigned char *ucp = (const unsigned char *) name;
 
-		if (hash & 0x80000000) hash -= 0x7fffffff;
+	while (len--) {
+		hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373));
+
+		if (hash & 0x80000000)
+			hash -= 0x7fffffff;
 		hash1 = hash0;
 		hash0 = hash;
 	}
-	return (hash0 << 1);
+	return hash0 << 1;
 }
 
-static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
+static __u32 dx_hack_hash_signed(const char *name, int len)
+{
+	__u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
+	const signed char *scp = (const signed char *) name;
+
+	while (len--) {
+		hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373));
+
+		if (hash & 0x80000000)
+			hash -= 0x7fffffff;
+		hash1 = hash0;
+		hash0 = hash;
+	}
+	return hash0 << 1;
+}
+
+static void str2hashbuf_signed(const char *msg, int len, __u32 *buf, int num)
 {
 	__u32	pad, val;
 	int	i;
+	const signed char *scp = (const signed char *) msg;
+
+	pad = (__u32)len | ((__u32)len << 8);
+	pad |= pad << 16;
+
+	val = pad;
+	if (len > num*4)
+		len = num * 4;
+	for (i = 0; i < len; i++) {
+		if ((i % 4) == 0)
+			val = pad;
+		val = ((int) scp[i]) + (val << 8);
+		if ((i % 4) == 3) {
+			*buf++ = val;
+			val = pad;
+			num--;
+		}
+	}
+	if (--num >= 0)
+		*buf++ = val;
+	while (--num >= 0)
+		*buf++ = pad;
+}
+
+static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num)
+{
+	__u32	pad, val;
+	int	i;
+	const unsigned char *ucp = (const unsigned char *) msg;
 
 	pad = (__u32)len | ((__u32)len << 8);
 	pad |= pad << 16;
@@ -62,7 +110,7 @@
 	for (i=0; i < len; i++) {
 		if ((i % 4) == 0)
 			val = pad;
-		val = msg[i] + (val << 8);
+		val = ((int) ucp[i]) + (val << 8);
 		if ((i % 4) == 3) {
 			*buf++ = val;
 			val = pad;
@@ -95,6 +143,8 @@
 	const char	*p;
 	int		i;
 	__u32		in[8], buf[4];
+	void		(*str2hashbuf)(const char *, int, __u32 *, int) =
+				str2hashbuf_signed;
 
 	/* Initialize the default seed for the hash checksum functions */
 	buf[0] = 0x67452301;
@@ -113,13 +163,18 @@
 	}
 
 	switch (hinfo->hash_version) {
-	case DX_HASH_LEGACY:
-		hash = dx_hack_hash(name, len);
+	case DX_HASH_LEGACY_UNSIGNED:
+		hash = dx_hack_hash_unsigned(name, len);
 		break;
+	case DX_HASH_LEGACY:
+		hash = dx_hack_hash_signed(name, len);
+		break;
+	case DX_HASH_HALF_MD4_UNSIGNED:
+		str2hashbuf = str2hashbuf_unsigned;
 	case DX_HASH_HALF_MD4:
 		p = name;
 		while (len > 0) {
-			str2hashbuf(p, len, in, 8);
+			(*str2hashbuf)(p, len, in, 8);
 			half_md4_transform(buf, in);
 			len -= 32;
 			p += 32;
@@ -127,10 +182,12 @@
 		minor_hash = buf[2];
 		hash = buf[1];
 		break;
+	case DX_HASH_TEA_UNSIGNED:
+		str2hashbuf = str2hashbuf_unsigned;
 	case DX_HASH_TEA:
 		p = name;
 		while (len > 0) {
-			str2hashbuf(p, len, in, 4);
+			(*str2hashbuf)(p, len, in, 4);
 			TEA_transform(buf, in);
 			len -= 16;
 			p += 16;
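The split of the hash helpers above into _signed and _unsigned variants exists because plain char is signed on some architectures (e.g. x86) and unsigned on others (e.g. ARM, PowerPC), so the legacy hashes produced different values for filenames containing bytes >= 0x80 depending on where the filesystem was created. A small stand-alone sketch of the divergence, keeping the dx_hack_hash() arithmetic but parameterising how each byte is widened (hypothetical program, not part of the patch):

#include <stdio.h>

/* Same arithmetic as dx_hack_hash(); the unsigned multiply gives the same
 * bit pattern as the kernel's wrapping signed multiply on two's-complement
 * machines while avoiding signed-overflow undefined behaviour here. */
static unsigned int hack_hash(const char *name, int len, int treat_signed)
{
	unsigned int hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;

	while (len--) {
		int c = treat_signed ? (int)(signed char)*name
				     : (int)(unsigned char)*name;
		name++;
		hash = hash1 + (hash0 ^ ((unsigned int)c * 7152373u));
		if (hash & 0x80000000)
			hash -= 0x7fffffff;
		hash1 = hash0;
		hash0 = hash;
	}
	return hash0 << 1;
}

int main(void)
{
	const char name[] = "caf\xc3\xa9";	/* contains bytes >= 0x80 */

	printf("signed=%#x unsigned=%#x\n",
	       hack_hash(name, 5, 1), hack_hash(name, 5, 0));
	return 0;
}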
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 5655fbc..8de6c72 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -559,12 +559,8 @@
 	ei->i_dir_start_lookup = 0;
 	ei->i_disksize = 0;
 
-	ei->i_flags = EXT3_I(dir)->i_flags & ~EXT3_INDEX_FL;
-	if (S_ISLNK(mode))
-		ei->i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
-	/* dirsync only applies to directories */
-	if (!S_ISDIR(mode))
-		ei->i_flags &= ~EXT3_DIRSYNC_FL;
+	ei->i_flags =
+		ext3_mask_flags(mode, EXT3_I(dir)->i_flags & EXT3_FL_INHERITED);
 #ifdef EXT3_FRAGMENTS
 	ei->i_faddr = 0;
 	ei->i_frag_no = 0;
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index b7394d0..5e86ce9 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -53,8 +53,7 @@
 			goto flags_out;
 		}
 
-		if (!S_ISDIR(inode->i_mode))
-			flags &= ~EXT3_DIRSYNC_FL;
+		flags = ext3_mask_flags(inode->i_mode, flags);
 
 		mutex_lock(&inode->i_mutex);
 		/* Is it quota file? Do not allow user to mess with it */
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 1dd2abe..69a3d19 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -74,10 +74,6 @@
 #define assert(test) J_ASSERT(test)
 #endif
 
-#ifndef swap
-#define swap(x, y) do { typeof(x) z = x; x = y; y = z; } while (0)
-#endif
-
 #ifdef DX_DEBUG
 #define dxtrace(command) command
 #else
@@ -368,6 +364,8 @@
 		goto fail;
 	}
 	hinfo->hash_version = root->info.hash_version;
+	if (hinfo->hash_version <= DX_HASH_TEA)
+		hinfo->hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned;
 	hinfo->seed = EXT3_SB(dir->i_sb)->s_hash_seed;
 	if (entry)
 		ext3fs_dirhash(entry->name, entry->len, hinfo);
@@ -636,6 +634,9 @@
 	dir = dir_file->f_path.dentry->d_inode;
 	if (!(EXT3_I(dir)->i_flags & EXT3_INDEX_FL)) {
 		hinfo.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
+		if (hinfo.hash_version <= DX_HASH_TEA)
+			hinfo.hash_version +=
+				EXT3_SB(dir->i_sb)->s_hash_unsigned;
 		hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
 		count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
 					       start_hash, start_minor_hash);
@@ -1156,9 +1157,9 @@
 	u32 hash2;
 	struct dx_map_entry *map;
 	char *data1 = (*bh)->b_data, *data2;
-	unsigned split, move, size, i;
+	unsigned split, move, size;
 	struct ext3_dir_entry_2 *de = NULL, *de2;
-	int	err = 0;
+	int	err = 0, i;
 
 	bh2 = ext3_append (handle, dir, &newblock, &err);
 	if (!(bh2)) {
@@ -1398,6 +1399,8 @@
 
 	/* Initialize as for dx_probe */
 	hinfo.hash_version = root->info.hash_version;
+	if (hinfo.hash_version <= DX_HASH_TEA)
+		hinfo.hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned;
 	hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
 	ext3fs_dirhash(name, namelen, &hinfo);
 	frame = frames;
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index c22d014..b70d90e 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -48,8 +48,8 @@
 			     unsigned long journal_devnum);
 static int ext3_create_journal(struct super_block *, struct ext3_super_block *,
 			       unsigned int);
-static void ext3_commit_super (struct super_block * sb,
-			       struct ext3_super_block * es,
+static int ext3_commit_super(struct super_block *sb,
+			       struct ext3_super_block *es,
 			       int sync);
 static void ext3_mark_recovery_complete(struct super_block * sb,
 					struct ext3_super_block * es);
@@ -60,9 +60,9 @@
 				     char nbuf[16]);
 static int ext3_remount (struct super_block * sb, int * flags, char * data);
 static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf);
-static void ext3_unlockfs(struct super_block *sb);
+static int ext3_unfreeze(struct super_block *sb);
 static void ext3_write_super (struct super_block * sb);
-static void ext3_write_super_lockfs(struct super_block *sb);
+static int ext3_freeze(struct super_block *sb);
 
 /*
  * Wrappers for journal_start/end.
@@ -439,6 +439,7 @@
 		ext3_blkdev_remove(sbi);
 	}
 	sb->s_fs_info = NULL;
+	kfree(sbi->s_blockgroup_lock);
 	kfree(sbi);
 	return;
 }
@@ -682,6 +683,26 @@
 				    ext3_nfs_get_inode);
 }
 
+/*
+ * Try to release metadata pages (indirect blocks, directories) which are
+ * mapped via the block device.  Since these pages could have journal heads
+ * which would prevent try_to_free_buffers() from freeing them, we must use
+ * jbd layer's try_to_free_buffers() function to release them.
+ */
+static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
+				 gfp_t wait)
+{
+	journal_t *journal = EXT3_SB(sb)->s_journal;
+
+	WARN_ON(PageChecked(page));
+	if (!page_has_buffers(page))
+		return 0;
+	if (journal)
+		return journal_try_to_free_buffers(journal, page, 
+						   wait & ~__GFP_WAIT);
+	return try_to_free_buffers(page);
+}
+
 #ifdef CONFIG_QUOTA
 #define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
 #define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
@@ -738,8 +759,8 @@
 	.put_super	= ext3_put_super,
 	.write_super	= ext3_write_super,
 	.sync_fs	= ext3_sync_fs,
-	.write_super_lockfs = ext3_write_super_lockfs,
-	.unlockfs	= ext3_unlockfs,
+	.freeze_fs	= ext3_freeze,
+	.unfreeze_fs	= ext3_unfreeze,
 	.statfs		= ext3_statfs,
 	.remount_fs	= ext3_remount,
 	.clear_inode	= ext3_clear_inode,
@@ -748,6 +769,7 @@
 	.quota_read	= ext3_quota_read,
 	.quota_write	= ext3_quota_write,
 #endif
+	.bdev_try_to_free_page = bdev_try_to_free_page,
 };
 
 static const struct export_operations ext3_export_ops = {
@@ -1546,6 +1568,13 @@
 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
 	if (!sbi)
 		return -ENOMEM;
+
+	sbi->s_blockgroup_lock =
+		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
+	if (!sbi->s_blockgroup_lock) {
+		kfree(sbi);
+		return -ENOMEM;
+	}
 	sb->s_fs_info = sbi;
 	sbi->s_mount_opt = 0;
 	sbi->s_resuid = EXT3_DEF_RESUID;
@@ -1742,6 +1771,18 @@
 	for (i=0; i < 4; i++)
 		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
 	sbi->s_def_hash_version = es->s_def_hash_version;
+	i = le32_to_cpu(es->s_flags);
+	if (i & EXT2_FLAGS_UNSIGNED_HASH)
+		sbi->s_hash_unsigned = 3;
+	else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
+#ifdef __CHAR_UNSIGNED__
+		es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
+		sbi->s_hash_unsigned = 3;
+#else
+		es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
+#endif
+		sb->s_dirt = 1;
+	}
 
 	if (sbi->s_blocks_per_group > blocksize * 8) {
 		printk (KERN_ERR
@@ -1786,7 +1827,7 @@
 		goto failed_mount;
 	}
 
-	bgl_lock_init(&sbi->s_blockgroup_lock);
+	bgl_lock_init(sbi->s_blockgroup_lock);
 
 	for (i = 0; i < db_count; i++) {
 		block = descriptor_loc(sb, logic_sb_block, i);
@@ -2270,21 +2311,23 @@
 	return 0;
 }
 
-static void ext3_commit_super (struct super_block * sb,
-			       struct ext3_super_block * es,
+static int ext3_commit_super(struct super_block *sb,
+			       struct ext3_super_block *es,
 			       int sync)
 {
 	struct buffer_head *sbh = EXT3_SB(sb)->s_sbh;
+	int error = 0;
 
 	if (!sbh)
-		return;
+		return error;
 	es->s_wtime = cpu_to_le32(get_seconds());
 	es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb));
 	es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb));
 	BUFFER_TRACE(sbh, "marking dirty");
 	mark_buffer_dirty(sbh);
 	if (sync)
-		sync_dirty_buffer(sbh);
+		error = sync_dirty_buffer(sbh);
+	return error;
 }
 
 
@@ -2398,12 +2441,14 @@
  * LVM calls this function before a (read-only) snapshot is created.  This
  * gives us a chance to flush the journal completely and mark the fs clean.
  */
-static void ext3_write_super_lockfs(struct super_block *sb)
+static int ext3_freeze(struct super_block *sb)
 {
+	int error = 0;
+	journal_t *journal;
 	sb->s_dirt = 0;
 
 	if (!(sb->s_flags & MS_RDONLY)) {
-		journal_t *journal = EXT3_SB(sb)->s_journal;
+		journal = EXT3_SB(sb)->s_journal;
 
 		/* Now we set up the journal barrier. */
 		journal_lock_updates(journal);
@@ -2412,20 +2457,28 @@
 		 * We don't want to clear needs_recovery flag when we failed
 		 * to flush the journal.
 		 */
-		if (journal_flush(journal) < 0)
-			return;
+		error = journal_flush(journal);
+		if (error < 0)
+			goto out;
 
 		/* Journal blocked and flushed, clear needs_recovery flag. */
 		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-		ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1);
+		error = ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1);
+		if (error)
+			goto out;
 	}
+	return 0;
+
+out:
+	journal_unlock_updates(journal);
+	return error;
 }
 
 /*
  * Called by LVM after the snapshot is done.  We need to reset the RECOVER
  * flag here, even though the filesystem is not technically dirty yet.
  */
-static void ext3_unlockfs(struct super_block *sb)
+static int ext3_unfreeze(struct super_block *sb)
 {
 	if (!(sb->s_flags & MS_RDONLY)) {
 		lock_super(sb);
@@ -2435,6 +2488,7 @@
 		unlock_super(sb);
 		journal_unlock_updates(EXT3_SB(sb)->s_journal);
 	}
+	return 0;
 }
 
 static int ext3_remount (struct super_block * sb, int * flags, char * data)
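The s_hash_unsigned = 3 assignment above is not a boolean: it is the offset that namei.c adds to the on-disk hash version (hinfo->hash_version += ...->s_hash_unsigned), remapping the signed hash versions onto their *_UNSIGNED counterparts when the superblock flags say the directory hashes were generated with unsigned chars. A sketch of the mapping, using the numeric values added to fs/ext4/ext4.h later in this patch and assuming the ext3 headers use the same numbering:

#include <assert.h>

/* Hash version constants as added to fs/ext4/ext4.h further down in this
 * patch; the ext3 side is assumed to define the same values. */
#define DX_HASH_LEGACY			0
#define DX_HASH_HALF_MD4		1
#define DX_HASH_TEA			2
#define DX_HASH_LEGACY_UNSIGNED		3
#define DX_HASH_HALF_MD4_UNSIGNED	4
#define DX_HASH_TEA_UNSIGNED		5

int main(void)
{
	int s_hash_unsigned = 3;	/* set when EXT2_FLAGS_UNSIGNED_HASH */
	int hash_version = DX_HASH_TEA;	/* as read from the dx root block */

	/* Same adjustment as dx_probe()/ext3_htree_fill_tree() in namei.c:
	 * only the three signed variants (<= DX_HASH_TEA) are shifted. */
	if (hash_version <= DX_HASH_TEA)
		hash_version += s_hash_unsigned;

	assert(hash_version == DX_HASH_TEA_UNSIGNED);
	return 0;
}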
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 38b3acf..6bba06b 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -20,6 +20,7 @@
 #include "ext4.h"
 #include "ext4_jbd2.h"
 #include "group.h"
+#include "mballoc.h"
 
 /*
  * balloc.c contains the blocks allocation and deallocation routines
@@ -100,10 +101,10 @@
 		 * essentially implementing a per-group read-only flag. */
 		if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
 			ext4_error(sb, __func__,
-				  "Checksum bad for group %lu\n", block_group);
-			gdp->bg_free_blocks_count = 0;
-			gdp->bg_free_inodes_count = 0;
-			gdp->bg_itable_unused = 0;
+				  "Checksum bad for group %u", block_group);
+			ext4_free_blks_set(sb, gdp, 0);
+			ext4_free_inodes_set(sb, gdp, 0);
+			ext4_itable_unused_set(sb, gdp, 0);
 			memset(bh->b_data, 0xff, sb->s_blocksize);
 			return 0;
 		}
@@ -205,15 +206,15 @@
 					     ext4_group_t block_group,
 					     struct buffer_head **bh)
 {
-	unsigned long group_desc;
-	unsigned long offset;
+	unsigned int group_desc;
+	unsigned int offset;
 	struct ext4_group_desc *desc;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
 	if (block_group >= sbi->s_groups_count) {
 		ext4_error(sb, "ext4_get_group_desc",
 			   "block_group >= groups_count - "
-			   "block_group = %lu, groups_count = %lu",
+			   "block_group = %u, groups_count = %u",
 			   block_group, sbi->s_groups_count);
 
 		return NULL;
@@ -225,7 +226,7 @@
 	if (!sbi->s_group_desc[group_desc]) {
 		ext4_error(sb, "ext4_get_group_desc",
 			   "Group descriptor not loaded - "
-			   "block_group = %lu, group_desc = %lu, desc = %lu",
+			   "block_group = %u, group_desc = %u, desc = %u",
 			   block_group, group_desc, offset);
 		return NULL;
 	}
@@ -315,29 +316,50 @@
 	if (unlikely(!bh)) {
 		ext4_error(sb, __func__,
 			    "Cannot read block bitmap - "
-			    "block_group = %lu, block_bitmap = %llu",
+			    "block_group = %u, block_bitmap = %llu",
 			    block_group, bitmap_blk);
 		return NULL;
 	}
-	if (buffer_uptodate(bh) &&
-	    !(desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))
+
+	if (bitmap_uptodate(bh))
 		return bh;
 
 	lock_buffer(bh);
+	if (bitmap_uptodate(bh)) {
+		unlock_buffer(bh);
+		return bh;
+	}
 	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 		ext4_init_block_bitmap(sb, bh, block_group, desc);
+		set_bitmap_uptodate(bh);
 		set_buffer_uptodate(bh);
-		unlock_buffer(bh);
 		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+		unlock_buffer(bh);
 		return bh;
 	}
 	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+	if (buffer_uptodate(bh)) {
+		/*
+		 * if not uninit if bh is uptodate,
+		 * if the group is not uninit and bh is
+		 * uptodate, the bitmap is also uptodate
+		set_bitmap_uptodate(bh);
+		unlock_buffer(bh);
+		return bh;
+	}
+	/*
+	 * submit the buffer_head for read. We can
+	 * safely mark the bitmap as uptodate now.
+	 * We do it here so the bitmap uptodate bit
+	 * get set with buffer lock held.
+	 * gets set with the buffer lock held.
+	set_bitmap_uptodate(bh);
 	if (bh_submit_read(bh) < 0) {
 		put_bh(bh);
 		ext4_error(sb, __func__,
 			    "Cannot read block bitmap - "
-			    "block_group = %lu, block_bitmap = %llu",
+			    "block_group = %u, block_bitmap = %llu",
 			    block_group, bitmap_blk);
 		return NULL;
 	}
@@ -350,62 +372,44 @@
 }
 
 /**
- * ext4_free_blocks_sb() -- Free given blocks and update quota
+ * ext4_add_groupblocks() -- Add given blocks to an existing group
  * @handle:			handle to this transaction
  * @sb:				super block
- * @block:			start physcial block to free
+ * @block:			start physical block to add to the block group
  * @count:			number of blocks to free
- * @pdquot_freed_blocks:	pointer to quota
  *
- * XXX This function is only used by the on-line resizing code, which
- * should probably be fixed up to call the mballoc variant.  There
- * this needs to be cleaned up later; in fact, I'm not convinced this
- * is 100% correct in the face of the mballoc code.  The online resizing
- * code needs to be fixed up to more tightly (and correctly) interlock
- * with the mballoc code.
+ * This marks the blocks as free in the bitmap. We ask
+ * mballoc to reload the buddy after this by setting the group's
+ * EXT4_GROUP_INFO_NEED_INIT_BIT flag.
  */
-void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
-			 ext4_fsblk_t block, unsigned long count,
-			 unsigned long *pdquot_freed_blocks)
+void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
+			 ext4_fsblk_t block, unsigned long count)
 {
 	struct buffer_head *bitmap_bh = NULL;
 	struct buffer_head *gd_bh;
 	ext4_group_t block_group;
 	ext4_grpblk_t bit;
-	unsigned long i;
-	unsigned long overflow;
+	unsigned int i;
 	struct ext4_group_desc *desc;
 	struct ext4_super_block *es;
 	struct ext4_sb_info *sbi;
-	int err = 0, ret;
-	ext4_grpblk_t group_freed;
+	int err = 0, ret, blk_free_count;
+	ext4_grpblk_t blocks_freed;
+	struct ext4_group_info *grp;
 
-	*pdquot_freed_blocks = 0;
 	sbi = EXT4_SB(sb);
 	es = sbi->s_es;
-	if (block < le32_to_cpu(es->s_first_data_block) ||
-	    block + count < block ||
-	    block + count > ext4_blocks_count(es)) {
-		ext4_error(sb, "ext4_free_blocks",
-			   "Freeing blocks not in datazone - "
-			   "block = %llu, count = %lu", block, count);
-		goto error_return;
-	}
+	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
 
-	ext4_debug("freeing block(s) %llu-%llu\n", block, block + count - 1);
-
-do_more:
-	overflow = 0;
 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
+	grp = ext4_get_group_info(sb, block_group);
 	/*
 	 * Check to see if we are freeing blocks across a group
 	 * boundary.
 	 */
 	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
-		overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
-		count -= overflow;
+		goto error_return;
 	}
-	brelse(bitmap_bh);
 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
 	if (!bitmap_bh)
 		goto error_return;
@@ -418,18 +422,17 @@
 	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
 	    in_range(block + count - 1, ext4_inode_table(sb, desc),
 		     sbi->s_itb_per_group)) {
-		ext4_error(sb, "ext4_free_blocks",
-			   "Freeing blocks in system zones - "
+		ext4_error(sb, __func__,
+			   "Adding blocks in system zones - "
 			   "Block = %llu, count = %lu",
 			   block, count);
 		goto error_return;
 	}
 
 	/*
-	 * We are about to start releasing blocks in the bitmap,
+	 * We are about to add blocks to the bitmap,
 	 * so we need undo access.
 	 */
-	/* @@@ check errors */
 	BUFFER_TRACE(bitmap_bh, "getting undo access");
 	err = ext4_journal_get_undo_access(handle, bitmap_bh);
 	if (err)
@@ -444,107 +447,55 @@
 	err = ext4_journal_get_write_access(handle, gd_bh);
 	if (err)
 		goto error_return;
-
-	jbd_lock_bh_state(bitmap_bh);
-
-	for (i = 0, group_freed = 0; i < count; i++) {
-		/*
-		 * An HJ special.  This is expensive...
-		 */
-#ifdef CONFIG_JBD2_DEBUG
-		jbd_unlock_bh_state(bitmap_bh);
-		{
-			struct buffer_head *debug_bh;
-			debug_bh = sb_find_get_block(sb, block + i);
-			if (debug_bh) {
-				BUFFER_TRACE(debug_bh, "Deleted!");
-				if (!bh2jh(bitmap_bh)->b_committed_data)
-					BUFFER_TRACE(debug_bh,
-						"No commited data in bitmap");
-				BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
-				__brelse(debug_bh);
-			}
-		}
-		jbd_lock_bh_state(bitmap_bh);
-#endif
-		if (need_resched()) {
-			jbd_unlock_bh_state(bitmap_bh);
-			cond_resched();
-			jbd_lock_bh_state(bitmap_bh);
-		}
-		/* @@@ This prevents newly-allocated data from being
-		 * freed and then reallocated within the same
-		 * transaction.
-		 *
-		 * Ideally we would want to allow that to happen, but to
-		 * do so requires making jbd2_journal_forget() capable of
-		 * revoking the queued write of a data block, which
-		 * implies blocking on the journal lock.  *forget()
-		 * cannot block due to truncate races.
-		 *
-		 * Eventually we can fix this by making jbd2_journal_forget()
-		 * return a status indicating whether or not it was able
-		 * to revoke the buffer.  On successful revoke, it is
-		 * safe not to set the allocation bit in the committed
-		 * bitmap, because we know that there is no outstanding
-		 * activity on the buffer any more and so it is safe to
-		 * reallocate it.
-		 */
-		BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
-		J_ASSERT_BH(bitmap_bh,
-				bh2jh(bitmap_bh)->b_committed_data != NULL);
-		ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
-				bh2jh(bitmap_bh)->b_committed_data);
-
-		/*
-		 * We clear the bit in the bitmap after setting the committed
-		 * data bit, because this is the reverse order to that which
-		 * the allocator uses.
-		 */
+	/*
+	 * make sure we don't allow a parallel init on other groups in the
+	 * same buddy cache
+	 */
+	down_write(&grp->alloc_sem);
+	for (i = 0, blocks_freed = 0; i < count; i++) {
 		BUFFER_TRACE(bitmap_bh, "clear bit");
 		if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
 						bit + i, bitmap_bh->b_data)) {
-			jbd_unlock_bh_state(bitmap_bh);
 			ext4_error(sb, __func__,
 				   "bit already cleared for block %llu",
 				   (ext4_fsblk_t)(block + i));
-			jbd_lock_bh_state(bitmap_bh);
 			BUFFER_TRACE(bitmap_bh, "bit already cleared");
 		} else {
-			group_freed++;
+			blocks_freed++;
 		}
 	}
-	jbd_unlock_bh_state(bitmap_bh);
-
 	spin_lock(sb_bgl_lock(sbi, block_group));
-	le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
+	blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
+	ext4_free_blks_set(sb, desc, blk_free_count);
 	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
 	spin_unlock(sb_bgl_lock(sbi, block_group));
-	percpu_counter_add(&sbi->s_freeblocks_counter, count);
+	percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
 
 	if (sbi->s_log_groups_per_flex) {
 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
 		spin_lock(sb_bgl_lock(sbi, flex_group));
-		sbi->s_flex_groups[flex_group].free_blocks += count;
+		sbi->s_flex_groups[flex_group].free_blocks += blocks_freed;
 		spin_unlock(sb_bgl_lock(sbi, flex_group));
 	}
+	/*
+	 * request to reload the buddy with the
+	 * new bitmap information
+	 */
+	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
+	ext4_mb_update_group_info(grp, blocks_freed);
+	up_write(&grp->alloc_sem);
 
 	/* We dirtied the bitmap block */
 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
+	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
 
 	/* And the group descriptor block */
 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
-	ret = ext4_journal_dirty_metadata(handle, gd_bh);
-	if (!err) err = ret;
-	*pdquot_freed_blocks += group_freed;
-
-	if (overflow && !err) {
-		block += count;
-		count = overflow;
-		goto do_more;
-	}
+	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
+	if (!err)
+		err = ret;
 	sb->s_dirt = 1;
+
 error_return:
 	brelse(bitmap_bh);
 	ext4_std_error(sb, err);
@@ -614,7 +565,7 @@
 		if (dirty_blocks < 0) {
 			printk(KERN_CRIT "Dirty block accounting "
 					"went wrong %lld\n",
-					dirty_blocks);
+					(long long)dirty_blocks);
 		}
 	}
 	/* Check whether we have space after
@@ -666,101 +617,45 @@
 	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
 }
 
-#define EXT4_META_BLOCK 0x1
-
-static ext4_fsblk_t do_blk_alloc(handle_t *handle, struct inode *inode,
-				ext4_lblk_t iblock, ext4_fsblk_t goal,
-				unsigned long *count, int *errp, int flags)
-{
-	struct ext4_allocation_request ar;
-	ext4_fsblk_t ret;
-
-	memset(&ar, 0, sizeof(ar));
-	/* Fill with neighbour allocated blocks */
-
-	ar.inode = inode;
-	ar.goal = goal;
-	ar.len = *count;
-	ar.logical = iblock;
-
-	if (S_ISREG(inode->i_mode) && !(flags & EXT4_META_BLOCK))
-		/* enable in-core preallocation for data block allocation */
-		ar.flags = EXT4_MB_HINT_DATA;
-	else
-		/* disable in-core preallocation for non-regular files */
-		ar.flags = 0;
-
-	ret = ext4_mb_new_blocks(handle, &ar, errp);
-	*count = ar.len;
-	return ret;
-}
-
 /*
  * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
  *
  * @handle:             handle to this transaction
  * @inode:              file inode
  * @goal:               given target block(filesystem wide)
- * @count:		total number of blocks need
+ * @count:		pointer to total number of blocks needed
  * @errp:               error code
  *
- * Return 1st allocated block numberon success, *count stores total account
+ * Return 1st allocated block number on success, *count stores total account
  * error stores in errp pointer
  */
 ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
 		ext4_fsblk_t goal, unsigned long *count, int *errp)
 {
+	struct ext4_allocation_request ar;
 	ext4_fsblk_t ret;
-	ret = do_blk_alloc(handle, inode, 0, goal,
-				count, errp, EXT4_META_BLOCK);
+
+	memset(&ar, 0, sizeof(ar));
+	/* Fill with neighbour allocated blocks */
+	ar.inode = inode;
+	ar.goal = goal;
+	ar.len = count ? *count : 1;
+
+	ret = ext4_mb_new_blocks(handle, &ar, errp);
+	if (count)
+		*count = ar.len;
+
 	/*
 	 * Account for the allocated meta blocks
 	 */
 	if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) {
 		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
-		EXT4_I(inode)->i_allocated_meta_blocks += *count;
+		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 	}
 	return ret;
 }
 
-/*
- * ext4_new_meta_block() -- allocate block for meta data (indexing) blocks
- *
- * @handle:             handle to this transaction
- * @inode:              file inode
- * @goal:               given target block(filesystem wide)
- * @errp:               error code
- *
- * Return allocated block number on success
- */
-ext4_fsblk_t ext4_new_meta_block(handle_t *handle, struct inode *inode,
-		ext4_fsblk_t goal, int *errp)
-{
-	unsigned long count = 1;
-	return ext4_new_meta_blocks(handle, inode, goal, &count, errp);
-}
-
-/*
- * ext4_new_blocks() -- allocate data blocks
- *
- * @handle:             handle to this transaction
- * @inode:              file inode
- * @goal:               given target block(filesystem wide)
- * @count:		total number of blocks need
- * @errp:               error code
- *
- * Return 1st allocated block numberon success, *count stores total account
- * error stores in errp pointer
- */
-
-ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
-				ext4_lblk_t iblock, ext4_fsblk_t goal,
-				unsigned long *count, int *errp)
-{
-	return do_blk_alloc(handle, inode, iblock, goal, count, errp, 0);
-}
-
 /**
  * ext4_count_free_blocks() -- count filesystem free blocks
  * @sb:		superblock
@@ -776,7 +671,7 @@
 #ifdef EXT4FS_DEBUG
 	struct ext4_super_block *es;
 	ext4_fsblk_t bitmap_count;
-	unsigned long x;
+	unsigned int x;
 	struct buffer_head *bitmap_bh = NULL;
 
 	es = EXT4_SB(sb)->s_es;
@@ -796,7 +691,7 @@
 			continue;
 
 		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
-		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
+		printk(KERN_DEBUG "group %lu: stored = %d, counted = %u\n",
 			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
 		bitmap_count += x;
 	}
@@ -812,7 +707,7 @@
 		gdp = ext4_get_group_desc(sb, i, NULL);
 		if (!gdp)
 			continue;
-		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
+		desc_count += ext4_free_blks_count(sb, gdp);
 	}
 
 	return desc_count;
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index 0a7a666..fa3af81 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -15,10 +15,9 @@
 
 static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
 
-unsigned long ext4_count_free(struct buffer_head *map, unsigned int numchars)
+unsigned int ext4_count_free(struct buffer_head *map, unsigned int numchars)
 {
-	unsigned int i;
-	unsigned long sum = 0;
+	unsigned int i, sum = 0;
 
 	if (!map)
 		return 0;
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index fed5b61..2df2e40 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -64,7 +64,7 @@
 int ext4_check_dir_entry(const char *function, struct inode *dir,
 			 struct ext4_dir_entry_2 *de,
 			 struct buffer_head *bh,
-			 unsigned long offset)
+			 unsigned int offset)
 {
 	const char *error_msg = NULL;
 	const int rlen = ext4_rec_len_from_disk(de->rec_len);
@@ -84,9 +84,9 @@
 	if (error_msg != NULL)
 		ext4_error(dir->i_sb, function,
 			"bad entry in directory #%lu: %s - "
-			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
+			"offset=%u, inode=%u, rec_len=%d, name_len=%d",
 			dir->i_ino, error_msg, offset,
-			(unsigned long) le32_to_cpu(de->inode),
+			le32_to_cpu(de->inode),
 			rlen, de->name_len);
 	return error_msg == NULL ? 1 : 0;
 }
@@ -95,7 +95,7 @@
 			 void *dirent, filldir_t filldir)
 {
 	int error = 0;
-	unsigned long offset;
+	unsigned int offset;
 	int i, stored;
 	struct ext4_dir_entry_2 *de;
 	struct super_block *sb;
@@ -405,7 +405,7 @@
 	sb = inode->i_sb;
 
 	if (!fname) {
-		printk(KERN_ERR "ext4: call_filldir: called with "
+		printk(KERN_ERR "EXT4-fs: call_filldir: called with "
 		       "null fname?!?\n");
 		return 0;
 	}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 6c46c64..c668e43 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -19,6 +19,7 @@
 #include <linux/types.h>
 #include <linux/blkdev.h>
 #include <linux/magic.h>
+#include <linux/jbd2.h>
 #include "ext4_i.h"
 
 /*
@@ -94,9 +95,9 @@
 	/* phys. block for ^^^ */
 	ext4_fsblk_t pright;
 	/* how many blocks we want to allocate */
-	unsigned long len;
+	unsigned int len;
 	/* flags. see above EXT4_MB_HINT_* */
-	unsigned long flags;
+	unsigned int flags;
 };
 
 /*
@@ -156,12 +157,12 @@
 	__le32	bg_block_bitmap_lo;	/* Blocks bitmap block */
 	__le32	bg_inode_bitmap_lo;	/* Inodes bitmap block */
 	__le32	bg_inode_table_lo;	/* Inodes table block */
-	__le16	bg_free_blocks_count;	/* Free blocks count */
-	__le16	bg_free_inodes_count;	/* Free inodes count */
-	__le16	bg_used_dirs_count;	/* Directories count */
+	__le16	bg_free_blocks_count_lo;/* Free blocks count */
+	__le16	bg_free_inodes_count_lo;/* Free inodes count */
+	__le16	bg_used_dirs_count_lo;	/* Directories count */
 	__le16	bg_flags;		/* EXT4_BG_flags (INODE_UNINIT, etc) */
 	__u32	bg_reserved[2];		/* Likely block/inode bitmap checksum */
-	__le16  bg_itable_unused;	/* Unused inodes count */
+	__le16  bg_itable_unused_lo;	/* Unused inodes count */
 	__le16  bg_checksum;		/* crc16(sb_uuid+group+desc) */
 	__le32	bg_block_bitmap_hi;	/* Blocks bitmap block MSB */
 	__le32	bg_inode_bitmap_hi;	/* Inodes bitmap block MSB */
@@ -169,7 +170,7 @@
 	__le16	bg_free_blocks_count_hi;/* Free blocks count MSB */
 	__le16	bg_free_inodes_count_hi;/* Free inodes count MSB */
 	__le16	bg_used_dirs_count_hi;	/* Directories count MSB */
-	__le16	bg_itable_unused_hi;	/* Unused inodes count MSB */
+	__le16  bg_itable_unused_hi;    /* Unused inodes count MSB */
 	__u32	bg_reserved2[3];
 };
 
@@ -328,6 +329,7 @@
 	uid_t s_resuid;
 	gid_t s_resgid;
 	unsigned long s_commit_interval;
+	u32 s_min_batch_time, s_max_batch_time;
 #ifdef CONFIG_QUOTA
 	int s_jquota_fmt;
 	char *s_qf_names[MAXQUOTAS];
@@ -534,7 +536,6 @@
 #define EXT4_MOUNT_QUOTA		0x80000 /* Some quota option set */
 #define EXT4_MOUNT_USRQUOTA		0x100000 /* "old" user quota */
 #define EXT4_MOUNT_GRPQUOTA		0x200000 /* "old" group quota */
-#define EXT4_MOUNT_EXTENTS		0x400000 /* Extents support */
 #define EXT4_MOUNT_JOURNAL_CHECKSUM	0x800000 /* Journal checksums */
 #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT	0x1000000 /* Journal Async Commit */
 #define EXT4_MOUNT_I_VERSION            0x2000000 /* i_version support */
@@ -726,11 +727,11 @@
  */
 
 #define EXT4_HAS_COMPAT_FEATURE(sb,mask)			\
-	(EXT4_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask))
+	((EXT4_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask)) != 0)
 #define EXT4_HAS_RO_COMPAT_FEATURE(sb,mask)			\
-	(EXT4_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask))
+	((EXT4_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask)) != 0)
 #define EXT4_HAS_INCOMPAT_FEATURE(sb,mask)			\
-	(EXT4_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask))
+	((EXT4_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask)) != 0)
 #define EXT4_SET_COMPAT_FEATURE(sb,mask)			\
 	EXT4_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
 #define EXT4_SET_RO_COMPAT_FEATURE(sb,mask)			\
@@ -806,6 +807,12 @@
 #define EXT4_DEFM_JMODE_WBACK	0x0060
 
 /*
+ * Default journal batch times
+ */
+#define EXT4_DEF_MIN_BATCH_TIME	0
+#define EXT4_DEF_MAX_BATCH_TIME	15000 /* 15ms */
+
+/*
  * Structure of a directory entry
  */
 #define EXT4_NAME_LEN 255
@@ -891,6 +898,9 @@
 #define DX_HASH_LEGACY		0
 #define DX_HASH_HALF_MD4	1
 #define DX_HASH_TEA		2
+#define DX_HASH_LEGACY_UNSIGNED	3
+#define DX_HASH_HALF_MD4_UNSIGNED	4
+#define DX_HASH_TEA_UNSIGNED		5
 
 #ifdef __KERNEL__
 
@@ -955,7 +965,7 @@
 #define ERR_BAD_DX_DIR	-75000
 
 void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
-			unsigned long *blockgrpp, ext4_grpblk_t *offsetp);
+			ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp);
 
 extern struct proc_dir_entry *ext4_proc_root;
 
@@ -987,6 +997,9 @@
 # define ATTRIB_NORET	__attribute__((noreturn))
 # define NORET_AND	noreturn,
 
+/* bitmap.c */
+extern unsigned int ext4_count_free(struct buffer_head *, unsigned);
+
 /* balloc.c */
 extern unsigned int ext4_block_group(struct super_block *sb,
 			ext4_fsblk_t blocknr);
@@ -995,20 +1008,14 @@
 extern int ext4_bg_has_super(struct super_block *sb, ext4_group_t group);
 extern unsigned long ext4_bg_num_gdb(struct super_block *sb,
 			ext4_group_t group);
-extern ext4_fsblk_t ext4_new_meta_block(handle_t *handle, struct inode *inode,
-			ext4_fsblk_t goal, int *errp);
 extern ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
 			ext4_fsblk_t goal, unsigned long *count, int *errp);
-extern ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
-					ext4_lblk_t iblock, ext4_fsblk_t goal,
-					unsigned long *count, int *errp);
 extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi, s64 nblocks);
 extern int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks);
 extern void ext4_free_blocks(handle_t *handle, struct inode *inode,
 			ext4_fsblk_t block, unsigned long count, int metadata);
-extern void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
-				ext4_fsblk_t block, unsigned long count,
-				unsigned long *pdquot_freed_blocks);
+extern void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
+				ext4_fsblk_t block, unsigned long count);
 extern ext4_fsblk_t ext4_count_free_blocks(struct super_block *);
 extern void ext4_check_blocks_bitmap(struct super_block *);
 extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
@@ -1019,7 +1026,7 @@
 /* dir.c */
 extern int ext4_check_dir_entry(const char *, struct inode *,
 				struct ext4_dir_entry_2 *,
-				struct buffer_head *, unsigned long);
+				struct buffer_head *, unsigned int);
 extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
 				    __u32 minor_hash,
 				    struct ext4_dir_entry_2 *dirent);
@@ -1039,7 +1046,6 @@
 extern unsigned long ext4_count_free_inodes(struct super_block *);
 extern unsigned long ext4_count_dirs(struct super_block *);
 extern void ext4_check_inodes_bitmap(struct super_block *);
-extern unsigned long ext4_count_free(struct buffer_head *, unsigned);
 
 /* mballoc.c */
 extern long ext4_mb_stats;
@@ -1054,12 +1060,13 @@
 extern void exit_ext4_mballoc(void);
 extern void ext4_mb_free_blocks(handle_t *, struct inode *,
 		unsigned long, unsigned long, int, unsigned long *);
-extern int ext4_mb_add_more_groupinfo(struct super_block *sb,
+extern int ext4_mb_add_groupinfo(struct super_block *sb,
 		ext4_group_t i, struct ext4_group_desc *desc);
 extern void ext4_mb_update_group_info(struct ext4_group_info *grp,
 		ext4_grpblk_t add);
-
-
+extern int ext4_mb_get_buddy_cache_lock(struct super_block *, ext4_group_t);
+extern void ext4_mb_put_buddy_cache_lock(struct super_block *,
+						ext4_group_t, int);
 /* inode.c */
 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
 		struct buffer_head *bh, ext4_fsblk_t blocknr);
@@ -1069,10 +1076,6 @@
 						ext4_lblk_t, int, int *);
 int ext4_get_block(struct inode *inode, sector_t iblock,
 				struct buffer_head *bh_result, int create);
-int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
-				ext4_lblk_t iblock, unsigned long maxblocks,
-				struct buffer_head *bh_result,
-				int create, int extend_disksize);
 
 extern struct inode *ext4_iget(struct super_block *, unsigned long);
 extern int  ext4_write_inode(struct inode *, int);
@@ -1123,6 +1126,9 @@
 	__attribute__ ((format (printf, 3, 4)));
 extern void ext4_warning(struct super_block *, const char *, const char *, ...)
 	__attribute__ ((format (printf, 3, 4)));
+extern void ext4_grp_locked_error(struct super_block *, ext4_group_t,
+				const char *, const char *, ...)
+	__attribute__ ((format (printf, 4, 5)));
 extern void ext4_update_dynamic_rev(struct super_block *sb);
 extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb,
 					__u32 compat);
@@ -1136,12 +1142,28 @@
 				      struct ext4_group_desc *bg);
 extern ext4_fsblk_t ext4_inode_table(struct super_block *sb,
 				     struct ext4_group_desc *bg);
+extern __u32 ext4_free_blks_count(struct super_block *sb,
+				struct ext4_group_desc *bg);
+extern __u32 ext4_free_inodes_count(struct super_block *sb,
+				 struct ext4_group_desc *bg);
+extern __u32 ext4_used_dirs_count(struct super_block *sb,
+				struct ext4_group_desc *bg);
+extern __u32 ext4_itable_unused_count(struct super_block *sb,
+				   struct ext4_group_desc *bg);
 extern void ext4_block_bitmap_set(struct super_block *sb,
 				  struct ext4_group_desc *bg, ext4_fsblk_t blk);
 extern void ext4_inode_bitmap_set(struct super_block *sb,
 				  struct ext4_group_desc *bg, ext4_fsblk_t blk);
 extern void ext4_inode_table_set(struct super_block *sb,
 				 struct ext4_group_desc *bg, ext4_fsblk_t blk);
+extern void ext4_free_blks_set(struct super_block *sb,
+			       struct ext4_group_desc *bg, __u32 count);
+extern void ext4_free_inodes_set(struct super_block *sb,
+				struct ext4_group_desc *bg, __u32 count);
+extern void ext4_used_dirs_set(struct super_block *sb,
+				struct ext4_group_desc *bg, __u32 count);
+extern void ext4_itable_unused_set(struct super_block *sb,
+				   struct ext4_group_desc *bg, __u32 count);
 
 static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
 {
@@ -1246,6 +1268,50 @@
 	return ;
 }
 
+struct ext4_group_info {
+	unsigned long   bb_state;
+	struct rb_root  bb_free_root;
+	unsigned short  bb_first_free;
+	unsigned short  bb_free;
+	unsigned short  bb_fragments;
+	struct          list_head bb_prealloc_list;
+#ifdef DOUBLE_CHECK
+	void            *bb_bitmap;
+#endif
+	struct rw_semaphore alloc_sem;
+	unsigned short  bb_counters[];
+};
+
+#define EXT4_GROUP_INFO_NEED_INIT_BIT	0
+#define EXT4_GROUP_INFO_LOCKED_BIT	1
+
+#define EXT4_MB_GRP_NEED_INIT(grp)	\
+	(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
+
+static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
+{
+	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
+
+	bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
+}
+
+static inline void ext4_unlock_group(struct super_block *sb,
+					ext4_group_t group)
+{
+	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
+
+	bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
+}
+
+static inline int ext4_is_group_locked(struct super_block *sb,
+					ext4_group_t group)
+{
+	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
+
+	return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
+						&(grinfo->bb_state));
+}
+
 /*
  * Inodes and files operations
  */
@@ -1271,18 +1337,38 @@
 extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
 				       int chunk);
 extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
-			ext4_lblk_t iblock,
-			unsigned long max_blocks, struct buffer_head *bh_result,
-			int create, int extend_disksize);
+			       ext4_lblk_t iblock, unsigned int max_blocks,
+			       struct buffer_head *bh_result,
+			       int create, int extend_disksize);
 extern void ext4_ext_truncate(struct inode *);
 extern void ext4_ext_init(struct super_block *);
 extern void ext4_ext_release(struct super_block *);
 extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
 			  loff_t len);
 extern int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode,
-			sector_t block, unsigned long max_blocks,
+			sector_t block, unsigned int max_blocks,
 			struct buffer_head *bh, int create,
 			int extend_disksize, int flag);
+extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+			__u64 start, __u64 len);
+
+/*
+ * Add new method to test whether block and inode bitmaps are properly
+ * initialized. With uninit_bg, reading the block from disk is not enough
+ * to mark the bitmap uptodate. We need to also zero-out the bitmap.
+ */
+#define BH_BITMAP_UPTODATE BH_JBDPrivateStart
+
+static inline int bitmap_uptodate(struct buffer_head *bh)
+{
+	return (buffer_uptodate(bh) &&
+			test_bit(BH_BITMAP_UPTODATE, &(bh)->b_state));
+}
+static inline void set_bitmap_uptodate(struct buffer_head *bh)
+{
+	set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
+}
+
 #endif	/* __KERNEL__ */
 
 #endif	/* _EXT4_H */
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index bec7ce5..18cb67b 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -194,11 +194,6 @@
 	return le16_to_cpu(ext_inode_hdr(inode)->eh_depth);
 }
 
-static inline void ext4_ext_tree_changed(struct inode *inode)
-{
-	EXT4_I(inode)->i_ext_generation++;
-}
-
 static inline void
 ext4_ext_invalidate_cache(struct inode *inode)
 {
diff --git a/fs/ext4/ext4_i.h b/fs/ext4/ext4_i.h
index 5c124c0..e69acc1 100644
--- a/fs/ext4/ext4_i.h
+++ b/fs/ext4/ext4_i.h
@@ -31,7 +31,7 @@
 typedef __u32 ext4_lblk_t;
 
 /* data type for block group number */
-typedef unsigned long ext4_group_t;
+typedef unsigned int ext4_group_t;
 
 #define rsv_start rsv_window._rsv_start
 #define rsv_end rsv_window._rsv_end
@@ -100,9 +100,6 @@
 	 */
 	loff_t	i_disksize;
 
-	/* on-disk additional length */
-	__u16 i_extra_isize;
-
 	/*
 	 * i_data_sem is for serialising ext4_truncate() against
 	 * ext4_getblock().  In the 2.4 ext2 design, great chunks of inode's
@@ -117,7 +114,6 @@
 	struct inode vfs_inode;
 	struct jbd2_inode jinode;
 
-	unsigned long i_ext_generation;
 	struct ext4_ext_cache i_cached_extent;
 	/*
 	 * File creation time. Its function is same as that of
@@ -130,10 +126,14 @@
 	spinlock_t i_prealloc_lock;
 
 	/* allocation reservation info for delalloc */
-	unsigned long i_reserved_data_blocks;
-	unsigned long i_reserved_meta_blocks;
-	unsigned long i_allocated_meta_blocks;
+	unsigned int i_reserved_data_blocks;
+	unsigned int i_reserved_meta_blocks;
+	unsigned int i_allocated_meta_blocks;
 	unsigned short i_delalloc_reserved_flag;
+
+	/* on-disk additional length */
+	__u16 i_extra_isize;
+
 	spinlock_t i_block_reservation_lock;
 };
 
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index c75384b..ad13a84 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -7,53 +7,96 @@
 int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
 				struct buffer_head *bh)
 {
-	int err = jbd2_journal_get_undo_access(handle, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __func__, bh, handle, err);
+	int err = 0;
+
+	if (ext4_handle_valid(handle)) {
+		err = jbd2_journal_get_undo_access(handle, bh);
+		if (err)
+			ext4_journal_abort_handle(where, __func__, bh,
+						  handle, err);
+	}
 	return err;
 }
 
 int __ext4_journal_get_write_access(const char *where, handle_t *handle,
 				struct buffer_head *bh)
 {
-	int err = jbd2_journal_get_write_access(handle, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __func__, bh, handle, err);
+	int err = 0;
+
+	if (ext4_handle_valid(handle)) {
+		err = jbd2_journal_get_write_access(handle, bh);
+		if (err)
+			ext4_journal_abort_handle(where, __func__, bh,
+						  handle, err);
+	}
 	return err;
 }
 
 int __ext4_journal_forget(const char *where, handle_t *handle,
 				struct buffer_head *bh)
 {
-	int err = jbd2_journal_forget(handle, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __func__, bh, handle, err);
+	int err = 0;
+
+	if (ext4_handle_valid(handle)) {
+		err = jbd2_journal_forget(handle, bh);
+		if (err)
+			ext4_journal_abort_handle(where, __func__, bh,
+						  handle, err);
+	}
 	return err;
 }
 
 int __ext4_journal_revoke(const char *where, handle_t *handle,
 				ext4_fsblk_t blocknr, struct buffer_head *bh)
 {
-	int err = jbd2_journal_revoke(handle, blocknr, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __func__, bh, handle, err);
+	int err = 0;
+
+	if (ext4_handle_valid(handle)) {
+		err = jbd2_journal_revoke(handle, blocknr, bh);
+		if (err)
+			ext4_journal_abort_handle(where, __func__, bh,
+						  handle, err);
+	}
 	return err;
 }
 
 int __ext4_journal_get_create_access(const char *where,
 				handle_t *handle, struct buffer_head *bh)
 {
-	int err = jbd2_journal_get_create_access(handle, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __func__, bh, handle, err);
+	int err = 0;
+
+	if (ext4_handle_valid(handle)) {
+		err = jbd2_journal_get_create_access(handle, bh);
+		if (err)
+			ext4_journal_abort_handle(where, __func__, bh,
+						  handle, err);
+	}
 	return err;
 }
 
-int __ext4_journal_dirty_metadata(const char *where,
-				handle_t *handle, struct buffer_head *bh)
+int __ext4_handle_dirty_metadata(const char *where, handle_t *handle,
+				 struct inode *inode, struct buffer_head *bh)
 {
-	int err = jbd2_journal_dirty_metadata(handle, bh);
-	if (err)
-		ext4_journal_abort_handle(where, __func__, bh, handle, err);
+	int err = 0;
+
+	if (ext4_handle_valid(handle)) {
+		err = jbd2_journal_dirty_metadata(handle, bh);
+		if (err)
+			ext4_journal_abort_handle(where, __func__, bh,
+						  handle, err);
+	} else {
+		mark_buffer_dirty(bh);
+		if (inode && inode_needs_sync(inode)) {
+			sync_dirty_buffer(bh);
+			if (buffer_req(bh) && !buffer_uptodate(bh)) {
+				ext4_error(inode->i_sb, __func__,
+					   "IO error syncing inode, "
+					   "inode=%lu, block=%llu",
+					   inode->i_ino,
+					   (unsigned long long) bh->b_blocknr);
+				err = -EIO;
+			}
+		}
+	}
 	return err;
 }
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index b455c68..be2f426 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -32,8 +32,8 @@
  * 5 levels of tree + root which are stored in the inode. */
 
 #define EXT4_SINGLEDATA_TRANS_BLOCKS(sb)				\
-	(EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)	\
-		|| test_opt(sb, EXTENTS) ? 27U : 8U)
+	(EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)   \
+	 ? 27U : 8U)
 
 /* Extended attribute operations touch at most two data buffers,
  * two bitmap buffers, and two group summaries, in addition to the inode
@@ -122,12 +122,6 @@
  * been done yet.
  */
 
-static inline void ext4_journal_release_buffer(handle_t *handle,
-						struct buffer_head *bh)
-{
-	jbd2_journal_release_buffer(handle, bh);
-}
-
 void ext4_journal_abort_handle(const char *caller, const char *err_fn,
 		struct buffer_head *bh, handle_t *handle, int err);
 
@@ -146,8 +140,8 @@
 int __ext4_journal_get_create_access(const char *where,
 				handle_t *handle, struct buffer_head *bh);
 
-int __ext4_journal_dirty_metadata(const char *where,
-				handle_t *handle, struct buffer_head *bh);
+int __ext4_handle_dirty_metadata(const char *where, handle_t *handle,
+				 struct inode *inode, struct buffer_head *bh);
 
 #define ext4_journal_get_undo_access(handle, bh) \
 	__ext4_journal_get_undo_access(__func__, (handle), (bh))
@@ -157,14 +151,57 @@
 	__ext4_journal_revoke(__func__, (handle), (blocknr), (bh))
 #define ext4_journal_get_create_access(handle, bh) \
 	__ext4_journal_get_create_access(__func__, (handle), (bh))
-#define ext4_journal_dirty_metadata(handle, bh) \
-	__ext4_journal_dirty_metadata(__func__, (handle), (bh))
 #define ext4_journal_forget(handle, bh) \
 	__ext4_journal_forget(__func__, (handle), (bh))
+#define ext4_handle_dirty_metadata(handle, inode, bh) \
+	__ext4_handle_dirty_metadata(__func__, (handle), (inode), (bh))
 
 handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks);
 int __ext4_journal_stop(const char *where, handle_t *handle);
 
+#define EXT4_NOJOURNAL_HANDLE	((handle_t *) 0x1)
+
+static inline int ext4_handle_valid(handle_t *handle)
+{
+	if (handle == EXT4_NOJOURNAL_HANDLE)
+		return 0;
+	return 1;
+}
+
+static inline void ext4_handle_sync(handle_t *handle)
+{
+	if (ext4_handle_valid(handle))
+		handle->h_sync = 1;
+}
+
+static inline void ext4_handle_release_buffer(handle_t *handle,
+						struct buffer_head *bh)
+{
+	if (ext4_handle_valid(handle))
+		jbd2_journal_release_buffer(handle, bh);
+}
+
+static inline int ext4_handle_is_aborted(handle_t *handle)
+{
+	if (ext4_handle_valid(handle))
+		return is_handle_aborted(handle);
+	return 0;
+}
+
+static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed)
+{
+	if (ext4_handle_valid(handle) && handle->h_buffer_credits < needed)
+		return 0;
+	return 1;
+}
+
+static inline void ext4_journal_release_buffer(handle_t *handle,
+						struct buffer_head *bh)
+{
+	if (ext4_handle_valid(handle))
+		jbd2_journal_release_buffer(handle, bh);
+}
+
 static inline handle_t *ext4_journal_start(struct inode *inode, int nblocks)
 {
 	return ext4_journal_start_sb(inode->i_sb, nblocks);
@@ -180,27 +217,37 @@
 
 static inline int ext4_journal_extend(handle_t *handle, int nblocks)
 {
-	return jbd2_journal_extend(handle, nblocks);
+	if (ext4_handle_valid(handle))
+		return jbd2_journal_extend(handle, nblocks);
+	return 0;
 }
 
 static inline int ext4_journal_restart(handle_t *handle, int nblocks)
 {
-	return jbd2_journal_restart(handle, nblocks);
+	if (ext4_handle_valid(handle))
+		return jbd2_journal_restart(handle, nblocks);
+	return 0;
 }
 
 static inline int ext4_journal_blocks_per_page(struct inode *inode)
 {
-	return jbd2_journal_blocks_per_page(inode);
+	if (EXT4_JOURNAL(inode) != NULL)
+		return jbd2_journal_blocks_per_page(inode);
+	return 0;
 }
 
 static inline int ext4_journal_force_commit(journal_t *journal)
 {
-	return jbd2_journal_force_commit(journal);
+	if (journal)
+		return jbd2_journal_force_commit(journal);
+	return 0;
 }
 
 static inline int ext4_jbd2_file_inode(handle_t *handle, struct inode *inode)
 {
-	return jbd2_journal_file_inode(handle, &EXT4_I(inode)->jinode);
+	if (ext4_handle_valid(handle))
+		return jbd2_journal_file_inode(handle, &EXT4_I(inode)->jinode);
+	return 0;
 }
 
 /* super.c */
@@ -208,6 +255,8 @@
 
 static inline int ext4_should_journal_data(struct inode *inode)
 {
+	if (EXT4_JOURNAL(inode) == NULL)
+		return 0;
 	if (!S_ISREG(inode->i_mode))
 		return 1;
 	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
@@ -219,6 +268,8 @@
 
 static inline int ext4_should_order_data(struct inode *inode)
 {
+	if (EXT4_JOURNAL(inode) == NULL)
+		return 0;
 	if (!S_ISREG(inode->i_mode))
 		return 0;
 	if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
@@ -230,6 +281,8 @@
 
 static inline int ext4_should_writeback_data(struct inode *inode)
 {
+	if (EXT4_JOURNAL(inode) == NULL)
+		return 0;
 	if (!S_ISREG(inode->i_mode))
 		return 0;
 	if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
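
The helpers above make no-journal mounts work by threading a sentinel handle through code that used to assume jbd2 was always present. The following is a small, self-contained sketch of that idiom; the types, names and fallback path are simplified stand-ins for handle_t, __ext4_handle_dirty_metadata() and ordinary writeback, not the actual kernel interfaces.

#include <stdio.h>
#include <stdbool.h>

/* Stand-in types; the real handle_t and buffer_head live in the kernel. */
typedef struct journal_handle handle_t;
struct buffer_head { int dirty; };

/* Sentinel meaning "this filesystem runs without a journal". */
#define NOJOURNAL_HANDLE ((handle_t *) 0x1)

static bool handle_valid(handle_t *handle)
{
	return handle != NOJOURNAL_HANDLE;
}

/* Journaled path would call into jbd2; here we just report it. */
static int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
	(void) handle;
	bh->dirty = 1;
	printf("journaled: metadata queued in the running transaction\n");
	return 0;
}

/*
 * Wrapper mirroring __ext4_handle_dirty_metadata(): with a valid handle
 * the buffer goes through the journal, otherwise it is simply marked
 * dirty and left to ordinary writeback.
 */
static int handle_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
	if (handle_valid(handle))
		return journal_dirty_metadata(handle, bh);
	bh->dirty = 1;
	printf("no journal: buffer marked dirty directly\n");
	return 0;
}

int main(void)
{
	struct buffer_head bh = { 0 };
	handle_t *fake = (handle_t *) 0x1000; /* pretend jbd2 handed us this */

	handle_dirty_metadata(fake, &bh);             /* journaled mount */
	handle_dirty_metadata(NOJOURNAL_HANDLE, &bh); /* no-journal mount */
	return 0;
}
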
diff --git a/fs/ext4/ext4_sb.h b/fs/ext4/ext4_sb.h
index b21f167..039b6ea 100644
--- a/fs/ext4/ext4_sb.h
+++ b/fs/ext4/ext4_sb.h
@@ -57,6 +57,7 @@
 	u32 s_next_generation;
 	u32 s_hash_seed[4];
 	int s_def_hash_version;
+	int s_hash_unsigned;	/* 3 if hash should be unsigned, 0 if not */
 	struct percpu_counter s_freeblocks_counter;
 	struct percpu_counter s_freeinodes_counter;
 	struct percpu_counter s_dirs_counter;
@@ -73,6 +74,8 @@
 	struct journal_s *s_journal;
 	struct list_head s_orphan;
 	unsigned long s_commit_interval;
+	u32 s_max_batch_time;
+	u32 s_min_batch_time;
 	struct block_device *journal_bdev;
 #ifdef CONFIG_JBD2_DEBUG
 	struct timer_list turn_ro_timer;	/* For turning read-only (crash simulation) */
@@ -101,7 +104,8 @@
 	spinlock_t s_reserve_lock;
 	spinlock_t s_md_lock;
 	tid_t s_last_transaction;
-	unsigned short *s_mb_offsets, *s_mb_maxs;
+	unsigned short *s_mb_offsets;
+	unsigned int *s_mb_maxs;
 
 	/* tunables */
 	unsigned long s_stripe;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index ea2ce3c..54bf062 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -97,6 +97,8 @@
 {
 	int err;
 
+	if (!ext4_handle_valid(handle))
+		return 0;
 	if (handle->h_buffer_credits > needed)
 		return 0;
 	err = ext4_journal_extend(handle, needed);
@@ -134,7 +136,7 @@
 	int err;
 	if (path->p_bh) {
 		/* path points to block */
-		err = ext4_journal_dirty_metadata(handle, path->p_bh);
+		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
 	} else {
 		/* path points to leaf/index in inode body */
 		err = ext4_mark_inode_dirty(handle, inode);
@@ -191,7 +193,7 @@
 	ext4_fsblk_t goal, newblock;
 
 	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
-	newblock = ext4_new_meta_block(handle, inode, goal, err);
+	newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
 	return newblock;
 }
 
@@ -780,7 +782,7 @@
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
 
-	err = ext4_journal_dirty_metadata(handle, bh);
+	err = ext4_handle_dirty_metadata(handle, inode, bh);
 	if (err)
 		goto cleanup;
 	brelse(bh);
@@ -859,7 +861,7 @@
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
 
-		err = ext4_journal_dirty_metadata(handle, bh);
+		err = ext4_handle_dirty_metadata(handle, inode, bh);
 		if (err)
 			goto cleanup;
 		brelse(bh);
@@ -955,7 +957,7 @@
 	set_buffer_uptodate(bh);
 	unlock_buffer(bh);
 
-	err = ext4_journal_dirty_metadata(handle, bh);
+	err = ext4_handle_dirty_metadata(handle, inode, bh);
 	if (err)
 		goto out;
 
@@ -1160,15 +1162,13 @@
 	while (--depth >= 0) {
 		ix = path[depth].p_idx;
 		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
-			break;
+			goto got_index;
 	}
 
-	if (depth < 0) {
-		/* we've gone up to the root and
-		 * found no index to the right */
-		return 0;
-	}
+	/* we've gone up to the root and found no index to the right */
+	return 0;
 
+got_index:
 	/* we've found index to the right, let's
 	 * follow it and find the closest allocated
 	 * block to the right */
@@ -1201,7 +1201,6 @@
 	*phys = ext_pblock(ex);
 	put_bh(bh);
 	return 0;
-
 }
 
 /*
@@ -1622,7 +1621,6 @@
 		ext4_ext_drop_refs(npath);
 		kfree(npath);
 	}
-	ext4_ext_tree_changed(inode);
 	ext4_ext_invalidate_cache(inode);
 	return err;
 }
@@ -2233,7 +2231,6 @@
 		}
 	}
 out:
-	ext4_ext_tree_changed(inode);
 	ext4_ext_drop_refs(path);
 	kfree(path);
 	ext4_journal_stop(handle);
@@ -2250,7 +2247,7 @@
 	 * possible initialization would be here
 	 */
 
-	if (test_opt(sb, EXTENTS)) {
+	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
 		printk(KERN_INFO "EXT4-fs: file extents enabled");
 #ifdef AGGRESSIVE_TEST
 		printk(", aggressive tests");
@@ -2275,7 +2272,7 @@
  */
 void ext4_ext_release(struct super_block *sb)
 {
-	if (!test_opt(sb, EXTENTS))
+	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
 		return;
 
 #ifdef EXTENTS_STATS
@@ -2380,7 +2377,7 @@
 						struct inode *inode,
 						struct ext4_ext_path *path,
 						ext4_lblk_t iblock,
-						unsigned long max_blocks)
+						unsigned int max_blocks)
 {
 	struct ext4_extent *ex, newex, orig_ex;
 	struct ext4_extent *ex1 = NULL;
@@ -2536,7 +2533,7 @@
 		 */
 		newdepth = ext_depth(inode);
 		/*
-		 * update the extent length after successfull insert of the
+		 * update the extent length after successful insert of the
 		 * split extent
 		 */
 		orig_ex.ee_len = cpu_to_le16(ee_len -
@@ -2678,26 +2675,26 @@
  */
 int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 			ext4_lblk_t iblock,
-			unsigned long max_blocks, struct buffer_head *bh_result,
+			unsigned int max_blocks, struct buffer_head *bh_result,
 			int create, int extend_disksize)
 {
 	struct ext4_ext_path *path = NULL;
 	struct ext4_extent_header *eh;
 	struct ext4_extent newex, *ex;
-	ext4_fsblk_t goal, newblock;
-	int err = 0, depth, ret;
-	unsigned long allocated = 0;
+	ext4_fsblk_t newblock;
+	int err = 0, depth, ret, cache_type;
+	unsigned int allocated = 0;
 	struct ext4_allocation_request ar;
 	loff_t disksize;
 
 	__clear_bit(BH_New, &bh_result->b_state);
-	ext_debug("blocks %u/%lu requested for inode %u\n",
+	ext_debug("blocks %u/%u requested for inode %u\n",
 			iblock, max_blocks, inode->i_ino);
 
 	/* check in cache */
-	goal = ext4_ext_in_cache(inode, iblock, &newex);
-	if (goal) {
-		if (goal == EXT4_EXT_CACHE_GAP) {
+	cache_type = ext4_ext_in_cache(inode, iblock, &newex);
+	if (cache_type) {
+		if (cache_type == EXT4_EXT_CACHE_GAP) {
 			if (!create) {
 				/*
 				 * block isn't allocated yet and
@@ -2706,7 +2703,7 @@
 				goto out2;
 			}
 			/* we should allocate requested block */
-		} else if (goal == EXT4_EXT_CACHE_EXTENT) {
+		} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
 			/* block is already allocated */
 			newblock = iblock
 				   - le32_to_cpu(newex.ee_block)
@@ -2854,7 +2851,7 @@
 	if (!newblock)
 		goto out2;
 	ext_debug("allocate new block: goal %llu, found %llu/%lu\n",
-			goal, newblock, allocated);
+		  ar.goal, newblock, allocated);
 
 	/* try to insert new extent into found leaf and return */
 	ext4_ext_store_pblock(&newex, newblock);
@@ -2950,7 +2947,7 @@
 	 * transaction synchronous.
 	 */
 	if (IS_SYNC(inode))
-		handle->h_sync = 1;
+		ext4_handle_sync(handle);
 
 out_stop:
 	up_write(&EXT4_I(inode)->i_data_sem);
@@ -3004,7 +3001,7 @@
 	handle_t *handle;
 	ext4_lblk_t block;
 	loff_t new_size;
-	unsigned long max_blocks;
+	unsigned int max_blocks;
 	int ret = 0;
 	int ret2 = 0;
 	int retries = 0;
@@ -3083,7 +3080,7 @@
 /*
  * Callback function called for each extent to gather FIEMAP information.
  */
-int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
+static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
 		       struct ext4_ext_cache *newex, struct ext4_extent *ex,
 		       void *data)
 {
@@ -3152,7 +3149,8 @@
 /* fiemap flags we can handle specified here */
 #define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
 
-int ext4_xattr_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo)
+static int ext4_xattr_fiemap(struct inode *inode,
+				struct fiemap_extent_info *fieinfo)
 {
 	__u64 physical = 0;
 	__u64 length;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 6bd11fb..f731cb5 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -140,9 +140,6 @@
 	return 0;
 }
 
-extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
-		__u64 start, __u64 len);
-
 const struct file_operations ext4_file_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= do_sync_read,
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
index 556ca8e..ac8f168 100644
--- a/fs/ext4/hash.c
+++ b/fs/ext4/hash.c
@@ -35,23 +35,43 @@
 
 
 /* The old legacy hash */
-static __u32 dx_hack_hash(const char *name, int len)
+static __u32 dx_hack_hash_unsigned(const char *name, int len)
 {
-	__u32 hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
-	while (len--) {
-		__u32 hash = hash1 + (hash0 ^ (*name++ * 7152373));
+	__u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
+	const unsigned char *ucp = (const unsigned char *) name;
 
-		if (hash & 0x80000000) hash -= 0x7fffffff;
+	while (len--) {
+		hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373));
+
+		if (hash & 0x80000000)
+			hash -= 0x7fffffff;
 		hash1 = hash0;
 		hash0 = hash;
 	}
-	return (hash0 << 1);
+	return hash0 << 1;
 }
 
-static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
+static __u32 dx_hack_hash_signed(const char *name, int len)
+{
+	__u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
+	const signed char *scp = (const signed char *) name;
+
+	while (len--) {
+		hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373));
+
+		if (hash & 0x80000000)
+			hash -= 0x7fffffff;
+		hash1 = hash0;
+		hash0 = hash;
+	}
+	return hash0 << 1;
+}
+
+static void str2hashbuf_signed(const char *msg, int len, __u32 *buf, int num)
 {
 	__u32	pad, val;
 	int	i;
+	const signed char *scp = (const signed char *) msg;
 
 	pad = (__u32)len | ((__u32)len << 8);
 	pad |= pad << 16;
@@ -62,7 +82,35 @@
 	for (i = 0; i < len; i++) {
 		if ((i % 4) == 0)
 			val = pad;
-		val = msg[i] + (val << 8);
+		val = ((int) scp[i]) + (val << 8);
+		if ((i % 4) == 3) {
+			*buf++ = val;
+			val = pad;
+			num--;
+		}
+	}
+	if (--num >= 0)
+		*buf++ = val;
+	while (--num >= 0)
+		*buf++ = pad;
+}
+
+static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num)
+{
+	__u32	pad, val;
+	int	i;
+	const unsigned char *ucp = (const unsigned char *) msg;
+
+	pad = (__u32)len | ((__u32)len << 8);
+	pad |= pad << 16;
+
+	val = pad;
+	if (len > num*4)
+		len = num * 4;
+	for (i = 0; i < len; i++) {
+		if ((i % 4) == 0)
+			val = pad;
+		val = ((int) ucp[i]) + (val << 8);
 		if ((i % 4) == 3) {
 			*buf++ = val;
 			val = pad;
@@ -95,6 +143,8 @@
 	const char	*p;
 	int		i;
 	__u32		in[8], buf[4];
+	void		(*str2hashbuf)(const char *, int, __u32 *, int) =
+				str2hashbuf_signed;
 
 	/* Initialize the default seed for the hash checksum functions */
 	buf[0] = 0x67452301;
@@ -113,13 +163,18 @@
 	}
 
 	switch (hinfo->hash_version) {
-	case DX_HASH_LEGACY:
-		hash = dx_hack_hash(name, len);
+	case DX_HASH_LEGACY_UNSIGNED:
+		hash = dx_hack_hash_unsigned(name, len);
 		break;
+	case DX_HASH_LEGACY:
+		hash = dx_hack_hash_signed(name, len);
+		break;
+	case DX_HASH_HALF_MD4_UNSIGNED:
+		str2hashbuf = str2hashbuf_unsigned;	/* fall through */
 	case DX_HASH_HALF_MD4:
 		p = name;
 		while (len > 0) {
-			str2hashbuf(p, len, in, 8);
+			(*str2hashbuf)(p, len, in, 8);
 			half_md4_transform(buf, in);
 			len -= 32;
 			p += 32;
@@ -127,10 +182,12 @@
 		minor_hash = buf[2];
 		hash = buf[1];
 		break;
+	case DX_HASH_TEA_UNSIGNED:
+		str2hashbuf = str2hashbuf_unsigned;	/* fall through */
 	case DX_HASH_TEA:
 		p = name;
 		while (len > 0) {
-			str2hashbuf(p, len, in, 4);
+			(*str2hashbuf)(p, len, in, 4);
 			TEA_transform(buf, in);
 			len -= 16;
 			p += 16;
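
The split into *_signed and *_unsigned hash variants above exists because the legacy code hashed through plain char, whose signedness is architecture-dependent; any filename byte at or above 0x80 therefore hashed differently on, say, x86 versus ARM or PowerPC. A standalone sketch of the divergence, reusing the arithmetic of dx_hack_hash_signed()/dx_hack_hash_unsigned() against <stdint.h>:

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the two dx_hack_hash variants above, built for userspace. */
static uint32_t hack_hash_unsigned(const char *name, int len)
{
	uint32_t hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
	const unsigned char *ucp = (const unsigned char *) name;

	while (len--) {
		hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373));
		if (hash & 0x80000000)
			hash -= 0x7fffffff;
		hash1 = hash0;
		hash0 = hash;
	}
	return hash0 << 1;
}

static uint32_t hack_hash_signed(const char *name, int len)
{
	uint32_t hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
	const signed char *scp = (const signed char *) name;

	while (len--) {
		hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373));
		if (hash & 0x80000000)
			hash -= 0x7fffffff;
		hash1 = hash0;
		hash0 = hash;
	}
	return hash0 << 1;
}

int main(void)
{
	/* "café" in Latin-1: the 0xe9 byte is where the two variants diverge. */
	const char name[] = { 'c', 'a', 'f', (char) 0xe9 };

	printf("unsigned: %#x\n", (unsigned) hack_hash_unsigned(name, 4));
	printf("signed:   %#x\n", (unsigned) hack_hash_signed(name, 4));
	return 0;
}

A filesystem whose directories were hashed with one convention and is then mounted on a host with the other char signedness would miss directory entries, which is what the new DX_HASH_*_UNSIGNED versions and s_hash_unsigned let the code compensate for.
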
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 6e60528..4fb86a0 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -74,17 +74,17 @@
 	/* If checksum is bad mark all blocks and inodes use to prevent
 	 * allocation, essentially implementing a per-group read-only flag. */
 	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
-		ext4_error(sb, __func__, "Checksum bad for group %lu\n",
+		ext4_error(sb, __func__, "Checksum bad for group %u",
 			   block_group);
-		gdp->bg_free_blocks_count = 0;
-		gdp->bg_free_inodes_count = 0;
-		gdp->bg_itable_unused = 0;
+		ext4_free_blks_set(sb, gdp, 0);
+		ext4_free_inodes_set(sb, gdp, 0);
+		ext4_itable_unused_set(sb, gdp, 0);
 		memset(bh->b_data, 0xff, sb->s_blocksize);
 		return 0;
 	}
 
 	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
-	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
+	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
 			bh->b_data);
 
 	return EXT4_INODES_PER_GROUP(sb);
@@ -111,29 +111,49 @@
 	if (unlikely(!bh)) {
 		ext4_error(sb, __func__,
 			    "Cannot read inode bitmap - "
-			    "block_group = %lu, inode_bitmap = %llu",
+			    "block_group = %u, inode_bitmap = %llu",
 			    block_group, bitmap_blk);
 		return NULL;
 	}
-	if (buffer_uptodate(bh) &&
-	    !(desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
+	if (bitmap_uptodate(bh))
 		return bh;
 
 	lock_buffer(bh);
+	if (bitmap_uptodate(bh)) {
+		unlock_buffer(bh);
+		return bh;
+	}
 	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
 		ext4_init_inode_bitmap(sb, bh, block_group, desc);
+		set_bitmap_uptodate(bh);
 		set_buffer_uptodate(bh);
-		unlock_buffer(bh);
 		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+		unlock_buffer(bh);
 		return bh;
 	}
 	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+	if (buffer_uptodate(bh)) {
+		/*
+		 * if not uninit if bh is uptodate,
+		 * if the group is not uninit and bh is uptodate,
+		 * the bitmap is also uptodate
+		set_bitmap_uptodate(bh);
+		unlock_buffer(bh);
+		return bh;
+	}
+	/*
+	 * submit the buffer_head for read. We can
+	 * safely mark the bitmap as uptodate now.
+	 * We do it here so the bitmap uptodate bit
+	 * gets set while the buffer lock is held.
+	 */
+	set_bitmap_uptodate(bh);
 	if (bh_submit_read(bh) < 0) {
 		put_bh(bh);
 		ext4_error(sb, __func__,
 			    "Cannot read inode bitmap - "
-			    "block_group = %lu, inode_bitmap = %llu",
+			    "block_group = %u, inode_bitmap = %llu",
 			    block_group, bitmap_blk);
 		return NULL;
 	}
@@ -168,7 +188,7 @@
 	struct ext4_group_desc *gdp;
 	struct ext4_super_block *es;
 	struct ext4_sb_info *sbi;
-	int fatal = 0, err;
+	int fatal = 0, err, count;
 	ext4_group_t flex_group;
 
 	if (atomic_read(&inode->i_count) > 1) {
@@ -190,6 +210,11 @@
 
 	ino = inode->i_ino;
 	ext4_debug("freeing inode %lu\n", ino);
+	trace_mark(ext4_free_inode,
+		   "dev %s ino %lu mode %d uid %lu gid %lu blocks %llu",
+		   sb->s_id, inode->i_ino, inode->i_mode,
+		   (unsigned long) inode->i_uid, (unsigned long) inode->i_gid,
+		   (unsigned long long) inode->i_blocks);
 
 	/*
 	 * Note: we must free any quota before locking the superblock,
@@ -236,9 +261,12 @@
 
 		if (gdp) {
 			spin_lock(sb_bgl_lock(sbi, block_group));
-			le16_add_cpu(&gdp->bg_free_inodes_count, 1);
-			if (is_directory)
-				le16_add_cpu(&gdp->bg_used_dirs_count, -1);
+			count = ext4_free_inodes_count(sb, gdp) + 1;
+			ext4_free_inodes_set(sb, gdp, count);
+			if (is_directory) {
+				count = ext4_used_dirs_count(sb, gdp) - 1;
+				ext4_used_dirs_set(sb, gdp, count);
+			}
 			gdp->bg_checksum = ext4_group_desc_csum(sbi,
 							block_group, gdp);
 			spin_unlock(sb_bgl_lock(sbi, block_group));
@@ -253,12 +281,12 @@
 				spin_unlock(sb_bgl_lock(sbi, flex_group));
 			}
 		}
-		BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
-		err = ext4_journal_dirty_metadata(handle, bh2);
+		BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
+		err = ext4_handle_dirty_metadata(handle, NULL, bh2);
 		if (!fatal) fatal = err;
 	}
-	BUFFER_TRACE(bitmap_bh, "call ext4_journal_dirty_metadata");
-	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
+	BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
+	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
 	if (!fatal)
 		fatal = err;
 	sb->s_dirt = 1;
@@ -291,13 +319,13 @@
 
 	for (group = 0; group < ngroups; group++) {
 		desc = ext4_get_group_desc(sb, group, NULL);
-		if (!desc || !desc->bg_free_inodes_count)
+		if (!desc || !ext4_free_inodes_count(sb, desc))
 			continue;
-		if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
+		if (ext4_free_inodes_count(sb, desc) < avefreei)
 			continue;
 		if (!best_desc ||
-		    (le16_to_cpu(desc->bg_free_blocks_count) >
-		     le16_to_cpu(best_desc->bg_free_blocks_count))) {
+		    (ext4_free_blks_count(sb, desc) >
+		     ext4_free_blks_count(sb, best_desc))) {
 			*best_group = group;
 			best_desc = desc;
 			ret = 0;
@@ -369,7 +397,7 @@
 	for (i = best_flex * flex_size; i < ngroups &&
 		     i < (best_flex + 1) * flex_size; i++) {
 		desc = ext4_get_group_desc(sb, i, &bh);
-		if (le16_to_cpu(desc->bg_free_inodes_count)) {
+		if (ext4_free_inodes_count(sb, desc)) {
 			*best_group = i;
 			goto out;
 		}
@@ -443,17 +471,17 @@
 		for (i = 0; i < ngroups; i++) {
 			grp = (parent_group + i) % ngroups;
 			desc = ext4_get_group_desc(sb, grp, NULL);
-			if (!desc || !desc->bg_free_inodes_count)
+			if (!desc || !ext4_free_inodes_count(sb, desc))
 				continue;
-			if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
+			if (ext4_used_dirs_count(sb, desc) >= best_ndir)
 				continue;
-			if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
+			if (ext4_free_inodes_count(sb, desc) < avefreei)
 				continue;
-			if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
+			if (ext4_free_blks_count(sb, desc) < avefreeb)
 				continue;
 			*group = grp;
 			ret = 0;
-			best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
+			best_ndir = ext4_used_dirs_count(sb, desc);
 		}
 		if (ret == 0)
 			return ret;
@@ -479,13 +507,13 @@
 	for (i = 0; i < ngroups; i++) {
 		*group = (parent_group + i) % ngroups;
 		desc = ext4_get_group_desc(sb, *group, NULL);
-		if (!desc || !desc->bg_free_inodes_count)
+		if (!desc || !ext4_free_inodes_count(sb, desc))
 			continue;
-		if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
+		if (ext4_used_dirs_count(sb, desc) >= max_dirs)
 			continue;
-		if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
+		if (ext4_free_inodes_count(sb, desc) < min_inodes)
 			continue;
-		if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
+		if (ext4_free_blks_count(sb, desc) < min_blocks)
 			continue;
 		return 0;
 	}
@@ -494,8 +522,8 @@
 	for (i = 0; i < ngroups; i++) {
 		*group = (parent_group + i) % ngroups;
 		desc = ext4_get_group_desc(sb, *group, NULL);
-		if (desc && desc->bg_free_inodes_count &&
-			le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
+		if (desc && ext4_free_inodes_count(sb, desc) &&
+			ext4_free_inodes_count(sb, desc) >= avefreei)
 			return 0;
 	}
 
@@ -524,8 +552,8 @@
 	 */
 	*group = parent_group;
 	desc = ext4_get_group_desc(sb, *group, NULL);
-	if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
-			le16_to_cpu(desc->bg_free_blocks_count))
+	if (desc && ext4_free_inodes_count(sb, desc) &&
+			ext4_free_blks_count(sb, desc))
 		return 0;
 
 	/*
@@ -548,8 +576,8 @@
 		if (*group >= ngroups)
 			*group -= ngroups;
 		desc = ext4_get_group_desc(sb, *group, NULL);
-		if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
-				le16_to_cpu(desc->bg_free_blocks_count))
+		if (desc && ext4_free_inodes_count(sb, desc) &&
+				ext4_free_blks_count(sb, desc))
 			return 0;
 	}
 
@@ -562,7 +590,7 @@
 		if (++*group >= ngroups)
 			*group = 0;
 		desc = ext4_get_group_desc(sb, *group, NULL);
-		if (desc && le16_to_cpu(desc->bg_free_inodes_count))
+		if (desc && ext4_free_inodes_count(sb, desc))
 			return 0;
 	}
 
@@ -570,6 +598,79 @@
 }
 
 /*
+ * claim the inode from the inode bitmap. If the group
+ * is uninit we need to take the groups's sb_bgl_lock
+ * and clear the uninit flag. The inode bitmap update
+ * and group desc uninit flag clear should be done
+ * after holding sb_bgl_lock so that ext4_read_inode_bitmap
+ * doesn't race with the ext4_claim_inode
+ */
+static int ext4_claim_inode(struct super_block *sb,
+			struct buffer_head *inode_bitmap_bh,
+			unsigned long ino, ext4_group_t group, int mode)
+{
+	int free = 0, retval = 0, count;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
+
+	spin_lock(sb_bgl_lock(sbi, group));
+	if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
+		/* not a free inode */
+		retval = 1;
+		goto err_ret;
+	}
+	ino++;
+	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
+			ino > EXT4_INODES_PER_GROUP(sb)) {
+		spin_unlock(sb_bgl_lock(sbi, group));
+		ext4_error(sb, __func__,
+			   "reserved inode or inode > inodes count - "
+			   "block_group = %u, inode=%lu", group,
+			   ino + group * EXT4_INODES_PER_GROUP(sb));
+		return 1;
+	}
+	/* If we didn't allocate from within the initialized part of the inode
+	 * table then we need to initialize up to this inode. */
+	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
+
+		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
+			/* When marking the block group with
+			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
+			 * on the value of bg_itable_unused even though
+			 * mke2fs could have initialized the same for us.
+			 * Instead we calculate the value below.
+			 */
+
+			free = 0;
+		} else {
+			free = EXT4_INODES_PER_GROUP(sb) -
+				ext4_itable_unused_count(sb, gdp);
+		}
+
+		/*
+		 * Check the relative inode number against the last used
+		 * relative inode number in this group. If it is greater
+		 * we need to update the bg_itable_unused count.
+		 *
+		 */
+		if (ino > free)
+			ext4_itable_unused_set(sb, gdp,
+					(EXT4_INODES_PER_GROUP(sb) - ino));
+	}
+	count = ext4_free_inodes_count(sb, gdp) - 1;
+	ext4_free_inodes_set(sb, gdp, count);
+	if (S_ISDIR(mode)) {
+		count = ext4_used_dirs_count(sb, gdp) + 1;
+		ext4_used_dirs_set(sb, gdp, count);
+	}
+	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+err_ret:
+	spin_unlock(sb_bgl_lock(sbi, group));
+	return retval;
+}
+
+/*
  * There are two policies for allocating an inode.  If the new inode is
  * a directory, then a forward search is made for a block group with both
  * free space and a low directory-to-inode ratio; if that fails, then of
@@ -582,8 +683,8 @@
 struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
 {
 	struct super_block *sb;
-	struct buffer_head *bitmap_bh = NULL;
-	struct buffer_head *bh2;
+	struct buffer_head *inode_bitmap_bh = NULL;
+	struct buffer_head *group_desc_bh;
 	ext4_group_t group = 0;
 	unsigned long ino = 0;
 	struct inode *inode;
@@ -602,6 +703,8 @@
 		return ERR_PTR(-EPERM);
 
 	sb = dir->i_sb;
+	trace_mark(ext4_request_inode, "dev %s dir %lu mode %d", sb->s_id,
+		   dir->i_ino, mode);
 	inode = new_inode(sb);
 	if (!inode)
 		return ERR_PTR(-ENOMEM);
@@ -631,40 +734,52 @@
 	for (i = 0; i < sbi->s_groups_count; i++) {
 		err = -EIO;
 
-		gdp = ext4_get_group_desc(sb, group, &bh2);
+		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
 		if (!gdp)
 			goto fail;
 
-		brelse(bitmap_bh);
-		bitmap_bh = ext4_read_inode_bitmap(sb, group);
-		if (!bitmap_bh)
+		brelse(inode_bitmap_bh);
+		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
+		if (!inode_bitmap_bh)
 			goto fail;
 
 		ino = 0;
 
 repeat_in_this_group:
 		ino = ext4_find_next_zero_bit((unsigned long *)
-				bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb), ino);
+					      inode_bitmap_bh->b_data,
+					      EXT4_INODES_PER_GROUP(sb), ino);
+
 		if (ino < EXT4_INODES_PER_GROUP(sb)) {
 
-			BUFFER_TRACE(bitmap_bh, "get_write_access");
-			err = ext4_journal_get_write_access(handle, bitmap_bh);
+			BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
+			err = ext4_journal_get_write_access(handle,
+							    inode_bitmap_bh);
 			if (err)
 				goto fail;
 
-			if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
-						ino, bitmap_bh->b_data)) {
+			BUFFER_TRACE(group_desc_bh, "get_write_access");
+			err = ext4_journal_get_write_access(handle,
+								group_desc_bh);
+			if (err)
+				goto fail;
+			if (!ext4_claim_inode(sb, inode_bitmap_bh,
+						ino, group, mode)) {
 				/* we won it */
-				BUFFER_TRACE(bitmap_bh,
-					"call ext4_journal_dirty_metadata");
-				err = ext4_journal_dirty_metadata(handle,
-								bitmap_bh);
+				BUFFER_TRACE(inode_bitmap_bh,
+					"call ext4_handle_dirty_metadata");
+				err = ext4_handle_dirty_metadata(handle,
+								 inode,
+							inode_bitmap_bh);
 				if (err)
 					goto fail;
+				/* zero bit is inode number 1 */
+				ino++;
 				goto got;
 			}
 			/* we lost it */
-			jbd2_journal_release_buffer(handle, bitmap_bh);
+			ext4_handle_release_buffer(handle, inode_bitmap_bh);
+			ext4_handle_release_buffer(handle, group_desc_bh);
 
 			if (++ino < EXT4_INODES_PER_GROUP(sb))
 				goto repeat_in_this_group;
@@ -684,30 +799,16 @@
 	goto out;
 
 got:
-	ino++;
-	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
-	    ino > EXT4_INODES_PER_GROUP(sb)) {
-		ext4_error(sb, __func__,
-			   "reserved inode or inode > inodes count - "
-			   "block_group = %lu, inode=%lu", group,
-			   ino + group * EXT4_INODES_PER_GROUP(sb));
-		err = -EIO;
-		goto fail;
-	}
-
-	BUFFER_TRACE(bh2, "get_write_access");
-	err = ext4_journal_get_write_access(handle, bh2);
-	if (err) goto fail;
-
 	/* We may have to initialize the block bitmap if it isn't already */
 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
 	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
-		struct buffer_head *block_bh = ext4_read_block_bitmap(sb, group);
+		struct buffer_head *block_bitmap_bh;
 
-		BUFFER_TRACE(block_bh, "get block bitmap access");
-		err = ext4_journal_get_write_access(handle, block_bh);
+		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
+		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
+		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
 		if (err) {
-			brelse(block_bh);
+			brelse(block_bitmap_bh);
 			goto fail;
 		}
 
@@ -715,9 +816,9 @@
 		spin_lock(sb_bgl_lock(sbi, group));
 		/* recheck and clear flag under lock if we still need to */
 		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
-			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
 			free = ext4_free_blocks_after_init(sb, group, gdp);
-			gdp->bg_free_blocks_count = cpu_to_le16(free);
+			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
+			ext4_free_blks_set(sb, gdp, free);
 			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
 								gdp);
 		}
@@ -725,55 +826,19 @@
 
 		/* Don't need to dirty bitmap block if we didn't change it */
 		if (free) {
-			BUFFER_TRACE(block_bh, "dirty block bitmap");
-			err = ext4_journal_dirty_metadata(handle, block_bh);
+			BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
+			err = ext4_handle_dirty_metadata(handle,
+							NULL, block_bitmap_bh);
 		}
 
-		brelse(block_bh);
+		brelse(block_bitmap_bh);
 		if (err)
 			goto fail;
 	}
-
-	spin_lock(sb_bgl_lock(sbi, group));
-	/* If we didn't allocate from within the initialized part of the inode
-	 * table then we need to initialize up to this inode. */
-	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
-		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
-			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
-
-			/* When marking the block group with
-			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
-			 * on the value of bg_itable_unused even though
-			 * mke2fs could have initialized the same for us.
-			 * Instead we calculated the value below
-			 */
-
-			free = 0;
-		} else {
-			free = EXT4_INODES_PER_GROUP(sb) -
-				le16_to_cpu(gdp->bg_itable_unused);
-		}
-
-		/*
-		 * Check the relative inode number against the last used
-		 * relative inode number in this group. if it is greater
-		 * we need to  update the bg_itable_unused count
-		 *
-		 */
-		if (ino > free)
-			gdp->bg_itable_unused =
-				cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
-	}
-
-	le16_add_cpu(&gdp->bg_free_inodes_count, -1);
-	if (S_ISDIR(mode)) {
-		le16_add_cpu(&gdp->bg_used_dirs_count, 1);
-	}
-	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
-	spin_unlock(sb_bgl_lock(sbi, group));
-	BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
-	err = ext4_journal_dirty_metadata(handle, bh2);
-	if (err) goto fail;
+	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
+	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
+	if (err)
+		goto fail;
 
 	percpu_counter_dec(&sbi->s_freeinodes_counter);
 	if (S_ISDIR(mode))
@@ -825,7 +890,7 @@
 
 	ext4_set_inode_flags(inode);
 	if (IS_DIRSYNC(inode))
-		handle->h_sync = 1;
+		ext4_handle_sync(handle);
 	if (insert_inode_locked(inode) < 0) {
 		err = -EINVAL;
 		goto fail_drop;
@@ -852,7 +917,7 @@
 	if (err)
 		goto fail_free_drop;
 
-	if (test_opt(sb, EXTENTS)) {
+	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
 		/* set extent flag only for directory, file and normal symlink*/
 		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
 			EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
@@ -867,6 +932,8 @@
 	}
 
 	ext4_debug("allocating inode %lu\n", inode->i_ino);
+	trace_mark(ext4_allocate_inode, "dev %s ino %lu dir %lu mode %d",
+		   sb->s_id, inode->i_ino, dir->i_ino, mode);
 	goto really_out;
 fail:
 	ext4_std_error(sb, err);
@@ -874,7 +941,7 @@
 	iput(inode);
 	ret = ERR_PTR(err);
 really_out:
-	brelse(bitmap_bh);
+	brelse(inode_bitmap_bh);
 	return ret;
 
 fail_free_drop:
@@ -886,7 +953,7 @@
 	inode->i_nlink = 0;
 	unlock_new_inode(inode);
 	iput(inode);
-	brelse(bitmap_bh);
+	brelse(inode_bitmap_bh);
 	return ERR_PTR(err);
 }
 
@@ -985,7 +1052,7 @@
 		gdp = ext4_get_group_desc(sb, i, NULL);
 		if (!gdp)
 			continue;
-		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
+		desc_count += ext4_free_inodes_count(sb, gdp);
 		brelse(bitmap_bh);
 		bitmap_bh = ext4_read_inode_bitmap(sb, i);
 		if (!bitmap_bh)
@@ -993,7 +1060,7 @@
 
 		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
 		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
-			i, le16_to_cpu(gdp->bg_free_inodes_count), x);
+			i, ext4_free_inodes_count(sb, gdp), x);
 		bitmap_count += x;
 	}
 	brelse(bitmap_bh);
@@ -1007,7 +1074,7 @@
 		gdp = ext4_get_group_desc(sb, i, NULL);
 		if (!gdp)
 			continue;
-		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
+		desc_count += ext4_free_inodes_count(sb, gdp);
 		cond_resched();
 	}
 	return desc_count;
@@ -1024,8 +1091,7 @@
 		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
 		if (!gdp)
 			continue;
-		count += le16_to_cpu(gdp->bg_used_dirs_count);
+		count += ext4_used_dirs_count(sb, gdp);
 	}
 	return count;
 }
-
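
ext4_claim_inode() above folds the inode-bit test-and-set and the group-descriptor counter updates under a single sb_bgl_lock acquisition, so a concurrent allocator (or ext4_read_inode_bitmap() initializing an uninit group) never sees the bitmap and the counters disagree. Below is a userspace sketch of that locking discipline only, assuming a toy flat bitmap and a pthread mutex in place of the per-group lock; the names are illustrative, not the on-disk layout.

#include <pthread.h>
#include <stdio.h>

/* Toy "block group": an inode bitmap plus a free-inode counter, both
 * protected by one lock, mirroring sb_bgl_lock() in ext4_claim_inode(). */
#define NINODES 64

static unsigned long long bitmap;	/* bit i set => inode i in use */
static int free_inodes = NINODES;
static pthread_mutex_t bgl = PTHREAD_MUTEX_INITIALIZER;

/* Returns the claimed inode number, or -1 if the group is full. */
static int claim_inode(void)
{
	int ino = -1;

	pthread_mutex_lock(&bgl);
	for (int i = 0; i < NINODES; i++) {
		if (!(bitmap & (1ULL << i))) {
			bitmap |= 1ULL << i;	/* test-and-set under the lock */
			free_inodes--;		/* counter stays consistent   */
			ino = i;
			break;
		}
	}
	pthread_mutex_unlock(&bgl);
	return ino;
}

static void *worker(void *arg)
{
	(void) arg;
	for (int i = 0; i < NINODES / 2; i++)
		claim_inode();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("free inodes left: %d (expect 0)\n", free_inodes);
	return 0;
}

Build with -pthread; the point is only that the thread that wins the bit is also the one that adjusts the counters before dropping the lock, exactly as the patch does before releasing sb_bgl_lock.
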
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 98d3fe7..a6444ce 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -72,12 +72,17 @@
  * "bh" may be NULL: a metadata block may have been freed from memory
  * but there may still be a record of it in the journal, and that record
  * still needs to be revoked.
+ *
+ * If the handle isn't valid we're not journaling so there's nothing to do.
  */
 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
 			struct buffer_head *bh, ext4_fsblk_t blocknr)
 {
 	int err;
 
+	if (!ext4_handle_valid(handle))
+		return 0;
+
 	might_sleep();
 
 	BUFFER_TRACE(bh, "enter");
@@ -170,7 +175,9 @@
  */
 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
 {
-	if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
+	if (!ext4_handle_valid(handle))
+		return 0;
+	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
 		return 0;
 	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
 		return 0;
@@ -184,6 +191,7 @@
  */
 static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
 {
+	BUG_ON(EXT4_JOURNAL(inode) == NULL);
 	jbd_debug(2, "restarting handle %p\n", handle);
 	return ext4_journal_restart(handle, blocks_for_truncate(inode));
 }
@@ -216,7 +224,7 @@
 	}
 
 	if (IS_SYNC(inode))
-		handle->h_sync = 1;
+		ext4_handle_sync(handle);
 	inode->i_size = 0;
 	err = ext4_mark_inode_dirty(handle, inode);
 	if (err) {
@@ -233,7 +241,7 @@
 	 * enough credits left in the handle to remove the inode from
 	 * the orphan list and set the dtime field.
 	 */
-	if (handle->h_buffer_credits < 3) {
+	if (!ext4_handle_has_enough_credits(handle, 3)) {
 		err = ext4_journal_extend(handle, 3);
 		if (err > 0)
 			err = ext4_journal_restart(handle, 3);
@@ -506,10 +514,10 @@
  *	return the total number of blocks to be allocate, including the
  *	direct and indirect blocks.
  */
-static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
+static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
 		int blocks_to_boundary)
 {
-	unsigned long count = 0;
+	unsigned int count = 0;
 
 	/*
 	 * Simple case, [t,d]Indirect block(s) has not allocated yet
@@ -547,6 +555,7 @@
 				int indirect_blks, int blks,
 				ext4_fsblk_t new_blocks[4], int *err)
 {
+	struct ext4_allocation_request ar;
 	int target, i;
 	unsigned long count = 0, blk_allocated = 0;
 	int index = 0;
@@ -595,10 +604,17 @@
 	if (!target)
 		goto allocated;
 	/* Now allocate data blocks */
-	count = target;
-	/* allocating blocks for data blocks */
-	current_block = ext4_new_blocks(handle, inode, iblock,
-						goal, &count, err);
+	memset(&ar, 0, sizeof(ar));
+	ar.inode = inode;
+	ar.goal = goal;
+	ar.len = target;
+	ar.logical = iblock;
+	if (S_ISREG(inode->i_mode))
+		/* enable in-core preallocation only for regular files */
+		ar.flags = EXT4_MB_HINT_DATA;
+
+	current_block = ext4_mb_new_blocks(handle, &ar, err);
+
 	if (*err && (target == blks)) {
 		/*
 		 * if the allocation failed and we didn't allocate
@@ -614,7 +630,7 @@
 		 */
 			new_blocks[index] = current_block;
 		}
-		blk_allocated += count;
+		blk_allocated += ar.len;
 	}
 allocated:
 	/* total number of blocks allocated for direct blocks */
@@ -709,8 +725,8 @@
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
 
-		BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
-		err = ext4_journal_dirty_metadata(handle, bh);
+		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+		err = ext4_handle_dirty_metadata(handle, inode, bh);
 		if (err)
 			goto failed;
 	}
@@ -792,8 +808,8 @@
 		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
 		 */
 		jbd_debug(5, "splicing indirect only\n");
-		BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
-		err = ext4_journal_dirty_metadata(handle, where->bh);
+		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
+		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
 		if (err)
 			goto err_out;
 	} else {
@@ -840,10 +856,10 @@
  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
  * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
  */
-int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
-		ext4_lblk_t iblock, unsigned long maxblocks,
-		struct buffer_head *bh_result,
-		int create, int extend_disksize)
+static int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
+				  ext4_lblk_t iblock, unsigned int maxblocks,
+				  struct buffer_head *bh_result,
+				  int create, int extend_disksize)
 {
 	int err = -EIO;
 	ext4_lblk_t offsets[4];
@@ -1045,7 +1061,7 @@
  * It returns the error in case of allocation failure.
  */
 int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
-			unsigned long max_blocks, struct buffer_head *bh,
+			unsigned int max_blocks, struct buffer_head *bh,
 			int create, int extend_disksize, int flag)
 {
 	int retval;
@@ -1221,8 +1237,8 @@
 				set_buffer_uptodate(bh);
 			}
 			unlock_buffer(bh);
-			BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
-			err = ext4_journal_dirty_metadata(handle, bh);
+			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+			err = ext4_handle_dirty_metadata(handle, inode, bh);
 			if (!fatal)
 				fatal = err;
 		} else {
@@ -1335,6 +1351,10 @@
  	pgoff_t index;
 	unsigned from, to;
 
+	trace_mark(ext4_write_begin,
+		   "dev %s ino %lu pos %llu len %u flags %u",
+		   inode->i_sb->s_id, inode->i_ino,
+		   (unsigned long long) pos, len, flags);
  	index = pos >> PAGE_CACHE_SHIFT;
 	from = pos & (PAGE_CACHE_SIZE - 1);
 	to = from + len;
@@ -1387,7 +1407,7 @@
 	if (!buffer_mapped(bh) || buffer_freed(bh))
 		return 0;
 	set_buffer_uptodate(bh);
-	return ext4_journal_dirty_metadata(handle, bh);
+	return ext4_handle_dirty_metadata(handle, NULL, bh);
 }
 
 /*
@@ -1406,6 +1426,10 @@
 	struct inode *inode = mapping->host;
 	int ret = 0, ret2;
 
+	trace_mark(ext4_ordered_write_end,
+		   "dev %s ino %lu pos %llu len %u copied %u",
+		   inode->i_sb->s_id, inode->i_ino,
+		   (unsigned long long) pos, len, copied);
 	ret = ext4_jbd2_file_inode(handle, inode);
 
 	if (ret == 0) {
@@ -1444,6 +1468,10 @@
 	int ret = 0, ret2;
 	loff_t new_i_size;
 
+	trace_mark(ext4_writeback_write_end,
+		   "dev %s ino %lu pos %llu len %u copied %u",
+		   inode->i_sb->s_id, inode->i_ino,
+		   (unsigned long long) pos, len, copied);
 	new_i_size = pos + copied;
 	if (new_i_size > EXT4_I(inode)->i_disksize) {
 		ext4_update_i_disksize(inode, new_i_size);
@@ -1479,6 +1507,10 @@
 	unsigned from, to;
 	loff_t new_i_size;
 
+	trace_mark(ext4_journalled_write_end,
+		   "dev %s ino %lu pos %llu len %u copied %u",
+		   inode->i_sb->s_id, inode->i_ino,
+		   (unsigned long long) pos, len, copied);
 	from = pos & (PAGE_CACHE_SIZE - 1);
 	to = from + len;
 
@@ -1625,7 +1657,7 @@
 	get_block_t *get_block;
 	struct writeback_control *wbc;
 	int io_done;
-	long pages_written;
+	int pages_written;
 	int retval;
 };
 
@@ -1645,35 +1677,39 @@
  */
 static int mpage_da_submit_io(struct mpage_da_data *mpd)
 {
-	struct address_space *mapping = mpd->inode->i_mapping;
-	int ret = 0, err, nr_pages, i;
-	unsigned long index, end;
-	struct pagevec pvec;
 	long pages_skipped;
+	struct pagevec pvec;
+	unsigned long index, end;
+	int ret = 0, err, nr_pages, i;
+	struct inode *inode = mpd->inode;
+	struct address_space *mapping = inode->i_mapping;
 
 	BUG_ON(mpd->next_page <= mpd->first_page);
-	pagevec_init(&pvec, 0);
+	/*
+	 * We need to start from the first_page to the next_page - 1
+	 * to make sure we also write the mapped dirty buffer_heads.
+	 * If we look at mpd->lbh.b_blocknr we would only be looking
+	 * at the currently mapped buffer_heads.
+	 */
 	index = mpd->first_page;
 	end = mpd->next_page - 1;
 
+	pagevec_init(&pvec, 0);
 	while (index <= end) {
-		/*
-		 * We can use PAGECACHE_TAG_DIRTY lookup here because
-		 * even though we have cleared the dirty flag on the page
-		 * We still keep the page in the radix tree with tag
-		 * PAGECACHE_TAG_DIRTY. See clear_page_dirty_for_io.
-		 * The PAGECACHE_TAG_DIRTY is cleared in set_page_writeback
-		 * which is called via the below writepage callback.
-		 */
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-					PAGECACHE_TAG_DIRTY,
-					min(end - index,
-					(pgoff_t)PAGEVEC_SIZE-1) + 1);
+		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
 		if (nr_pages == 0)
 			break;
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 
+			index = page->index;
+			if (index > end)
+				break;
+			index++;
+
+			BUG_ON(!PageLocked(page));
+			BUG_ON(PageWriteback(page));
+
 			pages_skipped = mpd->wbc->pages_skipped;
 			err = mapping->a_ops->writepage(page, mpd->wbc);
 			if (!err && (pages_skipped == mpd->wbc->pages_skipped))
@@ -1831,13 +1867,13 @@
 			ext4_count_free_blocks(inode->i_sb));
 	printk(KERN_EMERG "Free/Dirty block details\n");
 	printk(KERN_EMERG "free_blocks=%lld\n",
-			percpu_counter_sum(&sbi->s_freeblocks_counter));
+			(long long)percpu_counter_sum(&sbi->s_freeblocks_counter));
 	printk(KERN_EMERG "dirty_blocks=%lld\n",
-			percpu_counter_sum(&sbi->s_dirtyblocks_counter));
+			(long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter));
 	printk(KERN_EMERG "Block reservation details\n");
-	printk(KERN_EMERG "i_reserved_data_blocks=%lu\n",
+	printk(KERN_EMERG "i_reserved_data_blocks=%u\n",
 			EXT4_I(inode)->i_reserved_data_blocks);
-	printk(KERN_EMERG "i_reserved_meta_blocks=%lu\n",
+	printk(KERN_EMERG "i_reserved_meta_blocks=%u\n",
 			EXT4_I(inode)->i_reserved_meta_blocks);
 	return;
 }
@@ -2087,11 +2123,29 @@
 		bh = head;
 		do {
 			BUG_ON(buffer_locked(bh));
+			/*
+			 * We need to try to allocate
+			 * unmapped blocks in the same page.
+			 * Otherwise we won't make progress
+			 * with the page in ext4_da_writepage
+			 */
 			if (buffer_dirty(bh) &&
 				(!buffer_mapped(bh) || buffer_delay(bh))) {
 				mpage_add_bh_to_extent(mpd, logical, bh);
 				if (mpd->io_done)
 					return MPAGE_DA_EXTENT_TAIL;
+			} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
+				/*
+				 * mapped dirty buffer. We need to update
+				 * the b_state because we look at
+				 * b_state in mpage_da_map_blocks. We don't
+				 * update b_size because if we find an
+				 * unmapped buffer_head later we need to
+				 * use the b_state flag of that buffer_head.
+				 */
+				if (mpd->lbh.b_size == 0)
+					mpd->lbh.b_state =
+						bh->b_state & BH_FLAGS;
 			}
 			logical++;
 		} while ((bh = bh->b_this_page) != head);
@@ -2269,10 +2323,13 @@
 {
 	int ret = 0;
 	loff_t size;
-	unsigned long len;
+	unsigned int len;
 	struct buffer_head *page_bufs;
 	struct inode *inode = page->mapping->host;
 
+	trace_mark(ext4_da_writepage,
+		   "dev %s ino %lu page_index %lu",
+		   inode->i_sb->s_id, inode->i_ino, page->index);
 	size = i_size_read(inode);
 	if (page->index == size >> PAGE_CACHE_SHIFT)
 		len = size & ~PAGE_CACHE_MASK;
@@ -2378,10 +2435,25 @@
 	struct mpage_da_data mpd;
 	struct inode *inode = mapping->host;
 	int no_nrwrite_index_update;
-	long pages_written = 0, pages_skipped;
+	int pages_written = 0;
+	long pages_skipped;
 	int needed_blocks, ret = 0, nr_to_writebump = 0;
 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
 
+	trace_mark(ext4_da_writepages,
+		   "dev %s ino %lu nr_to_write %ld "
+		   "pages_skipped %ld range_start %llu "
+		   "range_end %llu nonblocking %d "
+		   "for_kupdate %d for_reclaim %d "
+		   "for_writepages %d range_cyclic %d",
+		   inode->i_sb->s_id, inode->i_ino,
+		   wbc->nr_to_write, wbc->pages_skipped,
+		   (unsigned long long) wbc->range_start,
+		   (unsigned long long) wbc->range_end,
+		   wbc->nonblocking, wbc->for_kupdate,
+		   wbc->for_reclaim, wbc->for_writepages,
+		   wbc->range_cyclic);
+
 	/*
 	 * No pages to write? This is mainly a kludge to avoid starting
 	 * a transaction for special inodes like journal inode on last iput()
@@ -2389,6 +2461,20 @@
 	 */
 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
 		return 0;
+
+	/*
+	 * If the filesystem has aborted, it is read-only, so return
+	 * right away instead of dumping stack traces later on that
+	 * will obscure the real source of the problem.  We test
+	 * EXT4_MOUNT_ABORT instead of sb->s_flag's MS_RDONLY because
+	 * the latter could be true if the filesystem is mounted
+	 * read-only, and in that case, ext4_da_writepages should
+	 * *never* be called, so if that ever happens, we would want
+	 * the stack trace.
+	 */
+	if (unlikely(sbi->s_mount_opt & EXT4_MOUNT_ABORT))
+		return -EROFS;
+
 	/*
 	 * Make sure nr_to_write is >= sbi->s_mb_stream_request
 	 * This make sure small files blocks are allocated in
@@ -2433,7 +2519,7 @@
 		handle = ext4_journal_start(inode, needed_blocks);
 		if (IS_ERR(handle)) {
 			ret = PTR_ERR(handle);
-			printk(KERN_EMERG "%s: jbd2_start: "
+			printk(KERN_CRIT "%s: jbd2_start: "
 			       "%ld pages, ino %lu; err %d\n", __func__,
 				wbc->nr_to_write, inode->i_ino, ret);
 			dump_stack();
@@ -2486,6 +2572,14 @@
 	if (!no_nrwrite_index_update)
 		wbc->no_nrwrite_index_update = 0;
 	wbc->nr_to_write -= nr_to_writebump;
+	trace_mark(ext4_da_writepage_result,
+		   "dev %s ino %lu ret %d pages_written %d "
+		   "pages_skipped %ld congestion %d "
+		   "more_io %d no_nrwrite_index_update %d",
+		   inode->i_sb->s_id, inode->i_ino, ret,
+		   pages_written, wbc->pages_skipped,
+		   wbc->encountered_congestion, wbc->more_io,
+		   wbc->no_nrwrite_index_update);
 	return ret;
 }
 
@@ -2537,6 +2631,11 @@
 					len, flags, pagep, fsdata);
 	}
 	*fsdata = (void *)0;
+
+	trace_mark(ext4_da_write_begin,
+		   "dev %s ino %lu pos %llu len %u flags %u",
+		   inode->i_sb->s_id, inode->i_ino,
+		   (unsigned long long) pos, len, flags);
 retry:
 	/*
 	 * With delayed allocation, we don't log the i_disksize update
@@ -2626,6 +2725,10 @@
 		}
 	}
 
+	trace_mark(ext4_da_write_end,
+		   "dev %s ino %lu pos %llu len %u copied %u",
+		   inode->i_sb->s_id, inode->i_ino,
+		   (unsigned long long) pos, len, copied);
 	start = pos & (PAGE_CACHE_SIZE - 1);
 	end = start + copied - 1;
 
@@ -2718,7 +2821,10 @@
 		filemap_write_and_wait(mapping);
 	}
 
-	if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
+	BUG_ON(!EXT4_JOURNAL(inode) &&
+	       EXT4_I(inode)->i_state & EXT4_STATE_JDATA);
+
+	if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
 		/*
 		 * This is a REALLY heavyweight approach, but the use of
 		 * bmap on dirty files is expected to be extremely rare:
@@ -2836,6 +2942,9 @@
 	loff_t size = i_size_read(inode);
 	loff_t len;
 
+	trace_mark(ext4_normal_writepage,
+		   "dev %s ino %lu page_index %lu",
+		   inode->i_sb->s_id, inode->i_ino, page->index);
 	J_ASSERT(PageLocked(page));
 	if (page->index == size >> PAGE_CACHE_SHIFT)
 		len = size & ~PAGE_CACHE_MASK;
@@ -2921,6 +3030,9 @@
 	loff_t size = i_size_read(inode);
 	loff_t len;
 
+	trace_mark(ext4_journalled_writepage,
+		   "dev %s ino %lu page_index %lu",
+		   inode->i_sb->s_id, inode->i_ino, page->index);
 	J_ASSERT(PageLocked(page));
 	if (page->index == size >> PAGE_CACHE_SHIFT)
 		len = size & ~PAGE_CACHE_MASK;
@@ -2989,7 +3101,10 @@
 	if (offset == 0)
 		ClearPageChecked(page);
 
-	jbd2_journal_invalidatepage(journal, page, offset);
+	if (journal)
+		jbd2_journal_invalidatepage(journal, page, offset);
+	else
+		block_invalidatepage(page, offset);
 }
 
 static int ext4_releasepage(struct page *page, gfp_t wait)
@@ -2999,7 +3114,10 @@
 	WARN_ON(PageChecked(page));
 	if (!page_has_buffers(page))
 		return 0;
-	return jbd2_journal_try_to_free_buffers(journal, page, wait);
+	if (journal)
+		return jbd2_journal_try_to_free_buffers(journal, page, wait);
+	else
+		return try_to_free_buffers(page);
 }
 
 /*
@@ -3271,7 +3389,7 @@
 
 	err = 0;
 	if (ext4_should_journal_data(inode)) {
-		err = ext4_journal_dirty_metadata(handle, bh);
+		err = ext4_handle_dirty_metadata(handle, inode, bh);
 	} else {
 		if (ext4_should_order_data(inode))
 			err = ext4_jbd2_file_inode(handle, inode);
@@ -3395,8 +3513,8 @@
 	__le32 *p;
 	if (try_to_extend_transaction(handle, inode)) {
 		if (bh) {
-			BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
-			ext4_journal_dirty_metadata(handle, bh);
+			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+			ext4_handle_dirty_metadata(handle, inode, bh);
 		}
 		ext4_mark_inode_dirty(handle, inode);
 		ext4_journal_test_restart(handle, inode);
@@ -3496,7 +3614,7 @@
 				  count, block_to_free_p, p);
 
 	if (this_bh) {
-		BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata");
+		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
 
 		/*
 		 * The buffer head should have an attached journal head at this
@@ -3505,7 +3623,7 @@
 		 * the block was cleared. Check for this instead of OOPSing.
 		 */
 		if (bh2jh(this_bh))
-			ext4_journal_dirty_metadata(handle, this_bh);
+			ext4_handle_dirty_metadata(handle, inode, this_bh);
 		else
 			ext4_error(inode->i_sb, __func__,
 				   "circular indirect block detected, "
@@ -3535,7 +3653,7 @@
 	ext4_fsblk_t nr;
 	__le32 *p;
 
-	if (is_handle_aborted(handle))
+	if (ext4_handle_is_aborted(handle))
 		return;
 
 	if (depth--) {
@@ -3605,7 +3723,7 @@
 			 * will merely complain about releasing a free block,
 			 * rather than leaking blocks.
 			 */
-			if (is_handle_aborted(handle))
+			if (ext4_handle_is_aborted(handle))
 				return;
 			if (try_to_extend_transaction(handle, inode)) {
 				ext4_mark_inode_dirty(handle, inode);
@@ -3624,9 +3742,10 @@
 								   parent_bh)){
 					*p = 0;
 					BUFFER_TRACE(parent_bh,
-					"call ext4_journal_dirty_metadata");
-					ext4_journal_dirty_metadata(handle,
-								    parent_bh);
+					"call ext4_handle_dirty_metadata");
+					ext4_handle_dirty_metadata(handle,
+								   inode,
+								   parent_bh);
 				}
 			}
 		}
@@ -3814,7 +3933,7 @@
 	 * synchronous
 	 */
 	if (IS_SYNC(inode))
-		handle->h_sync = 1;
+		ext4_handle_sync(handle);
 out_stop:
 	/*
 	 * If this was a simple ftruncate(), and the file will remain alive
@@ -3844,7 +3963,7 @@
 	ext4_fsblk_t		block;
 	int			inodes_per_block, inode_offset;
 
-	iloc->bh = 0;
+	iloc->bh = NULL;
 	if (!ext4_valid_inum(sb, inode->i_ino))
 		return -EIO;
 
@@ -3951,7 +4070,7 @@
 			num = EXT4_INODES_PER_GROUP(sb);
 			if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
 				       EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
-				num -= le16_to_cpu(gdp->bg_itable_unused);
+				num -= ext4_itable_unused_count(sb, gdp);
 			table += num / inodes_per_block;
 			if (end > table)
 				end = table;
@@ -4313,8 +4432,8 @@
 			EXT4_SET_RO_COMPAT_FEATURE(sb,
 					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
 			sb->s_dirt = 1;
-			handle->h_sync = 1;
-			err = ext4_journal_dirty_metadata(handle,
+			ext4_handle_sync(handle);
+			err = ext4_handle_dirty_metadata(handle, inode,
 					EXT4_SB(sb)->s_sbh);
 		}
 	}
@@ -4341,9 +4460,8 @@
 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
 	}
 
-
-	BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
-	rc = ext4_journal_dirty_metadata(handle, bh);
+	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+	rc = ext4_handle_dirty_metadata(handle, inode, bh);
 	if (!err)
 		err = rc;
 	ei->i_state &= ~EXT4_STATE_NEW;
@@ -4406,6 +4524,25 @@
 	return ext4_force_commit(inode->i_sb);
 }
 
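+/*
+ * Write out a dirty metadata buffer: mark it dirty and, for inodes that
+ * require synchronous writes, flush it and report -EIO if the write did
+ * not complete successfully.
+ */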
+int __ext4_write_dirty_metadata(struct inode *inode, struct buffer_head *bh)
+{
+	int err = 0;
+
+	mark_buffer_dirty(bh);
+	if (inode && inode_needs_sync(inode)) {
+		sync_dirty_buffer(bh);
+		if (buffer_req(bh) && !buffer_uptodate(bh)) {
+			ext4_error(inode->i_sb, __func__,
+				   "IO error syncing inode, "
+				   "inode=%lu, block=%llu",
+				   inode->i_ino,
+				   (unsigned long long)bh->b_blocknr);
+			err = -EIO;
+		}
+	}
+	return err;
+}
+
 /*
  * ext4_setattr()
  *
@@ -4710,16 +4847,15 @@
 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
 			 struct ext4_iloc *iloc)
 {
-	int err = 0;
-	if (handle) {
-		err = ext4_get_inode_loc(inode, iloc);
-		if (!err) {
-			BUFFER_TRACE(iloc->bh, "get_write_access");
-			err = ext4_journal_get_write_access(handle, iloc->bh);
-			if (err) {
-				brelse(iloc->bh);
-				iloc->bh = NULL;
-			}
+	int err;
+
+	err = ext4_get_inode_loc(inode, iloc);
+	if (!err) {
+		BUFFER_TRACE(iloc->bh, "get_write_access");
+		err = ext4_journal_get_write_access(handle, iloc->bh);
+		if (err) {
+			brelse(iloc->bh);
+			iloc->bh = NULL;
 		}
 	}
 	ext4_std_error(inode->i_sb, err);
@@ -4791,7 +4927,8 @@
 
 	might_sleep();
 	err = ext4_reserve_inode_write(handle, inode, &iloc);
-	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
+	if (ext4_handle_valid(handle) &&
+	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
 	    !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
 		/*
 		 * We need extra buffer credits since we may write into EA block
@@ -4843,6 +4980,11 @@
 	handle_t *current_handle = ext4_journal_current_handle();
 	handle_t *handle;
 
+	if (!ext4_handle_valid(current_handle)) {
+		ext4_mark_inode_dirty(current_handle, inode);
+		return;
+	}
+
 	handle = ext4_journal_start(inode, 2);
 	if (IS_ERR(handle))
 		goto out;
@@ -4880,8 +5022,9 @@
 			BUFFER_TRACE(iloc.bh, "get_write_access");
 			err = jbd2_journal_get_write_access(handle, iloc.bh);
 			if (!err)
-				err = ext4_journal_dirty_metadata(handle,
-								  iloc.bh);
+				err = ext4_handle_dirty_metadata(handle,
+								 inode,
+								 iloc.bh);
 			brelse(iloc.bh);
 		}
 	}
@@ -4907,6 +5050,8 @@
 	 */
 
 	journal = EXT4_JOURNAL(inode);
+	if (!journal)
+		return 0;
 	if (is_journal_aborted(journal))
 		return -EROFS;
 
@@ -4936,7 +5081,7 @@
 		return PTR_ERR(handle);
 
 	err = ext4_mark_inode_dirty(handle, inode);
-	handle->h_sync = 1;
+	ext4_handle_sync(handle);
 	ext4_journal_stop(handle);
 	ext4_std_error(inode->i_sb, err);
 
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index dc99b47..42dc83f 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -99,7 +99,7 @@
 			goto flags_out;
 		}
 		if (IS_SYNC(inode))
-			handle->h_sync = 1;
+			ext4_handle_sync(handle);
 		err = ext4_reserve_inode_write(handle, inode, &iloc);
 		if (err)
 			goto flags_err;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 444ad99..918aec0 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -100,7 +100,7 @@
  * inode as:
  *
  *  {                        page                        }
- *  [ group 0 buddy][ group 0 bitmap] [group 1][ group 1]...
+ *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
  *
  *
  * one block each for bitmap and buddy information.  So for each group we
@@ -330,6 +330,18 @@
  *        object
  *
  */
+static struct kmem_cache *ext4_pspace_cachep;
+static struct kmem_cache *ext4_ac_cachep;
+static struct kmem_cache *ext4_free_ext_cachep;
+static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+					ext4_group_t group);
+static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
+						ext4_group_t group);
+static int ext4_mb_init_per_dev_proc(struct super_block *sb);
+static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);
+static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);
+
+
 
 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
 {
@@ -445,9 +457,9 @@
 			blocknr += first + i;
 			blocknr +=
 			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
-
-			ext4_error(sb, __func__, "double-free of inode"
-				   " %lu's block %llu(bit %u in group %lu)\n",
+			ext4_grp_locked_error(sb, e4b->bd_group,
+				   __func__, "double-free of inode"
+				   " %lu's block %llu(bit %u in group %u)",
 				   inode ? inode->i_ino : 0, blocknr,
 				   first + i, e4b->bd_group);
 		}
@@ -477,7 +489,7 @@
 		b2 = (unsigned char *) bitmap;
 		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
 			if (b1[i] != b2[i]) {
-				printk(KERN_ERR "corruption in group %lu "
+				printk(KERN_ERR "corruption in group %u "
 				       "at byte %u(%u): %x in copy != %x "
 				       "on disk/prealloc\n",
 				       e4b->bd_group, i, i * 8, b1[i], b2[i]);
@@ -690,8 +702,8 @@
 	grp->bb_fragments = fragments;
 
 	if (free != grp->bb_free) {
-		ext4_error(sb, __func__,
-			"EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n",
+		ext4_grp_locked_error(sb, group,  __func__,
+			"EXT4-fs: group %u: %u blocks in bitmap, %u in gd",
 			group, free, grp->bb_free);
 		/*
 		 * If we intent to continue, we consider group descritor
@@ -716,7 +728,7 @@
  * stored in the inode as
  *
  * {                        page                        }
- * [ group 0 buddy][ group 0 bitmap] [group 1][ group 1]...
+ * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
  *
  *
  * one block each for bitmap and buddy information.
@@ -782,25 +794,45 @@
 		if (bh[i] == NULL)
 			goto out;
 
-		if (buffer_uptodate(bh[i]) &&
-		    !(desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))
+		if (bitmap_uptodate(bh[i]))
 			continue;
 
 		lock_buffer(bh[i]);
+		if (bitmap_uptodate(bh[i])) {
+			unlock_buffer(bh[i]);
+			continue;
+		}
 		spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
 		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 			ext4_init_block_bitmap(sb, bh[i],
 						first_group + i, desc);
+			set_bitmap_uptodate(bh[i]);
 			set_buffer_uptodate(bh[i]);
-			unlock_buffer(bh[i]);
 			spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+			unlock_buffer(bh[i]);
 			continue;
 		}
 		spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+		if (buffer_uptodate(bh[i])) {
+			/*
+			 * if not uninit if bh is uptodate,
+			 * if the group is not uninit and the bh
+			 * is uptodate, the bitmap is also uptodate
+			set_bitmap_uptodate(bh[i]);
+			unlock_buffer(bh[i]);
+			continue;
+		}
 		get_bh(bh[i]);
+		/*
+		 * submit the buffer_head for read. We can
+		 * safely mark the bitmap as uptodate now.
+		 * We do it here so the bitmap uptodate bit
+		 * gets set with the buffer lock held.
+		 */
+		set_bitmap_uptodate(bh[i]);
 		bh[i]->b_end_io = end_buffer_read_sync;
 		submit_bh(READ, bh[i]);
-		mb_debug("read bitmap for group %lu\n", first_group + i);
+		mb_debug("read bitmap for group %u\n", first_group + i);
 	}
 
 	/* wait for I/O completion */
@@ -814,6 +846,8 @@
 
 	err = 0;
 	first_block = page->index * blocks_per_page;
+	/* init the page  */
+	memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
 	for (i = 0; i < blocks_per_page; i++) {
 		int group;
 		struct ext4_group_info *grinfo;
@@ -840,7 +874,6 @@
 			BUG_ON(incore == NULL);
 			mb_debug("put buddy for group %u in page %lu/%x\n",
 				group, page->index, i * blocksize);
-			memset(data, 0xff, blocksize);
 			grinfo = ext4_get_group_info(sb, group);
 			grinfo->bb_fragments = 0;
 			memset(grinfo->bb_counters, 0,
@@ -848,7 +881,9 @@
 			/*
 			 * incore got set to the group block bitmap below
 			 */
+			ext4_lock_group(sb, group);
 			ext4_mb_generate_buddy(sb, data, incore, group);
+			ext4_unlock_group(sb, group);
 			incore = NULL;
 		} else {
 			/* this is block of bitmap */
@@ -862,6 +897,7 @@
 
 			/* mark all preallocated blks used in in-core bitmap */
 			ext4_mb_generate_from_pa(sb, data, group);
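+			/* also mark blocks freed but not yet committed */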
+			ext4_mb_generate_from_freelist(sb, data, group);
 			ext4_unlock_group(sb, group);
 
 			/* set incore so that the buddy information can be
@@ -886,18 +922,20 @@
 ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
 					struct ext4_buddy *e4b)
 {
-	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	struct inode *inode = sbi->s_buddy_cache;
 	int blocks_per_page;
 	int block;
 	int pnum;
 	int poff;
 	struct page *page;
 	int ret;
+	struct ext4_group_info *grp;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct inode *inode = sbi->s_buddy_cache;
 
-	mb_debug("load group %lu\n", group);
+	mb_debug("load group %u\n", group);
 
 	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+	grp = ext4_get_group_info(sb, group);
 
 	e4b->bd_blkbits = sb->s_blocksize_bits;
 	e4b->bd_info = ext4_get_group_info(sb, group);
@@ -905,6 +943,15 @@
 	e4b->bd_group = group;
 	e4b->bd_buddy_page = NULL;
 	e4b->bd_bitmap_page = NULL;
+	e4b->alloc_semp = &grp->alloc_sem;
+
+	/* Take the read lock on the group alloc
+	 * sem. This makes sure that a parallel
+	 * ext4_mb_init_group happening on other
+	 * groups mapped by the page is blocked
+	 * until we are done with the allocation
+	 */
+	down_read(e4b->alloc_semp);
 
 	/*
 	 * the buddy cache inode stores the block bitmap
@@ -920,6 +967,14 @@
 	page = find_get_page(inode->i_mapping, pnum);
 	if (page == NULL || !PageUptodate(page)) {
 		if (page)
+			/*
+			 * drop the page reference and try
+			 * to get the page with the page lock
+			 * held. If it is not uptodate, somebody
+			 * just created the page but has not yet
+			 * initialized it, so wait for the
+			 * initialization to finish.
+			 */
 			page_cache_release(page);
 		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
 		if (page) {
@@ -985,6 +1040,9 @@
 		page_cache_release(e4b->bd_buddy_page);
 	e4b->bd_buddy = NULL;
 	e4b->bd_bitmap = NULL;
+
+	/* Done with the buddy cache */
+	up_read(e4b->alloc_semp);
 	return ret;
 }
 
@@ -994,6 +1052,9 @@
 		page_cache_release(e4b->bd_bitmap_page);
 	if (e4b->bd_buddy_page)
 		page_cache_release(e4b->bd_buddy_page);
+	/* Done with the buddy cache */
+	if (e4b->alloc_semp)
+		up_read(e4b->alloc_semp);
 }
 
 
@@ -1031,7 +1092,10 @@
 			cur += 32;
 			continue;
 		}
-		mb_clear_bit_atomic(lock, cur, bm);
+		if (lock)
+			mb_clear_bit_atomic(lock, cur, bm);
+		else
+			mb_clear_bit(cur, bm);
 		cur++;
 	}
 }
@@ -1049,7 +1113,10 @@
 			cur += 32;
 			continue;
 		}
-		mb_set_bit_atomic(lock, cur, bm);
+		if (lock)
+			mb_set_bit_atomic(lock, cur, bm);
+		else
+			mb_set_bit(cur, bm);
 		cur++;
 	}
 }
@@ -1094,12 +1161,11 @@
 			blocknr += block;
 			blocknr +=
 			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
-			ext4_unlock_group(sb, e4b->bd_group);
-			ext4_error(sb, __func__, "double-free of inode"
-				   " %lu's block %llu(bit %u in group %lu)\n",
+			ext4_grp_locked_error(sb, e4b->bd_group,
+				   __func__, "double-free of inode"
+				   " %lu's block %llu(bit %u in group %u)",
 				   inode ? inode->i_ino : 0, blocknr, block,
 				   e4b->bd_group);
-			ext4_lock_group(sb, e4b->bd_group);
 		}
 		mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
 		e4b->bd_info->bb_counters[order]++;
@@ -1296,13 +1362,20 @@
 	ac->ac_tail = ret & 0xffff;
 	ac->ac_buddy = ret >> 16;
 
-	/* XXXXXXX: SUCH A HORRIBLE **CK */
-	/*FIXME!! Why ? */
+	/*
+	 * take the page reference. We want the page to be pinned
+	 * so that we don't get an ext4_mb_init_cache call for this
+	 * group until we update the bitmap; otherwise we could
+	 * double-allocate blocks. The reference is dropped
+	 * in ext4_mb_release_context.
+	 */
 	ac->ac_bitmap_page = e4b->bd_bitmap_page;
 	get_page(ac->ac_bitmap_page);
 	ac->ac_buddy_page = e4b->bd_buddy_page;
 	get_page(ac->ac_buddy_page);
-
+	/* on allocation we use ac to track the held semaphore */
+	ac->alloc_semp =  e4b->alloc_semp;
+	e4b->alloc_semp = NULL;
 	/* store last allocated for subsequent stream allocation */
 	if ((ac->ac_flags & EXT4_MB_HINT_DATA)) {
 		spin_lock(&sbi->s_md_lock);
@@ -1326,6 +1399,8 @@
 	struct ext4_free_extent ex;
 	int max;
 
+	if (ac->ac_status == AC_STATUS_FOUND)
+		return;
 	/*
 	 * We don't want to scan for a whole year
 	 */
@@ -1575,8 +1650,9 @@
 			 * free blocks even though group info says we
 			 * we have free blocks
 			 */
-			ext4_error(sb, __func__, "%d free blocks as per "
-					"group info. But bitmap says 0\n",
+			ext4_grp_locked_error(sb, e4b->bd_group,
+					__func__, "%d free blocks as per "
+					"group info. But bitmap says 0",
 					free);
 			break;
 		}
@@ -1584,8 +1660,9 @@
 		mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
 		BUG_ON(ex.fe_len <= 0);
 		if (free < ex.fe_len) {
-			ext4_error(sb, __func__, "%d free blocks as per "
-					"group info. But got %d blocks\n",
+			ext4_grp_locked_error(sb, e4b->bd_group,
+					__func__, "%d free blocks as per "
+					"group info. But got %d blocks",
 					free, ex.fe_len);
 			/*
 			 * The number of free blocks differs. This mostly
@@ -1692,6 +1769,173 @@
 	return 0;
 }
 
+/*
+ * lock the group_info alloc_sem of all the groups
+ * belonging to the same buddy cache page. This
+ * makes sure that other parallel operations on the
+ * buddy cache don't happen while we hold the buddy
+ * cache lock
+ */
+int ext4_mb_get_buddy_cache_lock(struct super_block *sb, ext4_group_t group)
+{
+	int i;
+	int block, pnum;
+	int blocks_per_page;
+	int groups_per_page;
+	ext4_group_t first_group;
+	struct ext4_group_info *grp;
+
+	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+	/*
+	 * the buddy cache inode stores the block bitmap
+	 * and buddy information in consecutive blocks.
+	 * So for each group we need two blocks.
+	 */
+	block = group * 2;
+	pnum = block / blocks_per_page;
+	first_group = pnum * blocks_per_page / 2;
+
+	groups_per_page = blocks_per_page >> 1;
+	if (groups_per_page == 0)
+		groups_per_page = 1;
+	/* take the alloc_sem of every group the page covers */
+	for (i = 0; i < groups_per_page; i++) {
+
+		if ((first_group + i) >= EXT4_SB(sb)->s_groups_count)
+			break;
+		grp = ext4_get_group_info(sb, first_group + i);
+		/* take each group's write allocation
+		 * semaphore. This makes sure there is
+		 * no block allocation going on in any
+		 * of those groups
+		 */
+		down_write_nested(&grp->alloc_sem, i);
+	}
+	return i;
+}
+
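+/*
+ * Release the per-group alloc_sem write locks taken by
+ * ext4_mb_get_buddy_cache_lock for the groups covered by the
+ * same buddy cache page.
+ */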
+void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
+					ext4_group_t group, int locked_group)
+{
+	int i;
+	int block, pnum;
+	int blocks_per_page;
+	ext4_group_t first_group;
+	struct ext4_group_info *grp;
+
+	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+	/*
+	 * the buddy cache inode stores the block bitmap
+	 * and buddy information in consecutive blocks.
+	 * So for each group we need two blocks.
+	 */
+	block = group * 2;
+	pnum = block / blocks_per_page;
+	first_group = pnum * blocks_per_page / 2;
+	/* release locks on all the groups */
+	for (i = 0; i < locked_group; i++) {
+
+		grp = ext4_get_group_info(sb, first_group + i);
+		/* release each group's write allocation
+		 * semaphore taken in
+		 * ext4_mb_get_buddy_cache_lock
+		 */
+		up_write(&grp->alloc_sem);
+	}
+
+}
+
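+/*
+ * Initialize the buddy cache (block bitmap and buddy pages) for one group,
+ * with the alloc_sem of every group sharing the buddy cache page held.
+ */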
+static int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
+{
+
+	int ret;
+	void *bitmap;
+	int blocks_per_page;
+	int block, pnum, poff;
+	int num_grp_locked = 0;
+	struct ext4_group_info *this_grp;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct inode *inode = sbi->s_buddy_cache;
+	struct page *page = NULL, *bitmap_page = NULL;
+
+	mb_debug("init group %u\n", group);
+	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+	this_grp = ext4_get_group_info(sb, group);
+	/*
+	 * This ensures that a group is not added
+	 * to this buddy cache via resize
+	 */
+	num_grp_locked =  ext4_mb_get_buddy_cache_lock(sb, group);
+	if (!EXT4_MB_GRP_NEED_INIT(this_grp)) {
+		/*
+		 * somebody else initialized the group;
+		 * return without doing anything
+		 */
+		ret = 0;
+		goto err;
+	}
+	/*
+	 * the buddy cache inode stores the block bitmap
+	 * and buddy information in consecutive blocks.
+	 * So for each group we need two blocks.
+	 */
+	block = group * 2;
+	pnum = block / blocks_per_page;
+	poff = block % blocks_per_page;
+	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
+	if (page) {
+		BUG_ON(page->mapping != inode->i_mapping);
+		ret = ext4_mb_init_cache(page, NULL);
+		if (ret) {
+			unlock_page(page);
+			goto err;
+		}
+		unlock_page(page);
+	}
+	if (page == NULL || !PageUptodate(page)) {
+		ret = -EIO;
+		goto err;
+	}
+	mark_page_accessed(page);
+	bitmap_page = page;
+	bitmap = page_address(page) + (poff * sb->s_blocksize);
+
+	/* init buddy cache */
+	block++;
+	pnum = block / blocks_per_page;
+	poff = block % blocks_per_page;
+	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
+	if (page == bitmap_page) {
+		/*
+		 * If both the bitmap and buddy are in
+		 * the same page we don't need to force
+		 * init the buddy
+		 */
+		unlock_page(page);
+	} else if (page) {
+		BUG_ON(page->mapping != inode->i_mapping);
+		ret = ext4_mb_init_cache(page, bitmap);
+		if (ret) {
+			unlock_page(page);
+			goto err;
+		}
+		unlock_page(page);
+	}
+	if (page == NULL || !PageUptodate(page)) {
+		ret = -EIO;
+		goto err;
+	}
+	mark_page_accessed(page);
+err:
+	ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked);
+	if (bitmap_page)
+		page_cache_release(bitmap_page);
+	if (page)
+		page_cache_release(page);
+	return ret;
+}
+
 static noinline_for_stack int
 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 {
@@ -1775,7 +2019,7 @@
 				group = 0;
 
 			/* quick check to skip empty groups */
-			grp = ext4_get_group_info(ac->ac_sb, group);
+			grp = ext4_get_group_info(sb, group);
 			if (grp->bb_free == 0)
 				continue;
 
@@ -1788,10 +2032,9 @@
 				 * we need full data about the group
 				 * to make a good selection
 				 */
-				err = ext4_mb_load_buddy(sb, group, &e4b);
+				err = ext4_mb_init_group(sb, group);
 				if (err)
 					goto out;
-				ext4_mb_release_desc(&e4b);
 			}
 
 			/*
@@ -1932,13 +2175,13 @@
 	if (hs->op == EXT4_MB_HISTORY_ALLOC) {
 		fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
 			"%-5u %-5s %-5u %-6u\n";
-		sprintf(buf2, "%lu/%d/%u@%u", hs->result.fe_group,
+		sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
 			hs->result.fe_start, hs->result.fe_len,
 			hs->result.fe_logical);
-		sprintf(buf, "%lu/%d/%u@%u", hs->orig.fe_group,
+		sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
 			hs->orig.fe_start, hs->orig.fe_len,
 			hs->orig.fe_logical);
-		sprintf(buf3, "%lu/%d/%u@%u", hs->goal.fe_group,
+		sprintf(buf3, "%u/%d/%u@%u", hs->goal.fe_group,
 			hs->goal.fe_start, hs->goal.fe_len,
 			hs->goal.fe_logical);
 		seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
@@ -1947,20 +2190,20 @@
 				hs->buddy ? 1 << hs->buddy : 0);
 	} else if (hs->op == EXT4_MB_HISTORY_PREALLOC) {
 		fmt = "%-5u %-8u %-23s %-23s %-23s\n";
-		sprintf(buf2, "%lu/%d/%u@%u", hs->result.fe_group,
+		sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
 			hs->result.fe_start, hs->result.fe_len,
 			hs->result.fe_logical);
-		sprintf(buf, "%lu/%d/%u@%u", hs->orig.fe_group,
+		sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
 			hs->orig.fe_start, hs->orig.fe_len,
 			hs->orig.fe_logical);
 		seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
 	} else if (hs->op == EXT4_MB_HISTORY_DISCARD) {
-		sprintf(buf2, "%lu/%d/%u", hs->result.fe_group,
+		sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
 			hs->result.fe_start, hs->result.fe_len);
 		seq_printf(seq, "%-5u %-8u %-23s discard\n",
 				hs->pid, hs->ino, buf2);
 	} else if (hs->op == EXT4_MB_HISTORY_FREE) {
-		sprintf(buf2, "%lu/%d/%u", hs->result.fe_group,
+		sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
 			hs->result.fe_start, hs->result.fe_len);
 		seq_printf(seq, "%-5u %-8u %-23s free\n",
 				hs->pid, hs->ino, buf2);
@@ -2073,7 +2316,7 @@
 		return NULL;
 
 	group = *pos + 1;
-	return (void *) group;
+	return (void *) ((unsigned long) group);
 }
 
 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -2086,13 +2329,13 @@
 	if (*pos < 0 || *pos >= sbi->s_groups_count)
 		return NULL;
 	group = *pos + 1;
-	return (void *) group;;
+	return (void *) ((unsigned long) group);
 }
 
 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
 {
 	struct super_block *sb = seq->private;
-	long group = (long) v;
+	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
 	int i;
 	int err;
 	struct ext4_buddy e4b;
@@ -2114,7 +2357,7 @@
 		sizeof(struct ext4_group_info);
 	err = ext4_mb_load_buddy(sb, group, &e4b);
 	if (err) {
-		seq_printf(seq, "#%-5lu: I/O error\n", group);
+		seq_printf(seq, "#%-5u: I/O error\n", group);
 		return 0;
 	}
 	ext4_lock_group(sb, group);
@@ -2122,7 +2365,7 @@
 	ext4_unlock_group(sb, group);
 	ext4_mb_release_desc(&e4b);
 
-	seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free,
+	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
 			sg.info.bb_fragments, sg.info.bb_first_free);
 	for (i = 0; i <= 13; i++)
 		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
@@ -2296,10 +2539,11 @@
 			ext4_free_blocks_after_init(sb, group, desc);
 	} else {
 		meta_group_info[i]->bb_free =
-			le16_to_cpu(desc->bg_free_blocks_count);
+			ext4_free_blks_count(sb, desc);
 	}
 
 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
+	init_rwsem(&meta_group_info[i]->alloc_sem);
 	meta_group_info[i]->bb_free_root.rb_node = NULL;;
 
 #ifdef DOUBLE_CHECK
@@ -2327,54 +2571,6 @@
 } /* ext4_mb_add_groupinfo */
 
 /*
- * Add a group to the existing groups.
- * This function is used for online resize
- */
-int ext4_mb_add_more_groupinfo(struct super_block *sb, ext4_group_t group,
-			       struct ext4_group_desc *desc)
-{
-	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	struct inode *inode = sbi->s_buddy_cache;
-	int blocks_per_page;
-	int block;
-	int pnum;
-	struct page *page;
-	int err;
-
-	/* Add group based on group descriptor*/
-	err = ext4_mb_add_groupinfo(sb, group, desc);
-	if (err)
-		return err;
-
-	/*
-	 * Cache pages containing dynamic mb_alloc datas (buddy and bitmap
-	 * datas) are set not up to date so that they will be re-initilaized
-	 * during the next call to ext4_mb_load_buddy
-	 */
-
-	/* Set buddy page as not up to date */
-	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
-	block = group * 2;
-	pnum = block / blocks_per_page;
-	page = find_get_page(inode->i_mapping, pnum);
-	if (page != NULL) {
-		ClearPageUptodate(page);
-		page_cache_release(page);
-	}
-
-	/* Set bitmap page as not up to date */
-	block++;
-	pnum = block / blocks_per_page;
-	page = find_get_page(inode->i_mapping, pnum);
-	if (page != NULL) {
-		ClearPageUptodate(page);
-		page_cache_release(page);
-	}
-
-	return 0;
-}
-
-/*
  * Update an existing group.
  * This function is used for online resize
  */
@@ -2457,7 +2653,7 @@
 		desc = ext4_get_group_desc(sb, i, NULL);
 		if (desc == NULL) {
 			printk(KERN_ERR
-				"EXT4-fs: can't read descriptor %lu\n", i);
+				"EXT4-fs: can't read descriptor %u\n", i);
 			goto err_freebuddy;
 		}
 		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
@@ -2493,6 +2689,8 @@
 	if (sbi->s_mb_offsets == NULL) {
 		return -ENOMEM;
 	}
+
+	i = (sb->s_blocksize_bits + 2) * sizeof(unsigned int);
 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
 	if (sbi->s_mb_maxs == NULL) {
 		kfree(sbi->s_mb_maxs);
@@ -2551,7 +2749,8 @@
 	ext4_mb_init_per_dev_proc(sb);
 	ext4_mb_history_init(sb);
 
-	sbi->s_journal->j_commit_callback = release_blocks_on_commit;
+	if (sbi->s_journal)
+		sbi->s_journal->j_commit_callback = release_blocks_on_commit;
 
 	printk(KERN_INFO "EXT4-fs: mballoc enabled\n");
 	return 0;
@@ -2652,7 +2851,7 @@
 	list_for_each_safe(l, ltmp, &txn->t_private_list) {
 		entry = list_entry(l, struct ext4_free_data, list);
 
-		mb_debug("gonna free %u blocks in group %lu (0x%p):",
+		mb_debug("gonna free %u blocks in group %u (0x%p):",
 			 entry->count, entry->group, entry);
 
 		err = ext4_mb_load_buddy(sb, entry->group, &e4b);
@@ -2679,8 +2878,9 @@
 		discard_block = (ext4_fsblk_t) entry->group * EXT4_BLOCKS_PER_GROUP(sb)
 			+ entry->start_blk
 			+ le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
-		trace_mark(ext4_discard_blocks, "dev %s blk %llu count %u", sb->s_id,
-			   (unsigned long long) discard_block, entry->count);
+		trace_mark(ext4_discard_blocks, "dev %s blk %llu count %u",
+			   sb->s_id, (unsigned long long) discard_block,
+			   entry->count);
 		sb_issue_discard(sb, discard_block, entry->count);
 
 		kmem_cache_free(ext4_free_ext_cachep, entry);
@@ -2791,7 +2991,7 @@
  */
 static noinline_for_stack int
 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
-				handle_t *handle, unsigned long reserv_blks)
+				handle_t *handle, unsigned int reserv_blks)
 {
 	struct buffer_head *bitmap_bh = NULL;
 	struct ext4_super_block *es;
@@ -2824,7 +3024,7 @@
 	if (!gdp)
 		goto out_err;
 
-	ext4_debug("using block group %lu(%d)\n", ac->ac_b_ex.fe_group,
+	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
 			gdp->bg_free_blocks_count);
 
 	err = ext4_journal_get_write_access(handle, gdp_bh);
@@ -2843,8 +3043,8 @@
 	    in_range(block + len - 1, ext4_inode_table(sb, gdp),
 		     EXT4_SB(sb)->s_itb_per_group)) {
 		ext4_error(sb, __func__,
-			   "Allocating block in system zone - block = %llu",
-			   block);
+			   "Allocating block %llu in system zone of group %d",
+			   block, ac->ac_b_ex.fe_group);
 		/* File system mounted not to panic on error
 		 * Fix the bitmap and repeat the block allocation
 		 * We leak some of the blocks here.
@@ -2852,7 +3052,7 @@
 		mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
 				bitmap_bh->b_data, ac->ac_b_ex.fe_start,
 				ac->ac_b_ex.fe_len);
-		err = ext4_journal_dirty_metadata(handle, bitmap_bh);
+		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
 		if (!err)
 			err = -EAGAIN;
 		goto out_err;
@@ -2866,18 +3066,17 @@
 		}
 	}
 #endif
-	mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data,
-				ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
-
 	spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
+	mb_set_bits(NULL, bitmap_bh->b_data,
+				ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
-		gdp->bg_free_blocks_count =
-			cpu_to_le16(ext4_free_blocks_after_init(sb,
-						ac->ac_b_ex.fe_group,
-						gdp));
+		ext4_free_blks_set(sb, gdp,
+					ext4_free_blocks_after_init(sb,
+					ac->ac_b_ex.fe_group, gdp));
 	}
-	le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len);
+	len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
+	ext4_free_blks_set(sb, gdp, len);
 	gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
 	spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
 	percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
@@ -2899,10 +3098,10 @@
 		spin_unlock(sb_bgl_lock(sbi, flex_group));
 	}
 
-	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
+	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
 	if (err)
 		goto out_err;
-	err = ext4_journal_dirty_metadata(handle, gdp_bh);
+	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
 
 out_err:
 	sb->s_dirt = 1;
@@ -3031,7 +3230,7 @@
 	/* check we don't cross already preallocated blocks */
 	rcu_read_lock();
 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
-		unsigned long pa_end;
+		ext4_lblk_t pa_end;
 
 		if (pa->pa_deleted)
 			continue;
@@ -3075,7 +3274,7 @@
 	/* XXX: extra loop to check we really don't overlap preallocations */
 	rcu_read_lock();
 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
-		unsigned long pa_end;
+		ext4_lblk_t pa_end;
 		spin_lock(&pa->pa_lock);
 		if (pa->pa_deleted == 0) {
 			pa_end = pa->pa_lstart + pa->pa_len;
@@ -3307,6 +3506,32 @@
 }
 
 /*
+ * the function goes through all blocks freed in the group
+ * but not yet committed and marks them used in the in-core bitmap.
+ * the buddy must be generated from this bitmap
+ * Needs to be called with the ext4 group lock held (ext4_lock_group)
+ */
+static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
+						ext4_group_t group)
+{
+	struct rb_node *n;
+	struct ext4_group_info *grp;
+	struct ext4_free_data *entry;
+
+	grp = ext4_get_group_info(sb, group);
+	n = rb_first(&(grp->bb_free_root));
+
+	while (n) {
+		entry = rb_entry(n, struct ext4_free_data, node);
+		mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
+				bitmap, entry->start_blk,
+				entry->count);
+		n = rb_next(n);
+	}
+	return;
+}
+
+/*
  * the function goes through all preallocation in this group and marks them
  * used in in-core bitmap. buddy must be generated from this bitmap
  * Need to be called with ext4 group lock (ext4_lock_group)
@@ -3346,7 +3571,7 @@
 		preallocated += len;
 		count++;
 	}
-	mb_debug("prellocated %u for group %lu\n", preallocated, group);
+	mb_debug("preallocated %u for group %u\n", preallocated, group);
 }
 
 static void ext4_mb_pa_callback(struct rcu_head *head)
@@ -3363,7 +3588,7 @@
 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
 			struct super_block *sb, struct ext4_prealloc_space *pa)
 {
-	unsigned long grp;
+	ext4_group_t grp;
 
 	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
 		return;
@@ -3473,6 +3698,10 @@
 
 	mb_debug("new inode pa %p: %llu/%u for %u\n", pa,
 			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
+	trace_mark(ext4_mb_new_inode_pa,
+		   "dev %s ino %lu pstart %llu len %u lstart %u",
+		   sb->s_id, ac->ac_inode->i_ino,
+		   pa->pa_pstart, pa->pa_len, pa->pa_lstart);
 
 	ext4_mb_use_inode_pa(ac, pa);
 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
@@ -3530,7 +3759,9 @@
 	pa->pa_linear = 1;
 
 	mb_debug("new group pa %p: %llu/%u for %u\n", pa,
-			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
+		 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
+	trace_mark(ext4_mb_new_group_pa, "dev %s pstart %llu len %u lstart %u",
+		   sb->s_id, pa->pa_pstart, pa->pa_len, pa->pa_lstart);
 
 	ext4_mb_use_group_pa(ac, pa);
 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
@@ -3579,16 +3810,18 @@
 {
 	struct super_block *sb = e4b->bd_sb;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	unsigned long end;
-	unsigned long next;
+	unsigned int end;
+	unsigned int next;
 	ext4_group_t group;
 	ext4_grpblk_t bit;
+	unsigned long long grp_blk_start;
 	sector_t start;
 	int err = 0;
 	int free = 0;
 
 	BUG_ON(pa->pa_deleted == 0);
 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
+	grp_blk_start = pa->pa_pstart - bit;
 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
 	end = bit + pa->pa_len;
 
@@ -3618,6 +3851,10 @@
 			ext4_mb_store_history(ac);
 		}
 
+		trace_mark(ext4_mb_release_inode_pa,
+			   "dev %s ino %lu block %llu count %u",
+			   sb->s_id, pa->pa_inode->i_ino, grp_blk_start + bit,
+			   next - bit);
 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
 		bit = next + 1;
 	}
@@ -3626,8 +3863,9 @@
 			pa, (unsigned long) pa->pa_lstart,
 			(unsigned long) pa->pa_pstart,
 			(unsigned long) pa->pa_len);
-		ext4_error(sb, __func__, "free %u, pa_free %u\n",
-						free, pa->pa_free);
+		ext4_grp_locked_error(sb, group,
+					__func__, "free %u, pa_free %u",
+					free, pa->pa_free);
 		/*
 		 * pa is already deleted so we use the value obtained
 		 * from the bitmap and continue.
@@ -3650,6 +3888,8 @@
 	if (ac)
 		ac->ac_op = EXT4_MB_HISTORY_DISCARD;
 
+	trace_mark(ext4_mb_release_group_pa, "dev %s pstart %llu len %d",
+		   sb->s_id, pa->pa_pstart, pa->pa_len);
 	BUG_ON(pa->pa_deleted == 0);
 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
@@ -3692,7 +3932,7 @@
 	int busy = 0;
 	int free = 0;
 
-	mb_debug("discard preallocation for group %lu\n", group);
+	mb_debug("discard preallocation for group %u\n", group);
 
 	if (list_empty(&grp->bb_prealloc_list))
 		return 0;
@@ -3700,14 +3940,14 @@
 	bitmap_bh = ext4_read_block_bitmap(sb, group);
 	if (bitmap_bh == NULL) {
 		ext4_error(sb, __func__, "Error in reading block "
-				"bitmap for %lu\n", group);
+				"bitmap for %u", group);
 		return 0;
 	}
 
 	err = ext4_mb_load_buddy(sb, group, &e4b);
 	if (err) {
 		ext4_error(sb, __func__, "Error in loading buddy "
-				"information for %lu\n", group);
+				"information for %u", group);
 		put_bh(bitmap_bh);
 		return 0;
 	}
@@ -3815,6 +4055,8 @@
 	}
 
 	mb_debug("discard preallocation for inode %lu\n", inode->i_ino);
+	trace_mark(ext4_discard_preallocations, "dev %s ino %lu", sb->s_id,
+		   inode->i_ino);
 
 	INIT_LIST_HEAD(&list);
 
@@ -3874,14 +4116,14 @@
 		err = ext4_mb_load_buddy(sb, group, &e4b);
 		if (err) {
 			ext4_error(sb, __func__, "Error in loading buddy "
-					"information for %lu\n", group);
+					"information for %u", group);
 			continue;
 		}
 
 		bitmap_bh = ext4_read_block_bitmap(sb, group);
 		if (bitmap_bh == NULL) {
 			ext4_error(sb, __func__, "Error in reading block "
-					"bitmap for %lu\n", group);
+					"bitmap for %u", group);
 			ext4_mb_release_desc(&e4b);
 			continue;
 		}
@@ -4024,8 +4266,8 @@
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_super_block *es = sbi->s_es;
 	ext4_group_t group;
-	unsigned long len;
-	unsigned long goal;
+	unsigned int len;
+	ext4_fsblk_t goal;
 	ext4_grpblk_t block;
 
 	/* we can't allocate > group size */
@@ -4068,6 +4310,7 @@
 	ac->ac_pa = NULL;
 	ac->ac_bitmap_page = NULL;
 	ac->ac_buddy_page = NULL;
+	ac->alloc_semp = NULL;
 	ac->ac_lg = NULL;
 
 	/* we have to define context: we'll we work with a file or
@@ -4146,7 +4389,7 @@
 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
 		if (ext4_mb_load_buddy(sb, group, &e4b)) {
 			ext4_error(sb, __func__, "Error in loading buddy "
-					"information for %lu\n", group);
+					"information for %u", group);
 			continue;
 		}
 		ext4_lock_group(sb, group);
@@ -4248,6 +4491,8 @@
 		}
 		ext4_mb_put_pa(ac, ac->ac_sb, pa);
 	}
+	if (ac->alloc_semp)
+		up_read(ac->alloc_semp);
 	if (ac->ac_bitmap_page)
 		page_cache_release(ac->ac_bitmap_page);
 	if (ac->ac_buddy_page)
@@ -4264,6 +4509,8 @@
 	int ret;
 	int freed = 0;
 
+	trace_mark(ext4_mb_discard_preallocations, "dev %s needed %d",
+		   sb->s_id, needed);
 	for (i = 0; i < EXT4_SB(sb)->s_groups_count && needed > 0; i++) {
 		ret = ext4_mb_discard_group_preallocations(sb, i, needed);
 		freed += ret;
@@ -4286,12 +4533,24 @@
 	struct ext4_sb_info *sbi;
 	struct super_block *sb;
 	ext4_fsblk_t block = 0;
-	unsigned long inquota;
-	unsigned long reserv_blks = 0;
+	unsigned int inquota;
+	unsigned int reserv_blks = 0;
 
 	sb = ar->inode->i_sb;
 	sbi = EXT4_SB(sb);
 
+	trace_mark(ext4_request_blocks, "dev %s flags %u len %u ino %lu "
+		   "lblk %llu goal %llu lleft %llu lright %llu "
+		   "pleft %llu pright %llu ",
+		   sb->s_id, ar->flags, ar->len,
+		   ar->inode ? ar->inode->i_ino : 0,
+		   (unsigned long long) ar->logical,
+		   (unsigned long long) ar->goal,
+		   (unsigned long long) ar->lleft,
+		   (unsigned long long) ar->lright,
+		   (unsigned long long) ar->pleft,
+		   (unsigned long long) ar->pright);
+
 	if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) {
 		/*
 		 * With delalloc we already reserved the blocks
@@ -4313,7 +4572,7 @@
 	}
 	if (ar->len == 0) {
 		*errp = -EDQUOT;
-		return 0;
+		goto out3;
 	}
 	inquota = ar->len;
 
@@ -4348,10 +4607,14 @@
 				ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
 			ext4_mb_new_preallocation(ac);
 	}
-
 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks);
 		if (*errp ==  -EAGAIN) {
+			/*
+			 * drop the reference that we took
+			 * in ext4_mb_use_best_found
+			 */
+			ext4_mb_release_context(ac);
 			ac->ac_b_ex.fe_group = 0;
 			ac->ac_b_ex.fe_start = 0;
 			ac->ac_b_ex.fe_len = 0;
@@ -4382,6 +4645,26 @@
 out1:
 	if (ar->len < inquota)
 		DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
+out3:
+	if (!ar->len) {
+		if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+			/* release all the reserved blocks if non delalloc */
+			percpu_counter_sub(&sbi->s_dirtyblocks_counter,
+						reserv_blks);
+	}
+
+	trace_mark(ext4_allocate_blocks,
+		   "dev %s block %llu flags %u len %u ino %lu "
+		   "logical %llu goal %llu lleft %llu lright %llu "
+		   "pleft %llu pright %llu ",
+		   sb->s_id, (unsigned long long) block,
+		   ar->flags, ar->len, ar->inode ? ar->inode->i_ino : 0,
+		   (unsigned long long) ar->logical,
+		   (unsigned long long) ar->goal,
+		   (unsigned long long) ar->lleft,
+		   (unsigned long long) ar->lright,
+		   (unsigned long long) ar->pleft,
+		   (unsigned long long) ar->pright);
 
 	return block;
 }
@@ -4403,27 +4686,23 @@
 
 static noinline_for_stack int
 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
-			  ext4_group_t group, ext4_grpblk_t block, int count)
+		      struct ext4_free_data *new_entry)
 {
+	ext4_grpblk_t block;
+	struct ext4_free_data *entry;
 	struct ext4_group_info *db = e4b->bd_info;
 	struct super_block *sb = e4b->bd_sb;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	struct ext4_free_data *entry, *new_entry;
 	struct rb_node **n = &db->bb_free_root.rb_node, *node;
 	struct rb_node *parent = NULL, *new_node;
 
-
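+	/* only called with a journalled handle; see ext4_free_blocks() */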
+	BUG_ON(!ext4_handle_valid(handle));
 	BUG_ON(e4b->bd_bitmap_page == NULL);
 	BUG_ON(e4b->bd_buddy_page == NULL);
 
-	new_entry  = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
-	new_entry->start_blk = block;
-	new_entry->group  = group;
-	new_entry->count = count;
-	new_entry->t_tid = handle->h_transaction->t_tid;
 	new_node = &new_entry->node;
+	block = new_entry->start_blk;
 
-	ext4_lock_group(sb, group);
 	if (!*n) {
 		/* first free block exent. We need to
 		   protect buddy cache from being freed,
@@ -4441,10 +4720,9 @@
 		else if (block >= (entry->start_blk + entry->count))
 			n = &(*n)->rb_right;
 		else {
-			ext4_unlock_group(sb, group);
-			ext4_error(sb, __func__,
-			    "Double free of blocks %d (%d %d)\n",
-			    block, entry->start_blk, entry->count);
+			ext4_grp_locked_error(sb, e4b->bd_group, __func__,
+					"Double free of blocks %d (%d %d)",
+					block, entry->start_blk, entry->count);
 			return 0;
 		}
 	}
@@ -4483,7 +4761,6 @@
 	spin_lock(&sbi->s_md_lock);
 	list_add(&new_entry->list, &handle->h_transaction->t_private_list);
 	spin_unlock(&sbi->s_md_lock);
-	ext4_unlock_group(sb, group);
 	return 0;
 }
 
@@ -4499,7 +4776,7 @@
 	struct ext4_allocation_context *ac = NULL;
 	struct ext4_group_desc *gdp;
 	struct ext4_super_block *es;
-	unsigned long overflow;
+	unsigned int overflow;
 	ext4_grpblk_t bit;
 	struct buffer_head *gd_bh;
 	ext4_group_t block_group;
@@ -4522,6 +4799,10 @@
 	}
 
 	ext4_debug("freeing block %lu\n", block);
+	trace_mark(ext4_free_blocks,
+		   "dev %s block %llu count %lu metadata %d ino %lu",
+		   sb->s_id, (unsigned long long) block, count, metadata,
+		   inode ? inode->i_ino : 0);
 
 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
 	if (ac) {
@@ -4581,11 +4862,6 @@
 	err = ext4_journal_get_write_access(handle, gd_bh);
 	if (err)
 		goto error_return;
-
-	err = ext4_mb_load_buddy(sb, block_group, &e4b);
-	if (err)
-		goto error_return;
-
 #ifdef AGGRESSIVE_CHECK
 	{
 		int i;
@@ -4593,13 +4869,6 @@
 			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
 	}
 #endif
-	mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
-			bit, count);
-
-	/* We dirtied the bitmap block */
-	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
-
 	if (ac) {
 		ac->ac_b_ex.fe_group = block_group;
 		ac->ac_b_ex.fe_start = bit;
@@ -4607,19 +4876,41 @@
 		ext4_mb_store_history(ac);
 	}
 
-	if (metadata) {
-		/* blocks being freed are metadata. these blocks shouldn't
-		 * be used until this transaction is committed */
-		ext4_mb_free_metadata(handle, &e4b, block_group, bit, count);
+	err = ext4_mb_load_buddy(sb, block_group, &e4b);
+	if (err)
+		goto error_return;
+	if (metadata && ext4_handle_valid(handle)) {
+		struct ext4_free_data *new_entry;
+		/*
+		 * blocks being freed are metadata. these blocks shouldn't
+		 * be used until this transaction is committed
+		 */
+		new_entry  = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+		new_entry->start_blk = bit;
+		new_entry->group  = block_group;
+		new_entry->count = count;
+		new_entry->t_tid = handle->h_transaction->t_tid;
+		ext4_lock_group(sb, block_group);
+		mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
+				bit, count);
+		ext4_mb_free_metadata(handle, &e4b, new_entry);
+		ext4_unlock_group(sb, block_group);
 	} else {
 		ext4_lock_group(sb, block_group);
+		/* need to update group_info->bb_free and bitmap
+		 * with the group lock held. generate_buddy looks at
+		 * them with the group lock held
+		 */
+		mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
+				bit, count);
 		mb_free_blocks(inode, &e4b, bit, count);
 		ext4_mb_return_to_preallocation(inode, &e4b, block, count);
 		ext4_unlock_group(sb, block_group);
 	}
 
 	spin_lock(sb_bgl_lock(sbi, block_group));
-	le16_add_cpu(&gdp->bg_free_blocks_count, count);
+	ret = ext4_free_blks_count(sb, gdp) + count;
+	ext4_free_blks_set(sb, gdp, ret);
 	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
 	spin_unlock(sb_bgl_lock(sbi, block_group));
 	percpu_counter_add(&sbi->s_freeblocks_counter, count);
@@ -4635,9 +4926,13 @@
 
 	*freed += count;
 
+	/* We dirtied the bitmap block */
+	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
+	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+
 	/* And the group descriptor block */
 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
-	ret = ext4_journal_dirty_metadata(handle, gd_bh);
+	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
 	if (!err)
 		err = ret;
 
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index b5dff1f..10a2921 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -20,6 +20,7 @@
 #include <linux/version.h>
 #include <linux/blkdev.h>
 #include <linux/marker.h>
+#include <linux/mutex.h>
 #include "ext4_jbd2.h"
 #include "ext4.h"
 #include "group.h"
@@ -98,9 +99,6 @@
  */
 #define MB_DEFAULT_GROUP_PREALLOC	512
 
-static struct kmem_cache *ext4_pspace_cachep;
-static struct kmem_cache *ext4_ac_cachep;
-static struct kmem_cache *ext4_free_ext_cachep;
 
 struct ext4_free_data {
 	/* this links the free block information from group_info */
@@ -120,26 +118,6 @@
 	tid_t	t_tid;
 };
 
-struct ext4_group_info {
-	unsigned long	bb_state;
-	struct rb_root  bb_free_root;
-	unsigned short	bb_first_free;
-	unsigned short	bb_free;
-	unsigned short	bb_fragments;
-	struct		list_head bb_prealloc_list;
-#ifdef DOUBLE_CHECK
-	void		*bb_bitmap;
-#endif
-	unsigned short	bb_counters[];
-};
-
-#define EXT4_GROUP_INFO_NEED_INIT_BIT	0
-#define EXT4_GROUP_INFO_LOCKED_BIT	1
-
-#define EXT4_MB_GRP_NEED_INIT(grp)	\
-	(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
-
-
 struct ext4_prealloc_space {
 	struct list_head	pa_inode_list;
 	struct list_head	pa_group_list;
@@ -217,6 +195,11 @@
 	__u8 ac_op;		/* operation, for history only */
 	struct page *ac_bitmap_page;
 	struct page *ac_buddy_page;
+	/*
+	 * pointer to the held semaphore upon successful
+	 * block allocation
+	 */
+	struct rw_semaphore *alloc_semp;
 	struct ext4_prealloc_space *ac_pa;
 	struct ext4_locality_group *ac_lg;
 };
@@ -250,6 +233,7 @@
 	struct super_block *bd_sb;
 	__u16 bd_blkbits;
 	ext4_group_t bd_group;
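+	/* the group's alloc_sem, read-held while this buddy is loaded */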
+	struct rw_semaphore *alloc_semp;
 };
 #define EXT4_MB_BITMAP(e4b)	((e4b)->bd_bitmap)
 #define EXT4_MB_BUDDY(e4b)	((e4b)->bd_buddy)
@@ -259,51 +243,12 @@
 {
 	return;
 }
-#else
-static void ext4_mb_store_history(struct ext4_allocation_context *ac);
 #endif
 
 #define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
 
 struct buffer_head *read_block_bitmap(struct super_block *, ext4_group_t);
-
-static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
-					ext4_group_t group);
-static void ext4_mb_return_to_preallocation(struct inode *inode,
-					struct ext4_buddy *e4b, sector_t block,
-					int count);
-static void ext4_mb_put_pa(struct ext4_allocation_context *,
-			struct super_block *, struct ext4_prealloc_space *pa);
-static int ext4_mb_init_per_dev_proc(struct super_block *sb);
-static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);
-static void release_blocks_on_commit(journal_t *journal, transaction_t *txn);
-
-
-static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
-{
-	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
-
-	bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
-}
-
-static inline void ext4_unlock_group(struct super_block *sb,
-					ext4_group_t group)
-{
-	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
-
-	bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
-}
-
-static inline int ext4_is_group_locked(struct super_block *sb,
-					ext4_group_t group)
-{
-	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
-
-	return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
-						&(grinfo->bb_state));
-}
-
-static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
+static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
 					struct ext4_free_extent *fex)
 {
 	ext4_fsblk_t block;
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index f2a9cf4..734abca 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -59,7 +59,8 @@
 	/*
 	 * Make sure the credit we accumalated is not really high
 	 */
-	if (needed && handle->h_buffer_credits >= EXT4_RESERVE_TRANS_BLOCKS) {
+	if (needed && ext4_handle_has_enough_credits(handle,
+						EXT4_RESERVE_TRANS_BLOCKS)) {
 		retval = ext4_journal_restart(handle, needed);
 		if (retval)
 			goto err_out;
@@ -229,7 +230,7 @@
 {
 	int retval = 0, needed;
 
-	if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
+	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
 		return 0;
 	/*
 	 * We are freeing a blocks. During this we touch
@@ -458,13 +459,13 @@
 	struct list_blocks_struct lb;
 	unsigned long max_entries;
 
-	if (!test_opt(inode->i_sb, EXTENTS))
-		/*
-		 * if mounted with noextents we don't allow the migrate
-		 */
-		return -EINVAL;
-
-	if ((EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
+	/*
+	 * If the filesystem does not support extents, or the inode
+	 * already is extent-based, error out.
+	 */
+	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
+				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
+	    (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
 		return -EINVAL;
 
 	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 9fd2a5e..fec0b4c 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -74,10 +74,6 @@
 #define assert(test) J_ASSERT(test)
 #endif
 
-#ifndef swap
-#define swap(x, y) do { typeof(x) z = x; x = y; y = z; } while (0)
-#endif
-
 #ifdef DX_DEBUG
 #define dxtrace(command) command
 #else
@@ -372,6 +368,8 @@
 		goto fail;
 	}
 	hinfo->hash_version = root->info.hash_version;
+	if (hinfo->hash_version <= DX_HASH_TEA)
+		hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
 	hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
 	if (d_name)
 		ext4fs_dirhash(d_name->name, d_name->len, hinfo);
@@ -641,6 +639,9 @@
 	dir = dir_file->f_path.dentry->d_inode;
 	if (!(EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) {
 		hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
+		if (hinfo.hash_version <= DX_HASH_TEA)
+			hinfo.hash_version +=
+				EXT4_SB(dir->i_sb)->s_hash_unsigned;
 		hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
 		count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
 					       start_hash, start_minor_hash);
@@ -806,7 +807,7 @@
 static inline int search_dirblock(struct buffer_head *bh,
 				  struct inode *dir,
 				  const struct qstr *d_name,
-				  unsigned long offset,
+				  unsigned int offset,
 				  struct ext4_dir_entry_2 ** res_dir)
 {
 	struct ext4_dir_entry_2 * de;
@@ -1043,11 +1044,11 @@
 	bh = ext4_find_entry(dir, &dentry->d_name, &de);
 	inode = NULL;
 	if (bh) {
-		unsigned long ino = le32_to_cpu(de->inode);
+		__u32 ino = le32_to_cpu(de->inode);
 		brelse(bh);
 		if (!ext4_valid_inum(dir->i_sb, ino)) {
 			ext4_error(dir->i_sb, "ext4_lookup",
-				   "bad inode number: %lu", ino);
+				   "bad inode number: %u", ino);
 			return ERR_PTR(-EIO);
 		}
 		inode = ext4_iget(dir->i_sb, ino);
@@ -1060,7 +1061,7 @@
 
 struct dentry *ext4_get_parent(struct dentry *child)
 {
-	unsigned long ino;
+	__u32 ino;
 	struct inode *inode;
 	static const struct qstr dotdot = {
 		.name = "..",
@@ -1078,7 +1079,7 @@
 
 	if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
 		ext4_error(child->d_inode->i_sb, "ext4_get_parent",
-			   "bad inode number: %lu", ino);
+			   "bad inode number: %u", ino);
 		return ERR_PTR(-EIO);
 	}
 
@@ -1166,9 +1167,9 @@
 	u32 hash2;
 	struct dx_map_entry *map;
 	char *data1 = (*bh)->b_data, *data2;
-	unsigned split, move, size, i;
+	unsigned split, move, size;
 	struct ext4_dir_entry_2 *de = NULL, *de2;
-	int	err = 0;
+	int	err = 0, i;
 
 	bh2 = ext4_append (handle, dir, &newblock, &err);
 	if (!(bh2)) {
@@ -1228,10 +1229,10 @@
 		de = de2;
 	}
 	dx_insert_block(frame, hash2 + continued, newblock);
-	err = ext4_journal_dirty_metadata(handle, bh2);
+	err = ext4_handle_dirty_metadata(handle, dir, bh2);
 	if (err)
 		goto journal_error;
-	err = ext4_journal_dirty_metadata(handle, frame->bh);
+	err = ext4_handle_dirty_metadata(handle, dir, frame->bh);
 	if (err)
 		goto journal_error;
 	brelse(bh2);
@@ -1266,7 +1267,7 @@
 	struct inode	*dir = dentry->d_parent->d_inode;
 	const char	*name = dentry->d_name.name;
 	int		namelen = dentry->d_name.len;
-	unsigned long	offset = 0;
+	unsigned int	offset = 0;
 	unsigned short	reclen;
 	int		nlen, rlen, err;
 	char		*top;
@@ -1335,8 +1336,8 @@
 	ext4_update_dx_flag(dir);
 	dir->i_version++;
 	ext4_mark_inode_dirty(handle, dir);
-	BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
-	err = ext4_journal_dirty_metadata(handle, bh);
+	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+	err = ext4_handle_dirty_metadata(handle, dir, bh);
 	if (err)
 		ext4_std_error(dir->i_sb, err);
 	brelse(bh);
@@ -1408,6 +1409,8 @@
 
 	/* Initialize as for dx_probe */
 	hinfo.hash_version = root->info.hash_version;
+	if (hinfo.hash_version <= DX_HASH_TEA)
+		hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
 	hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
 	ext4fs_dirhash(name, namelen, &hinfo);
 	frame = frames;
@@ -1437,7 +1440,6 @@
 			  struct inode *inode)
 {
 	struct inode *dir = dentry->d_parent->d_inode;
-	unsigned long offset;
 	struct buffer_head *bh;
 	struct ext4_dir_entry_2 *de;
 	struct super_block *sb;
@@ -1459,7 +1461,7 @@
 		ext4_mark_inode_dirty(handle, dir);
 	}
 	blocks = dir->i_size >> sb->s_blocksize_bits;
-	for (block = 0, offset = 0; block < blocks; block++) {
+	for (block = 0; block < blocks; block++) {
 		bh = ext4_bread(handle, dir, block, 0, &retval);
 		if(!bh)
 			return retval;
@@ -1574,7 +1576,7 @@
 			dxtrace(dx_show_index("node", frames[1].entries));
 			dxtrace(dx_show_index("node",
 			       ((struct dx_node *) bh2->b_data)->entries));
-			err = ext4_journal_dirty_metadata(handle, bh2);
+			err = ext4_handle_dirty_metadata(handle, inode, bh2);
 			if (err)
 				goto journal_error;
 			brelse (bh2);
@@ -1600,7 +1602,7 @@
 			if (err)
 				goto journal_error;
 		}
-		ext4_journal_dirty_metadata(handle, frames[0].bh);
+		ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
 	}
 	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
 	if (!de)
@@ -1646,8 +1648,8 @@
 			else
 				de->inode = 0;
 			dir->i_version++;
-			BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
-			ext4_journal_dirty_metadata(handle, bh);
+			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+			ext4_handle_dirty_metadata(handle, dir, bh);
 			return 0;
 		}
 		i += ext4_rec_len_from_disk(de->rec_len);
@@ -1725,7 +1727,7 @@
 		return PTR_ERR(handle);
 
 	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
+		ext4_handle_sync(handle);
 
 	inode = ext4_new_inode (handle, dir, mode);
 	err = PTR_ERR(inode);
@@ -1759,7 +1761,7 @@
 		return PTR_ERR(handle);
 
 	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
+		ext4_handle_sync(handle);
 
 	inode = ext4_new_inode(handle, dir, mode);
 	err = PTR_ERR(inode);
@@ -1795,7 +1797,7 @@
 		return PTR_ERR(handle);
 
 	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
+		ext4_handle_sync(handle);
 
 	inode = ext4_new_inode(handle, dir, S_IFDIR | mode);
 	err = PTR_ERR(inode);
@@ -1824,8 +1826,8 @@
 	strcpy(de->name, "..");
 	ext4_set_de_type(dir->i_sb, de, S_IFDIR);
 	inode->i_nlink = 2;
-	BUFFER_TRACE(dir_block, "call ext4_journal_dirty_metadata");
-	ext4_journal_dirty_metadata(handle, dir_block);
+	BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
+	ext4_handle_dirty_metadata(handle, dir, dir_block);
 	brelse(dir_block);
 	ext4_mark_inode_dirty(handle, inode);
 	err = ext4_add_entry(handle, dentry, inode);
@@ -1854,7 +1856,7 @@
  */
 static int empty_dir(struct inode *inode)
 {
-	unsigned long offset;
+	unsigned int offset;
 	struct buffer_head *bh;
 	struct ext4_dir_entry_2 *de, *de1;
 	struct super_block *sb;
@@ -1899,7 +1901,7 @@
 				if (err)
 					ext4_error(sb, __func__,
 						   "error %d reading directory"
-						   " #%lu offset %lu",
+						   " #%lu offset %u",
 						   err, inode->i_ino, offset);
 				offset += sb->s_blocksize;
 				continue;
@@ -1937,6 +1939,9 @@
 	struct ext4_iloc iloc;
 	int err = 0, rc;
 
+	if (!ext4_handle_valid(handle))
+		return 0;
+
 	lock_super(sb);
 	if (!list_empty(&EXT4_I(inode)->i_orphan))
 		goto out_unlock;
@@ -1965,7 +1970,7 @@
 	/* Insert this inode at the head of the on-disk orphan list... */
 	NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
 	EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
-	err = ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
+	err = ext4_handle_dirty_metadata(handle, inode, EXT4_SB(sb)->s_sbh);
 	rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
 	if (!err)
 		err = rc;
@@ -1999,10 +2004,13 @@
 	struct list_head *prev;
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct ext4_sb_info *sbi;
-	unsigned long ino_next;
+	__u32 ino_next;
 	struct ext4_iloc iloc;
 	int err = 0;
 
+	if (!ext4_handle_valid(handle))
+		return 0;
+
 	lock_super(inode->i_sb);
 	if (list_empty(&ei->i_orphan)) {
 		unlock_super(inode->i_sb);
@@ -2021,7 +2029,7 @@
 	 * transaction handle with which to update the orphan list on
 	 * disk, but we still need to remove the inode from the linked
 	 * list in memory. */
-	if (!handle)
+	if (sbi->s_journal && !handle)
 		goto out;
 
 	err = ext4_reserve_inode_write(handle, inode, &iloc);
@@ -2029,19 +2037,19 @@
 		goto out_err;
 
 	if (prev == &sbi->s_orphan) {
-		jbd_debug(4, "superblock will point to %lu\n", ino_next);
+		jbd_debug(4, "superblock will point to %u\n", ino_next);
 		BUFFER_TRACE(sbi->s_sbh, "get_write_access");
 		err = ext4_journal_get_write_access(handle, sbi->s_sbh);
 		if (err)
 			goto out_brelse;
 		sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
-		err = ext4_journal_dirty_metadata(handle, sbi->s_sbh);
+		err = ext4_handle_dirty_metadata(handle, inode, sbi->s_sbh);
 	} else {
 		struct ext4_iloc iloc2;
 		struct inode *i_prev =
 			&list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode;
 
-		jbd_debug(4, "orphan inode %lu will point to %lu\n",
+		jbd_debug(4, "orphan inode %lu will point to %u\n",
 			  i_prev->i_ino, ino_next);
 		err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
 		if (err)
@@ -2086,7 +2094,7 @@
 		goto end_rmdir;
 
 	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
+		ext4_handle_sync(handle);
 
 	inode = dentry->d_inode;
 
@@ -2140,7 +2148,7 @@
 		return PTR_ERR(handle);
 
 	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
+		ext4_handle_sync(handle);
 
 	retval = -ENOENT;
 	bh = ext4_find_entry(dir, &dentry->d_name, &de);
@@ -2197,7 +2205,7 @@
 		return PTR_ERR(handle);
 
 	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
+		ext4_handle_sync(handle);
 
 	inode = ext4_new_inode(handle, dir, S_IFLNK|S_IRWXUGO);
 	err = PTR_ERR(inode);
@@ -2260,7 +2268,7 @@
 		return PTR_ERR(handle);
 
 	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
+		ext4_handle_sync(handle);
 
 	inode->i_ctime = ext4_current_time(inode);
 	ext4_inc_count(handle, inode);
@@ -2309,7 +2317,7 @@
 		return PTR_ERR(handle);
 
 	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
-		handle->h_sync = 1;
+		ext4_handle_sync(handle);
 
 	old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de);
 	/*
@@ -2363,8 +2371,8 @@
 		new_dir->i_ctime = new_dir->i_mtime =
 					ext4_current_time(new_dir);
 		ext4_mark_inode_dirty(handle, new_dir);
-		BUFFER_TRACE(new_bh, "call ext4_journal_dirty_metadata");
-		ext4_journal_dirty_metadata(handle, new_bh);
+		BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
+		ext4_handle_dirty_metadata(handle, new_dir, new_bh);
 		brelse(new_bh);
 		new_bh = NULL;
 	}
@@ -2414,8 +2422,8 @@
 		BUFFER_TRACE(dir_bh, "get_write_access");
 		ext4_journal_get_write_access(handle, dir_bh);
 		PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
-		BUFFER_TRACE(dir_bh, "call ext4_journal_dirty_metadata");
-		ext4_journal_dirty_metadata(handle, dir_bh);
+		BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
+		ext4_handle_dirty_metadata(handle, old_dir, dir_bh);
 		ext4_dec_count(handle, old_dir);
 		if (new_inode) {
 			/* checked empty_dir above, can't have another parent,
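Note: the namei.c hunks above convert direct handle manipulation (handle->h_sync = 1) and ext4_journal_dirty_metadata() calls to the new ext4_handle_sync()/ext4_handle_dirty_metadata() wrappers so the same call sites also work on a mount without a journal. The wrappers themselves are introduced elsewhere in this series (fs/ext4/ext4_jbd2.[ch]); the sketch below only illustrates the intended pattern and is not the exact kernel code -- in particular the EXT4_NOJOURNAL_HANDLE value is an assumption here.

/*
 * Illustrative sketch only: a "fake" handle marks a no-journal mount, and
 * journal-specific operations degrade to no-ops for it.
 */
#define EXT4_NOJOURNAL_HANDLE	((handle_t *) 0x1)	/* assumed value */

static inline int ext4_handle_valid(handle_t *handle)
{
	return handle != EXT4_NOJOURNAL_HANDLE;
}

static inline void ext4_handle_sync(handle_t *handle)
{
	/* Only a real jbd2 handle has an h_sync flag to set. */
	if (ext4_handle_valid(handle))
		handle->h_sync = 1;
}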
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index b6ec184..c328be5 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -50,7 +50,7 @@
 	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
 	if (group != sbi->s_groups_count)
 		ext4_warning(sb, __func__,
-			     "Cannot add at group %u (only %lu groups)",
+			     "Cannot add at group %u (only %u groups)",
 			     input->group, sbi->s_groups_count);
 	else if (offset != 0)
 			ext4_warning(sb, __func__, "Last group not full");
@@ -149,7 +149,7 @@
 {
 	int err;
 
-	if (handle->h_buffer_credits >= thresh)
+	if (ext4_handle_has_enough_credits(handle, thresh))
 		return 0;
 
 	err = ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA);
@@ -232,7 +232,7 @@
 		memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
 		set_buffer_uptodate(gdb);
 		unlock_buffer(gdb);
-		ext4_journal_dirty_metadata(handle, gdb);
+		ext4_handle_dirty_metadata(handle, NULL, gdb);
 		ext4_set_bit(bit, bh->b_data);
 		brelse(gdb);
 	}
@@ -251,7 +251,7 @@
 			err = PTR_ERR(bh);
 			goto exit_bh;
 		}
-		ext4_journal_dirty_metadata(handle, gdb);
+		ext4_handle_dirty_metadata(handle, NULL, gdb);
 		ext4_set_bit(bit, bh->b_data);
 		brelse(gdb);
 	}
@@ -276,7 +276,7 @@
 			err = PTR_ERR(it);
 			goto exit_bh;
 		}
-		ext4_journal_dirty_metadata(handle, it);
+		ext4_handle_dirty_metadata(handle, NULL, it);
 		brelse(it);
 		ext4_set_bit(bit, bh->b_data);
 	}
@@ -284,11 +284,9 @@
 	if ((err = extend_or_restart_transaction(handle, 2, bh)))
 		goto exit_bh;
 
-	mark_bitmap_end(input->blocks_count, EXT4_BLOCKS_PER_GROUP(sb),
-			bh->b_data);
-	ext4_journal_dirty_metadata(handle, bh);
+	mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8, bh->b_data);
+	ext4_handle_dirty_metadata(handle, NULL, bh);
 	brelse(bh);
-
 	/* Mark unused entries in inode bitmap used */
 	ext4_debug("clear inode bitmap %#04llx (+%llu)\n",
 		   input->inode_bitmap, input->inode_bitmap - start);
@@ -297,9 +295,9 @@
 		goto exit_journal;
 	}
 
-	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
+	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
 			bh->b_data);
-	ext4_journal_dirty_metadata(handle, bh);
+	ext4_handle_dirty_metadata(handle, NULL, bh);
 exit_bh:
 	brelse(bh);
 
@@ -486,12 +484,12 @@
 	 * reserved inode, and will become GDT blocks (primary and backup).
 	 */
 	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
-	ext4_journal_dirty_metadata(handle, dind);
+	ext4_handle_dirty_metadata(handle, NULL, dind);
 	brelse(dind);
 	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
 	ext4_mark_iloc_dirty(handle, inode, &iloc);
 	memset((*primary)->b_data, 0, sb->s_blocksize);
-	ext4_journal_dirty_metadata(handle, *primary);
+	ext4_handle_dirty_metadata(handle, NULL, *primary);
 
 	o_group_desc = EXT4_SB(sb)->s_group_desc;
 	memcpy(n_group_desc, o_group_desc,
@@ -502,7 +500,7 @@
 	kfree(o_group_desc);
 
 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
-	ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
+	ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
 
 	return 0;
 
@@ -618,7 +616,7 @@
 		       primary[i]->b_blocknr, gdbackups,
 		       blk + primary[i]->b_blocknr); */
 		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
-		err2 = ext4_journal_dirty_metadata(handle, primary[i]);
+		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
 		if (!err)
 			err = err2;
 	}
@@ -676,7 +674,8 @@
 		struct buffer_head *bh;
 
 		/* Out of journal space, and can't get more - abort - so sad */
-		if (handle->h_buffer_credits == 0 &&
+		if (ext4_handle_valid(handle) &&
+		    handle->h_buffer_credits == 0 &&
 		    ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) &&
 		    (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
 			break;
@@ -696,7 +695,7 @@
 			memset(bh->b_data + size, 0, rest);
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
-		ext4_journal_dirty_metadata(handle, bh);
+		ext4_handle_dirty_metadata(handle, NULL, bh);
 		brelse(bh);
 	}
 	if ((err2 = ext4_journal_stop(handle)) && !err)
@@ -715,7 +714,7 @@
 exit_err:
 	if (err) {
 		ext4_warning(sb, __func__,
-			     "can't update backup for group %lu (err %d), "
+			     "can't update backup for group %u (err %d), "
 			     "forcing fsck on next reboot", group, err);
 		sbi->s_mount_state &= ~EXT4_VALID_FS;
 		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
@@ -747,6 +746,7 @@
 	struct inode *inode = NULL;
 	handle_t *handle;
 	int gdb_off, gdb_num;
+	int num_grp_locked = 0;
 	int err, err2;
 
 	gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
@@ -761,13 +761,13 @@
 
 	if (ext4_blocks_count(es) + input->blocks_count <
 	    ext4_blocks_count(es)) {
-		ext4_warning(sb, __func__, "blocks_count overflow\n");
+		ext4_warning(sb, __func__, "blocks_count overflow");
 		return -EINVAL;
 	}
 
 	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
 	    le32_to_cpu(es->s_inodes_count)) {
-		ext4_warning(sb, __func__, "inodes_count overflow\n");
+		ext4_warning(sb, __func__, "inodes_count overflow");
 		return -EINVAL;
 	}
 
@@ -787,6 +787,7 @@
 		}
 	}
 
+
 	if ((err = verify_group_input(sb, input)))
 		goto exit_put;
 
@@ -855,6 +856,7 @@
          * using the new disk blocks.
          */
 
+	num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, input->group);
 	/* Update group descriptor block for new group */
 	gdp = (struct ext4_group_desc *)((char *)primary->b_data +
 					 gdb_off * EXT4_DESC_SIZE(sb));
@@ -862,17 +864,20 @@
 	ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */
 	ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */
 	ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */
-	gdp->bg_free_blocks_count = cpu_to_le16(input->free_blocks_count);
-	gdp->bg_free_inodes_count = cpu_to_le16(EXT4_INODES_PER_GROUP(sb));
+	ext4_free_blks_set(sb, gdp, input->free_blocks_count);
+	ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
+	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
 	gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp);
 
 	/*
 	 * We can allocate memory for mb_alloc based on the new group
 	 * descriptor
 	 */
-	err = ext4_mb_add_more_groupinfo(sb, input->group, gdp);
-	if (err)
+	err = ext4_mb_add_groupinfo(sb, input->group, gdp);
+	if (err) {
+		ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked);
 		goto exit_journal;
+	}
 
 	/*
 	 * Make the new blocks and inodes valid next.  We do this before
@@ -914,8 +919,9 @@
 
 	/* Update the global fs size fields */
 	sbi->s_groups_count++;
+	ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked);
 
-	ext4_journal_dirty_metadata(handle, primary);
+	ext4_handle_dirty_metadata(handle, NULL, primary);
 
 	/* Update the reserved block counts only once the new group is
 	 * active. */
@@ -937,7 +943,7 @@
 			EXT4_INODES_PER_GROUP(sb);
 	}
 
-	ext4_journal_dirty_metadata(handle, sbi->s_sbh);
+	ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
 	sb->s_dirt = 1;
 
 exit_journal:
@@ -975,9 +981,7 @@
 	struct buffer_head *bh;
 	handle_t *handle;
 	int err;
-	unsigned long freed_blocks;
 	ext4_group_t group;
-	struct ext4_group_info *grp;
 
 	/* We don't need to worry about locking wrt other resizers just
 	 * yet: we're going to revalidate es->s_blocks_count after
@@ -997,8 +1001,7 @@
 			" too large to resize to %llu blocks safely\n",
 			sb->s_id, n_blocks_count);
 		if (sizeof(sector_t) < 8)
-			ext4_warning(sb, __func__,
-			"CONFIG_LBD not enabled\n");
+			ext4_warning(sb, __func__, "CONFIG_LBD not enabled");
 		return -EINVAL;
 	}
 
@@ -1071,62 +1074,18 @@
 		goto exit_put;
 	}
 	ext4_blocks_count_set(es, o_blocks_count + add);
-	ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
+	ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
 	sb->s_dirt = 1;
 	unlock_super(sb);
 	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
 		   o_blocks_count + add);
-	ext4_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
+	/* We add the blocks to the bitmap and set the group need init bit */
+	ext4_add_groupblocks(handle, sb, o_blocks_count, add);
 	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
 		   o_blocks_count + add);
 	if ((err = ext4_journal_stop(handle)))
 		goto exit_put;
 
-	/*
-	 * Mark mballoc pages as not up to date so that they will be updated
-	 * next time they are loaded by ext4_mb_load_buddy.
-	 *
-	 * XXX Bad, Bad, BAD!!!  We should not be overloading the
-	 * Uptodate flag, particularly on thte bitmap bh, as way of
-	 * hinting to ext4_mb_load_buddy() that it needs to be
-	 * overloaded.  A user could take a LVM snapshot, then do an
-	 * on-line fsck, and clear the uptodate flag, and this would
-	 * not be a bug in userspace, but a bug in the kernel.  FIXME!!!
-	 */
-	{
-		struct ext4_sb_info *sbi = EXT4_SB(sb);
-		struct inode *inode = sbi->s_buddy_cache;
-		int blocks_per_page;
-		int block;
-		int pnum;
-		struct page *page;
-
-		/* Set buddy page as not up to date */
-		blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
-		block = group * 2;
-		pnum = block / blocks_per_page;
-		page = find_get_page(inode->i_mapping, pnum);
-		if (page != NULL) {
-			ClearPageUptodate(page);
-			page_cache_release(page);
-		}
-
-		/* Set bitmap page as not up to date */
-		block++;
-		pnum = block / blocks_per_page;
-		page = find_get_page(inode->i_mapping, pnum);
-		if (page != NULL) {
-			ClearPageUptodate(page);
-			page_cache_release(page);
-		}
-
-		/* Get the info on the last group */
-		grp = ext4_get_group_info(sb, group);
-
-		/* Update free blocks in group info */
-		ext4_mb_update_group_info(grp, add);
-	}
-
 	if (test_opt(sb, DEBUG))
 		printk(KERN_DEBUG "EXT4-fs: extended group to %llu blocks\n",
 		       ext4_blocks_count(es));
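Note: extend_or_restart_transaction() in resize.c now goes through ext4_handle_has_enough_credits() instead of reading handle->h_buffer_credits directly, so the credit check becomes a no-op on a journal-less mount. A plausible shape for that helper (hedged -- the real one lives in fs/ext4/ext4_jbd2.h in this series and may differ) is:

static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed)
{
	/* Without a journal there are no credits to run out of. */
	if (!ext4_handle_valid(handle))
		return 1;
	if (handle->h_buffer_credits < needed)
		return 0;
	return 1;
}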
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 9494bb2..e5f06a5 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -51,9 +51,7 @@
 
 static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
 			     unsigned long journal_devnum);
-static int ext4_create_journal(struct super_block *, struct ext4_super_block *,
-			       unsigned int);
-static void ext4_commit_super(struct super_block *sb,
+static int ext4_commit_super(struct super_block *sb,
 			      struct ext4_super_block *es, int sync);
 static void ext4_mark_recovery_complete(struct super_block *sb,
 					struct ext4_super_block *es);
@@ -64,9 +62,9 @@
 				     char nbuf[16]);
 static int ext4_remount(struct super_block *sb, int *flags, char *data);
 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
-static void ext4_unlockfs(struct super_block *sb);
+static int ext4_unfreeze(struct super_block *sb);
 static void ext4_write_super(struct super_block *sb);
-static void ext4_write_super_lockfs(struct super_block *sb);
+static int ext4_freeze(struct super_block *sb);
 
 
 ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
@@ -93,6 +91,38 @@
 		(ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
 }
 
+__u32 ext4_free_blks_count(struct super_block *sb,
+			      struct ext4_group_desc *bg)
+{
+	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
+		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
+		(__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
+}
+
+__u32 ext4_free_inodes_count(struct super_block *sb,
+			      struct ext4_group_desc *bg)
+{
+	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
+		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
+		(__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
+}
+
+__u32 ext4_used_dirs_count(struct super_block *sb,
+			      struct ext4_group_desc *bg)
+{
+	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
+		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
+		(__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
+}
+
+__u32 ext4_itable_unused_count(struct super_block *sb,
+			      struct ext4_group_desc *bg)
+{
+	return le16_to_cpu(bg->bg_itable_unused_lo) |
+		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
+		(__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
+}
+
 void ext4_block_bitmap_set(struct super_block *sb,
 			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
 {
@@ -117,6 +147,38 @@
 		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
 }
 
+void ext4_free_blks_set(struct super_block *sb,
+			  struct ext4_group_desc *bg, __u32 count)
+{
+	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
+	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
+		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
+}
+
+void ext4_free_inodes_set(struct super_block *sb,
+			  struct ext4_group_desc *bg, __u32 count)
+{
+	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
+	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
+		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
+}
+
+void ext4_used_dirs_set(struct super_block *sb,
+			  struct ext4_group_desc *bg, __u32 count)
+{
+	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
+	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
+		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
+}
+
+void ext4_itable_unused_set(struct super_block *sb,
+			  struct ext4_group_desc *bg, __u32 count)
+{
+	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
+	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
+		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
+}
+
 /*
  * Wrappers for jbd2_journal_start/end.
  *
@@ -136,13 +198,19 @@
 	 * backs (eg. EIO in the commit thread), then we still need to
 	 * take the FS itself readonly cleanly. */
 	journal = EXT4_SB(sb)->s_journal;
-	if (is_journal_aborted(journal)) {
-		ext4_abort(sb, __func__,
-			   "Detected aborted journal");
-		return ERR_PTR(-EROFS);
+	if (journal) {
+		if (is_journal_aborted(journal)) {
+			ext4_abort(sb, __func__,
+				   "Detected aborted journal");
+			return ERR_PTR(-EROFS);
+		}
+		return jbd2_journal_start(journal, nblocks);
 	}
-
-	return jbd2_journal_start(journal, nblocks);
+	/*
+	 * We're not journaling, return the appropriate indication.
+	 */
+	current->journal_info = EXT4_NOJOURNAL_HANDLE;
+	return current->journal_info;
 }
 
 /*
@@ -157,6 +225,14 @@
 	int err;
 	int rc;
 
+	if (!ext4_handle_valid(handle)) {
+		/*
+		 * Do this here since we don't call jbd2_journal_stop() in
+		 * no-journal mode.
+		 */
+		current->journal_info = NULL;
+		return 0;
+	}
 	sb = handle->h_transaction->t_journal->j_private;
 	err = handle->h_err;
 	rc = jbd2_journal_stop(handle);
@@ -174,6 +250,8 @@
 	char nbuf[16];
 	const char *errstr = ext4_decode_error(NULL, err, nbuf);
 
+	BUG_ON(!ext4_handle_valid(handle));
+
 	if (bh)
 		BUFFER_TRACE(bh, "abort");
 
@@ -350,6 +428,44 @@
 	va_end(args);
 }
 
+void ext4_grp_locked_error(struct super_block *sb, ext4_group_t grp,
+				const char *function, const char *fmt, ...)
+__releases(bitlock)
+__acquires(bitlock)
+{
+	va_list args;
+	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
+	va_start(args, fmt);
+	printk(KERN_CRIT "EXT4-fs error (device %s): %s: ", sb->s_id, function);
+	vprintk(fmt, args);
+	printk("\n");
+	va_end(args);
+
+	if (test_opt(sb, ERRORS_CONT)) {
+		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
+		ext4_commit_super(sb, es, 0);
+		return;
+	}
+	ext4_unlock_group(sb, grp);
+	ext4_handle_error(sb);
+	/*
+	 * We only get here in the ERRORS_RO case; relocking the group
+	 * may be dangerous, but nothing bad will happen since the
+	 * filesystem will have already been marked read/only and the
+	 * journal has been aborted.  We return 1 as a hint to callers
+	 * who might want to use the return value from
+	 * ext4_grp_locked_error() to distinguish between the
+	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
+	 * aggressively from the ext4 function in question, with a
+	 * more appropriate error code.
+	 */
+	ext4_lock_group(sb, grp);
+	return;
+}
+
+
 void ext4_update_dynamic_rev(struct super_block *sb)
 {
 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
@@ -389,7 +505,7 @@
 	return bdev;
 
 fail:
-	printk(KERN_ERR "EXT4: failed to open journal device %s: %ld\n",
+	printk(KERN_ERR "EXT4-fs: failed to open journal device %s: %ld\n",
 			__bdevname(dev, b), PTR_ERR(bdev));
 	return NULL;
 }
@@ -448,11 +564,13 @@
 	ext4_mb_release(sb);
 	ext4_ext_release(sb);
 	ext4_xattr_put_super(sb);
-	err = jbd2_journal_destroy(sbi->s_journal);
-	sbi->s_journal = NULL;
-	if (err < 0)
-		ext4_abort(sb, __func__, "Couldn't clean up the journal");
-
+	if (sbi->s_journal) {
+		err = jbd2_journal_destroy(sbi->s_journal);
+		sbi->s_journal = NULL;
+		if (err < 0)
+			ext4_abort(sb, __func__,
+				   "Couldn't clean up the journal");
+	}
 	if (!(sb->s_flags & MS_RDONLY)) {
 		EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
 		es->s_state = cpu_to_le16(sbi->s_mount_state);
@@ -522,6 +640,11 @@
 	memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
 	INIT_LIST_HEAD(&ei->i_prealloc_list);
 	spin_lock_init(&ei->i_prealloc_lock);
+	/*
+	 * Note:  We can be called before EXT4_SB(sb)->s_journal is set,
+	 * therefore it can be null here.  Don't check it, just initialize
+	 * jinode.
+	 */
 	jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode);
 	ei->i_reserved_data_blocks = 0;
 	ei->i_reserved_meta_blocks = 0;
@@ -588,7 +711,8 @@
 	}
 #endif
 	ext4_discard_preallocations(inode);
-	jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
+	if (EXT4_JOURNAL(inode))
+		jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
 				       &EXT4_I(inode)->jinode);
 }
 
@@ -681,10 +805,19 @@
 #endif
 	if (!test_opt(sb, RESERVATION))
 		seq_puts(seq, ",noreservation");
-	if (sbi->s_commit_interval) {
+	if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
 		seq_printf(seq, ",commit=%u",
 			   (unsigned) (sbi->s_commit_interval / HZ));
 	}
+	if (sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) {
+		seq_printf(seq, ",min_batch_time=%u",
+			   (unsigned) sbi->s_min_batch_time);
+	}
+	if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
+		seq_printf(seq, ",max_batch_time=%u",
+			   (unsigned) sbi->s_max_batch_time);
+	}
+
 	/*
 	 * We're changing the default of barrier mount option, so
 	 * let's always display its mount state so it's clear what its
@@ -696,8 +829,6 @@
 		seq_puts(seq, ",journal_async_commit");
 	if (test_opt(sb, NOBH))
 		seq_puts(seq, ",nobh");
-	if (!test_opt(sb, EXTENTS))
-		seq_puts(seq, ",noextents");
 	if (test_opt(sb, I_VERSION))
 		seq_puts(seq, ",i_version");
 	if (!test_opt(sb, DELALLOC))
@@ -772,6 +903,25 @@
 				    ext4_nfs_get_inode);
 }
 
+/*
+ * Try to release metadata pages (indirect blocks, directories) which are
+ * mapped via the block device.  Since these pages could have journal heads
+ * which would prevent try_to_free_buffers() from freeing them, we must use
+ * jbd2 layer's try_to_free_buffers() function to release them.
+ */
+static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_t wait)
+{
+	journal_t *journal = EXT4_SB(sb)->s_journal;
+
+	WARN_ON(PageChecked(page));
+	if (!page_has_buffers(page))
+		return 0;
+	if (journal)
+		return jbd2_journal_try_to_free_buffers(journal, page,
+							wait & ~__GFP_WAIT);
+	return try_to_free_buffers(page);
+}
+
 #ifdef CONFIG_QUOTA
 #define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group")
 #define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
@@ -828,8 +978,8 @@
 	.put_super	= ext4_put_super,
 	.write_super	= ext4_write_super,
 	.sync_fs	= ext4_sync_fs,
-	.write_super_lockfs = ext4_write_super_lockfs,
-	.unlockfs	= ext4_unlockfs,
+	.freeze_fs	= ext4_freeze,
+	.unfreeze_fs	= ext4_unfreeze,
 	.statfs		= ext4_statfs,
 	.remount_fs	= ext4_remount,
 	.clear_inode	= ext4_clear_inode,
@@ -838,6 +988,7 @@
 	.quota_read	= ext4_quota_read,
 	.quota_write	= ext4_quota_write,
 #endif
+	.bdev_try_to_free_page = bdev_try_to_free_page,
 };
 
 static const struct export_operations ext4_export_ops = {
@@ -852,16 +1003,17 @@
 	Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov,
 	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
 	Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
-	Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
+	Opt_commit, Opt_min_batch_time, Opt_max_batch_time,
+	Opt_journal_update, Opt_journal_dev,
 	Opt_journal_checksum, Opt_journal_async_commit,
 	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
 	Opt_data_err_abort, Opt_data_err_ignore,
 	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
 	Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
-	Opt_grpquota, Opt_extents, Opt_noextents, Opt_i_version,
+	Opt_grpquota, Opt_i_version,
 	Opt_stripe, Opt_delalloc, Opt_nodelalloc,
-	Opt_inode_readahead_blks
+	Opt_inode_readahead_blks, Opt_journal_ioprio
 };
 
 static const match_table_t tokens = {
@@ -891,8 +1043,9 @@
 	{Opt_nobh, "nobh"},
 	{Opt_bh, "bh"},
 	{Opt_commit, "commit=%u"},
+	{Opt_min_batch_time, "min_batch_time=%u"},
+	{Opt_max_batch_time, "max_batch_time=%u"},
 	{Opt_journal_update, "journal=update"},
-	{Opt_journal_inum, "journal=%u"},
 	{Opt_journal_dev, "journal_dev=%u"},
 	{Opt_journal_checksum, "journal_checksum"},
 	{Opt_journal_async_commit, "journal_async_commit"},
@@ -913,14 +1066,13 @@
 	{Opt_quota, "quota"},
 	{Opt_usrquota, "usrquota"},
 	{Opt_barrier, "barrier=%u"},
-	{Opt_extents, "extents"},
-	{Opt_noextents, "noextents"},
 	{Opt_i_version, "i_version"},
 	{Opt_stripe, "stripe=%u"},
 	{Opt_resize, "resize"},
 	{Opt_delalloc, "delalloc"},
 	{Opt_nodelalloc, "nodelalloc"},
 	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
+	{Opt_journal_ioprio, "journal_ioprio=%u"},
 	{Opt_err, NULL},
 };
 
@@ -945,8 +1097,11 @@
 	return sb_block;
 }
 
+#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
+
 static int parse_options(char *options, struct super_block *sb,
-			 unsigned int *inum, unsigned long *journal_devnum,
+			 unsigned long *journal_devnum,
+			 unsigned int *journal_ioprio,
 			 ext4_fsblk_t *n_blocks_count, int is_remount)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -958,7 +1113,6 @@
 	int qtype, qfmt;
 	char *qname;
 #endif
-	ext4_fsblk_t last_block;
 
 	if (!options)
 		return 1;
@@ -1070,16 +1224,6 @@
 			}
 			set_opt(sbi->s_mount_opt, UPDATE_JOURNAL);
 			break;
-		case Opt_journal_inum:
-			if (is_remount) {
-				printk(KERN_ERR "EXT4-fs: cannot specify "
-				       "journal on remount\n");
-				return 0;
-			}
-			if (match_int(&args[0], &option))
-				return 0;
-			*inum = option;
-			break;
 		case Opt_journal_dev:
 			if (is_remount) {
 				printk(KERN_ERR "EXT4-fs: cannot specify "
@@ -1109,6 +1253,22 @@
 				option = JBD2_DEFAULT_MAX_COMMIT_AGE;
 			sbi->s_commit_interval = HZ * option;
 			break;
+		case Opt_max_batch_time:
+			if (match_int(&args[0], &option))
+				return 0;
+			if (option < 0)
+				return 0;
+			if (option == 0)
+				option = EXT4_DEF_MAX_BATCH_TIME;
+			sbi->s_max_batch_time = option;
+			break;
+		case Opt_min_batch_time:
+			if (match_int(&args[0], &option))
+				return 0;
+			if (option < 0)
+				return 0;
+			sbi->s_min_batch_time = option;
+			break;
 		case Opt_data_journal:
 			data_opt = EXT4_MOUNT_JOURNAL_DATA;
 			goto datacheck;
@@ -1279,33 +1439,6 @@
 		case Opt_bh:
 			clear_opt(sbi->s_mount_opt, NOBH);
 			break;
-		case Opt_extents:
-			if (!EXT4_HAS_INCOMPAT_FEATURE(sb,
-					EXT4_FEATURE_INCOMPAT_EXTENTS)) {
-				ext4_warning(sb, __func__,
-					"extents feature not enabled "
-					"on this filesystem, use tune2fs\n");
-				return 0;
-			}
-			set_opt(sbi->s_mount_opt, EXTENTS);
-			break;
-		case Opt_noextents:
-			/*
-			 * When e2fsprogs support resizing an already existing
-			 * ext3 file system to greater than 2**32 we need to
-			 * add support to block allocator to handle growing
-			 * already existing block  mapped inode so that blocks
-			 * allocated for them fall within 2**32
-			 */
-			last_block = ext4_blocks_count(sbi->s_es) - 1;
-			if (last_block  > 0xffffffffULL) {
-				printk(KERN_ERR "EXT4-fs: Filesystem too "
-						"large to mount with "
-						"-o noextents options\n");
-				return 0;
-			}
-			clear_opt(sbi->s_mount_opt, EXTENTS);
-			break;
 		case Opt_i_version:
 			set_opt(sbi->s_mount_opt, I_VERSION);
 			sb->s_flags |= MS_I_VERSION;
@@ -1330,6 +1463,14 @@
 				return 0;
 			sbi->s_inode_readahead_blks = option;
 			break;
+		case Opt_journal_ioprio:
+			if (match_int(&args[0], &option))
+				return 0;
+			if (option < 0 || option > 7)
+				break;
+			*journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE,
+							    option);
+			break;
 		default:
 			printk(KERN_ERR
 			       "EXT4-fs: Unrecognized mount option \"%s\" "
@@ -1405,24 +1546,19 @@
 		printk(KERN_WARNING
 		       "EXT4-fs warning: checktime reached, "
 		       "running e2fsck is recommended\n");
-#if 0
-		/* @@@ We _will_ want to clear the valid bit if we find
-		 * inconsistencies, to force a fsck at reboot.  But for
-		 * a plain journaled filesystem we can keep it set as
-		 * valid forever! :)
-		 */
-	es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
-#endif
+	if (!sbi->s_journal)
+		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
 	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
 		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
 	le16_add_cpu(&es->s_mnt_count, 1);
 	es->s_mtime = cpu_to_le32(get_seconds());
 	ext4_update_dynamic_rev(sb);
-	EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+	if (sbi->s_journal)
+		EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
 
 	ext4_commit_super(sb, es, 1);
 	if (test_opt(sb, DEBUG))
-		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%lu, "
+		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
 				"bpg=%lu, ipg=%lu, mo=%04lx]\n",
 			sb->s_blocksize,
 			sbi->s_groups_count,
@@ -1430,9 +1566,13 @@
 			EXT4_INODES_PER_GROUP(sb),
 			sbi->s_mount_opt);
 
-	printk(KERN_INFO "EXT4 FS on %s, %s journal on %s\n",
-	       sb->s_id, EXT4_SB(sb)->s_journal->j_inode ? "internal" :
-	       "external", EXT4_SB(sb)->s_journal->j_devname);
+	if (EXT4_SB(sb)->s_journal) {
+		printk(KERN_INFO "EXT4 FS on %s, %s journal on %s\n",
+		       sb->s_id, EXT4_SB(sb)->s_journal->j_inode ? "internal" :
+		       "external", EXT4_SB(sb)->s_journal->j_devname);
+	} else {
+		printk(KERN_INFO "EXT4 FS on %s, no journal\n", sb->s_id);
+	}
 	return res;
 }
 
@@ -1444,7 +1584,6 @@
 	ext4_group_t flex_group_count;
 	ext4_group_t flex_group;
 	int groups_per_flex = 0;
-	__u64 block_bitmap = 0;
 	int i;
 
 	if (!sbi->s_es->s_log_groups_per_flex) {
@@ -1463,21 +1602,18 @@
 				     sizeof(struct flex_groups), GFP_KERNEL);
 	if (sbi->s_flex_groups == NULL) {
 		printk(KERN_ERR "EXT4-fs: not enough memory for "
-				"%lu flex groups\n", flex_group_count);
+				"%u flex groups\n", flex_group_count);
 		goto failed;
 	}
 
-	gdp = ext4_get_group_desc(sb, 1, &bh);
-	block_bitmap = ext4_block_bitmap(sb, gdp) - 1;
-
 	for (i = 0; i < sbi->s_groups_count; i++) {
 		gdp = ext4_get_group_desc(sb, i, &bh);
 
 		flex_group = ext4_flex_group(sbi, i);
 		sbi->s_flex_groups[flex_group].free_inodes +=
-			le16_to_cpu(gdp->bg_free_inodes_count);
+			ext4_free_inodes_count(sb, gdp);
 		sbi->s_flex_groups[flex_group].free_blocks +=
-			le16_to_cpu(gdp->bg_free_blocks_count);
+			ext4_free_blks_count(sb, gdp);
 	}
 
 	return 1;
@@ -1551,14 +1687,14 @@
 		block_bitmap = ext4_block_bitmap(sb, gdp);
 		if (block_bitmap < first_block || block_bitmap > last_block) {
 			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
-			       "Block bitmap for group %lu not in group "
+			       "Block bitmap for group %u not in group "
 			       "(block %llu)!\n", i, block_bitmap);
 			return 0;
 		}
 		inode_bitmap = ext4_inode_bitmap(sb, gdp);
 		if (inode_bitmap < first_block || inode_bitmap > last_block) {
 			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
-			       "Inode bitmap for group %lu not in group "
+			       "Inode bitmap for group %u not in group "
 			       "(block %llu)!\n", i, inode_bitmap);
 			return 0;
 		}
@@ -1566,14 +1702,14 @@
 		if (inode_table < first_block ||
 		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
 			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
-			       "Inode table for group %lu not in group "
+			       "Inode table for group %u not in group "
 			       "(block %llu)!\n", i, inode_table);
 			return 0;
 		}
 		spin_lock(sb_bgl_lock(sbi, i));
 		if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
 			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
-			       "Checksum for group %lu failed (%u!=%u)\n",
+			       "Checksum for group %u failed (%u!=%u)\n",
 			       i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
 			       gdp)), le16_to_cpu(gdp->bg_checksum));
 			if (!(sb->s_flags & MS_RDONLY)) {
@@ -1865,19 +2001,20 @@
 	ext4_fsblk_t sb_block = get_sb_block(&data);
 	ext4_fsblk_t logical_sb_block;
 	unsigned long offset = 0;
-	unsigned int journal_inum = 0;
 	unsigned long journal_devnum = 0;
 	unsigned long def_mount_opts;
 	struct inode *root;
 	char *cp;
+	const char *descr;
 	int ret = -EINVAL;
 	int blocksize;
-	int db_count;
-	int i;
+	unsigned int db_count;
+	unsigned int i;
 	int needs_recovery, has_huge_files;
-	__le32 features;
+	int features;
 	__u64 blocks_count;
 	int err;
+	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
 
 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
 	if (!sbi)
@@ -1958,31 +2095,22 @@
 
 	sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
 	sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
+	sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
+	sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
+	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
 
 	set_opt(sbi->s_mount_opt, RESERVATION);
 	set_opt(sbi->s_mount_opt, BARRIER);
 
 	/*
-	 * turn on extents feature by default in ext4 filesystem
-	 * only if feature flag already set by mkfs or tune2fs.
-	 * Use -o noextents to turn it off
-	 */
-	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
-		set_opt(sbi->s_mount_opt, EXTENTS);
-	else
-		ext4_warning(sb, __func__,
-			"extents feature not enabled on this filesystem, "
-			"use tune2fs.\n");
-
-	/*
 	 * enable delayed allocation by default
 	 * Use -o nodelalloc to turn it off
 	 */
 	set_opt(sbi->s_mount_opt, DELALLOC);
 
 
-	if (!parse_options((char *) data, sb, &journal_inum, &journal_devnum,
-			   NULL, 0))
+	if (!parse_options((char *) data, sb, &journal_devnum,
+			   &journal_ioprio, NULL, 0))
 		goto failed_mount;
 
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
@@ -2004,15 +2132,17 @@
 	features = EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT4_FEATURE_INCOMPAT_SUPP);
 	if (features) {
 		printk(KERN_ERR "EXT4-fs: %s: couldn't mount because of "
-		       "unsupported optional features (%x).\n",
-		       sb->s_id, le32_to_cpu(features));
+		       "unsupported optional features (%x).\n", sb->s_id,
+			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
+			~EXT4_FEATURE_INCOMPAT_SUPP));
 		goto failed_mount;
 	}
 	features = EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP);
 	if (!(sb->s_flags & MS_RDONLY) && features) {
 		printk(KERN_ERR "EXT4-fs: %s: couldn't mount RDWR because of "
-		       "unsupported optional features (%x).\n",
-		       sb->s_id, le32_to_cpu(features));
+		       "unsupported optional features (%x).\n", sb->s_id,
+			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
+			~EXT4_FEATURE_RO_COMPAT_SUPP));
 		goto failed_mount;
 	}
 	has_huge_files = EXT4_HAS_RO_COMPAT_FEATURE(sb,
@@ -2117,6 +2247,18 @@
 	for (i = 0; i < 4; i++)
 		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
 	sbi->s_def_hash_version = es->s_def_hash_version;
+	i = le32_to_cpu(es->s_flags);
+	if (i & EXT2_FLAGS_UNSIGNED_HASH)
+		sbi->s_hash_unsigned = 3;
+	else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
+#ifdef __CHAR_UNSIGNED__
+		es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
+		sbi->s_hash_unsigned = 3;
+#else
+		es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
+#endif
+		sb->s_dirt = 1;
+	}
 
 	if (sbi->s_blocks_per_group > blocksize * 8) {
 		printk(KERN_ERR
@@ -2144,20 +2286,30 @@
 	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
 		goto cantfind_ext4;
 
-	/* ensure blocks_count calculation below doesn't sign-extend */
-	if (ext4_blocks_count(es) + EXT4_BLOCKS_PER_GROUP(sb) <
-	    le32_to_cpu(es->s_first_data_block) + 1) {
-		printk(KERN_WARNING "EXT4-fs: bad geometry: block count %llu, "
-		       "first data block %u, blocks per group %lu\n",
-			ext4_blocks_count(es),
-			le32_to_cpu(es->s_first_data_block),
-			EXT4_BLOCKS_PER_GROUP(sb));
+	/*
+	 * It makes no sense for the first data block to be beyond the end
+	 * of the filesystem.
+	 */
+	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
+		printk(KERN_WARNING "EXT4-fs: bad geometry: first data "
+		       "block %u is beyond end of filesystem (%llu)\n",
+		       le32_to_cpu(es->s_first_data_block),
+		       ext4_blocks_count(es));
 		goto failed_mount;
 	}
 	blocks_count = (ext4_blocks_count(es) -
 			le32_to_cpu(es->s_first_data_block) +
 			EXT4_BLOCKS_PER_GROUP(sb) - 1);
 	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
+	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
+		printk(KERN_WARNING "EXT4-fs: groups count too large: %u "
+		       "(block count %llu, first data block %u, "
+		       "blocks per group %lu)\n", sbi->s_groups_count,
+		       ext4_blocks_count(es),
+		       le32_to_cpu(es->s_first_data_block),
+		       EXT4_BLOCKS_PER_GROUP(sb));
+		goto failed_mount;
+	}
 	sbi->s_groups_count = blocks_count;
 	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
 		   EXT4_DESC_PER_BLOCK(sb);
@@ -2269,27 +2421,26 @@
 				EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
 				es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
 				ext4_commit_super(sb, es, 1);
-				printk(KERN_CRIT
-				       "EXT4-fs (device %s): mount failed\n",
-				      sb->s_id);
 				goto failed_mount4;
 			}
 		}
-	} else if (journal_inum) {
-		if (ext4_create_journal(sb, es, journal_inum))
-			goto failed_mount3;
+	} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
+	      EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
+		printk(KERN_ERR "EXT4-fs: required journal recovery "
+		       "suppressed and not mounted read-only\n");
+		goto failed_mount4;
 	} else {
-		if (!silent)
-			printk(KERN_ERR
-			       "ext4: No journal on filesystem on %s\n",
-			       sb->s_id);
-		goto failed_mount3;
+		clear_opt(sbi->s_mount_opt, DATA_FLAGS);
+		set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
+		sbi->s_journal = NULL;
+		needs_recovery = 0;
+		goto no_journal;
 	}
 
 	if (ext4_blocks_count(es) > 0xffffffffULL &&
 	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
 				       JBD2_FEATURE_INCOMPAT_64BIT)) {
-		printk(KERN_ERR "ext4: Failed to set 64-bit journal feature\n");
+		printk(KERN_ERR "EXT4-fs: Failed to set 64-bit journal feature\n");
 		goto failed_mount4;
 	}
 
@@ -2334,6 +2485,9 @@
 	default:
 		break;
 	}
+	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
+
+no_journal:
 
 	if (test_opt(sb, NOBH)) {
 		if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
@@ -2419,13 +2573,22 @@
 	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
 	ext4_orphan_cleanup(sb, es);
 	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
-	if (needs_recovery)
+	if (needs_recovery) {
 		printk(KERN_INFO "EXT4-fs: recovery complete.\n");
-	ext4_mark_recovery_complete(sb, es);
-	printk(KERN_INFO "EXT4-fs: mounted filesystem with %s data mode.\n",
-	       test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ? "journal":
-	       test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA ? "ordered":
-	       "writeback");
+		ext4_mark_recovery_complete(sb, es);
+	}
+	if (EXT4_SB(sb)->s_journal) {
+		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
+			descr = " journalled data mode";
+		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
+			descr = " ordered data mode";
+		else
+			descr = " writeback data mode";
+	} else
+		descr = "out journal";
+
+	printk(KERN_INFO "EXT4-fs: mounted filesystem %s with%s\n",
+	       sb->s_id, descr);
 
 	lock_kernel();
 	return 0;
@@ -2437,8 +2600,11 @@
 	goto failed_mount;
 
 failed_mount4:
-	jbd2_journal_destroy(sbi->s_journal);
-	sbi->s_journal = NULL;
+	printk(KERN_ERR "EXT4-fs (device %s): mount failed\n", sb->s_id);
+	if (sbi->s_journal) {
+		jbd2_journal_destroy(sbi->s_journal);
+		sbi->s_journal = NULL;
+	}
 failed_mount3:
 	percpu_counter_destroy(&sbi->s_freeblocks_counter);
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
@@ -2475,11 +2641,9 @@
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
-	if (sbi->s_commit_interval)
-		journal->j_commit_interval = sbi->s_commit_interval;
-	/* We could also set up an ext4-specific default for the commit
-	 * interval here, but for now we'll just fall back to the jbd
-	 * default. */
+	journal->j_commit_interval = sbi->s_commit_interval;
+	journal->j_min_batch_time = sbi->s_min_batch_time;
+	journal->j_max_batch_time = sbi->s_max_batch_time;
 
 	spin_lock(&journal->j_state_lock);
 	if (test_opt(sb, BARRIER))
@@ -2499,6 +2663,8 @@
 	struct inode *journal_inode;
 	journal_t *journal;
 
+	BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
+
 	/* First, test for the existence of a valid inode on disk.  Bad
 	 * things happen if we iget() an unused inode, as the subsequent
 	 * iput() will try to delete it. */
@@ -2547,13 +2713,15 @@
 	struct ext4_super_block *es;
 	struct block_device *bdev;
 
+	BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
+
 	bdev = ext4_blkdev_get(j_dev);
 	if (bdev == NULL)
 		return NULL;
 
 	if (bd_claim(bdev, sb)) {
 		printk(KERN_ERR
-			"EXT4: failed to claim external journal device.\n");
+			"EXT4-fs: failed to claim external journal device.\n");
 		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
 		return NULL;
 	}
@@ -2634,6 +2802,8 @@
 	int err = 0;
 	int really_read_only;
 
+	BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
+
 	if (journal_devnum &&
 	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
 		printk(KERN_INFO "EXT4-fs: external journal device major/minor "
@@ -2718,55 +2888,14 @@
 	return 0;
 }
 
-static int ext4_create_journal(struct super_block *sb,
-			       struct ext4_super_block *es,
-			       unsigned int journal_inum)
-{
-	journal_t *journal;
-	int err;
-
-	if (sb->s_flags & MS_RDONLY) {
-		printk(KERN_ERR "EXT4-fs: readonly filesystem when trying to "
-				"create journal.\n");
-		return -EROFS;
-	}
-
-	journal = ext4_get_journal(sb, journal_inum);
-	if (!journal)
-		return -EINVAL;
-
-	printk(KERN_INFO "EXT4-fs: creating new journal on inode %u\n",
-	       journal_inum);
-
-	err = jbd2_journal_create(journal);
-	if (err) {
-		printk(KERN_ERR "EXT4-fs: error creating journal.\n");
-		jbd2_journal_destroy(journal);
-		return -EIO;
-	}
-
-	EXT4_SB(sb)->s_journal = journal;
-
-	ext4_update_dynamic_rev(sb);
-	EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
-	EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL);
-
-	es->s_journal_inum = cpu_to_le32(journal_inum);
-	sb->s_dirt = 1;
-
-	/* Make sure we flush the recovery flag to disk. */
-	ext4_commit_super(sb, es, 1);
-
-	return 0;
-}
-
-static void ext4_commit_super(struct super_block *sb,
+static int ext4_commit_super(struct super_block *sb,
 			      struct ext4_super_block *es, int sync)
 {
 	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
+	int error = 0;
 
 	if (!sbh)
-		return;
+		return error;
 	if (buffer_write_io_error(sbh)) {
 		/*
 		 * Oh, dear.  A previous attempt to write the
@@ -2776,25 +2905,33 @@
 		 * be remapped.  Nothing we can do but to retry the
 		 * write and hope for the best.
 		 */
-		printk(KERN_ERR "ext4: previous I/O error to "
+		printk(KERN_ERR "EXT4-fs: previous I/O error to "
 		       "superblock detected for %s.\n", sb->s_id);
 		clear_buffer_write_io_error(sbh);
 		set_buffer_uptodate(sbh);
 	}
 	es->s_wtime = cpu_to_le32(get_seconds());
-	ext4_free_blocks_count_set(es, ext4_count_free_blocks(sb));
-	es->s_free_inodes_count = cpu_to_le32(ext4_count_free_inodes(sb));
+	ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
+					&EXT4_SB(sb)->s_freeblocks_counter));
+	es->s_free_inodes_count = cpu_to_le32(percpu_counter_sum_positive(
+					&EXT4_SB(sb)->s_freeinodes_counter));
+
 	BUFFER_TRACE(sbh, "marking dirty");
 	mark_buffer_dirty(sbh);
 	if (sync) {
-		sync_dirty_buffer(sbh);
-		if (buffer_write_io_error(sbh)) {
-			printk(KERN_ERR "ext4: I/O error while writing "
+		error = sync_dirty_buffer(sbh);
+		if (error)
+			return error;
+
+		error = buffer_write_io_error(sbh);
+		if (error) {
+			printk(KERN_ERR "EXT4-fs: I/O error while writing "
 			       "superblock for %s.\n", sb->s_id);
 			clear_buffer_write_io_error(sbh);
 			set_buffer_uptodate(sbh);
 		}
 	}
+	return error;
 }
 
 
@@ -2808,6 +2945,10 @@
 {
 	journal_t *journal = EXT4_SB(sb)->s_journal;
 
+	if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
+		BUG_ON(journal != NULL);
+		return;
+	}
 	jbd2_journal_lock_updates(journal);
 	if (jbd2_journal_flush(journal) < 0)
 		goto out;
@@ -2837,6 +2978,8 @@
 	int j_errno;
 	const char *errstr;
 
+	BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
+
 	journal = EXT4_SB(sb)->s_journal;
 
 	/*
@@ -2869,14 +3012,17 @@
 int ext4_force_commit(struct super_block *sb)
 {
 	journal_t *journal;
-	int ret;
+	int ret = 0;
 
 	if (sb->s_flags & MS_RDONLY)
 		return 0;
 
 	journal = EXT4_SB(sb)->s_journal;
-	sb->s_dirt = 0;
-	ret = ext4_journal_force_commit(journal);
+	if (journal) {
+		sb->s_dirt = 0;
+		ret = ext4_journal_force_commit(journal);
+	}
+
 	return ret;
 }
 
@@ -2888,9 +3034,13 @@
  */
 static void ext4_write_super(struct super_block *sb)
 {
-	if (mutex_trylock(&sb->s_lock) != 0)
-		BUG();
-	sb->s_dirt = 0;
+	if (EXT4_SB(sb)->s_journal) {
+		if (mutex_trylock(&sb->s_lock) != 0)
+			BUG();
+		sb->s_dirt = 0;
+	} else {
+		ext4_commit_super(sb, EXT4_SB(sb)->s_es, 1);
+	}
 }
 
 static int ext4_sync_fs(struct super_block *sb, int wait)
@@ -2899,10 +3049,14 @@
 
 	trace_mark(ext4_sync_fs, "dev %s wait %d", sb->s_id, wait);
 	sb->s_dirt = 0;
-	if (wait)
-		ret = ext4_force_commit(sb);
-	else
-		jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, NULL);
+	if (EXT4_SB(sb)->s_journal) {
+		if (wait)
+			ret = ext4_force_commit(sb);
+		else
+			jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, NULL);
+	} else {
+		ext4_commit_super(sb, EXT4_SB(sb)->s_es, wait);
+	}
 	return ret;
 }
 
@@ -2910,36 +3064,48 @@
  * LVM calls this function before a (read-only) snapshot is created.  This
  * gives us a chance to flush the journal completely and mark the fs clean.
  */
-static void ext4_write_super_lockfs(struct super_block *sb)
+static int ext4_freeze(struct super_block *sb)
 {
+	int error = 0;
+	journal_t *journal;
 	sb->s_dirt = 0;
 
 	if (!(sb->s_flags & MS_RDONLY)) {
-		journal_t *journal = EXT4_SB(sb)->s_journal;
+		journal = EXT4_SB(sb)->s_journal;
 
-		/* Now we set up the journal barrier. */
-		jbd2_journal_lock_updates(journal);
+		if (journal) {
+			/* Now we set up the journal barrier. */
+			jbd2_journal_lock_updates(journal);
 
-		/*
-		 * We don't want to clear needs_recovery flag when we failed
-		 * to flush the journal.
-		 */
-		if (jbd2_journal_flush(journal) < 0)
-			return;
+			/*
+			 * We don't want to clear needs_recovery flag when we
+			 * failed to flush the journal.
+			 */
+			error = jbd2_journal_flush(journal);
+			if (error < 0)
+				goto out;
+		}
 
 		/* Journal blocked and flushed, clear needs_recovery flag. */
 		EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
 		ext4_commit_super(sb, EXT4_SB(sb)->s_es, 1);
+		error = ext4_commit_super(sb, EXT4_SB(sb)->s_es, 1);
+		if (error)
+			goto out;
 	}
+	return 0;
+out:
+	jbd2_journal_unlock_updates(journal);
+	return error;
 }
 
 /*
  * Called by LVM after the snapshot is done.  We need to reset the RECOVER
  * flag here, even though the filesystem is not technically dirty yet.
  */
-static void ext4_unlockfs(struct super_block *sb)
+static int ext4_unfreeze(struct super_block *sb)
 {
-	if (!(sb->s_flags & MS_RDONLY)) {
+	if (EXT4_SB(sb)->s_journal && !(sb->s_flags & MS_RDONLY)) {
 		lock_super(sb);
 		/* Reser the needs_recovery flag before the fs is unlocked. */
 		EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
@@ -2947,6 +3113,7 @@
 		unlock_super(sb);
 		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
 	}
+	return 0;
 }
 
 static int ext4_remount(struct super_block *sb, int *flags, char *data)
@@ -2957,6 +3124,7 @@
 	unsigned long old_sb_flags;
 	struct ext4_mount_options old_opts;
 	ext4_group_t g;
+	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
 	int err;
 #ifdef CONFIG_QUOTA
 	int i;
@@ -2968,16 +3136,21 @@
 	old_opts.s_resuid = sbi->s_resuid;
 	old_opts.s_resgid = sbi->s_resgid;
 	old_opts.s_commit_interval = sbi->s_commit_interval;
+	old_opts.s_min_batch_time = sbi->s_min_batch_time;
+	old_opts.s_max_batch_time = sbi->s_max_batch_time;
 #ifdef CONFIG_QUOTA
 	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
 	for (i = 0; i < MAXQUOTAS; i++)
 		old_opts.s_qf_names[i] = sbi->s_qf_names[i];
 #endif
+	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
+		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
 
 	/*
 	 * Allow the "check" option to be passed as a remount option.
 	 */
-	if (!parse_options(data, sb, NULL, NULL, &n_blocks_count, 1)) {
+	if (!parse_options(data, sb, NULL, &journal_ioprio,
+			   &n_blocks_count, 1)) {
 		err = -EINVAL;
 		goto restore_opts;
 	}
@@ -2990,7 +3163,10 @@
 
 	es = sbi->s_es;
 
-	ext4_init_journal_params(sb, sbi->s_journal);
+	if (sbi->s_journal) {
+		ext4_init_journal_params(sb, sbi->s_journal);
+		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
+	}
 
 	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) ||
 		n_blocks_count > ext4_blocks_count(es)) {
@@ -3019,17 +3195,20 @@
 			 * We have to unlock super so that we can wait for
 			 * transactions.
 			 */
-			unlock_super(sb);
-			ext4_mark_recovery_complete(sb, es);
-			lock_super(sb);
+			if (sbi->s_journal) {
+				unlock_super(sb);
+				ext4_mark_recovery_complete(sb, es);
+				lock_super(sb);
+			}
 		} else {
-			__le32 ret;
+			int ret;
 			if ((ret = EXT4_HAS_RO_COMPAT_FEATURE(sb,
 					~EXT4_FEATURE_RO_COMPAT_SUPP))) {
 				printk(KERN_WARNING "EXT4-fs: %s: couldn't "
 				       "remount RDWR because of unsupported "
-				       "optional features (%x).\n",
-				       sb->s_id, le32_to_cpu(ret));
+				       "optional features (%x).\n", sb->s_id,
+				(le32_to_cpu(sbi->s_es->s_feature_ro_compat) &
+					~EXT4_FEATURE_RO_COMPAT_SUPP));
 				err = -EROFS;
 				goto restore_opts;
 			}
@@ -3046,7 +3225,7 @@
 				if (!ext4_group_desc_csum_verify(sbi, g, gdp)) {
 					printk(KERN_ERR
 	       "EXT4-fs: ext4_remount: "
-		"Checksum for group %lu failed (%u!=%u)\n",
+		"Checksum for group %u failed (%u!=%u)\n",
 		g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)),
 					       le16_to_cpu(gdp->bg_checksum));
 					err = -EINVAL;
@@ -3075,7 +3254,8 @@
 			 * been changed by e2fsck since we originally mounted
 			 * the partition.)
 			 */
-			ext4_clear_journal_err(sb, es);
+			if (sbi->s_journal)
+				ext4_clear_journal_err(sb, es);
 			sbi->s_mount_state = le16_to_cpu(es->s_state);
 			if ((err = ext4_group_extend(sb, es, n_blocks_count)))
 				goto restore_opts;
@@ -3083,6 +3263,9 @@
 				sb->s_flags &= ~MS_RDONLY;
 		}
 	}
+	if (sbi->s_journal == NULL)
+		ext4_commit_super(sb, es, 1);
+
 #ifdef CONFIG_QUOTA
 	/* Release old quota file names */
 	for (i = 0; i < MAXQUOTAS; i++)
@@ -3097,6 +3280,8 @@
 	sbi->s_resuid = old_opts.s_resuid;
 	sbi->s_resgid = old_opts.s_resgid;
 	sbi->s_commit_interval = old_opts.s_commit_interval;
+	sbi->s_min_batch_time = old_opts.s_min_batch_time;
+	sbi->s_max_batch_time = old_opts.s_max_batch_time;
 #ifdef CONFIG_QUOTA
 	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
 	for (i = 0; i < MAXQUOTAS; i++) {
@@ -3359,7 +3544,8 @@
 	 * When we journal data on quota file, we have to flush journal to see
 	 * all updates to the file when we bypass pagecache...
 	 */
-	if (ext4_should_journal_data(path.dentry->d_inode)) {
+	if (EXT4_SB(sb)->s_journal &&
+	    ext4_should_journal_data(path.dentry->d_inode)) {
 		/*
 		 * We don't need to lock updates but journal_flush() could
 		 * otherwise be livelocked...
@@ -3433,7 +3619,7 @@
 	struct buffer_head *bh;
 	handle_t *handle = journal_current_handle();
 
-	if (!handle) {
+	if (EXT4_SB(sb)->s_journal && !handle) {
 		printk(KERN_WARNING "EXT4-fs: Quota write (off=%llu, len=%llu)"
 			" cancelled because transaction is not started.\n",
 			(unsigned long long)off, (unsigned long long)len);
@@ -3458,7 +3644,7 @@
 		flush_dcache_page(bh->b_page);
 		unlock_buffer(bh);
 		if (journal_quota)
-			err = ext4_journal_dirty_metadata(handle, bh);
+			err = ext4_handle_dirty_metadata(handle, NULL, bh);
 		else {
 			/* Always do at least ordered writes for quotas */
 			err = ext4_jbd2_file_inode(handle, inode);
@@ -3512,18 +3698,15 @@
 static ssize_t ext4_ui_proc_write(struct file *file, const char __user *buf,
 			       size_t cnt, loff_t *ppos)
 {
-	unsigned int *p = PDE(file->f_path.dentry->d_inode)->data;
+	unsigned long *p = PDE(file->f_path.dentry->d_inode)->data;
 	char str[32];
-	unsigned long value;
 
 	if (cnt >= sizeof(str))
 		return -EINVAL;
 	if (copy_from_user(str, buf, cnt))
 		return -EFAULT;
-	value = simple_strtol(str, NULL, 0);
-	if (value < 0)
-		return -ERANGE;
-	*p = value;
+
+	*p = simple_strtoul(str, NULL, 0);
 	return cnt;
 }
 
@@ -3614,7 +3797,7 @@
 }
 
 MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
-MODULE_DESCRIPTION("Fourth Extended Filesystem with extents");
+MODULE_DESCRIPTION("Fourth Extended Filesystem");
 MODULE_LICENSE("GPL");
 module_init(init_ext4_fs)
 module_exit(exit_ext4_fs)
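Note: with the super.c changes above, ext4_journal_start_sb() hands back a sentinel (EXT4_NOJOURNAL_HANDLE, stashed in current->journal_info) instead of failing when no journal is present, and ext4_journal_stop() clears it again. A hypothetical call site then looks the same on journaled and journal-less mounts; the helper below is only a usage illustration, not code from this series.

/* Hypothetical example: the same sequence works with or without a journal. */
static int example_sync_inode_update(struct inode *inode)
{
	handle_t *handle = ext4_journal_start(inode, 1);
	int err;

	if (IS_ERR(handle))		/* real failures still come back as ERR_PTR */
		return PTR_ERR(handle);

	ext4_handle_sync(handle);	/* no-op on a no-journal mount */
	err = ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);	/* just clears journal_info if no journal */
	return err;
}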
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 80626d5..157ce65 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -457,7 +457,7 @@
 	if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
 		EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR);
 		sb->s_dirt = 1;
-		ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
+		ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
 	}
 }
 
@@ -487,9 +487,9 @@
 		ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
 	} else {
 		le32_add_cpu(&BHDR(bh)->h_refcount, -1);
-		error = ext4_journal_dirty_metadata(handle, bh);
+		error = ext4_handle_dirty_metadata(handle, inode, bh);
 		if (IS_SYNC(inode))
-			handle->h_sync = 1;
+			ext4_handle_sync(handle);
 		DQUOT_FREE_BLOCK(inode, 1);
 		ea_bdebug(bh, "refcount now=%d; releasing",
 			  le32_to_cpu(BHDR(bh)->h_refcount));
@@ -724,8 +724,9 @@
 			if (error == -EIO)
 				goto bad_block;
 			if (!error)
-				error = ext4_journal_dirty_metadata(handle,
-								    bs->bh);
+				error = ext4_handle_dirty_metadata(handle,
+								   inode,
+								   bs->bh);
 			if (error)
 				goto cleanup;
 			goto inserted;
@@ -794,8 +795,9 @@
 				ea_bdebug(new_bh, "reusing; refcount now=%d",
 					le32_to_cpu(BHDR(new_bh)->h_refcount));
 				unlock_buffer(new_bh);
-				error = ext4_journal_dirty_metadata(handle,
-								    new_bh);
+				error = ext4_handle_dirty_metadata(handle,
+								   inode,
+								   new_bh);
 				if (error)
 					goto cleanup_dquot;
 			}
@@ -810,8 +812,8 @@
 			/* We need to allocate a new block */
 			ext4_fsblk_t goal = ext4_group_first_block_no(sb,
 						EXT4_I(inode)->i_block_group);
-			ext4_fsblk_t block = ext4_new_meta_block(handle, inode,
-							goal, &error);
+			ext4_fsblk_t block = ext4_new_meta_blocks(handle, inode,
+						  goal, NULL, &error);
 			if (error)
 				goto cleanup;
 			ea_idebug(inode, "creating block %d", block);
@@ -833,7 +835,8 @@
 			set_buffer_uptodate(new_bh);
 			unlock_buffer(new_bh);
 			ext4_xattr_cache_insert(new_bh);
-			error = ext4_journal_dirty_metadata(handle, new_bh);
+			error = ext4_handle_dirty_metadata(handle,
+							   inode, new_bh);
 			if (error)
 				goto cleanup;
 		}
@@ -1040,7 +1043,7 @@
 		 */
 		is.iloc.bh = NULL;
 		if (IS_SYNC(inode))
-			handle->h_sync = 1;
+			ext4_handle_sync(handle);
 	}
 
 cleanup:
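Note: xattr.c now passes the inode to ext4_handle_dirty_metadata() as well. On a journaled mount the buffer is still filed with jbd2; without a journal the helper has to fall back to ordinary buffer writeback, and the inode lets it honour sync semantics. A simplified sketch of that behaviour (the real helper in this series also aborts the handle on jbd2 errors) might be:

static int sketch_handle_dirty_metadata(handle_t *handle, struct inode *inode,
					struct buffer_head *bh)
{
	if (ext4_handle_valid(handle))
		/* Journaled mount: file the buffer with jbd2 as metadata. */
		return jbd2_journal_dirty_metadata(handle, bh);

	/* No journal: plain buffer writeback. */
	mark_buffer_dirty(bh);
	if (inode && IS_SYNC(inode))
		sync_dirty_buffer(bh);
	return 0;
}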
diff --git a/fs/fcntl.c b/fs/fcntl.c
index cdc1419..bd215cc 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -50,7 +50,7 @@
 	return res;
 }
 
-asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
+SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
 {
 	int err = -EBADF;
 	struct file * file, *tofree;
@@ -113,7 +113,7 @@
 	return err;
 }
 
-asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
+SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
 {
 	if (unlikely(newfd == oldfd)) { /* corner case */
 		struct files_struct *files = current->files;
@@ -126,7 +126,7 @@
 	return sys_dup3(oldfd, newfd, 0);
 }
 
-asmlinkage long sys_dup(unsigned int fildes)
+SYSCALL_DEFINE1(dup, unsigned int, fildes)
 {
 	int ret = -EBADF;
 	struct file *file = fget(fildes);
@@ -335,7 +335,7 @@
 	return err;
 }
 
-asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg)
+SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
 {	
 	struct file *filp;
 	long err = -EBADF;
@@ -358,7 +358,8 @@
 }
 
 #if BITS_PER_LONG == 32
-asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
+SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
+		unsigned long, arg)
 {	
 	struct file * filp;
 	long err;
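Note: the fcntl.c hunks convert open-coded syscall definitions to the SYSCALL_DEFINEn macros. Very roughly, the macro stamps out the usual asmlinkage prototype from a list of (type, name) pairs; the sketch below is a deliberately simplified stand-in, not the real macro (the actual definitions in include/linux/syscalls.h also cope with architectures that need sign-extension wrappers for 32-bit arguments).

#include <linux/linkage.h>

/* Simplified illustration only -- not the real SYSCALL_DEFINE2. */
#define SKETCH_SYSCALL_DEFINE2(name, t1, a1, t2, a2)	\
	asmlinkage long sys_##name(t1 a1, t2 a2)

SKETCH_SYSCALL_DEFINE2(dup2_example, unsigned int, oldfd, unsigned int, newfd)
{
	return 0;	/* a real implementation would go here */
}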
diff --git a/fs/filesystems.c b/fs/filesystems.c
index d488dcd..1aa7026 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -179,7 +179,7 @@
 /*
  * Whee.. Weird sysv syscall. 
  */
-asmlinkage long sys_sysfs(int option, unsigned long arg1, unsigned long arg2)
+SYSCALL_DEFINE3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2)
 {
 	int retval = -EINVAL;
 
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index ab2f57e..e563a64 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -1,6 +1,6 @@
 config GFS2_FS
 	tristate "GFS2 file system support"
-	depends on EXPERIMENTAL && (64BIT || (LSF && LBD))
+	depends on EXPERIMENTAL && (64BIT || LBD)
 	select FS_POSIX_ACL
 	select CRC32
 	help
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 6e4ea36..4ddab67 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -675,6 +675,7 @@
 		goto out_trans_fail;
 
 	error = -ENOMEM;
+	flags |= AOP_FLAG_NOFS;
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	*pagep = page;
 	if (unlikely(!page))
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index 289c5f5..93fe41b 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -342,7 +342,7 @@
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	unsigned long last_index;
-	u64 pos = page->index << (PAGE_CACHE_SIZE - inode->i_blkbits);
+	u64 pos = page->index << PAGE_CACHE_SHIFT;
 	unsigned int data_blocks, ind_blocks, rblocks;
 	int alloc_required = 0;
 	struct gfs2_holder gh;
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index 777783d..320323d 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -211,18 +211,18 @@
 }
 
 /**
- * gfs2_write_super_lockfs - prevent further writes to the filesystem
+ * gfs2_freeze - prevent further writes to the filesystem
  * @sb: the VFS structure for the filesystem
  *
  */
 
-static void gfs2_write_super_lockfs(struct super_block *sb)
+static int gfs2_freeze(struct super_block *sb)
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	int error;
 
 	if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
-		return;
+		return -EINVAL;
 
 	for (;;) {
 		error = gfs2_freeze_fs(sdp);
@@ -242,17 +242,19 @@
 		fs_err(sdp, "retrying...\n");
 		msleep(1000);
 	}
+	return 0;
 }
 
 /**
- * gfs2_unlockfs - reallow writes to the filesystem
+ * gfs2_unfreeze - reallow writes to the filesystem
  * @sb: the VFS structure for the filesystem
  *
  */
 
-static void gfs2_unlockfs(struct super_block *sb)
+static int gfs2_unfreeze(struct super_block *sb)
 {
 	gfs2_unfreeze_fs(sb->s_fs_info);
+	return 0;
 }
 
 /**
@@ -688,8 +690,8 @@
 	.put_super		= gfs2_put_super,
 	.write_super		= gfs2_write_super,
 	.sync_fs		= gfs2_sync_fs,
-	.write_super_lockfs 	= gfs2_write_super_lockfs,
-	.unlockfs		= gfs2_unlockfs,
+	.freeze_fs 		= gfs2_freeze,
+	.unfreeze_fs		= gfs2_unfreeze,
 	.statfs			= gfs2_statfs,
 	.remount_fs		= gfs2_remount_fs,
 	.clear_inode		= gfs2_clear_inode,
diff --git a/fs/inode.c b/fs/inode.c
index 7a6e8c2..913ab2d 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -22,6 +22,7 @@
 #include <linux/bootmem.h>
 #include <linux/inotify.h>
 #include <linux/mount.h>
+#include <linux/async.h>
 
 /*
  * This is needed for the following functions:
diff --git a/fs/ioctl.c b/fs/ioctl.c
index cc3f1aa..240ec639 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -439,6 +439,43 @@
 	return error;
 }
 
+static int ioctl_fsfreeze(struct file *filp)
+{
+	struct super_block *sb = filp->f_path.dentry->d_inode->i_sb;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	/* If filesystem doesn't support freeze feature, return. */
+	if (sb->s_op->freeze_fs == NULL)
+		return -EOPNOTSUPP;
+
+	/* If a blockdevice-backed filesystem isn't specified, return. */
+	if (sb->s_bdev == NULL)
+		return -EINVAL;
+
+	/* Freeze */
+	sb = freeze_bdev(sb->s_bdev);
+	if (IS_ERR(sb))
+		return PTR_ERR(sb);
+	return 0;
+}
+
+static int ioctl_fsthaw(struct file *filp)
+{
+	struct super_block *sb = filp->f_path.dentry->d_inode->i_sb;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	/* If a blockdevice-backed filesystem isn't specified, return EINVAL. */
+	if (sb->s_bdev == NULL)
+		return -EINVAL;
+
+	/* Thaw */
+	return thaw_bdev(sb->s_bdev, sb);
+}
+
 /*
  * When you add any new common ioctls to the switches above and below
  * please update compat_sys_ioctl() too.
@@ -486,6 +523,15 @@
 		} else
 			error = -ENOTTY;
 		break;
+
+	case FIFREEZE:
+		error = ioctl_fsfreeze(filp);
+		break;
+
+	case FITHAW:
+		error = ioctl_fsthaw(filp);
+		break;
+
 	default:
 		if (S_ISREG(filp->f_path.dentry->d_inode->i_mode))
 			error = file_ioctl(filp, cmd, arg);
@@ -496,7 +542,7 @@
 	return error;
 }
 
-asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
 {
 	struct file *filp;
 	int error = -EBADF;
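
The new FIFREEZE/FITHAW cases above make filesystem freezing available through
a generic ioctl for any block-device-backed filesystem that provides
->freeze_fs/->unfreeze_fs.  A minimal user-space sketch of how a snapshot or
backup tool might drive them (the mount-point path, headers, and error
handling are illustrative assumptions, not taken from this patch;
CAP_SYS_ADMIN is required):

	#include <fcntl.h>
	#include <linux/fs.h>		/* FIFREEZE, FITHAW */
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		/* open the mount point of the filesystem to freeze */
		int fd = open("/mnt/data", O_RDONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		if (ioctl(fd, FIFREEZE, 0) < 0) {	/* block new writes */
			perror("FIFREEZE");
			close(fd);
			return 1;
		}

		/* ... snapshot the underlying block device here ... */

		if (ioctl(fd, FITHAW, 0) < 0)		/* allow writes again */
			perror("FITHAW");

		close(fd);
		return 0;
	}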
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 3569e0a..c7c0b28 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -27,7 +27,7 @@
 #include <linux/security.h>
 #include <linux/pid_namespace.h>
 
-static int set_task_ioprio(struct task_struct *task, int ioprio)
+int set_task_ioprio(struct task_struct *task, int ioprio)
 {
 	int err;
 	struct io_context *ioc;
@@ -70,8 +70,9 @@
 	task_unlock(task);
 	return err;
 }
+EXPORT_SYMBOL_GPL(set_task_ioprio);
 
-asmlinkage long sys_ioprio_set(int which, int who, int ioprio)
+SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
 {
 	int class = IOPRIO_PRIO_CLASS(ioprio);
 	int data = IOPRIO_PRIO_DATA(ioprio);
@@ -187,7 +188,7 @@
 		return aprio;
 }
 
-asmlinkage long sys_ioprio_get(int which, int who)
+SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
 {
 	struct task_struct *g, *p;
 	struct user_struct *user;
@@ -251,4 +252,3 @@
 	read_unlock(&tasklist_lock);
 	return ret;
 }
-
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 25719d9..3fbffb1 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -306,6 +306,8 @@
 	int flags;
 	int err;
 	unsigned long blocknr;
+	ktime_t start_time;
+	u64 commit_time;
 	char *tagp = NULL;
 	journal_header_t *header;
 	journal_block_tag_t *tag = NULL;
@@ -418,6 +420,7 @@
 	commit_transaction->t_state = T_FLUSH;
 	journal->j_committing_transaction = commit_transaction;
 	journal->j_running_transaction = NULL;
+	start_time = ktime_get();
 	commit_transaction->t_log_start = journal->j_head;
 	wake_up(&journal->j_wait_transaction_locked);
 	spin_unlock(&journal->j_state_lock);
@@ -913,6 +916,18 @@
 	J_ASSERT(commit_transaction == journal->j_committing_transaction);
 	journal->j_commit_sequence = commit_transaction->t_tid;
 	journal->j_committing_transaction = NULL;
+	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
+
+	/*
+	 * weight the commit time higher than the average time so we don't
+	 * react too strongly to vast changes in commit time
+	 */
+	if (likely(journal->j_average_commit_time))
+		journal->j_average_commit_time = (commit_time*3 +
+				journal->j_average_commit_time) / 4;
+	else
+		journal->j_average_commit_time = commit_time;
+
 	spin_unlock(&journal->j_state_lock);
 
 	if (commit_transaction->t_checkpoint_list == NULL &&
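
The j_average_commit_time bookkeeping added above keeps a running estimate of
how long a commit takes; the companion change in fs/jbd/transaction.c below
uses it to decide how long a sync writer should wait for other threads to join
the transaction.  A standalone sketch of the same weighted-average update (the
helper name is ours, for illustration; in this jbd variant the new sample is
weighted 3:1 against the old average, so an old average of 4 ms and a new 8 ms
commit give (3*8 + 4)/4 = 7 ms):

	#include <stdint.h>

	/* illustrative only: mirrors the update done under j_state_lock
	 * in journal_commit_transaction() above */
	static uint64_t update_average_commit_time(uint64_t avg_ns,
						   uint64_t commit_ns)
	{
		if (avg_ns == 0)		/* first sample ever */
			return commit_ns;
		return (commit_ns * 3 + avg_ns) / 4;
	}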
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 60d4c32..e6a1174 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -25,6 +25,7 @@
 #include <linux/timer.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
+#include <linux/hrtimer.h>
 
 static void __journal_temp_unlink_buffer(struct journal_head *jh);
 
@@ -49,6 +50,7 @@
 {
 	transaction->t_journal = journal;
 	transaction->t_state = T_RUNNING;
+	transaction->t_start_time = ktime_get();
 	transaction->t_tid = journal->j_transaction_sequence++;
 	transaction->t_expires = jiffies + journal->j_commit_interval;
 	spin_lock_init(&transaction->t_handle_lock);
@@ -752,7 +754,6 @@
  * int journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
  * @handle: transaction to add buffer modifications to
  * @bh:     bh to be used for metadata writes
- * @credits: variable that will receive credits for the buffer
  *
  * Returns an error code or 0 on success.
  *
@@ -1370,7 +1371,7 @@
 {
 	transaction_t *transaction = handle->h_transaction;
 	journal_t *journal = transaction->t_journal;
-	int old_handle_count, err;
+	int err;
 	pid_t pid;
 
 	J_ASSERT(journal_current_handle() == handle);
@@ -1399,6 +1400,17 @@
 	 * on IO anyway.  Speeds up many-threaded, many-dir operations
 	 * by 30x or more...
 	 *
+	 * We try and optimize the sleep time against what the underlying disk
+	 * can do, instead of having a static sleep time.  This is useful for
+	 * the case where our storage is so fast that it is more optimal to go
+	 * ahead and force a flush and wait for the transaction to be committed
+	 * than it is to wait for an arbitrary amount of time for new writers to
+	 * join the transaction.  We achieve this by measuring how long it takes
+	 * to commit a transaction, and compare it with how long this
+	 * transaction has been running, and if run time < commit time then we
+	 * sleep for the delta and commit.  This greatly helps super fast disks
+	 * that would see slowdowns as more threads started doing fsyncs.
+	 *
 	 * But don't do this if this process was the most recent one to
 	 * perform a synchronous write.  We do this to detect the case where a
 	 * single process is doing a stream of sync writes.  No point in waiting
@@ -1406,11 +1418,26 @@
 	 */
 	pid = current->pid;
 	if (handle->h_sync && journal->j_last_sync_writer != pid) {
+		u64 commit_time, trans_time;
+
 		journal->j_last_sync_writer = pid;
-		do {
-			old_handle_count = transaction->t_handle_count;
-			schedule_timeout_uninterruptible(1);
-		} while (old_handle_count != transaction->t_handle_count);
+
+		spin_lock(&journal->j_state_lock);
+		commit_time = journal->j_average_commit_time;
+		spin_unlock(&journal->j_state_lock);
+
+		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
+						   transaction->t_start_time));
+
+		commit_time = min_t(u64, commit_time,
+				    1000*jiffies_to_usecs(1));
+
+		if (trans_time < commit_time) {
+			ktime_t expires = ktime_add_ns(ktime_get(),
+						       commit_time);
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
+		}
 	}
 
 	current->journal_info = NULL;
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 9497718..17159ca 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -249,16 +249,14 @@
 	return ret;
 }
 
-#define NR_BATCH	64
-
 static void
-__flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
+__flush_batch(journal_t *journal, int *batch_count)
 {
 	int i;
 
-	ll_rw_block(SWRITE, *batch_count, bhs);
+	ll_rw_block(SWRITE, *batch_count, journal->j_chkpt_bhs);
 	for (i = 0; i < *batch_count; i++) {
-		struct buffer_head *bh = bhs[i];
+		struct buffer_head *bh = journal->j_chkpt_bhs[i];
 		clear_buffer_jwrite(bh);
 		BUFFER_TRACE(bh, "brelse");
 		__brelse(bh);
@@ -277,8 +275,7 @@
  * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
  */
 static int __process_buffer(journal_t *journal, struct journal_head *jh,
-			struct buffer_head **bhs, int *batch_count,
-			transaction_t *transaction)
+			    int *batch_count, transaction_t *transaction)
 {
 	struct buffer_head *bh = jh2bh(jh);
 	int ret = 0;
@@ -325,14 +322,14 @@
 		get_bh(bh);
 		J_ASSERT_BH(bh, !buffer_jwrite(bh));
 		set_buffer_jwrite(bh);
-		bhs[*batch_count] = bh;
+		journal->j_chkpt_bhs[*batch_count] = bh;
 		__buffer_relink_io(jh);
 		jbd_unlock_bh_state(bh);
 		transaction->t_chp_stats.cs_written++;
 		(*batch_count)++;
-		if (*batch_count == NR_BATCH) {
+		if (*batch_count == JBD2_NR_BATCH) {
 			spin_unlock(&journal->j_list_lock);
-			__flush_batch(journal, bhs, batch_count);
+			__flush_batch(journal, batch_count);
 			ret = 1;
 		}
 	}
@@ -388,7 +385,6 @@
 	if (journal->j_checkpoint_transactions == transaction &&
 			transaction->t_tid == this_tid) {
 		int batch_count = 0;
-		struct buffer_head *bhs[NR_BATCH];
 		struct journal_head *jh;
 		int retry = 0, err;
 
@@ -402,7 +398,7 @@
 				retry = 1;
 				break;
 			}
-			retry = __process_buffer(journal, jh, bhs, &batch_count,
+			retry = __process_buffer(journal, jh, &batch_count,
 						 transaction);
 			if (retry < 0 && !result)
 				result = retry;
@@ -419,7 +415,7 @@
 				spin_unlock(&journal->j_list_lock);
 				retry = 1;
 			}
-			__flush_batch(journal, bhs, &batch_count);
+			__flush_batch(journal, &batch_count);
 		}
 
 		if (retry) {
@@ -686,6 +682,7 @@
 	   safely remove this transaction from the log */
 
 	__jbd2_journal_drop_transaction(journal, transaction);
+	kfree(transaction);
 
 	/* Just in case anybody was waiting for more transactions to be
            checkpointed... */
@@ -760,5 +757,4 @@
 	J_ASSERT(journal->j_running_transaction != transaction);
 
 	jbd_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
-	kfree(transaction);
 }
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index c8a1bac..62804e5 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -25,6 +25,7 @@
 #include <linux/crc32.h>
 #include <linux/writeback.h>
 #include <linux/backing-dev.h>
+#include <linux/bio.h>
 
 /*
  * Default IO end handler for temporary BJ_IO buffer_heads.
@@ -137,7 +138,7 @@
 		set_buffer_ordered(bh);
 		barrier_done = 1;
 	}
-	ret = submit_bh(WRITE, bh);
+	ret = submit_bh(WRITE_SYNC, bh);
 	if (barrier_done)
 		clear_buffer_ordered(bh);
 
@@ -158,7 +159,7 @@
 		lock_buffer(bh);
 		set_buffer_uptodate(bh);
 		clear_buffer_dirty(bh);
-		ret = submit_bh(WRITE, bh);
+		ret = submit_bh(WRITE_SYNC, bh);
 	}
 	*cbh = bh;
 	return ret;
@@ -168,12 +169,34 @@
  * This function along with journal_submit_commit_record
  * allows to write the commit record asynchronously.
  */
-static int journal_wait_on_commit_record(struct buffer_head *bh)
+static int journal_wait_on_commit_record(journal_t *journal,
+					 struct buffer_head *bh)
 {
 	int ret = 0;
 
+retry:
 	clear_buffer_dirty(bh);
 	wait_on_buffer(bh);
+	if (buffer_eopnotsupp(bh) && (journal->j_flags & JBD2_BARRIER)) {
+		printk(KERN_WARNING
+		       "JBD2: wait_on_commit_record: sync failed on %s - "
+		       "disabling barriers\n", journal->j_devname);
+		spin_lock(&journal->j_state_lock);
+		journal->j_flags &= ~JBD2_BARRIER;
+		spin_unlock(&journal->j_state_lock);
+
+		lock_buffer(bh);
+		clear_buffer_dirty(bh);
+		set_buffer_uptodate(bh);
+		bh->b_end_io = journal_end_buffer_io_sync;
+
+		ret = submit_bh(WRITE_SYNC, bh);
+		if (ret) {
+			unlock_buffer(bh);
+			return ret;
+		}
+		goto retry;
+	}
 
 	if (unlikely(!buffer_uptodate(bh)))
 		ret = -EIO;
@@ -332,13 +355,15 @@
 	int flags;
 	int err;
 	unsigned long long blocknr;
+	ktime_t start_time;
+	u64 commit_time;
 	char *tagp = NULL;
 	journal_header_t *header;
 	journal_block_tag_t *tag = NULL;
 	int space_left = 0;
 	int first_tag = 0;
 	int tag_flag;
-	int i;
+	int i, to_free = 0;
 	int tag_bytes = journal_tag_bytes(journal);
 	struct buffer_head *cbh = NULL; /* For transactional checksums */
 	__u32 crc32_sum = ~0;
@@ -458,6 +483,7 @@
 	commit_transaction->t_state = T_FLUSH;
 	journal->j_committing_transaction = commit_transaction;
 	journal->j_running_transaction = NULL;
+	start_time = ktime_get();
 	commit_transaction->t_log_start = journal->j_head;
 	wake_up(&journal->j_wait_transaction_locked);
 	spin_unlock(&journal->j_state_lock);
@@ -803,7 +829,7 @@
 			__jbd2_journal_abort_hard(journal);
 	}
 	if (!err && !is_journal_aborted(journal))
-		err = journal_wait_on_commit_record(cbh);
+		err = journal_wait_on_commit_record(journal, cbh);
 
 	if (err)
 		jbd2_journal_abort(journal, err);
@@ -981,14 +1007,23 @@
 	J_ASSERT(commit_transaction == journal->j_committing_transaction);
 	journal->j_commit_sequence = commit_transaction->t_tid;
 	journal->j_committing_transaction = NULL;
-	spin_unlock(&journal->j_state_lock);
+	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
 
-	if (journal->j_commit_callback)
-		journal->j_commit_callback(journal, commit_transaction);
+	/*
+	 * weight the commit time higher than the average time so we don't
+	 * react too strongly to vast changes in the commit time
+	 */
+	if (likely(journal->j_average_commit_time))
+		journal->j_average_commit_time = (commit_time +
+				journal->j_average_commit_time*3) / 4;
+	else
+		journal->j_average_commit_time = commit_time;
+	spin_unlock(&journal->j_state_lock);
 
 	if (commit_transaction->t_checkpoint_list == NULL &&
 	    commit_transaction->t_checkpoint_io_list == NULL) {
 		__jbd2_journal_drop_transaction(journal, commit_transaction);
+		to_free = 1;
 	} else {
 		if (journal->j_checkpoint_transactions == NULL) {
 			journal->j_checkpoint_transactions = commit_transaction;
@@ -1007,11 +1042,16 @@
 	}
 	spin_unlock(&journal->j_list_lock);
 
+	if (journal->j_commit_callback)
+		journal->j_commit_callback(journal, commit_transaction);
+
 	trace_mark(jbd2_end_commit, "dev %s transaction %d head %d",
-		   journal->j_devname, journal->j_commit_sequence,
+		   journal->j_devname, commit_transaction->t_tid,
 		   journal->j_tail_sequence);
 	jbd_debug(1, "JBD: commit %d complete, head %d\n",
 		  journal->j_commit_sequence, journal->j_tail_sequence);
+	if (to_free)
+		kfree(commit_transaction);
 
 	wake_up(&journal->j_wait_done_commit);
 }
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index f6bff9d..5667530 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -40,6 +40,7 @@
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
+#include <asm/div64.h>
 
 EXPORT_SYMBOL(jbd2_journal_start);
 EXPORT_SYMBOL(jbd2_journal_restart);
@@ -66,7 +67,6 @@
 EXPORT_SYMBOL(jbd2_journal_check_used_features);
 EXPORT_SYMBOL(jbd2_journal_check_available_features);
 EXPORT_SYMBOL(jbd2_journal_set_features);
-EXPORT_SYMBOL(jbd2_journal_create);
 EXPORT_SYMBOL(jbd2_journal_load);
 EXPORT_SYMBOL(jbd2_journal_destroy);
 EXPORT_SYMBOL(jbd2_journal_abort);
@@ -132,8 +132,9 @@
 	journal->j_task = current;
 	wake_up(&journal->j_wait_done_commit);
 
-	printk(KERN_INFO "kjournald2 starting.  Commit interval %ld seconds\n",
-			journal->j_commit_interval / HZ);
+	printk(KERN_INFO "kjournald2 starting: pid %d, dev %s, "
+	       "commit interval %ld seconds\n", current->pid,
+	       journal->j_devname, journal->j_commit_interval / HZ);
 
 	/*
 	 * And now, wait forever for commit wakeup events.
@@ -650,6 +651,8 @@
 		return NULL;
 
 	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+	if (!bh)
+		return NULL;
 	lock_buffer(bh);
 	memset(bh->b_data, 0, journal->j_blocksize);
 	set_buffer_uptodate(bh);
@@ -843,6 +846,8 @@
 	    jiffies_to_msecs(s->stats->u.run.rs_flushing / s->stats->ts_tid));
 	seq_printf(seq, "  %ums logging transaction\n",
 	    jiffies_to_msecs(s->stats->u.run.rs_logging / s->stats->ts_tid));
+	seq_printf(seq, "  %luus average transaction commit time\n",
+		   do_div(s->journal->j_average_commit_time, 1000));
 	seq_printf(seq, "  %lu handles per transaction\n",
 	    s->stats->u.run.rs_handle_count / s->stats->ts_tid);
 	seq_printf(seq, "  %lu blocks per transaction\n",
@@ -980,6 +985,8 @@
 	spin_lock_init(&journal->j_state_lock);
 
 	journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE);
+	journal->j_min_batch_time = 0;
+	journal->j_max_batch_time = 15000; /* 15ms */
 
 	/* The journal is marked for error until we succeed with recovery! */
 	journal->j_flags = JBD2_ABORT;
@@ -1035,15 +1042,14 @@
 
 	/* journal descriptor can store up to n blocks -bzzz */
 	journal->j_blocksize = blocksize;
+	jbd2_stats_proc_init(journal);
 	n = journal->j_blocksize / sizeof(journal_block_tag_t);
 	journal->j_wbufsize = n;
 	journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
 	if (!journal->j_wbuf) {
 		printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
 			__func__);
-		kfree(journal);
-		journal = NULL;
-		goto out;
+		goto out_err;
 	}
 	journal->j_dev = bdev;
 	journal->j_fs_dev = fs_dev;
@@ -1053,14 +1059,22 @@
 	p = journal->j_devname;
 	while ((p = strchr(p, '/')))
 		*p = '!';
-	jbd2_stats_proc_init(journal);
 
 	bh = __getblk(journal->j_dev, start, journal->j_blocksize);
-	J_ASSERT(bh != NULL);
+	if (!bh) {
+		printk(KERN_ERR
+		       "%s: Cannot get buffer for journal superblock\n",
+		       __func__);
+		goto out_err;
+	}
 	journal->j_sb_buffer = bh;
 	journal->j_superblock = (journal_superblock_t *)bh->b_data;
-out:
+
 	return journal;
+out_err:
+	jbd2_stats_proc_exit(journal);
+	kfree(journal);
+	return NULL;
 }
 
 /**
@@ -1108,9 +1122,7 @@
 	if (!journal->j_wbuf) {
 		printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
 			__func__);
-		jbd2_stats_proc_exit(journal);
-		kfree(journal);
-		return NULL;
+		goto out_err;
 	}
 
 	err = jbd2_journal_bmap(journal, 0, &blocknr);
@@ -1118,17 +1130,24 @@
 	if (err) {
 		printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
 		       __func__);
-		jbd2_stats_proc_exit(journal);
-		kfree(journal);
-		return NULL;
+		goto out_err;
 	}
 
 	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
-	J_ASSERT(bh != NULL);
+	if (!bh) {
+		printk(KERN_ERR
+		       "%s: Cannot get buffer for journal superblock\n",
+		       __func__);
+		goto out_err;
+	}
 	journal->j_sb_buffer = bh;
 	journal->j_superblock = (journal_superblock_t *)bh->b_data;
 
 	return journal;
+out_err:
+	jbd2_stats_proc_exit(journal);
+	kfree(journal);
+	return NULL;
 }
 
 /*
@@ -1177,77 +1196,6 @@
 }
 
 /**
- * int jbd2_journal_create() - Initialise the new journal file
- * @journal: Journal to create. This structure must have been initialised
- *
- * Given a journal_t structure which tells us which disk blocks we can
- * use, create a new journal superblock and initialise all of the
- * journal fields from scratch.
- **/
-int jbd2_journal_create(journal_t *journal)
-{
-	unsigned long long blocknr;
-	struct buffer_head *bh;
-	journal_superblock_t *sb;
-	int i, err;
-
-	if (journal->j_maxlen < JBD2_MIN_JOURNAL_BLOCKS) {
-		printk (KERN_ERR "Journal length (%d blocks) too short.\n",
-			journal->j_maxlen);
-		journal_fail_superblock(journal);
-		return -EINVAL;
-	}
-
-	if (journal->j_inode == NULL) {
-		/*
-		 * We don't know what block to start at!
-		 */
-		printk(KERN_EMERG
-		       "%s: creation of journal on external device!\n",
-		       __func__);
-		BUG();
-	}
-
-	/* Zero out the entire journal on disk.  We cannot afford to
-	   have any blocks on disk beginning with JBD2_MAGIC_NUMBER. */
-	jbd_debug(1, "JBD: Zeroing out journal blocks...\n");
-	for (i = 0; i < journal->j_maxlen; i++) {
-		err = jbd2_journal_bmap(journal, i, &blocknr);
-		if (err)
-			return err;
-		bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
-		lock_buffer(bh);
-		memset (bh->b_data, 0, journal->j_blocksize);
-		BUFFER_TRACE(bh, "marking dirty");
-		mark_buffer_dirty(bh);
-		BUFFER_TRACE(bh, "marking uptodate");
-		set_buffer_uptodate(bh);
-		unlock_buffer(bh);
-		__brelse(bh);
-	}
-
-	sync_blockdev(journal->j_dev);
-	jbd_debug(1, "JBD: journal cleared.\n");
-
-	/* OK, fill in the initial static fields in the new superblock */
-	sb = journal->j_superblock;
-
-	sb->s_header.h_magic	 = cpu_to_be32(JBD2_MAGIC_NUMBER);
-	sb->s_header.h_blocktype = cpu_to_be32(JBD2_SUPERBLOCK_V2);
-
-	sb->s_blocksize	= cpu_to_be32(journal->j_blocksize);
-	sb->s_maxlen	= cpu_to_be32(journal->j_maxlen);
-	sb->s_first	= cpu_to_be32(1);
-
-	journal->j_transaction_sequence = 1;
-
-	journal->j_flags &= ~JBD2_ABORT;
-	journal->j_format_version = 2;
-
-	return journal_reset(journal);
-}
-
-/**
  * void jbd2_journal_update_superblock() - Update journal sb on disk.
  * @journal: The journal to update.
  * @wait: Set to '0' if you don't want to wait for IO completion.
@@ -1491,7 +1439,9 @@
 	spin_lock(&journal->j_list_lock);
 	while (journal->j_checkpoint_transactions != NULL) {
 		spin_unlock(&journal->j_list_lock);
+		mutex_lock(&journal->j_checkpoint_mutex);
 		jbd2_log_do_checkpoint(journal);
+		mutex_unlock(&journal->j_checkpoint_mutex);
 		spin_lock(&journal->j_list_lock);
 	}
 
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 4f925a4..46b4e34 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -25,6 +25,7 @@
 #include <linux/timer.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
+#include <linux/hrtimer.h>
 
 static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
 
@@ -48,6 +49,7 @@
 {
 	transaction->t_journal = journal;
 	transaction->t_state = T_RUNNING;
+	transaction->t_start_time = ktime_get();
 	transaction->t_tid = journal->j_transaction_sequence++;
 	transaction->t_expires = jiffies + journal->j_commit_interval;
 	spin_lock_init(&transaction->t_handle_lock);
@@ -1240,7 +1242,7 @@
 {
 	transaction_t *transaction = handle->h_transaction;
 	journal_t *journal = transaction->t_journal;
-	int old_handle_count, err;
+	int err;
 	pid_t pid;
 
 	J_ASSERT(journal_current_handle() == handle);
@@ -1263,24 +1265,54 @@
 	/*
 	 * Implement synchronous transaction batching.  If the handle
 	 * was synchronous, don't force a commit immediately.  Let's
-	 * yield and let another thread piggyback onto this transaction.
-	 * Keep doing that while new threads continue to arrive.
-	 * It doesn't cost much - we're about to run a commit and sleep
-	 * on IO anyway.  Speeds up many-threaded, many-dir operations
-	 * by 30x or more...
+	 * yield and let another thread piggyback onto this
+	 * transaction.  Keep doing that while new threads continue to
+	 * arrive.  It doesn't cost much - we're about to run a commit
+	 * and sleep on IO anyway.  Speeds up many-threaded, many-dir
+	 * operations by 30x or more...
 	 *
-	 * But don't do this if this process was the most recent one to
-	 * perform a synchronous write.  We do this to detect the case where a
-	 * single process is doing a stream of sync writes.  No point in waiting
-	 * for joiners in that case.
+	 * We try and optimize the sleep time against what the
+	 * underlying disk can do, instead of having a static sleep
+	 * time.  This is useful for the case where our storage is so
+	 * fast that it is more optimal to go ahead and force a flush
+	 * and wait for the transaction to be committed than it is to
+	 * wait for an arbitrary amount of time for new writers to
+	 * join the transaction.  We achieve this by measuring how
+	 * long it takes to commit a transaction, and compare it with
+	 * how long this transaction has been running, and if run time
+	 * < commit time then we sleep for the delta and commit.  This
+	 * greatly helps super fast disks that would see slowdowns as
+	 * more threads started doing fsyncs.
+	 *
+	 * But don't do this if this process was the most recent one
+	 * to perform a synchronous write.  We do this to detect the
+	 * case where a single process is doing a stream of sync
+	 * writes.  No point in waiting for joiners in that case.
 	 */
 	pid = current->pid;
 	if (handle->h_sync && journal->j_last_sync_writer != pid) {
+		u64 commit_time, trans_time;
+
 		journal->j_last_sync_writer = pid;
-		do {
-			old_handle_count = transaction->t_handle_count;
-			schedule_timeout_uninterruptible(1);
-		} while (old_handle_count != transaction->t_handle_count);
+
+		spin_lock(&journal->j_state_lock);
+		commit_time = journal->j_average_commit_time;
+		spin_unlock(&journal->j_state_lock);
+
+		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
+						   transaction->t_start_time));
+
+		commit_time = max_t(u64, commit_time,
+				    1000*journal->j_min_batch_time);
+		commit_time = min_t(u64, commit_time,
+				    1000*journal->j_max_batch_time);
+
+		if (trans_time < commit_time) {
+			ktime_t expires = ktime_add_ns(ktime_get(),
+						       commit_time);
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
+		}
 	}
 
 	current->journal_info = NULL;
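
The rewritten batching logic above replaces the old fixed one-jiffy polling
loop: a sync handle waits only if its transaction has been running for less
time than an average commit takes, with the target clamped to the tunable
j_min_batch_time/j_max_batch_time window (microseconds).  A standalone sketch
of that decision (names and types are ours, for illustration):

	#include <stdint.h>

	/* illustrative only: the decision made for a sync handle in
	 * jbd2_journal_stop() above.  Returns how many nanoseconds to
	 * sleep from "now" before forcing the commit; 0 means commit
	 * immediately. */
	static uint64_t batch_sleep_ns(uint64_t avg_commit_ns,
				       uint64_t trans_run_ns,
				       uint64_t min_batch_us,
				       uint64_t max_batch_us)
	{
		uint64_t target = avg_commit_ns;

		if (target < 1000 * min_batch_us)
			target = 1000 * min_batch_us;
		if (target > 1000 * max_batch_us)
			target = 1000 * max_batch_us;

		/* a transaction that has already run longer than a
		 * typical commit gains nothing from waiting for more
		 * joiners */
		return trans_run_ns < target ? target : 0;
	}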
diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
index c73fa89..170d289 100644
--- a/fs/jffs2/compr_rubin.c
+++ b/fs/jffs2/compr_rubin.c
@@ -22,9 +22,7 @@
 
 
 #define BIT_DIVIDER_MIPS 1043
-static int bits_mips[8] = { 277,249,290,267,229,341,212,241}; /* mips32 */
-
-#include <linux/errno.h>
+static int bits_mips[8] = { 277, 249, 290, 267, 229, 341, 212, 241};
 
 struct pushpull {
 	unsigned char *buf;
@@ -43,7 +41,9 @@
 	int bits[8];
 };
 
-static inline void init_pushpull(struct pushpull *pp, char *buf, unsigned buflen, unsigned ofs, unsigned reserve)
+static inline void init_pushpull(struct pushpull *pp, char *buf,
+				 unsigned buflen, unsigned ofs,
+				 unsigned reserve)
 {
 	pp->buf = buf;
 	pp->buflen = buflen;
@@ -53,16 +53,14 @@
 
 static inline int pushbit(struct pushpull *pp, int bit, int use_reserved)
 {
-	if (pp->ofs >= pp->buflen - (use_reserved?0:pp->reserve)) {
+	if (pp->ofs >= pp->buflen - (use_reserved?0:pp->reserve))
 		return -ENOSPC;
-	}
 
-	if (bit) {
-		pp->buf[pp->ofs >> 3] |= (1<<(7-(pp->ofs &7)));
-	}
-	else {
-		pp->buf[pp->ofs >> 3] &= ~(1<<(7-(pp->ofs &7)));
-	}
+	if (bit)
+		pp->buf[pp->ofs >> 3] |= (1<<(7-(pp->ofs & 7)));
+	else
+		pp->buf[pp->ofs >> 3] &= ~(1<<(7-(pp->ofs & 7)));
+
 	pp->ofs++;
 
 	return 0;
@@ -97,6 +95,7 @@
 	rs->p = (long) (2 * UPPER_BIT_RUBIN);
 	rs->bit_number = (long) 0;
 	rs->bit_divider = div;
+
 	for (c=0; c<8; c++)
 		rs->bits[c] = bits[c];
 }
@@ -108,7 +107,8 @@
 	long i0, i1;
 	int ret;
 
-	while ((rs->q >= UPPER_BIT_RUBIN) || ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) {
+	while ((rs->q >= UPPER_BIT_RUBIN) ||
+	       ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) {
 		rs->bit_number++;
 
 		ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0);
@@ -119,12 +119,12 @@
 		rs->p <<= 1;
 	}
 	i0 = A * rs->p / (A + B);
-	if (i0 <= 0) {
+	if (i0 <= 0)
 		i0 = 1;
-	}
-	if (i0 >= rs->p) {
+
+	if (i0 >= rs->p)
 		i0 = rs->p - 1;
-	}
+
 	i1 = rs->p - i0;
 
 	if (symbol == 0)
@@ -157,11 +157,13 @@
 	/* behalve lower */
 	rs->rec_q = 0;
 
-	for (rs->bit_number = 0; rs->bit_number++ < RUBIN_REG_SIZE; rs->rec_q = rs->rec_q * 2 + (long) (pullbit(&rs->pp)))
+	for (rs->bit_number = 0; rs->bit_number++ < RUBIN_REG_SIZE;
+	     rs->rec_q = rs->rec_q * 2 + (long) (pullbit(&rs->pp)))
 		;
 }
 
-static void __do_decode(struct rubin_state *rs, unsigned long p, unsigned long q)
+static void __do_decode(struct rubin_state *rs, unsigned long p,
+			unsigned long q)
 {
 	register unsigned long lower_bits_rubin = LOWER_BITS_RUBIN;
 	unsigned long rec_q;
@@ -207,12 +209,11 @@
 		__do_decode(rs, p, q);
 
 	i0 = A * rs->p / (A + B);
-	if (i0 <= 0) {
+	if (i0 <= 0)
 		i0 = 1;
-	}
-	if (i0 >= rs->p) {
+
+	if (i0 >= rs->p)
 		i0 = rs->p - 1;
-	}
 
 	threshold = rs->q + i0;
 	symbol = rs->rec_q >= threshold;
@@ -234,14 +235,15 @@
 	struct rubin_state rs_copy;
 	rs_copy = *rs;
 
-	for (i=0;i<8;i++) {
-		ret = encode(rs, rs->bit_divider-rs->bits[i],rs->bits[i],byte&1);
+	for (i=0; i<8; i++) {
+		ret = encode(rs, rs->bit_divider-rs->bits[i],
+			     rs->bits[i], byte & 1);
 		if (ret) {
 			/* Failed. Restore old state */
 			*rs = rs_copy;
 			return ret;
 		}
-		byte=byte>>1;
+		byte >>= 1;
 	}
 	return 0;
 }
@@ -251,7 +253,8 @@
 	int i, result = 0, bit_divider = rs->bit_divider;
 
 	for (i = 0; i < 8; i++)
-		result |= decode(rs, bit_divider - rs->bits[i], rs->bits[i]) << i;
+		result |= decode(rs, bit_divider - rs->bits[i],
+				 rs->bits[i]) << i;
 
 	return result;
 }
@@ -259,7 +262,8 @@
 
 
 static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in,
-		      unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen)
+			     unsigned char *cpage_out, uint32_t *sourcelen,
+			     uint32_t *dstlen)
 	{
 	int outpos = 0;
 	int pos=0;
@@ -295,7 +299,8 @@
 int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out,
 		   uint32_t *sourcelen, uint32_t *dstlen, void *model)
 {
-	return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen);
+	return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in,
+				 cpage_out, sourcelen, dstlen);
 }
 #endif
 static int jffs2_dynrubin_compress(unsigned char *data_in,
@@ -316,9 +321,8 @@
 		return -1;
 
 	memset(histo, 0, 256);
-	for (i=0; i<mysrclen; i++) {
+	for (i=0; i<mysrclen; i++)
 		histo[data_in[i]]++;
-	}
 	memset(bits, 0, sizeof(int)*8);
 	for (i=0; i<256; i++) {
 		if (i&128)
@@ -346,7 +350,8 @@
 		cpage_out[i] = bits[i];
 	}
 
-	ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen, &mydstlen);
+	ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen,
+				&mydstlen);
 	if (ret)
 		return ret;
 
@@ -363,8 +368,10 @@
 	return 0;
 }
 
-static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata_in,
-			 unsigned char *page_out, uint32_t srclen, uint32_t destlen)
+static void rubin_do_decompress(int bit_divider, int *bits,
+				unsigned char *cdata_in, 
+				unsigned char *page_out, uint32_t srclen,
+				uint32_t destlen)
 {
 	int outpos = 0;
 	struct rubin_state rs;
@@ -372,9 +379,8 @@
 	init_pushpull(&rs.pp, cdata_in, srclen, 0, 0);
 	init_decode(&rs, bit_divider, bits);
 
-	while (outpos < destlen) {
+	while (outpos < destlen)
 		page_out[outpos++] = in_byte(&rs);
-	}
 }
 
 
@@ -383,7 +389,8 @@
 				      uint32_t sourcelen, uint32_t dstlen,
 				      void *model)
 {
-	rubin_do_decompress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen);
+	rubin_do_decompress(BIT_DIVIDER_MIPS, bits_mips, data_in,
+			    cpage_out, sourcelen, dstlen);
 	return 0;
 }
 
@@ -398,52 +405,53 @@
 	for (c=0; c<8; c++)
 		bits[c] = data_in[c];
 
-	rubin_do_decompress(256, bits, data_in+8, cpage_out, sourcelen-8, dstlen);
+	rubin_do_decompress(256, bits, data_in+8, cpage_out, sourcelen-8,
+			    dstlen);
 	return 0;
 }
 
 static struct jffs2_compressor jffs2_rubinmips_comp = {
-    .priority = JFFS2_RUBINMIPS_PRIORITY,
-    .name = "rubinmips",
-    .compr = JFFS2_COMPR_DYNRUBIN,
-    .compress = NULL, /*&jffs2_rubinmips_compress,*/
-    .decompress = &jffs2_rubinmips_decompress,
+	.priority = JFFS2_RUBINMIPS_PRIORITY,
+	.name = "rubinmips",
+	.compr = JFFS2_COMPR_DYNRUBIN,
+	.compress = NULL, /*&jffs2_rubinmips_compress,*/
+	.decompress = &jffs2_rubinmips_decompress,
 #ifdef JFFS2_RUBINMIPS_DISABLED
-    .disabled = 1,
+	.disabled = 1,
 #else
-    .disabled = 0,
+	.disabled = 0,
 #endif
 };
 
 int jffs2_rubinmips_init(void)
 {
-    return jffs2_register_compressor(&jffs2_rubinmips_comp);
+	return jffs2_register_compressor(&jffs2_rubinmips_comp);
 }
 
 void jffs2_rubinmips_exit(void)
 {
-    jffs2_unregister_compressor(&jffs2_rubinmips_comp);
+	jffs2_unregister_compressor(&jffs2_rubinmips_comp);
 }
 
 static struct jffs2_compressor jffs2_dynrubin_comp = {
-    .priority = JFFS2_DYNRUBIN_PRIORITY,
-    .name = "dynrubin",
-    .compr = JFFS2_COMPR_RUBINMIPS,
-    .compress = jffs2_dynrubin_compress,
-    .decompress = &jffs2_dynrubin_decompress,
+	.priority = JFFS2_DYNRUBIN_PRIORITY,
+	.name = "dynrubin",
+	.compr = JFFS2_COMPR_RUBINMIPS,
+	.compress = jffs2_dynrubin_compress,
+	.decompress = &jffs2_dynrubin_decompress,
 #ifdef JFFS2_DYNRUBIN_DISABLED
-    .disabled = 1,
+	.disabled = 1,
 #else
-    .disabled = 0,
+	.disabled = 0,
 #endif
 };
 
 int jffs2_dynrubin_init(void)
 {
-    return jffs2_register_compressor(&jffs2_dynrubin_comp);
+	return jffs2_register_compressor(&jffs2_dynrubin_comp);
 }
 
 void jffs2_dynrubin_exit(void)
 {
-    jffs2_unregister_compressor(&jffs2_dynrubin_comp);
+	jffs2_unregister_compressor(&jffs2_dynrubin_comp);
 }
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 259461b..c32b4a1 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -175,7 +175,7 @@
 {
 	/* For NAND, if the failure did not occur at the device level for a
 	   specific physical page, don't bother updating the bad block table. */
-	if (jffs2_cleanmarker_oob(c) && (bad_offset != MTD_FAIL_ADDR_UNKNOWN)) {
+	if (jffs2_cleanmarker_oob(c) && (bad_offset != (uint32_t)MTD_FAIL_ADDR_UNKNOWN)) {
 		/* We had a device-level failure to erase.  Let's see if we've
 		   failed too many times. */
 		if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
@@ -209,7 +209,8 @@
 	struct erase_priv_struct *priv = (void *)instr->priv;
 
 	if(instr->state != MTD_ERASE_DONE) {
-		printk(KERN_WARNING "Erase at 0x%08x finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", instr->addr, instr->state);
+		printk(KERN_WARNING "Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n",
+			(unsigned long long)instr->addr, instr->state);
 		jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
 	} else {
 		jffs2_erase_succeeded(priv->c, priv->jeb);
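
The cast added to the MTD_FAIL_ADDR_UNKNOWN comparison above goes with the
switch to 64-bit MTD erase addresses visible in the printk fix in the same
file: bad_offset is still a 32-bit offset, so it is compared against the
sentinel truncated to the same width.  A tiny user-space illustration of the
width-mismatch pitfall such a cast guards against (the sentinel below is a
stand-in, not the real macro):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t sentinel = ~0ULL;	/* stand-in 64-bit "unknown" value */
		uint32_t bad_offset = 0xffffffffu;

		/* bad_offset is promoted to 64 bits, so this prints 0 */
		printf("direct compare:    %d\n", bad_offset == sentinel);
		/* truncating the sentinel to 32 bits makes it match: prints 1 */
		printf("truncated compare: %d\n",
		       bad_offset == (uint32_t)sentinel);
		return 0;
	}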
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index 1750445..507ed6e 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -366,9 +366,6 @@
 void jffs2_free_raw_node_refs(struct jffs2_sb_info *c);
 struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset);
 void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete);
-struct rb_node *rb_next(struct rb_node *);
-struct rb_node *rb_prev(struct rb_node *);
-void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root);
 int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn);
 uint32_t jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size);
 struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c,
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 0dae345..b37d1f7 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -543,7 +543,7 @@
 	return ret;
 }
 
-static void jfs_write_super_lockfs(struct super_block *sb)
+static int jfs_freeze(struct super_block *sb)
 {
 	struct jfs_sb_info *sbi = JFS_SBI(sb);
 	struct jfs_log *log = sbi->log;
@@ -553,9 +553,10 @@
 		lmLogShutdown(log);
 		updateSuper(sb, FM_CLEAN);
 	}
+	return 0;
 }
 
-static void jfs_unlockfs(struct super_block *sb)
+static int jfs_unfreeze(struct super_block *sb)
 {
 	struct jfs_sb_info *sbi = JFS_SBI(sb);
 	struct jfs_log *log = sbi->log;
@@ -568,6 +569,7 @@
 		else
 			txResume(sb);
 	}
+	return 0;
 }
 
 static int jfs_get_sb(struct file_system_type *fs_type,
@@ -735,8 +737,8 @@
 	.delete_inode	= jfs_delete_inode,
 	.put_super	= jfs_put_super,
 	.sync_fs	= jfs_sync_fs,
-	.write_super_lockfs = jfs_write_super_lockfs,
-	.unlockfs       = jfs_unlockfs,
+	.freeze_fs	= jfs_freeze,
+	.unfreeze_fs	= jfs_unfreeze,
 	.statfs		= jfs_statfs,
 	.remount_fs	= jfs_remount,
 	.show_options	= jfs_show_options,
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 31668b6..dd79570 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -16,7 +16,6 @@
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/svc.h>
 #include <linux/lockd/lockd.h>
-#include <linux/lockd/sm_inter.h>
 
 #define NLMDBG_FACILITY		NLMDBG_CLIENT
 #define NLMCLNT_GRACE_WAIT	(5*HZ)
@@ -518,11 +517,9 @@
 	unsigned char fl_type;
 	int status = -ENOLCK;
 
-	if (nsm_monitor(host) < 0) {
-		printk(KERN_NOTICE "lockd: failed to monitor %s\n",
-					host->h_name);
+	if (nsm_monitor(host) < 0)
 		goto out;
-	}
+
 	fl->fl_flags |= FL_ACCESS;
 	status = do_vfs_lock(fl);
 	fl->fl_flags = fl_flags;
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index abdebf7..99d737b 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -15,7 +15,6 @@
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/svc.h>
 #include <linux/lockd/lockd.h>
-#include <linux/lockd/sm_inter.h>
 #include <linux/mutex.h>
 
 #include <net/ipv6.h>
@@ -32,11 +31,6 @@
 static DEFINE_MUTEX(nlm_host_mutex);
 
 static void			nlm_gc_hosts(void);
-static struct nsm_handle	*nsm_find(const struct sockaddr *sap,
-						const size_t salen,
-						const char *hostname,
-						const size_t hostname_len,
-						const int create);
 
 struct nlm_lookup_host_info {
 	const int		server;		/* search for server|client */
@@ -105,32 +99,6 @@
 	}
 }
 
-static void nlm_display_address(const struct sockaddr *sap,
-				char *buf, const size_t len)
-{
-	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
-	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
-
-	switch (sap->sa_family) {
-	case AF_UNSPEC:
-		snprintf(buf, len, "unspecified");
-		break;
-	case AF_INET:
-		snprintf(buf, len, "%pI4", &sin->sin_addr.s_addr);
-		break;
-	case AF_INET6:
-		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
-			snprintf(buf, len, "%pI4",
-				 &sin6->sin6_addr.s6_addr32[3]);
-		else
-			snprintf(buf, len, "%pI6", &sin6->sin6_addr);
-		break;
-	default:
-		snprintf(buf, len, "unsupported address family");
-		break;
-	}
-}
-
 /*
  * Common host lookup routine for server & client
  */
@@ -190,8 +158,8 @@
 		atomic_inc(&nsm->sm_count);
 	else {
 		host = NULL;
-		nsm = nsm_find(ni->sap, ni->salen,
-				ni->hostname, ni->hostname_len, 1);
+		nsm = nsm_get_handle(ni->sap, ni->salen,
+					ni->hostname, ni->hostname_len);
 		if (!nsm) {
 			dprintk("lockd: nlm_lookup_host failed; "
 				"no nsm handle\n");
@@ -206,6 +174,7 @@
 		goto out;
 	}
 	host->h_name	   = nsm->sm_name;
+	host->h_addrbuf    = nsm->sm_addrbuf;
 	memcpy(nlm_addr(host), ni->sap, ni->salen);
 	host->h_addrlen = ni->salen;
 	nlm_clear_port(nlm_addr(host));
@@ -232,11 +201,6 @@
 
 	nrhosts++;
 
-	nlm_display_address((struct sockaddr *)&host->h_addr,
-				host->h_addrbuf, sizeof(host->h_addrbuf));
-	nlm_display_address((struct sockaddr *)&host->h_srcaddr,
-				host->h_srcaddrbuf, sizeof(host->h_srcaddrbuf));
-
 	dprintk("lockd: nlm_lookup_host created host %s\n",
 			host->h_name);
 
@@ -256,10 +220,8 @@
 	BUG_ON(!list_empty(&host->h_lockowners));
 	BUG_ON(atomic_read(&host->h_count));
 
-	/*
-	 * Release NSM handle and unmonitor host.
-	 */
 	nsm_unmonitor(host);
+	nsm_release(host->h_nsmhandle);
 
 	clnt = host->h_rpcclnt;
 	if (clnt != NULL)
@@ -378,8 +340,8 @@
 {
 	struct rpc_clnt	*clnt;
 
-	dprintk("lockd: nlm_bind_host %s (%s), my addr=%s\n",
-			host->h_name, host->h_addrbuf, host->h_srcaddrbuf);
+	dprintk("lockd: nlm_bind_host %s (%s)\n",
+			host->h_name, host->h_addrbuf);
 
 	/* Lock host handle */
 	mutex_lock(&host->h_mutex);
@@ -481,35 +443,23 @@
 	}
 }
 
-/*
- * We were notified that the host indicated by address &sin
- * has rebooted.
- * Release all resources held by that peer.
+/**
+ * nlm_host_rebooted - Release all resources held by rebooted host
+ * @info: pointer to decoded results of NLM_SM_NOTIFY call
+ *
+ * We were notified that the specified host has rebooted.  Release
+ * all resources held by that peer.
  */
-void nlm_host_rebooted(const struct sockaddr_in *sin,
-				const char *hostname,
-				unsigned int hostname_len,
-				u32 new_state)
+void nlm_host_rebooted(const struct nlm_reboot *info)
 {
 	struct hlist_head *chain;
 	struct hlist_node *pos;
 	struct nsm_handle *nsm;
 	struct nlm_host	*host;
 
-	nsm = nsm_find((struct sockaddr *)sin, sizeof(*sin),
-			hostname, hostname_len, 0);
-	if (nsm == NULL) {
-		dprintk("lockd: never saw rebooted peer '%.*s' before\n",
-				hostname_len, hostname);
+	nsm = nsm_reboot_lookup(info);
+	if (unlikely(nsm == NULL))
 		return;
-	}
-
-	dprintk("lockd: nlm_host_rebooted(%.*s, %s)\n",
-			hostname_len, hostname, nsm->sm_addrbuf);
-
-	/* When reclaiming locks on this peer, make sure that
-	 * we set up a new notification */
-	nsm->sm_monitored = 0;
 
 	/* Mark all hosts tied to this NSM state as having rebooted.
 	 * We run the loop repeatedly, because we drop the host table
@@ -520,8 +470,8 @@
 	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
 		hlist_for_each_entry(host, pos, chain, h_hash) {
 			if (host->h_nsmhandle == nsm
-			 && host->h_nsmstate != new_state) {
-				host->h_nsmstate = new_state;
+			 && host->h_nsmstate != info->state) {
+				host->h_nsmstate = info->state;
 				host->h_state++;
 
 				nlm_get_host(host);
@@ -629,89 +579,3 @@
 
 	next_gc = jiffies + NLM_HOST_COLLECT;
 }
-
-
-/*
- * Manage NSM handles
- */
-static LIST_HEAD(nsm_handles);
-static DEFINE_SPINLOCK(nsm_lock);
-
-static struct nsm_handle *nsm_find(const struct sockaddr *sap,
-				   const size_t salen,
-				   const char *hostname,
-				   const size_t hostname_len,
-				   const int create)
-{
-	struct nsm_handle *nsm = NULL;
-	struct nsm_handle *pos;
-
-	if (!sap)
-		return NULL;
-
-	if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
-		if (printk_ratelimit()) {
-			printk(KERN_WARNING "Invalid hostname \"%.*s\" "
-					    "in NFS lock request\n",
-				(int)hostname_len, hostname);
-		}
-		return NULL;
-	}
-
-retry:
-	spin_lock(&nsm_lock);
-	list_for_each_entry(pos, &nsm_handles, sm_link) {
-
-		if (hostname && nsm_use_hostnames) {
-			if (strlen(pos->sm_name) != hostname_len
-			 || memcmp(pos->sm_name, hostname, hostname_len))
-				continue;
-		} else if (!nlm_cmp_addr(nsm_addr(pos), sap))
-			continue;
-		atomic_inc(&pos->sm_count);
-		kfree(nsm);
-		nsm = pos;
-		goto found;
-	}
-	if (nsm) {
-		list_add(&nsm->sm_link, &nsm_handles);
-		goto found;
-	}
-	spin_unlock(&nsm_lock);
-
-	if (!create)
-		return NULL;
-
-	nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL);
-	if (nsm == NULL)
-		return NULL;
-
-	memcpy(nsm_addr(nsm), sap, salen);
-	nsm->sm_addrlen = salen;
-	nsm->sm_name = (char *) (nsm + 1);
-	memcpy(nsm->sm_name, hostname, hostname_len);
-	nsm->sm_name[hostname_len] = '\0';
-	nlm_display_address((struct sockaddr *)&nsm->sm_addr,
-				nsm->sm_addrbuf, sizeof(nsm->sm_addrbuf));
-	atomic_set(&nsm->sm_count, 1);
-	goto retry;
-
-found:
-	spin_unlock(&nsm_lock);
-	return nsm;
-}
-
-/*
- * Release an NSM handle
- */
-void
-nsm_release(struct nsm_handle *nsm)
-{
-	if (!nsm)
-		return;
-	if (atomic_dec_and_lock(&nsm->sm_count, &nsm_lock)) {
-		list_del(&nsm->sm_link);
-		spin_unlock(&nsm_lock);
-		kfree(nsm);
-	}
-}
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index ffd3461..5e2c4d5 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -9,35 +9,123 @@
 #include <linux/types.h>
 #include <linux/utsname.h>
 #include <linux/kernel.h>
+#include <linux/ktime.h>
+
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/xprtsock.h>
 #include <linux/sunrpc/svc.h>
 #include <linux/lockd/lockd.h>
-#include <linux/lockd/sm_inter.h>
-
 
 #define NLMDBG_FACILITY		NLMDBG_MONITOR
+#define NSM_PROGRAM		100024
+#define NSM_VERSION		1
 
-#define XDR_ADDRBUF_LEN		(20)
+enum {
+	NSMPROC_NULL,
+	NSMPROC_STAT,
+	NSMPROC_MON,
+	NSMPROC_UNMON,
+	NSMPROC_UNMON_ALL,
+	NSMPROC_SIMU_CRASH,
+	NSMPROC_NOTIFY,
+};
 
-static struct rpc_clnt *	nsm_create(void);
+struct nsm_args {
+	struct nsm_private	*priv;
+	u32			prog;		/* RPC callback info */
+	u32			vers;
+	u32			proc;
+
+	char			*mon_name;
+};
+
+struct nsm_res {
+	u32			status;
+	u32			state;
+};
 
 static struct rpc_program	nsm_program;
+static				LIST_HEAD(nsm_handles);
+static				DEFINE_SPINLOCK(nsm_lock);
 
 /*
  * Local NSM state
  */
-int				nsm_local_state;
+int	__read_mostly		nsm_local_state;
+int	__read_mostly		nsm_use_hostnames;
 
-/*
- * Common procedure for SM_MON/SM_UNMON calls
- */
-static int
-nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res)
+static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm)
+{
+	return (struct sockaddr *)&nsm->sm_addr;
+}
+
+static void nsm_display_ipv4_address(const struct sockaddr *sap, char *buf,
+				     const size_t len)
+{
+	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
+	snprintf(buf, len, "%pI4", &sin->sin_addr.s_addr);
+}
+
+static void nsm_display_ipv6_address(const struct sockaddr *sap, char *buf,
+				     const size_t len)
+{
+	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
+
+	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
+		snprintf(buf, len, "%pI4", &sin6->sin6_addr.s6_addr32[3]);
+	else if (sin6->sin6_scope_id != 0)
+		snprintf(buf, len, "%pI6%%%u", &sin6->sin6_addr,
+				sin6->sin6_scope_id);
+	else
+		snprintf(buf, len, "%pI6", &sin6->sin6_addr);
+}
+
+static void nsm_display_address(const struct sockaddr *sap,
+				char *buf, const size_t len)
+{
+	switch (sap->sa_family) {
+	case AF_INET:
+		nsm_display_ipv4_address(sap, buf, len);
+		break;
+	case AF_INET6:
+		nsm_display_ipv6_address(sap, buf, len);
+		break;
+	default:
+		snprintf(buf, len, "unsupported address family");
+		break;
+	}
+}
+
+static struct rpc_clnt *nsm_create(void)
+{
+	struct sockaddr_in sin = {
+		.sin_family		= AF_INET,
+		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
+	};
+	struct rpc_create_args args = {
+		.protocol		= XPRT_TRANSPORT_UDP,
+		.address		= (struct sockaddr *)&sin,
+		.addrsize		= sizeof(sin),
+		.servername		= "rpc.statd",
+		.program		= &nsm_program,
+		.version		= NSM_VERSION,
+		.authflavor		= RPC_AUTH_NULL,
+	};
+
+	return rpc_create(&args);
+}
+
+static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res)
 {
 	struct rpc_clnt	*clnt;
 	int		status;
-	struct nsm_args	args;
+	struct nsm_args args = {
+		.priv		= &nsm->sm_priv,
+		.prog		= NLM_PROGRAM,
+		.vers		= 3,
+		.proc		= NLMPROC_NSM_NOTIFY,
+		.mon_name	= nsm->sm_mon_name,
+	};
 	struct rpc_message msg = {
 		.rpc_argp	= &args,
 		.rpc_resp	= res,
@@ -46,22 +134,18 @@
 	clnt = nsm_create();
 	if (IS_ERR(clnt)) {
 		status = PTR_ERR(clnt);
+		dprintk("lockd: failed to create NSM upcall transport, "
+				"status=%d\n", status);
 		goto out;
 	}
 
-	memset(&args, 0, sizeof(args));
-	args.mon_name = nsm->sm_name;
-	args.addr = nsm_addr_in(nsm)->sin_addr.s_addr;
-	args.prog = NLM_PROGRAM;
-	args.vers = 3;
-	args.proc = NLMPROC_NSM_NOTIFY;
 	memset(res, 0, sizeof(*res));
 
 	msg.rpc_proc = &clnt->cl_procinfo[proc];
 	status = rpc_call_sync(clnt, &msg, 0);
 	if (status < 0)
-		printk(KERN_DEBUG "nsm_mon_unmon: rpc failed, status=%d\n",
-			status);
+		dprintk("lockd: NSM upcall RPC failed, status=%d\n",
+				status);
 	else
 		status = 0;
 	rpc_shutdown_client(clnt);
@@ -69,82 +153,272 @@
 	return status;
 }
 
-/*
- * Set up monitoring of a remote host
+/**
+ * nsm_monitor - Notify a peer in case we reboot
+ * @host: pointer to nlm_host of peer to notify
+ *
+ * If this peer is not already monitored, this function sends an
+ * upcall to the local rpc.statd to record the name/address of
+ * the peer to notify in case we reboot.
+ *
+ * Returns zero if the peer is monitored by the local rpc.statd;
+ * otherwise a negative errno value is returned.
  */
-int
-nsm_monitor(struct nlm_host *host)
+int nsm_monitor(const struct nlm_host *host)
 {
 	struct nsm_handle *nsm = host->h_nsmhandle;
 	struct nsm_res	res;
 	int		status;
 
-	dprintk("lockd: nsm_monitor(%s)\n", host->h_name);
-	BUG_ON(nsm == NULL);
+	dprintk("lockd: nsm_monitor(%s)\n", nsm->sm_name);
 
 	if (nsm->sm_monitored)
 		return 0;
 
-	status = nsm_mon_unmon(nsm, SM_MON, &res);
+	/*
+	 * Choose whether to record the caller_name or IP address of
+	 * this peer in the local rpc.statd's database.
+	 */
+	nsm->sm_mon_name = nsm_use_hostnames ? nsm->sm_name : nsm->sm_addrbuf;
 
-	if (status < 0 || res.status != 0)
-		printk(KERN_NOTICE "lockd: cannot monitor %s\n", host->h_name);
+	status = nsm_mon_unmon(nsm, NSMPROC_MON, &res);
+	if (res.status != 0)
+		status = -EIO;
+	if (status < 0)
+		printk(KERN_NOTICE "lockd: cannot monitor %s\n", nsm->sm_name);
 	else
 		nsm->sm_monitored = 1;
 	return status;
 }
 
-/*
- * Cease to monitor remote host
+/**
+ * nsm_unmonitor - Unregister peer notification
+ * @host: pointer to nlm_host of peer to stop monitoring
+ *
+ * If this peer is monitored, this function sends an upcall to
+ * tell the local rpc.statd not to send this peer a notification
+ * when we reboot.
  */
-int
-nsm_unmonitor(struct nlm_host *host)
+void nsm_unmonitor(const struct nlm_host *host)
 {
 	struct nsm_handle *nsm = host->h_nsmhandle;
 	struct nsm_res	res;
-	int		status = 0;
-
-	if (nsm == NULL)
-		return 0;
-	host->h_nsmhandle = NULL;
+	int status;
 
 	if (atomic_read(&nsm->sm_count) == 1
 	 && nsm->sm_monitored && !nsm->sm_sticky) {
-		dprintk("lockd: nsm_unmonitor(%s)\n", host->h_name);
+		dprintk("lockd: nsm_unmonitor(%s)\n", nsm->sm_name);
 
-		status = nsm_mon_unmon(nsm, SM_UNMON, &res);
+		status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res);
+		if (res.status != 0)
+			status = -EIO;
 		if (status < 0)
 			printk(KERN_NOTICE "lockd: cannot unmonitor %s\n",
-					host->h_name);
+					nsm->sm_name);
 		else
 			nsm->sm_monitored = 0;
 	}
-	nsm_release(nsm);
-	return status;
+}
+
+static struct nsm_handle *nsm_lookup_hostname(const char *hostname,
+					      const size_t len)
+{
+	struct nsm_handle *nsm;
+
+	list_for_each_entry(nsm, &nsm_handles, sm_link)
+		if (strlen(nsm->sm_name) == len &&
+		    memcmp(nsm->sm_name, hostname, len) == 0)
+			return nsm;
+	return NULL;
+}
+
+static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap)
+{
+	struct nsm_handle *nsm;
+
+	list_for_each_entry(nsm, &nsm_handles, sm_link)
+		if (nlm_cmp_addr(nsm_addr(nsm), sap))
+			return nsm;
+	return NULL;
+}
+
+static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv)
+{
+	struct nsm_handle *nsm;
+
+	list_for_each_entry(nsm, &nsm_handles, sm_link)
+		if (memcmp(nsm->sm_priv.data, priv->data,
+					sizeof(priv->data)) == 0)
+			return nsm;
+	return NULL;
 }
 
 /*
- * Create NSM client for the local host
+ * Construct a unique cookie to match this nsm_handle to this monitored
+ * host.  It is passed to the local rpc.statd via NSMPROC_MON, and
+ * returned via NLMPROC_SM_NOTIFY, in the "priv" field of these
+ * requests.
+ *
+ * The NSM protocol requires that these cookies be unique while the
+ * system is running.  We prefer a stronger requirement of making them
+ * unique across reboots.  If user space bugs cause a stale cookie to
+ * be sent to the kernel, it could cause the wrong host to lose its
+ * lock state if cookies were not unique across reboots.
+ *
+ * The cookies are exposed only to local user space via loopback.  They
+ * do not appear on the physical network.  If we want greater security
+ * for some reason, nsm_init_private() could perform a one-way hash to
+ * obscure the contents of the cookie.
  */
-static struct rpc_clnt *
-nsm_create(void)
+static void nsm_init_private(struct nsm_handle *nsm)
 {
-	struct sockaddr_in	sin = {
-		.sin_family	= AF_INET,
-		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
-		.sin_port	= 0,
-	};
-	struct rpc_create_args args = {
-		.protocol	= XPRT_TRANSPORT_UDP,
-		.address	= (struct sockaddr *)&sin,
-		.addrsize	= sizeof(sin),
-		.servername	= "localhost",
-		.program	= &nsm_program,
-		.version	= SM_VERSION,
-		.authflavor	= RPC_AUTH_NULL,
-	};
+	u64 *p = (u64 *)&nsm->sm_priv.data;
+	struct timespec ts;
 
-	return rpc_create(&args);
+	ktime_get_ts(&ts);
+	*p++ = timespec_to_ns(&ts);
+	*p = (unsigned long)nsm;
+}
+
+static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
+					    const size_t salen,
+					    const char *hostname,
+					    const size_t hostname_len)
+{
+	struct nsm_handle *new;
+
+	new = kzalloc(sizeof(*new) + hostname_len + 1, GFP_KERNEL);
+	if (unlikely(new == NULL))
+		return NULL;
+
+	atomic_set(&new->sm_count, 1);
+	new->sm_name = (char *)(new + 1);
+	memcpy(nsm_addr(new), sap, salen);
+	new->sm_addrlen = salen;
+	nsm_init_private(new);
+	nsm_display_address((const struct sockaddr *)&new->sm_addr,
+				new->sm_addrbuf, sizeof(new->sm_addrbuf));
+	memcpy(new->sm_name, hostname, hostname_len);
+	new->sm_name[hostname_len] = '\0';
+
+	return new;
+}
+
+/**
+ * nsm_get_handle - Find or create a cached nsm_handle
+ * @sap: pointer to socket address of handle to find
+ * @salen: length of socket address
+ * @hostname: pointer to C string containing hostname to find
+ * @hostname_len: length of C string
+ *
+ * Behavior is modulated by the global nsm_use_hostnames variable.
+ *
+ * Returns a cached nsm_handle after bumping its ref count, or
+ * returns a fresh nsm_handle if a handle that matches @sap and/or
+ * @hostname cannot be found in the handle cache.  Returns NULL if
+ * an error occurs.
+ */
+struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
+				  const size_t salen, const char *hostname,
+				  const size_t hostname_len)
+{
+	struct nsm_handle *cached, *new = NULL;
+
+	if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
+		if (printk_ratelimit()) {
+			printk(KERN_WARNING "Invalid hostname \"%.*s\" "
+					    "in NFS lock request\n",
+				(int)hostname_len, hostname);
+		}
+		return NULL;
+	}
+
+retry:
+	spin_lock(&nsm_lock);
+
+	if (nsm_use_hostnames && hostname != NULL)
+		cached = nsm_lookup_hostname(hostname, hostname_len);
+	else
+		cached = nsm_lookup_addr(sap);
+
+	if (cached != NULL) {
+		atomic_inc(&cached->sm_count);
+		spin_unlock(&nsm_lock);
+		kfree(new);
+		dprintk("lockd: found nsm_handle for %s (%s), "
+				"cnt %d\n", cached->sm_name,
+				cached->sm_addrbuf,
+				atomic_read(&cached->sm_count));
+		return cached;
+	}
+
+	if (new != NULL) {
+		list_add(&new->sm_link, &nsm_handles);
+		spin_unlock(&nsm_lock);
+		dprintk("lockd: created nsm_handle for %s (%s)\n",
+				new->sm_name, new->sm_addrbuf);
+		return new;
+	}
+
+	spin_unlock(&nsm_lock);
+
+	new = nsm_create_handle(sap, salen, hostname, hostname_len);
+	if (unlikely(new == NULL))
+		return NULL;
+	goto retry;
+}
+
+/**
+ * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle
+ * @info: pointer to NLMPROC_SM_NOTIFY arguments
+ *
+ * Returns a matching nsm_handle if found in the nsm cache; the returned
+ * nsm_handle's reference count is bumped and sm_monitored is cleared.
+ * Otherwise returns NULL if some error occurred.
+ */
+struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info)
+{
+	struct nsm_handle *cached;
+
+	spin_lock(&nsm_lock);
+
+	cached = nsm_lookup_priv(&info->priv);
+	if (unlikely(cached == NULL)) {
+		spin_unlock(&nsm_lock);
+		dprintk("lockd: never saw rebooted peer '%.*s' before\n",
+				info->len, info->mon);
+		return cached;
+	}
+
+	atomic_inc(&cached->sm_count);
+	spin_unlock(&nsm_lock);
+
+	/*
+	 * During subsequent lock activity, force a fresh
+	 * notification to be set up for this host.
+	 */
+	cached->sm_monitored = 0;
+
+	dprintk("lockd: host %s (%s) rebooted, cnt %d\n",
+			cached->sm_name, cached->sm_addrbuf,
+			atomic_read(&cached->sm_count));
+	return cached;
+}
+
+/**
+ * nsm_release - Release an NSM handle
+ * @nsm: pointer to handle to be released
+ *
+ */
+void nsm_release(struct nsm_handle *nsm)
+{
+	if (atomic_dec_and_lock(&nsm->sm_count, &nsm_lock)) {
+		list_del(&nsm->sm_link);
+		spin_unlock(&nsm_lock);
+		dprintk("lockd: destroyed nsm_handle for %s (%s)\n",
+				nsm->sm_name, nsm->sm_addrbuf);
+		kfree(nsm);
+	}
 }
 
 /*
@@ -154,127 +428,132 @@
  * Status Monitor wire protocol.
  */
 
-static __be32 *xdr_encode_nsm_string(__be32 *p, char *string)
+static int encode_nsm_string(struct xdr_stream *xdr, const char *string)
 {
-	size_t len = strlen(string);
+	const u32 len = strlen(string);
+	__be32 *p;
 
-	if (len > SM_MAXSTRLEN)
-		len = SM_MAXSTRLEN;
-	return xdr_encode_opaque(p, string, len);
+	if (unlikely(len > SM_MAXSTRLEN))
+		return -EIO;
+	p = xdr_reserve_space(xdr, sizeof(u32) + len);
+	if (unlikely(p == NULL))
+		return -EIO;
+	xdr_encode_opaque(p, string, len);
+	return 0;
 }
 
 /*
  * "mon_name" specifies the host to be monitored.
- *
- * Linux uses a text version of the IP address of the remote
- * host as the host identifier (the "mon_name" argument).
- *
- * Linux statd always looks up the canonical hostname first for
- * whatever remote hostname it receives, so this works alright.
  */
-static __be32 *xdr_encode_mon_name(__be32 *p, struct nsm_args *argp)
+static int encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
-	char	buffer[XDR_ADDRBUF_LEN + 1];
-	char	*name = argp->mon_name;
-
-	if (!nsm_use_hostnames) {
-		snprintf(buffer, XDR_ADDRBUF_LEN,
-			 "%pI4", &argp->addr);
-		name = buffer;
-	}
-
-	return xdr_encode_nsm_string(p, name);
+	return encode_nsm_string(xdr, argp->mon_name);
 }
 
 /*
  * The "my_id" argument specifies the hostname and RPC procedure
  * to be called when the status manager receives notification
- * (via the SM_NOTIFY call) that the state of host "mon_name"
+ * (via the NLMPROC_SM_NOTIFY call) that the state of host "mon_name"
  * has changed.
  */
-static __be32 *xdr_encode_my_id(__be32 *p, struct nsm_args *argp)
+static int encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
-	p = xdr_encode_nsm_string(p, utsname()->nodename);
-	if (!p)
-		return ERR_PTR(-EIO);
+	int status;
+	__be32 *p;
 
+	status = encode_nsm_string(xdr, utsname()->nodename);
+	if (unlikely(status != 0))
+		return status;
+	p = xdr_reserve_space(xdr, 3 * sizeof(u32));
+	if (unlikely(p == NULL))
+		return -EIO;
 	*p++ = htonl(argp->prog);
 	*p++ = htonl(argp->vers);
 	*p++ = htonl(argp->proc);
-
-	return p;
+	return 0;
 }
 
 /*
  * The "mon_id" argument specifies the non-private arguments
- * of an SM_MON or SM_UNMON call.
+ * of an NSMPROC_MON or NSMPROC_UNMON call.
  */
-static __be32 *xdr_encode_mon_id(__be32 *p, struct nsm_args *argp)
+static int encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
-	p = xdr_encode_mon_name(p, argp);
-	if (!p)
-		return ERR_PTR(-EIO);
+	int status;
 
-	return xdr_encode_my_id(p, argp);
+	status = encode_mon_name(xdr, argp);
+	if (unlikely(status != 0))
+		return status;
+	return encode_my_id(xdr, argp);
 }
 
 /*
  * The "priv" argument may contain private information required
- * by the SM_MON call. This information will be supplied in the
- * SM_NOTIFY call.
- *
- * Linux provides the raw IP address of the monitored host,
- * left in network byte order.
+ * by the NSMPROC_MON call. This information will be supplied in the
+ * NLMPROC_SM_NOTIFY call.
  */
-static __be32 *xdr_encode_priv(__be32 *p, struct nsm_args *argp)
+static int encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
-	*p++ = argp->addr;
-	*p++ = 0;
-	*p++ = 0;
-	*p++ = 0;
+	__be32 *p;
 
-	return p;
-}
-
-static int
-xdr_encode_mon(struct rpc_rqst *rqstp, __be32 *p, struct nsm_args *argp)
-{
-	p = xdr_encode_mon_id(p, argp);
-	if (IS_ERR(p))
-		return PTR_ERR(p);
-
-	p = xdr_encode_priv(p, argp);
-	if (IS_ERR(p))
-		return PTR_ERR(p);
-
-	rqstp->rq_slen = xdr_adjust_iovec(rqstp->rq_svec, p);
+	p = xdr_reserve_space(xdr, SM_PRIV_SIZE);
+	if (unlikely(p == NULL))
+		return -EIO;
+	xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE);
 	return 0;
 }
 
-static int
-xdr_encode_unmon(struct rpc_rqst *rqstp, __be32 *p, struct nsm_args *argp)
+static int xdr_enc_mon(struct rpc_rqst *req, __be32 *p,
+		       const struct nsm_args *argp)
 {
-	p = xdr_encode_mon_id(p, argp);
-	if (IS_ERR(p))
-		return PTR_ERR(p);
-	rqstp->rq_slen = xdr_adjust_iovec(rqstp->rq_svec, p);
-	return 0;
+	struct xdr_stream xdr;
+	int status;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	status = encode_mon_id(&xdr, argp);
+	if (unlikely(status))
+		return status;
+	return encode_priv(&xdr, argp);
 }
 
-static int
-xdr_decode_stat_res(struct rpc_rqst *rqstp, __be32 *p, struct nsm_res *resp)
+static int xdr_enc_unmon(struct rpc_rqst *req, __be32 *p,
+			 const struct nsm_args *argp)
 {
+	struct xdr_stream xdr;
+
+	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	return encode_mon_id(&xdr, argp);
+}
+
+static int xdr_dec_stat_res(struct rpc_rqst *rqstp, __be32 *p,
+			    struct nsm_res *resp)
+{
+	struct xdr_stream xdr;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	p = xdr_inline_decode(&xdr, 2 * sizeof(u32));
+	if (unlikely(p == NULL))
+		return -EIO;
 	resp->status = ntohl(*p++);
-	resp->state = ntohl(*p++);
-	dprintk("nsm: xdr_decode_stat_res status %d state %d\n",
+	resp->state = ntohl(*p);
+
+	dprintk("lockd: xdr_dec_stat_res status %d state %d\n",
 			resp->status, resp->state);
 	return 0;
 }
 
-static int
-xdr_decode_stat(struct rpc_rqst *rqstp, __be32 *p, struct nsm_res *resp)
+static int xdr_dec_stat(struct rpc_rqst *rqstp, __be32 *p,
+			struct nsm_res *resp)
 {
-	resp->state = ntohl(*p++);
+	struct xdr_stream xdr;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	p = xdr_inline_decode(&xdr, sizeof(u32));
+	if (unlikely(p == NULL))
+		return -EIO;
+	resp->state = ntohl(*p);
+
+	dprintk("lockd: xdr_dec_stat state %d\n", resp->state);
 	return 0;
 }
 
@@ -288,22 +567,22 @@
 #define SM_unmonres_sz	1
 
 static struct rpc_procinfo	nsm_procedures[] = {
-[SM_MON] = {
-		.p_proc		= SM_MON,
-		.p_encode	= (kxdrproc_t) xdr_encode_mon,
-		.p_decode	= (kxdrproc_t) xdr_decode_stat_res,
+[NSMPROC_MON] = {
+		.p_proc		= NSMPROC_MON,
+		.p_encode	= (kxdrproc_t)xdr_enc_mon,
+		.p_decode	= (kxdrproc_t)xdr_dec_stat_res,
 		.p_arglen	= SM_mon_sz,
 		.p_replen	= SM_monres_sz,
-		.p_statidx	= SM_MON,
+		.p_statidx	= NSMPROC_MON,
 		.p_name		= "MONITOR",
 	},
-[SM_UNMON] = {
-		.p_proc		= SM_UNMON,
-		.p_encode	= (kxdrproc_t) xdr_encode_unmon,
-		.p_decode	= (kxdrproc_t) xdr_decode_stat,
+[NSMPROC_UNMON] = {
+		.p_proc		= NSMPROC_UNMON,
+		.p_encode	= (kxdrproc_t)xdr_enc_unmon,
+		.p_decode	= (kxdrproc_t)xdr_dec_stat,
 		.p_arglen	= SM_mon_id_sz,
 		.p_replen	= SM_unmonres_sz,
-		.p_statidx	= SM_UNMON,
+		.p_statidx	= NSMPROC_UNMON,
 		.p_name		= "UNMONITOR",
 	},
 };
@@ -322,7 +601,7 @@
 
 static struct rpc_program	nsm_program = {
 		.name		= "statd",
-		.number		= SM_PROGRAM,
+		.number		= NSM_PROGRAM,
 		.nrvers		= ARRAY_SIZE(nsm_version),
 		.version	= nsm_version,
 		.stats		= &nsm_stats
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 252d801..64f1c31 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -35,7 +35,6 @@
 #include <linux/sunrpc/svcsock.h>
 #include <net/ip.h>
 #include <linux/lockd/lockd.h>
-#include <linux/lockd/sm_inter.h>
 #include <linux/nfs.h>
 
 #define NLMDBG_FACILITY		NLMDBG_SVC
@@ -54,13 +53,26 @@
 unsigned long			nlmsvc_timeout;
 
 /*
+ * If the kernel has IPv6 support available, always listen for
+ * both AF_INET and AF_INET6 requests.
+ */
+#if (defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) && \
+	defined(CONFIG_SUNRPC_REGISTER_V4)
+static const sa_family_t	nlmsvc_family = AF_INET6;
+#else	/* (CONFIG_IPV6 || CONFIG_IPV6_MODULE) && CONFIG_SUNRPC_REGISTER_V4 */
+static const sa_family_t	nlmsvc_family = AF_INET;
+#endif	/* (CONFIG_IPV6 || CONFIG_IPV6_MODULE) && CONFIG_SUNRPC_REGISTER_V4 */
+
+/*
  * These can be set at insmod time (useful for NFS as root filesystem),
  * and also changed through the sysctl interface.  -- Jamie Lokier, Aug 2003
  */
 static unsigned long		nlm_grace_period;
 static unsigned long		nlm_timeout = LOCKD_DFLT_TIMEO;
 static int			nlm_udpport, nlm_tcpport;
-int				nsm_use_hostnames = 0;
+
+/* RLIMIT_NOFILE defaults to 1024. That seems like a reasonable default here. */
+static unsigned int		nlm_max_connections = 1024;
 
 /*
  * Constants needed for the sysctl interface.
@@ -143,6 +155,9 @@
 		long timeout = MAX_SCHEDULE_TIMEOUT;
 		RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
 
+		/* update sv_maxconn if it has changed */
+		rqstp->rq_server->sv_maxconn = nlm_max_connections;
+
 		if (signalled()) {
 			flush_signals(current);
 			if (nlmsvc_ops) {
@@ -189,6 +204,19 @@
 	return 0;
 }
 
+static int create_lockd_listener(struct svc_serv *serv, char *name,
+				 unsigned short port)
+{
+	struct svc_xprt *xprt;
+
+	xprt = svc_find_xprt(serv, name, 0, 0);
+	if (xprt == NULL)
+		return svc_create_xprt(serv, name, port, SVC_SOCK_DEFAULTS);
+
+	svc_xprt_put(xprt);
+	return 0;
+}
+
 /*
  * Ensure there are active UDP and TCP listeners for lockd.
  *
@@ -202,29 +230,23 @@
 static int make_socks(struct svc_serv *serv)
 {
 	static int warned;
-	struct svc_xprt *xprt;
-	int err = 0;
+	int err;
 
-	xprt = svc_find_xprt(serv, "udp", 0, 0);
-	if (!xprt)
-		err = svc_create_xprt(serv, "udp", nlm_udpport,
-				      SVC_SOCK_DEFAULTS);
-	else
-		svc_xprt_put(xprt);
-	if (err >= 0) {
-		xprt = svc_find_xprt(serv, "tcp", 0, 0);
-		if (!xprt)
-			err = svc_create_xprt(serv, "tcp", nlm_tcpport,
-					      SVC_SOCK_DEFAULTS);
-		else
-			svc_xprt_put(xprt);
-	}
-	if (err >= 0) {
-		warned = 0;
-		err = 0;
-	} else if (warned++ == 0)
+	err = create_lockd_listener(serv, "udp", nlm_udpport);
+	if (err < 0)
+		goto out_err;
+
+	err = create_lockd_listener(serv, "tcp", nlm_tcpport);
+	if (err < 0)
+		goto out_err;
+
+	warned = 0;
+	return 0;
+
+out_err:
+	if (warned++ == 0)
 		printk(KERN_WARNING
-		       "lockd_up: makesock failed, error=%d\n", err);
+			"lockd_up: makesock failed, error=%d\n", err);
 	return err;
 }
 
@@ -252,7 +274,7 @@
 			"lockd_up: no pid, %d users??\n", nlmsvc_users);
 
 	error = -ENOMEM;
-	serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, AF_INET, NULL);
+	serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, nlmsvc_family, NULL);
 	if (!serv) {
 		printk(KERN_WARNING "lockd_up: create service failed\n");
 		goto out;
@@ -276,6 +298,7 @@
 	}
 
 	svc_sock_update_bufs(serv);
+	serv->sv_maxconn = nlm_max_connections;
 
 	nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
 	if (IS_ERR(nlmsvc_task)) {
@@ -485,6 +508,7 @@
 module_param_call(nlm_tcpport, param_set_port, param_get_int,
 		  &nlm_tcpport, 0644);
 module_param(nsm_use_hostnames, bool, 0644);
+module_param(nlm_max_connections, uint, 0644);
 
 /*
  * Initialising and terminating the module.
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 4dfdcbc..17250373 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -16,8 +16,6 @@
 #include <linux/nfsd/nfsd.h>
 #include <linux/lockd/lockd.h>
 #include <linux/lockd/share.h>
-#include <linux/lockd/sm_inter.h>
-
 
 #define NLMDBG_FACILITY		NLMDBG_CLIENT
 
@@ -419,8 +417,6 @@
 nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
 					      void	        *resp)
 {
-	struct sockaddr_in	saddr;
-
 	dprintk("lockd: SM_NOTIFY     called\n");
 
 	if (!nlm_privileged_requester(rqstp)) {
@@ -430,14 +426,7 @@
 		return rpc_system_err;
 	}
 
-	/* Obtain the host pointer for this NFS server and try to
-	 * reclaim all locks we hold on this server.
-	 */
-	memset(&saddr, 0, sizeof(saddr));
-	saddr.sin_family = AF_INET;
-	saddr.sin_addr.s_addr = argp->addr;
-	nlm_host_rebooted(&saddr, argp->mon, argp->len, argp->state);
-
+	nlm_host_rebooted(argp);
 	return rpc_success;
 }
 
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 3ca89e2..3688e55 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -16,8 +16,6 @@
 #include <linux/nfsd/nfsd.h>
 #include <linux/lockd/lockd.h>
 #include <linux/lockd/share.h>
-#include <linux/lockd/sm_inter.h>
-
 
 #define NLMDBG_FACILITY		NLMDBG_CLIENT
 
@@ -451,8 +449,6 @@
 nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
 					      void	        *resp)
 {
-	struct sockaddr_in	saddr;
-
 	dprintk("lockd: SM_NOTIFY     called\n");
 
 	if (!nlm_privileged_requester(rqstp)) {
@@ -462,14 +458,7 @@
 		return rpc_system_err;
 	}
 
-	/* Obtain the host pointer for this NFS server and try to
-	 * reclaim all locks we hold on this server.
-	 */
-	memset(&saddr, 0, sizeof(saddr));
-	saddr.sin_family = AF_INET;
-	saddr.sin_addr.s_addr = argp->addr;
-	nlm_host_rebooted(&saddr, argp->mon, argp->len, argp->state);
-
+	nlm_host_rebooted(argp);
 	return rpc_success;
 }
 
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 34c2766..9e4d6aab 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -17,7 +17,6 @@
 #include <linux/nfsd/export.h>
 #include <linux/lockd/lockd.h>
 #include <linux/lockd/share.h>
-#include <linux/lockd/sm_inter.h>
 #include <linux/module.h>
 #include <linux/mount.h>
 
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index 1f22629..0336f2b 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -16,7 +16,6 @@
 #include <linux/sunrpc/svc.h>
 #include <linux/sunrpc/stats.h>
 #include <linux/lockd/lockd.h>
-#include <linux/lockd/sm_inter.h>
 
 #define NLMDBG_FACILITY		NLMDBG_XDR
 
@@ -349,8 +348,8 @@
 	if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
 		return 0;
 	argp->state = ntohl(*p++);
-	/* Preserve the address in network byte order */
-	argp->addr = *p++;
+	memcpy(&argp->priv.data, p, sizeof(argp->priv.data));
+	p += XDR_QUADLEN(SM_PRIV_SIZE);
 	return xdr_argsize_check(rqstp, p);
 }
 
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index 50c493a..e1d5286 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -17,7 +17,6 @@
 #include <linux/sunrpc/svc.h>
 #include <linux/sunrpc/stats.h>
 #include <linux/lockd/lockd.h>
-#include <linux/lockd/sm_inter.h>
 
 #define NLMDBG_FACILITY		NLMDBG_XDR
 
@@ -356,8 +355,8 @@
 	if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
 		return 0;
 	argp->state = ntohl(*p++);
-	/* Preserve the address in network byte order */
-	argp->addr  = *p++;
+	memcpy(&argp->priv.data, p, sizeof(argp->priv.data));
+	p += XDR_QUADLEN(SM_PRIV_SIZE);
 	return xdr_argsize_check(rqstp, p);
 }
 
diff --git a/fs/locks.c b/fs/locks.c
index 46a2e12..ec3deea 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1564,7 +1564,7 @@
  *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
  *	processes read and write access respectively.
  */
-asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
+SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
 {
 	struct file *filp;
 	struct file_lock *lock;
diff --git a/fs/namei.c b/fs/namei.c
index f05bed2..bbc15c2 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1962,8 +1962,8 @@
 	}
 }
 
-asmlinkage long sys_mknodat(int dfd, const char __user *filename, int mode,
-				unsigned dev)
+SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
+		unsigned, dev)
 {
 	int error;
 	char *tmp;
@@ -2017,7 +2017,7 @@
 	return error;
 }
 
-asmlinkage long sys_mknod(const char __user *filename, int mode, unsigned dev)
+SYSCALL_DEFINE3(mknod, const char __user *, filename, int, mode, unsigned, dev)
 {
 	return sys_mknodat(AT_FDCWD, filename, mode, dev);
 }
@@ -2044,7 +2044,7 @@
 	return error;
 }
 
-asmlinkage long sys_mkdirat(int dfd, const char __user *pathname, int mode)
+SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
 {
 	int error = 0;
 	char * tmp;
@@ -2081,7 +2081,7 @@
 	return error;
 }
 
-asmlinkage long sys_mkdir(const char __user *pathname, int mode)
+SYSCALL_DEFINE2(mkdir, const char __user *, pathname, int, mode)
 {
 	return sys_mkdirat(AT_FDCWD, pathname, mode);
 }
@@ -2195,7 +2195,7 @@
 	return error;
 }
 
-asmlinkage long sys_rmdir(const char __user *pathname)
+SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
 {
 	return do_rmdir(AT_FDCWD, pathname);
 }
@@ -2291,7 +2291,7 @@
 	goto exit2;
 }
 
-asmlinkage long sys_unlinkat(int dfd, const char __user *pathname, int flag)
+SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
 {
 	if ((flag & ~AT_REMOVEDIR) != 0)
 		return -EINVAL;
@@ -2302,7 +2302,7 @@
 	return do_unlinkat(dfd, pathname);
 }
 
-asmlinkage long sys_unlink(const char __user *pathname)
+SYSCALL_DEFINE1(unlink, const char __user *, pathname)
 {
 	return do_unlinkat(AT_FDCWD, pathname);
 }
@@ -2328,8 +2328,8 @@
 	return error;
 }
 
-asmlinkage long sys_symlinkat(const char __user *oldname,
-			      int newdfd, const char __user *newname)
+SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
+		int, newdfd, const char __user *, newname)
 {
 	int error;
 	char *from;
@@ -2370,7 +2370,7 @@
 	return error;
 }
 
-asmlinkage long sys_symlink(const char __user *oldname, const char __user *newname)
+SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
 {
 	return sys_symlinkat(oldname, AT_FDCWD, newname);
 }
@@ -2422,9 +2422,8 @@
  * with linux 2.0, and to avoid hard-linking to directories
  * and other special files.  --ADM
  */
-asmlinkage long sys_linkat(int olddfd, const char __user *oldname,
-			   int newdfd, const char __user *newname,
-			   int flags)
+SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+		int, newdfd, const char __user *, newname, int, flags)
 {
 	struct dentry *new_dentry;
 	struct nameidata nd;
@@ -2473,7 +2472,7 @@
 	return error;
 }
 
-asmlinkage long sys_link(const char __user *oldname, const char __user *newname)
+SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname)
 {
 	return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
 }
@@ -2624,8 +2623,8 @@
 	return error;
 }
 
-asmlinkage long sys_renameat(int olddfd, const char __user *oldname,
-			     int newdfd, const char __user *newname)
+SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
+		int, newdfd, const char __user *, newname)
 {
 	struct dentry *old_dir, *new_dir;
 	struct dentry *old_dentry, *new_dentry;
@@ -2718,7 +2717,7 @@
 	return error;
 }
 
-asmlinkage long sys_rename(const char __user *oldname, const char __user *newname)
+SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
 {
 	return sys_renameat(AT_FDCWD, oldname, AT_FDCWD, newname);
 }
diff --git a/fs/namespace.c b/fs/namespace.c
index a40685d..228d8c4 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1128,7 +1128,7 @@
  * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
  */
 
-asmlinkage long sys_umount(char __user * name, int flags)
+SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
 {
 	struct path path;
 	int retval;
@@ -1160,7 +1160,7 @@
 /*
  *	The 2.0 compatible umount. No flags.
  */
-asmlinkage long sys_oldumount(char __user * name)
+SYSCALL_DEFINE1(oldumount, char __user *, name)
 {
 	return sys_umount(name, 0);
 }
@@ -2045,9 +2045,8 @@
 	return new_ns;
 }
 
-asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
-			  char __user * type, unsigned long flags,
-			  void __user * data)
+SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
+		char __user *, type, unsigned long, flags, void __user *, data)
 {
 	int retval;
 	unsigned long data_page;
@@ -2172,8 +2171,8 @@
  *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
  *    first.
  */
-asmlinkage long sys_pivot_root(const char __user * new_root,
-			       const char __user * put_old)
+SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+		const char __user *, put_old)
 {
 	struct vfsmount *tmp;
 	struct path new, old, parent_path, root_parent, root;
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index 6d04e05..f54360f 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -98,7 +98,7 @@
 {
 	s32		auth_type;
 	u32		object_name_len;
-	compat_caddr_t	object_name;	/* an userspace data, in most cases user name */
+	compat_caddr_t	object_name;	/* userspace data, in most cases a user name */
 };
 
 struct compat_ncp_fs_info_v2 {
diff --git a/fs/nfsctl.c b/fs/nfsctl.c
index b274519..8f9a205 100644
--- a/fs/nfsctl.c
+++ b/fs/nfsctl.c
@@ -86,8 +86,8 @@
 	},
 };
 
-long
-asmlinkage sys_nfsservctl(int cmd, struct nfsctl_arg __user *arg, void __user *res)
+SYSCALL_DEFINE3(nfsservctl, int, cmd, struct nfsctl_arg __user *, arg,
+		void __user *, res)
 {
 	struct file *file;
 	void __user *p = &arg->u;
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 0184fe9..c903e04 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -76,10 +76,10 @@
 
 	ret = set_groups(new, gi);
 	put_group_info(gi);
-	if (!ret)
+	if (ret < 0)
 		goto error;
 
-	if (new->uid)
+	if (new->fsuid)
 		new->cap_effective = cap_drop_nfsd_set(new->cap_effective);
 	else
 		new->cap_effective = cap_raise_nfsd_set(new->cap_effective,
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 6d7d8c0..c464181 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -53,9 +53,6 @@
 #define NFSPROC4_CB_NULL 0
 #define NFSPROC4_CB_COMPOUND 1
 
-/* declarations */
-static const struct rpc_call_ops nfs4_cb_null_ops;
-
 /* Index of predefined Linux callback client operations */
 
 enum {
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 669461e..9fa60a3 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -946,6 +946,11 @@
 			nfsd4_encode_operation(resp, op);
 			status = op->status;
 		}
+
+		dprintk("nfsv4 compound op %p opcnt %d #%d: %d: status %d\n",
+			args->ops, args->opcnt, resp->opcnt, op->opnum,
+			be32_to_cpu(status));
+
 		if (cstate->replay_owner) {
 			nfs4_put_stateowner(cstate->replay_owner);
 			cstate->replay_owner = NULL;
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 0f9d6ef..74f7b67 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -116,9 +116,9 @@
 
 	md5_to_hex(dname, cksum.data);
 
-	kfree(cksum.data);
 	status = nfs_ok;
 out:
+	kfree(cksum.data);
 	crypto_free_hash(desc.tfm);
 out_no_tfm:
 	return status;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 13e0e07..88db7d3 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -2416,6 +2416,26 @@
 #define LOCK_HASH_SIZE             (1 << LOCK_HASH_BITS)
 #define LOCK_HASH_MASK             (LOCK_HASH_SIZE - 1)
 
+static inline u64
+end_offset(u64 start, u64 len)
+{
+	u64 end;
+
+	end = start + len;
+	return end >= start ? end : NFS4_MAX_UINT64;
+}
+
+/* last octet in a range */
+static inline u64
+last_byte_offset(u64 start, u64 len)
+{
+	u64 end;
+
+	BUG_ON(!len);
+	end = start + len;
+	return end > start ? end - 1 : NFS4_MAX_UINT64;
+}
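
A few worked values may help (illustrative only; zero-length ranges are expected to be rejected earlier by check_lock_length(), which is why the BUG_ON(!len) holds):

	/*
	 *   end_offset(100, 50)                        == 150
	 *   last_byte_offset(100, 50)                  == 149   (bytes 100..149)
	 *   end_offset(NFS4_MAX_UINT64 - 4, 10)        == NFS4_MAX_UINT64  (sum wraps)
	 *   last_byte_offset(NFS4_MAX_UINT64 - 4, 10)  == NFS4_MAX_UINT64  (sum wraps)
	 */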
+
 #define lockownerid_hashval(id) \
         ((id) & LOCK_HASH_MASK)
 
@@ -2435,13 +2455,13 @@
 static struct nfs4_stateid *
 find_stateid(stateid_t *stid, int flags)
 {
-	struct nfs4_stateid *local = NULL;
+	struct nfs4_stateid *local;
 	u32 st_id = stid->si_stateownerid;
 	u32 f_id = stid->si_fileid;
 	unsigned int hashval;
 
 	dprintk("NFSD: find_stateid flags 0x%x\n",flags);
-	if ((flags & LOCK_STATE) || (flags & RD_STATE) || (flags & WR_STATE)) {
+	if (flags & (LOCK_STATE | RD_STATE | WR_STATE)) {
 		hashval = stateid_hashval(st_id, f_id);
 		list_for_each_entry(local, &lockstateid_hashtbl[hashval], st_hash) {
 			if ((local->st_stateid.si_stateownerid == st_id) &&
@@ -2449,7 +2469,8 @@
 				return local;
 		}
 	} 
-	if ((flags & OPEN_STATE) || (flags & RD_STATE) || (flags & WR_STATE)) {
+
+	if (flags & (OPEN_STATE | RD_STATE | WR_STATE)) {
 		hashval = stateid_hashval(st_id, f_id);
 		list_for_each_entry(local, &stateid_hashtbl[hashval], st_hash) {
 			if ((local->st_stateid.si_stateownerid == st_id) &&
@@ -2518,8 +2539,8 @@
 		deny->ld_clientid.cl_id = 0;
 	}
 	deny->ld_start = fl->fl_start;
-	deny->ld_length = ~(u64)0;
-	if (fl->fl_end != ~(u64)0)
+	deny->ld_length = NFS4_MAX_UINT64;
+	if (fl->fl_end != NFS4_MAX_UINT64)
 		deny->ld_length = fl->fl_end - fl->fl_start + 1;        
 	deny->ld_type = NFS4_READ_LT;
 	if (fl->fl_type != F_RDLCK)
@@ -2616,7 +2637,7 @@
 static int
 check_lock_length(u64 offset, u64 length)
 {
-	return ((length == 0)  || ((length != ~(u64)0) &&
+	return ((length == 0)  || ((length != NFS4_MAX_UINT64) &&
 	     LOFF_OVERFLOW(offset, length)));
 }
 
@@ -2736,11 +2757,7 @@
 	file_lock.fl_lmops = &nfsd_posix_mng_ops;
 
 	file_lock.fl_start = lock->lk_offset;
-	if ((lock->lk_length == ~(u64)0) || 
-			LOFF_OVERFLOW(lock->lk_offset, lock->lk_length))
-		file_lock.fl_end = ~(u64)0;
-	else
-		file_lock.fl_end = lock->lk_offset + lock->lk_length - 1;
+	file_lock.fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
 	nfs4_transform_lock_offset(&file_lock);
 
 	/*
@@ -2781,6 +2798,25 @@
 }
 
 /*
+ * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
+ * so we do a temporary open here just to get an open file to pass to
+ * vfs_test_lock.  (Arguably, test_lock should be done with an
+ * inode operation.)
+ */
+static int nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
+{
+	struct file *file;
+	int err;
+
+	err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
+	if (err)
+		return err;
+	err = vfs_test_lock(file, lock);
+	nfsd_close(file);
+	return err;
+}
+
+/*
  * LOCKT operation
  */
 __be32
@@ -2788,7 +2824,6 @@
 	    struct nfsd4_lockt *lockt)
 {
 	struct inode *inode;
-	struct file file;
 	struct file_lock file_lock;
 	int error;
 	__be32 status;
@@ -2839,23 +2874,12 @@
 	file_lock.fl_lmops = &nfsd_posix_mng_ops;
 
 	file_lock.fl_start = lockt->lt_offset;
-	if ((lockt->lt_length == ~(u64)0) || LOFF_OVERFLOW(lockt->lt_offset, lockt->lt_length))
-		file_lock.fl_end = ~(u64)0;
-	else
-		file_lock.fl_end = lockt->lt_offset + lockt->lt_length - 1;
+	file_lock.fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
 
 	nfs4_transform_lock_offset(&file_lock);
 
-	/* vfs_test_lock uses the struct file _only_ to resolve the inode.
-	 * since LOCKT doesn't require an OPEN, and therefore a struct
-	 * file may not exist, pass vfs_test_lock a struct file with
-	 * only the dentry:inode set.
-	 */
-	memset(&file, 0, sizeof (struct file));
-	file.f_path.dentry = cstate->current_fh.fh_dentry;
-
 	status = nfs_ok;
-	error = vfs_test_lock(&file, &file_lock);
+	error = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
 	if (error) {
 		status = nfserrno(error);
 		goto out;
@@ -2906,10 +2930,7 @@
 	file_lock.fl_lmops = &nfsd_posix_mng_ops;
 	file_lock.fl_start = locku->lu_offset;
 
-	if ((locku->lu_length == ~(u64)0) || LOFF_OVERFLOW(locku->lu_offset, locku->lu_length))
-		file_lock.fl_end = ~(u64)0;
-	else
-		file_lock.fl_end = locku->lu_offset + locku->lu_length - 1;
+	file_lock.fl_end = last_byte_offset(locku->lu_offset, locku->lu_length);
 	nfs4_transform_lock_offset(&file_lock);
 
 	/*
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index afcdf4b..f65953b 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1,6 +1,4 @@
 /*
- *  fs/nfs/nfs4xdr.c
- *
  *  Server-side XDR for NFSv4
  *
  *  Copyright (c) 2002 The Regents of the University of Michigan.
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 77d7b8c..3d93b20 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -84,6 +84,8 @@
 static ssize_t write_getfd(struct file *file, char *buf, size_t size);
 static ssize_t write_getfs(struct file *file, char *buf, size_t size);
 static ssize_t write_filehandle(struct file *file, char *buf, size_t size);
+static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size);
+static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size);
 static ssize_t write_threads(struct file *file, char *buf, size_t size);
 static ssize_t write_pool_threads(struct file *file, char *buf, size_t size);
 static ssize_t write_versions(struct file *file, char *buf, size_t size);
@@ -94,9 +96,6 @@
 static ssize_t write_recoverydir(struct file *file, char *buf, size_t size);
 #endif
 
-static ssize_t failover_unlock_ip(struct file *file, char *buf, size_t size);
-static ssize_t failover_unlock_fs(struct file *file, char *buf, size_t size);
-
 static ssize_t (*write_op[])(struct file *, char *, size_t) = {
 	[NFSD_Svc] = write_svc,
 	[NFSD_Add] = write_add,
@@ -106,8 +105,8 @@
 	[NFSD_Getfd] = write_getfd,
 	[NFSD_Getfs] = write_getfs,
 	[NFSD_Fh] = write_filehandle,
-	[NFSD_FO_UnlockIP] = failover_unlock_ip,
-	[NFSD_FO_UnlockFS] = failover_unlock_fs,
+	[NFSD_FO_UnlockIP] = write_unlock_ip,
+	[NFSD_FO_UnlockFS] = write_unlock_fs,
 	[NFSD_Threads] = write_threads,
 	[NFSD_Pool_Threads] = write_pool_threads,
 	[NFSD_Versions] = write_versions,
@@ -176,10 +175,24 @@
 /*----------------------------------------------------------------------------*/
 /*
  * payload - write methods
- * If the method has a response, the response should be put in buf,
- * and the length returned.  Otherwise return 0 or and -error.
  */
 
+/**
+ * write_svc - Start kernel's NFSD server
+ *
+ * Deprecated.  /proc/fs/nfsd/threads is preferred.
+ * Function remains to support old versions of nfs-utils.
+ *
+ * Input:
+ *			buf:	struct nfsctl_svc
+ *				svc_port:	port number of this
+ *						server's listener
+ *				svc_nthreads:	number of threads to start
+ *			size:	size in bytes of passed in nfsctl_svc
+ * Output:
+ *	On success:	returns zero
+ *	On error:	return code is negative errno value
+ */
 static ssize_t write_svc(struct file *file, char *buf, size_t size)
 {
 	struct nfsctl_svc *data;
@@ -189,6 +202,30 @@
 	return nfsd_svc(data->svc_port, data->svc_nthreads);
 }
 
+/**
+ * write_add - Add or modify client entry in auth unix cache
+ *
+ * Deprecated.  /proc/net/rpc/auth.unix.ip is preferred.
+ * Function remains to support old versions of nfs-utils.
+ *
+ * Input:
+ *			buf:	struct nfsctl_client
+ *				cl_ident:	'\0'-terminated C string
+ *						containing domain name
+ *						of client
+ *				cl_naddr:	no. of items in cl_addrlist
+ *				cl_addrlist:	array of client addresses
+ *				cl_fhkeytype:	ignored
+ *				cl_fhkeylen:	ignored
+ *				cl_fhkey:	ignored
+ *			size:	size in bytes of passed in nfsctl_client
+ * Output:
+ *	On success:	returns zero
+ *	On error:	return code is negative errno value
+ *
+ * Note: Only AF_INET client addresses are passed in, since
+ * nfsctl_client.cl_addrlist contains only in_addr fields for addresses.
+ */
 static ssize_t write_add(struct file *file, char *buf, size_t size)
 {
 	struct nfsctl_client *data;
@@ -198,6 +235,30 @@
 	return exp_addclient(data);
 }
 
+/**
+ * write_del - Remove client from auth unix cache
+ *
+ * Deprecated.  /proc/net/rpc/auth.unix.ip is preferred.
+ * Function remains to support old versions of nfs-utils.
+ *
+ * Input:
+ *			buf:	struct nfsctl_client
+ *				cl_ident:	'\0'-terminated C string
+ *						containing domain name
+ *						of client
+ *				cl_naddr:	ignored
+ *				cl_addrlist:	ignored
+ *				cl_fhkeytype:	ignored
+ *				cl_fhkeylen:	ignored
+ *				cl_fhkey:	ignored
+ *			size:	size in bytes of passed in nfsctl_client
+ * Output:
+ *	On success:	returns zero
+ *	On error:	return code is negative errno value
+ *
+ * Note: Only AF_INET client addresses are passed in, since
+ * nfsctl_client.cl_addrlist contains only in_addr fields for addresses.
+ */
 static ssize_t write_del(struct file *file, char *buf, size_t size)
 {
 	struct nfsctl_client *data;
@@ -207,6 +268,33 @@
 	return exp_delclient(data);
 }
 
+/**
+ * write_export - Export part or all of a local file system
+ *
+ * Deprecated.  /proc/net/rpc/{nfsd.export,nfsd.fh} are preferred.
+ * Function remains to support old versions of nfs-utils.
+ *
+ * Input:
+ *			buf:	struct nfsctl_export
+ *				ex_client:	'\0'-terminated C string
+ *						containing domain name
+ *						of client allowed to access
+ *						this export
+ *				ex_path:	'\0'-terminated C string
+ *						containing pathname of
+ *						directory in local file system
+ *				ex_dev:		fsid to use for this export
+ *				ex_ino:		ignored
+ *				ex_flags:	export flags for this export
+ *				ex_anon_uid:	UID to use for anonymous
+ *						requests
+ *				ex_anon_gid:	GID to use for anonymous
+ *						requests
+ *			size:	size in bytes of passed in nfsctl_export
+ * Output:
+ *	On success:	returns zero
+ *	On error:	return code is negative errno value
+ */
 static ssize_t write_export(struct file *file, char *buf, size_t size)
 {
 	struct nfsctl_export *data;
@@ -216,6 +304,31 @@
 	return exp_export(data);
 }
 
+/**
+ * write_unexport - Unexport a previously exported file system
+ *
+ * Deprecated.  /proc/net/rpc/{nfsd.export,nfsd.fh} are preferred.
+ * Function remains to support old versions of nfs-utils.
+ *
+ * Input:
+ *			buf:	struct nfsctl_export
+ *				ex_client:	'\0'-terminated C string
+ *						containing domain name
+ *						of client no longer allowed
+ *						to access this export
+ *				ex_path:	'\0'-terminated C string
+ *						containing pathname of
+ *						directory in local file system
+ *				ex_dev:		ignored
+ *				ex_ino:		ignored
+ *				ex_flags:	ignored
+ *				ex_anon_uid:	ignored
+ *				ex_anon_gid:	ignored
+ *			size:	size in bytes of passed in nfsctl_export
+ * Output:
+ *	On success:	returns zero
+ *	On error:	return code is negative errno value
+ */
 static ssize_t write_unexport(struct file *file, char *buf, size_t size)
 {
 	struct nfsctl_export *data;
@@ -226,6 +339,30 @@
 	return exp_unexport(data);
 }
 
+/**
+ * write_getfs - Get a variable-length NFS file handle by path
+ *
+ * Deprecated.  /proc/fs/nfsd/filehandle is preferred.
+ * Function remains to support old versions of nfs-utils.
+ *
+ * Input:
+ *			buf:	struct nfsctl_fsparm
+ *				gd_addr:	socket address of client
+ *				gd_path:	'\0'-terminated C string
+ *						containing pathname of
+ *						directory in local file system
+ *				gd_maxlen:	maximum size of returned file
+ *						handle
+ *			size:	size in bytes of passed in nfsctl_fsparm
+ * Output:
+ *	On success:	passed-in buffer filled with a knfsd_fh structure
+ *			(a variable-length raw NFS file handle);
+ *			return code is the size in bytes of the file handle
+ *	On error:	return code is negative errno value
+ *
+ * Note: Only AF_INET client addresses are passed in, since gd_addr
+ * is the same size as a struct sockaddr_in.
+ */
 static ssize_t write_getfs(struct file *file, char *buf, size_t size)
 {
 	struct nfsctl_fsparm *data;
@@ -265,6 +402,29 @@
 	return err;
 }
 
+/**
+ * write_getfd - Get a fixed-length NFS file handle by path (used by mountd)
+ *
+ * Deprecated.  /proc/fs/nfsd/filehandle is preferred.
+ * Function remains to support old versions of nfs-utils.
+ *
+ * Input:
+ *			buf:	struct nfsctl_fdparm
+ *				gd_addr:	socket address of client
+ *				gd_path:	'\0'-terminated C string
+ *						containing pathname of
+ *						directory in local file system
+ *				gd_version:	fdparm structure version
+ *			size:	size in bytes of passed in nfsctl_fdparm
+ * Output:
+ *	On success:	passed-in buffer filled with nfsctl_res
+ *			(a fixed-length raw NFS file handle);
+ *			return code is the size in bytes of the file handle
+ *	On error:	return code is negative errno value
+ *
+ * Note: Only AF_INET client addresses are passed in, since gd_addr
+ * is the same size as a struct sockaddr_in.
+ */
 static ssize_t write_getfd(struct file *file, char *buf, size_t size)
 {
 	struct nfsctl_fdparm *data;
@@ -309,7 +469,23 @@
 	return err;
 }
 
-static ssize_t failover_unlock_ip(struct file *file, char *buf, size_t size)
+/**
+ * write_unlock_ip - Release all locks used by a client
+ *
+ * Experimental.
+ *
+ * Input:
+ *			buf:	'\n'-terminated C string containing a
+ *				presentation format IPv4 address
+ *			size:	length of C string in @buf
+ * Output:
+ *	On success:	returns zero if all specified locks were released;
+ *			returns one if one or more locks were not released
+ *	On error:	return code is negative errno value
+ *
+ * Note: Only AF_INET client addresses are passed in
+ */
+static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size)
 {
 	struct sockaddr_in sin = {
 		.sin_family	= AF_INET,
@@ -339,7 +515,21 @@
 	return nlmsvc_unlock_all_by_ip((struct sockaddr *)&sin);
 }
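
As a usage illustration (an assumption, not part of this patch: the nfsd filesystem is mounted at the conventional /proc/fs/nfsd and the control file is named unlock_ip), a userspace caller releases a departed client's locks like this:

	/* Illustrative userspace helper, not part of this patch. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static int unlock_client(const char *ipv4_addr)
	{
		char buf[64];
		int fd, len, ret;

		/* write_unlock_ip() expects a '\n'-terminated address string */
		len = snprintf(buf, sizeof(buf), "%s\n", ipv4_addr);

		fd = open("/proc/fs/nfsd/unlock_ip", O_WRONLY);
		if (fd < 0)
			return -1;
		ret = (write(fd, buf, len) == len) ? 0 : -1;
		close(fd);
		return ret;
	}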
 
-static ssize_t failover_unlock_fs(struct file *file, char *buf, size_t size)
+/**
+ * write_unlock_fs - Release all locks on a local file system
+ *
+ * Experimental.
+ *
+ * Input:
+ *			buf:	'\n'-terminated C string containing the
+ *				absolute pathname of a local file system
+ *			size:	length of C string in @buf
+ * Output:
+ *	On success:	returns zero if all specified locks were released;
+ *			returns one if one or more locks were not released
+ *	On error:	return code is negative errno value
+ */
+static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size)
 {
 	struct path path;
 	char *fo_path;
@@ -360,21 +550,44 @@
 	if (error)
 		return error;
 
+	/*
+	 * XXX: Needs better sanity checking.  Otherwise we could end up
+	 * releasing locks on the wrong file system.
+	 *
+	 * For example:
+	 * 1.  Does the path refer to a directory?
+	 * 2.  Is that directory a mount point, or
+	 * 3.  Is that directory the root of an exported file system?
+	 */
 	error = nlmsvc_unlock_all_by_sb(path.mnt->mnt_sb);
 
 	path_put(&path);
 	return error;
 }
 
+/**
+ * write_filehandle - Get a variable-length NFS file handle by path
+ *
+ * On input, the buffer contains a '\n'-terminated C string comprised of
+ * three alphanumeric words separated by whitespace.  The string may
+ * contain escape sequences.
+ *
+ * Input:
+ *			buf:
+ *				domain:		client domain name
+ *				path:		export pathname
+ *				maxsize:	numeric maximum size of
+ *						@buf
+ *			size:	length of C string in @buf
+ * Output:
+ *	On success:	passed-in buffer filled with '\n'-terminated C
+ *			string containing an ASCII hex text version
+ *			of the NFS file handle;
+ *			return code is the size in bytes of the string
+ *	On error:	return code is negative errno value
+ */
 static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
 {
-	/* request is:
-	 *   domain path maxsize
-	 * response is
-	 *   filehandle
-	 *
-	 * qword quoting is used, so filehandle will be \x....
-	 */
 	char *dname, *path;
 	int uninitialized_var(maxsize);
 	char *mesg = buf;
@@ -391,11 +604,13 @@
 
 	dname = mesg;
 	len = qword_get(&mesg, dname, size);
-	if (len <= 0) return -EINVAL;
+	if (len <= 0)
+		return -EINVAL;
 	
 	path = dname+len+1;
 	len = qword_get(&mesg, path, size);
-	if (len <= 0) return -EINVAL;
+	if (len <= 0)
+		return -EINVAL;
 
 	len = get_int(&mesg, &maxsize);
 	if (len)
@@ -419,17 +634,43 @@
 	if (len)
 		return len;
 	
-	mesg = buf; len = SIMPLE_TRANSACTION_LIMIT;
+	mesg = buf;
+	len = SIMPLE_TRANSACTION_LIMIT;
 	qword_addhex(&mesg, &len, (char*)&fh.fh_base, fh.fh_size);
 	mesg[-1] = '\n';
 	return mesg - buf;	
 }
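
For illustration (hypothetical domain and path values; the control file name filehandle and the /proc/fs/nfsd mount point are assumptions, not stated in this patch), the three-word request and hex reply described above look like this from userspace:

	/* Illustrative userspace sketch, not part of this patch. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static int get_filehandle(const char *domain, const char *path, int maxsize)
	{
		char buf[1024];
		int fd, len;

		/* "domain path maxsize", '\n'-terminated */
		len = snprintf(buf, sizeof(buf), "%s %s %d\n", domain, path, maxsize);

		fd = open("/proc/fs/nfsd/filehandle", O_RDWR);
		if (fd < 0)
			return -1;
		if (write(fd, buf, len) != len) {
			close(fd);
			return -1;
		}

		/* the reply is read back from the same transaction file:
		 * a '\n'-terminated "\x..." hex rendering of the file handle */
		len = read(fd, buf, sizeof(buf) - 1);
		close(fd);
		if (len <= 0)
			return -1;
		buf[len] = '\0';
		printf("%s", buf);
		return 0;
	}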
 
+/**
+ * write_threads - Start NFSD, or report the current number of running threads
+ *
+ * Input:
+ *			buf:		ignored
+ *			size:		zero
+ * Output:
+ *	On success:	passed-in buffer filled with '\n'-terminated C
+ *			string numeric value representing the number of
+ *			running NFSD threads;
+ *			return code is the size in bytes of the string
+ *	On error:	return code is zero
+ *
+ * OR
+ *
+ * Input:
+ *			buf:		C string containing an unsigned
+ *					integer value representing the
+ *					number of NFSD threads to start
+ *			size:		non-zero length of C string in @buf
+ * Output:
+ *	On success:	NFS service is started;
+ *			passed-in buffer filled with '\n'-terminated C
+ *			string numeric value representing the number of
+ *			running NFSD threads;
+ *			return code is the size in bytes of the string
+ *	On error:	return code is zero or a negative errno value
+ */
 static ssize_t write_threads(struct file *file, char *buf, size_t size)
 {
-	/* if size > 0, look for a number of threads and call nfsd_svc
-	 * then write out number of threads as reply
-	 */
 	char *mesg = buf;
 	int rv;
 	if (size > 0) {
@@ -437,9 +678,9 @@
 		rv = get_int(&mesg, &newthreads);
 		if (rv)
 			return rv;
-		if (newthreads <0)
+		if (newthreads < 0)
 			return -EINVAL;
-		rv = nfsd_svc(2049, newthreads);
+		rv = nfsd_svc(NFS_PORT, newthreads);
 		if (rv)
 			return rv;
 	}
@@ -447,6 +688,28 @@
 	return strlen(buf);
 }
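
A short userspace illustration (assuming the conventional /proc/fs/nfsd mount and the threads file name, neither of which is stated in this patch):

	/* Illustrative only, not part of this patch. */
	#include <stdio.h>

	static int set_nfsd_threads(int nthreads)
	{
		FILE *f = fopen("/proc/fs/nfsd/threads", "w");

		if (f == NULL)
			return -1;
		/* a positive count starts nfsd on NFS_PORT; see write_threads() */
		fprintf(f, "%d\n", nthreads);
		return fclose(f);
	}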
 
+/**
+ * write_pool_threads - Set or report the current number of threads per pool
+ *
+ * Input:
+ *			buf:		ignored
+ *			size:		zero
+ *
+ * OR
+ *
+ * Input:
+ * 			buf:		C string containing whitespace-
+ * 					separated unsigned integer values
+ *					representing the number of NFSD
+ *					threads to start in each pool
+ *			size:		non-zero length of C string in @buf
+ * Output:
+ *	On success:	passed-in buffer filled with '\n'-terminated C
+ *			string containing integer values representing the
+ *			number of NFSD threads in each pool;
+ *			return code is the size in bytes of the string
+ *	On error:	return code is zero or a negative errno value
+ */
 static ssize_t write_pool_threads(struct file *file, char *buf, size_t size)
 {
 	/* if size > 0, look for an array of number of threads per node
@@ -517,10 +780,6 @@
 
 static ssize_t __write_versions(struct file *file, char *buf, size_t size)
 {
-	/*
-	 * Format:
-	 *   [-/+]vers [-/+]vers ...
-	 */
 	char *mesg = buf;
 	char *vers, sign;
 	int len, num;
@@ -578,6 +837,38 @@
 	return len;
 }
 
+/**
+ * write_versions - Set or report the available NFS protocol versions
+ *
+ * Input:
+ *			buf:		ignored
+ *			size:		zero
+ * Output:
+ *	On success:	passed-in buffer filled with '\n'-terminated C
+ *			string containing positive or negative integer
+ *			values representing the current status of each
+ *			protocol version;
+ *			return code is the size in bytes of the string
+ *	On error:	return code is zero or a negative errno value
+ *
+ * OR
+ *
+ * Input:
+ * 			buf:		C string containing whitespace-
+ * 					separated positive or negative
+ * 					integer values representing NFS
+ * 					protocol versions to enable ("+n")
+ * 					or disable ("-n")
+ *			size:		non-zero length of C string in @buf
+ * Output:
+ *	On success:	status of zero or more protocol versions has
+ *			been updated; passed-in buffer filled with
+ *			'\n'-terminated C string containing positive
+ *			or negative integer values representing the
+ *			current status of each protocol version;
+ *			return code is the size in bytes of the string
+ *	On error:	return code is zero or a negative errno value
+ */
 static ssize_t write_versions(struct file *file, char *buf, size_t size)
 {
 	ssize_t rv;
@@ -687,6 +978,75 @@
 	return -EINVAL;
 }
 
+/**
+ * write_ports - Pass a socket file descriptor or transport name to listen on
+ *
+ * Input:
+ *			buf:		ignored
+ *			size:		zero
+ * Output:
+ *	On success:	passed-in buffer filled with a '\n'-terminated C
+ *			string containing a whitespace-separated list of
+ *			named NFSD listeners;
+ *			return code is the size in bytes of the string
+ *	On error:	return code is zero or a negative errno value
+ *
+ * OR
+ *
+ * Input:
+ *			buf:		C string containing an unsigned
+ *					integer value representing a bound
+ *					but unconnected socket that is to be
+ *					used as an NFSD listener
+ *			size:		non-zero length of C string in @buf
+ * Output:
+ *	On success:	NFS service is started;
+ *			passed-in buffer filled with a '\n'-terminated C
+ *			string containing a unique alphanumeric name of
+ *			the listener;
+ *			return code is the size in bytes of the string
+ *	On error:	return code is a negative errno value
+ *
+ * OR
+ *
+ * Input:
+ *			buf:		C string containing a "-" followed
+ *					by an integer value representing a
+ *					previously passed in socket file
+ *					descriptor
+ *			size:		non-zero length of C string in @buf
+ * Output:
+ *	On success:	NFS service no longer listens on that socket;
+ *			passed-in buffer filled with a '\n'-terminated C
+ *			string containing a unique name of the listener;
+ *			return code is the size in bytes of the string
+ *	On error:	return code is a negative errno value
+ *
+ * OR
+ *
+ * Input:
+ *			buf:		C string containing a transport
+ *					name and an unsigned integer value
+ *					representing the port to listen on,
+ *					separated by whitespace
+ *			size:		non-zero length of C string in @buf
+ * Output:
+ *	On success:	returns zero; NFS service is started
+ *	On error:	return code is a negative errno value
+ *
+ * OR
+ *
+ * Input:
+ *			buf:		C string containing a "-" followed
+ *					by a transport name and an unsigned
+ *					integer value representing the port
+ *					to listen on, separated by whitespace
+ *			size:		non-zero length of C string in @buf
+ * Output:
+ *	On success:	returns zero; NFS service no longer listens
+ *			on that transport
+ *	On error:	return code is a negative errno value
+ */
 static ssize_t write_ports(struct file *file, char *buf, size_t size)
 {
 	ssize_t rv;
@@ -700,6 +1060,27 @@
 
 int nfsd_max_blksize;
 
+/**
+ * write_maxblksize - Set or report the current NFS blksize
+ *
+ * Input:
+ *			buf:		ignored
+ *			size:		zero
+ *
+ * OR
+ *
+ * Input:
+ * 			buf:		C string containing an unsigned
+ * 					integer value representing the new
+ * 					NFS blksize
+ *			size:		non-zero length of C string in @buf
+ * Output:
+ *	On success:	passed-in buffer filled with '\n'-terminated C string
+ *			containing numeric value of the current NFS blksize
+ *			setting;
+ *			return code is the size in bytes of the string
+ *	On error:	return code is zero or a negative errno value
+ */
 static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
 {
 	char *mesg = buf;
@@ -752,6 +1133,27 @@
 	return strlen(buf);
 }
 
+/**
+ * write_leasetime - Set or report the current NFSv4 lease time
+ *
+ * Input:
+ *			buf:		ignored
+ *			size:		zero
+ *
+ * OR
+ *
+ * Input:
+ *			buf:		C string containing an unsigned
+ *					integer value representing the new
+ *					NFSv4 lease expiry time
+ *			size:		non-zero length of C string in @buf
+ * Output:
+ *	On success:	passed-in buffer filled with '\n'-terminated C
+ *			string containing unsigned integer value of the
+ *			current lease expiry time;
+ *			return code is the size in bytes of the string
+ *	On error:	return code is zero or a negative errno value
+ */
 static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
 {
 	ssize_t rv;
@@ -788,6 +1190,27 @@
 	return strlen(buf);
 }
 
+/**
+ * write_recoverydir - Set or report the pathname of the recovery directory
+ *
+ * Input:
+ *			buf:		ignored
+ *			size:		zero
+ *
+ * OR
+ *
+ * Input:
+ *			buf:		C string containing the pathname
+ *					of the directory on a local file
+ *					system containing permanent NFSv4
+ *					recovery data
+ *			size:		non-zero length of C string in @buf
+ * Output:
+ *	On success:	passed-in buffer filled with '\n'-terminated C string
+ *			containing the current recovery pathname setting;
+ *			return code is the size in bytes of the string
+ *	On error:	return code is zero or a negative errno value
+ */
 static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
 {
 	ssize_t rv;
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index f0da7d9..9f1ca17 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -258,14 +258,32 @@
 	return error;
 }
 
-/*
- * Perform sanity checks on the dentry in a client's file handle.
+/**
+ * fh_verify - filehandle lookup and access checking
+ * @rqstp: pointer to current rpc request
+ * @fhp: filehandle to be verified
+ * @type: expected type of object pointed to by filehandle
+ * @access: type of access needed to object
  *
- * Note that the file handle dentry may need to be freed even after
- * an error return.
+ * Look up a dentry from the on-the-wire filehandle, check the client's
+ * access to the export, and set the current task's credentials.
  *
- * This is only called at the start of an nfsproc call, so fhp points to
- * a svc_fh which is all 0 except for the over-the-wire file handle.
+ * Regardless of success or failure of fh_verify(), fh_put() should be
+ * called on @fhp when the caller is finished with the filehandle.
+ *
+ * fh_verify() may be called multiple times on a given filehandle, for
+ * example, when processing an NFSv4 compound.  The first call will look
+ * up a dentry using the on-the-wire filehandle.  Subsequent calls will
+ * skip the lookup and just perform the other checks and possibly change
+ * the current task's credentials.
+ *
+ * @type specifies the type of object expected using one of the S_IF*
+ * constants defined in include/linux/stat.h.  The caller may use zero
+ * to indicate that it doesn't care, or a negative integer to indicate
+ * that it expects something not of the given type.
+ *
+ * @access is formed from the NFSD_MAY_* constants defined in
+ * include/linux/nfsd/nfsd.h.
  */
 __be32
 fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
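
To make the @type and @access conventions concrete, a hedged caller sketch (the surrounding procedure example_read_check() is hypothetical; only the fh_verify() call and the constants come from existing code):

	/* Illustrative caller, not part of this patch. */
	static __be32 example_read_check(struct svc_rqst *rqstp, struct svc_fh *fhp)
	{
		__be32 status;

		/* expect a regular file and require read access */
		status = fh_verify(rqstp, fhp, S_IFREG, NFSD_MAY_READ);
		if (status != nfs_ok)
			return status;

		/* ... perform the read; the caller does fh_put(fhp) when it
		 * is finished with the filehandle, success or failure ... */
		return nfs_ok;
	}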
@@ -466,6 +484,8 @@
 				goto retry;
 			break;
 		}
+	} else if (exp->ex_flags & NFSEXP_FSID) {
+		fsid_type = FSID_NUM;
 	} else if (exp->ex_uuid) {
 		if (fhp->fh_maxsize >= 64) {
 			if (root_export)
@@ -478,9 +498,7 @@
 			else
 				fsid_type = FSID_UUID4_INUM;
 		}
-	} else if (exp->ex_flags & NFSEXP_FSID)
-		fsid_type = FSID_NUM;
-	else if (!old_valid_dev(ex_dev))
+	} else if (!old_valid_dev(ex_dev))
 		/* for newer device numbers, we must use a newer fsid format */
 		fsid_type = FSID_ENCODE_DEV;
 	else
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 5cffeca..6f7f263 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -622,6 +622,7 @@
 		{ nfserr_badname, -ESRCH },
 		{ nfserr_io, -ETXTBSY },
 		{ nfserr_notsupp, -EOPNOTSUPP },
+		{ nfserr_toosmall, -ETOOSMALL },
 	};
 	int	i;
 
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 44aa92a..6e50aaa 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -744,16 +744,44 @@
 	fput(filp);
 }
 
+/*
+ * Sync a file
+ * As this calls fsync (not fdatasync) there is no need for a write_inode
+ * after it.
+ */
+static inline int nfsd_dosync(struct file *filp, struct dentry *dp,
+			      const struct file_operations *fop)
+{
+	struct inode *inode = dp->d_inode;
+	int (*fsync) (struct file *, struct dentry *, int);
+	int err;
+
+	err = filemap_fdatawrite(inode->i_mapping);
+	if (err == 0 && fop && (fsync = fop->fsync))
+		err = fsync(filp, dp, 0);
+	if (err == 0)
+		err = filemap_fdatawait(inode->i_mapping);
+
+	return err;
+}
+
 static int
 nfsd_sync(struct file *filp)
 {
-	return vfs_fsync(filp, filp->f_path.dentry, 0);
+	int err;
+	struct inode *inode = filp->f_path.dentry->d_inode;
+	dprintk("nfsd: sync file %s\n", filp->f_path.dentry->d_name.name);
+	mutex_lock(&inode->i_mutex);
+	err = nfsd_dosync(filp, filp->f_path.dentry, filp->f_op);
+	mutex_unlock(&inode->i_mutex);
+
+	return err;
 }
 
 int
-nfsd_sync_dir(struct dentry *dentry)
+nfsd_sync_dir(struct dentry *dp)
 {
-	return vfs_fsync(NULL, dentry, 0);
+	return nfsd_dosync(NULL, dp, dp->d_inode->i_fop);
 }
 
 /*
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 81b8644..d53a183 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -576,7 +576,7 @@
 	.destroy_watch	= free_inotify_user_watch,
 };
 
-asmlinkage long sys_inotify_init1(int flags)
+SYSCALL_DEFINE1(inotify_init1, int, flags)
 {
 	struct inotify_device *dev;
 	struct inotify_handle *ih;
@@ -655,12 +655,13 @@
 	return ret;
 }
 
-asmlinkage long sys_inotify_init(void)
+SYSCALL_DEFINE0(inotify_init)
 {
 	return sys_inotify_init1(0);
 }
 
-asmlinkage long sys_inotify_add_watch(int fd, const char __user *pathname, u32 mask)
+SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
+		u32, mask)
 {
 	struct inode *inode;
 	struct inotify_device *dev;
@@ -704,7 +705,7 @@
 	return ret;
 }
 
-asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd)
+SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
 {
 	struct file *filp;
 	struct inotify_device *dev;
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 54ff4c77..d861096c 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -3868,7 +3868,7 @@
 	struct ocfs2_extent_list *left_el = NULL, *right_el, *insert_el, *el;
 	struct ocfs2_extent_rec *rec, *tmprec;
 
-	right_el = path_leaf_el(right_path);;
+	right_el = path_leaf_el(right_path);
 	if (left_path)
 		left_el = path_leaf_el(left_path);
 
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 6ebaa58..04697ba 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -854,7 +854,7 @@
 
 	while (!kthread_should_stop() && !reg->hr_unclean_stop) {
 		/* We track the time spent inside
-		 * o2hb_do_disk_heartbeat so that we avoid more then
+		 * o2hb_do_disk_heartbeat so that we avoid more than
 		 * hr_timeout_ms between disk writes. On busy systems
 		 * this should result in a heartbeat which is less
 		 * likely to time itself out. */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index f731ab4..b0c4cad 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1324,7 +1324,7 @@
 			goto out;
 		}
 
-		mlog(0, "lock %s, successfull return from ocfs2_dlm_lock\n",
+		mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
 		     lockres->l_name);
 
 		/* At this point we've gone inside the dlm and need to
@@ -2951,7 +2951,7 @@
 		ocfs2_dlm_dump_lksb(&lockres->l_lksb);
 		BUG();
 	}
-	mlog(0, "lock %s, successfull return from ocfs2_dlm_unlock\n",
+	mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
 	     lockres->l_name);
 
 	ocfs2_wait_on_busy_lock(lockres);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index e8f795f..a5887df 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1605,7 +1605,7 @@
 			    struct ocfs2_space_resv *sr)
 {
 	struct inode *inode = file->f_path.dentry->d_inode;
-	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 
 	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
 	    !ocfs2_writes_unwritten_extents(osb))
diff --git a/fs/open.c b/fs/open.c
index d882fd2..a3a78ce 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -122,7 +122,7 @@
 	return 0;
 }
 
-asmlinkage long sys_statfs(const char __user *pathname, struct statfs __user * buf)
+SYSCALL_DEFINE2(statfs, const char __user *, pathname, struct statfs __user *, buf)
 {
 	struct path path;
 	int error;
@@ -138,8 +138,7 @@
 	return error;
 }
 
-
-asmlinkage long sys_statfs64(const char __user *pathname, size_t sz, struct statfs64 __user *buf)
+SYSCALL_DEFINE3(statfs64, const char __user *, pathname, size_t, sz, struct statfs64 __user *, buf)
 {
 	struct path path;
 	long error;
@@ -157,8 +156,7 @@
 	return error;
 }
 
-
-asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user * buf)
+SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct statfs __user *, buf)
 {
 	struct file * file;
 	struct statfs tmp;
@@ -176,7 +174,7 @@
 	return error;
 }
 
-asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz, struct statfs64 __user *buf)
+SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, size_t, sz, struct statfs64 __user *, buf)
 {
 	struct file * file;
 	struct statfs64 tmp;
@@ -289,7 +287,7 @@
 	return error;
 }
 
-asmlinkage long sys_truncate(const char __user * path, unsigned long length)
+SYSCALL_DEFINE2(truncate, const char __user *, path, unsigned long, length)
 {
 	/* on 32-bit boxen it will cut the range 2^31--2^32-1 off */
 	return do_sys_truncate(path, (long)length);
@@ -341,7 +339,7 @@
 	return error;
 }
 
-asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length)
+SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length)
 {
 	long ret = do_sys_ftruncate(fd, length, 1);
 	/* avoid REGPARM breakage on x86: */
@@ -351,21 +349,35 @@
 
 /* LFS versions of truncate are only needed on 32 bit machines */
 #if BITS_PER_LONG == 32
-asmlinkage long sys_truncate64(const char __user * path, loff_t length)
+SYSCALL_DEFINE(truncate64)(const char __user * path, loff_t length)
 {
 	return do_sys_truncate(path, length);
 }
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_truncate64(long path, loff_t length)
+{
+	return SYSC_truncate64((const char __user *) path, length);
+}
+SYSCALL_ALIAS(sys_truncate64, SyS_truncate64);
+#endif
 
-asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length)
+SYSCALL_DEFINE(ftruncate64)(unsigned int fd, loff_t length)
 {
 	long ret = do_sys_ftruncate(fd, length, 0);
 	/* avoid REGPARM breakage on x86: */
 	asmlinkage_protect(2, ret, fd, length);
 	return ret;
 }
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_ftruncate64(long fd, loff_t length)
+{
+	return SYSC_ftruncate64((unsigned int) fd, length);
+}
+SYSCALL_ALIAS(sys_ftruncate64, SyS_ftruncate64);
 #endif
+#endif /* BITS_PER_LONG == 32 */
 
-asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len)
+SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len)
 {
 	struct file *file;
 	struct inode *inode;
@@ -422,13 +434,20 @@
 out:
 	return ret;
 }
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_fallocate(long fd, long mode, loff_t offset, loff_t len)
+{
+	return SYSC_fallocate((int)fd, (int)mode, offset, len);
+}
+SYSCALL_ALIAS(sys_fallocate, SyS_fallocate);
+#endif
 
 /*
  * access() needs to use the real uid/gid, not the effective uid/gid.
  * We do this by temporarily clearing all FS-related capabilities and
  * switching the fsuid/fsgid around to the real ones.
  */
-asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode)
+SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
 {
 	const struct cred *old_cred;
 	struct cred *override_cred;
@@ -498,12 +517,12 @@
 	return res;
 }
 
-asmlinkage long sys_access(const char __user *filename, int mode)
+SYSCALL_DEFINE2(access, const char __user *, filename, int, mode)
 {
 	return sys_faccessat(AT_FDCWD, filename, mode);
 }
 
-asmlinkage long sys_chdir(const char __user * filename)
+SYSCALL_DEFINE1(chdir, const char __user *, filename)
 {
 	struct path path;
 	int error;
@@ -524,7 +543,7 @@
 	return error;
 }
 
-asmlinkage long sys_fchdir(unsigned int fd)
+SYSCALL_DEFINE1(fchdir, unsigned int, fd)
 {
 	struct file *file;
 	struct inode *inode;
@@ -550,7 +569,7 @@
 	return error;
 }
 
-asmlinkage long sys_chroot(const char __user * filename)
+SYSCALL_DEFINE1(chroot, const char __user *, filename)
 {
 	struct path path;
 	int error;
@@ -575,7 +594,7 @@
 	return error;
 }
 
-asmlinkage long sys_fchmod(unsigned int fd, mode_t mode)
+SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
 {
 	struct inode * inode;
 	struct dentry * dentry;
@@ -609,8 +628,7 @@
 	return err;
 }
 
-asmlinkage long sys_fchmodat(int dfd, const char __user *filename,
-			     mode_t mode)
+SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
 {
 	struct path path;
 	struct inode *inode;
@@ -639,7 +657,7 @@
 	return error;
 }
 
-asmlinkage long sys_chmod(const char __user *filename, mode_t mode)
+SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
 {
 	return sys_fchmodat(AT_FDCWD, filename, mode);
 }
@@ -669,7 +687,7 @@
 	return error;
 }
 
-asmlinkage long sys_chown(const char __user * filename, uid_t user, gid_t group)
+SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
 {
 	struct path path;
 	int error;
@@ -688,8 +706,8 @@
 	return error;
 }
 
-asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
-			     gid_t group, int flag)
+SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
+		gid_t, group, int, flag)
 {
 	struct path path;
 	int error = -EINVAL;
@@ -713,7 +731,7 @@
 	return error;
 }
 
-asmlinkage long sys_lchown(const char __user * filename, uid_t user, gid_t group)
+SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group)
 {
 	struct path path;
 	int error;
@@ -732,8 +750,7 @@
 	return error;
 }
 
-
-asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group)
+SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
 {
 	struct file * file;
 	int error = -EBADF;
@@ -1029,7 +1046,7 @@
 	return fd;
 }
 
-asmlinkage long sys_open(const char __user *filename, int flags, int mode)
+SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, int, mode)
 {
 	long ret;
 
@@ -1042,8 +1059,8 @@
 	return ret;
 }
 
-asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
-			   int mode)
+SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags,
+		int, mode)
 {
 	long ret;
 
@@ -1062,7 +1079,7 @@
  * For backward compatibility?  Maybe this should be moved
  * into arch/i386 instead?
  */
-asmlinkage long sys_creat(const char __user * pathname, int mode)
+SYSCALL_DEFINE2(creat, const char __user *, pathname, int, mode)
 {
 	return sys_open(pathname, O_CREAT | O_WRONLY | O_TRUNC, mode);
 }
@@ -1098,7 +1115,7 @@
  * releasing the fd. This ensures that one clone task can't release
  * an fd while another clone is opening it.
  */
-asmlinkage long sys_close(unsigned int fd)
+SYSCALL_DEFINE1(close, unsigned int, fd)
 {
 	struct file * filp;
 	struct files_struct *files = current->files;
@@ -1131,14 +1148,13 @@
 	spin_unlock(&files->file_lock);
 	return -EBADF;
 }
-
 EXPORT_SYMBOL(sys_close);
 
 /*
  * This routine simulates a hangup on the tty, to arrange that users
  * are given clean terminals at login time.
  */
-asmlinkage long sys_vhangup(void)
+SYSCALL_DEFINE0(vhangup)
 {
 	if (capable(CAP_SYS_TTY_CONFIG)) {
 		tty_vhangup_self();
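
For reference, a rough sketch of what the SYSCALL_DEFINEn() conversions above amount to (an approximation, not the literal expansion in include/linux/syscalls.h). Without CONFIG_HAVE_SYSCALL_WRAPPERS the macro reduces to the familiar typed prototype; with wrappers enabled it additionally emits a SyS_*() stub that receives every argument as a long and casts it back. Taking sys_access() above as the example:

	/* Approximate shape only -- see include/linux/syscalls.h for the real macros. */
	asmlinkage long sys_access(const char __user *filename, int mode);

	#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
	/* wrapper variant: arguments arrive as longs and are cast back to their types */
	asmlinkage long SyS_access(long filename, long mode)
	{
		return SYSC_access((const char __user *) filename, (int) mode);
	}
	SYSCALL_ALIAS(sys_access, SyS_access);
	#endif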
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 5198ada..6d72024 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -334,6 +334,7 @@
 
 	blk_free_devt(part_devt(part));
 	rcu_assign_pointer(ptbl->part[partno], NULL);
+	rcu_assign_pointer(ptbl->last_lookup, NULL);
 	kobject_put(part->holder_dir);
 	device_del(part_to_dev(part));
 
diff --git a/fs/pipe.c b/fs/pipe.c
index 8916971..3a48ba5 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -1043,7 +1043,7 @@
  * sys_pipe() is the normal C calling standard for creating
  * a pipe. It's not the way Unix traditionally does this, though.
  */
-asmlinkage long __weak sys_pipe2(int __user *fildes, int flags)
+SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
 {
 	int fd[2];
 	int error;
@@ -1059,7 +1059,7 @@
 	return error;
 }
 
-asmlinkage long __weak sys_pipe(int __user *fildes)
+SYSCALL_DEFINE1(pipe, int __user *, fildes)
 {
 	return sys_pipe2(fildes, 0);
 }
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 10fd522..0c9de19 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -65,6 +65,7 @@
 #include <linux/mm.h>
 #include <linux/rcupdate.h>
 #include <linux/kallsyms.h>
+#include <linux/stacktrace.h>
 #include <linux/resource.h>
 #include <linux/module.h>
 #include <linux/mount.h>
@@ -109,25 +110,22 @@
 	.op   = OP,					\
 }
 
-#define DIR(NAME, MODE, OTYPE)							\
-	NOD(NAME, (S_IFDIR|(MODE)),						\
-		&proc_##OTYPE##_inode_operations, &proc_##OTYPE##_operations,	\
-		{} )
-#define LNK(NAME, OTYPE)					\
+#define DIR(NAME, MODE, iops, fops)	\
+	NOD(NAME, (S_IFDIR|(MODE)), &iops, &fops, {} )
+#define LNK(NAME, get_link)					\
 	NOD(NAME, (S_IFLNK|S_IRWXUGO),				\
 		&proc_pid_link_inode_operations, NULL,		\
-		{ .proc_get_link = &proc_##OTYPE##_link } )
-#define REG(NAME, MODE, OTYPE)				\
-	NOD(NAME, (S_IFREG|(MODE)), NULL,		\
-		&proc_##OTYPE##_operations, {})
-#define INF(NAME, MODE, OTYPE)				\
+		{ .proc_get_link = get_link } )
+#define REG(NAME, MODE, fops)				\
+	NOD(NAME, (S_IFREG|(MODE)), NULL, &fops, {})
+#define INF(NAME, MODE, read)				\
 	NOD(NAME, (S_IFREG|(MODE)), 			\
 		NULL, &proc_info_file_operations,	\
-		{ .proc_read = &proc_##OTYPE } )
-#define ONE(NAME, MODE, OTYPE)				\
+		{ .proc_read = read } )
+#define ONE(NAME, MODE, show)				\
 	NOD(NAME, (S_IFREG|(MODE)), 			\
 		NULL, &proc_single_file_operations,	\
-		{ .proc_show = &proc_##OTYPE } )
+		{ .proc_show = show } )
 
 /*
  * Count the number of hardlinks for the pid_entry table, excluding the .
@@ -308,9 +306,9 @@
 	struct mm_struct *mm = get_task_mm(task);
 	if (mm) {
 		unsigned int nwords = 0;
-		do
+		do {
 			nwords += 2;
-		while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
+		} while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
 		res = nwords * sizeof(mm->saved_auxv[0]);
 		if (res > PAGE_SIZE)
 			res = PAGE_SIZE;
@@ -340,6 +338,37 @@
 }
 #endif /* CONFIG_KALLSYMS */
 
+#ifdef CONFIG_STACKTRACE
+
+#define MAX_STACK_TRACE_DEPTH	64
+
+static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
+			  struct pid *pid, struct task_struct *task)
+{
+	struct stack_trace trace;
+	unsigned long *entries;
+	int i;
+
+	entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
+	if (!entries)
+		return -ENOMEM;
+
+	trace.nr_entries	= 0;
+	trace.max_entries	= MAX_STACK_TRACE_DEPTH;
+	trace.entries		= entries;
+	trace.skip		= 0;
+	save_stack_trace_tsk(task, &trace);
+
+	for (i = 0; i < trace.nr_entries; i++) {
+		seq_printf(m, "[<%p>] %pS\n",
+			   (void *)entries[i], (void *)entries[i]);
+	}
+	kfree(entries);
+
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_SCHEDSTATS
 /*
  * Provides /proc/PID/schedstat
@@ -1186,8 +1215,6 @@
 	struct inode *inode = m->private;
 	struct task_struct *p;
 
-	WARN_ON(!inode);
-
 	p = get_proc_task(inode);
 	if (!p)
 		return -ESRCH;
@@ -1205,8 +1232,6 @@
 	struct inode *inode = file->f_path.dentry->d_inode;
 	struct task_struct *p;
 
-	WARN_ON(!inode);
-
 	p = get_proc_task(inode);
 	if (!p)
 		return -ESRCH;
@@ -1974,13 +1999,11 @@
 					 const struct pid_entry *ents,
 					 unsigned int nents)
 {
-	struct inode *inode;
 	struct dentry *error;
 	struct task_struct *task = get_proc_task(dir);
 	const struct pid_entry *p, *last;
 
 	error = ERR_PTR(-ENOENT);
-	inode = NULL;
 
 	if (!task)
 		goto out_no_task;
@@ -2136,12 +2159,12 @@
 };
 
 static const struct pid_entry attr_dir_stuff[] = {
-	REG("current",    S_IRUGO|S_IWUGO, pid_attr),
-	REG("prev",       S_IRUGO,	   pid_attr),
-	REG("exec",       S_IRUGO|S_IWUGO, pid_attr),
-	REG("fscreate",   S_IRUGO|S_IWUGO, pid_attr),
-	REG("keycreate",  S_IRUGO|S_IWUGO, pid_attr),
-	REG("sockcreate", S_IRUGO|S_IWUGO, pid_attr),
+	REG("current",    S_IRUGO|S_IWUGO, proc_pid_attr_operations),
+	REG("prev",       S_IRUGO,	   proc_pid_attr_operations),
+	REG("exec",       S_IRUGO|S_IWUGO, proc_pid_attr_operations),
+	REG("fscreate",   S_IRUGO|S_IWUGO, proc_pid_attr_operations),
+	REG("keycreate",  S_IRUGO|S_IWUGO, proc_pid_attr_operations),
+	REG("sockcreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
 };
 
 static int proc_attr_dir_readdir(struct file * filp,
@@ -2461,74 +2484,77 @@
 static const struct inode_operations proc_task_inode_operations;
 
 static const struct pid_entry tgid_base_stuff[] = {
-	DIR("task",       S_IRUGO|S_IXUGO, task),
-	DIR("fd",         S_IRUSR|S_IXUSR, fd),
-	DIR("fdinfo",     S_IRUSR|S_IXUSR, fdinfo),
+	DIR("task",       S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
+	DIR("fd",         S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
+	DIR("fdinfo",     S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
 #ifdef CONFIG_NET
-	DIR("net",        S_IRUGO|S_IXUGO, net),
+	DIR("net",        S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
 #endif
-	REG("environ",    S_IRUSR, environ),
-	INF("auxv",       S_IRUSR, pid_auxv),
-	ONE("status",     S_IRUGO, pid_status),
-	ONE("personality", S_IRUSR, pid_personality),
-	INF("limits",	  S_IRUSR, pid_limits),
+	REG("environ",    S_IRUSR, proc_environ_operations),
+	INF("auxv",       S_IRUSR, proc_pid_auxv),
+	ONE("status",     S_IRUGO, proc_pid_status),
+	ONE("personality", S_IRUSR, proc_pid_personality),
+	INF("limits",	  S_IRUSR, proc_pid_limits),
 #ifdef CONFIG_SCHED_DEBUG
-	REG("sched",      S_IRUGO|S_IWUSR, pid_sched),
+	REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
-	INF("syscall",    S_IRUSR, pid_syscall),
+	INF("syscall",    S_IRUSR, proc_pid_syscall),
 #endif
-	INF("cmdline",    S_IRUGO, pid_cmdline),
-	ONE("stat",       S_IRUGO, tgid_stat),
-	ONE("statm",      S_IRUGO, pid_statm),
-	REG("maps",       S_IRUGO, maps),
+	INF("cmdline",    S_IRUGO, proc_pid_cmdline),
+	ONE("stat",       S_IRUGO, proc_tgid_stat),
+	ONE("statm",      S_IRUGO, proc_pid_statm),
+	REG("maps",       S_IRUGO, proc_maps_operations),
 #ifdef CONFIG_NUMA
-	REG("numa_maps",  S_IRUGO, numa_maps),
+	REG("numa_maps",  S_IRUGO, proc_numa_maps_operations),
 #endif
-	REG("mem",        S_IRUSR|S_IWUSR, mem),
-	LNK("cwd",        cwd),
-	LNK("root",       root),
-	LNK("exe",        exe),
-	REG("mounts",     S_IRUGO, mounts),
-	REG("mountinfo",  S_IRUGO, mountinfo),
-	REG("mountstats", S_IRUSR, mountstats),
+	REG("mem",        S_IRUSR|S_IWUSR, proc_mem_operations),
+	LNK("cwd",        proc_cwd_link),
+	LNK("root",       proc_root_link),
+	LNK("exe",        proc_exe_link),
+	REG("mounts",     S_IRUGO, proc_mounts_operations),
+	REG("mountinfo",  S_IRUGO, proc_mountinfo_operations),
+	REG("mountstats", S_IRUSR, proc_mountstats_operations),
 #ifdef CONFIG_PROC_PAGE_MONITOR
-	REG("clear_refs", S_IWUSR, clear_refs),
-	REG("smaps",      S_IRUGO, smaps),
-	REG("pagemap",    S_IRUSR, pagemap),
+	REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
+	REG("smaps",      S_IRUGO, proc_smaps_operations),
+	REG("pagemap",    S_IRUSR, proc_pagemap_operations),
 #endif
 #ifdef CONFIG_SECURITY
-	DIR("attr",       S_IRUGO|S_IXUGO, attr_dir),
+	DIR("attr",       S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
 #endif
 #ifdef CONFIG_KALLSYMS
-	INF("wchan",      S_IRUGO, pid_wchan),
+	INF("wchan",      S_IRUGO, proc_pid_wchan),
+#endif
+#ifdef CONFIG_STACKTRACE
+	ONE("stack",      S_IRUSR, proc_pid_stack),
 #endif
 #ifdef CONFIG_SCHEDSTATS
-	INF("schedstat",  S_IRUGO, pid_schedstat),
+	INF("schedstat",  S_IRUGO, proc_pid_schedstat),
 #endif
 #ifdef CONFIG_LATENCYTOP
-	REG("latency",  S_IRUGO, lstats),
+	REG("latency",  S_IRUGO, proc_lstats_operations),
 #endif
 #ifdef CONFIG_PROC_PID_CPUSET
-	REG("cpuset",     S_IRUGO, cpuset),
+	REG("cpuset",     S_IRUGO, proc_cpuset_operations),
 #endif
 #ifdef CONFIG_CGROUPS
-	REG("cgroup",  S_IRUGO, cgroup),
+	REG("cgroup",  S_IRUGO, proc_cgroup_operations),
 #endif
-	INF("oom_score",  S_IRUGO, oom_score),
-	REG("oom_adj",    S_IRUGO|S_IWUSR, oom_adjust),
+	INF("oom_score",  S_IRUGO, proc_oom_score),
+	REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
 #ifdef CONFIG_AUDITSYSCALL
-	REG("loginuid",   S_IWUSR|S_IRUGO, loginuid),
-	REG("sessionid",  S_IRUGO, sessionid),
+	REG("loginuid",   S_IWUSR|S_IRUGO, proc_loginuid_operations),
+	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
 #endif
 #ifdef CONFIG_FAULT_INJECTION
-	REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
+	REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
 #endif
 #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
-	REG("coredump_filter", S_IRUGO|S_IWUSR, coredump_filter),
+	REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
 #endif
 #ifdef CONFIG_TASK_IO_ACCOUNTING
-	INF("io",	S_IRUGO, tgid_io_accounting),
+	INF("io",	S_IRUGO, proc_tgid_io_accounting),
 #endif
 };
 
@@ -2801,66 +2827,69 @@
  * Tasks
  */
 static const struct pid_entry tid_base_stuff[] = {
-	DIR("fd",        S_IRUSR|S_IXUSR, fd),
-	DIR("fdinfo",    S_IRUSR|S_IXUSR, fdinfo),
-	REG("environ",   S_IRUSR, environ),
-	INF("auxv",      S_IRUSR, pid_auxv),
-	ONE("status",    S_IRUGO, pid_status),
-	ONE("personality", S_IRUSR, pid_personality),
-	INF("limits",	 S_IRUSR, pid_limits),
+	DIR("fd",        S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
+	DIR("fdinfo",    S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fd_operations),
+	REG("environ",   S_IRUSR, proc_environ_operations),
+	INF("auxv",      S_IRUSR, proc_pid_auxv),
+	ONE("status",    S_IRUGO, proc_pid_status),
+	ONE("personality", S_IRUSR, proc_pid_personality),
+	INF("limits",	 S_IRUSR, proc_pid_limits),
 #ifdef CONFIG_SCHED_DEBUG
-	REG("sched",     S_IRUGO|S_IWUSR, pid_sched),
+	REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
-	INF("syscall",   S_IRUSR, pid_syscall),
+	INF("syscall",   S_IRUSR, proc_pid_syscall),
 #endif
-	INF("cmdline",   S_IRUGO, pid_cmdline),
-	ONE("stat",      S_IRUGO, tid_stat),
-	ONE("statm",     S_IRUGO, pid_statm),
-	REG("maps",      S_IRUGO, maps),
+	INF("cmdline",   S_IRUGO, proc_pid_cmdline),
+	ONE("stat",      S_IRUGO, proc_tid_stat),
+	ONE("statm",     S_IRUGO, proc_pid_statm),
+	REG("maps",      S_IRUGO, proc_maps_operations),
 #ifdef CONFIG_NUMA
-	REG("numa_maps", S_IRUGO, numa_maps),
+	REG("numa_maps", S_IRUGO, proc_numa_maps_operations),
 #endif
-	REG("mem",       S_IRUSR|S_IWUSR, mem),
-	LNK("cwd",       cwd),
-	LNK("root",      root),
-	LNK("exe",       exe),
-	REG("mounts",    S_IRUGO, mounts),
-	REG("mountinfo",  S_IRUGO, mountinfo),
+	REG("mem",       S_IRUSR|S_IWUSR, proc_mem_operations),
+	LNK("cwd",       proc_cwd_link),
+	LNK("root",      proc_root_link),
+	LNK("exe",       proc_exe_link),
+	REG("mounts",    S_IRUGO, proc_mounts_operations),
+	REG("mountinfo",  S_IRUGO, proc_mountinfo_operations),
 #ifdef CONFIG_PROC_PAGE_MONITOR
-	REG("clear_refs", S_IWUSR, clear_refs),
-	REG("smaps",     S_IRUGO, smaps),
-	REG("pagemap",    S_IRUSR, pagemap),
+	REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
+	REG("smaps",     S_IRUGO, proc_smaps_operations),
+	REG("pagemap",    S_IRUSR, proc_pagemap_operations),
 #endif
 #ifdef CONFIG_SECURITY
-	DIR("attr",      S_IRUGO|S_IXUGO, attr_dir),
+	DIR("attr",      S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
 #endif
 #ifdef CONFIG_KALLSYMS
-	INF("wchan",     S_IRUGO, pid_wchan),
+	INF("wchan",     S_IRUGO, proc_pid_wchan),
+#endif
+#ifdef CONFIG_STACKTRACE
+	ONE("stack",      S_IRUSR, proc_pid_stack),
 #endif
 #ifdef CONFIG_SCHEDSTATS
-	INF("schedstat", S_IRUGO, pid_schedstat),
+	INF("schedstat", S_IRUGO, proc_pid_schedstat),
 #endif
 #ifdef CONFIG_LATENCYTOP
-	REG("latency",  S_IRUGO, lstats),
+	REG("latency",  S_IRUGO, proc_lstats_operations),
 #endif
 #ifdef CONFIG_PROC_PID_CPUSET
-	REG("cpuset",    S_IRUGO, cpuset),
+	REG("cpuset",    S_IRUGO, proc_cpuset_operations),
 #endif
 #ifdef CONFIG_CGROUPS
-	REG("cgroup",  S_IRUGO, cgroup),
+	REG("cgroup",  S_IRUGO, proc_cgroup_operations),
 #endif
-	INF("oom_score", S_IRUGO, oom_score),
-	REG("oom_adj",   S_IRUGO|S_IWUSR, oom_adjust),
+	INF("oom_score", S_IRUGO, proc_oom_score),
+	REG("oom_adj",   S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
 #ifdef CONFIG_AUDITSYSCALL
-	REG("loginuid",  S_IWUSR|S_IRUGO, loginuid),
-	REG("sessionid",  S_IRUSR, sessionid),
+	REG("loginuid",  S_IWUSR|S_IRUGO, proc_loginuid_operations),
+	REG("sessionid",  S_IRUSR, proc_sessionid_operations),
 #endif
 #ifdef CONFIG_FAULT_INJECTION
-	REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
+	REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
 #endif
 #ifdef CONFIG_TASK_IO_ACCOUNTING
-	INF("io",	S_IRUGO, tid_io_accounting),
+	INF("io",	S_IRUGO, proc_tid_io_accounting),
 #endif
 };
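
The CONFIG_STACKTRACE hunks above add a per-task "stack" entry to both tgid_base_stuff and tid_base_stuff, backed by proc_pid_stack(), which prints one "[<address>] symbol" line per saved kernel stack frame of the target task. A minimal user-space sketch of reading the new file (illustrative only; the entry is created S_IRUSR, so it is normally readable only by the task's owner or by root):

	#include <stdio.h>

	int main(int argc, char **argv)
	{
		char path[64], line[256];
		FILE *f;

		/* default to the caller's own task if no pid is given */
		snprintf(path, sizeof(path), "/proc/%s/stack",
			 argc > 1 ? argv[1] : "self");
		f = fopen(path, "r");
		if (!f) {
			perror(path);
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* one kernel stack frame per line */
		fclose(f);
		return 0;
	}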
 
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 60a359b..db7fa5c 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -14,7 +14,6 @@
 #include <linux/stat.h>
 #include <linux/module.h>
 #include <linux/mount.h>
-#include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/idr.h>
 #include <linux/namei.h>
@@ -379,7 +378,6 @@
 	struct inode *inode = NULL;
 	int error = -ENOENT;
 
-	lock_kernel();
 	spin_lock(&proc_subdir_lock);
 	for (de = de->subdir; de ; de = de->next) {
 		if (de->namelen != dentry->d_name.len)
@@ -397,7 +395,6 @@
 	}
 	spin_unlock(&proc_subdir_lock);
 out_unlock:
-	unlock_kernel();
 
 	if (inode) {
 		dentry->d_op = &proc_dentry_operations;
@@ -432,8 +429,6 @@
 	struct inode *inode = filp->f_path.dentry->d_inode;
 	int ret = 0;
 
-	lock_kernel();
-
 	ino = inode->i_ino;
 	i = filp->f_pos;
 	switch (i) {
@@ -487,7 +482,7 @@
 			spin_unlock(&proc_subdir_lock);
 	}
 	ret = 1;
-out:	unlock_kernel();
+out:
 	return ret;	
 }
 
@@ -504,6 +499,7 @@
  * the /proc directory.
  */
 static const struct file_operations proc_dir_operations = {
+	.llseek			= generic_file_llseek,
 	.read			= generic_read_dir,
 	.readdir		= proc_readdir,
 };
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 2543fd0..3e76bb9 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -35,16 +35,13 @@
  */
 void de_put(struct proc_dir_entry *de)
 {
-	lock_kernel();
 	if (!atomic_read(&de->count)) {
 		printk("de_put: entry %s already free!\n", de->name);
-		unlock_kernel();
 		return;
 	}
 
 	if (atomic_dec_and_test(&de->count))
 		free_proc_entry(de);
-	unlock_kernel();
 }
 
 /*
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 3e8aeb8..cd53ff8 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -41,8 +41,6 @@
 	(vmi)->used = 0;			\
 	(vmi)->largest_chunk = 0;		\
 } while(0)
-
-extern int nommu_vma_show(struct seq_file *, struct vm_area_struct *);
 #endif
 
 extern int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns,
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index b1675c4..43d23948 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -74,6 +74,9 @@
 		"LowTotal:       %8lu kB\n"
 		"LowFree:        %8lu kB\n"
 #endif
+#ifndef CONFIG_MMU
+		"MmapCopy:       %8lu kB\n"
+#endif
 		"SwapTotal:      %8lu kB\n"
 		"SwapFree:       %8lu kB\n"
 		"Dirty:          %8lu kB\n"
@@ -116,6 +119,9 @@
 		K(i.totalram-i.totalhigh),
 		K(i.freeram-i.freehigh),
 #endif
+#ifndef CONFIG_MMU
+		K((unsigned long) atomic_read(&mmap_pages_allocated)),
+#endif
 		K(i.totalswap),
 		K(i.freeswap),
 		K(global_page_state(NR_FILE_DIRTY)),
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index 3f87d26..b446d7a 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -33,33 +33,33 @@
 #include "internal.h"
 
 /*
- * display a single VMA to a sequenced file
+ * display a single region to a sequenced file
  */
-int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
+static int nommu_region_show(struct seq_file *m, struct vm_region *region)
 {
 	unsigned long ino = 0;
 	struct file *file;
 	dev_t dev = 0;
 	int flags, len;
 
-	flags = vma->vm_flags;
-	file = vma->vm_file;
+	flags = region->vm_flags;
+	file = region->vm_file;
 
 	if (file) {
-		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+		struct inode *inode = region->vm_file->f_path.dentry->d_inode;
 		dev = inode->i_sb->s_dev;
 		ino = inode->i_ino;
 	}
 
 	seq_printf(m,
 		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
-		   vma->vm_start,
-		   vma->vm_end,
+		   region->vm_start,
+		   region->vm_end,
 		   flags & VM_READ ? 'r' : '-',
 		   flags & VM_WRITE ? 'w' : '-',
 		   flags & VM_EXEC ? 'x' : '-',
 		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
-		   ((loff_t)vma->vm_pgoff) << PAGE_SHIFT,
+		   ((loff_t)region->vm_pgoff) << PAGE_SHIFT,
 		   MAJOR(dev), MINOR(dev), ino, &len);
 
 	if (file) {
@@ -75,61 +75,54 @@
 }
 
 /*
- * display a list of all the VMAs the kernel knows about
+ * display a list of all the REGIONs the kernel knows about
 * - nommu kernels have a single flat list
  */
-static int nommu_vma_list_show(struct seq_file *m, void *v)
+static int nommu_region_list_show(struct seq_file *m, void *_p)
 {
-	struct vm_area_struct *vma;
+	struct rb_node *p = _p;
 
-	vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb);
-	return nommu_vma_show(m, vma);
+	return nommu_region_show(m, rb_entry(p, struct vm_region, vm_rb));
 }
 
-static void *nommu_vma_list_start(struct seq_file *m, loff_t *_pos)
+static void *nommu_region_list_start(struct seq_file *m, loff_t *_pos)
 {
-	struct rb_node *_rb;
+	struct rb_node *p;
 	loff_t pos = *_pos;
-	void *next = NULL;
 
-	down_read(&nommu_vma_sem);
+	down_read(&nommu_region_sem);
 
-	for (_rb = rb_first(&nommu_vma_tree); _rb; _rb = rb_next(_rb)) {
-		if (pos == 0) {
-			next = _rb;
-			break;
-		}
-		pos--;
-	}
-
-	return next;
+	for (p = rb_first(&nommu_region_tree); p; p = rb_next(p))
+		if (pos-- == 0)
+			return p;
+	return NULL;
 }
 
-static void nommu_vma_list_stop(struct seq_file *m, void *v)
+static void nommu_region_list_stop(struct seq_file *m, void *v)
 {
-	up_read(&nommu_vma_sem);
+	up_read(&nommu_region_sem);
 }
 
-static void *nommu_vma_list_next(struct seq_file *m, void *v, loff_t *pos)
+static void *nommu_region_list_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	(*pos)++;
 	return rb_next((struct rb_node *) v);
 }
 
-static const struct seq_operations proc_nommu_vma_list_seqop = {
-	.start	= nommu_vma_list_start,
-	.next	= nommu_vma_list_next,
-	.stop	= nommu_vma_list_stop,
-	.show	= nommu_vma_list_show
+static struct seq_operations proc_nommu_region_list_seqop = {
+	.start	= nommu_region_list_start,
+	.next	= nommu_region_list_next,
+	.stop	= nommu_region_list_stop,
+	.show	= nommu_region_list_show
 };
 
-static int proc_nommu_vma_list_open(struct inode *inode, struct file *file)
+static int proc_nommu_region_list_open(struct inode *inode, struct file *file)
 {
-	return seq_open(file, &proc_nommu_vma_list_seqop);
+	return seq_open(file, &proc_nommu_region_list_seqop);
 }
 
-static const struct file_operations proc_nommu_vma_list_operations = {
-	.open    = proc_nommu_vma_list_open,
+static const struct file_operations proc_nommu_region_list_operations = {
+	.open    = proc_nommu_region_list_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
 	.release = seq_release,
@@ -137,7 +130,7 @@
 
 static int __init proc_nommu_init(void)
 {
-	proc_create("maps", S_IRUGO, NULL, &proc_nommu_vma_list_operations);
+	proc_create("maps", S_IRUGO, NULL, &proc_nommu_region_list_operations);
 	return 0;
 }
 
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 7bc296f..04d1270 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -18,7 +18,6 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/bitops.h>
-#include <linux/smp_lock.h>
 #include <linux/mount.h>
 #include <linux/nsproxy.h>
 #include <net/net_namespace.h>
@@ -172,6 +171,7 @@
 }
 
 const struct file_operations proc_net_operations = {
+	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
 	.readdir	= proc_tgid_net_readdir,
 };
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 7761602..f6299a2 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -16,7 +16,6 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/bitops.h>
-#include <linux/smp_lock.h>
 #include <linux/mount.h>
 #include <linux/pid_namespace.h>
 
@@ -162,17 +161,12 @@
 	unsigned int nr = filp->f_pos;
 	int ret;
 
-	lock_kernel();
-
 	if (nr < FIRST_PROCESS_ENTRY) {
 		int error = proc_readdir(filp, dirent, filldir);
-		if (error <= 0) {
-			unlock_kernel();
+		if (error <= 0)
 			return error;
-		}
 		filp->f_pos = FIRST_PROCESS_ENTRY;
 	}
-	unlock_kernel();
 
 	ret = proc_pid_readdir(filp, dirent, filldir);
 	return ret;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 219bd79..343ea12 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -9,31 +9,38 @@
 
 /*
  * Logic: we've got two memory sums for each process, "shared", and
- * "non-shared". Shared memory may get counted more then once, for
+ * "non-shared". Shared memory may get counted more than once, for
  * each process that owns it. Non-shared memory is counted
  * accurately.
  */
 void task_mem(struct seq_file *m, struct mm_struct *mm)
 {
-	struct vm_list_struct *vml;
-	unsigned long bytes = 0, sbytes = 0, slack = 0;
+	struct vm_area_struct *vma;
+	struct vm_region *region;
+	struct rb_node *p;
+	unsigned long bytes = 0, sbytes = 0, slack = 0, size;
         
 	down_read(&mm->mmap_sem);
-	for (vml = mm->context.vmlist; vml; vml = vml->next) {
-		if (!vml->vma)
-			continue;
+	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
+		vma = rb_entry(p, struct vm_area_struct, vm_rb);
 
-		bytes += kobjsize(vml);
-		if (atomic_read(&mm->mm_count) > 1 ||
-		    atomic_read(&vml->vma->vm_usage) > 1
-		    ) {
-			sbytes += kobjsize((void *) vml->vma->vm_start);
-			sbytes += kobjsize(vml->vma);
+		bytes += kobjsize(vma);
+
+		region = vma->vm_region;
+		if (region) {
+			size = kobjsize(region);
+			size += region->vm_end - region->vm_start;
 		} else {
-			bytes += kobjsize((void *) vml->vma->vm_start);
-			bytes += kobjsize(vml->vma);
-			slack += kobjsize((void *) vml->vma->vm_start) -
-				(vml->vma->vm_end - vml->vma->vm_start);
+			size = vma->vm_end - vma->vm_start;
+		}
+
+		if (atomic_read(&mm->mm_count) > 1 ||
+		    vma->vm_flags & VM_MAYSHARE) {
+			sbytes += size;
+		} else {
+			bytes += size;
+			if (region)
+				slack = region->vm_end - vma->vm_end;
 		}
 	}
 
@@ -70,13 +77,14 @@
 
 unsigned long task_vsize(struct mm_struct *mm)
 {
-	struct vm_list_struct *tbp;
+	struct vm_area_struct *vma;
+	struct rb_node *p;
 	unsigned long vsize = 0;
 
 	down_read(&mm->mmap_sem);
-	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
-		if (tbp->vma)
-			vsize += kobjsize((void *) tbp->vma->vm_start);
+	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
+		vma = rb_entry(p, struct vm_area_struct, vm_rb);
+		vsize += vma->vm_end - vma->vm_start;
 	}
 	up_read(&mm->mmap_sem);
 	return vsize;
@@ -85,15 +93,19 @@
 int task_statm(struct mm_struct *mm, int *shared, int *text,
 	       int *data, int *resident)
 {
-	struct vm_list_struct *tbp;
+	struct vm_area_struct *vma;
+	struct vm_region *region;
+	struct rb_node *p;
 	int size = kobjsize(mm);
 
 	down_read(&mm->mmap_sem);
-	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
-		size += kobjsize(tbp);
-		if (tbp->vma) {
-			size += kobjsize(tbp->vma);
-			size += kobjsize((void *) tbp->vma->vm_start);
+	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
+		vma = rb_entry(p, struct vm_area_struct, vm_rb);
+		size += kobjsize(vma);
+		region = vma->vm_region;
+		if (region) {
+			size += kobjsize(region);
+			size += region->vm_end - region->vm_start;
 		}
 	}
 
@@ -105,20 +117,62 @@
 }
 
 /*
+ * display a single VMA to a sequenced file
+ */
+static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
+{
+	unsigned long ino = 0;
+	struct file *file;
+	dev_t dev = 0;
+	int flags, len;
+
+	flags = vma->vm_flags;
+	file = vma->vm_file;
+
+	if (file) {
+		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+		dev = inode->i_sb->s_dev;
+		ino = inode->i_ino;
+	}
+
+	seq_printf(m,
+		   "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
+		   vma->vm_start,
+		   vma->vm_end,
+		   flags & VM_READ ? 'r' : '-',
+		   flags & VM_WRITE ? 'w' : '-',
+		   flags & VM_EXEC ? 'x' : '-',
+		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
+		   vma->vm_pgoff << PAGE_SHIFT,
+		   MAJOR(dev), MINOR(dev), ino, &len);
+
+	if (file) {
+		len = 25 + sizeof(void *) * 6 - len;
+		if (len < 1)
+			len = 1;
+		seq_printf(m, "%*c", len, ' ');
+		seq_path(m, &file->f_path, "");
+	}
+
+	seq_putc(m, '\n');
+	return 0;
+}
+
+/*
  * display mapping lines for a particular process's /proc/pid/maps
  */
-static int show_map(struct seq_file *m, void *_vml)
+static int show_map(struct seq_file *m, void *_p)
 {
-	struct vm_list_struct *vml = _vml;
+	struct rb_node *p = _p;
 
-	return nommu_vma_show(m, vml->vma);
+	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb));
 }
 
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
 	struct proc_maps_private *priv = m->private;
-	struct vm_list_struct *vml;
 	struct mm_struct *mm;
+	struct rb_node *p;
 	loff_t n = *pos;
 
 	/* pin the task and mm whilst we play with them */
@@ -134,9 +188,9 @@
 	}
 
 	/* start from the Nth VMA */
-	for (vml = mm->context.vmlist; vml; vml = vml->next)
+	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
 		if (n-- == 0)
-			return vml;
+			return p;
 	return NULL;
 }
 
@@ -152,12 +206,12 @@
 	}
 }
 
-static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
+static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
 {
-	struct vm_list_struct *vml = _vml;
+	struct rb_node *p = _p;
 
 	(*pos)++;
-	return vml ? vml->next : NULL;
+	return p ? rb_next(p) : NULL;
 }
 
 static const struct seq_operations proc_pid_maps_ops = {
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 03ec595..5edcc3f 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -47,8 +47,6 @@
 
 	offset = (unsigned long)(*ppos % PAGE_SIZE);
 	pfn = (unsigned long)(*ppos / PAGE_SIZE);
-	if (pfn > saved_max_pfn)
-		return -EINVAL;
 
 	do {
 		if (count > (PAGE_SIZE - offset))
diff --git a/fs/quota.c b/fs/quota.c
index 4a8c94f..d76ada9 100644
--- a/fs/quota.c
+++ b/fs/quota.c
@@ -371,7 +371,8 @@
  * calls. Maybe we need to add the process quotas etc. in the future,
  * but we probably should use rlimits for that.
  */
-asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special, qid_t id, void __user *addr)
+SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
+		qid_t, id, void __user *, addr)
 {
 	uint cmds, type;
 	struct super_block *sb = NULL;
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 76acdbc..b9b567a 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -262,11 +262,11 @@
 	ret = -ENOMEM;
 	pages = kzalloc(lpages * sizeof(struct page *), GFP_KERNEL);
 	if (!pages)
-		goto out;
+		goto out_free;
 
 	nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages);
 	if (nr != lpages)
-		goto out; /* leave if some pages were missing */
+		goto out_free_pages; /* leave if some pages were missing */
 
 	/* check the pages for physical adjacency */
 	ptr = pages;
@@ -274,19 +274,18 @@
 	page++;
 	for (loop = lpages; loop > 1; loop--)
 		if (*ptr++ != page++)
-			goto out;
+			goto out_free_pages;
 
 	/* okay - all conditions fulfilled */
 	ret = (unsigned long) page_address(pages[0]);
 
- out:
-	if (pages) {
-		ptr = pages;
-		for (loop = lpages; loop > 0; loop--)
-			put_page(*ptr++);
-		kfree(pages);
-	}
-
+out_free_pages:
+	ptr = pages;
+	for (loop = nr; loop > 0; loop--)
+		put_page(*ptr++);
+out_free:
+	kfree(pages);
+out:
 	return ret;
 }
 
diff --git a/fs/read_write.c b/fs/read_write.c
index 5cc6924..400fe81 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -147,7 +147,7 @@
 }
 EXPORT_SYMBOL(vfs_llseek);
 
-asmlinkage off_t sys_lseek(unsigned int fd, off_t offset, unsigned int origin)
+SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, origin)
 {
 	off_t retval;
 	struct file * file;
@@ -171,9 +171,9 @@
 }
 
 #ifdef __ARCH_WANT_SYS_LLSEEK
-asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high,
-			   unsigned long offset_low, loff_t __user * result,
-			   unsigned int origin)
+SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
+		unsigned long, offset_low, loff_t __user *, result,
+		unsigned int, origin)
 {
 	int retval;
 	struct file * file;
@@ -369,7 +369,7 @@
 	file->f_pos = pos;
 }
 
-asmlinkage ssize_t sys_read(unsigned int fd, char __user * buf, size_t count)
+SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
 {
 	struct file *file;
 	ssize_t ret = -EBADF;
@@ -386,7 +386,8 @@
 	return ret;
 }
 
-asmlinkage ssize_t sys_write(unsigned int fd, const char __user * buf, size_t count)
+SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
+		size_t, count)
 {
 	struct file *file;
 	ssize_t ret = -EBADF;
@@ -403,8 +404,8 @@
 	return ret;
 }
 
-asmlinkage ssize_t sys_pread64(unsigned int fd, char __user *buf,
-			     size_t count, loff_t pos)
+SYSCALL_DEFINE(pread64)(unsigned int fd, char __user *buf,
+			size_t count, loff_t pos)
 {
 	struct file *file;
 	ssize_t ret = -EBADF;
@@ -423,9 +424,17 @@
 
 	return ret;
 }
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_pread64(long fd, long buf, long count, loff_t pos)
+{
+	return SYSC_pread64((unsigned int) fd, (char __user *) buf,
+			    (size_t) count, pos);
+}
+SYSCALL_ALIAS(sys_pread64, SyS_pread64);
+#endif
 
-asmlinkage ssize_t sys_pwrite64(unsigned int fd, const char __user *buf,
-			      size_t count, loff_t pos)
+SYSCALL_DEFINE(pwrite64)(unsigned int fd, const char __user *buf,
+			 size_t count, loff_t pos)
 {
 	struct file *file;
 	ssize_t ret = -EBADF;
@@ -444,6 +453,14 @@
 
 	return ret;
 }
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_pwrite64(long fd, long buf, long count, loff_t pos)
+{
+	return SYSC_pwrite64((unsigned int) fd, (const char __user *) buf,
+			     (size_t) count, pos);
+}
+SYSCALL_ALIAS(sys_pwrite64, SyS_pwrite64);
+#endif
 
 /*
  * Reduce an iovec's length in-place.  Return the resulting number of segments
@@ -672,8 +689,8 @@
 
 EXPORT_SYMBOL(vfs_writev);
 
-asmlinkage ssize_t
-sys_readv(unsigned long fd, const struct iovec __user *vec, unsigned long vlen)
+SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
+		unsigned long, vlen)
 {
 	struct file *file;
 	ssize_t ret = -EBADF;
@@ -693,8 +710,8 @@
 	return ret;
 }
 
-asmlinkage ssize_t
-sys_writev(unsigned long fd, const struct iovec __user *vec, unsigned long vlen)
+SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
+		unsigned long, vlen)
 {
 	struct file *file;
 	ssize_t ret = -EBADF;
@@ -812,7 +829,7 @@
 	return retval;
 }
 
-asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t __user *offset, size_t count)
+SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, off_t __user *, offset, size_t, count)
 {
 	loff_t pos;
 	off_t off;
@@ -831,7 +848,7 @@
 	return do_sendfile(out_fd, in_fd, NULL, count, 0);
 }
 
-asmlinkage ssize_t sys_sendfile64(int out_fd, int in_fd, loff_t __user *offset, size_t count)
+SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, loff_t __user *, offset, size_t, count)
 {
 	loff_t pos;
 	ssize_t ret;
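
The pread64 and pwrite64 conversions above (like truncate64, ftruncate64 and fallocate in fs/open.c) use the bare SYSCALL_DEFINE() form plus a hand-written SyS_*() stub instead of SYSCALL_DEFINEn(), because their loff_t argument is 64 bits wide and cannot be round-tripped through a long on 32-bit architectures; the stubs therefore cast back every argument except the loff_t. A sketch of the same pattern for a hypothetical syscall (the names myread, SYSC_myread and sys_myread are invented for illustration):

	SYSCALL_DEFINE(myread)(unsigned int fd, char __user *buf, loff_t pos)
	{
		/* the body sees fully typed arguments, including the 64-bit pos */
		return 0;
	}
	#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
	asmlinkage long SyS_myread(long fd, long buf, loff_t pos)
	{
		/* long-sized arguments are cast back; the loff_t passes through intact */
		return SYSC_myread((unsigned int) fd, (char __user *) buf, pos);
	}
	SYSCALL_ALIAS(sys_myread, SyS_myread);
	#endif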
diff --git a/fs/readdir.c b/fs/readdir.c
index b318d9b..7723401 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -102,7 +102,8 @@
 	return -EFAULT;
 }
 
-asmlinkage long old_readdir(unsigned int fd, struct old_linux_dirent __user * dirent, unsigned int count)
+SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
+		struct old_linux_dirent __user *, dirent, unsigned int, count)
 {
 	int error;
 	struct file * file;
@@ -187,7 +188,8 @@
 	return -EFAULT;
 }
 
-asmlinkage long sys_getdents(unsigned int fd, struct linux_dirent __user * dirent, unsigned int count)
+SYSCALL_DEFINE3(getdents, unsigned int, fd,
+		struct linux_dirent __user *, dirent, unsigned int, count)
 {
 	struct file * file;
 	struct linux_dirent __user * lastdirent;
@@ -268,7 +270,8 @@
 	return -EFAULT;
 }
 
-asmlinkage long sys_getdents64(unsigned int fd, struct linux_dirent64 __user * dirent, unsigned int count)
+SYSCALL_DEFINE3(getdents64, unsigned int, fd,
+		struct linux_dirent64 __user *, dirent, unsigned int, count)
 {
 	struct file * file;
 	struct linux_dirent64 __user * lastdirent;
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index c55651f..f3c820b 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -83,7 +83,7 @@
 	reiserfs_sync_fs(s, 1);
 }
 
-static void reiserfs_write_super_lockfs(struct super_block *s)
+static int reiserfs_freeze(struct super_block *s)
 {
 	struct reiserfs_transaction_handle th;
 	reiserfs_write_lock(s);
@@ -101,11 +101,13 @@
 	}
 	s->s_dirt = 0;
 	reiserfs_write_unlock(s);
+	return 0;
 }
 
-static void reiserfs_unlockfs(struct super_block *s)
+static int reiserfs_unfreeze(struct super_block *s)
 {
 	reiserfs_allow_writes(s);
+	return 0;
 }
 
 extern const struct in_core_key MAX_IN_CORE_KEY;
@@ -613,8 +615,8 @@
 	.put_super = reiserfs_put_super,
 	.write_super = reiserfs_write_super,
 	.sync_fs = reiserfs_sync_fs,
-	.write_super_lockfs = reiserfs_write_super_lockfs,
-	.unlockfs = reiserfs_unlockfs,
+	.freeze_fs = reiserfs_freeze,
+	.unfreeze_fs = reiserfs_unfreeze,
 	.statfs = reiserfs_statfs,
 	.remount_fs = reiserfs_remount,
 	.show_options = generic_show_options,
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index c97d4c9..98a232f 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -490,7 +490,7 @@
 static struct inode *
 romfs_iget(struct super_block *sb, unsigned long ino)
 {
-	int nextfh;
+	int nextfh, ret;
 	struct romfs_inode ri;
 	struct inode *i;
 
@@ -526,11 +526,11 @@
 	i->i_mtime.tv_nsec = i->i_atime.tv_nsec = i->i_ctime.tv_nsec = 0;
 
         /* Precalculate the data offset */
-        ino = romfs_strnlen(i, ino+ROMFH_SIZE, ROMFS_MAXFN);
-        if (ino >= 0)
-                ino = ((ROMFH_SIZE+ino+1+ROMFH_PAD)&ROMFH_MASK);
-        else
-                ino = 0;
+	ret = romfs_strnlen(i, ino + ROMFH_SIZE, ROMFS_MAXFN);
+	if (ret >= 0)
+		ino = (ROMFH_SIZE + ret + 1 + ROMFH_PAD) & ROMFH_MASK;
+	else
+		ino = 0;
 
         ROMFS_I(i)->i_metasize = ino;
         ROMFS_I(i)->i_dataoffset = ino+(i->i_ino&ROMFH_MASK);
diff --git a/fs/select.c b/fs/select.c
index 08b91be..0fe0e14 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -557,8 +557,8 @@
 	return ret;
 }
 
-asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
-			fd_set __user *exp, struct timeval __user *tvp)
+SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
+		fd_set __user *, exp, struct timeval __user *, tvp)
 {
 	struct timespec end_time, *to = NULL;
 	struct timeval tv;
@@ -582,9 +582,9 @@
 }
 
 #ifdef HAVE_SET_RESTORE_SIGMASK
-asmlinkage long sys_pselect7(int n, fd_set __user *inp, fd_set __user *outp,
-		fd_set __user *exp, struct timespec __user *tsp,
-		const sigset_t __user *sigmask, size_t sigsetsize)
+static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
+		       fd_set __user *exp, struct timespec __user *tsp,
+		       const sigset_t __user *sigmask, size_t sigsetsize)
 {
 	sigset_t ksigmask, sigsaved;
 	struct timespec ts, end_time, *to = NULL;
@@ -610,7 +610,7 @@
 		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
 	}
 
-	ret = core_sys_select(n, inp, outp, exp, &end_time);
+	ret = core_sys_select(n, inp, outp, exp, to);
 	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);
 
 	if (ret == -ERESTARTNOHAND) {
@@ -636,8 +636,9 @@
  * which has a pointer to the sigset_t itself followed by a size_t containing
  * the sigset size.
  */
-asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
-	fd_set __user *exp, struct timespec __user *tsp, void __user *sig)
+SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
+		fd_set __user *, exp, struct timespec __user *, tsp,
+		void __user *, sig)
 {
 	size_t sigsetsize = 0;
 	sigset_t __user *up = NULL;
@@ -650,7 +651,7 @@
 			return -EFAULT;
 	}
 
-	return sys_pselect7(n, inp, outp, exp, tsp, up, sigsetsize);
+	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
 }
 #endif /* HAVE_SET_RESTORE_SIGMASK */
 
@@ -854,8 +855,8 @@
 	return ret;
 }
 
-asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
-			long timeout_msecs)
+SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
+		long, timeout_msecs)
 {
 	struct timespec end_time, *to = NULL;
 	int ret;
@@ -889,9 +890,9 @@
 }
 
 #ifdef HAVE_SET_RESTORE_SIGMASK
-asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
-	struct timespec __user *tsp, const sigset_t __user *sigmask,
-	size_t sigsetsize)
+SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
+		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
+		size_t, sigsetsize)
 {
 	sigset_t ksigmask, sigsaved;
 	struct timespec ts, end_time, *to = NULL;
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 9c39bc7..b07565c 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -205,8 +205,8 @@
 	.read		= signalfd_read,
 };
 
-asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask,
-			      size_t sizemask, int flags)
+SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
+		size_t, sizemask, int, flags)
 {
 	sigset_t sigmask;
 	struct signalfd_ctx *ctx;
@@ -259,8 +259,8 @@
 	return ufd;
 }
 
-asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask,
-			     size_t sizemask)
+SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
+		size_t, sizemask)
 {
 	return sys_signalfd4(ufd, user_mask, sizemask, 0);
 }
diff --git a/fs/splice.c b/fs/splice.c
index 1abab5c..4ed0ba4 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -21,6 +21,7 @@
 #include <linux/file.h>
 #include <linux/pagemap.h>
 #include <linux/splice.h>
+#include <linux/memcontrol.h>
 #include <linux/mm_inline.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
@@ -1434,8 +1435,8 @@
  * Currently we punt and implement it as a normal copy, see pipe_to_user().
  *
  */
-asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
-			     unsigned long nr_segs, unsigned int flags)
+SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, iov,
+		unsigned long, nr_segs, unsigned int, flags)
 {
 	struct file *file;
 	long error;
@@ -1460,9 +1461,9 @@
 	return error;
 }
 
-asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
-			   int fd_out, loff_t __user *off_out,
-			   size_t len, unsigned int flags)
+SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
+		int, fd_out, loff_t __user *, off_out,
+		size_t, len, unsigned int, flags)
 {
 	long error;
 	struct file *in, *out;
@@ -1684,7 +1685,7 @@
 	return ret;
 }
 
-asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
+SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
 {
 	struct file *in;
 	int error, fput_in;
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
new file mode 100644
index 0000000..8258cf9
--- /dev/null
+++ b/fs/squashfs/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the linux squashfs routines.
+#
+
+obj-$(CONFIG_SQUASHFS) += squashfs.o
+squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
+squashfs-y += namei.o super.o symlink.o
+#squashfs-y += squashfs2_0.o
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
new file mode 100644
index 0000000..c837dfc
--- /dev/null
+++ b/fs/squashfs/block.c
@@ -0,0 +1,274 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * block.c
+ */
+
+/*
+ * This file implements the low-level routines to read and decompress
+ * datablocks and metadata blocks.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/buffer_head.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Read the metadata block length, which is stored in the first two
+ * bytes of the metadata block.
+ */
+static struct buffer_head *get_block_length(struct super_block *sb,
+			u64 *cur_index, int *offset, int *length)
+{
+	struct squashfs_sb_info *msblk = sb->s_fs_info;
+	struct buffer_head *bh;
+
+	bh = sb_bread(sb, *cur_index);
+	if (bh == NULL)
+		return NULL;
+
+	if (msblk->devblksize - *offset == 1) {
+		*length = (unsigned char) bh->b_data[*offset];
+		put_bh(bh);
+		bh = sb_bread(sb, ++(*cur_index));
+		if (bh == NULL)
+			return NULL;
+		*length |= (unsigned char) bh->b_data[0] << 8;
+		*offset = 1;
+	} else {
+		*length = (unsigned char) bh->b_data[*offset] |
+			(unsigned char) bh->b_data[*offset + 1] << 8;
+		*offset += 2;
+	}
+
+	return bh;
+}
+
+
+/*
+ * Read and decompress a metadata block or datablock.  Length is non-zero
+ * if a datablock is being read (the size is stored elsewhere in the
+ * filesystem), otherwise the length is obtained from the first two bytes of
+ * the metadata block.  A bit in the length field indicates if the block
+ * is stored uncompressed in the filesystem (usually because compression
+ * generated a larger block - this does occasionally happen with zlib).
+ */
+int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
+			int length, u64 *next_index, int srclength)
+{
+	struct squashfs_sb_info *msblk = sb->s_fs_info;
+	struct buffer_head **bh;
+	int offset = index & ((1 << msblk->devblksize_log2) - 1);
+	u64 cur_index = index >> msblk->devblksize_log2;
+	int bytes, compressed, b = 0, k = 0, page = 0, avail;
+
+
+	bh = kcalloc((msblk->block_size >> msblk->devblksize_log2) + 1,
+				sizeof(*bh), GFP_KERNEL);
+	if (bh == NULL)
+		return -ENOMEM;
+
+	if (length) {
+		/*
+		 * Datablock.
+		 */
+		bytes = -offset;
+		compressed = SQUASHFS_COMPRESSED_BLOCK(length);
+		length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
+		if (next_index)
+			*next_index = index + length;
+
+		TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
+			index, compressed ? "" : "un", length, srclength);
+
+		if (length < 0 || length > srclength ||
+				(index + length) > msblk->bytes_used)
+			goto read_failure;
+
+		for (b = 0; bytes < length; b++, cur_index++) {
+			bh[b] = sb_getblk(sb, cur_index);
+			if (bh[b] == NULL)
+				goto block_release;
+			bytes += msblk->devblksize;
+		}
+		ll_rw_block(READ, b, bh);
+	} else {
+		/*
+		 * Metadata block.
+		 */
+		if ((index + 2) > msblk->bytes_used)
+			goto read_failure;
+
+		bh[0] = get_block_length(sb, &cur_index, &offset, &length);
+		if (bh[0] == NULL)
+			goto read_failure;
+		b = 1;
+
+		bytes = msblk->devblksize - offset;
+		compressed = SQUASHFS_COMPRESSED(length);
+		length = SQUASHFS_COMPRESSED_SIZE(length);
+		if (next_index)
+			*next_index = index + length + 2;
+
+		TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
+				compressed ? "" : "un", length);
+
+		if (length < 0 || length > srclength ||
+					(index + length) > msblk->bytes_used)
+			goto block_release;
+
+		for (; bytes < length; b++) {
+			bh[b] = sb_getblk(sb, ++cur_index);
+			if (bh[b] == NULL)
+				goto block_release;
+			bytes += msblk->devblksize;
+		}
+		ll_rw_block(READ, b - 1, bh + 1);
+	}
+
+	if (compressed) {
+		int zlib_err = 0, zlib_init = 0;
+
+		/*
+		 * Uncompress block.
+		 */
+
+		mutex_lock(&msblk->read_data_mutex);
+
+		msblk->stream.avail_out = 0;
+		msblk->stream.avail_in = 0;
+
+		bytes = length;
+		do {
+			if (msblk->stream.avail_in == 0 && k < b) {
+				avail = min(bytes, msblk->devblksize - offset);
+				bytes -= avail;
+				wait_on_buffer(bh[k]);
+				if (!buffer_uptodate(bh[k]))
+					goto release_mutex;
+
+				if (avail == 0) {
+					offset = 0;
+					put_bh(bh[k++]);
+					continue;
+				}
+
+				msblk->stream.next_in = bh[k]->b_data + offset;
+				msblk->stream.avail_in = avail;
+				offset = 0;
+			}
+
+			if (msblk->stream.avail_out == 0) {
+				msblk->stream.next_out = buffer[page++];
+				msblk->stream.avail_out = PAGE_CACHE_SIZE;
+			}
+
+			if (!zlib_init) {
+				zlib_err = zlib_inflateInit(&msblk->stream);
+				if (zlib_err != Z_OK) {
+					ERROR("zlib_inflateInit returned"
+						" unexpected result 0x%x,"
+						" srclength %d\n", zlib_err,
+						srclength);
+					goto release_mutex;
+				}
+				zlib_init = 1;
+			}
+
+			zlib_err = zlib_inflate(&msblk->stream, Z_NO_FLUSH);
+
+			if (msblk->stream.avail_in == 0 && k < b)
+				put_bh(bh[k++]);
+		} while (zlib_err == Z_OK);
+
+		if (zlib_err != Z_STREAM_END) {
+			ERROR("zlib_inflate returned unexpected result"
+				" 0x%x, srclength %d, avail_in %d,"
+				" avail_out %d\n", zlib_err, srclength,
+				msblk->stream.avail_in,
+				msblk->stream.avail_out);
+			goto release_mutex;
+		}
+
+		zlib_err = zlib_inflateEnd(&msblk->stream);
+		if (zlib_err != Z_OK) {
+			ERROR("zlib_inflateEnd returned unexpected result 0x%x,"
+				" srclength %d\n", zlib_err, srclength);
+			goto release_mutex;
+		}
+		length = msblk->stream.total_out;
+		mutex_unlock(&msblk->read_data_mutex);
+	} else {
+		/*
+		 * Block is uncompressed.
+		 */
+		int i, in, pg_offset = 0;
+
+		for (i = 0; i < b; i++) {
+			wait_on_buffer(bh[i]);
+			if (!buffer_uptodate(bh[i]))
+				goto block_release;
+		}
+
+		for (bytes = length; k < b; k++) {
+			in = min(bytes, msblk->devblksize - offset);
+			bytes -= in;
+			while (in) {
+				if (pg_offset == PAGE_CACHE_SIZE) {
+					page++;
+					pg_offset = 0;
+				}
+				avail = min_t(int, in, PAGE_CACHE_SIZE -
+						pg_offset);
+				memcpy(buffer[page] + pg_offset,
+						bh[k]->b_data + offset, avail);
+				in -= avail;
+				pg_offset += avail;
+				offset += avail;
+			}
+			offset = 0;
+			put_bh(bh[k]);
+		}
+	}
+
+	kfree(bh);
+	return length;
+
+release_mutex:
+	mutex_unlock(&msblk->read_data_mutex);
+
+block_release:
+	for (; k < b; k++)
+		put_bh(bh[k]);
+
+read_failure:
+	ERROR("sb_bread failed reading block 0x%llx\n", cur_index);
+	kfree(bh);
+	return -EIO;
+}
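
get_block_length() above reads a two-byte little-endian length header that may straddle a device block boundary, which is why it juggles up to two buffer_heads. Assuming a contiguous buffer, the decoding reduces to the sketch below; SQUASHFS_COMPRESSED() and SQUASHFS_COMPRESSED_SIZE() are the macros squashfs_read_data() applies to the result, and the helper name metadata_length() is invented for illustration:

	/* Sketch only (contiguous-buffer assumption); mirrors the byte handling
	 * in get_block_length() and squashfs_read_data() above. */
	static int metadata_length(const unsigned char *p, int *compressed)
	{
		int length = p[0] | (p[1] << 8);	/* little-endian u16 header */

		*compressed = SQUASHFS_COMPRESSED(length);
		return SQUASHFS_COMPRESSED_SIZE(length);
	}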
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
new file mode 100644
index 0000000..f29eda1
--- /dev/null
+++ b/fs/squashfs/cache.c
@@ -0,0 +1,412 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * cache.c
+ */
+
+/*
+ * Blocks in Squashfs are compressed.  To avoid repeatedly decompressing
+ * recently accessed data, Squashfs uses two small metadata and fragment caches.
+ *
+ * This file implements a generic cache implementation used for both caches,
+ * plus functions layered on top of the generic cache implementation to
+ * access the metadata and fragment caches.
+ *
+ * To avoid out-of-memory and fragmentation issues with vmalloc, the cache
+ * uses sequences of kmalloced PAGE_CACHE_SIZE buffers.
+ *
+ * It should be noted that the cache is not used for file datablocks; these
+ * are decompressed and cached in the page-cache in the normal way.  The
+ * cache is only used to temporarily cache fragment and metadata blocks
+ * which have been read as a result of a metadata (i.e. inode or
+ * directory) or fragment access.  Because metadata and fragments are packed
+ * together into blocks (to gain greater compression), the read of a particular
+ * piece of metadata or fragment will retrieve other metadata/fragments which
+ * have been packed with it; because of locality of reference these may be read
+ * in the near future.  Temporarily caching them ensures they are available for
+ * near-future access without requiring an additional read and decompress.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/zlib.h>
+#include <linux/pagemap.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Look up block in cache, and increment usage count.  If not in cache, read
+ * and decompress it from disk.
+ */
+struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
+	struct squashfs_cache *cache, u64 block, int length)
+{
+	int i, n;
+	struct squashfs_cache_entry *entry;
+
+	spin_lock(&cache->lock);
+
+	while (1) {
+		for (i = 0; i < cache->entries; i++)
+			if (cache->entry[i].block == block)
+				break;
+
+		if (i == cache->entries) {
+			/*
+			 * Block not in cache; if all cache entries are used,
+			 * go to sleep waiting for one to become available.
+			 */
+			if (cache->unused == 0) {
+				cache->num_waiters++;
+				spin_unlock(&cache->lock);
+				wait_event(cache->wait_queue, cache->unused);
+				spin_lock(&cache->lock);
+				cache->num_waiters--;
+				continue;
+			}
+
+			/*
+			 * At least one unused cache entry.  A simple
+			 * round-robin strategy is used to choose the entry to
+			 * be evicted from the cache.
+			 */
+			i = cache->next_blk;
+			for (n = 0; n < cache->entries; n++) {
+				if (cache->entry[i].refcount == 0)
+					break;
+				i = (i + 1) % cache->entries;
+			}
+
+			cache->next_blk = (i + 1) % cache->entries;
+			entry = &cache->entry[i];
+
+			/*
+			 * Initialise the chosen cache entry, and fill it in from
+			 * disk.
+			 */
+			cache->unused--;
+			entry->block = block;
+			entry->refcount = 1;
+			entry->pending = 1;
+			entry->num_waiters = 0;
+			entry->error = 0;
+			spin_unlock(&cache->lock);
+
+			entry->length = squashfs_read_data(sb, entry->data,
+				block, length, &entry->next_index,
+				cache->block_size);
+
+			spin_lock(&cache->lock);
+
+			if (entry->length < 0)
+				entry->error = entry->length;
+
+			entry->pending = 0;
+
+			/*
+			 * While we were filling this entry, one or more other
+			 * processes may have looked it up in the cache and
+			 * slept waiting for it to become available.
+			 */
+			if (entry->num_waiters) {
+				spin_unlock(&cache->lock);
+				wake_up_all(&entry->wait_queue);
+			} else
+				spin_unlock(&cache->lock);
+
+			goto out;
+		}
+
+		/*
+		 * Block already in cache.  Increment refcount so it doesn't
+		 * get reused until we're finished with it.  If it was
+		 * previously unused there's one less cache entry available
+		 * for reuse.
+		 */
+		entry = &cache->entry[i];
+		if (entry->refcount == 0)
+			cache->unused--;
+		entry->refcount++;
+
+		/*
+		 * If the entry is currently being filled in by another process,
+		 * go to sleep waiting for it to become available.
+		 */
+		if (entry->pending) {
+			entry->num_waiters++;
+			spin_unlock(&cache->lock);
+			wait_event(entry->wait_queue, !entry->pending);
+		} else
+			spin_unlock(&cache->lock);
+
+		goto out;
+	}
+
+out:
+	TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
+		cache->name, i, entry->block, entry->refcount, entry->error);
+
+	if (entry->error)
+		ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
+							block);
+	return entry;
+}
+
+
+/*
+ * Release cache entry, once usage count is zero it can be reused.
+ */
+void squashfs_cache_put(struct squashfs_cache_entry *entry)
+{
+	struct squashfs_cache *cache = entry->cache;
+
+	spin_lock(&cache->lock);
+	entry->refcount--;
+	if (entry->refcount == 0) {
+		cache->unused++;
+		/*
+		 * If there are any processes waiting for a block to become
+		 * available, wake one up.
+		 */
+		if (cache->num_waiters) {
+			spin_unlock(&cache->lock);
+			wake_up(&cache->wait_queue);
+			return;
+		}
+	}
+	spin_unlock(&cache->lock);
+}
+
+/*
+ * Delete cache reclaiming all kmalloced buffers.
+ */
+void squashfs_cache_delete(struct squashfs_cache *cache)
+{
+	int i, j;
+
+	if (cache == NULL)
+		return;
+
+	for (i = 0; i < cache->entries; i++) {
+		if (cache->entry[i].data) {
+			for (j = 0; j < cache->pages; j++)
+				kfree(cache->entry[i].data[j]);
+			kfree(cache->entry[i].data);
+		}
+	}
+
+	kfree(cache->entry);
+	kfree(cache);
+}
+
+
+/*
+ * Initialise cache, allocating the specified number of entries, each of
+ * size block_size.  To avoid vmalloc fragmentation issues, each entry
+ * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers.
+ */
+struct squashfs_cache *squashfs_cache_init(char *name, int entries,
+	int block_size)
+{
+	int i, j;
+	struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+
+	if (cache == NULL) {
+		ERROR("Failed to allocate %s cache\n", name);
+		return NULL;
+	}
+
+	cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
+	if (cache->entry == NULL) {
+		ERROR("Failed to allocate %s cache\n", name);
+		goto cleanup;
+	}
+
+	cache->next_blk = 0;
+	cache->unused = entries;
+	cache->entries = entries;
+	cache->block_size = block_size;
+	cache->pages = block_size >> PAGE_CACHE_SHIFT;
+	cache->name = name;
+	cache->num_waiters = 0;
+	spin_lock_init(&cache->lock);
+	init_waitqueue_head(&cache->wait_queue);
+
+	for (i = 0; i < entries; i++) {
+		struct squashfs_cache_entry *entry = &cache->entry[i];
+
+		init_waitqueue_head(&cache->entry[i].wait_queue);
+		entry->cache = cache;
+		entry->block = SQUASHFS_INVALID_BLK;
+		entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
+		if (entry->data == NULL) {
+			ERROR("Failed to allocate %s cache entry\n", name);
+			goto cleanup;
+		}
+
+		for (j = 0; j < cache->pages; j++) {
+			entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+			if (entry->data[j] == NULL) {
+				ERROR("Failed to allocate %s buffer\n", name);
+				goto cleanup;
+			}
+		}
+	}
+
+	return cache;
+
+cleanup:
+	squashfs_cache_delete(cache);
+	return NULL;
+}
+
+
+/*
+ * Copy up to length bytes from cache entry to buffer starting at offset bytes
+ * into the cache entry.  If there are fewer than length bytes available then
+ * copy the number of bytes available.  In all cases return the number of bytes
+ * copied.
+ */
+int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
+		int offset, int length)
+{
+	int remaining = length;
+
+	if (length == 0)
+		return 0;
+	else if (buffer == NULL)
+		return min(length, entry->length - offset);
+
+	while (offset < entry->length) {
+		void *buff = entry->data[offset / PAGE_CACHE_SIZE]
+				+ (offset % PAGE_CACHE_SIZE);
+		int bytes = min_t(int, entry->length - offset,
+				PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE));
+
+		if (bytes >= remaining) {
+			memcpy(buffer, buff, remaining);
+			remaining = 0;
+			break;
+		}
+
+		memcpy(buffer, buff, bytes);
+		buffer += bytes;
+		remaining -= bytes;
+		offset += bytes;
+	}
+
+	return length - remaining;
+}
+
+
+/*
+ * Read length bytes from metadata position <block, offset> (block is the
+ * start of the compressed block on disk, and offset is the offset into
+ * the block once decompressed).  Data is packed into consecutive blocks,
+ * and length bytes may require reading more than one block.
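+ * When a read reaches the end of one decompressed block, *block is advanced
+ * to the on-disk start of the following block (entry->next_index) and
+ * *offset is reset to zero, so callers can simply pass the updated cursor
+ * back in on the next call.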
+ */
+int squashfs_read_metadata(struct super_block *sb, void *buffer,
+		u64 *block, int *offset, int length)
+{
+	struct squashfs_sb_info *msblk = sb->s_fs_info;
+	int bytes, copied = length;
+	struct squashfs_cache_entry *entry;
+
+	TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
+
+	while (length) {
+		entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
+		if (entry->error)
+			return entry->error;
+		else if (*offset >= entry->length)
+			return -EIO;
+
+		bytes = squashfs_copy_data(buffer, entry, *offset, length);
+		if (buffer)
+			buffer += bytes;
+		length -= bytes;
+		*offset += bytes;
+
+		if (*offset == entry->length) {
+			*block = entry->next_index;
+			*offset = 0;
+		}
+
+		squashfs_cache_put(entry);
+	}
+
+	return copied;
+}
+
+
+/*
+ * Look-up in the fragment cache the fragment located at <start_block> in the
+ * filesystem.  If necessary read and decompress it from disk.
+ */
+struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb,
+				u64 start_block, int length)
+{
+	struct squashfs_sb_info *msblk = sb->s_fs_info;
+
+	return squashfs_cache_get(sb, msblk->fragment_cache, start_block,
+		length);
+}
+
+
+/*
+ * Read and decompress the datablock located at <start_block> in the
+ * filesystem.  The cache is used here to avoid duplicating locking and
+ * read/decompress code.
+ */
+struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
+				u64 start_block, int length)
+{
+	struct squashfs_sb_info *msblk = sb->s_fs_info;
+
+	return squashfs_cache_get(sb, msblk->read_page, start_block, length);
+}
+
+
+/*
+ * Read a filesystem table (uncompressed sequence of bytes) from disk
+ */
+int squashfs_read_table(struct super_block *sb, void *buffer, u64 block,
+	int length)
+{
+	int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	int i, res;
+	void **data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
+		data[i] = buffer;
+	res = squashfs_read_data(sb, data, block, length |
+		SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length);
+	kfree(data);
+	return res;
+}
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c
new file mode 100644
index 0000000..566b0ea
--- /dev/null
+++ b/fs/squashfs/dir.c
@@ -0,0 +1,235 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * dir.c
+ */
+
+/*
+ * This file implements code to read directories from disk.
+ *
+ * See namei.c for a description of directory organisation on disk.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+static const unsigned char squashfs_filetype_table[] = {
+	DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
+};
+
+/*
+ * Look up offset (f_pos) in the directory index, returning the
+ * metadata block containing it.
+ *
+ * If we get an error reading the index then return the part of the index
+ * (if any) we have managed to read - the index isn't essential, just
+ * quicker.
+ */
+static int get_dir_index_using_offset(struct super_block *sb,
+	u64 *next_block, int *next_offset, u64 index_start, int index_offset,
+	int i_count, u64 f_pos)
+{
+	struct squashfs_sb_info *msblk = sb->s_fs_info;
+	int err, i, index, length = 0;
+	struct squashfs_dir_index dir_index;
+
+	TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %lld\n",
+					i_count, f_pos);
+
+	/*
+	 * Translate from external f_pos to the internal f_pos.  This
+	 * is offset by 3 because we invent "." and ".." entries which are
+	 * not actually stored in the directory.
+	 */
+	if (f_pos < 3)
+		return f_pos;
+	f_pos -= 3;
+
+	for (i = 0; i < i_count; i++) {
+		err = squashfs_read_metadata(sb, &dir_index, &index_start,
+				&index_offset, sizeof(dir_index));
+		if (err < 0)
+			break;
+
+		index = le32_to_cpu(dir_index.index);
+		if (index > f_pos)
+			/*
+			 * Found the index we're looking for.
+			 */
+			break;
+
+		err = squashfs_read_metadata(sb, NULL, &index_start,
+				&index_offset, le32_to_cpu(dir_index.size) + 1);
+		if (err < 0)
+			break;
+
+		length = index;
+		*next_block = le32_to_cpu(dir_index.start_block) +
+					msblk->directory_table;
+	}
+
+	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
+
+	/*
+	 * Translate back from internal f_pos to external f_pos.
+	 */
+	return length + 3;
+}
+
+
+static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+	u64 block = squashfs_i(inode)->start + msblk->directory_table;
+	int offset = squashfs_i(inode)->offset, length = 0, dir_count, size,
+				type, err;
+	unsigned int inode_number;
+	struct squashfs_dir_header dirh;
+	struct squashfs_dir_entry *dire;
+
+	TRACE("Entered squashfs_readdir [%llx:%x]\n", block, offset);
+
+	dire = kmalloc(sizeof(*dire) + SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
+	if (dire == NULL) {
+		ERROR("Failed to allocate squashfs_dir_entry\n");
+		goto finish;
+	}
+
+	/*
+	 * Return "." and  ".." entries as the first two filenames in the
+	 * directory.  To maximise compression these two entries are not
+	 * stored in the directory, and so we invent them here.
+	 *
+	 * It also means that the external f_pos is offset by 3 from the
+	 * on-disk directory f_pos.
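+	 * ("." is returned at f_pos 0 with length 1 and ".." at f_pos 1 with
+	 * length 2, so the first on-disk entry is returned at external f_pos 3,
+	 * which corresponds to on-disk f_pos 0.)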
+	 */
+	while (file->f_pos < 3) {
+		char *name;
+		int i_ino;
+
+		if (file->f_pos == 0) {
+			name = ".";
+			size = 1;
+			i_ino = inode->i_ino;
+		} else {
+			name = "..";
+			size = 2;
+			i_ino = squashfs_i(inode)->parent;
+		}
+
+		TRACE("Calling filldir(%p, %s, %d, %lld, %d, %d)\n",
+				dirent, name, size, file->f_pos, i_ino,
+				squashfs_filetype_table[1]);
+
+		if (filldir(dirent, name, size, file->f_pos, i_ino,
+				squashfs_filetype_table[1]) < 0) {
+			TRACE("Filldir returned less than 0\n");
+			goto finish;
+		}
+
+		file->f_pos += size;
+	}
+
+	length = get_dir_index_using_offset(inode->i_sb, &block, &offset,
+				squashfs_i(inode)->dir_idx_start,
+				squashfs_i(inode)->dir_idx_offset,
+				squashfs_i(inode)->dir_idx_cnt,
+				file->f_pos);
+
+	while (length < i_size_read(inode)) {
+		/*
+		 * Read directory header
+		 */
+		err = squashfs_read_metadata(inode->i_sb, &dirh, &block,
+					&offset, sizeof(dirh));
+		if (err < 0)
+			goto failed_read;
+
+		length += sizeof(dirh);
+
+		dir_count = le32_to_cpu(dirh.count) + 1;
+		while (dir_count--) {
+			/*
+			 * Read directory entry.
+			 */
+			err = squashfs_read_metadata(inode->i_sb, dire, &block,
+					&offset, sizeof(*dire));
+			if (err < 0)
+				goto failed_read;
+
+			size = le16_to_cpu(dire->size) + 1;
+
+			err = squashfs_read_metadata(inode->i_sb, dire->name,
+					&block, &offset, size);
+			if (err < 0)
+				goto failed_read;
+
+			length += sizeof(*dire) + size;
+
+			if (file->f_pos >= length)
+				continue;
+
+			dire->name[size] = '\0';
+			inode_number = le32_to_cpu(dirh.inode_number) +
+				((short) le16_to_cpu(dire->inode_number));
+			type = le16_to_cpu(dire->type);
+
+			TRACE("Calling filldir(%p, %s, %d, %lld, %x:%x, %d, %d)"
+					"\n", dirent, dire->name, size,
+					file->f_pos,
+					le32_to_cpu(dirh.start_block),
+					le16_to_cpu(dire->offset),
+					inode_number,
+					squashfs_filetype_table[type]);
+
+			if (filldir(dirent, dire->name, size, file->f_pos,
+					inode_number,
+					squashfs_filetype_table[type]) < 0) {
+				TRACE("Filldir returned less than 0\n");
+				goto finish;
+			}
+
+			file->f_pos = length;
+		}
+	}
+
+finish:
+	kfree(dire);
+	return 0;
+
+failed_read:
+	ERROR("Unable to read directory block [%llx:%x]\n", block, offset);
+	kfree(dire);
+	return 0;
+}
+
+
+const struct file_operations squashfs_dir_ops = {
+	.read = generic_read_dir,
+	.readdir = squashfs_readdir
+};
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
new file mode 100644
index 0000000..69e971d
--- /dev/null
+++ b/fs/squashfs/export.c
@@ -0,0 +1,155 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * export.c
+ */
+
+/*
+ * This file implements code to make Squashfs filesystems exportable (NFS etc.)
+ *
+ * The export code uses an inode lookup table to map inode numbers passed in
+ * filehandles to an inode location on disk.  This table is stored compressed
+ * into metadata blocks.  A second index table is used to locate these.  This
+ * second index table, for speed of access (and because it is small), is read
+ * at mount time and cached in memory.
+ *
+ * The inode lookup table is used only by the export code; inode disk
+ * locations are directly encoded in directories, enabling direct access
+ * without an intermediate lookup for all operations except the export ops.
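+ *
+ * For example, an NFS filehandle carrying inode number N is resolved by
+ * reading entry N - 1 of the inode lookup table, which yields the
+ * <block, offset> inode location; the inode is then read in the normal way
+ * (see squashfs_inode_lookup() and squashfs_export_iget() below).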
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/dcache.h>
+#include <linux/exportfs.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Look-up inode number (ino) in table, returning the inode location.
+ */
+static long long squashfs_inode_lookup(struct super_block *sb, int ino_num)
+{
+	struct squashfs_sb_info *msblk = sb->s_fs_info;
+	int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1);
+	int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1);
+	u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]);
+	__le64 ino;
+	int err;
+
+	TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num);
+
+	err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino));
+	if (err < 0)
+		return err;
+
+	TRACE("squashfs_inode_lookup, inode = 0x%llx\n",
+		(u64) le64_to_cpu(ino));
+
+	return le64_to_cpu(ino);
+}
+
+
+static struct dentry *squashfs_export_iget(struct super_block *sb,
+	unsigned int ino_num)
+{
+	long long ino;
+	struct dentry *dentry = ERR_PTR(-ENOENT);
+
+	TRACE("Entered squashfs_export_iget\n");
+
+	ino = squashfs_inode_lookup(sb, ino_num);
+	if (ino >= 0)
+		dentry = d_obtain_alias(squashfs_iget(sb, ino, ino_num));
+
+	return dentry;
+}
+
+
+static struct dentry *squashfs_fh_to_dentry(struct super_block *sb,
+		struct fid *fid, int fh_len, int fh_type)
+{
+	if ((fh_type != FILEID_INO32_GEN && fh_type != FILEID_INO32_GEN_PARENT)
+			|| fh_len < 2)
+		return NULL;
+
+	return squashfs_export_iget(sb, fid->i32.ino);
+}
+
+
+static struct dentry *squashfs_fh_to_parent(struct super_block *sb,
+		struct fid *fid, int fh_len, int fh_type)
+{
+	if (fh_type != FILEID_INO32_GEN_PARENT || fh_len < 4)
+		return NULL;
+
+	return squashfs_export_iget(sb, fid->i32.parent_ino);
+}
+
+
+static struct dentry *squashfs_get_parent(struct dentry *child)
+{
+	struct inode *inode = child->d_inode;
+	unsigned int parent_ino = squashfs_i(inode)->parent;
+
+	return squashfs_export_iget(inode->i_sb, parent_ino);
+}
+
+
+/*
+ * Read uncompressed inode lookup table indexes off disk into memory
+ */
+__le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
+		u64 lookup_table_start, unsigned int inodes)
+{
+	unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
+	__le64 *inode_lookup_table;
+	int err;
+
+	TRACE("In read_inode_lookup_table, length %d\n", length);
+
+	/* Allocate inode lookup table indexes */
+	inode_lookup_table = kmalloc(length, GFP_KERNEL);
+	if (inode_lookup_table == NULL) {
+		ERROR("Failed to allocate inode lookup table\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	err = squashfs_read_table(sb, inode_lookup_table, lookup_table_start,
+			length);
+	if (err < 0) {
+		ERROR("unable to read inode lookup table\n");
+		kfree(inode_lookup_table);
+		return ERR_PTR(err);
+	}
+
+	return inode_lookup_table;
+}
+
+
+const struct export_operations squashfs_export_ops = {
+	.fh_to_dentry = squashfs_fh_to_dentry,
+	.fh_to_parent = squashfs_fh_to_parent,
+	.get_parent = squashfs_get_parent
+};
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
new file mode 100644
index 0000000..717767d
--- /dev/null
+++ b/fs/squashfs/file.c
@@ -0,0 +1,502 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * file.c
+ */
+
+/*
+ * This file contains code for handling regular files.  A regular file
+ * consists of a sequence of contiguous compressed blocks, and/or a
+ * compressed fragment block (tail-end packed block).   The compressed size
+ * of each datablock is stored in a block list contained within the
+ * file inode (itself stored in one or more compressed metadata blocks).
+ *
+ * To speed up access to datablocks when reading 'large' files (256 Mbytes or
+ * larger), the code implements an index cache that caches the mapping from
+ * block index to datablock location on disk.
+ *
+ * The index cache allows Squashfs to handle large files (up to 1.75 TiB) while
+ * retaining a simple and space-efficient block list on disk.  The cache
+ * is split into slots, caching up to eight 224 GiB files (128 KiB blocks).
+ * Larger files use multiple slots, with 1.75 TiB files using all 8 slots.
+ * The index cache is designed to be memory efficient, and by default uses
+ * 16 KiB.
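+ *
+ * (The arithmetic: one slot covers up to 224 GiB, so eight slots cover
+ * 8 * 224 GiB = 1792 GiB = 1.75 TiB, the maximum file size quoted above.)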
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/mutex.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Locate cache slot in range [offset, index] for specified inode.  If
+ * there's more than one, return the slot closest to index.
+ */
+static struct meta_index *locate_meta_index(struct inode *inode, int offset,
+				int index)
+{
+	struct meta_index *meta = NULL;
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+	int i;
+
+	mutex_lock(&msblk->meta_index_mutex);
+
+	TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
+
+	if (msblk->meta_index == NULL)
+		goto not_allocated;
+
+	for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
+		if (msblk->meta_index[i].inode_number == inode->i_ino &&
+				msblk->meta_index[i].offset >= offset &&
+				msblk->meta_index[i].offset <= index &&
+				msblk->meta_index[i].locked == 0) {
+			TRACE("locate_meta_index: entry %d, offset %d\n", i,
+					msblk->meta_index[i].offset);
+			meta = &msblk->meta_index[i];
+			offset = meta->offset;
+		}
+	}
+
+	if (meta)
+		meta->locked = 1;
+
+not_allocated:
+	mutex_unlock(&msblk->meta_index_mutex);
+
+	return meta;
+}
+
+
+/*
+ * Find and initialise an empty cache slot for index offset.
+ */
+static struct meta_index *empty_meta_index(struct inode *inode, int offset,
+				int skip)
+{
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+	struct meta_index *meta = NULL;
+	int i;
+
+	mutex_lock(&msblk->meta_index_mutex);
+
+	TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);
+
+	if (msblk->meta_index == NULL) {
+		/*
+		 * First time the cache index has been used; allocate and
+		 * initialise it.  The cache index could be allocated at
+		 * mount time but doing it here means it is allocated only
+		 * if a 'large' file is read.
+		 */
+		msblk->meta_index = kcalloc(SQUASHFS_META_SLOTS,
+			sizeof(*(msblk->meta_index)), GFP_KERNEL);
+		if (msblk->meta_index == NULL) {
+			ERROR("Failed to allocate meta_index\n");
+			goto failed;
+		}
+		for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
+			msblk->meta_index[i].inode_number = 0;
+			msblk->meta_index[i].locked = 0;
+		}
+		msblk->next_meta_index = 0;
+	}
+
+	for (i = SQUASHFS_META_SLOTS; i &&
+			msblk->meta_index[msblk->next_meta_index].locked; i--)
+		msblk->next_meta_index = (msblk->next_meta_index + 1) %
+			SQUASHFS_META_SLOTS;
+
+	if (i == 0) {
+		TRACE("empty_meta_index: failed!\n");
+		goto failed;
+	}
+
+	TRACE("empty_meta_index: returned meta entry %d, %p\n",
+			msblk->next_meta_index,
+			&msblk->meta_index[msblk->next_meta_index]);
+
+	meta = &msblk->meta_index[msblk->next_meta_index];
+	msblk->next_meta_index = (msblk->next_meta_index + 1) %
+			SQUASHFS_META_SLOTS;
+
+	meta->inode_number = inode->i_ino;
+	meta->offset = offset;
+	meta->skip = skip;
+	meta->entries = 0;
+	meta->locked = 1;
+
+failed:
+	mutex_unlock(&msblk->meta_index_mutex);
+	return meta;
+}
+
+
+static void release_meta_index(struct inode *inode, struct meta_index *meta)
+{
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+	mutex_lock(&msblk->meta_index_mutex);
+	meta->locked = 0;
+	mutex_unlock(&msblk->meta_index_mutex);
+}
+
+
+/*
+ * Read the next n blocks from the block list, starting from
+ * metadata block <start_block, offset>.
+ */
+static long long read_indexes(struct super_block *sb, int n,
+				u64 *start_block, int *offset)
+{
+	int err, i;
+	long long block = 0;
+	__le32 *blist = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+
+	if (blist == NULL) {
+		ERROR("read_indexes: Failed to allocate block_list\n");
+		return -ENOMEM;
+	}
+
+	while (n) {
+		int blocks = min_t(int, n, PAGE_CACHE_SIZE >> 2);
+
+		err = squashfs_read_metadata(sb, blist, start_block,
+				offset, blocks << 2);
+		if (err < 0) {
+			ERROR("read_indexes: reading block [%llx:%x]\n",
+				*start_block, *offset);
+			goto failure;
+		}
+
+		for (i = 0; i < blocks; i++) {
+			int size = le32_to_cpu(blist[i]);
+			block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
+		}
+		n -= blocks;
+	}
+
+	kfree(blist);
+	return block;
+
+failure:
+	kfree(blist);
+	return err;
+}
+
+
+/*
+ * Each cache index slot has SQUASHFS_META_ENTRIES entries, each of which
+ * can cache one index -> datablock/blocklist-block mapping.  We wish
+ * to distribute these over the length of the file: entry[0] maps index x,
+ * entry[1] maps index x + skip, entry[2] maps index x + 2 * skip, and so on.
+ * The larger the file, the greater the skip factor.  The skip factor is
+ * limited to the size of the metadata cache (SQUASHFS_CACHED_BLKS) to ensure
+ * the number of metadata blocks that need to be read fits into the cache.
+ * If the skip factor is limited in this way then the file will use multiple
+ * slots.
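+ *
+ * For illustration: a file whose block count does not exceed
+ * (SQUASHFS_META_ENTRIES + 1) * SQUASHFS_META_INDEXES gets the minimum
+ * skip factor of 1; larger files get a proportionally larger skip factor,
+ * capped at SQUASHFS_CACHED_BLKS - 1 for the reason described above.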
+ */
+static inline int calculate_skip(int blocks)
+{
+	int skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
+		 * SQUASHFS_META_INDEXES);
+	return min(SQUASHFS_CACHED_BLKS - 1, skip + 1);
+}
+
+
+/*
+ * Search and grow the index cache for the specified inode, returning the
+ * on-disk locations of the datablock and block list metadata block
+ * <index_block, index_offset> for index (scaled to nearest cache index).
+ */
+static int fill_meta_index(struct inode *inode, int index,
+		u64 *index_block, int *index_offset, u64 *data_block)
+{
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+	int skip = calculate_skip(i_size_read(inode) >> msblk->block_log);
+	int offset = 0;
+	struct meta_index *meta;
+	struct meta_entry *meta_entry;
+	u64 cur_index_block = squashfs_i(inode)->block_list_start;
+	int cur_offset = squashfs_i(inode)->offset;
+	u64 cur_data_block = squashfs_i(inode)->start;
+	int err, i;
+
+	/*
+	 * Scale index to cache index (cache slot entry)
+	 */
+	index /= SQUASHFS_META_INDEXES * skip;
+
+	while (offset < index) {
+		meta = locate_meta_index(inode, offset + 1, index);
+
+		if (meta == NULL) {
+			meta = empty_meta_index(inode, offset + 1, skip);
+			if (meta == NULL)
+				goto all_done;
+		} else {
+			offset = index < meta->offset + meta->entries ? index :
+				meta->offset + meta->entries - 1;
+			meta_entry = &meta->meta_entry[offset - meta->offset];
+			cur_index_block = meta_entry->index_block +
+				msblk->inode_table;
+			cur_offset = meta_entry->offset;
+			cur_data_block = meta_entry->data_block;
+			TRACE("get_meta_index: offset %d, meta->offset %d, "
+				"meta->entries %d\n", offset, meta->offset,
+				meta->entries);
+			TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
+				" data_block 0x%llx\n", cur_index_block,
+				cur_offset, cur_data_block);
+		}
+
+		/*
+		 * If necessary, grow the cache slot by reading the block list.  Cache
+		 * slot is extended up to index or to the end of the slot, in
+		 * which case further slots will be used.
+		 */
+		for (i = meta->offset + meta->entries; i <= index &&
+				i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
+			int blocks = skip * SQUASHFS_META_INDEXES;
+			long long res = read_indexes(inode->i_sb, blocks,
+					&cur_index_block, &cur_offset);
+
+			if (res < 0) {
+				if (meta->entries == 0)
+					/*
+					 * Don't leave an empty slot on read
+					 * error allocated to this inode...
+					 */
+					meta->inode_number = 0;
+				err = res;
+				goto failed;
+			}
+
+			cur_data_block += res;
+			meta_entry = &meta->meta_entry[i - meta->offset];
+			meta_entry->index_block = cur_index_block -
+				msblk->inode_table;
+			meta_entry->offset = cur_offset;
+			meta_entry->data_block = cur_data_block;
+			meta->entries++;
+			offset++;
+		}
+
+		TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
+				meta->offset, meta->entries);
+
+		release_meta_index(inode, meta);
+	}
+
+all_done:
+	*index_block = cur_index_block;
+	*index_offset = cur_offset;
+	*data_block = cur_data_block;
+
+	/*
+	 * Scale cache index (cache slot entry) to index
+	 */
+	return offset * SQUASHFS_META_INDEXES * skip;
+
+failed:
+	release_meta_index(inode, meta);
+	return err;
+}
+
+
+/*
+ * Get the on-disk location and compressed size of the datablock
+ * specified by index.  Fill_meta_index() does most of the work.
+ */
+static int read_blocklist(struct inode *inode, int index, u64 *block)
+{
+	u64 start;
+	long long blks;
+	int offset;
+	__le32 size;
+	int res = fill_meta_index(inode, index, &start, &offset, block);
+
+	TRACE("read_blocklist: res %d, index %d, start 0x%llx, offset"
+		       " 0x%x, block 0x%llx\n", res, index, start, offset,
+			*block);
+
+	if (res < 0)
+		return res;
+
+	/*
+	 * res contains the index of the mapping returned by fill_meta_index();
+	 * this will likely be less than the desired index (because the
+	 * meta_index cache works at a higher granularity).  Read any
+	 * extra block indexes needed.
+	 */
+	if (res < index) {
+		blks = read_indexes(inode->i_sb, index - res, &start, &offset);
+		if (blks < 0)
+			return (int) blks;
+		*block += blks;
+	}
+
+	/*
+	 * Read length of block specified by index.
+	 */
+	res = squashfs_read_metadata(inode->i_sb, &size, &start, &offset,
+			sizeof(size));
+	if (res < 0)
+		return res;
+	return le32_to_cpu(size);
+}
+
+
+static int squashfs_readpage(struct file *file, struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+	int bytes, i, offset = 0, sparse = 0;
+	struct squashfs_cache_entry *buffer = NULL;
+	void *pageaddr;
+
+	int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
+	int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT);
+	int start_index = page->index & ~mask;
+	int end_index = start_index | mask;
+	int file_end = i_size_read(inode) >> msblk->block_log;
+
+	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
+				page->index, squashfs_i(inode)->start);
+
+	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+					PAGE_CACHE_SHIFT))
+		goto out;
+
+	if (index < file_end || squashfs_i(inode)->fragment_block ==
+					SQUASHFS_INVALID_BLK) {
+		/*
+		 * Reading a datablock from disk.  Need to read block list
+		 * to get location and block size.
+		 */
+		u64 block = 0;
+		int bsize = read_blocklist(inode, index, &block);
+		if (bsize < 0)
+			goto error_out;
+
+		if (bsize == 0) { /* hole */
+			bytes = index == file_end ?
+				(i_size_read(inode) & (msblk->block_size - 1)) :
+				 msblk->block_size;
+			sparse = 1;
+		} else {
+			/*
+			 * Read and decompress datablock.
+			 */
+			buffer = squashfs_get_datablock(inode->i_sb,
+								block, bsize);
+			if (buffer->error) {
+				ERROR("Unable to read page, block %llx, size %x"
+					"\n", block, bsize);
+				squashfs_cache_put(buffer);
+				goto error_out;
+			}
+			bytes = buffer->length;
+		}
+	} else {
+		/*
+		 * Datablock is stored inside a fragment (tail-end packed
+		 * block).
+		 */
+		buffer = squashfs_get_fragment(inode->i_sb,
+				squashfs_i(inode)->fragment_block,
+				squashfs_i(inode)->fragment_size);
+
+		if (buffer->error) {
+			ERROR("Unable to read page, block %llx, size %x\n",
+				squashfs_i(inode)->fragment_block,
+				squashfs_i(inode)->fragment_size);
+			squashfs_cache_put(buffer);
+			goto error_out;
+		}
+		bytes = i_size_read(inode) & (msblk->block_size - 1);
+		offset = squashfs_i(inode)->fragment_offset;
+	}
+
+	/*
+	 * Loop copying datablock into pages.  As the datablock likely covers
+	 * many PAGE_CACHE_SIZE pages (default block size is 128 KiB), explicitly
+	 * grab the pages from the page cache, except for the page that we've
+	 * been called to fill.
+	 */
+	for (i = start_index; i <= end_index && bytes > 0; i++,
+			bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
+		struct page *push_page;
+		int avail = sparse ? 0 : min_t(int, bytes, PAGE_CACHE_SIZE);
+
+		TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
+
+		push_page = (i == page->index) ? page :
+			grab_cache_page_nowait(page->mapping, i);
+
+		if (!push_page)
+			continue;
+
+		if (PageUptodate(push_page))
+			goto skip_page;
+
+		pageaddr = kmap_atomic(push_page, KM_USER0);
+		squashfs_copy_data(pageaddr, buffer, offset, avail);
+		memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
+		kunmap_atomic(pageaddr, KM_USER0);
+		flush_dcache_page(push_page);
+		SetPageUptodate(push_page);
+skip_page:
+		unlock_page(push_page);
+		if (i != page->index)
+			page_cache_release(push_page);
+	}
+
+	if (!sparse)
+		squashfs_cache_put(buffer);
+
+	return 0;
+
+error_out:
+	SetPageError(page);
+out:
+	pageaddr = kmap_atomic(page, KM_USER0);
+	memset(pageaddr, 0, PAGE_CACHE_SIZE);
+	kunmap_atomic(pageaddr, KM_USER0);
+	flush_dcache_page(page);
+	if (!PageError(page))
+		SetPageUptodate(page);
+	unlock_page(page);
+
+	return 0;
+}
+
+
+const struct address_space_operations squashfs_aops = {
+	.readpage = squashfs_readpage
+};
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
new file mode 100644
index 0000000..b5a2c15
--- /dev/null
+++ b/fs/squashfs/fragment.c
@@ -0,0 +1,98 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * fragment.c
+ */
+
+/*
+ * This file implements code to handle compressed fragments (tail-end packed
+ * datablocks).
+ *
+ * Regular files contain a fragment index which is mapped to a fragment
+ * location on disk and compressed size using a fragment lookup table.
+ * Like everything in Squashfs this fragment lookup table is itself stored
+ * compressed into metadata blocks.  A second index table is used to locate
+ * these.  This second index table, for speed of access (and because it
+ * is small), is read at mount time and cached in memory.
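+ *
+ * Resolving a fragment index is therefore a two step lookup: the in-memory
+ * index table locates the metadata block holding the relevant
+ * squashfs_fragment_entry, and that entry gives the fragment's on-disk
+ * location and compressed size (see squashfs_frag_lookup() below).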
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Look-up fragment using the fragment index table.  Return the on disk
+ * location of the fragment and its compressed size
+ */
+int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
+				u64 *fragment_block)
+{
+	struct squashfs_sb_info *msblk = sb->s_fs_info;
+	int block = SQUASHFS_FRAGMENT_INDEX(fragment);
+	int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
+	u64 start_block = le64_to_cpu(msblk->fragment_index[block]);
+	struct squashfs_fragment_entry fragment_entry;
+	int size;
+
+	size = squashfs_read_metadata(sb, &fragment_entry, &start_block,
+					&offset, sizeof(fragment_entry));
+	if (size < 0)
+		return size;
+
+	*fragment_block = le64_to_cpu(fragment_entry.start_block);
+	size = le32_to_cpu(fragment_entry.size);
+
+	return size;
+}
+
+
+/*
+ * Read the uncompressed fragment lookup table indexes off disk into memory
+ */
+__le64 *squashfs_read_fragment_index_table(struct super_block *sb,
+	u64 fragment_table_start, unsigned int fragments)
+{
+	unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(fragments);
+	__le64 *fragment_index;
+	int err;
+
+	/* Allocate fragment lookup table indexes */
+	fragment_index = kmalloc(length, GFP_KERNEL);
+	if (fragment_index == NULL) {
+		ERROR("Failed to allocate fragment index table\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	err = squashfs_read_table(sb, fragment_index, fragment_table_start,
+			length);
+	if (err < 0) {
+		ERROR("unable to read fragment index table\n");
+		kfree(fragment_index);
+		return ERR_PTR(err);
+	}
+
+	return fragment_index;
+}
diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
new file mode 100644
index 0000000..3795b83
--- /dev/null
+++ b/fs/squashfs/id.c
@@ -0,0 +1,94 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * id.c
+ */
+
+/*
+ * This file implements code to handle uids and gids.
+ *
+ * For space efficiency regular files store uid and gid indexes, which are
+ * converted to 32-bit uids/gids using an id look up table.  This table is
+ * stored compressed into metadata blocks.  A second index table is used to
+ * locate these.  This second index table, for speed of access (and because
+ * it is small), is read at mount time and cached in memory.
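+ *
+ * For example, the 16-bit uid index stored in an inode is resolved via the
+ * in-memory index table to the metadata block holding the corresponding
+ * 32-bit uid (see squashfs_get_id() below).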
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Map uid/gid index into real 32-bit uid/gid using the id look up table
+ */
+int squashfs_get_id(struct super_block *sb, unsigned int index,
+					unsigned int *id)
+{
+	struct squashfs_sb_info *msblk = sb->s_fs_info;
+	int block = SQUASHFS_ID_BLOCK(index);
+	int offset = SQUASHFS_ID_BLOCK_OFFSET(index);
+	u64 start_block = le64_to_cpu(msblk->id_table[block]);
+	__le32 disk_id;
+	int err;
+
+	err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset,
+							sizeof(disk_id));
+	if (err < 0)
+		return err;
+
+	*id = le32_to_cpu(disk_id);
+	return 0;
+}
+
+
+/*
+ * Read uncompressed id lookup table indexes from disk into memory
+ */
+__le64 *squashfs_read_id_index_table(struct super_block *sb,
+			u64 id_table_start, unsigned short no_ids)
+{
+	unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
+	__le64 *id_table;
+	int err;
+
+	TRACE("In read_id_index_table, length %d\n", length);
+
+	/* Allocate id lookup table indexes */
+	id_table = kmalloc(length, GFP_KERNEL);
+	if (id_table == NULL) {
+		ERROR("Failed to allocate id index table\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	err = squashfs_read_table(sb, id_table, id_table_start, length);
+	if (err < 0) {
+		ERROR("unable to read id index table\n");
+		kfree(id_table);
+		return ERR_PTR(err);
+	}
+
+	return id_table;
+}
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
new file mode 100644
index 0000000..7a63398
--- /dev/null
+++ b/fs/squashfs/inode.c
@@ -0,0 +1,346 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * inode.c
+ */
+
+/*
+ * This file implements code to create and read inodes from disk.
+ *
+ * Inodes in Squashfs are identified by a 48-bit inode number which encodes the
+ * location of the compressed metadata block containing the inode, and the byte
+ * offset into that block where the inode is placed (<block, offset>).
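+ *
+ * The value is decoded with the SQUASHFS_INODE_BLK() and
+ * SQUASHFS_INODE_OFFSET() macros in squashfs_read_inode() below; directory
+ * entries store the same <block, offset> pair, which namei.c recombines
+ * with SQUASHFS_MKINODE() when looking an inode up by name.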
+ *
+ * To maximise compression there are different inodes for each file type
+ * (regular file, directory, device, etc.), the inode contents and length
+ * varying with the type.
+ *
+ * To further maximise compression, two types of regular file inode and
+ * directory inode are defined: inodes optimised for frequently occurring
+ * regular files and directories, and extended types where extra
+ * information has to be stored.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Initialise VFS inode with the base inode information common to all
+ * Squashfs inode types.  Sqsh_ino contains the unswapped base inode
+ * off disk.
+ */
+static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
+				struct squashfs_base_inode *sqsh_ino)
+{
+	int err;
+
+	err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &inode->i_uid);
+	if (err)
+		return err;
+
+	err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->guid), &inode->i_gid);
+	if (err)
+		return err;
+
+	inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
+	inode->i_mtime.tv_sec = le32_to_cpu(sqsh_ino->mtime);
+	inode->i_atime.tv_sec = inode->i_mtime.tv_sec;
+	inode->i_ctime.tv_sec = inode->i_mtime.tv_sec;
+	inode->i_mode = le16_to_cpu(sqsh_ino->mode);
+	inode->i_size = 0;
+
+	return err;
+}
+
+
+struct inode *squashfs_iget(struct super_block *sb, long long ino,
+				unsigned int ino_number)
+{
+	struct inode *inode = iget_locked(sb, ino_number);
+	int err;
+
+	TRACE("Entered squashfs_iget\n");
+
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+	if (!(inode->i_state & I_NEW))
+		return inode;
+
+	err = squashfs_read_inode(inode, ino);
+	if (err) {
+		iget_failed(inode);
+		return ERR_PTR(err);
+	}
+
+	unlock_new_inode(inode);
+	return inode;
+}
+
+
+/*
+ * Initialise VFS inode by reading inode from inode table (compressed
+ * metadata).  The format and amount of data read depends on type.
+ */
+int squashfs_read_inode(struct inode *inode, long long ino)
+{
+	struct super_block *sb = inode->i_sb;
+	struct squashfs_sb_info *msblk = sb->s_fs_info;
+	u64 block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table;
+	int err, type, offset = SQUASHFS_INODE_OFFSET(ino);
+	union squashfs_inode squashfs_ino;
+	struct squashfs_base_inode *sqshb_ino = &squashfs_ino.base;
+
+	TRACE("Entered squashfs_read_inode\n");
+
+	/*
+	 * Read inode base common to all inode types.
+	 */
+	err = squashfs_read_metadata(sb, sqshb_ino, &block,
+				&offset, sizeof(*sqshb_ino));
+	if (err < 0)
+		goto failed_read;
+
+	err = squashfs_new_inode(sb, inode, sqshb_ino);
+	if (err)
+		goto failed_read;
+
+	block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table;
+	offset = SQUASHFS_INODE_OFFSET(ino);
+
+	type = le16_to_cpu(sqshb_ino->inode_type);
+	switch (type) {
+	case SQUASHFS_REG_TYPE: {
+		unsigned int frag_offset, frag;
+		int frag_size;
+		u64 frag_blk;
+		struct squashfs_reg_inode *sqsh_ino = &squashfs_ino.reg;
+
+		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+							sizeof(*sqsh_ino));
+		if (err < 0)
+			goto failed_read;
+
+		frag = le32_to_cpu(sqsh_ino->fragment);
+		if (frag != SQUASHFS_INVALID_FRAG) {
+			frag_offset = le32_to_cpu(sqsh_ino->offset);
+			frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
+			if (frag_size < 0) {
+				err = frag_size;
+				goto failed_read;
+			}
+		} else {
+			frag_blk = SQUASHFS_INVALID_BLK;
+			frag_size = 0;
+			frag_offset = 0;
+		}
+
+		inode->i_nlink = 1;
+		inode->i_size = le32_to_cpu(sqsh_ino->file_size);
+		inode->i_fop = &generic_ro_fops;
+		inode->i_mode |= S_IFREG;
+		inode->i_blocks = ((inode->i_size - 1) >> 9) + 1;
+		squashfs_i(inode)->fragment_block = frag_blk;
+		squashfs_i(inode)->fragment_size = frag_size;
+		squashfs_i(inode)->fragment_offset = frag_offset;
+		squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
+		squashfs_i(inode)->block_list_start = block;
+		squashfs_i(inode)->offset = offset;
+		inode->i_data.a_ops = &squashfs_aops;
+
+		TRACE("File inode %x:%x, start_block %llx, block_list_start "
+			"%llx, offset %x\n", SQUASHFS_INODE_BLK(ino),
+			offset, squashfs_i(inode)->start, block, offset);
+		break;
+	}
+	case SQUASHFS_LREG_TYPE: {
+		unsigned int frag_offset, frag;
+		int frag_size;
+		u64 frag_blk;
+		struct squashfs_lreg_inode *sqsh_ino = &squashfs_ino.lreg;
+
+		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+							sizeof(*sqsh_ino));
+		if (err < 0)
+			goto failed_read;
+
+		frag = le32_to_cpu(sqsh_ino->fragment);
+		if (frag != SQUASHFS_INVALID_FRAG) {
+			frag_offset = le32_to_cpu(sqsh_ino->offset);
+			frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
+			if (frag_size < 0) {
+				err = frag_size;
+				goto failed_read;
+			}
+		} else {
+			frag_blk = SQUASHFS_INVALID_BLK;
+			frag_size = 0;
+			frag_offset = 0;
+		}
+
+		inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
+		inode->i_size = le64_to_cpu(sqsh_ino->file_size);
+		inode->i_fop = &generic_ro_fops;
+		inode->i_mode |= S_IFREG;
+		inode->i_blocks = ((inode->i_size -
+				le64_to_cpu(sqsh_ino->sparse) - 1) >> 9) + 1;
+
+		squashfs_i(inode)->fragment_block = frag_blk;
+		squashfs_i(inode)->fragment_size = frag_size;
+		squashfs_i(inode)->fragment_offset = frag_offset;
+		squashfs_i(inode)->start = le64_to_cpu(sqsh_ino->start_block);
+		squashfs_i(inode)->block_list_start = block;
+		squashfs_i(inode)->offset = offset;
+		inode->i_data.a_ops = &squashfs_aops;
+
+		TRACE("File inode %x:%x, start_block %llx, block_list_start "
+			"%llx, offset %x\n", SQUASHFS_INODE_BLK(ino),
+			offset, squashfs_i(inode)->start, block, offset);
+		break;
+	}
+	case SQUASHFS_DIR_TYPE: {
+		struct squashfs_dir_inode *sqsh_ino = &squashfs_ino.dir;
+
+		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+				sizeof(*sqsh_ino));
+		if (err < 0)
+			goto failed_read;
+
+		inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
+		inode->i_size = le16_to_cpu(sqsh_ino->file_size);
+		inode->i_op = &squashfs_dir_inode_ops;
+		inode->i_fop = &squashfs_dir_ops;
+		inode->i_mode |= S_IFDIR;
+		squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
+		squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset);
+		squashfs_i(inode)->dir_idx_cnt = 0;
+		squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode);
+
+		TRACE("Directory inode %x:%x, start_block %llx, offset %x\n",
+				SQUASHFS_INODE_BLK(ino), offset,
+				squashfs_i(inode)->start,
+				le16_to_cpu(sqsh_ino->offset));
+		break;
+	}
+	case SQUASHFS_LDIR_TYPE: {
+		struct squashfs_ldir_inode *sqsh_ino = &squashfs_ino.ldir;
+
+		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+				sizeof(*sqsh_ino));
+		if (err < 0)
+			goto failed_read;
+
+		inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
+		inode->i_size = le32_to_cpu(sqsh_ino->file_size);
+		inode->i_op = &squashfs_dir_inode_ops;
+		inode->i_fop = &squashfs_dir_ops;
+		inode->i_mode |= S_IFDIR;
+		squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
+		squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset);
+		squashfs_i(inode)->dir_idx_start = block;
+		squashfs_i(inode)->dir_idx_offset = offset;
+		squashfs_i(inode)->dir_idx_cnt = le16_to_cpu(sqsh_ino->i_count);
+		squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode);
+
+		TRACE("Long directory inode %x:%x, start_block %llx, offset "
+				"%x\n", SQUASHFS_INODE_BLK(ino), offset,
+				squashfs_i(inode)->start,
+				le16_to_cpu(sqsh_ino->offset));
+		break;
+	}
+	case SQUASHFS_SYMLINK_TYPE:
+	case SQUASHFS_LSYMLINK_TYPE: {
+		struct squashfs_symlink_inode *sqsh_ino = &squashfs_ino.symlink;
+
+		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+				sizeof(*sqsh_ino));
+		if (err < 0)
+			goto failed_read;
+
+		inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
+		inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
+		inode->i_op = &page_symlink_inode_operations;
+		inode->i_data.a_ops = &squashfs_symlink_aops;
+		inode->i_mode |= S_IFLNK;
+		squashfs_i(inode)->start = block;
+		squashfs_i(inode)->offset = offset;
+
+		TRACE("Symbolic link inode %x:%x, start_block %llx, offset "
+				"%x\n", SQUASHFS_INODE_BLK(ino), offset,
+				block, offset);
+		break;
+	}
+	case SQUASHFS_BLKDEV_TYPE:
+	case SQUASHFS_CHRDEV_TYPE:
+	case SQUASHFS_LBLKDEV_TYPE:
+	case SQUASHFS_LCHRDEV_TYPE: {
+		struct squashfs_dev_inode *sqsh_ino = &squashfs_ino.dev;
+		unsigned int rdev;
+
+		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+				sizeof(*sqsh_ino));
+		if (err < 0)
+			goto failed_read;
+
+		if (type == SQUASHFS_CHRDEV_TYPE)
+			inode->i_mode |= S_IFCHR;
+		else
+			inode->i_mode |= S_IFBLK;
+		inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
+		rdev = le32_to_cpu(sqsh_ino->rdev);
+		init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
+
+		TRACE("Device inode %x:%x, rdev %x\n",
+				SQUASHFS_INODE_BLK(ino), offset, rdev);
+		break;
+	}
+	case SQUASHFS_FIFO_TYPE:
+	case SQUASHFS_SOCKET_TYPE:
+	case SQUASHFS_LFIFO_TYPE:
+	case SQUASHFS_LSOCKET_TYPE: {
+		struct squashfs_ipc_inode *sqsh_ino = &squashfs_ino.ipc;
+
+		err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+				sizeof(*sqsh_ino));
+		if (err < 0)
+			goto failed_read;
+
+		if (type == SQUASHFS_FIFO_TYPE)
+			inode->i_mode |= S_IFIFO;
+		else
+			inode->i_mode |= S_IFSOCK;
+		inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
+		init_special_inode(inode, inode->i_mode, 0);
+		break;
+	}
+	default:
+		ERROR("Unknown inode type %d in squashfs_iget!\n", type);
+		return -EINVAL;
+	}
+
+	return 0;
+
+failed_read:
+	ERROR("Unable to read inode 0x%llx\n", ino);
+	return err;
+}
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
new file mode 100644
index 0000000..9e39865
--- /dev/null
+++ b/fs/squashfs/namei.c
@@ -0,0 +1,242 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * namei.c
+ */
+
+/*
+ * This file implements code to do filename lookup in directories.
+ *
+ * Like inodes, directories are packed into compressed metadata blocks, stored
+ * in a directory table.  Directories are accessed using the start address of
+ * the metablock containing the directory and the offset into the
+ * decompressed block (<block, offset>).
+ *
+ * Directories are organised in a slightly complex way, and are not simply
+ * a list of file names.  The organisation takes advantage of the
+ * fact that (in most cases) the inodes of the files will be in the same
+ * compressed metadata block, and can therefore share the start block.
+ * Directories are therefore organised in a two level list: a directory
+ * header containing the shared start block value, and a sequence of directory
+ * entries, each of which shares that start block.  A new directory header
+ * is written once/if the inode start block changes.  The directory
+ * header/directory entry list is repeated as many times as necessary.
+ *
+ * Directories are sorted, and can contain a directory index to speed up
+ * file lookup.  Directory indexes store one entry per metablock, each entry
+ * storing the index/filename mapping to the first directory header
+ * in each metadata block.  Directories are sorted in alphabetical order,
+ * and at lookup the index is scanned linearly looking for the first filename
+ * alphabetically larger than the filename being looked up.  At this point the
+ * location of the metadata block the filename is in has been found.
+ * The general idea of the index is to ensure only one metadata block needs to
+ * be decompressed to do a lookup irrespective of the length of the directory.
+ * This scheme has the advantage that it doesn't require extra memory overhead
+ * and doesn't require much extra storage on disk.
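+ *
+ * For example, with an index recording the first filename in each metadata
+ * block as "apple", "mango" and "plum", a lookup of "kiwi" stops at "mango"
+ * (the first name alphabetically larger than "kiwi"), so only the block
+ * starting at "apple" is decompressed and scanned; a lookup of "zebra"
+ * scans only the block starting at "plum".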
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/dcache.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Look up name in the directory index, returning the location of the metadata
+ * block containing it, and the directory index this represents.
+ *
+ * If we get an error reading the index then return the part of the index
+ * (if any) we have managed to read - the index isn't essential, just
+ * quicker.
+ */
+static int get_dir_index_using_name(struct super_block *sb,
+			u64 *next_block, int *next_offset, u64 index_start,
+			int index_offset, int i_count, const char *name,
+			int len)
+{
+	struct squashfs_sb_info *msblk = sb->s_fs_info;
+	int i, size, length = 0, err;
+	struct squashfs_dir_index *index;
+	char *str;
+
+	TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
+
+	index = kmalloc(sizeof(*index) + SQUASHFS_NAME_LEN * 2 + 2, GFP_KERNEL);
+	if (index == NULL) {
+		ERROR("Failed to allocate squashfs_dir_index\n");
+		goto out;
+	}
+
+	str = &index->name[SQUASHFS_NAME_LEN + 1];
+	strncpy(str, name, len);
+	str[len] = '\0';
+
+	for (i = 0; i < i_count; i++) {
+		err = squashfs_read_metadata(sb, index, &index_start,
+					&index_offset, sizeof(*index));
+		if (err < 0)
+			break;
+
+		size = le32_to_cpu(index->size) + 1;
+
+		err = squashfs_read_metadata(sb, index->name, &index_start,
+					&index_offset, size);
+		if (err < 0)
+			break;
+
+		index->name[size] = '\0';
+
+		if (strcmp(index->name, str) > 0)
+			break;
+
+		length = le32_to_cpu(index->index);
+		*next_block = le32_to_cpu(index->start_block) +
+					msblk->directory_table;
+	}
+
+	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
+	kfree(index);
+
+out:
+	/*
+	 * Return index (f_pos) of the looked up metadata block.  Translate
+	 * from internal f_pos to external f_pos which is offset by 3 because
+	 * we invent "." and ".." entries which are not actually stored in the
+	 * directory.
+	 */
+	return length + 3;
+}
+
+
+static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry,
+				 struct nameidata *nd)
+{
+	const unsigned char *name = dentry->d_name.name;
+	int len = dentry->d_name.len;
+	struct inode *inode = NULL;
+	struct squashfs_sb_info *msblk = dir->i_sb->s_fs_info;
+	struct squashfs_dir_header dirh;
+	struct squashfs_dir_entry *dire;
+	u64 block = squashfs_i(dir)->start + msblk->directory_table;
+	int offset = squashfs_i(dir)->offset;
+	int err, length = 0, dir_count, size;
+
+	TRACE("Entered squashfs_lookup [%llx:%x]\n", block, offset);
+
+	dire = kmalloc(sizeof(*dire) + SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
+	if (dire == NULL) {
+		ERROR("Failed to allocate squashfs_dir_entry\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (len > SQUASHFS_NAME_LEN) {
+		err = -ENAMETOOLONG;
+		goto failed;
+	}
+
+	length = get_dir_index_using_name(dir->i_sb, &block, &offset,
+				squashfs_i(dir)->dir_idx_start,
+				squashfs_i(dir)->dir_idx_offset,
+				squashfs_i(dir)->dir_idx_cnt, name, len);
+
+	while (length < i_size_read(dir)) {
+		/*
+		 * Read directory header.
+		 */
+		err = squashfs_read_metadata(dir->i_sb, &dirh, &block,
+				&offset, sizeof(dirh));
+		if (err < 0)
+			goto read_failure;
+
+		length += sizeof(dirh);
+
+		dir_count = le32_to_cpu(dirh.count) + 1;
+		while (dir_count--) {
+			/*
+			 * Read directory entry.
+			 */
+			err = squashfs_read_metadata(dir->i_sb, dire, &block,
+					&offset, sizeof(*dire));
+			if (err < 0)
+				goto read_failure;
+
+			size = le16_to_cpu(dire->size) + 1;
+
+			err = squashfs_read_metadata(dir->i_sb, dire->name,
+					&block, &offset, size);
+			if (err < 0)
+				goto read_failure;
+
+			length += sizeof(*dire) + size;
+
+			if (name[0] < dire->name[0])
+				goto exit_lookup;
+
+			if (len == size && !strncmp(name, dire->name, len)) {
+				unsigned int blk, off, ino_num;
+				long long ino;
+				blk = le32_to_cpu(dirh.start_block);
+				off = le16_to_cpu(dire->offset);
+				ino_num = le32_to_cpu(dirh.inode_number) +
+					(short) le16_to_cpu(dire->inode_number);
+				ino = SQUASHFS_MKINODE(blk, off);
+
+				TRACE("calling squashfs_iget for directory "
+					"entry %s, inode  %x:%x, %d\n", name,
+					blk, off, ino_num);
+
+				inode = squashfs_iget(dir->i_sb, ino, ino_num);
+				if (IS_ERR(inode)) {
+					err = PTR_ERR(inode);
+					goto failed;
+				}
+
+				goto exit_lookup;
+			}
+		}
+	}
+
+exit_lookup:
+	kfree(dire);
+	if (inode)
+		return d_splice_alias(inode, dentry);
+	d_add(dentry, inode);
+	return ERR_PTR(0);
+
+read_failure:
+	ERROR("Unable to read directory block [%llx:%x]\n",
+		squashfs_i(dir)->start + msblk->directory_table,
+		squashfs_i(dir)->offset);
+failed:
+	kfree(dire);
+	return ERR_PTR(err);
+}
+
+
+const struct inode_operations squashfs_dir_inode_ops = {
+	.lookup = squashfs_lookup
+};
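
The 64-bit inode value handed to squashfs_iget() above is not an on-disk field: it is the directory entry's location, packed as a (metadata block, offset) pair with SQUASHFS_MKINODE() and unpacked again with SQUASHFS_INODE_BLK()/SQUASHFS_INODE_OFFSET(), all defined in squashfs_fs.h below.  A minimal sketch with invented values:

	/* Sketch only; assumes squashfs_fs.h is included and the values are invented. */
	static long long squashfs_ino_example(void)
	{
		unsigned int blk = 0x1234;	/* metadata block holding the inode */
		unsigned int off = 0x0056;	/* offset inside the uncompressed block */
		long long ino = SQUASHFS_MKINODE(blk, off);	/* == 0x12340056 */

		/*
		 * The unpacking macros recover both halves:
		 *   SQUASHFS_INODE_BLK(ino)    == 0x1234
		 *   SQUASHFS_INODE_OFFSET(ino) == 0x0056
		 */
		return ino;
	}
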
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
new file mode 100644
index 0000000..6b2515d
--- /dev/null
+++ b/fs/squashfs/squashfs.h
@@ -0,0 +1,90 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * squashfs.h
+ */
+
+#define TRACE(s, args...)	pr_debug("SQUASHFS: "s, ## args)
+
+#define ERROR(s, args...)	pr_err("SQUASHFS error: "s, ## args)
+
+#define WARNING(s, args...)	pr_warning("SQUASHFS: "s, ## args)
+
+static inline struct squashfs_inode_info *squashfs_i(struct inode *inode)
+{
+	return list_entry(inode, struct squashfs_inode_info, vfs_inode);
+}
+
+/* block.c */
+extern int squashfs_read_data(struct super_block *, void **, u64, int, u64 *,
+				int);
+
+/* cache.c */
+extern struct squashfs_cache *squashfs_cache_init(char *, int, int);
+extern void squashfs_cache_delete(struct squashfs_cache *);
+extern struct squashfs_cache_entry *squashfs_cache_get(struct super_block *,
+				struct squashfs_cache *, u64, int);
+extern void squashfs_cache_put(struct squashfs_cache_entry *);
+extern int squashfs_copy_data(void *, struct squashfs_cache_entry *, int, int);
+extern int squashfs_read_metadata(struct super_block *, void *, u64 *,
+				int *, int);
+extern struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *,
+				u64, int);
+extern struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *,
+				u64, int);
+extern int squashfs_read_table(struct super_block *, void *, u64, int);
+
+/* export.c */
+extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64,
+				unsigned int);
+
+/* fragment.c */
+extern int squashfs_frag_lookup(struct super_block *, unsigned int, u64 *);
+extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
+				u64, unsigned int);
+
+/* id.c */
+extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
+extern __le64 *squashfs_read_id_index_table(struct super_block *, u64,
+				unsigned short);
+
+/* inode.c */
+extern struct inode *squashfs_iget(struct super_block *, long long,
+				unsigned int);
+extern int squashfs_read_inode(struct inode *, long long);
+
+/*
+ * Inodes and files operations
+ */
+
+/* dir.c */
+extern const struct file_operations squashfs_dir_ops;
+
+/* export.c */
+extern const struct export_operations squashfs_export_ops;
+
+/* file.c */
+extern const struct address_space_operations squashfs_aops;
+
+/* namei.c */
+extern const struct inode_operations squashfs_dir_inode_ops;
+
+/* symlink.c */
+extern const struct address_space_operations squashfs_symlink_aops;
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
new file mode 100644
index 0000000..283daaf
--- /dev/null
+++ b/fs/squashfs/squashfs_fs.h
@@ -0,0 +1,380 @@
+#ifndef SQUASHFS_FS
+#define SQUASHFS_FS
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * squashfs_fs.h
+ */
+
+#define SQUASHFS_CACHED_FRAGMENTS	CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE
+#define SQUASHFS_MAJOR			4
+#define SQUASHFS_MINOR			0
+#define SQUASHFS_START			0
+
+/* size of metadata (inode and directory) blocks */
+#define SQUASHFS_METADATA_SIZE		8192
+#define SQUASHFS_METADATA_LOG		13
+
+/* default size of data blocks */
+#define SQUASHFS_FILE_SIZE		131072
+#define SQUASHFS_FILE_LOG		17
+
+#define SQUASHFS_FILE_MAX_SIZE		1048576
+#define SQUASHFS_FILE_MAX_LOG		20
+
+/* Max number of uids and gids */
+#define SQUASHFS_IDS			65536
+
+/* Max length of filename (not 255) */
+#define SQUASHFS_NAME_LEN		256
+
+#define SQUASHFS_INVALID_FRAG		(0xffffffffU)
+#define SQUASHFS_INVALID_BLK		(-1LL)
+
+/* Filesystem flags */
+#define SQUASHFS_NOI			0
+#define SQUASHFS_NOD			1
+#define SQUASHFS_NOF			3
+#define SQUASHFS_NO_FRAG		4
+#define SQUASHFS_ALWAYS_FRAG		5
+#define SQUASHFS_DUPLICATE		6
+#define SQUASHFS_EXPORT			7
+
+#define SQUASHFS_BIT(flag, bit)		((flag >> bit) & 1)
+
+#define SQUASHFS_UNCOMPRESSED_INODES(flags)	SQUASHFS_BIT(flags, \
+						SQUASHFS_NOI)
+
+#define SQUASHFS_UNCOMPRESSED_DATA(flags)	SQUASHFS_BIT(flags, \
+						SQUASHFS_NOD)
+
+#define SQUASHFS_UNCOMPRESSED_FRAGMENTS(flags)	SQUASHFS_BIT(flags, \
+						SQUASHFS_NOF)
+
+#define SQUASHFS_NO_FRAGMENTS(flags)		SQUASHFS_BIT(flags, \
+						SQUASHFS_NO_FRAG)
+
+#define SQUASHFS_ALWAYS_FRAGMENTS(flags)	SQUASHFS_BIT(flags, \
+						SQUASHFS_ALWAYS_FRAG)
+
+#define SQUASHFS_DUPLICATES(flags)		SQUASHFS_BIT(flags, \
+						SQUASHFS_DUPLICATE)
+
+#define SQUASHFS_EXPORTABLE(flags)		SQUASHFS_BIT(flags, \
+						SQUASHFS_EXPORT)
+
+/* Max number of types and file types */
+#define SQUASHFS_DIR_TYPE		1
+#define SQUASHFS_REG_TYPE		2
+#define SQUASHFS_SYMLINK_TYPE		3
+#define SQUASHFS_BLKDEV_TYPE		4
+#define SQUASHFS_CHRDEV_TYPE		5
+#define SQUASHFS_FIFO_TYPE		6
+#define SQUASHFS_SOCKET_TYPE		7
+#define SQUASHFS_LDIR_TYPE		8
+#define SQUASHFS_LREG_TYPE		9
+#define SQUASHFS_LSYMLINK_TYPE		10
+#define SQUASHFS_LBLKDEV_TYPE		11
+#define SQUASHFS_LCHRDEV_TYPE		12
+#define SQUASHFS_LFIFO_TYPE		13
+#define SQUASHFS_LSOCKET_TYPE		14
+
+/* Flag whether block is compressed or uncompressed, bit is set if block is
+ * uncompressed */
+#define SQUASHFS_COMPRESSED_BIT		(1 << 15)
+
+#define SQUASHFS_COMPRESSED_SIZE(B)	(((B) & ~SQUASHFS_COMPRESSED_BIT) ? \
+		(B) & ~SQUASHFS_COMPRESSED_BIT :  SQUASHFS_COMPRESSED_BIT)
+
+#define SQUASHFS_COMPRESSED(B)		(!((B) & SQUASHFS_COMPRESSED_BIT))
+
+#define SQUASHFS_COMPRESSED_BIT_BLOCK	(1 << 24)
+
+#define SQUASHFS_COMPRESSED_SIZE_BLOCK(B)	((B) & \
+						~SQUASHFS_COMPRESSED_BIT_BLOCK)
+
+#define SQUASHFS_COMPRESSED_BLOCK(B)	(!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
+
+/*
+ * Inode number ops.  Inodes consist of a compressed block number, and an
+ * uncompressed offset within that block
+ */
+#define SQUASHFS_INODE_BLK(A)		((unsigned int) ((A) >> 16))
+
+#define SQUASHFS_INODE_OFFSET(A)	((unsigned int) ((A) & 0xffff))
+
+#define SQUASHFS_MKINODE(A, B)		((long long)(((long long) (A)\
+					<< 16) + (B)))
+
+/* Translate between VFS mode and squashfs mode */
+#define SQUASHFS_MODE(A)		((A) & 0xfff)
+
+/* fragment and fragment table defines */
+#define SQUASHFS_FRAGMENT_BYTES(A)	\
+				((A) * sizeof(struct squashfs_fragment_entry))
+
+#define SQUASHFS_FRAGMENT_INDEX(A)	(SQUASHFS_FRAGMENT_BYTES(A) / \
+					SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEX_OFFSET(A)	(SQUASHFS_FRAGMENT_BYTES(A) % \
+						SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEXES(A)	((SQUASHFS_FRAGMENT_BYTES(A) + \
+					SQUASHFS_METADATA_SIZE - 1) / \
+					SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEX_BYTES(A)	(SQUASHFS_FRAGMENT_INDEXES(A) *\
+						sizeof(u64))
+
+/* inode lookup table defines */
+#define SQUASHFS_LOOKUP_BYTES(A)	((A) * sizeof(u64))
+
+#define SQUASHFS_LOOKUP_BLOCK(A)	(SQUASHFS_LOOKUP_BYTES(A) / \
+					SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCK_OFFSET(A)	(SQUASHFS_LOOKUP_BYTES(A) % \
+					SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCKS(A)	((SQUASHFS_LOOKUP_BYTES(A) + \
+					SQUASHFS_METADATA_SIZE - 1) / \
+					SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCK_BYTES(A)	(SQUASHFS_LOOKUP_BLOCKS(A) *\
+					sizeof(u64))
+
+/* uid/gid lookup table defines */
+#define SQUASHFS_ID_BYTES(A)		((A) * sizeof(unsigned int))
+
+#define SQUASHFS_ID_BLOCK(A)		(SQUASHFS_ID_BYTES(A) / \
+					SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_ID_BLOCK_OFFSET(A)	(SQUASHFS_ID_BYTES(A) % \
+					SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_ID_BLOCKS(A)		((SQUASHFS_ID_BYTES(A) + \
+					SQUASHFS_METADATA_SIZE - 1) / \
+					SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_ID_BLOCK_BYTES(A)	(SQUASHFS_ID_BLOCKS(A) *\
+					sizeof(u64))
+
+/* cached data constants for filesystem */
+#define SQUASHFS_CACHED_BLKS		8
+
+#define SQUASHFS_MAX_FILE_SIZE_LOG	64
+
+#define SQUASHFS_MAX_FILE_SIZE		(1LL << \
+					(SQUASHFS_MAX_FILE_SIZE_LOG - 2))
+
+#define SQUASHFS_MARKER_BYTE		0xff
+
+/* meta index cache */
+#define SQUASHFS_META_INDEXES	(SQUASHFS_METADATA_SIZE / sizeof(unsigned int))
+#define SQUASHFS_META_ENTRIES	127
+#define SQUASHFS_META_SLOTS	8
+
+struct meta_entry {
+	u64			data_block;
+	unsigned int		index_block;
+	unsigned short		offset;
+	unsigned short		pad;
+};
+
+struct meta_index {
+	unsigned int		inode_number;
+	unsigned int		offset;
+	unsigned short		entries;
+	unsigned short		skip;
+	unsigned short		locked;
+	unsigned short		pad;
+	struct meta_entry	meta_entry[SQUASHFS_META_ENTRIES];
+};
+
+
+/*
+ * definitions for structures on disk
+ */
+#define ZLIB_COMPRESSION	 1
+
+struct squashfs_super_block {
+	__le32			s_magic;
+	__le32			inodes;
+	__le32			mkfs_time;
+	__le32			block_size;
+	__le32			fragments;
+	__le16			compression;
+	__le16			block_log;
+	__le16			flags;
+	__le16			no_ids;
+	__le16			s_major;
+	__le16			s_minor;
+	__le64			root_inode;
+	__le64			bytes_used;
+	__le64			id_table_start;
+	__le64			xattr_table_start;
+	__le64			inode_table_start;
+	__le64			directory_table_start;
+	__le64			fragment_table_start;
+	__le64			lookup_table_start;
+};
+
+struct squashfs_dir_index {
+	__le32			index;
+	__le32			start_block;
+	__le32			size;
+	unsigned char		name[0];
+};
+
+struct squashfs_base_inode {
+	__le16			inode_type;
+	__le16			mode;
+	__le16			uid;
+	__le16			guid;
+	__le32			mtime;
+	__le32	 		inode_number;
+};
+
+struct squashfs_ipc_inode {
+	__le16			inode_type;
+	__le16			mode;
+	__le16			uid;
+	__le16			guid;
+	__le32			mtime;
+	__le32	 		inode_number;
+	__le32			nlink;
+};
+
+struct squashfs_dev_inode {
+	__le16			inode_type;
+	__le16			mode;
+	__le16			uid;
+	__le16			guid;
+	__le32			mtime;
+	__le32	 		inode_number;
+	__le32			nlink;
+	__le32			rdev;
+};
+
+struct squashfs_symlink_inode {
+	__le16			inode_type;
+	__le16			mode;
+	__le16			uid;
+	__le16			guid;
+	__le32			mtime;
+	__le32	 		inode_number;
+	__le32			nlink;
+	__le32			symlink_size;
+	char			symlink[0];
+};
+
+struct squashfs_reg_inode {
+	__le16			inode_type;
+	__le16			mode;
+	__le16			uid;
+	__le16			guid;
+	__le32			mtime;
+	__le32	 		inode_number;
+	__le32			start_block;
+	__le32			fragment;
+	__le32			offset;
+	__le32			file_size;
+	__le16			block_list[0];
+};
+
+struct squashfs_lreg_inode {
+	__le16			inode_type;
+	__le16			mode;
+	__le16			uid;
+	__le16			guid;
+	__le32			mtime;
+	__le32	 		inode_number;
+	__le64			start_block;
+	__le64			file_size;
+	__le64			sparse;
+	__le32			nlink;
+	__le32			fragment;
+	__le32			offset;
+	__le32			xattr;
+	__le16			block_list[0];
+};
+
+struct squashfs_dir_inode {
+	__le16			inode_type;
+	__le16			mode;
+	__le16			uid;
+	__le16			guid;
+	__le32			mtime;
+	__le32	 		inode_number;
+	__le32			start_block;
+	__le32			nlink;
+	__le16			file_size;
+	__le16			offset;
+	__le32			parent_inode;
+};
+
+struct squashfs_ldir_inode {
+	__le16			inode_type;
+	__le16			mode;
+	__le16			uid;
+	__le16			guid;
+	__le32			mtime;
+	__le32	 		inode_number;
+	__le32			nlink;
+	__le32			file_size;
+	__le32			start_block;
+	__le32			parent_inode;
+	__le16			i_count;
+	__le16			offset;
+	__le32			xattr;
+	struct squashfs_dir_index	index[0];
+};
+
+union squashfs_inode {
+	struct squashfs_base_inode		base;
+	struct squashfs_dev_inode		dev;
+	struct squashfs_symlink_inode		symlink;
+	struct squashfs_reg_inode		reg;
+	struct squashfs_lreg_inode		lreg;
+	struct squashfs_dir_inode		dir;
+	struct squashfs_ldir_inode		ldir;
+	struct squashfs_ipc_inode		ipc;
+};
+
+struct squashfs_dir_entry {
+	__le16			offset;
+	__le16			inode_number;
+	__le16			type;
+	__le16			size;
+	char			name[0];
+};
+
+struct squashfs_dir_header {
+	__le32			count;
+	__le32			start_block;
+	__le32			inode_number;
+};
+
+struct squashfs_fragment_entry {
+	__le64			start_block;
+	__le32			size;
+	unsigned int		unused;
+};
+
+#endif
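
Tying the on-disk structures above back to the directory walk in namei.c: a directory listing is a run of squashfs_dir_header records, each followed by its entries, and both the header count and the entry name size are stored off by one (count = entries - 1, size = namelen - 1), which is why the readers add 1 before use.  A sketch of one header group, with invented values:

	/*
	 * One directory header group (invented values):
	 *
	 *   squashfs_dir_header { count = 1, start_block, inode_number = N }
	 *     squashfs_dir_entry { offset, inode_number = +1, type, size = 2 } "abc"
	 *     squashfs_dir_entry { offset, inode_number = +2, type, size = 2 } "def"
	 *
	 * count = 1 means two entries follow; size = 2 means a three-byte name.
	 * The per-entry inode_number is a signed delta against the header's
	 * inode_number, hence the (short) cast in squashfs_lookup().
	 */
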
diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h
new file mode 100644
index 0000000..fbfca30
--- /dev/null
+++ b/fs/squashfs/squashfs_fs_i.h
@@ -0,0 +1,45 @@
+#ifndef SQUASHFS_FS_I
+#define SQUASHFS_FS_I
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * squashfs_fs_i.h
+ */
+
+struct squashfs_inode_info {
+	u64		start;
+	int		offset;
+	union {
+		struct {
+			u64		fragment_block;
+			int		fragment_size;
+			int		fragment_offset;
+			u64		block_list_start;
+		};
+		struct {
+			u64		dir_idx_start;
+			int		dir_idx_offset;
+			int		dir_idx_cnt;
+			int		parent;
+		};
+	};
+	struct inode	vfs_inode;
+};
+#endif
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
new file mode 100644
index 0000000..c8c6561
--- /dev/null
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -0,0 +1,76 @@
+#ifndef SQUASHFS_FS_SB
+#define SQUASHFS_FS_SB
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * squashfs_fs_sb.h
+ */
+
+#include "squashfs_fs.h"
+
+struct squashfs_cache {
+	char			*name;
+	int			entries;
+	int			next_blk;
+	int			num_waiters;
+	int			unused;
+	int			block_size;
+	int			pages;
+	spinlock_t		lock;
+	wait_queue_head_t	wait_queue;
+	struct squashfs_cache_entry *entry;
+};
+
+struct squashfs_cache_entry {
+	u64			block;
+	int			length;
+	int			refcount;
+	u64			next_index;
+	int			pending;
+	int			error;
+	int			num_waiters;
+	wait_queue_head_t	wait_queue;
+	struct squashfs_cache	*cache;
+	void			**data;
+};
+
+struct squashfs_sb_info {
+	int			devblksize;
+	int			devblksize_log2;
+	struct squashfs_cache	*block_cache;
+	struct squashfs_cache	*fragment_cache;
+	struct squashfs_cache	*read_page;
+	int			next_meta_index;
+	__le64			*id_table;
+	__le64			*fragment_index;
+	unsigned int		*fragment_index_2;
+	struct mutex		read_data_mutex;
+	struct mutex		meta_index_mutex;
+	struct meta_index	*meta_index;
+	z_stream		stream;
+	__le64			*inode_lookup_table;
+	u64			inode_table;
+	u64			directory_table;
+	unsigned int		block_size;
+	unsigned short		block_log;
+	long long		bytes_used;
+	unsigned int		inodes;
+};
+#endif
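
The three squashfs_cache pointers above correspond to the squashfs_cache_init() calls made in super.c below: block_cache holds SQUASHFS_CACHED_BLKS metadata blocks of SQUASHFS_METADATA_SIZE, fragment_cache holds SQUASHFS_CACHED_FRAGMENTS fragments of block_size, and read_page is a single block_size entry used while filling a page.  A minimal sketch of the get/put discipline callers follow (mirroring symlink.c further down; error handling trimmed):

	struct squashfs_cache_entry *entry;

	entry = squashfs_cache_get(sb, msblk->block_cache, block, 0);
	if (!entry->error)
		/* copy out of the entry with squashfs_copy_data() */;
	squashfs_cache_put(entry);	/* drop the reference in all cases */
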
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
new file mode 100644
index 0000000..071df5b
--- /dev/null
+++ b/fs/squashfs/super.c
@@ -0,0 +1,441 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * super.c
+ */
+
+/*
+ * This file implements code to read the superblock, read and initialise
+ * in-memory structures at mount time, and all the VFS glue code to register
+ * the filesystem.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/zlib.h>
+#include <linux/magic.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+static struct file_system_type squashfs_fs_type;
+static struct super_operations squashfs_super_ops;
+
+static int supported_squashfs_filesystem(short major, short minor, short comp)
+{
+	if (major < SQUASHFS_MAJOR) {
+		ERROR("Major/Minor mismatch, older Squashfs %d.%d "
+			"filesystems are unsupported\n", major, minor);
+		return -EINVAL;
+	} else if (major > SQUASHFS_MAJOR || minor > SQUASHFS_MINOR) {
+		ERROR("Major/Minor mismatch, trying to mount newer "
+			"%d.%d filesystem\n", major, minor);
+		ERROR("Please update your kernel\n");
+		return -EINVAL;
+	}
+
+	if (comp != ZLIB_COMPRESSION)
+		return -EINVAL;
+
+	return 0;
+}
+
+
+static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct squashfs_sb_info *msblk;
+	struct squashfs_super_block *sblk = NULL;
+	char b[BDEVNAME_SIZE];
+	struct inode *root;
+	long long root_inode;
+	unsigned short flags;
+	unsigned int fragments;
+	u64 lookup_table_start;
+	int err;
+
+	TRACE("Entered squashfs_fill_superblock\n");
+
+	sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL);
+	if (sb->s_fs_info == NULL) {
+		ERROR("Failed to allocate squashfs_sb_info\n");
+		return -ENOMEM;
+	}
+	msblk = sb->s_fs_info;
+
+	msblk->stream.workspace = kmalloc(zlib_inflate_workspacesize(),
+		GFP_KERNEL);
+	if (msblk->stream.workspace == NULL) {
+		ERROR("Failed to allocate zlib workspace\n");
+		goto failure;
+	}
+
+	sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+	if (sblk == NULL) {
+		ERROR("Failed to allocate squashfs_super_block\n");
+		goto failure;
+	}
+
+	msblk->devblksize = sb_min_blocksize(sb, BLOCK_SIZE);
+	msblk->devblksize_log2 = ffz(~msblk->devblksize);
+
+	mutex_init(&msblk->read_data_mutex);
+	mutex_init(&msblk->meta_index_mutex);
+
+	/*
+	 * msblk->bytes_used is checked in squashfs_read_table to ensure reads
+	 * are not beyond filesystem end.  But as we're using
+	 * squashfs_read_table here to read the superblock (including the value
+	 * of bytes_used) we need to set it to an initial sensible dummy value
+	 */
+	msblk->bytes_used = sizeof(*sblk);
+	err = squashfs_read_table(sb, sblk, SQUASHFS_START, sizeof(*sblk));
+
+	if (err < 0) {
+		ERROR("unable to read squashfs_super_block\n");
+		goto failed_mount;
+	}
+
+	/* Check it is a SQUASHFS superblock */
+	sb->s_magic = le32_to_cpu(sblk->s_magic);
+	if (sb->s_magic != SQUASHFS_MAGIC) {
+		if (!silent)
+			ERROR("Can't find a SQUASHFS superblock on %s\n",
+						bdevname(sb->s_bdev, b));
+		err = -EINVAL;
+		goto failed_mount;
+	}
+
+	/* Check the MAJOR & MINOR versions and compression type */
+	err = supported_squashfs_filesystem(le16_to_cpu(sblk->s_major),
+			le16_to_cpu(sblk->s_minor),
+			le16_to_cpu(sblk->compression));
+	if (err < 0)
+		goto failed_mount;
+
+	err = -EINVAL;
+
+	/*
+	 * Check if there are xattrs in the filesystem.  These are not
+	 * supported in this version, so warn that they will be ignored.
+	 */
+	if (le64_to_cpu(sblk->xattr_table_start) != SQUASHFS_INVALID_BLK)
+		ERROR("Xattrs in filesystem, these will be ignored\n");
+
+	/* Check the filesystem does not extend beyond the end of the
+	   block device */
+	msblk->bytes_used = le64_to_cpu(sblk->bytes_used);
+	if (msblk->bytes_used < 0 || msblk->bytes_used >
+			i_size_read(sb->s_bdev->bd_inode))
+		goto failed_mount;
+
+	/* Check block size for sanity */
+	msblk->block_size = le32_to_cpu(sblk->block_size);
+	if (msblk->block_size > SQUASHFS_FILE_MAX_SIZE)
+		goto failed_mount;
+
+	msblk->block_log = le16_to_cpu(sblk->block_log);
+	if (msblk->block_log > SQUASHFS_FILE_MAX_LOG)
+		goto failed_mount;
+
+	/* Check the root inode for sanity */
+	root_inode = le64_to_cpu(sblk->root_inode);
+	if (SQUASHFS_INODE_OFFSET(root_inode) > SQUASHFS_METADATA_SIZE)
+		goto failed_mount;
+
+	msblk->inode_table = le64_to_cpu(sblk->inode_table_start);
+	msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
+	msblk->inodes = le32_to_cpu(sblk->inodes);
+	flags = le16_to_cpu(sblk->flags);
+
+	TRACE("Found valid superblock on %s\n", bdevname(sb->s_bdev, b));
+	TRACE("Inodes are %scompressed\n", SQUASHFS_UNCOMPRESSED_INODES(flags)
+				? "un" : "");
+	TRACE("Data is %scompressed\n", SQUASHFS_UNCOMPRESSED_DATA(flags)
+				? "un" : "");
+	TRACE("Filesystem size %lld bytes\n", msblk->bytes_used);
+	TRACE("Block size %d\n", msblk->block_size);
+	TRACE("Number of inodes %d\n", msblk->inodes);
+	TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments));
+	TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
+	TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
+	TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
+	TRACE("sblk->fragment_table_start %llx\n",
+		(u64) le64_to_cpu(sblk->fragment_table_start));
+	TRACE("sblk->id_table_start %llx\n",
+		(u64) le64_to_cpu(sblk->id_table_start));
+
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
+	sb->s_flags |= MS_RDONLY;
+	sb->s_op = &squashfs_super_ops;
+
+	err = -ENOMEM;
+
+	msblk->block_cache = squashfs_cache_init("metadata",
+			SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
+	if (msblk->block_cache == NULL)
+		goto failed_mount;
+
+	/* Allocate read_page block */
+	msblk->read_page = squashfs_cache_init("data", 1, msblk->block_size);
+	if (msblk->read_page == NULL) {
+		ERROR("Failed to allocate read_page block\n");
+		goto failed_mount;
+	}
+
+	/* Allocate and read id index table */
+	msblk->id_table = squashfs_read_id_index_table(sb,
+		le64_to_cpu(sblk->id_table_start), le16_to_cpu(sblk->no_ids));
+	if (IS_ERR(msblk->id_table)) {
+		err = PTR_ERR(msblk->id_table);
+		msblk->id_table = NULL;
+		goto failed_mount;
+	}
+
+	fragments = le32_to_cpu(sblk->fragments);
+	if (fragments == 0)
+		goto allocate_lookup_table;
+
+	msblk->fragment_cache = squashfs_cache_init("fragment",
+		SQUASHFS_CACHED_FRAGMENTS, msblk->block_size);
+	if (msblk->fragment_cache == NULL) {
+		err = -ENOMEM;
+		goto failed_mount;
+	}
+
+	/* Allocate and read fragment index table */
+	msblk->fragment_index = squashfs_read_fragment_index_table(sb,
+		le64_to_cpu(sblk->fragment_table_start), fragments);
+	if (IS_ERR(msblk->fragment_index)) {
+		err = PTR_ERR(msblk->fragment_index);
+		msblk->fragment_index = NULL;
+		goto failed_mount;
+	}
+
+allocate_lookup_table:
+	lookup_table_start = le64_to_cpu(sblk->lookup_table_start);
+	if (lookup_table_start == SQUASHFS_INVALID_BLK)
+		goto allocate_root;
+
+	/* Allocate and read inode lookup table */
+	msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
+		lookup_table_start, msblk->inodes);
+	if (IS_ERR(msblk->inode_lookup_table)) {
+		err = PTR_ERR(msblk->inode_lookup_table);
+		msblk->inode_lookup_table = NULL;
+		goto failed_mount;
+	}
+
+	sb->s_export_op = &squashfs_export_ops;
+
+allocate_root:
+	root = new_inode(sb);
+	if (!root) {
+		err = -ENOMEM;
+		goto failed_mount;
+	}
+
+	err = squashfs_read_inode(root, root_inode);
+	if (err) {
+		iget_failed(root);
+		goto failed_mount;
+	}
+	insert_inode_hash(root);
+
+	sb->s_root = d_alloc_root(root);
+	if (sb->s_root == NULL) {
+		ERROR("Root inode create failed\n");
+		err = -ENOMEM;
+		iput(root);
+		goto failed_mount;
+	}
+
+	TRACE("Leaving squashfs_fill_super\n");
+	kfree(sblk);
+	return 0;
+
+failed_mount:
+	squashfs_cache_delete(msblk->block_cache);
+	squashfs_cache_delete(msblk->fragment_cache);
+	squashfs_cache_delete(msblk->read_page);
+	kfree(msblk->inode_lookup_table);
+	kfree(msblk->fragment_index);
+	kfree(msblk->id_table);
+	kfree(msblk->stream.workspace);
+	kfree(sb->s_fs_info);
+	sb->s_fs_info = NULL;
+	kfree(sblk);
+	return err;
+
+failure:
+	kfree(msblk->stream.workspace);
+	kfree(sb->s_fs_info);
+	sb->s_fs_info = NULL;
+	return -ENOMEM;
+}
+
+
+static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+	struct squashfs_sb_info *msblk = dentry->d_sb->s_fs_info;
+
+	TRACE("Entered squashfs_statfs\n");
+
+	buf->f_type = SQUASHFS_MAGIC;
+	buf->f_bsize = msblk->block_size;
+	buf->f_blocks = ((msblk->bytes_used - 1) >> msblk->block_log) + 1;
+	buf->f_bfree = buf->f_bavail = 0;
+	buf->f_files = msblk->inodes;
+	buf->f_ffree = 0;
+	buf->f_namelen = SQUASHFS_NAME_LEN;
+
+	return 0;
+}
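
The f_blocks value above is a ceiling division of bytes_used by the block size.  A quick worked example: with bytes_used = 1000000 and block_log = 17 (131072-byte blocks), ((1000000 - 1) >> 17) + 1 == 7 + 1 == 8 blocks are reported, whereas a plain right shift would report 7 and drop the partial tail block.
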
+
+
+static int squashfs_remount(struct super_block *sb, int *flags, char *data)
+{
+	*flags |= MS_RDONLY;
+	return 0;
+}
+
+
+static void squashfs_put_super(struct super_block *sb)
+{
+	if (sb->s_fs_info) {
+		struct squashfs_sb_info *sbi = sb->s_fs_info;
+		squashfs_cache_delete(sbi->block_cache);
+		squashfs_cache_delete(sbi->fragment_cache);
+		squashfs_cache_delete(sbi->read_page);
+		kfree(sbi->id_table);
+		kfree(sbi->fragment_index);
+		kfree(sbi->meta_index);
+		kfree(sbi->stream.workspace);
+		kfree(sb->s_fs_info);
+		sb->s_fs_info = NULL;
+	}
+}
+
+
+static int squashfs_get_sb(struct file_system_type *fs_type, int flags,
+				const char *dev_name, void *data,
+				struct vfsmount *mnt)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, squashfs_fill_super,
+				mnt);
+}
+
+
+static struct kmem_cache *squashfs_inode_cachep;
+
+
+static void init_once(void *foo)
+{
+	struct squashfs_inode_info *ei = foo;
+
+	inode_init_once(&ei->vfs_inode);
+}
+
+
+static int __init init_inodecache(void)
+{
+	squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
+		sizeof(struct squashfs_inode_info), 0,
+		SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT, init_once);
+
+	return squashfs_inode_cachep ? 0 : -ENOMEM;
+}
+
+
+static void destroy_inodecache(void)
+{
+	kmem_cache_destroy(squashfs_inode_cachep);
+}
+
+
+static int __init init_squashfs_fs(void)
+{
+	int err = init_inodecache();
+
+	if (err)
+		return err;
+
+	err = register_filesystem(&squashfs_fs_type);
+	if (err) {
+		destroy_inodecache();
+		return err;
+	}
+
+	printk(KERN_INFO "squashfs: version 4.0 (2009/01/03) "
+		"Phillip Lougher\n");
+
+	return 0;
+}
+
+
+static void __exit exit_squashfs_fs(void)
+{
+	unregister_filesystem(&squashfs_fs_type);
+	destroy_inodecache();
+}
+
+
+static struct inode *squashfs_alloc_inode(struct super_block *sb)
+{
+	struct squashfs_inode_info *ei =
+		kmem_cache_alloc(squashfs_inode_cachep, GFP_KERNEL);
+
+	return ei ? &ei->vfs_inode : NULL;
+}
+
+
+static void squashfs_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(squashfs_inode_cachep, squashfs_i(inode));
+}
+
+
+static struct file_system_type squashfs_fs_type = {
+	.owner = THIS_MODULE,
+	.name = "squashfs",
+	.get_sb = squashfs_get_sb,
+	.kill_sb = kill_block_super,
+	.fs_flags = FS_REQUIRES_DEV
+};
+
+static struct super_operations squashfs_super_ops = {
+	.alloc_inode = squashfs_alloc_inode,
+	.destroy_inode = squashfs_destroy_inode,
+	.statfs = squashfs_statfs,
+	.put_super = squashfs_put_super,
+	.remount_fs = squashfs_remount
+};
+
+module_init(init_squashfs_fs);
+module_exit(exit_squashfs_fs);
+MODULE_DESCRIPTION("squashfs 4.0, a compressed read-only filesystem");
+MODULE_AUTHOR("Phillip Lougher <phillip@lougher.demon.co.uk>");
+MODULE_LICENSE("GPL");
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
new file mode 100644
index 0000000..83d8788
--- /dev/null
+++ b/fs/squashfs/symlink.c
@@ -0,0 +1,118 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@lougher.demon.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * symlink.c
+ */
+
+/*
+ * This file implements code to handle symbolic links.
+ *
+ * The data contents of symbolic links are stored inside the symbolic
+ * link inode within the inode table.  This allows the normally small symbolic
+ * link to be compressed as part of the inode table, achieving much greater
+ * compression than if the symbolic link was compressed individually.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/zlib.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+static int squashfs_symlink_readpage(struct file *file, struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	struct super_block *sb = inode->i_sb;
+	struct squashfs_sb_info *msblk = sb->s_fs_info;
+	int index = page->index << PAGE_CACHE_SHIFT;
+	u64 block = squashfs_i(inode)->start;
+	int offset = squashfs_i(inode)->offset;
+	int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE);
+	int bytes, copied;
+	void *pageaddr;
+	struct squashfs_cache_entry *entry;
+
+	TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
+			"%llx, offset %x\n", page->index, block, offset);
+
+	/*
+	 * Skip index bytes into symlink metadata.
+	 */
+	if (index) {
+		bytes = squashfs_read_metadata(sb, NULL, &block, &offset,
+								index);
+		if (bytes < 0) {
+			ERROR("Unable to read symlink [%llx:%x]\n",
+				squashfs_i(inode)->start,
+				squashfs_i(inode)->offset);
+			goto error_out;
+		}
+	}
+
+	/*
+	 * Read length bytes from symlink metadata.  Squashfs_read_metadata
+	 * is not used here because it can sleep and we want to use
+	 * kmap_atomic to map the page.  Instead call the underlying
+	 * squashfs_cache_get routine.  As length bytes may overlap metadata
+	 * blocks, we may need to call squashfs_cache_get multiple times.
+	 */
+	for (bytes = 0; bytes < length; offset = 0, bytes += copied) {
+		entry = squashfs_cache_get(sb, msblk->block_cache, block, 0);
+		if (entry->error) {
+			ERROR("Unable to read symlink [%llx:%x]\n",
+				squashfs_i(inode)->start,
+				squashfs_i(inode)->offset);
+			squashfs_cache_put(entry);
+			goto error_out;
+		}
+
+		pageaddr = kmap_atomic(page, KM_USER0);
+		copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
+								length - bytes);
+		if (copied == length - bytes)
+			memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length);
+		else
+			block = entry->next_index;
+		kunmap_atomic(pageaddr, KM_USER0);
+		squashfs_cache_put(entry);
+	}
+
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+	unlock_page(page);
+	return 0;
+
+error_out:
+	SetPageError(page);
+	unlock_page(page);
+	return 0;
+}
+
+
+const struct address_space_operations squashfs_symlink_aops = {
+	.readpage = squashfs_symlink_readpage
+};
diff --git a/fs/stat.c b/fs/stat.c
index 7e12a6f8..2db740a 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -152,7 +152,7 @@
 	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
 }
 
-asmlinkage long sys_stat(char __user * filename, struct __old_kernel_stat __user * statbuf)
+SYSCALL_DEFINE2(stat, char __user *, filename, struct __old_kernel_stat __user *, statbuf)
 {
 	struct kstat stat;
 	int error = vfs_stat_fd(AT_FDCWD, filename, &stat);
@@ -162,7 +162,8 @@
 
 	return error;
 }
-asmlinkage long sys_lstat(char __user * filename, struct __old_kernel_stat __user * statbuf)
+
+SYSCALL_DEFINE2(lstat, char __user *, filename, struct __old_kernel_stat __user *, statbuf)
 {
 	struct kstat stat;
 	int error = vfs_lstat_fd(AT_FDCWD, filename, &stat);
@@ -172,7 +173,8 @@
 
 	return error;
 }
-asmlinkage long sys_fstat(unsigned int fd, struct __old_kernel_stat __user * statbuf)
+
+SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
 {
 	struct kstat stat;
 	int error = vfs_fstat(fd, &stat);
@@ -235,7 +237,7 @@
 	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
 }
 
-asmlinkage long sys_newstat(char __user *filename, struct stat __user *statbuf)
+SYSCALL_DEFINE2(newstat, char __user *, filename, struct stat __user *, statbuf)
 {
 	struct kstat stat;
 	int error = vfs_stat_fd(AT_FDCWD, filename, &stat);
@@ -246,7 +248,7 @@
 	return error;
 }
 
-asmlinkage long sys_newlstat(char __user *filename, struct stat __user *statbuf)
+SYSCALL_DEFINE2(newlstat, char __user *, filename, struct stat __user *, statbuf)
 {
 	struct kstat stat;
 	int error = vfs_lstat_fd(AT_FDCWD, filename, &stat);
@@ -258,8 +260,8 @@
 }
 
 #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
-asmlinkage long sys_newfstatat(int dfd, char __user *filename,
-				struct stat __user *statbuf, int flag)
+SYSCALL_DEFINE4(newfstatat, int, dfd, char __user *, filename,
+		struct stat __user *, statbuf, int, flag)
 {
 	struct kstat stat;
 	int error = -EINVAL;
@@ -280,7 +282,7 @@
 }
 #endif
 
-asmlinkage long sys_newfstat(unsigned int fd, struct stat __user *statbuf)
+SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
 {
 	struct kstat stat;
 	int error = vfs_fstat(fd, &stat);
@@ -291,8 +293,8 @@
 	return error;
 }
 
-asmlinkage long sys_readlinkat(int dfd, const char __user *pathname,
-				char __user *buf, int bufsiz)
+SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
+		char __user *, buf, int, bufsiz)
 {
 	struct path path;
 	int error;
@@ -318,8 +320,8 @@
 	return error;
 }
 
-asmlinkage long sys_readlink(const char __user *path, char __user *buf,
-				int bufsiz)
+SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
+		int, bufsiz)
 {
 	return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
 }
@@ -365,7 +367,7 @@
 	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
 }
 
-asmlinkage long sys_stat64(char __user * filename, struct stat64 __user * statbuf)
+SYSCALL_DEFINE2(stat64, char __user *, filename, struct stat64 __user *, statbuf)
 {
 	struct kstat stat;
 	int error = vfs_stat(filename, &stat);
@@ -375,7 +377,8 @@
 
 	return error;
 }
-asmlinkage long sys_lstat64(char __user * filename, struct stat64 __user * statbuf)
+
+SYSCALL_DEFINE2(lstat64, char __user *, filename, struct stat64 __user *, statbuf)
 {
 	struct kstat stat;
 	int error = vfs_lstat(filename, &stat);
@@ -385,7 +388,8 @@
 
 	return error;
 }
-asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user * statbuf)
+
+SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
 {
 	struct kstat stat;
 	int error = vfs_fstat(fd, &stat);
@@ -396,8 +400,8 @@
 	return error;
 }
 
-asmlinkage long sys_fstatat64(int dfd, char __user *filename,
-			       struct stat64 __user *statbuf, int flag)
+SYSCALL_DEFINE4(fstatat64, int, dfd, char __user *, filename,
+		struct stat64 __user *, statbuf, int, flag)
 {
 	struct kstat stat;
 	int error = -EINVAL;
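
The sys_*() to SYSCALL_DEFINEn() conversions in fs/stat.c above (and in fs/super.c, fs/sync.c, fs/timerfd.c, fs/utimes.c and fs/xattr.c below) wrap the syscall bodies so that, on 64-bit architectures selecting CONFIG_HAVE_SYSCALL_WRAPPERS, arguments arriving in 64-bit registers are explicitly narrowed to their declared types.  As a rough, abridged sketch of what SYSCALL_DEFINE1(fsync, unsigned int, fd) expands to on such an architecture (the fs/sync.c hunks below open-code the same pattern by hand for sync_file_range, whose loff_t arguments do not fit the generic wrapper):

	static inline long SYSC_fsync(unsigned int fd);
	asmlinkage long SyS_fsync(long fd)
	{
		return SYSC_fsync((unsigned int) fd);
	}
	SYSCALL_ALIAS(sys_fsync, SyS_fsync);
	static inline long SYSC_fsync(unsigned int fd)
	{
		return do_fsync(fd, 0);
	}

On architectures without the wrapper option the macro reduces to the plain asmlinkage long sys_fsync(unsigned int fd) definition it replaces.
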
diff --git a/fs/super.c b/fs/super.c
index ddba069..645e540 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -38,6 +38,7 @@
 #include <linux/kobject.h>
 #include <linux/mutex.h>
 #include <linux/file.h>
+#include <linux/async.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -71,6 +72,7 @@
 		INIT_HLIST_HEAD(&s->s_anon);
 		INIT_LIST_HEAD(&s->s_inodes);
 		INIT_LIST_HEAD(&s->s_dentry_lru);
+		INIT_LIST_HEAD(&s->s_async_list);
 		init_rwsem(&s->s_umount);
 		mutex_init(&s->s_lock);
 		lockdep_set_class(&s->s_umount, &type->s_umount_key);
@@ -289,11 +291,18 @@
 {
 	const struct super_operations *sop = sb->s_op;
 
+
 	if (sb->s_root) {
 		shrink_dcache_for_umount(sb);
 		fsync_super(sb);
 		lock_super(sb);
 		sb->s_flags &= ~MS_ACTIVE;
+
+		/*
+		 * wait for asynchronous fs operations to finish before going further
+		 */
+		async_synchronize_full_special(&sb->s_async_list);
+
 		/* bad name - it should be evict_inodes() */
 		invalidate_inodes(sb);
 		lock_kernel();
@@ -461,6 +470,7 @@
 		sb->s_count++;
 		spin_unlock(&sb_lock);
 		down_read(&sb->s_umount);
+		async_synchronize_full_special(&sb->s_async_list);
 		if (sb->s_root && (wait || sb->s_dirt))
 			sb->s_op->sync_fs(sb, wait);
 		up_read(&sb->s_umount);
@@ -534,7 +544,7 @@
 	return NULL;
 }
 
-asmlinkage long sys_ustat(unsigned dev, struct ustat __user * ubuf)
+SYSCALL_DEFINE2(ustat, unsigned, dev, struct ustat __user *, ubuf)
 {
         struct super_block *s;
         struct ustat tmp;
@@ -800,6 +810,7 @@
 		}
 
 		s->s_flags |= MS_ACTIVE;
+		bdev->bd_super = s;
 	}
 
 	return simple_set_mnt(mnt, s);
@@ -819,6 +830,7 @@
 	struct block_device *bdev = sb->s_bdev;
 	fmode_t mode = sb->s_mode;
 
+	bdev->bd_super = NULL;
 	generic_shutdown_super(sb);
 	sync_blockdev(bdev);
 	close_bdev_exclusive(bdev, mode);
diff --git a/fs/sync.c b/fs/sync.c
index ac02b56..a16d53e 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -36,7 +36,7 @@
 		laptop_sync_completion();
 }
 
-asmlinkage long sys_sync(void)
+SYSCALL_DEFINE0(sync)
 {
 	do_sync(1);
 	return 0;
@@ -144,12 +144,12 @@
 	return ret;
 }
 
-asmlinkage long sys_fsync(unsigned int fd)
+SYSCALL_DEFINE1(fsync, unsigned int, fd)
 {
 	return do_fsync(fd, 0);
 }
 
-asmlinkage long sys_fdatasync(unsigned int fd)
+SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
 {
 	return do_fsync(fd, 1);
 }
@@ -201,8 +201,8 @@
  * already-instantiated disk blocks, there are no guarantees here that the data
  * will be available after a crash.
  */
-asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
-					unsigned int flags)
+SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes,
+				unsigned int flags)
 {
 	int ret;
 	struct file *file;
@@ -262,14 +262,32 @@
 out:
 	return ret;
 }
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_sync_file_range(long fd, loff_t offset, loff_t nbytes,
+				    long flags)
+{
+	return SYSC_sync_file_range((int) fd, offset, nbytes,
+				    (unsigned int) flags);
+}
+SYSCALL_ALIAS(sys_sync_file_range, SyS_sync_file_range);
+#endif
 
 /* It would be nice if people remember that not all the world's an i386
    when they introduce new system calls */
-asmlinkage long sys_sync_file_range2(int fd, unsigned int flags,
-				     loff_t offset, loff_t nbytes)
+SYSCALL_DEFINE(sync_file_range2)(int fd, unsigned int flags,
+				 loff_t offset, loff_t nbytes)
 {
 	return sys_sync_file_range(fd, offset, nbytes, flags);
 }
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_sync_file_range2(long fd, long flags,
+				     loff_t offset, loff_t nbytes)
+{
+	return SYSC_sync_file_range2((int) fd, (unsigned int) flags,
+				     offset, nbytes);
+}
+SYSCALL_ALIAS(sys_sync_file_range2, SyS_sync_file_range2);
+#endif
 
 /*
  * `endbyte' is inclusive
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 0862f0e..6a123b8 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -177,7 +177,7 @@
 	return file;
 }
 
-asmlinkage long sys_timerfd_create(int clockid, int flags)
+SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
 {
 	int ufd;
 	struct timerfd_ctx *ctx;
@@ -208,9 +208,9 @@
 	return ufd;
 }
 
-asmlinkage long sys_timerfd_settime(int ufd, int flags,
-				    const struct itimerspec __user *utmr,
-				    struct itimerspec __user *otmr)
+SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
+		const struct itimerspec __user *, utmr,
+		struct itimerspec __user *, otmr)
 {
 	struct file *file;
 	struct timerfd_ctx *ctx;
@@ -265,7 +265,7 @@
 	return 0;
 }
 
-asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr)
+SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
 {
 	struct file *file;
 	struct timerfd_ctx *ctx;
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig
index 91ceeda..e35b54d 100644
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -40,7 +40,7 @@
 	depends on UBIFS_FS
 	default y
 	help
-	  Zlib copresses better then LZO but it is slower. Say 'Y' if unsure.
+	  Zlib compresses better than LZO but it is slower. Say 'Y' if unsure.
 
 # Debugging-related stuff
 config UBIFS_FS_DEBUG
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 0e5e54d..175f9c5 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -142,7 +142,7 @@
  *
  * This function is called when an operation cannot be budgeted because there
  * is supposedly no free space. But in most cases there is some free space:
- *   o budgeting is pessimistic, so it always budgets more then it is actually
+ *   o budgeting is pessimistic, so it always budgets more than it is actually
  *     needed, so shrinking the liability is one way to make free space - the
  *     cached data will take less space then it was budgeted for;
  *   o GC may turn some dark space into free space (budgeting treats dark space
@@ -606,7 +606,7 @@
  * @c: UBIFS file-system description object
  *
  * This function converts budget which was allocated for a new page of data to
- * the budget of changing an existing page of data. The latter is smaller then
+ * the budget of changing an existing page of data. The latter is smaller than
  * the former, so this function only does simple re-calculation and does not
  * involve any write-back.
  */
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index 0bef650..9832f9a 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -45,7 +45,7 @@
 #define SMALL_NODE_WM  UBIFS_MAX_DENT_NODE_SZ
 
 /*
- * GC may need to move more then one LEB to make progress. The below constants
+ * GC may need to move more than one LEB to make progress. The below constants
  * define "soft" and "hard" limits on the number of LEBs the garbage collector
  * may move.
  */
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 10ae25b..9b7c54e 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -191,7 +191,7 @@
 	if (wbuf->lnum != -1 && avail >= len) {
 		/*
 		 * Someone else has switched the journal head and we have
-		 * enough space now. This happens when more then one process is
+		 * enough space now. This happens when more than one process is
 		 * trying to write to the same journal head at the same time.
 		 */
 		dbg_jnl("return LEB %d back, already have LEB %d:%d",
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
index f248533..e7bab52 100644
--- a/fs/ubifs/shrinker.c
+++ b/fs/ubifs/shrinker.c
@@ -151,7 +151,7 @@
  * @contention: if any contention, this is set to %1
  *
  * This function walks the list of mounted UBIFS file-systems and frees clean
- * znodes which are older then @age, until at least @nr znodes are freed.
+ * znodes which are older than @age, until at least @nr znodes are freed.
  * Returns the number of freed znodes.
  */
 static int shrink_tnc_trees(int nr, int age, int *contention)
diff --git a/fs/utimes.c b/fs/utimes.c
index 6929e3e..e4c75db 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -24,7 +24,7 @@
  * must be owner or have write permission.
  * Else, update from *times, must be owner or super user.
  */
-asmlinkage long sys_utime(char __user *filename, struct utimbuf __user *times)
+SYSCALL_DEFINE2(utime, char __user *, filename, struct utimbuf __user *, times)
 {
 	struct timespec tv[2];
 
@@ -170,7 +170,8 @@
 	return error;
 }
 
-asmlinkage long sys_utimensat(int dfd, char __user *filename, struct timespec __user *utimes, int flags)
+SYSCALL_DEFINE4(utimensat, int, dfd, char __user *, filename,
+		struct timespec __user *, utimes, int, flags)
 {
 	struct timespec tstimes[2];
 
@@ -187,7 +188,8 @@
 	return do_utimes(dfd, filename, utimes ? tstimes : NULL, flags);
 }
 
-asmlinkage long sys_futimesat(int dfd, char __user *filename, struct timeval __user *utimes)
+SYSCALL_DEFINE3(futimesat, int, dfd, char __user *, filename,
+		struct timeval __user *, utimes)
 {
 	struct timeval times[2];
 	struct timespec tstimes[2];
@@ -214,7 +216,8 @@
 	return do_utimes(dfd, filename, utimes ? tstimes : NULL, 0);
 }
 
-asmlinkage long sys_utimes(char __user *filename, struct timeval __user *utimes)
+SYSCALL_DEFINE2(utimes, char __user *, filename,
+		struct timeval __user *, utimes)
 {
 	return sys_futimesat(AT_FDCWD, filename, utimes);
 }
diff --git a/fs/xattr.c b/fs/xattr.c
index 237804c..197c4fc 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -251,9 +251,9 @@
 	return error;
 }
 
-asmlinkage long
-sys_setxattr(const char __user *pathname, const char __user *name,
-	     const void __user *value, size_t size, int flags)
+SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
+		const char __user *, name, const void __user *, value,
+		size_t, size, int, flags)
 {
 	struct path path;
 	int error;
@@ -270,9 +270,9 @@
 	return error;
 }
 
-asmlinkage long
-sys_lsetxattr(const char __user *pathname, const char __user *name,
-	      const void __user *value, size_t size, int flags)
+SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
+		const char __user *, name, const void __user *, value,
+		size_t, size, int, flags)
 {
 	struct path path;
 	int error;
@@ -289,9 +289,8 @@
 	return error;
 }
 
-asmlinkage long
-sys_fsetxattr(int fd, const char __user *name, const void __user *value,
-	      size_t size, int flags)
+SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
+		const void __user *,value, size_t, size, int, flags)
 {
 	struct file *f;
 	struct dentry *dentry;
@@ -349,9 +348,8 @@
 	return error;
 }
 
-asmlinkage ssize_t
-sys_getxattr(const char __user *pathname, const char __user *name,
-	     void __user *value, size_t size)
+SYSCALL_DEFINE4(getxattr, const char __user *, pathname,
+		const char __user *, name, void __user *, value, size_t, size)
 {
 	struct path path;
 	ssize_t error;
@@ -364,9 +362,8 @@
 	return error;
 }
 
-asmlinkage ssize_t
-sys_lgetxattr(const char __user *pathname, const char __user *name, void __user *value,
-	      size_t size)
+SYSCALL_DEFINE4(lgetxattr, const char __user *, pathname,
+		const char __user *, name, void __user *, value, size_t, size)
 {
 	struct path path;
 	ssize_t error;
@@ -379,8 +376,8 @@
 	return error;
 }
 
-asmlinkage ssize_t
-sys_fgetxattr(int fd, const char __user *name, void __user *value, size_t size)
+SYSCALL_DEFINE4(fgetxattr, int, fd, const char __user *, name,
+		void __user *, value, size_t, size)
 {
 	struct file *f;
 	ssize_t error = -EBADF;
@@ -424,8 +421,8 @@
 	return error;
 }
 
-asmlinkage ssize_t
-sys_listxattr(const char __user *pathname, char __user *list, size_t size)
+SYSCALL_DEFINE3(listxattr, const char __user *, pathname, char __user *, list,
+		size_t, size)
 {
 	struct path path;
 	ssize_t error;
@@ -438,8 +435,8 @@
 	return error;
 }
 
-asmlinkage ssize_t
-sys_llistxattr(const char __user *pathname, char __user *list, size_t size)
+SYSCALL_DEFINE3(llistxattr, const char __user *, pathname, char __user *, list,
+		size_t, size)
 {
 	struct path path;
 	ssize_t error;
@@ -452,8 +449,7 @@
 	return error;
 }
 
-asmlinkage ssize_t
-sys_flistxattr(int fd, char __user *list, size_t size)
+SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
 {
 	struct file *f;
 	ssize_t error = -EBADF;
@@ -485,8 +481,8 @@
 	return vfs_removexattr(d, kname);
 }
 
-asmlinkage long
-sys_removexattr(const char __user *pathname, const char __user *name)
+SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
+		const char __user *, name)
 {
 	struct path path;
 	int error;
@@ -503,8 +499,8 @@
 	return error;
 }
 
-asmlinkage long
-sys_lremovexattr(const char __user *pathname, const char __user *name)
+SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
+		const char __user *, name)
 {
 	struct path path;
 	int error;
@@ -521,8 +517,7 @@
 	return error;
 }
 
-asmlinkage long
-sys_fremovexattr(int fd, const char __user *name)
+SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
 {
 	struct file *f;
 	struct dentry *dentry;
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index 7b26f5f..1dd5288 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -21,8 +21,6 @@
 extern struct workqueue_struct *xfsdatad_workqueue;
 extern mempool_t *xfs_ioend_pool;
 
-typedef void (*xfs_ioend_func_t)(void *);
-
 /*
  * xfs_ioend struct manages large extent writes for XFS.
  * It can manage several multi-page bio's at once.
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index cb329ed..d71dc44 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -166,75 +166,6 @@
 }
 
 /*
- *	Mapping of multi-page buffers into contiguous virtual space
- */
-
-typedef struct a_list {
-	void		*vm_addr;
-	struct a_list	*next;
-} a_list_t;
-
-static a_list_t		*as_free_head;
-static int		as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- *	Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
-	void		*addr)
-{
-	a_list_t	*aentry;
-
-#ifdef CONFIG_XEN
-	/*
-	 * Xen needs to be able to make sure it can get an exclusive
-	 * RO mapping of pages it wants to turn into a pagetable.  If
-	 * a newly allocated page is also still being vmap()ed by xfs,
-	 * it will cause pagetable construction to fail.  This is a
-	 * quick workaround to always eagerly unmap pages so that Xen
-	 * is happy.
-	 */
-	vunmap(addr);
-	return;
-#endif
-
-	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
-	if (likely(aentry)) {
-		spin_lock(&as_lock);
-		aentry->next = as_free_head;
-		aentry->vm_addr = addr;
-		as_free_head = aentry;
-		as_list_len++;
-		spin_unlock(&as_lock);
-	} else {
-		vunmap(addr);
-	}
-}
-
-STATIC void
-purge_addresses(void)
-{
-	a_list_t	*aentry, *old;
-
-	if (as_free_head == NULL)
-		return;
-
-	spin_lock(&as_lock);
-	aentry = as_free_head;
-	as_free_head = NULL;
-	as_list_len = 0;
-	spin_unlock(&as_lock);
-
-	while ((old = aentry) != NULL) {
-		vunmap(aentry->vm_addr);
-		aentry = aentry->next;
-		kfree(old);
-	}
-}
-
-/*
  *	Internal xfs_buf_t object manipulation
  */
 
@@ -333,7 +264,7 @@
 		uint		i;
 
 		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
-			free_address(bp->b_addr - bp->b_offset);
+			vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);
 
 		for (i = 0; i < bp->b_page_count; i++) {
 			struct page	*page = bp->b_pages[i];
@@ -455,10 +386,8 @@
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		if (as_list_len > 64)
-			purge_addresses();
-		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
-					VM_MAP, PAGE_KERNEL);
+		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
+					-1, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
@@ -1743,8 +1672,6 @@
 			count++;
 		}
 
-		if (as_list_len > 0)
-			purge_addresses();
 		if (count)
 			blk_run_address_space(target->bt_mapping);
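
The xfs_buf.c change above replaces the open-coded vmap() batching (free_address()/purge_addresses()) with the vmalloc layer's own batched interface: vm_map_ram() maps the buffer's page array and vm_unmap_ram() releases it, with the costly TLB flushes deferred and amortised inside the vmalloc code instead of in XFS.  A minimal sketch of the pairing, reusing the same fields as the code above:

	/* map: -1 means no preferred NUMA node */
	bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, -1, PAGE_KERNEL);
	if (!bp->b_addr)
		return -ENOMEM;
	/* ... use the mapping ... */
	/* unmap: the count must match what was mapped */
	vm_unmap_ram(bp->b_addr, bp->b_page_count);
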
 
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 595751f..87b8cbd 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -126,11 +126,26 @@
 	if (ino == 0)
 		return ERR_PTR(-ESTALE);
 
-	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
-	if (error)
+	/*
+	 * The XFS_IGET_BULKSTAT means that an invalid inode number is just
+	 * fine and not an indication of a corrupted filesystem.  Because
+	 * clients can send any kind of invalid file handle, e.g. after
+	 * a restore on the server we have to deal with this case gracefully.
+	 */
+	error = xfs_iget(mp, NULL, ino, XFS_IGET_BULKSTAT,
+			 XFS_ILOCK_SHARED, &ip, 0);
+	if (error) {
+		/*
+		 * EINVAL means the inode cluster doesn't exist anymore.
+		 * This implies the filehandle is stale, so we should
+		 * translate it here.
+		 * We don't use ESTALE directly down the chain to not
+		 * confuse applications using bulkstat that expect EINVAL.
+		 */
+		if (error == EINVAL)
+			error = ESTALE;
 		return ERR_PTR(-error);
-	if (!ip)
-		return ERR_PTR(-EIO);
+	}
 
 	if (ip->i_d.di_gen != generation) {
 		xfs_iput_new(ip, XFS_ILOCK_SHARED);
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 67205f6..e5be1e0 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -1546,21 +1546,6 @@
 		return -error;
 	}
 
-	case XFS_IOC_FREEZE:
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (inode->i_sb->s_frozen == SB_UNFROZEN)
-			freeze_bdev(inode->i_sb->s_bdev);
-		return 0;
-
-	case XFS_IOC_THAW:
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		if (inode->i_sb->s_frozen != SB_UNFROZEN)
-			thaw_bdev(inode->i_sb->s_bdev, inode->i_sb);
-		return 0;
-
 	case XFS_IOC_GOINGDOWN: {
 		__uint32_t in;
 
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 0504cec..50903ad 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -632,8 +632,6 @@
 	case XFS_IOC_SET_RESBLKS:
 	case XFS_IOC_GET_RESBLKS:
 	case XFS_IOC_FSGROWFSLOG:
-	case XFS_IOC_FREEZE:
-	case XFS_IOC_THAW:
 	case XFS_IOC_GOINGDOWN:
 	case XFS_IOC_ERROR_INJECTION:
 	case XFS_IOC_ERROR_CLEARALL:
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 36f6cc7..95a9710 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -1269,14 +1269,14 @@
  * need to take care of the metadata. Once that's done write a dummy
  * record to dirty the log in case of a crash while frozen.
  */
-STATIC void
-xfs_fs_lockfs(
+STATIC int
+xfs_fs_freeze(
 	struct super_block	*sb)
 {
 	struct xfs_mount	*mp = XFS_M(sb);
 
 	xfs_quiesce_attr(mp);
-	xfs_fs_log_dummy(mp);
+	return -xfs_fs_log_dummy(mp);
 }
 
 STATIC int
@@ -1348,7 +1348,7 @@
 {
 	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
 
-	/* Fail a mount where the logbuf is smaller then the log stripe */
+	/* Fail a mount where the logbuf is smaller than the log stripe */
 	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
 		if (mp->m_logbsize <= 0 &&
 		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
@@ -1557,7 +1557,7 @@
 	.put_super		= xfs_fs_put_super,
 	.write_super		= xfs_fs_write_super,
 	.sync_fs		= xfs_fs_sync_super,
-	.write_super_lockfs	= xfs_fs_lockfs,
+	.freeze_fs		= xfs_fs_freeze,
 	.statfs			= xfs_fs_statfs,
 	.remount_fs		= xfs_fs_remount,
 	.show_options		= xfs_fs_show_options,
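With ->write_super_lockfs replaced by ->freeze_fs, the freeze path can now report failure (here, a failed dummy log transaction) instead of freezing silently in a bad state. A hedged sketch of how a caller would consume the new return value; the function name is illustrative and not the actual VFS code:

/* Sketch only -- illustrates the int-returning ->freeze_fs contract. */
static int example_freeze_super(struct super_block *sb)
{
	int error = 0;

	if (sb->s_op->freeze_fs)
		error = sb->s_op->freeze_fs(sb);	/* 0 on success, -errno on failure */
	return error;
}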
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index a4e293b..642f1db 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -22,7 +22,6 @@
  * Access Control Lists
  */
 typedef __uint16_t	xfs_acl_perm_t;
-typedef __int32_t	xfs_acl_type_t;
 typedef __int32_t	xfs_acl_tag_t;
 typedef __int32_t	xfs_acl_id_t;
 
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index f2e2181..d3b3cf7 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -231,7 +231,7 @@
 #define	XFS_FSB_TO_AGNO(mp,fsbno)	\
 	((xfs_agnumber_t)((fsbno) >> (mp)->m_sb.sb_agblklog))
 #define	XFS_FSB_TO_AGBNO(mp,fsbno)	\
-	((xfs_agblock_t)((fsbno) & XFS_MASK32LO((mp)->m_sb.sb_agblklog)))
+	((xfs_agblock_t)((fsbno) & xfs_mask32lo((mp)->m_sb.sb_agblklog)))
 #define	XFS_AGB_TO_DADDR(mp,agno,agbno)	\
 	((xfs_daddr_t)XFS_FSB_TO_BB(mp, \
 		(xfs_fsblock_t)(agno) * (mp)->m_sb.sb_agblocks + (agbno)))
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 79da6b2..6c323f8 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -736,7 +736,7 @@
 			continue;		/* don't copy partial entries */
 		if (!(entry->flags & XFS_ATTR_LOCAL))
 			return(0);
-		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i);
+		name_loc = xfs_attr_leaf_name_local(leaf, i);
 		if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX)
 			return(0);
 		if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX)
@@ -823,7 +823,7 @@
 		if (!entry->nameidx)
 			continue;
 		ASSERT(entry->flags & XFS_ATTR_LOCAL);
-		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i);
+		name_loc = xfs_attr_leaf_name_local(leaf, i);
 		nargs.name = (char *)name_loc->nameval;
 		nargs.namelen = name_loc->namelen;
 		nargs.value = (char *)&name_loc->nameval[nargs.namelen];
@@ -1141,14 +1141,14 @@
 	 * as part of this transaction (a split operation for example).
 	 */
 	if (entry->flags & XFS_ATTR_LOCAL) {
-		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index);
+		name_loc = xfs_attr_leaf_name_local(leaf, args->index);
 		name_loc->namelen = args->namelen;
 		name_loc->valuelen = cpu_to_be16(args->valuelen);
 		memcpy((char *)name_loc->nameval, args->name, args->namelen);
 		memcpy((char *)&name_loc->nameval[args->namelen], args->value,
 				   be16_to_cpu(name_loc->valuelen));
 	} else {
-		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
+		name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
 		name_rmt->namelen = args->namelen;
 		memcpy((char *)name_rmt->name, args->name, args->namelen);
 		entry->flags |= XFS_ATTR_INCOMPLETE;
@@ -1159,7 +1159,7 @@
 		args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen);
 	}
 	xfs_da_log_buf(args->trans, bp,
-	     XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index),
+	     XFS_DA_LOGRANGE(leaf, xfs_attr_leaf_name(leaf, args->index),
 				   xfs_attr_leaf_entsize(leaf, args->index)));
 
 	/*
@@ -1749,10 +1749,10 @@
 	/*
 	 * Compress the remaining entries and zero out the removed stuff.
 	 */
-	memset(XFS_ATTR_LEAF_NAME(leaf, args->index), 0, entsize);
+	memset(xfs_attr_leaf_name(leaf, args->index), 0, entsize);
 	be16_add_cpu(&hdr->usedbytes, -entsize);
 	xfs_da_log_buf(args->trans, bp,
-	     XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index),
+	     XFS_DA_LOGRANGE(leaf, xfs_attr_leaf_name(leaf, args->index),
 				   entsize));
 
 	tmp = (be16_to_cpu(hdr->count) - args->index)
@@ -1985,7 +1985,7 @@
 			continue;
 		}
 		if (entry->flags & XFS_ATTR_LOCAL) {
-			name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, probe);
+			name_loc = xfs_attr_leaf_name_local(leaf, probe);
 			if (name_loc->namelen != args->namelen)
 				continue;
 			if (memcmp(args->name, (char *)name_loc->nameval, args->namelen) != 0)
@@ -1995,7 +1995,7 @@
 			args->index = probe;
 			return(XFS_ERROR(EEXIST));
 		} else {
-			name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, probe);
+			name_rmt = xfs_attr_leaf_name_remote(leaf, probe);
 			if (name_rmt->namelen != args->namelen)
 				continue;
 			if (memcmp(args->name, (char *)name_rmt->name,
@@ -2035,7 +2035,7 @@
 
 	entry = &leaf->entries[args->index];
 	if (entry->flags & XFS_ATTR_LOCAL) {
-		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index);
+		name_loc = xfs_attr_leaf_name_local(leaf, args->index);
 		ASSERT(name_loc->namelen == args->namelen);
 		ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0);
 		valuelen = be16_to_cpu(name_loc->valuelen);
@@ -2050,7 +2050,7 @@
 		args->valuelen = valuelen;
 		memcpy(args->value, &name_loc->nameval[args->namelen], valuelen);
 	} else {
-		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
+		name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
 		ASSERT(name_rmt->namelen == args->namelen);
 		ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
 		valuelen = be32_to_cpu(name_rmt->valuelen);
@@ -2143,7 +2143,7 @@
 		 * off for 6.2, should be revisited later.
 		 */
 		if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */
-			memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp);
+			memset(xfs_attr_leaf_name(leaf_s, start_s + i), 0, tmp);
 			be16_add_cpu(&hdr_s->usedbytes, -tmp);
 			be16_add_cpu(&hdr_s->count, -1);
 			entry_d--;	/* to compensate for ++ in loop hdr */
@@ -2160,11 +2160,11 @@
 			entry_d->flags = entry_s->flags;
 			ASSERT(be16_to_cpu(entry_d->nameidx) + tmp
 							<= XFS_LBSIZE(mp));
-			memmove(XFS_ATTR_LEAF_NAME(leaf_d, desti),
-				XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), tmp);
+			memmove(xfs_attr_leaf_name(leaf_d, desti),
+				xfs_attr_leaf_name(leaf_s, start_s + i), tmp);
 			ASSERT(be16_to_cpu(entry_s->nameidx) + tmp
 							<= XFS_LBSIZE(mp));
-			memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp);
+			memset(xfs_attr_leaf_name(leaf_s, start_s + i), 0, tmp);
 			be16_add_cpu(&hdr_s->usedbytes, -tmp);
 			be16_add_cpu(&hdr_d->usedbytes, tmp);
 			be16_add_cpu(&hdr_s->count, -1);
@@ -2276,12 +2276,12 @@
 
 	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
 	if (leaf->entries[index].flags & XFS_ATTR_LOCAL) {
-		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, index);
-		size = XFS_ATTR_LEAF_ENTSIZE_LOCAL(name_loc->namelen,
+		name_loc = xfs_attr_leaf_name_local(leaf, index);
+		size = xfs_attr_leaf_entsize_local(name_loc->namelen,
 						   be16_to_cpu(name_loc->valuelen));
 	} else {
-		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, index);
-		size = XFS_ATTR_LEAF_ENTSIZE_REMOTE(name_rmt->namelen);
+		name_rmt = xfs_attr_leaf_name_remote(leaf, index);
+		size = xfs_attr_leaf_entsize_remote(name_rmt->namelen);
 	}
 	return(size);
 }
@@ -2297,13 +2297,13 @@
 {
 	int size;
 
-	size = XFS_ATTR_LEAF_ENTSIZE_LOCAL(namelen, valuelen);
-	if (size < XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(blocksize)) {
+	size = xfs_attr_leaf_entsize_local(namelen, valuelen);
+	if (size < xfs_attr_leaf_entsize_local_max(blocksize)) {
 		if (local) {
 			*local = 1;
 		}
 	} else {
-		size = XFS_ATTR_LEAF_ENTSIZE_REMOTE(namelen);
+		size = xfs_attr_leaf_entsize_remote(namelen);
 		if (local) {
 			*local = 0;
 		}
@@ -2372,7 +2372,7 @@
 
 		if (entry->flags & XFS_ATTR_LOCAL) {
 			xfs_attr_leaf_name_local_t *name_loc =
-				XFS_ATTR_LEAF_NAME_LOCAL(leaf, i);
+				xfs_attr_leaf_name_local(leaf, i);
 
 			retval = context->put_listent(context,
 						entry->flags,
@@ -2384,7 +2384,7 @@
 				return retval;
 		} else {
 			xfs_attr_leaf_name_remote_t *name_rmt =
-				XFS_ATTR_LEAF_NAME_REMOTE(leaf, i);
+				xfs_attr_leaf_name_remote(leaf, i);
 
 			int valuelen = be32_to_cpu(name_rmt->valuelen);
 
@@ -2468,11 +2468,11 @@
 
 #ifdef DEBUG
 	if (entry->flags & XFS_ATTR_LOCAL) {
-		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index);
+		name_loc = xfs_attr_leaf_name_local(leaf, args->index);
 		namelen = name_loc->namelen;
 		name = (char *)name_loc->nameval;
 	} else {
-		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
+		name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
 		namelen = name_rmt->namelen;
 		name = (char *)name_rmt->name;
 	}
@@ -2487,7 +2487,7 @@
 
 	if (args->rmtblkno) {
 		ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0);
-		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
+		name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
 		name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
 		name_rmt->valuelen = cpu_to_be32(args->valuelen);
 		xfs_da_log_buf(args->trans, bp,
@@ -2534,7 +2534,7 @@
 	xfs_da_log_buf(args->trans, bp,
 			XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
 	if ((entry->flags & XFS_ATTR_LOCAL) == 0) {
-		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index);
+		name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
 		name_rmt->valueblk = 0;
 		name_rmt->valuelen = 0;
 		xfs_da_log_buf(args->trans, bp,
@@ -2607,20 +2607,20 @@
 
 #ifdef DEBUG
 	if (entry1->flags & XFS_ATTR_LOCAL) {
-		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf1, args->index);
+		name_loc = xfs_attr_leaf_name_local(leaf1, args->index);
 		namelen1 = name_loc->namelen;
 		name1 = (char *)name_loc->nameval;
 	} else {
-		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index);
+		name_rmt = xfs_attr_leaf_name_remote(leaf1, args->index);
 		namelen1 = name_rmt->namelen;
 		name1 = (char *)name_rmt->name;
 	}
 	if (entry2->flags & XFS_ATTR_LOCAL) {
-		name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf2, args->index2);
+		name_loc = xfs_attr_leaf_name_local(leaf2, args->index2);
 		namelen2 = name_loc->namelen;
 		name2 = (char *)name_loc->nameval;
 	} else {
-		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf2, args->index2);
+		name_rmt = xfs_attr_leaf_name_remote(leaf2, args->index2);
 		namelen2 = name_rmt->namelen;
 		name2 = (char *)name_rmt->name;
 	}
@@ -2637,7 +2637,7 @@
 			  XFS_DA_LOGRANGE(leaf1, entry1, sizeof(*entry1)));
 	if (args->rmtblkno) {
 		ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
-		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index);
+		name_rmt = xfs_attr_leaf_name_remote(leaf1, args->index);
 		name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
 		name_rmt->valuelen = cpu_to_be32(args->valuelen);
 		xfs_da_log_buf(args->trans, bp1,
@@ -2648,7 +2648,7 @@
 	xfs_da_log_buf(args->trans, bp2,
 			  XFS_DA_LOGRANGE(leaf2, entry2, sizeof(*entry2)));
 	if ((entry2->flags & XFS_ATTR_LOCAL) == 0) {
-		name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf2, args->index2);
+		name_rmt = xfs_attr_leaf_name_remote(leaf2, args->index2);
 		name_rmt->valueblk = 0;
 		name_rmt->valuelen = 0;
 		xfs_da_log_buf(args->trans, bp2,
@@ -2855,7 +2855,7 @@
 	for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
 		if (be16_to_cpu(entry->nameidx) &&
 		    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
-			name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i);
+			name_rmt = xfs_attr_leaf_name_remote(leaf, i);
 			if (name_rmt->valueblk)
 				count++;
 		}
@@ -2883,7 +2883,7 @@
 	for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
 		if (be16_to_cpu(entry->nameidx) &&
 		    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
-			name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i);
+			name_rmt = xfs_attr_leaf_name_remote(leaf, i);
 			if (name_rmt->valueblk) {
 				lp->valueblk = be32_to_cpu(name_rmt->valueblk);
 				lp->valuelen = XFS_B_TO_FSB(dp->i_mount,
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index 83e9af4..9c7d22f 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -151,8 +151,6 @@
 /*
  * Cast typed pointers for "local" and "remote" name/value structs.
  */
-#define XFS_ATTR_LEAF_NAME_REMOTE(leafp,idx)	\
-	xfs_attr_leaf_name_remote(leafp,idx)
 static inline xfs_attr_leaf_name_remote_t *
 xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
 {
@@ -160,8 +158,6 @@
 		&((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)];
 }
 
-#define XFS_ATTR_LEAF_NAME_LOCAL(leafp,idx)	\
-	xfs_attr_leaf_name_local(leafp,idx)
 static inline xfs_attr_leaf_name_local_t *
 xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
 {
@@ -169,8 +165,6 @@
 		&((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)];
 }
 
-#define XFS_ATTR_LEAF_NAME(leafp,idx)		\
-	xfs_attr_leaf_name(leafp,idx)
 static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
 {
 	return &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)];
@@ -181,24 +175,18 @@
  * a "local" name/value structure, a "remote" name/value structure, and
  * a pointer which might be either.
  */
-#define XFS_ATTR_LEAF_ENTSIZE_REMOTE(nlen)	\
-	xfs_attr_leaf_entsize_remote(nlen)
 static inline int xfs_attr_leaf_entsize_remote(int nlen)
 {
 	return ((uint)sizeof(xfs_attr_leaf_name_remote_t) - 1 + (nlen) + \
 		XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
 }
 
-#define XFS_ATTR_LEAF_ENTSIZE_LOCAL(nlen,vlen)	\
-	xfs_attr_leaf_entsize_local(nlen,vlen)
 static inline int xfs_attr_leaf_entsize_local(int nlen, int vlen)
 {
 	return ((uint)sizeof(xfs_attr_leaf_name_local_t) - 1 + (nlen) + (vlen) +
 		XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
 }
 
-#define XFS_ATTR_LEAF_ENTSIZE_LOCAL_MAX(bsize)	\
-	xfs_attr_leaf_entsize_local_max(bsize)
 static inline int xfs_attr_leaf_entsize_local_max(int bsize)
 {
 	return (((bsize) >> 1) + ((bsize) >> 2));
diff --git a/fs/xfs/xfs_bit.h b/fs/xfs/xfs_bit.h
index bca7b24..f1e3c90 100644
--- a/fs/xfs/xfs_bit.h
+++ b/fs/xfs/xfs_bit.h
@@ -23,24 +23,16 @@
  */
 
 /*
- * masks with n high/low bits set, 32-bit values & 64-bit values
+ * masks with n high/low bits set, 64-bit values
  */
-#define	XFS_MASK32HI(n)		xfs_mask32hi(n)
-static inline __uint32_t xfs_mask32hi(int n)
-{
-	return (__uint32_t)-1 << (32 - (n));
-}
-#define	XFS_MASK64HI(n)		xfs_mask64hi(n)
 static inline __uint64_t xfs_mask64hi(int n)
 {
 	return (__uint64_t)-1 << (64 - (n));
 }
-#define	XFS_MASK32LO(n)		xfs_mask32lo(n)
 static inline __uint32_t xfs_mask32lo(int n)
 {
 	return ((__uint32_t)1 << (n)) - 1;
 }
-#define	XFS_MASK64LO(n)		xfs_mask64lo(n)
 static inline __uint64_t xfs_mask64lo(int n)
 {
 	return ((__uint64_t)1 << (n)) - 1;
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 8f1ec73..ba6b08c 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -110,16 +110,16 @@
 
 	ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
 	s->br_startoff = ((xfs_fileoff_t)l0 &
-			   XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
+			   xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
 #if XFS_BIG_BLKNOS
-	s->br_startblock = (((xfs_fsblock_t)l0 & XFS_MASK64LO(9)) << 43) |
+	s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) |
 			   (((xfs_fsblock_t)l1) >> 21);
 #else
 #ifdef DEBUG
 	{
 		xfs_dfsbno_t	b;
 
-		b = (((xfs_dfsbno_t)l0 & XFS_MASK64LO(9)) << 43) |
+		b = (((xfs_dfsbno_t)l0 & xfs_mask64lo(9)) << 43) |
 		    (((xfs_dfsbno_t)l1) >> 21);
 		ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
 		s->br_startblock = (xfs_fsblock_t)b;
@@ -128,7 +128,7 @@
 	s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21);
 #endif	/* DEBUG */
 #endif	/* XFS_BIG_BLKNOS */
-	s->br_blockcount = (xfs_filblks_t)(l1 & XFS_MASK64LO(21));
+	s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21));
 	/* This is xfs_extent_state() in-line */
 	if (ext_flag) {
 		ASSERT(s->br_blockcount != 0);	/* saved for DMIG */
@@ -153,7 +153,7 @@
 xfs_bmbt_get_blockcount(
 	xfs_bmbt_rec_host_t	*r)
 {
-	return (xfs_filblks_t)(r->l1 & XFS_MASK64LO(21));
+	return (xfs_filblks_t)(r->l1 & xfs_mask64lo(21));
 }
 
 /*
@@ -164,13 +164,13 @@
 	xfs_bmbt_rec_host_t	*r)
 {
 #if XFS_BIG_BLKNOS
-	return (((xfs_fsblock_t)r->l0 & XFS_MASK64LO(9)) << 43) |
+	return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) |
 	       (((xfs_fsblock_t)r->l1) >> 21);
 #else
 #ifdef DEBUG
 	xfs_dfsbno_t	b;
 
-	b = (((xfs_dfsbno_t)r->l0 & XFS_MASK64LO(9)) << 43) |
+	b = (((xfs_dfsbno_t)r->l0 & xfs_mask64lo(9)) << 43) |
 	    (((xfs_dfsbno_t)r->l1) >> 21);
 	ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
 	return (xfs_fsblock_t)b;
@@ -188,7 +188,7 @@
 	xfs_bmbt_rec_host_t	*r)
 {
 	return ((xfs_fileoff_t)r->l0 &
-		 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
+		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
 }
 
 xfs_exntst_t
@@ -219,7 +219,7 @@
 xfs_bmbt_disk_get_blockcount(
 	xfs_bmbt_rec_t	*r)
 {
-	return (xfs_filblks_t)(be64_to_cpu(r->l1) & XFS_MASK64LO(21));
+	return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
 }
 
 /*
@@ -230,7 +230,7 @@
 	xfs_bmbt_rec_t	*r)
 {
 	return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
-		 XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
+		 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
 }
 
 
@@ -248,33 +248,33 @@
 	int		extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
 
 	ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
-	ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
-	ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
+	ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
+	ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
 
 #if XFS_BIG_BLKNOS
-	ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
+	ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);
 
 	r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
 		((xfs_bmbt_rec_base_t)startoff << 9) |
 		((xfs_bmbt_rec_base_t)startblock >> 43);
 	r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
 		((xfs_bmbt_rec_base_t)blockcount &
-		(xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+		(xfs_bmbt_rec_base_t)xfs_mask64lo(21));
 #else	/* !XFS_BIG_BLKNOS */
 	if (ISNULLSTARTBLOCK(startblock)) {
 		r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
 			((xfs_bmbt_rec_base_t)startoff << 9) |
-			 (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
-		r->l1 = XFS_MASK64HI(11) |
+			 (xfs_bmbt_rec_base_t)xfs_mask64lo(9);
+		r->l1 = xfs_mask64hi(11) |
 			  ((xfs_bmbt_rec_base_t)startblock << 21) |
 			  ((xfs_bmbt_rec_base_t)blockcount &
-			   (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+			   (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
 	} else {
 		r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
 			((xfs_bmbt_rec_base_t)startoff << 9);
 		r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
 			 ((xfs_bmbt_rec_base_t)blockcount &
-			 (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+			 (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
 	}
 #endif	/* XFS_BIG_BLKNOS */
 }
@@ -306,11 +306,11 @@
 	int			extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
 
 	ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
-	ASSERT((startoff & XFS_MASK64HI(64-BMBT_STARTOFF_BITLEN)) == 0);
-	ASSERT((blockcount & XFS_MASK64HI(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
+	ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
+	ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
 
 #if XFS_BIG_BLKNOS
-	ASSERT((startblock & XFS_MASK64HI(64-BMBT_STARTBLOCK_BITLEN)) == 0);
+	ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);
 
 	r->l0 = cpu_to_be64(
 		((xfs_bmbt_rec_base_t)extent_flag << 63) |
@@ -319,17 +319,17 @@
 	r->l1 = cpu_to_be64(
 		((xfs_bmbt_rec_base_t)startblock << 21) |
 		 ((xfs_bmbt_rec_base_t)blockcount &
-		  (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+		  (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
 #else	/* !XFS_BIG_BLKNOS */
 	if (ISNULLSTARTBLOCK(startblock)) {
 		r->l0 = cpu_to_be64(
 			((xfs_bmbt_rec_base_t)extent_flag << 63) |
 			 ((xfs_bmbt_rec_base_t)startoff << 9) |
-			  (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
-		r->l1 = cpu_to_be64(XFS_MASK64HI(11) |
+			  (xfs_bmbt_rec_base_t)xfs_mask64lo(9));
+		r->l1 = cpu_to_be64(xfs_mask64hi(11) |
 			  ((xfs_bmbt_rec_base_t)startblock << 21) |
 			  ((xfs_bmbt_rec_base_t)blockcount &
-			   (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+			   (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
 	} else {
 		r->l0 = cpu_to_be64(
 			((xfs_bmbt_rec_base_t)extent_flag << 63) |
@@ -337,7 +337,7 @@
 		r->l1 = cpu_to_be64(
 			((xfs_bmbt_rec_base_t)startblock << 21) |
 			 ((xfs_bmbt_rec_base_t)blockcount &
-			  (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)));
+			  (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
 	}
 #endif	/* XFS_BIG_BLKNOS */
 }
@@ -362,9 +362,9 @@
 	xfs_bmbt_rec_host_t *r,
 	xfs_filblks_t	v)
 {
-	ASSERT((v & XFS_MASK64HI(43)) == 0);
-	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(43)) |
-		  (xfs_bmbt_rec_base_t)(v & XFS_MASK64LO(21));
+	ASSERT((v & xfs_mask64hi(43)) == 0);
+	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64hi(43)) |
+		  (xfs_bmbt_rec_base_t)(v & xfs_mask64lo(21));
 }
 
 /*
@@ -376,21 +376,21 @@
 	xfs_fsblock_t	v)
 {
 #if XFS_BIG_BLKNOS
-	ASSERT((v & XFS_MASK64HI(12)) == 0);
-	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64HI(55)) |
+	ASSERT((v & xfs_mask64hi(12)) == 0);
+	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64hi(55)) |
 		  (xfs_bmbt_rec_base_t)(v >> 43);
-	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21)) |
+	r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) |
 		  (xfs_bmbt_rec_base_t)(v << 21);
 #else	/* !XFS_BIG_BLKNOS */
 	if (ISNULLSTARTBLOCK(v)) {
-		r->l0 |= (xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
-		r->l1 = (xfs_bmbt_rec_base_t)XFS_MASK64HI(11) |
+		r->l0 |= (xfs_bmbt_rec_base_t)xfs_mask64lo(9);
+		r->l1 = (xfs_bmbt_rec_base_t)xfs_mask64hi(11) |
 			  ((xfs_bmbt_rec_base_t)v << 21) |
-			  (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+			  (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
 	} else {
-		r->l0 &= ~(xfs_bmbt_rec_base_t)XFS_MASK64LO(9);
+		r->l0 &= ~(xfs_bmbt_rec_base_t)xfs_mask64lo(9);
 		r->l1 = ((xfs_bmbt_rec_base_t)v << 21) |
-			  (r->l1 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(21));
+			  (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
 	}
 #endif	/* XFS_BIG_BLKNOS */
 }
@@ -403,10 +403,10 @@
 	xfs_bmbt_rec_host_t *r,
 	xfs_fileoff_t	v)
 {
-	ASSERT((v & XFS_MASK64HI(9)) == 0);
-	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) XFS_MASK64HI(1)) |
+	ASSERT((v & xfs_mask64hi(9)) == 0);
+	r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) xfs_mask64hi(1)) |
 		((xfs_bmbt_rec_base_t)v << 9) |
-		  (r->l0 & (xfs_bmbt_rec_base_t)XFS_MASK64LO(9));
+		  (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64lo(9));
 }
 
 /*
@@ -419,9 +419,9 @@
 {
 	ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
 	if (v == XFS_EXT_NORM)
-		r->l0 &= XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN);
+		r->l0 &= xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN);
 	else
-		r->l0 |= XFS_MASK64HI(BMBT_EXNTFLAG_BITLEN);
+		r->l0 |= xfs_mask64hi(BMBT_EXNTFLAG_BITLEN);
 }
 
 /*
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 7ed5926..2c3ef20 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -730,8 +730,8 @@
 	struct xfs_btree_block	*block)
 {
 	int			rval = 0;
-	xfs_fsblock_t		left = be64_to_cpu(block->bb_u.l.bb_leftsib);
-	xfs_fsblock_t		right = be64_to_cpu(block->bb_u.l.bb_rightsib);
+	xfs_dfsbno_t		left = be64_to_cpu(block->bb_u.l.bb_leftsib);
+	xfs_dfsbno_t		right = be64_to_cpu(block->bb_u.l.bb_rightsib);
 
 	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLDFSBNO) {
 		xfs_btree_reada_bufl(cur->bc_mp, left, 1);
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index e2fa0a1..e1f0a06 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -517,9 +517,9 @@
 		/*
 		 * If it didn't fit, set the final offset to here & return.
 		 */
-		if (filldir(dirent, dep->name, dep->namelen, cook,
+		if (filldir(dirent, dep->name, dep->namelen, cook & 0x7fffffff,
 			    ino, DT_UNKNOWN)) {
-			*offset = cook;
+			*offset = cook & 0x7fffffff;
 			xfs_da_brelse(NULL, bp);
 			return 0;
 		}
@@ -529,7 +529,8 @@
 	 * Reached the end of the block.
 	 * Set the offset to a non-existent block 1 and return.
 	 */
-	*offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0);
+	*offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
+			0x7fffffff;
 	xfs_da_brelse(NULL, bp);
 	return 0;
 }
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 9353599..ef805a3 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -1092,7 +1092,7 @@
 		 * Won't fit.  Return to caller.
 		 */
 		if (filldir(dirent, dep->name, dep->namelen,
-			    xfs_dir2_byte_to_dataptr(mp, curoff),
+			    xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff,
 			    ino, DT_UNKNOWN))
 			break;
 
@@ -1108,9 +1108,9 @@
 	 * All done.  Set output offset value to current offset.
 	 */
 	if (curoff > xfs_dir2_dataptr_to_byte(mp, XFS_DIR2_MAX_DATAPTR))
-		*offset = XFS_DIR2_MAX_DATAPTR;
+		*offset = XFS_DIR2_MAX_DATAPTR & 0x7fffffff;
 	else
-		*offset = xfs_dir2_byte_to_dataptr(mp, curoff);
+		*offset = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
 	kmem_free(map);
 	if (bp)
 		xfs_da_brelse(NULL, bp);
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index b46af00..a8a8a6e 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -752,8 +752,8 @@
 #if XFS_BIG_INUMS
 		ino += mp->m_inoadd;
 #endif
-		if (filldir(dirent, ".", 1, dot_offset, ino, DT_DIR)) {
-			*offset = dot_offset;
+		if (filldir(dirent, ".", 1, dot_offset & 0x7fffffff, ino, DT_DIR)) {
+			*offset = dot_offset & 0x7fffffff;
 			return 0;
 		}
 	}
@@ -766,8 +766,8 @@
 #if XFS_BIG_INUMS
 		ino += mp->m_inoadd;
 #endif
-		if (filldir(dirent, "..", 2, dotdot_offset, ino, DT_DIR)) {
-			*offset = dotdot_offset;
+		if (filldir(dirent, "..", 2, dotdot_offset & 0x7fffffff, ino, DT_DIR)) {
+			*offset = dotdot_offset & 0x7fffffff;
 			return 0;
 		}
 	}
@@ -791,14 +791,15 @@
 #endif
 
 		if (filldir(dirent, sfep->name, sfep->namelen,
-					    off, ino, DT_UNKNOWN)) {
-			*offset = off;
+			    off & 0x7fffffff, ino, DT_UNKNOWN)) {
+			*offset = off & 0x7fffffff;
 			return 0;
 		}
 		sfep = xfs_dir2_sf_nextentry(sfp, sfep);
 	}
 
-	*offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0);
+	*offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
+			0x7fffffff;
 	return 0;
 }
 
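Each offset handed to filldir() in the readdir paths above is masked with 0x7fffffff so the directory cookie stays within the positive range of a signed 32-bit offset; only the low 31 bits of the XFS data pointer are exposed. A hedged sketch of the recurring pattern (the macro name is hypothetical, the patch uses the literal directly):

/* Sketch only -- XFS_DIR2_COOKIE_MASK is a hypothetical name for the
 * 0x7fffffff literal used above. */
#define XFS_DIR2_COOKIE_MASK	0x7fffffff

static inline xfs_off_t example_dir_cookie(xfs_dir2_dataptr_t dataptr)
{
	/* keep the cookie positive when stored in a signed 32-bit offset */
	return dataptr & XFS_DIR2_COOKIE_MASK;
}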
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 589c41c..f7c06fa 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -465,8 +465,8 @@
 #define XFS_IOC_ERROR_INJECTION	     _IOW ('X', 116, struct xfs_error_injection)
 #define XFS_IOC_ERROR_CLEARALL	     _IOW ('X', 117, struct xfs_error_injection)
 /*	XFS_IOC_ATTRCTL_BY_HANDLE -- deprecated 118	 */
-#define XFS_IOC_FREEZE		     _IOWR('X', 119, int)
-#define XFS_IOC_THAW		     _IOWR('X', 120, int)
+/*	XFS_IOC_FREEZE		  -- FIFREEZE   119	 */
+/*	XFS_IOC_THAW		  -- FITHAW     120	 */
 #define XFS_IOC_FSSETDM_BY_HANDLE    _IOW ('X', 121, struct xfs_fsop_setdm_handlereq)
 #define XFS_IOC_ATTRLIST_BY_HANDLE   _IOW ('X', 122, struct xfs_fsop_attrlist_handlereq)
 #define XFS_IOC_ATTRMULTI_BY_HANDLE  _IOW ('X', 123, struct xfs_fsop_attrmulti_handlereq)
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 852b6d3..680d0e0 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -595,17 +595,19 @@
 	return 0;
 }
 
-void
+int
 xfs_fs_log_dummy(
 	xfs_mount_t	*mp)
 {
 	xfs_trans_t	*tp;
 	xfs_inode_t	*ip;
+	int		error;
 
 	tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
-	if (xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0)) {
+	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
+	if (error) {
 		xfs_trans_cancel(tp, 0);
-		return;
+		return error;
 	}
 
 	ip = mp->m_rootip;
@@ -615,9 +617,10 @@
 	xfs_trans_ihold(tp, ip);
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 	xfs_trans_set_sync(tp);
-	xfs_trans_commit(tp, 0);
+	error = xfs_trans_commit(tp, 0);
 
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	return error;
 }
 
 int
diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h
index 300d0c9..88435e0 100644
--- a/fs/xfs/xfs_fsops.h
+++ b/fs/xfs/xfs_fsops.h
@@ -25,6 +25,6 @@
 extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval,
 				xfs_fsop_resblks_t *outval);
 extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags);
-extern void xfs_fs_log_dummy(xfs_mount_t *mp);
+extern int xfs_fs_log_dummy(xfs_mount_t *mp);
 
 #endif	/* __XFS_FSOPS_H__ */
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/xfs_types.h
index 0f51916..b2f7245 100644
--- a/fs/xfs/xfs_types.h
+++ b/fs/xfs/xfs_types.h
@@ -45,7 +45,7 @@
 typedef __uint32_t		inst_t;		/* an instruction */
 
 typedef __s64			xfs_off_t;	/* <file offset> type */
-typedef __u64			xfs_ino_t;	/* <inode> type */
+typedef unsigned long long	xfs_ino_t;	/* <inode> type */
 typedef __s64			xfs_daddr_t;	/* <disk address> type */
 typedef char *			xfs_caddr_t;	/* <core address> type */
 typedef __u32			xfs_dev_t;
@@ -111,8 +111,6 @@
 typedef __int64_t	xfs_sfiloff_t;	/* signed block number in a file */
 typedef __uint64_t	xfs_filblks_t;	/* number of blocks in a file */
 
-typedef __uint8_t	xfs_arch_t;	/* architecture of an xfs fs */
-
 /*
  * Null values for the types.
  */
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
deleted file mode 100644
index 29feee2..0000000
--- a/include/acpi/acconfig.h
+++ /dev/null
@@ -1,217 +0,0 @@
-/******************************************************************************
- *
- * Name: acconfig.h - Global configuration constants
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#ifndef _ACCONFIG_H
-#define _ACCONFIG_H
-
-/******************************************************************************
- *
- * Configuration options
- *
- *****************************************************************************/
-
-/*
- * ACPI_DEBUG_OUTPUT    - This switch enables all the debug facilities of the
- *                        ACPI subsystem.  This includes the DEBUG_PRINT output
- *                        statements.  When disabled, all DEBUG_PRINT
- *                        statements are compiled out.
- *
- * ACPI_APPLICATION     - Use this switch if the subsystem is going to be run
- *                        at the application level.
- *
- */
-
-/* Current ACPICA subsystem version in YYYYMMDD format */
-
-#define ACPI_CA_VERSION                 0x20080926
-
-/*
- * OS name, used for the _OS object.  The _OS object is essentially obsolete,
- * but there is a large base of ASL/AML code in existing machines that check
- * for the string below.  The use of this string usually guarantees that
- * the ASL will execute down the most tested code path.  Also, there is some
- * code that will not execute the _OSI method unless _OS matches the string
- * below.  Therefore, change this string at your own risk.
- */
-#define ACPI_OS_NAME                    "Microsoft Windows NT"
-
-/* Maximum objects in the various object caches */
-
-#define ACPI_MAX_STATE_CACHE_DEPTH      96	/* State objects */
-#define ACPI_MAX_PARSE_CACHE_DEPTH      96	/* Parse tree objects */
-#define ACPI_MAX_EXTPARSE_CACHE_DEPTH   96	/* Parse tree objects */
-#define ACPI_MAX_OBJECT_CACHE_DEPTH     96	/* Interpreter operand objects */
-#define ACPI_MAX_NAMESPACE_CACHE_DEPTH  96	/* Namespace objects */
-
-/*
- * Should the subsystem abort the loading of an ACPI table if the
- * table checksum is incorrect?
- */
-#define ACPI_CHECKSUM_ABORT             FALSE
-
-/******************************************************************************
- *
- * Subsystem Constants
- *
- *****************************************************************************/
-
-/* Version of ACPI supported */
-
-#define ACPI_CA_SUPPORT_LEVEL           3
-
-/* Maximum count for a semaphore object */
-
-#define ACPI_MAX_SEMAPHORE_COUNT        256
-
-/* Maximum object reference count (detects object deletion issues) */
-
-#define ACPI_MAX_REFERENCE_COUNT        0x1000
-
-/* Size of cached memory mapping for system memory operation region */
-
-#define ACPI_SYSMEM_REGION_WINDOW_SIZE  4096
-
-/* owner_id tracking. 8 entries allows for 255 owner_ids */
-
-#define ACPI_NUM_OWNERID_MASKS          8
-
-/* Size of the root table array is increased by this increment */
-
-#define ACPI_ROOT_TABLE_SIZE_INCREMENT  4
-
-/******************************************************************************
- *
- * ACPI Specification constants (Do not change unless the specification changes)
- *
- *****************************************************************************/
-
-/* Number of distinct GPE register blocks and register width */
-
-#define ACPI_MAX_GPE_BLOCKS             2
-#define ACPI_GPE_REGISTER_WIDTH         8
-
-/* Method info (in WALK_STATE), containing local variables and argumetns */
-
-#define ACPI_METHOD_NUM_LOCALS          8
-#define ACPI_METHOD_MAX_LOCAL           7
-
-#define ACPI_METHOD_NUM_ARGS            7
-#define ACPI_METHOD_MAX_ARG             6
-
-/* Length of _HID, _UID, _CID, and UUID values */
-
-#define ACPI_DEVICE_ID_LENGTH           0x09
-#define ACPI_MAX_CID_LENGTH             48
-#define ACPI_UUID_LENGTH                16
-
-/*
- * Operand Stack (in WALK_STATE), Must be large enough to contain METHOD_MAX_ARG
- */
-#define ACPI_OBJ_NUM_OPERANDS           8
-#define ACPI_OBJ_MAX_OPERAND            7
-
-/* Number of elements in the Result Stack frame, can be an arbitrary value */
-
-#define ACPI_RESULTS_FRAME_OBJ_NUM      8
-
-/*
- * Maximal number of elements the Result Stack can contain,
- * it may be an arbitray value not exceeding the types of
- * result_size and result_count (now u8).
- */
-#define ACPI_RESULTS_OBJ_NUM_MAX        255
-
-/* Names within the namespace are 4 bytes long */
-
-#define ACPI_NAME_SIZE                  4
-#define ACPI_PATH_SEGMENT_LENGTH        5	/* 4 chars for name + 1 char for separator */
-#define ACPI_PATH_SEPARATOR             '.'
-
-/* Sizes for ACPI table headers */
-
-#define ACPI_OEM_ID_SIZE                6
-#define ACPI_OEM_TABLE_ID_SIZE          8
-
-/* Constants used in searching for the RSDP in low memory */
-
-#define ACPI_EBDA_PTR_LOCATION          0x0000040E	/* Physical Address */
-#define ACPI_EBDA_PTR_LENGTH            2
-#define ACPI_EBDA_WINDOW_SIZE           1024
-#define ACPI_HI_RSDP_WINDOW_BASE        0x000E0000	/* Physical Address */
-#define ACPI_HI_RSDP_WINDOW_SIZE        0x00020000
-#define ACPI_RSDP_SCAN_STEP             16
-
-/* Operation regions */
-
-#define ACPI_NUM_PREDEFINED_REGIONS     8
-#define ACPI_USER_REGION_BEGIN          0x80
-
-/* Maximum space_ids for Operation Regions */
-
-#define ACPI_MAX_ADDRESS_SPACE          255
-
-/* Array sizes.  Used for range checking also */
-
-#define ACPI_MAX_MATCH_OPCODE           5
-
-/* RSDP checksums */
-
-#define ACPI_RSDP_CHECKSUM_LENGTH       20
-#define ACPI_RSDP_XCHECKSUM_LENGTH      36
-
-/* SMBus bidirectional buffer size */
-
-#define ACPI_SMBUS_BUFFER_SIZE          34
-
-/******************************************************************************
- *
- * ACPI AML Debugger
- *
- *****************************************************************************/
-
-#define ACPI_DEBUGGER_MAX_ARGS          8	/* Must be max method args + 1 */
-
-#define ACPI_DEBUGGER_COMMAND_PROMPT    '-'
-#define ACPI_DEBUGGER_EXECUTE_PROMPT    '%'
-
-#endif				/* _ACCONFIG_H */
diff --git a/include/acpi/acdisasm.h b/include/acpi/acdisasm.h
deleted file mode 100644
index 0c1ed38..0000000
--- a/include/acpi/acdisasm.h
+++ /dev/null
@@ -1,445 +0,0 @@
-/******************************************************************************
- *
- * Name: acdisasm.h - AML disassembler
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#ifndef __ACDISASM_H__
-#define __ACDISASM_H__
-
-#include "amlresrc.h"
-
-#define BLOCK_NONE              0
-#define BLOCK_PAREN             1
-#define BLOCK_BRACE             2
-#define BLOCK_COMMA_LIST        4
-#define ACPI_DEFAULT_RESNAME    *(u32 *) "__RD"
-
-struct acpi_external_list {
-	char *path;
-	char *internal_path;
-	struct acpi_external_list *next;
-	u32 value;
-	u16 length;
-	u8 type;
-};
-
-extern struct acpi_external_list *acpi_gbl_external_list;
-
-typedef const struct acpi_dmtable_info {
-	u8 opcode;
-	u8 offset;
-	char *name;
-
-} acpi_dmtable_info;
-
-/*
- * Values for Opcode above.
- * Note: 0-7 must not change, used as a flag shift value
- */
-#define ACPI_DMT_FLAG0                  0
-#define ACPI_DMT_FLAG1                  1
-#define ACPI_DMT_FLAG2                  2
-#define ACPI_DMT_FLAG3                  3
-#define ACPI_DMT_FLAG4                  4
-#define ACPI_DMT_FLAG5                  5
-#define ACPI_DMT_FLAG6                  6
-#define ACPI_DMT_FLAG7                  7
-#define ACPI_DMT_FLAGS0                 8
-#define ACPI_DMT_FLAGS2                 9
-#define ACPI_DMT_UINT8                  10
-#define ACPI_DMT_UINT16                 11
-#define ACPI_DMT_UINT24                 12
-#define ACPI_DMT_UINT32                 13
-#define ACPI_DMT_UINT56                 14
-#define ACPI_DMT_UINT64                 15
-#define ACPI_DMT_STRING                 16
-#define ACPI_DMT_NAME4                  17
-#define ACPI_DMT_NAME6                  18
-#define ACPI_DMT_NAME8                  19
-#define ACPI_DMT_CHKSUM                 20
-#define ACPI_DMT_SPACEID                21
-#define ACPI_DMT_GAS                    22
-#define ACPI_DMT_ASF                    23
-#define ACPI_DMT_DMAR                   24
-#define ACPI_DMT_HEST                   25
-#define ACPI_DMT_HESTNTFY               26
-#define ACPI_DMT_HESTNTYP               27
-#define ACPI_DMT_MADT                   28
-#define ACPI_DMT_SRAT                   29
-#define ACPI_DMT_EXIT                   30
-#define ACPI_DMT_SIG                    31
-
-typedef
-void (*acpi_dmtable_handler) (struct acpi_table_header * table);
-
-struct acpi_dmtable_data {
-	char *signature;
-	struct acpi_dmtable_info *table_info;
-	acpi_dmtable_handler table_handler;
-	char *name;
-};
-
-struct acpi_op_walk_info {
-	u32 level;
-	u32 last_level;
-	u32 count;
-	u32 bit_offset;
-	u32 flags;
-	struct acpi_walk_state *walk_state;
-};
-
-typedef
-acpi_status(*asl_walk_callback) (union acpi_parse_object * op,
-				 u32 level, void *context);
-
-struct acpi_resource_tag {
-	u32 bit_index;
-	char *tag;
-};
-
-/* Strings used for decoding flags to ASL keywords */
-
-extern const char *acpi_gbl_word_decode[];
-extern const char *acpi_gbl_irq_decode[];
-extern const char *acpi_gbl_lock_rule[];
-extern const char *acpi_gbl_access_types[];
-extern const char *acpi_gbl_update_rules[];
-extern const char *acpi_gbl_match_ops[];
-
-extern struct acpi_dmtable_info acpi_dm_table_info_asf0[];
-extern struct acpi_dmtable_info acpi_dm_table_info_asf1[];
-extern struct acpi_dmtable_info acpi_dm_table_info_asf1a[];
-extern struct acpi_dmtable_info acpi_dm_table_info_asf2[];
-extern struct acpi_dmtable_info acpi_dm_table_info_asf2a[];
-extern struct acpi_dmtable_info acpi_dm_table_info_asf3[];
-extern struct acpi_dmtable_info acpi_dm_table_info_asf4[];
-extern struct acpi_dmtable_info acpi_dm_table_info_asf_hdr[];
-extern struct acpi_dmtable_info acpi_dm_table_info_boot[];
-extern struct acpi_dmtable_info acpi_dm_table_info_bert[];
-extern struct acpi_dmtable_info acpi_dm_table_info_cpep[];
-extern struct acpi_dmtable_info acpi_dm_table_info_cpep0[];
-extern struct acpi_dmtable_info acpi_dm_table_info_dbgp[];
-extern struct acpi_dmtable_info acpi_dm_table_info_dmar[];
-extern struct acpi_dmtable_info acpi_dm_table_info_dmar_hdr[];
-extern struct acpi_dmtable_info acpi_dm_table_info_dmar_scope[];
-extern struct acpi_dmtable_info acpi_dm_table_info_dmar0[];
-extern struct acpi_dmtable_info acpi_dm_table_info_dmar1[];
-extern struct acpi_dmtable_info acpi_dm_table_info_dmar2[];
-extern struct acpi_dmtable_info acpi_dm_table_info_ecdt[];
-extern struct acpi_dmtable_info acpi_dm_table_info_einj[];
-extern struct acpi_dmtable_info acpi_dm_table_info_einj0[];
-extern struct acpi_dmtable_info acpi_dm_table_info_erst[];
-extern struct acpi_dmtable_info acpi_dm_table_info_facs[];
-extern struct acpi_dmtable_info acpi_dm_table_info_fadt1[];
-extern struct acpi_dmtable_info acpi_dm_table_info_fadt2[];
-extern struct acpi_dmtable_info acpi_dm_table_info_gas[];
-extern struct acpi_dmtable_info acpi_dm_table_info_header[];
-extern struct acpi_dmtable_info acpi_dm_table_info_hest[];
-extern struct acpi_dmtable_info acpi_dm_table_info_hest9[];
-extern struct acpi_dmtable_info acpi_dm_table_info_hest_notify[];
-extern struct acpi_dmtable_info acpi_dm_table_info_hpet[];
-extern struct acpi_dmtable_info acpi_dm_table_info_madt[];
-extern struct acpi_dmtable_info acpi_dm_table_info_madt0[];
-extern struct acpi_dmtable_info acpi_dm_table_info_madt1[];
-extern struct acpi_dmtable_info acpi_dm_table_info_madt2[];
-extern struct acpi_dmtable_info acpi_dm_table_info_madt3[];
-extern struct acpi_dmtable_info acpi_dm_table_info_madt4[];
-extern struct acpi_dmtable_info acpi_dm_table_info_madt5[];
-extern struct acpi_dmtable_info acpi_dm_table_info_madt6[];
-extern struct acpi_dmtable_info acpi_dm_table_info_madt7[];
-extern struct acpi_dmtable_info acpi_dm_table_info_madt8[];
-extern struct acpi_dmtable_info acpi_dm_table_info_madt9[];
-extern struct acpi_dmtable_info acpi_dm_table_info_madt10[];
-extern struct acpi_dmtable_info acpi_dm_table_info_madt_hdr[];
-extern struct acpi_dmtable_info acpi_dm_table_info_mcfg[];
-extern struct acpi_dmtable_info acpi_dm_table_info_mcfg0[];
-extern struct acpi_dmtable_info acpi_dm_table_info_rsdp1[];
-extern struct acpi_dmtable_info acpi_dm_table_info_rsdp2[];
-extern struct acpi_dmtable_info acpi_dm_table_info_sbst[];
-extern struct acpi_dmtable_info acpi_dm_table_info_slic[];
-extern struct acpi_dmtable_info acpi_dm_table_info_slit[];
-extern struct acpi_dmtable_info acpi_dm_table_info_spcr[];
-extern struct acpi_dmtable_info acpi_dm_table_info_spmi[];
-extern struct acpi_dmtable_info acpi_dm_table_info_srat[];
-extern struct acpi_dmtable_info acpi_dm_table_info_srat_hdr[];
-extern struct acpi_dmtable_info acpi_dm_table_info_srat0[];
-extern struct acpi_dmtable_info acpi_dm_table_info_srat1[];
-extern struct acpi_dmtable_info acpi_dm_table_info_srat2[];
-extern struct acpi_dmtable_info acpi_dm_table_info_tcpa[];
-extern struct acpi_dmtable_info acpi_dm_table_info_wdrt[];
-
-/*
- * dmtable
- */
-void acpi_dm_dump_data_table(struct acpi_table_header *table);
-
-acpi_status
-acpi_dm_dump_table(u32 table_length,
-		   u32 table_offset,
-		   void *table,
-		   u32 sub_table_length, struct acpi_dmtable_info *info);
-
-void acpi_dm_line_header(u32 offset, u32 byte_length, char *name);
-
-void acpi_dm_line_header2(u32 offset, u32 byte_length, char *name, u32 value);
-
-/*
- * dmtbdump
- */
-void acpi_dm_dump_asf(struct acpi_table_header *table);
-
-void acpi_dm_dump_cpep(struct acpi_table_header *table);
-
-void acpi_dm_dump_dmar(struct acpi_table_header *table);
-
-void acpi_dm_dump_einj(struct acpi_table_header *table);
-
-void acpi_dm_dump_erst(struct acpi_table_header *table);
-
-void acpi_dm_dump_fadt(struct acpi_table_header *table);
-
-void acpi_dm_dump_hest(struct acpi_table_header *table);
-
-void acpi_dm_dump_mcfg(struct acpi_table_header *table);
-
-void acpi_dm_dump_madt(struct acpi_table_header *table);
-
-u32 acpi_dm_dump_rsdp(struct acpi_table_header *table);
-
-void acpi_dm_dump_rsdt(struct acpi_table_header *table);
-
-void acpi_dm_dump_slit(struct acpi_table_header *table);
-
-void acpi_dm_dump_srat(struct acpi_table_header *table);
-
-void acpi_dm_dump_xsdt(struct acpi_table_header *table);
-
-/*
- * dmwalk
- */
-void
-acpi_dm_disassemble(struct acpi_walk_state *walk_state,
-		    union acpi_parse_object *origin, u32 num_opcodes);
-
-void
-acpi_dm_walk_parse_tree(union acpi_parse_object *op,
-			asl_walk_callback descending_callback,
-			asl_walk_callback ascending_callback, void *context);
-
-/*
- * dmopcode
- */
-void
-acpi_dm_disassemble_one_op(struct acpi_walk_state *walk_state,
-			   struct acpi_op_walk_info *info,
-			   union acpi_parse_object *op);
-
-void acpi_dm_decode_internal_object(union acpi_operand_object *obj_desc);
-
-u32 acpi_dm_list_type(union acpi_parse_object *op);
-
-void acpi_dm_method_flags(union acpi_parse_object *op);
-
-void acpi_dm_field_flags(union acpi_parse_object *op);
-
-void acpi_dm_address_space(u8 space_id);
-
-void acpi_dm_region_flags(union acpi_parse_object *op);
-
-void acpi_dm_match_op(union acpi_parse_object *op);
-
-u8 acpi_dm_comma_if_list_member(union acpi_parse_object *op);
-
-void acpi_dm_comma_if_field_member(union acpi_parse_object *op);
-
-/*
- * dmnames
- */
-u32 acpi_dm_dump_name(char *name);
-
-acpi_status
-acpi_ps_display_object_pathname(struct acpi_walk_state *walk_state,
-				union acpi_parse_object *op);
-
-void acpi_dm_namestring(char *name);
-
-/*
- * dmobject
- */
-void
-acpi_dm_display_internal_object(union acpi_operand_object *obj_desc,
-				struct acpi_walk_state *walk_state);
-
-void acpi_dm_display_arguments(struct acpi_walk_state *walk_state);
-
-void acpi_dm_display_locals(struct acpi_walk_state *walk_state);
-
-void
-acpi_dm_dump_method_info(acpi_status status,
-			 struct acpi_walk_state *walk_state,
-			 union acpi_parse_object *op);
-
-/*
- * dmbuffer
- */
-void acpi_dm_disasm_byte_list(u32 level, u8 * byte_data, u32 byte_count);
-
-void
-acpi_dm_byte_list(struct acpi_op_walk_info *info, union acpi_parse_object *op);
-
-void acpi_dm_is_eisa_id(union acpi_parse_object *op);
-
-void acpi_dm_eisa_id(u32 encoded_id);
-
-u8 acpi_dm_is_unicode_buffer(union acpi_parse_object *op);
-
-u8 acpi_dm_is_string_buffer(union acpi_parse_object *op);
-
-/*
- * dmresrc
- */
-void acpi_dm_dump_integer8(u8 value, char *name);
-
-void acpi_dm_dump_integer16(u16 value, char *name);
-
-void acpi_dm_dump_integer32(u32 value, char *name);
-
-void acpi_dm_dump_integer64(u64 value, char *name);
-
-void
-acpi_dm_resource_template(struct acpi_op_walk_info *info,
-			  union acpi_parse_object *op,
-			  u8 * byte_data, u32 byte_count);
-
-acpi_status acpi_dm_is_resource_template(union acpi_parse_object *op);
-
-void acpi_dm_indent(u32 level);
-
-void acpi_dm_bit_list(u16 mask);
-
-void acpi_dm_decode_attribute(u8 attribute);
-
-void acpi_dm_descriptor_name(void);
-
-/*
- * dmresrcl
- */
-void
-acpi_dm_word_descriptor(union aml_resource *resource, u32 length, u32 level);
-
-void
-acpi_dm_dword_descriptor(union aml_resource *resource, u32 length, u32 level);
-
-void
-acpi_dm_extended_descriptor(union aml_resource *resource,
-			    u32 length, u32 level);
-
-void
-acpi_dm_qword_descriptor(union aml_resource *resource, u32 length, u32 level);
-
-void
-acpi_dm_memory24_descriptor(union aml_resource *resource,
-			    u32 length, u32 level);
-
-void
-acpi_dm_memory32_descriptor(union aml_resource *resource,
-			    u32 length, u32 level);
-
-void
-acpi_dm_fixed_memory32_descriptor(union aml_resource *resource,
-				  u32 length, u32 level);
-
-void
-acpi_dm_generic_register_descriptor(union aml_resource *resource,
-				    u32 length, u32 level);
-
-void
-acpi_dm_interrupt_descriptor(union aml_resource *resource,
-			     u32 length, u32 level);
-
-void
-acpi_dm_vendor_large_descriptor(union aml_resource *resource,
-				u32 length, u32 level);
-
-void acpi_dm_vendor_common(char *name, u8 * byte_data, u32 length, u32 level);
-
-/*
- * dmresrcs
- */
-void
-acpi_dm_irq_descriptor(union aml_resource *resource, u32 length, u32 level);
-
-void
-acpi_dm_dma_descriptor(union aml_resource *resource, u32 length, u32 level);
-
-void acpi_dm_io_descriptor(union aml_resource *resource, u32 length, u32 level);
-
-void
-acpi_dm_fixed_io_descriptor(union aml_resource *resource,
-			    u32 length, u32 level);
-
-void
-acpi_dm_start_dependent_descriptor(union aml_resource *resource,
-				   u32 length, u32 level);
-
-void
-acpi_dm_end_dependent_descriptor(union aml_resource *resource,
-				 u32 length, u32 level);
-
-void
-acpi_dm_vendor_small_descriptor(union aml_resource *resource,
-				u32 length, u32 level);
-
-/*
- * dmutils
- */
-void acpi_dm_add_to_external_list(char *path, u8 type, u32 value);
-
-/*
- * dmrestag
- */
-void acpi_dm_find_resources(union acpi_parse_object *root);
-
-void
-acpi_dm_check_resource_reference(union acpi_parse_object *op,
-				 struct acpi_walk_state *walk_state);
-
-#endif				/* __ACDISASM_H__ */
diff --git a/include/acpi/acevents.h b/include/acpi/acevents.h
deleted file mode 100644
index d5d099b..0000000
--- a/include/acpi/acevents.h
+++ /dev/null
@@ -1,216 +0,0 @@
-/******************************************************************************
- *
- * Name: acevents.h - Event subcomponent prototypes and defines
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#ifndef __ACEVENTS_H__
-#define __ACEVENTS_H__
-
-/*
- * evevent
- */
-acpi_status acpi_ev_initialize_events(void);
-
-acpi_status acpi_ev_install_xrupt_handlers(void);
-
-acpi_status acpi_ev_install_fadt_gpes(void);
-
-u32 acpi_ev_fixed_event_detect(void);
-
-/*
- * evmisc
- */
-u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node);
-
-acpi_status acpi_ev_acquire_global_lock(u16 timeout);
-
-acpi_status acpi_ev_release_global_lock(void);
-
-acpi_status acpi_ev_init_global_lock_handler(void);
-
-u32 acpi_ev_get_gpe_number_index(u32 gpe_number);
-
-acpi_status
-acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
-			     u32 notify_value);
-
-/*
- * evgpe - GPE handling and dispatch
- */
-acpi_status
-acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
-				u8 type);
-
-acpi_status
-acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
-		   u8 write_to_hardware);
-
-acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
-
-struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
-						       u32 gpe_number);
-
-/*
- * evgpeblk
- */
-u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
-
-acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback);
-
-acpi_status
-acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
-			    struct acpi_gpe_block_info *gpe_block);
-
-acpi_status
-acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
-			 struct acpi_generic_address *gpe_block_address,
-			 u32 register_count,
-			 u8 gpe_block_base_number,
-			 u32 interrupt_number,
-			 struct acpi_gpe_block_info **return_gpe_block);
-
-acpi_status
-acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
-			     struct acpi_gpe_block_info *gpe_block);
-
-acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block);
-
-u32
-acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
-		     u32 gpe_number);
-
-u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
-
-acpi_status
-acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type);
-
-acpi_status
-acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info);
-
-acpi_status acpi_ev_gpe_initialize(void);
-
-/*
- * evregion - Address Space handling
- */
-acpi_status acpi_ev_install_region_handlers(void);
-
-acpi_status acpi_ev_initialize_op_regions(void);
-
-acpi_status
-acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
-			       u32 function,
-			       acpi_physical_address address,
-			       u32 bit_width, acpi_integer * value);
-
-acpi_status
-acpi_ev_attach_region(union acpi_operand_object *handler_obj,
-		      union acpi_operand_object *region_obj,
-		      u8 acpi_ns_is_locked);
-
-void
-acpi_ev_detach_region(union acpi_operand_object *region_obj,
-		      u8 acpi_ns_is_locked);
-
-acpi_status
-acpi_ev_install_space_handler(struct acpi_namespace_node *node,
-			      acpi_adr_space_type space_id,
-			      acpi_adr_space_handler handler,
-			      acpi_adr_space_setup setup, void *context);
-
-acpi_status
-acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
-			    acpi_adr_space_type space_id);
-
-acpi_status
-acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function);
-
-/*
- * evregini - Region initialization and setup
- */
-acpi_status
-acpi_ev_system_memory_region_setup(acpi_handle handle,
-				   u32 function,
-				   void *handler_context,
-				   void **region_context);
-
-acpi_status
-acpi_ev_io_space_region_setup(acpi_handle handle,
-			      u32 function,
-			      void *handler_context, void **region_context);
-
-acpi_status
-acpi_ev_pci_config_region_setup(acpi_handle handle,
-				u32 function,
-				void *handler_context, void **region_context);
-
-acpi_status
-acpi_ev_cmos_region_setup(acpi_handle handle,
-			  u32 function,
-			  void *handler_context, void **region_context);
-
-acpi_status
-acpi_ev_pci_bar_region_setup(acpi_handle handle,
-			     u32 function,
-			     void *handler_context, void **region_context);
-
-acpi_status
-acpi_ev_default_region_setup(acpi_handle handle,
-			     u32 function,
-			     void *handler_context, void **region_context);
-
-acpi_status
-acpi_ev_initialize_region(union acpi_operand_object *region_obj,
-			  u8 acpi_ns_locked);
-
-/*
- * evsci - SCI (System Control Interrupt) handling/dispatch
- */
-u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context);
-
-u32 acpi_ev_install_sci_handler(void);
-
-acpi_status acpi_ev_remove_sci_handler(void);
-
-u32 acpi_ev_initialize_sCI(u32 program_sCI);
-
-void acpi_ev_terminate(void);
-
-#endif				/* __ACEVENTS_H__  */
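The evgpe prototypes removed above imply a lookup-then-dispatch flow for General Purpose Events. A minimal sketch of that apparent ordering, built only from the signatures declared in the deleted header (the gpe_device handle and GPE number 0x16 are hypothetical values, and this is not the actual interrupt path):

	struct acpi_gpe_event_info *info;

	/* Resolve the per-GPE bookkeeping structure for GPE 0x16 */
	info = acpi_ev_get_gpe_event_info(gpe_device, 0x16);
	if (info) {
		/* Run the associated _Lxx/_Exx method or installed handler */
		(void)acpi_ev_gpe_dispatch(info, 0x16);
	}
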
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 84f5cb2..eda0454 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -153,8 +153,9 @@
 #define AE_AML_CIRCULAR_REFERENCE       (acpi_status) (0x001E | AE_CODE_AML)
 #define AE_AML_BAD_RESOURCE_LENGTH      (acpi_status) (0x001F | AE_CODE_AML)
 #define AE_AML_ILLEGAL_ADDRESS          (acpi_status) (0x0020 | AE_CODE_AML)
+#define AE_AML_INFINITE_LOOP            (acpi_status) (0x0021 | AE_CODE_AML)
 
-#define AE_CODE_AML_MAX                 0x0020
+#define AE_CODE_AML_MAX                 0x0021
 
 /*
  * Internal exceptions used for control
@@ -175,6 +176,8 @@
 
 #define AE_CODE_CTRL_MAX                0x000D
 
+/* Exception strings for acpi_format_exception */
+
 #ifdef DEFINE_ACPI_GLOBALS
 
 /*
@@ -267,6 +270,7 @@
 	"AE_AML_CIRCULAR_REFERENCE",
 	"AE_AML_BAD_RESOURCE_LENGTH",
 	"AE_AML_ILLEGAL_ADDRESS",
+	"AE_AML_INFINITE_LOOP"
 };
 
 char const *acpi_gbl_exception_names_ctrl[] = {
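The acexcep.h hunk keeps the AML status codes and their name table in lockstep: AE_AML_INFINITE_LOOP is added both as a code (0x0021 | AE_CODE_AML) and as the matching acpi_gbl_exception_names_aml entry, and AE_CODE_AML_MAX is bumped to 0x0021. A hedged sketch of how such a code could be mapped back to its printable name; the 0x0FFF value mask and the one-based indexing are inferred from the table layout above, not taken from the real acpi_format_exception implementation:

	const char *aml_exception_name(acpi_status status)
	{
		u32 index = status & 0x0FFF;	/* strip the AE_CODE_AML class bits */

		if (index == 0 || index > 0x0021)	/* 0x0021 is AE_CODE_AML_MAX after this patch */
			return "AE_UNKNOWN_STATUS";

		return acpi_gbl_exception_names_aml[index - 1];	/* table starts at code 0x0001 */
	}
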
diff --git a/include/acpi/acglobal.h b/include/acpi/acglobal.h
deleted file mode 100644
index 15dda46..0000000
--- a/include/acpi/acglobal.h
+++ /dev/null
@@ -1,387 +0,0 @@
-/******************************************************************************
- *
- * Name: acglobal.h - Declarations for global variables
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#ifndef __ACGLOBAL_H__
-#define __ACGLOBAL_H__
-
-/*
- * Ensure that the globals are actually defined and initialized only once.
- *
- * The use of these macros allows a single list of globals (here) in order
- * to simplify maintenance of the code.
- */
-#ifdef DEFINE_ACPI_GLOBALS
-#define ACPI_EXTERN
-#define ACPI_INIT_GLOBAL(a,b) a=b
-#else
-#define ACPI_EXTERN extern
-#define ACPI_INIT_GLOBAL(a,b) a
-#endif
-
-/*****************************************************************************
- *
- * Runtime configuration (static defaults that can be overridden at runtime)
- *
- ****************************************************************************/
-
-/*
- * Enable "slack" in the AML interpreter?  Default is FALSE, and the
- * interpreter strictly follows the ACPI specification.  Setting to TRUE
- * allows the interpreter to ignore certain errors and/or bad AML constructs.
- *
- * Currently, these features are enabled by this flag:
- *
- * 1) Allow "implicit return" of last value in a control method
- * 2) Allow access beyond the end of an operation region
- * 3) Allow access to uninitialized locals/args (auto-init to integer 0)
- * 4) Allow ANY object type to be a source operand for the Store() operator
- * 5) Allow unresolved references (invalid target name) in package objects
- * 6) Enable warning messages for behavior that is not ACPI spec compliant
- */
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE);
-
-/*
- * Automatically serialize ALL control methods? Default is FALSE, meaning
- * to use the Serialized/not_serialized method flags on a per method basis.
- * Only change this if the ASL code is poorly written and cannot handle
- * reentrancy even though methods are marked "NotSerialized".
- */
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_all_methods_serialized, FALSE);
-
-/*
- * Create the predefined _OSI method in the namespace? Default is TRUE
- * because ACPI CA is fully compatible with other ACPI implementations.
- * Changing this will revert ACPI CA (and machine ASL) to pre-OSI behavior.
- */
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE);
-
-/*
- * Disable wakeup GPEs during runtime? Default is TRUE because WAKE and
- * RUNTIME GPEs should never be shared, and WAKE GPEs should typically only
- * be enabled just before going to sleep.
- */
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
-
-/*****************************************************************************
- *
- * Debug support
- *
- ****************************************************************************/
-
-/* Runtime configuration of debug print levels */
-
-extern u32 acpi_dbg_level;
-extern u32 acpi_dbg_layer;
-
-/* Procedure nesting level for debug output */
-
-extern u32 acpi_gbl_nesting_level;
-
-/* Support for dynamic control method tracing mechanism */
-
-ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
-ACPI_EXTERN u32 acpi_gbl_original_dbg_layer;
-ACPI_EXTERN acpi_name acpi_gbl_trace_method_name;
-ACPI_EXTERN u32 acpi_gbl_trace_dbg_level;
-ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
-ACPI_EXTERN u32 acpi_gbl_trace_flags;
-
-/*****************************************************************************
- *
- * ACPI Table globals
- *
- ****************************************************************************/
-
-/*
- * acpi_gbl_root_table_list is the master list of ACPI tables found in the
- * RSDT/XSDT.
- *
- * acpi_gbl_FADT is a local copy of the FADT, converted to a common format.
- */
-ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list;
-ACPI_EXTERN struct acpi_table_fadt acpi_gbl_FADT;
-extern u8 acpi_gbl_permanent_mmap;
-
-/* These addresses are calculated from FADT address values */
-
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
-
-/*
- * Handle both ACPI 1.0 and ACPI 2.0 Integer widths. The integer width is
- * determined by the revision of the DSDT: If the DSDT revision is less than
- * 2, use only the lower 32 bits of the internal 64-bit Integer.
- */
-ACPI_EXTERN u8 acpi_gbl_integer_bit_width;
-ACPI_EXTERN u8 acpi_gbl_integer_byte_width;
-ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
-
-/*****************************************************************************
- *
- * Mutual exclusion within ACPICA subsystem
- *
- ****************************************************************************/
-
-/*
- * Predefined mutex objects. This array contains the
- * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs.
- * (The table maps local handles to the real OS handles)
- */
-ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
-
-/*
- * Global lock mutex is an actual AML mutex object
- * Global lock semaphore works in conjunction with the HW global lock
- */
-ACPI_EXTERN union acpi_operand_object *acpi_gbl_global_lock_mutex;
-ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
-ACPI_EXTERN u16 acpi_gbl_global_lock_handle;
-ACPI_EXTERN u8 acpi_gbl_global_lock_acquired;
-ACPI_EXTERN u8 acpi_gbl_global_lock_present;
-
-/*
- * Spinlocks are used for interfaces that can possibly be called at
- * interrupt level
- */
-ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock;	/* For GPE data structs and registers */
-ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock;	/* For ACPI H/W except GPE registers */
-#define acpi_gbl_gpe_lock	&_acpi_gbl_gpe_lock
-#define acpi_gbl_hardware_lock	&_acpi_gbl_hardware_lock
-
-/*****************************************************************************
- *
- * Miscellaneous globals
- *
- ****************************************************************************/
-
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-
-/* Lists for tracking memory allocations */
-
-ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
-ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
-ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats;
-#endif
-
-/* Object caches */
-
-ACPI_EXTERN acpi_cache_t *acpi_gbl_namespace_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_state_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_ext_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_operand_cache;
-
-/* Global handlers */
-
-ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_device_notify;
-ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_system_notify;
-ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;
-ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
-ACPI_EXTERN acpi_tbl_handler acpi_gbl_table_handler;
-ACPI_EXTERN void *acpi_gbl_table_handler_context;
-ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
-
-/* Owner ID support */
-
-ACPI_EXTERN u32 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS];
-ACPI_EXTERN u8 acpi_gbl_last_owner_id_index;
-ACPI_EXTERN u8 acpi_gbl_next_owner_id_offset;
-
-/* Misc */
-
-ACPI_EXTERN u32 acpi_gbl_original_mode;
-ACPI_EXTERN u32 acpi_gbl_rsdp_original_location;
-ACPI_EXTERN u32 acpi_gbl_ns_lookup_count;
-ACPI_EXTERN u32 acpi_gbl_ps_find_count;
-ACPI_EXTERN u16 acpi_gbl_pm1_enable_register_save;
-ACPI_EXTERN u8 acpi_gbl_debugger_configuration;
-ACPI_EXTERN u8 acpi_gbl_step_to_next_call;
-ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
-ACPI_EXTERN u8 acpi_gbl_events_initialized;
-ACPI_EXTERN u8 acpi_gbl_system_awake_and_running;
-
-#ifndef DEFINE_ACPI_GLOBALS
-
-/* Other miscellaneous */
-
-extern u8 acpi_gbl_shutdown;
-extern u32 acpi_gbl_startup_flags;
-extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT];
-extern const char *acpi_gbl_highest_dstate_names[4];
-extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
-extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
-
-#endif
-
-/* Exception codes */
-
-extern char const *acpi_gbl_exception_names_env[];
-extern char const *acpi_gbl_exception_names_pgm[];
-extern char const *acpi_gbl_exception_names_tbl[];
-extern char const *acpi_gbl_exception_names_aml[];
-extern char const *acpi_gbl_exception_names_ctrl[];
-
-/*****************************************************************************
- *
- * Namespace globals
- *
- ****************************************************************************/
-
-#if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)
-#define NUM_PREDEFINED_NAMES            10
-#else
-#define NUM_PREDEFINED_NAMES            9
-#endif
-
-ACPI_EXTERN struct acpi_namespace_node acpi_gbl_root_node_struct;
-ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_root_node;
-ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_fadt_gpe_device;
-
-extern const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES];
-extern const struct acpi_predefined_names
-    acpi_gbl_pre_defined_names[NUM_PREDEFINED_NAMES];
-
-#ifdef ACPI_DEBUG_OUTPUT
-ACPI_EXTERN u32 acpi_gbl_current_node_count;
-ACPI_EXTERN u32 acpi_gbl_current_node_size;
-ACPI_EXTERN u32 acpi_gbl_max_concurrent_node_count;
-ACPI_EXTERN acpi_size *acpi_gbl_entry_stack_pointer;
-ACPI_EXTERN acpi_size *acpi_gbl_lowest_stack_pointer;
-ACPI_EXTERN u32 acpi_gbl_deepest_nesting;
-#endif
-
-/*****************************************************************************
- *
- * Interpreter globals
- *
- ****************************************************************************/
-
-ACPI_EXTERN struct acpi_thread_state *acpi_gbl_current_walk_list;
-
-/* Control method single step flag */
-
-ACPI_EXTERN u8 acpi_gbl_cm_single_step;
-
-/*****************************************************************************
- *
- * Hardware globals
- *
- ****************************************************************************/
-
-extern struct acpi_bit_register_info
-    acpi_gbl_bit_register_info[ACPI_NUM_BITREG];
-ACPI_EXTERN u8 acpi_gbl_sleep_type_a;
-ACPI_EXTERN u8 acpi_gbl_sleep_type_b;
-
-/*****************************************************************************
- *
- * Event and GPE globals
- *
- ****************************************************************************/
-
-extern struct acpi_fixed_event_info
-    acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
-ACPI_EXTERN struct acpi_fixed_event_handler
-    acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS];
-ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
-ACPI_EXTERN struct acpi_gpe_block_info
-*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
-
-/*****************************************************************************
- *
- * Debugger globals
- *
- ****************************************************************************/
-
-ACPI_EXTERN u8 acpi_gbl_db_output_flags;
-
-#ifdef ACPI_DISASSEMBLER
-
-ACPI_EXTERN u8 acpi_gbl_db_opt_disasm;
-ACPI_EXTERN u8 acpi_gbl_db_opt_verbose;
-#endif
-
-#ifdef ACPI_DEBUGGER
-
-extern u8 acpi_gbl_method_executing;
-extern u8 acpi_gbl_abort_method;
-extern u8 acpi_gbl_db_terminate_threads;
-
-ACPI_EXTERN int optind;
-ACPI_EXTERN char *optarg;
-
-ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
-ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
-ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
-
-ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN char acpi_gbl_db_line_buf[80];
-ACPI_EXTERN char acpi_gbl_db_parsed_buf[80];
-ACPI_EXTERN char acpi_gbl_db_scope_buf[40];
-ACPI_EXTERN char acpi_gbl_db_debug_filename[40];
-ACPI_EXTERN u8 acpi_gbl_db_output_to_file;
-ACPI_EXTERN char *acpi_gbl_db_buffer;
-ACPI_EXTERN char *acpi_gbl_db_filename;
-ACPI_EXTERN u32 acpi_gbl_db_debug_level;
-ACPI_EXTERN u32 acpi_gbl_db_console_debug_level;
-ACPI_EXTERN struct acpi_table_header *acpi_gbl_db_table_ptr;
-ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_db_scope_node;
-
-/*
- * Statistic globals
- */
-ACPI_EXTERN u16 acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX + 1];
-ACPI_EXTERN u16 acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX + 1];
-ACPI_EXTERN u16 acpi_gbl_obj_type_count_misc;
-ACPI_EXTERN u16 acpi_gbl_node_type_count_misc;
-ACPI_EXTERN u32 acpi_gbl_num_nodes;
-ACPI_EXTERN u32 acpi_gbl_num_objects;
-
-ACPI_EXTERN u32 acpi_gbl_size_of_parse_tree;
-ACPI_EXTERN u32 acpi_gbl_size_of_method_trees;
-ACPI_EXTERN u32 acpi_gbl_size_of_node_entries;
-ACPI_EXTERN u32 acpi_gbl_size_of_acpi_objects;
-
-#endif				/* ACPI_DEBUGGER */
-
-#endif				/* __ACGLOBAL_H__ */
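The acglobal.h removal also retires the ACPI_EXTERN/ACPI_INIT_GLOBAL idiom documented at the top of that file: exactly one translation unit defines DEFINE_ACPI_GLOBALS before including the header, so a single list of globals serves as both the definitions and the extern declarations. A short sketch of how one of the removed lines expands under each configuration:

	#ifdef DEFINE_ACPI_GLOBALS
	#define ACPI_EXTERN
	#define ACPI_INIT_GLOBAL(a, b) a = b
	#else
	#define ACPI_EXTERN extern
	#define ACPI_INIT_GLOBAL(a, b) a
	#endif

	/* In the single defining file this becomes:  u8 acpi_gbl_create_osi_method = TRUE;
	 * in every other includer it becomes:        extern u8 acpi_gbl_create_osi_method;  */
	ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE);
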
diff --git a/include/acpi/achware.h b/include/acpi/achware.h
deleted file mode 100644
index 97a72b1..0000000
--- a/include/acpi/achware.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/******************************************************************************
- *
- * Name: achware.h -- hardware specific interfaces
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#ifndef __ACHWARE_H__
-#define __ACHWARE_H__
-
-/* PM Timer ticks per second (HZ) */
-
-#define PM_TIMER_FREQUENCY  3579545
-
-/* Values for the _SST reserved method */
-
-#define ACPI_SST_INDICATOR_OFF  0
-#define ACPI_SST_WORKING        1
-#define ACPI_SST_WAKING         2
-#define ACPI_SST_SLEEPING       3
-#define ACPI_SST_SLEEP_CONTEXT  4
-
-/* Prototypes */
-
-/*
- * hwacpi - high level functions
- */
-acpi_status acpi_hw_set_mode(u32 mode);
-
-u32 acpi_hw_get_mode(void);
-
-/*
- * hwregs - ACPI Register I/O
- */
-struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id);
-
-acpi_status
-acpi_hw_register_read(u32 register_id, u32 * return_value);
-
-acpi_status acpi_hw_register_write(u32 register_id, u32 value);
-
-acpi_status
-acpi_hw_low_level_read(u32 width,
-		       u32 * value, struct acpi_generic_address *reg);
-
-acpi_status
-acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address *reg);
-
-acpi_status acpi_hw_clear_acpi_status(void);
-
-/*
- * hwgpe - GPE support
- */
-acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
-
-acpi_status
-acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info);
-
-acpi_status
-acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
-			  struct acpi_gpe_block_info *gpe_block);
-
-acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info *gpe_event_info);
-
-acpi_status
-acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
-			struct acpi_gpe_block_info *gpe_block);
-
-acpi_status
-acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
-		       acpi_event_status * event_status);
-
-acpi_status acpi_hw_disable_all_gpes(void);
-
-acpi_status acpi_hw_enable_all_runtime_gpes(void);
-
-acpi_status acpi_hw_enable_all_wakeup_gpes(void);
-
-acpi_status
-acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
-				 struct acpi_gpe_block_info *gpe_block);
-
-#ifdef	ACPI_FUTURE_USAGE
-/*
- * hwtimer - ACPI Timer prototypes
- */
-acpi_status acpi_get_timer_resolution(u32 * resolution);
-
-acpi_status acpi_get_timer(u32 * ticks);
-
-acpi_status
-acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed);
-#endif				/* ACPI_FUTURE_USAGE */
-
-#endif				/* __ACHWARE_H__ */
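achware.h also carried the PM timer prototypes guarded by ACPI_FUTURE_USAGE, together with PM_TIMER_FREQUENCY. A hedged usage sketch built only from those prototypes; do_some_work() is a hypothetical workload, and treating the reported duration as microseconds is an assumption rather than something stated in the deleted header:

	u32 start, end, elapsed;

	if (ACPI_SUCCESS(acpi_get_timer(&start))) {
		do_some_work();
		if (ACPI_SUCCESS(acpi_get_timer(&end)) &&
		    ACPI_SUCCESS(acpi_get_timer_duration(start, end, &elapsed)))
			printk("PM timer interval: %u\n", elapsed);
	}
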
diff --git a/include/acpi/aclocal.h b/include/acpi/aclocal.h
deleted file mode 100644
index ecab527..0000000
--- a/include/acpi/aclocal.h
+++ /dev/null
@@ -1,1044 +0,0 @@
-/******************************************************************************
- *
- * Name: aclocal.h - Internal data types used across the ACPI subsystem
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#ifndef __ACLOCAL_H__
-#define __ACLOCAL_H__
-
-/* acpisrc:struct_defs -- for acpisrc conversion */
-
-#define ACPI_WAIT_FOREVER               0xFFFF	/* u16, as per ACPI spec */
-#define ACPI_DO_NOT_WAIT                0
-#define ACPI_SERIALIZED                 0xFF
-
-typedef u32 acpi_mutex_handle;
-#define ACPI_GLOBAL_LOCK                (acpi_semaphore) (-1)
-
-/* Total number of aml opcodes defined */
-
-#define AML_NUM_OPCODES                 0x7F
-
-/* Forward declarations */
-
-struct acpi_walk_state;
-struct acpi_obj_mutex;
-union acpi_parse_object;
-
-/*****************************************************************************
- *
- * Mutex typedefs and structs
- *
- ****************************************************************************/
-
-/*
- * Predefined handles for the mutex objects used within the subsystem
- * All mutex objects are automatically created by acpi_ut_mutex_initialize.
- *
- * The acquire/release ordering protocol is implied via this list. Mutexes
- * with a lower value must be acquired before mutexes with a higher value.
- *
- * NOTE: any changes here must be reflected in the acpi_gbl_mutex_names
- * table below also!
- */
-#define ACPI_MTX_INTERPRETER            0	/* AML Interpreter, main lock */
-#define ACPI_MTX_NAMESPACE              1	/* ACPI Namespace */
-#define ACPI_MTX_TABLES                 2	/* Data for ACPI tables */
-#define ACPI_MTX_EVENTS                 3	/* Data for ACPI events */
-#define ACPI_MTX_CACHES                 4	/* Internal caches, general purposes */
-#define ACPI_MTX_MEMORY                 5	/* Debug memory tracking lists */
-#define ACPI_MTX_DEBUG_CMD_COMPLETE     6	/* AML debugger */
-#define ACPI_MTX_DEBUG_CMD_READY        7	/* AML debugger */
-
-#define ACPI_MAX_MUTEX                  7
-#define ACPI_NUM_MUTEX                  ACPI_MAX_MUTEX+1
-
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-#ifdef DEFINE_ACPI_GLOBALS
-
-/* Debug names for the mutexes above */
-
-static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
-	"ACPI_MTX_Interpreter",
-	"ACPI_MTX_Namespace",
-	"ACPI_MTX_Tables",
-	"ACPI_MTX_Events",
-	"ACPI_MTX_Caches",
-	"ACPI_MTX_Memory",
-	"ACPI_MTX_CommandComplete",
-	"ACPI_MTX_CommandReady"
-};
-
-#endif
-#endif
-
-/*
- * Predefined handles for spinlocks used within the subsystem.
- * These spinlocks are created by acpi_ut_mutex_initialize
- */
-#define ACPI_LOCK_GPES                  0
-#define ACPI_LOCK_HARDWARE              1
-
-#define ACPI_MAX_LOCK                   1
-#define ACPI_NUM_LOCK                   ACPI_MAX_LOCK+1
-
-/* Owner IDs are used to track namespace nodes for selective deletion */
-
-typedef u8 acpi_owner_id;
-#define ACPI_OWNER_ID_MAX               0xFF
-
-/* This Thread ID means that the mutex is not in use (unlocked) */
-
-#define ACPI_MUTEX_NOT_ACQUIRED         (acpi_thread_id) 0
-
-/* Table for the global mutexes */
-
-struct acpi_mutex_info {
-	acpi_mutex mutex;
-	u32 use_count;
-	acpi_thread_id thread_id;
-};
-
-/* Lock flag parameter for various interfaces */
-
-#define ACPI_MTX_DO_NOT_LOCK            0
-#define ACPI_MTX_LOCK                   1
-
-/* Field access granularities */
-
-#define ACPI_FIELD_BYTE_GRANULARITY     1
-#define ACPI_FIELD_WORD_GRANULARITY     2
-#define ACPI_FIELD_DWORD_GRANULARITY    4
-#define ACPI_FIELD_QWORD_GRANULARITY    8
-
-#define ACPI_ENTRY_NOT_FOUND            NULL
-
-/*****************************************************************************
- *
- * Namespace typedefs and structs
- *
- ****************************************************************************/
-
-/* Operational modes of the AML interpreter/scanner */
-
-typedef enum {
-	ACPI_IMODE_LOAD_PASS1 = 0x01,
-	ACPI_IMODE_LOAD_PASS2 = 0x02,
-	ACPI_IMODE_EXECUTE = 0x03
-} acpi_interpreter_mode;
-
-union acpi_name_union {
-	u32 integer;
-	char ascii[4];
-};
-
-/*
- * The Namespace Node describes a named object that appears in the AML.
- * descriptor_type is used to differentiate between internal descriptors.
- *
- * The node is optimized for both 32-bit and 64-bit platforms:
- * 20 bytes for the 32-bit case, 32 bytes for the 64-bit case.
- *
- * Note: The descriptor_type and Type fields must appear in the identical
- * position in both the struct acpi_namespace_node and union acpi_operand_object
- * structures.
- */
-struct acpi_namespace_node {
-	union acpi_operand_object *object;	/* Interpreter object */
-	u8 descriptor_type;	/* Differentiate object descriptor types */
-	u8 type;		/* ACPI Type associated with this name */
-	u8 flags;		/* Miscellaneous flags */
-	acpi_owner_id owner_id;	/* Node creator */
-	union acpi_name_union name;	/* ACPI Name, always 4 chars per ACPI spec */
-	struct acpi_namespace_node *child;	/* First child */
-	struct acpi_namespace_node *peer;	/* Peer. Parent if ANOBJ_END_OF_PEER_LIST set */
-
-	/*
-	 * The following fields are used by the ASL compiler and disassembler only
-	 */
-#ifdef ACPI_LARGE_NAMESPACE_NODE
-	union acpi_parse_object *op;
-	u32 value;
-	u32 length;
-#endif
-};
-
-/* Namespace Node flags */
-
-#define ANOBJ_END_OF_PEER_LIST          0x01	/* End-of-list, Peer field points to parent */
-#define ANOBJ_TEMPORARY                 0x02	/* Node is created by a method and is temporary */
-#define ANOBJ_METHOD_ARG                0x04	/* Node is a method argument */
-#define ANOBJ_METHOD_LOCAL              0x08	/* Node is a method local */
-#define ANOBJ_SUBTREE_HAS_INI           0x10	/* Used to optimize device initialization */
-#define ANOBJ_EVALUATED                 0x20	/* Set on first evaluation of node */
-
-#define ANOBJ_IS_EXTERNAL               0x08	/* i_aSL only: This object created via External() */
-#define ANOBJ_METHOD_NO_RETVAL          0x10	/* i_aSL only: Method has no return value */
-#define ANOBJ_METHOD_SOME_NO_RETVAL     0x20	/* i_aSL only: Method has at least one return value */
-#define ANOBJ_IS_BIT_OFFSET             0x40	/* i_aSL only: Reference is a bit offset */
-#define ANOBJ_IS_REFERENCED             0x80	/* i_aSL only: Object was referenced */
-
-/*
- * ACPI Table Descriptor.  One per ACPI table
- */
-struct acpi_table_desc {
-	acpi_physical_address address;
-	struct acpi_table_header *pointer;
-	u32 length;		/* Length fixed at 32 bits */
-	union acpi_name_union signature;
-	acpi_owner_id owner_id;
-	u8 flags;
-};
-
-/* Flags for above */
-
-#define ACPI_TABLE_ORIGIN_UNKNOWN       (0)
-#define ACPI_TABLE_ORIGIN_MAPPED        (1)
-#define ACPI_TABLE_ORIGIN_ALLOCATED     (2)
-#define ACPI_TABLE_ORIGIN_MASK          (3)
-#define ACPI_TABLE_IS_LOADED            (4)
-
-/* One internal RSDT for table management */
-
-struct acpi_internal_rsdt {
-	struct acpi_table_desc *tables;
-	u32 count;
-	u32 size;
-	u8 flags;
-};
-
-/* Flags for above */
-
-#define ACPI_ROOT_ORIGIN_UNKNOWN        (0)	/* ~ORIGIN_ALLOCATED */
-#define ACPI_ROOT_ORIGIN_ALLOCATED      (1)
-#define ACPI_ROOT_ALLOW_RESIZE          (2)
-
-/* Predefined (fixed) table indexes */
-
-#define ACPI_TABLE_INDEX_DSDT           (0)
-#define ACPI_TABLE_INDEX_FACS           (1)
-
-struct acpi_find_context {
-	char *search_for;
-	acpi_handle *list;
-	u32 *count;
-};
-
-struct acpi_ns_search_data {
-	struct acpi_namespace_node *node;
-};
-
-/*
- * Predefined Namespace items
- */
-struct acpi_predefined_names {
-	char *name;
-	u8 type;
-	char *val;
-};
-
-/* Object types used during package copies */
-
-#define ACPI_COPY_TYPE_SIMPLE           0
-#define ACPI_COPY_TYPE_PACKAGE          1
-
-/* Info structure used to convert external<->internal namestrings */
-
-struct acpi_namestring_info {
-	const char *external_name;
-	const char *next_external_char;
-	char *internal_name;
-	u32 length;
-	u32 num_segments;
-	u32 num_carats;
-	u8 fully_qualified;
-};
-
-/* Field creation info */
-
-struct acpi_create_field_info {
-	struct acpi_namespace_node *region_node;
-	struct acpi_namespace_node *field_node;
-	struct acpi_namespace_node *register_node;
-	struct acpi_namespace_node *data_register_node;
-	u32 bank_value;
-	u32 field_bit_position;
-	u32 field_bit_length;
-	u8 field_flags;
-	u8 attribute;
-	u8 field_type;
-};
-
-typedef
-acpi_status(*ACPI_INTERNAL_METHOD) (struct acpi_walk_state * walk_state);
-
-/*
- * Bitmapped ACPI types.  Used internally only
- */
-#define ACPI_BTYPE_ANY                  0x00000000
-#define ACPI_BTYPE_INTEGER              0x00000001
-#define ACPI_BTYPE_STRING               0x00000002
-#define ACPI_BTYPE_BUFFER               0x00000004
-#define ACPI_BTYPE_PACKAGE              0x00000008
-#define ACPI_BTYPE_FIELD_UNIT           0x00000010
-#define ACPI_BTYPE_DEVICE               0x00000020
-#define ACPI_BTYPE_EVENT                0x00000040
-#define ACPI_BTYPE_METHOD               0x00000080
-#define ACPI_BTYPE_MUTEX                0x00000100
-#define ACPI_BTYPE_REGION               0x00000200
-#define ACPI_BTYPE_POWER                0x00000400
-#define ACPI_BTYPE_PROCESSOR            0x00000800
-#define ACPI_BTYPE_THERMAL              0x00001000
-#define ACPI_BTYPE_BUFFER_FIELD         0x00002000
-#define ACPI_BTYPE_DDB_HANDLE           0x00004000
-#define ACPI_BTYPE_DEBUG_OBJECT         0x00008000
-#define ACPI_BTYPE_REFERENCE            0x00010000
-#define ACPI_BTYPE_RESOURCE             0x00020000
-
-#define ACPI_BTYPE_COMPUTE_DATA         (ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING | ACPI_BTYPE_BUFFER)
-
-#define ACPI_BTYPE_DATA                 (ACPI_BTYPE_COMPUTE_DATA  | ACPI_BTYPE_PACKAGE)
-#define ACPI_BTYPE_DATA_REFERENCE       (ACPI_BTYPE_DATA | ACPI_BTYPE_REFERENCE | ACPI_BTYPE_DDB_HANDLE)
-#define ACPI_BTYPE_DEVICE_OBJECTS       (ACPI_BTYPE_DEVICE | ACPI_BTYPE_THERMAL | ACPI_BTYPE_PROCESSOR)
-#define ACPI_BTYPE_OBJECTS_AND_REFS     0x0001FFFF	/* ARG or LOCAL */
-#define ACPI_BTYPE_ALL_OBJECTS          0x0000FFFF
-
-/*
- * Information structure for ACPI predefined names.
- * Each entry in the table contains the following items:
- *
- * Name                 - The ACPI reserved name
- * param_count          - Number of arguments to the method
- * expected_return_btypes - Allowed type(s) for the return value
- */
-struct acpi_name_info {
-	char name[ACPI_NAME_SIZE];
-	u8 param_count;
-	u8 expected_btypes;
-};
-
-/*
- * Secondary information structures for ACPI predefined objects that return
- * package objects. This structure appears as the next entry in the table
- * after the NAME_INFO structure above.
- *
- * The reason for this is to minimize the size of the predefined name table.
- */
-
-/*
- * Used for ACPI_PTYPE1_FIXED, ACPI_PTYPE1_VAR, ACPI_PTYPE2,
- * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT
- */
-struct acpi_package_info {
-	u8 type;
-	u8 object_type1;
-	u8 count1;
-	u8 object_type2;
-	u8 count2;
-	u8 reserved;
-};
-
-/* Used for ACPI_PTYPE2_FIXED */
-
-struct acpi_package_info2 {
-	u8 type;
-	u8 count;
-	u8 object_type[4];
-};
-
-/* Used for ACPI_PTYPE1_OPTION */
-
-struct acpi_package_info3 {
-	u8 type;
-	u8 count;
-	u8 object_type[2];
-	u8 tail_object_type;
-	u8 reserved;
-};
-
-union acpi_predefined_info {
-	struct acpi_name_info info;
-	struct acpi_package_info ret_info;
-	struct acpi_package_info2 ret_info2;
-	struct acpi_package_info3 ret_info3;
-};
-
-/*
- * Bitmapped return value types
- * Note: the actual data types must be contiguous, a loop in nspredef.c
- * depends on this.
- */
-#define ACPI_RTYPE_ANY                  0x00
-#define ACPI_RTYPE_NONE                 0x01
-#define ACPI_RTYPE_INTEGER              0x02
-#define ACPI_RTYPE_STRING               0x04
-#define ACPI_RTYPE_BUFFER               0x08
-#define ACPI_RTYPE_PACKAGE              0x10
-#define ACPI_RTYPE_REFERENCE            0x20
-#define ACPI_RTYPE_ALL                  0x3F
-
-#define ACPI_NUM_RTYPES                 5	/* Number of actual object types */
-
-/*****************************************************************************
- *
- * Event typedefs and structs
- *
- ****************************************************************************/
-
-/* Dispatch info for each GPE -- either a method or handler, cannot be both */
-
-struct acpi_handler_info {
-	acpi_event_handler address;	/* Address of handler, if any */
-	void *context;		/* Context to be passed to handler */
-	struct acpi_namespace_node *method_node;	/* Method node for this GPE level (saved) */
-};
-
-union acpi_gpe_dispatch_info {
-	struct acpi_namespace_node *method_node;	/* Method node for this GPE level */
-	struct acpi_handler_info *handler;
-};
-
-/*
- * Information about a GPE, one per each GPE in an array.
- * NOTE: Important to keep this struct as small as possible.
- */
-struct acpi_gpe_event_info {
-	union acpi_gpe_dispatch_info dispatch;	/* Either Method or Handler */
-	struct acpi_gpe_register_info *register_info;	/* Backpointer to register info */
-	u8 flags;		/* Misc info about this GPE */
-	u8 gpe_number;		/* This GPE */
-};
-
-/* Information about a GPE register pair, one per each status/enable pair in an array */
-
-struct acpi_gpe_register_info {
-	struct acpi_generic_address status_address;	/* Address of status reg */
-	struct acpi_generic_address enable_address;	/* Address of enable reg */
-	u8 enable_for_wake;	/* GPEs to keep enabled when sleeping */
-	u8 enable_for_run;	/* GPEs to keep enabled when running */
-	u8 base_gpe_number;	/* Base GPE number for this register */
-};
-
-/*
- * Information about a GPE register block, one per each installed block --
- * GPE0, GPE1, and one per each installed GPE Block Device.
- */
-struct acpi_gpe_block_info {
-	struct acpi_namespace_node *node;
-	struct acpi_gpe_block_info *previous;
-	struct acpi_gpe_block_info *next;
-	struct acpi_gpe_xrupt_info *xrupt_block;	/* Backpointer to interrupt block */
-	struct acpi_gpe_register_info *register_info;	/* One per GPE register pair */
-	struct acpi_gpe_event_info *event_info;	/* One for each GPE */
-	struct acpi_generic_address block_address;	/* Base address of the block */
-	u32 register_count;	/* Number of register pairs in block */
-	u8 block_base_number;	/* Base GPE number for this block */
-};
-
-/* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */
-
-struct acpi_gpe_xrupt_info {
-	struct acpi_gpe_xrupt_info *previous;
-	struct acpi_gpe_xrupt_info *next;
-	struct acpi_gpe_block_info *gpe_block_list_head;	/* List of GPE blocks for this xrupt */
-	u32 interrupt_number;	/* System interrupt number */
-};
-
-struct acpi_gpe_walk_info {
-	struct acpi_namespace_node *gpe_device;
-	struct acpi_gpe_block_info *gpe_block;
-};
-
-typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *
-					 gpe_xrupt_info,
-					 struct acpi_gpe_block_info *
-					 gpe_block);
-
-/* Information about each particular fixed event */
-
-struct acpi_fixed_event_handler {
-	acpi_event_handler handler;	/* Address of handler. */
-	void *context;		/* Context to be passed to handler */
-};
-
-struct acpi_fixed_event_info {
-	u8 status_register_id;
-	u8 enable_register_id;
-	u16 status_bit_mask;
-	u16 enable_bit_mask;
-};
-
-/* Information used during field processing */
-
-struct acpi_field_info {
-	u8 skip_field;
-	u8 field_flag;
-	u32 pkg_length;
-};
-
-/*****************************************************************************
- *
- * Generic "state" object for stacks
- *
- ****************************************************************************/
-
-#define ACPI_CONTROL_NORMAL                  0xC0
-#define ACPI_CONTROL_CONDITIONAL_EXECUTING   0xC1
-#define ACPI_CONTROL_PREDICATE_EXECUTING     0xC2
-#define ACPI_CONTROL_PREDICATE_FALSE         0xC3
-#define ACPI_CONTROL_PREDICATE_TRUE          0xC4
-
-#define ACPI_STATE_COMMON \
-	void                            *next; \
-	u8                              descriptor_type; /* To differentiate various internal objs */\
-	u8                              flags; \
-	u16                             value; \
-	u16                             state;
-
-	/* There are 2 bytes available here until the next natural alignment boundary */
-
-struct acpi_common_state {
-ACPI_STATE_COMMON};
-
-/*
- * Update state - used to traverse complex objects such as packages
- */
-struct acpi_update_state {
-	ACPI_STATE_COMMON union acpi_operand_object *object;
-};
-
-/*
- * Pkg state - used to traverse nested package structures
- */
-struct acpi_pkg_state {
-	ACPI_STATE_COMMON u16 index;
-	union acpi_operand_object *source_object;
-	union acpi_operand_object *dest_object;
-	struct acpi_walk_state *walk_state;
-	void *this_target_obj;
-	u32 num_packages;
-};
-
-/*
- * Control state - one per if/else and while constructs.
- * Allows nesting of these constructs
- */
-struct acpi_control_state {
-	ACPI_STATE_COMMON u16 opcode;
-	union acpi_parse_object *predicate_op;
-	u8 *aml_predicate_start;	/* Start of if/while predicate */
-	u8 *package_end;	/* End of if/while block */
-};
-
-/*
- * Scope state - current scope during namespace lookups
- */
-struct acpi_scope_state {
-	ACPI_STATE_COMMON struct acpi_namespace_node *node;
-};
-
-struct acpi_pscope_state {
-	ACPI_STATE_COMMON u32 arg_count;	/* Number of fixed arguments */
-	union acpi_parse_object *op;	/* Current op being parsed */
-	u8 *arg_end;		/* Current argument end */
-	u8 *pkg_end;		/* Current package end */
-	u32 arg_list;		/* Next argument to parse */
-};
-
-/*
- * Thread state - one per thread across multiple walk states.  Multiple walk
- * states are created when there are nested control methods executing.
- */
-struct acpi_thread_state {
-	ACPI_STATE_COMMON u8 current_sync_level;	/* Mutex Sync (nested acquire) level */
-	struct acpi_walk_state *walk_state_list;	/* Head of list of walk_states for this thread */
-	union acpi_operand_object *acquired_mutex_list;	/* List of all currently acquired mutexes */
-	acpi_thread_id thread_id;	/* Running thread ID */
-};
-
-/*
- * Result values - used to accumulate the results of nested
- * AML arguments
- */
-struct acpi_result_values {
-	ACPI_STATE_COMMON
-	    union acpi_operand_object *obj_desc[ACPI_RESULTS_FRAME_OBJ_NUM];
-};
-
-typedef
-acpi_status(*acpi_parse_downwards) (struct acpi_walk_state * walk_state,
-				    union acpi_parse_object ** out_op);
-
-typedef acpi_status(*acpi_parse_upwards) (struct acpi_walk_state * walk_state);
-
-/*
- * Notify info - used to pass info to the deferred notify
- * handler/dispatcher.
- */
-struct acpi_notify_info {
-	ACPI_STATE_COMMON struct acpi_namespace_node *node;
-	union acpi_operand_object *handler_obj;
-};
-
-/* Generic state is union of structs above */
-
-union acpi_generic_state {
-	struct acpi_common_state common;
-	struct acpi_control_state control;
-	struct acpi_update_state update;
-	struct acpi_scope_state scope;
-	struct acpi_pscope_state parse_scope;
-	struct acpi_pkg_state pkg;
-	struct acpi_thread_state thread;
-	struct acpi_result_values results;
-	struct acpi_notify_info notify;
-};
-
-/*****************************************************************************
- *
- * Interpreter typedefs and structs
- *
- ****************************************************************************/
-
-typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state);
-
-/*****************************************************************************
- *
- * Parser typedefs and structs
- *
- ****************************************************************************/
-
-/*
- * AML opcode, name, and argument layout
- */
-struct acpi_opcode_info {
-#if defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUG_OUTPUT)
-	char *name;		/* Opcode name (disassembler/debug only) */
-#endif
-	u32 parse_args;		/* Grammar/Parse time arguments */
-	u32 runtime_args;	/* Interpret time arguments */
-	u16 flags;		/* Misc flags */
-	u8 object_type;		/* Corresponding internal object type */
-	u8 class;		/* Opcode class */
-	u8 type;		/* Opcode type */
-};
-
-union acpi_parse_value {
-	acpi_integer integer;	/* Integer constant (Up to 64 bits) */
-	struct uint64_struct integer64;	/* Structure overlay for 2 32-bit Dwords */
-	u32 size;		/* bytelist or field size */
-	char *string;		/* NULL terminated string */
-	u8 *buffer;		/* buffer or string */
-	char *name;		/* NULL terminated string */
-	union acpi_parse_object *arg;	/* arguments and contained ops */
-};
-
-#define ACPI_PARSE_COMMON \
-	union acpi_parse_object         *parent;        /* Parent op */\
-	u8                              descriptor_type; /* To differentiate various internal objs */\
-	u8                              flags;          /* Type of Op */\
-	u16                             aml_opcode;     /* AML opcode */\
-	u32                             aml_offset;     /* Offset of declaration in AML */\
-	union acpi_parse_object         *next;          /* Next op */\
-	struct acpi_namespace_node      *node;          /* For use by interpreter */\
-	union acpi_parse_value          value;          /* Value or args associated with the opcode */\
-	u8                              arg_list_length; /* Number of elements in the arg list */\
-	ACPI_DISASM_ONLY_MEMBERS (\
-	u8                              disasm_flags;   /* Used during AML disassembly */\
-	u8                              disasm_opcode;  /* Subtype used for disassembly */\
-	char                            aml_op_name[16])	/* Op name (debug only) */
-
-#define ACPI_DASM_BUFFER                0x00
-#define ACPI_DASM_RESOURCE              0x01
-#define ACPI_DASM_STRING                0x02
-#define ACPI_DASM_UNICODE               0x03
-#define ACPI_DASM_EISAID                0x04
-#define ACPI_DASM_MATCHOP               0x05
-#define ACPI_DASM_LNOT_PREFIX           0x06
-#define ACPI_DASM_LNOT_SUFFIX           0x07
-#define ACPI_DASM_IGNORE                0x08
-
-/*
- * Generic operation (for example:  If, While, Store)
- */
-struct acpi_parse_obj_common {
-ACPI_PARSE_COMMON};
-
-/*
- * Extended Op for named ops (Scope, Method, etc.), deferred ops (Methods and op_regions),
- * and bytelists.
- */
-struct acpi_parse_obj_named {
-	ACPI_PARSE_COMMON u8 *path;
-	u8 *data;		/* AML body or bytelist data */
-	u32 length;		/* AML length */
-	u32 name;		/* 4-byte name or zero if no name */
-};
-
-/* This version is used by the i_aSL compiler only */
-
-#define ACPI_MAX_PARSEOP_NAME   20
-
-struct acpi_parse_obj_asl {
-	ACPI_PARSE_COMMON union acpi_parse_object *child;
-	union acpi_parse_object *parent_method;
-	char *filename;
-	char *external_name;
-	char *namepath;
-	char name_seg[4];
-	u32 extra_value;
-	u32 column;
-	u32 line_number;
-	u32 logical_line_number;
-	u32 logical_byte_offset;
-	u32 end_line;
-	u32 end_logical_line;
-	u32 acpi_btype;
-	u32 aml_length;
-	u32 aml_subtree_length;
-	u32 final_aml_length;
-	u32 final_aml_offset;
-	u32 compile_flags;
-	u16 parse_opcode;
-	u8 aml_opcode_length;
-	u8 aml_pkg_len_bytes;
-	u8 extra;
-	char parse_op_name[ACPI_MAX_PARSEOP_NAME];
-};
-
-union acpi_parse_object {
-	struct acpi_parse_obj_common common;
-	struct acpi_parse_obj_named named;
-	struct acpi_parse_obj_asl asl;
-};
-
-/*
- * Parse state - one state per parser invocation and each control
- * method.
- */
-struct acpi_parse_state {
-	u8 *aml_start;		/* First AML byte */
-	u8 *aml;		/* Next AML byte */
-	u8 *aml_end;		/* (last + 1) AML byte */
-	u8 *pkg_start;		/* Current package begin */
-	u8 *pkg_end;		/* Current package end */
-	union acpi_parse_object *start_op;	/* Root of parse tree */
-	struct acpi_namespace_node *start_node;
-	union acpi_generic_state *scope;	/* Current scope */
-	union acpi_parse_object *start_scope;
-	u32 aml_size;
-};
-
-/* Parse object flags */
-
-#define ACPI_PARSEOP_GENERIC            0x01
-#define ACPI_PARSEOP_NAMED              0x02
-#define ACPI_PARSEOP_DEFERRED           0x04
-#define ACPI_PARSEOP_BYTELIST           0x08
-#define ACPI_PARSEOP_IN_STACK           0x10
-#define ACPI_PARSEOP_TARGET             0x20
-#define ACPI_PARSEOP_IN_CACHE           0x80
-
-/* Parse object disasm_flags */
-
-#define ACPI_PARSEOP_IGNORE             0x01
-#define ACPI_PARSEOP_PARAMLIST          0x02
-#define ACPI_PARSEOP_EMPTY_TERMLIST     0x04
-#define ACPI_PARSEOP_SPECIAL            0x10
-
-/*****************************************************************************
- *
- * Hardware (ACPI registers) and PNP
- *
- ****************************************************************************/
-
-#define PCI_ROOT_HID_STRING             "PNP0A03"
-#define PCI_EXPRESS_ROOT_HID_STRING     "PNP0A08"
-
-struct acpi_bit_register_info {
-	u8 parent_register;
-	u8 bit_position;
-	u16 access_bit_mask;
-};
-
-/*
- * Some ACPI registers have bits that must be ignored -- meaning that they
- * must be preserved.
- */
-#define ACPI_PM1_STATUS_PRESERVED_BITS          0x0800	/* Bit 11 */
-#define ACPI_PM1_CONTROL_PRESERVED_BITS         0x0200	/* Bit 9 (whatever) */
-
-/*
- * Register IDs
- * These are the full ACPI registers
- */
-#define ACPI_REGISTER_PM1_STATUS                0x01
-#define ACPI_REGISTER_PM1_ENABLE                0x02
-#define ACPI_REGISTER_PM1_CONTROL               0x03
-#define ACPI_REGISTER_PM1A_CONTROL              0x04
-#define ACPI_REGISTER_PM1B_CONTROL              0x05
-#define ACPI_REGISTER_PM2_CONTROL               0x06
-#define ACPI_REGISTER_PM_TIMER                  0x07
-#define ACPI_REGISTER_PROCESSOR_BLOCK           0x08
-#define ACPI_REGISTER_SMI_COMMAND_BLOCK         0x09
-
-/* Masks used to access the bit_registers */
-
-#define ACPI_BITMASK_TIMER_STATUS               0x0001
-#define ACPI_BITMASK_BUS_MASTER_STATUS          0x0010
-#define ACPI_BITMASK_GLOBAL_LOCK_STATUS         0x0020
-#define ACPI_BITMASK_POWER_BUTTON_STATUS        0x0100
-#define ACPI_BITMASK_SLEEP_BUTTON_STATUS        0x0200
-#define ACPI_BITMASK_RT_CLOCK_STATUS            0x0400
-#define ACPI_BITMASK_PCIEXP_WAKE_STATUS         0x4000	/* ACPI 3.0 */
-#define ACPI_BITMASK_WAKE_STATUS                0x8000
-
-#define ACPI_BITMASK_ALL_FIXED_STATUS           (\
-	ACPI_BITMASK_TIMER_STATUS          | \
-	ACPI_BITMASK_BUS_MASTER_STATUS     | \
-	ACPI_BITMASK_GLOBAL_LOCK_STATUS    | \
-	ACPI_BITMASK_POWER_BUTTON_STATUS   | \
-	ACPI_BITMASK_SLEEP_BUTTON_STATUS   | \
-	ACPI_BITMASK_RT_CLOCK_STATUS       | \
-	ACPI_BITMASK_WAKE_STATUS)
-
-#define ACPI_BITMASK_TIMER_ENABLE               0x0001
-#define ACPI_BITMASK_GLOBAL_LOCK_ENABLE         0x0020
-#define ACPI_BITMASK_POWER_BUTTON_ENABLE        0x0100
-#define ACPI_BITMASK_SLEEP_BUTTON_ENABLE        0x0200
-#define ACPI_BITMASK_RT_CLOCK_ENABLE            0x0400
-#define ACPI_BITMASK_PCIEXP_WAKE_DISABLE        0x4000	/* ACPI 3.0 */
-
-#define ACPI_BITMASK_SCI_ENABLE                 0x0001
-#define ACPI_BITMASK_BUS_MASTER_RLD             0x0002
-#define ACPI_BITMASK_GLOBAL_LOCK_RELEASE        0x0004
-#define ACPI_BITMASK_SLEEP_TYPE_X               0x1C00
-#define ACPI_BITMASK_SLEEP_ENABLE               0x2000
-
-#define ACPI_BITMASK_ARB_DISABLE                0x0001
-
-/* Raw bit position of each bit_register */
-
-#define ACPI_BITPOSITION_TIMER_STATUS           0x00
-#define ACPI_BITPOSITION_BUS_MASTER_STATUS      0x04
-#define ACPI_BITPOSITION_GLOBAL_LOCK_STATUS     0x05
-#define ACPI_BITPOSITION_POWER_BUTTON_STATUS    0x08
-#define ACPI_BITPOSITION_SLEEP_BUTTON_STATUS    0x09
-#define ACPI_BITPOSITION_RT_CLOCK_STATUS        0x0A
-#define ACPI_BITPOSITION_PCIEXP_WAKE_STATUS     0x0E	/* ACPI 3.0 */
-#define ACPI_BITPOSITION_WAKE_STATUS            0x0F
-
-#define ACPI_BITPOSITION_TIMER_ENABLE           0x00
-#define ACPI_BITPOSITION_GLOBAL_LOCK_ENABLE     0x05
-#define ACPI_BITPOSITION_POWER_BUTTON_ENABLE    0x08
-#define ACPI_BITPOSITION_SLEEP_BUTTON_ENABLE    0x09
-#define ACPI_BITPOSITION_RT_CLOCK_ENABLE        0x0A
-#define ACPI_BITPOSITION_PCIEXP_WAKE_DISABLE    0x0E	/* ACPI 3.0 */
-
-#define ACPI_BITPOSITION_SCI_ENABLE             0x00
-#define ACPI_BITPOSITION_BUS_MASTER_RLD         0x01
-#define ACPI_BITPOSITION_GLOBAL_LOCK_RELEASE    0x02
-#define ACPI_BITPOSITION_SLEEP_TYPE_X           0x0A
-#define ACPI_BITPOSITION_SLEEP_ENABLE           0x0D
-
-#define ACPI_BITPOSITION_ARB_DISABLE            0x00
-
-/*****************************************************************************
- *
- * Resource descriptors
- *
- ****************************************************************************/
-
-/* resource_type values */
-
-#define ACPI_ADDRESS_TYPE_MEMORY_RANGE          0
-#define ACPI_ADDRESS_TYPE_IO_RANGE              1
-#define ACPI_ADDRESS_TYPE_BUS_NUMBER_RANGE      2
-
-/* Resource descriptor types and masks */
-
-#define ACPI_RESOURCE_NAME_LARGE                0x80
-#define ACPI_RESOURCE_NAME_SMALL                0x00
-
-#define ACPI_RESOURCE_NAME_SMALL_MASK           0x78	/* Bits 6:3 contain the type */
-#define ACPI_RESOURCE_NAME_SMALL_LENGTH_MASK    0x07	/* Bits 2:0 contain the length */
-#define ACPI_RESOURCE_NAME_LARGE_MASK           0x7F	/* Bits 6:0 contain the type */
-
-/*
- * Small resource descriptor "names" as defined by the ACPI specification.
- * Note: Bits 2:0 are used for the descriptor length
- */
-#define ACPI_RESOURCE_NAME_IRQ                  0x20
-#define ACPI_RESOURCE_NAME_DMA                  0x28
-#define ACPI_RESOURCE_NAME_START_DEPENDENT      0x30
-#define ACPI_RESOURCE_NAME_END_DEPENDENT        0x38
-#define ACPI_RESOURCE_NAME_IO                   0x40
-#define ACPI_RESOURCE_NAME_FIXED_IO             0x48
-#define ACPI_RESOURCE_NAME_RESERVED_S1          0x50
-#define ACPI_RESOURCE_NAME_RESERVED_S2          0x58
-#define ACPI_RESOURCE_NAME_RESERVED_S3          0x60
-#define ACPI_RESOURCE_NAME_RESERVED_S4          0x68
-#define ACPI_RESOURCE_NAME_VENDOR_SMALL         0x70
-#define ACPI_RESOURCE_NAME_END_TAG              0x78
-
-/*
- * Large resource descriptor "names" as defined by the ACPI specification.
- * Note: includes the Large Descriptor bit in bit[7]
- */
-#define ACPI_RESOURCE_NAME_MEMORY24             0x81
-#define ACPI_RESOURCE_NAME_GENERIC_REGISTER     0x82
-#define ACPI_RESOURCE_NAME_RESERVED_L1          0x83
-#define ACPI_RESOURCE_NAME_VENDOR_LARGE         0x84
-#define ACPI_RESOURCE_NAME_MEMORY32             0x85
-#define ACPI_RESOURCE_NAME_FIXED_MEMORY32       0x86
-#define ACPI_RESOURCE_NAME_ADDRESS32            0x87
-#define ACPI_RESOURCE_NAME_ADDRESS16            0x88
-#define ACPI_RESOURCE_NAME_EXTENDED_IRQ         0x89
-#define ACPI_RESOURCE_NAME_ADDRESS64            0x8A
-#define ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64   0x8B
-#define ACPI_RESOURCE_NAME_LARGE_MAX            0x8B
-
-/*****************************************************************************
- *
- * Miscellaneous
- *
- ****************************************************************************/
-
-#define ACPI_ASCII_ZERO                 0x30
-
-/*****************************************************************************
- *
- * Debugger
- *
- ****************************************************************************/
-
-struct acpi_db_method_info {
-	acpi_handle main_thread_gate;
-	acpi_handle thread_complete_gate;
-	u32 *threads;
-	u32 num_threads;
-	u32 num_created;
-	u32 num_completed;
-
-	char *name;
-	u32 flags;
-	u32 num_loops;
-	char pathname[128];
-	char **args;
-
-	/*
-	 * Arguments to be passed to the method for the Threads command:
-	 *   the number of threads, the ID of the current thread, and
-	 *   the index of the current thread among all threads created.
-	 */
-	char init_args;
-	char *arguments[4];
-	char num_threads_str[11];
-	char id_of_thread_str[11];
-	char index_of_thread_str[11];
-};
-
-struct acpi_integrity_info {
-	u32 nodes;
-	u32 objects;
-};
-
-#define ACPI_DB_REDIRECTABLE_OUTPUT     0x01
-#define ACPI_DB_CONSOLE_OUTPUT          0x02
-#define ACPI_DB_DUPLICATE_OUTPUT        0x03
-
-/*****************************************************************************
- *
- * Debug
- *
- ****************************************************************************/
-
-/* Entry for a memory allocation (debug only) */
-
-#define ACPI_MEM_MALLOC                 0
-#define ACPI_MEM_CALLOC                 1
-#define ACPI_MAX_MODULE_NAME            16
-
-#define ACPI_COMMON_DEBUG_MEM_HEADER \
-	struct acpi_debug_mem_block     *previous; \
-	struct acpi_debug_mem_block     *next; \
-	u32                             size; \
-	u32                             component; \
-	u32                             line; \
-	char                            module[ACPI_MAX_MODULE_NAME]; \
-	u8                              alloc_type;
-
-struct acpi_debug_mem_header {
-ACPI_COMMON_DEBUG_MEM_HEADER};
-
-struct acpi_debug_mem_block {
-	ACPI_COMMON_DEBUG_MEM_HEADER u64 user_space;
-};
-
-#define ACPI_MEM_LIST_GLOBAL            0
-#define ACPI_MEM_LIST_NSNODE            1
-#define ACPI_MEM_LIST_MAX               1
-#define ACPI_NUM_MEM_LISTS              2
-
-struct acpi_memory_list {
-	char *list_name;
-	void *list_head;
-	u16 object_size;
-	u16 max_depth;
-	u16 current_depth;
-	u16 link_offset;
-
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-
-	/* Statistics for debug memory tracking only */
-
-	u32 total_allocated;
-	u32 total_freed;
-	u32 max_occupied;
-	u32 total_size;
-	u32 current_total_size;
-	u32 requests;
-	u32 hits;
-#endif
-};
-
-#endif				/* __ACLOCAL_H__ */
diff --git a/include/acpi/acmacros.h b/include/acpi/acmacros.h
deleted file mode 100644
index a597207..0000000
--- a/include/acpi/acmacros.h
+++ /dev/null
@@ -1,693 +0,0 @@
-/******************************************************************************
- *
- * Name: acmacros.h - C macros for the entire subsystem.
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#ifndef __ACMACROS_H__
-#define __ACMACROS_H__
-
-/*
- * Data manipulation macros
- */
-#define ACPI_LOWORD(l)                  ((u16)(u32)(l))
-#define ACPI_HIWORD(l)                  ((u16)((((u32)(l)) >> 16) & 0xFFFF))
-#define ACPI_LOBYTE(l)                  ((u8)(u16)(l))
-#define ACPI_HIBYTE(l)                  ((u8)((((u16)(l)) >> 8) & 0xFF))
-
-#define ACPI_SET_BIT(target,bit)        ((target) |= (bit))
-#define ACPI_CLEAR_BIT(target,bit)      ((target) &= ~(bit))
-#define ACPI_MIN(a,b)                   (((a)<(b))?(a):(b))
-#define ACPI_MAX(a,b)                   (((a)>(b))?(a):(b))
-
-/* Size calculation */
-
-#define ACPI_ARRAY_LENGTH(x)            (sizeof(x) / sizeof((x)[0]))
-
-/*
- * Extract data using a pointer. Any more than a byte and we
- * get into potential alignment issues -- see the STORE macros below.
- * Use with care.
- */
-#define ACPI_GET8(ptr)                  *ACPI_CAST_PTR (u8, ptr)
-#define ACPI_GET16(ptr)                 *ACPI_CAST_PTR (u16, ptr)
-#define ACPI_GET32(ptr)                 *ACPI_CAST_PTR (u32, ptr)
-#define ACPI_GET64(ptr)                 *ACPI_CAST_PTR (u64, ptr)
-#define ACPI_SET8(ptr)                  *ACPI_CAST_PTR (u8, ptr)
-#define ACPI_SET16(ptr)                 *ACPI_CAST_PTR (u16, ptr)
-#define ACPI_SET32(ptr)                 *ACPI_CAST_PTR (u32, ptr)
-#define ACPI_SET64(ptr)                 *ACPI_CAST_PTR (u64, ptr)
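A hedged sketch of the caveat above: the GET/SET forms dereference the pointer directly, so pulling a multi-byte field out of a raw byte stream is only safe where misaligned loads are tolerated; the MOVE macros further down are the portable alternative. Names here are illustrative.

static u32 acpi_example_fetch_dword(u8 *aml_ptr)
{
	u32 value;

	/* Direct dereference: fine only where misaligned loads are allowed */
	value = ACPI_GET32(aml_ptr);

	/* Byte-wise alternative that is always safe (see ACPI_MOVE_32_TO_32
	 * below); overwrites the value read above with the same result */
	ACPI_MOVE_32_TO_32(&value, aml_ptr);

	return value;
}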
-
-/*
- * Pointer manipulation
- */
-#define ACPI_CAST_PTR(t, p)             ((t *) (acpi_uintptr_t) (p))
-#define ACPI_CAST_INDIRECT_PTR(t, p)    ((t **) (acpi_uintptr_t) (p))
-#define ACPI_ADD_PTR(t, a, b)		ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b)))
-#define ACPI_PTR_DIFF(a, b)		(acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b)))
-
-/* Pointer/Integer type conversions */
-
-#define ACPI_TO_POINTER(i)		ACPI_ADD_PTR (void, (void *) NULL, (acpi_size) i)
-#define ACPI_TO_INTEGER(p)              ACPI_PTR_DIFF (p, (void *) NULL)
-#define ACPI_OFFSET(d, f)               (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f), (void *) NULL)
-#define ACPI_PHYSADDR_TO_PTR(i)         ACPI_TO_POINTER(i)
-#define ACPI_PTR_TO_PHYSADDR(i)         ACPI_TO_INTEGER(i)
-
-#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
-#define ACPI_COMPARE_NAME(a, b)         (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b)))
-#else
-#define ACPI_COMPARE_NAME(a, b)         (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE))
-#endif
-
-/*
- * Full 64-bit integer must be available on both 32-bit and 64-bit platforms
- */
-struct acpi_integer_overlay {
-	u32 lo_dword;
-	u32 hi_dword;
-};
-
-#define ACPI_LODWORD(integer)           (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->lo_dword)
-#define ACPI_HIDWORD(integer)           (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->hi_dword)
-
-/*
- * printf() format helpers
- */
-
-/* Split 64-bit integer into two 32-bit values. Use with %8.8_x%8.8_x */
-
-#define ACPI_FORMAT_UINT64(i)           ACPI_HIDWORD(i), ACPI_LODWORD(i)
-
-#if ACPI_MACHINE_WIDTH == 64
-#define ACPI_FORMAT_NATIVE_UINT(i)      ACPI_FORMAT_UINT64(i)
-#else
-#define ACPI_FORMAT_NATIVE_UINT(i)      0, (i)
-#endif
-
-/*
- * Macros for moving data around to/from buffers that are possibly unaligned.
- * If the hardware supports the transfer of unaligned data, just do the store.
- * Otherwise, we have to move one byte at a time.
- */
-#ifdef ACPI_BIG_ENDIAN
-/*
- * Macros for big-endian machines
- */
-
-/* These macros reverse the bytes during the move, converting little-endian to big endian */
-
-			  /* Big Endian      <==        Little Endian */
-			  /*  Hi...Lo                     Lo...Hi     */
-/* 16-bit source, 16/32/64 destination */
-
-#define ACPI_MOVE_16_TO_16(d, s)        {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[1];\
-					   ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];}
-
-#define ACPI_MOVE_16_TO_32(d, s)        {(*(u32 *)(void *)(d))=0;\
-							   ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
-							   ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
-
-#define ACPI_MOVE_16_TO_64(d, s)        {(*(u64 *)(void *)(d))=0;\
-									 ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
-									 ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
-
-/* 32-bit source, 16/32/64 destination */
-
-#define ACPI_MOVE_32_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)	/* Truncate to 16 */
-
-#define ACPI_MOVE_32_TO_32(d, s)        {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[3];\
-										 ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\
-										 ((  u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
-										 ((  u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
-
-#define ACPI_MOVE_32_TO_64(d, s)        {(*(u64 *)(void *)(d))=0;\
-										   ((u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\
-										   ((u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[2];\
-										   ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
-										   ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
-
-/* 64-bit source, 16/32/64 destination */
-
-#define ACPI_MOVE_64_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)	/* Truncate to 16 */
-
-#define ACPI_MOVE_64_TO_32(d, s)        ACPI_MOVE_32_TO_32(d, s)	/* Truncate to 32 */
-
-#define ACPI_MOVE_64_TO_64(d, s)        {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[7];\
-										 ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[6];\
-										 ((  u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[5];\
-										 ((  u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[4];\
-										 ((  u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\
-										 ((  u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[2];\
-										 ((  u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
-										 ((  u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
-#else
-/*
- * Macros for little-endian machines
- */
-
-#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
-
-/* The hardware supports unaligned transfers, just do the little-endian move */
-
-/* 16-bit source, 16/32/64 destination */
-
-#define ACPI_MOVE_16_TO_16(d, s)        *(u16 *)(void *)(d) = *(u16 *)(void *)(s)
-#define ACPI_MOVE_16_TO_32(d, s)        *(u32 *)(void *)(d) = *(u16 *)(void *)(s)
-#define ACPI_MOVE_16_TO_64(d, s)        *(u64 *)(void *)(d) = *(u16 *)(void *)(s)
-
-/* 32-bit source, 16/32/64 destination */
-
-#define ACPI_MOVE_32_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)	/* Truncate to 16 */
-#define ACPI_MOVE_32_TO_32(d, s)        *(u32 *)(void *)(d) = *(u32 *)(void *)(s)
-#define ACPI_MOVE_32_TO_64(d, s)        *(u64 *)(void *)(d) = *(u32 *)(void *)(s)
-
-/* 64-bit source, 16/32/64 destination */
-
-#define ACPI_MOVE_64_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)	/* Truncate to 16 */
-#define ACPI_MOVE_64_TO_32(d, s)        ACPI_MOVE_32_TO_32(d, s)	/* Truncate to 32 */
-#define ACPI_MOVE_64_TO_64(d, s)        *(u64 *)(void *)(d) = *(u64 *)(void *)(s)
-
-#else
-/*
- * The hardware does not support unaligned transfers. We must move the
- * data one byte at a time. These macros work whether the source or
- * the destination (or both) is/are unaligned.  (Little-endian move)
- */
-
-/* 16-bit source, 16/32/64 destination */
-
-#define ACPI_MOVE_16_TO_16(d, s)        {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\
-										 ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];}
-
-#define ACPI_MOVE_16_TO_32(d, s)        {(*(u32 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d, s);}
-#define ACPI_MOVE_16_TO_64(d, s)        {(*(u64 *)(void *)(d)) = 0; ACPI_MOVE_16_TO_16(d, s);}
-
-/* 32-bit source, 16/32/64 destination */
-
-#define ACPI_MOVE_32_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)	/* Truncate to 16 */
-
-#define ACPI_MOVE_32_TO_32(d, s)        {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\
-										 ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];\
-										 ((  u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[2];\
-										 ((  u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[3];}
-
-#define ACPI_MOVE_32_TO_64(d, s)        {(*(u64 *)(void *)(d)) = 0; ACPI_MOVE_32_TO_32(d, s);}
-
-/* 64-bit source, 16/32/64 destination */
-
-#define ACPI_MOVE_64_TO_16(d, s)        ACPI_MOVE_16_TO_16(d, s)	/* Truncate to 16 */
-#define ACPI_MOVE_64_TO_32(d, s)        ACPI_MOVE_32_TO_32(d, s)	/* Truncate to 32 */
-#define ACPI_MOVE_64_TO_64(d, s)        {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[0];\
-										 ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[1];\
-										 ((  u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[2];\
-										 ((  u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[3];\
-										 ((  u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[4];\
-										 ((  u8 *)(void *)(d))[5] = ((u8 *)(void *)(s))[5];\
-										 ((  u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[6];\
-										 ((  u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[7];}
-#endif
-#endif
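A worked example of what the two branches above guarantee, using an illustrative buffer: because table/AML data is little-endian, the big-endian variants byte-swap while storing, so the numeric result is the same on any host.

static u32 acpi_example_widen16(u8 *raw)
{
	u32 out;

	/* With raw[0] == 0x34 and raw[1] == 0x12, "out" ends up as
	 * 0x00001234 on both little- and big-endian machines */
	ACPI_MOVE_16_TO_32(&out, raw);
	return out;
}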
-
-/* Macros based on machine integer width */
-
-#if ACPI_MACHINE_WIDTH == 32
-#define ACPI_MOVE_SIZE_TO_16(d, s)       ACPI_MOVE_32_TO_16(d, s)
-
-#elif ACPI_MACHINE_WIDTH == 64
-#define ACPI_MOVE_SIZE_TO_16(d, s)       ACPI_MOVE_64_TO_16(d, s)
-
-#else
-#error unknown ACPI_MACHINE_WIDTH
-#endif
-
-/*
- * Fast power-of-two math macros for non-optimized compilers
- */
-#define _ACPI_DIV(value, power_of2)      ((u32) ((value) >> (power_of2)))
-#define _ACPI_MUL(value, power_of2)      ((u32) ((value) << (power_of2)))
-#define _ACPI_MOD(value, divisor)        ((u32) ((value) & ((divisor) -1)))
-
-#define ACPI_DIV_2(a)                   _ACPI_DIV(a, 1)
-#define ACPI_MUL_2(a)                   _ACPI_MUL(a, 1)
-#define ACPI_MOD_2(a)                   _ACPI_MOD(a, 2)
-
-#define ACPI_DIV_4(a)                   _ACPI_DIV(a, 2)
-#define ACPI_MUL_4(a)                   _ACPI_MUL(a, 2)
-#define ACPI_MOD_4(a)                   _ACPI_MOD(a, 4)
-
-#define ACPI_DIV_8(a)                   _ACPI_DIV(a, 3)
-#define ACPI_MUL_8(a)                   _ACPI_MUL(a, 3)
-#define ACPI_MOD_8(a)                   _ACPI_MOD(a, 8)
-
-#define ACPI_DIV_16(a)                  _ACPI_DIV(a, 4)
-#define ACPI_MUL_16(a)                  _ACPI_MUL(a, 4)
-#define ACPI_MOD_16(a)                  _ACPI_MOD(a, 16)
-
-#define ACPI_DIV_32(a)                  _ACPI_DIV(a, 5)
-#define ACPI_MUL_32(a)                  _ACPI_MUL(a, 5)
-#define ACPI_MOD_32(a)                  _ACPI_MOD(a, 32)
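Worked values for the shift/AND forms above (function name illustrative):

static void acpi_example_pow2_math(void)
{
	u32 q = ACPI_DIV_8(29);		/* 29 >> 3 == 3   (29 / 8)  */
	u32 p = ACPI_MUL_16(3);		/* 3 << 4  == 48  (3 * 16)  */
	u32 r = ACPI_MOD_4(29);		/* 29 & 3  == 1   (29 % 4)  */

	/* Valid only because the divisor/multiplier is a power of two */
	(void)q; (void)p; (void)r;
}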
-
-/*
- * Rounding macros (Power of two boundaries only)
- */
-#define ACPI_ROUND_DOWN(value, boundary)     (((acpi_size)(value)) & \
-						(~(((acpi_size) boundary)-1)))
-
-#define ACPI_ROUND_UP(value, boundary)	     ((((acpi_size)(value)) + \
-						(((acpi_size) boundary)-1)) & \
-						(~(((acpi_size) boundary)-1)))
-
-/* Note: sizeof(acpi_size) evaluates to either 4 or 8 (32- vs 64-bit mode) */
-
-#define ACPI_ROUND_DOWN_TO_32BIT(a)         ACPI_ROUND_DOWN(a, 4)
-#define ACPI_ROUND_DOWN_TO_64BIT(a)         ACPI_ROUND_DOWN(a, 8)
-#define ACPI_ROUND_DOWN_TO_NATIVE_WORD(a)   ACPI_ROUND_DOWN(a, sizeof(acpi_size))
-
-#define ACPI_ROUND_UP_TO_32BIT(a)           ACPI_ROUND_UP(a, 4)
-#define ACPI_ROUND_UP_TO_64BIT(a)           ACPI_ROUND_UP(a, 8)
-#define ACPI_ROUND_UP_TO_NATIVE_WORD(a)     ACPI_ROUND_UP(a, sizeof(acpi_size))
-
-#define ACPI_ROUND_BITS_UP_TO_BYTES(a)      ACPI_DIV_8((a) + 7)
-#define ACPI_ROUND_BITS_DOWN_TO_BYTES(a)    ACPI_DIV_8((a))
-
-#define ACPI_ROUND_UP_TO_1K(a)              (((a) + 1023) >> 10)
-
-/* Generic (non-power-of-two) rounding */
-
-#define ACPI_ROUND_UP_TO(value, boundary)   (((value) + ((boundary)-1)) / (boundary))
-
-#define ACPI_IS_MISALIGNED(value)	    (((acpi_size) value) & (sizeof(acpi_size)-1))
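Worked values for the rounding helpers above; note the boundary must be a power of two for ACPI_ROUND_UP/ACPI_ROUND_DOWN (function name illustrative):

static void acpi_example_rounding(void)
{
	acpi_size down = ACPI_ROUND_DOWN(0x1003, 8);	/* 0x1000 */
	acpi_size up = ACPI_ROUND_UP(0x1003, 8);	/* 0x1008 */
	u32 bytes = ACPI_ROUND_BITS_UP_TO_BYTES(13);	/* 2 bytes hold 13 bits */
	u32 kunits = ACPI_ROUND_UP_TO_1K(1500);		/* 2 (units of 1 KB) */

	(void)down; (void)up; (void)bytes; (void)kunits;
}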
-
-/*
- * Bitmask creation
- * Bit positions start at zero.
- * MASK_BITS_ABOVE creates a mask starting AT the position and above
- * MASK_BITS_BELOW creates a mask starting one bit BELOW the position
- */
-#define ACPI_MASK_BITS_ABOVE(position)      (~((ACPI_INTEGER_MAX) << ((u32) (position))))
-#define ACPI_MASK_BITS_BELOW(position)      ((ACPI_INTEGER_MAX) << ((u32) (position)))
-
-/* Bitfields within ACPI registers */
-
-#define ACPI_REGISTER_PREPARE_BITS(val, pos, mask)      ((val << pos) & mask)
-#define ACPI_REGISTER_INSERT_VALUE(reg, pos, mask, val)  reg = (reg & (~(mask))) | ACPI_REGISTER_PREPARE_BITS(val, pos, mask)
-
-#define ACPI_INSERT_BITS(target, mask, source)          target = ((target & (~(mask))) | (source & mask))
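A small sketch of the bitmask and register-field helpers above, with illustrative values:

static void acpi_example_bitfields(void)
{
	u32 reg = 0;

	/* Keep only the bits below position 8: ~(ACPI_INTEGER_MAX << 8) */
	u32 low = (u32) ACPI_MASK_BITS_ABOVE(8) & 0x1234;	/* 0x34 */

	/* Write value 0x5 into the 4-bit field at bit position 4 */
	ACPI_REGISTER_INSERT_VALUE(reg, 4, 0x00F0, 0x5);	/* reg == 0x0050 */

	(void)low; (void)reg;
}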
-
-/*
- * A struct acpi_namespace_node can appear in some contexts
- * where a pointer to a union acpi_operand_object can also
- * appear. This macro is used to distinguish them.
- *
- * The "Descriptor" field is the first field in both structures.
- */
-#define ACPI_GET_DESCRIPTOR_TYPE(d)     (((union acpi_descriptor *)(void *)(d))->common.descriptor_type)
-#define ACPI_SET_DESCRIPTOR_TYPE(d, t)  (((union acpi_descriptor *)(void *)(d))->common.descriptor_type = t)
-
-/* Macro to test the object type */
-
-#define ACPI_GET_OBJECT_TYPE(d)         (((union acpi_operand_object *)(void *)(d))->common.type)
-
-/*
- * Macros for the master AML opcode table
- */
-#if defined (ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
-#define ACPI_OP(name, Pargs, Iargs, obj_type, class, type, flags) \
-	{name, (u32)(Pargs), (u32)(Iargs), (u32)(flags), obj_type, class, type}
-#else
-#define ACPI_OP(name, Pargs, Iargs, obj_type, class, type, flags) \
-	{(u32)(Pargs), (u32)(Iargs), (u32)(flags), obj_type, class, type}
-#endif
-
-#ifdef ACPI_DISASSEMBLER
-#define ACPI_DISASM_ONLY_MEMBERS(a)     a;
-#else
-#define ACPI_DISASM_ONLY_MEMBERS(a)
-#endif
-
-#define ARG_TYPE_WIDTH                  5
-#define ARG_1(x)                        ((u32)(x))
-#define ARG_2(x)                        ((u32)(x) << (1 * ARG_TYPE_WIDTH))
-#define ARG_3(x)                        ((u32)(x) << (2 * ARG_TYPE_WIDTH))
-#define ARG_4(x)                        ((u32)(x) << (3 * ARG_TYPE_WIDTH))
-#define ARG_5(x)                        ((u32)(x) << (4 * ARG_TYPE_WIDTH))
-#define ARG_6(x)                        ((u32)(x) << (5 * ARG_TYPE_WIDTH))
-
-#define ARGI_LIST1(a)                   (ARG_1(a))
-#define ARGI_LIST2(a, b)                (ARG_1(b)|ARG_2(a))
-#define ARGI_LIST3(a, b, c)             (ARG_1(c)|ARG_2(b)|ARG_3(a))
-#define ARGI_LIST4(a, b, c, d)          (ARG_1(d)|ARG_2(c)|ARG_3(b)|ARG_4(a))
-#define ARGI_LIST5(a, b, c, d, e)       (ARG_1(e)|ARG_2(d)|ARG_3(c)|ARG_4(b)|ARG_5(a))
-#define ARGI_LIST6(a, b, c, d, e, f)    (ARG_1(f)|ARG_2(e)|ARG_3(d)|ARG_4(c)|ARG_5(b)|ARG_6(a))
-
-#define ARGP_LIST1(a)                   (ARG_1(a))
-#define ARGP_LIST2(a, b)                (ARG_1(a)|ARG_2(b))
-#define ARGP_LIST3(a, b, c)             (ARG_1(a)|ARG_2(b)|ARG_3(c))
-#define ARGP_LIST4(a, b, c, d)          (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d))
-#define ARGP_LIST5(a, b, c, d, e)       (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e))
-#define ARGP_LIST6(a, b, c, d, e, f)    (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e)|ARG_6(f))
-
-#define GET_CURRENT_ARG_TYPE(list)      (list & ((u32) 0x1F))
-#define INCREMENT_ARG_LIST(list)        (list >>= ((u32) ARG_TYPE_WIDTH))
-
-#if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES)
-/*
- * Module name is included in both debug and non-debug versions primarily for
- * error messages. The __FILE__ macro is not very useful for this, because it
- * often includes the entire pathname to the module
- */
-#define ACPI_MODULE_NAME(name)		static const char ACPI_UNUSED_VAR _acpi_module_name[] = name;
-#else
-#define ACPI_MODULE_NAME(name)
-#endif
-
-/*
- * Ascii error messages can be configured out
- */
-#ifndef ACPI_NO_ERROR_MESSAGES
-#define AE_INFO                         _acpi_module_name, __LINE__
-
-/*
- * Error reporting. Caller's module and line number are inserted by AE_INFO;
- * the plist contains a set of parens to allow variable-length lists.
- * These macros are used for both the debug and non-debug versions of the code.
- */
-#define ACPI_INFO(plist)                acpi_ut_info plist
-#define ACPI_WARNING(plist)             acpi_ut_warning plist
-#define ACPI_EXCEPTION(plist)           acpi_ut_exception plist
-#define ACPI_ERROR(plist)               acpi_ut_error plist
-#define ACPI_ERROR_NAMESPACE(s, e)      acpi_ns_report_error (AE_INFO, s, e);
-#define ACPI_ERROR_METHOD(s, n, p, e)   acpi_ns_report_method_error (AE_INFO, s, n, p, e);
-
-#else
-
-/* No error messages */
-
-#define ACPI_INFO(plist)
-#define ACPI_WARNING(plist)
-#define ACPI_EXCEPTION(plist)
-#define ACPI_ERROR(plist)
-#define ACPI_ERROR_NAMESPACE(s, e)
-#define ACPI_ERROR_METHOD(s, n, p, e)
-#endif
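A sketch of how a caller uses these macros; it assumes an ACPI_MODULE_NAME(...) declaration at the top of the file, and "obj_handle" plus the message text are illustrative. The extra parentheses form the plist that is pasted after the function name, which lets AE_INFO inject the module name and line number.

static void acpi_example_report(void *obj_handle)
{
	ACPI_ERROR((AE_INFO, "Could not map handle %p", obj_handle));

	/* ...which expands (roughly) to:
	 *   acpi_ut_error(_acpi_module_name, __LINE__,
	 *                 "Could not map handle %p", obj_handle);
	 */
}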
-
-/*
- * Debug macros that are conditionally compiled
- */
-#ifdef ACPI_DEBUG_OUTPUT
-
-/*
- * Common parameters used for debug output functions:
- * line number, function name, module(file) name, component ID
- */
-#define ACPI_DEBUG_PARAMETERS           __LINE__, ACPI_GET_FUNCTION_NAME, _acpi_module_name, _COMPONENT
-
-/*
- * Function entry tracing
- */
-
-/*
- * If ACPI_GET_FUNCTION_NAME was not defined in the compiler-dependent header,
- * define it now. This is the case where the compiler does not support
- * a __func__ macro or equivalent.
- */
-#ifndef ACPI_GET_FUNCTION_NAME
-#define ACPI_GET_FUNCTION_NAME          _acpi_function_name
-/*
- * The Name parameter should be the procedure name as a quoted string.
- * The function name is also used by the function exit macros below.
- * Note: (const char) is used to be compatible with the debug interfaces
- * and macros such as __func__.
- */
-#define ACPI_FUNCTION_NAME(name)	static const char _acpi_function_name[] = #name;
-
-#else
-/* Compiler supports __func__ (or equivalent) -- Ignore this macro */
-
-#define ACPI_FUNCTION_NAME(name)
-#endif
-
-#ifdef CONFIG_ACPI_DEBUG_FUNC_TRACE
-
-#define ACPI_FUNCTION_TRACE(a)          ACPI_FUNCTION_NAME(a) \
-			  acpi_ut_trace(ACPI_DEBUG_PARAMETERS)
-#define ACPI_FUNCTION_TRACE_PTR(a, b)   ACPI_FUNCTION_NAME(a) \
-					   acpi_ut_trace_ptr(ACPI_DEBUG_PARAMETERS, (void *)b)
-#define ACPI_FUNCTION_TRACE_U32(a, b)   ACPI_FUNCTION_NAME(a) \
-							 acpi_ut_trace_u32(ACPI_DEBUG_PARAMETERS, (u32)b)
-#define ACPI_FUNCTION_TRACE_STR(a, b)   ACPI_FUNCTION_NAME(a) \
-									  acpi_ut_trace_str(ACPI_DEBUG_PARAMETERS, (char *)b)
-
-#define ACPI_FUNCTION_ENTRY()           acpi_ut_track_stack_ptr()
-
-/*
- * Function exit tracing.
- * WARNING: These macros include a return statement. This is usually considered
- * bad form, but having a separate exit macro is very ugly and difficult to maintain.
- * One of the FUNCTION_TRACE macros above must be used in conjunction with these macros
- * so that "_acpi_function_name" is defined.
- *
- * Note: the DO_WHILE0 macro is used to prevent some compilers from complaining
- * about these constructs.
- */
-#ifdef ACPI_USE_DO_WHILE_0
-#define ACPI_DO_WHILE0(a)               do a while(0)
-#else
-#define ACPI_DO_WHILE0(a)               a
-#endif
-
-#define return_VOID                     ACPI_DO_WHILE0 ({ \
-											acpi_ut_exit (ACPI_DEBUG_PARAMETERS); \
-											return;})
-/*
- * There are two versions of most of the return macros. The default version is
- * safer, since it avoids side-effects by guaranteeing that the argument will
- * not be evaluated twice.
- *
- * A less-safe version of the macros is provided for optional use if the
- * compiler uses excessive CPU stack (for example, this may happen in the
- * debug case if code optimization is disabled.)
- */
-#ifndef ACPI_SIMPLE_RETURN_MACROS
-
-#define return_ACPI_STATUS(s)           ACPI_DO_WHILE0 ({ \
-											register acpi_status _s = (s); \
-											acpi_ut_status_exit (ACPI_DEBUG_PARAMETERS, _s); \
-											return (_s); })
-#define return_PTR(s)                   ACPI_DO_WHILE0 ({ \
-											register void *_s = (void *) (s); \
-											acpi_ut_ptr_exit (ACPI_DEBUG_PARAMETERS, (u8 *) _s); \
-											return (_s); })
-#define return_VALUE(s)                 ACPI_DO_WHILE0 ({ \
-											register acpi_integer _s = (s); \
-											acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, _s); \
-											return (_s); })
-#define return_UINT8(s)                 ACPI_DO_WHILE0 ({ \
-											register u8 _s = (u8) (s); \
-											acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (acpi_integer) _s); \
-											return (_s); })
-#define return_UINT32(s)                ACPI_DO_WHILE0 ({ \
-											register u32 _s = (u32) (s); \
-											acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (acpi_integer) _s); \
-											return (_s); })
-#else				/* Use original less-safe macros */
-
-#define return_ACPI_STATUS(s)           ACPI_DO_WHILE0 ({ \
-											acpi_ut_status_exit (ACPI_DEBUG_PARAMETERS, (s)); \
-											return((s)); })
-#define return_PTR(s)                   ACPI_DO_WHILE0 ({ \
-											acpi_ut_ptr_exit (ACPI_DEBUG_PARAMETERS, (u8 *) (s)); \
-											return((s)); })
-#define return_VALUE(s)                 ACPI_DO_WHILE0 ({ \
-											acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (acpi_integer) (s)); \
-											return((s)); })
-#define return_UINT8(s)                 return_VALUE(s)
-#define return_UINT32(s)                return_VALUE(s)
-
-#endif				/* ACPI_SIMPLE_RETURN_MACROS */
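A hedged sketch of the difference, assuming function tracing is enabled and the file declares ACPI_MODULE_NAME; "acpi_ex_do_work" is a hypothetical helper with a side effect.

static acpi_status acpi_example_return(struct acpi_walk_state *walk_state)
{
	ACPI_FUNCTION_TRACE(acpi_example_return);

	/* Default macros: acpi_ex_do_work() runs once, its status is saved
	 * in a local, logged by acpi_ut_status_exit(), then returned.
	 * With ACPI_SIMPLE_RETURN_MACROS the call appears twice in the
	 * expansion, so its side effect would run twice. */
	return_ACPI_STATUS(acpi_ex_do_work(walk_state));
}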
-
-#else /* !CONFIG_ACPI_DEBUG_FUNC_TRACE */
-
-#define ACPI_FUNCTION_TRACE(a)
-#define ACPI_FUNCTION_TRACE_PTR(a,b)
-#define ACPI_FUNCTION_TRACE_U32(a,b)
-#define ACPI_FUNCTION_TRACE_STR(a,b)
-#define ACPI_FUNCTION_EXIT
-#define ACPI_FUNCTION_STATUS_EXIT(s)
-#define ACPI_FUNCTION_VALUE_EXIT(s)
-#define ACPI_FUNCTION_TRACE(a)
-#define ACPI_FUNCTION_ENTRY()
-
-#define return_VOID                     return
-#define return_ACPI_STATUS(s)           return(s)
-#define return_VALUE(s)                 return(s)
-#define return_UINT8(s)                 return(s)
-#define return_UINT32(s)                return(s)
-#define return_PTR(s)                   return(s)
-
-#endif /* CONFIG_ACPI_DEBUG_FUNC_TRACE */
-
-/* Conditional execution */
-
-#define ACPI_DEBUG_EXEC(a)              a
-#define ACPI_NORMAL_EXEC(a)
-
-#define ACPI_DEBUG_DEFINE(a)            a;
-#define ACPI_DEBUG_ONLY_MEMBERS(a)      a;
-#define _VERBOSE_STRUCTURES
-
-/* Stack and buffer dumping */
-
-#define ACPI_DUMP_STACK_ENTRY(a)        acpi_ex_dump_operand((a), 0)
-#define ACPI_DUMP_OPERANDS(a, b, c)	acpi_ex_dump_operands(a, b, c)
-
-#define ACPI_DUMP_ENTRY(a, b)           acpi_ns_dump_entry (a, b)
-#define ACPI_DUMP_PATHNAME(a, b, c, d)  acpi_ns_dump_pathname(a, b, c, d)
-#define ACPI_DUMP_RESOURCE_LIST(a)      acpi_rs_dump_resource_list(a)
-#define ACPI_DUMP_BUFFER(a, b)          acpi_ut_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
-
-/*
- * Master debug print macros
- * Print iff:
- *    1) Debug print for the current component is enabled
- *    2) Debug error level or trace level for the print statement is enabled
- */
-#define ACPI_DEBUG_PRINT(plist)         acpi_ut_debug_print plist
-#define ACPI_DEBUG_PRINT_RAW(plist)     acpi_ut_debug_print_raw plist
-
-#else
-/*
- * This is the non-debug case -- make everything go away,
- * leaving no executable debug code!
- */
-#define ACPI_DEBUG_EXEC(a)
-#define ACPI_NORMAL_EXEC(a)             a;
-
-#define ACPI_DEBUG_DEFINE(a)		do { } while(0)
-#define ACPI_DEBUG_ONLY_MEMBERS(a)	do { } while(0)
-#define ACPI_FUNCTION_NAME(a)		do { } while(0)
-#define ACPI_FUNCTION_TRACE(a)		do { } while(0)
-#define ACPI_FUNCTION_TRACE_PTR(a, b)	do { } while(0)
-#define ACPI_FUNCTION_TRACE_U32(a, b)	do { } while(0)
-#define ACPI_FUNCTION_TRACE_STR(a, b)	do { } while(0)
-#define ACPI_FUNCTION_EXIT		do { } while(0)
-#define ACPI_FUNCTION_STATUS_EXIT(s)	do { } while(0)
-#define ACPI_FUNCTION_VALUE_EXIT(s)	do { } while(0)
-#define ACPI_FUNCTION_ENTRY()		do { } while(0)
-#define ACPI_DUMP_STACK_ENTRY(a)	do { } while(0)
-#define ACPI_DUMP_OPERANDS(a, b, c)     do { } while(0)
-#define ACPI_DUMP_ENTRY(a, b)		do { } while(0)
-#define ACPI_DUMP_TABLES(a, b)		do { } while(0)
-#define ACPI_DUMP_PATHNAME(a, b, c, d)	do { } while(0)
-#define ACPI_DUMP_RESOURCE_LIST(a)	do { } while(0)
-#define ACPI_DUMP_BUFFER(a, b)		do { } while(0)
-#define ACPI_DEBUG_PRINT(pl)		do { } while(0)
-#define ACPI_DEBUG_PRINT_RAW(pl)	do { } while(0)
-
-#define return_VOID                     return
-#define return_ACPI_STATUS(s)           return(s)
-#define return_VALUE(s)                 return(s)
-#define return_UINT8(s)                 return(s)
-#define return_UINT32(s)                return(s)
-#define return_PTR(s)                   return(s)
-
-#endif
-
-/*
- * Some code only gets executed when the debugger is built in.
- * Note that this is entirely independent of whether the
- * DEBUG_PRINT stuff (set by ACPI_DEBUG_OUTPUT) is on, or not.
- */
-#ifdef ACPI_DEBUGGER
-#define ACPI_DEBUGGER_EXEC(a)           a
-#else
-#define ACPI_DEBUGGER_EXEC(a)
-#endif
-
-#ifdef ACPI_DEBUG_OUTPUT
-/*
- * 1) Set name to blanks
- * 2) Copy the object name
- */
-#define ACPI_ADD_OBJECT_NAME(a,b)       ACPI_MEMSET (a->common.name, ' ', sizeof (a->common.name));\
-										ACPI_STRNCPY (a->common.name, acpi_gbl_ns_type_names[b], sizeof (a->common.name))
-#else
-
-#define ACPI_ADD_OBJECT_NAME(a,b)
-#endif
-
-/*
- * Memory allocation tracking (DEBUG ONLY)
- */
-#define ACPI_MEM_PARAMETERS         _COMPONENT, _acpi_module_name, __LINE__
-
-#ifndef ACPI_DBG_TRACK_ALLOCATIONS
-
-/* Memory allocation */
-
-#ifndef ACPI_ALLOCATE
-#define ACPI_ALLOCATE(a)            acpi_ut_allocate((acpi_size)(a), ACPI_MEM_PARAMETERS)
-#endif
-#ifndef ACPI_ALLOCATE_ZEROED
-#define ACPI_ALLOCATE_ZEROED(a)     acpi_ut_allocate_zeroed((acpi_size)(a), ACPI_MEM_PARAMETERS)
-#endif
-#ifndef ACPI_FREE
-#define ACPI_FREE(a)                acpi_os_free(a)
-#endif
-#define ACPI_MEM_TRACKING(a)
-
-#else
-
-/* Memory allocation */
-
-#define ACPI_ALLOCATE(a)            acpi_ut_allocate_and_track((acpi_size)(a), ACPI_MEM_PARAMETERS)
-#define ACPI_ALLOCATE_ZEROED(a)     acpi_ut_allocate_zeroed_and_track((acpi_size)(a), ACPI_MEM_PARAMETERS)
-#define ACPI_FREE(a)                acpi_ut_free_and_track(a, ACPI_MEM_PARAMETERS)
-#define ACPI_MEM_TRACKING(a)        a
-
-#endif				/* ACPI_DBG_TRACK_ALLOCATIONS */
-
-/* Preemption point */
-#ifndef ACPI_PREEMPTION_POINT
-#define ACPI_PREEMPTION_POINT() /* no preemption */
-#endif
-
-#endif				/* ACMACROS_H */
diff --git a/include/acpi/acnamesp.h b/include/acpi/acnamesp.h
deleted file mode 100644
index db4e6f6..0000000
--- a/include/acpi/acnamesp.h
+++ /dev/null
@@ -1,321 +0,0 @@
-/******************************************************************************
- *
- * Name: acnamesp.h - Namespace subcomponent prototypes and defines
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#ifndef __ACNAMESP_H__
-#define __ACNAMESP_H__
-
-/* To search the entire name space, pass this as search_base */
-
-#define ACPI_NS_ALL                 ((acpi_handle)0)
-
-/*
- * Elements of acpi_ns_properties are bit significant
- * and should be one-to-one with values of acpi_object_type
- */
-#define ACPI_NS_NORMAL              0
-#define ACPI_NS_NEWSCOPE            1	/* a definition of this type opens a name scope */
-#define ACPI_NS_LOCAL               2	/* suppress search of enclosing scopes */
-
-/* Flags for acpi_ns_lookup, acpi_ns_search_and_enter */
-
-#define ACPI_NS_NO_UPSEARCH         0
-#define ACPI_NS_SEARCH_PARENT       0x01
-#define ACPI_NS_DONT_OPEN_SCOPE     0x02
-#define ACPI_NS_NO_PEER_SEARCH      0x04
-#define ACPI_NS_ERROR_IF_FOUND      0x08
-#define ACPI_NS_PREFIX_IS_SCOPE     0x10
-#define ACPI_NS_EXTERNAL            0x20
-#define ACPI_NS_TEMPORARY           0x40
-
-/* Flags for acpi_ns_walk_namespace */
-
-#define ACPI_NS_WALK_NO_UNLOCK      0
-#define ACPI_NS_WALK_UNLOCK         0x01
-#define ACPI_NS_WALK_TEMP_NODES     0x02
-
-/*
- * nsinit - Namespace initialization
- */
-acpi_status acpi_ns_initialize_objects(void);
-
-acpi_status acpi_ns_initialize_devices(void);
-
-/*
- * nsload -  Namespace loading
- */
-acpi_status acpi_ns_load_namespace(void);
-
-acpi_status
-acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node);
-
-/*
- * nswalk - walk the namespace
- */
-acpi_status
-acpi_ns_walk_namespace(acpi_object_type type,
-		       acpi_handle start_object,
-		       u32 max_depth,
-		       u32 flags,
-		       acpi_walk_callback user_function,
-		       void *context, void **return_value);
-
-struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, struct acpi_namespace_node
-						  *parent, struct acpi_namespace_node
-						  *child);
-
-/*
- * nsparse - table parsing
- */
-acpi_status
-acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node);
-
-acpi_status
-acpi_ns_one_complete_parse(u32 pass_number,
-			   u32 table_index,
-			   struct acpi_namespace_node *start_node);
-
-/*
- * nsaccess - Top-level namespace access
- */
-acpi_status acpi_ns_root_initialize(void);
-
-acpi_status
-acpi_ns_lookup(union acpi_generic_state *scope_info,
-	       char *name,
-	       acpi_object_type type,
-	       acpi_interpreter_mode interpreter_mode,
-	       u32 flags,
-	       struct acpi_walk_state *walk_state,
-	       struct acpi_namespace_node **ret_node);
-
-/*
- * nsalloc - Named object allocation/deallocation
- */
-struct acpi_namespace_node *acpi_ns_create_node(u32 name);
-
-void acpi_ns_delete_node(struct acpi_namespace_node *node);
-
-void
-acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_handle);
-
-void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id);
-
-void acpi_ns_detach_object(struct acpi_namespace_node *node);
-
-void acpi_ns_delete_children(struct acpi_namespace_node *parent);
-
-int acpi_ns_compare_names(char *name1, char *name2);
-
-/*
- * nsdump - Namespace dump/print utilities
- */
-#ifdef	ACPI_FUTURE_USAGE
-void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth);
-#endif				/* ACPI_FUTURE_USAGE */
-
-void acpi_ns_dump_entry(acpi_handle handle, u32 debug_level);
-
-void
-acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component);
-
-void acpi_ns_print_pathname(u32 num_segments, char *pathname);
-
-acpi_status
-acpi_ns_dump_one_object(acpi_handle obj_handle,
-			u32 level, void *context, void **return_value);
-
-#ifdef	ACPI_FUTURE_USAGE
-void
-acpi_ns_dump_objects(acpi_object_type type,
-		     u8 display_type,
-		     u32 max_depth,
-		     acpi_owner_id owner_id, acpi_handle start_handle);
-#endif				/* ACPI_FUTURE_USAGE */
-
-/*
- * nseval - Namespace evaluation functions
- */
-acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info);
-
-/*
- * nspredef - Support for predefined/reserved names
- */
-acpi_status
-acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
-			       union acpi_operand_object *return_object);
-
-const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
-								    acpi_namespace_node
-								    *node);
-
-void
-acpi_ns_check_parameter_count(char *pathname,
-			      struct acpi_namespace_node *node,
-			      const union acpi_predefined_info *info);
-
-/*
- * nsnames - Name and Scope manipulation
- */
-u32 acpi_ns_opens_scope(acpi_object_type type);
-
-acpi_status
-acpi_ns_build_external_path(struct acpi_namespace_node *node,
-			    acpi_size size, char *name_buffer);
-
-char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node);
-
-char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
-
-acpi_status
-acpi_ns_handle_to_pathname(acpi_handle target_handle,
-			   struct acpi_buffer *buffer);
-
-u8
-acpi_ns_pattern_match(struct acpi_namespace_node *obj_node, char *search_for);
-
-acpi_status
-acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
-		 const char *external_pathname,
-		 u32 flags, struct acpi_namespace_node **out_node);
-
-acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node);
-
-/*
- * nsobject - Object management for namespace nodes
- */
-acpi_status
-acpi_ns_attach_object(struct acpi_namespace_node *node,
-		      union acpi_operand_object *object, acpi_object_type type);
-
-union acpi_operand_object *acpi_ns_get_attached_object(struct
-						       acpi_namespace_node
-						       *node);
-
-union acpi_operand_object *acpi_ns_get_secondary_object(union
-							acpi_operand_object
-							*obj_desc);
-
-acpi_status
-acpi_ns_attach_data(struct acpi_namespace_node *node,
-		    acpi_object_handler handler, void *data);
-
-acpi_status
-acpi_ns_detach_data(struct acpi_namespace_node *node,
-		    acpi_object_handler handler);
-
-acpi_status
-acpi_ns_get_attached_data(struct acpi_namespace_node *node,
-			  acpi_object_handler handler, void **data);
-
-/*
- * nssearch - Namespace searching and entry
- */
-acpi_status
-acpi_ns_search_and_enter(u32 entry_name,
-			 struct acpi_walk_state *walk_state,
-			 struct acpi_namespace_node *node,
-			 acpi_interpreter_mode interpreter_mode,
-			 acpi_object_type type,
-			 u32 flags, struct acpi_namespace_node **ret_node);
-
-acpi_status
-acpi_ns_search_one_scope(u32 entry_name,
-			 struct acpi_namespace_node *node,
-			 acpi_object_type type,
-			 struct acpi_namespace_node **ret_node);
-
-void
-acpi_ns_install_node(struct acpi_walk_state *walk_state,
-		     struct acpi_namespace_node *parent_node,
-		     struct acpi_namespace_node *node, acpi_object_type type);
-
-/*
- * nsutils - Utility functions
- */
-u8 acpi_ns_valid_root_prefix(char prefix);
-
-acpi_object_type acpi_ns_get_type(struct acpi_namespace_node *node);
-
-u32 acpi_ns_local(acpi_object_type type);
-
-void
-acpi_ns_report_error(const char *module_name,
-		     u32 line_number,
-		     const char *internal_name, acpi_status lookup_status);
-
-void
-acpi_ns_report_method_error(const char *module_name,
-			    u32 line_number,
-			    const char *message,
-			    struct acpi_namespace_node *node,
-			    const char *path, acpi_status lookup_status);
-
-void
-acpi_ns_print_node_pathname(struct acpi_namespace_node *node, const char *msg);
-
-acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info);
-
-void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info);
-
-acpi_status
-acpi_ns_internalize_name(const char *dotted_name, char **converted_name);
-
-acpi_status
-acpi_ns_externalize_name(u32 internal_name_length,
-			 const char *internal_name,
-			 u32 * converted_name_length, char **converted_name);
-
-struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle);
-
-acpi_handle acpi_ns_convert_entry_to_handle(struct acpi_namespace_node *node);
-
-void acpi_ns_terminate(void);
-
-struct acpi_namespace_node *acpi_ns_get_parent_node(struct acpi_namespace_node
-						    *node);
-
-struct acpi_namespace_node *acpi_ns_get_next_valid_node(struct
-							acpi_namespace_node
-							*node);
-
-#endif				/* __ACNAMESP_H__ */
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index db8852d..5c823d5 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -45,9 +45,9 @@
 #define __ACOUTPUT_H__
 
 /*
- * Debug levels and component IDs.  These are used to control the
- * granularity of the output of the DEBUG_PRINT macro -- on a per-
- * component basis and a per-exception-type basis.
+ * Debug levels and component IDs. These are used to control the
+ * granularity of the output of the ACPI_DEBUG_PRINT macro -- on a
+ * per-component basis and a per-exception-type basis.
  */
 
 /* Component IDs are used in the global "DebugLayer" */
@@ -69,8 +69,10 @@
 
 #define ACPI_COMPILER               0x00001000
 #define ACPI_TOOLS                  0x00002000
+#define ACPI_EXAMPLE                0x00004000
+#define ACPI_DRIVER                 0x00008000
 
-#define ACPI_ALL_COMPONENTS         0x00003FFF
+#define ACPI_ALL_COMPONENTS         0x0000FFFF
 #define ACPI_COMPONENT_DEFAULT      (ACPI_ALL_COMPONENTS)
 
 /* Component IDs reserved for ACPI drivers */
@@ -78,7 +80,7 @@
 #define ACPI_ALL_DRIVERS            0xFFFF0000
 
 /*
- * Raw debug output levels, do not use these in the DEBUG_PRINT macros
+ * Raw debug output levels, do not use these in the ACPI_DEBUG_PRINT macros
  */
 #define ACPI_LV_INIT                0x00000001
 #define ACPI_LV_DEBUG_OBJECT        0x00000002
@@ -176,4 +178,95 @@
 #define ACPI_NORMAL_DEFAULT         (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT)
 #define ACPI_DEBUG_ALL              (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
 
+#if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES)
+/*
+ * Module name is included in both debug and non-debug versions primarily for
+ * error messages. The __FILE__ macro is not very useful for this, because it
+ * often includes the entire pathname to the module
+ */
+#define ACPI_MODULE_NAME(name)          static const char ACPI_UNUSED_VAR _acpi_module_name[] = name;
+#else
+#define ACPI_MODULE_NAME(name)
+#endif
+
+/*
+ * Ascii error messages can be configured out
+ */
+#ifndef ACPI_NO_ERROR_MESSAGES
+#define AE_INFO                         _acpi_module_name, __LINE__
+
+/*
+ * Error reporting. Caller's module and line number are inserted by AE_INFO;
+ * the plist contains a set of parens to allow variable-length lists.
+ * These macros are used for both the debug and non-debug versions of the code.
+ */
+#define ACPI_INFO(plist)                acpi_info plist
+#define ACPI_WARNING(plist)             acpi_warning plist
+#define ACPI_EXCEPTION(plist)           acpi_exception plist
+#define ACPI_ERROR(plist)               acpi_error plist
+
+#else
+
+/* No error messages */
+
+#define ACPI_INFO(plist)
+#define ACPI_WARNING(plist)
+#define ACPI_EXCEPTION(plist)
+#define ACPI_ERROR(plist)
+
+#endif				/* ACPI_NO_ERROR_MESSAGES */
+
+/*
+ * Debug macros that are conditionally compiled
+ */
+#ifdef ACPI_DEBUG_OUTPUT
+
+/*
+ * If ACPI_GET_FUNCTION_NAME was not defined in the compiler-dependent header,
+ * define it now. This is the case where the compiler does not support
+ * a __FUNCTION__ macro or equivalent.
+ */
+#ifndef ACPI_GET_FUNCTION_NAME
+#define ACPI_GET_FUNCTION_NAME          _acpi_function_name
+
+/*
+ * The Name parameter should be the procedure name as a quoted string.
+ * The function name is also used by the function exit macros below.
+ * Note: (const char) is used to be compatible with the debug interfaces
+ * and macros such as __FUNCTION__.
+ */
+#define ACPI_FUNCTION_NAME(name)        static const char _acpi_function_name[] = #name;
+
+#else
+/* Compiler supports __FUNCTION__ (or equivalent) -- Ignore this macro */
+
+#define ACPI_FUNCTION_NAME(name)
+#endif				/* ACPI_GET_FUNCTION_NAME */
+
+/*
+ * Common parameters used for debug output functions:
+ * line number, function name, module(file) name, component ID
+ */
+#define ACPI_DEBUG_PARAMETERS           __LINE__, ACPI_GET_FUNCTION_NAME, _acpi_module_name, _COMPONENT
+
+/*
+ * Master debug print macros
+ * Print message if and only if:
+ *    1) Debug print for the current component is enabled
+ *    2) Debug error level or trace level for the print statement is enabled
+ */
+#define ACPI_DEBUG_PRINT(plist)         acpi_debug_print plist
+#define ACPI_DEBUG_PRINT_RAW(plist)     acpi_debug_print_raw plist
+
+#else
+/*
+ * This is the non-debug case -- make everything go away,
+ * leaving no executable debug code!
+ */
+#define ACPI_FUNCTION_NAME(a)
+#define ACPI_DEBUG_PRINT(pl)
+#define ACPI_DEBUG_PRINT_RAW(pl)
+
+#endif				/* ACPI_DEBUG_OUTPUT */
+
 #endif				/* __ACOUTPUT_H__ */
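A minimal driver-side sketch of the conditional print machinery, assuming <acpi/acpi.h> is included and that ACPI_DB_INFO is one of the standard trace levels built from the ACPI_LV_* bits; the component choice, module name, and function are illustrative.

#define _COMPONENT		ACPI_DRIVER
ACPI_MODULE_NAME("exdriver")

static void acpi_example_trace(u32 gpe_number)
{
	/* Emitted only when acpi_dbg_layer has ACPI_DRIVER set and
	 * acpi_dbg_level has the requested trace bit set */
	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "GPE %u handled\n", gpe_number));
}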
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index c515ef6..472b7bf 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Name: acpi.h - Master include file, Publics and external data.
+ * Name: acpi.h - Master public include file used to interface to ACPICA
  *
  *****************************************************************************/
 
@@ -45,25 +45,22 @@
 #define __ACPI_H__
 
 /*
- * Common includes for all ACPI driver files
- * We put them here because we don't want to duplicate them
- * in the rest of the source code again and again.
+ * Public include files for use by code that will interface to ACPICA.
+ *
+ * Information includes the ACPICA data types, names, exceptions, and
+ * external interface prototypes. Also included are the definitions for
+ * all ACPI tables (FADT, MADT, etc.)
+ *
+ * Note: The order of these include files is important.
  */
-#include "acnames.h"		/* Global ACPI names and strings */
-#include "acconfig.h"		/* Configuration constants */
-#include "platform/acenv.h"	/* Target environment specific items */
-#include "actypes.h"		/* Fundamental common data types */
-#include "acexcep.h"		/* ACPI exception codes */
-#include "acmacros.h"		/* C macros */
+#include "platform/acenv.h"	/* Environment-specific items */
+#include "acnames.h"		/* Common ACPI names and strings */
+#include "actypes.h"		/* ACPICA data types and structures */
+#include "acexcep.h"		/* ACPICA exceptions */
 #include "actbl.h"		/* ACPI table definitions */
-#include "aclocal.h"		/* Internal data types */
 #include "acoutput.h"		/* Error output and Debug macros */
-#include "acpiosxf.h"		/* Interfaces to the ACPI-to-OS layer */
+#include "acrestyp.h"		/* Resource Descriptor structs */
+#include "acpiosxf.h"		/* OSL interfaces (ACPICA-to-OS) */
 #include "acpixf.h"		/* ACPI core subsystem external interfaces */
-#include "acobject.h"		/* ACPI internal object */
-#include "acstruct.h"		/* Common structures */
-#include "acglobal.h"		/* All global variables */
-#include "achware.h"		/* Hardware defines and interfaces */
-#include "acutils.h"		/* Utility interfaces */
 
 #endif				/* __ACPI_H__ */
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index b91440a..a62720a 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -121,8 +121,11 @@
 acpi_status acpi_os_signal_semaphore(acpi_semaphore handle, u32 units);
 
 /*
- * Mutex primitives
+ * Mutex primitives. May be configured to use semaphores instead via
+ * ACPI_MUTEX_TYPE (see platform/acenv.h)
  */
+#if (ACPI_MUTEX_TYPE != ACPI_BINARY_SEMAPHORE)
+
 acpi_status acpi_os_create_mutex(acpi_mutex * out_handle);
 
 void acpi_os_delete_mutex(acpi_mutex handle);
@@ -130,13 +133,7 @@
 acpi_status acpi_os_acquire_mutex(acpi_mutex handle, u16 timeout);
 
 void acpi_os_release_mutex(acpi_mutex handle);
-
-/* Temporary macros for Mutex* interfaces, map to existing semaphore xfaces */
-
-#define acpi_os_create_mutex(out_handle)    acpi_os_create_semaphore (1, 1, out_handle)
-#define acpi_os_delete_mutex(handle)        (void) acpi_os_delete_semaphore (handle)
-#define acpi_os_acquire_mutex(handle,time)  acpi_os_wait_semaphore (handle, 1, time)
-#define acpi_os_release_mutex(handle)       (void) acpi_os_signal_semaphore (handle, 1)
+#endif
 
 /*
  * Memory allocation and mapping
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 33bc0e3b1..c8e8cf4 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -45,9 +45,32 @@
 #ifndef __ACXFACE_H__
 #define __ACXFACE_H__
 
+/* Current ACPICA subsystem version in YYYYMMDD format */
+
+#define ACPI_CA_VERSION                 0x20081204
+
 #include "actypes.h"
 #include "actbl.h"
 
+extern u8 acpi_gbl_permanent_mmap;
+
+/*
+ * Globals that are publicly available, allowing for
+ * run time configuration
+ */
+extern u32 acpi_dbg_level;
+extern u32 acpi_dbg_layer;
+extern u8 acpi_gbl_enable_interpreter_slack;
+extern u8 acpi_gbl_all_methods_serialized;
+extern u8 acpi_gbl_create_osi_method;
+extern u8 acpi_gbl_leave_wake_gpes_disabled;
+extern acpi_name acpi_gbl_trace_method_name;
+extern u32 acpi_gbl_trace_flags;
+
+extern u32 acpi_current_gpe_count;
+extern struct acpi_table_fadt acpi_gbl_FADT;
+
+extern u32 acpi_rsdt_forced;
 /*
  * Global interfaces
  */
@@ -79,11 +102,6 @@
 
 acpi_status acpi_purge_cached_objects(void);
 
-#ifdef ACPI_FUTURE_USAGE
-acpi_status
-acpi_install_initialization_handler(acpi_init_handler handler, u32 function);
-#endif
-
 /*
  * ACPI Memory management
  */
@@ -193,9 +211,12 @@
 acpi_status acpi_get_parent(acpi_handle object, acpi_handle * out_handle);
 
 /*
- * Event handler interfaces
+ * Handler interfaces
  */
 acpi_status
+acpi_install_initialization_handler(acpi_init_handler handler, u32 function);
+
+acpi_status
 acpi_install_fixed_event_handler(u32 acpi_event,
 				 acpi_event_handler handler, void *context);
 
@@ -227,6 +248,10 @@
 			 u32 gpe_number,
 			 u32 type, acpi_event_handler address, void *context);
 
+acpi_status
+acpi_remove_gpe_handler(acpi_handle gpe_device,
+			u32 gpe_number, acpi_event_handler address);
+
 #ifdef ACPI_FUTURE_USAGE
 acpi_status acpi_install_exception_handler(acpi_exception_handler handler);
 #endif
@@ -238,10 +263,6 @@
 
 acpi_status acpi_release_global_lock(u32 handle);
 
-acpi_status
-acpi_remove_gpe_handler(acpi_handle gpe_device,
-			u32 gpe_number, acpi_event_handler address);
-
 acpi_status acpi_enable_event(u32 event, u32 flags);
 
 acpi_status acpi_disable_event(u32 event, u32 flags);
@@ -250,6 +271,9 @@
 
 acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status);
 
+/*
+ * GPE Interfaces
+ */
 acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type);
 
 acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number);
@@ -263,6 +287,12 @@
 		    u32 gpe_number,
 		    u32 flags, acpi_event_status * event_status);
 
+acpi_status acpi_disable_all_gpes(void);
+
+acpi_status acpi_enable_all_runtime_gpes(void);
+
+acpi_status acpi_get_gpe_device(u32 gpe_index, acpi_handle *gpe_device);
+
 acpi_status
 acpi_install_gpe_block(acpi_handle gpe_device,
 		       struct acpi_generic_address *gpe_block_address,
@@ -313,6 +343,8 @@
 /*
  * Hardware (ACPI device) interfaces
  */
+acpi_status acpi_reset(void);
+
 acpi_status acpi_get_register(u32 register_id, u32 * return_value);
 
 acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value);
@@ -320,12 +352,14 @@
 acpi_status acpi_set_register(u32 register_id, u32 value);
 
 acpi_status
-acpi_set_firmware_waking_vector(acpi_physical_address physical_address);
+acpi_set_firmware_waking_vector(u32 physical_address);
 
-#ifdef ACPI_FUTURE_USAGE
 acpi_status
-acpi_get_firmware_waking_vector(acpi_physical_address * physical_address);
-#endif
+acpi_set_firmware_waking_vector64(u64 physical_address);
+
+acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg);
+
+acpi_status acpi_write(u32 value, struct acpi_generic_address *reg);
 
 acpi_status
 acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b);
@@ -340,4 +374,42 @@
 
 acpi_status acpi_leave_sleep_state(u8 sleep_state);
 
+/*
+ * Debug output
+ */
+void ACPI_INTERNAL_VAR_XFACE
+acpi_error(const char *module_name,
+	   u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_exception(const char *module_name,
+	       u32 line_number,
+	       acpi_status status, const char *format, ...) ACPI_PRINTF_LIKE(4);
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_warning(const char *module_name,
+	     u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_info(const char *module_name,
+	  u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+
+#ifdef ACPI_DEBUG_OUTPUT
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_debug_print(u32 requested_debug_level,
+		 u32 line_number,
+		 const char *function_name,
+		 const char *module_name,
+		 u32 component_id, const char *format, ...) ACPI_PRINTF_LIKE(6);
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_debug_print_raw(u32 requested_debug_level,
+		     u32 line_number,
+		     const char *function_name,
+		     const char *module_name,
+		     u32 component_id,
+		     const char *format, ...) ACPI_PRINTF_LIKE(6);
+#endif
+
 #endif				/* __ACXFACE_H__ */
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
new file mode 100644
index 0000000..9ffe00f
--- /dev/null
+++ b/include/acpi/acrestyp.h
@@ -0,0 +1,405 @@
+/******************************************************************************
+ *
+ * Name: acrestyp.h - Defines, types, and structures for resource descriptors
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACRESTYP_H__
+#define __ACRESTYP_H__
+
+/*
+ * Definitions for Resource Attributes
+ */
+typedef u16 acpi_rs_length;	/* Resource Length field is fixed at 16 bits */
+typedef u32 acpi_rsdesc_size;	/* Max Resource Descriptor size is (Length+3) = (64_k-1)+3 */
+
+/*
+ * Memory Attributes
+ */
+#define ACPI_READ_ONLY_MEMORY           (u8) 0x00
+#define ACPI_READ_WRITE_MEMORY          (u8) 0x01
+
+#define ACPI_NON_CACHEABLE_MEMORY       (u8) 0x00
+#define ACPI_CACHABLE_MEMORY            (u8) 0x01
+#define ACPI_WRITE_COMBINING_MEMORY     (u8) 0x02
+#define ACPI_PREFETCHABLE_MEMORY        (u8) 0x03
+
+/*
+ * IO Attributes
+ * The ISA IO ranges are:     n000-n0_fFh, n400-n4_fFh, n800-n8_fFh, n_c00-n_cFFh.
+ * The non-ISA IO ranges are: n100-n3_fFh, n500-n7_fFh, n900-n_bFFh, n_cd0-n_fFFh.
+ */
+#define ACPI_NON_ISA_ONLY_RANGES        (u8) 0x01
+#define ACPI_ISA_ONLY_RANGES            (u8) 0x02
+#define ACPI_ENTIRE_RANGE               (ACPI_NON_ISA_ONLY_RANGES | ACPI_ISA_ONLY_RANGES)
+
+/* Type of translation - 1=Sparse, 0=Dense */
+
+#define ACPI_SPARSE_TRANSLATION         (u8) 0x01
+
+/*
+ * IO Port Descriptor Decode
+ */
+#define ACPI_DECODE_10                  (u8) 0x00	/* 10-bit IO address decode */
+#define ACPI_DECODE_16                  (u8) 0x01	/* 16-bit IO address decode */
+
+/*
+ * IRQ Attributes
+ */
+#define ACPI_LEVEL_SENSITIVE            (u8) 0x00
+#define ACPI_EDGE_SENSITIVE             (u8) 0x01
+
+#define ACPI_ACTIVE_HIGH                (u8) 0x00
+#define ACPI_ACTIVE_LOW                 (u8) 0x01
+
+#define ACPI_EXCLUSIVE                  (u8) 0x00
+#define ACPI_SHARED                     (u8) 0x01
+
+/*
+ * DMA Attributes
+ */
+#define ACPI_COMPATIBILITY              (u8) 0x00
+#define ACPI_TYPE_A                     (u8) 0x01
+#define ACPI_TYPE_B                     (u8) 0x02
+#define ACPI_TYPE_F                     (u8) 0x03
+
+#define ACPI_NOT_BUS_MASTER             (u8) 0x00
+#define ACPI_BUS_MASTER                 (u8) 0x01
+
+#define ACPI_TRANSFER_8                 (u8) 0x00
+#define ACPI_TRANSFER_8_16              (u8) 0x01
+#define ACPI_TRANSFER_16                (u8) 0x02
+
+/*
+ * Start Dependent Functions Priority definitions
+ */
+#define ACPI_GOOD_CONFIGURATION         (u8) 0x00
+#define ACPI_ACCEPTABLE_CONFIGURATION   (u8) 0x01
+#define ACPI_SUB_OPTIMAL_CONFIGURATION  (u8) 0x02
+
+/*
+ * 16, 32 and 64-bit Address Descriptor resource types
+ */
+#define ACPI_MEMORY_RANGE               (u8) 0x00
+#define ACPI_IO_RANGE                   (u8) 0x01
+#define ACPI_BUS_NUMBER_RANGE           (u8) 0x02
+
+#define ACPI_ADDRESS_NOT_FIXED          (u8) 0x00
+#define ACPI_ADDRESS_FIXED              (u8) 0x01
+
+#define ACPI_POS_DECODE                 (u8) 0x00
+#define ACPI_SUB_DECODE                 (u8) 0x01
+
+#define ACPI_PRODUCER                   (u8) 0x00
+#define ACPI_CONSUMER                   (u8) 0x01
+
+/*
+ * If possible, pack the following structures to byte alignment
+ */
+#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
+#pragma pack(1)
+#endif
+
+/* UUID data structures for use in vendor-defined resource descriptors */
+
+struct acpi_uuid {
+	u8 data[ACPI_UUID_LENGTH];
+};
+
+struct acpi_vendor_uuid {
+	u8 subtype;
+	u8 data[ACPI_UUID_LENGTH];
+};
+
+/*
+ * Structures used to describe device resources
+ */
+struct acpi_resource_irq {
+	u8 descriptor_length;
+	u8 triggering;
+	u8 polarity;
+	u8 sharable;
+	u8 interrupt_count;
+	u8 interrupts[1];
+};
+
+struct acpi_resource_dma {
+	u8 type;
+	u8 bus_master;
+	u8 transfer;
+	u8 channel_count;
+	u8 channels[1];
+};
+
+struct acpi_resource_start_dependent {
+	u8 descriptor_length;
+	u8 compatibility_priority;
+	u8 performance_robustness;
+};
+
+/*
+ * The END_DEPENDENT_FUNCTIONS_RESOURCE struct is not
+ * needed because it has no fields
+ */
+
+struct acpi_resource_io {
+	u8 io_decode;
+	u8 alignment;
+	u8 address_length;
+	u16 minimum;
+	u16 maximum;
+};
+
+struct acpi_resource_fixed_io {
+	u16 address;
+	u8 address_length;
+};
+
+struct acpi_resource_vendor {
+	u16 byte_length;
+	u8 byte_data[1];
+};
+
+/* Vendor resource with UUID info (introduced in ACPI 3.0) */
+
+struct acpi_resource_vendor_typed {
+	u16 byte_length;
+	u8 uuid_subtype;
+	u8 uuid[ACPI_UUID_LENGTH];
+	u8 byte_data[1];
+};
+
+struct acpi_resource_end_tag {
+	u8 checksum;
+};
+
+struct acpi_resource_memory24 {
+	u8 write_protect;
+	u16 minimum;
+	u16 maximum;
+	u16 alignment;
+	u16 address_length;
+};
+
+struct acpi_resource_memory32 {
+	u8 write_protect;
+	u32 minimum;
+	u32 maximum;
+	u32 alignment;
+	u32 address_length;
+};
+
+struct acpi_resource_fixed_memory32 {
+	u8 write_protect;
+	u32 address;
+	u32 address_length;
+};
+
+struct acpi_memory_attribute {
+	u8 write_protect;
+	u8 caching;
+	u8 range_type;
+	u8 translation;
+};
+
+struct acpi_io_attribute {
+	u8 range_type;
+	u8 translation;
+	u8 translation_type;
+	u8 reserved1;
+};
+
+union acpi_resource_attribute {
+	struct acpi_memory_attribute mem;
+	struct acpi_io_attribute io;
+
+	/* Used for the *word_space macros */
+
+	u8 type_specific;
+};
+
+struct acpi_resource_source {
+	u8 index;
+	u16 string_length;
+	char *string_ptr;
+};
+
+/* Fields common to all address descriptors, 16/32/64 bit */
+
+#define ACPI_RESOURCE_ADDRESS_COMMON \
+	u8                                      resource_type; \
+	u8                                      producer_consumer; \
+	u8                                      decode; \
+	u8                                      min_address_fixed; \
+	u8                                      max_address_fixed; \
+	union acpi_resource_attribute           info;
+
+struct acpi_resource_address {
+ACPI_RESOURCE_ADDRESS_COMMON};
+
+struct acpi_resource_address16 {
+	ACPI_RESOURCE_ADDRESS_COMMON u16 granularity;
+	u16 minimum;
+	u16 maximum;
+	u16 translation_offset;
+	u16 address_length;
+	struct acpi_resource_source resource_source;
+};
+
+struct acpi_resource_address32 {
+	ACPI_RESOURCE_ADDRESS_COMMON u32 granularity;
+	u32 minimum;
+	u32 maximum;
+	u32 translation_offset;
+	u32 address_length;
+	struct acpi_resource_source resource_source;
+};
+
+struct acpi_resource_address64 {
+	ACPI_RESOURCE_ADDRESS_COMMON u64 granularity;
+	u64 minimum;
+	u64 maximum;
+	u64 translation_offset;
+	u64 address_length;
+	struct acpi_resource_source resource_source;
+};
+
+struct acpi_resource_extended_address64 {
+	ACPI_RESOURCE_ADDRESS_COMMON u8 revision_iD;
+	u64 granularity;
+	u64 minimum;
+	u64 maximum;
+	u64 translation_offset;
+	u64 address_length;
+	u64 type_specific;
+};
+
+struct acpi_resource_extended_irq {
+	u8 producer_consumer;
+	u8 triggering;
+	u8 polarity;
+	u8 sharable;
+	u8 interrupt_count;
+	struct acpi_resource_source resource_source;
+	u32 interrupts[1];
+};
+
+struct acpi_resource_generic_register {
+	u8 space_id;
+	u8 bit_width;
+	u8 bit_offset;
+	u8 access_size;
+	u64 address;
+};
+
+/* ACPI_RESOURCE_TYPEs */
+
+#define ACPI_RESOURCE_TYPE_IRQ                  0
+#define ACPI_RESOURCE_TYPE_DMA                  1
+#define ACPI_RESOURCE_TYPE_START_DEPENDENT      2
+#define ACPI_RESOURCE_TYPE_END_DEPENDENT        3
+#define ACPI_RESOURCE_TYPE_IO                   4
+#define ACPI_RESOURCE_TYPE_FIXED_IO             5
+#define ACPI_RESOURCE_TYPE_VENDOR               6
+#define ACPI_RESOURCE_TYPE_END_TAG              7
+#define ACPI_RESOURCE_TYPE_MEMORY24             8
+#define ACPI_RESOURCE_TYPE_MEMORY32             9
+#define ACPI_RESOURCE_TYPE_FIXED_MEMORY32       10
+#define ACPI_RESOURCE_TYPE_ADDRESS16            11
+#define ACPI_RESOURCE_TYPE_ADDRESS32            12
+#define ACPI_RESOURCE_TYPE_ADDRESS64            13
+#define ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64   14	/* ACPI 3.0 */
+#define ACPI_RESOURCE_TYPE_EXTENDED_IRQ         15
+#define ACPI_RESOURCE_TYPE_GENERIC_REGISTER     16
+#define ACPI_RESOURCE_TYPE_MAX                  16
+
+/* Master union for resource descriptors */
+
+union acpi_resource_data {
+	struct acpi_resource_irq irq;
+	struct acpi_resource_dma dma;
+	struct acpi_resource_start_dependent start_dpf;
+	struct acpi_resource_io io;
+	struct acpi_resource_fixed_io fixed_io;
+	struct acpi_resource_vendor vendor;
+	struct acpi_resource_vendor_typed vendor_typed;
+	struct acpi_resource_end_tag end_tag;
+	struct acpi_resource_memory24 memory24;
+	struct acpi_resource_memory32 memory32;
+	struct acpi_resource_fixed_memory32 fixed_memory32;
+	struct acpi_resource_address16 address16;
+	struct acpi_resource_address32 address32;
+	struct acpi_resource_address64 address64;
+	struct acpi_resource_extended_address64 ext_address64;
+	struct acpi_resource_extended_irq extended_irq;
+	struct acpi_resource_generic_register generic_reg;
+
+	/* Common fields */
+
+	struct acpi_resource_address address;	/* Common 16/32/64 address fields */
+};
+
+/* Common resource header */
+
+struct acpi_resource {
+	u32 type;
+	u32 length;
+	union acpi_resource_data data;
+};
+
+/* restore default alignment */
+
+#pragma pack()
+
+#define ACPI_RS_SIZE_NO_DATA                8	/* Id + Length fields */
+#define ACPI_RS_SIZE_MIN                    (u32) ACPI_ROUND_UP_TO_NATIVE_WORD (12)
+#define ACPI_RS_SIZE(type)                  (u32) (ACPI_RS_SIZE_NO_DATA + sizeof (type))
+
+#define ACPI_NEXT_RESOURCE(res)             (struct acpi_resource *)((u8 *) res + res->length)
+
+struct acpi_pci_routing_table {
+	u32 length;
+	u32 pin;
+	acpi_integer address;	/* here for 64-bit alignment */
+	u32 source_index;
+	char source[4];		/* pad to 64 bits so sizeof() works in all cases */
+};
+
+#endif				/* __ACRESTYP_H__ */
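
The structures and the ACPI_NEXT_RESOURCE() macro above are what callers use to walk a buffer of resource descriptors until the mandatory end tag. A minimal sketch of such a walk, not part of this patch: the dump_irq_resources() name is illustrative, and the buffer is assumed to have been produced elsewhere (for example by acpi_get_current_resources(), not shown here) with the ACPI headers included.

/*
 * Illustrative only: walk a resource list and report the IRQ
 * descriptors, using just the acrestyp.h definitions above.
 */
static void dump_irq_resources(struct acpi_resource *res)
{
	while (res->type != ACPI_RESOURCE_TYPE_END_TAG) {
		if (res->type == ACPI_RESOURCE_TYPE_IRQ) {
			struct acpi_resource_irq *irq = &res->data.irq;
			u8 i;

			for (i = 0; i < irq->interrupt_count; i++)
				printk(KERN_DEBUG "IRQ %u (%s, %s)\n",
				       irq->interrupts[i],
				       irq->triggering == ACPI_EDGE_SENSITIVE ?
						"edge" : "level",
				       irq->sharable == ACPI_SHARED ?
						"shared" : "exclusive");
		}
		res = ACPI_NEXT_RESOURCE(res);
	}
}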
diff --git a/include/acpi/actables.h b/include/acpi/actables.h
deleted file mode 100644
index 0cbe1b9..0000000
--- a/include/acpi/actables.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/******************************************************************************
- *
- * Name: actables.h - ACPI table management
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#ifndef __ACTABLES_H__
-#define __ACTABLES_H__
-
-acpi_status acpi_allocate_root_table(u32 initial_table_count);
-
-/*
- * tbfadt - FADT parse/convert/validate
- */
-void acpi_tb_parse_fadt(u32 table_index, u8 flags);
-
-void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length);
-
-/*
- * tbfind - find ACPI table
- */
-acpi_status
-acpi_tb_find_table(char *signature,
-		   char *oem_id, char *oem_table_id, u32 *table_index);
-
-/*
- * tbinstal - Table removal and deletion
- */
-acpi_status acpi_tb_resize_root_table_list(void);
-
-acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc);
-
-acpi_status
-acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index);
-
-acpi_status
-acpi_tb_store_table(acpi_physical_address address,
-		    struct acpi_table_header *table,
-		    u32 length, u8 flags, u32 *table_index);
-
-void acpi_tb_delete_table(struct acpi_table_desc *table_desc);
-
-void acpi_tb_terminate(void);
-
-void acpi_tb_delete_namespace_by_owner(u32 table_index);
-
-acpi_status acpi_tb_allocate_owner_id(u32 table_index);
-
-acpi_status acpi_tb_release_owner_id(u32 table_index);
-
-acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id);
-
-u8 acpi_tb_is_table_loaded(u32 table_index);
-
-void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded);
-
-/*
- * tbutils - table manager utilities
- */
-u8 acpi_tb_tables_loaded(void);
-
-void
-acpi_tb_print_table_header(acpi_physical_address address,
-			   struct acpi_table_header *header);
-
-u8 acpi_tb_checksum(u8 *buffer, u32 length);
-
-acpi_status
-acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length);
-
-void
-acpi_tb_install_table(acpi_physical_address address,
-		      u8 flags, char *signature, u32 table_index);
-
-acpi_status
-acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags);
-
-#endif				/* __ACTABLES_H__ */
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 13a3d9a..bf8d4cf 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -245,7 +245,7 @@
 #define ACPI_FADT_POWER_BUTTON      (1<<4)	/* 04: Power button is handled as a generic feature */
 #define ACPI_FADT_SLEEP_BUTTON      (1<<5)	/* 05: Sleep button is handled as a generic feature, or  not present */
 #define ACPI_FADT_FIXED_RTC         (1<<6)	/* 06: RTC wakeup stat not in fixed register space */
-#define ACPI_FADT_S4_RTC_WAKE       (1<<7)	/* 07: RTC wakeup stat not possible from S4 */
+#define ACPI_FADT_S4_RTC_WAKE       (1<<7)	/* 07: RTC wakeup possible from S4 */
 #define ACPI_FADT_32BIT_TIMER       (1<<8)	/* 08: tmr_val is 32 bits 0=24-bits */
 #define ACPI_FADT_DOCKING_SUPPORTED (1<<9)	/* 09: Docking supported */
 #define ACPI_FADT_RESET_REGISTER    (1<<10)	/* 10: System reset via the FADT RESET_REG supported */
@@ -288,6 +288,31 @@
 
 #define ACPI_FADT_OFFSET(f)             (u8) ACPI_OFFSET (struct acpi_table_fadt, f)
 
+union acpi_name_union {
+	u32 integer;
+	char ascii[4];
+};
+
+/*
+ * Internal ACPI Table Descriptor. One per ACPI table
+ */
+struct acpi_table_desc {
+	acpi_physical_address address;
+	struct acpi_table_header *pointer;
+	u32 length;		/* Length fixed at 32 bits */
+	union acpi_name_union signature;
+	acpi_owner_id owner_id;
+	u8 flags;
+};
+
+/* Flags for above */
+
+#define ACPI_TABLE_ORIGIN_UNKNOWN       (0)
+#define ACPI_TABLE_ORIGIN_MAPPED        (1)
+#define ACPI_TABLE_ORIGIN_ALLOCATED     (2)
+#define ACPI_TABLE_ORIGIN_MASK          (3)
+#define ACPI_TABLE_IS_LOADED            (4)
+
 /*
  * Get the remaining ACPI tables
  */
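
The new struct acpi_table_desc keeps two independent facts in its flags field: the low two bits (ACPI_TABLE_ORIGIN_MASK) record how the table image was obtained, and ACPI_TABLE_IS_LOADED records whether the table has been loaded into the namespace. A minimal sketch of how these flags are meant to be tested, not part of this patch; the helper names are illustrative.

/* Illustrative only: interpret the acpi_table_desc flags defined above */
static int table_needs_unmap(struct acpi_table_desc *desc)
{
	return (desc->flags & ACPI_TABLE_ORIGIN_MASK) ==
	       ACPI_TABLE_ORIGIN_MAPPED;
}

static int table_is_loaded(struct acpi_table_desc *desc)
{
	return (desc->flags & ACPI_TABLE_IS_LOADED) != 0;
}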
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 63f5b4c..18963b9 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -627,7 +627,7 @@
 	u32 uncorrectable_error_mask;
 	u32 uncorrectable_error_severity;
 	u32 correctable_error_mask;
-	u32 advanced_error_cababilities;
+	u32 advanced_error_capabilities;
 };
 
 /* Hardware Error Notification */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 7220361..a20aab5 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -204,11 +204,10 @@
 
 /*******************************************************************************
  *
- * OS-dependent and compiler-dependent types
+ * OS-dependent types
  *
  * If the defaults below are not appropriate for the host system, they can
- * be defined in the compiler-specific or OS-specific header, and this will
- * take precedence.
+ * be defined in the OS-specific header, and this will take precedence.
  *
  ******************************************************************************/
 
@@ -218,12 +217,6 @@
 #define acpi_thread_id			acpi_size
 #endif
 
-/* Object returned from acpi_os_create_lock */
-
-#ifndef acpi_spinlock
-#define acpi_spinlock                   void *
-#endif
-
 /* Flags for acpi_os_acquire_lock/acpi_os_release_lock */
 
 #ifndef acpi_cpu_flags
@@ -233,8 +226,50 @@
 /* Object returned from acpi_os_create_cache */
 
 #ifndef acpi_cache_t
+#ifdef ACPI_USE_LOCAL_CACHE
 #define acpi_cache_t                    struct acpi_memory_list
+#else
+#define acpi_cache_t                    void *
 #endif
+#endif
+
+/*
+ * Synchronization objects - Mutexes, Semaphores, and spin_locks
+ */
+#if (ACPI_MUTEX_TYPE == ACPI_BINARY_SEMAPHORE)
+/*
+ * These macros are used if the host OS does not support a mutex object.
+ * Map the OSL Mutex interfaces to binary semaphores.
+ */
+#define acpi_mutex                      acpi_semaphore
+#define acpi_os_create_mutex(out_handle) acpi_os_create_semaphore (1, 1, out_handle)
+#define acpi_os_delete_mutex(handle)    (void) acpi_os_delete_semaphore (handle)
+#define acpi_os_acquire_mutex(handle,time) acpi_os_wait_semaphore (handle, 1, time)
+#define acpi_os_release_mutex(handle)   (void) acpi_os_signal_semaphore (handle, 1)
+#endif
+
+/* Configurable types for synchronization objects */
+
+#ifndef acpi_spinlock
+#define acpi_spinlock                   void *
+#endif
+
+#ifndef acpi_semaphore
+#define acpi_semaphore                  void *
+#endif
+
+#ifndef acpi_mutex
+#define acpi_mutex                      void *
+#endif
+
+/*******************************************************************************
+ *
+ * Compiler-dependent types
+ *
+ * If the defaults below are not appropriate for the host compiler, they can
+ * be defined in the compiler-specific header, and this will take precedence.
+ *
+ ******************************************************************************/
 
 /* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */
 
@@ -268,6 +303,43 @@
 #define ACPI_EXPORT_SYMBOL(symbol)
 #endif
 
+/******************************************************************************
+ *
+ * ACPI Specification constants (Do not change unless the specification changes)
+ *
+ *****************************************************************************/
+
+/* Number of distinct FADT-based GPE register blocks (GPE0 and GPE1) */
+
+#define ACPI_MAX_GPE_BLOCKS             2
+
+/* Default ACPI register widths */
+
+#define ACPI_GPE_REGISTER_WIDTH         8
+#define ACPI_PM1_REGISTER_WIDTH         16
+#define ACPI_PM2_REGISTER_WIDTH         8
+#define ACPI_PM_TIMER_WIDTH             32
+
+/* Names within the namespace are 4 bytes long */
+
+#define ACPI_NAME_SIZE                  4
+#define ACPI_PATH_SEGMENT_LENGTH        5	/* 4 chars for name + 1 char for separator */
+#define ACPI_PATH_SEPARATOR             '.'
+
+/* Sizes for ACPI table headers */
+
+#define ACPI_OEM_ID_SIZE                6
+#define ACPI_OEM_TABLE_ID_SIZE          8
+
+/* ACPI/PNP hardware IDs */
+
+#define PCI_ROOT_HID_STRING             "PNP0A03"
+#define PCI_EXPRESS_ROOT_HID_STRING     "PNP0A08"
+
+/* PM Timer ticks per second (HZ) */
+
+#define PM_TIMER_FREQUENCY  3579545
+
 /*******************************************************************************
  *
  * Independent types
@@ -291,13 +363,18 @@
 #endif
 
 /*
- * Mescellaneous types
+ * Miscellaneous types
  */
 typedef u32 acpi_status;	/* All ACPI Exceptions */
 typedef u32 acpi_name;		/* 4-byte ACPI name */
 typedef char *acpi_string;	/* Null terminated ASCII string */
 typedef void *acpi_handle;	/* Actually a ptr to a NS Node */
 
+/* Owner IDs are used to track namespace nodes for selective deletion */
+
+typedef u8 acpi_owner_id;
+#define ACPI_OWNER_ID_MAX               0xFF
+
 struct uint64_struct {
 	u32 lo;
 	u32 hi;
@@ -313,13 +390,8 @@
 	u32 hi;
 };
 
-/* Synchronization objects */
-
-#define acpi_mutex                      void *
-#define acpi_semaphore                  void *
-
 /*
- * Acpi integer width. In ACPI version 1, integers are 32 bits.  In ACPI
+ * Acpi integer width. In ACPI version 1, integers are 32 bits. In ACPI
  * version 2, integers are 64 bits. Note that this pertains to the ACPI integer
  * type only, not other integers used in the implementation of the ACPI CA
  * subsystem.
@@ -338,10 +410,75 @@
 #define ACPI_MAX16_DECIMAL_DIGITS        5
 #define ACPI_MAX8_DECIMAL_DIGITS         3
 
+/* PM Timer ticks per second (HZ) */
+
+#define PM_TIMER_FREQUENCY  3579545
+
 /*
  * Constants with special meanings
  */
 #define ACPI_ROOT_OBJECT                ACPI_ADD_PTR (acpi_handle, NULL, ACPI_MAX_PTR)
+#define ACPI_WAIT_FOREVER               0xFFFF	/* u16, as per ACPI spec */
+#define ACPI_DO_NOT_WAIT                0
+
+/*******************************************************************************
+ *
+ * Commonly used macros
+ *
+ ******************************************************************************/
+
+/* Data manipulation */
+
+#define ACPI_LOWORD(l)                  ((u16)(u32)(l))
+#define ACPI_HIWORD(l)                  ((u16)((((u32)(l)) >> 16) & 0xFFFF))
+#define ACPI_LOBYTE(l)                  ((u8)(u16)(l))
+#define ACPI_HIBYTE(l)                  ((u8)((((u16)(l)) >> 8) & 0xFF))
+
+/* Full 64-bit integer must be available on both 32-bit and 64-bit platforms */
+
+struct acpi_integer_overlay {
+	u32 lo_dword;
+	u32 hi_dword;
+};
+
+#define ACPI_LODWORD(integer)           (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->lo_dword)
+#define ACPI_HIDWORD(integer)           (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->hi_dword)
+
+#define ACPI_SET_BIT(target,bit)        ((target) |= (bit))
+#define ACPI_CLEAR_BIT(target,bit)      ((target) &= ~(bit))
+#define ACPI_MIN(a,b)                   (((a)<(b))?(a):(b))
+#define ACPI_MAX(a,b)                   (((a)>(b))?(a):(b))
+
+/* Size calculation */
+
+#define ACPI_ARRAY_LENGTH(x)            (sizeof(x) / sizeof((x)[0]))
+
+/* Pointer manipulation */
+
+#define ACPI_CAST_PTR(t, p)             ((t *) (acpi_uintptr_t) (p))
+#define ACPI_CAST_INDIRECT_PTR(t, p)    ((t **) (acpi_uintptr_t) (p))
+#define ACPI_ADD_PTR(t, a, b)           ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b)))
+#define ACPI_PTR_DIFF(a, b)             (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b)))
+
+/* Pointer/Integer type conversions */
+
+#define ACPI_TO_POINTER(i)              ACPI_ADD_PTR (void, (void *) NULL,(acpi_size) i)
+#define ACPI_TO_INTEGER(p)              ACPI_PTR_DIFF (p, (void *) NULL)
+#define ACPI_OFFSET(d, f)               (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f), (void *) NULL)
+#define ACPI_PHYSADDR_TO_PTR(i)         ACPI_TO_POINTER(i)
+#define ACPI_PTR_TO_PHYSADDR(i)         ACPI_TO_INTEGER(i)
+
+#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
+#define ACPI_COMPARE_NAME(a,b)          (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b)))
+#else
+#define ACPI_COMPARE_NAME(a,b)          (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE))
+#endif
+
+/*******************************************************************************
+ *
+ * Miscellaneous constants
+ *
+ ******************************************************************************/
 
 /*
  * Initialization sequence
@@ -414,7 +551,7 @@
 #define ACPI_NOTIFY_MAX                 0x0B
 
 /*
- * Types associated with ACPI names and objects.  The first group of
+ * Types associated with ACPI names and objects. The first group of
  * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition
  * of the ACPI object_type() operator (See the ACPI Spec). Therefore,
  * only add to the first group if the spec changes.
@@ -467,7 +604,7 @@
 
 /*
  * These are special object types that never appear in
- * a Namespace node, only in an union acpi_operand_object
+ * a Namespace node, only in a union acpi_operand_object
  */
 #define ACPI_TYPE_LOCAL_EXTRA           0x1C
 #define ACPI_TYPE_LOCAL_DATA            0x1D
@@ -732,6 +869,15 @@
 #define ACPI_NAME_TYPE_MAX              1
 
 /*
+ * Predefined Namespace items
+ */
+struct acpi_predefined_names {
+	char *name;
+	u8 type;
+	char *val;
+};
+
+/*
  * Structure and flags for acpi_get_system_info
  */
 #define ACPI_SYS_MODE_UNKNOWN           0x0000
@@ -787,7 +933,7 @@
 				      u16 opcode,
 				      u32 aml_offset, void *context);
 
-/* Table Event handler (Load, load_table etc) and types */
+/* Table Event handler (Load, load_table, etc.) and types */
 
 typedef
 acpi_status(*acpi_tbl_handler) (u32 event, void *table, void *context);
@@ -823,6 +969,12 @@
 #define ACPI_INTERRUPT_NOT_HANDLED      0x00
 #define ACPI_INTERRUPT_HANDLED          0x01
 
+/* Length of _HID, _UID, _CID, and UUID values */
+
+#define ACPI_DEVICE_ID_LENGTH           0x09
+#define ACPI_MAX_CID_LENGTH             48
+#define ACPI_UUID_LENGTH                16
+
 /* Common string version of device HIDs and UIDs */
 
 struct acpica_device_id {
@@ -900,357 +1052,28 @@
 };
 
 /*
- * Definitions for Resource Attributes
+ * struct acpi_memory_list is used only if the ACPICA local cache is enabled
  */
-typedef u16 acpi_rs_length;	/* Resource Length field is fixed at 16 bits */
-typedef u32 acpi_rsdesc_size;	/* Max Resource Descriptor size is (Length+3) = (64_k-1)+3 */
+struct acpi_memory_list {
+	char *list_name;
+	void *list_head;
+	u16 object_size;
+	u16 max_depth;
+	u16 current_depth;
+	u16 link_offset;
 
-/*
- *  Memory Attributes
- */
-#define ACPI_READ_ONLY_MEMORY           (u8) 0x00
-#define ACPI_READ_WRITE_MEMORY          (u8) 0x01
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
 
-#define ACPI_NON_CACHEABLE_MEMORY       (u8) 0x00
-#define ACPI_CACHABLE_MEMORY            (u8) 0x01
-#define ACPI_WRITE_COMBINING_MEMORY     (u8) 0x02
-#define ACPI_PREFETCHABLE_MEMORY        (u8) 0x03
+	/* Statistics for debug memory tracking only */
 
-/*
- *  IO Attributes
- *  The ISA IO ranges are:     n000-n0_fFh, n400-n4_fFh, n800-n8_fFh, n_c00-n_cFFh.
- *  The non-ISA IO ranges are: n100-n3_fFh, n500-n7_fFh, n900-n_bFFh, n_cd0-n_fFFh.
- */
-#define ACPI_NON_ISA_ONLY_RANGES        (u8) 0x01
-#define ACPI_ISA_ONLY_RANGES            (u8) 0x02
-#define ACPI_ENTIRE_RANGE               (ACPI_NON_ISA_ONLY_RANGES | ACPI_ISA_ONLY_RANGES)
-
-/* Type of translation - 1=Sparse, 0=Dense */
-
-#define ACPI_SPARSE_TRANSLATION         (u8) 0x01
-
-/*
- *  IO Port Descriptor Decode
- */
-#define ACPI_DECODE_10                  (u8) 0x00	/* 10-bit IO address decode */
-#define ACPI_DECODE_16                  (u8) 0x01	/* 16-bit IO address decode */
-
-/*
- *  IRQ Attributes
- */
-#define ACPI_LEVEL_SENSITIVE            (u8) 0x00
-#define ACPI_EDGE_SENSITIVE             (u8) 0x01
-
-#define ACPI_ACTIVE_HIGH                (u8) 0x00
-#define ACPI_ACTIVE_LOW                 (u8) 0x01
-
-#define ACPI_EXCLUSIVE                  (u8) 0x00
-#define ACPI_SHARED                     (u8) 0x01
-
-/*
- *  DMA Attributes
- */
-#define ACPI_COMPATIBILITY              (u8) 0x00
-#define ACPI_TYPE_A                     (u8) 0x01
-#define ACPI_TYPE_B                     (u8) 0x02
-#define ACPI_TYPE_F                     (u8) 0x03
-
-#define ACPI_NOT_BUS_MASTER             (u8) 0x00
-#define ACPI_BUS_MASTER                 (u8) 0x01
-
-#define ACPI_TRANSFER_8                 (u8) 0x00
-#define ACPI_TRANSFER_8_16              (u8) 0x01
-#define ACPI_TRANSFER_16                (u8) 0x02
-
-/*
- * Start Dependent Functions Priority definitions
- */
-#define ACPI_GOOD_CONFIGURATION         (u8) 0x00
-#define ACPI_ACCEPTABLE_CONFIGURATION   (u8) 0x01
-#define ACPI_SUB_OPTIMAL_CONFIGURATION  (u8) 0x02
-
-/*
- *  16, 32 and 64-bit Address Descriptor resource types
- */
-#define ACPI_MEMORY_RANGE               (u8) 0x00
-#define ACPI_IO_RANGE                   (u8) 0x01
-#define ACPI_BUS_NUMBER_RANGE           (u8) 0x02
-
-#define ACPI_ADDRESS_NOT_FIXED          (u8) 0x00
-#define ACPI_ADDRESS_FIXED              (u8) 0x01
-
-#define ACPI_POS_DECODE                 (u8) 0x00
-#define ACPI_SUB_DECODE                 (u8) 0x01
-
-#define ACPI_PRODUCER                   (u8) 0x00
-#define ACPI_CONSUMER                   (u8) 0x01
-
-/*
- * If possible, pack the following structures to byte alignment
- */
-#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
-#pragma pack(1)
+	u32 total_allocated;
+	u32 total_freed;
+	u32 max_occupied;
+	u32 total_size;
+	u32 current_total_size;
+	u32 requests;
+	u32 hits;
 #endif
-
-/* UUID data structures for use in vendor-defined resource descriptors */
-
-struct acpi_uuid {
-	u8 data[ACPI_UUID_LENGTH];
-};
-
-struct acpi_vendor_uuid {
-	u8 subtype;
-	u8 data[ACPI_UUID_LENGTH];
-};
-
-/*
- *  Structures used to describe device resources
- */
-struct acpi_resource_irq {
-	u8 descriptor_length;
-	u8 triggering;
-	u8 polarity;
-	u8 sharable;
-	u8 interrupt_count;
-	u8 interrupts[1];
-};
-
-struct acpi_resource_dma {
-	u8 type;
-	u8 bus_master;
-	u8 transfer;
-	u8 channel_count;
-	u8 channels[1];
-};
-
-struct acpi_resource_start_dependent {
-	u8 descriptor_length;
-	u8 compatibility_priority;
-	u8 performance_robustness;
-};
-
-/*
- * END_DEPENDENT_FUNCTIONS_RESOURCE struct is not
- * needed because it has no fields
- */
-
-struct acpi_resource_io {
-	u8 io_decode;
-	u8 alignment;
-	u8 address_length;
-	u16 minimum;
-	u16 maximum;
-};
-
-struct acpi_resource_fixed_io {
-	u16 address;
-	u8 address_length;
-};
-
-struct acpi_resource_vendor {
-	u16 byte_length;
-	u8 byte_data[1];
-};
-
-/* Vendor resource with UUID info (introduced in ACPI 3.0) */
-
-struct acpi_resource_vendor_typed {
-	u16 byte_length;
-	u8 uuid_subtype;
-	u8 uuid[ACPI_UUID_LENGTH];
-	u8 byte_data[1];
-};
-
-struct acpi_resource_end_tag {
-	u8 checksum;
-};
-
-struct acpi_resource_memory24 {
-	u8 write_protect;
-	u16 minimum;
-	u16 maximum;
-	u16 alignment;
-	u16 address_length;
-};
-
-struct acpi_resource_memory32 {
-	u8 write_protect;
-	u32 minimum;
-	u32 maximum;
-	u32 alignment;
-	u32 address_length;
-};
-
-struct acpi_resource_fixed_memory32 {
-	u8 write_protect;
-	u32 address;
-	u32 address_length;
-};
-
-struct acpi_memory_attribute {
-	u8 write_protect;
-	u8 caching;
-	u8 range_type;
-	u8 translation;
-};
-
-struct acpi_io_attribute {
-	u8 range_type;
-	u8 translation;
-	u8 translation_type;
-	u8 reserved1;
-};
-
-union acpi_resource_attribute {
-	struct acpi_memory_attribute mem;
-	struct acpi_io_attribute io;
-
-	/* Used for the *word_space macros */
-
-	u8 type_specific;
-};
-
-struct acpi_resource_source {
-	u8 index;
-	u16 string_length;
-	char *string_ptr;
-};
-
-/* Fields common to all address descriptors, 16/32/64 bit */
-
-#define ACPI_RESOURCE_ADDRESS_COMMON \
-	u8                              resource_type; \
-	u8                              producer_consumer; \
-	u8                              decode; \
-	u8                              min_address_fixed; \
-	u8                              max_address_fixed; \
-	union acpi_resource_attribute   info;
-
-struct acpi_resource_address {
-ACPI_RESOURCE_ADDRESS_COMMON};
-
-struct acpi_resource_address16 {
-	ACPI_RESOURCE_ADDRESS_COMMON u16 granularity;
-	u16 minimum;
-	u16 maximum;
-	u16 translation_offset;
-	u16 address_length;
-	struct acpi_resource_source resource_source;
-};
-
-struct acpi_resource_address32 {
-	ACPI_RESOURCE_ADDRESS_COMMON u32 granularity;
-	u32 minimum;
-	u32 maximum;
-	u32 translation_offset;
-	u32 address_length;
-	struct acpi_resource_source resource_source;
-};
-
-struct acpi_resource_address64 {
-	ACPI_RESOURCE_ADDRESS_COMMON u64 granularity;
-	u64 minimum;
-	u64 maximum;
-	u64 translation_offset;
-	u64 address_length;
-	struct acpi_resource_source resource_source;
-};
-
-struct acpi_resource_extended_address64 {
-	ACPI_RESOURCE_ADDRESS_COMMON u8 revision_iD;
-	u64 granularity;
-	u64 minimum;
-	u64 maximum;
-	u64 translation_offset;
-	u64 address_length;
-	u64 type_specific;
-};
-
-struct acpi_resource_extended_irq {
-	u8 producer_consumer;
-	u8 triggering;
-	u8 polarity;
-	u8 sharable;
-	u8 interrupt_count;
-	struct acpi_resource_source resource_source;
-	u32 interrupts[1];
-};
-
-struct acpi_resource_generic_register {
-	u8 space_id;
-	u8 bit_width;
-	u8 bit_offset;
-	u8 access_size;
-	u64 address;
-};
-
-/* ACPI_RESOURCE_TYPEs */
-
-#define ACPI_RESOURCE_TYPE_IRQ                  0
-#define ACPI_RESOURCE_TYPE_DMA                  1
-#define ACPI_RESOURCE_TYPE_START_DEPENDENT      2
-#define ACPI_RESOURCE_TYPE_END_DEPENDENT        3
-#define ACPI_RESOURCE_TYPE_IO                   4
-#define ACPI_RESOURCE_TYPE_FIXED_IO             5
-#define ACPI_RESOURCE_TYPE_VENDOR               6
-#define ACPI_RESOURCE_TYPE_END_TAG              7
-#define ACPI_RESOURCE_TYPE_MEMORY24             8
-#define ACPI_RESOURCE_TYPE_MEMORY32             9
-#define ACPI_RESOURCE_TYPE_FIXED_MEMORY32       10
-#define ACPI_RESOURCE_TYPE_ADDRESS16            11
-#define ACPI_RESOURCE_TYPE_ADDRESS32            12
-#define ACPI_RESOURCE_TYPE_ADDRESS64            13
-#define ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64   14	/* ACPI 3.0 */
-#define ACPI_RESOURCE_TYPE_EXTENDED_IRQ         15
-#define ACPI_RESOURCE_TYPE_GENERIC_REGISTER     16
-#define ACPI_RESOURCE_TYPE_MAX                  16
-
-union acpi_resource_data {
-	struct acpi_resource_irq irq;
-	struct acpi_resource_dma dma;
-	struct acpi_resource_start_dependent start_dpf;
-	struct acpi_resource_io io;
-	struct acpi_resource_fixed_io fixed_io;
-	struct acpi_resource_vendor vendor;
-	struct acpi_resource_vendor_typed vendor_typed;
-	struct acpi_resource_end_tag end_tag;
-	struct acpi_resource_memory24 memory24;
-	struct acpi_resource_memory32 memory32;
-	struct acpi_resource_fixed_memory32 fixed_memory32;
-	struct acpi_resource_address16 address16;
-	struct acpi_resource_address32 address32;
-	struct acpi_resource_address64 address64;
-	struct acpi_resource_extended_address64 ext_address64;
-	struct acpi_resource_extended_irq extended_irq;
-	struct acpi_resource_generic_register generic_reg;
-
-	/* Common fields */
-
-	struct acpi_resource_address address;	/* Common 16/32/64 address fields */
-};
-
-struct acpi_resource {
-	u32 type;
-	u32 length;
-	union acpi_resource_data data;
-};
-
-/* restore default alignment */
-
-#pragma pack()
-
-#define ACPI_RS_SIZE_NO_DATA                8	/* Id + Length fields */
-#define ACPI_RS_SIZE_MIN                    (u32) ACPI_ROUND_UP_TO_NATIVE_WORD (12)
-#define ACPI_RS_SIZE(type)                  (u32) (ACPI_RS_SIZE_NO_DATA + sizeof (type))
-
-#define ACPI_NEXT_RESOURCE(res)             (struct acpi_resource *)((u8 *) res + res->length)
-
-struct acpi_pci_routing_table {
-	u32 length;
-	u32 pin;
-	acpi_integer address;	/* here for 64-bit alignment */
-	u32 source_index;
-	char source[4];		/* pad to 64 bits so sizeof() works in all cases */
 };
 
 #endif				/* __ACTYPES_H__ */
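
Of the macros moved into actypes.h above, the integer-splitting and name-comparison ones are the least obvious, so a short sketch follows. It is not part of this patch, the function names are illustrative, and it assumes the actypes.h definitions above are in scope.

/* Illustrative only: split a 64-bit ACPI integer via the overlay struct */
static void split_acpi_integer(acpi_integer value)
{
	u32 lo = ACPI_LODWORD(value);
	u32 hi = ACPI_HIDWORD(value);
	u16 word = ACPI_LOWORD(lo);
	u8 byte = ACPI_LOBYTE(word);

	printk(KERN_DEBUG "hi=%08x lo=%08x word=%04x byte=%02x\n",
	       hi, lo, word, byte);
}

/*
 * Illustrative only: 4-byte namespace name compare; falls back to
 * ACPI_STRNCMP when misaligned 32-bit loads are not supported.
 */
static int name_is_prt(char *name)
{
	return ACPI_COMPARE_NAME(name, "_PRT");
}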
diff --git a/include/acpi/acutils.h b/include/acpi/acutils.h
deleted file mode 100644
index d8307b2..0000000
--- a/include/acpi/acutils.h
+++ /dev/null
@@ -1,585 +0,0 @@
-/******************************************************************************
- *
- * Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2008, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#ifndef _ACUTILS_H
-#define _ACUTILS_H
-
-extern const u8 acpi_gbl_resource_aml_sizes[];
-
-/* Strings used by the disassembler and debugger resource dump routines */
-
-#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
-
-extern const char *acpi_gbl_bm_decode[];
-extern const char *acpi_gbl_config_decode[];
-extern const char *acpi_gbl_consume_decode[];
-extern const char *acpi_gbl_dec_decode[];
-extern const char *acpi_gbl_he_decode[];
-extern const char *acpi_gbl_io_decode[];
-extern const char *acpi_gbl_ll_decode[];
-extern const char *acpi_gbl_max_decode[];
-extern const char *acpi_gbl_mem_decode[];
-extern const char *acpi_gbl_min_decode[];
-extern const char *acpi_gbl_mtp_decode[];
-extern const char *acpi_gbl_rng_decode[];
-extern const char *acpi_gbl_rw_decode[];
-extern const char *acpi_gbl_shr_decode[];
-extern const char *acpi_gbl_siz_decode[];
-extern const char *acpi_gbl_trs_decode[];
-extern const char *acpi_gbl_ttp_decode[];
-extern const char *acpi_gbl_typ_decode[];
-#endif
-
-/* Types for Resource descriptor entries */
-
-#define ACPI_INVALID_RESOURCE           0
-#define ACPI_FIXED_LENGTH               1
-#define ACPI_VARIABLE_LENGTH            2
-#define ACPI_SMALL_VARIABLE_LENGTH      3
-
-typedef
-acpi_status(*acpi_walk_aml_callback) (u8 * aml,
-				      u32 length,
-				      u32 offset,
-				      u8 resource_index, void **context);
-
-typedef
-acpi_status(*acpi_pkg_callback) (u8 object_type,
-				 union acpi_operand_object * source_object,
-				 union acpi_generic_state * state,
-				 void *context);
-
-struct acpi_pkg_info {
-	u8 *free_space;
-	acpi_size length;
-	u32 object_space;
-	u32 num_packages;
-};
-
-#define REF_INCREMENT       (u16) 0
-#define REF_DECREMENT       (u16) 1
-#define REF_FORCE_DELETE    (u16) 2
-
-/* acpi_ut_dump_buffer */
-
-#define DB_BYTE_DISPLAY     1
-#define DB_WORD_DISPLAY     2
-#define DB_DWORD_DISPLAY    4
-#define DB_QWORD_DISPLAY    8
-
-/*
- * utglobal - Global data structures and procedures
- */
-acpi_status acpi_ut_init_globals(void);
-
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-
-char *acpi_ut_get_mutex_name(u32 mutex_id);
-
-const char *acpi_ut_get_notify_name(u32 notify_value);
-
-#endif
-
-char *acpi_ut_get_type_name(acpi_object_type type);
-
-char *acpi_ut_get_node_name(void *object);
-
-char *acpi_ut_get_descriptor_name(void *object);
-
-const char *acpi_ut_get_reference_name(union acpi_operand_object *object);
-
-char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc);
-
-char *acpi_ut_get_region_name(u8 space_id);
-
-char *acpi_ut_get_event_name(u32 event_id);
-
-char acpi_ut_hex_to_ascii_char(acpi_integer integer, u32 position);
-
-u8 acpi_ut_valid_object_type(acpi_object_type type);
-
-/*
- * utinit - miscellaneous initialization and shutdown
- */
-acpi_status acpi_ut_hardware_initialize(void);
-
-void acpi_ut_subsystem_shutdown(void);
-
-/*
- * utclib - Local implementations of C library functions
- */
-#ifndef ACPI_USE_SYSTEM_CLIBRARY
-
-acpi_size acpi_ut_strlen(const char *string);
-
-char *acpi_ut_strcpy(char *dst_string, const char *src_string);
-
-char *acpi_ut_strncpy(char *dst_string,
-		      const char *src_string, acpi_size count);
-
-int acpi_ut_memcmp(const char *buffer1, const char *buffer2, acpi_size count);
-
-int acpi_ut_strncmp(const char *string1, const char *string2, acpi_size count);
-
-int acpi_ut_strcmp(const char *string1, const char *string2);
-
-char *acpi_ut_strcat(char *dst_string, const char *src_string);
-
-char *acpi_ut_strncat(char *dst_string,
-		      const char *src_string, acpi_size count);
-
-u32 acpi_ut_strtoul(const char *string, char **terminator, u32 base);
-
-char *acpi_ut_strstr(char *string1, char *string2);
-
-void *acpi_ut_memcpy(void *dest, const void *src, acpi_size count);
-
-void *acpi_ut_memset(void *dest, u8 value, acpi_size count);
-
-int acpi_ut_to_upper(int c);
-
-int acpi_ut_to_lower(int c);
-
-extern const u8 _acpi_ctype[];
-
-#define _ACPI_XA     0x00	/* extra alphabetic - not supported */
-#define _ACPI_XS     0x40	/* extra space */
-#define _ACPI_BB     0x00	/* BEL, BS, etc. - not supported */
-#define _ACPI_CN     0x20	/* CR, FF, HT, NL, VT */
-#define _ACPI_DI     0x04	/* '0'-'9' */
-#define _ACPI_LO     0x02	/* 'a'-'z' */
-#define _ACPI_PU     0x10	/* punctuation */
-#define _ACPI_SP     0x08	/* space */
-#define _ACPI_UP     0x01	/* 'A'-'Z' */
-#define _ACPI_XD     0x80	/* '0'-'9', 'A'-'F', 'a'-'f' */
-
-#define ACPI_IS_DIGIT(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_DI))
-#define ACPI_IS_SPACE(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_SP))
-#define ACPI_IS_XDIGIT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_XD))
-#define ACPI_IS_UPPER(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_UP))
-#define ACPI_IS_LOWER(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO))
-#define ACPI_IS_PRINT(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_SP | _ACPI_PU))
-#define ACPI_IS_ALPHA(c)  (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP))
-
-#endif				/* ACPI_USE_SYSTEM_CLIBRARY */
-
-/*
- * utcopy - Object construction and conversion interfaces
- */
-acpi_status
-acpi_ut_build_simple_object(union acpi_operand_object *obj,
-			    union acpi_object *user_obj,
-			    u8 * data_space, u32 * buffer_space_used);
-
-acpi_status
-acpi_ut_build_package_object(union acpi_operand_object *obj,
-			     u8 * buffer, u32 * space_used);
-
-acpi_status
-acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *obj,
-				struct acpi_buffer *ret_buffer);
-
-acpi_status
-acpi_ut_copy_eobject_to_iobject(union acpi_object *obj,
-				union acpi_operand_object **internal_obj);
-
-acpi_status
-acpi_ut_copy_isimple_to_isimple(union acpi_operand_object *source_obj,
-				union acpi_operand_object *dest_obj);
-
-acpi_status
-acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
-				union acpi_operand_object **dest_desc,
-				struct acpi_walk_state *walk_state);
-
-/*
- * utcreate - Object creation
- */
-acpi_status
-acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action);
-
-/*
- * utdebug - Debug interfaces
- */
-void acpi_ut_init_stack_ptr_trace(void);
-
-void acpi_ut_track_stack_ptr(void);
-
-void
-acpi_ut_trace(u32 line_number,
-	      const char *function_name,
-	      const char *module_name, u32 component_id);
-
-void
-acpi_ut_trace_ptr(u32 line_number,
-		  const char *function_name,
-		  const char *module_name, u32 component_id, void *pointer);
-
-void
-acpi_ut_trace_u32(u32 line_number,
-		  const char *function_name,
-		  const char *module_name, u32 component_id, u32 integer);
-
-void
-acpi_ut_trace_str(u32 line_number,
-		  const char *function_name,
-		  const char *module_name, u32 component_id, char *string);
-
-void
-acpi_ut_exit(u32 line_number,
-	     const char *function_name,
-	     const char *module_name, u32 component_id);
-
-void
-acpi_ut_status_exit(u32 line_number,
-		    const char *function_name,
-		    const char *module_name,
-		    u32 component_id, acpi_status status);
-
-void
-acpi_ut_value_exit(u32 line_number,
-		   const char *function_name,
-		   const char *module_name,
-		   u32 component_id, acpi_integer value);
-
-void
-acpi_ut_ptr_exit(u32 line_number,
-		 const char *function_name,
-		 const char *module_name, u32 component_id, u8 *ptr);
-
-void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id);
-
-void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display);
-
-void acpi_ut_report_error(char *module_name, u32 line_number);
-
-void acpi_ut_report_info(char *module_name, u32 line_number);
-
-void acpi_ut_report_warning(char *module_name, u32 line_number);
-
-/* Error and message reporting interfaces */
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_debug_print(u32 requested_debug_level,
-		    u32 line_number,
-		    const char *function_name,
-		    const char *module_name,
-		    u32 component_id,
-		    const char *format, ...) ACPI_PRINTF_LIKE(6);
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_debug_print_raw(u32 requested_debug_level,
-			u32 line_number,
-			const char *function_name,
-			const char *module_name,
-			u32 component_id,
-			const char *format, ...) ACPI_PRINTF_LIKE(6);
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_error(const char *module_name,
-	      u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_exception(const char *module_name,
-		  u32 line_number,
-		  acpi_status status,
-		  const char *format, ...) ACPI_PRINTF_LIKE(4);
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_warning(const char *module_name,
-		u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
-
-void ACPI_INTERNAL_VAR_XFACE
-acpi_ut_info(const char *module_name,
-	     u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
-
-/*
- * utdelete - Object deletion and reference counts
- */
-void acpi_ut_add_reference(union acpi_operand_object *object);
-
-void acpi_ut_remove_reference(union acpi_operand_object *object);
-
-void acpi_ut_delete_internal_package_object(union acpi_operand_object *object);
-
-void acpi_ut_delete_internal_simple_object(union acpi_operand_object *object);
-
-void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list);
-
-/*
- * uteval - object evaluation
- */
-acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state);
-
-acpi_status
-acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
-			char *path,
-			u32 expected_return_btypes,
-			union acpi_operand_object **return_desc);
-
-acpi_status
-acpi_ut_evaluate_numeric_object(char *object_name,
-				struct acpi_namespace_node *device_node,
-				acpi_integer * address);
-
-acpi_status
-acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
-		    struct acpica_device_id *hid);
-
-acpi_status
-acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
-		    struct acpi_compatible_id_list **return_cid_list);
-
-acpi_status
-acpi_ut_execute_STA(struct acpi_namespace_node *device_node,
-		    u32 * status_flags);
-
-acpi_status
-acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
-		    struct acpica_device_id *uid);
-
-acpi_status
-acpi_ut_execute_sxds(struct acpi_namespace_node *device_node, u8 * highest);
-
-/*
- * utobject - internal object create/delete/cache routines
- */
-union acpi_operand_object *acpi_ut_create_internal_object_dbg(const char
-							      *module_name,
-							      u32 line_number,
-							      u32 component_id,
-							      acpi_object_type
-							      type);
-
-void *acpi_ut_allocate_object_desc_dbg(const char *module_name,
-				       u32 line_number, u32 component_id);
-
-#define acpi_ut_create_internal_object(t) acpi_ut_create_internal_object_dbg (_acpi_module_name,__LINE__,_COMPONENT,t)
-#define acpi_ut_allocate_object_desc()  acpi_ut_allocate_object_desc_dbg (_acpi_module_name,__LINE__,_COMPONENT)
-
-void acpi_ut_delete_object_desc(union acpi_operand_object *object);
-
-u8 acpi_ut_valid_internal_object(void *object);
-
-union acpi_operand_object *acpi_ut_create_package_object(u32 count);
-
-union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size);
-
-union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size);
-
-acpi_status
-acpi_ut_get_object_size(union acpi_operand_object *obj, acpi_size * obj_length);
-
-/*
- * utstate - Generic state creation/cache routines
- */
-void
-acpi_ut_push_generic_state(union acpi_generic_state **list_head,
-			   union acpi_generic_state *state);
-
-union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
-						    **list_head);
-
-union acpi_generic_state *acpi_ut_create_generic_state(void);
-
-struct acpi_thread_state *acpi_ut_create_thread_state(void);
-
-union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
-						      *object, u16 action);
-
-union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
-						   void *external_object,
-						   u16 index);
-
-acpi_status
-acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
-				     u16 action,
-				     union acpi_generic_state **state_list);
-
-#ifdef	ACPI_FUTURE_USAGE
-acpi_status
-acpi_ut_create_pkg_state_and_push(void *internal_object,
-				  void *external_object,
-				  u16 index,
-				  union acpi_generic_state **state_list);
-#endif				/* ACPI_FUTURE_USAGE */
-
-union acpi_generic_state *acpi_ut_create_control_state(void);
-
-void acpi_ut_delete_generic_state(union acpi_generic_state *state);
-
-/*
- * utmath
- */
-acpi_status
-acpi_ut_divide(acpi_integer in_dividend,
-	       acpi_integer in_divisor,
-	       acpi_integer * out_quotient, acpi_integer * out_remainder);
-
-acpi_status
-acpi_ut_short_divide(acpi_integer in_dividend,
-		     u32 divisor,
-		     acpi_integer * out_quotient, u32 * out_remainder);
-
-/*
- * utmisc
- */
-const char *acpi_ut_validate_exception(acpi_status status);
-
-u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
-
-acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
-
-void acpi_ut_release_owner_id(acpi_owner_id * owner_id);
-
-acpi_status
-acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
-			  void *target_object,
-			  acpi_pkg_callback walk_callback, void *context);
-
-void acpi_ut_strupr(char *src_string);
-
-void acpi_ut_print_string(char *string, u8 max_length);
-
-u8 acpi_ut_valid_acpi_name(u32 name);
-
-acpi_name acpi_ut_repair_name(char *name);
-
-u8 acpi_ut_valid_acpi_char(char character, u32 position);
-
-acpi_status
-acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer);
-
-/* Values for Base above (16=Hex, 10=Decimal) */
-
-#define ACPI_ANY_BASE        0
-
-u32 acpi_ut_dword_byte_swap(u32 value);
-
-void acpi_ut_set_integer_width(u8 revision);
-
-#ifdef ACPI_DEBUG_OUTPUT
-void
-acpi_ut_display_init_pathname(u8 type,
-			      struct acpi_namespace_node *obj_handle,
-			      char *path);
-#endif
-
-/*
- * utresrc
- */
-acpi_status
-acpi_ut_walk_aml_resources(u8 * aml,
-			   acpi_size aml_length,
-			   acpi_walk_aml_callback user_function,
-			   void **context);
-
-acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index);
-
-u32 acpi_ut_get_descriptor_length(void *aml);
-
-u16 acpi_ut_get_resource_length(void *aml);
-
-u8 acpi_ut_get_resource_header_length(void *aml);
-
-u8 acpi_ut_get_resource_type(void *aml);
-
-acpi_status
-acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc,
-			     u8 ** end_tag);
-
-/*
- * utmutex - mutex support
- */
-acpi_status acpi_ut_mutex_initialize(void);
-
-void acpi_ut_mutex_terminate(void);
-
-acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id);
-
-acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id);
-
-/*
- * utalloc - memory allocation and object caching
- */
-acpi_status acpi_ut_create_caches(void);
-
-acpi_status acpi_ut_delete_caches(void);
-
-acpi_status acpi_ut_validate_buffer(struct acpi_buffer *buffer);
-
-acpi_status
-acpi_ut_initialize_buffer(struct acpi_buffer *buffer,
-			  acpi_size required_length);
-
-void *acpi_ut_allocate(acpi_size size,
-		       u32 component, const char *module, u32 line);
-
-void *acpi_ut_allocate_zeroed(acpi_size size,
-			      u32 component, const char *module, u32 line);
-
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-void *acpi_ut_allocate_and_track(acpi_size size,
-				 u32 component, const char *module, u32 line);
-
-void *acpi_ut_allocate_zeroed_and_track(acpi_size size,
-					u32 component,
-					const char *module, u32 line);
-
-void
-acpi_ut_free_and_track(void *address,
-		       u32 component, const char *module, u32 line);
-
-#ifdef	ACPI_FUTURE_USAGE
-void acpi_ut_dump_allocation_info(void);
-#endif				/* ACPI_FUTURE_USAGE */
-
-void acpi_ut_dump_allocations(u32 component, const char *module);
-
-acpi_status
-acpi_ut_create_list(char *list_name,
-		    u16 object_size, struct acpi_memory_list **return_cache);
-
-#endif
-
-#endif				/* _ACUTILS_H */
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index fcd2572..e62f10d 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -44,14 +44,26 @@
 #ifndef __ACENV_H__
 #define __ACENV_H__
 
-/*
+/* Types for ACPI_MUTEX_TYPE */
+
+#define ACPI_BINARY_SEMAPHORE       0
+#define ACPI_OSL_MUTEX              1
+
+/* Types for DEBUGGER_THREADING */
+
+#define DEBUGGER_SINGLE_THREADED    0
+#define DEBUGGER_MULTI_THREADED     1
+
+/******************************************************************************
+ *
  * Configuration for ACPI tools and utilities
- */
+ *
+ *****************************************************************************/
 
 #ifdef ACPI_LIBRARY
 /*
  * Note: The non-debug version of the acpi_library does not contain any
- * debug support, for minimimal size. The debug version uses ACPI_FULL_DEBUG
+ * debug support, for minimal size. The debug version uses ACPI_FULL_DEBUG
  */
 #define ACPI_USE_LOCAL_CACHE
 #endif
@@ -75,17 +87,6 @@
 #define ACPI_DBG_TRACK_ALLOCATIONS
 #endif
 
-#ifdef ACPI_DASM_APP
-#ifndef MSDOS
-#define ACPI_DEBUG_OUTPUT
-#endif
-#define ACPI_APPLICATION
-#define ACPI_DISASSEMBLER
-#define ACPI_NO_METHOD_EXECUTION
-#define ACPI_LARGE_NAMESPACE_NODE
-#define ACPI_DATA_TABLE_DISASSEMBLY
-#endif
-
 #ifdef ACPI_APPLICATION
 #define ACPI_USE_SYSTEM_CLIBRARY
 #define ACPI_USE_LOCAL_CACHE
@@ -179,6 +180,19 @@
 
 /*! [End] no source code translation !*/
 
+/******************************************************************************
+ *
+ * Miscellaneous configuration
+ *
+ *****************************************************************************/
+
+/*
+ * Are mutexes supported by the host? Default is no; use binary semaphores.
+ */
+#ifndef ACPI_MUTEX_TYPE
+#define ACPI_MUTEX_TYPE             ACPI_BINARY_SEMAPHORE
+#endif
+
 /*
  * Debugger threading model
  * Use single threaded if the entire subsystem is contained in an application
@@ -187,9 +201,6 @@
  * By default the model is single threaded if ACPI_APPLICATION is set,
  * multi-threaded if ACPI_APPLICATION is not set.
  */
-#define DEBUGGER_SINGLE_THREADED    0
-#define DEBUGGER_MULTI_THREADED     1
-
 #ifndef DEBUGGER_THREADING
 #ifdef ACPI_APPLICATION
 #define DEBUGGER_THREADING          DEBUGGER_SINGLE_THREADED
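
ACPI_MUTEX_TYPE defaults here to ACPI_BINARY_SEMAPHORE (and aclinux.h selects the same value below), so the acpi_os_*_mutex() calls made by the core expand, through the actypes.h macros earlier in this patch, into the semaphore OSL. A minimal usage sketch, not part of this patch; the function name is illustrative and error handling is reduced to the status checks.

/* Illustrative only: mutex usage that maps onto binary semaphores */
static acpi_mutex example_lock;

static void example_lock_usage(void)
{
	if (ACPI_FAILURE(acpi_os_create_mutex(&example_lock)))
		return;

	if (ACPI_SUCCESS(acpi_os_acquire_mutex(example_lock,
					       ACPI_WAIT_FOREVER))) {
		/* ... critical section ... */
		acpi_os_release_mutex(example_lock);
	}

	acpi_os_delete_mutex(example_lock);
}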
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 0515e75..6d49b2a 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -46,6 +46,7 @@
 
 #define ACPI_USE_SYSTEM_CLIBRARY
 #define ACPI_USE_DO_WHILE_0
+#define ACPI_MUTEX_TYPE             ACPI_BINARY_SEMAPHORE
 
 #ifdef __KERNEL__
 
@@ -70,9 +71,6 @@
 #define ACPI_EXPORT_SYMBOL(symbol)  EXPORT_SYMBOL(symbol);
 #define strtoul                     simple_strtoul
 
-/* Full namespace pathname length limit - arbitrary */
-#define ACPI_PATHNAME_MAX              256
-
 #else				/* !__KERNEL__ */
 
 #include <stdarg.h>
diff --git a/include/asm-frv/Kbuild b/include/asm-frv/Kbuild
index 1f44e7c..0f8956d 100644
--- a/include/asm-frv/Kbuild
+++ b/include/asm-frv/Kbuild
@@ -3,4 +3,3 @@
 header-y += registers.h
 
 unifdef-y += termios.h
-unifdef-y += swab.h
diff --git a/include/asm-frv/byteorder.h b/include/asm-frv/byteorder.h
index 1187e51..f29b759 100644
--- a/include/asm-frv/byteorder.h
+++ b/include/asm-frv/byteorder.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_BYTEORDER_H
 #define _ASM_BYTEORDER_H
 
-#include <asm/swab.h>
 #include <linux/byteorder/big_endian.h>
 
 #endif /* _ASM_BYTEORDER_H */
diff --git a/include/asm-frv/mmu.h b/include/asm-frv/mmu.h
index 22c0371..86ca0e8 100644
--- a/include/asm-frv/mmu.h
+++ b/include/asm-frv/mmu.h
@@ -22,7 +22,6 @@
 	unsigned long	dtlb_ptd_mapping;	/* [DAMR5] PTD mapping for dtlb cached PGE */
 
 #else
-	struct vm_list_struct	*vmlist;
 	unsigned long		end_brk;
 
 #endif
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index 1870d5e..70d1855 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -31,6 +31,7 @@
 unifdef-y += sockios.h
 unifdef-y += stat.h
 unifdef-y += statfs.h
+unifdef-y += swab.h
 unifdef-y += termbits.h
 unifdef-y += termios.h
 unifdef-y += types.h
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 72ebe91..8e6d0ca 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -301,7 +301,7 @@
  * track_pfn_vma_new is called when a _new_ pfn mapping is being established
  * for physical range indicated by pfn and size.
  */
-static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 					unsigned long pfn, unsigned long size)
 {
 	return 0;
@@ -332,7 +332,7 @@
 {
 }
 #else
-extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 				unsigned long pfn, unsigned long size);
 extern int track_pfn_vma_copy(struct vm_area_struct *vma);
 extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
index 89061c1..763e3b0 100644
--- a/include/asm-generic/rtc.h
+++ b/include/asm-generic/rtc.h
@@ -42,7 +42,7 @@
 	return uip;
 }
 
-static inline unsigned int get_rtc_time(struct rtc_time *time)
+static inline unsigned int __get_rtc_time(struct rtc_time *time)
 {
 	unsigned char ctrl;
 	unsigned long flags;
@@ -108,8 +108,12 @@
 	return RTC_24H;
 }
 
+#ifndef get_rtc_time
+#define get_rtc_time	__get_rtc_time
+#endif
+
 /* Set the current date and time in the real time clock. */
-static inline int set_rtc_time(struct rtc_time *time)
+static inline int __set_rtc_time(struct rtc_time *time)
 {
 	unsigned long flags;
 	unsigned char mon, day, hrs, min, sec;
@@ -190,11 +194,15 @@
 	return 0;
 }
 
+#ifndef set_rtc_time
+#define set_rtc_time	__set_rtc_time
+#endif
+
 static inline unsigned int get_rtc_ss(void)
 {
 	struct rtc_time h;
 
-	get_rtc_time(&h);
+	__get_rtc_time(&h);
 	return h.tm_sec;
 }
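
The hunk above renames the generic CMOS accessors to __get_rtc_time() and __set_rtc_time() and only aliases get_rtc_time/set_rtc_time back to them when an architecture has not already defined those macros. Assuming that intent, a minimal sketch of an arch header overriding one accessor while keeping the generic code available (my_platform_read_rtc() is a hypothetical helper, not part of this patch):

	#include <linux/rtc.h>

	/* hypothetical arch header: provide our own reader, keep the rest generic */
	static inline unsigned int arch_get_rtc_time(struct rtc_time *time)
	{
		return my_platform_read_rtc(time);	/* hypothetical platform helper */
	}
	#define get_rtc_time	arch_get_rtc_time

	#include <asm-generic/rtc.h>	/* __get_rtc_time()/__set_rtc_time() stay usable */
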
 
diff --git a/include/asm-m32r/Kbuild b/include/asm-m32r/Kbuild
index 27b108a..c68e168 100644
--- a/include/asm-m32r/Kbuild
+++ b/include/asm-m32r/Kbuild
@@ -1,2 +1 @@
 include include/asm-generic/Kbuild.asm
-unifdef-y += swab.h
diff --git a/include/asm-m32r/byteorder.h b/include/asm-m32r/byteorder.h
index 61ff9cf..21855d8 100644
--- a/include/asm-m32r/byteorder.h
+++ b/include/asm-m32r/byteorder.h
@@ -1,8 +1,6 @@
 #ifndef _ASM_M32R_BYTEORDER_H
 #define _ASM_M32R_BYTEORDER_H
 
-#include <asm/swab.h>
-
 #if defined(__LITTLE_ENDIAN__)
 #  include <linux/byteorder/little_endian.h>
 #else
diff --git a/include/asm-m32r/mmu.h b/include/asm-m32r/mmu.h
index d9bd724..150cb92 100644
--- a/include/asm-m32r/mmu.h
+++ b/include/asm-m32r/mmu.h
@@ -4,7 +4,6 @@
 #if !defined(CONFIG_MMU)
 
 typedef struct {
-	struct vm_list_struct	*vmlist;
 	unsigned long		end_brk;
 } mm_context_t;
 
diff --git a/include/asm-m68k/Kbuild b/include/asm-m68k/Kbuild
index 52fd96b..1a922fa 100644
--- a/include/asm-m68k/Kbuild
+++ b/include/asm-m68k/Kbuild
@@ -1,3 +1,2 @@
 include include/asm-generic/Kbuild.asm
 header-y += cachectl.h
-unifdef-y += swab.h
diff --git a/include/asm-m68k/byteorder.h b/include/asm-m68k/byteorder.h
index 3008665..31b260a 100644
--- a/include/asm-m68k/byteorder.h
+++ b/include/asm-m68k/byteorder.h
@@ -1,7 +1,6 @@
 #ifndef _M68K_BYTEORDER_H
 #define _M68K_BYTEORDER_H
 
-#include <asm/swab.h>
 #include <linux/byteorder/big_endian.h>
 
 #endif /* _M68K_BYTEORDER_H */
diff --git a/include/asm-m68k/unistd.h b/include/asm-m68k/unistd.h
index 965abb8..3c19027 100644
--- a/include/asm-m68k/unistd.h
+++ b/include/asm-m68k/unistd.h
@@ -5,6 +5,7 @@
  * This file contains the system call numbers.
  */
 
+#define __NR_restart_syscall	  0
 #define __NR_exit		  1
 #define __NR_fork		  2
 #define __NR_read		  3
@@ -359,9 +360,6 @@
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_RT_SIGACTION
 
-/* whitelist for checksyscalls */
-#define __IGNORE_restart_syscall
-
 /*
  * "Conditional" syscalls
  *
diff --git a/include/asm-mn10300/Kbuild b/include/asm-mn10300/Kbuild
index 27b108a..c68e168 100644
--- a/include/asm-mn10300/Kbuild
+++ b/include/asm-mn10300/Kbuild
@@ -1,2 +1 @@
 include include/asm-generic/Kbuild.asm
-unifdef-y += swab.h
diff --git a/include/asm-mn10300/byteorder.h b/include/asm-mn10300/byteorder.h
index 45b18de..5dd0bdd 100644
--- a/include/asm-mn10300/byteorder.h
+++ b/include/asm-mn10300/byteorder.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_BYTEORDER_H
 #define _ASM_BYTEORDER_H
 
-#include <asm/swab.h>
 #include <linux/byteorder/little_endian.h>
 
 #endif /* _ASM_BYTEORDER_H */
diff --git a/include/asm-xtensa/asmmacro.h b/include/asm-xtensa/asmmacro.h
deleted file mode 100644
index 76915ca..0000000
--- a/include/asm-xtensa/asmmacro.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * include/asm-xtensa/asmmacro.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_ASMMACRO_H
-#define _XTENSA_ASMMACRO_H
-
-#include <asm/variant/core.h>
-
-/*
- * Some little helpers for loops. Use zero-overhead-loops
- * where applicable and if supported by the processor.
- *
- * __loopi ar, at, size, inc
- *         ar	register initialized with the start address
- *	   at	scratch register used by macro
- *	   size	size immediate value
- *	   inc	increment
- *
- * __loops ar, as, at, inc_log2[, mask_log2][, cond][, ncond]
- *	   ar	register initialized with the start address
- *	   as	register initialized with the size
- *	   at	scratch register use by macro
- *	   inc_log2	increment [in log2]
- *	   mask_log2	mask [in log2]
- *	   cond		true condition (used in loop'cond')
- *	   ncond	false condition (used in b'ncond')
- *
- * __loop  as
- *	   restart loop. 'as' register must not have been modified!
- *
- * __endla ar, at, incr
- *	   ar	start address (modified)
- *	   as	scratch register used by macro
- *	   inc	increment
- */
-
-/*
- * loop for given size as immediate
- */
-
-	.macro	__loopi ar, at, size, incr
-
-#if XCHAL_HAVE_LOOPS
-		movi	\at, ((\size + \incr - 1) / (\incr))
-		loop	\at, 99f
-#else
-		addi	\at, \ar, \size
-		98:
-#endif
-
-	.endm
-
-/*
- * loop for given size in register
- */
-
-	.macro	__loops	ar, as, at, incr_log2, mask_log2, cond, ncond
-
-#if XCHAL_HAVE_LOOPS
-		.ifgt \incr_log2 - 1
-			addi	\at, \as, (1 << \incr_log2) - 1
-			.ifnc \mask_log2,
-				extui	\at, \at, \incr_log2, \mask_log2
-			.else
-				srli	\at, \at, \incr_log2
-			.endif
-		.endif
-		loop\cond	\at, 99f
-#else
-		.ifnc \mask_log2,
-			extui	\at, \as, \incr_log2, \mask_log2
-		.else
-			.ifnc \ncond,
-				srli	\at, \as, \incr_log2
-			.endif
-		.endif
-		.ifnc \ncond,
-			b\ncond	\at, 99f
-
-		.endif
-		.ifnc \mask_log2,
-			slli	\at, \at, \incr_log2
-			add	\at, \ar, \at
-		.else
-			add	\at, \ar, \as
-		.endif
-#endif
-		98:
-
-	.endm
-
-/*
- * loop from ar to ax
- */
-
-	.macro	__loopt	ar, as, at, incr_log2
-
-#if XCHAL_HAVE_LOOPS
-		sub	\at, \as, \ar
-		.ifgt	\incr_log2 - 1
-			addi	\at, \at, (1 << \incr_log2) - 1
-			srli	\at, \at, \incr_log2
-		.endif
-		loop	\at, 99f
-#else
-		98:
-#endif
-
-	.endm
-
-/*
- * restart loop. registers must be unchanged
- */
-
-	.macro	__loop	as
-
-#if XCHAL_HAVE_LOOPS
-		loop	\as, 99f
-#else
-		98:
-#endif
-
-	.endm
-
-/*
- * end of loop with no increment of the address.
- */
-
-	.macro	__endl	ar, as
-#if !XCHAL_HAVE_LOOPS
-		bltu	\ar, \as, 98b
-#endif
-		99:
-	.endm
-
-/*
- * end of loop with increment of the address.
- */
-
-	.macro	__endla	ar, as, incr
-		addi	\ar, \ar, \incr
-		__endl	\ar \as
-	.endm
-
-
-#endif /* _XTENSA_ASMMACRO_H */
diff --git a/include/asm-xtensa/byteorder.h b/include/asm-xtensa/byteorder.h
deleted file mode 100644
index 765edf1..0000000
--- a/include/asm-xtensa/byteorder.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * include/asm-xtensa/byteorder.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_BYTEORDER_H
-#define _XTENSA_BYTEORDER_H
-
-#include <asm/types.h>
-#include <linux/compiler.h>
-
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
-{
-    __u32 res;
-    /* instruction sequence from Xtensa ISA release 2/2000 */
-    __asm__("ssai     8           \n\t"
-	    "srli     %0, %1, 16  \n\t"
-	    "src      %0, %0, %1  \n\t"
-	    "src      %0, %0, %0  \n\t"
-	    "src      %0, %1, %0  \n"
-	    : "=&a" (res)
-	    : "a" (x)
-	    );
-    return res;
-}
-
-static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
-{
-    /* Given that 'short' values are signed (i.e., can be negative),
-     * we cannot assume that the upper 16-bits of the register are
-     * zero.  We are careful to mask values after shifting.
-     */
-
-    /* There exists an anomaly between xt-gcc and xt-xcc.  xt-gcc
-     * inserts an extui instruction after putting this function inline
-     * to ensure that it uses only the least-significant 16 bits of
-     * the result.  xt-xcc doesn't use an extui, but assumes the
-     * __asm__ macro follows convention that the upper 16 bits of an
-     * 'unsigned short' result are still zero.  This macro doesn't
-     * follow convention; indeed, it leaves garbage in the upper 16
-     * bits of the register.
-
-     * Declaring the temporary variables 'res' and 'tmp' to be 32-bit
-     * types while the return type of the function is a 16-bit type
-     * forces both compilers to insert exactly one extui instruction
-     * (or equivalent) to mask off the upper 16 bits. */
-
-    __u32 res;
-    __u32 tmp;
-
-    __asm__("extui    %1, %2, 8, 8\n\t"
-	    "slli     %0, %2, 8   \n\t"
-	    "or       %0, %0, %1  \n"
-	    : "=&a" (res), "=&a" (tmp)
-	    : "a" (x)
-	    );
-
-    return res;
-}
-
-#define __arch__swab32(x) ___arch__swab32(x)
-#define __arch__swab16(x) ___arch__swab16(x)
-
-#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-#  define __BYTEORDER_HAS_U64__
-#  define __SWAB_64_THRU_32__
-#endif
-
-#ifdef __XTENSA_EL__
-# include <linux/byteorder/little_endian.h>
-#elif defined(__XTENSA_EB__)
-# include <linux/byteorder/big_endian.h>
-#else
-# error processor byte order undefined!
-#endif
-
-#endif /* _XTENSA_BYTEORDER_H */
diff --git a/include/asm-xtensa/cache.h b/include/asm-xtensa/cache.h
deleted file mode 100644
index 3bba2a5..0000000
--- a/include/asm-xtensa/cache.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * include/asm-xtensa/cache.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_CACHE_H
-#define _XTENSA_CACHE_H
-
-#include <asm/variant/core.h>
-
-#define L1_CACHE_SHIFT	XCHAL_DCACHE_LINEWIDTH
-#define L1_CACHE_BYTES	XCHAL_DCACHE_LINESIZE
-#define SMP_CACHE_BYTES	L1_CACHE_BYTES
-
-#define DCACHE_WAY_SIZE	(XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)
-#define ICACHE_WAY_SIZE	(XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS)
-#define DCACHE_WAY_SHIFT (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH)
-#define ICACHE_WAY_SHIFT (XCHAL_ICACHE_SETWIDTH + XCHAL_ICACHE_LINEWIDTH)
-
-/* Maximum cache size per way. */
-#if DCACHE_WAY_SIZE >= ICACHE_WAY_SIZE
-# define CACHE_WAY_SIZE DCACHE_WAY_SIZE
-#else
-# define CACHE_WAY_SIZE ICACHE_WAY_SIZE
-#endif
-
-
-#endif	/* _XTENSA_CACHE_H */
diff --git a/include/asm-xtensa/checksum.h b/include/asm-xtensa/checksum.h
deleted file mode 100644
index 23534c6..0000000
--- a/include/asm-xtensa/checksum.h
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * include/asm-xtensa/checksum.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_CHECKSUM_H
-#define _XTENSA_CHECKSUM_H
-
-#include <linux/in6.h>
-#include <asm/variant/core.h>
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
-
-/*
- * the same as csum_partial, but copies from src while it
- * checksums, and handles user-space pointer exceptions correctly, when needed.
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum,
-						   int *src_err_ptr, int *dst_err_ptr);
-
-/*
- *	Note: when you get a NULL pointer exception here this means someone
- *	passed in an incorrect kernel address to one of these functions.
- *
- *	If you use these functions directly please don't forget the access_ok().
- */
-static inline
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
-					int len, __wsum sum)
-{
-	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
-}
-
-static inline
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
-						int len, __wsum sum, int *err_ptr)
-{
-	return csum_partial_copy_generic((__force const void *)src, dst,
-					len, sum, err_ptr, NULL);
-}
-
-/*
- *	Fold a partial checksum
- */
-
-static __inline__ __sum16 csum_fold(__wsum sum)
-{
-	unsigned int __dummy;
-	__asm__("extui	%1, %0, 16, 16\n\t"
-		"extui	%0 ,%0, 0, 16\n\t"
-		"add	%0, %0, %1\n\t"
-		"slli	%1, %0, 16\n\t"
-		"add	%0, %0, %1\n\t"
-		"extui	%0, %0, 16, 16\n\t"
-		"neg	%0, %0\n\t"
-		"addi	%0, %0, -1\n\t"
-		"extui	%0, %0, 0, 16\n\t"
-		: "=r" (sum), "=&r" (__dummy)
-		: "0" (sum));
-	return (__force __sum16)sum;
-}
-
-/*
- *	This is a version of ip_compute_csum() optimized for IP headers,
- *	which always checksum on 4 octet boundaries.
- */
-static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
-{
-	unsigned int sum, tmp, endaddr;
-
-	__asm__ __volatile__(
-		"sub		%0, %0, %0\n\t"
-#if XCHAL_HAVE_LOOPS
-		"loopgtz	%2, 2f\n\t"
-#else
-		"beqz		%2, 2f\n\t"
-		"slli		%4, %2, 2\n\t"
-		"add		%4, %4, %1\n\t"
-		"0:\t"
-#endif
-		"l32i		%3, %1, 0\n\t"
-		"add		%0, %0, %3\n\t"
-		"bgeu		%0, %3, 1f\n\t"
-		"addi		%0, %0, 1\n\t"
-		"1:\t"
-		"addi		%1, %1, 4\n\t"
-#if !XCHAL_HAVE_LOOPS
-		"blt		%1, %4, 0b\n\t"
-#endif
-		"2:\t"
-	/* Since the input registers which are loaded with iph and ihl
-	   are modified, we must also specify them as outputs, or gcc
-	   will assume they contain their original values. */
-		: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), "=&r" (endaddr)
-		: "1" (iph), "2" (ihl));
-
-	return	csum_fold(sum);
-}
-
-static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
-						   unsigned short len,
-						   unsigned short proto,
-						   __wsum sum)
-{
-
-#ifdef __XTENSA_EL__
-	unsigned long len_proto = (len + proto) << 8;
-#elif defined(__XTENSA_EB__)
-	unsigned long len_proto = len + proto;
-#else
-# error processor byte order undefined!
-#endif
-	__asm__("add	%0, %0, %1\n\t"
-		"bgeu	%0, %1, 1f\n\t"
-		"addi	%0, %0, 1\n\t"
-		"1:\t"
-		"add	%0, %0, %2\n\t"
-		"bgeu	%0, %2, 1f\n\t"
-		"addi	%0, %0, 1\n\t"
-		"1:\t"
-		"add	%0, %0, %3\n\t"
-		"bgeu	%0, %3, 1f\n\t"
-		"addi	%0, %0, 1\n\t"
-		"1:\t"
-		: "=r" (sum), "=r" (len_proto)
-		: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum));
-	return sum;
-}
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-						       unsigned short len,
-						       unsigned short proto,
-						       __wsum sum)
-{
-	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
-}
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-
-static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
-{
-    return csum_fold (csum_partial(buff, len, 0));
-}
-
-#define _HAVE_ARCH_IPV6_CSUM
-static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
-					  const struct in6_addr *daddr,
-					  __u32 len, unsigned short proto,
-					  __wsum sum)
-{
-	unsigned int __dummy;
-	__asm__("l32i	%1, %2, 0\n\t"
-		"add	%0, %0, %1\n\t"
-		"bgeu	%0, %1, 1f\n\t"
-		"addi	%0, %0, 1\n\t"
-		"1:\t"
-		"l32i	%1, %2, 4\n\t"
-		"add	%0, %0, %1\n\t"
-		"bgeu	%0, %1, 1f\n\t"
-		"addi	%0, %0, 1\n\t"
-		"1:\t"
-		"l32i	%1, %2, 8\n\t"
-		"add	%0, %0, %1\n\t"
-		"bgeu	%0, %1, 1f\n\t"
-		"addi	%0, %0, 1\n\t"
-		"1:\t"
-		"l32i	%1, %2, 12\n\t"
-		"add	%0, %0, %1\n\t"
-		"bgeu	%0, %1, 1f\n\t"
-		"addi	%0, %0, 1\n\t"
-		"1:\t"
-		"l32i	%1, %3, 0\n\t"
-		"add	%0, %0, %1\n\t"
-		"bgeu	%0, %1, 1f\n\t"
-		"addi	%0, %0, 1\n\t"
-		"1:\t"
-		"l32i	%1, %3, 4\n\t"
-		"add	%0, %0, %1\n\t"
-		"bgeu	%0, %1, 1f\n\t"
-		"addi	%0, %0, 1\n\t"
-		"1:\t"
-		"l32i	%1, %3, 8\n\t"
-		"add	%0, %0, %1\n\t"
-		"bgeu	%0, %1, 1f\n\t"
-		"addi	%0, %0, 1\n\t"
-		"1:\t"
-		"l32i	%1, %3, 12\n\t"
-		"add	%0, %0, %1\n\t"
-		"bgeu	%0, %1, 1f\n\t"
-		"addi	%0, %0, 1\n\t"
-		"1:\t"
-		"add	%0, %0, %4\n\t"
-		"bgeu	%0, %4, 1f\n\t"
-		"addi	%0, %0, 1\n\t"
-		"1:\t"
-		"add	%0, %0, %5\n\t"
-		"bgeu	%0, %5, 1f\n\t"
-		"addi	%0, %0, 1\n\t"
-		"1:\t"
-		: "=r" (sum), "=&r" (__dummy)
-		: "r" (saddr), "r" (daddr),
-		  "r" (htonl(len)), "r" (htonl(proto)), "0" (sum));
-
-	return csum_fold(sum);
-}
-
-/*
- *	Copy and checksum to user
- */
-#define HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user(const void *src, void __user *dst,
-				    int len, __wsum sum, int *err_ptr)
-{
-	if (access_ok(VERIFY_WRITE, dst, len))
-		return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr);
-
-	if (len)
-		*err_ptr = -EFAULT;
-
-	return (__force __wsum)-1; /* invalid checksum */
-}
-#endif
diff --git a/include/asm-xtensa/coprocessor.h b/include/asm-xtensa/coprocessor.h
deleted file mode 100644
index 1cbcf90..0000000
--- a/include/asm-xtensa/coprocessor.h
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * include/asm-xtensa/coprocessor.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2003 - 2007 Tensilica Inc.
- */
-
-
-#ifndef _XTENSA_COPROCESSOR_H
-#define _XTENSA_COPROCESSOR_H
-
-#include <linux/stringify.h>
-#include <asm/variant/tie.h>
-#include <asm/types.h>
-
-#ifdef __ASSEMBLY__
-# include <asm/variant/tie-asm.h>
-
-.macro	xchal_sa_start  a b
-	.set .Lxchal_pofs_, 0
-	.set .Lxchal_ofs_, 0
-.endm
-
-.macro	xchal_sa_align  ptr minofs maxofs ofsalign totalign
-	.set	.Lxchal_ofs_, .Lxchal_ofs_ + .Lxchal_pofs_ + \totalign - 1
-	.set	.Lxchal_ofs_, (.Lxchal_ofs_ & -\totalign) - .Lxchal_pofs_
-.endm
-
-#define _SELECT	(  XTHAL_SAS_TIE | XTHAL_SAS_OPT \
-		 | XTHAL_SAS_CC \
-		 | XTHAL_SAS_CALR | XTHAL_SAS_CALE )
-
-.macro save_xtregs_opt ptr clb at1 at2 at3 at4 offset
-	.if XTREGS_OPT_SIZE > 0
-		addi	\clb, \ptr, \offset
-		xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
-	.endif
-.endm
-
-.macro load_xtregs_opt ptr clb at1 at2 at3 at4 offset
-	.if XTREGS_OPT_SIZE > 0
-		addi	\clb, \ptr, \offset
-		xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
-	.endif
-.endm
-#undef _SELECT
-
-#define _SELECT	(  XTHAL_SAS_TIE | XTHAL_SAS_OPT \
-		 | XTHAL_SAS_NOCC \
-		 | XTHAL_SAS_CALR | XTHAL_SAS_CALE | XTHAL_SAS_GLOB )
-
-.macro save_xtregs_user ptr clb at1 at2 at3 at4 offset
-	.if XTREGS_USER_SIZE > 0
-		addi	\clb, \ptr, \offset
-		xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
-	.endif
-.endm
-
-.macro load_xtregs_user ptr clb at1 at2 at3 at4 offset
-	.if XTREGS_USER_SIZE > 0
-		addi	\clb, \ptr, \offset
-		xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
-	.endif
-.endm
-#undef _SELECT
-
-
-
-#endif	/* __ASSEMBLY__ */
-
-/*
- * XTENSA_HAVE_COPROCESSOR(x) returns 1 if coprocessor x is configured.
- *
- * XTENSA_HAVE_IO_PORT(x) returns 1 if io-port x is configured.
- *
- */
-
-#define XTENSA_HAVE_COPROCESSOR(x)					\
-	((XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK) & (1 << (x)))
-#define XTENSA_HAVE_COPROCESSORS					\
-	(XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK)
-#define XTENSA_HAVE_IO_PORT(x)						\
-	(XCHAL_CP_PORT_MASK & (1 << (x)))
-#define XTENSA_HAVE_IO_PORTS						\
-	XCHAL_CP_PORT_MASK
-
-#ifndef __ASSEMBLY__
-
-
-#if XCHAL_HAVE_CP
-
-#define RSR_CPENABLE(x)	do {						  \
-	__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
-	} while(0);
-#define WSR_CPENABLE(x)	do {						  \
-  	__asm__ __volatile__("wsr %0," __stringify(CPENABLE) "; rsync" 	  \
-	    		     :: "a" (x));				  \
-	} while(0);
-
-#endif /* XCHAL_HAVE_CP */
-
-
-/*
- * Additional registers.
- * We define three types of additional registers:
- *  ext: extra registers that are used by the compiler
- *  cpn: optional registers that can be used by a user application
- *  cpX: coprocessor registers that can only be used if the corresponding
- *       CPENABLE bit is set.
- */
-
-#define XCHAL_SA_REG(list,cc,abi,type,y,name,z,align,size,...)	\
-	__REG ## list (cc, abi, type, name, size, align)
-
-#define __REG0(cc,abi,t,name,s,a)	__REG0_ ## cc (abi,name)
-#define __REG1(cc,abi,t,name,s,a)	__REG1_ ## cc (name)
-#define __REG2(cc,abi,type,...)		__REG2_ ## type (__VA_ARGS__)
-
-#define __REG0_0(abi,name)
-#define __REG0_1(abi,name)		__REG0_1 ## abi (name)
-#define __REG0_10(name)	__u32 name;
-#define __REG0_11(name)	__u32 name;
-#define __REG0_12(name)
-
-#define __REG1_0(name)	__u32 name;
-#define __REG1_1(name)
-
-#define __REG2_0(n,s,a)	__u32 name;
-#define __REG2_1(n,s,a)	unsigned char n[s] __attribute__ ((aligned(a)));
-#define __REG2_2(n,s,a) unsigned char n[s] __attribute__ ((aligned(a)));
-
-typedef struct { XCHAL_NCP_SA_LIST(0) } xtregs_opt_t
-	__attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
-typedef struct { XCHAL_NCP_SA_LIST(1) } xtregs_user_t
-	__attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
-
-#if XTENSA_HAVE_COPROCESSORS
-
-typedef struct { XCHAL_CP0_SA_LIST(2) } xtregs_cp0_t
-	__attribute__ ((aligned (XCHAL_CP0_SA_ALIGN)));
-typedef struct { XCHAL_CP1_SA_LIST(2) } xtregs_cp1_t
-	__attribute__ ((aligned (XCHAL_CP1_SA_ALIGN)));
-typedef struct { XCHAL_CP2_SA_LIST(2) } xtregs_cp2_t
-	__attribute__ ((aligned (XCHAL_CP2_SA_ALIGN)));
-typedef struct { XCHAL_CP3_SA_LIST(2) } xtregs_cp3_t
-	__attribute__ ((aligned (XCHAL_CP3_SA_ALIGN)));
-typedef struct { XCHAL_CP4_SA_LIST(2) } xtregs_cp4_t
-	__attribute__ ((aligned (XCHAL_CP4_SA_ALIGN)));
-typedef struct { XCHAL_CP5_SA_LIST(2) } xtregs_cp5_t
-	__attribute__ ((aligned (XCHAL_CP5_SA_ALIGN)));
-typedef struct { XCHAL_CP6_SA_LIST(2) } xtregs_cp6_t
-	__attribute__ ((aligned (XCHAL_CP6_SA_ALIGN)));
-typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t
-	__attribute__ ((aligned (XCHAL_CP7_SA_ALIGN)));
-
-extern struct thread_info* coprocessor_owner[XCHAL_CP_MAX];
-extern void coprocessor_save(void*, int);
-extern void coprocessor_load(void*, int);
-extern void coprocessor_flush(struct thread_info*, int);
-extern void coprocessor_restore(struct thread_info*, int);
-
-extern void coprocessor_release_all(struct thread_info*);
-extern void coprocessor_flush_all(struct thread_info*);
-
-static inline void coprocessor_clear_cpenable(void)
-{
-	unsigned long i = 0;
-	WSR_CPENABLE(i);
-}
-
-#endif	/* XTENSA_HAVE_COPROCESSORS */
-
-#endif	/* !__ASSEMBLY__ */
-#endif	/* _XTENSA_COPROCESSOR_H */
diff --git a/include/asm-xtensa/irq.h b/include/asm-xtensa/irq.h
deleted file mode 100644
index fc73b7f..0000000
--- a/include/asm-xtensa/irq.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * include/asm-xtensa/irq.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_IRQ_H
-#define _XTENSA_IRQ_H
-
-#include <asm/platform/hardware.h>
-#include <asm/variant/core.h>
-
-#ifndef PLATFORM_NR_IRQS
-# define PLATFORM_NR_IRQS 0
-#endif
-#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + PLATFORM_NR_IRQS)
-
-static __inline__ int irq_canonicalize(int irq)
-{
-	return (irq);
-}
-
-struct irqaction;
-
-#endif	/* _XTENSA_IRQ_H */
diff --git a/include/asm-xtensa/platform.h b/include/asm-xtensa/platform.h
deleted file mode 100644
index 48135a9..0000000
--- a/include/asm-xtensa/platform.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * include/asm-xtensa/platform.h
- *
- * Platform specific functions
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file "COPYING" in the main directory of
- * this archive for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_PLATFORM_H
-#define _XTENSA_PLATFORM_H
-
-#include <linux/types.h>
-#include <linux/pci.h>
-
-#include <asm/bootparam.h>
-
-/*
- * platform_init is called before the mmu is initialized to give the
- * platform an early hook-up. bp_tag_t is a list of configuration tags
- * passed from the boot-loader.
- */
-extern void platform_init(bp_tag_t*);
-
-/*
- * platform_setup is called from setup_arch with a pointer to the command-line
- * string.
- */
-extern void platform_setup (char **);
-
-/*
- * platform_init_irq is called from init_IRQ.
- */
-extern void platform_init_irq (void);
-
-/*
- * platform_restart is called to restart the system.
- */
-extern void platform_restart (void);
-
-/*
- * platform_halt is called to stop the system and halt.
- */
-extern void platform_halt (void);
-
-/*
- * platform_power_off is called to stop the system and power it off.
- */
-extern void platform_power_off (void);
-
-/*
- * platform_idle is called from the idle function.
- */
-extern void platform_idle (void);
-
-/*
- * platform_heartbeat is called every HZ
- */
-extern void platform_heartbeat (void);
-
-/*
- * platform_pcibios_init is called to allow the platform to setup the pci bus.
- */
-extern void platform_pcibios_init (void);
-
-/*
- * platform_pcibios_fixup allows the platform to modify the PCI configuration.
- */
-extern int platform_pcibios_fixup (void);
-
-/*
- * platform_calibrate_ccount calibrates cpu clock freq (CONFIG_XTENSA_CALIBRATE)
- */
-extern void platform_calibrate_ccount (void);
-
-/*
- * platform_get_rtc_time returns RTC seconds (returns 0 for no error)
- */
-extern int platform_get_rtc_time(time_t*);
-
-/*
- * platform_set_rtc_time set RTC seconds (returns 0 for no error)
- */
-extern int platform_set_rtc_time(time_t);
-
-
-#endif	/* _XTENSA_PLATFORM_H */
-
diff --git a/include/asm-xtensa/processor.h b/include/asm-xtensa/processor.h
deleted file mode 100644
index 4918a4e..0000000
--- a/include/asm-xtensa/processor.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * include/asm-xtensa/processor.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_PROCESSOR_H
-#define _XTENSA_PROCESSOR_H
-
-#include <asm/variant/core.h>
-#include <asm/coprocessor.h>
-
-#include <linux/compiler.h>
-#include <asm/ptrace.h>
-#include <asm/types.h>
-#include <asm/regs.h>
-
-/* Assertions. */
-
-#if (XCHAL_HAVE_WINDOWED != 1)
-# error Linux requires the Xtensa Windowed Registers Option.
-#endif
-
-/*
- * User space process size: 1 GB.
- * Windowed call ABI requires caller and callee to be located within the same
- * 1 GB region. The C compiler places trampoline code on the stack for sources
- * that take the address of a nested C function (a feature used by glibc), so
- * the 1 GB requirement applies to the stack as well.
- */
-
-#define TASK_SIZE	__XTENSA_UL_CONST(0x40000000)
-#define STACK_TOP	TASK_SIZE
-#define STACK_TOP_MAX	STACK_TOP
-
-/*
- * General exception cause assigned to debug exceptions. Debug exceptions go
- * to their own vector, rather than the general exception vectors (user,
- * kernel, double); and their specific causes are reported via DEBUGCAUSE
- * rather than EXCCAUSE.  However it is sometimes convenient to redirect debug
- * exceptions to the general exception mechanism.  To do this, an otherwise
- * unused EXCCAUSE value was assigned to debug exceptions for this purpose.
- */
-
-#define EXCCAUSE_MAPPED_DEBUG	63
-
-/*
- * We use DEPC also as a flag to distinguish between double and regular
- * exceptions. For performance reasons, DEPC might contain the value of
- * EXCCAUSE for regular exceptions, so we use this definition to mark a
- * valid double exception address.
- * (Note: We use it in bgeui, so it should be 64, 128, or 256)
- */
-
-#define VALID_DOUBLE_EXCEPTION_ADDRESS	64
-
-/* LOCKLEVEL defines the interrupt level that masks all
- * general-purpose interrupts.
- */
-#define LOCKLEVEL 1
-
-/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
- * registers
- */
-#define WSBITS  (XCHAL_NUM_AREGS / 4)      /* width of WINDOWSTART in bits */
-#define WBBITS  (XCHAL_NUM_AREGS_LOG2 - 2) /* width of WINDOWBASE in bits */
-
-#ifndef __ASSEMBLY__
-
-/* Build a valid return address for the specified call winsize.
- * winsize must be 1 (call4), 2 (call8), or 3 (call12)
- */
-#define MAKE_RA_FOR_CALL(ra,ws)   (((ra) & 0x3fffffff) | (ws) << 30)
-
-/* Convert return address to a valid pc
- * Note: We assume that the stack pointer is in the same 1GB ranges as the ra
- */
-#define MAKE_PC_FROM_RA(ra,sp)    (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
-
-typedef struct {
-    unsigned long seg;
-} mm_segment_t;
-
-struct thread_struct {
-
-	/* kernel's return address and stack pointer for context switching */
-	unsigned long ra; /* kernel's a0: return address and window call size */
-	unsigned long sp; /* kernel's a1: stack pointer */
-
-	mm_segment_t current_ds;    /* see uaccess.h for example uses */
-
-	/* struct xtensa_cpuinfo info; */
-
-	unsigned long bad_vaddr; /* last user fault */
-	unsigned long bad_uaddr; /* last kernel fault accessing user space */
-	unsigned long error_code;
-
-	unsigned long ibreak[XCHAL_NUM_IBREAK];
-	unsigned long dbreaka[XCHAL_NUM_DBREAK];
-	unsigned long dbreakc[XCHAL_NUM_DBREAK];
-
-	/* Make structure 16 bytes aligned. */
-	int align[0] __attribute__ ((aligned(16)));
-};
-
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr()  ({ __label__ _l; _l: &&_l;})
-
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE	(TASK_SIZE / 2)
-
-#define INIT_THREAD  \
-{									\
-	ra:		0, 						\
-	sp:		sizeof(init_stack) + (long) &init_stack,	\
-	current_ds:	{0},						\
-	/*info:		{0}, */						\
-	bad_vaddr:	0,						\
-	bad_uaddr:	0,						\
-	error_code:	0,						\
-}
-
-
-/*
- * Do necessary setup to start up a newly executed thread.
- * Note: We set-up ps as if we did a call4 to the new pc.
- *       set_thread_state in signal.c depends on it.
- */
-#define USER_PS_VALUE ((1 << PS_WOE_BIT) |				\
-                       (1 << PS_CALLINC_SHIFT) |			\
-                       (USER_RING << PS_RING_SHIFT) |			\
-                       (1 << PS_UM_BIT) |				\
-                       (1 << PS_EXCM_BIT))
-
-/* Clearing a0 terminates the backtrace. */
-#define start_thread(regs, new_pc, new_sp) \
-	regs->pc = new_pc; \
-	regs->ps = USER_PS_VALUE; \
-	regs->areg[1] = new_sp; \
-	regs->areg[0] = 0; \
-	regs->wmask = 1; \
-	regs->depc = 0; \
-	regs->windowbase = 0; \
-	regs->windowstart = 1;
-
-/* Forward declaration */
-struct task_struct;
-struct mm_struct;
-
-/* Free all resources held by a thread. */
-#define release_thread(thread) do { } while(0)
-
-/* Prepare to copy thread state - unlazy all lazy status */
-extern void prepare_to_copy(struct task_struct*);
-
-/* Create a kernel thread without removing it from tasklists */
-extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
-/* Copy and release all segment info associated with a VM */
-#define copy_segments(p, mm)	do { } while(0)
-#define release_segments(mm)	do { } while(0)
-#define forget_segments()	do { } while (0)
-
-#define thread_saved_pc(tsk)	(task_pt_regs(tsk)->pc)
-
-extern unsigned long get_wchan(struct task_struct *p);
-
-#define KSTK_EIP(tsk)		(task_pt_regs(tsk)->pc)
-#define KSTK_ESP(tsk)		(task_pt_regs(tsk)->areg[1])
-
-#define cpu_relax()  barrier()
-
-/* Special register access. */
-
-#define WSR(v,sr) __asm__ __volatile__ ("wsr %0,"__stringify(sr) :: "a"(v));
-#define RSR(v,sr) __asm__ __volatile__ ("rsr %0,"__stringify(sr) : "=a"(v));
-
-#define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);})
-#define get_sr(sr) ({unsigned int v; RSR(v,sr); v; })
-
-#endif	/* __ASSEMBLY__ */
-#endif	/* _XTENSA_PROCESSOR_H */
diff --git a/include/asm-xtensa/ptrace.h b/include/asm-xtensa/ptrace.h
deleted file mode 100644
index 089b0db..0000000
--- a/include/asm-xtensa/ptrace.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * include/asm-xtensa/ptrace.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_PTRACE_H
-#define _XTENSA_PTRACE_H
-
-/*
- * Kernel stack
- *
- * 		+-----------------------+  -------- STACK_SIZE
- * 		|     register file     |  |
- * 		+-----------------------+  |
- * 		|    struct pt_regs     |  |
- * 		+-----------------------+  | ------ PT_REGS_OFFSET
- * double 	:  16 bytes spill area  :  |  ^
- * exception	:- - - - - - - - - - - -:  |  |
- * frame	:    struct pt_regs     :  |  |
- * 		:- - - - - - - - - - - -:  |  |
- * 		|                       |  |  |
- * 		|     memory stack      |  |  |
- * 		|                       |  |  |
- * 		~                       ~  ~  ~
- * 		~                       ~  ~  ~
- * 		|                       |  |  |
- * 		|                       |  |  |
- * 		+-----------------------+  |  | --- STACK_BIAS
- * 		|  struct task_struct   |  |  |  ^
- *  current --> +-----------------------+  |  |  |
- * 		|  struct thread_info   |  |  |  |
- *		+-----------------------+ --------
- */
-
-#define KERNEL_STACK_SIZE (2 * PAGE_SIZE)
-
-/*  Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */
-
-#define EXC_TABLE_KSTK		0x004	/* Kernel Stack */
-#define EXC_TABLE_DOUBLE_SAVE	0x008	/* Double exception save area for a0 */
-#define EXC_TABLE_FIXUP		0x00c	/* Fixup handler */
-#define EXC_TABLE_PARAM		0x010	/* For passing a parameter to fixup */
-#define EXC_TABLE_SYSCALL_SAVE	0x014	/* For fast syscall handler */
-#define EXC_TABLE_FAST_USER	0x100	/* Fast user exception handler */
-#define EXC_TABLE_FAST_KERNEL	0x200	/* Fast kernel exception handler */
-#define EXC_TABLE_DEFAULT	0x300	/* Default C-Handler */
-#define EXC_TABLE_SIZE		0x400
-
-/* Registers used by strace */
-
-#define REG_A_BASE	0x0000
-#define REG_AR_BASE	0x0100
-#define REG_PC		0x0020
-#define REG_PS		0x02e6
-#define REG_WB		0x0248
-#define REG_WS		0x0249
-#define REG_LBEG	0x0200
-#define REG_LEND	0x0201
-#define REG_LCOUNT	0x0202
-#define REG_SAR		0x0203
-
-#define SYSCALL_NR	0x00ff
-
-/* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */
-
-#define PTRACE_GETREGS		12
-#define PTRACE_SETREGS		13
-#define PTRACE_GETXTREGS	18
-#define PTRACE_SETXTREGS	19
-
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
-
-/*
- * This struct defines the way the registers are stored on the
- * kernel stack during a system call or other kernel entry.
- */
-struct pt_regs {
-	unsigned long pc;		/*   4 */
-	unsigned long ps;		/*   8 */
-	unsigned long depc;		/*  12 */
-	unsigned long exccause;		/*  16 */
-	unsigned long excvaddr;		/*  20 */
-	unsigned long debugcause;	/*  24 */
-	unsigned long wmask;		/*  28 */
-	unsigned long lbeg;		/*  32 */
-	unsigned long lend;		/*  36 */
-	unsigned long lcount;		/*  40 */
-	unsigned long sar;		/*  44 */
-	unsigned long windowbase;	/*  48 */
-	unsigned long windowstart;	/*  52 */
-	unsigned long syscall;		/*  56 */
-	unsigned long icountlevel;	/*  60 */
-	int reserved[1];		/*  64 */
-
-	/* Additional configurable registers that are used by the compiler. */
-	xtregs_opt_t xtregs_opt;
-
-	/* Make sure the areg field is 16 bytes aligned. */
-	int align[0] __attribute__ ((aligned(16)));
-
-	/* current register frame.
-	 * Note: The ESF for kernel exceptions ends after 16 registers!
-	 */
-	unsigned long areg[16];		/* 128 (64) */
-};
-
-#include <asm/variant/core.h>
-
-# define task_pt_regs(tsk) ((struct pt_regs*) \
-  (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
-# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
-# define instruction_pointer(regs) ((regs)->pc)
-extern void show_regs(struct pt_regs *);
-
-# ifndef CONFIG_SMP
-#  define profile_pc(regs) instruction_pointer(regs)
-# endif
-
-#else	/* __ASSEMBLY__ */
-
-# include <asm/asm-offsets.h>
-#define PT_REGS_OFFSET	  (KERNEL_STACK_SIZE - PT_USER_SIZE)
-
-#endif	/* !__ASSEMBLY__ */
-
-#endif  /* __KERNEL__ */
-
-#endif	/* _XTENSA_PTRACE_H */
diff --git a/include/asm-xtensa/serial.h b/include/asm-xtensa/serial.h
deleted file mode 100644
index ec04114..0000000
--- a/include/asm-xtensa/serial.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * include/asm-xtensa/serial.h
- *
- * Configuration details for 8250, 16450, 16550, etc. serial ports
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_SERIAL_H
-#define _XTENSA_SERIAL_H
-
-#include <asm/platform/serial.h>
-
-#endif	/* _XTENSA_SERIAL_H */
diff --git a/include/asm-xtensa/unaligned.h b/include/asm-xtensa/unaligned.h
deleted file mode 100644
index 8f3424f..0000000
--- a/include/asm-xtensa/unaligned.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Xtensa doesn't handle unaligned accesses efficiently.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-#ifndef _ASM_XTENSA_UNALIGNED_H
-#define _ASM_XTENSA_UNALIGNED_H
-
-#ifdef __XTENSA_EL__
-# include <linux/unaligned/le_memmove.h>
-# include <linux/unaligned/be_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned	__get_unaligned_le
-# define put_unaligned	__put_unaligned_le
-#elif defined(__XTENSA_EB__)
-# include <linux/unaligned/be_memmove.h>
-# include <linux/unaligned/le_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned	__get_unaligned_be
-# define put_unaligned	__put_unaligned_be
-#else
-# error processor byte order undefined!
-#endif
-
-#endif	/* _ASM_XTENSA_UNALIGNED_H */
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 0acb07f..47809ac 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -395,7 +395,7 @@
 	void (*save)(struct drm_connector *connector);
 	void (*restore)(struct drm_connector *connector);
 	enum drm_connector_status (*detect)(struct drm_connector *connector);
-	void (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
+	int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
 	int (*set_property)(struct drm_connector *connector, struct drm_property *property,
 			     uint64_t val);
 	void (*destroy)(struct drm_connector *connector);
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 4bc04cf..0c6f0e1 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -88,7 +88,7 @@
 	struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
 };
 
-extern void drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
+extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
 extern void drm_helper_disable_unused_functions(struct drm_device *dev);
 extern int drm_helper_hotplug_stage_two(struct drm_device *dev);
 extern bool drm_helper_initial_config(struct drm_device *dev, bool can_grow);
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index a3323f3..12e9a29 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -371,3 +371,5 @@
 unifdef-y += xfrm.h
 
 objhdr-y += version.h
+header-y += wimax.h
+header-y += wimax/
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index fba8051..6fce2fc 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -131,22 +131,6 @@
  */
 void acpi_unregister_gsi (u32 gsi);
 
-struct acpi_prt_entry {
-	struct list_head	node;
-	struct acpi_pci_id	id;
-	u8			pin;
-	struct {
-		acpi_handle		handle;
-		u32			index;
-	}			link;
-	u32			irq;
-};
-
-struct acpi_prt_list {
-	int			count;
-	struct list_head	entries;
-};
-
 struct pci_dev;
 
 int acpi_pci_irq_enable (struct pci_dev *dev);
@@ -270,6 +254,7 @@
 #ifdef CONFIG_PM_SLEEP
 void __init acpi_no_s4_hw_signature(void);
 void __init acpi_old_suspend_ordering(void);
+void __init acpi_s4_no_nvs(void);
 #endif /* CONFIG_PM_SLEEP */
 #else	/* CONFIG_ACPI */
 
diff --git a/include/linux/agpgart.h b/include/linux/agpgart.h
index c8fdb6e..110c600 100644
--- a/include/linux/agpgart.h
+++ b/include/linux/agpgart.h
@@ -52,7 +52,6 @@
 
 #ifndef __KERNEL__
 #include <linux/types.h>
-#include <asm/types.h>
 
 struct agp_version {
 	__u16 major;
diff --git a/include/linux/async.h b/include/linux/async.h
new file mode 100644
index 0000000..c4ecacd
--- /dev/null
+++ b/include/linux/async.h
@@ -0,0 +1,25 @@
+/*
+ * async.h: Asynchronous function calls for boot performance
+ *
+ * (C) Copyright 2009 Intel Corporation
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+typedef u64 async_cookie_t;
+typedef void (async_func_ptr) (void *data, async_cookie_t cookie);
+
+extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data);
+extern async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *list);
+extern void async_synchronize_full(void);
+extern void async_synchronize_full_special(struct list_head *list);
+extern void async_synchronize_cookie(async_cookie_t cookie);
+extern void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *list);
+
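
A minimal usage sketch for the interface declared above, assuming a driver whose slow probe can run in parallel with the rest of boot; my_device, my_dev and my_hw_init() are hypothetical stand-ins, not part of this patch:

	static void probe_one(void *data, async_cookie_t cookie)
	{
		struct my_device *dev = data;	/* hypothetical driver state */

		my_hw_init(dev);		/* slow init runs asynchronously */
	}

	static int __init my_driver_init(void)
	{
		async_schedule(probe_one, &my_dev);
		/* ... other setup ... */
		async_synchronize_full();	/* wait for all outstanding async work */
		return 0;
	}
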
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 0f50d4c..45f6297 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -59,9 +59,7 @@
 };
 
 #ifdef CONFIG_DMA_ENGINE
-void async_tx_issue_pending_all(void);
-enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
-void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx);
+#define async_tx_issue_pending_all dma_issue_pending_all
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 #include <asm/async_tx.h>
 #else
@@ -77,19 +75,6 @@
 	do { } while (0);
 }
 
-static inline enum dma_status
-dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
-{
-	return DMA_SUCCESS;
-}
-
-static inline void
-async_tx_run_dependencies(struct dma_async_tx_descriptor *tx,
-	struct dma_chan *host_chan)
-{
-	do { } while (0);
-}
-
 static inline struct dma_chan *
 async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 	enum dma_transaction_type tx_type, struct page **dst, int dst_count,
diff --git a/include/linux/atm_idt77105.h b/include/linux/atm_idt77105.h
index 05621cf..8b72400 100644
--- a/include/linux/atm_idt77105.h
+++ b/include/linux/atm_idt77105.h
@@ -7,7 +7,7 @@
 #ifndef LINUX_ATM_IDT77105_H
 #define LINUX_ATM_IDT77105_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/atmioc.h>
 #include <linux/atmdev.h>
 
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
new file mode 100644
index 0000000..2f1f957
--- /dev/null
+++ b/include/linux/atmel-mci.h
@@ -0,0 +1,39 @@
+#ifndef __LINUX_ATMEL_MCI_H
+#define __LINUX_ATMEL_MCI_H
+
+#define ATMEL_MCI_MAX_NR_SLOTS	2
+
+#include <linux/dw_dmac.h>
+
+/**
+ * struct mci_slot_pdata - board-specific per-slot configuration
+ * @bus_width: Number of data lines wired up the slot
+ * @detect_pin: GPIO pin wired to the card detect switch
+ * @wp_pin: GPIO pin wired to the write protect sensor
+ *
+ * If a given slot is not present on the board, @bus_width should be
+ * set to 0. The other fields are ignored in this case.
+ *
+ * Any pins that aren't available should be set to a negative value.
+ *
+ * Note that support for multiple slots is experimental -- some cards
+ * might get upset if we don't get the clock management exactly right.
+ * But in most cases, it should work just fine.
+ */
+struct mci_slot_pdata {
+	unsigned int		bus_width;
+	int			detect_pin;
+	int			wp_pin;
+};
+
+/**
+ * struct mci_platform_data - board-specific MMC/SDcard configuration
+ * @dma_slave: DMA slave interface to use in data transfers.
+ * @slot: Per-slot configuration data.
+ */
+struct mci_platform_data {
+	struct dw_dma_slave	dma_slave;
+	struct mci_slot_pdata	slot[ATMEL_MCI_MAX_NR_SLOTS];
+};
+
+#endif /* __LINUX_ATMEL_MCI_H */
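
A sketch of board code instantiating the structures documented above, assuming one 4-bit slot with a card-detect GPIO and no write-protect pin; the GPIO number is hypothetical and the dma_slave member is omitted for brevity:

	static struct mci_platform_data mci_data = {
		.slot[0] = {
			.bus_width	= 4,
			.detect_pin	= 30,	/* hypothetical GPIO number */
			.wp_pin		= -1,	/* write protect not wired up */
		},
		/* slot[1].bus_width stays 0, so that slot is treated as absent */
	};
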
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index d7afa9d..f3b5d4e 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -23,16 +23,16 @@
 #define AT_PLATFORM 15  /* string identifying CPU for optimizations */
 #define AT_HWCAP  16    /* arch dependent hints at CPU capabilities */
 #define AT_CLKTCK 17	/* frequency at which times() increments */
-
+/* AT_* values 18 through 22 are reserved */
 #define AT_SECURE 23   /* secure mode boolean */
-
 #define AT_BASE_PLATFORM 24	/* string identifying real platform, may
 				 * differ from AT_PLATFORM. */
+#define AT_RANDOM 25	/* address of 16 random bytes */
 
 #define AT_EXECFN  31	/* filename of program */
 
 #ifdef __KERNEL__
-#define AT_VECTOR_SIZE_BASE 18 /* NEW_AUX_ENT entries in auxiliary table */
+#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */
   /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
 #endif
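
For illustration, the new AT_RANDOM entry can be consumed from user space once the C library exposes the aux vector; the sketch below assumes glibc's getauxval(), which is not part of this patch:

	#include <sys/auxv.h>
	#include <stdio.h>

	int main(void)
	{
		const unsigned char *seed = (const unsigned char *)getauxval(AT_RANDOM);

		if (seed)	/* points at 16 kernel-supplied random bytes */
			printf("first byte: %02x\n", seed[0]);
		return 0;
	}
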
 
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 1ee9488..79ca2da 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -31,6 +31,10 @@
 struct fb_info;
 
 struct backlight_ops {
+	unsigned int options;
+
+#define BL_CORE_SUSPENDRESUME	(1 << 0)
+
 	/* Notify the backlight driver some property has changed */
 	int (*update_status)(struct backlight_device *);
 	/* Return the current backlight brightness (accounting for power,
@@ -51,7 +55,19 @@
 	   modes; 4: full off), see FB_BLANK_XXX */
 	int power;
 	/* FB Blanking active? (values as for power) */
+	/* Due to be removed, please use (state & BL_CORE_FBBLANK) */
 	int fb_blank;
+	/* Flags used to signal drivers of state changes */
+	/* Upper 4 bits are reserved for driver internal use */
+	unsigned int state;
+
+#define BL_CORE_SUSPENDED	(1 << 0)	/* backlight is suspended */
+#define BL_CORE_FBBLANK		(1 << 1)	/* backlight is under an fb blank event */
+#define BL_CORE_DRIVER4		(1 << 28)	/* reserved for driver specific use */
+#define BL_CORE_DRIVER3		(1 << 29)	/* reserved for driver specific use */
+#define BL_CORE_DRIVER2		(1 << 30)	/* reserved for driver specific use */
+#define BL_CORE_DRIVER1		(1 << 31)	/* reserved for driver specific use */
+
 };
 
 struct backlight_device {
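
A sketch of how a driver's update_status() hook might honour the new state flags, assuming the usual props layout of struct backlight_device; my_set_hw_brightness() is a hypothetical hardware hook, not part of this patch:

	static int my_bl_update_status(struct backlight_device *bd)
	{
		int brightness = bd->props.brightness;

		if (bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
			brightness = 0;	/* force the backlight off while suspended or blanked */

		return my_set_hw_brightness(brightness);
	}
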
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7035cec..044467e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -690,6 +690,8 @@
 	struct page **pages;
 	int page_order;
 	int nr_entries;
+	unsigned long offset;
+	int null_mapped;
 };
 
 struct req_iterator {
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 8605f8a..bd7ac79 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -171,7 +171,7 @@
 wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
 int fsync_bdev(struct block_device *);
 struct super_block *freeze_bdev(struct block_device *);
-void thaw_bdev(struct block_device *, struct super_block *);
+int thaw_bdev(struct block_device *, struct super_block *);
 int fsync_super(struct super_block *);
 int fsync_no_super(struct block_device *);
 struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
@@ -346,6 +346,15 @@
 static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
 static inline void invalidate_bdev(struct block_device *bdev) {}
 
+static inline struct super_block *freeze_bdev(struct block_device *sb)
+{
+	return NULL;
+}
+
+static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
+{
+	return 0;
+}
 
 #endif /* CONFIG_BLOCK */
 #endif /* _LINUX_BUFFER_HEAD_H */
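
Since thaw_bdev() now returns an int, callers can check whether the thaw actually took effect; a hedged sketch of the freeze/thaw pattern (error handling trimmed):

	struct super_block *sb = freeze_bdev(bdev);

	/* ... take a consistent snapshot of the device here ... */

	if (thaw_bdev(bdev, sb))
		printk(KERN_WARNING "thaw failed, filesystem may still be frozen\n");
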
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index f50785a..25085cb 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -19,7 +19,7 @@
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 
-#define CAN_VERSION "20081130"
+#define CAN_VERSION "20090105"
 
 /* increment this number each time you change some user-space interface */
 #define CAN_ABI_VERSION "8"
diff --git a/include/linux/capi.h b/include/linux/capi.h
index fdebaaa..65100d6 100644
--- a/include/linux/capi.h
+++ b/include/linux/capi.h
@@ -12,7 +12,7 @@
 #ifndef __LINUX_CAPI_H__
 #define __LINUX_CAPI_H__
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/ioctl.h>
 #ifndef __KERNEL__
 #include <linux/kernelcapi.h>
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 08b78c0..e267e628 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -52,9 +52,9 @@
 	 * hierarchy structure */
 	struct cgroup *cgroup;
 
-	/* State maintained by the cgroup system to allow
-	 * subsystems to be "busy". Should be accessed via css_get()
-	 * and css_put() */
+	/* State maintained by the cgroup system to allow subsystems
+	 * to be "busy". Should be accessed via css_get(),
+	 * css_tryget() and css_put(). */
 
 	atomic_t refcnt;
 
@@ -64,11 +64,14 @@
 /* bits in struct cgroup_subsys_state flags field */
 enum {
 	CSS_ROOT, /* This CSS is the root of the subsystem */
+	CSS_REMOVED, /* This CSS is dead */
 };
 
 /*
- * Call css_get() to hold a reference on the cgroup;
- *
+ * Call css_get() to hold a reference on the css; it can be used
+ * for a reference obtained via:
+ * - an existing ref-counted reference to the css
+ * - task->cgroups for a locked task
  */
 
 static inline void css_get(struct cgroup_subsys_state *css)
@@ -77,9 +80,32 @@
 	if (!test_bit(CSS_ROOT, &css->flags))
 		atomic_inc(&css->refcnt);
 }
+
+static inline bool css_is_removed(struct cgroup_subsys_state *css)
+{
+	return test_bit(CSS_REMOVED, &css->flags);
+}
+
+/*
+ * Call css_tryget() to take a reference on a css if your existing
+ * (known-valid) reference isn't already ref-counted. Returns false if
+ * the css has been destroyed.
+ */
+
+static inline bool css_tryget(struct cgroup_subsys_state *css)
+{
+	if (test_bit(CSS_ROOT, &css->flags))
+		return true;
+	while (!atomic_inc_not_zero(&css->refcnt)) {
+		if (test_bit(CSS_REMOVED, &css->flags))
+			return false;
+	}
+	return true;
+}
+
 /*
  * css_put() should be called to release a reference taken by
- * css_get()
+ * css_get() or css_tryget()
  */
 
 extern void __css_put(struct cgroup_subsys_state *css);
@@ -116,7 +142,7 @@
 	struct list_head children;	/* my children */
 
 	struct cgroup *parent;	/* my parent */
-	struct dentry *dentry;	  	/* cgroup fs entry */
+	struct dentry *dentry;	  	/* cgroup fs entry, RCU protected */
 
 	/* Private pointers for each registered subsystem */
 	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
@@ -145,6 +171,9 @@
 	int pids_use_count;
 	/* Length of the current tasks_pids array */
 	int pids_length;
+
+	/* For RCU-protected deletion */
+	struct rcu_head rcu_head;
 };
 
 /* A css_set is a structure holding pointers to a set of
@@ -337,9 +366,23 @@
 #define MAX_CGROUP_TYPE_NAMELEN 32
 	const char *name;
 
-	/* Protected by RCU */
-	struct cgroupfs_root *root;
+	/*
+	 * Protects sibling/children links of cgroups in this
+	 * hierarchy, plus protects which hierarchy (or none) the
+	 * subsystem is a part of (i.e. root/sibling).  To avoid
+	 * potential deadlocks, the following operations should not be
+	 * undertaken while holding any hierarchy_mutex:
+	 *
+	 * - allocating memory
+	 * - initiating hotplug events
+	 */
+	struct mutex hierarchy_mutex;
 
+	/*
+	 * Link to parent, and list entry in parent's children.
+	 * Protected by this->hierarchy_mutex and cgroup_lock()
+	 */
+	struct cgroupfs_root *root;
 	struct list_head sibling;
 };
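
A sketch of the css_tryget()/css_put() pattern the comments above describe, for a css reference found under rcu_read_lock(); the task_subsys_state() lookup and do_something() are only illustrative:

	rcu_read_lock();
	css = task_subsys_state(tsk, my_subsys_id);	/* illustrative lookup */
	if (css_tryget(css)) {
		rcu_read_unlock();
		do_something(css);	/* css cannot be destroyed until css_put() */
		css_put(css);
	} else {
		rcu_read_unlock();	/* css was already being removed */
	}
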
 
diff --git a/include/linux/compat.h b/include/linux/compat.h
index e88f3ec..3fd2194 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -280,5 +280,18 @@
 asmlinkage long compat_sys_timerfd_gettime(int ufd,
 				   struct compat_itimerspec __user *otmr);
 
+asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_page,
+				      __u32 __user *pages,
+				      const int __user *nodes,
+				      int __user *status,
+				      int flags);
+asmlinkage long compat_sys_futimesat(unsigned int dfd, char __user *filename,
+				     struct compat_timeval __user *t);
+asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename,
+				      struct compat_stat __user *statbuf,
+				      int flag);
+asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
+				  int flags, int mode);
+
 #endif /* CONFIG_COMPAT */
 #endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index af40f8e..1514d53 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -11,9 +11,19 @@
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
 
-/* This macro obfuscates arithmetic on a variable address so that gcc
-   shouldn't recognize the original var, and make assumptions about it */
 /*
+ * This macro obfuscates arithmetic on a variable address so that gcc
+ * shouldn't recognize the original var, and make assumptions about it.
+ *
+ * This is needed because the C standard makes it undefined to do
+ * pointer arithmetic on "objects" outside their boundaries and the
+ * gcc optimizers assume this is the case. In particular they
+ * assume such arithmetic does not wrap.
+ *
+ * A miscompilation has been observed because of this on PPC.
+ * To work around it we hide the relationship of the pointer and the object
+ * using this macro.
+ *
  * Versions of the ppc64 compiler before 4.1 had a bug where use of
  * RELOC_HIDE could trash r30. The bug can be worked around by changing
  * the inline assembly constraint from =g to =r, in this particular
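
For context, the idiom this comment block describes hides the pointer from the optimizer by round-tripping it through an empty asm before the arithmetic; a sketch of the shape such a macro takes (the in-tree definition may differ in detail):

	#define RELOC_HIDE(ptr, off)					\
	({								\
		unsigned long __ptr;					\
		__asm__ ("" : "=r"(__ptr) : "0"(ptr));			\
		(typeof(ptr)) (__ptr + (off));				\
	})
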
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 5c7f946..34f2789 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -22,7 +22,7 @@
 #ifndef __CONNECTOR_H
 #define __CONNECTOR_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 #define CN_IDX_CONNECTOR		0xffffffff
 #define CN_VAL_CONNECTOR		0xffffffff
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 484b3ab..384b38d 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -80,8 +80,8 @@
 };
 
 struct cpufreq_policy {
-	cpumask_t		cpus;	/* CPUs requiring sw coordination */
-	cpumask_t		related_cpus; /* CPUs with any coordination */
+	cpumask_var_t		cpus;	/* CPUs requiring sw coordination */
+	cpumask_var_t		related_cpus; /* CPUs with any coordination */
 	unsigned int		shared_type; /* ANY or ALL affected CPUs
 						should set cpufreq */
 	unsigned int		cpu;    /* cpu nr of registered CPU */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 51ea2bd..90c6074 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -20,8 +20,9 @@
 extern int cpuset_init_early(void);
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
-extern void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p, cpumask_t *mask);
+extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
+extern void cpuset_cpus_allowed_locked(struct task_struct *p,
+				       struct cpumask *mask);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -86,12 +87,13 @@
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
-static inline void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask)
+static inline void cpuset_cpus_allowed(struct task_struct *p,
+				       struct cpumask *mask)
 {
 	*mask = cpu_possible_map;
 }
 static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
-								cpumask_t *mask)
+					      struct cpumask *mask)
 {
 	*mask = cpu_possible_map;
 }
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
index 2d3d1e0..d06fbf2 100644
--- a/include/linux/cyclades.h
+++ b/include/linux/cyclades.h
@@ -150,8 +150,6 @@
  *	architectures and compilers.
  */
 
-#include <asm/types.h>
-
 typedef __u64  ucdouble;		/* 64 bits, unsigned */
 typedef __u32  uclong;			/* 32 bits, unsigned */
 typedef __u16  ucshort;		/* 16 bits, unsigned */
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index e1a6c04..23936b1 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -63,6 +63,8 @@
 				  struct dentry *parent, u16 *value);
 struct dentry *debugfs_create_x32(const char *name, mode_t mode,
 				  struct dentry *parent, u32 *value);
+struct dentry *debugfs_create_size_t(const char *name, mode_t mode,
+				     struct dentry *parent, size_t *value);
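The new helper takes the same arguments as the other debugfs_create_* functions here, just with a size_t target. A small usage sketch (the name, mode and directory are made up):

	static size_t watermark = 4096;	/* hypothetical value to expose */

	debugfs_create_size_t("watermark", 0444, my_debugfs_dir, &watermark);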
 struct dentry *debugfs_create_bool(const char *name, mode_t mode,
 				  struct dentry *parent, u32 *value);
 
diff --git a/include/linux/device.h b/include/linux/device.h
index 7d9da4b..45e5b19 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -28,7 +28,6 @@
 #define BUS_ID_SIZE		20
 
 struct device;
-struct device_private;
 struct device_driver;
 struct driver_private;
 struct class;
@@ -366,10 +365,12 @@
 };
 
 struct device {
+	struct klist		klist_children;
+	struct klist_node	knode_parent;	/* node in sibling list */
+	struct klist_node	knode_driver;
+	struct klist_node	knode_bus;
 	struct device		*parent;
 
-	struct device_private	*p;
-
 	struct kobject kobj;
 	char	bus_id[BUS_ID_SIZE];	/* position on parent bus */
 	unsigned		uevent_suppress:1;
diff --git a/include/linux/dio.h b/include/linux/dio.h
index 1e65ebc..b2dd31c 100644
--- a/include/linux/dio.h
+++ b/include/linux/dio.h
@@ -241,7 +241,7 @@
 
 extern int dio_find(int deviceid);
 extern unsigned long dio_scodetophysaddr(int scode);
-extern void dio_create_sysfs_dev_files(struct dio_dev *);
+extern int dio_create_sysfs_dev_files(struct dio_dev *);
 
 /* New-style probing */
 extern int dio_register_driver(struct dio_driver *);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index adb0b08..c73f1e2 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -29,32 +29,6 @@
 #include <linux/dma-mapping.h>
 
 /**
- * enum dma_state - resource PNP/power management state
- * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
- * @DMA_RESOURCE_RESUME: DMA device returning to full power
- * @DMA_RESOURCE_AVAILABLE: DMA device available to the system
- * @DMA_RESOURCE_REMOVED: DMA device removed from the system
- */
-enum dma_state {
-	DMA_RESOURCE_SUSPEND,
-	DMA_RESOURCE_RESUME,
-	DMA_RESOURCE_AVAILABLE,
-	DMA_RESOURCE_REMOVED,
-};
-
-/**
- * enum dma_state_client - state of the channel in the client
- * @DMA_ACK: client would like to use, or was using this channel
- * @DMA_DUP: client has already seen this channel, or is not using this channel
- * @DMA_NAK: client does not want to see any more channels
- */
-enum dma_state_client {
-	DMA_ACK,
-	DMA_DUP,
-	DMA_NAK,
-};
-
-/**
  * typedef dma_cookie_t - an opaque DMA cookie
  *
  * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
@@ -89,23 +63,13 @@
 	DMA_MEMSET,
 	DMA_MEMCPY_CRC32C,
 	DMA_INTERRUPT,
+	DMA_PRIVATE,
 	DMA_SLAVE,
 };
 
 /* last transaction type for creation of the capabilities mask */
 #define DMA_TX_TYPE_END (DMA_SLAVE + 1)
 
-/**
- * enum dma_slave_width - DMA slave register access width.
- * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
- * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
- * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
- */
-enum dma_slave_width {
-	DMA_SLAVE_WIDTH_8BIT,
-	DMA_SLAVE_WIDTH_16BIT,
-	DMA_SLAVE_WIDTH_32BIT,
-};
 
 /**
  * enum dma_ctrl_flags - DMA flags to augment operation preparation,
@@ -132,32 +96,6 @@
 typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
 
 /**
- * struct dma_slave - Information about a DMA slave
- * @dev: device acting as DMA slave
- * @dma_dev: required DMA master device. If non-NULL, the client can not be
- *	bound to other masters than this.
- * @tx_reg: physical address of data register used for
- *	memory-to-peripheral transfers
- * @rx_reg: physical address of data register used for
- *	peripheral-to-memory transfers
- * @reg_width: peripheral register width
- *
- * If dma_dev is non-NULL, the client can not be bound to other DMA
- * masters than the one corresponding to this device. The DMA master
- * driver may use this to determine if there is controller-specific
- * data wrapped around this struct. Drivers of platform code that sets
- * the dma_dev field must therefore make sure to use an appropriate
- * controller-specific dma slave structure wrapping this struct.
- */
-struct dma_slave {
-	struct device		*dev;
-	struct device		*dma_dev;
-	dma_addr_t		tx_reg;
-	dma_addr_t		rx_reg;
-	enum dma_slave_width	reg_width;
-};
-
-/**
  * struct dma_chan_percpu - the per-CPU part of struct dma_chan
  * @refcount: local_t used for open-coded "bigref" counting
  * @memcpy_count: transaction counter
@@ -165,7 +103,6 @@
  */
 
 struct dma_chan_percpu {
-	local_t refcount;
 	/* stats */
 	unsigned long memcpy_count;
 	unsigned long bytes_transferred;
@@ -176,13 +113,14 @@
  * @device: ptr to the dma device who supplies this channel, always !%NULL
  * @cookie: last cookie value returned to client
  * @chan_id: channel ID for sysfs
- * @class_dev: class device for sysfs
+ * @dev: class device for sysfs
  * @refcount: kref, used in "bigref" slow-mode
  * @slow_ref: indicates that the DMA channel is free
  * @rcu: the DMA channel's RCU head
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  * @client_count: how many clients are using this channel
+ * @table_count: number of appearances in the mem-to-mem allocation table
  */
 struct dma_chan {
 	struct dma_device *device;
@@ -190,73 +128,47 @@
 
 	/* sysfs */
 	int chan_id;
-	struct device dev;
-
-	struct kref refcount;
-	int slow_ref;
-	struct rcu_head rcu;
+	struct dma_chan_dev *dev;
 
 	struct list_head device_node;
 	struct dma_chan_percpu *local;
 	int client_count;
+	int table_count;
 };
 
-#define to_dma_chan(p) container_of(p, struct dma_chan, dev)
+/**
+ * struct dma_chan_dev - relate sysfs device node to backing channel device
+ * @chan: driver channel device
+ * @device: sysfs device
+ * @dev_id: parent dma_device dev_id
+ * @idr_ref: reference count to gate release of dma_device dev_id
+ */
+struct dma_chan_dev {
+	struct dma_chan *chan;
+	struct device device;
+	int dev_id;
+	atomic_t *idr_ref;
+};
+
+static inline const char *dma_chan_name(struct dma_chan *chan)
+{
+	return dev_name(&chan->dev->device);
+}
 
 void dma_chan_cleanup(struct kref *kref);
 
-static inline void dma_chan_get(struct dma_chan *chan)
-{
-	if (unlikely(chan->slow_ref))
-		kref_get(&chan->refcount);
-	else {
-		local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
-		put_cpu();
-	}
-}
-
-static inline void dma_chan_put(struct dma_chan *chan)
-{
-	if (unlikely(chan->slow_ref))
-		kref_put(&chan->refcount, dma_chan_cleanup);
-	else {
-		local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
-		put_cpu();
-	}
-}
-
-/*
- * typedef dma_event_callback - function pointer to a DMA event callback
- * For each channel added to the system this routine is called for each client.
- * If the client would like to use the channel it returns '1' to signal (ack)
- * the dmaengine core to take out a reference on the channel and its
- * corresponding device.  A client must not 'ack' an available channel more
- * than once.  When a channel is removed all clients are notified.  If a client
- * is using the channel it must 'ack' the removal.  A client must not 'ack' a
- * removed channel more than once.
- * @client - 'this' pointer for the client context
- * @chan - channel to be acted upon
- * @state - available or removed
- */
-struct dma_client;
-typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
-		struct dma_chan *chan, enum dma_state state);
-
 /**
- * struct dma_client - info on the entity making use of DMA services
- * @event_callback: func ptr to call when something happens
- * @cap_mask: only return channels that satisfy the requested capabilities
- *  a value of zero corresponds to any capability
- * @slave: data for preparing slave transfer. Must be non-NULL iff the
- *  DMA_SLAVE capability is requested.
- * @global_node: list_head for global dma_client_list
+ * typedef dma_filter_fn - callback filter for dma_request_channel
+ * @chan: channel to be reviewed
+ * @filter_param: opaque parameter passed through dma_request_channel
+ *
+ * When this optional parameter is specified in a call to dma_request_channel,
+ * a suitable channel (a non-busy channel that satisfies the given capability
+ * mask) is passed to this routine for further dispositioning before being
+ * returned.  The routine returns 'true' to indicate that the channel is
+ * acceptable.
  */
-struct dma_client {
-	dma_event_callback	event_callback;
-	dma_cap_mask_t		cap_mask;
-	struct dma_slave	*slave;
-	struct list_head	global_node;
-};
+typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
 /**
@@ -323,14 +235,10 @@
 	dma_cap_mask_t  cap_mask;
 	int max_xor;
 
-	struct kref refcount;
-	struct completion done;
-
 	int dev_id;
 	struct device *dev;
 
-	int (*device_alloc_chan_resources)(struct dma_chan *chan,
-			struct dma_client *client);
+	int (*device_alloc_chan_resources)(struct dma_chan *chan);
 	void (*device_free_chan_resources)(struct dma_chan *chan);
 
 	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
@@ -362,9 +270,18 @@
 
 /* --- public DMA engine API --- */
 
-void dma_async_client_register(struct dma_client *client);
-void dma_async_client_unregister(struct dma_client *client);
-void dma_async_client_chan_request(struct dma_client *client);
+#ifdef CONFIG_DMA_ENGINE
+void dmaengine_get(void);
+void dmaengine_put(void);
+#else
+static inline void dmaengine_get(void)
+{
+}
+static inline void dmaengine_put(void)
+{
+}
+#endif
+
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 	void *dest, void *src, size_t len);
 dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -406,6 +323,12 @@
 	set_bit(tx_type, dstp->bits);
 }
 
+#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
+static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
+{
+	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
+}
+
 #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
 static inline int
 __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
@@ -475,11 +398,25 @@
 }
 
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
+#ifdef CONFIG_DMA_ENGINE
+enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
+#else
+static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+	return DMA_SUCCESS;
+}
+#endif
 
 /* --- DMA device --- */
 
 int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
+void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+void dma_issue_pending_all(void);
+#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
+struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
+void dma_release_channel(struct dma_chan *chan);
 
 /* --- Helper iov-locking functions --- */
 
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index 04d217b..d797dde 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -22,14 +22,34 @@
 };
 
 /**
+ * enum dw_dma_slave_width - DMA slave register access width.
+ * @DW_DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
+ * @DW_DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
+ * @DW_DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
+ */
+enum dw_dma_slave_width {
+	DW_DMA_SLAVE_WIDTH_8BIT,
+	DW_DMA_SLAVE_WIDTH_16BIT,
+	DW_DMA_SLAVE_WIDTH_32BIT,
+};
+
+/**
  * struct dw_dma_slave - Controller-specific information about a slave
- * @slave: Generic information about the slave
- * @ctl_lo: Platform-specific initializer for the CTL_LO register
+ *
+ * @dma_dev: required DMA master device
+ * @tx_reg: physical address of data register used for
+ *	memory-to-peripheral transfers
+ * @rx_reg: physical address of data register used for
+ *	peripheral-to-memory transfers
+ * @reg_width: peripheral register width
  * @cfg_hi: Platform-specific initializer for the CFG_HI register
  * @cfg_lo: Platform-specific initializer for the CFG_LO register
  */
 struct dw_dma_slave {
-	struct dma_slave	slave;
+	struct device		*dma_dev;
+	dma_addr_t		tx_reg;
+	dma_addr_t		rx_reg;
+	enum dw_dma_slave_width	reg_width;
 	u32			cfg_hi;
 	u32			cfg_lo;
 };
@@ -54,9 +74,4 @@
 #define DWC_CFGL_HS_DST_POL	(1 << 18)	/* dst handshake active low */
 #define DWC_CFGL_HS_SRC_POL	(1 << 19)	/* src handshake active low */
 
-static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave)
-{
-	return container_of(slave, struct dw_dma_slave, slave);
-}
-
 #endif /* DW_DMAC_H */
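With the generic struct dma_slave gone, platform code fills in struct dw_dma_slave directly and hands it to the peripheral driver, which passes it on to the DesignWare controller. A sketch with made-up register addresses and device names:

	static struct dw_dma_slave my_uart_dma = {
		.dma_dev	= &my_dw_dmac_device.dev,	/* the controller's device */
		.tx_reg		= MY_UART_BASE + 0x34,		/* peripheral TX data register */
		.rx_reg		= MY_UART_BASE + 0x30,		/* peripheral RX data register */
		.reg_width	= DW_DMA_SLAVE_WIDTH_32BIT,
	};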
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index 78c775a..121720d 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -194,6 +194,30 @@
 #define EXT2_FL_USER_VISIBLE		FS_FL_USER_VISIBLE	/* User visible flags */
 #define EXT2_FL_USER_MODIFIABLE		FS_FL_USER_MODIFIABLE	/* User modifiable flags */
 
+/* Flags that should be inherited by new inodes from their parent. */
+#define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\
+			   EXT2_SYNC_FL | EXT2_IMMUTABLE_FL | EXT2_APPEND_FL |\
+			   EXT2_NODUMP_FL | EXT2_NOATIME_FL | EXT2_COMPRBLK_FL|\
+			   EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\
+			   EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL)
+
+/* Flags that are appropriate for regular files (all but dir-specific ones). */
+#define EXT2_REG_FLMASK (~(EXT2_DIRSYNC_FL | EXT2_TOPDIR_FL))
+
+/* Flags that are appropriate for non-directories/regular files. */
+#define EXT2_OTHER_FLMASK (EXT2_NODUMP_FL | EXT2_NOATIME_FL)
+
+/* Mask out flags that are inappropriate for the given type of inode. */
+static inline __u32 ext2_mask_flags(umode_t mode, __u32 flags)
+{
+	if (S_ISDIR(mode))
+		return flags;
+	else if (S_ISREG(mode))
+		return flags & EXT2_REG_FLMASK;
+	else
+		return flags & EXT2_OTHER_FLMASK;
+}
+
 /*
  * ioctl commands
  */
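The intended use of these additions is at inode creation: the parent directory's flags are filtered through EXT2_FL_INHERITED and then through ext2_mask_flags() for the type of the new inode. Roughly (parent_flags and mode stand in for the allocator's real variables):

	/* flags a freshly created inode starts with, given its parent dir */
	__u32 new_flags = ext2_mask_flags(mode, parent_flags & EXT2_FL_INHERITED);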
diff --git a/include/linux/ext2_fs_sb.h b/include/linux/ext2_fs_sb.h
index dc541f3..1cdb663 100644
--- a/include/linux/ext2_fs_sb.h
+++ b/include/linux/ext2_fs_sb.h
@@ -101,7 +101,7 @@
 	struct percpu_counter s_freeblocks_counter;
 	struct percpu_counter s_freeinodes_counter;
 	struct percpu_counter s_dirs_counter;
-	struct blockgroup_lock s_blockgroup_lock;
+	struct blockgroup_lock *s_blockgroup_lock;
 	/* root of the per fs reservation window tree */
 	spinlock_t s_rsv_window_lock;
 	struct rb_root s_rsv_window_root;
@@ -111,7 +111,7 @@
 static inline spinlock_t *
 sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group)
 {
-	return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group);
+	return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
 }
 
 #endif	/* _LINUX_EXT2_FS_SB */
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index d14f029..dd495b8 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -178,6 +178,30 @@
 #define EXT3_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
 #define EXT3_FL_USER_MODIFIABLE		0x000380FF /* User modifiable flags */
 
+/* Flags that should be inherited by new inodes from their parent. */
+#define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
+			   EXT3_SYNC_FL | EXT3_IMMUTABLE_FL | EXT3_APPEND_FL |\
+			   EXT3_NODUMP_FL | EXT3_NOATIME_FL | EXT3_COMPRBLK_FL|\
+			   EXT3_NOCOMPR_FL | EXT3_JOURNAL_DATA_FL |\
+			   EXT3_NOTAIL_FL | EXT3_DIRSYNC_FL)
+
+/* Flags that are appropriate for regular files (all but dir-specific ones). */
+#define EXT3_REG_FLMASK (~(EXT3_DIRSYNC_FL | EXT3_TOPDIR_FL))
+
+/* Flags that are appropriate for non-directories/regular files. */
+#define EXT3_OTHER_FLMASK (EXT3_NODUMP_FL | EXT3_NOATIME_FL)
+
+/* Mask out flags that are inappropriate for the given type of inode. */
+static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
+{
+	if (S_ISDIR(mode))
+		return flags;
+	else if (S_ISREG(mode))
+		return flags & EXT3_REG_FLMASK;
+	else
+		return flags & EXT3_OTHER_FLMASK;
+}
+
 /*
  * Inode dynamic state flags
  */
@@ -354,6 +378,13 @@
 #define	EXT3_ORPHAN_FS			0x0004	/* Orphans being recovered */
 
 /*
+ * Misc. filesystem flags
+ */
+#define EXT2_FLAGS_SIGNED_HASH		0x0001  /* Signed dirhash in use */
+#define EXT2_FLAGS_UNSIGNED_HASH	0x0002  /* Unsigned dirhash in use */
+#define EXT2_FLAGS_TEST_FILESYS		0x0004	/* to test development code */
+
+/*
  * Mount flags
  */
 #define EXT3_MOUNT_CHECK		0x00001	/* Do mount-time checks */
@@ -489,7 +520,23 @@
 	__u16	s_reserved_word_pad;
 	__le32	s_default_mount_opts;
 	__le32	s_first_meta_bg;	/* First metablock block group */
-	__u32	s_reserved[190];	/* Padding to the end of the block */
+	__le32	s_mkfs_time;		/* When the filesystem was created */
+	__le32	s_jnl_blocks[17];	/* Backup of the journal inode */
+	/* 64bit support valid if EXT4_FEATURE_COMPAT_64BIT */
+/*150*/	__le32	s_blocks_count_hi;	/* Blocks count */
+	__le32	s_r_blocks_count_hi;	/* Reserved blocks count */
+	__le32	s_free_blocks_count_hi;	/* Free blocks count */
+	__le16	s_min_extra_isize;	/* All inodes have at least # bytes */
+	__le16	s_want_extra_isize; 	/* New inodes should reserve # bytes */
+	__le32	s_flags;		/* Miscellaneous flags */
+	__le16  s_raid_stride;		/* RAID stride */
+	__le16  s_mmp_interval;         /* # seconds to wait in MMP checking */
+	__le64  s_mmp_block;            /* Block for multi-mount protection */
+	__le32  s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
+	__u8	s_log_groups_per_flex;  /* FLEX_BG group size */
+	__u8	s_reserved_char_pad2;
+	__le16  s_reserved_pad;
+	__u32   s_reserved[162];        /* Padding to the end of the block */
 };
 
 #ifdef __KERNEL__
@@ -694,6 +741,9 @@
 #define DX_HASH_LEGACY		0
 #define DX_HASH_HALF_MD4	1
 #define DX_HASH_TEA		2
+#define DX_HASH_LEGACY_UNSIGNED	3
+#define DX_HASH_HALF_MD4_UNSIGNED	4
+#define DX_HASH_TEA_UNSIGNED		5
 
 #ifdef __KERNEL__
 
diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h
index e024e38..f07f34d 100644
--- a/include/linux/ext3_fs_sb.h
+++ b/include/linux/ext3_fs_sb.h
@@ -57,10 +57,11 @@
 	u32 s_next_generation;
 	u32 s_hash_seed[4];
 	int s_def_hash_version;
+	int s_hash_unsigned;	/* 3 if hash should be unsigned, 0 if not */
 	struct percpu_counter s_freeblocks_counter;
 	struct percpu_counter s_freeinodes_counter;
 	struct percpu_counter s_dirs_counter;
-	struct blockgroup_lock s_blockgroup_lock;
+	struct blockgroup_lock *s_blockgroup_lock;
 
 	/* root of the per fs reservation window tree */
 	spinlock_t s_rsv_window_lock;
@@ -86,7 +87,7 @@
 static inline spinlock_t *
 sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group)
 {
-	return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group);
+	return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
 }
 
 #endif	/* _LINUX_EXT3_FS_SB */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 1ee63df..818fe21 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -1,7 +1,7 @@
 #ifndef _LINUX_FB_H
 #define _LINUX_FB_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/i2c.h>
 
 struct dentry;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index d7eba77..6022f44 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -234,6 +234,8 @@
 #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
 #define FIBMAP	   _IO(0x00,1)	/* bmap access */
 #define FIGETBSZ   _IO(0x00,2)	/* get the block size used for bmap */
+#define FIFREEZE	_IOWR('X', 119, int)	/* Freeze */
+#define FITHAW		_IOWR('X', 120, int)	/* Thaw */
 
 #define	FS_IOC_GETFLAGS			_IOR('f', 1, long)
 #define	FS_IOC_SETFLAGS			_IOW('f', 2, long)
@@ -565,6 +567,7 @@
 struct block_device {
 	dev_t			bd_dev;  /* not a kdev_t - it's a search key */
 	struct inode *		bd_inode;	/* will die */
+	struct super_block *	bd_super;
 	int			bd_openers;
 	struct mutex		bd_mutex;	/* open/close mutex */
 	struct semaphore	bd_mount_sem;
@@ -590,6 +593,11 @@
 	 * care to not mess up bd_private for that case.
 	 */
 	unsigned long		bd_private;
+
+	/* The counter of freeze processes */
+	int			bd_fsfreeze_count;
+	/* Mutex for freeze */
+	struct mutex		bd_fsfreeze_mutex;
 };
 
 /*
@@ -1184,6 +1192,11 @@
 	 * generic_show_options()
 	 */
 	char *s_options;
+
+	/*
+	 * storage for asynchronous operations
+	 */
+	struct list_head s_async_list;
 };
 
 extern struct timespec current_fs_time(struct super_block *sb);
@@ -1371,8 +1384,8 @@
 	void (*put_super) (struct super_block *);
 	void (*write_super) (struct super_block *);
 	int (*sync_fs)(struct super_block *sb, int wait);
-	void (*write_super_lockfs) (struct super_block *);
-	void (*unlockfs) (struct super_block *);
+	int (*freeze_fs) (struct super_block *);
+	int (*unfreeze_fs) (struct super_block *);
 	int (*statfs) (struct dentry *, struct kstatfs *);
 	int (*remount_fs) (struct super_block *, int *, char *);
 	void (*clear_inode) (struct inode *);
@@ -1384,6 +1397,7 @@
 	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
 	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
 #endif
+	int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
 };
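FIFREEZE/FITHAW expose the freeze_fs/unfreeze_fs superblock operations above to userspace (CAP_SYS_ADMIN required). A hedged userspace sketch; the path is only an example and the snapshot step is whatever the admin tool does while writes are blocked:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/fs.h>

	static int snapshot_fs(const char *path)
	{
		int fd = open(path, O_RDONLY);	/* any file or dir on the fs */
		if (fd < 0)
			return -1;
		if (ioctl(fd, FIFREEZE, 0) == 0) {
			/* ...snapshot the underlying block device here... */
			ioctl(fd, FITHAW, 0);
		}
		close(fd);
		return 0;
	}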
 
 /*
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 33a5992..20873d4 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -393,11 +393,7 @@
 #define I2C_CLASS_TV_ANALOG	(1<<1)	/* bttv + friends */
 #define I2C_CLASS_TV_DIGITAL	(1<<2)	/* dvb cards */
 #define I2C_CLASS_DDC		(1<<3)	/* DDC bus on graphics adapters */
-#define I2C_CLASS_CAM_ANALOG	(1<<4)	/* camera with analog CCD */
-#define I2C_CLASS_CAM_DIGITAL	(1<<5)	/* most webcams */
-#define I2C_CLASS_SOUND		(1<<6)	/* sound devices */
 #define I2C_CLASS_SPD		(1<<7)	/* SPD EEPROMs and similar */
-#define I2C_CLASS_ALL		(UINT_MAX) /* all of the above */
 
 /* i2c_client_address_data is the struct for holding default client
  * addresses for a driver and for the parameters supplied on the
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 3644f63..194da5a 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -871,7 +871,7 @@
 	ide_hwif_t	*cur_port;	/* for hosts requiring serialization */
 
 	/* used for hosts requiring serialization */
-	volatile long	host_busy;
+	volatile unsigned long	host_busy;
 };
 
 #define IDE_HOST_BUSY 0
diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h
index a7d6a22..c7a6688 100644
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -15,7 +15,7 @@
 #ifndef __LINUX_IF_PPPOL2TP_H
 #define __LINUX_IF_PPPOL2TP_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 #ifdef __KERNEL__
 #include <linux/in.h>
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 6fb7f17..30c88b2 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -17,7 +17,7 @@
 #define __LINUX_IF_PPPOX_H
 
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <asm/byteorder.h>
 
 #ifdef  __KERNEL__
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index a5cb0c3..f8ff918 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -115,6 +115,11 @@
 extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 			     u16 vlan_tci, int polling);
 extern int vlan_hwaccel_do_receive(struct sk_buff *skb);
+extern int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+			    unsigned int vlan_tci, struct sk_buff *skb);
+extern int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+			  unsigned int vlan_tci,
+			  struct napi_gro_fraginfo *info);
 
 #else
 static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
@@ -140,6 +145,20 @@
 {
 	return 0;
 }
+
+static inline int vlan_gro_receive(struct napi_struct *napi,
+				   struct vlan_group *grp,
+				   unsigned int vlan_tci, struct sk_buff *skb)
+{
+	return NET_RX_DROP;
+}
+
+static inline int vlan_gro_frags(struct napi_struct *napi,
+				 struct vlan_group *grp, unsigned int vlan_tci,
+				 struct napi_gro_fraginfo *info)
+{
+	return NET_RX_DROP;
+}
 #endif
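These hooks let a VLAN-aware driver feed hardware-tagged frames into GRO from its NAPI poll loop instead of using vlan_hwaccel_receive_skb(). A sketch of the receive path; the adapter/descriptor fields are hypothetical driver state:

	/* inside the driver's NAPI poll handler, per received frame */
	if (adapter->vlgrp && (rxd->status & MY_RXD_VLAN_TAG_PRESENT))
		vlan_gro_receive(&adapter->napi, adapter->vlgrp,
				 le16_to_cpu(rxd->vlan_tag), skb);
	else
		napi_gro_receive(&adapter->napi, skb);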
 
 /**
diff --git a/include/linux/input.h b/include/linux/input.h
index 9a6355f..1249a0c 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -16,7 +16,7 @@
 #include <sys/time.h>
 #include <sys/ioctl.h>
 #include <sys/types.h>
-#include <asm/types.h>
+#include <linux/types.h>
 #endif
 
 /*
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 041e95a..32e4b2f 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -49,6 +49,7 @@
 #define IORESOURCE_SIZEALIGN	0x00020000	/* size indicates alignment */
 #define IORESOURCE_STARTALIGN	0x00040000	/* start field is alignment */
 
+#define IORESOURCE_EXCLUSIVE	0x08000000	/* Userland may not map this resource */
 #define IORESOURCE_DISABLED	0x10000000
 #define IORESOURCE_UNSET	0x20000000
 #define IORESOURCE_AUTO		0x40000000
@@ -133,13 +134,17 @@
 }
 
 /* Convenience shorthand with allocation */
-#define request_region(start,n,name)	__request_region(&ioport_resource, (start), (n), (name))
-#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name))
+#define request_region(start,n,name)	__request_region(&ioport_resource, (start), (n), (name), 0)
+#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
+#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
+#define request_mem_region_exclusive(start,n,name) \
+	__request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE)
 #define rename_region(region, newname) do { (region)->name = (newname); } while (0)
 
 extern struct resource * __request_region(struct resource *,
 					resource_size_t start,
-					resource_size_t n, const char *name);
+					resource_size_t n,
+					const char *name, int flags);
 
 /* Compatibility cruft */
 #define release_region(start,n)	__release_region(&ioport_resource, (start), (n))
@@ -175,6 +180,7 @@
 extern void __devm_release_region(struct device *dev, struct resource *parent,
 				  resource_size_t start, resource_size_t n);
 extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
+extern int iomem_is_exclusive(u64 addr);
 
 #endif /* __ASSEMBLY__ */
 #endif	/* _LINUX_IOPORT_H */
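IORESOURCE_EXCLUSIVE marks an MMIO range that userspace must not map (e.g. via /dev/mem or sysfs); a driver opts in by taking the region with the new wrapper instead of request_mem_region(). Sketch for a hypothetical PCI device:

	if (!request_mem_region_exclusive(pci_resource_start(pdev, 0),
					  pci_resource_len(pdev, 0),
					  "my_driver"))
		return -EBUSY;
	/* ioremap and use the registers; userspace mappings of this range are refused */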
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index f98a656..76dad48 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -86,4 +86,6 @@
  */
 extern int ioprio_best(unsigned short aprio, unsigned short bprio);
 
+extern int set_task_ioprio(struct task_struct *task, int ioprio);
+
 #endif
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 346e2b8..64246dc 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -543,6 +543,11 @@
 	unsigned long		t_expires;
 
 	/*
+	 * When this transaction started, in nanoseconds [no locking]
+	 */
+	ktime_t			t_start_time;
+
+	/*
 	 * How many handles used this transaction? [t_handle_lock]
 	 */
 	int t_handle_count;
@@ -609,6 +614,8 @@
  * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
  *	number that will fit in j_blocksize
  * @j_last_sync_writer: most recent pid which did a synchronous write
+ * @j_average_commit_time: the average amount of time in nanoseconds it
+ *	takes to commit a transaction to the disk.
  * @j_private: An opaque pointer to fs-private information.
  */
 
@@ -798,9 +805,19 @@
 	struct buffer_head	**j_wbuf;
 	int			j_wbufsize;
 
+	/*
+	 * this is the pid of the last person to run a synchronous operation
+	 * through the journal.
+	 */
 	pid_t			j_last_sync_writer;
 
 	/*
+	 * the average amount of time in nanoseconds it takes to commit a
+	 * transaction to the disk.  [j_state_lock]
+	 */
+	u64			j_average_commit_time;
+
+	/*
 	 * An opaque pointer to fs-private information.  ext3 puts its
 	 * superblock pointer here
 	 */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 3445647..b45109c 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -638,6 +638,11 @@
 	unsigned long		t_expires;
 
 	/*
+	 * When this transaction started, in nanoseconds [no locking]
+	 */
+	ktime_t			t_start_time;
+
+	/*
 	 * How many handles used this transaction? [t_handle_lock]
 	 */
 	int t_handle_count;
@@ -682,6 +687,8 @@
 	return end + (MAX_JIFFY_OFFSET - start);
 }
 
+#define JBD2_NR_BATCH	64
+
 /**
  * struct journal_s - The journal_s type is the concrete type associated with
  *     journal_t.
@@ -826,6 +833,14 @@
 	struct mutex		j_checkpoint_mutex;
 
 	/*
+	 * List of buffer heads used by the checkpoint routine.  This
+	 * was moved from jbd2_log_do_checkpoint() to reduce stack
+	 * usage.  Access to this array is controlled by the
+	 * j_checkpoint_mutex.  [j_checkpoint_mutex]
+	 */
+	struct buffer_head	*j_chkpt_bhs[JBD2_NR_BATCH];
+
+	/*
 	 * Journal head: identifies the first unused block in the journal.
 	 * [j_state_lock]
 	 */
@@ -939,8 +954,26 @@
 	struct buffer_head	**j_wbuf;
 	int			j_wbufsize;
 
+	/*
+	 * this is the pid of the last person to run a synchronous operation
+	 * through the journal.
+	 */
 	pid_t			j_last_sync_writer;
 
+	/*
+	 * the average amount of time in nanoseconds it takes to commit a
+	 * transaction to disk. [j_state_lock]
+	 */
+	u64			j_average_commit_time;
+
+	/*
+	 * minimum and maximum times that we should wait for
+	 * additional filesystem operations to get batched into a
+	 * synchronous handle in microseconds
+	 */
+	u32			j_min_batch_time;
+	u32			j_max_batch_time;
+
 	/* This function is called when a transaction is closed */
 	void			(*j_commit_callback)(journal_t *,
 						     transaction_t *);
@@ -1102,7 +1135,6 @@
 		   (journal_t *, unsigned long, unsigned long, unsigned long);
 extern void	   jbd2_journal_clear_features
 		   (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int	   jbd2_journal_create     (journal_t *);
 extern int	   jbd2_journal_load       (journal_t *journal);
 extern int	   jbd2_journal_destroy    (journal_t *);
 extern int	   jbd2_journal_recover    (journal_t *journal);
@@ -1177,8 +1209,8 @@
 int jbd2_log_do_checkpoint(journal_t *journal);
 
 void __jbd2_log_wait_for_space(journal_t *journal);
-extern void	__jbd2_journal_drop_transaction(journal_t *, transaction_t *);
-extern int	jbd2_cleanup_journal_tail(journal_t *);
+extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
+extern int jbd2_cleanup_journal_tail(journal_t *);
 
 /* Debugging code only: */
 
diff --git a/include/linux/joystick.h b/include/linux/joystick.h
index b5e0512..9e20c29 100644
--- a/include/linux/joystick.h
+++ b/include/linux/joystick.h
@@ -27,7 +27,7 @@
  * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic
  */
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/input.h>
 
 /*
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 6b8e202..343df9e 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -476,6 +476,12 @@
 	__val = __val < __min ? __min: __val;	\
 	__val > __max ? __max: __val; })
 
+
+/*
+ * swap - swap value of @a and @b
+ */
+#define swap(a, b) ({ typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; })
+
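The macro expands its arguments more than once, so it is meant for plain lvalues without side effects; a trivial usage example:

	int lo = 9, hi = 3;

	if (lo > hi)
		swap(lo, hi);	/* now lo == 3, hi == 9 */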
 /**
  * container_of - cast a member of a structure out to the containing structure
  * @ptr:	the pointer to the member.
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 35525ac..5715f19 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -7,7 +7,7 @@
  * Note: you must update KVM_API_VERSION if you change this interface.
  */
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/ioctl.h>
 #include <asm/kvm.h>
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h
index 81b4207..96eea90 100644
--- a/include/linux/leds-pca9532.h
+++ b/include/linux/leds-pca9532.h
@@ -15,6 +15,7 @@
 #define __LINUX_PCA9532_H
 
 #include <linux/leds.h>
+#include <linux/workqueue.h>
 
 enum pca9532_state {
 	PCA9532_OFF  = 0x0,
@@ -31,6 +32,7 @@
 	struct i2c_client *client;
 	char *name;
 	struct led_classdev ldev;
+	struct work_struct work;
 	enum pca9532_type type;
 	enum pca9532_state state;
 };
diff --git a/include/linux/leds.h b/include/linux/leds.h
index d3a73f5..24489da 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -32,7 +32,10 @@
 	int			 brightness;
 	int			 flags;
 
+	/* Lower 16 bits reflect status */
 #define LED_SUSPENDED		(1 << 0)
+	/* Upper 16 bits reflect control information */
+#define LED_CORE_SUSPENDRESUME	(1 << 16)
 
 	/* Set LED brightness level */
 	/* Must not sleep, use a workqueue if needed */
@@ -62,7 +65,7 @@
 
 extern int led_classdev_register(struct device *parent,
 				 struct led_classdev *led_cdev);
-extern void led_classdev_unregister(struct led_classdev *lcd);
+extern void led_classdev_unregister(struct led_classdev *led_cdev);
 extern void led_classdev_suspend(struct led_classdev *led_cdev);
 extern void led_classdev_resume(struct led_classdev *led_cdev);
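Setting the new control bit in flags asks the LED core to handle suspend/resume itself (off on suspend, brightness restored on resume), so simple drivers no longer call led_classdev_suspend()/led_classdev_resume() by hand. A sketch of a driver's classdev setup with hypothetical names:

	static void my_led_set(struct led_classdev *cdev, enum led_brightness value)
	{
		/* program the hardware (hypothetical) */
	}

	static struct led_classdev my_led = {
		.name		= "myboard:green:status",
		.brightness_set	= my_led_set,
		.flags		= LED_CORE_SUSPENDRESUME,
	};

	/* in probe: led_classdev_register(&pdev->dev, &my_led); */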
 
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 3449de5..2c6bd66 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -239,6 +239,7 @@
 	/* host set flags */
 	ATA_HOST_SIMPLEX	= (1 << 0),	/* Host is simplex, one DMA channel per host only */
 	ATA_HOST_STARTED	= (1 << 1),	/* Host started */
+	ATA_HOST_PARALLEL_SCAN	= (1 << 2),	/* Ports on this host can be scanned in parallel */
 
 	/* bits 24:31 of host->flags are reserved for LLD specific flags */
 
@@ -400,12 +401,14 @@
 				  ATA_TIMING_CYC8B,
 	ATA_TIMING_ACTIVE	= (1 << 4),
 	ATA_TIMING_RECOVER	= (1 << 5),
-	ATA_TIMING_CYCLE	= (1 << 6),
-	ATA_TIMING_UDMA		= (1 << 7),
+	ATA_TIMING_DMACK_HOLD	= (1 << 6),
+	ATA_TIMING_CYCLE	= (1 << 7),
+	ATA_TIMING_UDMA		= (1 << 8),
 	ATA_TIMING_ALL		= ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
 				  ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
 				  ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
-				  ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
+				  ATA_TIMING_DMACK_HOLD | ATA_TIMING_CYCLE |
+				  ATA_TIMING_UDMA,
 };
 
 enum ata_xfer_mask {
@@ -865,6 +868,7 @@
 	unsigned short cyc8b;		/* t0 for 8-bit I/O */
 	unsigned short active;		/* t2 or tD */
 	unsigned short recover;		/* t2i or tK */
+	unsigned short dmack_hold;	/* tj */
 	unsigned short cycle;		/* t0 */
 	unsigned short udma;		/* t2CYCTYP/2 */
 };
@@ -926,6 +930,8 @@
 extern int ata_scsi_detect(struct scsi_host_template *sht);
 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
+extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
+			    int cmd, void __user *arg);
 extern void ata_sas_port_destroy(struct ata_port *);
 extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
 					   struct ata_port_info *, struct Scsi_Host *);
@@ -1518,6 +1524,7 @@
 
 extern const struct ata_port_operations ata_sff_port_ops;
 extern const struct ata_port_operations ata_bmdma_port_ops;
+extern const struct ata_port_operations ata_bmdma32_port_ops;
 
 /* PIO only, sg_tablesize and dma_boundary limits can be removed */
 #define ATA_PIO_SHT(drv_name)					\
@@ -1545,6 +1552,8 @@
 				 const struct ata_taskfile *tf);
 extern unsigned int ata_sff_data_xfer(struct ata_device *dev,
 			unsigned char *buf, unsigned int buflen, int rw);
+extern unsigned int ata_sff_data_xfer32(struct ata_device *dev,
+			unsigned char *buf, unsigned int buflen, int rw);
 extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev,
 			unsigned char *buf, unsigned int buflen, int rw);
 extern u8 ata_sff_irq_on(struct ata_port *ap);
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 23da3fa..aa6fe70 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -43,8 +43,8 @@
 	struct sockaddr_storage	h_addr;		/* peer address */
 	size_t			h_addrlen;
 	struct sockaddr_storage	h_srcaddr;	/* our address (optional) */
-	struct rpc_clnt	*	h_rpcclnt;	/* RPC client to talk to peer */
-	char *			h_name;		/* remote hostname */
+	struct rpc_clnt		*h_rpcclnt;	/* RPC client to talk to peer */
+	char			*h_name;		/* remote hostname */
 	u32			h_version;	/* interface version */
 	unsigned short		h_proto;	/* transport proto */
 	unsigned short		h_reclaiming : 1,
@@ -64,21 +64,29 @@
 	spinlock_t		h_lock;
 	struct list_head	h_granted;	/* Locks in GRANTED state */
 	struct list_head	h_reclaim;	/* Locks in RECLAIM state */
-	struct nsm_handle *	h_nsmhandle;	/* NSM status handle */
-
-	char			h_addrbuf[48],	/* address eyecatchers */
-				h_srcaddrbuf[48];
+	struct nsm_handle	*h_nsmhandle;	/* NSM status handle */
+	char			*h_addrbuf;	/* address eyecatcher */
 };
 
+/*
+ * The largest string sm_addrbuf should hold is a full-size IPv6 address
+ * (no "::" anywhere) with a scope ID.  The buffer size is computed to
+ * hold eight groups of colon-separated four-hex-digit numbers, a
+ * percent sign, a scope id (at most 32 bits, in decimal), and NUL.
+ */
+#define NSM_ADDRBUF		((8 * 4 + 7) + (1 + 10) + 1)
+
 struct nsm_handle {
 	struct list_head	sm_link;
 	atomic_t		sm_count;
-	char *			sm_name;
+	char			*sm_mon_name;
+	char			*sm_name;
 	struct sockaddr_storage	sm_addr;
 	size_t			sm_addrlen;
 	unsigned int		sm_monitored : 1,
 				sm_sticky : 1;	/* don't unmonitor */
-	char			sm_addrbuf[48];	/* address eyecatcher */
+	struct nsm_private	sm_priv;
+	char			sm_addrbuf[NSM_ADDRBUF];
 };
 
 /*
@@ -104,16 +112,6 @@
 	return (struct sockaddr *)&host->h_srcaddr;
 }
 
-static inline struct sockaddr_in *nsm_addr_in(const struct nsm_handle *handle)
-{
-	return (struct sockaddr_in *)&handle->sm_addr;
-}
-
-static inline struct sockaddr *nsm_addr(const struct nsm_handle *handle)
-{
-	return (struct sockaddr *)&handle->sm_addr;
-}
-
 /*
  * Map an fl_owner_t into a unique 32-bit "pid"
  */
@@ -197,6 +195,7 @@
 extern int			nlmsvc_grace_period;
 extern unsigned long		nlmsvc_timeout;
 extern int			nsm_use_hostnames;
+extern int			nsm_local_state;
 
 /*
  * Lockd client functions
@@ -231,10 +230,20 @@
 struct nlm_host * nlm_get_host(struct nlm_host *);
 void		  nlm_release_host(struct nlm_host *);
 void		  nlm_shutdown_hosts(void);
-extern void	  nlm_host_rebooted(const struct sockaddr_in *, const char *,
-					unsigned int, u32);
-void		  nsm_release(struct nsm_handle *);
+void		  nlm_host_rebooted(const struct nlm_reboot *);
 
+/*
+ * Host monitoring
+ */
+int		  nsm_monitor(const struct nlm_host *host);
+void		  nsm_unmonitor(const struct nlm_host *host);
+
+struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
+					const size_t salen,
+					const char *hostname,
+					const size_t hostname_len);
+struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info);
+void		  nsm_release(struct nsm_handle *nsm);
 
 /*
  * This is used in garbage collection and resource reclaim
@@ -282,16 +291,25 @@
 static inline int __nlm_privileged_request4(const struct sockaddr *sap)
 {
 	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
-	return (sin->sin_addr.s_addr == htonl(INADDR_LOOPBACK)) &&
-			(ntohs(sin->sin_port) < 1024);
+
+	if (ntohs(sin->sin_port) > 1023)
+		return 0;
+
+	return ipv4_is_loopback(sin->sin_addr.s_addr);
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 static inline int __nlm_privileged_request6(const struct sockaddr *sap)
 {
 	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
-	return (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK) &&
-			(ntohs(sin6->sin6_port) < 1024);
+
+	if (ntohs(sin6->sin6_port) > 1023)
+		return 0;
+
+	if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED)
+		return ipv4_is_loopback(sin6->sin6_addr.s6_addr32[3]);
+
+	return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK;
 }
 #else	/* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
 static inline int __nlm_privileged_request6(const struct sockaddr *sap)
diff --git a/include/linux/lockd/sm_inter.h b/include/linux/lockd/sm_inter.h
deleted file mode 100644
index 5a5448b..0000000
--- a/include/linux/lockd/sm_inter.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * linux/include/linux/lockd/sm_inter.h
- *
- * Declarations for the kernel statd client.
- *
- * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
- */
-
-#ifndef LINUX_LOCKD_SM_INTER_H
-#define LINUX_LOCKD_SM_INTER_H
-
-#define SM_PROGRAM	100024
-#define SM_VERSION	1
-#define SM_STAT		1
-#define SM_MON		2
-#define SM_UNMON	3
-#define SM_UNMON_ALL	4
-#define SM_SIMU_CRASH	5
-#define SM_NOTIFY	6
-
-#define SM_MAXSTRLEN	1024
-#define SM_PRIV_SIZE	16
-
-/*
- * Arguments for all calls to statd
- */
-struct nsm_args {
-	__be32		addr;		/* remote address */
-	u32		prog;		/* RPC callback info */
-	u32		vers;
-	u32		proc;
-
-	char *		mon_name;
-};
-
-/*
- * Result returned by statd
- */
-struct nsm_res {
-	u32		status;
-	u32		state;
-};
-
-int		nsm_monitor(struct nlm_host *);
-int		nsm_unmonitor(struct nlm_host *);
-extern int	nsm_local_state;
-
-#endif /* LINUX_LOCKD_SM_INTER_H */
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index d6b3a80..7dc5b6c 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -13,6 +13,13 @@
 #include <linux/nfs.h>
 #include <linux/sunrpc/xdr.h>
 
+#define SM_MAXSTRLEN		1024
+#define SM_PRIV_SIZE		16
+
+struct nsm_private {
+	unsigned char		data[SM_PRIV_SIZE];
+};
+
 struct svc_rqst;
 
 #define NLM_MAXCOOKIELEN    	32
@@ -77,10 +84,10 @@
  * statd callback when client has rebooted
  */
 struct nlm_reboot {
-	char *		mon;
-	unsigned int	len;
-	u32		state;
-	__be32		addr;
+	char			*mon;
+	unsigned int		len;
+	u32			state;
+	struct nsm_private	priv;
 };
 
 /*
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 46169a7..6ffd6db 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -80,7 +80,7 @@
 };
 
 #include <asm/posix_types.h>	/* for __kernel_old_dev_t */
-#include <asm/types.h>		/* for __u64 */
+#include <linux/types.h>	/* for __u64 */
 
 /* Backwards compatibility version */
 struct loop_info {
diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h
index e794dfb..97ffdc1 100644
--- a/include/linux/mISDNhw.h
+++ b/include/linux/mISDNhw.h
@@ -57,20 +57,21 @@
 #define FLG_L2DATA		14	/* channel use L2 DATA primitivs */
 #define FLG_ORIGIN		15	/* channel is on origin site */
 /* channel specific stuff */
+#define FLG_FILLEMPTY		16	/* fill fifo on first frame (empty) */
 /* arcofi specific */
-#define FLG_ARCOFI_TIMER	16
-#define FLG_ARCOFI_ERROR	17
+#define FLG_ARCOFI_TIMER	17
+#define FLG_ARCOFI_ERROR	18
 /* isar specific */
-#define FLG_INITIALIZED		16
-#define FLG_DLEETX		17
-#define FLG_LASTDLE		18
-#define FLG_FIRST		19
-#define FLG_LASTDATA		20
-#define FLG_NMD_DATA		21
-#define FLG_FTI_RUN		22
-#define FLG_LL_OK		23
-#define FLG_LL_CONN		24
-#define FLG_DTMFSEND		25
+#define FLG_INITIALIZED		17
+#define FLG_DLEETX		18
+#define FLG_LASTDLE		19
+#define FLG_FIRST		20
+#define FLG_LASTDATA		21
+#define FLG_NMD_DATA		22
+#define FLG_FTI_RUN		23
+#define FLG_LL_OK		24
+#define FLG_LL_CONN		25
+#define FLG_DTMFSEND		26
 
 /* workq events */
 #define FLG_RECVQUEUE		30
@@ -183,6 +184,7 @@
 extern int	dchannel_senddata(struct dchannel *, struct sk_buff *);
 extern int	bchannel_senddata(struct bchannel *, struct sk_buff *);
 extern void	recv_Dchannel(struct dchannel *);
+extern void	recv_Echannel(struct dchannel *, struct dchannel *);
 extern void	recv_Bchannel(struct bchannel *);
 extern void	recv_Dchannel_skb(struct dchannel *, struct sk_buff *);
 extern void	recv_Bchannel_skb(struct bchannel *, struct sk_buff *);
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index 8f2d60d..5da3d95 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -36,8 +36,8 @@
  *              - should be incremented on every checkin
  */
 #define	MISDN_MAJOR_VERSION	1
-#define	MISDN_MINOR_VERSION	0
-#define MISDN_RELEASE		19
+#define	MISDN_MINOR_VERSION	1
+#define MISDN_RELEASE		20
 
 /* primitives for information exchange
  * generell format
@@ -80,6 +80,7 @@
 #define PH_DEACTIVATE_IND	0x0202
 #define PH_DEACTIVATE_CNF	0x4202
 #define PH_DATA_IND		0x2002
+#define PH_DATA_E_IND		0x3002
 #define MPH_ACTIVATE_IND	0x0502
 #define MPH_DEACTIVATE_IND	0x0602
 #define MPH_INFORMATION_IND	0x0702
@@ -199,6 +200,18 @@
 #define ISDN_P_NT_S0  		0x02
 #define ISDN_P_TE_E1		0x03
 #define ISDN_P_NT_E1  		0x04
+#define ISDN_P_TE_UP0		0x05
+#define ISDN_P_NT_UP0		0x06
+
+#define IS_ISDN_P_TE(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_TE_E1) || \
+				(p == ISDN_P_TE_UP0) || (p == ISDN_P_LAPD_TE))
+#define IS_ISDN_P_NT(p) ((p == ISDN_P_NT_S0) || (p == ISDN_P_NT_E1) || \
+				(p == ISDN_P_NT_UP0) || (p == ISDN_P_LAPD_NT))
+#define IS_ISDN_P_S0(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_NT_S0))
+#define IS_ISDN_P_E1(p) ((p == ISDN_P_TE_E1) || (p == ISDN_P_NT_E1))
+#define IS_ISDN_P_UP0(p) ((p == ISDN_P_TE_UP0) || (p == ISDN_P_NT_UP0))
+
+
 #define ISDN_P_LAPD_TE		0x10
 #define	ISDN_P_LAPD_NT		0x11
 
@@ -255,16 +268,6 @@
 	unsigned char	tei;
 };
 
-/* timer device ioctl */
-#define IMADDTIMER	_IOR('I', 64, int)
-#define IMDELTIMER	_IOR('I', 65, int)
-/* socket ioctls */
-#define	IMGETVERSION	_IOR('I', 66, int)
-#define	IMGETCOUNT	_IOR('I', 67, int)
-#define IMGETDEVINFO	_IOR('I', 68, int)
-#define IMCTRLREQ	_IOR('I', 69, int)
-#define IMCLEAR_L2	_IOR('I', 70, int)
-
 struct mISDNversion {
 	unsigned char	major;
 	unsigned char	minor;
@@ -281,6 +284,40 @@
 	char			name[MISDN_MAX_IDLEN];
 };
 
+struct mISDN_devrename {
+	u_int			id;
+	char			name[MISDN_MAX_IDLEN]; /* new name */
+};
+
+/* MPH_INFORMATION_REQ payload */
+struct ph_info_ch {
+	__u32 protocol;
+	__u64 Flags;
+};
+
+struct ph_info_dch {
+	struct ph_info_ch ch;
+	__u16 state;
+	__u16 num_bch;
+};
+
+struct ph_info {
+	struct ph_info_dch dch;
+	struct ph_info_ch  bch[];
+};
+
+/* timer device ioctl */
+#define IMADDTIMER	_IOR('I', 64, int)
+#define IMDELTIMER	_IOR('I', 65, int)
+
+/* socket ioctls */
+#define	IMGETVERSION	_IOR('I', 66, int)
+#define	IMGETCOUNT	_IOR('I', 67, int)
+#define IMGETDEVINFO	_IOR('I', 68, int)
+#define IMCTRLREQ	_IOR('I', 69, int)
+#define IMCLEAR_L2	_IOR('I', 70, int)
+#define IMSETDEVNAME	_IOR('I', 71, struct mISDN_devrename)
+
 static inline int
 test_channelmap(u_int nr, u_char *map)
 {
@@ -312,6 +349,8 @@
 #define MISDN_CTRL_SETPEER		0x0040
 #define MISDN_CTRL_UNSETPEER		0x0080
 #define MISDN_CTRL_RX_OFF		0x0100
+#define MISDN_CTRL_FILL_EMPTY		0x0200
+#define MISDN_CTRL_GETPEER		0x0400
 #define MISDN_CTRL_HW_FEATURES_OP	0x2000
 #define MISDN_CTRL_HW_FEATURES		0x2001
 #define MISDN_CTRL_HFC_OP		0x4000
@@ -362,6 +401,7 @@
 #define DEBUG_L2_TEI		0x00100000
 #define DEBUG_L2_TEIFSM		0x00200000
 #define DEBUG_TIMER		0x01000000
+#define DEBUG_CLOCK		0x02000000
 
 #define mISDN_HEAD_P(s)		((struct mISDNhead *)&s->cb[0])
 #define mISDN_HEAD_PRIM(s)	(((struct mISDNhead *)&s->cb[0])->prim)
@@ -375,6 +415,7 @@
 struct mISDNchannel;
 struct mISDNdevice;
 struct mISDNstack;
+struct mISDNclock;
 
 struct channel_req {
 	u_int			protocol;
@@ -423,7 +464,6 @@
 struct mISDNdevice {
 	struct mISDNchannel	D;
 	u_int			id;
-	char			name[MISDN_MAX_IDLEN];
 	u_int			Dprotocols;
 	u_int			Bprotocols;
 	u_int			nrbchan;
@@ -452,6 +492,16 @@
 #endif
 };
 
+typedef	int	(clockctl_func_t)(void *, int);
+
+struct	mISDNclock {
+	struct list_head	list;
+	char			name[64];
+	int			pri;
+	clockctl_func_t		*ctl;
+	void			*priv;
+};
+
 /* global alloc/queue functions */
 
 static inline struct sk_buff *
@@ -498,12 +548,26 @@
 
 /* global register/unregister functions */
 
-extern int	mISDN_register_device(struct mISDNdevice *, char *name);
+extern int	mISDN_register_device(struct mISDNdevice *,
+					struct device *parent, char *name);
 extern void	mISDN_unregister_device(struct mISDNdevice *);
 extern int	mISDN_register_Bprotocol(struct Bprotocol *);
 extern void	mISDN_unregister_Bprotocol(struct Bprotocol *);
+extern struct mISDNclock *mISDN_register_clock(char *, int, clockctl_func_t *,
+						void *);
+extern void	mISDN_unregister_clock(struct mISDNclock *);
+
+static inline struct mISDNdevice *dev_to_mISDN(struct device *dev)
+{
+	if (dev)
+		return dev_get_drvdata(dev);
+	else
+		return NULL;
+}
 
 extern void	set_channel_address(struct mISDNchannel *, u_int, u_int);
+extern void	mISDN_clock_update(struct mISDNclock *, int, struct timeval *);
+extern unsigned short mISDN_clock_get(void);
 
 #endif /* __KERNEL__ */
 #endif /* mISDNIF_H */
diff --git a/include/linux/magic.h b/include/linux/magic.h
index f7f3fdd..0b4df7e 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -10,10 +10,13 @@
 #define SYSFS_MAGIC		0x62656572
 #define SECURITYFS_MAGIC	0x73636673
 #define TMPFS_MAGIC		0x01021994
+#define SQUASHFS_MAGIC		0x73717368
 #define EFS_SUPER_MAGIC		0x414A53
 #define EXT2_SUPER_MAGIC	0xEF53
 #define EXT3_SUPER_MAGIC	0xEF53
+#define XENFS_SUPER_MAGIC	0xabba1974
 #define EXT4_SUPER_MAGIC	0xEF53
+#define BTRFS_SUPER_MAGIC	0x9123683E
 #define HPFS_SUPER_MAGIC	0xf995e849
 #define ISOFS_SUPER_MAGIC	0x9660
 #define JFFS2_SUPER_MAGIC	0x72b6
diff --git a/include/linux/matroxfb.h b/include/linux/matroxfb.h
index ae5b094..404f678 100644
--- a/include/linux/matroxfb.h
+++ b/include/linux/matroxfb.h
@@ -2,7 +2,7 @@
 #define __LINUX_MATROXFB_H__
 
 #include <asm/ioctl.h>
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/videodev2.h>
 
 struct matroxioc_output_mode {
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1fbe14d..326f45c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -19,22 +19,45 @@
 
 #ifndef _LINUX_MEMCONTROL_H
 #define _LINUX_MEMCONTROL_H
-
+#include <linux/cgroup.h>
 struct mem_cgroup;
 struct page_cgroup;
 struct page;
 struct mm_struct;
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/*
+ * All "charge" functions with gfp_mask should use GFP_KERNEL or
+ * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
+ * allocate memory but reclaims memory from all available zones, so the
+ * "where I want memory from" bits of gfp_mask have no meaning. Any bits of
+ * that field could be used, but having a rule avoids ambiguity: a charge
+ * function's gfp_mask should be set to GFP_KERNEL or to
+ * gfp_mask & GFP_RECLAIM_MASK.
+ * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is sane.)
+ */
 
-extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
+/* for swap handling */
+extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
+		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
+extern void mem_cgroup_commit_charge_swapin(struct page *page,
+					struct mem_cgroup *ptr);
+extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
+
 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
-extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru);
+extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
+extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
+extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
+extern void mem_cgroup_del_lru(struct page *page);
+extern void mem_cgroup_move_lists(struct page *page,
+				  enum lru_list from, enum lru_list to);
 extern void mem_cgroup_uncharge_page(struct page *page);
 extern void mem_cgroup_uncharge_cache_page(struct page *page);
-extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask);
+extern int mem_cgroup_shrink_usage(struct page *page,
+			struct mm_struct *mm, gfp_t gfp_mask);
 
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
@@ -47,12 +70,20 @@
 
 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 
-#define mm_match_cgroup(mm, cgroup)	\
-	((cgroup) == mem_cgroup_from_task((mm)->owner))
+static inline
+int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
+{
+	struct mem_cgroup *mem;
+	rcu_read_lock();
+	mem = mem_cgroup_from_task((mm)->owner);
+	rcu_read_unlock();
+	return cgroup == mem;
+}
 
 extern int
-mem_cgroup_prepare_migration(struct page *page, struct page *newpage);
-extern void mem_cgroup_end_migration(struct page *page);
+mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr);
+extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
+	struct page *oldpage, struct page *newpage);
 
 /*
  * For memory reclaim.
@@ -65,13 +96,32 @@
 							int priority);
 extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
 							int priority);
+int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
+unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
+				       struct zone *zone,
+				       enum lru_list lru);
+struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
+						      struct zone *zone);
+struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat_from_page(struct page *page);
 
-extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
-					int priority, enum lru_list lru);
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+extern int do_swap_account;
+#endif
 
+static inline bool mem_cgroup_disabled(void)
+{
+	if (mem_cgroup_subsys.disabled)
+		return true;
+	return false;
+}
+
+extern bool mem_cgroup_oom_called(struct task_struct *task);
 
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
-static inline int mem_cgroup_charge(struct page *page,
+struct mem_cgroup;
+
+static inline int mem_cgroup_newpage_charge(struct page *page,
 					struct mm_struct *mm, gfp_t gfp_mask)
 {
 	return 0;
@@ -83,6 +133,21 @@
 	return 0;
 }
 
+static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
+		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
+{
+	return 0;
+}
+
+static inline void mem_cgroup_commit_charge_swapin(struct page *page,
+					  struct mem_cgroup *ptr)
+{
+}
+
+static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
+{
+}
+
 static inline void mem_cgroup_uncharge_page(struct page *page)
 {
 }
@@ -91,12 +156,33 @@
 {
 }
 
-static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
+static inline int mem_cgroup_shrink_usage(struct page *page,
+			struct mm_struct *mm, gfp_t gfp_mask)
 {
 	return 0;
 }
 
-static inline void mem_cgroup_move_lists(struct page *page, bool active)
+static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
+{
+}
+
+static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
+{
+	return ;
+}
+
+static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
+{
+	return ;
+}
+
+static inline void mem_cgroup_del_lru(struct page *page)
+{
+	return ;
+}
+
+static inline void
+mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
 {
 }
 
@@ -112,12 +198,14 @@
 }
 
 static inline int
-mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
+mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 {
 	return 0;
 }
 
-static inline void mem_cgroup_end_migration(struct page *page)
+static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
+					struct page *oldpage,
+					struct page *newpage)
 {
 }
 
@@ -146,12 +234,42 @@
 {
 }
 
-static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem,
-					struct zone *zone, int priority,
-					enum lru_list lru)
+static inline bool mem_cgroup_disabled(void)
+{
+	return true;
+}
+
+static inline bool mem_cgroup_oom_called(struct task_struct *task)
+{
+	return false;
+}
+
+static inline int
+mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
+{
+	return 1;
+}
+
+static inline unsigned long
+mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone,
+			 enum lru_list lru)
 {
 	return 0;
 }
+
+
+static inline struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
+{
+	return NULL;
+}
+
+static inline struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+{
+	return NULL;
+}
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
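Editor's note: the try/commit/cancel swap-in charge calls introduced above replace the old single-step charge on the swap-in path. The following is a minimal sketch of how a caller is expected to use them; everything other than the three mem_cgroup_*_charge_swapin() calls (in particular the example_map_page() helper) is invented for illustration.

#include <linux/memcontrol.h>

/* Hypothetical swap-in path showing the reserve/commit/cancel protocol. */
static int example_swapin_fault(struct mm_struct *mm, struct page *page,
				gfp_t gfp_mask)
{
	struct mem_cgroup *ptr = NULL;
	int ret;

	/* Reserve the charge against mm's cgroup before mapping the page. */
	ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &ptr);
	if (ret)
		return ret;			/* over limit: fail early */

	if (example_map_page(mm, page)) {	/* assumed helper */
		/* Mapping failed: drop the reserved charge again. */
		mem_cgroup_cancel_charge_swapin(ptr);
		return -ENOMEM;
	}

	/* Page is now visible: turn the reservation into a real charge. */
	mem_cgroup_commit_charge_swapin(page, ptr);
	return 0;
}
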
diff --git a/include/linux/memstick.h b/include/linux/memstick.h
index d0c37e6..690c35a 100644
--- a/include/linux/memstick.h
+++ b/include/linux/memstick.h
@@ -100,8 +100,8 @@
 #define MEMSTICK_SYS_PAR8   0x40
 #define MEMSTICK_SYS_SERIAL 0x80
 
-	unsigned short data_count;
-	unsigned int   data_address;
+	__be16 data_count;
+	__be32 data_address;
 	unsigned char  tpc_param;
 } __attribute__((packed));
 
diff --git a/include/linux/mfd/pcf50633/adc.h b/include/linux/mfd/pcf50633/adc.h
new file mode 100644
index 0000000..56669b4
--- /dev/null
+++ b/include/linux/mfd/pcf50633/adc.h
@@ -0,0 +1,72 @@
+/*
+ * adc.h  -- Driver for NXP PCF50633 ADC
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_PCF50633_ADC_H
+#define __LINUX_MFD_PCF50633_ADC_H
+
+#include <linux/mfd/pcf50633/core.h>
+#include <linux/platform_device.h>
+
+/* ADC Registers */
+#define PCF50633_REG_ADCC3		0x52
+#define PCF50633_REG_ADCC2		0x53
+#define PCF50633_REG_ADCC1		0x54
+#define PCF50633_REG_ADCS1		0x55
+#define PCF50633_REG_ADCS2		0x56
+#define PCF50633_REG_ADCS3		0x57
+
+#define PCF50633_ADCC1_ADCSTART		0x01
+#define PCF50633_ADCC1_RES_10BIT	0x02
+#define PCF50633_ADCC1_AVERAGE_NO	0x00
+#define PCF50633_ADCC1_AVERAGE_4	0x04
+#define PCF50633_ADCC1_AVERAGE_8	0x08
+#define PCF50633_ADCC1_AVERAGE_16	0x0c
+#define PCF50633_ADCC1_MUX_BATSNS_RES	0x00
+#define PCF50633_ADCC1_MUX_BATSNS_SUBTR	0x10
+#define PCF50633_ADCC1_MUX_ADCIN2_RES	0x20
+#define PCF50633_ADCC1_MUX_ADCIN2_SUBTR	0x30
+#define PCF50633_ADCC1_MUX_BATTEMP	0x60
+#define PCF50633_ADCC1_MUX_ADCIN1	0x70
+#define PCF50633_ADCC1_AVERAGE_MASK	0x0c
+#define PCF50633_ADCC1_ADCMUX_MASK	0xf0
+
+#define PCF50633_ADCC2_RATIO_NONE	0x00
+#define PCF50633_ADCC2_RATIO_BATTEMP	0x01
+#define PCF50633_ADCC2_RATIO_ADCIN1	0x02
+#define PCF50633_ADCC2_RATIO_BOTH	0x03
+#define PCF50633_ADCC2_RATIOSETTL_100US 0x04
+
+#define PCF50633_ADCC3_ACCSW_EN		0x01
+#define PCF50633_ADCC3_NTCSW_EN		0x04
+#define PCF50633_ADCC3_RES_DIV_TWO	0x10
+#define PCF50633_ADCC3_RES_DIV_THREE	0x00
+
+#define PCF50633_ADCS3_REF_NTCSW	0x00
+#define PCF50633_ADCS3_REF_ACCSW	0x10
+#define PCF50633_ADCS3_REF_2V0		0x20
+#define PCF50633_ADCS3_REF_VISA		0x30
+#define PCF50633_ADCS3_REF_2V0_2	0x70
+#define PCF50633_ADCS3_ADCRDY		0x80
+
+#define PCF50633_ADCS3_ADCDAT1L_MASK	0x03
+#define PCF50633_ADCS3_ADCDAT2L_MASK	0x0c
+#define PCF50633_ADCS3_ADCDAT2L_SHIFT	2
+#define PCF50633_ASCS3_REF_MASK		0x70
+
+extern int
+pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
+		void (*callback)(struct pcf50633 *, void *, int),
+		void *callback_param);
+extern int
+pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg);
+
+#endif /* __LINUX_PCF50633_ADC_H */
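Editor's note: a hedged sketch of a caller of the synchronous ADC interface declared above. Only pcf50633_adc_sync_read() and the ADCC1 constants come from this header; the millivolt scaling is a board-specific assumption.

#include <linux/mfd/pcf50633/adc.h>

/* Read the battery sense input, averaged 16 times (scaling is assumed). */
static int example_read_batsns_mv(struct pcf50633 *pcf)
{
	int raw = pcf50633_adc_sync_read(pcf, PCF50633_ADCC1_MUX_BATSNS_RES,
					 PCF50633_ADCC1_AVERAGE_16);

	if (raw < 0)
		return raw;		/* propagate the error code */

	/* 10-bit result; the full-scale voltage used here is illustrative. */
	return raw * 6000 / 1024;
}
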
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
new file mode 100644
index 0000000..4455b21
--- /dev/null
+++ b/include/linux/mfd/pcf50633/core.h
@@ -0,0 +1,218 @@
+/*
+ * core.h  -- Core driver for NXP PCF50633
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_PCF50633_CORE_H
+#define __LINUX_MFD_PCF50633_CORE_H
+
+#include <linux/i2c.h>
+#include <linux/workqueue.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/power_supply.h>
+
+struct pcf50633;
+
+#define PCF50633_NUM_REGULATORS	11
+
+struct pcf50633_platform_data {
+	struct regulator_init_data reg_init_data[PCF50633_NUM_REGULATORS];
+
+	char **batteries;
+	int num_batteries;
+
+	/* Callbacks */
+	void (*probe_done)(struct pcf50633 *);
+	void (*mbc_event_callback)(struct pcf50633 *, int);
+	void (*regulator_registered)(struct pcf50633 *, int);
+	void (*force_shutdown)(struct pcf50633 *);
+
+	u8 resumers[5];
+};
+
+struct pcf50633_subdev_pdata {
+	struct pcf50633 *pcf;
+};
+
+struct pcf50633_irq {
+	void (*handler) (int, void *);
+	void *data;
+};
+
+int pcf50633_register_irq(struct pcf50633 *pcf, int irq,
+			void (*handler) (int, void *), void *data);
+int pcf50633_free_irq(struct pcf50633 *pcf, int irq);
+
+int pcf50633_irq_mask(struct pcf50633 *pcf, int irq);
+int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq);
+int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq);
+
+int pcf50633_read_block(struct pcf50633 *, u8 reg,
+					int nr_regs, u8 *data);
+int pcf50633_write_block(struct pcf50633 *pcf, u8 reg,
+					int nr_regs, u8 *data);
+u8 pcf50633_reg_read(struct pcf50633 *, u8 reg);
+int pcf50633_reg_write(struct pcf50633 *pcf, u8 reg, u8 val);
+
+int pcf50633_reg_set_bit_mask(struct pcf50633 *pcf, u8 reg, u8 mask, u8 val);
+int pcf50633_reg_clear_bits(struct pcf50633 *pcf, u8 reg, u8 bits);
+
+/* Interrupt registers */
+
+#define PCF50633_REG_INT1	0x02
+#define PCF50633_REG_INT2	0x03
+#define PCF50633_REG_INT3	0x04
+#define PCF50633_REG_INT4	0x05
+#define PCF50633_REG_INT5	0x06
+
+#define PCF50633_REG_INT1M	0x07
+#define PCF50633_REG_INT2M	0x08
+#define PCF50633_REG_INT3M	0x09
+#define PCF50633_REG_INT4M	0x0a
+#define PCF50633_REG_INT5M	0x0b
+
+enum {
+	/* Chip IRQs */
+	PCF50633_IRQ_ADPINS,
+	PCF50633_IRQ_ADPREM,
+	PCF50633_IRQ_USBINS,
+	PCF50633_IRQ_USBREM,
+	PCF50633_IRQ_RESERVED1,
+	PCF50633_IRQ_RESERVED2,
+	PCF50633_IRQ_ALARM,
+	PCF50633_IRQ_SECOND,
+	PCF50633_IRQ_ONKEYR,
+	PCF50633_IRQ_ONKEYF,
+	PCF50633_IRQ_EXTON1R,
+	PCF50633_IRQ_EXTON1F,
+	PCF50633_IRQ_EXTON2R,
+	PCF50633_IRQ_EXTON2F,
+	PCF50633_IRQ_EXTON3R,
+	PCF50633_IRQ_EXTON3F,
+	PCF50633_IRQ_BATFULL,
+	PCF50633_IRQ_CHGHALT,
+	PCF50633_IRQ_THLIMON,
+	PCF50633_IRQ_THLIMOFF,
+	PCF50633_IRQ_USBLIMON,
+	PCF50633_IRQ_USBLIMOFF,
+	PCF50633_IRQ_ADCRDY,
+	PCF50633_IRQ_ONKEY1S,
+	PCF50633_IRQ_LOWSYS,
+	PCF50633_IRQ_LOWBAT,
+	PCF50633_IRQ_HIGHTMP,
+	PCF50633_IRQ_AUTOPWRFAIL,
+	PCF50633_IRQ_DWN1PWRFAIL,
+	PCF50633_IRQ_DWN2PWRFAIL,
+	PCF50633_IRQ_LEDPWRFAIL,
+	PCF50633_IRQ_LEDOVP,
+	PCF50633_IRQ_LDO1PWRFAIL,
+	PCF50633_IRQ_LDO2PWRFAIL,
+	PCF50633_IRQ_LDO3PWRFAIL,
+	PCF50633_IRQ_LDO4PWRFAIL,
+	PCF50633_IRQ_LDO5PWRFAIL,
+	PCF50633_IRQ_LDO6PWRFAIL,
+	PCF50633_IRQ_HCLDOPWRFAIL,
+	PCF50633_IRQ_HCLDOOVL,
+
+	/* Always last */
+	PCF50633_NUM_IRQ,
+};
+
+struct pcf50633 {
+	struct device *dev;
+	struct i2c_client *i2c_client;
+
+	struct pcf50633_platform_data *pdata;
+	int irq;
+	struct pcf50633_irq irq_handler[PCF50633_NUM_IRQ];
+	struct work_struct irq_work;
+	struct mutex lock;
+
+	u8 mask_regs[5];
+
+	u8 suspend_irq_masks[5];
+	u8 resume_reason[5];
+	int is_suspended;
+
+	int onkey1s_held;
+
+	struct platform_device *rtc_pdev;
+	struct platform_device *mbc_pdev;
+	struct platform_device *adc_pdev;
+	struct platform_device *input_pdev;
+	struct platform_device *regulator_pdev[PCF50633_NUM_REGULATORS];
+};
+
+enum pcf50633_reg_int1 {
+	PCF50633_INT1_ADPINS	= 0x01,	/* Adapter inserted */
+	PCF50633_INT1_ADPREM	= 0x02,	/* Adapter removed */
+	PCF50633_INT1_USBINS	= 0x04,	/* USB inserted */
+	PCF50633_INT1_USBREM	= 0x08,	/* USB removed */
+	/* reserved */
+	PCF50633_INT1_ALARM	= 0x40, /* RTC alarm time is reached */
+	PCF50633_INT1_SECOND	= 0x80,	/* RTC periodic second interrupt */
+};
+
+enum pcf50633_reg_int2 {
+	PCF50633_INT2_ONKEYR	= 0x01, /* ONKEY rising edge */
+	PCF50633_INT2_ONKEYF	= 0x02, /* ONKEY falling edge */
+	PCF50633_INT2_EXTON1R	= 0x04, /* EXTON1 rising edge */
+	PCF50633_INT2_EXTON1F	= 0x08, /* EXTON1 falling edge */
+	PCF50633_INT2_EXTON2R	= 0x10, /* EXTON2 rising edge */
+	PCF50633_INT2_EXTON2F	= 0x20, /* EXTON2 falling edge */
+	PCF50633_INT2_EXTON3R	= 0x40, /* EXTON3 rising edge */
+	PCF50633_INT2_EXTON3F	= 0x80, /* EXTON3 falling edge */
+};
+
+enum pcf50633_reg_int3 {
+	PCF50633_INT3_BATFULL	= 0x01, /* Battery full */
+	PCF50633_INT3_CHGHALT	= 0x02,	/* Charger halt */
+	PCF50633_INT3_THLIMON	= 0x04,
+	PCF50633_INT3_THLIMOFF	= 0x08,
+	PCF50633_INT3_USBLIMON	= 0x10,
+	PCF50633_INT3_USBLIMOFF	= 0x20,
+	PCF50633_INT3_ADCRDY	= 0x40, /* ADC result ready */
+	PCF50633_INT3_ONKEY1S	= 0x80,	/* ONKEY pressed 1 second */
+};
+
+enum pcf50633_reg_int4 {
+	PCF50633_INT4_LOWSYS		= 0x01,
+	PCF50633_INT4_LOWBAT		= 0x02,
+	PCF50633_INT4_HIGHTMP		= 0x04,
+	PCF50633_INT4_AUTOPWRFAIL	= 0x08,
+	PCF50633_INT4_DWN1PWRFAIL	= 0x10,
+	PCF50633_INT4_DWN2PWRFAIL	= 0x20,
+	PCF50633_INT4_LEDPWRFAIL	= 0x40,
+	PCF50633_INT4_LEDOVP		= 0x80,
+};
+
+enum pcf50633_reg_int5 {
+	PCF50633_INT5_LDO1PWRFAIL	= 0x01,
+	PCF50633_INT5_LDO2PWRFAIL	= 0x02,
+	PCF50633_INT5_LDO3PWRFAIL	= 0x04,
+	PCF50633_INT5_LDO4PWRFAIL	= 0x08,
+	PCF50633_INT5_LDO5PWRFAIL	= 0x10,
+	PCF50633_INT5_LDO6PWRFAIL	= 0x20,
+	PCF50633_INT5_HCLDOPWRFAIL	= 0x40,
+	PCF50633_INT5_HCLDOOVL		= 0x80,
+};
+
+/* misc. registers */
+#define PCF50633_REG_OOCSHDWN	0x0c
+
+/* LED registers */
+#define PCF50633_REG_LEDOUT 0x28
+#define PCF50633_REG_LEDENA 0x29
+#define PCF50633_REG_LEDCTL 0x2a
+#define PCF50633_REG_LEDDIM 0x2b
+
+#endif
+
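Editor's note: the per-chip IRQ numbers and the register/free calls above are the interface the pcf50633 subdevice drivers build on. A rough sketch follows, with the my_adc structure and the setup/teardown names invented, of how a subdevice could hook the ADC-ready interrupt.

#include <linux/completion.h>
#include <linux/mfd/pcf50633/core.h>

struct my_adc {				/* invented example state */
	struct pcf50633 *pcf;
	struct completion done;
};

static void my_adc_irq(int irq, void *data)
{
	struct my_adc *adc = data;

	complete(&adc->done);		/* wake whoever waits for the sample */
}

static int my_adc_setup(struct my_adc *adc)
{
	init_completion(&adc->done);
	return pcf50633_register_irq(adc->pcf, PCF50633_IRQ_ADCRDY,
				     my_adc_irq, adc);
}

static void my_adc_teardown(struct my_adc *adc)
{
	pcf50633_free_irq(adc->pcf, PCF50633_IRQ_ADCRDY);
}
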
diff --git a/include/linux/mfd/pcf50633/gpio.h b/include/linux/mfd/pcf50633/gpio.h
new file mode 100644
index 0000000..a42b845
--- /dev/null
+++ b/include/linux/mfd/pcf50633/gpio.h
@@ -0,0 +1,52 @@
+/*
+ * gpio.h -- GPIO driver for NXP PCF50633
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_PCF50633_GPIO_H
+#define __LINUX_MFD_PCF50633_GPIO_H
+
+#include <linux/mfd/pcf50633/core.h>
+
+#define PCF50633_GPIO1		1
+#define PCF50633_GPIO2		2
+#define PCF50633_GPIO3		3
+#define PCF50633_GPO		4
+
+#define PCF50633_REG_GPIO1CFG	0x14
+#define PCF50633_REG_GPIO2CFG	0x15
+#define PCF50633_REG_GPIO3CFG	0x16
+#define PCF50633_REG_GPOCFG 	0x17

+
+#define PCF50633_GPOCFG_GPOSEL_MASK	0x07
+
+enum pcf50633_reg_gpocfg {
+	PCF50633_GPOCFG_GPOSEL_0	= 0x00,
+	PCF50633_GPOCFG_GPOSEL_LED_NFET	= 0x01,
+	PCF50633_GPOCFG_GPOSEL_SYSxOK	= 0x02,
+	PCF50633_GPOCFG_GPOSEL_CLK32K	= 0x03,
+	PCF50633_GPOCFG_GPOSEL_ADAPUSB	= 0x04,
+	PCF50633_GPOCFG_GPOSEL_USBxOK	= 0x05,
+	PCF50633_GPOCFG_GPOSEL_ACTPH4	= 0x06,
+	PCF50633_GPOCFG_GPOSEL_1	= 0x07,
+	PCF50633_GPOCFG_GPOSEL_INVERSE	= 0x08,
+};
+
+int pcf50633_gpio_set(struct pcf50633 *pcf, int gpio, u8 val);
+u8 pcf50633_gpio_get(struct pcf50633 *pcf, int gpio);
+
+int pcf50633_gpio_invert_set(struct pcf50633 *, int gpio, int invert);
+int pcf50633_gpio_invert_get(struct pcf50633 *pcf, int gpio);
+
+int pcf50633_gpio_power_supply_set(struct pcf50633 *,
+					int gpio, int regulator, int on);
+#endif /* __LINUX_MFD_PCF50633_GPIO_H */
+
+
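Editor's note: a short board-code sketch using the GPIO calls declared above. The particular routing (LED NFET on GPO, LDO4 gated by GPIO2) and the regulator id taken from pmic.h are assumptions made only for illustration.

#include <linux/mfd/pcf50633/gpio.h>
#include <linux/mfd/pcf50633/pmic.h>

static void example_board_gpio_setup(struct pcf50633 *pcf)
{
	/* Route the LED NFET driver onto GPO and invert its polarity. */
	pcf50633_gpio_set(pcf, PCF50633_GPO, PCF50633_GPOCFG_GPOSEL_LED_NFET);
	pcf50633_gpio_invert_set(pcf, PCF50633_GPO, 1);

	/* Let GPIO2 switch LDO4 on and off (board-specific choice). */
	pcf50633_gpio_power_supply_set(pcf, PCF50633_GPIO2,
				       PCF50633_REGULATOR_LDO4, 1);
}
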
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h
new file mode 100644
index 0000000..6e17619
--- /dev/null
+++ b/include/linux/mfd/pcf50633/mbc.h
@@ -0,0 +1,134 @@
+/*
+ * mbc.h  -- Driver for NXP PCF50633 Main Battery Charger
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_PCF50633_MBC_H
+#define __LINUX_MFD_PCF50633_MBC_H
+
+#include <linux/mfd/pcf50633/core.h>
+#include <linux/platform_device.h>
+
+#define PCF50633_REG_MBCC1	0x43
+#define PCF50633_REG_MBCC2	0x44
+#define PCF50633_REG_MBCC3	0x45
+#define PCF50633_REG_MBCC4	0x46
+#define PCF50633_REG_MBCC5	0x47
+#define PCF50633_REG_MBCC6	0x48
+#define PCF50633_REG_MBCC7	0x49
+#define PCF50633_REG_MBCC8	0x4a
+#define PCF50633_REG_MBCS1	0x4b
+#define PCF50633_REG_MBCS2	0x4c
+#define PCF50633_REG_MBCS3	0x4d
+
+enum pcf50633_reg_mbcc1 {
+	PCF50633_MBCC1_CHGENA		= 0x01,	/* Charger enable */
+	PCF50633_MBCC1_AUTOSTOP		= 0x02,
+	PCF50633_MBCC1_AUTORES		= 0x04, /* automatic resume */
+	PCF50633_MBCC1_RESUME		= 0x08, /* explicit resume cmd */
+	PCF50633_MBCC1_RESTART		= 0x10, /* restart charging */
+	PCF50633_MBCC1_PREWDTIME_60M	= 0x20,	/* max. precharging time */
+	PCF50633_MBCC1_WDTIME_1H	= 0x00,
+	PCF50633_MBCC1_WDTIME_2H	= 0x40,
+	PCF50633_MBCC1_WDTIME_4H	= 0x80,
+	PCF50633_MBCC1_WDTIME_6H	= 0xc0,
+};
+#define PCF50633_MBCC1_WDTIME_MASK	  0xc0
+
+enum pcf50633_reg_mbcc2 {
+	PCF50633_MBCC2_VBATCOND_2V7	= 0x00,
+	PCF50633_MBCC2_VBATCOND_2V85	= 0x01,
+	PCF50633_MBCC2_VBATCOND_3V0	= 0x02,
+	PCF50633_MBCC2_VBATCOND_3V15	= 0x03,
+	PCF50633_MBCC2_VMAX_4V		= 0x00,
+	PCF50633_MBCC2_VMAX_4V20	= 0x28,
+	PCF50633_MBCC2_VRESDEBTIME_64S	= 0x80,	/* debounce time (32/64sec) */
+};
+
+enum pcf50633_reg_mbcc7 {
+	PCF50633_MBCC7_USB_100mA	= 0x00,
+	PCF50633_MBCC7_USB_500mA	= 0x01,
+	PCF50633_MBCC7_USB_1000mA	= 0x02,
+	PCF50633_MBCC7_USB_SUSPEND	= 0x03,
+	PCF50633_MBCC7_BATTEMP_EN	= 0x04,
+	PCF50633_MBCC7_BATSYSIMAX_1A6	= 0x00,
+	PCF50633_MBCC7_BATSYSIMAX_1A8	= 0x40,
+	PCF50633_MBCC7_BATSYSIMAX_2A0	= 0x80,
+	PCF50633_MBCC7_BATSYSIMAX_2A2	= 0xc0,
+};
+#define PCF50633_MBCC7_USB_MASK 0x03
+
+enum pcf50633_reg_mbcc8 {
+	PCF50633_MBCC8_USBENASUS	= 0x10,
+};
+
+enum pcf50633_reg_mbcs1 {
+	PCF50633_MBCS1_USBPRES		= 0x01,
+	PCF50633_MBCS1_USBOK		= 0x02,
+	PCF50633_MBCS1_ADAPTPRES	= 0x04,
+	PCF50633_MBCS1_ADAPTOK		= 0x08,
+	PCF50633_MBCS1_TBAT_OK		= 0x00,
+	PCF50633_MBCS1_TBAT_ABOVE	= 0x10,
+	PCF50633_MBCS1_TBAT_BELOW	= 0x20,
+	PCF50633_MBCS1_TBAT_UNDEF	= 0x30,
+	PCF50633_MBCS1_PREWDTEXP	= 0x40,
+	PCF50633_MBCS1_WDTEXP		= 0x80,
+};
+
+enum pcf50633_reg_mbcs2_mbcmod {
+	PCF50633_MBCS2_MBC_PLAY		= 0x00,
+	PCF50633_MBCS2_MBC_USB_PRE	= 0x01,
+	PCF50633_MBCS2_MBC_USB_PRE_WAIT	= 0x02,
+	PCF50633_MBCS2_MBC_USB_FAST	= 0x03,
+	PCF50633_MBCS2_MBC_USB_FAST_WAIT = 0x04,
+	PCF50633_MBCS2_MBC_USB_SUSPEND	= 0x05,
+	PCF50633_MBCS2_MBC_ADP_PRE	= 0x06,
+	PCF50633_MBCS2_MBC_ADP_PRE_WAIT	= 0x07,
+	PCF50633_MBCS2_MBC_ADP_FAST	= 0x08,
+	PCF50633_MBCS2_MBC_ADP_FAST_WAIT = 0x09,
+	PCF50633_MBCS2_MBC_BAT_FULL	= 0x0a,
+	PCF50633_MBCS2_MBC_HALT		= 0x0b,
+};
+#define PCF50633_MBCS2_MBC_MASK		0x0f
+enum pcf50633_reg_mbcs2_chgstat {
+	PCF50633_MBCS2_CHGS_NONE	= 0x00,
+	PCF50633_MBCS2_CHGS_ADAPTER	= 0x10,
+	PCF50633_MBCS2_CHGS_USB		= 0x20,
+	PCF50633_MBCS2_CHGS_BOTH	= 0x30,
+};
+#define PCF50633_MBCS2_RESSTAT_AUTO	0x40
+
+enum pcf50633_reg_mbcs3 {
+	PCF50633_MBCS3_USBLIM_PLAY	= 0x01,
+	PCF50633_MBCS3_USBLIM_CGH	= 0x02,
+	PCF50633_MBCS3_TLIM_PLAY	= 0x04,
+	PCF50633_MBCS3_TLIM_CHG		= 0x08,
+	PCF50633_MBCS3_ILIM		= 0x10,	/* 1: Ibat > Icutoff */
+	PCF50633_MBCS3_VLIM		= 0x20,	/* 1: Vbat == Vmax */
+	PCF50633_MBCS3_VBATSTAT		= 0x40,	/* 1: Vbat > Vbatcond */
+	PCF50633_MBCS3_VRES		= 0x80, /* 1: Vbat > Vth(RES) */
+};
+
+#define PCF50633_MBCC2_VBATCOND_MASK	  0x03
+#define PCF50633_MBCC2_VMAX_MASK	  0x3c
+
+/* Charger status */
+#define PCF50633_MBC_USB_ONLINE		0x01
+#define PCF50633_MBC_USB_ACTIVE		0x02
+#define PCF50633_MBC_ADAPTER_ONLINE	0x04
+#define PCF50633_MBC_ADAPTER_ACTIVE	0x08
+
+int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma);
+
+int pcf50633_mbc_get_status(struct pcf50633 *);
+void pcf50633_mbc_set_status(struct pcf50633 *, int what, int status);
+
+#endif
+
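Editor's note: a hedged example of the charger status/current-limit API above, as a USB stack callback might use it once enumeration settles; the 100/500 mA policy and the function name are assumptions.

#include <linux/mfd/pcf50633/mbc.h>

static void example_usb_configured(struct pcf50633 *pcf, bool configured)
{
	int status = pcf50633_mbc_get_status(pcf);

	if (!(status & PCF50633_MBC_USB_ONLINE))
		return;				/* no USB supply present */

	/* Allow 500 mA after enumeration, otherwise stay at 100 mA. */
	pcf50633_mbc_usb_curlim_set(pcf, configured ? 500 : 100);
}
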
diff --git a/include/linux/mfd/pcf50633/pmic.h b/include/linux/mfd/pcf50633/pmic.h
new file mode 100644
index 0000000..2d3dbe5
--- /dev/null
+++ b/include/linux/mfd/pcf50633/pmic.h
@@ -0,0 +1,67 @@
+#ifndef __LINUX_MFD_PCF50633_PMIC_H
+#define __LINUX_MFD_PCF50633_PMIC_H
+
+#include <linux/mfd/pcf50633/core.h>
+#include <linux/platform_device.h>
+
+#define PCF50633_REG_AUTOOUT	0x1a
+#define PCF50633_REG_AUTOENA	0x1b
+#define PCF50633_REG_AUTOCTL	0x1c
+#define PCF50633_REG_AUTOMXC	0x1d
+#define PCF50633_REG_DOWN1OUT	0x1e
+#define PCF50633_REG_DOWN1ENA	0x1f
+#define PCF50633_REG_DOWN1CTL	0x20
+#define PCF50633_REG_DOWN1MXC	0x21
+#define PCF50633_REG_DOWN2OUT	0x22
+#define PCF50633_REG_DOWN2ENA	0x23
+#define PCF50633_REG_DOWN2CTL	0x24
+#define PCF50633_REG_DOWN2MXC	0x25
+#define PCF50633_REG_MEMLDOOUT	0x26
+#define PCF50633_REG_MEMLDOENA	0x27
+#define PCF50633_REG_LDO1OUT	0x2d
+#define PCF50633_REG_LDO1ENA	0x2e
+#define PCF50633_REG_LDO2OUT	0x2f
+#define PCF50633_REG_LDO2ENA	0x30
+#define PCF50633_REG_LDO3OUT	0x31
+#define PCF50633_REG_LDO3ENA	0x32
+#define PCF50633_REG_LDO4OUT	0x33
+#define PCF50633_REG_LDO4ENA	0x34
+#define PCF50633_REG_LDO5OUT	0x35
+#define PCF50633_REG_LDO5ENA	0x36
+#define PCF50633_REG_LDO6OUT	0x37
+#define PCF50633_REG_LDO6ENA	0x38
+#define PCF50633_REG_HCLDOOUT	0x39
+#define PCF50633_REG_HCLDOENA	0x3a
+#define PCF50633_REG_HCLDOOVL	0x40
+
+enum pcf50633_regulator_enable {
+	PCF50633_REGULATOR_ON		= 0x01,
+	PCF50633_REGULATOR_ON_GPIO1	= 0x02,
+	PCF50633_REGULATOR_ON_GPIO2	= 0x04,
+	PCF50633_REGULATOR_ON_GPIO3	= 0x08,
+};
+#define PCF50633_REGULATOR_ON_MASK	0x0f
+
+enum pcf50633_regulator_phase {
+	PCF50633_REGULATOR_ACTPH1	= 0x00,
+	PCF50633_REGULATOR_ACTPH2	= 0x10,
+	PCF50633_REGULATOR_ACTPH3	= 0x20,
+	PCF50633_REGULATOR_ACTPH4	= 0x30,
+};
+#define PCF50633_REGULATOR_ACTPH_MASK	0x30
+
+enum pcf50633_regulator_id {
+	PCF50633_REGULATOR_AUTO,
+	PCF50633_REGULATOR_DOWN1,
+	PCF50633_REGULATOR_DOWN2,
+	PCF50633_REGULATOR_LDO1,
+	PCF50633_REGULATOR_LDO2,
+	PCF50633_REGULATOR_LDO3,
+	PCF50633_REGULATOR_LDO4,
+	PCF50633_REGULATOR_LDO5,
+	PCF50633_REGULATOR_LDO6,
+	PCF50633_REGULATOR_HCLDO,
+	PCF50633_REGULATOR_MEMLDO,
+};
+#endif
+
diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h
index 96acbfc..be3264e 100644
--- a/include/linux/mfd/wm8350/pmic.h
+++ b/include/linux/mfd/wm8350/pmic.h
@@ -13,6 +13,10 @@
 #ifndef __LINUX_MFD_WM8350_PMIC_H
 #define __LINUX_MFD_WM8350_PMIC_H
 
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/regulator/machine.h>
+
 /*
  * Register values.
  */
@@ -700,6 +704,33 @@
 struct platform_device;
 struct regulator_init_data;
 
+/*
+ * WM8350 LED platform data
+ */
+struct wm8350_led_platform_data {
+	const char *name;
+	const char *default_trigger;
+	int max_uA;
+};
+
+struct wm8350_led {
+	struct platform_device *pdev;
+	struct mutex mutex;
+	struct work_struct work;
+	spinlock_t value_lock;
+	enum led_brightness value;
+	struct led_classdev cdev;
+	int max_uA_index;
+	int enabled;
+
+	struct regulator *isink;
+	struct regulator_consumer_supply isink_consumer;
+	struct regulator_init_data isink_init;
+	struct regulator *dcdc;
+	struct regulator_consumer_supply dcdc_consumer;
+	struct regulator_init_data dcdc_init;
+};
+
 struct wm8350_pmic {
 	/* Number of regulators of each type on this device */
 	int max_dcdc;
@@ -717,10 +748,15 @@
 
 	/* regulator devices */
 	struct platform_device *pdev[NUM_WM8350_REGULATORS];
+
+	/* LED devices */
+	struct wm8350_led led[2];
 };
 
 int wm8350_register_regulator(struct wm8350 *wm8350, int reg,
 			      struct regulator_init_data *initdata);
+int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
+			struct wm8350_led_platform_data *pdata);
 
 /*
  * Additional DCDC control not supported via regulator API
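Editor's note: a sketch of board support wiring one of the two LED slots to a DCDC supply and an ISINK current sink through wm8350_register_led(). The platform data values and the WM8350_DCDC_5/WM8350_ISINK_A ids passed in are assumptions for illustration; only the structures and the registration call come from this header.

#include <linux/mfd/wm8350/core.h>
#include <linux/mfd/wm8350/pmic.h>

static struct wm8350_led_platform_data example_led_pdata = {
	.name		 = "example::backlight",	/* invented names */
	.default_trigger = "backlight",
	.max_uA		 = 27899,			/* board choice */
};

static int example_register_backlight(struct wm8350 *wm8350)
{
	/* LED slot 0 fed from DCDC5 through ISINK A (ids assumed). */
	return wm8350_register_led(wm8350, 0, WM8350_DCDC_5, WM8350_ISINK_A,
				   &example_led_pdata);
}
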
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4a3d28c..e8ddc98 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -56,19 +56,9 @@
 
 extern struct kmem_cache *vm_area_cachep;
 
-/*
- * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is
- * disabled, then there's a single shared list of VMAs maintained by the
- * system, and mm's subscribe to these individually
- */
-struct vm_list_struct {
-	struct vm_list_struct	*next;
-	struct vm_area_struct	*vma;
-};
-
 #ifndef CONFIG_MMU
-extern struct rb_root nommu_vma_tree;
-extern struct rw_semaphore nommu_vma_sem;
+extern struct rb_root nommu_region_tree;
+extern struct rw_semaphore nommu_region_sem;
 
 extern unsigned int kobjsize(const void *objp);
 #endif
@@ -270,7 +260,6 @@
  */
 static inline int get_page_unless_zero(struct page *page)
 {
-	VM_BUG_ON(PageTail(page));
 	return atomic_inc_not_zero(&page->_count);
 }
 
@@ -1061,6 +1050,7 @@
 				unsigned long, enum memmap_context);
 extern void setup_per_zone_pages_min(void);
 extern void mem_init(void);
+extern void __init mmap_init(void);
 extern void show_mem(void);
 extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
@@ -1072,6 +1062,9 @@
 static inline void setup_per_cpu_pageset(void) {}
 #endif
 
+/* nommu.c */
+extern atomic_t mmap_pages_allocated;
+
 /* prio_tree.c */
 void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
 void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index c948350..7fbb972 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -28,6 +28,7 @@
 {
 	list_add(&page->lru, &zone->lru[l].list);
 	__inc_zone_state(zone, NR_LRU_BASE + l);
+	mem_cgroup_add_lru_list(page, l);
 }
 
 static inline void
@@ -35,6 +36,7 @@
 {
 	list_del(&page->lru);
 	__dec_zone_state(zone, NR_LRU_BASE + l);
+	mem_cgroup_del_lru_list(page, l);
 }
 
 static inline void
@@ -54,6 +56,7 @@
 		l += page_is_file_cache(page);
 	}
 	__dec_zone_state(zone, NR_LRU_BASE + l);
+	mem_cgroup_del_lru_list(page, l);
 }
 
 /**
@@ -78,23 +81,4 @@
 	return lru;
 }
 
-/**
- * inactive_anon_is_low - check if anonymous pages need to be deactivated
- * @zone: zone to check
- *
- * Returns true if the zone does not have enough inactive anon pages,
- * meaning some active anon pages need to be deactivated.
- */
-static inline int inactive_anon_is_low(struct zone *zone)
-{
-	unsigned long active, inactive;
-
-	active = zone_page_state(zone, NR_ACTIVE_ANON);
-	inactive = zone_page_state(zone, NR_INACTIVE_ANON);
-
-	if (inactive * zone->inactive_ratio < active)
-		return 1;
-
-	return 0;
-}
 #endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 9cfc9b6..92915e8 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -97,6 +97,23 @@
 };
 
 /*
+ * A region containing a mapping of a non-memory backed file under NOMMU
+ * conditions.  These are held in a global tree and are pinned by the VMAs that
+ * map parts of them.
+ */
+struct vm_region {
+	struct rb_node	vm_rb;		/* link in global region tree */
+	unsigned long	vm_flags;	/* VMA vm_flags */
+	unsigned long	vm_start;	/* start address of region */
+	unsigned long	vm_end;		/* region initialised to here */
+	unsigned long	vm_top;		/* region allocated to here */
+	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
+	struct file	*vm_file;	/* the backing file or NULL */
+
+	atomic_t	vm_usage;	/* region usage count */
+};
+
+/*
  * This struct defines a memory VMM memory area. There is one of these
  * per VM-area/task.  A VM area is any part of the process virtual memory
  * space that has a special rule for the page-fault handlers (ie a shared
@@ -152,7 +169,7 @@
 	unsigned long vm_truncate_count;/* truncate_count or restart_addr */
 
 #ifndef CONFIG_MMU
-	atomic_t vm_usage;		/* refcount (VMAs shared if !MMU) */
+	struct vm_region *vm_region;	/* NOMMU mapping region */
 #endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 35a7b5e..09c14e2 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -263,6 +263,19 @@
 #error ZONES_SHIFT -- too many zones configured adjust calculation
 #endif
 
+struct zone_reclaim_stat {
+	/*
+	 * The pageout code in vmscan.c keeps track of how many of the
+	 * mem/swap backed and file backed pages are referenced.
+	 * The higher the rotated/scanned ratio, the more valuable
+	 * that cache is.
+	 *
+	 * The anon LRU stats live in [0], file LRU stats in [1]
+	 */
+	unsigned long		recent_rotated[2];
+	unsigned long		recent_scanned[2];
+};
+
 struct zone {
 	/* Fields commonly accessed by the page allocator */
 	unsigned long		pages_min, pages_low, pages_high;
@@ -315,16 +328,7 @@
 		unsigned long nr_scan;
 	} lru[NR_LRU_LISTS];
 
-	/*
-	 * The pageout code in vmscan.c keeps track of how many of the
-	 * mem/swap backed and file backed pages are refeferenced.
-	 * The higher the rotated/scanned ratio, the more valuable
-	 * that cache is.
-	 *
-	 * The anon LRU stats live in [0], file LRU stats in [1]
-	 */
-	unsigned long		recent_rotated[2];
-	unsigned long		recent_scanned[2];
+	struct zone_reclaim_stat reclaim_stat;
 
 	unsigned long		pages_scanned;	   /* since last reclaim */
 	unsigned long		flags;		   /* zone flags, see below */
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 00e2b57..88d3d8f 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -520,6 +520,7 @@
 
 #define CFI_MFR_AMD 0x0001
 #define CFI_MFR_ATMEL 0x001F
+#define CFI_MFR_SAMSUNG 0x00EC
 #define CFI_MFR_ST  0x0020 	/* STMicroelectronics */
 
 void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
diff --git a/include/linux/mtd/ftl.h b/include/linux/mtd/ftl.h
index 0be442f..0555f7a 100644
--- a/include/linux/mtd/ftl.h
+++ b/include/linux/mtd/ftl.h
@@ -32,25 +32,25 @@
 #define _LINUX_FTL_H
 
 typedef struct erase_unit_header_t {
-    u_int8_t	LinkTargetTuple[5];
-    u_int8_t	DataOrgTuple[10];
-    u_int8_t	NumTransferUnits;
-    u_int32_t	EraseCount;
-    u_int16_t	LogicalEUN;
-    u_int8_t	BlockSize;
-    u_int8_t	EraseUnitSize;
-    u_int16_t	FirstPhysicalEUN;
-    u_int16_t	NumEraseUnits;
-    u_int32_t	FormattedSize;
-    u_int32_t	FirstVMAddress;
-    u_int16_t	NumVMPages;
-    u_int8_t	Flags;
-    u_int8_t	Code;
-    u_int32_t	SerialNumber;
-    u_int32_t	AltEUHOffset;
-    u_int32_t	BAMOffset;
-    u_int8_t	Reserved[12];
-    u_int8_t	EndTuple[2];
+    uint8_t	LinkTargetTuple[5];
+    uint8_t	DataOrgTuple[10];
+    uint8_t	NumTransferUnits;
+    uint32_t	EraseCount;
+    uint16_t	LogicalEUN;
+    uint8_t	BlockSize;
+    uint8_t	EraseUnitSize;
+    uint16_t	FirstPhysicalEUN;
+    uint16_t	NumEraseUnits;
+    uint32_t	FormattedSize;
+    uint32_t	FirstVMAddress;
+    uint16_t	NumVMPages;
+    uint8_t	Flags;
+    uint8_t	Code;
+    uint32_t	SerialNumber;
+    uint32_t	AltEUHOffset;
+    uint32_t	BAMOffset;
+    uint8_t	Reserved[12];
+    uint8_t	EndTuple[2];
 } erase_unit_header_t;
 
 /* Flags in erase_unit_header_t */
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index aa30244..b981b877 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -223,6 +223,7 @@
 	   must leave it enabled. */
 	void (*set_vpp)(struct map_info *, int);
 
+	unsigned long pfow_base;
 	unsigned long map_priv_1;
 	unsigned long map_priv_2;
 	void *fldrv_priv;
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index eae26bb..3aa5d77 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -15,6 +15,8 @@
 #include <linux/mtd/compatmac.h>
 #include <mtd/mtd-abi.h>
 
+#include <asm/div64.h>
+
 #define MTD_CHAR_MAJOR 90
 #define MTD_BLOCK_MAJOR 31
 #define MAX_MTD_DEVICES 32
@@ -25,20 +27,20 @@
 #define MTD_ERASE_DONE          0x08
 #define MTD_ERASE_FAILED        0x10
 
-#define MTD_FAIL_ADDR_UNKNOWN 0xffffffff
+#define MTD_FAIL_ADDR_UNKNOWN -1LL
 
 /* If the erase fails, fail_addr might indicate exactly which block failed.  If
    fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level or was not
    specific to any particular block. */
 struct erase_info {
 	struct mtd_info *mtd;
-	u_int32_t addr;
-	u_int32_t len;
-	u_int32_t fail_addr;
+	uint64_t addr;
+	uint64_t len;
+	uint64_t fail_addr;
 	u_long time;
 	u_long retries;
-	u_int dev;
-	u_int cell;
+	unsigned dev;
+	unsigned cell;
 	void (*callback) (struct erase_info *self);
 	u_long priv;
 	u_char state;
@@ -46,9 +48,9 @@
 };
 
 struct mtd_erase_region_info {
-	u_int32_t offset;			/* At which this region starts, from the beginning of the MTD */
-	u_int32_t erasesize;		/* For this region */
-	u_int32_t numblocks;		/* Number of blocks of erasesize in this region */
+	uint64_t offset;			/* At which this region starts, from the beginning of the MTD */
+	uint32_t erasesize;		/* For this region */
+	uint32_t numblocks;		/* Number of blocks of erasesize in this region */
 	unsigned long *lockmap;		/* If keeping bitmap of locks */
 };
 
@@ -83,7 +85,7 @@
  * @datbuf:	data buffer - if NULL only oob data are read/written
  * @oobbuf:	oob data buffer
  *
- * Note, it is allowed to read more then one OOB area at one go, but not write.
+ * Note, it is allowed to read more than one OOB area at one go, but not write.
  * The interface assumes that the OOB write requests program only one page's
  * OOB area.
  */
@@ -100,14 +102,14 @@
 
 struct mtd_info {
 	u_char type;
-	u_int32_t flags;
-	u_int32_t size;	 // Total size of the MTD
+	uint32_t flags;
+	uint64_t size;	 // Total size of the MTD
 
 	/* "Major" erase size for the device. Naïve users may take this
 	 * to be the only erase size available, or may use the more detailed
 	 * information below if they desire
 	 */
-	u_int32_t erasesize;
+	uint32_t erasesize;
 	/* Minimal writable flash unit size. In case of NOR flash it is 1 (even
 	 * though individual bits can be cleared), in case of NAND flash it is
 	 * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR
@@ -115,10 +117,20 @@
 	 * Any driver registering a struct mtd_info must ensure a writesize of
 	 * 1 or larger.
 	 */
-	u_int32_t writesize;
+	uint32_t writesize;
 
-	u_int32_t oobsize;   // Amount of OOB data per block (e.g. 16)
-	u_int32_t oobavail;  // Available OOB bytes per block
+	uint32_t oobsize;   // Amount of OOB data per block (e.g. 16)
+	uint32_t oobavail;  // Available OOB bytes per block
+
+	/*
+	 * If erasesize is a power of 2 then the shift is stored in
+	 * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize.
+	 */
+	unsigned int erasesize_shift;
+	unsigned int writesize_shift;
+	/* Masks based on erasesize_shift and writesize_shift */
+	unsigned int erasesize_mask;
+	unsigned int writesize_mask;
 
 	// Kernel-only stuff starts here.
 	const char *name;
@@ -190,8 +202,8 @@
 	void (*sync) (struct mtd_info *mtd);
 
 	/* Chip-supported device locking */
-	int (*lock) (struct mtd_info *mtd, loff_t ofs, size_t len);
-	int (*unlock) (struct mtd_info *mtd, loff_t ofs, size_t len);
+	int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
+	int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
 
 	/* Power Management functions */
 	int (*suspend) (struct mtd_info *mtd);
@@ -221,6 +233,35 @@
 	void (*put_device) (struct mtd_info *mtd);
 };
 
+static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
+{
+	if (mtd->erasesize_shift)
+		return sz >> mtd->erasesize_shift;
+	do_div(sz, mtd->erasesize);
+	return sz;
+}
+
+static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
+{
+	if (mtd->erasesize_shift)
+		return sz & mtd->erasesize_mask;
+	return do_div(sz, mtd->erasesize);
+}
+
+static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
+{
+	if (mtd->writesize_shift)
+		return sz >> mtd->writesize_shift;
+	do_div(sz, mtd->writesize);
+	return sz;
+}
+
+static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
+{
+	if (mtd->writesize_shift)
+		return sz & mtd->writesize_mask;
+	return do_div(sz, mtd->writesize);
+}
 
 	/* Kernel-side ioctl definitions */
 
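Editor's note: with mtd->size and erase addresses now 64-bit, the shift/do_div helpers above are the intended way to split an offset without open-coding do_div(). A small sketch:

/* Convert an absolute offset into an eraseblock index plus remainder. */
static inline uint32_t example_offset_to_block(struct mtd_info *mtd,
					       uint64_t ofs,
					       uint32_t *ofs_in_block)
{
	*ofs_in_block = mtd_mod_by_eb(ofs, mtd);	/* offset inside block */
	return mtd_div_by_eb(ofs, mtd);			/* block index */
}
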
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 733d3f3..db5b63d 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -335,17 +335,12 @@
  * @erase_cmd:		[INTERN] erase command write function, selectable due to AND support
  * @scan_bbt:		[REPLACEABLE] function to scan bad block table
  * @chip_delay:		[BOARDSPECIFIC] chip dependent delay for transfering data from array to read regs (tR)
- * @wq:			[INTERN] wait queue to sleep on if a NAND operation is in progress
  * @state:		[INTERN] the current state of the NAND device
  * @oob_poi:		poison value buffer
  * @page_shift:		[INTERN] number of address bits in a page (column address bits)
  * @phys_erase_shift:	[INTERN] number of address bits in a physical eraseblock
  * @bbt_erase_shift:	[INTERN] number of address bits in a bbt entry
  * @chip_shift:		[INTERN] number of address bits in one chip
- * @datbuf:		[INTERN] internal buffer for one page + oob
- * @oobbuf:		[INTERN] oob buffer for one eraseblock
- * @oobdirty:		[INTERN] indicates that oob_buf must be reinitialized
- * @data_poi:		[INTERN] pointer to a data buffer
  * @options:		[BOARDSPECIFIC] various chip options. They can partly be set to inform nand_scan about
  *			special functionality. See the defines for further explanation
  * @badblockpos:	[INTERN] position of the bad block marker in the oob area
@@ -399,7 +394,7 @@
 	int		bbt_erase_shift;
 	int		chip_shift;
 	int		numchips;
-	unsigned long	chipsize;
+	uint64_t	chipsize;
 	int		pagemask;
 	int		pagebuf;
 	int		subpagesize;
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index c92b4d4..a45dd83 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -36,9 +36,9 @@
 
 struct mtd_partition {
 	char *name;			/* identifier string */
-	u_int32_t size;			/* partition size */
-	u_int32_t offset;		/* offset within the master MTD space */
-	u_int32_t mask_flags;		/* master MTD flags to mask out for this partition */
+	uint64_t size;			/* partition size */
+	uint64_t offset;		/* offset within the master MTD space */
+	uint32_t mask_flags;		/* master MTD flags to mask out for this partition */
 	struct nand_ecclayout *ecclayout;	/* out of band layout for this partition (NAND only)*/
 	struct mtd_info **mtdp;		/* pointer to store the MTD object */
 };
diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h
new file mode 100644
index 0000000..b730d4f
--- /dev/null
+++ b/include/linux/mtd/pfow.h
@@ -0,0 +1,159 @@
+/* Primary function overlay window definitions
+ * and service functions used by LPDDR chips
+ */
+#ifndef __LINUX_MTD_PFOW_H
+#define __LINUX_MTD_PFOW_H
+
+#include <linux/mtd/qinfo.h>
+
+/* PFOW registers addressing */
+/* Address of symbol "P" */
+#define PFOW_QUERY_STRING_P			0x0000
+/* Address of symbol "F" */
+#define PFOW_QUERY_STRING_F			0x0002
+/* Address of symbol "O" */
+#define PFOW_QUERY_STRING_O			0x0004
+/* Address of symbol "W" */
+#define PFOW_QUERY_STRING_W			0x0006
+/* Identification info for LPDDR chip */
+#define PFOW_MANUFACTURER_ID			0x0020
+#define PFOW_DEVICE_ID				0x0022
+/* Address in PFOW where prog buffer can be found */
+#define PFOW_PROGRAM_BUFFER_OFFSET		0x0040
+/* Size of program buffer in words */
+#define PFOW_PROGRAM_BUFFER_SIZE		0x0042
+/* Address command code register */
+#define PFOW_COMMAND_CODE			0x0080
+/* command data register */
+#define PFOW_COMMAND_DATA			0x0084
+/* command address register lower address bits */
+#define PFOW_COMMAND_ADDRESS_L			0x0088
+/* command address register upper address bits */
+#define PFOW_COMMAND_ADDRESS_H			0x008a
+/* number of bytes to be programmed lower address bits */
+#define PFOW_DATA_COUNT_L			0x0090
+/* number of bytes to be programmed higher address bits */
+#define PFOW_DATA_COUNT_H			0x0092
+/* command execution register, the only possible value is 0x01 */
+#define PFOW_COMMAND_EXECUTE			0x00c0
+/* 0x01 should be written at this address to clear buffer */
+#define PFOW_CLEAR_PROGRAM_BUFFER		0x00c4
+/* device program/erase suspend register */
+#define PFOW_PROGRAM_ERASE_SUSPEND		0x00c8
+/* device status register */
+#define PFOW_DSR				0x00cc
+
+/* LPDDR memory device command codes */
+/* They are possible values of PFOW command code register */
+#define LPDDR_WORD_PROGRAM		0x0041
+#define LPDDR_BUFF_PROGRAM		0x00E9
+#define LPDDR_BLOCK_ERASE		0x0020
+#define LPDDR_LOCK_BLOCK		0x0061
+#define LPDDR_UNLOCK_BLOCK		0x0062
+#define LPDDR_READ_BLOCK_LOCK_STATUS	0x0065
+#define LPDDR_INFO_QUERY		0x0098
+#define LPDDR_READ_OTP			0x0097
+#define LPDDR_PROG_OTP			0x00C0
+#define LPDDR_RESUME			0x00D0
+
+/* Defines possible value of PFOW command execution register */
+#define LPDDR_START_EXECUTION			0x0001
+
+/* Defines possible value of PFOW program/erase suspend register */
+#define LPDDR_SUSPEND				0x0001
+
+/* Possible values of PFOW device status register */
+/* access R - read; RC read & clearable */
+#define DSR_DPS			(1<<1) /* RC; device protect status
+					* 0 - not protected 1 - locked */
+#define DSR_PSS			(1<<2) /* R; program suspend status;
+					* 0-prog in progress/completed,
+					* 1- prog suspended */
+#define DSR_VPPS		(1<<3) /* RC; 0-Vpp OK, 1-Vpp low */
+#define DSR_PROGRAM_STATUS	(1<<4) /* RC; 0-successful, 1-error */
+#define DSR_ERASE_STATUS	(1<<5) /* RC; erase or blank check status;
+					* 0-success erase/blank check,
+					* 1 blank check error */
+#define DSR_ESS			(1<<6) /* R; erase suspend status;
+					* 0-erase in progress/complete,
+					* 1 erase suspended */
+#define DSR_READY_STATUS	(1<<7) /* R; Device status
+					* 0-busy,
+					* 1-ready */
+#define DSR_RPS			(0x3<<8) /* RC;  region program status
+					* 00 - Success,
+					* 01-re-program attempt in region with
+					* object mode data,
+					* 10-object mode program attempt in
+					* region with control mode data
+					* 11-attempt to program invalid half
+					* with 0x41 command */
+#define DSR_AOS			(1<<12) /* RC; 1- AO related failure */
+#define DSR_AVAILABLE		(1<<15) /* R; Device availability
+					* 1 - Device available
+					* 0 - not available */
+
+/* The superset of all possible error bits in DSR */
+#define DSR_ERR			0x133A
+
+static inline void send_pfow_command(struct map_info *map,
+				unsigned long cmd_code, unsigned long adr,
+				unsigned long len, map_word *datum)
+{
+	int bits_per_chip = map_bankwidth(map) * 8;
+	int chipnum;
+	struct lpddr_private *lpddr = map->fldrv_priv;
+	chipnum = adr >> lpddr->chipshift;
+
+	map_write(map, CMD(cmd_code), map->pfow_base + PFOW_COMMAND_CODE);
+	map_write(map, CMD(adr & ((1<<bits_per_chip) - 1)),
+				map->pfow_base + PFOW_COMMAND_ADDRESS_L);
+	map_write(map, CMD(adr>>bits_per_chip),
+				map->pfow_base + PFOW_COMMAND_ADDRESS_H);
+	if (len) {
+		map_write(map, CMD(len & ((1<<bits_per_chip) - 1)),
+					map->pfow_base + PFOW_DATA_COUNT_L);
+		map_write(map, CMD(len>>bits_per_chip),
+					map->pfow_base + PFOW_DATA_COUNT_H);
+	}
+	if (datum)
+		map_write(map, *datum, map->pfow_base + PFOW_COMMAND_DATA);
+
+	/* Command execution start */
+	map_write(map, CMD(LPDDR_START_EXECUTION),
+			map->pfow_base + PFOW_COMMAND_EXECUTE);
+}
+
+static inline void print_drs_error(unsigned dsr)
+{
+	int prog_status = (dsr & DSR_RPS) >> 8;
+
+	if (!(dsr & DSR_AVAILABLE))
+		printk(KERN_NOTICE"DSR.15: (0) Device not Available\n");
+	if (prog_status & 0x03)
+		printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid "
+						"half with 41h command\n");
+	else if (prog_status & 0x02)
+		printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt "
+					"in region with Control Mode data\n");
+	else if (prog_status &  0x01)
+		printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region "
+						"with Object Mode data\n");
+	if (!(dsr & DSR_READY_STATUS))
+		printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n");
+	if (dsr & DSR_ESS)
+		printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n");
+	if (dsr & DSR_ERASE_STATUS)
+		printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n");
+	if (dsr & DSR_PROGRAM_STATUS)
+		printk(KERN_NOTICE"DSR.4: (1) Program Error\n");
+	if (dsr & DSR_VPPS)
+		printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation "
+					"aborted\n");
+	if (dsr & DSR_PSS)
+		printk(KERN_NOTICE"DSR.2: (1) Program suspended\n");
+	if (dsr & DSR_DPS)
+		printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt "
+					"on locked block\n");
+}
+#endif /* __LINUX_MTD_PFOW_H */
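Editor's note: a rough sketch of how a chip driver might drive the window defined above for a block erase. Real code takes the chip state machine lock and polls with a timeout; the naive busy-wait and the use of map_read()/CMDVAL() to fetch DSR are only illustrative assumptions.

static int example_pfow_erase_block(struct map_info *map, unsigned long adr)
{
	unsigned long dsr;

	/* Queue the erase; no byte count or data word is required. */
	send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);

	/* Naive wait until the device reports ready (no timeout here). */
	do {
		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
	} while (!(dsr & DSR_READY_STATUS));

	if (dsr & DSR_ERR) {
		print_drs_error(dsr);
		return -EIO;
	}
	return 0;
}
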
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index c8e63a5..76f7cab 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -24,6 +24,7 @@
 	unsigned int		width;
 	void			(*set_vpp)(struct map_info *, int);
 	unsigned int		nr_parts;
+	unsigned int		pfow_base;
 	struct mtd_partition	*parts;
 };
 
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
new file mode 100644
index 0000000..7b3d487
--- /dev/null
+++ b/include/linux/mtd/qinfo.h
@@ -0,0 +1,91 @@
+#ifndef __LINUX_MTD_QINFO_H
+#define __LINUX_MTD_QINFO_H
+
+#include <linux/mtd/map.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/partitions.h>
+
+/* lpddr_private describes lpddr flash chip in memory map
+ * @ManufactId - Chip Manufacturer ID
+ * @DevId - Chip Device ID
+ * @qinfo - pointer to qinfo records describing the chip
+ * @numchips - number of chips including virtual RWW partitions
+ * @chipshift - Chip/partition size 2^chipshift
+ * @chips - per-chip data structure
+ */
+struct lpddr_private {
+	uint16_t ManufactId;
+	uint16_t DevId;
+	struct qinfo_chip *qinfo;
+	int numchips;
+	unsigned long chipshift;
+	struct flchip chips[0];
+};
+
+/* qinfo_query_info structure contains request information for
+ * each qinfo record
+ * @major - major number of qinfo record
+ * @minor - minor number of qinfo record
+ * @id_str - descriptive string to access the record
+ * @desc - detailed description for the qinfo record
+ */
+struct qinfo_query_info {
+	uint8_t	major;
+	uint8_t	minor;
+	char *id_str;
+	char *desc;
+};
+
+/*
+ * qinfo_chip structure contains necessary qinfo records data
+ * @DevSizeShift - Device size 2^n bytes
+ * @BufSizeShift - Program buffer size 2^n bytes
+ * @TotalBlocksNum - Total number of blocks
+ * @UniformBlockSizeShift - Uniform block size 2^UniformBlockSizeShift bytes
+ * @HWPartsNum - Number of hardware partitions
+ * @SuspEraseSupp - Suspend erase supported
+ * @SingleWordProgTime - Single word program 2^SingleWordProgTime u-sec
+ * @ProgBufferTime - Program buffer write 2^ProgBufferTime u-sec
+ * @BlockEraseTime - Block erase 2^BlockEraseTime m-sec
+ */
+struct qinfo_chip {
+	/* General device info */
+	uint16_t DevSizeShift;
+	uint16_t BufSizeShift;
+	/* Erase block information */
+	uint16_t TotalBlocksNum;
+	uint16_t UniformBlockSizeShift;
+	/* Partition information */
+	uint16_t HWPartsNum;
+	/* Optional features */
+	uint16_t SuspEraseSupp;
+	/* Operation typical time */
+	uint16_t SingleWordProgTime;
+	uint16_t ProgBufferTime;
+	uint16_t BlockEraseTime;
+};
+
+/* defines for fixup usage */
+#define LPDDR_MFR_ANY		0xffff
+#define LPDDR_ID_ANY		0xffff
+#define NUMONYX_MFGR_ID		0x0089
+#define R18_DEVICE_ID_1G	0x893c
+
+static inline map_word lpddr_build_cmd(u_long cmd, struct map_info *map)
+{
+	map_word val = { {0} };
+	val.x[0] = cmd;
+	return val;
+}
+
+#define CMD(x) lpddr_build_cmd(x, map)
+#define CMDVAL(cmd) cmd.x[0]
+
+struct mtd_info *lpddr_cmdset(struct map_info *);
+
+#endif
+
diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h
new file mode 100644
index 0000000..25f4d2a
--- /dev/null
+++ b/include/linux/mtd/sharpsl.h
@@ -0,0 +1,20 @@
+/*
+ * SharpSL NAND support
+ *
+ * Copyright (C) 2008 Dmitry Baryshkov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+struct sharpsl_nand_platform_data {
+	struct nand_bbt_descr	*badblock_pattern;
+	struct nand_ecclayout	*ecc_layout;
+	struct mtd_partition	*partitions;
+	unsigned int		nr_partitions;
+};
diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h
index 9f2d763..f69e66d 100644
--- a/include/linux/ncp_fs.h
+++ b/include/linux/ncp_fs.h
@@ -87,7 +87,7 @@
 #define NCP_AUTH_NDS	0x32
 	int		auth_type;
 	size_t		object_name_len;
-	void __user *	object_name;	/* an userspace data, in most cases user name */
+	void __user *	object_name;	/* a userspace data, in most cases user name */
 };
 
 struct ncp_privatedata_ioctl
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c28bbba..ec54785 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -467,7 +467,7 @@
  *     This function is called when network device transistions to the down
  *     state.
  *
- * int (*ndo_hard_start_xmit)(struct sk_buff *skb, struct net_device *dev);
+ * int (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev);
  *	Called when a packet needs to be transmitted.
  *	Must return NETDEV_TX_OK , NETDEV_TX_BUSY, or NETDEV_TX_LOCKED,
  *	Required can not be NULL.
@@ -795,6 +795,7 @@
 	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
 	       NETREG_UNREGISTERED,	/* completed unregister todo */
 	       NETREG_RELEASED,		/* called free_netdev */
+	       NETREG_DUMMY,		/* dummy device for NAPI poll */
 	} reg_state;
 
 	/* Called from unregister, can be used to call free_netdev */
@@ -1077,6 +1078,8 @@
 extern void		synchronize_net(void);
 extern int 		register_netdevice_notifier(struct notifier_block *nb);
 extern int		unregister_netdevice_notifier(struct notifier_block *nb);
+extern int		init_dummy_netdev(struct net_device *dev);
+
 extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
 extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
@@ -1125,9 +1128,6 @@
 	struct sk_buff		*completion_queue;
 
 	struct napi_struct	backlog;
-#ifdef CONFIG_NET_DMA
-	struct dma_chan		*net_dma;
-#endif
 };
 
 DECLARE_PER_CPU(struct softnet_data,softnet_data);
@@ -1373,8 +1373,14 @@
 #define HAVE_NETIF_RECEIVE_SKB 1
 extern int		netif_receive_skb(struct sk_buff *skb);
 extern void		napi_gro_flush(struct napi_struct *napi);
+extern int		dev_gro_receive(struct napi_struct *napi,
+					struct sk_buff *skb);
 extern int		napi_gro_receive(struct napi_struct *napi,
 					 struct sk_buff *skb);
+extern void		napi_reuse_skb(struct napi_struct *napi,
+				       struct sk_buff *skb);
+extern struct sk_buff *	napi_fraginfo_skb(struct napi_struct *napi,
+					  struct napi_gro_fraginfo *info);
 extern int		napi_gro_frags(struct napi_struct *napi,
 				       struct napi_gro_fraginfo *info);
 extern void		netif_nit_deliver(struct sk_buff *skb);
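Editor's note: the new napi_gro_receive() entry point slots into a driver's NAPI poll loop where netif_receive_skb() used to be called. A hedged sketch, with the example_rx_pop() helper invented:

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int done = 0;

	while (done < budget && (skb = example_rx_pop(napi)) != NULL) {
		/* Give GRO a chance to coalesce before the stack sees it. */
		napi_gro_receive(napi, skb);
		done++;
	}

	if (done < budget)
		napi_complete(napi);	/* out of work: leave polling mode */

	return done;
}
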
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index e52ce47..c7ee874 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -270,6 +270,7 @@
 	struct list_head list;
 
 	const char name[XT_FUNCTION_MAXNAMELEN-1];
+	u_int8_t revision;
 
 	/* Return true or false: return FALSE and set *hotdrop = 1 to
            force immediate packet drop. */
@@ -302,7 +303,6 @@
 	unsigned short proto;
 
 	unsigned short family;
-	u_int8_t revision;
 };
 
 /* Registration hooks for targets. */
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index ea03667..b912311 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -88,6 +88,8 @@
 #define NFS4_ACE_GENERIC_EXECUTE              0x001200A0
 #define NFS4_ACE_MASK_ALL                     0x001F01FF
 
+#define NFS4_MAX_UINT64	(~(u64)0)
+
 enum nfs4_acl_whotype {
 	NFS4_ACL_WHO_NAMED = 0,
 	NFS4_ACL_WHO_OWNER,
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
index 2126940..e19f459 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/include/linux/nfsd/nfsd.h
@@ -23,7 +23,6 @@
 /*
  * nfsd version
  */
-#define NFSD_VERSION		"0.5"
 #define NFSD_SUPPORTED_MINOR_VERSION	0
 
 /*
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index d1941cb..b2e0938 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -68,6 +68,10 @@
  *     1  - 4 byte user specified identifier
  *     2  - 4 byte major, 4 byte minor, 4 byte inode number - DEPRECATED
  *     3  - 4 byte device id, encoded for user-space, 4 byte inode number
+ *     4  - 4 byte inode number and 4 byte uuid
+ *     5  - 8 byte uuid
+ *     6  - 16 byte uuid
+ *     7  - 8 byte inode number and 16 byte uuid
  *
  * The fileid_type identified how the file within the filesystem is encoded.
  * This is (will be) passed to, and set by, the underlying filesystem if it supports
diff --git a/include/linux/nwpserial.h b/include/linux/nwpserial.h
new file mode 100644
index 0000000..9acb215
--- /dev/null
+++ b/include/linux/nwpserial.h
@@ -0,0 +1,18 @@
+/*
+ *  Serial Port driver for a NWP uart device
+ *
+ *    Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef _NWPSERIAL_H
+#define _NWPSERIAL_H
+
+int nwpserial_register_port(struct uart_port *port);
+void nwpserial_unregister_port(int line);
+
+#endif /* _NWPSERIAL_H */
diff --git a/include/linux/of_i2c.h b/include/linux/of_i2c.h
index bd2a870..34974b5 100644
--- a/include/linux/of_i2c.h
+++ b/include/linux/of_i2c.h
@@ -17,4 +17,7 @@
 void of_register_i2c_devices(struct i2c_adapter *adap,
 			     struct device_node *adap_node);
 
+/* must call put_device() when done with returned i2c_client device */
+struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
+
 #endif /* __LINUX_OF_I2C_H */
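Editor's note: a short usage sketch for the lookup helper added above, showing the put_device() that its comment asks for; the surrounding function is invented.

#include <linux/of_i2c.h>

static void example_use_i2c_node(struct device_node *np)
{
	struct i2c_client *client = of_find_i2c_device_by_node(np);

	if (!client)
		return;
	/* ... talk to the client ... */
	put_device(&client->dev);	/* balance the reference taken above */
}
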
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 1ce9fe5..1d9518b 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -164,4 +164,22 @@
 unsigned long oprofile_get_cpu_buffer_size(void);
 void oprofile_cpu_buffer_inc_smpl_lost(void);
  
+/* cpu buffer functions */
+
+struct op_sample;
+
+struct op_entry {
+	struct ring_buffer_event *event;
+	struct op_sample *sample;
+	unsigned long irq_flags;
+	unsigned long size;
+	unsigned long *data;
+};
+
+void oprofile_write_reserve(struct op_entry *entry,
+			    struct pt_regs * const regs,
+			    unsigned long pc, int code, int size);
+int oprofile_add_data(struct op_entry *entry, unsigned long val);
+int oprofile_write_commit(struct op_entry *entry);
+
 #endif /* OPROFILE_H */
diff --git a/include/linux/oxu210hp.h b/include/linux/oxu210hp.h
new file mode 100644
index 0000000..0bf96ea
--- /dev/null
+++ b/include/linux/oxu210hp.h
@@ -0,0 +1,7 @@
+/* platform data for the OXU210HP HCD */
+
+struct oxu210hp_platform_data {
+	unsigned int bus16:1;
+	unsigned int use_hcd_otg:1;
+	unsigned int use_hcd_sph:1;
+};
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 1e6d34b..602cc1f 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -26,10 +26,6 @@
 	PCG_LOCK,  /* page cgroup is locked */
 	PCG_CACHE, /* charged as cache */
 	PCG_USED, /* this object is in use. */
-	/* flags for LRU placement */
-	PCG_ACTIVE, /* page is active in this cgroup */
-	PCG_FILE, /* page is file system backed */
-	PCG_UNEVICTABLE, /* page is unevictableable */
 };
 
 #define TESTPCGFLAG(uname, lname)			\
@@ -50,19 +46,6 @@
 TESTPCGFLAG(Used, USED)
 CLEARPCGFLAG(Used, USED)
 
-/* LRU management flags (from global-lru definition) */
-TESTPCGFLAG(File, FILE)
-SETPCGFLAG(File, FILE)
-CLEARPCGFLAG(File, FILE)
-
-TESTPCGFLAG(Active, ACTIVE)
-SETPCGFLAG(Active, ACTIVE)
-CLEARPCGFLAG(Active, ACTIVE)
-
-TESTPCGFLAG(Unevictable, UNEVICTABLE)
-SETPCGFLAG(Unevictable, UNEVICTABLE)
-CLEARPCGFLAG(Unevictable, UNEVICTABLE)
-
 static inline int page_cgroup_nid(struct page_cgroup *pc)
 {
 	return page_to_nid(pc->page);
@@ -105,4 +88,39 @@
 }
 
 #endif
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#include <linux/swap.h>
+extern struct mem_cgroup *
+swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem);
+extern struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent);
+extern int swap_cgroup_swapon(int type, unsigned long max_pages);
+extern void swap_cgroup_swapoff(int type);
+#else
+#include <linux/swap.h>
+
+static inline
+struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
+{
+	return NULL;
+}
+
+static inline
+struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
+{
+	return NULL;
+}
+
+static inline int
+swap_cgroup_swapon(int type, unsigned long max_pages)
+{
+	return 0;
+}
+
+static inline void swap_cgroup_swapoff(int type)
+{
+	return;
+}
+
+#endif
 #endif
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 8837928..042c166 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -8,6 +8,8 @@
 #ifndef _PCI_ACPI_H_
 #define _PCI_ACPI_H_
 
+#include <linux/acpi.h>
+
 #define OSC_QUERY_TYPE			0
 #define OSC_SUPPORT_TYPE 		1
 #define OSC_CONTROL_TYPE		2
@@ -48,15 +50,7 @@
 
 #ifdef CONFIG_ACPI
 extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags);
-extern acpi_status __pci_osc_support_set(u32 flags, const char *hid);
-static inline acpi_status pci_osc_support_set(u32 flags)
-{
-	return __pci_osc_support_set(flags, PCI_ROOT_HID_STRING);
-}
-static inline acpi_status pcie_osc_support_set(u32 flags)
-{
-	return __pci_osc_support_set(flags, PCI_EXPRESS_ROOT_HID_STRING);
-}
+int pci_acpi_osc_support(acpi_handle handle, u32 flags);
 static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
 {
 	/* Find root host bridge */
@@ -66,6 +60,15 @@
 	return acpi_get_pci_rootbridge_handle(pci_domain_nr(pdev->bus),
 			pdev->bus->number);
 }
+
+static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
+{
+	int seg = pci_domain_nr(pbus), busnr = pbus->number;
+	struct pci_dev *bridge = pbus->self;
+	if (bridge)
+		return DEVICE_ACPI_HANDLE(&(bridge->dev));
+	return acpi_get_pci_rootbridge_handle(seg, busnr);
+}
 #else
 #if !defined(AE_ERROR)
 typedef u32 		acpi_status;
@@ -73,8 +76,6 @@
 #endif    
 static inline acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
 {return AE_ERROR;}
-static inline acpi_status pci_osc_support_set(u32 flags) {return AE_ERROR;} 
-static inline acpi_status pcie_osc_support_set(u32 flags) {return AE_ERROR;}
 static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
 { return NULL; }
 #endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4bb156b..80f8b8b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -82,7 +82,30 @@
 #define PCI_DMA_FROMDEVICE	2
 #define PCI_DMA_NONE		3
 
-#define DEVICE_COUNT_RESOURCE	12
+/*
+ *  For PCI devices, the region numbers are assigned this way:
+ */
+enum {
+	/* #0-5: standard PCI resources */
+	PCI_STD_RESOURCES,
+	PCI_STD_RESOURCE_END = 5,
+
+	/* #6: expansion ROM resource */
+	PCI_ROM_RESOURCE,
+
+	/* resources assigned to buses behind the bridge */
+#define PCI_BRIDGE_RESOURCE_NUM 4
+
+	PCI_BRIDGE_RESOURCES,
+	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
+				  PCI_BRIDGE_RESOURCE_NUM - 1,
+
+	/* total resources associated with a PCI device */
+	PCI_NUM_RESOURCES,
+
+	/* preserve this for compatibility */
+	DEVICE_COUNT_RESOURCE
+};
 
 typedef int __bitwise pci_power_t;
 
@@ -274,18 +297,6 @@
 	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
 }
 
-/*
- *  For PCI devices, the region numbers are assigned this way:
- *
- *	0-5	standard PCI regions
- *	6	expansion ROM
- *	7-10	bridges: address space assigned to buses behind the bridge
- */
-
-#define PCI_ROM_RESOURCE	6
-#define PCI_BRIDGE_RESOURCES	7
-#define PCI_NUM_RESOURCES	11
-
 #ifndef PCI_BUS_NUM_RESOURCES
 #define PCI_BUS_NUM_RESOURCES	16
 #endif
@@ -325,6 +336,15 @@
 #define pci_bus_b(n)	list_entry(n, struct pci_bus, node)
 #define to_pci_bus(n)	container_of(n, struct pci_bus, dev)
 
+#ifdef CONFIG_PCI_MSI
+static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
+{
+	return pci_dev->msi_enabled || pci_dev->msix_enabled;
+}
+#else
+static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
+#endif
+
 /*
  * Error values that may be returned by PCI functions.
  */
@@ -532,7 +552,9 @@
 void pci_read_bridge_bases(struct pci_bus *child);
 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
 					  struct resource *res);
+u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin);
 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
+u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
 extern struct pci_dev *pci_dev_get(struct pci_dev *dev);
 extern void pci_dev_put(struct pci_dev *dev);
 extern void pci_remove_bus(struct pci_bus *b);
@@ -629,6 +651,7 @@
 
 void pci_disable_device(struct pci_dev *dev);
 void pci_set_master(struct pci_dev *dev);
+void pci_clear_master(struct pci_dev *dev);
 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
 #define HAVE_PCI_SET_MWI
 int __must_check pci_set_mwi(struct pci_dev *dev);
@@ -647,7 +670,7 @@
 int pcie_set_readrq(struct pci_dev *dev, int rq);
 int pci_reset_function(struct pci_dev *dev);
 int pci_execute_reset_function(struct pci_dev *dev);
-void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno);
+void pci_update_resource(struct pci_dev *dev, int resno);
 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
 
@@ -674,6 +697,11 @@
 /* Functions for PCI Hotplug drivers to use */
 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
 
+/* Vital product data routines */
+ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
+ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+int pci_vpd_truncate(struct pci_dev *dev, size_t size);
+
 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
 void pci_bus_assign_resources(struct pci_bus *bus);
 void pci_bus_size_bridges(struct pci_bus *bus);
@@ -686,10 +714,13 @@
 		    int (*)(struct pci_dev *, u8, u8));
 #define HAVE_PCI_REQ_REGIONS	2
 int __must_check pci_request_regions(struct pci_dev *, const char *);
+int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
 void pci_release_regions(struct pci_dev *);
 int __must_check pci_request_region(struct pci_dev *, int, const char *);
+int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *);
 void pci_release_region(struct pci_dev *, int);
 int pci_request_selected_regions(struct pci_dev *, int, const char *);
+int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
 void pci_release_selected_regions(struct pci_dev *, int);
 
 /* drivers/pci/bus.c */
@@ -779,6 +810,10 @@
 
 static inline void pci_restore_msi_state(struct pci_dev *dev)
 { }
+static inline int pci_msi_enabled(void)
+{
+	return 0;
+}
 #else
 extern int pci_enable_msi(struct pci_dev *dev);
 extern void pci_msi_shutdown(struct pci_dev *dev);
@@ -789,6 +824,16 @@
 extern void pci_disable_msix(struct pci_dev *dev);
 extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
 extern void pci_restore_msi_state(struct pci_dev *dev);
+extern int pci_msi_enabled(void);
+#endif
+
+#ifndef CONFIG_PCIEASPM
+static inline int pcie_aspm_enabled(void)
+{
+	return 0;
+}
+#else
+extern int pcie_aspm_enabled(void);
 #endif
 
 #ifdef CONFIG_HT_IRQ
@@ -1140,20 +1185,9 @@
 static inline void pci_mmcfg_late_init(void) { }
 #endif
 
-#ifdef CONFIG_HAS_IOMEM
-static inline void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
-{
-	/*
-	 * Make sure the BAR is actually a memory resource, not an IO resource
-	 */
-	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
-		WARN_ON(1);
-		return NULL;
-	}
-	return ioremap_nocache(pci_resource_start(pdev, bar),
-				     pci_resource_len(pdev, bar));
-}
-#endif
+int pci_ext_cfg_avail(struct pci_dev *dev);
+
+void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
 
 #endif /* __KERNEL__ */
 #endif /* LINUX_PCI_H */
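
As an illustration of the additions above (the resource-numbering enum, pci_request_regions_exclusive() and the now out-of-line pci_ioremap_bar()), a hedged probe() sketch; the driver name, BAR choice and error handling are made up:

#include <linux/errno.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int i, err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* Claim all BARs and mark them exclusive to this driver. */
	err = pci_request_regions_exclusive(pdev, "example");
	if (err)
		goto disable;

	/* The new enum spells out which entries are the standard BARs. */
	for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++)
		dev_dbg(&pdev->dev, "BAR %d starts at %#llx\n", i,
			(unsigned long long)pci_resource_start(pdev, i));

	regs = pci_ioremap_bar(pdev, 0);	/* now an exported helper */
	if (!regs) {
		err = -ENOMEM;
		goto release;
	}

	pci_set_master(pdev);
	return 0;

release:
	pci_release_regions(pdev);
disable:
	pci_disable_device(pdev);
	return err;
}
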
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index a00bd1a..2099874 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -223,11 +223,12 @@
 #ifdef CONFIG_ACPI
 #include <acpi/acpi.h>
 #include <acpi/acpi_bus.h>
-#include <acpi/actypes.h>
 extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
 				struct hotplug_params *hpp);
 int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
 int acpi_root_bridge(acpi_handle handle);
+int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
+int acpi_pci_detect_ejectable(struct pci_bus *pbus);
 #endif
 #endif
 
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d543365..d56ad9c 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2174,6 +2174,7 @@
 #define PCI_DEVICE_ID_RDC_R6040		0x6040
 #define PCI_DEVICE_ID_RDC_R6060		0x6060
 #define PCI_DEVICE_ID_RDC_R6061		0x6061
+#define PCI_DEVICE_ID_RDC_D1010		0x1010
 
 #define PCI_VENDOR_ID_LENOVO		0x17aa
 
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index e5effd4..027815b 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -210,6 +210,7 @@
 #define  PCI_CAP_ID_AGP3	0x0E	/* AGP Target PCI-PCI bridge */
 #define  PCI_CAP_ID_EXP 	0x10	/* PCI Express */
 #define  PCI_CAP_ID_MSIX	0x11	/* MSI-X */
+#define  PCI_CAP_ID_AF		0x13	/* PCI Advanced Features */
 #define PCI_CAP_LIST_NEXT	1	/* Next capability in the list */
 #define PCI_CAP_FLAGS		2	/* Capability defined flags (16 bits) */
 #define PCI_CAP_SIZEOF		4
@@ -316,6 +317,17 @@
 #define  PCI_CHSWP_EXT		0x40	/* ENUM# status - extraction */
 #define  PCI_CHSWP_INS		0x80	/* ENUM# status - insertion */
 
+/* PCI Advanced Feature registers */
+
+#define PCI_AF_LENGTH		2
+#define PCI_AF_CAP		3
+#define  PCI_AF_CAP_TP		0x01
+#define  PCI_AF_CAP_FLR		0x02
+#define PCI_AF_CTRL		4
+#define  PCI_AF_CTRL_FLR	0x01
+#define PCI_AF_STATUS		5
+#define  PCI_AF_STATUS_TP	0x01
+
 /* PCI-X registers */
 
 #define PCI_X_CMD		2	/* Modes & Features */
@@ -399,20 +411,70 @@
 #define  PCI_EXP_DEVSTA_AUXPD	0x10	/* AUX Power Detected */
 #define  PCI_EXP_DEVSTA_TRPND	0x20	/* Transactions Pending */
 #define PCI_EXP_LNKCAP		12	/* Link Capabilities */
-#define  PCI_EXP_LNKCAP_ASPMS	0xc00	/* ASPM Support */
-#define  PCI_EXP_LNKCAP_L0SEL	0x7000	/* L0s Exit Latency */
-#define  PCI_EXP_LNKCAP_L1EL	0x38000	/* L1 Exit Latency */
-#define  PCI_EXP_LNKCAP_CLKPM	0x40000	/* L1 Clock Power Management */
+#define  PCI_EXP_LNKCAP_SLS	0x0000000f /* Supported Link Speeds */
+#define  PCI_EXP_LNKCAP_MLW	0x000003f0 /* Maximum Link Width */
+#define  PCI_EXP_LNKCAP_ASPMS	0x00000c00 /* ASPM Support */
+#define  PCI_EXP_LNKCAP_L0SEL	0x00007000 /* L0s Exit Latency */
+#define  PCI_EXP_LNKCAP_L1EL	0x00038000 /* L1 Exit Latency */
+#define  PCI_EXP_LNKCAP_CLKPM	0x00040000 /* L1 Clock Power Management */
+#define  PCI_EXP_LNKCAP_SDERC	0x00080000 /* Surprise Down Error Reporting Capable */
+#define  PCI_EXP_LNKCAP_DLLLARC	0x00100000 /* Data Link Layer Link Active Reporting Capable */
+#define  PCI_EXP_LNKCAP_LBNC	0x00200000 /* Link Bandwidth Notification Capability */
+#define  PCI_EXP_LNKCAP_PN	0xff000000 /* Port Number */
 #define PCI_EXP_LNKCTL		16	/* Link Control */
-#define  PCI_EXP_LNKCTL_RL	0x20	/* Retrain Link */
-#define  PCI_EXP_LNKCTL_CCC	0x40	/* Common Clock COnfiguration */
+#define  PCI_EXP_LNKCTL_ASPMC	0x0003	/* ASPM Control */
+#define  PCI_EXP_LNKCTL_RCB	0x0008	/* Read Completion Boundary */
+#define  PCI_EXP_LNKCTL_LD	0x0010	/* Link Disable */
+#define  PCI_EXP_LNKCTL_RL	0x0020	/* Retrain Link */
+#define  PCI_EXP_LNKCTL_CCC	0x0040	/* Common Clock Configuration */
+#define  PCI_EXP_LNKCTL_ES	0x0080	/* Extended Synch */
 #define  PCI_EXP_LNKCTL_CLKREQ_EN 0x100	/* Enable clkreq */
+#define  PCI_EXP_LNKCTL_HAWD	0x0200	/* Hardware Autonomous Width Disable */
+#define  PCI_EXP_LNKCTL_LBMIE	0x0400	/* Link Bandwidth Management Interrupt Enable */
+#define  PCI_EXP_LNKCTL_LABIE	0x0800	/* Link Autonomous Bandwidth Interrupt Enable */
 #define PCI_EXP_LNKSTA		18	/* Link Status */
-#define  PCI_EXP_LNKSTA_LT	0x800	/* Link Training */
+#define  PCI_EXP_LNKSTA_CLS	0x000f	/* Current Link Speed */
+#define  PCI_EXP_LNKSTA_NLW	0x03f0	/* Negotiated Link Width */
+#define  PCI_EXP_LNKSTA_LT	0x0800	/* Link Training */
 #define  PCI_EXP_LNKSTA_SLC	0x1000	/* Slot Clock Configuration */
+#define  PCI_EXP_LNKSTA_DLLLA	0x2000	/* Data Link Layer Link Active */
+#define  PCI_EXP_LNKSTA_LBMS	0x4000	/* Link Bandwidth Management Status */
+#define  PCI_EXP_LNKSTA_LABS	0x8000	/* Link Autonomous Bandwidth Status */
 #define PCI_EXP_SLTCAP		20	/* Slot Capabilities */
+#define  PCI_EXP_SLTCAP_ABP	0x00000001 /* Attention Button Present */
+#define  PCI_EXP_SLTCAP_PCP	0x00000002 /* Power Controller Present */
+#define  PCI_EXP_SLTCAP_MRLSP	0x00000004 /* MRL Sensor Present */
+#define  PCI_EXP_SLTCAP_AIP	0x00000008 /* Attention Indicator Present */
+#define  PCI_EXP_SLTCAP_PIP	0x00000010 /* Power Indicator Present */
+#define  PCI_EXP_SLTCAP_HPS	0x00000020 /* Hot-Plug Surprise */
+#define  PCI_EXP_SLTCAP_HPC	0x00000040 /* Hot-Plug Capable */
+#define  PCI_EXP_SLTCAP_SPLV	0x00007f80 /* Slot Power Limit Value */
+#define  PCI_EXP_SLTCAP_SPLS	0x00018000 /* Slot Power Limit Scale */
+#define  PCI_EXP_SLTCAP_EIP	0x00020000 /* Electromechanical Interlock Present */
+#define  PCI_EXP_SLTCAP_NCCS	0x00040000 /* No Command Completed Support */
+#define  PCI_EXP_SLTCAP_PSN	0xfff80000 /* Physical Slot Number */
 #define PCI_EXP_SLTCTL		24	/* Slot Control */
+#define  PCI_EXP_SLTCTL_ABPE	0x0001	/* Attention Button Pressed Enable */
+#define  PCI_EXP_SLTCTL_PFDE	0x0002	/* Power Fault Detected Enable */
+#define  PCI_EXP_SLTCTL_MRLSCE	0x0004	/* MRL Sensor Changed Enable */
+#define  PCI_EXP_SLTCTL_PDCE	0x0008	/* Presence Detect Changed Enable */
+#define  PCI_EXP_SLTCTL_CCIE	0x0010	/* Command Completed Interrupt Enable */
+#define  PCI_EXP_SLTCTL_HPIE	0x0020	/* Hot-Plug Interrupt Enable */
+#define  PCI_EXP_SLTCTL_AIC	0x00c0	/* Attention Indicator Control */
+#define  PCI_EXP_SLTCTL_PIC	0x0300	/* Power Indicator Control */
+#define  PCI_EXP_SLTCTL_PCC	0x0400	/* Power Controller Control */
+#define  PCI_EXP_SLTCTL_EIC	0x0800	/* Electromechanical Interlock Control */
+#define  PCI_EXP_SLTCTL_DLLSCE	0x1000	/* Data Link Layer State Changed Enable */
 #define PCI_EXP_SLTSTA		26	/* Slot Status */
+#define  PCI_EXP_SLTSTA_ABP	0x0001	/* Attention Button Pressed */
+#define  PCI_EXP_SLTSTA_PFD	0x0002	/* Power Fault Detected */
+#define  PCI_EXP_SLTSTA_MRLSC	0x0004	/* MRL Sensor Changed */
+#define  PCI_EXP_SLTSTA_PDC	0x0008	/* Presence Detect Changed */
+#define  PCI_EXP_SLTSTA_CC	0x0010	/* Command Completed */
+#define  PCI_EXP_SLTSTA_MRLSS	0x0020	/* MRL Sensor State */
+#define  PCI_EXP_SLTSTA_PDS	0x0040	/* Presence Detect State */
+#define  PCI_EXP_SLTSTA_EIS	0x0080	/* Electromechanical Interlock Status */
+#define  PCI_EXP_SLTSTA_DLLSC	0x0100	/* Data Link Layer State Changed */
 #define PCI_EXP_RTCTL		28	/* Root Control */
 #define  PCI_EXP_RTCTL_SECEE	0x01	/* System Error on Correctable Error */
 #define  PCI_EXP_RTCTL_SENFEE	0x02	/* System Error on Non-Fatal Error */
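
A small sketch of how the new link-status masks could be consumed; the reporting function is hypothetical, and the field decoding follows the mask values defined above:

#include <linux/pci.h>
#include <linux/pci_regs.h>

static void example_report_link(struct pci_dev *dev)
{
	int pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	u16 lnksta;

	if (!pos)
		return;		/* not a PCI Express device */

	pci_read_config_word(dev, pos + PCI_EXP_LNKSTA, &lnksta);
	dev_info(&dev->dev, "link speed field %u, width x%u\n",
		 lnksta & PCI_EXP_LNKSTA_CLS,
		 (lnksta & PCI_EXP_LNKSTA_NLW) >> 4);
}
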
diff --git a/include/linux/phantom.h b/include/linux/phantom.h
index 02268c5..94dd664 100644
--- a/include/linux/phantom.h
+++ b/include/linux/phantom.h
@@ -10,7 +10,7 @@
 #ifndef __PHANTOM_H
 #define __PHANTOM_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 /* PHN_(G/S)ET_REG param */
 struct phm_reg {
diff --git a/include/linux/pid.h b/include/linux/pid.h
index bb206c5..49f1c2f 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -123,6 +123,24 @@
 extern void free_pid(struct pid *pid);
 
 /*
+ * ns_of_pid() returns the pid namespace in which the specified pid was
+ * allocated.
+ *
+ * NOTE:
+ * 	ns_of_pid() is expected to be called for a process (task) that has
+ * 	an attached 'struct pid' (see attach_pid(), detach_pid()) i.e @pid
+ * 	an attached 'struct pid' (see attach_pid(), detach_pid()), i.e. @pid
+ * 	the resulting NULL pid-ns.
+ */
+static inline struct pid_namespace *ns_of_pid(struct pid *pid)
+{
+	struct pid_namespace *ns = NULL;
+	if (pid)
+		ns = pid->numbers[pid->level].ns;
+	return ns;
+}
+
+/*
  * the helpers to get the pid's id seen from different namespaces
  *
  * pid_nr()    : global id, i.e. the id seen from the init namespace;
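
A hedged usage sketch for the ns_of_pid() helper added above; task_pid() is the existing accessor for a task's attached struct pid, and the wrapper itself is hypothetical:

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/sched.h>

static struct pid_namespace *example_ns_of_task(struct task_struct *task)
{
	/* As noted above, a NULL pid yields a NULL namespace. */
	return ns_of_pid(task_pid(task));
}
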
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index d82fe82..38d1032 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -79,11 +79,7 @@
 }
 #endif /* CONFIG_PID_NS */
 
-static inline struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
-{
-	return tsk->nsproxy->pid_ns;
-}
-
+extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk);
 void pidhash_init(void);
 void pidmap_init(void);
 
diff --git a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h
index 34a196e..787d19e 100644
--- a/include/linux/qnx4_fs.h
+++ b/include/linux/qnx4_fs.h
@@ -2,14 +2,12 @@
  *  Name                         : qnx4_fs.h
  *  Author                       : Richard Frowijn
  *  Function                     : qnx4 global filesystem definitions
- *  Version                      : 1.0.2
- *  Last modified                : 2000-01-31
- *
  *  History                      : 23-03-1998 created
  */
 #ifndef _LINUX_QNX4_FS_H
 #define _LINUX_QNX4_FS_H
 
+#include <linux/types.h>
 #include <linux/qnxtypes.h>
 #include <linux/magic.h>
 
diff --git a/include/linux/qnxtypes.h b/include/linux/qnxtypes.h
index a3eb113..bebbe5c 100644
--- a/include/linux/qnxtypes.h
+++ b/include/linux/qnxtypes.h
@@ -2,9 +2,6 @@
  *  Name                         : qnxtypes.h
  *  Author                       : Richard Frowijn
  *  Function                     : standard qnx types
- *  Version                      : 1.0.2
- *  Last modified                : 2000-01-06
- *
  *  History                      : 22-03-1998 created
  *
  */
@@ -12,6 +9,8 @@
 #ifndef _QNX4TYPES_H
 #define _QNX4TYPES_H
 
+#include <linux/types.h>
+
 typedef __le16 qnx4_nxtnt_t;
 typedef __u8  qnx4_ftype_t;
 
diff --git a/include/linux/radeonfb.h b/include/linux/radeonfb.h
index 5bd8975..8c4bbde 100644
--- a/include/linux/radeonfb.h
+++ b/include/linux/radeonfb.h
@@ -2,7 +2,7 @@
 #define __LINUX_RADEONFB_H__
 
 #include <asm/ioctl.h>
-#include <asm/types.h>
+#include <linux/types.h>
 
 #define ATY_RADEON_LCD_ON	0x00000001
 #define ATY_RADEON_CRT_ON	0x00000002
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index 8fc909e..9743e4d 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -137,6 +137,9 @@
 	struct gendisk			*gendisk;
 
 	struct kobject			kobj;
+	int				hold_active;
+#define	UNTIL_IOCTL	1
+#define	UNTIL_STOP	2
 
 	/* Superblock information */
 	int				major_version,
@@ -215,6 +218,9 @@
 #define	MD_RECOVERY_FROZEN	9
 
 	unsigned long			recovery;
+	int				recovery_disabled; /* if we detect that recovery
+							    * will always fail, set this
+							    * so we don't loop trying */
 
 	int				in_sync;	/* know to not need resync */
 	struct mutex			reconfig_mutex;
@@ -244,6 +250,9 @@
 	struct sysfs_dirent		*sysfs_state;	/* handle for 'array_state'
 							 * file in sysfs.
 							 */
+	struct sysfs_dirent		*sysfs_action;  /* handle for 'sync_action' */
+
+	struct work_struct del_work;	/* used for delayed sysfs removal */
 
 	spinlock_t			write_lock;
 	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
@@ -334,17 +343,14 @@
  * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
  */
-#define rdev_for_each_list(rdev, tmp, list)				\
-									\
-	for ((tmp) = (list).next;					\
-		(rdev) = (list_entry((tmp), mdk_rdev_t, same_set)),	\
-			(tmp) = (tmp)->next, (tmp)->prev != &(list)	\
-		; )
+#define rdev_for_each_list(rdev, tmp, head)				\
+	list_for_each_entry_safe(rdev, tmp, head, same_set)
+
 /*
  * iterates through the 'same array disks' ringlist
  */
 #define rdev_for_each(rdev, tmp, mddev)				\
-	rdev_for_each_list(rdev, tmp, (mddev)->disks)
+	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)
 
 #define rdev_for_each_rcu(rdev, mddev)				\
 	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h
index 8b4de4a..9491026 100644
--- a/include/linux/raid/md_p.h
+++ b/include/linux/raid/md_p.h
@@ -194,6 +194,8 @@
 	return (ev<<32)| sb->events_lo;
 }
 
+#define MD_SUPERBLOCK_1_TIME_SEC_MASK ((1ULL<<40) - 1)
+
 /*
  * The version-1 superblock :
  * All numeric fields are little-endian.
diff --git a/include/linux/raid/raid0.h b/include/linux/raid/raid0.h
index 1b2dda0..fd42aa8 100644
--- a/include/linux/raid/raid0.h
+++ b/include/linux/raid/raid0.h
@@ -5,9 +5,9 @@
 
 struct strip_zone
 {
-	sector_t zone_offset;	/* Zone offset in md_dev */
-	sector_t dev_offset;	/* Zone offset in real dev */
-	sector_t size;		/* Zone size */
+	sector_t zone_start;	/* Zone offset in md_dev (in sectors) */
+	sector_t dev_start;	/* Zone offset in real dev (in sectors) */
+	sector_t sectors;	/* Zone size in sectors */
 	int nb_dev;		/* # of devices attached to the zone */
 	mdk_rdev_t **dev;	/* Devices attached to the zone */
 };
@@ -19,8 +19,8 @@
 	mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
 	int nr_strip_zones;
 
-	sector_t hash_spacing;
-	int preshift;			/* shift this before divide by hash_spacing */
+	sector_t spacing;
+	int sector_shift; /* shift this before divide by spacing */
 };
 
 typedef struct raid0_private_data raid0_conf_t;
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 344bc34..9c29541 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -140,10 +140,10 @@
 extern void rb_erase(struct rb_node *, struct rb_root *);
 
 /* Find logical next and previous nodes in a tree */
-extern struct rb_node *rb_next(struct rb_node *);
-extern struct rb_node *rb_prev(struct rb_node *);
-extern struct rb_node *rb_first(struct rb_root *);
-extern struct rb_node *rb_last(struct rb_root *);
+extern struct rb_node *rb_next(const struct rb_node *);
+extern struct rb_node *rb_prev(const struct rb_node *);
+extern struct rb_node *rb_first(const struct rb_root *);
+extern struct rb_node *rb_last(const struct rb_root *);
 
 /* Fast replacement of a single node without remove/rebalance/add/rebalance */
 extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, 
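
To show what the const-qualified traversal helpers buy, a sketch of a read-only walk over a tree rooted in a const rb_root; the counting function is illustrative only:

#include <linux/rbtree.h>

static int example_count_nodes(const struct rb_root *root)
{
	const struct rb_node *n;
	int count = 0;

	/* rb_first()/rb_next() now accept const pointers, so no casts. */
	for (n = rb_first(root); n; n = rb_next(n))
		count++;
	return count;
}
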
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index afdc455..801bf77 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -104,10 +104,10 @@
 /**
  * struct regulator_bulk_data - Data used for bulk regulator operations.
  *
- * @supply   The name of the supply.  Initialised by the user before
- *           using the bulk regulator APIs.
- * @consumer The regulator consumer for the supply.  This will be managed
- *           by the bulk API.
+ * @supply:   The name of the supply.  Initialised by the user before
+ *            using the bulk regulator APIs.
+ * @consumer: The regulator consumer for the supply.  This will be managed
+ *            by the bulk API.
  *
  * The regulator APIs provide a series of regulator_bulk_() API calls as
  * a convenience to consumers which require multiple supplies.  This
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index e37d805..2dae057 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -24,7 +24,33 @@
 /**
  * struct regulator_ops - regulator operations.
  *
- * This struct describes regulator operations.
+ * This struct describes regulator operations which can be implemented by
+ * regulator chip drivers.
+ *
+ * @enable: Enable the regulator.
+ * @disable: Disable the regulator.
+ * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise.
+ *
+ * @set_voltage: Set the voltage for the regulator within the range specified.
+ *               The driver should select the voltage closest to min_uV.
+ * @get_voltage: Return the currently configured voltage for the regulator.
+ *
+ * @set_current_limit: Configure a limit for a current-limited regulator.
+ * @get_current_limit: Get the limit for a current-limited regulator.
+ *
+ * @set_mode: Set the operating mode for the regulator.
+ * @get_mode: Get the current operating mode for the regulator.
+ * @get_optimum_mode: Get the most efficient operating mode for the regulator
+ *                    when running with the specified parameters.
+ *
+ * @set_suspend_voltage: Set the voltage for the regulator when the system
+ *                       is suspended.
+ * @set_suspend_enable: Mark the regulator as enabled when the system is
+ *                      suspended.
+ * @set_suspend_disable: Mark the regulator as disabled when the system is
+ *                       suspended.
+ * @set_suspend_mode: Set the operating mode for the regulator when the
+ *                    system is suspended.
  */
 struct regulator_ops {
 
@@ -75,6 +101,15 @@
 /**
  * struct regulator_desc - Regulator descriptor
  *
+ * Each regulator registered with the core is described with a structure of
+ * this type.
+ *
+ * @name: Identifying name for the regulator.
+ * @id: Numerical identifier for the regulator.
+ * @ops: Regulator operations table.
+ * @irq: Interrupt number for the regulator.
+ * @type: Indicates if the regulator is a voltage or current regulator.
+ * @owner: Module providing the regulator, used for refcounting.
  */
 struct regulator_desc {
 	const char *name;
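
To ground the newly documented callbacks, a sketch of a trivial always-on, fixed-voltage regulator description; the names and the 3.3 V value are invented, and the callback signatures are assumed to be the struct regulator_dev based ones used by regulator drivers of this era:

#include <linux/module.h>
#include <linux/regulator/driver.h>

static int example_is_enabled(struct regulator_dev *rdev)
{
	return 1;			/* hardware cannot be switched off */
}

static int example_get_voltage(struct regulator_dev *rdev)
{
	return 3300000;			/* microvolts */
}

static struct regulator_ops example_ops = {
	.is_enabled	= example_is_enabled,
	.get_voltage	= example_get_voltage,
};

static struct regulator_desc example_desc = {
	.name	= "example-ldo",
	.id	= 0,
	.ops	= &example_ops,
	.type	= REGULATOR_VOLTAGE,
	.owner	= THIS_MODULE,
};
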
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index c6d6933..3794773 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -44,6 +44,10 @@
 * struct regulator_state - regulator state during low power system states
  *
  * This describes a regulators state during a system wide low power state.
+ *
+ * @uV: Operating voltage during suspend.
+ * @mode: Operating mode during suspend.
+ * @enabled: Enabled during suspend.
  */
 struct regulator_state {
 	int uV;	/* suspend voltage */
@@ -55,6 +59,30 @@
  * struct regulation_constraints - regulator operating constraints.
  *
  * This struct describes regulator and board/machine specific constraints.
+ *
+ * @name: Descriptive name for the constraints, used for display purposes.
+ *
+ * @min_uV: Smallest voltage consumers may set.
+ * @max_uV: Largest voltage consumers may set.
+ *
+ * @min_uA: Smallest current consumers may set.
+ * @max_uA: Largest current consumers may set.
+ *
+ * @valid_modes_mask: Mask of modes which may be configured by consumers.
+ * @valid_ops_mask: Operations which may be performed by consumers.
+ *
+ * @always_on: Set if the regulator should never be disabled.
+ * @boot_on: Set if the regulator is enabled when the system is initially
+ *           started.
+ * @apply_uV: Apply the voltage constraint when initialising.
+ *
+ * @input_uV: Input voltage for regulator when supplied by another regulator.
+ *
+ * @state_disk: State for regulator when system is suspended in disk mode.
+ * @state_mem: State for regulator when system is suspended in mem mode.
+ * @state_standby: State for regulator when system is suspended in standby
+ *                 mode.
+ * @initial_state: Suspend state to set by default.
  */
 struct regulation_constraints {
 
@@ -93,6 +121,9 @@
  * struct regulator_consumer_supply - supply -> device mapping
  *
  * This maps a supply name to a device.
+ *
+ * @dev: Device structure for the consumer.
+ * @supply: Name for the supply.
  */
 struct regulator_consumer_supply {
 	struct device *dev;	/* consumer */
@@ -103,6 +134,16 @@
  * struct regulator_init_data - regulator platform initialisation data.
  *
  * Initialisation constraints, our supply and consumers supplies.
+ *
+ * @supply_regulator_dev: Parent regulator (if any).
+ *
+ * @constraints: Constraints.  These must be specified for the regulator to
+ *               be usable.
+ * @num_consumer_supplies: Number of consumer device supplies.
+ * @consumer_supplies: Consumer device supply configuration.
+ *
+ * @regulator_init: Callback invoked when the regulator has been registered.
+ * @driver_data: Data passed to regulator_init.
  */
 struct regulator_init_data {
 	struct device *supply_regulator_dev; /* or NULL for LINE */
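
A board-code sketch tying the documented fields together; the supply name, voltage range and consumer mapping are placeholders, and REGULATOR_CHANGE_VOLTAGE is assumed to be the ops-mask flag already provided by this header:

#include <linux/kernel.h>
#include <linux/regulator/machine.h>

static struct regulator_consumer_supply example_consumers[] = {
	{
		.dev	= NULL,		/* would point at the consumer device */
		.supply	= "vmmc",
	},
};

static struct regulator_init_data example_init_data = {
	.constraints = {
		.name		= "VMMC",
		.min_uV		= 1800000,
		.max_uV		= 3300000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
		.boot_on	= 1,
	},
	.num_consumer_supplies	= ARRAY_SIZE(example_consumers),
	.consumer_supplies	= example_consumers,
};
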
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 271c1c2..4c5bcf6 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -9,7 +9,7 @@
  *
  * Author: Pavel Emelianov <xemul@openvz.org>
  *
- * See Documentation/controllers/resource_counter.txt for more
+ * See Documentation/cgroups/resource_counter.txt for more
  * info about what this counter is.
  */
 
@@ -43,6 +43,10 @@
 	 * the routines below consider this to be IRQ-safe
 	 */
 	spinlock_t lock;
+	/*
+	 * Parent counter, used for hierarchical resource accounting
+	 */
+	struct res_counter *parent;
 };
 
 /**
@@ -87,7 +91,7 @@
  * helpers for accounting
  */
 
-void res_counter_init(struct res_counter *counter);
+void res_counter_init(struct res_counter *counter, struct res_counter *parent);
 
 /*
  * charge - try to consume more resource.
@@ -103,7 +107,7 @@
 int __must_check res_counter_charge_locked(struct res_counter *counter,
 		unsigned long val);
 int __must_check res_counter_charge(struct res_counter *counter,
-		unsigned long val);
+		unsigned long val, struct res_counter **limit_fail_at);
 
 /*
  * uncharge - tell that some portion of the resource is released
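
A sketch of the hierarchical charge flow implied by the new parent pointer and the extra res_counter_charge() argument; the two counters and the wrapper function are hypothetical:

#include <linux/res_counter.h>

static int example_charge(struct res_counter *parent,
			  struct res_counter *child,
			  unsigned long val)
{
	struct res_counter *fail_at;

	res_counter_init(parent, NULL);		/* root of the hierarchy */
	res_counter_init(child, parent);

	/*
	 * On failure, fail_at points at the counter in the chain whose
	 * limit was hit, so the caller knows which level to reclaim from.
	 */
	return res_counter_charge(child, val, &fail_at);
}
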
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index b419984..90bbbf0 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -161,6 +161,9 @@
 
 #define PORT_S3C6400	84
 
+/* NWPSERIAL */
+#define PORT_NWPSERIAL	85
+
 #ifdef __KERNEL__
 
 #include <linux/compiler.h>
diff --git a/include/linux/smp.h b/include/linux/smp.h
index b824669..715196b 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -24,6 +24,9 @@
 /* total number of cpus in this system (may exceed NR_CPUS) */
 extern unsigned int total_cpus;
 
+int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
+				int wait);
+
 #ifdef CONFIG_SMP
 
 #include <linux/preempt.h>
@@ -79,8 +82,6 @@
 	return 0;
 }
 
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-				int wait);
 void __smp_call_function_single(int cpuid, struct call_single_data *data);
 
 /*
@@ -140,14 +141,6 @@
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()			1
 #define smp_prepare_boot_cpu()			do {} while (0)
-#define smp_call_function_single(cpuid, func, info, wait) \
-({ \
-	WARN_ON(cpuid != 0);	\
-	local_irq_disable();	\
-	(func)(info);		\
-	local_irq_enable();	\
-	0;			\
-})
 #define smp_call_function_mask(mask, func, info, wait) \
 			(up_smp_call_function(func, info))
 #define smp_call_function_many(mask, func, info, wait) \
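
With the declaration moved outside CONFIG_SMP, callers no longer need an #ifdef around smp_call_function_single(); a minimal sketch (the work function and counter are illustrative):

#include <linux/smp.h>

static void example_do_work(void *info)
{
	int *counter = info;

	(*counter)++;
}

static int example_run_on_cpu0(void)
{
	int counter = 0;

	/* Runs example_do_work() on CPU 0 and waits for it to finish. */
	return smp_call_function_single(0, example_do_work, &counter, 1);
}
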
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 8222931..68bb1c5 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -327,9 +327,9 @@
  * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
  * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
  * @len: size of rx and tx buffers (in bytes)
- * @speed_hz: Select a speed other then the device default for this
+ * @speed_hz: Select a speed other than the device default for this
  *      transfer. If 0 the default (from @spi_device) is used.
- * @bits_per_word: select a bits_per_word other then the device default
+ * @bits_per_word: select a bits_per_word other than the device default
  *      for this transfer. If 0 the default (from @spi_device) is used.
  * @cs_change: affects chipselect after this transfer completes
  * @delay_usecs: microseconds to delay after this transfer before
diff --git a/include/linux/spi/tdo24m.h b/include/linux/spi/tdo24m.h
new file mode 100644
index 0000000..7572d4e
--- /dev/null
+++ b/include/linux/spi/tdo24m.h
@@ -0,0 +1,13 @@
+#ifndef __TDO24M_H__
+#define __TDO24M_H__
+
+enum tdo24m_model {
+	TDO24M,
+	TDO35S,
+};
+
+struct tdo24m_platform_data {
+	enum tdo24m_model model;
+};
+
+#endif /* __TDO24M_H__ */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 3afe7fb..3435d24 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -58,10 +58,13 @@
 	struct svc_stat *	sv_stats;	/* RPC statistics */
 	spinlock_t		sv_lock;
 	unsigned int		sv_nrthreads;	/* # of server threads */
+	unsigned int		sv_maxconn;	/* max connections allowed;
+						 * '0' means the limit is based
+						 * on the number of threads. */
+
 	unsigned int		sv_max_payload;	/* datagram payload size */
 	unsigned int		sv_max_mesg;	/* max_payload + 1 page for overheads */
 	unsigned int		sv_xdrsize;	/* XDR buffer size */
-
 	struct list_head	sv_permsocks;	/* all permanent sockets */
 	struct list_head	sv_tempsocks;	/* all temporary sockets */
 	int			sv_tmpcnt;	/* count of temporary sockets */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 2ce8207..2b409c4 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -232,6 +232,11 @@
 
 extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
 extern int hibernate(void);
+extern int hibernate_nvs_register(unsigned long start, unsigned long size);
+extern int hibernate_nvs_alloc(void);
+extern void hibernate_nvs_free(void);
+extern void hibernate_nvs_save(void);
+extern void hibernate_nvs_restore(void);
 #else /* CONFIG_HIBERNATION */
 static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
 static inline void swsusp_set_page_free(struct page *p) {}
@@ -239,6 +244,14 @@
 
 static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
 static inline int hibernate(void) { return -ENOSYS; }
+static inline int hibernate_nvs_register(unsigned long a, unsigned long b)
+{
+	return 0;
+}
+static inline int hibernate_nvs_alloc(void) { return 0; }
+static inline void hibernate_nvs_free(void) {}
+static inline void hibernate_nvs_save(void) {}
+static inline void hibernate_nvs_restore(void) {}
 #endif /* CONFIG_HIBERNATION */
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/include/linux/swab.h b/include/linux/swab.h
index be5284d..ea0c02f 100644
--- a/include/linux/swab.h
+++ b/include/linux/swab.h
@@ -3,7 +3,7 @@
 
 #include <linux/types.h>
 #include <linux/compiler.h>
-#include <asm/byteorder.h>
+#include <asm/swab.h>
 
 /*
  * casts are necessary for constants, because we never know how for sure
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 91dee50..d302155 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -214,7 +214,8 @@
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 					gfp_t gfp_mask);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
-							gfp_t gfp_mask);
+						  gfp_t gfp_mask, bool noswap,
+						  unsigned int swappiness);
 extern int __isolate_lru_page(struct page *page, int mode, int file);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
@@ -333,6 +334,22 @@
 	put_swap_token(swap_token_mm);
 }
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+extern void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent);
+#else
+static inline void
+mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
+{
+}
+#endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
+#else
+static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
+{
+}
+#endif
+
 #else /* CONFIG_SWAP */
 
 #define nr_swap_pages				0L
@@ -409,6 +426,12 @@
 #define has_swap_token(x) 0
 #define disable_swap_token() do { } while(0)
 
+static inline int mem_cgroup_cache_charge_swapin(struct page *page,
+			struct mm_struct *mm, gfp_t mask, bool locked)
+{
+	return 0;
+}
+
 #endif /* CONFIG_SWAP */
 #endif /* __KERNEL__*/
 #endif /* _LINUX_SWAP_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 18d0a24..16875f8 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -54,6 +54,7 @@
 struct compat_timeval;
 struct robust_list_head;
 struct getcpu_cache;
+struct old_linux_dirent;
 
 #include <linux/types.h>
 #include <linux/aio_abi.h>
@@ -65,6 +66,74 @@
 #include <linux/quota.h>
 #include <linux/key.h>
 
+#define __SC_DECL1(t1, a1)	t1 a1
+#define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__)
+#define __SC_DECL3(t3, a3, ...) t3 a3, __SC_DECL2(__VA_ARGS__)
+#define __SC_DECL4(t4, a4, ...) t4 a4, __SC_DECL3(__VA_ARGS__)
+#define __SC_DECL5(t5, a5, ...) t5 a5, __SC_DECL4(__VA_ARGS__)
+#define __SC_DECL6(t6, a6, ...) t6 a6, __SC_DECL5(__VA_ARGS__)
+
+#define __SC_LONG1(t1, a1) 	long a1
+#define __SC_LONG2(t2, a2, ...) long a2, __SC_LONG1(__VA_ARGS__)
+#define __SC_LONG3(t3, a3, ...) long a3, __SC_LONG2(__VA_ARGS__)
+#define __SC_LONG4(t4, a4, ...) long a4, __SC_LONG3(__VA_ARGS__)
+#define __SC_LONG5(t5, a5, ...) long a5, __SC_LONG4(__VA_ARGS__)
+#define __SC_LONG6(t6, a6, ...) long a6, __SC_LONG5(__VA_ARGS__)
+
+#define __SC_CAST1(t1, a1)	(t1) a1
+#define __SC_CAST2(t2, a2, ...) (t2) a2, __SC_CAST1(__VA_ARGS__)
+#define __SC_CAST3(t3, a3, ...) (t3) a3, __SC_CAST2(__VA_ARGS__)
+#define __SC_CAST4(t4, a4, ...) (t4) a4, __SC_CAST3(__VA_ARGS__)
+#define __SC_CAST5(t5, a5, ...) (t5) a5, __SC_CAST4(__VA_ARGS__)
+#define __SC_CAST6(t6, a6, ...) (t6) a6, __SC_CAST5(__VA_ARGS__)
+
+#define __SC_TEST(type)		BUILD_BUG_ON(sizeof(type) > sizeof(long))
+#define __SC_TEST1(t1, a1)	__SC_TEST(t1)
+#define __SC_TEST2(t2, a2, ...)	__SC_TEST(t2); __SC_TEST1(__VA_ARGS__)
+#define __SC_TEST3(t3, a3, ...)	__SC_TEST(t3); __SC_TEST2(__VA_ARGS__)
+#define __SC_TEST4(t4, a4, ...)	__SC_TEST(t4); __SC_TEST3(__VA_ARGS__)
+#define __SC_TEST5(t5, a5, ...)	__SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
+#define __SC_TEST6(t6, a6, ...)	__SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
+
+#define SYSCALL_DEFINE0(name)   asmlinkage long sys_##name(void)
+#define SYSCALL_DEFINE1(...)    SYSCALL_DEFINEx(1, __VA_ARGS__)
+#define SYSCALL_DEFINE2(...)    SYSCALL_DEFINEx(2, __VA_ARGS__)
+#define SYSCALL_DEFINE3(...)    SYSCALL_DEFINEx(3, __VA_ARGS__)
+#define SYSCALL_DEFINE4(...)    SYSCALL_DEFINEx(4, __VA_ARGS__)
+#define SYSCALL_DEFINE5(...)    SYSCALL_DEFINEx(5, __VA_ARGS__)
+#define SYSCALL_DEFINE6(...)    SYSCALL_DEFINEx(6, __VA_ARGS__)
+
+#ifdef CONFIG_PPC64
+#define SYSCALL_ALIAS(alias, name)					\
+	asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n"	\
+	     "\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
+#else
+#define SYSCALL_ALIAS(alias, name)					\
+	asm ("\t.globl " #alias "\n\t.set " #alias ", " #name)
+#endif
+
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+
+#define SYSCALL_DEFINE(name) static inline long SYSC_##name
+#define SYSCALL_DEFINEx(x, name, ...)					\
+	asmlinkage long sys_##name(__SC_DECL##x(__VA_ARGS__));		\
+	static inline long SYSC_##name(__SC_DECL##x(__VA_ARGS__));	\
+	asmlinkage long SyS_##name(__SC_LONG##x(__VA_ARGS__))		\
+	{								\
+		__SC_TEST##x(__VA_ARGS__);				\
+		return (long) SYSC_##name(__SC_CAST##x(__VA_ARGS__));	\
+	}								\
+	SYSCALL_ALIAS(sys_##name, SyS_##name);				\
+	static inline long SYSC_##name(__SC_DECL##x(__VA_ARGS__))
+
+#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */
+
+#define SYSCALL_DEFINE(name) asmlinkage long sys_##name
+#define SYSCALL_DEFINEx(x, name, ...)					\
+	asmlinkage long sys_##name(__SC_DECL##x(__VA_ARGS__))
+
+#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */
+
 asmlinkage long sys_time(time_t __user *tloc);
 asmlinkage long sys_stime(time_t __user *tptr);
 asmlinkage long sys_gettimeofday(struct timeval __user *tv,
@@ -77,7 +146,7 @@
 
 asmlinkage long sys_gettid(void);
 asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp);
-asmlinkage unsigned long sys_alarm(unsigned int seconds);
+asmlinkage long sys_alarm(unsigned int seconds);
 asmlinkage long sys_getpid(void);
 asmlinkage long sys_getppid(void);
 asmlinkage long sys_getuid(void);
@@ -166,7 +235,7 @@
 				unsigned long flags);
 
 asmlinkage long sys_exit(int error_code);
-asmlinkage void sys_exit_group(int error_code);
+asmlinkage long sys_exit_group(int error_code);
 asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
 				int options, struct rusage __user *ru);
 asmlinkage long sys_waitid(int which, pid_t pid,
@@ -196,7 +265,7 @@
 asmlinkage long sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo);
 asmlinkage long sys_sgetmask(void);
 asmlinkage long sys_ssetmask(int newmask);
-asmlinkage unsigned long sys_signal(int sig, __sighandler_t handler);
+asmlinkage long sys_signal(int sig, __sighandler_t handler);
 asmlinkage long sys_pause(void);
 
 asmlinkage long sys_sync(void);
@@ -246,29 +315,29 @@
 			      const void __user *value, size_t size, int flags);
 asmlinkage long sys_fsetxattr(int fd, const char __user *name,
 			      const void __user *value, size_t size, int flags);
-asmlinkage ssize_t sys_getxattr(const char __user *path, const char __user *name,
-				void __user *value, size_t size);
-asmlinkage ssize_t sys_lgetxattr(const char __user *path, const char __user *name,
-				void __user *value, size_t size);
-asmlinkage ssize_t sys_fgetxattr(int fd, const char __user *name,
-				void __user *value, size_t size);
-asmlinkage ssize_t sys_listxattr(const char __user *path, char __user *list,
-				size_t size);
-asmlinkage ssize_t sys_llistxattr(const char __user *path, char __user *list,
-				size_t size);
-asmlinkage ssize_t sys_flistxattr(int fd, char __user *list, size_t size);
+asmlinkage long sys_getxattr(const char __user *path, const char __user *name,
+			     void __user *value, size_t size);
+asmlinkage long sys_lgetxattr(const char __user *path, const char __user *name,
+			      void __user *value, size_t size);
+asmlinkage long sys_fgetxattr(int fd, const char __user *name,
+			      void __user *value, size_t size);
+asmlinkage long sys_listxattr(const char __user *path, char __user *list,
+			      size_t size);
+asmlinkage long sys_llistxattr(const char __user *path, char __user *list,
+			       size_t size);
+asmlinkage long sys_flistxattr(int fd, char __user *list, size_t size);
 asmlinkage long sys_removexattr(const char __user *path,
 				const char __user *name);
 asmlinkage long sys_lremovexattr(const char __user *path,
 				 const char __user *name);
 asmlinkage long sys_fremovexattr(int fd, const char __user *name);
 
-asmlinkage unsigned long sys_brk(unsigned long brk);
+asmlinkage long sys_brk(unsigned long brk);
 asmlinkage long sys_mprotect(unsigned long start, size_t len,
 				unsigned long prot);
-asmlinkage unsigned long sys_mremap(unsigned long addr,
-				unsigned long old_len, unsigned long new_len,
-				unsigned long flags, unsigned long new_addr);
+asmlinkage long sys_mremap(unsigned long addr,
+			   unsigned long old_len, unsigned long new_len,
+			   unsigned long flags, unsigned long new_addr);
 asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
 			unsigned long prot, unsigned long pgoff,
 			unsigned long flags);
@@ -321,10 +390,10 @@
 				struct iocb __user * __user *);
 asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
 			      struct io_event __user *result);
-asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd,
-				off_t __user *offset, size_t count);
-asmlinkage ssize_t sys_sendfile64(int out_fd, int in_fd,
-				loff_t __user *offset, size_t count);
+asmlinkage long sys_sendfile(int out_fd, int in_fd,
+			     off_t __user *offset, size_t count);
+asmlinkage long sys_sendfile64(int out_fd, int in_fd,
+			       loff_t __user *offset, size_t count);
 asmlinkage long sys_readlink(const char __user *path,
 				char __user *buf, int bufsiz);
 asmlinkage long sys_creat(const char __user *pathname, int mode);
@@ -368,26 +437,25 @@
 				struct utimbuf __user *times);
 asmlinkage long sys_utimes(char __user *filename,
 				struct timeval __user *utimes);
-asmlinkage off_t sys_lseek(unsigned int fd, off_t offset,
-				unsigned int origin);
+asmlinkage long sys_lseek(unsigned int fd, off_t offset,
+			  unsigned int origin);
 asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high,
 			unsigned long offset_low, loff_t __user *result,
 			unsigned int origin);
-asmlinkage ssize_t sys_read(unsigned int fd, char __user *buf,
-				size_t count);
-asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count);
-asmlinkage ssize_t sys_readv(unsigned long fd,
-				const struct iovec __user *vec,
-				unsigned long vlen);
-asmlinkage ssize_t sys_write(unsigned int fd, const char __user *buf,
-				size_t count);
-asmlinkage ssize_t sys_writev(unsigned long fd,
-				const struct iovec __user *vec,
-				unsigned long vlen);
-asmlinkage ssize_t sys_pread64(unsigned int fd, char __user *buf,
-				size_t count, loff_t pos);
-asmlinkage ssize_t sys_pwrite64(unsigned int fd, const char __user *buf,
-				size_t count, loff_t pos);
+asmlinkage long sys_read(unsigned int fd, char __user *buf, size_t count);
+asmlinkage long sys_readahead(int fd, loff_t offset, size_t count);
+asmlinkage long sys_readv(unsigned long fd,
+			  const struct iovec __user *vec,
+			  unsigned long vlen);
+asmlinkage long sys_write(unsigned int fd, const char __user *buf,
+			  size_t count);
+asmlinkage long sys_writev(unsigned long fd,
+			   const struct iovec __user *vec,
+			   unsigned long vlen);
+asmlinkage long sys_pread64(unsigned int fd, char __user *buf,
+			    size_t count, loff_t pos);
+asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf,
+			     size_t count, loff_t pos);
 asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
 asmlinkage long sys_mkdir(const char __user *pathname, int mode);
 asmlinkage long sys_chdir(const char __user *filename);
@@ -476,7 +544,7 @@
 asmlinkage long sys_mq_open(const char __user *name, int oflag, mode_t mode, struct mq_attr __user *attr);
 asmlinkage long sys_mq_unlink(const char __user *name);
 asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec __user *abs_timeout);
-asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct timespec __user *abs_timeout);
+asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct timespec __user *abs_timeout);
 asmlinkage long sys_mq_notify(mqd_t mqdes, const struct sigevent __user *notification);
 asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqstat, struct mq_attr __user *omqstat);
 
@@ -530,11 +598,6 @@
 				const int __user *nodes,
 				int __user *status,
 				int flags);
-asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_page,
-				__u32 __user *pages,
-				const int __user *nodes,
-				int __user *status,
-				int flags);
 asmlinkage long sys_mbind(unsigned long start, unsigned long len,
 				unsigned long mode,
 				unsigned long __user *nmask,
@@ -583,13 +646,6 @@
 			       int bufsiz);
 asmlinkage long sys_utimensat(int dfd, char __user *filename,
 				struct timespec __user *utimes, int flags);
-asmlinkage long compat_sys_futimesat(unsigned int dfd, char __user *filename,
-				     struct compat_timeval __user *t);
-asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename,
-				      struct compat_stat __user *statbuf,
-				      int flag);
-asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
-				   int flags, int mode);
 asmlinkage long sys_unshare(unsigned long unshare_flags);
 
 asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
@@ -621,6 +677,15 @@
 asmlinkage long sys_eventfd(unsigned int count);
 asmlinkage long sys_eventfd2(unsigned int count, int flags);
 asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
+asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int);
+asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
+			     fd_set __user *, struct timespec __user *,
+			     void __user *);
+asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
+			  struct timespec __user *, const sigset_t __user *,
+			  size_t);
+asmlinkage long sys_pipe2(int __user *, int);
+asmlinkage long sys_pipe(int __user *);
 
 int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
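
For illustration, how a (hypothetical) three-argument system call would be written with the wrapper macros introduced above; on CONFIG_HAVE_SYSCALL_WRAPPERS architectures this expands to a SyS_example() stub that takes all arguments as longs plus an aliased sys_example() entry point, while elsewhere it expands directly to sys_example():

#include <linux/errno.h>
#include <linux/syscalls.h>

SYSCALL_DEFINE3(example, unsigned int, fd, char __user *, buf, size_t, count)
{
	if (!buf)
		return -EINVAL;
	/* ... the real work would go here ... */
	return 0;
}
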
 
diff --git a/include/linux/types.h b/include/linux/types.h
index 3b864f2..712ca53 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -176,10 +176,9 @@
 typedef __u16 __bitwise __be16;
 typedef __u32 __bitwise __le32;
 typedef __u32 __bitwise __be32;
-#if defined(__GNUC__)
 typedef __u64 __bitwise __le64;
 typedef __u64 __bitwise __be64;
-#endif
+
 typedef __u16 __bitwise __sum16;
 typedef __u32 __bitwise __wsum;
 
diff --git a/include/linux/usb.h b/include/linux/usb.h
index f72aa51..85ee9be 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -108,6 +108,7 @@
  *	(in probe()), bound to a driver, or unbinding (in disconnect())
  * @is_active: flag set when the interface is bound and not suspended.
  * @sysfs_files_created: sysfs attributes exist
+ * @ep_devs_created: endpoint child pseudo-devices exist
  * @unregistering: flag set when the interface is being unregistered
  * @needs_remote_wakeup: flag set when the driver requires remote-wakeup
  *	capability during autosuspend.
@@ -120,6 +121,11 @@
  *	to the sysfs representation for that device.
  * @pm_usage_cnt: PM usage counter for this interface; autosuspend is not
  *	allowed unless the counter is 0.
+ * @reset_ws: Used for scheduling resets from atomic context.
+ * @reset_running: set to 1 if the interface is currently running a
+ *      queued reset so that usb_cancel_queued_reset() doesn't try to
+ *      remove it from the workqueue when running inside the worker
+ *      thread. See __usb_queue_reset_device().
  *
  * USB device drivers attach to interfaces on a physical device.  Each
  * interface encapsulates a single high level function, such as feeding
@@ -164,14 +170,17 @@
 	enum usb_interface_condition condition;		/* state of binding */
 	unsigned is_active:1;		/* the interface is not suspended */
 	unsigned sysfs_files_created:1;	/* the sysfs attributes exist */
+	unsigned ep_devs_created:1;	/* endpoint "devices" exist */
 	unsigned unregistering:1;	/* unregistration is in progress */
 	unsigned needs_remote_wakeup:1;	/* driver requires remote wakeup */
 	unsigned needs_altsetting0:1;	/* switch to altsetting 0 is pending */
 	unsigned needs_binding:1;	/* needs delayed unbind/rebind */
+	unsigned reset_running:1;
 
 	struct device dev;		/* interface specific device info */
 	struct device *usb_dev;
 	int pm_usage_cnt;		/* usage counter for autosuspend */
+	struct work_struct reset_ws;	/* for resets in atomic context */
 };
 #define	to_usb_interface(d) container_of(d, struct usb_interface, dev)
 #define	interface_to_usbdev(intf) \
@@ -329,7 +338,7 @@
 #endif
 	struct device *dev;		/* device for this bus */
 
-#if defined(CONFIG_USB_MON)
+#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
 	struct mon_bus *mon_bus;	/* non-null when associated */
 	int monitored;			/* non-zero when monitored */
 #endif
@@ -398,6 +407,7 @@
  * @urbnum: number of URBs submitted for the whole device
  * @active_duration: total time device is not suspended
  * @autosuspend: for delayed autosuspends
+ * @autoresume: for autoresumes requested while in_interrupt
  * @pm_mutex: protects PM operations
  * @last_busy: time of last use
  * @autosuspend_delay: in jiffies
@@ -476,6 +486,7 @@
 
 #ifdef CONFIG_PM
 	struct delayed_work autosuspend;
+	struct work_struct autoresume;
 	struct mutex pm_mutex;
 
 	unsigned long last_busy;
@@ -505,6 +516,7 @@
 
 /* USB port reset for device reinitialization */
 extern int usb_reset_device(struct usb_device *dev);
+extern void usb_queue_reset_device(struct usb_interface *dev);
 
 extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id);
 
@@ -513,6 +525,8 @@
 extern int usb_autopm_set_interface(struct usb_interface *intf);
 extern int usb_autopm_get_interface(struct usb_interface *intf);
 extern void usb_autopm_put_interface(struct usb_interface *intf);
+extern int usb_autopm_get_interface_async(struct usb_interface *intf);
+extern void usb_autopm_put_interface_async(struct usb_interface *intf);
 
 static inline void usb_autopm_enable(struct usb_interface *intf)
 {
@@ -539,8 +553,13 @@
 static inline int usb_autopm_get_interface(struct usb_interface *intf)
 { return 0; }
 
+static inline int usb_autopm_get_interface_async(struct usb_interface *intf)
+{ return 0; }
+
 static inline void usb_autopm_put_interface(struct usb_interface *intf)
 { }
+static inline void usb_autopm_put_interface_async(struct usb_interface *intf)
+{ }
 static inline void usb_autopm_enable(struct usb_interface *intf)
 { }
 static inline void usb_autopm_disable(struct usb_interface *intf)
@@ -1050,7 +1069,7 @@
 	void (*disconnect) (struct usb_device *udev);
 
 	int (*suspend) (struct usb_device *udev, pm_message_t message);
-	int (*resume) (struct usb_device *udev);
+	int (*resume) (struct usb_device *udev, pm_message_t message);
 	struct usbdrv_wrap drvwrap;
 	unsigned int supports_autosuspend:1;
 };
@@ -1321,7 +1340,7 @@
 	struct kref kref;		/* reference count of the URB */
 	void *hcpriv;			/* private data for host controller */
 	atomic_t use_count;		/* concurrent submissions counter */
-	u8 reject;			/* submissions will fail */
+	atomic_t reject;		/* submissions will fail */
 	int unlinked;			/* unlink error code */
 
 	/* public: documented fields in the urb that can be used by drivers */
@@ -1466,6 +1485,7 @@
 extern void usb_unpoison_urb(struct urb *urb);
 extern void usb_kill_anchored_urbs(struct usb_anchor *anchor);
 extern void usb_poison_anchored_urbs(struct usb_anchor *anchor);
+extern void usb_unpoison_anchored_urbs(struct usb_anchor *anchor);
 extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor);
 extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor);
 extern void usb_unanchor_urb(struct urb *urb);
@@ -1722,10 +1742,6 @@
 
 #define err(format, arg...) printk(KERN_ERR KBUILD_MODNAME ": " \
 	format "\n" , ## arg)
-#define info(format, arg...) printk(KERN_INFO KBUILD_MODNAME ": " \
-	format "\n" , ## arg)
-#define warn(format, arg...) printk(KERN_WARNING KBUILD_MODNAME ": " \
-	format "\n" , ## arg)
 
 #endif  /* __KERNEL__ */
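
A sketch of the intended use of usb_queue_reset_device(): recovering a wedged device from a URB completion handler, where a synchronous usb_reset_device() would not be allowed. The completion handler and the use of urb->context are illustrative only:

#include <linux/errno.h>
#include <linux/usb.h>

static void example_urb_complete(struct urb *urb)
{
	struct usb_interface *intf = urb->context;

	if (urb->status == -EPROTO || urb->status == -EILSEQ)
		usb_queue_reset_device(intf);	/* safe in atomic context */
}
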
 
diff --git a/include/linux/usb/association.h b/include/linux/usb/association.h
index 07c5e3c..0a4a18b 100644
--- a/include/linux/usb/association.h
+++ b/include/linux/usb/association.h
@@ -28,17 +28,17 @@
 };
 
 /* Different fields defined by the spec */
-#define WUSB_AR_AssociationTypeId	{ .id = 0x0000, .len =  2 }
-#define WUSB_AR_AssociationSubTypeId	{ .id = 0x0001, .len =  2 }
-#define WUSB_AR_Length			{ .id = 0x0002, .len =  4 }
-#define WUSB_AR_AssociationStatus	{ .id = 0x0004, .len =  4 }
-#define WUSB_AR_LangID			{ .id = 0x0008, .len =  2 }
-#define WUSB_AR_DeviceFriendlyName	{ .id = 0x000b, .len = 64 } /* max */
-#define WUSB_AR_HostFriendlyName	{ .id = 0x000c, .len = 64 } /* max */
-#define WUSB_AR_CHID			{ .id = 0x1000, .len = 16 }
-#define WUSB_AR_CDID			{ .id = 0x1001, .len = 16 }
-#define WUSB_AR_ConnectionContext	{ .id = 0x1002, .len = 48 }
-#define WUSB_AR_BandGroups		{ .id = 0x1004, .len =  2 }
+#define WUSB_AR_AssociationTypeId	{ .id = cpu_to_le16(0x0000), .len = cpu_to_le16(2) }
+#define WUSB_AR_AssociationSubTypeId	{ .id = cpu_to_le16(0x0001), .len = cpu_to_le16(2) }
+#define WUSB_AR_Length			{ .id = cpu_to_le16(0x0002), .len = cpu_to_le16(4) }
+#define WUSB_AR_AssociationStatus	{ .id = cpu_to_le16(0x0004), .len = cpu_to_le16(4) }
+#define WUSB_AR_LangID			{ .id = cpu_to_le16(0x0008), .len = cpu_to_le16(2) }
+#define WUSB_AR_DeviceFriendlyName	{ .id = cpu_to_le16(0x000b), .len = cpu_to_le16(64) } /* max */
+#define WUSB_AR_HostFriendlyName	{ .id = cpu_to_le16(0x000c), .len = cpu_to_le16(64) } /* max */
+#define WUSB_AR_CHID			{ .id = cpu_to_le16(0x1000), .len = cpu_to_le16(16) }
+#define WUSB_AR_CDID			{ .id = cpu_to_le16(0x1001), .len = cpu_to_le16(16) }
+#define WUSB_AR_ConnectionContext	{ .id = cpu_to_le16(0x1002), .len = cpu_to_le16(48) }
+#define WUSB_AR_BandGroups		{ .id = cpu_to_le16(0x1004), .len = cpu_to_le16(2) }
 
 /* CBAF Control Requests (AMS1.0[T4-1] */
 enum {
diff --git a/include/linux/usb/gpio_vbus.h b/include/linux/usb/gpio_vbus.h
new file mode 100644
index 0000000..d9f03cc
--- /dev/null
+++ b/include/linux/usb/gpio_vbus.h
@@ -0,0 +1,30 @@
+/*
+ * A simple GPIO VBUS sensing driver for B peripheral only devices
+ * with internal transceivers.
+ * Optionally D+ pullup can be controlled by a second GPIO.
+ *
+ * Copyright (c) 2008 Philipp Zabel <philipp.zabel@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/**
+ * struct gpio_vbus_mach_info - configuration for gpio_vbus
+ * @gpio_vbus: VBUS sensing GPIO
+ * @gpio_pullup: optional D+ or D- pullup GPIO (else negative/invalid)
+ * @gpio_vbus_inverted: true if gpio_vbus is active low
+ * @gpio_pullup_inverted: true if gpio_pullup is active low
+ *
+ * The VBUS sensing GPIO should have a pulldown, which will normally be
+ * part of a resistor ladder turning a 4.0V-5.25V level on VBUS into a
+ * value the GPIO detects as active.  Some systems will use comparators.
+ */
+struct gpio_vbus_mach_info {
+	int gpio_vbus;
+	int gpio_pullup;
+	bool gpio_vbus_inverted;
+	bool gpio_pullup_inverted;
+};
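
A board-file sketch for the new platform data; the GPIO number is a placeholder and, per the comment above, a negative gpio_pullup means no pullup GPIO is used:

#include <linux/usb/gpio_vbus.h>

static struct gpio_vbus_mach_info example_gpio_vbus_info = {
	.gpio_vbus	= 83,	/* placeholder GPIO number */
	.gpio_pullup	= -1,	/* no D+ pullup GPIO */
};
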
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 630962c..d6aad0e 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -47,6 +47,11 @@
 	u8		ram_bits;	/* ram address size */
 
 	struct musb_hdrc_eps_bits *eps_bits;
+#ifdef CONFIG_BLACKFIN
+	/* A GPIO controlling VRSEL in Blackfin */
+	unsigned int	gpio_vrsel;
+#endif
+
 };
 
 struct musb_hdrc_platform_data {
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 1db25d1..94df4fe 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -84,6 +84,7 @@
 
 /* for usb host and peripheral controller drivers */
 extern struct otg_transceiver *otg_get_transceiver(void);
+extern void otg_put_transceiver(struct otg_transceiver *);
 
 static inline int
 otg_start_hnp(struct otg_transceiver *otg)
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index d9a3bbe..1eea1ab 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -52,8 +52,11 @@
 	US_FLAG(MAX_SECTORS_MIN,0x00002000)			\
 		/* Sets max_sectors to arch min */		\
 	US_FLAG(BULK_IGNORE_TAG,0x00004000)			\
-		/* Ignore tag mismatch in bulk operations */
-
+		/* Ignore tag mismatch in bulk operations */    \
+	US_FLAG(SANE_SENSE,     0x00008000)			\
+		/* Sane Sense (> 18 bytes) */			\
+	US_FLAG(CAPACITY_OK,	0x00010000)			\
+		/* READ CAPACITY response is correct */
 
 #define US_FLAG(name, value)	US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/wimax.h b/include/linux/wimax.h
new file mode 100644
index 0000000..c89de7f
--- /dev/null
+++ b/include/linux/wimax.h
@@ -0,0 +1,234 @@
+/*
+ * Linux WiMax
+ * API for user space
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *  - Initial implementation
+ *
+ *
+ * This file declares the user/kernel protocol that is spoken over
+ * Generic Netlink, as well as any type declaration that is to be used
+ * by kernel and user space.
+ *
+ * It is intended for user space to clone it verbatim to use it as a
+ * primary reference for definitions.
+ *
+ * Stuff intended for kernel usage as well as full protocol and stack
+ * documentation is rooted in include/net/wimax.h.
+ */
+
+#ifndef __LINUX__WIMAX_H__
+#define __LINUX__WIMAX_H__
+
+#include <linux/types.h>
+
+enum {
+	/**
+	 * Version of the interface (unsigned decimal, MMm, max 25.5)
+	 * M - Major: change if removing or modifying an existing call.
+	 * m - minor: change when adding a new call
+	 */
+	WIMAX_GNL_VERSION = 00,
+	/* Generic NetLink attributes */
+	WIMAX_GNL_ATTR_INVALID = 0x00,
+	WIMAX_GNL_ATTR_MAX = 10,
+};
+
+
+/*
+ * Generic NetLink operations
+ *
+ * Most of these map to an API call; _OP_ stands for operation, _RP_
+ * for reply and _RE_ for report (aka: signal).
+ */
+enum {
+	WIMAX_GNL_OP_MSG_FROM_USER,	/* User to kernel message */
+	WIMAX_GNL_OP_MSG_TO_USER,	/* Kernel to user message */
+	WIMAX_GNL_OP_RFKILL,	/* Run wimax_rfkill() */
+	WIMAX_GNL_OP_RESET,	/* Run wimax_reset() */
+	WIMAX_GNL_RE_STATE_CHANGE,	/* Report: status change */
+};
+
+
+/* Message from user / to user */
+enum {
+	WIMAX_GNL_MSG_IFIDX = 1,
+	WIMAX_GNL_MSG_PIPE_NAME,
+	WIMAX_GNL_MSG_DATA,
+};
+
+
+/*
+ * wimax_rfkill()
+ *
+ * The state of the radio (ON/OFF) is mapped to the rfkill subsystem's
+ * switch state (DISABLED/ENABLED).
+ */
+enum wimax_rf_state {
+	WIMAX_RF_OFF = 0,	/* Radio is off, rfkill on/enabled */
+	WIMAX_RF_ON = 1,	/* Radio is on, rfkill off/disabled */
+	WIMAX_RF_QUERY = 2,
+};
+
+/* Attributes */
+enum {
+	WIMAX_GNL_RFKILL_IFIDX = 1,
+	WIMAX_GNL_RFKILL_STATE,
+};
+
+
+/* Attributes for wimax_reset() */
+enum {
+	WIMAX_GNL_RESET_IFIDX = 1,
+};
+
+
+/*
+ * Attributes for the Report State Change
+ *
+ * For now we just have the old and new states; new attributes might
+ * be added later on.
+ */
+enum {
+	WIMAX_GNL_STCH_IFIDX = 1,
+	WIMAX_GNL_STCH_STATE_OLD,
+	WIMAX_GNL_STCH_STATE_NEW,
+};
+
+
+/**
+ * enum wimax_st - The different states of a WiMAX device
+ * @__WIMAX_ST_NULL: The device structure has been allocated and zeroed,
+ *     but wimax_dev_add() hasn't been called yet. There is no state.
+ *
+ * @WIMAX_ST_DOWN: The device has been registered with the WiMAX and
+ *     networking stacks, but it is not initialized (normally that is
+ *     done with 'ifconfig DEV up' [or equivalent], which can upload
+ *     firmware and enable communications with the device).
+ *     In this state, the device is powered down and using as little
+ *     power as possible.
+ *     This state is the default after a call to wimax_dev_add(). It
+ *     is ok to have drivers move directly to %WIMAX_ST_UNINITIALIZED
+ *     or %WIMAX_ST_RADIO_OFF in _probe() after the call to
+ *     wimax_dev_add().
+ *     It is recommended that the driver leaves this state when
+ *     calling 'ifconfig DEV up' and enters it back on 'ifconfig DEV
+ *     down'.
+ *
+ * @__WIMAX_ST_QUIESCING: The device is being torn down, so no API
+ *     operations are allowed to proceed except the ones needed to
+ *     complete the device clean up process.
+ *
+ * @WIMAX_ST_UNINITIALIZED: [optional] Communication with the device
+ *     is set up, but the device still requires some configuration
+ *     before being operational.
+ *     Some WiMAX API calls might work.
+ *
+ * @WIMAX_ST_RADIO_OFF: The device is fully up; radio is off (whether
+ *     by hardware or software switches).
+ *     It is recommended to always leave the device in this state
+ *     after initialization.
+ *
+ * @WIMAX_ST_READY: The device is fully up and radio is on.
+ *
+ * @WIMAX_ST_SCANNING: [optional] The device has been instructed to
+ *     scan. In this state, the device cannot be actively connected to
+ *     a network.
+ *
+ * @WIMAX_ST_CONNECTING: The device is connecting to a network. This
+ *     state exists because in some devices, the connect process can
+ *     include a number of negotiations between user space, kernel
+ *     space and the device. User space needs to know what the device
+ *     is doing. If the connect sequence in a device is atomic and
+ *     fast, the device can transition directly to CONNECTED
+ *
+ * @WIMAX_ST_CONNECTED: The device is connected to a network.
+ *
+ * @__WIMAX_ST_INVALID: This is an invalid state used to mark the
+ *     maximum numeric value of states.
+ *
+ * Description:
+ *
+ * Transitions from one state to another one are atomic and can only
+ * be caused in kernel space with wimax_state_change(). To read the
+ * state, use wimax_state_get().
+ *
+ * States starting with __ are internal and shall not be used or
+ * referred to by drivers or userspace. They look ugly, but that's the
+ * point -- if any use is made outside the stack, it is easier
+ * to catch on review.
+ *
+ * All API operations [with well defined exceptions] will take the
+ * device mutex before starting and then check the state. If the state
+ * is %__WIMAX_ST_NULL, %WIMAX_ST_DOWN, %WIMAX_ST_UNINITIALIZED or
+ * %__WIMAX_ST_QUIESCING, the operation will drop the lock and quit with
+ * -%EINVAL, -%ENOMEDIUM, -%ENOTCONN or -%ESHUTDOWN.
+ *
+ * The order of the definitions is important, so we can do numerical
+ * comparisons (eg: < %WIMAX_ST_RADIO_OFF means the device is not ready
+ * to operate).
+ */
+/*
+ * The allowed state transitions are described in the table below
+ * (states in rows can go to states in columns where there is an X):
+ *
+ *                                  UNINI   RADIO READY SCAN CONNEC CONNEC
+ *             NULL DOWN QUIESCING TIALIZED  OFF        NING  TING   TED
+ * NULL         -    x
+ * DOWN              -      x        x       x
+ * QUIESCING         x      -
+ * UNINITIALIZED            x        -       x
+ * RADIO_OFF                x                -     x
+ * READY                    x                x     -     x     x      x
+ * SCANNING                 x                x     x     -     x      x
+ * CONNECTING               x                x     x     x     -      x
+ * CONNECTED                x                x     x                  -
+ *
+ * This table is not available in kernel-doc because the formatting messes it up.
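+ *
+ * Illustrative sketch (not part of the interface): because the states
+ * above are ordered, readiness checks can be simple numerical
+ * comparisons; the helper name below is made up.
+ *
+ *     static inline int example_wimax_is_operational(enum wimax_st state)
+ *     {
+ *             return state >= WIMAX_ST_RADIO_OFF;
+ *     }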
+ */
+enum wimax_st {
+	__WIMAX_ST_NULL = 0,
+	WIMAX_ST_DOWN,
+	__WIMAX_ST_QUIESCING,
+	WIMAX_ST_UNINITIALIZED,
+	WIMAX_ST_RADIO_OFF,
+	WIMAX_ST_READY,
+	WIMAX_ST_SCANNING,
+	WIMAX_ST_CONNECTING,
+	WIMAX_ST_CONNECTED,
+	__WIMAX_ST_INVALID			/* Always keep last */
+};
+
+
+#endif /* #ifndef __LINUX__WIMAX_H__ */
diff --git a/include/linux/wimax/Kbuild b/include/linux/wimax/Kbuild
new file mode 100644
index 0000000..3cb4f26
--- /dev/null
+++ b/include/linux/wimax/Kbuild
@@ -0,0 +1 @@
+header-y += i2400m.h
diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h
new file mode 100644
index 0000000..ba0c493
--- /dev/null
+++ b/include/linux/wimax/debug.h
@@ -0,0 +1,453 @@
+/*
+ * Linux WiMAX
+ * Collection of tools to manage debug operations.
+ *
+ *
+ * Copyright (C) 2005-2007 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Don't #include this file directly, read on!
+ *
+ *
+ * EXECUTING DEBUGGING ACTIONS OR NOT
+ *
+ * The main thing this framework provides is decision power to take a
+ * debug action (like printing a message) if the current debug level
+ * allows it.
+ *
+ * The decision power is at two levels: at compile-time (what does
+ * not make it is compiled out) and at run-time. The run-time
+ * selection is done per-submodule (as they are declared by the user
+ * of the framework).
+ *
+ * A call to d_test(L) (L being the target debug level) returns true
+ * if the action should be taken because the current debug levels
+ * allow it (both compile and run time).
+ *
+ * It follows that a call to d_test() that can be determined to be
+ * always false at compile time will get the code depending on it
+ * compiled out by optimization.
+ *
+ *
+ * DEBUG LEVELS
+ *
+ * It is up to the caller to define what each debugging level means.
+ *
+ * Convention sets 0 as "no debug" (so an action marked as debug level 0
+ * will always be taken). The increasing debug levels are used for
+ * increased verbosity.
+ *
+ *
+ * USAGE
+ *
+ * Group the code in modules and submodules inside each module [which
+ * in most cases maps to Linux modules and .c files that compose
+ * those].
+ *
+ *
+ * For each module, there is:
+ *
+ *  - a MODULENAME (single word, legal C identifier)
+ *
+ *  - a debug-levels.h header file that declares the list of
+ *    submodules and that is included by all .c files that use
+ *    the debugging tools. The file name can be anything.
+ *
+ *  - some (optional) .c code to manipulate the runtime debug levels
+ *    through debugfs.
+ *
+ * The debug-levels.h file would look like:
+ *
+ *     #ifndef __debug_levels__h__
+ *     #define __debug_levels__h__
+ *
+ *     #define D_MODULENAME modulename
+ *     #define D_MASTER 10
+ *
+ *     #include <linux/wimax/debug.h>
+ *
+ *     enum d_module {
+ *             D_SUBMODULE_DECLARE(submodule_1),
+ *             D_SUBMODULE_DECLARE(submodule_2),
+ *             ...
+ *             D_SUBMODULE_DECLARE(submodule_N)
+ *     };
+ *
+ *     #endif
+ *
+ * D_MASTER is the maximum compile-time debug level; any debug actions
+ * above this will be compiled out. D_MODULENAME is the module name (legal C
+ * identifier), which has to be unique for each module (to avoid
+ * namespace collisions during linkage). Note those #defines need to
+ * be done before #including debug.h
+ *
+ * We declare N different submodules whose debug level can be
+ * independently controlled during runtime.
+ *
+ * In a .c file of the module (and only in one of them), define the
+ * following code:
+ *
+ *     struct d_level D_LEVEL[] = {
+ *             D_SUBMODULE_DEFINE(submodule_1),
+ *             D_SUBMODULE_DEFINE(submodule_2),
+ *             ...
+ *             D_SUBMODULE_DEFINE(submodule_N),
+ *     };
+ *     size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+ *
+ * Externs for d_level_MODULENAME and d_level_size_MODULENAME are used
+ * and declared in this file using the D_LEVEL and D_LEVEL_SIZE macros
+ * #defined also in this file.
+ *
+ * To manipulate the levels from user space, create a debugfs dentry
+ * and then register each submodule with:
+ *
+ *     result = d_level_register_debugfs("PREFIX_", submodule_X, parent);
+ *     if (result < 0)
+ *            goto error;
+ *
+ * Where PREFIX_ is a name of your choosing. This will create a debugfs
+ * file with a single numeric value that can be used to tweak it. To
+ * remove the entries, just use debugfs_remove_recursive() on 'parent'.
+ *
+ * NOTE: remember that even if this will show attached to some
+ *     particular instance of a device, the settings are *global*.
+ *
+ *
+ * On each submodule (for example, .c files), the debug infrastructure
+ * should be included like this:
+ *
+ *     #define D_SUBMODULE submodule_x     // matches one in debug-levels.h
+ *     #include "debug-levels.h"
+ *
+ * after #including all your include files.
+ *
+ *
+ * Now you can use the d_*() macros below [d_test(), d_fnstart(),
+ * d_fnend(), d_printf(), d_dump()].
+ *
+ * If their debug level is greater than D_MASTER, they will be
+ * compiled out.
+ *
+ * If their debug level is lower than or equal to D_MASTER but greater
+ * than the current debug level of their submodule, they'll be
+ * ignored.
+ *
+ * Otherwise, the action will be performed.
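+ *
+ *
+ * A minimal usage sketch (illustrative only; the function and
+ * submodule names are made up):
+ *
+ *     #define D_SUBMODULE submodule_1
+ *     #include "debug-levels.h"
+ *
+ *     static void example_rx(struct device *dev, void *buf, size_t len)
+ *     {
+ *             d_fnstart(3, dev, "(dev %p buf %p len %zu)\n", dev, buf, len);
+ *             d_printf(1, dev, "received %zu bytes\n", len);
+ *             d_dump(2, dev, buf, len);
+ *             d_fnend(3, dev, "(dev %p buf %p len %zu) = void\n", dev, buf, len);
+ *     }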
+ */
+#ifndef __debug__h__
+#define __debug__h__
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+
+/* Backend stuff */
+
+/*
+ * Debug backend: generate a message header from a 'struct device'
+ *
+ * @head: buffer where to place the header
+ * @head_size: length of @head
+ * @dev: pointer to device used to generate a header from. If NULL,
+ *     an empty ("") header is generated.
+ */
+static inline
+void __d_head(char *head, size_t head_size,
+	      struct device *dev)
+{
+	if (dev == NULL)
+		head[0] = 0;
+	else if ((unsigned long)dev < 4096) {
+		printk(KERN_ERR "E: Corrupt dev %p\n", dev);
+		WARN_ON(1);
+	} else
+		snprintf(head, head_size, "%s %s: ",
+			 dev_driver_string(dev), dev->bus_id);
+}
+
+
+/*
+ * Debug backend: log some message if debugging is enabled
+ *
+ * @l: intended debug level
+ * @tag: tag to prefix the message with
+ * @dev: 'struct device' associated to this message
+ * @f: printf-like format and arguments
+ *
+ * Note this is optimized out if it doesn't pass the compile-time
+ * check; however, it is *always* compiled. This is useful to make
+ * sure the printf-like formats and variables are always checked and
+ * they don't get bit rot if you have all the debugging disabled.
+ */
+#define _d_printf(l, tag, dev, f, a...)					\
+do {									\
+	char head[64];							\
+	if (!d_test(l))							\
+		break;							\
+	__d_head(head, sizeof(head), dev);				\
+	printk(KERN_ERR "%s%s%s: " f, head, __func__, tag, ##a);	\
+} while (0)
+
+
+/*
+ * CPP syntactic sugar to generate A_B like symbol names when one of
+ * the arguments is a preprocessor #define.
+ */
+#define __D_PASTE__(varname, modulename) varname##_##modulename
+#define __D_PASTE(varname, modulename) (__D_PASTE__(varname, modulename))
+#define _D_SUBMODULE_INDEX(_name) (D_SUBMODULE_DECLARE(_name))
+
+
+/*
+ * Store a submodule's runtime debug level and name
+ */
+struct d_level {
+	u8 level;
+	const char *name;
+};
+
+
+/*
+ * List of available submodules and their debug levels
+ *
+ * We call them d_level_MODULENAME and d_level_size_MODULENAME; the
+ * macros D_LEVEL and D_LEVEL_SIZE contain the name already for
+ * convenience.
+ *
+ * This array and the size are defined on some .c file that is part of
+ * the current module.
+ */
+#define D_LEVEL __D_PASTE(d_level, D_MODULENAME)
+#define D_LEVEL_SIZE __D_PASTE(d_level_size, D_MODULENAME)
+
+extern struct d_level D_LEVEL[];
+extern size_t D_LEVEL_SIZE;
+
+
+/*
+ * Frontend stuff
+ *
+ *
+ * Stuff you need to declare prior to using the actual "debug" actions
+ * (defined below).
+ */
+
+#ifndef D_MODULENAME
+#error D_MODULENAME is not defined in your debug-levels.h file
+/**
+ * D_MODULENAME - Name of the current module
+ *
+ * #define in your module's debug-levels.h, making sure it is
+ * unique. This has to be a legal C identifier.
+ */
+#define D_MODULENAME undefined_modulename
+#endif
+
+
+#ifndef D_MASTER
+#warning D_MASTER not defined, but debug.h included! [see docs]
+/**
+ * D_MASTER - Compile time maximum debug level
+ *
+ * #define in your debug-levels.h file to the maximum debug level the
+ * runtime code will be allowed to have. This allows you to provide a
+ * main knob.
+ *
+ * Anything above that level will be optimized out of the compile.
+ *
+ * Defaults to zero (no debug code compiled in).
+ *
+ * Maximum one definition per module (at the debug-levels.h file).
+ */
+#define D_MASTER 0
+#endif
+
+#ifndef D_SUBMODULE
+#error D_SUBMODULE not defined, but debug.h included! [see docs]
+/**
+ * D_SUBMODULE - Name of the current submodule
+ *
+ * #define in your submodule .c file before #including debug-levels.h
+ * to the name of the current submodule as previously declared and
+ * defined with D_SUBMODULE_DECLARE() (in your module's
+ * debug-levels.h) and D_SUBMODULE_DEFINE().
+ *
+ * This is used to provide runtime-control over the debug levels.
+ *
+ * Maximum one per .c file! Can be shared among different .c files
+ * (meaning they belong to the same submodule categorization).
+ */
+#define D_SUBMODULE undefined_module
+#endif
+
+
+/**
+ * D_SUBMODULE_DECLARE - Declare a submodule for runtime debug level control
+ *
+ * @_name: name of the submodule, restricted to the chars that make up a
+ *     valid C identifier ([a-zA-Z0-9_]).
+ *
+ * Declare in the module's debug-levels.h header file as:
+ *
+ * enum d_module {
+ *         D_SUBMODULE_DECLARE(submodule_1),
+ *         D_SUBMODULE_DECLARE(submodule_2),
+ *         D_SUBMODULE_DECLARE(submodule_3),
+ * };
+ *
+ * Some corresponding .c file needs to have a matching
+ * D_SUBMODULE_DEFINE().
+ */
+#define D_SUBMODULE_DECLARE(_name) __D_SUBMODULE_##_name
+
+
+/**
+ * D_SUBMODULE_DEFINE - Define a submodule for runtime debug level control
+ *
+ * @_name: name of the submodule, restricted to the chars that make up a
+ *     valid C identifier ([a-zA-Z0-9_]).
+ *
+ * Use once per module (in some .c file) as:
+ *
+ * static
+ * struct d_level d_level_SUBMODULENAME[] = {
+ *         D_SUBMODULE_DEFINE(submodule_1),
+ *         D_SUBMODULE_DEFINE(submodule_2),
+ *         D_SUBMODULE_DEFINE(submodule_3),
+ * };
+ * size_t d_level_size_SUBMODULENAME = ARRAY_SIZE(d_level_SUBMODULENAME);
+ *
+ * Matching D_SUBMODULE_DECLARE()s have to be present in a
+ * debug-levels.h header file.
+ */
+#define D_SUBMODULE_DEFINE(_name)		\
+[__D_SUBMODULE_##_name] = {			\
+	.level = 0,				\
+	.name = #_name				\
+}
+
+
+
+/* The actual "debug" operations */
+
+
+/**
+ * d_test - Returns true if debugging should be enabled
+ *
+ * @l: intended debug level (unsigned)
+ *
+ * If the master debug switch is enabled and the current settings are
+ * higher or equal to the requested level, then debugging
+ * output/actions should be enabled.
+ *
+ * NOTE:
+ *
+ * This needs to be coded so that it can be evaluated at compile
+ * time; this is why the ugly BUG_ON() is placed in there, so the
+ * D_MASTER evaluation compiles it all out if it is compile-time false.
+ */
+#define d_test(l)							\
+({									\
+	unsigned __l = l;	/* type enforcer */			\
+	(D_MASTER) >= __l						\
+	&& ({								\
+		BUG_ON(_D_SUBMODULE_INDEX(D_SUBMODULE) >= D_LEVEL_SIZE);\
+		D_LEVEL[_D_SUBMODULE_INDEX(D_SUBMODULE)].level >= __l;	\
+	});								\
+})
+
+
+/**
+ * d_fnstart - log message at function start if debugging enabled
+ *
+ * @l: intended debug level
+ * @_dev: 'struct device' pointer, NULL if none (for context)
+ * @f: printf-like format and arguments
+ */
+#define d_fnstart(l, _dev, f, a...) _d_printf(l, " FNSTART", _dev, f, ## a)
+
+
+/**
+ * d_fnend - log message at function end if debugging enabled
+ *
+ * @l: intended debug level
+ * @_dev: 'struct device' pointer, NULL if none (for context)
+ * @f: printf-like format and arguments
+ */
+#define d_fnend(l, _dev, f, a...) _d_printf(l, " FNEND", _dev, f, ## a)
+
+
+/**
+ * d_printf - log message if debugging enabled
+ *
+ * @l: intended debug level
+ * @_dev: 'struct device' pointer, NULL if none (for context)
+ * @f: printf-like format and arguments
+ */
+#define d_printf(l, _dev, f, a...) _d_printf(l, "", _dev, f, ## a)
+
+
+/**
+ * d_dump - log buffer hex dump if debugging enabled
+ *
+ * @l: intended debug level
+ * @dev: 'struct device' pointer, NULL if none (for context)
+ * @ptr: pointer to the buffer to dump
+ * @size: number of bytes to dump
+ */
+#define d_dump(l, dev, ptr, size)			\
+do {							\
+	char head[64];					\
+	if (!d_test(l))					\
+		break;					\
+	__d_head(head, sizeof(head), dev);		\
+	print_hex_dump(KERN_ERR, head, 0, 16, 1,	\
+		       ((void *) ptr), (size), 0);	\
+} while (0)
+
+
+/**
+ * d_level_register_debugfs - Export a submodule's debug level over
+ *     debugfs as PREFIXSUBMODULE
+ *
+ * @prefix: string to prefix the name with
+ * @name: name of the submodule (not a string, just the name)
+ * @parent: debugfs parent dentry
+ *
+ * Returns: 0 if ok, < 0 errno on error.
+ *
+ * For removing, just use debugfs_remove_recursive() on the parent.
+ */
+#define d_level_register_debugfs(prefix, name, parent)			\
+({									\
+	int rc;								\
+	struct dentry *fd;						\
+	struct dentry *verify_parent_type = parent;			\
+	fd = debugfs_create_u8(						\
+		prefix #name, 0600, verify_parent_type,			\
+		&(D_LEVEL[__D_SUBMODULE_ ## name].level));		\
+	rc = PTR_ERR(fd);						\
+	if (IS_ERR(fd) && rc != -ENODEV)				\
+		printk(KERN_ERR "%s: Can't create debugfs entry %s: "	\
+		       "%d\n", __func__, prefix #name, rc);		\
+	else								\
+		rc = 0;							\
+	rc;								\
+})
+
+
+#endif /* #ifndef __debug__h__ */
diff --git a/include/linux/wimax/i2400m.h b/include/linux/wimax/i2400m.h
new file mode 100644
index 0000000..74198f5
--- /dev/null
+++ b/include/linux/wimax/i2400m.h
@@ -0,0 +1,512 @@
+/*
+ * Intel Wireless WiMax Connection 2400m
+ * Host-Device protocol interface definitions
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *  - Initial implementation
+ *
+ *
+ * This header defines the data structures and constants used to
+ * communicate with the device.
+ *
+ * BOOTMODE/BOOTROM/FIRMWARE UPLOAD PROTOCOL
+ *
+ * The firmware upload protocol is quite simple and only requires a
+ * handful of commands. See drivers/net/wimax/i2400m/fw.c for more
+ * details.
+ *
+ * The BCF data structure is for the firmware file header.
+ *
+ *
+ * THE DATA / CONTROL PROTOCOL
+ *
+ * This is the normal protocol spoken with the device once the
+ * firmware is uploaded. It transports data payloads and control
+ * messages back and forth.
+ *
+ * It consists of 'messages' that pack one or more payloads each. The
+ * format is described in detail in drivers/net/wimax/i2400m/rx.c and
+ * tx.c.
+ *
+ *
+ * THE L3L4 PROTOCOL
+ *
+ * The term L3L4 refers to Layer 3 (the device) and Layer 4 (the
+ * driver/host software).
+ *
+ * This is the control protocol used by the host to control the i2400m
+ * device (scan, connect, disconnect...). This is sent to / received
+ * as control frames. These frames consist of a header and zero or
+ * more TLVs with information. We call each control frame a "message".
+ *
+ * Each message is composed of:
+ *
+ * HEADER
+ * [TLV0 + PAYLOAD0]
+ * [TLV1 + PAYLOAD1]
+ * [...]
+ * [TLVN + PAYLOADN]
+ *
+ * The HEADER is defined by 'struct i2400m_l3l4_hdr'. The payloads are
+ * defined by a TLV structure (Type Length Value) which is a 'header'
+ * (struct i2400m_tlv_hdr) and then the payload.
+ *
+ * All integers are represented as Little Endian.
+ *
+ * - REQUESTS AND EVENTS
+ *
+ * The requests can be classified as follows:
+ *
+ *   COMMAND:  implies a request from the host to the device asking
+ *             that an action be performed. The device will reply with a
+ *             message (with the same type as the command), status and
+ *             no (TLV) payload. Execution of a command might cause
+ *             events (of different type) to be sent later on as the
+ *             device's state changes.
+ *
+ *   GET/SET:  similar to COMMAND, but will not cause other
+ *             EVENTs. The reply, in the case of GET, will contain
+ *             TLVs with the requested information.
+ *
+ *   EVENT:    asynchronous messages sent from the device, maybe as a
+ *             consequence of previous COMMANDs but disassociated from
+ *             them.
+ *
+ * Only one request might be pending at the same time (ie: don't
+ * parallelize nor post another GET request before the previous
+ * COMMAND has been acknowledged with its corresponding reply by the
+ * device).
+ *
+ * The different requests and their formats are described below:
+ *
+ *  I2400M_MT_*   Message types
+ *  I2400M_MS_*   Message status (for replies, events)
+ *  i2400m_tlv_*  TLVs
+ *
+ * data types are named 'struct i2400m_msg_OPNAME', OPNAME matching the
+ * operation.
+ */
+
+#ifndef __LINUX__WIMAX__I2400M_H__
+#define __LINUX__WIMAX__I2400M_H__
+
+#include <linux/types.h>
+
+
+/*
+ * Host Device Interface (HDI) common to all busses
+ */
+
+/* Boot-mode (firmware upload mode) commands */
+
+/* Header for the firmware file */
+struct i2400m_bcf_hdr {
+	__le32 module_type;
+	__le32 header_len;
+	__le32 header_version;
+	__le32 module_id;
+	__le32 module_vendor;
+	__le32 date;		/* BCD YYYYMMDD */
+	__le32 size;
+	__le32 key_size;	/* in dwords */
+	__le32 modulus_size;	/* in dwords */
+	__le32 exponent_size;	/* in dwords */
+	__u8 reserved[88];
+} __attribute__ ((packed));
+
+/* Boot mode opcodes */
+enum i2400m_brh_opcode {
+	I2400M_BRH_READ = 1,
+	I2400M_BRH_WRITE = 2,
+	I2400M_BRH_JUMP = 3,
+	I2400M_BRH_SIGNED_JUMP = 8,
+	I2400M_BRH_HASH_PAYLOAD_ONLY = 9,
+};
+
+/* Boot mode command masks and stuff */
+enum i2400m_brh {
+	I2400M_BRH_SIGNATURE = 0xcbbc0000,
+	I2400M_BRH_SIGNATURE_MASK = 0xffff0000,
+	I2400M_BRH_SIGNATURE_SHIFT = 16,
+	I2400M_BRH_OPCODE_MASK = 0x0000000f,
+	I2400M_BRH_RESPONSE_MASK = 0x000000f0,
+	I2400M_BRH_RESPONSE_SHIFT = 4,
+	I2400M_BRH_DIRECT_ACCESS = 0x00000400,
+	I2400M_BRH_RESPONSE_REQUIRED = 0x00000200,
+	I2400M_BRH_USE_CHECKSUM = 0x00000100,
+};
+
+
+/* Constants for bcf->module_id */
+enum i2400m_bcf_mod_id {
+	/* Firmware file carries its own pokes -- pokes are a set of
+	 * magical values that have to be written in certain memory
+	 * addresses to get the device up and ready for firmware
+	 * download when it is in non-signed boot mode. */
+	I2400M_BCF_MOD_ID_POKES = 0x000000001,
+};
+
+
+/**
+ * i2400m_bootrom_header - Header for a boot-mode command
+ *
+ * @command: the above command descriptor
+ * @target_addr: where on the device memory should the action be performed.
+ * @data_size: for read/write, amount of data to be read/written
+ * @block_checksum: checksum value (if applicable)
+ * @payload: the beginning of data attached to this header
+ */
+struct i2400m_bootrom_header {
+	__le32 command;		/* Compose with enum i2400m_brh */
+	__le32 target_addr;
+	__le32 data_size;
+	__le32 block_checksum;
+	char payload[0];
+} __attribute__ ((packed));
+
+
+/*
+ * Data / control protocol
+ */
+
+/* Packet types for the host-device interface */
+enum i2400m_pt {
+	I2400M_PT_DATA = 0,
+	I2400M_PT_CTRL,
+	I2400M_PT_TRACE,	/* For device debug */
+	I2400M_PT_RESET_WARM,	/* device reset */
+	I2400M_PT_RESET_COLD,	/* USB[transport] reset, like reconnect */
+	I2400M_PT_ILLEGAL
+};
+
+
+/*
+ * Payload for a data packet
+ *
+ * This is prefixed to each and every outgoing packet of type DATA.
+ */
+struct i2400m_pl_data_hdr {
+	__le32 reserved;
+} __attribute__((packed));
+
+
+/* Misc constants */
+enum {
+	I2400M_PL_PAD = 16,	/* Payload data size alignment */
+	I2400M_PL_SIZE_MAX = 0x3EFF,
+	I2400M_MAX_PLS_IN_MSG = 60,
+	/* protocol barkers: sync sequences; for notifications they
+	 * are sent in groups of four. */
+	I2400M_H2D_PREVIEW_BARKER = 0xcafe900d,
+	I2400M_COLD_RESET_BARKER = 0xc01dc01d,
+	I2400M_WARM_RESET_BARKER = 0x50f750f7,
+	I2400M_NBOOT_BARKER = 0xdeadbeef,
+	I2400M_SBOOT_BARKER = 0x0ff1c1a1,
+	I2400M_ACK_BARKER = 0xfeedbabe,
+	I2400M_D2H_MSG_BARKER = 0xbeefbabe,
+};
+
+
+/*
+ * Hardware payload descriptor
+ *
+ * Bitfields encoded in a struct to enforce typing semantics.
+ *
+ * Look in rx.c and tx.c for a full description of the format.
+ */
+struct i2400m_pld {
+	__le32 val;
+} __attribute__ ((packed));
+
+#define I2400M_PLD_SIZE_MASK 0x00003fff
+#define I2400M_PLD_TYPE_SHIFT 16
+#define I2400M_PLD_TYPE_MASK 0x000f0000
+
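+/*
+ * Illustrative accessors (the names are made up, not part of this
+ * header): extracting a payload's size and type from a payload
+ * descriptor using the masks defined above.
+ *
+ *     static inline size_t example_pld_size(const struct i2400m_pld *pld)
+ *     {
+ *             return le32_to_cpu(pld->val) & I2400M_PLD_SIZE_MASK;
+ *     }
+ *
+ *     static inline enum i2400m_pt example_pld_type(const struct i2400m_pld *pld)
+ *     {
+ *             return (le32_to_cpu(pld->val) & I2400M_PLD_TYPE_MASK)
+ *                     >> I2400M_PLD_TYPE_SHIFT;
+ *     }
+ */
+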
+/*
+ * Header for a TX message or RX message
+ *
+ * @barker: preamble
+ * @size: used for management of the FIFO queue buffer; before
+ *     sending, this is converted to be a real preamble. This
+ *     indicates the real size of the TX message that starts at this
+ *     point. If the highest bit is set, then this message is to be
+ *     skipped.
+ * @sequence: sequence number of this message
+ * @offset: offset where the message itself starts -- see the comments
+ *     in the file header about message header and payload descriptor
+ *     alignment.
+ * @num_pls: number of payloads in this message
+ * @padding: amount of padding bytes at the end of the message to make
+ *           it block-size aligned
+ *
+ * Look in rx.c and tx.c for a full description of the format.
+ */
+struct i2400m_msg_hdr {
+	union {
+		__le32 barker;
+		__u32 size;	/* same size type as barker!! */
+	};
+	union {
+		__le32 sequence;
+		__u32 offset;	/* same size type as barker!! */
+	};
+	__le16 num_pls;
+	__le16 rsv1;
+	__le16 padding;
+	__le16 rsv2;
+	struct i2400m_pld pld[0];
+} __attribute__ ((packed));
+
+
+
+/*
+ * L3/L4 control protocol
+ */
+
+enum {
+	/* Interface version */
+	I2400M_L3L4_VERSION             = 0x0100,
+};
+
+/* Message types */
+enum i2400m_mt {
+	I2400M_MT_RESERVED              = 0x0000,
+	I2400M_MT_INVALID               = 0xffff,
+	I2400M_MT_REPORT_MASK		= 0x8000,
+
+	I2400M_MT_GET_SCAN_RESULT  	= 0x4202,
+	I2400M_MT_SET_SCAN_PARAM   	= 0x4402,
+	I2400M_MT_CMD_RF_CONTROL   	= 0x4602,
+	I2400M_MT_CMD_SCAN         	= 0x4603,
+	I2400M_MT_CMD_CONNECT      	= 0x4604,
+	I2400M_MT_CMD_DISCONNECT   	= 0x4605,
+	I2400M_MT_CMD_EXIT_IDLE   	= 0x4606,
+	I2400M_MT_GET_LM_VERSION   	= 0x5201,
+	I2400M_MT_GET_DEVICE_INFO  	= 0x5202,
+	I2400M_MT_GET_LINK_STATUS  	= 0x5203,
+	I2400M_MT_GET_STATISTICS   	= 0x5204,
+	I2400M_MT_GET_STATE        	= 0x5205,
+	I2400M_MT_GET_MEDIA_STATUS	= 0x5206,
+	I2400M_MT_SET_INIT_CONFIG	= 0x5404,
+	I2400M_MT_CMD_INIT	        = 0x5601,
+	I2400M_MT_CMD_TERMINATE		= 0x5602,
+	I2400M_MT_CMD_MODE_OF_OP	= 0x5603,
+	I2400M_MT_CMD_RESET_DEVICE	= 0x5604,
+	I2400M_MT_CMD_MONITOR_CONTROL   = 0x5605,
+	I2400M_MT_CMD_ENTER_POWERSAVE   = 0x5606,
+	I2400M_MT_GET_TLS_OPERATION_RESULT = 0x6201,
+	I2400M_MT_SET_EAP_SUCCESS       = 0x6402,
+	I2400M_MT_SET_EAP_FAIL          = 0x6403,
+	I2400M_MT_SET_EAP_KEY          	= 0x6404,
+	I2400M_MT_CMD_SEND_EAP_RESPONSE = 0x6602,
+	I2400M_MT_REPORT_SCAN_RESULT    = 0xc002,
+	I2400M_MT_REPORT_STATE		= 0xd002,
+	I2400M_MT_REPORT_POWERSAVE_READY = 0xd005,
+	I2400M_MT_REPORT_EAP_REQUEST    = 0xe002,
+	I2400M_MT_REPORT_EAP_RESTART    = 0xe003,
+	I2400M_MT_REPORT_ALT_ACCEPT    	= 0xe004,
+	I2400M_MT_REPORT_KEY_REQUEST 	= 0xe005,
+};
+
+
+/*
+ * Message Ack Status codes
+ *
+ * When a message is replied-to, this status is reported.
+ */
+enum i2400m_ms {
+	I2400M_MS_DONE_OK                  = 0,
+	I2400M_MS_DONE_IN_PROGRESS         = 1,
+	I2400M_MS_INVALID_OP               = 2,
+	I2400M_MS_BAD_STATE                = 3,
+	I2400M_MS_ILLEGAL_VALUE            = 4,
+	I2400M_MS_MISSING_PARAMS           = 5,
+	I2400M_MS_VERSION_ERROR            = 6,
+	I2400M_MS_ACCESSIBILITY_ERROR      = 7,
+	I2400M_MS_BUSY                     = 8,
+	I2400M_MS_CORRUPTED_TLV            = 9,
+	I2400M_MS_UNINITIALIZED            = 10,
+	I2400M_MS_UNKNOWN_ERROR            = 11,
+	I2400M_MS_PRODUCTION_ERROR         = 12,
+	I2400M_MS_NO_RF                    = 13,
+	I2400M_MS_NOT_READY_FOR_POWERSAVE  = 14,
+	I2400M_MS_THERMAL_CRITICAL         = 15,
+	I2400M_MS_MAX
+};
+
+
+/**
+ * i2400m_tlv - enumeration of the different types of TLVs
+ *
+ * TLVs stand for type-length-value and are the header for a payload
+ * composed of almost anything. Each payload has a type assigned
+ * and a length.
+ */
+enum i2400m_tlv {
+	I2400M_TLV_L4_MESSAGE_VERSIONS = 129,
+	I2400M_TLV_SYSTEM_STATE = 141,
+	I2400M_TLV_MEDIA_STATUS = 161,
+	I2400M_TLV_RF_OPERATION = 162,
+	I2400M_TLV_RF_STATUS = 163,
+	I2400M_TLV_DEVICE_RESET_TYPE = 132,
+	I2400M_TLV_CONFIG_IDLE_PARAMETERS = 601,
+};
+
+
+struct i2400m_tlv_hdr {
+	__le16 type;
+	__le16 length;		/* payload's */
+	__u8   pl[0];
+} __attribute__((packed));
+
+
+struct i2400m_l3l4_hdr {
+	__le16 type;
+	__le16 length;		/* payload's */
+	__le16 version;
+	__le16 resv1;
+	__le16 status;
+	__le16 resv2;
+	struct i2400m_tlv_hdr pl[0];
+} __attribute__((packed));
+
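+/*
+ * Illustrative sketch (not part of the device protocol): walking the
+ * TLVs that follow an L3L4 header. Lengths are little endian and a
+ * TLV's length field covers only its payload, so each step advances
+ * by the TLV header size plus that payload length.
+ *
+ *     const struct i2400m_l3l4_hdr *l3l4 = buf;
+ *     const void *itr = l3l4->pl;
+ *     const void *end = itr + le16_to_cpu(l3l4->length);
+ *
+ *     while (itr < end) {
+ *             const struct i2400m_tlv_hdr *tlv = itr;
+ *             // inspect le16_to_cpu(tlv->type) and tlv->pl here
+ *             itr += sizeof(*tlv) + le16_to_cpu(tlv->length);
+ *     }
+ */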
+
+/**
+ * i2400m_system_state - different states of the device
+ */
+enum i2400m_system_state {
+	I2400M_SS_UNINITIALIZED = 1,
+	I2400M_SS_INIT,
+	I2400M_SS_READY,
+	I2400M_SS_SCAN,
+	I2400M_SS_STANDBY,
+	I2400M_SS_CONNECTING,
+	I2400M_SS_WIMAX_CONNECTED,
+	I2400M_SS_DATA_PATH_CONNECTED,
+	I2400M_SS_IDLE,
+	I2400M_SS_DISCONNECTING,
+	I2400M_SS_OUT_OF_ZONE,
+	I2400M_SS_SLEEPACTIVE,
+	I2400M_SS_PRODUCTION,
+	I2400M_SS_CONFIG,
+	I2400M_SS_RF_OFF,
+	I2400M_SS_RF_SHUTDOWN,
+	I2400M_SS_DEVICE_DISCONNECT,
+	I2400M_SS_MAX,
+};
+
+
+/**
+ * i2400m_tlv_system_state - report on the state of the system
+ *
+ * @state: see enum i2400m_system_state
+ */
+struct i2400m_tlv_system_state {
+	struct i2400m_tlv_hdr hdr;
+	__le32 state;
+} __attribute__((packed));
+
+
+struct i2400m_tlv_l4_message_versions {
+	struct i2400m_tlv_hdr hdr;
+	__le16 major;
+	__le16 minor;
+	__le16 branch;
+	__le16 reserved;
+} __attribute__((packed));
+
+
+struct i2400m_tlv_detailed_device_info {
+	struct i2400m_tlv_hdr hdr;
+	__u8 reserved1[400];
+	__u8 mac_address[6];
+	__u8 reserved2[2];
+} __attribute__((packed));
+
+
+enum i2400m_rf_switch_status {
+	I2400M_RF_SWITCH_ON = 1,
+	I2400M_RF_SWITCH_OFF = 2,
+};
+
+struct i2400m_tlv_rf_switches_status {
+	struct i2400m_tlv_hdr hdr;
+	__u8 sw_rf_switch;	/* 1 ON, 2 OFF */
+	__u8 hw_rf_switch;	/* 1 ON, 2 OFF */
+	__u8 reserved[2];
+} __attribute__((packed));
+
+
+enum {
+	i2400m_rf_operation_on = 1,
+	i2400m_rf_operation_off = 2
+};
+
+struct i2400m_tlv_rf_operation {
+	struct i2400m_tlv_hdr hdr;
+	__le32 status;	/* 1 ON, 2 OFF */
+} __attribute__((packed));
+
+
+enum i2400m_tlv_reset_type {
+	I2400M_RESET_TYPE_COLD = 1,
+	I2400M_RESET_TYPE_WARM
+};
+
+struct i2400m_tlv_device_reset_type {
+	struct i2400m_tlv_hdr hdr;
+	__le32 reset_type;
+} __attribute__((packed));
+
+
+struct i2400m_tlv_config_idle_parameters {
+	struct i2400m_tlv_hdr hdr;
+	__le32 idle_timeout;	/* 100 to 300000 ms [5min], in increments
+				 * of 100; 0 disabled */
+	__le32 idle_paging_interval;	/* frames */
+} __attribute__((packed));
+
+
+enum i2400m_media_status {
+	I2400M_MEDIA_STATUS_LINK_UP = 1,
+	I2400M_MEDIA_STATUS_LINK_DOWN,
+	I2400M_MEDIA_STATUS_LINK_RENEW,
+};
+
+struct i2400m_tlv_media_status {
+	struct i2400m_tlv_hdr hdr;
+	__le32 media_status;
+} __attribute__((packed));
+
+#endif /* #ifndef __LINUX__WIMAX__I2400M_H__ */
diff --git a/include/mtd/ubi-user.h b/include/mtd/ubi-user.h
index ccdc562..2dc2eb2 100644
--- a/include/mtd/ubi-user.h
+++ b/include/mtd/ubi-user.h
@@ -253,7 +253,7 @@
  *
  * Re-sizing is possible for both dynamic and static volumes. But while dynamic
  * volumes may be re-sized arbitrarily, static volumes cannot be made to be
- * smaller then the number of bytes they bear. To arbitrarily shrink a static
+ * smaller than the number of bytes they bear. To arbitrarily shrink a static
  * volume, it must be wiped out first (by means of volume update operation with
  * zero number of bytes).
  */
diff --git a/include/net/netdma.h b/include/net/netdma.h
index f28c6e0..8ba8ce2 100644
--- a/include/net/netdma.h
+++ b/include/net/netdma.h
@@ -24,17 +24,6 @@
 #include <linux/dmaengine.h>
 #include <linux/skbuff.h>
 
-static inline struct dma_chan *get_softnet_dma(void)
-{
-	struct dma_chan *chan;
-	rcu_read_lock();
-	chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma);
-	if (chan)
-		dma_chan_get(chan);
-	rcu_read_unlock();
-	return chan;
-}
-
 int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
 		struct sk_buff *skb, int offset, struct iovec *to,
 		size_t len, struct dma_pinned_list *pinned_list);
diff --git a/include/net/protocol.h b/include/net/protocol.h
index cb2965a..ffa5b8b 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -59,6 +59,9 @@
 	int	(*gso_send_check)(struct sk_buff *skb);
 	struct sk_buff *(*gso_segment)(struct sk_buff *skb,
 				       int features);
+	struct sk_buff **(*gro_receive)(struct sk_buff **head,
+					struct sk_buff *skb);
+	int	(*gro_complete)(struct sk_buff *skb);
 
 	unsigned int	flags;	/* INET6_PROTO_xxx */
 };
diff --git a/include/net/wimax.h b/include/net/wimax.h
new file mode 100644
index 0000000..6b3824e
--- /dev/null
+++ b/include/net/wimax.h
@@ -0,0 +1,523 @@
+/*
+ * Linux WiMAX
+ * Kernel space API for accessing WiMAX devices
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * The WiMAX stack provides an API for controlling and managing the
+ * system's WiMAX devices. This API affects the control plane; the
+ * data plane is accessed via the network stack (netdev).
+ *
+ * Parts of the WiMAX stack API and notifications are exported to
+ * user space via Generic Netlink. In user space, libwimax (part of
+ * the wimax-tools package) provides a shim layer for accessing those
+ * calls.
+ *
+ * The API is standardized for all WiMAX devices and different drivers
+ * implement the backend support for it. However, device-specific
+ * messaging pipes are provided that can be used to issue commands and
+ * receive notifications in free form.
+ *
+ * Currently the messaging pipes are the only means of control as it
+ * is not known (due to the lack of more devices in the market) what
+ * will be a good abstraction layer. Expect this to change as more
+ * devices show up in the market. This API is designed to be growable in
+ * order to address this problem.
+ *
+ * USAGE
+ *
+ * Embed a `struct wimax_dev` at the beginning of the device's
+ * private structure, initialize and register it. For details, see
+ * `struct wimax_dev`'s documentation.
+ *
+ * Once this is done, wimax-tools's libwimaxll can be used to
+ * communicate with the driver from user space. Your user space
+ * application does not have to use libwimaxll and can talk
+ * the generic netlink protocol directly if desired.
+ *
+ * Remember this is a very low level API that is meant to provide all
+ * of the WiMAX features. Other daemons and services running in user
+ * space are its expected clients. They offer a higher level API that
+ * applications should use (an example of this is Intel's WiMAX
+ * Network Service for the i2400m).
+ *
+ * DESIGN
+ *
+ * Although not set in stone, this very basic interface is
+ * mostly complete. Remember this is meant to grow as new common
+ * operations are decided upon. New operations will be added to the
+ * interface, with the intent of keeping backwards compatibility as
+ * much as possible.
+ *
+ * This layer implements a set of calls to control a WiMAX device,
+ * exposing a frontend to the rest of the kernel and user space (via
+ * generic netlink) and a backend implementation in the driver through
+ * function pointers.
+ *
+ * WiMAX devices have a state, and a kernel-only API allows the
+ * drivers to manipulate that state. State transitions are atomic, and
+ * only some of them are allowed (see `enum wimax_st`).
+ *
+ * Most API calls will set the state automatically; in most cases
+ * drivers have to only report state changes due to external
+ * conditions.
+ *
+ * All API operations are 'atomic', serialized through a mutex in the
+ * `struct wimax_dev`.
+ *
+ * EXPORTING TO USER SPACE THROUGH GENERIC NETLINK
+ *
+ * The API is exported to user space using generic netlink (other
+ * methods can be added as needed).
+ *
+ * There is a Generic Netlink Family named "WiMAX", where interfaces
+ * supporting the WiMAX interface receive commands and broadcast their
+ * signals over a multicast group named "msg".
+ *
+ * Mapping to the source/destination interface is done by an interface
+ * index attribute.
+ *
+ * For user-to-kernel traffic (commands) we use a function call
+ * marshalling mechanism, where a message X with attributes A, B, C
+ * sent from user space to kernel space means executing the WiMAX API
+ * call wimax_X(A, B, C), sending the results back as a message.
+ *
+ * Kernel-to-user (notifications or signals) communication is sent
+ * over multicast groups. This allows multiple applications to
+ * monitor them.
+ *
+ * Each command/signal gets assigned its own attribute policy. This
+ * way the validator will verify that all the attributes in there are
+ * only the ones that should be for each command/signal. Think of an
+ * attribute as mapping to a type + argument name for each command/signal.
+ *
+ * If we had a single policy for *all* commands/signals, after running
+ * the validator we'd have to check "does this attribute belong in
+ * here"?  for each one. It can be done manually, but it's just easier
+ * to have the validator do that job with multiple policies. As well,
+ * it makes it easier to later expand each command/signal signature
+ * without affecting others and keeping the namespace more or less
+ * sane. Not that it is too complicated, but it makes it even easier.
+ *
+ * No state information is maintained in the kernel for each user
+ * space connection (the connection is stateless).
+ *
+ * TESTING FOR THE INTERFACE AND VERSIONING
+ *
+ * If network interface X is a WiMAX device, there will be a Generic
+ * Netlink family named "WiMAX X" and the device will present a
+ * "wimax" directory in it's network sysfs directory
+ * (/sys/class/net/DEVICE/wimax) [used by HAL].
+ *
+ * The absence of any of these means the device does not support
+ * this WiMAX API.
+ *
+ * By querying the generic netlink controller, versioning information
+ * and the multicast groups available can be found. Applications using
+ * the interface can either rely on that or use the generic netlink
+ * controller to figure out which generic netlink commands/signals are
+ * supported.
+ *
+ * NOTE: this versioning is a last resort to avoid hard
+ *    incompatibilities. It is the intention of the design of this
+ *    stack not to introduce backward incompatible changes.
+ *
+ * The version code has to fit in one byte (restrictions imposed by
+ * generic netlink); we use `version / 10` for the major version and
+ * `version % 10` for the minor. This gives 9 minors for each major
+ * and 25 majors.
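+ *
+ * For example, under this scheme a version code of 12 means interface
+ * version 1.2 (major 1, minor 2).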
+ *
+ * The version change protocol is as follows:
+ *
+ * - Major version: needs to be increased if an existing message/API
+ *   call is changed or removed. Doesn't need to be changed if a new
+ *   message is added.
+ *
+ * - Minor version: needs to be increased if new messages/API calls are
+ *   being added, or for some other consideration that doesn't impact
+ *   the user-kernel interface too much (like some kind of bug fix);
+ *   this is left up to common sense.
+ *
+ * User space code should not try to work if the major version it was
+ * compiled for differs from what the kernel offers. As well, if the
+ * minor version of the kernel interface is lower than the one user
+ * space is expecting (the one it was compiled for), the kernel
+ * might be missing API calls; user space shall be ready to handle
+ * said condition. Use the generic netlink controller operations to
+ * find which ones are supported and which are not.
+ *
+ * libwimaxll:wimaxll_open() takes care of checking versions.
+ *
+ * THE OPERATIONS:
+ *
+ * Each operation is defined in its own file (drivers/net/wimax/op-*.c)
+ * for clarity. The parts needed for an operation are:
+ *
+ *  - a function pointer in `struct wimax_dev`: optional, as the
+ *    operation might be implemented by the stack and not by the
+ *    driver.
+ *
+ *    All function pointers are named wimax_dev->op_*(), and drivers
+ *    must implement them except where noted otherwise.
+ *
+ *  - When exported to user space, a `struct nla_policy` to define the
+ *    attributes of the generic netlink command and a `struct genl_ops`
+ *    to define the operation.
+ *
+ * All the declarations for the operation codes (WIMAX_GNL_OP_<NAME>)
+ * and generic netlink attributes (WIMAX_GNL_<NAME>_*) are declared in
+ * include/linux/wimax.h; this file is intended to be cloned by user
+ * space to gain access to those declarations.
+ *
+ * A few caveats to remember:
+ *
+ *  - Need to define attribute numbers starting at 1; otherwise it
+ *    fails.
+ *
+ *  - the `struct genl_family` requires a maximum attribute id; when
+ *    defining the `struct nla_policy` for each message, it has to have
+ *    an array size of WIMAX_GNL_ATTR_MAX+1.
+ *
+ * THE PIPE INTERFACE:
+ *
+ * This interface is kept intentionally simple. The driver can send
+ * and receive free-form messages to/from user space through a
+ * pipe. See drivers/net/wimax/op-msg.c for details.
+ *
+ * The kernel-to-user messages are sent with
+ * wimax_msg(). user-to-kernel messages are delivered via
+ * wimax_dev->op_msg_from_user().
+ *
+ * RFKILL:
+ *
+ * RFKILL support is built into the wimax_dev layer; the driver just
+ * needs to call wimax_report_rfkill_{hw,sw}() to inform of changes in
+ * the hardware or software RF kill switches. When the stack wants to
+ * turn the radio off, it will call wimax_dev->op_rfkill_sw_toggle(),
+ * which the driver implements.
+ *
+ * User space can set the software RF Kill switch by calling
+ * wimax_rfkill().
+ *
+ * The code for now only supports devices that don't require polling;
+ * if the device needs to be polled, create a self-rearming delayed
+ * work struct for polling or look into adding polled support to the
+ * WiMAX stack.
+ *
+ * When initializing the hardware (_probe), after calling
+ * wimax_dev_add(), query the device for its RF Kill switches' status
+ * and feed it back to the WiMAX stack using
+ * wimax_report_rfkill_{hw,sw}(). If any switch is missing, always
+ * report it as ON.
+ *
+ * NOTE: the wimax stack uses an inverted terminology to that of the
+ * RFKILL subsystem:
+ *
+ *  - ON: radio is ON, RFKILL is DISABLED or OFF.
+ *  - OFF: radio is OFF, RFKILL is ENABLED or ON.
+ *
+ * MISCELLANEOUS OPS:
+ *
+ * wimax_reset() can be used to reset the device to its power-on state; by
+ * default it issues a warm reset that maintains the same device
+ * node. If that is not possible, it falls back to a cold reset
+ * (device reconnect). The driver implements the backend to this
+ * through wimax_dev->op_reset().
+ */
+
+#ifndef __NET__WIMAX_H__
+#define __NET__WIMAX_H__
+#ifdef __KERNEL__
+
+#include <linux/wimax.h>
+#include <net/genetlink.h>
+#include <linux/netdevice.h>
+
+struct net_device;
+struct genl_info;
+struct wimax_dev;
+struct input_dev;
+
+/**
+ * struct wimax_dev - Generic WiMAX device
+ *
+ * @net_dev: [fill] Pointer to the &struct net_device this WiMAX
+ *     device implements.
+ *
+ * @op_msg_from_user: [fill] Driver-specific operation to
+ *     handle a raw message from user space to the driver. The
+ *     driver can send messages to user space using
+ *     wimax_msg().
+ *
+ * @op_rfkill_sw_toggle: [fill] Driver-specific operation to act on
+ *     userspace (or any other agent) requesting the WiMAX device to
+ *     change the RF Kill software switch (WIMAX_RF_ON or
+ *     WIMAX_RF_OFF).
+ *     If such hardware support is not present, it is assumed the
+ *     radio cannot be switched off and it is always on (and the stack
+ *     will error out when trying to switch it off). In such case,
+ *     this function pointer can be left as NULL.
+ *
+ * @op_reset: [fill] Driver specific operation to reset the
+ *     device.
+ *     This operation should always attempt first a warm reset that
+ *     does not disconnect the device from the bus and return 0.
+ *     If that fails, it should resort to some sort of cold or bus
+ *     reset (even if it implies a bus disconnection and device
+ *     disappearance). In that case, -ENODEV should be returned to
+ *     indicate the device is gone.
+ *     This operation has to be synchronous, and return only when the
+ *     reset is complete. In case of having had to resort to bus/cold
+ *     reset implying a device disconnection, the call is allowed to
+ *     return immediately.
+ *     NOTE: wimax_dev->mutex is NOT locked when this op is being
+ *     called; however, wimax_dev->mutex_reset IS locked to ensure
+ *     serialization of calls to wimax_reset().
+ *     See wimax_reset()'s documentation.
+ *
+ * @name: [fill] A way to identify this device. We need to register a
+ *     name with many subsystems (input for RFKILL, workqueue
+ *     creation, etc). We can't use the network device name as that
+ *     might change and in some instances we don't know it yet (until
+ *     we call register_netdev()). So we generate a unique one
+ *     using the driver name and device bus id, place it here and use
+ *     it across the board. Recommended naming:
+ *     DRIVERNAME-BUSNAME:BUSID (dev->bus->name, dev->bus_id).
+ *
+ * @id_table_node: [private] link to the list of wimax devices kept by
+ *     id-table.c. Protected by its own spinlock.
+ *
+ * @mutex: [private] Serializes all concurrent access and execution of
+ *     operations.
+ *
+ * @mutex_reset: [private] Serializes reset operations. Needs to be a
+ *     different mutex because as part of the reset operation, the
+ *     driver has to call back into the stack to do things such as
+ *     state change, that require wimax_dev->mutex.
+ *
+ * @state: [private] Current state of the WiMAX device.
+ *
+ * @rfkill: [private] integration into the RF-Kill infrastructure.
+ *
+ * @rfkill_input: [private] virtual input device to process the
+ *     hardware RF Kill switches.
+ *
+ * @rf_sw: [private] State of the software radio switch (OFF/ON)
+ *
+ * @rf_hw: [private] State of the hardware radio switch (OFF/ON)
+ *
+ * @debugfs_dentry: [private] Used to hook up a debugfs entry. This
+ *     shows up in the debugfs root as wimax\:DEVICENAME.
+ *
+ * Description:
+ * This structure defines a common interface to access all WiMAX
+ * devices from different vendors and provides a common API as well as
+ * a free-form device-specific messaging channel.
+ *
+ * Usage:
+ *  1. Embed a &struct wimax_dev at *the beginning* of the network
+ *     device's private structure so that netdev_priv() points to it.
+ *
+ *  2. memset() it to zero
+ *
+ *  3. Initialize with wimax_dev_init(). This will leave the WiMAX
+ *     device in the %__WIMAX_ST_NULL state.
+ *
+ *  4. Fill all the fields marked with [fill]; once wimax_dev_add()
+ *     has been called, those fields CANNOT be modified.
+ *
+ *  5. Call wimax_dev_add() *after* registering the network
+ *     device. This will leave the WiMAX device in the %WIMAX_ST_DOWN
+ *     state.
+ *     Protect the driver's net_device->open() against succeeding if
+ *     the wimax device state is lower than %WIMAX_ST_DOWN.
+ *
+ *  6. Select when the device is going to be turned on/initialized;
+ *     for example, it could be initialized on 'ifconfig up' (when the
+ *     netdev op 'open()' is called on the driver).
+ *
+ * When the device is initialized (at `ifconfig up` time, or right
+ * after calling wimax_dev_add() from _probe()), make sure the
+ * following steps are taken:
+ *
+ *  a. Move the device to %WIMAX_ST_UNINITIALIZED. This is needed so
+ *     some API calls that shouldn't work until the device is ready
+ *     can be blocked.
+ *
+ *  b. Initialize the device. Make sure to turn the SW radio switch
+ *     off and move the device to state %WIMAX_ST_RADIO_OFF when
+ *     done. When just initialized, a device should be left in RADIO
+ *     OFF state until user space decides to turn it on.
+ *
+ *  c. Query the device for the state of the hardware rfkill switch
+ *     and call wimax_report_rfkill_hw() and wimax_report_rfkill_sw()
+ *     as needed. See below.
+ *
+ * wimax_dev_rm() undoes wimax_dev_add(); call it before unregistering
+ * the network device. Once wimax_dev_add() is called, the driver can
+ * get called on the wimax_dev->op_* function pointers.
+ *
+ * CONCURRENCY:
+ *
+ * The stack provides a mutex for each device that will disallow API
+ * calls happening concurrently; thus, op calls into the driver
+ * through the wimax_dev->op*() function pointers will always be
+ * serialized and *never* concurrent.
+ *
+ * For locking, wimax_dev->mutex is taken; (most) operations in
+ * the API have to check that wimax_dev_is_ready() returns 0 before
+ * continuing (this is done internally).
+ *
+ * REFERENCE COUNTING:
+ *
+ * The WiMAX device is reference counted by the associated network
+ * device. The only operation that can be used to reference the device
+ * is wimax_dev_get_by_genl_info(), and the reference it acquires has
+ * to be released with dev_put(wimax_dev->net_dev).
+ *
+ * RFKILL:
+ *
+ * At startup, both HW and SW radio switches are assumed to be off.
+ *
+ * At initialization time [after calling wimax_dev_add()], have the
+ * driver query the device for the status of the software and hardware
+ * RF kill switches and call wimax_report_rfkill_hw() and
+ * wimax_report_rfkill_sw() to indicate their state. If any is
+ * missing, just call it to indicate it is ON (radio always on).
+ *
+ * Whenever the driver detects a change in the state of the RF kill
+ * switches, it should call wimax_report_rfkill_hw() or
+ * wimax_report_rfkill_sw() to report it to the stack.
+ */
+struct wimax_dev {
+	struct net_device *net_dev;
+	struct list_head id_table_node;
+	struct mutex mutex;		/* Protects all members and API calls */
+	struct mutex mutex_reset;
+	enum wimax_st state;
+
+	int (*op_msg_from_user)(struct wimax_dev *wimax_dev,
+				const char *,
+				const void *, size_t,
+				const struct genl_info *info);
+	int (*op_rfkill_sw_toggle)(struct wimax_dev *wimax_dev,
+				   enum wimax_rf_state);
+	int (*op_reset)(struct wimax_dev *wimax_dev);
+
+	struct rfkill *rfkill;
+	struct input_dev *rfkill_input;
+	unsigned rf_hw;
+	unsigned rf_sw;
+	char name[32];
+
+	struct dentry *debugfs_dentry;
+};
+
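+/*
+ * Illustrative probe-time sketch, following the usage steps documented
+ * above (the names are made up and error handling is omitted; this is
+ * not a complete or required pattern):
+ *
+ *     struct example_priv {
+ *             struct wimax_dev wimax_dev;     // must be first
+ *             ...
+ *     };
+ *
+ *     memset(&priv->wimax_dev, 0, sizeof(priv->wimax_dev));
+ *     wimax_dev_init(&priv->wimax_dev);
+ *     priv->wimax_dev.op_msg_from_user = example_op_msg_from_user;
+ *     priv->wimax_dev.op_rfkill_sw_toggle = example_op_rfkill_sw_toggle;
+ *     priv->wimax_dev.op_reset = example_op_reset;
+ *     result = register_netdev(net_dev);
+ *     ...
+ *     result = wimax_dev_add(&priv->wimax_dev, net_dev);
+ */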
+
+
+/*
+ * WiMAX stack public API for device drivers
+ * -----------------------------------------
+ *
+ * These functions are not exported to user space.
+ */
+extern void wimax_dev_init(struct wimax_dev *);
+extern int wimax_dev_add(struct wimax_dev *, struct net_device *);
+extern void wimax_dev_rm(struct wimax_dev *);
+
+static inline
+struct wimax_dev *net_dev_to_wimax(struct net_device *net_dev)
+{
+	return netdev_priv(net_dev);
+}
+
+static inline
+struct device *wimax_dev_to_dev(struct wimax_dev *wimax_dev)
+{
+	return wimax_dev->net_dev->dev.parent;
+}
+
+extern void wimax_state_change(struct wimax_dev *, enum wimax_st);
+extern enum wimax_st wimax_state_get(struct wimax_dev *);
+
+/*
+ * Radio Switch state reporting.
+ *
+ * enum wimax_rf_state is declared in linux/wimax.h so the exports
+ * to user space can use it.
+ */
+extern void wimax_report_rfkill_hw(struct wimax_dev *, enum wimax_rf_state);
+extern void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
+
+
+/*
+ * Free-form messaging to/from user space
+ *
+ * Sending a message:
+ *
+ *   wimax_msg(wimax_dev, pipe_name, buf, buf_size, GFP_KERNEL);
+ *
+ * Broken up:
+ *
+ *   skb = wimax_msg_alloc(wimax_dev, pipe_name, buf_size, GFP_KERNEL);
+ *   ...fill up skb...
+ *   wimax_msg_send(wimax_dev, pipe_name, skb);
+ *
+ * Be sure not to modify skb->data in the middle (ie: don't use
+ * skb_push()/skb_pull()/skb_reserve() on the skb).
+ *
+ * "pipe_name" is any string, than can be interpreted as the name of
+ * the pipe or destinatary; the interpretation of it is driver
+ * specific, so the recipient can multiplex it as wished. It can be
+ * NULL, it won't be used - an example is using a "diagnostics" tag to
+ * send diagnostics information that a device-specific diagnostics
+ * tool would be interested in.
+ */
+extern struct sk_buff *wimax_msg_alloc(struct wimax_dev *, const char *,
+				       const void *, size_t, gfp_t);
+extern int wimax_msg_send(struct wimax_dev *, struct sk_buff *);
+extern int wimax_msg(struct wimax_dev *, const char *,
+		     const void *, size_t, gfp_t);
+
+extern const void *wimax_msg_data_len(struct sk_buff *, size_t *);
+extern const void *wimax_msg_data(struct sk_buff *);
+extern ssize_t wimax_msg_len(struct sk_buff *);
+
+
+/*
+ * WiMAX stack user space API
+ * --------------------------
+ *
+ * This API is what gets exported to user space for general
+ * operations. These calls can also be made from within the kernel
+ * (with a properly referenced `struct wimax_dev`).
+ *
+ * Properly referenced means: the 'struct net_device' that embeds the
+ * device's control structure and (as such) the 'struct wimax_dev' is
+ * referenced by the caller.
+ */
+extern int wimax_rfkill(struct wimax_dev *, enum wimax_rf_state);
+extern int wimax_reset(struct wimax_dev *);
+
+#else
+/* You might be looking for linux/wimax.h */
+#error This file should not be included from user space.
+#endif /* #ifdef __KERNEL__ */
+#endif /* #ifndef __NET__WIMAX_H__ */
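For illustration only (not part of the patch): a minimal sketch of the _probe()-time
sequence that the wimax_dev documentation above prescribes (steps a-c). The "foo"
driver, its private structure and the foo_* helpers/ops are hypothetical placeholders,
and WIMAX_RF_ON/WIMAX_RF_OFF are assumed to be the enum wimax_rf_state values declared
in linux/wimax.h.

	#include <linux/netdevice.h>
	#include <net/wimax.h>

	struct foo_device {				/* hypothetical driver state */
		struct wimax_dev wimax_dev;
		/* hardware handles, queues, ... */
	};

	/* Placeholders for real device I/O and op implementations */
	static int foo_hw_init(struct foo_device *foo);
	static int foo_hw_rf_hw_blocked(struct foo_device *foo);
	static int foo_op_msg_from_user(struct wimax_dev *wimax_dev, const char *pipe,
					const void *buf, size_t size,
					const struct genl_info *info);
	static int foo_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
					   enum wimax_rf_state state);
	static int foo_op_reset(struct wimax_dev *wimax_dev);

	static int foo_probe(struct foo_device *foo, struct net_device *net_dev)
	{
		struct wimax_dev *wimax_dev = &foo->wimax_dev;
		int result;

		wimax_dev_init(wimax_dev);
		wimax_dev->op_msg_from_user = foo_op_msg_from_user;
		wimax_dev->op_rfkill_sw_toggle = foo_op_rfkill_sw_toggle;
		wimax_dev->op_reset = foo_op_reset;

		result = wimax_dev_add(wimax_dev, net_dev);
		if (result < 0)
			return result;

		/* a. block most API calls until the device is ready */
		wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED);

		/* b. bring the hardware up, leaving the SW radio switch off */
		result = foo_hw_init(foo);
		if (result < 0) {
			wimax_dev_rm(wimax_dev);
			return result;
		}
		wimax_state_change(wimax_dev, WIMAX_ST_RADIO_OFF);

		/* c. report the current rfkill switch states to the stack */
		wimax_report_rfkill_hw(wimax_dev, foo_hw_rf_hw_blocked(foo) ?
				       WIMAX_RF_OFF : WIMAX_RF_ON);
		wimax_report_rfkill_sw(wimax_dev, WIMAX_RF_OFF);
		return 0;
	}

From here user space can toggle the radio through wimax_rfkill() and the driver can
exchange free-form messages with its user space tool through wimax_msg().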
diff --git a/include/scsi/libiscsi_tcp.h b/include/scsi/libiscsi_tcp.h
index 83e32f6..9e3182e 100644
--- a/include/scsi/libiscsi_tcp.h
+++ b/include/scsi/libiscsi_tcp.h
@@ -39,6 +39,7 @@
 	unsigned int		total_copied;
 
 	struct hash_desc	*hash;
+	unsigned char		padbuf[ISCSI_PAD_LEN];
 	unsigned char		recv_digest[ISCSI_DIGEST_SIZE];
 	unsigned char		digest[ISCSI_DIGEST_SIZE];
 	unsigned int		digest_len;
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 6e04e6f..c9184f7 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -358,6 +358,7 @@
 #define FC_RPORT_DEVLOSS_PENDING	0x01
 #define FC_RPORT_SCAN_PENDING		0x02
 #define FC_RPORT_FAST_FAIL_TIMEDOUT	0x04
+#define FC_RPORT_DEVLOSS_CALLBK_DONE	0x08
 
 #define	dev_to_rport(d)				\
 	container_of(d, struct fc_rport, dev)
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 4af1083..93a4edb 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -178,7 +178,7 @@
   	.private_value = (unsigned long)&xenum }
 #define SOC_DAPM_VALUE_ENUM(xname, xenum) \
 {	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
-	.info = snd_soc_info_value_enum_double, \
+	.info = snd_soc_info_enum_double, \
 	.get = snd_soc_dapm_get_value_enum_double, \
 	.put = snd_soc_dapm_put_value_enum_double, \
 	.private_value = (unsigned long)&xenum }
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 9b930d3..24593ac 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -106,7 +106,7 @@
 	.private_value = (unsigned long)&xenum }
 #define SOC_VALUE_ENUM(xname, xenum) \
 {	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname,\
-	.info = snd_soc_info_value_enum_double, \
+	.info = snd_soc_info_enum_double, \
 	.get = snd_soc_get_value_enum_double, \
 	.put = snd_soc_put_value_enum_double, \
 	.private_value = (unsigned long)&xenum }
@@ -211,8 +211,6 @@
 	struct snd_ctl_elem_value *ucontrol);
 int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol);
-int snd_soc_info_value_enum_double(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_info *uinfo);
 int snd_soc_get_value_enum_double(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol);
 int snd_soc_put_value_enum_double(struct snd_kcontrol *kcontrol,
@@ -419,17 +417,6 @@
 	unsigned char shift_l;
 	unsigned char shift_r;
 	unsigned int max;
-	const char **texts;
-	void *dapm;
-};
-
-/* semi enumerated kcontrol */
-struct soc_value_enum {
-	unsigned short reg;
-	unsigned short reg2;
-	unsigned char shift_l;
-	unsigned char shift_r;
-	unsigned int max;
 	unsigned int mask;
 	const char **texts;
 	const unsigned int *values;
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 6369d89..f87f961 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -136,8 +136,6 @@
 /* Nil transaction ID. */
 #define XBT_NIL ((struct xenbus_transaction) { 0 })
 
-int __init xenbus_dev_init(void);
-
 char **xenbus_directory(struct xenbus_transaction t,
 			const char *dir, const char *node, unsigned int *num);
 void *xenbus_read(struct xenbus_transaction t,
diff --git a/init/Kconfig b/init/Kconfig
index e7893b1..2af8382 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -271,59 +271,6 @@
 		     13 =>  8 KB
 		     12 =>  4 KB
 
-config CGROUPS
-	bool "Control Group support"
-	help
-	  This option will let you use process cgroup subsystems
-	  such as Cpusets
-
-	  Say N if unsure.
-
-config CGROUP_DEBUG
-	bool "Example debug cgroup subsystem"
-	depends on CGROUPS
-	default n
-	help
-	  This option enables a simple cgroup subsystem that
-	  exports useful debugging information about the cgroups
-	  framework
-
-	  Say N if unsure
-
-config CGROUP_NS
-        bool "Namespace cgroup subsystem"
-        depends on CGROUPS
-        help
-          Provides a simple namespace cgroup subsystem to
-          provide hierarchical naming of sets of namespaces,
-          for instance virtual servers and checkpoint/restart
-          jobs.
-
-config CGROUP_FREEZER
-        bool "control group freezer subsystem"
-        depends on CGROUPS
-        help
-          Provides a way to freeze and unfreeze all tasks in a
-	  cgroup.
-
-config CGROUP_DEVICE
-	bool "Device controller for cgroups"
-	depends on CGROUPS && EXPERIMENTAL
-	help
-	  Provides a cgroup implementing whitelists for devices which
-	  a process in the cgroup can mknod or open.
-
-config CPUSETS
-	bool "Cpuset support"
-	depends on SMP && CGROUPS
-	help
-	  This option will let you create and manage CPUSETs which
-	  allow dynamically partitioning a system into sets of CPUs and
-	  Memory Nodes and assigning tasks to run only within those sets.
-	  This is primarily useful on large SMP or NUMA systems.
-
-	  Say N if unsure.
-
 #
 # Architectures with an unreliable sched_clock() should select this:
 #
@@ -337,6 +284,8 @@
 	help
 	  This feature lets CPU scheduler recognize task groups and control CPU
 	  bandwidth allocation to such task groups.
+	  In order to create a group from an arbitrary set of processes, use
+	  CONFIG_CGROUPS. (See Control Group support.)
 
 config FAIR_GROUP_SCHED
 	bool "Group scheduling for SCHED_OTHER"
@@ -374,28 +323,90 @@
 	  This option allows you to create arbitrary task groups
 	  using the "cgroup" pseudo filesystem and control
 	  the cpu bandwidth allocated to each such task group.
-	  Refer to Documentation/cgroups.txt for more information
-	  on "cgroup" pseudo filesystem.
+	  Refer to Documentation/cgroups/cgroups.txt for more
+	  information on "cgroup" pseudo filesystem.
 
 endchoice
 
+menuconfig CGROUPS
+	boolean "Control Group support"
+	help
+	  This option adds support for grouping sets of processes together, for
+	  use with process control subsystems such as Cpusets, CFS, memory
+	  controls or device isolation.
+	  See
+		- Documentation/scheduler/sched-design-CFS.txt	(CFS)
+		- Documentation/cgroups/ (features for grouping, isolation
+					  and resource control)
+
+	  Say N if unsure.
+
+if CGROUPS
+
+config CGROUP_DEBUG
+	bool "Example debug cgroup subsystem"
+	depends on CGROUPS
+	default n
+	help
+	  This option enables a simple cgroup subsystem that
+	  exports useful debugging information about the cgroups
+	  framework.
+
+	  Say N if unsure.
+
+config CGROUP_NS
+	bool "Namespace cgroup subsystem"
+	depends on CGROUPS
+	help
+	  Provides a simple namespace cgroup subsystem to
+	  provide hierarchical naming of sets of namespaces,
+	  for instance virtual servers and checkpoint/restart
+	  jobs.
+
+config CGROUP_FREEZER
+	bool "Freezer cgroup subsystem"
+	depends on CGROUPS
+	help
+	  Provides a way to freeze and unfreeze all tasks in a
+	  cgroup.
+
+config CGROUP_DEVICE
+	bool "Device controller for cgroups"
+	depends on CGROUPS && EXPERIMENTAL
+	help
+	  Provides a cgroup implementing whitelists for devices which
+	  a process in the cgroup can mknod or open.
+
+config CPUSETS
+	bool "Cpuset support"
+	depends on SMP && CGROUPS
+	help
+	  This option will let you create and manage CPUSETs which
+	  allow dynamically partitioning a system into sets of CPUs and
+	  Memory Nodes and assigning tasks to run only within those sets.
+	  This is primarily useful on large SMP or NUMA systems.
+
+	  Say N if unsure.
+
+config PROC_PID_CPUSET
+	bool "Include legacy /proc/<pid>/cpuset file"
+	depends on CPUSETS
+	default y
+
 config CGROUP_CPUACCT
 	bool "Simple CPU accounting cgroup subsystem"
 	depends on CGROUPS
 	help
 	  Provides a simple Resource Controller for monitoring the
-	  total CPU consumed by the tasks in a cgroup
+	  total CPU consumed by the tasks in a cgroup.
 
 config RESOURCE_COUNTERS
 	bool "Resource counters"
 	help
 	  This option enables controller independent resource accounting
-          infrastructure that works with cgroups
+	  infrastructure that works with cgroups.
 	depends on CGROUPS
 
-config MM_OWNER
-	bool
-
 config CGROUP_MEM_RES_CTLR
 	bool "Memory Resource Controller for Control Groups"
 	depends on CGROUPS && RESOURCE_COUNTERS
@@ -414,11 +425,32 @@
 	  sure you need the memory resource controller. Even when you enable
 	  this, you can set "cgroup_disable=memory" at your boot option to
 	  disable memory resource controller and you can avoid overheads.
-	  (and lose benefits of memory resource contoller)
+	  (and lose benefits of memory resource controller)
 
 	  This config option also selects MM_OWNER config option, which
 	  could in turn add some fork/exit overhead.
 
+config CGROUP_MEM_RES_CTLR_SWAP
+	bool "Memory Resource Controller Swap Extension (EXPERIMENTAL)"
+	depends on CGROUP_MEM_RES_CTLR && SWAP && EXPERIMENTAL
+	help
+	  Add swap management to the memory resource controller. When you
+	  enable this, you can limit mem+swap usage per cgroup. In other words,
+	  when you disable this, the memory resource controller ignores swap
+	  usage and a process can exhaust all of the swap. This extension
+	  is useful when you want to avoid swap exhaustion, but it adds more
+	  overhead and consumes memory for the extra accounting information.
+	  Be especially careful about enabling this on 32-bit systems or
+	  systems with little memory. When the memory resource controller
+	  is disabled by boot option, this will be automatically disabled and
+	  there will be no overhead from it. Even when you set this config=y,
+	  if the boot option "noswapaccount" is set, swap will not be accounted.
+
+endif # CGROUPS
+
+config MM_OWNER
+	bool
+
 config SYSFS_DEPRECATED
 	bool
 
@@ -455,11 +487,6 @@
 	  if the original kernel, that came with your distribution, has
 	  this option set to N.
 
-config PROC_PID_CPUSET
-	bool "Include legacy /proc/<pid>/cpuset file"
-	depends on CPUSETS
-	default y
-
 config RELAY
 	bool "Kernel->user space relay support (formerly relayfs)"
 	help
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 5efca73..708105e 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/initrd.h>
+#include <linux/async.h>
 
 #include <linux/nfs_fs.h>
 #include <linux/nfs_fs_sb.h>
@@ -372,6 +373,7 @@
 	/* wait for the known devices to complete their probing */
 	while (driver_probe_done() != 0)
 		msleep(100);
+	async_synchronize_full();
 
 	md_run_setup();
 
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index a7c748f..0f0f0cf 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -9,6 +9,7 @@
 #include <linux/string.h>
 
 #include "do_mounts.h"
+#include "../fs/squashfs/squashfs_fs.h"
 
 int __initdata rd_prompt = 1;/* 1 = prompt for RAM disk, 0 = don't prompt */
 
@@ -41,6 +42,7 @@
  * 	ext2
  *	romfs
  *	cramfs
+ *	squashfs
  * 	gzip
  */
 static int __init 
@@ -51,6 +53,7 @@
 	struct ext2_super_block *ext2sb;
 	struct romfs_super_block *romfsb;
 	struct cramfs_super *cramfsb;
+	struct squashfs_super_block *squashfsb;
 	int nblocks = -1;
 	unsigned char *buf;
 
@@ -62,6 +65,7 @@
 	ext2sb = (struct ext2_super_block *) buf;
 	romfsb = (struct romfs_super_block *) buf;
 	cramfsb = (struct cramfs_super *) buf;
+	squashfsb = (struct squashfs_super_block *) buf;
 	memset(buf, 0xe5, size);
 
 	/*
@@ -99,6 +103,16 @@
 		goto done;
 	}
 
+	/* squashfs is at block zero too */
+	if (le32_to_cpu(squashfsb->s_magic) == SQUASHFS_MAGIC) {
+		printk(KERN_NOTICE
+		       "RAMDISK: squashfs filesystem found at block %d\n",
+		       start_block);
+		nblocks = (le64_to_cpu(squashfsb->bytes_used) + BLOCK_SIZE - 1)
+			 >> BLOCK_SIZE_BITS;
+		goto done;
+	}
+
 	/*
 	 * Read block 1 to test for minix and ext2 superblock
 	 */
diff --git a/init/initramfs.c b/init/initramfs.c
index 4f5ba75..d9c941c 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -317,6 +317,7 @@
 			if (wfd >= 0) {
 				sys_fchown(wfd, uid, gid);
 				sys_fchmod(wfd, mode);
+				sys_ftruncate(wfd, body_len);
 				vcollected = kstrdup(collected, GFP_KERNEL);
 				state = CopyFile;
 			}
diff --git a/init/main.c b/init/main.c
index b5a892c..8442094 100644
--- a/init/main.c
+++ b/init/main.c
@@ -62,6 +62,7 @@
 #include <linux/signal.h>
 #include <linux/idr.h>
 #include <linux/ftrace.h>
+#include <linux/async.h>
 #include <trace/boot.h>
 
 #include <asm/io.h>
@@ -599,7 +600,8 @@
 	sched_clock_init();
 	profile_init();
 	if (!irqs_disabled())
-		printk("start_kernel(): bug: interrupts were enabled early\n");
+		printk(KERN_CRIT "start_kernel(): bug: interrupts were "
+				 "enabled early\n");
 	early_boot_irqs_on();
 	local_irq_enable();
 
@@ -684,7 +686,7 @@
 	rest_init();
 }
 
-static int initcall_debug;
+int initcall_debug;
 core_param(initcall_debug, initcall_debug, bool, 0644);
 
 int do_one_initcall(initcall_t fn)
@@ -785,6 +787,8 @@
  */
 static noinline int init_post(void)
 {
+	/* need to finish all async __init code before freeing the memory */
+	async_synchronize_full();
 	free_initmem();
 	unlock_kernel();
 	mark_rodata_ro();
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index eddb624..54b4077 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -505,7 +505,8 @@
 			sig_i.si_errno = 0;
 			sig_i.si_code = SI_MESGQ;
 			sig_i.si_value = info->notify.sigev_value;
-			sig_i.si_pid = task_tgid_vnr(current);
+			sig_i.si_pid = task_tgid_nr_ns(current,
+						ns_of_pid(info->notify_owner));
 			sig_i.si_uid = current_uid();
 
 			kill_pid_info(info->notify.sigev_signo,
@@ -649,8 +650,8 @@
 	return dentry_open(dentry, mqueue_mnt, oflag, cred);
 }
 
-asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
-				struct mq_attr __user *u_attr)
+SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
+		struct mq_attr __user *, u_attr)
 {
 	struct dentry *dentry;
 	struct file *filp;
@@ -720,7 +721,7 @@
 	return fd;
 }
 
-asmlinkage long sys_mq_unlink(const char __user *u_name)
+SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
 {
 	int err;
 	char *name;
@@ -813,9 +814,9 @@
 	sender->state = STATE_READY;
 }
 
-asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
-	size_t msg_len, unsigned int msg_prio,
-	const struct timespec __user *u_abs_timeout)
+SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
+		size_t, msg_len, unsigned int, msg_prio,
+		const struct timespec __user *, u_abs_timeout)
 {
 	struct file *filp;
 	struct inode *inode;
@@ -906,9 +907,9 @@
 	return ret;
 }
 
-asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
-	size_t msg_len, unsigned int __user *u_msg_prio,
-	const struct timespec __user *u_abs_timeout)
+SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
+		size_t, msg_len, unsigned int __user *, u_msg_prio,
+		const struct timespec __user *, u_abs_timeout)
 {
 	long timeout;
 	ssize_t ret;
@@ -996,8 +997,8 @@
  * and he isn't currently owner of notification, will be silently discarded.
  * It isn't explicitly defined in the POSIX.
  */
-asmlinkage long sys_mq_notify(mqd_t mqdes,
-				const struct sigevent __user *u_notification)
+SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
+		const struct sigevent __user *, u_notification)
 {
 	int ret;
 	struct file *filp;
@@ -1122,9 +1123,9 @@
 	return ret;
 }
 
-asmlinkage long sys_mq_getsetattr(mqd_t mqdes,
-			const struct mq_attr __user *u_mqstat,
-			struct mq_attr __user *u_omqstat)
+SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
+		const struct mq_attr __user *, u_mqstat,
+		struct mq_attr __user *, u_omqstat)
 {
 	int ret;
 	struct mq_attr mqstat, omqstat;
diff --git a/ipc/msg.c b/ipc/msg.c
index b4eee1c..2ceab7f 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -309,7 +309,7 @@
 	return security_msg_queue_associate(msq, msgflg);
 }
 
-asmlinkage long sys_msgget(key_t key, int msgflg)
+SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
 {
 	struct ipc_namespace *ns;
 	struct ipc_ops msg_ops;
@@ -466,7 +466,7 @@
 	return err;
 }
 
-asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
+SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
 {
 	struct msg_queue *msq;
 	int err, version;
@@ -723,8 +723,8 @@
 	return err;
 }
 
-asmlinkage long
-sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
+SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
+		int, msgflg)
 {
 	long mtype;
 
@@ -904,8 +904,8 @@
 	return msgsz;
 }
 
-asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
-			   long msgtyp, int msgflg)
+SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
+		long, msgtyp, int, msgflg)
 {
 	long err, mtype;
 
diff --git a/ipc/sem.c b/ipc/sem.c
index c68cd3f..16a2189 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -308,7 +308,7 @@
 	return 0;
 }
 
-asmlinkage long sys_semget(key_t key, int nsems, int semflg)
+SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 {
 	struct ipc_namespace *ns;
 	struct ipc_ops sem_ops;
@@ -887,7 +887,7 @@
 	return err;
 }
 
-asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
+SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg)
 {
 	int err = -EINVAL;
 	int version;
@@ -923,6 +923,13 @@
 		return -EINVAL;
 	}
 }
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg)
+{
+	return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg);
+}
+SYSCALL_ALIAS(sys_semctl, SyS_semctl);
+#endif
 
 /* If the task doesn't already have a undo_list, then allocate one
  * here.  We guarantee there is only one thread using this undo list,
@@ -1048,8 +1055,8 @@
 	return un;
 }
 
-asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
-			unsigned nsops, const struct timespec __user *timeout)
+SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
+		unsigned, nsops, const struct timespec __user *, timeout)
 {
 	int error = -EINVAL;
 	struct sem_array *sma;
@@ -1225,7 +1232,8 @@
 	return error;
 }
 
-asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsops)
+SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
+		unsigned, nsops)
 {
 	return sys_semtimedop(semid, tsops, nsops, NULL);
 }
diff --git a/ipc/shm.c b/ipc/shm.c
index b125b56..a9e09ad 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -440,7 +440,7 @@
 	return 0;
 }
 
-asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
+SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
 {
 	struct ipc_namespace *ns;
 	struct ipc_ops shm_ops;
@@ -621,7 +621,7 @@
 	return err;
 }
 
-asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
+SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 {
 	struct shmid_kernel *shp;
 	int err, version;
@@ -939,7 +939,7 @@
 	goto out_nattch;
 }
 
-asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
+SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
 {
 	unsigned long ret;
 	long err;
@@ -955,7 +955,7 @@
  * detach and kill segment if marked destroyed.
  * The work is done in shm_close.
  */
-asmlinkage long sys_shmdt(char __user *shmaddr)
+SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma, *next;
@@ -990,6 +990,7 @@
 	 */
 	vma = find_vma(mm, addr);
 
+#ifdef CONFIG_MMU
 	while (vma) {
 		next = vma->vm_next;
 
@@ -1034,6 +1035,17 @@
 		vma = next;
 	}
 
+#else /* CONFIG_MMU */
+	/* under NOMMU conditions, the exact address to be destroyed must be
+	 * given */
+	retval = -EINVAL;
+	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
+		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
+		retval = 0;
+	}
+
+#endif
+
 	up_write(&mm->mmap_sem);
 	return retval;
 }
diff --git a/kernel/Makefile b/kernel/Makefile
index e1c5bf3..170a921 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -9,7 +9,8 @@
 	    rcupdate.o extable.o params.o posix-timers.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
-	    notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o
+	    notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
+	    async.o
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
@@ -40,6 +41,9 @@
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
+ifneq ($(CONFIG_SMP),y)
+obj-y += up.o
+endif
 obj-$(CONFIG_SMP) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
diff --git a/kernel/acct.c b/kernel/acct.c
index d57b7cb..7afa315 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -277,7 +277,7 @@
  * should be written. If the filename is NULL, accounting will be
  * shutdown.
  */
-asmlinkage long sys_acct(const char __user *name)
+SYSCALL_DEFINE1(acct, const char __user *, name)
 {
 	int error;
 
diff --git a/kernel/async.c b/kernel/async.c
new file mode 100644
index 0000000..608b32b
--- /dev/null
+++ b/kernel/async.c
@@ -0,0 +1,346 @@
+/*
+ * async.c: Asynchronous function calls for boot performance
+ *
+ * (C) Copyright 2009 Intel Corporation
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+
+/*
+
+Goals and Theory of Operation
+
+The primary goal of this feature is to reduce the kernel boot time,
+by letting various independent hardware delays and discovery operations
+run decoupled from each other rather than strictly serialized.
+
+More specifically, the asynchronous function call concept allows
+certain operations (primarily during system boot) to happen
+asynchronously, out of order, while these operations still
+have their externally visible parts happen sequentially and in-order.
+(not unlike how out-of-order CPUs retire their instructions in order)
+
+Key to the asynchronous function call implementation is the concept of
+a "sequence cookie" (which, although it has an abstracted type, can be
+thought of as a monotonically incrementing number).
+
+The async core will assign each scheduled event such a sequence cookie and
+pass this to the called functions.
+
+The asynchronously called function should, before doing a globally visible
+operation such as registering device numbers, call the
+async_synchronize_cookie() function and pass in its own cookie. The
+async_synchronize_cookie() function will make sure that all asynchronous
+operations that were scheduled prior to the operation corresponding with the
+cookie have completed.
+
+Subsystem/driver initialization code that scheduled asynchronous probe
+functions, but which shares global resources with other drivers/subsystems
+that do not use the asynchronous call feature, needs to do a full
+synchronization with the async_synchronize_full() function before returning
+from its init function. This is to maintain strict ordering between the
+asynchronous and synchronous parts of the kernel.
+
+*/
+
+#include <linux/async.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kthread.h>
+#include <asm/atomic.h>
+
+static async_cookie_t next_cookie = 1;
+
+#define MAX_THREADS	256
+#define MAX_WORK	32768
+
+static LIST_HEAD(async_pending);
+static LIST_HEAD(async_running);
+static DEFINE_SPINLOCK(async_lock);
+
+static int async_enabled = 0;
+
+struct async_entry {
+	struct list_head list;
+	async_cookie_t   cookie;
+	async_func_ptr	 *func;
+	void             *data;
+	struct list_head *running;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(async_done);
+static DECLARE_WAIT_QUEUE_HEAD(async_new);
+
+static atomic_t entry_count;
+static atomic_t thread_count;
+
+extern int initcall_debug;
+
+
+/*
+ * MUST be called with the lock held!
+ */
+static async_cookie_t  __lowest_in_progress(struct list_head *running)
+{
+	struct async_entry *entry;
+	if (!list_empty(running)) {
+		entry = list_first_entry(running,
+			struct async_entry, list);
+		return entry->cookie;
+	} else if (!list_empty(&async_pending)) {
+		entry = list_first_entry(&async_pending,
+			struct async_entry, list);
+		return entry->cookie;
+	} else {
+		/* nothing in progress... next_cookie is "infinity" */
+		return next_cookie;
+	}
+
+}
+
+static async_cookie_t  lowest_in_progress(struct list_head *running)
+{
+	unsigned long flags;
+	async_cookie_t ret;
+
+	spin_lock_irqsave(&async_lock, flags);
+	ret = __lowest_in_progress(running);
+	spin_unlock_irqrestore(&async_lock, flags);
+	return ret;
+}
+/*
+ * pick the first pending entry and run it
+ */
+static void run_one_entry(void)
+{
+	unsigned long flags;
+	struct async_entry *entry;
+	ktime_t calltime, delta, rettime;
+
+	/* 1) pick one task from the pending queue */
+
+	spin_lock_irqsave(&async_lock, flags);
+	if (list_empty(&async_pending))
+		goto out;
+	entry = list_first_entry(&async_pending, struct async_entry, list);
+
+	/* 2) move it to the running queue */
+	list_del(&entry->list);
+	list_add_tail(&entry->list, &async_running);
+	spin_unlock_irqrestore(&async_lock, flags);
+
+	/* 3) run it (and print duration)*/
+	if (initcall_debug && system_state == SYSTEM_BOOTING) {
+		printk("calling  %lli_%pF @ %i\n", entry->cookie, entry->func, task_pid_nr(current));
+		calltime = ktime_get();
+	}
+	entry->func(entry->data, entry->cookie);
+	if (initcall_debug && system_state == SYSTEM_BOOTING) {
+		rettime = ktime_get();
+		delta = ktime_sub(rettime, calltime);
+		printk("initcall %lli_%pF returned 0 after %lld usecs\n", entry->cookie,
+			entry->func, ktime_to_ns(delta) >> 10);
+	}
+
+	/* 4) remove it from the running queue */
+	spin_lock_irqsave(&async_lock, flags);
+	list_del(&entry->list);
+
+	/* 5) free the entry  */
+	kfree(entry);
+	atomic_dec(&entry_count);
+
+	spin_unlock_irqrestore(&async_lock, flags);
+
+	/* 6) wake up any waiters. */
+	wake_up(&async_done);
+	return;
+
+out:
+	spin_unlock_irqrestore(&async_lock, flags);
+}
+
+
+static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
+{
+	struct async_entry *entry;
+	unsigned long flags;
+	async_cookie_t newcookie;
+	
+
+	/* allow irq-off callers */
+	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);
+
+	/*
+	 * If we're out of memory or if there's too much work
+	 * pending already, we execute synchronously.
+	 */
+	if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) {
+		kfree(entry);
+		spin_lock_irqsave(&async_lock, flags);
+		newcookie = next_cookie++;
+		spin_unlock_irqrestore(&async_lock, flags);
+
+		/* low on memory.. run synchronously */
+		ptr(data, newcookie);
+		return newcookie;
+	}
+	entry->func = ptr;
+	entry->data = data;
+	entry->running = running;
+
+	spin_lock_irqsave(&async_lock, flags);
+	newcookie = entry->cookie = next_cookie++;
+	list_add_tail(&entry->list, &async_pending);
+	atomic_inc(&entry_count);
+	spin_unlock_irqrestore(&async_lock, flags);
+	wake_up(&async_new);
+	return newcookie;
+}
+
+async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
+{
+	return __async_schedule(ptr, data, &async_pending);
+}
+EXPORT_SYMBOL_GPL(async_schedule);
+
+async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *running)
+{
+	return __async_schedule(ptr, data, running);
+}
+EXPORT_SYMBOL_GPL(async_schedule_special);
+
+void async_synchronize_full(void)
+{
+	do {
+		async_synchronize_cookie(next_cookie);
+	} while (!list_empty(&async_running) || !list_empty(&async_pending));
+}
+EXPORT_SYMBOL_GPL(async_synchronize_full);
+
+void async_synchronize_full_special(struct list_head *list)
+{
+	async_synchronize_cookie_special(next_cookie, list);
+}
+EXPORT_SYMBOL_GPL(async_synchronize_full_special);
+
+void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *running)
+{
+	ktime_t starttime, delta, endtime;
+
+	if (initcall_debug && system_state == SYSTEM_BOOTING) {
+		printk("async_waiting @ %i\n", task_pid_nr(current));
+		starttime = ktime_get();
+	}
+
+	wait_event(async_done, lowest_in_progress(running) >= cookie);
+
+	if (initcall_debug && system_state == SYSTEM_BOOTING) {
+		endtime = ktime_get();
+		delta = ktime_sub(endtime, starttime);
+
+		printk("async_continuing @ %i after %lli usec\n",
+			task_pid_nr(current), ktime_to_ns(delta) >> 10);
+	}
+}
+EXPORT_SYMBOL_GPL(async_synchronize_cookie_special);
+
+void async_synchronize_cookie(async_cookie_t cookie)
+{
+	async_synchronize_cookie_special(cookie, &async_running);
+}
+EXPORT_SYMBOL_GPL(async_synchronize_cookie);
+
+
+static int async_thread(void *unused)
+{
+	DECLARE_WAITQUEUE(wq, current);
+	add_wait_queue(&async_new, &wq);
+
+	while (!kthread_should_stop()) {
+		int ret = HZ;
+		set_current_state(TASK_INTERRUPTIBLE);
+		/*
+		 * check the list head without lock.. false positives
+		 * are dealt with inside run_one_entry() while holding
+		 * the lock.
+		 */
+		rmb();
+		if (!list_empty(&async_pending))
+			run_one_entry();
+		else
+			ret = schedule_timeout(HZ);
+
+		if (ret == 0) {
+			/*
+			 * we timed out; this means we as a thread are redundant.
+			 * we sign off and die, but to avoid any races there
+			 * is a last-straw check to see if work snuck in.
+			 */
+			atomic_dec(&thread_count);
+			wmb(); /* manager must see our departure first */
+			if (list_empty(&async_pending))
+				break;
+			/*
+			 * woops work came in between us timing out and us
+			 * signing off; we need to stay alive and keep working.
+			 */
+			atomic_inc(&thread_count);
+		}
+	}
+	remove_wait_queue(&async_new, &wq);
+
+	return 0;
+}
+
+static int async_manager_thread(void *unused)
+{
+	DECLARE_WAITQUEUE(wq, current);
+	add_wait_queue(&async_new, &wq);
+
+	while (!kthread_should_stop()) {
+		int tc, ec;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		tc = atomic_read(&thread_count);
+		rmb();
+		ec = atomic_read(&entry_count);
+
+		while (tc < ec && tc < MAX_THREADS) {
+			kthread_run(async_thread, NULL, "async/%i", tc);
+			atomic_inc(&thread_count);
+			tc++;
+		}
+
+		schedule();
+	}
+	remove_wait_queue(&async_new, &wq);
+
+	return 0;
+}
+
+static int __init async_init(void)
+{
+	if (async_enabled)
+		kthread_run(async_manager_thread, NULL, "async/mgr");
+	return 0;
+}
+
+static int __init setup_async(char *str)
+{
+	async_enabled = 1;
+	return 1;
+}
+
+__setup("fastboot", setup_async);
+
+
+core_initcall(async_init);
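For illustration only (not part of the patch): a sketch of how a driver could use the
interface kernel/async.c introduces, following the "Goals and Theory of Operation"
comment above. The bar_* names are hypothetical placeholders.

	#include <linux/async.h>
	#include <linux/init.h>

	struct bar_device { int id; };			/* hypothetical device state */
	static struct bar_device bar_singleton;

	/* Placeholders: slow hardware spin-up and the externally visible step */
	static void bar_hw_spin_up(struct bar_device *bar);
	static void bar_register_devno(struct bar_device *bar);

	static void bar_probe_async(void *data, async_cookie_t cookie)
	{
		struct bar_device *bar = data;

		bar_hw_spin_up(bar);		  /* slow part; may run out of order */
		async_synchronize_cookie(cookie); /* wait for all earlier cookies... */
		bar_register_devno(bar);	  /* ...so the visible part stays ordered */
	}

	static int __init bar_init(void)
	{
		async_schedule(bar_probe_async, &bar_singleton);
		/*
		 * Only needed because this hypothetical subsystem shares global
		 * resources with non-async code: flush before returning.
		 */
		async_synchronize_full();
		return 0;
	}
	device_initcall(bar_init);

Note that with this patch the asynchronous path is only enabled when booting with
"fastboot"; otherwise async_schedule() simply runs the function synchronously, so the
sequence above degenerates to a plain ordered probe.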
diff --git a/kernel/capability.c b/kernel/capability.c
index 688926e..4e17041 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -161,7 +161,7 @@
  *
  * Returns 0 on success and < 0 on error.
  */
-asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
+SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
 {
 	int ret = 0;
 	pid_t pid;
@@ -235,7 +235,7 @@
  *
  * Returns 0 on success and < 0 on error.
  */
-asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
+SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
 {
 	struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
 	unsigned i, tocopy;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index f221446..c298310 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -84,7 +84,7 @@
 	/* Tracks how many cgroups are currently defined in hierarchy.*/
 	int number_of_cgroups;
 
-	/* A list running through the mounted hierarchies */
+	/* A list running through the active hierarchies */
 	struct list_head root_list;
 
 	/* Hierarchy-specific flags */
@@ -148,8 +148,8 @@
 #define for_each_subsys(_root, _ss) \
 list_for_each_entry(_ss, &_root->subsys_list, sibling)
 
-/* for_each_root() allows you to iterate across the active hierarchies */
-#define for_each_root(_root) \
+/* for_each_active_root() allows you to iterate across the active hierarchies */
+#define for_each_active_root(_root) \
 list_for_each_entry(_root, &roots, root_list)
 
 /* the list of cgroups eligible for automatic release. Protected by
@@ -271,7 +271,7 @@
 
 	rcu_read_lock();
 	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
-		struct cgroup *cgrp = cg->subsys[i]->cgroup;
+		struct cgroup *cgrp = rcu_dereference(cg->subsys[i]->cgroup);
 		if (atomic_dec_and_test(&cgrp->count) &&
 		    notify_on_release(cgrp)) {
 			if (taskexit)
@@ -384,6 +384,25 @@
 	return 0;
 }
 
+/**
+ * link_css_set - a helper function to link a css_set to a cgroup
+ * @tmp_cg_links: cg_cgroup_link objects allocated by allocate_cg_links()
+ * @cg: the css_set to be linked
+ * @cgrp: the destination cgroup
+ */
+static void link_css_set(struct list_head *tmp_cg_links,
+			 struct css_set *cg, struct cgroup *cgrp)
+{
+	struct cg_cgroup_link *link;
+
+	BUG_ON(list_empty(tmp_cg_links));
+	link = list_first_entry(tmp_cg_links, struct cg_cgroup_link,
+				cgrp_link_list);
+	link->cg = cg;
+	list_move(&link->cgrp_link_list, &cgrp->css_sets);
+	list_add(&link->cg_link_list, &cg->cg_links);
+}
+
 /*
  * find_css_set() takes an existing cgroup group and a
  * cgroup object, and returns a css_set object that's
@@ -399,7 +418,6 @@
 	int i;
 
 	struct list_head tmp_cg_links;
-	struct cg_cgroup_link *link;
 
 	struct hlist_head *hhead;
 
@@ -444,26 +462,11 @@
 		 * only do it for the first subsystem in each
 		 * hierarchy
 		 */
-		if (ss->root->subsys_list.next == &ss->sibling) {
-			BUG_ON(list_empty(&tmp_cg_links));
-			link = list_entry(tmp_cg_links.next,
-					  struct cg_cgroup_link,
-					  cgrp_link_list);
-			list_del(&link->cgrp_link_list);
-			list_add(&link->cgrp_link_list, &cgrp->css_sets);
-			link->cg = res;
-			list_add(&link->cg_link_list, &res->cg_links);
-		}
+		if (ss->root->subsys_list.next == &ss->sibling)
+			link_css_set(&tmp_cg_links, res, cgrp);
 	}
-	if (list_empty(&rootnode.subsys_list)) {
-		link = list_entry(tmp_cg_links.next,
-				  struct cg_cgroup_link,
-				  cgrp_link_list);
-		list_del(&link->cgrp_link_list);
-		list_add(&link->cgrp_link_list, &dummytop->css_sets);
-		link->cg = res;
-		list_add(&link->cg_link_list, &res->cg_links);
-	}
+	if (list_empty(&rootnode.subsys_list))
+		link_css_set(&tmp_cg_links, res, dummytop);
 
 	BUG_ON(!list_empty(&tmp_cg_links));
 
@@ -586,11 +589,18 @@
 {
 	struct cgroup_subsys *ss;
 	for_each_subsys(cgrp->root, ss)
-		if (ss->pre_destroy && cgrp->subsys[ss->subsys_id])
+		if (ss->pre_destroy)
 			ss->pre_destroy(ss, cgrp);
 	return;
 }
 
+static void free_cgroup_rcu(struct rcu_head *obj)
+{
+	struct cgroup *cgrp = container_of(obj, struct cgroup, rcu_head);
+
+	kfree(cgrp);
+}
+
 static void cgroup_diput(struct dentry *dentry, struct inode *inode)
 {
 	/* is dentry a directory ? if so, kfree() associated cgroup */
@@ -610,19 +620,19 @@
 		/*
 		 * Release the subsystem state objects.
 		 */
-		for_each_subsys(cgrp->root, ss) {
-			if (cgrp->subsys[ss->subsys_id])
-				ss->destroy(ss, cgrp);
-		}
+		for_each_subsys(cgrp->root, ss)
+			ss->destroy(ss, cgrp);
 
 		cgrp->root->number_of_cgroups--;
 		mutex_unlock(&cgroup_mutex);
 
-		/* Drop the active superblock reference that we took when we
-		 * created the cgroup */
+		/*
+		 * Drop the active superblock reference that we took when we
+		 * created the cgroup
+		 */
 		deactivate_super(cgrp->root->sb);
 
-		kfree(cgrp);
+		call_rcu(&cgrp->rcu_head, free_cgroup_rcu);
 	}
 	iput(inode);
 }
@@ -712,23 +722,26 @@
 			BUG_ON(cgrp->subsys[i]);
 			BUG_ON(!dummytop->subsys[i]);
 			BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
+			mutex_lock(&ss->hierarchy_mutex);
 			cgrp->subsys[i] = dummytop->subsys[i];
 			cgrp->subsys[i]->cgroup = cgrp;
-			list_add(&ss->sibling, &root->subsys_list);
-			rcu_assign_pointer(ss->root, root);
+			list_move(&ss->sibling, &root->subsys_list);
+			ss->root = root;
 			if (ss->bind)
 				ss->bind(ss, cgrp);
-
+			mutex_unlock(&ss->hierarchy_mutex);
 		} else if (bit & removed_bits) {
 			/* We're removing this subsystem */
 			BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]);
 			BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
+			mutex_lock(&ss->hierarchy_mutex);
 			if (ss->bind)
 				ss->bind(ss, dummytop);
 			dummytop->subsys[i]->cgroup = dummytop;
 			cgrp->subsys[i] = NULL;
-			rcu_assign_pointer(subsys[i]->root, &rootnode);
-			list_del(&ss->sibling);
+			subsys[i]->root = &rootnode;
+			list_move(&ss->sibling, &rootnode.subsys_list);
+			mutex_unlock(&ss->hierarchy_mutex);
 		} else if (bit & final_bits) {
 			/* Subsystem state should already exist */
 			BUG_ON(!cgrp->subsys[i]);
@@ -990,7 +1003,7 @@
 		root = NULL;
 	} else {
 		/* New superblock */
-		struct cgroup *cgrp = &root->top_cgroup;
+		struct cgroup *root_cgrp = &root->top_cgroup;
 		struct inode *inode;
 		int i;
 
@@ -1031,7 +1044,7 @@
 		list_add(&root->root_list, &roots);
 		root_count++;
 
-		sb->s_root->d_fsdata = &root->top_cgroup;
+		sb->s_root->d_fsdata = root_cgrp;
 		root->top_cgroup.dentry = sb->s_root;
 
 		/* Link the top cgroup in this hierarchy into all
@@ -1042,29 +1055,18 @@
 			struct hlist_node *node;
 			struct css_set *cg;
 
-			hlist_for_each_entry(cg, node, hhead, hlist) {
-				struct cg_cgroup_link *link;
-
-				BUG_ON(list_empty(&tmp_cg_links));
-				link = list_entry(tmp_cg_links.next,
-						  struct cg_cgroup_link,
-						  cgrp_link_list);
-				list_del(&link->cgrp_link_list);
-				link->cg = cg;
-				list_add(&link->cgrp_link_list,
-					 &root->top_cgroup.css_sets);
-				list_add(&link->cg_link_list, &cg->cg_links);
-			}
+			hlist_for_each_entry(cg, node, hhead, hlist)
+				link_css_set(&tmp_cg_links, cg, root_cgrp);
 		}
 		write_unlock(&css_set_lock);
 
 		free_cg_links(&tmp_cg_links);
 
-		BUG_ON(!list_empty(&cgrp->sibling));
-		BUG_ON(!list_empty(&cgrp->children));
+		BUG_ON(!list_empty(&root_cgrp->sibling));
+		BUG_ON(!list_empty(&root_cgrp->children));
 		BUG_ON(root->number_of_cgroups != 1);
 
-		cgroup_populate_dir(cgrp);
+		cgroup_populate_dir(root_cgrp);
 		mutex_unlock(&inode->i_mutex);
 		mutex_unlock(&cgroup_mutex);
 	}
@@ -1113,10 +1115,9 @@
 	}
 	write_unlock(&css_set_lock);
 
-	if (!list_empty(&root->root_list)) {
-		list_del(&root->root_list);
-		root_count--;
-	}
+	list_del(&root->root_list);
+	root_count--;
+
 	mutex_unlock(&cgroup_mutex);
 
 	kfree(root);
@@ -1145,14 +1146,16 @@
  * @buf: the buffer to write the path into
  * @buflen: the length of the buffer
  *
- * Called with cgroup_mutex held. Writes path of cgroup into buf.
- * Returns 0 on success, -errno on error.
+ * Called with cgroup_mutex held or else with an RCU-protected cgroup
+ * reference.  Writes path of cgroup into buf.  Returns 0 on success,
+ * -errno on error.
  */
 int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
 {
 	char *start;
+	struct dentry *dentry = rcu_dereference(cgrp->dentry);
 
-	if (cgrp == dummytop) {
+	if (!dentry || cgrp == dummytop) {
 		/*
 		 * Inactive subsystems have no dentry for their root
 		 * cgroup
@@ -1165,13 +1168,14 @@
 
 	*--start = '\0';
 	for (;;) {
-		int len = cgrp->dentry->d_name.len;
+		int len = dentry->d_name.len;
 		if ((start -= len) < buf)
 			return -ENAMETOOLONG;
 		memcpy(start, cgrp->dentry->d_name.name, len);
 		cgrp = cgrp->parent;
 		if (!cgrp)
 			break;
+		dentry = rcu_dereference(cgrp->dentry);
 		if (!cgrp->parent)
 			continue;
 		if (--start < buf)
@@ -1216,7 +1220,7 @@
 	int retval = 0;
 	struct cgroup_subsys *ss;
 	struct cgroup *oldcgrp;
-	struct css_set *cg = tsk->cgroups;
+	struct css_set *cg;
 	struct css_set *newcg;
 	struct cgroupfs_root *root = cgrp->root;
 	int subsys_id;
@@ -1236,11 +1240,16 @@
 		}
 	}
 
+	task_lock(tsk);
+	cg = tsk->cgroups;
+	get_css_set(cg);
+	task_unlock(tsk);
 	/*
 	 * Locate or allocate a new css_set for this task,
 	 * based on its final set of cgroups
 	 */
 	newcg = find_css_set(cg, cgrp);
+	put_css_set(cg);
 	if (!newcg)
 		return -ENOMEM;
 
@@ -1445,7 +1454,7 @@
 	struct cftype *cft = __d_cft(file->f_dentry);
 	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
 
-	if (!cft || cgroup_is_removed(cgrp))
+	if (cgroup_is_removed(cgrp))
 		return -ENODEV;
 	if (cft->write)
 		return cft->write(cgrp, cft, file, buf, nbytes, ppos);
@@ -1490,7 +1499,7 @@
 	struct cftype *cft = __d_cft(file->f_dentry);
 	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
 
-	if (!cft || cgroup_is_removed(cgrp))
+	if (cgroup_is_removed(cgrp))
 		return -ENODEV;
 
 	if (cft->read)
@@ -1554,10 +1563,8 @@
 	err = generic_file_open(inode, file);
 	if (err)
 		return err;
-
 	cft = __d_cft(file->f_dentry);
-	if (!cft)
-		return -ENODEV;
+
 	if (cft->read_map || cft->read_seq_string) {
 		struct cgroup_seqfile_state *state =
 			kzalloc(sizeof(*state), GFP_USER);
@@ -1671,7 +1678,7 @@
 	if (!error) {
 		dentry->d_fsdata = cgrp;
 		inc_nlink(parent->d_inode);
-		cgrp->dentry = dentry;
+		rcu_assign_pointer(cgrp->dentry, dentry);
 		dget(dentry);
 	}
 	dput(dentry);
@@ -1812,6 +1819,7 @@
 {
 	struct task_struct *res;
 	struct list_head *l = it->task;
+	struct cg_cgroup_link *link;
 
 	/* If the iterator cg is NULL, we have no tasks */
 	if (!it->cg_link)
@@ -1819,7 +1827,8 @@
 	res = list_entry(l, struct task_struct, cg_list);
 	/* Advance iterator to find next entry */
 	l = l->next;
-	if (l == &res->cgroups->tasks) {
+	link = list_entry(it->cg_link, struct cg_cgroup_link, cgrp_link_list);
+	if (l == &link->cg->tasks) {
 		/* We reached the end of this task list - move on to
 		 * the next cg_cgroup_link */
 		cgroup_advance_iter(cgrp, it);
@@ -2013,14 +2022,16 @@
  */
 static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp)
 {
-	int n = 0;
+	int n = 0, pid;
 	struct cgroup_iter it;
 	struct task_struct *tsk;
 	cgroup_iter_start(cgrp, &it);
 	while ((tsk = cgroup_iter_next(cgrp, &it))) {
 		if (unlikely(n == npids))
 			break;
-		pidarray[n++] = task_pid_vnr(tsk);
+		pid = task_pid_vnr(tsk);
+		if (pid > 0)
+			pidarray[n++] = pid;
 	}
 	cgroup_iter_end(cgrp, &it);
 	return n;
@@ -2052,7 +2063,6 @@
 
 	ret = 0;
 	cgrp = dentry->d_fsdata;
-	rcu_read_lock();
 
 	cgroup_iter_start(cgrp, &it);
 	while ((tsk = cgroup_iter_next(cgrp, &it))) {
@@ -2077,7 +2087,6 @@
 	}
 	cgroup_iter_end(cgrp, &it);
 
-	rcu_read_unlock();
 err:
 	return ret;
 }
@@ -2324,7 +2333,7 @@
 			       struct cgroup *cgrp)
 {
 	css->cgroup = cgrp;
-	atomic_set(&css->refcnt, 0);
+	atomic_set(&css->refcnt, 1);
 	css->flags = 0;
 	if (cgrp == dummytop)
 		set_bit(CSS_ROOT, &css->flags);
@@ -2332,6 +2341,29 @@
 	cgrp->subsys[ss->subsys_id] = css;
 }
 
+static void cgroup_lock_hierarchy(struct cgroupfs_root *root)
+{
+	/* We need to take each hierarchy_mutex in a consistent order */
+	int i;
+
+	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+		struct cgroup_subsys *ss = subsys[i];
+		if (ss->root == root)
+			mutex_lock_nested(&ss->hierarchy_mutex, i);
+	}
+}
+
+static void cgroup_unlock_hierarchy(struct cgroupfs_root *root)
+{
+	int i;
+
+	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+		struct cgroup_subsys *ss = subsys[i];
+		if (ss->root == root)
+			mutex_unlock(&ss->hierarchy_mutex);
+	}
+}
+
 /*
  * cgroup_create - create a cgroup
  * @parent: cgroup that will be parent of the new cgroup
@@ -2380,7 +2412,9 @@
 		init_cgroup_css(css, ss, cgrp);
 	}
 
+	cgroup_lock_hierarchy(root);
 	list_add(&cgrp->sibling, &cgrp->parent->children);
+	cgroup_unlock_hierarchy(root);
 	root->number_of_cgroups++;
 
 	err = cgroup_create_dir(cgrp, dentry, mode);
@@ -2431,7 +2465,7 @@
 {
 	/* Check the reference count on each subsystem. Since we
 	 * already established that there are no tasks in the
-	 * cgroup, if the css refcount is also 0, then there should
+	 * cgroup, if the css refcount is also 1, then there should
 	 * be no outstanding references, so the subsystem is safe to
 	 * destroy. We scan across all subsystems rather than using
 	 * the per-hierarchy linked list of mounted subsystems since
@@ -2452,19 +2486,67 @@
 		 * matter, since it can only happen if the cgroup
 		 * has been deleted and hence no longer needs the
 		 * release agent to be called anyway. */
-		if (css && atomic_read(&css->refcnt))
+		if (css && (atomic_read(&css->refcnt) > 1))
 			return 1;
 	}
 	return 0;
 }
 
+/*
+ * Atomically mark all (or else none) of the cgroup's CSS objects as
+ * CSS_REMOVED. Return true on success, or false if the cgroup has
+ * busy subsystems. Call with cgroup_mutex held
+ */
+
+static int cgroup_clear_css_refs(struct cgroup *cgrp)
+{
+	struct cgroup_subsys *ss;
+	unsigned long flags;
+	bool failed = false;
+	local_irq_save(flags);
+	for_each_subsys(cgrp->root, ss) {
+		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
+		int refcnt;
+		do {
+			/* We can only remove a CSS with a refcnt==1 */
+			refcnt = atomic_read(&css->refcnt);
+			if (refcnt > 1) {
+				failed = true;
+				goto done;
+			}
+			BUG_ON(!refcnt);
+			/*
+			 * Drop the refcnt to 0 while we check other
+			 * subsystems. This will cause any racing
+			 * css_tryget() to spin until we set the
+			 * CSS_REMOVED bits or abort
+			 */
+		} while (atomic_cmpxchg(&css->refcnt, refcnt, 0) != refcnt);
+	}
+ done:
+	for_each_subsys(cgrp->root, ss) {
+		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
+		if (failed) {
+			/*
+			 * Restore old refcnt if we previously managed
+			 * to clear it from 1 to 0
+			 */
+			if (!atomic_read(&css->refcnt))
+				atomic_set(&css->refcnt, 1);
+		} else {
+			/* Commit the fact that the CSS is removed */
+			set_bit(CSS_REMOVED, &css->flags);
+		}
+	}
+	local_irq_restore(flags);
+	return !failed;
+}
+
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
 {
 	struct cgroup *cgrp = dentry->d_fsdata;
 	struct dentry *d;
 	struct cgroup *parent;
-	struct super_block *sb;
-	struct cgroupfs_root *root;
 
 	/* the vfs holds both inode->i_mutex already */
 
@@ -2487,12 +2569,10 @@
 
 	mutex_lock(&cgroup_mutex);
 	parent = cgrp->parent;
-	root = cgrp->root;
-	sb = root->sb;
 
 	if (atomic_read(&cgrp->count)
 	    || !list_empty(&cgrp->children)
-	    || cgroup_has_css_refs(cgrp)) {
+	    || !cgroup_clear_css_refs(cgrp)) {
 		mutex_unlock(&cgroup_mutex);
 		return -EBUSY;
 	}
@@ -2502,8 +2582,12 @@
 	if (!list_empty(&cgrp->release_list))
 		list_del(&cgrp->release_list);
 	spin_unlock(&release_list_lock);
-	/* delete my sibling from parent->children */
+
+	cgroup_lock_hierarchy(cgrp->root);
+	/* delete this cgroup from parent->children */
 	list_del(&cgrp->sibling);
+	cgroup_unlock_hierarchy(cgrp->root);
+
 	spin_lock(&cgrp->dentry->d_lock);
 	d = dget(cgrp->dentry);
 	spin_unlock(&d->d_lock);
@@ -2525,6 +2609,7 @@
 	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
 
 	/* Create the top cgroup state for this subsystem */
+	list_add(&ss->sibling, &rootnode.subsys_list);
 	ss->root = &rootnode;
 	css = ss->create(ss, dummytop);
 	/* We don't handle early failures gracefully */
@@ -2544,6 +2629,7 @@
 	 * need to invoke fork callbacks here. */
 	BUG_ON(!list_empty(&init_task.tasks));
 
+	mutex_init(&ss->hierarchy_mutex);
 	ss->active = 1;
 }
 
@@ -2562,7 +2648,6 @@
 	INIT_HLIST_NODE(&init_css_set.hlist);
 	css_set_count = 1;
 	init_cgroup_root(&rootnode);
-	list_add(&rootnode.root_list, &roots);
 	root_count = 1;
 	init_task.cgroups = &init_css_set;
 
@@ -2669,15 +2754,12 @@
 
 	mutex_lock(&cgroup_mutex);
 
-	for_each_root(root) {
+	for_each_active_root(root) {
 		struct cgroup_subsys *ss;
 		struct cgroup *cgrp;
 		int subsys_id;
 		int count = 0;
 
-		/* Skip this hierarchy if it has no active subsystems */
-		if (!root->actual_subsys_bits)
-			continue;
 		seq_printf(m, "%lu:", root->subsys_bits);
 		for_each_subsys(root, ss)
 			seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
@@ -2800,8 +2882,10 @@
 {
 	if (use_task_css_set_links) {
 		write_lock(&css_set_lock);
+		task_lock(child);
 		if (list_empty(&child->cg_list))
 			list_add(&child->cg_list, &child->cgroups->tasks);
+		task_unlock(child);
 		write_unlock(&css_set_lock);
 	}
 }
@@ -2907,6 +2991,7 @@
 		mutex_unlock(&cgroup_mutex);
 		return 0;
 	}
+	task_lock(tsk);
 	cg = tsk->cgroups;
 	parent = task_cgroup(tsk, subsys->subsys_id);
 
@@ -2919,6 +3004,7 @@
 
 	/* Keep the cgroup alive */
 	get_css_set(cg);
+	task_unlock(tsk);
 	mutex_unlock(&cgroup_mutex);
 
 	/* Now do the VFS work to create a cgroup */
@@ -2937,7 +3023,7 @@
 	}
 
 	/* Create the cgroup directory, which also creates the cgroup */
-	ret = vfs_mkdir(inode, dentry, S_IFDIR | 0755);
+	ret = vfs_mkdir(inode, dentry, 0755);
 	child = __d_cgrp(dentry);
 	dput(dentry);
 	if (ret) {
@@ -2947,13 +3033,6 @@
 		goto out_release;
 	}
 
-	if (!child) {
-		printk(KERN_INFO
-		       "Couldn't find new cgroup %s\n", nodename);
-		ret = -ENOMEM;
-		goto out_release;
-	}
-
 	/* The cgroup now exists. Retake cgroup_mutex and check
 	 * that we're still in the same state that we thought we
 	 * were. */
@@ -3049,7 +3128,8 @@
 {
 	struct cgroup *cgrp = css->cgroup;
 	rcu_read_lock();
-	if (atomic_dec_and_test(&css->refcnt) && notify_on_release(cgrp)) {
+	if ((atomic_dec_return(&css->refcnt) == 1) &&
+	    notify_on_release(cgrp)) {
 		set_bit(CGRP_RELEASABLE, &cgrp->flags);
 		check_for_release(cgrp);
 	}
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 30e74dd..79e40f0 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -379,8 +379,11 @@
 
 int disable_nonboot_cpus(void)
 {
-	int cpu, first_cpu, error = 0;
+	int cpu, first_cpu, error;
 
+	error = stop_machine_create();
+	if (error)
+		return error;
 	cpu_maps_update_begin();
 	first_cpu = cpumask_first(cpu_online_mask);
 	/* We take down all of the non-boot CPUs in one shot to avoid races
@@ -409,6 +412,7 @@
 		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
 	}
 	cpu_maps_update_done();
+	stop_machine_destroy();
 	return error;
 }
 
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 345ace5..a856788 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -84,7 +84,7 @@
 	struct cgroup_subsys_state css;
 
 	unsigned long flags;		/* "unsigned long" so bitops work */
-	cpumask_t cpus_allowed;		/* CPUs allowed to tasks in cpuset */
+	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
 	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */
 
 	struct cpuset *parent;		/* my parent */
@@ -195,8 +195,6 @@
 
 static struct cpuset top_cpuset = {
 	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
-	.cpus_allowed = CPU_MASK_ALL,
-	.mems_allowed = NODE_MASK_ALL,
 };
 
 /*
@@ -278,7 +276,7 @@
 };
 
 /*
- * Return in *pmask the portion of a cpusets's cpus_allowed that
+ * Return in pmask the portion of a cpuset's cpus_allowed that
  * are online.  If none are online, walk up the cpuset hierarchy
  * until we find one that does have some online cpus.  If we get
  * all the way to the top and still haven't found any online cpus,
@@ -291,15 +289,16 @@
  * Call with callback_mutex held.
  */
 
-static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
+static void guarantee_online_cpus(const struct cpuset *cs,
+				  struct cpumask *pmask)
 {
-	while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map))
+	while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
 		cs = cs->parent;
 	if (cs)
-		cpus_and(*pmask, cs->cpus_allowed, cpu_online_map);
+		cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
 	else
-		*pmask = cpu_online_map;
-	BUG_ON(!cpus_intersects(*pmask, cpu_online_map));
+		cpumask_copy(pmask, cpu_online_mask);
+	BUG_ON(!cpumask_intersects(pmask, cpu_online_mask));
 }
 
 /*
@@ -375,14 +374,9 @@
 	struct task_struct *tsk = current;
 	struct cpuset *cs;
 
-	if (task_cs(tsk) == &top_cpuset) {
-		/* Don't need rcu for top_cpuset.  It's never freed. */
-		my_cpusets_mem_gen = top_cpuset.mems_generation;
-	} else {
-		rcu_read_lock();
-		my_cpusets_mem_gen = task_cs(tsk)->mems_generation;
-		rcu_read_unlock();
-	}
+	rcu_read_lock();
+	my_cpusets_mem_gen = task_cs(tsk)->mems_generation;
+	rcu_read_unlock();
 
 	if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
 		mutex_lock(&callback_mutex);
@@ -414,12 +408,43 @@
 
 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 {
-	return	cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
+	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
 		nodes_subset(p->mems_allowed, q->mems_allowed) &&
 		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 		is_mem_exclusive(p) <= is_mem_exclusive(q);
 }
 
+/**
+ * alloc_trial_cpuset - allocate a trial cpuset
+ * @cs: the cpuset that the trial cpuset duplicates
+ */
+static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
+{
+	struct cpuset *trial;
+
+	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
+	if (!trial)
+		return NULL;
+
+	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
+		kfree(trial);
+		return NULL;
+	}
+	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
+
+	return trial;
+}
+
+/**
+ * free_trial_cpuset - free the trial cpuset
+ * @trial: the trial cpuset to be freed
+ */
+static void free_trial_cpuset(struct cpuset *trial)
+{
+	free_cpumask_var(trial->cpus_allowed);
+	kfree(trial);
+}
+
 /*
  * validate_change() - Used to validate that any proposed cpuset change
  *		       follows the structural rules for cpusets.
@@ -469,7 +494,7 @@
 		c = cgroup_cs(cont);
 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 		    c != cur &&
-		    cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
+		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
 			return -EINVAL;
 		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 		    c != cur &&
@@ -479,7 +504,7 @@
 
 	/* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
 	if (cgroup_task_count(cur->css.cgroup)) {
-		if (cpus_empty(trial->cpus_allowed) ||
+		if (cpumask_empty(trial->cpus_allowed) ||
 		    nodes_empty(trial->mems_allowed)) {
 			return -ENOSPC;
 		}
@@ -494,7 +519,7 @@
  */
 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 {
-	return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
+	return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
 }
 
 static void
@@ -519,7 +544,7 @@
 		cp = list_first_entry(&q, struct cpuset, stack_list);
 		list_del(q.next);
 
-		if (cpus_empty(cp->cpus_allowed))
+		if (cpumask_empty(cp->cpus_allowed))
 			continue;
 
 		if (is_sched_load_balance(cp))
@@ -543,7 +568,7 @@
  * load balancing domains (sched domains) as specified by that partial
  * partition.
  *
- * See "What is sched_load_balance" in Documentation/cpusets.txt
+ * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
  * for a background explanation of this.
  *
  * Does not return errors, on the theory that the callers of this
@@ -586,7 +611,8 @@
  *	element of the partition (one sched domain) to be passed to
  *	partition_sched_domains().
  */
-static int generate_sched_domains(cpumask_t **domains,
+/* FIXME: see the FIXME in partition_sched_domains() */
+static int generate_sched_domains(struct cpumask **domains,
 			struct sched_domain_attr **attributes)
 {
 	LIST_HEAD(q);		/* queue of cpusets to be scanned */
@@ -594,10 +620,10 @@
 	struct cpuset **csa;	/* array of all cpuset ptrs */
 	int csn;		/* how many cpuset ptrs in csa so far */
 	int i, j, k;		/* indices for partition finding loops */
-	cpumask_t *doms;	/* resulting partition; i.e. sched domains */
+	struct cpumask *doms;	/* resulting partition; i.e. sched domains */
 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
 	int ndoms = 0;		/* number of sched domains in result */
-	int nslot;		/* next empty doms[] cpumask_t slot */
+	int nslot;		/* next empty doms[] struct cpumask slot */
 
 	doms = NULL;
 	dattr = NULL;
@@ -605,7 +631,7 @@
 
 	/* Special case for the 99% of systems with one, full, sched domain */
 	if (is_sched_load_balance(&top_cpuset)) {
-		doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
+		doms = kmalloc(cpumask_size(), GFP_KERNEL);
 		if (!doms)
 			goto done;
 
@@ -614,7 +640,7 @@
 			*dattr = SD_ATTR_INIT;
 			update_domain_attr_tree(dattr, &top_cpuset);
 		}
-		*doms = top_cpuset.cpus_allowed;
+		cpumask_copy(doms, top_cpuset.cpus_allowed);
 
 		ndoms = 1;
 		goto done;
@@ -633,7 +659,7 @@
 		cp = list_first_entry(&q, struct cpuset, stack_list);
 		list_del(q.next);
 
-		if (cpus_empty(cp->cpus_allowed))
+		if (cpumask_empty(cp->cpus_allowed))
 			continue;
 
 		/*
@@ -684,7 +710,7 @@
 	 * Now we know how many domains to create.
 	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
 	 */
-	doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
+	doms = kmalloc(ndoms * cpumask_size(), GFP_KERNEL);
 	if (!doms)
 		goto done;
 
@@ -696,7 +722,7 @@
 
 	for (nslot = 0, i = 0; i < csn; i++) {
 		struct cpuset *a = csa[i];
-		cpumask_t *dp;
+		struct cpumask *dp;
 		int apn = a->pn;
 
 		if (apn < 0) {
@@ -719,14 +745,14 @@
 			continue;
 		}
 
-		cpus_clear(*dp);
+		cpumask_clear(dp);
 		if (dattr)
 			*(dattr + nslot) = SD_ATTR_INIT;
 		for (j = i; j < csn; j++) {
 			struct cpuset *b = csa[j];
 
 			if (apn == b->pn) {
-				cpus_or(*dp, *dp, b->cpus_allowed);
+				cpumask_or(dp, dp, b->cpus_allowed);
 				if (dattr)
 					update_domain_attr_tree(dattr + nslot, b);
 
@@ -766,7 +792,7 @@
 static void do_rebuild_sched_domains(struct work_struct *unused)
 {
 	struct sched_domain_attr *attr;
-	cpumask_t *doms;
+	struct cpumask *doms;
 	int ndoms;
 
 	get_online_cpus();
@@ -835,7 +861,7 @@
 static int cpuset_test_cpumask(struct task_struct *tsk,
 			       struct cgroup_scanner *scan)
 {
-	return !cpus_equal(tsk->cpus_allowed,
+	return !cpumask_equal(&tsk->cpus_allowed,
 			(cgroup_cs(scan->cg))->cpus_allowed);
 }
 
@@ -853,7 +879,7 @@
 static void cpuset_change_cpumask(struct task_struct *tsk,
 				  struct cgroup_scanner *scan)
 {
-	set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed));
+	set_cpus_allowed_ptr(tsk, ((cgroup_cs(scan->cg))->cpus_allowed));
 }
 
 /**
@@ -885,10 +911,10 @@
  * @cs: the cpuset to consider
  * @buf: buffer of cpu numbers written to this cpuset
  */
-static int update_cpumask(struct cpuset *cs, const char *buf)
+static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+			  const char *buf)
 {
 	struct ptr_heap heap;
-	struct cpuset trialcs;
 	int retval;
 	int is_load_balanced;
 
@@ -896,8 +922,6 @@
 	if (cs == &top_cpuset)
 		return -EACCES;
 
-	trialcs = *cs;
-
 	/*
 	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
 	 * Since cpulist_parse() fails on an empty mask, we special case
@@ -905,31 +929,31 @@
 	 * with tasks have cpus.
 	 */
 	if (!*buf) {
-		cpus_clear(trialcs.cpus_allowed);
+		cpumask_clear(trialcs->cpus_allowed);
 	} else {
-		retval = cpulist_parse(buf, &trialcs.cpus_allowed);
+		retval = cpulist_parse(buf, trialcs->cpus_allowed);
 		if (retval < 0)
 			return retval;
 
-		if (!cpus_subset(trialcs.cpus_allowed, cpu_online_map))
+		if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask))
 			return -EINVAL;
 	}
-	retval = validate_change(cs, &trialcs);
+	retval = validate_change(cs, trialcs);
 	if (retval < 0)
 		return retval;
 
 	/* Nothing to do if the cpus didn't change */
-	if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed))
+	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
 		return 0;
 
 	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
 	if (retval)
 		return retval;
 
-	is_load_balanced = is_sched_load_balance(&trialcs);
+	is_load_balanced = is_sched_load_balance(trialcs);
 
 	mutex_lock(&callback_mutex);
-	cs->cpus_allowed = trialcs.cpus_allowed;
+	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
 	mutex_unlock(&callback_mutex);
 
 	/*
@@ -1017,7 +1041,7 @@
 	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
 
 	fudge = 10;				/* spare mmarray[] slots */
-	fudge += cpus_weight(cs->cpus_allowed);	/* imagine one fork-bomb/cpu */
+	fudge += cpumask_weight(cs->cpus_allowed);/* imagine 1 fork-bomb/cpu */
 	retval = -ENOMEM;
 
 	/*
@@ -1104,9 +1128,9 @@
  * lock each such tasks mm->mmap_sem, scan its vma's and rebind
  * their mempolicies to the cpusets new mems_allowed.
  */
-static int update_nodemask(struct cpuset *cs, const char *buf)
+static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
+			   const char *buf)
 {
-	struct cpuset trialcs;
 	nodemask_t oldmem;
 	int retval;
 
@@ -1117,8 +1141,6 @@
 	if (cs == &top_cpuset)
 		return -EACCES;
 
-	trialcs = *cs;
-
 	/*
 	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
 	 * Since nodelist_parse() fails on an empty mask, we special case
@@ -1126,27 +1148,27 @@
 	 * with tasks have memory.
 	 */
 	if (!*buf) {
-		nodes_clear(trialcs.mems_allowed);
+		nodes_clear(trialcs->mems_allowed);
 	} else {
-		retval = nodelist_parse(buf, trialcs.mems_allowed);
+		retval = nodelist_parse(buf, trialcs->mems_allowed);
 		if (retval < 0)
 			goto done;
 
-		if (!nodes_subset(trialcs.mems_allowed,
+		if (!nodes_subset(trialcs->mems_allowed,
 				node_states[N_HIGH_MEMORY]))
 			return -EINVAL;
 	}
 	oldmem = cs->mems_allowed;
-	if (nodes_equal(oldmem, trialcs.mems_allowed)) {
+	if (nodes_equal(oldmem, trialcs->mems_allowed)) {
 		retval = 0;		/* Too easy - nothing to do */
 		goto done;
 	}
-	retval = validate_change(cs, &trialcs);
+	retval = validate_change(cs, trialcs);
 	if (retval < 0)
 		goto done;
 
 	mutex_lock(&callback_mutex);
-	cs->mems_allowed = trialcs.mems_allowed;
+	cs->mems_allowed = trialcs->mems_allowed;
 	cs->mems_generation = cpuset_mems_generation++;
 	mutex_unlock(&callback_mutex);
 
@@ -1167,7 +1189,8 @@
 
 	if (val != cs->relax_domain_level) {
 		cs->relax_domain_level = val;
-		if (!cpus_empty(cs->cpus_allowed) && is_sched_load_balance(cs))
+		if (!cpumask_empty(cs->cpus_allowed) &&
+		    is_sched_load_balance(cs))
 			async_rebuild_sched_domains();
 	}
 
@@ -1186,31 +1209,36 @@
 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 		       int turning_on)
 {
-	struct cpuset trialcs;
+	struct cpuset *trialcs;
 	int err;
 	int balance_flag_changed;
 
-	trialcs = *cs;
-	if (turning_on)
-		set_bit(bit, &trialcs.flags);
-	else
-		clear_bit(bit, &trialcs.flags);
+	trialcs = alloc_trial_cpuset(cs);
+	if (!trialcs)
+		return -ENOMEM;
 
-	err = validate_change(cs, &trialcs);
+	if (turning_on)
+		set_bit(bit, &trialcs->flags);
+	else
+		clear_bit(bit, &trialcs->flags);
+
+	err = validate_change(cs, trialcs);
 	if (err < 0)
-		return err;
+		goto out;
 
 	balance_flag_changed = (is_sched_load_balance(cs) !=
-		 			is_sched_load_balance(&trialcs));
+				is_sched_load_balance(trialcs));
 
 	mutex_lock(&callback_mutex);
-	cs->flags = trialcs.flags;
+	cs->flags = trialcs->flags;
 	mutex_unlock(&callback_mutex);
 
-	if (!cpus_empty(trialcs.cpus_allowed) && balance_flag_changed)
+	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
 		async_rebuild_sched_domains();
 
-	return 0;
+out:
+	free_trial_cpuset(trialcs);
+	return err;
 }
 
 /*
@@ -1311,42 +1339,47 @@
 	return val;
 }
 
+/* Protected by cgroup_lock */
+static cpumask_var_t cpus_attach;
+
 /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
 static int cpuset_can_attach(struct cgroup_subsys *ss,
 			     struct cgroup *cont, struct task_struct *tsk)
 {
 	struct cpuset *cs = cgroup_cs(cont);
+	int ret = 0;
 
-	if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
+	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
 		return -ENOSPC;
-	if (tsk->flags & PF_THREAD_BOUND) {
-		cpumask_t mask;
 
+	if (tsk->flags & PF_THREAD_BOUND) {
 		mutex_lock(&callback_mutex);
-		mask = cs->cpus_allowed;
+		if (!cpumask_equal(&tsk->cpus_allowed, cs->cpus_allowed))
+			ret = -EINVAL;
 		mutex_unlock(&callback_mutex);
-		if (!cpus_equal(tsk->cpus_allowed, mask))
-			return -EINVAL;
 	}
 
-	return security_task_setscheduler(tsk, 0, NULL);
+	return ret < 0 ? ret : security_task_setscheduler(tsk, 0, NULL);
 }
 
 static void cpuset_attach(struct cgroup_subsys *ss,
 			  struct cgroup *cont, struct cgroup *oldcont,
 			  struct task_struct *tsk)
 {
-	cpumask_t cpus;
 	nodemask_t from, to;
 	struct mm_struct *mm;
 	struct cpuset *cs = cgroup_cs(cont);
 	struct cpuset *oldcs = cgroup_cs(oldcont);
 	int err;
 
-	mutex_lock(&callback_mutex);
-	guarantee_online_cpus(cs, &cpus);
-	err = set_cpus_allowed_ptr(tsk, &cpus);
-	mutex_unlock(&callback_mutex);
+	if (cs == &top_cpuset) {
+		cpumask_copy(cpus_attach, cpu_possible_mask);
+	} else {
+		mutex_lock(&callback_mutex);
+		guarantee_online_cpus(cs, cpus_attach);
+		mutex_unlock(&callback_mutex);
+	}
+	err = set_cpus_allowed_ptr(tsk, cpus_attach);
 	if (err)
 		return;
 
@@ -1359,7 +1392,6 @@
 			cpuset_migrate_mm(mm, &from, &to);
 		mmput(mm);
 	}
-
 }
 
 /* The various types of files and directories in a cpuset file system */
@@ -1454,21 +1486,31 @@
 				const char *buf)
 {
 	int retval = 0;
+	struct cpuset *cs = cgroup_cs(cgrp);
+	struct cpuset *trialcs;
 
 	if (!cgroup_lock_live_group(cgrp))
 		return -ENODEV;
 
+	trialcs = alloc_trial_cpuset(cs);
+	if (!trialcs) {
+		cgroup_unlock();
+		return -ENOMEM;
+	}
+
 	switch (cft->private) {
 	case FILE_CPULIST:
-		retval = update_cpumask(cgroup_cs(cgrp), buf);
+		retval = update_cpumask(cs, trialcs, buf);
 		break;
 	case FILE_MEMLIST:
-		retval = update_nodemask(cgroup_cs(cgrp), buf);
+		retval = update_nodemask(cs, trialcs, buf);
 		break;
 	default:
 		retval = -EINVAL;
 		break;
 	}
+
+	free_trial_cpuset(trialcs);
 	cgroup_unlock();
 	return retval;
 }
@@ -1487,13 +1527,13 @@
 
 static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
 {
-	cpumask_t mask;
+	int ret;
 
 	mutex_lock(&callback_mutex);
-	mask = cs->cpus_allowed;
+	ret = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
 	mutex_unlock(&callback_mutex);
 
-	return cpulist_scnprintf(page, PAGE_SIZE, &mask);
+	return ret;
 }
 
 static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
@@ -1729,7 +1769,7 @@
 	parent_cs = cgroup_cs(parent);
 
 	cs->mems_allowed = parent_cs->mems_allowed;
-	cs->cpus_allowed = parent_cs->cpus_allowed;
+	cpumask_copy(cs->cpus_allowed, parent_cs->cpus_allowed);
 	return;
 }
 
@@ -1755,6 +1795,10 @@
 	cs = kmalloc(sizeof(*cs), GFP_KERNEL);
 	if (!cs)
 		return ERR_PTR(-ENOMEM);
+	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
+		kfree(cs);
+		return ERR_PTR(-ENOMEM);
+	}
 
 	cpuset_update_task_memory_state();
 	cs->flags = 0;
@@ -1763,7 +1807,7 @@
 	if (is_spread_slab(parent))
 		set_bit(CS_SPREAD_SLAB, &cs->flags);
 	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
-	cpus_clear(cs->cpus_allowed);
+	cpumask_clear(cs->cpus_allowed);
 	nodes_clear(cs->mems_allowed);
 	cs->mems_generation = cpuset_mems_generation++;
 	fmeter_init(&cs->fmeter);
@@ -1790,6 +1834,7 @@
 		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
 
 	number_of_cpusets--;
+	free_cpumask_var(cs->cpus_allowed);
 	kfree(cs);
 }
 
@@ -1813,6 +1858,8 @@
 
 int __init cpuset_init_early(void)
 {
+	alloc_bootmem_cpumask_var(&top_cpuset.cpus_allowed);
+
 	top_cpuset.mems_generation = cpuset_mems_generation++;
 	return 0;
 }
@@ -1828,7 +1875,7 @@
 {
 	int err = 0;
 
-	cpus_setall(top_cpuset.cpus_allowed);
+	cpumask_setall(top_cpuset.cpus_allowed);
 	nodes_setall(top_cpuset.mems_allowed);
 
 	fmeter_init(&top_cpuset.fmeter);
@@ -1840,6 +1887,9 @@
 	if (err < 0)
 		return err;
 
+	if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
+		BUG();
+
 	number_of_cpusets = 1;
 	return 0;
 }
@@ -1914,7 +1964,7 @@
 	 * has online cpus, so can't be empty).
 	 */
 	parent = cs->parent;
-	while (cpus_empty(parent->cpus_allowed) ||
+	while (cpumask_empty(parent->cpus_allowed) ||
 			nodes_empty(parent->mems_allowed))
 		parent = parent->parent;
 
@@ -1955,7 +2005,7 @@
 		}
 
 		/* Continue past cpusets with all cpus, mems online */
-		if (cpus_subset(cp->cpus_allowed, cpu_online_map) &&
+		if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) &&
 		    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
 			continue;
 
@@ -1963,13 +2013,14 @@
 
 		/* Remove offline cpus and mems from this cpuset. */
 		mutex_lock(&callback_mutex);
-		cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map);
+		cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
+			    cpu_online_mask);
 		nodes_and(cp->mems_allowed, cp->mems_allowed,
 						node_states[N_HIGH_MEMORY]);
 		mutex_unlock(&callback_mutex);
 
 		/* Move tasks from the empty cpuset to a parent */
-		if (cpus_empty(cp->cpus_allowed) ||
+		if (cpumask_empty(cp->cpus_allowed) ||
 		     nodes_empty(cp->mems_allowed))
 			remove_tasks_in_empty_cpuset(cp);
 		else {
@@ -1995,7 +2046,7 @@
 				unsigned long phase, void *unused_cpu)
 {
 	struct sched_domain_attr *attr;
-	cpumask_t *doms;
+	struct cpumask *doms;
 	int ndoms;
 
 	switch (phase) {
@@ -2010,7 +2061,7 @@
 	}
 
 	cgroup_lock();
-	top_cpuset.cpus_allowed = cpu_online_map;
+	cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
 	scan_for_empty_cpusets(&top_cpuset);
 	ndoms = generate_sched_domains(&doms, &attr);
 	cgroup_unlock();
@@ -2055,7 +2106,7 @@
 
 void __init cpuset_init_smp(void)
 {
-	top_cpuset.cpus_allowed = cpu_online_map;
+	cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
 	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 
 	hotcpu_notifier(cpuset_track_online_cpus, 0);
@@ -2065,15 +2116,15 @@
 /**
  * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
- * @pmask: pointer to cpumask_t variable to receive cpus_allowed set.
+ * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
  *
- * Description: Returns the cpumask_t cpus_allowed of the cpuset
+ * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
  * attached to the specified @tsk.  Guaranteed to return some non-empty
  * subset of cpu_online_map, even if this means going outside the
  * tasks cpuset.
  **/
 
-void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask)
+void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 {
 	mutex_lock(&callback_mutex);
 	cpuset_cpus_allowed_locked(tsk, pmask);
@@ -2084,7 +2135,7 @@
  * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
  * Must be called with callback_mutex held.
  **/
-void cpuset_cpus_allowed_locked(struct task_struct *tsk, cpumask_t *pmask)
+void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
 {
 	task_lock(tsk);
 	guarantee_online_cpus(task_cs(tsk), pmask);
diff --git a/kernel/cred.c b/kernel/cred.c
index ff7bc07..3a03918 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -372,7 +372,8 @@
 	    old->fsuid != new->fsuid ||
 	    old->fsgid != new->fsgid ||
 	    !cap_issubset(new->cap_permitted, old->cap_permitted)) {
-		set_dumpable(task->mm, suid_dumpable);
+		if (task->mm)
+			set_dumpable(task->mm, suid_dumpable);
 		task->pdeath_signal = 0;
 		smp_wmb();
 	}
@@ -506,6 +507,7 @@
 	else
 		old = get_cred(&init_cred);
 
+	*new = *old;
 	get_uid(new->user);
 	get_group_info(new->group_info);
 
@@ -529,6 +531,7 @@
 
 error:
 	put_cred(new);
+	put_cred(old);
 	return NULL;
 }
 EXPORT_SYMBOL(prepare_kernel_cred);
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index 0511716..667c841 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -209,8 +209,7 @@
 module_init(proc_execdomains_init);
 #endif
 
-asmlinkage long
-sys_personality(u_long personality)
+SYSCALL_DEFINE1(personality, u_long, personality)
 {
 	u_long old = current->personality;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index c7740fa..f80dec3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1141,7 +1141,7 @@
 
 EXPORT_SYMBOL(complete_and_exit);
 
-asmlinkage long sys_exit(int error_code)
+SYSCALL_DEFINE1(exit, int, error_code)
 {
 	do_exit((error_code&0xff)<<8);
 }
@@ -1182,9 +1182,11 @@
  * wait4()-ing process will get the correct exit code - even if this
  * thread is not the thread group leader.
  */
-asmlinkage void sys_exit_group(int error_code)
+SYSCALL_DEFINE1(exit_group, int, error_code)
 {
 	do_group_exit((error_code & 0xff) << 8);
+	/* NOTREACHED */
+	return 0;
 }
 
 static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
@@ -1752,9 +1754,8 @@
 	return retval;
 }
 
-asmlinkage long sys_waitid(int which, pid_t upid,
-			   struct siginfo __user *infop, int options,
-			   struct rusage __user *ru)
+SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
+		infop, int, options, struct rusage __user *, ru)
 {
 	struct pid *pid = NULL;
 	enum pid_type type;
@@ -1793,8 +1794,8 @@
 	return ret;
 }
 
-asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
-			  int options, struct rusage __user *ru)
+SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
+		int, options, struct rusage __user *, ru)
 {
 	struct pid *pid = NULL;
 	enum pid_type type;
@@ -1831,7 +1832,7 @@
  * sys_waitpid() remains for compatibility. waitpid() should be
  * implemented by calling sys_wait4() from libc.a.
  */
-asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
+SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
 {
 	return sys_wait4(pid, stat_addr, options, NULL);
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 7b8f2a7..bf0cef8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -901,7 +901,7 @@
 	clear_freeze_flag(p);
 }
 
-asmlinkage long sys_set_tid_address(int __user *tidptr)
+SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
 {
 	current->clear_child_tid = tidptr;
 
@@ -1126,12 +1126,12 @@
 
 	if (pid != &init_struct_pid) {
 		retval = -ENOMEM;
-		pid = alloc_pid(task_active_pid_ns(p));
+		pid = alloc_pid(p->nsproxy->pid_ns);
 		if (!pid)
 			goto bad_fork_cleanup_io;
 
 		if (clone_flags & CLONE_NEWPID) {
-			retval = pid_ns_prepare_proc(task_active_pid_ns(p));
+			retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
 			if (retval < 0)
 				goto bad_fork_free_pid;
 		}
@@ -1481,12 +1481,10 @@
 	fs_cachep = kmem_cache_create("fs_cache",
 			sizeof(struct fs_struct), 0,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
-	vm_area_cachep = kmem_cache_create("vm_area_struct",
-			sizeof(struct vm_area_struct), 0,
-			SLAB_PANIC, NULL);
 	mm_cachep = kmem_cache_create("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	mmap_init();
 }
 
 /*
@@ -1605,7 +1603,7 @@
  * constructed. Here we are modifying the current, active,
  * task_struct.
  */
-asmlinkage long sys_unshare(unsigned long unshare_flags)
+SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 {
 	int err = 0;
 	struct fs_struct *fs, *new_fs = NULL;
diff --git a/kernel/futex.c b/kernel/futex.c
index 002aa18..f89d373 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1733,9 +1733,8 @@
  * @head: pointer to the list-head
  * @len: length of the list-head, as userspace expects
  */
-asmlinkage long
-sys_set_robust_list(struct robust_list_head __user *head,
-		    size_t len)
+SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
+		size_t, len)
 {
 	if (!futex_cmpxchg_enabled)
 		return -ENOSYS;
@@ -1756,9 +1755,9 @@
  * @head_ptr: pointer to a list-head pointer, the kernel fills it in
  * @len_ptr: pointer to a length field, the kernel fills in the header size
  */
-asmlinkage long
-sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
-		    size_t __user *len_ptr)
+SYSCALL_DEFINE3(get_robust_list, int, pid,
+		struct robust_list_head __user * __user *, head_ptr,
+		size_t __user *, len_ptr)
 {
 	struct robust_list_head __user *head;
 	unsigned long ret;
@@ -1978,9 +1977,9 @@
 }
 
 
-asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
-			  struct timespec __user *utime, u32 __user *uaddr2,
-			  u32 val3)
+SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+		struct timespec __user *, utime, u32 __user *, uaddr2,
+		u32, val3)
 {
 	struct timespec ts;
 	ktime_t t, *tp = NULL;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 1455b76..2dc30c5 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1467,8 +1467,8 @@
 	return ret;
 }
 
-asmlinkage long
-sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
+SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
+		struct timespec __user *, rmtp)
 {
 	struct timespec tu;
 
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index cc0f732..1de9700 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <linux/async.h>
 
 #include "internals.h"
 
@@ -34,6 +35,10 @@
 	unsigned int status;
 	int i;
 
+	/*
+	 * quiesce the kernel, or at least the asynchronous portion
+	 */
+	async_synchronize_full();
 	mutex_lock(&probing_active);
 	/*
 	 * something may have generated an irq long ago and we want to
diff --git a/kernel/itimer.c b/kernel/itimer.c
index db7c358..6a5fe93 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -100,7 +100,7 @@
 	return 0;
 }
 
-asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
+SYSCALL_DEFINE2(getitimer, int, which, struct itimerval __user *, value)
 {
 	int error = -EFAULT;
 	struct itimerval get_buffer;
@@ -260,9 +260,8 @@
 	return it_old.it_value.tv_sec;
 }
 
-asmlinkage long sys_setitimer(int which,
-			      struct itimerval __user *value,
-			      struct itimerval __user *ovalue)
+SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
+		struct itimerval __user *, ovalue)
 {
 	struct itimerval set_buffer, get_buffer;
 	int error;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 3fb855a..8a6d7b0 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -934,9 +934,8 @@
 
 static DEFINE_MUTEX(kexec_mutex);
 
-asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
-				struct kexec_segment __user *segments,
-				unsigned long flags)
+SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
+		struct kexec_segment __user *, segments, unsigned long, flags)
 {
 	struct kimage **dest_image, *image;
 	int result;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1b9cbdc..7ba8cd9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -123,7 +123,7 @@
 static int __kprobes check_safety(void)
 {
 	int ret = 0;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
 	ret = freeze_processes();
 	if (ret == 0) {
 		struct task_struct *p, *q;
diff --git a/kernel/module.c b/kernel/module.c
index 496dcb5..e8b51d4 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -50,6 +50,7 @@
 #include <asm/sections.h>
 #include <linux/tracepoint.h>
 #include <linux/ftrace.h>
+#include <linux/async.h>
 
 #if 0
 #define DEBUGP printk
@@ -742,8 +743,8 @@
 	mutex_lock(&module_mutex);
 }
 
-asmlinkage long
-sys_delete_module(const char __user *name_user, unsigned int flags)
+SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
+		unsigned int, flags)
 {
 	struct module *mod;
 	char name[MODULE_NAME_LEN];
@@ -816,6 +817,7 @@
 		mod->exit();
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_GOING, mod);
+	async_synchronize_full();
 	mutex_lock(&module_mutex);
 	/* Store the name of the last unloaded module for diagnostic purposes */
 	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
@@ -2294,10 +2296,8 @@
 }
 
 /* This is where the real work happens */
-asmlinkage long
-sys_init_module(void __user *umod,
-		unsigned long len,
-		const char __user *uargs)
+SYSCALL_DEFINE3(init_module, void __user *, umod,
+		unsigned long, len, const char __user *, uargs)
 {
 	struct module *mod;
 	int ret = 0;
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
index 43c2111..78bc3fd 100644
--- a/kernel/ns_cgroup.c
+++ b/kernel/ns_cgroup.c
@@ -13,7 +13,6 @@
 
 struct ns_cgroup {
 	struct cgroup_subsys_state css;
-	spinlock_t lock;
 };
 
 struct cgroup_subsys ns_subsys;
@@ -84,7 +83,6 @@
 	ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL);
 	if (!ns_cgroup)
 		return ERR_PTR(-ENOMEM);
-	spin_lock_init(&ns_cgroup->lock);
 	return &ns_cgroup->css;
 }
 
diff --git a/kernel/pid.c b/kernel/pid.c
index 064e76a..1b3586f 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -474,8 +474,14 @@
 }
 EXPORT_SYMBOL(task_session_nr_ns);
 
+struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
+{
+	return ns_of_pid(task_pid(tsk));
+}
+EXPORT_SYMBOL_GPL(task_active_pid_ns);
+
 /*
- * Used by proc to find the first pid that is greater then or equal to nr.
+ * Used by proc to find the first pid that is greater than or equal to nr.
  *
  * If there is a pid at nr this function is exactly the same as find_pid_ns.
  */
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 887c637..052ec4d 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -477,10 +477,9 @@
 
 /* Create a POSIX.1b interval timer. */
 
-asmlinkage long
-sys_timer_create(const clockid_t which_clock,
-		 struct sigevent __user *timer_event_spec,
-		 timer_t __user * created_timer_id)
+SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+		struct sigevent __user *, timer_event_spec,
+		timer_t __user *, created_timer_id)
 {
 	struct k_itimer *new_timer;
 	int error, new_timer_id;
@@ -661,8 +660,8 @@
 }
 
 /* Get the time remaining on a POSIX.1b interval timer. */
-asmlinkage long
-sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
+SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
+		struct itimerspec __user *, setting)
 {
 	struct k_itimer *timr;
 	struct itimerspec cur_setting;
@@ -691,8 +690,7 @@
  * the call back to do_schedule_next_timer().  So all we need to do is
  * to pick up the frozen overrun.
  */
-asmlinkage long
-sys_timer_getoverrun(timer_t timer_id)
+SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
 {
 	struct k_itimer *timr;
 	int overrun;
@@ -760,10 +758,9 @@
 }
 
 /* Set a POSIX.1b interval timer */
-asmlinkage long
-sys_timer_settime(timer_t timer_id, int flags,
-		  const struct itimerspec __user *new_setting,
-		  struct itimerspec __user *old_setting)
+SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
+		const struct itimerspec __user *, new_setting,
+		struct itimerspec __user *, old_setting)
 {
 	struct k_itimer *timr;
 	struct itimerspec new_spec, old_spec;
@@ -816,8 +813,7 @@
 }
 
 /* Delete a POSIX.1b interval timer. */
-asmlinkage long
-sys_timer_delete(timer_t timer_id)
+SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
 {
 	struct k_itimer *timer;
 	unsigned long flags;
@@ -903,8 +899,8 @@
 }
 EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
 
-asmlinkage long sys_clock_settime(const clockid_t which_clock,
-				  const struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
+		const struct timespec __user *, tp)
 {
 	struct timespec new_tp;
 
@@ -916,8 +912,8 @@
 	return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
 }
 
-asmlinkage long
-sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
+		struct timespec __user *, tp)
 {
 	struct timespec kernel_tp;
 	int error;
@@ -933,8 +929,8 @@
 
 }
 
-asmlinkage long
-sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
+		struct timespec __user *, tp)
 {
 	struct timespec rtn_tp;
 	int error;
@@ -963,10 +959,9 @@
 				 which_clock);
 }
 
-asmlinkage long
-sys_clock_nanosleep(const clockid_t which_clock, int flags,
-		    const struct timespec __user *rqtp,
-		    struct timespec __user *rmtp)
+SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
+		const struct timespec __user *, rqtp,
+		struct timespec __user *, rmtp)
 {
 	struct timespec t;
 
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 597823b..d7a1016 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -4,7 +4,8 @@
 endif
 
 obj-y				:= main.o
-obj-$(CONFIG_PM_SLEEP)		+= process.o console.o
+obj-$(CONFIG_PM_SLEEP)		+= console.o
+obj-$(CONFIG_FREEZER)		+= process.o
 obj-$(CONFIG_HIBERNATION)	+= swsusp.o disk.o snapshot.o swap.o user.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index f77d381..45e8541 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -258,12 +258,12 @@
 {
 	int error;
 
-	/* Free memory before shutting down devices. */
-	error = swsusp_shrink_memory();
+	error = platform_begin(platform_mode);
 	if (error)
 		return error;
 
-	error = platform_begin(platform_mode);
+	/* Free memory before shutting down devices. */
+	error = swsusp_shrink_memory();
 	if (error)
 		goto Close;
 
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5d2ab83..f5fc2d7 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -25,6 +25,7 @@
 #include <linux/syscalls.h>
 #include <linux/console.h>
 #include <linux/highmem.h>
+#include <linux/list.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -192,12 +193,6 @@
 	return ret;
 }
 
-static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
-{
-	free_list_of_pages(ca->chain, clear_page_nosave);
-	memset(ca, 0, sizeof(struct chain_allocator));
-}
-
 /**
  *	Data types related to memory bitmaps.
  *
@@ -233,7 +228,7 @@
 #define BM_BITS_PER_BLOCK	(PAGE_SIZE << 3)
 
 struct bm_block {
-	struct bm_block *next;		/* next element of the list */
+	struct list_head hook;	/* hook into a list of bitmap blocks */
 	unsigned long start_pfn;	/* pfn represented by the first bit */
 	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
 	unsigned long *data;	/* bitmap representing pages */
@@ -244,24 +239,15 @@
 	return bb->end_pfn - bb->start_pfn;
 }
 
-struct zone_bitmap {
-	struct zone_bitmap *next;	/* next element of the list */
-	unsigned long start_pfn;	/* minimal pfn in this zone */
-	unsigned long end_pfn;		/* maximal pfn in this zone plus 1 */
-	struct bm_block *bm_blocks;	/* list of bitmap blocks */
-	struct bm_block *cur_block;	/* recently used bitmap block */
-};
-
 /* struct bm_position is used for browsing memory bitmaps */
 
 struct bm_position {
-	struct zone_bitmap *zone_bm;
 	struct bm_block *block;
 	int bit;
 };
 
 struct memory_bitmap {
-	struct zone_bitmap *zone_bm_list;	/* list of zone bitmaps */
+	struct list_head blocks;	/* list of bitmap blocks */
 	struct linked_page *p_list;	/* list of pages used to store zone
 					 * bitmap objects and bitmap block
 					 * objects
@@ -273,11 +259,7 @@
 
 static void memory_bm_position_reset(struct memory_bitmap *bm)
 {
-	struct zone_bitmap *zone_bm;
-
-	zone_bm = bm->zone_bm_list;
-	bm->cur.zone_bm = zone_bm;
-	bm->cur.block = zone_bm->bm_blocks;
+	bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
 	bm->cur.bit = 0;
 }
 
@@ -285,151 +267,184 @@
 
 /**
  *	create_bm_block_list - create a list of block bitmap objects
+ *	@pages - number of pages to be covered by the allocated bitmap blocks
+ *	@list - list to put the allocated blocks into
+ *	@ca - chain allocator to be used for allocating memory
  */
-
-static inline struct bm_block *
-create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca)
+static int create_bm_block_list(unsigned long pages,
+				struct list_head *list,
+				struct chain_allocator *ca)
 {
-	struct bm_block *bblist = NULL;
+	unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
 
 	while (nr_blocks-- > 0) {
 		struct bm_block *bb;
 
 		bb = chain_alloc(ca, sizeof(struct bm_block));
 		if (!bb)
-			return NULL;
-
-		bb->next = bblist;
-		bblist = bb;
+			return -ENOMEM;
+		list_add(&bb->hook, list);
 	}
-	return bblist;
+
+	return 0;
+}
+
+struct mem_extent {
+	struct list_head hook;
+	unsigned long start;
+	unsigned long end;
+};
+
+/**
+ *	free_mem_extents - free a list of memory extents
+ *	@list - list of extents to empty
+ */
+static void free_mem_extents(struct list_head *list)
+{
+	struct mem_extent *ext, *aux;
+
+	list_for_each_entry_safe(ext, aux, list, hook) {
+		list_del(&ext->hook);
+		kfree(ext);
+	}
 }
 
 /**
- *	create_zone_bm_list - create a list of zone bitmap objects
+ *	create_mem_extents - create a list of memory extents representing
+ *	                     contiguous ranges of PFNs
+ *	@list - list to put the extents into
+ *	@gfp_mask - mask to use for memory allocations
  */
-
-static inline struct zone_bitmap *
-create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca)
+static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
 {
-	struct zone_bitmap *zbmlist = NULL;
+	struct zone *zone;
 
-	while (nr_zones-- > 0) {
-		struct zone_bitmap *zbm;
+	INIT_LIST_HEAD(list);
 
-		zbm = chain_alloc(ca, sizeof(struct zone_bitmap));
-		if (!zbm)
-			return NULL;
+	for_each_zone(zone) {
+		unsigned long zone_start, zone_end;
+		struct mem_extent *ext, *cur, *aux;
 
-		zbm->next = zbmlist;
-		zbmlist = zbm;
+		if (!populated_zone(zone))
+			continue;
+
+		zone_start = zone->zone_start_pfn;
+		zone_end = zone->zone_start_pfn + zone->spanned_pages;
+
+		list_for_each_entry(ext, list, hook)
+			if (zone_start <= ext->end)
+				break;
+
+		if (&ext->hook == list || zone_end < ext->start) {
+			/* New extent is necessary */
+			struct mem_extent *new_ext;
+
+			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
+			if (!new_ext) {
+				free_mem_extents(list);
+				return -ENOMEM;
+			}
+			new_ext->start = zone_start;
+			new_ext->end = zone_end;
+			list_add_tail(&new_ext->hook, &ext->hook);
+			continue;
+		}
+
+		/* Merge this zone's range of PFNs with the existing one */
+		if (zone_start < ext->start)
+			ext->start = zone_start;
+		if (zone_end > ext->end)
+			ext->end = zone_end;
+
+		/* More merging may be possible */
+		cur = ext;
+		list_for_each_entry_safe_continue(cur, aux, list, hook) {
+			if (zone_end < cur->start)
+				break;
+			if (zone_end < cur->end)
+				ext->end = cur->end;
+			list_del(&cur->hook);
+			kfree(cur);
+		}
 	}
-	return zbmlist;
+
+	return 0;
 }
 
 /**
   *	memory_bm_create - allocate memory for a memory bitmap
   */
-
 static int
 memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
 {
 	struct chain_allocator ca;
-	struct zone *zone;
-	struct zone_bitmap *zone_bm;
-	struct bm_block *bb;
-	unsigned int nr;
+	struct list_head mem_extents;
+	struct mem_extent *ext;
+	int error;
 
 	chain_init(&ca, gfp_mask, safe_needed);
+	INIT_LIST_HEAD(&bm->blocks);
 
-	/* Compute the number of zones */
-	nr = 0;
-	for_each_zone(zone)
-		if (populated_zone(zone))
-			nr++;
+	error = create_mem_extents(&mem_extents, gfp_mask);
+	if (error)
+		return error;
 
-	/* Allocate the list of zones bitmap objects */
-	zone_bm = create_zone_bm_list(nr, &ca);
-	bm->zone_bm_list = zone_bm;
-	if (!zone_bm) {
-		chain_free(&ca, PG_UNSAFE_CLEAR);
-		return -ENOMEM;
-	}
+	list_for_each_entry(ext, &mem_extents, hook) {
+		struct bm_block *bb;
+		unsigned long pfn = ext->start;
+		unsigned long pages = ext->end - ext->start;
 
-	/* Initialize the zone bitmap objects */
-	for_each_zone(zone) {
-		unsigned long pfn;
+		bb = list_entry(bm->blocks.prev, struct bm_block, hook);
 
-		if (!populated_zone(zone))
-			continue;
+		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
+		if (error)
+			goto Error;
 
-		zone_bm->start_pfn = zone->zone_start_pfn;
-		zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
-		/* Allocate the list of bitmap block objects */
-		nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
-		bb = create_bm_block_list(nr, &ca);
-		zone_bm->bm_blocks = bb;
-		zone_bm->cur_block = bb;
-		if (!bb)
-			goto Free;
-
-		nr = zone->spanned_pages;
-		pfn = zone->zone_start_pfn;
-		/* Initialize the bitmap block objects */
-		while (bb) {
-			unsigned long *ptr;
-
-			ptr = get_image_page(gfp_mask, safe_needed);
-			bb->data = ptr;
-			if (!ptr)
-				goto Free;
+		list_for_each_entry_continue(bb, &bm->blocks, hook) {
+			bb->data = get_image_page(gfp_mask, safe_needed);
+			if (!bb->data) {
+				error = -ENOMEM;
+				goto Error;
+			}
 
 			bb->start_pfn = pfn;
-			if (nr >= BM_BITS_PER_BLOCK) {
+			if (pages >= BM_BITS_PER_BLOCK) {
 				pfn += BM_BITS_PER_BLOCK;
-				nr -= BM_BITS_PER_BLOCK;
+				pages -= BM_BITS_PER_BLOCK;
 			} else {
 				/* This is executed only once in the loop */
-				pfn += nr;
+				pfn += pages;
 			}
 			bb->end_pfn = pfn;
-			bb = bb->next;
 		}
-		zone_bm = zone_bm->next;
 	}
+
 	bm->p_list = ca.chain;
 	memory_bm_position_reset(bm);
-	return 0;
+ Exit:
+	free_mem_extents(&mem_extents);
+	return error;
 
- Free:
+ Error:
 	bm->p_list = ca.chain;
 	memory_bm_free(bm, PG_UNSAFE_CLEAR);
-	return -ENOMEM;
+	goto Exit;
 }
 
 /**
   *	memory_bm_free - free memory occupied by the memory bitmap @bm
   */
-
 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 {
-	struct zone_bitmap *zone_bm;
+	struct bm_block *bb;
 
-	/* Free the list of bit blocks for each zone_bitmap object */
-	zone_bm = bm->zone_bm_list;
-	while (zone_bm) {
-		struct bm_block *bb;
+	list_for_each_entry(bb, &bm->blocks, hook)
+		if (bb->data)
+			free_image_page(bb->data, clear_nosave_free);
 
-		bb = zone_bm->bm_blocks;
-		while (bb) {
-			if (bb->data)
-				free_image_page(bb->data, clear_nosave_free);
-			bb = bb->next;
-		}
-		zone_bm = zone_bm->next;
-	}
 	free_list_of_pages(bm->p_list, clear_nosave_free);
-	bm->zone_bm_list = NULL;
+
+	INIT_LIST_HEAD(&bm->blocks);
 }
 
 /**
@@ -437,38 +452,33 @@
  *	to given pfn.  The cur_zone_bm member of @bm and the cur_block member
  *	of @bm->cur_zone_bm are updated.
  */
-
 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
 				void **addr, unsigned int *bit_nr)
 {
-	struct zone_bitmap *zone_bm;
 	struct bm_block *bb;
 
-	/* Check if the pfn is from the current zone */
-	zone_bm = bm->cur.zone_bm;
-	if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
-		zone_bm = bm->zone_bm_list;
-		/* We don't assume that the zones are sorted by pfns */
-		while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
-			zone_bm = zone_bm->next;
-
-			if (!zone_bm)
-				return -EFAULT;
-		}
-		bm->cur.zone_bm = zone_bm;
-	}
-	/* Check if the pfn corresponds to the current bitmap block */
-	bb = zone_bm->cur_block;
+	/*
+	 * Check if the pfn corresponds to the current bitmap block and find
+	 * the block where it fits if this is not the case.
+	 */
+	bb = bm->cur.block;
 	if (pfn < bb->start_pfn)
-		bb = zone_bm->bm_blocks;
+		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
+			if (pfn >= bb->start_pfn)
+				break;
 
-	while (pfn >= bb->end_pfn) {
-		bb = bb->next;
+	if (pfn >= bb->end_pfn)
+		list_for_each_entry_continue(bb, &bm->blocks, hook)
+			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
+				break;
 
-		BUG_ON(!bb);
-	}
-	zone_bm->cur_block = bb;
+	if (&bb->hook == &bm->blocks)
+		return -EFAULT;
+
+	/* The block has been found */
+	bm->cur.block = bb;
 	pfn -= bb->start_pfn;
+	bm->cur.bit = pfn + 1;
 	*bit_nr = pfn;
 	*addr = bb->data;
 	return 0;
@@ -519,6 +529,14 @@
 	return test_bit(bit, addr);
 }
 
+static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
+{
+	void *addr;
+	unsigned int bit;
+
+	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
+}
+
 /**
  *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
  *	in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
@@ -530,29 +548,21 @@
 
 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 {
-	struct zone_bitmap *zone_bm;
 	struct bm_block *bb;
 	int bit;
 
+	bb = bm->cur.block;
 	do {
-		bb = bm->cur.block;
-		do {
-			bit = bm->cur.bit;
-			bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
-			if (bit < bm_block_bits(bb))
-				goto Return_pfn;
+		bit = bm->cur.bit;
+		bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
+		if (bit < bm_block_bits(bb))
+			goto Return_pfn;
 
-			bb = bb->next;
-			bm->cur.block = bb;
-			bm->cur.bit = 0;
-		} while (bb);
-		zone_bm = bm->cur.zone_bm->next;
-		if (zone_bm) {
-			bm->cur.zone_bm = zone_bm;
-			bm->cur.block = zone_bm->bm_blocks;
-			bm->cur.bit = 0;
-		}
-	} while (zone_bm);
+		bb = list_entry(bb->hook.next, struct bm_block, hook);
+		bm->cur.block = bb;
+		bm->cur.bit = 0;
+	} while (&bb->hook != &bm->blocks);
+
 	memory_bm_position_reset(bm);
 	return BM_END_OF_MAP;
 
@@ -808,8 +818,7 @@
  *	We should save the page if it isn't Nosave or NosaveFree, or Reserved,
  *	and it isn't a part of a free chunk of pages.
  */
-
-static struct page *saveable_highmem_page(unsigned long pfn)
+static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
 {
 	struct page *page;
 
@@ -817,6 +826,8 @@
 		return NULL;
 
 	page = pfn_to_page(pfn);
+	if (page_zone(page) != zone)
+		return NULL;
 
 	BUG_ON(!PageHighMem(page));
 
@@ -846,13 +857,16 @@
 		mark_free_pages(zone);
 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			if (saveable_highmem_page(pfn))
+			if (saveable_highmem_page(zone, pfn))
 				n++;
 	}
 	return n;
 }
 #else
-static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
+static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
+{
+	return NULL;
+}
 #endif /* CONFIG_HIGHMEM */
 
 /**
@@ -863,8 +877,7 @@
  *	of pages statically defined as 'unsaveable', and it isn't a part of
  *	a free chunk of pages.
  */
-
-static struct page *saveable_page(unsigned long pfn)
+static struct page *saveable_page(struct zone *zone, unsigned long pfn)
 {
 	struct page *page;
 
@@ -872,6 +885,8 @@
 		return NULL;
 
 	page = pfn_to_page(pfn);
+	if (page_zone(page) != zone)
+		return NULL;
 
 	BUG_ON(PageHighMem(page));
 
@@ -903,7 +918,7 @@
 		mark_free_pages(zone);
 		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			if(saveable_page(pfn))
+			if (saveable_page(zone, pfn))
 				n++;
 	}
 	return n;
@@ -944,7 +959,7 @@
 page_is_saveable(struct zone *zone, unsigned long pfn)
 {
 	return is_highmem(zone) ?
-			saveable_highmem_page(pfn) : saveable_page(pfn);
+		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
 }
 
 static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
@@ -966,7 +981,7 @@
 			 * data modified by kmap_atomic()
 			 */
 			safe_copy_page(buffer, s_page);
-			dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0);
+			dst = kmap_atomic(d_page, KM_USER0);
 			memcpy(dst, buffer, PAGE_SIZE);
 			kunmap_atomic(dst, KM_USER0);
 		} else {
@@ -975,7 +990,7 @@
 	}
 }
 #else
-#define page_is_saveable(zone, pfn)	saveable_page(pfn)
+#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
 
 static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
 {
@@ -1459,9 +1474,7 @@
  *	unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
  *	the corresponding bit in the memory bitmap @bm
  */
-
-static inline void
-unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
+static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
 {
 	int j;
 
@@ -1469,8 +1482,13 @@
 		if (unlikely(buf[j] == BM_END_OF_MAP))
 			break;
 
-		memory_bm_set_bit(bm, buf[j]);
+		if (memory_bm_pfn_present(bm, buf[j]))
+			memory_bm_set_bit(bm, buf[j]);
+		else
+			return -EFAULT;
 	}
+
+	return 0;
 }
 
 /* List of "safe" pages that may be used to store data loaded from the suspend
@@ -1608,7 +1626,7 @@
 	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
 	if (!pbe) {
 		swsusp_free();
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 	pbe->orig_page = page;
 	if (safe_highmem_pages > 0) {
@@ -1677,7 +1695,7 @@
 static inline void *
 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
 {
-	return NULL;
+	return ERR_PTR(-EINVAL);
 }
 
 static inline void copy_last_highmem_page(void) {}
@@ -1788,8 +1806,13 @@
 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
 {
 	struct pbe *pbe;
-	struct page *page = pfn_to_page(memory_bm_next_pfn(bm));
+	struct page *page;
+	unsigned long pfn = memory_bm_next_pfn(bm);
 
+	if (pfn == BM_END_OF_MAP)
+		return ERR_PTR(-EFAULT);
+
+	page = pfn_to_page(pfn);
 	if (PageHighMem(page))
 		return get_highmem_page_buffer(page, ca);
 
@@ -1805,7 +1828,7 @@
 	pbe = chain_alloc(ca, sizeof(struct pbe));
 	if (!pbe) {
 		swsusp_free();
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 	pbe->orig_address = page_address(page);
 	pbe->address = safe_pages_list;
@@ -1868,7 +1891,10 @@
 				return error;
 
 		} else if (handle->prev <= nr_meta_pages) {
-			unpack_orig_pfns(buffer, &copy_bm);
+			error = unpack_orig_pfns(buffer, &copy_bm);
+			if (error)
+				return error;
+
 			if (handle->prev == nr_meta_pages) {
 				error = prepare_image(&orig_bm, &copy_bm);
 				if (error)
@@ -1879,12 +1905,14 @@
 				restore_pblist = NULL;
 				handle->buffer = get_buffer(&orig_bm, &ca);
 				handle->sync_read = 0;
-				if (!handle->buffer)
-					return -ENOMEM;
+				if (IS_ERR(handle->buffer))
+					return PTR_ERR(handle->buffer);
 			}
 		} else {
 			copy_last_highmem_page();
 			handle->buffer = get_buffer(&orig_bm, &ca);
+			if (IS_ERR(handle->buffer))
+				return PTR_ERR(handle->buffer);
 			if (handle->buffer != buffer)
 				handle->sync_read = 0;
 		}
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 023ff2a..a92c914 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -262,3 +262,125 @@
 
 	return 0;
 }
+
+/*
+ * Platforms, like ACPI, may want us to save some memory used by them during
+ * hibernation and to restore the contents of this memory during the subsequent
+ * resume.  The code below implements a mechanism allowing us to do that.
+ */
+
+struct nvs_page {
+	unsigned long phys_start;
+	unsigned int size;
+	void *kaddr;
+	void *data;
+	struct list_head node;
+};
+
+static LIST_HEAD(nvs_list);
+
+/**
+ *	hibernate_nvs_register - register platform NVS memory region to save
+ *	@start - physical address of the region
+ *	@size - size of the region
+ *
+ *	The NVS region need not be page-aligned at either end, and we arrange
+ *	things so that the data from page-aligned addresses in this region will
+ *	be copied into separate RAM pages.
+ */
+int hibernate_nvs_register(unsigned long start, unsigned long size)
+{
+	struct nvs_page *entry, *next;
+
+	while (size > 0) {
+		unsigned int nr_bytes;
+
+		entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
+		if (!entry)
+			goto Error;
+
+		list_add_tail(&entry->node, &nvs_list);
+		entry->phys_start = start;
+		nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
+		entry->size = (size < nr_bytes) ? size : nr_bytes;
+
+		start += entry->size;
+		size -= entry->size;
+	}
+	return 0;
+
+ Error:
+	list_for_each_entry_safe(entry, next, &nvs_list, node) {
+		list_del(&entry->node);
+		kfree(entry);
+	}
+	return -ENOMEM;
+}
+
+/**
+ *	hibernate_nvs_free - free data pages allocated for saving NVS regions
+ */
+void hibernate_nvs_free(void)
+{
+	struct nvs_page *entry;
+
+	list_for_each_entry(entry, &nvs_list, node)
+		if (entry->data) {
+			free_page((unsigned long)entry->data);
+			entry->data = NULL;
+			if (entry->kaddr) {
+				iounmap(entry->kaddr);
+				entry->kaddr = NULL;
+			}
+		}
+}
+
+/**
+ *	hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
+ */
+int hibernate_nvs_alloc(void)
+{
+	struct nvs_page *entry;
+
+	list_for_each_entry(entry, &nvs_list, node) {
+		entry->data = (void *)__get_free_page(GFP_KERNEL);
+		if (!entry->data) {
+			hibernate_nvs_free();
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+/**
+ *	hibernate_nvs_save - save NVS memory regions
+ */
+void hibernate_nvs_save(void)
+{
+	struct nvs_page *entry;
+
+	printk(KERN_INFO "PM: Saving platform NVS memory\n");
+
+	list_for_each_entry(entry, &nvs_list, node)
+		if (entry->data) {
+			entry->kaddr = ioremap(entry->phys_start, entry->size);
+			memcpy(entry->data, entry->kaddr, entry->size);
+		}
+}
+
+/**
+ *	hibernate_nvs_restore - restore NVS memory regions
+ *
+ *	This function is going to be called with interrupts disabled, so it
+ *	cannot iounmap the virtual addresses used to access the NVS region.
+ */
+void hibernate_nvs_restore(void)
+{
+	struct nvs_page *entry;
+
+	printk(KERN_INFO "PM: Restoring platform NVS memory\n");
+
+	list_for_each_entry(entry, &nvs_list, node)
+		if (entry->data)
+			memcpy(entry->kaddr, entry->data, entry->size);
+}
diff --git a/kernel/printk.c b/kernel/printk.c
index e651ab0..69188f2 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -382,7 +382,7 @@
 	return error;
 }
 
-asmlinkage long sys_syslog(int type, char __user *buf, int len)
+SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
 {
 	return do_syslog(type, buf, len);
 }
@@ -619,7 +619,7 @@
 static const char recursion_bug_msg [] =
 		KERN_CRIT "BUG: recent printk recursion!\n";
 static int recursion_bug;
-	static int new_text_line = 1;
+static int new_text_line = 1;
 static char printk_buf[1024];
 
 asmlinkage int vprintk(const char *fmt, va_list args)
@@ -742,11 +742,6 @@
 
 #else
 
-asmlinkage long sys_syslog(int type, char __user *buf, int len)
-{
-	return -ENOSYS;
-}
-
 static void call_console_drivers(unsigned start, unsigned end)
 {
 }
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 29dc700..c9cf48b 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -574,7 +574,7 @@
 #define arch_ptrace_attach(child)	do { } while (0)
 #endif
 
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
 {
 	struct task_struct *child;
 	long ret;
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 1cff28d..7c4142a 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -136,29 +136,47 @@
 #endif
 int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
 
-#define FULLSTOP_SHUTDOWN 1	/* Bail due to system shutdown/panic. */
-#define FULLSTOP_CLEANUP  2	/* Orderly shutdown. */
-static int fullstop;		/* stop generating callbacks at test end. */
-DEFINE_MUTEX(fullstop_mutex);	/* protect fullstop transitions and */
-				/*  spawning of kthreads. */
+/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
+
+#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
+#define FULLSTOP_SHUTDOWN 1	/* System shutdown with rcutorture running. */
+#define FULLSTOP_RMMOD    2	/* Normal rmmod of rcutorture. */
+static int fullstop = FULLSTOP_RMMOD;
+DEFINE_MUTEX(fullstop_mutex);	/* Protect fullstop transitions and spawning */
+				/*  of kthreads. */
 
 /*
- * Detect and respond to a signal-based shutdown.
+ * Detect and respond to a system shutdown.
  */
 static int
 rcutorture_shutdown_notify(struct notifier_block *unused1,
 			   unsigned long unused2, void *unused3)
 {
-	if (fullstop)
-		return NOTIFY_DONE;
 	mutex_lock(&fullstop_mutex);
-	if (!fullstop)
+	if (fullstop == FULLSTOP_DONTSTOP)
 		fullstop = FULLSTOP_SHUTDOWN;
+	else
+		printk(KERN_WARNING /* but going down anyway, so... */
+		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
 	mutex_unlock(&fullstop_mutex);
 	return NOTIFY_DONE;
 }
 
 /*
+ * Absorb kthreads into a kernel function that won't return, so that
+ * they won't ever access module text or data again.
+ */
+static void rcutorture_shutdown_absorb(char *title)
+{
+	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
+		printk(KERN_NOTICE
+		       "rcutorture thread %s parking due to system shutdown\n",
+		       title);
+		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
+	}
+}
+
+/*
  * Allocate an element from the rcu_tortures pool.
  */
 static struct rcu_torture *
@@ -219,13 +237,14 @@
 }
 
 static void
-rcu_stutter_wait(void)
+rcu_stutter_wait(char *title)
 {
-	while ((stutter_pause_test || !rcutorture_runnable) && !fullstop) {
+	while (stutter_pause_test || !rcutorture_runnable) {
 		if (rcutorture_runnable)
 			schedule_timeout_interruptible(1);
 		else
 			schedule_timeout_interruptible(round_jiffies_relative(HZ));
+		rcutorture_shutdown_absorb(title);
 	}
 }
 
@@ -287,7 +306,7 @@
 	int i;
 	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
 
-	if (fullstop) {
+	if (fullstop != FULLSTOP_DONTSTOP) {
 		/* Test is ending, just drop callbacks on the floor. */
 		/* The next initialization will pick up the pieces. */
 		return;
@@ -619,10 +638,11 @@
 		}
 		rcu_torture_current_version++;
 		oldbatch = cur_ops->completed();
-		rcu_stutter_wait();
-	} while (!kthread_should_stop() && !fullstop);
+		rcu_stutter_wait("rcu_torture_writer");
+	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
 	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
-	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+	rcutorture_shutdown_absorb("rcu_torture_writer");
+	while (!kthread_should_stop())
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -643,11 +663,12 @@
 		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
 		udelay(rcu_random(&rand) & 0x3ff);
 		cur_ops->sync();
-		rcu_stutter_wait();
-	} while (!kthread_should_stop() && !fullstop);
+		rcu_stutter_wait("rcu_torture_fakewriter");
+	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
 
 	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
-	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+	rcutorture_shutdown_absorb("rcu_torture_fakewriter");
+	while (!kthread_should_stop())
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -752,12 +773,13 @@
 		preempt_enable();
 		cur_ops->readunlock(idx);
 		schedule();
-		rcu_stutter_wait();
-	} while (!kthread_should_stop() && !fullstop);
+		rcu_stutter_wait("rcu_torture_reader");
+	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
 	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
+	rcutorture_shutdown_absorb("rcu_torture_reader");
 	if (irqreader && cur_ops->irqcapable)
 		del_timer_sync(&t);
-	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+	while (!kthread_should_stop())
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -854,7 +876,8 @@
 	do {
 		schedule_timeout_interruptible(stat_interval * HZ);
 		rcu_torture_stats_print();
-	} while (!kthread_should_stop() && !fullstop);
+		rcutorture_shutdown_absorb("rcu_torture_stats");
+	} while (!kthread_should_stop());
 	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
 	return 0;
 }
@@ -866,52 +889,49 @@
  */
 static void rcu_torture_shuffle_tasks(void)
 {
-	cpumask_var_t tmp_mask;
+	cpumask_t tmp_mask;
 	int i;
 
-	if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
-		BUG();
-
-	cpumask_setall(tmp_mask);
+	cpus_setall(tmp_mask);
 	get_online_cpus();
 
 	/* No point in shuffling if there is only one online CPU (ex: UP) */
-	if (num_online_cpus() == 1)
-		goto out;
+	if (num_online_cpus() == 1) {
+		put_online_cpus();
+		return;
+	}
 
 	if (rcu_idle_cpu != -1)
-		cpumask_clear_cpu(rcu_idle_cpu, tmp_mask);
+		cpu_clear(rcu_idle_cpu, tmp_mask);
 
-	set_cpus_allowed_ptr(current, tmp_mask);
+	set_cpus_allowed_ptr(current, &tmp_mask);
 
 	if (reader_tasks) {
 		for (i = 0; i < nrealreaders; i++)
 			if (reader_tasks[i])
 				set_cpus_allowed_ptr(reader_tasks[i],
-						     tmp_mask);
+						     &tmp_mask);
 	}
 
 	if (fakewriter_tasks) {
 		for (i = 0; i < nfakewriters; i++)
 			if (fakewriter_tasks[i])
 				set_cpus_allowed_ptr(fakewriter_tasks[i],
-						     tmp_mask);
+						     &tmp_mask);
 	}
 
 	if (writer_task)
-		set_cpus_allowed_ptr(writer_task, tmp_mask);
+		set_cpus_allowed_ptr(writer_task, &tmp_mask);
 
 	if (stats_task)
-		set_cpus_allowed_ptr(stats_task, tmp_mask);
+		set_cpus_allowed_ptr(stats_task, &tmp_mask);
 
 	if (rcu_idle_cpu == -1)
 		rcu_idle_cpu = num_online_cpus() - 1;
 	else
 		rcu_idle_cpu--;
 
-out:
 	put_online_cpus();
-	free_cpumask_var(tmp_mask);
 }
 
 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
@@ -925,7 +945,8 @@
 	do {
 		schedule_timeout_interruptible(shuffle_interval * HZ);
 		rcu_torture_shuffle_tasks();
-	} while (!kthread_should_stop() && !fullstop);
+		rcutorture_shutdown_absorb("rcu_torture_shuffle");
+	} while (!kthread_should_stop());
 	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
 	return 0;
 }
@@ -940,10 +961,11 @@
 	do {
 		schedule_timeout_interruptible(stutter * HZ);
 		stutter_pause_test = 1;
-		if (!kthread_should_stop() && !fullstop)
+		if (!kthread_should_stop())
 			schedule_timeout_interruptible(stutter * HZ);
 		stutter_pause_test = 0;
-	} while (!kthread_should_stop() && !fullstop);
+		rcutorture_shutdown_absorb("rcu_torture_stutter");
+	} while (!kthread_should_stop());
 	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
 	return 0;
 }
@@ -970,15 +992,16 @@
 	int i;
 
 	mutex_lock(&fullstop_mutex);
-	if (!fullstop) {
-		/* If being signaled, let it happen, then exit. */
+	if (fullstop == FULLSTOP_SHUTDOWN) {
+		printk(KERN_WARNING /* but going down anyway, so... */
+		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
 		mutex_unlock(&fullstop_mutex);
-		schedule_timeout_interruptible(10 * HZ);
+		schedule_timeout_uninterruptible(10);
 		if (cur_ops->cb_barrier != NULL)
 			cur_ops->cb_barrier();
 		return;
 	}
-	fullstop = FULLSTOP_CLEANUP;
+	fullstop = FULLSTOP_RMMOD;
 	mutex_unlock(&fullstop_mutex);
 	unregister_reboot_notifier(&rcutorture_nb);
 	if (stutter_task) {
@@ -1078,7 +1101,7 @@
 	else
 		nrealreaders = 2 * num_online_cpus();
 	rcu_torture_print_module_parms("Start of test");
-	fullstop = 0;
+	fullstop = FULLSTOP_DONTSTOP;
 
 	/* Set up the freelist. */
 
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index f275c8e..bf8e753 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -15,10 +15,11 @@
 #include <linux/uaccess.h>
 #include <linux/mm.h>
 
-void res_counter_init(struct res_counter *counter)
+void res_counter_init(struct res_counter *counter, struct res_counter *parent)
 {
 	spin_lock_init(&counter->lock);
 	counter->limit = (unsigned long long)LLONG_MAX;
+	counter->parent = parent;
 }
 
 int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
@@ -34,14 +35,34 @@
 	return 0;
 }
 
-int res_counter_charge(struct res_counter *counter, unsigned long val)
+int res_counter_charge(struct res_counter *counter, unsigned long val,
+			struct res_counter **limit_fail_at)
 {
 	int ret;
 	unsigned long flags;
+	struct res_counter *c, *u;
 
-	spin_lock_irqsave(&counter->lock, flags);
-	ret = res_counter_charge_locked(counter, val);
-	spin_unlock_irqrestore(&counter->lock, flags);
+	*limit_fail_at = NULL;
+	local_irq_save(flags);
+	for (c = counter; c != NULL; c = c->parent) {
+		spin_lock(&c->lock);
+		ret = res_counter_charge_locked(c, val);
+		spin_unlock(&c->lock);
+		if (ret < 0) {
+			*limit_fail_at = c;
+			goto undo;
+		}
+	}
+	ret = 0;
+	goto done;
+undo:
+	for (u = counter; u != c; u = u->parent) {
+		spin_lock(&u->lock);
+		res_counter_uncharge_locked(u, val);
+		spin_unlock(&u->lock);
+	}
+done:
+	local_irq_restore(flags);
 	return ret;
 }
 
@@ -56,10 +77,15 @@
 void res_counter_uncharge(struct res_counter *counter, unsigned long val)
 {
 	unsigned long flags;
+	struct res_counter *c;
 
-	spin_lock_irqsave(&counter->lock, flags);
-	res_counter_uncharge_locked(counter, val);
-	spin_unlock_irqrestore(&counter->lock, flags);
+	local_irq_save(flags);
+	for (c = counter; c != NULL; c = c->parent) {
+		spin_lock(&c->lock);
+		res_counter_uncharge_locked(c, val);
+		spin_unlock(&c->lock);
+	}
+	local_irq_restore(flags);
 }
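
Editor's sketch: the hierarchical res_counter_charge() above charges every counter from the child up to the root and rolls back on failure, reporting the counter that hit its limit. A minimal caller sketch, assuming a hypothetical two-level hierarchy (none of the names below come from this patch):

#include <linux/res_counter.h>

static struct res_counter parent_cnt, child_cnt;

static int example_charge(unsigned long nr_bytes)
{
	struct res_counter *limit_fail_at;
	int ret;

	res_counter_init(&parent_cnt, NULL);
	res_counter_init(&child_cnt, &parent_cnt);

	ret = res_counter_charge(&child_cnt, nr_bytes, &limit_fail_at);
	if (ret)
		/* limit_fail_at names the ancestor that rejected the charge */
		return ret;

	res_counter_uncharge(&child_cnt, nr_bytes);
	return 0;
}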
 
 
diff --git a/kernel/resource.c b/kernel/resource.c
index e633106..fd5d7d5 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -620,10 +620,11 @@
  * @start: resource start address
  * @n: resource region size
  * @name: reserving caller's ID string
+ * @flags: IO resource flags
  */
 struct resource * __request_region(struct resource *parent,
 				   resource_size_t start, resource_size_t n,
-				   const char *name)
+				   const char *name, int flags)
 {
 	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
 
@@ -634,6 +635,7 @@
 	res->start = start;
 	res->end = start + n - 1;
 	res->flags = IORESOURCE_BUSY;
+	res->flags |= flags;
 
 	write_lock(&resource_lock);
 
@@ -679,7 +681,7 @@
 {
 	struct resource * res;
 
-	res = __request_region(parent, start, n, "check-region");
+	res = __request_region(parent, start, n, "check-region", 0);
 	if (!res)
 		return -EBUSY;
 
@@ -776,7 +778,7 @@
 	dr->start = start;
 	dr->n = n;
 
-	res = __request_region(parent, start, n, name);
+	res = __request_region(parent, start, n, name, 0);
 	if (res)
 		devres_add(dev, dr);
 	else
@@ -876,3 +878,57 @@
 
 	return err;
 }
+
+#ifdef CONFIG_STRICT_DEVMEM
+static int strict_iomem_checks = 1;
+#else
+static int strict_iomem_checks;
+#endif
+
+/*
+ * check if an address is reserved in the iomem resource tree
+ * returns 1 if reserved, 0 if not reserved.
+ */
+int iomem_is_exclusive(u64 addr)
+{
+	struct resource *p = &iomem_resource;
+	int err = 0;
+	loff_t l;
+	int size = PAGE_SIZE;
+
+	if (!strict_iomem_checks)
+		return 0;
+
+	addr = addr & PAGE_MASK;
+
+	read_lock(&resource_lock);
+	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
+		/*
+		 * We can probably skip the resources without
+		 * IORESOURCE_IO attribute?
+		 */
+		if (p->start >= addr + size)
+			break;
+		if (p->end < addr)
+			continue;
+		if (p->flags & IORESOURCE_BUSY &&
+		     p->flags & IORESOURCE_EXCLUSIVE) {
+			err = 1;
+			break;
+		}
+	}
+	read_unlock(&resource_lock);
+
+	return err;
+}
+
+static int __init strict_iomem(char *str)
+{
+	if (strstr(str, "relaxed"))
+		strict_iomem_checks = 0;
+	if (strstr(str, "strict"))
+		strict_iomem_checks = 1;
+	return 1;
+}
+
+__setup("iomem=", strict_iomem);
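
Editor's sketch: taken together, the new 'flags' argument to __request_region() and iomem_is_exclusive() let a driver mark an MMIO range as off-limits to /dev/mem while strict checks are enabled. A hedged sketch of such a caller (the base address, size and device name below are made up, not from this patch):

#include <linux/ioport.h>

static int example_claim_mmio(void)
{
	struct resource *res;

	res = __request_region(&iomem_resource, 0xfe000000, 0x1000,
			       "example-device", IORESOURCE_EXCLUSIVE);
	if (!res)
		return -EBUSY;

	/* iomem_is_exclusive() now reports 1 for pages in this range
	 * while strict_iomem_checks is enabled. */
	return 0;
}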
diff --git a/kernel/sched.c b/kernel/sched.c
index 2e3545f..52bbf1c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -125,6 +125,9 @@
 DEFINE_TRACE(sched_migrate_task);
 
 #ifdef CONFIG_SMP
+
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
 /*
  * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
  * Since cpu_power is a 'constant', we can use a reciprocal divide.
@@ -1320,8 +1323,8 @@
  * slice expiry etc.
  */
 
-#define WEIGHT_IDLEPRIO		2
-#define WMULT_IDLEPRIO		(1 << 31)
+#define WEIGHT_IDLEPRIO		3
+#define WMULT_IDLEPRIO		1431655765
 
 /*
  * Nice levels are multiplicative, with a gentle 10% change for every
@@ -3728,8 +3731,13 @@
 		}
 
 		double_unlock_balance(this_rq, busiest);
+		/*
+		 * Should not call ttwu while holding a rq->lock
+		 */
+		spin_unlock(&this_rq->lock);
 		if (active_balance)
 			wake_up_process(busiest->migration_thread);
+		spin_lock(&this_rq->lock);
 
 	} else
 		sd->nr_balance_failed = 0;
@@ -4432,7 +4440,7 @@
 	/*
 	 * Underflow?
 	 */
-       if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
 		return;
 	/*
 	 * Is the spinlock portion underflowing?
@@ -5118,7 +5126,7 @@
  * sys_setpriority is a more generic, but much slower function that
  * does similar things.
  */
-asmlinkage long sys_nice(int increment)
+SYSCALL_DEFINE1(nice, int, increment)
 {
 	long nice, retval;
 
@@ -5425,8 +5433,8 @@
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  */
-asmlinkage long
-sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
+		struct sched_param __user *, param)
 {
 	/* negative values for policy are not valid */
 	if (policy < 0)
@@ -5440,7 +5448,7 @@
  * @pid: the pid in question.
  * @param: structure containing the new RT priority.
  */
-asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 {
 	return do_sched_setscheduler(pid, -1, param);
 }
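
Editor's sketch: the conversions in this file (and in kernel/signal.c, kernel/sys.c, kernel/timer.c and others below) replace open-coded asmlinkage definitions with the SYSCALL_DEFINEn macros. A rough illustration of what the macro produces, as an approximation rather than the literal expansion:

/* Without CONFIG_HAVE_SYSCALL_WRAPPERS, SYSCALL_DEFINE1(nice, int, increment)
 * still yields the familiar entry point: */
asmlinkage long sys_nice(int increment);

/* With wrappers enabled, arguments are first received as 'long' in a
 * SyS_nice() stub and explicitly narrowed, so 32-bit arguments are
 * sign-extended consistently on 64-bit architectures (compare the
 * open-coded SyS_fadvise64_64() wrapper in mm/fadvise.c below). */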
@@ -5449,7 +5457,7 @@
  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
  * @pid: the pid in question.
  */
-asmlinkage long sys_sched_getscheduler(pid_t pid)
+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 {
 	struct task_struct *p;
 	int retval;
@@ -5474,7 +5482,7 @@
  * @pid: the pid in question.
  * @param: structure containing the RT priority.
  */
-asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
 	struct sched_param lp;
 	struct task_struct *p;
@@ -5592,8 +5600,8 @@
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to the new cpu mask
  */
-asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
+		unsigned long __user *, user_mask_ptr)
 {
 	cpumask_var_t new_mask;
 	int retval;
@@ -5640,8 +5648,8 @@
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to hold the current cpu mask
  */
-asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+		unsigned long __user *, user_mask_ptr)
 {
 	int ret;
 	cpumask_var_t mask;
@@ -5670,7 +5678,7 @@
  * This function yields the current CPU to other tasks. If there are no
  * other threads running on this CPU then this function will return.
  */
-asmlinkage long sys_sched_yield(void)
+SYSCALL_DEFINE0(sched_yield)
 {
 	struct rq *rq = this_rq_lock();
 
@@ -5811,7 +5819,7 @@
  * this syscall returns the maximum rt_priority that can be used
  * by a given scheduling class.
  */
-asmlinkage long sys_sched_get_priority_max(int policy)
+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
 {
 	int ret = -EINVAL;
 
@@ -5836,7 +5844,7 @@
  * this syscall returns the minimum rt_priority that can be used
  * by a given scheduling class.
  */
-asmlinkage long sys_sched_get_priority_min(int policy)
+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
 {
 	int ret = -EINVAL;
 
@@ -5861,8 +5869,8 @@
  * this syscall writes the default timeslice value of a given process
  * into the user-space timespec buffer. A value of '0' means infinity.
  */
-asmlinkage
-long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
+		struct timespec __user *, interval)
 {
 	struct task_struct *p;
 	unsigned int time_slice;
@@ -7277,10 +7285,10 @@
  * groups, so roll our own. Now each node has its own list of groups which
  * gets dynamically allocated.
  */
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
 static struct sched_group ***sched_group_nodes_bycpu;
 
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
@@ -7555,7 +7563,7 @@
 #ifdef CONFIG_NUMA
 		if (cpumask_weight(cpu_map) >
 				SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
-			sd = &per_cpu(allnodes_domains, i);
+			sd = &per_cpu(allnodes_domains, i).sd;
 			SD_INIT(sd, ALLNODES);
 			set_domain_attribute(sd, attr);
 			cpumask_copy(sched_domain_span(sd), cpu_map);
@@ -7565,7 +7573,7 @@
 		} else
 			p = NULL;
 
-		sd = &per_cpu(node_domains, i);
+		sd = &per_cpu(node_domains, i).sd;
 		SD_INIT(sd, NODE);
 		set_domain_attribute(sd, attr);
 		sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
@@ -7683,7 +7691,7 @@
 		for_each_cpu(j, nodemask) {
 			struct sched_domain *sd;
 
-			sd = &per_cpu(node_domains, j);
+			sd = &per_cpu(node_domains, j).sd;
 			sd->groups = sg;
 		}
 		sg->__cpu_power = 0;
@@ -9042,6 +9050,13 @@
 		runtime = d->rt_runtime;
 	}
 
+#ifdef CONFIG_USER_SCHED
+	if (tg == &root_task_group) {
+		period = global_rt_period();
+		runtime = global_rt_runtime();
+	}
+#endif
+
 	/*
 	 * Cannot have more runtime than the period.
 	 */
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 4293cfa..16eeba4e 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -145,6 +145,19 @@
 	read_unlock_irqrestore(&tasklist_lock, flags);
 }
 
+#if defined(CONFIG_CGROUP_SCHED) && \
+	(defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED))
+static void task_group_path(struct task_group *tg, char *buf, int buflen)
+{
+	/* may be NULL if the underlying cgroup isn't fully-created yet */
+	if (!tg->css.cgroup) {
+		buf[0] = '\0';
+		return;
+	}
+	cgroup_path(tg->css.cgroup, buf, buflen);
+}
+#endif
+
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 {
 	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
@@ -154,10 +167,10 @@
 	unsigned long flags;
 
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
-	char path[128] = "";
+	char path[128];
 	struct task_group *tg = cfs_rq->tg;
 
-	cgroup_path(tg->css.cgroup, path, sizeof(path));
+	task_group_path(tg, path, sizeof(path));
 
 	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
 #elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
@@ -208,10 +221,10 @@
 void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 {
 #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
-	char path[128] = "";
+	char path[128];
 	struct task_group *tg = rt_rq->tg;
 
-	cgroup_path(tg->css.cgroup, path, sizeof(path));
+	task_group_path(tg, path, sizeof(path));
 
 	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path);
 #else
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e0c0b4b..5cc1c16 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -283,7 +283,7 @@
 						   struct sched_entity,
 						   run_node);
 
-		if (vruntime == cfs_rq->min_vruntime)
+		if (!cfs_rq->curr)
 			vruntime = se->vruntime;
 		else
 			vruntime = min_vruntime(vruntime, se->vruntime);
@@ -429,7 +429,10 @@
 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
 	for_each_sched_entity(se) {
-		struct load_weight *load = &cfs_rq->load;
+		struct load_weight *load;
+
+		cfs_rq = cfs_rq_of(se);
+		load = &cfs_rq->load;
 
 		if (unlikely(!se->on_rq)) {
 			struct load_weight lw = cfs_rq->load;
@@ -677,9 +680,13 @@
 			unsigned long thresh = sysctl_sched_latency;
 
 			/*
-			 * convert the sleeper threshold into virtual time
+			 * Convert the sleeper threshold into virtual time.
+			 * SCHED_IDLE is a special sub-class.  We care about
+			 * fairness only relative to other SCHED_IDLE tasks,
+			 * all of which have the same weight.
 			 */
-			if (sched_feat(NORMALIZED_SLEEPER))
+			if (sched_feat(NORMALIZED_SLEEPER) &&
+					task_of(se)->policy != SCHED_IDLE)
 				thresh = calc_delta_fair(thresh, se);
 
 			vruntime -= thresh;
@@ -1340,14 +1347,18 @@
 
 static void set_last_buddy(struct sched_entity *se)
 {
-	for_each_sched_entity(se)
-		cfs_rq_of(se)->last = se;
+	if (likely(task_of(se)->policy != SCHED_IDLE)) {
+		for_each_sched_entity(se)
+			cfs_rq_of(se)->last = se;
+	}
 }
 
 static void set_next_buddy(struct sched_entity *se)
 {
-	for_each_sched_entity(se)
-		cfs_rq_of(se)->next = se;
+	if (likely(task_of(se)->policy != SCHED_IDLE)) {
+		for_each_sched_entity(se)
+			cfs_rq_of(se)->next = se;
+	}
 }
 
 /*
@@ -1393,12 +1404,18 @@
 		return;
 
 	/*
-	 * Batch tasks do not preempt (their preemption is driven by
+	 * Batch and idle tasks do not preempt (their preemption is driven by
 	 * the tick):
 	 */
-	if (unlikely(p->policy == SCHED_BATCH))
+	if (unlikely(p->policy != SCHED_NORMAL))
 		return;
 
+	/* Idle tasks are by definition preempted by everybody. */
+	if (unlikely(curr->policy == SCHED_IDLE)) {
+		resched_task(curr);
+		return;
+	}
+
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
@@ -1617,8 +1634,6 @@
 	}
 }
 
-#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
-
 /*
  * Share the fairness runtime between parent and child, thus the
  * total amount of pressure for CPU stays equal - new tasks
diff --git a/kernel/signal.c b/kernel/signal.c
index 3152ac3..e737597 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1961,7 +1961,7 @@
  * System call entry points.
  */
 
-asmlinkage long sys_restart_syscall(void)
+SYSCALL_DEFINE0(restart_syscall)
 {
 	struct restart_block *restart = &current_thread_info()->restart_block;
 	return restart->fn(restart);
@@ -2014,8 +2014,8 @@
 	return error;
 }
 
-asmlinkage long
-sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
+SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
+		sigset_t __user *, oset, size_t, sigsetsize)
 {
 	int error = -EINVAL;
 	sigset_t old_set, new_set;
@@ -2074,8 +2074,7 @@
 	return error;
 }	
 
-asmlinkage long
-sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
+SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
 {
 	return do_sigpending(set, sigsetsize);
 }
@@ -2146,11 +2145,9 @@
 
 #endif
 
-asmlinkage long
-sys_rt_sigtimedwait(const sigset_t __user *uthese,
-		    siginfo_t __user *uinfo,
-		    const struct timespec __user *uts,
-		    size_t sigsetsize)
+SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
+		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
+		size_t, sigsetsize)
 {
 	int ret, sig;
 	sigset_t these;
@@ -2223,8 +2220,7 @@
 	return ret;
 }
 
-asmlinkage long
-sys_kill(pid_t pid, int sig)
+SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
 {
 	struct siginfo info;
 
@@ -2283,7 +2279,7 @@
  *  exists but it's not belonging to the target process anymore. This
  *  method solves the problem of threads exiting and PIDs getting reused.
  */
-asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
+SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
 {
 	/* This is only valid for single tasks */
 	if (pid <= 0 || tgid <= 0)
@@ -2295,8 +2291,7 @@
 /*
  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
  */
-asmlinkage long
-sys_tkill(pid_t pid, int sig)
+SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
 {
 	/* This is only valid for single tasks */
 	if (pid <= 0)
@@ -2305,8 +2300,8 @@
 	return do_tkill(0, pid, sig);
 }
 
-asmlinkage long
-sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
+SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
+		siginfo_t __user *, uinfo)
 {
 	siginfo_t info;
 
@@ -2434,8 +2429,7 @@
 
 #ifdef __ARCH_WANT_SYS_SIGPENDING
 
-asmlinkage long
-sys_sigpending(old_sigset_t __user *set)
+SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
 {
 	return do_sigpending(set, sizeof(*set));
 }
@@ -2446,8 +2440,8 @@
 /* Some platforms have their own version with special arguments others
    support only sys_rt_sigprocmask.  */
 
-asmlinkage long
-sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
+SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
+		old_sigset_t __user *, oset)
 {
 	int error;
 	old_sigset_t old_set, new_set;
@@ -2497,11 +2491,10 @@
 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
 
 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
-asmlinkage long
-sys_rt_sigaction(int sig,
-		 const struct sigaction __user *act,
-		 struct sigaction __user *oact,
-		 size_t sigsetsize)
+SYSCALL_DEFINE4(rt_sigaction, int, sig,
+		const struct sigaction __user *, act,
+		struct sigaction __user *, oact,
+		size_t, sigsetsize)
 {
 	struct k_sigaction new_sa, old_sa;
 	int ret = -EINVAL;
@@ -2531,15 +2524,13 @@
 /*
  * For backwards compatibility.  Functionality superseded by sigprocmask.
  */
-asmlinkage long
-sys_sgetmask(void)
+SYSCALL_DEFINE0(sgetmask)
 {
 	/* SMP safe */
 	return current->blocked.sig[0];
 }
 
-asmlinkage long
-sys_ssetmask(int newmask)
+SYSCALL_DEFINE1(ssetmask, int, newmask)
 {
 	int old;
 
@@ -2559,8 +2550,7 @@
 /*
  * For backwards compatibility.  Functionality superseded by sigaction.
  */
-asmlinkage unsigned long
-sys_signal(int sig, __sighandler_t handler)
+SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
 {
 	struct k_sigaction new_sa, old_sa;
 	int ret;
@@ -2577,8 +2567,7 @@
 
 #ifdef __ARCH_WANT_SYS_PAUSE
 
-asmlinkage long
-sys_pause(void)
+SYSCALL_DEFINE0(pause)
 {
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -2588,7 +2577,7 @@
 #endif
 
 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
-asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
+SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
 {
 	sigset_t newset;
 
diff --git a/kernel/sys.c b/kernel/sys.c
index 763c3c1..e7dc0e1 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -143,7 +143,7 @@
 	return error;
 }
 
-asmlinkage long sys_setpriority(int which, int who, int niceval)
+SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
 {
 	struct task_struct *g, *p;
 	struct user_struct *user;
@@ -208,7 +208,7 @@
  * has been offset by 20 (ie it returns 40..1 instead of -20..19)
  * to stay compatible.
  */
-asmlinkage long sys_getpriority(int which, int who)
+SYSCALL_DEFINE2(getpriority, int, which, int, who)
 {
 	struct task_struct *g, *p;
 	struct user_struct *user;
@@ -355,7 +355,8 @@
  *
  * reboot doesn't sync: do that yourself before calling this.
  */
-asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
+SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
+		void __user *, arg)
 {
 	char buffer[256];
 
@@ -478,7 +479,7 @@
  * SMP: There are not races, the GIDs are checked only by filesystem
  *      operations (as far as semantic preservation is concerned).
  */
-asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
+SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
 {
 	const struct cred *old;
 	struct cred *new;
@@ -529,7 +530,7 @@
  *
  * SMP: Same implicit races as above.
  */
-asmlinkage long sys_setgid(gid_t gid)
+SYSCALL_DEFINE1(setgid, gid_t, gid)
 {
 	const struct cred *old;
 	struct cred *new;
@@ -597,7 +598,7 @@
  * 100% compatible with BSD.  A program which uses just setuid() will be
  * 100% compatible with POSIX with saved IDs. 
  */
-asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
+SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
 {
 	const struct cred *old;
 	struct cred *new;
@@ -661,7 +662,7 @@
  * will allow a root program to temporarily drop privileges and be able to
  * regain them by swapping the real and effective uid.  
  */
-asmlinkage long sys_setuid(uid_t uid)
+SYSCALL_DEFINE1(setuid, uid_t, uid)
 {
 	const struct cred *old;
 	struct cred *new;
@@ -705,7 +706,7 @@
  * This function implements a generic ability to update ruid, euid,
  * and suid.  This allows you to implement the 4.4 compatible seteuid().
  */
-asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
+SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
 {
 	const struct cred *old;
 	struct cred *new;
@@ -756,7 +757,7 @@
 	return retval;
 }
 
-asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
+SYSCALL_DEFINE3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid)
 {
 	const struct cred *cred = current_cred();
 	int retval;
@@ -771,7 +772,7 @@
 /*
  * Same as above, but for rgid, egid, sgid.
  */
-asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
+SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
 {
 	const struct cred *old;
 	struct cred *new;
@@ -814,7 +815,7 @@
 	return retval;
 }
 
-asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
+SYSCALL_DEFINE3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid)
 {
 	const struct cred *cred = current_cred();
 	int retval;
@@ -833,7 +834,7 @@
  * whatever uid it wants to). It normally shadows "euid", except when
  * explicitly set by setfsuid() or for access..
  */
-asmlinkage long sys_setfsuid(uid_t uid)
+SYSCALL_DEFINE1(setfsuid, uid_t, uid)
 {
 	const struct cred *old;
 	struct cred *new;
@@ -870,7 +871,7 @@
 /*
  * Samma på svenska..
  */
-asmlinkage long sys_setfsgid(gid_t gid)
+SYSCALL_DEFINE1(setfsgid, gid_t, gid)
 {
 	const struct cred *old;
 	struct cred *new;
@@ -919,7 +920,7 @@
 	tms->tms_cstime = cputime_to_clock_t(cstime);
 }
 
-asmlinkage long sys_times(struct tms __user * tbuf)
+SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
 {
 	if (tbuf) {
 		struct tms tmp;
@@ -944,7 +945,7 @@
  * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
  * LBT 04.03.94
  */
-asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
+SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
 {
 	struct task_struct *p;
 	struct task_struct *group_leader = current->group_leader;
@@ -1015,7 +1016,7 @@
 	return err;
 }
 
-asmlinkage long sys_getpgid(pid_t pid)
+SYSCALL_DEFINE1(getpgid, pid_t, pid)
 {
 	struct task_struct *p;
 	struct pid *grp;
@@ -1045,14 +1046,14 @@
 
 #ifdef __ARCH_WANT_SYS_GETPGRP
 
-asmlinkage long sys_getpgrp(void)
+SYSCALL_DEFINE0(getpgrp)
 {
 	return sys_getpgid(0);
 }
 
 #endif
 
-asmlinkage long sys_getsid(pid_t pid)
+SYSCALL_DEFINE1(getsid, pid_t, pid)
 {
 	struct task_struct *p;
 	struct pid *sid;
@@ -1080,7 +1081,7 @@
 	return retval;
 }
 
-asmlinkage long sys_setsid(void)
+SYSCALL_DEFINE0(setsid)
 {
 	struct task_struct *group_leader = current->group_leader;
 	struct pid *sid = task_pid(group_leader);
@@ -1311,7 +1312,7 @@
 
 EXPORT_SYMBOL(set_current_groups);
 
-asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
+SYSCALL_DEFINE2(getgroups, int, gidsetsize, gid_t __user *, grouplist)
 {
 	const struct cred *cred = current_cred();
 	int i;
@@ -1340,7 +1341,7 @@
  *	without another task interfering.
  */
  
-asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
+SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
 {
 	struct group_info *group_info;
 	int retval;
@@ -1394,7 +1395,7 @@
 
 DECLARE_RWSEM(uts_sem);
 
-asmlinkage long sys_newuname(struct new_utsname __user * name)
+SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
 {
 	int errno = 0;
 
@@ -1405,7 +1406,7 @@
 	return errno;
 }
 
-asmlinkage long sys_sethostname(char __user *name, int len)
+SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
 {
 	int errno;
 	char tmp[__NEW_UTS_LEN];
@@ -1429,7 +1430,7 @@
 
 #ifdef __ARCH_WANT_SYS_GETHOSTNAME
 
-asmlinkage long sys_gethostname(char __user *name, int len)
+SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
 {
 	int i, errno;
 	struct new_utsname *u;
@@ -1454,7 +1455,7 @@
  * Only setdomainname; getdomainname can be implemented by calling
  * uname()
  */
-asmlinkage long sys_setdomainname(char __user *name, int len)
+SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
 {
 	int errno;
 	char tmp[__NEW_UTS_LEN];
@@ -1477,7 +1478,7 @@
 	return errno;
 }
 
-asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
+SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
 {
 	if (resource >= RLIM_NLIMITS)
 		return -EINVAL;
@@ -1496,7 +1497,8 @@
  *	Back compatibility for getrlimit. Needed for some apps.
  */
  
-asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
+SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
+		struct rlimit __user *, rlim)
 {
 	struct rlimit x;
 	if (resource >= RLIM_NLIMITS)
@@ -1514,7 +1516,7 @@
 
 #endif
 
-asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
+SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
 {
 	struct rlimit new_rlim, *old_rlim;
 	int retval;
@@ -1687,7 +1689,7 @@
 	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
 }
 
-asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
+SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
 {
 	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
 	    who != RUSAGE_THREAD)
@@ -1695,14 +1697,14 @@
 	return getrusage(current, who, ru);
 }
 
-asmlinkage long sys_umask(int mask)
+SYSCALL_DEFINE1(umask, int, mask)
 {
 	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
 	return mask;
 }
 
-asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
-			  unsigned long arg4, unsigned long arg5)
+SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
+		unsigned long, arg4, unsigned long, arg5)
 {
 	struct task_struct *me = current;
 	unsigned char comm[sizeof(me->comm)];
@@ -1815,8 +1817,8 @@
 	return error;
 }
 
-asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
-			   struct getcpu_cache __user *unused)
+SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
+		struct getcpu_cache __user *, unused)
 {
 	int err = 0;
 	int cpu = raw_smp_processor_id();
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index e14a232..27dad29 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -131,6 +131,7 @@
 cond_syscall(sys_io_submit);
 cond_syscall(sys_io_cancel);
 cond_syscall(sys_io_getevents);
+cond_syscall(sys_syslog);
 
 /* arch-specific weak syscall entries */
 cond_syscall(sys_pciconfig_read);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 92f6e5b..368d163 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -82,6 +82,9 @@
 extern int compat_log;
 extern int latencytop_enabled;
 extern int sysctl_nr_open_min, sysctl_nr_open_max;
+#ifndef CONFIG_MMU
+extern int sysctl_nr_trim_pages;
+#endif
 #ifdef CONFIG_RCU_TORTURE_TEST
 extern int rcutorture_runnable;
 #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
@@ -141,6 +144,7 @@
 
 #ifdef CONFIG_IA64
 extern int no_unaligned_warning;
+extern int unaligned_dump_stack;
 #endif
 
 #ifdef CONFIG_RT_MUTEXES
@@ -778,6 +782,14 @@
 	 	.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "unaligned-dump-stack",
+		.data		= &unaligned_dump_stack,
+		.maxlen		= sizeof (int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
 #endif
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 	{
@@ -1102,6 +1114,17 @@
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec
 	},
+#else
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nr_trim_pages",
+		.data		= &sysctl_nr_trim_pages,
+		.maxlen		= sizeof(sysctl_nr_trim_pages),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &zero,
+	},
 #endif
 	{
 		.ctl_name	= VM_LAPTOP_MODE,
@@ -1674,7 +1697,7 @@
 	return error;
 }
 
-asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
+SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
 {
 	struct __sysctl_args tmp;
 	int error;
@@ -2975,7 +2998,7 @@
 #else /* CONFIG_SYSCTL_SYSCALL */
 
 
-asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
+SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
 {
 	struct __sysctl_args tmp;
 	int error;
diff --git a/kernel/time.c b/kernel/time.c
index 4886e3c..2951194 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -60,7 +60,7 @@
  * why not move it into the appropriate arch directory (for those
  * architectures that need it).
  */
-asmlinkage long sys_time(time_t __user * tloc)
+SYSCALL_DEFINE1(time, time_t __user *, tloc)
 {
 	time_t i = get_seconds();
 
@@ -79,7 +79,7 @@
  * architectures that need it).
  */
 
-asmlinkage long sys_stime(time_t __user *tptr)
+SYSCALL_DEFINE1(stime, time_t __user *, tptr)
 {
 	struct timespec tv;
 	int err;
@@ -99,8 +99,8 @@
 
 #endif /* __ARCH_WANT_SYS_TIME */
 
-asmlinkage long sys_gettimeofday(struct timeval __user *tv,
-				 struct timezone __user *tz)
+SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
+		struct timezone __user *, tz)
 {
 	if (likely(tv != NULL)) {
 		struct timeval ktv;
@@ -184,8 +184,8 @@
 	return 0;
 }
 
-asmlinkage long sys_settimeofday(struct timeval __user *tv,
-				struct timezone __user *tz)
+SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
+		struct timezone __user *, tz)
 {
 	struct timeval user_tv;
 	struct timespec	new_ts;
@@ -205,7 +205,7 @@
 	return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
 }
 
-asmlinkage long sys_adjtimex(struct timex __user *txc_p)
+SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
 {
 	struct timex txc;		/* Local copy of parameter */
 	int ret;
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 1ca9955..06f1975 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -45,7 +45,7 @@
  *
  * The value 8 is somewhat carefully chosen, as anything
  * larger can result in overflows. NSEC_PER_JIFFY grows as
- * HZ shrinks, so values greater then 8 overflow 32bits when
+ * HZ shrinks, so values greater than 8 overflow 32bits when
  * HZ=100.
  */
 #define JIFFIES_SHIFT	8
diff --git a/kernel/timer.c b/kernel/timer.c
index dee3f64..13dd64f 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1129,7 +1129,7 @@
  * For backwards compatibility?  This can be done in libc so Alpha
  * and all newer ports shouldn't need it.
  */
-asmlinkage unsigned long sys_alarm(unsigned int seconds)
+SYSCALL_DEFINE1(alarm, unsigned int, seconds)
 {
 	return alarm_setitimer(seconds);
 }
@@ -1152,7 +1152,7 @@
  *
  * This is SMP safe as current->tgid does not change.
  */
-asmlinkage long sys_getpid(void)
+SYSCALL_DEFINE0(getpid)
 {
 	return task_tgid_vnr(current);
 }
@@ -1163,7 +1163,7 @@
  * value of ->real_parent under rcu_read_lock(), see
  * release_task()->call_rcu(delayed_put_task_struct).
  */
-asmlinkage long sys_getppid(void)
+SYSCALL_DEFINE0(getppid)
 {
 	int pid;
 
@@ -1174,25 +1174,25 @@
 	return pid;
 }
 
-asmlinkage long sys_getuid(void)
+SYSCALL_DEFINE0(getuid)
 {
 	/* Only we change this so SMP safe */
 	return current_uid();
 }
 
-asmlinkage long sys_geteuid(void)
+SYSCALL_DEFINE0(geteuid)
 {
 	/* Only we change this so SMP safe */
 	return current_euid();
 }
 
-asmlinkage long sys_getgid(void)
+SYSCALL_DEFINE0(getgid)
 {
 	/* Only we change this so SMP safe */
 	return current_gid();
 }
 
-asmlinkage long sys_getegid(void)
+SYSCALL_DEFINE0(getegid)
 {
 	/* Only we change this so SMP safe */
 	return  current_egid();
@@ -1308,7 +1308,7 @@
 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
 
 /* Thread ID - the internal kernel "pid" */
-asmlinkage long sys_gettid(void)
+SYSCALL_DEFINE0(gettid)
 {
 	return task_pid_vnr(current);
 }
@@ -1400,7 +1400,7 @@
 	return 0;
 }
 
-asmlinkage long sys_sysinfo(struct sysinfo __user *info)
+SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
 {
 	struct sysinfo val;
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a9d9760..8b0daf0 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -168,7 +168,13 @@
  */
 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 {
-	return rb_event_length(event);
+	unsigned length = rb_event_length(event);
+	if (event->type != RINGBUF_TYPE_DATA)
+		return length;
+	length -= RB_EVNT_HDR_SIZE;
+	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
+		length -= sizeof(event->array[0]);
+	return length;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
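
Editor's sketch: with the fix above, ring_buffer_event_length() returns only the payload size of a data event instead of the full on-ring length. A hypothetical consumer, not part of this patch, that relies on that:

#include <linux/ring_buffer.h>
#include <linux/string.h>

static void copy_event_payload(struct ring_buffer_event *event, void *dst)
{
	/* length now excludes the event header, so this no longer over-reads */
	memcpy(dst, ring_buffer_event_data(event),
	       ring_buffer_event_length(event));
}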
 
diff --git a/kernel/uid16.c b/kernel/uid16.c
index 2460c31..0314501 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -17,7 +17,7 @@
 
 #include <asm/uaccess.h>
 
-asmlinkage long sys_chown16(const char __user * filename, old_uid_t user, old_gid_t group)
+SYSCALL_DEFINE3(chown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)
 {
 	long ret = sys_chown(filename, low2highuid(user), low2highgid(group));
 	/* avoid REGPARM breakage on x86: */
@@ -25,7 +25,7 @@
 	return ret;
 }
 
-asmlinkage long sys_lchown16(const char __user * filename, old_uid_t user, old_gid_t group)
+SYSCALL_DEFINE3(lchown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)
 {
 	long ret = sys_lchown(filename, low2highuid(user), low2highgid(group));
 	/* avoid REGPARM breakage on x86: */
@@ -33,7 +33,7 @@
 	return ret;
 }
 
-asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group)
+SYSCALL_DEFINE3(fchown16, unsigned int, fd, old_uid_t, user, old_gid_t, group)
 {
 	long ret = sys_fchown(fd, low2highuid(user), low2highgid(group));
 	/* avoid REGPARM breakage on x86: */
@@ -41,7 +41,7 @@
 	return ret;
 }
 
-asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid)
+SYSCALL_DEFINE2(setregid16, old_gid_t, rgid, old_gid_t, egid)
 {
 	long ret = sys_setregid(low2highgid(rgid), low2highgid(egid));
 	/* avoid REGPARM breakage on x86: */
@@ -49,7 +49,7 @@
 	return ret;
 }
 
-asmlinkage long sys_setgid16(old_gid_t gid)
+SYSCALL_DEFINE1(setgid16, old_gid_t, gid)
 {
 	long ret = sys_setgid(low2highgid(gid));
 	/* avoid REGPARM breakage on x86: */
@@ -57,7 +57,7 @@
 	return ret;
 }
 
-asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid)
+SYSCALL_DEFINE2(setreuid16, old_uid_t, ruid, old_uid_t, euid)
 {
 	long ret = sys_setreuid(low2highuid(ruid), low2highuid(euid));
 	/* avoid REGPARM breakage on x86: */
@@ -65,7 +65,7 @@
 	return ret;
 }
 
-asmlinkage long sys_setuid16(old_uid_t uid)
+SYSCALL_DEFINE1(setuid16, old_uid_t, uid)
 {
 	long ret = sys_setuid(low2highuid(uid));
 	/* avoid REGPARM breakage on x86: */
@@ -73,7 +73,7 @@
 	return ret;
 }
 
-asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid)
+SYSCALL_DEFINE3(setresuid16, old_uid_t, ruid, old_uid_t, euid, old_uid_t, suid)
 {
 	long ret = sys_setresuid(low2highuid(ruid), low2highuid(euid),
 				 low2highuid(suid));
@@ -82,7 +82,7 @@
 	return ret;
 }
 
-asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid)
+SYSCALL_DEFINE3(getresuid16, old_uid_t __user *, ruid, old_uid_t __user *, euid, old_uid_t __user *, suid)
 {
 	const struct cred *cred = current_cred();
 	int retval;
@@ -94,7 +94,7 @@
 	return retval;
 }
 
-asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid)
+SYSCALL_DEFINE3(setresgid16, old_gid_t, rgid, old_gid_t, egid, old_gid_t, sgid)
 {
 	long ret = sys_setresgid(low2highgid(rgid), low2highgid(egid),
 				 low2highgid(sgid));
@@ -103,7 +103,8 @@
 	return ret;
 }
 
-asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid)
+
+SYSCALL_DEFINE3(getresgid16, old_gid_t __user *, rgid, old_gid_t __user *, egid, old_gid_t __user *, sgid)
 {
 	const struct cred *cred = current_cred();
 	int retval;
@@ -115,7 +116,7 @@
 	return retval;
 }
 
-asmlinkage long sys_setfsuid16(old_uid_t uid)
+SYSCALL_DEFINE1(setfsuid16, old_uid_t, uid)
 {
 	long ret = sys_setfsuid(low2highuid(uid));
 	/* avoid REGPARM breakage on x86: */
@@ -123,7 +124,7 @@
 	return ret;
 }
 
-asmlinkage long sys_setfsgid16(old_gid_t gid)
+SYSCALL_DEFINE1(setfsgid16, old_gid_t, gid)
 {
 	long ret = sys_setfsgid(low2highgid(gid));
 	/* avoid REGPARM breakage on x86: */
@@ -161,7 +162,7 @@
 	return 0;
 }
 
-asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist)
+SYSCALL_DEFINE2(getgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
 {
 	const struct cred *cred = current_cred();
 	int i;
@@ -184,7 +185,7 @@
 	return i;
 }
 
-asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist)
+SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
 {
 	struct group_info *group_info;
 	int retval;
@@ -209,22 +210,22 @@
 	return retval;
 }
 
-asmlinkage long sys_getuid16(void)
+SYSCALL_DEFINE0(getuid16)
 {
 	return high2lowuid(current_uid());
 }
 
-asmlinkage long sys_geteuid16(void)
+SYSCALL_DEFINE0(geteuid16)
 {
 	return high2lowuid(current_euid());
 }
 
-asmlinkage long sys_getgid16(void)
+SYSCALL_DEFINE0(getgid16)
 {
 	return high2lowgid(current_gid());
 }
 
-asmlinkage long sys_getegid16(void)
+SYSCALL_DEFINE0(getegid16)
 {
 	return high2lowgid(current_egid());
 }
diff --git a/kernel/up.c b/kernel/up.c
new file mode 100644
index 0000000..1ff27a2
--- /dev/null
+++ b/kernel/up.c
@@ -0,0 +1,21 @@
+/*
+ * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+				int wait)
+{
+	WARN_ON(cpu != 0);
+
+	local_irq_disable();
+	(func)(info);
+	local_irq_enable();
+
+	return 0;
+}
+EXPORT_SYMBOL(smp_call_function_single);
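
Editor's sketch: the new kernel/up.c gives uniprocessor builds a real smp_call_function_single() instead of per-caller #ifdefs. A hypothetical caller, not part of this patch, to show what the stub does:

static void do_poke(void *info)
{
	/* per-cpu work would go here */
}

static void poke_cpu0(void)
{
	/* on UP this simply runs do_poke() locally with interrupts disabled */
	smp_call_function_single(0, do_poke, NULL, 1);
}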
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2e75478..4c9ae60 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -512,6 +512,13 @@
 
 	  If unsure, say N.
 
+config DEBUG_NOMMU_REGIONS
+	bool "Debug the global anon/private NOMMU mapping region tree"
+	depends on DEBUG_KERNEL && !MMU
+	help
+	  This option causes the global tree of anonymous and private mapping
+	  regions to be regularly checked for invalid topology.
+
 config DEBUG_WRITECOUNT
 	bool "Debug filesystem writers count"
 	depends on DEBUG_KERNEL
@@ -566,14 +573,14 @@
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
-		(X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || \
-		 AVR32 || SUPERH || BLACKFIN || MN10300)
-	default y if DEBUG_INFO && UML
+		(CRIS || M68K || M68KNOMMU || FRV || UML || S390 || \
+		 AVR32 || SUPERH || BLACKFIN || MN10300) || \
+		ARCH_WANT_FRAME_POINTERS
+	default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
 	help
-	  If you say Y here the resulting kernel image will be slightly larger
-	  and slower, but it might give very useful debugging information on
-	  some architectures or if you use external debuggers.
-	  If you don't debug the kernel, you can say N.
+	  If you say Y here, the resulting kernel image will be slightly
+	  larger and slower, but it gives very useful debugging information
+	  in case of kernel bugs (precise oopses/stacktraces/warnings).
 
 config BOOT_PRINTK_DELAY
 	bool "Delay each boot printk message by N milliseconds"
diff --git a/lib/idr.c b/lib/idr.c
index 1c4f928..c11c576 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -121,7 +121,7 @@
 {
 	while (idp->id_free_cnt < IDR_FREE_MAX) {
 		struct idr_layer *new;
-		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
+		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 		if (new == NULL)
 			return (0);
 		move_to_free_list(idp, new);
@@ -292,7 +292,7 @@
  * and go back to the idr_pre_get() call.  If the idr is full, it will
  * return -ENOSPC.
  *
- * @id returns a value in the range 0 ... 0x7fffffff
+ * @id returns a value in the range @starting_id ... 0x7fffffff
  */
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
@@ -623,16 +623,10 @@
 }
 EXPORT_SYMBOL(idr_replace);
 
-static void idr_cache_ctor(void *idr_layer)
-{
-	memset(idr_layer, 0, sizeof(struct idr_layer));
-}
-
 void __init idr_init_cache(void)
 {
 	idr_layer_cache = kmem_cache_create("idr_layer_cache",
-				sizeof(struct idr_layer), 0, SLAB_PANIC,
-				idr_cache_ctor);
+				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
 }
 
 /**
@@ -723,7 +717,7 @@
  * and go back to the ida_pre_get() call.  If the ida is full, it will
  * return -ENOSPC.
  *
- * @p_id returns a value in the range 0 ... 0x7fffffff.
+ * @p_id returns a value in the range @starting_id ... 0x7fffffff.
  */
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 {
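
Editor's sketch: the updated kernel-doc for idr_get_new_above()/ida_get_new_above() clarifies that the returned id starts at @starting_id. A minimal sketch of the retry loop those comments refer to (the idr instance and stored pointer are hypothetical):

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_idr);

static int store_above_100(void *ptr)
{
	int id, err;

	do {
		if (!idr_pre_get(&example_idr, GFP_KERNEL))
			return -ENOMEM;
		err = idr_get_new_above(&example_idr, ptr, 100, &id);
	} while (err == -EAGAIN);

	if (err)
		return err;
	/* per the updated comment, id lies in 100 ... 0x7fffffff */
	return id;
}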
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8d3fb0b..4bb42a0 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -640,13 +640,14 @@
  *
  *	Returns: the index of the hole if found, otherwise returns an index
  *	outside of the set specified (in which case 'return - index >= max_scan'
- *	will be true).
+ *	will be true). In rare cases of index wrap-around, 0 will be returned.
  *
  *	radix_tree_next_hole may be called under rcu_read_lock. However, like
- *	radix_tree_gang_lookup, this will not atomically search a snapshot of the
- *	tree at a single point in time. For example, if a hole is created at index
- *	5, then subsequently a hole is created at index 10, radix_tree_next_hole
- *	covering both indexes may return 10 if called under rcu_read_lock.
+ *	radix_tree_gang_lookup, this will not atomically search a snapshot of
+ *	the tree at a single point in time. For example, if a hole is created
+ *	at index 5, then subsequently a hole is created at index 10,
+ *	radix_tree_next_hole covering both indexes may return 10 if called
+ *	under rcu_read_lock.
  */
 unsigned long radix_tree_next_hole(struct radix_tree_root *root,
 				unsigned long index, unsigned long max_scan)
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 48499c2..9956b996 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -292,7 +292,7 @@
 /*
  * This function returns the first node (in sort order) of the tree.
  */
-struct rb_node *rb_first(struct rb_root *root)
+struct rb_node *rb_first(const struct rb_root *root)
 {
 	struct rb_node	*n;
 
@@ -305,7 +305,7 @@
 }
 EXPORT_SYMBOL(rb_first);
 
-struct rb_node *rb_last(struct rb_root *root)
+struct rb_node *rb_last(const struct rb_root *root)
 {
 	struct rb_node	*n;
 
@@ -318,7 +318,7 @@
 }
 EXPORT_SYMBOL(rb_last);
 
-struct rb_node *rb_next(struct rb_node *node)
+struct rb_node *rb_next(const struct rb_node *node)
 {
 	struct rb_node *parent;
 
@@ -331,7 +331,7 @@
 		node = node->rb_right; 
 		while (node->rb_left)
 			node=node->rb_left;
-		return node;
+		return (struct rb_node *)node;
 	}
 
 	/* No right-hand children.  Everything down and left is
@@ -347,7 +347,7 @@
 }
 EXPORT_SYMBOL(rb_next);
 
-struct rb_node *rb_prev(struct rb_node *node)
+struct rb_node *rb_prev(const struct rb_node *node)
 {
 	struct rb_node *parent;
 
@@ -360,7 +360,7 @@
 		node = node->rb_left; 
 		while (node->rb_right)
 			node=node->rb_right;
-		return node;
+		return (struct rb_node *)node;
 	}
 
 	/* No left-hand children. Go up till we find an ancestor which
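
Editor's sketch: rb_first()/rb_last()/rb_next()/rb_prev() now take const pointers, so read-only walkers can pass a const tree. A hedged iteration sketch (struct my_node and the tree are hypothetical, not from this patch):

#include <linux/rbtree.h>

struct my_node {
	struct rb_node rb;
	int key;
};

static void walk_tree(const struct rb_root *root)
{
	struct rb_node *n;

	for (n = rb_first(root); n; n = rb_next(n)) {
		struct my_node *e = rb_entry(n, struct my_node, rb);
		(void)e;	/* visit e->key here */
	}
}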
diff --git a/lib/sort.c b/lib/sort.c
index 6abbaf3..926d004 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -32,11 +32,11 @@
  * @base: pointer to data to sort
  * @num: number of elements
  * @size: size of each element
- * @cmp: pointer to comparison function
- * @swap: pointer to swap function or NULL
+ * @cmp_func: pointer to comparison function
+ * @swap_func: pointer to swap function or NULL
  *
  * This function does a heapsort on the given array. You may provide a
- * swap function optimized to your element type.
+ * swap_func function optimized to your element type.
  *
  * Sorting time is O(n log n) both on average and worst-case. While
  * qsort is about 20% faster on average, it suffers from exploitable
@@ -45,37 +45,39 @@
  */
 
 void sort(void *base, size_t num, size_t size,
-	  int (*cmp)(const void *, const void *),
-	  void (*swap)(void *, void *, int size))
+	  int (*cmp_func)(const void *, const void *),
+	  void (*swap_func)(void *, void *, int size))
 {
 	/* pre-scale counters for performance */
 	int i = (num/2 - 1) * size, n = num * size, c, r;
 
-	if (!swap)
-		swap = (size == 4 ? u32_swap : generic_swap);
+	if (!swap_func)
+		swap_func = (size == 4 ? u32_swap : generic_swap);
 
 	/* heapify */
 	for ( ; i >= 0; i -= size) {
 		for (r = i; r * 2 + size < n; r  = c) {
 			c = r * 2 + size;
-			if (c < n - size && cmp(base + c, base + c + size) < 0)
+			if (c < n - size &&
+					cmp_func(base + c, base + c + size) < 0)
 				c += size;
-			if (cmp(base + r, base + c) >= 0)
+			if (cmp_func(base + r, base + c) >= 0)
 				break;
-			swap(base + r, base + c, size);
+			swap_func(base + r, base + c, size);
 		}
 	}
 
 	/* sort */
 	for (i = n - size; i > 0; i -= size) {
-		swap(base, base + i, size);
+		swap_func(base, base + i, size);
 		for (r = 0; r * 2 + size < i; r = c) {
 			c = r * 2 + size;
-			if (c < i - size && cmp(base + c, base + c + size) < 0)
+			if (c < i - size &&
+					cmp_func(base + c, base + c + size) < 0)
 				c += size;
-			if (cmp(base + r, base + c) >= 0)
+			if (cmp_func(base + r, base + c) >= 0)
 				break;
-			swap(base + r, base + c, size);
+			swap_func(base + r, base + c, size);
 		}
 	}
 }
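
Editor's sketch: the rename to cmp_func/swap_func is purely cosmetic, presumably to keep the parameter names from colliding with a generic swap() macro (note the local #define swap removed from kernel/sched_fair.c above); callers are unchanged. A minimal usage sketch with made-up data:

#include <linux/kernel.h>
#include <linux/sort.h>

static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

static void sort_example(void)
{
	int vals[] = { 42, 7, 19, 3 };

	/* swap_func is NULL, so the built-in u32_swap handles 4-byte elements */
	sort(vals, ARRAY_SIZE(vals), sizeof(vals[0]), cmp_int, NULL);
}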
diff --git a/mm/fadvise.c b/mm/fadvise.c
index a1da969..54a0f80 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -24,7 +24,7 @@
  * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
  * deactivate the pages and clear PG_Referenced.
  */
-asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
+SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
 {
 	struct file *file = fget(fd);
 	struct address_space *mapping;
@@ -126,12 +126,26 @@
 	fput(file);
 	return ret;
 }
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_fadvise64_64(long fd, loff_t offset, loff_t len, long advice)
+{
+	return SYSC_fadvise64_64((int) fd, offset, len, (int) advice);
+}
+SYSCALL_ALIAS(sys_fadvise64_64, SyS_fadvise64_64);
+#endif
 
 #ifdef __ARCH_WANT_SYS_FADVISE64
 
-asmlinkage long sys_fadvise64(int fd, loff_t offset, size_t len, int advice)
+SYSCALL_DEFINE(fadvise64)(int fd, loff_t offset, size_t len, int advice)
 {
 	return sys_fadvise64_64(fd, offset, len, advice);
 }
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_fadvise64(long fd, loff_t offset, long len, long advice)
+{
+	return SYSC_fadvise64((int) fd, offset, (size_t)len, (int)advice);
+}
+SYSCALL_ALIAS(sys_fadvise64, SyS_fadvise64);
+#endif
 
 #endif
diff --git a/mm/filemap.c b/mm/filemap.c
index 2f55a1e..23acefe 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -460,7 +460,7 @@
 	VM_BUG_ON(!PageLocked(page));
 
 	error = mem_cgroup_cache_charge(page, current->mm,
-					gfp_mask & ~__GFP_HIGHMEM);
+					gfp_mask & GFP_RECLAIM_MASK);
 	if (error)
 		goto out;
 
@@ -1374,7 +1374,7 @@
 	return 0;
 }
 
-asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
+SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
 {
 	ssize_t ret;
 	struct file *file;
@@ -1393,6 +1393,13 @@
 	}
 	return ret;
 }
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
+{
+	return SYSC_readahead((int) fd, offset, (size_t) count);
+}
+SYSCALL_ALIAS(sys_readahead, SyS_readahead);
+#endif
 
 #ifdef CONFIG_MMU
 /**
diff --git a/mm/fremap.c b/mm/fremap.c
index 62d5bbd..736ba7f 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -120,8 +120,8 @@
  * and the vma's default protection is used. Arbitrary protections
  * might be implemented in the future.
  */
-asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
-	unsigned long prot, unsigned long pgoff, unsigned long flags)
+SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
+		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
 {
 	struct mm_struct *mm = current->mm;
 	struct address_space *mapping;
diff --git a/mm/madvise.c b/mm/madvise.c
index f9349c1..b9ce574 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -281,7 +281,7 @@
  *  -EBADF  - map exists, but area maps something that isn't a file.
  *  -EAGAIN - a kernel resource was temporarily unavailable.
  */
-asmlinkage long sys_madvise(unsigned long start, size_t len_in, int behavior)
+SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
 {
 	unsigned long end, tmp;
 	struct vm_area_struct * vma, *prev;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 51ee965..4d0ea3c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -21,11 +21,13 @@
 #include <linux/memcontrol.h>
 #include <linux/cgroup.h>
 #include <linux/mm.h>
+#include <linux/pagemap.h>
 #include <linux/smp.h>
 #include <linux/page-flags.h>
 #include <linux/backing-dev.h>
 #include <linux/bit_spinlock.h>
 #include <linux/rcupdate.h>
+#include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/spinlock.h>
@@ -34,12 +36,23 @@
 #include <linux/vmalloc.h>
 #include <linux/mm_inline.h>
 #include <linux/page_cgroup.h>
+#include "internal.h"
 
 #include <asm/uaccess.h>
 
 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
 #define MEM_CGROUP_RECLAIM_RETRIES	5
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
+int do_swap_account __read_mostly;
+static int really_do_swap_account __initdata = 1; /* to remember the boot option */
+#else
+#define do_swap_account		(0)
+#endif
+
+static DEFINE_MUTEX(memcg_tasklist);	/* can be held under cgroup_mutex */
+
 /*
  * Statistics for memory cgroup.
  */
@@ -60,7 +73,7 @@
 } ____cacheline_aligned_in_smp;
 
 struct mem_cgroup_stat {
-	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
+	struct mem_cgroup_stat_cpu cpustat[0];
 };
 
 /*
@@ -89,9 +102,10 @@
 	/*
 	 * spin_lock to protect the per cgroup LRU
 	 */
-	spinlock_t		lru_lock;
 	struct list_head	lists[NR_LRU_LISTS];
 	unsigned long		count[NR_LRU_LISTS];
+
+	struct zone_reclaim_stat reclaim_stat;
 };
 /* Macro for accessing counter */
 #define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
@@ -122,44 +136,73 @@
 	 */
 	struct res_counter res;
 	/*
+	 * the counter to account for mem+swap usage.
+	 */
+	struct res_counter memsw;
+	/*
 	 * Per cgroup active and inactive list, similar to the
 	 * per zone LRU lists.
 	 */
 	struct mem_cgroup_lru_info info;
 
-	int	prev_priority;	/* for recording reclaim priority */
 	/*
-	 * statistics.
+	 * protects reclaim-related members.
+	 */
+	spinlock_t reclaim_param_lock;
+
+	int	prev_priority;	/* for recording reclaim priority */
+
+	/*
+	 * While reclaiming in a hierarchy, we cache the last child we
+	 * reclaimed from. Protected by hierarchy_mutex
+	 */
+	struct mem_cgroup *last_scanned_child;
+	/*
+	 * Should the accounting and control be hierarchical, per subtree?
+	 */
+	bool use_hierarchy;
+	unsigned long	last_oom_jiffies;
+	atomic_t	refcnt;
+
+	unsigned int	swappiness;
+
+	/*
+	 * statistics. This must be placed at the end of memcg.
 	 */
 	struct mem_cgroup_stat stat;
 };
-static struct mem_cgroup init_mem_cgroup;
 
 enum charge_type {
 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
 	MEM_CGROUP_CHARGE_TYPE_MAPPED,
 	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
 	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
+	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
 	NR_CHARGE_TYPE,
 };
 
 /* only for here (for easy reading.) */
 #define PCGF_CACHE	(1UL << PCG_CACHE)
 #define PCGF_USED	(1UL << PCG_USED)
-#define PCGF_ACTIVE	(1UL << PCG_ACTIVE)
 #define PCGF_LOCK	(1UL << PCG_LOCK)
-#define PCGF_FILE	(1UL << PCG_FILE)
 static const unsigned long
 pcg_default_flags[NR_CHARGE_TYPE] = {
-	PCGF_CACHE | PCGF_FILE | PCGF_USED | PCGF_LOCK, /* File Cache */
-	PCGF_ACTIVE | PCGF_USED | PCGF_LOCK, /* Anon */
-	PCGF_ACTIVE | PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
+	PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
+	PCGF_USED | PCGF_LOCK, /* Anon */
+	PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
 	0, /* FORCE */
 };
 
-/*
- * Always modified under lru lock. Then, not necessary to preempt_disable()
- */
+/* for encoding cft->private value on file */
+#define _MEM			(0)
+#define _MEMSWAP		(1)
+#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
+#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
+#define MEMFILE_ATTR(val)	((val) & 0xffff)
+
+static void mem_cgroup_get(struct mem_cgroup *mem);
+static void mem_cgroup_put(struct mem_cgroup *mem);
+
 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
 					 struct page_cgroup *pc,
 					 bool charge)
@@ -167,10 +210,9 @@
 	int val = (charge)? 1 : -1;
 	struct mem_cgroup_stat *stat = &mem->stat;
 	struct mem_cgroup_stat_cpu *cpustat;
+	int cpu = get_cpu();
 
-	VM_BUG_ON(!irqs_disabled());
-
-	cpustat = &stat->cpustat[smp_processor_id()];
+	cpustat = &stat->cpustat[cpu];
 	if (PageCgroupCache(pc))
 		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
 	else
@@ -182,6 +224,7 @@
 	else
 		__mem_cgroup_stat_add_safe(cpustat,
 				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
+	put_cpu();
 }
 
 static struct mem_cgroup_per_zone *
@@ -197,6 +240,9 @@
 	int nid = page_cgroup_nid(pc);
 	int zid = page_cgroup_zid(pc);
 
+	if (!mem)
+		return NULL;
+
 	return mem_cgroup_zoneinfo(mem, nid, zid);
 }
 
@@ -236,79 +282,161 @@
 				struct mem_cgroup, css);
 }
 
-static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
-			struct page_cgroup *pc)
+static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 {
-	int lru = LRU_BASE;
+	struct mem_cgroup *mem = NULL;
+	/*
+	 * Because we have no locks, mm->owner may be being moved to another
+	 * cgroup. We use css_tryget() here even if this looks pessimistic
+	 * (rather than adding locks here).
+	 */
+	rcu_read_lock();
+	do {
+		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+		if (unlikely(!mem))
+			break;
+	} while (!css_tryget(&mem->css));
+	rcu_read_unlock();
+	return mem;
+}
 
-	if (PageCgroupUnevictable(pc))
-		lru = LRU_UNEVICTABLE;
-	else {
-		if (PageCgroupActive(pc))
-			lru += LRU_ACTIVE;
-		if (PageCgroupFile(pc))
-			lru += LRU_FILE;
-	}
+static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
+{
+	if (!mem)
+		return true;
+	return css_is_removed(&mem->css);
+}
 
+/*
+ * The following LRU functions may be used without holding PCG_LOCK.
+ * They are called by the global LRU routines independently of memcg.
+ * What we have to take care of here is the validity of pc->mem_cgroup.
+ *
+ * Changes to pc->mem_cgroup happen when
+ * 1. charging
+ * 2. moving an account
+ * In the typical case, "charge" is done before add-to-lru. The exception is
+ * SwapCache, which is added to the LRU before being charged.
+ * If the PCG_USED bit is not set, the page_cgroup is not added to this
+ * private LRU.
+ * When moving an account, the page is not on the LRU; it is isolated.
+ */
+
+void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
+{
+	struct page_cgroup *pc;
+	struct mem_cgroup *mem;
+	struct mem_cgroup_per_zone *mz;
+
+	if (mem_cgroup_disabled())
+		return;
+	pc = lookup_page_cgroup(page);
+	/* can happen while we handle swapcache. */
+	if (list_empty(&pc->lru) || !pc->mem_cgroup)
+		return;
+	/*
+	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
+	 * removed from global LRU.
+	 */
+	mz = page_cgroup_zoneinfo(pc);
+	mem = pc->mem_cgroup;
 	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
-
-	mem_cgroup_charge_statistics(pc->mem_cgroup, pc, false);
-	list_del(&pc->lru);
+	list_del_init(&pc->lru);
+	return;
 }
 
-static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
-				struct page_cgroup *pc)
+void mem_cgroup_del_lru(struct page *page)
 {
-	int lru = LRU_BASE;
-
-	if (PageCgroupUnevictable(pc))
-		lru = LRU_UNEVICTABLE;
-	else {
-		if (PageCgroupActive(pc))
-			lru += LRU_ACTIVE;
-		if (PageCgroupFile(pc))
-			lru += LRU_FILE;
-	}
-
-	MEM_CGROUP_ZSTAT(mz, lru) += 1;
-	list_add(&pc->lru, &mz->lists[lru]);
-
-	mem_cgroup_charge_statistics(pc->mem_cgroup, pc, true);
+	mem_cgroup_del_lru_list(page, page_lru(page));
 }
 
-static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru)
+void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
 {
-	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
-	int active    = PageCgroupActive(pc);
-	int file      = PageCgroupFile(pc);
-	int unevictable = PageCgroupUnevictable(pc);
-	enum lru_list from = unevictable ? LRU_UNEVICTABLE :
-				(LRU_FILE * !!file + !!active);
+	struct mem_cgroup_per_zone *mz;
+	struct page_cgroup *pc;
 
-	if (lru == from)
+	if (mem_cgroup_disabled())
 		return;
 
-	MEM_CGROUP_ZSTAT(mz, from) -= 1;
+	pc = lookup_page_cgroup(page);
 	/*
-	 * However this is done under mz->lru_lock, another flags, which
-	 * are not related to LRU, will be modified from out-of-lock.
-	 * We have to use atomic set/clear flags.
+	 * The Used bit is set without atomic ops, but after smp_wmb().
+	 * A paired smp_rmb() here makes pc->mem_cgroup visible as well.
 	 */
-	if (is_unevictable_lru(lru)) {
-		ClearPageCgroupActive(pc);
-		SetPageCgroupUnevictable(pc);
-	} else {
-		if (is_active_lru(lru))
-			SetPageCgroupActive(pc);
-		else
-			ClearPageCgroupActive(pc);
-		ClearPageCgroupUnevictable(pc);
-	}
-
-	MEM_CGROUP_ZSTAT(mz, lru) += 1;
+	smp_rmb();
+	/* unused page is not rotated. */
+	if (!PageCgroupUsed(pc))
+		return;
+	mz = page_cgroup_zoneinfo(pc);
 	list_move(&pc->lru, &mz->lists[lru]);
 }
 
+void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
+{
+	struct page_cgroup *pc;
+	struct mem_cgroup_per_zone *mz;
+
+	if (mem_cgroup_disabled())
+		return;
+	pc = lookup_page_cgroup(page);
+	/*
+	 * The Used bit is set without atomic ops, but after smp_wmb().
+	 * A paired smp_rmb() here makes pc->mem_cgroup visible as well.
+	 */
+	smp_rmb();
+	if (!PageCgroupUsed(pc))
+		return;
+
+	mz = page_cgroup_zoneinfo(pc);
+	MEM_CGROUP_ZSTAT(mz, lru) += 1;
+	list_add(&pc->lru, &mz->lists[lru]);
+}
+
+/*
+ * When handling SwapCache, pc->mem_cgroup may be changed while it's linked to
+ * the LRU because the page may be reused after it's fully uncharged (because
+ * of SwapCache behavior). To handle that, unlink the page_cgroup from the LRU
+ * when charging it again. This function is only used to charge SwapCache.
+ * It's done under lock_page() and it is expected that zone->lru_lock is never
+ * held.
+ */
+static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
+{
+	unsigned long flags;
+	struct zone *zone = page_zone(page);
+	struct page_cgroup *pc = lookup_page_cgroup(page);
+
+	spin_lock_irqsave(&zone->lru_lock, flags);
+	/*
+	 * Forget old LRU when this page_cgroup is *not* used. This Used bit
+	 * is guarded by lock_page() because the page is SwapCache.
+	 */
+	if (!PageCgroupUsed(pc))
+		mem_cgroup_del_lru_list(page, page_lru(page));
+	spin_unlock_irqrestore(&zone->lru_lock, flags);
+}
+
+static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
+{
+	unsigned long flags;
+	struct zone *zone = page_zone(page);
+	struct page_cgroup *pc = lookup_page_cgroup(page);
+
+	spin_lock_irqsave(&zone->lru_lock, flags);
+	/* link when the page is linked to LRU but page_cgroup isn't */
+	if (PageLRU(page) && list_empty(&pc->lru))
+		mem_cgroup_add_lru_list(page, page_lru(page));
+	spin_unlock_irqrestore(&zone->lru_lock, flags);
+}
+
+
+void mem_cgroup_move_lists(struct page *page,
+			   enum lru_list from, enum lru_list to)
+{
+	if (mem_cgroup_disabled())
+		return;
+	mem_cgroup_del_lru_list(page, from);
+	mem_cgroup_add_lru_list(page, to);
+}
+
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 {
 	int ret;
@@ -320,37 +448,6 @@
 }
 
 /*
- * This routine assumes that the appropriate zone's lru lock is already held
- */
-void mem_cgroup_move_lists(struct page *page, enum lru_list lru)
-{
-	struct page_cgroup *pc;
-	struct mem_cgroup_per_zone *mz;
-	unsigned long flags;
-
-	if (mem_cgroup_subsys.disabled)
-		return;
-
-	/*
-	 * We cannot lock_page_cgroup while holding zone's lru_lock,
-	 * because other holders of lock_page_cgroup can be interrupted
-	 * with an attempt to rotate_reclaimable_page.  But we cannot
-	 * safely get to page_cgroup without it, so just try_lock it:
-	 * mem_cgroup_isolate_pages allows for page left on wrong list.
-	 */
-	pc = lookup_page_cgroup(page);
-	if (!trylock_page_cgroup(pc))
-		return;
-	if (pc && PageCgroupUsed(pc)) {
-		mz = page_cgroup_zoneinfo(pc);
-		spin_lock_irqsave(&mz->lru_lock, flags);
-		__mem_cgroup_move_lists(pc, lru);
-		spin_unlock_irqrestore(&mz->lru_lock, flags);
-	}
-	unlock_page_cgroup(pc);
-}
-
-/*
  * Calculate mapped_ratio under memory controller. This will be used in
  * vmscan.c for determining whether we have to reclaim mapped pages.
  */
@@ -372,39 +469,116 @@
  */
 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
 {
-	return mem->prev_priority;
+	int prev_priority;
+
+	spin_lock(&mem->reclaim_param_lock);
+	prev_priority = mem->prev_priority;
+	spin_unlock(&mem->reclaim_param_lock);
+
+	return prev_priority;
 }
 
 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
 {
+	spin_lock(&mem->reclaim_param_lock);
 	if (priority < mem->prev_priority)
 		mem->prev_priority = priority;
+	spin_unlock(&mem->reclaim_param_lock);
 }
 
 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
 {
+	spin_lock(&mem->reclaim_param_lock);
 	mem->prev_priority = priority;
+	spin_unlock(&mem->reclaim_param_lock);
 }
 
-/*
- * Calculate # of pages to be scanned in this priority/zone.
- * See also vmscan.c
- *
- * priority starts from "DEF_PRIORITY" and decremented in each loop.
- * (see include/linux/mmzone.h)
- */
-
-long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
-					int priority, enum lru_list lru)
+static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
 {
-	long nr_pages;
+	unsigned long active;
+	unsigned long inactive;
+	unsigned long gb;
+	unsigned long inactive_ratio;
+
+	inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON);
+	active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON);
+
+	gb = (inactive + active) >> (30 - PAGE_SHIFT);
+	if (gb)
+		inactive_ratio = int_sqrt(10 * gb);
+	else
+		inactive_ratio = 1;
+
+	if (present_pages) {
+		present_pages[0] = inactive;
+		present_pages[1] = active;
+	}
+
+	return inactive_ratio;
+}
+
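+/*
+ * Illustration, assuming 4KB pages: a memcg with 4GB of anon pages has
+ * gb = 4 and inactive_ratio = int_sqrt(40) = 6, so the check below reports
+ * "low" once the active anon list is more than 6 times the inactive one.
+ */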
+int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
+{
+	unsigned long active;
+	unsigned long inactive;
+	unsigned long present_pages[2];
+	unsigned long inactive_ratio;
+
+	inactive_ratio = calc_inactive_ratio(memcg, present_pages);
+
+	inactive = present_pages[0];
+	active = present_pages[1];
+
+	if (inactive * inactive_ratio < active)
+		return 1;
+
+	return 0;
+}
+
+unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
+				       struct zone *zone,
+				       enum lru_list lru)
+{
 	int nid = zone->zone_pgdat->node_id;
 	int zid = zone_idx(zone);
-	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
+	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
 
-	nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
+	return MEM_CGROUP_ZSTAT(mz, lru);
+}
 
-	return (nr_pages >> priority);
+struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
+						      struct zone *zone)
+{
+	int nid = zone->zone_pgdat->node_id;
+	int zid = zone_idx(zone);
+	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+
+	return &mz->reclaim_stat;
+}
+
+struct zone_reclaim_stat *
+mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+{
+	struct page_cgroup *pc;
+	struct mem_cgroup_per_zone *mz;
+
+	if (mem_cgroup_disabled())
+		return NULL;
+
+	pc = lookup_page_cgroup(page);
+	/*
+	 * The Used bit is set without atomic ops, but after smp_wmb().
+	 * A paired smp_rmb() here makes pc->mem_cgroup visible as well.
+	 */
+	smp_rmb();
+	if (!PageCgroupUsed(pc))
+		return NULL;
+
+	mz = page_cgroup_zoneinfo(pc);
+	if (!mz)
+		return NULL;
+
+	return &mz->reclaim_stat;
 }
 
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -429,44 +603,469 @@
 	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
 	src = &mz->lists[lru];
 
-	spin_lock(&mz->lru_lock);
 	scan = 0;
 	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
 		if (scan >= nr_to_scan)
 			break;
+
+		page = pc->page;
 		if (unlikely(!PageCgroupUsed(pc)))
 			continue;
-		page = pc->page;
-
 		if (unlikely(!PageLRU(page)))
 			continue;
 
-		/*
-		 * TODO: play better with lumpy reclaim, grabbing anything.
-		 */
-		if (PageUnevictable(page) ||
-		    (PageActive(page) && !active) ||
-		    (!PageActive(page) && active)) {
-			__mem_cgroup_move_lists(pc, page_lru(page));
-			continue;
-		}
-
 		scan++;
-		list_move(&pc->lru, &pc_list);
-
 		if (__isolate_lru_page(page, mode, file) == 0) {
 			list_move(&page->lru, dst);
 			nr_taken++;
 		}
 	}
 
-	list_splice(&pc_list, src);
-	spin_unlock(&mz->lru_lock);
-
 	*scanned = scan;
 	return nr_taken;
 }
 
+#define mem_cgroup_from_res_counter(counter, member)	\
+	container_of(counter, struct mem_cgroup, member)
+
+/*
+ * This routine finds the DFS walk successor. This routine should be
+ * called with hierarchy_mutex held
+ */
+static struct mem_cgroup *
+__mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
+{
+	struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
+
+	curr_cgroup = curr->css.cgroup;
+	root_cgroup = root_mem->css.cgroup;
+
+	if (!list_empty(&curr_cgroup->children)) {
+		/*
+		 * Walk down to children
+		 */
+		cgroup = list_entry(curr_cgroup->children.next,
+						struct cgroup, sibling);
+		curr = mem_cgroup_from_cont(cgroup);
+		goto done;
+	}
+
+visit_parent:
+	if (curr_cgroup == root_cgroup) {
+		/* caller handles NULL case */
+		curr = NULL;
+		goto done;
+	}
+
+	/*
+	 * Goto next sibling
+	 */
+	if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
+		cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
+						sibling);
+		curr = mem_cgroup_from_cont(cgroup);
+		goto done;
+	}
+
+	/*
+	 * Go up to next parent and next parent's sibling if need be
+	 */
+	curr_cgroup = curr_cgroup->parent;
+	goto visit_parent;
+
+done:
+	return curr;
+}
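+
+/*
+ * Illustrative walk over a hypothetical hierarchy root -> {A -> {A1, A2}, B}
+ * (children in list order): starting from A the successors are A1, A2, B and
+ * then NULL once the walk climbs back to the root; mem_cgroup_get_next_node()
+ * below maps that NULL to root_mem and restarts from the first child on the
+ * next call.
+ */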
+
+/*
+ * Visit the first child (need not be the first child as per the ordering
+ * of the cgroup list, since we track last_scanned_child) of @mem and use
+ * that to reclaim free pages from.
+ */
+static struct mem_cgroup *
+mem_cgroup_get_next_node(struct mem_cgroup *root_mem)
+{
+	struct cgroup *cgroup;
+	struct mem_cgroup *orig, *next;
+	bool obsolete;
+
+	/*
+	 * Scan all children under the mem_cgroup mem
+	 */
+	mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
+
+	orig = root_mem->last_scanned_child;
+	obsolete = mem_cgroup_is_obsolete(orig);
+
+	if (list_empty(&root_mem->css.cgroup->children)) {
+		/*
+		 * root_mem might have had children before, and
+		 * last_scanned_child may still point to one of them; we drop
+		 * that reference below.
+		 */
+		if (orig)
+			VM_BUG_ON(!obsolete);
+		next = NULL;
+		goto done;
+	}
+
+	if (!orig || obsolete) {
+		cgroup = list_first_entry(&root_mem->css.cgroup->children,
+				struct cgroup, sibling);
+		next = mem_cgroup_from_cont(cgroup);
+	} else
+		next = __mem_cgroup_get_next_node(orig, root_mem);
+
+done:
+	if (next)
+		mem_cgroup_get(next);
+	root_mem->last_scanned_child = next;
+	if (orig)
+		mem_cgroup_put(orig);
+	mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
+	return (next) ? next : root_mem;
+}
+
+static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
+{
+	if (do_swap_account) {
+		if (res_counter_check_under_limit(&mem->res) &&
+			res_counter_check_under_limit(&mem->memsw))
+			return true;
+	} else
+		if (res_counter_check_under_limit(&mem->res))
+			return true;
+	return false;
+}
+
+static unsigned int get_swappiness(struct mem_cgroup *memcg)
+{
+	struct cgroup *cgrp = memcg->css.cgroup;
+	unsigned int swappiness;
+
+	/* root ? */
+	if (cgrp->parent == NULL)
+		return vm_swappiness;
+
+	spin_lock(&memcg->reclaim_param_lock);
+	swappiness = memcg->swappiness;
+	spin_unlock(&memcg->reclaim_param_lock);
+
+	return swappiness;
+}
+
+/*
+ * Dance down the hierarchy if needed to reclaim memory. We remember the
+ * last child we reclaimed from, so that we don't end up penalizing
+ * one child extensively based on its position in the children list.
+ *
+ * root_mem is the original ancestor that we've been reclaiming from.
+ */
+static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
+						gfp_t gfp_mask, bool noswap)
+{
+	struct mem_cgroup *next_mem;
+	int ret = 0;
+
+	/*
+	 * Reclaim unconditionally and don't check for return value.
+	 * We need to reclaim in the current group and down the tree.
+	 * One might think about checking for children before reclaiming,
+	 * but there might be left over accounting, even after children
+	 * have left.
+	 */
+	ret += try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
+					   get_swappiness(root_mem));
+	if (mem_cgroup_check_under_limit(root_mem))
+		return 1;	/* indicate reclaim has succeeded */
+	if (!root_mem->use_hierarchy)
+		return ret;
+
+	next_mem = mem_cgroup_get_next_node(root_mem);
+
+	while (next_mem != root_mem) {
+		if (mem_cgroup_is_obsolete(next_mem)) {
+			next_mem = mem_cgroup_get_next_node(root_mem);
+			continue;
+		}
+		ret += try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
+						   get_swappiness(next_mem));
+		if (mem_cgroup_check_under_limit(root_mem))
+			return 1;	/* indicate reclaim has succeeded */
+		next_mem = mem_cgroup_get_next_node(root_mem);
+	}
+	return ret;
+}
+
+bool mem_cgroup_oom_called(struct task_struct *task)
+{
+	bool ret = false;
+	struct mem_cgroup *mem;
+	struct mm_struct *mm;
+
+	rcu_read_lock();
+	mm = task->mm;
+	if (!mm)
+		mm = &init_mm;
+	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+	if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
+		ret = true;
+	rcu_read_unlock();
+	return ret;
+}
+/*
+ * Unlike the exported interface, an "oom" parameter is added. If oom==true,
+ * the OOM killer can be invoked.
+ */
+static int __mem_cgroup_try_charge(struct mm_struct *mm,
+			gfp_t gfp_mask, struct mem_cgroup **memcg,
+			bool oom)
+{
+	struct mem_cgroup *mem, *mem_over_limit;
+	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
+	struct res_counter *fail_res;
+
+	if (unlikely(test_thread_flag(TIF_MEMDIE))) {
+		/* Don't account this! */
+		*memcg = NULL;
+		return 0;
+	}
+
+	/*
+	 * We always charge the cgroup the mm_struct belongs to.
+	 * The mm_struct's mem_cgroup changes on task migration if the
+	 * thread group leader migrates. It's possible that mm is not
+	 * set, if so charge the init_mm (happens for pagecache usage).
+	 */
+	mem = *memcg;
+	if (likely(!mem)) {
+		mem = try_get_mem_cgroup_from_mm(mm);
+		*memcg = mem;
+	} else {
+		css_get(&mem->css);
+	}
+	if (unlikely(!mem))
+		return 0;
+
+	VM_BUG_ON(mem_cgroup_is_obsolete(mem));
+
+	while (1) {
+		int ret;
+		bool noswap = false;
+
+		ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
+		if (likely(!ret)) {
+			if (!do_swap_account)
+				break;
+			ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
+							&fail_res);
+			if (likely(!ret))
+				break;
+			/* mem+swap counter fails */
+			res_counter_uncharge(&mem->res, PAGE_SIZE);
+			noswap = true;
+			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
+									memsw);
+		} else
+			/* mem counter fails */
+			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
+									res);
+
+		if (!(gfp_mask & __GFP_WAIT))
+			goto nomem;
+
+		ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
+							noswap);
+		if (ret)
+			continue;
+
+		/*
+		 * try_to_free_mem_cgroup_pages() might not give us a full
+		 * picture of reclaim. Some pages are reclaimed and might be
+		 * moved to swap cache or just unmapped from the cgroup.
+		 * Check the limit again to see if the reclaim reduced the
+		 * current usage of the cgroup before giving up
+		 *
+		 */
+		if (mem_cgroup_check_under_limit(mem_over_limit))
+			continue;
+
+		if (!nr_retries--) {
+			if (oom) {
+				mutex_lock(&memcg_tasklist);
+				mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
+				mutex_unlock(&memcg_tasklist);
+				mem_over_limit->last_oom_jiffies = jiffies;
+			}
+			goto nomem;
+		}
+	}
+	return 0;
+nomem:
+	css_put(&mem->css);
+	return -ENOMEM;
+}
+
+static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
+{
+	struct mem_cgroup *mem;
+	swp_entry_t ent;
+
+	if (!PageSwapCache(page))
+		return NULL;
+
+	ent.val = page_private(page);
+	mem = lookup_swap_cgroup(ent);
+	if (!mem)
+		return NULL;
+	if (!css_tryget(&mem->css))
+		return NULL;
+	return mem;
+}
+
+/*
+ * Commit a charge obtained by __mem_cgroup_try_charge() and move the
+ * page_cgroup to the USED state. If it is already USED, uncharge and return.
+ */
+
+static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
+				     struct page_cgroup *pc,
+				     enum charge_type ctype)
+{
+	/* try_charge() can return NULL to *memcg; handle that case here. */
+	if (!mem)
+		return;
+
+	lock_page_cgroup(pc);
+	if (unlikely(PageCgroupUsed(pc))) {
+		unlock_page_cgroup(pc);
+		res_counter_uncharge(&mem->res, PAGE_SIZE);
+		if (do_swap_account)
+			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+		css_put(&mem->css);
+		return;
+	}
+	pc->mem_cgroup = mem;
+	smp_wmb();
+	pc->flags = pcg_default_flags[ctype];
+
+	mem_cgroup_charge_statistics(mem, pc, true);
+
+	unlock_page_cgroup(pc);
+}
+
+/**
+ * mem_cgroup_move_account - move account of the page
+ * @pc:	page_cgroup of the page.
+ * @from: mem_cgroup which the page is moved from.
+ * @to:	mem_cgroup which the page is moved to. @from != @to.
+ *
+ * The caller must confirm the following:
+ * - the page is not on the LRU (isolate_page() is useful.)
+ *
+ * Returns 0 on success,
+ * returns -EBUSY when the lock is busy or "pc" is unstable.
+ *
+ * This function does the "uncharge" from the old cgroup but doesn't do the
+ * "charge" to the new cgroup; that must be done by the caller.
+ */
+
+static int mem_cgroup_move_account(struct page_cgroup *pc,
+	struct mem_cgroup *from, struct mem_cgroup *to)
+{
+	struct mem_cgroup_per_zone *from_mz, *to_mz;
+	int nid, zid;
+	int ret = -EBUSY;
+
+	VM_BUG_ON(from == to);
+	VM_BUG_ON(PageLRU(pc->page));
+
+	nid = page_cgroup_nid(pc);
+	zid = page_cgroup_zid(pc);
+	from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
+	to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
+
+	if (!trylock_page_cgroup(pc))
+		return ret;
+
+	if (!PageCgroupUsed(pc))
+		goto out;
+
+	if (pc->mem_cgroup != from)
+		goto out;
+
+	res_counter_uncharge(&from->res, PAGE_SIZE);
+	mem_cgroup_charge_statistics(from, pc, false);
+	if (do_swap_account)
+		res_counter_uncharge(&from->memsw, PAGE_SIZE);
+	css_put(&from->css);
+
+	css_get(&to->css);
+	pc->mem_cgroup = to;
+	mem_cgroup_charge_statistics(to, pc, true);
+	ret = 0;
+out:
+	unlock_page_cgroup(pc);
+	return ret;
+}
+
+/*
+ * move charges to its parent.
+ */
+
+static int mem_cgroup_move_parent(struct page_cgroup *pc,
+				  struct mem_cgroup *child,
+				  gfp_t gfp_mask)
+{
+	struct page *page = pc->page;
+	struct cgroup *cg = child->css.cgroup;
+	struct cgroup *pcg = cg->parent;
+	struct mem_cgroup *parent;
+	int ret;
+
+	/* Is ROOT ? */
+	if (!pcg)
+		return -EINVAL;
+
+	parent = mem_cgroup_from_cont(pcg);
+
+	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
+	if (ret || !parent)
+		return ret;
+
+	if (!get_page_unless_zero(page)) {
+		ret = -EBUSY;
+		goto uncharge;
+	}
+
+	ret = isolate_lru_page(page);
+
+	if (ret)
+		goto cancel;
+
+	ret = mem_cgroup_move_account(pc, child, parent);
+
+	putback_lru_page(page);
+	if (!ret) {
+		put_page(page);
+		/* drop extra refcnt by try_charge() */
+		css_put(&parent->css);
+		return 0;
+	}
+
+cancel:
+	put_page(page);
+uncharge:
+	/* drop extra refcnt by try_charge() */
+	css_put(&parent->css);
+	/* uncharge if move fails */
+	res_counter_uncharge(&parent->res, PAGE_SIZE);
+	if (do_swap_account)
+		res_counter_uncharge(&parent->memsw, PAGE_SIZE);
+	return ret;
+}
+
 /*
  * Charge the memory controller for page usage.
  * Return
@@ -479,95 +1078,27 @@
 {
 	struct mem_cgroup *mem;
 	struct page_cgroup *pc;
-	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
-	struct mem_cgroup_per_zone *mz;
-	unsigned long flags;
+	int ret;
 
 	pc = lookup_page_cgroup(page);
 	/* can happen at boot */
 	if (unlikely(!pc))
 		return 0;
 	prefetchw(pc);
-	/*
-	 * We always charge the cgroup the mm_struct belongs to.
-	 * The mm_struct's mem_cgroup changes on task migration if the
-	 * thread group leader migrates. It's possible that mm is not
-	 * set, if so charge the init_mm (happens for pagecache usage).
-	 */
 
-	if (likely(!memcg)) {
-		rcu_read_lock();
-		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
-		if (unlikely(!mem)) {
-			rcu_read_unlock();
-			return 0;
-		}
-		/*
-		 * For every charge from the cgroup, increment reference count
-		 */
-		css_get(&mem->css);
-		rcu_read_unlock();
-	} else {
-		mem = memcg;
-		css_get(&memcg->css);
-	}
+	mem = memcg;
+	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
+	if (ret || !mem)
+		return ret;
 
-	while (unlikely(res_counter_charge(&mem->res, PAGE_SIZE))) {
-		if (!(gfp_mask & __GFP_WAIT))
-			goto out;
-
-		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
-			continue;
-
-		/*
-		 * try_to_free_mem_cgroup_pages() might not give us a full
-		 * picture of reclaim. Some pages are reclaimed and might be
-		 * moved to swap cache or just unmapped from the cgroup.
-		 * Check the limit again to see if the reclaim reduced the
-		 * current usage of the cgroup before giving up
-		 */
-		if (res_counter_check_under_limit(&mem->res))
-			continue;
-
-		if (!nr_retries--) {
-			mem_cgroup_out_of_memory(mem, gfp_mask);
-			goto out;
-		}
-	}
-
-
-	lock_page_cgroup(pc);
-	if (unlikely(PageCgroupUsed(pc))) {
-		unlock_page_cgroup(pc);
-		res_counter_uncharge(&mem->res, PAGE_SIZE);
-		css_put(&mem->css);
-
-		goto done;
-	}
-	pc->mem_cgroup = mem;
-	/*
-	 * If a page is accounted as a page cache, insert to inactive list.
-	 * If anon, insert to active list.
-	 */
-	pc->flags = pcg_default_flags[ctype];
-
-	mz = page_cgroup_zoneinfo(pc);
-
-	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_add_list(mz, pc);
-	spin_unlock_irqrestore(&mz->lru_lock, flags);
-	unlock_page_cgroup(pc);
-
-done:
+	__mem_cgroup_commit_charge(mem, pc, ctype);
 	return 0;
-out:
-	css_put(&mem->css);
-	return -ENOMEM;
 }
 
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
+int mem_cgroup_newpage_charge(struct page *page,
+			      struct mm_struct *mm, gfp_t gfp_mask)
 {
-	if (mem_cgroup_subsys.disabled)
+	if (mem_cgroup_disabled())
 		return 0;
 	if (PageCompound(page))
 		return 0;
@@ -589,7 +1120,10 @@
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
-	if (mem_cgroup_subsys.disabled)
+	struct mem_cgroup *mem = NULL;
+	int ret;
+
+	if (mem_cgroup_disabled())
 		return 0;
 	if (PageCompound(page))
 		return 0;
@@ -601,6 +1135,8 @@
 	 * For GFP_NOWAIT case, the page may be pre-charged before calling
 	 * add_to_page_cache(). (See shmem.c) check it here and avoid to call
 	 * charge twice. (It works but has to pay a bit larger cost.)
+	 * And when the page is SwapCache, it should take swap information
+	 * into account. This is under lock_page() now.
 	 */
 	if (!(gfp_mask & __GFP_WAIT)) {
 		struct page_cgroup *pc;
@@ -617,58 +1153,198 @@
 		unlock_page_cgroup(pc);
 	}
 
-	if (unlikely(!mm))
+	if (do_swap_account && PageSwapCache(page)) {
+		mem = try_get_mem_cgroup_from_swapcache(page);
+		if (mem)
+			mm = NULL;
+		else
+			mem = NULL;
+		/* SwapCache may be still linked to LRU now. */
+		mem_cgroup_lru_del_before_commit_swapcache(page);
+	}
+
+	if (unlikely(!mm && !mem))
 		mm = &init_mm;
 
 	if (page_is_file_cache(page))
 		return mem_cgroup_charge_common(page, mm, gfp_mask,
 				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
-	else
-		return mem_cgroup_charge_common(page, mm, gfp_mask,
-				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
+
+	ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+				MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
+	if (mem)
+		css_put(&mem->css);
+	if (PageSwapCache(page))
+		mem_cgroup_lru_add_after_commit_swapcache(page);
+
+	if (do_swap_account && !ret && PageSwapCache(page)) {
+		swp_entry_t ent = {.val = page_private(page)};
+		/* avoid double counting */
+		mem = swap_cgroup_record(ent, NULL);
+		if (mem) {
+			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+			mem_cgroup_put(mem);
+		}
+	}
+	return ret;
 }
 
 /*
+ * During swap-in (try_charge -> commit or cancel), the page is locked.
+ * And when try_charge() successfully returns, one refcnt to the memcg,
+ * without a struct page_cgroup, is acquired. This refcnt will be consumed by
+ * "commit()" or removed by "cancel()".
+ */
+int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
+				 struct page *page,
+				 gfp_t mask, struct mem_cgroup **ptr)
+{
+	struct mem_cgroup *mem;
+	int ret;
+
+	if (mem_cgroup_disabled())
+		return 0;
+
+	if (!do_swap_account)
+		goto charge_cur_mm;
+	/*
+	 * A racing thread's fault, or swapoff, may have already updated
+	 * the pte, and even removed page from swap cache: return success
+	 * to go on to do_swap_page()'s pte_same() test, which should fail.
+	 */
+	if (!PageSwapCache(page))
+		return 0;
+	mem = try_get_mem_cgroup_from_swapcache(page);
+	if (!mem)
+		goto charge_cur_mm;
+	*ptr = mem;
+	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
+	/* drop extra refcnt from tryget */
+	css_put(&mem->css);
+	return ret;
+charge_cur_mm:
+	if (unlikely(!mm))
+		mm = &init_mm;
+	return __mem_cgroup_try_charge(mm, mask, ptr, true);
+}
+
+void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+{
+	struct page_cgroup *pc;
+
+	if (mem_cgroup_disabled())
+		return;
+	if (!ptr)
+		return;
+	pc = lookup_page_cgroup(page);
+	mem_cgroup_lru_del_before_commit_swapcache(page);
+	__mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+	mem_cgroup_lru_add_after_commit_swapcache(page);
+	/*
+	 * Now the swap is in memory. This means this page may be
+	 * counted both as mem and swap, i.e. a double count.
+	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
+	 * under lock_page(), but in do_swap_page()::memory.c, reuse_swap_page()
+	 * may call delete_from_swap_cache() before we reach here.
+	 */
+	if (do_swap_account && PageSwapCache(page)) {
+		swp_entry_t ent = {.val = page_private(page)};
+		struct mem_cgroup *memcg;
+		memcg = swap_cgroup_record(ent, NULL);
+		if (memcg) {
+			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+			mem_cgroup_put(memcg);
+		}
+
+	}
+	/* add this page(page_cgroup) to the LRU we want. */
+
+}
+
+void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
+{
+	if (mem_cgroup_disabled())
+		return;
+	if (!mem)
+		return;
+	res_counter_uncharge(&mem->res, PAGE_SIZE);
+	if (do_swap_account)
+		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+	css_put(&mem->css);
+}
+
+
+/*
  * uncharge if !page_mapped(page)
  */
-static void
+static struct mem_cgroup *
 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 {
 	struct page_cgroup *pc;
-	struct mem_cgroup *mem;
+	struct mem_cgroup *mem = NULL;
 	struct mem_cgroup_per_zone *mz;
-	unsigned long flags;
 
-	if (mem_cgroup_subsys.disabled)
-		return;
+	if (mem_cgroup_disabled())
+		return NULL;
+
+	if (PageSwapCache(page))
+		return NULL;
 
 	/*
 	 * Check if our page_cgroup is valid
 	 */
 	pc = lookup_page_cgroup(page);
 	if (unlikely(!pc || !PageCgroupUsed(pc)))
-		return;
+		return NULL;
 
 	lock_page_cgroup(pc);
-	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED && page_mapped(page))
-	     || !PageCgroupUsed(pc)) {
-		/* This happens at race in zap_pte_range() and do_swap_page()*/
-		unlock_page_cgroup(pc);
-		return;
-	}
-	ClearPageCgroupUsed(pc);
+
 	mem = pc->mem_cgroup;
 
-	mz = page_cgroup_zoneinfo(pc);
-	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_remove_list(mz, pc);
-	spin_unlock_irqrestore(&mz->lru_lock, flags);
-	unlock_page_cgroup(pc);
+	if (!PageCgroupUsed(pc))
+		goto unlock_out;
+
+	switch (ctype) {
+	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+		if (page_mapped(page))
+			goto unlock_out;
+		break;
+	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
+		if (!PageAnon(page)) {	/* Shared memory */
+			if (page->mapping && !page_is_file_cache(page))
+				goto unlock_out;
+		} else if (page_mapped(page)) /* Anon */
+				goto unlock_out;
+		break;
+	default:
+		break;
+	}
 
 	res_counter_uncharge(&mem->res, PAGE_SIZE);
-	css_put(&mem->css);
+	if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
+		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
 
-	return;
+	mem_cgroup_charge_statistics(mem, pc, false);
+	ClearPageCgroupUsed(pc);
+	/*
+	 * pc->mem_cgroup is not cleared here. It will be accessed when the
+	 * page is freed from the LRU. This is safe because an uncharged page
+	 * is expected not to be reused (it is freed soon). The exception is
+	 * SwapCache, which is handled by special functions.
+	 */
+
+	mz = page_cgroup_zoneinfo(pc);
+	unlock_page_cgroup(pc);
+
+	/* at swapout, this memcg will be accessed to record to swap */
+	if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
+		css_put(&mem->css);
+
+	return mem;
+
+unlock_out:
+	unlock_page_cgroup(pc);
+	return NULL;
 }
 
 void mem_cgroup_uncharge_page(struct page *page)
@@ -689,16 +1365,55 @@
 }
 
 /*
- * Before starting migration, account against new page.
+ * Called from __delete_from_swap_cache(); drops the "page" account.
+ * The memcg information is recorded in the swap_cgroup of "ent".
  */
-int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
+void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
+{
+	struct mem_cgroup *memcg;
+
+	memcg = __mem_cgroup_uncharge_common(page,
+					MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
+	/* record memcg information */
+	if (do_swap_account && memcg) {
+		swap_cgroup_record(ent, memcg);
+		mem_cgroup_get(memcg);
+	}
+	if (memcg)
+		css_put(&memcg->css);
+}
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+/*
+ * Called from swap_entry_free(). Removes the record in swap_cgroup and
+ * uncharges the "memsw" account.
+ */
+void mem_cgroup_uncharge_swap(swp_entry_t ent)
+{
+	struct mem_cgroup *memcg;
+
+	if (!do_swap_account)
+		return;
+
+	memcg = swap_cgroup_record(ent, NULL);
+	if (memcg) {
+		res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+		mem_cgroup_put(memcg);
+	}
+}
+#endif
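+
+/*
+ * Taken together: at swap-out, mem_cgroup_uncharge_swapcache() drops the
+ * "mem" charge but records the owning memcg in swap_cgroup and takes a
+ * reference, so "memsw" stays charged while the entry lives on swap; when
+ * the swap entry is finally freed, mem_cgroup_uncharge_swap() uncharges
+ * memsw and drops that reference.
+ */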
+
+/*
+ * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
+ * page belongs to.
+ */
+int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 {
 	struct page_cgroup *pc;
 	struct mem_cgroup *mem = NULL;
-	enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
 	int ret = 0;
 
-	if (mem_cgroup_subsys.disabled)
+	if (mem_cgroup_disabled())
 		return 0;
 
 	pc = lookup_page_cgroup(page);
@@ -706,41 +1421,67 @@
 	if (PageCgroupUsed(pc)) {
 		mem = pc->mem_cgroup;
 		css_get(&mem->css);
-		if (PageCgroupCache(pc)) {
-			if (page_is_file_cache(page))
-				ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
-			else
-				ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-		}
 	}
 	unlock_page_cgroup(pc);
+
 	if (mem) {
-		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
-			ctype, mem);
+		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
 		css_put(&mem->css);
 	}
+	*ptr = mem;
 	return ret;
 }
 
 /* remove redundant charge if migration failed*/
-void mem_cgroup_end_migration(struct page *newpage)
+void mem_cgroup_end_migration(struct mem_cgroup *mem,
+		struct page *oldpage, struct page *newpage)
 {
+	struct page *target, *unused;
+	struct page_cgroup *pc;
+	enum charge_type ctype;
+
+	if (!mem)
+		return;
+
+	/* at migration success, oldpage->mapping is NULL. */
+	if (oldpage->mapping) {
+		target = oldpage;
+		unused = NULL;
+	} else {
+		target = newpage;
+		unused = oldpage;
+	}
+
+	if (PageAnon(target))
+		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
+	else if (page_is_file_cache(target))
+		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
+	else
+		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
+
+	/* unused page is not on radix-tree now. */
+	if (unused)
+		__mem_cgroup_uncharge_common(unused, ctype);
+
+	pc = lookup_page_cgroup(target);
 	/*
-	 * At success, page->mapping is not NULL.
-	 * special rollback care is necessary when
-	 * 1. at migration failure. (newpage->mapping is cleared in this case)
-	 * 2. the newpage was moved but not remapped again because the task
-	 *    exits and the newpage is obsolete. In this case, the new page
-	 *    may be a swapcache. So, we just call mem_cgroup_uncharge_page()
-	 *    always for avoiding mess. The  page_cgroup will be removed if
-	 *    unnecessary. File cache pages is still on radix-tree. Don't
-	 *    care it.
+	 * __mem_cgroup_commit_charge() checks the PCG_USED bit of the
+	 * page_cgroup, so double-counting is effectively avoided.
 	 */
-	if (!newpage->mapping)
-		__mem_cgroup_uncharge_common(newpage,
-				MEM_CGROUP_CHARGE_TYPE_FORCE);
-	else if (PageAnon(newpage))
-		mem_cgroup_uncharge_page(newpage);
+	__mem_cgroup_commit_charge(mem, pc, ctype);
+
+	/*
+	 * Both oldpage and newpage are still under lock_page(), so we don't
+	 * have to care about races in the radix-tree. But we do have to be
+	 * careful about whether this page is mapped or not.
+	 *
+	 * There is a case for !page_mapped(): at the start of migration,
+	 * oldpage was mapped, but by now it has been zapped. We know the
+	 * *target* page is not freed/reused under us, and
+	 * mem_cgroup_uncharge_page() does all the necessary checks.
+	 */
+	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
+		mem_cgroup_uncharge_page(target);
 }
 
 /*
@@ -748,29 +1489,26 @@
  * This is typically used for page reclaiming for shmem for reducing side
  * effect of page allocation from shmem, which is used by some mem_cgroup.
  */
-int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
+int mem_cgroup_shrink_usage(struct page *page,
+			    struct mm_struct *mm,
+			    gfp_t gfp_mask)
 {
-	struct mem_cgroup *mem;
+	struct mem_cgroup *mem = NULL;
 	int progress = 0;
 	int retry = MEM_CGROUP_RECLAIM_RETRIES;
 
-	if (mem_cgroup_subsys.disabled)
+	if (mem_cgroup_disabled())
 		return 0;
-	if (!mm)
+	if (page)
+		mem = try_get_mem_cgroup_from_swapcache(page);
+	if (!mem && mm)
+		mem = try_get_mem_cgroup_from_mm(mm);
+	if (unlikely(!mem))
 		return 0;
 
-	rcu_read_lock();
-	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
-	if (unlikely(!mem)) {
-		rcu_read_unlock();
-		return 0;
-	}
-	css_get(&mem->css);
-	rcu_read_unlock();
-
 	do {
-		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
-		progress += res_counter_check_under_limit(&mem->res);
+		progress = mem_cgroup_hierarchical_reclaim(mem, gfp_mask, true);
+		progress += mem_cgroup_check_under_limit(mem);
 	} while (!progress && --retry);
 
 	css_put(&mem->css);
@@ -779,117 +1517,295 @@
 	return 0;
 }
 
+static DEFINE_MUTEX(set_limit_mutex);
+
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
-				   unsigned long long val)
+				unsigned long long val)
 {
 
 	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
 	int progress;
+	u64 memswlimit;
 	int ret = 0;
 
-	while (res_counter_set_limit(&memcg->res, val)) {
+	while (retry_count) {
 		if (signal_pending(current)) {
 			ret = -EINTR;
 			break;
 		}
-		if (!retry_count) {
-			ret = -EBUSY;
+		/*
+		 * Rather than hiding all of this in some function, this is
+		 * done in an open-coded manner so you can see what really
+		 * happens here.
+		 * We have to guarantee mem->res.limit < mem->memsw.limit.
+		 */
+		mutex_lock(&set_limit_mutex);
+		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+		if (memswlimit < val) {
+			ret = -EINVAL;
+			mutex_unlock(&set_limit_mutex);
 			break;
 		}
-		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
-		if (!progress)
+		ret = res_counter_set_limit(&memcg->res, val);
+		mutex_unlock(&set_limit_mutex);
+
+		if (!ret)
+			break;
+
+		progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
+							   false);
+		if (!progress)
+			retry_count--;
+	}
+
+	return ret;
+}
+
+int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
+				unsigned long long val)
+{
+	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+	u64 memlimit, oldusage, curusage;
+	int ret;
+
+	if (!do_swap_account)
+		return -EINVAL;
+
+	while (retry_count) {
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+		/*
+		 * Rather than hiding all of this in some function, this is
+		 * done in an open-coded manner so you can see what really
+		 * happens here.
+		 * We have to guarantee mem->res.limit < mem->memsw.limit.
+		 */
+		mutex_lock(&set_limit_mutex);
+		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
+		if (memlimit > val) {
+			ret = -EINVAL;
+			mutex_unlock(&set_limit_mutex);
+			break;
+		}
+		ret = res_counter_set_limit(&memcg->memsw, val);
+		mutex_unlock(&set_limit_mutex);
+
+		if (!ret)
+			break;
+
+		oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+		mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true);
+		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+		if (curusage >= oldusage)
 			retry_count--;
 	}
 	return ret;
 }
 
-
 /*
  * This routine traverse page_cgroup in given list and drop them all.
  * *And* this routine doesn't reclaim page itself, just removes page_cgroup.
  */
-#define FORCE_UNCHARGE_BATCH	(128)
-static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
-			    struct mem_cgroup_per_zone *mz,
-			    enum lru_list lru)
+static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
+				int node, int zid, enum lru_list lru)
 {
-	struct page_cgroup *pc;
-	struct page *page;
-	int count = FORCE_UNCHARGE_BATCH;
-	unsigned long flags;
+	struct zone *zone;
+	struct mem_cgroup_per_zone *mz;
+	struct page_cgroup *pc, *busy;
+	unsigned long flags, loop;
 	struct list_head *list;
+	int ret = 0;
 
+	zone = &NODE_DATA(node)->node_zones[zid];
+	mz = mem_cgroup_zoneinfo(mem, node, zid);
 	list = &mz->lists[lru];
 
-	spin_lock_irqsave(&mz->lru_lock, flags);
-	while (!list_empty(list)) {
-		pc = list_entry(list->prev, struct page_cgroup, lru);
-		page = pc->page;
-		if (!PageCgroupUsed(pc))
-			break;
-		get_page(page);
-		spin_unlock_irqrestore(&mz->lru_lock, flags);
-		/*
-		 * Check if this page is on LRU. !LRU page can be found
-		 * if it's under page migration.
-		 */
-		if (PageLRU(page)) {
-			__mem_cgroup_uncharge_common(page,
-					MEM_CGROUP_CHARGE_TYPE_FORCE);
-			put_page(page);
-			if (--count <= 0) {
-				count = FORCE_UNCHARGE_BATCH;
-				cond_resched();
-			}
-		} else {
-			spin_lock_irqsave(&mz->lru_lock, flags);
+	loop = MEM_CGROUP_ZSTAT(mz, lru);
+	/* give some margin against EBUSY etc...*/
+	loop += 256;
+	busy = NULL;
+	while (loop--) {
+		ret = 0;
+		spin_lock_irqsave(&zone->lru_lock, flags);
+		if (list_empty(list)) {
+			spin_unlock_irqrestore(&zone->lru_lock, flags);
 			break;
 		}
-		spin_lock_irqsave(&mz->lru_lock, flags);
+		pc = list_entry(list->prev, struct page_cgroup, lru);
+		if (busy == pc) {
+			list_move(&pc->lru, list);
+			busy = 0;
+			spin_unlock_irqrestore(&zone->lru_lock, flags);
+			continue;
+		}
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+
+		ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
+		if (ret == -ENOMEM)
+			break;
+
+		if (ret == -EBUSY || ret == -EINVAL) {
+			/* found lock contention or "pc" is obsolete. */
+			busy = pc;
+			cond_resched();
+		} else
+			busy = NULL;
 	}
-	spin_unlock_irqrestore(&mz->lru_lock, flags);
+
+	if (!ret && !list_empty(list))
+		return -EBUSY;
+	return ret;
 }
 
 /*
  * make mem_cgroup's charge to be 0 if there is no task.
  * This enables deleting this mem_cgroup.
  */
-static int mem_cgroup_force_empty(struct mem_cgroup *mem)
+static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
 {
-	int ret = -EBUSY;
-	int node, zid;
+	int ret;
+	int node, zid, shrink;
+	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
+	struct cgroup *cgrp = mem->css.cgroup;
 
 	css_get(&mem->css);
-	/*
-	 * page reclaim code (kswapd etc..) will move pages between
-	 * active_list <-> inactive_list while we don't take a lock.
-	 * So, we have to do loop here until all lists are empty.
-	 */
+
+	shrink = 0;
+	/* should free all ? */
+	if (free_all)
+		goto try_to_free;
+move_account:
 	while (mem->res.usage > 0) {
-		if (atomic_read(&mem->css.cgroup->count) > 0)
+		ret = -EBUSY;
+		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
+			goto out;
+		ret = -EINTR;
+		if (signal_pending(current))
 			goto out;
 		/* This is for making all *used* pages to be on LRU. */
 		lru_add_drain_all();
-		for_each_node_state(node, N_POSSIBLE)
-			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
-				struct mem_cgroup_per_zone *mz;
+		ret = 0;
+		for_each_node_state(node, N_POSSIBLE) {
+			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
 				enum lru_list l;
-				mz = mem_cgroup_zoneinfo(mem, node, zid);
-				for_each_lru(l)
-					mem_cgroup_force_empty_list(mem, mz, l);
+				for_each_lru(l) {
+					ret = mem_cgroup_force_empty_list(mem,
+							node, zid, l);
+					if (ret)
+						break;
+				}
 			}
+			if (ret)
+				break;
+		}
+		/* it seems parent cgroup doesn't have enough mem */
+		if (ret == -ENOMEM)
+			goto try_to_free;
 		cond_resched();
 	}
 	ret = 0;
 out:
 	css_put(&mem->css);
 	return ret;
+
+try_to_free:
+	/* returns EBUSY if there is a task or if we come here twice. */
+	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
+		ret = -EBUSY;
+		goto out;
+	}
+	/* we call try-to-free pages to make this cgroup empty */
+	lru_add_drain_all();
+	/* try to free all pages in this cgroup */
+	shrink = 1;
+	while (nr_retries && mem->res.usage > 0) {
+		int progress;
+
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			goto out;
+		}
+		progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
+						false, get_swappiness(mem));
+		if (!progress) {
+			nr_retries--;
+			/* maybe some writeback is necessary */
+			congestion_wait(WRITE, HZ/10);
+		}
+
+	}
+	lru_add_drain();
+	/* try move_account...there may be some *locked* pages. */
+	if (mem->res.usage)
+		goto move_account;
+	ret = 0;
+	goto out;
+}
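+
+/*
+ * Two entry points: the force_empty file handler below passes free_all=true
+ * and starts with reclaim, while the cgroup-removal path passes false and
+ * first tries to move charges to the parent, falling back to reclaim only
+ * when the parent cannot take them (-ENOMEM).
+ */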
+
+int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
+{
+	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
+}
+
+
+static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
+{
+	return mem_cgroup_from_cont(cont)->use_hierarchy;
+}
+
+static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
+					u64 val)
+{
+	int retval = 0;
+	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
+	struct cgroup *parent = cont->parent;
+	struct mem_cgroup *parent_mem = NULL;
+
+	if (parent)
+		parent_mem = mem_cgroup_from_cont(parent);
+
+	cgroup_lock();
+	/*
+	 * If the parent's use_hierarchy is set, we can't make any modifications
+	 * in the child subtrees. If it is unset, then the change can
+	 * occur, provided the current cgroup has no children.
+	 *
+	 * For the root cgroup, parent_mem is NULL, we allow value to be
+	 * set if there are no children.
+	 */
+	if ((!parent_mem || !parent_mem->use_hierarchy) &&
+				(val == 1 || val == 0)) {
+		if (list_empty(&cont->children))
+			mem->use_hierarchy = val;
+		else
+			retval = -EBUSY;
+	} else
+		retval = -EINVAL;
+	cgroup_unlock();
+
+	return retval;
 }
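+
+/*
+ * In short, use_hierarchy can only be changed on a cgroup whose parent does
+ * not already have it set (or which is the root) and which has no children
+ * of its own yet; otherwise the write above fails with -EBUSY or -EINVAL.
+ */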
 
 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
 {
-	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
-				    cft->private);
+	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
+	u64 val = 0;
+	int type, name;
+
+	type = MEMFILE_TYPE(cft->private);
+	name = MEMFILE_ATTR(cft->private);
+	switch (type) {
+	case _MEM:
+		val = res_counter_read_u64(&mem->res, name);
+		break;
+	case _MEMSWAP:
+		if (do_swap_account)
+			val = res_counter_read_u64(&mem->memsw, name);
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return val;
 }
 /*
  * The user of this function is...
@@ -899,15 +1815,22 @@
 			    const char *buffer)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+	int type, name;
 	unsigned long long val;
 	int ret;
 
-	switch (cft->private) {
+	type = MEMFILE_TYPE(cft->private);
+	name = MEMFILE_ATTR(cft->private);
+	switch (name) {
 	case RES_LIMIT:
 		/* This function does all necessary parse...reuse it */
 		ret = res_counter_memparse_write_strategy(buffer, &val);
-		if (!ret)
+		if (ret)
+			break;
+		if (type == _MEM)
 			ret = mem_cgroup_resize_limit(memcg, val);
+		else
+			ret = mem_cgroup_resize_memsw_limit(memcg, val);
 		break;
 	default:
 		ret = -EINVAL; /* should be BUG() ? */
@@ -916,27 +1839,59 @@
 	return ret;
 }
 
+static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
+		unsigned long long *mem_limit, unsigned long long *memsw_limit)
+{
+	struct cgroup *cgroup;
+	unsigned long long min_limit, min_memsw_limit, tmp;
+
+	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
+	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+	cgroup = memcg->css.cgroup;
+	if (!memcg->use_hierarchy)
+		goto out;
+
+	while (cgroup->parent) {
+		cgroup = cgroup->parent;
+		memcg = mem_cgroup_from_cont(cgroup);
+		if (!memcg->use_hierarchy)
+			break;
+		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
+		min_limit = min(min_limit, tmp);
+		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+		min_memsw_limit = min(min_memsw_limit, tmp);
+	}
+out:
+	*mem_limit = min_limit;
+	*memsw_limit = min_memsw_limit;
+	return;
+}
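+
+/*
+ * Example: with use_hierarchy set in both groups, a child limited to 512M
+ * under a parent limited to 256M reports hierarchical_memory_limit = 256M,
+ * i.e. the smallest limit on the hierarchical part of the path to the root.
+ */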
+
 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
 {
 	struct mem_cgroup *mem;
+	int type, name;
 
 	mem = mem_cgroup_from_cont(cont);
-	switch (event) {
+	type = MEMFILE_TYPE(event);
+	name = MEMFILE_ATTR(event);
+	switch (name) {
 	case RES_MAX_USAGE:
-		res_counter_reset_max(&mem->res);
+		if (type == _MEM)
+			res_counter_reset_max(&mem->res);
+		else
+			res_counter_reset_max(&mem->memsw);
 		break;
 	case RES_FAILCNT:
-		res_counter_reset_failcnt(&mem->res);
+		if (type == _MEM)
+			res_counter_reset_failcnt(&mem->res);
+		else
+			res_counter_reset_failcnt(&mem->memsw);
 		break;
 	}
 	return 0;
 }
 
-static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
-{
-	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
-}
-
 static const struct mem_cgroup_stat_desc {
 	const char *msg;
 	u64 unit;
@@ -985,43 +1940,171 @@
 		cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
 
 	}
+	{
+		unsigned long long limit, memsw_limit;
+		memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
+		cb->fill(cb, "hierarchical_memory_limit", limit);
+		if (do_swap_account)
+			cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
+	}
+
+#ifdef CONFIG_DEBUG_VM
+	cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
+
+	{
+		int nid, zid;
+		struct mem_cgroup_per_zone *mz;
+		unsigned long recent_rotated[2] = {0, 0};
+		unsigned long recent_scanned[2] = {0, 0};
+
+		for_each_online_node(nid)
+			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+				mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
+
+				recent_rotated[0] +=
+					mz->reclaim_stat.recent_rotated[0];
+				recent_rotated[1] +=
+					mz->reclaim_stat.recent_rotated[1];
+				recent_scanned[0] +=
+					mz->reclaim_stat.recent_scanned[0];
+				recent_scanned[1] +=
+					mz->reclaim_stat.recent_scanned[1];
+			}
+		cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
+		cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
+		cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
+		cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
+	}
+#endif
+
 	return 0;
 }
 
+static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
+{
+	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+
+	return get_swappiness(memcg);
+}
+
+static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
+				       u64 val)
+{
+	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+	struct mem_cgroup *parent;
+
+	if (val > 100)
+		return -EINVAL;
+
+	if (cgrp->parent == NULL)
+		return -EINVAL;
+
+	parent = mem_cgroup_from_cont(cgrp->parent);
+
+	cgroup_lock();
+
+	/* If under hierarchy, only empty-root can set this value */
+	if ((parent->use_hierarchy) ||
+	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
+		cgroup_unlock();
+		return -EINVAL;
+	}
+
+	spin_lock(&memcg->reclaim_param_lock);
+	memcg->swappiness = val;
+	spin_unlock(&memcg->reclaim_param_lock);
+
+	cgroup_unlock();
+
+	return 0;
+}
+
+
 static struct cftype mem_cgroup_files[] = {
 	{
 		.name = "usage_in_bytes",
-		.private = RES_USAGE,
+		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
 		.read_u64 = mem_cgroup_read,
 	},
 	{
 		.name = "max_usage_in_bytes",
-		.private = RES_MAX_USAGE,
+		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
 		.trigger = mem_cgroup_reset,
 		.read_u64 = mem_cgroup_read,
 	},
 	{
 		.name = "limit_in_bytes",
-		.private = RES_LIMIT,
+		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
 		.write_string = mem_cgroup_write,
 		.read_u64 = mem_cgroup_read,
 	},
 	{
 		.name = "failcnt",
-		.private = RES_FAILCNT,
+		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
 		.trigger = mem_cgroup_reset,
 		.read_u64 = mem_cgroup_read,
 	},
 	{
-		.name = "force_empty",
-		.trigger = mem_force_empty_write,
-	},
-	{
 		.name = "stat",
 		.read_map = mem_control_stat_show,
 	},
+	{
+		.name = "force_empty",
+		.trigger = mem_cgroup_force_empty_write,
+	},
+	{
+		.name = "use_hierarchy",
+		.write_u64 = mem_cgroup_hierarchy_write,
+		.read_u64 = mem_cgroup_hierarchy_read,
+	},
+	{
+		.name = "swappiness",
+		.read_u64 = mem_cgroup_swappiness_read,
+		.write_u64 = mem_cgroup_swappiness_write,
+	},
 };
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+static struct cftype memsw_cgroup_files[] = {
+	{
+		.name = "memsw.usage_in_bytes",
+		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
+		.read_u64 = mem_cgroup_read,
+	},
+	{
+		.name = "memsw.max_usage_in_bytes",
+		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
+		.trigger = mem_cgroup_reset,
+		.read_u64 = mem_cgroup_read,
+	},
+	{
+		.name = "memsw.limit_in_bytes",
+		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
+		.write_string = mem_cgroup_write,
+		.read_u64 = mem_cgroup_read,
+	},
+	{
+		.name = "memsw.failcnt",
+		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
+		.trigger = mem_cgroup_reset,
+		.read_u64 = mem_cgroup_read,
+	},
+};
+
+static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
+{
+	if (!do_swap_account)
+		return 0;
+	return cgroup_add_files(cont, ss, memsw_cgroup_files,
+				ARRAY_SIZE(memsw_cgroup_files));
+};
+#else
+static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
+{
+	return 0;
+}
+#endif
+
 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 {
 	struct mem_cgroup_per_node *pn;
@@ -1047,7 +2130,6 @@
 
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 		mz = &pn->zoneinfo[zone];
-		spin_lock_init(&mz->lru_lock);
 		for_each_lru(l)
 			INIT_LIST_HEAD(&mz->lists[l]);
 	}
@@ -1059,55 +2141,113 @@
 	kfree(mem->info.nodeinfo[node]);
 }
 
+static int mem_cgroup_size(void)
+{
+	int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
+	return sizeof(struct mem_cgroup) + cpustat_size;
+}
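+
+/*
+ * The per-cpu stat array is sized by nr_cpu_ids and allocated inline,
+ * directly after the struct; this is why the "stat" member must be placed
+ * at the end of struct mem_cgroup (see the comment in its definition above).
+ */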
+
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
 	struct mem_cgroup *mem;
+	int size = mem_cgroup_size();
 
-	if (sizeof(*mem) < PAGE_SIZE)
-		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
+	if (size < PAGE_SIZE)
+		mem = kmalloc(size, GFP_KERNEL);
 	else
-		mem = vmalloc(sizeof(*mem));
+		mem = vmalloc(size);
 
 	if (mem)
-		memset(mem, 0, sizeof(*mem));
+		memset(mem, 0, size);
 	return mem;
 }
 
-static void mem_cgroup_free(struct mem_cgroup *mem)
+/*
+ * When destroying a mem_cgroup, references from swap_cgroup can remain
+ * (scanning them all at force_empty is too costly...).
+ *
+ * Instead of clearing all references at force_empty, we remember
+ * the number of references from swap_cgroup and free the mem_cgroup when
+ * it goes down to 0.
+ *
+ * Removal of cgroup itself succeeds regardless of refs from swap.
+ */
+
+static void __mem_cgroup_free(struct mem_cgroup *mem)
 {
-	if (sizeof(*mem) < PAGE_SIZE)
+	int node;
+
+	for_each_node_state(node, N_POSSIBLE)
+		free_mem_cgroup_per_zone_info(mem, node);
+
+	if (mem_cgroup_size() < PAGE_SIZE)
 		kfree(mem);
 	else
 		vfree(mem);
 }
 
+static void mem_cgroup_get(struct mem_cgroup *mem)
+{
+	atomic_inc(&mem->refcnt);
+}
 
-static struct cgroup_subsys_state *
+static void mem_cgroup_put(struct mem_cgroup *mem)
+{
+	if (atomic_dec_and_test(&mem->refcnt))
+		__mem_cgroup_free(mem);
+}
+
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+static void __init enable_swap_cgroup(void)
+{
+	if (!mem_cgroup_disabled() && really_do_swap_account)
+		do_swap_account = 1;
+}
+#else
+static void __init enable_swap_cgroup(void)
+{
+}
+#endif
+
+static struct cgroup_subsys_state * __ref
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
-	struct mem_cgroup *mem;
+	struct mem_cgroup *mem, *parent;
 	int node;
 
-	if (unlikely((cont->parent) == NULL)) {
-		mem = &init_mem_cgroup;
-	} else {
-		mem = mem_cgroup_alloc();
-		if (!mem)
-			return ERR_PTR(-ENOMEM);
-	}
-
-	res_counter_init(&mem->res);
+	mem = mem_cgroup_alloc();
+	if (!mem)
+		return ERR_PTR(-ENOMEM);
 
 	for_each_node_state(node, N_POSSIBLE)
 		if (alloc_mem_cgroup_per_zone_info(mem, node))
 			goto free_out;
+	/* root ? */
+	if (cont->parent == NULL) {
+		enable_swap_cgroup();
+		parent = NULL;
+	} else {
+		parent = mem_cgroup_from_cont(cont->parent);
+		mem->use_hierarchy = parent->use_hierarchy;
+	}
 
+	if (parent && parent->use_hierarchy) {
+		res_counter_init(&mem->res, &parent->res);
+		res_counter_init(&mem->memsw, &parent->memsw);
+	} else {
+		res_counter_init(&mem->res, NULL);
+		res_counter_init(&mem->memsw, NULL);
+	}
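+	/*
+	 * When use_hierarchy is set, the res_counter_init() calls above
+	 * parent the child's counters to the parent's, so a charge in the
+	 * child also counts toward every ancestor's limit up the subtree.
+	 */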
+	mem->last_scanned_child = NULL;
+	spin_lock_init(&mem->reclaim_param_lock);
+
+	if (parent)
+		mem->swappiness = get_swappiness(parent);
+	atomic_set(&mem->refcnt, 1);
 	return &mem->css;
 free_out:
-	for_each_node_state(node, N_POSSIBLE)
-		free_mem_cgroup_per_zone_info(mem, node);
-	if (cont->parent != NULL)
-		mem_cgroup_free(mem);
+	__mem_cgroup_free(mem);
 	return ERR_PTR(-ENOMEM);
 }
 
@@ -1115,26 +2255,33 @@
 					struct cgroup *cont)
 {
 	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
-	mem_cgroup_force_empty(mem);
+	mem_cgroup_force_empty(mem, false);
 }
 
 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 				struct cgroup *cont)
 {
-	int node;
 	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
+	struct mem_cgroup *last_scanned_child = mem->last_scanned_child;
 
-	for_each_node_state(node, N_POSSIBLE)
-		free_mem_cgroup_per_zone_info(mem, node);
-
-	mem_cgroup_free(mem_cgroup_from_cont(cont));
+	if (last_scanned_child) {
+		VM_BUG_ON(!mem_cgroup_is_obsolete(last_scanned_child));
+		mem_cgroup_put(last_scanned_child);
+	}
+	mem_cgroup_put(mem);
 }
 
 static int mem_cgroup_populate(struct cgroup_subsys *ss,
 				struct cgroup *cont)
 {
-	return cgroup_add_files(cont, ss, mem_cgroup_files,
-					ARRAY_SIZE(mem_cgroup_files));
+	int ret;
+
+	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
+				ARRAY_SIZE(mem_cgroup_files));
+
+	if (!ret)
+		ret = register_memsw_files(cont, ss);
+	return ret;
 }
 
 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
@@ -1142,25 +2289,12 @@
 				struct cgroup *old_cont,
 				struct task_struct *p)
 {
-	struct mm_struct *mm;
-	struct mem_cgroup *mem, *old_mem;
-
-	mm = get_task_mm(p);
-	if (mm == NULL)
-		return;
-
-	mem = mem_cgroup_from_cont(cont);
-	old_mem = mem_cgroup_from_cont(old_cont);
-
+	mutex_lock(&memcg_tasklist);
 	/*
-	 * Only thread group leaders are allowed to migrate, the mm_struct is
-	 * in effect owned by the leader
+	 * FIXME: It would be better to move this process's charges from the
+	 * old memcg to the new one, but that is still on the TODO list.
 	 */
-	if (!thread_group_leader(p))
-		goto out;
-
-out:
-	mmput(mm);
+	mutex_unlock(&memcg_tasklist);
 }
 
 struct cgroup_subsys mem_cgroup_subsys = {
@@ -1173,3 +2307,13 @@
 	.attach = mem_cgroup_move_task,
 	.early_init = 0,
 };
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+
+static int __init disable_swap_account(char *s)
+{
+	really_do_swap_account = 0;
+	return 1;
+}
+__setup("noswapaccount", disable_swap_account);
+#endif
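The mem_cgroup_get()/mem_cgroup_put() pair added above follows the usual last-put-frees reference-counting idiom: the structure starts with a count of one and __mem_cgroup_free() runs only when atomic_dec_and_test() sees the count reach zero, which is what allows swap_cgroup to hold references past cgroup removal. A minimal userspace sketch of that idiom, with hypothetical names and C11 atomics rather than the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refcnt;
        /* payload ... */
    };

    static struct obj *obj_alloc(void)
    {
        struct obj *o = calloc(1, sizeof(*o));

        if (o)
            atomic_init(&o->refcnt, 1);     /* the creator owns one reference */
        return o;
    }

    static void obj_get(struct obj *o)
    {
        atomic_fetch_add(&o->refcnt, 1);    /* take an extra reference */
    }

    static void obj_put(struct obj *o)
    {
        /* free only when the last reference goes away */
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
            free(o);
    }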
diff --git a/mm/memory.c b/mm/memory.c
index 3f8fa06..22bfa7a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1511,6 +1511,7 @@
 			unsigned long pfn)
 {
 	int ret;
+	pgprot_t pgprot = vma->vm_page_prot;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range).  However we would like
@@ -1525,10 +1526,10 @@
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
-	if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
 		return -EINVAL;
 
-	ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	ret = insert_pfn(vma, addr, pfn, pgprot);
 
 	if (ret)
 		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
@@ -1671,9 +1672,15 @@
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
-	err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
-	if (err)
+	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
+	if (err) {
+		/*
+		 * To indicate that track_pfn related cleanup is not
+		 * needed by the higher-level routine calling unmap_vmas.
+		 */
+		vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
 		return -EINVAL;
+	}
 
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
@@ -2000,7 +2007,7 @@
 	cow_user_page(new_page, old_page, address, vma);
 	__SetPageUptodate(new_page);
 
-	if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
+	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
 		goto oom_free_new;
 
 	/*
@@ -2392,6 +2399,7 @@
 	struct page *page;
 	swp_entry_t entry;
 	pte_t pte;
+	struct mem_cgroup *ptr = NULL;
 	int ret = 0;
 
 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
@@ -2430,7 +2438,7 @@
 	lock_page(page);
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-	if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
 		ret = VM_FAULT_OOM;
 		unlock_page(page);
 		goto out;
@@ -2448,7 +2456,19 @@
 		goto out_nomap;
 	}
 
-	/* The page isn't present yet, go ahead with the fault. */
+	/*
+	 * The page isn't present yet, go ahead with the fault.
+	 *
+	 * Be careful about the sequence of operations here.
+	 * To get its accounting right, reuse_swap_page() must be called
+	 * while the page is counted on swap but not yet in mapcount i.e.
+	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
+	 * must be called after the swap_free(), or it will never succeed.
+	 * Because delete_from_swap_page() may be called by reuse_swap_page(),
+	 * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
+	 * in page->private. In this case, a record in swap_cgroup  is silently
+	 * discarded at swap_free().
+	 */
 
 	inc_mm_counter(mm, anon_rss);
 	pte = mk_pte(page, vma->vm_page_prot);
@@ -2456,10 +2476,11 @@
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 		write_access = 0;
 	}
-
 	flush_icache_page(vma, page);
 	set_pte_at(mm, address, page_table, pte);
 	page_add_anon_rmap(page, vma, address);
+	/* It's better to call commit-charge after rmap is established */
+	mem_cgroup_commit_charge_swapin(page, ptr);
 
 	swap_free(entry);
 	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
@@ -2480,7 +2501,7 @@
 out:
 	return ret;
 out_nomap:
-	mem_cgroup_uncharge_page(page);
+	mem_cgroup_cancel_charge_swapin(ptr);
 	pte_unmap_unlock(page_table, ptl);
 	unlock_page(page);
 	page_cache_release(page);
@@ -2510,7 +2531,7 @@
 		goto oom;
 	__SetPageUptodate(page);
 
-	if (mem_cgroup_charge(page, mm, GFP_KERNEL))
+	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 		goto oom_free_page;
 
 	entry = mk_pte(page, vma->vm_page_prot);
@@ -2601,7 +2622,7 @@
 				ret = VM_FAULT_OOM;
 				goto out;
 			}
-			if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+			if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
 				ret = VM_FAULT_OOM;
 				page_cache_release(page);
 				goto out;
@@ -3151,6 +3172,15 @@
 #ifdef CONFIG_PROVE_LOCKING
 void might_fault(void)
 {
+	/*
+	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
+	 * holding the mmap_sem; this is safe because kernel memory doesn't
+	 * get paged out, therefore we'll never actually fault, and the
+	 * below annotations will generate false positives.
+	 */
+	if (segment_eq(get_fs(), KERNEL_DS))
+		return;
+
 	might_sleep();
 	/*
 	 * it would be nicer only to annotate paths which are not under
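The swap-in fault path in this file now uses a try/commit/cancel charge protocol instead of a single mem_cgroup_charge() call: the charge is reserved with mem_cgroup_try_charge_swapin() before any page table state changes, committed with mem_cgroup_commit_charge_swapin() only once the rmap is established, and handed back with mem_cgroup_cancel_charge_swapin() on the error path. The shape of that protocol can be sketched with a toy, single-threaded counter (hypothetical names, not the memcg internals):

    #include <stdbool.h>

    /* Toy counter illustrating a reserve/commit/cancel charge protocol. */
    struct counter {
        long limit;
        long reserved;   /* charges taken by try_charge() but not yet committed */
        long committed;  /* charges owned by live, visible objects */
    };

    /* Reserve the charge up front; nothing user-visible has changed yet. */
    static bool try_charge(struct counter *c, long nr)
    {
        if (c->reserved + c->committed + nr > c->limit)
            return false;               /* over limit: fail before touching state */
        c->reserved += nr;
        return true;
    }

    /* The object is now visible (e.g. mapped into a page table): keep the charge. */
    static void commit_charge(struct counter *c, long nr)
    {
        c->reserved -= nr;
        c->committed += nr;
    }

    /* The operation failed after the reserve: hand the charge back. */
    static void cancel_charge(struct counter *c, long nr)
    {
        c->reserved -= nr;
    }

Committing only after page_add_anon_rmap() mirrors the ordering constraint spelled out in the comment above the swap-in fault code.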
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e412ffa..3eb4a6f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1068,10 +1068,9 @@
 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
 }
 
-asmlinkage long sys_mbind(unsigned long start, unsigned long len,
-			unsigned long mode,
-			unsigned long __user *nmask, unsigned long maxnode,
-			unsigned flags)
+SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
+		unsigned long, mode, unsigned long __user *, nmask,
+		unsigned long, maxnode, unsigned, flags)
 {
 	nodemask_t nodes;
 	int err;
@@ -1091,8 +1090,8 @@
 }
 
 /* Set the process memory policy */
-asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
-		unsigned long maxnode)
+SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
+		unsigned long, maxnode)
 {
 	int err;
 	nodemask_t nodes;
@@ -1110,9 +1109,9 @@
 	return do_set_mempolicy(mode, flags, &nodes);
 }
 
-asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
-		const unsigned long __user *old_nodes,
-		const unsigned long __user *new_nodes)
+SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+		const unsigned long __user *, old_nodes,
+		const unsigned long __user *, new_nodes)
 {
 	const struct cred *cred = current_cred(), *tcred;
 	struct mm_struct *mm;
@@ -1185,10 +1184,9 @@
 
 
 /* Retrieve NUMA policy */
-asmlinkage long sys_get_mempolicy(int __user *policy,
-				unsigned long __user *nmask,
-				unsigned long maxnode,
-				unsigned long addr, unsigned long flags)
+SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
+		unsigned long __user *, nmask, unsigned long, maxnode,
+		unsigned long, addr, unsigned long, flags)
 {
 	int err;
 	int uninitialized_var(pval);
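Every syscall entry point in this and the following mm files is converted from an open-coded `asmlinkage long sys_foo(...)` definition to the SYSCALL_DEFINEn() macro family, where n is the argument count and the macro arguments alternate between type and parameter name. As a sketch, a hypothetical two-argument syscall would change roughly like this (the syscall itself is invented for illustration):

    /* before: open-coded prototype */
    asmlinkage long sys_frobnicate(unsigned long addr, size_t len);

    /* after: name first, then alternating types and parameter names */
    SYSCALL_DEFINE2(frobnicate, unsigned long, addr, size_t, len)
    {
        if (addr & ~PAGE_MASK)
            return -EINVAL;
        /* ... do the actual work ... */
        return 0;
    }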
diff --git a/mm/migrate.c b/mm/migrate.c
index 5537398..2bb4e1d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -121,20 +121,6 @@
 	if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
 		goto out;
 
-	/*
-	 * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
-	 * Failure is not an option here: we're now expected to remove every
-	 * migration pte, and will cause crashes otherwise.  Normally this
-	 * is not an issue: mem_cgroup_prepare_migration bumped up the old
-	 * page_cgroup count for safety, that's now attached to the new page,
-	 * so this charge should just be another incrementation of the count,
-	 * to keep in balance with rmap.c's mem_cgroup_uncharging.  But if
-	 * there's been a force_empty, those reference counts may no longer
-	 * be reliable, and this charge can actually fail: oh well, we don't
-	 * make the situation any worse by proceeding as if it had succeeded.
-	 */
-	mem_cgroup_charge(new, mm, GFP_ATOMIC);
-
 	get_page(new);
 	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
 	if (is_write_migration_entry(entry))
@@ -378,9 +364,6 @@
 	anon = PageAnon(page);
 	page->mapping = NULL;
 
-	if (!anon) /* This page was removed from radix-tree. */
-		mem_cgroup_uncharge_cache_page(page);
-
 	/*
 	 * If any waiters have accumulated on the new page then
 	 * wake them up.
@@ -614,6 +597,7 @@
 	struct page *newpage = get_new_page(page, private, &result);
 	int rcu_locked = 0;
 	int charge = 0;
+	struct mem_cgroup *mem;
 
 	if (!newpage)
 		return -ENOMEM;
@@ -623,24 +607,26 @@
 		goto move_newpage;
 	}
 
-	charge = mem_cgroup_prepare_migration(page, newpage);
-	if (charge == -ENOMEM) {
-		rc = -ENOMEM;
-		goto move_newpage;
-	}
 	/* prepare cgroup just returns 0 or -ENOMEM */
-	BUG_ON(charge);
-
 	rc = -EAGAIN;
+
 	if (!trylock_page(page)) {
 		if (!force)
 			goto move_newpage;
 		lock_page(page);
 	}
 
+	/* charge against new page */
+	charge = mem_cgroup_prepare_migration(page, &mem);
+	if (charge == -ENOMEM) {
+		rc = -ENOMEM;
+		goto unlock;
+	}
+	BUG_ON(charge);
+
 	if (PageWriteback(page)) {
 		if (!force)
-			goto unlock;
+			goto uncharge;
 		wait_on_page_writeback(page);
 	}
 	/*
@@ -693,7 +679,9 @@
 rcu_unlock:
 	if (rcu_locked)
 		rcu_read_unlock();
-
+uncharge:
+	if (!charge)
+		mem_cgroup_end_migration(mem, page, newpage);
 unlock:
 	unlock_page(page);
 
@@ -709,8 +697,6 @@
 	}
 
 move_newpage:
-	if (!charge)
-		mem_cgroup_end_migration(newpage);
 
 	/*
 	 * Move the new page to the LRU. If migration was not successful
@@ -1069,10 +1055,10 @@
  * Move a list of pages in the address space of the currently executing
  * process.
  */
-asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
-			const void __user * __user *pages,
-			const int __user *nodes,
-			int __user *status, int flags)
+SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+		const void __user * __user *, pages,
+		const int __user *, nodes,
+		int __user *, status, int, flags)
 {
 	const struct cred *cred = current_cred(), *tcred;
 	struct task_struct *task;
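The reshuffled error handling in unmap_and_move() above keeps its cleanup labels in the reverse order of setup: the memcg charge is now taken after the page lock, so later failures jump to the new `uncharge:` label (which calls mem_cgroup_end_migration() only if the charge succeeded) and then fall through to `unlock:`. A minimal sketch of that goto-unwind idiom, with hypothetical helpers standing in for the real work:

    /* Hypothetical helpers; the point is the label ordering, not the work. */
    static int take_lock(void)    { return 0; }
    static int take_charge(void)  { return 0; }
    static int do_work(void)      { return 0; }
    static void drop_charge(void) { }
    static void drop_lock(void)   { }

    static int do_operation(void)
    {
        int ret;

        ret = take_lock();              /* step 1 */
        if (ret)
            goto out;

        ret = take_charge();            /* step 2, taken under the lock */
        if (ret)
            goto unlock;

        ret = do_work();

        drop_charge();                  /* undo step 2 */
    unlock:
        drop_lock();                    /* undo step 1 */
    out:
        return ret;
    }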
diff --git a/mm/mincore.c b/mm/mincore.c
index 5178800..8cb508f 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -177,8 +177,8 @@
  *		mapped
  *  -EAGAIN - A kernel resource was temporarily unavailable.
  */
-asmlinkage long sys_mincore(unsigned long start, size_t len,
-	unsigned char __user * vec)
+SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
+		unsigned char __user *, vec)
 {
 	long retval;
 	unsigned long pages;
diff --git a/mm/mlock.c b/mm/mlock.c
index e125156..2904a34 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -530,7 +530,7 @@
 	return error;
 }
 
-asmlinkage long sys_mlock(unsigned long start, size_t len)
+SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
 {
 	unsigned long locked;
 	unsigned long lock_limit;
@@ -558,7 +558,7 @@
 	return error;
 }
 
-asmlinkage long sys_munlock(unsigned long start, size_t len)
+SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
 {
 	int ret;
 
@@ -595,7 +595,7 @@
 	return 0;
 }
 
-asmlinkage long sys_mlockall(int flags)
+SYSCALL_DEFINE1(mlockall, int, flags)
 {
 	unsigned long lock_limit;
 	int ret = -EINVAL;
@@ -623,7 +623,7 @@
 	return ret;
 }
 
-asmlinkage long sys_munlockall(void)
+SYSCALL_DEFINE0(munlockall)
 {
 	int ret;
 
diff --git a/mm/mmap.c b/mm/mmap.c
index a910c045..8d95902 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -245,7 +245,7 @@
 	return next;
 }
 
-asmlinkage unsigned long sys_brk(unsigned long brk)
+SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
 	unsigned long rlim, retval;
 	unsigned long newbrk, oldbrk;
@@ -1948,7 +1948,7 @@
 
 EXPORT_SYMBOL(do_munmap);
 
-asmlinkage long sys_munmap(unsigned long addr, size_t len)
+SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
 {
 	int ret;
 	struct mm_struct *mm = current->mm;
@@ -2472,3 +2472,13 @@
 
 	mutex_unlock(&mm_all_locks_mutex);
 }
+
+/*
+ * initialise the VMA slab
+ */
+void __init mmap_init(void)
+{
+	vm_area_cachep = kmem_cache_create("vm_area_struct",
+			sizeof(struct vm_area_struct), 0,
+			SLAB_PANIC, NULL);
+}
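mmap_init() now sets up the vm_area_struct slab cache explicitly via kmem_cache_create(); SLAB_PANIC makes boot fail loudly if the cache cannot be created, and objects subsequently come from kmem_cache_zalloc()/kmem_cache_alloc() and are returned with kmem_cache_free(). A hedged sketch of the same pattern for a made-up structure (not part of this patch):

    #include <linux/init.h>
    #include <linux/slab.h>

    /* "struct widget" and these helpers are hypothetical. */
    struct widget {
        int id;
    };

    static struct kmem_cache *widget_cachep;

    void __init widget_init(void)
    {
        /* name, object size, alignment, flags, constructor */
        widget_cachep = kmem_cache_create("widget", sizeof(struct widget),
                                          0, SLAB_PANIC, NULL);
    }

    struct widget *widget_alloc(void)
    {
        /* zeroed object from the dedicated cache */
        return kmem_cache_zalloc(widget_cachep, GFP_KERNEL);
    }

    void widget_free(struct widget *w)
    {
        kmem_cache_free(widget_cachep, w);
    }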
diff --git a/mm/mprotect.c b/mm/mprotect.c
index d0f6e7c..abe2694 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -217,8 +217,8 @@
 	return error;
 }
 
-asmlinkage long
-sys_mprotect(unsigned long start, size_t len, unsigned long prot)
+SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+		unsigned long, prot)
 {
 	unsigned long vm_flags, nstart, end, tmp, reqprot;
 	struct vm_area_struct *vma, *prev;
diff --git a/mm/mremap.c b/mm/mremap.c
index 646de95..a39b7b9 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -420,9 +420,9 @@
 	return ret;
 }
 
-asmlinkage unsigned long sys_mremap(unsigned long addr,
-	unsigned long old_len, unsigned long new_len,
-	unsigned long flags, unsigned long new_addr)
+SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+		unsigned long, new_len, unsigned long, flags,
+		unsigned long, new_addr)
 {
 	unsigned long ret;
 
diff --git a/mm/msync.c b/mm/msync.c
index 07dae08..4083209 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -28,7 +28,7 @@
  * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
  * applications.
  */
-asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
+SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
 {
 	unsigned long end;
 	struct mm_struct *mm = current->mm;
diff --git a/mm/nommu.c b/mm/nommu.c
index 1c28ea3..8cee8c8 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -6,11 +6,11 @@
  *
  *  See Documentation/nommu-mmap.txt
  *
- *  Copyright (c) 2004-2005 David Howells <dhowells@redhat.com>
+ *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
  *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
- *  Copyright (c) 2007      Paul Mundt <lethal@linux-sh.org>
+ *  Copyright (c) 2007-2008 Paul Mundt <lethal@linux-sh.org>
  */
 
 #include <linux/module.h>
@@ -33,6 +33,28 @@
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include "internal.h"
+
+static inline __attribute__((format(printf, 1, 2)))
+void no_printk(const char *fmt, ...)
+{
+}
+
+#if 0
+#define kenter(FMT, ...) \
+	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) \
+	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) \
+	printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
+#else
+#define kenter(FMT, ...) \
+	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) \
+	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) \
+	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
+#endif
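When the `#if 0` branch is not taken, kenter()/kleave()/kdebug() expand to calls to the empty but printf-attributed no_printk(), so the format strings and arguments are still type-checked by the compiler while generating no output (and, after inlining, essentially no code). The same trick works in plain userspace C; a small sketch with a hypothetical trace() macro:

    #include <stdio.h>

    /* A no-op that keeps printf-style format checking alive. */
    static inline __attribute__((format(printf, 1, 2)))
    void no_printk(const char *fmt, ...)
    {
        (void)fmt;
    }

    #ifdef DEBUG_TRACE
    #define trace(FMT, ...) \
        fprintf(stderr, "==> %s(" FMT ")\n", __func__, ##__VA_ARGS__)
    #else
    #define trace(FMT, ...) \
        no_printk("==> %s(" FMT ")\n", __func__, ##__VA_ARGS__)
    #endif

    int main(void)
    {
        trace("%d", 42);    /* checked either way; emitted only with -DDEBUG_TRACE */
        return 0;
    }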
 
 #include "internal.h"
 
@@ -40,19 +62,22 @@
 struct page *mem_map;
 unsigned long max_mapnr;
 unsigned long num_physpages;
-unsigned long askedalloc, realalloc;
 atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50; /* default is 50% */
 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+int sysctl_nr_trim_pages = 1; /* page trimming behaviour */
 int heap_stack_gap = 0;
 
+atomic_t mmap_pages_allocated;
+
 EXPORT_SYMBOL(mem_map);
 EXPORT_SYMBOL(num_physpages);
 
-/* list of shareable VMAs */
-struct rb_root nommu_vma_tree = RB_ROOT;
-DECLARE_RWSEM(nommu_vma_sem);
+/* list of mapped, potentially shareable regions */
+static struct kmem_cache *vm_region_jar;
+struct rb_root nommu_region_tree = RB_ROOT;
+DECLARE_RWSEM(nommu_region_sem);
 
 struct vm_operations_struct generic_file_vm_ops = {
 };
@@ -124,6 +149,20 @@
 		return ksize(objp);
 
 	/*
+	 * If it's not a compound page, see if we have a matching VMA
+	 * region. This test is intentionally done in reverse order,
+	 * so if there's no VMA, we still fall through and hand back
+	 * PAGE_SIZE for 0-order pages.
+	 */
+	if (!PageCompound(page)) {
+		struct vm_area_struct *vma;
+
+		vma = find_vma(current->mm, (unsigned long)objp);
+		if (vma)
+			return vma->vm_end - vma->vm_start;
+	}
+
+	/*
 	 * The ksize() function is only guaranteed to work for pointers
 	 * returned by kmalloc(). So handle arbitrary pointers here.
 	 */
@@ -377,7 +416,7 @@
  *  to a regular file.  in this case, the unmapping will need
  *  to invoke file system routines that need the global lock.
  */
-asmlinkage unsigned long sys_brk(unsigned long brk)
+SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
 	struct mm_struct *mm = current->mm;
 
@@ -401,39 +440,281 @@
 	return mm->brk = brk;
 }
 
-#ifdef DEBUG
-static void show_process_blocks(void)
+/*
+ * initialise the VMA and region record slabs
+ */
+void __init mmap_init(void)
 {
-	struct vm_list_struct *vml;
+	vm_region_jar = kmem_cache_create("vm_region_jar",
+					  sizeof(struct vm_region), 0,
+					  SLAB_PANIC, NULL);
+	vm_area_cachep = kmem_cache_create("vm_area_struct",
+					   sizeof(struct vm_area_struct), 0,
+					   SLAB_PANIC, NULL);
+}
 
-	printk("Process blocks %d:", current->pid);
+/*
+ * validate the region tree
+ * - the caller must hold the region lock
+ */
+#ifdef CONFIG_DEBUG_NOMMU_REGIONS
+static noinline void validate_nommu_regions(void)
+{
+	struct vm_region *region, *last;
+	struct rb_node *p, *lastp;
 
-	for (vml = &current->mm->context.vmlist; vml; vml = vml->next) {
-		printk(" %p: %p", vml, vml->vma);
-		if (vml->vma)
-			printk(" (%d @%lx #%d)",
-			       kobjsize((void *) vml->vma->vm_start),
-			       vml->vma->vm_start,
-			       atomic_read(&vml->vma->vm_usage));
-		printk(vml->next ? " ->" : ".\n");
+	lastp = rb_first(&nommu_region_tree);
+	if (!lastp)
+		return;
+
+	last = rb_entry(lastp, struct vm_region, vm_rb);
+	if (unlikely(last->vm_end <= last->vm_start))
+		BUG();
+	if (unlikely(last->vm_top < last->vm_end))
+		BUG();
+
+	while ((p = rb_next(lastp))) {
+		region = rb_entry(p, struct vm_region, vm_rb);
+		last = rb_entry(lastp, struct vm_region, vm_rb);
+
+		if (unlikely(region->vm_end <= region->vm_start))
+			BUG();
+		if (unlikely(region->vm_top < region->vm_end))
+			BUG();
+		if (unlikely(region->vm_start < last->vm_top))
+			BUG();
+
+		lastp = p;
 	}
 }
-#endif /* DEBUG */
+#else
+#define validate_nommu_regions() do {} while(0)
+#endif
+
+/*
+ * add a region into the global tree
+ */
+static void add_nommu_region(struct vm_region *region)
+{
+	struct vm_region *pregion;
+	struct rb_node **p, *parent;
+
+	validate_nommu_regions();
+
+	BUG_ON(region->vm_start & ~PAGE_MASK);
+
+	parent = NULL;
+	p = &nommu_region_tree.rb_node;
+	while (*p) {
+		parent = *p;
+		pregion = rb_entry(parent, struct vm_region, vm_rb);
+		if (region->vm_start < pregion->vm_start)
+			p = &(*p)->rb_left;
+		else if (region->vm_start > pregion->vm_start)
+			p = &(*p)->rb_right;
+		else if (pregion == region)
+			return;
+		else
+			BUG();
+	}
+
+	rb_link_node(&region->vm_rb, parent, p);
+	rb_insert_color(&region->vm_rb, &nommu_region_tree);
+
+	validate_nommu_regions();
+}
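add_nommu_region() uses the standard Linux rbtree insertion idiom: walk the rb_node child pointers down from the root while comparing keys, remember the parent, then splice the new node in with rb_link_node() and let rb_insert_color() rebalance. A condensed sketch of the same idiom for a hypothetical structure keyed by a start address:

    #include <linux/rbtree.h>

    /* Hypothetical node type keyed by ->start, mirroring the walk above. */
    struct span {
        unsigned long start;
        struct rb_node rb;
    };

    static void span_tree_insert(struct rb_root *root, struct span *new)
    {
        struct rb_node **p = &root->rb_node, *parent = NULL;

        while (*p) {
            struct span *cur;

            parent = *p;
            cur = rb_entry(parent, struct span, rb);
            if (new->start < cur->start)
                p = &(*p)->rb_left;     /* descend left */
            else
                p = &(*p)->rb_right;    /* descend right; duplicates go right here */
        }

        rb_link_node(&new->rb, parent, p);      /* hook into the found slot */
        rb_insert_color(&new->rb, root);        /* recolour/rebalance */
    }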
+
+/*
+ * delete a region from the global tree
+ */
+static void delete_nommu_region(struct vm_region *region)
+{
+	BUG_ON(!nommu_region_tree.rb_node);
+
+	validate_nommu_regions();
+	rb_erase(&region->vm_rb, &nommu_region_tree);
+	validate_nommu_regions();
+}
+
+/*
+ * free a contiguous series of pages
+ */
+static void free_page_series(unsigned long from, unsigned long to)
+{
+	for (; from < to; from += PAGE_SIZE) {
+		struct page *page = virt_to_page(from);
+
+		kdebug("- free %lx", from);
+		atomic_dec(&mmap_pages_allocated);
+		if (page_count(page) != 1)
+			kdebug("free page %p [%d]", page, page_count(page));
+		put_page(page);
+	}
+}
+
+/*
+ * release a reference to a region
+ * - the caller must hold the region semaphore, which this releases
+ * - the region may not have been added to the tree yet, in which case vm_top
+ *   will equal vm_start
+ */
+static void __put_nommu_region(struct vm_region *region)
+	__releases(nommu_region_sem)
+{
+	kenter("%p{%d}", region, atomic_read(&region->vm_usage));
+
+	BUG_ON(!nommu_region_tree.rb_node);
+
+	if (atomic_dec_and_test(&region->vm_usage)) {
+		if (region->vm_top > region->vm_start)
+			delete_nommu_region(region);
+		up_write(&nommu_region_sem);
+
+		if (region->vm_file)
+			fput(region->vm_file);
+
+		/* IO memory and memory shared directly out of the pagecache
+		 * from ramfs/tmpfs mustn't be released here */
+		if (region->vm_flags & VM_MAPPED_COPY) {
+			kdebug("free series");
+			free_page_series(region->vm_start, region->vm_top);
+		}
+		kmem_cache_free(vm_region_jar, region);
+	} else {
+		up_write(&nommu_region_sem);
+	}
+}
+
+/*
+ * release a reference to a region
+ */
+static void put_nommu_region(struct vm_region *region)
+{
+	down_write(&nommu_region_sem);
+	__put_nommu_region(region);
+}
 
 /*
  * add a VMA into a process's mm_struct in the appropriate place in the list
+ * and tree, and also add it to the address space's page tree if it is not an
+ * anonymous page
  * - should be called with mm->mmap_sem held writelocked
  */
-static void add_vma_to_mm(struct mm_struct *mm, struct vm_list_struct *vml)
+static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-	struct vm_list_struct **ppv;
+	struct vm_area_struct *pvma, **pp;
+	struct address_space *mapping;
+	struct rb_node **p, *parent;
 
-	for (ppv = &current->mm->context.vmlist; *ppv; ppv = &(*ppv)->next)
-		if ((*ppv)->vma->vm_start > vml->vma->vm_start)
+	kenter(",%p", vma);
+
+	BUG_ON(!vma->vm_region);
+
+	mm->map_count++;
+	vma->vm_mm = mm;
+
+	/* add the VMA to the mapping */
+	if (vma->vm_file) {
+		mapping = vma->vm_file->f_mapping;
+
+		flush_dcache_mmap_lock(mapping);
+		vma_prio_tree_insert(vma, &mapping->i_mmap);
+		flush_dcache_mmap_unlock(mapping);
+	}
+
+	/* add the VMA to the tree */
+	parent = NULL;
+	p = &mm->mm_rb.rb_node;
+	while (*p) {
+		parent = *p;
+		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
+
+		/* sort by: start addr, end addr, VMA struct addr in that order
+		 * (the latter is necessary as we may get identical VMAs) */
+		if (vma->vm_start < pvma->vm_start)
+			p = &(*p)->rb_left;
+		else if (vma->vm_start > pvma->vm_start)
+			p = &(*p)->rb_right;
+		else if (vma->vm_end < pvma->vm_end)
+			p = &(*p)->rb_left;
+		else if (vma->vm_end > pvma->vm_end)
+			p = &(*p)->rb_right;
+		else if (vma < pvma)
+			p = &(*p)->rb_left;
+		else if (vma > pvma)
+			p = &(*p)->rb_right;
+		else
+			BUG();
+	}
+
+	rb_link_node(&vma->vm_rb, parent, p);
+	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
+
+	/* add VMA to the VMA list also */
+	for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) {
+		if (pvma->vm_start > vma->vm_start)
 			break;
+		if (pvma->vm_start < vma->vm_start)
+			continue;
+		if (pvma->vm_end < vma->vm_end)
+			break;
+	}
 
-	vml->next = *ppv;
-	*ppv = vml;
+	vma->vm_next = *pp;
+	*pp = vma;
+}
+
+/*
+ * delete a VMA from its owning mm_struct and address space
+ */
+static void delete_vma_from_mm(struct vm_area_struct *vma)
+{
+	struct vm_area_struct **pp;
+	struct address_space *mapping;
+	struct mm_struct *mm = vma->vm_mm;
+
+	kenter("%p", vma);
+
+	mm->map_count--;
+	if (mm->mmap_cache == vma)
+		mm->mmap_cache = NULL;
+
+	/* remove the VMA from the mapping */
+	if (vma->vm_file) {
+		mapping = vma->vm_file->f_mapping;
+
+		flush_dcache_mmap_lock(mapping);
+		vma_prio_tree_remove(vma, &mapping->i_mmap);
+		flush_dcache_mmap_unlock(mapping);
+	}
+
+	/* remove from the MM's tree and list */
+	rb_erase(&vma->vm_rb, &mm->mm_rb);
+	for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) {
+		if (*pp == vma) {
+			*pp = vma->vm_next;
+			break;
+		}
+	}
+
+	vma->vm_mm = NULL;
+}
+
+/*
+ * destroy a VMA record
+ */
+static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+	kenter("%p", vma);
+	if (vma->vm_ops && vma->vm_ops->close)
+		vma->vm_ops->close(vma);
+	if (vma->vm_file) {
+		fput(vma->vm_file);
+		if (vma->vm_flags & VM_EXECUTABLE)
+			removed_exe_file_vma(mm);
+	}
+	put_nommu_region(vma->vm_region);
+	kmem_cache_free(vm_area_cachep, vma);
 }
 
 /*
@@ -442,19 +723,26 @@
  */
 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
-	struct vm_list_struct *loop, *vml;
+	struct vm_area_struct *vma;
+	struct rb_node *n = mm->mm_rb.rb_node;
 
-	/* search the vm_start ordered list */
-	vml = NULL;
-	for (loop = mm->context.vmlist; loop; loop = loop->next) {
-		if (loop->vma->vm_start > addr)
-			break;
-		vml = loop;
+	/* check the cache first */
+	vma = mm->mmap_cache;
+	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
+		return vma;
+
+	/* trawl the tree (there may be multiple mappings in which addr
+	 * resides) */
+	for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
+		vma = rb_entry(n, struct vm_area_struct, vm_rb);
+		if (vma->vm_start > addr)
+			return NULL;
+		if (vma->vm_end > addr) {
+			mm->mmap_cache = vma;
+			return vma;
+		}
 	}
 
-	if (vml && vml->vma->vm_end > addr)
-		return vml->vma;
-
 	return NULL;
 }
 EXPORT_SYMBOL(find_vma);
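find_vma() now tries the per-mm single-entry cache (mm->mmap_cache) before scanning the tree in order with rb_first()/rb_next(); a plain binary descent is not used because, under NOMMU, several private mappings may contain the same address. The one-entry "last hit" cache in front of a slower search is a general idiom; a toy sketch (hypothetical types, a linear scan standing in for the tree walk):

    #include <stddef.h>

    struct elem {
        unsigned long start, end;       /* half-open range [start, end) */
    };

    struct table {
        struct elem *cache;             /* last successful lookup, or NULL */
        struct elem *slots;
        size_t nr;
    };

    static struct elem *table_lookup(struct table *t, unsigned long addr)
    {
        size_t i;

        /* fast path: most lookups hit the same element as the previous one */
        if (t->cache && t->cache->start <= addr && addr < t->cache->end)
            return t->cache;

        /* slow path: scan, and remember the hit for next time */
        for (i = 0; i < t->nr; i++) {
            struct elem *e = &t->slots[i];

            if (e->start <= addr && addr < e->end) {
                t->cache = e;
                return e;
            }
        }
        return NULL;
    }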
@@ -468,6 +756,10 @@
 	return find_vma(mm, addr);
 }
 
+/*
+ * expand a stack to a given address
+ * - not supported under NOMMU conditions
+ */
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
 	return -ENOMEM;
@@ -477,114 +769,37 @@
  * look up the first VMA that exactly matches addr
  * - should be called with mm->mmap_sem at least held readlocked
  */
-static inline struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
-						    unsigned long addr)
-{
-	struct vm_list_struct *vml;
-
-	/* search the vm_start ordered list */
-	for (vml = mm->context.vmlist; vml; vml = vml->next) {
-		if (vml->vma->vm_start == addr)
-			return vml->vma;
-		if (vml->vma->vm_start > addr)
-			break;
-	}
-
-	return NULL;
-}
-
-/*
- * find a VMA in the global tree
- */
-static inline struct vm_area_struct *find_nommu_vma(unsigned long start)
+static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
+					     unsigned long addr,
+					     unsigned long len)
 {
 	struct vm_area_struct *vma;
-	struct rb_node *n = nommu_vma_tree.rb_node;
+	struct rb_node *n = mm->mm_rb.rb_node;
+	unsigned long end = addr + len;
 
-	while (n) {
+	/* check the cache first */
+	vma = mm->mmap_cache;
+	if (vma && vma->vm_start == addr && vma->vm_end == end)
+		return vma;
+
+	/* trawl the tree (there may be multiple mappings in which addr
+	 * resides) */
+	for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
 		vma = rb_entry(n, struct vm_area_struct, vm_rb);
-
-		if (start < vma->vm_start)
-			n = n->rb_left;
-		else if (start > vma->vm_start)
-			n = n->rb_right;
-		else
+		if (vma->vm_start < addr)
+			continue;
+		if (vma->vm_start > addr)
+			return NULL;
+		if (vma->vm_end == end) {
+			mm->mmap_cache = vma;
 			return vma;
+		}
 	}
 
 	return NULL;
 }
 
 /*
- * add a VMA in the global tree
- */
-static void add_nommu_vma(struct vm_area_struct *vma)
-{
-	struct vm_area_struct *pvma;
-	struct address_space *mapping;
-	struct rb_node **p = &nommu_vma_tree.rb_node;
-	struct rb_node *parent = NULL;
-
-	/* add the VMA to the mapping */
-	if (vma->vm_file) {
-		mapping = vma->vm_file->f_mapping;
-
-		flush_dcache_mmap_lock(mapping);
-		vma_prio_tree_insert(vma, &mapping->i_mmap);
-		flush_dcache_mmap_unlock(mapping);
-	}
-
-	/* add the VMA to the master list */
-	while (*p) {
-		parent = *p;
-		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
-
-		if (vma->vm_start < pvma->vm_start) {
-			p = &(*p)->rb_left;
-		}
-		else if (vma->vm_start > pvma->vm_start) {
-			p = &(*p)->rb_right;
-		}
-		else {
-			/* mappings are at the same address - this can only
-			 * happen for shared-mem chardevs and shared file
-			 * mappings backed by ramfs/tmpfs */
-			BUG_ON(!(pvma->vm_flags & VM_SHARED));
-
-			if (vma < pvma)
-				p = &(*p)->rb_left;
-			else if (vma > pvma)
-				p = &(*p)->rb_right;
-			else
-				BUG();
-		}
-	}
-
-	rb_link_node(&vma->vm_rb, parent, p);
-	rb_insert_color(&vma->vm_rb, &nommu_vma_tree);
-}
-
-/*
- * delete a VMA from the global list
- */
-static void delete_nommu_vma(struct vm_area_struct *vma)
-{
-	struct address_space *mapping;
-
-	/* remove the VMA from the mapping */
-	if (vma->vm_file) {
-		mapping = vma->vm_file->f_mapping;
-
-		flush_dcache_mmap_lock(mapping);
-		vma_prio_tree_remove(vma, &mapping->i_mmap);
-		flush_dcache_mmap_unlock(mapping);
-	}
-
-	/* remove from the master list */
-	rb_erase(&vma->vm_rb, &nommu_vma_tree);
-}
-
-/*
  * determine whether a mapping should be permitted and, if so, what sort of
  * mapping we're capable of supporting
  */
@@ -596,7 +811,7 @@
 				 unsigned long pgoff,
 				 unsigned long *_capabilities)
 {
-	unsigned long capabilities;
+	unsigned long capabilities, rlen;
 	unsigned long reqprot = prot;
 	int ret;
 
@@ -616,12 +831,12 @@
 		return -EINVAL;
 
 	/* Careful about overflows.. */
-	len = PAGE_ALIGN(len);
-	if (!len || len > TASK_SIZE)
+	rlen = PAGE_ALIGN(len);
+	if (!rlen || rlen > TASK_SIZE)
 		return -ENOMEM;
 
 	/* offset overflow? */
-	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
+	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
 		return -EOVERFLOW;
 
 	if (file) {
@@ -795,13 +1010,18 @@
 }
 
 /*
- * set up a shared mapping on a file
+ * set up a shared mapping on a file (the driver or filesystem provides and
+ * pins the storage)
  */
-static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len)
+static int do_mmap_shared_file(struct vm_area_struct *vma)
 {
 	int ret;
 
 	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
+	if (ret == 0) {
+		vma->vm_region->vm_top = vma->vm_region->vm_end;
+		return ret;
+	}
 	if (ret != -ENOSYS)
 		return ret;
 
@@ -815,10 +1035,14 @@
 /*
  * set up a private mapping or an anonymous shared mapping
  */
-static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
+static int do_mmap_private(struct vm_area_struct *vma,
+			   struct vm_region *region,
+			   unsigned long len)
 {
+	struct page *pages;
+	unsigned long total, point, n, rlen;
 	void *base;
-	int ret;
+	int ret, order;
 
 	/* invoke the file's mapping function so that it can keep track of
 	 * shared mappings on devices or memory
@@ -826,34 +1050,63 @@
 	 */
 	if (vma->vm_file) {
 		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
-		if (ret != -ENOSYS) {
+		if (ret == 0) {
 			/* shouldn't return success if we're not sharing */
-			BUG_ON(ret == 0 && !(vma->vm_flags & VM_MAYSHARE));
-			return ret; /* success or a real error */
+			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
+			vma->vm_region->vm_top = vma->vm_region->vm_end;
+			return ret;
 		}
+		if (ret != -ENOSYS)
+			return ret;
 
 		/* getting an ENOSYS error indicates that direct mmap isn't
 		 * possible (as opposed to tried but failed) so we'll try to
 		 * make a private copy of the data and map that instead */
 	}
 
+	rlen = PAGE_ALIGN(len);
+
 	/* allocate some memory to hold the mapping
 	 * - note that this may not return a page-aligned address if the object
 	 *   we're allocating is smaller than a page
 	 */
-	base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
-	if (!base)
+	order = get_order(rlen);
+	kdebug("alloc order %d for %lx", order, len);
+
+	pages = alloc_pages(GFP_KERNEL, order);
+	if (!pages)
 		goto enomem;
 
-	vma->vm_start = (unsigned long) base;
-	vma->vm_end = vma->vm_start + len;
-	vma->vm_flags |= VM_MAPPED_COPY;
+	total = 1 << order;
+	atomic_add(total, &mmap_pages_allocated);
 
-#ifdef WARN_ON_SLACK
-	if (len + WARN_ON_SLACK <= kobjsize(result))
-		printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n",
-		       len, current->pid, kobjsize(result) - len);
-#endif
+	point = rlen >> PAGE_SHIFT;
+
+	/* we allocated a power-of-2 sized page set, so we may want to trim off
+	 * the excess */
+	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
+		while (total > point) {
+			order = ilog2(total - point);
+			n = 1 << order;
+			kdebug("shave %lu/%lu @%lu", n, total - point, total);
+			atomic_sub(n, &mmap_pages_allocated);
+			total -= n;
+			set_page_refcounted(pages + total);
+			__free_pages(pages + total, order);
+		}
+	}
+
+	for (point = 1; point < total; point++)
+		set_page_refcounted(&pages[point]);
+
+	base = page_address(pages);
+	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
+	region->vm_start = (unsigned long) base;
+	region->vm_end   = region->vm_start + rlen;
+	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
+
+	vma->vm_start = region->vm_start;
+	vma->vm_end   = region->vm_start + len;
 
 	if (vma->vm_file) {
 		/* read the contents of a file into the copy */
@@ -865,26 +1118,28 @@
 
 		old_fs = get_fs();
 		set_fs(KERNEL_DS);
-		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
+		ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos);
 		set_fs(old_fs);
 
 		if (ret < 0)
 			goto error_free;
 
 		/* clear the last little bit */
-		if (ret < len)
-			memset(base + ret, 0, len - ret);
+		if (ret < rlen)
+			memset(base + ret, 0, rlen - ret);
 
 	} else {
 		/* if it's an anonymous mapping, then just clear it */
-		memset(base, 0, len);
+		memset(base, 0, rlen);
 	}
 
 	return 0;
 
 error_free:
-	kfree(base);
-	vma->vm_start = 0;
+	free_page_series(region->vm_start, region->vm_end);
+	region->vm_start = vma->vm_start = 0;
+	region->vm_end   = vma->vm_end = 0;
+	region->vm_top   = 0;
 	return ret;
 
 enomem:
@@ -904,13 +1159,14 @@
 			    unsigned long flags,
 			    unsigned long pgoff)
 {
-	struct vm_list_struct *vml = NULL;
-	struct vm_area_struct *vma = NULL;
+	struct vm_area_struct *vma;
+	struct vm_region *region;
 	struct rb_node *rb;
-	unsigned long capabilities, vm_flags;
-	void *result;
+	unsigned long capabilities, vm_flags, result;
 	int ret;
 
+	kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);
+
 	if (!(flags & MAP_FIXED))
 		addr = round_hint_to_min(addr);
 
@@ -918,73 +1174,120 @@
 	 * mapping */
 	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
 				    &capabilities);
-	if (ret < 0)
+	if (ret < 0) {
+		kleave(" = %d [val]", ret);
 		return ret;
+	}
 
 	/* we've determined that we can make the mapping, now translate what we
 	 * now know into VMA flags */
 	vm_flags = determine_vm_flags(file, prot, flags, capabilities);
 
-	/* we're going to need to record the mapping if it works */
-	vml = kzalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
-	if (!vml)
-		goto error_getting_vml;
+	/* we're going to need to record the mapping */
+	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
+	if (!region)
+		goto error_getting_region;
 
-	down_write(&nommu_vma_sem);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	if (!vma)
+		goto error_getting_vma;
 
-	/* if we want to share, we need to check for VMAs created by other
+	atomic_set(&region->vm_usage, 1);
+	region->vm_flags = vm_flags;
+	region->vm_pgoff = pgoff;
+
+	INIT_LIST_HEAD(&vma->anon_vma_node);
+	vma->vm_flags = vm_flags;
+	vma->vm_pgoff = pgoff;
+
+	if (file) {
+		region->vm_file = file;
+		get_file(file);
+		vma->vm_file = file;
+		get_file(file);
+		if (vm_flags & VM_EXECUTABLE) {
+			added_exe_file_vma(current->mm);
+			vma->vm_mm = current->mm;
+		}
+	}
+
+	down_write(&nommu_region_sem);
+
+	/* if we want to share, we need to check for regions created by other
 	 * mmap() calls that overlap with our proposed mapping
-	 * - we can only share with an exact match on most regular files
+	 * - we can only share with a superset match on most regular files
 	 * - shared mappings on character devices and memory backed files are
 	 *   permitted to overlap inexactly as far as we are concerned; in
 	 *   these cases, sharing is handled in the driver or filesystem rather
 	 *   than here
 	 */
 	if (vm_flags & VM_MAYSHARE) {
-		unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-		unsigned long vmpglen;
+		struct vm_region *pregion;
+		unsigned long pglen, rpglen, pgend, rpgend, start;
 
-		/* suppress VMA sharing for shared regions */
-		if (vm_flags & VM_SHARED &&
-		    capabilities & BDI_CAP_MAP_DIRECT)
-			goto dont_share_VMAs;
+		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		pgend = pgoff + pglen;
 
-		for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) {
-			vma = rb_entry(rb, struct vm_area_struct, vm_rb);
+		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
+			pregion = rb_entry(rb, struct vm_region, vm_rb);
 
-			if (!(vma->vm_flags & VM_MAYSHARE))
+			if (!(pregion->vm_flags & VM_MAYSHARE))
 				continue;
 
 			/* search for overlapping mappings on the same file */
-			if (vma->vm_file->f_path.dentry->d_inode != file->f_path.dentry->d_inode)
+			if (pregion->vm_file->f_path.dentry->d_inode !=
+			    file->f_path.dentry->d_inode)
 				continue;
 
-			if (vma->vm_pgoff >= pgoff + pglen)
+			if (pregion->vm_pgoff >= pgend)
 				continue;
 
-			vmpglen = vma->vm_end - vma->vm_start + PAGE_SIZE - 1;
-			vmpglen >>= PAGE_SHIFT;
-			if (pgoff >= vma->vm_pgoff + vmpglen)
+			rpglen = pregion->vm_end - pregion->vm_start;
+			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+			rpgend = pregion->vm_pgoff + rpglen;
+			if (pgoff >= rpgend)
 				continue;
 
-			/* handle inexactly overlapping matches between mappings */
-			if (vma->vm_pgoff != pgoff || vmpglen != pglen) {
+			/* handle inexactly overlapping matches between
+			 * mappings */
+			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
+			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
+				/* new mapping is not a subset of the region */
 				if (!(capabilities & BDI_CAP_MAP_DIRECT))
 					goto sharing_violation;
 				continue;
 			}
 
-			/* we've found a VMA we can share */
-			atomic_inc(&vma->vm_usage);
+			/* we've found a region we can share */
+			atomic_inc(&pregion->vm_usage);
+			vma->vm_region = pregion;
+			start = pregion->vm_start;
+			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
+			vma->vm_start = start;
+			vma->vm_end = start + len;
 
-			vml->vma = vma;
-			result = (void *) vma->vm_start;
-			goto shared;
+			if (pregion->vm_flags & VM_MAPPED_COPY) {
+				kdebug("share copy");
+				vma->vm_flags |= VM_MAPPED_COPY;
+			} else {
+				kdebug("share mmap");
+				ret = do_mmap_shared_file(vma);
+				if (ret < 0) {
+					vma->vm_region = NULL;
+					vma->vm_start = 0;
+					vma->vm_end = 0;
+					atomic_dec(&pregion->vm_usage);
+					pregion = NULL;
+					goto error_just_free;
+				}
+			}
+			fput(region->vm_file);
+			kmem_cache_free(vm_region_jar, region);
+			region = pregion;
+			result = start;
+			goto share;
 		}
 
-	dont_share_VMAs:
-		vma = NULL;
-
 		/* obtain the address at which to make a shared mapping
 		 * - this is the hook for quasi-memory character devices to
 		 *   tell us the location of a shared mapping
@@ -995,113 +1298,93 @@
 			if (IS_ERR((void *) addr)) {
 				ret = addr;
 				if (ret != (unsigned long) -ENOSYS)
-					goto error;
+					goto error_just_free;
 
 				/* the driver refused to tell us where to site
 				 * the mapping so we'll have to attempt to copy
 				 * it */
 				ret = (unsigned long) -ENODEV;
 				if (!(capabilities & BDI_CAP_MAP_COPY))
-					goto error;
+					goto error_just_free;
 
 				capabilities &= ~BDI_CAP_MAP_DIRECT;
+			} else {
+				vma->vm_start = region->vm_start = addr;
+				vma->vm_end = region->vm_end = addr + len;
 			}
 		}
 	}
 
-	/* we're going to need a VMA struct as well */
-	vma = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
-	if (!vma)
-		goto error_getting_vma;
-
-	INIT_LIST_HEAD(&vma->anon_vma_node);
-	atomic_set(&vma->vm_usage, 1);
-	if (file) {
-		get_file(file);
-		if (vm_flags & VM_EXECUTABLE) {
-			added_exe_file_vma(current->mm);
-			vma->vm_mm = current->mm;
-		}
-	}
-	vma->vm_file	= file;
-	vma->vm_flags	= vm_flags;
-	vma->vm_start	= addr;
-	vma->vm_end	= addr + len;
-	vma->vm_pgoff	= pgoff;
-
-	vml->vma = vma;
+	vma->vm_region = region;
 
 	/* set up the mapping */
 	if (file && vma->vm_flags & VM_SHARED)
-		ret = do_mmap_shared_file(vma, len);
+		ret = do_mmap_shared_file(vma);
 	else
-		ret = do_mmap_private(vma, len);
+		ret = do_mmap_private(vma, region, len);
 	if (ret < 0)
-		goto error;
+		goto error_put_region;
+
+	add_nommu_region(region);
 
 	/* okay... we have a mapping; now we have to register it */
-	result = (void *) vma->vm_start;
-
-	if (vma->vm_flags & VM_MAPPED_COPY) {
-		realalloc += kobjsize(result);
-		askedalloc += len;
-	}
-
-	realalloc += kobjsize(vma);
-	askedalloc += sizeof(*vma);
+	result = vma->vm_start;
 
 	current->mm->total_vm += len >> PAGE_SHIFT;
 
-	add_nommu_vma(vma);
+share:
+	add_vma_to_mm(current->mm, vma);
 
- shared:
-	realalloc += kobjsize(vml);
-	askedalloc += sizeof(*vml);
-
-	add_vma_to_mm(current->mm, vml);
-
-	up_write(&nommu_vma_sem);
+	up_write(&nommu_region_sem);
 
 	if (prot & PROT_EXEC)
-		flush_icache_range((unsigned long) result,
-				   (unsigned long) result + len);
+		flush_icache_range(result, result + len);
 
-#ifdef DEBUG
-	printk("do_mmap:\n");
-	show_process_blocks();
-#endif
+	kleave(" = %lx", result);
+	return result;
 
-	return (unsigned long) result;
-
- error:
-	up_write(&nommu_vma_sem);
-	kfree(vml);
+error_put_region:
+	__put_nommu_region(region);
 	if (vma) {
 		if (vma->vm_file) {
 			fput(vma->vm_file);
 			if (vma->vm_flags & VM_EXECUTABLE)
 				removed_exe_file_vma(vma->vm_mm);
 		}
-		kfree(vma);
+		kmem_cache_free(vm_area_cachep, vma);
 	}
+	kleave(" = %d [pr]", ret);
 	return ret;
 
- sharing_violation:
-	up_write(&nommu_vma_sem);
-	printk("Attempt to share mismatched mappings\n");
-	kfree(vml);
-	return -EINVAL;
+error_just_free:
+	up_write(&nommu_region_sem);
+error:
+	fput(region->vm_file);
+	kmem_cache_free(vm_region_jar, region);
+	fput(vma->vm_file);
+	if (vma->vm_flags & VM_EXECUTABLE)
+		removed_exe_file_vma(vma->vm_mm);
+	kmem_cache_free(vm_area_cachep, vma);
+	kleave(" = %d", ret);
+	return ret;
 
- error_getting_vma:
-	up_write(&nommu_vma_sem);
-	kfree(vml);
-	printk("Allocation of vma for %lu byte allocation from process %d failed\n",
+sharing_violation:
+	up_write(&nommu_region_sem);
+	printk(KERN_WARNING "Attempt to share mismatched mappings\n");
+	ret = -EINVAL;
+	goto error;
+
+error_getting_vma:
+	kmem_cache_free(vm_region_jar, region);
+	printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
+	       " from process %d failed\n",
 	       len, current->pid);
 	show_free_areas();
 	return -ENOMEM;
 
- error_getting_vml:
-	printk("Allocation of vml for %lu byte allocation from process %d failed\n",
+error_getting_region:
+	printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
+	       " from process %d failed\n",
 	       len, current->pid);
 	show_free_areas();
 	return -ENOMEM;
@@ -1109,90 +1392,188 @@
 EXPORT_SYMBOL(do_mmap_pgoff);
 
 /*
- * handle mapping disposal for uClinux
+ * split a vma into two pieces at address 'addr'; a new vma is allocated
+ * either for the first part or for the tail.
  */
-static void put_vma(struct mm_struct *mm, struct vm_area_struct *vma)
+int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+	      unsigned long addr, int new_below)
 {
-	if (vma) {
-		down_write(&nommu_vma_sem);
+	struct vm_area_struct *new;
+	struct vm_region *region;
+	unsigned long npages;
 
-		if (atomic_dec_and_test(&vma->vm_usage)) {
-			delete_nommu_vma(vma);
+	kenter("");
 
-			if (vma->vm_ops && vma->vm_ops->close)
-				vma->vm_ops->close(vma);
+	/* we're only permitted to split anonymous regions that have a single
+	 * owner */
+	if (vma->vm_file ||
+	    atomic_read(&vma->vm_region->vm_usage) != 1)
+		return -ENOMEM;
 
-			/* IO memory and memory shared directly out of the pagecache from
-			 * ramfs/tmpfs mustn't be released here */
-			if (vma->vm_flags & VM_MAPPED_COPY) {
-				realalloc -= kobjsize((void *) vma->vm_start);
-				askedalloc -= vma->vm_end - vma->vm_start;
-				kfree((void *) vma->vm_start);
-			}
+	if (mm->map_count >= sysctl_max_map_count)
+		return -ENOMEM;
 
-			realalloc -= kobjsize(vma);
-			askedalloc -= sizeof(*vma);
+	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
+	if (!region)
+		return -ENOMEM;
 
-			if (vma->vm_file) {
-				fput(vma->vm_file);
-				if (vma->vm_flags & VM_EXECUTABLE)
-					removed_exe_file_vma(mm);
-			}
-			kfree(vma);
-		}
-
-		up_write(&nommu_vma_sem);
+	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	if (!new) {
+		kmem_cache_free(vm_region_jar, region);
+		return -ENOMEM;
 	}
+
+	/* most fields are the same, copy all, and then fixup */
+	*new = *vma;
+	*region = *vma->vm_region;
+	new->vm_region = region;
+
+	npages = (addr - vma->vm_start) >> PAGE_SHIFT;
+
+	if (new_below) {
+		region->vm_top = region->vm_end = new->vm_end = addr;
+	} else {
+		region->vm_start = new->vm_start = addr;
+		region->vm_pgoff = new->vm_pgoff += npages;
+	}
+
+	if (new->vm_ops && new->vm_ops->open)
+		new->vm_ops->open(new);
+
+	delete_vma_from_mm(vma);
+	down_write(&nommu_region_sem);
+	delete_nommu_region(vma->vm_region);
+	if (new_below) {
+		vma->vm_region->vm_start = vma->vm_start = addr;
+		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
+	} else {
+		vma->vm_region->vm_end = vma->vm_end = addr;
+		vma->vm_region->vm_top = addr;
+	}
+	add_nommu_region(vma->vm_region);
+	add_nommu_region(new->vm_region);
+	up_write(&nommu_region_sem);
+	add_vma_to_mm(mm, vma);
+	add_vma_to_mm(mm, new);
+	return 0;
+}
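split_vma() copies both the VMA and its backing region and then trims the copies so that they meet at addr, bumping vm_pgoff of whichever half no longer begins at the original vm_start by the number of pages skipped. The interval arithmetic reduces to the following sketch (plain C, hypothetical types, 4KiB pages assumed):

    #include <assert.h>

    #define PAGE_SHIFT 12   /* assumed 4KiB pages for the example */

    struct range {
        unsigned long start, end;       /* byte range [start, end) */
        unsigned long pgoff;            /* file offset of 'start', in pages */
    };

    /* Split 'orig' at 'addr'; 'tail' receives the upper half. */
    static void range_split(struct range *orig, struct range *tail,
                            unsigned long addr)
    {
        unsigned long npages = (addr - orig->start) >> PAGE_SHIFT;

        assert(addr > orig->start && addr < orig->end);

        *tail = *orig;                  /* copy everything, then fix up */
        tail->start = addr;
        tail->pgoff += npages;          /* upper half starts npages further into the file */

        orig->end = addr;               /* lower half keeps its start and pgoff */
    }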
+
+/*
+ * shrink a VMA by removing the specified chunk from either the beginning or
+ * the end
+ */
+static int shrink_vma(struct mm_struct *mm,
+		      struct vm_area_struct *vma,
+		      unsigned long from, unsigned long to)
+{
+	struct vm_region *region;
+
+	kenter("");
+
+	/* adjust the VMA's pointers, which may reposition it in the MM's tree
+	 * and list */
+	delete_vma_from_mm(vma);
+	if (from > vma->vm_start)
+		vma->vm_end = from;
+	else
+		vma->vm_start = to;
+	add_vma_to_mm(mm, vma);
+
+	/* cut the backing region down to size */
+	region = vma->vm_region;
+	BUG_ON(atomic_read(&region->vm_usage) != 1);
+
+	down_write(&nommu_region_sem);
+	delete_nommu_region(region);
+	if (from > region->vm_start) {
+		to = region->vm_top;
+		region->vm_top = region->vm_end = from;
+	} else {
+		region->vm_start = to;
+	}
+	add_nommu_region(region);
+	up_write(&nommu_region_sem);
+
+	free_page_series(from, to);
+	return 0;
 }
 
 /*
  * release a mapping
- * - under NOMMU conditions the parameters must match exactly to the mapping to
- *   be removed
+ * - under NOMMU conditions the chunk to be unmapped must be backed by a single
+ *   VMA, though it need not cover the whole VMA
  */
-int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 {
-	struct vm_list_struct *vml, **parent;
-	unsigned long end = addr + len;
+	struct vm_area_struct *vma;
+	struct rb_node *rb;
+	unsigned long end = start + len;
+	int ret;
 
-#ifdef DEBUG
-	printk("do_munmap:\n");
-#endif
+	kenter(",%lx,%zx", start, len);
 
-	for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next) {
-		if ((*parent)->vma->vm_start > addr)
-			break;
-		if ((*parent)->vma->vm_start == addr &&
-		    ((len == 0) || ((*parent)->vma->vm_end == end)))
-			goto found;
+	if (len == 0)
+		return -EINVAL;
+
+	/* find the first potentially overlapping VMA */
+	vma = find_vma(mm, start);
+	if (!vma) {
+		printk(KERN_WARNING
+		       "munmap of memory not mmapped by process %d (%s):"
+		       " 0x%lx-0x%lx\n",
+		       current->pid, current->comm, start, start + len - 1);
+		return -EINVAL;
 	}
 
-	printk("munmap of non-mmaped memory by process %d (%s): %p\n",
-	       current->pid, current->comm, (void *) addr);
-	return -EINVAL;
+	/* we're allowed to split an anonymous VMA but not a file-backed one */
+	if (vma->vm_file) {
+		do {
+			if (start > vma->vm_start) {
+				kleave(" = -EINVAL [miss]");
+				return -EINVAL;
+			}
+			if (end == vma->vm_end)
+				goto erase_whole_vma;
+			rb = rb_next(&vma->vm_rb);
+			vma = rb_entry(rb, struct vm_area_struct, vm_rb);
+		} while (rb);
+		kleave(" = -EINVAL [split file]");
+		return -EINVAL;
+	} else {
+		/* the chunk must be a subset of the VMA found */
+		if (start == vma->vm_start && end == vma->vm_end)
+			goto erase_whole_vma;
+		if (start < vma->vm_start || end > vma->vm_end) {
+			kleave(" = -EINVAL [superset]");
+			return -EINVAL;
+		}
+		if (start & ~PAGE_MASK) {
+			kleave(" = -EINVAL [unaligned start]");
+			return -EINVAL;
+		}
+		if (end != vma->vm_end && end & ~PAGE_MASK) {
+			kleave(" = -EINVAL [unaligned split]");
+			return -EINVAL;
+		}
+		if (start != vma->vm_start && end != vma->vm_end) {
+			ret = split_vma(mm, vma, start, 1);
+			if (ret < 0) {
+				kleave(" = %d [split]", ret);
+				return ret;
+			}
+		}
+		return shrink_vma(mm, vma, start, end);
+	}
 
- found:
-	vml = *parent;
-
-	put_vma(mm, vml->vma);
-
-	*parent = vml->next;
-	realalloc -= kobjsize(vml);
-	askedalloc -= sizeof(*vml);
-	kfree(vml);
-
-	update_hiwater_vm(mm);
-	mm->total_vm -= len >> PAGE_SHIFT;
-
-#ifdef DEBUG
-	show_process_blocks();
-#endif
-
+erase_whole_vma:
+	delete_vma_from_mm(vma);
+	delete_vma(mm, vma);
+	kleave(" = 0");
 	return 0;
 }
 EXPORT_SYMBOL(do_munmap);
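The rules do_munmap() applies above boil down to: an exact match removes the whole VMA, file-backed VMAs may never be split, and an anonymous VMA may be shrunk from either end or split first when the chunk lies strictly inside it (the alignment checks are elided here). A hypothetical classification helper makes that decision table explicit:

    /* Sketch of the NOMMU partial-unmap rules; types and names are made up. */
    enum unmap_action {
        UNMAP_WHOLE,              /* drop the entire VMA */
        UNMAP_SHRINK,             /* trim one end of the VMA */
        UNMAP_SPLIT_THEN_SHRINK,  /* interior chunk: split first, then shrink */
        UNMAP_EINVAL,             /* not allowed under NOMMU */
    };

    static enum unmap_action classify_unmap(unsigned long vm_start,
                                            unsigned long vm_end,
                                            int file_backed,
                                            unsigned long start,
                                            unsigned long end)
    {
        if (start == vm_start && end == vm_end)
            return UNMAP_WHOLE;         /* exact match: remove the VMA */
        if (file_backed)
            return UNMAP_EINVAL;        /* file-backed VMAs can't be split */
        if (start < vm_start || end > vm_end)
            return UNMAP_EINVAL;        /* the chunk must be a subset */
        if (start != vm_start && end != vm_end)
            return UNMAP_SPLIT_THEN_SHRINK;
        return UNMAP_SHRINK;            /* chunk touches one end: just shrink */
    }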
 
-asmlinkage long sys_munmap(unsigned long addr, size_t len)
+SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
 {
 	int ret;
 	struct mm_struct *mm = current->mm;
@@ -1204,32 +1585,26 @@
 }
 
 /*
- * Release all mappings
+ * release all the mappings made in a process's VM space
  */
-void exit_mmap(struct mm_struct * mm)
+void exit_mmap(struct mm_struct *mm)
 {
-	struct vm_list_struct *tmp;
+	struct vm_area_struct *vma;
 
-	if (mm) {
-#ifdef DEBUG
-		printk("Exit_mmap:\n");
-#endif
+	if (!mm)
+		return;
 
-		mm->total_vm = 0;
+	kenter("");
 
-		while ((tmp = mm->context.vmlist)) {
-			mm->context.vmlist = tmp->next;
-			put_vma(mm, tmp->vma);
+	mm->total_vm = 0;
 
-			realalloc -= kobjsize(tmp);
-			askedalloc -= sizeof(*tmp);
-			kfree(tmp);
-		}
-
-#ifdef DEBUG
-		show_process_blocks();
-#endif
+	while ((vma = mm->mmap)) {
+		mm->mmap = vma->vm_next;
+		delete_vma_from_mm(vma);
+		delete_vma(mm, vma);
 	}
+
+	kleave("");
 }
 
 unsigned long do_brk(unsigned long addr, unsigned long len)
@@ -1242,8 +1617,8 @@
  * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
  *
  * under NOMMU conditions, we only permit changing a mapping's size, and only
- * as long as it stays within the hole allocated by the kmalloc() call in
- * do_mmap_pgoff() and the block is not shareable
+ * as long as it stays within the region allocated by do_mmap_private() and the
+ * block is not shareable
  *
  * MREMAP_FIXED is not supported under NOMMU conditions
  */
@@ -1254,13 +1629,16 @@
 	struct vm_area_struct *vma;
 
 	/* insanity checks first */
-	if (new_len == 0)
+	if (old_len == 0 || new_len == 0)
 		return (unsigned long) -EINVAL;
 
+	if (addr & ~PAGE_MASK)
+		return -EINVAL;
+
 	if (flags & MREMAP_FIXED && new_addr != addr)
 		return (unsigned long) -EINVAL;
 
-	vma = find_vma_exact(current->mm, addr);
+	vma = find_vma_exact(current->mm, addr, old_len);
 	if (!vma)
 		return (unsigned long) -EINVAL;
 
@@ -1270,22 +1648,18 @@
 	if (vma->vm_flags & VM_MAYSHARE)
 		return (unsigned long) -EPERM;
 
-	if (new_len > kobjsize((void *) addr))
+	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
 		return (unsigned long) -ENOMEM;
 
 	/* all checks complete - do it */
 	vma->vm_end = vma->vm_start + new_len;
-
-	askedalloc -= old_len;
-	askedalloc += new_len;
-
 	return vma->vm_start;
 }
 EXPORT_SYMBOL(do_mremap);
 
-asmlinkage unsigned long sys_mremap(unsigned long addr,
-	unsigned long old_len, unsigned long new_len,
-	unsigned long flags, unsigned long new_addr)
+SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+		unsigned long, new_len, unsigned long, flags,
+		unsigned long, new_addr)
 {
 	unsigned long ret;
 
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 6b9e758..40ba050 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -429,7 +429,6 @@
 	unsigned long points = 0;
 	struct task_struct *p;
 
-	cgroup_lock();
 	read_lock(&tasklist_lock);
 retry:
 	p = select_bad_process(&points, mem);
@@ -444,7 +443,6 @@
 		goto retry;
 out:
 	read_unlock(&tasklist_lock);
-	cgroup_unlock();
 }
 #endif
 
@@ -560,6 +558,13 @@
 		/* Got some memory back in the last second. */
 		return;
 
+	/*
+	 * If this is from memcg, the OOM killer has already been invoked,
+	 * so it is not worth going for a system-wide OOM.
+	 */
+	if (mem_cgroup_oom_called(current))
+		goto rest_and_return;
+
 	if (sysctl_panic_on_oom)
 		panic("out of memory from page fault. panic_on_oom is selected.\n");
 
@@ -571,6 +576,7 @@
 	 * Give "p" a good chance of killing itself before we
 	 * retry to allocate memory.
 	 */
+rest_and_return:
 	if (!test_thread_flag(TIF_MEMDIE))
 		schedule_timeout_uninterruptible(1);
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7bf22e0..5675b30 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3523,10 +3523,10 @@
 			INIT_LIST_HEAD(&zone->lru[l].list);
 			zone->lru[l].nr_scan = 0;
 		}
-		zone->recent_rotated[0] = 0;
-		zone->recent_rotated[1] = 0;
-		zone->recent_scanned[0] = 0;
-		zone->recent_scanned[1] = 0;
+		zone->reclaim_stat.recent_rotated[0] = 0;
+		zone->reclaim_stat.recent_rotated[1] = 0;
+		zone->reclaim_stat.recent_scanned[0] = 0;
+		zone->reclaim_stat.recent_scanned[1] = 0;
 		zap_zone_vm_stats(zone);
 		zone->flags = 0;
 		if (!size)
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index d6507a6..7006a11 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -8,6 +8,7 @@
 #include <linux/memory.h>
 #include <linux/vmalloc.h>
 #include <linux/cgroup.h>
+#include <linux/swapops.h>
 
 static void __meminit
 __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
@@ -15,6 +16,7 @@
 	pc->flags = 0;
 	pc->mem_cgroup = NULL;
 	pc->page = pfn_to_page(pfn);
+	INIT_LIST_HEAD(&pc->lru);
 }
 static unsigned long total_usage;
 
@@ -72,7 +74,7 @@
 
 	int nid, fail;
 
-	if (mem_cgroup_subsys.disabled)
+	if (mem_cgroup_disabled())
 		return;
 
 	for_each_online_node(nid)  {
@@ -103,13 +105,11 @@
 /* __alloc_bootmem...() is protected by !slab_available() */
 static int __init_refok init_section_page_cgroup(unsigned long pfn)
 {
-	struct mem_section *section;
+	struct mem_section *section = __pfn_to_section(pfn);
 	struct page_cgroup *base, *pc;
 	unsigned long table_size;
 	int nid, index;
 
-	section = __pfn_to_section(pfn);
-
 	if (!section->page_cgroup) {
 		nid = page_to_nid(pfn_to_page(pfn));
 		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
@@ -145,7 +145,6 @@
 		__init_page_cgroup(pc, pfn + index);
 	}
 
-	section = __pfn_to_section(pfn);
 	section->page_cgroup = base - pfn;
 	total_usage += table_size;
 	return 0;
@@ -248,7 +247,7 @@
 	unsigned long pfn;
 	int fail = 0;
 
-	if (mem_cgroup_subsys.disabled)
+	if (mem_cgroup_disabled())
 		return;
 
 	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
@@ -273,3 +272,199 @@
 }
 
 #endif
+
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+
+static DEFINE_MUTEX(swap_cgroup_mutex);
+struct swap_cgroup_ctrl {
+	struct page **map;
+	unsigned long length;
+};
+
+struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
+
+/*
+ * These 8 bytes seem big... maybe we can reduce this once we can use an "id"
+ * for the cgroup rather than a pointer.
+ */
+struct swap_cgroup {
+	struct mem_cgroup	*val;
+};
+#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
+#define SC_POS_MASK	(SC_PER_PAGE - 1)
+
+/*
+ * SwapCgroup implements "lookup" and "exchange" operations.
+ * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge
+ * against SwapCache. At swap_free(), this is accessed directly from swap.
+ *
+ * This means,
+ *  - we have no race in "exchange" when we're accessed via SwapCache because
+ *    SwapCache(and its swp_entry) is under lock.
+ *  - When called via swap_free(), there is no user of this entry and no race.
+ * Then, we don't need lock around "exchange".
+ *
+ * TODO: we can push these buffers out to HIGHMEM.
+ */
+
+/*
+ * allocate buffer for swap_cgroup.
+ */
+static int swap_cgroup_prepare(int type)
+{
+	struct page *page;
+	struct swap_cgroup_ctrl *ctrl;
+	unsigned long idx, max;
+
+	if (!do_swap_account)
+		return 0;
+	ctrl = &swap_cgroup_ctrl[type];
+
+	for (idx = 0; idx < ctrl->length; idx++) {
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!page)
+			goto not_enough_page;
+		ctrl->map[idx] = page;
+	}
+	return 0;
+not_enough_page:
+	max = idx;
+	for (idx = 0; idx < max; idx++)
+		__free_page(ctrl->map[idx]);
+
+	return -ENOMEM;
+}
+
+/**
+ * swap_cgroup_record - record mem_cgroup for this swp_entry.
+ * @ent: swap entry to be recorded into
+ * @mem: mem_cgroup to be recorded
+ *
+ * Returns the old value on success, NULL on failure.
+ * (Of course, old value can be NULL.)
+ */
+struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
+{
+	int type = swp_type(ent);
+	unsigned long offset = swp_offset(ent);
+	unsigned long idx = offset / SC_PER_PAGE;
+	unsigned long pos = offset & SC_POS_MASK;
+	struct swap_cgroup_ctrl *ctrl;
+	struct page *mappage;
+	struct swap_cgroup *sc;
+	struct mem_cgroup *old;
+
+	if (!do_swap_account)
+		return NULL;
+
+	ctrl = &swap_cgroup_ctrl[type];
+
+	mappage = ctrl->map[idx];
+	sc = page_address(mappage);
+	sc += pos;
+	old = sc->val;
+	sc->val = mem;
+
+	return old;
+}
+
+/**
+ * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
+ * @ent: swap entry to be looked up.
+ *
+ * Returns a pointer to the mem_cgroup on success, NULL on failure.
+ */
+struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
+{
+	int type = swp_type(ent);
+	unsigned long offset = swp_offset(ent);
+	unsigned long idx = offset / SC_PER_PAGE;
+	unsigned long pos = offset & SC_POS_MASK;
+	struct swap_cgroup_ctrl *ctrl;
+	struct page *mappage;
+	struct swap_cgroup *sc;
+	struct mem_cgroup *ret;
+
+	if (!do_swap_account)
+		return NULL;
+
+	ctrl = &swap_cgroup_ctrl[type];
+	mappage = ctrl->map[idx];
+	sc = page_address(mappage);
+	sc += pos;
+	ret = sc->val;
+	return ret;
+}
+
+int swap_cgroup_swapon(int type, unsigned long max_pages)
+{
+	void *array;
+	unsigned long array_size;
+	unsigned long length;
+	struct swap_cgroup_ctrl *ctrl;
+
+	if (!do_swap_account)
+		return 0;
+
+	length = ((max_pages/SC_PER_PAGE) + 1);
+	array_size = length * sizeof(void *);
+
+	array = vmalloc(array_size);
+	if (!array)
+		goto nomem;
+
+	memset(array, 0, array_size);
+	ctrl = &swap_cgroup_ctrl[type];
+	mutex_lock(&swap_cgroup_mutex);
+	ctrl->length = length;
+	ctrl->map = array;
+	if (swap_cgroup_prepare(type)) {
+		/* memory shortage */
+		ctrl->map = NULL;
+		ctrl->length = 0;
+		vfree(array);
+		mutex_unlock(&swap_cgroup_mutex);
+		goto nomem;
+	}
+	mutex_unlock(&swap_cgroup_mutex);
+
+	printk(KERN_INFO
+		"swap_cgroup: uses %ld bytes of vmalloc for pointer array space"
+		" and %ld bytes to hold mem_cgroup pointers on swap\n",
+		array_size, length * PAGE_SIZE);
+	printk(KERN_INFO
+	"swap_cgroup can be disabled by noswapaccount boot option.\n");
+
+	return 0;
+nomem:
+	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
+	printk(KERN_INFO
+		"swap_cgroup can be disabled by noswapaccount boot option\n");
+	return -ENOMEM;
+}
+
+void swap_cgroup_swapoff(int type)
+{
+	int i;
+	struct swap_cgroup_ctrl *ctrl;
+
+	if (!do_swap_account)
+		return;
+
+	mutex_lock(&swap_cgroup_mutex);
+	ctrl = &swap_cgroup_ctrl[type];
+	if (ctrl->map) {
+		for (i = 0; i < ctrl->length; i++) {
+			struct page *page = ctrl->map[i];
+			if (page)
+				__free_page(page);
+		}
+		vfree(ctrl->map);
+		ctrl->map = NULL;
+		ctrl->length = 0;
+	}
+	mutex_unlock(&swap_cgroup_mutex);
+}
+
+#endif
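
The swap_cgroup map introduced above is a flat per-swap-device array of 8-byte slots, split into individual pages so it can be populated with alloc_page() instead of one large allocation. A minimal userspace sketch of the index math, assuming 4096-byte pages and 8-byte slots as in the patch (SC_PER_PAGE is then a power of two, which is what makes the SC_POS_MASK trick valid; the offset value is made up):

#include <stdio.h>

#define PAGE_SZ		4096UL		/* assumed page size */
#define SLOT_SZ		8UL		/* assumed sizeof(struct swap_cgroup) */
#define SC_PER_PAGE	(PAGE_SZ / SLOT_SZ)
#define SC_POS_MASK	(SC_PER_PAGE - 1)

int main(void)
{
	unsigned long offset = 123456;			/* hypothetical swp_offset(ent) */
	unsigned long idx = offset / SC_PER_PAGE;	/* which map[] page */
	unsigned long pos = offset & SC_POS_MASK;	/* slot within that page */

	/* swap_cgroup_record()/lookup_swap_cgroup() then compute
	 * sc = page_address(ctrl->map[idx]) + pos and read/write sc->val. */
	printf("offset %lu -> map page %lu, slot %lu\n", offset, idx, pos);
	return 0;
}
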
diff --git a/mm/shmem.c b/mm/shmem.c
index 5941f98..5d0de96 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -928,7 +928,11 @@
 	error = 1;
 	if (!inode)
 		goto out;
-	/* Precharge page using GFP_KERNEL while we can wait */
+	/*
+	 * Charge page using GFP_KERNEL while we can wait.
+	 * Charged back to the user (not the caller) when swap accounting is used.
+	 * add_to_page_cache() will be called with GFP_NOWAIT.
+	 */
 	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
 	if (error)
 		goto out;
@@ -1320,15 +1324,19 @@
 		} else {
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
-			unlock_page(swappage);
-			page_cache_release(swappage);
 			if (error == -ENOMEM) {
 				/* allow reclaim from this memory cgroup */
-				error = mem_cgroup_shrink_usage(current->mm,
+				error = mem_cgroup_shrink_usage(swappage,
+								current->mm,
 								gfp);
-				if (error)
+				if (error) {
+					unlock_page(swappage);
+					page_cache_release(swappage);
 					goto failed;
+				}
 			}
+			unlock_page(swappage);
+			page_cache_release(swappage);
 			goto repeat;
 		}
 	} else if (sgp == SGP_READ && !filepage) {
@@ -1379,7 +1387,7 @@
 
 			/* Precharge page while we can wait, compensate after */
 			error = mem_cgroup_cache_charge(filepage, current->mm,
-							gfp & ~__GFP_HIGHMEM);
+					GFP_KERNEL);
 			if (error) {
 				page_cache_release(filepage);
 				shmem_unacct_blocks(info->flags, 1);
diff --git a/mm/slub.c b/mm/slub.c
index f0e2892..6392ae5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2254,7 +2254,7 @@
 		 * Add some empty padding so that we can catch
 		 * overwrites from earlier objects rather than let
 		 * tracking information or the free pointer be
-		 * corrupted if an user writes before the start
+		 * corrupted if a user writes before the start
 		 * of the object.
 		 */
 		size += sizeof(void *);
diff --git a/mm/swap.c b/mm/swap.c
index ba2c0e8..8adb9fe 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -151,6 +151,26 @@
 	}
 }
 
+static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+				     int file, int rotated)
+{
+	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
+	struct zone_reclaim_stat *memcg_reclaim_stat;
+
+	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
+
+	reclaim_stat->recent_scanned[file]++;
+	if (rotated)
+		reclaim_stat->recent_rotated[file]++;
+
+	if (!memcg_reclaim_stat)
+		return;
+
+	memcg_reclaim_stat->recent_scanned[file]++;
+	if (rotated)
+		memcg_reclaim_stat->recent_rotated[file]++;
+}
+
 /*
  * FIXME: speed this up?
  */
@@ -168,10 +188,8 @@
 		lru += LRU_ACTIVE;
 		add_page_to_lru_list(zone, page, lru);
 		__count_vm_event(PGACTIVATE);
-		mem_cgroup_move_lists(page, lru);
 
-		zone->recent_rotated[!!file]++;
-		zone->recent_scanned[!!file]++;
+		update_page_reclaim_stat(zone, page, !!file, 1);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
@@ -386,12 +404,14 @@
 {
 	int i;
 	struct zone *zone = NULL;
+
 	VM_BUG_ON(is_unevictable_lru(lru));
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 		struct zone *pagezone = page_zone(page);
 		int file;
+		int active;
 
 		if (pagezone != zone) {
 			if (zone)
@@ -403,12 +423,11 @@
 		VM_BUG_ON(PageUnevictable(page));
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
+		active = is_active_lru(lru);
 		file = is_file_lru(lru);
-		zone->recent_scanned[file]++;
-		if (is_active_lru(lru)) {
+		if (active)
 			SetPageActive(page);
-			zone->recent_rotated[file]++;
-		}
+		update_page_reclaim_stat(zone, page, file, active);
 		add_page_to_lru_list(zone, page, lru);
 	}
 	if (zone)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 81c825f..3ecea98 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -17,6 +17,7 @@
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
 #include <linux/migrate.h>
+#include <linux/page_cgroup.h>
 
 #include <asm/pgtable.h>
 
@@ -108,6 +109,8 @@
  */
 void __delete_from_swap_cache(struct page *page)
 {
+	swp_entry_t ent = {.val = page_private(page)};
+
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(!PageSwapCache(page));
 	VM_BUG_ON(PageWriteback(page));
@@ -118,6 +121,7 @@
 	total_swapcache_pages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	INC_CACHE_INFO(del_total);
+	mem_cgroup_uncharge_swapcache(page, ent);
 }
 
 /**
diff --git a/mm/swapfile.c b/mm/swapfile.c
index eec5ca7..f48b831 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -33,6 +33,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <linux/swapops.h>
+#include <linux/page_cgroup.h>
 
 static DEFINE_SPINLOCK(swap_lock);
 static unsigned int nr_swapfiles;
@@ -470,8 +471,9 @@
 	return NULL;
 }
 
-static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
+static int swap_entry_free(struct swap_info_struct *p, swp_entry_t ent)
 {
+	unsigned long offset = swp_offset(ent);
 	int count = p->swap_map[offset];
 
 	if (count < SWAP_MAP_MAX) {
@@ -486,6 +488,7 @@
 				swap_list.next = p - swap_info;
 			nr_swap_pages++;
 			p->inuse_pages--;
+			mem_cgroup_uncharge_swap(ent);
 		}
 	}
 	return count;
@@ -501,7 +504,7 @@
 
 	p = swap_info_get(entry);
 	if (p) {
-		swap_entry_free(p, swp_offset(entry));
+		swap_entry_free(p, entry);
 		spin_unlock(&swap_lock);
 	}
 }
@@ -581,7 +584,7 @@
 
 	p = swap_info_get(entry);
 	if (p) {
-		if (swap_entry_free(p, swp_offset(entry)) == 1) {
+		if (swap_entry_free(p, entry) == 1) {
 			page = find_get_page(&swapper_space, entry.val);
 			if (page && !trylock_page(page)) {
 				page_cache_release(page);
@@ -690,17 +693,18 @@
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, swp_entry_t entry, struct page *page)
 {
+	struct mem_cgroup *ptr = NULL;
 	spinlock_t *ptl;
 	pte_t *pte;
 	int ret = 1;
 
-	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
+	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr))
 		ret = -ENOMEM;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
 		if (ret > 0)
-			mem_cgroup_uncharge_page(page);
+			mem_cgroup_cancel_charge_swapin(ptr);
 		ret = 0;
 		goto out;
 	}
@@ -710,6 +714,7 @@
 	set_pte_at(vma->vm_mm, addr, pte,
 		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
 	page_add_anon_rmap(page, vma, addr);
+	mem_cgroup_commit_charge_swapin(page, ptr);
 	swap_free(entry);
 	/*
 	 * Move the page to the active list so it is not
@@ -1372,7 +1377,7 @@
 	return ret;
 }
 
-asmlinkage long sys_swapoff(const char __user * specialfile)
+SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
 {
 	struct swap_info_struct * p = NULL;
 	unsigned short *swap_map;
@@ -1492,6 +1497,9 @@
 	spin_unlock(&swap_lock);
 	mutex_unlock(&swapon_mutex);
 	vfree(swap_map);
+	/* Destroy swap account information */
+	swap_cgroup_swapoff(type);
+
 	inode = mapping->host;
 	if (S_ISBLK(inode->i_mode)) {
 		struct block_device *bdev = I_BDEV(inode);
@@ -1625,7 +1633,7 @@
  *
  * The swapon system call
  */
-asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
+SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 {
 	struct swap_info_struct * p;
 	char *name = NULL;
@@ -1809,6 +1817,11 @@
 		}
 		swap_map[page_nr] = SWAP_MAP_BAD;
 	}
+
+	error = swap_cgroup_swapon(type, maxpages);
+	if (error)
+		goto bad_swap;
+
 	nr_good_pages = swap_header->info.last_page -
 			swap_header->info.nr_badpages -
 			1 /* header page */;
@@ -1880,6 +1893,7 @@
 		bd_release(bdev);
 	}
 	destroy_swap_extents(p);
+	swap_cgroup_swapoff(type);
 bad_swap_2:
 	spin_lock(&swap_lock);
 	p->swap_file = NULL;
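
The swapon/swapoff definitions above switch from open-coded asmlinkage prototypes to the SYSCALL_DEFINEn() macros, which rebuild the prototype from (type, name) pairs so that architecture wrappers (for example sign-extension of 32-bit arguments on 64-bit kernels) can be attached in one central place. A much-simplified standalone sketch of the idea, not the kernel's actual macro:

#include <stdio.h>

/* Hypothetical, much-simplified illustration of the SYSCALL_DEFINEn() idea:
 * reconstruct the traditional prototype from (type, name) pairs so a single
 * macro site can also emit per-arch wrappers without touching every caller.
 */
#define MY_SYSCALL_DEFINE2(name, t1, a1, t2, a2)	\
	long sys_##name(t1 a1, t2 a2)

MY_SYSCALL_DEFINE2(swapon, const char *, specialfile, int, swap_flags)
{
	printf("swapon(%s, %#x)\n", specialfile, swap_flags);
	return 0;	/* real work elided */
}

int main(void)
{
	return (int)sys_swapon("/dev/hypothetical", 0);
}
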
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c5db9a7..75f49d3 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -14,7 +14,6 @@
 #include <linux/highmem.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
-#include <linux/mutex.h>
 #include <linux/interrupt.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -24,6 +23,7 @@
 #include <linux/rbtree.h>
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
+#include <linux/bootmem.h>
 
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
@@ -495,7 +495,7 @@
 static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 					int sync, int force_flush)
 {
-	static DEFINE_MUTEX(purge_lock);
+	static DEFINE_SPINLOCK(purge_lock);
 	LIST_HEAD(valist);
 	struct vmap_area *va;
 	int nr = 0;
@@ -506,10 +506,10 @@
 	 * the case that isn't actually used at the moment anyway.
 	 */
 	if (!sync && !force_flush) {
-		if (!mutex_trylock(&purge_lock))
+		if (!spin_trylock(&purge_lock))
 			return;
 	} else
-		mutex_lock(&purge_lock);
+		spin_lock(&purge_lock);
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(va, &vmap_area_list, list) {
@@ -541,7 +541,7 @@
 			__free_vmap_area(va);
 		spin_unlock(&vmap_area_lock);
 	}
-	mutex_unlock(&purge_lock);
+	spin_unlock(&purge_lock);
 }
 
 /*
@@ -984,6 +984,8 @@
 
 void __init vmalloc_init(void)
 {
+	struct vmap_area *va;
+	struct vm_struct *tmp;
 	int i;
 
 	for_each_possible_cpu(i) {
@@ -996,6 +998,14 @@
 		vbq->nr_dirty = 0;
 	}
 
+	/* Import existing vmlist entries. */
+	for (tmp = vmlist; tmp; tmp = tmp->next) {
+		va = alloc_bootmem(sizeof(struct vmap_area));
+		va->flags = tmp->flags | VM_VM_AREA;
+		va->va_start = (unsigned long)tmp->addr;
+		va->va_end = va->va_start + tmp->size;
+		__insert_vmap_area(va);
+	}
 	vmap_initialized = true;
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b07c48b..9a27c44 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -125,11 +125,30 @@
 static DECLARE_RWSEM(shrinker_rwsem);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
-#define scan_global_lru(sc)	(!(sc)->mem_cgroup)
+#define scanning_global_lru(sc)	(!(sc)->mem_cgroup)
 #else
-#define scan_global_lru(sc)	(1)
+#define scanning_global_lru(sc)	(1)
 #endif
 
+static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
+						  struct scan_control *sc)
+{
+	if (!scanning_global_lru(sc))
+		return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
+
+	return &zone->reclaim_stat;
+}
+
+static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc,
+				   enum lru_list lru)
+{
+	if (!scanning_global_lru(sc))
+		return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
+
+	return zone_page_state(zone, NR_LRU_BASE + lru);
+}
+
+
 /*
  * Add a shrinker callback to be called from the vm
  */
@@ -512,7 +531,6 @@
 		lru = LRU_UNEVICTABLE;
 		add_page_to_unevictable_list(page);
 	}
-	mem_cgroup_move_lists(page, lru);
 
 	/*
 	 * page's status can change while we move it among lru. If an evictable
@@ -547,7 +565,6 @@
 
 	lru = !!TestClearPageActive(page) + page_is_file_cache(page);
 	lru_cache_add_lru(page, lru);
-	mem_cgroup_move_lists(page, lru);
 	put_page(page);
 }
 #endif /* CONFIG_UNEVICTABLE_LRU */
@@ -813,6 +830,7 @@
 		return ret;
 
 	ret = -EBUSY;
+
 	if (likely(get_page_unless_zero(page))) {
 		/*
 		 * Be careful not to clear PageLRU until after we're
@@ -821,6 +839,7 @@
 		 */
 		ClearPageLRU(page);
 		ret = 0;
+		mem_cgroup_del_lru(page);
 	}
 
 	return ret;
@@ -1029,6 +1048,7 @@
 	struct pagevec pvec;
 	unsigned long nr_scanned = 0;
 	unsigned long nr_reclaimed = 0;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	pagevec_init(&pvec, 1);
 
@@ -1070,13 +1090,14 @@
 		__mod_zone_page_state(zone, NR_INACTIVE_ANON,
 						-count[LRU_INACTIVE_ANON]);
 
-		if (scan_global_lru(sc)) {
+		if (scanning_global_lru(sc))
 			zone->pages_scanned += nr_scan;
-			zone->recent_scanned[0] += count[LRU_INACTIVE_ANON];
-			zone->recent_scanned[0] += count[LRU_ACTIVE_ANON];
-			zone->recent_scanned[1] += count[LRU_INACTIVE_FILE];
-			zone->recent_scanned[1] += count[LRU_ACTIVE_FILE];
-		}
+
+		reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
+		reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
+		reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
+		reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
+
 		spin_unlock_irq(&zone->lru_lock);
 
 		nr_scanned += nr_scan;
@@ -1108,7 +1129,7 @@
 		if (current_is_kswapd()) {
 			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
 			__count_vm_events(KSWAPD_STEAL, nr_freed);
-		} else if (scan_global_lru(sc))
+		} else if (scanning_global_lru(sc))
 			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
 
 		__count_zone_vm_events(PGSTEAL, zone, nr_freed);
@@ -1134,10 +1155,9 @@
 			SetPageLRU(page);
 			lru = page_lru(page);
 			add_page_to_lru_list(zone, page, lru);
-			mem_cgroup_move_lists(page, lru);
-			if (PageActive(page) && scan_global_lru(sc)) {
+			if (PageActive(page)) {
 				int file = !!page_is_file_cache(page);
-				zone->recent_rotated[file]++;
+				reclaim_stat->recent_rotated[file]++;
 			}
 			if (!pagevec_add(&pvec, page)) {
 				spin_unlock_irq(&zone->lru_lock);
@@ -1197,6 +1217,7 @@
 	struct page *page;
 	struct pagevec pvec;
 	enum lru_list lru;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
@@ -1207,10 +1228,10 @@
 	 * zone->pages_scanned is used for detect zone's oom
 	 * mem_cgroup remembers nr_scan by itself.
 	 */
-	if (scan_global_lru(sc)) {
+	if (scanning_global_lru(sc)) {
 		zone->pages_scanned += pgscanned;
-		zone->recent_scanned[!!file] += pgmoved;
 	}
+	reclaim_stat->recent_scanned[!!file] += pgmoved;
 
 	if (file)
 		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
@@ -1251,8 +1272,7 @@
 	 * This helps balance scan pressure between file and anonymous
 	 * pages in get_scan_ratio.
 	 */
-	if (scan_global_lru(sc))
-		zone->recent_rotated[!!file] += pgmoved;
+	reclaim_stat->recent_rotated[!!file] += pgmoved;
 
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
@@ -1263,7 +1283,7 @@
 		ClearPageActive(page);
 
 		list_move(&page->lru, &zone->lru[lru].list);
-		mem_cgroup_move_lists(page, lru);
+		mem_cgroup_add_lru_list(page, lru);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
 			__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
@@ -1292,6 +1312,38 @@
 	pagevec_release(&pvec);
 }
 
+static int inactive_anon_is_low_global(struct zone *zone)
+{
+	unsigned long active, inactive;
+
+	active = zone_page_state(zone, NR_ACTIVE_ANON);
+	inactive = zone_page_state(zone, NR_INACTIVE_ANON);
+
+	if (inactive * zone->inactive_ratio < active)
+		return 1;
+
+	return 0;
+}
+
+/**
+ * inactive_anon_is_low - check if anonymous pages need to be deactivated
+ * @zone: zone to check
+ * @sc:   scan control of this context
+ *
+ * Returns true if the zone does not have enough inactive anon pages,
+ * meaning some active anon pages need to be deactivated.
+ */
+static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
+{
+	int low;
+
+	if (scanning_global_lru(sc))
+		low = inactive_anon_is_low_global(zone);
+	else
+		low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
+	return low;
+}
+
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 	struct zone *zone, struct scan_control *sc, int priority)
 {
@@ -1302,8 +1354,7 @@
 		return 0;
 	}
 
-	if (lru == LRU_ACTIVE_ANON &&
-	    (!scan_global_lru(sc) || inactive_anon_is_low(zone))) {
+	if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
 		shrink_active_list(nr_to_scan, zone, sc, priority, file);
 		return 0;
 	}
@@ -1325,6 +1376,7 @@
 	unsigned long anon, file, free;
 	unsigned long anon_prio, file_prio;
 	unsigned long ap, fp;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	/* If we have no swap space, do not bother scanning anon pages. */
 	if (nr_swap_pages <= 0) {
@@ -1333,17 +1385,20 @@
 		return;
 	}
 
-	anon  = zone_page_state(zone, NR_ACTIVE_ANON) +
-		zone_page_state(zone, NR_INACTIVE_ANON);
-	file  = zone_page_state(zone, NR_ACTIVE_FILE) +
-		zone_page_state(zone, NR_INACTIVE_FILE);
-	free  = zone_page_state(zone, NR_FREE_PAGES);
+	anon  = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) +
+		zone_nr_pages(zone, sc, LRU_INACTIVE_ANON);
+	file  = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) +
+		zone_nr_pages(zone, sc, LRU_INACTIVE_FILE);
 
-	/* If we have very few page cache pages, force-scan anon pages. */
-	if (unlikely(file + free <= zone->pages_high)) {
-		percent[0] = 100;
-		percent[1] = 0;
-		return;
+	if (scanning_global_lru(sc)) {
+		free  = zone_page_state(zone, NR_FREE_PAGES);
+		/* If we have very few page cache pages,
+		   force-scan anon pages. */
+		if (unlikely(file + free <= zone->pages_high)) {
+			percent[0] = 100;
+			percent[1] = 0;
+			return;
+		}
 	}
 
 	/*
@@ -1357,17 +1412,17 @@
 	 *
 	 * anon in [0], file in [1]
 	 */
-	if (unlikely(zone->recent_scanned[0] > anon / 4)) {
+	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
 		spin_lock_irq(&zone->lru_lock);
-		zone->recent_scanned[0] /= 2;
-		zone->recent_rotated[0] /= 2;
+		reclaim_stat->recent_scanned[0] /= 2;
+		reclaim_stat->recent_rotated[0] /= 2;
 		spin_unlock_irq(&zone->lru_lock);
 	}
 
-	if (unlikely(zone->recent_scanned[1] > file / 4)) {
+	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
 		spin_lock_irq(&zone->lru_lock);
-		zone->recent_scanned[1] /= 2;
-		zone->recent_rotated[1] /= 2;
+		reclaim_stat->recent_scanned[1] /= 2;
+		reclaim_stat->recent_rotated[1] /= 2;
 		spin_unlock_irq(&zone->lru_lock);
 	}
 
@@ -1383,11 +1438,11 @@
 	 * proportional to the fraction of recently scanned pages on
 	 * each list that were recently referenced and in active use.
 	 */
-	ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
-	ap /= zone->recent_rotated[0] + 1;
+	ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
+	ap /= reclaim_stat->recent_rotated[0] + 1;
 
-	fp = (file_prio + 1) * (zone->recent_scanned[1] + 1);
-	fp /= zone->recent_rotated[1] + 1;
+	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
+	fp /= reclaim_stat->recent_rotated[1] + 1;
 
 	/* Normalize to percentages */
 	percent[0] = 100 * ap / (ap + fp + 1);
@@ -1411,30 +1466,23 @@
 	get_scan_ratio(zone, sc, percent);
 
 	for_each_evictable_lru(l) {
-		if (scan_global_lru(sc)) {
-			int file = is_file_lru(l);
-			int scan;
+		int file = is_file_lru(l);
+		int scan;
 
-			scan = zone_page_state(zone, NR_LRU_BASE + l);
-			if (priority) {
-				scan >>= priority;
-				scan = (scan * percent[file]) / 100;
-			}
+		scan = zone_page_state(zone, NR_LRU_BASE + l);
+		if (priority) {
+			scan >>= priority;
+			scan = (scan * percent[file]) / 100;
+		}
+		if (scanning_global_lru(sc)) {
 			zone->lru[l].nr_scan += scan;
 			nr[l] = zone->lru[l].nr_scan;
 			if (nr[l] >= swap_cluster_max)
 				zone->lru[l].nr_scan = 0;
 			else
 				nr[l] = 0;
-		} else {
-			/*
-			 * This reclaim occurs not because zone memory shortage
-			 * but because memory controller hits its limit.
-			 * Don't modify zone reclaim related data.
-			 */
-			nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone,
-								priority, l);
-		}
+		} else
+			nr[l] = scan;
 	}
 
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -1467,9 +1515,7 @@
 	 * Even if we did not try to evict anon pages at all, we want to
 	 * rebalance the anon lru active/inactive ratio.
 	 */
-	if (!scan_global_lru(sc) || inactive_anon_is_low(zone))
-		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
-	else if (!scan_global_lru(sc))
+	if (inactive_anon_is_low(zone, sc))
 		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
 
 	throttle_vm_writeout(sc->gfp_mask);
@@ -1504,7 +1550,7 @@
 		 * Take care memory controller reclaiming has small influence
 		 * to global LRU.
 		 */
-		if (scan_global_lru(sc)) {
+		if (scanning_global_lru(sc)) {
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
 			note_zone_scanning_priority(zone, priority);
@@ -1557,12 +1603,12 @@
 
 	delayacct_freepages_start();
 
-	if (scan_global_lru(sc))
+	if (scanning_global_lru(sc))
 		count_vm_event(ALLOCSTALL);
 	/*
 	 * mem_cgroup will not do shrink_slab.
 	 */
-	if (scan_global_lru(sc)) {
+	if (scanning_global_lru(sc)) {
 		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
@@ -1581,7 +1627,7 @@
 		 * Don't shrink slabs when reclaiming memory from
 		 * over limit cgroups
 		 */
-		if (scan_global_lru(sc)) {
+		if (scanning_global_lru(sc)) {
 			shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
 			if (reclaim_state) {
 				sc->nr_reclaimed += reclaim_state->reclaimed_slab;
@@ -1612,7 +1658,7 @@
 			congestion_wait(WRITE, HZ/10);
 	}
 	/* top priority shrink_zones still had more to do? don't OOM, then */
-	if (!sc->all_unreclaimable && scan_global_lru(sc))
+	if (!sc->all_unreclaimable && scanning_global_lru(sc))
 		ret = sc->nr_reclaimed;
 out:
 	/*
@@ -1625,7 +1671,7 @@
 	if (priority < 0)
 		priority = 0;
 
-	if (scan_global_lru(sc)) {
+	if (scanning_global_lru(sc)) {
 		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
@@ -1661,19 +1707,24 @@
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 
 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
-						gfp_t gfp_mask)
+					   gfp_t gfp_mask,
+					   bool noswap,
+					   unsigned int swappiness)
 {
 	struct scan_control sc = {
 		.may_writepage = !laptop_mode,
 		.may_swap = 1,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
-		.swappiness = vm_swappiness,
+		.swappiness = swappiness,
 		.order = 0,
 		.mem_cgroup = mem_cont,
 		.isolate_pages = mem_cgroup_isolate_pages,
 	};
 	struct zonelist *zonelist;
 
+	if (noswap)
+		sc.may_swap = 0;
+
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
@@ -1761,7 +1812,7 @@
 			 * Do some background aging of the anon list, to give
 			 * pages a chance to be referenced before reclaiming.
 			 */
-			if (inactive_anon_is_low(zone))
+			if (inactive_anon_is_low(zone, &sc))
 				shrink_active_list(SWAP_CLUSTER_MAX, zone,
 							&sc, priority, 0);
 
@@ -2404,6 +2455,7 @@
 
 		__dec_zone_state(zone, NR_UNEVICTABLE);
 		list_move(&page->lru, &zone->lru[l].list);
+		mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
 		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
 		__count_vm_event(UNEVICTABLE_PGRESCUED);
 	} else {
@@ -2412,6 +2464,7 @@
 		 */
 		SetPageUnevictable(page);
 		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+		mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
 		if (page_evictable(page, NULL))
 			goto retry;
 	}
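
get_scan_ratio() above now reads recent_scanned/recent_rotated from a zone_reclaim_stat, which may belong either to the zone or to the memory cgroup being reclaimed, but the balancing arithmetic itself is unchanged. A standalone sketch of that arithmetic with made-up counters, assuming the usual anon_prio = swappiness and file_prio = 200 - swappiness:

#include <stdio.h>

int main(void)
{
	/* Hypothetical recent_scanned/recent_rotated counters. */
	unsigned long scanned_anon = 1000, rotated_anon = 800;
	unsigned long scanned_file = 1000, rotated_file = 100;
	unsigned long anon_prio = 60, file_prio = 140;	/* swappiness = 60 */

	/* Lists whose pages keep getting rotated back (still in use) score low. */
	unsigned long ap = (anon_prio + 1) * (scanned_anon + 1) / (rotated_anon + 1);
	unsigned long fp = (file_prio + 1) * (scanned_file + 1) / (rotated_file + 1);

	/* Normalize to percentages, as in get_scan_ratio(). */
	printf("anon %lu%%, file %lu%%\n",
	       100 * ap / (ap + fp + 1), 100 * fp / (ap + fp + 1));
	return 0;
}
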
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index dd86a1d..6c13239 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -3,46 +3,35 @@
 #include <linux/if_vlan.h>
 #include "vlan.h"
 
-struct vlan_hwaccel_cb {
-	struct net_device	*dev;
-};
-
-static inline struct vlan_hwaccel_cb *vlan_hwaccel_cb(struct sk_buff *skb)
-{
-	return (struct vlan_hwaccel_cb *)skb->cb;
-}
-
 /* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
 int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		      u16 vlan_tci, int polling)
 {
-	struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
-
-	if (skb_bond_should_drop(skb)) {
-		dev_kfree_skb_any(skb);
-		return NET_RX_DROP;
-	}
+	if (skb_bond_should_drop(skb))
+		goto drop;
 
 	skb->vlan_tci = vlan_tci;
-	cb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+	if (!skb->dev)
+		goto drop;
 
 	return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+
+drop:
+	dev_kfree_skb_any(skb);
+	return NET_RX_DROP;
 }
 EXPORT_SYMBOL(__vlan_hwaccel_rx);
 
 int vlan_hwaccel_do_receive(struct sk_buff *skb)
 {
-	struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb);
-	struct net_device *dev = cb->dev;
+	struct net_device *dev = skb->dev;
 	struct net_device_stats *stats;
 
+	skb->dev = vlan_dev_info(dev)->real_dev;
 	netif_nit_deliver(skb);
 
-	if (dev == NULL) {
-		kfree_skb(skb);
-		return -1;
-	}
-
 	skb->dev = dev;
 	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
 	skb->vlan_tci = 0;
@@ -80,3 +69,79 @@
 	return vlan_dev_info(dev)->vlan_id;
 }
 EXPORT_SYMBOL_GPL(vlan_dev_vlan_id);
+
+static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
+			   unsigned int vlan_tci, struct sk_buff *skb)
+{
+	struct sk_buff *p;
+
+	if (skb_bond_should_drop(skb))
+		goto drop;
+
+	skb->vlan_tci = vlan_tci;
+	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+
+	if (!skb->dev)
+		goto drop;
+
+	for (p = napi->gro_list; p; p = p->next) {
+		NAPI_GRO_CB(p)->same_flow = p->dev == skb->dev;
+		NAPI_GRO_CB(p)->flush = 0;
+	}
+
+	return dev_gro_receive(napi, skb);
+
+drop:
+	return 2;
+}
+
+int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+		     unsigned int vlan_tci, struct sk_buff *skb)
+{
+	int err = NET_RX_SUCCESS;
+
+	switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
+	case -1:
+		return netif_receive_skb(skb);
+
+	case 2:
+		err = NET_RX_DROP;
+		/* fall through */
+
+	case 1:
+		kfree_skb(skb);
+		break;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(vlan_gro_receive);
+
+int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+		   unsigned int vlan_tci, struct napi_gro_fraginfo *info)
+{
+	struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+	int err = NET_RX_DROP;
+
+	if (!skb)
+		goto out;
+
+	err = NET_RX_SUCCESS;
+
+	switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
+	case -1:
+		return netif_receive_skb(skb);
+
+	case 2:
+		err = NET_RX_DROP;
+		/* fall through */
+
+	case 1:
+		napi_reuse_skb(napi, skb);
+		break;
+	}
+
+out:
+	return err;
+}
+EXPORT_SYMBOL(vlan_gro_frags);
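
Both GRO helpers above select the VLAN device with vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK), i.e. only the low 12 bits of the tag control information pick the VLAN; the top bits carry the 802.1p priority later used by vlan_get_ingress_priority(). A standalone sketch of that field split (the sample TCI is arbitrary):

#include <stdio.h>

#define VLAN_VID_MASK	0x0fff	/* low 12 bits: VLAN ID */

int main(void)
{
	unsigned int vlan_tci = 0x6005;	/* hypothetical tag from hardware */

	unsigned int vid  = vlan_tci & VLAN_VID_MASK;	/* VLAN 5 */
	unsigned int prio = vlan_tci >> 13;		/* 802.1p priority 3 */

	printf("tci %#06x: vid %u, priority %u\n", vlan_tci, vid, prio);
	return 0;
}
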
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 89a3bbd..4a19acd 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -546,6 +546,18 @@
 	return err;
 }
 
+static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
+{
+	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int err = 0;
+
+	if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
+		err = ops->ndo_neigh_setup(dev, pa);
+
+	return err;
+}
+
 static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 {
 	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
@@ -713,6 +725,7 @@
 	.ndo_set_multicast_list	= vlan_dev_set_rx_mode,
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
+	.ndo_neigh_setup	= vlan_dev_neigh_setup,
 };
 
 static const struct net_device_ops vlan_netdev_accel_ops = {
@@ -728,6 +741,7 @@
 	.ndo_set_multicast_list	= vlan_dev_set_rx_mode,
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
+	.ndo_neigh_setup	= vlan_dev_neigh_setup,
 };
 
 void vlan_setup(struct net_device *dev)
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index 0663f99..7ed75c7 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -23,7 +23,7 @@
 	  guest partitions and a host partition.
 
 config NET_9P_RDMA
-	depends on INET && INFINIBAND && EXPERIMENTAL
+	depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS && EXPERIMENTAL
 	tristate "9P RDMA Transport (Experimental)"
 	help
 	  This builds support for an RDMA transport.
diff --git a/net/Kconfig b/net/Kconfig
index 6ec2cce..bf27760 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -254,6 +254,8 @@
 
 endif # WIRELESS
 
+source "net/wimax/Kconfig"
+
 source "net/rfkill/Kconfig"
 source "net/9p/Kconfig"
 
diff --git a/net/Makefile b/net/Makefile
index ba44604..0fcce89 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -63,3 +63,4 @@
 ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL)		+= sysctl_net.o
 endif
+obj-$(CONFIG_WIMAX)		+= wimax/
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index b03ff58..89f99d3 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -443,13 +443,14 @@
 {
 	struct ifreq atreq;
 	struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr;
+	const struct net_device_ops *ops = iface->dev->netdev_ops;
 
 	sa->sat_addr.s_node = iface->address.s_node;
 	sa->sat_addr.s_net = ntohs(iface->address.s_net);
 
 	/* We pass the Net:Node to the drivers/cards by a Device ioctl. */
-	if (!(iface->dev->do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) {
-		(void)iface->dev->do_ioctl(iface->dev, &atreq, SIOCGIFADDR);
+	if (!(ops->ndo_do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) {
+		ops->ndo_do_ioctl(iface->dev, &atreq, SIOCGIFADDR);
 		if (iface->address.s_net != htons(sa->sat_addr.s_net) ||
 		    iface->address.s_node != sa->sat_addr.s_node)
 			iface->status |= ATIF_PROBE_FAIL;
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index d20f8a4..0d9e506 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -165,7 +165,6 @@
 
 	struct socket    *sock;
 	struct net_device *dev;
-	struct net_device_stats stats;
 };
 
 void bnep_net_setup(struct net_device *dev);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 70fea8b..52a6ce0 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -306,7 +306,7 @@
 	struct sk_buff *nskb;
 	u8 type;
 
-	s->stats.rx_bytes += skb->len;
+	dev->stats.rx_bytes += skb->len;
 
 	type = *(u8 *) skb->data; skb_pull(skb, 1);
 
@@ -343,7 +343,7 @@
 	 * may not be modified and because of the alignment requirements. */
 	nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL);
 	if (!nskb) {
-		s->stats.rx_dropped++;
+		dev->stats.rx_dropped++;
 		kfree_skb(skb);
 		return -ENOMEM;
 	}
@@ -378,14 +378,14 @@
 	skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len);
 	kfree_skb(skb);
 
-	s->stats.rx_packets++;
+	dev->stats.rx_packets++;
 	nskb->ip_summed = CHECKSUM_NONE;
 	nskb->protocol  = eth_type_trans(nskb, dev);
 	netif_rx_ni(nskb);
 	return 0;
 
 badframe:
-	s->stats.rx_errors++;
+	dev->stats.rx_errors++;
 	kfree_skb(skb);
 	return 0;
 }
@@ -448,8 +448,8 @@
 	kfree_skb(skb);
 
 	if (len > 0) {
-		s->stats.tx_bytes += len;
-		s->stats.tx_packets++;
+		s->dev->stats.tx_bytes += len;
+		s->dev->stats.tx_packets++;
 		return 0;
 	}
 
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index f897da6..d7a0e97 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -55,12 +55,6 @@
 	return 0;
 }
 
-static struct net_device_stats *bnep_net_get_stats(struct net_device *dev)
-{
-	struct bnep_session *s = netdev_priv(dev);
-	return &s->stats;
-}
-
 static void bnep_net_set_mc_list(struct net_device *dev)
 {
 #ifdef CONFIG_BT_BNEP_MC_FILTER
@@ -128,11 +122,6 @@
 	netif_wake_queue(dev);
 }
 
-static int bnep_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-	return -EINVAL;
-}
-
 #ifdef CONFIG_BT_BNEP_MC_FILTER
 static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
 {
@@ -217,6 +206,18 @@
 	return 0;
 }
 
+static const struct net_device_ops bnep_netdev_ops = {
+	.ndo_open            = bnep_net_open,
+	.ndo_stop            = bnep_net_close,
+	.ndo_start_xmit	     = bnep_net_xmit,
+	.ndo_validate_addr   = eth_validate_addr,
+	.ndo_set_multicast_list = bnep_net_set_mc_list,
+	.ndo_set_mac_address = bnep_net_set_mac_addr,
+	.ndo_tx_timeout      = bnep_net_timeout,
+	.ndo_change_mtu	     = eth_change_mtu,
+
+};
+
 void bnep_net_setup(struct net_device *dev)
 {
 
@@ -224,15 +225,7 @@
 	dev->addr_len = ETH_ALEN;
 
 	ether_setup(dev);
-
-	dev->open            = bnep_net_open;
-	dev->stop            = bnep_net_close;
-	dev->hard_start_xmit = bnep_net_xmit;
-	dev->get_stats       = bnep_net_get_stats;
-	dev->do_ioctl        = bnep_net_ioctl;
-	dev->set_mac_address = bnep_net_set_mac_addr;
-	dev->set_multicast_list = bnep_net_set_mc_list;
+	dev->netdev_ops = &bnep_netdev_ops;
 
 	dev->watchdog_timeo  = HZ * 2;
-	dev->tx_timeout      = bnep_net_timeout;
 }
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index a65e43a..cf754ac 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -58,11 +58,11 @@
 static int brnf_call_iptables __read_mostly = 1;
 static int brnf_call_ip6tables __read_mostly = 1;
 static int brnf_call_arptables __read_mostly = 1;
-static int brnf_filter_vlan_tagged __read_mostly = 1;
-static int brnf_filter_pppoe_tagged __read_mostly = 1;
+static int brnf_filter_vlan_tagged __read_mostly = 0;
+static int brnf_filter_pppoe_tagged __read_mostly = 0;
 #else
-#define brnf_filter_vlan_tagged 1
-#define brnf_filter_pppoe_tagged 1
+#define brnf_filter_vlan_tagged 0
+#define brnf_filter_pppoe_tagged 0
 #endif
 
 static inline __be16 vlan_proto(const struct sk_buff *skb)
@@ -686,8 +686,11 @@
 	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
 	    IS_PPPOE_IP(skb))
 		pf = PF_INET;
-	else
+	else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
+		 IS_PPPOE_IPV6(skb))
 		pf = PF_INET6;
+	else
+		return NF_ACCEPT;
 
 	nf_bridge_pull_encap_header(skb);
 
@@ -828,8 +831,11 @@
 	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
 	    IS_PPPOE_IP(skb))
 		pf = PF_INET;
-	else
+	else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
+		 IS_PPPOE_IPV6(skb))
 		pf = PF_INET6;
+	else
+		return NF_ACCEPT;
 
 #ifdef CONFIG_NETFILTER_DEBUG
 	if (skb->dst == NULL) {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index fa108c4..820252a 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -79,18 +79,19 @@
 {
 	par->match     = m->u.match;
 	par->matchinfo = m->data;
-	return m->u.match->match(skb, par);
+	return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
 }
 
 static inline int ebt_dev_check(char *entry, const struct net_device *device)
 {
 	int i = 0;
-	const char *devname = device->name;
+	const char *devname;
 
 	if (*entry == '\0')
 		return 0;
 	if (!device)
 		return 1;
+	devname = device->name;
 	/* 1 is the wildcard token */
 	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
 		i++;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 3dadb33..fa417ca 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -414,6 +414,12 @@
  *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
  *  filter for error frames (CAN_ERR_FLAG bit set in mask).
  *
+ *  The provided pointer to the sk_buff is guaranteed to be valid as long as
+ *  the callback function is running. The callback function must *not* free
+ *  the given sk_buff while processing its task. When the given sk_buff is
+ *  needed after the callback function returns, it must be cloned inside
+ *  the callback function with skb_clone().
+ *
  * Return:
  *  0 on success
  *  -ENOMEM on missing cache mem to create subscription entry
@@ -569,13 +575,8 @@
 
 static inline void deliver(struct sk_buff *skb, struct receiver *r)
 {
-	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
-
-	if (clone) {
-		clone->sk = skb->sk;
-		r->func(clone, r->data);
-		r->matches++;
-	}
+	r->func(skb, r->data);
+	r->matches++;
 }
 
 static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
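
The comment added to the af_can kerneldoc above changes the receive contract: deliver() now passes the original sk_buff to every registered callback instead of a private clone, so a callback that keeps the buffer must clone it itself, as raw_rcv() in net/can/raw.c is changed to do further down. A hypothetical callback following that rule (illustrative kernel-style fragment, not part of the patch; my_queue_for_later() is assumed):

#include <linux/skbuff.h>

/* Hypothetical rx callback registered with can_rx_register(). */
static void my_can_rcv(struct sk_buff *skb, void *data)
{
	struct sk_buff *copy;

	/* "skb" is only valid for the duration of this call and must not
	 * be freed here; clone it if it is needed afterwards. */
	copy = skb_clone(skb, GFP_ATOMIC);
	if (!copy)
		return;

	my_queue_for_later(copy);	/* assumed consumer, frees "copy" */
}
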
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 6248ae2..b7c7d46 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -347,16 +347,42 @@
 	struct bcm_op *op = (struct bcm_op *)data;
 	struct bcm_msg_head msg_head;
 
-	/* create notification to user */
-	msg_head.opcode  = TX_EXPIRED;
-	msg_head.flags   = op->flags;
-	msg_head.count   = op->count;
-	msg_head.ival1   = op->ival1;
-	msg_head.ival2   = op->ival2;
-	msg_head.can_id  = op->can_id;
-	msg_head.nframes = 0;
+	if (op->kt_ival1.tv64 && (op->count > 0)) {
 
-	bcm_send_to_user(op, &msg_head, NULL, 0);
+		op->count--;
+		if (!op->count && (op->flags & TX_COUNTEVT)) {
+
+			/* create notification to user */
+			msg_head.opcode  = TX_EXPIRED;
+			msg_head.flags   = op->flags;
+			msg_head.count   = op->count;
+			msg_head.ival1   = op->ival1;
+			msg_head.ival2   = op->ival2;
+			msg_head.can_id  = op->can_id;
+			msg_head.nframes = 0;
+
+			bcm_send_to_user(op, &msg_head, NULL, 0);
+		}
+	}
+
+	if (op->kt_ival1.tv64 && (op->count > 0)) {
+
+		/* send (next) frame */
+		bcm_can_tx(op);
+		hrtimer_start(&op->timer,
+			      ktime_add(ktime_get(), op->kt_ival1),
+			      HRTIMER_MODE_ABS);
+
+	} else {
+		if (op->kt_ival2.tv64) {
+
+			/* send (next) frame */
+			bcm_can_tx(op);
+			hrtimer_start(&op->timer,
+				      ktime_add(ktime_get(), op->kt_ival2),
+				      HRTIMER_MODE_ABS);
+		}
+	}
 }
 
 /*
@@ -365,33 +391,10 @@
 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
 {
 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
-	enum hrtimer_restart ret = HRTIMER_NORESTART;
 
-	if (op->kt_ival1.tv64 && (op->count > 0)) {
+	tasklet_schedule(&op->tsklet);
 
-		op->count--;
-		if (!op->count && (op->flags & TX_COUNTEVT))
-			tasklet_schedule(&op->tsklet);
-	}
-
-	if (op->kt_ival1.tv64 && (op->count > 0)) {
-
-		/* send (next) frame */
-		bcm_can_tx(op);
-		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival1);
-		ret = HRTIMER_RESTART;
-
-	} else {
-		if (op->kt_ival2.tv64) {
-
-			/* send (next) frame */
-			bcm_can_tx(op);
-			hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
-			ret = HRTIMER_RESTART;
-		}
-	}
-
-	return ret;
+	return HRTIMER_NORESTART;
 }
 
 /*
@@ -633,7 +636,7 @@
 	hrtimer_cancel(&op->timer);
 
 	if (op->can_id != rxframe->can_id)
-		goto rx_freeskb;
+		return;
 
 	/* save rx timestamp */
 	op->rx_stamp = skb->tstamp;
@@ -645,19 +648,19 @@
 	if (op->flags & RX_RTR_FRAME) {
 		/* send reply for RTR-request (placed in op->frames[0]) */
 		bcm_can_tx(op);
-		goto rx_freeskb;
+		return;
 	}
 
 	if (op->flags & RX_FILTER_ID) {
 		/* the easiest case */
 		bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
-		goto rx_freeskb_starttimer;
+		goto rx_starttimer;
 	}
 
 	if (op->nframes == 1) {
 		/* simple compare with index 0 */
 		bcm_rx_cmp_to_index(op, 0, rxframe);
-		goto rx_freeskb_starttimer;
+		goto rx_starttimer;
 	}
 
 	if (op->nframes > 1) {
@@ -678,10 +681,8 @@
 		}
 	}
 
-rx_freeskb_starttimer:
+rx_starttimer:
 	bcm_rx_starttimer(op);
-rx_freeskb:
-	kfree_skb(skb);
 }
 
 /*
diff --git a/net/can/raw.c b/net/can/raw.c
index 27aab63..0703cba 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -99,13 +99,14 @@
 	struct raw_sock *ro = raw_sk(sk);
 	struct sockaddr_can *addr;
 
-	if (!ro->recv_own_msgs) {
-		/* check the received tx sock reference */
-		if (skb->sk == sk) {
-			kfree_skb(skb);
-			return;
-		}
-	}
+	/* check the received tx sock reference */
+	if (!ro->recv_own_msgs && skb->sk == sk)
+		return;
+
+	/* clone the given skb to be able to enqueue it into the rcv queue */
+	skb = skb_clone(skb, GFP_ATOMIC);
+	if (!skb)
+		return;
 
 	/*
 	 *  Put the datagram to the queue so that raw_recvmsg() can
diff --git a/net/core/dev.c b/net/core/dev.c
index 382df6c..8d67597 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -170,25 +170,6 @@
 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 static struct list_head ptype_all __read_mostly;	/* Taps */
 
-#ifdef CONFIG_NET_DMA
-struct net_dma {
-	struct dma_client client;
-	spinlock_t lock;
-	cpumask_t channel_mask;
-	struct dma_chan **channels;
-};
-
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_state state);
-
-static struct net_dma net_dma = {
-	.client = {
-		.event_callback = netdev_dma_event,
-	},
-};
-#endif
-
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -1107,6 +1088,11 @@
 		dev->flags |= IFF_UP;
 
 		/*
+		 *	Enable NET_DMA
+		 */
+		dmaengine_get();
+
+		/*
 		 *	Initialize multicasting status
 		 */
 		dev_set_rx_mode(dev);
@@ -1183,6 +1169,11 @@
 	 */
 	call_netdevice_notifiers(NETDEV_DOWN, dev);
 
+	/*
+	 *	Shutdown NET_DMA
+	 */
+	dmaengine_put();
+
 	return 0;
 }
 
@@ -2387,7 +2378,7 @@
 }
 EXPORT_SYMBOL(napi_gro_flush);
 
-static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff **pp = NULL;
 	struct packet_type *ptype;
@@ -2401,6 +2392,9 @@
 	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
 
+	if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
+		goto normal;
+
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, head, list) {
 		struct sk_buff *p;
@@ -2417,11 +2411,14 @@
 
 		for (p = napi->gro_list; p; p = p->next) {
 			count++;
-			NAPI_GRO_CB(p)->same_flow =
-				p->mac_len == mac_len &&
-				!memcmp(skb_mac_header(p), skb_mac_header(skb),
-					mac_len);
-			NAPI_GRO_CB(p)->flush = 0;
+
+			if (!NAPI_GRO_CB(p)->same_flow)
+				continue;
+
+			if (p->mac_len != mac_len ||
+			    memcmp(skb_mac_header(p), skb_mac_header(skb),
+				   mac_len))
+				NAPI_GRO_CB(p)->same_flow = 0;
 		}
 
 		pp = ptype->gro_receive(&napi->gro_list, skb);
@@ -2463,6 +2460,19 @@
 normal:
 	return -1;
 }
+EXPORT_SYMBOL(dev_gro_receive);
+
+static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+	struct sk_buff *p;
+
+	for (p = napi->gro_list; p; p = p->next) {
+		NAPI_GRO_CB(p)->same_flow = 1;
+		NAPI_GRO_CB(p)->flush = 0;
+	}
+
+	return dev_gro_receive(napi, skb);
+}
 
 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
@@ -2479,11 +2489,20 @@
 }
 EXPORT_SYMBOL(napi_gro_receive);
 
-int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+{
+	__skb_pull(skb, skb_headlen(skb));
+	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
+
+	napi->skb = skb;
+}
+EXPORT_SYMBOL(napi_reuse_skb);
+
+struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
+				  struct napi_gro_fraginfo *info)
 {
 	struct net_device *dev = napi->dev;
 	struct sk_buff *skb = napi->skb;
-	int err = NET_RX_DROP;
 
 	napi->skb = NULL;
 
@@ -2503,16 +2522,31 @@
 	skb->len += info->len;
 	skb->truesize += info->len;
 
-	if (!pskb_may_pull(skb, ETH_HLEN))
-		goto reuse;
-
-	err = NET_RX_SUCCESS;
+	if (!pskb_may_pull(skb, ETH_HLEN)) {
+		napi_reuse_skb(napi, skb);
+		goto out;
+	}
 
 	skb->protocol = eth_type_trans(skb, dev);
 
 	skb->ip_summed = info->ip_summed;
 	skb->csum = info->csum;
 
+out:
+	return skb;
+}
+EXPORT_SYMBOL(napi_fraginfo_skb);
+
+int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+{
+	struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+	int err = NET_RX_DROP;
+
+	if (!skb)
+		goto out;
+
+	err = NET_RX_SUCCESS;
+
 	switch (__napi_gro_receive(napi, skb)) {
 	case -1:
 		return netif_receive_skb(skb);
@@ -2521,17 +2555,7 @@
 		goto out;
 	}
 
-reuse:
-	skb_shinfo(skb)->nr_frags = 0;
-
-	skb->len -= skb->data_len;
-	skb->truesize -= skb->data_len;
-	skb->data_len = 0;
-
-	__skb_pull(skb, skb_headlen(skb));
-	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
-
-	napi->skb = skb;
+	napi_reuse_skb(napi, skb);
 
 out:
 	return err;
@@ -2718,14 +2742,7 @@
 	 * There may not be any more sk_buffs coming right now, so push
 	 * any pending DMA copies to hardware
 	 */
-	if (!cpus_empty(net_dma.channel_mask)) {
-		int chan_idx;
-		for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
-			struct dma_chan *chan = net_dma.channels[chan_idx];
-			if (chan)
-				dma_async_memcpy_issue_pending(chan);
-		}
-	}
+	dma_issue_pending_all();
 #endif
 
 	return;
@@ -4414,6 +4431,45 @@
 }
 
 /**
+ *	init_dummy_netdev	- init a dummy network device for NAPI
+ *	@dev: device to init
+ *
+ *	This takes a network device structure and initializes the minimum
+ *	number of fields so it can be used to schedule NAPI polls without
+ *	registering a full-blown interface. This is to be used by drivers
+ *	that need to tie several hardware interfaces to a single NAPI
+ *	poll scheduler due to HW limitations.
+ */
+int init_dummy_netdev(struct net_device *dev)
+{
+	/* Clear everything. Note we don't initialize spinlocks
+	 * as they aren't supposed to be taken by any of the
+	 * NAPI code and this dummy netdev is supposed to be
+	 * only ever used for NAPI polls
+	 */
+	memset(dev, 0, sizeof(struct net_device));
+
+	/* make sure we BUG if trying to hit standard
+	 * register/unregister code path
+	 */
+	dev->reg_state = NETREG_DUMMY;
+
+	/* initialize the ref count */
+	atomic_set(&dev->refcnt, 1);
+
+	/* NAPI wants this */
+	INIT_LIST_HEAD(&dev->napi_list);
+
+	/* a dummy interface is started by default */
+	set_bit(__LINK_STATE_PRESENT, &dev->state);
+	set_bit(__LINK_STATE_START, &dev->state);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(init_dummy_netdev);
+
+
+/**
  *	register_netdev	- register a network device
  *	@dev: device to register
  *
@@ -4916,122 +4972,6 @@
 	return NOTIFY_OK;
 }
 
-#ifdef CONFIG_NET_DMA
-/**
- * net_dma_rebalance - try to maintain one DMA channel per CPU
- * @net_dma: DMA client and associated data (lock, channels, channel_mask)
- *
- * This is called when the number of channels allocated to the net_dma client
- * changes.  The net_dma client tries to have one DMA channel per CPU.
- */
-
-static void net_dma_rebalance(struct net_dma *net_dma)
-{
-	unsigned int cpu, i, n, chan_idx;
-	struct dma_chan *chan;
-
-	if (cpus_empty(net_dma->channel_mask)) {
-		for_each_online_cpu(cpu)
-			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
-		return;
-	}
-
-	i = 0;
-	cpu = first_cpu(cpu_online_map);
-
-	for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
-		chan = net_dma->channels[chan_idx];
-
-		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
-		   + (i < (num_online_cpus() %
-			cpus_weight(net_dma->channel_mask)) ? 1 : 0));
-
-		while(n) {
-			per_cpu(softnet_data, cpu).net_dma = chan;
-			cpu = next_cpu(cpu, cpu_online_map);
-			n--;
-		}
-		i++;
-	}
-}
-
-/**
- * netdev_dma_event - event callback for the net_dma_client
- * @client: should always be net_dma_client
- * @chan: DMA channel for the event
- * @state: DMA state to be handled
- */
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-	enum dma_state state)
-{
-	int i, found = 0, pos = -1;
-	struct net_dma *net_dma =
-		container_of(client, struct net_dma, client);
-	enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-	spin_lock(&net_dma->lock);
-	switch (state) {
-	case DMA_RESOURCE_AVAILABLE:
-		for (i = 0; i < nr_cpu_ids; i++)
-			if (net_dma->channels[i] == chan) {
-				found = 1;
-				break;
-			} else if (net_dma->channels[i] == NULL && pos < 0)
-				pos = i;
-
-		if (!found && pos >= 0) {
-			ack = DMA_ACK;
-			net_dma->channels[pos] = chan;
-			cpu_set(pos, net_dma->channel_mask);
-			net_dma_rebalance(net_dma);
-		}
-		break;
-	case DMA_RESOURCE_REMOVED:
-		for (i = 0; i < nr_cpu_ids; i++)
-			if (net_dma->channels[i] == chan) {
-				found = 1;
-				pos = i;
-				break;
-			}
-
-		if (found) {
-			ack = DMA_ACK;
-			cpu_clear(pos, net_dma->channel_mask);
-			net_dma->channels[i] = NULL;
-			net_dma_rebalance(net_dma);
-		}
-		break;
-	default:
-		break;
-	}
-	spin_unlock(&net_dma->lock);
-
-	return ack;
-}
-
-/**
- * netdev_dma_register - register the networking subsystem as a DMA client
- */
-static int __init netdev_dma_register(void)
-{
-	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
-								GFP_KERNEL);
-	if (unlikely(!net_dma.channels)) {
-		printk(KERN_NOTICE
-				"netdev_dma: no memory for net_dma.channels\n");
-		return -ENOMEM;
-	}
-	spin_lock_init(&net_dma.lock);
-	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
-	dma_async_client_register(&net_dma.client);
-	dma_async_client_chan_request(&net_dma.client);
-	return 0;
-}
-
-#else
-static int __init netdev_dma_register(void) { return -ENODEV; }
-#endif /* CONFIG_NET_DMA */
 
 /**
  *	netdev_increment_features - increment feature set by one
@@ -5251,8 +5191,6 @@
 	if (register_pernet_device(&default_device_ops))
 		goto out;
 
-	netdev_dma_register();
-
 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5110b35..65eac77 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2602,6 +2602,12 @@
 		       skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
 
 		skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
+		skb_shinfo(skb)->nr_frags = 0;
+
+		skb->truesize -= skb->data_len;
+		skb->len -= skb->data_len;
+		skb->data_len = 0;
+
 		NAPI_GRO_CB(skb)->free = 1;
 		goto done;
 	}
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index b28bf96..4b5db44 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -29,7 +29,7 @@
 	  http://www.ietf.org/rfc/rfc4342.txt
 
 	  The TFRC congestion control algorithms were initially described in
-	  RFC 5448.
+	  RFC 5348.
 
 	  This text was extracted from RFC 4340 (sec. 10.2),
 	  http://www.ietf.org/rfc/rfc4340.txt
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 60c412c..4902029 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -36,7 +36,7 @@
 	return rc;
 }
 
-void __exit tfrc_lib_exit(void)
+void tfrc_lib_exit(void)
 {
 	tfrc_rx_packet_history_exit();
 	tfrc_tx_packet_history_exit();
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a3a410d..a68fd79 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -286,6 +286,42 @@
 	.get_sset_count		= dsa_slave_get_sset_count,
 };
 
+#ifdef CONFIG_NET_DSA_TAG_DSA
+static const struct net_device_ops dsa_netdev_ops = {
+	.ndo_open	 	= dsa_slave_open,
+	.ndo_stop		= dsa_slave_close,
+	.ndo_start_xmit		= dsa_xmit,
+	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
+	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
+	.ndo_set_multicast_list = dsa_slave_set_rx_mode,
+	.ndo_set_mac_address	= dsa_slave_set_mac_address,
+	.ndo_do_ioctl		= dsa_slave_ioctl,
+};
+#endif
+#ifdef CONFIG_NET_DSA_TAG_EDSA
+static const struct net_device_ops edsa_netdev_ops = {
+	.ndo_open	 	= dsa_slave_open,
+	.ndo_stop		= dsa_slave_close,
+	.ndo_start_xmit		= edsa_xmit,
+	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
+	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
+	.ndo_set_multicast_list = dsa_slave_set_rx_mode,
+	.ndo_set_mac_address	= dsa_slave_set_mac_address,
+	.ndo_do_ioctl		= dsa_slave_ioctl,
+};
+#endif
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+static const struct net_device_ops trailer_netdev_ops = {
+	.ndo_open	 	= dsa_slave_open,
+	.ndo_stop		= dsa_slave_close,
+	.ndo_start_xmit		= trailer_xmit,
+	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
+	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
+	.ndo_set_multicast_list = dsa_slave_set_rx_mode,
+	.ndo_set_mac_address	= dsa_slave_set_mac_address,
+	.ndo_do_ioctl		= dsa_slave_ioctl,
+};
+#endif
 
 /* slave device setup *******************************************************/
 struct net_device *
@@ -306,32 +342,27 @@
 	SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops);
 	memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN);
 	slave_dev->tx_queue_len = 0;
+
 	switch (ds->tag_protocol) {
 #ifdef CONFIG_NET_DSA_TAG_DSA
 	case htons(ETH_P_DSA):
-		slave_dev->hard_start_xmit = dsa_xmit;
+		slave_dev->netdev_ops = &dsa_netdev_ops;
 		break;
 #endif
 #ifdef CONFIG_NET_DSA_TAG_EDSA
 	case htons(ETH_P_EDSA):
-		slave_dev->hard_start_xmit = edsa_xmit;
+		slave_dev->netdev_ops = &edsa_netdev_ops;
 		break;
 #endif
 #ifdef CONFIG_NET_DSA_TAG_TRAILER
 	case htons(ETH_P_TRAILER):
-		slave_dev->hard_start_xmit = trailer_xmit;
+		slave_dev->netdev_ops = &trailer_netdev_ops;
 		break;
 #endif
 	default:
 		BUG();
 	}
-	slave_dev->open = dsa_slave_open;
-	slave_dev->stop = dsa_slave_close;
-	slave_dev->change_rx_flags = dsa_slave_change_rx_flags;
-	slave_dev->set_rx_mode = dsa_slave_set_rx_mode;
-	slave_dev->set_multicast_list = dsa_slave_set_rx_mode;
-	slave_dev->set_mac_address = dsa_slave_set_mac_address;
-	slave_dev->do_ioctl = dsa_slave_ioctl;
+
 	SET_NETDEV_DEV(slave_dev, parent);
 	slave_dev->vlan_features = master->vlan_features;
 
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index c922431..52cb693 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -93,13 +93,8 @@
 {
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk("iptable_filter: ignoring short SOCK_RAW "
-			       "packet.\n");
+	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
-
 	return ipt_do_table(skb, hook, in, out,
 			    dev_net(out)->ipv4.iptable_filter);
 }
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 69f2c42..3929d20 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -132,12 +132,8 @@
 
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr)
-	    || ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk("iptable_mangle: ignoring short SOCK_RAW "
-			       "packet.\n");
+	    || ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 
 	/* Save things which could affect route */
 	mark = skb->mark;
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 8faebfe..7f65d18 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -65,12 +65,8 @@
 {
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk("iptable_raw: ignoring short SOCK_RAW "
-			       "packet.\n");
+	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 	return ipt_do_table(skb, hook, in, out,
 			    dev_net(out)->ipv4.iptable_raw);
 }
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index 36f3be3..a52a35f 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -96,12 +96,8 @@
 {
 	/* Somebody is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr)
-	    || ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk(KERN_INFO "iptable_security: ignoring short "
-			       "SOCK_RAW packet.\n");
+	    || ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 	return ipt_do_table(skb, hook, in, out,
 			    dev_net(out)->ipv4.iptable_security);
 }
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index b2141e1..4beb04f 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -145,11 +145,8 @@
 {
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			printk("ipt_hook: happy cracking.\n");
+	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
-	}
 	return nf_conntrack_in(dev_net(out), PF_INET, hooknum, skb);
 }
 
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 1fd3ef7..2a8bee2 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -20,7 +20,7 @@
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_log.h>
 
-static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ;
+static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
 
 static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
 			      struct nf_conntrack_tuple *tuple)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 35bcddf..0cd71b8 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -522,8 +522,12 @@
 				unsigned int offset, size_t len)
 {
 	struct tcp_splice_state *tss = rd_desc->arg.data;
+	int ret;
 
-	return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
+	ret = skb_splice_bits(skb, offset, tss->pipe, rd_desc->count, tss->flags);
+	if (ret > 0)
+		rd_desc->count -= ret;
+	return ret;
 }
 
 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
@@ -531,6 +535,7 @@
 	/* Store TCP splice context information in read_descriptor_t. */
 	read_descriptor_t rd_desc = {
 		.arg.data = tss,
+		.count	  = tss->len,
 	};
 
 	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
@@ -611,11 +616,13 @@
 		tss.len -= ret;
 		spliced += ret;
 
+		if (!timeo)
+			break;
 		release_sock(sk);
 		lock_sock(sk);
 
 		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
-		    (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
+		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
 		    signal_pending(current))
 			break;
 	}
@@ -1313,7 +1320,7 @@
 		if ((available < target) &&
 		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 		    !sysctl_tcp_low_latency &&
-		    __get_cpu_var(softnet_data).net_dma) {
+		    dma_find_channel(DMA_MEMCPY)) {
 			preempt_enable_no_resched();
 			tp->ucopy.pinned_list =
 					dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1523,7 +1530,7 @@
 		if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = get_softnet_dma();
+				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 			if (tp->ucopy.dma_chan) {
 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -1628,7 +1635,6 @@
 
 		/* Safe to free early-copied skbs now */
 		__skb_queue_purge(&sk->sk_async_wait_queue);
-		dma_chan_put(tp->ucopy.dma_chan);
 		tp->ucopy.dma_chan = NULL;
 	}
 	if (tp->ucopy.pinned_list) {
@@ -2383,7 +2389,7 @@
 	unsigned int seq;
 	__be32 delta;
 	unsigned int oldlen;
-	unsigned int len;
+	unsigned int mss;
 
 	if (!pskb_may_pull(skb, sizeof(*th)))
 		goto out;
@@ -2399,10 +2405,13 @@
 	oldlen = (u16)~skb->len;
 	__skb_pull(skb, thlen);
 
+	mss = skb_shinfo(skb)->gso_size;
+	if (unlikely(skb->len <= mss))
+		goto out;
+
 	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
 		/* Packet is from an untrusted source, reset gso_segs. */
 		int type = skb_shinfo(skb)->gso_type;
-		int mss;
 
 		if (unlikely(type &
 			     ~(SKB_GSO_TCPV4 |
@@ -2413,7 +2422,6 @@
 			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
 			goto out;
 
-		mss = skb_shinfo(skb)->gso_size;
 		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
 
 		segs = NULL;
@@ -2424,8 +2432,7 @@
 	if (IS_ERR(segs))
 		goto out;
 
-	len = skb_shinfo(skb)->gso_size;
-	delta = htonl(oldlen + (thlen + len));
+	delta = htonl(oldlen + (thlen + mss));
 
 	skb = segs;
 	th = tcp_hdr(skb);
@@ -2441,7 +2448,7 @@
 			     csum_fold(csum_partial(skb_transport_header(skb),
 						    thlen, skb->csum));
 
-		seq += len;
+		seq += mss;
 		skb = skb->next;
 		th = tcp_hdr(skb);
 
@@ -2542,6 +2549,7 @@
 
 	return pp;
 }
+EXPORT_SYMBOL(tcp_gro_receive);
 
 int tcp_gro_complete(struct sk_buff *skb)
 {
@@ -2558,6 +2566,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(tcp_gro_complete);
 
 #ifdef CONFIG_TCP_MD5SIG
 static unsigned long tcp_md5sig_users;
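
The tcp_splice_read() changes above make the splice actor draw from rd_desc->count (seeded with tss->len) instead of reading tss->len directly, so one pass through tcp_read_sock() cannot emit more than the caller asked for. A rough sketch of that byte-budget pattern, with a hypothetical actor:

    static int demo_recv_actor(read_descriptor_t *rd_desc, struct sk_buff *skb,
                               unsigned int offset, size_t len)
    {
        size_t want = min_t(size_t, len, rd_desc->count);  /* remaining budget */

        /* ... splice or copy up to 'want' bytes from skb data at 'offset' ... */
        rd_desc->count -= want;   /* shrink the budget for any following skbs */
        return want;
    }
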
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 99b7ecb..a6961d7 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5005,7 +5005,7 @@
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = get_softnet_dma();
+		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9d839fa..19d7b42 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1594,7 +1594,7 @@
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v4_do_rcv(sk, skb);
 		else
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 437b750..94f74f5 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -672,8 +672,7 @@
 
 EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
 
-static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
-						    int proto)
+static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
 {
 	struct inet6_protocol *ops = NULL;
 
@@ -704,7 +703,7 @@
 		__skb_pull(skb, len);
 	}
 
-	return ops;
+	return proto;
 }
 
 static int ipv6_gso_send_check(struct sk_buff *skb)
@@ -721,7 +720,9 @@
 	err = -EPROTONOSUPPORT;
 
 	rcu_read_lock();
-	ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+	ops = rcu_dereference(inet6_protos[
+		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
+
 	if (likely(ops && ops->gso_send_check)) {
 		skb_reset_transport_header(skb);
 		err = ops->gso_send_check(skb);
@@ -757,7 +758,9 @@
 	segs = ERR_PTR(-EPROTONOSUPPORT);
 
 	rcu_read_lock();
-	ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+	ops = rcu_dereference(inet6_protos[
+		ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
+
 	if (likely(ops && ops->gso_segment)) {
 		skb_reset_transport_header(skb);
 		segs = ops->gso_segment(skb, features);
@@ -777,11 +780,105 @@
 	return segs;
 }
 
+struct ipv6_gro_cb {
+	struct napi_gro_cb napi;
+	int proto;
+};
+
+#define IPV6_GRO_CB(skb) ((struct ipv6_gro_cb *)(skb)->cb)
+
+static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
+					 struct sk_buff *skb)
+{
+	struct inet6_protocol *ops;
+	struct sk_buff **pp = NULL;
+	struct sk_buff *p;
+	struct ipv6hdr *iph;
+	unsigned int nlen;
+	int flush = 1;
+	int proto;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+		goto out;
+
+	iph = ipv6_hdr(skb);
+	__skb_pull(skb, sizeof(*iph));
+
+	flush += ntohs(iph->payload_len) != skb->len;
+
+	rcu_read_lock();
+	proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr);
+	IPV6_GRO_CB(skb)->proto = proto;
+	ops = rcu_dereference(inet6_protos[proto]);
+	if (!ops || !ops->gro_receive)
+		goto out_unlock;
+
+	flush--;
+	skb_reset_transport_header(skb);
+	nlen = skb_network_header_len(skb);
+
+	for (p = *head; p; p = p->next) {
+		struct ipv6hdr *iph2;
+
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		iph2 = ipv6_hdr(p);
+
+		/* All fields must match except length. */
+		if (nlen != skb_network_header_len(p) ||
+		    memcmp(iph, iph2, offsetof(struct ipv6hdr, payload_len)) ||
+		    memcmp(&iph->nexthdr, &iph2->nexthdr,
+			   nlen - offsetof(struct ipv6hdr, nexthdr))) {
+			NAPI_GRO_CB(p)->same_flow = 0;
+			continue;
+		}
+
+		NAPI_GRO_CB(p)->flush |= flush;
+	}
+
+	NAPI_GRO_CB(skb)->flush |= flush;
+
+	pp = ops->gro_receive(head, skb);
+
+out_unlock:
+	rcu_read_unlock();
+
+out:
+	NAPI_GRO_CB(skb)->flush |= flush;
+
+	return pp;
+}
+
+static int ipv6_gro_complete(struct sk_buff *skb)
+{
+	struct inet6_protocol *ops;
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+	int err = -ENOSYS;
+
+	iph->payload_len = htons(skb->len - skb_network_offset(skb) -
+				 sizeof(*iph));
+
+	rcu_read_lock();
+	ops = rcu_dereference(inet6_protos[IPV6_GRO_CB(skb)->proto]);
+	if (WARN_ON(!ops || !ops->gro_complete))
+		goto out_unlock;
+
+	err = ops->gro_complete(skb);
+
+out_unlock:
+	rcu_read_unlock();
+
+	return err;
+}
+
 static struct packet_type ipv6_packet_type = {
 	.type = __constant_htons(ETH_P_IPV6),
 	.func = ipv6_rcv,
 	.gso_send_check = ipv6_gso_send_check,
 	.gso_segment = ipv6_gso_segment,
+	.gro_receive = ipv6_gro_receive,
+	.gro_complete = ipv6_gro_complete,
 };
 
 static int __init ipv6_packet_init(void)
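
The ipv6_gro_receive() routine added above keeps a held packet as a merge candidate only when its network headers match the new packet everywhere except the payload length. A standalone sketch of that comparison (demo_ipv6_hdrs_match is hypothetical; nlen is the pulled network header length, including any extension headers):

    static bool demo_ipv6_hdrs_match(const struct ipv6hdr *a,
                                     const struct ipv6hdr *b, unsigned int nlen)
    {
        /* bytes before payload_len: version, traffic class, flow label */
        if (memcmp(a, b, offsetof(struct ipv6hdr, payload_len)))
            return false;
        /* everything from nexthdr onwards: hop limit, addresses, ext headers */
        return !memcmp(&a->nexthdr, &b->nexthdr,
                       nlen - offsetof(struct ipv6hdr, nexthdr));
    }
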
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 29c7c99..52ee1dc 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -298,6 +298,10 @@
 	struct fib6_walker_t *w = (void*)cb->args[2];
 
 	if (w) {
+		if (cb->args[4]) {
+			cb->args[4] = 0;
+			fib6_walker_unlink(w);
+		}
 		cb->args[2] = 0;
 		kfree(w);
 	}
@@ -330,15 +334,12 @@
 		read_lock_bh(&table->tb6_lock);
 		res = fib6_walk_continue(w);
 		read_unlock_bh(&table->tb6_lock);
-		if (res != 0) {
-			if (res < 0)
-				fib6_walker_unlink(w);
-			goto end;
+		if (res <= 0) {
+			fib6_walker_unlink(w);
+			cb->args[4] = 0;
 		}
-		fib6_walker_unlink(w);
-		cb->args[4] = 0;
 	}
-end:
+
 	return res;
 }
 
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index bd52151..c455cf4 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -26,7 +26,7 @@
 #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
 #include <net/netfilter/nf_log.h>
 
-static unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
+static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
 
 static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
 				unsigned int dataoff,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 76f06b9..c4a5982 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2752,7 +2752,7 @@
 		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
 				  SLAB_HWCACHE_ALIGN, NULL);
 	if (!ip6_dst_ops_template.kmem_cachep)
-		goto out;;
+		goto out;
 
 	ret = register_pernet_subsys(&ip6_route_net_ops);
 	if (ret)
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 9048fe7..a031034 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -128,7 +128,7 @@
 
 int ipv6_sysctl_register(void)
 {
-	int err = -ENOMEM;;
+	int err = -ENOMEM;
 
 	ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table);
 	if (ip6_header == NULL)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e8b8337..e5b85d4 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -101,7 +101,7 @@
 	}
 }
 
-static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
+static __inline__ __sum16 tcp_v6_check(int len,
 				   struct in6_addr *saddr,
 				   struct in6_addr *daddr,
 				   __wsum base)
@@ -501,7 +501,7 @@
 	if (skb) {
 		struct tcphdr *th = tcp_hdr(skb);
 
-		th->check = tcp_v6_check(th, skb->len,
+		th->check = tcp_v6_check(skb->len,
 					 &treq->loc_addr, &treq->rmt_addr,
 					 csum_partial(th, skb->len, skb->csum));
 
@@ -942,6 +942,41 @@
 	return 0;
 }
 
+struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+{
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+
+	switch (skb->ip_summed) {
+	case CHECKSUM_COMPLETE:
+		if (!tcp_v6_check(skb->len, &iph->saddr, &iph->daddr,
+				  skb->csum)) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			break;
+		}
+
+		/* fall through */
+	case CHECKSUM_NONE:
+		NAPI_GRO_CB(skb)->flush = 1;
+		return NULL;
+	}
+
+	return tcp_gro_receive(head, skb);
+}
+EXPORT_SYMBOL(tcp6_gro_receive);
+
+int tcp6_gro_complete(struct sk_buff *skb)
+{
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+	struct tcphdr *th = tcp_hdr(skb);
+
+	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+				  &iph->saddr, &iph->daddr, 0);
+	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+
+	return tcp_gro_complete(skb);
+}
+EXPORT_SYMBOL(tcp6_gro_complete);
+
 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 				 u32 ts, struct tcp_md5sig_key *key, int rst)
 {
@@ -1429,14 +1464,14 @@
 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
 {
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
-		if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
+		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
 				  &ipv6_hdr(skb)->daddr, skb->csum)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			return 0;
 		}
 	}
 
-	skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
+	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
 					      &ipv6_hdr(skb)->saddr,
 					      &ipv6_hdr(skb)->daddr, 0));
 
@@ -1640,7 +1675,7 @@
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v6_do_rcv(sk, skb);
 		else
@@ -2062,6 +2097,8 @@
 	.err_handler	=	tcp_v6_err,
 	.gso_send_check	=	tcp_v6_gso_send_check,
 	.gso_segment	=	tcp_tso_segment,
+	.gro_receive	=	tcp6_gro_receive,
+	.gro_complete	=	tcp6_gro_complete,
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 5f510a1..c5c0c52 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -469,7 +469,7 @@
 	struct ieee80211_sub_if_data *sdata;
 	u16 start_seq_num;
 	u8 *state;
-	int ret;
+	int ret = 0;
 
 	if ((tid >= STA_TID_NUM) || !(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
 		return -EINVAL;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5abbc3f..b907482 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -699,7 +699,8 @@
 		return 0;
 
 	/* Setting ad-hoc mode on non-IBSS channel is not supported. */
-	if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS)
+	if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS &&
+	    type == NL80211_IFTYPE_ADHOC)
 		return -EOPNOTSUPP;
 
 	/*
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 929ba54..1159bdb 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -107,6 +107,7 @@
 
 	sta->flags = WLAN_STA_AUTHORIZED;
 	sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
+	rate_control_rate_init(sta);
 
 	return sta;
 }
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 2b3b490..3824990 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -395,13 +395,15 @@
 {
 	struct minstrel_sta_info *mi = priv_sta;
 	struct minstrel_priv *mp = priv;
-	struct minstrel_rate *mr_ctl;
+	struct ieee80211_local *local = hw_to_local(mp->hw);
+	struct ieee80211_rate *ctl_rate;
 	unsigned int i, n = 0;
 	unsigned int t_slot = 9; /* FIXME: get real slot time */
 
 	mi->lowest_rix = rate_lowest_index(sband, sta);
-	mr_ctl = &mi->r[rix_to_ndx(mi, mi->lowest_rix)];
-	mi->sp_ack_dur = mr_ctl->ack_time;
+	ctl_rate = &sband->bitrates[mi->lowest_rix];
+	mi->sp_ack_dur = ieee80211_frame_duration(local, 10, ctl_rate->bitrate,
+				!!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1);
 
 	for (i = 0; i < sband->n_bitrates; i++) {
 		struct minstrel_rate *mr = &mi->r[n];
@@ -416,7 +418,7 @@
 
 		mr->rix = i;
 		mr->bitrate = sband->bitrates[i].bitrate / 5;
-		calc_rate_durations(mi, hw_to_local(mp->hw), mr,
+		calc_rate_durations(mi, local, mr,
 				&sband->bitrates[i]);
 
 		/* calculate maximum number of retransmissions before
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 7e83f74..90ce9dd 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -469,7 +469,7 @@
 				   const struct nf_conntrack_tuple *repl,
 				   gfp_t gfp)
 {
-	struct nf_conn *ct = NULL;
+	struct nf_conn *ct;
 
 	if (unlikely(!nf_conntrack_hash_rnd_initted)) {
 		get_random_bytes(&nf_conntrack_hash_rnd, 4);
@@ -551,7 +551,7 @@
 	}
 
 	ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
-	if (ct == NULL || IS_ERR(ct)) {
+	if (IS_ERR(ct)) {
 		pr_debug("Can't allocate conntrack.\n");
 		return (struct nf_conntrack_tuple_hash *)ct;
 	}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 00e8c27..3dddec6 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1134,7 +1134,7 @@
 	struct nf_conntrack_helper *helper;
 
 	ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC);
-	if (ct == NULL || IS_ERR(ct))
+	if (IS_ERR(ct))
 		return -ENOMEM;
 
 	if (!cda[CTA_TIMEOUT])
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 89837a4..bfbf521 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -273,6 +273,10 @@
 				have_rev = 1;
 		}
 	}
+
+	if (af != NFPROTO_UNSPEC && !have_rev)
+		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
+
 	return have_rev;
 }
 
@@ -289,6 +293,10 @@
 				have_rev = 1;
 		}
 	}
+
+	if (af != NFPROTO_UNSPEC && !have_rev)
+		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
+
 	return have_rev;
 }
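
Both hunks above add the same fallback: when a revision lookup for a specific protocol family comes up empty, retry once against the family-independent NFPROTO_UNSPEC registrations before reporting failure. A condensed sketch (demo_lookup is a hypothetical stand-in for the per-family scan):

    static int demo_find_revision(u8 af, const char *name, u8 revision)
    {
        int have_rev = demo_lookup(af, name, revision);

        if (af != NFPROTO_UNSPEC && !have_rev)
            return demo_lookup(NFPROTO_UNSPEC, name, revision);
        return have_rev;
    }
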
 
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 29375ba..93acaa5 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -243,6 +243,17 @@
 
 static int __init time_mt_init(void)
 {
+	int minutes = sys_tz.tz_minuteswest;
+
+	if (minutes < 0) /* east of Greenwich */
+		printk(KERN_INFO KBUILD_MODNAME
+		       ": kernel timezone is +%02d%02d\n",
+		       -minutes / 60, -minutes % 60);
+	else /* west of Greenwich */
+		printk(KERN_INFO KBUILD_MODNAME
+		       ": kernel timezone is -%02d%02d\n",
+		       minutes / 60, minutes % 60);
+
 	return xt_register_match(&xt_time_mt_reg);
 }
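
In the message added above, sys_tz.tz_minuteswest counts minutes west of UTC, so a host at UTC+05:30 has tz_minuteswest == -330 and the module prints "kernel timezone is +0530". The sign is folded in before dividing because C division truncates toward zero: -330 % 60 would be -30, while -(-330) % 60 == 30. A tiny sketch of that arithmetic:

    static void demo_tz_digits(int minuteswest, int *hh, int *mm)
    {
        int m = minuteswest < 0 ? -minuteswest : minuteswest;

        *hh = m / 60;   /* -330 -> 5  */
        *mm = m % 60;   /* -330 -> 30 */
    }
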
 
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 3e1191c..1d3dd30 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -225,6 +225,7 @@
 	__genl_unregister_mc_group(family, grp);
 	genl_unlock();
 }
+EXPORT_SYMBOL(genl_unregister_mc_group);
 
 static void genl_unregister_mc_groups(struct genl_family *family)
 {
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index b0ceac2..6a91a32 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -227,6 +227,13 @@
 	return 0;
 }
 
+static const struct net_device_ops gprs_netdev_ops = {
+	.ndo_open	= gprs_open,
+	.ndo_stop	= gprs_close,
+	.ndo_start_xmit	= gprs_xmit,
+	.ndo_change_mtu	= gprs_set_mtu,
+};
+
 static void gprs_setup(struct net_device *dev)
 {
 	dev->features		= NETIF_F_FRAGLIST;
@@ -237,11 +244,8 @@
 	dev->addr_len		= 0;
 	dev->tx_queue_len	= 10;
 
+	dev->netdev_ops		= &gprs_netdev_ops;
 	dev->destructor		= free_netdev;
-	dev->open		= gprs_open;
-	dev->stop		= gprs_close;
-	dev->hard_start_xmit	= gprs_xmit; /* mandatory */
-	dev->change_mtu		= gprs_set_mtu;
 }
 
 /*
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 5070643..2f0f0b0 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -661,12 +661,13 @@
  * next pending event (0 for no event in pq).
  * Note: Applied are events whose have cl->pq_key <= q->now.
  */
-static psched_time_t htb_do_events(struct htb_sched *q, int level)
+static psched_time_t htb_do_events(struct htb_sched *q, int level,
+				   unsigned long start)
 {
 	/* don't run for longer than 2 jiffies; 2 is used instead of
 	   1 to simplify things when jiffy is going to be incremented
 	   too soon */
-	unsigned long stop_at = jiffies + 2;
+	unsigned long stop_at = start + 2;
 	while (time_before(jiffies, stop_at)) {
 		struct htb_class *cl;
 		long diff;
@@ -685,8 +686,8 @@
 		if (cl->cmode != HTB_CAN_SEND)
 			htb_add_to_wait_tree(q, cl, diff);
 	}
-	/* too much load - let's continue on next jiffie */
-	return q->now + PSCHED_TICKS_PER_SEC / HZ;
+	/* too much load - let's continue on next jiffie (including above) */
+	return q->now + 2 * PSCHED_TICKS_PER_SEC / HZ;
 }
 
 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
@@ -845,6 +846,7 @@
 	struct htb_sched *q = qdisc_priv(sch);
 	int level;
 	psched_time_t next_event;
+	unsigned long start_at;
 
 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 	skb = __skb_dequeue(&q->direct_queue);
@@ -857,6 +859,7 @@
 	if (!sch->q.qlen)
 		goto fin;
 	q->now = psched_get_time();
+	start_at = jiffies;
 
 	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
 
@@ -866,14 +869,14 @@
 		psched_time_t event;
 
 		if (q->now >= q->near_ev_cache[level]) {
-			event = htb_do_events(q, level);
+			event = htb_do_events(q, level, start_at);
 			if (!event)
 				event = q->now + PSCHED_TICKS_PER_SEC;
 			q->near_ev_cache[level] = event;
 		} else
 			event = q->near_ev_cache[level];
 
-		if (event && next_event > event)
+		if (next_event > event)
 			next_event = event;
 
 		m = ~q->row_mask[level];
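
The htb_do_events() change above threads one start_at timestamp, captured once per dequeue, through every per-level call, so all levels share a single two-jiffy work budget instead of each resetting it. A sketch of that bounded-work pattern (demo_process_one is hypothetical and returns false once nothing is pending):

    static void demo_do_events(unsigned long start)
    {
        unsigned long stop_at = start + 2;      /* shared ~2 jiffy budget */

        while (time_before(jiffies, stop_at)) {
            if (!demo_process_one())
                break;                          /* queue drained early */
        }
    }
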
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index f3965df..33133d2 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -435,7 +435,7 @@
 	int i;
 
 	q->perturb_timer.function = sfq_perturbation;
-	q->perturb_timer.data = (unsigned long)sch;;
+	q->perturb_timer.data = (unsigned long)sch;
 	init_timer_deferrable(&q->perturb_timer);
 
 	for (i = 0; i < SFQ_HASH_DIVISOR; i++)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index cfc8e7c..ec697ce 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -289,9 +289,9 @@
 
 	do {
 		struct net_device *slave = qdisc_dev(q);
-		struct netdev_queue *slave_txq;
+		struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
+		const struct net_device_ops *slave_ops = slave->netdev_ops;
 
-		slave_txq = netdev_get_tx_queue(slave, 0);
 		if (slave_txq->qdisc_sleeping != q)
 			continue;
 		if (__netif_subqueue_stopped(slave, subq) ||
@@ -305,7 +305,7 @@
 			if (__netif_tx_trylock(slave_txq)) {
 				if (!netif_tx_queue_stopped(slave_txq) &&
 				    !netif_tx_queue_frozen(slave_txq) &&
-				    slave->hard_start_xmit(skb, slave) == 0) {
+				    slave_ops->ndo_start_xmit(skb, slave) == 0) {
 					__netif_tx_unlock(slave_txq);
 					master->slaves = NEXT_SLAVE(q);
 					netif_wake_queue(dev);
@@ -420,6 +420,14 @@
 	return 0;
 }
 
+static const struct net_device_ops teql_netdev_ops = {
+	.ndo_open	= teql_master_open,
+	.ndo_stop	= teql_master_close,
+	.ndo_start_xmit	= teql_master_xmit,
+	.ndo_get_stats	= teql_master_stats,
+	.ndo_change_mtu	= teql_master_mtu,
+};
+
 static __init void teql_master_setup(struct net_device *dev)
 {
 	struct teql_master *master = netdev_priv(dev);
@@ -436,11 +444,7 @@
 	ops->destroy	=	teql_destroy;
 	ops->owner	=	THIS_MODULE;
 
-	dev->open		= teql_master_open;
-	dev->hard_start_xmit	= teql_master_xmit;
-	dev->stop		= teql_master_close;
-	dev->get_stats		= teql_master_stats;
-	dev->change_mtu		= teql_master_mtu;
+	dev->netdev_ops =       &teql_netdev_ops;
 	dev->type		= ARPHRD_VOID;
 	dev->mtu		= 1500;
 	dev->tx_queue_len	= 100;
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 52db5f6..56935bb 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -141,8 +141,8 @@
 /* Compare two byte vectors as numbers.  Return values
  * are:
  * 	  0 - vectors are equal
- * 	< 0 - vector 1 is smaller then vector2
- * 	> 0 - vector 1 is greater then vector2
+ * 	< 0 - vector 1 is smaller than vector2
+ * 	> 0 - vector 1 is greater than vector2
  *
  * Algorithm is:
  * 	This is performed by selecting the numerically smaller key vector...
@@ -489,7 +489,7 @@
 	return 0;
 
 out_err:
-	/* Clean up any successfull allocations */
+	/* Clean up any successful allocations */
 	sctp_auth_destroy_hmacs(ep->auth_hmacs);
 	return -ENOMEM;
 }
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 1c4e5d6..3a0cd07 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -4268,9 +4268,9 @@
 
 /*
  * Handle a protocol violation when the chunk length is invalid.
- * "Invalid" length is identified as smaller then the minimal length a
+ * "Invalid" length is identified as smaller than the minimal length a
  * given chunk can be.  For example, a SACK chunk has invalid length
- * if it's length is set to be smaller then the size of sctp_sack_chunk_t.
+ * if its length is set to be smaller than the size of sctp_sack_chunk_t.
  *
  * We inform the other end by sending an ABORT with a Protocol Violation
  * error code.
@@ -4300,7 +4300,7 @@
 
 /*
  * Handle a protocol violation when the parameter length is invalid.
- * "Invalid" length is identified as smaller then the minimal length a
+ * "Invalid" length is identified as smaller than the minimal length a
  * given parameter can be.
  */
 static sctp_disposition_t sctp_sf_violation_paramlen(
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b14a8f3..ff0a8f8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2717,7 +2717,7 @@
 				paths++;
 			}
 
-			/* Only validate asocmaxrxt if we have more then
+			/* Only validate asocmaxrxt if we have more than
 			 * one path/transport.  We do this because path
 			 * retransmissions are only counted when we have more
 			 * then one path.
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 35c73e8..9bd6456 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -227,7 +227,7 @@
 		 */
 		bitmap_zero(map->tsn_map, map->len);
 	} else {
-		/* If the gap is smaller then the map size,
+		/* If the gap is smaller than the map size,
 		 * shift the map by 'gap' bits and update further.
 		 */
 		bitmap_shift_right(map->tsn_map, map->tsn_map, gap, map->len);
diff --git a/net/socket.c b/net/socket.c
index 06603d7..35dd737 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1214,7 +1214,7 @@
 	return __sock_create(&init_net, family, type, protocol, res, 1);
 }
 
-asmlinkage long sys_socket(int family, int type, int protocol)
+SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
 {
 	int retval;
 	struct socket *sock;
@@ -1255,8 +1255,8 @@
  *	Create a pair of connected sockets.
  */
 
-asmlinkage long sys_socketpair(int family, int type, int protocol,
-			       int __user *usockvec)
+SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
+		int __user *, usockvec)
 {
 	struct socket *sock1, *sock2;
 	int fd1, fd2, err;
@@ -1356,7 +1356,7 @@
  *	the protocol layer (having also checked the address is ok).
  */
 
-asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen)
+SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
 {
 	struct socket *sock;
 	struct sockaddr_storage address;
@@ -1385,7 +1385,7 @@
  *	ready for listening.
  */
 
-asmlinkage long sys_listen(int fd, int backlog)
+SYSCALL_DEFINE2(listen, int, fd, int, backlog)
 {
 	struct socket *sock;
 	int err, fput_needed;
@@ -1418,8 +1418,8 @@
  *	clean when we restucture accept also.
  */
 
-asmlinkage long sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
-			    int __user *upeer_addrlen, int flags)
+SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
+		int __user *, upeer_addrlen, int, flags)
 {
 	struct socket *sock, *newsock;
 	struct file *newfile;
@@ -1502,8 +1502,8 @@
 	goto out_put;
 }
 
-asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr,
-			   int __user *upeer_addrlen)
+SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
+		int __user *, upeer_addrlen)
 {
 	return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
 }
@@ -1520,8 +1520,8 @@
  *	include the -EINPROGRESS status for such sockets.
  */
 
-asmlinkage long sys_connect(int fd, struct sockaddr __user *uservaddr,
-			    int addrlen)
+SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
+		int, addrlen)
 {
 	struct socket *sock;
 	struct sockaddr_storage address;
@@ -1552,8 +1552,8 @@
  *	name to user space.
  */
 
-asmlinkage long sys_getsockname(int fd, struct sockaddr __user *usockaddr,
-				int __user *usockaddr_len)
+SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
+		int __user *, usockaddr_len)
 {
 	struct socket *sock;
 	struct sockaddr_storage address;
@@ -1583,8 +1583,8 @@
  *	name to user space.
  */
 
-asmlinkage long sys_getpeername(int fd, struct sockaddr __user *usockaddr,
-				int __user *usockaddr_len)
+SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
+		int __user *, usockaddr_len)
 {
 	struct socket *sock;
 	struct sockaddr_storage address;
@@ -1615,9 +1615,9 @@
  *	the protocol.
  */
 
-asmlinkage long sys_sendto(int fd, void __user *buff, size_t len,
-			   unsigned flags, struct sockaddr __user *addr,
-			   int addr_len)
+SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
+		unsigned, flags, struct sockaddr __user *, addr,
+		int, addr_len)
 {
 	struct socket *sock;
 	struct sockaddr_storage address;
@@ -1660,7 +1660,8 @@
  *	Send a datagram down a socket.
  */
 
-asmlinkage long sys_send(int fd, void __user *buff, size_t len, unsigned flags)
+SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
+		unsigned, flags)
 {
 	return sys_sendto(fd, buff, len, flags, NULL, 0);
 }
@@ -1671,9 +1672,9 @@
  *	sender address from kernel to user space.
  */
 
-asmlinkage long sys_recvfrom(int fd, void __user *ubuf, size_t size,
-			     unsigned flags, struct sockaddr __user *addr,
-			     int __user *addr_len)
+SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
+		unsigned, flags, struct sockaddr __user *, addr,
+		int __user *, addr_len)
 {
 	struct socket *sock;
 	struct iovec iov;
@@ -1725,8 +1726,8 @@
  *	to pass the user mode parameter for the protocols to sort out.
  */
 
-asmlinkage long sys_setsockopt(int fd, int level, int optname,
-			       char __user *optval, int optlen)
+SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
+		char __user *, optval, int, optlen)
 {
 	int err, fput_needed;
 	struct socket *sock;
@@ -1759,8 +1760,8 @@
  *	to pass a user mode parameter for the protocols to sort out.
  */
 
-asmlinkage long sys_getsockopt(int fd, int level, int optname,
-			       char __user *optval, int __user *optlen)
+SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
+		char __user *, optval, int __user *, optlen)
 {
 	int err, fput_needed;
 	struct socket *sock;
@@ -1789,7 +1790,7 @@
  *	Shutdown a socket.
  */
 
-asmlinkage long sys_shutdown(int fd, int how)
+SYSCALL_DEFINE2(shutdown, int, fd, int, how)
 {
 	int err, fput_needed;
 	struct socket *sock;
@@ -1815,7 +1816,7 @@
  *	BSD sendmsg interface
  */
 
-asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
+SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
 {
 	struct compat_msghdr __user *msg_compat =
 	    (struct compat_msghdr __user *)msg;
@@ -1921,8 +1922,8 @@
  *	BSD recvmsg interface
  */
 
-asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg,
-			    unsigned int flags)
+SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
+		unsigned int, flags)
 {
 	struct compat_msghdr __user *msg_compat =
 	    (struct compat_msghdr __user *)msg;
@@ -2045,7 +2046,7 @@
  *  it is set by the callees.
  */
 
-asmlinkage long sys_socketcall(int call, unsigned long __user *args)
+SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
 {
 	unsigned long a[6];
 	unsigned long a0, a1;
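
The socket.c conversions above all follow the SYSCALL_DEFINEn pattern: the macro takes the syscall name followed by alternating type/name pairs and expands to the usual asmlinkage long sys_<name>(...) definition plus any per-architecture wrapper glue. A minimal sketch with a made-up syscall (requires <linux/syscalls.h>):

    SYSCALL_DEFINE2(demo, int, fd, unsigned int, flags)
    {
        if (flags & ~0x1U)
            return -EINVAL;
        return fd >= 0 ? 0 : -EBADF;
    }
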
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index c996671..4735caa 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -98,7 +98,7 @@
 
 	return new;
 }
-EXPORT_SYMBOL(sunrpc_cache_lookup);
+EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
 
 
 static void queue_loose(struct cache_detail *detail, struct cache_head *ch);
@@ -173,7 +173,7 @@
 	cache_put(old, detail);
 	return tmp;
 }
-EXPORT_SYMBOL(sunrpc_cache_update);
+EXPORT_SYMBOL_GPL(sunrpc_cache_update);
 
 static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h);
 /*
@@ -245,7 +245,7 @@
 		cache_put(h, detail);
 	return rv;
 }
-EXPORT_SYMBOL(cache_check);
+EXPORT_SYMBOL_GPL(cache_check);
 
 /*
  * caches need to be periodically cleaned.
@@ -373,7 +373,7 @@
 	schedule_delayed_work(&cache_cleaner, 0);
 	return 0;
 }
-EXPORT_SYMBOL(cache_register);
+EXPORT_SYMBOL_GPL(cache_register);
 
 void cache_unregister(struct cache_detail *cd)
 {
@@ -399,7 +399,7 @@
 out:
 	printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
 }
-EXPORT_SYMBOL(cache_unregister);
+EXPORT_SYMBOL_GPL(cache_unregister);
 
 /* clean cache tries to find something to clean
  * and cleans it.
@@ -514,7 +514,7 @@
 	while (cache_clean() != -1)
 		cond_resched();
 }
-EXPORT_SYMBOL(cache_flush);
+EXPORT_SYMBOL_GPL(cache_flush);
 
 void cache_purge(struct cache_detail *detail)
 {
@@ -523,7 +523,7 @@
 	cache_flush();
 	detail->flush_time = 1;
 }
-EXPORT_SYMBOL(cache_purge);
+EXPORT_SYMBOL_GPL(cache_purge);
 
 
 /*
@@ -988,7 +988,7 @@
 	*bpp = bp;
 	*lp = len;
 }
-EXPORT_SYMBOL(qword_add);
+EXPORT_SYMBOL_GPL(qword_add);
 
 void qword_addhex(char **bpp, int *lp, char *buf, int blen)
 {
@@ -1017,7 +1017,7 @@
 	*bpp = bp;
 	*lp = len;
 }
-EXPORT_SYMBOL(qword_addhex);
+EXPORT_SYMBOL_GPL(qword_addhex);
 
 static void warn_no_listener(struct cache_detail *detail)
 {
@@ -1140,7 +1140,7 @@
 	*dest = '\0';
 	return len;
 }
-EXPORT_SYMBOL(qword_get);
+EXPORT_SYMBOL_GPL(qword_get);
 
 
 /*
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 50b049c..085372e 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -106,7 +106,7 @@
 		seq_putc(seq, '\n');
 	}
 }
-EXPORT_SYMBOL(svc_seq_show);
+EXPORT_SYMBOL_GPL(svc_seq_show);
 
 /**
  * rpc_alloc_iostats - allocate an rpc_iostats structure
@@ -249,14 +249,14 @@
 {
 	return do_register(statp->program->pg_name, statp, fops);
 }
-EXPORT_SYMBOL(svc_proc_register);
+EXPORT_SYMBOL_GPL(svc_proc_register);
 
 void
 svc_proc_unregister(const char *name)
 {
 	remove_proc_entry(name, proc_net_rpc);
 }
-EXPORT_SYMBOL(svc_proc_unregister);
+EXPORT_SYMBOL_GPL(svc_proc_unregister);
 
 void
 rpc_proc_init(void)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 54c98d8..c51fed4 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -431,7 +431,7 @@
 {
 	return __svc_create(prog, bufsize, /*npools*/1, family, shutdown);
 }
-EXPORT_SYMBOL(svc_create);
+EXPORT_SYMBOL_GPL(svc_create);
 
 struct svc_serv *
 svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
@@ -450,7 +450,7 @@
 
 	return serv;
 }
-EXPORT_SYMBOL(svc_create_pooled);
+EXPORT_SYMBOL_GPL(svc_create_pooled);
 
 /*
  * Destroy an RPC service. Should be called with appropriate locking to
@@ -492,7 +492,7 @@
 	kfree(serv->sv_pools);
 	kfree(serv);
 }
-EXPORT_SYMBOL(svc_destroy);
+EXPORT_SYMBOL_GPL(svc_destroy);
 
 /*
  * Allocate an RPC server's buffer space.
@@ -567,7 +567,7 @@
 out_enomem:
 	return ERR_PTR(-ENOMEM);
 }
-EXPORT_SYMBOL(svc_prepare_thread);
+EXPORT_SYMBOL_GPL(svc_prepare_thread);
 
 /*
  * Choose a pool in which to create a new thread, for svc_set_num_threads
@@ -689,7 +689,7 @@
 
 	return error;
 }
-EXPORT_SYMBOL(svc_set_num_threads);
+EXPORT_SYMBOL_GPL(svc_set_num_threads);
 
 /*
  * Called from a server thread as it's exiting. Caller must hold the BKL or
@@ -717,7 +717,7 @@
 	if (serv)
 		svc_destroy(serv);
 }
-EXPORT_SYMBOL(svc_exit_thread);
+EXPORT_SYMBOL_GPL(svc_exit_thread);
 
 #ifdef CONFIG_SUNRPC_REGISTER_V4
 
@@ -1231,7 +1231,7 @@
 	svc_putnl(resv, ntohl(rpc_stat));
 	goto sendit;
 }
-EXPORT_SYMBOL(svc_process);
+EXPORT_SYMBOL_GPL(svc_process);
 
 /*
  * Return (transport-specific) limit on the rpc payload.
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index bf5b5cd..e588df5 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -440,7 +440,7 @@
 		svc_xprt_enqueue(xprt);
 	}
 }
-EXPORT_SYMBOL(svc_reserve);
+EXPORT_SYMBOL_GPL(svc_reserve);
 
 static void svc_xprt_release(struct svc_rqst *rqstp)
 {
@@ -448,6 +448,9 @@
 
 	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);
 
+	kfree(rqstp->rq_deferred);
+	rqstp->rq_deferred = NULL;
+
 	svc_free_res_pages(rqstp);
 	rqstp->rq_res.page_len = 0;
 	rqstp->rq_res.page_base = 0;
@@ -498,7 +501,7 @@
 		spin_unlock_bh(&pool->sp_lock);
 	}
 }
-EXPORT_SYMBOL(svc_wake_up);
+EXPORT_SYMBOL_GPL(svc_wake_up);
 
 int svc_port_is_privileged(struct sockaddr *sin)
 {
@@ -515,8 +518,10 @@
 }
 
 /*
- * Make sure that we don't have too many active connections.  If we
- * have, something must be dropped.
+ * Make sure that we don't have too many active connections. If we have,
+ * something must be dropped. It's not clear what will happen if we allow
+ * "too many" connections, but when dealing with network-facing software,
+ * we have to code defensively. Here we do that by imposing hard limits.
  *
  * There's no point in trying to do random drop here for DoS
  * prevention. The NFS clients does 1 reconnect in 15 seconds. An
@@ -525,19 +530,27 @@
  * The only somewhat efficient mechanism would be if drop old
  * connections from the same IP first. But right now we don't even
  * record the client IP in svc_sock.
+ *
+ * Single-threaded services that expect a lot of clients will probably
+ * need to set sv_maxconn to override the default value, which is based
+ * on the number of threads.
  */
 static void svc_check_conn_limits(struct svc_serv *serv)
 {
-	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
+	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
+				(serv->sv_nrthreads+3) * 20;
+
+	if (serv->sv_tmpcnt > limit) {
 		struct svc_xprt *xprt = NULL;
 		spin_lock_bh(&serv->sv_lock);
 		if (!list_empty(&serv->sv_tempsocks)) {
 			if (net_ratelimit()) {
 				/* Try to help the admin */
 				printk(KERN_NOTICE "%s: too many open  "
-				       "connections, consider increasing the "
-				       "number of nfsd threads\n",
-				       serv->sv_name);
+				       "connections, consider increasing %s\n",
+				       serv->sv_name, serv->sv_maxconn ?
+				       "the max number of connections." :
+				       "the number of threads.");
 			}
 			/*
 			 * Always select the oldest connection. It's not fair,
@@ -730,7 +743,7 @@
 		serv->sv_stats->netcnt++;
 	return len;
 }
-EXPORT_SYMBOL(svc_recv);
+EXPORT_SYMBOL_GPL(svc_recv);
 
 /*
  * Drop request
@@ -740,7 +753,7 @@
 	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
 	svc_xprt_release(rqstp);
 }
-EXPORT_SYMBOL(svc_drop);
+EXPORT_SYMBOL_GPL(svc_drop);
 
 /*
  * Return reply to client.
@@ -837,6 +850,11 @@
 void svc_delete_xprt(struct svc_xprt *xprt)
 {
 	struct svc_serv	*serv = xprt->xpt_server;
+	struct svc_deferred_req *dr;
+
+	/* Only do this once */
+	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
+		return;
 
 	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
 	xprt->xpt_ops->xpo_detach(xprt);
@@ -851,12 +869,16 @@
 	 * while still attached to a queue, the queue itself
 	 * is about to be destroyed (in svc_destroy).
 	 */
-	if (!test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) {
-		BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2);
-		if (test_bit(XPT_TEMP, &xprt->xpt_flags))
-			serv->sv_tmpcnt--;
+	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
+		serv->sv_tmpcnt--;
+
+	for (dr = svc_deferred_dequeue(xprt); dr;
+	     dr = svc_deferred_dequeue(xprt)) {
 		svc_xprt_put(xprt);
+		kfree(dr);
 	}
+
+	svc_xprt_put(xprt);
 	spin_unlock_bh(&serv->sv_lock);
 }
 
@@ -902,17 +924,19 @@
 		container_of(dreq, struct svc_deferred_req, handle);
 	struct svc_xprt *xprt = dr->xprt;
 
-	if (too_many) {
+	spin_lock(&xprt->xpt_lock);
+	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
+	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
+		spin_unlock(&xprt->xpt_lock);
+		dprintk("revisit canceled\n");
 		svc_xprt_put(xprt);
 		kfree(dr);
 		return;
 	}
 	dprintk("revisit queued\n");
 	dr->xprt = NULL;
-	spin_lock(&xprt->xpt_lock);
 	list_add(&dr->handle.recent, &xprt->xpt_deferred);
 	spin_unlock(&xprt->xpt_lock);
-	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
 	svc_xprt_enqueue(xprt);
 	svc_xprt_put(xprt);
 }
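
With the svc_check_conn_limits() change in this file, the connection cap is an explicit per-service sv_maxconn when the service sets one, falling back to the old thread-count heuristic otherwise. Distilled into a sketch:

    static unsigned int demo_conn_limit(unsigned int sv_maxconn,
                                        unsigned int sv_nrthreads)
    {
        /* an explicit cap wins; otherwise scale with the thread count */
        return sv_maxconn ? sv_maxconn : (sv_nrthreads + 3) * 20;
    }
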
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 8a73cbb..e64109b 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -57,13 +57,13 @@
 	rqstp->rq_authop = aops;
 	return aops->accept(rqstp, authp);
 }
-EXPORT_SYMBOL(svc_authenticate);
+EXPORT_SYMBOL_GPL(svc_authenticate);
 
 int svc_set_client(struct svc_rqst *rqstp)
 {
 	return rqstp->rq_authop->set_client(rqstp);
 }
-EXPORT_SYMBOL(svc_set_client);
+EXPORT_SYMBOL_GPL(svc_set_client);
 
 /* A request, which was authenticated, has now executed.
  * Time to finalise the credentials and verifier
@@ -95,7 +95,7 @@
 	spin_unlock(&authtab_lock);
 	return rv;
 }
-EXPORT_SYMBOL(svc_auth_register);
+EXPORT_SYMBOL_GPL(svc_auth_register);
 
 void
 svc_auth_unregister(rpc_authflavor_t flavor)
@@ -105,7 +105,7 @@
 		authtab[flavor] = NULL;
 	spin_unlock(&authtab_lock);
 }
-EXPORT_SYMBOL(svc_auth_unregister);
+EXPORT_SYMBOL_GPL(svc_auth_unregister);
 
 /**************************************************
  * 'auth_domains' are stored in a hash table indexed by name.
@@ -132,7 +132,7 @@
 		spin_unlock(&auth_domain_lock);
 	}
 }
-EXPORT_SYMBOL(auth_domain_put);
+EXPORT_SYMBOL_GPL(auth_domain_put);
 
 struct auth_domain *
 auth_domain_lookup(char *name, struct auth_domain *new)
@@ -157,10 +157,10 @@
 	spin_unlock(&auth_domain_lock);
 	return new;
 }
-EXPORT_SYMBOL(auth_domain_lookup);
+EXPORT_SYMBOL_GPL(auth_domain_lookup);
 
 struct auth_domain *auth_domain_find(char *name)
 {
 	return auth_domain_lookup(name, NULL);
 }
-EXPORT_SYMBOL(auth_domain_find);
+EXPORT_SYMBOL_GPL(auth_domain_find);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 82240e6..5c865e2 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -64,7 +64,7 @@
 		rv = auth_domain_lookup(name, &new->h);
 	}
 }
-EXPORT_SYMBOL(unix_domain_find);
+EXPORT_SYMBOL_GPL(unix_domain_find);
 
 static void svcauth_unix_domain_release(struct auth_domain *dom)
 {
@@ -358,7 +358,7 @@
 	else
 		return -ENOMEM;
 }
-EXPORT_SYMBOL(auth_unix_add_addr);
+EXPORT_SYMBOL_GPL(auth_unix_add_addr);
 
 int auth_unix_forget_old(struct auth_domain *dom)
 {
@@ -370,7 +370,7 @@
 	udom->addr_changes++;
 	return 0;
 }
-EXPORT_SYMBOL(auth_unix_forget_old);
+EXPORT_SYMBOL_GPL(auth_unix_forget_old);
 
 struct auth_domain *auth_unix_lookup(struct in6_addr *addr)
 {
@@ -395,13 +395,13 @@
 	cache_put(&ipm->h, &ip_map_cache);
 	return rv;
 }
-EXPORT_SYMBOL(auth_unix_lookup);
+EXPORT_SYMBOL_GPL(auth_unix_lookup);
 
 void svcauth_unix_purge(void)
 {
 	cache_purge(&ip_map_cache);
 }
-EXPORT_SYMBOL(svcauth_unix_purge);
+EXPORT_SYMBOL_GPL(svcauth_unix_purge);
 
 static inline struct ip_map *
 ip_map_cached_get(struct svc_rqst *rqstp)
@@ -714,7 +714,7 @@
 	return SVC_OK;
 }
 
-EXPORT_SYMBOL(svcauth_unix_set_client);
+EXPORT_SYMBOL_GPL(svcauth_unix_set_client);
 
 static int
 svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index ef3238d..5763e64 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -59,6 +59,7 @@
 static int		svc_udp_recvfrom(struct svc_rqst *);
 static int		svc_udp_sendto(struct svc_rqst *);
 static void		svc_sock_detach(struct svc_xprt *);
+static void		svc_tcp_sock_detach(struct svc_xprt *);
 static void		svc_sock_free(struct svc_xprt *);
 
 static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
@@ -102,7 +103,6 @@
 static void svc_release_skb(struct svc_rqst *rqstp)
 {
 	struct sk_buff *skb = rqstp->rq_xprt_ctxt;
-	struct svc_deferred_req *dr = rqstp->rq_deferred;
 
 	if (skb) {
 		struct svc_sock *svsk =
@@ -112,10 +112,6 @@
 		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
 		skb_free_datagram(svsk->sk_sk, skb);
 	}
-	if (dr) {
-		rqstp->rq_deferred = NULL;
-		kfree(dr);
-	}
 }
 
 union svc_pktinfo_u {
@@ -289,7 +285,7 @@
 		return -ENOENT;
 	return len;
 }
-EXPORT_SYMBOL(svc_sock_names);
+EXPORT_SYMBOL_GPL(svc_sock_names);
 
 /*
  * Check input queue length
@@ -1017,7 +1013,7 @@
 	.xpo_recvfrom = svc_tcp_recvfrom,
 	.xpo_sendto = svc_tcp_sendto,
 	.xpo_release_rqst = svc_release_skb,
-	.xpo_detach = svc_sock_detach,
+	.xpo_detach = svc_tcp_sock_detach,
 	.xpo_free = svc_sock_free,
 	.xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
 	.xpo_has_wspace = svc_tcp_has_wspace,
@@ -1101,7 +1097,7 @@
 	}
 	spin_unlock_bh(&serv->sv_lock);
 }
-EXPORT_SYMBOL(svc_sock_update_bufs);
+EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
 
 /*
  * Initialize socket for RPC use and create svc_sock struct
@@ -1287,6 +1283,24 @@
 	sk->sk_state_change = svsk->sk_ostate;
 	sk->sk_data_ready = svsk->sk_odata;
 	sk->sk_write_space = svsk->sk_owspace;
+
+	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+		wake_up_interruptible(sk->sk_sleep);
+}
+
+/*
+ * Disconnect the socket, and reset the callbacks
+ */
+static void svc_tcp_sock_detach(struct svc_xprt *xprt)
+{
+	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
+
+	dprintk("svc: svc_tcp_sock_detach(%p)\n", svsk);
+
+	svc_sock_detach(xprt);
+
+	if (!test_bit(XPT_LISTENER, &xprt->xpt_flags))
+		kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR);
 }
 
 /*
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 5aa024b..2f2d731 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -124,7 +124,7 @@
 static inline void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b,
 				  struct tipc_node_map *nm_diff)
 {
-	int stop = sizeof(nm_a->map) / sizeof(u32);
+	int stop = ARRAY_SIZE(nm_a->map);
 	int w;
 	int b;
 	u32 map;
diff --git a/net/wimax/Kconfig b/net/wimax/Kconfig
new file mode 100644
index 0000000..18495cd
--- /dev/null
+++ b/net/wimax/Kconfig
@@ -0,0 +1,52 @@
+#
+# WiMAX LAN device configuration
+#
+# Note the ugly 'depends on' on WIMAX: that disallows RFKILL to be a
+# module if WIMAX is to be linked in. The WiMAX code is done in such a
+# way that it doesn't require and explicit dependency on RFKILL in
+# case an embedded system wants to rip it out.
+#
+# As well, enablement of the RFKILL code means we need the INPUT layer
+# support to inject events coming from hw rfkill switches. That
+# dependency could be killed if input.h provided appropriate means to
+# work when input is disabled.
+
+comment "WiMAX Wireless Broadband support requires CONFIG_INPUT enabled"
+	depends on INPUT = n && RFKILL != n
+
+menuconfig WIMAX
+	tristate "WiMAX Wireless Broadband support"
+	depends on (y && RFKILL != m) || m
+	depends on (INPUT && RFKILL != n) || RFKILL = n
+	help
+
+	  Select to configure support for devices that provide
+	  wireless broadband connectivity using the WiMAX protocol
+	  (IEEE 802.16).
+
+	  Please note that most of these devices require signing up
+	  for a service plan with a provider.
+
+	  The different WiMAX drivers can be enabled in the menu entry
+
+	  Device Drivers > Network device support > WiMAX Wireless
+	  Broadband devices
+
+	  If unsure, it is safe to select M (module).
+
+config WIMAX_DEBUG_LEVEL
+	int "WiMAX debug level"
+	depends on WIMAX
+	default 8
+	help
+
+	  Select the maximum debug verbosity level to be compiled into
+	  the WiMAX stack code.
+
+	  By default, debug messages are disabled at runtime and can
+	  be selectively enabled for different parts of the code using
+	  the sysfs debug-levels file.
+
+	  If set at zero, this will compile out all the debug code.
+
+	  It is recommended that it is left at 8.
diff --git a/net/wimax/Makefile b/net/wimax/Makefile
new file mode 100644
index 0000000..5b80b94
--- /dev/null
+++ b/net/wimax/Makefile
@@ -0,0 +1,13 @@
+
+obj-$(CONFIG_WIMAX)		+= wimax.o
+
+wimax-y :=		\
+	id-table.o	\
+	op-msg.o	\
+	op-reset.o	\
+	op-rfkill.o	\
+	stack.o
+
+wimax-$(CONFIG_DEBUG_FS) += debugfs.o
+
+
diff --git a/net/wimax/debug-levels.h b/net/wimax/debug-levels.h
new file mode 100644
index 0000000..1c29123
--- /dev/null
+++ b/net/wimax/debug-levels.h
@@ -0,0 +1,42 @@
+/*
+ * Linux WiMAX Stack
+ * Debug levels control file for the wimax module
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#ifndef __debug_levels__h__
+#define __debug_levels__h__
+
+/* Maximum compile and run time debug level for all submodules */
+#define D_MODULENAME wimax
+#define D_MASTER CONFIG_WIMAX_DEBUG_LEVEL
+
+#include <linux/wimax/debug.h>
+
+/* List of all the enabled modules */
+enum d_module {
+	D_SUBMODULE_DECLARE(debugfs),
+	D_SUBMODULE_DECLARE(id_table),
+	D_SUBMODULE_DECLARE(op_msg),
+	D_SUBMODULE_DECLARE(op_reset),
+	D_SUBMODULE_DECLARE(op_rfkill),
+	D_SUBMODULE_DECLARE(stack),
+};
+
+#endif /* #ifndef __debug_levels__h__ */
diff --git a/net/wimax/debugfs.c b/net/wimax/debugfs.c
new file mode 100644
index 0000000..87cf443
--- /dev/null
+++ b/net/wimax/debugfs.c
@@ -0,0 +1,90 @@
+/*
+ * Linux WiMAX
+ * Debugfs support
+ *
+ *
+ * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#include <linux/debugfs.h>
+#include <linux/wimax.h>
+#include "wimax-internal.h"
+
+#define D_SUBMODULE debugfs
+#include "debug-levels.h"
+
+
+/* Debug framework control of debug levels */
+struct d_level D_LEVEL[] = {
+	D_SUBMODULE_DEFINE(debugfs),
+	D_SUBMODULE_DEFINE(id_table),
+	D_SUBMODULE_DEFINE(op_msg),
+	D_SUBMODULE_DEFINE(op_reset),
+	D_SUBMODULE_DEFINE(op_rfkill),
+	D_SUBMODULE_DEFINE(stack),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+
+#define __debugfs_register(prefix, name, parent)			\
+do {									\
+	result = d_level_register_debugfs(prefix, name, parent);	\
+	if (result < 0)							\
+		goto error;						\
+} while (0)
+
+
+int wimax_debugfs_add(struct wimax_dev *wimax_dev)
+{
+	int result;
+	struct net_device *net_dev = wimax_dev->net_dev;
+	struct device *dev = net_dev->dev.parent;
+	struct dentry *dentry;
+	char buf[128];
+
+	snprintf(buf, sizeof(buf), "wimax:%s", net_dev->name);
+	dentry = debugfs_create_dir(buf, NULL);
+	result = PTR_ERR(dentry);
+	if (IS_ERR(dentry)) {
+		if (result == -ENODEV)
+			result = 0;	/* No debugfs support */
+		else
+			dev_err(dev, "Can't create debugfs dentry: %d\n",
+				result);
+		goto out;
+	}
+	wimax_dev->debugfs_dentry = dentry;
+	__debugfs_register("wimax_dl_", debugfs, dentry);
+	__debugfs_register("wimax_dl_", id_table, dentry);
+	__debugfs_register("wimax_dl_", op_msg, dentry);
+	__debugfs_register("wimax_dl_", op_reset, dentry);
+	__debugfs_register("wimax_dl_", op_rfkill, dentry);
+	__debugfs_register("wimax_dl_", stack, dentry);
+	result = 0;
+out:
+	return result;
+
+error:
+	debugfs_remove_recursive(wimax_dev->debugfs_dentry);
+	return result;
+}
+
+void wimax_debugfs_rm(struct wimax_dev *wimax_dev)
+{
+	debugfs_remove_recursive(wimax_dev->debugfs_dentry);
+}
+
+
diff --git a/net/wimax/id-table.c b/net/wimax/id-table.c
new file mode 100644
index 0000000..5e685f7
--- /dev/null
+++ b/net/wimax/id-table.c
@@ -0,0 +1,144 @@
+/*
+ * Linux WiMAX
+ * Mapping of generic netlink family IDs to net devices
+ *
+ *
+ * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * We assign a single generic netlink family ID to each device (to
+ * simplify lookup).
+ *
+ * We need a way to map family ID to a wimax_dev pointer.
+ *
+ * The idea is to use a very simple lookup. Using a netlink attribute
+ * with (for example) the interface name implies a heavier search over
+ * all the network devices; seemed kind of a waste given that we know
+ * we are looking for a WiMAX device and that most systems will have
+ * just a single WiMAX adapter.
+ *
+ * We put all the WiMAX devices in the system in a linked list and
+ * match the generic netlink family ID against the list.
+ *
+ * By using a linked list, the case of a single adapter in the system
+ * becomes (almost) no overhead, while still working for many more. If
+ * it ever goes beyond two, I'll be surprised.
+ */
+#include <linux/device.h>
+#include <net/genetlink.h>
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include <linux/wimax.h>
+#include "wimax-internal.h"
+
+
+#define D_SUBMODULE id_table
+#include "debug-levels.h"
+
+
+static DEFINE_SPINLOCK(wimax_id_table_lock);
+static struct list_head wimax_id_table = LIST_HEAD_INIT(wimax_id_table);
+
+
+/*
+ * wimax_id_table_add - add a gennetlink family ID / wimax_dev mapping
+ *
+ * @wimax_dev: WiMAX device descriptor to associate to the Generic
+ *     Netlink family ID.
+ *
+ * Add the device to the ID table (a simple linked list); lookups later
+ * match against it to find the owning wimax_dev.
+ */
+void wimax_id_table_add(struct wimax_dev *wimax_dev)
+{
+	d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev);
+	spin_lock(&wimax_id_table_lock);
+	list_add(&wimax_dev->id_table_node, &wimax_id_table);
+	spin_unlock(&wimax_id_table_lock);
+	d_fnend(3, NULL, "(wimax_dev %p)\n", wimax_dev);
+}
+
+
+/*
+ * wimax_dev_get_by_genl_info - lookup a wimax_dev from the gennetlink info
+ *
+ * The generic netlink family ID has been filled out in the
+ * nlmsghdr->nlmsg_type field, so we pull it from there, look it up in
+ * the mapping table and reference the wimax_dev.
+ *
+ * When done, the reference should be dropped with
+ * 'dev_put(wimax_dev->net_dev)'.
+ */
+struct wimax_dev *wimax_dev_get_by_genl_info(
+	struct genl_info *info, int ifindex)
+{
+	struct wimax_dev *wimax_dev = NULL;
+
+	d_fnstart(3, NULL, "(info %p ifindex %d)\n", info, ifindex);
+	spin_lock(&wimax_id_table_lock);
+	list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
+		if (wimax_dev->net_dev->ifindex == ifindex) {
+			dev_hold(wimax_dev->net_dev);
+			break;
+		}
+	}
+	if (wimax_dev == NULL)
+		d_printf(1, NULL, "wimax: no devices found with ifindex %d\n",
+			 ifindex);
+	spin_unlock(&wimax_id_table_lock);
+	d_fnend(3, NULL, "(info %p ifindex %d) = %p\n",
+		info, ifindex, wimax_dev);
+	return wimax_dev;
+}
+
+
+/*
+ * wimax_id_table_rm - Remove a gennetlink family ID / wimax_dev mapping
+ *
+ * @wimax_dev: WiMAX device descriptor whose mapping to remove from the table
+ */
+void wimax_id_table_rm(struct wimax_dev *wimax_dev)
+{
+	spin_lock(&wimax_id_table_lock);
+	list_del_init(&wimax_dev->id_table_node);
+	spin_unlock(&wimax_id_table_lock);
+}
+
+
+/*
+ * Release the gennetlink family id / mapping table
+ *
+ * On debug, verify that the table is empty upon removal. We want the
+ * code always compiled, to ensure it doesn't bit rot. It will be
+ * compiled out if CONFIG_BUG is disabled.
+ */
+void wimax_id_table_release(void)
+{
+	struct wimax_dev *wimax_dev;
+
+#ifndef CONFIG_BUG
+	return;
+#endif
+	spin_lock(&wimax_id_table_lock);
+	list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
+		printk(KERN_ERR "BUG: %s wimax_dev %p ifindex %d not cleared\n",
+		       __func__, wimax_dev, wimax_dev->net_dev->ifindex);
+		WARN_ON(1);
+	}
+	spin_unlock(&wimax_id_table_lock);
+}
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
new file mode 100644
index 0000000..cb3b4ad
--- /dev/null
+++ b/net/wimax/op-msg.c
@@ -0,0 +1,421 @@
+/*
+ * Linux WiMAX
+ * Generic messaging interface between userspace and driver/device
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This implements a direct communication channel between user space and
+ * the driver/device, by which free form messages can be sent back and
+ * forth.
+ *
+ * This is intended for device-specific features, vendor quirks, etc.
+ *
+ * See include/net/wimax.h
+ *
+ * GENERIC NETLINK ENCODING AND CAPACITY
+ *
+ * A destination "pipe name" is added to each message; it is up to the
+ * drivers to assign or use those names (if using them at all).
+ *
+ * Messages are encoded as a binary netlink attribute using nla_put()
+ * using type NLA_UNSPEC (as some versions of libnl still in
+ * deployment don't yet understand NLA_BINARY).
+ *
+ * The maximum capacity of this transport is PAGESIZE per message (so
+ * the actual payload will be a bit smaller depending on the
+ * netlink/generic netlink attributes and headers).
+ *
+ * RECEPTION OF MESSAGES
+ *
+ * When a message is received from user space, it is passed verbatim
+ * to the driver calling wimax_dev->op_msg_from_user(). The return
+ * value from this function is passed back to user space as an ack
+ * over the generic netlink protocol.
+ *
+ * The stack doesn't do any processing or interpretation of these
+ * messages.
+ *
+ * SENDING MESSAGES
+ *
+ * Messages can be sent with wimax_msg().
+ *
+ * If the message delivery needs to happen in a different context from
+ * that of its creation, wimax_msg_alloc() can be used to get a
+ * pointer to the message that can be delivered later on with
+ * wimax_msg_send().
+ *
+ * ROADMAP
+ *
+ * wimax_gnl_doit_msg_from_user()    Process a message from user space
+ *   wimax_dev_get_by_genl_info()
+ *   wimax_dev->op_msg_from_user()   Delivery of message to the driver
+ *
+ * wimax_msg()                       Send a message to user space
+ *   wimax_msg_alloc()
+ *   wimax_msg_send()
+ */
+#include <linux/device.h>
+#include <net/genetlink.h>
+#include <linux/netdevice.h>
+#include <linux/wimax.h>
+#include <linux/security.h>
+#include "wimax-internal.h"
+
+
+#define D_SUBMODULE op_msg
+#include "debug-levels.h"
+
+
+/**
+ * wimax_msg_alloc - Create a new skb for sending a message to userspace
+ *
+ * @wimax_dev: WiMAX device descriptor
+ * @pipe_name: "named pipe" the message will be sent to
+ * @msg: pointer to the message data to send
+ * @size: size of the message to send (in bytes), including the header.
+ * @gfp_flags: flags for memory allocation.
+ *
+ * Returns: a pointer to the allocated skb if ok, ERR_PTR() with a
+ *     negative errno code on error
+ *
+ * Description:
+ *
+ * Allocates an skb that will contain the message to send to user
+ * space over the messaging pipe and initializes it, copying the
+ * payload.
+ *
+ * Once this call is done, you can deliver it with
+ * wimax_msg_send().
+ *
+ * IMPORTANT:
+ *
+ * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
+ * wimax_msg_send() depends on skb->data being placed at the
+ * beginning of the user message.
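+ *
+ * A minimal usage sketch (the report buffer, its size and the calling
+ * context are illustrative assumptions, not part of this API):
+ *
+ *	skb = wimax_msg_alloc(wimax_dev, NULL, report, report_size,
+ *			      GFP_KERNEL);
+ *	if (IS_ERR(skb))
+ *		return PTR_ERR(skb);
+ *	...
+ *	result = wimax_msg_send(wimax_dev, skb);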
+ */
+struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
+				const char *pipe_name,
+				const void *msg, size_t size,
+				gfp_t gfp_flags)
+{
+	int result;
+	struct device *dev = wimax_dev->net_dev->dev.parent;
+	size_t msg_size;
+	void *genl_msg;
+	struct sk_buff *skb;
+
+	msg_size = nla_total_size(size)
+		+ nla_total_size(sizeof(u32))
+		+ (pipe_name ? nla_total_size(strlen(pipe_name)) : 0);
+	result = -ENOMEM;
+	skb = genlmsg_new(msg_size, gfp_flags);
+	if (skb == NULL)
+		goto error_new;
+	genl_msg = genlmsg_put(skb, 0, 0, &wimax_gnl_family,
+			       0, WIMAX_GNL_OP_MSG_TO_USER);
+	if (genl_msg == NULL) {
+		dev_err(dev, "no memory to create generic netlink message\n");
+		goto error_genlmsg_put;
+	}
+	result = nla_put_u32(skb, WIMAX_GNL_MSG_IFIDX,
+			     wimax_dev->net_dev->ifindex);
+	if (result < 0) {
+		dev_err(dev, "no memory to add ifindex attribute\n");
+		goto error_nla_put;
+	}
+	if (pipe_name) {
+		result = nla_put_string(skb, WIMAX_GNL_MSG_PIPE_NAME,
+					pipe_name);
+		if (result < 0) {
+			dev_err(dev, "no memory to add pipe_name attribute\n");
+			goto error_nla_put;
+		}
+	}
+	result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg);
+	if (result < 0) {
+		dev_err(dev, "no memory to add payload in attribute\n");
+		goto error_nla_put;
+	}
+	genlmsg_end(skb, genl_msg);
+	return skb;
+
+error_nla_put:
+error_genlmsg_put:
+error_new:
+	nlmsg_free(skb);
+	return ERR_PTR(result);
+
+}
+EXPORT_SYMBOL_GPL(wimax_msg_alloc);
+
+
+/**
+ * wimax_msg_data_len - Return a pointer and size of a message's payload
+ *
+ * @msg: Pointer to a message created with wimax_msg_alloc()
+ * @size: Pointer to where to store the message's size
+ *
+ * Returns a pointer to the message data; the payload size is stored
+ * in @size.
+ */
+const void *wimax_msg_data_len(struct sk_buff *msg, size_t *size)
+{
+	struct nlmsghdr *nlh = (void *) msg->head;
+	struct nlattr *nla;
+
+	nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
+			      WIMAX_GNL_MSG_DATA);
+	if (nla == NULL) {
+		printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n");
+		return NULL;
+	}
+	*size = nla_len(nla);
+	return nla_data(nla);
+}
+EXPORT_SYMBOL_GPL(wimax_msg_data_len);
+
+
+/**
+ * wimax_msg_data - Return a pointer to a message's payload
+ *
+ * @msg: Pointer to a message created with wimax_msg_alloc()
+ */
+const void *wimax_msg_data(struct sk_buff *msg)
+{
+	struct nlmsghdr *nlh = (void *) msg->head;
+	struct nlattr *nla;
+
+	nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
+			      WIMAX_GNL_MSG_DATA);
+	if (nla == NULL) {
+		printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n");
+		return NULL;
+	}
+	return nla_data(nla);
+}
+EXPORT_SYMBOL_GPL(wimax_msg_data);
+
+
+/**
+ * wimax_msg_len - Return a message's payload length
+ *
+ * @msg: Pointer to a message created with wimax_msg_alloc()
+ */
+ssize_t wimax_msg_len(struct sk_buff *msg)
+{
+	struct nlmsghdr *nlh = (void *) msg->head;
+	struct nlattr *nla;
+
+	nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
+			      WIMAX_GNL_MSG_DATA);
+	if (nla == NULL) {
+		printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n");
+		return -EINVAL;
+	}
+	return nla_len(nla);
+}
+EXPORT_SYMBOL_GPL(wimax_msg_len);
+
+
+/**
+ * wimax_msg_send - Send a pre-allocated message to user space
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * @skb: &struct sk_buff returned by wimax_msg_alloc(). Note the
+ *     ownership of @skb is transferred to this function.
+ *
+ * Returns: 0 if ok, < 0 errno code on error
+ *
+ * Description:
+ *
+ * Sends a free-form message that was preallocated with
+ * wimax_msg_alloc() and filled up.
+ *
+ * Assumes that once you pass an skb to this function for sending, it
+ * owns it and will release it when done (on success).
+ *
+ * IMPORTANT:
+ *
+ * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
+ * wimax_msg_send() depends on skb->data being placed at the
+ * beginning of the user message.
+ */
+int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb)
+{
+	int result;
+	struct device *dev = wimax_dev->net_dev->dev.parent;
+	void *msg = skb->data;
+	size_t size = skb->len;
+	might_sleep();
+
+	d_printf(1, dev, "CTX: wimax msg, %zu bytes\n", size);
+	d_dump(2, dev, msg, size);
+	result = genlmsg_multicast(skb, 0, wimax_gnl_mcg.id, GFP_KERNEL);
+	d_printf(1, dev, "CTX: genl multicast result %d\n", result);
+	if (result == -ESRCH)	/* Nobody connected, ignore it */
+		result = 0;	/* btw, the skb is freed already */
+	return result;
+}
+EXPORT_SYMBOL_GPL(wimax_msg_send);
+
+
+/**
+ * wimax_msg - Send a message to user space
+ *
+ * @wimax_dev: WiMAX device descriptor (properly referenced)
+ * @pipe_name: "named pipe" the message will be sent to
+ * @buf: pointer to the message to send.
+ * @size: size of the buffer pointed to by @buf (in bytes).
+ * @gfp_flags: flags for memory allocation.
+ *
+ * Returns: %0 if ok, negative errno code on error.
+ *
+ * Description:
+ *
+ * Sends a free-form message to user space on the device @wimax_dev.
+ *
+ * NOTES:
+ *
+ * Once the skb built internally for the message is handed to the
+ * sending code, the stack owns it and will release it when done
+ * (unless an error is returned).
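+ *
+ * A one-shot usage sketch (the event buffer and its size are
+ * illustrative assumptions, not part of this API):
+ *
+ *	result = wimax_msg(wimax_dev, NULL, event, event_size, GFP_KERNEL);
+ *	if (result < 0)
+ *		goto error_msg;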
+ */
+int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
+	      const void *buf, size_t size, gfp_t gfp_flags)
+{
+	int result = -ENOMEM;
+	struct sk_buff *skb;
+
+	skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags);
+	if (skb == NULL)
+		goto error_msg_new;
+	result = wimax_msg_send(wimax_dev, skb);
+error_msg_new:
+	return result;
+}
+EXPORT_SYMBOL_GPL(wimax_msg);
+
+
+static const
+struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+	[WIMAX_GNL_MSG_IFIDX] = {
+		.type = NLA_U32,
+	},
+	[WIMAX_GNL_MSG_DATA] = {
+		.type = NLA_UNSPEC,	/* libnl doesn't grok BINARY yet */
+	},
+};
+
+
+/*
+ * Relays a message from user space to the driver
+ *
+ * The skb is passed to the driver-specific function with the netlink
+ * and generic netlink headers already stripped.
+ *
+ * This call will block while handling/relaying the message.
+ */
+static
+int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info)
+{
+	int result, ifindex;
+	struct wimax_dev *wimax_dev;
+	struct device *dev;
+	struct nlmsghdr *nlh = info->nlhdr;
+	char *pipe_name;
+	void *msg_buf;
+	size_t msg_len;
+
+	might_sleep();
+	d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
+	result = -ENODEV;
+	if (info->attrs[WIMAX_GNL_MSG_IFIDX] == NULL) {
+		printk(KERN_ERR "WIMAX_GNL_MSG_FROM_USER: can't find IFIDX "
+		       "attribute\n");
+		goto error_no_wimax_dev;
+	}
+	ifindex = nla_get_u32(info->attrs[WIMAX_GNL_MSG_IFIDX]);
+	wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
+	if (wimax_dev == NULL)
+		goto error_no_wimax_dev;
+	dev = wimax_dev_to_dev(wimax_dev);
+
+	/* Unpack arguments */
+	result = -EINVAL;
+	if (info->attrs[WIMAX_GNL_MSG_DATA] == NULL) {
+		dev_err(dev, "WIMAX_GNL_MSG_FROM_USER: can't find MSG_DATA "
+			"attribute\n");
+		goto error_no_data;
+	}
+	msg_buf = nla_data(info->attrs[WIMAX_GNL_MSG_DATA]);
+	msg_len = nla_len(info->attrs[WIMAX_GNL_MSG_DATA]);
+
+	if (info->attrs[WIMAX_GNL_MSG_PIPE_NAME] == NULL)
+		pipe_name = NULL;
+	else {
+		struct nlattr *attr = info->attrs[WIMAX_GNL_MSG_PIPE_NAME];
+		size_t attr_len = nla_len(attr);
+		/* libnl-1.1 does not yet support NLA_NUL_STRING */
+		result = -ENOMEM;
+		pipe_name = kstrndup(nla_data(attr), attr_len + 1, GFP_KERNEL);
+		if (pipe_name == NULL)
+			goto error_alloc;
+		pipe_name[attr_len] = 0;
+	}
+	mutex_lock(&wimax_dev->mutex);
+	result = wimax_dev_is_ready(wimax_dev);
+	if (result < 0)
+		goto error_not_ready;
+	result = -ENOSYS;
+	if (wimax_dev->op_msg_from_user == NULL)
+		goto error_noop;
+
+	d_printf(1, dev,
+		 "CRX: nlmsghdr len %u type %u flags 0x%04x seq 0x%x pid %u\n",
+		 nlh->nlmsg_len, nlh->nlmsg_type, nlh->nlmsg_flags,
+		 nlh->nlmsg_seq, nlh->nlmsg_pid);
+	d_printf(1, dev, "CRX: wimax message %zu bytes\n", msg_len);
+	d_dump(2, dev, msg_buf, msg_len);
+
+	result = wimax_dev->op_msg_from_user(wimax_dev, pipe_name,
+					     msg_buf, msg_len, info);
+error_noop:
+error_not_ready:
+	mutex_unlock(&wimax_dev->mutex);
+error_alloc:
+	kfree(pipe_name);
+error_no_data:
+	dev_put(wimax_dev->net_dev);
+error_no_wimax_dev:
+	d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
+	return result;
+}
+
+
+/*
+ * Generic Netlink glue
+ */
+
+struct genl_ops wimax_gnl_msg_from_user = {
+	.cmd = WIMAX_GNL_OP_MSG_FROM_USER,
+	.flags = GENL_ADMIN_PERM,
+	.policy = wimax_gnl_msg_policy,
+	.doit = wimax_gnl_doit_msg_from_user,
+	.dumpit = NULL,
+};
+
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
new file mode 100644
index 0000000..ca26917
--- /dev/null
+++ b/net/wimax/op-reset.c
@@ -0,0 +1,143 @@
+/*
+ * Linux WiMAX
+ * Implement and export a method for resetting a WiMAX device
+ *
+ *
+ * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This implements a simple synchronous call to reset a WiMAX device.
+ *
+ * Resets aim at being warm, keeping the device handles active;
+ * however, when that fails, it falls back to a cold reset (that will
+ * disconnect and reconnect the device).
+ */
+
+#include <net/wimax.h>
+#include <net/genetlink.h>
+#include <linux/wimax.h>
+#include <linux/security.h>
+#include "wimax-internal.h"
+
+#define D_SUBMODULE op_reset
+#include "debug-levels.h"
+
+
+/**
+ * wimax_reset - Reset a WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * Returns:
+ *
+ * %0 if ok and a warm reset was done (the device still exists in
+ * the system).
+ *
+ * -%ENODEV if a cold/bus reset had to be done (device has
+ * disconnected and reconnected, so current handle is not valid
+ * any more).
+ *
+ * -%EINVAL if the device is not even registered.
+ *
+ * Any other negative error code shall be considered as
+ * non-recoverable.
+ *
+ * Description:
+ *
+ * Called when the device needs to be reset for any reason. The device
+ * is taken back to its power-on state.
+ *
+ * This call blocks; on successful return, the device has completed the
+ * reset process and is ready to operate.
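+ *
+ * A usage sketch; handle_cold_reset() and handle_fatal_error() are
+ * placeholders for caller-specific handling, not part of this API:
+ *
+ *	result = wimax_reset(wimax_dev);
+ *	if (result == -ENODEV)
+ *		handle_cold_reset();
+ *	else if (result < 0)
+ *		handle_fatal_error(result);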
+ */
+int wimax_reset(struct wimax_dev *wimax_dev)
+{
+	int result = -EINVAL;
+	struct device *dev = wimax_dev_to_dev(wimax_dev);
+	enum wimax_st state;
+
+	might_sleep();
+	d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
+	mutex_lock(&wimax_dev->mutex);
+	dev_hold(wimax_dev->net_dev);
+	state = wimax_dev->state;
+	mutex_unlock(&wimax_dev->mutex);
+
+	if (state >= WIMAX_ST_DOWN) {
+		mutex_lock(&wimax_dev->mutex_reset);
+		result = wimax_dev->op_reset(wimax_dev);
+		mutex_unlock(&wimax_dev->mutex_reset);
+	}
+	dev_put(wimax_dev->net_dev);
+
+	d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
+	return result;
+}
+EXPORT_SYMBOL(wimax_reset);
+
+
+static const
+struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+	[WIMAX_GNL_RESET_IFIDX] = {
+		.type = NLA_U32,
+	},
+};
+
+
+/*
+ * Exporting to user space over generic netlink
+ *
+ * Parse the reset command from user space, return error code.
+ *
+ * No attributes other than the device's interface index.
+ */
+static
+int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
+{
+	int result, ifindex;
+	struct wimax_dev *wimax_dev;
+	struct device *dev;
+
+	d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
+	result = -ENODEV;
+	if (info->attrs[WIMAX_GNL_RESET_IFIDX] == NULL) {
+		printk(KERN_ERR "WIMAX_GNL_OP_RESET: can't find IFIDX "
+			"attribute\n");
+		goto error_no_wimax_dev;
+	}
+	ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RESET_IFIDX]);
+	wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
+	if (wimax_dev == NULL)
+		goto error_no_wimax_dev;
+	dev = wimax_dev_to_dev(wimax_dev);
+	/* Execute the operation and send the result back to user space */
+	result = wimax_reset(wimax_dev);
+	dev_put(wimax_dev->net_dev);
+error_no_wimax_dev:
+	d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
+	return result;
+}
+
+
+struct genl_ops wimax_gnl_reset = {
+	.cmd = WIMAX_GNL_OP_RESET,
+	.flags = GENL_ADMIN_PERM,
+	.policy = wimax_gnl_reset_policy,
+	.doit = wimax_gnl_doit_reset,
+	.dumpit = NULL,
+};
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
new file mode 100644
index 0000000..2b75aee
--- /dev/null
+++ b/net/wimax/op-rfkill.c
@@ -0,0 +1,532 @@
+/*
+ * Linux WiMAX
+ * RF-kill framework integration
+ *
+ *
+ * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This integrates into the Linux Kernel rfkill subsystem so that the
+ * drivers just have to do the bare minimal work, which is providing a
+ * method to set the software RF-Kill switch and to report changes in
+ * the software and hardware switch status.
+ *
+ * A non-polled generic rfkill device is embedded into the WiMAX
+ * subsystem's representation of a device.
+ *
+ * FIXME: Need polled support? use a timer or add the implementation
+ *     to the stack.
+ *
+ * All device drivers have to do is, after wimax_dev_init(), call
+ * wimax_report_rfkill_hw() and wimax_report_rfkill_sw() to update the
+ * initial state and then again every time it changes. See wimax.h:struct
+ * wimax_dev for more information.
+ *
+ * ROADMAP
+ *
+ * wimax_gnl_doit_rfkill()      User space calling wimax_rfkill()
+ *   wimax_rfkill()             Kernel calling wimax_rfkill()
+ *     __wimax_rf_toggle_radio()
+ *
+ * wimax_rfkill_toggle_radio()  RF-Kill subsystem calling
+ *   __wimax_rf_toggle_radio()
+ *
+ * __wimax_rf_toggle_radio()
+ *   wimax_dev->op_rfkill_sw_toggle() Driver backend
+ *   __wimax_state_change()
+ *
+ * wimax_report_rfkill_sw()     Driver reports state change
+ *   __wimax_state_change()
+ *
+ * wimax_report_rfkill_hw()     Driver reports state change
+ *   __wimax_state_change()
+ *
+ * wimax_rfkill_add()           Initialize/shutdown rfkill support
+ * wimax_rfkill_rm()            [called by wimax_dev_add/rm()]
+ */
+
+#include <net/wimax.h>
+#include <net/genetlink.h>
+#include <linux/wimax.h>
+#include <linux/security.h>
+#include <linux/rfkill.h>
+#include <linux/input.h>
+#include "wimax-internal.h"
+
+#define D_SUBMODULE op_rfkill
+#include "debug-levels.h"
+
+#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
+
+
+/**
+ * wimax_report_rfkill_hw - Reports changes in the hardware RF switch
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * @state: New state of the RF Kill switch. %WIMAX_RF_ON radio on,
+ *     %WIMAX_RF_OFF radio off.
+ *
+ * When the device detects a change in the state of the hardware RF
+ * switch, it must call this function to let the WiMAX kernel stack
+ * know that the state has changed so it can be properly propagated.
+ *
+ * The WiMAX stack caches the state (the driver doesn't need to). Also,
+ * as the change is propagated, it will come back as a request to
+ * change the software state to mirror the hardware state.
+ *
+ * If the device doesn't have a hardware kill switch, just report
+ * it on initialization as always on (%WIMAX_RF_ON, radio on).
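+ *
+ * A usage sketch for a driver that has just queried its hardware
+ * switch (hw_switch_is_off() is a placeholder for the driver-specific
+ * query, not part of this API):
+ *
+ *	wimax_report_rfkill_hw(wimax_dev, hw_switch_is_off() ?
+ *			       WIMAX_RF_OFF : WIMAX_RF_ON);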
+ */
+void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
+			    enum wimax_rf_state state)
+{
+	int result;
+	struct device *dev = wimax_dev_to_dev(wimax_dev);
+	enum wimax_st wimax_state;
+	enum rfkill_state rfkill_state;
+
+	d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
+	BUG_ON(state == WIMAX_RF_QUERY);
+	BUG_ON(state != WIMAX_RF_ON && state != WIMAX_RF_OFF);
+
+	mutex_lock(&wimax_dev->mutex);
+	result = wimax_dev_is_ready(wimax_dev);
+	if (result < 0)
+		goto error_not_ready;
+
+	if (state != wimax_dev->rf_hw) {
+		wimax_dev->rf_hw = state;
+		rfkill_state = state == WIMAX_RF_ON ?
+			RFKILL_STATE_OFF : RFKILL_STATE_ON;
+		if (wimax_dev->rf_hw == WIMAX_RF_ON
+		    && wimax_dev->rf_sw == WIMAX_RF_ON)
+			wimax_state = WIMAX_ST_READY;
+		else
+			wimax_state = WIMAX_ST_RADIO_OFF;
+		__wimax_state_change(wimax_dev, wimax_state);
+		input_report_key(wimax_dev->rfkill_input, KEY_WIMAX,
+				 rfkill_state);
+	}
+error_not_ready:
+	mutex_unlock(&wimax_dev->mutex);
+	d_fnend(3, dev, "(wimax_dev %p state %u) = void [%d]\n",
+		wimax_dev, state, result);
+}
+EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw);
+
+
+/**
+ * wimax_report_rfkill_sw - Reports changes in the software RF switch
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * @state: New state of the RF kill switch. %WIMAX_RF_ON radio on,
+ *     %WIMAX_RF_OFF radio off.
+ *
+ * Reports changes in the software RF switch state to the WiMAX
+ * stack.
+ *
+ * The main use is during initialization, so the driver can query the
+ * device for its current software radio kill switch state and feed it
+ * to the system.
+ *
+ * As a side note, the device is not expected to change the software
+ * state by itself; in practice it can happen, though, as the device
+ * might decide to switch the radio off in software for different
+ * reasons.
+ */
+void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev,
+			    enum wimax_rf_state state)
+{
+	int result;
+	struct device *dev = wimax_dev_to_dev(wimax_dev);
+	enum wimax_st wimax_state;
+
+	d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
+	BUG_ON(state == WIMAX_RF_QUERY);
+	BUG_ON(state != WIMAX_RF_ON && state != WIMAX_RF_OFF);
+
+	mutex_lock(&wimax_dev->mutex);
+	result = wimax_dev_is_ready(wimax_dev);
+	if (result < 0)
+		goto error_not_ready;
+
+	if (state != wimax_dev->rf_sw) {
+		wimax_dev->rf_sw = state;
+		if (wimax_dev->rf_hw == WIMAX_RF_ON
+		    && wimax_dev->rf_sw == WIMAX_RF_ON)
+			wimax_state = WIMAX_ST_READY;
+		else
+			wimax_state = WIMAX_ST_RADIO_OFF;
+		__wimax_state_change(wimax_dev, wimax_state);
+	}
+error_not_ready:
+	mutex_unlock(&wimax_dev->mutex);
+	d_fnend(3, dev, "(wimax_dev %p state %u) = void [%d]\n",
+		wimax_dev, state, result);
+}
+EXPORT_SYMBOL_GPL(wimax_report_rfkill_sw);
+
+
+/*
+ * Callback for the RF Kill toggle operation
+ *
+ * This function is called by:
+ *
+ * - The rfkill subsystem when the RF-Kill key is pressed in the
+ *   hardware and the driver notifies through
+ *   wimax_report_rfkill_hw(). The rfkill subsystem ends up calling back
+ *   here so the software RF Kill switch state is changed to reflect
+ *   the hardware switch state.
+ *
+ * - When the user sets the state through sysfs' rfkill/state file
+ *
+ * - When the user calls wimax_rfkill().
+ *
+ * This call blocks!
+ *
+ * WARNING! When we call rfkill_unregister(), this will be called with
+ * state 0!
+ *
+ * WARNING: wimax_dev must be locked
+ */
+static
+int __wimax_rf_toggle_radio(struct wimax_dev *wimax_dev,
+			    enum wimax_rf_state state)
+{
+	int result = 0;
+	struct device *dev = wimax_dev_to_dev(wimax_dev);
+	enum wimax_st wimax_state;
+
+	might_sleep();
+	d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
+	if (wimax_dev->rf_sw == state)
+		goto out_no_change;
+	if (wimax_dev->op_rfkill_sw_toggle != NULL)
+		result = wimax_dev->op_rfkill_sw_toggle(wimax_dev, state);
+	else if (state == WIMAX_RF_OFF)	/* No op? can't turn off */
+		result = -ENXIO;
+	else				/* No op? can turn on */
+		result = 0;		/* should never happen tho */
+	if (result >= 0) {
+		result = 0;
+		wimax_dev->rf_sw = state;
+		wimax_state = state == WIMAX_RF_ON ?
+			WIMAX_ST_READY : WIMAX_ST_RADIO_OFF;
+		__wimax_state_change(wimax_dev, wimax_state);
+	}
+out_no_change:
+	d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n",
+		wimax_dev, state, result);
+	return result;
+}
+
+
+/*
+ * Translate from rfkill state to wimax state
+ *
+ * NOTE: Special state handling rules here
+ *
+ *     Just pretend the call didn't happen if we are in a state where
+ *     we know for sure it cannot be handled (WIMAX_ST_DOWN or
+ *     __WIMAX_ST_QUIESCING). The rfkill subsystem needs this to
+ *     register and unregister, as it will run this path.
+ *
+ * NOTE: This call will block until the operation is completed.
+ */
+static
+int wimax_rfkill_toggle_radio(void *data, enum rfkill_state state)
+{
+	int result;
+	struct wimax_dev *wimax_dev = data;
+	struct device *dev = wimax_dev_to_dev(wimax_dev);
+	enum wimax_rf_state rf_state;
+
+	d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
+	switch (state) {
+	case RFKILL_STATE_ON:
+		rf_state = WIMAX_RF_OFF;
+		break;
+	case RFKILL_STATE_OFF:
+		rf_state = WIMAX_RF_ON;
+		break;
+	default:
+		BUG();
+	}
+	mutex_lock(&wimax_dev->mutex);
+	if (wimax_dev->state <= __WIMAX_ST_QUIESCING)
+		result = 0;	/* just pretend it didn't happen */
+	else
+		result = __wimax_rf_toggle_radio(wimax_dev, rf_state);
+	mutex_unlock(&wimax_dev->mutex);
+	d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n",
+		wimax_dev, state, result);
+	return result;
+}
+
+
+/**
+ * wimax_rfkill - Set the software RF switch state for a WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * @state: New RF state.
+ *
+ * Returns:
+ *
+ * >= 0 toggle state if ok, < 0 errno code on error. The toggle state
+ * is returned as a bitmap, bit 0 being the hardware RF state, bit 1
+ * the software RF state.
+ *
+ * A bit value of 0 means the switch is not engaged (%WIMAX_RF_ON,
+ * radio on); a bit value of 1 means it is engaged (%WIMAX_RF_OFF,
+ * radio off).
+ *
+ * Description:
+ *
+ * Called when the caller wants to request the WiMAX radio to be
+ * switched on (%WIMAX_RF_ON) or off (%WIMAX_RF_OFF). With
+ * %WIMAX_RF_QUERY, just the current state is returned.
+ *
+ * NOTE:
+ *
+ * This call will block until the operation is complete.
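+ *
+ * A sketch of querying and decoding the returned bitmap (variable
+ * names are illustrative); bit 0 is the hardware switch, bit 1 the
+ * software switch, and a set bit means %WIMAX_RF_OFF:
+ *
+ *	result = wimax_rfkill(wimax_dev, WIMAX_RF_QUERY);
+ *	if (result >= 0) {
+ *		hw_off = result & 0x1;
+ *		sw_off = (result >> 1) & 0x1;
+ *	}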
+ */
+int wimax_rfkill(struct wimax_dev *wimax_dev, enum wimax_rf_state state)
+{
+	int result;
+	struct device *dev = wimax_dev_to_dev(wimax_dev);
+
+	d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
+	mutex_lock(&wimax_dev->mutex);
+	result = wimax_dev_is_ready(wimax_dev);
+	if (result < 0)
+		goto error_not_ready;
+	switch (state) {
+	case WIMAX_RF_ON:
+	case WIMAX_RF_OFF:
+		result = __wimax_rf_toggle_radio(wimax_dev, state);
+		if (result < 0)
+			goto error;
+		break;
+	case WIMAX_RF_QUERY:
+		break;
+	default:
+		result = -EINVAL;
+		goto error;
+	}
+	result = wimax_dev->rf_sw << 1 | wimax_dev->rf_hw;
+error:
+error_not_ready:
+	mutex_unlock(&wimax_dev->mutex);
+	d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n",
+		wimax_dev, state, result);
+	return result;
+}
+EXPORT_SYMBOL(wimax_rfkill);
+
+
+/*
+ * Register a new WiMAX device's RF Kill support
+ *
+ * WARNING: wimax_dev->mutex must be unlocked
+ */
+int wimax_rfkill_add(struct wimax_dev *wimax_dev)
+{
+	int result;
+	struct rfkill *rfkill;
+	struct input_dev *input_dev;
+	struct device *dev = wimax_dev_to_dev(wimax_dev);
+
+	d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
+	/* Initialize RF Kill */
+	result = -ENOMEM;
+	rfkill = rfkill_allocate(dev, RFKILL_TYPE_WIMAX);
+	if (rfkill == NULL)
+		goto error_rfkill_allocate;
+	wimax_dev->rfkill = rfkill;
+
+	rfkill->name = wimax_dev->name;
+	rfkill->state = RFKILL_STATE_OFF;
+	rfkill->data = wimax_dev;
+	rfkill->toggle_radio = wimax_rfkill_toggle_radio;
+	rfkill->user_claim_unsupported = 1;
+
+	/* Initialize the input device for the hw key */
+	input_dev = input_allocate_device();
+	if (input_dev == NULL)
+		goto error_input_allocate;
+	wimax_dev->rfkill_input = input_dev;
+	d_printf(1, dev, "rfkill %p input %p\n", rfkill, input_dev);
+
+	input_dev->name = wimax_dev->name;
+	/* FIXME: get a real device bus ID and stuff? do we care? */
+	input_dev->id.bustype = BUS_HOST;
+	input_dev->id.vendor = 0xffff;
+	input_dev->evbit[0] = BIT(EV_KEY);
+	set_bit(KEY_WIMAX, input_dev->keybit);
+
+	/* Register both */
+	result = input_register_device(wimax_dev->rfkill_input);
+	if (result < 0)
+		goto error_input_register;
+	result = rfkill_register(wimax_dev->rfkill);
+	if (result < 0)
+		goto error_rfkill_register;
+
+	/* If there is no SW toggle op, SW RFKill is always on */
+	if (wimax_dev->op_rfkill_sw_toggle == NULL)
+		wimax_dev->rf_sw = WIMAX_RF_ON;
+
+	d_fnend(3, dev, "(wimax_dev %p) = 0\n", wimax_dev);
+	return 0;
+
+	/* if rfkill_register() succeeds, can't use rfkill_free() any
+	 * more, only rfkill_unregister() [it owns the refcount]; with
+	 * the input device we have the same issue--hence the if. */
+error_rfkill_register:
+	input_unregister_device(wimax_dev->rfkill_input);
+	wimax_dev->rfkill_input = NULL;
+error_input_register:
+	if (wimax_dev->rfkill_input)
+		input_free_device(wimax_dev->rfkill_input);
+error_input_allocate:
+	rfkill_free(wimax_dev->rfkill);
+error_rfkill_allocate:
+	d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
+	return result;
+}
+
+
+/*
+ * Deregister a WiMAX device's RF Kill support
+ *
+ * Ick, we can't call rfkill_free() after rfkill_unregister()...oh
+ * well.
+ *
+ * WARNING: wimax_dev->mutex must be unlocked
+ */
+void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
+{
+	struct device *dev = wimax_dev_to_dev(wimax_dev);
+	d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
+	rfkill_unregister(wimax_dev->rfkill);	/* frees */
+	input_unregister_device(wimax_dev->rfkill_input);
+	d_fnend(3, dev, "(wimax_dev %p)\n", wimax_dev);
+}
+
+
+#else /* #ifdef CONFIG_RFKILL */
+
+void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
+			    enum wimax_rf_state state)
+{
+}
+EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw);
+
+void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev,
+			    enum wimax_rf_state state)
+{
+}
+EXPORT_SYMBOL_GPL(wimax_report_rfkill_sw);
+
+int wimax_rfkill(struct wimax_dev *wimax_dev,
+		 enum wimax_rf_state state)
+{
+	return WIMAX_RF_ON << 1 | WIMAX_RF_ON;
+}
+EXPORT_SYMBOL_GPL(wimax_rfkill);
+
+int wimax_rfkill_add(struct wimax_dev *wimax_dev)
+{
+	return 0;
+}
+
+void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
+{
+}
+
+#endif /* #ifdef CONFIG_RFKILL */
+
+
+/*
+ * Exporting to user space over generic netlink
+ *
+ * Parse the rfkill command from user space, return a combination
+ * value that describes the states of the different toggles.
+ *
+ * Only one attribute: the new state requested (on, off or no change,
+ * just query).
+ */
+
+static const
+struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+	[WIMAX_GNL_RFKILL_IFIDX] = {
+		.type = NLA_U32,
+	},
+	[WIMAX_GNL_RFKILL_STATE] = {
+		.type = NLA_U32		/* enum wimax_rf_state */
+	},
+};
+
+
+static
+int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info)
+{
+	int result, ifindex;
+	struct wimax_dev *wimax_dev;
+	struct device *dev;
+	enum wimax_rf_state new_state;
+
+	d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
+	result = -ENODEV;
+	if (info->attrs[WIMAX_GNL_RFKILL_IFIDX] == NULL) {
+		printk(KERN_ERR "WIMAX_GNL_OP_RFKILL: can't find IFIDX "
+			"attribute\n");
+		goto error_no_wimax_dev;
+	}
+	ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_IFIDX]);
+	wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
+	if (wimax_dev == NULL)
+		goto error_no_wimax_dev;
+	dev = wimax_dev_to_dev(wimax_dev);
+	result = -EINVAL;
+	if (info->attrs[WIMAX_GNL_RFKILL_STATE] == NULL) {
+		dev_err(dev, "WIMAX_GNL_RFKILL: can't find RFKILL_STATE "
+			"attribute\n");
+		goto error_no_pid;
+	}
+	new_state = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_STATE]);
+
+	/* Execute the operation and send the result back to user space */
+	result = wimax_rfkill(wimax_dev, new_state);
+error_no_pid:
+	dev_put(wimax_dev->net_dev);
+error_no_wimax_dev:
+	d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
+	return result;
+}
+
+
+struct genl_ops wimax_gnl_rfkill = {
+	.cmd = WIMAX_GNL_OP_RFKILL,
+	.flags = GENL_ADMIN_PERM,
+	.policy = wimax_gnl_rfkill_policy,
+	.doit = wimax_gnl_doit_rfkill,
+	.dumpit = NULL,
+};
+
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
new file mode 100644
index 0000000..d4da92f
--- /dev/null
+++ b/net/wimax/stack.c
@@ -0,0 +1,599 @@
+/*
+ * Linux WiMAX
+ * Initialization, addition and removal of wimax devices
+ *
+ *
+ * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This implements:
+ *
+ *   - basic life cycle of 'struct wimax_dev' [wimax_dev_*()]; on
+ *     addition/registration initialize all subfields and allocate
+ *     generic netlink resources for user space communication. On
+ *     removal/unregistration, undo all that.
+ *
+ *   - device state machine [wimax_state_change()] and support to send
+ *     reports to user space when the state changes
+ *     [wimax_gnl_re_state_change*()].
+ *
+ * See include/net/wimax.h for rationales and design.
+ *
+ * ROADMAP
+ *
+ * [__]wimax_state_change()     Called by drivers to update device's state
+ *   wimax_gnl_re_state_change_alloc()
+ *   wimax_gnl_re_state_change_send()
+ *
+ * wimax_dev_init()	        Init a device
+ * wimax_dev_add()              Register
+ *   wimax_rfkill_add()
+ *   wimax_gnl_add()            Register all the generic netlink resources.
+ *   wimax_id_table_add()
+ * wimax_dev_rm()               Unregister
+ *   wimax_id_table_rm()
+ *   wimax_gnl_rm()
+ *   wimax_rfkill_rm()
+ */
+#include <linux/device.h>
+#include <net/genetlink.h>
+#include <linux/netdevice.h>
+#include <linux/wimax.h>
+#include "wimax-internal.h"
+
+
+#define D_SUBMODULE stack
+#include "debug-levels.h"
+
+/*
+ * Authoritative source for the RE_STATE_CHANGE attribute policy
+ *
+ * We don't really use it here, but /me likes to keep the definition
+ * close to where the data is generated.
+ */
+/*
+static const
+struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
+	[WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 },
+	[WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 },
+};
+*/
+
+
+/*
+ * Allocate a Report State Change message
+ *
+ * @header: save it, you need it for _send()
+ *
+ * Creates and fills a basic state change message; different code
+ * paths can then add more attributes to the message as needed.
+ *
+ * Use wimax_gnl_re_state_change_send() to send the returned skb.
+ *
+ * Returns: skb with the genl message if ok, ERR_PTR() with a negative
+ *     errno code on error.
+ */
+static
+struct sk_buff *wimax_gnl_re_state_change_alloc(
+	struct wimax_dev *wimax_dev,
+	enum wimax_st new_state, enum wimax_st old_state,
+	void **header)
+{
+	int result;
+	struct device *dev = wimax_dev_to_dev(wimax_dev);
+	void *data;
+	struct sk_buff *report_skb;
+
+	d_fnstart(3, dev, "(wimax_dev %p new_state %u old_state %u)\n",
+		  wimax_dev, new_state, old_state);
+	result = -ENOMEM;
+	report_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (report_skb == NULL) {
+		dev_err(dev, "RE_STCH: can't create message\n");
+		goto error_new;
+	}
+	data = genlmsg_put(report_skb, 0, wimax_gnl_mcg.id, &wimax_gnl_family,
+			   0, WIMAX_GNL_RE_STATE_CHANGE);
+	if (data == NULL) {
+		dev_err(dev, "RE_STCH: can't put data into message\n");
+		goto error_put;
+	}
+	*header = data;
+
+	result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_OLD, old_state);
+	if (result < 0) {
+		dev_err(dev, "RE_STCH: Error adding OLD attr: %d\n", result);
+		goto error_put;
+	}
+	result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_NEW, new_state);
+	if (result < 0) {
+		dev_err(dev, "RE_STCH: Error adding NEW attr: %d\n", result);
+		goto error_put;
+	}
+	result = nla_put_u32(report_skb, WIMAX_GNL_STCH_IFIDX,
+			     wimax_dev->net_dev->ifindex);
+	if (result < 0) {
+		dev_err(dev, "RE_STCH: Error adding IFINDEX attribute\n");
+		goto error_put;
+	}
+	d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %p\n",
+		wimax_dev, new_state, old_state, report_skb);
+	return report_skb;
+
+error_put:
+	nlmsg_free(report_skb);
+error_new:
+	d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %d\n",
+		wimax_dev, new_state, old_state, result);
+	return ERR_PTR(result);
+}
+
+
+/*
+ * Send a Report State Change message (as created with _alloc).
+ *
+ * @report_skb: as returned by wimax_gnl_re_state_change_alloc()
+ * @header: as returned by wimax_gnl_re_state_change_alloc()
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * If the message is NULL, pretend it didn't happen.
+ */
+static
+int wimax_gnl_re_state_change_send(
+	struct wimax_dev *wimax_dev, struct sk_buff *report_skb,
+	void *header)
+{
+	int result = 0;
+	struct device *dev = wimax_dev_to_dev(wimax_dev);
+	d_fnstart(3, dev, "(wimax_dev %p report_skb %p)\n",
+		  wimax_dev, report_skb);
+	if (report_skb == NULL)
+		goto out;
+	genlmsg_end(report_skb, header);
+	result = genlmsg_multicast(report_skb, 0, wimax_gnl_mcg.id, GFP_KERNEL);
+	if (result == -ESRCH)	/* Nobody connected, ignore it */
+		result = 0;	/* btw, the skb is freed already */
+	if (result < 0) {
+		dev_err(dev, "RE_STCH: Error sending: %d\n", result);
+		nlmsg_free(report_skb);
+	}
+out:
+	d_fnend(3, dev, "(wimax_dev %p report_skb %p) = %d\n",
+		wimax_dev, report_skb, result);
+	return result;
+}
+
+
+static
+void __check_new_state(enum wimax_st old_state, enum wimax_st new_state,
+		       unsigned allowed_states_bm)
+{
+	if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) {
+		printk(KERN_ERR "SW BUG! Forbidden state change %u -> %u\n",
+			old_state, new_state);
+	}
+}
+
+
+/*
+ * Set the current state of a WiMAX device [unlocked version of
+ * wimax_state_change(); the caller must hold wimax_dev->mutex].
+ */
+void __wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
+{
+	struct device *dev = wimax_dev_to_dev(wimax_dev);
+	enum wimax_st old_state = wimax_dev->state;
+	struct sk_buff *stch_skb;
+	void *header;
+
+	d_fnstart(3, dev, "(wimax_dev %p new_state %u [old %u])\n",
+		  wimax_dev, new_state, old_state);
+
+	if (WARN_ON(new_state >= __WIMAX_ST_INVALID)) {
+		dev_err(dev, "SW BUG: requesting invalid state %u\n",
+			new_state);
+		goto out;
+	}
+	if (old_state == new_state)
+		goto out;
+	header = NULL;	/* gcc complains? can't grok why */
+	stch_skb = wimax_gnl_re_state_change_alloc(
+		wimax_dev, new_state, old_state, &header);
+
+	/* Verify the state transition and do exit-from-state actions */
+	switch (old_state) {
+	case __WIMAX_ST_NULL:
+		__check_new_state(old_state, new_state,
+				  1 << WIMAX_ST_DOWN);
+		break;
+	case WIMAX_ST_DOWN:
+		__check_new_state(old_state, new_state,
+				  1 << __WIMAX_ST_QUIESCING
+				  | 1 << WIMAX_ST_UNINITIALIZED
+				  | 1 << WIMAX_ST_RADIO_OFF);
+		break;
+	case __WIMAX_ST_QUIESCING:
+		__check_new_state(old_state, new_state, 1 << WIMAX_ST_DOWN);
+		break;
+	case WIMAX_ST_UNINITIALIZED:
+		__check_new_state(old_state, new_state,
+				  1 << __WIMAX_ST_QUIESCING
+				  | 1 << WIMAX_ST_RADIO_OFF);
+		break;
+	case WIMAX_ST_RADIO_OFF:
+		__check_new_state(old_state, new_state,
+				  1 << __WIMAX_ST_QUIESCING
+				  | 1 << WIMAX_ST_READY);
+		break;
+	case WIMAX_ST_READY:
+		__check_new_state(old_state, new_state,
+				  1 << __WIMAX_ST_QUIESCING
+				  | 1 << WIMAX_ST_RADIO_OFF
+				  | 1 << WIMAX_ST_SCANNING
+				  | 1 << WIMAX_ST_CONNECTING
+				  | 1 << WIMAX_ST_CONNECTED);
+		break;
+	case WIMAX_ST_SCANNING:
+		__check_new_state(old_state, new_state,
+				  1 << __WIMAX_ST_QUIESCING
+				  | 1 << WIMAX_ST_RADIO_OFF
+				  | 1 << WIMAX_ST_READY
+				  | 1 << WIMAX_ST_CONNECTING
+				  | 1 << WIMAX_ST_CONNECTED);
+		break;
+	case WIMAX_ST_CONNECTING:
+		__check_new_state(old_state, new_state,
+				  1 << __WIMAX_ST_QUIESCING
+				  | 1 << WIMAX_ST_RADIO_OFF
+				  | 1 << WIMAX_ST_READY
+				  | 1 << WIMAX_ST_SCANNING
+				  | 1 << WIMAX_ST_CONNECTED);
+		break;
+	case WIMAX_ST_CONNECTED:
+		__check_new_state(old_state, new_state,
+				  1 << __WIMAX_ST_QUIESCING
+				  | 1 << WIMAX_ST_RADIO_OFF
+				  | 1 << WIMAX_ST_READY);
+		netif_tx_disable(wimax_dev->net_dev);
+		netif_carrier_off(wimax_dev->net_dev);
+		break;
+	case __WIMAX_ST_INVALID:
+	default:
+		dev_err(dev, "SW BUG: wimax_dev %p is in unknown state %u\n",
+			wimax_dev, wimax_dev->state);
+		WARN_ON(1);
+		goto out;
+	}
+
+	/* Execute the actions of entry to the new state */
+	switch (new_state) {
+	case __WIMAX_ST_NULL:
+		dev_err(dev, "SW BUG: wimax_dev %p entering NULL state "
+			"from %u\n", wimax_dev, wimax_dev->state);
+		WARN_ON(1);		/* Nobody can enter this state */
+		break;
+	case WIMAX_ST_DOWN:
+		break;
+	case __WIMAX_ST_QUIESCING:
+		break;
+	case WIMAX_ST_UNINITIALIZED:
+		break;
+	case WIMAX_ST_RADIO_OFF:
+		break;
+	case WIMAX_ST_READY:
+		break;
+	case WIMAX_ST_SCANNING:
+		break;
+	case WIMAX_ST_CONNECTING:
+		break;
+	case WIMAX_ST_CONNECTED:
+		netif_carrier_on(wimax_dev->net_dev);
+		netif_wake_queue(wimax_dev->net_dev);
+		break;
+	case __WIMAX_ST_INVALID:
+	default:
+		BUG();
+	}
+	__wimax_state_set(wimax_dev, new_state);
+	if (stch_skb)
+		wimax_gnl_re_state_change_send(wimax_dev, stch_skb, header);
+out:
+	d_fnend(3, dev, "(wimax_dev %p new_state %u [old %u]) = void\n",
+		wimax_dev, new_state, old_state);
+	return;
+}
+
+
+/**
+ * wimax_state_change - Set the current state of a WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor (properly referenced)
+ * @new_state: New state to switch to
+ *
+ * This implements the state changes for the wimax devices. It will
+ *
+ * - verify that the state transition is legal (for now it'll just
+ *   print a warning if not) according to the table in
+ *   linux/wimax.h's documentation for 'enum wimax_st'.
+ *
+ * - perform the actions needed for leaving the current state and
+ *   whichever are needed for entering the new state.
+ *
+ * - issue a report to user space indicating the new state (and an
+ *   optional payload with information about the new state).
+ *
+ * NOTE: This function locks @wimax_dev's mutex itself; the caller
+ * must not hold it.
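+ *
+ * A usage sketch for a driver that just established a link (purely
+ * illustrative; the trigger is driver-specific):
+ *
+ *	wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED);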
+ */
+void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
+{
+	mutex_lock(&wimax_dev->mutex);
+	__wimax_state_change(wimax_dev, new_state);
+	mutex_unlock(&wimax_dev->mutex);
+	return;
+}
+EXPORT_SYMBOL_GPL(wimax_state_change);
+
+
+/**
+ * wimax_state_get() - Return the current state of a WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * Returns: Current state of the device according to its driver.
+ */
+enum wimax_st wimax_state_get(struct wimax_dev *wimax_dev)
+{
+	enum wimax_st state;
+	mutex_lock(&wimax_dev->mutex);
+	state = wimax_dev->state;
+	mutex_unlock(&wimax_dev->mutex);
+	return state;
+}
+EXPORT_SYMBOL_GPL(wimax_state_get);
+
+
+/**
+ * wimax_dev_init - initialize a newly allocated instance
+ *
+ * @wimax_dev: WiMAX device descriptor to initialize.
+ *
+ * Initializes fields of a freshly allocated @wimax_dev instance. This
+ * function assumes that after allocation, the memory occupied by
+ * @wimax_dev was zeroed.
+ */
+void wimax_dev_init(struct wimax_dev *wimax_dev)
+{
+	INIT_LIST_HEAD(&wimax_dev->id_table_node);
+	__wimax_state_set(wimax_dev, WIMAX_ST_UNINITIALIZED);
+	mutex_init(&wimax_dev->mutex);
+	mutex_init(&wimax_dev->mutex_reset);
+}
+EXPORT_SYMBOL_GPL(wimax_dev_init);
+
+/*
+ * These externs are declared here because it is easier to keep track
+ * of them -- this declaration and the array below list the same ops.
+ */
+extern struct genl_ops
+	wimax_gnl_msg_from_user,
+	wimax_gnl_reset,
+	wimax_gnl_rfkill;
+
+static
+struct genl_ops *wimax_gnl_ops[] = {
+	&wimax_gnl_msg_from_user,
+	&wimax_gnl_reset,
+	&wimax_gnl_rfkill,
+};
+
+
+static
+size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size,
+			   unsigned char *addr, size_t addr_len)
+{
+	unsigned cnt, total;
+	for (total = cnt = 0; cnt < addr_len; cnt++)
+		total += scnprintf(addr_str + total, addr_str_size - total,
+				   "%02x%c", addr[cnt],
+				   cnt == addr_len - 1 ? '\0' : ':');
+	return total;
+}
+
+
+/**
+ * wimax_dev_add - Register a new WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor (as embedded in your @net_dev's
+ *     priv data). You must have called wimax_dev_init() on it before.
+ *
+ * @net_dev: net device the @wimax_dev is associated with. The
+ *     function expects SET_NETDEV_DEV() and register_netdev() were
+ *     already called on it.
+ *
+ * Registers the new WiMAX device, sets up the user-kernel control
+ * interface (generic netlink) and common WiMAX infrastructure.
+ *
+ * Note that the parts that will allow interaction with user space are
+ * set up at the very end, when the rest is in place, as once that
+ * happens, the driver might get user space control requests via
+ * netlink or from debugfs that might translate into calls into
+ * wimax_dev->op_*().
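+ *
+ * A probe-time usage sketch (my_priv, parent_dev and the allocation of
+ * @net_dev are illustrative assumptions; error handling omitted):
+ *
+ *	struct my_priv *priv = netdev_priv(net_dev);
+ *
+ *	wimax_dev_init(&priv->wimax_dev);
+ *	SET_NETDEV_DEV(net_dev, parent_dev);
+ *	result = register_netdev(net_dev);
+ *	...
+ *	result = wimax_dev_add(&priv->wimax_dev, net_dev);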
+ */
+int wimax_dev_add(struct wimax_dev *wimax_dev, struct net_device *net_dev)
+{
+	int result;
+	struct device *dev = net_dev->dev.parent;
+	char addr_str[32];
+
+	d_fnstart(3, dev, "(wimax_dev %p net_dev %p)\n", wimax_dev, net_dev);
+
+	/* Do the RFKILL setup before locking, as RFKILL will call
+	 * into our functions. */
+	wimax_dev->net_dev = net_dev;
+	result = wimax_rfkill_add(wimax_dev);
+	if (result < 0)
+		goto error_rfkill_add;
+
+	/* Set up user-space interaction */
+	mutex_lock(&wimax_dev->mutex);
+	wimax_id_table_add(wimax_dev);
+	result = wimax_debugfs_add(wimax_dev);
+	if (result < 0) {
+		dev_err(dev, "cannot initialize debugfs: %d\n",
+			result);
+		goto error_debugfs_add;
+	}
+
+	__wimax_state_set(wimax_dev, WIMAX_ST_DOWN);
+	mutex_unlock(&wimax_dev->mutex);
+
+	wimax_addr_scnprint(addr_str, sizeof(addr_str),
+			    net_dev->dev_addr, net_dev->addr_len);
+	dev_err(dev, "WiMAX interface %s (%s) ready\n",
+		net_dev->name, addr_str);
+	d_fnend(3, dev, "(wimax_dev %p net_dev %p) = 0\n", wimax_dev, net_dev);
+	return 0;
+
+error_debugfs_add:
+	wimax_id_table_rm(wimax_dev);
+	mutex_unlock(&wimax_dev->mutex);
+	wimax_rfkill_rm(wimax_dev);
+error_rfkill_add:
+	d_fnend(3, dev, "(wimax_dev %p net_dev %p) = %d\n",
+		wimax_dev, net_dev, result);
+	return result;
+}
+EXPORT_SYMBOL_GPL(wimax_dev_add);
+
+
+/**
+ * wimax_dev_rm - Unregister an existing WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * Unregisters a WiMAX device previously registered for use with
+ * wimax_dev_add().
+ *
+ * IMPORTANT! Must call before calling unregister_netdev().
+ *
+ * After this function returns, you will not get any more user space
+ * control requests (via netlink or debugfs), and thus no more calls to
+ * wimax_dev->op_*().
+ *
+ * Reentrancy control is ensured by setting the state to
+ * %__WIMAX_ST_QUIESCING. rfkill operations coming through
+ * wimax_*rfkill*() will be stopped by the quiescing state; ops coming
+ * from the rfkill subsystem will be stopped by the support being
+ * removed by wimax_rfkill_rm().
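+ *
+ * A disconnect-time usage sketch (the ordering is the important part):
+ *
+ *	wimax_dev_rm(wimax_dev);
+ *	unregister_netdev(net_dev);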
+ */
+void wimax_dev_rm(struct wimax_dev *wimax_dev)
+{
+	d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev);
+
+	mutex_lock(&wimax_dev->mutex);
+	__wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING);
+	wimax_debugfs_rm(wimax_dev);
+	wimax_id_table_rm(wimax_dev);
+	__wimax_state_change(wimax_dev, WIMAX_ST_DOWN);
+	mutex_unlock(&wimax_dev->mutex);
+	wimax_rfkill_rm(wimax_dev);
+	d_fnend(3, NULL, "(wimax_dev %p) = void\n", wimax_dev);
+}
+EXPORT_SYMBOL_GPL(wimax_dev_rm);
+
+struct genl_family wimax_gnl_family = {
+	.id = GENL_ID_GENERATE,
+	.name = "WiMAX",
+	.version = WIMAX_GNL_VERSION,
+	.hdrsize = 0,
+	.maxattr = WIMAX_GNL_ATTR_MAX,
+};
+
+struct genl_multicast_group wimax_gnl_mcg = {
+	.name = "msg",
+};
+
+
+
+/* Initialize the wimax stack */
+static
+int __init wimax_subsys_init(void)
+{
+	int result, cnt;
+
+	d_fnstart(4, NULL, "()\n");
+	snprintf(wimax_gnl_family.name, sizeof(wimax_gnl_family.name),
+		 "WiMAX");
+	result = genl_register_family(&wimax_gnl_family);
+	if (unlikely(result < 0)) {
+		printk(KERN_ERR "cannot register generic netlink family: %d\n",
+		       result);
+		goto error_register_family;
+	}
+
+	for (cnt = 0; cnt < ARRAY_SIZE(wimax_gnl_ops); cnt++) {
+		result = genl_register_ops(&wimax_gnl_family,
+					   wimax_gnl_ops[cnt]);
+		d_printf(4, NULL, "registering generic netlink op code "
+			 "%u: %d\n", wimax_gnl_ops[cnt]->cmd, result);
+		if (unlikely(result < 0)) {
+			printk(KERN_ERR "cannot register generic netlink op "
+			       "code %u: %d\n",
+			       wimax_gnl_ops[cnt]->cmd, result);
+			goto error_register_ops;
+		}
+	}
+
+	result = genl_register_mc_group(&wimax_gnl_family, &wimax_gnl_mcg);
+	if (result < 0)
+		goto error_mc_group;
+	d_fnend(4, NULL, "() = 0\n");
+	return 0;
+
+error_mc_group:
+error_register_ops:
+	for (cnt--; cnt >= 0; cnt--)
+		genl_unregister_ops(&wimax_gnl_family,
+				    wimax_gnl_ops[cnt]);
+	genl_unregister_family(&wimax_gnl_family);
+error_register_family:
+	d_fnend(4, NULL, "() = %d\n", result);
+	return result;
+
+}
+module_init(wimax_subsys_init);
+
+
+/* Shutdown the wimax stack */
+static
+void __exit wimax_subsys_exit(void)
+{
+	int cnt;
+	wimax_id_table_release();
+	genl_unregister_mc_group(&wimax_gnl_family, &wimax_gnl_mcg);
+	for (cnt = ARRAY_SIZE(wimax_gnl_ops) - 1; cnt >= 0; cnt--)
+		genl_unregister_ops(&wimax_gnl_family,
+				    wimax_gnl_ops[cnt]);
+	genl_unregister_family(&wimax_gnl_family);
+}
+module_exit(wimax_subsys_exit);
+
+MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
+MODULE_DESCRIPTION("Linux WiMAX stack");
+MODULE_LICENSE("GPL");
+
diff --git a/net/wimax/wimax-internal.h b/net/wimax/wimax-internal.h
new file mode 100644
index 0000000..1e743d2
--- /dev/null
+++ b/net/wimax/wimax-internal.h
@@ -0,0 +1,91 @@
+/*
+ * Linux WiMAX
+ * Internal API for kernel space WiMAX stack
+ *
+ *
+ * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This header file is for declarations and definitions internal to
+ * the WiMAX stack. For public APIs and documentation, see
+ * include/net/wimax.h and include/linux/wimax.h.
+ */
+
+#ifndef __WIMAX_INTERNAL_H__
+#define __WIMAX_INTERNAL_H__
+#ifdef __KERNEL__
+
+#include <linux/device.h>
+#include <net/wimax.h>
+
+
+/*
+ * Decide if a (locked) device is ready for use
+ *
+ * Before using the device structure, it must be locked
+ * (wimax_dev->mutex). As well, most operations need to call this
+ * function to check if the state is the right one.
+ *
+ * An error value will be returned if the state is not the right
+ * one. In that case, the caller should not attempt to use the device
+ * and just unlock it.
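+ *
+ * Typical usage sketch in an operation (mirrors the pattern used by
+ * the ops in this stack; the work in the middle is a placeholder):
+ *
+ *	mutex_lock(&wimax_dev->mutex);
+ *	result = wimax_dev_is_ready(wimax_dev);
+ *	if (result < 0)
+ *		goto error_not_ready;
+ *	... do the work ...
+ * error_not_ready:
+ *	mutex_unlock(&wimax_dev->mutex);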
+ */
+static inline __must_check
+int wimax_dev_is_ready(struct wimax_dev *wimax_dev)
+{
+	if (wimax_dev->state == __WIMAX_ST_NULL)
+		return -EINVAL;	/* Device is not even registered! */
+	if (wimax_dev->state == WIMAX_ST_DOWN)
+		return -ENOMEDIUM;
+	if (wimax_dev->state == __WIMAX_ST_QUIESCING)
+		return -ESHUTDOWN;
+	return 0;
+}
+
+
+static inline
+void __wimax_state_set(struct wimax_dev *wimax_dev, enum wimax_st state)
+{
+	wimax_dev->state = state;
+}
+extern void __wimax_state_change(struct wimax_dev *, enum wimax_st);
+
+#ifdef CONFIG_DEBUG_FS
+extern int wimax_debugfs_add(struct wimax_dev *);
+extern void wimax_debugfs_rm(struct wimax_dev *);
+#else
+static inline int wimax_debugfs_add(struct wimax_dev *wimax_dev)
+{
+	return 0;
+}
+static inline void wimax_debugfs_rm(struct wimax_dev *wimax_dev) {}
+#endif
+
+extern void wimax_id_table_add(struct wimax_dev *);
+extern struct wimax_dev *wimax_dev_get_by_genl_info(struct genl_info *, int);
+extern void wimax_id_table_rm(struct wimax_dev *);
+extern void wimax_id_table_release(void);
+
+extern int wimax_rfkill_add(struct wimax_dev *);
+extern void wimax_rfkill_rm(struct wimax_dev *);
+
+extern struct genl_family wimax_gnl_family;
+extern struct genl_multicast_group wimax_gnl_mcg;
+
+#endif /* #ifdef __KERNEL__ */
+#endif /* #ifndef __WIMAX_INTERNAL_H__ */
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index e49a2d1..cb6a5bb 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -1055,8 +1055,8 @@
 			return private(dev, iwr, cmd, info, handler);
 	}
 	/* Old driver API : call driver ioctl handler */
-	if (dev->do_ioctl)
-		return dev->do_ioctl(dev, ifr, cmd);
+	if (dev->netdev_ops->ndo_do_ioctl)
+		return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
 	return -EOPNOTSUPP;
 }
 
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index c609a4b..42cd183 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -63,7 +63,6 @@
 	if (len > skb_tailroom(skb))
 		len = skb_tailroom(skb);
 
-	skb->truesize += len;
 	__skb_put(skb, len);
 
 	len += plen;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index b95a2d6..7877e79 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1914,10 +1914,17 @@
 }
 #endif
 
+/* For the xfrm_usersa_info cases we have to work around some 32-bit vs.
+ * 64-bit compatibility issues.  On 32-bit the structure is 220 bytes, but
+ * for 64-bit it gets padded out to 224 bytes.  Those bytes are just
+ * padding and don't have any content we care about.  Therefore as long
+ * as we have enough bytes for the content we can make both cases work.
+ */
+
 #define XMSGSIZE(type) sizeof(struct type)
 
 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
-	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
+	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = 220, /* see above */
 	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
 	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
 	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
@@ -1927,7 +1934,7 @@
 	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
 	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
 	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
-	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
+	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = 220, /* see above */
 	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
 	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
 	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
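
The 220 vs. 224 byte sizes referred to above can be checked with a small
user-space program; this is only an illustration of the size difference
described in the comment, not part of the patch itself:

#include <stdio.h>
#include <linux/xfrm.h>

int main(void)
{
	/* per the comment above: 220 on 32-bit builds, 224 on 64-bit
	 * builds, the difference being tail padding only */
	printf("sizeof(struct xfrm_usersa_info) = %zu\n",
	       sizeof(struct xfrm_usersa_info));
	return 0;
}
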
diff --git a/scripts/.gitignore b/scripts/.gitignore
index b939fbd..09e2406 100644
--- a/scripts/.gitignore
+++ b/scripts/.gitignore
@@ -1,6 +1,7 @@
 #
 # Generated files
 #
+ihex2fw
 conmakehash
 kallsyms
 pnmtologo
diff --git a/scripts/Makefile b/scripts/Makefile
index aafdf06..035182e 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -2,11 +2,12 @@
 # scripts contains sources for various helper programs used throughout
 # the kernel for the build process.
 # ---------------------------------------------------------------------------
+# ihex2fw:       Parser/loader for IHEX formatted data
 # kallsyms:      Find all symbols in vmlinux
 # pnmttologo:    Convert pnm files to logo files
-# conmakehash:   Create chartable
 # conmakehash:	 Create arrays for initializing the kernel console tables
 
+hostprogs-y                      := ihex2fw
 hostprogs-$(CONFIG_KALLSYMS)     += kallsyms
 hostprogs-$(CONFIG_LOGO)         += pnmtologo
 hostprogs-$(CONFIG_VT)           += conmakehash
diff --git a/scripts/bootgraph.pl b/scripts/bootgraph.pl
index f0af9aa..b024630 100644
--- a/scripts/bootgraph.pl
+++ b/scripts/bootgraph.pl
@@ -41,11 +41,13 @@
 
 my %start;
 my %end;
+my %type;
 my $done = 0;
 my $maxtime = 0;
 my $firsttime = 100;
 my $count = 0;
 my %pids;
+my %pidctr;
 
 while (<>) {
 	my $line = $_;
@@ -53,6 +55,7 @@
 		my $func = $2;
 		if ($done == 0) {
 			$start{$func} = $1;
+			$type{$func} = 0;
 			if ($1 < $firsttime) {
 				$firsttime = $1;
 			}
@@ -63,12 +66,40 @@
 		$count = $count + 1;
 	}
 
+	if ($line =~ /([0-9\.]+)\] async_waiting @ ([0-9]+)/) {
+		my $pid = $2;
+		my $func;
+		if (!defined($pidctr{$pid})) {
+			$func = "wait_" . $pid . "_1";
+			$pidctr{$pid} = 1;
+		} else {
+			$pidctr{$pid} = $pidctr{$pid} + 1;
+			$func = "wait_" . $pid . "_" . $pidctr{$pid};
+		}
+		if ($done == 0) {
+			$start{$func} = $1;
+			$type{$func} = 1;
+			if ($1 < $firsttime) {
+				$firsttime = $1;
+			}
+		}
+		$pids{$func} = $pid;
+		$count = $count + 1;
+	}
+
 	if ($line =~ /([0-9\.]+)\] initcall ([a-zA-Z0-9\_]+)\+.*returned/) {
 		if ($done == 0) {
 			$end{$2} = $1;
 			$maxtime = $1;
 		}
 	}
+
+	if ($line =~ /([0-9\.]+)\] async_continuing @ ([0-9]+)/) {
+		my $pid = $2;
+		my $func =  "wait_" . $pid . "_" . $pidctr{$pid};
+		$end{$func} = $1;
+		$maxtime = $1;
+	}
 	if ($line =~ /Write protecting the/) {
 		$done = 1;
 	}
@@ -88,7 +119,7 @@
 }
 
 print "<?xml version=\"1.0\" standalone=\"no\"?> \n";
-print "<svg width=\"1000\" height=\"100%\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n";
+print "<svg width=\"2000\" height=\"100%\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n";
 
 my @styles;
 
@@ -105,8 +136,11 @@
 $styles[10] = "fill:rgb(255,128,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
 $styles[11] = "fill:rgb(128,255,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
 
-my $mult = 950.0 / ($maxtime - $firsttime);
-my $threshold = ($maxtime - $firsttime) / 60.0;
+my $style_wait = "fill:rgb(128,128,128);fill-opacity:0.5;stroke-width:0;stroke:rgb(0,0,0)";
+
+my $mult = 1950.0 / ($maxtime - $firsttime);
+my $threshold2 = ($maxtime - $firsttime) / 120.0;
+my $threshold = $threshold2/10;
 my $stylecounter = 0;
 my %rows;
 my $rowscount = 1;
@@ -116,7 +150,7 @@
 	my $duration = $end{$key} - $start{$key};
 
 	if ($duration >= $threshold) {
-		my ($s, $s2, $e, $w, $y, $y2, $style);
+		my ($s, $s2, $s3, $e, $w, $y, $y2, $style);
 		my $pid = $pids{$key};
 
 		if (!defined($rows{$pid})) {
@@ -125,6 +159,7 @@
 		}
 		$s = ($start{$key} - $firsttime) * $mult;
 		$s2 = $s + 6;
+		$s3 = $s + 1;
 		$e = ($end{$key} - $firsttime) * $mult;
 		$w = $e - $s;
 
@@ -137,8 +172,17 @@
 			$stylecounter = 0;
 		};
 
-		print "<rect x=\"$s\" width=\"$w\" y=\"$y\" height=\"145\" style=\"$style\"/>\n";
-		print "<text transform=\"translate($s2,$y2) rotate(90)\">$key</text>\n";
+		if ($type{$key} == 1) {
+			$y = $y + 15;
+			print "<rect x=\"$s\" width=\"$w\" y=\"$y\" height=\"115\" style=\"$style_wait\"/>\n";
+		} else {
+			print "<rect x=\"$s\" width=\"$w\" y=\"$y\" height=\"145\" style=\"$style\"/>\n";
+			if ($duration >= $threshold2) {
+				print "<text transform=\"translate($s2,$y2) rotate(90)\">$key</text>\n";
+			} else {
+				print "<text transform=\"translate($s3,$y2) rotate(90)\" font-size=\"3pt\">$key</text>\n";
+			}
+		}
 	}
 }
 
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 7bed4ed..45eb0ae 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -10,7 +10,7 @@
 my $P = $0;
 $P =~ s@.*/@@g;
 
-my $V = '0.26';
+my $V = '0.27';
 
 use Getopt::Long qw(:config no_auto_abbrev);
 
@@ -411,13 +411,15 @@
 
 	my $type = '';
 	my $level = 0;
-	my @stack = ([$type, $level]);
+	my @stack = ();
 	my $p;
 	my $c;
 	my $len = 0;
 
 	my $remainder;
 	while (1) {
+		@stack = (['', 0]) if ($#stack == -1);
+
 		#warn "CSB: blk<$blk> remain<$remain>\n";
 		# If we are about to drop off the end, pull in more
 		# context.
@@ -1663,7 +1665,7 @@
 			# Should not end with a space.
 			$to =~ s/\s+$//;
 			# '*'s should not have spaces between.
-			while ($to =~ s/(.)\s\*/$1\*/) {
+			while ($to =~ s/\*\s+\*/\*\*/) {
 			}
 
 			#print "from<$from> to<$to>\n";
@@ -1678,7 +1680,7 @@
 			# Should not end with a space.
 			$to =~ s/\s+$//;
 			# '*'s should not have spaces between.
-			while ($to =~ s/(.)\s\*/$1\*/) {
+			while ($to =~ s/\*\s+\*/\*\*/) {
 			}
 			# Modifiers should have spaces.
 			$to =~ s/(\b$Modifier$)/$1 /;
@@ -2014,7 +2016,11 @@
 
 			# Flatten any parentheses
 			$value =~ s/\)\(/\) \(/g;
-			while ($value !~ /(?:$Ident|-?$Constant)\s*$Compare\s*(?:$Ident|-?$Constant)/ && $value =~ s/\([^\(\)]*\)/1/) {
+			while ($value =~ s/\[[^\{\}]*\]/1/ ||
+			       $value !~ /(?:$Ident|-?$Constant)\s*
+					     $Compare\s*
+					     (?:$Ident|-?$Constant)/x &&
+			       $value =~ s/\([^\(\)]*\)/1/) {
 			}
 
 			if ($value =~ /^(?:$Ident|-?$Constant)$/) {
@@ -2102,6 +2108,11 @@
 				ERROR("trailing statements should be on next line\n" . $herecurr);
 			}
 		}
+# if should not continue a brace
+		if ($line =~ /}\s*if\b/) {
+			ERROR("trailing statements should be on next line\n" .
+				$herecurr);
+		}
 # case and default should not have general statements after them
 		if ($line =~ /^.\s*(?:case\s*.*|default\s*):/g &&
 		    $line !~ /\G(?:
@@ -2516,9 +2527,10 @@
 			WARN("please use device_initcall() instead of __initcall()\n" . $herecurr);
 		}
 # check for struct file_operations, ensure they are const.
-		if ($line =~ /\bstruct\s+file_operations\b/ &&
-		    $line !~ /\bconst\b/) {
-			WARN("struct file_operations should normally be const\n" . $herecurr);
+		if ($line !~ /\bconst\b/ &&
+		    $line =~ /\bstruct\s+(file_operations|seq_operations)\b/) {
+			WARN("struct $1 should normally be const\n" .
+				$herecurr);
 		}
 
 # use of NR_CPUS is usually wrong
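
To make the new "} if" test above concrete, this is the kind of layout it
flags, next to the preferred form (helper names are hypothetical, shown
only as an illustration):

static void do_a(void) { }
static void do_b(void) { }

static void example(int a, int b)
{
	if (a) {
		do_a();
	} if (b)	/* flagged: trailing statement should be on next line */
		do_b();

	if (a) {
		do_a();
	}
	if (b)		/* preferred: the second if starts its own line */
		do_b();
}
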
diff --git a/scripts/config b/scripts/config
new file mode 100755
index 0000000..68b9761
--- /dev/null
+++ b/scripts/config
@@ -0,0 +1,150 @@
+#!/bin/bash
+# Manipulate options in a .config file from the command line
+
+usage() {
+	cat >&2 <<EOL
+Manipulate options in a .config file from the command line.
+Usage:
+config options command ...
+commands:
+	--enable|-e option      Enable option
+	--disable|-d option     Disable option
+	--module|-m option      Turn option into a module
+	--state|-s option       Print state of option (n,y,m,undef)
+
+	--enable-after|-E beforeopt option
+                             Enable option directly after other option
+	--disable-after|-D beforeopt option
+                             Disable option directly after other option
+	--module-after|-M beforeopt option
+                             Turn option into module directly after other option
+
+	commands can be repeated multiple times
+
+options:
+	--file .config file to change (default .config)
+
+config doesn't check the validity of the .config file. This is done the
+next time you run make.
+The options need to already be in the file before they can be changed,
+but sometimes you can cheat with the --*-after options.
+EOL
+	exit 1
+}
+
+checkarg() {
+	ARG="$1"
+	if [ "$ARG" = "" ] ; then
+		usage
+	fi
+	case "$ARG" in
+	CONFIG_*)
+		ARG="${ARG/CONFIG_/}"
+		;;
+	esac
+	ARG="`echo $ARG | tr a-z A-Z`"
+}
+
+replace() {
+	sed -i -e "$@" $FN
+}
+
+if [ "$1" = "--file" ]; then
+	FN="$2"
+	if [ "$FN" = "" ] ; then
+		usage
+	fi
+	shift
+	shift
+else
+	FN=.config
+fi
+
+while [ "$1" != "" ] ; do
+	CMD="$1"
+	shift
+	case "$CMD" in
+	--enable|-e)
+		checkarg "$1"
+		replace "s/# CONFIG_$ARG is not set/CONFIG_$ARG=y/"
+		shift
+		;;
+
+	--disable|-d)
+		checkarg "$1"
+		replace "s/CONFIG_$ARG=[my]/# CONFIG_$ARG is not set/"
+		shift
+		;;
+
+	--module|-m)
+		checkarg "$1"
+		replace "s/CONFIG_$ARG=y/CONFIG_$ARG=m/" \
+			-e "s/# CONFIG_$ARG is not set/CONFIG_$ARG=m/"
+		shift
+		;;
+
+	--state|-s)
+		checkarg "$1"
+		if grep -q "# CONFIG_$ARG is not set" $FN ; then
+			echo n
+		else
+			V="$(grep "^CONFIG_$ARG=" $FN)"
+			if [ $? != 0 ] ; then
+				echo undef
+			else
+				V="${V/CONFIG_$ARG=/}"
+				V="${V/\"/}"
+				echo "$V"
+			fi
+		fi
+		shift
+		;;
+
+	--enable-after|-E)
+		checkarg "$1"
+		A=$ARG
+		checkarg "$2"
+		B=$ARG
+		replace "/CONFIG_$A=[my]/aCONFIG_$B=y" \
+			-e "/# CONFIG_$ARG is not set/a/CONFIG_$ARG=y" \
+			-e "s/# CONFIG_$ARG is not set/CONFIG_$ARG=y/"
+		shift
+		shift
+		;;
+
+	--disable-after|-D)
+		checkarg "$1"
+		A=$ARG
+		checkarg "$2"
+		B=$ARG
+		replace "/CONFIG_$A=[my]/a# CONFIG_$B is not set" \
+		-e "/# CONFIG_$ARG is not set/a/# CONFIG_$ARG is not set" \
+		-e "s/CONFIG_$ARG=[my]/# CONFIG_$ARG is not set/"
+		shift
+		shift
+		;;
+
+	--module-after|-M)
+		checkarg "$1"
+		A=$ARG
+		checkarg "$2"
+		B=$ARG
+		replace "/CONFIG_$A=[my]/aCONFIG_$B=m" \
+			-e "/# CONFIG_$ARG is not set/a/CONFIG_$ARG=m" \
+			-e "s/CONFIG_$ARG=y/CONFIG_$ARG=m/" \
+			-e "s/# CONFIG_$ARG is not set/CONFIG_$ARG=m/"
+		shift
+		shift
+		;;
+
+	# undocumented because it ignores --file (fixme)
+	--refresh)
+		yes "" | make oldconfig
+		;;
+
+	*)
+		usage
+		;;
+	esac
+done
+
diff --git a/firmware/ihex2fw.c b/scripts/ihex2fw.c
similarity index 100%
rename from firmware/ihex2fw.c
rename to scripts/ihex2fw.c
diff --git a/scripts/markup_oops.pl b/scripts/markup_oops.pl
index 700a7a6..d40449c 100644
--- a/scripts/markup_oops.pl
+++ b/scripts/markup_oops.pl
@@ -1,5 +1,7 @@
 #!/usr/bin/perl -w
 
+use File::Basename;
+
 # Copyright 2008, Intel Corporation
 #
 # This file is part of the Linux kernel
@@ -13,23 +15,41 @@
 
 
 my $vmlinux_name = $ARGV[0];
-
+if (!defined($vmlinux_name)) {
+	my $kerver = `uname -r`;
+	chomp($kerver);
+	$vmlinux_name = "/lib/modules/$kerver/build/vmlinux";
+	print "No vmlinux specified, assuming $vmlinux_name\n";
+}
+my $filename = $vmlinux_name;
 #
 # Step 1: Parse the oops to find the EIP value
 #
 
 my $target = "0";
+my $function;
+my $module = "";
+my $func_offset;
+my $vmaoffset = 0;
+
 while (<STDIN>) {
-	if ($_ =~ /EIP: 0060:\[\<([a-z0-9]+)\>\]/) {
+	my $line = $_;
+	if ($line =~ /EIP: 0060:\[\<([a-z0-9]+)\>\]/) {
 		$target = $1;
 	}
+	if ($line =~ /EIP is at ([a-zA-Z0-9\_]+)\+(0x[0-9a-f]+)\/0x[a-f0-9]/) {
+		$function = $1;
+		$func_offset = $2;
+	}
+
+	# check if it's a module
+	if ($line =~ /EIP is at ([a-zA-Z0-9\_]+)\+(0x[0-9a-f]+)\/0x[a-f0-9]+\W\[([a-zA-Z0-9\_\-]+)\]/) {
+		$module = $3;
+	}
 }
 
-if ($target =~ /^f8/) {
-	print "This script does not work on modules ... \n";
-	exit;
-}
-
+my $decodestart = hex($target) - hex($func_offset);
+my $decodestop = $decodestart + 8192;
 if ($target eq "0") {
 	print "No oops found!\n";
 	print "Usage: \n";
@@ -37,6 +57,29 @@
 	exit;
 }
 
+# if it's a module, we need to find the .ko file and calculate a load offset
+if ($module ne "") {
+	my $dir = dirname($filename);
+	$dir = $dir . "/";
+	my $mod = $module . ".ko";
+	my $modulefile = `find $dir -name $mod | head -1`;
+	chomp($modulefile);
+	$filename = $modulefile;
+	if ($filename eq "") {
+		print "Module .ko file for $module not found. Aborting\n";
+		exit;
+	}
+	# ok so we found the module, now we need to calculate the vma offset
+	open(FILE, "objdump -dS $filename |") || die "Cannot start objdump";
+	while (<FILE>) {
+		if ($_ =~ /^([0-9a-f]+) \<$function\>\:/) {
+			my $fu = $1;
+			$vmaoffset = hex($target) - hex($fu) - hex($func_offset);
+		}
+	}
+	close(FILE);
+}
+
 my $counter = 0;
 my $state   = 0;
 my $center  = 0;
@@ -59,9 +102,7 @@
 # first, parse the input into the lines array, but to keep size down,
 # we only do this for 4Kb around the sweet spot
 
-my $filename;
-
-open(FILE, "objdump -dS $vmlinux_name |") || die "Cannot start objdump";
+open(FILE, "objdump -dS --adjust-vma=$vmaoffset --start-address=$decodestart --stop-address=$decodestop $filename |") || die "Cannot start objdump";
 
 while (<FILE>) {
 	my $line = $_;
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 9e3451d..fdbe78b 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -24,6 +24,11 @@
 	tree=${srctree}/
 fi
 
+# Detect if ALLSOURCE_ARCHS is set. If not, we assume SRCARCH
+if [ "${ALLSOURCE_ARCHS}" = "" ]; then
+	ALLSOURCE_ARCHS=${SRCARCH}
+fi
+
 # find sources in arch/$ARCH
 find_arch_sources()
 {
@@ -54,26 +59,29 @@
 find_sources()
 {
 	find_arch_sources $1 "$2"
-	find_include_sources "$2"
-	find_other_sources "$2"
 }
 
 all_sources()
 {
-	find_sources $SRCARCH '*.[chS]'
+	for arch in $ALLSOURCE_ARCHS
+	do
+		find_sources $arch '*.[chS]'
+	done
 	if [ ! -z "$archinclude" ]; then
 		find_arch_include_sources $archinclude '*.[chS]'
 	fi
+	find_include_sources '*.[chS]'
+	find_other_sources '*.[chS]'
 }
 
 all_kconfigs()
 {
-	find_sources $SRCARCH 'Kconfig*'
+	find_sources $ALLSOURCE_ARCHS 'Kconfig*'
 }
 
 all_defconfigs()
 {
-	find_sources $SRCARCH "defconfig"
+	find_sources $ALLSOURCE_ARCHS "defconfig"
 }
 
 docscope()
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 5ba7870..3aacd0f 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -513,11 +513,14 @@
 	struct dev_cgroup *dev_cgroup;
 	struct dev_whitelist_item *wh;
 
+	if (!S_ISBLK(mode) && !S_ISCHR(mode))
+		return 0;
+
 	rcu_read_lock();
 
 	dev_cgroup = task_devcgroup(current);
 
-	list_for_each_entry(wh, &dev_cgroup->whitelist, list) {
+	list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
 		if (wh->type & DEV_ALL)
 			goto acc_check;
 		if ((wh->type & DEV_BLOCK) && !S_ISBLK(mode))
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 0979679..b1ec3b4 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -54,11 +54,11 @@
  * - returns the new key's serial number
  * - implements add_key()
  */
-asmlinkage long sys_add_key(const char __user *_type,
-			    const char __user *_description,
-			    const void __user *_payload,
-			    size_t plen,
-			    key_serial_t ringid)
+SYSCALL_DEFINE5(add_key, const char __user *, _type,
+		const char __user *, _description,
+		const void __user *, _payload,
+		size_t, plen,
+		key_serial_t, ringid)
 {
 	key_ref_t keyring_ref, key_ref;
 	char type[32], *description;
@@ -146,10 +146,10 @@
  *   - if the _callout_info string is empty, it will be rendered as "-"
  * - implements request_key()
  */
-asmlinkage long sys_request_key(const char __user *_type,
-				const char __user *_description,
-				const char __user *_callout_info,
-				key_serial_t destringid)
+SYSCALL_DEFINE4(request_key, const char __user *, _type,
+		const char __user *, _description,
+		const char __user *, _callout_info,
+		key_serial_t, destringid)
 {
 	struct key_type *ktype;
 	struct key *key;
@@ -270,6 +270,7 @@
 
 	/* join the session */
 	ret = join_session_keyring(name);
+	kfree(name);
 
  error:
 	return ret;
@@ -1216,8 +1217,8 @@
 /*
  * the key control system call
  */
-asmlinkage long sys_keyctl(int option, unsigned long arg2, unsigned long arg3,
-			   unsigned long arg4, unsigned long arg5)
+SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
+		unsigned long, arg4, unsigned long, arg5)
 {
 	switch (option) {
 	case KEYCTL_GET_KEYRING_ID:
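
The conversions above follow the generic SYSCALL_DEFINEn() pattern, in
which every argument is written as a (type, name) pair so the macro can
emit the per-architecture wrappers (for example, for sign extension of
32-bit arguments on 64-bit architectures). Purely as a sketch, a
hypothetical two-argument syscall would be declared like this ("example"
is not a real syscall):

#include <linux/syscalls.h>

SYSCALL_DEFINE2(example, unsigned int, flags, unsigned long, arg)
{
	if (flags)
		return -EINVAL;
	return (long)arg;
}
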
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index bf107a3..71e2b91 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -569,7 +569,7 @@
 	if (skp == NULL)
 		goto out;
 
-	rule += SMK_LABELLEN;;
+	rule += SMK_LABELLEN;
 	ret = sscanf(rule, "%d", &maplevel);
 	if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL)
 		goto out;
diff --git a/sound/oss/aedsp16.c b/sound/oss/aedsp16.c
index a0274f3..3ee9900 100644
--- a/sound/oss/aedsp16.c
+++ b/sound/oss/aedsp16.c
@@ -157,7 +157,7 @@
 
    Started Fri Mar 17 16:13:18 MET 1995
 
-   v0.1 (ALPHA, was an user-level program called AudioExcelDSP16.c)
+   v0.1 (ALPHA, was a user-level program called AudioExcelDSP16.c)
    - Initial code.
    v0.2 (ALPHA)
    - Cleanups.
diff --git a/sound/oss/dmasound/dmasound_atari.c b/sound/oss/dmasound/dmasound_atari.c
index 4d45bd6..57d9f15 100644
--- a/sound/oss/dmasound/dmasound_atari.c
+++ b/sound/oss/dmasound/dmasound_atari.c
@@ -851,8 +851,9 @@
 	mfp.tim_dt_a = 1;	/* Cause interrupt after first event. */
 	mfp.tim_ct_a = 8;	/* Turn on event counting. */
 	/* Register interrupt handler. */
-	request_irq(IRQ_MFP_TIMA, AtaInterrupt, IRQ_TYPE_SLOW, "DMA sound",
-		    AtaInterrupt);
+	if (request_irq(IRQ_MFP_TIMA, AtaInterrupt, IRQ_TYPE_SLOW, "DMA sound",
+			AtaInterrupt))
+		return 0;
 	mfp.int_en_a |= 0x20;	/* Turn interrupt on. */
 	mfp.int_mk_a |= 0x20;
 	return 1;
diff --git a/sound/oss/dmasound/dmasound_q40.c b/sound/oss/dmasound/dmasound_q40.c
index 1855b14..99bcb21 100644
--- a/sound/oss/dmasound/dmasound_q40.c
+++ b/sound/oss/dmasound/dmasound_q40.c
@@ -371,8 +371,9 @@
 static int __init Q40IrqInit(void)
 {
 	/* Register interrupt handler. */
-	request_irq(Q40_IRQ_SAMPLE, Q40StereoInterrupt, 0,
-		    "DMA sound", Q40Interrupt);
+	if (request_irq(Q40_IRQ_SAMPLE, Q40StereoInterrupt, 0,
+		    "DMA sound", Q40Interrupt))
+		return 0;
 
 	return(1);
 }
@@ -401,6 +402,7 @@
 	u_char *start;
 	u_long size;
 	u_char speed;
+	int error;
 
 	/* used by Q40Play() if all doubts whether there really is something
 	 * to be played are already wiped out.
@@ -419,11 +421,13 @@
 	master_outb( 0,SAMPLE_ENABLE_REG);
 	free_irq(Q40_IRQ_SAMPLE, Q40Interrupt);
 	if (dmasound.soft.stereo)
-	  	request_irq(Q40_IRQ_SAMPLE, Q40StereoInterrupt, 0,
-		    "Q40 sound", Q40Interrupt);
+		error = request_irq(Q40_IRQ_SAMPLE, Q40StereoInterrupt, 0,
+				    "Q40 sound", Q40Interrupt);
 	  else
-	        request_irq(Q40_IRQ_SAMPLE, Q40MonoInterrupt, 0,
-		    "Q40 sound", Q40Interrupt);
+		error = request_irq(Q40_IRQ_SAMPLE, Q40MonoInterrupt, 0,
+				    "Q40 sound", Q40Interrupt);
+	if (error && printk_ratelimit())
+		pr_err("Couldn't register sound interrupt\n");
 
 	master_outb( speed, SAMPLE_RATE_REG);
 	master_outb( 1,SAMPLE_CLEAR_REG);
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index e00421c..960fd79 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -135,7 +135,6 @@
 	struct hda_beep *beep = codec->beep;
 	if (beep) {
 		cancel_work_sync(&beep->beep_work);
-		flush_scheduled_work();
 
 		input_unregister_device(beep->dev);
 		kfree(beep);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index e16cf63..b7bba7d 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -373,7 +373,7 @@
 	unsol->queue[wp] = res;
 	unsol->queue[wp + 1] = res_ex;
 
-	schedule_work(&unsol->work);
+	queue_work(bus->workq, &unsol->work);
 
 	return 0;
 }
@@ -437,15 +437,17 @@
 
 	if (!bus)
 		return 0;
-	if (bus->unsol) {
-		flush_scheduled_work();
+	if (bus->workq)
+		flush_workqueue(bus->workq);
+	if (bus->unsol)
 		kfree(bus->unsol);
-	}
 	list_for_each_entry_safe(codec, n, &bus->codec_list, list) {
 		snd_hda_codec_free(codec);
 	}
 	if (bus->ops.private_free)
 		bus->ops.private_free(bus);
+	if (bus->workq)
+		destroy_workqueue(bus->workq);
 	kfree(bus);
 	return 0;
 }
@@ -485,6 +487,7 @@
 {
 	struct hda_bus *bus;
 	int err;
+	char qname[8];
 	static struct snd_device_ops dev_ops = {
 		.dev_register = snd_hda_bus_dev_register,
 		.dev_free = snd_hda_bus_dev_free,
@@ -514,6 +517,14 @@
 	mutex_init(&bus->cmd_mutex);
 	INIT_LIST_HEAD(&bus->codec_list);
 
+	snprintf(qname, sizeof(qname), "hda%d", card->number);
+	bus->workq = create_workqueue(qname);
+	if (!bus->workq) {
+		snd_printk(KERN_ERR "cannot create workqueue %s\n", qname);
+		kfree(bus);
+		return -ENOMEM;
+	}
+
 	err = snd_device_new(card, SNDRV_DEV_BUS, bus, &dev_ops);
 	if (err < 0) {
 		snd_hda_bus_free(bus);
@@ -684,7 +695,7 @@
 		return;
 #ifdef CONFIG_SND_HDA_POWER_SAVE
 	cancel_delayed_work(&codec->power_work);
-	flush_scheduled_work();
+	flush_workqueue(codec->bus->workq);
 #endif
 	list_del(&codec->list);
 	snd_array_free(&codec->mixers);
@@ -735,6 +746,7 @@
 	codec->bus = bus;
 	codec->addr = codec_addr;
 	mutex_init(&codec->spdif_mutex);
+	mutex_init(&codec->control_mutex);
 	init_hda_cache(&codec->amp_cache, sizeof(struct hda_amp_info));
 	init_hda_cache(&codec->cmd_cache, sizeof(struct hda_cache_head));
 	snd_array_init(&codec->mixers, sizeof(struct snd_kcontrol *), 32);
@@ -1272,7 +1284,7 @@
 
 #ifdef CONFIG_SND_HDA_POWER_SAVE
 	cancel_delayed_work(&codec->power_work);
-	flush_scheduled_work();
+	flush_workqueue(codec->bus->workq);
 #endif
 	snd_hda_ctls_clear(codec);
 	/* relase PCMs */
@@ -1418,12 +1430,12 @@
 	unsigned long pval;
 	int err;
 
-	mutex_lock(&codec->spdif_mutex); /* reuse spdif_mutex */
+	mutex_lock(&codec->control_mutex);
 	pval = kcontrol->private_value;
 	kcontrol->private_value = pval & ~AMP_VAL_IDX_MASK; /* index 0 */
 	err = snd_hda_mixer_amp_switch_get(kcontrol, ucontrol);
 	kcontrol->private_value = pval;
-	mutex_unlock(&codec->spdif_mutex);
+	mutex_unlock(&codec->control_mutex);
 	return err;
 }
 EXPORT_SYMBOL_HDA(snd_hda_mixer_bind_switch_get);
@@ -1435,7 +1447,7 @@
 	unsigned long pval;
 	int i, indices, err = 0, change = 0;
 
-	mutex_lock(&codec->spdif_mutex); /* reuse spdif_mutex */
+	mutex_lock(&codec->control_mutex);
 	pval = kcontrol->private_value;
 	indices = (pval & AMP_VAL_IDX_MASK) >> AMP_VAL_IDX_SHIFT;
 	for (i = 0; i < indices; i++) {
@@ -1447,7 +1459,7 @@
 		change |= err;
 	}
 	kcontrol->private_value = pval;
-	mutex_unlock(&codec->spdif_mutex);
+	mutex_unlock(&codec->control_mutex);
 	return err < 0 ? err : change;
 }
 EXPORT_SYMBOL_HDA(snd_hda_mixer_bind_switch_put);
@@ -1462,12 +1474,12 @@
 	struct hda_bind_ctls *c;
 	int err;
 
-	mutex_lock(&codec->spdif_mutex); /* reuse spdif_mutex */
+	mutex_lock(&codec->control_mutex);
 	c = (struct hda_bind_ctls *)kcontrol->private_value;
 	kcontrol->private_value = *c->values;
 	err = c->ops->info(kcontrol, uinfo);
 	kcontrol->private_value = (long)c;
-	mutex_unlock(&codec->spdif_mutex);
+	mutex_unlock(&codec->control_mutex);
 	return err;
 }
 EXPORT_SYMBOL_HDA(snd_hda_mixer_bind_ctls_info);
@@ -1479,12 +1491,12 @@
 	struct hda_bind_ctls *c;
 	int err;
 
-	mutex_lock(&codec->spdif_mutex); /* reuse spdif_mutex */
+	mutex_lock(&codec->control_mutex);
 	c = (struct hda_bind_ctls *)kcontrol->private_value;
 	kcontrol->private_value = *c->values;
 	err = c->ops->get(kcontrol, ucontrol);
 	kcontrol->private_value = (long)c;
-	mutex_unlock(&codec->spdif_mutex);
+	mutex_unlock(&codec->control_mutex);
 	return err;
 }
 EXPORT_SYMBOL_HDA(snd_hda_mixer_bind_ctls_get);
@@ -1497,7 +1509,7 @@
 	unsigned long *vals;
 	int err = 0, change = 0;
 
-	mutex_lock(&codec->spdif_mutex); /* reuse spdif_mutex */
+	mutex_lock(&codec->control_mutex);
 	c = (struct hda_bind_ctls *)kcontrol->private_value;
 	for (vals = c->values; *vals; vals++) {
 		kcontrol->private_value = *vals;
@@ -1507,7 +1519,7 @@
 		change |= err;
 	}
 	kcontrol->private_value = (long)c;
-	mutex_unlock(&codec->spdif_mutex);
+	mutex_unlock(&codec->control_mutex);
 	return err < 0 ? err : change;
 }
 EXPORT_SYMBOL_HDA(snd_hda_mixer_bind_ctls_put);
@@ -1519,12 +1531,12 @@
 	struct hda_bind_ctls *c;
 	int err;
 
-	mutex_lock(&codec->spdif_mutex); /* reuse spdif_mutex */
+	mutex_lock(&codec->control_mutex);
 	c = (struct hda_bind_ctls *)kcontrol->private_value;
 	kcontrol->private_value = *c->values;
 	err = c->ops->tlv(kcontrol, op_flag, size, tlv);
 	kcontrol->private_value = (long)c;
-	mutex_unlock(&codec->spdif_mutex);
+	mutex_unlock(&codec->control_mutex);
 	return err;
 }
 EXPORT_SYMBOL_HDA(snd_hda_mixer_bind_tlv);
@@ -2712,6 +2724,67 @@
 EXPORT_SYMBOL_HDA(snd_hda_check_board_config);
 
 /**
+ * snd_hda_check_board_codec_sid_config - compare the current codec
+ *					   subsystem ID with the config table
+ *
+ * This is important for Gateway notebooks with SB450 HDA Audio where
+ * the vendor ID of the PCI device is:
+ *	ATI Technologies Inc SB450 HDA Audio [1002:437b]
+ * and the vendor/subvendor are found only at the codec.
+ *
+ * @codec: the HDA codec
+ * @num_configs: number of config enums
+ * @models: array of model name strings
+ * @tbl: configuration table, terminated by null entries
+ *
+ * Compares the subsystem ID of the current codec with the given
+ * configuration table.  If a matching entry is found, returns its
+ * config value (supposed to be 0 or positive).
+ *
+ * If no entries are matching, the function returns a negative value.
+ */
+int snd_hda_check_board_codec_sid_config(struct hda_codec *codec,
+			       int num_configs, const char **models,
+			       const struct snd_pci_quirk *tbl)
+{
+	const struct snd_pci_quirk *q;
+
+	/* Search for codec ID */
+	for (q = tbl; q->subvendor; q++) {
+		unsigned long vendorid = (q->subdevice) | (q->subvendor << 16);
+
+		if (vendorid == codec->subsystem_id)
+			break;
+	}
+
+	if (!q->subvendor)
+		return -1;
+
+	tbl = q;
+
+	if (tbl->value >= 0 && tbl->value < num_configs) {
+#ifdef CONFIG_SND_DEBUG_DETECT
+		char tmp[10];
+		const char *model = NULL;
+		if (models)
+			model = models[tbl->value];
+		if (!model) {
+			sprintf(tmp, "#%d", tbl->value);
+			model = tmp;
+		}
+		snd_printdd(KERN_INFO "hda_codec: model '%s' is selected "
+			    "for config %x:%x (%s)\n",
+			    model, tbl->subvendor, tbl->subdevice,
+			    (tbl->name ? tbl->name : "Unknown device"));
+#endif
+		return tbl->value;
+	}
+	return -1;
+}
+EXPORT_SYMBOL_HDA(snd_hda_check_board_codec_sid_config);
+
+/**
  * snd_hda_add_new_ctls - create controls from the array
  * @codec: the HDA codec
  * @knew: the array of struct snd_kcontrol_new
@@ -2803,7 +2876,7 @@
 		return;
 	if (power_save(codec)) {
 		codec->power_transition = 1; /* avoid reentrance */
-		schedule_delayed_work(&codec->power_work,
+		queue_delayed_work(codec->bus->workq, &codec->power_work,
 				msecs_to_jiffies(power_save(codec) * 1000));
 	}
 }
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 729fc764..5810ef5 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -614,6 +614,7 @@
 
 	/* unsolicited event queue */
 	struct hda_bus_unsolicited *unsol;
+	struct workqueue_struct *workq;	/* common workqueue for codecs */
 
 	/* assigned PCMs */
 	DECLARE_BITMAP(pcm_dev_bits, SNDRV_PCM_DEVICES);
@@ -771,6 +772,7 @@
 	struct hda_cache_rec cmd_cache;	/* cache for other commands */
 
 	struct mutex spdif_mutex;
+	struct mutex control_mutex;
 	unsigned int spdif_status;	/* IEC958 status bits */
 	unsigned short spdif_ctls;	/* SPDIF control bits */
 	unsigned int spdif_in_enable;	/* SPDIF input enable? */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index f04de11..11e791b 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -996,10 +996,11 @@
 				spin_unlock(&chip->reg_lock);
 				snd_pcm_period_elapsed(azx_dev->substream);
 				spin_lock(&chip->reg_lock);
-			} else {
+			} else if (chip->bus && chip->bus->workq) {
 				/* bogus IRQ, process it later */
 				azx_dev->irq_pending = 1;
-				schedule_work(&chip->irq_pending_work);
+				queue_work(chip->bus->workq,
+					   &chip->irq_pending_work);
 			}
 		}
 	}
@@ -1741,7 +1742,6 @@
 	for (i = 0; i < chip->num_streams; i++)
 		chip->azx_dev[i].irq_pending = 0;
 	spin_unlock_irq(&chip->reg_lock);
-	flush_scheduled_work();
 }
 
 static struct snd_pcm_ops azx_pcm_ops = {
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 6f2fe0f..1dd8716 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -296,6 +296,9 @@
 int snd_hda_check_board_config(struct hda_codec *codec, int num_configs,
 			       const char **modelnames,
 			       const struct snd_pci_quirk *pci_list);
+int snd_hda_check_board_codec_sid_config(struct hda_codec *codec,
+                               int num_configs, const char **models,
+                               const struct snd_pci_quirk *tbl);
 int snd_hda_add_new_ctls(struct hda_codec *codec,
 			 struct snd_kcontrol_new *knew);
 
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 26247cf..2e7371e 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -3900,6 +3900,7 @@
 
 static struct snd_pci_quirk ad1884a_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x3030, "HP", AD1884A_MOBILE),
+	SND_PCI_QUIRK(0x103c, 0x3037, "HP 2230s", AD1884A_LAPTOP),
 	SND_PCI_QUIRK(0x103c, 0x3056, "HP", AD1884A_MOBILE),
 	SND_PCI_QUIRK(0x103c, 0x30e6, "HP 6730b", AD1884A_LAPTOP),
 	SND_PCI_QUIRK(0x103c, 0x30e7, "HP EliteBook 8530p", AD1884A_LAPTOP),
@@ -4262,13 +4263,13 @@
 	spec->num_adc_nids = ARRAY_SIZE(ad1882_adc_nids);
 	spec->adc_nids = ad1882_adc_nids;
 	spec->capsrc_nids = ad1882_capsrc_nids;
-	if (codec->vendor_id == 0x11d1882)
+	if (codec->vendor_id == 0x11d41882)
 		spec->input_mux = &ad1882_capture_source;
 	else
 		spec->input_mux = &ad1882a_capture_source;
 	spec->num_mixers = 2;
 	spec->mixers[0] = ad1882_base_mixers;
-	if (codec->vendor_id == 0x11d1882)
+	if (codec->vendor_id == 0x11d41882)
 		spec->mixers[1] = ad1882_loopback_mixers;
 	else
 		spec->mixers[1] = ad1882a_loopback_mixers;
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
index 0270fda..d57d813 100644
--- a/sound/pci/hda/patch_nvhdmi.c
+++ b/sound/pci/hda/patch_nvhdmi.c
@@ -160,14 +160,18 @@
  */
 static struct hda_codec_preset snd_hda_preset_nvhdmi[] = {
 	{ .id = 0x10de0002, .name = "MCP78 HDMI", .patch = patch_nvhdmi },
+	{ .id = 0x10de0006, .name = "MCP78 HDMI", .patch = patch_nvhdmi },
 	{ .id = 0x10de0007, .name = "MCP7A HDMI", .patch = patch_nvhdmi },
 	{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi },
+	{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi },
 	{} /* terminator */
 };
 
 MODULE_ALIAS("snd-hda-codec-id:10de0002");
+MODULE_ALIAS("snd-hda-codec-id:10de0006");
 MODULE_ALIAS("snd-hda-codec-id:10de0007");
 MODULE_ALIAS("snd-hda-codec-id:10de0067");
+MODULE_ALIAS("snd-hda-codec-id:10de8001");
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Nvidia HDMI HD-audio codec");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9065ebf..82dd084 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1502,11 +1502,11 @@
 	struct alc_spec *spec = codec->spec;
 	int err;
 
-	mutex_lock(&codec->spdif_mutex); /* reuse spdif_mutex */
+	mutex_lock(&codec->control_mutex);
 	kcontrol->private_value = HDA_COMPOSE_AMP_VAL(spec->adc_nids[0], 3, 0,
 						      HDA_INPUT);
 	err = snd_hda_mixer_amp_volume_info(kcontrol, uinfo);
-	mutex_unlock(&codec->spdif_mutex); /* reuse spdif_mutex */
+	mutex_unlock(&codec->control_mutex);
 	return err;
 }
 
@@ -1517,11 +1517,11 @@
 	struct alc_spec *spec = codec->spec;
 	int err;
 
-	mutex_lock(&codec->spdif_mutex); /* reuse spdif_mutex */
+	mutex_lock(&codec->control_mutex);
 	kcontrol->private_value = HDA_COMPOSE_AMP_VAL(spec->adc_nids[0], 3, 0,
 						      HDA_INPUT);
 	err = snd_hda_mixer_amp_tlv(kcontrol, op_flag, size, tlv);
-	mutex_unlock(&codec->spdif_mutex); /* reuse spdif_mutex */
+	mutex_unlock(&codec->control_mutex);
 	return err;
 }
 
@@ -1537,11 +1537,11 @@
 	unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
 	int err;
 
-	mutex_lock(&codec->spdif_mutex); /* reuse spdif_mutex */
+	mutex_lock(&codec->control_mutex);
 	kcontrol->private_value = HDA_COMPOSE_AMP_VAL(spec->adc_nids[adc_idx],
 						      3, 0, HDA_INPUT);
 	err = func(kcontrol, ucontrol);
-	mutex_unlock(&codec->spdif_mutex); /* reuse spdif_mutex */
+	mutex_unlock(&codec->control_mutex);
 	return err;
 }
 
@@ -8461,6 +8461,10 @@
 	SND_PCI_QUIRK(0x1025, 0x0121, "Acer Aspire 5920G", ALC883_ACER_ASPIRE),
 	SND_PCI_QUIRK(0x1025, 0x013e, "Acer Aspire 4930G",
 		ALC888_ACER_ASPIRE_4930G),
+	SND_PCI_QUIRK(0x1025, 0x013f, "Acer Aspire 5930G",
+		ALC888_ACER_ASPIRE_4930G),
+	SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
+		ALC888_ACER_ASPIRE_4930G),
 	SND_PCI_QUIRK(0x1025, 0, "Acer laptop", ALC883_ACER), /* default Acer */
 	SND_PCI_QUIRK(0x1028, 0x020d, "Dell Inspiron 530", ALC888_6ST_DELL),
 	SND_PCI_QUIRK(0x103c, 0x2a3d, "HP Pavillion", ALC883_6ST_DIG),
@@ -8522,6 +8526,7 @@
 	SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66),
 	SND_PCI_QUIRK(0x8086, 0x0001, "DG33BUC", ALC883_3ST_6ch_INTEL),
 	SND_PCI_QUIRK(0x8086, 0x0002, "DG33FBC", ALC883_3ST_6ch_INTEL),
+	SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC883_3ST_6ch_INTEL),
 	SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch),
 	{}
 };
@@ -10568,6 +10573,7 @@
 	SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FUJITSU),
 	SND_PCI_QUIRK(0x144d, 0xc032, "Samsung Q1 Ultra", ALC262_ULTRA),
 	SND_PCI_QUIRK(0x144d, 0xc039, "Samsung Q1U EL", ALC262_ULTRA),
+	SND_PCI_QUIRK(0x144d, 0xc510, "Samsung Q45", ALC262_HIPPO),
 	SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000 y410", ALC262_LENOVO_3000),
 	SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_BENQ_ED8),
 	SND_PCI_QUIRK(0x17ff, 0x058d, "Benq T31-16", ALC262_BENQ_T31),
@@ -11689,6 +11695,7 @@
 	SND_PCI_QUIRK(0x1025, 0x015b, "Acer Aspire One",
 						ALC268_ACER_ASPIRE_ONE),
 	SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL),
+	SND_PCI_QUIRK(0x1028, 0x02b0, "Dell Inspiron Mini9", ALC268_DELL),
 	SND_PCI_QUIRK(0x103c, 0x30cc, "TOSHIBA", ALC268_TOSHIBA),
 	SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST),
 	SND_PCI_QUIRK(0x1179, 0xff10, "TOSHIBA A205", ALC268_TOSHIBA),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 35b83dc..c39deeb 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -55,7 +55,8 @@
 	STAC_9200_DELL_M25,
 	STAC_9200_DELL_M26,
 	STAC_9200_DELL_M27,
-	STAC_9200_GATEWAY,
+	STAC_9200_M4,
+	STAC_9200_M4_2,
 	STAC_9200_PANASONIC,
 	STAC_9200_MODELS
 };
@@ -89,14 +90,19 @@
 	STAC_DELL_M4_2,
 	STAC_DELL_M4_3,
 	STAC_HP_M4,
+	STAC_HP_DV5,
 	STAC_92HD71BXX_MODELS
 };
 
 enum {
 	STAC_925x_REF,
+	STAC_M1,
+	STAC_M1_2,
+	STAC_M2,
 	STAC_M2_2,
-	STAC_MA6,
-	STAC_PA6,
+	STAC_M3,
+	STAC_M5,
+	STAC_M6,
 	STAC_925x_MODELS
 };
 
@@ -331,6 +337,10 @@
 	0x03, 0x0c, 0x10, 0x40,
 };
 
+static hda_nid_t stac92hd83xxx_amp_nids[1] = {
+	0xc,
+};
+
 static hda_nid_t stac92hd71bxx_pwr_nids[3] = {
 	0x0a, 0x0d, 0x0f
 };
@@ -875,6 +885,8 @@
 static struct hda_verb stac925x_core_init[] = {
 	/* set dac0mux for dac converter */
 	{ 0x06, AC_VERB_SET_CONNECT_SEL, 0x00},
+	/* unmute and set max the selector */
+	{ 0x0e, AC_VERB_SET_AMP_GAIN_MUTE, 0xb01f },
 	{}
 };
 
@@ -1334,7 +1346,16 @@
 	0x02a19020, 0x01a19021, 0x90100140, 0x01813122,
 };
 
-/* 
+static unsigned int gateway9200_m4_pin_configs[8] = {
+	0x400000fe, 0x404500f4, 0x400100f0, 0x90110010,
+	0x400100f1, 0x02a1902e, 0x500000f2, 0x500000f3,
+};
+static unsigned int gateway9200_m4_2_pin_configs[8] = {
+	0x400000fe, 0x404500f4, 0x400100f0, 0x90110010,
+	0x400100f1, 0x02a1902e, 0x500000f2, 0x500000f3,
+};
+
+/*
     STAC 9200 pin configs for
     102801A8
     102801DE
@@ -1464,6 +1485,8 @@
 	[STAC_9200_DELL_M25] = dell9200_m25_pin_configs,
 	[STAC_9200_DELL_M26] = dell9200_m26_pin_configs,
 	[STAC_9200_DELL_M27] = dell9200_m27_pin_configs,
+	[STAC_9200_M4] = gateway9200_m4_pin_configs,
+	[STAC_9200_M4_2] = gateway9200_m4_2_pin_configs,
 	[STAC_9200_PANASONIC] = ref9200_pin_configs,
 };
 
@@ -1480,7 +1503,8 @@
 	[STAC_9200_DELL_M25] = "dell-m25",
 	[STAC_9200_DELL_M26] = "dell-m26",
 	[STAC_9200_DELL_M27] = "dell-m27",
-	[STAC_9200_GATEWAY] = "gateway",
+	[STAC_9200_M4] = "gateway-m4",
+	[STAC_9200_M4_2] = "gateway-m4-2",
 	[STAC_9200_PANASONIC] = "panasonic",
 };
 
@@ -1550,11 +1574,9 @@
 	/* Panasonic */
 	SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", STAC_9200_PANASONIC),
 	/* Gateway machines needs EAPD to be set on resume */
-	SND_PCI_QUIRK(0x107b, 0x0205, "Gateway S-7110M", STAC_9200_GATEWAY),
-	SND_PCI_QUIRK(0x107b, 0x0317, "Gateway MT3423, MX341*",
-		      STAC_9200_GATEWAY),
-	SND_PCI_QUIRK(0x107b, 0x0318, "Gateway ML3019, MT3707",
-		      STAC_9200_GATEWAY),
+	SND_PCI_QUIRK(0x107b, 0x0205, "Gateway S-7110M", STAC_9200_M4),
+	SND_PCI_QUIRK(0x107b, 0x0317, "Gateway MT3423, MX341*", STAC_9200_M4_2),
+	SND_PCI_QUIRK(0x107b, 0x0318, "Gateway ML3019, MT3707", STAC_9200_M4_2),
 	/* OQO Mobile */
 	SND_PCI_QUIRK(0x1106, 0x3288, "OQO Model 2", STAC_9200_OQO),
 	{} /* terminator */
@@ -1565,44 +1587,85 @@
 	0x90a70320, 0x02214210, 0x01019020, 0x9033032e,
 };
 
-static unsigned int stac925x_MA6_pin_configs[8] = {
-	0x40c003f0, 0x424503f2, 0x01813022, 0x02a19021,
-	0x90a70320, 0x90100211, 0x400003f1, 0x9033032e,
+static unsigned int stac925xM1_pin_configs[8] = {
+	0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020,
+	0x40a000f0, 0x90100210, 0x400003f1, 0x9033032e,
 };
 
-static unsigned int stac925x_PA6_pin_configs[8] = {
-	0x40c003f0, 0x424503f2, 0x01813022, 0x02a19021,
-	0x50a103f0, 0x90100211, 0x400003f1, 0x9033032e,
+static unsigned int stac925xM1_2_pin_configs[8] = {
+	0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020,
+	0x40a000f0, 0x90100210, 0x400003f1, 0x9033032e,
+};
+
+static unsigned int stac925xM2_pin_configs[8] = {
+	0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020,
+	0x40a000f0, 0x90100210, 0x400003f1, 0x9033032e,
 };
 
 static unsigned int stac925xM2_2_pin_configs[8] = {
-	0x40c003f3, 0x424503f2, 0x04180011, 0x02a19020,
-	0x50a103f0, 0x90100212, 0x400003f1, 0x9033032e,
+	0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020,
+	0x40a000f0, 0x90100210, 0x400003f1, 0x9033032e,
+};
+
+static unsigned int stac925xM3_pin_configs[8] = {
+	0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020,
+	0x40a000f0, 0x90100210, 0x400003f1, 0x503303f3,
+};
+
+static unsigned int stac925xM5_pin_configs[8] = {
+	0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020,
+	0x40a000f0, 0x90100210, 0x400003f1, 0x9033032e,
+};
+
+static unsigned int stac925xM6_pin_configs[8] = {
+	0x40c003f4, 0x424503f2, 0x400000f3, 0x02a19020,
+	0x40a000f0, 0x90100210, 0x400003f1, 0x90330320,
 };
 
 static unsigned int *stac925x_brd_tbl[STAC_925x_MODELS] = {
 	[STAC_REF] = ref925x_pin_configs,
+	[STAC_M1] = stac925xM1_pin_configs,
+	[STAC_M1_2] = stac925xM1_2_pin_configs,
+	[STAC_M2] = stac925xM2_pin_configs,
 	[STAC_M2_2] = stac925xM2_2_pin_configs,
-	[STAC_MA6] = stac925x_MA6_pin_configs,
-	[STAC_PA6] = stac925x_PA6_pin_configs,
+	[STAC_M3] = stac925xM3_pin_configs,
+	[STAC_M5] = stac925xM5_pin_configs,
+	[STAC_M6] = stac925xM6_pin_configs,
 };
 
 static const char *stac925x_models[STAC_925x_MODELS] = {
 	[STAC_REF] = "ref",
+	[STAC_M1] = "m1",
+	[STAC_M1_2] = "m1-2",
+	[STAC_M2] = "m2",
 	[STAC_M2_2] = "m2-2",
-	[STAC_MA6] = "m6",
-	[STAC_PA6] = "pa6",
+	[STAC_M3] = "m3",
+	[STAC_M5] = "m5",
+	[STAC_M6] = "m6",
+};
+
+static struct snd_pci_quirk stac925x_codec_id_cfg_tbl[] = {
+	SND_PCI_QUIRK(0x107b, 0x0316, "Gateway M255", STAC_M2),
+	SND_PCI_QUIRK(0x107b, 0x0366, "Gateway MP6954", STAC_M5),
+	SND_PCI_QUIRK(0x107b, 0x0461, "Gateway NX560XL", STAC_M1),
+	SND_PCI_QUIRK(0x107b, 0x0681, "Gateway NX860", STAC_M2),
+	SND_PCI_QUIRK(0x107b, 0x0367, "Gateway MX6453", STAC_M1_2),
+	/* Not sure about the brand name for those */
+	SND_PCI_QUIRK(0x107b, 0x0281, "Gateway mobile", STAC_M1),
+	SND_PCI_QUIRK(0x107b, 0x0507, "Gateway mobile", STAC_M3),
+	SND_PCI_QUIRK(0x107b, 0x0281, "Gateway mobile", STAC_M6),
+	SND_PCI_QUIRK(0x107b, 0x0685, "Gateway mobile", STAC_M2_2),
+	{} /* terminator */
 };
 
 static struct snd_pci_quirk stac925x_cfg_tbl[] = {
 	/* SigmaTel reference board */
 	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668, "DFI LanParty", STAC_REF),
 	SND_PCI_QUIRK(0x8384, 0x7632, "Stac9202 Reference Board", STAC_REF),
-	SND_PCI_QUIRK(0x107b, 0x0316, "Gateway M255", STAC_REF),
-	SND_PCI_QUIRK(0x107b, 0x0366, "Gateway MP6954", STAC_REF),
-	SND_PCI_QUIRK(0x107b, 0x0461, "Gateway NX560XL", STAC_MA6),
-	SND_PCI_QUIRK(0x107b, 0x0681, "Gateway NX860", STAC_PA6),
-	SND_PCI_QUIRK(0x1002, 0x437b, "Gateway MX6453", STAC_M2_2),
+
+	/* Default table for unknown ID */
+	SND_PCI_QUIRK(0x1002, 0x437b, "Gateway mobile", STAC_M2_2),
+
 	{} /* terminator */
 };
 
@@ -1682,7 +1745,7 @@
 static struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
 	/* SigmaTel reference board */
 	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x2668,
-		      "DFI LanParty", STAC_92HD71BXX_REF),
+		      "DFI LanParty", STAC_92HD83XXX_REF),
 	{} /* terminator */
 };
 
@@ -1716,6 +1779,7 @@
 	[STAC_DELL_M4_2]	= dell_m4_2_pin_configs,
 	[STAC_DELL_M4_3]	= dell_m4_3_pin_configs,
 	[STAC_HP_M4]		= NULL,
+	[STAC_HP_DV5]		= NULL,
 };
 
 static const char *stac92hd71bxx_models[STAC_92HD71BXX_MODELS] = {
@@ -1724,6 +1788,7 @@
 	[STAC_DELL_M4_2] = "dell-m4-2",
 	[STAC_DELL_M4_3] = "dell-m4-3",
 	[STAC_HP_M4] = "hp-m4",
+	[STAC_HP_DV5] = "hp-dv5",
 };
 
 static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = {
@@ -1736,6 +1801,8 @@
 		      "HP dv7", STAC_HP_M4),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x30fc,
 		      "HP dv7", STAC_HP_M4),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3603,
+		      "HP dv5", STAC_HP_DV5),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x361a,
 				"unknown HP", STAC_HP_M4),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233,
@@ -4163,8 +4230,19 @@
 			continue;
 		if (presence)
 			stac92xx_set_pinctl(codec, cfg->hp_pins[i], val);
+#if 0 /* FIXME */
+/* Resetting the pinctl like below may lead to (a sort of) regressions
+ * on some devices since they use the HP pin actually for line/speaker
+ * outs although the default pin config shows a different pin (that is
+ * wrong and useless).
+ *
+ * So, it's basically a problem of default pin configs, likely a BIOS issue.
+ * But, disabling the code below just works around it, and I'm too tired of
+ * bug reports with such devices... 
+ */
 		else
 			stac92xx_reset_pinctl(codec, cfg->hp_pins[i], val);
+#endif /* FIXME */
 	}
 } 
 
@@ -4390,7 +4468,8 @@
 	spec->num_adcs = 1;
 	spec->num_pwrs = 0;
 
-	if (spec->board_config == STAC_9200_GATEWAY ||
+	if (spec->board_config == STAC_9200_M4 ||
+	    spec->board_config == STAC_9200_M4_2 ||
 	    spec->board_config == STAC_9200_OQO)
 		spec->init = stac9200_eapd_init;
 	else
@@ -4408,6 +4487,12 @@
 		return err;
 	}
 
+	/* CF-74 has no headphone detection, and the driver should *NOT*
+	 * do detection and HP/speaker toggle because the hardware does it.
+	 */
+	if (spec->board_config == STAC_9200_PANASONIC)
+		spec->hp_detect = 0;
+
 	codec->patch_ops = stac92xx_patch_ops;
 
 	return 0;
@@ -4425,12 +4510,22 @@
 	codec->spec = spec;
 	spec->num_pins = ARRAY_SIZE(stac925x_pin_nids);
 	spec->pin_nids = stac925x_pin_nids;
-	spec->board_config = snd_hda_check_board_config(codec, STAC_925x_MODELS,
+
+	/* Check first for codec ID */
+	spec->board_config = snd_hda_check_board_codec_sid_config(codec,
+							STAC_925x_MODELS,
+							stac925x_models,
+							stac925x_codec_id_cfg_tbl);
+
+	/* Now checks for PCI ID, if codec ID is not found */
+	if (spec->board_config < 0)
+		spec->board_config = snd_hda_check_board_config(codec,
+							STAC_925x_MODELS,
 							stac925x_models,
 							stac925x_cfg_tbl);
  again:
 	if (spec->board_config < 0) {
-		snd_printdd(KERN_INFO "hda_codec: Unknown model for STAC925x," 
+		snd_printdd(KERN_INFO "hda_codec: Unknown model for STAC925x,"
 				      "using BIOS defaults\n");
 		err = stac92xx_save_bios_config_regs(codec);
 	} else
@@ -4672,6 +4767,7 @@
 	spec->dmux_nids = stac92hd83xxx_dmux_nids;
 	spec->adc_nids = stac92hd83xxx_adc_nids;
 	spec->pwr_nids = stac92hd83xxx_pwr_nids;
+	spec->amp_nids = stac92hd83xxx_amp_nids;
 	spec->pwr_mapping = stac92hd83xxx_pwr_mapping;
 	spec->num_pwrs = ARRAY_SIZE(stac92hd83xxx_pwr_nids);
 	spec->multiout.dac_nids = spec->dac_nids;
@@ -4689,6 +4785,7 @@
 	spec->num_pins = ARRAY_SIZE(stac92hd83xxx_pin_nids);
 	spec->num_dmuxes = ARRAY_SIZE(stac92hd83xxx_dmux_nids);
 	spec->num_adcs = ARRAY_SIZE(stac92hd83xxx_adc_nids);
+	spec->num_amps = ARRAY_SIZE(stac92hd83xxx_amp_nids);
 	spec->num_dmics = STAC92HD83XXX_NUM_DMICS;
 	spec->dinput_mux = &stac92hd83xxx_dmux;
 	spec->pin_nids = stac92hd83xxx_pin_nids;
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c
index 98c6a8c..e9e829e 100644
--- a/sound/pci/oxygen/virtuoso.c
+++ b/sound/pci/oxygen/virtuoso.c
@@ -26,7 +26,7 @@
  * SPI 0 -> 1st PCM1796 (front)
  * SPI 1 -> 2nd PCM1796 (surround)
  * SPI 2 -> 3rd PCM1796 (center/LFE)
- * SPI 4 -> 4th PCM1796 (back)
+ * SPI 4 -> 4th PCM1796 (back) and EEPROM self-destruct (do not use!)
  *
  * GPIO 2 -> M0 of CS5381
  * GPIO 3 -> M1 of CS5381
@@ -207,6 +207,12 @@
 static inline void pcm1796_write_spi(struct oxygen *chip, unsigned int codec,
 				     u8 reg, u8 value)
 {
+	/*
+	 * We don't want to do writes on SPI 4 because the EEPROM, which shares
+	 * the same pin, might get confused and broken.  We'd better take care
+	 * that the driver works with the default register values ...
+	 */
+#if 0
 	/* maps ALSA channel pair number to SPI output */
 	static const u8 codec_map[4] = {
 		0, 1, 2, 4
@@ -217,6 +223,7 @@
 			 (codec_map[codec] << OXYGEN_SPI_CODEC_SHIFT) |
 			 OXYGEN_SPI_CEN_LATCH_CLOCK_HI,
 			 (reg << 8) | value);
+#endif
 }
 
 static inline void pcm1796_write_i2c(struct oxygen *chip, unsigned int codec,
@@ -750,6 +757,9 @@
 
 static int xonar_d2_control_filter(struct snd_kcontrol_new *template)
 {
+	if (!strncmp(template->name, "Master Playback ", 16))
+		/* disable volume/mute because they would require SPI writes */
+		return 1;
 	if (!strncmp(template->name, "CD Capture ", 11))
 		/* CD in is actually connected to the video in pin */
 		template->private_value ^= AC97_CD ^ AC97_VIDEO;
@@ -840,9 +850,8 @@
 	.dac_volume_min = 0x0f,
 	.dac_volume_max = 0xff,
 	.misc_flags = OXYGEN_MISC_MIDI,
-	.function_flags = OXYGEN_FUNCTION_SPI |
-			  OXYGEN_FUNCTION_ENABLE_SPI_4_5,
-	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.function_flags = OXYGEN_FUNCTION_SPI,
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_I2S,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
 
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index 8f9e385..ff32111 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -477,7 +477,7 @@
 		card->dma_start_bus_addr[SND_PS3_CH_R] =
 			runtime->dma_addr + (runtime->dma_bytes / 2);
 
-		pr_debug("%s: vaddr=%p bus=%#lx\n", __func__,
+		pr_debug("%s: vaddr=%p bus=%#llx\n", __func__,
 			 card->dma_start_vaddr[SND_PS3_CH_L],
 			 card->dma_start_bus_addr[SND_PS3_CH_L]);
 
@@ -1030,7 +1030,7 @@
 		pr_info("%s: nullbuffer alloc failed\n", __func__);
 		goto clean_preallocate;
 	}
-	pr_debug("%s: null vaddr=%p dma=%#lx\n", __func__,
+	pr_debug("%s: null vaddr=%p dma=%#llx\n", __func__,
 		 the_card.null_buffer_start_vaddr,
 		 the_card.null_buffer_start_dma_addr);
 	/* set default sample rate/word width */
diff --git a/sound/soc/au1x/dbdma2.c b/sound/soc/au1x/dbdma2.c
index 74c823d..bc8d654 100644
--- a/sound/soc/au1x/dbdma2.c
+++ b/sound/soc/au1x/dbdma2.c
@@ -187,7 +187,7 @@
 					au1x_pcm_dmatx_cb, (void *)pcd);
 
 	if (!pcd->ddma_chan)
-		return -ENOMEM;;
+		return -ENOMEM;
 
 	au1xxx_dbdma_set_devwidth(pcd->ddma_chan, msbits);
 	au1xxx_dbdma_ring_alloc(pcd->ddma_chan, 2);
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index fd0f338..ea370a4 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -197,7 +197,7 @@
 static const unsigned int twl4030_earpiece_values[] =
 		{0x0, 0x1, 0x2, 0x4};
 
-static const struct soc_value_enum twl4030_earpiece_enum =
+static const struct soc_enum twl4030_earpiece_enum =
 	SOC_VALUE_ENUM_SINGLE(TWL4030_REG_EAR_CTL, 1, 0x7,
 			ARRAY_SIZE(twl4030_earpiece_texts),
 			twl4030_earpiece_texts,
@@ -213,7 +213,7 @@
 static const unsigned int twl4030_predrivel_values[] =
 		{0x0, 0x1, 0x2, 0x4};
 
-static const struct soc_value_enum twl4030_predrivel_enum =
+static const struct soc_enum twl4030_predrivel_enum =
 	SOC_VALUE_ENUM_SINGLE(TWL4030_REG_PREDL_CTL, 1, 0x7,
 			ARRAY_SIZE(twl4030_predrivel_texts),
 			twl4030_predrivel_texts,
@@ -229,7 +229,7 @@
 static const unsigned int twl4030_predriver_values[] =
 		{0x0, 0x1, 0x2, 0x4};
 
-static const struct soc_value_enum twl4030_predriver_enum =
+static const struct soc_enum twl4030_predriver_enum =
 	SOC_VALUE_ENUM_SINGLE(TWL4030_REG_PREDR_CTL, 1, 0x7,
 			ARRAY_SIZE(twl4030_predriver_texts),
 			twl4030_predriver_texts,
@@ -317,7 +317,7 @@
 static const unsigned int twl4030_analoglmic_values[] =
 		{0x0, 0x1, 0x2, 0x4, 0x8};
 
-static const struct soc_value_enum twl4030_analoglmic_enum =
+static const struct soc_enum twl4030_analoglmic_enum =
 	SOC_VALUE_ENUM_SINGLE(TWL4030_REG_ANAMICL, 0, 0xf,
 			ARRAY_SIZE(twl4030_analoglmic_texts),
 			twl4030_analoglmic_texts,
@@ -333,7 +333,7 @@
 static const unsigned int twl4030_analogrmic_values[] =
 		{0x0, 0x1, 0x4};
 
-static const struct soc_value_enum twl4030_analogrmic_enum =
+static const struct soc_enum twl4030_analogrmic_enum =
 	SOC_VALUE_ENUM_SINGLE(TWL4030_REG_ANAMICR, 0, 0x5,
 			ARRAY_SIZE(twl4030_analogrmic_texts),
 			twl4030_analogrmic_texts,
@@ -1280,6 +1280,8 @@
 	struct snd_soc_codec *codec = socdev->codec;
 
 	printk(KERN_INFO "TWL4030 Audio Codec remove\n");
+	snd_soc_free_pcms(socdev);
+	snd_soc_dapm_free(socdev);
 	kfree(codec);
 
 	return 0;
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
index 74abc9b4..366049d 100644
--- a/sound/soc/davinci/davinci-pcm.c
+++ b/sound/soc/davinci/davinci-pcm.c
@@ -212,7 +212,7 @@
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 		count = src - runtime->dma_addr;
 	else
-		count = dst - runtime->dma_addr;;
+		count = dst - runtime->dma_addr;
 
 	spin_unlock(&prtd->lock);
 
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 6cbe7e8..55fdb4a 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1585,37 +1585,6 @@
 EXPORT_SYMBOL_GPL(snd_soc_put_enum_double);
 
 /**
- * snd_soc_info_value_enum_double - semi enumerated double mixer info callback
- * @kcontrol: mixer control
- * @uinfo: control element information
- *
- * Callback to provide information about a double semi enumerated
- * mixer control.
- *
- * Semi enumerated mixer: the enumerated items are referred as values. Can be
- * used for handling bitfield coded enumeration for example.
- *
- * Returns 0 for success.
- */
-int snd_soc_info_value_enum_double(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_info *uinfo)
-{
-	struct soc_value_enum *e = (struct soc_value_enum *)
-			kcontrol->private_value;
-
-	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	uinfo->count = e->shift_l == e->shift_r ? 1 : 2;
-	uinfo->value.enumerated.items = e->max;
-
-	if (uinfo->value.enumerated.item > e->max - 1)
-		uinfo->value.enumerated.item = e->max - 1;
-	strcpy(uinfo->value.enumerated.name,
-		e->texts[uinfo->value.enumerated.item]);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_info_value_enum_double);
-
-/**
  * snd_soc_get_value_enum_double - semi enumerated double mixer get callback
  * @kcontrol: mixer control
  * @ucontrol: control element information
@@ -1631,8 +1600,7 @@
 	struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-	struct soc_value_enum *e = (struct soc_value_enum *)
-			kcontrol->private_value;
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	unsigned short reg_val, val, mux;
 
 	reg_val = snd_soc_read(codec, e->reg);
@@ -1671,8 +1639,7 @@
 	struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-	struct soc_value_enum *e = (struct soc_value_enum *)
-			kcontrol->private_value;
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	unsigned short val;
 	unsigned short mask;
 
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index ad0d801..a2f1da8 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -137,7 +137,7 @@
 	}
 	break;
 	case snd_soc_dapm_value_mux: {
-		struct soc_value_enum *e = (struct soc_value_enum *)
+		struct soc_enum *e = (struct soc_enum *)
 			w->kcontrols[i].private_value;
 		int val, item;
 
@@ -200,30 +200,6 @@
 	return -ENODEV;
 }
 
-/* connect value_mux widget to it's interconnecting audio paths */
-static int dapm_connect_value_mux(struct snd_soc_codec *codec,
-	struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest,
-	struct snd_soc_dapm_path *path, const char *control_name,
-	const struct snd_kcontrol_new *kcontrol)
-{
-	struct soc_value_enum *e = (struct soc_value_enum *)
-			kcontrol->private_value;
-	int i;
-
-	for (i = 0; i < e->max; i++) {
-		if (!(strcmp(control_name, e->texts[i]))) {
-			list_add(&path->list, &codec->dapm_paths);
-			list_add(&path->list_sink, &dest->sources);
-			list_add(&path->list_source, &src->sinks);
-			path->name = (char *)e->texts[i];
-			dapm_set_path_status(dest, path, 0);
-			return 0;
-		}
-	}
-
-	return -ENODEV;
-}
-
 /* connect mixer widget to it's interconnecting audio paths */
 static int dapm_connect_mixer(struct snd_soc_codec *codec,
 	struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest,
@@ -744,7 +720,8 @@
 	struct snd_soc_dapm_path *path;
 	int found = 0;
 
-	if (widget->id != snd_soc_dapm_mux)
+	if (widget->id != snd_soc_dapm_mux &&
+	    widget->id != snd_soc_dapm_value_mux)
 		return -ENODEV;
 
 	if (!snd_soc_test_bits(widget->codec, e->reg, mask, val))
@@ -774,45 +751,6 @@
 	return 0;
 }
 
-/* test and update the power status of a value_mux widget */
-static int dapm_value_mux_update_power(struct snd_soc_dapm_widget *widget,
-				 struct snd_kcontrol *kcontrol, int mask,
-				 int mux, int val, struct soc_value_enum *e)
-{
-	struct snd_soc_dapm_path *path;
-	int found = 0;
-
-	if (widget->id != snd_soc_dapm_value_mux)
-		return -ENODEV;
-
-	if (!snd_soc_test_bits(widget->codec, e->reg, mask, val))
-		return 0;
-
-	/* find dapm widget path assoc with kcontrol */
-	list_for_each_entry(path, &widget->codec->dapm_paths, list) {
-		if (path->kcontrol != kcontrol)
-			continue;
-
-		if (!path->name || !e->texts[mux])
-			continue;
-
-		found = 1;
-		/* we now need to match the string in the enum to the path */
-		if (!(strcmp(path->name, e->texts[mux])))
-			path->connect = 1; /* new connection */
-		else
-			path->connect = 0; /* old connection must be
-					      powered down */
-	}
-
-	if (found) {
-		dapm_power_widgets(widget->codec, SND_SOC_DAPM_STREAM_NOP);
-		dump_dapm(widget->codec, "mux power update");
-	}
-
-	return 0;
-}
-
 /* test and update the power status of a mixer or switch widget */
 static int dapm_mixer_update_power(struct snd_soc_dapm_widget *widget,
 				   struct snd_kcontrol *kcontrol, int reg,
@@ -1045,17 +983,12 @@
 		path->connect = 1;
 		return 0;
 	case snd_soc_dapm_mux:
+	case snd_soc_dapm_value_mux:
 		ret = dapm_connect_mux(codec, wsource, wsink, path, control,
 			&wsink->kcontrols[0]);
 		if (ret != 0)
 			goto err;
 		break;
-	case snd_soc_dapm_value_mux:
-		ret = dapm_connect_value_mux(codec, wsource, wsink, path,
-			control, &wsink->kcontrols[0]);
-		if (ret != 0)
-			goto err;
-		break;
 	case snd_soc_dapm_switch:
 	case snd_soc_dapm_mixer:
 		ret = dapm_connect_mixer(codec, wsource, wsink, path, control);
@@ -1382,8 +1315,7 @@
 	struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
-	struct soc_value_enum *e = (struct soc_value_enum *)
-			kcontrol->private_value;
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	unsigned short reg_val, val, mux;
 
 	reg_val = snd_soc_read(widget->codec, e->reg);
@@ -1423,8 +1355,7 @@
 	struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
-	struct soc_value_enum *e = (struct soc_value_enum *)
-			kcontrol->private_value;
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	unsigned short val, mux;
 	unsigned short mask;
 	int ret = 0;
@@ -1443,7 +1374,7 @@
 
 	mutex_lock(&widget->codec->mutex);
 	widget->value = val;
-	dapm_value_mux_update_power(widget, kcontrol, mask, mux, val, e);
+	dapm_mux_update_power(widget, kcontrol, mask, mux, val, e);
 	if (widget->event) {
 		if (widget->event_flags & SND_SOC_DAPM_PRE_REG) {
 			ret = widget->event(widget,
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index d44bf98..41c3875 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -2057,7 +2057,7 @@
 	if (err)
 		return err;
 
-	sprintf(card->longname, "%s at 0x%lx, irq %d",
+	sprintf(card->longname, "%s at 0x%llx, irq %d",
 		card->shortname,
 		op->resource[0].start,
 		op->irqs[0]);
diff --git a/sound/usb/caiaq/caiaq-device.c b/sound/usb/caiaq/caiaq-device.c
index a62500e..41c36b0 100644
--- a/sound/usb/caiaq/caiaq-device.c
+++ b/sound/usb/caiaq/caiaq-device.c
@@ -42,7 +42,7 @@
 #endif
 
 MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
-MODULE_DESCRIPTION("caiaq USB audio, version 1.3.9");
+MODULE_DESCRIPTION("caiaq USB audio, version 1.3.10");
 MODULE_LICENSE("GPL");
 MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2},"
 			 "{Native Instruments, RigKontrol3},"
diff --git a/sound/usb/caiaq/caiaq-device.h b/sound/usb/caiaq/caiaq-device.h
index f9fbdbae..ab56e73 100644
--- a/sound/usb/caiaq/caiaq-device.h
+++ b/sound/usb/caiaq/caiaq-device.h
@@ -75,6 +75,7 @@
 	wait_queue_head_t ep1_wait_queue;
 	wait_queue_head_t prepare_wait_queue;
 	int spec_received, audio_parm_answer;
+	int midi_out_active;
 
 	char vendor_name[CAIAQ_USB_STR_LEN];
 	char product_name[CAIAQ_USB_STR_LEN];
diff --git a/sound/usb/caiaq/caiaq-midi.c b/sound/usb/caiaq/caiaq-midi.c
index 30b57f9..f19fd36 100644
--- a/sound/usb/caiaq/caiaq-midi.c
+++ b/sound/usb/caiaq/caiaq-midi.c
@@ -59,6 +59,11 @@
 
 static int snd_usb_caiaq_midi_output_close(struct snd_rawmidi_substream *substream)
 {
+	struct snd_usb_caiaqdev *dev = substream->rmidi->private_data;
+	if (dev->midi_out_active) {
+		usb_kill_urb(&dev->midi_out_urb);
+		dev->midi_out_active = 0;
+	}
 	return 0;
 }
 
@@ -69,7 +74,8 @@
 	
 	dev->midi_out_buf[0] = EP1_CMD_MIDI_WRITE;
 	dev->midi_out_buf[1] = 0; /* port */
-	len = snd_rawmidi_transmit_peek(substream, dev->midi_out_buf+3, EP1_BUFSIZE-3);
+	len = snd_rawmidi_transmit(substream, dev->midi_out_buf + 3,
+				   EP1_BUFSIZE - 3);
 	
 	if (len <= 0)
 		return;
@@ -79,24 +85,24 @@
 	
 	ret = usb_submit_urb(&dev->midi_out_urb, GFP_ATOMIC);
 	if (ret < 0)
-		log("snd_usb_caiaq_midi_send(%p): usb_submit_urb() failed, %d\n",
-				substream, ret);
+		log("snd_usb_caiaq_midi_send(%p): usb_submit_urb() failed,"
+		    "ret=%d, len=%d\n",
+		    substream, ret, len);
+	else
+		dev->midi_out_active = 1;
 }
 
 static void snd_usb_caiaq_midi_output_trigger(struct snd_rawmidi_substream *substream, int up)
 {
 	struct snd_usb_caiaqdev *dev = substream->rmidi->private_data;
 	
-	if (dev->midi_out_substream != NULL)
-		return;
-	
-	if (!up) {
+	if (up) {
+		dev->midi_out_substream = substream;
+		if (!dev->midi_out_active)
+			snd_usb_caiaq_midi_send(dev, substream);
+	} else {
 		dev->midi_out_substream = NULL;
-		return;
 	}
-	
-	dev->midi_out_substream = substream;
-	snd_usb_caiaq_midi_send(dev, substream);
 }
 
 
@@ -161,16 +167,14 @@
 void snd_usb_caiaq_midi_output_done(struct urb* urb)
 {
 	struct snd_usb_caiaqdev *dev = urb->context;
-      	char *buf = urb->transfer_buffer;
 	
+	dev->midi_out_active = 0;
 	if (urb->status != 0)
 		return;
 
 	if (!dev->midi_out_substream)
 		return;
 
-	snd_rawmidi_transmit_ack(dev->midi_out_substream, buf[2]);
-	dev->midi_out_substream = NULL;
 	snd_usb_caiaq_midi_send(dev, dev->midi_out_substream);
 }
 
diff --git a/sound/usb/usbquirks.h b/sound/usb/usbquirks.h
index 9211575..5d8ef09 100644
--- a/sound/usb/usbquirks.h
+++ b/sound/usb/usbquirks.h
@@ -128,6 +128,14 @@
 	.bInterfaceClass = USB_CLASS_AUDIO,
 	.bInterfaceSubClass = USB_SUBCLASS_AUDIO_CONTROL
 },
+{
+	USB_DEVICE(0x046d, 0x0990),
+	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+		.vendor_name = "Logitech, Inc.",
+		.product_name = "QuickCam Pro 9000",
+		.ifnum = QUIRK_NO_INTERFACE
+	}
+},
 
 /*
  * Yamaha devices
diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c
index ca26c53..11639bd 100644
--- a/sound/usb/usx2y/usbusx2y.c
+++ b/sound/usb/usx2y/usbusx2y.c
@@ -238,7 +238,7 @@
 					send = 0;
 				for (j = 0; j < URBS_AsyncSeq  &&  !err; ++j)
 					if (0 == usX2Y->AS04.urb[j]->status) {
-						struct us428_p4out *p4out = us428ctls->p4out + send;	// FIXME if more then 1 p4out is new, 1 gets lost.
+						struct us428_p4out *p4out = us428ctls->p4out + send;	// FIXME if more than 1 p4out is new, 1 gets lost.
 						usb_fill_bulk_urb(usX2Y->AS04.urb[j], usX2Y->chip.dev,
 								  usb_sndbulkpipe(usX2Y->chip.dev, 0x04), &p4out->val.vol, 
 								  p4out->type == eLT_Light ? sizeof(struct us428_lights) : 5,